diff --git a/unified-tina/Makefile b/unified-tina/Makefile new file mode 100755 index 0000000..69cfb1e --- /dev/null +++ b/unified-tina/Makefile @@ -0,0 +1,11 @@ + +all:$(TARGET) install + +install: $(TARGET) + -@mkdir -p $(INSTALL_PREFIX)/usr/lib + -@mkdir -p $(INSTALL_PREFIX)/usr/include + @cp -rf inc/* $(INSTALL_PREFIX)/usr/include + @cp lib/$(C_LIB_TYPE)/*.so* $(INSTALL_PREFIX)/usr/lib + +clean: + rm -rf *.o *~ diff --git a/unified-tina/common.target b/unified-tina/common.target new file mode 100644 index 0000000..297105e --- /dev/null +++ b/unified-tina/common.target @@ -0,0 +1,109 @@ +############################################################################## +# +# Copyright (c) 2005 - 2021 by Vivante Corp. All rights reserved. +# +# The material in this file is confidential and contains trade secrets +# of Vivante Corporation. This is proprietary information owned by +# Vivante Corporation. No part of this work may be disclosed, +# reproduced, copied, transmitted, or used in any way for any purpose, +# without the express written permission of Vivante Corporation. +# +############################################################################## + + +################################################################################ +# Define a shortcut for the main target. + +TARGET_OUTPUT = $(OBJ_DIR)/$(TARGET_NAME) + +ifeq ($(TARGET_SONAME),) +TARGET_SONAME := $(TARGET_NAME) +endif + +ifeq ($(OBJ_DIR),) +ifeq ($(DEBUG), 1) +OBJ_DIR ?= bin_d +else +OBJ_DIR ?= bin_r +endif +endif + +################################################################################ +# Specify targets. + +all: $(TARGET_OUTPUT) + +clean: + @rm -rf $(OBJ_DIR)/* $(OBJ_DIR) + @rm -rf $(CLEAN_EXTRA) + +install: $(TARGET_OUTPUT) +ifneq ($(INSTALL_DIR),) + @mkdir -p $(INSTALL_DIR) + @-cp $(TARGET_OUTPUT) $(INSTALL_DIR) +endif + +MAKEFILE_NAME = makefile.linux + +ifeq ($(gcdSTATIC_LINK),1) +ifneq ($(USE_ARMCC), 1) + PFLAGS += -static +else + PFLAGS += -L--no_search_dynamic_libraries +endif +else +ifneq ($(QNX), 1) +LIBS += -lrt +endif +PFLAGS += -Wl,-rpath-link $(VIVANTE_SDK_LIB) +endif + +ifeq ($(PROGRAM), 1) +$(TARGET_OUTPUT): $(OBJECTS) +ifeq ($(SRC_CXX),) + @echo " LINK \033[1m$(notdir $@)\033[0m" + @$(CC) $(PFLAGS) $(OBJECTS) -o $(TARGET_OUTPUT) $(LIBS) +else + @echo " LINK \033[1m$(notdir $@)\033[0m" + @$(CXX) $(PFLAGS) $(OBJECTS) -o $(TARGET_OUTPUT) $(LIBS) +endif + +ifneq ($(USE_ARMCC), 1) +ifneq ($(DEBUG), 1) + @$(STRIP) $(TARGET_OUTPUT) +endif +endif +endif + +ifeq ($(DYNAMIC), 1) +ifeq ($(USE_ARMCC), 1) +LDFLAGS += --shared -L--soname=,$(TARGET_NAME) +else +LDFLAGS += -Wall -shared -Wl,-soname,$(TARGET_NAME) -Wl,-z,defs +endif +$(TARGET_OUTPUT): $(OBJECTS) + @echo " LINK \033[1m$(notdir $@)\033[0m" + @$(CC) $(LDFLAGS) $(OBJECTS) -o $(TARGET_OUTPUT) $(LIBS) +endif + +ifeq ($(STATIC), 1) +$(TARGET_OUTPUT): $(OBJECTS) + @echo " ARCHIVE \033[1m$(notdir $@)\033[0m" + @$(AR) -r -c $(TARGET_OUTPUT) $(OBJECTS) + $(RANLIB) $(TARGET_OUTPUT) +endif + +$(OBJ_DIR)/%.o: %.c + @echo " COMPILE $(abspath $<)" + @mkdir -p $(OBJ_DIR) + @$(CC) -c $(CFLAGS) -o $@ $< + +$(OBJ_DIR)/%.o: %.cpp + @echo " COMPILE $(abspath $<)" + @mkdir -p $(OBJ_DIR) + @$(CXX) -c $(CFLAGS) -o $@ $< + +$(OBJ_DIR)/%.o: %.cc + @echo " COMPILE $(abspath $<)" + @mkdir -p $(OBJ_DIR) + @$(CXX) -c $(CFLAGS) -o $@ $< diff --git a/unified-tina/inc/HAL/aqHal.h b/unified-tina/inc/HAL/aqHal.h new file mode 100644 index 0000000..6deb4fe --- /dev/null +++ b/unified-tina/inc/HAL/aqHal.h @@ -0,0 +1 @@ +#include "HAL/gc_hal.h" diff --git 
a/unified-tina/inc/HAL/gc_hal.h b/unified-tina/inc/HAL/gc_hal.h new file mode 100644 index 0000000..30acc5e --- /dev/null +++ b/unified-tina/inc/HAL/gc_hal.h @@ -0,0 +1,1636 @@ +/**************************************************************************** +* +* Copyright (c) 2005 - 2023 by Vivante Corp. All rights reserved. +* +* The material in this file is confidential and contains trade secrets +* of Vivante Corporation. This is proprietary information owned by +* Vivante Corporation. No part of this work may be disclosed, +* reproduced, copied, transmitted, or used in any way for any purpose, +* without the express written permission of Vivante Corporation. +* +*****************************************************************************/ + + +#ifndef __gc_hal_h_ +#define __gc_hal_h_ + +#include "gc_hal_types.h" +#include "gc_hal_enum.h" +#include "gc_hal_base.h" +#include "gc_hal_profiler.h" +#include "gc_hal_driver.h" +#if gcdENABLE_3D +#include "gc_hal_statistics.h" +#endif + +#ifdef __cplusplus +extern "C" { +#endif + +typedef struct _gckVIDMEM *gckVIDMEM; +typedef struct _gckKERNEL *gckKERNEL; +typedef struct _gckCOMMAND *gckCOMMAND; +typedef struct _gckEVENT *gckEVENT; +typedef struct _gckDB *gckDB; +typedef struct _gckDVFS *gckDVFS; +typedef struct _gckMMU *gckMMU; +typedef struct _gcsDEVICE *gckDEVICE; + +/****************************************************************************** + ****************************** Alignment Macros ****************************** + ******************************************************************************/ + +/* Alignment with a non-power of two value. */ +#define gcmALIGN_NP2(n, align) (((n) + (align) - 1) - (((n) + (align) - 1) % (align))) + +#define gcmALIGN_NP2_SAFE(n, align) \ +(\ + (gcmALIGN_NP2((n) & ~0ULL, (align) & ~0ULL) ^ gcmALIGN_NP2(n, align)) ? \ + (n) : gcmALIGN_NP2(n, align) \ +) + +/* Alignment with a power of two value. */ +#define gcmALIGN(n, align) (((n) + ((align) - 1)) & ~((align) - 1)) + +#define gcmALIGN_SAFE(n, align) \ +(\ + (gcmALIGN((n) & ~0ULL, (align) & ~0ULL) ^ gcmALIGN(n, align)) ? \ + (n) : gcmALIGN(n, align) \ +) + +#define gcmALIGN_CHECK_OVERFLOW(n, align) \ +(\ + (gcmALIGN((n) & ~0ULL, (align) & ~0ULL) ^ gcmALIGN(n, align)) ? 
\ + gcvSTATUS_RESLUT_OVERFLOW : gcvSTATUS_OK \ +) + +#define gcmALIGN_BASE(n, align) \ +(\ + ((n) & ~((align) - 1)) \ +) + +/****************************************************************************** + **************************** Element Count Macro ***************************** + ******************************************************************************/ + +#define gcmSIZEOF(a) ((gctSIZE_T)(sizeof(a))) + +#define gcmCOUNTOF(a) (sizeof(a) / sizeof(a[0])) + +/****************************************************************************** + ******************************** Cast Macro ********************************** + ******************************************************************************/ +#define gcmNAME_TO_PTR(na) \ + gckKERNEL_QueryPointerFromName(kernel, gcmALL_TO_UINT32(na)) + +#define gcmPTR_TO_NAME(ptr) \ + gckKERNEL_AllocateNameFromPointer(kernel, ptr) + +#define gcmRELEASE_NAME(na) \ + gckKERNEL_DeleteName(kernel, gcmALL_TO_UINT32(na)) + +#define gcmALL_TO_UINT32(t) ((gctUINT32)(gctUINTPTR_T)(t)) + +#define gcmPTR_TO_UINT64(p) ((gctUINT64)(gctUINTPTR_T)(p)) + +#define gcmUINT64_TO_PTR(u) ((gctPOINTER)(gctUINTPTR_T)(u)) + +#define gcmUINT64_TO_TYPE(u, t) ((t)(gctUINTPTR_T)(u)) + +/****************************************************************************** + ******************************* Useful Macro ********************************* + ******************************************************************************/ + +#define gcvINVALID_ADDRESS ~0ULL +#define gcvINVALID_VALUE 0xCCCCCCCC + +#define gcvINVALID_PHYSICAL_ADDRESS ~0ULL + +#define gcmGET_PRE_ROTATION(rotate) \ + ((rotate) & (~(gcvSURF_POST_FLIP_X | gcvSURF_POST_FLIP_Y))) + +#define gcmGET_POST_ROTATION(rotate) \ + ((rotate) & (gcvSURF_POST_FLIP_X | gcvSURF_POST_FLIP_Y)) + +typedef struct _gckHARDWARE *gckHARDWARE; + +#define gcdMAX_GPU_COUNT gcvCORE_COUNT + +#define gcdMAX_SURF_LAYERS 4 + +#define gcdMAX_DRAW_BUFFERS 16 + +#define gcdMAX_3DGPU_COUNT 8 + +#define gcdMAX_VERTEX_STREAM_COUNT 4 +/******************************************************************************* + ** + ** gcmVERIFY_OBJECT + ** + ** Assert if an object is invalid or is not of the specified type. If the + ** object is invalid or not of the specified type, gcvSTATUS_INVALID_OBJECT + ** will be returned from the current function. In retail mode this macro + ** does nothing. + ** + ** ARGUMENTS: + ** + ** obj Object to test. + ** t Expected type of the object. 
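Editor's note: the alignment, element-count, and pointer-cast helpers defined above are plain integer arithmetic. A minimal sketch of what they evaluate to (the wrapper function is illustrative only and not part of the SDK sources):

    /* Illustrative only: expected results of the helper macros above. */
    static void example_alignment(void)
    {
        gctUINT32 table[4];

        /* Power-of-two alignment: round 100 up to a 64-byte boundary -> 128. */
        gctSIZE_T a = gcmALIGN(100, 64);

        /* Non-power-of-two alignment: round 100 up to a multiple of 24 -> 120. */
        gctSIZE_T b = gcmALIGN_NP2(100, 24);

        /* Round 100 down to the previous 64-byte boundary -> 64. */
        gctSIZE_T c = gcmALIGN_BASE(100, 64);

        /* Element count of a fixed-size array -> 4. */
        gctSIZE_T n = gcmCOUNTOF(table);

        /* 64-bit <-> pointer round trip, used when handles cross the ioctl boundary. */
        gctUINT64 u = gcmPTR_TO_UINT64(table);
        gctPOINTER p = gcmUINT64_TO_PTR(u);

        (void)a; (void)b; (void)c; (void)n; (void)p;
    }
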
+ */ +#if gcmIS_DEBUG(gcdDEBUG_TRACE) +#define _gcmVERIFY_OBJECT(prefix, obj, t) \ + if ((obj) == gcvNULL) \ + { \ + prefix##TRACE(gcvLEVEL_ERROR, \ + #prefix "VERIFY_OBJECT failed: NULL"); \ + prefix##TRACE(gcvLEVEL_ERROR, " expected: %c%c%c%c", \ + gcmCC_PRINT(t)); \ + prefix##ASSERT((obj) != gcvNULL); \ + prefix##FOOTER_ARG("status=%d", gcvSTATUS_INVALID_OBJECT); \ + return gcvSTATUS_INVALID_OBJECT; \ + } \ + else if (((gcsOBJECT*) (obj))->type != t) \ + { \ + prefix##TRACE(gcvLEVEL_ERROR, \ + #prefix "VERIFY_OBJECT failed: %c%c%c%c", \ + gcmCC_PRINT(((gcsOBJECT*) (obj))->type)); \ + prefix##TRACE(gcvLEVEL_ERROR, " expected: %c%c%c%c", \ + gcmCC_PRINT(t)); \ + prefix##ASSERT(((gcsOBJECT*)(obj))->type == t); \ + prefix##FOOTER_ARG("status=%d", gcvSTATUS_INVALID_OBJECT); \ + return gcvSTATUS_INVALID_OBJECT; \ + } + +#define gcmVERIFY_OBJECT(obj, t) _gcmVERIFY_OBJECT(gcm, obj, t) +#define gcmkVERIFY_OBJECT(obj, t) _gcmVERIFY_OBJECT(gcmk, obj, t) +#else +#define gcmVERIFY_OBJECT(obj, t) do {} while (gcvFALSE) +#define gcmkVERIFY_OBJECT(obj, t) do {} while (gcvFALSE) +#endif + +/******************************************************************************/ +/*VERIFY_OBJECT if special return expected*/ +/******************************************************************************/ +#ifndef EGL_API_ANDROID +# define _gcmVERIFY_OBJECT_RETURN(prefix, obj, t, retVal) \ + do { \ + if ((obj) == gcvNULL) { \ + prefix##PRINT_VERSION(); \ + prefix##TRACE(gcvLEVEL_ERROR, \ + #prefix "VERIFY_OBJECT_RETURN failed: NULL"); \ + prefix##TRACE(gcvLEVEL_ERROR, " expected: %c%c%c%c", \ + gcmCC_PRINT(t)); \ + prefix##ASSERT((obj) != gcvNULL); \ + prefix##FOOTER_ARG("retVal=%d", retVal); \ + return retVal; \ + } else if (((gcsOBJECT *)(obj))->type != t) { \ + prefix##PRINT_VERSION(); \ + prefix##TRACE(gcvLEVEL_ERROR, \ + #prefix "VERIFY_OBJECT_RETURN failed: %c%c%c%c", \ + gcmCC_PRINT(((gcsOBJECT *)(obj))->type)); \ + prefix##TRACE(gcvLEVEL_ERROR, " expected: %c%c%c%c", \ + gcmCC_PRINT(t)); \ + prefix##ASSERT(((gcsOBJECT *)(obj))->type == t); \ + prefix##FOOTER_ARG("retVal=%d", retVal); \ + return retVal; \ + } \ + } while (gcvFALSE) +# define gcmVERIFY_OBJECT_RETURN(obj, t, retVal) \ + _gcmVERIFY_OBJECT_RETURN(gcm, obj, t, retVal) +# define gcmkVERIFY_OBJECT_RETURN(obj, t, retVal) \ + _gcmVERIFY_OBJECT_RETURN(gcmk, obj, t, retVal) +#else +# define gcmVERIFY_OBJECT_RETURN(obj, t) do {} while (gcvFALSE) +# define gcmkVERIFY_OBJECT_RETURN(obj, t) do {} while (gcvFALSE) +#endif + +/****************************************************************************** + ********************************* gckOS Object ******************************* + ******************************************************************************/ + +/* Construct a new gckOS object. */ +gceSTATUS +gckOS_Construct(IN gctPOINTER Context, OUT gckOS *Os); + +/* Destroy an gckOS object. */ +gceSTATUS +gckOS_Destroy(IN gckOS Os); + +/* Query the video memory. */ +gceSTATUS +gckOS_QueryVideoMemory(IN gckOS Os, + OUT gctPHYS_ADDR *InternalAddress, + OUT gctSIZE_T *InternalSize, + OUT gctPHYS_ADDR *ExternalAddress, + OUT gctSIZE_T *ExternalSize, + OUT gctPHYS_ADDR *ContiguousAddress, + OUT gctSIZE_T *ContiguousSize); + +/* Allocate memory from the heap. */ +gceSTATUS +gckOS_Allocate(IN gckOS Os, IN gctSIZE_T Bytes, OUT gctPOINTER *Memory); + +/* Free allocated memory. */ +gceSTATUS +gckOS_Free(IN gckOS Os, IN gctPOINTER Memory); + +/* Wrapper for allocation memory.. 
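Editor's note: a sketch of how the gcmkVERIFY_OBJECT macro defined above is typically placed at the top of a kernel-side entry point. The function is hypothetical, and gcvOBJ_OS is assumed to be the object-type tag declared in gc_hal_enum.h (not shown in this patch):

    /* Hypothetical entry point: reject a NULL or wrongly-typed object up front. */
    static gceSTATUS example_entry(gckOS Os)
    {
        /* In debug builds this returns gcvSTATUS_INVALID_OBJECT from the current
         * function when Os is NULL or its type tag is not gcvOBJ_OS (assumed
         * enum value); in retail builds it compiles to nothing. */
        gcmkVERIFY_OBJECT(Os, gcvOBJ_OS);

        /* ... function body ... */
        return gcvSTATUS_OK;
    }
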
*/ +gceSTATUS +gckOS_AllocateMemory(IN gckOS Os, IN gctSIZE_T Bytes, OUT gctPOINTER *Memory); + +/* Wrapper for freeing memory. */ +gceSTATUS +gckOS_FreeMemory(IN gckOS Os, IN gctPOINTER Memory); + +/* Allocate paged memory. */ +gceSTATUS +gckOS_AllocatePagedMemory(IN gckOS Os, + IN gckKERNEL Kernel, + IN gctUINT32 Flag, + IN gceVIDMEM_TYPE Type, + IN OUT gctSIZE_T *Bytes, + OUT gctUINT32 *Gid, + OUT gctPHYS_ADDR *Physical); + +/* Lock pages. */ +gceSTATUS +gckOS_LockPages(IN gckOS Os, + IN gctPHYS_ADDR Physical, + IN gctSIZE_T Bytes, + IN gctBOOL Cacheable, + OUT gctPOINTER *Logical); + +/* Map pages. */ +gceSTATUS +gckOS_MapPagesEx(IN gckOS Os, + IN gckKERNEL Kernel, + IN gckMMU Mmu, + IN gctPHYS_ADDR Physical, + IN gctSIZE_T Offset, + IN gctSIZE_T PageCount, + IN gctADDRESS Address, + IN gctPOINTER PageTable, + IN gctBOOL Writable, + IN gceVIDMEM_TYPE Type); + +/* Map 1M pages. */ +gceSTATUS +gckOS_Map1MPages(IN gckOS Os, + IN gckKERNEL Kernel, + IN gckMMU Mmu, + IN gctPHYS_ADDR Physical, + IN gctSIZE_T PageCount, + IN gctADDRESS Address, + IN gctPOINTER PageTable, + IN gctBOOL Writable, + IN gceVIDMEM_TYPE Type); + +gceSTATUS +gckOS_UnmapPages(IN gckOS Os, IN gctSIZE_T PageCount, IN gctADDRESS Address); + +/* Unlock pages. */ +gceSTATUS +gckOS_UnlockPages(IN gckOS Os, IN gctPHYS_ADDR Physical, + IN gctSIZE_T Bytes, IN gctPOINTER Logical); + +/* Free paged memory. */ +gceSTATUS +gckOS_FreePagedMemory(IN gckOS Os, IN gctPHYS_ADDR Physical, IN gctSIZE_T Bytes); + +/* Allocate non-paged memory. */ +gceSTATUS +gckOS_AllocateNonPagedMemory(IN gckOS Os, + IN gckKERNEL Kernel, + IN gctBOOL InUserSpace, + IN gctUINT32 Flag, + IN OUT gctSIZE_T *Bytes, + OUT gctPHYS_ADDR *Physical, + OUT gctPOINTER *Logical); + +/* Free non-paged memory. */ +gceSTATUS +gckOS_FreeNonPagedMemory(IN gckOS Os, + IN gctPHYS_ADDR Physical, + IN gctPOINTER Logical, + IN gctSIZE_T Bytes); + +/* Reserved memory. */ +gceSTATUS +gckOS_RequestReservedMemory(gckOS Os, + gctPHYS_ADDR_T Start, + gctSIZE_T Size, + const char *Name, + gctBOOL Requested, + gctPOINTER *MemoryHandle); + +void +gckOS_ReleaseReservedMemory(gckOS Os, gctPOINTER MemoryHandle); + +/* Reserved memory sub area */ +gceSTATUS +gckOS_RequestReservedMemoryArea(IN gckOS Os, + IN gctPOINTER MemoryHandle, + IN gctSIZE_T Offset, + IN gctSIZE_T Size, + OUT gctPOINTER *MemoryAreaHandle); + +void +gckOS_ReleaseReservedMemoryArea(gctPOINTER MemoryAreaHandle); + +/* Get the number fo bytes per page. */ +gceSTATUS +gckOS_GetPageSize(IN gckOS Os, OUT gctSIZE_T *PageSize); + +/* Get the physical address of a corresponding logical address. */ +gceSTATUS +gckOS_GetPhysicalAddress(IN gckOS Os, IN gctPOINTER Logical, + OUT gctPHYS_ADDR_T *Address); + +/* Get real physical address from handle. */ +gceSTATUS +gckOS_GetPhysicalFromHandle(IN gckOS Os, + IN gctPHYS_ADDR Physical, + IN gctSIZE_T Offset, + OUT gctPHYS_ADDR_T *PhysicalAddress); + +/* Get the physical address of a corresponding user logical address. */ +gceSTATUS +gckOS_UserLogicalToPhysical(IN gckOS Os, IN gctPOINTER Logical, + OUT gctPHYS_ADDR_T *Address); + +/* Map physical memory. */ +gceSTATUS +gckOS_MapPhysical(IN gckOS Os, + IN gctPHYS_ADDR_T Physical, + IN gctSIZE_T Bytes, + OUT gctPOINTER *Logical); + +/* Unmap previously mapped physical memory. */ +gceSTATUS +gckOS_UnmapPhysical(IN gckOS Os, IN gctPOINTER Logical, IN gctSIZE_T Bytes); + +/* Read data from a hardware register. */ +gceSTATUS +gckOS_ReadRegister(IN gckOS Os, IN gctUINT32 Address, OUT gctUINT32 *Data); + +/* Read data from a hardware register. 
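Editor's note: the paged-memory calls declared above pair up as allocate/lock/unlock/free. A minimal sketch of that sequence, assuming valid Os and Kernel handles and leaving the allocation Flag and gceVIDMEM_TYPE values to the caller (their meanings come from headers not shown here):

    static gceSTATUS example_paged_alloc(gckOS Os, gckKERNEL Kernel,
                                         gctUINT32 Flag, gceVIDMEM_TYPE Type)
    {
        gceSTATUS status;
        gctSIZE_T bytes = 64 << 10;        /* requested size; may be rounded up */
        gctUINT32 gid = 0;
        gctPHYS_ADDR physical = gcvNULL;
        gctPOINTER logical = gcvNULL;

        /* Allocate the backing pages. */
        status = gckOS_AllocatePagedMemory(Os, Kernel, Flag, Type,
                                           &bytes, &gid, &physical);
        if (status != gcvSTATUS_OK)
            return status;

        /* Map the pages so the kernel can touch them (non-cacheable here). */
        status = gckOS_LockPages(Os, physical, bytes, gcvFALSE, &logical);
        if (status == gcvSTATUS_OK)
        {
            /* ... use 'logical' ... */
            gckOS_UnlockPages(Os, physical, bytes, logical);
        }

        gckOS_FreePagedMemory(Os, physical, bytes);
        return status;
    }
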
*/ +gceSTATUS +gckOS_ReadRegisterEx(IN gckOS Os, + IN gckKERNEL Kernel, + IN gctUINT32 Address, + OUT gctUINT32 *Data); + +/* Write data to a hardware register. */ +gceSTATUS +gckOS_WriteRegister(IN gckOS Os, IN gctUINT32 Address, IN gctUINT32 Data); + +/* Write data to a hardware register. */ +gceSTATUS +gckOS_WriteRegisterEx(IN gckOS Os, + IN gckKERNEL Kernel, + IN gctUINT32 Address, + IN gctUINT32 Data); + +/* Write data to a hardware register without dump. */ +gceSTATUS +gckOS_WriteRegisterEx_NoDump(IN gckOS Os, + IN gckKERNEL Kernel, + IN gctUINT32 Address, + IN gctUINT32 Data); + +#ifdef __QNXNTO__ +static gcmINLINE gceSTATUS +gckOS_WriteMemory(IN gckOS Os, IN gctPOINTER Address, IN gctUINT32 Data) +{ + /* Write memory. */ + *(gctUINT32 *)Address = Data; + return gcvSTATUS_OK; +} + +#else +/* Write data to a 32-bit memory location. */ +gceSTATUS +gckOS_WriteMemory(IN gckOS Os, IN gctPOINTER Address, IN gctUINT32 Data); +#endif + +/* Map physical memory into the process space. */ +gceSTATUS +gckOS_MapMemory(IN gckOS Os, IN gctPHYS_ADDR Physical, + IN gctSIZE_T Bytes, OUT gctPOINTER *Logical); + +/* Unmap physical memory from the specified process space. */ +gceSTATUS +gckOS_UnmapMemoryEx(IN gckOS Os, + IN gctPHYS_ADDR Physical, + IN gctSIZE_T Bytes, + IN gctPOINTER Logical, + IN gctUINT32 PID); + +/* Unmap physical memory from the process space. */ +gceSTATUS +gckOS_UnmapMemory(IN gckOS Os, IN gctPHYS_ADDR Physical, + IN gctSIZE_T Bytes, IN gctPOINTER Logical); + +/* Delete a mutex. */ +gceSTATUS +gckOS_DeleteMutex(IN gckOS Os, IN gctPOINTER Mutex); + +/* Acquire a mutex. */ +gceSTATUS +gckOS_AcquireMutex(IN gckOS Os, IN gctPOINTER Mutex, IN gctUINT32 Timeout); + +/* Release a mutex. */ +gceSTATUS +gckOS_ReleaseMutex(IN gckOS Os, IN gctPOINTER Mutex); + +/* Atomically exchange a pair of 32-bit values. */ +gceSTATUS +gckOS_AtomicExchange(IN gckOS Os, + IN OUT gctUINT32_PTR Target, + IN gctUINT32 NewValue, + OUT gctUINT32_PTR OldValue); + +/* Atomically exchange a pair of pointers. */ +gceSTATUS +gckOS_AtomicExchangePtr(IN gckOS Os, + IN OUT gctPOINTER *Target, + IN gctPOINTER NewValue, + OUT gctPOINTER *OldValue); + +gceSTATUS +gckOS_AtomSetMask(IN gctPOINTER Atom, IN gctUINT32 Mask); + +gceSTATUS +gckOS_AtomClearMask(IN gctPOINTER Atom, IN gctUINT32 Mask); + +gceSTATUS +gckOS_DumpCallStack(IN gckOS Os); + +gceSTATUS +gckOS_GetProcessNameByPid(IN gctINT Pid, IN gctSIZE_T Length, OUT gctUINT8_PTR String); + +gceSTATUS +gckOS_QueryCPUFrequency(IN gckOS Os, IN gctUINT32 CPUId, OUT gctUINT32 *Frequency); + +gceSTATUS +gckOS_TraceGpuMemory(IN gckOS Os, IN gctINT32 ProcessID, IN gctINT64 Delta); + +void +gckOS_NodeIdAssign(gckOS Os, gcuVIDMEM_NODE_PTR Node); + +#if gcdENABLE_CLEAR_FENCE +gceSTATUS +gckOS_ClearAllFence(gckDEVICE Device); + +gctUINT64 +gckOS_AllocFenceRecordId(IN gckOS Os, IN gcsUSER_FENCE_INFO_PTR fence_info); + +gcsUSER_FENCE_INFO_PTR +gckOS_ReleaseFenceRecordId(IN gckOS Os, IN gctUINT64 recordId); + +void +gckOS_PreLoadFenceRecId(IN gckOS Os); + +void +gckOS_PreLoadEndFenceRecId(IN gckOS Os); + +#endif +/******************************************************************************* + ** + ** gckOS_AtomConstruct + ** + ** Create an atom. + ** + ** INPUT: + ** + ** gckOS Os + ** Pointer to a gckOS object. + ** + ** OUTPUT: + ** + ** gctPOINTER *Atom + ** Pointer to a variable receiving the constructed atom. 
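Editor's note: a sketch of a guarded read-modify-write built from the register and mutex calls declared above. The mutex is assumed to have been created elsewhere (a gckOS_CreateMutex counterpart to gckOS_DeleteMutex is assumed to exist in a part of the header not shown), and the register offset is purely illustrative:

    static gceSTATUS example_rmw_register(gckOS Os, gckKERNEL Kernel,
                                          gctPOINTER Mutex, gctUINT32 Address,
                                          gctUINT32 SetBits)
    {
        gceSTATUS status;
        gctUINT32 data = 0;

        /* Serialize against other writers; gcvINFINITE waits forever. */
        status = gckOS_AcquireMutex(Os, Mutex, gcvINFINITE);
        if (status != gcvSTATUS_OK)
            return status;

        /* Read, set the requested bits, write back. */
        gckOS_ReadRegisterEx(Os, Kernel, Address, &data);
        gckOS_WriteRegisterEx(Os, Kernel, Address, data | SetBits);

        return gckOS_ReleaseMutex(Os, Mutex);
    }
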
+ */ +gceSTATUS +gckOS_AtomConstruct(IN gckOS Os, OUT gctPOINTER *Atom); + +/******************************************************************************* + ** + ** gckOS_AtomDestroy + ** + ** Destroy an atom. + ** + ** INPUT: + ** + ** gckOS Os + ** Pointer to a gckOS object. + ** + ** gctPOINTER Atom + ** Pointer to the atom to destroy. + ** + ** OUTPUT: + ** + ** Nothing. + */ +gceSTATUS +gckOS_AtomDestroy(IN gckOS Os, OUT gctPOINTER Atom); + +/******************************************************************************* + ** + ** gckOS_AtomGet + ** + ** Get the 32-bit value protected by an atom. + ** + ** INPUT: + ** + ** gckOS Os + ** Pointer to a gckOS object. + ** + ** gctPOINTER Atom + ** Pointer to the atom. + ** + ** OUTPUT: + ** + ** gctINT32_PTR Value + ** Pointer to a variable the receives the value of the atom. + */ +gceSTATUS +gckOS_AtomGet(IN gckOS Os, IN gctPOINTER Atom, OUT gctINT32_PTR Value); + +/******************************************************************************* + ** + ** gckOS_AtomSet + ** + ** Set the 32-bit value protected by an atom. + ** + ** INPUT: + ** + ** gckOS Os + ** Pointer to a gckOS object. + ** + ** gctPOINTER Atom + ** Pointer to the atom. + ** + ** gctINT32 Value + ** The value of the atom. + ** + ** OUTPUT: + ** + ** Nothing. + */ +gceSTATUS +gckOS_AtomSet(IN gckOS Os, IN gctPOINTER Atom, IN gctINT32 Value); + +/******************************************************************************* + ** + ** gckOS_AtomIncrement + ** + ** Atomically increment the 32-bit integer value inside an atom. + ** + ** INPUT: + ** + ** gckOS Os + ** Pointer to a gckOS object. + ** + ** gctPOINTER Atom + ** Pointer to the atom. + ** + ** OUTPUT: + ** + ** gctINT32_PTR Value + ** Pointer to a variable the receives the original value of the atom. + */ +gceSTATUS +gckOS_AtomIncrement(IN gckOS Os, IN gctPOINTER Atom, OUT gctINT32_PTR Value); + +/******************************************************************************* + ** + ** gckOS_AtomDecrement + ** + ** Atomically decrement the 32-bit integer value inside an atom. + ** + ** INPUT: + ** + ** gckOS Os + ** Pointer to a gckOS object. + ** + ** gctPOINTER Atom + ** Pointer to the atom. + ** + ** OUTPUT: + ** + ** gctINT32_PTR Value + ** Pointer to a variable the receives the original value of the atom. + */ +gceSTATUS +gckOS_AtomDecrement(IN gckOS Os, IN gctPOINTER Atom, OUT gctINT32_PTR Value); + +/* Delay a number of milliseconds. */ +gceSTATUS +gckOS_Delay(IN gckOS Os, IN gctUINT32 Delay); + +/* Delay a number of milliseconds. */ +gceSTATUS +gckOS_Udelay(IN gckOS Os, IN gctUINT32 Delay); + +/* Get time in milliseconds. */ +gceSTATUS +gckOS_GetTicks(OUT gctUINT32_PTR Time); + +/* Compare time value. */ +gceSTATUS +gckOS_TicksAfter(IN gctUINT32 Time1, IN gctUINT32 Time2, OUT gctBOOL_PTR IsAfter); + +/* Get time in microseconds. */ +gceSTATUS +gckOS_GetTime(OUT gctUINT64_PTR Time); + +/* Memory barrier. */ +gceSTATUS +gckOS_MemoryBarrier(IN gckOS Os, IN gctPOINTER Address); + +/* Map user pointer. */ +gceSTATUS +gckOS_MapUserPointer(IN gckOS Os, + IN gctPOINTER Pointer, + IN gctSIZE_T Size, + OUT gctPOINTER *KernelPointer); + +/* Unmap user pointer. */ +gceSTATUS +gckOS_UnmapUserPointer(IN gckOS Os, + IN gctPOINTER Pointer, + IN gctSIZE_T Size, + IN gctPOINTER KernelPointer); + +/******************************************************************************* + ** + ** gckOS_QueryNeedCopy + ** + ** Query whether the memory can be accessed or mapped directly or it has to be + ** copied. 
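Editor's note: the atom calls documented above are commonly used as a reference counter. A minimal sketch of that pattern; the "destroy when the count drops back to zero" policy is the caller's choice, not something mandated by the API:

    static gceSTATUS example_refcount(gckOS Os)
    {
        gceSTATUS status;
        gctPOINTER atom = gcvNULL;
        gctINT32 previous = 0;

        status = gckOS_AtomConstruct(Os, &atom);
        if (status != gcvSTATUS_OK)
            return status;

        gckOS_AtomSet(Os, atom, 1);               /* one initial reference      */
        gckOS_AtomIncrement(Os, atom, &previous); /* take an extra reference    */

        gckOS_AtomDecrement(Os, atom, &previous); /* drop the extra reference   */
        gckOS_AtomDecrement(Os, atom, &previous); /* drop the initial reference */
        if (previous == 1)                        /* we released the last one   */
            gckOS_AtomDestroy(Os, atom);

        return gcvSTATUS_OK;
    }
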
+ ** + ** INPUT: + ** + ** gckOS Os + ** Pointer to an gckOS object. + ** + ** gctUINT32 ProcessID + ** Process ID of the current process. + ** + ** OUTPUT: + ** + ** gctBOOL_PTR NeedCopy + ** Pointer to a boolean receiving gcvTRUE if the memory needs a copy or + ** gcvFALSE if the memory can be accessed or mapped dircetly. + */ +gceSTATUS +gckOS_QueryNeedCopy(IN gckOS Os, IN gctUINT32 ProcessID, OUT gctBOOL_PTR NeedCopy); + +/******************************************************************************* + ** + ** gckOS_CopyFromUserData + ** + ** Copy data from user to kernel memory. + ** + ** INPUT: + ** + ** gckOS Os + ** Pointer to an gckOS object. + ** + ** gctPOINTER KernelPointer + ** Pointer to kernel memory. + ** + ** gctPOINTER Pointer + ** Pointer to user memory. + ** + ** gctSIZE_T Size + ** Number of bytes to copy. + ** + ** OUTPUT: + ** + ** Nothing. + */ +gceSTATUS +gckOS_CopyFromUserData(IN gckOS Os, + IN gctPOINTER KernelPointer, + IN gctPOINTER Pointer, + IN gctSIZE_T Size); + +/******************************************************************************* + ** + ** gckOS_CopyToUserData + ** + ** Copy data from kernel to user memory. + ** + ** INPUT: + ** + ** gckOS Os + ** Pointer to an gckOS object. + ** + ** gctPOINTER KernelPointer + ** Pointer to kernel memory. + ** + ** gctPOINTER Pointer + ** Pointer to user memory. + ** + ** gctSIZE_T Size + ** Number of bytes to copy. + ** + ** OUTPUT: + ** + ** Nothing. + */ +gceSTATUS +gckOS_CopyToUserData(IN gckOS Os, + IN gctPOINTER KernelPointer, + IN gctPOINTER Pointer, + IN gctSIZE_T Size); + +gceSTATUS +gckOS_SuspendInterrupt(IN gckOS Os); + +gceSTATUS +gckOS_SuspendInterruptEx(IN gckOS Os, IN gceCORE Core); + +gceSTATUS +gckOS_ResumeInterrupt(IN gckOS Os); + +gceSTATUS +gckOS_ResumeInterruptEx(IN gckOS Os, IN gceCORE Core); + +/* Get the base address for the physical memory. */ +gceSTATUS +gckOS_GetBaseAddress(IN gckOS Os, OUT gctUINT32_PTR BaseAddress); + +/* Perform a memory copy. */ +gceSTATUS +gckOS_MemCopy(IN gctPOINTER Destination, + IN gctCONST_POINTER Source, + IN gctSIZE_T Bytes); + +/* Zero memory. */ +gceSTATUS +gckOS_ZeroMemory(IN gctPOINTER Memory, IN gctSIZE_T Bytes); + +/******************************************************************************* + ** + ** gckOS_GetProcessID + ** + ** Get current process ID. + ** + ** INPUT: + ** + ** Nothing. + ** + ** OUTPUT: + ** + ** gctUINT32_PTR ProcessID + ** Pointer to the variable that receives the process ID. + */ +gceSTATUS +gckOS_GetProcessID(OUT gctUINT32_PTR ProcessID); + +gceSTATUS +gckOS_GetCurrentProcessID(OUT gctUINT32_PTR ProcessID); + +/******************************************************************************* + ** + ** gckOS_GetThreadID + ** + ** Get current thread ID. + ** + ** INPUT: + ** + ** Nothing. + ** + ** OUTPUT: + ** + ** gctUINT32_PTR ThreadID + ** Pointer to the variable that receives the thread ID. + */ +gceSTATUS +gckOS_GetThreadID(OUT gctUINT32_PTR ThreadID); + +/****************************************************************************** + ********************************* Signal Object ****************************** + ******************************************************************************/ + +/* Create a signal. */ +gceSTATUS +gckOS_CreateSignal(IN gckOS Os, IN gctBOOL ManualReset, OUT gctSIGNAL *Signal); + +/* Destroy a signal. */ +gceSTATUS +gckOS_DestroySignal(IN gckOS Os, IN gctSIGNAL Signal); + +/* Signal a signal. 
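Editor's note: gckOS_QueryNeedCopy above decides between the two ways user memory can reach the kernel. A sketch of the usual branch, pairing it with gckOS_CopyFromUserData and the gckOS_MapUserPointer/UnmapUserPointer calls declared earlier; KernelBuffer is assumed to be at least Size bytes:

    static gceSTATUS example_read_user(gckOS Os, gctUINT32 ProcessID,
                                       gctPOINTER UserPtr, gctSIZE_T Size,
                                       gctPOINTER KernelBuffer)
    {
        gceSTATUS status;
        gctBOOL needCopy = gcvFALSE;
        gctPOINTER mapped = gcvNULL;

        status = gckOS_QueryNeedCopy(Os, ProcessID, &needCopy);
        if (status != gcvSTATUS_OK)
            return status;

        if (needCopy)
        {
            /* User memory must be copied into a kernel-side buffer. */
            status = gckOS_CopyFromUserData(Os, KernelBuffer, UserPtr, Size);
        }
        else
        {
            /* User memory can be mapped and read in place. */
            status = gckOS_MapUserPointer(Os, UserPtr, Size, &mapped);
            if (status == gcvSTATUS_OK)
            {
                gckOS_MemCopy(KernelBuffer, mapped, Size);
                gckOS_UnmapUserPointer(Os, UserPtr, Size, mapped);
            }
        }

        return status;
    }
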
*/ +gceSTATUS +gckOS_Signal(IN gckOS Os, IN gctSIGNAL Signal, IN gctBOOL State); + +/* Wait for a signal. */ +gceSTATUS +gckOS_WaitSignal(IN gckOS Os, IN gctSIGNAL Signal, + IN gctBOOL Interruptable, IN gctUINT32 Wait); + +#ifdef __QNXNTO__ +gceSTATUS +gckOS_SignalPulse(IN gckOS Os, IN gctSIGNAL Signal); + +gceSTATUS +gckOS_SignalPending(IN gckOS Os, IN gctSIGNAL Signal); +#endif + +/* Map a user signal to the kernel space. */ +gceSTATUS +gckOS_MapSignal(IN gckOS Os, IN gctSIGNAL Signal, + IN gctHANDLE Process, OUT gctSIGNAL *MappedSignal); + +/* Unmap a user signal */ +gceSTATUS +gckOS_UnmapSignal(IN gckOS Os, IN gctSIGNAL Signal); + +/* Get scatter-gather table from memory. */ +gceSTATUS +gckOS_MemoryGetSGT(IN gckOS Os, + IN gctPHYS_ADDR Physical, + IN gctSIZE_T Offset, + IN gctSIZE_T Bytes, + OUT gctPOINTER *SGT); + +/* Map a page range of memory to user space. */ +gceSTATUS +gckOS_MemoryMmap(IN gckOS Os, + IN gctPHYS_ADDR Physical, + IN gctSIZE_T skipPages, + IN gctSIZE_T numPages, + INOUT gctPOINTER Vma); + +/* Wrap a user memory to gctPHYS_ADDR. */ +gceSTATUS +gckOS_WrapMemory(IN gckOS Os, + IN gckKERNEL Kernel, + IN gcsUSER_MEMORY_DESC_PTR Desc, + OUT gctSIZE_T *Bytes, + OUT gctPHYS_ADDR *Physical, + OUT gctBOOL *Contiguous, + OUT gctSIZE_T *PageCountCpu); + +gceSTATUS +gckOS_GetPolicyID(IN gckOS Os, + IN gceVIDMEM_TYPE Type, + OUT gctUINT32_PTR PolicyID, + OUT gctUINT32_PTR AXIConfig); + +#if gcdENABLE_MP_SWITCH +gceSTATUS +gckOS_SwitchCoreCount(IN gckOS Os, OUT gctUINT32 *Count); +#endif + +/****************************************************************************** + ************************* Android Native Fence Sync ************************** + ******************************************************************************/ +gceSTATUS +gckOS_CreateSyncTimeline(IN gckOS Os, IN gceCORE Core, OUT gctHANDLE *Timeline); + +gceSTATUS +gckOS_DestroySyncTimeline(IN gckOS Os, IN gctHANDLE Timeline); + +gceSTATUS +gckOS_CreateNativeFence(IN gckOS Os, + IN gctHANDLE Timeline, + IN gctSIGNAL Signal, + OUT gctINT *FenceFD); + +gceSTATUS +gckOS_WaitNativeFence(IN gckOS Os, IN gctHANDLE Timeline, + IN gctINT FenceFD, IN gctUINT32 Timeout); + +#if !USE_NEW_LINUX_SIGNAL +/* Create signal to be used in the user space. */ +gceSTATUS +gckOS_CreateUserSignal(IN gckOS Os, IN gctBOOL ManualReset, OUT gctINT *SignalID); + +/* Destroy signal used in the user space. */ +gceSTATUS +gckOS_DestroyUserSignal(IN gckOS Os, IN gctINT SignalID); + +/* Wait for signal used in the user space. */ +gceSTATUS +gckOS_WaitUserSignal(IN gckOS Os, + IN gctINT SignalID, + IN gctUINT32 Wait, + OUT gceSIGNAL_STATUS *SignalStatus); + +/* Signal a signal used in the user space. */ +gceSTATUS +gckOS_SignalUserSignal(IN gckOS Os, IN gctINT SignalID, IN gctBOOL State); +#endif /* USE_NEW_LINUX_SIGNAL */ + +/* Set a signal owned by a process. 
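Editor's note: a sketch of the signal calls above used as a simple completion event. Passing gcvTRUE to gckOS_CreateSignal is read here as "manual reset" (it stays signaled until cleared), following the parameter name; the interruptible-wait flag and gcvINFINITE timeout likewise follow the declaration:

    static gceSTATUS example_completion(gckOS Os)
    {
        gceSTATUS status;
        gctSIGNAL signal = gcvNULL;

        /* Manual-reset signal (assumed semantics of ManualReset = gcvTRUE). */
        status = gckOS_CreateSignal(Os, gcvTRUE, &signal);
        if (status != gcvSTATUS_OK)
            return status;

        /* Producer side: mark the work as done. */
        gckOS_Signal(Os, signal, gcvTRUE);

        /* Consumer side: interruptible wait with no timeout. */
        status = gckOS_WaitSignal(Os, signal, gcvTRUE, gcvINFINITE);

        gckOS_DestroySignal(Os, signal);
        return status;
    }
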
*/ +#if defined(__QNXNTO__) +gceSTATUS +gckOS_UserSignal(IN gckOS Os, IN gctSIGNAL Signal, + IN gctINT Rcvid, IN const struct sigevent *Event); +#else +gceSTATUS +gckOS_UserSignal(IN gckOS Os, IN gctSIGNAL Signal, IN gctHANDLE Handle); +#endif + +/******************************************************************************\ + ** Cache Support + */ + +gceSTATUS +gckOS_CacheClean(gckOS Os, + gctUINT32 ProcessID, + gctPHYS_ADDR Handle, + gctSIZE_T Offset, + gctPOINTER Logical, + gctSIZE_T Bytes); + +gceSTATUS +gckOS_CacheFlush(gckOS Os, + gctUINT32 ProcessID, + gctPHYS_ADDR Handle, + gctSIZE_T Offset, + gctPOINTER Logical, + gctSIZE_T Bytes); + +gceSTATUS +gckOS_CacheInvalidate(gckOS Os, + gctUINT32 ProcessID, + gctPHYS_ADDR Handle, + gctSIZE_T Offset, + gctPOINTER Logical, + gctSIZE_T Bytes); + +gceSTATUS +gckOS_CPUPhysicalToGPUPhysical(IN gckOS Os, + IN gctPHYS_ADDR_T CPUPhysical, + IN gctPHYS_ADDR_T *GPUPhysical); + +gceSTATUS +gckOS_GPUPhysicalToCPUPhysical(IN gckOS Os, + IN gctPHYS_ADDR_T GPUPhysical, + IN gctPHYS_ADDR_T *CPUPhysical); + +gceSTATUS +gckOS_QueryOption(IN gckOS Os, IN gctCONST_STRING Option, OUT gctUINT64 *Value); + +/******************************************************************************\ + ** Debug Support + */ + +void +gckOS_SetDebugLevel(IN gctUINT32 Level); + +void +gckOS_SetDebugZone(IN gctUINT32 Zone); + +void +gckOS_SetDebugLevelZone(IN gctUINT32 Level, IN gctUINT32 Zone); + +void +gckOS_SetDebugZones(IN gctUINT32 Zones, IN gctBOOL Enable); + +void +gckOS_SetDebugFile(IN gctCONST_STRING FileName); + +gceSTATUS +gckOS_Broadcast(IN gckOS Os, IN gckHARDWARE Hardware, IN gceBROADCAST Reason); + +gceSTATUS +gckOS_BroadcastHurry(IN gckOS Os, IN gckHARDWARE Hardware, IN gctUINT Urgency); + +gceSTATUS +gckOS_BroadcastCalibrateSpeed(IN gckOS Os, + IN gckHARDWARE Hardware, + IN gctUINT Idle, + IN gctUINT Time); + +/******************************************************************************* + ** + ** gckOS_SetGPUPower + ** + ** Set the power of the GPU on or off. + ** + ** INPUT: + ** + ** gckOS Os + ** Pointer to a gckOS object. + ** + ** gckKERNEL Kernel +** Core whose power is set. + ** + ** gctBOOL Clock + ** gcvTRUE to turn on the clock, or gcvFALSE to turn off the clock. + ** + ** gctBOOL Power + ** gcvTRUE to turn on the power, or gcvFALSE to turn off the power. + ** + ** OUTPUT: + ** + ** Nothing. + */ +gceSTATUS +gckOS_SetGPUPower(IN gckOS Os, + IN gckKERNEL Kernel, + IN gctBOOL Clock, + IN gctBOOL Power); + +gceSTATUS +gckOS_SetClockState(IN gckOS Os, IN gckKERNEL Kernel, IN gctBOOL Clock); + +gceSTATUS +gckOS_GetClockState(IN gckOS Os, IN gckKERNEL Kernel, IN gctBOOL *Clock); + +gceSTATUS +gckOS_ResetGPU(IN gckOS Os, IN gckKERNEL Kernel); + +gceSTATUS +gckOS_PrepareGPUFrequency(IN gckOS Os, IN gceCORE Core); + +gceSTATUS +gckOS_FinishGPUFrequency(IN gckOS Os, IN gceCORE Core); + +gceSTATUS +gckOS_QueryGPUFrequency(IN gckOS Os, + IN gceCORE Core, + OUT gctUINT32 *Frequency, + OUT gctUINT8 *Scale); + +gceSTATUS +gckOS_SetGPUFrequency(IN gckOS Os, IN gceCORE Core, IN gctUINT8 Scale); + +/******************************************************************************* + ** Semaphores. + */ + +/* Create a new semaphore. */ +gceSTATUS +gckOS_CreateSemaphore(IN gckOS Os, OUT gctPOINTER *Semaphore); + +gceSTATUS +gckOS_CreateSemaphoreEx(IN gckOS Os, OUT gctPOINTER *Semaphore); + + +/* Delete a semaphore. */ +gceSTATUS +gckOS_DestroySemaphore(IN gckOS Os, IN gctPOINTER Semaphore); + +/* Acquire a semaphore. 
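Editor's note: the cache operations above follow the usual DMA discipline of "clean before the device reads, invalidate before the CPU reads back". A sketch, assuming Handle/Logical/Bytes describe a buffer shared with the GPU:

    static void example_cache_sync(gckOS Os, gctUINT32 ProcessID,
                                   gctPHYS_ADDR Handle, gctPOINTER Logical,
                                   gctSIZE_T Bytes)
    {
        /* CPU wrote the buffer: clean (write back) before the GPU reads it. */
        gckOS_CacheClean(Os, ProcessID, Handle, 0, Logical, Bytes);

        /* ... GPU consumes the buffer, then writes results into it ... */

        /* GPU wrote the buffer: invalidate before the CPU reads it back. */
        gckOS_CacheInvalidate(Os, ProcessID, Handle, 0, Logical, Bytes);
    }
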
*/ +gceSTATUS +gckOS_AcquireSemaphore(IN gckOS Os, IN gctPOINTER Semaphore); + +/* Try to acquire a semaphore. */ +gceSTATUS +gckOS_TryAcquireSemaphore(IN gckOS Os, IN gctPOINTER Semaphore); + +/* Release a semaphore. */ +gceSTATUS +gckOS_ReleaseSemaphore(IN gckOS Os, IN gctPOINTER Semaphore); + +/* Release a semaphore. */ +gceSTATUS +gckOS_ReleaseSemaphoreEx(IN gckOS Os, IN gctPOINTER Semaphore); + +/******************************************************************************* + ** Timer API. + */ + +typedef void (*gctTIMERFUNCTION)(gctPOINTER); + +/* Create a timer. */ +gceSTATUS +gckOS_CreateTimer(IN gckOS Os, + IN gctTIMERFUNCTION Function, + IN gctPOINTER Data, + OUT gctPOINTER *Timer); + +/* Destroy a timer. */ +gceSTATUS +gckOS_DestroyTimer(IN gckOS Os, IN gctPOINTER Timer); + +/* Start a timer. */ +gceSTATUS +gckOS_StartTimer(IN gckOS Os, IN gctPOINTER Timer, IN gctUINT32 Delay); + +/* Stop a timer. */ +gceSTATUS +gckOS_StopTimer(IN gckOS Os, IN gctPOINTER Timer); + +/****************************************************************************** + ******************************** gckHEAP Object ****************************** + ******************************************************************************/ + +typedef struct _gckHEAP *gckHEAP; + +/* Construct a new gckHEAP object. */ +gceSTATUS +gckHEAP_Construct(IN gckOS Os, IN gctSIZE_T AllocationSize, OUT gckHEAP *Heap); + +/* Destroy an gckHEAP object. */ +gceSTATUS +gckHEAP_Destroy(IN gckHEAP Heap); + +/* Allocate memory. */ +gceSTATUS +gckHEAP_Allocate(IN gckHEAP Heap, IN gctSIZE_T Bytes, OUT gctPOINTER *Node); + +/* Free memory. */ +gceSTATUS +gckHEAP_Free(IN gckHEAP Heap, IN gctPOINTER Node); + +/* Profile the heap. */ +gceSTATUS +gckHEAP_ProfileStart(IN gckHEAP Heap); + +gceSTATUS +gckHEAP_ProfileEnd(IN gckHEAP Heap, IN gctCONST_STRING Title); + +/****************************************************************************** + ******************************* gckKERNEL Object ***************************** + ******************************************************************************/ + +struct _gcsHAL_INTERFACE; + +/* Construct a new gckKERNEL object. */ +gceSTATUS +gckKERNEL_Construct(IN gckOS Os, + IN gceCORE Core, + IN gctUINT ChipID, + IN gctPOINTER Context, + IN gckDEVICE Device, + IN gckDB SharedDB, + OUT gckKERNEL *Kernel); + +/* Destroy an gckKERNEL object. */ +gceSTATUS +gckKERNEL_Destroy(IN gckKERNEL Kernel); + +/* Dispatch a user-level command. */ +gceSTATUS +gckKERNEL_Dispatch(IN gckKERNEL Kernel, + IN gckDEVICE Device, + IN OUT struct _gcsHAL_INTERFACE *Interface); + +/* Query Database requirements. */ +gceSTATUS +gckKERNEL_QueryDatabase(IN gckKERNEL Kernel, + IN gctUINT32 ProcessID, + IN OUT gcsHAL_INTERFACE *Interface); + +/* Query the video memory. */ +gceSTATUS +gckKERNEL_QueryVideoMemory(IN gckKERNEL Kernel, + OUT struct _gcsHAL_INTERFACE *Interface); + +/* Lookup the gckVIDMEM object for a pool. */ +gceSTATUS +gckKERNEL_GetVideoMemoryPool(IN gckKERNEL Kernel, IN gcePOOL Pool, + OUT gckVIDMEM *VideoMemory); + +/* Map dedicated video memory node. */ +gceSTATUS +gckKERNEL_MapVideoMemory(IN gckKERNEL Kernel, + IN gctBOOL InUserSpace, + IN gcePOOL Pool, + IN gctPHYS_ADDR Physical, + IN gctSIZE_T Offset, + IN gctSIZE_T Bytes, + OUT gctPOINTER *Logical); + +/* Unmap dedicated video memory. */ +gceSTATUS +gckKERNEL_UnmapVideoMemory(IN gckKERNEL Kernel, + IN gcePOOL Pool, + IN gctPHYS_ADDR Physical, + IN gctPOINTER Logical, + IN gctUINT32 Pid, + IN gctSIZE_T Bytes); + +/* Map memory. 
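Editor's note: a sketch of the timer API above. The delay is taken to be in milliseconds by analogy with gckOS_Delay; that unit is an assumption, since the declaration does not state it:

    /* Hypothetical callback: receives the Data pointer given at creation time. */
    static void example_on_timer(gctPOINTER Data)
    {
        /* ... deferred work ... */
        (void)Data;
    }

    static gceSTATUS example_timer(gckOS Os, gctPOINTER Context)
    {
        gceSTATUS status;
        gctPOINTER timer = gcvNULL;

        status = gckOS_CreateTimer(Os, example_on_timer, Context, &timer);
        if (status != gcvSTATUS_OK)
            return status;

        gckOS_StartTimer(Os, timer, 100);   /* fire after ~100 ms (assumed unit) */

        /* ... later, when the deferred work is no longer wanted ... */
        gckOS_StopTimer(Os, timer);
        return gckOS_DestroyTimer(Os, timer);
    }
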
*/ +gceSTATUS +gckKERNEL_MapMemory(IN gckKERNEL Kernel, + IN gctPHYS_ADDR Physical, + IN gctSIZE_T Bytes, + OUT gctPOINTER *Logical); + +/* Unmap memory. */ +gceSTATUS +gckKERNEL_UnmapMemory(IN gckKERNEL Kernel, + IN gctPHYS_ADDR Physical, + IN gctSIZE_T Bytes, + IN gctPOINTER Logical, + IN gctUINT32 ProcessID); +/* Destroy reserved mem when destroy process*/ +gceSTATUS +gckKERNEL_DestroyProcessReservedUserMap(IN gckKERNEL Kernel, IN gctUINT32 Pid); + +/* Notification of events. */ +gceSTATUS +gckKERNEL_Notify(IN gckKERNEL Kernel, IN gceNOTIFY Notifcation); + +#if gcdENABLE_VIDEO_MEMORY_MIRROR +gceSTATUS +gckKERNEL_SyncVideoMemoryMirror(gckKERNEL Kernel, + gckVIDMEM_NODE Node, + gctSIZE_T Offset, + gctSIZE_T Bytes, + gctUINT32 Reason); +#endif +/******************************************************************************* + ** + ** gckKERNEL_Recovery + ** + ** Try to recover the GPU from a fatal error. + ** + ** INPUT: + ** + ** gckKERNEL Kernel + ** Pointer to an gckKERNEL object. + ** + ** OUTPUT: + ** + ** Nothing. + */ +gceSTATUS +gckKERNEL_Recovery(IN gckKERNEL Kernel); + +/* Get access to the user data. */ +gceSTATUS +gckKERNEL_OpenUserData(IN gckKERNEL Kernel, + IN gctBOOL NeedCopy, + IN gctPOINTER StaticStorage, + IN gctPOINTER UserPointer, + IN gctSIZE_T Size, + OUT gctPOINTER *KernelPointer); + +/* Release resources associated with the user data connection. */ +gceSTATUS +gckKERNEL_CloseUserData(IN gckKERNEL Kernel, + IN gctBOOL NeedCopy, + IN gctBOOL FlushData, + IN gctPOINTER UserPointer, + IN gctSIZE_T Size, + OUT gctPOINTER *KernelPointer); + +/* Query kernel by core index */ +gceSTATUS +gckOS_QueryKernel(IN gckKERNEL Kernel, IN gctINT index, OUT gckKERNEL *KernelOut); + +gceSTATUS +gckDVFS_Construct(IN gckHARDWARE Hardware, OUT gckDVFS *Frequency); + +gceSTATUS +gckDVFS_Destroy(IN gckDVFS Dvfs); + +gceSTATUS +gckDVFS_Start(IN gckDVFS Dvfs); + +gceSTATUS +gckDVFS_Stop(IN gckDVFS Dvfs); + +/****************************************************************************** + ******************************* gckHARDWARE Object *************************** + ******************************************************************************/ + +/* Construct a new gckHARDWARE object. */ +gceSTATUS +gckHARDWARE_Construct(IN gckOS Os, + IN gckKERNEL Kernel, + OUT gckHARDWARE *Hardware); + +/* Post hardware resource allocation after gckHARDWARE object constructed. */ +gceSTATUS +gckHARDWARE_PostConstruct(IN gckHARDWARE Hardware); + +/* Pre-destroy hardwre resource before destroying an gckHARDWARE object. */ +gceSTATUS +gckHARDWARE_PreDestroy(IN gckHARDWARE Hardware); + +/* Destroy an gckHARDWARE object. */ +gceSTATUS +gckHARDWARE_Destroy(IN gckHARDWARE Hardware); + +/* Get hardware type. */ +gceSTATUS +gckHARDWARE_GetType(IN gckHARDWARE Hardware, OUT gceHARDWARE_TYPE *Type); + +/* Query system memory requirements. */ +gceSTATUS +gckHARDWARE_QuerySystemMemory(IN gckHARDWARE Hardware, + OUT gctSIZE_T *SystemSize, + OUT gctUINT32 *SystemBaseAddress); + +/* Build virtual address. */ +gceSTATUS +gckHARDWARE_BuildVirtualAddress(IN gckHARDWARE Hardware, + IN gctUINT32 Index, + IN gctUINT32 Offset, + OUT gctUINT32 *Address); + +/* Query command buffer requirements. */ +gceSTATUS +gckHARDWARE_QueryCommandBuffer(IN gckHARDWARE Hardware, + IN gceENGINE Engine, + OUT gctUINT32 *Alignment, + OUT gctUINT32 *ReservedHead, + OUT gctUINT32 *ReservedTail); + +/* Add a PIPESELECT command in the command queue. 
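Editor's note: gckKERNEL_OpenUserData/CloseUserData above bundle the copy-or-map decision shown earlier into a single open/close bracket, which is how an ioctl-style handler would typically consume a user buffer. In this sketch the NeedCopy flag is assumed to come from gckOS_QueryNeedCopy, StaticStorage is a small scratch buffer assumed to be large enough for Size, and FlushData = gcvTRUE is read as "write modifications back to user space":

    static gceSTATUS example_user_data(gckKERNEL Kernel, gctBOOL NeedCopy,
                                       gctPOINTER UserPointer, gctSIZE_T Size)
    {
        gceSTATUS status;
        gctUINT64 stackBuffer[32];          /* assumed scratch storage */
        gctPOINTER kernelPtr = gcvNULL;

        status = gckKERNEL_OpenUserData(Kernel, NeedCopy, stackBuffer,
                                        UserPointer, Size, &kernelPtr);
        if (status != gcvSTATUS_OK)
            return status;

        /* ... read and/or modify the data through kernelPtr ... */

        return gckKERNEL_CloseUserData(Kernel, NeedCopy, gcvTRUE,
                                       UserPointer, Size, &kernelPtr);
    }
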
*/ +gceSTATUS +gckHARDWARE_PipeSelect(IN gckHARDWARE Hardware, + IN gctPOINTER Logical, + IN gcePIPE_SELECT Pipe, + IN OUT gctUINT32 *Bytes); + +/* Query the available memory. */ +gceSTATUS +gckHARDWARE_QueryMemory(IN gckHARDWARE Hardware, + OUT gctSIZE_T *InternalSize, + OUT gctADDRESS *InternalBaseAddress, + OUT gctUINT32 *InternalAlignment, + OUT gctSIZE_T *ExternalSize, + OUT gctADDRESS *ExternalBaseAddress, + OUT gctUINT32 *ExternalAlignment, + OUT gctUINT32 *HorizontalTileSize, + OUT gctUINT32 *VerticalTileSize); + +/* Query the identity of the hardware. */ +gceSTATUS +gckHARDWARE_QueryChipIdentity(IN gckHARDWARE Hardware, + OUT gcsHAL_QUERY_CHIP_IDENTITY_PTR Identity); + +gceSTATUS +gckHARDWARE_QueryChipOptions(IN gckHARDWARE Hardware, + OUT gcsHAL_QUERY_CHIP_OPTIONS_PTR Options); + +/* Split a harwdare specific address into API stuff. */ +gceSTATUS +gckHARDWARE_SplitMemory(IN gckHARDWARE Hardware, + IN gctUINT32 Address, + OUT gcePOOL *Pool, + OUT gctUINT32 *Offset); + +/* Update command queue tail pointer. */ +gceSTATUS +gckHARDWARE_UpdateQueueTail(IN gckHARDWARE Hardware, + IN gctPOINTER Logical, + IN gctUINT32 Offset); + +/* Interrupt manager. */ +gceSTATUS +gckHARDWARE_Interrupt(IN gckHARDWARE Hardware); + +gceSTATUS +gckHARDWARE_Notify(IN gckHARDWARE Hardware); + +/* Program MMU. */ +gceSTATUS +gckHARDWARE_SetMMU(IN gckHARDWARE Hardware, IN gckMMU Mmu); + +/* Flush the MMU. */ +gceSTATUS +gckHARDWARE_FlushMMU(IN gckHARDWARE Hardware, + IN gctPOINTER Logical, + IN gctADDRESS Address, + IN gctUINT32 SubsequentBytes, + IN OUT gctUINT32 *Bytes); + +gceSTATUS +gckHARDWARE_FlushAsyncMMU(IN gckHARDWARE Hardware, + IN gctPOINTER Logical, + IN OUT gctUINT32 *Bytes); + +gceSTATUS +gckHARDWARE_FlushMcfeMMU(IN gckHARDWARE Hardware, + IN gctPOINTER Logical, + IN OUT gctUINT32 *Bytes); + +/* Get idle register. */ +gceSTATUS +gckHARDWARE_GetIdle(IN gckHARDWARE Hardware, IN gctBOOL Wait, OUT gctUINT32 *Data); + +/* Flush the caches. */ +gceSTATUS +gckHARDWARE_Flush(IN gckHARDWARE Hardware, + IN gceKERNEL_FLUSH Flush, + IN gctPOINTER Logical, + IN OUT gctUINT32 *Bytes); + +/* Enable/disable fast clear. */ +gceSTATUS +gckHARDWARE_SetFastClear(IN gckHARDWARE Hardware, + IN gctINT Enable, + IN gctINT Compression); + +gceSTATUS +gckHARDWARE_ReadInterrupt(IN gckHARDWARE Hardware, OUT gctUINT32_PTR IDs); + +/* + * State timer helper. + */ +gceSTATUS +gckHARDWARE_StartTimerReset(IN gckHARDWARE Hardware); + +/* Power management. 
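Editor's note: several command-building calls above (gckHARDWARE_PipeSelect, gckHARDWARE_Flush, gckHARDWARE_Fence) take an IN OUT Bytes parameter. A common reading of that shape is "call once with a NULL Logical to learn the size, then call again to emit into reserved command-buffer space"; that two-pass idiom is an assumption about usage, not something stated in this header:

    static gceSTATUS example_pipe_select(gckHARDWARE Hardware,
                                         gctPOINTER CmdSpace,
                                         gcePIPE_SELECT Pipe)
    {
        gceSTATUS status;
        gctUINT32 bytes = 0;

        /* First pass (assumed idiom): query how many bytes the command needs. */
        status = gckHARDWARE_PipeSelect(Hardware, gcvNULL, Pipe, &bytes);
        if (status != gcvSTATUS_OK)
            return status;

        /* ... reserve 'bytes' of command-buffer space ending up at CmdSpace ... */

        /* Second pass: emit the PIPESELECT command into the reserved space. */
        return gckHARDWARE_PipeSelect(Hardware, CmdSpace, Pipe, &bytes);
    }
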
*/ +gceSTATUS +gckHARDWARE_SetPowerState(IN gckHARDWARE Hardware, IN gceCHIPPOWERSTATE State); + +gceSTATUS +gckHARDWARE_QueryPowerStateUnlocked(IN gckHARDWARE Hardware, OUT gceCHIPPOWERSTATE *State); + +gceSTATUS +gckHARDWARE_QueryPowerState(IN gckHARDWARE Hardware, OUT gceCHIPPOWERSTATE *State); + +gceSTATUS +gckHARDWARE_EnablePowerManagement(IN gckHARDWARE Hardware, IN gctBOOL Enable); + +gceSTATUS +gckHARDWARE_QueryPowerManagement(IN gckHARDWARE Hardware, OUT gctBOOL *Enable); + +gceSTATUS +gckHARDWARE_SetGpuProfiler(IN gckHARDWARE Hardware, IN gctBOOL GpuProfiler); + +#if gcdENABLE_FSCALE_VAL_ADJUST +gceSTATUS +gckHARDWARE_SetFscaleValue(IN gckHARDWARE Hardware, + IN gctUINT32 FscaleValue, + IN gctUINT32 ShaderFscaleValue); + +gceSTATUS +gckHARDWARE_GetFscaleValue(IN gckHARDWARE Hardware, + IN gctUINT *FscaleValue, + IN gctUINT *MinFscaleValue, + IN gctUINT *MaxFscaleValue); + +gceSTATUS +gckHARDWARE_SetMinFscaleValue(IN gckHARDWARE Hardware, IN gctUINT MinFscaleValue); +#endif + +gceSTATUS +gckHARDWARE_InitializeHardware(IN gckHARDWARE Hardware); + +gceSTATUS +gckHARDWARE_Reset(IN gckHARDWARE Hardware); + +/* Check for Hardware features. */ +gceSTATUS +gckHARDWARE_IsFeatureAvailable(IN gckHARDWARE Hardware, IN gceFEATURE Feature); + +gceSTATUS +gckHARDWARE_DumpMMUException(IN gckHARDWARE Hardware); + +gceSTATUS +gckHARDWARE_DumpGPUState(IN gckHARDWARE Hardware); + +gceSTATUS +gckHARDWARE_InitDVFS(IN gckHARDWARE Hardware); + +gceSTATUS +gckHARDWARE_QueryLoad(IN gckHARDWARE Hardware, OUT gctUINT32 *Load); + +gceSTATUS +gckHARDWARE_SetDVFSPeroid(IN gckHARDWARE Hardware, IN gctUINT32 Frequency); + +gceSTATUS +gckHARDWARE_QueryStateTimer(IN gckHARDWARE Hardware, + OUT gctUINT64_PTR On, + OUT gctUINT64_PTR Off, + OUT gctUINT64_PTR Idle, + OUT gctUINT64_PTR Suspend); + +gceSTATUS +gckHARDWARE_Fence(IN gckHARDWARE Hardware, + IN gceENGINE Engine, + IN gctPOINTER Logical, + IN gctADDRESS FenceAddress, + IN gctUINT64 FenceData, + IN OUT gctUINT32 *Bytes); + +/****************************************************************************** + **************************** gckINTERRUPT Object ***************************** + ******************************************************************************/ + +typedef struct _gckINTERRUPT *gckINTERRUPT; + +typedef gceSTATUS (*gctINTERRUPT_HANDLER)(IN gckKERNEL Kernel); + +gceSTATUS +gckINTERRUPT_Construct(IN gckKERNEL Kernel, OUT gckINTERRUPT *Interrupt); + +gceSTATUS +gckINTERRUPT_Destroy(IN gckINTERRUPT Interrupt); + +gceSTATUS +gckINTERRUPT_SetHandler(IN gckINTERRUPT Interrupt, + IN OUT gctINT32_PTR Id, + IN gctINTERRUPT_HANDLER Handler); + +gceSTATUS +gckINTERRUPT_Notify(IN gckINTERRUPT Interrupt, IN gctBOOL Valid); + +/****************************************************************************** + ******************************** gckMMU Object ******************************* + ******************************************************************************/ + +/* Construct a new gckMMU object. */ +gceSTATUS +gckMMU_Construct(IN gckKERNEL Kernel, IN gctSIZE_T MmuSize, OUT gckMMU *Mmu); + +/* Destroy an gckMMU object. */ +gceSTATUS +gckMMU_Destroy(IN gckMMU Mmu); + +/* Allocate pages inside the MMU. 
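Editor's note: under gcdENABLE_FSCALE_VAL_ADJUST the calls above expose GPU frequency scaling. A sketch that reads the current/min/max scale values and pins both the core and shader clocks to the maximum (using the same scale for both is this sketch's choice, not a requirement):

    #if gcdENABLE_FSCALE_VAL_ADJUST
    static gceSTATUS example_max_frequency(gckHARDWARE Hardware)
    {
        gceSTATUS status;
        gctUINT current = 0, minScale = 0, maxScale = 0;

        status = gckHARDWARE_GetFscaleValue(Hardware, &current,
                                            &minScale, &maxScale);
        if (status != gcvSTATUS_OK)
            return status;

        /* Apply the largest allowed scale to core and shader clocks. */
        return gckHARDWARE_SetFscaleValue(Hardware, maxScale, maxScale);
    }
    #endif
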
*/ +gceSTATUS +gckMMU_AllocatePages(IN gckMMU Mmu, + IN gctSIZE_T PageCount, + IN gcePAGE_TYPE PageType, + OUT gctPOINTER *PageTable, + OUT gctADDRESS *Address); + +gceSTATUS +gckMMU_AllocatePagesEx(IN gckMMU Mmu, + IN gctSIZE_T PageCount, + IN gceVIDMEM_TYPE Type, + IN gcePAGE_TYPE PageType, + IN gctBOOL LowVA, + IN gctBOOL Secure, + OUT gctPOINTER *PageTable, + OUT gctADDRESS *Address); + +/* Remove a page table from the MMU. */ +gceSTATUS +gckMMU_FreePages(IN gckMMU Mmu, + IN gctBOOL Secure, + IN gcePAGE_TYPE PageType, + IN gctBOOL LowVA, + IN gctADDRESS Address, + IN gctPOINTER PageTable, + IN gctSIZE_T PageCount); + +/* Set the MMU page with info. */ +gceSTATUS +gckMMU_SetPage(IN gckMMU Mmu, + IN gctPHYS_ADDR_T PageAddress, + IN gcePAGE_TYPE PageType, + IN gctBOOL LowVA, + IN gctBOOL Writable, + IN gctUINT32 *PageEntry); + +gceSTATUS +gckMMU_Flush(IN gckMMU Mmu, IN gceVIDMEM_TYPE Type); + +gceSTATUS +gckMMU_DumpPageTableEntry(IN gckMMU Mmu, IN gceAREA_TYPE AreaType, IN gctADDRESS Address); + +gceSTATUS +gckMMU_FillFlatMapping(IN gckMMU Mmu, + IN gctUINT64 PhysBase, + IN gctSIZE_T Size, + IN gctBOOL Reserved, + IN gctBOOL AbleToShift, + OUT gctADDRESS *GpuBaseAddress); + +gceSTATUS +gckMMU_IsFlatMapped(IN gckMMU Mmu, + IN gctUINT64 Physical, + IN gctSIZE_T Bytes, + OUT gctBOOL *In, + INOUT gctADDRESS *Address); + +gceSTATUS +gckMMU_GetAreaType(IN gckMMU Mmu, IN gctADDRESS GpuAddress, OUT gceAREA_TYPE *AreaType); + +gceSTATUS +gckHARDWARE_QueryContextProfile(IN gckHARDWARE Hardware, + IN gctBOOL Reset, + OUT gcsPROFILER_COUNTERS_PART1 *Counters_part1, + OUT gcsPROFILER_COUNTERS_PART2 *Counters_part2); + +gceSTATUS +gckHARDWARE_UpdateContextProfile(IN gckHARDWARE Hardware); + +gceSTATUS +gckHARDWARE_InitProfiler(IN gckHARDWARE Hardware); + +gceSTATUS +gckOS_DetectProcessByName(IN gctCONST_POINTER Name); + +void +gckOS_DumpParam(void); + +gceSTATUS +gc_mmuinfo_show(void); + +#ifdef __cplusplus +} +#endif + + +#endif /* __gc_hal_h_ */ + + diff --git a/unified-tina/inc/HAL/gc_hal_base.h b/unified-tina/inc/HAL/gc_hal_base.h new file mode 100644 index 0000000..bfe7c08 --- /dev/null +++ b/unified-tina/inc/HAL/gc_hal_base.h @@ -0,0 +1,4991 @@ +/**************************************************************************** +* +* Copyright (c) 2005 - 2023 by Vivante Corp. All rights reserved. +* +* The material in this file is confidential and contains trade secrets +* of Vivante Corporation. This is proprietary information owned by +* Vivante Corporation. No part of this work may be disclosed, +* reproduced, copied, transmitted, or used in any way for any purpose, +* without the express written permission of Vivante Corporation. 
+* +*****************************************************************************/ + + +#ifndef __gc_hal_base_h_ +#define __gc_hal_base_h_ + +#include "gc_hal_enum.h" +#include "gc_hal_types.h" +#include "gc_hal_debug_zones.h" +#include "shared/gc_hal_base_shared.h" + + +#ifdef __QNXNTO__ +# define CHECK_PRINTF_FORMAT(string_index, first_to_check) \ + __attribute__((__format__(__printf__, (string_index), (first_to_check)))) +#else +# define CHECK_PRINTF_FORMAT(string_index, first_to_check) +#endif + +#ifdef __cplusplus +extern "C" { +#endif + +/****************************************************************************** + ****************************** Object Declarations *************************** + ******************************************************************************/ + +typedef struct _gckOS *gckOS; +typedef struct _gcoHAL *gcoHAL; +typedef struct _gcoOS *gcoOS; +typedef struct _gco2D *gco2D; +typedef struct gcsATOM *gcsATOM_PTR; + +typedef struct _gco3D *gco3D; +typedef struct _gcoCL *gcoCL; +typedef struct _gcoVX *gcoVX; +typedef struct _gcsFAST_FLUSH *gcsFAST_FLUSH_PTR; + +typedef struct _gcoSURF *gcoSURF; +typedef struct _gcsSURF_NODE *gcsSURF_NODE_PTR; +typedef struct _gcsSURF_FORMAT_INFO *gcsSURF_FORMAT_INFO_PTR; +typedef struct _gcsPOINT *gcsPOINT_PTR; +typedef struct _gcsSIZE *gcsSIZE_PTR; +typedef struct _gcsRECT *gcsRECT_PTR; +typedef struct _gcsBOUNDARY *gcsBOUNDARY_PTR; +typedef struct _gcoHARDWARE *gcoHARDWARE; +typedef struct _gcoDEVICE *gcoDEVICE; +#if gcdENABLE_MULTI_DEVICE_MANAGEMENT +typedef struct _gcsHARDWARE_PROFILE gcsHARDWARE_PROFILE; +#endif +typedef union _gcuVIDMEM_NODE *gcuVIDMEM_NODE_PTR; +typedef struct _gcsVIDMEM_NODE *gckVIDMEM_NODE; +typedef struct _gcsVIDMEM_BLOCK *gckVIDMEM_BLOCK; + +typedef void *gcoVG; + +typedef struct _gcoFENCE *gcoFENCE; +typedef struct _gcsSYNC_CONTEXT *gcsSYNC_CONTEXT_PTR; + +typedef struct _gcsUSER_MEMORY_DESC *gcsUSER_MEMORY_DESC_PTR; + +#if gcdENABLE_CLEAR_FENCE +typedef struct _gcsUSER_FENCE_INFO *gcsUSER_FENCE_INFO_PTR; +#endif + +/* Immuatable features from database */ +typedef struct _gcsNN_FIXED_FEATURE { + gctUINT vipCoreCount; + gctUINT vipRingCount; + gctUINT nnMadPerCore; + gctUINT nnInputBufferDepth; + gctUINT nnAccumBufferDepth; + gctUINT nnFCNonPrunAccel; + gctUINT nnInImageOffsetBits; + gctUINT tpCoreCount; /* full-function core count */ + gctUINT tpPwlLUTCount; + gctUINT tpPwlLUTSize; + gctUINT vip7Version; + gctUINT vipBrickMode; + gctUINT tpReorderInImageSize; + gctUINT tpliteCoreCount; /* fc-only core count */ + gctUINT nnFP16XYDPX; + gctUINT nnFP16XYDPY; + gctUINT nnFP16ZDP; + gctUINT zrlBits; + gctUINT uscCacheControllers; + gctUINT uscBanks; + gctUINT nnLanesPerOutCycle; + gctUINT maxOTNumber; + gctUINT physicalVipSramWidthInByte; + gctUINT equivalentVipsramWidthInByte; + gctUINT shaderCoreCount; + gctUINT latencyHidingAtFullAxiBw; + gctUINT axiBusWidth; + gctUINT nnMaxKXSize; + gctUINT nnMaxKYSize; + gctUINT nnMaxKZSize; + gctUINT nnClusterNumForPowerControl; + gctUINT vipMinAxiBurstSize; + gctUINT nnInLinesPerCycle; + gctUINT nnPreprocessorMaxSegmentPerCycle; + + /* stream processor info */ + gctUINT streamProcessorExecCount; + gctUINT streamProcessorVectorSize; + + /* add related information for check in/out size */ + gctUINT outImageXStrideBits; + gctUINT outImageYStrideBits; + gctUINT inImageXStrideBits; + gctUINT inImageYStrideBits; + gctUINT outImageXSizeBits; + gctUINT outImageYSizeBits; + gctUINT inImageXSizeBits; + gctUINT inImageYSizeBits; + gctUINT smallAccumBits; + gctUINT 
coefDecompressPerfX; +} gcsNN_FIXED_FEATURE; + +/* Features can be customized from outside */ +typedef struct _gcsNN_CUSTOMIZED_FEATURE { + gctUINT nnActiveCoreCount; + gctUINT nnCoreCount; /* total nn core count */ + gctUINT nnCoreCountInt8; /* total nn core count supporting int8 */ + gctUINT nnCoreCountInt16; /* total nn core count supporting int16 */ + gctUINT nnCoreCountUint16; /* total nn core count supporting uint16 */ + gctUINT nnCoreCountFloat16; /* total nn core count supporting float16 */ + gctUINT nnCoreCountBFloat16; /* total nn core count supporting Bfloat16 */ + gctUINT vipSRAMSize; + gctUINT axiSRAMSize; + gctFLOAT ddrReadBWLimit; + gctFLOAT ddrWriteBWLimit; + gctFLOAT ddrTotalBWLimit; + gctFLOAT axiSramReadBWLimit; + gctFLOAT axiSramWriteBWLimit; + gctFLOAT axiSramTotalBWLimit; + gctFLOAT axiBusReadBWLimit; + gctFLOAT axiBusWriteBWLimit; + gctFLOAT axiBusTotalBWLimit; + gctUINT vipSWTiling; + gctFLOAT ddrLatency; + gctUINT freqInMHZ; + gctUINT axiClockFreqInMHZ; + gctUINT maxSocOTNumber; /*max SOC outstanding transfer number*/ + gctUINT nnWriteWithoutUSC; + gctUINT depthWiseSupport; + gctUINT vipVectorPrune; + gctUINT ddrKernelBurstSize; + gctFLOAT axiSRAMLatency; +} gcsNN_CUSTOMIZED_FEATURE; + +/* Features are unified (hardcoded) for hardwares */ +typedef struct _gcsNN_UNIFIED_FEATURE { + gctUINT nnUSCCacheSize; + gctUINT nnCmdSizeInBytes; + gctUINT tpCmdSizeInBytes; + gctUINT vipCoefDecodePerf; + gctUINT vipCachedReadFromSram; + gctUINT vipImagePartialCache; + gctUINT lanesPerConv; + gctUINT maxTileSize; + gctUINT fullCacheKernelHeadFix : 1; + gctUINT conv1x1HalfPerformance : 1; + gctUINT per3DTileBubbleFix : 1; + gctUINT cacheLineModeDisabled : 1; + gctUINT tpReOrderFix : 1; + gctUINT zdp3NoCompressFix : 1; + gctUINT asyncCopyPerfFix : 1; + gctUINT accurateTileBW : 1; + gctUINT zxdp3KernelReadConflictFix : 1; + gctUINT axiSramSlowedDownByAddr : 1; + gctUINT slowNNReqArbitrationFix : 1; + gctUINT singlePortAccBuffer : 1; + gctUINT convOutFifoDepthFix : 1; + gctUINT smallBatchEnable : 1; + gctUINT axiSramOnlySWTiling : 1; + gctUINT imageNotPackedInSram : 1; + gctUINT coefDeltaCordOverFlowZRL8BitFix : 1; + gctUINT lowEfficiencyOfIDWriteImgBufFix : 1; + gctUINT xyOffsetLimitationFix : 1; + gctUINT kernelPerCoreLTOneThirdCoefFix : 1; + gctUINT diffConditionForCachelineModePreFix : 1; +} gcsNN_UNIFIED_FEATURE; + +/* Features are derived from above ones */ +typedef struct _gcsNN_DERIVIED_FEATURE { + gctUINT nnDPAmount; + gctUINT nnXYDPX; + gctUINT nnXYDPY; + gctUINT nnZDP; + gctFLOAT totalLatency; + gctFLOAT internalLatency; + gctFLOAT ddrReadBWInBytePerCycle; + gctFLOAT ddrWriteBWInBytePerCycle; + gctFLOAT totalAxiSRAMLatency; +} gcsNN_DERIVED_FEATURE; + +/****************************************************************************** + ********************* Share obj lock/unlock macros. ************************** + ******************************************************************************/ +#define gcmLOCK_SHARE_OBJ(Obj) \ + { \ + if (Obj->sharedLock != gcvNULL) { \ + (gcoOS_AcquireMutex(gcvNULL, Obj->sharedLock, gcvINFINITE)); \ + } \ + } + +#define gcmUNLOCK_SHARE_OBJ(Obj) \ + { \ + if (Obj->sharedLock != gcvNULL) { \ + (gcoOS_ReleaseMutex(gcvNULL, Obj->sharedLock)); \ + } \ + } + +typedef struct _gcsSystemInfo { + /* memory latency number for SH data fetch, in SH cycle*/ + gctUINT32 memoryLatencySH; +} gcsSystemInfo; + +#define gcPLS_INITIALIZER \ +{ \ + gcvNULL, /* gcoOS object. */ \ + gcvNULL, /* gcoHAL object. 
*/ \ + 0, /* internalSize */ \ + 0, /* internalPhysName */ \ + gcvNULL, /* internalLogical */ \ + 0, /* externalSize */ \ + 0, /* externalPhysName */ \ + gcvNULL, /* externalLogical */ \ + 0, /* contiguousSize */ \ + 0, /* contiguousPhysName */ \ + gcvNULL, /* contiguousLogical */ \ + gcvNULL, /* eglDisplayInfo */ \ + gcvNULL, /* eglSurfaceInfo */ \ + gcvSURF_A8R8G8B8, /* eglConfigFormat */ \ + gcvNULL, /* reference */ \ + 0, /* processID */ \ + 0, /* threadID */ \ + gcvFALSE, /* exiting */ \ + gcvFALSE, /* Special flag for NP2 texture. */ \ + gcvFALSE, /* device open. */ \ + gcvNULL, /* destructor */ \ + gcvNULL, /* accessLock */ \ + gcvNULL, /* GL FE compiler lock*/ \ + gcvNULL, /* CL FE compiler lock*/ \ + gcvNULL, /* VX context lock */ \ + gcvPATCH_NOTINIT, /* global patchID */ \ + gcvNULL, /* global fenceID*/ \ + gcvNULL, /* mainThreadHandle */ \ + gcvFALSE, /* memory profile flag */ \ + gcvNULL, /* profileLock; */ \ + 0, /* allocCount; */ \ + 0, /* allocSize; */ \ + 0, /* maxAllocSize; */ \ + 0, /* freeCount; */ \ + 0, /* freeSize; */ \ + 0, /* currentSize; */ \ + 0, /* video_allocCount; */ \ + 0, /* video_allocSize; */ \ + 0, /* video_maxAllocSize; */ \ + 0, /* video_freeCount; */ \ + 0, /* video_freeSize; */ \ + 0, /* video_currentSize; */ \ +} + +/****************************************************************************** + ******************************* Thread local storage ************************* + ******************************************************************************/ + +typedef struct _gcsDRIVER_TLS *gcsDRIVER_TLS_PTR; + +typedef struct _gcsDRIVER_TLS { + void (*destructor)(gcsDRIVER_TLS_PTR Tls); +} gcsDRIVER_TLS; + +typedef struct _gcsTLS *gcsTLS_PTR; + +typedef struct _gcsTLS { + gceHARDWARE_TYPE currentType; + gceHARDWARE_TYPE targetType; + + /* To which core device control is called, + * it is index in a hardware type. + */ + gctUINT32 currentCoreIndex; + + /* Current device index of this thread. */ + gctUINT32 currentDevIndex; + + /* Current 3D hardwre of this thread */ + gcoHARDWARE currentHardware; + + /* Default 3D hardware of this thread */ + gcoHARDWARE defaultHardware; + + /* Only for separated 3D and 2D */ + gcoHARDWARE hardware2D; +#if gcdENABLE_3D + gco3D engine3D; +#endif + gcoVX engineVX; + + gctBOOL copied; + + /* libGAL.so handle */ + gctHANDLE handle; + + gctHANDLE graph; + + /* If true, do not releas 2d engine and hardware in hal layer */ + gctBOOL release2DUpper; + + /* Driver tls. */ + gcsDRIVER_TLS_PTR driverTLS[gcvTLS_KEY_COUNT]; + +#if gcdENABLE_SW_PREEMPTION + /* PriorityID. 
*/ + gctUINT priorityID; +#endif +} gcsTLS; + +typedef struct _gcsSURF_VIEW { + gcoSURF surf; + gctUINT firstSlice; + gctUINT numSlices; +} gcsSURF_VIEW; + +/* gcsHAL_Limits*/ +typedef struct _gcsHAL_LIMITS { + /* chip info */ + gceCHIPMODEL chipModel; + gctUINT32 chipRevision; + gctUINT32 featureCount; + gctUINT32 *chipFeatures; + + /* target caps */ + gctUINT32 maxWidth; + gctUINT32 maxHeight; + gctUINT32 multiTargetCount; + gctUINT32 maxSamples; + +} gcsHAL_LIMITS; + +typedef struct _gcsHAL_CHIPIDENTITY { + gceCHIPMODEL chipModel; + gctUINT32 chipRevision; + gctUINT32 productID; + gctUINT32 customerID; + gctUINT32 ecoID; + gceCHIP_FLAG chipFlags; + gctUINT64 platformFlagBits; +} gcsHAL_CHIPIDENTITY; + +/****************************************************************************** + ******************************** gcoHAL Object ******************************* + ******************************************************************************/ + +/* Construct a new gcoHAL object. */ +gceSTATUS +gcoHAL_ConstructEx(IN gctPOINTER Context, IN gcoOS Os, OUT gcoHAL *Hal); + +/* Destroy an gcoHAL object. */ +gceSTATUS +gcoHAL_DestroyEx(IN gcoHAL Hal); + +/* Empty function for compatibility. */ +gceSTATUS +gcoHAL_Construct(IN gctPOINTER Context, IN gcoOS Os, OUT gcoHAL *Hal); + +/* Empty function for compatibility. */ +gceSTATUS +gcoHAL_Destroy(IN gcoHAL Hal); + +/* Get HAL options */ +gceSTATUS +gcoHAL_GetOption(IN gcoHAL Hal, IN gceOPTION Option); + +gceSTATUS +gcoHAL_FrameInfoOps(IN gcoHAL Hal, + IN gceFRAMEINFO FrameInfo, + IN gceFRAMEINFO_OP Op, + IN OUT gctUINT *Val); + +/* Set HAL options */ +gceSTATUS +gcoHAL_SetOption(IN gcoHAL Hal, IN gceOPTION Option, IN gctBOOL Value); + +gceSTATUS +gcoHAL_GetHardware(IN gcoHAL Hal, OUT gcoHARDWARE *Hw); + + +#if gcdENABLE_3D +gceSTATUS +gcoHAL_GetSpecialHintData(IN gcoHAL Hal, OUT gctINT *Hint); +/* + ** Deprecated(Don't use it), keep it here for external library(libgcu.so) + */ +gceSTATUS +gcoHAL_Get3DEngine(IN gcoHAL Hal, OUT gco3D *Engine); +#endif /* gcdENABLE_3D */ + +gceSTATUS +gcoHAL_GetProductName(IN gcoHAL Hal, + OUT gctSTRING *ProductName, + OUT gctUINT *PID ); +gceSTATUS +gcoHAL_GetProductNameWithHardware(IN gcoHARDWARE Hardware, + OUT gctSTRING *ProductName, + OUT gctUINT *PID); + +gceSTATUS +gcoHAL_SetFscaleValue(IN gcoHAL Hal, + IN gctUINT CoreIndex, + IN gctUINT FscaleValue, + IN gctUINT ShaderFscaleValue); + +gceSTATUS +gcoHAL_CancelJob(gcoHAL Hal); + +gceSTATUS +gcoHAL_GetFscaleValue(OUT gctUINT *FscaleValue, + OUT gctUINT *MinFscaleValue, + OUT gctUINT *MaxFscaleValue); + +gceSTATUS +gcoHAL_SetBltNP2Texture(gctBOOL enable); + +gceSTATUS +gcoHAL_ExportVideoMemory(IN gctUINT32 Handle, + IN gctUINT32 Flags, + OUT gctINT32 *FD); + +gceSTATUS +gcoHAL_NameVideoMemory(IN gctUINT32 Handle, OUT gctUINT32 *Name); + +gceSTATUS +gcoHAL_ImportVideoMemory(IN gctUINT32 Name, OUT gctUINT32 *Handle); + +gceSTATUS +gcoHAL_GetVideoMemoryFd(IN gctUINT32 Handle, OUT gctINT *Fd); + +gceSTATUS +gcoHAL_GetExportedVideoMemoryFd(IN gctUINT32 Handle, OUT gctINT *Fd); + +/* Verify whether the specified feature is available in hardware. */ +gceSTATUS +gcoHAL_IsFeatureAvailable(IN gcoHAL Hal, IN gceFEATURE Feature); + +gceSTATUS +gcoHAL_IsFeatureAvailableWithHardware(IN gcoHARDWARE Hardware, IN gceFEATURE Feature); + +gceSTATUS +gcoHAL_IsFeatureAvailable1(IN gcoHAL Hal, IN gceFEATURE Feature); + +/* Query the identity of the hardware. 
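A minimal bring-up sketch for the gcoHAL entry points above. gcvSTATUS_OK, gcvSTATUS_TRUE and gcvFEATURE_PIPE_3D are assumed to come from the companion gc_hal_base/gc_hal_enum headers, and passing gcvNULL for Context and Os is an assumption; error handling is elided.

static void hal_bringup_sketch(void)
{
    gcoHAL hal = gcvNULL;

    /* gcoHAL_Construct() is only a compatibility stub; use the Ex variant. */
    if (gcoHAL_ConstructEx(gcvNULL, gcvNULL, &hal) != gcvSTATUS_OK)
        return;

    /* Boolean-style queries are assumed to report gcvSTATUS_TRUE when the
     * feature is present. */
    if (gcoHAL_IsFeatureAvailable(hal, gcvFEATURE_PIPE_3D) == gcvSTATUS_TRUE) {
        /* 3D pipe present: the gco3D / gcoVX paths may be used. */
    }

    gcoHAL_DestroyEx(hal);
}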
*/ +gceSTATUS +gcoHAL_QueryChipIdentity(IN gcoHAL Hal, + OUT gceCHIPMODEL *ChipModel, + OUT gctUINT32 *ChipRevision, + OUT gctUINT32 *ChipFeatures, + OUT gctUINT32 *ChipMinorFeatures); + +gceSTATUS +gcoHAL_QueryChipIdentityWithHardware(IN gcoHARDWARE Hardware, + OUT gceCHIPMODEL *ChipModel, + OUT gctUINT32 *ChipRevision); + +gceSTATUS +gcoHAL_QueryChipIdentityEx(IN gcoHAL Hal, + IN gctUINT32 SizeOfParam, + OUT gcsHAL_CHIPIDENTITY *ChipIdentity); + +gceSTATUS +gcoHAL_QuerySuperTileMode(OUT gctUINT32_PTR SuperTileMode); + +gceSTATUS +gcoHAL_QueryChipAxiBusWidth(OUT gctBOOL *AXI128Bits); + +gceSTATUS +gcoHAL_QueryMultiGPUAffinityConfig(IN gceHARDWARE_TYPE Type, + OUT gceMULTI_PROCESSOR_MODE *Mode, + OUT gctUINT32_PTR CoreIndex); + +gceSTATUS +gcoHAL_QueryHwDeviceIdByEnv(IN gcoHAL Hal, + OUT gctUINT32 *DeviceID, + OUT gctBOOL *HasEnv); + +gceSTATUS +gcoHAL_QuerySRAM(IN gcoHAL Hal, + IN gcePOOL Type, + OUT gctUINT32 *Size, + OUT gctADDRESS *GPUVirtAddr, + OUT gctPHYS_ADDR_T *GPUPhysAddr, + OUT gctUINT32 *GPUPhysName, + OUT gctPHYS_ADDR_T *CPUPhysAddr); + +#ifdef LINUX +gctINT32 +gcoOS_EndRecordAllocation(void); +void +gcoOS_RecordAllocation(void); +void +gcoOS_AddRecordAllocation(gctSIZE_T Size); +#endif + +/* Query the amount of video memory. */ +gceSTATUS +gcoHAL_QueryVideoMemory(IN gcoHAL Hal, + OUT gctUINT32 *InternalPhysName, + OUT gctSIZE_T *InternalSize, + OUT gctUINT32 *ExternalPhysName, + OUT gctSIZE_T *ExternalSize, + OUT gctUINT32 *ContiguousPhysName, + OUT gctSIZE_T *ContiguousSize); + +/* Map video memory. */ +gceSTATUS +gcoHAL_MapMemory(IN gcoHAL Hal, IN gctUINT32 PhysName, + IN gctSIZE_T NumberOfBytes, OUT gctPOINTER *Logical); + +/* Unmap video memory. */ +gceSTATUS +gcoHAL_UnmapMemory(IN gcoHAL Hal, IN gctUINT32 PhysName, + IN gctSIZE_T NumberOfBytes, IN gctPOINTER Logical); + +/* Schedule an unmap of a buffer mapped through its physical address. */ +gceSTATUS +gcoHAL_ScheduleUnmapMemory(IN gcoHAL Hal, IN gctUINT32 PhysName, + IN gctSIZE_T NumberOfBytes, IN gctPOINTER Logical); + +/* Allocate video memory. */ +gceSTATUS +gcoOS_AllocateVideoMemory(IN gcoOS Os, + IN gctBOOL InUserSpace, + IN gctBOOL InCacheable, + IN OUT gctSIZE_T *Bytes, + OUT gctUINT32 *Address, + OUT gctPOINTER *Logical, + OUT gctPOINTER *Handle); + +/* Free video memory. */ +gceSTATUS +gcoOS_FreeVideoMemory(IN gcoOS Os, IN gctPOINTER Handle); + +/* Lock video memory. */ +gceSTATUS +gcoOS_LockVideoMemory(IN gcoOS Os, + IN gctPOINTER Handle, + IN gctBOOL InUserSpace, + IN gctBOOL InCacheable, + OUT gctUINT32 *Address, + OUT gctPOINTER *Logical); + +/* Commit the current command buffer. */ +gceSTATUS +gcoHAL_Commit(IN gcoHAL Hal, IN gctBOOL Stall); + +#if gcdENABLE_3D +/* Sencd fence command. */ +gceSTATUS +gcoHAL_SendFence(IN gcoHAL Hal); + +/* Send fence command for GL_TIME_ELAPSED. */ +gceSTATUS +gcoHAL_TimeQuery_SendFence(IN gcoHAL Hal, IN gctADDRESS physical); +/* Wait fence result for GL_TIME_ELAPSED. */ +gceSTATUS +gcoHAL_TimeQuery_WaitFence(IN gcoHAL Hal, + IN gcsSURF_NODE_PTR node, + IN gctPOINTER nodeHeaderLocked, + IN gctPOINTER logical); +#endif /* gcdENABLE_3D */ + +/* Query the tile capabilities. 
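A sketch pairing the legacy gcoOS video-memory helpers above with gcoHAL_Commit(). Passing gcvNULL for the gcoOS argument mirrors the share-object lock macros earlier in this header; error handling is elided.

static void vidmem_sketch(gcoHAL hal)
{
    gctSIZE_T  bytes   = 4096;
    gctUINT32  address = 0;        /* GPU address of the allocation */
    gctPOINTER logical = gcvNULL;  /* CPU mapping                   */
    gctPOINTER handle  = gcvNULL;

    gcoOS_AllocateVideoMemory(gcvNULL,
                              gcvTRUE,   /* map into user space */
                              gcvFALSE,  /* non-cacheable       */
                              &bytes, &address, &logical, &handle);

    /* ... CPU fills the buffer through 'logical' ... */

    /* Flush the command buffer and stall (Stall = gcvTRUE) so the GPU is
     * done with the buffer before it is released. */
    gcoHAL_Commit(hal, gcvTRUE);

    gcoOS_FreeVideoMemory(gcvNULL, handle);
}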
*/ +gceSTATUS +gcoHAL_QueryTiled(IN gcoHAL Hal, + OUT gctINT32 *TileWidth2D, + OUT gctINT32 *TileHeight2D, + OUT gctINT32 *TileWidth3D, + OUT gctINT32 *TileHeight3D); + +gceSTATUS +gcoHAL_Compact(IN gcoHAL Hal); + +#if VIVANTE_PROFILER_SYSTEM_MEMORY +gceSTATUS +gcoHAL_ProfileStart(IN gcoHAL Hal); + +gceSTATUS +gcoHAL_ProfileEnd(IN gcoHAL Hal, IN gctCONST_STRING Title); +#endif + +/* Power Management */ +gceSTATUS +gcoHAL_SetPowerManagementState(IN gcoHAL Hal, + IN gceCHIPPOWERSTATE State); + +gceSTATUS +gcoHAL_QueryPowerManagementState(IN gcoHAL Hal, + OUT gceCHIPPOWERSTATE *State); + +/* Set the filter type for filter blit. */ +gceSTATUS +gcoHAL_SetFilterType(IN gcoHAL Hal, + IN gceFILTER_TYPE FilterType); + +/* Call the kernel HAL layer. */ +gceSTATUS +gcoHAL_Call(IN gcoHAL Hal, + IN OUT gcsHAL_INTERFACE_PTR Interface); + +/* Schedule an event. */ +gceSTATUS +gcoHAL_ScheduleEvent(IN gcoHAL Hal, + IN OUT gcsHAL_INTERFACE_PTR Interface); + +/* Request a start/stop timestamp. */ +gceSTATUS +gcoHAL_SetTimer(IN gcoHAL Hal, IN gctUINT32 Index, IN gctBOOL Start); + +/* Get Time delta from a Timer in microseconds. */ +gceSTATUS +gcoHAL_GetTimerTime(IN gcoHAL Hal, IN gctUINT32 Timer, + OUT gctINT32_PTR TimeDelta); + +/* set timeout value. */ +gceSTATUS +gcoHAL_SetTimeOut(IN gcoHAL Hal, IN gctUINT32 timeOut); + +gceSTATUS +gcoHAL_SetHardwareType(IN gcoHAL Hal, + IN gceHARDWARE_TYPE HardwardType); + +gceSTATUS +gcoHAL_GetHardwareType(IN gcoHAL Hal, + OUT gceHARDWARE_TYPE *HardwardType); + +gceSTATUS +gcoHAL_QueryChipCount(IN gcoHAL Hal, OUT gctINT32 *Count); + +gceSTATUS +gcoHAL_Query3DCoreCount(IN gcoHAL Hal, OUT gctUINT32 *Count); + +gceSTATUS +gcoHAL_Query2DCoreCount(IN gcoHAL Hal, OUT gctUINT32 *Count); + +gceSTATUS +gcoHAL_QueryCluster(IN gcoHAL Hal, + OUT gctINT32 *ClusterMinID, + OUT gctINT32 *ClusterMaxID, + OUT gctUINT32 *ClusterCount, + OUT gctUINT32 *ClusterIDWidth); + +gceSTATUS +gcoHAL_QueryUscAttribCacheRatio(IN gcoHAL Hal, + OUT gctUINT32 *UscAttribCacheRatio); + +gceSTATUS +gcoHAL_QueryCoreCount(IN gcoHAL Hal, + IN gceHARDWARE_TYPE Type, + OUT gctUINT *Count, + OUT gctUINT_PTR ChipIDs); + +gceSTATUS +gcoHAL_QuerySeparated2D(IN gcoHAL Hal); + +gceSTATUS +gcoHAL_QueryHybrid2D(IN gcoHAL Hal); + +gceSTATUS +gcoHAL_Is3DAvailable(IN gcoHAL Hal); + +/* Get pointer to gcoVG object. 
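A sketch of the start/stop timer pair above; the delta is reported in microseconds and timer index 0 is an arbitrary choice.

static void timer_sketch(gcoHAL hal)
{
    gctINT32 deltaUs = 0;

    gcoHAL_SetTimer(hal, 0, gcvTRUE);    /* start timer 0 */

    /* ... work to be measured ... */

    gcoHAL_SetTimer(hal, 0, gcvFALSE);   /* stop timer 0  */
    gcoHAL_GetTimerTime(hal, 0, &deltaUs);

    /* deltaUs now holds the elapsed time of the measured block. */
}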
*/ +gceSTATUS +gcoHAL_GetVGEngine(IN gcoHAL Hal, OUT gcoVG *Engine); + +gceSTATUS +gcoHAL_QueryChipLimits(IN gcoHAL Hal, + IN gctINT32 Chip, + OUT gcsHAL_LIMITS *Limits); + +gceSTATUS +gcoHAL_QueryChipFeature(IN gcoHAL Hal, IN gctINT32 Chip, IN gceFEATURE Feature); + +gceSTATUS +gcoHAL_SetDeviceIndex(IN gcoHAL Hal, IN gctUINT32 DeviceIndex); + +gceSTATUS +gcoHAL_GetCurrentDeviceIndex(IN gcoHAL Hal, OUT gctUINT32 *DeviceIndex); + +gceSTATUS +gcoHAL_SetCoreIndex(IN gcoHAL Hal, IN gctUINT32 Core); + +gceSTATUS +gcoHAL_GetCurrentCoreIndex(IN gcoHAL Hal, OUT gctUINT32 *Core); + +gceSTATUS +gcoHAL_InitCoreIndexByType(IN gcoHAL Hal, + IN gceHARDWARE_TYPE Type, + IN gctBOOL Init, + OUT gctUINT32 *CoreIndex); + +gceSTATUS +gcoHAL_ConvertCoreIndexGlobal(IN gcoHAL Hal, + IN gceHARDWARE_TYPE Type, + IN gctUINT32 CoreCount, + IN gctUINT32 *LocalCoreIndexs, + OUT gctUINT32 *GlobalCoreIndexs); + +gceSTATUS +gcoHAL_ConvertCoreIndexLocal(IN gcoHAL Hal, + IN gceHARDWARE_TYPE Type, + IN gctUINT32 CoreCount, + IN gctUINT32 *GlobalCoreIndexs, + OUT gctUINT32 *LocalCoreIndexs); + +gceSTATUS +gcoHAL_SelectChannel(IN gcoHAL Hal, IN gctBOOL Priority, IN gctUINT32 ChannelId); + +gceSTATUS +gcoHAL_MCFESemaphore(IN gctUINT32 SemaHandle, IN gctBOOL SendSema); + +gceSTATUS +gcoHAL_AllocateMCFESemaphore(OUT gctUINT32 *SemaHandle); + +gceSTATUS +gcoHAL_FreeMCFESemaphore(IN gctUINT32 SemaHandle); + +/*----------------------------------------------------------------------------*/ +/*----- Shared Buffer --------------------------------------------------------*/ + +/* Create shared buffer. */ +gceSTATUS +gcoHAL_CreateShBuffer(IN gctUINT32 Size, OUT gctSHBUF *ShBuf); + +/* Destroy shared buffer. */ +gceSTATUS +gcoHAL_DestroyShBuffer(IN gctSHBUF ShBuf); + +/* Map shared buffer to current process. */ +gceSTATUS +gcoHAL_MapShBuffer(IN gctSHBUF ShBuf); + +/* Write user data to shared buffer. */ +gceSTATUS +gcoHAL_WriteShBuffer(IN gctSHBUF ShBuf, IN gctCONST_POINTER Data, IN gctUINT32 ByteCount); + +/* Read user data from shared buffer. */ +gceSTATUS +gcoHAL_ReadShBuffer(IN gctSHBUF ShBuf, + IN gctPOINTER Data, + IN gctUINT32 BytesCount, + OUT gctUINT32 *BytesRead); + +/* Config power management to be enabled or disabled. */ +gceSTATUS +gcoHAL_ConfigPowerManagement(IN gctBOOL Enable, OUT gctBOOL *OldValue); + +gceSTATUS +gcoHAL_AllocateVideoMemory(IN gctUINT Alignment, + IN gceVIDMEM_TYPE Type, + IN gctUINT32 Flag, + IN OUT gcePOOL *Pool, + IN OUT gctSIZE_T *Bytes, + OUT gctUINT32_PTR Node); + +gceSTATUS +gcoHAL_LockVideoMemory(IN gctUINT32 Node, + IN gctBOOL Cacheable, + IN gceENGINE engine, + OUT gctADDRESS *Address, + OUT gctPOINTER *Logical); + +gceSTATUS +gcoHAL_LockVideoMemoryEx(IN gctUINT32 Node, + IN gctBOOL Cacheable, + IN gceENGINE engine, + IN gceLOCK_VIDEO_MEMORY_OP Op, + OUT gctADDRESS *Address, + OUT gctPOINTER *Logical); + +gceSTATUS +gcoHAL_UnlockVideoMemory(IN gctUINT32 Node, IN gceVIDMEM_TYPE Type, IN gceENGINE engine); + +gceSTATUS +gcoHAL_UnlockVideoMemoryEX(IN gctUINT32 Node, + IN gceVIDMEM_TYPE Type, + IN gceENGINE Engine, + IN gctBOOL Sync, + IN gceLOCK_VIDEO_MEMORY_OP Op); + +gceSTATUS +gcoHAL_ReleaseVideoMemory(IN gctUINT32 Node); + +#if gcdENABLE_3D +/* Query the target capabilities. 
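A sketch of sharing a small state blob across processes with the gctSHBUF helpers above; the struct and the way the handle crosses the process boundary are illustrative assumptions.

typedef struct { gctUINT32 width, height; } shared_state_t;  /* illustrative */

static gctSHBUF publish_state(const shared_state_t *state)
{
    gctSHBUF shBuf = gcvNULL;

    gcoHAL_CreateShBuffer(sizeof(*state), &shBuf);
    gcoHAL_WriteShBuffer(shBuf, state, sizeof(*state));
    return shBuf;                       /* hand the handle to the peer */
}

static void consume_state(gctSHBUF shBuf)
{
    shared_state_t state;
    gctUINT32      bytesRead = 0;

    gcoHAL_MapShBuffer(shBuf);          /* map into this process */
    gcoHAL_ReadShBuffer(shBuf, &state, sizeof(state), &bytesRead);
    /* ... use state ... */
    gcoHAL_DestroyShBuffer(shBuf);
}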
*/ +gceSTATUS +gcoHAL_QueryTargetCaps(IN gcoHAL Hal, + OUT gctUINT *MaxWidth, + OUT gctUINT *MaxHeight, + OUT gctUINT *MultiTargetCount, + OUT gctUINT *MaxSamples); +#endif + +gceSTATUS +gcoHAL_PrepareVideoMemory(IN gctUINT32 Node); + +gceSTATUS +gcoHAL_FinishVideoMemory(IN gctUINT32 Node); + +gceSTATUS +gcoHAL_WrapUserMemory(IN gcsUSER_MEMORY_DESC_PTR UserMemoryDesc, + IN gceVIDMEM_TYPE Type, + OUT gctUINT32_PTR Node); + +gceSTATUS +gcoHAL_QueryResetTimeStamp(OUT gctUINT64_PTR ResetTimeStamp, + OUT gctUINT64_PTR ContextID); + +gceSTATUS +gcoHAL_WaitFence(IN gctUINT32 Handle, IN gctUINT32 TimeOut); + +gceSTATUS +gcoHAL_ScheduleSignal(IN gctSIGNAL Signal, + IN gctSIGNAL AuxSignal, + IN gctINT ProcessID, + IN gceKERNEL_WHERE FromWhere); + +gceSTATUS +gcoHAL_GetGraphicBufferFd(IN gctUINT32 Node[3], + IN gctSHBUF ShBuf, + IN gctSIGNAL Signal, + OUT gctINT32 *Fd); + +gceSTATUS +gcoHAL_AlignToTile(IN OUT gctUINT32 *Width, + IN OUT gctUINT32 *Height, + IN gceSURF_TYPE Type, + IN gceSURF_FORMAT Format); + +gceSTATUS +gcoHAL_GetLastCommitStatus(IN gcoHAL Hal, OUT gctBOOL *Pending); + +gceSTATUS +gcoHAL_SetLastCommitStatus(IN gcoHAL Hal, IN gctBOOL Pending); + +gceSTATUS +gcoHAL_CommitDone(IN gcoHAL Hal); + +gceSTATUS +gcoHAL_IsFlatMapped(IN gctPHYS_ADDR_T PhysicalAddress, + OUT gctADDRESS *Address); + +gceSTATUS +gcoHAL_QueryMCFESemaphoreCapacity(IN gcoHAL Hal, + OUT gctUINT32 *Capacity); + +#if gcdENABLE_MP_SWITCH +gceSTATUS +gcoHAL_SwitchMpMode(gcoHAL Hal); +#endif + +gceSTATUS +gcoHAL_CommandBufferAutoCommit(gcoHAL Hal, gctBOOL AutoCommit); + +gceSTATUS +gcoHAL_CommandBufferAutoSync(gcoHAL Hal, gctBOOL AutoSync); + +#if gcdENABLE_MULTI_DEVICE_MANAGEMENT +gceSTATUS +gcoHAL_SwitchContext(IN gcoHAL Hal, + IN gcoHARDWARE Hardware, + OUT gcoHARDWARE *SavedHardware, + OUT gceHARDWARE_TYPE *SavedType, + OUT gctUINT32 *SavedHwDeviceIndex, + OUT gctUINT32 *SavedCoreIndex); + +gceSTATUS +gcoHAL_RestoreContext(IN gcoHAL Hal, + IN gcoHARDWARE Hardware, + IN gceHARDWARE_TYPE Type, + IN gctUINT32 HwDeviceIndex, + IN gctUINT32 CoreIndex); + +gceSTATUS +gcoHAL_ShowDeviceInfo(gcoHAL Hal); +#endif + +/****************************************************************************** + ********************************** gcoOS Object ****************************** + ******************************************************************************/ +/* Lock PLS access */ +gceSTATUS +gcoOS_LockPLS(void); + +/* Unlock PLS access */ +gceSTATUS +gcoOS_UnLockPLS(void); + +/* Get PLS value for given key */ +gctPOINTER +gcoOS_GetPLSValue(IN gcePLS_VALUE key); + +/* Set PLS value of a given key */ +void +gcoOS_SetPLSValue(IN gcePLS_VALUE key, OUT gctPOINTER value); + +/* Lock GL FE compiler access */ +gceSTATUS +gcoOS_LockGLFECompiler(void); + +/* Unlock GL FE compiler access */ +gceSTATUS +gcoOS_UnLockGLFECompiler(void); + +/* Lock CL FE compiler access */ +gceSTATUS +gcoOS_LockCLFECompiler(void); + +/* Unlock CL FE compiler access */ +gceSTATUS +gcoOS_UnLockCLFECompiler(void); + +gceSTATUS +gcoOS_GetTLS(OUT gcsTLS_PTR *TLS); + +/* Copy the TLS from a source thread. */ +gceSTATUS +gcoOS_CopyTLS(IN gcsTLS_PTR Source); + +/* Query the thread local storage. */ +gceSTATUS +gcoOS_QueryTLS(OUT gcsTLS_PTR *TLS); + +/* Get access to driver tls. */ +gceSTATUS +gcoOS_GetDriverTLS(IN gceTLS_KEY Key, + OUT gcsDRIVER_TLS_PTR *TLS); + +/* + * Set driver tls. + * May cause memory leak if 'destructor' not set. 
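Because the note above warns that the record leaks when no destructor is installed, here is a sketch of a per-thread driver-TLS entry built on gcoOS_GetDriverTLS() and gcoOS_SetDriverTLS() (declared just below). gcvTLS_KEY_EGL is an assumed gceTLS_KEY value and the record layout is illustrative; error handling is elided.

typedef struct {
    gcsDRIVER_TLS base;       /* must be first: the HAL invokes base.destructor */
    gctINT        someState;  /* illustrative per-thread state                  */
} my_driver_tls_t;

static void my_tls_destructor(gcsDRIVER_TLS_PTR Tls)
{
    gcoOS_Free(gcvNULL, Tls); /* released automatically on thread exit */
}

static my_driver_tls_t *get_my_tls(void)
{
    gcsDRIVER_TLS_PTR tls = gcvNULL;
    gctPOINTER        ptr = gcvNULL;
    my_driver_tls_t  *record;

    gcoOS_GetDriverTLS(gcvTLS_KEY_EGL, &tls);
    if (tls != gcvNULL)
        return (my_driver_tls_t *)tls;

    gcoOS_Allocate(gcvNULL, sizeof(my_driver_tls_t), &ptr);
    record = (my_driver_tls_t *)ptr;
    record->base.destructor = my_tls_destructor;  /* avoids the leak noted above */
    record->someState       = 0;
    gcoOS_SetDriverTLS(gcvTLS_KEY_EGL, &record->base);

    return record;
}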
+ */ +gceSTATUS +gcoOS_SetDriverTLS(IN gceTLS_KEY Key, IN gcsDRIVER_TLS *TLS); + +/* Destroy the objects associated with the current thread. */ +void +gcoOS_FreeThreadData(void); + +/* Empty function for compatibility. */ +gceSTATUS +gcoOS_Construct(IN gctPOINTER Context, OUT gcoOS *Os); + +/* Empty function for compatibility. */ +gceSTATUS +gcoOS_Destroy(IN gcoOS Os); + +/* Deprecated API: please use gcoHAL_GetBaseAddr() instead. + ** This API was kept only for legacy BSP usage. + ** + ** Get the base address for the physical memory. + */ +gceSTATUS +gcoOS_GetBaseAddress(IN gcoOS Os, OUT gctUINT32_PTR BaseAddress); + +/* Allocate memory from the heap. */ +gceSTATUS +gcoOS_Allocate(IN gcoOS Os, IN gctSIZE_T Bytes, + OUT gctPOINTER *Memory); + +gceSTATUS +gcoOS_Realloc(IN gcoOS Os, + IN gctSIZE_T Bytes, + IN gctSIZE_T OrgBytes, + OUT gctPOINTER *Memory); + +/* Get allocated memory size. */ +gceSTATUS +gcoOS_GetMemorySize(IN gcoOS Os, IN gctPOINTER Memory, + OUT gctSIZE_T_PTR MemorySize); + +/* Free allocated memory. */ +gceSTATUS +gcoOS_Free(IN gcoOS Os, IN gctPOINTER Memory); + +/* Allocate memory. */ +gceSTATUS +gcoOS_AllocateSharedMemory(IN gcoOS Os, IN gctSIZE_T Bytes, + OUT gctPOINTER *Memory); + +/* Free memory. */ +gceSTATUS +gcoOS_FreeSharedMemory(IN gcoOS Os, IN gctPOINTER Memory); + +/* Allocate memory. */ +gceSTATUS +gcoOS_AllocateMemory(IN gcoOS Os, IN gctSIZE_T Bytes, + OUT gctPOINTER *Memory); + +/* Realloc memory. */ +gceSTATUS +gcoOS_ReallocMemory(IN gcoOS Os, IN gctSIZE_T Bytes, + IN gctSIZE_T OrgBytes, OUT gctPOINTER *Memory); + +/* Free memory. */ +gceSTATUS +gcoOS_FreeMemory(IN gcoOS Os, IN gctPOINTER Memory); + +/* Device I/O Control call to the kernel HAL layer. */ +gceSTATUS +gcoOS_DeviceControl(IN gcoOS Os, + IN gctUINT32 IoControlCode, + IN gctPOINTER InputBuffer, + IN gctSIZE_T InputBufferSize, + IN gctPOINTER OutputBuffer, + IN gctSIZE_T OutputBufferSize); + +#define gcmOS_SAFE_FREE(os, mem) \ + gcoOS_Free(os, mem); \ + mem = gcvNULL + +#define gcmOS_SAFE_FREE_SHARED_MEMORY(os, mem) \ + gcoOS_FreeSharedMemory(os, mem); \ + mem = gcvNULL + +#define gcmkOS_SAFE_FREE(os, mem) \ + gckOS_Free(os, mem); \ + mem = gcvNULL + +#define gcdMAX_PATH 512 + +#define gcdMAX_ARGUMENT_SIZE 1024 +#define gcdMAX_ARGUMENT_COUNT 64 + +/* Open a file. */ +gceSTATUS +gcoOS_Open(IN gcoOS Os, + IN gctCONST_STRING FileName, + IN gceFILE_MODE Mode, + OUT gctFILE *File); + +/* Close a file. */ +gceSTATUS +gcoOS_Close(IN gcoOS Os, IN gctFILE File); + +/* Remove a file. */ +gceSTATUS +gcoOS_Remove(IN gcoOS Os, IN gctCONST_STRING FileName); + +/* Read data from a file. */ +gceSTATUS +gcoOS_Read(IN gcoOS Os, + IN gctFILE File, + IN gctSIZE_T ByteCount, + IN gctPOINTER Data, + OUT gctSIZE_T *ByteRead); + +/* Write data to a file. */ +gceSTATUS +gcoOS_Write(IN gcoOS Os, + IN gctFILE File, + IN gctSIZE_T ByteCount, + IN gctCONST_POINTER Data); + +/* Flush data to a file. */ +gceSTATUS +gcoOS_Flush(IN gcoOS Os, IN gctFILE File); + +/* Close a file descriptor. */ +gceSTATUS +gcoOS_CloseFD(IN gcoOS Os, IN gctINT FD); + +/* Scan a file. */ +gceSTATUS +gcoOS_FscanfI(IN gcoOS Os, + IN gctFILE File, + IN gctCONST_STRING Format, + OUT gctUINT *result); + +/* Dup file descriptor to another. */ +gceSTATUS +gcoOS_DupFD(IN gcoOS Os, + IN gctINT FD, + OUT gctINT *FD2); + +/* Lock a file. */ +gceSTATUS +gcoOS_LockFile(IN gcoOS Os, + IN gctFILE File, + IN gctBOOL Shared, + IN gctBOOL Block); + +/* Unlock a file. 
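A sketch of heap allocation through gcoOS_Allocate() and release through gcmOS_SAFE_FREE(); since the macro expands to two statements (the free plus the gcvNULL assignment), it must be wrapped in braces when used under an if or else.

static void alloc_sketch(void)
{
    gctPOINTER buffer = gcvNULL;

    gcoOS_Allocate(gcvNULL, 256, &buffer);

    /* ... use the 256-byte block ... */

    if (buffer != gcvNULL) {
        /* Braces are required: the macro is two statements, not one. */
        gcmOS_SAFE_FREE(gcvNULL, buffer);
    }
}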
*/ +gceSTATUS +gcoOS_UnlockFile(IN gcoOS Os, IN gctFILE File); + +/* Create an endpoint for communication. */ +gceSTATUS +gcoOS_Socket(IN gcoOS Os, + IN gctINT Domain, + IN gctINT Type, + IN gctINT Protocol, + OUT gctINT *SockFd); + +/* Close a socket. */ +gceSTATUS +gcoOS_CloseSocket(IN gcoOS Os, IN gctINT SockFd); + +/* Initiate a connection on a socket. */ +gceSTATUS +gcoOS_Connect(IN gcoOS Os, + IN gctINT SockFd, + IN gctCONST_POINTER HostName, + IN gctUINT Port); + +/* Shut down part of connection on a socket. */ +gceSTATUS +gcoOS_Shutdown(IN gcoOS Os, IN gctINT SockFd, IN gctINT How); + +/* Send a message on a socket. */ +gceSTATUS +gcoOS_Send(IN gcoOS Os, + IN gctINT SockFd, + IN gctSIZE_T ByteCount, + IN gctCONST_POINTER Data, + IN gctINT Flags); + +/* Initiate a connection on a socket. */ +gceSTATUS +gcoOS_WaitForSend(IN gcoOS Os, IN gctINT SockFd, + IN gctINT Seconds, IN gctINT MicroSeconds); + +/* Get environment variable value. */ +gceSTATUS +gcoOS_GetEnv(IN gcoOS Os, IN gctCONST_STRING VarName, OUT gctSTRING *Value); + +/* Set environment variable value. */ +gceSTATUS +gcoOS_SetEnv(IN gcoOS Os, IN gctCONST_STRING VarName, IN gctSTRING Value); + +/* Get current working directory. */ +gceSTATUS +gcoOS_GetCwd(IN gcoOS Os, IN gctINT SizeInBytes, OUT gctSTRING Buffer); + +/* Get file status info. */ +gceSTATUS +gcoOS_Stat(IN gcoOS Os, IN gctCONST_STRING FileName, OUT gctPOINTER Buffer); + +/* Set the current position of a file. */ +gceSTATUS +gcoOS_Seek(IN gcoOS Os, IN gctFILE File, IN gctUINT32 Offset, IN gceFILE_WHENCE Whence); + +/* Set the current position of a file. */ +gceSTATUS +gcoOS_SetPos(IN gcoOS Os, IN gctFILE File, IN gctUINT32 Position); + +/* Get the current position of a file. */ +gceSTATUS +gcoOS_GetPos(IN gcoOS Os, IN gctFILE File, OUT gctUINT32 *Position); + +/* Same as strstr. */ +gceSTATUS +gcoOS_StrStr(IN gctCONST_STRING String, + IN gctCONST_STRING SubString, + OUT gctSTRING *Output); + +/* Find the last occurrence of a character inside a string. */ +gceSTATUS +gcoOS_StrFindReverse(IN gctCONST_STRING String, + IN gctINT8 Character, + OUT gctSTRING *Output); + +gceSTATUS +gcoOS_StrDup(IN gcoOS Os, IN gctCONST_STRING String, OUT gctSTRING *Target); + +/* Copy a string. */ +gceSTATUS +gcoOS_StrCopySafe(IN gctSTRING Destination, + IN gctSIZE_T DestinationSize, + IN gctCONST_STRING Source); + +/* Append a string. */ +gceSTATUS +gcoOS_StrCatSafe(IN gctSTRING Destination, + IN gctSIZE_T DestinationSize, + IN gctCONST_STRING Source); + +/* Compare two strings. */ +gceSTATUS +gcoOS_StrCmp(IN gctCONST_STRING String1, IN gctCONST_STRING String2); + +/* Compare characters of two strings. */ +gceSTATUS +gcoOS_StrNCmp(IN gctCONST_STRING String1, + IN gctCONST_STRING String2, + IN gctSIZE_T Count); + +/* Convert string to float. */ +gceSTATUS +gcoOS_StrToFloat(IN gctCONST_STRING String, OUT gctFLOAT *Float); + +/* Convert string to double. */ +gceSTATUS +gcoOS_StrToDouble(IN gctCONST_STRING String, OUT gctDOUBLE* Double); + +/* Convert hex string to integer. */ +gceSTATUS +gcoOS_HexStrToInt(IN gctCONST_STRING String, OUT gctINT *Int); + +/* Convert hex string to float. */ +gceSTATUS +gcoOS_HexStrToFloat(IN gctCONST_STRING String, OUT gctFLOAT *Float); + +/* Convert string to integer. 
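A sketch of the bounded string helpers above; gcdMAX_PATH comes from the definitions earlier in this header and the path literal is purely illustrative.

static void build_path_sketch(void)
{
    gctCHAR path[gcdMAX_PATH];

    /* Both helpers take the destination capacity, so the copy cannot
     * overrun the buffer. */
    gcoOS_StrCopySafe(path, gcdMAX_PATH, "/tmp/");
    gcoOS_StrCatSafe(path, gcdMAX_PATH, "shader_cache.bin");

    /* path now holds "/tmp/shader_cache.bin". */
}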
*/ +gceSTATUS +gcoOS_StrToInt(IN gctCONST_STRING String, OUT gctINT *Int); + +gceSTATUS +gcoOS_MemCmp(IN gctCONST_POINTER Memory1, + IN gctCONST_POINTER Memory2, + IN gctSIZE_T Bytes); + +gceSTATUS +gcoOS_PrintStrSafe(OUT gctSTRING String, + IN gctSIZE_T StringSize, + IN OUT gctUINT *Offset, + IN gctCONST_STRING Format, + ...) +CHECK_PRINTF_FORMAT(4, 5); + +gceSTATUS +gcoOS_LoadLibrary(IN gcoOS Os, IN gctCONST_STRING Library, OUT gctHANDLE *Handle); + +gceSTATUS +gcoOS_FreeLibrary(IN gcoOS Os, IN gctHANDLE Handle); + +gceSTATUS +gcoOS_GetProcAddress(IN gcoOS Os, + IN gctHANDLE Handle, + IN gctCONST_STRING Name, + OUT gctPOINTER *Function); + +gceSTATUS +gcoOS_Compact(IN gcoOS Os); + +gceSTATUS +gcoOS_AddSignalHandler(IN gceSignalHandlerType SignalHandlerType); + +#if VIVANTE_PROFILER_SYSTEM_MEMORY +gceSTATUS +gcoOS_ProfileStart(IN gcoOS Os); + +gceSTATUS +gcoOS_ProfileEnd(IN gcoOS Os, IN gctCONST_STRING Title); + +gceSTATUS +gcoOS_SetProfileSetting(IN gcoOS Os, + IN gctBOOL Enable, + IN gceProfilerMode ProfileMode, + IN gctCONST_STRING FileName); +#endif + +/* Get the amount of physical system memory */ +gceSTATUS +gcoOS_GetPhysicalSystemMemorySize(OUT gctUINT64 *PhysicalSystemMemorySize); + +/* Query the video memory. */ +gceSTATUS +gcoOS_QueryVideoMemory(IN gcoOS Os, + OUT gctUINT32 *InternalPhysName, + OUT gctSIZE_T *InternalSize, + OUT gctUINT32 *ExternalPhysName, + OUT gctSIZE_T *ExternalSize, + OUT gctUINT32 *ContiguousPhysName, + OUT gctSIZE_T *ContiguousSize); + +gceSTATUS +gcoOS_QueryCurrentProcessName(OUT gctSTRING Name, IN gctSIZE_T Size); + +gceSTATUS +gcoOS_QueryCurrentProcessArguments(OUT gctCHAR Argv[gcdMAX_ARGUMENT_COUNT][gcdMAX_ARGUMENT_SIZE], + OUT gctUINT32 *Argc, + IN gctUINT32 MaxArgc, + IN gctUINT32 MaxSizePerArg); + +/*----------------------------------------------------------------------------*/ +/*----- Atoms ----------------------------------------------------------------*/ + +/* Construct an atom. */ +gceSTATUS +gcoOS_AtomConstruct(IN gcoOS Os, OUT gcsATOM_PTR *Atom); + +/* Destroy an atom. */ +gceSTATUS +gcoOS_AtomDestroy(IN gcoOS Os, IN gcsATOM_PTR Atom); + +/* Get the 32-bit value protected by an atom. */ +gceSTATUS +gcoOS_AtomGet(IN gcoOS Os, IN gcsATOM_PTR Atom, OUT gctINT32_PTR Value); + +/* Set the 32-bit value protected by an atom. */ +gceSTATUS +gcoOS_AtomSet(IN gcoOS Os, IN gcsATOM_PTR Atom, IN gctINT32 Value); + +/* Increment an atom. */ +gceSTATUS +gcoOS_AtomIncrement(IN gcoOS Os, IN gcsATOM_PTR Atom, OUT gctINT32_PTR OldValue); + +/* Decrement an atom. */ +gceSTATUS +gcoOS_AtomDecrement(IN gcoOS Os, IN gcsATOM_PTR Atom, OUT gctINT32_PTR OldValue); + +gctHANDLE +gcoOS_GetCurrentProcessID(void); + +gctHANDLE +gcoOS_GetCurrentThreadID(void); + +/*----------------------------------------------------------------------------*/ +/*----- Time -----------------------------------------------------------------*/ + +/* Get the number of milliseconds since the system started. */ +gctUINT32 +gcoOS_GetTicks(void); + +/* Get time in microseconds. */ +gceSTATUS +gcoOS_GetTime(gctUINT64_PTR Time); + +/* Get CPU usage in microseconds. */ +gceSTATUS +gcoOS_GetCPUTime(gctUINT64_PTR CPUTime); + +/* Get memory usage. */ +gceSTATUS +gcoOS_GetMemoryUsage(gctUINT32_PTR MaxRSS, + gctUINT32_PTR IxRSS, + gctUINT32_PTR IdRSS, + gctUINT32_PTR IsRSS); + +/* Delay a number of milliseconds. */ +gceSTATUS +gcoOS_Delay(IN gcoOS Os, IN gctUINT32 Delay); + + +/* Delay a number of microseconds. 
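A sketch of a reference counter built on the atom helpers above; the increment/decrement calls return the previous value, which is what makes the last-reference test work.

static gcsATOM_PTR refCount;

static void refcount_init(void)
{
    gcoOS_AtomConstruct(gcvNULL, &refCount);
    gcoOS_AtomSet(gcvNULL, refCount, 1);           /* one initial owner */
}

static void refcount_get(void)
{
    gctINT32 previous;
    gcoOS_AtomIncrement(gcvNULL, refCount, &previous);
}

static void refcount_put(void)
{
    gctINT32 previous;

    gcoOS_AtomDecrement(gcvNULL, refCount, &previous);
    if (previous == 1) {                           /* last reference dropped */
        gcoOS_AtomDestroy(gcvNULL, refCount);
        refCount = gcvNULL;
    }
}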
*/ +gceSTATUS +gcoOS_DelayUs(IN gcoOS Os, IN gctUINT32 Delay); + +/*----------------------------------------------------------------------------*/ +/*----- Threads --------------------------------------------------------------*/ + +#ifdef _WIN32 +/* Cannot include windows.h here because "near" and "far" + * which are used in gcsDEPTH_INFO, are defined to nothing in WinDef.h. + * So, use the real value of DWORD and WINAPI, instead. + * DWORD is unsigned long, and WINAPI is __stdcall. + * If these two are change in WinDef.h, the following two typdefs + * need to be changed, too. + */ +typedef unsigned long gctTHREAD_RETURN; +typedef unsigned long(__stdcall *gcTHREAD_ROUTINE)(void *Argument); +#else +typedef void *gctTHREAD_RETURN; +typedef void *(*gcTHREAD_ROUTINE)(void *); +#endif + +/* Create a new thread. */ +gceSTATUS +gcoOS_CreateThread(IN gcoOS Os, + IN gcTHREAD_ROUTINE Worker, + IN gctPOINTER Argument, + OUT gctPOINTER *Thread); + +/* Close a thread. */ +gceSTATUS +gcoOS_CloseThread(IN gcoOS Os, IN gctPOINTER Thread); + +/*----------------------------------------------------------------------------*/ +/*----- Mutexes --------------------------------------------------------------*/ + +/* Create a new mutex. */ +gceSTATUS +gcoOS_CreateMutex(IN gcoOS Os, OUT gctPOINTER *Mutex); + +/* Delete a mutex. */ +gceSTATUS +gcoOS_DeleteMutex(IN gcoOS Os, IN gctPOINTER Mutex); + +/* Acquire a mutex. */ +gceSTATUS +gcoOS_AcquireMutex(IN gcoOS Os, IN gctPOINTER Mutex, IN gctUINT32 Timeout); + +/* Release a mutex. */ +gceSTATUS +gcoOS_ReleaseMutex(IN gcoOS Os, IN gctPOINTER Mutex); + +/*----------------------------------------------------------------------------*/ +/*----- Signals --------------------------------------------------------------*/ + +/* Create a signal. */ +gceSTATUS +gcoOS_CreateSignal(IN gcoOS Os, IN gctBOOL ManualReset, OUT gctSIGNAL *Signal); + +/* Destroy a signal. */ +gceSTATUS +gcoOS_DestroySignal(IN gcoOS Os, IN gctSIGNAL Signal); + +/* Signal a signal. */ +gceSTATUS +gcoOS_Signal(IN gcoOS Os, IN gctSIGNAL Signal, IN gctBOOL State); + +/* Wait for a signal. */ +gceSTATUS +gcoOS_WaitSignal(IN gcoOS Os, IN gctSIGNAL Signal, IN gctUINT32 Wait); + +/* Map a signal from another process */ +gceSTATUS +gcoOS_MapSignal(IN gctSIGNAL RemoteSignal, OUT gctSIGNAL *LocalSignal); + +/* Unmap a signal mapped from another process */ +gceSTATUS +gcoOS_UnmapSignal(IN gctSIGNAL Signal); + +/*----------------------------------------------------------------------------*/ +/*----- Android Native Fence -------------------------------------------------*/ + +/* Create native fence. */ +gceSTATUS +gcoOS_CreateNativeFence(IN gcoOS Os, IN gctSIGNAL Signal, OUT gctINT *FenceFD); + +/* (CPU) Wait on native fence. */ +gceSTATUS +gcoOS_ClientWaitNativeFence(IN gcoOS Os, IN gctINT FenceFD, IN gctUINT32 Timeout); + +/* (GPU) Wait on native fence. */ +gceSTATUS +gcoOS_WaitNativeFence(IN gcoOS Os, IN gctINT FenceFD, IN gctUINT32 Timeout); + +/*----------------------------------------------------------------------------*/ +/*----- Memory Access and Cache ----------------------------------------------*/ + +/* Write a register. */ +gceSTATUS +gcoOS_WriteRegister(IN gcoOS Os, IN gctUINT32 Address, IN gctUINT32 Data); + +/* Read a register. 
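A sketch of a worker thread paired with an auto-reset signal, using the non-Windows gcTHREAD_ROUTINE signature defined above; gcvINFINITE is the same wait value used by the share-object lock macros earlier in this header.

static gctTHREAD_RETURN worker(void *argument)
{
    gctSIGNAL done = (gctSIGNAL)argument;

    /* ... background work ... */

    gcoOS_Signal(gcvNULL, done, gcvTRUE);          /* wake the waiter */
    return (gctTHREAD_RETURN)0;
}

static void thread_sketch(void)
{
    gctSIGNAL  done   = gcvNULL;
    gctPOINTER thread = gcvNULL;

    gcoOS_CreateSignal(gcvNULL, gcvFALSE, &done);  /* auto-reset signal */
    gcoOS_CreateThread(gcvNULL, worker, done, &thread);

    gcoOS_WaitSignal(gcvNULL, done, gcvINFINITE);  /* block until signaled */

    gcoOS_CloseThread(gcvNULL, thread);
    gcoOS_DestroySignal(gcvNULL, done);
}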
*/ +gceSTATUS +gcoOS_ReadRegister(IN gcoOS Os, IN gctUINT32 Address, OUT gctUINT32 *Data); + +gceSTATUS +gcoOS_CacheClean(IN gcoOS Os, IN gctUINT32 Node, + IN gctPOINTER Logical, IN gctSIZE_T Bytes); + +gceSTATUS +gcoOS_CacheFlush(IN gcoOS Os, IN gctUINT32 Node, + IN gctPOINTER Logical, IN gctSIZE_T Bytes); + +gceSTATUS +gcoOS_CacheInvalidate(IN gcoOS Os, IN gctUINT32 Node, + IN gctPOINTER Logical, IN gctSIZE_T Bytes); + +gceSTATUS +gcoOS_CacheCleanEx(IN gcoOS Os, IN gctUINT32 Node, + IN gctPOINTER Logical, IN gctSIZE_T Offset, IN gctSIZE_T Bytes); + +gceSTATUS +gcoOS_CacheFlushEx(IN gcoOS Os, IN gctUINT32 Node, + IN gctPOINTER Logical, IN gctSIZE_T Offset, IN gctSIZE_T Bytes); + +gceSTATUS +gcoOS_CacheInvalidateEx(IN gcoOS Os, IN gctUINT32 Node, + IN gctPOINTER Logical, IN gctSIZE_T Offset, IN gctSIZE_T Bytes); + + +gceSTATUS +gcoOS_MemoryBarrier(IN gcoOS Os, IN gctPOINTER Logical); + +gceSTATUS +gcoOS_CPUPhysicalToGPUPhysical(IN gctPHYS_ADDR_T CPUPhysical, + OUT gctPHYS_ADDR_T *GPUPhysical); + +gceSTATUS +gcoHAL_QueryCPUFrequency(IN gctUINT32 CPUId, OUT gctUINT32_PTR CPUFrequency); + +gceSTATUS +gcoOS_QuerySystemInfo(IN gcoOS Os, OUT gcsSystemInfo *Info); + + +/*----------------------------------------------------------------------------*/ +/*----- Profile --------------------------------------------------------------*/ + +gceSTATUS +gckOS_GetProfileTick(OUT gctUINT64_PTR Tick); + +gceSTATUS +gckOS_QueryProfileTickRate(OUT gctUINT64_PTR TickRate); + +gctUINT32 +gckOS_ProfileToMS(IN gctUINT64 Ticks); + +gceSTATUS +gcoOS_GetProfileTick(OUT gctUINT64_PTR Tick); + +gceSTATUS +gcoOS_QueryProfileTickRate(OUT gctUINT64_PTR TickRate); + +#if gcdSTATIC_LINK +void gcoOS_ModuleConstructor(void); + +void gcoOS_ModuleDestructor(void); +#endif + +#define _gcmPROFILE_INIT(prefix, freq, start) \ + do { \ + prefix##OS_QueryProfileTickRate(&(freq)); \ + prefix##OS_GetProfileTick(&(start)); \ + } while (gcvFALSE) + +#define _gcmPROFILE_QUERY(prefix, start, ticks) \ + do { \ + prefix##OS_GetProfileTick(&(ticks)); \ + (ticks) = ((ticks) > (start)) ? ((ticks) - (start)) \ + : (~0ull - (start) + (ticks) + 1); \ + } while (gcvFALSE) + +#if gcdENABLE_PROFILING +# define gcmkPROFILE_INIT(freq, start) _gcmPROFILE_INIT(gck, freq, start) +# define gcmkPROFILE_QUERY(start, ticks) _gcmPROFILE_QUERY(gck, start, ticks) +# define gcmPROFILE_INIT(freq, start) _gcmPROFILE_INIT(gco, freq, start) +# define gcmPROFILE_QUERY(start, ticks) _gcmPROFILE_QUERY(gco, start, ticks) +# define gcmPROFILE_ONLY(x) x +# define gcmPROFILE_ELSE(x) do { } while (gcvFALSE) +# define gcmPROFILE_DECLARE_ONLY(x) x +#else +# define gcmkPROFILE_INIT(start, freq) do { } while (gcvFALSE) +# define gcmkPROFILE_QUERY(start, ticks) do { } while (gcvFALSE) +# define gcmPROFILE_INIT(start, freq) do { } while (gcvFALSE) +# define gcmPROFILE_QUERY(start, ticks) do { } while (gcvFALSE) +# define gcmPROFILE_ONLY(x) do { } while (gcvFALSE) +# define gcmPROFILE_ELSE(x) x +# define gcmPROFILE_DECLARE_ONLY(x) do { } while (gcvFALSE) +#endif + +/******************************************************************************* + ** gcoMATH object + */ + +#define gcdPI 3.14159265358979323846f + +/* Kernel. */ +gctINT +gckMATH_ModuloInt(IN gctINT X, IN gctINT Y); + +/* User. 
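A sketch of the usual cache discipline around a cacheable mapping of a video-memory node: clean (write back) CPU writes before the GPU reads the buffer, and invalidate before the CPU reads data the GPU produced.

static void cache_sketch(gctUINT32 node, gctPOINTER logical, gctSIZE_T bytes)
{
    /* CPU produced data through a cacheable mapping: write the dirty lines
     * back so the GPU sees them. */
    gcoOS_CacheClean(gcvNULL, node, logical, bytes);
    gcoOS_MemoryBarrier(gcvNULL, logical);

    /* ... GPU reads and then writes results into the same buffer ... */

    /* Drop stale cached lines before the CPU reads the GPU output. */
    gcoOS_CacheInvalidate(gcvNULL, node, logical, bytes);
}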
*/ +gctUINT32 +gcoMATH_Log2in5dot5(IN gctINT X); + +gctFLOAT +gcoMATH_UIntAsFloat(IN gctUINT32 X); + +gctUINT32 +gcoMATH_FloatAsUInt(IN gctFLOAT X); + +gctBOOL +gcoMATH_CompareEqualF(IN gctFLOAT X, IN gctFLOAT Y); + +gctUINT16 +gcoMATH_UInt8AsFloat16(IN gctUINT8 X); + +gctUINT32 +gcoMATH_Float16ToFloat(IN gctUINT16 In); + +gctUINT16 +gcoMATH_FloatToFloat16(IN gctUINT32 In); + +gctUINT32 +gcoMATH_Float11ToFloat(IN gctUINT32 In); + +gctUINT16 +gcoMATH_FloatToFloat11(IN gctUINT32 In); + +gctUINT32 +gcoMATH_Float10ToFloat(IN gctUINT32 In); + +gctUINT16 +gcoMATH_FloatToFloat10(IN gctUINT32 In); + +gctUINT32 +gcoMATH_Float14ToFloat(IN gctUINT16 In); + +/****************************************************************************** + **************************** Coordinate Structures *************************** + ******************************************************************************/ + +typedef struct _gcsPOINT { + gctINT32 x; + gctINT32 y; +} gcsPOINT; + +typedef struct _gcsSIZE { + gctINT32 width; + gctINT32 height; +} gcsSIZE; + +typedef struct _gcsRECT { + gctINT32 left; + gctINT32 top; + gctINT32 right; + gctINT32 bottom; +} gcsRECT; + +typedef struct _gcs2D_RGBU32 +{ + gctUINT32 R; + gctUINT32 G; + gctUINT32 B; +} gcs2D_RGBU32; + +typedef struct _gcsPIXEL { + union { + struct { + gctFLOAT r, g, b, a; + } f; + struct { + gctINT32 r, g, b, a; + } i; + struct { + gctUINT32 r, g, b, a; + } ui; + } color; + + gctFLOAT d; + gctUINT32 s; + +} gcsPIXEL; + +/****************************************************************************** + ******************************** gcoSURF Object ****************************** + ******************************************************************************/ + +/*----------------------------------------------------------------------------*/ +/*------------------------------- gcoSURF Common ------------------------------*/ + +/* Color format component parameters. */ +typedef struct _gcsFORMAT_COMPONENT { + gctUINT8 start; + gctUINT8 width; +} gcsFORMAT_COMPONENT; + +/* RGBA color format class. */ +typedef struct _gcsFORMAT_CLASS_TYPE_RGBA { + gcsFORMAT_COMPONENT alpha; + gcsFORMAT_COMPONENT red; + gcsFORMAT_COMPONENT green; + gcsFORMAT_COMPONENT blue; +} gcsFORMAT_CLASS_TYPE_RGBA; + +/* YUV color format class. */ +typedef struct _gcsFORMAT_CLASS_TYPE_YUV { + gcsFORMAT_COMPONENT y; + gcsFORMAT_COMPONENT u; + gcsFORMAT_COMPONENT v; +} gcsFORMAT_CLASS_TYPE_YUV; + +/* Index color format class. */ +typedef struct _gcsFORMAT_CLASS_TYPE_INDEX { + gcsFORMAT_COMPONENT value; +} gcsFORMAT_CLASS_TYPE_INDEX; + +/* Luminance color format class. */ +typedef struct _gcsFORMAT_CLASS_TYPE_LUMINANCE { + gcsFORMAT_COMPONENT alpha; + gcsFORMAT_COMPONENT value; +} gcsFORMAT_CLASS_TYPE_LUMINANCE; + +/* Bump map color format class. */ +typedef struct _gcsFORMAT_CLASS_TYPE_BUMP { + gcsFORMAT_COMPONENT alpha; + gcsFORMAT_COMPONENT l; + gcsFORMAT_COMPONENT v; + gcsFORMAT_COMPONENT u; + gcsFORMAT_COMPONENT q; + gcsFORMAT_COMPONENT w; +} gcsFORMAT_CLASS_TYPE_BUMP; + +/* Depth and stencil format class. */ +typedef struct _gcsFORMAT_CLASS_TYPE_DEPTH { + gcsFORMAT_COMPONENT depth; + gcsFORMAT_COMPONENT stencil; +} gcsFORMAT_CLASS_TYPE_DEPTH; + +/* Intensity format class. 
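A sketch of a float/half round trip with the converters above; they operate on raw IEEE-754 bit patterns, hence the FloatAsUInt/UIntAsFloat shims.

static void half_float_sketch(void)
{
    gctFLOAT  original = 1.5f;
    gctUINT16 half;
    gctFLOAT  restored;

    half     = gcoMATH_FloatToFloat16(gcoMATH_FloatAsUInt(original));
    restored = gcoMATH_UIntAsFloat(gcoMATH_Float16ToFloat(half));

    /* 1.5 is exactly representable in fp16, so the round trip is lossless. */
    (void)gcoMATH_CompareEqualF(original, restored);
}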
*/ +typedef struct _gcsFORMAT_CLASs_TYPE_INTENSITY { + gcsFORMAT_COMPONENT value; +} gcsFORMAT_CLASs_TYPE_INTENSITY; + +typedef union _gcuPIXEL_FORMAT_CLASS { + gcsFORMAT_CLASS_TYPE_BUMP bump; + gcsFORMAT_CLASS_TYPE_RGBA rgba; + gcsFORMAT_CLASS_TYPE_YUV yuv; + gcsFORMAT_CLASS_TYPE_LUMINANCE lum; + gcsFORMAT_CLASS_TYPE_INDEX index; + gcsFORMAT_CLASS_TYPE_DEPTH depth; + gcsFORMAT_CLASs_TYPE_INTENSITY intensity; +} gcuPIXEL_FORMAT_CLASS; + +/* Format parameters. */ +typedef struct _gcsSURF_FORMAT_INFO { + /* Name of the format */ + gctCONST_STRING formatName; + + /* Format code and class. */ + gceSURF_FORMAT format; + gceFORMAT_CLASS fmtClass; + + /* Format data type */ + gceFORMAT_DATATYPE fmtDataType; + + /* The size of one pixel in bits. */ + gctUINT8 bitsPerPixel; + + /* Pixel block dimensions. */ + gctUINT blockWidth; + gctUINT blockHeight; + + /* Pixel block size in bits. */ + gctUINT blockSize; + + /* Some formats are larger than what the GPU can support. */ + /* These formats are read in the number of layers specified. */ + gctUINT8 layers; + + /* The format is faked and software will interpret it differently + * with HW. Most of them can't be blendable(PE) or filterable(TX). + */ + gctBOOL fakedFormat; + + /* Some formats have two neighbour pixels interleaved together. */ + /* To describe such format, set the flag to 1 and add another */ + /* like this one describing the odd pixel format. */ + gctBOOL interleaved; + + /* sRGB format. */ + gctBOOL sRGB; + + /* How GPU read from big-endian host memory */ + gceENDIAN_HINT endian; + + /* Format components. */ + gcuPIXEL_FORMAT_CLASS u; + + /* Format components. */ + gcuPIXEL_FORMAT_CLASS uOdd; + + /* Render format. */ + gceSURF_FORMAT closestRenderFormat; + /*gctCLOSEST_FORMAT dynamicClosestRenderFormat;*/ + gctUINT renderFormat; + const gceTEXTURE_SWIZZLE *pixelSwizzle; + + /* Texture format. */ + gceSURF_FORMAT closestTXFormat; + gctUINT txFormat; + const gceTEXTURE_SWIZZLE *txSwizzle; + gctBOOL txIntFilter; +} gcsSURF_FORMAT_INFO; + +/* Frame buffer information. */ +typedef struct _gcsSURF_FRAMEBUFFER { + gctPOINTER logical; + gctUINT width, height; + gctINT stride; + gceSURF_FORMAT format; +} gcsSURF_FRAMEBUFFER; + +typedef union _gcu2D_STATE_VALUE +{ + gcs2D_RGBU32 minValue; + gcs2D_RGBU32 maxMinReciprocal; + gcs2D_RGBU32 stdReciprocal; + gcs2D_RGBU32 meanValue; + gctBOOL enable; + gctUINT32 stepReciprocal; + gce2D_NORMALIZATION_MODE normalizationMode; + gctBOOL byPassQuantization; + gce2D_U8ToU10_CONVERSION_MODE u8Tu10_Mode; + gce2D_MULTICORE_MODE multicoreMode; +} gcu2D_STATE_VALUE; + +/* Set 2D state */ +typedef struct _gcs2D_STATE_CONFIG +{ + gce2D_STATE_KEY state; + gcu2D_STATE_VALUE value; +} gcs2D_STATE_CONFIG; + +/* Generic pixel component descriptors. */ +extern gcsFORMAT_COMPONENT gcvPIXEL_COMP_XXX8; +extern gcsFORMAT_COMPONENT gcvPIXEL_COMP_XX8X; +extern gcsFORMAT_COMPONENT gcvPIXEL_COMP_X8XX; +extern gcsFORMAT_COMPONENT gcvPIXEL_COMP_8XXX; + +/* Construct a new gcoSURF object. 
*/ +gceSTATUS +gcoSURF_Construct(IN gcoHAL Hal, + IN gctUINT Width, + IN gctUINT Height, + IN gctUINT Depth, + IN gceSURF_TYPE Type, + IN gceSURF_FORMAT Format, + IN gcePOOL Pool, + OUT gcoSURF *Surface); + +gceSTATUS +gcoSURF_ConstructWithUserPool(IN gcoHAL Hal, + IN gctUINT Width, + IN gctUINT Height, + IN gctUINT Depth, + IN gceSURF_TYPE Type, + IN gceSURF_FORMAT Format, + IN gctPOINTER TileStatusLogical, + IN gctPHYS_ADDR_T TileStatusPhysical, + IN gctPOINTER Logical, + IN gctPHYS_ADDR_T Physical, + OUT gcoSURF *Surface); + +/* Destroy an gcoSURF object. */ +gceSTATUS +gcoSURF_Destroy(IN gcoSURF Surface); + +gceSTATUS +gcoSURF_DestroyForAllHWType(IN gcoSURF Surface); + +/* Map user-allocated surface. */ +gceSTATUS +gcoSURF_MapUserSurface(IN gcoSURF Surface, + IN gctUINT Alignment, + IN gctPOINTER Logical, + IN gctPHYS_ADDR_T Physical); + +/* Wrapp surface with known logical/GPU address */ +gceSTATUS +gcoSURF_WrapSurface(IN gcoSURF Surface, + IN gctUINT Alignment, + IN gctPOINTER Logical, + IN gctADDRESS Address); + +/* Query vid mem node info. */ +gceSTATUS +gcoSURF_QueryVidMemNode(IN gcoSURF Surface, + OUT gctUINT32 *Node, + OUT gcePOOL *Pool, + OUT gctSIZE_T_PTR Bytes, + OUT gctUINT32 *TsNode, + OUT gcePOOL *TsPool, + OUT gctSIZE_T_PTR TsBytes); + +/* Query vid mem Multi node info. */ +gceSTATUS +gcoSURF_QueryVidMemMultiNode(IN gcoSURF Surface, + OUT gctUINT32 *Node, + OUT gcePOOL *Pool, + OUT gctSIZE_T_PTR Bytes, + OUT gctUINT32 *Node2, + OUT gcePOOL *Pool2, + OUT gctSIZE_T_PTR Bytes2, + OUT gctUINT32 *Node3, + OUT gcePOOL *Pool3, + OUT gctSIZE_T_PTR Bytes3); + +/* Set the color type of the surface. */ +gceSTATUS +gcoSURF_SetColorType(IN gcoSURF Surface, IN gceSURF_COLOR_TYPE ColorType); + +/* Get the color type of the surface. */ +gceSTATUS +gcoSURF_GetColorType(IN gcoSURF Surface, OUT gceSURF_COLOR_TYPE *ColorType); + +/* Set the color space of the surface. */ +gceSTATUS +gcoSURF_SetColorSpace(IN gcoSURF Surface, IN gceSURF_COLOR_SPACE ColorSpace); + +/* Get the color space of the surface. */ +gceSTATUS +gcoSURF_GetColorSpace(IN gcoSURF Surface, OUT gceSURF_COLOR_SPACE *ColorSpace); + +/* Set the surface ration angle. */ +gceSTATUS +gcoSURF_SetRotation(IN gcoSURF Surface, IN gceSURF_ROTATION Rotation); + +gceSTATUS +gcoSURF_IsValid(IN gcoSURF Surface); + +#if gcdENABLE_3D +/* Verify and return the state of the tile status mechanism. */ +gceSTATUS +gcoSURF_IsTileStatusSupported(IN gcoSURF Surface); + +/* Verify if surface has tile status enabled. */ +gceSTATUS +gcoSURF_IsTileStatusEnabled(IN gcsSURF_VIEW *SurfView); + +/* Verify if surface is compressed. */ +gceSTATUS +gcoSURF_IsCompressed(IN gcsSURF_VIEW *SurfView); + +/* Enable tile status for the specified surface on zero slot. */ +gceSTATUS +gcoSURF_EnableTileStatus(IN gcsSURF_VIEW *Surface); + +/* Enable tile status for the specified surface on specified slot. */ +gceSTATUS +gcoSURF_EnableTileStatusEx(IN gcsSURF_VIEW *surfView, IN gctUINT RtIndex); + +/* Disable tile status for the specified surface. */ +gceSTATUS +gcoSURF_DisableTileStatus(IN gcsSURF_VIEW *SurfView, IN gctBOOL Decompress); + +/* Flush tile status cache for the specified surface. */ +gceSTATUS +gcoSURF_FlushTileStatus(IN gcsSURF_VIEW *SurfView, IN gctBOOL Decompress); +#endif /* gcdENABLE_3D */ + +/* Get surface size. 
*/ +gceSTATUS +gcoSURF_GetSize(IN gcoSURF Surface, + OUT gctUINT *Width, + OUT gctUINT *Height, + OUT gctUINT *Depth); + +/* Get surface information */ +gceSTATUS +gcoSURF_GetInfo(IN gcoSURF Surface, + IN gceSURF_INFO_TYPE InfoType, + IN OUT gctINT32 *Value); + +/* Get surface aligned sizes. */ +gceSTATUS +gcoSURF_GetAlignedSize(IN gcoSURF Surface, + OUT gctUINT *Width, + OUT gctUINT *Height, + OUT gctINT *Stride); + +/* Get alignments. */ +gceSTATUS +gcoSURF_GetAlignment(IN gceSURF_TYPE Type, + IN gceSURF_FORMAT Format, + OUT gctUINT *AddressAlignment, + OUT gctUINT *XAlignment, + OUT gctUINT *YAlignment); + +gceSTATUS +gcoSURF_AlignResolveRect(IN gcoSURF Surf, + IN gcsPOINT_PTR RectOrigin, + IN gcsPOINT_PTR RectSize, + OUT gcsPOINT_PTR AlignedOrigin, + OUT gcsPOINT_PTR AlignedSize); + +/* Get surface type and format. */ +gceSTATUS +gcoSURF_GetFormat(IN gcoSURF Surface, + OUT OPTIONAL gceSURF_TYPE *Type, + OUT OPTIONAL gceSURF_FORMAT *Format); + +/* Get surface information */ +gceSTATUS +gcoSURF_GetFormatInfo(IN gcoSURF Surface, + OUT gcsSURF_FORMAT_INFO_PTR *formatInfo); + +/* Get Surface pack format */ +gceSTATUS +gcoSURF_GetPackedFormat(IN gcoSURF Surface, + OUT gceSURF_FORMAT *Format); + +/* Get surface tiling. */ +gceSTATUS +gcoSURF_GetTiling(IN gcoSURF Surface, OUT gceTILING *Tiling); + +/* Get bottom buffer offset bytes. */ +gceSTATUS +gcoSURF_GetBottomBufferOffset(IN gcoSURF Surface, + OUT gctUINT_PTR BottomBufferOffset); + +/* Lock the surface. */ +gceSTATUS +gcoSURF_Lock(IN gcoSURF Surface, + IN OUT gctADDRESS *Address, + IN OUT gctPOINTER *Memory); + +/* Unlock the surface. */ +gceSTATUS +gcoSURF_Unlock(IN gcoSURF Surface, IN gctPOINTER Memory); + +/*. Query surface flags.*/ +gceSTATUS +gcoSURF_QueryFlags(IN gcoSURF Surface, IN gceSURF_FLAG Flag); + +gceSTATUS +gcoSURF_QueryHints(IN gcoSURF Surface, IN gceSURF_TYPE Hints); + +/* Return pixel format parameters; Info is required to be a pointer to an + * array of at least two items because some formats have up to two records + * of description. + */ +gceSTATUS +gcoSURF_QueryFormat(IN gceSURF_FORMAT Format, + OUT gcsSURF_FORMAT_INFO_PTR *Info); + +/* Compute the color pixel mask. */ +gceSTATUS +gcoSURF_ComputeColorMask(IN gcsSURF_FORMAT_INFO_PTR Format, + OUT gctUINT32_PTR ColorMask); + +/* Flush the surface. */ +gceSTATUS +gcoSURF_Flush(IN gcoSURF Surface); + +gceSTATUS +gcoSURF_3DBlitClearTileStatus(IN gcsSURF_VIEW *SurfView, + IN gctBOOL ClearAsDirty); + +/* Fill surface from it's tile status buffer. */ +gceSTATUS +gcoSURF_FillFromTile(IN gcsSURF_VIEW *SurView); + +/* Fill surface with a value. */ +gceSTATUS +gcoSURF_Fill(IN gcoSURF Surface, + IN gcsPOINT_PTR Origin, + IN gcsSIZE_PTR Size, + IN gctUINT32 Value, + IN gctUINT32 Mask); + +/* Alpha blend two surfaces together. */ +gceSTATUS +gcoSURF_Blend(IN gcoSURF SrcSurf, + IN gcoSURF DstSurf, + IN gcsPOINT_PTR SrcOrigin, + IN gcsPOINT_PTR DstOrigin, + IN gcsSIZE_PTR Size, + IN gceSURF_BLEND_MODE Mode); + +/* Create a new gcoSURF wrapper object. */ +gceSTATUS +gcoSURF_ConstructWrapper(IN gcoHAL Hal, OUT gcoSURF *Surface); + +/* Set surface flags.*/ +gceSTATUS +gcoSURF_SetFlags(IN gcoSURF Surface, IN gceSURF_FLAG Flag, IN gctBOOL Value); + +/* Set the underlying buffer for the surface wrapper. */ +gceSTATUS +gcoSURF_SetBuffer(IN gcoSURF Surface, + IN gceSURF_TYPE Type, + IN gceSURF_FORMAT Format, + IN gctUINT Stride, + IN gctPOINTER Logical, + IN gctUINT64 Physical); + +/* Set the size of the surface in pixels and map the underlying buffer. 
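A sketch of the basic gcoSURF lifecycle: construct, lock for CPU access, unlock, destroy. gcvSURF_BITMAP and gcvPOOL_DEFAULT are assumed values from the companion enum header (gcvSURF_A8R8G8B8 already appears in this header's PLS initializer); error handling is elided.

static void surface_sketch(gcoHAL hal)
{
    gcoSURF    surf    = gcvNULL;
    gctADDRESS address = 0;
    gctPOINTER memory  = gcvNULL;
    gctUINT    width, height;
    gctINT     stride;

    gcoSURF_Construct(hal, 640, 480, 1,
                      gcvSURF_BITMAP,    /* assumed gceSURF_TYPE value */
                      gcvSURF_A8R8G8B8,
                      gcvPOOL_DEFAULT,   /* assumed gcePOOL value      */
                      &surf);

    /* The allocator may pad the surface; query the aligned layout first. */
    gcoSURF_GetAlignedSize(surf, &width, &height, &stride);

    gcoSURF_Lock(surf, &address, &memory);   /* GPU address + CPU pointer */
    /* ... write pixels through 'memory', 'stride' bytes per row ... */
    gcoSURF_Unlock(surf, memory);

    gcoSURF_Destroy(surf);
}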
*/ +gceSTATUS +gcoSURF_SetWindow(IN gcoSURF Surface, + IN gctUINT X, + IN gctUINT Y, + IN gctUINT Width, + IN gctUINT Height); + +/* Set the size of the surface in pixels and map the underlying buffer. */ +gceSTATUS +gcoSURF_SetImage(IN gcoSURF Surface, + IN gctUINT X, + IN gctUINT Y, + IN gctUINT Width, + IN gctUINT Height, + IN gctUINT Depth); + +/* Set width/height alignment of the surface directly and calculate stride/size. + * This is only for dri backend now. Please be careful before use. + */ +gceSTATUS +gcoSURF_SetAlignment(IN gcoSURF Surface, IN gctUINT Width, IN gctUINT Height); + +/* Increase reference count of the surface. */ +gceSTATUS +gcoSURF_ReferenceSurface(IN gcoSURF Surface); + +/* Get surface reference count. */ +gceSTATUS +gcoSURF_QueryReferenceCount(IN gcoSURF Surface, OUT gctINT32 *ReferenceCount); + +/* Set surface orientation. */ +gceSTATUS +gcoSURF_SetOrientation(IN gcoSURF Surface, IN gceORIENTATION Orientation); + +/* Query surface orientation. */ +gceSTATUS +gcoSURF_QueryOrientation(IN gcoSURF Surface, OUT gceORIENTATION *Orientation); + +gceSTATUS +gcoSURF_NODE_Cache(IN gcsSURF_NODE_PTR Node, + IN gctPOINTER Logical, + IN gctSIZE_T Bytes, + IN gceCACHEOPERATION Operation); + +gceSTATUS +gcoSURF_NODE_CacheEx(IN gcsSURF_NODE_PTR Node, + IN gctPOINTER Logical, + IN gctSIZE_T Offset, + IN gctSIZE_T Bytes, + IN gceCACHEOPERATION Operation); + +gceSTATUS +gcsSURF_NODE_SetHardwareAddress(IN gcsSURF_NODE_PTR Node, IN gctADDRESS Address); + +gceSTATUS +gcsSURF_NODE_GetHardwareAddress(IN gcsSURF_NODE_PTR Node, + OUT gctADDRESS *Physical, + OUT gctADDRESS *Physical2, + OUT gctADDRESS *Physical3, + OUT gctADDRESS *PhysicalBottom); + +gctADDRESS +gcsSURF_NODE_GetHWAddress(IN gcsSURF_NODE_PTR Node); + +/* Lock and unlock surface node */ +gceSTATUS +gcoSURF_LockNode(IN gcsSURF_NODE_PTR Node, + OUT gctADDRESS *Address, + OUT gctPOINTER *Memory); + +gceSTATUS +gcoSURF_UnLockNode(IN gcsSURF_NODE_PTR Node, IN gceSURF_TYPE Type); + +/* Perform CPU cache operation on surface node */ +gceSTATUS +gcoSURF_NODE_CPUCacheOperation(IN gcsSURF_NODE_PTR Node, + IN gceSURF_TYPE Type, + IN gctSIZE_T Offset, + IN gctSIZE_T Length, + IN gceCACHEOPERATION Operation); + +/* Perform CPU cache operation on surface */ +gceSTATUS +gcoSURF_CPUCacheOperation(IN gcoSURF Surface, + IN gceCACHEOPERATION Operation); + +gceSTATUS +gcoSURF_Swap(IN gcoSURF Surface1, IN gcoSURF Surface2); + +gceSTATUS +gcoSURF_ResetSurWH(IN gcoSURF Surface, + IN gctUINT oriw, + IN gctUINT orih, + IN gctUINT alignw, + IN gctUINT alignh, + IN gceSURF_FORMAT fmt); + +/* Update surface timestamp. */ +gceSTATUS +gcoSURF_UpdateTimeStamp(IN gcoSURF Surface); + +/* Query surface current timestamp. */ +gceSTATUS +gcoSURF_QueryTimeStamp(IN gcoSURF Surface, OUT gctUINT64 *TimeStamp); + +/* + * Allocate shared buffer for this surface, so that + * surface states can be shared across processes. + */ +gceSTATUS +gcoSURF_AllocShBuffer(IN gcoSURF Surface, OUT gctSHBUF *ShBuf); + +/* Bind shared buffer to this surface */ +gceSTATUS +gcoSURF_BindShBuffer(IN gcoSURF Surface, IN gctSHBUF ShBuf); + +/* Push surface shared states to shared buffer. */ +gceSTATUS +gcoSURF_PushSharedInfo(IN gcoSURF Surface); + +/* Pop shared states from shared buffer. */ +gceSTATUS +gcoSURF_PopSharedInfo(IN gcoSURF Surface); + +#if (gcdENABLE_3D) +/* Copy surface. */ +gceSTATUS +gcoSURF_Copy(IN gcoSURF Surface, IN gcoSURF Source); + +/* Set number of samples for a gcoSURF object. 
*/ +gceSTATUS +gcoSURF_SetSamples(IN gcoSURF Surface, IN gctUINT Samples); + +/* Get the number of samples per pixel. */ +gceSTATUS +gcoSURF_GetSamples(IN gcoSURF Surface, OUT gctUINT_PTR Samples); + +/* Append tile status buffer to user pool surface. */ +gceSTATUS +gcoSURF_AppendTileStatus(IN gcoSURF Surface); +#endif + +gceSTATUS +gcoSURF_WrapUserMemory(IN gcoHAL Hal, + IN gctUINT Width, + IN gctUINT Height, + IN gctUINT Stride, + IN gctUINT Depth, + IN gceSURF_TYPE Type, + IN gceSURF_FORMAT Format, + IN gctUINT32 Handle, + IN gctUINT32 Flag, + OUT gcoSURF *Surface); + +#ifdef EMULATOR +/* Wrap user memory by external address. */ +gceSTATUS +gcoSURF_WrapUserMemory_2(IN gcoHAL Hal, + IN gctUINT Width, + IN gctUINT Height, + IN gctUINT Stride, + IN gctUINT Depth, + IN gceSURF_TYPE Type, + IN gceSURF_FORMAT Format, + IN gctUINT32_PTR Address, + IN gctUINT32 Flag, + OUT gcoSURF * Surface); +#endif + +gceSTATUS +gcoSURF_WrapUserMultiBuffer(IN gcoHAL Hal, + IN gctUINT Width, + IN gctUINT Height, + IN gceSURF_TYPE Type, + IN gceSURF_FORMAT Format, + IN gctUINT Stride[3], + IN gctUINT32 Handle[3], + IN gctUINT BufferOffset[3], + IN gctUINT32 Flag, + OUT gcoSURF *Surface); + +gceSTATUS +gcoSURF_UpdateMetadata(IN gcoSURF Surface, IN gctINT TsFD); + +#define MAX_SURF_MIX_SRC_NUM 64 +gceSTATUS +gcoSURF_MixSurfacesCPU(IN gcoSURF TargetSurface, + IN gctUINT TargetSliceIndex, + IN gcoSURF *SourceSurface, + IN gctUINT *SourceSliceIndices, + IN gctFLOAT *Weights, + IN gctINT Count); + +/****************************************************************************** + ****************************** Hash Structure ******************************** + ******************************************************************************/ + +typedef struct _gcsHASH_MD5CTX { + gctBOOL bigEndian; + gctSIZE_T bytes; /* Number of bytes processed */ + gctUINT32 states[4]; + gctUINT8 buffer[64]; +} gcsHASH_MD5CTX; + +void +gcsHASH_MD5Init(gcsHASH_MD5CTX *ctx); +void +gcsHASH_MD5Update(gcsHASH_MD5CTX *ctx, const void *data, gctSIZE_T bytes); +void +gcsHASH_MD5Final(gcsHASH_MD5CTX *ctx, gctUINT8 digest[16]); + +/****************************************************************************** + ******************************* gcsRECT Structure **************************** + ******************************************************************************/ + +/* Initialize rectangle structure. */ +gceSTATUS +gcsRECT_Set(OUT gcsRECT_PTR Rect, + IN gctINT32 Left, + IN gctINT32 Top, + IN gctINT32 Right, + IN gctINT32 Bottom); + +/* Return the width of the rectangle. */ +gceSTATUS +gcsRECT_Width(IN gcsRECT_PTR Rect, OUT gctINT32 *Width); + +/* Return the height of the rectangle. */ +gceSTATUS +gcsRECT_Height(IN gcsRECT_PTR Rect, OUT gctINT32 *Height); + +/* Ensure that top left corner is to the left and above the right bottom. */ +gceSTATUS +gcsRECT_Normalize(IN OUT gcsRECT_PTR Rect); + +/* Compare two rectangles. */ +gceSTATUS +gcsRECT_IsEqual(IN gcsRECT_PTR Rect1, IN gcsRECT_PTR Rect2, OUT gctBOOL *Equal); + +/* Compare the sizes of two rectangles. 
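A sketch of the MD5 helper above, e.g. for deriving a cache key from an arbitrary blob; Update() may be called any number of times before Final().

static void md5_sketch(const void *data, gctSIZE_T bytes, gctUINT8 digest[16])
{
    gcsHASH_MD5CTX ctx;

    gcsHASH_MD5Init(&ctx);
    gcsHASH_MD5Update(&ctx, data, bytes);  /* repeat per chunk if needed */
    gcsHASH_MD5Final(&ctx, digest);        /* 16-byte digest out         */
}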
*/ +gceSTATUS +gcsRECT_IsOfEqualSize(IN gcsRECT_PTR Rect1, IN gcsRECT_PTR Rect2, OUT gctBOOL *EqualSize); + +gceSTATUS +gcsRECT_RelativeRotation(IN gceSURF_ROTATION Orientation, + IN OUT gceSURF_ROTATION *Relation); + +gceSTATUS +gcsRECT_Rotate(IN OUT gcsRECT_PTR Rect, + IN gceSURF_ROTATION Rotation, + IN gceSURF_ROTATION toRotation, + IN gctINT32 SurfaceWidth, + IN gctINT32 SurfaceHeight); + +/****************************************************************************** + **************************** gcsBOUNDARY Structure *************************** + ******************************************************************************/ + +typedef struct _gcsBOUNDARY { + gctINT x; + gctINT y; + gctINT width; + gctINT height; +} gcsBOUNDARY; + +/****************************************************************************** + ********************************* gcoHEAP Object ***************************** + ******************************************************************************/ + +typedef struct _gcoHEAP *gcoHEAP; + +/* Construct a new gcoHEAP object. */ +gceSTATUS +gcoHEAP_Construct(IN gcoOS Os, IN gctSIZE_T AllocationSize, OUT gcoHEAP *Heap); + +/* Destroy an gcoHEAP object. */ +gceSTATUS +gcoHEAP_Destroy(IN gcoHEAP Heap); + +/* Allocate memory. */ +gceSTATUS +gcoHEAP_Allocate(IN gcoHEAP Heap, IN gctSIZE_T Bytes, OUT gctPOINTER *Node); + +gceSTATUS +gcoHEAP_GetMemorySize(IN gcoHEAP Heap, IN gctPOINTER Memory, OUT gctSIZE_T_PTR MemorySize); + +/* Free memory. */ +gceSTATUS +gcoHEAP_Free(IN gcoHEAP Heap, IN gctPOINTER Node); + +#if (VIVANTE_PROFILER_SYSTEM_MEMORY || gcdDEBUG) +/* Profile the heap. */ +gceSTATUS +gcoHEAP_ProfileStart(IN gcoHEAP Heap); + +gceSTATUS +gcoHEAP_ProfileEnd(IN gcoHEAP Heap, IN gctCONST_STRING Title); +#endif + +/****************************************************************************** + ******************************* Debugging Macros ***************************** + ******************************************************************************/ + +void +gcoOS_SetDebugLevel(IN gctUINT32 Level); + +void +gcoOS_GetDebugLevel(OUT gctUINT32_PTR DebugLevel); + +void +gcoOS_GetDebugZone(IN gctUINT32 Zone, OUT gctUINT32_PTR DebugZone); + +void +gcoOS_SetDebugZone(IN gctUINT32 Zone); + +void +gcoOS_SetDebugFile(IN gctCONST_STRING FileName); + +void +gcoOS_EnableDebugDump(IN gctBOOL Enable); + +gctFILE +gcoOS_ReplaceDebugFile(IN gctFILE fp); + +/******************************************************************************* + ** + ** gcmFATAL + ** + ** Print a message to the debugger and execute a break point. + ** + ** ARGUMENTS: + ** + ** message . + ** ... Optional arguments. + */ + +void +gckOS_DebugFatal(IN gctCONST_STRING Message, ...); + +void +gcoOS_DebugFatal(IN gctCONST_STRING Message, ...); + +#if gcmIS_DEBUG(gcdDEBUG_FATAL) +# define gcmFATAL gcoOS_DebugFatal +# define gcmkFATAL gckOS_DebugFatal +#elif gcdHAS_ELLIPSIS +# define gcmFATAL(...) +# define gcmkFATAL(...) +#else +gcmINLINE static void +__dummy_fatal(IN gctCONST_STRING Message, ...) +{ +} + +# define gcmFATAL __dummy_fatal +# define gcmkFATAL __dummy_fatal +#endif + +/******************************************************************************* + ** + ** gcmTRACE + ** + ** Print a message to the debugfer if the correct level has been set. In + ** retail mode this macro does nothing. + ** + ** ARGUMENTS: + ** + ** level Level of message. + ** message . + ** ... Optional arguments. 
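A sketch of the gcoHEAP sub-allocator above; treating AllocationSize as the chunk size the heap grows by is an assumption, and 64 KB is an arbitrary illustrative value.

static void heap_sketch(void)
{
    gcoHEAP    heap = gcvNULL;
    gctPOINTER node = gcvNULL;

    gcoHEAP_Construct(gcvNULL, 64 << 10, &heap);

    gcoHEAP_Allocate(heap, 128, &node);
    /* ... use the 128-byte block ... */
    gcoHEAP_Free(heap, node);

    gcoHEAP_Destroy(heap);
}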
+ */ +#define gcvLEVEL_NONE -1 +#define gcvLEVEL_ERROR 0 +#define gcvLEVEL_WARNING 1 +#define gcvLEVEL_INFO 2 +#define gcvLEVEL_VERBOSE 3 + +void +gckOS_DebugTrace(IN gctUINT32 Level, IN gctCONST_STRING Message, ...) +CHECK_PRINTF_FORMAT(2, 3); + +void +gcoOS_DebugTrace(IN gctUINT32 Level, IN gctCONST_STRING Message, ...) +CHECK_PRINTF_FORMAT(2, 3); + +#if gcmIS_DEBUG(gcdDEBUG_TRACE) +# define gcmTRACE gcoOS_DebugTrace +# define gcmkTRACE gckOS_DebugTrace +# define gcmkTRACE_N(Level, ArgumentSize, ...) \ + gckOS_DebugTrace(Level, __VA_ARGS__) +#elif gcdHAS_ELLIPSIS +# define gcmTRACE(...) +# define gcmkTRACE(...) +# define gcmkTRACE_N(...) +#else + gcmINLINE static void + __dummy_trace(IN gctUINT32 Level, IN gctCONST_STRING Message, ...) + { + } + + gcmINLINE static void + __dummy_trace_n(IN gctUINT32 Level, IN gctUINT ArgumentSize, IN gctCONST_STRING Message, ...) + { + } + +# define gcmTRACE __dummy_trace +# define gcmkTRACE __dummy_trace +# define gcmkTRACE_N __dummy_trace_n +#endif + +/******************************************************************************* + ** + ** gcmTRACE_ZONE + ** + ** Print a message to the debugger if the correct level and zone has been + ** set. In retail mode this macro does nothing. + ** + ** ARGUMENTS: + ** + ** Level Level of message. + ** Zone Zone of message. + ** Message debuger message. + ** ... Optional arguments. + */ + +void +gckOS_DebugTraceZone(IN gctUINT32 Level, IN gctUINT32 Zone, IN gctCONST_STRING Message, ...); + +void +gcoOS_DebugTraceZone(IN gctUINT32 Level, IN gctUINT32 Zone, IN gctCONST_STRING Message, ...); + +#if gcmIS_DEBUG(gcdDEBUG_TRACE) +# define gcmTRACE_ZONE gcoOS_DebugTraceZone +# define gcmkTRACE_ZONE gckOS_DebugTraceZone +# define gcmkTRACE_ZONE_N(Level, Zone, ArgumentSize, ...) \ + gckOS_DebugTraceZone(Level, Zone, __VA_ARGS__) +#elif gcdHAS_ELLIPSIS +# define gcmTRACE_ZONE(...) +# define gcmkTRACE_ZONE(...) +# define gcmkTRACE_ZONE_N(...) +#else + gcmINLINE static void + __dummy_trace_zone(IN gctUINT32 Level, + IN gctUINT32 Zone, + IN gctCONST_STRING Message, ...) + { + } + + gcmINLINE static void + __dummy_trace_zone_n(IN gctUINT32 Level, + IN gctUINT32 Zone, + IN gctUINT ArgumentSize, + IN gctCONST_STRING Message, + ...) + { + } + +# define gcmTRACE_ZONE __dummy_trace_zone +# define gcmkTRACE_ZONE __dummy_trace_zone +# define gcmkTRACE_ZONE_N __dummy_trace_zone_n +#endif + +/******************************************************************************* + ** + ** gcmDEBUG_ONLY + ** + ** Execute a statement or function only in DEBUG mode. + ** + ** ARGUMENTS: + ** + ** f Statement or function to execute. + */ +#if gcmIS_DEBUG(gcdDEBUG_CODE) +# define gcmDEBUG_ONLY(f) f +#else +# define gcmDEBUG_ONLY(f) +#endif + +/******************************************************************************* + ** + ** gcmSTACK_PUSH + ** gcmSTACK_POP + ** gcmSTACK_DUMP + ** gcmSTACK_REMOVE + ** + ** Push or pop a function with entry arguments on the trace stack. + ** + ** ARGUMENTS: + ** + ** Function Name of function. + ** Line Line number. + ** Text Optional text. + ** ... Optional arguments for text. + ** + ** Thread Thread id. 
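A sketch of the trace macros above; because they compile away in retail builds, the format arguments must be free of side effects. gcvZONE_SURFACE is an assumed zone id from the companion headers.

static void trace_sketch(gctINT width)
{
    if (width <= 0) {
        gcmTRACE(gcvLEVEL_ERROR, "invalid width %d", width);
    }

    /* Zone-filtered variant, emitted only in gcdDEBUG_TRACE builds. */
    gcmTRACE_ZONE(gcvLEVEL_VERBOSE, gcvZONE_SURFACE,
                  "surface resized to %d", width);
}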
+ */ +void +gcoOS_StackPush(IN gctINT8_PTR Identity, + IN gctCONST_STRING Function, + IN gctINT Line, + IN gctCONST_STRING Text, + ...); + +void +gcoOS_StackPop(IN gctINT8_PTR Identity, IN gctCONST_STRING Function); + +void +gcoOS_StackDump(void); + +void +gcoOS_StackRemove(IN gctHANDLE Thread); + +#if gcmIS_DEBUG(gcdDEBUG_STACK) +# define gcmSTACK_PUSH gcoOS_StackPush +# define gcmSTACK_POP gcoOS_StackPop +# define gcmSTACK_DUMP gcoOS_StackDump +# define gcmSTACK_REMOVE gcoOS_StackRemove +#elif gcdHAS_ELLIPSIS +# define gcmSTACK_PUSH(...) +# define gcmSTACK_POP(...) +# define gcmSTACK_DUMP() +# define gcmSTACK_REMOVE(...) +#else + gcmINLINE static void + __dummy_stack_push(IN gctCONST_STRING Function, + IN gctINT Line, + IN gctCONST_STRING Text, ...) + { + } + + gcmINLINE static void + __dummy_stack_pop(IN gctINT8_PTR Identity, IN gctCONST_STRING Function); + + gcmINLINE static void + __dummy_stack_remove(IN gctHANDLE Thread); + +# define gcmSTACK_PUSH __dummy_stack_push +# define gcmSTACK_POP(a, b) __dummy_stack_pop +# define gcmSTACK_DUMP() +# define gcmSTACK_REMOVE(a) __dummy_stack_remove +#endif + +/******************************************************************************* + ** + ** gcmBINARY_TRACE + ** + ** Push or pop a function with entry arguments on the trace stack. + ** + ** ARGUMENTS: + ** + ** Function Name of function + ** Line Line number + ** Text Optional text + ** ... Optional arguments for text. + */ +void +gcoOS_BinaryTrace(IN gctCONST_STRING Function, + IN gctINT Line, + IN gctCONST_STRING Text OPTIONAL, + ...); + +void +gckOS_BinaryTrace(IN gctCONST_STRING Function, + IN gctINT Line, + IN gctCONST_STRING Text OPTIONAL, + ...); + +#if gcdBINARY_TRACE +# define gcmBINARY_TRACE gcoOS_BinaryTrace +# define gcmkBINARY_TRACE gckOS_BinaryTrace +#elif gcdHAS_ELLIPSIS +# define gcmBINARY_TRACE(Function, Line, Text, ...) +# define gcmkBINARY_TRACE(Function, Line, Text, ...) +#else + gcmINLINE static void + __dummy_binary_trace(IN gctCONST_STRING Function, + IN gctINT Line, + IN gctCONST_STRING Text, ...) + { + } + +# define gcmBINARY_TRACE __dummy_binary_trace +# define gcmkBINARY_TRACE __dummy_binary_trace +#endif + +/******************************************************************************* + ** + ** gcmSYSTRACE_BEGIN + ** gcmSYSTRACE_END + ** + ** Systrace is a performance tuning tool on linux. + ** + ** ARGUMENTS: + ** + ** FuncName Function name + ** Zone Systrace zone. Only specified zones are traced. + */ + +void +gcoOS_SysTraceBegin(IN gctUINT32 Zone, IN gctCONST_STRING FuncName); + +void +gcoOS_SysTraceEnd(IN gctUINT32 Zone); + +#if defined(LINUX) && gcdSYSTRACE +# define gcmSYSTRACE_BEGIN gcoOS_SysTraceBegin +# define gcmSYSTRACE_END gcoOS_SysTraceEnd +#elif gcdHAS_ELLIPSIS +# define gcmSYSTRACE_BEGIN(...) +# define gcmSYSTRACE_END(...) 
+#else + gcmINLINE static void + __dummy_systrace_begin(IN gctUINT32 Zone, + IN gctCONST_STRING FuncName) + { + } + + gcmINLINE static void + __dummy_systrace_end(IN gctUINT32 Zone) + { + } + +# define gcmSYSTRACE_BEGIN __dummy_systrace_begin +# define gcmSYSTRACE_END __dummy_systrace_end +#endif + +/****************************************************************************** + ******************************** Logging Macros ****************************** + ******************************************************************************/ + +#define gcdHEADER_LEVEL gcvLEVEL_VERBOSE + +/* Always enable header/footer when systrace build is on */ +#if defined(LINUX) && gcdSYSTRACE +# undef gcdEMPTY_HEADER_FOOTER +#endif + +#ifndef gcdEMPTY_HEADER_FOOTER +# define gcdEMPTY_HEADER_FOOTER 0 +#endif + +#if gcdENABLE_PROFILING +void +gcoOS_ProfileDB(IN gctCONST_STRING Function, IN OUT gctBOOL_PTR Initialized); + +#define gcmHEADER() \ + gctINT8 __user__ = 1; \ + static gctBOOL __profile__initialized__ = gcvFALSE; \ + gcmSTACK_PUSH(&__user__, __FUNCTION__, __LINE__, gcvNULL, gcvNULL); \ + gcoOS_ProfileDB(__FUNCTION__, &__profile__initialized__) + +#define gcmHEADER_ARG(...) \ + gctINT8 __user__ = 1; \ + static gctBOOL __profile__initialized__ = gcvFALSE; \ + gcmSTACK_PUSH(&__user__, __FUNCTION__, __LINE__, Text, __VA_ARGS__); \ + gcoOS_ProfileDB(__FUNCTION__, &__profile__initialized__) + +#define gcmFOOTER() \ + gcmSTACK_POP(&__user__, __FUNCTION__); \ + gcoOS_ProfileDB(__FUNCTION__, gcvNULL) + +#define gcmFOOTER_NO() \ + gcmSTACK_POP(&__user__, __FUNCTION__); \ + gcoOS_ProfileDB(__FUNCTION__, gcvNULL) + +#define gcmFOOTER_ARG(...) \ + gcmSTACK_POP(&__user__, __FUNCTION__); \ + gcoOS_ProfileDB(__FUNCTION__, gcvNULL) + +#define gcmFOOTER_KILL() \ + gcmSTACK_POP(&__user__, __FUNCTION__); \ + gcoOS_ProfileDB(gcvNULL, gcvNULL) + +#else /* !gcdENABLE_PROFILING */ + +#if gcdEMPTY_HEADER_FOOTER +# define gcmHEADER() +#elif gcdHAS_ELLIPSIS +#define gcmHEADER() \ + gctINT8 __user__ = 1; \ + gctINT8_PTR __user_ptr__ = &__user__; \ + gcmSTACK_PUSH(__user_ptr__, __FUNCTION__, __LINE__, gcvNULL, gcvNULL); \ + gcmSYSTRACE_BEGIN(_GC_OBJ_ZONE, __FUNCTION__); \ + gcmBINARY_TRACE(__FUNCTION__, __LINE__, gcvNULL, gcvNULL); \ + gcmTRACE_ZONE(gcdHEADER_LEVEL, _GC_OBJ_ZONE, \ + "++%s(%d)", __FUNCTION__, __LINE__) +#else + gcmINLINE static void + __dummy_header(void) + { + } +# define gcmHEADER __dummy_header +#endif + +#if gcdHAS_ELLIPSIS +#if gcdEMPTY_HEADER_FOOTER +# define gcmHEADER_ARG(Text, ...) +#else +# define gcmHEADER_ARG(Text, ...) \ + gctINT8 __user__ = 1; \ + gctINT8_PTR __user_ptr__ = &__user__; \ + gcmSTACK_PUSH(__user_ptr__, __FUNCTION__, __LINE__, Text, __VA_ARGS__); \ + gcmSYSTRACE_BEGIN(_GC_OBJ_ZONE, __FUNCTION__); \ + gcmBINARY_TRACE(__FUNCTION__, __LINE__, Text, __VA_ARGS__); \ + gcmTRACE_ZONE(gcdHEADER_LEVEL, _GC_OBJ_ZONE, \ + "++%s(%d): " Text, __FUNCTION__, __LINE__, __VA_ARGS__) +#endif +#else + gcmINLINE static void + __dummy_header_arg( + IN gctCONST_STRING Text, + ... 
+ ) + { + } +# define gcmHEADER_ARG __dummy_header_arg +#endif + +#if gcdEMPTY_HEADER_FOOTER +# define gcmFOOTER() +#elif gcdHAS_ELLIPSIS +# define gcmFOOTER() \ + gcmSTACK_POP(__user_ptr__, __FUNCTION__); \ + gcmSYSTRACE_END(_GC_OBJ_ZONE); \ + gcmBINARY_TRACE(__FUNCTION__, __LINE__, gcvNULL, gcvNULL); \ + gcmTRACE_ZONE(gcdHEADER_LEVEL, _GC_OBJ_ZONE, \ + "--%s(%d): status=%d(%s)", \ + __FUNCTION__, __LINE__, \ + status, gcoOS_DebugStatus2Name(status)); \ + *__user_ptr__ -= 1 +#else + gcmINLINE static void + __dummy_footer(void) + { + } +# define gcmFOOTER __dummy_footer +#endif + +#if gcdEMPTY_HEADER_FOOTER +# define gcmFOOTER_NO() +#elif gcdHAS_ELLIPSIS +#define gcmFOOTER_NO() \ + gcmSTACK_POP(__user_ptr__, __FUNCTION__); \ + gcmSYSTRACE_END(_GC_OBJ_ZONE); \ + gcmBINARY_TRACE(__FUNCTION__, __LINE__, gcvNULL, gcvNULL); \ + gcmTRACE_ZONE(gcdHEADER_LEVEL, _GC_OBJ_ZONE, \ + "--%s(%d)", __FUNCTION__, __LINE__); \ + *__user_ptr__ -= 1 +#else + gcmINLINE static void + __dummy_footer_no(void) + { + } +# define gcmFOOTER_NO __dummy_footer_no +#endif + +#if gcdEMPTY_HEADER_FOOTER +# define gcmFOOTER_KILL() +#elif gcdHAS_ELLIPSIS +#define gcmFOOTER_KILL() \ + gcmSTACK_POP(__user_ptr__, __FUNCTION__); \ + gcmSYSTRACE_END(_GC_OBJ_ZONE); \ + gcmBINARY_TRACE(__FUNCTION__, __LINE__, gcvNULL, gcvNULL); \ + gcmTRACE_ZONE(gcdHEADER_LEVEL, _GC_OBJ_ZONE, \ + "--%s(%d)", __FUNCTION__, __LINE__); \ + *__user_ptr__ -= 1 +#else + gcmINLINE static void + __dummy_footer_kill(void) + { + } +# define gcmFOOTER_KILL __dummy_footer_kill +#endif + +#if gcdHAS_ELLIPSIS +#if gcdEMPTY_HEADER_FOOTER +# define gcmFOOTER_ARG(Text, ...) +#else +# define gcmFOOTER_ARG(Text, ...) \ + gcmSTACK_POP(__user_ptr__, __FUNCTION__); \ + gcmSYSTRACE_END(_GC_OBJ_ZONE); \ + gcmBINARY_TRACE(__FUNCTION__, __LINE__, Text, __VA_ARGS__); \ + gcmTRACE_ZONE(gcdHEADER_LEVEL, _GC_OBJ_ZONE, \ + "--%s(%d): " Text, __FUNCTION__, __LINE__, __VA_ARGS__); \ + *__user_ptr__ -= 1 +#endif +#else + gcmINLINE static void + __dummy_footer_arg( + IN gctCONST_STRING Text, + ... + ) + { + } +# define gcmFOOTER_ARG __dummy_footer_arg +#endif + +#endif /* gcdENABLE_PROFILING */ + +#if gcdHAS_ELLIPSIS +# define gcmkHEADER() \ + do { \ + gcmkBINARY_TRACE(__FUNCTION__, __LINE__, gcvNULL, gcvNULL); \ + gcmkTRACE_ZONE(gcdHEADER_LEVEL, _GC_OBJ_ZONE, \ + "++%s(%d)", __FUNCTION__, __LINE__); \ + } while (0) +#else + gcmINLINE static void + __dummy_kheader(void) + { + } +# define gcmkHEADER __dummy_kheader +#endif + +#if gcdHAS_ELLIPSIS +# define gcmkHEADER_ARG(Text, ...) \ + do { \ + gcmkBINARY_TRACE(__FUNCTION__, __LINE__, Text, __VA_ARGS__); \ + gcmkTRACE_ZONE(gcdHEADER_LEVEL, _GC_OBJ_ZONE, \ + "++%s(%d): " Text, \ + __FUNCTION__, __LINE__, __VA_ARGS__); \ + } while (0) +#else + gcmINLINE static void + __dummy_kheader_arg(IN gctCONST_STRING Text, ...) 
+ { + } +# define gcmkHEADER_ARG __dummy_kheader_arg +#endif + +#if gcdHAS_ELLIPSIS +# define gcmkFOOTER() \ + do { \ + gcmkBINARY_TRACE(__FUNCTION__, __LINE__, gcvNULL, status); \ + gcmkTRACE_ZONE(gcdHEADER_LEVEL, _GC_OBJ_ZONE, \ + "--%s(%d): status=%d(%s)", \ + __FUNCTION__, __LINE__, status, \ + gckOS_DebugStatus2Name(status)); \ + } while (0) +#else + gcmINLINE static void + __dummy_kfooter(void) + { + } +# define gcmkFOOTER __dummy_kfooter +#endif + +#if gcdHAS_ELLIPSIS +# define gcmkFOOTER_NO() \ + do { \ + gcmkBINARY_TRACE(__FUNCTION__, __LINE__, gcvNULL, gcvNULL); \ + gcmkTRACE_ZONE(gcdHEADER_LEVEL, _GC_OBJ_ZONE, \ + "--%s(%d)", __FUNCTION__, __LINE__); \ + } while (0) +#else + gcmINLINE static void + __dummy_kfooter_no(void) + { + } +# define gcmkFOOTER_NO __dummy_kfooter_no +#endif + +#if gcdHAS_ELLIPSIS +# define gcmkFOOTER_ARG(Text, ...) \ + do { \ + gcmkBINARY_TRACE(__FUNCTION__, __LINE__, Text, __VA_ARGS__); \ + gcmkTRACE_ZONE(gcdHEADER_LEVEL, _GC_OBJ_ZONE, \ + "--%s(%d): " Text, \ + __FUNCTION__, __LINE__, __VA_ARGS__); \ + } while (0) +#else + gcmINLINE static void + __dummy_kfooter_arg(IN gctCONST_STRING Text, ...) + { + } +# define gcmkFOOTER_ARG __dummy_kfooter_arg +#endif + +#define gcmOPT_VALUE(ptr) (((ptr) == gcvNULL) ? 0 : *(ptr)) +#define gcmOPT_VALUE_INDEX(ptr, index) (((ptr) == gcvNULL) ? 0 : ptr[index]) +#define gcmOPT_POINTER(ptr) (((ptr) == gcvNULL) ? gcvNULL : *(ptr)) +#define gcmOPT_STRING(ptr) (((ptr) == gcvNULL) ? "(nil)" : (ptr)) + +void +gckOS_Print(IN gctCONST_STRING Message, ...) CHECK_PRINTF_FORMAT(1, 2); + +void +gcoOS_Print(IN gctCONST_STRING Message, ...) CHECK_PRINTF_FORMAT(1, 2); + +#define gcmPRINT gcoOS_Print +#define gcmkPRINT gckOS_Print +#define gcmkPRINT_N(ArgumentSize, ...) gckOS_Print(__VA_ARGS__) + +#if gcdHAS_ELLIPSIS +# define gcmPRINT_ONCE(Text, ...) \ + { \ + static gctBOOL _once = gcvFALSE; \ + if (!_once) { \ + gcmPRINT(Text, __VA_ARGS__); \ + _once = gcvTRUE; \ + } \ + } + +#else + gcmINLINE static void + __dummy_printonce_arg(IN gctCONST_STRING Text, ...) + { + } +# define gcmPRINT_ONCE __dummy_printonce_arg +#endif + +#if gcdFEATURE_SANITYCHECK +# define gcmFEATURE_CHECKED gcmPRINT_ONCE +#else +# define gcmFEATURE_CHECKED(Text, ...) +#endif + +#if gcdPRINT_VERSION +# define gcmPRINT_VERSION() \ + do { \ + _gcmPRINT_VERSION(gcm); \ + gcmSTACK_DUMP(); \ + } while (0) +# define gcmkPRINT_VERSION() _gcmPRINT_VERSION(gcmk) +# define _gcmPRINT_VERSION(prefix) \ + prefix##TRACE(gcvLEVEL_ERROR, "Vivante HAL version %s", gcvVERSION_STRING) +#else +# define gcmPRINT_VERSION() \ + do { \ + gcmSTACK_DUMP(); \ + } while (gcvFALSE) +# define gcmkPRINT_VERSION() \ + do { \ + } while (gcvFALSE) +#endif + +void +gckOS_Dump(IN gckOS Os, IN gctCONST_STRING Format, ...); + +void +gckOS_DumpBuffer(IN gckOS Os, + IN gceDUMP_BUFFER_TYPE Type, + IN gctPOINTER Buffer, + IN gctUINT64 Address, + IN gctSIZE_T Size); + +#if gcdDUMP_IN_KERNEL +# define gcmkDUMP gckOS_Dump + +# define gcmkDUMP_BUFFER gckOS_DumpBuffer +#else +# define gcmkDUMP(...) \ + do { \ + } while (0) +# define gcmkDUMP_BUFFER(...) \ + do { \ + } while (0) +#endif + +/******************************************************************************* + ** + ** gcmDUMP_FRAMERATE + ** + ** Print average frame rate + ** + */ +gceSTATUS +gcoOS_DumpFrameRate(void); +#define gcmDUMP_FRAMERATE gcoOS_DumpFrameRate + +/******************************************************************************* + ** + ** gcoOS_SetDumpFlag + ** + ** Dump print switch. 
+ ** + ** ARGUMENTS: + ** + ** DumpState + ** True to enable dump prints. + */ + +gceSTATUS +gcoOS_SetDumpFlag(IN gctBOOL DumpState); + +/******************************************************************************* + ** + ** gcmDUMP + ** + ** Print a dump message. + ** + ** ARGUMENTS: + ** + ** gctSTRING Message. + ** + ** ... Optional arguments. + */ + +#if gcdDUMP +gceSTATUS +gcoOS_Dump(IN gcoOS Os, IN gctCONST_STRING String, ...); +# define gcmDUMP gcoOS_Dump +#else +# define gcmDUMP(...) \ + do { \ + } while (0) +#endif + +/******************************************************************************* + ** + ** gcmDUMP_BUFFER + ** + ** Print a buffer to the dump. + ** + ** ARGUMENTS: + ** + ** gctSTRING Tag + ** Tag for dump. + ** + ** gctADDRESS Address + ** GPU address of buffer. + ** + ** gctPOINTER Logical + ** Logical address of buffer. + ** + ** gctUINT32 Offset + ** Offset into buffer. + ** + ** gctSIZE_T Bytes + ** Number of bytes. + */ + +#if gcdDUMP +gceSTATUS +gcoOS_DumpBuffer(IN gcoOS Os, + IN gceDUMP_BUFFER_TYPE Type, + IN gctADDRESS Address, + IN gctPOINTER Logical, + IN gctSIZE_T Offset, + IN gctSIZE_T Bytes); +# define gcmDUMP_BUFFER gcoOS_DumpBuffer +#else +# define gcmDUMP_BUFFER(...) \ + do { \ + } while (0) +#endif + +#if gcdDUMP +void +gcoOS_DumpLock(void); +# define gcmDUMP_LOCK gcoOS_DumpLock +#else +# define gcmDUMP_LOCK(...) \ + do { \ + } while (0) +#endif + +#if gcdDUMP +void +gcoOS_DumpUnlock(void); +# define gcmDUMP_UNLOCK gcoOS_DumpUnlock +#else +# define gcmDUMP_UNLOCK(...) \ + do { \ + } while (0) +#endif + +/******************************************************************************* + ** + ** gcmDUMP_API + ** + ** Print a dump message for a high level API prefixed by the function name. + ** + ** ARGUMENTS: + ** + ** gctSTRING Message. + ** + ** ... Optional arguments. + */ +gceSTATUS +gcoOS_DumpApi(IN gctCONST_STRING String, ...); +#if gcdDUMP_API +# define gcmDUMP_API gcoOS_DumpApi +#else +# define gcmDUMP_API(...) \ + do { \ + } while (0) +#endif + +/******************************************************************************* + ** + ** gcmDUMP_API_ARRAY + ** + ** Print an array of data. + ** + ** ARGUMENTS: + ** + ** gctUINT32_PTR Pointer to array. + ** gctUINT32 Size. + */ +gceSTATUS +gcoOS_DumpArray(IN gctCONST_POINTER Data, IN gctUINT32 Size); +#if gcdDUMP_API +# define gcmDUMP_API_ARRAY gcoOS_DumpArray +#else +# define gcmDUMP_API_ARRAY(...) \ + do { \ + } while (0) +#endif + +/******************************************************************************* + ** + ** gcmDUMP_API_ARRAY_TOKEN + ** + ** Print an array of data terminated by a token. + ** + ** ARGUMENTS: + ** + ** gctUINT32_PTR Pointer to array. + ** gctUINT32 Termination. + */ +gceSTATUS +gcoOS_DumpArrayToken(IN gctCONST_POINTER Data, IN gctUINT32 Termination); +#if gcdDUMP_API +# define gcmDUMP_API_ARRAY_TOKEN gcoOS_DumpArrayToken +#else +# define gcmDUMP_API_ARRAY_TOKEN(...) \ + do { \ + } while (0) +#endif + +/******************************************************************************* + ** + ** gcmDUMP_API_DATA + ** + ** Print an array of bytes. + ** + ** ARGUMENTS: + ** + ** gctCONST_POINTER Pointer to array. + ** gctSIZE_T Size. + */ +gceSTATUS +gcoOS_DumpApiData(IN gctCONST_POINTER Data, IN gctSIZE_T Size); +#if gcdDUMP_API +# define gcmDUMP_API_DATA gcoOS_DumpApiData +#else +# define gcmDUMP_API_DATA(...) 
\ + do { \ + } while (0) +#endif + +/******************************************************************************* + ** gcmDUMP_2D_COMMAND + ** + ** Print the 2D command buffer. + ** + ** ARGUMENTS: + ** + ** gctUINT32_PTR Pointer to the command buffer. + ** gctUINT32 Command buffer size. + */ +gceSTATUS +gcoOS_Dump2DCommand(IN gctUINT32_PTR Command, IN gctUINT32 Size); +#if gcdDUMP_2D +# define gcmDUMP_2D_COMMAND(cmd, size) \ + do { \ + if (Hardware->newDump2DLevel > 1) \ + gcoOS_Dump2DCommand(cmd, size); \ + } while (0) +#else +# define gcmDUMP_2D_COMMAND(...) \ + do { \ + } while (0) +#endif + +/******************************************************************************* + ** gcmDUMP_2D_SURFACE + ** + ** Print the 2D surface memory. + ** + ** ARGUMENTS: + ** + ** gctBOOL Src. + ** gctADDRESS Address. + */ +gceSTATUS +gcoOS_Dump2DSurface(IN gctBOOL Src, IN gctADDRESS Address); +#if gcdDUMP_2D +# define gcmDUMP_2D_SURFACE(src, addr) \ + do { \ + if (Hardware->newDump2DLevel > 2) \ + gcoOS_Dump2DSurface(src, addr); \ + } while (0) +#else +# define gcmDUMP_2D_SURFACE(...) \ + do { \ + } while (0) +#endif + +/******************************************************************************* + ** gcmDUMP_ADD_MEMORY_INFO + ** + ** Record the memory info. + ** + ** ARGUMENTS: + ** + ** gctADDRESS Address. + ** gctSIZE_T Size. + */ +gceSTATUS +gcfAddMemoryInfo(IN gctADDRESS GPUAddress, + IN gctPOINTER Logical, + IN gctUINT64 Physical, + IN gctUINT32 Size); +#if gcdDUMP_2D +# define gcmDUMP_ADD_MEMORY_INFO gcfAddMemoryInfo +#else +# define gcmDUMP_ADD_MEMORY_INFO(...) \ + do { \ + } while (0) +#endif + +/******************************************************************************* + ** gcmDUMP_DEL_MEMORY_INFO + ** + ** Record the memory info. + ** + ** ARGUMENTS: + ** + ** gctADDRESS Address. + */ +gceSTATUS +gcfDelMemoryInfo(IN gctADDRESS Address); +#if gcdDUMP_2D +# define gcmDUMP_DEL_MEMORY_INFO gcfDelMemoryInfo +#else +# define gcmDUMP_DEL_MEMORY_INFO(...) \ + do { \ + } while (0) +#endif + +/******************************************************************************* + ** + ** gcmTRACE_RELEASE + ** + ** Print a message to the shader debugger. + ** + ** ARGUMENTS: + ** + ** gctCONST_STRING Message. + ** ... Optional arguments. + */ + +#define gcmTRACE_RELEASE gcoOS_DebugShaderTrace + +void +gcoOS_DebugShaderTrace(IN gctCONST_STRING Message, ...); + +void +gcoOS_SetDebugShaderFiles(IN gctCONST_STRING VSFileName, IN gctCONST_STRING FSFileName); + +void +gcoOS_SetDebugShaderFileType(IN gctUINT32 ShaderType); + +void +gcoOS_EnableDebugBuffer(IN gctBOOL Enable); + +/******************************************************************************* + ** + ** gcmBREAK + ** + ** Break into the debugger. In retail mode this macro does nothing. + ** + ** ARGUMENTS: + ** + ** None. + */ + +void +gcoOS_DebugBreak(void); + +void +gckOS_DebugBreak(void); + +#if gcmIS_DEBUG(gcdDEBUG_BREAK) +# define gcmBREAK gcoOS_DebugBreak +# define gcmkBREAK gckOS_DebugBreak +#else +# define gcmBREAK() +# define gcmkBREAK() +#endif + +/******************************************************************************* + ** + ** gcmASSERT + ** + ** Evaluate an expression and break into the debugger if the expression + ** evaluates to false. In retail mode this macro does nothing. + ** + ** ARGUMENTS: + ** + ** exp Expression to evaluate. 
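+ **
+ ** EXAMPLE (illustrative sketch; "Bytes" and "Kernel" are hypothetical
+ ** locals):
+ **
+ **     gcmASSERT(Bytes > 0);
+ **     gcmkASSERT(Kernel != gcvNULL);
+ **
+ ** Because the retail build expands both macros to nothing, the asserted
+ ** expressions must be free of side effects.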
+ */ +#if gcmIS_DEBUG(gcdDEBUG_ASSERT) +# define _gcmASSERT(prefix, exp) \ + do \ + { \ + if (!(exp)) \ + { \ + prefix##TRACE(gcvLEVEL_ERROR, \ + #prefix "ASSERT at %s(%d)", \ + __FUNCTION__, __LINE__); \ + prefix##TRACE(gcvLEVEL_ERROR, \ + "(%s)", #exp); \ + prefix##BREAK(); \ + } \ + } \ + while (gcvFALSE) +# define gcmASSERT(exp) _gcmASSERT(gcm, exp) +# define gcmkASSERT(exp) _gcmASSERT(gcmk, exp) +#else +# define gcmASSERT(exp) +# define gcmkASSERT(exp) +#endif + +/******************************************************************************* + ** + ** gcmSTATIC_ASSERT + ** + ** Tests a software assertion at compile time. If the specific constant + ** expression is false, the compiler displays the specified message and + ** the compilation fails with an error, otherwise the declaration has + ** no effect. + ** Static assert is suitable for both user and kernel space. + ** + ** ARGUMENTS: + ** + ** exp Constant expression. + ** message Error message displayed on assertion failure. + */ +#if defined(__GNUC__) && ((__GNUC__ == 4 && __GNUC_MINOR__ >= 6) || (__GNUC__ > 4)) +# define gcmSTATIC_ASSERT(constExp, message) \ + do { \ + _Static_assert((constExp), message); \ + } \ + while (0) + +#elif defined(_MSC_VER) && (_MSC_VER >= 1600) +# define gcmSTATIC_ASSERT(constExp, message) \ + static_assert((constExp), message) + +#else +# define gcmSTATIC_ASSERT(constExp, message) \ + do { \ + } while (0) +#endif + +/******************************************************************************* + ** + ** gcmVERIFY + ** + ** Verify if an expression returns true. If the expression does not + ** evaluates to true, an assertion will happen in debug mode. + ** + ** ARGUMENTS: + ** + ** exp Expression to evaluate. + */ +#if gcmIS_DEBUG(gcdDEBUG_ASSERT) +# define gcmVERIFY(exp) gcmASSERT(exp) +# define gcmkVERIFY(exp) gcmkASSERT(exp) +#else +# define gcmVERIFY(exp) ((void)exp) +# define gcmkVERIFY(exp) ((void)exp) +#endif + +/******************************************************************************* + ** + ** gcmVERIFY_OK + ** + ** Verify a function returns gcvSTATUS_OK. If the function does not return + ** gcvSTATUS_OK, an assertion will happen in debug mode. + ** + ** ARGUMENTS: + ** + ** func Function to evaluate. + */ + +void +gcoOS_Verify(IN gceSTATUS status); + +void +gckOS_Verify(IN gceSTATUS status); + +#if gcmIS_DEBUG(gcdDEBUG_ASSERT) +# define gcmVERIFY_OK(func) \ + do { \ + gceSTATUS verifyStatus = func; \ + gcoOS_Verify(verifyStatus); \ + if (verifyStatus != gcvSTATUS_OK) { \ + gcmTRACE(gcvLEVEL_ERROR, \ + "gcmVERIFY_OK(%d): function returned %d", \ + __LINE__, verifyStatus); \ + } \ + gcmASSERT(verifyStatus == gcvSTATUS_OK); \ + } while (gcvFALSE) + +# define gcmkVERIFY_OK(func) \ + do { \ + gceSTATUS verifyStatus = func; \ + if (verifyStatus != gcvSTATUS_OK) { \ + gcmkTRACE(gcvLEVEL_ERROR, \ + "gcmkVERIFY_OK(%d): function returned %d",\ + __LINE__, verifyStatus); \ + } \ + gckOS_Verify(verifyStatus); \ + gcmkASSERT(verifyStatus == gcvSTATUS_OK); \ + } while (gcvFALSE) +#else +# define gcmVERIFY_OK(func) func +# define gcmkVERIFY_OK(func) func +#endif + +gctCONST_STRING +gcoOS_DebugStatus2Name(gceSTATUS status); + +gctCONST_STRING +gckOS_DebugStatus2Name(gceSTATUS status); + +/******************************************************************************* + ** + ** gcmERR_BREAK + ** + ** Executes a break statement on error. + ** + ** ASSUMPTIONS: + ** + ** 'status' variable of gceSTATUS type must be defined. + ** + ** ARGUMENTS: + ** + ** func Function to evaluate. 
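+ **
+ ** EXAMPLE (illustrative sketch; _FirstStep()/_SecondStep() and "Hardware"
+ ** are hypothetical, standing in for any gceSTATUS-returning calls):
+ **
+ **     gceSTATUS status = gcvSTATUS_OK;
+ **
+ **     do
+ **     {
+ **         gcmERR_BREAK(_FirstStep(Hardware));
+ **         gcmERR_BREAK(_SecondStep(Hardware));
+ **     }
+ **     while (gcvFALSE);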
+ */ +#define _gcmERR_BREAK(prefix, func) { \ + status = func; \ + if (gcmIS_ERROR(status)) { \ + prefix##PRINT_VERSION(); \ + prefix##TRACE(gcvLEVEL_ERROR, \ + #prefix "ERR_BREAK: status=%d(%s) @ %s(%d)", \ + status, gcoOS_DebugStatus2Name(status), \ + __FUNCTION__, __LINE__); \ + break; \ + } \ + do { } while (gcvFALSE); \ +} + +#define _gcmkERR_BREAK(prefix, func) { \ + status = func; \ + if (gcmIS_ERROR(status)) { \ + prefix##PRINT_VERSION(); \ + prefix##TRACE(gcvLEVEL_ERROR, \ + #prefix "ERR_BREAK: status=%d(%s) @ %s(%d)", \ + status, gckOS_DebugStatus2Name(status), \ + __FUNCTION__, __LINE__); \ + break; \ + } \ + do { } while (gcvFALSE); \ +} + +#define gcmERR_BREAK(func) _gcmERR_BREAK(gcm, func) +#define gcmkERR_BREAK(func) _gcmkERR_BREAK(gcmk, func) + +/******************************************************************************* + ** + ** gcmERR_RETURN + ** + ** Executes a return on error. + ** + ** ASSUMPTIONS: + ** + ** 'status' variable of gceSTATUS type must be defined. + ** + ** ARGUMENTS: + ** + ** func Function to evaluate. + */ +#define _gcmERR_RETURN(prefix, func) \ + do { \ + status = func; \ + if (gcmIS_ERROR(status)) { \ + prefix##PRINT_VERSION(); \ + prefix##TRACE(gcvLEVEL_ERROR, \ + #prefix "ERR_RETURN: status=%d(%s) @ %s(%d)", \ + status, gcoOS_DebugStatus2Name(status), \ + __FUNCTION__, __LINE__); \ + prefix##FOOTER(); \ + return status; \ + } \ + } while (gcvFALSE) +#define _gcmkERR_RETURN(prefix, func) \ + do { \ + status = func; \ + if (gcmIS_ERROR(status)) { \ + prefix##PRINT_VERSION(); \ + prefix##TRACE(gcvLEVEL_ERROR, \ + #prefix "ERR_RETURN: status=%d(%s) @ %s(%d)", \ + status, gckOS_DebugStatus2Name(status), \ + __FUNCTION__, __LINE__); \ + prefix##FOOTER(); \ + return status; \ + } \ + } while (gcvFALSE) +#define gcmERR_RETURN(func) _gcmERR_RETURN(gcm, func) +#define gcmkERR_RETURN(func) _gcmkERR_RETURN(gcmk, func) + +/******************************************************************************* + ** + ** gcmONERROR + ** + ** Jump to the error handler in case there is an error. + ** + ** ASSUMPTIONS: + ** + ** 'status' variable of gceSTATUS type must be defined. + ** + ** ARGUMENTS: + ** + ** func Function to evaluate. + */ +#define _gcmONERROR(prefix, func) \ + do { \ + status = func; \ + if (gcmIS_ERROR(status)) { \ + prefix##PRINT_VERSION(); \ + prefix##TRACE(gcvLEVEL_ERROR, \ + #prefix "ONERROR: status=%d(%s) @ %s(%d)", \ + status, gcoOS_DebugStatus2Name(status), \ + __FUNCTION__, __LINE__); \ + goto OnError; \ + } \ + } while (gcvFALSE) +#define _gcmkONERROR(prefix, func) \ + do { \ + status = func; \ + if (gcmIS_ERROR(status)) { \ + prefix##PRINT_VERSION(); \ + prefix##TRACE(gcvLEVEL_ERROR, \ + #prefix "ONERROR: status=%d(%s) @ %s(%d)", \ + status, gckOS_DebugStatus2Name(status), \ + __FUNCTION__, __LINE__); \ + goto OnError; \ + } \ + } while (gcvFALSE) + +/* Ignore the debug info when the specific error occurs. 
*/ +#define _gcmkONERROR_EX(prefix, func, error) \ + do { \ + status = func; \ + if (gcmIS_ERROR(status)) { \ + if (status != (error)) { \ + prefix##PRINT_VERSION(); \ + prefix##TRACE(gcvLEVEL_ERROR, \ + #prefix "ONERROR: status=%d(%s) @ %s(%d)", \ + status, gckOS_DebugStatus2Name(status), \ + __FUNCTION__, __LINE__); \ + } \ + goto OnError; \ + } \ + } while (gcvFALSE) + +#define gcmONERROR(func) _gcmONERROR(gcm, func) +#define gcmkONERROR(func) _gcmkONERROR(gcmk, func) +#define gcmkONERROR_EX(func, error) _gcmkONERROR_EX(gcmk, func, error) + +#define gcmGET_INDEX_SIZE(type, size) \ + switch (type) \ + { \ + case gcvINDEX_8: \ + size = 1; \ + break; \ + case gcvINDEX_16: \ + size = 2; \ + break; \ + case gcvINDEX_32: \ + size = 4; \ + break; \ + default: \ + gcmONERROR(gcvSTATUS_INVALID_ARGUMENT); \ + } \ + +/******************************************************************************* + ** + ** gcmkSAFECASTSIZET + ** + ** Check whether value of a gctSIZE_T variable beyond the capability + ** of 32bits GPU hardware. + ** + ** ASSUMPTIONS: + ** + ** + ** + ** ARGUMENTS: + ** + ** x A gctUINT32 variable + ** y A gctSIZE_T variable + */ +#define gcmkSAFECASTSIZET(x, y) \ + do { \ + gctUINT32 tmp = (gctUINT32)(y); \ + if (gcmSIZEOF(gctSIZE_T) > gcmSIZEOF(gctUINT32)) { \ + gcmkASSERT(tmp <= gcvMAXUINT32); \ + } \ + (x) = tmp; \ + } while (gcvFALSE) + +#define gcmSAFECASTSIZET(x, y) \ + do { \ + gctUINT32 tmp = (gctUINT32)(y); \ + if (gcmSIZEOF(gctSIZE_T) > gcmSIZEOF(gctUINT32)) { \ + gcmASSERT(tmp <= gcvMAXUINT32); \ + } \ + (x) = tmp; \ + } while (gcvFALSE) + +/******************************************************************************* + ** + ** gcmkSAFECASTPHYSADDRT + ** + ** Check whether value of a gctPHYS_ADDR_T variable beyond the capability + ** of 32bits GPU hardware. + ** + ** ASSUMPTIONS: + ** + ** + ** + ** ARGUMENTS: + ** + ** x A gctUINT32 variable + ** y A gctPHYS_ADDR_T variable + */ +#define gcmkSAFECASTPHYSADDRT(x, y) \ + do { \ + gctUINT32 tmp = (gctUINT32)(y); \ + if (gcmSIZEOF(gctPHYS_ADDR_T) > gcmSIZEOF(gctUINT32)) { \ + gcmkASSERT(tmp <= gcvMAXUINT32); \ + } \ + (x) = tmp; \ + } while (gcvFALSE) + +/******************************************************************************* + ** + ** gcmSAFECASTPHYSADDRT + ** + ** Check whether value of a gctPHYS_ADDR_T variable beyond the capability + ** of 32bits GPU hardware. + ** + ** ASSUMPTIONS: + ** + ** + ** + ** ARGUMENTS: + ** + ** x A gctUINT32 variable + ** y A gctPHYS_ADDR_T variable + */ +#define gcmSAFECASTPHYSADDRT(x, y) \ + do { \ + gctUINT32 tmp = (gctUINT32)(y); \ + if (gcmSIZEOF(gctPHYS_ADDR_T) > gcmSIZEOF(gctUINT32)) { \ + gcmASSERT(tmp <= gcvMAXUINT32); \ + } \ + (x) = tmp; \ + } while (gcvFALSE) + +/******************************************************************************* + ** + ** gcmkSAFECASTVA + ** + ** Check whether value of a gctADDRESS variable beyond the capability + ** of 32bits GPU hardware. + ** + ** ASSUMPTIONS: + ** + ** + ** + ** ARGUMENTS: + ** + ** x A gctUINT32 variable + ** y A gctADDRESS variable + */ +#define gcmkSAFECASTVA(x, y) \ + do { \ + gctUINT32 tmp = (gctUINT32)(y); \ + if (gcmSIZEOF(gctADDRESS) > gcmSIZEOF(gctUINT32)) { \ + gcmkASSERT(tmp <= gcvMAXUINT32); \ + } \ + (x) = tmp; \ + } while (gcvFALSE) + +/******************************************************************************* + ** + ** gcmSAFECASTVA + ** + ** Check whether value of a gctADDRESS variable beyond the capability + ** of 32bits GPU hardware. 
+ ** + ** ASSUMPTIONS: + ** + ** + ** + ** ARGUMENTS: + ** + ** x A gctUINT32 variable + ** y A gctADDRESS variable + */ +#define gcmSAFECASTVA(x, y) \ + do { \ + gctUINT32 tmp = (gctUINT32)(y); \ + if (gcmSIZEOF(gctADDRESS) > gcmSIZEOF(gctUINT32)) { \ + gcmASSERT(tmp <= gcvMAXUINT32); \ + } \ + (x) = tmp; \ + } while (gcvFALSE) + +/******************************************************************************* + ** + ** gcmVERIFY_LOCK + ** + ** Verifies whether the surface is locked. + ** + ** ARGUMENTS: + ** + ** surfaceInfo Pointer to the surface iniformational structure. + */ +#define gcmVERIFY_LOCK(surfaceInfo) \ + if (!surfaceInfo->node.valid) \ + { \ + gcmONERROR(gcvSTATUS_MEMORY_UNLOCKED); \ + } \ + +/******************************************************************************* + ** + ** gcmVERIFY_NODE_LOCK + ** + ** Verifies whether the surface node is locked. + ** + ** ARGUMENTS: + ** + ** surfaceInfo Pointer to the surface iniformational structure. + */ +#define gcmVERIFY_NODE_LOCK(surfaceNode) \ + if (!(surfaceNode)->valid) \ + { \ + status = gcvSTATUS_MEMORY_UNLOCKED; \ + break; \ + } \ + do { } while (gcvFALSE) + +/******************************************************************************* + ** + ** gcmBADOBJECT_BREAK + ** + ** Executes a break statement on bad object. + ** + ** ARGUMENTS: + ** + ** obj Object to test. + ** t Expected type of the object. + */ +#define gcmBADOBJECT_BREAK(obj, t) \ + if ((obj == gcvNULL) \ + || (((gcsOBJECT *)(obj))->type != t) \ + ) \ + { \ + status = gcvSTATUS_INVALID_OBJECT; \ + break; \ + } \ + do { } while (gcvFALSE) + +/******************************************************************************* + ** + ** gcmCHECK_STATUS + ** + ** Executes a break statement on error. + ** + ** ASSUMPTIONS: + ** + ** 'status' variable of gceSTATUS type must be defined. + ** + ** ARGUMENTS: + ** + ** func Function to evaluate. + */ +#define _gcmCHECK_STATUS(prefix, func) \ + do { \ + last = func; \ + if (gcmIS_ERROR(last)) { \ + prefix##TRACE(gcvLEVEL_ERROR, \ + #prefix "CHECK_STATUS: status=%d(%s) @ %s(%d)", \ + last, gcoOS_DebugStatus2Name(last), \ + __FUNCTION__, __LINE__); \ + status = last; \ + } \ + } while (gcvFALSE) +#define _gcmkCHECK_STATUS(prefix, func) \ + do { \ + last = func; \ + if (gcmIS_ERROR(last)) { \ + prefix##TRACE(gcvLEVEL_ERROR, \ + #prefix "CHECK_STATUS: status=%d(%s) @ %s(%d)", \ + last, gckOS_DebugStatus2Name(last), \ + __FUNCTION__, __LINE__); \ + status = last; \ + } \ + } while (gcvFALSE) +#define gcmCHECK_STATUS(func) _gcmCHECK_STATUS(gcm, func) +#define gcmkCHECK_STATUS(func) _gcmkCHECK_STATUS(gcmk, func) + +/******************************************************************************* + ** + ** gcmVERIFY_ARGUMENT + ** + ** Assert if an argument does not apply to the specified expression. If + ** the argument evaluates to false, gcvSTATUS_INVALID_ARGUMENT will be + ** returned from the current function. In retail mode this macro does + ** nothing. + ** + ** ARGUMENTS: + ** + ** arg Argument to evaluate. 
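+ **
+ ** EXAMPLE (illustrative sketch; "Heap" and "Bytes" are hypothetical
+ ** parameters). Since the failure path expands to gcmFOOTER_ARG(), the
+ ** function is expected to have opened with gcmHEADER()/gcmHEADER_ARG():
+ **
+ **     gcmHEADER_ARG("Heap=0x%x Bytes=%lu", Heap, Bytes);
+ **
+ **     gcmVERIFY_ARGUMENT(Heap != gcvNULL);
+ **     gcmVERIFY_ARGUMENT(Bytes > 0);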
+ */ +#define _gcmVERIFY_ARGUMENT(prefix, arg) \ + do \ + { \ + if (!(arg)) \ + { \ + prefix##TRACE(gcvLEVEL_ERROR, #prefix "VERIFY_ARGUMENT failed:"); \ + prefix##ASSERT(arg); \ + prefix##FOOTER_ARG("status=%d", gcvSTATUS_INVALID_ARGUMENT); \ + return gcvSTATUS_INVALID_ARGUMENT; \ + } \ + } \ + while (gcvFALSE) +#define gcmVERIFY_ARGUMENT(arg) _gcmVERIFY_ARGUMENT(gcm, arg) +#define gcmkVERIFY_ARGUMENT(arg) _gcmVERIFY_ARGUMENT(gcmk, arg) + +/******************************************************************************* + ** + ** gcmDEBUG_VERIFY_ARGUMENT + ** + ** Works just like gcmVERIFY_ARGUMENT, but is only valid in debug mode. + ** Use this to verify arguments inside non-public API functions. + */ +#if gcdDEBUG +# define gcmDEBUG_VERIFY_ARGUMENT(arg) _gcmVERIFY_ARGUMENT(gcm, arg) +# define gcmkDEBUG_VERIFY_ARGUMENT(arg) _gcmkVERIFY_ARGUMENT(gcm, arg) +#else +# define gcmDEBUG_VERIFY_ARGUMENT(arg) +# define gcmkDEBUG_VERIFY_ARGUMENT(arg) +#endif + +/******************************************************************************* + ** + ** _gcmCHECK_ADD_OVERFLOW + */ +#define _gcmCHECK_ADD_OVERFLOW(x, y) \ +(\ + ((x) > 0 && (y) > 0 && gcvMAXSIZE_T - (x) < (y)) ? gcvSTATUS_RESLUT_OVERFLOW : gcvSTATUS_OK \ +) + +#define gcmCHECK_ADD_OVERFLOW(x, y) _gcmCHECK_ADD_OVERFLOW(x, y) +#define gcmkCHECK_ADD_OVERFLOW(x, y) _gcmCHECK_ADD_OVERFLOW(x, y) + +#define MAX_LOOP_COUNT 0x7FFFFFFF + +/****************************************************************************** + ****************************** User Debug Option ***************************** + ******************************************************************************/ + +typedef struct _gcsUSER_DEBUG_OPTION { + gceDEBUG_MSG debugMsg; +} gcsUSER_DEBUG_OPTION; + +gcsUSER_DEBUG_OPTION * +gcoHAL_GetUserDebugOption(void); + +#if gcdHAS_ELLIPSIS +# define gcmUSER_DEBUG_MSG(level, ...) \ + do \ + { \ + if (level <= gcoHAL_GetUserDebugOption()->debugMsg) \ + { \ + gcoOS_Print(__VA_ARGS__); \ + } \ + } while (gcvFALSE) + +# define gcmUSER_DEBUG_ERROR_MSG(...) gcmUSER_DEBUG_MSG(gcvDEBUG_MSG_ERROR, "Error: " __VA_ARGS__) +# define gcmUSER_DEBUG_WARNING_MSG(...) gcmUSER_DEBUG_MSG(gcvDEBUG_MSG_WARNING, "Warring: " __VA_ARGS__) +#else +# define gcmUSER_DEBUG_MSG +# define gcmUSER_DEBUG_ERROR_MSG +# define gcmUSER_DEBUG_WARNING_MSG +#endif + +/******************************************************************************* + ** + ** A set of macros to aid state loading. + ** + ** ARGUMENTS: + ** + ** CommandBuffer Pointer to a gcoCMDBUF object. + ** StateDelta Pointer to a gcsSTATE_DELTA state delta structure. + ** Memory Destination memory pointer of gctUINT32_PTR type. + ** PartOfContext Whether or not the state is a part of the context. + ** FixedPoint Whether or not the state is of the fixed point format. + ** Count Number of consecutive states to be loaded. + ** Address State address. + ** Data Data to be set to the state. 
+ */ + +/*----------------------------------------------------------------------------*/ + +#if gcmIS_DEBUG(gcdDEBUG_CODE) + +# define gcmSTORELOADSTATE(CommandBuffer, Memory, Address, Count) \ + CommandBuffer->lastLoadStatePtr = gcmPTR_TO_UINT64(Memory); \ + CommandBuffer->lastLoadStateAddress = Address; \ + CommandBuffer->lastLoadStateCount = Count + +# define gcmVERIFYLOADSTATE(CommandBuffer, Memory, Address) \ + gcmASSERT(\ + (gctUINT) (Memory - gcmUINT64_TO_TYPE(CommandBuffer->lastLoadStatePtr, gctUINT32_PTR) - 1) \ + == \ + (gctUINT) (Address - CommandBuffer->lastLoadStateAddress) \ + ); \ + \ + gcmASSERT(CommandBuffer->lastLoadStateCount > 0); \ + \ + CommandBuffer->lastLoadStateCount -= 1 + +# define gcmVERIFYLOADSTATEDONE(CommandBuffer) \ + gcmASSERT(CommandBuffer->lastLoadStateCount == 0); + +# define gcmDEFINELOADSTATEBASE() \ + gctUINT32_PTR LoadStateBase; + +# define gcmSETLOADSTATEBASE(CommandBuffer, OutSide) \ + if (OutSide) \ + {\ + LoadStateBase = (gctUINT32_PTR)*OutSide; \ + }\ + else\ + {\ + LoadStateBase = (gctUINT_PTR)CommandBuffer->buffer;\ + } + + +# define gcmVERIFYLOADSTATEALIGNED(CommandBuffer, Memory) \ + gcmASSERT(((Memory - LoadStateBase) & 1) == 0); + +# define gcmUNSETLOADSTATEBASE() \ + LoadStateBase = LoadStateBase; + +#else + +# define gcmSTORELOADSTATE(CommandBuffer, Memory, Address, Count) +# define gcmVERIFYLOADSTATE(CommandBuffer, Memory, Address) +# define gcmVERIFYLOADSTATEDONE(CommandBuffer) + +# define gcmDEFINELOADSTATEBASE() +# define gcmSETLOADSTATEBASE(CommandBuffer, OutSide) +# define gcmVERIFYLOADSTATEALIGNED(CommandBuffer, Memory) +# define gcmUNSETLOADSTATEBASE() + +#endif + +/*----------------------------------------------------------------------------*/ + +#define gcmDUMPSTATEDATA(StateDelta, FixedPoint, Address, Data) + +#define gcmDEFINESTATEBUFFER(CommandBuffer, StateDelta, Memory, ReserveSize) \ + gctSIZE_T ReserveSize; \ + gcoCMDBUF CommandBuffer; \ + gctUINT32_PTR Memory; \ + gcsSTATE_DELTA_PTR StateDelta; \ + gceENGINE CurrentEngine = gcvENGINE_RENDER + +#define gcmBEGINSTATEBUFFER(Hardware, CommandBuffer, StateDelta, Memory, ReserveSize) \ + { \ + gcmONERROR(gcoBUFFER_Reserve(Hardware->engine[CurrentEngine].buffer, \ + ReserveSize, gcvTRUE, \ + gcvCOMMAND_3D, &CommandBuffer)); \ + \ + Memory = (gctUINT32_PTR)gcmUINT64_TO_PTR(CommandBuffer->lastReserve); \ + \ + StateDelta = Hardware->delta; \ + } + +#define gcmENDSTATEBUFFER(Hardware, CommandBuffer, Memory, ReserveSize) \ + { \ + gcmASSERT(gcmUINT64_TO_TYPE(CommandBuffer->lastReserve, gctUINT8_PTR) + ReserveSize \ + == (gctUINT8_PTR)Memory); \ + } + +/*----------------------------------------------------------------------------*/ + +#define gcmBEGINSTATEBATCH(CommandBuffer, Memory, FixedPoint, Address, Count) \ + { \ + gcmASSERT(((Memory - gcmUINT64_TO_TYPE(CommandBuffer->lastReserve, gctUINT32_PTR)) & 1) \ + == 0); \ + gcmASSERT((gctUINT32)Count <= 1024); \ + \ + gcmVERIFYLOADSTATEDONE(CommandBuffer); \ + \ + gcmSTORELOADSTATE(CommandBuffer, Memory, Address, Count); \ + \ + *Memory++ = gcmSETFIELDVALUE(0, AQ_COMMAND_LOAD_STATE_COMMAND, OPCODE, LOAD_STATE) | \ + gcmSETFIELD(0, AQ_COMMAND_LOAD_STATE_COMMAND, FLOAT, FixedPoint) | \ + gcmSETFIELD(0, AQ_COMMAND_LOAD_STATE_COMMAND, COUNT, Count) | \ + gcmSETFIELD(0, AQ_COMMAND_LOAD_STATE_COMMAND, ADDRESS, Address); \ + } + +#define gcmENDSTATEBATCH(CommandBuffer, Memory) \ + { \ + gcmVERIFYLOADSTATEDONE(CommandBuffer); \ + \ + gcmASSERT(((Memory - gcmUINT64_TO_TYPE(CommandBuffer->lastReserve, gctUINT32_PTR)) & 1) \ + == 0); \ + } + 
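+/*******************************************************************************
+ **
+ ** EXAMPLE
+ **
+ ** Illustrative sketch (not taken from the original header) of how the
+ ** state buffer/batch macros above fit together. "Hardware", "Data" and
+ ** the state address 0x0A00 are hypothetical; a gceSTATUS "status"
+ ** variable and an OnError label are required because gcmBEGINSTATEBUFFER
+ ** expands to gcmONERROR internally, and real callers align and pad the
+ ** reservation as their hardware requires. The reservation below covers
+ ** one LOAD_STATE header word plus one data word.
+ **
+ **     gceSTATUS status;
+ **     gcmDEFINESTATEBUFFER(cmdBuffer, stateDelta, memory, reserveSize);
+ **
+ **     reserveSize = 2 * gcmSIZEOF(gctUINT32);
+ **
+ **     gcmBEGINSTATEBUFFER(Hardware, cmdBuffer, stateDelta, memory, reserveSize);
+ **
+ **     gcmBEGINSTATEBATCH(cmdBuffer, memory, gcvFALSE, 0x0A00, 1);
+ **         gcmSETSTATEDATA(stateDelta, cmdBuffer, memory, gcvFALSE, 0x0A00, Data);
+ **     gcmENDSTATEBATCH(cmdBuffer, memory);
+ **
+ **     gcmENDSTATEBUFFER(Hardware, cmdBuffer, memory, reserveSize);
+ **
+ **     return gcvSTATUS_OK;
+ **
+ ** OnError:
+ **     return status;
+ */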
+/*----------------------------------------------------------------------------*/ + +#define gcmSETSTATEDATA(StateDelta, CommandBuffer, Memory, \ + FixedPoint, Address, Data) \ + { \ + gctUINT32 __temp_data32__; \ + \ + gcmVERIFYLOADSTATE(CommandBuffer, Memory, Address); \ + \ + gcmSAFECASTSIZET(__temp_data32__, Data); \ + \ + *Memory++ = __temp_data32__; \ + \ + gcoHARDWARE_UpdateDelta(StateDelta, Address, \ + 0, __temp_data32__); \ + \ + gcmDUMPSTATEDATA(StateDelta, FixedPoint, \ + Address, __temp_data32__); \ + } + +#define gcmSETSTATEDATAWITHMASK(StateDelta, CommandBuffer, Memory, \ + FixedPoint, Address, Mask, Data) \ + { \ + gctUINT32 __temp_data32__; \ + \ + gcmVERIFYLOADSTATE(CommandBuffer, Memory, Address); \ + \ + gcmSAFECASTSIZET(__temp_data32__, Data); \ + \ + *Memory++ = __temp_data32__; \ + \ + gcoHARDWARE_UpdateDelta(StateDelta, Address, \ + Mask, __temp_data32__); \ + \ + gcmDUMPSTATEDATA(StateDelta, FixedPoint, \ + Address, __temp_data32__); \ + } + +#define gcmSETCTRLSTATE(StateDelta, CommandBuffer, \ + Memory, Address, Data) \ + { \ + gctUINT32 __temp_data32__; \ + \ + gcmVERIFYLOADSTATE(CommandBuffer, Memory, Address); \ + \ + gcmSAFECASTSIZET(__temp_data32__, Data); \ + \ + *Memory++ = __temp_data32__; \ + \ + gcmDUMPSTATEDATA(StateDelta, gcvFALSE, \ + Address, __temp_data32__); \ + } + +#define gcmSETFILLER(CommandBuffer, Memory) \ + { \ + gcmVERIFYLOADSTATEDONE(CommandBuffer); \ + \ + *(gctUINT32_PTR)Memory = 0x18000000; \ + Memory += 1; \ + } + +/*----------------------------------------------------------------------------*/ + +#define gcmSETSINGLESTATE(StateDelta, CommandBuffer, \ + Memory, FixedPoint, Address, Data) \ + { \ + gcmBEGINSTATEBATCH(CommandBuffer, Memory, \ + FixedPoint, Address, 1); \ + gcmSETSTATEDATA(StateDelta, CommandBuffer, Memory, \ + FixedPoint, Address, Data); \ + gcmENDSTATEBATCH(CommandBuffer, Memory); \ + } + +#define gcmSETSINGLESTATEWITHMASK(StateDelta, CommandBuffer, Memory,\ + FixedPoint, Address, Mask, Data) \ + \ + { \ + gcmBEGINSTATEBATCH(CommandBuffer, Memory, \ + FixedPoint, Address, 1); \ + gcmSETSTATEDATAWITHMASK(StateDelta, CommandBuffer, Memory, \ + FixedPoint, Address, Mask, Data); \ + \ + gcmENDSTATEBATCH(CommandBuffer, Memory); \ + } + +#define gcmSETSINGLECTRLSTATE(StateDelta, CommandBuffer, \ + Memory, FixedPoint, Address, Data) \ + { \ + gcmBEGINSTATEBATCH(CommandBuffer, Memory, FixedPoint, Address, 1); \ + gcmSETCTRLSTATE(StateDelta, CommandBuffer, Memory, Address, Data); \ + gcmENDSTATEBATCH(CommandBuffer, Memory); \ + } + +#define gcmSETSEMASTALLPIPE(StateDelta, CommandBuffer, Memory, Data)\ + { \ + gcmSETSINGLECTRLSTATE(StateDelta, CommandBuffer, Memory, \ + gcvFALSE, 0x0E02, Data); \ + \ + *Memory++ = gcmSETFIELDVALUE(0, STALL_COMMAND, OPCODE, STALL); \ + \ + *Memory++ = Data; \ + \ + gcmDUMP(gcvNULL, "#[stall 0x%08X 0x%08X]", \ + gcmSETFIELDVALUE(0, AQ_SEMAPHORE, SOURCE, FRONT_END), \ + gcmSETFIELDVALUE(0, AQ_SEMAPHORE, DESTINATION, PIXEL_ENGINE)); \ + } + +/******************************************************************************* + ** + ** gcmSETSTARTDECOMMAND + ** + ** Form a START_DE command. + ** + ** ARGUMENTS: + ** + ** Memory Destination memory pointer of gctUINT32_PTR type. + ** Count Number of the rectangles. 
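+ **
+ ** EXAMPLE (illustrative only): gcmSETSTARTDECOMMAND(memory, 1) emits the
+ ** START_DE header for a single rectangle followed by the 0xDEADDEED
+ ** placeholder data word.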
+ */ + +#define gcmSETSTARTDECOMMAND(Memory, Count) \ + { \ + *Memory++ = gcmSETFIELDVALUE(0, AQ_COMMAND_START_DE_COMMAND, OPCODE, START_DE) | \ + gcmSETFIELD(0, AQ_COMMAND_START_DE_COMMAND, COUNT, Count) | \ + gcmSETFIELD(0, AQ_COMMAND_START_DE_COMMAND, DATA_COUNT, 0); \ + \ + *Memory++ = 0xDEADDEED; \ + } + +/***************************************** + ** Temp command buffer macro + */ +#define gcmDEFINESTATEBUFFER_NEW(CommandBuffer, StateDelta, Memory) \ + gcmDEFINELOADSTATEBASE() \ + gcsTEMPCMDBUF CommandBuffer = gcvNULL; \ + gctUINT32_PTR Memory; \ + gcsSTATE_DELTA_PTR StateDelta; \ + gceENGINE CurrentEngine = gcvENGINE_RENDER + +#define gcmBEGINSTATEBUFFER_NEW(Hardware, CommandBuffer, \ + StateDelta, Memory, OutSide) \ + { \ + if (OutSide) { \ + Memory = (gctUINT32_PTR)*OutSide; \ + } else { \ + gcmONERROR(gcoBUFFER_StartTEMPCMDBUF(Hardware->engine[CurrentEngine].buffer, \ + Hardware->engine[CurrentEngine].queue, \ + &CommandBuffer)); \ + \ + Memory = (gctUINT32_PTR)(CommandBuffer->buffer); \ + } \ + StateDelta = Hardware->tempDelta; \ + \ + gcmSETLOADSTATEBASE(CommandBuffer, OutSide); \ + } + +#define gcmENDSTATEBUFFER_NEW(Hardware, CommandBuffer, Memory, OutSide) \ + { \ + if (OutSide) { \ + *OutSide = Memory; \ + } else { \ + CommandBuffer->currentByteSize = \ + (gctUINT32)((gctUINT8_PTR)Memory - (gctUINT8_PTR)CommandBuffer->buffer); \ + \ + gcmONERROR(gcoBUFFER_EndTEMPCMDBUF(\ + Hardware->engine[CurrentEngine].buffer, gcvFALSE)); \ + if (Hardware->constructType != gcvHARDWARE_2D) { \ + gcoHARDWARE_UpdateTempDelta(Hardware); \ + } \ + } \ + gcmUNSETLOADSTATEBASE() \ + } + +#define gcmDEFINECTRLSTATEBUFFER(CommandBuffer, Memory) \ + gcmDEFINELOADSTATEBASE() \ + gcsTEMPCMDBUF CommandBuffer = gcvNULL; \ + gctUINT32_PTR Memory; \ + gceENGINE CurrentEngine = gcvENGINE_RENDER + +#define gcmBEGINCTRLSTATEBUFFER(Hardware, CommandBuffer, Memory, OutSide) \ +{ \ + if (OutSide) \ + { \ + Memory = (gctUINT32_PTR)*OutSide; \ + } \ + else \ + { \ + gcmONERROR(gcoBUFFER_StartTEMPCMDBUF(\ + Hardware->engine[CurrentEngine].buffer, \ + Hardware->engine[CurrentEngine].queue, &CommandBuffer \ + )); \ + \ + Memory = (gctUINT32_PTR)(CommandBuffer->buffer); \ + } \ + gcmSETLOADSTATEBASE(CommandBuffer,OutSide); \ +} + +/*----------------------------------------------------------------------------*/ + +#define gcmBEGINSTATEBATCH_NEW(CommandBuffer, Memory, \ + FixedPoint, Address, Count) \ + { \ + gcmVERIFYLOADSTATEALIGNED(CommandBuffer, Memory); \ + gcmASSERT((gctUINT32)Count <= 1024); \ + \ + *Memory++ = gcmSETFIELDVALUE(0, AQ_COMMAND_LOAD_STATE_COMMAND, OPCODE, LOAD_STATE) | \ + gcmSETFIELD(0, AQ_COMMAND_LOAD_STATE_COMMAND, FLOAT, FixedPoint) | \ + gcmSETFIELD(0, AQ_COMMAND_LOAD_STATE_COMMAND, COUNT, Count) | \ + gcmSETFIELD(0, AQ_COMMAND_LOAD_STATE_COMMAND, ADDRESS, Address); \ + } + +#define gcmENDSTATEBATCH_NEW(CommandBuffer, Memory) \ + gcmVERIFYLOADSTATEALIGNED(CommandBuffer, Memory) + +/*----------------------------------------------------------------------------*/ + +#define gcmSETSTATEDATA_NEW(StateDelta, CommandBuffer, Memory, \ + FixedPoint, Address, Data) \ + { \ + gctUINT32 __temp_data32__; \ + \ + gcmSAFECASTSIZET(__temp_data32__, Data); \ + \ + *Memory++ = __temp_data32__; \ + \ + gcoHARDWARE_UpdateDelta(StateDelta, Address, 0, __temp_data32__); \ + \ + gcmDUMPSTATEDATA(StateDelta, FixedPoint, Address, __temp_data32__); \ + } + +#define gcmSETSTATEDATAWITHMASK_NEW(StateDelta, CommandBuffer, Memory, \ + FixedPoint, Address, Mask, Data) \ + { \ + gctUINT32 __temp_data32__; \ + \ + 
gcmSAFECASTSIZET(__temp_data32__, Data); \ + \ + *Memory++ = __temp_data32__; \ + \ + gcoHARDWARE_UpdateDelta(StateDelta, Address, Mask, __temp_data32__);\ + \ + gcmDUMPSTATEDATA(StateDelta, FixedPoint, Address, __temp_data32__); \ + } + +#define gcmSETCTRLSTATE_NEW(StateDelta, CommandBuffer, Memory, Address, Data) \ + { \ + gctUINT32 __temp_data32__; \ + \ + gcmSAFECASTSIZET(__temp_data32__, Data); \ + \ + *Memory++ = __temp_data32__; \ + \ + gcmDUMPSTATEDATA(StateDelta, gcvFALSE, Address, __temp_data32__); \ + } + +#define gcmSETFILLER_NEW(CommandBuffer, Memory) \ + { \ + *(gctUINT32_PTR)Memory = 0x18000000; \ + Memory += 1; \ + } + +/*----------------------------------------------------------------------------*/ + +#define gcmSETSINGLESTATE_NEW(StateDelta, CommandBuffer, Memory, \ + FixedPoint, Address, Data) \ + { \ + gcmBEGINSTATEBATCH_NEW(CommandBuffer, Memory, \ + FixedPoint, Address, 1); \ + gcmSETSTATEDATA_NEW(StateDelta, CommandBuffer, Memory, \ + FixedPoint, Address, Data); \ + gcmENDSTATEBATCH_NEW(CommandBuffer, Memory); \ + } + +#define gcmSETSINGLESTATEWITHMASK_NEW(StateDelta, CommandBuffer, Memory, \ + FixedPoint, Address, Mask, Data) \ + { \ + gcmBEGINSTATEBATCH_NEW(CommandBuffer, Memory, \ + FixedPoint, Address, 1); \ + gcmSETSTATEDATAWITHMASK_NEW(StateDelta, CommandBuffer, Memory, \ + FixedPoint, Address, Mask, Data); \ + gcmENDSTATEBATCH_NEW(CommandBuffer, Memory); \ + } + +#define gcmSETSINGLECTRLSTATE_NEW(StateDelta, CommandBuffer, Memory, \ + FixedPoint, Address, Data) \ + { \ + gcmBEGINSTATEBATCH_NEW(CommandBuffer, Memory, \ + FixedPoint, Address, 1); \ + gcmSETCTRLSTATE_NEW(StateDelta, CommandBuffer, \ + Memory, Address, Data); \ + gcmENDSTATEBATCH_NEW(CommandBuffer, Memory); \ + } + +#define gcmSETBLOCKCTRLSTATE_NEW(StateDelta, CommandBuffer, Memory, \ + FixedPoint, Address, Data, Count) \ + { \ + gctUINT32 c; \ + gcmBEGINSTATEBATCH_NEW(CommandBuffer, Memory, \ + FixedPoint, Address, Count); \ + for (c = 0; c < Count; c++) { \ + gcmSETCTRLSTATE_NEW(StateDelta, CommandBuffer, \ + Memory, Address, Data); \ + } \ + gcmENDSTATEBATCH_NEW(CommandBuffer, Memory); \ + } + +#define gcmSETCTRLSTATES_NEW(StateDelta, CommandBuffer, Memory, \ + FixedPoint, Address, Data, Count) \ + { \ + gctUINT32 c; \ + gcmBEGINSTATEBATCH_NEW(CommandBuffer, Memory, \ + FixedPoint, Address, Count); \ + for (c = 0; c < Count; c++) { \ + gcmSETCTRLSTATE_NEW(StateDelta, CommandBuffer, \ + Memory, Address, Data[c]); \ + } \ + gcmENDSTATEBATCH_NEW(CommandBuffer, Memory); \ + } + +#define gcmSETSEMASTALLPIPE_NEW(StateDelta, CommandBuffer, Memory, Data)\ + { \ + gcmSETSINGLECTRLSTATE_NEW(StateDelta, CommandBuffer, Memory, \ + gcvFALSE, 0x0E02, Data); \ + \ + *Memory++ = gcmSETFIELDVALUE(0, STALL_COMMAND, OPCODE, STALL); \ + \ + *Memory++ = Data; \ + \ + gcmDUMP(gcvNULL, "#[stall 0x%08X 0x%08X]", \ + gcmSETFIELDVALUE(0, AQ_SEMAPHORE, SOURCE, FRONT_END), \ + gcmSETFIELDVALUE(0, AQ_SEMAPHORE, DESTINATION, PIXEL_ENGINE)); \ + } + +#define gcmSETSTATEDATA_NEW_FAST(StateDelta, CommandBuffer, Memory, \ + FixedPoint, Address, Data) \ + { \ + gctUINT32 __temp_data32__; \ + \ + gcmSAFECASTSIZET(__temp_data32__, Data); \ + \ + *Memory++ = __temp_data32__; \ + \ + gcmDUMPSTATEDATA(StateDelta, FixedPoint, Address, __temp_data32__); \ + } + +#define gcmSETSTATEDATAWITHMASK_NEW_FAST(StateDelta, CommandBuffer, Memory, \ + FixedPoint, Address, Mask, Data) \ + { \ + gctUINT32 __temp_data32__; \ + \ + __temp_data32__ = Data; \ + \ + *Memory++ = __temp_data32__; \ + \ + gcmDUMPSTATEDATA(StateDelta, FixedPoint, Address, 
__temp_data32__); \ + } + +#define gcmSETSINGLESTATE_NEW_FAST(StateDelta, CommandBuffer, Memory, \ + FixedPoint, Address, Data) \ + { \ + gcmBEGINSTATEBATCH_NEW(CommandBuffer, Memory, \ + FixedPoint, Address, 1); \ + gcmSETSTATEDATA_NEW_FAST(StateDelta, CommandBuffer, Memory, \ + FixedPoint, Address, Data); \ + gcmENDSTATEBATCH_NEW(CommandBuffer, Memory); \ + } + +#define gcmSETSINGLESTATEWITHMASK_NEW_FAST(StateDelta, CommandBuffer, Memory, \ + FixedPoint, Address, Mask, Data) \ + { \ + gcmBEGINSTATEBATCH_NEW(CommandBuffer, Memory, FixedPoint, Address, 1); \ + gcmSETSTATEDATAWITHMASK_NEW_FAST(StateDelta, CommandBuffer, Memory, \ + FixedPoint, Address, Mask, Data); \ + gcmENDSTATEBATCH_NEW(CommandBuffer, Memory); \ + } + +#define gcmDEFINESTATEBUFFER_NEW_FAST(CommandBuffer, Memory) \ + gcmDEFINELOADSTATEBASE() \ + gcsTEMPCMDBUF CommandBuffer = gcvNULL; \ + gctUINT32_PTR Memory; + +#define gcmBEGINSTATEBUFFER_NEW_FAST(Hardware, CommandBuffer, Memory, OutSide) \ +{ \ + if (OutSide) { \ + Memory = (gctUINT32_PTR)*OutSide; \ + } else { \ + gcmONERROR(gcoBUFFER_StartTEMPCMDBUF(Hardware->engine[gcvENGINE_RENDER].buffer, \ + Hardware->engine[gcvENGINE_RENDER].queue, \ + &CommandBuffer)); \ + \ + Memory = (gctUINT32_PTR)(CommandBuffer->buffer); \ + \ + } \ + \ + gcmSETLOADSTATEBASE(CommandBuffer,OutSide); \ +} + +#define gcmENDSTATEBUFFER_NEW_FAST(Hardware, CommandBuffer, Memory, OutSide) \ + { \ + if (OutSide) { \ + *OutSide = Memory; \ + } else { \ + CommandBuffer->currentByteSize = \ + (gctUINT32)((gctUINT8_PTR)Memory - (gctUINT8_PTR)CommandBuffer->buffer); \ + \ + gcmONERROR(gcoBUFFER_EndTEMPCMDBUF(\ + Hardware->engine[gcvENGINE_RENDER].buffer, gcvFALSE)); \ + } \ + gcmUNSETLOADSTATEBASE() \ + } + +/******************************************************************************* + ** + ** gcmCONFIGUREUNIFORMS + ** + ** Configure uniforms according to chip and numConstants. + */ +#if !gcdENABLE_UNIFIED_CONSTANT +# define gcmCONFIGUREUNIFORMS(ChipModel, ChipRevision, NumConstants, \ + UnifiedConst, VsConstBase, PsConstBase, \ + VsConstMax, PsConstMax, ConstMax) \ + { \ + if (ChipModel == gcv2000 && \ + (ChipRevision == 0x5118 || ChipRevision == 0x5140)) { \ + UnifiedConst = gcvFALSE; \ + VsConstBase = 0x1400; \ + PsConstBase = 0x1C00; \ + VsConstMax = 256; \ + PsConstMax = 64; \ + ConstMax = 320; \ + } else if (NumConstants == 320) { \ + UnifiedConst = gcvFALSE; \ + VsConstBase = 0x1400; \ + PsConstBase = 0x1C00; \ + VsConstMax = 256; \ + PsConstMax = 64; \ + ConstMax = 320; \ + } /* All GC1000 series chips can only support 64 uniforms for ps on non-unified const mode. 
*/ \ + else if (NumConstants > 256 && ChipModel == gcv1000) { \ + UnifiedConst = gcvFALSE; \ + VsConstBase = 0x1400; \ + PsConstBase = 0x1C00; \ + VsConstMax = 256; \ + PsConstMax = 64; \ + ConstMax = 320; \ + } else if (NumConstants > 256) { \ + UnifiedConst = gcvFALSE; \ + VsConstBase = 0x1400; \ + PsConstBase = 0x1C00; \ + VsConstMax = 256; \ + PsConstMax = 256; \ + ConstMax = 512; \ + } else if (NumConstants == 256) { \ + UnifiedConst = gcvFALSE; \ + VsConstBase = 0x1400; \ + PsConstBase = 0x1C00; \ + VsConstMax = 256; \ + PsConstMax = 256; \ + ConstMax = 512; \ + } else { \ + UnifiedConst = gcvFALSE; \ + VsConstBase = 0x1400; \ + PsConstBase = 0x1C00; \ + VsConstMax = 168; \ + PsConstMax = 64; \ + ConstMax = 232; \ + } \ + } +#else +# define gcmCONFIGUREUNIFORMS(ChipModel, ChipRevision, Halti5Avail, \ + SmallBatch, ComputeOnly, NumConstants, \ + UnifiedConst, VsConstBase, PsConstBase, \ + VsConstMax, PsConstMax, ConstMax) \ + { \ + if (NumConstants > 256) { \ + UnifiedConst = gcvTRUE; \ + if (SmallBatch) { \ + VsConstBase = 0xD000; \ + PsConstBase = 0xD000; \ + } else if (Halti5Avail) { \ + VsConstBase = 0xD000; \ + PsConstBase = 0xD800; \ + } else { \ + VsConstBase = 0xC000; \ + PsConstBase = 0xC000; \ + } \ + if ((ChipModel == gcv880) && \ + ((ChipRevision & 0xfff0) == 0x5120)) { \ + VsConstMax = 512; \ + PsConstMax = 64; \ + ConstMax = 576; \ + } else { \ + VsConstMax = gcmMIN(512, NumConstants - 64); \ + PsConstMax = gcmMIN(512, NumConstants - 64); \ + ConstMax = NumConstants; \ + } \ + } else if (NumConstants == 256) { \ + if (ChipModel == gcv2000 && \ + (ChipRevision == 0x5118 || ChipRevision == 0x5140)) { \ + UnifiedConst = gcvFALSE; \ + VsConstBase = 0x1400; \ + PsConstBase = 0x1C00; \ + VsConstMax = 256; \ + PsConstMax = 64; \ + ConstMax = 320; \ + } else { \ + UnifiedConst = gcvFALSE; \ + VsConstBase = 0x1400; \ + PsConstBase = 0x1C00; \ + VsConstMax = 256; \ + PsConstMax = 256; \ + ConstMax = 512; \ + } \ + } else if (NumConstants == 160 && ComputeOnly) { \ + UnifiedConst = gcvTRUE; \ + VsConstBase = 0xD000; \ + PsConstBase = 0xD800; \ + VsConstMax = 0; \ + PsConstMax = 160; \ + ConstMax = 160; \ + } else { \ + UnifiedConst = gcvFALSE; \ + VsConstBase = 0x1400; \ + PsConstBase = 0x1C00; \ + VsConstMax = 168; \ + PsConstMax = 64; \ + ConstMax = 232; \ + } \ + } +#endif + +/******************************************************************************* + ** + ** gcmCONFIGUREUNIFORMS2 + ** only fix clang build error + ** + ** Configure uniforms according to chip and numConstants. + */ +#if !gcdENABLE_UNIFIED_CONSTANT +#define gcmCONFIGUREUNIFORMS2(ChipModel, ChipRevision, NumConstants, \ + UnifiedConst, VsConstMax, PsConstMax) \ +{ \ + if (ChipModel == gcv2000 && (ChipRevision == 0x5118 || ChipRevision == 0x5140)) \ + { \ + UnifiedConst = gcvFALSE; \ + VsConstMax = 256; \ + PsConstMax = 64; \ + } \ + else if (NumConstants == 320) \ + { \ + UnifiedConst = gcvFALSE; \ + VsConstMax = 256; \ + PsConstMax = 64; \ + } \ + /* All GC1000 series chips can only support 64 uniforms for ps on non-unified const mode. 
*/ \ + else if (NumConstants > 256 && ChipModel == gcv1000) \ + { \ + UnifiedConst = gcvFALSE; \ + VsConstMax = 256; \ + PsConstMax = 64; \ + } \ + else if (NumConstants > 256) \ + { \ + UnifiedConst = gcvFALSE; \ + VsConstMax = 256; \ + PsConstMax = 256; \ + } \ + else if (NumConstants == 256) \ + { \ + UnifiedConst = gcvFALSE; \ + VsConstMax = 256; \ + PsConstMax = 256; \ + } \ + else \ + { \ + UnifiedConst = gcvFALSE; \ + VsConstMax = 168; \ + PsConstMax = 64; \ + } \ +} +#else +#define gcmCONFIGUREUNIFORMS2(ChipModel, ChipRevision, Halti5Avail, SmallBatch, ComputeOnly, NumConstants, \ + UnifiedConst, VsConstMax, PsConstMax) \ +{ \ + if (NumConstants > 256) \ + { \ + UnifiedConst = gcvTRUE; \ + if ((ChipModel == gcv880) && ((ChipRevision & 0xfff0) == 0x5120)) \ + { \ + VsConstMax = 512; \ + PsConstMax = 64; \ + } \ + else \ + { \ + VsConstMax = gcmMIN(512, NumConstants - 64); \ + PsConstMax = gcmMIN(512, NumConstants - 64); \ + } \ + } \ + else if (NumConstants == 256) \ + { \ + if (ChipModel == gcv2000 && (ChipRevision == 0x5118 || ChipRevision == 0x5140)) \ + { \ + UnifiedConst = gcvFALSE; \ + VsConstMax = 256; \ + PsConstMax = 64; \ + } \ + else \ + { \ + UnifiedConst = gcvFALSE; \ + VsConstMax = 256; \ + PsConstMax = 256; \ + } \ + } \ + else if (NumConstants == 160 && ComputeOnly) \ + { \ + UnifiedConst = gcvTRUE; \ + VsConstMax = 0; \ + PsConstMax = 160; \ + } \ + else \ + { \ + UnifiedConst = gcvFALSE; \ + VsConstMax = 168; \ + PsConstMax = 64; \ + } \ +} +#endif + +#define gcmAnyTileStatusEnableForFullMultiSlice(SurfView, anyTsEnableForMultiSlice) \ + { \ + gctUINT i = 0; \ + for (; i < (SurfView->surf->requestD); i++) { \ + if ((SurfView->surf->tileStatusNode.pool != gcvPOOL_UNKNOWN) && \ + (SurfView->surf->tileStatusDisabled[i] == gcvFALSE)) { \ + *anyTsEnableForMultiSlice = gcvTRUE; \ + break; \ + } \ + } \ + } + +#define gcmAnyTileStatusEnableForMultiSlice(SurfView, anyTsEnableForMultiSlice) \ + { \ + gctUINT i = SurfView->firstSlice; \ + for (; i < (SurfView->firstSlice + SurfView->numSlices); i++) { \ + if ((SurfView->surf->tileStatusNode.pool != gcvPOOL_UNKNOWN) && \ + (SurfView->surf->tileStatusDisabled[i] == gcvFALSE)) { \ + *anyTsEnableForMultiSlice = gcvTRUE; \ + break; \ + } \ + } \ + } + +#define gcmCanTileStatusEnabledForMultiSlice(SurfView, canTsEnabled) \ + { \ + if (SurfView->numSlices > 1) { \ + if (SurfView->surf->tileStatusNode.pool != gcvPOOL_UNKNOWN) { \ + gctUINT i = 0; \ + for (; i < SurfView->numSlices; i++) { \ + if (SurfView->surf->tileStatusDisabled[i] == gcvTRUE) { \ + *canTsEnabled = gcvFALSE; \ + break; \ + } \ + if (SurfView->surf->fcValue[i] != SurfView->surf->fcValue[0]) { \ + *canTsEnabled = gcvFALSE; \ + break; \ + } \ + \ + if (SurfView->surf->fcValueUpper[i] != SurfView->surf->fcValueUpper[0]) { \ + *canTsEnabled = gcvFALSE; \ + break; \ + } \ + } \ + } else { \ + *canTsEnabled = gcvFALSE; \ + } \ + } else { \ + if ((SurfView->surf->tileStatusNode.pool == gcvPOOL_UNKNOWN) || \ + (SurfView->surf->tileStatusDisabled[SurfView->firstSlice] == gcvTRUE)) { \ + *canTsEnabled = gcvFALSE; \ + } \ + } \ + } + +#define gcmCONFIGUSC(prefix, featureUSC, featureSeparateLS, \ + featureComputeOnly, featureTS, featureGS, \ + featureUSCFullCacheFix, featureL1CacheSize, \ + featureUSCMaxPages, attribCacheRatio, L1CacheRatio) \ + { \ + attribCacheRatio = 0x2; \ + \ + if (featureUSC) { \ + if (featureSeparateLS) { \ + L1CacheRatio = 0x0; \ + } else { \ + gctUINT L1cacheSize; \ + \ + if (featureComputeOnly) { \ + L1cacheSize = featureL1CacheSize; \ + } else { \ + 
gctUINT attribBufSizeInKB; \ + if (featureTS) { \ + /* GS/TS must be bundled. */ \ + prefix##ASSERT(featureGS); \ + featureGS = featureGS; \ + attribBufSizeInKB = 42; \ + } else { \ + prefix##ASSERT(!featureGS); \ + attribBufSizeInKB = 8; \ + } \ + if (attribBufSizeInKB < featureUSCMaxPages) { \ + L1cacheSize = featureUSCMaxPages - attribBufSizeInKB; \ + } else { \ + attribBufSizeInKB -= 2; \ + L1cacheSize = 2; \ + } \ + } \ + prefix##ASSERT(L1cacheSize); \ + if (L1cacheSize >= featureL1CacheSize) { \ + L1CacheRatio = 0x0; \ + prefix##ASSERT(featureUSCFullCacheFix); \ + featureUSCFullCacheFix = featureUSCFullCacheFix; \ + } else { \ + static const gctINT s_uscCacheRatio[] = { \ + 100000, /* 1.0f */ \ + 50000, /* 0.5f */ \ + 25000, /* 0.25f */ \ + 12500, /* 0.125f */ \ + 62500, /* 0.0625f */ \ + 3125, /* 0.03125f */ \ + 75000, /* 0.75f */ \ + 0, /* 0.0f */ \ + }; \ + gctINT maxL1cacheSize = L1cacheSize * 100000; \ + gctINT delta = 2147483647; /* start with very big delta */ \ + gctINT i = 0; \ + gctINT curIndex = -1; \ + for (; i < gcmCOUNTOF(s_uscCacheRatio); ++i) { \ + gctINT curL1cacheSize = featureL1CacheSize * s_uscCacheRatio[i]; \ + \ + if ((maxL1cacheSize >= curL1cacheSize) && \ + ((maxL1cacheSize - curL1cacheSize) < delta)) { \ + curIndex = i; \ + delta = maxL1cacheSize - curL1cacheSize; \ + } \ + } \ + prefix##ASSERT(-1 != curIndex); \ + L1CacheRatio = curIndex; \ + } \ + } \ + } \ + } + +#define gcmCONFIGUSC2(prefix, Hardware, featureUSC, featureSeparateLS, featureComputeOnly, \ + featureTS, featureL1CacheSize, featureUSCMaxPages, \ + attribCacheRatio, L1CacheRatio) \ +{ \ + attribCacheRatio = 0x2; \ + \ + if (featureUSC) \ + { \ + if (featureSeparateLS) \ + { \ + L1CacheRatio = 0x0; \ + } \ + else \ + { \ + gctUINT L1cacheSize; \ + \ + if (featureComputeOnly) \ + { \ + L1cacheSize = featureL1CacheSize; \ + attribCacheRatio = 0x7; \ + } \ + else \ + { \ + gctUINT attribBufSizeInKB; \ + if (featureTS) \ + { \ + /* GS/TS must be bundled. */ \ + attribBufSizeInKB = 42; \ + attribCacheRatio = (Hardware->identity.chipModel != gcv8800)? 
\ + 0x3 \ + : 0x4; \ + } \ + else \ + { \ + attribBufSizeInKB = 8; \ + } \ + if (attribBufSizeInKB < featureUSCMaxPages) \ + { \ + L1cacheSize = featureUSCMaxPages - attribBufSizeInKB; \ + } \ + else \ + { \ + attribBufSizeInKB -= 2; \ + L1cacheSize = 2; \ + } \ + } \ + prefix##ASSERT(L1cacheSize); \ + if (L1cacheSize >= featureL1CacheSize) \ + { \ + L1CacheRatio = 0x0; \ + } \ + else \ + { \ + static const gctINT s_uscCacheRatio[] = \ + { \ + 100000,/* 1.0f */ \ + 50000, /* 0.5f */ \ + 25000, /* 0.25f */ \ + 12500, /* 0.125f */ \ + 62500, /* 0.0625f */ \ + 3125, /* 0.03125f */ \ + 75000, /* 0.75f */ \ + 0, /*0.0f */ \ + }; \ + gctINT maxL1cacheSize = L1cacheSize * 100000; \ + gctINT delta = 2147483647; /* start with very big delta */ \ + gctINT i = 0; \ + gctINT curIndex = -1; \ + for (; i < gcmCOUNTOF(s_uscCacheRatio); ++i) \ + { \ + gctINT curL1cacheSize = featureL1CacheSize * s_uscCacheRatio[i]; \ + \ + if ((maxL1cacheSize >= curL1cacheSize) && \ + ((maxL1cacheSize - curL1cacheSize) < delta)) \ + { \ + curIndex = i; \ + delta = maxL1cacheSize - curL1cacheSize; \ + } \ + } \ + prefix##ASSERT(-1 != curIndex); \ + L1CacheRatio = curIndex; \ + } \ + } \ + } \ +} \ + +#if VIVANTE_PROFILER_SYSTEM_MEMORY +typedef struct _memory_profile_info { + struct { + gctUINT64 currentSize; + gctUINT64 peakSize; + gctUINT64 total_allocate; + gctUINT64 total_free; + gctUINT32 total_allocateCount; + gctUINT32 total_freeCount; + } system_memory, gpu_memory; +} memory_profile_info; + +gceSTATUS +gcoOS_GetMemoryProfileInfo(size_t size, struct _memory_profile_info *info); + +gceSTATUS +gcoOS_DumpMemoryProfile(void); +gceSTATUS +gcoOS_InitMemoryProfile(void); +gceSTATUS +gcoOS_DeInitMemoryProfile(void); +#endif + +void +gcd_2D_printf( + const char *format, ... + ); + +#if gcd_2D_PRINT_TIME_STAMP +void +gcd_2D_print_time_stamp( + IN const char* function_name, + IN char* order + ); + +#define gcmPRINT_2D_TIME_STAMP(function_name, order) \ + gcd_2D_print_time_stamp(function_name, order) + +#else +#define gcmPRINT_2D_TIME_STAMP(function_name, order) + +#endif + +#ifdef __cplusplus +} +#endif + +#endif /* __gc_hal_base_h_ */ + + diff --git a/unified-tina/inc/HAL/gc_hal_debug_zones.h b/unified-tina/inc/HAL/gc_hal_debug_zones.h new file mode 100644 index 0000000..c77da4c --- /dev/null +++ b/unified-tina/inc/HAL/gc_hal_debug_zones.h @@ -0,0 +1,288 @@ +/**************************************************************************** +* +* Copyright (c) 2005 - 2023 by Vivante Corp. All rights reserved. +* +* The material in this file is confidential and contains trade secrets +* of Vivante Corporation. This is proprietary information owned by +* Vivante Corporation. No part of this work may be disclosed, +* reproduced, copied, transmitted, or used in any way for any purpose, +* without the express written permission of Vivante Corporation. +* +*****************************************************************************/ + + +#ifndef __gc_hal_debug_zones_h_ +#define __gc_hal_debug_zones_h_ + +#ifdef __cplusplus +extern "C" { +#endif + +/******************************************************************************\ + ************************ Debug Zone Pattern Summary *************************** + * A debug zone is an unsigned integer of 32 bit (Bit 31- Bit 0). * + * Bit 31 to 28 defines API, which is 0 for HAL API and has value of 1 - 14 * + * for Khronos API. Value 15 (0xF) is reserved for gcdZONE_NONE. * + * Bit 27 to 0 defines subzones of each API. Value 0xFFFFFFF is resevered for * + * gcdZONE_ALL. 
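 * Editorial worked example (derived from the definitions below, not part of
 * the original header): gcdZONE_EGL_SURFACE is (gcdZONE_API_EGL | (1 << 1)),
 * i.e. 0x10000002; gcmZONE_GET_API(0x10000002) yields 1, the EGL API index,
 * and bit 1 of the low 28 bits selects the SURFACE subzone.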
* + * * +\******************************************************************************/ + +/* Retrieve API bits 31 to 28 */ +#define gcmZONE_GET_API(zone) ((zone) >> 28) + +/* Retrieve Subzone bits 27 to 0 */ +#define gcmZONE_GET_SUBZONES(zone) ((zone) << 4) + +/****************************************************************************** + ******************************** HAL Zone ************************************ + ******************************************************************************/ + +#define gcdZONE_API_HAL ((gctUINT32)0 << 28) + +/****************************************************************************** + ******************************** HAL Subzones ******************************** + ******************************************************************************/ + +/* Subzones Kernel and User have in common */ +#define gcvZONE_OS (1 << 0) +#define gcvZONE_HARDWARE (1 << 1) +#define gcvZONE_HEAP (1 << 2) +#define gcvZONE_SIGNAL (1 << 3) + +/* Subzones of HAL Kernel */ +#define gcvZONE_KERNEL (1 << 4) +#define gcvZONE_VIDMEM (1 << 5) +#define gcvZONE_COMMAND (1 << 6) +#define gcvZONE_DRIVER (1 << 7) +#define gcvZONE_CMODEL (1 << 8) +#define gcvZONE_MMU (1 << 9) +#define gcvZONE_EVENT (1 << 10) +#define gcvZONE_DEVICE (1 << 11) +#define gcvZONE_DATABASE (1 << 12) +#define gcvZONE_INTERRUPT (1 << 13) +#define gcvZONE_POWER (1 << 14) +#define gcvZONE_ASYNC_COMMAND (1 << 15) +#define gcvZONE_ALLOCATOR (1 << 16) + +/* Subzones of HAL User */ +#define gcdZONE_HAL_API (1 << 4) +#define gcdZONE_BUFFER (1 << 5) +#define gcdZONE_VGBUFFER (1 << 6) +#define gcdZONE_SURFACE (1 << 7) +#define gcdZONE_INDEX (1 << 8) +#define gcdZONE_STREAM (1 << 9) +#define gcdZONE_TEXTURE (1 << 10) +#define gcdZONE_2D (1 << 11) +#define gcdZONE_3D (1 << 12) +#define gcdZONE_COMPILER (1 << 13) +#define gcdZONE_MEM (1 << 14) +#define gcdZONE_VERTEXARRAY (1 << 15) +#define gcdZONE_CL (1 << 16) +#define gcdZONE_VG (1 << 17) +#define gcdZONE_VX (1 << 18) +#define gcdZONE_UTILITY (1 << 19) +#define gcdZONE_RECT (1 << 20) +#define gcdZONE_BUFOBJ (1 << 21) +#define gcdZONE_PROFILER (1 << 22) +#define gcdZONE_SHADER (1 << 23) + +/****************************************************************************** + ******************************** Khronos API Zones *************************** + ******************************************************************************/ + +#define gcdZONE_API_EGL ((gctUINT32)1 << 28) +#define gcdZONE_API_ES11 ((gctUINT32)2 << 28) +#define gcdZONE_API_ES30 ((gctUINT32)3 << 28) +#define gcdZONE_API_GL40 ((gctUINT32)4 << 28) +#define gcdZONE_API_VG3D ((gctUINT32)5 << 28) +#define gcdZONE_API_CL ((gctUINT32)6 << 28) +#define gcdZONE_API_VX ((gctUINT32)7 << 28) +#define gcdZONE_API_VG ((gctUINT32)8 << 28) + +/****************************************************************************** + ************************* Subzones of Khronos API Zones ********************** + ******************************************************************************/ + +/* Subzones of EGL API */ +#define gcdZONE_EGL_API (gcdZONE_API_EGL | (1 << 0)) +#define gcdZONE_EGL_SURFACE (gcdZONE_API_EGL | (1 << 1)) +#define gcdZONE_EGL_CONTEXT (gcdZONE_API_EGL | (1 << 2)) +#define gcdZONE_EGL_CONFIG (gcdZONE_API_EGL | (1 << 3)) +#define gcdZONE_EGL_OS (gcdZONE_API_EGL | (1 << 4)) /* unused */ +#define gcdZONE_EGL_IMAGE (gcdZONE_API_EGL | (1 << 5)) +#define gcdZONE_EGL_SWAP (gcdZONE_API_EGL | (1 << 6)) +#define gcdZONE_EGL_INIT (gcdZONE_API_EGL | (1 << 7)) +#define gcdZONE_EGL_SYNC (gcdZONE_API_EGL | (1 << 
8)) +#define gcdZONE_EGL_COMPOSE (gcdZONE_API_EGL | (1 << 9)) /* unused */ +#define gcdZONE_EGL_RENDER_THREAD (gcdZONE_API_EGL | (1 << 10)) /* unused */ + +/* Subzones of ES11 API */ +#define gcdZONE_ES11_BUFFER (gcdZONE_API_ES11 | (1 << 0)) +#define gcdZONE_ES11_CLEAR (gcdZONE_API_ES11 | (1 << 1)) +#define gcdZONE_ES11_CLIP (gcdZONE_API_ES11 | (1 << 2)) +#define gcdZONE_ES11_CONTEXT (gcdZONE_API_ES11 | (1 << 3)) +#define gcdZONE_ES11_DRAW (gcdZONE_API_ES11 | (1 << 4)) +#define gcdZONE_ES11_ENABLE (gcdZONE_API_ES11 | (1 << 5)) +#define gcdZONE_ES11_EXTENTION (gcdZONE_API_ES11 | (1 << 6)) +#define gcdZONE_ES11_FOG (gcdZONE_API_ES11 | (1 << 7)) +#define gcdZONE_ES11_FRAGMENT (gcdZONE_API_ES11 | (1 << 8)) +#define gcdZONE_ES11_LIGHT (gcdZONE_API_ES11 | (1 << 9)) +#define gcdZONE_ES11_MATRIX (gcdZONE_API_ES11 | (1 << 10)) +#define gcdZONE_ES11_PIXEL (gcdZONE_API_ES11 | (1 << 11)) +#define gcdZONE_ES11_POLIGON (gcdZONE_API_ES11 | (1 << 12)) +#define gcdZONE_ES11_LINE (gcdZONE_API_ES11 | (1 << 13)) /* unused */ +#define gcdZONE_ES11_QUERY (gcdZONE_API_ES11 | (1 << 14)) +#define gcdZONE_ES11_TEXTURE (gcdZONE_API_ES11 | (1 << 15)) +#define gcdZONE_ES11_STATES (gcdZONE_API_ES11 | (1 << 16)) +#define gcdZONE_ES11_STREAM (gcdZONE_API_ES11 | (1 << 17)) +#define gcdZONE_ES11_VIEWPORT (gcdZONE_API_ES11 | (1 << 18)) +#define gcdZONE_ES11_SHADER (gcdZONE_API_ES11 | (1 << 19)) +#define gcdZONE_ES11_HASH (gcdZONE_API_ES11 | (1 << 20)) +#define gcdZONE_ES11_TRACE (gcdZONE_API_ES11 | (1 << 21)) + +/* Subzones of ES30 API */ +#define gcdZONE_ES30_TRACE (gcdZONE_API_ES30 | (1 << 0)) +#define gcdZONE_ES30_BUFFER (gcdZONE_API_ES30 | (1 << 1)) +#define gcdZONE_ES30_CLEAR (gcdZONE_API_ES30 | (1 << 2)) +#define gcdZONE_ES30_CODEC (gcdZONE_API_ES30 | (1 << 3)) +#define gcdZONE_ES30_CONTEXT (gcdZONE_API_ES30 | (1 << 4)) +#define gcdZONE_ES30_DEPTH (gcdZONE_API_ES30 | (1 << 5)) +#define gcdZONE_ES30_DEVICE (gcdZONE_API_ES30 | (1 << 6)) +#define gcdZONE_ES30_DRAW (gcdZONE_API_ES30 | (1 << 7)) +#define gcdZONE_ES30_FBO (gcdZONE_API_ES30 | (1 << 8)) +#define gcdZONE_ES30_PIXEL (gcdZONE_API_ES30 | (1 << 9)) +#define gcdZONE_ES30_SHADER (gcdZONE_API_ES30 | (1 << 10)) +#define gcdZONE_ES30_STATE (gcdZONE_API_ES30 | (1 << 11)) +#define gcdZONE_ES30_TEXTURE (gcdZONE_API_ES30 | (1 << 12)) +#define gcdZONE_ES30_UTILS (gcdZONE_API_ES30 | (1 << 13)) +#define gcdZONE_ES30_PROFILER (gcdZONE_API_ES30 | (1 << 14)) +#define gcdZONE_ES30_CORE (gcdZONE_API_ES30 | (1 << 15)) + +/* Subzones of GL40 API */ +#define gcdZONE_GL40_TRACE (gcdZONE_API_GL40 | (1 << 0)) +#define gcdZONE_GL40_BUFFER (gcdZONE_API_GL40 | (1 << 1)) +#define gcdZONE_GL40_CLEAR (gcdZONE_API_GL40 | (1 << 2)) /* unused */ +#define gcdZONE_GL40_CODEC (gcdZONE_API_GL40 | (1 << 3)) +#define gcdZONE_GL40_CONTEXT (gcdZONE_API_GL40 | (1 << 4)) +#define gcdZONE_GL40_DEPTH (gcdZONE_API_GL40 | (1 << 5)) +#define gcdZONE_GL40_DEVICE (gcdZONE_API_GL40 | (1 << 6)) +#define gcdZONE_GL40_DRAW (gcdZONE_API_GL40 | (1 << 7)) +#define gcdZONE_GL40_FBO (gcdZONE_API_GL40 | (1 << 8)) +#define gcdZONE_GL40_PIXEL (gcdZONE_API_GL40 | (1 << 9)) +#define gcdZONE_GL40_SHADER (gcdZONE_API_GL40 | (1 << 10)) +#define gcdZONE_GL40_STATE (gcdZONE_API_GL40 | (1 << 11)) +#define gcdZONE_GL40_TEXTURE (gcdZONE_API_GL40 | (1 << 12)) +#define gcdZONE_GL40_UTILS (gcdZONE_API_GL40 | (1 << 13)) +#define gcdZONE_GL40_PROFILER (gcdZONE_API_GL40 | (1 << 14)) +#define gcdZONE_GL40_CORE (gcdZONE_API_GL40 | (1 << 15)) +#define gcdZONE_GL40_FIXVERTEX (gcdZONE_API_GL40 | (1 << 16)) +#define gcdZONE_GL40_FIXFRAG 
(gcdZONE_API_GL40 | (1 << 17)) +#define gcdZONE_GL40_HASH (gcdZONE_API_GL40 | (1 << 18)) + +/* Subzones of VG3D API */ +#define gcdZONE_VG3D_CONTEXT (gcdZONE_API_VG3D | (1 << 0)) +#define gcdZONE_VG3D_DUMP (gcdZONE_API_VG3D | (1 << 1)) +#define gcdZONE_VG3D_EGL (gcdZONE_API_VG3D | (1 << 2)) +#define gcdZONE_VG3D_FONT (gcdZONE_API_VG3D | (1 << 3)) +#define gcdZONE_VG3D_HARDWARE (gcdZONE_API_VG3D | (1 << 4)) +#define gcdZONE_VG3D_IMAGE (gcdZONE_API_VG3D | (1 << 5)) +#define gcdZONE_VG3D_MASK (gcdZONE_API_VG3D | (1 << 6)) +#define gcdZONE_VG3D_MATRIX (gcdZONE_API_VG3D | (1 << 7)) +#define gcdZONE_VG3D_OBJECT (gcdZONE_API_VG3D | (1 << 8)) +#define gcdZONE_VG3D_PAINT (gcdZONE_API_VG3D | (1 << 9)) +#define gcdZONE_VG3D_PATH (gcdZONE_API_VG3D | (1 << 10)) +#define gcdZONE_VG3D_PROFILER (gcdZONE_API_VG3D | (1 << 11)) +#define gcdZONE_VG3D_SCANLINE (gcdZONE_API_VG3D | (1 << 12)) +#define gcdZONE_VG3D_SHADER (gcdZONE_API_VG3D | (1 << 13)) +#define gcdZONE_VG3D_TESSELLATOR (gcdZONE_API_VG3D | (1 << 14)) +#define gcdZONE_VG3D_VGU (gcdZONE_API_VG3D | (1 << 15)) + +/* Subzones of VG11 API */ +#define gcdZONE_VG_ARC (gcdZONE_API_VG | (1 << 0)) +#define gcdZONE_VG_CONTEXT (gcdZONE_API_VG | (1 << 1)) +#define gcdZONE_VG_DEBUG (gcdZONE_API_VG | (1 << 2)) +#define gcdZONE_VG_FILTER (gcdZONE_API_VG | (1 << 3)) +#define gcdZONE_VG_FORMAT (gcdZONE_API_VG | (1 << 4)) +#define gcdZONE_VG_IMAGE (gcdZONE_API_VG | (1 << 5)) +#define gcdZONE_VG_MAIN (gcdZONE_API_VG | (1 << 6)) +#define gcdZONE_VG_MASK (gcdZONE_API_VG | (1 << 7)) +#define gcdZONE_VG_MATRIX (gcdZONE_API_VG | (1 << 8)) +#define gcdZONE_VG_MEMORYMGR (gcdZONE_API_VG | (1 << 9)) +#define gcdZONE_VG_OBJECT (gcdZONE_API_VG | (1 << 10)) +#define gcdZONE_VG_PAINT (gcdZONE_API_VG | (1 << 11)) +#define gcdZONE_VG_PATH (gcdZONE_API_VG | (1 << 12)) +#define gcdZONE_VG_STATE (gcdZONE_API_VG | (1 << 13)) +#define gcdZONE_VG_STROKE (gcdZONE_API_VG | (1 << 14)) +#define gcdZONE_VG_TEXT (gcdZONE_API_VG | (1 << 15)) +#define gcdZONE_VG_VGU (gcdZONE_API_VG | (1 << 16)) + +/* Subzones of CL API */ +#define gcdZONE_CL_COMMAND (gcdZONE_API_CL | (1 << 0)) +#define gcdZONE_CL_CONTEXT (gcdZONE_API_CL | (1 << 1)) +#define gcdZONE_CL_DEVICE (gcdZONE_API_CL | (1 << 2)) +#define gcdZONE_CL_ENQUEUE (gcdZONE_API_CL | (1 << 3)) +#define gcdZONE_CL_EVENT (gcdZONE_API_CL | (1 << 4)) +#define gcdZONE_CL_EXT (gcdZONE_API_CL | (1 << 5)) +#define gcdZONE_CL_GL (gcdZONE_API_CL | (1 << 6)) +#define gcdZONE_CL_KERNEL (gcdZONE_API_CL | (1 << 7)) +#define gcdZONE_CL_MEM (gcdZONE_API_CL | (1 << 8)) +#define gcdZONE_CL_PLATFORM (gcdZONE_API_CL | (1 << 9)) +#define gcdZONE_CL_PROFILER (gcdZONE_API_CL | (1 << 10)) +#define gcdZONE_CL_PROGRAM (gcdZONE_API_CL | (1 << 11)) +#define gcdZONE_CL_SAMPLER (gcdZONE_API_CL | (1 << 12)) +#define gcdZONE_CL_COMMAND_BUFFER (gcdZONE_API_CL | (1 << 13)) + +/* Subzones of VX API */ +#define gcdZONE_VX_ARRAY (gcdZONE_API_VX | (1 << 0)) +#define gcdZONE_VX_BINARY (gcdZONE_API_VX | (1 << 1)) +#define gcdZONE_VX_CONTEXT (gcdZONE_API_VX | (1 << 2)) +#define gcdZONE_VX_CONV (gcdZONE_API_VX | (1 << 3)) +#define gcdZONE_VX_DELAY (gcdZONE_API_VX | (1 << 4)) +#define gcdZONE_VX_DIST (gcdZONE_API_VX | (1 << 5)) +#define gcdZONE_VX_GPULAYER (gcdZONE_API_VX | (1 << 6)) +#define gcdZONE_VX_GRAPH (gcdZONE_API_VX | (1 << 7)) +#define gcdZONE_VX_IMAGE (gcdZONE_API_VX | (1 << 8)) +#define gcdZONE_VX_INTERFACE (gcdZONE_API_VX | (1 << 9)) +#define gcdZONE_VX_KERNEL (gcdZONE_API_VX | (1 << 10)) +#define gcdZONE_VX_LAYER (gcdZONE_API_VX | (1 << 11)) +#define gcdZONE_VX_LUT 
(gcdZONE_API_VX | (1 << 12)) +#define gcdZONE_VX_MATRIX (gcdZONE_API_VX | (1 << 13)) +#define gcdZONE_VX_MEMORY (gcdZONE_API_VX | (1 << 14)) +#define gcdZONE_VX_METAFMT (gcdZONE_API_VX | (1 << 15)) +#define gcdZONE_VX_NODE (gcdZONE_API_VX | (1 << 16)) +#define gcdZONE_VX_OBJARRAY (gcdZONE_API_VX | (1 << 17)) +#define gcdZONE_VX_PARAM (gcdZONE_API_VX | (1 << 18)) +#define gcdZONE_VX_PROGRAM (gcdZONE_API_VX | (1 << 19)) +#define gcdZONE_VX_PYRAMID (gcdZONE_API_VX | (1 << 20)) +#define gcdZONE_VX_REF (gcdZONE_API_VX | (1 << 21)) +#define gcdZONE_VX_REMAP (gcdZONE_API_VX | (1 << 22)) +#define gcdZONE_VX_SCALAR (gcdZONE_API_VX | (1 << 23)) +#define gcdZONE_VX_TARGET (gcdZONE_API_VX | (1 << 24)) +#define gcdZONE_VX_TENSOR (gcdZONE_API_VX | (1 << 25)) +#define gcdZONE_VX_THRESHOLD (gcdZONE_API_VX | (1 << 26)) +#define gcdZONE_VX_SPINST (gcdZONE_API_VX | (1 << 27)) +#define gcdZONE_VX_SP (gcdZONE_API_VX | (1 << 28)) +#define gcdZONE_VX_OTHERS (gcdZONE_API_VX | (1 << 29)) + +/****************************************************************************** + ******************************** Utility Zones ******************************* + ******************************************************************************/ + +/* Value for Disabling All Subzones */ +#define gcdZONE_NONE 0xF0000000 + +/* Value for Enabling All Subzones */ +#define gcdZONE_ALL 0x0FFFFFFF + +/****************************************************************************** + ********************************** END *************************************** + ******************************************************************************/ + +#ifdef __cplusplus +} +#endif + +#endif /* __gc_hal_debug_zones_h_ */ + + diff --git a/unified-tina/inc/HAL/gc_hal_driver.h b/unified-tina/inc/HAL/gc_hal_driver.h new file mode 100644 index 0000000..a1ca34b --- /dev/null +++ b/unified-tina/inc/HAL/gc_hal_driver.h @@ -0,0 +1,16 @@ +/**************************************************************************** +* +* Copyright (c) 2005 - 2023 by Vivante Corp. All rights reserved. +* +* The material in this file is confidential and contains trade secrets +* of Vivante Corporation. This is proprietary information owned by +* Vivante Corporation. No part of this work may be disclosed, +* reproduced, copied, transmitted, or used in any way for any purpose, +* without the express written permission of Vivante Corporation. +* +*****************************************************************************/ + + +#include "shared/gc_hal_driver_shared.h" + + diff --git a/unified-tina/inc/HAL/gc_hal_dump.h b/unified-tina/inc/HAL/gc_hal_dump.h new file mode 100644 index 0000000..c252639 --- /dev/null +++ b/unified-tina/inc/HAL/gc_hal_dump.h @@ -0,0 +1,104 @@ +/**************************************************************************** +* +* Copyright (c) 2005 - 2023 by Vivante Corp. All rights reserved. +* +* The material in this file is confidential and contains trade secrets +* of Vivante Corporation. This is proprietary information owned by +* Vivante Corporation. No part of this work may be disclosed, +* reproduced, copied, transmitted, or used in any way for any purpose, +* without the express written permission of Vivante Corporation. +* +*****************************************************************************/ + + +#ifndef __gc_hal_dump_h_ +#define __gc_hal_dump_h_ + +/* + * gcdDUMP_KEY + * + * Set this to a string that appears in 'cat /proc//cmdline'. E.g. 'camera'. + * HAL will create dumps for the processes matching this key. 
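 *
 * Editorial note (illustrative, not part of the original header): because the
 * definition below is guarded by #ifndef, the key can also be supplied at
 * build time, e.g.
 *
 *     CFLAGS += -DgcdDUMP_KEY=\"camera\"
 *
 * so that only processes whose command line matches "camera" are dumped.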
+ */
+#ifndef gcdDUMP_KEY
+#define gcdDUMP_KEY "process"
+#endif
+
+/*
+ * gcdDUMP_PATH
+ *
+ * The dump file location. Some processes cannot write to the sdcard.
+ * Try apps' data dir, e.g. /data/data/com.android.launcher
+ */
+#ifndef gcdDUMP_PATH
+#if defined(ANDROID)
+# define gcdDUMP_PATH "/mnt/sdcard/"
+# else
+# define gcdDUMP_PATH "./"
+# endif
+#endif
+
+/*
+ * gcdDUMP_FILE_IN_KERNEL
+ *
+ * Default dump file for gcdDUMP_IN_KERNEL.
+ * The file will be written globally in kernel side.
+ * Can be overridden at runtime by debugfs:/gc/dump/dump_file
+ *
+ * 2 pseudo files:
+ * [dmesg]: means dump to kernel debug message.
+ * [ignored]: means dump ignored, nothing will be dumped.
+ */
+#ifndef gcdDUMP_FILE_IN_KERNEL
+#define gcdDUMP_FILE_IN_KERNEL "[dmesg]"
+#endif
+
+/*
+ * gcdDUMP_VERIFY_PER_DRAW
+ *
+ * Sub feature of gcdDUMP.
+ * When set to 1, verify RT and images (if used) for every single draw
+ * to ease simulation debug.
+ * Only valid for ES3 driver for now.
+ */
+#ifndef gcdDUMP_VERIFY_PER_DRAW
+#define gcdDUMP_VERIFY_PER_DRAW 0
+#endif
+
+/* Standalone dump features below. */
+
+/*
+ * gcdDUMP_FRAMERATE
+ * When set to a value other than zero, average frame rate will be dumped.
+ * The value set is the starting frame from which the average is calculated.
+ * This is needed because sometimes the first few frames are too slow to be
+ * included in the average. Frame count starts from 1.
+ */
+#ifndef gcdDUMP_FRAMERATE
+#define gcdDUMP_FRAMERATE 0
+#endif
+
+/*
+ * gcdDUMP_FRAME_TGA
+ *
+ * When set to a value other than 0, a dump of the frame specified by the
+ * value will be done into frame.tga. Frame count starts from 1.
+ */
+#ifndef gcdDUMP_FRAME_TGA
+#define gcdDUMP_FRAME_TGA 0
+#endif
+
+/*
+ * gcdDUMP_AHB_ACCESS
+ *
+ * When set to 1, a dump of all AHB register accesses will be printed to the
+ * kernel log.
+ */
+#ifndef gcdDUMP_AHB_ACCESS
+#define gcdDUMP_AHB_ACCESS 0
+#endif
+
+#endif /* __gc_hal_dump_h_ */
+
+
+
 diff --git a/unified-tina/inc/HAL/gc_hal_engine.h b/unified-tina/inc/HAL/gc_hal_engine.h
new file mode 100644
index 0000000..169e794
--- /dev/null
+++ b/unified-tina/inc/HAL/gc_hal_engine.h
@@ -0,0 +1,2069 @@
+/****************************************************************************
+*
+* Copyright (c) 2005 - 2023 by Vivante Corp. All rights reserved.
+*
+* The material in this file is confidential and contains trade secrets
+* of Vivante Corporation. This is proprietary information owned by
+* Vivante Corporation. No part of this work may be disclosed,
+* reproduced, copied, transmitted, or used in any way for any purpose,
+* without the express written permission of Vivante Corporation.
+* +*****************************************************************************/ + + +#ifndef __gc_hal_engine_h_ +#define __gc_hal_engine_h_ + +#include "gc_hal_types.h" +#include "gc_hal_enum.h" + + +#ifdef __cplusplus +extern "C" { +#endif + +typedef struct _gcsSURF_RESOLVE_ARGS { + gceHAL_ARG_VERSION version; + + union _gcsSURF_RESOLVE_ARGS_UNION { + struct _gcsSURF_RESOLVE_ARG_v2 { + gctBOOL yInverted; + gctBOOL directCopy; + gctBOOL resample; + gctBOOL bUploadTex; /* used for upload tex.*/ + gctBOOL bSwap; /* used for swap.*/ + gctBOOL visualizeDepth; /* convert depth to visible color */ + gcsPOINT srcOrigin; + gcsPOINT dstOrigin; + gcsPOINT rectSize; + gctUINT numSlices; + gceENGINE engine; /* 3DBlit engine */ + gctBOOL gpuOnly; /* need only try HW path.*/ + + gctBOOL dump; /* need dump for verify */ + gctBOOL srcSwizzle; /* src surface format swizzle information */ + gctBOOL dstSwizzle; /* dst surface format swizzle information */ + gctBOOL srcCompressed; /* src compressed format*/ + gctBOOL dstCompressed; /* dst compressed format*/ + gctUINT blitToSelf; + + gctUINT rotate; /* rotate of 3DBlt engine */ + } v2; + } uArgs; +} gcsSURF_RESOLVE_ARGS; + +typedef struct _gscBUFFER_VIEW { + gctUINT32 cmd; +} gcsBUFFER_VIEW, *gcsBUFFER_VIEW_PTR; + +typedef struct _gcsIMAGE_VIEW { + gctUINT32 cmd; +} gcsIMAGE_VIEW, *gcsIMAGE_VIEW_PTR; + +#if gcdENABLE_3D +/****************************************************************************** + ***************************** Object Declarations **************************** + ******************************************************************************/ + +typedef struct _gcoSTREAM *gcoSTREAM; +typedef struct _gcoVERTEX *gcoVERTEX; +typedef struct _gcoTEXTURE *gcoTEXTURE; +typedef struct _gcoINDEX *gcoINDEX; +typedef struct _gcsVERTEX_ATTRIBUTES *gcsVERTEX_ATTRIBUTES_PTR; +typedef struct _gcoVERTEXARRAY *gcoVERTEXARRAY; +typedef struct _gcoBUFOBJ *gcoBUFOBJ; + +# define gcdATTRIBUTE_COUNT 32 +# define gcdVERTEXARRAY_POOL_CAPACITY 32 + +# define gcdSTREAM_POOL_SIZE 128 +# define gcdSTREAM_GROUP_SIZE 16 +# define gcdSTREAM_SIGNAL_NUM \ + ((gcdSTREAM_POOL_SIZE + gcdSTREAM_GROUP_SIZE - 1) / gcdSTREAM_GROUP_SIZE) + +# define gcvPORGRAM_STAGE_GPIPE (gcvPROGRAM_STAGE_VERTEX_BIT | \ + gcvPROGRAM_STAGE_TCS_BIT | \ + gcvPROGRAM_STAGE_TES_BIT | \ + gcvPROGRAM_STAGE_GEOMETRY_BIT) + +/****************************************************************************** + ******************************** gcoHAL Object ******************************* + ******************************************************************************/ + +gceSTATUS +gcoHAL_QueryShaderCaps(IN gcoHAL Hal, + OUT gctUINT *UnifiedUniforms, + OUT gctUINT *VertUniforms, + OUT gctUINT *FragUniforms, + OUT gctUINT *Varyings, + OUT gctUINT *ShaderCoreCount, + OUT gctUINT *ThreadCount, + OUT gctUINT *VertInstructionCount, + OUT gctUINT *FragInstructionCount); + +gceSTATUS +gcoHAL_QuerySamplerBase(IN gcoHAL Hal, + OUT gctUINT32 *VertexCount, + OUT gctINT_PTR VertexBase, + OUT gctUINT32 *FragmentCount, + OUT gctINT_PTR FragmentBase); + +gceSTATUS +gcoHAL_QueryUniformBase(IN gcoHAL Hal, OUT gctUINT32 *VertexBase, + OUT gctUINT32 *FragmentBase); + +gceSTATUS +gcoHAL_QueryTextureCaps(IN gcoHAL Hal, + OUT gctUINT *MaxWidth, + OUT gctUINT *MaxHeight, + OUT gctUINT *MaxDepth, + OUT gctBOOL *Cubic, + OUT gctBOOL *NonPowerOfTwo, + OUT gctUINT *VertexSamplers, + OUT gctUINT *PixelSamplers); + +gceSTATUS +gcoHAL_QueryTextureMaxAniso(IN gcoHAL Hal, OUT gctUINT *MaxAnisoValue); + +gceSTATUS +gcoHAL_QueryStreamCaps(IN 
gcoHAL Hal, + OUT gctUINT32 *MaxAttributes, + OUT gctUINT32 *MaxStreamStride, + OUT gctUINT32 *NumberOfStreams, + OUT gctUINT32 *Alignment, + OUT gctUINT32 *MaxAttribOffset); + +/****************************************************************************** + ******************************** gcoSURF Object ****************************** + ******************************************************************************/ + +/*----------------------------------------------------------------------------*/ +/*--------------------------------- gcoSURF 3D --------------------------------*/ +typedef struct _gcsSURF_BLIT_ARGS { + gcoSURF srcSurface; + gctINT srcX, srcY, srcZ; + gctINT srcWidth, srcHeight, srcDepth; + gcoSURF dstSurface; + gctINT dstX, dstY, dstZ; + gctINT dstWidth, dstHeight, dstDepth; + gctBOOL xReverse; + gctBOOL yReverse; + gctBOOL scissorTest; + gcsRECT scissor; + gctUINT flags; + gctUINT srcNumSlice, dstNumSlice; + gctBOOL needDecode; + gctBOOL readSwap; + gctBOOL writeSwap; +} gcsSURF_BLIT_ARGS; + +typedef struct _gcsSURF_CLEAR_ARGS { + /* + ** Color to fill the color portion of the framebuffer when clear + ** is called. + */ + struct { + gcuVALUE r; + gcuVALUE g; + gcuVALUE b; + gcuVALUE a; + /* Color has multiple value type so we must specify it. */ + gceVALUE_TYPE valueType; + } color; + + gcuVALUE depth; + gctUINT stencil; + + gctUINT8 stencilMask; /* stencil bit-wise mask */ + gctBOOL depthMask; /* Depth Write Mask */ + gctUINT8 colorMask; /* 4-bit channel Mask: ABGR:MSB->LSB */ + gcsRECT_PTR clearRect; /* NULL means full clear */ + gceCLEAR flags; /* clear flags */ + + gctUINT32 offset; /* Offset in surface to cube/array/3D, obsolete in v2 version */ + +} gcsSURF_CLEAR_ARGS, *gcsSURF_CLEAR_ARGS_PTR; + +typedef struct _gscSURF_BLITDRAW_BLIT { + gcoSURF srcSurface; + gcoSURF dstSurface; + gcsRECT srcRect; + gcsRECT dstRect; + gceTEXTURE_FILTER filterMode; + gctBOOL xReverse; + gctBOOL yReverse; + gctBOOL scissorEnabled; + gcsRECT scissor; +} gscSURF_BLITDRAW_BLIT; + +typedef gceSTATUS (*gctSPLIT_DRAW_FUNC_PTR)(IN gctPOINTER gc, + IN gctPOINTER instantDraw, + IN gctPOINTER splitDrawInfo); + +typedef struct _gcsSPLIT_DRAW_INFO { + gceSPLIT_DRAW_TYPE splitDrawType; + gctSPLIT_DRAW_FUNC_PTR splitDrawFunc; + + union _gcsSPLIT_DRAW_UNION { + /* This path will split many draw.*/ + struct __gcsSPLIT_DRAW_INFO_TCS { + gctPOINTER indexPtr; + gctUINT indexPerPatch; + } info_tcs; + + /* This path split into two draw at most. + ** es11 path follow the old code, es30 path + ** add more info parameter to record + */ + struct __gcsSPLIT_DRAW_INFO_INDEX_FETCH { + gctSIZE_T instanceCount; + gctSIZE_T splitCount; + gcePRIMITIVE splitPrimMode; + gctSIZE_T splitPrimCount; + } info_index_fetch; + } u; +} gcsSPLIT_DRAW_INFO, *gcsSPLIT_DRAW_INFO_PTR; + +typedef struct _gscSURF_BLITDRAW_ARGS { + /* always the fist member */ + gceHAL_ARG_VERSION version; + + union _gcsSURF_BLITDRAW_ARGS_UNION { + struct _gscSURF_BLITDRAW_ARG_v1 { + /* Whether it's clear or blit operation, can be extended. 
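           (Editorial sketch, not part of the original header; `args` is a
            caller-owned variable and gcvHAL_ARG_VERSION_V1 is an assumed
            enumerator name of gceHAL_ARG_VERSION:

                gcsSURF_BLITDRAW_ARGS args;
                args.version = gcvHAL_ARG_VERSION_V1;
                args.uArgs.v1.type = ...;               set to a gceBLIT_TYPE value
                args.uArgs.v1.u.blit.srcSurface = src;
                args.uArgs.v1.u.blit.dstSurface = dst;

            The version field selects which layout of uArgs is valid, so new
            versions can be added without breaking existing callers.)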
*/ + gceBLIT_TYPE type; + + union _gscSURF_BLITDRAW_UNION { + gscSURF_BLITDRAW_BLIT blit; + + struct _gscSURF_BLITDRAW_CLEAR { + gcsSURF_CLEAR_ARGS clearArgs; + gcoSURF rtSurface; + gcoSURF dsSurface; + } clear; + } u; + } v1; + } uArgs; +} gcsSURF_BLITDRAW_ARGS; + +typedef struct _gcsSURF_BLITBLT_ARGS { + gctCONST_POINTER buf; + gceSURF_FORMAT format; + gctUINT32 stride; + gcoSURF dstSurf; + gcsPOINT dstOrigin; + gcsPOINT rectSize; + gctUINT32 dstOffset; +} gcsSURF_BLITBLT_ARGS; + +/* CPU Blit with format (including linear <-> tile) conversion*/ +gceSTATUS +gcoSURF_BlitCPU(gcsSURF_BLIT_ARGS *args); + +/* Copy a rectangular area with format conversion. */ +gceSTATUS +gcoSURF_CopyPixels(IN gcsSURF_VIEW *SrcView, + IN gcsSURF_VIEW *DstView, + IN gcsSURF_RESOLVE_ARGS *Args); + +/* Clear surface function. */ +gceSTATUS +gcoSURF_Clear(IN gcsSURF_VIEW *SurfView, + IN gcsSURF_CLEAR_ARGS_PTR ClearArgs); + +/* Preserve pixels from source. */ +gceSTATUS +gcoSURF_Preserve(IN gcoSURF SrcSurf, + IN gcoSURF DstSurf, + IN gcsRECT_PTR MaskRect); + +/* TO BE REMOVED */ +gceSTATUS +depr_gcoSURF_Resolve(IN gcoSURF SrcSurface, + IN gcoSURF DestSurface, + IN gctADDRESS DestAddress, + IN gctPOINTER DestBits, + IN gctINT DestStride, + IN gceSURF_TYPE DestType, + IN gceSURF_FORMAT DestFormat, + IN gctUINT DestWidth, + IN gctUINT DestHeight); + +gceSTATUS +depr_gcoSURF_ResolveRect(IN gcoSURF SrcSurface, + IN gcoSURF DstSurface, + IN gctADDRESS DstAddress, + IN gctPOINTER DstBits, + IN gctINT DstStride, + IN gceSURF_TYPE DstType, + IN gceSURF_FORMAT DstFormat, + IN gctUINT DstWidth, + IN gctUINT DstHeight, + IN gcsPOINT_PTR SrcOrigin, + IN gcsPOINT_PTR gcoSURF, + IN gcsPOINT_PTR RectSize); + +/* Resample surface. */ +gceSTATUS +gcoSURF_Resample(IN gcoSURF SrcSurf, + IN gcoSURF DstSurf, + IN gctBOOL sRGBDecode); + +/* Resolve rectangular area of a surface. */ +gceSTATUS +gcoSURF_ResolveRect(IN gcsSURF_VIEW *SrcView, + IN gcsSURF_VIEW *DstView, + IN gcsSURF_RESOLVE_ARGS *Args); + +gceSTATUS +gcoSURF_GetResolveAlignment(IN gcoSURF Surface, + OUT gctUINT *originX, + OUT gctUINT *originY, + OUT gctUINT *sizeX, + OUT gctUINT *sizeY); + +gceSTATUS +gcoSURF_IsHWResolveable(IN gcoSURF SrcSurf, + IN gcoSURF DstSurf, + IN gcsPOINT_PTR SrcOrigin, + IN gcsPOINT_PTR DstOrigin, + IN gcsPOINT_PTR RectSize); + +/* Set surface resolvability. 
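   Editorial usage sketch (not part of the original header; `surf` is an
   assumed, already-constructed gcoSURF handle):

       gceSTATUS status = gcoSURF_SetResolvability(surf, gcvTRUE);

   Passing gcvFALSE presumably steers callers away from the hardware resolve
   path for this surface.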
*/ +gceSTATUS +gcoSURF_SetResolvability(IN gcoSURF Surface, IN gctBOOL Resolvable); + +gceSTATUS +gcoSURF_IsRenderable(IN gcoSURF Surface); + +gceSTATUS +gcoSURF_IsFormatRenderableAsRT(IN gcoSURF Surface); + +gceSTATUS +gcoBUFOBJ_SetBindingTarget(IN gcoHAL Hal, IN gctUINT TargetIndex, IN gcoBUFOBJ BufObj); + +gceSTATUS +gcoBUFOBJ_GetFence(IN gcoBUFOBJ BufObj, IN gceFENCE_TYPE Type); + +gceSTATUS +gcoBUFOBJ_WaitFence(IN gcoBUFOBJ BufObj, IN gceFENCE_TYPE Type); + +gceSTATUS +gcoBUFOBJ_IsFenceEnabled(IN gcoBUFOBJ BufObj); + +gceSTATUS +gcoSURF_GetFence(IN gcoSURF Surface, IN gceFENCE_TYPE Type); + +gceSTATUS +gcoSURF_WaitFence(IN gcoSURF Surface); + +gceSTATUS +gcoSTREAM_GetFence(IN gcoSTREAM stream); + +gceSTATUS +gcoSTREAM_WaitFence(IN gcoSTREAM stream); + +gceSTATUS +gcoINDEX_GetFence(IN gcoINDEX Index); + +gceSTATUS +gcoINDEX_WaitFence(IN gcoINDEX Index, IN gceFENCE_TYPE Type); + +gceSTATUS +gcoSURF_DrawBlit(gcsSURF_VIEW *SrcView, + gcsSURF_VIEW *DstView, + gscSURF_BLITDRAW_BLIT *Args); + +gceSTATUS +gcoSURF_DrawBlitDepth(gcsSURF_VIEW *SrcView, + gcsSURF_VIEW *DstView, + gscSURF_BLITDRAW_BLIT *Args); + +gceSTATUS +gcoSURF_ComputeBlit(gcsSURF_VIEW *SrcView, + gcsSURF_VIEW *DstView, + gscSURF_BLITDRAW_BLIT *Args); + +/****************************************************************************** + ******************************* gcoINDEX Object ****************************** + ******************************************************************************/ + +/* Construct a new gcoINDEX object. */ +gceSTATUS +gcoINDEX_Construct(IN gcoHAL Hal, OUT gcoINDEX *Index); + +/* Destroy a gcoINDEX object. */ +gceSTATUS +gcoINDEX_Destroy(IN gcoINDEX Index); + +/* Lock index in memory. */ +gceSTATUS +gcoINDEX_Lock(IN gcoINDEX Index, + OUT gctADDRESS *Address, + OUT gctPOINTER *Memory); + +/* Unlock index that was previously locked with gcoINDEX_Lock. */ +gceSTATUS +gcoINDEX_Unlock(IN gcoINDEX Index); + +/* Upload index data into the memory. */ +gceSTATUS +gcoINDEX_Load(IN gcoINDEX Index, + IN gceINDEX_TYPE IndexType, + IN gctUINT32 IndexCount, + IN gctPOINTER IndexBuffer); + +/* Bind an index object to the hardware. */ +gceSTATUS +gcoINDEX_Bind(IN gcoINDEX Index, IN gceINDEX_TYPE Type); + +/* Bind an index object to the hardware. */ +gceSTATUS +gcoINDEX_BindOffset(IN gcoINDEX Index, + IN gceINDEX_TYPE Type, + IN gctSIZE_T Offset); + +/* Free existing index buffer. */ +gceSTATUS +gcoINDEX_Free(IN gcoINDEX Index); + +/* Upload data into an index buffer. */ +gceSTATUS +gcoINDEX_Upload(IN gcoINDEX Index, + IN gctCONST_POINTER Buffer, + IN gctSIZE_T Bytes); + +/* Upload data into an index buffer starting at an offset. */ +gceSTATUS +gcoINDEX_UploadOffset(IN gcoINDEX Index, + IN gctSIZE_T Offset, + IN gctCONST_POINTER Buffer, + IN gctSIZE_T Bytes); + +/*Merge index2 to index1 from 0, index2 must subset of inex1*/ +gceSTATUS +gcoINDEX_Merge(IN gcoINDEX Index1, IN gcoINDEX Index2); + +/*check if index buffer is enough for this draw*/ +gctBOOL +gcoINDEX_CheckRange(IN gcoINDEX Index, + IN gceINDEX_TYPE Type, + IN gctINT Count, + IN gctUINT32 Indices); + +/* Query the index capabilities. */ +gceSTATUS +gcoINDEX_QueryCaps(OUT gctBOOL *Index8, + OUT gctBOOL *Index16, + OUT gctBOOL *Index32, + OUT gctUINT *MaxIndex); + +/* Determine the index range in the current index buffer. */ +gceSTATUS +gcoINDEX_GetIndexRange(IN gcoINDEX Index, + IN gceINDEX_TYPE Type, + IN gctSIZE_T Offset, + IN gctUINT32 Count, + OUT gctUINT32 *MinimumIndex, + OUT gctUINT32 *MaximumIndex); + +/* Dynamic buffer management. 
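   Editorial usage sketch (not part of the original header; `index` is an
   assumed gcoINDEX handle and the sizes are arbitrary):

       gceSTATUS status = gcoINDEX_SetDynamic(index, 64 << 10, 4);

   i.e. reserve four 64 KB buffers which later gcoINDEX_UploadOffset() calls
   can fill incrementally (an assumption about the intended usage).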
*/ +gceSTATUS +gcoINDEX_SetDynamic(IN gcoINDEX Index, + IN gctSIZE_T Bytes, + IN gctUINT Buffers); + +gceSTATUS +gcoCLHardware_Construct(void); +/****************************************************************************** + ********************************* gco3D Object ******************************* + ******************************************************************************/ + +/* Construct a new gco3D object. */ +gceSTATUS +gco3D_Construct(IN gcoHAL Hal, IN gctBOOL Robust, OUT gco3D *Engine); + +/* Destroy an gco3D object. */ +gceSTATUS +gco3D_Destroy(IN gco3D Engine); + +/* Set 3D API type. */ +gceSTATUS +gco3D_SetAPI(IN gco3D Engine, IN gceAPI ApiType); + +/* Get 3D API type. */ +gceSTATUS +gco3D_GetAPI(IN gco3D Engine, OUT gceAPI *ApiType); + +gceSTATUS +gco3D_SetTarget(IN gco3D Engine, + IN gctUINT32 TargetIndex, + IN gcsSURF_VIEW *SurfView, + IN gctUINT32 LayerIndex); + +gceSTATUS +gco3D_UnsetTarget(IN gco3D Engine, IN gctUINT32 TargetIndex, IN gcoSURF Surface); + +gceSTATUS +gco3D_SetPSOutputMapping(IN gco3D Engine, IN gctINT32 *psOutputMapping); + +gceSTATUS +gco3D_SetRenderLayered(IN gco3D Engine, IN gctBOOL Enable, IN gctUINT MaxLayers); + +gceSTATUS +gco3D_SetShaderLayered(IN gco3D Engine, IN gctBOOL Enable); + +gceSTATUS +gco3D_IsProgramSwitched(IN gco3D Engine); + +/* Set depth buffer. */ +gceSTATUS +gco3D_SetDepth(IN gco3D Engine, IN gcsSURF_VIEW *SurfView); + +/* Unset depth buffer. */ +gceSTATUS +gco3D_UnsetDepth(IN gco3D Engine, IN gcoSURF Surface); + +/* Set viewport. */ +gceSTATUS +gco3D_SetViewport(IN gco3D Engine, + IN gctINT32 Left, + IN gctINT32 Top, + IN gctINT32 Right, + IN gctINT32 Bottom); + +/* Set scissors. */ +gceSTATUS +gco3D_SetScissors(IN gco3D Engine, + IN gctINT32 Left, + IN gctINT32 Top, + IN gctINT32 Right, + IN gctINT32 Bottom); + +/* Set clear color. */ +gceSTATUS +gco3D_SetClearColor(IN gco3D Engine, + IN gctUINT8 Red, + IN gctUINT8 Green, + IN gctUINT8 Blue, + IN gctUINT8 Alpha); + +/* Set fixed point clear color. */ +gceSTATUS +gco3D_SetClearColorX(IN gco3D Engine, + IN gctFIXED_POINT Red, + IN gctFIXED_POINT Green, + IN gctFIXED_POINT Blue, + IN gctFIXED_POINT Alpha); + +/* Set floating point clear color. */ +gceSTATUS +gco3D_SetClearColorF(IN gco3D Engine, + IN gctFLOAT Red, + IN gctFLOAT Green, + IN gctFLOAT Blue, + IN gctFLOAT Alpha); + +/* Set fixed point clear depth. */ +gceSTATUS +gco3D_SetClearDepthX(IN gco3D Engine, IN gctFIXED_POINT Depth); + +/* Set floating point clear depth. */ +gceSTATUS +gco3D_SetClearDepthF(IN gco3D Engine, IN gctFLOAT Depth); + +/* Set clear stencil. */ +gceSTATUS +gco3D_SetClearStencil(IN gco3D Engine, IN gctUINT32 Stencil); + +/* Set shading mode. */ +gceSTATUS +gco3D_SetShading(IN gco3D Engine, IN gceSHADING Shading); + +/* Set blending mode. */ +gceSTATUS +gco3D_EnableBlending(IN gco3D Engine, IN gctBOOL Enable); + +/* Set blending function. */ +gceSTATUS +gco3D_SetBlendFunction(IN gco3D Engine, + IN gceBLEND_UNIT Unit, + IN gceBLEND_FUNCTION FunctionRGB, + IN gceBLEND_FUNCTION FunctionAlpha); + +/* Set blending mode. 
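   Editorial sketch of a typical source-alpha blend setup (not part of the
   original header; `engine` is an assumed gco3D handle and the gcv* blend
   enumerator names are assumed to come from gc_hal_enum.h):

       gco3D_EnableBlending(engine, gcvTRUE);
       gco3D_SetBlendFunction(engine, gcvBLEND_SOURCE,
                              gcvBLEND_SOURCE_ALPHA, gcvBLEND_SOURCE_ALPHA);
       gco3D_SetBlendFunction(engine, gcvBLEND_TARGET,
                              gcvBLEND_INV_SOURCE_ALPHA, gcvBLEND_INV_SOURCE_ALPHA);
       gco3D_SetBlendMode(engine, gcvBLEND_MODE_ADD, gcvBLEND_MODE_ADD);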
*/ +gceSTATUS +gco3D_SetBlendMode(IN gco3D Engine, IN gceBLEND_MODE ModeRGB, IN gceBLEND_MODE ModeAlpha); + +/* Set blending mode for separate rt control */ +gceSTATUS +gco3D_EnableBlendingIndexed(IN gco3D Engine, IN gctUINT Index, IN gctBOOL Enable); + +/* Set blending function for separate rt control */ +gceSTATUS +gco3D_SetBlendFunctionIndexed(IN gco3D Engine, + IN gctUINT Index, + IN gceBLEND_UNIT Unit, + IN gceBLEND_FUNCTION FunctionRGB, + IN gceBLEND_FUNCTION FunctionAlpha); + +/* Set blending mode for separate rt control*/ +gceSTATUS +gco3D_SetBlendModeIndexed(IN gco3D Engine, + IN gctUINT Index, + IN gceBLEND_MODE ModeRGB, + IN gceBLEND_MODE ModeAlpha); + +/* Set blending color. */ +gceSTATUS +gco3D_SetBlendColor(IN gco3D Engine, + IN gctUINT Red, + IN gctUINT Green, + IN gctUINT Blue, + IN gctUINT Alpha); + +/* Set fixed point blending color. */ +gceSTATUS +gco3D_SetBlendColorX(IN gco3D Engine, + IN gctFIXED_POINT Red, + IN gctFIXED_POINT Green, + IN gctFIXED_POINT Blue, + IN gctFIXED_POINT Alpha); + +/* Set floating point blending color. */ +gceSTATUS +gco3D_SetBlendColorF(IN gco3D Engine, + IN gctFLOAT Red, + IN gctFLOAT Green, + IN gctFLOAT Blue, + IN gctFLOAT Alpha); + +/* Set culling mode. */ +gceSTATUS +gco3D_SetCulling(IN gco3D Engine, IN gceCULL Mode); + +/* Enable point size */ +gceSTATUS +gco3D_SetPointSizeEnable(IN gco3D Engine, IN gctBOOL Enable); + +/* Set point sprite */ +gceSTATUS +gco3D_SetPointSprite(IN gco3D Engine, IN gctBOOL Enable); + +/* Enable/Disable primitive-id. */ +gceSTATUS +gco3D_SetPrimitiveIdEnable(IN gco3D Engine, IN gctBOOL Enable); + +/* Set fill mode. */ +gceSTATUS +gco3D_SetFill(IN gco3D Engine, IN gceFILL Mode); + +/* Set depth compare mode. */ +gceSTATUS +gco3D_SetDepthCompare(IN gco3D Engine, IN gceCOMPARE Compare); + +/* Enable depth writing. */ +gceSTATUS +gco3D_EnableDepthWrite(IN gco3D Engine, IN gctBOOL Enable); + +/* Set depth mode. */ +gceSTATUS +gco3D_SetDepthMode(IN gco3D Engine, IN gceDEPTH_MODE Mode); + +/* Set depth range. */ +gceSTATUS +gco3D_SetDepthRangeX(IN gco3D Engine, + IN gceDEPTH_MODE Mode, + IN gctFIXED_POINT Near, + IN gctFIXED_POINT Far); + +/* Set depth range. */ +gceSTATUS +gco3D_SetDepthRangeF(IN gco3D Engine, IN gceDEPTH_MODE Mode, + IN gctFLOAT Near, IN gctFLOAT Far); + +/* Set last pixel enable */ +gceSTATUS +gco3D_SetLastPixelEnable(IN gco3D Engine, IN gctBOOL Enable); + +/* Set depth Bias and Scale */ +gceSTATUS +gco3D_SetDepthScaleBiasX(IN gco3D Engine, + IN gctFIXED_POINT DepthScale, + IN gctFIXED_POINT DepthBias); + +gceSTATUS +gco3D_SetDepthScaleBiasF(IN gco3D Engine, IN gctFLOAT DepthScale, IN gctFLOAT DepthBias); + +/* Set depth near and far clipping plane. */ +gceSTATUS +gco3D_SetDepthPlaneF(IN gco3D Engine, IN gctFLOAT Near, IN gctFLOAT Far); + +/* Enable or disable dithering. */ +gceSTATUS +gco3D_EnableDither(IN gco3D Engine, IN gctBOOL Enable); + +/* Set color write enable bits. */ +gceSTATUS +gco3D_SetColorWrite(IN gco3D Engine, IN gctUINT8 Enable); + +/* Set color write enable bits for separate rt control */ +gceSTATUS +gco3D_SetColorWriteIndexed(IN gco3D Engine, IN gctUINT Index, IN gctUINT8 Enable); + +/* Enable or disable early depth. */ +gceSTATUS +gco3D_SetEarlyDepth(IN gco3D Engine, IN gctBOOL Enable); + +/* Deprecated: Enable or disable all early depth operations. 
*/ +gceSTATUS +gco3D_SetAllEarlyDepthModes(IN gco3D Engine, IN gctBOOL Disable); + +gceSTATUS +gco3D_SetEarlyDepthFromAPP(IN gco3D Engine, IN gctBOOL EarlyDepthFromAPP); + +gceSTATUS +gco3D_SetRADepthWrite(IN gco3D Engine, IN gctBOOL Disable, + IN gctBOOL psReadZ, IN gctBOOL psReadW); + +gceSTATUS +gco3D_SetPatchVertices(IN gco3D Engine, IN gctINT PatchVertices); + +/* Switch dynamic early mode */ +gceSTATUS +gco3D_SwitchDynamicEarlyDepthMode(IN gco3D Engine); + +/* Set dynamic early mode */ +gceSTATUS +gco3D_DisableDynamicEarlyDepthMode(IN gco3D Engine, IN gctBOOL Disable); + +/* Enable or disable depth-only mode. */ +gceSTATUS +gco3D_SetDepthOnly(IN gco3D Engine, IN gctBOOL Enable); + +typedef struct _gcsSTENCIL_INFO *gcsSTENCIL_INFO_PTR; +typedef struct _gcsSTENCIL_INFO { + gceSTENCIL_MODE mode; + + gctUINT8 maskFront; + gctUINT8 maskBack; + gctUINT8 writeMaskFront; + gctUINT8 writeMaskBack; + + gctUINT8 referenceFront; + + gceCOMPARE compareFront; + gceSTENCIL_OPERATION passFront; + gceSTENCIL_OPERATION failFront; + gceSTENCIL_OPERATION depthFailFront; + + gctUINT8 referenceBack; + gceCOMPARE compareBack; + gceSTENCIL_OPERATION passBack; + gceSTENCIL_OPERATION failBack; + gceSTENCIL_OPERATION depthFailBack; +} gcsSTENCIL_INFO; + +/* Set stencil mode. */ +gceSTATUS +gco3D_SetStencilMode(IN gco3D Engine, IN gceSTENCIL_MODE Mode); + +/* Set stencil mask. */ +gceSTATUS +gco3D_SetStencilMask(IN gco3D Engine, IN gctUINT8 Mask); + +/* Set stencil back mask. */ +gceSTATUS +gco3D_SetStencilMaskBack(IN gco3D Engine, IN gctUINT8 Mask); + +/* Set stencil write mask. */ +gceSTATUS +gco3D_SetStencilWriteMask(IN gco3D Engine, IN gctUINT8 Mask); + +/* Set stencil back write mask. */ +gceSTATUS +gco3D_SetStencilWriteMaskBack(IN gco3D Engine, IN gctUINT8 Mask); + +/* Set stencil reference. */ +gceSTATUS +gco3D_SetStencilReference(IN gco3D Engine, + IN gctUINT8 Reference, + IN gctBOOL Front); + +/* Set stencil compare. */ +gceSTATUS +gco3D_SetStencilCompare(IN gco3D Engine, + IN gceSTENCIL_WHERE Where, + IN gceCOMPARE Compare); + +/* Set stencil operation on pass. */ +gceSTATUS +gco3D_SetStencilPass(IN gco3D Engine, + IN gceSTENCIL_WHERE Where, + IN gceSTENCIL_OPERATION Operation); + +/* Set stencil operation on fail. */ +gceSTATUS +gco3D_SetStencilFail(IN gco3D Engine, + IN gceSTENCIL_WHERE Where, + IN gceSTENCIL_OPERATION Operation); + +/* Set stencil operation on depth fail. */ +gceSTATUS +gco3D_SetStencilDepthFail(IN gco3D Engine, + IN gceSTENCIL_WHERE Where, + IN gceSTENCIL_OPERATION Operation); + +/* Set all stencil states in one blow. */ +gceSTATUS +gco3D_SetStencilAll(IN gco3D Engine, IN gcsSTENCIL_INFO_PTR Info); + +typedef struct _gcsALPHA_INFO *gcsALPHA_INFO_PTR; +typedef struct _gcsALPHA_INFO { + /* Alpha test states. */ + gctBOOL test; + gceCOMPARE compare; + gctUINT8 reference; + gctFLOAT floatReference; + + /* Alpha blending states. */ + gctBOOL blend[gcdMAX_DRAW_BUFFERS]; + + gceBLEND_FUNCTION srcFuncColor[gcdMAX_DRAW_BUFFERS]; + gceBLEND_FUNCTION srcFuncAlpha[gcdMAX_DRAW_BUFFERS]; + gceBLEND_FUNCTION trgFuncColor[gcdMAX_DRAW_BUFFERS]; + gceBLEND_FUNCTION trgFuncAlpha[gcdMAX_DRAW_BUFFERS]; + + gceBLEND_MODE modeColor[gcdMAX_DRAW_BUFFERS]; + gceBLEND_MODE modeAlpha[gcdMAX_DRAW_BUFFERS]; + + gctUINT32 color; + + gctBOOL anyBlendEnabled; +} gcsALPHA_INFO; + +/* Enable or disable alpha test. */ +gceSTATUS +gco3D_SetAlphaTest(IN gco3D Engine, IN gctBOOL Enable); + +/* Set alpha test compare. 
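   Editorial sketch of a classic "keep fragments with alpha above 0.5" setup
   (not part of the original header; `engine` is an assumed gco3D handle and
   gcvCOMPARE_GREATER is an assumed gceCOMPARE enumerator):

       gco3D_SetAlphaTest(engine, gcvTRUE);
       gco3D_SetAlphaCompare(engine, gcvCOMPARE_GREATER);
       gco3D_SetAlphaReferenceF(engine, 0.5f);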
*/ +gceSTATUS +gco3D_SetAlphaCompare(IN gco3D Engine, IN gceCOMPARE Compare); + +/* Set alpha test reference in unsigned integer. */ +gceSTATUS +gco3D_SetAlphaReference(IN gco3D Engine, IN gctUINT8 Reference, + IN gctFLOAT FloatReference); + +/* Set alpha test reference in fixed point. */ +gceSTATUS +gco3D_SetAlphaReferenceX(IN gco3D Engine, IN gctFIXED_POINT Reference); + +/* Set alpha test reference in floating point. */ +gceSTATUS +gco3D_SetAlphaReferenceF(IN gco3D Engine, IN gctFLOAT Reference); + +#if gcdALPHA_KILL_IN_SHADER +gceSTATUS +gco3D_SetAlphaKill(IN gco3D Engine, IN gctBOOL AlphaKill, + IN gctBOOL ColorKill); +# endif + +/* Enable/Disable anti-alias line. */ +gceSTATUS +gco3D_SetAntiAliasLine(IN gco3D Engine, IN gctBOOL Enable); + +/* Set texture slot for anti-alias line. */ +gceSTATUS +gco3D_SetAALineTexSlot(IN gco3D Engine, IN gctUINT TexSlot); + +/* Set anti-alias line width scale. */ +gceSTATUS +gco3D_SetAALineWidth(IN gco3D Engine, IN gctFLOAT Width); + +/* Draw a number of primitives. */ +gceSTATUS +gco3D_DrawPrimitives(IN gco3D Engine, + IN gcePRIMITIVE Type, + IN gctSIZE_T StartVertex, + IN gctSIZE_T PrimitiveCount); + +gceSTATUS +gco3D_DrawIndirectPrimitives(IN gco3D Engine, + IN gcePRIMITIVE Type, + IN gctBOOL DrawIndex, + IN gctINT BaseOffset, + IN gcoBUFOBJ BufObj); + +gceSTATUS +gco3D_MultiDrawIndirectPrimitives(IN gco3D Engine, + IN gcePRIMITIVE Type, + IN gctBOOL DrawIndex, + IN gctINT BaseOffset, + IN gctINT DrawCount, + IN gctINT Stride, + IN gcoBUFOBJ BufObj); + +gceSTATUS +gco3D_DrawInstancedPrimitives(IN gco3D Engine, + IN gcePRIMITIVE Type, + IN gctBOOL DrawIndex, + IN gctINT StartVertex, + IN gctSIZE_T StartIndex, + IN gctSIZE_T PrimitiveCount, + IN gctSIZE_T VertexCount, + IN gctSIZE_T InstanceCount); + +gceSTATUS +gco3D_DrawNullPrimitives(IN gco3D Engine); + +gceSTATUS +gco3D_DrawPrimitivesCount(IN gco3D Engine, + IN gcePRIMITIVE Type, + IN gctINT *StartVertex, + IN gctSIZE_T *VertexCount, + IN gctSIZE_T PrimitiveCount); + +/* Draw a number of primitives using offsets. */ +gceSTATUS +gco3D_DrawPrimitivesOffset(IN gco3D Engine, + IN gcePRIMITIVE Type, + IN gctINT32 StartOffset, + IN gctSIZE_T PrimitiveCount); + +/* Draw a number of indexed primitives. */ +gceSTATUS +gco3D_DrawIndexedPrimitives(IN gco3D Engine, + IN gcePRIMITIVE Type, + IN gctSIZE_T BaseVertex, + IN gctSIZE_T StartIndex, + IN gctSIZE_T PrimitiveCount); + +/* Draw a number of indexed primitives using offsets. */ +gceSTATUS +gco3D_DrawIndexedPrimitivesOffset(IN gco3D Engine, + IN gcePRIMITIVE Type, + IN gctINT32 BaseOffset, + IN gctINT32 StartOffset, + IN gctSIZE_T PrimitiveCount); + +/* Draw a element from pattern */ +gceSTATUS +gco3D_DrawPattern(IN gco3D Engine, IN gcsFAST_FLUSH_PTR FastFlushInfo); + +/* Enable or disable anti-aliasing. */ +gceSTATUS +gco3D_SetAntiAlias(IN gco3D Engine, IN gctBOOL Enable); + +/* Set msaa samples */ +gceSTATUS +gco3D_SetSamples(IN gco3D Engine, IN gctUINT32 Samples); + +/* Write data into the command buffer. */ +gceSTATUS +gco3D_WriteBuffer(IN gco3D Engine, IN gctCONST_POINTER Data, + IN gctSIZE_T Bytes, IN gctBOOL Aligned); + +/* Send sempahore and stall until sempahore is signalled. */ +gceSTATUS +gco3D_Semaphore(IN gco3D Engine, IN gceWHERE From, + IN gceWHERE To, IN gceHOW How); + +/* Explicitly flush pipeline */ +gceSTATUS +gco3D_FlushPipe(IN gco3D Engine); + +/* Explicitly flush shader L1 cache */ +gceSTATUS +gco3D_FlushSHL1Cache(IN gco3D Engine); + +/* Set the subpixels center. 
*/ +gceSTATUS +gco3D_SetCentroids(IN gco3D Engine, IN gctUINT32 Index, IN gctPOINTER Centroids); + +/* query msaa sample coordinates */ +gceSTATUS +gco3D_GetSampleCoords(IN gco3D Engine, + IN gctUINT32 SampleIndex, + IN gctBOOL yInverted, + OUT gctFLOAT_PTR Coords); + +gceSTATUS +gco3D_SetLogicOp(IN gco3D Engine, IN gctUINT8 Rop); + +gceSTATUS +gco3D_SetQuery(IN gco3D Engine, + IN gctADDRESS QueryHeader, + IN gceQueryType Type, + IN gctBOOL Enable, + IN gctUINT32 Index); + +gceSTATUS +gco3D_GetQuery(IN gco3D Engine, + IN gceQueryType Type, + IN gcsSURF_NODE_PTR Node, + IN gctUINT32 Size, + IN gctPOINTER Locked, + IN gctUINT32 IndexedId, + OUT gctINT32 *Index); + +gceSTATUS +gco3D_SetXfbHeader(IN gco3D Engine, IN gctADDRESS Physical); + +gceSTATUS +gco3D_SetXfbBuffer(IN gco3D Engine, + IN gctUINT32 Index, + IN gctADDRESS BufferAddr, + IN gctUINT32 BufferStride, + IN gctUINT32 BufferSize); + +gceSTATUS +gco3D_SetXfbCmd(IN gco3D Engine, IN gceXfbCmd Cmd); + +gceSTATUS +gco3D_SetRasterDiscard(IN gco3D Engine, IN gctBOOL Enable); + +gceSTATUS +gco3D_SetColorOutCount(IN gco3D Engine, IN gctUINT32 ColorOutCount); + +gceSTATUS +gco3D_SetColorCacheMode(IN gco3D Engine); + +gceSTATUS +gco3D_Set3DEngine(IN gco3D Engine); + +gceSTATUS +gco3D_UnSet3DEngine(IN gco3D Engine); + +gceSTATUS +gco3D_Get3DEngine(OUT gco3D *Engine); + +gceSTATUS +gco3D_QueryReset(IN gco3D Engine, OUT gctBOOL_PTR Innocent); + +/* OCL thread walker information. */ +typedef struct _gcsTHREAD_WALKER_INFO *gcsTHREAD_WALKER_INFO_PTR; +typedef struct _gcsTHREAD_WALKER_INFO { + gctUINT32 dimensions; + gctUINT32 traverseOrder; + gctUINT32 enableSwathX; + gctUINT32 enableSwathY; + gctUINT32 enableSwathZ; + gctUINT32 swathSizeX; + gctUINT32 swathSizeY; + gctUINT32 swathSizeZ; + gctUINT32 valueOrder; + + gctUINT32 globalSizeX; + gctUINT32 globalOffsetX; + gctUINT32 globalSizeY; + gctUINT32 globalOffsetY; + gctUINT32 globalSizeZ; + gctUINT32 globalOffsetZ; + + gctUINT32 globalScaleX; + gctUINT32 globalScaleY; + gctUINT32 globalScaleZ; + + gctUINT32 workGroupSizeX; + gctUINT32 workGroupCountX; + gctUINT32 workGroupSizeY; + gctUINT32 workGroupCountY; + gctUINT32 workGroupSizeZ; + gctUINT32 workGroupCountZ; + + gctUINT32 threadAllocation; + gctBOOL barrierUsed; + gctUINT32 memoryAccessFlag; /* same as gceMEMORY_ACCESS_FLAG */ + gctBOOL indirect; + gctUINT32 groupNumberUniformIdx; + gctADDRESS baseAddress; + gctBOOL bDual16; + gctUINT32 coreCount; + gctUINT32 coreIDs[gcdMAX_3DGPU_COUNT]; +} gcsTHREAD_WALKER_INFO; + +#if gcdENABLE_3D && gcdUSE_VX +/* VX thread walker parameters. 
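   Editorial note (an assumption about intended usage, not stated in this
   header): these mirror the OCL gcsTHREAD_WALKER_INFO fields above, with the
   global extent expressed as size x count per dimension. For example, a
   1024-wide two-dimensional dispatch with 64-item work groups would set

       workDim = 2;  workGroupSizeX = 64;  workGroupCountX = 1024 / 64 = 16;

   while globalOffsetX/globalScaleX position and stride the walk in X.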
*/ +typedef struct _gcsVX_THREAD_WALKER_PARAMETERS *gcsVX_THREAD_WALKER_PARAMETERS_PTR; + +typedef struct _gcsVX_THREAD_WALKER_PARAMETERS { + gctUINT32 valueOrder; + gctUINT32 workDim; + + gctUINT32 workGroupSizeX; + gctUINT32 workGroupCountX; + + gctUINT32 workGroupSizeY; + gctUINT32 workGroupCountY; + + gctUINT32 globalOffsetX; + gctUINT32 globalScaleX; + + gctUINT32 globalOffsetY; + gctUINT32 globalScaleY; + +#if gcdVX_OPTIMIZER > 1 + gctBOOL tileMode; +# endif +} gcsVX_THREAD_WALKER_PARAMETERS; + +typedef struct _gcsVX_IMAGE_INFO *gcsVX_IMAGE_INFO_PTR; + +typedef struct _gcsVX_IMAGE_INFO { + gctUINT32 format; + gctUINT32 rect[4]; + gctSIZE_T width; + gctSIZE_T height; + + /*arraySize, sliceSize is for imageArray / image3D */ + gctSIZE_T arraySize; + gctUINT64 sliceSize; + + gctUINT32 bpp; + gctUINT32 planes; + gctUINT32 componentCount; + gctBOOL isFloat; + + gctUINT32 uPixels; + gctUINT32 vPixels; + gceSURF_FORMAT internalFormat; + gctUINT32 border; + + /*vx_imagepatch_addressing_t == (gctUINT32 * 8) */ + void *base_addr[3]; + + gctUINT64 stride[3]; + + gctPOINTER logicals[3]; + gctADDRESS physicals[3]; + gctUINT32 bytes; + + gcsSURF_NODE_PTR nodes[3]; + + gctBOOL isVXC; +#if gcdVX_OPTIMIZER + gctUINT32 uniformData[3][4]; +# endif + /* the uniform data type of save nbg */ + gctUINT32 uniformSaveDataType; +} gcsVX_IMAGE_INFO; +typedef struct _gcsVX_DISTRIBUTION_INFO *gcsVX_DISTRIBUTION_INFO_PTR; + +typedef struct _gcsVX_DISTRIBUTION_INFO { + gctUINT32 logical; + gctADDRESS physical; + gctUINT32 bytes; + + gcsSURF_NODE_PTR node; +} gcsVX_DISTRIBUTION_INFO; +# endif + +/* Start OCL thread walker. */ +gceSTATUS +gco3D_InvokeThreadWalker(IN gco3D Engine, IN gcsTHREAD_WALKER_INFO_PTR Info); + +gceSTATUS +gco3D_GetClosestRenderFormat(IN gco3D Engine, + IN gceSURF_FORMAT InFormat, + OUT gceSURF_FORMAT *OutFormat); + +/* Set w clip and w plane limit value. */ +gceSTATUS +gco3D_SetWClipEnable(IN gco3D Engine, IN gctBOOL Enable); + +gceSTATUS +gco3D_GetWClipEnable(IN gco3D Engine, OUT gctBOOL *Enable); + +gceSTATUS +gco3D_SetWPlaneLimitF(IN gco3D Engine, IN gctFLOAT Value); + +gceSTATUS +gco3D_SetWPlaneLimitX(IN gco3D Engine, IN gctFIXED_POINT Value); + +gceSTATUS +gco3D_SetWPlaneLimit(IN gco3D Engine, IN gctFLOAT Value); + +gceSTATUS +gco3D_PrimitiveRestart(IN gco3D Engine, IN gctBOOL PrimitiveRestart); + +gceSTATUS +gco3D_LoadProgram(IN gco3D Engine, + IN gcePROGRAM_STAGE_BIT StageBits, + IN gctPOINTER ProgramState); + +gceSTATUS +gco3D_EnableAlphaToCoverage(IN gco3D Engine, IN gctBOOL Enable); + +gceSTATUS +gco3D_EnableSampleCoverage(IN gco3D Engine, IN gctBOOL Enable); + +gceSTATUS +gco3D_SetSampleCoverageValue(IN gco3D Engine, + IN gctFLOAT CoverageValue, + IN gctBOOL Invert); + +gceSTATUS +gco3D_EnableSampleMask(IN gco3D Engine, IN gctBOOL Enable); + +gceSTATUS +gco3D_SetSampleMask(IN gco3D Engine, IN gctUINT32 SampleMask); + +gceSTATUS +gco3D_EnableSampleShading(IN gco3D Engine, IN gctBOOL Enable); + +gceSTATUS +gco3D_SetMinSampleShadingValue(IN gco3D Engine, IN gctFLOAT MinSampleShadingValue); + +gceSTATUS +gco3D_SetSampleShading(IN gco3D Engine, + IN gctBOOL Enable, + IN gctBOOL IsSampleIn, + IN gctFLOAT SampleShadingValue); + +gceSTATUS +gco3D_EnableSampleMaskOut(IN gco3D Engine, IN gctBOOL Enable, IN gctINT SampleMaskLoc); + +/*----------------------------------------------------------------------------*/ +/*-------------------------- gco3D Fragment Processor ------------------------*/ + +/* Set the fragment processor configuration. 
*/ +gceSTATUS +gco3D_SetFragmentConfiguration(IN gco3D Engine, + IN gctBOOL ColorFromStream, + IN gctBOOL EnableFog, + IN gctBOOL EnableSmoothPoint, + IN gctUINT32 ClipPlanes); + +/* Enable/disable texture stage operation. */ +gceSTATUS +gco3D_EnableTextureStage(IN gco3D Engine, IN gctINT Stage, IN gctBOOL Enable); + +/* Program the channel enable masks for the color texture function. */ +gceSTATUS +gco3D_SetTextureColorMask(IN gco3D Engine, + IN gctINT Stage, + IN gctBOOL ColorEnabled, + IN gctBOOL AlphaEnabled); + +/* Program the channel enable masks for the alpha texture function. */ +gceSTATUS +gco3D_SetTextureAlphaMask(IN gco3D Engine, + IN gctINT Stage, + IN gctBOOL ColorEnabled, + IN gctBOOL AlphaEnabled); + +/* Program the constant fragment color. */ +gceSTATUS +gco3D_SetFragmentColorX(IN gco3D Engine, + IN gctFIXED_POINT Red, + IN gctFIXED_POINT Green, + IN gctFIXED_POINT Blue, + IN gctFIXED_POINT Alpha); + +gceSTATUS +gco3D_SetFragmentColorF(IN gco3D Engine, + IN gctFLOAT Red, + IN gctFLOAT Green, + IN gctFLOAT Blue, + IN gctFLOAT Alpha); + +/* Program the constant fog color. */ +gceSTATUS +gco3D_SetFogColorX(IN gco3D Engine, + IN gctFIXED_POINT Red, + IN gctFIXED_POINT Green, + IN gctFIXED_POINT Blue, + IN gctFIXED_POINT Alpha); + +gceSTATUS +gco3D_SetFogColorF(IN gco3D Engine, + IN gctFLOAT Red, + IN gctFLOAT Green, + IN gctFLOAT Blue, + IN gctFLOAT Alpha); + +/* Program the constant texture color. */ +gceSTATUS +gco3D_SetTetxureColorX(IN gco3D Engine, + IN gctINT Stage, + IN gctFIXED_POINT Red, + IN gctFIXED_POINT Green, + IN gctFIXED_POINT Blue, + IN gctFIXED_POINT Alpha); + +gceSTATUS +gco3D_SetTetxureColorF(IN gco3D Engine, + IN gctINT Stage, + IN gctFLOAT Red, + IN gctFLOAT Green, + IN gctFLOAT Blue, + IN gctFLOAT Alpha); + +/* Configure color texture function. */ +gceSTATUS +gco3D_SetColorTextureFunction(IN gco3D Engine, + IN gctINT Stage, + IN gceTEXTURE_FUNCTION Function, + IN gceTEXTURE_SOURCE Source0, + IN gceTEXTURE_CHANNEL Channel0, + IN gceTEXTURE_SOURCE Source1, + IN gceTEXTURE_CHANNEL Channel1, + IN gceTEXTURE_SOURCE Source2, + IN gceTEXTURE_CHANNEL Channel2, + IN gctINT Scale); + +/* Configure alpha texture function. */ +gceSTATUS +gco3D_SetAlphaTextureFunction(IN gco3D Engine, + IN gctINT Stage, + IN gceTEXTURE_FUNCTION Function, + IN gceTEXTURE_SOURCE Source0, + IN gceTEXTURE_CHANNEL Channel0, + IN gceTEXTURE_SOURCE Source1, + IN gceTEXTURE_CHANNEL Channel1, + IN gceTEXTURE_SOURCE Source2, + IN gceTEXTURE_CHANNEL Channel2, + IN gctINT Scale); + +/****************************************************************************** + ****************************** gcoTEXTURE Object ***************************** + ******************************************************************************/ + +typedef struct _gcsTEXTURE { + /* Addressing modes. */ + gceTEXTURE_ADDRESSING s; + gceTEXTURE_ADDRESSING t; + gceTEXTURE_ADDRESSING r; + + gceTEXTURE_SWIZZLE swizzle[gcvTEXTURE_COMPONENT_NUM]; + + /* Border color. */ + gctUINT8 border[gcvTEXTURE_COMPONENT_NUM]; + + /* Filters. */ + gceTEXTURE_FILTER minFilter; + gceTEXTURE_FILTER magFilter; + gceTEXTURE_FILTER mipFilter; + gctUINT anisoFilter; + + /* Level of detail. 
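       (Editorial note, following the usual GL texture-LOD interpretation
        rather than anything stated in this header: the effective level of
        detail is clamp(computedLod + lodBias, lodMin, lodMax), further
        limited by the baseLevel/maxLevel fields below.)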
*/ + gctFLOAT lodBias; + gctFLOAT lodMin; + gctFLOAT lodMax; + + /* base/max level */ + gctINT32 baseLevel; + gctINT32 maxLevel; + + /* depth texture comparison */ + gceTEXTURE_COMPARE_MODE compareMode; + gceCOMPARE compareFunc; + + gceTEXTURE_DS_TEX_MODE dsTextureMode; + + gceTEXTURE_DS_MODE dsMode; + + /* sRGB decode */ + gceTEXTURE_SRGBDECODE sRGB; + + gcuVALUE borderColor[4]; + gctBOOL descDirty; + + /* texture stage */ + gctINT stage; +} gcsTEXTURE, *gcsTEXTURE_PTR; + +typedef struct _gcsTEXTURE_BINDTEXTS_ARGS { + /* must be the first member */ + gceHAL_ARG_VERSION version; + +} gcsTEXTURE_BINDTEXTS_ARGS; + +/* Construct a new gcoTEXTURE object. */ +gceSTATUS +gcoTEXTURE_Construct(IN gcoHAL Hal, OUT gcoTEXTURE *Texture); + +/* Construct a new gcoTEXTURE object with type information. */ +gceSTATUS +gcoTEXTURE_ConstructEx(IN gcoHAL Hal, IN gceTEXTURE_TYPE Type, OUT gcoTEXTURE *Texture); + +/* Construct a new sized gcoTEXTURE object. */ +gceSTATUS +gcoTEXTURE_ConstructSized(IN gcoHAL Hal, + IN gceSURF_FORMAT Format, + IN gceTILING Tiling, + IN gctUINT Width, + IN gctUINT Height, + IN gctUINT Depth, + IN gctUINT Faces, + IN gctUINT MipMapCount, + IN gcePOOL Pool, + OUT gcoTEXTURE *Texture); + +/* Destroy an gcoTEXTURE object. */ +gceSTATUS +gcoTEXTURE_Destroy(IN gcoTEXTURE Texture); + +/* Check src and dst format support of 3D Blit. */ +gceSTATUS +gcoTEXTURE_CheckHWSupportedImage( + IN gceSURF_FORMAT srcFormat, + IN gceSURF_FORMAT dstFormat +); + +/* Upload data to an gcoTEXTURE object. */ +gceSTATUS +gcoTEXTURE_Upload(IN gcoTEXTURE Texture, + IN gctINT MipMap, + IN gceTEXTURE_FACE Face, + IN gctSIZE_T Width, + IN gctSIZE_T Height, + IN gctUINT Slice, + IN gctCONST_POINTER Memory, + IN gctSIZE_T Stride, + IN gceSURF_FORMAT Format, + IN gceSURF_COLOR_SPACE SrcColorSpace); + +/* Upload data to an gcoTEXTURE object. */ +gceSTATUS +gcoTEXTURE_UploadSub(IN gcoTEXTURE Texture, + IN gctINT MipMap, + IN gceTEXTURE_FACE Face, + IN gctSIZE_T X, + IN gctSIZE_T Y, + IN gctSIZE_T Width, + IN gctSIZE_T Height, + IN gctUINT Slice, + IN gctCONST_POINTER Memory, + IN gctSIZE_T Stride, + IN gceSURF_FORMAT Format, + IN gceSURF_COLOR_SPACE SrcColorSpace, + IN gctADDRESS PhysicalAddress); + +/* Upload YUV data to an gcoTEXTURE object. */ +gceSTATUS +gcoTEXTURE_UploadYUV(IN gcoTEXTURE Texture, + IN gceTEXTURE_FACE Face, + IN gctUINT Width, + IN gctUINT Height, + IN gctUINT Slice, + IN gctPOINTER Memory[3], + IN gctINT Stride[3], + IN gceSURF_FORMAT Format); + +/* Upload compressed data to an gcoTEXTURE object. */ +gceSTATUS +gcoTEXTURE_UploadCompressed(IN gcoTEXTURE Texture, + IN gctINT MipMap, + IN gceTEXTURE_FACE Face, + IN gctSIZE_T Width, + IN gctSIZE_T Height, + IN gctUINT Slice, + IN gctCONST_POINTER Memory, + IN gctSIZE_T Bytes); + +/* Upload compressed sub data to an gcoTEXTURE object. */ +gceSTATUS +gcoTEXTURE_UploadCompressedSub(IN gcoTEXTURE Texture, + IN gctINT MipMap, + IN gceTEXTURE_FACE Face, + IN gctSIZE_T XOffset, + IN gctSIZE_T YOffset, + IN gctSIZE_T Width, + IN gctSIZE_T Height, + IN gctUINT Slice, + IN gctCONST_POINTER Memory, + IN gctSIZE_T Size); + +/* Get gcoSURF object for a mipmap level. */ +gceSTATUS +gcoTEXTURE_GetMipMap(IN gcoTEXTURE Texture, IN gctUINT MipMap, OUT gcoSURF *Surface); + +/* Get gcoSURF object for a mipmap level and face offset. 
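/*
 * Editor's note -- illustrative sketch only, not part of the original patch.
 * It shows one plausible way the gcoTEXTURE construction/upload entry points
 * declared above compose: build a sized texture, upload mip level 0 from
 * linear client memory, then release it. The HAL handle, surface format,
 * pool and pixel data are assumed to come from the caller; gcvLINEAR,
 * gcvFACE_NONE and gcvSURF_COLOR_SPACE_LINEAR are declared in gc_hal_enum.h
 * further down in this patch, and gcvNULL/gcvSTATUS_OK come from the base
 * type headers not shown in this hunk. The Depth/Faces conventions for a
 * plain 2D texture are assumptions; real error handling is elided.
 */
static gceSTATUS
upload_level0_sketch(gcoHAL Hal, gceSURF_FORMAT Format, gcePOOL Pool,
                     gctCONST_POINTER Pixels, gctSIZE_T Stride,
                     gctUINT Width, gctUINT Height)
{
    gcoTEXTURE texture = gcvNULL;
    gceSTATUS  status;

    /* One 2D mip level: Depth = 1, no cube faces (assumed convention). */
    status = gcoTEXTURE_ConstructSized(Hal, Format, gcvLINEAR,
                                       Width, Height,
                                       1 /* Depth */, 0 /* Faces */,
                                       1 /* MipMapCount */, Pool, &texture);
    if (status != gcvSTATUS_OK)
        return status;

    /* Fill mip level 0 from linear client memory. */
    status = gcoTEXTURE_Upload(texture, 0 /* MipMap */, gcvFACE_NONE,
                               Width, Height, 0 /* Slice */,
                               Pixels, Stride, Format,
                               gcvSURF_COLOR_SPACE_LINEAR);

    /* A real caller would keep the texture and bind it with
     * gcoTEXTURE_BindTexture() before drawing; destroyed here only to keep
     * the sketch self-contained. */
    gcoTEXTURE_Destroy(texture);
    return status;
}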
*/ +gceSTATUS +gcoTEXTURE_GetMipMapFace(IN gcoTEXTURE Texture, + IN gctUINT MipMap, + IN gceTEXTURE_FACE Face, + OUT gcoSURF *Surface, + OUT gctSIZE_T_PTR Offset); + +gceSTATUS +gcoTEXTURE_GetMipMapSlice(IN gcoTEXTURE Texture, + IN gctUINT MipMap, + IN gctUINT Slice, + OUT gcoSURF *Surface, + OUT gctSIZE_T_PTR Offset); + +gceSTATUS +gcoTEXTURE_AddMipMap(IN gcoTEXTURE Texture, + IN gctINT Level, + IN gctINT InternalFormat, + IN gceSURF_FORMAT Format, + IN gctSIZE_T Width, + IN gctSIZE_T Height, + IN gctSIZE_T Depth, + IN gctUINT Faces, + IN gcePOOL Pool, + IN gctBOOL Filterable, + OUT gcoSURF *Surface); + +gceSTATUS +gcoTEXTURE_AddMipMapEx(IN gcoTEXTURE Texture, + IN gctINT Level, + IN gctINT InternalFormat, + IN gceSURF_FORMAT Format, + IN gctSIZE_T Width, + IN gctSIZE_T Height, + IN gctSIZE_T Depth, + IN gctUINT Faces, + IN gcePOOL Pool, + IN gctUINT32 Samples, + IN gctBOOL Protected, + IN gctBOOL Filterable, + OUT gcoSURF *Surface); + +gceSTATUS +gcoTEXTURE_AddMipMapFromClient(IN gcoTEXTURE Texture, + IN gctINT Level, + IN gcoSURF Surface); + +gceSTATUS +gcoTEXTURE_AddMipMapFromSurface(IN gcoTEXTURE Texture, + IN gctINT Level, + IN gcoSURF Surface); + +gceSTATUS +gcoTEXTURE_LockMipMap(IN gcoTEXTURE Texture, + IN gctUINT MipMap, + OPTIONAL OUT gctADDRESS *Address, + OPTIONAL OUT gctPOINTER *Memory); + +gceSTATUS +gcoTEXTURE_SetEndianHint(IN gcoTEXTURE Texture, IN gceENDIAN_HINT EndianHint); + +gceSTATUS +gcoTEXTURE_SetSurfType(IN gcoTEXTURE Texture, IN gceSURF_TYPE type); + +gceSTATUS +gcoTEXTURE_Disable(IN gcoHAL Hal, IN gctINT Sampler, IN gctBOOL DefaultInteger); + +gceSTATUS +gcoTEXTURE_Clear(IN gcoTEXTURE Texture, IN gctINT MipMap); + +gceSTATUS +gcoTEXTURE_Flush(IN gcoTEXTURE Texture); + +gceSTATUS +gcoTEXTURE_FlushVS(IN gcoTEXTURE Texture); + +gceSTATUS +gcoTEXTURE_QueryCaps(IN gcoHAL Hal, + OUT gctUINT *MaxWidth, + OUT gctUINT *MaxHeight, + OUT gctUINT *MaxDepth, + OUT gctBOOL *Cubic, + OUT gctBOOL *NonPowerOfTwo, + OUT gctUINT *VertexSamplers, + OUT gctUINT *PixelSamplers); + +gceSTATUS +gcoTEXTURE_GetClosestFormat(IN gcoHAL Hal, + IN gceSURF_FORMAT InFormat, + OUT gceSURF_FORMAT *OutFormat); + +gceSTATUS +gcoTEXTURE_GetClosestFormatEx(IN gcoHAL Hal, + IN gceSURF_FORMAT InFormat, + IN gceTEXTURE_TYPE TextureType, + OUT gceSURF_FORMAT *OutFormat); + +gceSTATUS +gcoTEXTURE_GetFormatInfo(IN gcoTEXTURE Texture, + IN gctINT preferLevel, + OUT gcsSURF_FORMAT_INFO_PTR *TxFormatInfo); + +gceSTATUS +gcoTEXTURE_GetTextureFormatName(IN gcsSURF_FORMAT_INFO_PTR TxFormatInfo, + OUT gctCONST_STRING *TxName); + +gceSTATUS +gcoTEXTURE_RenderIntoMipMap(IN gcoTEXTURE Texture, IN gctINT Level); + +gceSTATUS +gcoTEXTURE_RenderIntoMipMap2(IN gcoTEXTURE Texture, IN gctINT Level, IN gctBOOL Sync); + +gceSTATUS +gcoTEXTURE_IsRenderable(IN gcoTEXTURE Texture, IN gctUINT Level); + +gceSTATUS +gcoTEXTURE_IsComplete(IN gcoTEXTURE Texture, + IN gcsTEXTURE_PTR Info, + IN gctINT BaseLevel, + IN gctINT MaxLevel); + +gceSTATUS +gcoTEXTURE_CheckTexLevel0Attrib(IN gcoTEXTURE Texture, + IN gctINT MaxLevel, + IN gctINT usedLevel); + +gceSTATUS +gcoTEXTURE_BindTexture(IN gcoTEXTURE Texture, + IN gctINT Target, + IN gctINT Sampler, + IN gcsTEXTURE_PTR Info); + +gceSTATUS +gcoTEXTURE_BindTextureEx(IN gcoTEXTURE Texture, + IN gctINT Target, + IN gctINT Sampler, + IN gcsTEXTURE_PTR Info, + IN gctINT textureLayer); + +gceSTATUS +gcoTEXTURE_BindTextureDesc(IN gcoTEXTURE Texture, + IN gctINT Sampler, + IN gcsTEXTURE_PTR Info, + IN gctINT TextureLayer); + +gceSTATUS +gcoTEXTURE_SetDescDirty(IN gcoTEXTURE Texture); + +gceSTATUS 
+gcoTEXTURE_InitParams(IN gcoHAL Hal, IN gcsTEXTURE_PTR TexParams); + +gceSTATUS +gcoTEXTURE_SetDepthTextureFlag(IN gcoTEXTURE Texture, IN gctBOOL unsized); + +gceSTATUS +gcoTEXTURE_SetSpecialSwap(IN gcoTEXTURE Texture, IN gctBOOL needSwap); + +gceSTATUS +gcoTEXTURE_BindTextureTS(IN gcsTEXTURE_BINDTEXTS_ARGS *args); + +gceSTATUS +gcoTEXTURE_GenerateMipMap(IN gcoTEXTURE Texture, + IN gctINT BaseLevel, + IN gctINT MaxLevel, + IN gctBOOL sRGBDecode); + +/****************************************************************************** + ****************************** gcoSTREAM Object ****************************** + ******************************************************************************/ + +gceSTATUS +gcoSTREAM_Construct(IN gcoHAL Hal, OUT gcoSTREAM *Stream); + +gceSTATUS +gcoSTREAM_Destroy(IN gcoSTREAM Stream); + +gceSTATUS +gcoSTREAM_Upload(IN gcoSTREAM Stream, + IN gctCONST_POINTER Buffer, + IN gctSIZE_T Offset, + IN gctSIZE_T Bytes, + IN gctBOOL Dynamic); + +gceSTATUS +gcoSTREAM_ReAllocBufNode(IN gcoSTREAM Stream); + +gceSTATUS +gcoSTREAM_SetStride(IN gcoSTREAM Stream, IN gctUINT32 Stride); + +gceSTATUS +gcoSTREAM_Node(IN gcoSTREAM Stream, OUT gcsSURF_NODE_PTR *Node); + +gceSTATUS +gcoSTREAM_Lock(IN gcoSTREAM Stream, + OUT gctPOINTER *Logical, + OUT gctADDRESS *Physical); + +gceSTATUS +gcoSTREAM_Unlock(IN gcoSTREAM Stream); + +gceSTATUS +gcoSTREAM_Reserve(IN gcoSTREAM Stream, IN gctSIZE_T Bytes); + +gceSTATUS +gcoSTREAM_Flush(IN gcoSTREAM Stream); + +typedef struct _gcsSTREAM_INFO { + gctUINT index; + gceVERTEX_FORMAT format; + gctBOOL normalized; + gctUINT components; + gctSIZE_T size; + gctCONST_POINTER data; + gctUINT stride; +} gcsSTREAM_INFO, *gcsSTREAM_INFO_PTR; + +gceSTATUS +gcoSTREAM_CPUCacheOperation(IN gcoSTREAM Stream, IN gceCACHEOPERATION Operation); + +gceSTATUS +gcoSTREAM_CPUCacheOperation_Range(IN gcoSTREAM Stream, + IN gctSIZE_T Offset, + IN gctSIZE_T Length, + IN gceCACHEOPERATION Operation); + +/****************************************************************************** + ******************************* gcoVERTEX Object ***************************** + ******************************************************************************/ + +typedef struct _gcsVERTEX_ATTRIBUTES { + gceVERTEX_FORMAT format; + gctBOOL normalized; + gctUINT32 components; + gctSIZE_T size; + gctUINT32 stream; + gctUINT32 offset; + gctUINT32 stride; +} gcsVERTEX_ATTRIBUTES; + +gceSTATUS +gcoVERTEX_Construct(IN gcoHAL Hal, OUT gcoVERTEX *Vertex); + +gceSTATUS +gcoVERTEX_Destroy(IN gcoVERTEX Vertex); + +gceSTATUS +gcoVERTEX_Reset(IN gcoVERTEX Vertex); + +gceSTATUS +gcoVERTEX_EnableAttribute(IN gcoVERTEX Vertex, + IN gctUINT32 Index, + IN gceVERTEX_FORMAT Format, + IN gctBOOL Normalized, + IN gctUINT32 Components, + IN gcoSTREAM Stream, + IN gctUINT32 Offset, + IN gctUINT32 Stride); + +gceSTATUS +gcoVERTEX_DisableAttribute(IN gcoVERTEX Vertex, IN gctUINT32 Index); + +gceSTATUS +gcoVERTEX_Bind(IN gcoVERTEX Vertex); + +/****************************************************************************** + **** gcoVERTEXARRAY Object ***************************************************/ + +typedef struct _gcsATTRIBUTE { + /* Enabled. */ + gctBOOL enable; + + /* Number of components. */ + gctINT size; + + /* Attribute format. */ + gceVERTEX_FORMAT format; + + /* Flag whether the attribute is normalized or not. */ + gctBOOL normalized; + + /* Stride of the component. 
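/*
 * Editor's note -- illustrative sketch only, not part of the original patch.
 * It ties the gcoSTREAM and gcoVERTEX declarations above together: upload a
 * packed float3 position array into a stream, describe it as attribute 0,
 * and bind it. gcvVERTEX_FLOAT is declared in gc_hal_enum.h later in this
 * patch; gcvNULL/gcvFALSE/gcvSTATUS_OK come from the base type headers not
 * shown in this hunk. Error handling is reduced to the minimum.
 */
static gceSTATUS
bind_positions_sketch(gcoHAL Hal, const float *Positions, gctSIZE_T Bytes)
{
    gcoSTREAM  stream = gcvNULL;
    gcoVERTEX  vertex = gcvNULL;
    gctUINT32  stride = (gctUINT32)(3 * sizeof(float));
    gceSTATUS  status;

    status = gcoSTREAM_Construct(Hal, &stream);
    if (status != gcvSTATUS_OK)
        return status;

    /* Static upload of the whole buffer at offset 0. */
    gcoSTREAM_Upload(stream, Positions, 0, Bytes, gcvFALSE /* Dynamic */);
    gcoSTREAM_SetStride(stream, stride);

    /* Attribute 0: three non-normalized floats per vertex. */
    status = gcoVERTEX_Construct(Hal, &vertex);
    if (status == gcvSTATUS_OK)
    {
        gcoVERTEX_EnableAttribute(vertex, 0, gcvVERTEX_FLOAT, gcvFALSE,
                                  3 /* Components */, stream,
                                  0 /* Offset */, stride);
        status = gcoVERTEX_Bind(vertex);
        gcoVERTEX_Destroy(vertex);
    }

    gcoSTREAM_Destroy(stream);
    return status;
}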
*/ + gctSIZE_T stride; + + /* Divisor of the attribute */ + gctUINT divisor; + + /* Offset of the attribute */ + gctUINT offset; + + /* Pointer to the attribute data. */ + gctCONST_POINTER pointer; + + /* Stream object owning the attribute data. */ + gcoBUFOBJ stream; + + /* Generic values for attribute. */ + gctFLOAT genericValue[4]; + + /* Generic size for attribute. */ + gctINT genericSize; + + /* Vertex shader linkage. */ + gctUINT linkage; + +#if gcdUSE_WCLIP_PATCH + /* Does it hold positions? */ + gctBOOL isPosition; +#endif + + /* Index to vertex array */ + gctINT arrayIdx; + gctINT arrayLoc[32]; + + gceATTRIB_SCHEME convertScheme; + + /* Pointer to the temporary buffer to be freed */ + gcoBUFOBJ tempStream; + + /* Pointer to the temporary memory to be freed */ + gctCONST_POINTER tempMemory; +} gcsATTRIBUTE, *gcsATTRIBUTE_PTR; + +typedef struct _gcsVERTEXARRAY { + /* Enabled. */ + gctBOOL enable; + + /* Number of components. */ + gctINT size; + + /* Attribute format. */ + gceVERTEX_FORMAT format; + + /* Flag whether the attribute is normalized or not. */ + gctBOOL normalized; + + /* Stride of the component. */ + gctUINT stride; + + /* Divisor of the attribute */ + gctUINT divisor; + + /* Pointer to the attribute data. */ + gctCONST_POINTER pointer; + + /* Stream object owning the attribute data. */ + gcoSTREAM stream; + + /* Generic values for attribute. */ + gctFLOAT genericValue[4]; + + /* Generic size for attribute. */ + gctINT genericSize; + + /* Vertex shader linkage. */ + gctUINT linkage; + + gctBOOL isPosition; +} gcsVERTEXARRAY, *gcsVERTEXARRAY_PTR; + +gceSTATUS +gcoVERTEXARRAY_Construct(IN gcoHAL Hal, OUT gcoVERTEXARRAY *Vertex); + +gceSTATUS +gcoVERTEXARRAY_Destroy(IN gcoVERTEXARRAY Vertex); + +/* If don't consider isolation, STREAM_INFO / INDEX_INFO could be + * include in the struct of instantDraw in chip level. 
+ */ +typedef struct _gcsVERTEXARRAY_STREAM_INFO { + gctUINT attribMask; + gctSIZE_T first; + gctSIZE_T count; + gcePRIMITIVE primMode; + gctSIZE_T primCount; + gctINT vertexInstIndex; + gctBOOL instanced; + gctSIZE_T instanceCount; + + union _gcsVERTEXARRAY_STREAM_INFO_UNION { + struct _gcsVERTEXARRAY_STREAM_ES11_INFO { + gcsVERTEXARRAY_PTR attributes; + } es11; + + struct _gcsVERTEXARRAY_STREAM_ES30_INFO { + gcsATTRIBUTE_PTR attributes; + } es30; + } u; +} gcsVERTEXARRAY_STREAM_INFO, *gcsVERTEXARRAY_STREAM_INFO_PTR; + +typedef const struct _gcsVERTEXARRAY_STREAM_INFO *gcsVERTEXARRAY_STREAM_INFO_CONST_PTR; + +typedef struct _gcsVERTEXARRAY_INDEX_INFO { + gctSIZE_T count; + gceINDEX_TYPE indexType; + gctPOINTER indexMemory; + gctUINT restartElement; + + union _gcsVERTEXARRAY_INDEX_INFO_UNION { + struct _gcsVERTEXARRAY_INDEX_ES11_INFO { + gcoINDEX indexBuffer; + } es11; + + struct _gcsVERTEXARRAY_INDEX_ES30_INFO { + gcoBUFOBJ indexBuffer; + } es30; + } u; +} gcsVERTEXARRAY_INDEX_INFO, *gcsVERTEXARRAY_INDEX_INFO_PTR; + +typedef const struct _gcsVERTEXARRAY_INDEX_INFO *gcsVERTEXARRAY_INDEX_INFO_CONST_PTR; + +gceSTATUS +gcoVERTEXARRAY_IndexBind(IN gcoVERTEXARRAY Vertex, + IN gcsVERTEXARRAY_INDEX_INFO_PTR IndexInfo); + +#if gcdUSE_WCLIP_PATCH +gceSTATUS +gcoVERTEXARRAY_StreamBind(IN gcoVERTEXARRAY Vertex, + IN OUT gctFLOAT *WLimitRms, + IN OUT gctBOOL *WLimitRmsDirty, + IN gcsVERTEXARRAY_STREAM_INFO_CONST_PTR StreamInfo, + IN gcsVERTEXARRAY_INDEX_INFO_CONST_PTR IndexInfo); + +gceSTATUS +gcoVERTEXARRAY_StreamBind_Ex(IN gcoVERTEXARRAY Vertex, + IN OUT gctFLOAT *WLimitRms, + IN OUT gctBOOL *WLimitRmsDirty, + IN OUT gcsVERTEXARRAY_STREAM_INFO_PTR StreamInfo, + IN gcsVERTEXARRAY_INDEX_INFO_PTR IndexInfo); +# else +gceSTATUS +gcoVERTEXARRAY_StreamBind(IN gcoVERTEXARRAY Vertex, + IN gcsVERTEXARRAY_STREAM_INFO_CONST_PTR StreamInfo, + IN gcsVERTEXARRAY_INDEX_INFO_CONST_PTR IndexInfo); +gceSTATUS +gcoVERTEXARRAY_StreamBind_Ex(IN gcoVERTEXARRAY Vertex, + IN OUT gcsVERTEXARRAY_STREAM_INFO_PTR StreamInfo, + IN gcsVERTEXARRAY_INDEX_INFO_PTR IndexInfo); +# endif + +gceSTATUS +gcoVERTEXARRAY_IndexBind_Ex(IN gcoVERTEXARRAY Vertex, + IN OUT gcsVERTEXARRAY_STREAM_INFO_PTR StreamInfo, + IN gcsVERTEXARRAY_INDEX_INFO_PTR IndexInfo); + +gceSTATUS +gcoVERTEXARRAY_Bind(IN gcoVERTEXARRAY Vertex, + IN gctUINT32 EnableBits, + IN gcsVERTEXARRAY_PTR VertexArray, + IN gctUINT First, + IN gctSIZE_T *Count, + IN gceINDEX_TYPE IndexType, + IN gcoINDEX IndexObject, + IN gctPOINTER IndexMemory, + IN OUT gcePRIMITIVE *PrimitiveType, +#if gcdUSE_WCLIP_PATCH + IN OUT gctUINT *PrimitiveCount, + IN OUT gctFLOAT *wLimitRms, + IN OUT gctBOOL *wLimitDirty +# else + IN OUT gctUINT *PrimitiveCount +# endif +); + +/* Frame Database */ +gceSTATUS +gcoHAL_AddFrameDB(void); + +gceSTATUS +gcoHAL_DumpFrameDB(gctCONST_STRING Filename OPTIONAL); + +gceSTATUS +gcoHAL_InitGPUProfile(void); + +/****************************************************************************** + *********************gcoBUFOBJ object***************************************** + ******************************************************************************/ +/* Construct a new gcoBUFOBJ object. */ +gceSTATUS +gcoBUFOBJ_Construct(IN gcoHAL Hal, IN gceBUFOBJ_TYPE Type, OUT gcoBUFOBJ *BufObj); + +/* Destroy a gcoBUFOBJ object. */ +gceSTATUS +gcoBUFOBJ_Destroy(IN gcoBUFOBJ BufObj); + +/* Lock pbo in memory. */ +gceSTATUS +gcoBUFOBJ_Lock(IN gcoBUFOBJ BufObj, + OUT gctADDRESS *Address, + OUT gctPOINTER *Memory); + +/* Lock pbo in memory. 
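/*
 * Editor's note -- illustrative sketch only, not part of the original patch.
 * It shows how the gcsVERTEXARRAY_INDEX_INFO structure above is typically
 * filled for the es30 path: the indices live in a gcoBUFOBJ element-array
 * buffer (constructed and uploaded with the gcoBUFOBJ API declared just
 * below) and are bound through gcoVERTEXARRAY_IndexBind.
 * gcvBUFOBJ_TYPE_ELEMENT_ARRAY_BUFFER, gcvBUFOBJ_USAGE_STATIC_DRAW and
 * gcvINDEX_16 are declared in gc_hal_enum.h later in this patch; gctUINT16,
 * gcvNULL and gcvSTATUS_OK come from the base type headers. The restart
 * index value is the usual 16-bit choice, not something this header mandates.
 */
static gceSTATUS
bind_index_buffer_sketch(gcoHAL Hal, gcoVERTEXARRAY VertexArray,
                         const gctUINT16 *Indices, gctSIZE_T Count)
{
    gcoBUFOBJ                 indexBuf = gcvNULL;
    gcsVERTEXARRAY_INDEX_INFO indexInfo;
    gceSTATUS                 status;

    status = gcoBUFOBJ_Construct(Hal, gcvBUFOBJ_TYPE_ELEMENT_ARRAY_BUFFER,
                                 &indexBuf);
    if (status != gcvSTATUS_OK)
        return status;

    /* Static upload of the 16-bit index data at offset 0. */
    gcoBUFOBJ_Upload(indexBuf, Indices, 0, Count * sizeof(gctUINT16),
                     gcvBUFOBJ_USAGE_STATIC_DRAW);

    indexInfo.count              = Count;
    indexInfo.indexType          = gcvINDEX_16;
    indexInfo.indexMemory        = gcvNULL;  /* indices come from the buffer object */
    indexInfo.restartElement     = 0xFFFF;   /* typical 16-bit primitive restart index */
    indexInfo.u.es30.indexBuffer = indexBuf;

    status = gcoVERTEXARRAY_IndexBind(VertexArray, &indexInfo);

    /* The buffer must outlive the draw that consumes it; it is destroyed here
     * only to keep the sketch self-contained. */
    gcoBUFOBJ_Destroy(indexBuf);
    return status;
}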
*/ +gceSTATUS +gcoBUFOBJ_FastLock(IN gcoBUFOBJ BufObj, + OUT gctADDRESS *Address, + OUT gctPOINTER *Memory); + +/* Unlock pbo that was previously locked with gcoBUFOBJ_Lock. */ +gceSTATUS +gcoBUFOBJ_Unlock(IN gcoBUFOBJ BufObj); + +/* Free existing pbo buffer. */ +gceSTATUS +gcoBUFOBJ_Free(IN gcoBUFOBJ BufObj); + +/* Upload data into an pbo buffer. */ +gceSTATUS +gcoBUFOBJ_Upload(IN gcoBUFOBJ BufObj, + IN gctCONST_POINTER Buffer, + IN gctSIZE_T Offset, + IN gctSIZE_T Bytes, + IN gceBUFOBJ_USAGE Usage); + +/* Bind an index object to the hardware. */ +gceSTATUS +gcoBUFOBJ_IndexBind(IN gcoBUFOBJ Index, + IN gceINDEX_TYPE Type, + IN gctSIZE_T Offset, + IN gctSIZE_T Count, + IN gctUINT RestartElement); + +/* Find min and max index for the index buffer */ +gceSTATUS +gcoBUFOBJ_IndexGetRange(IN gcoBUFOBJ Index, + IN gceINDEX_TYPE Type, + IN gctSIZE_T Offset, + IN gctUINT32 Count, + OUT gctUINT32 *MinimumIndex, + OUT gctUINT32 *MaximumIndex); + +/* Sets buffer upload endian hint */ +gceSTATUS +gcoBUFOBJ_SetBufferEndianHint(IN gcoBUFOBJ BufObj); + +/* Query a buffer object dirty status */ +gceSTATUS +gcoBUFOBJ_SetDirty(IN gcoBUFOBJ BufObj, IN gctBOOL Dirty); + +/* Sets a buffer object as dirty */ +gctBOOL +gcoBUFOBJ_IsDirty(IN gcoBUFOBJ BufObj); + +/* Creates a new buffer if needed */ +gceSTATUS +gcoBUFOBJ_AlignIndexBufferWhenNeeded(IN gcoBUFOBJ BufObj, + IN gctSIZE_T Offset, + OUT gcoBUFOBJ *AlignedBufObj); + +/* Cache operations on whole range */ +gceSTATUS +gcoBUFOBJ_CPUCacheOperation(IN gcoBUFOBJ BufObj, IN gceCACHEOPERATION Operation); + +/* Cache operations on a specified range */ +gceSTATUS +gcoBUFOBJ_CPUCacheOperation_Range(IN gcoBUFOBJ BufObj, + IN gctSIZE_T Offset, + IN gctSIZE_T Length, + IN gceCACHEOPERATION Operation); + +/* Return size of the bufobj */ +gceSTATUS +gcoBUFOBJ_GetSize(IN gcoBUFOBJ BufObj, OUT gctSIZE_T_PTR Size); + +/* Return memory node of the bufobj */ +gceSTATUS +gcoBUFOBJ_GetNode(IN gcoBUFOBJ BufObj, OUT gcsSURF_NODE_PTR *Node); + +gceSTATUS +gcoBUFOBJ_ReAllocBufNode(IN gcoBUFOBJ BufObj); + +/* Handle GPU cache operations */ +gceSTATUS +gcoBUFOBJ_SetCPUWrite(gcoBUFOBJ BufObj, gctBOOL Value); + +/* Dump buffer. */ +void +gcoBUFOBJ_Dump(IN gcoBUFOBJ BufObj); + +#endif /* gcdENABLE_3D */ + +#ifdef __cplusplus +} +#endif + +#endif /* __gc_hal_engine_h_ */ + + diff --git a/unified-tina/inc/HAL/gc_hal_enum.h b/unified-tina/inc/HAL/gc_hal_enum.h new file mode 100644 index 0000000..759158a --- /dev/null +++ b/unified-tina/inc/HAL/gc_hal_enum.h @@ -0,0 +1,2064 @@ +/**************************************************************************** +* +* Copyright (c) 2005 - 2023 by Vivante Corp. All rights reserved. +* +* The material in this file is confidential and contains trade secrets +* of Vivante Corporation. This is proprietary information owned by +* Vivante Corporation. No part of this work may be disclosed, +* reproduced, copied, transmitted, or used in any way for any purpose, +* without the express written permission of Vivante Corporation. +* +*****************************************************************************/ + + +#ifndef __gc_hal_enum_h_ +#define __gc_hal_enum_h_ + +#include "gc_hal_options.h" +#include "shared/gc_hal_enum_shared.h" + +#ifdef __cplusplus +extern "C" { +#endif + +/* dummy draw type.*/ +typedef enum _gceDUMMY_DRAW_TYPE { + gcvDUMMY_DRAW_INVALID = 0, + gcvDUMMY_DRAW_GC400, + gcvDUMMY_DRAW_V60, +} gceDUMMY_DRAW_TYPE; + +/* Option Set*/ +typedef enum _gceOPTION { + /* HW setting. 
*/ + gcvOPTION_PREFER_ZCONVERT_BYPASS = 0, + gcvOPTION_PREFER_TILED_DISPLAY_BUFFER = 1, + gcvOPTION_PREFER_GUARDBAND = 2, + gcvOPTION_PREFER_TPG_TRIVIALMODEL = 3, + gcvOPTION_PREFER_RA_DEPTH_WRITE = 4, + gcvOPTION_PREFER_USC_RECONFIG = 5, + gcvOPTION_PREFER_DISALBE_HZ = 6, + + /* SW options */ + gcvOPTION_HW_NULL = 50, + gcvOPTION_PRINT_OPTION = 51, + gcvOPTION_KERNEL_FENCE = 52, + gcvOPTION_ASYNC_PIPE = 53, + gcvOPTION_FBO_PREFER_MEM = 54, + gcvOPTION_GPU_TEX_UPLOAD = 55, + gcvOPTION_GPU_BUFOBJ_UPLOAD = 56, + gcvOPTION_NO_Y_INVERT = 60, + + /* OCL option */ + gcvOPTION_OCL_ASYNC_BLT = 200, + gcvOPTION_OCL_IN_THREAD, + gcvOPTION_COMPRESSION_DEC400, + gcvOPTION_OCL_VIR_SHADER, + gcvOPTION_OCL_USE_MULTI_DEVICES, + +#if gcdUSE_VX + /* OVX options that HAL could access */ + gcvOPTION_OVX_ENABLE_NN_ZDP3 = 500, + gcvOPTION_OVX_ENABLE_NN_ZDP6, + gcvOPTION_OVX_ENABLE_NN_STRIDE, + gcvOPTION_OVX_USE_MULTI_DEVICES, + gcvOPTION_OVX_ENABLE_NN_DDR_BURST_SIZE_1024B, + gcvOPTION_OVX_ENABLE_NN_DDR_BURST_SIZE_512B, + gcvOPTION_OVX_ENABLE_NN_DDR_BURST_SIZE_256B, + gcvOPTION_OVX_ENABLE_NN_DDR_BURST_SIZE_128B, + gcvOPTION_OVX_ENABLE_NN_DDR_BURST_SIZE_64B, + + gcvOPTION_OVX_ENABLE_VIP_AXI_ID1_DDR_BURST_SIZE_1024B, + gcvOPTION_OVX_ENABLE_VIP_AXI_ID1_DDR_BURST_SIZE_512B, + gcvOPTION_OVX_ENABLE_VIP_AXI_ID1_DDR_BURST_SIZE_256B, + gcvOPTION_OVX_ENABLE_VIP_AXI_ID1_DDR_BURST_SIZE_128B, + gcvOPTION_OVX_ENABLE_VIP_AXI_ID1_DDR_BURST_SIZE_64B, + + gcvOPTION_OVX_ENABLE_VIP_AXI_ID2_DDR_BURST_SIZE_1024B, + gcvOPTION_OVX_ENABLE_VIP_AXI_ID2_DDR_BURST_SIZE_512B, + gcvOPTION_OVX_ENABLE_VIP_AXI_ID2_DDR_BURST_SIZE_256B, + gcvOPTION_OVX_ENABLE_VIP_AXI_ID2_DDR_BURST_SIZE_128B, + gcvOPTION_OVX_ENABLE_VIP_AXI_ID2_DDR_BURST_SIZE_64B, + + gcvOPTION_OVX_ENABLE_VIP_AXI_ID3_DDR_BURST_SIZE_1024B, + gcvOPTION_OVX_ENABLE_VIP_AXI_ID3_DDR_BURST_SIZE_512B, + gcvOPTION_OVX_ENABLE_VIP_AXI_ID3_DDR_BURST_SIZE_256B, + gcvOPTION_OVX_ENABLE_VIP_AXI_ID3_DDR_BURST_SIZE_128B, + gcvOPTION_OVX_ENABLE_VIP_AXI_ID3_DDR_BURST_SIZE_64B, + + gcvOPTION_OVX_ENABLE_VIP_AXI_ID0_MIN_AXI_BURST_SIZE_1024B, + gcvOPTION_OVX_ENABLE_VIP_AXI_ID0_MIN_AXI_BURST_SIZE_512B, + gcvOPTION_OVX_ENABLE_VIP_AXI_ID0_MIN_AXI_BURST_SIZE_256B, + gcvOPTION_OVX_ENABLE_VIP_AXI_ID0_MIN_AXI_BURST_SIZE_128B, + gcvOPTION_OVX_ENABLE_VIP_AXI_ID0_MIN_AXI_BURST_SIZE_64B, + gcvOPTION_OVX_ENABLE_VIP_AXI_ID0_MIN_AXI_BURST_SIZE_32B, + gcvOPTION_OVX_ENABLE_VIP_AXI_ID0_MIN_AXI_BURST_SIZE_16B, + + gcvOPTION_OVX_ENABLE_VIP_AXI_ID1_MIN_AXI_BURST_SIZE_1024B, + gcvOPTION_OVX_ENABLE_VIP_AXI_ID1_MIN_AXI_BURST_SIZE_512B, + gcvOPTION_OVX_ENABLE_VIP_AXI_ID1_MIN_AXI_BURST_SIZE_256B, + gcvOPTION_OVX_ENABLE_VIP_AXI_ID1_MIN_AXI_BURST_SIZE_128B, + gcvOPTION_OVX_ENABLE_VIP_AXI_ID1_MIN_AXI_BURST_SIZE_64B, + gcvOPTION_OVX_ENABLE_VIP_AXI_ID1_MIN_AXI_BURST_SIZE_32B, + gcvOPTION_OVX_ENABLE_VIP_AXI_ID1_MIN_AXI_BURST_SIZE_16B, + + gcvOPTION_OVX_ENABLE_VIP_AXI_ID2_MIN_AXI_BURST_SIZE_1024B, + gcvOPTION_OVX_ENABLE_VIP_AXI_ID2_MIN_AXI_BURST_SIZE_512B, + gcvOPTION_OVX_ENABLE_VIP_AXI_ID2_MIN_AXI_BURST_SIZE_256B, + gcvOPTION_OVX_ENABLE_VIP_AXI_ID2_MIN_AXI_BURST_SIZE_128B, + gcvOPTION_OVX_ENABLE_VIP_AXI_ID2_MIN_AXI_BURST_SIZE_64B, + gcvOPTION_OVX_ENABLE_VIP_AXI_ID2_MIN_AXI_BURST_SIZE_32B, + gcvOPTION_OVX_ENABLE_VIP_AXI_ID2_MIN_AXI_BURST_SIZE_16B, + + gcvOPTION_OVX_ENABLE_VIP_AXI_ID3_MIN_AXI_BURST_SIZE_1024B, + gcvOPTION_OVX_ENABLE_VIP_AXI_ID3_MIN_AXI_BURST_SIZE_512B, + gcvOPTION_OVX_ENABLE_VIP_AXI_ID3_MIN_AXI_BURST_SIZE_256B, + gcvOPTION_OVX_ENABLE_VIP_AXI_ID3_MIN_AXI_BURST_SIZE_128B, + 
gcvOPTION_OVX_ENABLE_VIP_AXI_ID3_MIN_AXI_BURST_SIZE_64B, + gcvOPTION_OVX_ENABLE_VIP_AXI_ID3_MIN_AXI_BURST_SIZE_32B, + gcvOPTION_OVX_ENABLE_VIP_AXI_ID3_MIN_AXI_BURST_SIZE_16B, + +#endif + /* Insert option above this comment only */ + gcvOPTION_COUNT /* Not a OPTION*/ +} gceOPTION; + +typedef enum _gceFRAMEINFO { + /* Total frame count in one run */ + gcvFRAMEINFO_FRAME_NUM = 0, + /* Total draw count in current frame, including draw/compute */ + gcvFRAMEINFO_DRAW_NUM = 1, + /* Total compute count in current frame, subset of drawNum */ + gcvFRAMEINFO_COMPUTE_NUM = 2, + /* Total dual16 draw/compute count in current frame, subset of drawNum */ + gcvFRAMEINFO_DUAL16_NUM = 3, + /* Current programID is being set. only valid for ES20 driver right now */ + gcvFRAMEINFO_PROGRAM_ID = 4, + + gcvFRAMEINFO_COUNT, +} gceFRAMEINFO; + +typedef enum _gceFRAMEINFO_OP { + gcvFRAMEINFO_OP_INC = 0, + gcvFRAMEINFO_OP_DEC = 1, + gcvFRAMEINFO_OP_ZERO = 2, + gcvFRAMEINFO_OP_GET = 3, + gcvFRAMEINFO_OP_SET = 4, + gcvFRAMEINFO_OP_COUNT, +} gceFRAMEINFO_OP; + +typedef enum _gceSURF_USAGE { + gcvSURF_USAGE_UNKNOWN, + gcvSURF_USAGE_RESOLVE_AFTER_CPU, + gcvSURF_USAGE_RESOLVE_AFTER_3D +} gceSURF_USAGE; + +typedef enum _gceSURF_COLOR_SPACE { + gcvSURF_COLOR_SPACE_UNKNOWN, + gcvSURF_COLOR_SPACE_LINEAR, + gcvSURF_COLOR_SPACE_NONLINEAR, +} gceSURF_COLOR_SPACE; + +typedef enum _gceSURF_COLOR_TYPE { + gcvSURF_COLOR_UNKNOWN = 0, + gcvSURF_COLOR_LINEAR = 0x01, + gcvSURF_COLOR_ALPHA_PRE = 0x02, +} gceSURF_COLOR_TYPE; + +/* Rotation. */ +typedef enum _gceSURF_ROTATION { + gcvSURF_0_DEGREE = 0, + gcvSURF_90_DEGREE, + gcvSURF_180_DEGREE, + gcvSURF_270_DEGREE, + gcvSURF_FLIP_X, + gcvSURF_FLIP_Y, + + gcvSURF_POST_FLIP_X = 0x40000000, + gcvSURF_POST_FLIP_Y = 0x80000000, +} gceSURF_ROTATION; + +/* Surface flag */ +typedef enum _gceSURF_FLAG { + /* None flag */ + gcvSURF_FLAG_NONE = 0x0, + /* content is preserved after swap */ + gcvSURF_FLAG_CONTENT_PRESERVED = 0x1, + /* content is updated after swap*/ + gcvSURF_FLAG_CONTENT_UPDATED = 0x2, + /* content is y inverted */ + gcvSURF_FLAG_CONTENT_YINVERTED = 0x4, + /* surface has multiple nodes */ + gcvSURF_FLAG_MULTI_NODE = 0x8, + /* surface no need do dither when resovle*/ + gcvSURF_FLAG_DITHER_DISABLED = 0x10, + /* surface used a fake hal format */ + gcvSURF_FLAG_FAKE_FORMAT = 0x20, +} gceSURF_FLAG; + +typedef enum _gceMIPMAP_IMAGE_FORMAT { + gcvUNKNOWN_MIPMAP_IMAGE_FORMAT = -2 +} gceMIPMAP_IMAGE_FORMAT; + +typedef enum _gceIMAGE_MEM_TYPE { + gcvIMAGE_MEM_DEFAULT, + gcvIMAGE_MEM_HOST_PTR, + gcvIMAGE_MEM_HOST_PTR_UNCACHED, + gcvIMAGE_MEM_HOST_PHY_PTR, + gcvIMAGE_MEM_HOST_PHY_PTR_UNCACHED, +} gceIMAGE_MEM_TYPE; + +typedef enum _gceSURF_YUV_COLOR_SPACE { + gcvSURF_ITU_REC601, + gcvSURF_ITU_REC709, + gcvSURF_ITU_REC2020, +} gceSURF_YUV_COLOR_SPACE; + +typedef enum _gceSURF_YUV_SAMPLE_RANGE { + gcvSURF_YUV_UNKNOWN_RANGE, + gcvSURF_YUV_FULL_RANGE, + gcvSURF_YUV_NARROW_RANGE, +} gceSURF_YUV_SAMPLE_RANGE; + +typedef enum _gceSURF_YUV_CHROMA_SITING { + gcvSURF_YUV_CHROMA_SITING_0, + gcvSURF_YUV_CHROMA_SITING_0_5, +} gceSURF_YUV_CHROMA_SITING; + +typedef enum _gceSURF_INFO_TYPE { + gcvSURF_INFO_UNKNOWN = 0, + gcvSURF_INFO_LAYERSIZE = 1, + gcvSURF_INFO_SLICESIZE = 2, +} gceSURF_INFO_TYPE; + +/* Format modifiers. */ +typedef enum _gceSURF_FORMAT_MODE { + gcvSURF_FORMAT_OCL = 0x80000000, + gcvSURF_FORMAT_PATCH_BORDER = 0x40000000, +} gceSURF_FORMAT_MODE; + +/* Pixel swizzle modes. 
*/ +typedef enum _gceSURF_SWIZZLE { + gcvSURF_NOSWIZZLE = 0, + gcvSURF_ARGB, + gcvSURF_ABGR, + gcvSURF_RGBA, + gcvSURF_BGRA +} gceSURF_SWIZZLE; + +/* Transparency modes. */ +typedef enum _gceSURF_TRANSPARENCY { + /* Valid only for PE 1.0 */ + gcvSURF_OPAQUE = 0, + gcvSURF_SOURCE_MATCH, + gcvSURF_SOURCE_MASK, + gcvSURF_PATTERN_MASK, +} gceSURF_TRANSPARENCY; + +/* Surface Alignment. */ +typedef enum _gceSURF_ALIGNMENT { + gcvSURF_FOUR = 0, + gcvSURF_SIXTEEN, + gcvSURF_SUPER_TILED, + gcvSURF_SPLIT_TILED, + gcvSURF_SPLIT_SUPER_TILED +} gceSURF_ALIGNMENT; + +/* Surface Addressing. */ +typedef enum _gceSURF_ADDRESSING { + gcvSURF_NO_STRIDE_TILED = 0, + gcvSURF_NO_STRIDE_LINEAR, + gcvSURF_STRIDE_TILED, + gcvSURF_STRIDE_LINEAR +} gceSURF_ADDRESSING; + +/* Transparency modes. */ +typedef enum _gce2D_TRANSPARENCY { + /* Valid only for PE 2.0 */ + gcv2D_OPAQUE = 0, + gcv2D_KEYED, + gcv2D_MASKED +} gce2D_TRANSPARENCY; + +/* Mono packing modes. */ +typedef enum _gceSURF_MONOPACK { + gcvSURF_PACKED8 = 0, + gcvSURF_PACKED16, + gcvSURF_PACKED32, + gcvSURF_UNPACKED, +} gceSURF_MONOPACK; + +/* Blending modes. */ +typedef enum _gceSURF_BLEND_MODE { + /* Porter-Duff blending modes. */ + /* Fsrc Fdst */ + gcvBLEND_CLEAR = 0, /* 0 0 */ + gcvBLEND_SRC, /* 1 0 */ + gcvBLEND_DST, /* 0 1 */ + gcvBLEND_SRC_OVER_DST, /* 1 1 - Asrc */ + gcvBLEND_DST_OVER_SRC, /* 1 - Adst 1 */ + gcvBLEND_SRC_IN_DST, /* Adst 0 */ + gcvBLEND_DST_IN_SRC, /* 0 Asrc */ + gcvBLEND_SRC_OUT_DST, /* 1 - Adst 0 */ + gcvBLEND_DST_OUT_SRC, /* 0 1 - Asrc */ + gcvBLEND_SRC_ATOP_DST, /* Adst 1 - Asrc */ + gcvBLEND_DST_ATOP_SRC, /* 1 - Adst Asrc */ + gcvBLEND_SRC_XOR_DST, /* 1 - Adst 1 - Asrc */ + + /* Special blending modes. */ + gcvBLEND_SET, /* DST = 1 */ + gcvBLEND_SUB /* DST = DST * (1 - SRC) */ +} gceSURF_BLEND_MODE; + +/* Per-pixel alpha modes. */ +typedef enum _gceSURF_PIXEL_ALPHA_MODE { + gcvSURF_PIXEL_ALPHA_STRAIGHT = 0, + gcvSURF_PIXEL_ALPHA_INVERSED +} gceSURF_PIXEL_ALPHA_MODE; + +/* Global alpha modes. */ +typedef enum _gceSURF_GLOBAL_ALPHA_MODE { + gcvSURF_GLOBAL_ALPHA_OFF = 0, + gcvSURF_GLOBAL_ALPHA_ON, + gcvSURF_GLOBAL_ALPHA_SCALE +} gceSURF_GLOBAL_ALPHA_MODE; + +/* Color component modes for alpha blending. */ +typedef enum _gceSURF_PIXEL_COLOR_MODE { + gcvSURF_COLOR_STRAIGHT = 0, + gcvSURF_COLOR_MULTIPLY +} gceSURF_PIXEL_COLOR_MODE; + +/* Color component modes for alpha blending. */ +typedef enum _gce2D_PIXEL_COLOR_MULTIPLY_MODE { + gcv2D_COLOR_MULTIPLY_DISABLE = 0, + gcv2D_COLOR_MULTIPLY_ENABLE +} gce2D_PIXEL_COLOR_MULTIPLY_MODE; + +/* Color component modes for alpha blending. */ +typedef enum _gce2D_GLOBAL_COLOR_MULTIPLY_MODE { + gcv2D_GLOBAL_COLOR_MULTIPLY_DISABLE = 0, + gcv2D_GLOBAL_COLOR_MULTIPLY_ALPHA, + gcv2D_GLOBAL_COLOR_MULTIPLY_COLOR +} gce2D_GLOBAL_COLOR_MULTIPLY_MODE; + +/* Alpha blending factor modes. */ +typedef enum _gceSURF_BLEND_FACTOR_MODE { + gcvSURF_BLEND_ZERO = 0, + gcvSURF_BLEND_ONE, + gcvSURF_BLEND_STRAIGHT, + gcvSURF_BLEND_INVERSED, + gcvSURF_BLEND_COLOR, + gcvSURF_BLEND_COLOR_INVERSED, + gcvSURF_BLEND_SRC_ALPHA_SATURATED, + gcvSURF_BLEND_STRAIGHT_NO_CROSS, + gcvSURF_BLEND_INVERSED_NO_CROSS, + gcvSURF_BLEND_COLOR_NO_CROSS, + gcvSURF_BLEND_COLOR_INVERSED_NO_CROSS, + gcvSURF_BLEND_SRC_ALPHA_SATURATED_CROSS +} gceSURF_BLEND_FACTOR_MODE; + +/* Alpha blending porter duff rules. 
*/ +typedef enum _gce2D_PORTER_DUFF_RULE { + gcvPD_CLEAR = 0, + gcvPD_SRC, + gcvPD_SRC_OVER, + gcvPD_DST_OVER, + gcvPD_SRC_IN, + gcvPD_DST_IN, + gcvPD_SRC_OUT, + gcvPD_DST_OUT, + gcvPD_SRC_ATOP, + gcvPD_DST_ATOP, + gcvPD_ADD, + gcvPD_XOR, + gcvPD_DST +} gce2D_PORTER_DUFF_RULE; + +/* Alpha blending factor modes. */ +typedef enum _gce2D_YUV_COLOR_MODE { + gcv2D_YUV_601 = 0, + gcv2D_YUV_709, + gcv2D_YUV_2020, + gcv2D_YUV_USER_DEFINED, + gcv2D_YUV_USER_DEFINED_CLAMP, + + /* Default setting is for src. gcv2D_YUV_DST + * can be ORed to set dst. + */ + gcv2D_YUV_DST = 0x80000000, +} gce2D_YUV_COLOR_MODE; + +/* Nature rotation rules. */ +typedef enum _gce2D_NATURE_ROTATION { + gcvNR_0_DEGREE = 0, + gcvNR_LEFT_90_DEGREE, + gcvNR_RIGHT_90_DEGREE, + gcvNR_180_DEGREE, + gcvNR_FLIP_X, + gcvNR_FLIP_Y, + gcvNR_TOTAL_RULE, +} gce2D_NATURE_ROTATION; + +typedef enum _gce2D_COMMAND { + gcv2D_CLEAR = 0, + gcv2D_LINE, + gcv2D_BLT, + gcv2D_STRETCH, + gcv2D_HOR_FILTER, + gcv2D_VER_FILTER, + gcv2D_MULTI_SOURCE_BLT, + gcv2D_FILTER_BLT, +} gce2D_COMMAND; + +typedef enum _gce2D_TILE_STATUS_CONFIG { + gcv2D_TSC_DISABLE = 0, + gcv2D_TSC_ENABLE = 0x00000001, + gcv2D_TSC_COMPRESSED = 0x00000002, + gcv2D_TSC_DOWN_SAMPLER = 0x00000004, + gcv2D_TSC_2D_COMPRESSED = 0x00000008, + + gcv2D_TSC_DEC_COMPRESSED = 0x00000020, + gcv2D_TSC_DEC_TPC = 0x00000040, + gcv2D_TSC_DEC_TPC_COMPRESSED = 0x00000080, + + gcv2D_TSC_V4_COMPRESSED = 0x00000100, + gcv2D_TSC_V4_COMPRESSED_256B = 0x00000200 | gcv2D_TSC_V4_COMPRESSED, + + gcv2D_TSC_DEC_TPC_TILED = gcv2D_TSC_DEC_COMPRESSED | gcv2D_TSC_DEC_TPC, + gcv2D_TSC_DEC_TPC_TILED_COMPRESSED = gcv2D_TSC_DEC_TPC_TILED | gcv2D_TSC_DEC_TPC_COMPRESSED, + + gcv2D_TSC_TPC_COMPRESSED = 0x00001000, + gcv2D_TSC_TPC_COMPRESSED_V10 = gcv2D_TSC_TPC_COMPRESSED | 0x00000400, + gcv2D_TSC_TPC_COMPRESSED_V11 = gcv2D_TSC_TPC_COMPRESSED | 0x00000800, +} gce2D_TILE_STATUS_CONFIG; + +typedef enum _gce2D_DEC400_MINOR_VERSION { + gcv2D_DEC400_MINOR_V1 = 1, + gcv2D_DEC400_MINOR_V2 = 2, + gcv2D_DEC400_MINOR_V3 = 3, + gcv2D_DEC400_MINOR_V4 = 4, +} gce2D_DEC400_MINOR_VERSION; + +/*Tiling version when disable compression*/ +typedef enum _gce2D_TILING_MINOR_VERSION { + gcv2D_TILING_MINOR_V1 = 0, + gcv2D_TILING_MINOR_V2 = 1, +} gce2D_TILING_MINOR_VERSION; + +typedef enum _gce2D_QUERY { + gcv2D_QUERY_RGB_ADDRESS_MIN_ALIGN = 0, + gcv2D_QUERY_RGB_STRIDE_MIN_ALIGN, + gcv2D_QUERY_YUV_ADDRESS_MIN_ALIGN, + gcv2D_QUERY_YUV_STRIDE_MIN_ALIGN, + gcv2D_QUERY_DEC400_MINOR_VERSION, + gcv2D_QUERY_TILING_MINOR_VERSION, +} gce2D_QUERY; + +typedef enum _gce2D_SUPER_TILE_VERSION { + gcv2D_SUPER_TILE_VERSION_V1 = 1, + gcv2D_SUPER_TILE_VERSION_V2 = 2, + gcv2D_SUPER_TILE_VERSION_V3 = 3, +} gce2D_SUPER_TILE_VERSION; + +typedef enum _gce2D_STATE { + gcv2D_STATE_SPECIAL_FILTER_MIRROR_MODE = 1, + gcv2D_STATE_SUPER_TILE_VERSION, + gcv2D_STATE_EN_GAMMA, + gcv2D_STATE_DE_GAMMA, + gcv2D_STATE_MULTI_SRC_BLIT_UNIFIED_DST_RECT, + gcv2D_STATE_MULTI_SRC_BLIT_BILINEAR_FILTER, + gcv2D_STATE_PROFILE_ENABLE, + gcv2D_STATE_XRGB_ENABLE, + + gcv2D_STATE_ARRAY_EN_GAMMA = 0x10001, + gcv2D_STATE_ARRAY_DE_GAMMA, + gcv2D_STATE_ARRAY_CSC_YUV_TO_RGB, + gcv2D_STATE_ARRAY_CSC_RGB_TO_YUV, + + gcv2D_STATE_DEC_TPC_NV12_10BIT = 0x20001, + gcv2D_STATE_ARRAY_YUV_SRC_TILE_STATUS_ADDR, + gcv2D_STATE_ARRAY_YUV_DST_TILE_STATUS_ADDR, +} gce2D_STATE; + +typedef enum _gce2D_STATE_PROFILE { + gcv2D_STATE_PROFILE_NONE = 0x0, + gcv2D_STATE_PROFILE_COMMAND = 0x1, + gcv2D_STATE_PROFILE_SURFACE = 0x2, + gcv2D_STATE_PROFILE_ALL = 0xFFFF, +} gce2D_STATE_PROFILE; + +/* Texture object types */ 
+typedef enum _gceTEXTURE_TYPE { + gcvTEXTURE_UNKNOWN = 0, + gcvTEXTURE_1D, + gcvTEXTURE_2D, + gcvTEXTURE_3D, + gcvTEXTURE_CUBEMAP, + gcvTEXTURE_1D_ARRAY, + gcvTEXTURE_2D_ARRAY, + gcvTEXTURE_2D_MS, + gcvTEXTURE_2D_MS_ARRAY, + gcvTEXTURE_CUBEMAP_ARRAY, + gcvTEXTURE_EXTERNAL +} gceTEXTURE_TYPE; + +#if gcdENABLE_3D +/* Texture functions. */ +typedef enum _gceTEXTURE_FUNCTION { + gcvTEXTURE_DUMMY = 0, + gcvTEXTURE_REPLACE = 0, + gcvTEXTURE_MODULATE, + gcvTEXTURE_ADD, + gcvTEXTURE_ADD_SIGNED, + gcvTEXTURE_INTERPOLATE, + gcvTEXTURE_SUBTRACT, + gcvTEXTURE_DOT3 +} gceTEXTURE_FUNCTION; + +/* Texture sources. */ +typedef enum _gceTEXTURE_SOURCE { + gcvCOLOR_FROM_TEXTURE = 0, + gcvCOLOR_FROM_CONSTANT_COLOR, + gcvCOLOR_FROM_PRIMARY_COLOR, + gcvCOLOR_FROM_PREVIOUS_COLOR +} gceTEXTURE_SOURCE; + +/* Texture source channels. */ +typedef enum _gceTEXTURE_CHANNEL { + gcvFROM_COLOR = 0, + gcvFROM_ONE_MINUS_COLOR, + gcvFROM_ALPHA, + gcvFROM_ONE_MINUS_ALPHA +} gceTEXTURE_CHANNEL; +#endif /* gcdENABLE_3D */ + +/* Filter types. */ +typedef enum _gceFILTER_TYPE { + gcvFILTER_SYNC = 0, + gcvFILTER_BLUR, + gcvFILTER_USER +} gceFILTER_TYPE; + +/* Filter pass types. */ +typedef enum _gceFILTER_PASS_TYPE { + gcvFILTER_HOR_PASS = 0, + gcvFILTER_VER_PASS +} gceFILTER_PASS_TYPE; + +/* Endian hints. */ +typedef enum _gceENDIAN_HINT { + gcvENDIAN_NO_SWAP = 0, + gcvENDIAN_SWAP_WORD = 1, + gcvENDIAN_SWAP_DWORD = 2, + gcvENDIAN_SWAP_QWORD = 3, +} gceENDIAN_HINT; + +/* Tiling modes. */ +typedef enum _gceTILING { + gcvINVALIDTILED = 0x0, /* Invalid tiling */ + /* Tiling basic modes enum'ed in power of 2. */ + gcvLINEAR = 0x1, /* No tiling. */ + gcvTILED = 0x2, /* 4x4 tiling. */ + gcvSUPERTILED = 0x4, /* 64x64 tiling. */ + gcvMINORTILED = 0x8, /* 2x2 tiling. */ + + /* Tiling special layouts. */ + gcvTILING_SPLIT_BUFFER = 0x10, + gcvTILING_X_MAJOR = 0x20, + gcvTILING_Y_MAJOR = 0x40, + gcvTILING_SWAP = 0x80, + + /* Tiling combination layouts. */ + gcvMULTI_TILED = gcvTILED | gcvTILING_SPLIT_BUFFER, + + gcvMULTI_SUPERTILED = gcvSUPERTILED | gcvTILING_SPLIT_BUFFER, + + gcvYMAJOR_SUPERTILED = gcvSUPERTILED | gcvTILING_Y_MAJOR, + + gcvTILED_8X4 = 0x0100, + gcvTILED_4X8 = 0x0100 | gcvTILING_SWAP, + gcvTILED_8X8 = 0x0200, + gcvTILED_16X4 = 0x0400, + gcvTILED_32X4 = 0x0800, + gcvTILED_64X4 = 0x1000, + + gcvTILED_8X8_XMAJOR = gcvTILED_8X8 | gcvTILING_X_MAJOR, + gcvTILED_8X8_YMAJOR = gcvTILED_8X8 | gcvTILING_Y_MAJOR, + + gcvSUPERTILED_128B = 0x10000 | gcvSUPERTILED, + gcvSUPERTILED_256B = 0x20000 | gcvSUPERTILED, +} gceTILING; + +typedef enum _gceCACHE_MODE { + gcvCACHE_NONE, + gcvCACHE_128, + gcvCACHE_256, +} gceCACHE_MODE; + +#define DEFAULT_CACHE_MODE gcvCACHE_256 + +/* 2D pattern type. */ +typedef enum _gce2D_PATTERN { + gcv2D_PATTERN_SOLID = 0, + gcv2D_PATTERN_MONO, + gcv2D_PATTERN_COLOR, + gcv2D_PATTERN_INVALID +} gce2D_PATTERN; + +/* 2D source type. */ +typedef enum _gce2D_SOURCE { + gcv2D_SOURCE_MASKED = 0, + gcv2D_SOURCE_MONO, + gcv2D_SOURCE_COLOR, + gcv2D_SOURCE_INVALID +} gce2D_SOURCE; + +typedef enum _gceMMU_MODE { + gcvMMU_MODE_1K, + gcvMMU_MODE_4K, +} gceMMU_MODE; + +/* gcdDUMP message type. */ +typedef enum _gceDEBUG_MESSAGE_TYPE { + gcvMESSAGE_TEXT, + gcvMESSAGE_DUMP +} gceDEBUG_MESSAGE_TYPE; + +/* Shading format. */ +typedef enum _gceSHADING { + gcvSHADING_SMOOTH, + gcvSHADING_FLAT_D3D, + gcvSHADING_FLAT_OPENGL, +} gceSHADING; + +/* Culling modes. */ +typedef enum _gceCULL { + gcvCULL_NONE, + gcvCULL_CCW, + gcvCULL_CW, +} gceCULL; + +/* Fill modes. 
*/ +typedef enum _gceFILL { + gcvFILL_POINT, + gcvFILL_WIRE_FRAME, + gcvFILL_SOLID, +} gceFILL; + +/* Compare modes. */ +typedef enum _gceCOMPARE { + gcvCOMPARE_INVALID = 0, + gcvCOMPARE_NEVER, + gcvCOMPARE_NOT_EQUAL, + gcvCOMPARE_LESS, + gcvCOMPARE_LESS_OR_EQUAL, + gcvCOMPARE_EQUAL, + gcvCOMPARE_GREATER, + gcvCOMPARE_GREATER_OR_EQUAL, + gcvCOMPARE_ALWAYS, +} gceCOMPARE; + +/* Stencil modes. */ +typedef enum _gceSTENCIL_MODE { + gcvSTENCIL_NONE, + gcvSTENCIL_SINGLE_SIDED, + gcvSTENCIL_DOUBLE_SIDED, +} gceSTENCIL_MODE; + +/* Stencil operations. */ +typedef enum _gceSTENCIL_OPERATION { + gcvSTENCIL_KEEP, + gcvSTENCIL_REPLACE, + gcvSTENCIL_ZERO, + gcvSTENCIL_INVERT, + gcvSTENCIL_INCREMENT, + gcvSTENCIL_DECREMENT, + gcvSTENCIL_INCREMENT_SATURATE, + gcvSTENCIL_DECREMENT_SATURATE, + gcvSTENCIL_OPERATION_INVALID = -1 +} gceSTENCIL_OPERATION; + +/* Stencil selection. */ +typedef enum _gceSTENCIL_WHERE { + gcvSTENCIL_FRONT, + gcvSTENCIL_BACK, +} gceSTENCIL_WHERE; + +/* Texture addressing selection. */ +typedef enum _gceTEXTURE_WHICH { + gcvTEXTURE_S, + gcvTEXTURE_T, + gcvTEXTURE_R, +} gceTEXTURE_WHICH; + +/* Texture addressing modes. */ +typedef enum _gceTEXTURE_ADDRESSING { + gcvTEXTURE_INVALID = 0, + gcvTEXTURE_CLAMP, + gcvTEXTURE_WRAP, + gcvTEXTURE_MIRROR, + gcvTEXTURE_BORDER, + gcvTEXTURE_MIRROR_ONCE, +} gceTEXTURE_ADDRESSING; + +/* Texture filters. */ +typedef enum _gceTEXTURE_FILTER { + gcvTEXTURE_NONE, + gcvTEXTURE_POINT, + gcvTEXTURE_LINEAR, + gcvTEXTURE_ANISOTROPIC, +} gceTEXTURE_FILTER; + +typedef enum _gceTEXTURE_COMPONENT { + gcvTEXTURE_COMPONENT_R, + gcvTEXTURE_COMPONENT_G, + gcvTEXTURE_COMPONENT_B, + gcvTEXTURE_COMPONENT_A, + + gcvTEXTURE_COMPONENT_NUM, +} gceTEXTURE_COMPONENT; + +/* Texture swizzle modes. */ +typedef enum _gceTEXTURE_SWIZZLE { + gcvTEXTURE_SWIZZLE_R = 0, + gcvTEXTURE_SWIZZLE_G, + gcvTEXTURE_SWIZZLE_B, + gcvTEXTURE_SWIZZLE_A, + gcvTEXTURE_SWIZZLE_0, + gcvTEXTURE_SWIZZLE_1, + + gcvTEXTURE_SWIZZLE_INVALID, +} gceTEXTURE_SWIZZLE; + +typedef enum _gceTEXTURE_SRGBDECODE { + gcvTEXTURE_SRGB_INVALID = 0, + gcvTEXTURE_DECODE, + gcvTEXTURE_SKIP_DECODE, +} gceTEXTURE_SRGBDECODE; + +typedef enum _gceTEXTURE_COMPARE_MODE { + gcvTEXTURE_COMPARE_MODE_INVALID = 0, + gcvTEXTURE_COMPARE_MODE_NONE, + gcvTEXTURE_COMPARE_MODE_REF, +} gceTEXTURE_COMPARE_MODE; + +typedef enum _gceTEXTURE_DS_MODE { + gcvTEXTURE_DS_MODE_INVALID = 0, + gcvTEXTURE_DS_MODE_DEPTH = 1, + gcvTEXTURE_DS_MODE_STENCIL = 2, +} gceTEXTURE_DS_MODE; + +typedef enum _gceTEXTURE_DS_TEX_MODE { + gcvTEXTURE_DS_TEXTURE_MODE_LUMINANCE = 0, + gcvTEXTURE_DS_TEXTURE_MODE_INTENSITY, + gcvTEXTURE_DS_TEXTURE_MODE_ALPHA, + gcvTEXTURE_DS_TEXTURE_MODE_RED, + + gcvTEXTURE_DS_TEXTURE_MODE_INVALID, +} gceTEXTURE_DS_TEX_MODE; + +/* Texture stage */ +typedef enum _gceTEXTURE_STAGE { + gcvTEXTURE_STAGE_INVALID = -1, + gcvTEXTURE_STAGE_VS = 0, + gcvTEXTURE_STAGE_TCS, + gcvTEXTURE_STAGE_TES, + gcvTEXTURE_STAGE_GS, + gcvTEXTURE_STAGE_FS, + gcvTEXTURE_STAGE_CS, + + gcvTEXTURE_STAGE_LAST +} gceTEXTURE_STAGE; + +/* Pixel output swizzle modes. */ +typedef enum _gcePIXEL_SWIZZLE { + gcvPIXEL_SWIZZLE_R = gcvTEXTURE_SWIZZLE_R, + gcvPIXEL_SWIZZLE_G = gcvTEXTURE_SWIZZLE_G, + gcvPIXEL_SWIZZLE_B = gcvTEXTURE_SWIZZLE_B, + gcvPIXEL_SWIZZLE_A = gcvTEXTURE_SWIZZLE_A, + + gcvPIXEL_SWIZZLE_INVALID, +} gcePIXEL_SWIZZLE; + +/* Primitive types. 
*/ +typedef enum _gcePRIMITIVE { + gcvPRIMITIVE_POINT_LIST, + gcvPRIMITIVE_LINE_LIST, + gcvPRIMITIVE_LINE_STRIP, + gcvPRIMITIVE_LINE_LOOP, + gcvPRIMITIVE_TRIANGLE_LIST, + gcvPRIMITIVE_TRIANGLE_STRIP, + gcvPRIMITIVE_TRIANGLE_FAN, + gcvPRIMITIVE_RECTANGLE, + gcvPRIMITIVE_LINES_ADJACENCY, + gcvPRIMITIVE_LINE_STRIP_ADJACENCY, + gcvPRIMITIVE_TRIANGLES_ADJACENCY, + gcvPRIMITIVE_TRIANGLE_STRIP_ADJACENCY, + gcvPRIMITIVE_PATCH_LIST, +} gcePRIMITIVE; + +/* Index types. */ +typedef enum _gceINDEX_TYPE { + gcvINDEX_8, + gcvINDEX_16, + gcvINDEX_32, +} gceINDEX_TYPE; + +/* Multi GPU rendering modes. */ +typedef enum _gceMULTI_GPU_RENDERING_MODE { + gcvMULTI_GPU_RENDERING_MODE_OFF, + gcvMULTI_GPU_RENDERING_MODE_SPLIT_WIDTH, + gcvMULTI_GPU_RENDERING_MODE_SPLIT_HEIGHT, + gcvMULTI_GPU_RENDERING_MODE_INTERLEAVED_64x64, + gcvMULTI_GPU_RENDERING_MODE_INTERLEAVED_128x64, + gcvMULTI_GPU_RENDERING_MODE_INTERLEAVED_128x128, + gcvMULTI_GPU_RENDERING_MODE_INTERLEAVED, + gcvMULTI_GPU_RENDERING_MODE_INVALID +} gceMULTI_GPU_RENDERING_MODE; + +typedef enum _gceMACHINECODE { + gcvMACHINECODE_ANTUTU0 = 0x0, + + gcvMACHINECODE_GLB27_RELEASE_0, + + gcvMACHINECODE_GLB25_RELEASE_0, + gcvMACHINECODE_GLB25_RELEASE_1, + + gcvMACHINECODE_COUNT, + + /* flag as dynamic allocation for shader partial replace */ + gcvSHADER_SRC_PARTIAL_REPLACE, +} gceMACHINECODE; + +typedef enum _gceUNIFORMCVT { + gcvUNIFORMCVT_NONE = 0, + gcvUNIFORMCVT_TO_BOOL, + gcvUNIFORMCVT_TO_FLOAT, +} gceUNIFORMCVT; + +typedef enum _gceHAL_ARG_VERSION { + gcvHAL_ARG_VERSION_V1 = 0x0, + gcvHAL_ARG_VERSION_V2, +} gceHAL_ARG_VERSION; + + +/** endian mode for each 2Bytes + * endian mode endian + * mode0: 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 + * mode1: 1 0 3 2 5 4 7 6 9 8 11 10 13 12 15 14 + * mode2: 2 3 0 1 6 7 4 5 10 11 8 9 14 15 12 13 + * mode3: 3 2 1 0 7 6 5 4 11 10 9 8 15 14 13 12 + * mode4: 12 13 14 15 8 9 10 11 4 5 6 7 0 1 2 3 + * mode5: 13 12 15 14 9 8 11 10 5 4 7 6 1 0 3 2 + * mode6: 14 15 12 13 10 11 8 9 6 7 4 5 2 3 0 1 + * mode7: 15 14 13 12 11 10 9 8 7 6 5 4 3 2 1 0 + **/ +typedef enum _gceENDIAN_MODE { + gcvENDIAN_MODE0 = 0x0, /* endian mode0 */ + gcvENDIAN_MODE1 = 0x1, /* endian mode1 */ + gcvENDIAN_MODE2 = 0x2, /* endian mode2 */ + gcvENDIAN_MODE3 = 0x3, /* endian mode3 */ + gcvENDIAN_MODE4 = 0x4, /* endian mode4 */ + gcvENDIAN_MODE5 = 0x5, /* endian mode5 */ + gcvENDIAN_MODE6 = 0x6, /* endian mode6 */ + gcvENDIAN_MODE7 = 0x7, /* endian mode7 */ +} gceENDIAN_MODE; + +typedef enum _gceHW_FE_TYPE { + gcvHW_FE_WAIT_LINK, + gcvHW_FE_ASYNC, + gcvHW_FE_MULTI_CHANNEL, + gcvHW_FE_END, +} gceHW_FE_TYPE; + +typedef enum _gceMCFE_CHANNEL_TYPE { + gcvMCFE_CHANNEL_NONE = 0, + gcvMCFE_CHANNEL_SYSTEM, + gcvMCFE_CHANNEL_SHADER, + gcvMCFE_CHANNEL_NN, + gcvMCFE_CHANNEL_TP, + + gcvMCFE_CHANNEL_3DBLIT = 128, +} gceMCFE_CHANNEL_TYPE; + +typedef enum _gcePAGE_TYPE { + gcvPAGE_TYPE_4K, + gcvPAGE_TYPE_64K, + gcvPAGE_TYPE_1M, + gcvPAGE_TYPE_16M, +} gcePAGE_TYPE; + +typedef enum _gceAREA_TYPE { + gcvAREA_TYPE_UNKNOWN = 0, + gcvAREA_TYPE_FLATMAP, + gcvAREA_TYPE_1M, + gcvAREA_TYPE_4K, +} gceAREA_TYPE; + +typedef enum _gce2D_U8ToU10_CONVERSION_MODE +{ + gcvADD_LOWER_BITS, + gcvNOT_ADD_LOWER_BITS, + gcvNOT_ADD_HIGHER_BITS +} gce2D_U8ToU10_CONVERSION_MODE; + +typedef enum _gce2D_NORMALIZATION_MODE +{ + gcvNORMALIZATION_STD_Z_SCORE, + gcvNORMALIZATION_Z_SCORE, + gcvNORMALIZATION_MIN_MAX, +} gce2D_NORMALIZATION_MODE; + +typedef enum _gce2D_MULTICORE_MODE +{ + gcvMULTICORE_COMBINED_MODE = 0, + gcvMULTICORE_INDEPENDENT_MODE +}gce2D_MULTICORE_MODE; + +typedef enum _gce2D_MULTICORE_INDEX 
+{ + gcv2D_CORE_0 = 0, + gcv2D_CORE_1, + gcv2D_CORE_2, + gcv2D_CORE_3, +}gce2D_MULTICORE_INDEX; + +/* Available enumeration values that can be used to set 2D state */ +typedef enum _gce2D_STATE_KEY +{ + gcvNORMALIZATION_ENABLE = 0, + gcvNORMALIZATION_MODE, + gcvNORMALIZATION_MAXMINRECIPROCAL, + gcvNORMALIZATION_MINVALUE, + gcvNORMALIZATION_MEANVALUE, + gcvNORMALIZATION_STDRECIPROCAL, + gcvQUANTIZATION_STEPRECIPROCAL, + gcvQUANTIZATION_BYPASSSTEPQUANTIZATION, + gcvUINT8_2_UINT10_CONVERSION_MODE, + gcvMulticoreMode, + + gcvSTATE_TAIL, +} gce2D_STATE_KEY; + +/******************************************************************************* + ** Broadcast interface. + */ + +typedef enum _gceBROADCAST { + /* GPU might be idle. */ + gcvBROADCAST_GPU_IDLE, + + /* A commit is going to happen. */ + gcvBROADCAST_GPU_COMMIT, + + /* GPU seems to be stuck. */ + gcvBROADCAST_GPU_STUCK, + + /* First process gets attached. */ + gcvBROADCAST_FIRST_PROCESS, + + /* Last process gets detached. */ + gcvBROADCAST_LAST_PROCESS, + + /* AXI bus error. */ + gcvBROADCAST_AXI_BUS_ERROR, + + /* Out of memory. */ + gcvBROADCAST_OUT_OF_MEMORY, +} gceBROADCAST; + +/* Notifications. */ +typedef enum _gceNOTIFY { + gcvNOTIFY_INTERRUPT, + gcvNOTIFY_COMMAND_QUEUE, +} gceNOTIFY; + +/* Flush flags. */ +typedef enum _gceKERNEL_FLUSH { + gcvFLUSH_COLOR = 0x01, + gcvFLUSH_DEPTH = 0x02, + gcvFLUSH_TEXTURE = 0x04, + gcvFLUSH_2D = 0x08, + gcvFLUSH_L2 = 0x10, + gcvFLUSH_TILE_STATUS = 0x20, + gcvFLUSH_ICACHE = 0x40, + gcvFLUSH_TXDESC = 0x80, + gcvFLUSH_FENCE = 0x100, + gcvFLUSH_VERTEX = 0x200, + gcvFLUSH_TFBHEADER = 0x400, + gcvFLUSH_ALL = gcvFLUSH_COLOR + | gcvFLUSH_DEPTH + | gcvFLUSH_TEXTURE + | gcvFLUSH_2D + | gcvFLUSH_L2 + | gcvFLUSH_TILE_STATUS + | gcvFLUSH_ICACHE + | gcvFLUSH_TXDESC + | gcvFLUSH_FENCE + | gcvFLUSH_VERTEX + | gcvFLUSH_TFBHEADER +} gceKERNEL_FLUSH; + +typedef enum _gceCOUNTER { + gcvCOUNTER_FRONT_END, + gcvCOUNTER_VERTEX_SHADER, + gcvCOUNTER_PRIMITIVE_ASSEMBLY, + gcvCOUNTER_SETUP, + gcvCOUNTER_RASTERIZER, + gcvCOUNTER_PIXEL_SHADER, + gcvCOUNTER_TEXTURE, + gcvCOUNTER_PIXEL_ENGINE, + gcvCOUNTER_MEMORY_CONTROLLER_COLOR, + gcvCOUNTER_MEMORY_CONTROLLER_DEPTH, + gcvCOUNTER_HOST_INTERFACE0, + gcvCOUNTER_HOST_INTERFACE1, + gcvCOUNTER_GPUL2_CACHE, + gcvCOUNTER_COUNT +} gceCOUNTER; + +typedef enum _gceProfilerClient { + gcvCLIENT_OPENGLES11 = 1, + gcvCLIENT_OPENGLES, + gcvCLIENT_OPENGL, + gcvCLIENT_OPENVG, + gcvCLIENT_OPENCL, + gcvCLIENT_OPENVX, + gcvCLIENT_OPENVK, +} gceProfilerClient; + +typedef enum _gceCOUNTER_OPTYPE { + gcvCOUNTER_OP_DRAW = 0, + gcvCOUNTER_OP_BLT = 1, + gcvCOUNTER_OP_COMPUTE = 2, + gcvCOUNTER_OP_RS = 3, + gcvCOUNTER_OP_FINISH = 4, + gcvCOUNTER_OP_FRAME = 5, + gcvCOUNTER_OP_OPERATION = 6, + gcvCOUNTER_OP_NONE = 7 +} +gceCOUNTER_OPTYPE; + +typedef enum _gceProbeStatus { + gcvPROBE_Disabled = 0, + gcvPROBE_Paused = 1, + gcvPROBE_Enabled = 2, +} gceProbeStatus; + +typedef enum _gceProbeCmd { + gcvPROBECMD_BEGIN = 0, + gcvPROBECMD_PAUSE = 1, + gcvPROBECMD_RESUME = 2, + gcvPROBECMD_END = 3, +} gceProbeCmd; + +/******************************************************************************* + * Events. ********************************************************************* + */ + +typedef enum _halEventType { + /* Keyboard event. */ + HAL_KEYBOARD, + + /* Mouse move event. */ + HAL_POINTER, + + /* Mouse button event. */ + HAL_BUTTON, + + /* Application close event. */ + HAL_CLOSE, + + /* Application window has been updated. */ + HAL_WINDOW_UPDATE +} halEventType; + +/* Scancodes for keyboard. 
*/ +typedef enum _halKeys { + HAL_UNKNOWN = -1, + + HAL_BACKSPACE = 0x08, + HAL_TAB, + HAL_ENTER = 0x0D, + HAL_ESCAPE = 0x1B, + + HAL_SPACE = 0x20, + HAL_SINGLEQUOTE = 0x27, + HAL_PAD_ASTERISK = 0x2A, + HAL_COMMA = 0x2C, + HAL_HYPHEN, + HAL_PERIOD, + HAL_SLASH, + HAL_0, + HAL_1, + HAL_2, + HAL_3, + HAL_4, + HAL_5, + HAL_6, + HAL_7, + HAL_8, + HAL_9, + HAL_SEMICOLON = 0x3B, + HAL_EQUAL = 0x3D, + HAL_A = 0x41, + HAL_B, + HAL_C, + HAL_D, + HAL_E, + HAL_F, + HAL_G, + HAL_H, + HAL_I, + HAL_J, + HAL_K, + HAL_L, + HAL_M, + HAL_N, + HAL_O, + HAL_P, + HAL_Q, + HAL_R, + HAL_S, + HAL_T, + HAL_U, + HAL_V, + HAL_W, + HAL_X, + HAL_Y, + HAL_Z, + HAL_LBRACKET, + HAL_BACKSLASH, + HAL_RBRACKET, + HAL_BACKQUOTE = 0x60, + + HAL_F1 = 0x80, + HAL_F2, + HAL_F3, + HAL_F4, + HAL_F5, + HAL_F6, + HAL_F7, + HAL_F8, + HAL_F9, + HAL_F10, + HAL_F11, + HAL_F12, + + HAL_LCTRL, + HAL_RCTRL, + HAL_LSHIFT, + HAL_RSHIFT, + HAL_LALT, + HAL_RALT, + HAL_CAPSLOCK, + HAL_NUMLOCK, + HAL_SCROLLLOCK, + HAL_PAD_0, + HAL_PAD_1, + HAL_PAD_2, + HAL_PAD_3, + HAL_PAD_4, + HAL_PAD_5, + HAL_PAD_6, + HAL_PAD_7, + HAL_PAD_8, + HAL_PAD_9, + HAL_PAD_HYPHEN, + HAL_PAD_PLUS, + HAL_PAD_SLASH, + HAL_PAD_PERIOD, + HAL_PAD_ENTER, + HAL_SYSRQ, + HAL_PRNTSCRN, + HAL_BREAK, + HAL_UP, + HAL_LEFT, + HAL_RIGHT, + HAL_DOWN, + HAL_HOME, + HAL_END, + HAL_PGUP, + HAL_PGDN, + HAL_INSERT, + HAL_DELETE, + HAL_LWINDOW, + HAL_RWINDOW, + HAL_MENU, + HAL_POWER, + HAL_SLEEP, + HAL_WAKE +} halKeys; + +/*! + * @brief Command codes between kernel module and TrustZone + * @discussion + * Critical services must be done in TrustZone to avoid sensitive content leak. + * Most of kernel module is kept in non-Secure os to minimize code in TrustZone. + */ +typedef enum kernel_packet_command { + KERNEL_START_COMMAND, + KERNEL_SUBMIT, + KERNEL_MAP_MEMORY, /* */ + KERNEL_UNMAP_MEMORY, + KERNEL_ALLOCATE_SECRUE_MEMORY, /*! Security memory management. */ + KERNEL_FREE_SECURE_MEMORY, + KERNEL_EXECUTE, /* Execute a command buffer. */ + KERNEL_DUMP_MMU_EXCEPTION, + KERNEL_HANDLE_MMU_EXCEPTION, + KERNEL_READ_MMU_EXCEPTION, +} kernel_packet_command_t; + +enum { + gcvTA_COMMAND_INIT, + gcvTA_COMMAND_DISPATCH, + + gcvTA_CALLBACK_ALLOC_SECURE_MEM, + gcvTA_CALLBACK_FREE_SECURE_MEM, +}; + +typedef enum { + gcvFENCE_TYPE_READ = 0x1, + gcvFENCE_TYPE_WRITE = 0x2, + gcvFENCE_TYPE_ALL = gcvFENCE_TYPE_READ | gcvFENCE_TYPE_WRITE, + gcvFNECE_TYPE_INVALID = 0x10000, +} gceFENCE_TYPE; + +typedef enum _gceTLS_KEY { + gcvTLS_KEY_EGL, + gcvTLS_KEY_OPENGL_ES, + gcvTLS_KEY_OPENVG, + gcvTLS_KEY_OPENGL, + gcvTLS_KEY_OPENCL, + gcvTLS_KEY_OPENVX, + + gcvTLS_KEY_COUNT +} gceTLS_KEY; + +typedef enum _gcePLS_VALUE { + gcePLS_VALUE_EGL_DISPLAY_INFO, + gcePLS_VALUE_EGL_CONFIG_FORMAT_INFO, + gcePLS_VALUE_EGL_DESTRUCTOR_INFO, +} gcePLS_VALUE; + +#if gcdENABLE_3D +/* Blending functions. */ +typedef enum _gceBLEND_FUNCTION { + gcvBLEND_ZERO, + gcvBLEND_ONE, + gcvBLEND_SOURCE_COLOR, + gcvBLEND_INV_SOURCE_COLOR, + gcvBLEND_SOURCE_ALPHA, + gcvBLEND_INV_SOURCE_ALPHA, + gcvBLEND_TARGET_COLOR, + gcvBLEND_INV_TARGET_COLOR, + gcvBLEND_TARGET_ALPHA, + gcvBLEND_INV_TARGET_ALPHA, + gcvBLEND_SOURCE_ALPHA_SATURATE, + gcvBLEND_CONST_COLOR, + gcvBLEND_INV_CONST_COLOR, + gcvBLEND_CONST_ALPHA, + gcvBLEND_INV_CONST_ALPHA, +} gceBLEND_FUNCTION; + +/* Blending modes. 
*/ +typedef enum _gceBLEND_MODE { + gcvBLEND_ADD = 0, + gcvBLEND_SUBTRACT, + gcvBLEND_REVERSE_SUBTRACT, + gcvBLEND_MIN, + gcvBLEND_MAX, + gcvBLEND_MULTIPLY, + gcvBLEND_SCREEN, + gcvBLEND_OVERLAY, + gcvBLEND_DARKEN, + gcvBLEND_LIGHTEN, + gcvBLEND_COLORDODGE, + gcvBLEND_COLORBURN, + gcvBLEND_HARDLIGHT, + gcvBLEND_SOFTLIGHT, + gcvBLEND_DIFFERENCE, + gcvBLEND_EXCLUSION, + gcvBLEND_HSL_HUE, + gcvBLEND_HSL_SATURATION, + gcvBLEND_HSL_COLOR, + gcvBLEND_HSL_LUMINOSITY, + + gcvBLEND_TOTAL +} gceBLEND_MODE; + +/* Depth modes. */ +typedef enum _gceDEPTH_MODE { + gcvDEPTH_NONE, + gcvDEPTH_Z, + gcvDEPTH_W, +} gceDEPTH_MODE; +#endif /* gcdENABLE_3D */ + +/* API flags. */ +typedef enum _gceAPI { + gcvAPI_D3D = 1, + gcvAPI_OPENGL_ES11, + gcvAPI_OPENGL_ES20, + gcvAPI_OPENGL_ES30, + gcvAPI_OPENGL_ES31, + gcvAPI_OPENGL_ES32, + gcvAPI_OPENGL, + gcvAPI_OPENVG, + gcvAPI_OPENCL, + gcvAPI_OPENVK, +} gceAPI; + +typedef enum _gceWHERE { + gcvWHERE_COMMAND_PREFETCH = 0, + gcvWHERE_COMMAND, + gcvWHERE_RASTER, + gcvWHERE_PIXEL, + gcvWHERE_BLT, +} gceWHERE; + +typedef enum _gceHOW { + gcvHOW_SEMAPHORE = 0x1, + gcvHOW_STALL = 0x2, + gcvHOW_SEMAPHORE_STALL = 0x3, +} gceHOW; + +typedef enum _gceSignalHandlerType { + gcvHANDLE_SIGFPE_WHEN_SIGNAL_CODE_IS_0 = 0x1, +} gceSignalHandlerType; + +typedef enum _gceFILE_MODE { + gcvFILE_CREATE = 0, + gcvFILE_APPEND, + gcvFILE_READ, + gcvFILE_CREATETEXT, + gcvFILE_APPENDTEXT, + gcvFILE_READTEXT, + gcvFILE_READWRITE, +} gceFILE_MODE; + +typedef enum _gceFILE_WHENCE { + gcvFILE_SEEK_SET, + gcvFILE_SEEK_CUR, + gcvFILE_SEEK_END +} gceFILE_WHENCE; + +/* Color format classes. */ +typedef enum _gceFORMAT_CLASS { + gcvFORMAT_CLASS_RGBA = 4500, + gcvFORMAT_CLASS_YUV, + gcvFORMAT_CLASS_INDEX, + gcvFORMAT_CLASS_LUMINANCE, + gcvFORMAT_CLASS_BUMP, + gcvFORMAT_CLASS_DEPTH, + gcvFORMAT_CLASS_ASTC, + gcvFORMAT_CLASS_COMPRESSED, + gcvFORMAT_CLASS_OTHER, + gcvFORMAT_CLASS_INTENSITY +} gceFORMAT_CLASS; + +/* Color format data type */ +typedef enum _gceFORMAT_DATATYPE { + gcvFORMAT_DATATYPE_UNSIGNED_NORMALIZED, + gcvFORMAT_DATATYPE_SIGNED_NORMALIZED, + gcvFORMAT_DATATYPE_UNSIGNED_INTEGER, + gcvFORMAT_DATATYPE_SIGNED_INTEGER, + gcvFORMAT_DATATYPE_FLOAT16, + gcvFORMAT_DATATYPE_FLOAT32, + gcvFORMAT_DATATYPE_FLOAT_E5B9G9R9, + gcvFORMAT_DATATYPE_FLOAT_B10G11R11F, + gcvFORMAT_DATATYPE_INDEX, + gcvFORMAT_DATATYPE_SRGB, + gcvFORMAT_DATATYPE_FLOAT32_UINT, +} gceFORMAT_DATATYPE; + +typedef enum _gceORIENTATION { + gcvORIENTATION_TOP_BOTTOM, + gcvORIENTATION_BOTTOM_TOP, +} gceORIENTATION; + +/* Special enums for width field in gcsFORMAT_COMPONENT. */ +typedef enum _gceCOMPONENT_CONTROL { + gcvCOMPONENT_NOTPRESENT = 0x00, + gcvCOMPONENT_DONTCARE = 0x80, + gcvCOMPONENT_WIDTHMASK = 0x7F, + gcvCOMPONENT_ODD = 0x80 +} gceCOMPONENT_CONTROL; + +/* User option. */ +typedef enum _gceDEBUG_MSG { + gcvDEBUG_MSG_NONE, + gcvDEBUG_MSG_ERROR, + gcvDEBUG_MSG_WARNING +} gceDEBUG_MSG; + +/* Compressed format now was defined same as dec400d, should be general. 
*/ +typedef enum _VIV_COMPRESS_FMT { + _VIV_CFMT_ARGB8 = 0, + _VIV_CFMT_XRGB8, + _VIV_CFMT_AYUV, + _VIV_CFMT_UYVY, + _VIV_CFMT_YUY2, + _VIV_CFMT_YUV_ONLY, + _VIV_CFMT_UV_MIX, + _VIV_CFMT_ARGB4, + _VIV_CFMT_XRGB4, + _VIV_CFMT_A1R5G5B5, + _VIV_CFMT_X1R5G5B5, + _VIV_CFMT_R5G6B5, + _VIV_CFMT_Z24S8, + _VIV_CFMT_Z24, + _VIV_CFMT_Z16, + _VIV_CFMT_A2R10G10B10, + _VIV_CFMT_BAYER, + _VIV_CFMT_SIGNED_BAYER, + _VIV_CFMT_VAA16, + _VIV_CFMT_S8, + + _VIV_CFMT_MAX, +} _VIV_COMPRESS_FMT; + +typedef enum _gcePROGRAM_STAGE { + gcvPROGRAM_STAGE_VERTEX = 0x0, + gcvPROGRAM_STAGE_TCS = 0x1, + gcvPROGRAM_STAGE_TES = 0x2, + gcvPROGRAM_STAGE_GEOMETRY = 0x3, + gcvPROGRAM_STAGE_GPIPE_COUNT = 0x4, + gcvPROGRAM_STAGE_FRAGMENT = 0x4, + gcvPROGRAM_STAGE_GRAPHICS_COUNT = 0x5, + gcvPROGRAM_STAGE_COMPUTE = 0x5, + gcvPROGRAM_STAGE_OPENCL = 0x6, + gcvPROGRAM_STAGE_LAST +} gcePROGRAM_STAGE; + +typedef enum _gcePROGRAM_STAGE_BIT { + gcvPROGRAM_STAGE_VERTEX_BIT = 1 << gcvPROGRAM_STAGE_VERTEX, + gcvPROGRAM_STAGE_TCS_BIT = 1 << gcvPROGRAM_STAGE_TCS, + gcvPROGRAM_STAGE_TES_BIT = 1 << gcvPROGRAM_STAGE_TES, + gcvPROGRAM_STAGE_GEOMETRY_BIT = 1 << gcvPROGRAM_STAGE_GEOMETRY, + gcvPROGRAM_STAGE_FRAGMENT_BIT = 1 << gcvPROGRAM_STAGE_FRAGMENT, + gcvPROGRAM_STAGE_COMPUTE_BIT = 1 << gcvPROGRAM_STAGE_COMPUTE, + gcvPROGRAM_STAGE_OPENCL_BIT = 1 << gcvPROGRAM_STAGE_OPENCL, +} gcePROGRAM_STAGE_BIT; + +typedef enum _gceBLIT_FLAG { + gcvBLIT_FLAG_SKIP_DEPTH_WRITE = 1 << 0, + gcvBLIT_FLAG_SKIP_STENCIL_WRITE = 1 << 1, +} gceBLIT_FLAG; + +/* Clear flags. */ +typedef enum _gceCLEAR { + gcvCLEAR_COLOR = 0x1, + gcvCLEAR_DEPTH = 0x2, + gcvCLEAR_STENCIL = 0x4, + gcvCLEAR_HZ = 0x8, + gcvCLEAR_WITH_GPU_ONLY = 0x100, + gcvCLEAR_WITH_CPU_ONLY = 0x200, + gcvCLEAR_MULTI_SLICES = 0x400, +} gceCLEAR; + +typedef enum _gceBLIT_TYPE { + gcvBLIT_DRAW_CLEAR = 0, + gcvBLIT_DRAW_BLIT = 1, + gcvBLIT_DRAW_BLIT_DEPTH = 2, + gcvBLIT_COMPUTE_BLIT = 3, + + /* last number, not a real type */ + gcvBLIT_NUM_TYPE +} gceBLIT_TYPE; + +typedef enum _gceSPLIT_DRAW_TYPE { + gcvSPLIT_DRAW_UNKNOWN = 0x0, + gcvSPLIT_DRAW_1, + gcvSPLIT_DRAW_2, + gcvSPLIT_DRAW_3, + gcvSPLIT_DRAW_4, + gcvSPLIT_DRAW_XFB, + gcvSPLIT_DRAW_INDEX_FETCH, + gcvSPLIT_DRAW_TCS, + gcvSPLIT_DRAW_STIPPLE, + gcvSPLIT_DRAW_WIDE_LINE, + gcvSPLIT_DRAW_LINES_HW_ZERO_AREA_LINE_PATCH, + gcvSPLIT_DRAW_LAST +} gceSPLIT_DRAW_TYPE; + +/* Blending targets. */ +typedef enum _gceBLEND_UNIT { + gcvBLEND_SOURCE, + gcvBLEND_TARGET, +} gceBLEND_UNIT; + +typedef enum _gceXfbCmd { + gcvXFBCMD_BEGIN = 0, + gcvXFBCMD_PAUSE = 1, + gcvXFBCMD_RESUME = 2, + gcvXFBCMD_END = 3, + gcvXFBCMD_PAUSE_INCOMMIT = 4, + gcvXFBCMD_RESUME_INCOMMIT = 5, + gcvXFBCMD_INVALID = 6, +} gceXfbCmd; + +typedef enum _gceXfbStatus { + gcvXFB_Disabled = 0, + gcvXFB_Paused, + gcvXFB_Enabled, +} gceXfbStatus; + +typedef enum _gceQueryStatus { + gcvQUERY_Disabled = 0, + gcvQUERY_Paused = 1, + gcvQUERY_Enabled = 2, +} gceQueryStatus; + +typedef enum _gceQueryCmd { + gcvQUERYCMD_BEGIN = 0, + gcvQUERYCMD_PAUSE = 1, + gcvQUERYCMD_RESUME = 2, + gcvQUERYCMD_END = 3, + gcvQUERYCMD_INVALID = 4, +} gceQueryCmd; + +typedef enum _gceQueryType { + gcvQUERY_OCCLUSION = 0, + gcvQUERY_XFB_WRITTEN = 1, + gcvQUERY_PRIM_GENERATED = 2, + gcvQUERY_TIME_ELAPSED = 3, + gcvQUERY_MAX_NUM = 4, +} gceQueryType; + +/* Cube faces. 
*/ +typedef enum _gceTEXTURE_FACE { + gcvFACE_NONE = 0, + gcvFACE_POSITIVE_X, + gcvFACE_NEGATIVE_X, + gcvFACE_POSITIVE_Y, + gcvFACE_NEGATIVE_Y, + gcvFACE_POSITIVE_Z, + gcvFACE_NEGATIVE_Z, +} gceTEXTURE_FACE; + +typedef enum _gceVERTEX_FORMAT { + gcvVERTEX_BYTE, + gcvVERTEX_UNSIGNED_BYTE, + gcvVERTEX_SHORT, + gcvVERTEX_UNSIGNED_SHORT, + gcvVERTEX_INT, + gcvVERTEX_UNSIGNED_INT, + gcvVERTEX_FIXED, + gcvVERTEX_HALF, + gcvVERTEX_FLOAT, + gcvVERTEX_DOUBLE, + gcvVERTEX_UNSIGNED_INT_10_10_10_2, + gcvVERTEX_INT_10_10_10_2, + gcvVERTEX_UNSIGNED_INT_2_10_10_10_REV, + gcvVERTEX_INT_2_10_10_10_REV, + /* integer format */ + gcvVERTEX_INT8, + gcvVERTEX_INT16, + gcvVERTEX_INT32, +} gceVERTEX_FORMAT; + +/* What the SW converting scheme to create temp attrib */ +typedef enum _gceATTRIB_SCHEME { + gcvATTRIB_SCHEME_KEEP = 0, + gcvATTRIB_SCHEME_2_10_10_10_REV_TO_FLOAT, + gcvATTRIB_SCHEME_BYTE_TO_IVEC4, + gcvATTRIB_SCHEME_SHORT_TO_IVEC4, + gcvATTRIB_SCHEME_INT_TO_IVEC4, + gcvATTRIB_SCHEME_UBYTE_TO_UVEC4, + gcvATTRIB_SCHEME_USHORT_TO_UVEC4, + gcvATTRIB_SCHEME_UINT_TO_UVEC4, + gcvATTRIB_SCHEME_DOUBLE_TO_FLOAT, + gcvATTRIB_SCHEME_UBYTE_BGRA_TO_UBYTE_RGBA, + gcvATTRIB_SCHEME_2_10_10_10_REV_BGRA_TO_FLOAT_RGBA, +} gceATTRIB_SCHEME; + +typedef enum _gceBUFOBJ_TYPE { + gcvBUFOBJ_TYPE_ARRAY_BUFFER = 1, + gcvBUFOBJ_TYPE_ELEMENT_ARRAY_BUFFER = 2, + gcvBUFOBJ_TYPE_UNIFORM_BUFFER = 3, + gcvBUFOBJ_TYPE_DRAW_INDIRECT_BUFFER = 4, + gcvBUFOBJ_TYPE_XFB_BUFFER = 5, + gcvBUFOBJ_TYPE_GENERIC_BUFFER = 100 + +} gceBUFOBJ_TYPE; + +typedef enum _gceBUFOBJ_USAGE { + gcvBUFOBJ_USAGE_NONE = 0x0, + gcvBUFOBJ_USAGE_STREAM_DRAW = 0x1, + gcvBUFOBJ_USAGE_STREAM_READ = 0x2, + gcvBUFOBJ_USAGE_STREAM_COPY = 0x3, + gcvBUFOBJ_USAGE_STATIC_DRAW = 0x4, + gcvBUFOBJ_USAGE_STATIC_READ = 0x5, + gcvBUFOBJ_USAGE_STATIC_COPY = 0x6, + gcvBUFOBJ_USAGE_DYNAMIC_DRAW = 0x7, + gcvBUFOBJ_USAGE_DYNAMIC_READ = 0x8, + gcvBUFOBJ_USAGE_DYNAMIC_COPY = 0x9, + gcvBUFOBJ_USAGE_PROBE_COUTNER = 0x10, + + /* Use 8bits to save the usage. */ + gcvBUFOBJ_USAGE_MASK = 0xFF, + + /* Some special flags. */ + /* special patch for optimaize performance, + * no fence and duplicate stream to ensure data correct + */ + gcvBUFOBJ_USAGE_FLAG_DISABLE_FENCE_DYNAMIC_STREAM = 0x100, + + /* This buffer object is used by driver, + * so we need to copy the data to the logical memory. + */ + gcvBUFOBJ_USAGE_FLAG_DATA_USED_BY_DRIVER = 0x200, + /* Allocate video memory under 4GB. */ + gcvBUFOBJ_USAGE_FLAG_32BIT_VA = 0x400, +} gceBUFOBJ_USAGE; + +/** + ** @ingroup gcoVG + ** + ** @brief Channel mask values. + ** + ** This enumeration defines the values for channel mask used in image + ** filtering. + */ + +/****************************************************************************** + ******************************** VG Enumerations ***************************** + ******************************************************************************/ + +/** + ** @ingroup gcoVG + ** + ** @brief Tiling mode for painting and imagig. + ** + ** This enumeration defines the tiling modes supported by the HAL. This is + ** in fact a one-to-one mapping of the OpenVG 1.1 tile modes. + */ +typedef enum _gceTILE_MODE { + gcvTILE_FILL, + gcvTILE_PAD, + gcvTILE_REPEAT, + gcvTILE_REFLECT +} gceTILE_MODE; + +/******************************************************************************/ +/** @ingroup gcoVG + ** + ** @brief The different paint modes. + ** + ** This enumeration lists the available paint modes. + */ +typedef enum _gcePAINT_TYPE { + /** Solid color. */ + gcvPAINT_MODE_SOLID, + + /** Linear gradient. 
*/ + gcvPAINT_MODE_LINEAR, + + /** Radial gradient. */ + gcvPAINT_MODE_RADIAL, + + /** Pattern. */ + gcvPAINT_MODE_PATTERN, + + /** Mode count. */ + gcvPAINT_MODE_COUNT +} gcePAINT_TYPE; + +/** + ** @ingroup gcoVG + ** + ** @brief Types of path data supported by HAL. + ** + ** This enumeration defines the types of path data supported by the HAL. + ** This is in fact a one-to-one mapping of the OpenVG 1.1 path types. + */ +typedef enum _gcePATHTYPE { + gcePATHTYPE_UNKNOWN = -1, + gcePATHTYPE_INT8, + gcePATHTYPE_INT16, + gcePATHTYPE_INT32, + gcePATHTYPE_FLOAT +} gcePATHTYPE; + +/** + ** @ingroup gcoVG + ** + ** @brief Supported path segment commands. + ** + ** This enumeration defines the path segment commands supported by the HAL. + */ +typedef enum _gceVGCMD { + gcvVGCMD_END, /* 0: 0x00 */ + gcvVGCMD_CLOSE, /* 1: 0x01 */ + gcvVGCMD_MOVE, /* 2: 0x02 */ + gcvVGCMD_MOVE_REL, /* 3: 0x03 */ + gcvVGCMD_LINE, /* 4: 0x04 */ + gcvVGCMD_LINE_REL, /* 5: 0x05 */ + gcvVGCMD_QUAD, /* 6: 0x06 */ + gcvVGCMD_QUAD_REL, /* 7: 0x07 */ + gcvVGCMD_CUBIC, /* 8: 0x08 */ + gcvVGCMD_CUBIC_REL, /* 9: 0x09 */ + gcvVGCMD_BREAK, /* 10: 0x0A */ + gcvVGCMD_HLINE, /* 11: ******* R E S E R V E D *******/ + gcvVGCMD_HLINE_REL, /* 12: ******* R E S E R V E D *******/ + gcvVGCMD_VLINE, /* 13: ******* R E S E R V E D *******/ + gcvVGCMD_VLINE_REL, /* 14: ******* R E S E R V E D *******/ + gcvVGCMD_SQUAD, /* 15: ******* R E S E R V E D *******/ + gcvVGCMD_SQUAD_REL, /* 16: ******* R E S E R V E D *******/ + gcvVGCMD_SCUBIC, /* 17: ******* R E S E R V E D *******/ + gcvVGCMD_SCUBIC_REL, /* 18: ******* R E S E R V E D *******/ + gcvVGCMD_SCCWARC, /* 19: ******* R E S E R V E D *******/ + gcvVGCMD_SCCWARC_REL, /* 20: ******* R E S E R V E D *******/ + gcvVGCMD_SCWARC, /* 21: ******* R E S E R V E D *******/ + gcvVGCMD_SCWARC_REL, /* 22: ******* R E S E R V E D *******/ + gcvVGCMD_LCCWARC, /* 23: ******* R E S E R V E D *******/ + gcvVGCMD_LCCWARC_REL, /* 24: ******* R E S E R V E D *******/ + gcvVGCMD_LCWARC, /* 25: ******* R E S E R V E D *******/ + gcvVGCMD_LCWARC_REL, /* 26: ******* R E S E R V E D *******/ + + /* The width of the command recognized by the hardware on bits. */ + gcvVGCMD_WIDTH = 5, + + /* Hardware command mask. */ + gcvVGCMD_MASK = (1 << gcvVGCMD_WIDTH) - 1, + + /* Command modifiers. */ + gcvVGCMD_H_MOD = 1 << gcvVGCMD_WIDTH, /* = 32 */ + gcvVGCMD_V_MOD = 2 << gcvVGCMD_WIDTH, /* = 64 */ + gcvVGCMD_S_MOD = 3 << gcvVGCMD_WIDTH, /* = 96 */ + gcvVGCMD_ARC_MOD = 4 << gcvVGCMD_WIDTH, /* = 128 */ + + /* Emulated LINE commands. */ + gcvVGCMD_HLINE_EMUL = gcvVGCMD_H_MOD | gcvVGCMD_LINE, /* = 36 */ + gcvVGCMD_HLINE_EMUL_REL = gcvVGCMD_H_MOD | gcvVGCMD_LINE_REL, /* = 37 */ + gcvVGCMD_VLINE_EMUL = gcvVGCMD_V_MOD | gcvVGCMD_LINE, /* = 68 */ + gcvVGCMD_VLINE_EMUL_REL = gcvVGCMD_V_MOD | gcvVGCMD_LINE_REL, /* = 69 */ + + /* Emulated SMOOTH commands. */ + gcvVGCMD_SQUAD_EMUL = gcvVGCMD_S_MOD | gcvVGCMD_QUAD, /* = 102 */ + gcvVGCMD_SQUAD_EMUL_REL = gcvVGCMD_S_MOD | gcvVGCMD_QUAD_REL, /* = 103 */ + gcvVGCMD_SCUBIC_EMUL = gcvVGCMD_S_MOD | gcvVGCMD_CUBIC, /* = 104 */ + gcvVGCMD_SCUBIC_EMUL_REL = gcvVGCMD_S_MOD | gcvVGCMD_CUBIC_REL, /* = 105 */ + + /* Emulation ARC commands. 
*/ + gcvVGCMD_ARC_LINE = gcvVGCMD_ARC_MOD | gcvVGCMD_LINE, /* = 132 */ + gcvVGCMD_ARC_LINE_REL = gcvVGCMD_ARC_MOD | gcvVGCMD_LINE_REL, /* = 133 */ + gcvVGCMD_ARC_QUAD = gcvVGCMD_ARC_MOD | gcvVGCMD_QUAD, /* = 134 */ + gcvVGCMD_ARC_QUAD_REL = gcvVGCMD_ARC_MOD | gcvVGCMD_QUAD_REL /* = 135 */ +} gceVGCMD; +typedef enum _gceVGCMD *gceVGCMD_PTR; + +/** + ** @ingroup gcoVG + ** + ** @brief Blending modes supported by the HAL. + ** + ** This enumeration defines the blending modes supported by the HAL. This is + ** in fact a one-to-one mapping of the OpenVG 1.1 blending modes. + */ +typedef enum _gceVG_BLEND { + gcvVG_BLEND_SRC, + gcvVG_BLEND_SRC_OVER, + gcvVG_BLEND_DST_OVER, + gcvVG_BLEND_SRC_IN, + gcvVG_BLEND_DST_IN, + gcvVG_BLEND_MULTIPLY, + gcvVG_BLEND_SCREEN, + gcvVG_BLEND_DARKEN, + gcvVG_BLEND_LIGHTEN, + gcvVG_BLEND_ADDITIVE, + gcvVG_BLEND_SUBTRACT, + gcvVG_BLEND_FILTER +} gceVG_BLEND; + +/** + ** @ingroup gcoVG + ** + ** @brief Image modes supported by the HAL. + ** + ** This enumeration defines the image modes supported by the HAL. This is + ** in fact a one-to-one mapping of the OpenVG 1.1 image modes with the addition + ** of NO IMAGE. + */ +typedef enum _gceVG_IMAGE { + gcvVG_IMAGE_NONE, + gcvVG_IMAGE_NORMAL, + gcvVG_IMAGE_MULTIPLY, + gcvVG_IMAGE_STENCIL, + gcvVG_IMAGE_FILTER +} gceVG_IMAGE; + +/** + ** @ingroup gcoVG + ** + ** @brief Filter mode patterns and imaging. + ** + ** This enumeration defines the filter modes supported by the HAL. + */ +typedef enum _gceIMAGE_FILTER { + gcvFILTER_POINT, + gcvFILTER_LINEAR, + gcvFILTER_BI_LINEAR +} gceIMAGE_FILTER; + +/** + ** @ingroup gcoVG + ** + ** @brief Primitive modes supported by the HAL. + ** + ** This enumeration defines the primitive modes supported by the HAL. + */ +typedef enum _gceVG_PRIMITIVE { + gcvVG_SCANLINE, + gcvVG_RECTANGLE, + gcvVG_TESSELLATED, + gcvVG_TESSELLATED_TILED +} gceVG_PRIMITIVE; + +/** + ** @ingroup gcoVG + ** + ** @brief Rendering quality modes supported by the HAL. + ** + ** This enumeration defines the rendering quality modes supported by the HAL. + */ +typedef enum _gceRENDER_QUALITY { + gcvVG_NONANTIALIASED, + gcvVG_2X2_MSAA, + gcvVG_2X4_MSAA, + gcvVG_4X4_MSAA +} gceRENDER_QUALITY; + +/** + ** @ingroup gcoVG + ** + ** @brief Fill rules supported by the HAL. + ** + ** This enumeration defines the fill rules supported by the HAL. + */ +typedef enum _gceFILL_RULE { gcvVG_EVEN_ODD, gcvVG_NON_ZERO } gceFILL_RULE; + +/** + ** @ingroup gcoVG + ** + ** @brief Cap styles supported by the HAL. + ** + ** This enumeration defines the cap styles supported by the HAL. + */ +typedef enum _gceCAP_STYLE { gcvCAP_BUTT, gcvCAP_ROUND, gcvCAP_SQUARE } gceCAP_STYLE; + +/** + ** @ingroup gcoVG + ** + ** @brief Join styles supported by the HAL. + ** + ** This enumeration defines the join styles supported by the HAL. + */ +typedef enum _gceJOIN_STYLE { + gcvJOIN_MITER, + gcvJOIN_ROUND, + gcvJOIN_BEVEL +} gceJOIN_STYLE; + +/* Base values for channel mask definitions. 
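 **
 ** Each base value below occupies its own bit (R, G, B, A), and the gceCHANNEL
 ** enumerants that follow are plain OR-combinations of them; for example
 ** gcvCHANNEL_RGBA works out to 0xF and gcvCHANNEL_XGXA to 0xA. A minimal,
 ** hypothetical check that only illustrates the encoding (not part of this header):
 **
 **     // Non-zero when the alpha channel takes part in the filter operation.
 **     static int gcChannelHasAlpha(gceCHANNEL channels)
 **     {
 **         return (channels & gcvCHANNEL_A) != 0;
 **     }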
*/
+#define gcvCHANNEL_X (0)
+#define gcvCHANNEL_R (1 << 0)
+#define gcvCHANNEL_G (1 << 1)
+#define gcvCHANNEL_B (1 << 2)
+#define gcvCHANNEL_A (1 << 3)
+
+typedef enum _gceCHANNEL {
+    gcvCHANNEL_XXXX = (gcvCHANNEL_X | gcvCHANNEL_X | gcvCHANNEL_X | gcvCHANNEL_X),
+    gcvCHANNEL_XXXA = (gcvCHANNEL_X | gcvCHANNEL_X | gcvCHANNEL_X | gcvCHANNEL_A),
+    gcvCHANNEL_XXBX = (gcvCHANNEL_X | gcvCHANNEL_X | gcvCHANNEL_B | gcvCHANNEL_X),
+    gcvCHANNEL_XXBA = (gcvCHANNEL_X | gcvCHANNEL_X | gcvCHANNEL_B | gcvCHANNEL_A),
+
+    gcvCHANNEL_XGXX = (gcvCHANNEL_X | gcvCHANNEL_G | gcvCHANNEL_X | gcvCHANNEL_X),
+    gcvCHANNEL_XGXA = (gcvCHANNEL_X | gcvCHANNEL_G | gcvCHANNEL_X | gcvCHANNEL_A),
+    gcvCHANNEL_XGBX = (gcvCHANNEL_X | gcvCHANNEL_G | gcvCHANNEL_B | gcvCHANNEL_X),
+    gcvCHANNEL_XGBA = (gcvCHANNEL_X | gcvCHANNEL_G | gcvCHANNEL_B | gcvCHANNEL_A),
+
+    gcvCHANNEL_RXXX = (gcvCHANNEL_R | gcvCHANNEL_X | gcvCHANNEL_X | gcvCHANNEL_X),
+    gcvCHANNEL_RXXA = (gcvCHANNEL_R | gcvCHANNEL_X | gcvCHANNEL_X | gcvCHANNEL_A),
+    gcvCHANNEL_RXBX = (gcvCHANNEL_R | gcvCHANNEL_X | gcvCHANNEL_B | gcvCHANNEL_X),
+    gcvCHANNEL_RXBA = (gcvCHANNEL_R | gcvCHANNEL_X | gcvCHANNEL_B | gcvCHANNEL_A),
+
+    gcvCHANNEL_RGXX = (gcvCHANNEL_R | gcvCHANNEL_G | gcvCHANNEL_X | gcvCHANNEL_X),
+    gcvCHANNEL_RGXA = (gcvCHANNEL_R | gcvCHANNEL_G | gcvCHANNEL_X | gcvCHANNEL_A),
+    gcvCHANNEL_RGBX = (gcvCHANNEL_R | gcvCHANNEL_G | gcvCHANNEL_B | gcvCHANNEL_X),
+    gcvCHANNEL_RGBA = (gcvCHANNEL_R | gcvCHANNEL_G | gcvCHANNEL_B | gcvCHANNEL_A),
+} gceCHANNEL;
+
+/* Defines the statistical data keys monitored by the statistics module. */
+typedef enum _gceSTATISTICS {
+    gcvFRAME_FPS = 1,
+} gceSTATISTICS;
+
+/* Value types. */
+typedef enum _gceVALUE_TYPE {
+    gcvVALUE_UINT = 0x0,
+    gcvVALUE_FIXED,
+    gcvVALUE_FLOAT,
+    gcvVALUE_INT,
+
+    /*
+    ** The value needs to be unsigned denormalized; clamp to (0.0 - 1.0) first.
+    */
+    gcvVALUE_FLAG_UNSIGNED_DENORM = 0x00010000,
+
+    /*
+    ** The value needs to be signed denormalized; clamp to (-1.0 - 1.0) first.
+    */
+    gcvVALUE_FLAG_SIGNED_DENORM = 0x00020000,
+
+    /*
+    ** The value needs gamma correction.
+    */
+    gcvVALUE_FLAG_GAMMAR = 0x00040000,
+
+    /*
+    ** The value needs to be converted from float to float16.
+    */
+    gcvVALUE_FLAG_FLOAT_TO_FLOAT16 = 0x0080000,
+
+    /*
+    ** Mask for flag field.
+ */ + gcvVALUE_FLAG_MASK = 0xFFFF0000, +} gceVALUE_TYPE; + +typedef enum _gceTRACEMODE { + gcvTRACEMODE_NONE = 0, + gcvTRACEMODE_FULL = 1, + gcvTRACEMODE_LOGGER = 2, + gcvTRACEMODE_ALLZONE = 3, + gcvTRACEMODE_PRE = 4, + gcvTRACEMODE_POST = 5, +} gceTRACEMODE; + +enum { + /* GPU can't issue more that 32bit physical address */ + gcvPLATFORM_FLAG_LIMIT_4G_ADDRESS = 1 << 0, + + gcvPLATFORM_FLAG_IMX_MM = 1 << 1, +}; + +#if gcdUSE_CAPBUF +typedef enum _gceCAPBUF_META_TYPE { + gcvCAPBUF_META_TYPE_BASE = 0, + gcvCAPBUF_META_TYPE_STATE_BUFFER = 0, + gcvCAPBUF_META_TYPE_DRAW_ID, + gcvCAPBUF_META_TYPE_SH_UNIFORM, + gcvCAPBUF_META_TYPE_VIP_SRAM, + gcvCAPBUF_META_TYPE_AXI_SRAM, + gcvCAPBUF_META_TYPE_PPU_PARAMETERS, + gcvCAPBUF_META_TYPE_VIP_SRAM_REMAP, + gcvCAPBUF_META_TYPE_AXI_SRAM_REMAP, + gcvCAPBUF_META_TYPE_IMAGE_PHYSICAL_ADDRESS, + gcvCAPBUF_META_TYPE_IMAGE_PHYSICAL_ADDRESS_40BIT, + gcvCAPBUF_META_TYPE_SH_INST_ADDRESS, + gcvCAPBUF_META_TYPE_SH_UNIFORM_ARGS_LOCAL_ADDRESS_SPACE, + gcvCAPBUF_META_TYPE_SH_UNIFORM_ARGS_CONSTANT_ADDRESS_SPACE, + gcvCAPBUF_META_TYPE_NN_TP_INST_ADDRESS, + gcvCAPBUF_META_TYPE_LOW32_OF_40BIT_PHY_ADDR, + gcvCAPBUF_META_TYPE_GPU_SYNC_CMD, + gcvCAPBUF_META_TYPE_SH_UNIFORM_ARGS_PRINTFADDRESS, + gcvCAPBUF_META_TYPE_SH_UNIFORM_ARGS_MAXPRINTFADDRESS, + /* Keep it at the end of the list. */ + gcvCAPBUF_META_TYPE_COUNT +} gceCAPBUF_META_TYPE; + +typedef enum _gceCAPBUF_SH_UNIFROM_ARGS { + gcvCAPBUF_SH_UNIFORM_ARGS_INVALID = 0, + gcvCAPBUF_SH_UNIFORM_ARGS_IMAGE_PHYSICAL_ADDRESS, + gcvCAPBUF_SH_UNIFORM_ARGS_IMAGE_PHYSICAL_ADDRESS_40BIT, + gcvCAPBUF_SH_UNIFORM_ARGS_LOCAL_ADDRESS_SPACE, + gcvCAPBUF_SH_UNIFORM_ARGS_LOCAL_ADDRESS_SPACE_COMBINE, + gcvCAPBUF_SH_UNIFORM_ARGS_CONSTANT_ADDRESS_SPACE, + gcvCAPBUF_SH_UNIFORM_ARGS_LOW32_OF_40BIT_PHY_ADDR, + gcvCAPBUF_SH_UNIFORM_ARGS_LOW_HIGH_40BIT_PHY_ADDR, + gcvCAPBUF_SH_UNIFORM_ARGS_PRINTFADDRESS, + gcvCAPBUF_SH_UNIFORM_ARGS_MAXPRINTFADDRESS, + + gcvCAPBUF_SH_UNIFORM_ARGS_40BIT_IMAGE_ADDR, + /* Keep it at the end of the list. */ + gcvCAPBUF_SH_UNIFORM_ARGS_COUNT +} gceCAPBUF_SH_UNIFORM_ARGS; + +typedef enum _gceCAPBUF_SH_UNIFORM_STATE_DATA_TYPE +{ + gcvCAPBUF_SH_UNIFORM_STATE_DATA_TYPE_INVALID = 0, + gcvCAPBUF_SH_UNIFORM_STATE_DATA_TYPE_IMAGE_PHYSICAL_ADDRESS, + gcvCAPBUF_SH_UNIFORM_STATE_DATA_TYPE_IMAGE_PHYSICAL_ADDRESS_40BIT_LOW, + gcvCAPBUF_SH_UNIFORM_STATE_DATA_TYPE_IMAGE_PHYSICAL_ADDRESS_40BIT_HIGH, + gcvCAPBUF_SH_UNIFORM_STATE_DATA_TYPE_IMAGE_PHYSICAL_ADDRESS_40BIT_HIGH1, + gcvCAPBUF_SH_UNIFORM_STATE_DATA_TYPE_LOCAL_ADDRESS_SPACE, + gcvCAPBUF_SH_UNIFORM_STATE_DATA_TYPE_CONSTANT_ADDRESS_SPACE, + gcvCAPBUF_SH_UNIFORM_STATE_DATA_TYPE_LOW32_OF_40BIT_PHY_ADDR, + gcvCAPBUF_SH_UNIFORM_STATE_DATA_TYPE_LOW_HIGH_40BIT_PHY_ADDR_LOW, + gcvCAPBUF_SH_UNIFORM_STATE_DATA_TYPE_LOW_HIGH_40BIT_PHY_ADDR_HIGH, + gcvCAPBUF_SH_UNIFORM_STATE_DATA_TYPE_PRINTFADDRESS, + gcvCAPBUF_SH_UNIFORM_STATE_DATA_TYPE_MAXPRINTFADDRESS, + /* Keep it at the end of the list. 
*/ + gcvCAPBUF_SH_UNIFORM_STATE_DATA_TYPE_COUNT +} gceCAPBUF_SH_UNIFORM_STATE_DATA_TYPE; + +typedef enum _gceCAPBUF_PPU_PARAMETERS_INDEX { + gcvCAPBUF_PPU_GLOBAL_OFFSET_X = 0, + gcvCAPBUF_PPU_GLOBAL_OFFSET_Y, + gcvCAPBUF_PPU_GLOBAL_OFFSET_Z, + gcvCAPBUF_PPU_GLOBAL_SCALE_X, + gcvCAPBUF_PPU_GLOBAL_SCALE_Y, + gcvCAPBUF_PPU_GLOBAL_SCALE_Z, + gcvCAPBUF_PPU_GROUP_SIZE_X, + gcvCAPBUF_PPU_GROUP_SIZE_Y, + gcvCAPBUF_PPU_GROUP_SIZE_Z, + gcvCAPBUF_PPU_GROUP_COUNT_X, + gcvCAPBUF_PPU_GROUP_COUNT_Y, + gcvCAPBUF_PPU_GROUP_COUNT_Z, + gcvCAPBUF_PPU_PARAMETERS_COUNT +} gceCAPBUF_PPU_GLOBALE_OFFSET_INDEX; + +#endif + +/* GL_VIV internal usage */ +#ifndef GL_MAP_BUFFER_OBJ_VIV +#define GL_MAP_BUFFER_OBJ_VIV 0x10000 +#endif + +/* Command buffer usage. */ +#define gcvCOMMAND_2D (1 << 0) +#define gcvCOMMAND_3D (1 << 1) + +/* Default chip ID means chip ID same as core index. */ +#define gcvCHIP_ID_DEFAULT (~0U) + +/* Tile status header size */ +#ifndef gcvTS_FC_HEADER_SIZE +#define gcvTS_FC_HEADER_SIZE 128 +#endif + +/****************************************************************************** + ***************************** Object Declarations **************************** + ******************************************************************************/ +typedef struct _gckCONTEXT *gckCONTEXT; +typedef struct _gcoCMDBUF *gcoCMDBUF; + +typedef struct _gcsSTATE_DELTA *gcsSTATE_DELTA_PTR; +typedef struct _gcsQUEUE *gcsQUEUE_PTR; +typedef struct _gcoQUEUE *gcoQUEUE; +typedef struct _gcsHAL_INTERFACE *gcsHAL_INTERFACE_PTR; +#if VIVANTE_PROFILER +typedef struct _gcsHAL_PROFILER_INTERFACE *gcsHAL_PROFILER_INTERFACE_PTR; +#endif +typedef struct _gcs2D_PROFILE *gcs2D_PROFILE_PTR; + + +#ifdef __cplusplus +} +#endif + +#endif /* __gc_hal_enum_h_ */ + + diff --git a/unified-tina/inc/HAL/gc_hal_options.h b/unified-tina/inc/HAL/gc_hal_options.h new file mode 100644 index 0000000..c3a8698 --- /dev/null +++ b/unified-tina/inc/HAL/gc_hal_options.h @@ -0,0 +1,1578 @@ +/**************************************************************************** +* +* Copyright (c) 2005 - 2023 by Vivante Corp. All rights reserved. +* +* The material in this file is confidential and contains trade secrets +* of Vivante Corporation. This is proprietary information owned by +* Vivante Corporation. No part of this work may be disclosed, +* reproduced, copied, transmitted, or used in any way for any purpose, +* without the express written permission of Vivante Corporation. +* +*****************************************************************************/ + + +#ifndef __gc_hal_options_h_ +#define __gc_hal_options_h_ + +/* + * gcdSECURITY + */ +#ifndef gcdSECURITY +#define gcdSECURITY 0 +#endif + +/* + * gcdPRINT_VERSION + * + * Print HAL version. + */ +#ifndef gcdPRINT_VERSION +#define gcdPRINT_VERSION 0 +#endif + +/* + * USE_KERNEL_VIRTUAL_BUFFERS + * + * This define enables the use of VM for gckCommand and fence buffers. + */ +#ifndef USE_KERNEL_VIRTUAL_BUFFERS +#if defined(UNDER_CE) +# define USE_KERNEL_VIRTUAL_BUFFERS 1 +# else +# define USE_KERNEL_VIRTUAL_BUFFERS 1 +# endif +#endif + +/* + * USE_NEW_LINUX_SIGNAL + * + * This define enables the Linux kernel signaling between kernel and user. + */ +#ifndef USE_NEW_LINUX_SIGNAL +#define USE_NEW_LINUX_SIGNAL 0 +#endif + +/* + * USE_LINUX_PCIE + * + * This define enables galcore as a Linux PCIE driver. + */ +#ifndef USE_LINUX_PCIE +#define USE_LINUX_PCIE 0 +#endif + +/* + * VIVANTE_PROFILER + * + * This define enables the profiler for hardware counters. 
+ */ +#ifndef VIVANTE_PROFILER +#define VIVANTE_PROFILER 1 +#endif + +/* + * gcdUSE_VG + * + * Enable VG HAL layer (only for GC350). + */ +#ifndef gcdUSE_VG +#define gcdUSE_VG 0 +#endif + +/* + * gcdUSE_VX + * + * Enable VX HAL layer. + */ +#ifndef gcdUSE_VX +#define gcdUSE_VX 1 +#endif + +/* + * PROFILE_HAL_COUNTERS + * + * This define enables HAL counter profiling support. HW and SHADER + * counter profiling depends on this. + */ +#ifndef PROFILE_HAL_COUNTERS +#define PROFILE_HAL_COUNTERS 1 +#endif + +/* + * PROFILE_HW_COUNTERS + * + * This define enables HW counter profiling support. + */ +#ifndef PROFILE_HW_COUNTERS +#define PROFILE_HW_COUNTERS 1 +#endif + +/* + * PROFILE_SHADER_COUNTERS + * + * This define enables SHADER counter profiling support. + */ +#ifndef PROFILE_SHADER_COUNTERS +#define PROFILE_SHADER_COUNTERS 1 +#endif + +/* + * COMMAND_PROCESSOR_VERSION + * + * The version of the command buffer and task manager. + */ +#define COMMAND_PROCESSOR_VERSION 1 + +/* + * gcdDUMP + * + * Dump for hw capture. + * When set to 1, a dump of all states and memory uploads, as well as other + * hardware related execution will be printed to the debug console. This + * data can be used for playing back applications. + * + * When set to 2, for vxc, all output memory will be dump. + * + * Please get tweak settings in gc_hal_dump.h. + */ +#ifndef gcdDUMP +#define gcdDUMP 0 +#endif + +/* + * gcdDUMP_IN_KERNEL + * + * Enhanced feature for hw capture. + * Required for MCFE. + * When set to 1, all dumps will happen in the kernel. This is handy if + * you want the kernel to dump its command buffers as well and the data + * needs to be in sync. + * + * Dump in kernel implies kernel command dump. + * See debugfs:/gc/dump/ for runtime configuration. + */ +#ifndef gcdDUMP_IN_KERNEL +#define gcdDUMP_IN_KERNEL 0 +#endif + +/* + * gcdDUMP_HW_SUBCOMMAND + * + * Dump for hw command buffer + * When set to 1, will dump hw command buffer when GPU/VIP hang. + */ +#ifndef gcdDUMP_HW_SUBCOMMAND +#define gcdDUMP_HW_SUBCOMMAND 0 +#endif + +/* + * gcdDUMP_2D + * + * Dump for 2D capture. + * When set to non-zero, it will dump the 2D command and surface. + * + * Please get tweak settings in gc_hal_dump.h. + */ +#ifndef gcdDUMP_2D +#define gcdDUMP_2D 0 +#endif + +/* + * gcdDUMP_API + * + * Dump driver level API. + * When set to 1, a high level dump of the EGL and GL/VG APs's are + * captured. + * + * Please get tweak settings in gc_hal_dump.h. + */ +#ifndef gcdDUMP_API +#define gcdDUMP_API 0 +#endif + +/* + * gcdDUMP_PER_OPERATION + * + * Operation based dump. + * + * Dump the block as below. + * 1. Multiple operations belong to the same SW tiling block. + * 2. Single operation which is NOT in any SW tiling block. + */ +#ifndef gcdDUMP_PER_OPERATION +#define gcdDUMP_PER_OPERATION 0 +#endif + +/* + * gcdDEBUG_OPTION + * When set to 1, the debug options are enabled. We must set other MACRO to enable + * sub case. + */ +#ifndef gcdDEBUG_OPTION +#define gcdDEBUG_OPTION 0 + +#if gcdDEBUG_OPTION +/* + * gcdDEBUG_OPTION_KEY + * The process name of debug application. + */ +#ifndef gcdDEBUG_OPTION_KEY +# define gcdDEBUG_OPTION_KEY "process" +# endif +/* + * gcdDEBUG_OPTION_NO_GL_DRAWS + * When set to 1, all glDrawArrays and glDrawElements will be skip. + */ +#ifndef gcdDEBUG_OPTION_NO_GL_DRAWS +# define gcdDEBUG_OPTION_NO_GL_DRAWS 0 +# endif +/* + * gcdDEBUG_OPTION_NO_DRAW_PRIMITIVES + * When set to 1, all DrawPrimitives will be skip. 
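 *
 * Like every switch in this block, this only provides a default: because each
 * option is wrapped in #ifndef, a debug build can pre-define the ones it needs
 * before this header is included (e.g. via a local config header or -D compiler
 * flags). An illustrative combination, assuming the build passes these through:
 *
 *     #define gcdDEBUG_OPTION             1
 *     #define gcdDEBUG_OPTION_NO_GL_DRAWS 1    // skip all glDrawArrays and glDrawElements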
+ */ +#ifndef gcdDEBUG_OPTION_NO_DRAW_PRIMITIVES +# define gcdDEBUG_OPTION_NO_DRAW_PRIMITIVES 0 +# endif +/* + * gcdDEBUG_OPTION_SKIP_SWAP + * When set to 1, just one out of gcdDEBUG_OPTION_SKIP_FRAMES(such as 1/10) + * eglSwapBuffers will be resolve, others skip. + */ +#ifndef gcdDEBUG_OPTION_SKIP_SWAP +# define gcdDEBUG_OPTION_SKIP_SWAP 0 +# define gcdDEBUG_OPTION_SKIP_FRAMES 10 +# endif +/* + * gcdDEBUG_OPTION_FORCE_16BIT_RENDER_TARGET + * When set to 1, the format of render target will force to RGB565. + */ +#ifndef gcdDEBUG_OPTION_FORCE_16BIT_RENDER_TARGET +# define gcdDEBUG_OPTION_FORCE_16BIT_RENDER_TARGET 0 +# endif +/* + * gcdDEBUG_OPTION_NONE_TEXTURE + * When set to 1, the type of texture will be set to + * 0x0. + */ +#ifndef gcdDEBUG_OPTION_NONE_TEXTURE +# define gcdDEBUG_OPTION_NONE_TEXTURE 0 +# endif +/* + * gcdDEBUG_OPTION_NONE_DEPTH + * When set to 1, the depth format of surface will be set to gcvSURF_UNKNOWN. + */ +#ifndef gcdDEBUG_OPTION_NONE_DEPTH +# define gcdDEBUG_OPTION_NONE_DEPTH 0 +# endif + +/* + * gcdDEBUG_FORCE_CONTEXT_UPDATE + * When set to 1, context will be updated before every commit. + */ +#ifndef gcdDEBUG_FORCE_CONTEXT_UPDATE +# define gcdDEBUG_FORCE_CONTEXT_UPDATE 0 +# endif + +/* + * gcdDEBUG_OPTION_SPECIFY_POOL + * When set to 1, pool of each type surface can be specified by + * changing poolPerType[] in gcsSURF_NODE_Construct. + */ +#ifndef gcdDEBUG_OPTION_SPECIFY_POOL +# define gcdDEBUG_OPTION_SPECIFY_POOL 0 +# endif + +# endif +#endif + +/* + * gcdENABLE_FSCALE_VAL_ADJUST + * When non-zero, FSCALE_VAL when gcvPOWER_ON can be adjusted externally. + */ +#ifndef gcdENABLE_FSCALE_VAL_ADJUST +#define gcdENABLE_FSCALE_VAL_ADJUST 1 +#endif + +#ifndef gcdCAPTURE_ONLY_MODE +#define gcdCAPTURE_ONLY_MODE 0 +#endif + +/* + * gcdNULL_DRIVER + * + * Set to 1 for infinite speed hardware. + * Set to 2 for bypassing the HAL. + */ +#ifndef gcdNULL_DRIVER +#define gcdNULL_DRIVER 0 +#endif + +/* + * gcdENABLE_TIMEOUT_DETECTION + * + * Enable timeout detection. + */ +#ifndef gcdENABLE_TIMEOUT_DETECTION +#define gcdENABLE_TIMEOUT_DETECTION 0 +#endif + +/* + * gcdCMD_BUFFER_SIZE + * + * Number of bytes in a command buffer. + */ +#ifndef gcdCMD_BUFFER_SIZE +#if gcdCAPTURE_ONLY_MODE +# define gcdCMD_BUFFER_SIZE (4 << 10) +# else +# define gcdCMD_BUFFER_SIZE (128 << 10) +# endif +#endif + +/* + * gcdCMD_BLT_BUFFER_SIZE + * + * Number of bytes in a command buffer. + */ +#ifndef gcdCMD_BLT_BUFFER_SIZE +#define gcdCMD_BLT_BUFFER_SIZE (1 << 10) +#endif + +/* + * gcdCMD_BUFFERS + * + * Number of command buffers to use per client. + */ +#ifndef gcdCMD_BUFFERS +#if gcdCAPTURE_ONLY_MODE +# define gcdCMD_BUFFERS 1 +# else +# define gcdCMD_BUFFERS 2 +# endif +#endif + +/* + * gcdMAX_CMD_BUFFERS + * + * Maximum number of command buffers to use per client. + */ +#ifndef gcdMAX_CMD_BUFFERS +#define gcdMAX_CMD_BUFFERS 8 +#endif + +/* + * gcdCOMMAND_QUEUES + * + * Number of command queues in the kernel. + */ +#ifndef gcdCOMMAND_QUEUES +#define gcdCOMMAND_QUEUES 2 +#endif + +/* + * gcdPOWER_CONTROL_DELAY + * + * The delay in milliseconds required to wait until the GPU has woke up + * from a suspend or power-down state. This is system dependent because + * the bus clock also needs to stabalize. + */ +#ifndef gcdPOWER_CONTROL_DELAY +#define gcdPOWER_CONTROL_DELAY 0 +#endif + +/* + * gcdMMU_SIZE + * + * Size of the MMU page table in bytes. Each 4 bytes can hold 4kB worth of + * virtual data. 
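 *
 * As a worked example of the sizing rule above: the default of (256 << 10)
 * bytes is 256 KiB of page table, i.e. 65536 four-byte entries, and at 4 kB of
 * virtual data per entry that covers 65536 x 4 kB = 256 MB of GPU virtual
 * address space.
 *
 *     // (gcdMMU_SIZE / 4) * 4096 == 0x10000000, i.e. 256 MB for the default size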
+ */ +#ifndef gcdMMU_SIZE +#define gcdMMU_SIZE (256 << 10) +#endif + +#ifndef gcdGC355_VGMMU_MEMORY_SIZE_KB +#define gcdGC355_VGMMU_MEMORY_SIZE_KB 32 +#endif + +/* + * gcdREGISTER_READ_FROM_USER + * gcdREGISTER_WRITE_FROM_USER + * + * Set to 1 to allow IOCTL calls to get through from user land. This + * should only be in debug or development drops. + */ +#ifndef gcdREGISTER_READ_FROM_USER +#define gcdREGISTER_READ_FROM_USER 1 +#endif + +#ifndef gcdREGISTER_WRITE_FROM_USER +#define gcdREGISTER_WRITE_FROM_USER 0 +#endif + +/* + * gcdHEAP_SIZE + * + * Set the allocation size for the internal heaps. Each time a heap is + * full, a new heap will be allocated with this minmimum amount of bytes. + * The bigger this size, the fewer heaps there are to allocate, the better + * the performance. However, heaps won't be freed until they are + * completely free, so there might be some more memory waste if the size is + * too big. + */ +#ifndef gcdHEAP_SIZE +#define gcdHEAP_SIZE (64 << 10) +#endif + +/* + * gcdPOWER_SUSPEND_WHEN_IDLE + * + * Set to 1 to make GPU enter gcvPOWER_SUSPEND when idle detected, + * otherwise GPU will enter gcvPOWER_IDLE. + */ +#ifndef gcdPOWER_SUSPEND_WHEN_IDLE +#define gcdPOWER_SUSPEND_WHEN_IDLE 1 +#endif + +#ifndef gcdFPGA_BUILD +#define gcdFPGA_BUILD 0 +#endif + +/* + * gcdGPU_TIMEOUT + * + * This define specified the number of milliseconds the system will wait + * before it broadcasts the GPU is stuck. In other words, it will define + * the timeout of any operation that needs to wait for the GPU. + * + * If the value is 0, no timeout will be checked for. + */ +#ifndef gcdGPU_TIMEOUT +#if gcdFPGA_BUILD +# define gcdGPU_TIMEOUT 2000000 +# else +# define gcdGPU_TIMEOUT 20000 +# endif +#endif + +/* + * gcdGPU_2D_TIMEOUT + * + * This define specified the number of milliseconds the system will wait + * before it broadcasts the 2D GPU is stuck. In other words, it will define + * the timeout of any operation that needs to wait for the GPU. + * + * If the value is 0, no timeout will be checked for. + */ +#ifndef gcdGPU_2D_TIMEOUT +#define gcdGPU_2D_TIMEOUT 20000 +#endif + +/* + * gcdGPU_ADVANCETIMER + * + * it is advance timer. + */ +#ifndef gcdGPU_ADVANCETIMER +#define gcdGPU_ADVANCETIMER 250 +#endif + +/* + * gcdSTATIC_LINK + * + * This define disalbes static linking; + */ +#ifndef gcdSTATIC_LINK +#define gcdSTATIC_LINK 0 +#endif + +/* + * gcdUSE_NEW_HEAP + * + * Setting this define to 1 enables new heap. + */ +#ifndef gcdUSE_NEW_HEAP +#define gcdUSE_NEW_HEAP 0 +#endif + +/* + * gcdCMD_NO_2D_CONTEXT + * + * This define enables no-context 2D command buffer. + */ +#ifndef gcdCMD_NO_2D_CONTEXT +#define gcdCMD_NO_2D_CONTEXT 1 +#endif + +/* + * gcdENABLE_BUFFER_ALIGNMENT + * + * When enabled, video memory is allocated with at least 16KB alignment + * between multiple sub-buffers. + */ +#ifndef gcdENABLE_BUFFER_ALIGNMENT +#if gcdCAPTURE_ONLY_MODE +# define gcdENABLE_BUFFER_ALIGNMENT 0 +# else +# define gcdENABLE_BUFFER_ALIGNMENT 1 +# endif +#endif + +/* + * gcdENABLE_BANK_ALIGNMENT + * + * When enabled, video memory is allocated bank aligned. The vendor can modify + * _GetSurfaceBankAlignment() and _GetBankOffsetBytes() to define how + * different types of allocations are bank and channel aligned. + * When disabled (default), no bank alignment is done. + * For CAPTURE ONLY MODE, should make sure that gcdENABLE_BANK_ALIGNMENT is disabled. 
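 *
 * The bank an allocation falls into is identified by the address bits
 * gcdBANK_BIT_START..gcdBANK_BIT_END defined further below (bits 12..14 with the
 * defaults, i.e. eight banks). A hypothetical helper, only to illustrate how the
 * two settings relate; it is not part of this header:
 *
 *     // Bank index taken from address bits [gcdBANK_BIT_END:gcdBANK_BIT_START].
 *     #define gcmBANK_INDEX(addr) \
 *         (((addr) >> gcdBANK_BIT_START) & \
 *          ((1 << (gcdBANK_BIT_END - gcdBANK_BIT_START + 1)) - 1))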
+ */ +#ifndef gcdENABLE_BANK_ALIGNMENT +#if gcdCAPTURE_ONLY_MODE +# define gcdENABLE_BANK_ALIGNMENT 0 +# else +# define gcdENABLE_BANK_ALIGNMENT 0 +# endif +#endif + +/* + * gcdBANK_BIT_START + * + * Specifies the start bit of the bank (inclusive). + */ +#ifndef gcdBANK_BIT_START +#define gcdBANK_BIT_START 12 +#endif + +/* + * gcdBANK_BIT_END + * + * Specifies the end bit of the bank (inclusive). + */ +#ifndef gcdBANK_BIT_END +#define gcdBANK_BIT_END 14 +#endif + +/* + * gcdBANK_CHANNEL_BIT + * + * When set, video memory when allocated bank aligned is allocated such that + * render and depth buffer addresses alternate on the channel bit specified. + * This option has an effect only when gcdENABLE_BANK_ALIGNMENT is enabled. + * When disabled (default), no alteration is done. + */ +#ifndef gcdBANK_CHANNEL_BIT +#define gcdBANK_CHANNEL_BIT 7 +#endif + +/* + * gcdDYNAMIC_SPEED + * + * When non-zero, it informs the kernel driver to use the speed throttling + * broadcasting functions to inform the system the GPU should be spet up or + * slowed down. It will send a broadcast for slowdown each "interval" + * specified by this define in milliseconds + * (gckOS_BroadcastCalibrateSpeed). + */ +#ifndef gcdDYNAMIC_SPEED +#define gcdDYNAMIC_SPEED 2000 +#endif + +/* + * gcdDYNAMIC_EVENT_THRESHOLD + * + * When non-zero, it specifies the maximum number of available events at + * which the kernel driver will issue a broadcast to speed up the GPU + * (gckOS_BroadcastHurry). + */ +#ifndef gcdDYNAMIC_EVENT_THRESHOLD +#define gcdDYNAMIC_EVENT_THRESHOLD 5 +#endif + +/* + * gcdENABLE_PROFILING + * + * Enable profiling macros. + */ +#ifndef gcdENABLE_PROFILING +#define gcdENABLE_PROFILING 0 +#endif + +/* + * gcdENABLE_128B_MERGE + * + * Enable 128B merge for the BUS control. + */ +#ifndef gcdENABLE_128B_MERGE +#define gcdENABLE_128B_MERGE 0 +#endif + +/* + * gcdFRAME_DB + * + * When non-zero, it specified the number of frames inside the frame + * database. The frame DB will collect per-frame timestamps and hardware + * counters. + */ +#ifndef gcdFRAME_DB +#define gcdFRAME_DB 0 +#define gcdFRAME_DB_RESET 0 +#define gcdFRAME_DB_NAME "/var/log/frameDB.log" +#endif + +/* + * gcdENABLE_CACHEABLE_COMMAND_BUFFER + * + * When non-zero, command buffer will be cacheable. + */ +#ifndef gcdENABLE_CACHEABLE_COMMAND_BUFFER +#define gcdENABLE_CACHEABLE_COMMAND_BUFFER 0 +#endif + +/* + * gcdENABLE_BUFFERABLE_VIDEO_MEMORY + * + * When non-zero, all video memory will be bufferable by default. + */ +#ifndef gcdENABLE_BUFFERABLE_VIDEO_MEMORY +#define gcdENABLE_BUFFERABLE_VIDEO_MEMORY 1 +#endif + +/* + * gcdENABLE_INFINITE_SPEED_HW + * enable the Infinte HW, this is for 2D openVG + */ +#ifndef gcdENABLE_INFINITE_SPEED_HW +#define gcdENABLE_INFINITE_SPEED_HW 0 +#endif + +/* + * gcdPOWEROFF_TIMEOUT + * + * When non-zero, GPU will power off automatically from + * idle state, and gcdPOWEROFF_TIMEOUT is also the default + * timeout in milliseconds. + */ +#ifndef gcdPOWEROFF_TIMEOUT +#define gcdPOWEROFF_TIMEOUT 300 +#endif + +/* + * QNX_SINGLE_THREADED_DEBUGGING + */ +#ifndef QNX_SINGLE_THREADED_DEBUGGING +#define QNX_SINGLE_THREADED_DEBUGGING 0 +#endif + +/* + * gcdSHARED_RESOLVE_BUFFER_ENABLED + * + * Use shared resolve buffer for all app buffers. 
+ */ +#ifndef gcdSHARED_RESOLVE_BUFFER_ENABLED +#define gcdSHARED_RESOLVE_BUFFER_ENABLED 0 +#endif + +/* + * gcdUSE_TRIANGLE_STRIP_PATCH + */ +#ifndef gcdUSE_TRIANGLE_STRIP_PATCH +#define gcdUSE_TRIANGLE_STRIP_PATCH 1 +#endif + +#ifndef gcdUSE_PVR +#define gcdUSE_PVR 1 +#endif + +/* + * gcdSMALL_BLOCK_SIZE + * + * When non-zero, a part of VIDMEM will be reserved for requests + * whose requesting size is less than gcdSMALL_BLOCK_SIZE. + * + * For Linux, it's the size of a page. If this requeset fallbacks + * to gcvPOOL_VIRTUAL, memory will be wasted + * because they allocate a page at least. + */ +#ifndef gcdSMALL_BLOCK_SIZE +#define gcdSMALL_BLOCK_SIZE 4096 +#define gcdRATIO_FOR_SMALL_MEMORY 32 +#endif + +/* + * gcdENABLE_VIRTUAL_ADDR_UNMAP + * enable virtual address unmap for the weight_bias and the virtual image + */ +#ifndef gcdENABLE_VIRTUAL_ADDRESS_UNMAP +#define gcdENABLE_VIRTUAL_ADDRESS_UNMAP 0 +#endif + +/* + * gcdENABLE_GPU_1M_PAGE + * When non-zero, GPU page size will be 1M until the pool is out of memory + * and low-level to 4K pages. When zero, it uses 4k GPU pages. + */ +#ifndef gcdENABLE_GPU_1M_PAGE +#if !gcdSECURITY && defined(LINUX) +#ifdef EMULATOR +# define gcdENABLE_GPU_1M_PAGE 0 +# else +# define gcdENABLE_GPU_1M_PAGE 1 +# endif +# else +# define gcdENABLE_GPU_1M_PAGE 0 +# endif +#endif + +/* + * gcdCONTIGUOUS_SIZE_LIMIT + * When non-zero, size of video node from gcvPOOL_VIRTUAL contiguous is + * limited by gcdCONTIGUOUS_SIZE_LIMIT. + */ +#ifndef gcdCONTIGUOUS_SIZE_LIMIT +#define gcdCONTIGUOUS_SIZE_LIMIT 0 +#endif + +/* + * gcdLINK_QUEUE_SIZE + * + * When non-zero, driver maintains a queue to record information of + * latest lined context buffer and command buffer. Data in this queue + * is be used to debug. + */ +#ifndef gcdLINK_QUEUE_SIZE +#define gcdLINK_QUEUE_SIZE 64 +#endif + +/* gcdALPHA_KILL_IN_SHADER + * + * Enable alpha kill inside the shader. This will be set automatically by the + * HAL if certain states match a criteria. + */ +#ifndef gcdALPHA_KILL_IN_SHADER +#define gcdALPHA_KILL_IN_SHADER 1 +#endif + +#ifndef gcdPRINT_SWAP_TIME +#define gcdPRINT_SWAP_TIME 0 +#endif + +/* + * gcdDVFS + * + * When non-zero, software will make use of dynamic voltage and + * frequency feature. + */ +#ifndef gcdDVFS +#define gcdDVFS 0 +#define gcdDVFS_ANAYLSE_WINDOW 4 +#define gcdDVFS_POLLING_TIME (gcdDVFS_ANAYLSE_WINDOW * 4) +#endif + +#ifndef gcdSYNC +#define gcdSYNC 1 +#endif + +#ifndef gcdSHADER_SRC_BY_MACHINECODE +#define gcdSHADER_SRC_BY_MACHINECODE 1 +#endif + +#ifndef gcdGLB27_SHADER_REPLACE_OPTIMIZATION +#define gcdGLB27_SHADER_REPLACE_OPTIMIZATION 1 +#endif + +/* + * gcdSUPPORT_SWAP_RECTANGLE + * + * Support swap with a specific rectangle. + * + * Set the rectangle with eglSetSwapRectangleVIV api. + * Android only. + */ +#ifndef gcdSUPPORT_SWAP_RECTANGLE +#define gcdSUPPORT_SWAP_RECTANGLE 0 +#endif + +/* + * gcdGPU_LINEAR_BUFFER_ENABLED + * + * Use linear buffer for GPU apps so HWC can do 2D composition. + * Android only. + */ +#ifndef gcdGPU_LINEAR_BUFFER_ENABLED +#define gcdGPU_LINEAR_BUFFER_ENABLED 1 +#endif + +/* + * gcdENABLE_RENDER_INTO_WINDOW + * + * Enable Render-Into-Window (ie, No-Resolve) feature on android. + * NOTE that even if enabled, it still depends on hardware feature and + * android application behavior. When hardware feature or application + * behavior can not support render into window mode, it will fail back + * to normal mode. 
+ * When Render-Into-Window is finally used, window back buffer of android + * applications will be allocated matching render target tiling format. + * Otherwise buffer tiling is decided by the above option + * 'gcdGPU_LINEAR_BUFFER_ENABLED'. + * Android only for now. + */ +#ifndef gcdENABLE_RENDER_INTO_WINDOW +#define gcdENABLE_RENDER_INTO_WINDOW 1 +#endif + +/* + * gcdENABLE_RENDER_INTO_WINDOW_WITH_FC + * + * Enable Direct-rendering (ie, No-Resolve) with tile status. + * This is expremental and in development stage. + * This will dynamically check if color compression is available. + */ +#ifndef gcdENABLE_RENDER_INTO_WINDOW_WITH_FC +#define gcdENABLE_RENDER_INTO_WINDOW_WITH_FC 1 +#endif + +/* + * gcdENABLE_BLIT_BUFFER_PRESERVE + * + * Render-Into-Window (ie, No-Resolve) does not include preserved swap + * behavior. This feature can enable buffer preserve in No-Resolve mode. + * When enabled, previous buffer (may be part of ) will be resolve-blitted + * to current buffer. + */ +#ifndef gcdENABLE_BLIT_BUFFER_PRESERVE +#define gcdENABLE_BLIT_BUFFER_PRESERVE 1 +#endif + +/* + * gcdANDROID_NATIVE_FENCE_SYNC + * + * Enable android native fence sync. It is introduced since jellybean-4.2. + * Depends on linux kernel option: CONFIG_SYNC. + * + * 0: Disabled + * 1: Build framework for native fence sync feature, and EGL extension + * 2: Enable async swap buffers for client + * * Native fence sync for client 'queueBuffer' in EGL, which is + * 'acquireFenceFd' for layer in compositor side. + * 3. Enable async hwcomposer composition. + * * 'releaseFenceFd' for layer in compositor side, which is native + * fence sync when client 'dequeueBuffer' + * * Native fence sync for compositor 'queueBuffer' in EGL, which is + * 'acquireFenceFd' for framebuffer target for DC + */ +#ifndef gcdANDROID_NATIVE_FENCE_SYNC +#define gcdANDROID_NATIVE_FENCE_SYNC 0 +#endif + +#ifndef gcdLINUX_SYNC_FILE +#define gcdLINUX_SYNC_FILE 0 +#endif + +/* + * gcdANDROID_IMPLICIT_NATIVE_BUFFER_SYNC + * + * Enable implicit android native buffer sync. + * + * For non-HW_RENDER buffer, CPU (or other hardware) and GPU can access + * the buffer at the same time. This is to add implicit synchronization + * between CPU (or the hardware) and GPU. + * + * Eventually, please do not use implicit native buffer sync, but use + * "fence sync" or "android native fence sync" instead in libgui, which + * can be enabled in frameworks/native/libs/gui/Android.mk. This kind + * of synchronization should be done by app but not driver itself. + * + * Please disable this option when either "fence sync" or + * "android native fence sync" is enabled. + */ +#ifndef gcdANDROID_IMPLICIT_NATIVE_BUFFER_SYNC +#define gcdANDROID_IMPLICIT_NATIVE_BUFFER_SYNC 1 +#endif + +/* + * Implicit native buffer sync is not needed when ANDROID_native_fence_sync + * is available. + */ +#if gcdANDROID_NATIVE_FENCE_SYNC +#undef gcdANDROID_IMPLICIT_NATIVE_BUFFER_SYNC +#define gcdANDROID_IMPLICIT_NATIVE_BUFFER_SYNC 0 +#endif + +/* + * gcdUSE_WCLIP_PATCH + * + * Enable wclipping patch. + */ +#ifndef gcdUSE_WCLIP_PATCH +#define gcdUSE_WCLIP_PATCH 1 +#endif + +#ifndef gcdUSE_NPOT_PATCH +#define gcdUSE_NPOT_PATCH 1 +#endif + +/* + * gcdINTERNAL_COMMENT + * + * Wrap internal comment, content wrapped by it and the macor itself + * will be removed in release driver. + */ +#ifndef gcdINTERNAL_COMMENT +#define gcdINTERNAL_COMMENT 1 +#endif + +/* + * gcdRTT_DISABLE_FC + * + * Disable RTT FC support. For test only. 
+ */ +#ifndef gcdRTT_DISABLE_FC +#define gcdRTT_DISABLE_FC 0 +#endif + +/* + * gcdFORCE_MIPMAP + * + * Force generate mipmap for texture. + */ +#ifndef gcdFORCE_MIPMAP +#define gcdFORCE_MIPMAP 0 +#endif + +/* + * gcdFORCE_BILINEAR + * + * Force bilinear for mipfilter. + */ +#ifndef gcdFORCE_BILINEAR +#define gcdFORCE_BILINEAR 1 +#endif + +/* + * gcdBINARY_TRACE + * + * When non-zero, binary trace will be generated. + * + * When gcdBINARY_TRACE_FILE_SIZE is non-zero, binary trace buffer will + * be written to a file which size is limited to + * gcdBINARY_TRACE_FILE_SIZE. + */ +#ifndef gcdBINARY_TRACE +#define gcdBINARY_TRACE 0 +#define gcdBINARY_TRACE_FILE_SIZE 0 +#endif + +#ifndef gcdMOVG +# define gcdMOVG 0 +# define gcdENABLE_TS_DOUBLE_BUFFER 1 +#else +#if gcdMOVG +# define gcdENABLE_TS_DOUBLE_BUFFER 0 +# else +# define gcdENABLE_TS_DOUBLE_BUFFER 1 +# endif +#endif + +/* gcdINTERRUPT_STATISTIC + * + * Monitor the event send to GPU and interrupt issued by GPU. + */ + +#ifndef gcdINTERRUPT_STATISTIC +#if defined(LINUX) || defined(__QNXNTO__) || defined(UNDER_CE) || defined(__VXWORKS__) +# define gcdINTERRUPT_STATISTIC 1 +# else +# define gcdINTERRUPT_STATISTIC 0 +# endif +#endif + +/* + * gcdFENCE_WAIT_LOOP_COUNT + * Wait fence, loop count. + */ +#ifndef gcdFENCE_WAIT_LOOP_COUNT +#define gcdFENCE_WAIT_LOOP_COUNT 10000 +#endif + +/* + * gcdPARTIAL_FAST_CLEAR + * When it's not zero, partial fast clear is enabled. + * Depends on gcdHAL_3D_DRAWBLIT, if gcdHAL_3D_DRAWBLIT is not enabled, + * only available when scissor box is completely aligned. + * Expremental, under test only. Not ready for production. + */ +#ifndef gcdPARTIAL_FAST_CLEAR +#define gcdPARTIAL_FAST_CLEAR 0 +#endif + +/* + * gcdREMOVE_SURF_ORIENTATION + * When it's not zero, we will remove surface orientation function. + * It wil become to a parameter of resolve function. + */ +#ifndef gcdREMOVE_SURF_ORIENTATION +#define gcdREMOVE_SURF_ORIENTATION 1 +#endif + +/* + * gcdTEST_DEC200 + * Test part for DEC200. Remove when release. + */ +#ifndef gcdTEST_DEC200 +#define gcdTEST_DEC200 0 +#endif + +/* + * gcdPATTERN_FAST_PATH + * For pattern match + */ +#ifndef gcdPATTERN_FAST_PATH +#define gcdPATTERN_FAST_PATH 1 +#endif + +/* + * gcdUSE_INPUT_DEVICE + * disable input devices usage under fb mode to support fb+vdk multi-process + */ +#ifndef gcdUSE_INPUT_DEVICE +#define gcdUSE_INPUT_DEVICE 1 +#endif + +/* + * gcdPERFORMANCE_ANALYSIS + * + * When set to 1, driver will pass information through loadstate + * to HW. This loadstate does not impact HW execution. + */ +#ifndef gcdPERFORMANCE_ANALYSIS +#define gcdPERFORMANCE_ANALYSIS 0 +#endif + +/* + * gcdFRAMEINFO_STATISTIC + * When enable, collect frame information. + */ +#ifndef gcdFRAMEINFO_STATISTIC + +#if (defined(DBG) && DBG) || defined(DEBUG) || \ + defined(_DEBUG) || gcdDUMP || gcdPERFORMANCE_ANALYSIS || \ + (defined(WIN32) && !defined(UNDER_CE)) || \ + gcdFPGA_BUILD +# define gcdFRAMEINFO_STATISTIC 1 +# else +# define gcdFRAMEINFO_STATISTIC 0 +# endif + +#endif + +/* + * gcdDEC_ENABLE_AHB + * Enable DEC300 compression AHB mode or not. + */ +#ifndef gcdDEC_ENABLE_AHB +#define gcdDEC_ENABLE_AHB 0 +#endif + +/* + * gcdENABLE_UNIFIED_CONSTANT + * Enable unified constant or not. + */ +#ifndef gcdENABLE_UNIFIED_CONSTANT +#define gcdENABLE_UNIFIED_CONSTANT 1 +#endif + +/* + * Core configurations. By default enable all cores. 
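 *
 * These are compile-time gates rather than run-time switches: code that is
 * specific to one pipe is expected to be wrapped in the matching guard, for
 * example (illustrative only):
 *
 *     #if gcdENABLE_3D
 *     // declarations that only exist in 3D-capable builds
 *     #endif
 *
 * gcdVG_ONLY below evaluates to 1 only when both the 3D and 2D pipes are
 * compiled out and the VG layer is enabled.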
+ */ +#ifndef gcdENABLE_3D +#define gcdENABLE_3D 1 +#endif + +#ifndef gcdENABLE_2D +#define gcdENABLE_2D 1 +#endif + +#ifndef gcdVG_NONE +#define gcdVG_NONE 0 +#endif + +#ifndef gcdENABLE_VG +#define gcdENABLE_VG (0 && !gcdVG_NONE) +#endif + +#ifndef gcdVG_ONLY +#define gcdVG_ONLY (!gcdENABLE_3D && !gcdENABLE_2D && gcdENABLE_VG) +#endif + +#if defined(WIN32) && !defined(UNDER_CE) && (gcdENABLE_VG == 1) + +#ifdef gcdUSE_VX +# undef gcdUSE_VX +# endif + +#ifdef COMMAND_PROCESSOR_VERSION +# undef COMMAND_PROCESSOR_VERSION +# endif + +#ifdef gcdENABLE_TRUST_APPLICATION +# undef gcdENABLE_TRUST_APPLICATION +# endif + +#ifdef gcdENABLE_3D +# undef gcdENABLE_3D +# endif + +#ifdef gcdENABLE_2D +# undef gcdENABLE_2D +# endif + +# define gcdENABLE_3D 0 +# define gcdENABLE_2D 0 +# define gcdUSE_VX 0 +# define COMMAND_PROCESSOR_VERSION 2 +# define gcdENABLE_TRUST_APPLICATION 0 + +#endif /* Only for GC355 Cmodel build. */ + +#ifndef gcdGC355_PROFILER +#define gcdGC355_PROFILER 0 +#endif + +/* + * This definition must be paired with VIVANTE_PROFILER_SYSTEM_MEMORY + */ +#ifndef gcdGC355_MEM_PRINT +# define gcdGC355_MEM_PRINT 0 +#else +#if (!((gcdENABLE_3D == 0) && (gcdENABLE_2D == 0) && (gcdENABLE_VG == 1))) +# undef gcdGC355_MEM_PRINT +# define gcdGC355_MEM_PRINT 0 +# endif +#endif + +/* + * gcdRECORD_COMMAND + */ +#ifndef gcdRECORD_COMMAND +#define gcdRECORD_COMMAND 0 +#endif + +/* + * gcdALLOC_CMD_FROM_RESERVE + * + * Provide a way by which location of command buffer can be + * specified. This is a DEBUG option to limit command buffer + * to some memory range. + */ +#ifndef gcdALLOC_CMD_FROM_RESERVE +#define gcdALLOC_CMD_FROM_RESERVE 0 +#endif + +/* + * gcdBOUNDARY_CHECK + * + * When enabled, add bounary before and after a range of + * GPU address. So overflow can be trapped by MMU exception. + * This is a debug option for new MMU and gcdUSE_MMU_EXCEPTION + * is enabled. + */ +#ifndef gcdBOUNDARY_CHECK +#define gcdBOUNDARY_CHECK 0 +#endif + +/* + * gcdRENDER_QUALITY_CHECK + * + * When enabled, we disable performance opt patch + * to get know rendering quality comparing with other vendor. + */ +#ifndef gcdRENDER_QUALITY_CHECK +#define gcdRENDER_QUALITY_CHECK 0 +#endif + +/* + * gcdSYSTRACE + * + * When enabled, we embed systrace in function header/footer + * to gather time information on linux platforms include android. + * '1' to trace API (EGL, ES11, ES2x, ES3x, etc) + * '2' to trace HAL (except compiler) + * '4' to trace HAL compiler + * See gc_hal_user_debug.c for more detailed trace zones. + */ +#ifndef gcdSYSTRACE +#define gcdSYSTRACE 0 +#endif + +#ifndef gcdENABLE_APPCTXT_BLITDRAW +#define gcdENABLE_APPCTXT_BLITDRAW 0 +#endif + +/* + * When enabled, use 1K mode for MMU version 2.0. otherwise use 4K mode. + */ +#ifndef gcdENABLE_MMU_1KMODE +#define gcdENABLE_MMU_1KMODE 1 +#endif + +/* + * gcdENABLE_TRUST_APPLICATION + * + * When enabled, trust application is used to handle 'security' registers. + * + * 1) If HW doesn't have robust and security feature, this option is meaningless. + * 2) If HW have robust and security and this option is not enable, + * security registers are handled by non secure driver. It is for + * platform doesn't want/need to use trust zone. + */ +#ifndef gcdENABLE_TRUST_APPLICATION +# define gcdENABLE_TRUST_APPLICATION 0 +#endif + +/* Disable gcdENABLE_TRUST_APPLICATION when oboslete gcdSECURITY enabled. 
*/ +#if gcdSECURITY +# undef gcdENABLE_TRUST_APPLICATION +# define gcdENABLE_TRUST_APPLICATION 0 +#endif + +#ifndef gcdMMU_SECURE_AREA_SIZE +#if defined(gcdENABLE_MMU_1KMODE) +# define gcdMMU_SECURE_AREA_SIZE 32 +# else +# define gcdMMU_SECURE_AREA_SIZE 128 +# endif +#endif + +#ifndef gcdUSE_MMU_EXCEPTION +#define gcdUSE_MMU_EXCEPTION 1 +#endif + +#ifndef gcdVX_OPTIMIZER +#define gcdVX_OPTIMIZER 0 +#endif + +#ifndef gcdALLOC_ON_FAULT +#define gcdALLOC_ON_FAULT 0 +#endif + +/* + * gcdDISABLE_GPU_VIRTUAL_ADDRESS + * + * When enabled, disable MMU and all virtual allocated from MMU. + */ +#ifndef gcdDISABLE_GPU_VIRTUAL_ADDRESS +#define gcdDISABLE_GPU_VIRTUAL_ADDRESS 0 +#endif + +/* + * gcd2D_COMPRESSION_DEC400_ALIGN_MODE + * + * Only for DEC400 compression. + * Set 0 as 16bytes aligned. 1 as 32bytes aligned. 2 as 64bytes aligned. + * Default is 0 which means 32bytes aligned. + */ +#ifndef gcd2D_COMPRESSION_DEC400_ALIGN_MODE +#define gcd2D_COMPRESSION_DEC400_ALIGN_MODE 1 +#endif + +/* + * gcdENABLE_KERNEL_FENCE + * When enabled, use kernel fence to do resource tracking. + */ +#ifndef gcdENABLE_KENREL_FENCE +#define gcdENABLE_KERNEL_FENCE 0 +#endif + +/* + * gcdUSE_VXC_BINARY + * When enabled, will use prebuilt shader binary in VX driver. + */ +#ifndef gcdUSE_VXC_BINARY +#define gcdUSE_VXC_BINARY 0 +#endif + +/* + * gcdFEATURE_SANITYCHECK + * When enabled, will do hardware feature sanity check, each + * used hardware feature should be printed out. + */ +#ifndef gcdFEATURE_SANITYCHECK +#define gcdFEATURE_SANITYCHECK 0 +#endif + +/* + * VIVANTE_PROFILER_SYSTEM_MEMORY + * + * This define enables the profiling data for system memory allocated by driver + */ +#ifndef VIVANTE_PROFILER_SYSTEM_MEMORY +#define VIVANTE_PROFILER_SYSTEM_MEMORY 1 +#define VP_MALLOC_OFFSET (16) + +#endif + +#define gcdHAL_TEST 1 + +/* + * gcdUSE_ZWP_SYNCHRONIZATION + * + * When enabled, will use the zwp_linux_surface_synchronization path, + * otherwise switch to old wayland path. + */ +#define gcdUSE_ZWP_SYNCHRONIZATION 1 + +/* + * gcdUSE_SINGLE_CONTEXT + * When enabled, will enable single context. + */ +#ifndef gcdUSE_SINGLE_CONTEXT +#define gcdUSE_SINGLE_CONTEXT 0 +#endif + +/* + * gcdKERNEL_QUERY_PERFORMANCE_COUNTER_V8 + * When enabled, will enable query new performance counter of V8.0 in kernel + * space. + */ +#ifndef gcdKERNEL_QUERY_PERFORMANCE_COUNTER_V8 +#define gcdKERNEL_QUERY_PERFORMANCE_COUNTER_V8 0 +#endif + +/* + * gcdIGNORE_DRIVER_VERSIONS_MISMATCH + * When enabled, driver will ignore user and kernel driver version mismatch. + */ +#ifndef gcdIGNORE_DRIVER_VERSIONS_MISMATCH +#define gcdIGNORE_DRIVER_VERSIONS_MISMATCH 0 +#endif + +/* + * gcdEXTERNAL_SRAM_USAGE + * '0': User driver queries the whole external SRAM and manages the memory. + * Or user driver dynamically allocate the external SRAM with pool type + * gcvPOOL_EXTERNAL_SRAM. + * + * '1': External SRAM only can be used for the initial command, + * but the external SRAM base and size must be set by customer. + * And it only can be used if pool type is gcvPOOL_EXTERNAL_SRAM when + * allocating video memory. + * + * '2': To be extended. + */ +#ifndef gcdEXTERNAL_SRAM_USAGE +#define gcdEXTERNAL_SRAM_USAGE 0 +#endif + +/* + * gcdENABLE_SW_PREEMPTION + * Enable software preemption if set to 1, disable by default. + * Only support Linux OS currently. + */ +#ifndef gcdENABLE_SW_PREEMPTION +#define gcdENABLE_SW_PREEMPTION 0 +#endif + +/* + * gcdSUPPORT_DEVICE_TREE_SOURCE + * To suppor device tree feature if set to 1, disable by default. + * Only works on linux OS. 
+ */ +#ifndef gcdSUPPORT_DEVICE_TREE_SOURCE +#define gcdSUPPORT_DEVICE_TREE_SOURCE 0 +#endif + +/* + * gcdENABLE_PER_DEVICE_PM + * Enable per device power management if set to 2, all the hardware + * cores will be one device. Enable per user device power management + * if set to 1, the brother cores of a device depends on user driver. + * Disable per device power management if set to 0. + * Only support Linux OS currently. + */ +#ifndef gcdENABLE_PER_DEVICE_PM +#define gcdENABLE_PER_DEVICE_PM 0 +#endif + +/* + * gcdUSE_CAPBUF + */ +#ifndef gcdUSE_CAPBUF +#define gcdUSE_CAPBUF 1 +#endif + +/* + * gcdENABLE_MP_SWITCH + * Enable multi-processor mode dynamic switch, the processor count is + * determined by specific conditions. + * Only support Linux OS currently. + */ +#ifndef gcdENABLE_MP_SWITCH +#define gcdENABLE_MP_SWITCH 0 +#endif + +/* + * gcdANON_FILE_FOR_ALLOCATOR + * Enable this macro can replace the /dev/zero by anon_inode: + * [galcore] in /proc//maps. + * Without the macro, run 'cat /proc//maps' will print "/dev/zero". + */ +#ifndef gcdANON_FILE_FOR_ALLOCATOR +#define gcdANON_FILE_FOR_ALLOCATOR 0 +#endif + +/* + * gcdWAIT_LINK_FE_MODE + * 0 means always end at the end of commit user command buffer + * and reset FE for each commit. + * 1 means default wait-link mode. + */ +#ifndef gcdWAIT_LINK_FE_MODE +#define gcdWAIT_LINK_FE_MODE 1 +#endif + +/* + * gcdENABLE_RECOVERY_ALL_CORES + * When enabled, will recovery all cores when the gpu hang. + */ +#ifndef gcdENABLE_RECOVERY_ALL_CORES +#define gcdENABLE_RECOVERY_ALL_CORES 1 +#endif + +/* + * gcdENABLE_VIDEO_MEMORY_MIRROR + * Enable pcie local memory mirror and dma sync. + * Only support Linux OS currently. + */ +#ifndef gcdENABLE_VIDEO_MEMORY_MIRROR +#define gcdENABLE_VIDEO_MEMORY_MIRROR 0 +#endif +/* + * gcdINIT_VIDEO_MEMORY_MIRROR + * Init mirror and pcie local memory to characteristic value for debugging. + * Depends on gcdENABLE_VIDEO_MEMORY_MIRROR. + * Only support Linux OS currently. + */ +#ifndef gcdINIT_VIDEO_MEMORY_MIRROR +#define gcdINIT_VIDEO_MEMORY_MIRROR 0 +#endif + +/* + * gcdSTATIC_VIDEO_MEMORY_MIRROR + * Dynamic allocate/free the mirror buffer if set to 0. + * Depends on gcdENABLE_VIDEO_MEMORY_MIRROR. + * Only support Linux OS currently. + */ +#ifndef gcdSTATIC_VIDEO_MEMORY_MIRROR +#define gcdSTATIC_VIDEO_MEMORY_MIRROR 1 +#endif + +/* + * gcdENABLE_VIDEO_MEMORY_TRACE + * Dynamic trace video memory, if set to 1. + * Depends on kernel support. + * Only support Linux OS currently. + */ +#ifndef gcdENABLE_VIDEO_MEMORY_TRACE +#define gcdENABLE_VIDEO_MEMORY_TRACE 0 +#endif + +/* + * gcdUSER_COMMAND_IN_EXCLUSIVE + * User command buffer can be able in exclusive local memory. + */ +#ifndef gcdUSER_COMMAND_IN_EXCLUSIVE +#define gcdUSER_COMMAND_IN_EXCLUSIVE 0 +#endif + +/* + * gcdVALID_COMMAND_BUFFER + * If enabled, will check the validity of the command buffer before commit. + */ +#ifndef gcdVALID_COMMAND_BUFFER +#define gcdVALID_COMMAND_BUFFER 0 +#endif + +/* + * gcdENABLE_MULTI_DEVICE_MANAGEMENT + * + * Manage cores on multiple hardware devices. + * + * Hardware device is composed by a set of cores which are the + * same type, such as 3D, 3D2D, VIP etc. and these cores have + * direct connection to work in combined mode. + */ +#ifndef gcdENABLE_MULTI_DEVICE_MANAGEMENT +#define gcdENABLE_MULTI_DEVICE_MANAGEMENT 0 +#endif + +/* + * gcdENABLE_DEVFREQ + * + * Enable linux generic dynamic voltage and frequency scaling. + * Now only support frequency scaling. 
+ */ +#ifndef gcdENABLE_DEVFREQ +#define gcdENABLE_DEVFREQ 0 +#endif + +/* + * gcdSHARED_COMMAND_BUFFER + * + * Share one user command buffer and kernel command buffer for multi-core. + */ +#ifndef gcdSHARED_COMMAND_BUFFER +#define gcdSHARED_COMMAND_BUFFER 0 +#endif + +/* + * gcd_2D_PRINT_TIME_STAMP + * + * Print time stamp by 2D driver + */ +#ifndef gcd_2D_PRINT_TIME_STAMP +#define gcd_2D_PRINT_TIME_STAMP 0 +#endif + +/* + * gcdENABLE_AHBXTTD + * + * Enable AHBXTTD register feature. Now only support Xiaomi. + */ +#ifndef gcdENABLE_AHBXTTD +#define gcdENABLE_AHBXTTD 0 +#endif + +/* + gcdENABLE_MEMORY_OPTIMIZATION + If enabled, will move some object in system memory. +*/ +#ifndef gcdENABLE_MEMORY_OPTIMIZATION +#define gcdENABLE_MEMORY_OPTIMIZATION 0 +#endif + +/* + * gcdENABLE_NONCACHE_COMMANDBUF + * + * If enable, map the command buffer without writebuffer and cache + */ +#ifndef gcdENABLE_NONCACHE_COMMANDBUF +#define gcdENABLE_NONCACHE_COMMANDBUF 0 +#endif + +/* + * gcdENABLE_CLEAR_FENCE + * + * If enabled, will record fence value in kernel and + * recovery all fence when the kernel do recovery operation. + */ +#ifndef gcdENABLE_CLEAR_FENCE +#if defined(LINUX) && !defined(EMULATOR) +# define gcdENABLE_CLEAR_FENCE 1 +# else +# define gcdENABLE_CLEAR_FENCE 0 +# endif +#endif + +#endif /* __gc_hal_options_h_ */ + + diff --git a/unified-tina/inc/HAL/gc_hal_profiler.h b/unified-tina/inc/HAL/gc_hal_profiler.h new file mode 100644 index 0000000..92aa66f --- /dev/null +++ b/unified-tina/inc/HAL/gc_hal_profiler.h @@ -0,0 +1,1176 @@ +/**************************************************************************** +* +* Copyright (c) 2005 - 2023 by Vivante Corp. All rights reserved. +* +* The material in this file is confidential and contains trade secrets +* of Vivante Corporation. This is proprietary information owned by +* Vivante Corporation. No part of this work may be disclosed, +* reproduced, copied, transmitted, or used in any way for any purpose, +* without the express written permission of Vivante Corporation. +* +*****************************************************************************/ + + +#ifndef __gc_hal_profiler_h_ +#define __gc_hal_profiler_h_ + +#include "shared/gc_hal_profiler_shared.h" + +#ifdef __cplusplus +extern "C" { +#endif + +#define GLVERTEX_OBJECT 10 +#define GLVERTEX_OBJECT_BYTES 11 + +#define GLINDEX_OBJECT 20 +#define GLINDEX_OBJECT_BYTES 21 + +#define GLTEXTURE_OBJECT 30 +#define GLTEXTURE_OBJECT_BYTES 31 + +#define GLBUFOBJ_OBJECT 40 +#define GLBUFOBJ_OBJECT_BYTES 41 + +#define ES11_CALLS 151 +#define ES11_DRAWCALLS (ES11_CALLS + 1) +#define ES11_STATECHANGECALLS (ES11_DRAWCALLS + 1) +#define ES11_POINTCOUNT (ES11_STATECHANGECALLS + 1) +#define ES11_LINECOUNT (ES11_POINTCOUNT + 1) +#define ES11_TRIANGLECOUNT (ES11_LINECOUNT + 1) + +#define ES30_CALLS 159 +#define ES30_DRAWCALLS (ES30_CALLS + 1) +#define ES30_STATECHANGECALLS (ES30_DRAWCALLS + 1) +#define ES30_POINTCOUNT (ES30_STATECHANGECALLS + 1) +#define ES30_LINECOUNT (ES30_POINTCOUNT + 1) +#define ES30_TRIANGLECOUNT (ES30_LINECOUNT + 1) + +#define VG11_CALLS 88 +#define VG11_DRAWCALLS (VG11_CALLS + 1) +#define VG11_STATECHANGECALLS (VG11_DRAWCALLS + 1) +#define VG11_FILLCOUNT (VG11_STATECHANGECALLS + 1) +#define VG11_STROKECOUNT (VG11_FILLCOUNT + 1) +/* End of Driver API ID Definitions. */ + +/* HAL & MISC IDs. 
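 *
 * Both the driver-API statistics IDs above and the HAL/HW counter IDs below are
 * built by chaining, each ID being the previous one plus 1, so the values within
 * a group are consecutive: for ES1.1, ES11_CALLS is 151, ES11_DRAWCALLS 152, and
 * so on up to ES11_TRIANGLECOUNT at 156. A compile-time sanity check along these
 * lines is possible but not part of this header:
 *
 *     // _Static_assert(ES11_TRIANGLECOUNT == ES11_CALLS + 5, "IDs are chained");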
*/ +#define HAL_VERTBUFNEWBYTEALLOC 1 +#define HAL_VERTBUFTOTALBYTEALLOC (HAL_VERTBUFNEWBYTEALLOC + 1) +#define HAL_VERTBUFNEWOBJALLOC (HAL_VERTBUFTOTALBYTEALLOC + 1) +#define HAL_VERTBUFTOTALOBJALLOC (HAL_VERTBUFNEWOBJALLOC + 1) +#define HAL_INDBUFNEWBYTEALLOC (HAL_VERTBUFTOTALOBJALLOC + 1) +#define HAL_INDBUFTOTALBYTEALLOC (HAL_INDBUFNEWBYTEALLOC + 1) +#define HAL_INDBUFNEWOBJALLOC (HAL_INDBUFTOTALBYTEALLOC + 1) +#define HAL_INDBUFTOTALOBJALLOC (HAL_INDBUFNEWOBJALLOC + 1) +#define HAL_TEXBUFNEWBYTEALLOC (HAL_INDBUFTOTALOBJALLOC + 1) +#define HAL_TEXBUFTOTALBYTEALLOC (HAL_TEXBUFNEWBYTEALLOC + 1) +#define HAL_TEXBUFNEWOBJALLOC (HAL_TEXBUFTOTALBYTEALLOC + 1) +#define HAL_TEXBUFTOTALOBJALLOC (HAL_TEXBUFNEWOBJALLOC + 1) + +#define GPU_CYCLES 1 +#define GPU_READ64BYTE (GPU_CYCLES + 1) +#define GPU_WRITE64BYTE (GPU_READ64BYTE + 1) +#define GPU_TOTALCYCLES (GPU_WRITE64BYTE + 1) +#define GPU_IDLECYCLES (GPU_TOTALCYCLES + 1) + +#define VS_INSTCOUNT 1 +#define VS_BRANCHINSTCOUNT (VS_INSTCOUNT + 1) +#define VS_TEXLDINSTCOUNT (VS_BRANCHINSTCOUNT + 1) +#define VS_RENDEREDVERTCOUNT (VS_TEXLDINSTCOUNT + 1) +#define VS_SOURCE (VS_RENDEREDVERTCOUNT + 1) +#define VS_NONIDLESTARVECOUNT (VS_SOURCE + 1) +#define VS_STARVELCOUNT (VS_NONIDLESTARVECOUNT + 1) +#define VS_STALLCOUNT (VS_STARVELCOUNT + 1) +#define VS_PROCESSCOUNT (VS_STALLCOUNT + 1) + +#define PS_INSTCOUNT 1 +#define PS_BRANCHINSTCOUNT (PS_INSTCOUNT + 1) +#define PS_TEXLDINSTCOUNT (PS_BRANCHINSTCOUNT + 1) +#define PS_RENDEREDPIXCOUNT (PS_TEXLDINSTCOUNT + 1) +#define PS_SOURCE (PS_RENDEREDPIXCOUNT + 1) +#define PS_NONIDLESTARVECOUNT (PS_SOURCE + 1) +#define PS_STARVELCOUNT (PS_NONIDLESTARVECOUNT + 1) +#define PS_STALLCOUNT (PS_STARVELCOUNT + 1) +#define PS_PROCESSCOUNT (PS_STALLCOUNT + 1) +#define PS_SHADERCYCLECOUNT (PS_PROCESSCOUNT + 1) + +#define PA_INVERTCOUNT 1 +#define PA_INPRIMCOUNT (PA_INVERTCOUNT + 1) +#define PA_OUTPRIMCOUNT (PA_INPRIMCOUNT + 1) +#define PA_DEPTHCLIPCOUNT (PA_OUTPRIMCOUNT + 1) +#define PA_TRIVIALREJCOUNT (PA_DEPTHCLIPCOUNT + 1) +#define PA_CULLCOUNT (PA_TRIVIALREJCOUNT + 1) +#define PA_NONIDLESTARVECOUNT (PA_CULLCOUNT + 1) +#define PA_STARVELCOUNT (PA_NONIDLESTARVECOUNT + 1) +#define PA_STALLCOUNT (PA_STARVELCOUNT + 1) +#define PA_PROCESSCOUNT (PA_STALLCOUNT + 1) + +#define SE_TRIANGLECOUNT 1 +#define SE_LINECOUNT (SE_TRIANGLECOUNT + 1) +#define SE_STARVECOUNT (SE_LINECOUNT + 1) +#define SE_STALLCOUNT (SE_STARVECOUNT + 1) +#define SE_RECEIVETRIANGLECOUNT (SE_STALLCOUNT + 1) +#define SE_SENDTRIANGLECOUNT (SE_RECEIVETRIANGLECOUNT + 1) +#define SE_RECEIVELINESCOUNT (SE_SENDTRIANGLECOUNT + 1) +#define SE_SENDLINESCOUNT (SE_RECEIVELINESCOUNT + 1) +#define SE_NONIDLESTARVECOUNT (SE_SENDLINESCOUNT + 1) +#define SE_PROCESSCOUNT (SE_NONIDLESTARVECOUNT + 1) + +#define RA_VALIDPIXCOUNT 1 +#define RA_TOTALQUADCOUNT (RA_VALIDPIXCOUNT + 1) +#define RA_VALIDQUADCOUNTEZ (RA_TOTALQUADCOUNT + 1) +#define RA_TOTALPRIMCOUNT (RA_VALIDQUADCOUNTEZ + 1) +#define RA_PIPECACHEMISSCOUNT (RA_TOTALPRIMCOUNT + 1) +#define RA_PREFCACHEMISSCOUNT (RA_PIPECACHEMISSCOUNT + 1) +#define RA_EEZCULLCOUNT (RA_PREFCACHEMISSCOUNT + 1) +#define RA_NONIDLESTARVECOUNT (RA_EEZCULLCOUNT + 1) +#define RA_STARVELCOUNT (RA_NONIDLESTARVECOUNT + 1) +#define RA_STALLCOUNT (RA_STARVELCOUNT + 1) +#define RA_PROCESSCOUNT (RA_STALLCOUNT + 1) + +#define TX_TOTBILINEARREQ 1 +#define TX_TOTTRILINEARREQ (TX_TOTBILINEARREQ + 1) +#define TX_TOTDISCARDTEXREQ (TX_TOTTRILINEARREQ + 1) +#define TX_TOTTEXREQ (TX_TOTDISCARDTEXREQ + 1) +#define TX_MEMREADCOUNT (TX_TOTTEXREQ + 1) +#define 
TX_MEMREADIN8BCOUNT (TX_MEMREADCOUNT + 1) +#define TX_CACHEMISSCOUNT (TX_MEMREADIN8BCOUNT + 1) +#define TX_CACHEHITTEXELCOUNT (TX_CACHEMISSCOUNT + 1) +#define TX_CACHEMISSTEXELCOUNT (TX_CACHEHITTEXELCOUNT + 1) +#define TX_NONIDLESTARVECOUNT (TX_CACHEMISSTEXELCOUNT + 1) +#define TX_STARVELCOUNT (TX_NONIDLESTARVECOUNT + 1) +#define TX_STALLCOUNT (TX_STARVELCOUNT + 1) +#define TX_PROCESSCOUNT (TX_STALLCOUNT + 1) + +#define PE_KILLEDBYCOLOR 1 +#define PE_KILLEDBYDEPTH (PE_KILLEDBYCOLOR + 1) +#define PE_DRAWNBYCOLOR (PE_KILLEDBYDEPTH + 1) +#define PE_DRAWNBYDEPTH (PE_DRAWNBYCOLOR + 1) + +#define MC_READREQ8BPIPE 1 +#define MC_READREQ8BIP (MC_READREQ8BPIPE + 1) +#define MC_WRITEREQ8BPIPE (MC_READREQ8BIP + 1) +#define MC_AXIMINLATENCY (MC_WRITEREQ8BPIPE + 1) +#define MC_AXIMAXLATENCY (MC_AXIMINLATENCY + 1) +#define MC_AXITOTALLATENCY (MC_AXIMAXLATENCY + 1) +#define MC_AXISAMPLECOUNT (MC_AXITOTALLATENCY + 1) + +#define AXI_READREQSTALLED 1 +#define AXI_WRITEREQSTALLED (AXI_READREQSTALLED + 1) +#define AXI_WRITEDATASTALLED (AXI_WRITEREQSTALLED + 1) + +#define FE_DRAWCOUNT 1 +#define FE_OUTVERTEXCOUNT (FE_DRAWCOUNT + 1) +#define FE_STALLCOUNT (FE_OUTVERTEXCOUNT + 1) +#define FE_STARVECOUNT (FE_STALLCOUNT + 1) + +#define PVS_INSTRCOUNT 1 +#define PVS_ALUINSTRCOUNT (PVS_INSTRCOUNT + 1) +#define PVS_TEXINSTRCOUNT (PVS_ALUINSTRCOUNT + 1) +#define PVS_ATTRIBCOUNT (PVS_TEXINSTRCOUNT + 1) +#define PVS_UNIFORMCOUNT (PVS_ATTRIBCOUNT + 1) +#define PVS_FUNCTIONCOUNT (PVS_UNIFORMCOUNT + 1) +#define PVS_SOURCE (PVS_FUNCTIONCOUNT + 1) + +#define PPS_INSTRCOUNT 1 +#define PPS_ALUINSTRCOUNT (PPS_INSTRCOUNT + 1) +#define PPS_TEXINSTRCOUNT (PPS_ALUINSTRCOUNT + 1) +#define PPS_ATTRIBCOUNT (PPS_TEXINSTRCOUNT + 1) +#define PPS_UNIFORMCOUNT (PPS_ATTRIBCOUNT + 1) +#define PPS_FUNCTIONCOUNT (PPS_UNIFORMCOUNT + 1) +#define PPS_SOURCE (PPS_FUNCTIONCOUNT + 1) +/* End of MISC Counter IDs. */ + +#define VPG(x) (gcoPROFILER_getVPGConst(x)) + +enum gceVPG { + VPHEADER, + INFO, + FRAME, + VPTIME, + ES11, + VG11, + HW, + MULTI_GPU, + PROG, + ES11DRAW, + MEM, + PVS, + PPS, + ES11_TIME, + ES30, + ES30_DRAW, + ES30_TIME, + FINISH, + END, + COMPUTE30, + BLT, + CLUSTER, +}; + +/* Category Constants. */ +#define VPG_HAL 0x080000 +#define VPG_GPU 0x0a0000 +#define VPG_VS 0x0b0000 +#define VPG_PS 0x0c0000 +#define VPG_PA 0x0d0000 +#define VPG_SETUP 0x0e0000 +#define VPG_RA 0x0f0000 +#define VPG_TX 0x100000 +#define VPG_PE 0x110000 +#define VPG_MC 0x120000 +#define VPG_AXI 0x130000 +#define VPG_VG11_TIME 0x220000 +#define VPG_FE 0x230000 +#define VPNG_FE 0x250000 +#define VPNG_VS 0x260000 +#define VPNG_PS 0x270000 +#define VPNG_PA 0x280000 +#define VPNG_SETUP 0x290000 +#define VPNG_RA 0x2a0000 +#define VPNG_TX 0x2b0000 +#define VPNG_PE 0x2c0000 +#define VPNG_MCC 0x2d0000 +#define VPNG_MCZ 0x2e0000 +#define VPNG_HI 0x2f0000 +#define VPNG_L2 0x300000 +#define VPNG_NN 0x310000 +#define VPNG_TP 0x320000 +#define VPG_CLUSTER 0x68000000 + +/* Info. */ +#define VPC_INFOCOMPANY (VPG(INFO) + 1) +#define VPC_INFOVERSION (VPC_INFOCOMPANY + 1) +#define VPC_INFORENDERER (VPC_INFOVERSION + 1) +#define VPC_INFOREVISION (VPC_INFORENDERER + 1) +#define VPC_INFODRIVER (VPC_INFOREVISION + 1) +#define VPC_INFODRIVERMODE (VPC_INFODRIVER + 1) +#define VPC_INFOSCREENSIZE (VPC_INFODRIVERMODE + 1) +#define VPC_INFOASICMODE (VPC_INFOSCREENSIZE + 1) + +/* Counter Constants. 
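 *
 * Every VPC_xxx constant below is a group base plus a small per-group index.
 * Groups either use the fixed VPG_xxx bases defined above, which keep the group
 * in the upper bits (VPG_GPU is 0x0a0000, so VPC_GPUCYCLES works out to
 * 0x0a0001), or the VPG(x) form, which resolves the base at run time through
 * gcoPROFILER_getVPGConst(). For the fixed bases the two parts can be read back
 * with plain masking; a sketch, not an API of this header:
 *
 *     // group = id & 0xFFFF0000;  index = id & 0xFFFF;
 *     // e.g. VPC_GPUIDLECYCLES -> group 0x000A0000 (VPG_GPU), index 5 (GPU_IDLECYCLES)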
*/ +#define VPC_ELAPSETIME (VPG(VPTIME) + 1) +#define VPC_CPUTIME (VPC_ELAPSETIME + 1) + +#define VPC_MEMMAXRES (VPG(MEM) + 1) +#define VPC_MEMSHARED (VPC_MEMMAXRES + 1) +#define VPC_MEMUNSHAREDDATA (VPC_MEMSHARED + 1) +#define VPC_MEMUNSHAREDSTACK (VPC_MEMUNSHAREDDATA + 1) + +/* OpenGL ES11 Statics Counter IDs. */ +#define VPC_ES11CALLS (VPG(ES11) + ES11_CALLS) +#define VPC_ES11DRAWCALLS (VPG(ES11) + ES11_DRAWCALLS) +#define VPC_ES11STATECHANGECALLS (VPG(ES11) + ES11_STATECHANGECALLS) +#define VPC_ES11POINTCOUNT (VPG(ES11) + ES11_POINTCOUNT) +#define VPC_ES11LINECOUNT (VPG(ES11) + ES11_LINECOUNT) +#define VPC_ES11TRIANGLECOUNT (VPG(ES11) + ES11_TRIANGLECOUNT) + +/* OpenGL ES30 Statistics Counter IDs. */ +#define VPC_ES30CALLS (VPG(ES30) + ES30_CALLS) +#define VPC_ES30DRAWCALLS (VPG(ES30) + ES30_DRAWCALLS) +#define VPC_ES30STATECHANGECALLS (VPG(ES30) + ES30_STATECHANGECALLS) +#define VPC_ES30POINTCOUNT (VPG(ES30) + ES30_POINTCOUNT) +#define VPC_ES30LINECOUNT (VPG(ES30) + ES30_LINECOUNT) +#define VPC_ES30TRIANGLECOUNT (VPG(ES30) + ES30_TRIANGLECOUNT) + +/* OpenVG Statistics Counter IDs. */ +#define VPC_VG11CALLS (VPG(VG11) + VG11_CALLS) +#define VPC_VG11DRAWCALLS (VPG(VG11) + VG11_DRAWCALLS) +#define VPC_VG11STATECHANGECALLS (VPG(VG11) + VG11_STATECHANGECALLS) +#define VPC_VG11FILLCOUNT (VPG(VG11) + VG11_FILLCOUNT) +#define VPC_VG11STROKECOUNT (VPG(VG11) + VG11_STROKECOUNT) + +/* HAL Counters. */ +#define VPC_HALVERTBUFNEWBYTEALLOC (VPG_HAL + HAL_VERTBUFNEWBYTEALLOC) +#define VPC_HALVERTBUFTOTALBYTEALLOC (VPG_HAL + HAL_VERTBUFTOTALBYTEALLOC) +#define VPC_HALVERTBUFNEWOBJALLOC (VPG_HAL + HAL_VERTBUFNEWOBJALLOC) +#define VPC_HALVERTBUFTOTALOBJALLOC (VPG_HAL + HAL_VERTBUFTOTALOBJALLOC) +#define VPC_HALINDBUFNEWBYTEALLOC (VPG_HAL + HAL_INDBUFNEWBYTEALLOC) +#define VPC_HALINDBUFTOTALBYTEALLOC (VPG_HAL + HAL_INDBUFTOTALBYTEALLOC) +#define VPC_HALINDBUFNEWOBJALLOC (VPG_HAL + HAL_INDBUFNEWOBJALLOC) +#define VPC_HALINDBUFTOTALOBJALLOC (VPG_HAL + HAL_INDBUFTOTALOBJALLOC) +#define VPC_HALTEXBUFNEWBYTEALLOC (VPG_HAL + HAL_TEXBUFNEWBYTEALLOC) +#define VPC_HALTEXBUFTOTALBYTEALLOC (VPG_HAL + HAL_TEXBUFTOTALBYTEALLOC) +#define VPC_HALTEXBUFNEWOBJALLOC (VPG_HAL + HAL_TEXBUFNEWOBJALLOC) +#define VPC_HALTEXBUFTOTALOBJALLOC (VPG_HAL + HAL_TEXBUFTOTALOBJALLOC) + +/* HW: GPU Counters. */ +#define VPC_GPUCYCLES (VPG_GPU + GPU_CYCLES) +#define VPC_GPUREAD64BYTE (VPG_GPU + GPU_READ64BYTE) +#define VPC_GPUWRITE64BYTE (VPG_GPU + GPU_WRITE64BYTE) +#define VPC_GPUTOTALCYCLES (VPG_GPU + GPU_TOTALCYCLES) +#define VPC_GPUIDLECYCLES (VPG_GPU + GPU_IDLECYCLES) + +/* HW: Shader Counters. */ +#define VPC_VSINSTCOUNT (VPG_VS + VS_INSTCOUNT) +#define VPC_VSBRANCHINSTCOUNT (VPG_VS + VS_BRANCHINSTCOUNT) +#define VPC_VSTEXLDINSTCOUNT (VPG_VS + VS_TEXLDINSTCOUNT) +#define VPC_VSRENDEREDVERTCOUNT (VPG_VS + VS_RENDEREDVERTCOUNT) +#define VPC_VSNONIDLESTARVECOUNT (VPG_VS + VS_NONIDLESTARVECOUNT) +#define VPC_VSSTARVELCOUNT (VPG_VS + VS_STARVELCOUNT) +#define VPC_VSSTALLCOUNT (VPG_VS + VS_STALLCOUNT) +#define VPC_VSPROCESSCOUNT (VPG_VS + VS_PROCESSCOUNT) +/* HW: PS Count. 
*/ +#define VPC_PSINSTCOUNT (VPG_PS + PS_INSTCOUNT) +#define VPC_PSBRANCHINSTCOUNT (VPG_PS + PS_BRANCHINSTCOUNT) +#define VPC_PSTEXLDINSTCOUNT (VPG_PS + PS_TEXLDINSTCOUNT) +#define VPC_PSRENDEREDPIXCOUNT (VPG_PS + PS_RENDEREDPIXCOUNT) +#define VPC_PSNONIDLESTARVECOUNT (VPG_PS + PS_NONIDLESTARVECOUNT) +#define VPC_PSSTARVELCOUNT (VPG_PS + PS_STARVELCOUNT) +#define VPC_PSSTALLCOUNT (VPG_PS + PS_STALLCOUNT) +#define VPC_PSPROCESSCOUNT (VPG_PS + PS_PROCESSCOUNT) +#define VPC_PSSHADERCYCLECOUNT (VPG_PS + PS_SHADERCYCLECOUNT) + +/* HW: PA Counters. */ +#define VPC_PAINVERTCOUNT (VPG_PA + PA_INVERTCOUNT) +#define VPC_PAINPRIMCOUNT (VPG_PA + PA_INPRIMCOUNT) +#define VPC_PAOUTPRIMCOUNT (VPG_PA + PA_OUTPRIMCOUNT) +#define VPC_PADEPTHCLIPCOUNT (VPG_PA + PA_DEPTHCLIPCOUNT) +#define VPC_PATRIVIALREJCOUNT (VPG_PA + PA_TRIVIALREJCOUNT) +#define VPC_PACULLCOUNT (VPG_PA + PA_CULLCOUNT) +#define VPC_PANONIDLESTARVECOUNT (VPG_PA + PA_NONIDLESTARVECOUNT) +#define VPC_PASTARVELCOUNT (VPG_PA + PA_STARVELCOUNT) +#define VPC_PASTALLCOUNT (VPG_PA + PA_STALLCOUNT) +#define VPC_PAPROCESSCOUNT (VPG_PA + PA_PROCESSCOUNT) + +/* HW: Setup Counters. */ +#define VPC_SETRIANGLECOUNT (VPG_SETUP + SE_TRIANGLECOUNT) +#define VPC_SELINECOUNT (VPG_SETUP + SE_LINECOUNT) +#define VPC_SESTARVECOUNT (VPG_SETUP + SE_STARVECOUNT) +#define VPC_SESTALLCOUNT (VPG_SETUP + SE_STALLCOUNT) +#define VPC_SERECEIVETRIANGLECOUNT (VPG_SETUP + SE_RECEIVETRIANGLECOUNT) +#define VPC_SESENDTRIANGLECOUNT (VPG_SETUP + SE_SENDTRIANGLECOUNT) +#define VPC_SERECEIVELINESCOUNT (VPG_SETUP + SE_RECEIVELINESCOUNT) +#define VPC_SESENDLINESCOUNT (VPG_SETUP + SE_SENDLINESCOUNT) +#define VPC_SENONIDLESTARVECOUNT (VPG_SETUP + SE_NONIDLESTARVECOUNT) +#define VPC_SEPROCESSCOUNT (VPG_SETUP + SE_PROCESSCOUNT) + +/* HW: RA Counters. */ +#define VPC_RAVALIDPIXCOUNT (VPG_RA + RA_VALIDPIXCOUNT) +#define VPC_RATOTALQUADCOUNT (VPG_RA + RA_TOTALQUADCOUNT) +#define VPC_RAVALIDQUADCOUNTEZ (VPG_RA + RA_VALIDQUADCOUNTEZ) +#define VPC_RATOTALPRIMCOUNT (VPG_RA + RA_TOTALPRIMCOUNT) +#define VPC_RAPIPECACHEMISSCOUNT (VPG_RA + RA_PIPECACHEMISSCOUNT) +#define VPC_RAPREFCACHEMISSCOUNT (VPG_RA + RA_PREFCACHEMISSCOUNT) +#define VPC_RAEEZCULLCOUNT (VPG_RA + RA_EEZCULLCOUNT) +#define VPC_RANONIDLESTARVECOUNT (VPG_RA + RA_NONIDLESTARVECOUNT) +#define VPC_RASTARVELCOUNT (VPG_RA + RA_STARVELCOUNT) +#define VPC_RASTALLCOUNT (VPG_RA + RA_STALLCOUNT) +#define VPC_RAPROCESSCOUNT (VPG_RA + RA_PROCESSCOUNT) + +/* HW: TEX Counters. */ +#define VPC_TXTOTBILINEARREQ (VPG_TX + TX_TOTBILINEARREQ) +#define VPC_TXTOTTRILINEARREQ (VPG_TX + TX_TOTTRILINEARREQ) +#define VPC_TXTOTDISCARDTEXREQ (VPG_TX + TX_TOTDISCARDTEXREQ) +#define VPC_TXTOTTEXREQ (VPG_TX + TX_TOTTEXREQ) +#define VPC_TXMEMREADCOUNT (VPG_TX + TX_MEMREADCOUNT) +#define VPC_TXMEMREADIN8BCOUNT (VPG_TX + TX_MEMREADIN8BCOUNT) +#define VPC_TXCACHEMISSCOUNT (VPG_TX + TX_CACHEMISSCOUNT) +#define VPC_TXCACHEHITTEXELCOUNT (VPG_TX + TX_CACHEHITTEXELCOUNT) +#define VPC_TXCACHEMISSTEXELCOUNT (VPG_TX + TX_CACHEMISSTEXELCOUNT) +#define VPC_TXNONIDLESTARVECOUNT (VPG_TX + TX_NONIDLESTARVECOUNT) +#define VPC_TXSTARVELCOUNT (VPG_TX + TX_STARVELCOUNT) +#define VPC_TXSTALLCOUNT (VPG_TX + TX_STALLCOUNT) +#define VPC_TXPROCESSCOUNT (VPG_TX + TX_PROCESSCOUNT) + +/* HW: PE Counters. */ +#define VPC_PEKILLEDBYCOLOR (VPG_PE + PE_KILLEDBYCOLOR) +#define VPC_PEKILLEDBYDEPTH (VPG_PE + PE_KILLEDBYDEPTH) +#define VPC_PEDRAWNBYCOLOR (VPG_PE + PE_DRAWNBYCOLOR) +#define VPC_PEDRAWNBYDEPTH (VPG_PE + PE_DRAWNBYDEPTH) + +/* HW: MC Counters. 
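Each legacy VPC_* identifier above is a category base (VPG_HAL, VPG_GPU, VPG_VS and so on, which differ only in bits 16 and up) plus the 1-based per-block index defined at the top of the file; VPC_GPUCYCLES, for instance, is VPG_GPU + GPU_CYCLES, i.e. 0x0a0000 + 1 = 0x0a0001. A minimal sketch of how a tool consuming these IDs could split one back into group and index follows; the 0xFFFF0000 mask is an assumption that happens to hold for the fixed VPG_* bases listed above, it is not stated by the header:

/* Hedged sketch: decompose a legacy VPC_* counter ID into its category base
 * and 1-based index. Assumes the VPG_* bases keep their low 16 bits clear. */
static void vpcSplit(gctUINT32 id, gctUINT32 *group, gctUINT32 *index)
{
    *group = id & 0xFFFF0000;   /* e.g. VPC_GPUCYCLES -> VPG_GPU (0x0a0000) */
    *index = id & 0x0000FFFF;   /* e.g. VPC_GPUCYCLES -> GPU_CYCLES (1)     */
}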
*/ +#define VPC_MCREADREQ8BPIPE (VPG_MC + MC_READREQ8BPIPE) +#define VPC_MCREADREQ8BIP (VPG_MC + MC_READREQ8BIP) +#define VPC_MCWRITEREQ8BPIPE (VPG_MC + MC_WRITEREQ8BPIPE) +#define VPC_MCAXIMINLATENCY (VPG_MC + MC_AXIMINLATENCY) +#define VPC_MCAXIMAXLATENCY (VPG_MC + MC_AXIMAXLATENCY) +#define VPC_MCAXITOTALLATENCY (VPG_MC + MC_AXITOTALLATENCY) +#define VPC_MCAXISAMPLECOUNT (VPG_MC + MC_AXISAMPLECOUNT) + +/* HW: AXI Counters. */ +#define VPC_AXIREADREQSTALLED (VPG_AXI + AXI_READREQSTALLED) +#define VPC_AXIWRITEREQSTALLED (VPG_AXI + AXI_WRITEREQSTALLED) +#define VPC_AXIWRITEDATASTALLED (VPG_AXI + AXI_WRITEDATASTALLED) + +/* HW: FE Counters. */ +#define VPC_FEDRAWCOUNT (VPG_FE + FE_DRAWCOUNT) +#define VPC_FEOUTVERTEXCOUNT (VPG_FE + FE_OUTVERTEXCOUNT) +#define VPC_FESTALLCOUNT (VPG_FE + FE_STALLCOUNT) +#define VPC_FESTARVECOUNT (VPG_FE + FE_STARVECOUNT) + +/* HW: Shader Counters. */ +#define VPNC_VSINSTCOUNT (VPNG_VS + 1) +#define VPNC_VSBRANCHINSTCOUNT (VPNG_VS + 2) +#define VPNC_VSTEXLDINSTCOUNT (VPNG_VS + 3) +#define VPNC_VSRENDEREDVERTCOUNT (VPNG_VS + 4) +#define VPNC_VSNONIDLESTARVECOUNT (VPNG_VS + 5) +#define VPNC_VSSTARVELCOUNT (VPNG_VS + 6) +#define VPNC_VSSTALLCOUNT (VPNG_VS + 7) +#define VPNC_VSPROCESSCOUNT (VPNG_VS + 8) +#define VPNC_VSSHADERCYCLECOUNT (VPNG_VS + 9) +#define VPNC_VS_COUNT (VPNC_VSSHADERCYCLECOUNT - VPNG_VS) + +/* HW: PS Count. */ +#define VPNC_PSINSTCOUNT (VPNG_PS + 1) +#define VPNC_PSBRANCHINSTCOUNT (VPNG_PS + 2) +#define VPNC_PSTEXLDINSTCOUNT (VPNG_PS + 3) +#define VPNC_PSRENDEREDPIXCOUNT (VPNG_PS + 4) +#define VPNC_PSNONIDLESTARVECOUNT (VPNG_PS + 5) +#define VPNC_PSSTARVELCOUNT (VPNG_PS + 6) +#define VPNC_PSSTALLCOUNT (VPNG_PS + 7) +#define VPNC_PSPROCESSCOUNT (VPNG_PS + 8) +#define VPNC_PSSHADERCYCLECOUNT (VPNG_PS + 9) +#define VPNC_PS_COUNT (VPNC_PSSHADERCYCLECOUNT - VPNG_PS) + +/* HW: PA Counters. */ +#define VPNC_PAINVERTCOUNT (VPNG_PA + 1) +#define VPNC_PAINPRIMCOUNT (VPNG_PA + 2) +#define VPNC_PAOUTPRIMCOUNT (VPNG_PA + 3) +#define VPNC_PADEPTHCLIPCOUNT (VPNG_PA + 4) +#define VPNC_PATRIVIALREJCOUNT (VPNG_PA + 5) +#define VPNC_PACULLPRIMCOUNT (VPNG_PA + 6) +#define VPNC_PADROPPRIMCOUNT (VPNG_PA + 7) +#define VPNC_PAFRCLIPPRIMCOUNT (VPNG_PA + 8) +#define VPNC_PAFRCLIPDROPPRIMCOUNT (VPNG_PA + 9) +#define VPNC_PANONIDLESTARVECOUNT (VPNG_PA + 10) +#define VPNC_PASTARVELCOUNT (VPNG_PA + 11) +#define VPNC_PASTALLCOUNT (VPNG_PA + 12) +#define VPNC_PAPROCESSCOUNT (VPNG_PA + 13) +#define VPNC_PA_COUNT (VPNC_PAPROCESSCOUNT - VPNG_PA) + +/* HW: Setup Counters. */ +#define VPNC_SECULLTRIANGLECOUNT (VPNG_SETUP + 1) +#define VPNC_SECULLLINECOUNT (VPNG_SETUP + 2) +#define VPNC_SECLIPTRIANGLECOUNT (VPNG_SETUP + 3) +#define VPNC_SECLIPLINECOUNT (VPNG_SETUP + 4) +#define VPNC_SESTARVECOUNT (VPNG_SETUP + 5) +#define VPNC_SESTALLCOUNT (VPNG_SETUP + 6) +#define VPNC_SERECEIVETRIANGLECOUNT (VPNG_SETUP + 7) +#define VPNC_SESENDTRIANGLECOUNT (VPNG_SETUP + 8) +#define VPNC_SERECEIVELINESCOUNT (VPNG_SETUP + 9) +#define VPNC_SESENDLINESCOUNT (VPNG_SETUP + 10) +#define VPNC_SENONIDLESTARVECOUNT (VPNG_SETUP + 11) +#define VPNC_SETRIVIALREJLINECOUNT (VPNG_SETUP + 12) +#define VPNC_SEPROCESSCOUNT (VPNG_SETUP + 13) +#define VPNC_SE_COUNT (VPNC_SEPROCESSCOUNT - VPNG_SETUP) + +/* HW: RA Counters. 
*/ +#define VPNC_RAVALIDPIXCOUNT (VPNG_RA + 1) +#define VPNC_RATOTALQUADCOUNT (VPNG_RA + 2) +#define VPNC_RAVALIDQUADCOUNTEZ (VPNG_RA + 3) +#define VPNC_RAINPUTPRIMCOUNT (VPNG_RA + 4) +#define VPNC_RAPIPECACHEMISSCOUNT (VPNG_RA + 5) +#define VPNC_RAPREFCACHEMISSCOUNT (VPNG_RA + 6) +#define VPNC_RAPIPEHZCACHEMISSCOUNT (VPNG_RA + 7) +#define VPNC_RAPREFHZCACHEMISSCOUNT (VPNG_RA + 8) +#define VPNC_RAOUTPUTQUADCOUNT (VPNG_RA + 9) +#define VPNC_RAOUTPUTPIXELCOUNT (VPNG_RA + 10) +#define VPNC_RAEEZCULLCOUNT (VPNG_RA + 11) +#define VPNC_RANONIDLESTARVECOUNT (VPNG_RA + 12) +#define VPNC_RASTARVELCOUNT (VPNG_RA + 13) +#define VPNC_RASTALLCOUNT (VPNG_RA + 14) +#define VPNC_RAPROCESSCOUNT (VPNG_RA + 15) +#define VPNC_RA_COUNT (VPNC_RAPROCESSCOUNT - VPNG_RA) + +/* HW: TEX Counters. */ +#define VPNC_TXTOTBILINEARREQ (VPNG_TX + 1) +#define VPNC_TXTOTTRILINEARREQ (VPNG_TX + 2) +#define VPNC_TXTOTDISCARDTEXREQ (VPNG_TX + 3) +#define VPNC_TXTOTTEXREQ (VPNG_TX + 4) +#define VPNC_TXMC0MISSCOUNT (VPNG_TX + 5) +#define VPNC_TXMC0REQCOUNT (VPNG_TX + 6) +#define VPNC_TXMC1MISSCOUNT (VPNG_TX + 7) +#define VPNC_TXMC1REQCOUNT (VPNG_TX + 8) +#define VPNC_TX_COUNT (VPNC_TXMC1REQCOUNT - VPNG_TX) + +/* HW: PE Counters. */ +#define VPNC_PE0KILLEDBYCOLOR (VPNG_PE + 1) +#define VPNC_PE0KILLEDBYDEPTH (VPNG_PE + 2) +#define VPNC_PE0DRAWNBYCOLOR (VPNG_PE + 3) +#define VPNC_PE0DRAWNBYDEPTH (VPNG_PE + 4) +#define VPNC_PE1KILLEDBYCOLOR (VPNG_PE + 5) +#define VPNC_PE1KILLEDBYDEPTH (VPNG_PE + 6) +#define VPNC_PE1DRAWNBYCOLOR (VPNG_PE + 7) +#define VPNC_PE1DRAWNBYDEPTH (VPNG_PE + 8) +#define VPNC_PE_COUNT (VPNC_PE1DRAWNBYDEPTH - VPNG_PE) + +/* HW: MCC Counters. */ +#define VPNC_MCCREADREQ8BCOLORPIPE (VPNG_MCC + 1) +#define VPNC_MCCREADREQ8BSOCOLORPIPE (VPNG_MCC + 2) +#define VPNC_MCCWRITEREQ8BCOLORPIPE (VPNG_MCC + 3) +#define VPNC_MCCREADREQSOCOLORPIPE (VPNG_MCC + 4) +#define VPNC_MCCWRITEREQCOLORPIPE (VPNG_MCC + 5) +#define VPNC_MCCREADREQ8BDEPTHPIPE (VPNG_MCC + 6) +#define VPNC_MCCREADREQ8BSFDEPTHPIPE (VPNG_MCC + 7) +#define VPNC_MCCWRITEREQ8BDEPTHPIPE (VPNG_MCC + 8) +#define VPNC_MCCREADREQSFDEPTHPIPE (VPNG_MCC + 9) +#define VPNC_MCCWRITEREQDEPTHPIPE (VPNG_MCC + 10) +#define VPNC_MCCREADREQ8BOTHERPIPE (VPNG_MCC + 11) +#define VPNC_MCCWRITEREQ8BOTHERPIPE (VPNG_MCC + 12) +#define VPNC_MCCREADREQOTHERPIPE (VPNG_MCC + 13) +#define VPNC_MCCWRITEREQOTHERPIPE (VPNG_MCC + 14) +#define VPNC_MCCAXIMINLATENCY (VPNG_MCC + 15) +#define VPNC_MCCAXIMAXLATENCY (VPNG_MCC + 16) +#define VPNC_MCCAXITOTALLATENCY (VPNG_MCC + 17) +#define VPNC_MCCAXISAMPLECOUNT (VPNG_MCC + 18) +#define VPNC_MCCFEREADBANDWIDTH (VPNG_MCC + 19) +#define VPNC_MCCMMUREADBANDWIDTH (VPNG_MCC + 20) +#define VPNC_MCCBLTREADBANDWIDTH (VPNG_MCC + 21) +#define VPNC_MCCSH0READBANDWIDTH (VPNG_MCC + 22) +#define VPNC_MCCSH1READBANDWIDTH (VPNG_MCC + 23) +#define VPNC_MCCPEWRITEBANDWIDTH (VPNG_MCC + 24) +#define VPNC_MCCBLTWRITEBANDWIDTH (VPNG_MCC + 25) +#define VPNC_MCCSH0WRITEBANDWIDTH (VPNG_MCC + 26) +#define VPNC_MCCSH1WRITEBANDWIDTH (VPNG_MCC + 27) +#define VPNC_MCC_COUNT (VPNC_MCCSH1WRITEBANDWIDTH - VPNG_MCC) + +/* HW: MCZ Counters. 
*/ +#define VPNC_MCZREADREQ8BCOLORPIPE (VPNG_MCZ + 1) +#define VPNC_MCZREADREQ8BSOCOLORPIPE (VPNG_MCZ + 2) +#define VPNC_MCZWRITEREQ8BCOLORPIPE (VPNG_MCZ + 3) +#define VPNC_MCZREADREQSOCOLORPIPE (VPNG_MCZ + 4) +#define VPNC_MCZWRITEREQCOLORPIPE (VPNG_MCZ + 5) +#define VPNC_MCZREADREQ8BDEPTHPIPE (VPNG_MCZ + 6) +#define VPNC_MCZREADREQ8BSFDEPTHPIPE (VPNG_MCZ + 7) +#define VPNC_MCZWRITEREQ8BDEPTHPIPE (VPNG_MCZ + 8) +#define VPNC_MCZREADREQSFDEPTHPIPE (VPNG_MCZ + 9) +#define VPNC_MCZWRITEREQDEPTHPIPE (VPNG_MCZ + 10) +#define VPNC_MCZREADREQ8BOTHERPIPE (VPNG_MCZ + 11) +#define VPNC_MCZWRITEREQ8BOTHERPIPE (VPNG_MCZ + 12) +#define VPNC_MCZREADREQOTHERPIPE (VPNG_MCZ + 13) +#define VPNC_MCZWRITEREQOTHERPIPE (VPNG_MCZ + 14) +#define VPNC_MCZAXIMINLATENCY (VPNG_MCZ + 15) +#define VPNC_MCZAXIMAXLATENCY (VPNG_MCZ + 16) +#define VPNC_MCZAXITOTALLATENCY (VPNG_MCZ + 17) +#define VPNC_MCZAXISAMPLECOUNT (VPNG_MCZ + 18) +#define VPNC_MCZ_COUNT (VPNC_MCZAXISAMPLECOUNT - VPNG_MCZ) + +/* HW: HI Counters. */ +#define VPNC_HI0READ8BYTE (VPNG_HI + 1) +#define VPNC_HI0WRITE8BYTE (VPNG_HI + 2) +#define VPNC_HI0READREQ (VPNG_HI + 3) +#define VPNC_HI0WRITEREQ (VPNG_HI + 4) +#define VPNC_HI0AXIREADREQSTALL (VPNG_HI + 5) +#define VPNC_HI0AXIWRITEREQSTALL (VPNG_HI + 6) +#define VPNC_HI0AXIWRITEDATASTALL (VPNG_HI + 7) +#define VPNC_HI1READ8BYTE (VPNG_HI + 8) +#define VPNC_HI1WRITE8BYTE (VPNG_HI + 9) +#define VPNC_HI1READREQ (VPNG_HI + 10) +#define VPNC_HI1WRITEREQ (VPNG_HI + 11) +#define VPNC_HI1AXIREADREQSTALL (VPNG_HI + 12) +#define VPNC_HI1AXIWRITEREQSTALL (VPNG_HI + 13) +#define VPNC_HI1AXIWRITEDATASTALL (VPNG_HI + 14) +#define VPNC_HITOTALCYCLES (VPNG_HI + 15) +#define VPNC_HIIDLECYCLES (VPNG_HI + 16) +#define VPNC_HIREAD8BYTE (VPNG_HI + 17) +#define VPNC_HIWRITE8BYTE (VPNG_HI + 18) +#define VPNC_HIOCBREAD16BYTE (VPNG_HI + 19) +#define VPNC_HIOCBWRITE16BYTE (VPNG_HI + 20) +#define VPNC_HI_COUNT (VPNC_HIOCBWRITE16BYTE - VPNG_HI) + +/* HW: L2 Counters. */ +#define VPNC_L2AXI0READREQCOUNT (VPNG_L2 + 1) +#define VPNC_L2AXI1READREQCOUNT (VPNG_L2 + 2) +#define VPNC_L2AXI0WRITEREQCOUNT (VPNG_L2 + 3) +#define VPNC_L2AXI1WRITEREQCOUNT (VPNG_L2 + 4) +#define VPNC_L2READTRANSREQBYAXI0 (VPNG_L2 + 5) +#define VPNC_L2READTRANSREQBYAXI1 (VPNG_L2 + 6) +#define VPNC_L2WRITETRANSREQBYAXI0 (VPNG_L2 + 7) +#define VPNC_L2WRITETRANSREQBYAXI1 (VPNG_L2 + 8) +#define VPNC_L2AXI0MINLATENCY (VPNG_L2 + 9) +#define VPNC_L2AXI0MAXLATENCY (VPNG_L2 + 10) +#define VPNC_L2AXI0TOTLATENCY (VPNG_L2 + 11) +#define VPNC_L2AXI0TOTREQCOUNT (VPNG_L2 + 12) +#define VPNC_L2AXI1MINLATENCY (VPNG_L2 + 13) +#define VPNC_L2AXI1MAXLATENCY (VPNG_L2 + 14) +#define VPNC_L2AXI1TOTLATENCY (VPNG_L2 + 15) +#define VPNC_L2AXI1TOTREQCOUNT (VPNG_L2 + 16) +#define VPNC_L2_COUNT (VPNC_L2AXI1TOTREQCOUNT - VPNG_L2) + +/* HW: FE Counters. */ +#define VPNC_FEDRAWCOUNT (VPNG_FE + 1) +#define VPNC_FEOUTVERTEXCOUNT (VPNG_FE + 2) +#define VPNC_FECACHEMISSCOUNT (VPNG_FE + 3) +#define VPNC_FECACHELKCOUNT (VPNG_FE + 4) +#define VPNC_FESTALLCOUNT (VPNG_FE + 5) +#define VPNC_FESTARVECOUNT (VPNG_FE + 6) +#define VPNC_FEPROCESSCOUNT (VPNG_FE + 7) +#define VPNC_FE_COUNT (VPNC_FEPROCESSCOUNT - VPNG_FE) + +#define TOTAL_COUNTER_NUMBER \ + (VPNC_FE_COUNT + VPNC_VS_COUNT + VPNC_PA_COUNT + VPNC_SE_COUNT + VPNC_RA_COUNT + \ + VPNC_PS_COUNT + VPNC_TX_COUNT + VPNC_PE_COUNT + VPNC_MCC_COUNT + VPNC_MCZ_COUNT + \ + VPNC_HI_COUNT + VPNC_L2_COUNT) + +#define TOTAL_MODULE_NUMBER 12 + +/* PROGRAM: Shader program counters. 
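Each VPNC_xx_COUNT above is the last counter of its module minus the module base, i.e. the number of probe counters that module exposes (FE 7, VS 9, PA 13, SE 13, RA 15, PS 9, TX 8, PE 8, MCC 27, MCZ 18, HI 20, L2 16), so TOTAL_COUNTER_NUMBER evaluates to 163 across the TOTAL_MODULE_NUMBER (12) modules. Because everything involved is an integer constant expression, a consumer could guard that relationship at compile time; the check below is an illustration under the assumption of a C11 compiler, it is not part of the header:

/* Illustrative only (assumes C11): verify the per-module counts still add up. */
_Static_assert(TOTAL_COUNTER_NUMBER ==
               (7 + 9 + 13 + 13 + 15 + 9 + 8 + 8 + 27 + 18 + 20 + 16),
               "a VPNC_*_COUNT changed; revisit users of TOTAL_COUNTER_NUMBER");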
*/ +#define VPC_PVSINSTRCOUNT (VPG(PVS) + PVS_INSTRCOUNT) +#define VPC_PVSALUINSTRCOUNT (VPG(PVS) + PVS_ALUINSTRCOUNT) +#define VPC_PVSTEXINSTRCOUNT (VPG(PVS) + PVS_TEXINSTRCOUNT) +#define VPC_PVSATTRIBCOUNT (VPG(PVS) + PVS_ATTRIBCOUNT) +#define VPC_PVSUNIFORMCOUNT (VPG(PVS) + PVS_UNIFORMCOUNT) +#define VPC_PVSFUNCTIONCOUNT (VPG(PVS) + PVS_FUNCTIONCOUNT) +#define VPC_PVSSOURCE (VPG(PVS) + PVS_SOURCE) + +#define VPC_PPSINSTRCOUNT (VPG(PPS) + PPS_INSTRCOUNT) +#define VPC_PPSALUINSTRCOUNT (VPG(PPS) + PPS_ALUINSTRCOUNT) +#define VPC_PPSTEXINSTRCOUNT (VPG(PPS) + PPS_TEXINSTRCOUNT) +#define VPC_PPSATTRIBCOUNT (VPG(PPS) + PPS_ATTRIBCOUNT) +#define VPC_PPSUNIFORMCOUNT (VPG(PPS) + PPS_UNIFORMCOUNT) +#define VPC_PPSFUNCTIONCOUNT (VPG(PPS) + PPS_FUNCTIONCOUNT) +#define VPC_PPSSOURCE (VPG(PPS) + PPS_SOURCE) + +#define VPC_PROGRAMHANDLE (VPG(PROG) + 1) + +#define VPC_ES30_DRAW_NO (VPG(ES30_DRAW) + 1) +#define VPC_ES11_DRAW_NO (VPG_ES11_DRAW + 1) +#define VPC_ES30_GPU_NO (VPG(MULTI_GPU) + 1) + +#define MODULE_FRONT_END_COUNTER_NUM 0x5 +#define MODULE_VERTEX_SHADER_COUNTER_NUM 0x9 +#define MODULE_PRIMITIVE_ASSEMBLY_COUNTER_NUM 0xC +#define MODULE_SETUP_COUNTER_NUM 0xD +#define MODULE_RASTERIZER_COUNTER_NUM 0xE +#define MODULE_PIXEL_SHADER_COUNTER_NUM 0x9 +#define MODULE_TEXTURE_COUNTER_NUM 0x8 +#define MODULE_PIXEL_ENGINE_COUNTER_NUM 0x8 +#define MODULE_MEMORY_CONTROLLER_COLOR_COUNTER_NUM 0xC +#define MODULE_MEMORY_CONTROLLER_DEPTH_COUNTER_NUM 0xC +#define MODULE_HOST_INTERFACE0_COUNTER_NUM 0x9 +#define MODULE_HOST_INTERFACE1_COUNTER_NUM 0x7 +#define MODULE_GPUL2_CACHE_COUNTER_NUM 0xE +#define MODULE_WORK_DISTRIBUTOR_COUNTER_NUM 0x0 +#define MODULE_POSITION_PA_COUNTER_NUM 0xA +#define MODULE_FINAL_PA_COUNTER_NUM 0xB +#define MODULE_TESS_CTRL_SHADER_COUNTER_NUM 0x9 +#define MODULE_TESS_EVAL_SHADER_COUNTER_NUM 0x9 +#define MODULE_GEOMETRY_SHADER_COUNTER_NUM 0x9 +#define MODULE_TRANSFORM_FEEDBACK_COUNTER_NUM 0x0 +#define MODULE_UNIVERSAL_STORAGE_COUNTER_NUM 0x0 +#define MODULE_DIRECTORY_COUNTER_NUM 0x0 +#define TOTAL_PROBE_NUMBER \ + (MODULE_FRONT_END_COUNTER_NUM + MODULE_VERTEX_SHADER_COUNTER_NUM + \ + MODULE_PRIMITIVE_ASSEMBLY_COUNTER_NUM + MODULE_SETUP_COUNTER_NUM + \ + MODULE_RASTERIZER_COUNTER_NUM + MODULE_PIXEL_SHADER_COUNTER_NUM + \ + MODULE_TEXTURE_COUNTER_NUM + MODULE_PIXEL_ENGINE_COUNTER_NUM + \ + MODULE_MEMORY_CONTROLLER_COLOR_COUNTER_NUM + \ + MODULE_MEMORY_CONTROLLER_DEPTH_COUNTER_NUM + \ + MODULE_HOST_INTERFACE0_COUNTER_NUM + MODULE_HOST_INTERFACE1_COUNTER_NUM + \ + MODULE_GPUL2_CACHE_COUNTER_NUM + MODULE_WORK_DISTRIBUTOR_COUNTER_NUM + \ + MODULE_POSITION_PA_COUNTER_NUM + MODULE_FINAL_PA_COUNTER_NUM + \ + MODULE_TESS_CTRL_SHADER_COUNTER_NUM + MODULE_TESS_EVAL_SHADER_COUNTER_NUM + \ + MODULE_GEOMETRY_SHADER_COUNTER_NUM + MODULE_TRANSFORM_FEEDBACK_COUNTER_NUM + \ + MODULE_UNIVERSAL_STORAGE_COUNTER_NUM + MODULE_DIRECTORY_COUNTER_NUM) + +#define TOTAL_CL_COUNTER_NUMBER \ + (\ + (VPNC_FEPROCESSCOUNT - VPNC_FECACHELKCOUNT + 1) + \ + VPNC_TX_COUNT + (VPNC_PS_COUNT - 1) + \ + (VPNC_MCCSH1WRITEBANDWIDTH - VPNC_MCCWRITEREQDEPTHPIPE - \ + (VPNC_MCCSH1WRITEBANDWIDTH - VPNC_MCCFEREADBANDWIDTH + 1)) + \ + (VPNC_HI_COUNT - (VPNC_HIOCBWRITE16BYTE - VPNC_HIOCBREAD16BYTE + 1)) + \ + VPNC_L2_COUNT \ + ) +#define TOTAL_CL_MODULE_NUMBER (6) + +#define VPNC_NN_LAYER_ID (VPNG_NN + 1) +#define VPNC_NN_LAYER_ID_OVFL (VPNG_NN + 2) +#define VPNC_NN_INSTR_INFO (VPNG_NN + 3) +#define VPNC_NN_TOTAL_BUSY_CYCLE (VPNG_NN + 4) +#define VPNC_NN_TOTAL_BUSY_CYCLE_OVFL (VPNG_NN + 5) +#define VPNC_NN_TOTAL_READ_CYCLE_DDR (VPNG_NN + 
6) +#define VPNC_NN_TOTAL_READ_CYCLE_DDR_OVFL (VPNG_NN + 7) +#define VPNC_NN_TOTAL_READ_VALID_BW_DDR (VPNG_NN + 8) +#define VPNC_NN_TOTAL_READ_VALID_BW_DDR_OVFL (VPNG_NN + 9) +#define VPNC_NN_TOTAL_WRITE_CYCLE_DDR (VPNG_NN + 10) +#define VPNC_NN_TOTAL_WRITE_CYCLE_DDR_OVFL (VPNG_NN + 11) +#define VPNC_NN_TOTAL_WRITE_VALID_BW_DDR (VPNG_NN + 12) +#define VPNC_NN_TOTAL_WRITE_VALID_BW_DDR_OVFL (VPNG_NN + 13) +#define VPNC_NN_TOTAL_READ_CYCLE_SRAM (VPNG_NN + 14) +#define VPNC_NN_TOTAL_READ_CYCLE_SRAM_OVFL (VPNG_NN + 15) +#define VPNC_NN_TOTAL_WRITE_CYCLE_SRAM (VPNG_NN + 16) +#define VPNC_NN_TOTAL_WRITE_CYCLE_SRAM_OVFL (VPNG_NN + 17) +#define VPNC_NN_TOTAL_MAC_CYCLE (VPNG_NN + 18) +#define VPNC_NN_TOTAL_MAC_CYCLE_OVFL (VPNG_NN + 19) +#define VPNC_NN_TOTAL_MAC_COUNT (VPNG_NN + 20) +#define VPNC_NN_TOTAL_MAC_COUNT_OVFL (VPNG_NN + 21) +#define VPNC_NN_ZERO_COEF_SKIP_COUNT (VPNG_NN + 22) +#define VPNC_NN_ZERO_COEF_SKIP_COUNT_OVFL (VPNG_NN + 23) +#define VPNC_NN_NON_ZERO_COEF_COUNT (VPNG_NN + 24) +#define VPNC_NN_NON_ZERO_COEF_COUNT_OVFL (VPNG_NN + 25) +#define VPNC_NN_TOTAL_IDLE_CYCLE_CORE_OVFL (VPNG_NN + 26) +#define VPNC_NN_TOTAL_IDLE_CYCLE_CORE0 (VPNG_NN + 27) +#define VPNC_NN_TOTAL_IDLE_CYCLE_CORE1 (VPNG_NN + 28) +#define VPNC_NN_TOTAL_IDLE_CYCLE_CORE2 (VPNG_NN + 29) +#define VPNC_NN_TOTAL_IDLE_CYCLE_CORE3 (VPNG_NN + 30) +#define VPNC_NN_TOTAL_IDLE_CYCLE_CORE4 (VPNG_NN + 31) +#define VPNC_NN_TOTAL_IDLE_CYCLE_CORE5 (VPNG_NN + 32) +#define VPNC_NN_TOTAL_IDLE_CYCLE_CORE6 (VPNG_NN + 33) +#define VPNC_NN_TOTAL_IDLE_CYCLE_CORE7 (VPNG_NN + 34) +#define VPNC_NN_TOTAL_IDLE_CYCLE_CORE8 (VPNG_NN + 35) +#define VPNC_NN_TOTAL_IDLE_CYCLE_CORE9 (VPNG_NN + 36) +#define VPNC_NN_TOTAL_IDLE_CYCLE_CORE10 (VPNG_NN + 37) +#define VPNC_NN_TOTAL_IDLE_CYCLE_CORE11 (VPNG_NN + 38) +#define VPNC_NN_TOTAL_IDLE_CYCLE_CORE12 (VPNG_NN + 39) +#define VPNC_NN_TOTAL_IDLE_CYCLE_CORE13 (VPNG_NN + 40) +#define VPNC_NN_TOTAL_IDLE_CYCLE_CORE14 (VPNG_NN + 41) +#define VPNC_NN_TOTAL_IDLE_CYCLE_CORE15 (VPNG_NN + 42) +#define VPNC_NN_TOTAL_IDLE_CYCLE_CORE16 (VPNG_NN + 43) +#define VPNC_NN_TOTAL_IDLE_CYCLE_CORE17 (VPNG_NN + 44) +#define VPNC_NN_TOTAL_IDLE_CYCLE_CORE18 (VPNG_NN + 45) +#define VPNC_NN_TOTAL_IDLE_CYCLE_CORE19 (VPNG_NN + 46) +#define VPNC_NN_TOTAL_IDLE_CYCLE_CORE20 (VPNG_NN + 47) +#define VPNC_NN_TOTAL_IDLE_CYCLE_CORE21 (VPNG_NN + 48) +#define VPNC_NN_TOTAL_IDLE_CYCLE_CORE22 (VPNG_NN + 49) +#define VPNC_NN_TOTAL_IDLE_CYCLE_CORE23 (VPNG_NN + 50) +#define VPNC_NN_TOTAL_IDLE_CYCLE_CORE24 (VPNG_NN + 51) +#define VPNC_NN_TOTAL_IDLE_CYCLE_CORE25 (VPNG_NN + 52) +#define VPNC_NN_TOTAL_IDLE_CYCLE_CORE26 (VPNG_NN + 53) +#define VPNC_NN_TOTAL_IDLE_CYCLE_CORE27 (VPNG_NN + 54) +#define VPNC_NN_TOTAL_IDLE_CYCLE_CORE28 (VPNG_NN + 55) +#define VPNC_NN_TOTAL_IDLE_CYCLE_CORE29 (VPNG_NN + 56) +#define VPNC_NN_TOTAL_IDLE_CYCLE_CORE30 (VPNG_NN + 57) +#define VPNC_NN_TOTAL_IDLE_CYCLE_CORE31 (VPNG_NN + 58) +#define VPNC_NN_COUNT (VPNC_NN_TOTAL_IDLE_CYCLE_CORE31 - VPNG_NN) + +/* HW: TP Probe Counters. 
*/ +#define VPNC_TP_LAYER_ID (VPNG_TP + 1) +#define VPNC_TP_LAYER_ID_OVFL (VPNG_TP + 2) +#define VPNC_TP_TOTAL_BUSY_CYCLE (VPNG_TP + 3) +#define VPNC_TP_TOTAL_BUSY_CYCLE_OVFL (VPNG_TP + 4) +#define VPNC_TP_TOTAL_READ_BW_CACHE (VPNG_TP + 5) +#define VPNC_TP_TOTAL_READ_BW_CACHE_OVFL (VPNG_TP + 6) +#define VPNC_TP_TOTAL_WRITE_BW_CACHE (VPNG_TP + 7) +#define VPNC_TP_TOTAL_WRITE_BW_CACHE_OVFL (VPNG_TP + 8) +#define VPNC_TP_TOTAL_READ_BW_SRAM (VPNG_TP + 9) +#define VPNC_TP_TOTAL_READ_BW_SRAM_OVFL (VPNG_TP + 10) +#define VPNC_TP_TOTAL_WRITE_BW_SRAM (VPNG_TP + 11) +#define VPNC_TP_TOTAL_WRITE_BW_SRAM_OVFL (VPNG_TP + 12) +#define VPNC_TP_TOTAL_READ_BW_OCB (VPNG_TP + 13) +#define VPNC_TP_TOTAL_READ_BW_OCB_OVFL (VPNG_TP + 14) +#define VPNC_TP_TOTAL_WRITE_BW_OCB (VPNG_TP + 15) +#define VPNC_TP_TOTAL_WRITE_BW_OCB_OVFL (VPNG_TP + 16) +#define VPNC_TP_FC_PIX_CNT (VPNG_TP + 17) +#define VPNC_TP_FC_ZERO_SKIP_CNT (VPNG_TP + 18) +#define VPNC_TP_FC_PIX_CNT_OVFL (VPNG_TP + 19) +#define VPNC_TP_FC_ZERO_SKIP_CNT_OVFL (VPNG_TP + 20) +#define VPNC_TP_FC_COEF_CNT (VPNG_TP + 21) +#define VPNC_TP_FC_COEF_ZERO_CNT (VPNG_TP + 22) +#define VPNC_TP_FC_COEF_CNT_OVFL (VPNG_TP + 23) +#define VPNC_TP_FC_COEF_ZERO_CNT_OVFL (VPNG_TP + 24) +#define VPNC_TP_TOTAL_IDLE_CYCLE_CORE0 (VPNG_TP + 25) +#define VPNC_TP_TOTAL_IDLE_CYCLE_CORE0_OVFL (VPNG_TP + 26) +#define VPNC_TP_TOTAL_IDLE_CYCLE_CORE1 (VPNG_TP + 27) +#define VPNC_TP_TOTAL_IDLE_CYCLE_CORE1_OVFL (VPNG_TP + 28) +#define VPNC_TP_TOTAL_IDLE_CYCLE_CORE2 (VPNG_TP + 29) +#define VPNC_TP_TOTAL_IDLE_CYCLE_CORE2_OVFL (VPNG_TP + 30) +#define VPNC_TP_TOTAL_IDLE_CYCLE_CORE3 (VPNG_TP + 31) +#define VPNC_TP_TOTAL_IDLE_CYCLE_CORE3_OVFL (VPNG_TP + 32) +#define VPNC_TP_TOTAL_IDLE_CYCLE_CORE4 (VPNG_TP + 33) +#define VPNC_TP_TOTAL_IDLE_CYCLE_CORE4_OVFL (VPNG_TP + 34) +#define VPNC_TP_TOTAL_IDLE_CYCLE_CORE5 (VPNG_TP + 35) +#define VPNC_TP_TOTAL_IDLE_CYCLE_CORE5_OVFL (VPNG_TP + 36) +#define VPNC_TP_TOTAL_IDLE_CYCLE_CORE6 (VPNG_TP + 37) +#define VPNC_TP_TOTAL_IDLE_CYCLE_CORE6_OVFL (VPNG_TP + 38) +#define VPNC_TP_TOTAL_IDLE_CYCLE_CORE7 (VPNG_TP + 39) +#define VPNC_TP_TOTAL_IDLE_CYCLE_CORE7_OVFL (VPNG_TP + 40) +#define VPNC_TP_TOTAL_IDLE_CYCLE_CORE8 (VPNG_TP + 41) +#define VPNC_TP_TOTAL_IDLE_CYCLE_CORE8_OVFL (VPNG_TP + 42) +#define VPNC_TP_TOTAL_IDLE_CYCLE_CORE9 (VPNG_TP + 43) +#define VPNC_TP_TOTAL_IDLE_CYCLE_CORE9_OVFL (VPNG_TP + 44) +#define VPNC_TP_TOTAL_IDLE_CYCLE_CORE10 (VPNG_TP + 45) +#define VPNC_TP_TOTAL_IDLE_CYCLE_CORE10_OVFL (VPNG_TP + 46) +#define VPNC_TP_TOTAL_IDLE_CYCLE_CORE11 (VPNG_TP + 47) +#define VPNC_TP_TOTAL_IDLE_CYCLE_CORE11_OVFL (VPNG_TP + 48) +#define VPNC_TP_TOTAL_IDLE_CYCLE_CORE12 (VPNG_TP + 49) +#define VPNC_TP_TOTAL_IDLE_CYCLE_CORE12_OVFL (VPNG_TP + 50) +#define VPNC_TP_TOTAL_IDLE_CYCLE_CORE13 (VPNG_TP + 51) +#define VPNC_TP_TOTAL_IDLE_CYCLE_CORE13_OVFL (VPNG_TP + 52) +#define VPNC_TP_TOTAL_IDLE_CYCLE_CORE14 (VPNG_TP + 53) +#define VPNC_TP_TOTAL_IDLE_CYCLE_CORE14_OVFL (VPNG_TP + 54) +#define VPNC_TP_TOTAL_IDLE_CYCLE_CORE15 (VPNG_TP + 55) +#define VPNC_TP_TOTAL_IDLE_CYCLE_CORE15_OVFL (VPNG_TP + 56) +#define VPNC_TP_COUNT (VPNC_TP_TOTAL_IDLE_CYCLE_CORE15_OVFL - VPNG_TP) + +#define MODULE_NN_BASE_COUNTER_NUM 0x6 +/*#define MODULE_NN_RESERVED_COUNTER_NUM 0x9*/ +#define MODULE_NN_IDLE_COUNTER_NUM 0x9 +#define MODULE_NN_COUNTER_NUM (MODULE_NN_BASE_COUNTER_NUM + MODULE_NN_RESERVED_COUNTER_NUM + MODULE_NN_IDLE_COUNTER_NUM) + +#define TOTAL_VIP_COUNTER_NUMBER (VPNC_TP_COUNT + VPNC_NN_COUNT) +#define TOTAL_VIP_MODULE_NUMBER 2 +#define MODULE_TP_COUNTER_NUM 0xE +#define 
TOTAL_VIP_PROBE_NUMBER (MODULE_NN_COUNTER_NUM + MODULE_TP_COUNTER_NUM) + +#ifdef ANDROID +# define DEFAULT_PROFILE_FILE_NAME "/sdcard/vprofiler.vpd" +#else +# define DEFAULT_PROFILE_FILE_NAME "vprofiler.vpd" +#endif + +#define VPHEADER_VERSION "VP20" + +#define VPFILETYPE_GL "10" + +#define VPFILETYPE_CL "00" + +#if gcdENDIAN_BIG +# define BIG_ENDIAN_TRANS_INT(x) \ + ((gctUINT32)((((gctUINT32)(x) & (gctUINT32)0x000000FFUL) << 24) | \ + (((gctUINT32)(x) & (gctUINT32)0x0000FF00UL) << 8) | \ + (((gctUINT32)(x) & (gctUINT32)0x00FF0000UL) >> 8) | \ + (((gctUINT32)(x) & (gctUINT32)0xFF000000UL) >> 24))) +#else +# define BIG_ENDIAN_TRANS_INT(x) x +#endif + +/* Write a data value. */ +#define gcmWRITE_VALUE(IntData) \ + do { \ + gceSTATUS status; \ + gctINT32 value = IntData; \ + value = BIG_ENDIAN_TRANS_INT(value); \ + gcmERR_BREAK(gcoPROFILER_Write(Profiler, gcmSIZEOF(value), &value)); \ + } while (gcvFALSE) + +#define gcmWRITE_CONST(Const) \ + do { \ + gceSTATUS status; \ + gctINT32 data = Const; \ + data = BIG_ENDIAN_TRANS_INT(data); \ + gcmERR_BREAK(gcoPROFILER_Write(Profiler, gcmSIZEOF(data), &data)); \ + } while (gcvFALSE) + +#define gcmWRITE_COUNTER(Counter, Value) \ + do { \ + gcmWRITE_CONST(Counter); \ + gcmWRITE_VALUE(Value); \ + } while (gcvFALSE) + +/* Write a data value. */ +#define gcmRECORD_VALUE(IntData) \ + do { \ + gctINT32 value = IntData; \ + value = BIG_ENDIAN_TRANS_INT(value); \ + counterData[counterIndex++] = value; \ + } while (gcvFALSE) + +#define gcmRECORD_CONST(Const) \ + do { \ + gctINT32 data = Const; \ + data = BIG_ENDIAN_TRANS_INT(data); \ + counterData[counterIndex++] = data; \ + } while (gcvFALSE) + +#define gcmRECORD_COUNTER(Counter, Value) \ + do { \ + gcmRECORD_CONST(Counter); \ + gcmRECORD_VALUE(Value); \ + } while (gcvFALSE) + +/* Write a string value (char*). 
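The gcmWRITE_* and gcmRECORD_* helpers above serialize 32-bit (counter ID, value) pairs, byte-swapped via BIG_ENDIAN_TRANS_INT on gcdENDIAN_BIG builds, either straight through gcoPROFILER_Write or into a local counterData[]/counterIndex staging pair. They expand against a variable literally named Profiler in the enclosing scope, so a caller looks roughly like the sketch below; the function name and the choice of counter are illustrative:

/* Hedged sketch: emit one GPU-cycles sample into the profiler output.
 * gcmWRITE_COUNTER requires a gcoPROFILER named Profiler to be in scope. */
static void emitGpuCycles(gcoPROFILER Profiler, gctUINT32 cycles)
{
    gcmWRITE_COUNTER(VPC_GPUCYCLES, cycles);  /* writes the ID, then the value */
}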
*/ +#define gcmWRITE_STRING(String) \ + do { \ + gceSTATUS status; \ + gctINT32 length; \ + length = (gctINT32)gcoOS_StrLen((gctSTRING)String, gcvNULL); \ + length = BIG_ENDIAN_TRANS_INT(length); \ + gcmERR_BREAK(gcoPROFILER_Write(Profiler, gcmSIZEOF(length), &length)); \ + gcmERR_BREAK(gcoPROFILER_Write(Profiler, length, String)); \ + } while (gcvFALSE) + +#define gcmWRITE_BUFFER(Size, Buffer) \ + do { \ + gceSTATUS status; \ + gcmERR_BREAK(gcoPROFILER_Write(Profiler, Size, Buffer)); \ + } while (gcvFALSE) + +#define gcmGET_COUNTER(counter, counterId) \ + do { \ + if (*(memory + (counterId + offset) * (1 << clusterIDWidth)) == 0xdeaddead) { \ + counter = 0xdeaddead; \ + } else { \ + gctUINT32 i; \ + gctUINT32_PTR Memory = memory; \ + gctUINT32 total_probe_number = 0; \ + counter = 0; \ + gcmONERROR(gcoPROFILER_GetProbeNumber(Hardware, &total_probe_number)); \ + Memory = memory + total_probe_number * CoreId * (1 << clusterIDWidth); \ + for (i = 0; i < (gctUINT32)(1 << clusterIDWidth); i++) { \ + counter += *(Memory + (counterId + offset) * (1 << clusterIDWidth) + i); \ + } \ + } \ + } while (gcvFALSE) + +#define gcmGET_MAXCOUNTER(counter, counterId) \ + do { \ + if (*(memory + (counterId + offset) * (1 << clusterIDWidth)) == 0xdeaddead) { \ + counter = 0xdeaddead; \ + } else { \ + gctUINT32 i; \ + gctUINT32_PTR Memory = memory; \ + gctUINT32 total_probe_number = 0; \ + gctUINT32 max_counter = 0; \ + \ + counter = 0; \ + gcmONERROR(gcoPROFILER_GetProbeNumber(Hardware, &total_probe_number)); \ + Memory = memory + total_probe_number * CoreId * (1 << clusterIDWidth); \ + for (i = 0; i < (gctUINT32)(1 << clusterIDWidth); i++) { \ + if (max_counter < *(Memory + (counterId + offset) * (1 << clusterIDWidth) + i)) \ + max_counter = *(Memory + (counterId + offset) * (1 << clusterIDWidth) + i); \ + } \ + counter = max_counter; \ + } \ + } while (gcvFALSE) + +#define gcmGET_MINCOUNTER(counter, counterId) \ + do { \ + if (*(memory + (counterId + offset) * (1 << clusterIDWidth)) == 0xdeaddead) { \ + counter = 0xdeaddead; \ + } else { \ + gctUINT32 i; \ + gctUINT32_PTR Memory = memory; \ + gctUINT32 total_probe_number = 0; \ + gctUINT32 min_counter = 0; \ + counter = 0; \ + gcmONERROR(gcoPROFILER_GetProbeNumber(Hardware, &total_probe_number)); \ + Memory = memory + total_probe_number * CoreId * (1 << clusterIDWidth); \ + min_counter = *(Memory + (counterId + offset) * (1 << clusterIDWidth)); \ + for (i = 0; i < (gctUINT32)(1 << clusterIDWidth); i++) { \ + if (Profiler->isDebugCounter) \ + { \ + gcmPRINT("%s %d hi_total_idle_cycle_count CoreId=%d clusterId=%d offset=%u cpuaddress=%p value=%u", __FUNCTION__, __LINE__, CoreId, i, offset, (Memory + (counterId + offset) * (1 << clusterIDWidth) + i), *(Memory + (counterId + offset) * (1 << clusterIDWidth) + i)); \ + } \ + if (min_counter > *(Memory + (counterId + offset) * (1 << clusterIDWidth) + i)) \ + min_counter = *(Memory + (counterId + offset) * (1 << clusterIDWidth) + i); \ + } \ + counter = min_counter; \ + } \ + } while (gcvFALSE) + +#define gcmGET_VIPCOUNTER(counter, counterId) \ + do { \ + if (*(memory + (counterId + offset) * (1 << clusterIDWidth)) == 0xdeaddead) { \ + counter = 0xdeaddead; \ + } else { \ + gctUINT32 i; \ + gctUINT32_PTR Memory = memory; \ + counter = 0; \ + Memory = memory + TOTAL_VIP_PROBE_NUMBER * CoreId * (1 << clusterIDWidth); \ + for (i = 0; i < (gctUINT32)(1 << clusterIDWidth); i++) { \ + counter += *(Memory + (counterId + offset) * (1 << clusterIDWidth) + i); \ + } \ + } \ + } while (gcvFALSE) + +#define 
gcmGET_LATENCY_COUNTER(minLatency, maxLatency, counterId) \ + do { \ + if (*(memory + (counterId + offset) * (1 << clusterIDWidth)) == 0xdeaddead) { \ + minLatency = 0xdeaddead; \ + maxLatency = 0xdeaddead; \ + } else { \ + gctUINT32 i; \ + gctUINT32_PTR Memory = memory; \ + gctUINT32 total_probe_number = 0; \ + gcmONERROR(gcoPROFILER_GetProbeNumber(Hardware, &total_probe_number)); \ + Memory = memory + total_probe_number * CoreId * (1 << clusterIDWidth); \ + for (i = 0; i < (gctUINT32)(1 << clusterIDWidth); i++) { \ + maxLatency += \ + ((*(Memory + (counterId + offset) * (1 << clusterIDWidth) + i) & 0xfff000) >> 12); \ + minLatency += \ + (*(Memory + (counterId + offset) * (1 << clusterIDWidth) + i) & 0x000fff); \ + if (minLatency == 4095) \ + minLatency = 0; \ + } \ + } \ + } while (gcvFALSE) + +#define NumOfPerFrameBuf 16 +#define NumOfPerDrawBuf 128 + +struct _gcsAppInfoCounter { + gctUINT32 count[7]; +}; + +typedef struct _gcsAppInfoCounter gcsAppInfoCounter; + +typedef struct gcsCounterBuffer *gcsCounterBuffer_PTR; + +struct gcsCounterBuffer { + gctPOINTER counters; + gcsPROFILER_VIP_PROBE_COUNTERS *vipCounters; + gctHANDLE couterBufobj; + gctADDRESS probeAddress; + gctPOINTER logicalAddress; + gceCOUNTER_OPTYPE opType; + gctUINT32 opID; + gcsAppInfoCounter opCount; + gctUINT32 currentShaderId[6]; + gctUINT32 startPos; + gctUINT32 endPos; + gctUINT32 dataSize; + gctBOOL available; + gctBOOL needDump; + gcsCounterBuffer_PTR next; + gcsCounterBuffer_PTR prev; +}; + +typedef struct _gcoPROBE gcoPROBE; +struct _gcoPROBE { + gctUINT32 address; + gctUINT32 offset; +}; + +typedef struct _gcoMODULE gcoMODULE; +struct _gcoMODULE { + gctUINT32 name; + gctUINT32 address; + gctUINT32 numProbe; + gcoPROBE probe[256]; +}; + +typedef struct _gcoPROFILER *gcoPROFILER; + +struct _gcoPROFILER { + gctBOOL enable; + gctBOOL enablePrint; + gctBOOL disableProbe; + + gctBOOL vipProbe; + + gctFILE file; + gctCHAR *fileName; + gceProfilerMode profilerMode; + gceProbeMode probeMode; + + gcsCounterBuffer_PTR counterBuf; + gcsAppInfoCounter currentOpCount; + gctUINT32 bufferCount; + + gctBOOL perDrawMode; + gctBOOL needDump; + gctBOOL counterEnable; + + gceProfilerClient profilerClient; + + gctBOOL needBltDump; + gctBOOL isDummyDraw; + gctBOOL isDebugCounter; + gctUINT32 rdByte; + gctUINT32 wrByte; + gctUINT32 busyCycle; + + /*query some features from hw*/ + gctUINT32 coreCount; + gctUINT32 shaderCoreCount; + gctUINT32 clusterCount; + gctBOOL bHalti4; + gctBOOL psRenderPixelFix; + gctBOOL axiBus128bits; + gctBOOL bZDP3; +}; + +typedef struct _gcsPROBESTATES { + gceProbeStatus status; + gctADDRESS probeAddress; +} gcsPROBESTATES; + +typedef struct _gckPROFILER { + /* Enable profiling */ + gctBOOL profileEnable; + /* Profile mode */ + gceProfilerMode profileMode; + /* Probe mode */ + gceProbeMode probeMode; + /* Clear profile register or not*/ + gctBOOL profileCleanRegister; + /* Profile counter */ + gcsPROFILER_COUNTERS_PART1 latestProfiler_part1; + gcsPROFILER_COUNTERS_PART1 histroyProfiler_part1; + gcsPROFILER_COUNTERS_PART1 preProfiler_part1; + gcsPROFILER_COUNTERS_PART2 latestProfiler_part2; + gcsPROFILER_COUNTERS_PART2 histroyProfiler_part2; + gcsPROFILER_COUNTERS_PART2 preProfiler_part2; +} gckPROFILER; + +/* Construct a Profiler object per context. 
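The entry points declared below form the per-context lifecycle: construct and initialize a gcoPROFILER, enable it, bracket the work to be measured with Start/End, then flush the collected counters and destroy the object. The header does not spell out the call order, so the sketch below is indicative only; the gcvCOUNTER_OP_FRAME operation type and the OpID of 0 are assumptions:

/* Hedged lifecycle sketch; error handling uses the HAL's gcmONERROR pattern. */
static gceSTATUS profileOneFrame(void)
{
    gceSTATUS status;
    gcoPROFILER profiler = gcvNULL;

    gcmONERROR(gcoPROFILER_Construct(&profiler));
    gcmONERROR(gcoPROFILER_Initialize(profiler));
    gcmONERROR(gcoPROFILER_Enable(profiler));

    gcmONERROR(gcoPROFILER_Start(profiler));
    /* ... submit the work to be measured ... */
    gcmONERROR(gcoPROFILER_End(profiler, gcvCOUNTER_OP_FRAME, 0));

    gcmONERROR(gcoPROFILER_Flush(profiler));

OnError:
    if (profiler != gcvNULL)
        gcoPROFILER_Destroy(profiler);
    return status;
}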
*/ +gceSTATUS +gcoPROFILER_Construct(OUT gcoPROFILER *Profiler); + +gceSTATUS +gcoPROFILER_Destroy(IN gcoPROFILER Profiler); + +gceSTATUS +gcoPROFILER_Initialize(IN gcoPROFILER Profiler); + +gceSTATUS +gcoPROFILER_Enable(IN gcoPROFILER Profiler); + +gceSTATUS +gcoPROFILER_Disable(void); + +gceSTATUS +gcoPROFILER_EnableCounters(IN gcoPROFILER Profiler, + IN gceCOUNTER_OPTYPE operationType); + +gceSTATUS +gcoPROFILER_Start(IN gcoPROFILER Profiler); + +gceSTATUS +gcoPROFILER_End(IN gcoPROFILER Profiler, + IN gceCOUNTER_OPTYPE operationType, + IN gctUINT32 OpID); + +gceSTATUS +gcoPROFILER_Write(IN gcoPROFILER Profiler, + IN gctSIZE_T ByteCount, + IN gctCONST_POINTER Data); + +gceSTATUS +gcoPROFILER_Flush(IN gcoPROFILER Profiler); + +gceSTATUS +gcoPROFILER_GetProbeNumber(IN gcoHARDWARE Hardware, + OUT gctUINT32 *TotalProbeNumber); + +gctUINT32 +gcoPROFILER_getMuduleNum(IN gcoPROFILER Profiler); + +gctUINT32 +gcoPROFILER_getMuduleProbeNum(IN gcoPROFILER Profiler, IN gctUINT32 index); + +gctUINT32 +gcoPROFILER_getModuleAddress(IN gcoPROFILER Profiler, IN gctUINT32 ModuleIndex); + +gctUINT32 +gcoPROFILER_getProbeAddress(IN gcoPROFILER Profiler, + IN gctUINT32 ModuleIndex, + IN gctUINT32 ProbeIndex); + +gctUINT32 +gcoPROFILER_getHIIndex(IN gcoPROFILER Profiler); + +gctUINT32 +gcoPROFILER_getCounterBufferSize(IN gcoPROFILER Profiler); + +gceSTATUS +gcoPROFILER_WriteChipInfo(IN gcoPROFILER Profiler); + +gceSTATUS +gcoPROFILER_WriteClusterInfo(IN gcoPROFILER Profiler); + +gceSTATUS +gcoPROFILER_Reset(IN gcoPROFILER Profiler); + +gceSTATUS +gcoPROFILER_WriteCounters(IN gcoPROFILER Profiler); + +#ifdef __cplusplus +} +#endif + +#endif /* __gc_hal_profiler_h_ */ + + diff --git a/unified-tina/inc/HAL/gc_hal_raster.h b/unified-tina/inc/HAL/gc_hal_raster.h new file mode 100644 index 0000000..535584e --- /dev/null +++ b/unified-tina/inc/HAL/gc_hal_raster.h @@ -0,0 +1,883 @@ +/**************************************************************************** +* +* Copyright (c) 2005 - 2023 by Vivante Corp. All rights reserved. +* +* The material in this file is confidential and contains trade secrets +* of Vivante Corporation. This is proprietary information owned by +* Vivante Corporation. No part of this work may be disclosed, +* reproduced, copied, transmitted, or used in any way for any purpose, +* without the express written permission of Vivante Corporation. +* +*****************************************************************************/ + + +#ifndef __gc_hal_raster_h_ +#define __gc_hal_raster_h_ + +#include "gc_hal_enum.h" +#include "gc_hal_types.h" + +#ifdef __cplusplus +extern "C" { +#endif + +/****************************************************************************** + ***************************** Object Declarations **************************** + ******************************************************************************/ + +typedef struct _gcoBRUSH *gcoBRUSH; +typedef struct _gcoBRUSH_CACHE *gcoBRUSH_CACHE; + +/****************************************************************************** + ******************************* gcoBRUSH Object ****************************** + ******************************************************************************/ + +/* Create a new solid color gcoBRUSH object. */ +gceSTATUS +gcoBRUSH_ConstructSingleColor(IN gcoHAL Hal, + IN gctUINT32 ColorConvert, + IN gctUINT32 Color, + IN gctUINT64 Mask, + gcoBRUSH *Brush); + +/* Create a new monochrome gcoBRUSH object. 
*/ +gceSTATUS +gcoBRUSH_ConstructMonochrome(IN gcoHAL Hal, + IN gctUINT32 OriginX, + IN gctUINT32 OriginY, + IN gctUINT32 ColorConvert, + IN gctUINT32 FgColor, + IN gctUINT32 BgColor, + IN gctUINT64 Bits, + IN gctUINT64 Mask, + gcoBRUSH *Brush); + +/* Create a color gcoBRUSH object. */ +gceSTATUS +gcoBRUSH_ConstructColor(IN gcoHAL Hal, + IN gctUINT32 OriginX, + IN gctUINT32 OriginY, + IN gctPOINTER Address, + IN gceSURF_FORMAT Format, + IN gctUINT64 Mask, + gcoBRUSH *Brush); + +/* Destroy an gcoBRUSH object. */ +gceSTATUS +gcoBRUSH_Destroy(IN gcoBRUSH Brush); + +/****************************************************************************** + ******************************* gcoSURF Object ******************************* + ******************************************************************************/ + +/* Set cipping rectangle. */ +gceSTATUS +gcoSURF_SetClipping(IN gcoSURF Surface); + +/* Clear one or more rectangular areas. */ +gceSTATUS +gcoSURF_Clear2D(IN gcoSURF DestSurface, + IN gctUINT32 RectCount, + IN gcsRECT_PTR DestRect, + IN gctUINT32 LoColor, + IN gctUINT32 HiColor); + +/* Draw one or more Bresenham lines. */ +gceSTATUS +gcoSURF_Line(IN gcoSURF Surface, + IN gctUINT32 LineCount, + IN gcsRECT_PTR Position, + IN gcoBRUSH Brush, + IN gctUINT8 FgRop, + IN gctUINT8 BgRop); + +/* Generic rectangular blit. */ +gceSTATUS +gcoSURF_Blit(IN OPTIONAL gcoSURF SrcSurface, + IN gcoSURF DestSurface, + IN gctUINT32 RectCount, + IN OPTIONAL gcsRECT_PTR SrcRect, + IN gcsRECT_PTR DestRect, + IN OPTIONAL gcoBRUSH Brush, + IN gctUINT8 FgRop, + IN gctUINT8 BgRop, + IN OPTIONAL gceSURF_TRANSPARENCY Transparency, + IN OPTIONAL gctUINT32 TransparencyColor, + IN OPTIONAL gctPOINTER Mask, + IN OPTIONAL gceSURF_MONOPACK MaskPack); + +/* Monochrome blit. */ +gceSTATUS +gcoSURF_MonoBlit(IN gcoSURF DestSurface, + IN gctPOINTER Source, + IN gceSURF_MONOPACK SourcePack, + IN gcsPOINT_PTR SourceSize, + IN gcsPOINT_PTR SourceOrigin, + IN gcsRECT_PTR DestRect, + IN OPTIONAL gcoBRUSH Brush, + IN gctUINT8 FgRop, + IN gctUINT8 BgRop, + IN gctBOOL ColorConvert, + IN gctUINT8 MonoTransparency, + IN gceSURF_TRANSPARENCY Transparency, + IN gctUINT32 FgColor, + IN gctUINT32 BgColor); + +/* Filter blit. */ +gceSTATUS +gcoSURF_FilterBlit(IN gcoSURF SrcSurface, + IN gcoSURF DestSurface, + IN gcsRECT_PTR SrcRect, + IN gcsRECT_PTR DestRect, + IN gcsRECT_PTR DestSubRect); + +/* Enable alpha blending engine in the hardware and disengage the ROP engine. */ +gceSTATUS +gcoSURF_EnableAlphaBlend(IN gcoSURF Surface, + IN gctUINT8 SrcGlobalAlphaValue, + IN gctUINT8 DstGlobalAlphaValue, + IN gceSURF_PIXEL_ALPHA_MODE SrcAlphaMode, + IN gceSURF_PIXEL_ALPHA_MODE DstAlphaMode, + IN gceSURF_GLOBAL_ALPHA_MODE SrcGlobalAlphaMode, + IN gceSURF_GLOBAL_ALPHA_MODE DstGlobalAlphaMode, + IN gceSURF_BLEND_FACTOR_MODE SrcFactorMode, + IN gceSURF_BLEND_FACTOR_MODE DstFactorMode, + IN gceSURF_PIXEL_COLOR_MODE SrcColorMode, + IN gceSURF_PIXEL_COLOR_MODE DstColorMode); + +/* Disable alpha blending engine in the hardware and engage the ROP engine. 
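The gcoSURF entry points above are the surface-level convenience layer over the 2D engine: clipping is taken from the surface itself and the clear/line/blit calls work on whole gcoSURF objects rather than raw addresses and strides. A minimal hedged sketch of a flat clear, passing the same 32-bit value for LoColor and HiColor (assumed to mean a single solid color):

/* Hedged sketch: clip to the destination surface and clear one rectangle. */
static gceSTATUS clearSurfaceRect(gcoSURF target, gcsRECT_PTR rect, gctUINT32 color)
{
    gceSTATUS status;

    gcmONERROR(gcoSURF_SetClipping(target));
    gcmONERROR(gcoSURF_Clear2D(target, 1, rect, color, color));

OnError:
    return status;
}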
*/ +gceSTATUS +gcoSURF_DisableAlphaBlend(IN gcoSURF Surface); + +gceSTATUS +gcoSURF_SetDither(IN gcoSURF Surface, IN gctBOOL Dither); + +gceSTATUS +gcoSURF_Set2DSource(gcoSURF Surface, gceSURF_ROTATION Rotation); + +gceSTATUS +gcoSURF_Set2DTarget(gcoSURF Surface, gceSURF_ROTATION Rotation); + +/****************************************************************************** + ********************************* gco2D Object ******************************* + ******************************************************************************/ + +/* Construct a new gco2D object. */ +gceSTATUS +gco2D_Construct(IN gcoHAL Hal, OUT gco2D *Hardware); + +/* Destroy an gco2D object. */ +gceSTATUS +gco2D_Destroy(IN gco2D Hardware); + +/* Sets the maximum number of brushes in the brush cache. */ +gceSTATUS +gco2D_SetBrushLimit(IN gco2D Hardware, IN gctUINT MaxCount); + +/* Flush the brush. */ +gceSTATUS +gco2D_FlushBrush(IN gco2D Engine, IN gcoBRUSH Brush, IN gceSURF_FORMAT Format); + +/* Program the specified solid color brush. */ +gceSTATUS +gco2D_LoadSolidBrush(IN gco2D Engine, + IN gceSURF_FORMAT Format, + IN gctUINT32 ColorConvert, + IN gctUINT32 Color, + IN gctUINT64 Mask); + +gceSTATUS +gco2D_LoadMonochromeBrush(IN gco2D Engine, + IN gctUINT32 OriginX, + IN gctUINT32 OriginY, + IN gctUINT32 ColorConvert, + IN gctUINT32 FgColor, + IN gctUINT32 BgColor, + IN gctUINT64 Bits, + IN gctUINT64 Mask); + +gceSTATUS +gco2D_LoadColorBrush(IN gco2D Engine, + IN gctUINT32 OriginX, + IN gctUINT32 OriginY, + IN gctADDRESS Address, + IN gceSURF_FORMAT Format, + IN gctUINT64 Mask); + +/* Configure monochrome source. */ +gceSTATUS +gco2D_SetMonochromeSource(IN gco2D Engine, + IN gctBOOL ColorConvert, + IN gctUINT8 MonoTransparency, + IN gceSURF_MONOPACK DataPack, + IN gctBOOL CoordRelative, + IN gceSURF_TRANSPARENCY Transparency, + IN gctUINT32 FgColor, + IN gctUINT32 BgColor); + +/* Configure color source. */ +gceSTATUS +gco2D_SetColorSource(IN gco2D Engine, + IN gctADDRESS Address, + IN gctUINT32 Stride, + IN gceSURF_FORMAT Format, + IN gceSURF_ROTATION Rotation, + IN gctUINT32 SurfaceWidth, + IN gctBOOL CoordRelative, + IN gceSURF_TRANSPARENCY Transparency, + IN gctUINT32 TransparencyColor); + +/* Configure color source extension for full rotation. */ +gceSTATUS +gco2D_SetColorSourceEx(IN gco2D Engine, + IN gctADDRESS Address, + IN gctUINT32 Stride, + IN gceSURF_FORMAT Format, + IN gceSURF_ROTATION Rotation, + IN gctUINT32 SurfaceWidth, + IN gctUINT32 SurfaceHeight, + IN gctBOOL CoordRelative, + IN gceSURF_TRANSPARENCY Transparency, + IN gctUINT32 TransparencyColor); + +/* Same as gco2D_SetColorSourceEx, but with better 64bit SW-path support. + ** Please do NOT export the API now. + */ +gceSTATUS +gco2D_SetColorSource64(IN gco2D Engine, + IN gctADDRESS Address, + IN gctPOINTER Logical, + IN gctUINT32 Stride, + IN gceSURF_FORMAT Format, + IN gceSURF_ROTATION Rotation, + IN gctUINT32 SurfaceWidth, + IN gctUINT32 SurfaceHeight, + IN gctBOOL CoordRelative, + IN gceSURF_TRANSPARENCY Transparency, + IN gctUINT32 TransparencyColor); + +/* Configure color source. 
*/ +gceSTATUS +gco2D_SetColorSourceAdvanced(IN gco2D Engine, + IN gctADDRESS Address, + IN gctUINT32 Stride, + IN gceSURF_FORMAT Format, + IN gceSURF_ROTATION Rotation, + IN gctUINT32 SurfaceWidth, + IN gctUINT32 SurfaceHeight, + IN gctBOOL CoordRelative); + +gceSTATUS +gco2D_SetColorSourceN(IN gco2D Engine, + IN gctADDRESS Address, + IN gctUINT32 Stride, + IN gceSURF_FORMAT Format, + IN gceSURF_ROTATION Rotation, + IN gctUINT32 SurfaceWidth, + IN gctUINT32 SurfaceHeight, + IN gctUINT32 SurfaceNumber); + +/* Configure masked color source. */ +gceSTATUS +gco2D_SetMaskedSource(IN gco2D Engine, + IN gctADDRESS Address, + IN gctUINT32 Stride, + IN gceSURF_FORMAT Format, + IN gctBOOL CoordRelative, + IN gceSURF_MONOPACK MaskPack); + +/* Configure masked color source extension for full rotation. */ +gceSTATUS +gco2D_SetMaskedSourceEx(IN gco2D Engine, + IN gctADDRESS Address, + IN gctUINT32 Stride, + IN gceSURF_FORMAT Format, + IN gctBOOL CoordRelative, + IN gceSURF_MONOPACK MaskPack, + IN gceSURF_ROTATION Rotation, + IN gctUINT32 SurfaceWidth, + IN gctUINT32 SurfaceHeight); + +/* Same as gco2D_SetMaskedSourceEx, but with better 64bit SW-path support. + ** Please do NOT export the API now. + */ +gceSTATUS +gco2D_SetMaskedSource64(IN gco2D Engine, + IN gctADDRESS Address, + IN gctPOINTER Logical, + IN gctUINT32 Stride, + IN gceSURF_FORMAT Format, + IN gctBOOL CoordRelative, + IN gceSURF_MONOPACK MaskPack, + IN gceSURF_ROTATION Rotation, + IN gctUINT32 SurfaceWidth, + IN gctUINT32 SurfaceHeight); + +/* Setup the source rectangle. */ +gceSTATUS +gco2D_SetSource(IN gco2D Engine, IN gcsRECT_PTR SrcRect); + +/* Set clipping rectangle. */ +gceSTATUS +gco2D_SetClipping(IN gco2D Engine, IN gcsRECT_PTR Rect); + +/* Configure destination. */ +gceSTATUS +gco2D_SetTarget(IN gco2D Engine, + IN gctADDRESS Address, + IN gctUINT32 Stride, + IN gceSURF_ROTATION Rotation, + IN gctUINT32 SurfaceWidth); + +/* Configure destination extension for full rotation. */ +gceSTATUS +gco2D_SetTargetEx(IN gco2D Engine, + IN gctADDRESS Address, + IN gctUINT32 Stride, + IN gceSURF_ROTATION Rotation, + IN gctUINT32 SurfaceWidth, + IN gctUINT32 SurfaceHeight); + +/* + * Same as gco2D_SetTargetEx, but with better 64bit SW-path support. + * Please do NOT export the API now. + */ +gceSTATUS +gco2D_SetTarget64(IN gco2D Engine, + IN gctADDRESS Address, + IN gctPOINTER Logical, + IN gctUINT32 Stride, + IN gceSURF_ROTATION Rotation, + IN gctUINT32 SurfaceWidth, + IN gctUINT32 SurfaceHeight); + +/* Calculate and program the stretch factors. */ +gceSTATUS +gco2D_CalcStretchFactor(IN gco2D Engine, + IN gctINT32 SrcSize, + IN gctINT32 DestSize, + OUT gctUINT32_PTR Factor); + +gceSTATUS +gco2D_SetStretchFactors(IN gco2D Engine, + IN gctUINT32 HorFactor, + IN gctUINT32 VerFactor); + +/* Calculate and program the stretch factors based on the rectangles. */ +gceSTATUS +gco2D_SetStretchRectFactors(IN gco2D Engine, + IN gcsRECT_PTR SrcRect, + IN gcsRECT_PTR DestRect); + +/* Create a new solid color gcoBRUSH object. */ +gceSTATUS +gco2D_ConstructSingleColorBrush(IN gco2D Engine, + IN gctUINT32 ColorConvert, + IN gctUINT32 Color, + IN gctUINT64 Mask, + gcoBRUSH *Brush); + +/* Create a new monochrome gcoBRUSH object. */ +gceSTATUS +gco2D_ConstructMonochromeBrush(IN gco2D Engine, + IN gctUINT32 OriginX, + IN gctUINT32 OriginY, + IN gctUINT32 ColorConvert, + IN gctUINT32 FgColor, + IN gctUINT32 BgColor, + IN gctUINT64 Bits, + IN gctUINT64 Mask, + gcoBRUSH *Brush); + +/* Create a color gcoBRUSH object. 
*/ +gceSTATUS +gco2D_ConstructColorBrush(IN gco2D Engine, + IN gctUINT32 OriginX, + IN gctUINT32 OriginY, + IN gctPOINTER Address, + IN gceSURF_FORMAT Format, + IN gctUINT64 Mask, + gcoBRUSH *Brush); + +/* Clear one or more rectangular areas. */ +gceSTATUS +gco2D_Clear(IN gco2D Engine, + IN gctUINT32 RectCount, + IN gcsRECT_PTR Rect, + IN gctUINT32 Color32, + IN gctUINT8 FgRop, + IN gctUINT8 BgRop, + IN gceSURF_FORMAT DestFormat); + +/* Draw one or more Bresenham lines. */ +gceSTATUS +gco2D_Line(IN gco2D Engine, + IN gctUINT32 LineCount, + IN gcsRECT_PTR Position, + IN gcoBRUSH Brush, + IN gctUINT8 FgRop, + IN gctUINT8 BgRop, + IN gceSURF_FORMAT DestFormat); + +/* Draw one or more Bresenham lines based on the 32-bit color. */ +gceSTATUS +gco2D_ColorLine(IN gco2D Engine, + IN gctUINT32 LineCount, + IN gcsRECT_PTR Position, + IN gctUINT32 Color32, + IN gctUINT8 FgRop, + IN gctUINT8 BgRop, + IN gceSURF_FORMAT DestFormat); + +/* Generic blit. */ +gceSTATUS +gco2D_Blit(IN gco2D Engine, + IN gctUINT32 RectCount, + IN gcsRECT_PTR Rect, + IN gctUINT8 FgRop, + IN gctUINT8 BgRop, + IN gceSURF_FORMAT DestFormat); + +gceSTATUS +gco2D_Blend(IN gco2D Engine, + IN gctUINT32 SrcCount, + IN gctUINT32 RectCount, + IN gcsRECT_PTR Rect, + IN gctUINT8 FgRop, + IN gctUINT8 BgRop, + IN gceSURF_FORMAT DestFormat); + +/* Batch blit. */ +gceSTATUS +gco2D_BatchBlit(IN gco2D Engine, + IN gctUINT32 RectCount, + IN gcsRECT_PTR SrcRect, + IN gcsRECT_PTR DestRect, + IN gctUINT8 FgRop, + IN gctUINT8 BgRop, + IN gceSURF_FORMAT DestFormat); + +/* Stretch blit. */ +gceSTATUS +gco2D_StretchBlit(IN gco2D Engine, + IN gctUINT32 RectCount, + IN gcsRECT_PTR Rect, + IN gctUINT8 FgRop, + IN gctUINT8 BgRop, + IN gceSURF_FORMAT DestFormat); + +/* Monochrome blit. */ +gceSTATUS +gco2D_MonoBlit(IN gco2D Engine, + IN gctPOINTER StreamBits, + IN gcsPOINT_PTR StreamSize, + IN gcsRECT_PTR StreamRect, + IN gceSURF_MONOPACK SrcStreamPack, + IN gceSURF_MONOPACK DestStreamPack, + IN gcsRECT_PTR DestRect, + IN gctUINT32 FgRop, + IN gctUINT32 BgRop, + IN gceSURF_FORMAT DestFormat); + +gceSTATUS +gco2D_MonoBlitEx(IN gco2D Engine, + IN gctPOINTER StreamBits, + IN gctINT32 StreamStride, + IN gctINT32 StreamWidth, + IN gctINT32 StreamHeight, + IN gctINT32 StreamX, + IN gctINT32 StreamY, + IN gctUINT32 FgColor, + IN gctUINT32 BgColor, + IN gcsRECT_PTR SrcRect, + IN gcsRECT_PTR DstRect, + IN gctUINT8 FgRop, + IN gctUINT8 BgRop); + +/* Set kernel size. */ +gceSTATUS +gco2D_SetKernelSize(IN gco2D Engine, + IN gctUINT8 HorKernelSize, + IN gctUINT8 VerKernelSize); + +/* Set filter type. */ +gceSTATUS +gco2D_SetFilterType(IN gco2D Engine, IN gceFILTER_TYPE FilterType); + +/* Set the filter kernel by user. */ +gceSTATUS +gco2D_SetUserFilterKernel(IN gco2D Engine, + IN gceFILTER_PASS_TYPE PassType, + IN gctUINT16_PTR KernelArray); + +/* Select the pass(es) to be done for user defined filter. */ +gceSTATUS +gco2D_EnableUserFilterPasses(IN gco2D Engine, IN gctBOOL HorPass, IN gctBOOL VerPass); + +/* Frees the temporary buffer allocated by filter blit operation. */ +gceSTATUS +gco2D_FreeFilterBuffer(IN gco2D Engine); + +/* Filter blit. 
*/ +gceSTATUS +gco2D_FilterBlit(IN gco2D Engine, + IN gctADDRESS SrcAddress, + IN gctUINT SrcStride, + IN gctADDRESS SrcUAddress, + IN gctUINT SrcUStride, + IN gctADDRESS SrcVAddress, + IN gctUINT SrcVStride, + IN gceSURF_FORMAT SrcFormat, + IN gceSURF_ROTATION SrcRotation, + IN gctUINT32 SrcSurfaceWidth, + IN gcsRECT_PTR SrcRect, + IN gctADDRESS DestAddress, + IN gctUINT DestStride, + IN gceSURF_FORMAT DestFormat, + IN gceSURF_ROTATION DestRotation, + IN gctUINT32 DestSurfaceWidth, + IN gcsRECT_PTR DestRect, + IN gcsRECT_PTR DestSubRect); + +/* Filter blit extension for full rotation. */ +gceSTATUS +gco2D_FilterBlitEx(IN gco2D Engine, + IN gctADDRESS SrcAddress, + IN gctUINT SrcStride, + IN gctADDRESS SrcUAddress, + IN gctUINT SrcUStride, + IN gctADDRESS SrcVAddress, + IN gctUINT SrcVStride, + IN gceSURF_FORMAT SrcFormat, + IN gceSURF_ROTATION SrcRotation, + IN gctUINT32 SrcSurfaceWidth, + IN gctUINT32 SrcSurfaceHeight, + IN gcsRECT_PTR SrcRect, + IN gctADDRESS DestAddress, + IN gctUINT DestStride, + IN gceSURF_FORMAT DestFormat, + IN gceSURF_ROTATION DestRotation, + IN gctUINT32 DestSurfaceWidth, + IN gctUINT32 DestSurfaceHeight, + IN gcsRECT_PTR DestRect, + IN gcsRECT_PTR DestSubRect); + +gceSTATUS +gco2D_FilterBlitEx2(IN gco2D Engine, + IN gctADDRESS *SrcAddresses, + IN gctUINT32 SrcAddressNum, + IN gctUINT32_PTR SrcStrides, + IN gctUINT32 SrcStrideNum, + IN gceTILING SrcTiling, + IN gceSURF_FORMAT SrcFormat, + IN gceSURF_ROTATION SrcRotation, + IN gctUINT32 SrcSurfaceWidth, + IN gctUINT32 SrcSurfaceHeight, + IN gcsRECT_PTR SrcRect, + IN gctADDRESS *DestAddresses, + IN gctUINT32 DestAddressNum, + IN gctUINT32_PTR DestStrides, + IN gctUINT32 DestStrideNum, + IN gceTILING DestTiling, + IN gceSURF_FORMAT DestFormat, + IN gceSURF_ROTATION DestRotation, + IN gctUINT32 DestSurfaceWidth, + IN gctUINT32 DestSurfaceHeight, + IN gcsRECT_PTR DestRect, + IN gcsRECT_PTR DestSubRect); + +/* Enable alpha blending engine in the hardware and disengage the ROP engine. */ +gceSTATUS +gco2D_EnableAlphaBlend(IN gco2D Engine, + IN gctUINT8 SrcGlobalAlphaValue, + IN gctUINT8 DstGlobalAlphaValue, + IN gceSURF_PIXEL_ALPHA_MODE SrcAlphaMode, + IN gceSURF_PIXEL_ALPHA_MODE DstAlphaMode, + IN gceSURF_GLOBAL_ALPHA_MODE SrcGlobalAlphaMode, + IN gceSURF_GLOBAL_ALPHA_MODE DstGlobalAlphaMode, + IN gceSURF_BLEND_FACTOR_MODE SrcFactorMode, + IN gceSURF_BLEND_FACTOR_MODE DstFactorMode, + IN gceSURF_PIXEL_COLOR_MODE SrcColorMode, + IN gceSURF_PIXEL_COLOR_MODE DstColorMode); + +/* Enable alpha blending engine in the hardware. */ +gceSTATUS +gco2D_EnableAlphaBlendAdvanced(IN gco2D Engine, + IN gceSURF_PIXEL_ALPHA_MODE SrcAlphaMode, + IN gceSURF_PIXEL_ALPHA_MODE DstAlphaMode, + IN gceSURF_GLOBAL_ALPHA_MODE SrcGlobalAlphaMode, + IN gceSURF_GLOBAL_ALPHA_MODE DstGlobalAlphaMode, + IN gceSURF_BLEND_FACTOR_MODE SrcFactorMode, + IN gceSURF_BLEND_FACTOR_MODE DstFactorMode); + +/* Enable alpha blending engine with Porter Duff rule. */ +gceSTATUS +gco2D_SetPorterDuffBlending(IN gco2D Engine, IN gce2D_PORTER_DUFF_RULE Rule); + +/* Disable alpha blending engine in the hardware and engage the ROP engine. */ +gceSTATUS +gco2D_DisableAlphaBlend(IN gco2D Engine); + +/* Retrieve the maximum number of 32-bit data chunks for a single DE command. */ +gctUINT32 +gco2D_GetMaximumDataCount(void); + +/* Retrieve the maximum number of rectangles, that can be passed in a single DE command. */ +gctUINT32 +gco2D_GetMaximumRectCount(void); + +/* Returns the pixel alignment of the surface. 
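gco2D_EnableAlphaBlend and gco2D_SetPorterDuffBlending above switch the engine from ROP compositing to alpha blending for the blits that follow, and gco2D_DisableAlphaBlend hands control back to the ROP path. A hedged sketch of a source-over composite; gcvPD_SRC_OVER is assumed to be one of the gce2D_PORTER_DUFF_RULE values, 0xCC is the conventional source-copy ROP, and the destination format is illustrative:

/* Hedged sketch: blend rectangles with Porter-Duff SRC_OVER, then re-engage
 * the plain ROP engine. A color source is assumed to have been configured
 * beforehand (e.g. via gco2D_SetColorSource). */
static gceSTATUS blendSrcOver(gco2D engine, gctUINT32 rectCount, gcsRECT_PTR rects)
{
    gceSTATUS status;

    gcmONERROR(gco2D_SetPorterDuffBlending(engine, gcvPD_SRC_OVER));
    gcmONERROR(gco2D_Blit(engine, rectCount, rects, 0xCC, 0xCC, gcvSURF_A8R8G8B8));

    return gco2D_DisableAlphaBlend(engine);

OnError:
    return status;
}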
*/ +gceSTATUS +gco2D_GetPixelAlignment(gceSURF_FORMAT Format, gcsPOINT_PTR Alignment); + +/* Retrieve monochrome stream pack size. */ +gceSTATUS +gco2D_GetPackSize(IN gceSURF_MONOPACK StreamPack, + OUT gctUINT32 *PackWidth, + OUT gctUINT32 *PackHeight); + +/* Flush the 2D pipeline. */ +gceSTATUS +gco2D_Flush(IN gco2D Engine); + +/* Load 256-entry color table for INDEX8 source surfaces. */ +gceSTATUS +gco2D_LoadPalette(IN gco2D Engine, + IN gctUINT FirstIndex, + IN gctUINT IndexCount, + IN gctPOINTER ColorTable, + IN gctBOOL ColorConvert); + +/* Enable/disable 2D BitBlt mirrorring. */ +gceSTATUS +gco2D_SetBitBlitMirror(IN gco2D Engine, + IN gctBOOL HorizontalMirror, + IN gctBOOL VerticalMirror); + +/* + * Set the transparency for source, destination and pattern. + * It also enable or disable the DFB color key mode. + */ +gceSTATUS +gco2D_SetTransparencyAdvancedEx(IN gco2D Engine, + IN gce2D_TRANSPARENCY SrcTransparency, + IN gce2D_TRANSPARENCY DstTransparency, + IN gce2D_TRANSPARENCY PatTransparency, + IN gctBOOL EnableDFBColorKeyMode); + +/* Set the transparency for source, destination and pattern. */ +gceSTATUS +gco2D_SetTransparencyAdvanced(IN gco2D Engine, + IN gce2D_TRANSPARENCY SrcTransparency, + IN gce2D_TRANSPARENCY DstTransparency, + IN gce2D_TRANSPARENCY PatTransparency); + +/* Set the source color key. */ +gceSTATUS +gco2D_SetSourceColorKeyAdvanced(IN gco2D Engine, IN gctUINT32 ColorKey); + +/* Set the source color key range. */ +gceSTATUS +gco2D_SetSourceColorKeyRangeAdvanced(IN gco2D Engine, + IN gctUINT32 ColorKeyLow, + IN gctUINT32 ColorKeyHigh); + +/* Set the target color key. */ +gceSTATUS +gco2D_SetTargetColorKeyAdvanced(IN gco2D Engine, IN gctUINT32 ColorKey); + +/* Set the target color key range. */ +gceSTATUS +gco2D_SetTargetColorKeyRangeAdvanced(IN gco2D Engine, + IN gctUINT32 ColorKeyLow, + IN gctUINT32 ColorKeyHigh); + +/* Set the YUV color space mode. */ +gceSTATUS +gco2D_SetYUVColorMode(IN gco2D Engine, IN gce2D_YUV_COLOR_MODE Mode); + +/* Setup the source global color value in ARGB8 format. */ +gceSTATUS +gco2D_SetSourceGlobalColorAdvanced(IN gco2D Engine, IN gctUINT32 Color32); + +/* Setup the target global color value in ARGB8 format. */ +gceSTATUS +gco2D_SetTargetGlobalColorAdvanced(IN gco2D Engine, IN gctUINT32 Color32); + +/* Setup the source and target pixel multiply modes. */ +gceSTATUS +gco2D_SetPixelMultiplyModeAdvanced(IN gco2D Engine, + IN gce2D_PIXEL_COLOR_MULTIPLY_MODE SrcPremultiplySrcAlpha, + IN gce2D_PIXEL_COLOR_MULTIPLY_MODE DstPremultiplyDstAlpha, + IN gce2D_GLOBAL_COLOR_MULTIPLY_MODE SrcPremultiplyGlobalMode, + IN gce2D_PIXEL_COLOR_MULTIPLY_MODE DstDemultiplyDstAlpha); + +/* Set the GPU clock cycles after which the idle engine will keep auto-flushing. */ +gceSTATUS +gco2D_SetAutoFlushCycles(IN gco2D Engine, IN gctUINT32 Cycles); + +#if VIVANTE_PROFILER +/* + * Read the profile registers available in the 2D engine and sets them in the profile. + * The function will also reset the pixelsRendered counter every time. + */ +gceSTATUS +gco2D_ProfileEngine(IN gco2D Engine, OPTIONAL gcs2D_PROFILE_PTR Profile); +#endif + +/* Enable or disable 2D dithering. 
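Pulling the raw-address gco2D path together: configure a color source and a target, set the source and clip rectangles, issue the ROP blit, then flush the 2D pipeline. The rotation, transparency and format arguments in the sketch below are illustrative defaults rather than values mandated by the header:

/* Hedged sketch: SRCCOPY one rectangle between two linear ARGB8888 surfaces.
 * Addresses, strides, widths and rectangles are caller-supplied. */
static gceSTATUS copyRect(gco2D engine,
                          gctADDRESS srcAddr, gctUINT32 srcStride, gctUINT32 srcWidth,
                          gctADDRESS dstAddr, gctUINT32 dstStride, gctUINT32 dstWidth,
                          gcsRECT_PTR srcRect, gcsRECT_PTR dstRect)
{
    gceSTATUS status;

    gcmONERROR(gco2D_SetColorSource(engine, srcAddr, srcStride, gcvSURF_A8R8G8B8,
                                    gcvSURF_0_DEGREE, srcWidth, gcvFALSE,
                                    gcvSURF_OPAQUE, 0));
    gcmONERROR(gco2D_SetSource(engine, srcRect));
    gcmONERROR(gco2D_SetTarget(engine, dstAddr, dstStride, gcvSURF_0_DEGREE, dstWidth));
    gcmONERROR(gco2D_SetClipping(engine, dstRect));
    gcmONERROR(gco2D_Blit(engine, 1, dstRect, 0xCC, 0xCC, gcvSURF_A8R8G8B8));

    return gco2D_Flush(engine);

OnError:
    return status;
}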
*/ +gceSTATUS +gco2D_EnableDither(IN gco2D Engine, IN gctBOOL Enable); + +gceSTATUS +gco2D_SetGenericSource(IN gco2D Engine, + IN gctADDRESS *Addresses, + IN gctUINT32 AddressNum, + IN gctUINT32_PTR Strides, + IN gctUINT32 StrideNum, + IN gceTILING Tiling, + IN gceSURF_FORMAT Format, + IN gceSURF_ROTATION Rotation, + IN gctUINT32 SurfaceWidth, + IN gctUINT32 SurfaceHeight); + +gceSTATUS +gco2D_SetGenericTarget(IN gco2D Engine, + IN gctADDRESS *Addresses, + IN gctUINT32 AddressNum, + IN gctUINT32_PTR Strides, + IN gctUINT32 StrideNum, + IN gceTILING Tiling, + IN gceSURF_FORMAT Format, + IN gceSURF_ROTATION Rotation, + IN gctUINT32 SurfaceWidth, + IN gctUINT32 SurfaceHeight); + +gceSTATUS +gco2D_SetCurrentSourceIndex(IN gco2D Engine, IN gctUINT32 SrcIndex); + +gceSTATUS +gco2D_MultiSourceBlit(IN gco2D Engine, + IN gctUINT32 SourceMask, + IN gcsRECT_PTR DestRect, + IN gctUINT32 RectCount); + +gceSTATUS +gco2D_SetROP(IN gco2D Engine, IN gctUINT8 FgRop, IN gctUINT8 BgRop); + +gceSTATUS +gco2D_SetGdiStretchMode(IN gco2D Engine, IN gctBOOL Enable); + +gceSTATUS +gco2D_SetSourceTileStatus(IN gco2D Engine, + IN gce2D_TILE_STATUS_CONFIG TSControl, + IN gceSURF_FORMAT CompressedFormat, + IN gctUINT32 ClearValue, + IN gctADDRESS GpuAddress); + +gceSTATUS +gco2D_SetTargetTileStatus(IN gco2D Engine, + IN gce2D_TILE_STATUS_CONFIG TileStatusConfig, + IN gceSURF_FORMAT CompressedFormat, + IN gctUINT32 ClearValue, + IN gctADDRESS GpuAddress); + +gceSTATUS +gco2D_SetSourceCacheMode(IN gco2D Engine, IN gceCACHE_MODE CacheMode); + +gceSTATUS +gco2D_SetTargetCacheMode(IN gco2D Engine, IN gceCACHE_MODE CacheMode); + +gceSTATUS +gco2D_QueryU32(IN gco2D Engine, IN gce2D_QUERY Item, OUT gctUINT32_PTR Value); + +gceSTATUS +gco2D_SetStateU32(IN gco2D Engine, IN gce2D_STATE State, IN gctUINT32 Value); + +gceSTATUS +gco2D_SetStateArrayI32(IN gco2D Engine, + IN gce2D_STATE State, + IN gctINT32_PTR Array, + IN gctINT32 ArraySize); + +gceSTATUS +gco2D_SetStateArrayU32(IN gco2D Engine, + IN gce2D_STATE State, + IN gctUINT32_PTR Array, + IN gctINT32 ArraySize); + +gceSTATUS +gco2D_SetTargetRect(IN gco2D Engine, IN gcsRECT_PTR Rect); + +gceSTATUS +gco2D_Set2DEngine(IN gco2D Engine); + +gceSTATUS +gco2D_UnSet2DEngine(IN gco2D Engine); + +gceSTATUS +gco2D_Get2DEngine(OUT gco2D *Engine); + +gceSTATUS +gco2D_Commit(IN gco2D Engine, IN gctBOOL Stall); + +gceSTATUS +gco2D_NatureRotateTranslation(IN gctBOOL IsSrcRot, + IN gce2D_NATURE_ROTATION NatureRotation, + IN gctINT32 SrcSurfaceWidth, + IN gctINT32 SrcSurfaceHeight, + IN gctINT32 DstSurfaceWidth, + IN gctINT32 DstSurfaceHeight, + IN OUT gcsRECT_PTR SrcRect, + IN OUT gcsRECT_PTR DstRect, + OUT gceSURF_ROTATION *SrcRotation, + OUT gceSURF_ROTATION *DstRotation); + +/* Set source endian mode. */ +gceSTATUS +gco2D_SetSourceEndianMode(IN gco2D Engine, IN gceENDIAN_MODE eEndianMode); + +/* Set target endian mode. 
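+ * + * Sketch (gcvENDIAN_NO_SWAP is an assumed gceENDIAN_MODE enumerant defined in the enum headers; engine is a gco2D handle): + * + * gco2D_SetSourceEndianMode(engine, gcvENDIAN_NO_SWAP); + * gco2D_SetTargetEndianMode(engine, gcvENDIAN_NO_SWAP);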
*/ +gceSTATUS +gco2D_SetTargetEndianMode(IN gco2D Engine, IN gceENDIAN_MODE eEndianMode); + +gceSTATUS +gco2D_GetActiveCoreIndex(IN gco2D Engine, OUT gctUINT32 *ActiveCoreIndex); + +gceSTATUS +gco2D_SetActiveCoreIndex(IN gco2D Engine, IN gctUINT32 ActiveCoreIndex); + +gceSTATUS +gco2D_SetMeanValue(IN gco2D Engine, + IN gctINT32 R, + IN gctINT32 G, + IN gctINT32 B); + +gceSTATUS +gco2D_SetStdRerciprocal(IN gco2D Engine, + IN gctINT32 R, + IN gctINT32 G, + IN gctINT32 B); + +gceSTATUS +gco2D_SetInitError(IN gco2D Engine, + IN gctBOOL GDIStretch, + IN gctUINT currentSrcIndex, + IN OUT gcsRECT_PTR SplitSrcRectL, + IN OUT gcsRECT_PTR SplitSrcRectR, + IN OUT gcsRECT_PTR SplitDstRectL, + IN OUT gcsRECT_PTR SplitDstRectR); + +gceSTATUS +gco2D_SetScaleFactor(IN gco2D Engine, IN gctUINT32 ScaleFactor); + +gceSTATUS +gco2D_SetState(IN gco2D Engine, IN gcs2D_STATE_CONFIG Config); + +#ifdef __cplusplus +} +#endif + +#endif /* __gc_hal_h_ */ + + diff --git a/unified-tina/inc/HAL/gc_hal_statistics.h b/unified-tina/inc/HAL/gc_hal_statistics.h new file mode 100644 index 0000000..4f54ee8 --- /dev/null +++ b/unified-tina/inc/HAL/gc_hal_statistics.h @@ -0,0 +1,73 @@ +/**************************************************************************** +* +* Copyright (c) 2005 - 2023 by Vivante Corp. All rights reserved. +* +* The material in this file is confidential and contains trade secrets +* of Vivante Corporation. This is proprietary information owned by +* Vivante Corporation. No part of this work may be disclosed, +* reproduced, copied, transmitted, or used in any way for any purpose, +* without the express written permission of Vivante Corporation. +* +*****************************************************************************/ + + +#ifndef __gc_hal_statistics_h_ +#define __gc_hal_statistics_h_ + +#define VIV_STAT_ENABLE_STATISTICS 0 + +/* Total number of frames for which the frame time is accounted. We have storage + * to keep frame times for the last this many frames. + */ +#define VIV_STAT_FRAME_BUFFER_SIZE 30 + +/* + * Total number of frames sampled for a mode. This means + * + * # of frames for HZ Current : VIV_STAT_EARLY_Z_SAMPLE_FRAMES + * # of frames for HZ Switched : VIV_STAT_EARLY_Z_SAMPLE_FRAMES + * + + * -------------------------------------------------------- + * : (2 * VIV_STAT_EARLY_Z_SAMPLE_FRAMES) frames needed + * + * IMPORTANT: This total must be smaller than VIV_STAT_FRAME_BUFFER_SIZE + */ +#define VIV_STAT_EARLY_Z_SAMPLE_FRAMES 7 +#define VIV_STAT_EARLY_Z_LATENCY_FRAMES 2 + +/* + * Multiplication factor for previous Hz off mode. + * Make it more than 1.0 to advertise HZ on. + */ +#define VIV_STAT_EARLY_Z_FACTOR (1.05f) + +/* HAL statistics information. */ +typedef struct _gcsSTATISTICS_EARLYZ { + gctUINT switchBackCount; + gctUINT nextCheckPoint; + gctBOOL disabled; +} gcsSTATISTICS_EARLYZ; + +/* HAL statistics information. */ +typedef struct _gcsSTATISTICS { + gctUINT64 frameTime[VIV_STAT_FRAME_BUFFER_SIZE]; + gctUINT64 previousFrameTime; + gctUINT frame; + gcsSTATISTICS_EARLYZ earlyZ; +} gcsSTATISTICS; + +/* Add frame-based data into the current statistics.
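+ * + * Typical per-frame flow (sketch; the concrete gceSTATISTICS key values are defined elsewhere and only assumed here): + * + * gcfSTATISTICS_AddData(key, elapsedMicroseconds); + * gcfSTATISTICS_MarkFrameEnd(); + * + * With the defaults above, early-Z sampling needs 2 * VIV_STAT_EARLY_Z_SAMPLE_FRAMES = 14 frames, which fits within the 30-entry frame time buffer.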
*/ +void +gcfSTATISTICS_AddData(IN gceSTATISTICS Key, IN gctUINT Value); + +/* Marks the frame end and triggers statistical calculations and decisions.*/ +void +gcfSTATISTICS_MarkFrameEnd(void); + +/* Sets whether the dynamic HZ is disabled or not .*/ +void +gcfSTATISTICS_DisableDynamicEarlyZ(IN gctBOOL Disabled); + +#endif /*__gc_hal_statistics_h_ */ + + diff --git a/unified-tina/inc/HAL/gc_hal_types.h b/unified-tina/inc/HAL/gc_hal_types.h new file mode 100644 index 0000000..05a2d06 --- /dev/null +++ b/unified-tina/inc/HAL/gc_hal_types.h @@ -0,0 +1,18 @@ +/**************************************************************************** +* +* Copyright (c) 2005 - 2023 by Vivante Corp. All rights reserved. +* +* The material in this file is confidential and contains trade secrets +* of Vivante Corporation. This is proprietary information owned by +* Vivante Corporation. No part of this work may be disclosed, +* reproduced, copied, transmitted, or used in any way for any purpose, +* without the express written permission of Vivante Corporation. +* +*****************************************************************************/ + + +#include "gc_hal_version.h" +#include "gc_hal_options.h" +#include "shared/gc_hal_types_shared.h" + + diff --git a/unified-tina/inc/HAL/gc_hal_version.h b/unified-tina/inc/HAL/gc_hal_version.h new file mode 100644 index 0000000..399bf8b --- /dev/null +++ b/unified-tina/inc/HAL/gc_hal_version.h @@ -0,0 +1,29 @@ +/**************************************************************************** +* +* Copyright (c) 2005 - 2023 by Vivante Corp. All rights reserved. +* +* The material in this file is confidential and contains trade secrets +* of Vivante Corporation. This is proprietary information owned by +* Vivante Corporation. No part of this work may be disclosed, +* reproduced, copied, transmitted, or used in any way for any purpose, +* without the express written permission of Vivante Corporation. +* +*****************************************************************************/ + + +#ifndef __gc_hal_version_h_ +#define __gc_hal_version_h_ + +#define gcvVERSION_MAJOR 6 + +#define gcvVERSION_MINOR 4 + +#define gcvVERSION_PATCH 15 + +#define gcvVERSION_BUILD 690884 + +#define gcvVERSION_STRING "6.4.15.3.690884" + +#endif /* __gc_hal_version_h_ */ + + diff --git a/unified-tina/inc/HAL/shared/gc_hal_base_shared.h b/unified-tina/inc/HAL/shared/gc_hal_base_shared.h new file mode 100644 index 0000000..42b91a0 --- /dev/null +++ b/unified-tina/inc/HAL/shared/gc_hal_base_shared.h @@ -0,0 +1,45 @@ +/**************************************************************************** +* +* Copyright (c) 2005 - 2023 by Vivante Corp. All rights reserved. +* +* The material in this file is confidential and contains trade secrets +* of Vivante Corporation. This is proprietary information owned by +* Vivante Corporation. No part of this work may be disclosed, +* reproduced, copied, transmitted, or used in any way for any purpose, +* without the express written permission of Vivante Corporation. 
+* +*****************************************************************************/ + + +#ifndef __gc_hal_base_shared_h_ +#define __gc_hal_base_shared_h_ + +#ifdef __cplusplus +extern "C" { +#endif + +#define gcdBINARY_TRACE_MESSAGE_SIZE 240 + +typedef struct _gcsBINARY_TRACE_MESSAGE *gcsBINARY_TRACE_MESSAGE_PTR; +typedef struct _gcsBINARY_TRACE_MESSAGE { + gctUINT32 signature; + gctUINT32 pid; + gctUINT32 tid; + gctUINT32 line; + gctUINT32 numArguments; + gctUINT8 payload; +} gcsBINARY_TRACE_MESSAGE; + +/* gcsOBJECT object definition. */ +typedef struct _gcsOBJECT { + /* Type of an object. */ + gceOBJECT_TYPE type; +} gcsOBJECT; + +#ifdef __cplusplus +} +#endif + +#endif /* __gc_hal_base_shared_h_ */ + + diff --git a/unified-tina/inc/HAL/shared/gc_hal_driver_shared.h b/unified-tina/inc/HAL/shared/gc_hal_driver_shared.h new file mode 100644 index 0000000..a9e994d --- /dev/null +++ b/unified-tina/inc/HAL/shared/gc_hal_driver_shared.h @@ -0,0 +1,1331 @@ +/**************************************************************************** +* +* Copyright (c) 2005 - 2023 by Vivante Corp. All rights reserved. +* +* The material in this file is confidential and contains trade secrets +* of Vivante Corporation. This is proprietary information owned by +* Vivante Corporation. No part of this work may be disclosed, +* reproduced, copied, transmitted, or used in any way for any purpose, +* without the express written permission of Vivante Corporation. +* +*****************************************************************************/ + + +#ifndef __gc_hal_driver_shared_h_ +#define __gc_hal_driver_shared_h_ + +#include "gc_hal_enum_shared.h" +#include "gc_hal_types_shared.h" + + +#if defined(__QNXNTO__) +#include +#endif + +#ifdef __cplusplus +extern "C" { +#endif + +/* The DC instruction will cause a segfault when the write buffer and cache are disabled. */ +#ifndef gcdSKIP_ARM_DC_INSTRUCTION +#define gcdSKIP_ARM_DC_INSTRUCTION 0 +#endif + +/* The number of context buffers per user. */ +#if gcdCAPTURE_ONLY_MODE +#define gcdCONTEXT_BUFFER_COUNT 1 +#else +#define gcdCONTEXT_BUFFER_COUNT 2 +#endif + +#define gcdRENDER_FENCE_LENGTH (6 * gcmSIZEOF(gctUINT32)) +#define gcdBLT_FENCE_LENGTH (10 * gcmSIZEOF(gctUINT32)) + +/****************************************************************************** + ****************************** I/O Control Codes ***************************** + ******************************************************************************/ + +#define gcvHAL_CLASS "galcore" +#define IOCTL_GCHAL_INTERFACE 30000 +#define IOCTL_GCHAL_PROFILER_INTERFACE 30001 +#define IOCTL_GCHAL_TERMINATE 30002 + +/****************************************************************************** + ***************************** Interface Structure **************************** + ******************************************************************************/ + +#define gcdMAX_PROFILE_FILE_NAME 128 +#define gcdMAX_FLAT_MAPPING_COUNT 8 + +/* gcvHAL_CHIP_INFO */ +typedef struct _gcsHAL_CHIP_INFO { + /* Chip count. */ + OUT gctUINT16 count; + + /* Chip types. */ + OUT gctUINT8 types[gcdGLOBAL_CORE_COUNT]; + + /* Chip IDs. */ + OUT gctUINT8 ids[gcdGLOBAL_CORE_COUNT]; + + OUT gctUINT16 coreIndexs[gcdGLOBAL_CORE_COUNT]; + + /* Hardware device ID. */ + OUT gctUINT8 hwDevIDs[gcdGLOBAL_CORE_COUNT]; +} gcsHAL_CHIP_INFO; + +/* gcvHAL_VERSION */ +typedef struct _gcsHAL_VERSION { + /* version: ... */ + OUT gctINT32 major; + OUT gctINT32 minor; + OUT gctINT32 patch; + + /* Build version.
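+ * e.g. 690884 here, matching gcvVERSION_BUILD and the "6.4.15.3.690884" gcvVERSION_STRING in gc_hal_version.h.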
*/ + OUT gctUINT32 build; +} gcsHAL_VERSION; + +/* gcvHAL_SET_TIMEOUT. */ +typedef struct _gcsHAL_SET_TIMEOUT { + gctUINT32 timeOut; +} gcsHAL_SET_TIMEOUT; + +/* gcvHAL_QUERY_VIDEO_MEMORY */ +typedef struct _gcsHAL_QUERY_VIDEO_MEMORY { + /* Physical memory address of internal memory. Just a name. */ + OUT gctUINT32 internalPhysName; + /* Size in bytes of internal memory. */ + OUT gctUINT64 internalSize; + + /* Physical memory address of external memory. Just a name. */ + OUT gctUINT32 externalPhysName; + /* Size in bytes of external memory.*/ + OUT gctUINT64 externalSize; + + /* Physical memory address of contiguous memory. Just a name. */ + OUT gctUINT32 contiguousPhysName; + /* Size in bytes of contiguous memory.*/ + OUT gctUINT64 contiguousSize; + + /* Physical memory address of exclusive memory. Just a name. */ + OUT gctUINT32 exclusivePhysName; + /* Size in bytes of exclusive memory.*/ + OUT gctUINT64 exclusiveSize; + + /* If the virtual pool can be an available video memory pool. */ + OUT gctBOOL virtualPoolEnabled; +} gcsHAL_QUERY_VIDEO_MEMORY; + +/* gcvHAL_QUERY_CHIP_IDENTITY */ +typedef struct _gcsHAL_QUERY_CHIP_IDENTITY *gcsHAL_QUERY_CHIP_IDENTITY_PTR; +typedef struct _gcsHAL_QUERY_CHIP_IDENTITY { + /* Chip model. */ + gceCHIPMODEL chipModel; + + /* Revision value.*/ + gctUINT32 chipRevision; + + /* Chip date. */ + gctUINT32 chipDate; + + /* Supported feature fields. */ + gctUINT32 chipFeatures; + + /* Supported minor feature fields. */ + gctUINT32 chipMinorFeatures; + + /* Supported minor feature 1 fields. */ + gctUINT32 chipMinorFeatures1; + + /* Supported minor feature 2 fields. */ + gctUINT32 chipMinorFeatures2; + + /* Supported minor feature 3 fields. */ + gctUINT32 chipMinorFeatures3; + + /* Supported minor feature 4 fields. */ + gctUINT32 chipMinorFeatures4; + + /* Supported minor feature 5 fields. */ + gctUINT32 chipMinorFeatures5; + + /* Supported minor feature 6 fields. */ + gctUINT32 chipMinorFeatures6; + + /* Number of streams supported. */ + gctUINT32 streamCount; + + /* Number of pixel pipes. */ + gctUINT32 pixelPipes; + + /* Number of resolve pipes. */ + gctUINT32 resolvePipes; + + /* Number of instructions. */ + gctUINT32 instructionCount; + + /* Number of PS instructions. */ + gctUINT32 PSInstructionCount; + + /* Number of constants. */ + gctUINT32 numConstants; + + /* Number of varyings */ + gctUINT32 varyingsCount; + + /* Number of 3D GPUs */ + gctUINT32 gpuCoreCount; + + /* Physical mask of all AVAILABLE clusters in core.*/ + gctUINT32 clusterAvailMask; + + /* Product ID */ + gctUINT32 productID; + + /* Special chip flag bits */ + gceCHIP_FLAG chipFlags; + + /* ECO ID. */ + gctUINT32 ecoID; + + /* Customer ID. */ + gctUINT32 customerID; + + /* CPU view physical address and size of SRAMs. */ + gctUINT64 sRAMBases[gcvSRAM_INTER_COUNT]; + gctUINT32 sRAMSizes[gcvSRAM_INTER_COUNT]; + + gctUINT64 platformFlagBits; + + /* APB register offset. */ + gctUINT64 registerAPB; + + /* Enabled NN clusters number. */ + gctUINT32 nnClusterNum; + + /* Virtual address bits. */ + gctUINT32 virtualAddressBits; + + gctUINT32 chipConfig; +} gcsHAL_QUERY_CHIP_IDENTITY; + +/* gcvHAL_QUERY_CHIP_OPTION. */ +typedef struct _gcsHAL_QUERY_CHIP_OPTIONS *gcsHAL_QUERY_CHIP_OPTIONS_PTR; +typedef struct _gcsHAL_QUERY_CHIP_OPTIONS { + gctBOOL gpuProfiler; + gctBOOL allowFastClear; + gctBOOL powerManagement; + /* + * Whether use new MMU. It is meaningless + * for old MMU since old MMU is always enabled. 
+ */ + gctBOOL enableMMU; + gceCOMPRESSION_OPTION allowCompression; + gctBOOL smallBatch; + gctUINT32 uscL1CacheRatio; + gctUINT32 uscAttribCacheRatio; + gctUINT32 userClusterMask; + gctUINT32 userClusterMasks[gcdMAX_MAJOR_CORE_COUNT]; + + /* Internal SRAM. */ + gctADDRESS sRAMGPUVirtAddrs[gcvSRAM_INTER_COUNT]; + gctUINT32 sRAMSizes[gcvSRAM_INTER_COUNT]; + gctUINT32 sRAMCount; + + /* External SRAM. */ + gctPHYS_ADDR_T extSRAMCPUPhysAddrs[gcvSRAM_EXT_COUNT]; + gctPHYS_ADDR_T extSRAMGPUPhysAddrs[gcvSRAM_EXT_COUNT]; + gctADDRESS extSRAMGPUVirtAddrs[gcvSRAM_EXT_COUNT]; + gctUINT32 extSRAMGPUPhysNames[gcvSRAM_EXT_COUNT]; + gctUINT32 extSRAMSizes[gcvSRAM_EXT_COUNT]; + gctUINT32 extSRAMCount; + + gceSECURE_MODE secureMode; + + gctBOOL hasShader; + + /* NN clusters power control. */ + gctUINT32 enableNNClusters; + gctUINT32 configNNPowerControl; + /* Active NN core count. */ + gctUINT32 activeNNCoreCount; + + /* Only represents system reserved memory pool currently. */ + gctUINT32 vidMemCount; +} gcsHAL_QUERY_CHIP_OPTIONS; + +/* gcvHAL_QUERY_CHIP_FREQUENCY. */ +typedef struct _gcsHAL_QUERY_CHIP_FREQUENCY *gcsHAL_QUERY_CHIP_FREQUENCY_PTR; +typedef struct _gcsHAL_QUERY_CHIP_FREQUENCY { + OUT gctUINT64 mcClk; + OUT gctUINT64 shClk; +} gcsHAL_QUERY_CHIP_FREQUENCY; + +/* Obsolete for userspace. */ +/* gcvHAL_ALLOCATE_NON_PAGED_MEMORY */ +typedef struct _gcsHAL_ALLOCATE_NON_PAGED_MEMORY { + /* Allocation flags. */ + IN gctUINT32 flags; + + /* Number of bytes to allocate. */ + IN OUT gctUINT64 bytes; + + /* Physical address of allocation. Just a name. */ + OUT gctUINT32 physName; + + /* Logical address of allocation. */ + OUT gctUINT64 logical; +} gcsHAL_ALLOCATE_NON_PAGED_MEMORY; + +/* Obsolete for userspace. */ +/* gcvHAL_FREE_NON_PAGED_MEMORY */ +typedef struct _gcsHAL_FREE_NON_PAGED_MEMORY { + /* Number of bytes allocated. */ + IN gctUINT64 bytes; + + /* Physical address of allocation. Just a name. */ + IN gctUINT32 physName; + + /* Logical address of allocation. */ + IN gctUINT64 logical; +} gcsHAL_FREE_NON_PAGED_MEMORY; + +/* Video memory allocation. */ +/* gcvHAL_ALLOCATE_LINEAR_VIDEO_MEMORY */ +typedef struct _gcsHAL_ALLOCATE_LINEAR_VIDEO_MEMORY { + /* Number of bytes to allocate. */ + IN OUT gctUINT64 bytes; + + /* Buffer alignment. */ + IN gctUINT32 alignment; + + /* Type of allocation, see gceVIDMEM_TYPE. */ + IN gctUINT32 type; + + /* Flag of allocation. */ + IN gctUINT32 flag; + + /* Memory pool to allocate from. */ + IN OUT gctUINT32 pool; + + /* Internal SRAM index. */ + IN gctINT32 sRAMIndex; + + /* External SRAM index. */ + IN gctINT32 extSRAMIndex; + + /* Allocated video memory. */ + OUT gctUINT32 node; + + /* Video memory index, only represents system reserved memory pool currently. */ + IN gctINT32 vidMemIndex; +} gcsHAL_ALLOCATE_LINEAR_VIDEO_MEMORY; + +typedef struct _gcsUSER_MEMORY_DESC { + /* Import flag. */ + gctUINT32 flag; + + /* gcvALLOC_FLAG_DMABUF */ + gctUINT32 handle; + gctUINT64 dmabuf; + + /* gcvALLOC_FLAG_USERMEMORY */ + gctUINT64 logical; + gctUINT64 physical; + gctUINT64 size; +} gcsUSER_MEMORY_DESC; + +/* gcvHAL_WRAP_USER_MEMORY. */ +typedef struct _gcsHAL_WRAP_USER_MEMORY { + /* Description of user memory. */ + IN gcsUSER_MEMORY_DESC desc; + + /* Video memory allocation type. */ + IN gctUINT32 type; + + /* Output video memory node. */ + OUT gctUINT32 node; + + /* Size of the node in bytes. */ + OUT gctUINT64 bytes; +} gcsHAL_WRAP_USER_MEMORY; + +/* gcvHAL_RELEASE_VIDEO_MEMORY */ +typedef struct _gcsHAL_RELEASE_VIDEO_MEMORY { + /* Allocated video memory.
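+ * This is the same 32-bit node handle returned by gcsHAL_ALLOCATE_LINEAR_VIDEO_MEMORY (or gcsHAL_WRAP_USER_MEMORY); a node is typically locked and unlocked some number of times before being released here.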
*/ + IN gctUINT32 node; + +#ifdef __QNXNTO__ + /* Mapped logical address to unmap in user space. */ + OUT gctUINT64 memory; + + /* Number of bytes allocated. */ + OUT gctUINT64 bytes; +#endif +} gcsHAL_RELEASE_VIDEO_MEMORY; + +/* gcvHAL_LOCK_VIDEO_MEMORY */ +typedef struct _gcsHAL_LOCK_VIDEO_MEMORY { + /* Allocated video memory. */ + IN gctUINT32 node; + + /* Cache configuration. */ + /* Only gcvPOOL_VIRTUAL can be configured */ + IN gctBOOL cacheable; + + /* Hardware specific address. */ + OUT gctADDRESS address; + + /* Mapped logical address. */ + OUT gctUINT64 memory; + + /* Customer private handle. */ + OUT gctUINT32 gid; + + /* Bus address of a contiguous video node. */ + OUT gctUINT64 physicalAddress; + +#if gcdCAPTURE_ONLY_MODE + IN gctBOOL queryCapSize; + IN gctPOINTER captureLogical; + OUT gctUINT64 captureSize; +#endif + + IN gceLOCK_VIDEO_MEMORY_OP op; +} gcsHAL_LOCK_VIDEO_MEMORY; + +/* gcvHAL_UNLOCK_VIDEO_MEMORY */ +typedef struct _gcsHAL_UNLOCK_VIDEO_MEMORY { + /* Allocated video memory. */ + IN gctUINT64 node; + + /* Video memory allocation type. */ + IN gctUINT32 type; + + /* Pool of the node being unlocked. */ + OUT gctUINT32 pool; + + /* Bytes of the node being unlocked. */ + OUT gctUINT64 bytes; + + /* Flag to unlock the surface asynchronously. */ + IN OUT gctBOOL asynchroneous; + +#if gcdCAPTURE_ONLY_MODE + OUT gctPOINTER captureLogical; +#endif + + IN gceLOCK_VIDEO_MEMORY_OP op; + + IN gctUINT64 mmu; +} gcsHAL_UNLOCK_VIDEO_MEMORY; + +/* gcvHAL_BOTTOM_HALF_UNLOCK_VIDEO_MEMORY: */ +typedef struct _gcsHAL_BOTTOM_HALF_UNLOCK_VIDEO_MEMORY { + /* Allocated video memory. */ + IN gctUINT32 node; + + /* Video memory allocation type. */ + IN gctUINT32 type; +} gcsHAL_BOTTOM_HALF_UNLOCK_VIDEO_MEMORY; + +/* gcvHAL_EXPORT_VIDEO_MEMORY. */ +typedef struct _gcsHAL_EXPORT_VIDEO_MEMORY { + /* Allocated video memory. */ + IN gctUINT32 node; + + /* Export flags */ + IN gctUINT32 flags; + + /* Exported dma_buf fd */ + OUT gctINT32 fd; +} gcsHAL_EXPORT_VIDEO_MEMORY; + +/* gcvHAL_NAME_VIDEO_MEMORY. */ +typedef struct _gcsHAL_NAME_VIDEO_MEMORY { + IN gctUINT32 handle; + OUT gctUINT32 name; +} gcsHAL_NAME_VIDEO_MEMORY; + +/* gcvHAL_IMPORT_VIDEO_MEMORY. */ +typedef struct _gcsHAL_IMPORT_VIDEO_MEMORY { + IN gctUINT32 name; + OUT gctUINT32 handle; +} gcsHAL_IMPORT_VIDEO_MEMORY; + +/* gcvHAL_MAP_MEMORY */ +typedef struct _gcsHAL_MAP_MEMORY { + /* Physical memory address to map. Just a name on Linux/Qnx. */ + IN gctUINT32 physName; + + /* Number of bytes in physical memory to map. */ + IN gctUINT64 bytes; + + /* Address of mapped memory. */ + OUT gctUINT64 logical; +} gcsHAL_MAP_MEMORY; + +/* gcvHAL_UNMAP_MEMORY */ +typedef struct _gcsHAL_UNMAP_MEMORY { + /* Physical memory address to unmap. Just a name on Linux/Qnx. */ + IN gctUINT32 physName; + + /* Number of bytes in physical memory to unmap. */ + IN gctUINT64 bytes; + + /* Address of mapped memory to unmap. */ + IN gctUINT64 logical; +} gcsHAL_UNMAP_MEMORY; + +/* gcvHAL_CACHE */ +typedef struct _gcsHAL_CACHE { + IN gceCACHEOPERATION operation; + IN gctUINT64 process; + IN gctUINT64 logical; + IN gctUINT64 bytes; + IN gctUINT32 node; + IN gctUINT64 offset; +} gcsHAL_CACHE; + +/* gcvHAL_ATTACH */ +typedef struct _gcsHAL_ATTACH { + /* Handle of context buffer object. */ + OUT gctUINT32 context; + + /* Maximum state in the buffer. */ + OUT gctUINT64 maxState; + + /* Number of states in the buffer. */ + OUT gctUINT32 numStates; + + /* Map context buffer to user or not. */ + IN gctBOOL map; + + /* Physical of context buffer.
*/ + OUT gctUINT64 logicals[2]; + + /* Bytes of context buffer. */ + OUT gctUINT32 bytes; + + /* Whether the context is for multi-core or not. */ + IN gctBOOL shared; + +#if gcdCAPTURE_ONLY_MODE + IN gctBOOL queryCapSize; + IN gctPOINTER contextLogical[gcdCONTEXT_BUFFER_COUNT]; + OUT gctUINT64 captureSize; +#endif +} gcsHAL_ATTACH; + +/* gcvHAL_DETACH */ +typedef struct _gcsHAL_DETACH { + /* Context buffer object gckCONTEXT. Just a name. */ + IN gctUINT32 context; +} gcsHAL_DETACH; + +/* gcvHAL_EVENT_COMMIT. */ +typedef struct _gcsHAL_EVENT_COMMIT { + /* Event queue in gcsQUEUE. */ + IN gctUINT64 queue; + + /* Brother cores in user device of current commit process. */ + IN gctUINT32 broCoreMask; + + IN gctBOOL shared; +#if gcdENABLE_SW_PREEMPTION + /* Priority ID. */ + IN gctUINT32 priorityID; + + /* Does it require top priority? */ + IN gctBOOL topPriority; +#endif +} gcsHAL_EVENT_COMMIT; + +typedef struct _gcsHAL_COMMAND_LOCATION { + gctUINT32 priority; + gctUINT32 channelId; + + gctUINT32 videoMemNode; + + gctADDRESS address; + gctUINT64 logical; + gctUINT32 startOffset; + /* size includes reservedHead and reservedTail. */ + gctUINT32 size; + + gctUINT32 reservedHead; + gctUINT32 reservedTail; + + /* Pointer to patch list. */ + gctUINT64 patchHead; + + /* + * Location index of exit commands, i.e., where to put the chipEnable/link back + * commands in the reservedTail area. + * It is used in a fully shared command buffer for multiple cores. + */ + gctUINT32 exitIndex; + gctUINT32 entryPipe; + gctUINT32 exitPipe; + + /* struct _gcsHAL_COMMAND_LOCATION * next; */ + gctUINT64 next; +#if gcdCAPTURE_ONLY_MODE + gctPOINTER contextLogical[gcdCONTEXT_BUFFER_COUNT]; +#endif +} gcsHAL_COMMAND_LOCATION; + +typedef struct _gcsHAL_SUBCOMMIT { + gctUINT32 coreId; + + /* user gcsSTATE_DELTA_PTR. */ + gctUINT64 delta; + + /* Kernel gckCONTEXT. */ + gctUINT64 context; + + /* Event queue in user gcsQUEUE *. */ + gctUINT64 queue; + + /* Locate the commands. */ + gcsHAL_COMMAND_LOCATION commandBuffer; + + /* struct _gcsHAL_SUBCOMMIT * next; */ + gctUINT64 next; + +#if gcdENABLE_SW_PREEMPTION + /* Process ID. */ + gctUINT32 pid; + + /* Engine type. */ + gceENGINE engine; + + /* Is it multi-core and shared mode? */ + gctBOOL shared; + + /* Priority ID. */ + gctUINT32 priorityID; + + /* Does it require top priority? */ + gctBOOL topPriority; +#endif +} gcsHAL_SUBCOMMIT, *gcsHAL_SUBCOMMIT_PTR; + +/* gcvHAL_COMMIT */ +typedef struct _gcsHAL_COMMIT { + gcsHAL_SUBCOMMIT subCommit; + + gctBOOL shared; + + gctBOOL contextSwitched; + + /* Commit stamp of this commit. */ + OUT gctUINT64 commitStamp; + + /* Brother cores in user device of current commit process. */ + gctUINT32 broCoreMask; + +#if gcdENABLE_MP_SWITCH + /* Multi-processor mode. */ + gctUINT32 mpMode; + + /* Switch multi-processor mode. */ + gctUINT32 switchMpMode; +#endif + +#if gcdENABLE_SW_PREEMPTION + /* If the user needs to merge the delta. */ + gctBOOL needMerge; + + /* If this commit is pending. */ + gctBOOL pending; +#endif +} gcsHAL_COMMIT; + + +typedef struct _gcsHAL_COMMIT_DONE { + IN gctUINT64 context; + +#if gcdENABLE_SW_PREEMPTION + /* Priority ID. */ + IN gctUINT32 priorityID; +#endif +} gcsHAL_COMMIT_DONE; + +/* gcvHAL_USER_SIGNAL */ +typedef struct _gcsHAL_USER_SIGNAL { + /* Command. */ + gceUSER_SIGNAL_COMMAND_CODES command; + + /* Signal ID. */ + IN OUT gctINT32 id; + + /* Reset mode. */ + IN gctBOOL manualReset; + + /* Wait timeout. */ + IN gctUINT32 wait; + + /* State.
*/ + IN gctBOOL state; + + /* Return status */ + IN gceSIGNAL_STATUS status; +} gcsHAL_USER_SIGNAL; + +/* gcvHAL_SIGNAL. */ +typedef struct _gcsHAL_SIGNAL { + /* Signal handle to signal gctSIGNAL. */ + IN gctUINT64 signal; + + /* Reserved gctSIGNAL. */ + IN gctUINT64 auxSignal; + + /* Process owning the signal gctHANDLE. */ + IN gctUINT64 process; + +#if defined(__QNXNTO__) + /* Client pulse event. */ + IN struct sigevent event; + + /* Set by server. */ + IN gctINT32 rcvid; +#endif + /* Event generated from where of pipeline */ + IN gceKERNEL_WHERE fromWhere; + +#if gcdENABLE_SW_PREEMPTION + /* If it is a fence signal. */ + IN gctBOOL fenceSignal; +#endif +} gcsHAL_SIGNAL; + +/* gcvHAL_WRITE_DATA. */ +typedef struct _gcsHAL_WRITE_DATA { + /* Address to write data to. */ + IN gctUINT32 address; + + /* Data to write. */ + IN gctUINT32 data; +} gcsHAL_WRITE_DATA; + +/* gcvHAL_READ_REGISTER */ +typedef struct _gcsHAL_READ_REGISTER { + /* Logical address of memory to write data to. */ + IN gctUINT32 address; + + /* Data read. */ + OUT gctUINT32 data; +} gcsHAL_READ_REGISTER; + +/* gcvHAL_WRITE_REGISTER */ +typedef struct _gcsHAL_WRITE_REGISTER { + /* Logical address of memory to write data to. */ + IN gctUINT32 address; + + /* Data read. */ + IN gctUINT32 data; +} gcsHAL_WRITE_REGISTER; + +/* gcvHAL_READ_REGISTER_EX */ +typedef struct _gcsHAL_READ_REGISTER_EX { + /* Logical address of memory to write data to. */ + IN gctUINT32 address; + + IN gctUINT32 coreSelect; + + /* Data read. */ + OUT gctUINT32 data[4]; +} gcsHAL_READ_REGISTER_EX; + +/* gcvHAL_WRITE_REGISTER_EX */ +typedef struct _gcsHAL_WRITE_REGISTER_EX { + /* Logical address of memory to write data to. */ + IN gctUINT32 address; + + IN gctUINT32 coreSelect; + + /* Data read. */ + IN gctUINT32 data[4]; +} gcsHAL_WRITE_REGISTER_EX; + +/* gcvHAL_APB_AXIFEE_ACCESS */ +typedef struct _gcsHAL_APB_AXIFE_ACCESS { + /* Logical address of memory to write data to. */ + IN gctUINT32 address; + + IN gctUINT32 coreSelect; + + IN gctBOOL isRead; + + /* Data read. */ + IN gctUINT32 data; +} gcsHAL_APB_AXIFE_ACCESS; + +#if VIVANTE_PROFILER +/* gcvHAL_GET_PROFILE_SETTING */ +typedef struct _gcsHAL_GET_PROFILE_SETTING { + /* Enable profiling */ + OUT gctBOOL enable; + /* Profile mode */ + OUT gceProfilerMode profileMode; + /* Probe mode */ + OUT gceProbeMode probeMode; +} gcsHAL_GET_PROFILE_SETTING; + +/* gcvHAL_SET_PROFILE_SETTING */ +typedef struct _gcsHAL_SET_PROFILE_SETTING { + /* Enable profiling */ + IN gctBOOL enable; + /* Profile mode */ + IN gceProfilerMode profileMode; + /* Probe mode */ + IN gceProbeMode probeMode; +} gcsHAL_SET_PROFILE_SETTING; + +/* gcvHAL_READ_PROFILER_REGISTER_SETTING */ +typedef struct _gcsHAL_READ_PROFILER_REGISTER_SETTING { + /*Should Clear Register*/ + IN gctBOOL bclear; +} gcsHAL_READ_PROFILER_REGISTER_SETTING; + +typedef struct _gcsHAL_READ_ALL_PROFILE_REGISTERS_PART1 { + /* Context buffer object gckCONTEXT. Just a name. */ + IN gctUINT32 context; + + /* Data read. */ + OUT gcsPROFILER_COUNTERS_PART1 Counters; +} gcsHAL_READ_ALL_PROFILE_REGISTERS_PART1; + +typedef struct _gcsHAL_READ_ALL_PROFILE_REGISTERS_PART2 { + /* Context buffer object gckCONTEXT. Just a name. */ + IN gctUINT32 context; + + /* Data read. */ + OUT gcsPROFILER_COUNTERS_PART2 Counters; +} gcsHAL_READ_ALL_PROFILE_REGISTERS_PART2; + +/* gcvHAL_PROFILE_REGISTERS_2D */ +typedef struct _gcsHAL_PROFILE_REGISTERS_2D { + /* Data read in gcs2D_PROFILE. 
*/ + OUT gctUINT64 hwProfile2D; +} gcsHAL_PROFILE_REGISTERS_2D; +#endif + +/* gcvHAL_SET_POWER_MANAGEMENT_STATE */ +typedef struct _gcsHAL_SET_POWER_MANAGEMENT { + /* Data read. */ + IN gceCHIPPOWERSTATE state; +} gcsHAL_SET_POWER_MANAGEMENT; + +/* gcvHAL_QUERY_POWER_MANAGEMENT_STATE */ +typedef struct _gcsHAL_QUERY_POWER_MANAGEMENT { + /* Data read. */ + OUT gceCHIPPOWERSTATE state; + + /* Idle query. */ + OUT gctBOOL isIdle; +} gcsHAL_QUERY_POWER_MANAGEMENT; + +/* gcvHAL_CONFIG_POWER_MANAGEMENT. */ +typedef struct _gcsHAL_CONFIG_POWER_MANAGEMENT { + IN gctBOOL enable; + OUT gctBOOL oldValue; +} gcsHAL_CONFIG_POWER_MANAGEMENT; + +typedef struct _gcsFLAT_MAPPING_RANGE { + gctUINT64 start; + gctUINT64 end; + gctUINT64 size; + gceFLATMAP_FLAG flag; + + /* Corresponding virtual start. */ + gctUINT64 vStart; +} gcsFLAT_MAPPING_RANGE; + +/* gcvHAL_GET_BASE_ADDRESS */ +typedef struct _gcsHAL_GET_BASE_ADDRESS { + /* Physical memory address of internal memory. */ + OUT gctUINT32 baseAddress; + + OUT gctUINT32 flatMappingRangeCount; + + OUT gcsFLAT_MAPPING_RANGE flatMappingRanges[gcdMAX_FLAT_MAPPING_COUNT]; +} gcsHAL_GET_BASE_ADDRESS; + +typedef struct _gcsHAL_SET_DEBUG_LEVEL_ZONE { + IN gctUINT32 level; + IN gctUINT32 zones; + IN gctBOOL enable; +} gcsHAL_SET_DEBUG_LEVEL_ZONE; + +typedef struct _gcsHAL_QUERY_CPU_FREQUENCY +{ + IN gctUINT32 CPUId; + OUT gctUINT32 CPUFrequency; +} gcsHAL_QUERY_CPU_FREQUENCY; + +/* gcvHAL_DEBUG_DUMP. */ +typedef struct _gcsHAL_DEBUG_DUMP { + /* gceDUMP_BUFFER_TYPE type. */ + IN gctUINT32 type; + + IN gctUINT64 ptr; + IN gctADDRESS address; + IN gctUINT32 size; +} gcsHAL_DEBUG_DUMP; + + +/* gcvHAL_TIMESTAMP */ +typedef struct _gcsHAL_TIMESTAMP { + /* Timer select. */ + IN gctUINT32 timer; + + /* Timer request type (0-stop, 1-start, 2-send delta). */ + IN gctUINT32 request; + + /* Result of delta time in microseconds. */ + OUT gctINT32 timeDelta; +} gcsHAL_TIMESTAMP; + +/* gcvHAL_DATABASE */ +typedef struct _gcsHAL_DATABASE { + /* + * Set to gcvTRUE if you want to query a particular process ID. + * Set to gcvFALSE to query the last detached process. + */ + IN gctBOOL validProcessID; + + /* Process ID to query. */ + IN gctUINT32 processID; + + /* Information. */ + OUT gcuDATABASE_INFO vidMem; + OUT gcuDATABASE_INFO nonPaged; + OUT gcuDATABASE_INFO contiguous; + OUT gcuDATABASE_INFO gpuIdle; + + /* Detail information about video memory. */ + OUT gcuDATABASE_INFO vidMemPool[3]; +} gcsHAL_DATABASE; + +/* gcvHAL_GET_FRAME_INFO. */ +typedef struct _gcsHAL_GET_FRAME_INFO { + /* gcsHAL_FRAME_INFO* */ + OUT gctUINT64 frameInfo; +} gcsHAL_GET_FRAME_INFO; + + +typedef struct _gcsHAL_SET_FSCALE_VALUE { + IN gctUINT32 value; + IN gctUINT32 shValue; +} gcsHAL_SET_FSCALE_VALUE; + +typedef struct _gcsHAL_GET_FSCALE_VALUE { + OUT gctUINT32 value; + OUT gctUINT32 minValue; + OUT gctUINT32 maxValue; +} gcsHAL_GET_FSCALE_VALUE; + +/* gcvHAL_QUERY_RESET_TIME_STAMP. */ +typedef struct _gcsHAL_QUERY_RESET_TIME_STAMP { + OUT gctUINT64 timeStamp; + OUT gctUINT64 contextID; +} gcsHAL_QUERY_RESET_TIME_STAMP; + +/* gcvHAL_CREATE_NATIVE_FENCE. */ +typedef struct _gcsHAL_CREATE_NATIVE_FENCE { + /* Signal id. */ + IN gctUINT64 signal; + + /* Native fence file descriptor. */ + OUT gctINT32 fenceFD; + +} gcsHAL_CREATE_NATIVE_FENCE; + +/* gcvHAL_WAIT_NATIVE_FENCE. */ +typedef struct _gcsHAL_WAIT_NATIVE_FENCE { + /* Native fence file descriptor. */ + IN gctINT32 fenceFD; + + /* Wait timeout. */ + IN gctUINT32 timeout; +} gcsHAL_WAIT_NATIVE_FENCE; + +/* gcvHAL_SHBUF. 
*/ +typedef struct _gcsHAL_SHBUF { + gceSHBUF_COMMAND_CODES command; + + /* Shared buffer. */ + IN OUT gctUINT64 id; + + /* User data to be shared. */ + IN gctUINT64 data; + + /* Data size. */ + IN OUT gctUINT32 bytes; +} gcsHAL_SHBUF; + +/* gcvHAL_GET_GRAPHIC_BUFFER_FD. */ +/* + * Fd representation of Android graphic buffer contents. + * Currently, it is only used to reference video nodes, signals, etc., to avoid + * them being destroyed when transferring across processes. + */ +typedef struct _gcsHAL_GET_GRAPHIC_BUFFER_FD { + /* Max 3 video nodes, node handle here. */ + IN gctUINT32 node[3]; + + /* A shBuf. */ + IN gctUINT64 shBuf; + + /* A signal. */ + IN gctUINT64 signal; + + OUT gctINT32 fd; +} gcsHAL_GET_GRAPHIC_BUFFER_FD; + +typedef struct _gcsHAL_VIDEO_MEMORY_METADATA { + /* Allocated video memory. */ + IN gctUINT32 node; + + IN gctUINT32 readback; + + INOUT gctINT32 ts_fd; + INOUT gctUINT32 fc_enabled; + INOUT gctUINT32 fc_value; + INOUT gctUINT32 fc_value_upper; + + INOUT gctUINT32 compressed; + INOUT gctUINT32 compress_format; +} gcsHAL_VIDEO_MEMORY_METADATA; + +/* gcvHAL_GET_VIDEO_MEMORY_FD. */ +typedef struct _gcsHAL_GET_VIDEO_MEMORY_FD { + IN gctUINT32 handle; + IN gctBOOL exported; + OUT gctINT32 fd; +} gcsHAL_GET_VIDEO_MEMORY_FD; + +/* gcvHAL_DESTROY_MMU. */ +typedef struct _gcsHAL_DESTROY_MMU { + /* Mmu object. */ + IN gctUINT64 mmu; + IN gctUINT64 database; + IN gctUINT32 pid; +} gcsHAL_DESTROY_MMU; + +/* gcvHAL_WAIT_FENCE. */ +typedef struct _gcsHAL_WAIT_FENCE { + IN gctUINT32 handle; + IN gctUINT32 timeOut; +} gcsHAL_WAIT_FENCE; + +/* gcvHAL_DEVICE_MUTEX: */ +typedef struct _gcsHAL_DEVICE_MUTEX { + /* Lock or Release device mutex. */ + gctBOOL isMutexLocked; +} gcsHAL_DEVICE_MUTEX; + + +#if gcdDEC_ENABLE_AHB +/* gcvHAL_DEC300_READ. */ +typedef struct _gcsHAL_DEC300_READ { + gctUINT32 enable; + gctUINT32 readId; + gctUINT32 format; + gctUINT32 strides[3]; + gctUINT32 is3D; + gctUINT32 isMSAA; + gctUINT32 clearValue; + gctUINT32 isTPC; + gctUINT32 isTPCCompressed; + gctUINT32 surfAddrs[3]; + gctUINT32 tileAddrs[3]; +} DEC300Read; + +/* gcvHAL_DEC300_WRITE. */ +typedef struct _gcsHAL_DEC300_WRITE { + gctUINT32 enable; + gctUINT32 readId; + gctUINT32 writeId; + gctUINT32 format; + gctUINT32 surfAddr; + gctUINT32 tileAddr; +} DEC300Write; + +/* gcvHAL_DEC300_FLUSH. */ +typedef struct _gcsHAL_DEC300_FLUSH { + IN gctUINT8 useless; +} DEC300Flush; + +/* gcvHAL_DEC300_FLUSH_WAIT. */ +typedef struct _gcsHAL_DEC300_FLUSH_WAIT { + IN gctUINT32 done; +} DEC300FlushWait; +#endif + +#if gcdENABLE_VIDEO_MEMORY_MIRROR +typedef struct _gcsHAL_SYNC_VIDEO_MEMORY { + IN gctUINT64 node; + IN gceSYNC_MEMORY_DIRECTION dir; +} gcsHAL_SYNC_VIDEO_MEMORY; +#endif + +#if gcdENABLE_CLEAR_FENCE +typedef struct _gcsHAL_STORE_CLEAR_FENCE { + gctADDRESS address; + gctUINT64 fenceValue; + gctUINT64 recordId; + gctBOOL use64BitFence; + gctBOOL isClear; +} gcsHAL_STORE_CLEAR_FENCE; +#endif + +typedef struct _gcsHAL_INTERFACE { + /* Command code. */ + gceHAL_COMMAND_CODES command; + + /* Hardware type. */ + gceHARDWARE_TYPE hardwareType; + + /* Core index for current hardware type. */ + gctUINT32 coreIndex; + + /* Status value. */ + gceSTATUS status; + + /* Engine */ + gceENGINE engine; + + /* Ignore information from TLS when doing IO control. */ + gctBOOL ignoreTLS; + + /* The mutex is already acquired. */ + IN gctBOOL commitMutex; + + /* Union of command structures.
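+ * Only the member selected by command is meaningful for a given request. + * A sketch of one query (the ioctl wrapper that carries this struct through IOCTL_GCHAL_INTERFACE to the galcore device is driver specific and only assumed here, as is the gcvHAL_CHIP_INFO command code): + * + * gcsHAL_INTERFACE iface = {0}; + * iface.command = gcvHAL_CHIP_INFO; + * -- submit iface via IOCTL_GCHAL_INTERFACE, then check iface.status and read + * -- iface.u.ChipInfo.count / iface.u.ChipInfo.types[].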
*/ + union _u { + gcsHAL_CHIP_INFO ChipInfo; + gcsHAL_VERSION Version; + gcsHAL_SET_TIMEOUT SetTimeOut; + + gcsHAL_QUERY_VIDEO_MEMORY QueryVideoMemory; + gcsHAL_QUERY_CHIP_IDENTITY QueryChipIdentity; + gcsHAL_QUERY_CHIP_OPTIONS QueryChipOptions; + gcsHAL_QUERY_CHIP_FREQUENCY QueryChipFrequency; + + gcsHAL_ALLOCATE_NON_PAGED_MEMORY AllocateNonPagedMemory; + gcsHAL_FREE_NON_PAGED_MEMORY FreeNonPagedMemory; + + gcsHAL_ALLOCATE_LINEAR_VIDEO_MEMORY AllocateLinearVideoMemory; + gcsHAL_WRAP_USER_MEMORY WrapUserMemory; + gcsHAL_RELEASE_VIDEO_MEMORY ReleaseVideoMemory; + + gcsHAL_LOCK_VIDEO_MEMORY LockVideoMemory; + gcsHAL_UNLOCK_VIDEO_MEMORY UnlockVideoMemory; + gcsHAL_BOTTOM_HALF_UNLOCK_VIDEO_MEMORY BottomHalfUnlockVideoMemory; + + gcsHAL_EXPORT_VIDEO_MEMORY ExportVideoMemory; + gcsHAL_NAME_VIDEO_MEMORY NameVideoMemory; + gcsHAL_IMPORT_VIDEO_MEMORY ImportVideoMemory; + + gcsHAL_MAP_MEMORY MapMemory; + gcsHAL_UNMAP_MEMORY UnmapMemory; + + gcsHAL_CACHE Cache; + + gcsHAL_ATTACH Attach; + gcsHAL_DETACH Detach; + + gcsHAL_EVENT_COMMIT Event; + gcsHAL_COMMIT Commit; + gcsHAL_COMMIT_DONE CommitDone; + + gcsHAL_USER_SIGNAL UserSignal; + gcsHAL_SIGNAL Signal; + + gcsHAL_WRITE_DATA WriteData; + gcsHAL_READ_REGISTER ReadRegisterData; + gcsHAL_WRITE_REGISTER WriteRegisterData; + gcsHAL_APB_AXIFE_ACCESS APBAXIFEAccess; + gcsHAL_READ_REGISTER_EX ReadRegisterDataEx; + gcsHAL_WRITE_REGISTER_EX WriteRegisterDataEx; + gcsHAL_SET_POWER_MANAGEMENT SetPowerManagement; + gcsHAL_QUERY_POWER_MANAGEMENT QueryPowerManagement; + gcsHAL_CONFIG_POWER_MANAGEMENT ConfigPowerManagement; + + gcsHAL_GET_BASE_ADDRESS GetBaseAddress; + + gcsHAL_SET_DEBUG_LEVEL_ZONE DebugLevelZone; + + gcsHAL_QUERY_CPU_FREQUENCY QueryCPUFrequency; + + gcsHAL_DEBUG_DUMP DebugDump; + + gcsHAL_TIMESTAMP TimeStamp; + gcsHAL_DATABASE Database; + + gcsHAL_GET_FRAME_INFO GetFrameInfo; + + + /* gcsHAL_DUMP_GPU_STATE */ + /* gcsHAL_DUMP_EVENT */ + + gcsHAL_SET_FSCALE_VALUE SetFscaleValue; + gcsHAL_GET_FSCALE_VALUE GetFscaleValue; + + gcsHAL_QUERY_RESET_TIME_STAMP QueryResetTimeStamp; + + gcsHAL_CREATE_NATIVE_FENCE CreateNativeFence; + gcsHAL_WAIT_NATIVE_FENCE WaitNativeFence; + gcsHAL_SHBUF ShBuf; + gcsHAL_GET_GRAPHIC_BUFFER_FD GetGraphicBufferFd; + gcsHAL_VIDEO_MEMORY_METADATA SetVidMemMetadata; + gcsHAL_GET_VIDEO_MEMORY_FD GetVideoMemoryFd; + + gcsHAL_DESTROY_MMU DestroyMmu; + + gcsHAL_WAIT_FENCE WaitFence; + + /* gcvHAL_DEVICE_MUTEX: */ + gcsHAL_DEVICE_MUTEX DeviceMutex; + + +#if gcdDEC_ENABLE_AHB + gcsHAL_DEC300_READ DEC300Read; + gcsHAL_DEC300_WRITE DEC300Write; + gcsHAL_DEC300_FLUSH DEC300Flush; + gcsHAL_DEC300_FLUSH_WAIT DEC300FlushWait; +#endif +#if gcdENABLE_VIDEO_MEMORY_MIRROR + gcsHAL_SYNC_VIDEO_MEMORY SyncVideoMemory; +#endif +#if gcdENABLE_CLEAR_FENCE + gcsHAL_STORE_CLEAR_FENCE UserFence; +#endif + } u; + + /* O/S specific device context. -- Needed for Windows WDDM device callbacks and kernel mode thunks. */ + gctUINT64 devCtxt; + + /* Device index. */ + gctUINT32 devIndex; + + /* API type. -- Needed for Windows WDDM device kernel mode thunks to set ClientHint when a context is created. */ + gceAPI api; + +} gcsHAL_INTERFACE; + +#if VIVANTE_PROFILER +typedef struct _gcsHAL_PROFILER_INTERFACE { + /* Command code. */ + gceHAL_COMMAND_CODES command; + + /* Hardware type. */ + gceHARDWARE_TYPE hardwareType; + + /* Core index for current hardware type. */ + gctUINT32 coreIndex; + + /* Status value. */ + gceSTATUS status; + + /* Ignore information from TLS when doing IO control. */ + gctBOOL ignoreTLS; + + /* Union of command structures.
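+ * As with gcsHAL_INTERFACE above, only the member selected by command is meaningful; profiler requests presumably travel through IOCTL_GCHAL_PROFILER_INTERFACE rather than IOCTL_GCHAL_INTERFACE.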
*/ + union profiler_u { + gcsHAL_GET_PROFILE_SETTING GetProfileSetting; + gcsHAL_SET_PROFILE_SETTING SetProfileSetting; + gcsHAL_READ_PROFILER_REGISTER_SETTING SetProfilerRegisterClear; + gcsHAL_READ_ALL_PROFILE_REGISTERS_PART1 RegisterProfileData_part1; + gcsHAL_READ_ALL_PROFILE_REGISTERS_PART2 RegisterProfileData_part2; + gcsHAL_PROFILE_REGISTERS_2D RegisterProfileData2D; + } u; + + /* Device index. */ + gctUINT32 devIndex; + +} gcsHAL_PROFILER_INTERFACE; +#endif + +/* State delta record. */ +typedef struct _gcsSTATE_DELTA_RECORD *gcsSTATE_DELTA_RECORD_PTR; +typedef struct _gcsSTATE_DELTA_RECORD { + /* State address. */ + gctUINT address; + + /* State mask. */ + gctUINT32 mask; + + /* State data. */ + gctUINT32 data; +} gcsSTATE_DELTA_RECORD; + +/* State delta. */ +typedef struct _gcsSTATE_DELTA { + /* For debugging: the number of delta in the order of creation. */ + gctUINT num; + + /* + * Main state delta ID. Every time state delta structure gets reinitialized, + * main ID is incremented. If main state ID overflows, all map entry IDs get + * reinitialized to make sure there is no potential erroneous match after + * the overflow. + */ + gctUINT id; + + /* The number of contexts pending modification by the delta. */ + gctINT refCount; + + /* Vertex element count for the delta buffer. */ + gctUINT elementCount; + + /* Number of states currently stored in the record array. */ + gctUINT recordCount; + + /* Record array; holds all modified states in gcsSTATE_DELTA_RECORD. */ + gctUINT64 recordArray; + + /* + * Map entry ID is used for map entry validation. If map entry ID does not + * match the main state delta ID, the entry and the corresponding state are + * considered not in use. + */ + gctUINT64 mapEntryID; + gctUINT mapEntryIDSize; + + /* + * If the map entry ID matches the main state delta ID, index points to + * the state record in the record array. + */ + gctUINT64 mapEntryIndex; + + /* Previous and next state deltas in gcsSTATE_DELTA. */ + gctUINT64 prev; + gctUINT64 next; +} gcsSTATE_DELTA; + +typedef struct _gcsQUEUE { + /* Pointer to next gcsQUEUE structure in gcsQUEUE. */ + gctUINT64 next; + + /* Event information. */ + gcsHAL_INTERFACE iface; +} gcsQUEUE; + +/* A record chunk include multiple records to save allocation. */ +typedef struct _gcsQUEUE_CHUNK { + struct _gcsQUEUE_CHUNK *next; + + gcsQUEUE record[16]; +} gcsQUEUE_CHUNK; + +#ifdef __cplusplus +} +#endif + +#endif /* __gc_hal_driver_shared_h_ */ + + diff --git a/unified-tina/inc/HAL/shared/gc_hal_driver_vg_shared.h b/unified-tina/inc/HAL/shared/gc_hal_driver_vg_shared.h new file mode 100644 index 0000000..ee27479 --- /dev/null +++ b/unified-tina/inc/HAL/shared/gc_hal_driver_vg_shared.h @@ -0,0 +1,208 @@ +/**************************************************************************** +* +* Copyright (c) 2005 - 2023 by Vivante Corp. All rights reserved. +* +* The material in this file is confidential and contains trade secrets +* of Vivante Corporation. This is proprietary information owned by +* Vivante Corporation. No part of this work may be disclosed, +* reproduced, copied, transmitted, or used in any way for any purpose, +* without the express written permission of Vivante Corporation. +* +*****************************************************************************/ + + +/* + * Interface specification between user and kernel level HAL layers. 
+ */ + +#ifndef __gc_hal_driver_vg_shared_h_ +#define __gc_hal_driver_vg_shared_h_ + +#include "gc_hal_types.h" + +#if defined(__QNXNTO__) +#include +#endif + +#ifdef __cplusplus +extern "C" { +#endif + +/****************************************************************************** + ****************************** I/O Control Codes ***************************** + ******************************************************************************/ + +#define gcvHAL_CLASS "galcore" +#define IOCTL_GCHAL_INTERFACE 30000 + +/****************************************************************************** + ******************** Command buffer information structure. ******************* + ******************************************************************************/ + +typedef struct _gcsCOMMAND_BUFFER_INFO *gcsCOMMAND_BUFFER_INFO_PTR; +typedef struct _gcsCOMMAND_BUFFER_INFO { + /* FE command buffer interrupt ID. */ + gctINT32 feBufferInt; + + /* TS overflow interrupt ID. */ + gctINT32 tsOverflowInt; + + /* Alignment and mask for the buffer address. */ + gctUINT addressMask; + gctUINT32 addressAlignment; + + /* Alignment for each command. */ + gctUINT32 commandAlignment; + + /* Number of bytes required by the STATE command. */ + gctUINT32 stateCommandSize; + + /* Number of bytes required by the RESTART command. */ + gctUINT32 restartCommandSize; + + /* Number of bytes required by the FETCH command. */ + gctUINT32 fetchCommandSize; + + /* Number of bytes required by the CALL command. */ + gctUINT32 callCommandSize; + + /* Number of bytes required by the RETURN command. */ + gctUINT32 returnCommandSize; + + /* Number of bytes required by the EVENT command. */ + gctUINT32 eventCommandSize; + + /* Number of bytes required by the END command. */ + gctUINT32 endCommandSize; + + /* Number of bytes reserved at the tail of a static command buffer. */ + gctUINT32 staticTailSize; + + /* Number of bytes reserved at the tail of a dynamic command buffer. */ + gctUINT32 dynamicTailSize; +} gcsCOMMAND_BUFFER_INFO; + +/****************************************************************************** + ******************************* Task Structures ****************************** + ******************************************************************************/ + +typedef struct _gcsTASK_HEADER *gcsTASK_HEADER_PTR; +typedef struct _gcsTASK_HEADER { + /* Task ID. */ + IN gceTASK id; +} gcsTASK_HEADER; + +typedef struct _gcsTASK_LINK *gcsTASK_LINK_PTR; +typedef struct _gcsTASK_LINK { + /* Task ID (gcvTASK_LINK). */ + IN gceTASK id; + + /* Pointer to the next task container. */ + IN gctPOINTER cotainer; + + /* Pointer to the next task from the next task container. */ + IN gcsTASK_HEADER_PTR task; +} gcsTASK_LINK; + +typedef struct _gcsTASK_CLUSTER *gcsTASK_CLUSTER_PTR; +typedef struct _gcsTASK_CLUSTER { + /* Task ID (gcvTASK_CLUSTER). */ + IN gceTASK id; + + /* Number of tasks in the cluster. */ + IN gctUINT taskCount; +} gcsTASK_CLUSTER; + +typedef struct _gcsTASK_INCREMENT *gcsTASK_INCREMENT_PTR; +typedef struct _gcsTASK_INCREMENT { + /* Task ID (gcvTASK_INCREMENT). */ + IN gceTASK id; + + /* Address of the variable to increment. */ + IN gctUINT32 address; +} gcsTASK_INCREMENT; + +typedef struct _gcsTASK_DECREMENT *gcsTASK_DECREMENT_PTR; +typedef struct _gcsTASK_DECREMENT { + /* Task ID (gcvTASK_DECREMENT). */ + IN gceTASK id; + + /* Address of the variable to decrement. 
*/ + IN gctUINT32 address; +} gcsTASK_DECREMENT; + +typedef struct _gcsTASK_SIGNAL *gcsTASK_SIGNAL_PTR; +typedef struct _gcsTASK_SIGNAL { + /* Task ID (gcvTASK_SIGNAL). */ + IN gceTASK id; + + /* Process owning the signal. */ + IN gctHANDLE process; + + /* Signal handle to signal. */ + IN gctSIGNAL signal; + +#if defined(__QNXNTO__) + IN struct sigevent event; + IN gctINT32 rcvid; +#endif +} gcsTASK_SIGNAL; + +typedef struct _gcsTASK_LOCKDOWN *gcsTASK_LOCKDOWN_PTR; +typedef struct _gcsTASK_LOCKDOWN { + /* Task ID (gcvTASK_LOCKDOWN). */ + IN gceTASK id; + + /* Address of the user space counter. */ + IN gctUINT32 userCounter; + + /* Address of the kernel space counter. */ + IN gctUINT32 kernelCounter; + + /* Process owning the signal. */ + IN gctHANDLE process; + + /* Signal handle to signal. */ + IN gctSIGNAL signal; +} gcsTASK_LOCKDOWN; + +typedef struct _gcsTASK_UNLOCK_VIDEO_MEMORY *gcsTASK_UNLOCK_VIDEO_MEMORY_PTR; +typedef struct _gcsTASK_UNLOCK_VIDEO_MEMORY { + /* Task ID (gcvTASK_UNLOCK_VIDEO_MEMORY). */ + IN gceTASK id; + + /* Allocated video memory. */ + IN gctUINT64 node; +} gcsTASK_UNLOCK_VIDEO_MEMORY; + +typedef struct _gcsTASK_FREE_VIDEO_MEMORY *gcsTASK_FREE_VIDEO_MEMORY_PTR; +typedef struct _gcsTASK_FREE_VIDEO_MEMORY { + /* Task ID (gcvTASK_FREE_VIDEO_MEMORY). */ + IN gceTASK id; + + /* Allocated video memory. */ + IN gctUINT64 node; +} gcsTASK_FREE_VIDEO_MEMORY; + +typedef struct _gcsTASK_FREE_CONTIGUOUS_MEMORY *gcsTASK_FREE_CONTIGUOUS_MEMORY_PTR; +typedef struct _gcsTASK_FREE_CONTIGUOUS_MEMORY { + /* Task ID (gcvTASK_FREE_CONTIGUOUS_MEMORY). */ + IN gceTASK id; + + /* Number of bytes allocated. */ + IN gctSIZE_T bytes; + + /* Physical address of allocation. */ + IN gctPHYS_ADDR physical; + + /* Logical address of allocation. */ + IN gctPOINTER logical; +} gcsTASK_FREE_CONTIGUOUS_MEMORY; + +#ifdef __cplusplus +} +#endif + +#endif /* __gc_hal_driver_shared_h_ */ + + diff --git a/unified-tina/inc/HAL/shared/gc_hal_enum_shared.h b/unified-tina/inc/HAL/shared/gc_hal_enum_shared.h new file mode 100644 index 0000000..e78e36e --- /dev/null +++ b/unified-tina/inc/HAL/shared/gc_hal_enum_shared.h @@ -0,0 +1,2215 @@ +/**************************************************************************** +* +* Copyright (c) 2005 - 2023 by Vivante Corp. All rights reserved. +* +* The material in this file is confidential and contains trade secrets +* of Vivante Corporation. This is proprietary information owned by +* Vivante Corporation. No part of this work may be disclosed, +* reproduced, copied, transmitted, or used in any way for any purpose, +* without the express written permission of Vivante Corporation. +* +*****************************************************************************/ + + +#ifndef __gc_hal_shared_enum_h_ +#define __gc_hal_shared_enum_h_ + +#ifdef __cplusplus +extern "C" { +#endif + +/* Chip models. 
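+ * The enumerant value mirrors the core's model number, e.g. a GC7000 class core reports chipModel == gcv7000 (0x7000) in gcsHAL_QUERY_CHIP_IDENTITY, with chipRevision distinguishing individual revisions.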
*/ +typedef enum _gceCHIPMODEL { + gcv200 = 0x0200, + gcv300 = 0x0300, + gcv320 = 0x0320, + gcv328 = 0x0328, + gcv350 = 0x0350, + gcv355 = 0x0355, + gcv400 = 0x0400, + gcv410 = 0x0410, + gcv420 = 0x0420, + gcv428 = 0x0428, + gcv450 = 0x0450, + gcv500 = 0x0500, + gcv520 = 0x0520, + gcv530 = 0x0530, + gcv600 = 0x0600, + gcv620 = 0x0620, + gcv700 = 0x0700, + gcv800 = 0x0800, + gcv820 = 0x0820, + gcv860 = 0x0860, + gcv880 = 0x0880, + gcv900 = 0x0900, + gcv1000 = 0x1000, + gcv1500 = 0x1500, + gcv2000 = 0x2000, + gcv2100 = 0x2100, + gcv2200 = 0x2200, + gcv2500 = 0x2500, + gcv3000 = 0x3000, + gcv4000 = 0x4000, + gcv5000 = 0x5000, + gcv5200 = 0x5200, + gcv6400 = 0x6400, + gcv7000 = 0x7000, + gcv7400 = 0x7400, + gcv8000 = 0x8000, + gcv8400 = 0x8400, + gcv8800 = 0x8800, + gcv9100 = 0x9100, + gcv9200 = 0x9200, +} gceCHIPMODEL; + +/* Chip features. */ +typedef enum _gceFEATURE { + gcvFEATURE_PIPE_2D = 0, + gcvFEATURE_PIPE_3D, + gcvFEATURE_PIPE_VG, + gcvFEATURE_DC, + gcvFEATURE_HIGH_DYNAMIC_RANGE, + gcvFEATURE_MODULE_CG, + gcvFEATURE_MIN_AREA, + gcvFEATURE_BUFFER_INTERLEAVING, + gcvFEATURE_BYTE_WRITE_2D, + gcvFEATURE_ENDIANNESS_CONFIG, + gcvFEATURE_DUAL_RETURN_BUS, + gcvFEATURE_DEBUG_MODE, + gcvFEATURE_YUY2_RENDER_TARGET, + gcvFEATURE_FRAGMENT_PROCESSOR, + gcvFEATURE_2DPE20, + gcvFEATURE_FAST_CLEAR, + gcvFEATURE_YUV420_TILER, + gcvFEATURE_YUY2_AVERAGING, + gcvFEATURE_FLIP_Y, + gcvFEATURE_EARLY_Z, + gcvFEATURE_COMPRESSION, + gcvFEATURE_MSAA, + gcvFEATURE_SPECIAL_ANTI_ALIASING, + gcvFEATURE_SPECIAL_MSAA_LOD, + gcvFEATURE_422_TEXTURE_COMPRESSION, + gcvFEATURE_DXT_TEXTURE_COMPRESSION, + gcvFEATURE_ETC1_TEXTURE_COMPRESSION, + gcvFEATURE_TX_ETC2_COMPRESSION, + gcvFEATURE_CORRECT_TEXTURE_CONVERTER, + gcvFEATURE_TEXTURE_8K, + gcvFEATURE_SCALER, + gcvFEATURE_YUV420_SCALER, + gcvFEATURE_SHADER_HAS_W, + gcvFEATURE_SHADER_HAS_SIGN, + gcvFEATURE_SHADER_HAS_FLOOR, + gcvFEATURE_SHADER_HAS_CEIL, + gcvFEATURE_SHADER_HAS_SQRT, + gcvFEATURE_SHADER_HAS_TRIG, + gcvFEATURE_SH_SUPERSCALAR_ARCH, + gcvFEATURE_HZ, + gcvFEATURE_CORRECT_STENCIL, + gcvFEATURE_VG20, + gcvFEATURE_VG_FILTER, + gcvFEATURE_VG21, + gcvFEATURE_VG_DOUBLE_BUFFER, + gcvFEATURE_VG_RESOLUTION_8K, + gcvFEATURE_MC20, + gcvFEATURE_SUPER_TILED, + gcvFEATURE_FAST_CLEAR_FLUSH, + gcvFEATURE_2D_FILTERBLIT_PLUS_ALPHABLEND, + gcvFEATURE_2D_DITHER, + gcvFEATURE_2D_A8_TARGET, + gcvFEATURE_2D_A8_NO_ALPHA, + gcvFEATURE_2D_FILTERBLIT_FULLROTATION, + gcvFEATURE_2D_BITBLIT_FULLROTATION, + gcvFEATURE_WIDE_LINE, + gcvFEATURE_FC_FLUSH_STALL, + gcvFEATURE_FULL_DIRECTFB, + gcvFEATURE_HALF_FLOAT_PIPE, + gcvFEATURE_LINE_LOOP, + gcvFEATURE_2D_YUV_BLIT, + gcvFEATURE_2D_TILING, + gcvFEATURE_NON_POWER_OF_TWO, + gcvFEATURE_3D_TEXTURE, + gcvFEATURE_TEXTURE_ARRAY, + gcvFEATURE_TILE_FILLER, + gcvFEATURE_LOGIC_OP, + gcvFEATURE_MIXED_STREAMS, + gcvFEATURE_2D_MULTI_SOURCE_BLT, + gcvFEATURE_END_EVENT, + gcvFEATURE_VERTEX_10_10_10_2, + gcvFEATURE_TEXTURE_10_10_10_2, + gcvFEATURE_TEXTURE_ANISOTROPIC_FILTERING, + gcvFEATURE_TEXTURE_FLOAT_HALF_FLOAT, + gcvFEATURE_2D_ROTATION_STALL_FIX, + gcvFEATURE_2D_MULTI_SOURCE_BLT_EX, + gcvFEATURE_BUG_FIXES10, + gcvFEATURE_2D_MINOR_TILING, + gcvFEATURE_TEX_COMPRRESSION_SUPERTILED, /* Supertiled compressed textures are supported. 
*/ + gcvFEATURE_FAST_MSAA, + gcvFEATURE_BUG_FIXED_INDEXED_TRIANGLE_STRIP, + gcvFEATURE_TEXTURE_TILE_STATUS_READ, + gcvFEATURE_DEPTH_BIAS_FIX, + gcvFEATURE_RECT_PRIMITIVE, + gcvFEATURE_BUG_FIXES11, + gcvFEATURE_SUPERTILED_TEXTURE, + gcvFEATURE_2D_NO_COLORBRUSH_INDEX8, + gcvFEATURE_RS_YUV_TARGET, + gcvFEATURE_2D_FC_SOURCE, /* For tilestatus compression feature*/ + gcvFEATURE_2D_CC_NOAA_SOURCE, + gcvFEATURE_PE_DITHER_FIX, + gcvFEATURE_2D_YUV_SEPARATE_STRIDE, + gcvFEATURE_FRUSTUM_CLIP_FIX, + gcvFEATURE_TEXTURE_SWIZZLE, + gcvFEATURE_PRIMITIVE_RESTART, + gcvFEATURE_TEXTURE_LINEAR, + gcvFEATURE_TEXTURE_YUV_ASSEMBLER, + gcvFEATURE_LINEAR_RENDER_TARGET, + gcvFEATURE_SHADER_HAS_ATOMIC, + gcvFEATURE_SHADER_HAS_INSTRUCTION_CACHE, + gcvFEATURE_SHADER_ENHANCEMENTS2, + gcvFEATURE_BUG_FIXES7, + gcvFEATURE_SHADER_HAS_RTNE, + gcvFEATURE_SHADER_HAS_EXTRA_INSTRUCTIONS2, + gcvFEATURE_SHADER_ENHANCEMENTS3, + gcvFEATURE_DYNAMIC_FREQUENCY_SCALING, + gcvFEATURE_SINGLE_BUFFER, + gcvFEATURE_OCCLUSION_QUERY, + gcvFEATURE_2D_GAMMA, + gcvFEATURE_2D_COLOR_SPACE_CONVERSION, + gcvFEATURE_2D_SUPER_TILE_VERSION, + gcvFEATURE_HALTI0, + gcvFEATURE_HALTI1, + gcvFEATURE_HALTI2, + gcvFEATURE_SUPPORT_GCREGTX, + gcvFEATURE_2D_MIRROR_EXTENSION, + gcvFEATURE_TEXTURE_ASTC, + gcvFEATURE_TEXTURE_ASTC_DECODE_FIX, + gcvFEATURE_TEXTURE_ASTC_BASE_LOD_FIX, + gcvFEATURE_2D_SUPER_TILE_V1, + gcvFEATURE_2D_SUPER_TILE_V2, + gcvFEATURE_2D_SUPER_TILE_V3, + gcvFEATURE_2D_MULTI_SOURCE_BLT_EX2, + gcvFEATURE_NEW_RA, + gcvFEATURE_BUG_FIXED_IMPLICIT_PRIMITIVE_RESTART, + gcvFEATURE_PE_MULTI_RT_BLEND_ENABLE_CONTROL, + gcvFEATURE_SMALL_MSAA, /* An upgraded version of Fast MSAA */ + gcvFEATURE_VERTEX_INST_ID_AS_ATTRIBUTE, + gcvFEATURE_DUAL_16, + gcvFEATURE_BRANCH_ON_IMMEDIATE_REG, + gcvFEATURE_2D_COMPRESSION, + gcvFEATURE_TPC_COMPRESSION, + gcvFEATURE_TPCV11_COMPRESSION, + gcvFEATURE_DEC_COMPRESSION, + gcvFEATURE_DEC300_COMPRESSION, + gcvFEATURE_DEC400_COMPRESSION, + /*Its a new core for DEC400 compression*/ + gcvFEATURE_DEC400EX_COMPRESSION, + gcvFEATURE_DEC_TPC_COMPRESSION, + gcvFEATURE_DEC_COMPRESSION_TILE_NV12_8BIT, + gcvFEATURE_DEC_COMPRESSION_TILE_NV12_10BIT, + gcvFEATURE_2D_OPF_YUV_OUTPUT, + gcvFEATURE_2D_FILTERBLIT_A8_ALPHA, + gcvFEATURE_2D_MULTI_SRC_BLT_TO_UNIFIED_DST_RECT, + gcvFEATURE_2D_MULTI_SRC_BLT_BILINEAR_FILTER, + gcvFEATURE_2D_MULTI_SRC_BLT_1_5_ENHANCEMENT, + gcvFEATURE_V2_COMPRESSION_Z16_FIX, + gcvFEATURE_VERTEX_INST_ID_AS_INTEGER, + gcvFEATURE_2D_YUV_MODE, + gcvFEATURE_2D_CACHE_128B256BPERLINE, + gcvFEATURE_2D_SEPARATE_CACHE, + gcvFEATURE_2D_MAJOR_SUPER_TILE, + gcvFEATURE_2D_V4COMPRESSION, + gcvFEATURE_2D_VMSAA, + gcvFEATURE_2D_10BIT_OUTPUT_LINEAR, + gcvFEATURE_2D_YUV420_OUTPUT_LINEAR, + gcvFEATURE_ACE, + gcvFEATURE_NO_YUV420_SOURCE,/* unsupported source with three planes */ + gcvFEATURE_COLOR_COMPRESSION, + gcvFEATURE_32BPP_COMPONENT_TEXTURE_CHANNEL_SWIZZLE, + gcvFEATURE_64BPP_HW_CLEAR_SUPPORT, + gcvFEATURE_TX_LERP_PRECISION_FIX, + gcvFEATURE_COMPRESSION_V2, + gcvFEATURE_MMU, + gcvFEATURE_COMPRESSION_V3, + gcvFEATURE_TX_DECOMPRESSOR, + gcvFEATURE_MRT_TILE_STATUS_BUFFER, + gcvFEATURE_COMPRESSION_V1, + gcvFEATURE_V1_COMPRESSION_Z16_DECOMPRESS_FIX, + gcvFEATURE_RTT, + gcvFEATURE_GENERIC_ATTRIB, + gcvFEATURE_2D_ONE_PASS_FILTER, + gcvFEATURE_2D_ONE_PASS_FILTER_TAP, + gcvFEATURE_2D_POST_FLIP, + gcvFEATURE_2D_PIXEL_ALIGNMENT, + gcvFEATURE_CORRECT_AUTO_DISABLE_COUNT, + gcvFEATURE_CORRECT_AUTO_DISABLE_COUNT_WIDTH, + gcvFEATURE_8K_RT, + gcvFEATURE_HALTI3, + gcvFEATURE_EEZ, + gcvFEATURE_INTEGER_SIGNEXT_FIX, + gcvFEATURE_PSOUTPUT_MAPPING, + 
gcvFEATURE_8K_RT_FIX, + gcvFEATURE_TX_TILE_STATUS_MAPPING, + gcvFEATURE_SRGB_RT_SUPPORT, + gcvFEATURE_TEXTURE_16K, + gcvFEATURE_PA_FARZCLIPPING_FIX, + gcvFEATURE_PE_DITHER_COLORMASK_FIX, + gcvFEATURE_ZSCALE_FIX, + gcvFEATURE_MULTI_PIXELPIPES, + gcvFEATURE_PIPE_CL, + gcvFEATURE_BUG_FIXES18, + gcvFEATURE_UNIFIED_SAMPLERS, + gcvFEATURE_CL_PS_WALKER, + gcvFEATURE_NEW_HZ, + gcvFEATURE_TX_FRAC_PRECISION_6BIT, + gcvFEATURE_SH_INSTRUCTION_PREFETCH, + gcvFEATURE_PROBE, + gcvFEATURE_SINGLE_PIPE_HALTI1, + gcvFEATURE_BUG_FIXES8, /* This HW feature is wrong, we can't use this to check integer branch!!!*/ + gcvFEATURE_2D_ALL_QUAD, + gcvFEATURE_SEPARATE_SRC_DST, + gcvFEATURE_TX_HOR_ALIGN_SEL, + gcvFEATURE_HALTI4, + gcvFEATURE_MRT_FC_FIX, + gcvFEATURE_TESSELLATION, + gcvFEATURE_DRAW_INDIRECT, + gcvFEATURE_COMPUTE_INDIRECT, + gcvFEATURE_MSAA_TEXTURE, + gcvFEATURE_STENCIL_TEXTURE, + gcvFEATURE_S8_ONLY_RENDERING, + gcvFEATURE_D24S8_SAMPLE_STENCIL, + gcvFEATURE_ADVANCED_BLEND_MODE_PART0, + gcvFEATURE_RA_DEPTH_WRITE, + gcvFEATURE_RS_DS_DOWNSAMPLE_NATIVE_SUPPORT, + gcvFEATURE_S8_MSAA_COMPRESSION, + gcvFEATURE_MSAA_FRAGMENT_OPERATION, + gcvFEATURE_FE_START_VERTEX_SUPPORT, + gcvFEATURE_DIVISOR_STREAM_ADDR_FIX, + gcvFEATURE_ZERO_ATTRIB_SUPPORT, + gcvFEATURE_DANGLING_VERTEX_FIX, + gcvFEATURE_PE_DISABLE_COLOR_PIPE, + gcvFEATURE_FE_12bit_stride, + gcvFEATURE_TX_LOD_GUARDBAND, + gcvFEATURE_HAS_PRODUCTID, + gcvFEATURE_INTEGER32_FIX, + gcvFEATURE_TEXTURE_GATHER, + gcvFEATURE_IMG_INSTRUCTION, + gcvFEATURE_HELPER_INVOCATION, + gcvFEATURE_NO_USER_CSC, + /* ANDROID_ONLY_REMOVED,remove some non-android features from gc520c: + * (1)Monochrome expansion. + * (2)Remove 3D compression and keep read tile status and tile input. + * (3)ROP2, ROP3, ROP4. Android only needs one solid color as source. Transparency by monochrome mask, pattern mask, src/dst color key, chroma key. + * (4)2x2 minor tile. + * (5)7 & 9 tap OPF. + * (6)User-defined CSC. + * (7)MultiSrc walker v1.5. + * (8)SuperTile V1. + * (9)Big endian. + */ + gcvFEATURE_ANDROID_ONLY_REMOVED, + gcvFEATURE_V2_MSAA_COHERENCY_FIX, + gcvFEATURE_BLOCK_SIZE_16x16, + gcvFEATURE_TX_SUPPORT_DEC, + gcvFEATURE_RSBLT_MSAA_DECOMPRESSION, + gcvFEATURE_TILEFILLER_32TILE_ALIGNED, + gcvFEATURE_GEOMETRY_SHADER, + gcvFEATURE_HALTI5, + gcvFEATURE_PIPELINE_32_ATTRIBUTES, + gcvFEATURE_USC, + gcvFEATURE_CUBEMAP_ARRAY, + gcvFEATURE_TX_DESCRIPTOR, + gcvFEATURE_SEPARATE_RT_CTRL, + gcvFEATURE_RENDER_ARRAY, + gcvFEATURE_BLT_ENGINE, + gcvFEATURE_TEXTURE_BUFFER, + gcvFEATURE_GS_SUPPORT_EMIT, + gcvFEATURE_SAMPLER_BASE_OFFSET, + gcvFEATURE_IMAGE_OUT_BOUNDARY_FIX, + gcvFEATURE_TX_BORDER_CLAMP, + gcvFEATURE_MSAA_SHADING, + gcvFEATURE_ADVANCED_SH_INST, + gcvFEATURE_LOD_FIX_FOR_BASELEVEL, + gcvFEATURE_MULTIDRAW_INDIRECT, + gcvFEATURE_DRAW_ELEMENTS_BASE_VERTEX, + gcvFEATURE_NEW_STEERING_AND_ICACHE_FLUSH, /* Steering base on register base. Trigger-style Icache flush state. 
*/ + gcvFEATURE_PE_DITHER_FIX2, + gcvFEATURE_INDEX_FETCH_FIX, + gcvFEATURE_TEX_BASELOD, + gcvFEATURE_TEX_SEAMLESS_CUBE, + gcvFEATURE_TEX_ETC2, + gcvFEATURE_TEX_CUBE_BORDER_LOD, + gcvFEATURE_FE_ALLOW_STALL_PREFETCH_ENG, + gcvFEATURE_TX_8BPP_TS_FIX, + gcvFEATURE_HW_TFB, + gcvFEATURE_HW_TFB_PERF_FIX, + gcvFEATURE_COMPRESSION_V4, + gcvFEATURE_FENCE_32BIT, + gcvFEATURE_FENCE_64BIT, + gcvFEATURE_R8_UNORM, + gcvFEATURE_TX_DEFAULT_VALUE_FIX, + gcvFEATURE_TX_8bit_UVFrac, + gcvFEATURE_TX_MIPFILTER_NONE_FIX, + gcvFEATURE_MC_STENCIL_CTRL, + gcvFEATURE_DEPTH_MATH_FIX, + gcvFEATURE_PE_B2B_PIXEL_FIX, + gcvFEATURE_TEXTURE_GATHER_OFFSETS, + gcvFEATURE_TEX_CACHE_FLUSH_FIX, + gcvFEATURE_WIDELINE_HELPER_FIX, + gcvFEATURE_LINE_DIAMOND_RULE_FIX, + gcvFEATURE_MULTIGPU_SYNC_V2, + gcvFEATURE_DRAW_ID, + gcvFEATURE_SNAPPAGE_CMD, + gcvFEATURE_COMMAND_PREFETCH, + gcvFEATURE_SAMPLEPOS_SWIZZLE_FIX, + gcvFEATURE_SELECTMAP_SRC0_SWIZZLE_FIX, + gcvFEATURE_LOADATTR_OOB_FIX, + gcvFEATURE_RA_DEPTH_WRITE_MSAA1X_FIX, + gcvFEATURE_MRT_8BIT_DUAL_PIPE_FIX, + gcvFEATURE_BUG_FIXES1, + gcvFEATURE_MULTI_SOURCE_BLT, + gcvFEATURE_ZCOMPRESSION, + gcvFEATURE_DITHER_AND_FILTER_PLUS_ALPHA_2D, + gcvFEATURE_ONE_PASS_2D_FILTER, + gcvFEATURE_TX_FILTER, + gcvFEATURE_CHIPENABLE_LINK, + gcvFEATURE_TEXTURE_BIAS_LOD_FIX, + gcvFEATURE_USE_GL_Z, + gcvFEATURE_SUPPORT_INTEGER, + /* PARTLY_SUPPORT_INTEGER_BRANCH: + * chips can support all integer types for compare instructions, e.g, CMP, SELECT. + * FULLLY_SUPPORT_INTEGER_BRANCH: + * chips can support all integer types for JMP instruction. + * If PARTLY_SUPPORT_INTEGER_BRANCH is TRUE but FULLLY_SUPPORT_INTEGER_BRANCH is FALSE, + * then this chip can only support INT32/UINT32 JMP instruction. + */ + gcvFEATURE_PARTLY_SUPPORT_INTEGER_BRANCH, + gcvFEATURE_FULLLY_SUPPORT_INTEGER_BRANCH, + gcvFEATURE_SUPPORT_INTEGER_ATTRIBUTE, + gcvFEATURE_SUPPORT_MOVAI, + gcvFEATURE_NEED_FIX_FOR_CL_X, + gcvFEATURE_NEED_FIX_FOR_CL_XE, + gcvFEATURE_HAS_OUTPUT_COUNT_FIX, + gcvFEATURE_VARYING_PACKING_LIMITATION, + gcvFEATURE_HIGHP_VARYING_SHIFT, + gcvFEATURE_BUG_FIXES2, + gcvFEATURE_64K_L2_CACHE, + gcvFEATURE_128BTILE, + gcvFEATURE_ADVANCED_BLEND_OPT, + gcvFEATURE_SNAPPAGE_CMD_FIX, + gcvFEATURE_L2_CACHE_FOR_2D_420, + gcvFEATURE_TILE_STATUS_2BITS, + gcvFEATURE_EXTRA_SHADER_INSTRUCTIONS0, + gcvFEATURE_EXTRA_SHADER_INSTRUCTIONS1, + gcvFEATURE_EXTRA_SHADER_INSTRUCTIONS2, + gcvFEATURE_MEDIUM_PRECISION, + gcvFEATURE_FE20_BIT_INDEX, + gcvFEATURE_BUG_FIXES4, + gcvFEATURE_BUG_FIXES12, + gcvFEATURE_VMSAA, + gcvFEATURE_ROBUST_ATOMIC, + gcvFEATURE_32F_COLORMASK_FIX, + gcvFEATURE_NEW_GPIPE, + gcvFEATURE_RS_NEW_BASEADDR, + gcvFEATURE_TX_DXT, + gcvFEATURE_SH_FLAT_INTERPOLATION_DUAL16_FIX, + gcvFEATURE_EVIS, + gcvFEATURE_SH_SUPPORT_V4, + gcvFEATURE_SH_SUPPORT_ALPHA_KILL, + gcvFEATURE_PE_NO_ALPHA_TEST, + gcvFEATURE_SH_SNAP2PAGE_MAXPAGES_FIX, + gcvFEATURE_USC_FULLCACHE_FIX, + gcvFEATURE_PE_64bit_FENCE_FIX, + gcvFEATURE_BLT_8bit_256TILE_FC_FIX, + gcvFEATURE_PE_RGBA16I_FIX, + gcvFEATURE_BLT_64bpp_MASKED_CLEAR_FIX, + gcvFEATURE_SH_PSO_MSAA1x_FIX, + gcvFEATURE_USC_ATOMIC_FIX, + gcvFEATURE_INDEX_CONST_ON_B0, + gcvFEATURE_SH_NO_ONECONST_LIMIT, + gcvFEATURE_EVIS_NO_ABSDIFF, + gcvFEATURE_EVIS_NO_BITREPLACE, + gcvFEATURE_EVIS_NO_BOXFILTER, + gcvFEATURE_EVIS_NO_CORDIAC, + gcvFEATURE_EVIS_NO_DP32, + gcvFEATURE_EVIS_NO_FILTER, + gcvFEATURE_EVIS_NO_IADD, + gcvFEATURE_EVIS_NO_SELECTADD, + gcvFEATURE_EVIS_LERP_7OUTPUT, + gcvFEATURE_EVIS_ACCSQ_8OUTPUT, + gcvFEATURE_ROBUSTNESS, + gcvFEATURE_SECURITY, + gcvFEATURE_TX_YUV_ASSEMBLER_10BIT, + 
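/*
 * Sketch of the integer-branch decision table documented earlier in this enum
 * (PARTLY vs. FULLLY support). Illustrative only; hal_has_feature() is a
 * hypothetical query helper, not an API declared in this header.
 *
 *     typedef enum { JMP_INT_NONE, JMP_INT32_ONLY, JMP_INT_ALL } jmp_int_caps;
 *
 *     static jmp_int_caps query_jmp_int_caps(void)
 *     {
 *         if (hal_has_feature(gcvFEATURE_FULLLY_SUPPORT_INTEGER_BRANCH))
 *             return JMP_INT_ALL;     // JMP works for every integer type
 *         if (hal_has_feature(gcvFEATURE_PARTLY_SUPPORT_INTEGER_BRANCH))
 *             return JMP_INT32_ONLY;  // only INT32/UINT32 JMP is safe
 *         return JMP_INT_NONE;        // neither flag: avoid integer JMP
 *     }
 */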
gcvFEATURE_USC_GOS_ADDR_FIX, + gcvFEATURE_SUPPORT_MSAA2X, + gcvFEATURE_TX_DESC_CACHE_CLOCKGATE_FIX, + gcvFEATURE_TX_INTEGER_COORDINATE, + gcvFEATURE_PSIO_SAMPLEMASK_IN_R0ZW_FIX, + gcvFEATURE_MULTI_CORE_BLOCK_SET_CONFIG, + gcvFEATURE_SH_IMG_LDST_ON_TEMP, + gcvFEATURE_TX_INTEGER_COORDINATE_V2, + gcvFEATURE_COMPUTE_ONLY, + gcvFEATURE_SH_IMG_LDST_CLAMP, + gcvFEATURE_SH_ICACHE_ALLOC_COUNT_FIX, + gcvFEATURE_MSAA_OQ_FIX, + gcvFEATURE_PE_ENHANCEMENTS2, + gcvFEATURE_PSIO_MSAA_CL_FIX, + gcvFEATURE_FE_NEED_DUMMYDRAW, + gcvFEATURE_MULTI_CLUSTER, + gcvFEATURE_PSIO_INTERLOCK, + gcvFEATURE_BLIT_COMPRESS_DEST, + gcvFEATURE_SH_MULTI_WG_PACK, + gcvFEATURE_FE_ROBUST_FIX, + gcvFEATURE_TX_ASTC_MULTISLICE_FIX, + gcvFEATURE_PSIO_DUAL16_32bpc_FIX, + gcvFEATURE_LS_SUPPORT_PER_COMP_DEPENDENCY, + gcvFEATURE_COMPRESSION_DEC400, + gcvFEATURE_SH_TEXLD_U_FIX, + gcvFEATURE_TX_FLUSH_L1CACHE, + gcvFEATURE_USC_DEFER_FILL_FIX, + gcvFEATURE_MC_FCCACHE_BYTEMASK, + gcvFEATURE_SH_MULTI_WG_PACK_FIX, + gcvFEATURE_FE_PATCHLIST_FETCH_FIX, + gcvFEATURE_RA_CG_FIX, + gcvFEATURE_EVIS_VX2, + gcvFEATURE_SH_HALF_DEPENDENCY_FIX, + gcvFEATURE_FE_BASEINSTANCE, + gcvFEATURE_FE_COMPUREINDIRECT_SKIP_UNIFORM, + gcvFEATURE_SH_CLOCK_GATE_FIX, + gcvFEATURE_GPIPE_CLOCK_GATE_FIX, + gcvFEATURE_TP_ENGINE, + gcvFEATURE_TX_BORDER_CLAMP_FIX, + gcvFEATURE_SH_IMAGE_LD_LAST_PIXEL_FIX, + gcvFEATURE_MULTI_CORE_BLOCK_SET_CONFIG2, + gcvFEATURE_MULTIGPU_SYNC_V3, + gcvFEATURE_PE_VMSAA_COVERAGE_CACHE_FIX, + gcvFEATURE_SECURITY_AHB, + gcvFEATURE_TX_LERP_LESS_BIT, + gcvFEATURE_SMALL_BATCH, + gcvFEATURE_SH_IDIV0_SWZL_EHS, + gcvFEATURE_SH_CMPLX, + gcvFEATURE_VIP_V7, + gcvFEATURE_SH_GM_ENDIAN, + gcvFEATURE_SH_GM_USC_UNALLOC, + gcvFEATURE_SH_END_OF_BB, + gcvFEATURE_ASYNC_BLIT, + gcvFEATURE_ASYNC_FE_FENCE_FIX, + gcvFEATURE_PSCS_THROTTLE, + gcvFEATURE_SEPARATE_LS, + gcvFEATURE_PA_VARYING_COMPONENT_TOGGLE_FIX, + gcvFEATURE_TX_MULTISAMPLER_FC_FIX, + gcvFEATURE_WIDELINE_TRIANGLE_EMU, + gcvFEATURE_FENCE, + gcvFEATURE_MCFE, + gcvFEATURE_NN_INTERLEAVE8, + gcvFEATURE_TP_REORDER, + gcvFEATURE_TP_RTNE, + gcvFEATURE_TP_LRN, + gcvFEATURE_TP_ROI_POOLING, + gcvFEATURE_TP_MAX_POOLING_STRIDE1, + gcvFEATURE_NN_BRICK_MODE, + gcvFEATURE_NN_BORDER_MODE, + gcvFEATURE_NN_FP16_ALU, + gcvFEATURE_NN_BF16_ALU, + gcvFEATURE_NN_INT16_ALU, + gcvFEATURE_NN_ZDP3, + gcvFEATURE_NN_ZDP6, + gcvFEATURE_PE_DEPTH_ONLY_OQFIX, + gcvFEATURE_TX_SNORM_SUPPORT, + gcvFEATURE_HWMANAGED_LS, + gcvFEATURE_SH_SCATTER_GATHER, + gcvFEATURE_NN_POWER_ISOLATION, + gcvFEATURE_SWTILING_PHASE1, + gcvFEATURE_SWTILING_PHASE2, + gcvFEATURE_SWTILING_PHASE3, + gcvFEATURE_TF_QUANTIZATION, + gcvFEATURE_NN_XYDP9, + gcvFEATURE_TP_SIMPLE_INT16, + gcvFEATURE_TP_REAL_INT16, + gcvFEATURE_NN_FIRST_PIXEL_POOLING, + gcvFEATURE_NN_STRIDE_SUPPORT, + gcvFEATURE_NN_XYDP6, + gcvFEATURE_NN_XYDP0, + gcvFEATURE_TP_REORDER_FIX, + gcvFEATURE_NN_CONV1x1_PERF_FIX, + gcvFEATURE_NN_CACHELINE_MODE_PERF_FIX, + gcvFEATURE_NN_PER3DTILE_BUBBLE_FIX, + gcvFEATURE_SH_IO_CG_FIX, + gcvFEATURE_USC_STAY_LRU, + gcvFEATURE_NN_NONZERO_MIRROR_BORDER, + gcvFEATURE_NN_COEF_DECOMPRESS_PERF2X, + gcvFEATURE_4BIT_INPUT, + gcvFEATURE_COEF_COMPRESSION_ENHANCEMENT, + gcvFEATURE_NN_ZDP3_NO_COMPRESS_FIX, + gcvFEATURE_NN_ASYNC_COPY_PERF_FIX, + gcvFEATURE_OCB_COUNTER, + gcvFEATURE_NN_ZXDP3_KERNEL_READ_CONFLICT_FIX, + gcvFEATURE_NN_FULLCACHE_KERNEL_INTERLEAVE_FIX, + gcvFEATURE_DR_JD_DIFF_CONDITION_FOR_CACHELINE_MODE_PRE_FIX, + gcvFEATURE_USC_BOTTLENECK_FIX, + gcvFEATURE_OCB_REMAP_PHYSICAL_ADDRESS, + gcvFEATURE_BIT_NN_HW_LIMITATION_NATIVE_KER_1x2_2x1, + 
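/*
 * Many entries above end in _FIX, which appears to indicate that a known
 * hardware erratum is corrected on this chip revision; when such a bit is
 * absent the driver would normally keep its software workaround enabled.
 * Illustrative sketch only; hal_has_feature() is a hypothetical helper, not
 * an API declared in this header.
 *
 *     static int need_texld_u_workaround(void)
 *     {
 *         // Keep the workaround unless the hardware advertises the fix.
 *         return !hal_has_feature(gcvFEATURE_SH_TEXLD_U_FIX);
 *     }
 */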
gcvFEATURE_NN_SLICE_PADDING_TO_64BYTE_ALIGN, + gcvFEATURE_NN_DW_1x1_CONV_MERGE, + gcvFEATURE_TP_REORDER_LAYER_SUSPEND_FIX, + gcvFEATURE_KERNEL_VIP_SRAM_READ_BW_LIMITATION_FIX, + gcvFEATURE_IMG_POP_PIPELINE_PAUSE_FIX, + gcvFEATURE_NN_SLOW_OUTPUT, + gcvFEATURE_NO_NARROW_POST_PROCESS_PIPE, + gcvFEATURE_TP_NN_PROBE, + gcvFEATURE_TP_23BITS_POST_MULTIPLIER, + gcvFEATURE_NN_TRANSPOSE, + gcvFEATURE_OUTIMAGE_X_BITWIDTH_LIMIT_FOR_NN_TRANSPOSE_FIX, + gcvFEATURE_TP_BFLOAT16, + gcvFEATURE_EVIS2_FLOP_RESET_FIX, + gcvFEATURE_USC_ASYNC_CP_RTN_FLOP_RESET_FIX, + gcvFEATURE_USC_EVICT_CTRL_FIFO_FLOP_RESET_FIX, + gcvFEATURE_NEGATIVE_POST_SHIFT_FIX, + gcvFEATURE_NN_COMMAND_KERNEL_REQUEST_CONFICT_FIX, + gcvFEATURE_NN_LEAKY_RELU, + gcvFEATURE_NN_PRELU, + gcvFEATURE_NN_NATIVE_STRIDE_TWO, + gcvFEATURE_BIT_NN_SUPPORT_CONV1x1_AND_NATIVE_CONV_STRIDE2, + gcvFEATURE_NN_TENSOR_ADD, + + gcvFEATURE_IMAGE_LS_NO_FULLMASK_FIX, + gcvFEATURE_BLT_YUV_OUTPUT, + gcvFEATURE_PE_TILE_CACHE_FLUSH_FIX, + gcvFEATURE_SH_ROBUSTNESS_FIX, + gcvFEATURE_USC_ATOMIC_FIX2, + gcvFEATURE_MULTIVIEW_RENDER, + gcvFEATURE_FE_DRAW_DIRECT, + gcvFEATURE_TX_VKBORDER_MODE, + gcvFEATURE_TX_UNNORMALIZED_COORD, + gcvFEATURE_VG_IMAGE_16K, + gcvFEATURE_MULTICORE_CONFIG, + gcvFEATURE_PA_LINECLIP_FIX, + gcvFEATURE_NN_ENGINE, + gcvFEATURE_NN_ASYNC_COPY_MERGE_FIX, + gcvFEATURE_NN_CONVOUT_FIFO_DEPTH_FIX, + gcvFEATURE_NN_SMALLBATCH_PHASE1, + gcvFEATURE_TP_SMALLBATCH_PHASE1, + gcvFEATURE_VIP_SCALER, + gcvFEATURE_VIP_SCALER_4K, + gcvFEATURE_TX_8bit_UVFrac_ROUNDING_FIX, + gcvFEATURE_NN_REQ_SLOWARBITRATION_FIX, + gcvFEATUER_IMAGE_PARTIAL_CACHE, + gcvFEATURE_FULLCACHE_KERNELHEAD_FIX, + gcvFEATURE_NN_SINGLEPORT_ACCUMBUFFER, + gcvFEATURE_NN_SMALLBATCH, + gcvFEATURE_TP_SMALLBATCH, + gcvFEATURE_NN_ZDP_INIMAGE_SIZE_FIX, + gcvFEATURE_HI_REORDER_FIX, + gcvFEATURE_TP_COEF_COMPRESSION_ENHANCEMENT, + gcvFEATURE_NN_DEPTHWISE_SUPPORT, + gcvFEATURE_IMAGE_NOT_PACKED_IN_SRAM_FIX, + gcvFEATURE_IDLE_BEFORE_FLUSH_COMPLETE_FIX, + gcvFEATURE_NO_FLUSH_USC_FIX, + gcvFEATURE_COEF_DELTA_CORD_OVERFLOW_ZRL_8BIT_FIX, + gcvFEATURE_XY_OFFSET_LIMITATION_FIX, + gcvFEATURE_USC_INVALIDATE_CACHE_LINE_FIX, + gcvFEATURE_LOW_EFFICIENCY_OF_ID_WRITE_IMGBUF_FIX, + gcvFEATURE_KERNEL_PER_CORE_LESS_THAN_THIRD_COEF_BUFF_DEPTH_FIX, + gcvFEATURE_NN_PER_CHANNEL_QUANT, + gcvFEATURE_NN_NO_Z_LOCATION_OFFSET, + gcvFEATURE_NN_KERNEL_SIZE_WASTE_IN_PARTIAL_MODE_FIX, + gcvFEATURE_INCORRECT_WR_REQ_TO_USC_BETWEEN_REORDER_AND_NORMAL_LAYER_FIX, + gcvFEATURE_VIP_DEC400, + gcvFEATURE_MAX_POINTSIZE_CLAMP, + gcvFEATURE_2D_FAST_CLEAR, /* For tilestatus Fast Clear feature*/ + gcvFEATURE_NN_PER_CHANNEL_QUANT_ASYM, + gcvFEATURE_SMALL_BATCH_FLOPS_RESET_FIX, + gcvFEATURE_SMALL_BATCH_DISBLE_FIX, + gcvFEATURE_FORMAT_10BIT_CROSS_4K, + gcvFEATURE_ENDIAN_CONTROL, + gcvFEATURE_SH_VX2_FLOATING_MAD_FIX, + gcvFEATURE_PE_A8B8G8R8, /* For PE support A8B8G8R8 format feature*/ + gcvFEATURE_DEPTHWISE_NEIGHBOR_IMG_DATA_TRANSFER_NOT_EFFICIENT_FIX, + + /* FP16 enhancement-related features. 
*/ + gcvFEATURE_DST_TEX_I2F_F2I_INST_DEPRECATE, + gcvFEATURE_ALU_FP16_INST_SUPPORT, + gcvFEATURE_DUAL16_14BIT_PC_SUPPORT, + gcvFEATURE_LDST_CONV_4ROUNDING_MODES, + gcvFEATURE_FULL_PACK_MODE_SUPPORT, + gcvFEATURE_FP32_TO_FP16_CONV_FIX, + + gcvFEATURE_SH_HAS_IMGLD_COMP_COUNT_FIX, + gcvFEATURE_SH_SUPPORT_FP32_FMA, + + gcvFEATURE_SH_SUPPORT_VEC2_INT_MULMAD, + gcvFEATURE_SH_SUPPORT_VEC4_INT_MULMAD, + + gcvFEATURE_SH_SUPPORT_HIGHPVEC_FORMAT, + gcvFEATURE_SH_HAS_32BIT_NEG_OFFSET_FIX_FOR_40BIT_VA, + gcvFEATURE_SH_SUPPORT_AIGM, + gcvFEATURE_SH_CS_PAGE_SIZE_ISSUE, + gcvFEATURE_SH_INTEGER_FRONT_FACE, + gcvFEATURE_SH_SUPPORT_MULTIVIEWPORT, + + gcvFEATURE_SH_PER_STAGE_LOCAL_STORAGE, + gcvFEATURE_SH_SUPPORT_SEPARATED_TEX, + gcvFEATURE_SH_SUPPORT_CLIP_CULL_DISTANCE, + gcvFEATURE_SH_D3D11_SUPPORT, + + gcvFEATURE_SH_DYNAMIC_TEXTURE_INDEXING, + + /* AIGPU feature. */ + gcvFEATURE_AI_GPU, + gcvFEATURE_NN_FAST_FIRST_PIXEL_POOLING, + gcvFEATURE_NN_FLOAT_POST_MULT, + gcvFEATURE_NN_ASYMMETRIC_INT8, + + gcvFEATURE_FORMAT_YUV_I010, /*support YUVI010 & P010_LSB format*/ + gcvFEATURE_FORMAT_YUV420_101010, /*support YUV420_101010 format*/ + gcvFEATURE_FORMAT_FLOATPOINT, /*support FloatPoint format,also include packed-RGB888*/ + + gcvFEATURE_BIT_NN_COMPRESSION_BYPASSS, + gcvFEATURE_BIT_BFLOAT_COEF_COMPRESSION_ZERO_COEFBIT14_INVERSE, + gcvFEATURE_BIT_TP_KERNEL_1BYTE_ALGIN, + gcvFEATURE_PREPROCESS_IMG_BUF_640BYTE_LIMIT, + gcvFEATURE_BIT_TPLITE_BFLOAT16, + gcvFEATURE_VIP_HW_FINAL_RELEASE, + gcvFEATURE_OUTPUT_CONVERT_UINT8_INT8_TO_UINT16_INT16_FIX, + gcvFEATURE_IMG_ADDR_NOT_WRAP_IF_OVER_OCB_ADDR_FIX, /* 2089 */ + gcvFEATURE_BIT_V8_SINGLE_PORT_ACCUMULATION_BUFFER_RW_CONFICT_ZERO_SKIP_PERF_FIX, /* 2043 */ + gcvFEATURE_BIT_BURST_COLLECT_DUMMY_DATA_WASTE_CYCLES_FIX, /* 2111 */ + gcvFEATURE_BIT_TP_ACCESS_VIPSRAM_OT_IS_ONE_FIX, /* 2050 */ + + gcvFEATURE_BIT_USE_SINGLE_PORT_VIPSRAM, + gcvFEATURE_VALUE_DDR_KERNEL_BURST_SIZE, + gcvFEATURE_BIT_TILE_ACCESS_CAPABILITY, + gcvFEATURE_BIT_FAST_DP3_PREPROCESSOR, + + gcvFEATURE_BIT_INIMG_NOT_64BYTE_ALIGN_CACHELINE_MODE_FIX, /* 2112 */ + gcvFEATURE_BIT_DEPTHWISE_16BIT_FORMAT, + + gcvFEATURE_2D_TILESTATUS_ROTATION, /*Fix fastclear feature with rotation*/ + gcvFEATURE_BIT_TP_FC_FLOAT_LAST_PIXEL_NEGATIVE_0_FIX, + gcvFEATURE_TS_FC_VULKAN_SUPPORT, + gcvFEATURE_BIT_V8_ACCUMLATION_READ_OUT_HAS_BUBBLES_PERF_FIX, /* 2044 */ + gcvFEATURE_BIT_MAX_TILE_SIZE, + gcvFEATURE_2D_TARGET_MAJOR_SUPER_TILE, /*target support supertile Y major*/ + gcvFEATURE_BIT_INIMAGE_2DTILE_NOT_LESS_160PIXEL_FIX, + gcvFEATURE_BIT_NN_IN_TILE_DATA_IS_ALL_PAD_FIX, /* 2131 */ + gcvFEATURE_BIT_US_SRAM_READ_INTF_FIFO_OVERFLOW_FIX, /* 2280 */ + + /* TP reorder the int tile x should be less than 512 */ + gcvFEATURE_TP_REORDER_INTILE_X_SIZE_512_FIX, + gcvFEATURE_NN_WASTE_COEF_READ_WRITE_BANDWIDTH_128BYTE_VIPSRAM_IN_FULL_PATIAL_CACHE_MODE_FIX, + gcvFEATURE_BIT_BFP_COEF_AUTO_PAD_INCOMPLETE_ZERO_IN_KZ_PLANE, + gcvFEATURE_NN_FLOAT32_IO, + gcvFEATURE_TP_FLOAT32_IO, + /* add for support INT16x(U)INT8 */ + gcvFEATURE_BIT_NN_23BITS_POST_MULTIPLIER_VIP_V7, + gcvFEATURE_BIT_TP_23BITS_POST_MULTIPLIER_VIP_V7, + gcvFEATURE_CONV_INT16X8BIT_VIP_V7, + + /* Q channel support. */ + gcvFEATURE_Q_CHANNEL_SUPPORT, + + /* MMU descriptor new refinement. 
*/ + gcvFEATURE_MMU_PAGE_DESCRIPTOR, + + gcvFEATURE_BIT_NN_TILE_NUM_BIGGER_THAN_1024_FIX, + + gcvFEATURE_BIT_HI1_L2_CACHE, + + gcvFEATURE_BIT_NN_SUPPORT_CONV_1D, + + gcvFEATURE_BIT_NN_DEPTHWISE_AFTER_16BIT_LAYER_LIMIT_FIX, + + /* only support gcvSURF_B8G8R8_PLANAR & gcvSURF_AYUV format as target */ + gcvFEATURE_BIT_BGR_PLANAR, + + gcvFEATURE_BIT_USC_INDIVIDUAL_PORT_WRT_EARLY_EVICT_DATA_CORRUPT_FIX, + gcvFEATURE_BIT_NN_TP_INSTR_COMPLETE_IN_SAME_CYCLE_WITH_WAIT_EVENT_FIX, + + gcvFEATURE_BIT_TP_SOFTMAX, + gcvFEATURE_TP_TENSOR_ADD_MUL, + + gcvFEATURE_NN_REMOVE_POOLING, + gcvFEATURE_BIT_NN_DEPTHWISE_INT16XINT8, + gcvFEATURE_BIT_NN_DEPTHWISE_8BIT_VIP_V7, + gcvFEATURE_BIT_NN_ZDP_TRANSPOSE_CH9_ONLY, + gcvFEATURE_BIT_NN_SUPPORT_DUMMY_TILE, + gcvFEATURE_BIT_USE_VIPSRAM_FOR_KERNEL_STREAMING, + gcvFEATURE_BIT_NN_SUPPORT_KERNEL_1BYTE_ALIGN, + gcvFEATURE_BIT_NN_SMALL_BATCH_PHASE2, + gcvFEATURE_SH_MOVAI_MOVAR_UNUSED_COMPONENTS_WRITE_DIRTY_DATA_FIX, + gcvFEATURE_BIT_NN_ENHANCED_MAX_POOLING, + gcvFEATURE_NN_1x1_NON_POOLING_PACKING, + gcvFEATURE_BIT_NN_SUPPORT_BOTH_CONV_NATIVE_STRIDE2_AND_POOLING, + gcvFEATURE_BIT_NN_SUPPORT_ALU, + gcvFEATURE_BIT_NN_TRANSPOSE_PHASE2, + gcvFEATURE_BIT_NN_FC_ENHANCEMENT, + gcvFEATURE_BIT_NN_2ND_IMG_BASE_ADDR_FIX, + gcvFEATURE_BIT_NN_TENSOR_ADD_FIELD_MOVE_TO_EXT_CMD, + + gcvFEATURE_IMGLD_WIDTH_LT16_FIX, + gcvFEATURE_BIT_GPU_INSPECTOR_COUNTERS, + + gcvFEATURE_VIP_REMOVE_MMU, + gcvFEATURE_BIT_TPLITE_SUPPORT_TP_DATA_TRANSPOSE, + gcvFEATURE_BIT_NN_JD_DIRECT_MODE_FIX, + gcvFEATURE_BIT_NN_CONV_CORE_BYPASS, + gcvFEATURE_BIT_TP_REMOVE_FC, + + gcvFEATURE_BIT_HI_DEFAULT_ENABLE_REORDER_FIX, + gcvFEATURE_BIT_NN_TENSOR_ADD_RELU, + gcvFEATURE_BIT_NN_VIPSRAM_DOUBLE_BUFFER_FIX, + + gcvFEATURE_BIT_NN_POST_OUT_SUPPORT_FP16, + gcvFEATURE_BIT_NN_POST_OUT_SUPPORT_BF16, + gcvFEATURE_BIT_NN_POST_OUT_SUPPORT_FP32, + gcvFEATURE_BIT_NN_POST_OUT_SUPPORT_INT32, + gcvFEATURE_BIT_DEPTHWISE_FLOAT_FIX, + + /* + * Using event 28 as frame done interrupt, set by End command with bit 28 + * interrupt enabled by AHB 0x14 register and clear by 0x10 register. + */ + gcvFEATURE_2D_FRAME_DONE_INTR, + gcvFEATURE_BIT_NN_BURST_COLLECTER_LAST_FLAG_FIX, + gcvFEATURE_BIT_NN_POST_MULT_SUPPORT_FP_CONV, + + /* + * support AXI Front-End hardware moudle for IP (subsystem) directly interfacing + * to SOC through AXI port. + */ + gcvFEATURE_BIT_AXI_FE, + gcvFEATURE_BIT_V83_1ST_CACHE_MODE_VIPSRAM_RD_UPDATE_FIX, + gcvFEATURE_BIT_NN_KERNEL_MSS_SBP2_DIRECT_STEAM_STEAM_FIX, + gcvFEATURE_BIT_NN_RD_IMG_NEED_EXTRA_SPACE, + gcvFEATURE_BIT_V83_NUMOFPENDINGTILES_FOR_2NDIMAGE_FIX, + gcvFEATURE_BIT_CORE_NUM_OF_KID_FOR_MULTI_LAYER_FIX, + gcvFEATURE_BIT_USC_RW_SAME_CACHELINE_UPDATE_FIX, + gcvFEATURE_BIT_V83_1ST_KERNEL_STREAM_BUFFER_UPDATE_FIX, + gcvFEATURE_BIT_NN_CMD_SUPPORT_SLICE, + gcvFEATURE_BIT_NN_HW_V83, + + /* ANDROID_ONLY_RESERVED,reserve some non-android features: + * (1)Line drawing, + * (2)8x8 pattern, + * (3)Index8 format, + * (4)Demultiply, + * (5)Alpha blending:(ONE,ONE_MINUS_SRC_ALPHA),(SRC_ALPHA,ONE_MINUS_SRC_ALPHA),global alpha and pre-multiply, + * (6)Rectangle clear and fill, + * (7)Rotation.Android uses'90 deg','H flip','V flip'three bits to describe 8 transformations. 
+ */ + gcvFEATURE_ANDROID_ONLY_RESERVED, + gcvFEATURE_2D_MULTISOURCE_PIPE, /* move bitblit/stretchblit pipe line to multisource blit */ + gcvFEATURE_2D_MASK_AND_COLORKEY, /* maskblit and src/dst color key */ + + gcvFEATURE_BIT_V83_INTILESIZE_1X1_10BITS_FIX, + gcvFEATURE_BIT_NN_CIRCULAR_BUF_WRAP_ADDRESS_OVERFLOW_FIX, + gcvFEATURE_BIT_TP_CIRCULAR_BUF_WRAP_ADDRESS_OVERFLOW_FIX, + gcvFEATURE_BIT_TP_CIRCULAR_BUF_WRAP_ADDRESS_LESS_FIX, + gcvFEATURE_BIT_USC_PAUSE_TP_WR_REQ_MORE_THAN_256_CYCLES_FIX, + + gcvFEATURE_BIT_TP_SPECIAL_LIST_PARSER_FIX, /* 2365 */ + + gcvFEATURE_2D_STRETCH_MULTISOURCE_PIPE, /* move stretchblit pipe line to multisource blit */ + + gcvFEATURE_BIT_PA_ZEROAREA_LINE_FIX, /* HW 2380 */ + gcvFEATURE_BIT_RS_TILER_YUV420_FIX, /* HW 2114 */ + + gcvFEATURE_BIT_NN_JOB_CANCELATION, + + gcvFEATURE_BIT_V8_DIRECT_MODE_START_ADDR_BIAS_FOR_NEGATIVE_OFFSET_FIX, + + /* only support gcvSURF_B8G8R8_PLANAR & gcvSURF_AYUV format as source */ + gcvFEATURE_BIT_BGR_PLANAR_SOURCE, + + gcvFEATURE_2D_FC_IN_DEC400EX,/* 2D Fast Clear support in DEC400EX */ + gcvFEATURE_BIT_DIRECT_INIMAGE_XSTIDE_LE_13BIT_FIX, + gcvFEATURE_BIT_PE_64BPP_LINEAR_FORMAT, + gcvFEATURE_BIT_NN_SUPPORT_MULTI_AXI_ID, + gcvFEATURE_BIT_NN_STREAM_PROCESSOR, + gcvFEATURE_BIT_TRSPB2_ENDADDR_EQUAL_SRAMEND_FIX, + + /* Feature about normalization */ + gcvFEATURE_2D_NORMALIZATION, + gcvFEATURE_2D_NORMALIZATION_QUANTIZATION, + + gcvFEATURE_BIT_NN_SUPPORT_16_8_QUANTIZATION, + gcvFEATURE_BIT_SPECIAL_8BIT_SIGN_ABS_CONV, + gcvFEATURE_BIT_VIP_SUPPORT_TENSOR_TRANSFER, + gcvFEATURE_BIT_NN_SUPPORT_CMD_LOOP, + + gcvFEATURE_BIT_NN_1ST_AND_2ND_INIMAGE_RAISE_VIPSRAM_RD_UPDATE_AT_SAME_TIME_FIX, + gcvFEATURE_BIT_NN_1ST_AND_2ND_INIMAGE_RAISE_VIPSRAM_RD_UPDATE_AT_SAME_TIME_PHASE1_FIX, + gcvFEATURE_BIT_NN_1ST_AND_2ND_INIMAGE_RAISE_VIPSRAM_RD_UPDATE_AT_SAME_TIME_PHASE2_FIX, + gcvFEATURE_BIT_SECONDIMG_TILE_SIDEBANFIFO_FIX, + + gcvFEATURE_BIT_NN_4BIT_PHASE1, + gcvFEATURE_BIT_NN_SUPPORT_DECONVNxN_S_LESS_THAN_16, + gcvFEATURE_BIT_NN_PICOCORE_DEPTHWISE, + gcvFEATURE_BIT_NN_SINGLE_POSTMULT_FIELDS_IN_BITSTREAM, + gcvFEATURE_VALUE_NN_SMALL_ACCUM_BITS, + gcvFEATURE_VALUE_NN_SMALL_ACCUM, + gcvFEATURE_BIT_VIP_SUPPORT_X_FRAME_COMPRESSION, + gcvFEATURE_NN_SUPPORT_EFUSE, + gcvFEATURE_BIT_NN_WRITE_WITHOUT_USC, + gcvFEATURE_BIT_NN_SUPPORT_CONFIGURABLE_FASTXDP3, + gcvFEATURE_BIT_SH_SUPPORT_VEC2, + gcvFEATURE_BIT_KERNEL_WR_RD_LUTLOAD_DIRECTMODE_ADDR_FIX, + gcvFEATURE_VALUE_NN_COEF_DECOMPRESS_PERF_X, + gcvFEATURE_BIT_VZ_GROUP_START_Z_OVERFLOW_FIX, + gcvFEATURE_BIT_NN_SPLIT_X_AMONG_CLUSTE, + gcvFEATURE_BIT_SRAM_PARITY, + gcvFEATURE_BIT_TRSP2_CONV_SMALLBATCH_FIX, + gcvFEATURE_BIT_NN_SUPPORT_GEMM_PHASE1, + gcvFEATURE_BIT_NN_SUPPORT_GEMM_PHASE2, + gcvFEATURE_NN_TENSOR_ADD_INT16, + gcvFEATURE_BIT_DEPTHTOSPACE_SAME_XY_FIX, + gcvFEATURE_BIT_NN_SUPPORT_ZDP_LOOP6, + gcvFEATURE_NN_ELEMENTWISE_BROADCAST, + gcvFEATURE_NN_2ND_IMAGE_DATA_TYPE, + gcvFEATURE_BIT_FP_INIMAGE_POST_SCALE, + gcvFEATURE_BIT_FASTXDP3_ONLY_IN_DEPTHWISE_FIX, + gcvFEATURE_BIT_NN_TILE_YSIZE_127_LIMITATION_FIX, + gcvFEATURE_BIT_NN_2ND_IMG_SMALL_3D_TILE_FIX, + gcvFEATURE_BIT_NN_CONV_1D_16BIT_FORMAT_INTILE_SIZE_LIMITATION_FIX, + gcvFEATURE_BIT_PERF_KERNEL_DESCRIPTOR_SOURCE_FIX, + + /* Insert features above this comment only. */ + gcvFEATURE_COUNT /* Not a feature. */ +} gceFEATURE; + +/* Chip Power Status. */ +typedef enum _gceCHIPPOWERSTATE { + gcvPOWER_INVALID = -1, + + /* Global/base states. */ + gcvPOWER_ON = 0, + gcvPOWER_IDLE, + gcvPOWER_SUSPEND, + gcvPOWER_OFF, + + /* Power on but not global or broadcast. 
*/ + gcvPOWER_ON_AUTO, + + /* Broadcast states. */ + gcvPOWER_FLAG_BROADCAST = 0x10, + gcvPOWER_IDLE_BROADCAST = gcvPOWER_IDLE | gcvPOWER_FLAG_BROADCAST, + gcvPOWER_SUSPEND_BROADCAST = gcvPOWER_SUSPEND | gcvPOWER_FLAG_BROADCAST, + gcvPOWER_OFF_BROADCAST = gcvPOWER_OFF | gcvPOWER_FLAG_BROADCAST, + + /* Timeout states. */ + gcvPOWER_FLAG_TIMEOUT = 0x20, + gcvPOWER_IDLE_TIMEOUT = gcvPOWER_IDLE | gcvPOWER_FLAG_TIMEOUT, + gcvPOWER_SUSPEND_TIMEOUT = gcvPOWER_SUSPEND | gcvPOWER_FLAG_TIMEOUT, + gcvPOWER_OFF_TIMEOUT = gcvPOWER_OFF | gcvPOWER_FLAG_TIMEOUT, + +} gceCHIPPOWERSTATE; + +/* CPU cache operations */ +typedef enum _gceCACHEOPERATION { + gcvCACHE_CLEAN = 0x01, /* Flush CPU cache to mem */ + gcvCACHE_INVALIDATE = 0x02, /* Invalidte CPU cache */ + gcvCACHE_FLUSH = gcvCACHE_CLEAN | gcvCACHE_INVALIDATE, /* Both flush & invalidate */ + gcvCACHE_MEMORY_BARRIER = 0x04 +} gceCACHEOPERATION; + +/* Surface types. */ +typedef enum _gceSURF_TYPE { + gcvSURF_TYPE_UNKNOWN = 0, + gcvSURF_INDEX, + gcvSURF_VERTEX, + gcvSURF_TEXTURE, + gcvSURF_RENDER_TARGET, + gcvSURF_DEPTH, + gcvSURF_BITMAP, + gcvSURF_TILE_STATUS, + gcvSURF_IMAGE, + gcvSURF_MASK, + gcvSURF_SCISSOR, + gcvSURF_HIERARCHICAL_DEPTH, + gcvSURF_ICACHE, + gcvSURF_TXDESC, + gcvSURF_FENCE, + gcvSURF_TFBHEADER, + gcvSURF_NUM_TYPES, /* Make sure this is the last one! */ + + /* Combinations. */ + gcvSURF_NO_TILE_STATUS = 0x100, + gcvSURF_NO_VIDMEM = 0x200, /* Used to allocate surfaces with no underlying vidmem node. + * In Android, vidmem node is allocated by another process. + */ + gcvSURF_CACHEABLE = 0x400, /* Used to allocate a cacheable surface */ + gcvSURF_TILE_RLV_FENCE = 0x800, /* create texture fence as tile */ + gcvSURF_TILE_STATUS_DIRTY = 0x1000, /* Init tile status to all dirty */ + gcvSURF_LINEAR = 0x2000, + gcvSURF_CREATE_AS_TEXTURE = 0x4000, /* create it as a texture */ + gcvSURF_PROTECTED_CONTENT = 0x8000, /* create it as content protected */ + gcvSURF_CREATE_AS_DISPLAYBUFFER = 0x10000, /*create it as a display buffer surface */ + gcvSURF_CONTIGUOUS = 0x20000, /*create it as contiguous */ + gcvSURF_NO_COMPRESSION = 0x40000, /* Create it as no compression, valid on when it has tile status. */ + gcvSURF_DEC = 0x80000, /* Surface is DEC compressed */ + gcvSURF_NO_HZ = 0x100000, + gcvSURF_3D = 0x200000, /* It's 3d surface */ + gcvSURF_DMABUF_EXPORTABLE = 0x400000, /* master node can be exported as dma-buf fd */ + gcvSURF_CACHE_MODE_128 = 0x800000, + gcvSURF_TILED = 0x1000000, /* force create tile buffer, as we will convert it to supertile according to related hardware feature by default */ + gcvSURF_FORCE_32BIT_VA = 0x2000000, /* force allocate 32bit VA */ + gcvSURF_LINEAR_NO_ALIGNMENT = 0x4000000, /* only for linear render target buffer */ + + gcvSURF_TEXTURE_LINEAR = gcvSURF_TEXTURE + | gcvSURF_LINEAR, + + gcvSURF_RENDER_TARGET_LINEAR = gcvSURF_RENDER_TARGET + | gcvSURF_LINEAR, + + gcvSURF_RENDER_TARGET_NO_TILE_STATUS = gcvSURF_RENDER_TARGET + | gcvSURF_NO_TILE_STATUS, + + gcvSURF_RENDER_TARGET_NO_COMPRESSION = gcvSURF_RENDER_TARGET + | gcvSURF_NO_COMPRESSION, + + gcvSURF_RENDER_TARGET_TS_DIRTY = gcvSURF_RENDER_TARGET + | gcvSURF_TILE_STATUS_DIRTY, + + gcvSURF_DEPTH_NO_TILE_STATUS = gcvSURF_DEPTH + | gcvSURF_NO_TILE_STATUS, + + gcvSURF_DEPTH_TS_DIRTY = gcvSURF_DEPTH + | gcvSURF_TILE_STATUS_DIRTY, + + /* Supported surface types with no vidmem node. */ + gcvSURF_BITMAP_NO_VIDMEM = gcvSURF_BITMAP + | gcvSURF_NO_VIDMEM, + + gcvSURF_TEXTURE_NO_VIDMEM = gcvSURF_TEXTURE + | gcvSURF_NO_VIDMEM, + + /* Cacheable surface types with no vidmem node. 
*/ + gcvSURF_CACHEABLE_BITMAP_NO_VIDMEM = gcvSURF_BITMAP_NO_VIDMEM + | gcvSURF_CACHEABLE, + + gcvSURF_CACHEABLE_BITMAP = gcvSURF_BITMAP + | gcvSURF_CACHEABLE, + + gcvSURF_TEXTURE_3D = gcvSURF_TEXTURE + | gcvSURF_3D +} gceSURF_TYPE; + +/* + * Surface formats. + * Name rules is from MSB->LSB. + */ +typedef enum _gceSURF_FORMAT { + /* Unknown format. */ + gcvSURF_UNKNOWN = 0, + + /* Format test */ + gcvSURF_TEST = 1, + + /* Palettized formats. */ + gcvSURF_INDEX1 = 100, + gcvSURF_INDEX4, + gcvSURF_INDEX8, +#if gcdVG_ONLY + gcvSURF_INDEX2, +#endif + + /* RGB formats. */ + gcvSURF_A2R2G2B2 = 200, + gcvSURF_R3G3B2, + gcvSURF_A8R3G3B2, + gcvSURF_X4R4G4B4, + gcvSURF_A4R4G4B4, + gcvSURF_R4G4B4A4, + gcvSURF_X1R5G5B5, + gcvSURF_A1R5G5B5, + gcvSURF_R5G5B5A1, + gcvSURF_R5G6B5, + gcvSURF_R8G8B8, + gcvSURF_X8R8G8B8, + gcvSURF_A8R8G8B8, + gcvSURF_R8G8B8A8, + gcvSURF_G8R8G8B8, + gcvSURF_R8G8B8G8, + gcvSURF_X2R10G10B10, + gcvSURF_A2R10G10B10, + gcvSURF_R10G10B10A2, + gcvSURF_X12R12G12B12, + gcvSURF_A12R12G12B12, + gcvSURF_X16R16G16B16, + gcvSURF_A16R16G16B16, + gcvSURF_A32R32G32B32, + gcvSURF_R8G8B8X8, + gcvSURF_R5G5B5X1, + gcvSURF_R4G4B4X4, + gcvSURF_X16R16G16B16_2_A8R8G8B8, + gcvSURF_A16R16G16B16_2_A8R8G8B8, + gcvSURF_A32R32G32B32_2_G32R32F, + gcvSURF_A32R32G32B32_4_A8R8G8B8, + gcvSURF_R8G8B8_PLANAR, + gcvSURF_R8G8B8I, + gcvSURF_R8G8B8I_PLANAR, + gcvSURF_R16G16B16I, + gcvSURF_R16G16B16I_PLANAR, + + /* BGR formats. */ + gcvSURF_A4B4G4R4 = 300, + gcvSURF_A1B5G5R5, + gcvSURF_B5G6R5, + gcvSURF_B8G8R8, + gcvSURF_B16G16R16, + gcvSURF_X8B8G8R8, + gcvSURF_A8B8G8R8, + gcvSURF_A2B10G10R10, + gcvSURF_X16B16G16R16, + gcvSURF_A16B16G16R16, + gcvSURF_B32G32R32, + gcvSURF_X32B32G32R32, + gcvSURF_A32B32G32R32, + gcvSURF_B4G4R4A4, + gcvSURF_B5G5R5A1, + gcvSURF_B8G8R8X8, + gcvSURF_B8G8R8A8, + gcvSURF_B10G10R10A2, + gcvSURF_X4B4G4R4, + gcvSURF_X1B5G5R5, + gcvSURF_B4G4R4X4, + gcvSURF_B5G5R5X1, + gcvSURF_X2B10G10R10, + gcvSURF_B8G8R8_SNORM, + gcvSURF_X8B8G8R8_SNORM, + gcvSURF_A8B8G8R8_SNORM, + gcvSURF_A8B12G12R12_2_A8R8G8B8, + gcvSURF_B8G8R8_PLANAR, + + /* Compressed formats. */ + gcvSURF_DXT1 = 400, + gcvSURF_DXT2, + gcvSURF_DXT3, + gcvSURF_DXT4, + gcvSURF_DXT5, + gcvSURF_CXV8U8, + gcvSURF_ETC1, + gcvSURF_R11_EAC, + gcvSURF_SIGNED_R11_EAC, + gcvSURF_RG11_EAC, + gcvSURF_SIGNED_RG11_EAC, + gcvSURF_RGB8_ETC2, + gcvSURF_SRGB8_ETC2, + gcvSURF_RGB8_PUNCHTHROUGH_ALPHA1_ETC2, + gcvSURF_SRGB8_PUNCHTHROUGH_ALPHA1_ETC2, + gcvSURF_RGBA8_ETC2_EAC, + gcvSURF_SRGB8_ALPHA8_ETC2_EAC, + gcvSURF_SDXT1, + gcvSURF_SDXT3, + gcvSURF_SDXT5, + + /* YUV formats. */ + gcvSURF_YUY2 = 500, + gcvSURF_UYVY, + gcvSURF_YV12, + gcvSURF_I420, + gcvSURF_NV12, + gcvSURF_NV21, + gcvSURF_NV16, + gcvSURF_NV61, + gcvSURF_YVYU, + gcvSURF_VYUY, + gcvSURF_AYUV, + gcvSURF_YUV420_10_ST, + gcvSURF_YUV420_TILE_ST, + gcvSURF_YUV420_TILE_10_ST, + gcvSURF_NV12_10BIT, + gcvSURF_NV21_10BIT, + gcvSURF_NV16_10BIT, + gcvSURF_NV61_10BIT, + gcvSURF_P010, + gcvSURF_P010_LSB, + gcvSURF_I010, + gcvSURF_I010_LSB, + gcvSURF_YUV420_101010, + gcvSURF_GRAY8, +#if gcdVG_ONLY + gcvSURF_AYUY2, + gcvSURF_ANV12, + gcvSURF_ANV16, + gcvSURF_AUYVY, + gcvSURF_YV16, +#endif + + /* Depth formats. */ + gcvSURF_D16 = 600, + gcvSURF_D24S8, + gcvSURF_D32, + gcvSURF_D24X8, + gcvSURF_D32F, + gcvSURF_S8D32F, + gcvSURF_S8D32F_1_G32R32F, + gcvSURF_S8D32F_2_A8R8G8B8, + gcvSURF_D24S8_1_A8R8G8B8, + gcvSURF_S8, + gcvSURF_X24S8, + gcvSURF_X24S8_1_A8R8G8B8, + + /* Alpha formats. */ + gcvSURF_A4 = 700, + gcvSURF_A8, + gcvSURF_A12, + gcvSURF_A16, + gcvSURF_A32, + gcvSURF_A1, + + /* Luminance formats. 
*/ + gcvSURF_L4 = 800, + gcvSURF_L8, + gcvSURF_L12, + gcvSURF_L16, + gcvSURF_L32, + gcvSURF_L1, + gcvSURF_L8_RAW, + + /* Alpha/Luminance formats. */ + gcvSURF_A4L4 = 900, + gcvSURF_A2L6, + gcvSURF_A8L8, + gcvSURF_A4L12, + gcvSURF_A12L12, + gcvSURF_A16L16, + + gcvSURF_A8L8_1_A8R8G8B8, + + gcvSURF_A8L8_RAW, + + /* Bump formats. */ + gcvSURF_L6V5U5 = 1000, + gcvSURF_V8U8, + gcvSURF_X8L8V8U8, + gcvSURF_Q8W8V8U8, + gcvSURF_A2W10V10U10, + gcvSURF_V16U16, + gcvSURF_Q16W16V16U16, + + /* R/RG/RA formats. */ + gcvSURF_R8 = 1100, + gcvSURF_X8R8, + gcvSURF_G8R8, + gcvSURF_X8G8R8, + gcvSURF_A8R8, + gcvSURF_R16, + gcvSURF_X16R16, + gcvSURF_G16R16, + gcvSURF_X16G16R16, + gcvSURF_A16R16, + gcvSURF_R32, + gcvSURF_X32R32, + gcvSURF_G32R32, + gcvSURF_X32G32R32, + gcvSURF_A32R32, + gcvSURF_RG16, + gcvSURF_R8_SNORM, + gcvSURF_G8R8_SNORM, + + gcvSURF_R8_1_X8R8G8B8, + gcvSURF_G8R8_1_X8R8G8B8, + + /* Floating point formats. */ + gcvSURF_R16F = 1200, + gcvSURF_X16R16F, + gcvSURF_G16R16F, + gcvSURF_X16G16R16F, + gcvSURF_B16G16R16F, + gcvSURF_X16B16G16R16F, + gcvSURF_A16B16G16R16F, + gcvSURF_R32F, + gcvSURF_X32R32F, + gcvSURF_G32R32F, + gcvSURF_X32G32R32F, + gcvSURF_B32G32R32F, + gcvSURF_X32B32G32R32F, + gcvSURF_A32B32G32R32F, + gcvSURF_A16F, + gcvSURF_L16F, + gcvSURF_A16L16F, + gcvSURF_A16R16F, + gcvSURF_A32F, + gcvSURF_L32F, + gcvSURF_A32L32F, + gcvSURF_A32R32F, + gcvSURF_E5B9G9R9, + gcvSURF_B10G11R11F, + gcvSURF_B16G16R16F_PLANAR, + gcvSURF_B32G32R32F_PLANAR, + gcvSURF_R16G16B16F, + gcvSURF_R32G32B32F, + + gcvSURF_GRAY16F, + gcvSURF_GRAY32F, + + gcvSURF_X16B16G16R16F_2_A8R8G8B8, + gcvSURF_A16B16G16R16F_2_A8R8G8B8, + gcvSURF_A16B16G16R16F_2_G16R16F, + gcvSURF_G32R32F_2_A8R8G8B8, + gcvSURF_X32B32G32R32F_2_G32R32F, + gcvSURF_A32B32G32R32F_2_G32R32F, + gcvSURF_X32B32G32R32F_4_A8R8G8B8, + gcvSURF_A32B32G32R32F_4_A8R8G8B8, + + gcvSURF_R16F_1_A4R4G4B4, + gcvSURF_G16R16F_1_A8R8G8B8, + gcvSURF_B16G16R16F_2_A8R8G8B8, + + gcvSURF_R32F_1_A8R8G8B8, + gcvSURF_B32G32R32F_3_A8R8G8B8, + gcvSURF_B10G11R11F_1_A8R8G8B8, + + gcvSURF_A32F_1_R32F, + gcvSURF_L32F_1_R32F, + gcvSURF_A32L32F_1_G32R32F, + + gcvSURF_R16G16B16F_PLANAR, + gcvSURF_R32G32B32F_PLANAR, + + /* sRGB format. */ + gcvSURF_SBGR8 = 1400, + gcvSURF_A8_SBGR8, + gcvSURF_X8_SBGR8, + gcvSURF_A8_SRGB8, + gcvSURF_X8_SRGB8, + + /* Integer formats. 
*/ + gcvSURF_R8I = 1500, + gcvSURF_R8UI, + gcvSURF_R16I, + gcvSURF_R16UI, + gcvSURF_R32I, + gcvSURF_R32UI, + gcvSURF_X8R8I, + gcvSURF_G8R8I, + gcvSURF_X8R8UI, + gcvSURF_G8R8UI, + gcvSURF_X16R16I, + gcvSURF_G16R16I, + gcvSURF_X16R16UI, + gcvSURF_G16R16UI, + gcvSURF_X32R32I, + gcvSURF_G32R32I, + gcvSURF_X32R32UI, + gcvSURF_G32R32UI, + gcvSURF_X8G8R8I, + gcvSURF_B8G8R8I, + gcvSURF_X8G8R8UI, + gcvSURF_B8G8R8UI, + gcvSURF_X16G16R16I, + gcvSURF_B16G16R16I, + gcvSURF_X16G16R16UI, + gcvSURF_B16G16R16UI, + gcvSURF_X32G32R32I, + gcvSURF_B32G32R32I, + gcvSURF_X32G32R32UI, + gcvSURF_B32G32R32UI, + gcvSURF_X8B8G8R8I, + gcvSURF_A8B8G8R8I, + gcvSURF_X8B8G8R8UI, + gcvSURF_A8B8G8R8UI, + gcvSURF_X16B16G16R16I, + gcvSURF_A16B16G16R16I, + gcvSURF_X16B16G16R16UI, + gcvSURF_A16B16G16R16UI, + gcvSURF_X32B32G32R32I, + gcvSURF_A32B32G32R32I, + gcvSURF_X32B32G32R32UI, + gcvSURF_A32B32G32R32UI, + gcvSURF_A2B10G10R10UI, + gcvSURF_G32R32I_2_A8R8G8B8, + gcvSURF_G32R32I_1_G32R32F, + gcvSURF_G32R32UI_2_A8R8G8B8, + gcvSURF_G32R32UI_1_G32R32F, + gcvSURF_X16B16G16R16I_2_A8R8G8B8, + gcvSURF_X16B16G16R16I_1_G32R32F, + gcvSURF_A16B16G16R16I_2_A8R8G8B8, + gcvSURF_A16B16G16R16I_1_G32R32F, + gcvSURF_X16B16G16R16UI_2_A8R8G8B8, + gcvSURF_X16B16G16R16UI_1_G32R32F, + gcvSURF_A16B16G16R16UI_2_A8R8G8B8, + gcvSURF_A16B16G16R16UI_1_G32R32F, + gcvSURF_X32B32G32R32I_2_G32R32I, + gcvSURF_A32B32G32R32I_2_G32R32I, + gcvSURF_A32B32G32R32I_2_G32R32F, + gcvSURF_X32B32G32R32I_3_A8R8G8B8, + gcvSURF_A32B32G32R32I_4_A8R8G8B8, + gcvSURF_X32B32G32R32UI_2_G32R32UI, + gcvSURF_A32B32G32R32UI_2_G32R32UI, + gcvSURF_A32B32G32R32UI_2_G32R32F, + gcvSURF_X32B32G32R32UI_3_A8R8G8B8, + gcvSURF_A32B32G32R32UI_4_A8R8G8B8, + gcvSURF_A2B10G10R10UI_1_A8R8G8B8, + gcvSURF_A8B8G8R8I_1_A8R8G8B8, + gcvSURF_A8B8G8R8UI_1_A8R8G8B8, + gcvSURF_R8I_1_A4R4G4B4, + gcvSURF_R8UI_1_A4R4G4B4, + gcvSURF_R16I_1_A4R4G4B4, + gcvSURF_R16UI_1_A4R4G4B4, + gcvSURF_R32I_1_A8R8G8B8, + gcvSURF_R32UI_1_A8R8G8B8, + gcvSURF_X8R8I_1_A4R4G4B4, + gcvSURF_X8R8UI_1_A4R4G4B4, + gcvSURF_G8R8I_1_A4R4G4B4, + gcvSURF_G8R8UI_1_A4R4G4B4, + gcvSURF_X16R16I_1_A4R4G4B4, + gcvSURF_X16R16UI_1_A4R4G4B4, + gcvSURF_G16R16I_1_A8R8G8B8, + gcvSURF_G16R16UI_1_A8R8G8B8, + gcvSURF_X32R32I_1_A8R8G8B8, + gcvSURF_X32R32UI_1_A8R8G8B8, + gcvSURF_X8G8R8I_1_A4R4G4B4, + gcvSURF_X8G8R8UI_1_A4R4G4B4, + gcvSURF_B8G8R8I_1_A8R8G8B8, + gcvSURF_B8G8R8UI_1_A8R8G8B8, + gcvSURF_B16G16R16I_2_A8R8G8B8, + gcvSURF_B16G16R16I_1_G32R32F, + gcvSURF_B16G16R16UI_2_A8R8G8B8, + gcvSURF_B16G16R16UI_1_G32R32F, + gcvSURF_B32G32R32I_3_A8R8G8B8, + gcvSURF_B32G32R32UI_3_A8R8G8B8, + gcvSURF_A16B16G16R16_2_A8R8G8B8, + gcvSURF_R8G8B8_1_A8R8G8B8, + gcvSURF_G16R16_1_A8R8G8B8, + gcvSURF_A2B10G10R10_1_A8R8G8B8, + gcvSURF_A2R10G10B10_1_A8R8G8B8, + gcvSURF_A2W10V10U10_1_A8R8G8B8, + + /* ASTC formats. 
*/ + gcvSURF_ASTC4x4 = 1600, + gcvSURF_ASTC5x4, + gcvSURF_ASTC5x5, + gcvSURF_ASTC6x5, + gcvSURF_ASTC6x6, + gcvSURF_ASTC8x5, + gcvSURF_ASTC8x6, + gcvSURF_ASTC8x8, + gcvSURF_ASTC10x5, + gcvSURF_ASTC10x6, + gcvSURF_ASTC10x8, + gcvSURF_ASTC10x10, + gcvSURF_ASTC12x10, + gcvSURF_ASTC12x12, + gcvSURF_ASTC4x4_SRGB, + gcvSURF_ASTC5x4_SRGB, + gcvSURF_ASTC5x5_SRGB, + gcvSURF_ASTC6x5_SRGB, + gcvSURF_ASTC6x6_SRGB, + gcvSURF_ASTC8x5_SRGB, + gcvSURF_ASTC8x6_SRGB, + gcvSURF_ASTC8x8_SRGB, + gcvSURF_ASTC10x5_SRGB, + gcvSURF_ASTC10x6_SRGB, + gcvSURF_ASTC10x8_SRGB, + gcvSURF_ASTC10x10_SRGB, + gcvSURF_ASTC12x10_SRGB, + gcvSURF_ASTC12x12_SRGB, + + /* Recompile format*/ + gcvSURF_L16_1_A4R4G4B4 = 1700, + gcvSURF_V16U16_1_A8R8G8B8, + gcvSURF_Q8W8V8U8_1_A8R8G8B8, + gcvSURF_X8L8V8U8_1_A8R8G8B8, + gcvSURF_R3G3B2_1_A8R8G8B8, + gcvSURF_A8R3G3B2_1_A8R8G8B8, + gcvSURF_W11V11U10_1_A8R8G8B8, + gcvSURF_Q16W16V16U16_2_A8R8G8B8, + gcvSURF_W11V11U10, + gcvSURF_V8U8_1_A4R4G4B4, + gcvSURF_A8B8G8R8_1_A8R8G8B8, + gcvSURF_A32R32G32B32_1_A8R8G8B8, + gcvSURF_X16B16G16R16F_1_A8R8G8B8, + gcvSURF_A16B16G16R16F_1_A8R8G8B8, + gcvSURF_G32R32F_1_A8R8G8B8, + gcvSURF_X32B32G32R32F_1_A8R8G8B8, + gcvSURF_A32B32G32R32F_1_A8R8G8B8, + gcvSURF_G32R32I_1_A8R8G8B8, + gcvSURF_G32R32UI_1_A8R8G8B8, + gcvSURF_A32B32G32R32I_1_A8R8G8B8, + gcvSURF_A32B32G32R32UI_1_A8R8G8B8, + gcvSURF_Q16W16V16U16_1_A8R8G8B8, + gcvSURF_A16B16G16R16_1_A8R8G8B8, + + /* Integer formats (2)) */ + gcvSURF_R10G10B10A2UI = 1800, + gcvSURF_R5G6B5UI, + gcvSURF_B5G6R5UI, + gcvSURF_R3G3B2UI, + gcvSURF_B2G3R3UI, + gcvSURF_R4G4B4A4UI, + gcvSURF_A4B4G4R4UI, + gcvSURF_R5G5B5A1UI, + gcvSURF_A1B5G5R5UI, + gcvSURF_R8G8B8A8UI, + + /* GL4 formats */ + gcvSURF_G8 = 1900, + gcvSURF_B8, + gcvSURF_G32F, + gcvSURF_B32F, + + /* Intensity formats */ + gcvSURF_I4 = 2000, + gcvSURF_I8, + gcvSURF_I12, + gcvSURF_I16, + + /* D3D typeless format*/ + gcvSURF_R32G32B32A32_TYPELESS = 2100, + gcvSURF_R32G32B32_TYPELESS, + gcvSURF_R16G16B16A16_TYPELESS, + gcvSURF_R32G32_TYPELESS, + gcvSURF_R32G8X24_TYPELESS, + gcvSURF_R32_FLOAT_X8X24_TYPELESS, + gcvSURF_X32_TYPELESS_G8X24_UINT, + gcvSURF_R10G10B10A2_TYPELESS, + gcvSURF_R8G8B8A8_TYPELESS, + gcvSURF_R16G16_TYPELESS, + gcvSURF_R32_TYPELESS, + gcvSURF_R24G8_TYPELESS, + gcvSURF_R24_UNORM_X8_TYPELESS, + gcvSURF_X24_TYPELESS_G8_UINT, + gcvSURF_R8G8_TYPELESS, + gcvSURF_R16_TYPELESS, + gcvSURF_R8_TYPELESS, + gcvSURF_BC1_TYPELESS, + gcvSURF_BC2_TYPELESS, + gcvSURF_BC3_TYPELESS, + gcvSURF_BC4_TYPELESS, + gcvSURF_BC5_TYPELESS, + gcvSURF_B8G8R8A8_TYPELESS, + gcvSURF_B8G8R8X8_TYPELESS, + gcvSURF_BC6H_TYPELESS, + gcvSURF_BC7_TYPELESS, +} gceSURF_FORMAT; + +/* Pipes. */ +typedef enum _gcePIPE_SELECT { + gcvPIPE_INVALID = ~0, + gcvPIPE_3D = 0, + gcvPIPE_2D +} gcePIPE_SELECT; + +/* Hardware type. */ +typedef enum _gceHARDWARE_TYPE { + gcvHARDWARE_INVALID, + gcvHARDWARE_3D2D, + gcvHARDWARE_3D, + gcvHARDWARE_2D, + gcvHARDWARE_VIP, + gcvHARDWARE_VG, + gcvHARDWARE_NUM_TYPES, +} gceHARDWARE_TYPE; + +/* User signal command codes. */ +typedef enum _gceUSER_SIGNAL_COMMAND_CODES { + gcvUSER_SIGNAL_CREATE, + gcvUSER_SIGNAL_DESTROY, + gcvUSER_SIGNAL_SIGNAL, + gcvUSER_SIGNAL_WAIT, + gcvUSER_SIGNAL_MAP, + gcvUSER_SIGNAL_UNMAP, +} gceUSER_SIGNAL_COMMAND_CODES; + +/* Shared buffer command codes. */ +typedef enum _gceSHBUF_COMMAND_CODES { + gcvSHBUF_CREATE, + gcvSHBUF_DESTROY, + gcvSHBUF_MAP, + gcvSHBUF_WRITE, + gcvSHBUF_READ, +} gceSHBUF_COMMAND_CODES; + +/* Event locations. 
*/ +typedef enum _gceKERNEL_WHERE { + gcvKERNEL_COMMAND, + gcvKERNEL_VERTEX, + gcvKERNEL_TRIANGLE, + gcvKERNEL_TEXTURE, + gcvKERNEL_PIXEL, + gcvKERNEL_BLT, +} gceKERNEL_WHERE; + +typedef enum _gceBLOCK { + gcvBLOCK_COMMAND, + gcvBLOCK_TESSELLATOR, + gcvBLOCK_TESSELLATOR2, + gcvBLOCK_TESSELLATOR3, + gcvBLOCK_RASTER, + gcvBLOCK_VG, + gcvBLOCK_VG2, + gcvBLOCK_VG3, + gcvBLOCK_PIXEL, + + /* Number of defined blocks. */ + gcvBLOCK_COUNT +} gceBLOCK; + +typedef enum _gceCORE_3D_MASK { + gcvCORE_3D_0_MASK = (1 << 0), + gcvCORE_3D_1_MASK = (1 << 1), + + gcvCORE_3D_ALL_MASK = (0xFFFF) +} gceCORE_3D_MASK; + +typedef enum _gceCORE_3D_ID { + gcvCORE_3D_0_ID = 0, + gcvCORE_3D_1_ID = 1, + + gcvCORE_3D_ID_INVALID = ~0UL +} gceCORE_3D_ID; + +typedef enum _gceCORE_2D_MASK { + gcvCORE_2D_0_MASK = (1 << 0), + gcvCORE_2D_1_MASK = (1 << 1), + gcvCORE_2D_2_MASK = (1 << 2), + gcvCORE_2D_3_MASK = (1 << 3), + + gcvCORE_2D_ALL_MASK = (0xFFFF) +} gceCORE_2D_MASK; + +typedef enum _gceCORE_2D_ID { + gcvCORE_2D_0_ID = 0, + gcvCORE_2D_1_ID = 1, + + gcvCORE_2D_ID_INVALID = ~0UL +} gceCORE_2D_ID; + + +typedef enum _gceCHIP_FLAG { + gcvCHIP_FLAG_MSAA_COHERENCEY_ECO_FIX = 1 << 0, + gcvCHIP_FLAG_GC2000_R2 = 1 << 1, + gcvCHIP_AXI_BUS128_BITS = 1 << 2, +} gceCHIP_FLAG; + +/* If different, choose render engine */ +#define PRIORITY_ENGINE(a, b) gcmMIN(a, b) + +typedef enum { + gcvENGINE_RENDER = 0, + gcvENGINE_BLT = 1, + gcvENGINE_GPU_ENGINE_COUNT = 2, + gcvENGINE_CPU = gcvENGINE_GPU_ENGINE_COUNT, + gcvENGINE_ALL_COUNT = gcvENGINE_CPU + 1, + gcvENGINE_INVALID = gcvENGINE_ALL_COUNT + 0x100 +} gceENGINE; + +/* CORE enum. */ +typedef enum _gceCORE { + gcvCORE_MAJOR, + gcvCORE_3D1, + gcvCORE_3D2, + gcvCORE_3D3, + gcvCORE_3D4, + gcvCORE_3D5, + gcvCORE_3D6, + gcvCORE_3D7, + gcvCORE_3D8, + gcvCORE_3D9, + gcvCORE_3D10, + gcvCORE_3D11, + gcvCORE_3D12, + gcvCORE_3D13, + gcvCORE_3D14, + gcvCORE_3D15, + gcvCORE_3D_MAX = gcvCORE_3D15, + gcvCORE_2D, + gcvCORE_2D1, + gcvCORE_2D2, + gcvCORE_2D3, + gcvCORE_2D_MAX = gcvCORE_2D3, + gcvCORE_VG, +#if gcdDEC_ENABLE_AHB + gcvCORE_DEC, +#endif + gcvCORE_COUNT +} gceCORE; + +#define gcdCORE_3D_COUNT (gcvCORE_3D_MAX + 1) +#define gcdCORE_2D_COUNT 4 + +#define gcdMAX_MAJOR_CORE_COUNT (gcvCORE_2D_MAX + 1) + +#ifndef gcdDEVICE_COUNT +#define gcdDEVICE_COUNT 2 +#endif + +#ifndef gcdPLATFORM_COUNT +#define gcdPLATFORM_COUNT gcdDEVICE_COUNT +#endif + +#ifndef gcvSRAM_EXT_COUNT +#define gcvSRAM_EXT_COUNT gcdDEVICE_COUNT +#endif + +#ifndef gcdSYSTEM_RESERVE_COUNT +#define gcdSYSTEM_RESERVE_COUNT gcdDEVICE_COUNT +#endif + +#ifndef gcdLOCAL_MEMORY_COUNT +#define gcdLOCAL_MEMORY_COUNT gcdDEVICE_COUNT +#endif + +#ifndef gcdGLOBAL_CORE_COUNT +#define gcdGLOBAL_CORE_COUNT (gcdDEVICE_COUNT * gcdCORE_3D_COUNT) +#endif + +#ifndef gcdGLOBAL_2D_COUNT +#define gcdGLOBAL_2D_COUNT gcdCORE_2D_COUNT +#endif + +#define gcdCHIP_COUNT gcdGLOBAL_CORE_COUNT + +typedef enum _gceSECURE_MODE { + /* For cores without gcvFEATURE_SECURITY. */ + gcvSECURE_NONE, + + /* Use registers added in gcvFEATURE_SECURITY in normal driver, + * In this mode, GPU always works under non secure mode and + * should not touch secure buffer. It is used to test basic function. + */ + gcvSECURE_IN_NORMAL, + + /* Make use of gcvFEATURE_SECURITY in trust application. */ + gcvSECURE_IN_TA +} gceSECURE_MODE; + +/* kernel driver compression option, as it's a system global option, + * it means kernel driver allows the options, NOT necessarily means it must be on. 
+ */ +typedef enum _gceCOMPRESSION_OPTION { + gcvCOMPRESSION_OPTION_NONE = 0x0, /* No any compression */ + gcvCOMPRESSION_OPTION_COLOR = 0x1, /* Compression for non-msaa color format */ + gcvCOMPRESSION_OPTION_DEPTH = 0x2, /* Compression for non-msaa depth format */ + gcvCOMPRESSION_OPTION_MSAA_COLOR = 0x4, /* Compression for msaa color */ + gcvCOMPRESSION_OPTION_MSAA_DEPTH = 0x8, /* Compression for msaa depth */ + + /* default compressio option */ + gcvCOMPRESSION_OPTION_DEFAULT = gcvCOMPRESSION_OPTION_DEPTH | + gcvCOMPRESSION_OPTION_COLOR | + gcvCOMPRESSION_OPTION_MSAA_COLOR | + gcvCOMPRESSION_OPTION_MSAA_DEPTH, +} gceCOMPRESSION_OPTION; + +typedef enum _gceSRAM_INTERNAL { + gcvSRAM_INTERNAL0 = 0, + gcvSRAM_INTERNAL1, + + gcvSRAM_INTER_COUNT +} gceSRAM_INTERNAL; + +typedef enum _gceFLATMAP_FLAG { + gcvFLATMAP_DIRECT, + gcvFLATMAP_SHIFT, +} gceFLATMAP_FLAG; + +/* Video memory alloation type. */ +typedef enum _gceVIDMEM_TYPE { + gcvVIDMEM_TYPE_GENERIC = gcvSURF_TYPE_UNKNOWN, + gcvVIDMEM_TYPE_INDEX_BUFFER = gcvSURF_INDEX, + gcvVIDMEM_TYPE_VERTEX_BUFFER = gcvSURF_VERTEX, + gcvVIDMEM_TYPE_TEXTURE = gcvSURF_TEXTURE, + gcvVIDMEM_TYPE_COLOR_BUFFER = gcvSURF_RENDER_TARGET, + gcvVIDMEM_TYPE_DEPTH_BUFFER = gcvSURF_DEPTH, + gcvVIDMEM_TYPE_BITMAP = gcvSURF_BITMAP, + gcvVIDMEM_TYPE_TILE_STATUS = gcvSURF_TILE_STATUS, + gcvVIDMEM_TYPE_IMAGE = gcvSURF_IMAGE, + gcvVIDMEM_TYPE_MASK = gcvSURF_MASK, + gcvVIDMEM_TYPE_SCISSOR = gcvSURF_SCISSOR, + gcvVIDMEM_TYPE_HZ_BUFFER = gcvSURF_HIERARCHICAL_DEPTH, + gcvVIDMEM_TYPE_ICACHE = gcvSURF_ICACHE, + gcvVIDMEM_TYPE_TXDESC = gcvSURF_TXDESC, + gcvVIDMEM_TYPE_FENCE = gcvSURF_FENCE, + gcvVIDMEM_TYPE_TFBHEADER = gcvSURF_TFBHEADER, + gcvVIDMEM_TYPE_COMMAND, + gcvVIDMEM_TYPE_COUNT +} gceVIDMEM_TYPE; + +typedef enum _gceTASK { + gcvTASK_LINK, + gcvTASK_CLUSTER, + gcvTASK_INCREMENT, + gcvTASK_DECREMENT, + gcvTASK_SIGNAL, + gcvTASK_LOCKDOWN, + gcvTASK_UNLOCK_VIDEO_MEMORY, + gcvTASK_FREE_VIDEO_MEMORY, + gcvTASK_FREE_CONTIGUOUS_MEMORY, +} gceTASK; + +/****************************************************************************** + ******************************** Status Codes ******************************** + ******************************************************************************/ + +typedef enum _gceSTATUS { + gcvSTATUS_OK = 0, + gcvSTATUS_FALSE = 0, + gcvSTATUS_TRUE = 1, + gcvSTATUS_NO_MORE_DATA = 2, + gcvSTATUS_CACHED = 3, + gcvSTATUS_MIPMAP_TOO_LARGE = 4, + gcvSTATUS_NAME_NOT_FOUND = 5, + gcvSTATUS_NOT_OUR_INTERRUPT = 6, + gcvSTATUS_MISMATCH = 7, + gcvSTATUS_MIPMAP_TOO_SMALL = 8, + gcvSTATUS_LARGER = 9, + gcvSTATUS_SMALLER = 10, + gcvSTATUS_CHIP_NOT_READY = 11, + gcvSTATUS_NEED_CONVERSION = 12, + gcvSTATUS_SKIP = 13, + gcvSTATUS_DATA_TOO_LARGE = 14, + gcvSTATUS_INVALID_CONFIG = 15, + gcvSTATUS_CHANGED = 16, + gcvSTATUS_NOT_SUPPORT_DITHER = 17, + gcvSTATUS_EXECUTED = 18, + gcvSTATUS_TERMINATE = 19, + + gcvSTATUS_INVALID_ARGUMENT = -1, + gcvSTATUS_INVALID_OBJECT = -2, + gcvSTATUS_OUT_OF_MEMORY = -3, + gcvSTATUS_MEMORY_LOCKED = -4, + gcvSTATUS_MEMORY_UNLOCKED = -5, + gcvSTATUS_HEAP_CORRUPTED = -6, + gcvSTATUS_GENERIC_IO = -7, + gcvSTATUS_INVALID_ADDRESS = -8, + gcvSTATUS_CONTEXT_LOSSED = -9, + gcvSTATUS_TOO_COMPLEX = -10, + gcvSTATUS_BUFFER_TOO_SMALL = -11, + gcvSTATUS_INTERFACE_ERROR = -12, + gcvSTATUS_NOT_SUPPORTED = -13, + gcvSTATUS_MORE_DATA = -14, + gcvSTATUS_TIMEOUT = -15, + gcvSTATUS_OUT_OF_RESOURCES = -16, + gcvSTATUS_INVALID_DATA = -17, + gcvSTATUS_INVALID_MIPMAP = -18, + gcvSTATUS_NOT_FOUND = -19, + gcvSTATUS_NOT_ALIGNED = -20, + gcvSTATUS_INVALID_REQUEST = 
-21, + gcvSTATUS_GPU_NOT_RESPONDING = -22, + gcvSTATUS_TIMER_OVERFLOW = -23, + gcvSTATUS_VERSION_MISMATCH = -24, + gcvSTATUS_LOCKED = -25, + gcvSTATUS_INTERRUPTED = -26, + gcvSTATUS_DEVICE = -27, + gcvSTATUS_NOT_MULTI_PIPE_ALIGNED = -28, + gcvSTATUS_OUT_OF_SAMPLER = -29, + gcvSTATUS_PROBE_LATER = -30, + gcvSTATUS_RESLUT_OVERFLOW = -31, + gcvSTATUS_RECOVERY = -32, + gcvSTATUS_CANCEL_JOB = -33, + + /* register allocation errors. */ + gcvSTATUS_OUT_OF_REG_FAIL = -100, + + /* Linker errors. */ + gcvSTATUS_GLOBAL_TYPE_MISMATCH = -1000, + gcvSTATUS_TOO_MANY_ATTRIBUTES = -1001, + gcvSTATUS_TOO_MANY_UNIFORMS = -1002, + gcvSTATUS_TOO_MANY_VARYINGS = -1003, + gcvSTATUS_UNDECLARED_VARYING = -1004, + gcvSTATUS_VARYING_TYPE_MISMATCH = -1005, + gcvSTATUS_MISSING_MAIN = -1006, + gcvSTATUS_NAME_MISMATCH = -1007, + gcvSTATUS_INVALID_INDEX = -1008, + gcvSTATUS_UNIFORM_MISMATCH = -1009, + gcvSTATUS_UNSAT_LIB_SYMBOL = -1010, + gcvSTATUS_TOO_MANY_SHADERS = -1011, + gcvSTATUS_LINK_INVALID_SHADERS = -1012, + gcvSTATUS_CS_NO_WORKGROUP_SIZE = -1013, + gcvSTATUS_LINK_LIB_ERROR = -1014, + + gcvSTATUS_SHADER_VERSION_MISMATCH = -1015, + gcvSTATUS_TOO_MANY_INSTRUCTION = -1016, + gcvSTATUS_SSBO_MISMATCH = -1017, + gcvSTATUS_TOO_MANY_OUTPUT = -1018, + gcvSTATUS_TOO_MANY_INPUT = -1019, + gcvSTATUS_NOT_SUPPORT_CL = -1020, + gcvSTATUS_NOT_SUPPORT_INTEGER = -1021, + gcvSTATUS_UNIFORM_TYPE_MISMATCH = -1022, + + gcvSTATUS_MISSING_PRIMITIVE_TYPE = -1023, + gcvSTATUS_MISSING_OUTPUT_VERTEX_COUNT = -1024, + gcvSTATUS_NON_INVOCATION_ID_AS_INDEX = -1025, + gcvSTATUS_INPUT_ARRAY_SIZE_MISMATCH = -1026, + gcvSTATUS_OUTPUT_ARRAY_SIZE_MISMATCH = -1027, + gcvSTATUS_LOCATION_ALIASED = -1028, + gcvSTATUS_LOCATION_OVERLAP = -1029, + gcvSTATUS_LOCATION_NOTCONSISTENT = -1030, + + /* Compiler errors. */ + gcvSTATUS_COMPILER_FE_PREPROCESSOR_ERROR = -2000, + gcvSTATUS_COMPILER_FE_PARSER_ERROR = -2001, + + /* Recompilation Errors */ + gcvSTATUS_RECOMPILER_CONVERT_UNIMPLEMENTED = -3000, +} gceSTATUS; + +/* The patch types. */ +enum _gceHAL_PATCH_TYPE { + gcvHAL_PATCH_VIDMEM_ADDRESS = 1, + gcvHAL_PATCH_MCFE_SEMAPHORE, + gcvHAL_PATCH_VIDMEM_TIMESTAMP, + + /* Must be the last one for counting. */ + gcvHAL_PATCH_TYPE_COUNT, +}; + +/****************************************************************************** + ******************************** Command Codes ******************************* + ******************************************************************************/ + +typedef enum _gceHAL_COMMAND_CODES { + /*************** Common ***************/ + + /* Chip info: count, type and so on. */ + gcvHAL_CHIP_INFO, + + /* HAL driver version. */ + gcvHAL_VERSION, + + /* Query chip id and options. */ + gcvHAL_QUERY_CHIP_IDENTITY, + gcvHAL_QUERY_CHIP_OPTION, + + /* Query chip frequency, used by CL. */ + gcvHAL_QUERY_CHIP_FREQUENCY, + + /* Query system pool video memory, used by CL. */ + gcvHAL_QUERY_VIDEO_MEMORY, + + /* Memory management. */ + gcvHAL_ALLOCATE_LINEAR_VIDEO_MEMORY, + gcvHAL_WRAP_USER_MEMORY, + gcvHAL_RELEASE_VIDEO_MEMORY, + gcvHAL_LOCK_VIDEO_MEMORY, + gcvHAL_UNLOCK_VIDEO_MEMORY, + gcvHAL_BOTTOM_HALF_UNLOCK_VIDEO_MEMORY, + gcvHAL_MAP_MEMORY, + gcvHAL_UNMAP_MEMORY, + + /* Cache operations. */ + gcvHAL_CACHE, + + /* HAL user attach and detach. */ + gcvHAL_ATTACH, + gcvHAL_DETACH, + + /* Event commit. */ + gcvHAL_EVENT_COMMIT, + + /* User command commit. */ + gcvHAL_COMMIT, + + /* Set hardware timeout, used by CL. */ + gcvHAL_SET_TIMEOUT, + + /* User signal operations. */ + gcvHAL_USER_SIGNAL, + + /* Event signal, commit stall. 
*/ + gcvHAL_SIGNAL, + + /* Profile related. */ + gcvHAL_SET_PROFILE_SETTING, + gcvHAL_READ_PROFILER_REGISTER_SETTING, + gcvHAL_READ_ALL_PROFILE_REGISTERS_PART1, + gcvHAL_READ_ALL_PROFILE_REGISTERS_PART2, + + /* Query process database info when debug trace and proflie. */ + gcvHAL_DATABASE, + + /* Power management enable/disable. */ + gcvHAL_CONFIG_POWER_MANAGEMENT, + + /* Debug/dump feature. */ + gcvHAL_DEBUG_DUMP, + + /*************** Common end ***************/ + + /*************** GPU only ***************/ + /* Register operations, 2D only. */ + gcvHAL_READ_REGISTER, + gcvHAL_WRITE_REGISTER, + gcvHAL_PROFILE_REGISTERS_2D, + + /* Get base address for old mmu. */ + gcvHAL_GET_BASE_ADDRESS, + + /* Read frame database, 3D only. */ + gcvHAL_GET_FRAME_INFO, + + /* Set video memory meta data. */ + gcvHAL_SET_VIDEO_MEMORY_METADATA, + + /* Query command buffer, VG only. */ + gcvHAL_QUERY_COMMAND_BUFFER, + + /* Reset time stamp. */ + gcvHAL_QUERY_RESET_TIME_STAMP, + + /* Create native fence. */ + gcvHAL_CREATE_NATIVE_FENCE, + + /* Wait native fence. */ + gcvHAL_WAIT_NATIVE_FENCE, + + /* Wait until GPU finishes access to a resource. */ + gcvHAL_WAIT_FENCE, + + /* Video memory node operations. */ + gcvHAL_EXPORT_VIDEO_MEMORY, + gcvHAL_NAME_VIDEO_MEMORY, + gcvHAL_IMPORT_VIDEO_MEMORY, + + /* Mutex Operation. */ + gcvHAL_DEVICE_MUTEX, + /*************** GPU only end ***************/ + + /*************** DEC only ***************/ + /* DEC200 test. */ + gcvHAL_DEC200_TEST, + + /* DEC300 related operations. */ + gcvHAL_DEC300_READ, + gcvHAL_DEC300_WRITE, + gcvHAL_DEC300_FLUSH, + gcvHAL_DEC300_FLUSH_WAIT, + /*************** DEC only end ***************/ + + /*************** OS specific ***************/ + + /* Android gralloc: shared buffer operations. */ + gcvHAL_SHBUF, + + /* Android gralloc: get graphic buffer fd. */ + gcvHAL_GET_GRAPHIC_BUFFER_FD, + + /* Vsimulator only. */ + gcvHAL_UPDATE_DEBUG_CALLBACK, + gcvHAL_CONFIG_CTX_FRAMEWORK, + + /* Non paged memory management backup compatibility, windows, qnx. */ + gcvHAL_ALLOCATE_NON_PAGED_MEMORY, + gcvHAL_FREE_NON_PAGED_MEMORY, + + /* Write user data, windows only. */ + gcvHAL_WRITE_DATA, + + /*************** OS specific end ***************/ + + /*************** Reserved ***************/ + /* Access APB register. */ + gcvHAL_APB_AXIFE_ACCESS, + + /* Trigger a software reset. */ + gcvHAL_RESET, + + /* Command commit done, preemption only. */ + gcvHAL_COMMIT_DONE, + + /* Get video memory file description. */ + gcvHAL_GET_VIDEO_MEMORY_FD, + + /* Get profile setting. */ + gcvHAL_GET_PROFILE_SETTING, + + /* Read/Write register ex. */ + gcvHAL_READ_REGISTER_EX, + gcvHAL_WRITE_REGISTER_EX, + + /* Power management state. */ + gcvHAL_SET_POWER_MANAGEMENT_STATE, + gcvHAL_QUERY_POWER_MANAGEMENT_STATE, + + /* Query CPU frequency. */ + gcvHAL_QUERY_CPU_FREQUENCY, + + /* Dump HW register state. */ + gcvHAL_DUMP_GPU_STATE, + + /* Sync video memory for special memory pool */ + gcvHAL_SYNC_VIDEO_MEMORY, + + /* Cancel Job. */ + gcvHAL_CANCEL_JOB, + + /* Timer. */ + gcvHAL_TIMESTAMP, + + /* FSCALE_VAL. */ + gcvHAL_SET_FSCALE_VALUE, + gcvHAL_GET_FSCALE_VALUE, + + /* Destroy MMU. 
*/ + gcvHAL_DESTROY_MMU, + + /* Operate fence from user*/ + gcvHAL_FENCE_OP, + /*************** Reserved end ***************/ +} gceHAL_COMMAND_CODES; + +/****************************************************************************** + ******************************* gcsOBJECT Object ***************************** + ******************************************************************************/ + +/* Macro to combine four characters into a Charcater Code. */ +#define gcmCC(c1, c2, c3, c4) \ +(\ + (char)(c1) | \ + ((char)(c2) << 8) | \ + ((char)(c3) << 16) | \ + ((char)(c4) << 24) \ +) + +/* Type of objects. */ +typedef enum _gceOBJECT_TYPE { + gcvOBJ_UNKNOWN = 0, + gcvOBJ_2D = gcmCC('2', 'D', ' ', ' '), + gcvOBJ_3D = gcmCC('3', 'D', ' ', ' '), + gcvOBJ_ATTRIBUTE = gcmCC('A', 'T', 'T', 'R'), + gcvOBJ_BRUSHCACHE = gcmCC('B', 'R', 'U', '$'), + gcvOBJ_BRUSHNODE = gcmCC('B', 'R', 'U', 'n'), + gcvOBJ_BRUSH = gcmCC('B', 'R', 'U', 'o'), + gcvOBJ_BUFFER = gcmCC('B', 'U', 'F', 'R'), + gcvOBJ_COMMAND = gcmCC('C', 'M', 'D', ' '), + gcvOBJ_COMMANDBUFFER = gcmCC('C', 'M', 'D', 'B'), + gcvOBJ_CONTEXT = gcmCC('C', 'T', 'X', 'T'), + gcvOBJ_DEVICE = gcmCC('D', 'E', 'V', ' '), + gcvOBJ_DUMP = gcmCC('D', 'U', 'M', 'P'), + gcvOBJ_EVENT = gcmCC('E', 'V', 'N', 'T'), + gcvOBJ_FUNCTION = gcmCC('F', 'U', 'N', 'C'), + gcvOBJ_HAL = gcmCC('H', 'A', 'L', ' '), + gcvOBJ_HARDWARE = gcmCC('H', 'A', 'R', 'D'), + gcvOBJ_HEAP = gcmCC('H', 'E', 'A', 'P'), + gcvOBJ_INDEX = gcmCC('I', 'N', 'D', 'X'), + gcvOBJ_INTERRUPT = gcmCC('I', 'N', 'T', 'R'), + gcvOBJ_KERNEL = gcmCC('K', 'E', 'R', 'N'), + gcvOBJ_KERNEL_FUNCTION = gcmCC('K', 'F', 'C', 'N'), + gcvOBJ_MEMORYBUFFER = gcmCC('M', 'E', 'M', 'B'), + gcvOBJ_MMU = gcmCC('M', 'M', 'U', ' '), + gcvOBJ_OS = gcmCC('O', 'S', ' ', ' '), + gcvOBJ_OUTPUT = gcmCC('O', 'U', 'T', 'P'), + gcvOBJ_PAINT = gcmCC('P', 'N', 'T', ' '), + gcvOBJ_PATH = gcmCC('P', 'A', 'T', 'H'), + gcvOBJ_QUEUE = gcmCC('Q', 'U', 'E', ' '), + gcvOBJ_SAMPLER = gcmCC('S', 'A', 'M', 'P'), + gcvOBJ_SHADER = gcmCC('S', 'H', 'D', 'R'), + gcvOBJ_VIR_SHADER = gcmCC('V', 'S', 'D', 'R'), + gcvOBJ_STREAM = gcmCC('S', 'T', 'R', 'M'), + gcvOBJ_SURF = gcmCC('S', 'U', 'R', 'F'), + gcvOBJ_TEXTURE = gcmCC('T', 'X', 'T', 'R'), + gcvOBJ_UNIFORM = gcmCC('U', 'N', 'I', 'F'), + gcvOBJ_VARIABLE = gcmCC('V', 'A', 'R', 'I'), + gcvOBJ_VERTEX = gcmCC('V', 'R', 'T', 'X'), + gcvOBJ_VIDMEM = gcmCC('V', 'M', 'E', 'M'), + gcvOBJ_VIDMEM_BLOCK = gcmCC('V', 'M', 'B', 'K'), + gcvOBJ_VG = gcmCC('V', 'G', ' ', ' '), + gcvOBJ_BUFOBJ = gcmCC('B', 'U', 'F', 'O'), + gcvOBJ_UNIFORM_BLOCK = gcmCC('U', 'B', 'L', 'K'), + gcvOBJ_CL = gcmCC('C', 'L', ' ', ' '), + gcvOBJ_STORAGE_BLOCK = gcmCC('S', 'B', 'L', 'K'), + gcvOBJ_IO_BLOCK = gcmCC('I', 'O', 'B', 'K'), +} gceOBJECT_TYPE; + +/* Video memory pool type. */ +typedef enum _gcePOOL { + gcvPOOL_UNKNOWN = 0, + gcvPOOL_DEFAULT, + gcvPOOL_LOCAL, + gcvPOOL_LOCAL_INTERNAL, + gcvPOOL_LOCAL_EXTERNAL, + gcvPOOL_UNIFIED, + gcvPOOL_SYSTEM, + gcvPOOL_VIRTUAL, + gcvPOOL_USER, + gcvPOOL_INTERNAL_SRAM, + gcvPOOL_EXTERNAL_SRAM, + gcvPOOL_LOCAL_EXCLUSIVE, + gcvPOOL_SYSTEM_32BIT_VA, + + gcvPOOL_NUMBER_OF_POOLS +} gcePOOL; + +typedef enum _gceDUMP_BUFFER_TYPE { + gcvDUMP_BUFFER_USER_STRING, + gcvDUMP_BUFFER_VERIFY, + + gcvDUMP_BUFFER_MEMORY, + gcvDUMP_BUFFER_TEXTURE, + gcvDUMP_BUFFER_STREAM, + gcvDUMP_BUFFER_INDEX, + gcvDUMP_BUFFER_BUFOBJ, + gcvDUMP_BUFFER_IMAGE, + /* A type of command, but should not execute directly. 
*/ + gcvDUMP_BUFFER_INSTRUCTION, + gcvDUMP_BUFFER_CONTEXT, + gcvDUMP_BUFFER_COMMAND, + gcvDUMP_BUFFER_ASYNC_COMMAND, + gcvDUMP_BUFFER_USER_TYPE_LAST = gcvDUMP_BUFFER_ASYNC_COMMAND, + + gcvDUMP_BUFFER_KERNEL_CONTEXT, + gcvDUMP_BUFFER_KERNEL_COMMAND, + + gcvDUMP_BUFFER_PHYSICAL_MEMORY, + + gcvDUMP_BUFFER_TYPE_COUNT, +} gceDUMP_BUFFER_TYPE; + +typedef enum _gceLOCK_VIDEO_MEMORY_OP { + gcvLOCK_VIDEO_MEMORY_OP_NONE = 0x00, + gcvLOCK_VIDEO_MEMORY_OP_LOCK = 0x01, + gcvLOCK_VIDEO_MEMORY_OP_MAP = 0x02, + gcvLOCK_VIDEO_MEMORY_OP_UNLOCK = 0x04, + gcvLOCK_VIDEO_MEMORY_OP_UNMAP = 0x08, +} gceLOCK_VIDEO_MEMORY_OP; + +typedef enum _gceSIGNAL_STATUS { + gcvSIGNAL_OK = 0, + gcvSIGNAL_RECOVERY, + gcvSIGNAL_CANCEL, +} gceSIGNAL_STATUS; + +#if gcdENABLE_VIDEO_MEMORY_MIRROR +typedef enum _gceSYNC_MEMORY_DIRECTION { + gcvSYNC_MEMORY_DIRECTION_NONE = 0, + gcvSYNC_MEMORY_DIRECTION_LOCAL_TO_SYSTEM, + gcvSYNC_MEMORY_DIRECTION_SYSTEM_TO_LOCAL, +} gceSYNC_MEMORY_DIRECTION; + +typedef enum _gceMIRROR_TYPE { + gcvMIRROR_TYPE_NONE = 0, + gcvMIRROR_TYPE_LOCAL_MEMORY_MIRROR, + gcvMIRROR_TYPE_SYSTEM_MEMORY_MIRROR, +} gceMIRROR_TYPE; +#endif + +typedef enum _gceProfilerMode { + gcvPROFILER_UNKNOWN_MODE = 0, + gcvPROFILER_PROBE_MODE, + gcvPROFILER_AHB_MODE, +} gceProfilerMode; + +typedef enum _gceProbeMode { + gcvPROFILER_UNKNOWN_PROBE = 0, + gcvPROFILER_GPU_PROBE, + gcvPROFILER_VIP_PROBE, +} gceProbeMode; + +typedef enum _gceMULTI_PROCESSOR_MODE { + gcvMP_MODE_COMBINED = 0, + gcvMP_MODE_INDEPENDENT = 1 +} gceMULTI_PROCESSOR_MODE; + +typedef enum _gceSwitchMpMode { + gcvMP_MODE_NO_SWITCH = 0, + gcvMP_MODE_SWITCH_TO_SINGLE, + gcvMP_MODE_SWITCH_TO_MULTI, +} gceSwitchMpMode; + +#ifdef __cplusplus +} +#endif + +#endif /* __gc_hal_enum_shared_h_ */ + + diff --git a/unified-tina/inc/HAL/shared/gc_hal_profiler_shared.h b/unified-tina/inc/HAL/shared/gc_hal_profiler_shared.h new file mode 100644 index 0000000..d2f3b03 --- /dev/null +++ b/unified-tina/inc/HAL/shared/gc_hal_profiler_shared.h @@ -0,0 +1,346 @@ +/**************************************************************************** +* +* Copyright (c) 2005 - 2023 by Vivante Corp. All rights reserved. +* +* The material in this file is confidential and contains trade secrets +* of Vivante Corporation. This is proprietary information owned by +* Vivante Corporation. No part of this work may be disclosed, +* reproduced, copied, transmitted, or used in any way for any purpose, +* without the express written permission of Vivante Corporation. +* +*****************************************************************************/ + + +#ifndef __gc_hal_profiler_shared_h_ +#define __gc_hal_profiler_shared_h_ + +#ifdef __cplusplus +extern "C" { +#endif + +#define ANDROID_PROFILER_COUNTERS 1 +#define FPGA_INFO 0 +#define RECORD_COUNTER_ADDRESS 0 + +/* HW profile information. 
*/ +typedef struct _gcsPROFILER_COUNTERS_PART1 { + gctUINT32 gpuTotalRead64BytesPerFrame; + gctUINT32 gpuTotalWrite64BytesPerFrame; + + /* FE */ + gctUINT32 fe_draw_count; + gctUINT32 fe_out_vertex_count; + gctUINT32 fe_cache_miss_count; + gctUINT32 fe_cache_lk_count; + gctUINT32 fe_stall_count; + gctUINT32 fe_starve_count; + gctUINT32 fe_process_count; + + /* PE */ + gctUINT32 pe0_pixel_count_killed_by_color_pipe; + gctUINT32 pe0_pixel_count_killed_by_depth_pipe; + gctUINT32 pe0_pixel_count_drawn_by_color_pipe; + gctUINT32 pe0_pixel_count_drawn_by_depth_pipe; + gctUINT32 pe1_pixel_count_killed_by_color_pipe; + gctUINT32 pe1_pixel_count_killed_by_depth_pipe; + gctUINT32 pe1_pixel_count_drawn_by_color_pipe; + gctUINT32 pe1_pixel_count_drawn_by_depth_pipe; + + /* SH */ + gctUINT32 shader_cycle_count; + gctUINT32 vs_shader_cycle_count; + gctUINT32 ps_shader_cycle_count; + gctUINT32 ps_inst_counter; + gctUINT32 ps_rendered_pixel_counter; + gctUINT32 vs_inst_counter; + gctUINT32 vs_rendered_vertice_counter; + gctUINT32 vs_branch_inst_counter; + gctUINT32 vs_texld_inst_counter; + gctUINT32 ps_branch_inst_counter; + gctUINT32 ps_texld_inst_counter; + gctUINT32 vs_non_idle_starve_count; + gctUINT32 vs_starve_count; + gctUINT32 vs_stall_count; + gctUINT32 vs_process_count; + gctUINT32 ps_non_idle_starve_count; + gctUINT32 ps_starve_count; + gctUINT32 ps_stall_count; + gctUINT32 ps_process_count; + + /* PA */ + gctUINT32 pa_input_vtx_counter; + gctUINT32 pa_input_prim_counter; + gctUINT32 pa_output_prim_counter; + gctUINT32 pa_depth_clipped_counter; + gctUINT32 pa_trivial_rejected_counter; + gctUINT32 pa_culled_prim_counter; + gctUINT32 pa_droped_prim_counter; + gctUINT32 pa_frustum_clipped_prim_counter; + gctUINT32 pa_frustum_clipdroped_prim_counter; + gctUINT32 pa_non_idle_starve_count; + gctUINT32 pa_starve_count; + gctUINT32 pa_stall_count; + gctUINT32 pa_process_count; + + /* SE */ + gctUINT32 se_culled_triangle_count; + gctUINT32 se_culled_lines_count; + gctUINT32 se_clipped_triangle_count; + gctUINT32 se_clipped_line_count; + gctUINT32 se_starve_count; + gctUINT32 se_stall_count; + gctUINT32 se_receive_triangle_count; + gctUINT32 se_send_triangle_count; + gctUINT32 se_receive_lines_count; + gctUINT32 se_send_lines_count; + gctUINT32 se_process_count; + gctUINT32 se_trivial_rejected_line_count; + gctUINT32 se_non_idle_starve_count; + + /* RA */ + gctUINT32 ra_input_prim_count; + gctUINT32 ra_total_quad_count; + gctUINT32 ra_valid_quad_count_after_early_z; + gctUINT32 ra_valid_pixel_count_to_render; + gctUINT32 ra_output_valid_quad_count; + gctUINT32 ra_output_valid_pixel_count; + gctUINT32 ra_pipe_cache_miss_counter; + gctUINT32 ra_pipe_hz_cache_miss_counter; + gctUINT32 ra_prefetch_cache_miss_counter; + gctUINT32 ra_prefetch_hz_cache_miss_counter; + gctUINT32 ra_eez_culled_counter; + gctUINT32 ra_non_idle_starve_count; + gctUINT32 ra_starve_count; + gctUINT32 ra_stall_count; + gctUINT32 ra_process_count; + + /* TX */ + gctUINT32 tx_total_bilinear_requests; + gctUINT32 tx_total_trilinear_requests; + gctUINT32 tx_total_discarded_texture_requests; + gctUINT32 tx_total_texture_requests; + gctUINT32 tx_mc0_miss_count; + gctUINT32 tx_mc0_request_byte_count; + gctUINT32 tx_mc1_miss_count; + gctUINT32 tx_mc1_request_byte_count; + gctUINT32 tx_non_idle_starve_count; + gctUINT32 tx_starve_count; + gctUINT32 tx_stall_count; + gctUINT32 tx_process_count; +} gcsPROFILER_COUNTERS_PART1; + +typedef struct _gcsPROFILER_COUNTERS_PART2 { + /* MCC */ + gctUINT32 mcc_total_read_req_8B_from_colorpipe; + gctUINT32 
mcc_total_read_req_8B_sentout_from_colorpipe; + gctUINT32 mcc_total_write_req_8B_from_colorpipe; + gctUINT32 mcc_total_read_req_sentout_from_colorpipe; + gctUINT32 mcc_total_write_req_from_colorpipe; + gctUINT32 mcc_total_read_req_8B_from_depthpipe; + gctUINT32 mcc_total_read_req_8B_sentout_from_depthpipe; + gctUINT32 mcc_total_write_req_8B_from_depthpipe; + gctUINT32 mcc_total_read_req_sentout_from_depthpipe; + gctUINT32 mcc_total_write_req_from_depthpipe; + gctUINT32 mcc_total_read_req_8B_from_others; + gctUINT32 mcc_total_write_req_8B_from_others; + gctUINT32 mcc_total_read_req_from_others; + gctUINT32 mcc_total_write_req_from_others; + gctUINT32 mcc_axi_total_latency; + gctUINT32 mcc_axi_sample_count; + gctUINT32 mcc_axi_max_latency; + gctUINT32 mcc_axi_min_latency; + gctUINT32 mc_fe_read_bandwidth; + gctUINT32 mc_mmu_read_bandwidth; + gctUINT32 mc_blt_read_bandwidth; + gctUINT32 mc_sh0_read_bandwidth; + gctUINT32 mc_sh1_read_bandwidth; + gctUINT32 mc_pe_write_bandwidth; + gctUINT32 mc_blt_write_bandwidth; + gctUINT32 mc_sh0_write_bandwidth; + gctUINT32 mc_sh1_write_bandwidth; + + /* MCZ */ + gctUINT32 mcz_total_read_req_8B_from_colorpipe; + gctUINT32 mcz_total_read_req_8B_sentout_from_colorpipe; + gctUINT32 mcz_total_write_req_8B_from_colorpipe; + gctUINT32 mcz_total_read_req_sentout_from_colorpipe; + gctUINT32 mcz_total_write_req_from_colorpipe; + gctUINT32 mcz_total_read_req_8B_from_depthpipe; + gctUINT32 mcz_total_read_req_8B_sentout_from_depthpipe; + gctUINT32 mcz_total_write_req_8B_from_depthpipe; + gctUINT32 mcz_total_read_req_sentout_from_depthpipe; + gctUINT32 mcz_total_write_req_from_depthpipe; + gctUINT32 mcz_total_read_req_8B_from_others; + gctUINT32 mcz_total_write_req_8B_from_others; + gctUINT32 mcz_total_read_req_from_others; + gctUINT32 mcz_total_write_req_from_others; + gctUINT32 mcz_axi_total_latency; + gctUINT32 mcz_axi_sample_count; + gctUINT32 mcz_axi_max_latency; + gctUINT32 mcz_axi_min_latency; + + /* HI */ + gctUINT32 hi0_total_read_8B_count; + gctUINT32 hi0_total_write_8B_count; + gctUINT32 hi0_total_read_request_count; + gctUINT32 hi0_total_write_request_count; + gctUINT32 hi0_axi_cycles_read_request_stalled; + gctUINT32 hi0_axi_cycles_write_request_stalled; + gctUINT32 hi0_axi_cycles_write_data_stalled; + gctUINT32 hi1_total_read_8B_count; + gctUINT32 hi1_total_write_8B_count; + gctUINT32 hi1_total_read_request_count; + gctUINT32 hi1_total_write_request_count; + gctUINT32 hi1_axi_cycles_read_request_stalled; + gctUINT32 hi1_axi_cycles_write_request_stalled; + gctUINT32 hi1_axi_cycles_write_data_stalled; + gctUINT32 hi_total_cycle_count; + gctUINT32 hi_total_idle_cycle_count; + gctUINT32 hi_total_read_8B_count; + gctUINT32 hi_total_write_8B_count; + gctUINT32 hi_total_readOCB_16B_count; + gctUINT32 hi_total_writeOCB_16B_count; + + /* L2 */ + gctUINT32 l2_total_axi0_read_request_count; + gctUINT32 l2_total_axi1_read_request_count; + gctUINT32 l2_total_axi0_write_request_count; + gctUINT32 l2_total_axi1_write_request_count; + gctUINT32 l2_total_read_transactions_request_by_axi0; + gctUINT32 l2_total_read_transactions_request_by_axi1; + gctUINT32 l2_total_write_transactions_request_by_axi0; + gctUINT32 l2_total_write_transactions_request_by_axi1; + gctUINT32 l2_axi0_minmax_latency; + gctUINT32 l2_axi0_min_latency; + gctUINT32 l2_axi0_max_latency; + gctUINT32 l2_axi0_total_latency; + gctUINT32 l2_axi0_total_request_count; + gctUINT32 l2_axi1_minmax_latency; + gctUINT32 l2_axi1_min_latency; + gctUINT32 l2_axi1_max_latency; + gctUINT32 l2_axi1_total_latency; + 
gctUINT32 l2_axi1_total_request_count; +} gcsPROFILER_COUNTERS_PART2; + +typedef struct _gcsPROFILER_COUNTERS { + gcsPROFILER_COUNTERS_PART1 counters_part1; + gcsPROFILER_COUNTERS_PART2 counters_part2; +} gcsPROFILER_COUNTERS; + +typedef enum _gceVIP_PROBE_COUNTER { + gcvVIP_PROBE_COUNTER_NEURAL_NET, + gcvVIP_PROBE_COUNTER_TENSOR_PROCESSOR, + gcvVIP_PROBE_COUNTER_COUNT +} gceVIP_PROBE_COUNTER; + +/* Mask definations for overflow indicator of TP */ +typedef enum _gceTPCOUNTER_OVERFLOW { + gcvTPCOUNTER_LAYER_ID_OVERFLOW = (1 << 0), + gcvTPCOUNTER_TOTAL_BUSY_CYCLE_OVERFLOW = (1 << 1), + gcvTPCOUNTER_TOTAL_READ_BW_DDR_OVERFLOW = (1 << 2), + gcvTPCOUNTER_TOTAL_WRITE_BW_DDR_OVERFLOW = (1 << 3), + gcvTPCOUNTER_TOTAL_READ_BW_SRAM_OVERFLOW = (1 << 4), + gcvTPCOUNTER_TOTAL_WRITE_BW_SRAM_OVERFLOW = (1 << 5), + gcvTPCOUNTER_TOTAL_READ_BW_OCB_OVERFLOW = (1 << 6), + gcvTPCOUNTER_TOTAL_WRITE_BW_OCB_OVERFLOW = (1 << 7), + gcvTPCOUNTER_FC_PIX_CNT_OVERFLOW = (1 << 8), + gcvTPCOUNTER_FC_ZERO_SKIP_OVERFLOW = (1 << 9), + gcvTPCOUNTER_FC_COEF_CNT_OVERFLOW = (1 << 10), + gcvTPCOUNTER_FC_COEF_ZERO_CNT_OVERFLOW = (1 << 11), + gcvTPCOUNTER_TOTAL_IDLE_CYCLE_CORE0_OVERFLOW = (1 << 0), + gcvTPCOUNTER_TOTAL_IDLE_CYCLE_CORE1_OVERFLOW = (1 << 1), + gcvTPCOUNTER_TOTAL_IDLE_CYCLE_CORE2_OVERFLOW = (1 << 2), + gcvTPCOUNTER_TOTAL_IDLE_CYCLE_CORE3_OVERFLOW = (1 << 3), +} _gceTPCOUNTER_OVERFLOW; + +/* Mask definations for overflow indicator of NN */ +typedef enum _gceNNCOUNTER_OVERFLOW { + gcvNNCOUNTER_TOTAL_BUSY_CYCLE_OVERFLOW = (1 << 0), + gcvNNCOUNTER_TOTAL_READ_CYCLE_DDR_OVERFLOW = (1 << 2), + gcvNNCOUNTER_TOTAL_READ_BW_DDR_OVERFLOW = (1 << 3), + gcvNNCOUNTER_TOTAL_WRITE_CYCLE_DDR_OVERFLOW = (1 << 4), + gcvNNCOUNTER_TOTAL_WRITE_BW_DDR_OVERFLOW = (1 << 5), + gcvNNCOUNTER_TOTAL_READ_SYCLE_SRAM_OVERFLOW = (1 << 6), + gcvNNCOUNTER_TOTAL_WRITE_CYCLE_SRAM_OVERFLOW = (1 << 7), + gcvNNCOUNTER_TOTAL_MAC_CYCLE_OVERFLOW = (1 << 8), + gcvNNCOUNTER_TOTAL_MAC_COUNT_OVERFLOW = (1 << 9), + gcvNNCOUNTER_ZERO_COEF_SKIP_COUNT_OVERFLOW = (1 << 10), + gcvNNCOUNTER_NON_ZERO_COEF_COUNT_OVERFLOW = (1 << 11), +} _gceNNCOUNTER_OVERFLOW; + +#define MODULE_NN_RESERVED_COUNTER_NUM 0x9 +typedef struct _gcsPROFILER_VIP_PROBE_COUNTERS { + /* NN */ + gctUINT32 nn_layer_id; + gctUINT32 nn_layer_id_overflow; + gctUINT32 nn_instr_info; + gctUINT32 nn_total_busy_cycle; + gctUINT32 nn_total_busy_cycle_overflow; + gctUINT32 nn_total_read_cycle_ddr; + gctUINT32 nn_total_read_cycle_ddr_overflow; + gctUINT32 nn_total_read_valid_bandwidth_ddr; + gctUINT32 nn_total_read_valid_bandwidth_ddr_overflow; + gctUINT32 nn_total_write_cycle_ddr; + gctUINT32 nn_total_write_cycle_ddr_overflow; + gctUINT32 nn_total_write_valid_bandwidth_ddr; + gctUINT32 nn_total_write_valid_bandwidth_ddr_overflow; + gctUINT32 nn_total_read_cycle_sram; + gctUINT32 nn_total_read_cycle_sram_overflow; + gctUINT32 nn_total_write_cycle_sram; + gctUINT32 nn_total_write_cycle_sram_overflow; + gctUINT32 nn_total_mac_cycle; + gctUINT32 nn_total_mac_cycle_overflow; + gctUINT32 nn_total_mac_count; + gctUINT32 nn_total_mac_count_overflow; + gctUINT32 nn_zero_coef_skip_count; + gctUINT32 nn_zero_coef_skip_count_overflow; + gctUINT32 nn_non_zero_coef_count; + gctUINT32 nn_non_zero_coef_count_overflow; + + gctUINT32 nn_reserved_counter[4 * MODULE_NN_RESERVED_COUNTER_NUM]; + gctUINT32 nn_total_idle_cycle_core_overflow[4]; + gctUINT32 nn_total_idle_cycle_core[32]; + + /* TP */ + gctUINT32 tp_layer_id; + gctUINT32 tp_layer_id_overflow; + gctUINT32 tp_total_busy_cycle; + gctUINT32 tp_total_busy_cycle_overflow; + + 
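+    /*
+     * Sketch only: how the driver populates these counters is not defined in
+     * this header, and the variable names below are hypothetical. A reader
+     * would typically check either the per-counter *_overflow fields or a raw
+     * indicator word against the gcvTPCOUNTER_* / gcvNNCOUNTER_* masks above
+     * before trusting a value, e.g.
+     *
+     *     if (vipCounters.tp_total_busy_cycle_overflow ||
+     *         (tpIndicator & gcvTPCOUNTER_TOTAL_BUSY_CYCLE_OVERFLOW))
+     *         tpBusyCycleValid = gcvFALSE;
+     */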
gctUINT32 tp_total_read_bandwidth_cache; + gctUINT32 tp_total_read_bandwidth_cache_overflow; + gctUINT32 tp_total_write_bandwidth_cache; + gctUINT32 tp_total_write_bandwidth_cache_overflow; + + gctUINT32 tp_total_read_bandwidth_sram; + gctUINT32 tp_total_read_bandwidth_sram_overflow; + gctUINT32 tp_total_write_bandwidth_sram; + gctUINT32 tp_total_write_bandwidth_sram_overflow; + + gctUINT32 tp_total_read_bandwidth_ocb; + gctUINT32 tp_total_read_bandwidth_ocb_overflow; + gctUINT32 tp_total_write_bandwidth_ocb; + gctUINT32 tp_total_write_bandwidth_ocb_overflow; + + gctUINT32 tp_fc_pix_count; + gctUINT32 tp_fc_zero_skip_count; + gctUINT32 tp_fc_pix_count_overflow; + gctUINT32 tp_fc_zero_skip_count_overflow; + + gctUINT32 tp_fc_coef_count; + gctUINT32 tp_fc_coef_zero_count; + gctUINT32 tp_fc_coef_count_overflow; + gctUINT32 tp_fc_coef_zero_count_overflow; + + gctUINT32 tp_total_idle_cycle_core[16]; + gctUINT32 tp_total_idle_cycle_core_overflows[16]; +} gcsPROFILER_VIP_PROBE_COUNTERS; + +#ifdef __cplusplus +} +#endif + +#endif /* __gc_hal_profiler_shared_h_ */ + + + diff --git a/unified-tina/inc/HAL/shared/gc_hal_types_shared.h b/unified-tina/inc/HAL/shared/gc_hal_types_shared.h new file mode 100644 index 0000000..e28a00a --- /dev/null +++ b/unified-tina/inc/HAL/shared/gc_hal_types_shared.h @@ -0,0 +1,1016 @@ +/**************************************************************************** +* +* Copyright (c) 2005 - 2023 by Vivante Corp. All rights reserved. +* +* The material in this file is confidential and contains trade secrets +* of Vivante Corporation. This is proprietary information owned by +* Vivante Corporation. No part of this work may be disclosed, +* reproduced, copied, transmitted, or used in any way for any purpose, +* without the express written permission of Vivante Corporation. +* +*****************************************************************************/ + + +#ifndef __gc_hal_types_shared_h_ +#define __gc_hal_types_shared_h_ + +#if !defined(GC_KMD) +#if defined(__KERNEL__) +# include "linux/version.h" +#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 24) +typedef unsigned long uintptr_t; +# endif +# include "linux/types.h" +# elif defined(UNDER_CE) +# include +typedef signed char int8_t; +typedef short int16_t; +typedef int int32_t; +typedef long long int64_t; +typedef unsigned char uint8_t; +typedef unsigned short uint16_t; +typedef unsigned int uint32_t; +typedef unsigned long long uint64_t; +# elif defined(_MSC_VER) && (_MSC_VER <= 1500) +# include +# include "vadefs.h" +# elif defined(__QNXNTO__) +# define _QNX_SOURCE +# include +# include +# else +# include +# include +# include +# endif +#endif + +#ifdef _WIN32 +# pragma warning(disable : 4127) /* Conditional expression is constant (do { } while(0)). */ +# pragma warning(disable : 4100) /* Unreferenced formal parameter. */ +# pragma warning(disable : 4204) /* Non-constant aggregate initializer (C99). */ +# pragma warning(disable : 4131) /* Uses old-style declarator. */ +# pragma warning(disable : 4206) /* Translation unit is empty. */ +# pragma warning(disable : 4214) /* Nonstandard extension used : + * bit field types other than int. + */ +#endif + +#ifdef __cplusplus +extern "C" { +#endif + +/******************************************************************************\ + * Platform macros. + */ + +#if defined(__GNUC__) +# define gcdHAS_ELLIPSIS 1 /* GCC always has it. */ +#elif defined(__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) +# define gcdHAS_ELLIPSIS 1 /* C99 has it. 
*/ +#elif defined(_MSC_VER) && (_MSC_VER >= 1500) +# define gcdHAS_ELLIPSIS 1 /* MSVC 2007+ has it. */ +#elif defined(UNDER_CE) +#if UNDER_CE >= 600 +# define gcdHAS_ELLIPSIS 1 +# else +# define gcdHAS_ELLIPSIS 0 +# endif +#else +# error "gcdHAS_ELLIPSIS: Platform could not be determined" +#endif + +/****************************************************************************** + *********************************** Keyword ********************************** + ******************************************************************************/ + +#if defined(ANDROID) && defined(__BIONIC_FORTIFY) +#if defined(__clang__) +#if (__clang_major__ >= 10) +# define gcmINLINE __inline__ __attribute__ ((always_inline)) +# else +# define gcmINLINE __inline__ __attribute__ ((always_inline)) __attribute__ ((gnu_inline)) +# endif +# else +# define gcmINLINE __inline__ __attribute__ ((always_inline)) __attribute__ ((gnu_inline)) __attribute__ ((artificial)) +# endif +#elif ((defined(__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L)) || defined(__APPLE__)) +# define gcmINLINE inline /* C99 keyword. */ +#elif defined(__GNUC__) +# define gcmINLINE __inline__ /* GNU keyword. */ +#elif defined(_MSC_VER) || defined(UNDER_CE) +# define gcmINLINE __inline /* Internal keyword. */ +#else +# error "gcmINLINE: Platform could not be determined" +#endif + +/* kernel layer keyword. */ +#define gcmkINLINE inline + +/* kernel fall-through keyword. */ +#if defined __has_attribute +#if __has_attribute(__fallthrough__) +# define gcmkFALLTHRU __attribute__((__fallthrough__)) +# else +# define gcmkFALLTHRU do {} while (0) /* fallthrough */ +# endif +#else +# define gcmkFALLTHRU do {} while (0) /* fallthrough */ +#endif + +/* Possible debug flags. */ +#define gcdDEBUG_NONE 0 +#define gcdDEBUG_ALL (1 << 0) +#define gcdDEBUG_FATAL (1 << 1) +#define gcdDEBUG_TRACE (1 << 2) +#define gcdDEBUG_BREAK (1 << 3) +#define gcdDEBUG_ASSERT (1 << 4) +#define gcdDEBUG_CODE (1 << 5) +#define gcdDEBUG_STACK (1 << 6) + +#define gcmIS_DEBUG(flag) (gcdDEBUG & ((flag) | gcdDEBUG_ALL)) + +#ifndef gcdDEBUG +#if (defined(DBG) && DBG) || defined(DEBUG) || defined(_DEBUG) +# define gcdDEBUG gcdDEBUG_ALL +# else +# define gcdDEBUG gcdDEBUG_NONE +# endif +#endif + +#ifdef _USRDLL +#ifdef _MSC_VER +#ifdef HAL_EXPORTS +# define HALAPI __declspec(dllexport) +# else +# define HALAPI __declspec(dllimport) +# endif +# define HALDECL __cdecl +# else +#ifdef HAL_EXPORTS +# define HALAPI +# else +# define HALAPI extern +# endif +# endif +#else +# define HALAPI +# define HALDECL +#endif + +/****************************************************************************** + ********************************* Common Types ******************************* + ******************************************************************************/ + +#define gcvFALSE 0 +#define gcvTRUE 1 + +#define gcvINFINITE ((gctUINT32)~0U) + +#define gcvINVALID_HANDLE ((gctHANDLE)gcmINT2PTR(~0U)) + +typedef int gctBOOL; +typedef gctBOOL *gctBOOL_PTR; + +typedef int gctINT; +typedef signed char gctINT8; +typedef signed short gctINT16; +typedef signed int gctINT32; +typedef signed long long gctINT64; + +typedef gctINT *gctINT_PTR; +typedef gctINT8 *gctINT8_PTR; +typedef gctINT16 *gctINT16_PTR; +typedef gctINT32 *gctINT32_PTR; +typedef gctINT64 *gctINT64_PTR; + +typedef unsigned int gctUINT; +typedef unsigned char gctUINT8; +typedef unsigned short gctUINT16; +typedef unsigned int gctUINT32; +typedef unsigned long long gctUINT64; +typedef uintptr_t gctUINTPTR_T; +typedef ptrdiff_t gctPTRDIFF_T; + +typedef 
gctUINT *gctUINT_PTR; +typedef gctUINT8 *gctUINT8_PTR; +typedef gctUINT16 *gctUINT16_PTR; +typedef gctUINT32 *gctUINT32_PTR; +typedef gctUINT64 *gctUINT64_PTR; + +typedef size_t gctSIZE_T; +typedef gctSIZE_T *gctSIZE_T_PTR; +typedef gctUINT32 gctTRACE; + +#ifdef __cplusplus +# define gcvNULL 0 +#else +# define gcvNULL ((void *)0) +#endif + +#define gcvMAXINT8 0x7f +#define gcvMININT8 0x80 +#define gcvMAXINT16 0x7fff +#define gcvMININT16 0x8000 +#define gcvMAXINT32 0x7fffffff +#define gcvMININT32 0x80000000 +#define gcvMAXINT64 0x7fffffffffffffff +#define gcvMININT64 0x8000000000000000 +#define gcvMAXUINT8 0xff +#define gcvMINUINT8 0x0 +#define gcvMAXUINT16 0xffff +#define gcvMINUINT16 0x0 +#define gcvMAXUINT32 0xffffffff +#define gcvMINUINT32 0x0 +#define gcvMAXUINT64 0xffffffffffffffff +#define gcvMINUINT64 0x0 +#define gcvMAXUINTPTR_T (~(gctUINTPTR_T)0) +#define gcvMAXSIZE_T ((gctSIZE_T)(-1)) + +typedef float gctFLOAT; +typedef double gctDOUBLE; +typedef signed int gctFIXED_POINT; +typedef float *gctFLOAT_PTR; +typedef double *gctDOUBLE_PTR; + +typedef void *gctPHYS_ADDR; +typedef void *gctHANDLE; +typedef void *gctFILE; +typedef void *gctSIGNAL; +typedef void *gctWINDOW; +typedef void *gctIMAGE; +typedef void *gctSHBUF; + +typedef void *gctSEMAPHORE; + +typedef void *gctPOINTER; +typedef const void *gctCONST_POINTER; + +typedef char gctCHAR; +typedef signed char gctSIGNED_CHAR; +typedef unsigned char gctUNSIGNED_CHAR; +typedef char *gctSTRING; +typedef const char *gctCONST_STRING; + +typedef gctUINT64 gctPHYS_ADDR_T; +typedef gctUINT64 gctADDRESS; + +typedef struct _gcsCOUNT_STRING { + gctSIZE_T Length; + gctCONST_STRING String; +} gcsCOUNT_STRING; + +typedef union _gcuFLOAT_UINT32 { + gctFLOAT f; + gctUINT32 u; +} gcuFLOAT_UINT32; + +/* Fixed point constants. */ +#define gcvZERO_X ((gctFIXED_POINT)0x00000000) +#define gcvHALF_X ((gctFIXED_POINT)0x00008000) +#define gcvONE_X ((gctFIXED_POINT)0x00010000) +#define gcvNEGONE_X ((gctFIXED_POINT)0xFFFF0000) +#define gcvTWO_X ((gctFIXED_POINT)0x00020000) + +/* No special needs. */ +#define gcvALLOC_FLAG_NONE 0x00000000 + +/* Physical contiguous. */ +#define gcvALLOC_FLAG_CONTIGUOUS 0x00000001 +/* Physical non contiguous. */ +#define gcvALLOC_FLAG_NON_CONTIGUOUS 0x00000002 + +/* Should not swap out. */ +#define gcvALLOC_FLAG_NON_PAGED 0x00000004 + +/* CPU access explicitly needed. */ +#define gcvALLOC_FLAG_CPU_ACCESS 0x00000008 +/* Can be remapped as cacheable. */ +#define gcvALLOC_FLAG_CACHEABLE 0x00000010 + +/* Need 32bit address. */ +#define gcvALLOC_FLAG_4GB_ADDR 0x00000020 + +/* Secure buffer. */ +#define gcvALLOC_FLAG_SECURITY 0x00000040 +/* Can be exported as dmabuf-fd */ +#define gcvALLOC_FLAG_DMABUF_EXPORTABLE 0x00000080 +/* Do not try slow pools (gcvPOOL_VIRTUAL) */ +#define gcvALLOC_FLAG_FAST_POOLS 0x00000100 + +/* Only accessed by GPU */ +#define gcvALLOC_FLAG_NON_CPU_ACCESS 0x00000200 +/* Do not be moved */ +#define gcvALLOC_FLAG_NO_EVICT 0x00000400 + +/* Allocate from user space. */ +#define gcvALLOC_FLAG_FROM_USER 0x00000800 + +/* Import DMABUF. */ +#define gcvALLOC_FLAG_DMABUF 0x00001000 +/* Import USERMEMORY. */ +#define gcvALLOC_FLAG_USERMEMORY 0x00002000 +/* Import an External Buffer, this flag is no longer used. */ +#define gcvALLOC_FLAG_EXTERNAL_MEMORY 0x00004000 +/* Import linux reserved memory. */ +#define gcvALLOC_FLAG_LINUX_RESERVED_MEM 0x00008000 + +/* 1M pages unit allocation. */ +#define gcvALLOC_FLAG_1M_PAGES 0x00010000 + +/* Non 1M pages unit allocation. 
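+   (These gcvALLOC_FLAG_* values are bit flags and are normally OR-ed
+   together by callers, e.g. gcvALLOC_FLAG_CONTIGUOUS | gcvALLOC_FLAG_CACHEABLE
+   for a physically contiguous, CPU-cacheable allocation.)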
*/ +#define gcvALLOC_FLAG_4K_PAGES 0x00020000 + +/* Lower 4G VA range. */ +#define gcvALLOC_FLAG_32BIT_VA 0x00040000 +#define gcvALLOC_FLAG_PRIOR_32BIT_VA 0x00080000 + +/* Dynamically allocate local memory pool memory. */ +#define gcvALLOC_FLAG_DYNAMIC_ALLOC_LOCAL 0x00100000 + +/* Real allocation happens when GPU page fault. */ +#define gcvALLOC_FLAG_ALLOC_ON_FAULT 0x01000000 +/* Alloc with memory limit. */ +#define gcvALLOC_FLAG_MEMLIMIT 0x02000000 +/* Alloc memory with mirror */ +#define gcvALLOC_FLAG_WITH_MIRROR 0x04000000 + +#define gcmFIXEDCLAMP_NEG1_TO_1(_x) \ + (((_x) < gcvNEGONE_X) ? gcvNEGONE_X : \ + (((_x) > gcvONE_X) ? gcvONE_X : (_x))) + +#define gcmFLOATCLAMP_NEG1_TO_1(_f) \ + (((_f) < -1.0f) ? -1.0f : (((_f) > 1.0f) ? 1.0f : (_f))) + +#define gcmFIXEDCLAMP_0_TO_1(_x) \ + (((_x) < 0) ? 0 : (((_x) > gcvONE_X) ? gcvONE_X : (_x))) + +#define gcmFLOATCLAMP_0_TO_1(_f) \ + (((_f) < 0.0f) ? 0.0f : (((_f) > 1.0f) ? 1.0f : (_f))) + +/****************************************************************************** + ****************************** Multicast Values ****************************** + ******************************************************************************/ + +/* Value unions. */ +typedef union _gcuVALUE { + gctUINT uintValue; + gctFIXED_POINT fixedValue; + gctFLOAT floatValue; + gctINT intValue; +} gcuVALUE; + +/* Stringizing macro. */ +#define gcmSTRING(Value) #Value + +/****************************************************************************** + ****************************** Fixed Point Math ****************************** + ******************************************************************************/ + +#define gcmXMultiply(x1, x2) gcoMATH_MultiplyFixed(x1, x2) +#define gcmXDivide(x1, x2) gcoMATH_DivideFixed(x1, x2) +#define gcmXMultiplyDivide(x1, x2, x3) gcoMATH_MultiplyDivideFixed(x1, x2, x3) + +/* 2D Engine profile. */ +typedef struct _gcs2D_PROFILE { + /* Cycle count. + * 32bit counter incremented every 2D clock cycle. + * Wraps back to 0 when the counter overflows. + */ + gctUINT32 cycleCount; + + /* Pixels rendered by the 2D engine. + * Resets to 0 every time it is read. + */ + gctUINT32 pixelsRendered; +} gcs2D_PROFILE; + +#define gcmPRINTABLE(c) ((((c) >= ' ') && ((c) <= '}')) ? ((c) != '%' ? 
(c) : ' ') : ' ') + +#define gcmCC_PRINT(cc) \ + gcmPRINTABLE((char) ((cc) & 0xFF)), \ + gcmPRINTABLE((char) (((cc) >> 8) & 0xFF)), \ + gcmPRINTABLE((char) (((cc) >> 16) & 0xFF)), \ + gcmPRINTABLE((char) (((cc) >> 24) & 0xFF)) + +/****************************************************************************** + ***************************** Function Parameters **************************** + ******************************************************************************/ + +#define IN +#define OUT +#define INOUT +#define OPTIONAL + +/****************************************************************************** + ******************************** Status Macros ******************************* + ******************************************************************************/ + +#define gcmIS_ERROR(status) ((status) < 0) +#define gcmNO_ERROR(status) ((status) >= 0) +#define gcmIS_SUCCESS(status) ((status) == gcvSTATUS_OK) +#define gcmERROR2PTR(err) ((gctPOINTER)(err)) +#define gcmPTR2ERROR(ptr) ((gctINT64)(ptr)) +/****************************************************************************** + ******************************** Field Macros ******************************** + ******************************************************************************/ + +#define __gcmSTART(reg_field) \ + (0 ? reg_field) + +#define __gcmEND(reg_field) \ + (1 ? reg_field) + +#define __gcmGETSIZE(reg_field) \ + (__gcmEND(reg_field) - __gcmSTART(reg_field) + 1) + +#define __gcmALIGN(data, reg_field) \ + (((gctUINT32)(data)) << __gcmSTART(reg_field)) + +#define __gcmMASK(reg_field) \ + ((gctUINT32) ((__gcmGETSIZE(reg_field) == 32) \ + ? ~0U \ + : (~(~0U << __gcmGETSIZE(reg_field))))) + + +/******************************************************************************* + ** + ** gcmFIELDMASK + ** + ** Get aligned field mask. + ** + ** ARGUMENTS: + ** + ** reg Name of register. + ** field Name of field within register. + */ +#define gcmFIELDMASK(reg, field) \ + (__gcmALIGN(__gcmMASK(reg##_##field), reg##_##field)) + +/******************************************************************************* + ** + ** gcmGETFIELD + ** + ** Extract the value of a field from specified data. + ** + ** ARGUMENTS: + ** + ** data Data value. + ** reg Name of register. + ** field Name of field within register. + */ +#define gcmGETFIELD(data, reg, field) \ +(\ + ((((gctUINT32)(data)) >> __gcmSTART(reg##_##field)) \ + & __gcmMASK(reg##_##field)) \ +) + +/******************************************************************************* + ** + ** gcmSETFIELD + ** + ** Set the value of a field within specified data. + ** + ** ARGUMENTS: + ** + ** data Data value. + ** reg Name of register. + ** field Name of field within register. + ** value Value for field. + */ +#define gcmSETFIELD(data, reg, field, value) \ +(\ + (((gctUINT32)(data)) \ + & ~__gcmALIGN(__gcmMASK(reg##_##field), reg##_##field)) \ + | __gcmALIGN((gctUINT32)(value) \ + & __gcmMASK(reg##_##field), reg##_##field) \ +) + +/******************************************************************************* + ** + ** gcmSETFIELDVALUE + ** + ** Set the value of a field within specified data with a + ** predefined value. + ** + ** ARGUMENTS: + ** + ** data Data value. + ** reg Name of register. + ** field Name of field within register. + ** value Name of the value within the field. 
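+ **
+ ** EXAMPLE:
+ **
+ **     The names below are hypothetical; a field token is expected to
+ **     expand to an "msb : lsb" pair so that __gcmSTART()/__gcmEND()
+ **     resolve its bit range:
+ **
+ **         #define AQ_CMD_OPCODE          31 : 27
+ **         #define AQ_CMD_OPCODE_NOP      0x03
+ **
+ **         cmd    = gcmSETFIELDVALUE(cmd, AQ_CMD, OPCODE, NOP);
+ **         opcode = gcmGETFIELD(cmd, AQ_CMD, OPCODE);
+ **
+ **     The first line writes the predefined NOP value into bits 27..31
+ **     of cmd; the second reads the field back.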
+ */ +#define gcmSETFIELDVALUE(data, reg, field, value) \ +(\ + (((gctUINT32)(data)) \ + & ~__gcmALIGN(__gcmMASK(reg##_##field), reg##_##field)) \ + | __gcmALIGN(reg##_##field##_##value \ + & __gcmMASK(reg##_##field), reg##_##field) \ +) + +/******************************************************************************* + ** + ** gcmGETMASKEDFIELDMASK + ** + ** Determine field mask of a masked field. + ** + ** ARGUMENTS: + ** + ** reg Name of register. + ** field Name of field within register. + */ +#define gcmGETMASKEDFIELDMASK(reg, field) \ +(\ + gcmSETFIELD(0, reg, field, ~0U) | \ + gcmSETFIELD(0, reg, MASK_ ## field, ~0U) \ +) + +/******************************************************************************* + ** + ** gcmSETMASKEDFIELD + ** + ** Set the value of a masked field with specified data. + ** + ** ARGUMENTS: + ** + ** reg Name of register. + ** field Name of field within register. + ** value Value for field. + */ +#define gcmSETMASKEDFIELD(reg, field, value) \ +(\ + gcmSETFIELD (~0U, reg, field, value) & \ + gcmSETFIELDVALUE(~0U, reg, MASK_ ## field, ENABLED) \ +) + +/******************************************************************************* + ** + ** gcmSETMASKEDFIELDVALUE + ** + ** Set the value of a masked field with specified data. + ** + ** ARGUMENTS: + ** + ** reg Name of register. + ** field Name of field within register. + ** value Value for field. + */ +#define gcmSETMASKEDFIELDVALUE(reg, field, value) \ +(\ + gcmSETFIELDVALUE(~0U, reg, field, value) & \ + gcmSETFIELDVALUE(~0U, reg, MASK_ ## field, ENABLED) \ +) + +/******************************************************************************* + ** + ** gcmVERIFYFIELDVALUE + ** + ** Verify if the value of a field within specified data equals a + ** predefined value. + ** + ** ARGUMENTS: + ** + ** data Data value. + ** reg Name of register. + ** field Name of field within register. + ** value Name of the value within the field. + */ +#define gcmVERIFYFIELDVALUE(data, reg, field, value) \ +(\ + (((gctUINT32)(data)) >> __gcmSTART(reg##_##field) & \ + __gcmMASK(reg##_##field)) \ + == \ + (reg##_##field##_##value & __gcmMASK(reg##_##field)) \ +) + +/******************************************************************************* + ** Bit field macros. + */ + +#define __gcmSTARTBIT(Field) \ + (1 ? Field) + +#define __gcmBITSIZE(Field) \ + (0 ? Field) + +#define __gcmBITMASK(Field) \ +(\ + (1 << __gcmBITSIZE(Field)) - 1 \ +) + +#define gcmGETBITS(Value, Type, Field) \ +(\ + (((Type)(Value)) >> __gcmSTARTBIT(Field)) \ + & \ + __gcmBITMASK(Field) \ +) + +#define gcmSETBITS(Value, Type, Field, NewValue) \ +(\ + (((Type)(Value)) & ~(__gcmBITMASK(Field) << __gcmSTARTBIT(Field))) | \ + ((((Type)(NewValue)) & __gcmBITMASK(Field)) << __gcmSTARTBIT(Field)) \ +) + +/******************************************************************************* + ** + ** gcmISINREGRANGE + ** + ** Verify whether the specified address is in the register range. + ** + ** ARGUMENTS: + ** + ** Address the Address to be verified. + ** Name Name of a register. + */ + +#define gcmISINREGRANGE(Address, Name) \ +(\ + ((Address & (~0U << Name##_LSB)) == (Name##_Address >> 2)) \ +) + +/****************************************************************************** + ******************************* Ceiling Macro ******************************** + ******************************************************************************/ +#define gcmCEIL(x) \ +(\ + ((x) - (gctUINT32)(x)) == 0 ? 
(gctUINT32)(x) : (gctUINT32)(x) + 1 \ +) + +/****************************************************************************** + ******************************* Min/Max Macros ******************************* + ******************************************************************************/ + +#define gcmMIN(x, y) (((x) <= (y)) ? (x) : (y)) +#define gcmMAX(x, y) (((x) >= (y)) ? (x) : (y)) +#define gcmCLAMP(x, min, max) (((x) < (min)) ? (min) : ((x) > (max)) ? (max) : (x)) +#define gcmABS(x) (((x) < 0) ? -(x) : (x)) +#define gcmNEG(x) (((x) < 0) ? (x) : -(x)) + +/****************************************************************************** + ******************************* Bit Macro ************************************ + ******************************************************************************/ +#define gcmBITSET(x, bit) ((x) | (1 << (bit))) +#define gcmBITCLEAR(x, bit) ((x) & ~(1 << (bit))) +#define gcmBITTEST(x, bit) ((x) & (1 << (bit))) + +/******************************************************************************* + ** + ** gcmPTR2SIZE + ** + ** Convert a pointer to an integer value. + ** + ** ARGUMENTS: + ** + ** p Pointer value. + */ +#define gcmPTR2SIZE(p) ((gctUINTPTR_T)(p)) + +#define gcmPTR2INT32(p) ((gctUINT32)(gctUINTPTR_T)(p)) + +/******************************************************************************* + ** + ** gcmINT2PTR + ** + ** Convert an integer value into a pointer. + ** + ** ARGUMENTS: + ** + ** v Integer value. + */ + +#define gcmINT2PTR(i) ((gctPOINTER)(gctUINTPTR_T)(i)) + +/******************************************************************************* + ** + ** gcmOFFSETOF + ** + ** Compute the byte offset of a field inside a structure. + ** + ** ARGUMENTS: + ** + ** s Structure name. + ** field Field name. + */ +#define gcmOFFSETOF(s, field) (gcmPTR2INT32(&(((struct s *)0)->field))) + +#define __gcmOFFSETOF(type, field) (gcmPTR2INT32(&(((type *)0)->field))) + +/******************************************************************************* + ** + ** gcmCONTAINEROF + ** + ** Get containing structure of a member. + ** + ** ARGUMENTS: + ** + ** Pointer the Pointer of member. + ** Type Structure name. + ** Name Field name. + */ +#define gcmCONTAINEROF(Pointer, Type, Member) \ +(\ + (Type *)((gctUINTPTR_T)(Pointer) - __gcmOFFSETOF(Type, Member)) \ +) + +/******************************************************************************* + ** + ** gcmBSWAP16/32/64 + ** + ** Return a value with all bytes in the 16/32/64 bit argument swapped. 
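+ **
+ ** For example, gcmBSWAP16(0xAABB) evaluates to 0xBBAA and
+ ** gcmBSWAP32(0x12345678) to 0x78563412.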
+ */ +#if !defined(__KERNEL__) && defined(__GNUC__) && \ + (__GNUC__ * 10000 + __GNUC_MINOR__ * 100 + __GNUC_PATCHLEVEL__ >= 40300) && \ + !defined(__VXWORKS__) +# define gcmBSWAP16(x) __builtin_bswap16(x) +# define gcmBSWAP32(x) __builtin_bswap32(x) +# define gcmBSWAP64(x) __builtin_bswap64(x) +#else +# define gcmBSWAP16(x) ((gctUINT16)(\ + (((gctUINT16)(x) & (gctUINT16)0x00FF) << 8) | \ + (((gctUINT16)(x) & (gctUINT16)0xFF00) >> 8))) + +# define gcmBSWAP32(x) ((gctUINT32)(\ + (((gctUINT32)(x) & (gctUINT32)0x000000FFU) << 24) | \ + (((gctUINT32)(x) & (gctUINT32)0x0000FF00U) << 8) | \ + (((gctUINT32)(x) & (gctUINT32)0x00FF0000U) >> 8) | \ + (((gctUINT32)(x) & (gctUINT32)0xFF000000U) >> 24))) + +# define gcmBSWAP64(x) ((gctUINT64)(\ + (((gctUINT64)(x) & (gctUINT64)0x00000000000000FFULL) << 56) | \ + (((gctUINT64)(x) & (gctUINT64)0x000000000000FF00ULL) << 40) | \ + (((gctUINT64)(x) & (gctUINT64)0x0000000000FF0000ULL) << 24) | \ + (((gctUINT64)(x) & (gctUINT64)0x00000000FF000000ULL) << 8) | \ + (((gctUINT64)(x) & (gctUINT64)0x000000FF00000000ULL) >> 8) | \ + (((gctUINT64)(x) & (gctUINT64)0x0000FF0000000000ULL) >> 24) | \ + (((gctUINT64)(x) & (gctUINT64)0x00FF000000000000ULL) >> 40) | \ + (((gctUINT64)(x) & (gctUINT64)0xFF00000000000000ULL) >> 56))) +#endif + +/******************************************************************************* + ** + ** gcmBSWAP16IN32 + ** + ** Return a value with every 16 bit swapped of a 32 bit data type. + */ +#define gcmBSWAP16IN32(x) ((gctUINT32)(\ + (((gctUINT32)(x) & (gctUINT32)0x000000FFU) << 8) | \ + (((gctUINT32)(x) & (gctUINT32)0x0000FF00U) >> 8) | \ + (((gctUINT32)(x) & (gctUINT32)0x00FF0000U) << 8) | \ + (((gctUINT32)(x) & (gctUINT32)0xFF000000U) >> 8))) + +/******************************************************************************* + ** + ** gcmBSWAP16IN32EX + ** + ** Return a value with whole 16 bit swapped of a 32 bit data type. + */ +#define gcmBSWAP16IN32EX(x) ((gctUINT32)(\ + (((gctUINT32)(x) & (gctUINT32)0x0000FFFFU) << 16) | \ + (((gctUINT32)(x) & (gctUINT32)0xFFFF0000U) >> 16))) + +/******************************************************************************* + ** + ** gcmBSWAP32IN64 + ** + ** Return a value with whole 32 bit swapped of a 64 bit data type. + */ +#define gcmBSWAP32IN64(x) ((gctUINT64)(\ + (((gctUINT64)(x) & (gctUINT64)0x00000000FFFFFFFFULL) << 32) | \ + (((gctUINT64)(x) & (gctUINT64)0xFFFFFFFF00000000ULL) >> 32))) + +/****************************************************************************** + **** Database ****************************************************************/ + +typedef struct _gcsDATABASE_COUNTERS { + /* Number of currently allocated bytes. */ + gctUINT64 bytes; + + /* Maximum number of bytes allocated (memory footprint). */ + gctUINT64 maxBytes; + + /* Total number of bytes allocated. */ + gctUINT64 totalBytes; + + /* The numbers of times video memory was allocated. */ + gctUINT32 allocCount; + + /* The numbers of times video memory was freed. */ + gctUINT32 freeCount; +} gcsDATABASE_COUNTERS; + +typedef struct _gcuDATABASE_INFO { + /* Counters. */ + gcsDATABASE_COUNTERS counters; + + /* Time value. */ + gctUINT64 time; +} gcuDATABASE_INFO; + +/****************************************************************************** + **** Frame database **********************************************************/ + +/* gcsHAL_FRAME_INFO */ +typedef struct _gcsHAL_FRAME_INFO { + /* Current timer tick. */ + OUT gctUINT64 ticks; + + /* Bandwidth counters. 
*/ + OUT gctUINT readBytes8[8]; + OUT gctUINT writeBytes8[8]; + + /* Counters. */ + OUT gctUINT cycles[8]; + OUT gctUINT idleCycles[8]; + OUT gctUINT mcCycles[8]; + OUT gctUINT readRequests[8]; + OUT gctUINT writeRequests[8]; + + /* 3D counters. */ + OUT gctUINT vertexCount; + OUT gctUINT primitiveCount; + OUT gctUINT rejectedPrimitives; + OUT gctUINT culledPrimitives; + OUT gctUINT clippedPrimitives; + OUT gctUINT outPrimitives; + OUT gctUINT inPrimitives; + OUT gctUINT culledQuadCount; + OUT gctUINT totalQuadCount; + OUT gctUINT quadCount; + OUT gctUINT totalPixelCount; + + /* PE counters. */ + OUT gctUINT colorKilled[8]; + OUT gctUINT colorDrawn[8]; + OUT gctUINT depthKilled[8]; + OUT gctUINT depthDrawn[8]; + + /* Shader counters. */ + OUT gctUINT shaderCycles; + OUT gctUINT vsInstructionCount; + OUT gctUINT vsTextureCount; + OUT gctUINT psInstructionCount; + OUT gctUINT psTextureCount; + + /* Texture counters. */ + OUT gctUINT bilinearRequests; + OUT gctUINT trilinearRequests; + OUT gctUINT txBytes8; + OUT gctUINT txHitCount; + OUT gctUINT txMissCount; +} gcsHAL_FRAME_INFO; + +typedef struct _gckLINKDATA *gckLINKDATA; +struct _gckLINKDATA { + gctADDRESS start; + gctADDRESS end; + gctUINT32 pid; + gctUINT32 linkLow; + gctUINT32 linkHigh; +}; + +typedef struct _gckADDRESSDATA *gckADDRESSDATA; +struct _gckADDRESSDATA { + gctADDRESS start; + gctADDRESS end; +}; + +typedef union _gcuQUEUEDATA { + struct _gckLINKDATA linkData; + + struct _gckADDRESSDATA addressData; +} gcuQUEUEDATA; + +typedef struct _gckQUEUE *gckQUEUE; +struct _gckQUEUE { + gcuQUEUEDATA *datas; + gctUINT32 rear; + gctUINT32 front; + gctUINT32 count; + gctUINT32 size; +}; + +typedef struct _gcsLISTHEAD *gcsLISTHEAD_PTR; +typedef struct _gcsLISTHEAD { + gcsLISTHEAD_PTR prev; + gcsLISTHEAD_PTR next; +} gcsLISTHEAD; + +/* + * 'Patch' here means a mechanism to let kernel side modify user space reserved + * command buffer location, or something the like, during the command buffer + * commit. + * + * Reasons of using 'patch': + * 1. Some resources/states are managed globally only in kernel side, such as + * MCFE semaphore, etc. + * 2. For the sake of security or optimization, like video memory address. + * + * Patches are arranged in arrays, each array has the same type. The 'patchArray' + * in 'gcsHAL_PATCH_LIST' pointers the concrete patch item array. + * + * NOTICE: + * Be aware of the order and values! Tables in gc_hal_user_buffer.c and + * gc_hal_kernel_command.c depend on this. + */ +/* The patch array. */ +typedef struct _gcsHAL_PATCH_LIST { + /* Patch type. */ + gctUINT32 type; + + /* Patch item count. */ + gctUINT32 count; + + /* + * Pointer to the patch items. + * + * gcsHAL_PATCH_VIDMEM_ADDRESS * patchArray; + * gcsHAL_PATCH_MCFE_SEMAPHORE * patchArray; + * gcsHAL_PATCH_VIDMEM_TIMESTAMP * patchArray; + * ... + */ + gctUINT64 patchArray; + + /* struct _gcsHAL_PATCH_LIST * next; */ + gctUINT64 next; +} gcsHAL_PATCH_LIST; + +/* + * Patch a GPU address in the place (gcvHAL_PATCH_VIDMEM_ADDRESS). + * Size of a GPU address is always 32 bits. + */ +typedef struct _gcsHAL_PATCH_VIDMEM_ADDRESS { + /* Patch location in the command buffer. */ + gctUINT32 location; + + /* Handle of the video memory node. */ + gctUINT32 node; + + /* Address offset in the video memory node. */ + gctUINT32 offset; +} gcsHAL_PATCH_VIDMEM_ADDRESS; + +/* + * Patch a MCFE semaphore command in the place (gcvHAL_PATCH_MCFE_SEMAPHORE). + * Size of the semaphore command is fixed at _64_ bits! 
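+ *
+ * Illustrative only (user-side buffer management is not part of this header;
+ * the variable names below are hypothetical): like the other patch types,
+ * semaphore patches are handed to the kernel as an array chained through
+ * gcsHAL_PATCH_LIST, e.g.
+ *
+ *     gcsHAL_PATCH_MCFE_SEMAPHORE items[1];
+ *     gcsHAL_PATCH_LIST           list;
+ *
+ *     list.type       = gcvHAL_PATCH_MCFE_SEMAPHORE;
+ *     list.count      = 1;
+ *     list.patchArray = (gctUINT64)gcmPTR2SIZE(items);
+ *     list.next       = 0;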
+ */ +typedef struct _gcsHAL_PATCH_MCFE_SEMAPHORE { + /* Patch location in the command buffer. */ + gctUINT32 location; + + /* semaphore direction: 1 = Send, 0 = Wait. */ + gctUINT32 sendSema; + + /* Handle of the semaphore. */ + gctUINT32 semaHandle; +} gcsHAL_PATCH_MCFE_SEMAPHORE; + +/* + * Patch timestamp of given video memory node (gcvHAL_PATCH_VIDMEM_TIMESTAMP). + * Pure software-wise, not command relevant. + */ +typedef struct _gcsHAL_PATCH_VIDMEM_TIMESTAMP { + /* Handle of a video memory node. */ + gctUINT32 handle; + + gctUINT32 flag; +} gcsHAL_PATCH_VIDMEM_TIMESTAMP; + +/* Put together patch list handling variables. */ +typedef struct _gcsPATCH_LIST_VARIABLE { + /* gcvHAL_PATCH_VIDMEM_TIMESTAMP. */ + gctUINT64 maxAsyncTimestamp; + + /* gcvHAL_PATCH_MCFE_SEMAPHORE. */ + gctBOOL semaUsed; +} gcsPATCH_LIST_VARIABLE; + +/* + * gcvFEATURE_DATABASE_DATE_MASK + * + * Mask used to control which bits of chip date will be used to + * query feature database, ignore release date for fpga and emulator. + */ +#if (gcdFPGA_BUILD || defined(EMULATOR)) +# define gcvFEATURE_DATABASE_DATE_MASK (0U) +#else +# define gcvFEATURE_DATABASE_DATE_MASK (~0U) +#endif + +#if defined(__GNUC__) +#if defined(__BYTE_ORDER__) && defined(__ORDER_BIG_ENDIAN__) && \ + (__BYTE_ORDER__ == __ORDER_BIG_ENDIAN__) +# define gcdENDIAN_BIG 1 +# else +# define gcdENDIAN_BIG 0 +# endif +#else +# define gcdENDIAN_BIG 0 +#endif + +#ifdef __cplusplus +} +#endif + +#endif /* __gc_hal_types_shared_h_ */ + + diff --git a/unified-tina/inc/HAL/shared/gc_hal_vg_shared.h b/unified-tina/inc/HAL/shared/gc_hal_vg_shared.h new file mode 100644 index 0000000..85fba5e --- /dev/null +++ b/unified-tina/inc/HAL/shared/gc_hal_vg_shared.h @@ -0,0 +1,180 @@ +/**************************************************************************** +* +* Copyright (c) 2005 - 2023 by Vivante Corp. All rights reserved. +* +* The material in this file is confidential and contains trade secrets +* of Vivante Corporation. This is proprietary information owned by +* Vivante Corporation. No part of this work may be disclosed, +* reproduced, copied, transmitted, or used in any way for any purpose, +* without the express written permission of Vivante Corporation. +* +*****************************************************************************/ + + +#ifndef __gc_hal_shared_vg_h_ +#define __gc_hal_shared_vg_h_ + +#if defined(__QNXNTO__) +# include +#endif + +#ifdef __cplusplus +extern "C" { +#endif + +/* Command buffer header. */ +typedef struct _gcsCMDBUFFER *gcsCMDBUFFER_PTR; +typedef struct _gcsCMDBUFFER { + /* Pointer to the completion signal. */ + gcsCOMPLETION_SIGNAL_PTR completion; + + /* + * The user sets this to the node of the container buffer whitin which + * this particular command buffer resides. The kernel sets this to the + * node of the internally allocated buffer. + */ + gcuVIDMEM_NODE_PTR node; + + /* Command buffer hardware address. */ + gctUINT32 address; + + /* The offset of the buffer from the beginning of the header. */ + gctUINT32 bufferOffset; + + /* + * Size of the area allocated for the data portion of this particular + * command buffer (headers and tail reserves are excluded). + */ + gctUINT32 size; + + /* + * Offset into the buffer [0..size]; reflects exactly how much data has + * been put into the command buffer. + */ + gctUINT offset; + + /* + * The number of command units in the buffer for the hardware to + * execute. + */ + gctUINT32 dataCount; + + /* + * MANAGED BY : user HAL (gcoBUFFER object). + * USED BY : user HAL (gcoBUFFER object). 
+ * Points to the immediate next allocated command buffer. + */ + gcsCMDBUFFER_PTR nextAllocated; + + /* + * MANAGED BY : user layers (HAL and drivers). + * USED BY : kernel HAL (gcoBUFFER object). + * Points to the next subbuffer if any. A family of subbuffers are chained + * together and are meant to be executed inseparably as a unit. Meaning + * that context switching cannot occur while a chain of subbuffers is being + * executed. + */ + gcsCMDBUFFER_PTR nextSubBuffer; +} gcsCMDBUFFER; + +/* Command queue element. */ +typedef struct _gcsVGCMDQUEUE { + /* Pointer to the command buffer header. */ + gcsCMDBUFFER_PTR commandBuffer; + + /* Dynamic vs. static command buffer state. */ + gctBOOL dynamic; +} gcsVGCMDQUEUE; + +/* Context map entry. */ +typedef struct _gcsVGCONTEXT_MAP { + /* State index. */ + gctUINT32 index; + + /* New state value. */ + gctUINT32 data; + + /* Points to the next entry in the mod list. */ + gcsVGCONTEXT_MAP_PTR next; +} gcsVGCONTEXT_MAP; + +/* gcsVGCONTEXT structure that holds the current context. */ +typedef struct _gcsVGCONTEXT { + /* Context ID. */ + gctUINT64 id; + + /* State caching ebable flag. */ + gctBOOL stateCachingEnabled; + + /* Current pipe. */ + gctUINT32 currentPipe; + + /* State map/mod buffer. */ + gctUINT32 mapFirst; + gctUINT32 mapLast; + gcsVGCONTEXT_MAP_PTR mapContainer; + gcsVGCONTEXT_MAP_PTR mapPrev; + gcsVGCONTEXT_MAP_PTR mapCurr; + gcsVGCONTEXT_MAP_PTR firstPrevMap; + gcsVGCONTEXT_MAP_PTR firstCurrMap; + + /* Main context buffer. */ + gcsCMDBUFFER_PTR header; + gctUINT32_PTR buffer; + + /* Completion signal. */ + gctHANDLE process; + gctSIGNAL signal; + +#if defined(__QNXNTO__) + gctSIGNAL userSignal; + struct sigevent event; + gctINT32 rcvid; +#endif +} gcsVGCONTEXT; + +/* User space task header. */ +typedef struct _gcsTASK *gcsTASK_PTR; +typedef struct _gcsTASK { + /* Pointer to the next task for the same interrupt in user space. */ + gcsTASK_PTR next; + + /* Size of the task data that immediately follows the structure. */ + gctUINT size; + + /* Task data starts here. */ + /* ... */ +} gcsTASK; + +/* User space task master table entry. */ +typedef struct _gcsTASK_MASTER_ENTRY *gcsTASK_MASTER_ENTRY_PTR; +typedef struct _gcsTASK_MASTER_ENTRY { + /* Pointers to the head and to the tail of the task chain. */ + gcsTASK_PTR head; + gcsTASK_PTR tail; +} gcsTASK_MASTER_ENTRY; + +/* User space task master table entry. */ +typedef struct _gcsTASK_MASTER_TABLE { + /* Table with one entry per block. */ + gcsTASK_MASTER_ENTRY table[gcvBLOCK_COUNT]; + + /* The total number of tasks sckeduled. */ + gctUINT count; + + /* The total size of event data in bytes. 
*/ + gctUINT size; + +#if defined(__QNXNTO__) + struct sigevent event; + gctINT32 rcvid; +#endif +} gcsTASK_MASTER_TABLE; + +#ifdef __cplusplus +} /* extern "C" */ +#endif + +#endif /* __gc_hal_shared_h_ */ + + diff --git a/unified-tina/inc/VX/viv_nn_compatibility.h b/unified-tina/inc/VX/viv_nn_compatibility.h new file mode 100644 index 0000000..155c4fa --- /dev/null +++ b/unified-tina/inc/VX/viv_nn_compatibility.h @@ -0,0 +1,194 @@ +#ifndef _VIV_NN_COMPATIBILITY_H_ +#define _VIV_NN_COMPATIBILITY_H_ + +#include +#include + +/* keep the backward compatibility with spec 1.1 for standard nn kernels */ +#define VX_KERNEL_NN_SOFTMAX_LAYER VX_KERNEL_SOFTMAX_LAYER +#define VX_KERNEL_NN_NORMALIZATION_LAYER VX_KERNEL_NORMALIZATION_LAYER +#define VX_KERNEL_NN_POOLING_LAYER VX_KERNEL_POOLING_LAYER +#define VX_KERNEL_NN_FULLY_CONNECTED_LAYER VX_KERNEL_FULLY_CONNECTED_LAYER +#define VX_KERNEL_NN_ACTIVATION_LAYER VX_KERNEL_ACTIVATION_LAYER +#define VX_KERNEL_NN_ROIPOOL VX_KERNEL_ROI_POOLING_LAYER +#define VX_KERNEL_NN_CONVOLUTION_LAYER VX_KERNEL_CONVOLUTION_LAYER +#define VX_KERNEL_NN_DECONVOLUTION_LAYER VX_KERNEL_DECONVOLUTION_LAYER + +/* keep the backward compatibility with spec 1.1 for vx_tensor_attribute_e */ +#define VX_TENSOR_NUM_OF_DIMS VX_TENSOR_NUMBER_OF_DIMS +#define VX_TENSOR_FIXED_POINT_POS VX_TENSOR_FIXED_POINT_POSITION + +/* keep the backward compatibility with spec 1.1 from vx_convolutional_network_rounding_type_e to vx_nn_rounding_type_e */ +typedef enum vx_nn_rounding_type_e vx_convolutional_network_rounding_type_e; +#define VX_CONVOLUTIONAL_NETWORK_DS_SIZE_ROUNDING_FLOOR VX_NN_DS_SIZE_ROUNDING_FLOOR +#define VX_CONVOLUTIONAL_NETWORK_DS_SIZE_ROUNDING_CEILING VX_NN_DS_SIZE_ROUNDING_CEILING + +/* keep the backward compatibility with spec 1.1 from vx_convolutional_network_pooling_type_e to vx_nn_pooling_type_e */ +typedef enum vx_nn_pooling_type_e vx_convolutional_network_pooling_type_e; +#define VX_CONVOLUTIONAL_NETWORK_POOLING_MAX VX_NN_POOLING_MAX +#define VX_CONVOLUTIONAL_NETWORK_POOLING_AVG VX_NN_POOLING_AVG +#define VX_CONVOLUTIONAL_NETWORK_POOLING_L2 VX_NN_POOLING_L2 +#define VX_CONVOLUTIONAL_NETWORK_POOLING_AVG_ANDROID VX_NN_POOLING_AVG_ANDROID + +/* keep the backward compatibility with spec 1.1 from vx_convolutional_network_norm_type_e to vx_nn_norm_type_e */ +typedef enum vx_nn_norm_type_e vx_convolutional_network_norm_type_e; +#define VX_CONVOLUTIONAL_NETWORK_NORM_SAME_MAP VX_NN_NORMALIZATION_SAME_MAP +#define VX_CONVOLUTIONAL_NETWORK_NORM_ACROSS_MAPS VX_NN_NORMALIZATION_ACROSS_MAPS + +/* keep the backward compatibility with spec 1.1 from vx_convolutional_network_layer_type_e to vx_nn_layer_type_e */ +typedef enum vx_nn_layer_type_e vx_convolutional_network_layer_type_e; +#define VX_CONVOLUTIONAL_NETWORK_CONVOLUTION_LAYER VX_NN_CONVOLUTION_LAYER +#define VX_CONVOLUTIONAL_NETWORK_FULLYCONNECTED_LAYER VX_NN_FULLYCONNECTED_LAYER + +/* keep the backward compatibility with spec 1.1 from vx_convolutional_network_activation_func_e to vx_nn_activation_function_e */ +typedef enum vx_nn_activation_function_e vx_convolutional_network_activation_func_e; +#define VX_CONVOLUTIONAL_NETWORK_ACTIVATION_LOGISTIC VX_NN_ACTIVATION_LOGISTIC +#define VX_CONVOLUTIONAL_NETWORK_ACTIVATION_HYPERBOLIC_TAN VX_NN_ACTIVATION_HYPERBOLIC_TAN +#define VX_CONVOLUTIONAL_NETWORK_ACTIVATION_RELU VX_NN_ACTIVATION_RELU +#define VX_CONVOLUTIONAL_NETWORK_ACTIVATION_BRELU VX_NN_ACTIVATION_BRELU +#define VX_CONVOLUTIONAL_NETWORK_ACTIVATION_SOFTRELU VX_NN_ACTIVATION_SOFTRELU +#define 
VX_CONVOLUTIONAL_NETWORK_ACTIVATION_ABS VX_NN_ACTIVATION_ABS +#define VX_CONVOLUTIONAL_NETWORK_ACTIVATION_SQUARE VX_NN_ACTIVATION_SQUARE +#define VX_CONVOLUTIONAL_NETWORK_ACTIVATION_SQRT VX_NN_ACTIVATION_SQRT +#define VX_CONVOLUTIONAL_NETWORK_ACTIVATION_LINEAR VX_NN_ACTIVATION_LINEAR +#define VX_CONVOLUTIONAL_NETWORK_ACTIVATION_LEAKYRELU VX_NN_ACTIVATION_LEAKYRELU +#define VX_CONVOLUTIONAL_NETWORK_ACTIVATION_RELU6 VX_NN_ACTIVATION_RELU6 +#define VX_CONVOLUTIONAL_NETWORK_ACTIVATION_RELU1 VX_NN_ACTIVATION_RELU1 +#define VX_CONVOLUTIONAL_NETWORK_ACTIVATION_RSQRT VX_NN_ACTIVATION_RSQRT +#define VX_CONVOLUTIONAL_NETWORK_ACTIVATION_LEAKYRELU_MAX_POOLING VX_NN_ACTIVATION_LEAKYRELU_MAX_POOLING +#define VX_CONVOLUTIONAL_NETWORK_ACTIVATION_NONE VX_NN_ACTIVATION_NONE + +#ifdef __cplusplus +extern "C" { +#endif + +/* keep the backward compatibility with spec 1.1 for vxCreateTensor */ +VX_API_ENTRY vx_tensor VX_API_CALL +vxCreateTensor_11( + vx_context context, + vx_uint32 num_of_dims, + vx_uint32 *sizes, + vx_enum data_format, + vx_int8 fixed_point_pos + ); +#if !VX_VA40_EXT_SUPPORT +#define vxCreateTensor vxCreateTensor_11 +#endif + +/* keep the backward compatibility with spec 1.1 for vxCreateVirtualTensor */ +VX_API_ENTRY vx_tensor VX_API_CALL +vxCreateVirtualTensor_11( + vx_graph graph, + vx_uint32 num_of_dims, + vx_uint32 *sizes, + vx_enum data_format, + vx_int8 fixed_point_pos +); + +#if !VX_VA40_EXT_SUPPORT +#define vxCreateVirtualTensor vxCreateVirtualTensor_11 +#endif + +/* keep the backward compatibility with spec 1.1 for vxCreateTensorFromView */ +VX_API_ENTRY vx_tensor VX_API_CALL +vxCreateTensorFromView_11( + vx_tensor tensor, + vx_tensor_view view +); +#define vxCreateTensorFromView vxCreateTensorFromView_11 + +/* keep the backward compatibility with spec 1.1 for vxCopyTensorPatch */ +VX_API_ENTRY vx_status VX_API_CALL +vxCopyTensorPatch_11( + vx_tensor tensor, + vx_tensor_view view, + vx_tensor_addressing user_addr, + void *user_ptr, + vx_enum usage, + vx_enum user_mem_type +); +#define vxCopyTensorPatch vxCopyTensorPatch_11 + +/* keep the backward compatibility with spec 1.1 for vxCreateImageObjectArrayFromTensor */ +VX_API_ENTRY vx_object_array VX_API_CALL +vxCreateImageObjectArrayFromTensor_11( + vx_tensor tensor, + vx_rectangle_t rect, + vx_uint32 array_size, + vx_uint32 stride, + vx_df_image image_format +); +#define vxCreateImageObjectArrayFromTensor vxCreateImageObjectArrayFromTensor_11 + +/* keep the backward compatibility with spec 1.1 for vxFullyConnectedLayer */ +VX_API_ENTRY vx_node VX_API_CALL +vxFullyConnectedLayer_11( + vx_graph graph, + vx_tensor inputs, + vx_tensor weights, + vx_tensor biases, + vx_uint32 pad, + vx_uint8 accumulator_bits, + vx_enum overflow_policy, + vx_enum rounding_policy, + vx_enum down_scale_size_rounding, + vx_tensor outputs +); +#define vxFullyConnectedLayer vxFullyConnectedLayer_11 + +/* keep the backward compatibility with spec 1.1 for vxActivationLayer */ +VX_API_ENTRY vx_node VX_API_CALL +vxActivationLayer_11( + vx_graph graph, + vx_tensor inputs, + vx_enum func, + vx_int32 a, + vx_int32 b, + vx_tensor outputs +); +#define vxActivationLayer vxActivationLayer_11 + +/* keep the backward compatibility with spec 1.1 for vxPoolingLayer */ +VX_API_ENTRY vx_node VX_API_CALL +vxPoolingLayer_11( + vx_graph graph, + vx_tensor inputs, + vx_enum pool_type, + vx_uint32 pool_size_x, + vx_uint32 pool_size_y, + vx_uint32 pool_pad_x, + vx_uint32 pool_pad_y, + vx_enum rounding, + vx_tensor outputs +); +#define vxPoolingLayer vxPoolingLayer_11 + +/* keep the 
backward compatibility with spec 1.1 for vxNormalizationLayer */ +VX_API_ENTRY vx_node VX_API_CALL +vxNormalizationLayer_11( + vx_graph graph, + vx_tensor inputs, + vx_enum type, + vx_uint32 norm_size, + vx_float32 alpha, + vx_float32 beta, + vx_tensor outputs +); +#define vxNormalizationLayer vxNormalizationLayer_11 + +/* keep the backward compatibility with spec 1.1 for vxTensorTransposeNode */ +VX_API_ENTRY vx_node VX_API_CALL +vxTensorTransposeNode_11( + vx_graph graph, + vx_tensor inputs, + vx_tensor outputs, + vx_uint32 dim1, + vx_uint32 dim2 +); +#define vxTensorTransposeNode vxTensorTransposeNode_11 + +#ifdef __cplusplus +} +#endif + +#endif diff --git a/unified-tina/inc/VX/vx.h b/unified-tina/inc/VX/vx.h new file mode 100644 index 0000000..27181ce --- /dev/null +++ b/unified-tina/inc/VX/vx.h @@ -0,0 +1,97 @@ +/* + * Copyright (c) 2012-2020 The Khronos Group Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/* + * NOTE: Some safety-critical environments may enforce software development + * guidelines (for example MISRA C:2012) to facilitate code quality, + * safety, security, portability and reliability. In order to meet + * such guidelines, developers may modify OpenVX standard header files + * without deviating from the OpenVX specification. + */ + +#ifndef _OPENVX_H_ +#define _OPENVX_H_ + +/*! + * \file + * \brief The top level OpenVX Header. + */ + +/*! \brief Defines the length of the implementation name string, including the trailing zero. + * \ingroup group_context + */ +#define VX_MAX_IMPLEMENTATION_NAME (64) + +/*! \brief Defines the length of a kernel name string to be added to OpenVX, including the trailing zero. + * \ingroup group_kernel + */ +#define VX_MAX_KERNEL_NAME (256) + +/*! \brief Defines the length of a message buffer to copy from the log, including the trailing zero. + * \ingroup group_basic_features + */ +#define VX_MAX_LOG_MESSAGE_LEN (1024) + +/*! \brief Defines the length of the reference name string, including the trailing zero. + * \ingroup group_reference + * \see vxSetReferenceName + */ +#define VX_MAX_REFERENCE_NAME (64) + +#include +#include +#include +#include +#include + +/*! \brief Defines the major version number macro. + * \ingroup group_basic_features + */ +#define VX_VERSION_MAJOR(x) ((x & 0xFFU) << 8) + +/*! \brief Defines the minor version number macro. + * \ingroup group_basic_features + */ +#define VX_VERSION_MINOR(x) ((x & 0xFFU) << 0) + +/*! \brief Defines the predefined version number for 1.0. + * \ingroup group_basic_features + */ +#define VX_VERSION_1_0 (VX_VERSION_MAJOR(1) | VX_VERSION_MINOR(0)) + +/*! \brief Defines the predefined version number for 1.1. + * \ingroup group_basic_features + */ +#define VX_VERSION_1_1 (VX_VERSION_MAJOR(1) | VX_VERSION_MINOR(1)) + +/*! \brief Defines the predefined version number for 1.2. + * \ingroup group_basic_features + */ +#define VX_VERSION_1_2 (VX_VERSION_MAJOR(1) | VX_VERSION_MINOR(2)) + +/*! \brief Defines the predefined version number for 1.3. 
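+ * (Numerically this is 0x0103, from the VX_VERSION_MAJOR/VX_VERSION_MINOR
+ * shifts above.)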
+ * \ingroup group_basic_features + */ +#define VX_VERSION_1_3 (VX_VERSION_MAJOR(1) | VX_VERSION_MINOR(3)) + +/*! \brief Defines the OpenVX Version Number. + * \ingroup group_basic_features + */ +#ifndef VX_VERSION +#define VX_VERSION (VX_VERSION_1_3) +#endif + +#endif diff --git a/unified-tina/inc/VX/vx_api.h b/unified-tina/inc/VX/vx_api.h new file mode 100644 index 0000000..4495742 --- /dev/null +++ b/unified-tina/inc/VX/vx_api.h @@ -0,0 +1,3480 @@ +/* + + * Copyright (c) 2012-2017 The Khronos Group Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef _OPENVX_API_H_ +#define _OPENVX_API_H_ + +/*! + * \file + * \brief The API definition for OpenVX. + */ + +#ifdef __cplusplus +extern "C" { +#endif + +/*============================================================================== + CONTEXT + =============================================================================*/ + +/*! \brief Creates a \ref vx_context. + * \details This creates a top-level object context for OpenVX. + * \note This is required to do anything else. + * \returns The reference to the implementation context \ref vx_context. Any possible errors + * preventing a successful creation should be checked using \ref vxGetStatus. + * \ingroup group_context + * \post \ref vxReleaseContext + */ +VX_API_ENTRY vx_context VX_API_CALL vxCreateContext(void); + +/*! \brief Releases the OpenVX object context. + * \details All reference counted objects are garbage-collected by the return of this call. + * No calls are possible using the parameter context after the context has been + * released until a new reference from \ref vxCreateContext is returned. + * All outstanding references to OpenVX objects from this context are invalid + * after this call. + * \param [in] context The pointer to the reference to the context. + * \post After returning from this function the reference is zeroed. + * \return A \ref vx_status_e enumeration. + * \retval VX_SUCCESS No errors; any other value indicates failure. + * \retval VX_ERROR_INVALID_REFERENCE context is not a valid \ref vx_context reference. + * \ingroup group_context + * \pre \ref vxCreateContext + */ +VX_API_ENTRY vx_status VX_API_CALL vxReleaseContext(vx_context *context); + +/*! \brief Retrieves the context from any reference from within a context. + * \param [in] reference The reference from which to extract the context. + * \ingroup group_context + * \return The overall context that created the particular + * reference. Any possible errors preventing a successful completion of this function + * should be checked using \ref vxGetStatus. + */ +VX_API_ENTRY vx_context VX_API_CALL vxGetContext(vx_reference reference); + +/*! \brief Queries the context for some specific information. + * \param [in] context The reference to the context. + * \param [in] attribute The attribute to query. Use a \ref vx_context_attribute_e. + * \param [out] ptr The location at which to store the resulting value. + * \param [in] size The size in bytes of the container to which \a ptr points. 
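+ *
+ * Illustrative use (VX_CONTEXT_VENDOR_ID is assumed to be available from
+ * \ref vx_context_attribute_e in VX/vx_types.h):
+ * \code
+ * vx_context context = vxCreateContext();
+ * vx_uint16 vendor_id = 0;
+ * vx_status status = vxQueryContext(context, VX_CONTEXT_VENDOR_ID,
+ *                                   &vendor_id, sizeof(vendor_id));
+ * \endcode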
+ * \return A \ref vx_status_e enumeration. + * \retval VX_SUCCESS No errors; any other value indicates failure. + * \retval VX_ERROR_INVALID_REFERENCE context is not a valid \ref vx_context reference. + * \retval VX_ERROR_INVALID_PARAMETERS If any of the other parameters are incorrect. + * \retval VX_ERROR_NOT_SUPPORTED If the attribute is not supported on this implementation. + * \ingroup group_context + */ +VX_API_ENTRY vx_status VX_API_CALL vxQueryContext(vx_context context, vx_enum attribute, void *ptr, vx_size size); + +/*! \brief Sets an attribute on the context. + * \param [in] context The handle to the overall context. + * \param [in] attribute The attribute to set from \ref vx_context_attribute_e. + * \param [in] ptr The pointer to the data to which to set the attribute. + * \param [in] size The size in bytes of the data to which \a ptr points. + * \return A \ref vx_status_e enumeration. + * \retval VX_SUCCESS No errors; any other value indicates failure. + * \retval VX_ERROR_INVALID_REFERENCE context is not a valid \ref vx_context reference. + * \retval VX_ERROR_INVALID_PARAMETERS If any of the other parameters are incorrect. + * \retval VX_ERROR_NOT_SUPPORTED If the attribute is not settable. + * \ingroup group_context + */ +VX_API_ENTRY vx_status VX_API_CALL vxSetContextAttribute(vx_context context, vx_enum attribute, const void *ptr, vx_size size); + +/*! \brief Provides a generic API to give platform-specific hints to the implementation. + * \param [in] reference The reference to the object to hint at. + * This could be \ref vx_context, \ref vx_graph, \ref vx_node, \ref vx_image, \ref vx_array, or any other reference. + * \param [in] hint A \ref vx_hint_e \a hint to give to a \ref vx_context. This is a platform-specific optimization or implementation mechanism. + * \param [in] data Optional vendor specific data. + * \param [in] data_size Size of the data structure \p data. + * \return A \ref vx_status_e enumeration. + * \retval VX_SUCCESS No errors; any other value indicates failure. + * \retval VX_ERROR_INVALID_REFERENCE reference is not a valid \ref vx_reference reference. + * \retval VX_ERROR_NOT_SUPPORTED If the hint is not supported. + * \ingroup group_hint + */ +VX_API_ENTRY vx_status VX_API_CALL vxHint(vx_reference reference, vx_enum hint, const void* data, vx_size data_size); + +/*! \brief Provides a generic API to give platform-specific directives to the implementations. + * \param [in] reference The reference to the object to set the directive on. + * This could be \ref vx_context, \ref vx_graph, \ref vx_node, \ref vx_image, \ref vx_array, or any other reference. + * \param [in] directive The directive to set. See \ref vx_directive_e. + * \return A \ref vx_status_e enumeration. + * \retval VX_SUCCESS No errors; any other value indicates failure. + * \retval VX_ERROR_INVALID_REFERENCE reference is not a valid \ref vx_reference reference. + * \retval VX_ERROR_NOT_SUPPORTED If the directive is not supported. + * \note The performance counter directives are only available for the reference \ref vx_context. + * Error VX_ERROR_NOT_SUPPORTED is returned when used with any other reference. + * \ingroup group_directive + */ +VX_API_ENTRY vx_status VX_API_CALL vxDirective(vx_reference reference, vx_enum directive); + +/*! \brief Provides a generic API to return status values from Object constructors if they + * fail. 
+ * \note Users do not need to strictly check every object creator as the errors
+ * should properly propagate and be detected during verification time or run-time.
+ * \code
+ * vx_image img = vxCreateImage(context, 639, 480, VX_DF_IMAGE_UYVY);
+ * vx_status status = vxGetStatus((vx_reference)img);
+ * // status == VX_ERROR_INVALID_DIMENSIONS
+ * vxReleaseImage(&img);
+ * \endcode
+ * \pre Appropriate Object Creator function.
+ * \post Appropriate Object Release function.
+ * \param [in] reference The reference to check for construction errors.
+ * \return A \ref vx_status_e enumeration.
+ * \retval VX_SUCCESS No errors; any other value indicates failure.
+ * \retval * Some error occurred, please check enumeration list and constructor.
+ * \ingroup group_basic_features
+ */
+VX_API_ENTRY vx_status VX_API_CALL vxGetStatus(vx_reference reference);
+
+/*!
+ * \brief Registers user-defined structures to the context.
+ * \param [in] context The reference to the implementation context.
+ * \param [in] size The size of user struct in bytes.
+ * \return A \ref vx_enum value that is a type given to the User
+ * to refer to their custom structure when declaring a \ref vx_array
+ * of that structure.
+ * \retval VX_TYPE_INVALID If the namespace of types has been exhausted.
+ * \note This call should only be used once within the lifetime of a context for
+ * a specific structure.
+ * \ingroup group_adv_array
+ */
+VX_API_ENTRY vx_enum VX_API_CALL vxRegisterUserStruct(vx_context context, vx_size size);
+
+/*!
+ * \brief Registers user-defined structures to the context, and associates a name to it.
+ * \param [in] context The reference to the implementation context.
+ * \param [in] size The size of user struct in bytes.
+ * \param [in] *type_name Pointer to the '\0' terminated string that identifies the
+ * user struct type. The string is copied by the function so
+ * that it stays the property of the caller. NULL means that
+ * the user struct is not named. The length of the string
+ * shall be lower than VX_MAX_REFERENCE_NAME bytes.
+ * \return A \ref vx_enum value that is a type given to the User
+ * to refer to their custom structure when declaring a \ref vx_array
+ * of that structure.
+ * \retval VX_TYPE_INVALID If the namespace of types has been exhausted.
+ * \note This call should only be used once within the lifetime of a context for
+ * a specific structure.
+ * \ingroup group_adv_array
+ */
+VX_API_ENTRY vx_enum VX_API_CALL vxRegisterUserStructWithName(vx_context context, vx_size size, const vx_char* type_name);
+
+/*!
+ * \brief Returns the name of the user-defined structure associated with the enumeration given.
+ * \param [in] context The reference to the implementation context.
+ * \param [in] user_struct_type The enumeration value of the user struct
+ * \param [out] type_name Name of the user struct
+ * \param [in] name_size The size of allocated buffer pointed to by type_name
+ * \return A \ref vx_status_e enumeration.
+ * \retval VX_SUCCESS user_struct_type was valid, and name was found and returned
+ * \retval VX_ERROR_INVALID_PARAMETERS user_struct_type was not a valid user struct enumeration.
+ * \retval VX_ERROR_NO_MEMORY name_size is too small to hold the name of the user struct type.
+ * \retval VX_FAILURE user_struct_type does not have an associated type name.
+ * \pre \ref vxRegisterUserStructWithName should be called for this user struct.
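+ * A minimal usage sketch (illustrative only; \c my_point_t is a hypothetical
+ * application-defined structure):
+ * \code
+ * typedef struct { vx_int32 x; vx_int32 y; } my_point_t;
+ * vx_enum point_type = vxRegisterUserStructWithName(context, sizeof(my_point_t), "my_point_t");
+ * vx_char name[VX_MAX_REFERENCE_NAME];
+ * vx_status status = vxGetUserStructNameByEnum(context, point_type, name, sizeof(name));
+ * \endcode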
+ * \ingroup group_adv_array + */ +VX_API_ENTRY vx_status VX_API_CALL vxGetUserStructNameByEnum(vx_context context, vx_enum user_struct_type, vx_char* type_name, vx_size name_size); + +/*! + * \brief Returns the enum of the user-defined structure associated with the name given + * \param [in] context The reference to the implementation context. + * \param [in] type_name Pointer to the '\0' terminated string that identifies the user + * struct type. The length of the string shall be lower than VX_MAX_REFERENCE_NAME bytes. + * \param [out] user_struct_type The enumeration value of the user struct + * \return A \ref vx_status_e enumeration. + * \retval VX_SUCCESS type_name was valid, and enumeration was found and returned + * \retval VX_FAILURE type_name does not match any user struct enumeration. +* \pre \ref vxRegisterUserStructWithName should be called for this user struct. + * \ingroup group_adv_array + */ +VX_API_ENTRY vx_status VX_API_CALL vxGetUserStructEnumByName(vx_context context, const vx_char* type_name, vx_enum *user_struct_type); + +/*! + * \brief Allocates and registers user-defined kernel enumeration to a context. + * The allocated enumeration is from available pool of 4096 enumerations reserved + * for dynamic allocation from VX_KERNEL_BASE(VX_ID_USER,0). + * \param [in] context The reference to the implementation context. + * \param [out] pKernelEnumId pointer to return \ref vx_enum for user-defined kernel. + * \retval VX_SUCCESS No errors; any other value indicates failure. + * \retval VX_ERROR_INVALID_REFERENCE If the context is not a valid \ref vx_context reference. + * \retval VX_ERROR_NO_RESOURCES The enumerations has been exhausted. + * \ingroup group_user_kernels + */ +VX_API_ENTRY vx_status VX_API_CALL vxAllocateUserKernelId(vx_context context, vx_enum * pKernelEnumId); + +/*! + * \brief Allocates and registers user-defined kernel library ID to a context. + * + * The allocated library ID is from available pool of library IDs (1..255) + * reserved for dynamic allocation. The returned libraryId can be used by + * user-kernel library developer to specify individual kernel enum IDs in + * a header file, shown below: + * \code + * #define MY_KERNEL_ID1(libraryId) (VX_KERNEL_BASE(VX_ID_USER,libraryId) + 0); + * #define MY_KERNEL_ID2(libraryId) (VX_KERNEL_BASE(VX_ID_USER,libraryId) + 1); + * #define MY_KERNEL_ID3(libraryId) (VX_KERNEL_BASE(VX_ID_USER,libraryId) + 2); + * \endcode + * \param [in] context The reference to the implementation context. + * \param [out] pLibraryId pointer to \ref vx_enum for user-kernel libraryId. + * \retval VX_SUCCESS No errors; any other value indicates failure. + * \retval VX_ERROR_NO_RESOURCES The enumerations has been exhausted. + * \ingroup group_user_kernels + */ +VX_API_ENTRY vx_status VX_API_CALL vxAllocateUserKernelLibraryId(vx_context context, vx_enum * pLibraryId); + +/*! \brief Sets the default target of the immediate mode. Upon successful execution of this + * function any future execution of immediate mode function is attempted on the new default + * target of the context. + * \param [in] context The reference to the implementation context. + * \param [in] target_enum The default immediate mode target enum to be set + * to the \ref vx_context object. Use a \ref vx_target_e. + * \param [in] target_string The target name ASCII string. This contains a valid value + * when target_enum is set to \ref VX_TARGET_STRING, otherwise it is ignored. + * \ingroup group_context + * \return A \ref vx_status_e enumeration. 
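+ * A minimal sketch (illustrative; assumes the implementation accepts the
+ * default \ref VX_TARGET_ANY target):
+ * \code
+ * vx_status status = vxSetImmediateModeTarget(context, VX_TARGET_ANY, NULL);
+ * \endcode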
+ * \retval VX_SUCCESS Default target set; any other value indicates failure. + * \retval VX_ERROR_INVALID_REFERENCE If the context is not a valid \ref vx_context reference. + * \retval VX_ERROR_NOT_SUPPORTED If the specified target is not supported in this context. + */ +VX_API_ENTRY vx_status VX_API_CALL vxSetImmediateModeTarget(vx_context context, vx_enum target_enum, const char* target_string); + +/*============================================================================== + IMAGE + =============================================================================*/ + +/*! \brief Creates an opaque reference to an image buffer. + * \details Not guaranteed to exist until the \ref vx_graph containing it has been verified. + * \param [in] context The reference to the implementation context. + * \param [in] width The image width in pixels. The image in the formats of + * \ref VX_DF_IMAGE_NV12, \ref VX_DF_IMAGE_NV21, \ref VX_DF_IMAGE_IYUV, + * \ref VX_DF_IMAGE_UYVY, \ref VX_DF_IMAGE_YUYV must have even width. + * \param [in] height The image height in pixels. The image in the formats of + * \ref VX_DF_IMAGE_NV12, \ref VX_DF_IMAGE_NV21, \ref VX_DF_IMAGE_IYUV + * must have even height. + * \param [in] color The VX_DF_IMAGE (\ref vx_df_image_e) code that represents the format + * of the image and the color space. + * \returns An image reference \ref vx_image. Any possible errors preventing a successful + * creation should be checked using \ref vxGetStatus. + * \see vxMapImagePatch to obtain direct memory access to the image data. + * \ingroup group_image + */ +VX_API_ENTRY vx_image VX_API_CALL vxCreateImage(vx_context context, vx_uint32 width, vx_uint32 height, vx_df_image color); + +/*! \brief Creates an image from another image given a rectangle. This second + * reference refers to the data in the original image. Updates to this image + * updates the parent image. The rectangle must be defined within the pixel space + * of the parent image. + * \param [in] img The reference to the parent image. + * \param [in] rect The region of interest rectangle. Must contain points within + * the parent image pixel space. + * \returns An image reference \ref vx_image to the sub-image. Any possible errors preventing a + * successful creation should be checked using \ref vxGetStatus. + * \ingroup group_image + */ +VX_API_ENTRY vx_image VX_API_CALL vxCreateImageFromROI(vx_image img, const vx_rectangle_t *rect); + +/*! \brief Creates a reference to an image object that has a singular, + * uniform value in all pixels. The uniform image created is read-only. + * \param [in] context The reference to the implementation context. + * \param [in] width The image width in pixels. The image in the formats of + * \ref VX_DF_IMAGE_NV12, \ref VX_DF_IMAGE_NV21, \ref VX_DF_IMAGE_IYUV, + * \ref VX_DF_IMAGE_UYVY, \ref VX_DF_IMAGE_YUYV must have even width. + * \param [in] height The image height in pixels. The image in the formats of + * \ref VX_DF_IMAGE_NV12, \ref VX_DF_IMAGE_NV21, + * \ref VX_DF_IMAGE_IYUV must have even height. + * \param [in] color The VX_DF_IMAGE (\ref vx_df_image_e) code that represents the format of the image and the color space. + * \param [in] value The pointer to the pixel value to which to set all pixels. See \ref vx_pixel_value_t. + * \returns An image reference \ref vx_image. Any possible errors preventing a + * successful creation should be checked using \ref vxGetStatus. + * \see vxMapImagePatch to obtain direct memory access to the image data. 
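+ * A minimal sketch (illustrative; creates a mid-gray 640x480 \ref VX_DF_IMAGE_U8 image):
+ * \code
+ * vx_pixel_value_t gray;
+ * gray.U8 = 128;
+ * vx_image uniform = vxCreateUniformImage(context, 640, 480, VX_DF_IMAGE_U8, &gray);
+ * \endcode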
+ * \note \ref vxMapImagePatch and \ref vxUnmapImagePatch may be called with + * a uniform image reference. + * \ingroup group_image + */ +VX_API_ENTRY vx_image VX_API_CALL vxCreateUniformImage(vx_context context, vx_uint32 width, vx_uint32 height, vx_df_image color, const vx_pixel_value_t *value); + +/*! \brief Creates an opaque reference to an image buffer with no direct + * user access. This function allows setting the image width, height, or format. + * \details Virtual data objects allow users to connect various nodes within a + * graph via data references without access to that data, but they also permit the + * implementation to take maximum advantage of possible optimizations. Use this + * API to create a data reference to link two or more nodes together when the + * intermediate data are not required to be accessed by outside entities. This API + * in particular allows the user to define the image format of the data without + * requiring the exact dimensions. Virtual objects are scoped within the graph + * they are declared a part of, and can't be shared outside of this scope. + * All of the following constructions of virtual images are valid. + * \code + * vx_context context = vxCreateContext(); + * vx_graph graph = vxCreateGraph(context); + * vx_image virt[] = { + * vxCreateVirtualImage(graph, 0, 0, VX_DF_IMAGE_U8), // no specified dimension + * vxCreateVirtualImage(graph, 320, 240, VX_DF_IMAGE_VIRT), // no specified format + * vxCreateVirtualImage(graph, 640, 480, VX_DF_IMAGE_U8), // no user access + * }; + * \endcode + * \param [in] graph The reference to the parent graph. + * \param [in] width The width of the image in pixels. A value of zero informs the interface + * that the value is unspecified. The image in the formats of \ref VX_DF_IMAGE_NV12, + * \ref VX_DF_IMAGE_NV21, \ref VX_DF_IMAGE_IYUV, \ref VX_DF_IMAGE_UYVY, + * \ref VX_DF_IMAGE_YUYV must have even width. + * \param [in] height The height of the image in pixels. A value of zero informs the interface + * that the value is unspecified. The image in the formats of \ref VX_DF_IMAGE_NV12, + * \ref VX_DF_IMAGE_NV21, \ref VX_DF_IMAGE_IYUV must have even height. + * \param [in] color The VX_DF_IMAGE (\ref vx_df_image_e) code that represents the format + * of the image and the color space. A value of \ref VX_DF_IMAGE_VIRT informs the + * interface that the format is unspecified. + * \returns An image reference \ref vx_image. Any possible errors preventing a + * successful creation should be checked using \ref vxGetStatus. + * \note Passing this reference to \ref vxMapImagePatch will return an error. + * \ingroup group_image + */ +VX_API_ENTRY vx_image VX_API_CALL vxCreateVirtualImage(vx_graph graph, vx_uint32 width, vx_uint32 height, vx_df_image color); + +/*! \brief Creates a reference to an image object that was externally allocated. + * \param [in] context The reference to the implementation context. + * \param [in] color See the \ref vx_df_image_e codes. This mandates the + * number of planes needed to be valid in the \a addrs and \a ptrs arrays based on the format given. + * \param [in] addrs[] The array of image patch addressing structures that + * define the dimension and stride of the array of pointers. See note below. + * \param [in] ptrs[] The array of platform-defined references to each plane. See note below. + * \param [in] memory_type \ref vx_memory_type_e. When giving \ref VX_MEMORY_TYPE_HOST + * the \a ptrs array is assumed to be HOST accessible pointers to memory. 
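+ * A minimal sketch for a single-plane \ref VX_DF_IMAGE_U8 image (illustrative;
+ * \c host_buffer is assumed to be a caller-owned allocation of at least 640*480 bytes):
+ * \code
+ * vx_imagepatch_addressing_t addr = { 0 };
+ * addr.dim_x    = 640;
+ * addr.dim_y    = 480;
+ * addr.stride_x = 1;    // one byte per U8 pixel
+ * addr.stride_y = 640;  // tightly packed rows assumed
+ * void *ptrs[] = { host_buffer };
+ * vx_image img = vxCreateImageFromHandle(context, VX_DF_IMAGE_U8, &addr, ptrs, VX_MEMORY_TYPE_HOST);
+ * \endcode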
+ * \returns An image reference \ref vx_image. Any possible errors preventing a + * successful creation should be checked using \ref vxGetStatus. + * \note The user must call vxMapImagePatch prior to accessing the pixels of an image, even if the + * image was created via \ref vxCreateImageFromHandle. Reads or writes to memory referenced + * by ptrs[ ] after calling \ref vxCreateImageFromHandle without first calling + * \ref vxMapImagePatch will result in undefined behavior. + * The property of addr[] and ptrs[] arrays is kept by the caller (It means that the implementation will + * make an internal copy of the provided information. \a addr and \a ptrs can then simply be application's + * local variables). + * Only \a dim_x, \a dim_y, \a stride_x and \a stride_y fields of the \ref vx_imagepatch_addressing_t need to be + * provided by the application. Other fields (\a step_x, \a step_y, \a scale_x & \a scale_y) are ignored by this function. + * The layout of the imported memory must follow a row-major order. In other words, \a stride_x should be + * sufficiently large so that there is no overlap between data elements corresponding to different + * pixels, and \a stride_y >= \a stride_x * \a dim_x. + * + * In order to release the image back to the application we should use \ref vxSwapImageHandle. + * + * Import type of the created image is available via the image attribute \ref vx_image_attribute_e parameter. + * + * \ingroup group_image + */ +VX_API_ENTRY vx_image VX_API_CALL vxCreateImageFromHandle(vx_context context, vx_df_image color, const vx_imagepatch_addressing_t addrs[], void *const ptrs[], vx_enum memory_type); + +VX_API_ENTRY vx_image VX_API_CALL vxCreateImageFromHandleEx(vx_context context, vx_df_image color, const vx_imagepatch_addressing_t addrs[], vx_uint64 handles[], vx_enum memory_type); + +/*! \brief Swaps the image handle of an image previously created from handle. + * + * This function sets the new image handle (i.e. pointer to all image planes) + * and returns the previous one. + * + * Once this function call has completed, the application gets back the + * ownership of the memory referenced by the previous handle. This memory + * contains up-to-date pixel data, and the application can safely reuse or + * release it. + * + * The memory referenced by the new handle must have been allocated + * consistently with the image properties since the import type, + * memory layout and dimensions are unchanged (see addrs, color, and + * memory_type in \ref vxCreateImageFromHandle). + * + * All images created from ROI or channel with this image as parent or ancestor + * will automatically use the memory referenced by the new handle. + * + * The behavior of \ref vxSwapImageHandle when called from a user node is undefined. + * \param [in] image The reference to an image created from handle + * \param [in] new_ptrs[] pointer to a caller owned array that contains + * the new image handle (image plane pointers) + * \arg new_ptrs is non NULL. new_ptrs[i] must be non NULL for each i such as + * 0 < i < nbPlanes, otherwise, this is an error. The address of the storage memory + * for image plane i is set to new_ptrs[i] + * \arg new_ptrs is NULL: the previous image storage memory is reclaimed by the + * caller, while no new handle is provided. + * \param [out] prev_ptrs[] pointer to a caller owned array in which + * the application returns the previous image handle + * \arg prev_ptrs is non NULL. prev_ptrs must have at least as many + * elements as the number of image planes. 
For each i such as
+ * 0 < i < nbPlanes , prev_ptrs[i] is set to the address of the previous storage
+ * memory for plane i.
+ * \arg prev_ptrs NULL: the previous handle is not returned.
+ * \param [in] num_planes Number of planes in the image. This must be set equal to the number of planes of the input image.
+ * The number of elements in new_ptrs and prev_ptrs arrays must be equal to or greater than num_planes.
+ * If either array has more than num_planes elements, the extra elements are ignored. If either array is smaller
+ * than num_planes, the results are undefined.
+ * \return A \ref vx_status_e enumeration.
+ * \retval VX_SUCCESS No errors.
+ * \retval VX_ERROR_INVALID_REFERENCE image is not a valid \ref vx_image reference.
+ * \retval VX_ERROR_INVALID_PARAMETERS The image was not created from handle or
+ * the content of new_ptrs is not valid.
+ * \retval VX_FAILURE The image was already being accessed.
+ * \ingroup group_image
+ */
+
+VX_API_ENTRY vx_status VX_API_CALL vxSwapImageHandle(vx_image image, void* const new_ptrs[], void* prev_ptrs[], vx_size num_planes);
+
+
+/*! \brief Swaps the image created from handle.
+ * \details This function swaps the logical and physical addresses of the two images.
+ * The images must have the same properties except for their memory related content.
+ * \attention The application should make sure cache and memory are coherent for the first call to vxSwapImage.
+ * \version 0.4
+ */
+VX_API_ENTRY vx_status VX_API_CALL vxSwapImage(vx_image image0, vx_image image1);
+
+
+/*! \brief Retrieves various attributes of an image.
+ * \param [in] image The reference to the image to query.
+ * \param [in] attribute The attribute to query. Use a \ref vx_image_attribute_e.
+ * \param [out] ptr The location at which to store the resulting value.
+ * \param [in] size The size in bytes of the container to which \a ptr points.
+ * \return A \ref vx_status_e enumeration.
+ * \retval VX_SUCCESS No errors; any other value indicates failure.
+ * \retval VX_ERROR_INVALID_REFERENCE image is not a valid \ref vx_image reference.
+ * \retval VX_ERROR_INVALID_PARAMETERS If any of the other parameters are incorrect.
+ * \retval VX_ERROR_NOT_SUPPORTED If the attribute is not supported on this implementation.
+ * \ingroup group_image
+ */
+VX_API_ENTRY vx_status VX_API_CALL vxQueryImage(vx_image image, vx_enum attribute, void *ptr, vx_size size);
+
+/*! \brief Allows setting attributes on the image.
+ * \param [in] image The reference to the image on which to set the attribute.
+ * \param [in] attribute The attribute to set. Use a \ref vx_image_attribute_e enumeration.
+ * \param [in] ptr The pointer to the location from which to read the value.
+ * \param [in] size The size in bytes of the object pointed to by \a ptr.
+ * \return A \ref vx_status_e enumeration.
+ * \retval VX_SUCCESS No errors; any other value indicates failure.
+ * \retval VX_ERROR_INVALID_REFERENCE image is not a valid \ref vx_image reference.
+ * \retval VX_ERROR_INVALID_PARAMETERS If any of the other parameters are incorrect.
+ * \ingroup group_image
+ */
+VX_API_ENTRY vx_status VX_API_CALL vxSetImageAttribute(vx_image image, vx_enum attribute, const void *ptr, vx_size size);
+
+/*! \brief Initialize an image with the given pixel value.
+ * \param [in] image The reference to the image to initialize.
+ * \param [in] pixel_value The pointer to the constant pixel value to initialize all image pixels. See \ref vx_pixel_value_t.
+ * \return A \ref vx_status_e enumeration.
+ * \retval VX_SUCCESS No errors.
+ * \retval VX_ERROR_INVALID_REFERENCE If the image is a uniform image, a virtual image, or not a \ref vx_image. + * \retval VX_ERROR_INVALID_PARAMETERS If any of the other parameters are incorrect. + * \note All pixels of the entire image are initialized to the indicated pixel value, independently from the valid region. + * The valid region of the image is unaffected by this function. The image remains mutable after the call to this function, + * so its pixels and mutable attributes may be changed by subsequent functions. + * \ingroup group_image + */ +VX_API_ENTRY vx_status VX_API_CALL vxSetImagePixelValues(vx_image image, const vx_pixel_value_t *pixel_value); + +/*! \brief Releases a reference to an image object. + * The object may not be garbage collected until its total reference count is zero. + * + * An implementation may defer the actual object destruction after its total + * reference count is zero (potentially until context destruction). Thus, + * releasing an image created from handle + * (see \ref vxCreateImageFromHandle) and all others objects that may + * reference it (nodes, ROI, or channel for instance) are not sufficient to get back the + * ownership of the memory referenced by the current image handle. The only way + * for this is to call \ref vxSwapImageHandle) before releasing the + * image. + * + * \param [in] image The pointer to the image to release. + * \post After returning from this function the reference is zeroed. + * \return A \ref vx_status_e enumeration. + * \retval VX_SUCCESS No errors; any other value indicates failure. + * \retval VX_ERROR_INVALID_REFERENCE image is not a valid \ref vx_image reference. + * \ingroup group_image + */ +VX_API_ENTRY vx_status VX_API_CALL vxReleaseImage(vx_image *image); + +/*! + * \brief Accesses a specific indexed pixel in an image patch. + * \param [in] ptr The base pointer of the patch as returned from \ref vxMapImagePatch. + * \param [in] index The 0 based index of the pixel count in the patch. Indexes increase horizontally by 1 then wrap around to the next row. + * \param [in] addr The pointer to the addressing mode information returned from \ref vxMapImagePatch. + * \return void * Returns the pointer to the specified pixel. + * \pre \ref vxMapImagePatch + * \ingroup group_image + */ +VX_API_ENTRY void * VX_API_CALL vxFormatImagePatchAddress1d(void *ptr, vx_uint32 index, const vx_imagepatch_addressing_t *addr); + +/*! + * \brief Accesses a specific pixel at a 2d coordinate in an image patch. + * \param [in] ptr The base pointer of the patch as returned from \ref vxMapImagePatch. + * \param [in] x The x dimension within the patch. + * \param [in] y The y dimension within the patch. + * \param [in] addr The pointer to the addressing mode information returned from \ref vxMapImagePatch. + * \return void * Returns the pointer to the specified pixel. + * \pre \ref vxMapImagePatch + * \ingroup group_image + */ +VX_API_ENTRY void * VX_API_CALL vxFormatImagePatchAddress2d(void *ptr, vx_uint32 x, vx_uint32 y, const vx_imagepatch_addressing_t *addr); + +/*! \brief Retrieves the valid region of the image as a rectangle. + * \param [in] image The image from which to retrieve the valid region. + * \param [out] rect The destination rectangle. + * \return A \ref vx_status_e enumeration. + * \retval VX_SUCCESS No errors; any other value indicates failure. + * \retval VX_ERROR_INVALID_REFERENCE image is not a valid \ref vx_image reference. + * \retval VX_ERROR_INVALID_PARAMETERS Invalid rect. 
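+ * A minimal sketch that reads the valid region of a \ref VX_DF_IMAGE_U8 image into
+ * application memory (illustrative; \c user_buffer is assumed to be a caller-owned
+ * buffer large enough for the region):
+ * \code
+ * vx_rectangle_t rect;
+ * if (vxGetValidRegionImage(image, &rect) == VX_SUCCESS)
+ * {
+ *     vx_imagepatch_addressing_t addr = { 0 };
+ *     addr.dim_x    = rect.end_x - rect.start_x;
+ *     addr.dim_y    = rect.end_y - rect.start_y;
+ *     addr.stride_x = 1;                       // one byte per U8 pixel
+ *     addr.stride_y = (vx_int32)addr.dim_x;    // tightly packed rows
+ *     vxCopyImagePatch(image, &rect, 0, &addr, user_buffer, VX_READ_ONLY, VX_MEMORY_TYPE_HOST);
+ * }
+ * \endcode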
+ * \note This rectangle can be passed directly to \ref vxMapImagePatch to get + * the full valid region of the image. + * \ingroup group_image + */ +VX_API_ENTRY vx_status VX_API_CALL vxGetValidRegionImage(vx_image image, vx_rectangle_t *rect); + +/*! \brief Allows the application to copy a rectangular patch from/into an image object plane. + * \param [in] image The reference to the image object that is the source or the + * destination of the copy. + * \param [in] image_rect The coordinates of the image patch. The patch must be within + * the bounds of the image. (start_x, start_y) gives the coordinates of the topleft + * pixel inside the patch, while (end_x, end_y) gives the coordinates of the bottomright + * element out of the patch. Must be 0 <= start < end <= number of pixels in the image dimension. + * \param [in] image_plane_index The plane index of the image object that is the source or the + * destination of the patch copy. + * \param [in] user_addr The address of a structure describing the layout of the + * user memory location pointed by user_ptr. In the structure, only dim_x, dim_y, + * stride_x and stride_y fields must be provided, other fields are ignored by the function. + * The layout of the user memory must follow a row major order: + * stride_x >= pixel size in bytes, and stride_y >= stride_x * dim_x. + * \param [in] user_ptr The address of the memory location where to store the requested data + * if the copy was requested in read mode, or from where to get the data to store into the image + * object if the copy was requested in write mode. The accessible memory must be large enough + * to contain the specified patch with the specified layout: + * accessible memory in bytes >= (end_y - start_y) * stride_y. + * \param [in] usage This declares the effect of the copy with regard to the image object + * using the \ref vx_accessor_e enumeration. For uniform images, only VX_READ_ONLY + * is supported. For other images, Only \ref VX_READ_ONLY and \ref VX_WRITE_ONLY are supported: + * \arg \ref VX_READ_ONLY means that data is copied from the image object into the application memory + * \arg \ref VX_WRITE_ONLY means that data is copied into the image object from the application memory + * \param [in] user_mem_type A \ref vx_memory_type_e enumeration that specifies + * the memory type of the memory referenced by the user_addr. + * \return A \ref vx_status_e enumeration. + * \retval VX_SUCCESS No errors; any other value indicates failure. + * \retval VX_ERROR_OPTIMIZED_AWAY This is a reference to a virtual image that cannot be + * accessed by the application. + * \retval VX_ERROR_INVALID_REFERENCE image is not a valid \ref vx_image reference. + * \retval VX_ERROR_INVALID_PARAMETERS An other parameter is incorrect. + * \note The application may ask for data outside the bounds of the valid region, but + * such data has an undefined value. + * \ingroup group_image + */ +VX_API_ENTRY vx_status VX_API_CALL vxCopyImagePatch(vx_image image, const vx_rectangle_t *image_rect, vx_uint32 image_plane_index, const vx_imagepatch_addressing_t *user_addr, void * user_ptr, vx_enum usage, vx_enum user_mem_type); + + +/*! \brief Allows the application to get direct access to a rectangular patch of an image object plane. + * \param [in] image The reference to the image object that contains the patch to map. + * \param [in] rect The coordinates of image patch. The patch must be within the + * bounds of the image. 
(start_x, start_y) gives the coordinate of the topleft + * element inside the patch, while (end_x, end_y) give the coordinate of + * the bottomright element out of the patch. Must be 0 <= start < end. + * \param [in] plane_index The plane index of the image object to be accessed. + * \param [out] map_id The address of a \ref vx_map_id variable where the function + * returns a map identifier. + * \arg (*map_id) must eventually be provided as the map_id parameter of a call to + * \ref vxUnmapImagePatch. + * \param [out] addr The address of a \ref vx_imagepatch_addressing_t structure + * describing the memory layout of the image patch to access. The function fills the + * structure pointed by addr with the layout information that the application must + * consult to access the pixel data at address (*ptr). The layout of the mapped memory + * follows a row-major order: stride_x>0, stride_y>0 and stride_y >= stride_x * dim_x. + * An exception is for \ref VX_DF_IMAGE_U1 where \a stride_x == 0, + * _stride_x_bits_ > 0 and _stride_y_ {geq} (_stride_x_bits_ * _dim_x_ + 7) / 8 + * (i.e., at least the number of bytes needed to hold _dim_x_ pixels). + * If the image object being accessed was created via + * \ref vxCreateImageFromHandle, then the returned memory layout will be + * the identical to that of the addressing structure provided when + * \ref vxCreateImageFromHandle was called. + * \param [out] ptr The address of a pointer that the function sets to the + * address where the requested data can be accessed. This returned (*ptr) address + * is only valid between the call to this function and the corresponding call to + * \ref vxUnmapImagePatch. + * If image was created via \ref vxCreateImageFromHandle then the returned + * address (*ptr) will be the address of the patch in the original pixel buffer + * provided when image was created. + * \param [in] usage This declares the access mode for the image patch, using + * the \ref vx_accessor_e enumeration. For uniform images, only VX_READ_ONLY + * is supported. + * \arg \ref VX_READ_ONLY: after the function call, the content of the memory location + * pointed by (*ptr) contains the image patch data. Writing into this memory location + * is forbidden and its behavior is undefined. + * \arg \ref VX_READ_AND_WRITE: after the function call, the content of the memory + * location pointed by (*ptr) contains the image patch data; writing into this memory + * is allowed only for the location of pixels only and will result in a modification + * of the written pixels in the image object once the patch is unmapped. Writing into + * a gap between pixels (when addr->stride_x > pixel size in bytes or addr->stride_y > addr->stride_x*addr->dim_x) + * is forbidden and its behavior is undefined. + * \arg \ref VX_WRITE_ONLY: after the function call, the memory location pointed by (*ptr) + * contains undefined data; writing each pixel of the patch is required prior to + * unmapping. Pixels not written by the application before unmap will become + * undefined after unmap, even if they were well defined before map. Like for + * VX_READ_AND_WRITE, writing into a gap between pixels is forbidden and its behavior + * is undefined. + * \param [in] mem_type A \ref vx_memory_type_e enumeration that + * specifies the type of the memory where the image patch is requested to be mapped. + * \param [in] flags An integer that allows passing options to the map operation. + * Use the \ref vx_map_flag_e enumeration. + * \return A \ref vx_status_e enumeration. 
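+ * A minimal read-modify-write sketch for plane 0 of a \ref VX_DF_IMAGE_U8 image
+ * (illustrative; \c rect is assumed to be a valid rectangle, e.g. obtained from
+ * \ref vxGetValidRegionImage):
+ * \code
+ * vx_map_id map_id;
+ * vx_imagepatch_addressing_t addr;
+ * void *base = NULL;
+ * if (vxMapImagePatch(image, &rect, 0, &map_id, &addr, &base,
+ *                     VX_READ_AND_WRITE, VX_MEMORY_TYPE_HOST, VX_NOGAP_X) == VX_SUCCESS)
+ * {
+ *     for (vx_uint32 y = 0; y < addr.dim_y; y += addr.step_y)
+ *     {
+ *         for (vx_uint32 x = 0; x < addr.dim_x; x += addr.step_x)
+ *         {
+ *             vx_uint8 *pixel = (vx_uint8 *)vxFormatImagePatchAddress2d(base, x, y, &addr);
+ *             *pixel = (vx_uint8)(255 - *pixel);   // invert the pixel
+ *         }
+ *     }
+ *     vxUnmapImagePatch(image, map_id);
+ * }
+ * \endcode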
+ * \retval VX_SUCCESS No errors; any other value indicates failure. + * \retval VX_ERROR_OPTIMIZED_AWAY This is a reference to a virtual image that cannot be + * accessed by the application. + * \retval VX_ERROR_INVALID_REFERENCE image is not a valid \ref vx_image reference. + * reference. + * \retval VX_ERROR_INVALID_PARAMETERS An other parameter is incorrect. + * \note The user may ask for data outside the bounds of the valid region, but + * such data has an undefined value. + * \ingroup group_image + * \post \ref vxUnmapImagePatch with same (*map_id) value. + */ +VX_API_ENTRY vx_status VX_API_CALL vxMapImagePatch(vx_image image, const vx_rectangle_t *rect, vx_uint32 plane_index, vx_map_id *map_id, vx_imagepatch_addressing_t *addr, void **ptr, vx_enum usage, vx_enum mem_type, vx_uint32 flags); + + +/*! \brief Unmap and commit potential changes to a image object patch that were previously mapped. + * Unmapping an image patch invalidates the memory location from which the patch could + * be accessed by the application. Accessing this memory location after the unmap function + * completes has an undefined behavior. + * \param [in] image The reference to the image object to unmap. + * \param [out] map_id The unique map identifier that was returned by \ref vxMapImagePatch . + * \return A \ref vx_status_e enumeration. + * \retval VX_SUCCESS No errors; any other value indicates failure. + * \retval VX_ERROR_INVALID_REFERENCE image is not a valid \ref vx_image reference. + * \retval VX_ERROR_INVALID_PARAMETERS An other parameter is incorrect. + * \ingroup group_image + * \pre \ref vxMapImagePatch with same map_id value + */ +VX_API_ENTRY vx_status VX_API_CALL vxUnmapImagePatch(vx_image image, vx_map_id map_id); + +/*! \brief Create a sub-image from a single plane channel of another image. + * + * The sub-image refers to the data in the original image. Updates to this image + * update the parent image and reversely. + * + * The function supports only channels that occupy an entire plane of a multi-planar + * images, as listed below. Other cases are not supported. + * VX_CHANNEL_Y from YUV4, IYUV, NV12, NV21 + * VX_CHANNEL_U from YUV4, IYUV + * VX_CHANNEL_V from YUV4, IYUV + * + * \param [in] img The reference to the parent image. + * \param [in] channel The \ref vx_channel_e channel to use. + + * \returns An image reference \ref vx_image to the sub-image. Any possible errors preventing a + * successful creation should be checked using \ref vxGetStatus. + * \ingroup group_image + */ +VX_API_ENTRY vx_image VX_API_CALL vxCreateImageFromChannel(vx_image img, vx_enum channel); + + +/*! \brief Sets the valid rectangle for an image according to a supplied rectangle. + * \note Setting or changing the valid region from within a user node by means other than the call-back, for + * example by calling \ref vxSetImageValidRectangle, might result in an incorrect valid region calculation + * by the framework. + * \param [in] image The reference to the image. + * \param [in] rect The value to be set to the image valid rectangle. A NULL indicates that the valid region is the entire image. + * \return A \ref vx_status_e enumeration. + * \retval VX_SUCCESS No errors; any other value indicates failure. + * \retval VX_ERROR_INVALID_REFERENCE image is not a valid \ref vx_image reference. + * \retval VX_ERROR_INVALID_PARAMETERS The rect does not define a proper valid rectangle. 
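+ * A minimal sketch (illustrative; marks the full extent of an assumed 640x480 image as valid):
+ * \code
+ * vx_rectangle_t valid_rect = { 0, 0, 640, 480 };   // start_x, start_y, end_x, end_y
+ * vx_status status = vxSetImageValidRectangle(image, &valid_rect);
+ * \endcode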
+ * \ingroup group_image + */ +VX_API_ENTRY vx_status VX_API_CALL vxSetImageValidRectangle(vx_image image, const vx_rectangle_t *rect); + +/*============================================================================== + KERNEL + =============================================================================*/ + + /*! \brief Registers a module with kernels in a context. + * \details This function registers the appropriate publish and unpublish functions + * with the module name if the module is not a dynamic library, so \ref vxLoadKernels and + * \ref vxUnloadKernels can be called. + * \param [in] context The reference to the context the kernels must be added to. + * \param [in] module The short name of the module to load. + * \param [in] publish must add kernels to the context by calling \ref vxAddUserKernel + * for each new kernel. It is called by \ref vxLoadKernels. + * \param [in] unpublish must remove kernels from the context by calling \ref vxRemoveKernel + * for each kernel the vxPublishKernels has added. It is called by \ref vxUnloadKernels. + * \return A \ref vx_status_e enumeration. + * \retval VX_SUCCESS No errors; any other value indicates failure. + * \retval VX_ERROR_INVALID_REFERENCE context is not a valid \ref vx_context reference. + * \retval VX_ERROR_INVALID_PARAMETERS If any of the other parameters are incorrect. + * \ingroup group_user_kernels + * \see vxLoadKernels + */ +VX_API_ENTRY vx_status VX_API_CALL vxRegisterKernelLibrary(vx_context context, const vx_char *module, vx_publish_kernels_f publish, vx_unpublish_kernels_f unpublish); + + +/*! \brief Loads a library of kernels, called module, into a context. + * + * The module must be a dynamic library with by convention, two exported functions + * named vxPublishKernels and vxUnpublishKernels. + * + * vxPublishKernels must have type \ref vx_publish_kernels_f, + * and must add kernels to the context by calling \ref vxAddUserKernel + * for each new kernel. vxPublishKernels is called by \ref vxLoadKernels. + * + * vxUnpublishKernels must have type \ref vx_unpublish_kernels_f, + * and must remove kernels from the context by calling \ref vxRemoveKernel + * for each kernel the vxPublishKernels has added. + * vxUnpublishKernels is called by \ref vxUnloadKernels. + * + * \note When all references to loaded kernels are released, the module + * may be automatically unloaded. + * \param [in] context The reference to the context the kernels must be added to. + * \param [in] module The short name of the module to load. On systems where + * there are specific naming conventions for modules, the name passed + * should ignore such conventions. For example: \c libxyz.so should be + * passed as just \c xyz and the implementation will do the right thing that + * the platform requires. + * \note This API uses the system pre-defined paths for modules. + * \return A \ref vx_status_e enumeration. + * \retval VX_SUCCESS No errors; any other value indicates failure. + * \retval VX_ERROR_INVALID_REFERENCE context is not a valid \ref vx_context reference. + * \retval VX_ERROR_INVALID_PARAMETERS If any of the other parameters are incorrect. + * \ingroup group_user_kernels + * \see vxGetKernelByName + */ +VX_API_ENTRY vx_status VX_API_CALL vxLoadKernels(vx_context context, const vx_char *module); + +/*! \brief Unloads all kernels from the OpenVX context that had been loaded from + * the module using the \ref vxLoadKernels function. + * + * The kernel unloading is performed by calling the vxUnpublishKernels + * exported function of the module. 
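+ * A minimal sketch (illustrative; "my_kernels" is a placeholder module name, e.g.
+ * a libmy_kernels.so found on the system module path):
+ * \code
+ * if (vxLoadKernels(context, "my_kernels") == VX_SUCCESS)
+ * {
+ *     // ... look up the published kernels with vxGetKernelByName() and build graphs ...
+ *     vxUnloadKernels(context, "my_kernels");
+ * }
+ * \endcode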
+ * \note vxUnpublishKernels is defined in the description of + * \ref vxLoadKernels. + * + * \param [in] context The reference to the context the kernels must be removed from. + * \param [in] module The short name of the module to unload. On systems where + * there are specific naming conventions for modules, the name passed + * should ignore such conventions. For example: \c libxyz.so should be + * passed as just \c xyz and the implementation will do the right thing + * that the platform requires. + * \note This API uses the system pre-defined paths for modules. + * \return A \ref vx_status_e enumeration. + * \retval VX_SUCCESS No errors; any other value indicates failure. + * \retval VX_ERROR_INVALID_REFERENCE context is not a valid \ref vx_context reference. + * \retval VX_ERROR_INVALID_PARAMETERS If any of the other parameters are + incorrect. + * \ingroup group_user_kernels + * \see vxLoadKernels + */ +VX_API_ENTRY vx_status VX_API_CALL vxUnloadKernels(vx_context context, const vx_char *module); + +/*! \brief Obtains a reference to a kernel using a string to specify the name. + * \details User Kernels follow a "dotted" heirarchical syntax. For example: + * "com.company.example.xyz". The following are strings specifying the kernel names: + + * org.khronos.openvx.color_convert + + * org.khronos.openvx.channel_extract + + * org.khronos.openvx.channel_combine + + * org.khronos.openvx.sobel_3x3 + + * org.khronos.openvx.magnitude + + * org.khronos.openvx.phase + + * org.khronos.openvx.scale_image + + * org.khronos.openvx.table_lookup + + * org.khronos.openvx.histogram + + * org.khronos.openvx.equalize_histogram + + * org.khronos.openvx.absdiff + + * org.khronos.openvx.mean_stddev + + * org.khronos.openvx.threshold + + * org.khronos.openvx.integral_image + + * org.khronos.openvx.dilate_3x3 + + * org.khronos.openvx.erode_3x3 + + * org.khronos.openvx.median_3x3 + + * org.khronos.openvx.box_3x3 + + * org.khronos.openvx.gaussian_3x3 + + * org.khronos.openvx.custom_convolution + + * org.khronos.openvx.gaussian_pyramid + + * org.khronos.openvx.accumulate + + * org.khronos.openvx.accumulate_weighted + + * org.khronos.openvx.accumulate_square + + * org.khronos.openvx.minmaxloc + + * org.khronos.openvx.convertdepth + + * org.khronos.openvx.canny_edge_detector + + * org.khronos.openvx.and + + * org.khronos.openvx.or + + * org.khronos.openvx.xor + + * org.khronos.openvx.not + + * org.khronos.openvx.multiply + + * org.khronos.openvx.add + + * org.khronos.openvx.subtract + + * org.khronos.openvx.warp_affine + + * org.khronos.openvx.warp_perspective + + * org.khronos.openvx.harris_corners + + * org.khronos.openvx.fast_corners + + * org.khronos.openvx.optical_flow_pyr_lk + + * org.khronos.openvx.remap + + * org.khronos.openvx.halfscale_gaussian + + * org.khronos.openvx.laplacian_pyramid + + * org.khronos.openvx.laplacian_reconstruct + + * org.khronos.openvx.non_linear_filter + + * org.khronos.openvx.match_template + + * org.khronos.openvx.lbp + + * org.khronos.openvx.hough_lines_p + + * org.khronos.openvx.tensor_multiply + + * org.khronos.openvx.tensor_add + + * org.khronos.openvx.tensor_subtract + + * org.khronos.openvx.tensor_table_lookup + + * org.khronos.openvx.tensor_transpose + + * org.khronos.openvx.tensor_convert_depth + + * org.khronos.openvx.tensor_matrix_multiply + + * org.khronos.openvx.copy + + * org.khronos.openvx.non_max_suppression + + * org.khronos.openvx.scalar_operation + + * org.khronos.openvx.hog_features + + * org.khronos.openvx.hog_cells + + * org.khronos.openvx.bilateral_filter + + 
* org.khronos.openvx.select + + * org.khronos.openvx.min + + * org.khronos.openvx.max + + * \param [in] context The reference to the implementation context. + * \param [in] name The string of the name of the kernel to get. + * \return A kernel reference. Any possible errors preventing a successful + * completion of the function should be checked using \ref vxGetStatus. + * \ingroup group_kernel + * \pre \ref vxLoadKernels if the kernel is not provided by the + * OpenVX implementation. + * \note User Kernels should follow a "dotted" hierarchical syntax. For example: + * "com.company.example.xyz". + */ +VX_API_ENTRY vx_kernel VX_API_CALL vxGetKernelByName(vx_context context, const vx_char *name); + +/*! \brief Obtains a reference to the kernel using the \ref vx_kernel_e enumeration. + * \details Enum values above the standard set are assumed to apply to + * loaded libraries. + * \param [in] context The reference to the implementation context. + * \param [in] kernel A value from \ref vx_kernel_e or a vendor or client-defined value. + * \return A \ref vx_kernel reference. Any possible errors preventing a successful completion + * of the function should be checked using \ref vxGetStatus. + * \ingroup group_kernel + * \pre \ref vxLoadKernels if the kernel is not provided by the + * OpenVX implementation. + */ +VX_API_ENTRY vx_kernel VX_API_CALL vxGetKernelByEnum(vx_context context, vx_enum kernel); + +/*! \brief This allows the client to query the kernel to get information about + * the number of parameters, enum values, etc. + * \param [in] kernel The kernel reference to query. + * \param [in] attribute The attribute to query. Use a \ref vx_kernel_attribute_e. + * \param [out] ptr The pointer to the location at which to store the resulting value. + * \param [in] size The size of the container to which \a ptr points. + * \return A \ref vx_status_e enumeration. + * \retval VX_SUCCESS No errors; any other value indicates failure. + * \retval VX_ERROR_INVALID_REFERENCE kernel is not a valid \ref vx_kernel reference. + * \retval VX_ERROR_INVALID_PARAMETERS If any of the other parameters are incorrect. + * \retval VX_ERROR_NOT_SUPPORTED If the attribute value is not supported in this implementation. + * \ingroup group_kernel + */ +VX_API_ENTRY vx_status VX_API_CALL vxQueryKernel(vx_kernel kernel, vx_enum attribute, void *ptr, vx_size size); + +/*! \brief Release the reference to the kernel. + * The object may not be garbage collected until its total reference count is zero. + * \param [in] kernel The pointer to the kernel reference to release. + * \post After returning from this function the reference is zeroed. + * \return A \ref vx_status_e enumeration. + * \retval VX_SUCCESS No errors; any other value indicates failure. + * \retval VX_ERROR_INVALID_REFERENCE kernel is not a valid \ref vx_kernel reference. + * \ingroup group_kernel + */ +VX_API_ENTRY vx_status VX_API_CALL vxReleaseKernel(vx_kernel *kernel); + +/*! \brief Allows users to add custom kernels to a context at run-time. + * \param [in] context The reference to the context the kernel must be added to. + * \param [in] name The string to use to match the kernel. + * \param [in] enumeration The enumerated value of the kernel to be used by clients. + * \param [in] func_ptr The process-local function pointer to be invoked. + * \param [in] numParams The number of parameters for this kernel. + * \param [in] validate The pointer to \ref vx_kernel_validate_f, which validates + * parameters to this kernel. 
+ * \param [in] init The kernel initialization function. + * \param [in] deinit The kernel de-initialization function. + * \return A \ref vx_kernel reference. Any possible errors + * preventing a successful creation should be checked using \ref vxGetStatus. + * \ingroup group_user_kernels + */ +VX_API_ENTRY vx_kernel VX_API_CALL vxAddUserKernel(vx_context context, + const vx_char name[VX_MAX_KERNEL_NAME], + vx_enum enumeration, + vx_kernel_f func_ptr, + vx_uint32 numParams, + vx_kernel_validate_f validate, + vx_kernel_initialize_f init, + vx_kernel_deinitialize_f deinit); + +/*! \brief This API is called after all parameters have been added to the + * kernel and the kernel is \e ready to be used. Notice that the reference to the kernel created + * by vxAddUserKernel is still valid after the call to vxFinalizeKernel. + * If an error occurs, the kernel is not available for usage by the clients of OpenVX. Typically + * this is due to a mismatch between the number of parameters requested and given. + * \param [in] kernel The reference to the loaded kernel from \ref vxAddUserKernel. + * \return A \ref vx_status_e enumeration. + * \retval VX_SUCCESS No errors; any other value indicates failure. + * \retval VX_ERROR_INVALID_REFERENCE kernel is not a valid \ref vx_kernel reference. + * \pre \ref vxAddUserKernel and \ref vxAddParameterToKernel + * \ingroup group_user_kernels + */ +VX_API_ENTRY vx_status VX_API_CALL vxFinalizeKernel(vx_kernel kernel); + +/*! \brief Allows users to set the signatures of the custom kernel. + * \param [in] kernel The reference to the kernel added with \ref vxAddUserKernel. + * \param [in] index The index of the parameter to add. + * \param [in] dir The direction of the parameter. This must be either \ref VX_INPUT or + * \ref VX_OUTPUT. \ref VX_BIDIRECTIONAL is not supported for this function. + * \param [in] data_type The type of parameter. This must be a value from \ref vx_type_e. + * \param [in] state The state of the parameter (required or not). This must be a value from \ref vx_parameter_state_e. + * \return A \ref vx_status_e enumerated value. + * \retval VX_SUCCESS Parameter is successfully set on kernel; any other value indicates failure. + * \retval VX_ERROR_INVALID_REFERENCE kernel is not a valid \ref vx_kernel reference. + * \retval VX_ERROR_INVALID_PARAMETERS If the parameter is not valid for any reason. + * \pre \ref vxAddUserKernel + * \ingroup group_user_kernels + */ +VX_API_ENTRY vx_status VX_API_CALL vxAddParameterToKernel(vx_kernel kernel, vx_uint32 index, vx_enum dir, vx_enum data_type, vx_enum state); + +/*! \brief Removes a custom kernel from its context and releases it. + * \param [in] kernel The reference to the kernel to remove. Returned from \ref vxAddUserKernel. + * \note Any kernel enumerated in the base standard + * cannot be removed; only kernels added through \ref vxAddUserKernel can + * be removed. + * \return A \ref vx_status_e enumeration. The function returns to the + * application full control over the memory resources provided at the kernel creation time. + * \retval VX_SUCCESS No errors; any other value indicates failure. + * \retval VX_ERROR_INVALID_REFERENCE kernel is not a valid \ref vx_kernel reference. + * \retval VX_ERROR_INVALID_PARAMETERS If a base kernel is passed in. 
+ * \retval VX_FAILURE If the application has not released all references to the kernel + * object OR if the application has not released all references to a node that is using + * this kernel OR if the application has not released all references to a graph which + * has nodes that is using this kernel. + * \ingroup group_user_kernels + */ +VX_API_ENTRY vx_status VX_API_CALL vxRemoveKernel(vx_kernel kernel); + +/*! \brief Sets kernel attributes. + * \param [in] kernel The reference to the kernel. + * \param [in] attribute The enumeration of the attributes. See \ref vx_kernel_attribute_e. + * \param [in] ptr The pointer to the location from which to read the attribute. + * \param [in] size The size in bytes of the data area indicated by \a ptr in bytes. + * \note After a kernel has been passed to \ref vxFinalizeKernel, no attributes + * can be altered. + * \return A \ref vx_status_e enumeration. + * \retval VX_SUCCESS No errors; any other value indicates failure. + * \retval VX_ERROR_INVALID_REFERENCE kernel is not a valid \ref vx_kernel reference. + * \ingroup group_user_kernels + */ +VX_API_ENTRY vx_status VX_API_CALL vxSetKernelAttribute(vx_kernel kernel, vx_enum attribute, const void *ptr, vx_size size); + +/*! \brief Retrieves a \ref vx_parameter from a \ref vx_kernel. + * \param [in] kernel The reference to the kernel. + * \param [in] index The index of the parameter. + * \return A \ref vx_parameter reference. Any possible errors preventing a + * successful completion of the function should be checked using \ref vxGetStatus. + * \ingroup group_parameter + */ +VX_API_ENTRY vx_parameter VX_API_CALL vxGetKernelParameterByIndex(vx_kernel kernel, vx_uint32 index); + +/*============================================================================== + GRAPH + =============================================================================*/ + +/*! \brief Creates an empty graph. + * \param [in] context The reference to the implementation context. + * \returns A graph reference \ref vx_graph. Any possible errors preventing a + * successful creation should be checked using \ref vxGetStatus. + * \ingroup group_graph + */ +VX_API_ENTRY vx_graph VX_API_CALL vxCreateGraph(vx_context context); + +/*! \brief Releases a reference to a graph. + * The object may not be garbage collected until its total reference count is zero. + * Once the reference count is zero, all node references in the graph are automatically + * released as well. Releasing the graph will only release the nodes if the nodes were + * not previously released by the application. Data referenced by those nodes may not + * be released as the user may still have references to the data. + * \param [in] graph The pointer to the graph to release. + * \post After returning from this function the reference is zeroed. + * \return A \ref vx_status_e enumeration. + * \retval VX_SUCCESS No errors; any other value indicates failure. + * \retval VX_ERROR_INVALID_REFERENCE graph is not a valid \ref vx_graph reference. + * \ingroup group_graph + */ +VX_API_ENTRY vx_status VX_API_CALL vxReleaseGraph(vx_graph *graph); + +/*! \brief Verifies the state of the graph before it is executed. + * This is useful to catch programmer errors and contract errors. If not verified, + * the graph verifies before being processed. + * \pre Memory for data objects is not guarenteed to exist before + * this call. \post After this call data objects exist unless + * the implementation optimized them out. + * \param [in] graph The reference to the graph to verify. 
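+ * A minimal sketch (illustrative; \c graph is assumed to be a fully constructed graph):
+ * \code
+ * vx_status status = vxVerifyGraph(graph);
+ * if (status == VX_SUCCESS)
+ * {
+ *     status = vxProcessGraph(graph);   // synchronous execution, see vxProcessGraph
+ * }
+ * \endcode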
+ * \return A status code for graphs with more than one error; it is + * undefined which error will be returned. Register a log callback using \ref vxRegisterLogCallback + * to receive each specific error in the graph. + * \return A \ref vx_status_e enumeration. + * \retval VX_SUCCESS No errors; any other value indicates failure. + * \retval VX_ERROR_INVALID_REFERENCE graph is not a valid \ref vx_graph reference. + * \retval VX_ERROR_MULTIPLE_WRITERS If the graph contains more than one writer + * to any data object. + * \retval VX_ERROR_INVALID_NODE If a node in the graph is invalid or failed be created. + * \retval VX_ERROR_INVALID_GRAPH If the graph contains cycles or some other invalid topology. + * \retval VX_ERROR_INVALID_TYPE If any parameter on a node is given the wrong type. + * \retval VX_ERROR_INVALID_VALUE If any value of any parameter is out of bounds of specification. + * \retval VX_ERROR_INVALID_FORMAT If the image format is not compatible. + * \ingroup group_graph + * \see vxProcessGraph + */ +VX_API_ENTRY vx_status VX_API_CALL vxVerifyGraph(vx_graph graph); + +/*! \brief This function causes the synchronous processing of a graph. If the graph + * has not been verified, then the implementation verifies the graph + * immediately. If verification fails this function returns a status + * identical to what \ref vxVerifyGraph would return. After + * the graph verfies successfully then processing occurs. If the graph was + * previously verified via \ref vxVerifyGraph or \ref vxProcessGraph + * then the graph is processed. This function blocks until the graph is completed. + * \param [in] graph The graph to execute. + * \return A \ref vx_status_e enumeration. + * \retval VX_SUCCESS Graph has been processed; any other value indicates failure. + * \retval VX_ERROR_INVALID_REFERENCE graph is not a valid \ref vx_graph reference. + * \retval VX_FAILURE A catastrophic error occurred during processing. + * \ingroup group_graph + */ +VX_API_ENTRY vx_status VX_API_CALL vxProcessGraph(vx_graph graph); + +/*! \brief Schedules a graph for future execution. If the graph + * has not been verified, then the implementation verifies the graph + * immediately. If verification fails this function returns a status + * identical to what \ref vxVerifyGraph would return. After + * the graph verfies successfully then processing occurs. If the graph was + * previously verified via \ref vxVerifyGraph or \ref vxProcessGraph + * then the graph is processed. + * \param [in] graph The graph to schedule. + * \return A \ref vx_status_e enumeration. + * \retval VX_SUCCESS The graph has been scheduled; any other value indicates failure. + * \retval VX_ERROR_INVALID_REFERENCE graph is not a valid \ref vx_graph reference. + * \retval VX_ERROR_NO_RESOURCES The graph cannot be scheduled now. + * \retval VX_ERROR_NOT_SUFFICIENT The graph is not verified and has failed + * forced verification. + * \ingroup group_graph + */ +VX_API_ENTRY vx_status VX_API_CALL vxScheduleGraph(vx_graph graph); + +/*! \brief Waits for a specific graph to complete. If the graph has been scheduled multiple + * times since the last call to vxWaitGraph, then vxWaitGraph returns only when the last + * scheduled execution completes. + * \param [in] graph The graph to wait on. + * \return A \ref vx_status_e enumeration. + * \retval VX_SUCCESS The graph has successfully completed execution and its outputs are the + * valid results of the most recent execution; any other value indicates failure. 
+ * \retval VX_ERROR_INVALID_REFERENCE graph is not a valid \ref vx_graph reference. + * \retval VX_FAILURE An error occurred or the graph was never scheduled. Output data of the + * graph is undefined. + * \pre \ref vxScheduleGraph + * \ingroup group_graph + */ +VX_API_ENTRY vx_status VX_API_CALL vxWaitGraph(vx_graph graph); + +/*! \brief Allows the user to query attributes of the Graph. + * \param [in] graph The reference to the created graph. + * \param [in] attribute The \ref vx_graph_attribute_e type needed. + * \param [out] ptr The location at which to store the resulting value. + * \param [in] size The size in bytes of the container to which \a ptr points. + * \return A \ref vx_status_e enumeration. + * \retval VX_SUCCESS No errors; any other value indicates failure. + * \retval VX_ERROR_INVALID_REFERENCE graph is not a valid \ref vx_graph reference. + * \ingroup group_graph + */ +VX_API_ENTRY vx_status VX_API_CALL vxQueryGraph(vx_graph graph, vx_enum attribute, void *ptr, vx_size size); + +/*! \brief Allows the attributes of the Graph to be set to the provided value. + * \param [in] graph The reference to the graph. + * \param [in] attribute The \ref vx_graph_attribute_e type needed. + * \param [in] ptr The location from which to read the value. + * \param [in] size The size in bytes of the container to which \a ptr points. + * \return A \ref vx_status_e enumeration. + * \retval VX_SUCCESS No errors; any other value indicates failure. + * \retval VX_ERROR_INVALID_REFERENCE graph is not a valid \ref vx_graph reference. + * \ingroup group_graph + */ +VX_API_ENTRY vx_status VX_API_CALL vxSetGraphAttribute(vx_graph graph, vx_enum attribute, const void *ptr, vx_size size); + +/*! \brief Adds the given parameter extracted from a \ref vx_node to the graph. + * \param [in] graph The graph reference that contains the node. + * \param [in] parameter The parameter reference to add to the graph from the node. + * \return A \ref vx_status_e enumeration. + * \retval VX_SUCCESS Parameter added to Graph; any other value indicates failure. + * \retval VX_ERROR_INVALID_REFERENCE graph is not a valid \ref vx_graph reference or parameter is not a valid \ref vx_parameter reference. + * \retval VX_ERROR_INVALID_PARAMETERS The parameter is of a node not in this + * graph. + * \ingroup group_graph_parameters + */ +VX_API_ENTRY vx_status VX_API_CALL vxAddParameterToGraph(vx_graph graph, vx_parameter parameter); + +/*! \brief Sets a reference to the parameter on the graph. The implementation + * must set this parameter on the originating node as well. + * \param [in] graph The graph reference. + * \param [in] index The parameter index. + * \param [in] value The reference to set to the parameter. + * \return A \ref vx_status_e enumeration. + * \retval VX_SUCCESS Parameter set to Graph; any other value indicates failure. + * \retval VX_ERROR_INVALID_REFERENCE graph is not a valid \ref vx_graph reference or + * value is not a valid \ref vx_reference. + * \retval VX_ERROR_INVALID_PARAMETERS The parameter index is out of bounds or the + * dir parameter is incorrect. + * \ingroup group_graph_parameters + */ +VX_API_ENTRY vx_status VX_API_CALL vxSetGraphParameterByIndex(vx_graph graph, vx_uint32 index, vx_reference value); + +/*! \brief Retrieves a \ref vx_parameter from a \ref vx_graph. + * \param [in] graph The graph. + * \param [in] index The index of the parameter. + * \return \ref vx_parameter reference. 
Any possible errors preventing a successful + * function completion should be checked using \ref vxGetStatus. + * \ingroup group_graph_parameters + */ +VX_API_ENTRY vx_parameter VX_API_CALL vxGetGraphParameterByIndex(vx_graph graph, vx_uint32 index); + +/*! \brief Returns a Boolean to indicate the state of graph verification. + * \param [in] graph The reference to the graph to check. + * \return A \ref vx_bool value. + * \retval vx_true_e The graph is verified. + * \retval vx_false_e The graph is not verified. It must be verified before + * execution either through \ref vxVerifyGraph or automatically through + * \ref vxProcessGraph or \ref vxScheduleGraph. + * \ingroup group_graph + */ +VX_API_ENTRY vx_bool VX_API_CALL vxIsGraphVerified(vx_graph graph); + +/*! \brief Specifies the inputs and outputs of a graph explicitly. + * \param [in] graph The graph. + * \param [in] num_of_inputs The number of input references. + * \param [in] inputs The array of input references. + * \param [in] num_of_outputs The number of output references. + * \param [in] outputs The array of output references. + * \return A \ref vx_status value. + * \ingroup group_graph + */ +VX_API_ENTRY vx_status VX_API_CALL vxIdentifyGraphInputsAndOutputs(vx_graph graph, + vx_uint32 num_of_inputs, + vx_reference *inputs, + vx_uint32 num_of_outputs, + vx_reference *outputs); + +/*! \brief Gets the size of the binary graph (NBG), or generates the binary graph into the given buffer. + * \param [in] graph The graph. + * \param [in] buffer The buffer into which the binary graph is generated when *size holds the size of the actual NBG. + * \param [in] size Receives the size of the binary graph when buffer is NULL; must hold the size of the actual NBG when buffer is non-NULL. + * \return A \ref vx_status value. + * \ingroup group_graph + */ +VX_API_ENTRY vx_status VX_API_CALL vxGenerateNBG(vx_graph graph, void *buffer, vx_size *size); + +/*============================================================================== + NODE + =============================================================================*/ + +/*! \brief Creates a reference to a node object for a given kernel. + * \details This node has no references assigned as parameters after completion. + * The client is then required to set these parameters manually by \ref vxSetParameterByIndex. + * When clients supply their own node creation functions (for use with User Kernels), this is the API + * to use along with the parameter setting API. + * \param [in] graph The reference to the graph in which this node exists. + * \param [in] kernel The kernel reference to associate with this new node. + * \returns A node reference \ref vx_node. Any possible errors preventing a + * successful creation should be checked using \ref vxGetStatus. + * \note A call to this API sets all parameters to NULL. + * \ingroup group_adv_node + * \post Call \ref vxSetParameterByIndex for as many parameters as needed to be set. + */ +VX_API_ENTRY vx_node VX_API_CALL vxCreateGenericNode(vx_graph graph, vx_kernel kernel); + +/*! \brief Allows a user to query information out of a node. + * \param [in] node The reference to the node to query. + * \param [in] attribute Use \ref vx_node_attribute_e value to query for information. + * \param [out] ptr The location at which to store the resulting value. + * \param [in] size The size in bytes of the container to which \a ptr points. + * \return A \ref vx_status_e enumeration. + * \retval VX_SUCCESS No errors; any other value indicates failure. + * \retval VX_ERROR_INVALID_REFERENCE node is not a valid \ref vx_node reference.
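A sketch of the two-call pattern suggested by the vxGenerateNBG documentation above (query the size with a NULL buffer, then generate into an allocation of that size); error handling and the <stdlib.h> include for malloc/free are omitted for brevity:

    vx_size nbg_size = 0;
    vxGenerateNBG(graph, NULL, &nbg_size);   /* first call: query the NBG size */
    void *nbg = malloc(nbg_size);
    vxGenerateNBG(graph, nbg, &nbg_size);    /* second call: write the NBG into the buffer */
    /* ... persist the buffer (e.g. write it to a file), then free(nbg) ... */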
+ * \retval VX_ERROR_INVALID_PARAMETERS The type or size is incorrect. + * \ingroup group_node + */ +VX_API_ENTRY vx_status VX_API_CALL vxQueryNode(vx_node node, vx_enum attribute, void *ptr, vx_size size); + +/*! \brief Allows a user to set attribute of a node before Graph Validation. + * \param [in] node The reference to the node to set. + * \param [in] attribute Use \ref vx_node_attribute_e value to set the desired attribute. + * \param [in] ptr The pointer to the desired value of the attribute. + * \param [in] size The size in bytes of the objects to which \a ptr points. + * \note Some attributes are inherited from the \ref vx_kernel, which was used + * to create the node. Some of these can be overridden using this API, notably + * \ref VX_NODE_LOCAL_DATA_SIZE and \ref VX_NODE_LOCAL_DATA_PTR. + * \ingroup group_node + * \return A \ref vx_status_e enumeration. + * \retval VX_SUCCESS The attribute was set; any other value indicates failure. + * \retval VX_ERROR_INVALID_REFERENCE node is not a valid \ref vx_node reference. + * \retval VX_ERROR_INVALID_PARAMETERS size is not correct for the type needed. + */ +VX_API_ENTRY vx_status VX_API_CALL vxSetNodeAttribute(vx_node node, vx_enum attribute, const void *ptr, vx_size size); + +/*! \brief Releases a reference to a Node object. + * The object may not be garbage collected until its total reference count is zero. + * \param [in] node The pointer to the reference of the node to release. + * \ingroup group_node + * \post After returning from this function the reference is zeroed. + * \return A \ref vx_status_e enumeration. + * \retval VX_SUCCESS No errors; any other value indicates failure. + * \retval VX_ERROR_INVALID_REFERENCE node is not a valid \ref vx_node reference. + */ +VX_API_ENTRY vx_status VX_API_CALL vxReleaseNode(vx_node *node); + +/*! \brief Removes a Node from its parent Graph and releases it. + * \param [in] node The pointer to the node to remove and release. + * \ingroup group_node + * \post After returning from this function the reference is zeroed. + * \return A \ref vx_status_e enumeration. + * \retval VX_SUCCESS No errors; any other value indicates failure. + * \retval VX_ERROR_INVALID_REFERENCE node is not a valid \ref vx_node reference. + */ +VX_API_ENTRY vx_status VX_API_CALL vxRemoveNode(vx_node *node); + +/*! \brief Assigns a callback to a node. + * If a callback already exists in this node, this function must return an error + * and the user may clear the callback by passing a NULL pointer as the callback. + * \param [in] node The reference to the node. + * \param [in] callback The callback to associate with completion of this + * specific node. + * \warning This must be used with extreme caution as it can \e ruin + * optimizations in the power/performance efficiency of a graph. + * \return A \ref vx_status_e enumeration. + * \retval VX_SUCCESS Callback assigned; any other value indicates failure. + * \retval VX_ERROR_INVALID_REFERENCE node is not a valid \ref vx_node reference. + * \ingroup group_node_callback + */ +VX_API_ENTRY vx_status VX_API_CALL vxAssignNodeCallback(vx_node node, vx_nodecomplete_f callback); + +/*! \brief Retrieves the current node callback function pointer set on the node. + * \param [in] node The reference to the \ref vx_node object. + * \ingroup group_node_callback + * \return vx_nodecomplete_f The pointer to the callback function. + * \retval NULL No callback is set. + * \retval * The node callback function. 
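A sketch of a node-completion callback as used with vxAssignNodeCallback above; the vx_nodecomplete_f signature, VX_CALLBACK, and the VX_ACTION_* values are taken from the accompanying type headers, and the node name is illustrative:

    static vx_action VX_CALLBACK node_done(vx_node node)
    {
        /* inspect the node here (e.g. via vxQueryNode) and decide how to proceed */
        return VX_ACTION_CONTINUE;
    }

    vx_status status = vxAssignNodeCallback(node, node_done);
    /* passing NULL later clears the callback again, as documented above */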
+ */ +VX_API_ENTRY vx_nodecomplete_f VX_API_CALL vxRetrieveNodeCallback(vx_node node); + +/*! \brief Assigns a callback to a node. + * If a callback already exists in this node, this function must return an error + * and the user may clear the callback by passing a NULL pointer as the callback. + * \param [in] node The reference to the node. + * \param [in] callback The callback to associate with completion of this + * specific node. + * \warning This must be used with extreme caution as it can \e ruin + * optimizations in the power/performance efficiency of a graph. + * \return A \ref vx_status_e enumeration. + * \retval VX_SUCCESS Callback assigned; any other value indicates failure. + * \retval VX_ERROR_INVALID_REFERENCE node is not a valid \ref vx_node reference. + * \ingroup group_node_callback + */ +VX_API_ENTRY vx_status VX_API_CALL vxAssignNodeQueryCallback(vx_node node, vx_nodequery_f callback); + +/*! \brief Sets the node target to the provided value. A success invalidates the graph + * that the node belongs to (\ref vxVerifyGraph must be called before the next execution) + * \param [in] node The reference to the \ref vx_node object. + * \param [in] target_enum The target enum to be set to the \ref vx_node object. + * Use a \ref vx_target_e. + * \param [in] target_string The target name ASCII string. This contains a valid value + * when target_enum is set to \ref VX_TARGET_STRING, otherwise it is ignored. + * \ingroup group_node + * \return A \ref vx_status_e enumeration. + * \retval VX_SUCCESS Node target set; any other value indicates failure. + * \retval VX_ERROR_INVALID_REFERENCE node is not a valid \ref vx_node reference. + * \retval VX_ERROR_NOT_SUPPORTED If the node kernel is not supported by the specified target. + */ +VX_API_ENTRY vx_status VX_API_CALL vxSetNodeTarget(vx_node node, vx_enum target_enum, const char* target_string); + +/*! \brief Creates replicas of the same node first_node to process a set of objects + * stored in \ref vx_pyramid or \ref vx_object_array. + * first_node needs to have as parameter levels 0 of a \ref vx_pyramid or the index 0 of a \ref vx_object_array. + * Replica nodes are not accessible by the application through any means. An application request for removal of + * first_node from the graph will result in removal of all replicas. Any change of parameter or attribute of + * first_node will be propagated to the replicas. \ref vxVerifyGraph shall enforce consistency of parameters and attributes + * in the replicas. + * \param [in] graph The reference to the graph. + * \param [in] first_node The reference to the node in the graph that will be replicated. + * \param [in] replicate an array of size equal to the number of node parameters, vx_true_e for the parameters + * that should be iterated over (should be a reference to a vx_pyramid or a vx_object_array), + * vx_false_e for the parameters that should be the same across replicated nodes and for optional + * parameters that are not used. Should be vx_true_e for all output and bidirectional parameters. + * \param [in] number_of_parameters number of elements in the replicate array + * \return A \ref vx_status_e enumeration. + * \retval VX_SUCCESS No errors; any other value indicates failure. + * \retval VX_ERROR_INVALID_REFERENCE graph is not a valid \ref vx_graph reference, or first_node is not a + * valid \ref vx_node reference. + * \retval VX_ERROR_NOT_COMPATIBLE At least one of replicated parameters is not of level 0 of a pyramid or at index 0 of an object array. 
+ * \retval VX_FAILURE If the node does not belong to the graph, or the number of objects in the parent objects of inputs and output are not the same. + * \ingroup group_node + */ +VX_API_ENTRY vx_status VX_API_CALL vxReplicateNode(vx_graph graph, vx_node first_node, vx_bool replicate[], vx_uint32 number_of_parameters); + +/*============================================================================== + PARAMETER + =============================================================================*/ + +/*! \brief Retrieves a \ref vx_parameter from a \ref vx_node. + * \param [in] node The node from which to extract the parameter. + * \param [in] index The index of the parameter to which to get a reference. + * \return A parameter reference \ref vx_parameter. Any possible errors preventing a successful + * completion of the function should be checked using \ref vxGetStatus. + * \ingroup group_parameter + */ +VX_API_ENTRY vx_parameter VX_API_CALL vxGetParameterByIndex(vx_node node, vx_uint32 index); + +/*! \brief Releases a reference to a parameter object. + * The object may not be garbage collected until its total reference count is zero. + * \param [in] param The pointer to the parameter to release. + * \ingroup group_parameter + * \post After returning from this function the reference is zeroed. + * \return A \ref vx_status_e enumeration. + * \retval VX_SUCCESS No errors; any other value indicates failure. + * \retval VX_ERROR_INVALID_REFERENCE param is not a valid \ref vx_parameter reference. + */ +VX_API_ENTRY vx_status VX_API_CALL vxReleaseParameter(vx_parameter *param); + +/*! \brief Sets the specified parameter data for a kernel on the node. + * \param [in] node The node that contains the kernel. + * \param [in] index The index of the parameter desired. + * \param [in] value The desired value of the parameter. + * \note A user may not provide a NULL value for a mandatory parameter of this API. + * \return A \ref vx_status_e enumeration. + * \retval VX_SUCCESS No errors; any other value indicates failure. + * \retval VX_ERROR_INVALID_REFERENCE node is not a valid \ref vx_node reference, or value + * is not a valid \ref vx_reference reference. + * \ingroup group_parameter + * \see vxSetParameterByReference + */ +VX_API_ENTRY vx_status VX_API_CALL vxSetParameterByIndex(vx_node node, vx_uint32 index, vx_reference value); + +/*! \brief Associates a parameter reference and a data reference with a kernel + * on a node. + * \param [in] parameter The reference to the kernel parameter. + * \param [in] value The value to associate with the kernel parameter. + * \note A user may not provide a NULL value for a mandatory parameter of this API. + * \return A \ref vx_status_e enumeration. + * \retval VX_SUCCESS No errors; any other value indicates failure. + * \retval VX_ERROR_INVALID_REFERENCE parameter is not a valid \ref vx_parameter reference, + * or value is not a valid \ref vx_reference reference.. + * \ingroup group_parameter + * \see vxGetParameterByIndex + */ +VX_API_ENTRY vx_status VX_API_CALL vxSetParameterByReference(vx_parameter parameter, vx_reference value); + +/*! \brief Allows the client to query a parameter to determine its meta-information. + * \param [in] parameter The reference to the parameter. + * \param [in] attribute The attribute to query. Use a \ref vx_parameter_attribute_e. + * \param [out] ptr The location at which to store the resulting value. + * \param [in] size The size in bytes of the container to which \a ptr points. + * \return A \ref vx_status_e enumeration. 
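A sketch of the generic-node flow described above: create the node from a kernel, then set each parameter by index. vxGetKernelByName and vxReleaseKernel are assumed from the kernel section of this API, the kernel name is the standard box filter, and the image names are illustrative:

    vx_kernel kernel = vxGetKernelByName(context, "org.khronos.openvx.box_3x3");
    vx_node node = vxCreateGenericNode(graph, kernel);
    vxSetParameterByIndex(node, 0, (vx_reference)input_image);   /* parameter 0: input  */
    vxSetParameterByIndex(node, 1, (vx_reference)output_image);  /* parameter 1: output */
    vxReleaseKernel(&kernel);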
+ * \retval VX_SUCCESS No errors; any other value indicates failure. + * \retval VX_ERROR_INVALID_REFERENCE parameter is not a valid \ref vx_parameter reference. + * \ingroup group_parameter + */ +VX_API_ENTRY vx_status VX_API_CALL vxQueryParameter(vx_parameter parameter, vx_enum attribute, void *ptr, vx_size size); + +/*============================================================================== + SCALAR + =============================================================================*/ + +/*! \brief Creates a reference to a scalar object. Also see \ref sub_node_parameters. + * \param [in] context The reference to the system context. + * \param [in] data_type The type of data to hold. Must be greater than + * \ref VX_TYPE_INVALID and less than or equal to \ref VX_TYPE_VENDOR_STRUCT_END. + * Or must be a \ref vx_enum returned from \ref vxRegisterUserStruct. + * \param [in] ptr The pointer to the initial value of the scalar. + * \ingroup group_scalar + * \returns A scalar reference \ref vx_scalar. Any possible errors preventing a + * successful creation should be checked using \ref vxGetStatus. + */ +VX_API_ENTRY vx_scalar VX_API_CALL vxCreateScalar(vx_context context, vx_enum data_type, const void *ptr); + +/*! \brief Creates a reference to a scalar object. Also see \ref sub_node_parameters. + * \param [in] context The reference to the system context. + * \param [in] data_type The type of data to hold. Must be greater than + * \ref VX_TYPE_INVALID and less than or equal to \ref VX_TYPE_VENDOR_STRUCT_END. + * Or must be a \ref vx_enum returned from \ref vxRegisterUserStruct. + * \param [in] ptr The pointer to the initial value of the scalar. + * \param [in] size Size of data at ptr in bytes. + * \ingroup group_scalar + * \returns A scalar reference \ref vx_scalar. Any possible errors preventing a + * successful creation should be checked using \ref vxGetStatus. + */ +VX_API_ENTRY vx_scalar VX_API_CALL vxCreateScalarWithSize(vx_context context, vx_enum data_type, const void *ptr, vx_size size); + +/*! \brief Creates an opaque reference to a scalar object with no direct user access. + * \param [in] graph The reference to the parent graph. + * \param [in] data_type The type of data to hold. Must be greater than + * \ref VX_TYPE_INVALID and less than or equal to \ref VX_TYPE_VENDOR_STRUCT_END. + * Or must be a \ref vx_enum returned from \ref vxRegisterUserStruct. + * \see \ref vxCreateScalar + * \ingroup group_scalar + * \returns A scalar reference \ref vx_scalar. Any possible errors preventing a + * successful creation should be checked using \ref vxGetStatus. + */ +VX_API_ENTRY vx_scalar VX_API_CALL vxCreateVirtualScalar(vx_graph graph, vx_enum data_type); + +/*! \brief Releases a reference to a scalar object. + * The object may not be garbage collected until its total reference count is zero. + * \param [in] scalar The pointer to the scalar to release. + * \ingroup group_scalar + * \post After returning from this function the reference is zeroed. + * \return A \ref vx_status_e enumeration. + * \retval VX_SUCCESS No errors; any other value indicates failure. + * \retval VX_ERROR_INVALID_REFERENCE scalar is not a valid \ref vx_scalar reference. + */ +VX_API_ENTRY vx_status VX_API_CALL vxReleaseScalar(vx_scalar *scalar); + +/*! \brief Queries attributes from a scalar. + * \param [in] scalar The scalar object. + * \param [in] attribute The enumeration to query. Use a \ref vx_scalar_attribute_e enumeration. + * \param [out] ptr The location at which to store the resulting value. 
+ * \param [in] size The size of the container to which \a ptr points. + * \return A \ref vx_status_e enumeration. + * \retval VX_SUCCESS No errors; any other value indicates failure. + * \retval VX_ERROR_INVALID_REFERENCE scalar is not a valid \ref vx_scalar reference. + * \ingroup group_scalar + */ +VX_API_ENTRY vx_status VX_API_CALL vxQueryScalar(vx_scalar scalar, vx_enum attribute, void *ptr, vx_size size); + +/*! \brief Allows the application to copy from/into a scalar object. + * \param [in] scalar The reference to the scalar object that is the source or the + * destination of the copy. + * \param [in] user_ptr The address of the memory location where to store the requested data + * if the copy was requested in read mode, or from where to get the data to store into the + * scalar object if the copy was requested in write mode. In the user memory, the scalar is + * a variable of the type corresponding to \ref VX_SCALAR_TYPE. + * The accessible memory must be large enough to contain this variable. + * \param [in] usage This declares the effect of the copy with regard to the scalar object + * using the \ref vx_accessor_e enumeration. Only \ref VX_READ_ONLY and \ref VX_WRITE_ONLY + * are supported: + * \arg \ref VX_READ_ONLY means that data are copied from the scalar object into the user memory. + * \arg \ref VX_WRITE_ONLY means that data are copied into the scalar object from the user memory. + * \param [in] user_mem_type A \ref vx_memory_type_e enumeration that specifies + * the memory type of the memory referenced by the user_addr. + * \return A \ref vx_status_e enumeration. + * \retval VX_SUCCESS No errors; any other value indicates failure. + * \retval VX_ERROR_INVALID_REFERENCE scalar is not a valid \ref vx_scalar reference. + * \retval VX_ERROR_INVALID_PARAMETERS An other parameter is incorrect. + * \ingroup group_scalar + */ +VX_API_ENTRY vx_status VX_API_CALL vxCopyScalar(vx_scalar scalar, void *user_ptr, vx_enum usage, vx_enum user_mem_type); + +/*! \brief Allows the application to copy from/into a scalar object with size. + * \param [in] scalar The reference to the scalar object that is the source or the + * destination of the copy. + * \param [in] size The size in bytes of the container to which \a user_ptr points. + * \param [in] user_ptr The address of the memory location where to store the requested data + * if the copy was requested in read mode, or from where to get the data to store into the + * scalar object if the copy was requested in write mode. In the user memory, the scalar is + * a variable of the type corresponding to \ref VX_SCALAR_TYPE. + * The accessible memory must be large enough to contain this variable. + * \param [in] usage This declares the effect of the copy with regard to the scalar object + * using the \ref vx_accessor_e enumeration. Only \ref VX_READ_ONLY and \ref VX_WRITE_ONLY + * are supported: + * \arg \ref VX_READ_ONLY means that data are copied from the scalar object into the user memory. + * \arg \ref VX_WRITE_ONLY means that data are copied into the scalar object from the user memory. + * \param [in] user_mem_type A \ref vx_memory_type_e enumeration that specifies + * the memory type of the memory referenced by the user_addr. + * \return A \ref vx_status_e enumeration. + * \retval VX_ERROR_INVALID_REFERENCE The scalar reference is not actually a scalar reference. + * \retval VX_ERROR_INVALID_PARAMETERS An other parameter is incorrect. 
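A minimal sketch of reading a scalar back into user memory with vxCopyScalar, assuming a VX_TYPE_FLOAT32 scalar created as described above (values are illustrative):

    vx_float32 initial = 1.5f, readback = 0.0f;
    vx_scalar s = vxCreateScalar(context, VX_TYPE_FLOAT32, &initial);
    vxCopyScalar(s, &readback, VX_READ_ONLY, VX_MEMORY_TYPE_HOST);  /* scalar -> readback */
    /* VX_WRITE_ONLY would copy in the opposite direction, user memory -> scalar */
    vxReleaseScalar(&s);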
+ * \ingroup group_scalar + */ +VX_API_ENTRY vx_status VX_API_CALL vxCopyScalarWithSize(vx_scalar scalar, vx_size size, void *user_ptr, vx_enum usage, vx_enum user_mem_type); + +/*============================================================================== + REFERENCE + =============================================================================*/ + +/*! \brief Queries any reference type for some basic information like count or type. + * \param [in] ref The reference to query. + * \param [in] attribute The value for which to query. Use \ref vx_reference_attribute_e. + * \param [out] ptr The location at which to store the resulting value. + * \param [in] size The size in bytes of the container to which ptr points. + * \return A \ref vx_status_e enumeration. + * \retval VX_SUCCESS No errors; any other value indicates failure. + * \retval VX_ERROR_INVALID_REFERENCE ref is not a valid \ref vx_reference reference. + * \ingroup group_reference + */ +VX_API_ENTRY vx_status VX_API_CALL vxQueryReference(vx_reference ref, vx_enum attribute, void *ptr, vx_size size); + +/*! \brief Releases a reference. The reference may potentially refer to multiple OpenVX objects of different types. + * This function can be used instead of calling a specific release function for each individual object type + * (e.g. vxRelease). The object will not be destroyed until its total reference count is zero. + * \note After returning from this function the reference is zeroed. + * \param [in] ref_ptr The pointer to the reference of the object to release. + * \return A \ref vx_status_e enumeration. + * \retval VX_SUCCESS No errors; any other value indicates failure. + * \retval VX_ERROR_INVALID_REFERENCE ref_ptr is not a valid \ref vx_reference reference. + * \ingroup group_reference + */ +VX_API_ENTRY vx_status VX_API_CALL vxReleaseReference(vx_reference* ref_ptr); + +/*! + * \brief Increments the reference counter of an object + * This function is used to express the fact that the OpenVX object is referenced + * multiple times by an application. Each time this function is called for + * an object, the application will need to release the object one additional + * time before it can be destructed + * \param [in] ref The reference to retain. + * \return A \ref vx_status_e enumeration. + * \retval VX_SUCCESS No errors; any other value indicates failure. + * \retval VX_ERROR_INVALID_REFERENCE ref is not a valid \ref vx_reference reference. + * \ingroup group_reference + */ +VX_API_ENTRY vx_status VX_API_CALL vxRetainReference(vx_reference ref); + +/*! \brief Name a reference + * \ingroup group_reference + * + * This function is used to associate a name to a referenced object. This name + * can be used by the OpenVX implementation in log messages and any + * other reporting mechanisms. + * + * The OpenVX implementation will not check if the name is unique in + * the reference scope (context or graph). Several references can then + * have the same name. + * + * \param [in] ref The reference to the object to be named. + * \param [in] name Pointer to the '\0' terminated string that identifies + * the referenced object. + * The string is copied by the function so that it + * stays the property of the caller. + * NULL means that the reference is not named. + * The length of the string shall be lower than VX_MAX_REFERENCE_NAME bytes. + * \return A \ref vx_status_e enumeration. + * \retval VX_SUCCESS No errors; any other value indicates failure. + * \retval VX_ERROR_INVALID_REFERENCE ref is not a valid \ref vx_reference reference. 
+ */ +VX_API_ENTRY vx_status VX_API_CALL vxSetReferenceName(vx_reference ref, const vx_char *name); + +/*============================================================================== + DELAY + =============================================================================*/ + +/*! \brief Queries a \ref vx_delay object attribute. + * \param [in] delay The reference to a delay object. + * \param [in] attribute The attribute to query. Use a \ref vx_delay_attribute_e enumeration. + * \param [out] ptr The location at which to store the resulting value. + * \param [in] size The size of the container to which \a ptr points. + * \return A \ref vx_status_e enumeration. + * \retval VX_SUCCESS No errors; any other value indicates failure. + * \retval VX_ERROR_INVALID_REFERENCE delay is not a valid \ref vx_delay reference. + * \ingroup group_delay + */ +VX_API_ENTRY vx_status VX_API_CALL vxQueryDelay(vx_delay delay, vx_enum attribute, void *ptr, vx_size size); + +/*! \brief Releases a reference to a delay object. + * The object may not be garbage collected until its total reference count is zero. + * \param [in] delay The pointer to the delay object reference to release. + * \post After returning from this function the reference is zeroed. + * \ingroup group_delay + * \return A \ref vx_status_e enumeration. + * \retval VX_SUCCESS No errors; any other value indicates failure. + * \retval VX_ERROR_INVALID_REFERENCE delay is not a valid \ref vx_delay reference. + */ +VX_API_ENTRY vx_status VX_API_CALL vxReleaseDelay(vx_delay *delay); + + +/*! \brief Creates a Delay object. + * \details This function creates a delay object with \p num_slots slots. Each slot + * contains a clone of the exemplar. The clones only inherit the metadata of the + * exemplar. The data content of the exemplar is ignored and the clones have their + * data undefined at delay creation time. + * The function does not alter the exemplar. Also, it doesn't retain or release the + * reference to the exemplar. + * \note For the definition of metadata attributes see \ref vxSetMetaFormatAttribute. + * \param [in] context The reference to the context. + * \param [in] exemplar The exemplar object. Supported exemplar object types are:
+ * \arg \ref VX_TYPE_ARRAY + * \arg \ref VX_TYPE_CONVOLUTION + * \arg \ref VX_TYPE_DISTRIBUTION + * \arg \ref VX_TYPE_IMAGE + * \arg \ref VX_TYPE_LUT + * \arg \ref VX_TYPE_MATRIX + * \arg \ref VX_TYPE_OBJECT_ARRAY + * \arg \ref VX_TYPE_PYRAMID + * \arg \ref VX_TYPE_REMAP + * \arg \ref VX_TYPE_SCALAR + * \arg \ref VX_TYPE_THRESHOLD + * \arg \ref VX_TYPE_TENSOR + * \param [in] num_slots The number of objects in the delay. This value must be greater than zero. + * \returns A delay reference \ref vx_delay. Any possible errors preventing a + * successful creation should be checked using \ref vxGetStatus. + * \ingroup group_delay + */ +VX_API_ENTRY vx_delay VX_API_CALL vxCreateDelay(vx_context context, + vx_reference exemplar, + vx_size num_slots); + +/*! \brief Retrieves a reference to a delay slot object. + * \param [in] delay The reference to the delay object. + * \param [in] index The index of the delay slot from which to extract the object reference. + * \return \ref vx_reference. Any possible errors preventing a successful + * completion of the function should be checked using \ref vxGetStatus. + * \note The delay index is in the range \f$ [-count+1,0] \f$. 0 is always the + * \e current object. + * \ingroup group_delay + * \note A reference retrieved with this function must not be given to its associated + * release API (e.g. \ref vxReleaseImage) unless \ref vxRetainReference is used. + */ +VX_API_ENTRY vx_reference VX_API_CALL vxGetReferenceFromDelay(vx_delay delay, vx_int32 index); + +/*! \brief Shifts the internal delay ring by one. + * + * This function performs a shift of the internal delay ring by one. This means that + * the data originally at index 0 move to index -1 and so forth until index + * \f$ -count+1 \f$. The data originally at index \f$ -count+1 \f$ move to index 0. + * Here \f$ count \f$ is the number of slots in the delay ring. + * When a delay is aged, any graph making use of this delay (delay object itself or data + * objects in delay slots) gets its data automatically updated accordingly. + * \param [in] delay The reference to the delay object to age. + * \return A \ref vx_status_e enumeration. + * \retval VX_SUCCESS Delay was aged; any other value indicates failure. + * \retval VX_ERROR_INVALID_REFERENCE delay is not a valid \ref vx_delay reference. + * \ingroup group_delay + */ +VX_API_ENTRY vx_status VX_API_CALL vxAgeDelay(vx_delay delay); + +/*! \brief Register a delay for auto-aging. + * + * This function registers a delay object to be auto-aged by the graph. + * This delay object will be automatically aged after each successful completion of + * this graph. Aging of a delay object cannot be called during graph execution. + * A graph abandoned due to a node callback will trigger an auto-aging. + * + * If a delay is registered for auto-aging multiple times in the same graph, + * the delay will be only aged a single time at each graph completion. + * If a delay is registered for auto-aging in multiple graphs, this delay will + * be aged automatically after each successful completion of any of these graphs. + * + * \param [in] graph The graph to which the delay is registered for auto-aging. + * \param [in] delay The delay to automatically age. + * + * \return A \ref vx_status_e enumeration. + * \retval VX_SUCCESS No errors; any other value indicates failure. + * \retval VX_ERROR_INVALID_REFERENCE graph is not a valid \ref vx_graph reference, or + * delay is not a valid \ref vx_delay reference.
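A sketch of the delay usage described above, assuming an exemplar image created elsewhere (illustrative name); two slots give the usual current/previous pair:

    vx_delay delay = vxCreateDelay(context, (vx_reference)exemplar_image, 2);
    vx_image current  = (vx_image)vxGetReferenceFromDelay(delay,  0);   /* current slot  */
    vx_image previous = (vx_image)vxGetReferenceFromDelay(delay, -1);   /* previous slot */
    /* ... use current/previous as node parameters in the graph ... */
    vxAgeDelay(delay);   /* or register once with vxRegisterAutoAging(graph, delay) */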
+ * \ingroup group_graph + */ +VX_API_ENTRY vx_status VX_API_CALL vxRegisterAutoAging(vx_graph graph, vx_delay delay); + +/*============================================================================== + LOGGING + =============================================================================*/ + +/*! \brief Adds a line to the log. + * \param [in] ref The reference to add the log entry against. Some valid value must be provided. + * \param [in] status The status code. \ref VX_SUCCESS status entries are ignored and not added. + * \param [in] message The human readable message to add to the log. + * \param [in] ... a list of variable arguments to the message. + * \note Messages may not exceed \ref VX_MAX_LOG_MESSAGE_LEN bytes and will be truncated in the log if they exceed this limit. + * \ingroup group_log + */ +VX_API_ENTRY void VX_API_CALL vxAddLogEntry(vx_reference ref, vx_status status, const char *message, ...); + +/*! \brief Registers a callback facility to the OpenVX implementation to receive error logs. + * \param [in] context The overall context to OpenVX. + * \param [in] callback The callback function. If NULL, the previous callback is removed. + * \param [in] reentrant If reentrancy flag is \ref vx_true_e, then the callback may be entered from multiple + * simultaneous tasks or threads (if the host OS supports this). + * \ingroup group_log + */ +VX_API_ENTRY void VX_API_CALL vxRegisterLogCallback(vx_context context, vx_log_callback_f callback, vx_bool reentrant); + +/*============================================================================== + LUT + =============================================================================*/ + +/*! \brief Creates LUT object of a given type. The value of \ref VX_LUT_OFFSET is equal to 0 + * for data_type = \ref VX_TYPE_UINT8, and (vx_uint32)(count/2) for \ref VX_TYPE_INT16. + * \param [in] context The reference to the context. + * \param [in] data_type The type of data stored in the LUT. + * \param [in] count The number of entries desired. + * \note data_type can only be \ref VX_TYPE_UINT8 or \ref VX_TYPE_INT16. If data_type + * is \ref VX_TYPE_UINT8, count should be not greater than 256. If data_type is \ref VX_TYPE_INT16, + * count should not be greater than 65536. + * \returns An LUT reference \ref vx_lut. Any possible errors preventing a successful creation should be checked using \ref vxGetStatus. + * \ingroup group_lut + */ +VX_API_ENTRY vx_lut VX_API_CALL vxCreateLUT(vx_context context, vx_enum data_type, vx_size count); + +/*! \brief Creates an opaque reference to a LUT object with no direct user access. + * \param [in] graph The reference to the parent graph. + * \param [in] data_type The type of data stored in the LUT. + * \param [in] count The number of entries desired. + * \see \ref vxCreateLUT + * \note data_type can only be \ref VX_TYPE_UINT8 or \ref VX_TYPE_INT16. If data_type + * is \ref VX_TYPE_UINT8, count should be not greater than 256. If data_type is \ref VX_TYPE_INT16, + * count should not be greater than 65536. + * \returns An LUT reference \ref vx_lut. Any possible errors preventing a successful creation should be checked using \ref vxGetStatus. + * \ingroup group_lut + */ +VX_API_ENTRY vx_lut VX_API_CALL vxCreateVirtualLUT(vx_graph graph, vx_enum data_type, vx_size count); + +/*! \brief Releases a reference to a LUT object. + * The object may not be garbage collected until its total reference count is zero. + * \param [in] lut The pointer to the LUT to release. 
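A sketch of a log callback as registered with vxRegisterLogCallback above; the vx_log_callback_f signature is taken from the accompanying type headers, and printf (from <stdio.h>) stands in for whatever reporting mechanism the application uses:

    static void VX_CALLBACK log_cb(vx_context context, vx_reference ref,
                                   vx_status status, const vx_char string[])
    {
        printf("OpenVX log (status %d): %s\n", (int)status, string);
    }

    vxRegisterLogCallback(context, log_cb, vx_false_e);   /* non-reentrant callback */
    vxAddLogEntry((vx_reference)graph, VX_FAILURE, "graph %s failed", "demo");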
+ * \post After returning from this function the reference is zeroed. + * \return A \ref vx_status_e enumeration. + * \retval VX_SUCCESS No errors; any other value indicates failure. + * \retval VX_ERROR_INVALID_REFERENCE lut is not a valid \ref vx_lut reference. + * \ingroup group_lut + */ +VX_API_ENTRY vx_status VX_API_CALL vxReleaseLUT(vx_lut *lut); + +/*! \brief Queries attributes from a LUT. + * \param [in] lut The LUT to query. + * \param [in] attribute The attribute to query. Use a \ref vx_lut_attribute_e enumeration. + * \param [out] ptr The location at which to store the resulting value. + * \param [in] size The size in bytes of the container to which \a ptr points. + * \return A \ref vx_status_e enumeration. + * \retval VX_SUCCESS No errors; any other value indicates failure. + * \retval VX_ERROR_INVALID_REFERENCE lut is not a valid \ref vx_lut reference. + * \ingroup group_lut + */ +VX_API_ENTRY vx_status VX_API_CALL vxQueryLUT(vx_lut lut, vx_enum attribute, void *ptr, vx_size size); + +/*! \brief Allows the application to copy from/into a LUT object. + * \param [in] lut The reference to the LUT object that is the source or the + * destination of the copy. + * \param [in] user_ptr The address of the memory location where to store the requested data + * if the copy was requested in read mode, or from where to get the data to store into the LUT + * object if the copy was requested in write mode. In the user memory, the LUT is + * represented as a array with elements of the type corresponding to + * \ref VX_LUT_TYPE, and with a number of elements equal to the value + * returned via \ref VX_LUT_COUNT. The accessible memory must be large enough + * to contain this array: + * accessible memory in bytes >= sizeof(data_element) * count. + * \param [in] usage This declares the effect of the copy with regard to the LUT object + * using the \ref vx_accessor_e enumeration. Only \ref VX_READ_ONLY and \ref VX_WRITE_ONLY + * are supported: + * \arg \ref VX_READ_ONLY means that data are copied from the LUT object into the user memory. + * \arg \ref VX_WRITE_ONLY means that data are copied into the LUT object from the user memory. + * \param [in] user_mem_type A \ref vx_memory_type_e enumeration that specifies + * the memory type of the memory referenced by the user_addr. + * \return A \ref vx_status_e enumeration. + * \retval VX_SUCCESS No errors; any other value indicates failure. + * \retval VX_ERROR_INVALID_REFERENCE lut is not a valid \ref vx_lut reference. + * \retval VX_ERROR_INVALID_PARAMETERS An other parameter is incorrect. + * \ingroup group_lut + */ +VX_API_ENTRY vx_status VX_API_CALL vxCopyLUT(vx_lut lut, void *user_ptr, vx_enum usage, vx_enum user_mem_type); + +/*! \brief Allows the application to get direct access to LUT object. + * \param [in] lut The reference to the LUT object to map. + * \param [out] map_id The address of a \ref vx_map_id variable where the function + * returns a map identifier. + * \arg (*map_id) must eventually be provided as the map_id parameter of a call to + * \ref vxUnmapLUT. + * \param [out] ptr The address of a pointer that the function sets to the + * address where the requested data can be accessed. In the mapped memory area, + * the LUT data are structured as an array with elements of the type corresponding + * to \ref VX_LUT_TYPE, with a number of elements equal to + * the value returned via \ref VX_LUT_COUNT. Accessing the + * memory out of the bound of this array is forbidden and has an undefined behavior. 
+ * The returned (*ptr) address is only valid between the call to the function and + * the corresponding call to \ref vxUnmapLUT. + * \param [in] usage This declares the access mode for the LUT, using + * the \ref vx_accessor_e enumeration. + * \arg \ref VX_READ_ONLY: after the function call, the content of the memory location + * pointed by (*ptr) contains the LUT data. Writing into this memory location + * is forbidden and its behavior is undefined. + * \arg \ref VX_READ_AND_WRITE: after the function call, the content of the memory + * location pointed by (*ptr) contains the LUT data; writing into this memory + * is allowed only for the location of entries and will result in a modification + * of the affected entries in the LUT object once the LUT is unmapped. + * \arg \ref VX_WRITE_ONLY: after the function call, the memory location pointed by (*ptr) + * contains undefined data; writing each entry of LUT is required prior to + * unmapping. Entries not written by the application before unmap will become + * undefined after unmap, even if they were well defined before map. + * \param [in] mem_type A \ref vx_memory_type_e enumeration that + * specifies the type of the memory where the LUT is requested to be mapped. + * \param [in] flags An integer that allows passing options to the map operation. + * Use 0 for this option. + * \return A \ref vx_status_e enumeration. + * \retval VX_SUCCESS No errors; any other value indicates failure. + * \retval VX_ERROR_INVALID_REFERENCE lut is not a valid \ref vx_lut reference. + * \retval VX_ERROR_INVALID_PARAMETERS An other parameter is incorrect. + * \ingroup group_lut + * \post \ref vxUnmapLUT with same (*map_id) value. + */ +VX_API_ENTRY vx_status VX_API_CALL vxMapLUT(vx_lut lut, vx_map_id *map_id, void **ptr, vx_enum usage, vx_enum mem_type, vx_bitfield flags); + +/*! \brief Unmap and commit potential changes to LUT object that was previously mapped. + * Unmapping a LUT invalidates the memory location from which the LUT data could + * be accessed by the application. Accessing this memory location after the unmap function + * completes has an undefined behavior. + * \param [in] lut The reference to the LUT object to unmap. + * \param [out] map_id The unique map identifier that was returned when calling + * \ref vxMapLUT . + * \return A \ref vx_status_e enumeration. + * \retval VX_SUCCESS No errors; any other value indicates failure. + * \retval VX_ERROR_INVALID_REFERENCE lut is not a valid \ref vx_lut reference. + * \retval VX_ERROR_INVALID_PARAMETERS An other parameter is incorrect. + * \ingroup group_lut + * \pre \ref vxMapLUT returning the same map_id value + */ +VX_API_ENTRY vx_status VX_API_CALL vxUnmapLUT(vx_lut lut, vx_map_id map_id); + +/*============================================================================== + DISTRIBUTION + =============================================================================*/ + +/*! \brief Creates a reference to a 1D Distribution of a consecutive interval [offset, offset + range - 1] + * defined by a start offset and valid range, divided equally into numBins parts. + * \param [in] context The reference to the overall context. + * \param [in] numBins The number of bins in the distribution. + * \param [in] offset The start offset into the range value that marks the beginning of the 1D Distribution. + * \param [in] range The total number of the consecutive values of the distribution interval. + * \returns A distribution reference \ref vx_distribution.
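A sketch of the map/write/unmap cycle described above for a 256-entry VX_TYPE_UINT8 LUT; with VX_WRITE_ONLY every entry must be written before unmapping, and the inversion table is purely illustrative:

    vx_lut lut = vxCreateLUT(context, VX_TYPE_UINT8, 256);
    vx_map_id map_id;
    vx_uint8 *table = NULL;
    if (vxMapLUT(lut, &map_id, (void **)&table, VX_WRITE_ONLY, VX_MEMORY_TYPE_HOST, 0) == VX_SUCCESS)
    {
        for (vx_uint32 i = 0; i < 256; i++)
            table[i] = (vx_uint8)(255 - i);   /* fill every entry */
        vxUnmapLUT(lut, map_id);              /* commits the written entries */
    }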
Any possible errors preventing a + * successful creation should be checked using \ref vxGetStatus. + * \ingroup group_distribution + */ +VX_API_ENTRY vx_distribution VX_API_CALL vxCreateDistribution(vx_context context, vx_size numBins, vx_int32 offset, vx_uint32 range); + +/*! \brief Creates an opaque reference to a 1D Distribution object without direct user access. + * \param [in] graph The reference to the parent graph. + * \param [in] numBins The number of bins in the distribution. + * \param [in] offset The start offset into the range value that marks the beginning of the 1D Distribution. + * \param [in] range The total number of the consecutive values of the distribution interval. + * \see \ref vxCreateDistribution + * \returns A distribution reference \ref vx_distribution. Any possible errors preventing a + * successful creation should be checked using \ref vxGetStatus. + * \ingroup group_distribution + */ +VX_API_ENTRY vx_distribution VX_API_CALL vxCreateVirtualDistribution(vx_graph graph, vx_size numBins, vx_int32 offset, vx_uint32 range); + +/*! \brief Releases a reference to a distribution object. + * The object may not be garbage collected until its total reference count is zero. + * \param [in] distribution The reference to the distribution to release. + * \post After returning from this function the reference is zeroed. + * \return A \ref vx_status_e enumeration. + * \retval VX_SUCCESS No errors; any other value indicates failure. + * \retval VX_ERROR_INVALID_REFERENCE distribution is not a valid \ref vx_distribution reference. + * \ingroup group_distribution + */ +VX_API_ENTRY vx_status VX_API_CALL vxReleaseDistribution(vx_distribution *distribution); + +/*! \brief Queries a Distribution object. + * \param [in] distribution The reference to the distribution to query. + * \param [in] attribute The attribute to query. Use a \ref vx_distribution_attribute_e enumeration. + * \param [out] ptr The location at which to store the resulting value. + * \param [in] size The size in bytes of the container to which \a ptr points. + * \return A \ref vx_status_e enumeration. + * \retval VX_SUCCESS No errors; any other value indicates failure. + * \retval VX_ERROR_INVALID_REFERENCE distribution is not a valid \ref vx_distribution reference. + * \ingroup group_distribution + */ +VX_API_ENTRY vx_status VX_API_CALL vxQueryDistribution(vx_distribution distribution, vx_enum attribute, void *ptr, vx_size size); + +/*! \brief Allows the application to copy from/into a distribution object. + * \param [in] distribution The reference to the distribution object that is the source or the + * destination of the copy. + * \param [in] user_ptr The address of the memory location where to store the requested data + * if the copy was requested in read mode, or from where to get the data to store into the distribution + * object if the copy was requested in write mode. In the user memory, the distribution is + * represented as a \ref vx_uint32 array with a number of elements equal to the value returned via + * \ref VX_DISTRIBUTION_BINS. The accessible memory must be large enough + * to contain this vx_uint32 array: + * accessible memory in bytes >= sizeof(vx_uint32) * num_bins. + * \param [in] usage This declares the effect of the copy with regard to the distribution object + * using the \ref vx_accessor_e enumeration. Only \ref VX_READ_ONLY and \ref VX_WRITE_ONLY + * are supported: + * \arg \ref VX_READ_ONLY means that data are copied from the distribution object into the user memory.
+ * \arg \ref VX_WRITE_ONLY means that data are copied into the distribution object from the user memory. + * \param [in] user_mem_type A \ref vx_memory_type_e enumeration that specifies + * the memory type of the memory referenced by the user_addr. + * \return A \ref vx_status_e enumeration. + * \retval VX_SUCCESS No errors; any other value indicates failure. + * \retval VX_ERROR_INVALID_REFERENCE distribution is not a valid \ref vx_distribution reference. + * \retval VX_ERROR_INVALID_PARAMETERS An other parameter is incorrect. + * \ingroup group_distribution + */ +VX_API_ENTRY vx_status VX_API_CALL vxCopyDistribution(vx_distribution distribution, void *user_ptr, vx_enum usage, vx_enum user_mem_type); + +/*! \brief Allows the application to get direct access to distribution object. + * \param [in] distribution The reference to the distribution object to map. + * \param [out] map_id The address of a \ref vx_map_id variable where the function + * returns a map identifier. + * \arg (*map_id) must eventually be provided as the map_id parameter of a call to + * \ref vxUnmapDistribution. + * \param [out] ptr The address of a pointer that the function sets to the + * address where the requested data can be accessed. In the mapped memory area, + * data are structured as a vx_uint32 array with a number of elements equal to + * the value returned via \ref VX_DISTRIBUTION_BINS. Each + * element of this array corresponds to a bin of the distribution, with a range-major + * ordering. Accessing the memory out of the bound of this array + * is forbidden and has an undefined behavior. The returned (*ptr) address + * is only valid between the call to the function and the corresponding call to + * \ref vxUnmapDistribution. + * \param [in] usage This declares the access mode for the distribution, using + * the \ref vx_accessor_e enumeration. + * \arg \ref VX_READ_ONLY: after the function call, the content of the memory location + * pointed by (*ptr) contains the distribution data. Writing into this memory location + * is forbidden and its behavior is undefined. + * \arg \ref VX_READ_AND_WRITE: after the function call, the content of the memory + * location pointed by (*ptr) contains the distribution data; writing into this memory + * is allowed only for the location of bins and will result in a modification of the + * affected bins in the distribution object once the distribution is unmapped. + * \arg \ref VX_WRITE_ONLY: after the function call, the memory location pointed by (*ptr) + * contains undefined data; writing each bin of distribution is required prior to + * unmapping. Bins not written by the application before unmap will become + * undefined after unmap, even if they were well defined before map. + * \param [in] mem_type A \ref vx_memory_type_e enumeration that + * specifies the type of the memory where the distribution is requested to be mapped. + * \param [in] flags An integer that allows passing options to the map operation. + * Use 0 for this option. + * \return A \ref vx_status_e enumeration. + * \retval VX_SUCCESS No errors; any other value indicates failure. + * \retval VX_ERROR_INVALID_REFERENCE distribution is not a valid \ref vx_distribution reference. + * \retval VX_ERROR_INVALID_PARAMETERS An other parameter is incorrect. + * \ingroup group_distribution + * \post \ref vxUnmapDistribution with same (*map_id) value.
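A sketch of reading histogram bins out of a distribution with vxCopyDistribution, assuming a 16-bin distribution over the value range [0, 255] that some graph or kernel has already populated:

    vx_distribution dist = vxCreateDistribution(context, 16, 0, 256);
    vx_uint32 bins[16];
    /* ... run the graph/kernel that fills the distribution ... */
    vxCopyDistribution(dist, bins, VX_READ_ONLY, VX_MEMORY_TYPE_HOST);  /* dist -> bins */
    vxReleaseDistribution(&dist);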
+ */ +VX_API_ENTRY vx_status VX_API_CALL vxMapDistribution(vx_distribution distribution, vx_map_id *map_id, void **ptr, vx_enum usage, vx_enum mem_type, vx_bitfield flags); + +/*! \brief Unmap and commit potential changes to distribution object that was previously mapped. + * Unmapping a distribution invalidates the memory location from which the distribution data + * could be accessed by the application. Accessing this memory location after the unmap + * function completes has an undefined behavior. + * \param [in] distribution The reference to the distribution object to unmap. + * \param [out] map_id The unique map identifier that was returned when calling + * \ref vxMapDistribution . + * \return A \ref vx_status_e enumeration. + * \retval VX_SUCCESS No errors; any other value indicates failure. + * \retval VX_ERROR_INVALID_REFERENCE distribution is not a valid \ref vx_distribution reference. + * \retval VX_ERROR_INVALID_PARAMETERS An other parameter is incorrect. + * \ingroup group_distribution + * \pre \ref vxMapDistribution returning the same map_id value + */ +VX_API_ENTRY vx_status VX_API_CALL vxUnmapDistribution(vx_distribution distribution, vx_map_id map_id); + + +/*============================================================================== + THRESHOLD + =============================================================================*/ + +/*! \brief Creates a threshold object and returns a reference to it. + * + * The threshold object defines the parameters of a thresholding operation + * to an input image, that generates an output image that can have a different + * format. The thresholding 'false' or 'true' output values are specified per + * pixel channels of the output format and can be modified with + * \ref vxCopyThresholdOutput. The default 'false' output value of + * pixels channels should be 0, and the default 'true' value should be non-zero. + * For standard image formats, default output pixel values are defined as + * following: + * \arg \ref VX_DF_IMAGE_RGB : false={0, 0, 0}, true={255,255,255} + * \arg \ref VX_DF_IMAGE_RGBX : false={0, 0, 0, 0}, true={255,255,255,255} + * \arg \ref VX_DF_IMAGE_NV12 : false={0, 0, 0}, true={255,255,255} + * \arg \ref VX_DF_IMAGE_NV21 : false={0, 0, 0}, true={255,255,255} + * \arg \ref VX_DF_IMAGE_UYVY : false={0, 0, 0}, true={255,255,255} + * \arg \ref VX_DF_IMAGE_YUYV : false={0, 0, 0}, true={255,255,255} + * \arg \ref VX_DF_IMAGE_IYUV : false={0, 0, 0}, true={255,255,255} + * \arg \ref VX_DF_IMAGE_YUV4 : false={0, 0, 0}, true={255,255,255} + * \arg \ref VX_DF_IMAGE_U8 : false=0, true=0xFF + * \arg \ref VX_DF_IMAGE_S16 : false=0, true=-1 + * \arg \ref VX_DF_IMAGE_U16 : false=0, true=0xFFFF + * \arg \ref VX_DF_IMAGE_S32 : false=0, true=-1 + * \arg \ref VX_DF_IMAGE_U32 : false=0, true=0xFFFFFFFF + * \param [in] context The reference to the context in which the object is + * created. + * \param [in] thresh_type The type of thresholding operation. + * \param [in] input_format The format of images that will be used as input of + * the thresholding operation. + * \param [in] output_format The format of images that will be generated by the + * thresholding operation. + * \returns A threshold reference \ref vx_threshold. Any possible + * errors preventing a successful creation should be checked using + * \ref vxGetStatus. + * \ingroup group_threshold + */ +VX_API_ENTRY vx_threshold VX_API_CALL vxCreateThresholdForImage(vx_context context, + vx_enum thresh_type, + vx_df_image input_format, + vx_df_image output_format); + +/*! 
\brief Creates an opaque reference to a threshold object without direct user access. + * + * \param [in] graph The reference to the parent graph. + * \param [in] thresh_type The type of thresholding operation. + * \param [in] input_format The format of images that will be used as input of + * the thresholding operation. + * \param [in] output_format The format of images that will be generated by the + * thresholding operation. + * \see \ref vxCreateThresholdForImage + * \returns A threshold reference \ref vx_threshold. Any possible + * errors preventing a successful creation should be checked using + * \ref vxGetStatus. + * \ingroup group_threshold + */ +VX_API_ENTRY vx_threshold VX_API_CALL vxCreateVirtualThresholdForImage(vx_graph graph, + vx_enum thresh_type, + vx_df_image input_format, + vx_df_image output_format); + +/*! \brief Allows the application to copy the thresholding value from/into a + * threshold object with type \ref VX_THRESHOLD_TYPE_BINARY. + * \param [in] thresh The reference to the threshold object that is the source + * or the destination of the copy. + * \param [in,out] value_ptr The address of the memory location where to store + * the thresholding value if the copy was requested in read mode, or from where + * to get the thresholding value to store into the threshold object if the copy + * was requested in write mode. + * \param [in] usage This declares the effect of the copy with regard to the + * threshold object using the \ref vx_accessor_e enumeration. Only + * \ref VX_READ_ONLY and \ref VX_WRITE_ONLY are supported: + * \arg \ref VX_READ_ONLY means that the thresholding value is copied + * from the threshold object into the user memory. After the copy, only the + * field of the (*value_ptr) union that corresponds to the input image format + * of the threshold object is meaningful. + * \arg \ref VX_WRITE_ONLY means the field of the (*value_ptr) union + * corresponding to the input format of the threshold object is copied into + * the threshold object. + * \param [in] user_mem_type A \ref vx_memory_type_e enumeration that + * specifies the type of the memory referenced by \p value_ptr. + * \return A \ref vx_status_e enumeration. + * \retval VX_ERROR_INVALID_REFERENCE The threshold reference is not actually a + * threshold reference. + * \retval VX_ERROR_NOT_COMPATIBLE The threshold object doesn't have type + * \ref VX_THRESHOLD_TYPE_BINARY + * \retval VX_ERROR_INVALID_PARAMETERS An other parameter is incorrect. + * \ingroup group_threshold + */ +VX_API_ENTRY vx_status VX_API_CALL vxCopyThresholdValue(vx_threshold thresh, + vx_pixel_value_t * value_ptr, + vx_enum usage, + vx_enum user_mem_type + ); + +/*! \brief Allows the application to copy thresholding values from/into a + * threshold object with type \ref VX_THRESHOLD_TYPE_RANGE. + * \param [in] thresh The reference to the threshold object that is the source + * or the destination of the copy. + * \param [in,out] lower_value_ptr The address of the memory location where to + * store the lower thresholding value if the copy was requested in read mode, + * or from where to get the lower thresholding value to store into the threshold + * object if the copy was requested in write mode. + * \param [in,out] upper_value_ptr The address of the memory location where to + * store the upper thresholding value if the copy was requested in read mode, or + * from where to get the upper thresholding value to store into the threshold + * object if the copy was requested in write mode. 
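A sketch of setting the threshold level of a binary U8 threshold, using vxCreateThresholdForImage and vxCopyThresholdValue from above; the value 128 is illustrative:

    vx_threshold thresh = vxCreateThresholdForImage(context, VX_THRESHOLD_TYPE_BINARY,
                                                    VX_DF_IMAGE_U8, VX_DF_IMAGE_U8);
    vx_pixel_value_t value;
    value.U8 = 128;   /* threshold level for the U8 input format */
    vxCopyThresholdValue(thresh, &value, VX_WRITE_ONLY, VX_MEMORY_TYPE_HOST);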
+ * \param [in] usage This declares the effect of the copy with regard to the + * threshold object using the \ref vx_accessor_e enumeration. Only + * \ref VX_READ_ONLY and \ref VX_WRITE_ONLY are supported: + * \arg \ref VX_READ_ONLY means that thresholding values are copied + * from the threshold object into the user memory. After the copy, only the + * field of (*lower_value_ptr) and (*upper_value_ptr) unions that corresponds + * to the input image format of the threshold object is meaningful. + * \arg \ref VX_WRITE_ONLY means the field of the (*lower_value_ptr) + * and (*upper_value_ptr) unions corresponding to the input format of the + * threshold object is copied into the threshold object. + * \param [in] user_mem_type A \ref vx_memory_type_e enumeration that + * specifies the type of the memory referenced by \p lower_value_ptr and + * \p upper_value_ptr. + * \return A \ref vx_status_e enumeration. + * \retval VX_ERROR_INVALID_REFERENCE The threshold reference is not actually + * a threshold reference. + * \retval VX_ERROR_NOT_COMPATIBLE The threshold object doesn't have type + * \ref VX_THRESHOLD_TYPE_RANGE + * \retval VX_ERROR_INVALID_PARAMETERS An other parameter is incorrect. + * \ingroup group_threshold + */ +VX_API_ENTRY vx_status VX_API_CALL vxCopyThresholdRange(vx_threshold thresh, + vx_pixel_value_t * lower_value_ptr, + vx_pixel_value_t * upper_value_ptr, + vx_enum usage, + vx_enum user_mem_type); + +/*! \brief Allows the application to copy the true and false output values + * from/into a threshold object. + * \param [in] thresh The reference to the threshold object that is the source + * or the destination of the copy. + * \param [in,out] true_value_ptr The address of the memory location where to + * store the true output value if the copy was requested in read mode, + * or from where to get the true output value to store into the threshold + * object if the copy was requested in write mode. + * \param [in,out] false_value_ptr The address of the memory location where to + * store the false output value if the copy was requested in read mode, or + * from where to get the false output value to store into the threshold + * object if the copy was requested in write mode. + * \param [in] usage This declares the effect of the copy with regard to the + * threshold object using the \ref vx_accessor_e enumeration. Only + * \ref VX_READ_ONLY and \ref VX_WRITE_ONLY are supported: + * \arg \ref VX_READ_ONLY means that true and false output values + * are copied from the threshold object into the user memory. After the copy, + * only the field of (*true_value_ptr) and (*false_value_ptr) unions that + * corresponds to the output image format of the threshold object is meaningful. + * \arg \ref VX_WRITE_ONLY means the field of the (*true_value_ptr) + * and (*false_value_ptr) unions corresponding to the output format of the + * threshold object is copied into the threshold object. + * \param [in] user_mem_type A \ref vx_memory_type_e enumeration that + * specifies the type of the memory referenced by \p true_value_ptr and + * \p false_value_ptr. + * \return A \ref vx_status_e enumeration. + * \retval VX_ERROR_INVALID_REFERENCE The threshold reference is not actually + * a threshold reference. + * \retval VX_ERROR_INVALID_PARAMETERS An other parameter is incorrect. 
+ * \ingroup group_threshold + */ +VX_API_ENTRY vx_status VX_API_CALL vxCopyThresholdOutput(vx_threshold thresh, + vx_pixel_value_t * true_value_ptr, + vx_pixel_value_t * false_value_ptr, + vx_enum usage, + vx_enum user_mem_type); + +/*! \brief Releases a reference to a threshold object. + * The object may not be garbage collected until its total reference count is zero. + * \param [in] thresh The pointer to the threshold to release. + * \post After returning from this function the reference is zeroed. + * \return A \ref vx_status_e enumeration. + * \retval VX_SUCCESS No errors; any other value indicates failure. + * \retval VX_ERROR_INVALID_REFERENCE thresh is not a valid \ref vx_threshold reference. + * \ingroup group_threshold + */ +VX_API_ENTRY vx_status VX_API_CALL vxReleaseThreshold(vx_threshold *thresh); + +/*! \brief Sets attributes on the threshold object. + * \param [in] thresh The threshold object to set. + * \param [in] attribute The attribute to modify. Use a \ref vx_threshold_attribute_e enumeration. + * \param [in] ptr The pointer to the value to which to set the attribute. + * \param [in] size The size of the data pointed to by \a ptr. + * \return A \ref vx_status_e enumeration. + * \retval VX_SUCCESS No errors; any other value indicates failure. + * \retval VX_ERROR_INVALID_REFERENCE thresh is not a valid \ref vx_threshold reference. + * \ingroup group_threshold + */ +VX_API_ENTRY vx_status VX_API_CALL vxSetThresholdAttribute(vx_threshold thresh, vx_enum attribute, const void *ptr, vx_size size); + +/*! \brief Queries an attribute on the threshold object. + * \param [in] thresh The threshold object to set. + * \param [in] attribute The attribute to query. Use a \ref vx_threshold_attribute_e enumeration. + * \param [out] ptr The location at which to store the resulting value. + * \param [in] size The size of the container to which \a ptr points. + * \return A \ref vx_status_e enumeration. + * \retval VX_SUCCESS No errors; any other value indicates failure. + * \retval VX_ERROR_INVALID_REFERENCE thresh is not a valid \ref vx_threshold reference. + * \ingroup group_threshold + */ +VX_API_ENTRY vx_status VX_API_CALL vxQueryThreshold(vx_threshold thresh, vx_enum attribute, void *ptr, vx_size size); + +/*============================================================================== + MATRIX + =============================================================================*/ + +/*! \brief Creates a reference to a matrix object. + * \param [in] c The reference to the overall context. + * \param [in] data_type The unit format of the matrix. \ref VX_TYPE_UINT8 or \ref VX_TYPE_INT32 or \ref VX_TYPE_FLOAT32. + * \param [in] columns The first dimensionality. + * \param [in] rows The second dimensionality. + * \returns An matrix reference \ref vx_matrix. Any possible errors preventing a + * successful creation should be checked using \ref vxGetStatus. + * \ingroup group_matrix + */ +VX_API_ENTRY vx_matrix VX_API_CALL vxCreateMatrix(vx_context c, vx_enum data_type, vx_size columns, vx_size rows); + +/*! \brief Creates an opaque reference to a matrix object without direct user access. + * \param [in] graph The reference to the parent graph. + * \param [in] data_type The unit format of the matrix. \ref VX_TYPE_UINT8 or \ref VX_TYPE_INT32 or \ref VX_TYPE_FLOAT32. + * \param [in] columns The first dimensionality. + * \param [in] rows The second dimensionality. + * \see \ref vxCreateMatrix + * \returns An matrix reference \ref vx_matrix. 
Any possible errors preventing a + * successful creation should be checked using \ref vxGetStatus. + * \ingroup group_matrix + */ +VX_API_ENTRY vx_matrix VX_API_CALL vxCreateVirtualMatrix(vx_graph graph, vx_enum data_type, vx_size columns, vx_size rows); + +/*! \brief Releases a reference to a matrix object. + * The object may not be garbage collected until its total reference count is zero. + * \param [in] mat The matrix reference to release. + * \post After returning from this function the reference is zeroed. + * \return A \ref vx_status_e enumeration. + * \retval VX_SUCCESS No errors; any other value indicates failure. + * \retval VX_ERROR_INVALID_REFERENCE mat is not a valid \ref vx_matrix reference. + * \ingroup group_matrix + */ +VX_API_ENTRY vx_status VX_API_CALL vxReleaseMatrix(vx_matrix *mat); + +/*! \brief Queries an attribute on the matrix object. + * \param [in] mat The matrix object to set. + * \param [in] attribute The attribute to query. Use a \ref vx_matrix_attribute_e enumeration. + * \param [out] ptr The location at which to store the resulting value. + * \param [in] size The size in bytes of the container to which \a ptr points. + * \return A \ref vx_status_e enumeration. + * \retval VX_SUCCESS No errors; any other value indicates failure. + * \retval VX_ERROR_INVALID_REFERENCE mat is not a valid \ref vx_matrix reference. + * \ingroup group_matrix + */ +VX_API_ENTRY vx_status VX_API_CALL vxQueryMatrix(vx_matrix mat, vx_enum attribute, void *ptr, vx_size size); + +/*! \brief Allows the application to copy from/into a matrix object. + * \param [in] matrix The reference to the matrix object that is the source or the + * destination of the copy. + * \param [in] user_ptr The address of the memory location where to store the requested data + * if the copy was requested in read mode, or from where to get the data to store into the matrix + * object if the copy was requested in write mode. In the user memory, the matrix is + * structured as a row-major 2D array with elements of the type corresponding to + * \ref VX_MATRIX_TYPE, with a number of rows corresponding to + * \ref VX_MATRIX_ROWS and a number of columns corresponding to + * \ref VX_MATRIX_COLUMNS. The accessible memory must be large + * enough to contain this 2D array: + * accessible memory in bytes >= sizeof(data_element) * rows * columns. + * \param [in] usage This declares the effect of the copy with regard to the matrix object + * using the \ref vx_accessor_e enumeration. Only \ref VX_READ_ONLY and \ref VX_WRITE_ONLY + * are supported: + * \arg \ref VX_READ_ONLY means that data are copied from the matrix object into the user memory. + * \arg \ref VX_WRITE_ONLY means that data are copied into the matrix object from the user memory. + * \param [in] user_mem_type A \ref vx_memory_type_e enumeration that specifies + * the memory type of the memory referenced by the user_addr. + * \return A \ref vx_status_e enumeration. + * \retval VX_SUCCESS No errors; any other value indicates failure. + * \retval VX_ERROR_INVALID_REFERENCE matrix is not a valid \ref vx_matrix reference. + * \retval VX_ERROR_INVALID_PARAMETERS An other parameter is incorrect. + * \ingroup group_matrix + */ +VX_API_ENTRY vx_status VX_API_CALL vxCopyMatrix(vx_matrix matrix, void *user_ptr, vx_enum usage, vx_enum user_mem_type); + +/*! \brief Creates a reference to a matrix object from a boolean pattern. + * \see \ref vxCreateMatrixFromPatternAndOrigin for a description of the matrix patterns. 
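+ *
+ * One possible usage sketch, assuming an existing \c context: a matrix can be filled
+ * from user memory with \ref vxCopyMatrix, or created directly from a pattern:
+ * \code
+ * const vx_float32 coeffs[3][3] = {
+ *     { 0.0f, 1.0f, 0.0f },
+ *     { 1.0f, 1.0f, 1.0f },
+ *     { 0.0f, 1.0f, 0.0f },
+ * };
+ * vx_matrix mat = vxCreateMatrix(context, VX_TYPE_FLOAT32, 3, 3);
+ * vxCopyMatrix(mat, (void *)coeffs, VX_WRITE_ONLY, VX_MEMORY_TYPE_HOST);       // row-major upload
+ *
+ * vx_matrix cross = vxCreateMatrixFromPattern(context, VX_PATTERN_CROSS, 3, 3); // read-only 3x3 cross
+ * \endcode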
+ * \param [in] context The reference to the overall context. + * \param [in] pattern The pattern of the matrix. See \ref VX_MATRIX_PATTERN. + * \param [in] columns The first dimensionality. + * \param [in] rows The second dimensionality. + * \returns A matrix reference \ref vx_matrix of type \ref VX_TYPE_UINT8. Any possible errors preventing a + * successful creation should be checked using \ref vxGetStatus. + * \ingroup group_matrix + */ +VX_API_ENTRY vx_matrix VX_API_CALL vxCreateMatrixFromPattern(vx_context context, vx_enum pattern, vx_size columns, vx_size rows); + +/*! \brief Creates a reference to a matrix object from a boolean pattern, with a user-specified origin. + * + * The matrix created by this function is of type \ref VX_TYPE_UINT8, with the value 0 representing False, + * and the value 255 representing True. It supports the patterns as described below: + * - VX_PATTERN_BOX is a matrix with dimensions equal to the given number of rows and columns, and all cells equal to 255. + * Dimensions of 3x3 and 5x5 must be supported. + * - VX_PATTERN_CROSS is a matrix with dimensions equal to the given number of rows and columns, which both must be odd numbers. + * All cells in the center row and center column are equal to 255, and the rest are equal to zero. + * Dimensions of 3x3 and 5x5 must be supported. + * - VX_PATTERN_DISK is a matrix with dimensions equal to the given number of rows (R) and columns (C), + * where R and C are odd and cell (c, r) is 255 if: \n + * (r-R/2 + 0.5)^2 / (R/2)^2 + (c-C/2 + 0.5)^2/(C/2)^2 is less than or equal to 1,\n and 0 otherwise. + * + * A matrix created from pattern is read-only. The behavior when attempting to modify such a matrix is undefined. + * + * \param [in] context The reference to the overall context. + * \param [in] pattern The pattern of the matrix. See \ref VX_MATRIX_PATTERN. + * \param [in] columns The first dimensionality. + * \param [in] rows The second dimensionality. + * \param [in] origin_col The origin (first dimensionality). + * \param [in] origin_row The origin (second dimensionality). + * \returns A matrix reference \ref vx_matrix of type \ref VX_TYPE_UINT8. Any possible errors + * preventing a successful creation should be checked using \ref vxGetStatus. + * \ingroup group_matrix + */ +VX_API_ENTRY vx_matrix VX_API_CALL vxCreateMatrixFromPatternAndOrigin(vx_context context, vx_enum pattern, vx_size columns, vx_size rows, vx_size origin_col, vx_size origin_row); + + +/*============================================================================== + CONVOLUTION + =============================================================================*/ + +/*! \brief Creates a reference to a convolution matrix object. + * \param [in] context The reference to the overall context. + * \param [in] columns The columns dimension of the convolution. + * Must be odd and greater than or equal to 3 and less than the value returned + * from \ref VX_CONTEXT_CONVOLUTION_MAX_DIMENSION. + * \param [in] rows The rows dimension of the convolution. + * Must be odd and greater than or equal to 3 and less than the value returned + * from \ref VX_CONTEXT_CONVOLUTION_MAX_DIMENSION. + * \returns A convolution reference \ref vx_convolution. Any possible errors preventing a + * successful creation should be checked using \ref vxGetStatus. + * \ingroup group_convolution + */ +VX_API_ENTRY vx_convolution VX_API_CALL vxCreateConvolution(vx_context context, vx_size columns, vx_size rows); + +/*! 
\brief Creates an opaque reference to a convolution matrix object without direct user access. + * \param [in] graph The reference to the parent graph. + * \param [in] columns The columns dimension of the convolution. + * Must be odd and greater than or equal to 3 and less than the value returned + * from \ref VX_CONTEXT_CONVOLUTION_MAX_DIMENSION. + * \param [in] rows The rows dimension of the convolution. + * Must be odd and greater than or equal to 3 and less than the value returned + * from \ref VX_CONTEXT_CONVOLUTION_MAX_DIMENSION. + * \see \ref vxCreateConvolution + * \returns A convolution reference \ref vx_convolution. Any possible errors preventing a + * successful creation should be checked using \ref vxGetStatus. + * \ingroup group_convolution + */ +VX_API_ENTRY vx_convolution VX_API_CALL vxCreateVirtualConvolution(vx_graph graph, vx_size columns, vx_size rows); + +/*! \brief Releases the reference to a convolution matrix. + * The object may not be garbage collected until its total reference count is zero. + * \param [in] conv The pointer to the convolution matrix to release. + * \post After returning from this function the reference is zeroed. + * \return A \ref vx_status_e enumeration. + * \retval VX_SUCCESS No errors; any other value indicates failure. + * \retval VX_ERROR_INVALID_REFERENCE conv is not a valid \ref vx_convolution reference. + * \ingroup group_convolution + */ +VX_API_ENTRY vx_status VX_API_CALL vxReleaseConvolution(vx_convolution *conv); + +/*! \brief Queries an attribute on the convolution matrix object. + * \param [in] conv The convolution matrix object to set. + * \param [in] attribute The attribute to query. Use a \ref vx_convolution_attribute_e enumeration. + * \param [out] ptr The location at which to store the resulting value. + * \param [in] size The size in bytes of the container to which \a ptr points. + * \return A \ref vx_status_e enumeration. + * \retval VX_SUCCESS No errors; any other value indicates failure. + * \retval VX_ERROR_INVALID_REFERENCE conv is not a valid \ref vx_convolution reference. + * \ingroup group_convolution + */ +VX_API_ENTRY vx_status VX_API_CALL vxQueryConvolution(vx_convolution conv, vx_enum attribute, void *ptr, vx_size size); + +/*! \brief Sets attributes on the convolution object. + * \param [in] conv The coordinates object to set. + * \param [in] attribute The attribute to modify. Use a \ref vx_convolution_attribute_e enumeration. + * \param [in] ptr The pointer to the value to which to set the attribute. + * \param [in] size The size in bytes of the data pointed to by \a ptr. + * \return A \ref vx_status_e enumeration. + * \retval VX_SUCCESS No errors; any other value indicates failure. + * \retval VX_ERROR_INVALID_REFERENCE conv is not a valid \ref vx_convolution reference. + * \ingroup group_convolution + */ +VX_API_ENTRY vx_status VX_API_CALL vxSetConvolutionAttribute(vx_convolution conv, vx_enum attribute, const void *ptr, vx_size size); + +/*! \brief Allows the application to copy coefficients from/into a convolution object. + * \param [in] conv The reference to the convolution object that is the source or the destination of the copy. + * \param [in] user_ptr The address of the memory location where to store the requested + * coefficient data if the copy was requested in read mode, or from where to get the + * coefficient data to store into the convolution object if the copy was requested in + * write mode. 
In the user memory, the convolution coefficient data is structured as a + * row-major 2D array with elements of the type corresponding + * to \ref VX_TYPE_CONVOLUTION, with a number of rows corresponding to + * \ref VX_CONVOLUTION_ROWS and a number of columns corresponding to + * \ref VX_CONVOLUTION_COLUMNS. The accessible memory must be large + * enough to contain this 2D array: + * accessible memory in bytes >= sizeof(data_element) * rows * columns. + * \param [in] usage This declares the effect of the copy with regard to the convolution object + * using the \ref vx_accessor_e enumeration. Only \ref VX_READ_ONLY and \ref VX_WRITE_ONLY + * are supported: + * \arg \ref VX_READ_ONLY means that data are copied from the convolution object into the user memory. + * \arg \ref VX_WRITE_ONLY means that data are copied into the convolution object from the user memory. + * \param [in] user_mem_type A \ref vx_memory_type_e enumeration that specifies + * the memory type of the memory referenced by the user_addr. + * \return A \ref vx_status_e enumeration. + * \retval VX_SUCCESS No errors; any other value indicates failure. + * \retval VX_ERROR_INVALID_REFERENCE conv is not a valid \ref vx_convolution reference. + * \retval VX_ERROR_INVALID_PARAMETERS An other parameter is incorrect. + * \ingroup group_convolution + */ +VX_API_ENTRY vx_status VX_API_CALL vxCopyConvolutionCoefficients(vx_convolution conv, void *user_ptr, vx_enum usage, vx_enum user_mem_type); + + +/*============================================================================== + PYRAMID + =============================================================================*/ + +/*! \brief Creates a reference to a pyramid object of the supplied number of levels. + * \param [in] context The reference to the overall context. + * \param [in] levels The number of levels desired. This is required to be a non-zero value. + * \param [in] scale Used to indicate the scale between pyramid levels. This is required to be a non-zero positive value. + * \ref VX_SCALE_PYRAMID_HALF and \ref VX_SCALE_PYRAMID_ORB must be supported. + * \param [in] width The width of the 0th level image in pixels. + * \param [in] height The height of the 0th level image in pixels. + * \param [in] format The format of all images in the pyramid. NV12, NV21, IYUV, UYVY and YUYV formats are not supported. + * \returns A pyramid reference \ref vx_pyramid containing the sub-images. Any possible errors preventing a + * successful creation should be checked using \ref vxGetStatus. + * \ingroup group_pyramid + */ +VX_API_ENTRY vx_pyramid VX_API_CALL vxCreatePyramid(vx_context context, vx_size levels, vx_float32 scale, vx_uint32 width, vx_uint32 height, vx_df_image format); + +/*! \brief Creates a reference to a virtual pyramid object of the supplied number of levels. + * \details Virtual Pyramids can be used to connect Nodes together when the contents of the pyramids will + * not be accessed by the user of the API. + * All of the following constructions are valid: + * \code + * vx_context context = vxCreateContext(); + * vx_graph graph = vxCreateGraph(context); + * vx_pyramid virt[] = { + * vxCreateVirtualPyramid(graph, 4, VX_SCALE_PYRAMID_HALF, 0, 0, VX_DF_IMAGE_VIRT), // no dimension and format specified for level 0 + * vxCreateVirtualPyramid(graph, 4, VX_SCALE_PYRAMID_HALF, 640, 480, VX_DF_IMAGE_VIRT), // no format specified. 
+ * vxCreateVirtualPyramid(graph, 4, VX_SCALE_PYRAMID_HALF, 640, 480, VX_DF_IMAGE_U8), // no access + * }; + * \endcode + * \param [in] graph The reference to the parent graph. + * \param [in] levels The number of levels desired. This is required to be a non-zero value. + * \param [in] scale Used to indicate the scale between pyramid levels. This is required to be a non-zero positive value. + * \ref VX_SCALE_PYRAMID_HALF and \ref VX_SCALE_PYRAMID_ORB must be supported. + * \param [in] width The width of the 0th level image in pixels. This may be set to zero to indicate to the interface that the value is unspecified. + * \param [in] height The height of the 0th level image in pixels. This may be set to zero to indicate to the interface that the value is unspecified. + * \param [in] format The format of all images in the pyramid. This may be set to \ref VX_DF_IMAGE_VIRT to indicate that the format is unspecified. + * \returns A pyramid reference \ref vx_pyramid. Any possible errors preventing a + * successful creation should be checked using \ref vxGetStatus. + * \note Images extracted with \ref vxGetPyramidLevel behave as Virtual Images and + * cause \ref vxMapImagePatch to return errors. + * \ingroup group_pyramid + */ +VX_API_ENTRY vx_pyramid VX_API_CALL vxCreateVirtualPyramid(vx_graph graph, vx_size levels, vx_float32 scale, vx_uint32 width, vx_uint32 height, vx_df_image format); + + +/*! \brief Releases a reference to a pyramid object. + * The object may not be garbage collected until its total reference count is zero. + * \param [in] pyr The pointer to the pyramid to release. + * \ingroup group_pyramid + * \return A \ref vx_status_e enumeration. + * \retval VX_SUCCESS No errors; any other value indicates failure. + * \retval VX_ERROR_INVALID_REFERENCE pyr is not a valid \ref vx_pyramid reference. + * \post After returning from this function the reference is zeroed. + */ +VX_API_ENTRY vx_status VX_API_CALL vxReleasePyramid(vx_pyramid *pyr); + +/*! \brief Queries an attribute from an image pyramid. + * \param [in] pyr The pyramid to query. + * \param [in] attribute The attribute for which to query. Use a \ref vx_pyramid_attribute_e enumeration. + * \param [out] ptr The location at which to store the resulting value. + * \param [in] size The size in bytes of the container to which \a ptr points. + * \return A \ref vx_status_e enumeration. + * \retval VX_SUCCESS No errors; any other value indicates failure. + * \retval VX_ERROR_INVALID_REFERENCE pyr is not a valid \ref vx_pyramid reference. + * \ingroup group_pyramid + */ +VX_API_ENTRY vx_status VX_API_CALL vxQueryPyramid(vx_pyramid pyr, vx_enum attribute, void *ptr, vx_size size); + +/*! \brief Retrieves a level of the pyramid as a \ref vx_image, which can be used + * elsewhere in OpenVX. A call to vxReleaseImage is necessary to release an image for each + * call of vxGetPyramidLevel. + * \param [in] pyr The pyramid object. + * \param [in] index The index of the level, such that index is less than levels. + * \return A \ref vx_image reference. Any possible errors preventing a successful + * function completion should be checked using \ref vxGetStatus. + * \ingroup group_pyramid + */ +VX_API_ENTRY vx_image VX_API_CALL vxGetPyramidLevel(vx_pyramid pyr, vx_uint32 index); + +/*============================================================================== + REMAP + =============================================================================*/ + +/*! \brief Creates a remap table object. 
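+ *
+ * A minimal creation sketch, assuming an existing \c context (the 640x480 sizes are
+ * only illustrative):
+ * \code
+ * // Map a 640x480 destination back onto a 640x480 source.
+ * vx_remap remap = vxCreateRemap(context, 640, 480, 640, 480);
+ * // Check creation with vxGetStatus((vx_reference)remap), then fill the table
+ * // via vxMapRemapPatch or vxCopyRemapPatch.
+ * \endcode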
+ * \param [in] context The reference to the overall context. + * \param [in] src_width Width of the source image in pixel. + * \param [in] src_height Height of the source image in pixels. + * \param [in] dst_width Width of the destination image in pixels. + * \param [in] dst_height Height of the destination image in pixels. + * \ingroup group_remap + * \returns A remap reference \ref vx_remap. Any possible errors preventing a + * successful creation should be checked using \ref vxGetStatus. + */ +VX_API_ENTRY vx_remap VX_API_CALL vxCreateRemap(vx_context context, + vx_uint32 src_width, + vx_uint32 src_height, + vx_uint32 dst_width, + vx_uint32 dst_height); + +/*! \brief Creates an opaque reference to a remap table object without direct user access. + * \param [in] graph The reference to the parent graph. + * \param [in] src_width Width of the source image in pixel. + * \param [in] src_height Height of the source image in pixels. + * \param [in] dst_width Width of the destination image in pixels. + * \param [in] dst_height Height of the destination image in pixels. + * \see \ref vxCreateRemap + * \ingroup group_remap + * \returns A remap reference \ref vx_remap. Any possible errors preventing a + * successful creation should be checked using \ref vxGetStatus. + */ +VX_API_ENTRY vx_remap VX_API_CALL vxCreateVirtualRemap(vx_graph graph, + vx_uint32 src_width, + vx_uint32 src_height, + vx_uint32 dst_width, + vx_uint32 dst_height); + +/*! \brief Releases a reference to a remap table object. The object may not be + * garbage collected until its total reference count is zero. + * \param [in] table The pointer to the remap table to release. + * \post After returning from this function the reference is zeroed. + * \return A \ref vx_status_e enumeration. + * \retval VX_SUCCESS No errors; any other value indicates failure. + * \retval VX_ERROR_INVALID_REFERENCE table is not a valid \ref vx_remap reference. + * \ingroup group_remap + */ +VX_API_ENTRY vx_status VX_API_CALL vxReleaseRemap(vx_remap *table); + +/*! \brief Allows the application to get direct access to a rectangular patch of a remap object. + * + * The patch is specified within the destination dimensions and its + * data provide the corresponding coordinate within the source dimensions. + * The patch is mapped as a 2D array of elements of the type associated + * with the \p coordinate_type parameter (i.e., \ref vx_coordinates2df_t + * for \ref VX_TYPE_COORDINATES2DF). + * The memory layout of the mapped 2D array follows a row-major order where rows are + * compact (without any gap between elements), and where the potential + * padding after each lines is determined by (* \p stride_y). + * + * \param [in] remap The reference to the remap object that contains the + * patch to map. + * + * \param [in] rect The coordinates of remap patch. The patch must be specified + * within the bounds of the remap destination dimensions + * (\ref VX_REMAP_DESTINATION_WIDTH x \ref VX_REMAP_DESTINATION_HEIGHT). + * (start_x, start_y) gives the coordinate of the topleft element inside the patch, + * while (end_x, end_y) gives the coordinate of the bottomright element out of the patch. + * + * \param [out] map_id The address of a \ref vx_map_id variable + * where the function returns a map identifier. + * \arg (*map_id) must eventually be provided as the map_id parameter of a call + * to \ref vxUnmapRemapPatch. 
+ * + * \param [out] stride_y The address of a vx_size variable where the function + * returns the difference between the address of the first element of two + * successive lines in the mapped remap patch. The stride value follows the + * following rule : + * (*stride_y) >= sizeof() * (rect->end_x - rect->start_x) + * + * \param [out] ptr The address of a pointer where the function returns where + * remap patch data can be accessed. (*ptr) is the address of the the top-left + * element of the remap patch. + * The returned (*ptr) address is only valid between the call to this function + * and the corresponding call to \ref vxUnmapRemapPatch. + * + * \param [in] coordinate_type This declares the type of the source coordinate + * data that the application wants to access in the remap patch. + * It must be \ref VX_TYPE_COORDINATES2DF. + * + * \param [in] usage This declares the access mode for the remap patch, using + * the \ref vx_accessor_e enumeration. + * \arg \ref VX_READ_ONLY: after the function call, the content of the + * memory location pointed by (*ptr) contains the remap patch data. Writing into + * this memory location is forbidden and its behavior is undefined. + * \arg \ref VX_READ_AND_WRITE: after the function call, the content of + * the memory location pointed by (*ptr) contains the remap patch data; writing + * into this memory is allowed for the location of elements only and will + * result in a modification of the written elements in the remap object once the + * patch is unmapped. Writing into a gap between element lines + * (when (*stride_y) > sizeof() * (rect->end_x - rect->start_x)) + * is forbidden and its behavior is undefined. + * \arg \ref VX_WRITE_ONLY: after the function call, the memory location + * pointed by (*ptr) contains undefined data; writing each element of the patch is + * required prior to unmapping. Elements not written by the application before + * unmap will become undefined after unmap, even if they were well defined before + * map. Like for \ref VX_READ_AND_WRITE, writing into a gap between + * element lines is forbidden and its behavior is undefined. + * + * \param [in] mem_type A \ref vx_memory_type_e enumeration that + * specifies the type of the memory where the remap patch is requested to be mapped. + * + * \return A \ref vx_status_e enumeration. + * \retval VX_SUCCESS No errors; any other value indicates failure. + * \retval VX_ERROR_INVALID_REFERENCE remap is not a valid \ref vx_remap reference. + * \retval VX_ERROR_INVALID_PARAMETERS An other parameter is incorrect. + * + * \ingroup group_remap + * \post \ref vxUnmapRemapPatch with same (*map_id) value. + */ +VX_API_ENTRY vx_status VX_API_CALL vxMapRemapPatch(vx_remap remap, + const vx_rectangle_t *rect, + vx_map_id *map_id, + vx_size *stride_y, + void **ptr, + vx_enum coordinate_type, + vx_enum usage, + vx_enum mem_type); + +/*! \brief Unmap and commit potential changes to a remap object patch that was previously mapped. + * + * Unmapping a remap patch invalidates the memory location from which the patch could + * be accessed by the application. Accessing this memory location after the unmap function + * completes has an undefined behavior. + * \param [in] remap The reference to the remap object to unmap. + * \param [out] map_id The unique map identifier that was returned by \ref vxMapRemapPatch . + * \return A \ref vx_status_e enumeration. + * \retval VX_SUCCESS No errors; any other value indicates failure. 
+ * \retval VX_ERROR_INVALID_REFERENCE remap is not a valid \ref vx_remap reference. + * \retval VX_ERROR_INVALID_PARAMETERS An other parameter is incorrect. + * \ingroup group_remap + * \pre \ref vxMapRemapPatch with same map_id value +*/ +VX_API_ENTRY vx_status VX_API_CALL vxUnmapRemapPatch(vx_remap remap, vx_map_id map_id); + +/*! \brief Allows the application to copy a rectangular patch from/into a remap object. + * + * The patch is specified within the destination dimensions and its + * data provide the corresponding coordinate within the source dimensions. + * The patch in user memory is a 2D array of elements of the type associated with the + * \p coordinate_type parameter (i.e., \ref vx_coordinates2df_t for + * \ref VX_TYPE_COORDINATES2DF). + * The memory layout of this array follows a row-major order where rows are + * compact (without any gap between elements), and where the potential padding + * after each line is determined by the \p user_stride_y parameter. + + * \param [in] remap The reference to the remap object that is the source or the + * destination of the patch copy. + * + * \param [in] rect The coordinates of remap patch. The patch must be specified + * within the bounds of the remap destination dimensions + * (\ref VX_REMAP_DESTINATION_WIDTH x \ref VX_REMAP_DESTINATION_HEIGHT). + * (start_x, start_y) gives the coordinate of the topleft element inside the patch, + * while (end_x, end_y) gives the coordinate of the bottomright element out of the patch. + * + * \param [in] user_stride_y The difference between the address of the first element + * of two successive lines of the remap patch in user memory (pointed by + * \p user_ptr). The layout of the user memory must follow a row major order and user_stride_y + * must follow the following rule : + * user_stride_y >= sizeof() * (rect->end_x - rect->start_x). + * + * \param [in] user_ptr The address of the user memory location where to store the requested + * remap data if the copy was requested in read mode, or from where to get the remap data to + * store into the remap object if the copy was requested in write mode. \p user_ptr is the + * address of the the top-left element of the remap patch. + * The accessible user memory must be large enough to contain the specified patch with + * the specified layout: + * accessible memory in bytes >= (rect->end_y - rect->start_y) * user_stride_y. + * + * \param [in] user_coordinate_type This declares the type of the source coordinate remap + * data in the user memory. It must be \ref VX_TYPE_COORDINATES2DF. + * + * \param [in] usage This declares the effect of the copy with regard to the remap object + * using the \ref vx_accessor_e enumeration. Only VX_READ_ONLY and VX_WRITE_ONLY are + * supported: + * \arg \ref VX_READ_ONLY means that data is copied from the remap object into the user + * memory pointer by \p user_ptr. The potential padding after each line in user + * memory will stay unchanged. + * \arg \ref VX_WRITE_ONLY means that data is copied into the remap object from + * the user memory. + * + * \param [in] user_mem_type A \ref vx_memory_type_e enumeration that specifies + * the type of the memory pointer by \p user_ptr. + * + * \return A \ref vx_status_e enumeration. + * \retval VX_SUCCESS No errors; any other value indicates failure. + * \retval VX_ERROR_INVALID_REFERENCE remap is not a valid \ref vx_remap reference. + * \retval VX_ERROR_INVALID_PARAMETERS An other parameter is incorrect. 
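+ *
+ * For illustration, a sketch that writes one destination row of an identity mapping;
+ * it assumes \c remap was created with a destination width of 640:
+ * \code
+ * vx_coordinates2df_t row[640];
+ * for (vx_uint32 x = 0; x < 640; x++) { row[x].x = (vx_float32)x; row[x].y = 0.0f; }
+ * vx_rectangle_t rect = { 0, 0, 640, 1 };   // first row of the destination
+ * vxCopyRemapPatch(remap, &rect, 640 * sizeof(vx_coordinates2df_t), row,
+ *                  VX_TYPE_COORDINATES2DF, VX_WRITE_ONLY, VX_MEMORY_TYPE_HOST);
+ * \endcode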
+ * + * \ingroup group_remap +*/ +VX_API_ENTRY vx_status VX_API_CALL vxCopyRemapPatch(vx_remap remap, + const vx_rectangle_t *rect, + vx_size user_stride_y, + void * user_ptr, + vx_enum user_coordinate_type, + vx_enum usage, + vx_enum user_mem_type); + +/*! \brief Queries attributes from a Remap table. + * \param [in] table The remap to query. + * \param [in] attribute The attribute to query. Use a \ref vx_remap_attribute_e enumeration. + * \param [out] ptr The location at which to store the resulting value. + * \param [in] size The size in bytes of the container to which \a ptr points. + * \return A \ref vx_status_e enumeration. + * \retval VX_SUCCESS No errors; any other value indicates failure. + * \retval VX_ERROR_INVALID_REFERENCE table is not a valid \ref vx_remap reference. + * \ingroup group_remap + */ +VX_API_ENTRY vx_status VX_API_CALL vxQueryRemap(vx_remap table, vx_enum attribute, void *ptr, vx_size size); + +/*============================================================================== + ARRAY + =============================================================================*/ + +/*! + * \brief Creates a reference to an Array object. + * + * User must specify the Array capacity (i.e., the maximal number of items that the array can hold). + * + * \param [in] context The reference to the overall Context. + * \param [in] item_type The type of data to hold. Must be greater than + * \ref VX_TYPE_INVALID and less than or equal to \ref VX_TYPE_VENDOR_STRUCT_END. + * Or must be a \ref vx_enum returned from \ref vxRegisterUserStruct. + * \param [in] capacity The maximal number of items that the array can hold. This value must be greater than zero. + * + * \returns An array reference \ref vx_array. Any possible errors preventing a + * successful creation should be checked using \ref vxGetStatus. + * + * \ingroup group_array + */ +VX_API_ENTRY vx_array VX_API_CALL vxCreateArray(vx_context context, vx_enum item_type, vx_size capacity); + +/*! + * \brief Creates an opaque reference to a virtual Array with no direct user access. + * + * Virtual Arrays are useful when item type or capacity are unknown ahead of time + * and the Array is used as internal graph edge. Virtual arrays are scoped within the parent graph only. + * + * All of the following constructions are allowed. + * \code + * vx_context context = vxCreateContext(); + * vx_graph graph = vxCreateGraph(context); + * vx_array virt[] = { + * vxCreateVirtualArray(graph, 0, 0), // totally unspecified + * vxCreateVirtualArray(graph, VX_TYPE_KEYPOINT, 0), // unspecified capacity + * vxCreateVirtualArray(graph, VX_TYPE_KEYPOINT, 1000), // no access + * }; + * \endcode + * + * \param [in] graph The reference to the parent graph. + * \param [in] item_type The type of data to hold. Must be greater than + * \ref VX_TYPE_INVALID and less than or equal to \ref VX_TYPE_VENDOR_STRUCT_END. + * Or must be a \ref vx_enum returned from \ref vxRegisterUserStruct. + * This may to set to zero to indicate an unspecified item type. + * \param [in] capacity The maximal number of items that the array can hold. + * This may be to set to zero to indicate an unspecified capacity. + * \see vxCreateArray for a type list. + * \returns A array reference \ref vx_array. Any possible errors preventing a + * successful creation should be checked using \ref vxGetStatus. + * + * \ingroup group_array + */ +VX_API_ENTRY vx_array VX_API_CALL vxCreateVirtualArray(vx_graph graph, vx_enum item_type, vx_size capacity); + +/*! + * \brief Releases a reference of an Array object. 
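+ * A typical create/populate/release sequence might look like the following sketch,
+ * assuming an existing \c context (the keypoints are left zero-initialized):
+ * \code
+ * vx_keypoint_t kp[2] = { { 0 } };
+ * vx_array arr = vxCreateArray(context, VX_TYPE_KEYPOINT, 100);
+ * vxAddArrayItems(arr, 2, kp, sizeof(vx_keypoint_t));
+ * // ... use the array ...
+ * vxReleaseArray(&arr);    // the reference is zeroed on success
+ * \endcode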
+ * The object may not be garbage collected until its total reference count is zero. + * After returning from this function the reference is zeroed. + * \param [in] arr The pointer to the Array to release. + * \return A \ref vx_status_e enumeration. + * \retval VX_SUCCESS No errors; any other value indicates failure. + * \retval VX_ERROR_INVALID_REFERENCE arr is not a valid \ref vx_array reference. + * \ingroup group_array + */ +VX_API_ENTRY vx_status VX_API_CALL vxReleaseArray(vx_array *arr); + +/*! + * \brief Queries the Array for some specific information. + * + * \param [in] arr The reference to the Array. + * \param [in] attribute The attribute to query. Use a \ref vx_array_attribute_e. + * \param [out] ptr The location at which to store the resulting value. + * \param [in] size The size in bytes of the container to which \a ptr points. + * + * \return A \ref vx_status_e enumeration. + * \retval VX_SUCCESS No errors; any other value indicates failure. + * \retval VX_ERROR_INVALID_REFERENCE arr is not a valid \ref vx_array reference. + * \retval VX_ERROR_NOT_SUPPORTED If the \a attribute is not a value supported on this implementation. + * \retval VX_ERROR_INVALID_PARAMETERS If any of the other parameters are incorrect. + * + * \ingroup group_array + */ +VX_API_ENTRY vx_status VX_API_CALL vxQueryArray(vx_array arr, vx_enum attribute, void *ptr, vx_size size); + +/*! + * \brief Adds items to the Array. + * + * This function increases the container size. + * + * By default, the function does not reallocate memory, + * so if the container is already full (number of elements is equal to capacity) + * or it doesn't have enough space, + * the function returns \ref VX_FAILURE error code. + * + * \param [in] arr The reference to the Array. + * \param [in] count The total number of elements to insert. + * \param [in] ptr The location from which to read the input values. + * \param [in] stride The number of bytes between the beginning of two consecutive elements. + * + * \return A \ref vx_status_e enumeration. + * \retval VX_SUCCESS No errors; any other value indicates failure. + * \retval VX_ERROR_INVALID_REFERENCE arr is not a valid \ref vx_array reference. + * \retval VX_FAILURE If the Array is full. + * \retval VX_ERROR_INVALID_PARAMETERS If any of the other parameters are incorrect. + * + * \ingroup group_array + */ +VX_API_ENTRY vx_status VX_API_CALL vxAddArrayItems(vx_array arr, vx_size count, const void *ptr, vx_size stride); + +/*! + * \brief Truncates an Array (remove items from the end). + * + * \param [in,out] arr The reference to the Array. + * \param [in] new_num_items The new number of items for the Array. + * + * \return A \ref vx_status_e enumeration. + * \retval VX_SUCCESS No errors; any other value indicates failure. + * \retval VX_ERROR_INVALID_REFERENCE arr is not a valid \ref vx_array reference. + * \retval VX_ERROR_INVALID_PARAMETERS The \a new_size is greater than the current size. + * + * \ingroup group_array + */ +VX_API_ENTRY vx_status VX_API_CALL vxTruncateArray(vx_array arr, vx_size new_num_items); + +/*! \brief Allows the application to copy a range from/into an array object. + * \param [in] array The reference to the array object that is the source or the + * destination of the copy. + * \param [in] range_start The index of the first item of the array object to copy. + * \param [in] range_end The index of the item following the last item of the + * array object to copy. (range_end range_start) items are copied from index + * range_start included. 
The range must be within the bounds of the array: + * 0 <= range_start < range_end <= number of items in the array. + * \param [in] user_stride The number of bytes between the beginning of two consecutive + * items in the user memory pointed by user_ptr. The layout of the user memory must + * follow an item major order: + * user_stride >= element size in bytes. + * \param [in] user_ptr The address of the memory location where to store the requested data + * if the copy was requested in read mode, or from where to get the data to store into the array + * object if the copy was requested in write mode. The accessible memory must be large enough + * to contain the specified range with the specified stride: + * accessible memory in bytes >= (range_end range_start) * user_stride. + * \param [in] usage This declares the effect of the copy with regard to the array object + * using the \ref vx_accessor_e enumeration. Only \ref VX_READ_ONLY and \ref VX_WRITE_ONLY + * are supported: + * \arg \ref VX_READ_ONLY means that data are copied from the array object into the user memory. + * \arg \ref VX_WRITE_ONLY means that data are copied into the array object from the user memory. + * \param [in] user_mem_type A \ref vx_memory_type_e enumeration that specifies + * the memory type of the memory referenced by the user_addr. + * \return A \ref vx_status_e enumeration. + * \retval VX_SUCCESS No errors; any other value indicates failure. + * \retval VX_ERROR_OPTIMIZED_AWAY This is a reference to a virtual array that cannot be + * accessed by the application. + * \retval VX_ERROR_INVALID_REFERENCE array is not a valid \ref vx_array reference. + * \retval VX_ERROR_INVALID_PARAMETERS An other parameter is incorrect. + * \ingroup group_array + */ +VX_API_ENTRY vx_status VX_API_CALL vxCopyArrayRange(vx_array array, vx_size range_start, vx_size range_end, vx_size user_stride, void *user_ptr, vx_enum usage, vx_enum user_mem_type); + +/*! \brief Allows the application to get direct access to a range of an array object. + * \param [in] array The reference to the array object that contains the range to map. + * \param [in] range_start The index of the first item of the array object to map. + * \param [in] range_end The index of the item following the last item of the + * array object to map. (range_end range_start) items are mapped, starting from index + * range_start included. The range must be within the bounds of the array: + * Must be 0 <= range_start < range_end <= number of items. + * \param [out] map_id The address of a \ref vx_map_id variable where the function + * returns a map identifier. + * \arg (*map_id) must eventually be provided as the map_id parameter of a call to + * \ref vxUnmapArrayRange. + * \param [out] stride The address of a vx_size variable where the function + * returns the memory layout of the mapped array range. The function sets (*stride) + * to the number of bytes between the beginning of two consecutive items. + * The application must consult (*stride) to access the array items starting from + * address (*ptr). The layout of the mapped array follows an item major order: + * (*stride) >= item size in bytes. + * \param [out] ptr The address of a pointer that the function sets to the + * address where the requested data can be accessed. The returned (*ptr) address + * is only valid between the call to the function and the corresponding call to + * \ref vxUnmapArrayRange. + * \param [in] usage This declares the access mode for the array range, using + * the \ref vx_accessor_e enumeration. 
+ * \arg \ref VX_READ_ONLY: after the function call, the content of the memory location + * pointed by (*ptr) contains the array range data. Writing into this memory location + * is forbidden and its behavior is undefined. + * \arg \ref VX_READ_AND_WRITE: after the function call, the content of the memory + * location pointed by (*ptr) contains the array range data; writing into this memory + * is allowed only for the location of items and will result in a modification of the + * affected items in the array object once the range is unmapped. Writing into + * a gap between items (when (*stride) > item size in bytes) is forbidden and its + * behavior is undefined. + * \arg \ref VX_WRITE_ONLY: after the function call, the memory location pointed by (*ptr) + * contains undefined data; writing each item of the range is required prior to + * unmapping. Items not written by the application before unmap will become + * undefined after unmap, even if they were well defined before map. Like for + * VX_READ_AND_WRITE, writing into a gap between items is forbidden and its behavior + * is undefined. + * \param [in] mem_type A \ref vx_memory_type_e enumeration that + * specifies the type of the memory where the array range is requested to be mapped. + * \param [in] flags An integer that allows passing options to the map operation. + * Use the \ref vx_map_flag_e enumeration. + * \return A \ref vx_status_e enumeration. + * \retval VX_SUCCESS No errors; any other value indicates failure. + * \retval VX_ERROR_OPTIMIZED_AWAY This is a reference to a virtual array that cannot be + * accessed by the application. + * \retval VX_ERROR_INVALID_REFERENCE array is not a valid \ref vx_array reference. + * \retval VX_ERROR_INVALID_PARAMETERS An other parameter is incorrect. + * \ingroup group_array + * \post \ref vxUnmapArrayRange with same (*map_id) value. + */ +VX_API_ENTRY vx_status VX_API_CALL vxMapArrayRange(vx_array array, vx_size range_start, vx_size range_end, vx_map_id *map_id, vx_size *stride, void **ptr, vx_enum usage, vx_enum mem_type, vx_uint32 flags); + +/*! \brief Unmap and commit potential changes to an array object range that was previously mapped. + * Unmapping an array range invalidates the memory location from which the range could + * be accessed by the application. Accessing this memory location after the unmap function + * completes has an undefined behavior. + * \param [in] array The reference to the array object to unmap. + * \param [out] map_id The unique map identifier that was returned when calling + * \ref vxMapArrayRange . + * \return A \ref vx_status_e enumeration. + * \retval VX_SUCCESS No errors; any other value indicates failure. + * \retval VX_ERROR_INVALID_REFERENCE array is not a valid \ref vx_array reference. + * \retval VX_ERROR_INVALID_PARAMETERS An other parameter is incorrect. + * \ingroup group_array + * \pre \ref vxMapArrayRange returning the same map_id value + */ +VX_API_ENTRY vx_status VX_API_CALL vxUnmapArrayRange(vx_array array, vx_map_id map_id); + +/*! + * \brief Accesses a specific indexed element in an array. + * \param [in] ptr The base pointer for the array range. + * \param [in] index The index of the element, not byte, to access. + * \param [in] stride The 'number of bytes' between the beginning of two consecutive elements. + * \ingroup group_array + */ +#define vxFormatArrayPointer(ptr, index, stride) \ + (&(((vx_uint8*)(ptr))[(index) * (stride)])) + +/*! + * \brief Allows access to an array item as a typecast pointer deference. 
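+ * For example, a read-only traversal of a keypoint array named \c arr might look
+ * like this sketch:
+ * \code
+ * vx_size num = 0, stride = 0;
+ * vx_map_id map_id;
+ * void *base = NULL;
+ * vxQueryArray(arr, VX_ARRAY_NUMITEMS, &num, sizeof(num));
+ * if (num > 0 &&
+ *     vxMapArrayRange(arr, 0, num, &map_id, &stride, &base,
+ *                     VX_READ_ONLY, VX_MEMORY_TYPE_HOST, VX_NOGAP_X) == VX_SUCCESS)
+ * {
+ *     for (vx_size i = 0; i < num; i++)
+ *     {
+ *         vx_keypoint_t kp = vxArrayItem(vx_keypoint_t, base, i, stride);
+ *         // ... consume kp ...
+ *     }
+ *     vxUnmapArrayRange(arr, map_id);
+ * }
+ * \endcode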
+ * \param [in] type The type of the item to access. + * \param [in] ptr The base pointer for the array range. + * \param [in] index The index of the element, not byte, to access. + * \param [in] stride The 'number of bytes' between the beginning of two consecutive elements. + * \ingroup group_array + */ +#define vxArrayItem(type, ptr, index, stride) \ + (*(type *)(vxFormatArrayPointer((ptr), (index), (stride)))) + + +/*============================================================================== + OBJECT ARRAY + =============================================================================*/ +/*! + * \brief Creates a reference to an ObjectArray of count objects. + * + * It uses the metadata of the exemplar to determine the object attributes, + * ignoring the object data. It does not alter the exemplar or keep or release + * the reference to the exemplar. For the definition of supported attributes see + * \ref vxSetMetaFormatAttribute. In case the exemplar is a virtual object + * it must be of immutable metadata, thus it is not allowed to be dimensionless or formatless. + * + * \param [in] context The reference to the overall Context. + * \param [in] exemplar The exemplar object that defines the metadata of the created objects in the ObjectArray. + * \param [in] count Number of Objects to create in the ObjectArray. This value must be greater than zero. + * + * \returns An ObjectArray reference \ref vx_object_array. Any possible errors preventing a + * successful creation should be checked using \ref vxGetStatus. Data objects are not initialized by this function. + * + * \ingroup group_object_array + */ +VX_API_ENTRY vx_object_array VX_API_CALL vxCreateObjectArray(vx_context context, vx_reference exemplar, vx_size count); + +/*! + * \brief Creates an opaque reference to a virtual ObjectArray with no direct user access. + * + * This function creates an ObjectArray of count objects with similar behavior as + * \ref vxCreateObjectArray. The only difference is that the objects that are + * created are virtual in the given graph. + * + * \param [in] graph Reference to the graph where to create the virtual ObjectArray. + * \param [in] exemplar The exemplar object that defines the type of object in the ObjectArray. + * Only exemplar type of \ref vx_image, \ref vx_array and + * \ref vx_pyramid are allowed. + * \param [in] count Number of Objects to create in the ObjectArray. + * \returns A ObjectArray reference \ref vx_object_array. Any possible errors preventing a + * successful creation should be checked using \ref vxGetStatus. + * \ingroup group_object_array + */ +VX_API_ENTRY vx_object_array VX_API_CALL vxCreateVirtualObjectArray(vx_graph graph, vx_reference exemplar, vx_size count); + +/*! + * \brief Retrieves the reference to the OpenVX Object in location index of the ObjectArray. + * + * This is a vx_reference, which can be used elsewhere in OpenVX. A call to vxRelease or \ref vxReleaseReference + * is necessary to release the Object for each call to this function. + * + * \param [in] arr The ObjectArray. + * \param [in] index The index of the object in the ObjectArray. + * \return A reference to an OpenVX data object. Any possible errors preventing a successful + * completion of the function should be checked using \ref vxGetStatus. + * \ingroup group_object_array + */ +VX_API_ENTRY vx_reference VX_API_CALL vxGetObjectArrayItem(vx_object_array arr, vx_uint32 index); + +/*! + * \brief Releases a reference of an ObjectArray object. 
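+ * For illustration, a possible ObjectArray lifecycle sketch, assuming an existing
+ * \c context:
+ * \code
+ * vx_image exemplar = vxCreateImage(context, 640, 480, VX_DF_IMAGE_U8);
+ * vx_object_array frames = vxCreateObjectArray(context, (vx_reference)exemplar, 4);
+ * vx_image first = (vx_image)vxGetObjectArrayItem(frames, 0);
+ * // ... use 'first' ...
+ * vxReleaseImage(&first);      // release the reference obtained from the array
+ * vxReleaseImage(&exemplar);   // the exemplar is not retained by the array
+ * vxReleaseObjectArray(&frames);
+ * \endcode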
+ *
+ * The object may not be garbage collected until its total reference count and the
+ * reference counts of its contained objects are zero. After returning from this function the reference is zeroed/cleared.
+ *
+ * \param [in] arr The pointer to the ObjectArray to release.
+ * \return A \ref vx_status_e enumeration.
+ * \retval VX_SUCCESS No errors; any other value indicates failure.
+ * \retval VX_ERROR_INVALID_REFERENCE arr is not a valid \ref vx_object_array reference.
+ * \ingroup group_object_array
+ */
+VX_API_ENTRY vx_status VX_API_CALL vxReleaseObjectArray(vx_object_array *arr);
+
+/*!
+ * \brief Queries an attribute from the ObjectArray.
+ *
+ * \param [in] arr The reference to the ObjectArray.
+ * \param [in] attribute The attribute to query. Use a \ref vx_object_array_attribute_e.
+ * \param [out] ptr The location at which to store the resulting value.
+ * \param [in] size The size in bytes of the container to which \a ptr points.
+ * \return A \ref vx_status_e enumeration.
+ * \retval VX_SUCCESS No errors; any other value indicates failure.
+ * \retval VX_ERROR_INVALID_REFERENCE arr is not a valid \ref vx_object_array reference.
+ * \retval VX_ERROR_NOT_SUPPORTED If the \a attribute is not a value supported on this implementation.
+ * \retval VX_ERROR_INVALID_PARAMETERS If any of the other parameters are incorrect.
+ *
+ * \ingroup group_object_array
+ */
+VX_API_ENTRY vx_status VX_API_CALL vxQueryObjectArray(vx_object_array arr, vx_enum attribute, void *ptr, vx_size size);
+
+
+/*==============================================================================
+ META FORMAT
+ =============================================================================*/
+
+/*! \brief This function allows a user to set the attributes of a \ref vx_meta_format object in a kernel output validator.
+ *
+ * The \ref vx_meta_format object contains two types of information: data object meta data and
+ * some specific information that defines how the valid region of an image changes.
+ *
+ * The meta data attributes that can be set are identified by this list:
+ * - \ref vx_image : \ref VX_IMAGE_FORMAT, \ref VX_IMAGE_HEIGHT, \ref VX_IMAGE_WIDTH
+ * - \ref vx_array : \ref VX_ARRAY_CAPACITY, \ref VX_ARRAY_ITEMTYPE
+ * - \ref vx_pyramid : \ref VX_PYRAMID_FORMAT, \ref VX_PYRAMID_HEIGHT, \ref VX_PYRAMID_WIDTH, \ref VX_PYRAMID_LEVELS, \ref VX_PYRAMID_SCALE
+ * - \ref vx_scalar : \ref VX_SCALAR_TYPE
+ * - \ref vx_matrix : \ref VX_MATRIX_TYPE, \ref VX_MATRIX_ROWS, \ref VX_MATRIX_COLUMNS
+ * - \ref vx_distribution : \ref VX_DISTRIBUTION_BINS, \ref VX_DISTRIBUTION_OFFSET, \ref VX_DISTRIBUTION_RANGE
+ * - \ref vx_remap : \ref VX_REMAP_SOURCE_WIDTH, \ref VX_REMAP_SOURCE_HEIGHT, \ref VX_REMAP_DESTINATION_WIDTH, \ref VX_REMAP_DESTINATION_HEIGHT
+ * - \ref vx_lut : \ref VX_LUT_TYPE, \ref VX_LUT_COUNT
+ * - \ref vx_threshold : \ref VX_THRESHOLD_TYPE, \ref VX_THRESHOLD_INPUT_FORMAT, \ref VX_THRESHOLD_OUTPUT_FORMAT
+ * - \ref vx_object_array : \ref VX_OBJECT_ARRAY_NUMITEMS, \ref VX_OBJECT_ARRAY_ITEMTYPE
+ * - \ref vx_tensor : \ref VX_TENSOR_NUMBER_OF_DIMS, \ref VX_TENSOR_DIMS, \ref VX_TENSOR_DATA_TYPE, \ref VX_TENSOR_FIXED_POINT_POSITION
+ * - \ref VX_VALID_RECT_CALLBACK
+ * \note For vx_image, a specific attribute can be used to specify the valid region evolution. This information is not meta data.
+ *
+ * \param [in] meta The reference to the \ref vx_meta_format struct to set.
+ * \param [in] attribute Use the subset of data object attributes that define the meta data of this object or attributes from \ref vx_meta_format.
+ * \param [in] ptr The input pointer of the value to set on the meta format object. + * \param [in] size The size in bytes of the object to which \a ptr points. + * \ingroup group_user_kernels + * \return A \ref vx_status_e enumeration. + * \retval VX_SUCCESS The attribute was set; any other value indicates failure. + * \retval VX_ERROR_INVALID_REFERENCE meta is not a valid \ref vx_meta_format reference. + * \retval VX_ERROR_INVALID_PARAMETERS size was not correct for the type needed. + * \retval VX_ERROR_NOT_SUPPORTED the object attribute was not supported on the meta format object. + * \retval VX_ERROR_INVALID_TYPE attribute type did not match known meta format type. + */ +VX_API_ENTRY vx_status VX_API_CALL vxSetMetaFormatAttribute(vx_meta_format meta, vx_enum attribute, const void *ptr, vx_size size); + +/*! \brief Set a meta format object from an exemplar data object reference + * + * This function sets a \ref vx_meta_format object from the meta data of the exemplar + * + * \param [in] meta The meta format object to set + * \param [in] exemplar The exemplar data object. + * \ingroup group_user_kernels + * \return A \ref vx_status_e enumeration. + * \retval VX_SUCCESS The meta format was correctly set; any other value indicates failure. + * \retval VX_ERROR_INVALID_REFERENCE meta is not a valid \ref vx_meta_format reference, + * or exemplar is not a valid \ref vx_reference reference. + */ +VX_API_ENTRY vx_status VX_API_CALL vxSetMetaFormatFromReference(vx_meta_format meta, vx_reference exemplar); + +/*! \brief This function allows a user to query the attributes of a \ref vx_meta_format object in a kernel parameter. + * + * The \ref vx_meta_format object contains two types of information: data object meta data and + * some specific information that defines how the valid region of an image changes + * + * The meta data attributes that can be queried are identified by this list: + * - \ref vx_image : \ref VX_IMAGE_FORMAT, \ref VX_IMAGE_HEIGHT, \ref VX_IMAGE_WIDTH + * - \ref vx_array : \ref VX_ARRAY_CAPACITY, \ref VX_ARRAY_ITEMTYPE + * - \ref vx_pyramid : \ref VX_PYRAMID_FORMAT, \ref VX_PYRAMID_HEIGHT, \ref VX_PYRAMID_WIDTH, \ref VX_PYRAMID_LEVELS, \ref VX_PYRAMID_SCALE + * - \ref vx_scalar : \ref VX_SCALAR_TYPE + * - \ref vx_matrix : \ref VX_MATRIX_TYPE, \ref VX_MATRIX_ROWS, \ref VX_MATRIX_COLUMNS + * - \ref vx_distribution : \ref VX_DISTRIBUTION_BINS, \ref VX_DISTRIBUTION_OFFSET, \ref VX_DISTRIBUTION_RANGE + * - \ref vx_remap : \ref VX_REMAP_SOURCE_WIDTH, \ref VX_REMAP_SOURCE_HEIGHT, \ref VX_REMAP_DESTINATION_WIDTH, \ref VX_REMAP_DESTINATION_HEIGHT + * - \ref vx_lut : \ref VX_LUT_TYPE, \ref VX_LUT_COUNT + * - \ref vx_threshold : \ref VX_THRESHOLD_TYPE, \ref VX_THRESHOLD_INPUT_FORMAT, \ref VX_THRESHOLD_INPUT_FORMAT + * - \ref vx_object_array : \ref VX_OBJECT_ARRAY_NUMITEMS, \ref VX_OBJECT_ARRAY_ITEMTYPE + * - \ref vx_tensor : \ref VX_TENSOR_NUMBER_OF_DIMS, \ref VX_TENSOR_DIMS, \ref VX_TENSOR_DATA_TYPE, \ref VX_TENSOR_FIXED_POINT_POSITION + * - \ref VX_VALID_RECT_CALLBACK + * \note For vx_image, a specific attribute can be used to query the valid region evolution. This information is not a meta data. + * + * \param [in] meta The reference to the \ref vx_meta_format struct to query + * \param [in] attribute Use the subset of data object attributes that define the meta data of this object or attributes from \ref vx_meta_format. + * \param [out] ptr The output pointer of the value to query on the meta format object. + * \param [in] size The size in bytes of the object to which \a ptr points. 
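+ *
+ * As an illustration of how these meta format attributes are typically used, the
+ * sketch below shows a hypothetical output validator for a user kernel whose
+ * parameter 1 is assumed to be a fixed 640x480 U8 image:
+ * \code
+ * static vx_status VX_CALLBACK my_validator(vx_node node, const vx_reference parameters[],
+ *                                           vx_uint32 num, vx_meta_format metas[])
+ * {
+ *     vx_uint32 width = 640, height = 480;
+ *     vx_df_image format = VX_DF_IMAGE_U8;
+ *     (void)node; (void)parameters; (void)num;   // unused in this sketch
+ *     vxSetMetaFormatAttribute(metas[1], VX_IMAGE_WIDTH,  &width,  sizeof(width));
+ *     vxSetMetaFormatAttribute(metas[1], VX_IMAGE_HEIGHT, &height, sizeof(height));
+ *     vxSetMetaFormatAttribute(metas[1], VX_IMAGE_FORMAT, &format, sizeof(format));
+ *     return VX_SUCCESS;
+ * }
+ * \endcode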
+ * \ingroup group_import_kernel + * \return A \ref vx_status_e enumeration. + * \retval VX_SUCCESS The attribute was returned; any other value indicates failure. + * \retval VX_ERROR_INVALID_REFERENCE meta is not a valid \ref vx_meta_format reference. + * \retval VX_ERROR_INVALID_PARAMETERS size was not correct for the type needed. + * \retval VX_ERROR_NOT_SUPPORTED the object attribute was not supported on the meta format object. + * \retval VX_ERROR_INVALID_TYPE attribute type did not match known meta format type. + */ +VX_API_ENTRY vx_status VX_API_CALL vxQueryMetaFormatAttribute(vx_meta_format meta, vx_enum attribute, void *ptr, vx_size size); + +VX_API_ENTRY vx_status VX_API_CALL +vxConfigTarget( + vx_context context, + vx_int32 dp_amount, + vx_int32 mad_per_core, + vx_int32 conv_cores, + vx_int32 in_buffer_depth, + vx_int32 accum_buffer_height, + vx_int32 l2_cache_size, + vx_int32 tp_cores +); + +/*============================================================================== + TENSOR DATA FUNCTIONS +=============================================================================*/ +/*! \brief Creates an opaque reference to a tensor data buffer. + * \details Not guaranteed to exist until the \ref vx_graph containing it has been verified. + * Since functions using tensors, need to understand the context of each dimension. We describe a layout of the dimensions in each function using tensors. + * That layout is not mandatory. It is done specifically to explain the functions and not to mandate layout. Different implementation may have different layout. + * Therefore the layout description is logical and not physical. It refers to the order of dimensions given in this function. + * \param [in] context The reference to the implementation context. + * \param [in] number_of_dims The number of dimensions. + * \param [in] dims Dimensions sizes in elements. + * \param [in] data_type The \ref vx_type_e that represents the data type of the tensor data elements. + * \param [in] fixed_point_position Specifies the fixed point position when the input element type is integer. if 0, calculations are performed in integer math. + * \return A tensor data reference. Any possible errors preventing a + * successful creation should be checked using \ref vxGetStatus. + * \ingroup group_object_tensor + */ +VX_API_ENTRY vx_tensor VX_API_CALL vxCreateTensor(vx_context context, vx_size number_of_dims, const vx_size * dims, vx_enum data_type,vx_int8 fixed_point_position); + +/*! \brief Creates an array of images into the multi-dimension data, this can be adjacent 2D images or not depending on the stride value. + * The stride value is representing bytes in the third dimension. + * The OpenVX image object that points to a three dimension data and access it as an array of images. + * This has to be portion of the third lowest dimension, and the stride correspond to that third dimension. + * The returned Object array is an array of images. Where the image data is pointing to a specific memory in the input tensor. + * \param [in] tensor The tensor data from which to extract the images. Has to be a 3d tensor. + * \param [in] rect Image coordinates within tensor data. + * \param [in] array_size Number of images to extract. + * \param [in] jump Delta between two images in the array. + * \param [in] image_format The requested image format. Should match the tensor data's data type. + * \return An array of images pointing to the tensor data's data. 
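+ *
+ * For illustration of \ref vxCreateTensor above, a 4-dimensional Q8.8 tensor might be
+ * created as follows, assuming an existing \c context (the 224x224x3x1 shape is only
+ * an example):
+ * \code
+ * vx_size dims[4] = { 224, 224, 3, 1 };                             // width x height x channels x batch
+ * vx_tensor t = vxCreateTensor(context, 4, dims, VX_TYPE_INT16, 8); // 8 fractional bits (Q8.8)
+ * // Check creation with vxGetStatus((vx_reference)t).
+ * \endcode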
+ * \ingroup group_object_tensor + */ +VX_API_ENTRY vx_object_array VX_API_CALL vxCreateImageObjectArrayFromTensor(vx_tensor tensor, const vx_rectangle_t *rect, vx_size array_size, vx_size jump, vx_df_image image_format); + +/*! \brief Creates a tensor data from another tensor data given a view. This second + * reference refers to the data in the original tensor data. Updates to this tensor data + * update the parent tensor data. The view must be defined within the dimensions + * of the parent tensor data. + * \param [in] tensor The reference to the parent tensor data. + * \param [in] number_of_dims Number of dimensions in the view. An error is returned if this is 0 or greater than the number of + * tensor dimensions. If smaller than the number of tensor dimensions, the lower dimensions are assumed. + * \param [in] view_start View start coordinates. + * \param [in] view_end View end coordinates. + * \return The reference to the sub-tensor. Any possible errors preventing a + * successful creation should be checked using \ref vxGetStatus. + * \ingroup group_object_tensor + */ +VX_API_ENTRY vx_tensor VX_API_CALL vxCreateTensorFromView(vx_tensor tensor, vx_size number_of_dims, const vx_size * view_start, const vx_size * view_end); + +/*! \brief Creates an opaque reference to a tensor data buffer with no direct + * user access. This function allows setting the tensor data dimensions or data format. + * \details Virtual data objects allow users to connect various nodes within a + * graph via data references without access to that data, but they also permit the + * implementation to take maximum advantage of possible optimizations. Use this + * API to create a data reference to link two or more nodes together when the + * intermediate data are not required to be accessed by outside entities. This API + * in particular allows the user to define the tensor data format of the data without + * requiring the exact dimensions. Virtual objects are scoped within the graph + * they are declared a part of, and can't be shared outside of this scope. + * Since functions using tensors need to understand the context of each dimension, we describe a layout of the dimensions in each function. + * That layout is not mandated. It is done specifically to explain the functions and not to mandate layout. Different implementations may have different layouts. + * Therefore the layout description is logical and not physical. It refers to the order of dimensions given in \ref vxCreateTensor and \ref vxCreateVirtualTensor. + * \param [in] graph The reference to the parent graph. + * \param [in] number_of_dims The number of dimensions. + * \param [in] dims Dimensions sizes in elements. + * \param [in] data_type The \ref vx_type_e that represents the data type of the tensor data elements. + * \param [in] fixed_point_position Specifies the fixed point position when the input element type is integer. If 0, calculations are performed in integer math. + * \return A tensor data reference. Any possible errors preventing a + * successful creation should be checked using \ref vxGetStatus. + * \note Passing this reference to \ref vxCopyTensorPatch will return an error. + * \ingroup group_object_tensor + */ +VX_API_ENTRY vx_tensor VX_API_CALL vxCreateVirtualTensor(vx_graph graph, vx_size number_of_dims, const vx_size *dims, vx_enum data_type, vx_int8 fixed_point_position); + + +/*! \brief Creates a reference to a tensor object that was externally allocated. + * \param [in] context The reference to the implementation context. 
+ * \param [in] number_of_dims The number of dimensions. + * \param [in] dims Dimensions sizes in elements. + * \param [in] data_type The \ref vx_type_e that represents the data type of the tensor data elements. + * \param [in] fixed_point_position Specifies the fixed point position when the input element type is integer. If 0, calculations are performed in integer math. + * \param [in] stride An array of stride in all dimensions in bytes. The stride value at index 0 must be the size of the tensor data element type. + * \param [in] ptr The platform-defined reference to tensor. See note below. + * \param [in] memory_type \ref vx_memory_type_e. When giving \ref VX_MEMORY_TYPE_HOST + * the \a ptr is assumed to be a HOST accessible pointer to memory. + * \return A tensor data reference. Any possible errors preventing a + * successful creation should be checked using \ref vxGetStatus. + * \note The user must call vxMapTensorPatch prior to accessing the elements of a tensor, even if the + * tensor was created via \ref vxCreateTensorFromHandle. Reads or writes to memory referenced + * by ptr after calling \ref vxCreateTensorFromHandle without first calling + * \ref vxMapTensorPatch will result in undefined behavior. + * The ownership of stride[] and ptr is kept by the caller (this means that the implementation will + * make an internal copy of the provided information; \a stride and \a ptr can then simply be the application's + * local variables). + * + * In order to release the tensor back to the application, use \ref vxSwapTensorHandle. + * + * \ingroup group_object_tensor + */ +VX_API_ENTRY vx_tensor VX_API_CALL vxCreateTensorFromHandle(vx_context context, vx_size number_of_dims, const vx_size *dims, vx_enum data_type, vx_int8 fixed_point_position, const vx_size * stride, void * ptr, vx_enum memory_type); + +/*! \brief Swaps the tensor handle of a tensor previously created from handle. + * + * This function sets the new tensor handle + * and returns the previous one. + * + * Once this function call has completed, the application gets back the + * ownership of the memory referenced by the previous handle. This memory + * contains up-to-date tensor data, and the application can safely reuse or + * release it. + * + * The memory referenced by the new handle must have been allocated + * consistently with the tensor properties since the import type, + * memory layout and dimensions are unchanged (see stride and + * memory_type in \ref vxCreateTensorFromHandle). + * + * All tensors created from view with this tensor as parent or ancestor + * will automatically use the memory referenced by the new handle. + * + * The behavior of \ref vxSwapTensorHandle when called from a user node is undefined. + * \param [in] tensor The reference to a tensor created from handle. + * \param [in] new_ptr The new tensor handle. + * If new_ptr is NULL, the previous tensor storage memory is reclaimed by the + * caller, while no new handle is provided. + * \param [out] prev_ptr Pointer to return the previous tensor handle. + * If prev_ptr is NULL, the previous handle is not returned. + * \return A \ref vx_status_e enumeration. + * \retval VX_SUCCESS No errors. + * \retval VX_ERROR_INVALID_REFERENCE tensor is not a valid \ref vx_tensor + * reference. + * \retval VX_ERROR_INVALID_PARAMETERS The tensor was not created from handle or + * the content of new_ptr is not valid. + * \retval VX_FAILURE The tensor was already being accessed. 
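+ *
+ * An illustrative sketch (not part of the original header); the buffer names are placeholders:
+ * \code
+ * // 'tensor' was created with vxCreateTensorFromHandle() on top of buffer_a.
+ * void *previous = NULL;
+ * // Hand buffer_b to the tensor and take back ownership of the old buffer.
+ * vx_status status = vxSwapTensorHandle(tensor, buffer_b, &previous);
+ * // On VX_SUCCESS, 'previous' points at buffer_a and may be reused or freed.
+ * \endcode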
+ * \ingroup group_tensor + */ +VX_API_ENTRY vx_status VX_API_CALL vxSwapTensorHandle(vx_tensor tensor, void* new_ptr, void** prev_ptr); + +/*! \brief Allows the application to copy a view patch from/into a tensor object. + * \param [in] tensor The reference to the tensor object that is the source or the + * destination of the copy. + * \param [in] number_of_dims Number of patch dimensions. An error is returned if this is 0 or greater than the number of + * tensor dimensions. If smaller than the number of tensor dimensions, the lower dimensions are assumed. + * \param [in] view_start Array of patch start points in each dimension. + * \param [in] view_end Array of patch end points in each dimension. + * \param [in] user_stride Array of user memory strides in each dimension. + * \param [in] user_ptr The address of the memory location where to store the requested data + * if the copy was requested in read mode, or from where to get the data to store into the tensor + * object if the copy was requested in write mode. The accessible memory must be large enough + * to contain the specified patch with the specified layout:\n + * accessible memory in bytes >= (end[last_dimension] - start[last_dimension]) * stride[last_dimension].\n + * The layout of the user memory must follow a row major order. + * \param [in] usage This declares the effect of the copy with regard to the tensor object + * using the \ref vx_accessor_e enumeration. Only \ref VX_READ_ONLY and \ref VX_WRITE_ONLY are supported: + * \arg \ref VX_READ_ONLY means that data is copied from the tensor object into the application memory + * \arg \ref VX_WRITE_ONLY means that data is copied into the tensor object from the application memory + * \param [in] user_memory_type A \ref vx_memory_type_e enumeration that specifies + * the memory type of the memory referenced by user_ptr. + * \return A \ref vx_status_e enumeration. + * \retval VX_ERROR_OPTIMIZED_AWAY This is a reference to a virtual tensor that cannot be + * accessed by the application. + * \retval VX_ERROR_INVALID_REFERENCE The tensor reference is not actually a tensor reference. + * \retval VX_ERROR_INVALID_PARAMETERS Another parameter is incorrect. + * \ingroup group_object_tensor + */ +VX_API_ENTRY vx_status VX_API_CALL vxCopyTensorPatch(vx_tensor tensor, vx_size number_of_dims, const vx_size * view_start, const vx_size * view_end, + const vx_size * user_stride, void * user_ptr, vx_enum usage, vx_enum user_memory_type); + +/*! \brief Allows the application to copy a view patch from/into a tensor object. + * \param [in] tensor The reference to the tensor object that is the source or the + * destination of the copy. + * \param [in] number_of_dims Number of patch dimensions. An error is returned if this is 0 or greater than the number of + * tensor dimensions. If smaller than the number of tensor dimensions, the lower dimensions are assumed. + * \param [in] view_start Array of patch start points in each dimension. + * \param [in] view_end Array of patch end points in each dimension. + * \param [in] tensorpatch_addressing Pointer to parameter of type \ref vx_tensorpatch_addressing_t. + * \param [in] user_ptr The address of the memory location where to store the requested data + * if the copy was requested in read mode, or from where to get the data to store into the tensor + * object if the copy was requested in write mode. 
The accessible memory must be large enough + * to contain the specified patch with the specified layout:\n + * accessible memory in bytes >= (end[last_dimension] - start[last_dimension]) * stride[last_dimension].\n + * The layout of the user memory must follow a row major order. + * \param [in] usage This declares the effect of the copy with regard to the tensor object + * using the \ref vx_accessor_e enumeration. Only \ref VX_READ_ONLY and \ref VX_WRITE_ONLY are supported: + * \arg \ref VX_READ_ONLY means that data is copied from the tensor object into the application memory + * \arg \ref VX_WRITE_ONLY means that data is copied into the tensor object from the application memory + * \param [in] user_memory_type A \ref vx_memory_type_e enumeration that specifies + * the memory type of the memory referenced by user_ptr. + * \return A \ref vx_status_e enumeration. + * \retval VX_ERROR_OPTIMIZED_AWAY This is a reference to a virtual tensor that cannot be + * accessed by the application. + * \retval VX_ERROR_INVALID_REFERENCE The tensor reference is not actually a tensor reference. + * \retval VX_ERROR_INVALID_PARAMETERS Another parameter is incorrect. + * \ingroup group_object_tensor + */ +VX_API_ENTRY vx_status VX_API_CALL vxCopyTensorPatch2(vx_tensor tensor, vx_size number_of_dims, const vx_size * view_start, const vx_size * view_end, + const vx_tensorpatch_addressing_t * addressing, vx_size size_of_addressing, void * user_ptr, vx_enum usage, vx_enum user_memory_type); + +/*! \brief Allows the application to get direct access to a patch of a tensor object. + * \param [in] tensor The reference to the tensor object that is the source or the + * destination for direct access. + * \param [in] number_of_dims The number of dimensions. Must be the same as the tensor's number_of_dims. + * \param [in] view_start Array of patch start points in each dimension. This is an optional parameter and will be zero when NULL. + * \param [in] view_end Array of patch end points in each dimension. This is an optional parameter and will be dims[] of the tensor when NULL. + * \param [out] map_id The address of a vx_map_id variable where the function returns a map identifier. + * \arg (*map_id) must eventually be provided as the map_id parameter of a call to \ref vxUnmapTensorPatch. + * \param [out] stride An array of stride in all dimensions in bytes. The stride value at index 0 must be the size of the tensor data element type. + * \param [out] ptr The address of a pointer that the function sets to the + * address where the requested data can be accessed. The returned (*ptr) address + * is only valid between the call to the function and the corresponding call to + * \ref vxUnmapTensorPatch. + * \param [in] usage This declares the access mode for the tensor patch, using + * the \ref vx_accessor_e enumeration. + * \arg VX_READ_ONLY: after the function call, the content of the memory location + * pointed by (*ptr) contains the tensor patch data. Writing into this memory location + * is forbidden and its behavior is undefined. + * \arg VX_READ_AND_WRITE: after the function call, the content of the memory + * location pointed by (*ptr) contains the tensor patch data; writing into this memory + * is allowed only for the location of items and will result in a modification of the + * affected items in the tensor object once the range is unmapped. Writing into + * a gap between items (when (*stride) > item size in bytes) is forbidden and its + * behavior is undefined. 
+ * \arg VX_WRITE_ONLY: after the function call, the memory location pointed by (*ptr) + * contains undefined data; writing each item of the range is required prior to + * unmapping. Items not written by the application before unmap will become + * undefined after unmap, even if they were well defined before map. Like for + * VX_READ_AND_WRITE, writing into a gap between items is forbidden and its behavior + * is undefined. + * \param [in] mem_type A \ref vx_memory_type_e enumeration that + * specifies the type of the memory where the tensor patch is requested to be mapped. + * \return A \ref vx_status_e enumeration. + * \retval VX_ERROR_OPTIMIZED_AWAY This is a reference to a virtual tensor that cannot be accessed by the application. + * \retval VX_ERROR_INVALID_REFERENCE The tensor reference is not actually a tensor reference. + * \retval VX_ERROR_INVALID_PARAMETERS Another parameter is incorrect. + * \retval VX_ERROR_NO_MEMORY Internal memory allocation failed. + * \ingroup group_tensor + * \post \ref vxUnmapTensorPatch with the same (*map_id) value. + */ +VX_API_ENTRY vx_status VX_API_CALL vxMapTensorPatch(vx_tensor tensor, vx_size number_of_dims, const vx_size * view_start, const vx_size * view_end, vx_map_id * map_id, vx_size * stride, void ** ptr, vx_enum usage, vx_enum mem_type); + +/*! \brief Unmaps and commits potential changes to a tensor object patch that was previously mapped. + * Unmapping a tensor patch invalidates the memory location from which the patch could + * be accessed by the application. Accessing this memory location after the unmap function + * completes has an undefined behavior. + * \param [in] tensor The reference to the tensor object to unmap. + * \param [in] map_id The unique map identifier that was returned when calling + * \ref vxMapTensorPatch. + * \return A \ref vx_status_e enumeration. + * \retval VX_ERROR_INVALID_REFERENCE The tensor reference is not actually a tensor reference. + * \retval VX_ERROR_INVALID_PARAMETERS Another parameter is incorrect. + * \ingroup group_tensor + * \pre \ref vxMapTensorPatch returning the same map_id value. + */ +VX_API_ENTRY vx_status VX_API_CALL vxUnmapTensorPatch(vx_tensor tensor, const vx_map_id map_id); + +/*! \brief Retrieves various attributes of a tensor data. + * \param [in] tensor The reference to the tensor data to query. + * \param [in] attribute The attribute to query. Use a \ref vx_tensor_attribute_e. + * \param [out] ptr The location at which to store the resulting value. + * \param [in] size The size of the container to which \a ptr points. + * \return A \ref vx_status_e enumeration. + * \retval VX_SUCCESS No errors. + * \retval VX_ERROR_INVALID_REFERENCE If data is not a \ref vx_tensor. + * \retval VX_ERROR_INVALID_PARAMETERS If any of the other parameters are incorrect. + * \ingroup group_object_tensor + */ +VX_API_ENTRY vx_status VX_API_CALL vxQueryTensor(vx_tensor tensor, vx_enum attribute, void *ptr, vx_size size); + +/*! \brief Releases a reference to a tensor data object. + * The object may not be garbage collected until its total reference count is zero. + * \param [in] tensor The pointer to the tensor data to release. + * \post After returning from this function the reference is zeroed. + * \return A \ref vx_status_e enumeration. + * \retval VX_SUCCESS No errors; all other values indicate failure. + * \retval * An error occurred. See \ref vx_status_e. 
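+ *
+ * An illustrative sketch (not part of the original header) of a typical query-then-release
+ * sequence; the variable names are placeholders:
+ * \code
+ * vx_size num_dims = 0;
+ * vxQueryTensor(tensor, VX_TENSOR_NUMBER_OF_DIMS, &num_dims, sizeof(num_dims));
+ * // ... use the tensor ...
+ * vxReleaseTensor(&tensor); // the reference is zeroed on return
+ * \endcode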
+ * \ingroup group_object_tensor + */ +VX_API_ENTRY vx_status VX_API_CALL vxReleaseTensor(vx_tensor *tensor); + +#ifdef __cplusplus +} +#endif + +#endif diff --git a/unified-tina/inc/VX/vx_compatibility.h b/unified-tina/inc/VX/vx_compatibility.h new file mode 100644 index 0000000..293fde4 --- /dev/null +++ b/unified-tina/inc/VX/vx_compatibility.h @@ -0,0 +1,253 @@ +/* + + * Copyright (c) 2012-2017 The Khronos Group Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef VX_1_0_1_NAMING_COMPATIBILITY +#define VX_1_0_1_NAMING_COMPATIBILITY + +#define VX_TYPE_SCALAR_MAX (VX_TYPE_BOOL + 1) + +#define vx_border_mode_e vx_border_e +#define vx_border_mode_policy_e vx_border_policy_e +#define _vx_border_mode_t _vx_border_t +#define vx_border_mode_t vx_border_t + +#define VX_ENUM_BORDER_MODE VX_ENUM_BORDER +#define VX_BORDER_MODE_POLICY VX_BORDER_POLICY +#define VX_BORDER_MODE_UNDEFINED VX_BORDER_UNDEFINED +#define VX_BORDER_MODE_CONSTANT VX_BORDER_CONSTANT +#define VX_BORDER_MODE_REPLICATE VX_BORDER_REPLICATE +#define VX_BORDER_MODE_UNSUPPORTED_POLICY_DEFAULT_TO_UNDEFINED VX_BORDER_POLICY_DEFAULT_TO_UNDEFINED +#define VX_BORDER_MODE_UNSUPPORTED_POLICY_RETURN_ERROR VX_BORDER_POLICY_RETURN_ERROR + +#define VX_REF_ATTRIBUTE_COUNT VX_REFERENCE_COUNT +#define VX_REF_ATTRIBUTE_TYPE VX_REFERENCE_TYPE +#define VX_REF_ATTRIBUTE_NAME VX_REFERENCE_NAME + +#define VX_CONTEXT_ATTRIBUTE_VENDOR_ID VX_CONTEXT_VENDOR_ID +#define VX_CONTEXT_ATTRIBUTE_VERSION VX_CONTEXT_VERSION +#define VX_CONTEXT_ATTRIBUTE_UNIQUE_KERNELS VX_CONTEXT_UNIQUE_KERNELS +#define VX_CONTEXT_ATTRIBUTE_MODULES VX_CONTEXT_MODULES +#define VX_CONTEXT_ATTRIBUTE_REFERENCES VX_CONTEXT_REFERENCES +#define VX_CONTEXT_ATTRIBUTE_IMPLEMENTATION VX_CONTEXT_IMPLEMENTATION +#define VX_CONTEXT_ATTRIBUTE_EXTENSIONS_SIZE VX_CONTEXT_EXTENSIONS_SIZE +#define VX_CONTEXT_ATTRIBUTE_EXTENSIONS VX_CONTEXT_EXTENSIONS +#define VX_CONTEXT_ATTRIBUTE_CONVOLUTION_MAXIMUM_DIMENSION VX_CONTEXT_CONVOLUTION_MAX_DIMENSION +#define VX_CONTEXT_ATTRIBUTE_OPTICAL_FLOW_WINDOW_MAXIMUM_DIMENSION VX_CONTEXT_OPTICAL_FLOW_MAX_WINDOW_DIMENSION +#define VX_CONTEXT_ATTRIBUTE_IMMEDIATE_BORDER_MODE VX_CONTEXT_IMMEDIATE_BORDER +#define VX_CONTEXT_ATTRIBUTE_UNIQUE_KERNEL_TABLE VX_CONTEXT_UNIQUE_KERNEL_TABLE + +#define VX_KERNEL_ATTRIBUTE_PARAMETERS VX_KERNEL_PARAMETERS +#define VX_KERNEL_ATTRIBUTE_NAME VX_KERNEL_NAME +#define VX_KERNEL_ATTRIBUTE_ENUM VX_KERNEL_ENUM +#define VX_KERNEL_ATTRIBUTE_LOCAL_DATA_SIZE VX_KERNEL_LOCAL_DATA_SIZE +#define VX_KERNEL_ATTRIBUTE_LOCAL_DATA_PTR (VX_ATTRIBUTE_BASE(VX_ID_KHRONOS, VX_TYPE_KERNEL) + 0x4) + +#define VX_NODE_ATTRIBUTE_STATUS VX_NODE_STATUS +#define VX_NODE_ATTRIBUTE_PERFORMANCE VX_NODE_PERFORMANCE +#define VX_NODE_ATTRIBUTE_BORDER_MODE VX_NODE_BORDER +#define VX_NODE_ATTRIBUTE_LOCAL_DATA_SIZE VX_NODE_LOCAL_DATA_SIZE +#define VX_NODE_ATTRIBUTE_LOCAL_DATA_PTR VX_NODE_LOCAL_DATA_PTR + +#define VX_PARAMETER_ATTRIBUTE_INDEX VX_PARAMETER_INDEX +#define VX_PARAMETER_ATTRIBUTE_DIRECTION VX_PARAMETER_DIRECTION +#define 
VX_PARAMETER_ATTRIBUTE_TYPE VX_PARAMETER_TYPE +#define VX_PARAMETER_ATTRIBUTE_STATE VX_PARAMETER_STATE +#define VX_PARAMETER_ATTRIBUTE_REF VX_PARAMETER_REF + +#define VX_IMAGE_ATTRIBUTE_WIDTH VX_IMAGE_WIDTH +#define VX_IMAGE_ATTRIBUTE_HEIGHT VX_IMAGE_HEIGHT +#define VX_IMAGE_ATTRIBUTE_FORMAT VX_IMAGE_FORMAT +#define VX_IMAGE_ATTRIBUTE_PLANES VX_IMAGE_PLANES +#define VX_IMAGE_ATTRIBUTE_SPACE VX_IMAGE_SPACE +#define VX_IMAGE_ATTRIBUTE_RANGE VX_IMAGE_RANGE +#define VX_IMAGE_ATTRIBUTE_SIZE VX_IMAGE_SIZE + +#define VX_SCALAR_ATTRIBUTE_TYPE VX_SCALAR_TYPE + +#define VX_GRAPH_ATTRIBUTE_NUMNODES VX_GRAPH_NUMNODES +#define VX_GRAPH_ATTRIBUTE_STATUS (VX_ATTRIBUTE_BASE(VX_ID_KHRONOS, VX_TYPE_GRAPH) + 0x1) +#define VX_GRAPH_ATTRIBUTE_PERFORMANCE VX_GRAPH_PERFORMANCE +#define VX_GRAPH_ATTRIBUTE_NUMPARAMETERS VX_GRAPH_NUMPARAMETERS + +#define VX_LUT_ATTRIBUTE_TYPE VX_LUT_TYPE +#define VX_LUT_ATTRIBUTE_COUNT VX_LUT_COUNT +#define VX_LUT_ATTRIBUTE_SIZE VX_LUT_SIZE + +#define VX_DISTRIBUTION_ATTRIBUTE_DIMENSIONS VX_DISTRIBUTION_DIMENSIONS +#define VX_DISTRIBUTION_ATTRIBUTE_OFFSET VX_DISTRIBUTION_OFFSET +#define VX_DISTRIBUTION_ATTRIBUTE_RANGE VX_DISTRIBUTION_RANGE +#define VX_DISTRIBUTION_ATTRIBUTE_BINS VX_DISTRIBUTION_BINS +#define VX_DISTRIBUTION_ATTRIBUTE_WINDOW VX_DISTRIBUTION_WINDOW +#define VX_DISTRIBUTION_ATTRIBUTE_SIZE VX_DISTRIBUTION_SIZE + +#define VX_THRESHOLD_ATTRIBUTE_TYPE VX_THRESHOLD_TYPE +#define VX_THRESHOLD_ATTRIBUTE_THRESHOLD_VALUE VX_THRESHOLD_THRESHOLD_VALUE +#define VX_THRESHOLD_ATTRIBUTE_THRESHOLD_LOWER VX_THRESHOLD_THRESHOLD_LOWER +#define VX_THRESHOLD_ATTRIBUTE_THRESHOLD_UPPER VX_THRESHOLD_THRESHOLD_UPPER +#define VX_THRESHOLD_ATTRIBUTE_TRUE_VALUE VX_THRESHOLD_TRUE_VALUE +#define VX_THRESHOLD_ATTRIBUTE_FALSE_VALUE VX_THRESHOLD_FALSE_VALUE +#define VX_THRESHOLD_ATTRIBUTE_DATA_TYPE VX_THRESHOLD_DATA_TYPE + +#define VX_MATRIX_ATTRIBUTE_TYPE VX_MATRIX_TYPE +#define VX_MATRIX_ATTRIBUTE_ROWS VX_MATRIX_ROWS +#define VX_MATRIX_ATTRIBUTE_COLUMNS VX_MATRIX_COLUMNS +#define VX_MATRIX_ATTRIBUTE_SIZE VX_MATRIX_SIZE + +#define VX_CONVOLUTION_ATTRIBUTE_ROWS VX_CONVOLUTION_ROWS +#define VX_CONVOLUTION_ATTRIBUTE_COLUMNS VX_CONVOLUTION_COLUMNS +#define VX_CONVOLUTION_ATTRIBUTE_SCALE VX_CONVOLUTION_SCALE +#define VX_CONVOLUTION_ATTRIBUTE_SIZE VX_CONVOLUTION_SIZE + +#define VX_PYRAMID_ATTRIBUTE_LEVELS VX_PYRAMID_LEVELS +#define VX_PYRAMID_ATTRIBUTE_SCALE VX_PYRAMID_SCALE +#define VX_PYRAMID_ATTRIBUTE_WIDTH VX_PYRAMID_WIDTH +#define VX_PYRAMID_ATTRIBUTE_HEIGHT VX_PYRAMID_HEIGHT +#define VX_PYRAMID_ATTRIBUTE_FORMAT VX_PYRAMID_FORMAT + +#define VX_REMAP_ATTRIBUTE_SOURCE_WIDTH VX_REMAP_SOURCE_WIDTH +#define VX_REMAP_ATTRIBUTE_SOURCE_HEIGHT VX_REMAP_SOURCE_HEIGHT +#define VX_REMAP_ATTRIBUTE_DESTINATION_WIDTH VX_REMAP_DESTINATION_WIDTH +#define VX_REMAP_ATTRIBUTE_DESTINATION_HEIGHT VX_REMAP_DESTINATION_HEIGHT + +#define VX_ARRAY_ATTRIBUTE_ITEMTYPE VX_ARRAY_ITEMTYPE +#define VX_ARRAY_ATTRIBUTE_NUMITEMS VX_ARRAY_NUMITEMS +#define VX_ARRAY_ATTRIBUTE_CAPACITY VX_ARRAY_CAPACITY +#define VX_ARRAY_ATTRIBUTE_ITEMSIZE VX_ARRAY_ITEMSIZE + +#define VX_DELAY_ATTRIBUTE_TYPE VX_DELAY_TYPE +#define VX_DELAY_ATTRIBUTE_SLOTS VX_DELAY_SLOTS + +#define VX_INTERPOLATION_TYPE_AREA VX_INTERPOLATION_AREA +#define VX_INTERPOLATION_TYPE_BILINEAR VX_INTERPOLATION_BILINEAR +#define VX_INTERPOLATION_TYPE_NEAREST_NEIGHBOR VX_INTERPOLATION_NEAREST_NEIGHBOR + +#define VX_IMAGE_SIZE (VX_ATTRIBUTE_BASE(VX_ID_KHRONOS, VX_TYPE_IMAGE) + 0x6) + +#define VX_META_FORMAT_ATTRIBUTE_DELTA_RECTANGLE (VX_ATTRIBUTE_BASE(VX_ID_KHRONOS, 
VX_TYPE_META_FORMAT) + 0x0) +#define VX_HINT_SERIALIZE (VX_ENUM_BASE(VX_ID_KHRONOS, VX_ENUM_HINT) + 0x0) + +#define vx_import_type_e vx_memory_type_e +#define VX_ENUM_IMPORT_MEM VX_ENUM_MEMORY_TYPE +#define VX_IMPORT_TYPE_NONE VX_MEMORY_TYPE_NONE +#define VX_IMPORT_TYPE_HOST VX_MEMORY_TYPE_HOST + +#define VX_TYPE_OBJECT_MAX (VX_TYPE_WEIGHTS_BIASES_PARAMETER_BASE + 1) /*TODO: check it for OpenVX 1.2*/ +#define VX_TYPE_STRUCT_MAX VX_TYPE_KHRONOS_STRUCT_MAX + +#define VX_KERNEL_INVALID (VX_KERNEL_BASE(VX_ID_KHRONOS, VX_LIBRARY_KHR_BASE) + 0x0) +#define VX_KERNEL_ACCUMULATE (VX_KERNEL_BASE(VX_ID_KHRONOS, VX_LIBRARY_KHR_BASE) + 0x16) +#define VX_KERNEL_ACCUMULATE_WEIGHTED (VX_KERNEL_BASE(VX_ID_KHRONOS, VX_LIBRARY_KHR_BASE) + 0x17) +#define VX_KERNEL_ACCUMULATE_SQUARE (VX_KERNEL_BASE(VX_ID_KHRONOS, VX_LIBRARY_KHR_BASE) + 0x18) + +#define VX_THRESHOLD_THRESHOLD_VALUE (VX_ATTRIBUTE_BASE(VX_ID_KHRONOS, VX_TYPE_THRESHOLD) + 0x1) +#define VX_THRESHOLD_THRESHOLD_LOWER (VX_ATTRIBUTE_BASE(VX_ID_KHRONOS, VX_TYPE_THRESHOLD) + 0x2) +#define VX_THRESHOLD_THRESHOLD_UPPER (VX_ATTRIBUTE_BASE(VX_ID_KHRONOS, VX_TYPE_THRESHOLD) + 0x3) +#define VX_THRESHOLD_TRUE_VALUE (VX_ATTRIBUTE_BASE(VX_ID_KHRONOS, VX_TYPE_THRESHOLD) + 0x4) +#define VX_THRESHOLD_FALSE_VALUE (VX_ATTRIBUTE_BASE(VX_ID_KHRONOS, VX_TYPE_THRESHOLD) + 0x5) +#define VX_THRESHOLD_DATA_TYPE (VX_ATTRIBUTE_BASE(VX_ID_KHRONOS, VX_TYPE_THRESHOLD) + 0x6) + +#define VX_BIDIRECTIONAL (VX_ENUM_BASE(VX_ID_KHRONOS, VX_ENUM_DIRECTION) + 0x2) + +typedef vx_status(VX_CALLBACK *vx_kernel_input_validate_f)(vx_node node, vx_uint32 index); + +typedef vx_status(VX_CALLBACK *vx_kernel_output_validate_f)(vx_node node, vx_uint32 index, vx_meta_format meta); + +typedef struct _vx_delta_rectangle_t { + vx_int32 delta_start_x; /*!< \brief The change in the start x. */ + vx_int32 delta_start_y; /*!< \brief The change in the start y. */ + vx_int32 delta_end_x; /*!< \brief The change in the end x. */ + vx_int32 delta_end_y; /*!< \brief The change in the end y. 
*/ +} vx_delta_rectangle_t; + +#ifdef __cplusplus +extern "C" { +#endif + +VX_API_ENTRY vx_kernel VX_API_CALL vxAddKernel(vx_context context, + const vx_char name[VX_MAX_KERNEL_NAME], + vx_enum enumeration, + vx_kernel_f func_ptr, + vx_uint32 numParams, + vx_kernel_input_validate_f input, + vx_kernel_output_validate_f output, + vx_kernel_initialize_f init, + vx_kernel_deinitialize_f deinit); + +VX_API_ENTRY vx_size VX_API_CALL vxComputeImagePatchSize(vx_image image, + const vx_rectangle_t *rect, + vx_uint32 plane_index); + +VX_API_ENTRY vx_status VX_API_CALL vxAccessImagePatch(vx_image image, + const vx_rectangle_t *rect, + vx_uint32 plane_index, + vx_imagepatch_addressing_t *addr, + void **ptr, + vx_enum usage); + +VX_API_ENTRY vx_status VX_API_CALL vxCommitImagePatch(vx_image image, + const vx_rectangle_t *rect, + vx_uint32 plane_index, + const vx_imagepatch_addressing_t *addr, + const void *ptr); + +VX_API_ENTRY vx_status VX_API_CALL vxAccessArrayRange(vx_array arr, vx_size start, vx_size end, vx_size *stride, void **ptr, vx_enum usage); + +VX_API_ENTRY vx_status VX_API_CALL vxCommitArrayRange(vx_array arr, vx_size start, vx_size end, const void *ptr); + +VX_API_ENTRY vx_status VX_API_CALL vxAccessDistribution(vx_distribution distribution, void **ptr, vx_enum usage); + +VX_API_ENTRY vx_status VX_API_CALL vxCommitDistribution(vx_distribution distribution, const void * ptr); + +VX_API_ENTRY vx_status VX_API_CALL vxAccessLUT(vx_lut lut, void **ptr, vx_enum usage); + +VX_API_ENTRY vx_status VX_API_CALL vxCommitLUT(vx_lut lut, const void *ptr); + +VX_API_ENTRY vx_status VX_API_CALL vxReadMatrix(vx_matrix mat, void *array); + +VX_API_ENTRY vx_status VX_API_CALL vxWriteMatrix(vx_matrix mat, const void *array); + +VX_API_ENTRY vx_status VX_API_CALL vxReadConvolutionCoefficients(vx_convolution conv, vx_int16 *array); + +VX_API_ENTRY vx_status VX_API_CALL vxWriteConvolutionCoefficients(vx_convolution conv, const vx_int16 *array); + +VX_API_ENTRY vx_status VX_API_CALL vxReadScalarValue(vx_scalar ref, void *ptr); + +VX_API_ENTRY vx_status VX_API_CALL vxWriteScalarValue(vx_scalar ref, const void *ptr); + +VX_API_ENTRY vx_status VX_API_CALL vxSetRemapPoint(vx_remap table, vx_uint32 dst_x, vx_uint32 dst_y, vx_float32 src_x,vx_float32 src_y); + +VX_API_ENTRY vx_status VX_API_CALL vxGetRemapPoint(vx_remap table, vx_uint32 dst_x, vx_uint32 dst_y, vx_float32 *src_x, vx_float32 *src_y); + +VX_API_ENTRY vx_threshold VX_API_CALL vxCreateThreshold(vx_context c, vx_enum thresh_type, vx_enum data_type); + +VX_API_ENTRY vx_node VX_API_CALL vxAccumulateImageNode(vx_graph graph, vx_image input, vx_image accum); + +VX_API_ENTRY vx_node VX_API_CALL vxAccumulateWeightedImageNode(vx_graph graph, vx_image input, vx_scalar alpha, vx_image accum); + +VX_API_ENTRY vx_node VX_API_CALL vxAccumulateSquareImageNode(vx_graph graph, vx_image input, vx_scalar shift, vx_image accum); + +VX_API_ENTRY vx_status VX_API_CALL vxuAccumulateImage(vx_context context, vx_image input, vx_image accum); + +VX_API_ENTRY vx_status VX_API_CALL vxuAccumulateWeightedImage(vx_context context, vx_image input, vx_scalar alpha, vx_image accum); + +VX_API_ENTRY vx_status VX_API_CALL vxuAccumulateSquareImage(vx_context context, vx_image input, vx_scalar shift, vx_image accum); + +#ifdef __cplusplus +} +#endif + +#endif /* VX_1_0_1_NAMING_COMPATIBILITY */ diff --git a/unified-tina/inc/VX/vx_ext_program.h b/unified-tina/inc/VX/vx_ext_program.h new file mode 100644 index 0000000..813dae6 --- /dev/null +++ b/unified-tina/inc/VX/vx_ext_program.h @@ -0,0 
+1,168 @@ +#ifndef _VX_EXT_PROGRAM_H_ +#define _VX_EXT_PROGRAM_H_ + +#include + +/***********************************************************************************/ + +#define VX_512BITS_DISABLE 0 +#define VX_512BITS_ADD 0x1 +#define VX_512BITS_SUBTRACT 0x2 +#define VX_512BITS_ACCUMULATOR 0x3 + +#define VX_512BITS_TYPE_FLOAT32 0x0 +#define VX_512BITS_TYPE_FLOAT16 0x1 +#define VX_512BITS_TYPE_SIGNED32 0x2 +#define VX_512BITS_TYPE_SIGNED16 0x3 +#define VX_512BITS_TYPE_SIGNED8 0x4 +#define VX_512BITS_TYPE_UNSIGNED32 0x5 +#define VX_512BITS_TYPE_UNSIGNED16 0x6 +#define VX_512BITS_TYPE_UNSIGNED8 0x7 + +#define VX_512BITS_SELECT_SRC0 0 +#define VX_512BITS_SELECT_SRC1 1 +#define VX_512BITS_SELECT_CONSTANTS 2 + +typedef union _vx_512bits_bin_t +{ + vx_uint8 bin8[16]; + vx_uint16 bin16[8]; + vx_uint32 bin32[4]; +} +vx_512bits_bin_t; + +typedef union _vx_512bits_config_t +{ + struct + { + vx_uint32 flag0 :2; + vx_uint32 flag1 :2; + vx_uint32 flag2 :2; + vx_uint32 flag3 :2; + vx_uint32 flag4 :2; + vx_uint32 flag5 :2; + vx_uint32 flag6 :2; + vx_uint32 flag7 :2; + vx_uint32 flag8 :2; + vx_uint32 flag9 :2; + vx_uint32 flag10:2; + vx_uint32 flag11:2; + vx_uint32 flag12:2; + vx_uint32 flag13:2; + vx_uint32 flag14:2; + vx_uint32 flag15:2; + } + bin2; + + struct + { + vx_uint32 flag0 :4; + vx_uint32 flag1 :4; + vx_uint32 flag2 :4; + vx_uint32 flag3 :4; + vx_uint32 flag4 :4; + vx_uint32 flag5 :4; + vx_uint32 flag6 :4; + vx_uint32 flag7 :4; + } + bin4; +} +vx_512bits_config_t; + +typedef struct _vx_512bits_miscconfig_t +{ + vx_uint32 post_shift :5; /*[0:4]*/ + vx_uint32 resolve1 :3; /*[5:7]*/ + vx_uint32 constant_type :3; /*[8:10]*/ + vx_uint32 resolve2 :1; /*[11:11]*/ + vx_uint32 accu_type :3; /*[12:14]*/ + vx_uint32 resolve3 :17;/*[15:31]*/ +} +vx_512bits_miscconfig_t; + +typedef struct _vx_512bits_t +{ + vx_512bits_config_t termConfig; + vx_512bits_config_t aSelect; + vx_512bits_config_t aBin[2]; + vx_512bits_config_t bSelect; + vx_512bits_config_t bBin[2]; + vx_512bits_miscconfig_t miscConfig; + vx_512bits_bin_t bins[2]; +} +vx_512bits_t; + +/***********************************************************************************/ + +typedef enum vx_ext_program_type_e +{ + VX_TYPE_PROGRAM = 0x900 +} +vx_ext_program_type_e; + +typedef enum vx_program_attribute_e +{ + VX_PROGRAM_ATTRIBUTE_BUILD_LOG = VX_ATTRIBUTE_BASE(VX_ID_VIVANTE, VX_TYPE_PROGRAM) + 0x0, +} +vx_program_attribute_e; + +typedef enum vx_ext_node_attribute_e +{ + VX_NODE_ATTRIBUTE_KERNEL_EXECUTION_PARAMETERS = VX_ATTRIBUTE_BASE(VX_ID_VIVANTE, VX_TYPE_NODE) + 0x0, +} +vx_ext_node_attribute_e; + +#define VX_MAX_WORK_ITEM_DIMENSIONS 3 + +typedef struct _vx_kernel_execution_parameters { + vx_uint32 workDim; + vx_size globalWorkOffset[VX_MAX_WORK_ITEM_DIMENSIONS]; + vx_size globalWorkScale[VX_MAX_WORK_ITEM_DIMENSIONS]; + vx_size localWorkSize[VX_MAX_WORK_ITEM_DIMENSIONS]; + vx_size globalWorkSize[VX_MAX_WORK_ITEM_DIMENSIONS]; +} vx_kernel_execution_parameters_t; + +typedef struct _vx_program * vx_program; + +#define VX_BUILD_SUCCESS 0 +#define VX_BUILD_NONE -1 +#define VX_BUILD_ERROR -2 +#define VX_BUILD_IN_PROGRESS -3 + +#if defined(__cplusplus) +extern "C" { +#endif + + +VX_API_ENTRY vx_program VX_API_CALL vxCreateProgramWithSource( + vx_context context, vx_uint32 count, const vx_char * strings[], vx_size lengths[]); + +VX_API_ENTRY vx_program VX_API_CALL vxCreateProgramWithBinary( + vx_context context, const vx_uint8 * binary, vx_size size); + +VX_API_ENTRY vx_status VX_API_CALL vxReleaseProgram(vx_program *program); + +VX_API_ENTRY vx_status 
VX_API_CALL vxBuildProgram(vx_program program, const vx_char * options); + + +VX_API_ENTRY vx_status VX_API_CALL vxQueryProgram(vx_program program, vx_enum attribute, void *ptr, vx_size size); + +VX_API_ENTRY vx_kernel VX_API_CALL vxAddKernelInProgram( + vx_program program, vx_char name[VX_MAX_KERNEL_NAME], vx_enum enumeration, vx_uint32 num_params, vx_kernel_validate_f validate, + vx_kernel_initialize_f initialize, vx_kernel_deinitialize_f deinitialize); + +VX_API_ENTRY vx_status VX_API_CALL vxSetNodeUniform(vx_node node, const vx_char * name, vx_size count, void * value); + +VX_API_ENTRY vx_status VX_API_CALL vxSetChildGraphOfNode(vx_node node, vx_graph graph); + +VX_API_ENTRY vx_graph VX_API_CALL vxGetChildGraphOfNode(vx_node node); + +VX_API_ENTRY vx_status VX_API_CALL vxSetArrayAttribute(vx_array array, vx_enum attribute, void *ptr, vx_size size); + +VX_API_ENTRY vx_status VX_API_CALL vxSelectKernelSubname(vx_node node, const vx_char * subname); + +#if defined(__cplusplus) +} +#endif + +#endif /* __GC_VX_PROGRAM_H__ */ diff --git a/unified-tina/inc/VX/vx_ext_target.h b/unified-tina/inc/VX/vx_ext_target.h new file mode 100644 index 0000000..3dd3a26 --- /dev/null +++ b/unified-tina/inc/VX/vx_ext_target.h @@ -0,0 +1,135 @@ +/* + + * Copyright (c) 2012-2017 The Khronos Group Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef _VX_EXT_TARGET_H_ +#define _VX_EXT_TARGET_H_ + +#include + +/*! \file + * \brief The OpenVX Target API Definition + */ + +/*! \brief The extension name. + * \ingroup group_target + */ +#define OPENVX_EXT_TARGET "vx_ext_target" + +/*! \brief Defines the maximum number of characters in a target string. + * \ingroup group_target + */ +#define VX_MAX_TARGET_NAME (64) + +enum vx_ext_target_context_attribute_e { + /*! \brief Used to query the context for the number of active targets. Use a \ref vx_uint32 parameter. */ + VX_CONTEXT_TARGETS = VX_ATTRIBUTE_BASE(VX_ID_KHRONOS, VX_TYPE_CONTEXT) + 0xE, +}; + +/*! \brief An abstract handle to a target. + * \ingroup group_target + */ +typedef struct _vx_target *vx_target; + +/*! \brief The target attributes list + * \ingroup group_target + */ +enum vx_target_attribute_e { + /*! \brief Returns the index of the given target. Use a \ref vx_uint32 parameter.*/ + VX_TARGET_ATTRIBUTE_INDEX = VX_ATTRIBUTE_BASE(VX_ID_KHRONOS, VX_TYPE_TARGET) + 0x0, + /*! \brief Returns the name of the given target in the format "vendor.vendor_string". + * Use a \ref vx_char[\ref VX_MAX_TARGET_NAME] array + */ + VX_TARGET_ATTRIBUTE_NAME = VX_ATTRIBUTE_BASE(VX_ID_KHRONOS, VX_TYPE_TARGET) + 0x1, + /*! \brief Returns the number of kernels that the target is capable of processing. + * This is then used to allocate a table which is then filled when \ref vxQueryTarget + * is called with \ref VX_TARGET_ATTRIBUTE_KERNELTABLE. + * Use a \ref vx_uint32 parameter. + */ + VX_TARGET_ATTRIBUTE_NUMKERNELS = VX_ATTRIBUTE_BASE(VX_ID_KHRONOS, VX_TYPE_TARGET) + 0x2, + /*! \brief Returns the table of all the kernels that a given target can execute. 
+ * Use a vx_kernel_info_t array. + * \pre You must call \ref vxQueryTarget with \ref VX_TARGET_ATTRIBUTE_NUMKERNELS + * to compute the necessary size of the array. + */ + VX_TARGET_ATTRIBUTE_KERNELTABLE = VX_ATTRIBUTE_BASE(VX_ID_KHRONOS, VX_TYPE_TARGET) + 0x3, +}; + +#if defined(__cplusplus) +extern "C" { +#endif + +/*! \brief Used to retrieve a target reference by the index of the target. + * \param [in] context The reference to the overall context. + * \param [in] index The index of the target to get a reference to. + * \return \ref vx_target + * \retval 0 Invalid index. + * \retval * A target reference. + * \note Use \ref vxQueryContext with \ref VX_CONTEXT_NUMTARGETS to retrieve the upper limit of targets. + * \ingroup group_target + */ +VX_API_ENTRY vx_target VX_API_CALL vxGetTargetByIndex(vx_context context, vx_uint32 index); + +/*! \brief Used to get a reference to named target when the name is known beforehand. + * \param [in] context The reference to the overall context. + * \param [in] name The target string name. + * \return \ref vx_target + * \retval 0 Invalid index. + * \retval * A target reference. + * \ingroup group_target + */ +VX_API_ENTRY vx_target VX_API_CALL vxGetTargetByName(vx_context context, const vx_char *name); + +/*! \brief Releases a reference to a target object. + * The object may not be garbage collected until its total reference count is zero. + * \param [in] target The pointer to the target to release. + * \return A \ref vx_status_e enumeration. + * \retval VX_SUCCESS No errors. + * \retval VX_ERROR_INVALID_REFERENCE If target is not a \ref vx_target. + * \note After returning from this function the reference will be zeroed. + * \ingroup group_target + */ +VX_API_ENTRY vx_status VX_API_CALL vxReleaseTarget(vx_target *target); + +/*! \brief Used to query the target about it's properties. + * \param [in] target The reference to the target. + * \param [in] attribute The \ref vx_target_attribute_e value to query for. + * \param [out] ptr The location at which the resulting value will be stored. + * \param [in] size The size of the container to which ptr points. + * \return A \ref vx_status_e enumeration. + * \pre \ref vxGetTargetByName or \ref vxGetTargetByIndex + * \ingroup group_target + */ +VX_API_ENTRY vx_status VX_API_CALL vxQueryTarget(vx_target target, vx_enum attribute, void *ptr, vx_size size); + +/*! \brief Used to assign target affinity to a node. + * \note This assignment overrides implementation chosen behavior. + * \param [in] node The node reference to assign affinity to. + * \param [in] target The reference to the target to execute the Node on. + * \pre \ref vxGetTargetByName or \ref vxGetTargetByIndex + * \return A \ref vx_status_e enumeration. + * \ingroup group_target + * \pre vxCreateGenericNode or some other node creation function. + * \retval VX_ERROR_INVALID_REFERENCE Either node or target was not a valid reference. + * \retval VX_ERROR_NOT_SUPPORTED The node can not be executed on that target. + */ +VX_API_ENTRY vx_status VX_API_CALL vxAssignNodeAffinity(vx_node node, vx_target target); + +#if defined(__cplusplus) +} +#endif + +#endif diff --git a/unified-tina/inc/VX/vx_helper.h b/unified-tina/inc/VX/vx_helper.h new file mode 100644 index 0000000..01bb55b --- /dev/null +++ b/unified-tina/inc/VX/vx_helper.h @@ -0,0 +1,293 @@ +/* + + * Copyright (c) 2012-2017 The Khronos Group Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef _VX_HELPER_H_ +#define _VX_HELPER_H_ + +#include + +/*! \file + * \brief The OpenVX Helper Library Interface. + * + * \defgroup group_helper OpenVX Helper + * \brief The helper is an non-standardized set of convenience constructs for OpenVX. + * \details These functions use only the OpenVX API in order to implement their + * functionality. As such structures, objects, defines, typedefs and functions + * defined herein are not part of the OpenVX standard, and are + * included as EXAMPLE code only. + */ + +/*! \brief A definition for TAU, or 2*PI. + * \ingroup group_helper + */ +#define VX_TAU 6.28318530717958647692 + +/*! \brief Maximum number of supported entries. + * \ingroup group_helper + */ +#ifndef VX_MAX_LOG_NUM_ENTRIES +#define VX_MAX_LOG_NUM_ENTRIES (1024) +#endif + +#ifndef dimof +/*! \brief A helper macro to determine the number of elements in an array. + * \ingroup group_helper + */ +#define dimof(x) (sizeof(x)/sizeof(x[0])) +#endif + +/*! \brief Contains everything needed to abstractly describe a parameter to a kernel. This is used to + * declare kernel parameters at compile time. + * \ingroup group_helper + */ +typedef struct _vx_param_description_t { + vx_enum direction; /*!< \brief From \ref vx_direction_e */ + vx_enum data_type; /*!< \brief From \ref vx_type_e */ + vx_enum state; /*!< \brief From \ref vx_parameter_state_e */ +} vx_param_description_t; + + +/*! \brief Contains everything needed to abstractly describe a kernel. + * This is used to declare kernels at compile time. + * \ingroup group_helper + */ +typedef struct _vx_kernel_description_t { + /*! \brief The vx_kernel_e enum */ + vx_enum enumeration; + /*! \brief The name that kernel will be used with \ref vxGetKernelByName. */ + vx_char name[VX_MAX_KERNEL_NAME]; + /*! \brief The pointer to the function to execute the kernel */ + vx_kernel_f function; + /*! \brief The pointer to the array of parameter descriptors */ + vx_param_description_t *parameters; + /*! \brief The number of paraemeters in the array. */ + vx_uint32 numParams; + /*! \brief The parameters validator */ + vx_kernel_validate_f validate; + /*! \brief The input validator (deprecated in openvx 1.1) */ + void* input_validate; + /*! \brief The output validator (deprecated in openvx 1.1) */ + void* output_validate; + /*! \brief The initialization function */ + vx_kernel_initialize_f initialize; + /*! \brief The deinitialization function */ + vx_kernel_deinitialize_f deinitialize; +} vx_kernel_description_t; + +/*! \brief A log entry contains the graph reference, a status and a message. + * \ingroup group_helper + */ +typedef struct _vx_log_entry_t { + /*! \brief The status code */ + vx_status status; + /*! \brief The reference to which the message and status pertains. */ + vx_reference reference; + /*! \brief This indicates if the log entry is valid/active or not. */ + vx_enum active; + /*! \brief The message given to the log from OpenVX. This may be an empty string. */ + char message[VX_MAX_LOG_MESSAGE_LEN]; +} vx_log_entry_t; + +/*! 
\brief The log of a graph + * \ingroup group_helper + */ +typedef struct _vx_log_t { + vx_int32 first; /*!< Inclusive */ + vx_int32 last; /*!< Exclusive */ + vx_uint32 count; /*!< == VX_MAX_LOG_NUM_ENTRIES */ + /*! \brief The set of all log entries. */ + vx_log_entry_t entries[VX_MAX_LOG_NUM_ENTRIES]; +} vx_log_t; + +#define FGETS(str, fh) \ +{ \ + char* success = fgets(str, sizeof(str), fh); \ + if (!success) \ + { \ + printf("fgets failed\n"); \ + } \ +} + +#ifdef __cplusplus +extern "C" { +#endif + +uint32_t math_gcd(uint32_t a, uint32_t b); + + +/*! \brief Returns the previous entry of the log. When called consecutively it + * will return the entire log. The log will be cleared by reading it. + * \param [in] ref The reference to filter the log entries against. + * If the context is given, the next entry will be returned. + * \param [out] message A predefined location to store a copy of the log's + * message value. + * This must point to at least \ref VX_MAX_LOG_MESSAGE_LEN bytes of characters. + * \return Returns the status of the log entry from \ref vx_status_e. + * \ingroup group_helper + * \note The API returns errors oldest to newest order. + * When VX_SUCCESS is returned, the log reading is complete. + */ +vx_status vxGetLogEntry(vx_reference ref, char message[VX_MAX_LOG_MESSAGE_LEN]); + +/*! \brief This enables the helper library logging feature to take over the error + * log callback and keep a database of previous log entries. + * \ingroup group_helper + */ +void vxRegisterHelperAsLogReader(vx_context context); + +/*! + * \brief A method to construct a node via arbitrary parameters and an enum. + * \param [in] graph The handle to desired graph to add the node to. + * \param [in] kernelenum The \ref vx_kernel_e enum value used to create a node. + * \param [in] params The array of parameter information. + * \param [in] num The number of elements in params. + * \return vx_node + * \retval 0 Indicates a failure. + * \ingroup group_helper + */ +vx_node vxCreateNodeByStructure(vx_graph graph, + vx_enum kernelenum, + vx_reference params[], + vx_uint32 num); + +/*! \brief A method to clear out the log for a particular reference, such as a graph. + * \param [in] ref The reference to remove from the log. + * \ingroup group_helper + */ +void vxClearLog(vx_reference ref); + +/*! \brief This is used to connect one node parameter to another node parameter + * when the original handles to the data objects are already lost. + * The context determines if a buffer is necessary or can be optimized out. + * \param [in] a The first parameter + * \param [in] b The second parameter + * \note a or b must be an output parameter and other other an input. + * \return Returns a status code. + * \ingroup group_helper + */ +vx_status vxLinkParametersByReference(vx_parameter a, vx_parameter b); + +/*! \brief This is used to connect one parameter to another parameter by + * explicity indexing when the handles to the data objects are lost. + * \param [in] node_a The source node to link from. + * \param [in] index_a The index of the \ref vx_parameter to link from. + * \param [in] node_b The sink node to link to. + * \param [in] index_b The index of the \ref vx_parameter to link to. + * \return Returns a status code. + * \ingroup group_helper + */ +vx_status vxLinkParametersByIndex(vx_node node_a, vx_uint32 index_a, vx_node node_b, vx_uint32 index_b); + +/*! \brief This helper is used to easily set the affine matrix to a rotation and scale. + * \param [in] matrix The handle to the matrix. 
+ * \param [in] angle The rotation angle in degrees. + * \param [in] scale The scaling value. Values less than one are enlarging. + * \param [in] center_x The center pixel in the x direction. + * \param [in] center_y The center pixel in the y direction. + * \return Returns a \ref vx_status_e enumeration. + * \ingroup group_helper + */ +vx_status vxSetAffineRotationMatrix(vx_matrix matrix, + vx_float32 angle, + vx_float32 scale, + vx_float32 center_x, + vx_float32 center_y); + +/*! \brief [Helper] This function changes the points of a rectangle by some + * delta value per coordinate. + * \param [in] rect The rectangle to modify. + * \param [in] dsx The start x delta. + * \param [in] dsy The start y delta. + * \param [in] dex The end x delta. + * \param [in] dey The end y delta. + * \return vx_status + * \retval VX_SUCCESS Modified rectangle. + * \retval VX_ERROR_INVALID_REFERENCE Not a valid rectangle. + * \ingroup group_helper + */ +vx_status vxAlterRectangle(vx_rectangle_t *rect, + vx_int32 dsx, + vx_int32 dsy, + vx_int32 dex, + vx_int32 dey); + +/*! \brief Adds a parameter to a graph by indicating the source node, and the + * index of the parameter on the node. + * \param [in] g The graph handle. + * \param [in] n The node handle. + * \param [in] index The index of the parameter on the node. + * \return Returns a \ref vx_status_e enumeration. + * \ingroup group_helper + */ +vx_status vxAddParameterToGraphByIndex(vx_graph g, vx_node n, vx_uint32 index); + +#if defined(EXPERIMENTAL_USE_TARGET) +/*! \brief Finds all targets which report that they implement a particular kernel by name. + * \param [in] context The overall context. + * \param [in] kname The name of the kernel to find. + * \param [in,out] targets The array of pointers to character arrays. Each index will + * be modified. If the kernel does not exist on the target, the name will be zeroed. + * If the kernel does exist on the target, the name of the target will be filled in. + * \pre targets must be a preallocated array of vx_char pointers to + * \ref VX_MAX_TARGET_NAME characters with number of elements equal to + * the number of targets in the implementation. + * \ingroup group_helper + */ +vx_bool vxFindAllTargetsOfKernelsByName(vx_context context, vx_char kname[VX_MAX_KERNEL_NAME], vx_char *targets[]); + +/*! \brief Allocates and returns a list of all available targets in a context. + * \param [in] context The overall context. + * \param [out] targets A pointer to variable to hold the array of target strings. + * \param [out] num_targets A pointer to a variable to hold the number of targets found. + * \ingroup group_helper + */ +vx_bool vxCreateListOfAllTargets(vx_context context, vx_char **targets[], vx_uint32 *num_targets); + +/*! \brief Free the array of target name strings. + * \param [in,out] targets The pointer to the variable that holds the array of strings. This variable will be set + * to NULL after this call. + * \param [in] num_targets The number of targets in the system. + * \ingroup group_helper + */ +void vxDestroyListOfAllTargets(vx_char **targets[], vx_uint32 num_targets); + +#endif + +/*! \brief Find the overlapping rectange between two rectangles. + * \ingroup group_helper + */ +vx_bool vxFindOverlapRectangle(vx_rectangle_t *rect_a, vx_rectangle_t *rect_b, vx_rectangle_t *rect_res); + +/*! \brief Read a rectangle-shaped section of an image into a 2D array. 
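+ *
+ * An illustrative sketch (not part of the original header): reading the 3x3 U8 neighbourhood
+ * around pixel (x, y). It assumes 'base' and 'addr' come from a prior vxMapImagePatch call,
+ * and that the destination must hold (2*radius_x+1)*(2*radius_y+1) elements.
+ * \code
+ * vx_uint8 window[3 * 3];
+ * vx_border_t border = { VX_BORDER_REPLICATE };
+ * vxReadRectangle(base, &addr, &border, VX_DF_IMAGE_U8, x, y, 1, 1, window);
+ * \endcode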
+ * \ingroup group_helper + */ +void vxReadRectangle(const void *base, + const vx_imagepatch_addressing_t *addr, + const vx_border_t *borders, + vx_df_image type, + vx_uint32 center_x, + vx_uint32 center_y, + vx_uint32 radius_x, + vx_uint32 radius_y, + void *destination); + +#ifdef __cplusplus +} +#endif + +#endif /* _VX_HELPER_H_ */ + diff --git a/unified-tina/inc/VX/vx_import.h b/unified-tina/inc/VX/vx_import.h new file mode 100644 index 0000000..80a490d --- /dev/null +++ b/unified-tina/inc/VX/vx_import.h @@ -0,0 +1,189 @@ +/* + * Copyright (c) 2012-2020 The Khronos Group Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef _OPENVX_IMPORT_H_ +#define _OPENVX_IMPORT_H_ +#ifdef __cplusplus +extern "C" { +#endif +/*! + * \file + * \brief The OpenVX Import API + * part of the OpenVX Export and Import extension API + * and also part of the OpenVX SC deployment feature set. + */ + + /*! \brief An enumeration of export uses. See \ref vxExportObjectsToMemory and + * \ref vxImportObjectsFromMemory + * \ingroup vx_enum_e + */ +#define VX_ENUM_IX_USE 0x18 +/*! \brief How to export and import an object + * \ingroup group_import + */ +#define VX_IX_USE_APPLICATION_CREATE (VX_ENUM_BASE(VX_ID_KHRONOS, VX_ENUM_IX_USE) + 0x0) /*!< \brief The application will create the object before import. */ +/*! \brief How to export and import an object + * \ingroup group_import + */ +#define VX_IX_USE_EXPORT_VALUES (VX_ENUM_BASE(VX_ID_KHRONOS, VX_ENUM_IX_USE) + 0x1) /*!< \brief Data values are exported and restored upon import. */ +/*! \brief How to export and import an object + * \ingroup group_import + */ +#define VX_IX_USE_NO_EXPORT_VALUES (VX_ENUM_BASE(VX_ID_KHRONOS, VX_ENUM_IX_USE) + 0x2) /*!< \brief Data values are not exported. */ + +/*============================================================================= +IMPORT +=============================================================================*/ +/*! \brief The Import Object. Import is a container of OpenVX objects, which may be retreived + * by name + * \ingroup group_import + */ +typedef struct _vx_import *vx_import; + +/*! \brief The Object Type Enumeration for import. + * \ingroup group_import + */ + +#define VX_TYPE_IMPORT 0x814/*!< \brief A \ref vx_import. */ + +/*! \brief Imports objects into a context from a vendor-specific format in memory.\n + * + * \details This function imports objects from a memory blob previously created using \ref vxExportObjectsToMemory[*REQ*].\n + * A pointer to memory is given where a list of references is stored, together with the list + * of uses which describes how the references are used. 
The number of references given and the + * list of uses must match that given upon export, or this function will not be sucessful[*REQ*].\n + * The *uses* array specifies how the objects in the corresponding *refs* array will be imported: + * - \ref VX_IX_USE_APPLICATION_CREATE\n + * The application must create the object and supply the reference; the + * meta-data of the object must match exactly the meta-data of the object when it was exported, + * except that the name need not match[*REQ*].\n + * If the supplied reference has a different name to that stored, the supplied name is used[*REQ*]. + * - \ref VX_IX_USE_EXPORT_VALUES\n + * The implementation will create the object and set the data in it[*REQ*].\n + * Any data not defined at the time of export of the object will be set to a default value (zero in the + * absence of any other definition) upon import[*REQ*]. + * - \ref VX_IX_USE_NO_EXPORT_VALUES\n + * The implementation will create the object and the importing application will set values as applicable[*REQ*]. + * + * References are obtained from the import API for those objects whose references were listed at the time of export. + * These are not the same objects; they are equivalent objects created by the framework at import time. + * The implementation guarantees that references will be available and valid for all objects listed at the time + * of export, or the import will fail[*REQ*].\n + * The import operation will fail if more than one object whose reference is listed at *refs* + * has been given the same non-zero length name (via \ref vxSetReferenceName)[*REQ*].\n + * The import will be unsuccessful if any of the parameters supplied is NULL[*REQ*].\n + * After completion of the function the memory at *ptr* may be deallocated by the application as it will + * not be used by any of the created objects[*REQ*].\n + * Any delays imported with graphs for which they are registered for auto-aging remain registered + * for auto-aging[*REQ*].\n + * After import, a graph must execute with exactly the same effect with respect to its visible parameters + * as before export[*REQ*]. + * \note The *refs* array must be the correct length to hold all references of the import; this will be the same length + * that was supplied at the time of export. Only references for objects created by the application, where the + * corresponding *uses* entry is \ref VX_IX_USE_APPLICATION_CREATE should be filled in by the application; + * all other entries will be supplied by the framework and may be initialised by the application to NULL. The *uses* array + * must have the identical length and content as given at the time of export, and the value of *numrefs* must also match; + * these measures increase confidence that the import contains the correct data. +* \note Graph parameters may be changed after import by using the \ref vxSetGraphParameterByIndex API, and + * images may also be changed by using the \ref vxSwapImageHandle API. + * When \ref vxSetGraphParameterByIndex is used, the framework will check that the new parameter is of the + * correct type to run with the graph, which cannot be re-verified. If the reference supplied is not suitable, an error + * will be returned, but there may be circumstances where changing graph parameters for unsuitable ones is not detected + * and could lead to implementation-dependent behaviour; one such circumstance is when the new parameters are images + * corresponding to overlapping regions of interest. The user should avoid these circumstances. 
+ * In other words, + * - The meta data of the new graph parameter must match the meta data of the graph parameter it replaces [*REQ*]. + * - A graph parameter must not be NULL [*REQ*]. + * \param [in] context context into which to import objects, must be valid [*REQ*]. + * \param [in] numrefs number of references to import, must match export[*REQ*]. + * \param [in,out] refs references imported or application-created data which must match + * meta-data of the export[*REQ*] + * \param [in] uses how to import the references, must match export values[*REQ*] + * \param [in] ptr pointer to binary buffer containing a valid binary export[*REQ*] + * \param [in] length number of bytes at \*ptr, i.e. the length of the export[*REQ*] + * \return A \ref vx_import[*REQ*]. + * Calling \ref vxGetStatus with the vx_import as a parameter will return VX_SUCCESS if the + * function was successful[*REQ*].\n + * Another value is given to indicate that there was an error[*REQ*].\n + * An implementation may provide several different error codes to give useful diagnostic information + * in the event of failure to import objects, but these are not required to indicate + * possibly recovery mechanisms, and for safety critical use assume errors are not recoverable. + * \post \ref vxReleaseImport is used to release the import object. + * \post Use \ref vxReleaseReference or an appropriate specific release function to release + * the references in the array refs when they are no longer required. + * \ingroup group_import + */ +VX_API_ENTRY vx_import VX_API_CALL vxImportObjectsFromMemory( + vx_context context, + vx_size numrefs, + vx_reference *refs, + const vx_enum * uses, + const vx_uint8 * ptr, + vx_size length); + +/*! \brief Releases an import object when no longer required.\n + * \details This function releases the reference to the import object [*REQ*].\n + * Other objects including those imported at the time of creation of the import object are unaffected[*REQ*].\n + * \param [in,out] import The pointer to the reference to the import object[*REQ*]. + * \post After returning sucessfully from this function the reference is zeroed[*REQ*]. + * \return A \ref vx_status value. + * \retval VX_SUCCESS If no errors occurred and the import was sucessfully released[*REQ*].\n + * An error is indicated when the return value is not VX_SUCCESS[*REQ*].\n + * An implementation may provide several different return values to give useful diagnostic + * information in the event of failure to export, but these are not required to indicate + * possibly recovery mechanisms, and for safety critical use assume errors are not recoverable. + * \pre \ref vxImportObjectsFromMemory is used to create an import object. + * \ingroup group_import + */ +VX_API_ENTRY vx_status VX_API_CALL vxReleaseImport(vx_import *import); +/*! \brief Get a reference from the import object by name.\n + * + * \details All accessible references of the import object created using \ref vxImportObjectsFromMemory are + * in the array *refs*, which is populated partly by the application before import, and partly by the + * framework. However, it may be more convenient to access the references in the import object without + * referring to this array, for example if the import object is passed as a parameter to another function. + * In this case, references may be retreived by name, assuming that \ref vxSetReferenceName + * was called to assign a name to the reference. 
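A minimal usage sketch of the import flow documented here, assuming a blob previously produced by vxExportObjectsToMemory and a graph exported under the hypothetical name "graph0"; the two-entry refs/uses layout is illustrative and must match whatever was supplied at export time.

#include <stddef.h>
#include <VX/vx.h>
#include <VX/vx_import.h>

static vx_status import_and_run(vx_context context,
                                const vx_uint8 *blob, vx_size blob_size,
                                vx_image input_image) /* app-created; meta-data must match the export */
{
    vx_reference refs[2] = { (vx_reference)input_image, NULL };
    const vx_enum uses[2] = { VX_IX_USE_APPLICATION_CREATE, VX_IX_USE_EXPORT_VALUES };
    vx_status status;

    vx_import import = vxImportObjectsFromMemory(context, 2, refs, uses, blob, blob_size);
    status = vxGetStatus((vx_reference)import);
    if (status != VX_SUCCESS)
        return status;

    /* "graph0" is a hypothetical name assigned with vxSetReferenceName before export. */
    vx_graph graph = (vx_graph)vxGetImportReferenceByName(import, "graph0");
    if (vxGetStatus((vx_reference)graph) == VX_SUCCESS)
    {
        status = vxProcessGraph(graph);
        vxReleaseGraph(&graph);   /* the lookup incremented the reference count */
    }

    vxReleaseImport(&import);     /* other imported objects stay usable until released */
    return status;
}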
+ * This function searches the given import for the given name and returns the associated reference[*REQ*].\n + * The reference may have been named either before export or after import[*REQ*].\n + * If more than one reference exists in the import with the given name, this is an error[*REQ*].\n + * Only references in the array *refs* after calling \ref vxImportObjectsFromMemory may be retrieved + * using this function[*REQ*].\n + * A reference to a named object may be obtained from a valid import object using this API even if all other + * references to the object have been released[*REQ*]. + * \param [in] import The import object in which to find the name; the function will fail if this parameter + * is not valid[*REQ*]. + * \param [in] name The name to find, points to a string of at least one and less than VX_MAX_REFERENCE_NAME bytes + * followed by a zero byte; the function will fail if this is not valid[*REQ*]. + * \return A \ref vx_reference[*REQ*].\n + * Calling \ref vxGetStatus with the reference as a parameter will return VX_SUCCESS if the function + * was successful[*REQ*].\n + * Another value is given to indicate that there was an error[*REQ*].\n + * On success, the reference count of the object in question is incremented[*REQ*].\n + * An implementation may provide several different error codes to give useful diagnostic information + * in the event of failure to retrieve a reference, but these are not required to indicate + * possibly recovery mechanisms, and for safety critical use assume errors are not recoverable. + * \pre \ref vxSetReferenceName was used to name the reference. + * \post use ref vxReleaseReference or appropriate specific release function to release a reference + * obtained by this method. + * \ingroup group_import + */ +VX_API_ENTRY vx_reference VX_API_CALL vxGetImportReferenceByName(vx_import import, const vx_char *name); + +#ifdef __cplusplus +} +#endif +#endif diff --git a/unified-tina/inc/VX/vx_kernels.h b/unified-tina/inc/VX/vx_kernels.h new file mode 100644 index 0000000..1648bbd --- /dev/null +++ b/unified-tina/inc/VX/vx_kernels.h @@ -0,0 +1,547 @@ +/* + + * Copyright (c) 2012-2017 The Khronos Group Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef _OPENVX_KERNELS_H_ +#define _OPENVX_KERNELS_H_ + +/*! + * \file + * \brief The list of supported kernels in the OpenVX standard. + */ + +#ifdef __cplusplus +extern "C" { +#endif + +/*! + * \brief The standard list of available libraries + * \ingroup group_kernel + */ +enum vx_library_e { + /*! \brief The base set of kernels as defined by Khronos. */ + VX_LIBRARY_KHR_BASE = 0x0, +}; + +/*! + * \brief The standard list of available vision kernels. + * + * Each kernel listed here can be used with the \ref vxGetKernelByEnum call. 
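As a brief illustration of the sentence above, any kernel in this list can be instantiated through the generic node API; the helper below wires up VX_KERNEL_SOBEL_3x3 (parameter indices follow the kernel signature: 0 = input, 1 and 2 = gradient outputs), with error handling trimmed for brevity.

#include <VX/vx.h>

static vx_node make_sobel_node(vx_context context, vx_graph graph,
                               vx_image input, vx_image grad_x, vx_image grad_y)
{
    vx_kernel kernel = vxGetKernelByEnum(context, VX_KERNEL_SOBEL_3x3);
    vx_node   node   = vxCreateGenericNode(graph, kernel);

    vxSetParameterByIndex(node, 0, (vx_reference)input);   /* [in]  VX_DF_IMAGE_U8 image  */
    vxSetParameterByIndex(node, 1, (vx_reference)grad_x);  /* [out] VX_DF_IMAGE_S16 image */
    vxSetParameterByIndex(node, 2, (vx_reference)grad_y);  /* [out] VX_DF_IMAGE_S16 image */

    vxReleaseKernel(&kernel);   /* the node keeps its own reference to the kernel */
    return node;
}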
+ * When programming the parameters, use + * \arg \ref VX_INPUT for [in] + * \arg \ref VX_OUTPUT for [out] + * \arg \ref VX_BIDIRECTIONAL for [in,out] + * + * When programming the parameters, use + * \arg \ref VX_TYPE_IMAGE for a \ref vx_image in the size field of \ref vxGetParameterByIndex or \ref vxSetParameterByIndex * \arg \ref VX_TYPE_ARRAY for a \ref vx_array in the size field of \ref vxGetParameterByIndex or \ref vxSetParameterByIndex * \arg or other appropriate types in \ref vx_type_e. + * \ingroup group_kernel + */ +enum vx_kernel_e { + /*! + * \brief The Color Space conversion kernel. + * \details The conversions are based on the \ref vx_df_image_e code in the images. + * \see group_vision_function_colorconvert + */ + VX_KERNEL_COLOR_CONVERT = VX_KERNEL_BASE(VX_ID_KHRONOS, VX_LIBRARY_KHR_BASE) + 0x1, + + /*! + * \brief The Generic Channel Extraction Kernel. + * \details This kernel can remove individual color channels from an interleaved + * or semi-planar, planar, sub-sampled planar image. A client could extract + * a red channel from an interleaved RGB image or do a Luma extract from a + * YUV format. + * \see group_vision_function_channelextract + */ + VX_KERNEL_CHANNEL_EXTRACT = VX_KERNEL_BASE(VX_ID_KHRONOS, VX_LIBRARY_KHR_BASE) + 0x2, + + /*! + * \brief The Generic Channel Combine Kernel. + * \details This kernel combine multiple individual planes into a single + * multiplanar image of the type specified in the output image. + * \see group_vision_function_channelcombine + */ + VX_KERNEL_CHANNEL_COMBINE = VX_KERNEL_BASE(VX_ID_KHRONOS, VX_LIBRARY_KHR_BASE) + 0x3, + + /*! \brief The Sobel 3x3 Filter Kernel. + * \see group_vision_function_sobel3x3 + */ + VX_KERNEL_SOBEL_3x3 = VX_KERNEL_BASE(VX_ID_KHRONOS, VX_LIBRARY_KHR_BASE) + 0x4, + + /*! + * \brief The Magnitude Kernel. + * \details This kernel produces a magnitude plane from two input gradients. + * \see group_vision_function_magnitude + */ + VX_KERNEL_MAGNITUDE = VX_KERNEL_BASE(VX_ID_KHRONOS, VX_LIBRARY_KHR_BASE) + 0x5, + + /*! + * \brief The Phase Kernel. + * \details This kernel produces a phase plane from two input gradients. + * \see group_vision_function_phase + */ + VX_KERNEL_PHASE = VX_KERNEL_BASE(VX_ID_KHRONOS, VX_LIBRARY_KHR_BASE) + 0x6, + + /*! + * \brief The Scale Image Kernel. + * \details This kernel provides resizing of an input image to an output image. + * The scaling factor is determined but the relative sizes of the input and + * output. + * \see group_vision_function_scale_image + */ + VX_KERNEL_SCALE_IMAGE = VX_KERNEL_BASE(VX_ID_KHRONOS, VX_LIBRARY_KHR_BASE) + 0x7, + + /*! \brief The Table Lookup kernel + * \see group_vision_function_lut + */ + VX_KERNEL_TABLE_LOOKUP = VX_KERNEL_BASE(VX_ID_KHRONOS, VX_LIBRARY_KHR_BASE) + 0x8, + + /*! \brief The Histogram Kernel. + * \see group_vision_function_histogram + */ + VX_KERNEL_HISTOGRAM = VX_KERNEL_BASE(VX_ID_KHRONOS, VX_LIBRARY_KHR_BASE) + 0x9, + + /*! \brief The Histogram Equalization Kernel. + * \see group_vision_function_equalize_hist + */ + VX_KERNEL_EQUALIZE_HISTOGRAM = VX_KERNEL_BASE(VX_ID_KHRONOS, VX_LIBRARY_KHR_BASE) + 0xA, + + /*! \brief The Absolute Difference Kernel. + * \see group_vision_function_absdiff + */ + VX_KERNEL_ABSDIFF = VX_KERNEL_BASE(VX_ID_KHRONOS, VX_LIBRARY_KHR_BASE) + 0xB, + + /*! \brief The Mean and Standard Deviation Kernel. + * \see group_vision_function_meanstddev + */ + VX_KERNEL_MEAN_STDDEV = VX_KERNEL_BASE(VX_ID_KHRONOS, VX_LIBRARY_KHR_BASE) + 0xC, + + /*! \brief The Threshold Kernel. 
+ * \see group_vision_function_threshold + */ + VX_KERNEL_THRESHOLD = VX_KERNEL_BASE(VX_ID_KHRONOS, VX_LIBRARY_KHR_BASE) + 0xD, + + /*! \brief The Integral Image Kernel. + * \see group_vision_function_integral_image + */ + VX_KERNEL_INTEGRAL_IMAGE = VX_KERNEL_BASE(VX_ID_KHRONOS, VX_LIBRARY_KHR_BASE) + 0xE, + + /*! \brief The dilate kernel. + * \see group_vision_function_dilate_image + */ + VX_KERNEL_DILATE_3x3 = VX_KERNEL_BASE(VX_ID_KHRONOS, VX_LIBRARY_KHR_BASE) + 0xF, + + /*! \brief The erode kernel. + * \see group_vision_function_erode_image + */ + VX_KERNEL_ERODE_3x3 = VX_KERNEL_BASE(VX_ID_KHRONOS, VX_LIBRARY_KHR_BASE) + 0x10, + + /*! \brief The median image filter. + * \see group_vision_function_median_image + */ + VX_KERNEL_MEDIAN_3x3 = VX_KERNEL_BASE(VX_ID_KHRONOS, VX_LIBRARY_KHR_BASE) + 0x11, + + /*! \brief The box filter kernel. + * \see group_vision_function_box_image + */ + VX_KERNEL_BOX_3x3 = VX_KERNEL_BASE(VX_ID_KHRONOS, VX_LIBRARY_KHR_BASE) + 0x12, + + /*! \brief The gaussian filter kernel. + * \see group_vision_function_gaussian_image + */ + VX_KERNEL_GAUSSIAN_3x3 = VX_KERNEL_BASE(VX_ID_KHRONOS, VX_LIBRARY_KHR_BASE) + 0x13, + + /*! \brief The custom convolution kernel. + * \see group_vision_function_custom_convolution + */ + VX_KERNEL_CUSTOM_CONVOLUTION = VX_KERNEL_BASE(VX_ID_KHRONOS, VX_LIBRARY_KHR_BASE) + 0x14, + + /*! \brief The gaussian image pyramid kernel. + * \see group_vision_function_gaussian_pyramid + */ + VX_KERNEL_GAUSSIAN_PYRAMID = VX_KERNEL_BASE(VX_ID_KHRONOS, VX_LIBRARY_KHR_BASE) + 0x15, + + /*! \brief The min and max location kernel. + * \see group_vision_function_minmaxloc + */ + VX_KERNEL_MINMAXLOC = VX_KERNEL_BASE(VX_ID_KHRONOS, VX_LIBRARY_KHR_BASE) + 0x19, + + /*! \brief The bit-depth conversion kernel. + * \see group_vision_function_convertdepth + */ + VX_KERNEL_CONVERTDEPTH = VX_KERNEL_BASE(VX_ID_KHRONOS, VX_LIBRARY_KHR_BASE) + 0x1A, + + /*! \brief The Canny Edge Detector. + * \see group_vision_function_canny + */ + VX_KERNEL_CANNY_EDGE_DETECTOR = VX_KERNEL_BASE(VX_ID_KHRONOS, VX_LIBRARY_KHR_BASE) + 0x1B, + + /*! \brief The Bitwise And Kernel. + * \see group_vision_function_and + */ + VX_KERNEL_AND = VX_KERNEL_BASE(VX_ID_KHRONOS, VX_LIBRARY_KHR_BASE) + 0x1C, + + /*! \brief The Bitwise Inclusive Or Kernel. + * \see group_vision_function_or + */ + VX_KERNEL_OR = VX_KERNEL_BASE(VX_ID_KHRONOS, VX_LIBRARY_KHR_BASE) + 0x1D, + + /*! \brief The Bitwise Exclusive Or Kernel. + * \see group_vision_function_xor + */ + VX_KERNEL_XOR = VX_KERNEL_BASE(VX_ID_KHRONOS, VX_LIBRARY_KHR_BASE) + 0x1E, + + /*! \brief The Bitwise Not Kernel. + * \see group_vision_function_not + */ + VX_KERNEL_NOT = VX_KERNEL_BASE(VX_ID_KHRONOS, VX_LIBRARY_KHR_BASE) + 0x1F, + + /*! \brief The Pixelwise Multiplication Kernel. + * \see group_vision_function_mult + */ + VX_KERNEL_MULTIPLY = VX_KERNEL_BASE(VX_ID_KHRONOS, VX_LIBRARY_KHR_BASE) + 0x20, + + /*! \brief The Addition Kernel. + * \see group_vision_function_add + */ + VX_KERNEL_ADD = VX_KERNEL_BASE(VX_ID_KHRONOS, VX_LIBRARY_KHR_BASE) + 0x21, + + /*! \brief The Subtraction Kernel. + * \see group_vision_function_sub + */ + VX_KERNEL_SUBTRACT = VX_KERNEL_BASE(VX_ID_KHRONOS, VX_LIBRARY_KHR_BASE) + 0x22, + + /*! \brief The Warp Affine Kernel. + * \see group_vision_function_warp_affine + */ + VX_KERNEL_WARP_AFFINE = VX_KERNEL_BASE(VX_ID_KHRONOS, VX_LIBRARY_KHR_BASE) + 0x23, + + /*! \brief The Warp Perspective Kernel. 
+ * \see group_vision_function_warp_perspective + */ + VX_KERNEL_WARP_PERSPECTIVE = VX_KERNEL_BASE(VX_ID_KHRONOS, VX_LIBRARY_KHR_BASE) + 0x24, + + /*! \brief The Harris Corners Kernel. + * \see group_vision_function_harris + */ + VX_KERNEL_HARRIS_CORNERS = VX_KERNEL_BASE(VX_ID_KHRONOS, VX_LIBRARY_KHR_BASE) + 0x25, + + /*! \brief The FAST Corners Kernel. + * \see group_vision_function_fast + */ + VX_KERNEL_FAST_CORNERS = VX_KERNEL_BASE(VX_ID_KHRONOS, VX_LIBRARY_KHR_BASE) + 0x26, + + /*! \brief The Optical Flow Pyramid (LK) Kernel. + * \see group_vision_function_opticalflowpyrlk + */ + VX_KERNEL_OPTICAL_FLOW_PYR_LK = VX_KERNEL_BASE(VX_ID_KHRONOS, VX_LIBRARY_KHR_BASE) + 0x27, + + /*! \brief The Remap Kernel. + * \see group_vision_function_remap + */ + VX_KERNEL_REMAP = VX_KERNEL_BASE(VX_ID_KHRONOS, VX_LIBRARY_KHR_BASE) + 0x28, + + /*! \brief The Half Scale Gaussian Kernel. + * \see group_vision_function_scale_image + */ + VX_KERNEL_HALFSCALE_GAUSSIAN = VX_KERNEL_BASE(VX_ID_KHRONOS, VX_LIBRARY_KHR_BASE) + 0x29, + + VX_KERNEL_MAX_1_0, /*!< \internal Used for VX1.0 bounds checking in the conformance test. */ + + /* kernel added in OpenVX 1.1 */ + + /*! \brief The Laplacian Image Pyramid Kernel. + * \see group_vision_function_laplacian_pyramid + */ + VX_KERNEL_LAPLACIAN_PYRAMID = VX_KERNEL_BASE(VX_ID_KHRONOS, VX_LIBRARY_KHR_BASE) + 0x2A, + + /*! \brief The Laplacian Pyramid Reconstruct Kernel. + * \see group_vision_function_laplacian_pyramid + */ + VX_KERNEL_LAPLACIAN_RECONSTRUCT = VX_KERNEL_BASE(VX_ID_KHRONOS, VX_LIBRARY_KHR_BASE) + 0x2B, + + /*! \brief The Non Linear Filter Kernel. + * \see group_vision_function_nonlinear_filter + */ + VX_KERNEL_NON_LINEAR_FILTER = VX_KERNEL_BASE(VX_ID_KHRONOS, VX_LIBRARY_KHR_BASE) + 0x2C, + + VX_KERNEL_MAX_1_1, /*!< \internal Used for VX1.1 bounds checking in the conformance test. */ + + /* kernel added in OpenVX 1.2 */ + + /*! \brief The Match Template Kernel. + * \see group_vision_match_template + */ + VX_KERNEL_MATCH_TEMPLATE = VX_KERNEL_BASE(VX_ID_KHRONOS, VX_LIBRARY_KHR_BASE) + 0x2D, + + /*! \brief The LBP Kernel. + * \see group_lbp + */ + VX_KERNEL_LBP = VX_KERNEL_BASE(VX_ID_KHRONOS, VX_LIBRARY_KHR_BASE) + 0x2E, + + /*! \brief The hough lines probability Kernel. + * \see group_vision_hough_lines_p + */ + VX_KERNEL_HOUGH_LINES_P = VX_KERNEL_BASE(VX_ID_KHRONOS, VX_LIBRARY_KHR_BASE) + 0x2F, + + /*! \brief The tensor multiply Kernel. + * \see group_vision_function_tensor_multiply + */ + VX_KERNEL_TENSOR_MULTIPLY = VX_KERNEL_BASE(VX_ID_KHRONOS, VX_LIBRARY_KHR_BASE) + 0x30, + + /*! \brief The tensor add Kernel. + * \see group_vision_function_tensor_add + */ + VX_KERNEL_TENSOR_ADD = VX_KERNEL_BASE(VX_ID_KHRONOS, VX_LIBRARY_KHR_BASE) + 0x31, + + /*! \brief The tensor subtract Kernel. + * \see group_vision_function_tensor_subtract + */ + VX_KERNEL_TENSOR_SUBTRACT = VX_KERNEL_BASE(VX_ID_KHRONOS, VX_LIBRARY_KHR_BASE) + 0x32, + + /*! \brief The tensor table look up Kernel. + * \see group_vision_function_tensor_tablelookup + */ + VX_KERNEL_TENSOR_TABLE_LOOKUP = VX_KERNEL_BASE(VX_ID_KHRONOS, VX_LIBRARY_KHR_BASE) + 0x33, + + /*! \brief The tensor transpose Kernel. + * \see group_vision_function_tensor_transpose + */ + VX_KERNEL_TENSOR_TRANSPOSE = VX_KERNEL_BASE(VX_ID_KHRONOS, VX_LIBRARY_KHR_BASE) + 0x34, + + /*! \brief The tensor convert depth Kernel. + * \see group_vision_function_tensor_convert_depth + */ + VX_KERNEL_TENSOR_CONVERT_DEPTH = VX_KERNEL_BASE(VX_ID_KHRONOS, VX_LIBRARY_KHR_BASE) + 0x35, + + /*! \brief The tensor matrix multiply Kernel. 
+ * \see group_vision_function_tensor_matrix_multiply + */ + VX_KERNEL_TENSOR_MATRIX_MULTIPLY = VX_KERNEL_BASE(VX_ID_KHRONOS, VX_LIBRARY_KHR_BASE) + 0x36, + + /*! \brief The data object copy kernel. + * \see group_vision_function_copy + */ + VX_KERNEL_COPY = VX_KERNEL_BASE(VX_ID_KHRONOS, VX_LIBRARY_KHR_BASE) + 0x37, + + /*! \brief The non-max suppression kernel. + * \see group_vision_function_nms + */ + VX_KERNEL_NON_MAX_SUPPRESSION = VX_KERNEL_BASE(VX_ID_KHRONOS, VX_LIBRARY_KHR_BASE) + 0x38, + + /*! \brief The scalar operation kernel. + * \see group_control_flow + */ + VX_KERNEL_SCALAR_OPERATION = VX_KERNEL_BASE(VX_ID_KHRONOS, VX_LIBRARY_KHR_BASE) + 0x39, + + /*! \brief The HOG features kernel. + * \see group_vision_function_hog + */ + VX_KERNEL_HOG_FEATURES = VX_KERNEL_BASE(VX_ID_KHRONOS, VX_LIBRARY_KHR_BASE) + 0x3A, + + /*! \brief The HOG Cells kernel. + * \see group_vision_function_hog + */ + VX_KERNEL_HOG_CELLS = VX_KERNEL_BASE(VX_ID_KHRONOS, VX_LIBRARY_KHR_BASE) + 0x3B, + + /*! \brief The bilateral filter kernel. + * \see group_vision_function_bilateral_filter + */ + VX_KERNEL_BILATERAL_FILTER = VX_KERNEL_BASE(VX_ID_KHRONOS, VX_LIBRARY_KHR_BASE) + 0x3C, + + /*! \brief The select kernel. + * \see group_control_flow + */ + VX_KERNEL_SELECT = VX_KERNEL_BASE(VX_ID_KHRONOS, VX_LIBRARY_KHR_BASE) + 0x3D, + + /* insert new kernels here */ + + /*! \brief The max kernel. + * \see group_vision_function_max + */ + VX_KERNEL_MAX = VX_KERNEL_BASE(VX_ID_KHRONOS, VX_LIBRARY_KHR_BASE) + 0x3E, + /*! \brief The min kernel. + * \see group_vision_function_min + */ + VX_KERNEL_MIN = VX_KERNEL_BASE(VX_ID_KHRONOS, VX_LIBRARY_KHR_BASE) + 0x3F, + + /*! \brief The weigthed average kernel. + * \see group_vision_function_weighted_average + */ + VX_KERNEL_WEIGHTED_AVERAGE = VX_KERNEL_BASE(VX_ID_KHRONOS, VX_LIBRARY_KHR_BASE) + 0x40, + + /* insert new kernels here */ + VX_KERNEL_NN_CONVOLUTION_RELU_POOLING_LAYER = VX_KERNEL_BASE(VX_ID_VIVANTE, VX_LIBRARY_KHR_BASE) + 0x0, + + VX_KERNEL_NN_CONVOLUTION_RELU_LAYER = VX_KERNEL_BASE(VX_ID_VIVANTE, VX_LIBRARY_KHR_BASE) + 0x1, + + VX_KERNEL_NN_FULLY_CONNECTED_RELU_LAYER = VX_KERNEL_BASE(VX_ID_VIVANTE, VX_LIBRARY_KHR_BASE) + 0x2, + + //VX_KERNEL_NN_SOFTMAX_LAYER = VX_KERNEL_BASE(VX_ID_VIVANTE, VX_LIBRARY_KHR_BASE) + 0x3, + + //VX_KERNEL_NN_NORMALIZATION_LAYER = VX_KERNEL_BASE(VX_ID_VIVANTE, VX_LIBRARY_KHR_BASE) + 0x4, + + VX_KERNEL_NN_LRN_LAYER = VX_KERNEL_BASE(VX_ID_VIVANTE, VX_LIBRARY_KHR_BASE) + 0x3, + + //VX_KERNEL_NN_NORMALIZE_IMAGE_LAYER = VX_KERNEL_BASE(VX_ID_VIVANTE, VX_LIBRARY_KHR_BASE) + 0x4, + + //VX_KERNEL_NN_POOLING_LAYER = VX_KERNEL_BASE(VX_ID_VIVANTE, VX_LIBRARY_KHR_BASE) + 0x7, + + //VX_KERNEL_NN_ACTIVATION_LAYER = VX_KERNEL_BASE(VX_ID_VIVANTE, VX_LIBRARY_KHR_BASE) + 0x9, + + VX_KERNEL_NN_LEAKY = VX_KERNEL_BASE(VX_ID_VIVANTE, VX_LIBRARY_KHR_BASE) + 0x4, + + VX_KERNEL_NN_BATCH_NORM = VX_KERNEL_BASE(VX_ID_VIVANTE, VX_LIBRARY_KHR_BASE) + 0x5, + + VX_KERNEL_NN_RPN = VX_KERNEL_BASE(VX_ID_VIVANTE, VX_LIBRARY_KHR_BASE) + 0x6, + + //VX_KERNEL_NN_ROIPOOL = VX_KERNEL_BASE(VX_ID_VIVANTE, VX_LIBRARY_KHR_BASE) + 0xD, + + VX_KERNEL_NN_CONCAT2_LAYER = VX_KERNEL_BASE(VX_ID_VIVANTE, VX_LIBRARY_KHR_BASE) + 0x7, + + //VX_KERNEL_NN_CONVOLUTION_LAYER = VX_KERNEL_BASE(VX_ID_VIVANTE, VX_LIBRARY_KHR_BASE) + 0xF, + + VX_KERNEL_NN_CONCATINDEFINITE_LAYER = VX_KERNEL_BASE(VX_ID_VIVANTE, VX_LIBRARY_KHR_BASE) + 0x8, + + VX_KERNEL_NN_REORG_LAYER = VX_KERNEL_BASE(VX_ID_VIVANTE, VX_LIBRARY_KHR_BASE) + 0x9, + + //VX_KERNEL_NN_DECONVOLUTION_LAYER = VX_KERNEL_BASE(VX_ID_VIVANTE, 
VX_LIBRARY_KHR_BASE) + 0x12, + + VX_KERNEL_NN_TENSOR_DIV = VX_KERNEL_BASE(VX_ID_VIVANTE, VX_LIBRARY_KHR_BASE) + 0xA, + + VX_KERNEL_NN_L2NORMALIZE_LAYER = VX_KERNEL_BASE(VX_ID_VIVANTE, VX_LIBRARY_KHR_BASE) + 0xB, + + VX_KERNEL_NN_TENSOR_COPY = VX_KERNEL_BASE(VX_ID_VIVANTE, VX_LIBRARY_KHR_BASE) + 0xC, + + VX_KERNEL_NN_CONVOLUTION_RELU_POOLING_LAYER2 = VX_KERNEL_BASE(VX_ID_VIVANTE, VX_LIBRARY_KHR_BASE) + 0xD, + + VX_KERNEL_NN_POOLING_LAYER2 = VX_KERNEL_BASE(VX_ID_VIVANTE, VX_LIBRARY_KHR_BASE) + 0xE, + + VX_KERNEL_NN_TENSOR_REDUCE_SUM = VX_KERNEL_BASE(VX_ID_VIVANTE, VX_LIBRARY_KHR_BASE) + 0xF, + + VX_KERNEL_NN_TENSOR_PAD = VX_KERNEL_BASE(VX_ID_VIVANTE, VX_LIBRARY_KHR_BASE) + 0x10, + + VX_KERNEL_NN_LSTM_UNIT = VX_KERNEL_BASE(VX_ID_VIVANTE, VX_LIBRARY_KHR_BASE) + 0x11, + + VX_KERNEL_NN_LSTM_LAYER = VX_KERNEL_BASE(VX_ID_VIVANTE, VX_LIBRARY_KHR_BASE) + 0x12, + + VX_KERNEL_NN_REORG2_LAYER = VX_KERNEL_BASE(VX_ID_VIVANTE, VX_LIBRARY_KHR_BASE) + 0x13, + + VX_KERNEL_NN_TENSOR_ROUNDING = VX_KERNEL_BASE(VX_ID_VIVANTE, VX_LIBRARY_KHR_BASE) + 0x14, + + VX_KERNEL_NN_HASH_LUT_LAYER = VX_KERNEL_BASE(VX_ID_VIVANTE, VX_LIBRARY_KHR_BASE) + 0x15, + + VX_KERNEL_NN_LSH_PROJECTION_LAYER = VX_KERNEL_BASE(VX_ID_VIVANTE, VX_LIBRARY_KHR_BASE) + 0x16, + + VX_KERNEL_NN_TENSOR_RESHPE = VX_KERNEL_BASE(VX_ID_VIVANTE, VX_LIBRARY_KHR_BASE) + 0x17, + + VX_KERNEL_NN_LUT2_LAYER = VX_KERNEL_BASE(VX_ID_VIVANTE, VX_LIBRARY_KHR_BASE) + 0x18, + + VX_KERNEL_NN_TENSOR_SCALE = VX_KERNEL_BASE(VX_ID_VIVANTE, VX_LIBRARY_KHR_BASE) + 0x19, + + VX_KERNEL_NN_RNN_LAYER = VX_KERNEL_BASE(VX_ID_VIVANTE, VX_LIBRARY_KHR_BASE) + 0x1A, + + VX_KERNEL_NN_SOFTMAX2_LAYER = VX_KERNEL_BASE(VX_ID_VIVANTE, VX_LIBRARY_KHR_BASE) + 0x1B, + + VX_KERNEL_NN_SVDF_LAYER = VX_KERNEL_BASE(VX_ID_VIVANTE, VX_LIBRARY_KHR_BASE) + 0x1C, + + VX_KERNEL_NN_NORMALIZATION_LAYER2 = VX_KERNEL_BASE(VX_ID_VIVANTE, VX_LIBRARY_KHR_BASE) + 0x1D, + + VX_KERNEL_NN_TENSOR_REVERSE = VX_KERNEL_BASE(VX_ID_VIVANTE, VX_LIBRARY_KHR_BASE) + 0x1E, + + VX_KERNEL_NN_TENSOR_TRANSPOSE = VX_KERNEL_BASE(VX_ID_VIVANTE, VX_LIBRARY_KHR_BASE) + 0x1F, + + VX_KERNEL_NN_TENSOR_MEAN = VX_KERNEL_BASE(VX_ID_VIVANTE, VX_LIBRARY_KHR_BASE) + 0x20, + + VX_KERNEL_NN_TENSOR_SQUEEZE = VX_KERNEL_BASE(VX_ID_VIVANTE, VX_LIBRARY_KHR_BASE) + 0x21, + + VX_KERNEL_NN_TENSOR_STRIDE_SLICE = VX_KERNEL_BASE(VX_ID_VIVANTE, VX_LIBRARY_KHR_BASE) + 0x22, + + VX_KERNEL_NN_TENSOR_PAD2 = VX_KERNEL_BASE(VX_ID_VIVANTE, VX_LIBRARY_KHR_BASE) + 0x23, + + VX_KERNEL_NN_YUV2RGB_SCALE = VX_KERNEL_BASE(VX_ID_VIVANTE, VX_LIBRARY_KHR_BASE) + 0x24, + + VX_KERNEL_NN_PRELU = VX_KERNEL_BASE(VX_ID_VIVANTE, VX_LIBRARY_KHR_BASE) + 0x25, + + VX_KERNEL_NN_GRU_UNIT_LAYER = VX_KERNEL_BASE(VX_ID_VIVANTE, VX_LIBRARY_KHR_BASE) + 0x26, + + VX_KERNEL_NN_GRU_LAYER = VX_KERNEL_BASE(VX_ID_VIVANTE, VX_LIBRARY_KHR_BASE) + 0x27, + + VX_KERNEL_NN_CONV_LSTM_UNIT_LAYER = VX_KERNEL_BASE(VX_ID_VIVANTE, VX_LIBRARY_KHR_BASE) + 0x28, + + VX_KERNEL_NN_CONV_LSTM_LAYER = VX_KERNEL_BASE(VX_ID_VIVANTE, VX_LIBRARY_KHR_BASE) + 0x29, + + VX_KERNEL_NN_FULLY_CONNECTED_LAYER = VX_KERNEL_BASE(VX_ID_VIVANTE, VX_LIBRARY_KHR_BASE) + 0x2A, + + VX_KERNEL_NN_L2NORMALIZE_LAYER2 = VX_KERNEL_BASE(VX_ID_VIVANTE, VX_LIBRARY_KHR_BASE) + 0x2B, + + VX_KERNEL_NN_CONVOLUTION_RELU_POOLING_ADD_LAYER2 = VX_KERNEL_BASE(VX_ID_VIVANTE, VX_LIBRARY_KHR_BASE) + 0x2C, + + VX_KERNEL_NN_LUT_LAYER2 = VX_KERNEL_BASE(VX_ID_VIVANTE, VX_LIBRARY_KHR_BASE) + 0x2D, + + VX_KERNEL_NN_CONVOLUTION_RELU_POOLING_MULTIPLY_LAYER2 = VX_KERNEL_BASE(VX_ID_VIVANTE, VX_LIBRARY_KHR_BASE) + 0x2E, + + VX_KERNEL_NN_BATCH_GEMM = 
VX_KERNEL_BASE(VX_ID_VIVANTE, VX_LIBRARY_KHR_BASE) + 0x2F, + + VX_KERNEL_NN_CONV_3D_LAYER = VX_KERNEL_BASE(VX_ID_VIVANTE, VX_LIBRARY_KHR_BASE) + 0x30, + + VX_KERNEL_NN_DECONV_3D_LAYER = VX_KERNEL_BASE(VX_ID_VIVANTE, VX_LIBRARY_KHR_BASE) + 0x31, + + VX_KERNEL_STREAM_PROCESSOR = VX_KERNEL_BASE(VX_ID_VIVANTE, VX_LIBRARY_KHR_BASE) + 0x32, + + VX_KERNEL_NN_BATCH_GEMM_RELU_POOLING_LAYER = VX_KERNEL_BASE(VX_ID_VIVANTE, VX_LIBRARY_KHR_BASE) + 0x33, + + VX_KERNEL_NN_FUSED_SP_LAYER = VX_KERNEL_BASE(VX_ID_VIVANTE, VX_LIBRARY_KHR_BASE) + 0x34, + + VX_KERNEL_NN_CONVOLUTION_RELU_POOLING_SP_LAYER = VX_KERNEL_BASE(VX_ID_VIVANTE, VX_LIBRARY_KHR_BASE) + 0x35, + + VX_KERNEL_NN_LAYER_NORMALIZATION_LAYER = VX_KERNEL_BASE(VX_ID_VIVANTE, VX_LIBRARY_KHR_BASE) + 0x36, + + VX_KERNEL_NN_INSTANCE_NORMALIZATION_SP_LAYER = VX_KERNEL_BASE(VX_ID_VIVANTE, VX_LIBRARY_KHR_BASE) + 0x37, + + VX_KERNEL_NN_GROUP_NORMALIZATION_SP_LAYER = VX_KERNEL_BASE(VX_ID_VIVANTE, VX_LIBRARY_KHR_BASE) + 0x38, + + VX_KERNEL_NN_LOGICAL_OPS_SP_LAYER = VX_KERNEL_BASE(VX_ID_VIVANTE, VX_LIBRARY_KHR_BASE) + 0x39, + + VX_KERNEL_NN_LOGICAL_NOT_SP_LAYER = VX_KERNEL_BASE(VX_ID_VIVANTE, VX_LIBRARY_KHR_BASE) + 0x40, + + VX_KERNEL_NN_RELATIONAL_SP_LAYER = VX_KERNEL_BASE(VX_ID_VIVANTE, VX_LIBRARY_KHR_BASE) + 0x41, + + VX_KERNEL_NN_TENSOR_REDUCE_MAX = VX_KERNEL_BASE(VX_ID_VIVANTE, VX_LIBRARY_KHR_BASE) + 0x42, + + VX_KERNEL_NN_MAXIMUM_SP_LAYER = VX_KERNEL_BASE(VX_ID_VIVANTE, VX_LIBRARY_KHR_BASE) + 0x43, + + VX_KERNEL_NN_MINIMUM_SP_LAYER = VX_KERNEL_BASE(VX_ID_VIVANTE, VX_LIBRARY_KHR_BASE) + 0x44, + + VX_KERNEL_NN_TENSOR_SELECT_SP_LAYER = VX_KERNEL_BASE(VX_ID_VIVANTE, VX_LIBRARY_KHR_BASE) + 0x45, + + VX_KERNEL_NN_REDUCE_SUM_SP_LAYER = VX_KERNEL_BASE(VX_ID_VIVANTE, VX_LIBRARY_KHR_BASE) + 0x46, + + VX_KERNEL_NN_GRU_CELL_ACTIVATION_Z_H_SP_LAYER = VX_KERNEL_BASE(VX_ID_VIVANTE, VX_LIBRARY_KHR_BASE) + 0x47, + + VX_KERNEL_NN_GRU_CELL_H_TIMES_ACTIVATION_R_SP_LAYER = VX_KERNEL_BASE(VX_ID_VIVANTE, VX_LIBRARY_KHR_BASE) + 0x48, + + VX_KERNEL_NN_GRU_CELL_RESET_AFTER_ACTIVATION_SP_LAYER = VX_KERNEL_BASE(VX_ID_VIVANTE, VX_LIBRARY_KHR_BASE) + 0x49, + + VX_KERNEL_NN_LSTM_ACTIVATION_SP_LAYER = VX_KERNEL_BASE(VX_ID_VIVANTE, VX_LIBRARY_KHR_BASE) + 0x50, + + VX_KERNEL_MAX_1_2, /*!< \internal Used for VX1.2 bounds checking in the conformance test. */ +}; + +#ifdef __cplusplus +} +#endif + +#endif /* _OPEN_VISION_LIBRARY_KERNELS_H_ */ diff --git a/unified-tina/inc/VX/vx_khr_cnn.h b/unified-tina/inc/VX/vx_khr_cnn.h new file mode 100644 index 0000000..743a595 --- /dev/null +++ b/unified-tina/inc/VX/vx_khr_cnn.h @@ -0,0 +1,8 @@ +#ifndef _VX_KHR_CNN_H_ +#define _VX_KHR_CNN_H_ + +#define OPENVX_KHR_CNN "vx_khr_cnn" + +#include + +#endif diff --git a/unified-tina/inc/VX/vx_khr_compatible.h b/unified-tina/inc/VX/vx_khr_compatible.h new file mode 100644 index 0000000..921416a --- /dev/null +++ b/unified-tina/inc/VX/vx_khr_compatible.h @@ -0,0 +1,374 @@ +#ifndef __VX_KHR_COMPATIBLE_H__ +#define __VX_KHR_COMPATIBLE_H__ +/* + VX_DECONVOLUTION_WEIGHT_LAYOUT_COMPATIBLE_KHRONOS is used to distingush deconvolution weight layout + [value] + 0: weight_layout is whnc + 1: weight_layout is whcn +*/ +#ifndef VX_DECONVOLUTION_WEIGHT_LAYOUT_COMPATIBLE_KHRONOS +#define VX_DECONVOLUTION_WEIGHT_LAYOUT_COMPATIBLE_KHRONOS 1 +#endif +/* + VX_CONVERT_POLICY_WRAP_ENABLE is used to differentiate two overflow_policys(VX_CONVERT_POLICY_WRAP and VX_CONVERT_POLICY_SAT) + [value] + 0: both overflow_policys considered as VX_CONVERT_POLICY_SAT + 1: overflow_policy is determined by arguments. 
+*/ +#ifndef VX_CONVERT_POLICY_WRAP_ENABLE +#define VX_CONVERT_POLICY_WRAP_ENABLE 1 +#endif + +#ifndef VX_13_NN_COMPATIBLITY +#define VX_13_NN_COMPATIBLITY 1 +#endif +/* + VX_L2NORM_AXIS_PARAMETER_SUPPORT is used to declare that L2NORMALIZE can support axis parameter + [value] + 0: not support + 1: support +*/ +#ifndef VX_L2NORM_AXIS_PARAMETER_SUPPORT +#define VX_L2NORM_AXIS_PARAMETER_SUPPORT 1 +#endif +/* + VX_SOFTMAX_AXIS_PARAMETER_SUPPORT is used to declare that SOFTAMX can support axis parameter + [value] + 0: not support + 1: support +*/ +#ifndef VX_SOFTMAX_AXIS_PARAMETER_SUPPORT +#define VX_SOFTMAX_AXIS_PARAMETER_SUPPORT 1 +#endif +/* + VX_NORMALIZATION_AXIS_PARAMETER_SUPPORT is used to declare that NORMALIZATION can support axis parameter + [value] + 0: not support + 1: support +*/ +#ifndef VX_NORMALIZATION_AXIS_PARAMETER_SUPPORT +#define VX_NORMALIZATION_AXIS_PARAMETER_SUPPORT 1 +#endif +/* + VX_ACTIVATION_EXT_SUPPORT is used to declare that ACTIVATION can support swish and hswish + [value] + 0: not support + 1: support +*/ +#ifndef VX_ACTIVATION_EXT_SUPPORT +#define VX_ACTIVATION_EXT_SUPPORT 1 +#endif + +/* + VX_HARDWARE_CAPS_PARAMS_EXT_SUPPORT is used to query more hardware parameter such as shader sub-group size. + [value] + 0: not support + 1: support +*/ +#ifndef VX_HARDWARE_CAPS_PARAMS_EXT_SUPPORT +#define VX_HARDWARE_CAPS_PARAMS_EXT_SUPPORT 1 +#endif + +/* + VX_VA40_EXT_SUPPORT is used to declare that openvx can support VA40. + [value] + 0: not support + 1: support +*/ +#ifndef VX_VA40_EXT_SUPPORT +#define VX_VA40_EXT_SUPPORT 0 +#endif + +/* + VX_USER_LOOKUP_TABLE_SUPPORT is used to declare that openvx can support user lookuptable. + [value] + 0: not support + 1: support +*/ +#ifndef VX_USER_LOOKUP_TABLE_SUPPORT +#define VX_USER_LOOKUP_TABLE_SUPPORT 1 +#endif + +/* +VX_PRELOAD_CONST_TENSOR_SUPPORT is used to declare that openvx can support preload weight/bias and const tensor + [value] + 0: not support + 1: support(NN conv and TP FC weightbias, and SH const tensor) +*/ +#ifndef VX_PRELOAD_CONST_TENSOR_SUPPORT +#define VX_PRELOAD_CONST_TENSOR_SUPPORT 1 +#endif + +/* +VX_CREATE_TENSOR_SUPPORT_PHYSICAL is used to declare that openvx can support physical address for vxCreateTensorFromHandle + [value] + 0: not support + 1: support +*/ +#ifndef VX_CREATE_TENSOR_SUPPORT_PHYSICAL +#define VX_CREATE_TENSOR_SUPPORT_PHYSICAL 1 +#endif + +/* + VX_GRAPH_PREEMPTION_SUPPORT is used to declare that openvx can support different graph preemption function. + [value] + 0: not support + 1: support +*/ +#ifndef VX_GRAPH_PREEMPTION_SUPPORT +#define VX_GRAPH_PREEMPTION_SUPPORT 1 +#endif + +/* +VX_BATCH_GEMM_API_SUPPORT is used to declare that vsi openvx driver can support vxBatchGemmNode API to transform gemm to convolution + [value] + 0: not support + 1: support +*/ +#ifndef VX_BATCH_GEMM_API_SUPPORT +#define VX_BATCH_GEMM_API_SUPPORT 1 +#endif + +/* +VX_CONV_3D_API_SUPPORT is used to declare that vsi openvx driver can support conv3d by vxConv3dLayer API. + [value] + 0: not support + 1: support +*/ +#ifndef VX_CONV_3D_API_SUPPORT +#define VX_CONV_3D_API_SUPPORT 1 +#endif + +/* +VX_DECONV_3D_API_SUPPORT is used to declare that vsi openvx driver can support deconv3d by vxDeconv3dLayer API. + [value] + 0: not support + 1: support +*/ +#ifndef VX_DECONV_3D_API_SUPPORT +#define VX_DECONV_3D_API_SUPPORT 1 +#endif + +/* + VX_PAD_CONST_SUPPORT is used to declare that openvx can support pad_const for tensorpad and convolution. 
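Because these feature macros are compile-time constants, application code typically gates optional behaviour with the preprocessor. A minimal sketch of that pattern follows; the choice of VX_SOFTMAX_AXIS_PARAMETER_SUPPORT is arbitrary, and the include path assumes the header is installed under VX/ as in this package.

#include <VX/vx_khr_compatible.h>

static int softmax_axis_available(void)
{
#if defined(VX_SOFTMAX_AXIS_PARAMETER_SUPPORT) && VX_SOFTMAX_AXIS_PARAMETER_SUPPORT
    return 1;   /* the driver accepts an explicit axis parameter for softmax */
#else
    return 0;   /* fall back to the default axis behaviour */
#endif
}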
+ [value] + 0: not support + 1: support +*/ +#ifndef VX_PAD_CONST_SUPPORT +#define VX_PAD_CONST_SUPPORT 1 +#endif + +/* + VX_TENSOR_STRIDE_X_BITS_SUPPORT is used to declare that openvx can support tensor which bits of stride in x dimension is not an integer number of bytes. + [value] + 0: not support + 1: support +*/ +#ifndef VX_TENSOR_STRIDE_X_BITS_SUPPORT +#define VX_TENSOR_STRIDE_X_BITS_SUPPORT 1 +#endif + +/* +VX_REMOVE_RESHAPE_SUPPORT is used to declare if graph opt support to remove reshape op, if support, it's not need to remove reshape in ovxlib. + 0: not support + 1: support +*/ +/* +#ifndef VX_REMOVE_RESHAPE_SUPPORT +#define VX_REMOVE_RESHAPE_SUPPORT 0 +#endif +*/ + +/* +VX_STREAM_PROCESSOR_SUPPORT is used to declare that vsi openvx driver can support vxStreamProcessorNode API + [value] + 0: not support + 1: support +*/ +#ifndef VX_STREAM_PROCESSOR_SUPPORT +#define VX_STREAM_PROCESSOR_SUPPORT 1 +#endif + +/* + VX_TENSOR_MEMORY_CONNECT_DMA_CHANNEL is used to declare that this tensor connect to fixed DMA channel. + [value] + 0: not support + 1: support +*/ +#ifndef VX_TENSOR_MEMORY_CONNECT_DMA_CHANNEL +#define VX_TENSOR_MEMORY_CONNECT_DMA_CHANNEL 1 +#endif + +/* + VX_SCALE_EXTRA_PARAMETER_SUPPORT is used to declare that RESIZE can support align_cornor and half_pixel_center parameter + [value] + 0: not support + 1: support +*/ +#ifndef VX_SCALE_EXTRA_PARAMETER_SUPPORT +#define VX_SCALE_EXTRA_PARAMETER_SUPPORT 1 +#endif + +/* + VX_INVALIDATE_HANDLE_SUPPORT is used to declare that we refined vxSwapTensorHandle API to follow KHR OpenVX 1.3 spec: tensor don't maintain handle internally if new_ptr is NULL. + [value] + 0: not support + 1: support +*/ +#ifndef VX_INVALIDATE_HANDLE_SUPPORT +#define VX_INVALIDATE_HANDLE_SUPPORT 1 +#endif + +/* + VX_ACTIVATION_EXT2_SUPPORT is used to declare that ACTIVATION can support sign, hard_sigmoid, neg, clip, exp, sin, cos, + log, mish, gelu, hgelu, elu, selu, celu, rcp, softsign, atan, atanh, acosh, inverse sigmoid, round and erf. + [value] + 0: not support + 1: support +*/ +#ifndef VX_ACTIVATION_EXT2_SUPPORT +#define VX_ACTIVATION_EXT2_SUPPORT 1 +#endif + +/* + VX_TENSORVIEW_ON_ANY_DIM is used to declare that ovxlib can do optimization for all concat node(all dimision) to tensor view if possiable, not only channel. + [value] + 0: disable + 1: enable +*/ +#ifndef VX_TENSORVIEW_ON_ANY_DIM +#define VX_TENSORVIEW_ON_ANY_DIM 0 +#endif + +/* +VX_DEPTH2SPACE_CRD_MODE_SUPPORT is used to declare that SPACE2DEPTH can support CRD mode + [value] + 0: not support + 1: support +*/ +#ifndef VX_DEPTH2SPACE_CRD_MODE_SUPPORT +#define VX_DEPTH2SPACE_CRD_MODE_SUPPORT 1 +#endif + +/* + VX_LAYER_NORMALIZATION_VX_SUPPORT is used to declare driver support layer normalization layer. + [value] + 0: not support + 1: support +*/ +#ifndef VX_LAYER_NORMALIZATION_VX_SUPPORT +#define VX_LAYER_NORMALIZATION_VX_SUPPORT 1 +#endif + +/* + VX_LAYER_NORMALIZATION_VX_SUPPORT is used to declare driver support layer normalization layer. + [value] + 0: not support + 1: support +*/ +#ifndef VX_INSTANCE_NORMALIZATION_VX_SUPPORT +#define VX_INSTANCE_NORMALIZATION_VX_SUPPORT 1 +#endif + +/* + VX_GROUP_NORMALIZATION_VX_SUPPORT is used to declare driver support layer normalization layer. + [value] + 0: not support + 1: support +*/ +#ifndef VX_GROUP_NORMALIZATION_VX_SUPPORT +#define VX_GROUP_NORMALIZATION_VX_SUPPORT 1 +#endif + +/* + VX_LOGICAL_VX_SUPPORT is used to declare driver support layer logical related layer. 
+ [value] + 0: not support + 1: support +*/ +#ifndef VX_LOGICAL_VX_SUPPORT +#define VX_LOGICAL_VX_SUPPORT 1 +#endif + +/* + VX_RELATIONAL_OPS_VX_SUPPORT is used to declare driver support layer relational related layer. + [value] + 0: not support + 1: support +*/ +#ifndef VX_RELATIONAL_OPS_VX_SUPPORT +#define VX_RELATIONAL_OPS_VX_SUPPORT 1 +#endif + +/* + VX_REDUCE_MAX_VX_SUPPORT is used to declare driver support layer reduce max layer. + [value] + 0: not support + 1: support +*/ +#ifndef VX_REDUCE_MAX_VX_SUPPORT +#define VX_REDUCE_MAX_VX_SUPPORT 1 +#endif + +/* + VX_REDUCE_MEAN_VX_SUPPORT is used to declare driver support layer reduce mean layer. + [value] + 0: not support + 1: support +*/ +#ifndef VX_REDUCE_MEAN_VX_SUPPORT +#define VX_REDUCE_MEAN_VX_SUPPORT 1 +#endif + +/* + VX_REDUCE_SUM_VX_SUPPORT is used to declare driver support layer reduce sum layer. + [value] + 0: not support + 1: support +*/ +#ifndef VX_REDUCE_SUM_VX_SUPPORT +#define VX_REDUCE_SUM_VX_SUPPORT 1 +#endif + +/* + VX_MAX_MIN_IMUM_VX_SUPPORT is used to declare driver support maximum and minimum layer. + [value] + 0: not support + 1: support +*/ +#ifndef VX_MAX_MIN_IMUM_VX_SUPPORT +#define VX_MAX_MIN_IMUM_VX_SUPPORT 1 +#endif + +/* + VX_TENSOR_SELECR_VX_SUPPORT is used to declare driver support tensor select layer. + [value] + 0: not support + 1: support +*/ +#ifndef VX_TENSOR_SELECT_VX_SUPPORT +#define VX_TENSOR_SELECT_VX_SUPPORT 1 +#endif + +/* + VX_GRU_CELL_VX_SUPPORT is used to declare driver support gru cell layer. + [value] + 0: not support + 1: support +*/ +#ifndef VX_GRU_CELL_VX_SUPPORT +#define VX_GRU_CELL_VX_SUPPORT 1 +#endif + +/* + VX_LSTM_ACTIVATION_SUPPORT is used to declare driver support gru cell layer. + [value] + 0: not support + 1: support +*/ +#ifndef VX_LSTM_ACTIVATION_SUPPORT +#define VX_LSTM_ACTIVATION_SUPPORT 1 +#endif + +#endif /* __VX_KHR_COMPATIBLE_H__ */ diff --git a/unified-tina/inc/VX/vx_khr_dot.h b/unified-tina/inc/VX/vx_khr_dot.h new file mode 100644 index 0000000..a3f638e --- /dev/null +++ b/unified-tina/inc/VX/vx_khr_dot.h @@ -0,0 +1,42 @@ +/* + + * Copyright (c) 2012-2017 The Khronos Group Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef _VX_KHR_DOT_H_ +#define _VX_KHR_DOT_H_ + +#define OPENVX_KHR_DOT "vx_khr_dot" + +#include + +#ifdef __cplusplus +extern "C" { +#endif + +/*! \brief Exports a single graph to a dotfile. + * \param [in] graph The graph to export. + * \param [in] dotfile The name of the file to write to. + * \param [in] showData If true, data objects will be listed in the graph too. + * \see http://www.graphviz.com + */ +vx_status vxExportGraphToDot(vx_graph g, vx_char dotfile[], vx_bool showData); + +#ifdef __cplusplus +} +#endif + +#endif + diff --git a/unified-tina/inc/VX/vx_khr_icd.h b/unified-tina/inc/VX/vx_khr_icd.h new file mode 100644 index 0000000..fc44049 --- /dev/null +++ b/unified-tina/inc/VX/vx_khr_icd.h @@ -0,0 +1,80 @@ +/* + + * Copyright (c) 2017-2017 The Khronos Group Inc. 
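The vx_khr_dot extension declared above is typically a one-off debugging aid; a minimal sketch, in which the output file name and the showData choice are illustrative:

#include <VX/vx.h>
#include <VX/vx_khr_dot.h>

static void dump_graph_to_dot(vx_graph graph)
{
    /* Write the graph topology, including data objects, to "graph.dot"
     * for rendering with Graphviz. */
    vxExportGraphToDot(graph, "graph.dot", vx_true_e);
}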
+ * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/*! \file + * \defgroup group_icd OpenVX ICD Loader API + * \brief The OpenVX Installable Client Driver (ICD) Loader API. + * \details The vx_khr_icd extension provides a mechanism for vendors to implement Installable Client Driver (ICD) for OpenVX. The OpenVX ICD Loader API provides a mechanism for applications to access these vendor implementations. + */ + +#ifndef _VX_KHR_ICD_H_ +#define _VX_KHR_ICD_H_ + +#include +#include + +/*! \brief Platform handle of an implementation. + * \ingroup group_icd + */ +typedef struct _vx_platform * vx_platform; + +#ifdef __cplusplus +extern "C" { +#endif + +/*! \brief Queries list of available platforms. + * \param [in] capacity Maximum number of items that platform[] can hold. + * \param [out] platform[] List of platform handles. + * \param [out] pNumItems Number of platform handles returned. + * \return A \ref vx_status_e enumeration. + * \retval VX_SUCCESS No errors. + * \retval VX_FAILURE If no platforms are found. + * \ingroup group_icd + */ +vx_status VX_API_CALL vxIcdGetPlatforms(vx_size capacity, vx_platform platform[], vx_size * pNumItems); + +/*! \brief Queries the platform for some specific information. + * \param [in] platform The platform handle. + * \param [in] attribute The attribute to query. Use one of the following: + * \ref VX_CONTEXT_VENDOR_ID, + * \ref VX_CONTEXT_VERSION, + * \ref VX_CONTEXT_EXTENSIONS_SIZE, + * \ref VX_CONTEXT_EXTENSIONS. + * \param [out] ptr The location at which to store the resulting value. + * \param [in] size The size in bytes of the container to which \a ptr points. + * \return A \ref vx_status_e enumeration. + * \retval VX_SUCCESS No errors. + * \retval VX_ERROR_INVALID_REFERENCE If the platform is not a \ref vx_platform. + * \retval VX_ERROR_INVALID_PARAMETERS If any of the other parameters are incorrect. + * \retval VX_ERROR_NOT_SUPPORTED If the attribute is not supported on this implementation. + * \ingroup group_icd + */ +vx_status VX_API_CALL vxQueryPlatform(vx_platform platform, vx_enum attribute, void *ptr, vx_size size); + +/*! \brief Creates a \ref vx_context from a \ref vx_platform. + * \details This creates a top-level object context for OpenVX from a platform handle. + * \returns The reference to the implementation context \ref vx_context. Any possible errors + * preventing a successful creation should be checked using \ref vxGetStatus. + * \ingroup group_icd + */ +vx_context VX_API_CALL vxCreateContextFromPlatform(vx_platform platform); + +#ifdef __cplusplus +} +#endif + +#endif diff --git a/unified-tina/inc/VX/vx_khr_import_kernel.h b/unified-tina/inc/VX/vx_khr_import_kernel.h new file mode 100644 index 0000000..aa5b79e --- /dev/null +++ b/unified-tina/inc/VX/vx_khr_import_kernel.h @@ -0,0 +1,88 @@ +/* + * Copyright (c) 2012-2018 The Khronos Group Inc. 
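A minimal sketch of the ICD loader flow declared above: enumerate the available platforms and build a context from the first one. The fixed capacity of 8 is arbitrary.

#include <stddef.h>
#include <VX/vx.h>
#include <VX/vx_khr_icd.h>

static vx_context create_context_via_icd(void)
{
    vx_platform platforms[8];
    vx_size     count = 0;

    if (vxIcdGetPlatforms(8, platforms, &count) != VX_SUCCESS || count == 0)
        return NULL;   /* no ICD platform found; a caller could fall back to vxCreateContext() */

    return vxCreateContextFromPlatform(platforms[0]);
}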
+ * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and/or associated documentation files (the + * "Materials"), to deal in the Materials without restriction, including + * without limitation the rights to use, copy, modify, merge, publish, + * distribute, sublicense, and/or sell copies of the Materials, and to + * permit persons to whom the Materials are furnished to do so, subject to + * the following conditions: + * + * The above copyright notice and this permission notice shall be included + * in all copies or substantial portions of the Materials. + * + * MODIFICATIONS TO THIS FILE MAY MEAN IT NO LONGER ACCURATELY REFLECTS + * KHRONOS STANDARDS. THE UNMODIFIED, NORMATIVE VERSIONS OF KHRONOS + * SPECIFICATIONS AND HEADER INFORMATION ARE LOCATED AT + * https://www.khronos.org/registry/ + * + * THE MATERIALS ARE PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. + * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY + * CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, + * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE + * MATERIALS OR THE USE OR OTHER DEALINGS IN THE MATERIALS. + */ + +#ifndef _OPENVX_IMPORT_KERNEL_H_ +#define _OPENVX_IMPORT_KERNEL_H_ + +#include + +/*! + * \file + * \brief The OpenVX import kernel extension API. + */ +#define OPENVX_KHR_IMPORT_KERNEL "vx_khr_import_kernel" + +/*! \brief The import kernel extension library set + * \ingroup group_import_kernel + */ +#define VX_LIBRARY_KHR_IMPORT_KERNEL_EXTENSION (0x5) + +/* +define type for vxImportKernelFromURL() function +*/ +#define VX_VIVANTE_IMPORT_KERNEL_FROM_FILE "vx_vivante_file" +#define VX_VIVANTE_IMPORT_KERNEL_FROM_FOLDER "vx_vivante_folder" +#define VX_VIVANTE_IMPORT_KERNEL_FROM_LABEL "vx_vivante_label" +#define VX_VIVANTE_IMPORT_KERNEL_FROM_POINTER "vx_vivante_pointer" + +#ifdef __cplusplus +extern "C" { +#endif + +/*! \brief Import a kernel from binary specified by URL. + * + * The name of kernel parameters can be queried using the vxQueryReference API + * with vx_parameter as ref and VX_REFERENCE_NAME as attribute. + * + * \param context [in] The OpenVX context + * \param type [in] Vendor-specific identifier that indicates to the implementation + * how to interpret the url. For example, if an implementation can interpret the url + * as a file, a folder a symbolic label, or a pointer, then a vendor may choose + * to use "vx__file", "vx__folder", "vx__label", and + * "vx__pointer", respectively for this field. Container types starting + * with "vx_khr_" are reserved. Refer to vendor documentation for list of + * container types supported + * \param url [in] URL to binary container. + * + * \retval On success, a valid vx_kernel object. Calling vxGetStatus with the return value + * as a parameter will return VX_SUCCESS if the function was successful. + * + * \ingroup group_import_kernel + */ +VX_API_ENTRY vx_kernel VX_API_CALL vxImportKernelFromURL( + vx_context context, + const vx_char * type, + const vx_char * url + ); + +#ifdef __cplusplus +} +#endif + +#endif + diff --git a/unified-tina/inc/VX/vx_khr_interp.h b/unified-tina/inc/VX/vx_khr_interp.h new file mode 100644 index 0000000..befe4ae --- /dev/null +++ b/unified-tina/inc/VX/vx_khr_interp.h @@ -0,0 +1,38 @@ +/* + + * Copyright (c) 2012-2017 The Khronos Group Inc. 
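A minimal sketch of importing a pre-built kernel through vxImportKernelFromURL as declared above; the file path is purely illustrative, and the container types actually supported should be taken from the vendor documentation.

#include <stddef.h>
#include <VX/vx.h>
#include <VX/vx_khr_import_kernel.h>

static vx_kernel load_prebuilt_kernel(vx_context context)
{
    vx_kernel kernel = vxImportKernelFromURL(context,
                                             VX_VIVANTE_IMPORT_KERNEL_FROM_FILE,
                                             "/data/models/network.nbg");  /* hypothetical path */

    if (vxGetStatus((vx_reference)kernel) != VX_SUCCESS)
        return NULL;   /* import failed; query the returned status for details */

    return kernel;
}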
+ * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef _VX_KHR_INTERP_H_ +#define _VX_KHR_INTERP_H_ + +/*! \brief The Interpolation Type Query Extension. + * \file + */ + +#define OPENVX_KHR_INTERP "vx_khr_interpolation" + +#include + +/*! \brief Additional interpolation types */ +enum vx_interpolation_type_ext_e { + /*! \brief Bicubic interpolation method */ + VX_INTERPOLATION_BICUBIC = VX_ENUM_BASE(VX_ID_KHRONOS, VX_ENUM_INTERPOLATION) + 0x3, + /*! \brief Mipmapping interpolation method */ + VX_INTERPOLATION_MIPMAP = VX_ENUM_BASE(VX_ID_KHRONOS, VX_ENUM_INTERPOLATION) + 0x4, +}; + +#endif + diff --git a/unified-tina/inc/VX/vx_khr_ix.h b/unified-tina/inc/VX/vx_khr_ix.h new file mode 100644 index 0000000..7a97d9c --- /dev/null +++ b/unified-tina/inc/VX/vx_khr_ix.h @@ -0,0 +1,158 @@ +/* + * Copyright (c) 2012-2020 The Khronos Group Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef _OPENVX_IMPORT_EXPORT_H_ +#define _OPENVX_IMPORT_EXPORT_H_ + +/*! + * \file + * \brief The OpenVX Export and Import extension API. + */ + +#define OPENVX_KHR_IX "vx_khr_ix" + +#include + +#ifdef __cplusplus +extern "C" { +#endif + +/*============================================================================= +Export to host memory +=============================================================================*/ + +/*! \brief Exports selected objects to memory in a vendor-specific format.\n + * + * \details A list of references in the given context is supplied to this function, and all information + * required to re-create these is stored in memory in such a way that those objects may be re-created + * with the corresponding import function, according to the usage specified by the *uses* parameter[*REQ*].\n + * The information must be context independent in that it may be written to external storage for later + * retreival with another instantiation of a compatible implementation[*REQ*].\n + * The list of objects to export may contain only valid references (i.e. vxGetStatus() will return VX_SUCCESS) + * to vx_graph and non-virtual data objects or the function will fail[*REQ*]. 
+ * (Specifically not vx_context, vx_import, vx_node, vx_kernel, vx_parameter or vx_meta_format)\n + * Some node creation functions take C parameters rather than OpenVX data objects (such as the *gradient_size* + * parameter of \ref vxHarrisCornersNode that is provided as a vx_int32), because these are intended + * to be fixed at node creation time; nevertheless OpenVX data objects may be assigned to them, for example if + * the \ref vxCreateGenericNode API is used. + * A data object corresponding to a node parameter that is intended to be fixed at node creation time must not be + * in the list of exported objects nor attached as a graph parameter or the export operation will fail[*REQ*].\n + * The *uses* array specifies how the objects in the corresponding *refs* array will be exported. A data object + * will always have its meta-data (e.g. dimensions and format of an image) exported, and optionally + * may have its data (e.g. pixel values) exported, and additionally you can decide whether the importing + * application will create data objects to replace those attached to graphs, or if the implementation will + * automatically create them: + * - \ref VX_IX_USE_APPLICATION_CREATE \n + * Export sufficient data to check that an application-supplied + * object is compatible when the data is later imported[*REQ*]. + * \note This value must be given for images created from handles, or the the export operation + * will fail[*REQ*] + * - \ref VX_IX_USE_EXPORT_VALUES\n + * Export complete information (for example image data or value of a + * scalar)[*REQ*]. + * - \ref VX_IX_USE_NO_EXPORT_VALUES\n + * Export meta-data only; the importing application will set values + * as applicable[*REQ*] + * + * The values in *uses* are applicable only for data objects and are ignored for vx_graph objects[*REQ*].\n + * If the list *refs* contains vx_graph objects, these graphs will be verified during the export operation and the export operation will fail if verification fails; when successfully exported graphs are subsequently imported they will appear as verified [*REQ*].\n + * \note The implementation may also choose to re-verify any previously verified graphs and apply + * optimisations based upon which references are to be exported and how.\n + * Any data objects attached to a graph that are hidden, i.e. their references are not in the list *refs*, + * may be treated by the implementation as virtual objects, since they can never be visible when the graph is + * subsequently imported.\n + * Note that imported graphs cannot become unverified. Attempts to change the + * graph that might normally cause the graph to be unverified, e.g. calling + * vxSetGraphParameterByIndex with an object with different metadata, will fail.\n + * The implementation should make sure that all permissible changes of exported objects are possible + * without re-verification. 
For example: + * - A uniform image may be swapped for a non-uniform image, so corresponding optimisations should be + * inhibited if a uniform image appears in the *refs* list + * - An image that is a region of interest of another image may be similarly replaced by any other image of + * matching size and format, and vice-versa + * + * If a graph is exported that has delays registered for auto-aging, then this information is also + * exported[*REQ*].\n + * If the function is called with NULL for any of its parameters, this is an error [*REQ*].\n + * The reference counts of objects as visible to the calling application will not be affected + * by calling this function [*REQ*].\n + * The export operation will fail if more than one object whose reference is listed at *refs* + * has been given the same non-zero length name (via \ref vxSetReferenceName)[*REQ*].\n + * If a graph listed for export has any graph parameters not listed at *refs*, then the + * export operation will fail[*REQ*]. + * \note The order of the references supplied in the *refs* array will be the order in which the + * framwork will supply references for the corresponding import operation with \ref vxImportObjectsFromMemory.\n + * The same length of *uses* array, containing the same values, and the same value of *numrefs*, must be supplied + * for the corresponding import operation. + * + * For objects not listed in *refs*, the following rules apply: + * 1. In any one graph, if an object is not connected as an output of a node in a graph being exported + * then its data values will be exported (for subsequent import)[*REQ*]. + * 2. Where the object in (1) is a composite object (such as a pyramid) then rule (1) applies to + * all of its sub-objects[*REQ*]. + * 3. Where the object in (1) is a sub-object such as a region of interest, and the composite object + * (in this case the parent image) does not meet the conditions of rule (1), then rule (1) applies + * to the sub-object only[*REQ*]. + * \param [in] context context from which to export objects, must be valid [*REQ*]. + * \param [in] numrefs number of references to export [*REQ*]. + * \param [in] refs references to export. This is an array of length numrefs populated with + * the references to export[*REQ*]. + * \param [in] uses how to export the references. This is an array of length numrefs containing + * values as described above[*REQ*]. + * \param [out] ptr returns pointer to binary buffer. On error this is set to NULL[*REQ*]. + * \param [out] length number of bytes at \*ptr. On error this is set to zero[*REQ*]. + * \return A \ref vx_status value. + * \retval VX_SUCCESS If no errors occurred and the objects were sucessfully exported[*REQ*]. + * An error is indicated when the return value is not VX_SUCCESS.\n + * An implementation may provide several different return values to give useful diagnostic + * information in the event of failure to export, but these are not required to indicate + * possible recovery mechanisms, and for safety critical use assume errors are not recoverable. + * \post \ref vxReleaseExportedMemory is used to deallocate the memory. + * \ingroup group_import + */ + +VX_API_ENTRY vx_status VX_API_CALL vxExportObjectsToMemory( + vx_context context, + vx_size numrefs, + const vx_reference *refs, + const vx_enum * uses, + const vx_uint8 ** ptr, + vx_size * length); + +/*! \brief Releases memory allocated for a binary export when it is no longer required. + * \details This function releases memory allocated by \ref vxExportObjectsToMemory[*REQ*]. 
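A minimal sketch of the export path described above, pairing vxExportObjectsToMemory with vxReleaseExportedMemory (documented next); write_blob is a hypothetical callback that persists the data before the memory is released, and the two-entry refs/uses layout is illustrative.

#include <stddef.h>
#include <VX/vx.h>
#include <VX/vx_import.h>
#include <VX/vx_khr_ix.h>

static vx_status export_graph_and_input(vx_context context, vx_graph graph, vx_image input,
                                        vx_status (*write_blob)(const vx_uint8 *data, vx_size size))
{
    vx_reference refs[2] = { (vx_reference)graph, (vx_reference)input };
    const vx_enum uses[2] = { VX_IX_USE_EXPORT_VALUES,        /* ignored for graph objects */
                              VX_IX_USE_APPLICATION_CREATE }; /* the importer will supply the image */
    const vx_uint8 *blob = NULL;
    vx_size length = 0;

    vx_status status = vxExportObjectsToMemory(context, 2, refs, uses, &blob, &length);
    if (status == VX_SUCCESS)
    {
        status = write_blob(blob, length);         /* copy the blob out before releasing it */
        vxReleaseExportedMemory(context, &blob);   /* blob is set to NULL on success */
    }
    return status;
}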
+ * \param [in] context The context for which \ref vxExportObjectsToMemory was called[*REQ*]. + * \param [in,out] ptr A pointer previously set by calling \ref vxExportObjectsToMemory[*REQ*]. + * The function will fail if *ptr does not contain an address of memory previously + * allocated by \ref vxExportObjectsToMemory[*REQ*]. + * \post After returning from sucessfully from this function \*ptr is set to NULL[*REQ*]. + * \return A \ref vx_status value. + * \retval VX_SUCCESS If no errors occurred and the memory was sucessfully released[*REQ*].\n + * An error is indicated when the return value is not VX_SUCCESS[*REQ*].\n + * An implementation may provide several different return values to give useful diagnostic + * information in the event of failure to export, but these are not required to indicate + * possible recovery mechanisms, and for safety critical use assume errors are not recoverable. + * \pre \ref vxExportObjectsToMemory is used to allocate the memory. + * \ingroup group_import + */ +VX_API_ENTRY vx_status VX_API_CALL vxReleaseExportedMemory( + vx_context context, const vx_uint8 ** ptr); + +#ifdef __cplusplus +} +#endif + +#endif diff --git a/unified-tina/inc/VX/vx_khr_nn.h b/unified-tina/inc/VX/vx_khr_nn.h new file mode 100644 index 0000000..c29b0e8 --- /dev/null +++ b/unified-tina/inc/VX/vx_khr_nn.h @@ -0,0 +1,2635 @@ +/* + + * Copyright (c) 2012-2017 The Khronos Group Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef _VX_KHR_NN_H_ +#define _VX_KHR_NN_H_ + +/*! + * \file + * \brief The Khronos Extension for Deep Convolutional Networks Functions. + * + * \defgroup group_cnn Extension: Deep Convolutional Networks API + * \brief Convolutional Network Nodes. + */ + +#define OPENVX_KHR_NN "vx_khr_nn" + +#include +#include +#include + + +#ifdef __cplusplus +extern "C" { +#endif + +/*TODO: check it for OpenVX 1.2*/ +//#if defined(OPENVX_CNN_1_0) +//#undef OPENVX_CNN_1_1 +//#endif + +enum vx_context_attribute_internal_type_e +{ + VX_CONTEXT_DEVICE_COUNT_VIV = VX_ATTRIBUTE_BASE(VX_ID_VIVANTE, VX_TYPE_CONTEXT) + 0x0, +}; + +enum vx_graph_attribute_internal_type_e +{ + /*! \brief Queries a graph for its device index (read-write. Use a \ref vx_uint32 parameter. */ + VX_GRAPH_DEVICE_INDEX_VIV = VX_ATTRIBUTE_BASE(VX_ID_VIVANTE, VX_TYPE_GRAPH) + 0x0, + /*! \brief Queries a graph for its weight data pre-loading size in vip sram (read-write. Use a \ref vx_uint32 parameter. */ + VX_GRAPH_VIP_SRAM_PRE_LOAD = VX_ATTRIBUTE_BASE(VX_ID_VIVANTE, VX_TYPE_GRAPH) + 0x1, + /*! \brief Queries a graph for its weight data pre-loading size in axi sram (read-write. Use a \ref vx_uint32 parameter. */ + VX_GRAPH_AXI_SRAM_PRE_LOAD = VX_ATTRIBUTE_BASE(VX_ID_VIVANTE, VX_TYPE_GRAPH) + 0x2, + /*! \brief Queries a graph for its running priority (read-write. Use a \ref vx_uint32 parameter. 
*/ + VX_GRAPH_PRIORITY_VALUE_VIV = VX_ATTRIBUTE_BASE(VX_ID_VIVANTE, VX_TYPE_GRAPH) + 0x3, + VX_GRAPH_PSI_EXTRATOR_PARAMETER = VX_ATTRIBUTE_BASE(VX_ID_VIVANTE, VX_TYPE_GRAPH) + 0x4, + VX_GRAPH_PSI_FILLER_PARAMETER = VX_ATTRIBUTE_BASE(VX_ID_VIVANTE, VX_TYPE_GRAPH) + 0x5, + VX_GRAPH_DENOISE_POSTPROCESS_PARAMETER = VX_ATTRIBUTE_BASE(VX_ID_VIVANTE, VX_TYPE_GRAPH) + 0x6, + VX_GRAPH_DATA_COMPRESSION_RATIO = VX_ATTRIBUTE_BASE(VX_ID_VIVANTE, VX_TYPE_GRAPH) + 0x7, + VX_GRAPH_ISP_EMULATION_PARAMETER = VX_ATTRIBUTE_BASE(VX_ID_VIVANTE, VX_TYPE_GRAPH) + 0x8, + VX_GRAPH_PROCESS_FPS = VX_ATTRIBUTE_BASE(VX_ID_VIVANTE, VX_TYPE_GRAPH) + 0x9, + /*This parameter.come from customer, not used by unify driver but lite driver*/ + VX_GRAPH_CUSTOMER_PARAMETER_FOR_NBG = VX_ATTRIBUTE_BASE(VX_ID_VIVANTE, VX_TYPE_GRAPH) + 0xA, +}; + +/*! \brief Size Alignment of User Memory + * \0x40 64Byte Align + * \0x1000 4k Align + */ +#define VX_WRAP_USER_MEMORY_SIZE_ALIGNMENT (0x40) + +/*! \brief OpenVX Version Compatibility set*/ +#define VX_KHR_COMPATIBILITY (0x1) + +/*============================================================================== +CONVOLUTIONAL_NETWORK structs and enums +=============================================================================*/ +/*! \brief The Neural Network Extension Library Set + * \ingroup group_cnn + */ +#define VX_LIBRARY_KHR_NN_EXTENSION (0x1) + +/*! \brief The list of Neural Network Extension Kernels. + * \ingroup group_cnn + */ +enum vx_kernel_nn_ext_e { + /*! \brief The Neural Network Extension convolution Kernel. + * \see group_cnn + */ + VX_KERNEL_CONVOLUTION_LAYER = VX_KERNEL_BASE(VX_ID_KHRONOS, VX_LIBRARY_KHR_NN_EXTENSION) + 0x0, + /*! \brief The Neural Network Extension fully connected Kernel. + * \see group_cnn + */ + VX_KERNEL_FULLY_CONNECTED_LAYER = VX_KERNEL_BASE(VX_ID_KHRONOS, VX_LIBRARY_KHR_NN_EXTENSION) + 0x1, + /*! \brief The Neural Network Extension pooling Kernel. + * \see group_cnn + */ + VX_KERNEL_POOLING_LAYER = VX_KERNEL_BASE(VX_ID_KHRONOS, VX_LIBRARY_KHR_NN_EXTENSION) + 0x2, + /*! \brief The Neural Network Extension softmax Kernel. + * \see group_cnn + */ + VX_KERNEL_SOFTMAX_LAYER = VX_KERNEL_BASE(VX_ID_KHRONOS, VX_LIBRARY_KHR_NN_EXTENSION) + 0x3, + /*! \brief The Neural Network Extension normalization Kernel. + * \see group_cnn + */ + VX_KERNEL_NORMALIZATION_LAYER = VX_KERNEL_BASE(VX_ID_KHRONOS, VX_LIBRARY_KHR_NN_EXTENSION) + 0x4, + /*! \brief The Neural Network Extension activation Kernel. + * \see group_cnn + */ + VX_KERNEL_ACTIVATION_LAYER = VX_KERNEL_BASE(VX_ID_KHRONOS, VX_LIBRARY_KHR_NN_EXTENSION) + 0x5, + /*! \brief The Neural Network POI Pooling Kernel. + * \see group_cnn + */ + VX_KERNEL_ROI_POOLING_LAYER = VX_KERNEL_BASE(VX_ID_KHRONOS, VX_LIBRARY_KHR_NN_EXTENSION) + 0x6, + /*! \brief The Neural Network Extension Deconvolution Kernel. + * \see group_cnn + */ + VX_KERNEL_DECONVOLUTION_LAYER = VX_KERNEL_BASE(VX_ID_KHRONOS, VX_LIBRARY_KHR_NN_EXTENSION) + 0x7, + /*! \brief The Neural Network Extension local response normalization Kernel (with bias). + * \see group_cnn + */ + VX_KERNEL_LOCAL_RESPONSE_NORMALIZATION_LAYER = VX_KERNEL_BASE(VX_ID_KHRONOS, VX_LIBRARY_KHR_NN_EXTENSION) + 0x8, +}; + +/*! \brief NN extension type enums. 
+ * \ingroup group_cnn + */ +enum vx_nn_enum_e +{ + VX_ENUM_NN_ROUNDING_TYPE = 0x1A, + VX_ENUM_NN_POOLING_TYPE = 0x1B, + VX_ENUM_NN_NORMALIZATION_TYPE = 0x1C, + VX_ENUM_NN_ACTIVATION_FUNCTION_TYPE = 0x1D, + /* 0x1E, 0x1F and 0x20 are reserved for VX_ENUM_CLASSIFIER_MODEL, VX_ENUM_IX_USE and VX_ENUM_SCALAR_OPERATION*/ + VX_ENUM_NN_LAYER_TYPE = 0x21, +}; + +/*! \brief down scale rounding. + * \details Due to different scheme of downscale size calculation in the various training frameworks. Implementation must support 2 rounding methods for down scale calculation. + * The floor and the ceiling. In convolution and pooling functions. + * Relevant when input size is even. + * \ingroup group_cnn + */ +enum vx_nn_rounding_type_e +{ + /*! \brief floor rounding */ + VX_NN_DS_SIZE_ROUNDING_FLOOR = VX_ENUM_BASE(VX_ID_KHRONOS, VX_ENUM_NN_ROUNDING_TYPE) + 0x0, + /*! \brief ceil rounding */ + VX_NN_DS_SIZE_ROUNDING_CEILING = VX_ENUM_BASE(VX_ID_KHRONOS, VX_ENUM_NN_ROUNDING_TYPE) + 0x1 +}; + + +/*! \brief The Neural Network pooling type list. + * \details kind of pooling done in pooling function + * \ingroup group_cnn + */ +enum vx_nn_pooling_type_e +{ + /*! \brief max pooling*/ + VX_NN_POOLING_MAX = VX_ENUM_BASE(VX_ID_KHRONOS, VX_ENUM_NN_POOLING_TYPE) + 0x0, + /*! \brief average pooling*/ + VX_NN_POOLING_AVG = VX_ENUM_BASE(VX_ID_KHRONOS, VX_ENUM_NN_POOLING_TYPE) + 0x1, + /*! \brief l2 pooling*/ + VX_NN_POOLING_L2 = VX_ENUM_BASE(VX_ID_VIVANTE, VX_ENUM_NN_POOLING_TYPE) + 0x0, + /*! \brief average pooling for android*/ + VX_NN_POOLING_AVG_ANDROID = VX_ENUM_BASE(VX_ID_VIVANTE, VX_ENUM_NN_POOLING_TYPE) + 0x1, +}; + + +/*! \brief The Neural Network normalization type list. + * \ingroup group_cnn + */ +enum vx_nn_norm_type_e +{ + /*! \brief normalization is done on same IFM*/ + VX_NN_NORMALIZATION_SAME_MAP = VX_ENUM_BASE(VX_ID_KHRONOS, VX_ENUM_NN_NORMALIZATION_TYPE) + 0x0, + /*! \brief Normalization is done across different IFMs*/ + VX_NN_NORMALIZATION_ACROSS_MAPS = VX_ENUM_BASE(VX_ID_KHRONOS, VX_ENUM_NN_NORMALIZATION_TYPE) + 0x1, +}; + + +/*! \brief The Neural Network activation functions list. + * \details + * + *
Function name, mathematical definition and parameters:
 + * - logistic: \f$ f(x)=1/(1+e^{-x}) \f$
 + * - hyperbolic tangent: \f$ f(x)=a\cdot tanh(b\cdot x) \f$ (parameters a, b; type VX_FLOAT32)
 + * - relu: \f$ f(x)=max(0,x) \f$
 + * - bounded relu: \f$ f(x)=min(a,max(0,x)) \f$ (parameter a; type VX_FLOAT32)
 + * - soft relu: \f$ f(x)=log(1+e^{x}) \f$
 + * - abs: \f$ f(x)=\mid x\mid \f$
 + * - square: \f$ f(x)=x^2 \f$
 + * - square root: \f$ f(x)=\sqrt{x} \f$
 + * - linear: \f$ f(x)=ax+b \f$ (parameters a, b; type VX_FLOAT32)
 + *
+ * \ingroup group_cnn + */ +enum vx_nn_activation_function_e +{ + VX_NN_ACTIVATION_LOGISTIC = VX_ENUM_BASE(VX_ID_KHRONOS, VX_ENUM_NN_ACTIVATION_FUNCTION_TYPE) + 0x0, + VX_NN_ACTIVATION_HYPERBOLIC_TAN = VX_ENUM_BASE(VX_ID_KHRONOS, VX_ENUM_NN_ACTIVATION_FUNCTION_TYPE) + 0x1, + VX_NN_ACTIVATION_RELU = VX_ENUM_BASE(VX_ID_KHRONOS, VX_ENUM_NN_ACTIVATION_FUNCTION_TYPE) + 0x2, + VX_NN_ACTIVATION_BRELU = VX_ENUM_BASE(VX_ID_KHRONOS, VX_ENUM_NN_ACTIVATION_FUNCTION_TYPE) + 0x3, + VX_NN_ACTIVATION_SOFTRELU = VX_ENUM_BASE(VX_ID_KHRONOS, VX_ENUM_NN_ACTIVATION_FUNCTION_TYPE) + 0x4, + VX_NN_ACTIVATION_ABS = VX_ENUM_BASE(VX_ID_KHRONOS, VX_ENUM_NN_ACTIVATION_FUNCTION_TYPE) + 0x5, + VX_NN_ACTIVATION_SQUARE = VX_ENUM_BASE(VX_ID_KHRONOS, VX_ENUM_NN_ACTIVATION_FUNCTION_TYPE) + 0x6, + VX_NN_ACTIVATION_SQRT = VX_ENUM_BASE(VX_ID_KHRONOS, VX_ENUM_NN_ACTIVATION_FUNCTION_TYPE) + 0x7, + VX_NN_ACTIVATION_LINEAR = VX_ENUM_BASE(VX_ID_KHRONOS, VX_ENUM_NN_ACTIVATION_FUNCTION_TYPE) + 0x8, + VX_NN_ACTIVATION_LEAKYRELU = VX_ENUM_BASE(VX_ID_VIVANTE, VX_ENUM_NN_ACTIVATION_FUNCTION_TYPE) + 0x0, + VX_NN_ACTIVATION_RELU6 = VX_ENUM_BASE(VX_ID_VIVANTE, VX_ENUM_NN_ACTIVATION_FUNCTION_TYPE) + 0x1, + VX_NN_ACTIVATION_RELU1 = VX_ENUM_BASE(VX_ID_VIVANTE, VX_ENUM_NN_ACTIVATION_FUNCTION_TYPE) + 0x2, + VX_NN_ACTIVATION_RSQRT = VX_ENUM_BASE(VX_ID_VIVANTE, VX_ENUM_NN_ACTIVATION_FUNCTION_TYPE) + 0x3, + VX_NN_ACTIVATION_LEAKYRELU_MAX_POOLING = VX_ENUM_BASE(VX_ID_VIVANTE, VX_ENUM_NN_ACTIVATION_FUNCTION_TYPE) + 0x4, + VX_NN_ACTIVATION_SWISH = VX_ENUM_BASE(VX_ID_VIVANTE, VX_ENUM_NN_ACTIVATION_FUNCTION_TYPE) + 0x5, + VX_NN_ACTIVATION_HSWISH = VX_ENUM_BASE(VX_ID_VIVANTE, VX_ENUM_NN_ACTIVATION_FUNCTION_TYPE) + 0x6, + VX_NN_ACTIVATION_CUSTOM = VX_ENUM_BASE(VX_ID_VIVANTE, VX_ENUM_NN_ACTIVATION_FUNCTION_TYPE) + 0x7, + VX_NN_ACTIVATION_NONE = VX_ENUM_BASE(VX_ID_VIVANTE, VX_ENUM_NN_ACTIVATION_FUNCTION_TYPE) + 0x8, + VX_NN_ACTIVATION_SIGN_VSI = VX_ENUM_BASE(VX_ID_VIVANTE, VX_ENUM_NN_ACTIVATION_FUNCTION_TYPE) + 0x9, + VX_NN_ACTIVATION_HSIGMOID_VSI = VX_ENUM_BASE(VX_ID_VIVANTE, VX_ENUM_NN_ACTIVATION_FUNCTION_TYPE) + 0xa, + VX_NN_ACTIVATION_NEG_VSI = VX_ENUM_BASE(VX_ID_VIVANTE, VX_ENUM_NN_ACTIVATION_FUNCTION_TYPE) + 0xb, + VX_NN_ACTIVATION_CLIP_VSI = VX_ENUM_BASE(VX_ID_VIVANTE, VX_ENUM_NN_ACTIVATION_FUNCTION_TYPE) + 0xc, + VX_NN_ACTIVATION_EXP_VSI = VX_ENUM_BASE(VX_ID_VIVANTE, VX_ENUM_NN_ACTIVATION_FUNCTION_TYPE) + 0xd, + VX_NN_ACTIVATION_SIN_VSI = VX_ENUM_BASE(VX_ID_VIVANTE, VX_ENUM_NN_ACTIVATION_FUNCTION_TYPE) + 0xe, + VX_NN_ACTIVATION_COS_VSI = VX_ENUM_BASE(VX_ID_VIVANTE, VX_ENUM_NN_ACTIVATION_FUNCTION_TYPE) + 0xf, + VX_NN_ACTIVATION_LOG_VSI = VX_ENUM_BASE(VX_ID_VIVANTE, VX_ENUM_NN_ACTIVATION_FUNCTION_TYPE) + 0x10, + VX_NN_ACTIVATION_MISH_VSI = VX_ENUM_BASE(VX_ID_VIVANTE, VX_ENUM_NN_ACTIVATION_FUNCTION_TYPE) + 0x11, + VX_NN_ACTIVATION_GELU_VSI = VX_ENUM_BASE(VX_ID_VIVANTE, VX_ENUM_NN_ACTIVATION_FUNCTION_TYPE) + 0x12, + VX_NN_ACTIVATION_HGELU_VSI = VX_ENUM_BASE(VX_ID_VIVANTE, VX_ENUM_NN_ACTIVATION_FUNCTION_TYPE) + 0x13, + VX_NN_ACTIVATION_ELU_VSI = VX_ENUM_BASE(VX_ID_VIVANTE, VX_ENUM_NN_ACTIVATION_FUNCTION_TYPE) + 0x14, + VX_NN_ACTIVATION_SELU_VSI = VX_ENUM_BASE(VX_ID_VIVANTE, VX_ENUM_NN_ACTIVATION_FUNCTION_TYPE) + 0x15, + VX_NN_ACTIVATION_CELU_VSI = VX_ENUM_BASE(VX_ID_VIVANTE, VX_ENUM_NN_ACTIVATION_FUNCTION_TYPE) + 0x16, + VX_NN_ACTIVATION_RECIPROCAL_VSI = VX_ENUM_BASE(VX_ID_VIVANTE, VX_ENUM_NN_ACTIVATION_FUNCTION_TYPE) + 0x17, + VX_NN_ACTIVATION_SOFTSIGN_VSI = VX_ENUM_BASE(VX_ID_VIVANTE, VX_ENUM_NN_ACTIVATION_FUNCTION_TYPE) + 0x18, + 
VX_NN_ACTIVATION_ATAN_VSI = VX_ENUM_BASE(VX_ID_VIVANTE, VX_ENUM_NN_ACTIVATION_FUNCTION_TYPE) + 0x19, + VX_NN_ACTIVATION_ATANH_VSI = VX_ENUM_BASE(VX_ID_VIVANTE, VX_ENUM_NN_ACTIVATION_FUNCTION_TYPE) + 0x1a, + VX_NN_ACTIVATION_ACOSH_VSI = VX_ENUM_BASE(VX_ID_VIVANTE, VX_ENUM_NN_ACTIVATION_FUNCTION_TYPE) + 0x1b, + VX_NN_ACTIVATION_INVERSE_SIGMOID_VSI = VX_ENUM_BASE(VX_ID_VIVANTE, VX_ENUM_NN_ACTIVATION_FUNCTION_TYPE) + 0x1c, + VX_NN_ACTIVATION_ROUND_VSI = VX_ENUM_BASE(VX_ID_VIVANTE, VX_ENUM_NN_ACTIVATION_FUNCTION_TYPE) + 0x1d, + VX_NN_ACTIVATION_ERF_VSI = VX_ENUM_BASE(VX_ID_VIVANTE, VX_ENUM_NN_ACTIVATION_FUNCTION_TYPE) + 0x1e, +}; + +/*! \brief The Convolutional network type + * \ingroup group_cnn + */ +enum vx_nn_layer_type_e +{ + /*! \brief convolution layer */ + VX_NN_CONVOLUTION_LAYER = VX_ENUM_BASE(VX_ID_KHRONOS, VX_ENUM_NN_LAYER_TYPE) + 0x0, + /*! \brief fully connected layer */ + VX_NN_FULLYCONNECTED_LAYER = VX_ENUM_BASE(VX_ID_KHRONOS, VX_ENUM_NN_LAYER_TYPE) + 0x1, +}; + +/*! \brief The pad mode list. + * \ingroup group_cnn + * \version 0.3 + */ +enum vx_pad_mode_e { + /*! \brief For nodes that support this behavior, a constant value is + * \e filled-in when accessing padding pixels. + * eg. [1,2,3,4]->C,C,[1,2,3,4]C,C + */ + VX_PAD_CONSTANT = VX_ENUM_BASE(VX_ID_VIVANTE, VX_ENUM_BORDER) + 0x0, + + /*! \brief For nodes that support this behavior, a relicateion of the nearest + * edge pixels value is given for padding pixels. + * eg. [1,2,3,4]->1,1,[1,2,3,4],4,4 + */ + VX_PAD_REPLICATE = VX_ENUM_BASE(VX_ID_VIVANTE, VX_ENUM_BORDER) + 0x1, + + /*! \brief For nodes that support this behavior, a mirror of the nearest + * edge pixels value is given for padding pixels. ege is duplicate. + * eg. [1,2,3,4]->2,1,[1,2,3,4],4,3 + */ + VX_PAD_MIRROR_SYMMETRIC = VX_ENUM_BASE(VX_ID_VIVANTE, VX_ENUM_BORDER) + 0x2, + + /*! \brief For nodes that support this behavior, a mirror of the nearest + * edge pixels value is given for padding pixels. ege is not duplicate. + * eg. [1,2,3,4]->3,2,[1,2,3,4],3,2 + */ + VX_PAD_MIRROR_REFLECT = VX_ENUM_BASE(VX_ID_VIVANTE, VX_ENUM_BORDER) + 0x3, +}; + +/*! \brief The Quantized format list. + * \ingroup group_tensor + * \version 0.3 + */ +enum vx_quantized_format_e +{ + /*! \brief Non-quantized data. */ + VX_QUANT_NONE = 0x0, + /*! \brief A quantization data type which specifies the fixed point position for whole tensor. */ + VX_QUANT_DYNAMIC_FIXED_POINT = 0x1, + /*! \brief A quantization data type which has scale value and zero point to match with TF and Android NN API for whole tensor. */ + VX_QUANT_AFFINE_SCALE = 0x2, + /*! \brief A quantization data type which has scale value and zero point to match with TF and Android NN API for per channel of tensor. */ + VX_QUANT_AFFINE_SCALE_PER_CHANNEL = 0x3, +}; + +/*! \brief The rank mode of tensor memory. + * \ingroup group_tensor + * \version 0.4 + */ +enum vx_tensor_rank_type_e +{ + /*! \brief rank with weight,height,channel,batch */ + VX_TENSOR_RANK_WHCN = 0, + + /*! \brief rank with channel,weight,height,batch */ + VX_TENSOR_RANK_CWHN, + + /*! \brief rank with size, batch */ + VX_TENSOR_RANK_SN, +}; + +/*! \brief The attribute of tensor. + * \ingroup group_tensor + * \version 0.4 + */ +enum vx_tensor_priority_e +{ + /*! \brief no special requirement */ + VX_TENSOR_DEFAULT = 0, + + /*! \brief 2nd input(reference) */ + /*VX_TENSOR_2ND_INPUT_FOR = 1,*/ + VX_TENSOR_FOR_GRAPH_REFERENCE = 1, +}; + + +/*! \brief The attribute of tensor memory. 
+ * \ingroup group_tensor + * \version 0.4 + */ +enum vx_tensor_memory_attribute_e +{ + /*! \brief no special requirement */ + VX_TENSOR_MEMORY_DEFAULT = 0, + + VX_TENSOR_MEMORY_CONNECT_DMA_CHANNEL_0 = (0x1 << 0), + VX_TENSOR_MEMORY_CONNECT_DMA_CHANNEL_1 = (0x1 << 1), + VX_TENSOR_MEMORY_CONNECT_DMA_CHANNEL_2 = (0x1 << 2), + VX_TENSOR_MEMORY_CONNECT_DMA_CHANNEL_3 = (0x1 << 3), + VX_TENSOR_MEMORY_CONNECT_DMA_CHANNEL_4 = (0x1 << 4), + /* + VX_TENSOR_MEMORY_CONNECT_DMA_CHANNEL_5 = (0x1 << VX_DMA5_IN_ISP_OCM_PSI), + VX_TENSOR_MEMORY_CONNECT_DMA_CHANNEL_6 = (0x1 << VX_DMA6_DDR_DECOMPRESS), + VX_TENSOR_MEMORY_CONNECT_DMA_CHANNEL_7 = (0x1 << VX_DMA7_POSTOUT_OCM_ISP), + VX_TENSOR_MEMORY_CONNECT_DMA_CHANNEL_8 = (0x1 << VX_DMA8_COMPRESS_DDR), + VX_TENSOR_MEMORY_CONNECT_DMA_CHANNEL_9 = (0x1 << VX_DMA9_ISP_PATTERN_GENERATOR), + VX_TENSOR_MEMORY_CONNECT_DMA_CHANNEL_10 = (0x1 << VX_DMA10_ISP_CHECKSUM_GENERATOR), + */ + /*! \brief DMA transfer data to VIP and enable circular buffer */ +#if !VX_TENSOR_MEMORY_CONNECT_DMA_CHANNEL + VX_TENSOR_MEMORY_ENABLE_CIRCULAR_BY_DMA = 0xFFFFFFFF, +#endif +}; + +enum vx_dma_extrator_pad_mode_e +{ + /*! \brief no special requirement */ + VX_DMA_EXTRATOR_PAD_CONST = 0, + + /*! \brief DMA extrator pad with nearest edge */ + VX_DMA_EXTRATOR_PAD_WITH_NEAREAST_EDGE = 1, +}; + + +/*! \brief The precision of tensor. + * \ingroup group_tensor + * \version 0.4 + */ +enum vx_tensor_precision_type_e +{ + /*! \brief auto adapter precision */ + VX_TENSOR_PRECISION_AUTO = 0, + + /*! \brief high precision */ + VX_TENSOR_PRECISION_HIGH, +}; + +/*! \brief Specifies a static or dynamic tensor. + * \ingroup group_tensor + * \version 0.4 + */ +enum vx_tensor_lifetime_type_e +{ + /*! \brief static tensor */ + VX_TENSOR_LIFE_TIME_STATIC = 0, + + /*! \brief dynamic tensor */ + VX_TENSOR_LIFE_TIME_DYNAMIC, +}; + +/*! \brief Specifies depthtospace mode + * \ingroup group_cnn + */ +enum vx_nn_depth_to_space_mode_e +{ + /*! \brief DCR(default) for depth-column-row order re-arrangement */ + VX_NN_DEPTH_TO_SPACE_DCR = 0x0, + /*! \brief CRD for column-row-depth order re-arrangement */ + VX_NN_DEPTH_TO_SPACE_CRD, +}; + +typedef struct _vx_nn_convolution_3d_params_t +{ + vx_int32 padding_w_left; /*!< \brief Number of elements added at each side in the left of w dimension of the input. */ + vx_int32 padding_w_right; /*!< \brief Number of elements added at each side in the right of w dimension of the input. */ + vx_int32 padding_h_top; /*!< \brief Number of elements added at each side in the top of h dimension of the input. */ + vx_int32 padding_h_bottom; /*!< \brief Number of elements added at each side in the bottom of h dimension of the input. */ + vx_int32 padding_d_front; /*!< \brief Number of elements added at each side in the front of d dimension of the input. */ + vx_int32 padding_d_rear; /*!< \brief Number of elements added at each side in the rear of d dimension of the input. */ + + vx_int32 stride_w; /*!< \brief skip w jump for down scale. */ + vx_int32 stride_h; /*!< \brief skip h jump for down scale. */ + vx_int32 stride_d; /*!< \brief skip d jump for down scale. */ + vx_int32 dilation_w; /*!< \brief "inflate" the kernel by inserting zeros between the kernel elements in the w direction. The value is the number of zeros to insert.*/ + vx_int32 dilation_h; /*!< \brief "inflate" the kernel by inserting zeros between the kernel elements in the h direction. 
The value is the number of zeros to insert.*/ + vx_int32 dilation_d; /*!< \brief "inflate" the kernel by inserting zeros between the kernel elements in the d direction. The value is the number of zeros to insert.*/ + + vx_enum pad_mode; /*!< \brief A VX_TYPE_ENUM of the \ref vx_pad_mode_e enumeration. */ + vx_scalar pad_const; /*!< \brief pad const value if setting pad mode to const, the const value is base value, not quantized value. */ + + vx_enum overflow_policy; /*!< \brief A VX_TYPE_ENUM of the vx_convert_policy_e enumeration. */ + vx_enum rounding_policy; /*!< \brief A VX_TYPE_ENUM of the vx_round_policy_e enumeration. */ + vx_enum down_scale_size_rounding; /*!< \brief Rounding method for calculating output dimensions. See \ref vx_nn_rounding_type_e */ + + vx_int32 depth_multiplier; /*!< \brief depthwise multiplier value, if 0, means convolution, elsewise(>=1), the convolution is depthwiseconvolution. */ +}vx_nn_convolution_3d_params_t; + +typedef struct _vx_nn_deconvolution_3d_params_t +{ + vx_int32 padding_w_left; /*!< \brief Number of elements subtracted at left of the w dimension of the input. */ + vx_int32 padding_w_right; /*!< \brief Number of elements subtracted at right of the w dimension of the input. */ + vx_int32 padding_h_top; /*!< \brief Number of elements subtracted at top of the h dimension of the input. */ + vx_int32 padding_h_bottom; /*!< \brief Number of elements subtracted at bottom of the h dimension of the input. */ + vx_int32 padding_d_front; /*!< \brief Number of elements subtracted at front of the d dimension of the input. */ + vx_int32 padding_d_rear; /*!< \brief Number of elements subtracted at end of the d dimension of the input. */ + + vx_int32 stride_w; /*!< \brief inter 0 between input elements at w direction for down scale. */ + vx_int32 stride_h; /*!< \brief inter 0 between input elements at h direction for down scale. */ + vx_int32 stride_d; /*!< \brief inter 0 between input elements at d direction for down scale. */ + + vx_int32 a_w; /*!< \brief user-specified quantity used to distinguish between the \f$upscale_w\f$ different possible output sizes. */ + vx_int32 a_h; /*!< \brief user-specified quantity used to distinguish between the \f$upscale_h\f$ different possible output sizes. */ + vx_int32 a_d; /*!< \brief user-specified quantity used to distinguish between the \f$upscale_d\f$ different possible output sizes. */ + + vx_int32 channel_group; /*!< \brief Number of separate groups for deconvolution (Range: 0 <= groups <= size of z dimension of input; size of z dimension of input can be divided by groups) */ + + vx_enum overflow_policy; /*!< \brief A VX_TYPE_ENUM of the vx_convert_policy_e enumeration. */ + vx_enum rounding_policy; /*!< \brief A VX_TYPE_ENUM of the vx_round_policy_e enumeration. */ + vx_enum down_scale_size_rounding; /*!< \brief Rounding method for calculating output dimensions. See \ref vx_nn_rounding_type_e */ +}vx_nn_deconvolution_3d_params_t; + +/*============================================================================== + TENSOR DATA FUNCTIONS +=============================================================================*/ +#if VX_VA40_EXT_SUPPORT +/*! \brief Create an opaque reference to a tensor view object. + * \details Not guaranteed to exist until the vx_graph containing it has been verified. + * \param [in] context The reference to the implementation context. + * \param [in] view_array_start a vx_size array of start values of the view. + * \param [in] view_array_end a vx_size array of end values of the view. 
+ * \param [in] numViewDimensions number of dimensions of view_array_start and view_array_end. + * \return A tensor data view reference or zero when an error is encountered. + * \ingroup group_tensor + */ +VX_API_ENTRY vx_tensor_view VX_API_CALL vxCreateTensorView(vx_context context, vx_size* view_array_start, vx_size* view_array_end, vx_size numViewDimensions); +#else +/*! \brief Create an opaque reference to a tensor view object. + * \details Not guaranteed to exist until the vx_graph containing it has been verified. + * \param [in] context The reference to the implementation context. + * \param [in] view_array_start a vx_uint32 array of start values of the view. + * \param [in] view_array_end a vx_uint32 array of end values of the view. + * \param [in] numViewDimensions number of dimensions of view_array_start and view_array_end. + * \return A tensor data view reference or zero when an error is encountered. + * \ingroup group_tensor + */ +VX_API_ENTRY vx_tensor_view VX_API_CALL vxCreateTensorView(vx_context context, vx_uint32 *view_array_start, vx_uint32 * view_array_end, vx_uint8 numViewDimensions); +#endif + +/*! \brief Releases a reference to a tensor data view object. +* The object may not be garbage collected until its total reference count is zero. +* \param [in] tensor_view The pointer to the tensor data view to release. +* \post After returning from this function the reference is zeroed. +* \return A vx_status_e enumeration. +* \retval VX_SUCCESS No errors. +* \retval VX_SUCCESS Success +* \retval * An error occurred. See vx_status_e. +* \ingroup group_tensor +*/ +VX_API_ENTRY vx_status VX_API_CALL vxReleaseTensorView(vx_tensor_view *tensor_view); + +#if VX_VA40_EXT_SUPPORT +/*! \brief Create an opaque reference to a tensor addressing object. +* \details Not guaranteed to exist until the vx_graph containing it has been verified. +* \param [in] context The reference to the implementation context. +* \param [in] addressing_array_dimension a vx_size array of sLength of patch in all dimensions in elements. +* \param [in] addressing_array_stride a vx_size arrayStride in all dimensions in bytes. +* \param [in] numViewDimensions number of dimensions of view_array_start and view_array_end. +* \return A tensor data view reference or zero when an error is encountered. +* \ingroup group_tensor +*/ +VX_API_ENTRY vx_tensor_addressing VX_API_CALL vxCreateTensorAddressing(vx_context context, vx_size* addressing_array_dimension, vx_size* addressing_array_stride, vx_size numViewDimensions); +#else +/*! \brief Create an opaque reference to a tensor addressing object. +* \details Not guaranteed to exist until the vx_graph containing it has been verified. +* \param [in] context The reference to the implementation context. +* \param [in] addressing_array_dimension a vx_uint32 array of sLength of patch in all dimensions in elements. +* \param [in] addressing_array_stride a vx_uint32 arrayStride in all dimensions in bytes. +* \param [in] numViewDimensions number of dimensions of view_array_start and view_array_end. +* \return A tensor data view reference or zero when an error is encountered. +* \ingroup group_tensor +*/ +VX_API_ENTRY vx_tensor_addressing VX_API_CALL vxCreateTensorAddressing(vx_context context, vx_uint32 * addressing_array_dimension, vx_uint32 * addressing_array_stride, vx_uint8 numViewDimensions); +#endif + +/*! \brief Releases a reference to a tensor data addressing object. +* The object may not be garbage collected until its total reference count is zero. 
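+*
+* A minimal usage sketch of creating and releasing a tensor addressing object (illustrative;
+* it assumes the non-VX_VA40_EXT_SUPPORT prototypes above, a valid context, and a 2x3 patch of
+* byte-sized elements; error checking is omitted):
+* \code
+* vx_uint32 dims[2]    = { 2, 3 };   // element count per dimension
+* vx_uint32 strides[2] = { 1, 2 };   // byte stride per dimension
+* vx_tensor_addressing addr = vxCreateTensorAddressing(context, dims, strides, 2);
+* // ... use addr, e.g. together with vxCreateTensorFromHandle2 ...
+* vxReleaseTensorAddressing(&addr);  // addr is zeroed on success
+* \endcode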
+* \param [in] tensor_addr The pointer to the tensor data addressing to release. +* \post After returning from this function the reference is zeroed. +* \return A vx_status_e enumeration. +* \retval VX_SUCCESS No errors. +* \retval VX_SUCCESS Success +* \retval * An error occurred. See vx_status_e. +* \ingroup group_tensor +*/ +VX_API_ENTRY vx_status VX_API_CALL vxReleaseTensorAddressing(vx_tensor_addressing *tensor_addr); + +/*! \brief Creates an array of tensors + * \param [in] context The reference to the overall Context. + * \param [in] count Number of Objects to create in the ObjectArray. + * \param [in] tensor* The tensors array that need add to the ObjectArray. + * + * \returns An ObjectArray reference \ref vx_object_array. Any possible errors preventing a + * successful creation should be checked using \ref vxGetStatus. Data objects are not initialized by this function. + * + * \ingroup group_object_array + */ +VX_API_ENTRY vx_object_array VX_API_CALL vxCreateTensorObjectArray(vx_context context, vx_uint32 count, vx_tensor* tensor); + +typedef union _vx_tensor_quant_param +{ + struct + { + vx_int8 fixed_point_pos; /*!< \brief Specifies the fixed point position when the input element type is int16/int8, if 0 calculations are performed in integer math */ + } dfp; + + struct + { + vx_float32 scale; /*!< \brief Scale vaule for the quantized value */ + vx_int32 zeroPoint; /*!< \brief A 32 bit integer, in range [0, 255] */ + } affine; + + struct + { + vx_uint32 channelDim; /*!< \brief a 32 bit unsigned integer indicating channel dimension */ + vx_uint32 scaleCount; /*!< \brief the size of the scale array, must be equal to size[channelDim] */ + vx_float32 * scales; /*!< \brief an array of positive 32 bit floating point value. The size of the scales array must be equal to size[channelDim] */ + vx_uint32 zeroPointCount; /*!< \brief the size of the zero point array, must be equal to 0 or size[channelDim] */ + vx_int32 * zeroPoint; /*!< \brief A 32 bit integer, in range [0, 255] */ + } affinePerChannel; +}vx_tensor_quant_param; + +/*! \brief Input parameter for createTensor2 + * \ingroup group_tensor + * \version 0.3 + */ +typedef struct _vx_tensor_create_params_t +{ + vx_uint32 num_of_dims; /*!< \brief The number of dimensions specified in *sizes*/ +#if VX_VA40_EXT_SUPPORT + vx_size * sizes; /*!< \brief The pointer to an array of dimension */ +#else + vx_uint32 * sizes; /*!< \brief The pointer to an array of dimension */ +#endif + vx_enum data_format; /*!< \brief Data format for the tensor */ + vx_enum quant_format; /*!< \brief Quantized format \ref vx_quantized_format_e . */ + vx_tensor_quant_param quant_data; +} vx_tensor_create_params_t; + + +/*! \brief Creates an opaque reference to a tensor data buffer. + * \details Not guaranteed to exist until the vx_graph containing it has been verified. + * \param [in] context The reference to the implementation context. + * \param [in] tensor_create_params A pointer to the tensor create parameter\ref vx_tensor_create_params_t + * \param [in] size_of_create_params Byte size of the parameter structure + * \return A tensor data reference or zero when an error is encountered. + * \ingroup group_tensor + * \version 0.3 + */ +VX_API_ENTRY vx_tensor VX_API_CALL vxCreateTensor2(vx_context context, const vx_tensor_create_params_t* tensor_create_params, vx_size size_of_create_params); + +/*! \brief Creates an opaque reference to a tensor data buffer with no direct + * user access. This function allows setting the tensor data dimensions or data format. 
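+ *
+ * This function takes the same \ref vx_tensor_create_params_t structure as \ref vxCreateTensor2.
+ * A minimal sketch of filling it and creating a non-virtual tensor (illustrative only: the
+ * context, the 224x224 shape and the quantization values are placeholders, and error checking
+ * is omitted):
+ * \code
+ * vx_uint32 sizes[2] = { 224, 224 };
+ * vx_tensor_create_params_t params = { 0 };
+ * params.num_of_dims  = 2;
+ * params.sizes        = sizes;               // becomes a vx_size* when VX_VA40_EXT_SUPPORT is set
+ * params.data_format  = VX_TYPE_UINT8;
+ * params.quant_format = VX_QUANT_AFFINE_SCALE;
+ * params.quant_data.affine.scale     = 0.0078125f;
+ * params.quant_data.affine.zeroPoint = 128;
+ * vx_tensor t = vxCreateTensor2(context, &params, sizeof(params));
+ * \endcode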
+ * \details Virtual data objects allow users to connect various nodes within a + * graph via data references without access to that data, but they also permit the + * implementation to take maximum advantage of possible optimizations. Use this + * API to create a data reference to link two or more nodes together when the + * intermediate data are not required to be accessed by outside entities. This API + * in particular allows the user to define the tensor data format of the data without + * requiring the exact dimensions. Virtual objects are scoped within the graph + * they are declared a part of, and can't be shared outside of this scope. + * \param [in] graph The reference to the parent graph. + * \param [in] tensor_create_params A pointer to the tensor create parameter \ref vx_tensor_create_params_t + * \param [in] size_of_create_params Byte size of the parameter structure + * \return A tensor data reference or zero when an error is encountered. + * \note Passing this reference to \ref vxCopyTensorPatch will return an error. + * \ingroup group_tensor + * \version 0.3 + */ +VX_API_ENTRY vx_tensor VX_API_CALL vxCreateVirtualTensor2(vx_graph graph, const vx_tensor_create_params_t* tensor_create_params, vx_size size_of_create_params); + +/*! \brief Swap tensor handles between two tensors which are created from handles. + * \details These tensors must have the same attributes except for the tensor handle. + * For better performance, make sure the memory referenced by the tensor handle has been flushed by using \ref vxFlushHandle. + * \param [in] tensor0 The tensor whose handle will be changed to tensor1's. + * \param [in] tensor1 The tensor whose handle will be changed to tensor0's. + * \return A \ref vx_status_e enumeration. + * \retval VX_SUCCESS No errors. + * \retval VX_ERROR_INVALID_REFERENCE tensor0 or tensor1 is not a valid \ref vx_tensor reference. + * \retval VX_ERROR_INVALID_REFERENCE The tensor0 and tensor1's attributes are not the same. + * \ingroup group_tensor + *\version 0.5 + */ +VX_API_ENTRY vx_status VX_API_CALL vxSwapTensor(vx_tensor tensor0, vx_tensor tensor1); + +/*! \brief Creates a reference to a tensor object that was externally allocated. + * \param [in] context The reference to the implementation context. + * \param [in] tensor_create_params The \ref vx_tensor_create_params_t that points to a parameter structure. + * \param [in] size_of_create_params Size of parameter structure. + * \param [in] addrs The tensor patch addressing structures that define the dimension and stride of pointers. See note below. + * \param [in] ptr The logical pointer of platform-defined references to tensor data. + * \param [in] import_type \ref vx_memory_type_e. When giving \ref VX_MEMORY_TYPE_HOST + * the \a ptr is assumed to be a HOST accessible pointer to memory. + * \returns A tensor reference \ref vx_tensor. Any possible errors preventing a + * successful creation should be checked using \ref vxGetStatus. + * + * In order to release the tensor back to the application, use \ref vxSwapTensorHandle. + * + * \ingroup group_tensor + *\version 0.4 + */ +VX_API_ENTRY vx_tensor VX_API_CALL vxCreateTensorFromHandle2( + vx_context context, const vx_tensor_create_params_t* tensor_create_params, vx_size size_of_create_params, const vx_tensor_addressing addrs, + void * const ptr, vx_enum import_type); + +/*! \brief Flush the memory referenced by a reference's handle when it is ready. +* \param [in] ref The reference (image or tensor) which was created from a handle.
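+*
+* A minimal usage sketch (illustrative; my_buffer, my_tensor, input_data, input_bytes and graph
+* are placeholders, with my_tensor assumed to wrap my_buffer via \ref vxCreateTensorFromHandle2):
+* \code
+* memcpy(my_buffer, input_data, input_bytes);   // CPU fills the wrapped memory
+* vxFlushHandle((vx_reference)my_tensor);       // make the new data visible to the device
+* vxProcessGraph(graph);
+* \endcode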
+* \return A \ref vx_status_e enumeration. +* \retval VX_ERROR_INVALID_REFERENCE ref is not a valid \ref vx_tensor or \ref vx_image reference created from a handle. +*/ +VX_API_ENTRY vx_status VX_API_CALL vxFlushHandle(vx_reference ref); +/*! \brief Same as vxFlushHandle(); also added by VeriSilicon as an extension API. + */ +VX_API_ENTRY vx_status VX_API_CALL vxFlushHandleVSI(vx_reference ref); + +#if defined(VX_INVALIDATE_HANDLE_SUPPORT) && VX_INVALIDATE_HANDLE_SUPPORT +/*! \brief Invalidate the memory referenced by a reference's handle when it is ready. +* Added by VeriSilicon as an extension API. +* \param [in] ref The reference (image or tensor) which was created from a handle. +* \return A \ref vx_status_e enumeration. +* \retval VX_ERROR_INVALID_REFERENCE ref is not a valid \ref vx_tensor or \ref vx_image reference created from a handle. +*/ +VX_API_ENTRY vx_status VX_API_CALL vxInvalidateHandleVSI(vx_reference ref); +#endif + +#if VX_VA40_EXT_SUPPORT +/*! \brief Return a new tensor referencing the same memory location but with different shape. +* \param [in] tensor The input tensor data to reshape. +* \param [in] num_of_dims Size of each dimension. If one component is special value -1, +* the size of that dimension is computed so that the total size remains the same as input tensor. +* If it is [-1], then flatten is performed, which turns the tensor into 1-D. +* \param [in] sizes The size of the container to which \a num_of_dims points. +* \return A vx_tensor that has been reshaped. +* \return VX_NULL if an error occurred. +* \ingroup group_tensor +*/ +VX_API_ENTRY vx_tensor VX_API_CALL vxReshapeTensor(vx_tensor tensor, vx_size* num_of_dims, vx_size sizes); +#else +/*! \brief Return a new tensor referencing the same memory location but with different shape. +* \param [in] tensor The input tensor data to reshape. +* \param [in] num_of_dims Size of each dimension. If one component is special value -1, +* the size of that dimension is computed so that the total size remains the same as input tensor. +* If it is [-1], then flatten is performed, which turns the tensor into 1-D. +* \param [in] sizes The size of the container to which \a num_of_dims points. +* \return A vx_tensor that has been reshaped. +* \return VX_NULL if an error occurred. +* \ingroup group_tensor +*/ +VX_API_ENTRY vx_tensor VX_API_CALL vxReshapeTensor(vx_tensor tensor, vx_int32* num_of_dims, vx_uint32 sizes); +#endif + +/*! \brief Allows setting attributes on the tensor. + * \param [in] tensor The reference to the tensor on which to set the attribute. + * \param [in] attribute The attribute to set. Use a \ref vx_tensor_attribute_e enumeration. + * \param [in] ptr The pointer to the location from which to read the value. + * \param [in] size The size in bytes of the object pointed to by \a ptr. + * \return A \ref vx_status_e enumeration. + * \retval VX_SUCCESS No errors. + * \retval VX_ERROR_INVALID_REFERENCE If the tensor is not a \ref vx_tensor. + * \retval VX_ERROR_INVALID_PARAMETERS If any of the other parameters are incorrect. + * \ingroup group_tensor + */ +VX_API_ENTRY vx_status VX_API_CALL vxSetTensorAttribute(vx_tensor tensor, vx_enum attribute, const void *ptr, vx_size size); + +/*! \brief Creates an opaque reference to a tensor data buffer. + * \details The tensor is a dummy tensor which will not allocate any memory, and it cannot be reshaped or viewed. + * Not guaranteed to exist until the vx_graph containing it has been verified. + * \param [in] context The reference to the implementation context. + * \param [in] number_of_dims The number of dimensions.
+ * \param [in] dims Dimensions sizes in elements. + * \param [in] data_format The \ref vx_type_e that represents the data format of the tensor data elements. + * \return A tensor data reference or zero when an error is encountered. + * \ingroup group_tensor + * \version 0.3 + */ +VX_API_ENTRY vx_tensor VX_API_CALL vxCreateDummyTensor(vx_context context, vx_size number_of_dims, const vx_size *dims, vx_enum data_format); + + +/*! \brief The type enumeration lists all NN extension types. + * \ingroup group_cnn + */ +enum vx_nn_type_e { + VX_TYPE_NN_CONVOLUTION_PARAMS = 0x025,/*!< \brief A \ref vx_nn_convolution_params_t. */ + VX_TYPE_NN_DECONVOLUTION_PARAMS = 0x026,/*!< \brief A \ref vx_nn_deconvolution_params_t. */ + VX_TYPE_NN_ROI_POOL_PARAMS = 0x027,/*!< \brief A \ref vx_nn_roi_pool_params_t. */ +}; + +/*! \brief Input parameters for a convolution operation. + * \ingroup group_cnn + */ +typedef struct _vx_nn_convolution_params_t +{ + vx_size padding_x; /*!< \brief Number of elements added at each side in the x dimension of the input. */ + vx_size padding_y; /*!< \brief Number of elements added at each side in the y dimension of the input. */ + vx_enum overflow_policy; /*!< \brief A VX_TYPE_ENUM of the vx_convert_policy_e enumeration. */ + vx_enum rounding_policy; /*!< \brief A VX_TYPE_ENUM of the vx_round_policy_e enumeration. */ + vx_enum down_scale_size_rounding; /*!< \brief Rounding method for calculating output dimensions. See \ref vx_nn_rounding_type_e */ + vx_size dilation_x; /*!< \brief "inflate" the kernel by inserting zeros between the kernel elements in the x direction. The value is the number of zeros to insert.*/ + vx_size dilation_y; /*!< \brief "inflate" the kernel by inserting zeros between the kernel elements in the y direction. The value is the number of zeros to insert.*/ +} vx_nn_convolution_params_t; + +/*! \brief Extended input parameter structure for convolution layer + * \ingroup group_cnn + */ +typedef struct _vx_nn_convolution_params_ext_t +{ + vx_nn_convolution_params_t khr; /*!< \brief Khronos standard structure head */ + vx_size padding_x_right; /*!< \brief Number of elements added at each side in the right of x dimension of the input, + "padding_x" is for the left */ + vx_size padding_y_bottom; /*!< \brief Number of elements added at each side in the bottom of y dimension of the input. + "padding_y" is for the top */ + vx_enum pad_mode; /*!< \brief A VX_TYPE_ENUM of the \ref vx_pad_mode_e enumeration. */ + vx_scalar pad_const; /*!< \brief pad const value if setting pad mode to const, the const value is base value, not quantized value. */ +} vx_nn_convolution_params_ext_t; + +/*! \brief Input parameters for a deconvolution operation. + * \ingroup group_cnn + */ +typedef struct _vx_nn_deconvolution_params_t +{ + vx_size padding_x; /*!< \brief Number of elements subtracted at each side in the x dimension of the output. */ + vx_size padding_y; /*!< \brief Number of elements subtracted at each side in the y dimension of the output. */ + vx_enum overflow_policy; /*!< \brief A VX_TYPE_ENUM of the vx_convert_policy_e enumeration. */ + vx_enum rounding_policy; /*!< \brief A VX_TYPE_ENUM of the vx_round_policy_e enumeration. */ + vx_size a_x; /*!< \brief user-specified quantity used to distinguish between the \f$upscale_x\f$ different possible output sizes. */ + vx_size a_y; /*!< \brief user-specified quantity used to distinguish between the \f$upscale_y\f$ different possible output sizes. */ +} vx_nn_deconvolution_params_t; + +/*! 
\brief Extended input parameter for a deconvolution operation. + * \ingroup group_cnn + */ +typedef struct _vx_nn_deconvolution_params_ext_t +{ + vx_nn_deconvolution_params_t khr; /*!< \brief Khronos standard structure head \ref vx_nn_deconvolution_params_t */ + vx_size padding_x_right; /*!< \brief Number of elements subtracted at each side in the right of x dimension of the input."padding_x" is for the left */ + vx_size padding_y_bottom; /*!< \brief Number of elements subtracted at each side in the bottom of y dimension of the input. "padding_y" is for the top */ + vx_int32 channel_group; /*!< \brief Number of separate groups for deconvolution (Range: 0 <= groups <= size of z dimension of input; size of z dimension of input can be divided by groups) */ + vx_enum pad_mode; /*!< \brief A VX_TYPE_ENUM of the \ref vx_pad_mode_e enumeration. */ + vx_scalar pad_const; /*!< \brief The pad const value if setting pad mode to const, the const value is base value, not quantized value. */ +} vx_nn_deconvolution_params_ext_t; + +typedef struct _vx_nn_deconvolution_params_ext2_t +{ + vx_nn_deconvolution_params_ext_t ext; /*!< \brief Deconvolution extension structure head */ + vx_uint32 stride_x; /*!< \brief skip x jump for down scale. */ + vx_uint32 stride_y; /*!< \brief skip y jump for down scale. */ + vx_enum down_scale_size_rounding; /*!< \brief Rounding method for calculating output dimensions. See \ref vx_nn_rounding_type_e */ +} vx_nn_deconvolution_params_ext2_t; + +/*! \brief Input parameters for ROI pooling operation. + * \ingroup group_cnn + */ +typedef struct _vx_nn_roi_pool_params_t +{ + vx_enum pool_type; /*!< \brief Of type \ref vx_nn_pooling_type_e. Only \ref VX_NN_POOLING_MAX pooling is supported. */ +} vx_nn_roi_pool_params_t; + +/*! \brief Extended input parameters for ROI pooling operation. + * \ingroup group_cnn + */ +typedef struct _vx_nn_roi_pool_params_ext_t +{ + vx_nn_roi_pool_params_t khr; /*!< \brief Khronos standard structure head \ref vx_nn_roi_pool_params_t */ + vx_float32 spatial_scale; /*!< \brief The ratio of image to feature map (Range: 0 < spatial_scale <= 1) */ + vx_int32 pooled_height; /*!< \brief The height of roi pooling (Range: 0 < pool_height <= height of input_data) */ + vx_int32 pooled_width; /*!< \brief The width of roi pooling(Range: 0 < pool_height <= width of input_data) */ +} vx_nn_roi_pool_params_ext_t; + +typedef struct _vx_nn_convolution_params_ext2_t +{ + vx_nn_convolution_params_ext_t ext; /*!< \brief Convolution extension structure head */ + + vx_uint32 stride_x; /*!< \brief skip x jump for down scale. */ + vx_uint32 stride_y; /*!< \brief skip y jump for down scale. */ + + vx_int32 depth_multiplier; /*!< \brief depthwise multiplier value, if 0, means convolution, elsewise(>=1), the convolution is depthwiseconvolution. */ +} vx_nn_convolution_params_ext2_t; + +typedef struct _vx_nn_convolution_params_ext3_t +{ + vx_nn_convolution_params_ext2_t ext2; /*!< \brief Convolution extension structure head */ + + vx_bool isPPU; /*!< \brief merge convolution and relu for PPU. */ +} vx_nn_convolution_params_ext3_t; + +/*============================================================================== + NN Nodes +=============================================================================*/ +/*! \brief [Graph] Creates a Convolutional Network Convolution Layer Node. + * \details This function implement Convolutional Network Convolution layer. 
+ * For fixed-point data types, a fixed point calculation is performed with round and saturate according to the number of accumulator bits. The number of the accumulator bits are implementation defined, + * and should be at least 16.\n + * round: rounding according the vx_round_policy_e enumeration. \n + * saturate: A saturation according the vx_convert_policy_e enumeration. + * The following equation is implemented: \n + * \f$ outputs[j,k,i] = saturate(round(\sum_{l} (\sum_{m,n} inputs[j+m,k+n,l] \times weights[m,n,l,i])+biasses[j,k,i])) \f$\n + * Where \f$m,n\f$ are indexes on the convolution matrices. \f$ l\f$ is an index on all the convolutions per input.\f$ i\f$ is an index per output. + * \f$ j,k \f$ are the inputs/outputs spatial indexes. + * Convolution is done on the width and height dimensions of the \ref vx_tensor. Therefore, we use here the term x for index along the width dimension and y for index along the height dimension.\n + * before the Convolution is done, a padding with zeros of the width and height input dimensions is performed. + * Then down scale is done by picking the results according to a skip jump. The skip in the x and y is determined by the output size dimensions. + * The relation between input to output is as follows: \n + * \f$ width_{output} = round(\frac{(width_{input} + 2 * padding_x - kernel_x - (kernel_x -1) * dilation_x)}{skip_x} + 1) \f$\n + * and \n + * \f$ height_{output} = round(\frac{(height + 2 * padding_y - kernel_y - (kernel_y -1) * dilation_y)}{skip_y} + 1) \f$\n + * where \f$width\f$ is the size of the input width dimension. \f$height\f$ is the size of the input height dimension. + * \f$width_{output}\f$ is the size of the output width dimension. \f$height_{output}\f$ is the size of the output height dimension. + * \f$kernel_x\f$ and \f$kernel_y\f$ are the convolution sizes in width and height dimensions. + * skip is calculated by the relation between input and output. In case of ambiguity in the inverse calculation of the skip. The minimum solution is chosen. Skip must be a positive non zero integer. + * rounding is done according to \ref vx_convolutional_network_rounding_type_e. + * Notice that this node creation function has more parameters than the corresponding kernel. Numbering of kernel parameters (required if you create this node using the generic interface) is explicitly specified here. + * \param [in] graph The handle to the graph. + * \param [in] inputs The input tensor data. 3 lower dimensions represent a single input, all following dimensions represent number of batches, possibly nested. + * The dimension order is [width, height, #IFM, #batches].\n + * \param [in] weights [*static] Weights are 4d tensor with dimensions [kernel_x, kernel_y, #IFM, #OFM]. see \ref vxCreateTensor2 and \ref vxCreateVirtualTensor2 \n Weights data type must match the data type of the inputs. (Kernel parameter #1) + * \param [in] biases [*static] Optional, ignored if NULL. The biases, which may be shared (one per ofm) or unshared (one per ofm * output location). The possible layouts are + * either [#OFM] or [width, height, #OFM]. Biases data type must match the data type of the inputs. + * \param [in] convolution_params [static] Pointer to parameters of type \ref vx_nn_convolution_params_t. + * \param [in] size_of_convolution_params [static] Size in bytes of convolution_params. Note that this parameter is not counted as one of the kernel parameters. + * \param [out] outputs The output tensor data. 
Output will have the same number and structure of dimensions as input. Output tensor data type must be same as the inputs. + * \return vx_node. + * \returns A node reference \ref vx_node. Any possible errors preventing a + * successful creation should be checked using \ref vxGetStatus. + * \ingroup group_cnn + */ +VX_API_ENTRY vx_node VX_API_CALL vxConvolutionLayer(vx_graph graph, vx_tensor inputs, vx_tensor weights, vx_tensor biases, const vx_nn_convolution_params_t *convolution_params, vx_size size_of_convolution_params, vx_tensor outputs); + +/*! \brief [Graph] Creates a Fully connected Convolutional Network Layer Node. +* \details This function implement Fully connected Convolutional Network layers. +* In case the input and output \ref vx_tensor are signed 16. A fixed point calculation is performed with round and saturate according to the number of accumulator bits. \n +* round: rounding according the vx_round_policy_e enumeration. \n +* saturate: A saturation according the vx_convert_policy_e enumeration. +* The saturation is done based on the accumulator_bits parameter. +* According the accumulator_bits, the saturation might not be performed every operation. +* But every a specified amount of operations, +* that are suspected to saturate the accumulation bits\n +* The equation for Fully connected layer:\n +* \f$ outputs[i] = ( \sum_{j} saturate(round(inputs[j] \times weights[j,i])))+biasses[i] \f$\n +* Where \f$j\f$ is a index on the input feature and \f$i\f$ is a index on the output. +* before the fully connected is done, a padding of the input is performed. +* Then down scale is done by picking the results according to a skip jump. The skip is determined by the output size dimensions. +* The relation between input to output is as follows: +* \f$ size_{output} = round(\frac{(size_{input} + 2 * pad)}{skip} + 1) \f$\n +* where \f$size_{input}\f$ is the size of the input dimension. +* \f$size_{output}\f$ is the size of the output dimension. +* skip is calculated by the relation between input and output. +* rounding is done according to \ref vx_convolutional_network_rounding_type_e. +* \param [in] graph The handle to the graph. +* \param [in] inputs The input tensor data. There two possible input layouts: +* 1. [#IFM, #batches]. See \ref vxCreateTensor2 and \ref vxCreateVirtualTensor2. +* 2. [width, height, #IFM, #batches]. See \ref vxCreateTensor2 and \ref vxCreateVirtualTensor2\n +* In both cases number of batches are optional and may be multidimensional. +* The second option is a special case to deal with convolution layer followed by fully connected. +* The dimension order is [#IFM, #batches]. See \ref vxCreateTensor2 and \ref vxCreateVirtualTensor2. Note that batch may be multidimensional. +* \param [in] weights [*static] Number of dimensions equals dim(single input)+1. Single input dims are [width, height, #IFM], with height and #IFM being optional.\n +* \param [in] biases [*static]The biases, which may be shared (one per ofm) or unshared (one per ofm * output location). +* \param [in] pad [static] Number of elements added at each side in the input. +* \param [in] accumulator_bits [static] Is the total number of bits used during intermediate accumulation. +* \param [in] overflow_policy [static] A VX_TYPE_ENUM of the vx_convert_policy_e enumeration. +* \param [in] rounding_policy [static] A VX_TYPE_ENUM of the vx_round_policy_e enumeration. +* \param [in] down_scale_size_rounding [static] Rounding method for calculating output dimensions. 
See \ref vx_convolutional_network_rounding_type_e +* \param [out] outputs The output tensor data. Output dimension layout is [#OFM,#batches]. See \ref vxCreateTensor2 and \ref vxCreateVirtualTensor2, where #batches may be multidimensional. +* \return vx_node. +* \retval 0 Node could not be created. +* \retval * Node handle. +* \ingroup group_cnn +*/ +VX_API_ENTRY vx_node VX_API_CALL vxFullyConnectedLayer(vx_graph graph, vx_tensor inputs, vx_tensor weights, vx_tensor biases, vx_enum overflow_policy, vx_enum rounding_policy, vx_tensor outputs); + +/*! \brief [Graph] Creates a Convolutional Network Local Response Normalization Layer Node. This function is optional for 8-bit extension with the extension string 'KHR_NN_8'. + * \details Normalizing over local input regions. Each input value is divided by \f$ (\bias+\frac{\alpha}{n}\sum_i x^2_i)^\beta \f$ , where n is the number of elements to normalize across. + * and the sum is taken over a rectangle region centred at that value (zero padding is added where necessary). + * \param [in] graph The handle to the graph. + * \param [in] inputs The input tensor data. 3 lower dimensions represent a single input, 4th dimension for batch of inputs is optional. Dimension layout is [width, height, IFM, #batches]. + * See \ref vxCreateTensor2 and \ref vxCreateVirtualTensor2. + * Implementations must support input tensor data types indicated by the extension strings 'KHR_NN_8 KHR_NN_16'. + * Since this function is optional for 'KHR_NN_8', so implementations only must support VX_TYPE_INT16 with fixed_point_position 8. + * \param [in] type [static] Either same map or across maps (see \ref vx_nn_norm_type_e). + * \param [in] normalization_size [static] Number of elements to normalize across. Must be a positive odd number with maximum size of 7 and minimum of 3. + * \param [in] alpha [static] Alpha parameter in the local response normalization equation. must be positive. + * \param [in] beta [static] Beta parameter in the local response normalization equation. must be positive. + * \param [in] bias [static] Bias parameter in the local response normalization equation. must be positive. + * \param [out] outputs The output tensor data. Output will have the same number of dimensions as input. + * \ingroup group_cnn + * \return vx_node. + * \returns A node reference \ref vx_node. Any possible errors preventing a + * successful creation should be checked using \ref vxGetStatus. + */ +VX_API_ENTRY vx_node VX_API_CALL vxLocalResponseNormalizationLayer(vx_graph graph, vx_tensor inputs, vx_enum type, + vx_size normalization_size, + vx_float32 alpha, + vx_float32 beta, + vx_float32 bias, + vx_tensor outputs); + +/*! \brief Input parameter for normalization layer2 +* \ingroup group_cnn +*\version 0.4 +*/ +typedef struct _vx_nn_normalization_params_t +{ + vx_enum type; /*!< \brief Either same map or across maps \refvx_convolutional_network_norm_type_e */ + vx_uint32 norm_size; /*!< \brief Number of elements to normalize across */ + vx_float32 alpha; /*!< \brief Alpha parameter in the normalization equation */ + vx_float32 beta; /*!< \brief Beta parameter in the normalization equation */ + vx_float32 bias; /*!< \brief Bias parameter, must not be zero */ +} vx_nn_normalization_params_t; + +/*! \brief extenstion parameters for normalization layer2. 
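+ *
+ * A minimal sketch of how the base structure is typically filled and passed to
+ * \ref vxNormalizationLayer2 (illustrative only; the graph, input and output variables and the
+ * numeric values are placeholders):
+ * \code
+ * vx_nn_normalization_params_t norm = { 0 };
+ * norm.type      = VX_NN_NORMALIZATION_ACROSS_MAPS;
+ * norm.norm_size = 5;
+ * norm.alpha     = 0.0001f;
+ * norm.beta      = 0.75f;
+ * norm.bias      = 1.0f;
+ * vx_node node = vxNormalizationLayer2(graph, input, &norm, sizeof(norm), output);
+ * \endcode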
+ * \ingroup group_cnn + *\version 0.5 + */ +typedef struct _vx_nn_normalization_params_ext_t +{ + vx_nn_normalization_params_t base; /*!< \brief Khronos standard structure head \ref vx_nn_normalization_params_t */ + vx_int32 axis; +} vx_nn_normalization_params_ext_t; + +/*! \brief Input parameter for tensor transpose layer2 +* \ingroup group_cnn +*\version 0.5 +*/ +typedef struct _vx_nn_transpose_params_t +{ + vx_int32* dims; /*!< \brief The array of perm dims */ + vx_uint32 dims_num; /*!< \brief Number of dims */ +} vx_nn_transpose_params_t; + +/*! \brief Input parameter for tensor mean layer +* \ingroup group_cnn +*\version 0.5 +*/ +typedef struct _vx_nn_mean_params_t +{ + vx_tensor axis; /*!< \brief 1D axis tensor of reduce dims */ + vx_int32 keep_dims; /*!< \brief Keep dims, if positive, retains reduced dims with length 1 */ +} vx_nn_mean_params_t; + +/*! \brief Input parameter for reducesum layer +* \ingroup group_cnn +*\version 0.5 +*/ +typedef struct _vx_nn_sum_params_t +{ + vx_tensor axis; /*!< \brief 1D axis tensor of reduce dims */ + vx_int32 keep_dims; /*!< \brief Keep dims, if positive, retains reduced dims with length 1 */ +} vx_nn_sum_params_t; + +/*! \brief Input parameter for tensor squeeze layer +* \ingroup group_cnn +*\version 0.5 +*/ +typedef struct _vx_nn_squeeze_params_t +{ + vx_tensor squeeze_dims; /*!< \brief [Optional]1D tensor of squeeze dims, if specified, only squeezes the dimisions lists. otherwise, squeeze all */ +} vx_nn_squeeze_params_t; + +/*! \brief Input parameter for tensor stride slice layer +* \ingroup group_cnn +*\version 0.5 +*/ +typedef struct _vx_nn_stride_slice_params_t +{ + vx_tensor begin_dims; /*!< \brief 1D tensor of int32, the starts of the dims of the input tensor to be sliced. the length must be of rank(input) */ + vx_tensor end_dims; /*!< \brief 1D tensor of int32, the ends of the dims of the input tensor to be sliced. the length must be of rank(input) */ + vx_tensor stride_dims; /*!< \brief 1D tensor of int32, the stride of the dims of the input tensor to be sliced. the length must be of rank(input) , note that a stride can be negative, which cause a reverse slice */ + vx_int32 begin_mask; /*!< \brief begin mask, if the ith bit of begin maks is set, begin[i] is ignored and the fullest possible range in that dim is used instead. */ + vx_int32 end_mask; /*!< \brief end mask, if the ith bit of end maks is set, end[i] is ignored and the fullest possible range in that dim is used instead. */ + vx_int32 shrink_axis_mask; /*!< \brief An int32 mask, if the ith bit of shrink axis mask is set, it implies that the ith specification shrinks dim must be preserved. */ +} vx_nn_stride_slice_params_t; + +/*! \brief [Graph] Creates a Convolutional Network Normalization Layer Node. +* \details Normalizing over local input regions. Each input value is divided by \f$ (bias+\frac{\alpha}{n}\sum_i x^2_i)^\beta \f$ , where n is the number of elements to normalize across. +:* and the sum is taken over the region centred at that value (zero padding is added where necessary). +* \param [in] graph The handle to the graph. +* \param [in] inputs The input tensor data. 3 lower dimensions represent a single input, 4th dimension for batch of inputs is optional.Dimension layout is [width, height, IFM, #batches]. +* See \ref vxCreateTensor2 and \ref vxCreateVirtualTensor2. +* \param [in] nomalization_params [static] Pointer to \ref vx_nn_normalization_params_t parameter structure. +* \param [in] size_of_normalization_param [static] The size of the parameter structure. 
+* \param [out] outputs The output tensor data. Output will have the same number of dimensions as input. +* \ingroup group_cnn +* \version 0.4 +* \return vx_node. +*/ +VX_API_ENTRY vx_node VX_API_CALL vxNormalizationLayer2(vx_graph graph, vx_tensor inputs, const vx_nn_normalization_params_t *normalization_params, + vx_size size_of_normalization_param, vx_tensor outputs); + +/*! \brief [Graph] Creates a Convolutional Network Activation Layer Node. + * The function operate a specific function (Specified in \ref vx_nn_activation_function_e), On the input data. + * the equation for the layer is: + * \f$ outputs(i,j,k,l) = function(inputs(i,j,k,l), a, b) \f$ for all i,j,k,l. + * \param [in] graph The handle to the graph. + * \param [in] inputs The input tensor data. + * \param [in] function [static] Non-linear function (see \ref vx_convolutional_network_activation_func_e). Implementations must support \ref VX_NN_ACTIVATION_LOGISTIC, \ref VX_NN_ACTIVATION_HYPERBOLIC_TAN and \ref VX_NN_ACTIVATION_RELU + * \param [in] a [static] Function parameters a. must be positive. + * \param [in] b [static] Function parameters b. must be positive. + * \param [out] outputs The output tensor data. Output will have the same number of dimensions as input. + * \ingroup group_cnn + * \return vx_node. + * \returns A node reference \ref vx_node. Any possible errors preventing a + * successful creation should be checked using \ref vxGetStatus. + */ +VX_API_ENTRY vx_node VX_API_CALL vxActivationLayer(vx_graph graph, vx_tensor inputs, vx_enum function, vx_float32 a,vx_float32 b, vx_tensor outputs); + +/*! \brief [Graph] Creates a Convolutional Network ROI pooling node + * \details Pooling is done on the width and height dimensions of the \ref vx_tensor. The ROI Pooling get an array of roi rectangles, and an input tensor. + * The kernel crop the width and height dimensions of the input tensor with the ROI rectangles and down scale the result to the size of the output tensor. The output tensor width and height are the pooled width and pooled height. + * The down scale method is determined by the pool_type. + * Notice that this node creation function has more parameters than the corresponding kernel. Numbering of kernel parameters (required if you create this node using the generic interface) is explicitly specified here. + * \param [in] graph The handle to the graph. + * \param [in] inputs The input tensor data. 3 lower dimensions represent a single input, 4th dimension for batch of inputs is optional. Dimension layout is [width, height, #IFM, #batches]. + * See \ref vxCreateTensor2 and \ref vxCreateVirtualTensor2. + * Implementations must support input tensor data types indicated by the extension strings 'KHR_NN_8' or 'KHR_NN_8 KHR_NN_16'. (Kernel parameter #0) + * \param [in] inputs_rois The roi array tensor. ROI array with dimensions [4, roi_count, #batches] where the first dimension represents 4 coordinates of the top left and bottom right corners of the roi rectangles, based on the input tensor width and height. + * #batches is optional and must be the same as in inputs. roi_count is the number of ROI rectangles. (Kernel parameter #1) + * \param [in] pool_type [static] Of type \ref vx_nn_pooling_type_e. Only \ref VX_NN_POOLING_MAX pooling is supported. (Kernel parameter #2) + * \param [in] size_of_roi_params [static] Size in bytes of roi_pool_params. Note that this parameter is not counted as one of the kernel parameters. + * \param [out] output_arr The output tensor. 
Output will have [output_width, output_height, #IFM, #batches] dimensions. #batches is optional and must be the same as in inputs. (Kernel parameter #3) + * \ingroup group_cnn + * \return vx_node. + * \returns A node reference \ref vx_node. Any possible errors preventing a + * successful creation should be checked using \ref vxGetStatus. + */ +VX_API_ENTRY vx_node VX_API_CALL vxROIPoolingLayer(vx_graph graph, vx_tensor input_data, vx_tensor input_rois, const vx_nn_roi_pool_params_t *roi_pool_params, vx_size size_of_roi_params, vx_tensor output_arr); + + +/*! \brief [Graph] Creates a Convolutional Network Deconvolution Layer Node. + * \details Deconvolution denote a sort of reverse convolution, which importantly and confusingly is not actually a proper mathematical deconvolution. + * Convolutional Network Deconvolution is up-sampling of an image by learned Deconvolution coefficients. + * The operation is similar to convolution but can be implemented by up-sampling the inputs with zeros insertions between the inputs, + * and convolving the Deconvolution kernels on the up-sampled result. + * For fixed-point data types, a fixed point calculation is performed with round and saturate according to the number of accumulator bits. The number of the accumulator bits are implementation defined, + * and should be at least 16.\n + * round: rounding according the vx_round_policy_e enumeration. \n + * saturate: A saturation according the vx_convert_policy_e enumeration. + * The following equation is implemented: \n + * \f$ outputs[j,k,i] = saturate(round(\sum_{l} \sum_{m,n}(inputs_{upscaled}[j+m,k+n,l] \times weights[m,n,l,i])+biasses[j,k,i])) \f$\n + * Where \f$m,n\f$ are indexes on the convolution matrices. \f$ l\f$ is an index on all the convolutions per input.\f$ i\f$ is an index per output. + * \f$ j,k \f$ are the inputs/outputs spatial indexes. + * Deconvolution is done on the width and height dimensions of the \ref vx_tensor. Therefore, we use here the term x for the width dimension and y for the height dimension.\n + * before the Deconvolution is done, up-scaling the width and height dimensions with zeros is performed. + * The relation between input to output is as follows: \n + * \f$ width_{output} = (width_{input} -1) * upscale_x - 2 * padding_x + kernel_x + a_x \f$\n + * and \n + * \f$ height_{output} = (height_{input} - 1) * upscale_y - 2 * padding_y + kernel_y + a_y \f$\n + * where \f$width_{input}\f$ is the size of the input width dimension. \f$height_{input}\f$ is the size of the input height dimension. + * \f$width_{output}\f$ is the size of the output width dimension. \f$height_{output}\f$ is the size of the output height dimension. + * \f$kernel_x\f$ and \f$kernel_y\f$ are the convolution sizes in width and height. \f$a_x\f$ and \f$a_y\f$ are user-specified quantity used to distinguish between the \f$upscale_x\f$ and \f$upscale_y\f$ different possible output sizes. + * \f$upscale_x\f$ and \f$upscale_y\f$ are calculated by the relation between input and output. + * \f$a_x\f$ and \f$a_y\f$ must be positive and smaller then \f$upscale_x\f$ and \f$upscale_y\f$ respectively. + * Since the padding parameter is on the output. The effective input padding is: \n + * \f$ padding_{input_x} = kernel_x -padding_x -1\f$ \n + * \f$ padding_{input_y} = kernel_y -padding_y -1\f$ \n + * Therfore the following constarints apply : \f$kernel_x >= padding_x - 1\f$ and \f$kernel_y >= padding_y - 1\f$. + * rounding is done according to \ref vx_nn_rounding_type_e. 
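+ * For example (illustrative values only): with \f$width_{input} = 4\f$, \f$upscale_x = 2\f$, \f$padding_x = 1\f$,
+ * \f$kernel_x = 3\f$ and \f$a_x = 0\f$, the output width is \f$ (4 - 1) * 2 - 2 * 1 + 3 + 0 = 7 \f$.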
+ * Notice that this node creation function has more parameters than the corresponding kernel. Numbering of kernel parameters (required if you create this node using the generic interface) is explicitly specified here.
+ * \param [in] graph The handle to the graph.
+ * \param [in] inputs The input tensor. 3 lower dimensions represent a single input, and an optional 4th dimension for batch of inputs. Dimension layout is [width, height, #IFM, #batches].
+ * See \ref vxCreateTensor2 and \ref vxCreateVirtualTensor2.
+ * Implementations must support input tensor data types indicated by the extension strings 'KHR_NN_8' or 'KHR_NN_8 KHR_NN_16'. (Kernel parameter #0)
+ * \param [in] weights [static] The 4d weights with dimensions [width, height, #IFM, #OFM]. See \ref vxCreateTensor2 and \ref vxCreateVirtualTensor2. (Kernel parameter #1)
+ * \param [in] biases [static] Optional, ignored if NULL. The biases have one dimension [#OFM]. Implementations must support input tensor data type same as the inputs. (Kernel parameter #2)
+ * \param [in] deconvolution_params [static] Pointer to parameters of type \ref vx_nn_deconvolution_params_t (Kernel parameter #3)
+ * \param [in] size_of_deconv_params [static] Size in bytes of deconvolution_params. Note that this parameter is not counted as one of the kernel parameters.
+ * \param [out] outputs The output tensor. The output has the same number of dimensions as the input. (Kernel parameter #4)
+ * \ingroup group_cnn
+ * \return vx_node.
+ * \returns A node reference \ref vx_node. Any possible errors preventing a
+ * successful creation should be checked using \ref vxGetStatus.
+ */
+VX_API_ENTRY vx_node VX_API_CALL vxDeconvolutionLayer(vx_graph graph, vx_tensor inputs, vx_tensor weights, vx_tensor biases, const vx_nn_deconvolution_params_t *deconvolution_params, vx_size size_of_deconv_params, vx_tensor outputs);
+
+/*! \brief [Graph] Creates a LeakyRELU Layer Node.
+ * \details Activates the input with the leaky ReLU algorithm. Given an input value x, the leaky ReLU layer computes the output as x if x > 0 and negative_slope * x if x <= 0.
+ * \param [in] graph The reference to the parent graph.
+ * \param [in] inputs The input tensor data to activate.
+ * \param [in] negative_slope [static] The slope value used to scale the negative part of the input rather than setting it to 0.
+ * \param [out] outputs The output tensor data. Output will have the same number of dimensions as the input.
+ * \return vx_node.
+ * \retval 0 Node could not be created.
+ * \retval * Node handle.
+ * \ingroup group_cnn
+*/
+VX_API_ENTRY vx_node VX_API_CALL vxLeakyReluLayer(
+    vx_graph graph,
+    vx_tensor inputs,
+    vx_float32 negative_slope,
+    vx_tensor outputs
+    );
+
+/*! \brief [Graph] Creates a PRelu Layer Node.
+ * \details Activates the input with the parametric ReLU algorithm. Given an input value x, the PRelu layer computes the output as x if x > 0 and alpha * x if x <= 0.
+ * \param [in] graph The reference to the parent graph.
+ * \param [in] inputs The input tensor data to activate.
+ * \param [in] alpha The per-channel alpha tensor used to scale the negative part of the input.
+ * \param [out] outputs The output tensor data. Output will have the same number of dimensions as the input.
+ * \return vx_node.
+ * \retval 0 Node could not be created.
+ * \retval * Node handle.
+ * \ingroup group_cnn
+ * \version 0.5
+*/
+VX_API_ENTRY vx_node VX_API_CALL vxPReluLayer(
+    vx_graph graph,
+    vx_tensor inputs,
+    vx_tensor alpha,
+    vx_tensor outputs
+    );
+
+/*!
\brief [Graph] Creates a Batch Normalization Node. + * \details Normalize the activations of the previous layer at each batch, i.e. applies a transformation that maintains the mean activation close to 0 and the activation standard deviation close to 1. + * \param [in] graph The handle to the graph. + * \param [in] eps [static] Float 32. Small value to add to the variance estimate so that we don't divide by zero.(default is 1e-5) + * \param [in] mean [static] A mean tensor data. + * \param [in] variance [static] A variance tensor data. + * \param [in] gamma [static] A scale tensor data, often denoted gamma in equations. + * \param [in] beta [static] A offset tensor data, often denoted beta in equations. + * \param [in] input The input tensor. + * \param [out] output The output tensor. + * \return vx_node. + * \returns A node reference \ref vx_node. Any possible errors preventing a + * successful creation should be checked using \ref vxGetStatus. + * \ingroup group_cnn + */ +VX_API_ENTRY vx_node VX_API_CALL vxBatchNormalizationLayer( + vx_graph graph, + vx_float32 eps, + vx_tensor mean, + vx_tensor variance, + vx_tensor gamma, + vx_tensor beta, + vx_tensor input, + vx_tensor output + ); + +/*! \brief [Graph] Creates a concat Node. + * \details Concat one tensor from two tensor. + * \param [in] graph The handle to the graph. + * \param [in] in0 The input 0 tensor to be combined. + * \param [in] in1 The input 1 tensor to be combined. + * \param [out] out The output tensor. + * \return vx_node. + * \returns A node reference \ref vx_node. Any possible errors preventing a + * successful creation should be checked using \ref vxGetStatus. + * \ingroup group_cnn + */ +VX_API_ENTRY vx_node VX_API_CALL vxConcat2Layer( + vx_graph graph, + vx_tensor in0, + vx_tensor in1, + vx_tensor out + ); + +/*! \brief parameter for vxConcatIndefiniteLayer + * \ingroup group_cnn + * \version 0.4 + */ +typedef struct _vx_nn_concat_params_t +{ + vx_uint32 axis; /*!< \brief The axis on which we need do concat. */ +} vx_nn_concat_params_t; + +/*! \brief [Graph] Create a concat layer for indefinite number of tensors. + * \param [in] graph The handle to the graph + * \param [in] in Pointer to a list of tensors + * \param [in] concat_params [static] Pointer to parameters of type \ref vx_nn_concat_params_t + * \param [in] size_of_concat_params [static] Size in bytes of vx_nn_concat_params_t. + * \param [out] out The output tensor after concat + * \returns vx_node. + * \retval 0 Node could not be created. + * \retval * Node handle. + * \ingroup group_cnn + */ +VX_API_ENTRY vx_node VX_API_CALL vxConcatIndefiniteLayer( + vx_graph graph, + vx_object_array in, + const vx_nn_concat_params_t* concat_params, + vx_size size_of_concat_params, + vx_tensor out + ); + +/*! \brief The type list of reorgnization. + * \ingroup group_cnn + * \version 0.4 + */ +enum vx_reorg_type_e +{ + /*! \brief Reorgnization from depth to space. */ + VX_REORG_DEPTH_TO_SPACE = 0, + + /*! \brief Reorgnization from space to depth. */ + VX_REORG_SPACE_TO_DEPTH = 1, + + /*! \brief Reorgnization from batch to space. */ + VX_REORG_BATCH_TO_SPACE_ND, + + /*! \brief Reorgnization from space to batch. */ + VX_REORG_SPACE_TO_BATCH_ND, + + /*! \brief Reorgnzation channel. */ + VX_REORG_SHUFFLE_CHANNEL, +}; + +/*! 
\brief Input parameter for reorg layer + *\ingroup group_cnn + *\version 0.4 + */ +typedef struct _vx_nn_reorg_params_t +{ + vx_tensor block_size; /*!< \brief The block sizes(int32) for each spatial dimensions of the input to do a reorg operation, all value must > 1 */ + vx_enum type; /*!< \brief The type of Reorgnization, \ref vx_reorg_type_e */ +} vx_nn_reorg_params_t, * vx_nn_reorg_params; + +/*! \brief extenstion parameters for reorg layer . + * \ingroup group_cnn + *\version 0.5 + */ +typedef struct _vx_nn_reorg_params_ext_t +{ + vx_nn_reorg_params_t base; /*!< \brief vx_nn_reorg_params \ref vx_nn_reorg_params_t */ + vx_tensor pad; /*!< \brief [Optional] Only for SPACE2BATCH, 2D tensor for paddings for each spatial dim of the input tensor(rank(input), 2), all values must be >=0. */ +} vx_nn_reorg_params_ext_t; + +typedef struct _vx_nn_reorg_params_ext2_t +{ + vx_nn_reorg_params_t base; /*!< \brief vx_nn_reorg_params \ref vx_nn_reorg_params_t */ + vx_int32 *num_group; + vx_int32 *axis; +} vx_nn_reorg_params_ext2_t; + +typedef struct _vx_nn_reorg_params_ext3_t +{ + vx_nn_reorg_params_ext2_t base; /*!< \brief vx_nn_reorg_params \ref vx_nn_reorg_params_t */ + vx_enum mode; /*!< \brief [Optional] Only for DEPH2SPACE */ +} vx_nn_reorg_params_ext3_t; + +/*! \brief [Graph] Creates a Reorgnization Layer Node, Enhancement of vxReorgLayer, Support both DEPTH to SPACE and SPACE to DEPTH. + * \param [in] graph The reference to the parent graph. + * \param [in] input The input tensor data to reorg. + * \param [in] reorg_params [static] Pointer to parameters of type \ref vx_nn_reorg_params + * \param [in] size_of_reorg_params [static] Size in bytes of vx_nn_reorg_params. + * \param [out] output The output tensor data. Output will have different number of each dimensions as input. + * \returns vx_node. + * \retval 0 Node could not be created. + * \retval * Node handle. + * \ingroup group_cnn + * \version 0.4 + */ +VX_API_ENTRY vx_node VX_API_CALL vxReorgLayer2( + vx_graph graph, + vx_tensor input, + const vx_nn_reorg_params reorg_params, + vx_size size_of_reorg_params, + vx_tensor output + ); + +/*! \brief Input parameter for TensorRoundingLayer + * \ingroup group_tensor + * \version 0.4 + */ +typedef struct _vx_nn_rounding_params_t +{ + vx_enum mode; /*!< \brief Rounding method for calculating tensor data(VX_CONVOLUTIONAL_NETWORK_DS_SIZE_ROUNDING_FLOOR or VX_CONVOLUTIONAL_NETWORK_DS_SIZE_ROUNDING_CEILING). See \ref vx_convolutional_network_rounding_type_e */ +} vx_nn_rounding_params_t, * vx_nn_rounding_params; + +/*! \brief [Graph] Creates a Rounding Layer Node, support FLOOR and CEIL. + * \param [in] graph The reference to the parent graph. + * \param [in] input The input tensor data to reorg. + * \param [in] rounding_params [static] Pointer to parameters of type \ref vx_nn_rounding_params + * \param [in] size_of_rounding_params [static] Size in bytes of vx_nn_rounding_params. + * \param [out] output The output tensor data. + * \returns vx_node. + * \retval 0 Node could not be created. + * \retval * Node handle. + * \ingroup group_tensor + * \version 0.4 + */ +VX_API_ENTRY vx_node VX_API_CALL vxTensorRoundingNode( + vx_graph graph, + vx_tensor input, + const vx_nn_rounding_params rounding_params, + vx_size size_of_rounding_params, + vx_tensor output + ); + +/*! 
\brief Input parameter for hashTableLookupLayer + *\ingroup group_cnn + *\version 0.4 + */ +typedef struct _vx_nn_hashlut_params_t +{ + vx_tensor keys; /*!< \brief A 1-D tensor with shape [ n ]; */ + vx_tensor values; /*!< \brief A tensor with shape of [ n, ?]; i.e., the first dimension must be n. */ +} vx_nn_hashlut_params_t, * vx_nn_hashlut_params; + +/*! \brief [Graph] Creates a hash lookup table Layer Node. + * \details Keys and Values pair represent a map, i.e., the ith element + * in Keys (Keys[i]) is the key to select the ith sub-tensor + * in Values (Values[i]), where 0 <= i <= n-1. + * Keys tensor *MUST* be sorted in ascending order. + * \param [in] graph The reference to the parent graph. + * \param [in] input 1-D tensor with shape [ k ]. + * \param [in] hashlut_params Pointer to parameters of type \ref vx_nn_hashlut_params_t + * \param [in] size_of_hashlut_params [static] Size in bytes of vx_nn_hashlut_params. + * \param [out] hits A boolean tensor with shape [ k ] indicates whether the lookup hits (True) or not (False). + * \param [out] output The output tensor data, tensor with shape [ k, ?] + * \returns vx_node. + * \retval 0 Node could not be created. + * \retval * Node handle. + * \ingroup group_cnn + * \version 0.4 + */ +VX_API_ENTRY vx_node VX_API_CALL vxHashTableLookupLayer( + vx_graph graph, + vx_tensor input, + const vx_nn_hashlut_params hashlut_params, + vx_size size_of_hashlut_params, + vx_tensor hits, + vx_tensor output + ); + +/*! \brief LSH project type list + *\ingroup group_cnn + *\version 0.4 + */ +enum vx_lshproj_type_e { + /*! \brief Computed bit vector is considered to be sparse. */ + VX_LSH_PROJ_SPARSE = 1, + + /*! \brief Computed bit vector is considered to be dense. */ + VX_LSH_PROJ_DENSE = 2, +}; + +/*! \brief Input parameter to LSH projection layer + *\ingroup group_cnn + *\version 0.4 + */ +typedef struct _vx_nn_lshproj_params_t +{ + vx_tensor hash_func; /*!< \brief Tensor of hash function. Dim size is 2, .Dim[0]: Number of hash functions. Dim[1]: Number of seeds per hash functions. Dim[1] <= 32 in sparse case. */ + vx_tensor weights; /*!< \brief Optional. Dim.size == 1, If not set, each input element is considered to have the same weight of 1.0. */ + vx_tensor type; /*!< \brief The type of LSH projection, support VX_LSH_PROJ_SPARSE and VX_LSH_PROJ_DENSE; */ +} vx_nn_lshproj_params_t, * vx_nn_lshproj_params; + +/*! \brief [Graph] Creates a LSH projection Layer Node. + * \details Projects an input to a bit vector via locality senstive hashing. + * Sparse: Value VX_LSH_PROJ_SPARSE(=1). + * Computed bit vector is considered to be sparse. + * Each output element is an int32 made up of multiple bits computed from + * hash functions. + * Dense: Value VX_LSH_PROJ_DENSE(=2). + * Computed bit vector is considered to be dense. Each output element + * represents a bit and can take the value of either 0 or 1. + * + * \param [in] graph The reference to the parent graph. + * \param [in] input input tensor data, Dim size must >= 1. + * \param [in] lshproj_params Pointer to parameters of type \ref vx_nn_lshproj_params + * \param [in] size_of_lshproj_params [static] Size in bytes of vx_nn_lshproj_params. + * \param [out] output The output tensor data. + * If the projection type is sparse: + * Output.Dim == { Tensor[0].Dim[0] } + * A tensor that represents hash signatures. + * If the projection type is Dense: + * Output.Dim == { Tensor[0].Dim[0] * Tensor[0].Dim[1] } + * A flattened tensor that represents projected bit vectors. + * \returns vx_node. 
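+ * A minimal usage sketch (illustrative only; the tensor handles below are assumed to be created elsewhere,
+ * and passing NULL for the optional weights is an assumption based on the description above):
+ * \code
+ * vx_nn_lshproj_params_t proj_params;
+ * proj_params.hash_func = hash_func_tensor;   // 2-D tensor: [number of hash functions, seeds per function]
+ * proj_params.weights   = NULL;               // optional; each input element then has weight 1.0
+ * proj_params.type      = type_tensor;        // tensor holding VX_LSH_PROJ_SPARSE or VX_LSH_PROJ_DENSE
+ * vx_node node = vxLSHProjectionLayer(graph, input, &proj_params, sizeof(proj_params), output);
+ * vx_status status = vxGetStatus((vx_reference)node);   // VX_SUCCESS when the node was created
+ * \endcode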
+ * \retval 0 Node could not be created. + * \retval * Node handle. + * \ingroup group_cnn + * \version 0.4 + */ +VX_API_ENTRY vx_node VX_API_CALL vxLSHProjectionLayer( + vx_graph graph, + vx_tensor input, + const vx_nn_lshproj_params lshproj_params, + vx_size size_of_lshproj_params, + vx_tensor output + ); + +/*! \brief Input parameter for Reshape layer + *\ingroup group_cnn + *\version 0.4 + */ +typedef struct _vx_nn_reshape_params_t +{ + vx_tensor dims; /*!< \brief dimension. */ +} vx_nn_reshape_params_t, * vx_nn_reshape_params; + +/*! \brief [Graph] Creates a Reshape Layer Node. + * \param [in] graph The reference to the parent graph. + * \param [in] input The input tensor data to reshape. + * \param [in] reshape_params Pointer to parameters of type \ref vx_nn_reshape_params + * \param [in] size_of_reshape_params [static] Size in bytes of vx_nn_reshape_params. + * \param [out] output The output tensor data. + * \returns vx_node. + * \retval 0 Node could not be created. + * \retval * Node handle. + * \ingroup group_tensor + * \version 0.4 + */ +VX_API_ENTRY vx_node VX_API_CALL vxTensorReshapeNode( + vx_graph graph, + vx_tensor input, + const vx_nn_reshape_params reshape_params, + vx_size size_of_reshape_params, + vx_tensor output + ); + +/*! \brief Input parameter for Scale layer + *\ingroup group_cnn + *\version 0.4 + */ +typedef struct _vx_nn_scale_params_t +{ + vx_enum type; /*!< \brief The interpolation type, only support VX_INTERPOLATION_BILINEAR. */ +} vx_nn_scale_params_t, * vx_nn_scale_params; + +typedef struct _vx_nn_scale_params_ext_t +{ + vx_nn_scale_params_t base; + vx_bool align_corners; + vx_bool half_pixel_centers; +} vx_nn_scale_params_ext_t, * vx_nn_scale_params_ext; + +/*! \brief [Graph] Creates a scale Layer Node. + * \param [in] graph The reference to the parent graph. + * \param [in] input The input tensor data to scale. + * \param [in] scale_params [static] Pointer to parameters of type \ref vx_nn_scale_params + * \param [in] size_of_scale_params [static] Size in bytes of vx_nn_scale_params. + * \param [out] output The output tensor data. + * \returns vx_node. + * \retval 0 Node could not be created. + * \retval * Node handle. + * \ingroup group_tensor + * \version 0.4 + */ +VX_API_ENTRY vx_node VX_API_CALL vxTensorScaleNode( + vx_graph graph, + vx_tensor input, + const vx_nn_scale_params scale_params, + vx_size size_of_scale_params, + vx_tensor output + ); + +/*! \brief Input parameter for YUV to RGB scale layer + *\ingroup group_cnn + *\version 0.5 + */ +typedef struct _vx_nn_yuv2rgb_scale_params_t +{ + vx_rectangle_t rect; /*!< \brief The rectangle region of input image to do yuv2rgb scale. If it is set to 0, region is full input image; */ + vx_float32 mean_r; /*!< \brief Mean coefficient for output r channel; */ + vx_float32 mean_g; /*!< \brief Mean coefficient for output g channel; */ + vx_float32 mean_b; /*!< \brief Mean coefficient for output b channel; */ + vx_float32 scale_rgb; /*!< \brief Scale coefficient value for output rgb; Not the scale ratio; */ + vx_bool y_only; /*!< \brief YUV mode, Y only or normal YUV. */ + vx_bool output_rgb; /*!< \brief Output mode, BGR or RGB. */ + vx_bool output_roi; /*!< \brief Output full image or partial region of image. Default is full image. */ + vx_uint8 fill_r; /*!< \brief R channel value of output image pad. */ + vx_uint8 fill_g; /*!< \brief G channel value of output image pad. */ + vx_uint8 fill_b; /*!< \brief B channel value of output image pad. 
*/ + vx_rectangle_t output_rect; /*!< \brief The rectangle region of output image. It should be smaller than input image. If output_roi is false, this parameter will be ignored.*/ +} vx_nn_yuv2rgb_scale_params_t, * vx_nn_yuv2rgb_scale_params; + +/*! \brief [Graph] Creates a scale Layer Node. + * \param [in] graph The reference to the parent graph. + * \param [in] input The input tensor data to scale. + * \param [in] scale_params [static] Pointer to parameters of type \ref vx_nn_scale_params + * \param [in] size_of_scale_params [static] Size in bytes of vx_nn_scale_params. + * \param [out] output The output tensor data. + * \returns vx_node. + * \retval 0 Node could not be created. + * \retval * Node handle. + * \ingroup group_tensor + * \version 0.5 + */ +VX_API_ENTRY vx_node VX_API_CALL vxYUV2RGBScaleNode( + vx_graph graph, + vx_image input, + const vx_nn_yuv2rgb_scale_params yuv2rgb_scale_params, + vx_size size_of_yuv2rgb_scale_param, + vx_tensor output + ); + +/*! \brief Input parameter for RNN layer + *\ingroup group_cnn + *\version 0.4 + */ +typedef struct _vx_nn_rnn_params_t +{ + vx_tensor weights; /*!< \brief 2-D recurrent weights tensor, of shape [num_units, input_size], where "num_units" corresponds to the number of units. */ + vx_tensor recurrent_weights; /*!< \brief 2-D tensor, of shape [num_units, num_units], with columns corresponding to the weights from each unit. */ + vx_tensor bias; /*!< \brief 1-D tensor, of shape [num_units]. */ + vx_tensor state_in; /*!< \brief 2-D tensor, of shape [batch_size, num_units]. */ + vx_tensor activation; /*!< \brief Optional, indicating the activation function. If "NONE" is specified then it results in a linear activation. */ +} vx_nn_rnn_params_t, * vx_nn_rnn_params; + +/*! \brief [Graph] Creates a RNN Layer Node. + * \details A basic recurrent neural network layer. + * This layer implements the operation: + * outputs = state = activation(inputs * input_weights + state * recurrent_weights + bias) + * + * Where: + * "input_weights" is a weight matrix that multiplies the inputs; + * "recurrent_weights" is a weight matrix that multiplies the current + * "state" which itself is the output from the previous time step + * computation; + * "bias" is a bias vector (added to each output vector in the batch); + * "activation" is the function passed as the "activation_function" + * argument (if not "NONE"). + * \param [in] graph The reference to the parent graph. + * \param [in] input The input tensor data to rnn, 2-D tensor, of shape [input_size, batch_size], where "batch_size" corresponds to the batching dimension, and "input_size" is the size of the input. + * \param [in] rnn_params Pointer to parameters of type \ref vx_nn_rnn_params + * \param [in] size_of_rnn_params [static] Size in bytes of vx_nn_rnn_params. + * \param [out] state_out The output tensor data, A 2-D tensor, of shape [batch_size, num_units]. + * \param [out] output The output tensor data, 2-D tensor, of shape [batch_size, num_units]. This is effectively the same as the current state value.. + * \returns vx_node. + * \retval 0 Node could not be created. + * \retval * Node handle. + * \ingroup group_cnn + * \version 0.4 + */ +VX_API_ENTRY vx_node VX_API_CALL vxRNNLayer( + vx_graph graph, + vx_tensor input, + const vx_nn_rnn_params rnn_params, + vx_size size_of_rnn_params, + vx_tensor state_out, + vx_tensor output + ); + +/*! 
\brief Input parameter for softmax layer2 + *\ingroup group_cnn + *\version 0.4 + */ +typedef struct _vx_nn_softmax_params_t +{ + vx_float32 beta; /*!< \brief A FLOAT32 value, specifying the positive scaling factor for the exponent, beta. */ +} vx_nn_softmax_params_t, * vx_nn_softmax_params; + +/*! \brief extenstion parameters for softmax layer2. + * \ingroup group_cnn + *\version 0.5 + */ +typedef struct _vx_nn_softmax_params_ext_t +{ + vx_nn_softmax_params_t base; /*!< \brief Khronos standard structure head \ref vx_nn_softmax_params_t */ + vx_int32 axis; +} vx_nn_softmax_params_ext_t; + +/*! \brief [Graph] Creates a softmax Layer Node. + * \param [in] graph The reference to the parent graph. + * \param [in] input The input tensor data, with number of dimensions equals dim(input batch) + 1. Softmax will be calculated per IFM.. + * \param [in] softmax_params [static] Pointer to parameters of type \ref vx_nn_softmax_params + * \param [in] size_of_softmax_params [static] Size in bytes of vx_nn_softmax_params. + * \param [out] output The output tensor data, Outputs will have the same number of dimensions as input.. + * \returns vx_node. + * \retval 0 Node could not be created. + * \retval * Node handle. + * \ingroup group_cnn + * \version 0.4 + */ +VX_API_ENTRY vx_node VX_API_CALL vxSoftmaxLayer2( + vx_graph graph, + vx_tensor input, + const vx_nn_softmax_params softmax_params, + vx_size size_of_softmax_params, + vx_tensor output + ); + +/*! \brief Input parameter for SVDF layer + *\ingroup group_cnn + *\version 0.4 + */ +typedef struct _vx_nn_svdf_params_t +{ + vx_tensor weights_feature; /*!< \brief A 2-D tensor, of shape [num_units, input_size], where "num_units" corresponds to the number of units. */ + vx_tensor recurrent_time; /*!< \brief A 2-D tensor, of shape [num_units, memory_size], where "memory_size" corresponds to the fixed-size of the memory. */ + vx_tensor bias; /*!< \brief Optional, 1-D tensor of type T, of shape [num_units]. */ + vx_tensor state_in; /*!< \brief A 2-D tensor, of shape [(memory_size - 1) * num_units * rank, batch_size] */ + vx_tensor rank; /*!< \brief The rank of the SVD approximation. */ + vx_tensor activation; /*!< \brief Indicating the activation function, specify linear activation for default */ +} vx_nn_svdf_params_t, * vx_nn_svdf_params; + +/*! \brief [Graph] Creates a svdf Layer Node. + * \details SVDF op is a kind of stateful layer derived from the notion that a + * densely connected layer that's processing a sequence of input frames can + * be approximated by using a singular value decomposition of each of its + * nodes. The implementation is based on: + * + * https://research.google.com/pubs/archive/43813.pdf + * + * P. Nakkiran, R. Alvarez, R. Prabhavalkar, C. Parada. + * "Compressing Deep Neural Networks using a Rank-Constrained Topology". + * INTERSPEECH, 2015. + * + * It processes the incoming input using a 2-stage filtering mechanism: + * stage 1 performs filtering on the "features" dimension, whose outputs get + * pushed into a memory of fixed-size memory_size. + * stage 2 performs filtering on the "time" dimension of the memory_size + * memoized outputs of stage 1. 
+ * + * Specifically, for rank 1, this layer implements the operation: + * + * memory = push(conv1d(inputs, weights_feature, feature_dim, + * "PADDING_VALID")); + * outputs = activation(memory * weights_time + bias); + * + * Where: + * "weights_feature" is a weights matrix that processes the inputs (by + * convolving the input with every "feature filter"), and whose outputs get + * pushed, stacked in order, into the fixed-size "memory" (the oldest entry + * gets dropped); + * "weights_time" is a weights matrix that processes the "memory" (by a + * batched matrix multiplication on the num_units); + * "bias" is an optional bias vector (added to each output vector in the + * batch); and + * "activation" is the function passed as the "fused_activation_function" + * argument (if not "NONE"). + * + * Each rank adds a dimension to the weights matrices by means of stacking + * the filters. + * \param [in] graph The reference to the parent graph. + * \param [in] input The input tensor data, A 2-D tensor of type T, of shape [input_size, batch_size], where + * "batch_size" corresponds to the batching dimension, and "input_size" is + * the size of the input. + * \param [in] svdf_params Pointer to parameters of type \ref vx_nn_svdf_params + * \param [in] size_of_svdf_params [static] Size in bytes of vx_nn_svdf_params. + * \param [out] state_out A 2-D tensor, of shape [(memory_size - 1) * num_units * rank, batch_size]. + * \param [out] output The output tensor data, Outputs will have the same number of dimensions as input. + * \returns vx_node. + * \retval 0 Node could not be created. + * \retval * Node handle. + * \ingroup group_cnn + * \version 0.4 + */ +VX_API_ENTRY vx_node VX_API_CALL vxSVDFLayer( + vx_graph graph, + vx_tensor input, + const vx_nn_svdf_params svdf_params, + vx_size size_of_svdf_params, + vx_tensor state_out, + vx_tensor output + ); + +/*! \brief Input parameter for Pooling layer2 + * \ingroup group_cnn + */ +typedef struct _vx_nn_pooling_params_t +{ + vx_enum pool_type; /*!< \brief either max pooling or average pooling, see \ref vx_convolutional_network_pooling_type_e. */ + vx_uint32 pool_size_x; /*!< \brief Size of the pooling region in the x dimension. */ + vx_uint32 pool_size_y; /*!< \brief Size of the pooling region in the y dimension. */ + vx_uint32 pool_pad_x_left; /*!< \brief Padding size in the left of x dimension. */ + vx_uint32 pool_pad_x_right; /*!< \brief Padding size in the right of x dimension. */ + vx_uint32 pool_pad_y_top; /*!< \brief Padding size in the top of y dimension. */ + vx_uint32 pool_pad_y_bottom; /*!< \brief Padding size in the bottom of y dimension. */ + vx_enum rounding; /*!< \brief Rounding method for calculating output dimensions. See \ref vx_convolutional_network_rounding_type_e */ +} vx_nn_pooling_params_t; + + +/*! \brief Extended input parameter for Pooling layer2 + * \ingroup group_cnn + * \version 0.4 + */ +typedef struct _vx_nn_pooling_params_ext_t +{ + vx_nn_pooling_params_t base; /*!< \brief The base definition.\ref vx_nn_pooling_params_t */ + vx_uint32 stride_x; /*!< \brief Skip x jump for down scale. */ + vx_uint32 stride_y; /*!< \brief Skip y jump for down scale. */ +} vx_nn_pooling_params_ext_t; + + +/*! \brief [Graph] Creates a Convolutional Network Pooling Layer Node, this function can support uneven padding. + * \details Pooling is done on the first 2 dimensions or the \ref vx_tensor. 
Therefore, we use here the term x for the first dimension and y for the second.\n
+ * Pooling operation is a function operation over a rectangle size and then a nearest neighbour down scale.
+ * Here we use pool_size_x and pool_size_y to specify the rectangle size on which the operation
+ * is performed. \n
+ * Before the operation (average or maximum value) is done, the data is padded in the first 2D with zeros.
+ * The down scale is done by picking the results according to a skip jump. The skip in the x and y dimension is determined by the output size dimensions.
+ * \param [in] graph The handle to the graph.
+ * \param [in] inputs The input tensor data. 3 lower dimensions represent a single input, 4th dimension for batch of inputs is optional. Dimension layout is [width, height, #IFM, #batches].
+ * See \ref vxCreateTensor2 and \ref vxCreateVirtualTensor2
+ * \param [in] pooling_params [static] Pointer to parameters of type \ref vx_nn_pooling_params_t
+ * \param [in] size_of_pooling_params [static] Size in bytes of pooling_params.
+ * \param [out] outputs The output tensor data. Output will have the same number of dimensions as input.
+ * \return vx_node.
+ * \retval 0 Node could not be created.
+ * \retval * Node handle.
+ * \ingroup group_cnn
+ */
+VX_API_ENTRY vx_node VX_API_CALL vxPoolingLayer2(
+    vx_graph graph,
+    vx_tensor inputs,
+    const vx_nn_pooling_params_t * pooling_params,
+    vx_size size_of_pooling_params,
+    vx_tensor outputs);
+
+/*! \brief [Graph] Performs element-wise arithmetic division on the input tensor data.
+ * \param [in] graph The handle to the graph.
+ * \param [in] in1 The first input tensor data.
+ * \param [in] in2 The second input tensor data. The inputs must be equal in dimensions;
+ * otherwise, if one of the input dimensions is 1,
+ * that dimension is treated as a constant and its values are duplicated (broadcast) across it,
+ * so that after the expansion the dimensions are equal.
+ * \param [in] scale [static] The scale value.
+ * \param [in] overflow_policy [static] A vx_convert_policy_e enumeration.
+ * \param [in] rounding_policy [static] A vx_round_policy_e enumeration.
+ * \param [out] out The output tensor data with the same dimensions as the input tensor data.
+ * \ingroup group_tensor
+ * \return vx_node.
+ * \retval 0 Node could not be created.
+ * \retval * Node handle.
+ */
+VX_API_ENTRY vx_node VX_API_CALL vxTensorDivideNode(vx_graph graph, vx_tensor in1, vx_tensor in2, vx_scalar scale, vx_enum overflow_policy, vx_enum rounding_policy, vx_tensor out);
+
+/*! \brief [Graph] Performs a table lookup (LUT) on element values in the input tensor data.
+ * \param [in] graph The handle to the graph.
+ * \param [in] in1 input tensor data.
+ * \param [in] lut lut tensor data.
+ * \param [out] out The output tensor data with the same dimensions as the input tensor data.
+ * \ingroup group_tensor
+ * \return vx_node.
+ * \retval 0 Node could not be created.
+ * \retval * Node handle.
+ */
+VX_API_ENTRY vx_node VX_API_CALL vxTensorTableLookupNode2(vx_graph graph, vx_tensor in1, vx_tensor lut, vx_tensor out);
+
+/*! \brief [Graph] Performs a dimension permutation (transpose) on the input tensor.
+* The node transposes the tensor according to the permutation that perm gives.
+* \param [in] graph The handle to the graph.
+* \param [in] in input tensor data.
+* \param [out] out output tensor data.
+* \param [in] perm [static] The permutation array. If not given, a fully reversed transpose is done according to the input tensor dimensions.
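+* A minimal usage sketch (illustrative only; 'graph', 'in' and 'out' are assumed to be valid handles created elsewhere):
+* \code
+* vx_uint32 perm[4] = { 1, 0, 2, 3 };   // swap the first two dimensions of a 4-D tensor
+* vx_node node = vxTensorPermuteNode(graph, in, out, perm, 4);
+* vx_status status = vxGetStatus((vx_reference)node);   // VX_SUCCESS when the node was created
+* \endcode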
+* \param [in] sizes_of_perm [static] that is the dimension of perm. +* \ingroup group_tensor +* \return vx_node. +* \retval 0 Node could not be created. +* \retval * Node handle. +*/ +VX_API_ENTRY vx_node VX_API_CALL vxTensorPermuteNode(vx_graph graph, vx_tensor in, vx_tensor out, vx_uint32* perm, vx_uint32 sizes_of_perm); + +/*! \brief [Graph] Computes the sum of elements across dimensions of input tensor. +* \param [in] graph The handle to the graph. +* \param [in] in input tensor data, +* \param [out] out output tensor data, +* \param [in] reduce_dim [static] used to determine sum across which dimension(dimension 0 means width, etc). If not given, compute the sum across all dimensions. +* \param [in] dim_size [static] used to specify the array size of redume_dim. +* \param [in] keep_dim [static] means if keep the dimesion count. +* \ingroup group_tensor +* \return vx_node. +* \retval 0 Node could not be created. +* \retval * Node handle. +* \version 0.3 +*/ +VX_API_ENTRY vx_node VX_API_CALL vxTensorReduceSumNode(vx_graph graph, vx_tensor in, vx_tensor out, vx_uint32* reduce_dim, vx_int32 dim_size, vx_bool keep_dim); + + +/*! \brief Input parameter structure for TensorPadNode + * \ingroup group_tensor + * \version 0.3 + */ +typedef struct _vx_nn_pad_params_t +{ + vx_int32 * pad_front_array; /*!< \brief An array of values which specify how many values are added on the front(left, top etc) of a tensor. */ + vx_int32 * pad_back_array; /*!< \brief An array of values which specify how many values are added on the back(right, bottom etc) of a tensor. */ + vx_uint8 numViewDimensions; /*!< \brief The size of two arrays. */ + vx_enum pad_mode; /*!< \brief A VX_TYPE_ENUM of the \ref vx_pad_mode_e enumeration. */ + vx_scalar pad_const; /*!< \brief The order const value if setting pad mode to const, the const value is base value, not quantized value. */ +} vx_nn_pad_params_t, * vx_nn_pad_params; + + +/*! \brief [Graph] Performs padding on input tensor with diffrent pad mode. +* \param [in] graph The handle to the graph. +* \param [in] in input tensor data, +* \param [out] out output tensor data, +* \param [in] pad_params [static] contains pad left, right, top, bottom, pad mode, const value, etc. +* \param [in] size_of_pad_params [static] The size of pad_params. +* \ingroup group_tensor +* \return vx_node. +* \retval 0 Node could not be created. +* \retval * Node handle. +* \version 0.3 +*/ +VX_API_ENTRY vx_node VX_API_CALL vxTensorPadNode(vx_graph graph, vx_tensor in, vx_tensor out, const vx_nn_pad_params pad_params, vx_size size_of_pad_params); + +/*! \brief [Graph] Performs copy from source tensor to destination tensor. +*\details This copy function also perform format converion if src tensor and dst tensor have differnt formats. +* Dequatization could be done by this function. +* \param [in] graph The handle to the graph. +* \param [in] src input tensor data, +* \param [out] dst output tensor data. +* \note that copy size is the min(srcSize, dstSize) +* \return vx_node. +* \retval 0 Node could not be created. +* \retval * Node handle. +* \ingroup group_tensor +*/ +VX_API_ENTRY vx_node VX_API_CALL vxTensorCopyNode(vx_graph graph, vx_tensor src, vx_tensor dst); + +/*! \brief Input parameter for vxTensorReverse + * \ingroup group_cnn + */ +typedef struct _vx_nn_tensor_reverse_params_t +{ + vx_int32 *axis; /*!< \brief array of axis */ + vx_uint32 numberOfAxis; /*!< \brief size of axis, max value is 4 */ +} +vx_nn_tensor_reverse_params_t; + +/*! \brief [Graph] Performs reverse on input tensor. 
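+* A minimal usage sketch (illustrative only; 'graph', 'inputs' and 'outputs' are assumed to be valid handles):
+* \code
+* vx_int32 reverse_axes[2] = { 0, 1 };          // reverse along the first two dimensions
+* vx_nn_tensor_reverse_params_t rev_params;
+* rev_params.axis = reverse_axes;
+* rev_params.numberOfAxis = 2;
+* vx_node node = vxTensorReverse(graph, inputs, &rev_params, sizeof(rev_params), outputs);
+* \endcode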
+* \param [in] graph The handle to the graph. +* \param [in] inputs input tensor data. +* \param [in] tensor_reverse_params [static] Pointer to parameters of type \ref vx_nn_tensor_reverse_params_t. +* \param [in] size_of_tensor_reverse_params [static] The size of tensor_reverse_params. +* \param [out] outputs output tensor data. +* \ingroup group_tensor +* \return vx_node. +* \retval 0 Node could not be created. +* \retval * Node handle. +*/ +VX_API_ENTRY vx_node VX_API_CALL vxTensorReverse(vx_graph graph, vx_tensor inputs, const vx_nn_tensor_reverse_params_t * tensor_reverse_params, vx_size size_of_tensor_reverse_params, vx_tensor outputs); + +/*! \brief Input parameter for L2Normalize layer2 + *\ingroup group_cnn + *\version 0.4 + */ +typedef struct _vx_nn_l2norm_params_t +{ + vx_int32 axis; +} vx_nn_l2norm_params_t; + +/*! \brief [Graph] Creates a Convolutional Network L2Normalize Layer2 Node. + * \param [in] graph The handle to the graph. +* \param [in] inputs The input tensor. 3 lower dimensions represent a single input, and an optional 4th dimension for batch of inputs. Dimension layout is [width, height, #IFM, #batches]. + * See \ref vxCreateTensor2 and \ref vxCreateVirtualTensor2. +* \param [in] l2norm_params [static] Pointer to parameters of type \ref vx_nn_l2norm_params +* \param [in] size_of_l2norm_params [static] Size in bytes of vx_nn_l2norm_params. +* \param [out] outputs The output tensor data. Output will have the same number of dimensions as input. +* \ingroup group_cnn +* \return vx_node. +* \retval 0 Node could not be created. +* \retval * Node handle. +*/ +VX_API_ENTRY vx_node VX_API_CALL vxL2NormalizeLayer2( + vx_graph graph, + vx_tensor inputs, + const vx_nn_l2norm_params_t * l2norm_params, + vx_size size_of_l2norm_params, + vx_tensor outputs); + +/*! \brief Input parameter structure for RPNLayer + *\ingroup group_cnn + */ +typedef struct _vx_nn_rpn_params_t +{ + vx_uint32 feature_stride; /*!< \brief Image feature stride. */ + vx_uint32 min_size; /*!< \brief The smallest rectangular box size */ + vx_uint32 pre_nms_topn; /*!< \brief Before NMS, take pre_nms_topn rectangulars for NMS. */ + vx_uint32 post_nms_topn; /*!< \brief After NMS, take post_nms_topn rectangulars for proposals output */ + vx_float32 nms_thresh; /*!< \brief The IOU threshold */ +} vx_nn_rpn_params_t; + +/*! \brief [Graph] Creates a Regin Proposal Networks Layer Node. + * \details A Region Proposal Network(RPN) takes an image(of any size) as input and outputs a set of rectangular object proposals, + * each with an objectness socre. + * \param [in] graph The handle to the graph. + * \param [in] score The score tensor data. its has 2 types of values: foreground and background. Only foreground objects are needed. + * \param [in] bbox The bounding box regressor tensor data. Used for bounding box regression. + * \param [in] anchors The anchor box tensor data. A set of rectangles generated by scale and aspect ratio. + * \param [in] img_info [static] The image information tensor data. 4 elements: image width, image height, image width scale, image height scale. + * \param [in] rpn_params [static] Pointer to parameters of type \ref vx_nn_rpn_params_t + * \param [in] size_of_rpn_params [static] Size in bytes of vx_nn_rpn_params. + * \param [in] roi_output The output tensor. The proposals output tensor data. This information used by ROI pooling + * \param [in] score_output The output tensor. The proposals score output tensor data. + * \return vx_node. + * \returns A node reference \ref vx_node. 
Any possible errors preventing a + * successful creation should be checked using \ref vxGetStatus. + * \ingroup group_cnn + */ +VX_API_ENTRY vx_node VX_API_CALL vxRPNLayer( + vx_graph graph, + vx_tensor score, + vx_tensor bbox, + vx_tensor anchors, + vx_tensor img_info, + const vx_nn_rpn_params_t * rpn_params, + vx_size size_of_rpn_params, + vx_tensor roi_output, + vx_tensor score_output + ); + +/*! \brief Input parameters for a lstm activation operation. + * \ingroup group_cnn + * \version 0.3 + */ +typedef struct _vx_nn_lstm_activation_params_t +{ + vx_int32 is_ln; + vx_int32 is_cifg; + vx_int32 is_proj; + vx_int32 is_hybrid; + vx_int32 is_peephole; + vx_int32 recurrent_activation; + vx_float32 forget_bias; +} vx_nn_lstm_activation_params_t; + +/*! \brief Input parameters for a lstm operation. + * \ingroup group_cnn + * \version 0.3 + */ +typedef struct _vx_nn_lstm_params_t +{ + vx_tensor input2input_weight; /*!< \brief Optional A 2-D tensor of type T, of shape [num_units, input_size]. where "num_units" corresponds to the number of cell units.*/ + vx_tensor input2forget_weight; /*!< \brief A 2-D tensor of type T, of shape [num_units, input_size].*/ + vx_tensor input2cell_weight; /*!< \brief A 2-D tensor of type T, of shape [num_units, input_size].*/ + vx_tensor input2output_weight; /*!< \brief A 2-D tensor of type T, of shape [num_units, input_size].*/ + + vx_tensor recurrent2input_weight; /*!< \brief Optional A 2-D tensor of type T, of shape [num_units, output_size]. where "output_size" corresponds to either the number of cell units (i.e., "num_units"), or the second dimension of the "projection_weights", if defined.*/ + vx_tensor recurrent2forget_weight; /*!< \brief A 2-D tensor of type T, of shape [num_units, output_size].*/ + vx_tensor recurrent2cell_weight; /*!< \brief A 2-D tensor of type T, of shape [num_units, output_size].*/ + vx_tensor recurrent2output_weight; /*!< \brief A 2-D tensor of type T, of shape [num_units, output_size].*/ + + vx_tensor cell2input_weight; /*!< \brief Optional A 1-D tensor of type T, of shape [num_units].*/ + vx_tensor cell2forget_weight; /*!< \brief Optional A 1-D tensor of type T, of shape [num_units].*/ + vx_tensor cell2output_weight; /*!< \brief Optional A 1-D tensor of type T, of shape [num_units].*/ + + vx_tensor input_gate_bias; /*!< \brief Optional A 1-D tensor of type T, of shape [num_units].*/ + vx_tensor forget_gate_bias; /*!< \brief A 1-D tensor of type T, of shape [num_units].*/ + vx_tensor cell_bias; /*!< \brief A 1-D tensor of type T, of shape [num_units].*/ + vx_tensor output_gate_bias; /*!< \brief A 1-D tensor of type T, of shape [num_units].*/ + + vx_tensor projection_weight; /*!< \brief Optional A 2-D tensor of type T, of shape [output_size, num_units].*/ + vx_tensor projection_bias; /*!< \brief Optional A 1-D tensor of type T, of shape [output_size].*/ + + vx_tensor activation; /*!< \brief Optional. An ActivationFunctionType indicating the activation function. If "NONE" is specified then it results in a linear activation.If "NONE" is specified then it results in a linear activation.*/ + vx_tensor cell_clip; /*!< \brief A clipping threshold for the cell state, such that values are bound within [-cell_clip, cell_clip]. If set to 0.0 then clipping is disabled.*/ + vx_tensor proj_clip; /*!< \brief A clipping threshold for the output from the projection layer, such that values are bound within [-proj_clip, proj_clip]. If set to 0.0 then clipping is disabled.*/ +} vx_nn_lstm_params_t; + +/*! 
\brief extenstion parameters for a lstm unit operation. + * \ingroup group_cnn + */ +typedef struct _vx_nn_lstm_params_ext_t +{ + vx_nn_lstm_params_t base; /*!< \brief standard structure head.*/ + vx_tensor forget_bias; /*!< \brief A bias(float 32) for the forget gate. If set to 0.0f(by default) then bias is ignored.*/ + + vx_float32 norm_gain; /*!< \brief Float32[static] The layer normalization gain initial value(default is 1.0f).*/ + vx_float32 norm_shift; /*!< \brief Float32[static] The layer normalization shift initial value(default is 0.0f).*/ + + vx_tensor sequence_length; /*!< \brief Optional[static] Specifies the length of each sequence in inputs. An `int32` (tensor) size `[batch_size]`, values in `[0, time_len)` or None(by default).*/ + + /*Since ANDROID NN API level 29 there are additional inputs to this op:*/ + vx_tensor layernorm2input_weight; /*!< \brief [Optional] The input layer normalization weights. A 1 - D tensor of shape[num_units].Used to rescale normalized inputs to activation at input gate.*/ + vx_tensor layernorm2forget_weight; /*!< \brief [Optional] The forget layer normalization weights. A 1 - D tensor of shape[num_units].Used to rescale normalized inputs to activation at forget gate.*/ + vx_tensor layernorm2cell_weight; /*!< \brief [Optional] The cell layer normalization weights. A 1 - D tensor of shape[num_units].Used to rescale normalized inputs to activation at cell gate.*/ + vx_tensor layernorm2output_weight; /*!< \brief [Optional] The output layer normalization weights. A 1 - D tensor of shape[num_units].Used to rescale normalized inputs to activation at output gate.*/ +} vx_nn_lstm_params_ext_t; + +/*! \brief input parameters for a lstm layer operation. + * \ingroup group_cnn + */ +typedef struct _vx_nn_lstm_layer_params_t +{ + vx_nn_lstm_params_t lstm_param; /*!< \brief lstm input param \ref vx_nn_lstm_params_t.*/ + vx_enum lstm_layer_type; /*!< \brief lstm layer type.*/ +} vx_nn_lstm_layer_params_t; + +/*! \brief input parameters for a lstm layer operation. + * \ingroup group_cnn + */ +typedef struct _vx_nn_lstm_layer_params_ext_t +{ + vx_nn_lstm_params_ext_t lstm_param; /*!< \brief lstm input param \ref vx_nn_lstm_params_ext_t.*/ + vx_enum lstm_layer_type; /*!< \brief lstm layer type.*/ +} vx_nn_lstm_layer_params_ext_t; + +/*! \brief [Graph] Creates a Long short-term memory unit (LSTM) Unit Networks Layer Node. + * \details + * The default non-peephole implementation is based on: + * http://deeplearning.cs.cmu.edu/pdfs/Hochreiter97_lstm.pdf + * S. Hochreiter and J. Schmidhuber. "Long Short-Term Memory". Neural + * Computation, 9(8):1735-1780, 1997. + * + * The peephole implementation is based on: + * https://research.google.com/pubs/archive/43905.pdf + * Hasim Sak, Andrew Senior, and Francoise Beaufays. "Long short-term memory + * recurrent neural network architectures for large scale acoustic modeling." + * INTERSPEECH, 2014. + * + * The coupling of input and forget gate (CIFG) is based on: + * http://arxiv.org/pdf/1503.04069.pdf + * Greff et al. "LSTM: A Search Space Odyssey" + * + * The class has the following independently optional inputs: + * * If input gate (if CIFG): "input_to_forget_weights", + * "recurrent_to_input_weights", "cell_to_input_weights", "input_gate_bias". + * * If no peephole connections: "cell_to_input_weights", + * "cell_to_forget_weights", "cell_to_output_weights". + * * If no projection layer: "projection_weights" and "projection_bias". + * * If no projection bias: "projection_bias". 
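+ * A minimal usage sketch of the non-CIFG, non-peephole, non-projection case (illustrative only; every tensor
+ * handle below is assumed to be created elsewhere, and leaving the optional fields NULL is an assumption based
+ * on the list above):
+ * \code
+ * vx_nn_lstm_params_t lstm_params = { 0 };              // optional weights and clips stay NULL
+ * lstm_params.input2forget_weight     = i2f_weights;
+ * lstm_params.input2cell_weight       = i2c_weights;
+ * lstm_params.input2output_weight     = i2o_weights;
+ * lstm_params.recurrent2forget_weight = r2f_weights;
+ * lstm_params.recurrent2cell_weight   = r2c_weights;
+ * lstm_params.recurrent2output_weight = r2o_weights;
+ * lstm_params.forget_gate_bias        = forget_bias_tensor;
+ * lstm_params.cell_bias               = cell_bias_tensor;
+ * lstm_params.output_gate_bias        = output_bias_tensor;
+ * vx_node node = vxLstmUnitLayer(graph, input, output_state_in, cell_state_in,
+ *                                &lstm_params, sizeof(lstm_params),
+ *                                scratch, output_state_out, cell_state_out, output);
+ * \endcode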
+ * + * \param [in] graph The handle to the graph. + * \param [in] input A 2-D tensor of type T, of shape [input_size, batch_size], where + * "batch_size" corresponds to the batching dimension, and "input_size" + * is the size of the input. + * \param [in] output_state_in A 2-D tensor of type T, of shape [output_size, batch_size]. + * \param [in] cell_state_in A 2-D tensor of type T, of shape [num_units, batch_size]. + * \param [in] lstm_params LSTM paraments \ref vx_nn_lstm_params_t . + * \param [in] size_of_lstm_params [static] The size of the lstm_params. + * \param [out] scratch A 3-D tensor of type T, of shape [num_cell, 4, batch_size]. + * \param [out] output_state_out A 2-D tensor of type T, of shape [output_size, batch_size]. + * \param [out] cell_state_out A 2-D tensor of type T, of shape [num_units, batch_size]. + * \param [out] output A 2-D tensor of type T, of shape [output_size, batch_size]. + * This is effectively the same as the current "output_state" value. + * \return vx_node. + * \returns A node reference \ref vx_node. Any possible errors preventing a + * successful creation should be checked using \ref vxGetStatus. + * \ingroup group_cnn + * \version 0.3 + */ +VX_API_ENTRY vx_node VX_API_CALL vxLstmUnitLayer( + vx_graph graph, + vx_tensor input, + vx_tensor output_state_in, + vx_tensor cell_state_in, + const vx_nn_lstm_params_t * lstm_params, + vx_size size_of_lstm_params, + vx_tensor scratch, + vx_tensor output_state_out, + vx_tensor cell_state_out, + vx_tensor output); + +/*! \brief [Graph] Creates a Long short-term memory layer (LSTM) Networks Layer Node. + * \details + * + * \param [in] graph The handle to the graph. + * \param [in] input A 3-D tensor of type T, of shape [input_size, batch_size, time_step], where + * "input_size" corresponds to the size of the input, and "batch_size" + * is the batching dimension, time_step means time length actually used by the input. + * \param [in] static_input optional, A 2-D tensor of type T, of shape [input_size, batch_size], where + * "input_size" corresponds to the size of the input, and "batch_size" + * is the batching dimension. + * \param [in] cont optional, A 2-D tensor of type T, of shape [input_size, batch_size], where + * "input_size" corresponds to the size of the input, and "batch_size" + * is the batching dimension. + * \param [in] lstm_layer_params LSTM paraments \ref vx_nn_lstm_layer_params_t . + * \param [in] size_of_lstm_layer_params [static] The size of the lstm_layer_params. + * \param [out] output A 2-D/3D tensor of type T, of shape [output_size, batch_size] or [output_size, batch_size, time]. + * This is effectively the same as the current "output_state" value. + * \return vx_node. + * \returns A node reference \ref vx_node. Any possible errors preventing a + * successful creation should be checked using \ref vxGetStatus. + * \ingroup group_cnn + * \version 0.3 + */ +VX_API_ENTRY vx_node VX_API_CALL vxLstmLayer( + vx_graph graph, + vx_tensor input, + vx_tensor static_input, + vx_tensor cont, + const vx_nn_lstm_layer_params_t * lstm_layer_params, + vx_size size_of_lstm_layer_params, + vx_tensor output + ); + +/*! \brief [Graph] Creates transpose layer node. +* \details +* Transposes the input tensor, permuting the dimensions according to perm tensor. +* +* \param [in] graph The handle to the graph. +* \param [in] input A n-D tensor, specifying the tensor to be transposed. +* \param [in] transpose_params paraments \ref vx_nn_transpose_params_t . 
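+* A minimal usage sketch (illustrative only; 'graph', 'inputs' and 'outputs' are assumed to be valid handles):
+* \code
+* vx_int32 perm[3] = { 2, 0, 1 };               // move the last dimension to the front
+* vx_nn_transpose_params_t transpose_params;
+* transpose_params.dims     = perm;
+* transpose_params.dims_num = 3;
+* vx_node node = vxTensorTransposeNode2(graph, inputs, &transpose_params, sizeof(transpose_params), outputs);
+* \endcode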
+* \param [in] size_of_transpose_param [static] The size of the vx_nn_transpose_params_t. +* \param [out] output A n-D tensor of the same type as input. +* \return vx_node. +* \returns A node reference \ref vx_node. Any possible errors preventing a +* successful creation should be checked using \ref vxGetStatus. +* \ingroup group_tensor +* \version 0.5 +*/ +VX_API_ENTRY vx_node VX_API_CALL vxTensorTransposeNode2( + vx_graph graph, + vx_tensor inputs, + const vx_nn_transpose_params_t *transpose_params, + vx_size size_of_transpose_param, + vx_tensor outputs); + +/*! \brief [Graph] Creates mean layer node. +* \details +* Computes the mean of elements across dimensions of a tensor. +* +* \param [in] graph The handle to the graph. +* \param [in] input A n-D tensor, specifying the input. +* \param [in] mean_params paraments \ref vx_nn_mean_params_t . +* \param [in] size_of_mean_param [static] The size of the vx_nn_mean_params_t. +* \param [out] output A n-D tensor of the same type as input. +* \return vx_node. +* \returns A node reference \ref vx_node. Any possible errors preventing a +* successful creation should be checked using \ref vxGetStatus. +* \ingroup group_tensor +* \version 0.5 +*/ +VX_API_ENTRY vx_node VX_API_CALL vxTensorMeanNode( + vx_graph graph, + vx_tensor inputs, + const vx_nn_mean_params_t *mean_params, + vx_size size_of_mean_param, + vx_tensor outputs); + +/*! \brief [Graph] Creates sum layer node. +* \details +* Computes the sum of elements across dimensions of a tensor. +* +* \param [in] graph The handle to the graph. +* \param [in] input A n-D tensor, specifying the input. +* \param [in] sum_params paraments \ref vx_nn_sum_params_t . +* \param [in] size_of_sum_param [static] The size of the vx_nn_mean_params_t. +* \param [out] output A n-D tensor of the same type as input. +* \return vx_node. +* \returns A node reference \ref vx_node. Any possible errors preventing a +* successful creation should be checked using \ref vxGetStatus. +* \ingroup group_tensor +* \version 0.5 +*/ +VX_API_ENTRY vx_node VX_API_CALL vxReduceSumNode( + vx_graph graph, + vx_tensor inputs, + const vx_nn_sum_params_t *sum_params, + vx_size size_of_sum_param, + vx_tensor outputs); + +/*! \brief [Graph] Creates squeeze layer node. +* \details +* Remove dimensions of size 1 from the input tensor. +* +* \param [in] graph The handle to the graph. +* \param [in] input A n-D tensor, specifying the tensor to be squeezed. +* \param [in] squeeze_params paraments \ref vx_nn_squeeze_params_t . +* \param [in] size_of_squeeze_param [static] The size of the vx_nn_squeeze_params_t. +* \param [out] output A n-D tensor of the same type as input. Contains the same data as input, +* but has one or more dimensions of size 1 removed. +* \return vx_node. +* \returns A node reference \ref vx_node. Any possible errors preventing a +* successful creation should be checked using \ref vxGetStatus. +* \ingroup group_tensor +* \version 0.5 +*/ +VX_API_ENTRY vx_node VX_API_CALL vxTensorSqueezeNode( + vx_graph graph, + vx_tensor inputs, + const vx_nn_squeeze_params_t *squeeze_params, + vx_size size_of_squeeze_param, + vx_tensor outputs); + +/*! \brief [Graph] Creates stride slice layer node. +* \details +* Extracts a stride slice of a tensor. +* +* \param [in] graph The handle to the graph. +* \param [in] input A n-D tensor, specifying the tensor to be sliced. +* \param [in] stride_slice_params paraments \ref vx_nn_stride_slice_params_t . +* \param [in] size_of_stride_slice_param [static] The size of the vx_nn_stride_slice_params_t. 
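+* A minimal usage sketch (illustrative only; the 1-D int32 tensors 'begin_t', 'end_t' and 'stride_t' of length
+* rank(input) are assumed to be created elsewhere):
+* \code
+* vx_nn_stride_slice_params_t slice_params;
+* slice_params.begin_dims       = begin_t;
+* slice_params.end_dims         = end_t;
+* slice_params.stride_dims      = stride_t;
+* slice_params.begin_mask       = 0;
+* slice_params.end_mask         = 0;
+* slice_params.shrink_axis_mask = 0;
+* vx_node node = vxTensorStrideSliceNode(graph, inputs, &slice_params, sizeof(slice_params), outputs);
+* \endcode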
+* \param [out] output A n-D tensor of the same type as input. +* \return vx_node. +* \returns A node reference \ref vx_node. Any possible errors preventing a +* successful creation should be checked using \ref vxGetStatus. +* \ingroup group_tensor +* \version 0.5 +*/ +VX_API_ENTRY vx_node VX_API_CALL vxTensorStrideSliceNode( + vx_graph graph, + vx_tensor inputs, + const vx_nn_stride_slice_params_t *stride_slice_params, + vx_size size_of_stride_slice_param, + vx_tensor outputs); + +/*! \brief Input parameters for query hardware caps. + * \ingroup group_context + */ +typedef struct _vx_hardware_caps_params_t +{ + vx_uint32 ecoID; /*!< \brief hardware eco ID.*/ + vx_uint32 customerID; /*!< \brief hardware custmoer ID. ecoID and custmomerID can identify a unique hardware.*/ + vx_bool evis1; /*!< \brief evs1 If true, hardware support evis1.*/ + vx_bool evis2; /*!< \brief evs2 If true, hardware support evis2.*/ +} vx_hardware_caps_params_t; + +/*! \brief Input parameters for query hardware caps. + * \ingroup group_context + */ +typedef struct _vx_hardware_caps_params_ext_t +{ + vx_hardware_caps_params_t base; + vx_uint32 subGroupSize; /*!< \brief shader sub-group size.*/ + vx_bool supportVA40; /*!< \brief support 40bit virtual address.*/ + vx_uint32 supportStreamProcessor; /*!< \brief support stream processor.*/ +} vx_hardware_caps_params_ext_t; + +typedef struct _vx_hardware_caps_params_ext2_t +{ + vx_hardware_caps_params_ext_t base; + vx_uint32 streamProcessorExecCount; /*!< \brief stream processor execution count. */ + vx_uint32 streamProcessorVectorSize; /*!< \brief stream processor vector size. */ +} vx_hardware_caps_params_ext2_t; + +/*! \brief Queries hardware caps information. + * \param [in] context The reference to the context. + * \param [in] hardware_caps_params \ref vx_hardware_caps_params_t . + * \param [in] size_of_hardware_caps_param [static] Size in bytes of hardware_caps_params. + * \return A \ref vx_status_e enumeration. + * \retval VX_SUCCESS No errors; any other value indicates failure. + * \retval VX_ERROR_INVALID_REFERENCE context is not a valid \ref vx_context reference. + * \retval VX_ERROR_INVALID_PARAMETERS If any of the other parameters are incorrect. + * \ingroup group_context + */ +VX_API_ENTRY vx_status VX_API_CALL vxQueryHardwareCaps( + vx_context context, + const vx_hardware_caps_params_t * hardware_caps_params, + vx_size size_of_hardware_caps_param + ); + +/*! \brief [Graph] Creates a Convolutional-3d Network Convolution Layer Node. + * \details This function implement Convolutional-3d Network Convolution layer. + * For fixed-point data types, a fixed point calculation is performed with round and saturate according to the number of accumulator bits. The number of the accumulator bits are implementation defined, + * and should be at least 16.\n + * round: rounding according the vx_round_policy_e enumeration. \n + * saturate: A saturation according the vx_convert_policy_e enumeration. + * \param [in] graph The handle to the graph. + * \param [in] inputs The input tensor data. 4 lower dimensions represent a single input, all following dimensions represent number of batches, possibly nested. + * The dimension order is [width, height, depth, #IFM, #batches].\n + * \param [in] weights [*static] Weights are 5d tensor with dimensions [kernel_x, kernel_y, kernel_d, #IFM, #OFM]. + * see \ref vxCreateTensor2 and \ref vxCreateVirtualTensor2 \n Weights data type must match the data type of the inputs. 
(Kernel parameter #1) + * \param [in] biases [*static] Optional, ignored if NULL. The biases, which may be shared (one per ofm) or unshared (one per ofm * output location). The possible layouts are + * either [#OFM] or [width, height, #OFM]. Biases data type must match the data type of the inputs. + * \param [in] convolution_params [static] Pointer to parameters of type \ref vx_nn_convolution_3d_params_t. + * \param [in] size_of_convolution_params [static] Size in bytes of convolution_params. Note that this parameter is not counted as one of the kernel parameters. + * \param [out] outputs The output tensor data. Output will have the same number and structure of dimensions as input. Output tensor data type must be same as the inputs. + * \return vx_node. + * \returns A node reference \ref vx_node. Any possible errors preventing a + * successful creation should be checked using \ref vxGetStatus. + * \ingroup group_cnn + */ +VX_API_ENTRY vx_node VX_API_CALL vxConv3dLayer(vx_graph graph, vx_tensor inputs, vx_tensor weights, vx_tensor biases, const vx_nn_convolution_3d_params_t *convolution_params, vx_size size_of_convolution_params, vx_tensor outputs); + +/*! \brief [Graph] Creates a Convolutional Network Deconvolution3d Layer Node. + * \details Deconvolution denote a sort of reverse convolution, which importantly and confusingly is not actually a proper mathematical deconvolution. + * Convolutional Network Deconvolution is up-sampling of an image by learned Deconvolution coefficients. + * The operation is similar to convolution but can be implemented by up-sampling the inputs with zeros insertions between the inputs, + * and convolving the Deconvolution kernels on the up-sampled result. + * For fixed-point data types, a fixed point calculation is performed with round and saturate according to the number of accumulator bits. The number of the accumulator bits are implementation defined, + * and should be at least 16.\n + * round: rounding according the vx_round_policy_e enumeration. \n + * saturate: A saturation according the vx_convert_policy_e enumeration. + * The following equation is implemented: \n + * \f$ outputs[j,k,i] = saturate(round(\sum_{l} \sum_{m,n}(inputs_{upscaled}[j+m,k+n,l] \times weights[m,n,l,i])+biasses[j,k,i])) \f$\n + * Where \f$m,n\f$ are indexes on the convolution matrices. \f$ l\f$ is an index on all the convolutions per input.\f$ i\f$ is an index per output. + * \f$ j,k \f$ are the inputs/outputs spatial indexes. + * Deconvolution is done on the width and height dimensions of the \ref vx_tensor. Therefore, we use here the term x for the width dimension and y for the height dimension.\n + * before the Deconvolution is done, up-scaling the width and height dimensions with zeros is performed. + * The relation between input to output is as follows: \n + * \f$ width_{output} = (width_{input} -1) * upscale_x - 2 * padding_x + kernel_x + a_x \f$\n + * and \n + * \f$ height_{output} = (height_{input} - 1) * upscale_y - 2 * padding_y + kernel_y + a_y \f$\n + * \f$ depth_{output} = (depth_{input} - 1) * upscale_d - 2 * padding_d + kernel_d + a_d \f$\n + * where + * \f$width_{input}\f$ is the size of the input width dimension. + * \f$height_{input}\f$ is the size of the input height dimension. + * \f$depth_{input}\f$ is the size of the input depth dimension. + * + * \f$width_{output}\f$ is the size of the output width dimension. + * \f$height_{output}\f$ is the size of the output height dimension. + * \f$depth_{output}\f$ is the size of the output depth dimension. 
+ *
+ * \f$kernel_x\f$, \f$kernel_y\f$ and \f$kernel_d\f$ are the deconvolution kernel sizes in width, height and depth.
+ * \f$a_x\f$ and \f$a_y\f$ are user-specified quantities used to distinguish between the \f$upscale_x\f$ and \f$upscale_y\f$ different possible output sizes.
+ * \f$upscale_x\f$, \f$upscale_y\f$ and \f$upscale_d\f$ are calculated by the relation between input and output.
+ * \f$a_x\f$ and \f$a_y\f$ must be positive and smaller than \f$upscale_x\f$ and \f$upscale_y\f$ respectively.
+ * Since the padding parameter is applied to the output, the effective input padding is: \n
+ * \f$ padding_{input_x} = kernel_x -padding_x -1\f$ \n
+ * \f$ padding_{input_y} = kernel_y -padding_y -1\f$ \n
+ * \f$ padding_{input_d} = kernel_d -padding_d -1\f$ \n
+ * Therefore the following constraints apply:
+ * \f$kernel_x >= padding_x - 1\f$,
+ * \f$kernel_y >= padding_y - 1\f$,
+ * \f$kernel_d >= padding_d - 1\f$.
+ * Rounding is done according to \ref vx_nn_rounding_type_e.
+ * Notice that this node creation function has more parameters than the corresponding kernel. Numbering of kernel parameters (required if you create this node using the generic interface) is explicitly specified here.
+ * \param [in] graph The handle to the graph.
+ * \param [in] inputs The input tensor. 4 lower dimensions represent a single input, and an optional 5th dimension for batch of inputs. Dimension layout is [width, height, depth, #IFM, #batches].
+ * See \ref vxCreateTensor2 and \ref vxCreateVirtualTensor2.
+ * Implementations must support input tensor data types indicated by the extension strings 'KHR_NN_8' or 'KHR_NN_8 KHR_NN_16'. (Kernel parameter #0)
+ * \param [in] weights [static] The 5d weights with dimensions [width, height, depth, #IFM, #OFM]. See \ref vxCreateTensor2 and \ref vxCreateVirtualTensor2. (Kernel parameter #1)
+ * \param [in] biases [static] Optional, ignored if NULL. The biases have one dimension [#OFM]. Implementations must support input tensor data type same as the inputs. (Kernel parameter #2)
+ * \param [in] deconvolution_params [static] Pointer to parameters of type \ref vx_nn_deconvolution_3d_params_t (Kernel parameter #3)
+ * \param [in] size_of_deconv_params [static] Size in bytes of deconvolution_params. Note that this parameter is not counted as one of the kernel parameters.
+ * \param [out] outputs The output tensor. The output has the same number of dimensions as the input. (Kernel parameter #4)
+ * \ingroup group_cnn
+ * \return vx_node.
+ * \returns A node reference \ref vx_node. Any possible errors preventing a
+ * successful creation should be checked using \ref vxGetStatus.
+ */
+VX_API_ENTRY vx_node VX_API_CALL vxDeconv3dLayer(vx_graph graph, vx_tensor inputs, vx_tensor weights, vx_tensor biases, const vx_nn_deconvolution_3d_params_t *deconvolution_params, vx_size size_of_deconv_params, vx_tensor outputs);
+
+/*! \brief [Graph] Creates a Layer Normalization Node.
+ * \details Normalizes the activations of the previous layer at each batch, i.e. applies a transformation that maintains the mean activation close to 0 and the activation standard deviation close to 1.
+ * \param [in] graph The handle to the graph.
+ * \param [in] eps [static] Float 32. Small value to add to the variance estimate so that we don't divide by zero (default is 1e-5).
+ * \param [in] axis [static] The axis along which to normalize.
+ * \param [in] input_list [static] The input tensor data.
+ * \param [in] input_count [static] The input tensor number.
+ * \param [out] output [static] The output tensor data.
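+ *
+ * \par Example
+ * A minimal usage sketch (informative only); it assumes the graph and the
+ * hypothetical tensors <tt>in</tt> and <tt>out</tt> have already been created
+ * elsewhere, and it only illustrates the call signature.
+ * \code
+ * vx_tensor in_list[1] = { in };            // single input tensor
+ * vx_node node = vxLayerNormalizationLayer(
+ *     graph,
+ *     1e-5f,      // eps, the documented default
+ *     0,          // axis: normalize along dimension 0
+ *     in_list,    // input tensor list
+ *     1,          // input_count
+ *     out);       // output tensor
+ * if (vxGetStatus((vx_reference)node) != VX_SUCCESS)
+ * {
+ *     // node creation failed
+ * }
+ * \endcode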
+ * \return vx_node. + * \returns A node reference \ref vx_node. Any possible errors preventing a + * successful creation should be checked using \ref vxGetStatus. + * \ingroup group_cnn + */ +VX_API_ENTRY vx_node VX_API_CALL vxLayerNormalizationLayer( + vx_graph graph, + vx_float32 eps, + vx_int32 axis, + vx_tensor* input_list, + vx_uint32 input_count, + vx_tensor output + ); + +/*! \brief [Graph] Creates a layer instance normalization Node. + * \details Normalize the activations of the previous layer at each batch, i.e. applies a transformation that maintains the mean activation close to 0 and the activation standard deviation close to 1. + * \param [in] graph The handle to the graph. + * \param [in] eps [static] Float 32. Small value to add to the variance estimate so that we don't divide by zero.(default is 1e-5) + * \param [in] input_list [static] The input tensor data. + * \param [in] input_count [static] The input tensor number. + * \param [out] output [static] The output tensor data. + * \return vx_node. + * \returns A node reference \ref vx_node. Any possible errors preventing a + * successful creation should be checked using \ref vxGetStatus. + * \ingroup group_cnn + */ +VX_API_ENTRY vx_node VX_API_CALL vxInstanceNormalizationLayer( + vx_graph graph, + vx_float32 eps, + vx_tensor* input_list, + vx_uint32 input_count, + vx_tensor output + ); + +/*! \brief [Graph] Creates a layer instance normalization Node. + * \details Normalize the activations of the previous layer at each batch, i.e. applies a transformation that maintains the mean activation close to 0 and the activation standard deviation close to 1. + * \param [in] graph The handle to the graph. + * \param [in] eps [static] Float 32. Small value to add to the variance estimate so that we don't divide by zero.(default is 1e-5) + * \param [in] group_num [static] Int 32. Number of groups for GN + * \param [in] input_list [static] The input tensor data. + * \param [in] input_count [static] The input tensor number. + * \param [out] output [static] The output tensor data. + * \return vx_node. + * \returns A node reference \ref vx_node. Any possible errors preventing a + * successful creation should be checked using \ref vxGetStatus. + * \ingroup group_cnn + */ +VX_API_ENTRY vx_node VX_API_CALL vxGroupNormalizationLayer( + vx_graph graph, + vx_float32 eps, + vx_int32 group_num, + vx_tensor* input_list, + vx_uint32 input_count, + vx_tensor output + ); + +/*! \brief [Graph] Creates a layer logical ops Node. + * \details Return the truth value of x AND, XOR,OR y element-wise. + * \param [in] graph The handle to the graph. + * \param [in] ops_type [static] Int 32. Operation Type + * \param [in] input_list [static] The input tensor data. + * \param [in] input_count [static] The input tensor number. + * \param [out] output [static] The output tensor data. + * \return vx_node. + * \returns A node reference \ref vx_node. Any possible errors preventing a + * successful creation should be checked using \ref vxGetStatus. + * \ingroup group_cnn + */ +VX_API_ENTRY vx_node VX_API_CALL vxLogicalOpsLayer( + vx_graph graph, + vx_int32 ops_type, + vx_tensor* input_list, + vx_uint32 input_count, + vx_tensor output + ); + +/*! \brief [Graph] Creates a layer logical not Node. + * \details Return the truth value of not x element-wise. + * \param [in] graph The handle to the graph. + * \param [in] input [static] The input tensor data. + * \param [out] output [static] The output tensor data. + * \return vx_node. + * \returns A node reference \ref vx_node. 
Any possible errors preventing a
+ * successful creation should be checked using \ref vxGetStatus.
+ * \ingroup group_cnn
+ */
+VX_API_ENTRY vx_node VX_API_CALL vxLogicalNotLayer(
+    vx_graph graph,
+    vx_tensor input,
+    vx_tensor output
+    );
+
+/*! \brief [Graph] Creates a layer relational Node.
+ * \param [in] graph The handle to the graph.
+ * \param [in] ops_type [static] Int 32. Operation Type.
+ * \param [in] input_list [static] The input tensor data.
+ * \param [in] input_count [static] The input tensor number.
+ * \param [out] output [static] The output tensor data.
+ * \return vx_node.
+ * \returns A node reference \ref vx_node. Any possible errors preventing a
+ * successful creation should be checked using \ref vxGetStatus.
+ * \ingroup group_cnn
+ */
+VX_API_ENTRY vx_node VX_API_CALL vxRelationalLayer(
+    vx_graph graph,
+    vx_int32 ops_type,
+    vx_tensor* input_list,
+    vx_uint32 input_count,
+    vx_tensor output
+    );
+
+/*! \brief [Graph] Computes the max of elements across dimensions of the input tensor.
+* \param [in] graph The handle to the graph.
+* \param [in] inputs The input tensor data.
+* \param [in] axis [static] Determines the dimension across which the max is taken (dimension 0 means width, etc). If not given, the max is computed across all dimensions.
+* \param [in] keep_dims [static] Specifies whether the reduced dimensions are kept, i.e. whether the dimension count is preserved.
+* \param [out] outputs The output tensor data.
+* \ingroup group_tensor
+* \return vx_node.
+* \retval 0 Node could not be created.
+* \retval * Node handle.
+* \version 0.3
+*/
+VX_API_ENTRY vx_node VX_API_CALL vxTensorReduceMaxNode(
+    vx_graph graph,
+    vx_tensor inputs,
+    vx_tensor axis,
+    vx_bool keep_dims,
+    vx_tensor outputs);
+
+/*! \brief [Graph] Creates a layer minimum Node.
+ * \param [in] graph The handle to the graph.
+ * \param [in] input_list [static] The input tensor data.
+ * \param [in] input_count [static] The input tensor number.
+ * \param [out] output [static] The output tensor data.
+ * \return vx_node.
+ * \returns A node reference \ref vx_node. Any possible errors preventing a
+ * successful creation should be checked using \ref vxGetStatus.
+ * \ingroup group_cnn
+ */
+VX_API_ENTRY vx_node VX_API_CALL vxMinimumLayer(
+    vx_graph graph,
+    vx_tensor* input_list,
+    vx_uint32 input_count,
+    vx_tensor output
+    );
+
+/*! \brief [Graph] Creates a layer maximum Node.
+ * \param [in] graph The handle to the graph.
+ * \param [in] input_list [static] The input tensor data.
+ * \param [in] input_count [static] The input tensor number.
+ * \param [out] output [static] The output tensor data.
+ * \return vx_node.
+ * \returns A node reference \ref vx_node. Any possible errors preventing a
+ * successful creation should be checked using \ref vxGetStatus.
+ * \ingroup group_cnn
+ */
+VX_API_ENTRY vx_node VX_API_CALL vxMaximumLayer(
+    vx_graph graph,
+    vx_tensor* input_list,
+    vx_uint32 input_count,
+    vx_tensor output
+    );
+
+/*! \brief [Graph] Creates a layer select Node.
+ * \param [in] graph The handle to the graph.
+ * \param [in] input_list [static] The input tensor data.
+ * \param [in] input_count [static] The input tensor number.
+ * \param [out] output [static] The output tensor data.
+ * \return vx_node.
+ * \returns A node reference \ref vx_node. Any possible errors preventing a
+ * successful creation should be checked using \ref vxGetStatus.
+ * \ingroup group_cnn
+ */
+VX_API_ENTRY vx_node VX_API_CALL vxTensorSelectLayer(
+    vx_graph graph,
+    vx_tensor* input_list,
+    vx_uint32 input_count,
+    vx_tensor output
+    );
+
+/*!
\brief [Graph] Creates a layer gru cell activation z h Node. + * \param [in] graph The handle to the graph. + * \param [in] input_list [static] The input tensor data. + * \param [in] input_count [static] The input tensor number. + * \param [in] recurrent_activation [static] recurrent activation type. + * \param [in] activation [static] activation type. + * \param [out] output_list [static] The output tensor data. + * \param [out] output_count [static] The output tensor number. + * \return vx_node. + * \returns A node reference \ref vx_node. Any possible errors preventing a + * successful creation should be checked using \ref vxGetStatus. + * \ingroup group_cnn + */ +VX_API_ENTRY vx_node VX_API_CALL vxGruCellActivationZHLayer( + vx_graph graph, + vx_tensor* input_list, + vx_uint32 input_count, + vx_int32 recurrent_activation, + vx_int32 activation, + vx_tensor* output_list, + vx_uint32 output_count + ); + +/*! \brief [Graph] Creates a layer gru cell h times activation r Node. + * \param [in] graph The handle to the graph. + * \param [in] input_list [static] The input tensor data. + * \param [in] input_count [static] The input tensor number. + * \param [in] recurrent_activation [static] recurrent activation type. + * \param [out] output_list [static] The output tensor data. + * \param [out] output_count [static] The output tensor number. + * \return vx_node. + * \returns A node reference \ref vx_node. Any possible errors preventing a + * successful creation should be checked using \ref vxGetStatus. + * \ingroup group_cnn + */ +VX_API_ENTRY vx_node VX_API_CALL vxGruCellHTimeActivationRLayer( + vx_graph graph, + vx_tensor* input_list, + vx_uint32 input_count, + vx_int32 recurrent_activation, + vx_tensor* output_list, + vx_uint32 output_count + ); + +/*! \brief [Graph] Creates a layer gru cell reset after activationNode. + * \param [in] graph The handle to the graph. + * \param [in] input_list [static] The input tensor data. + * \param [in] input_count [static] The input tensor number. + * \param [in] recurrent_activation [static] recurrent activation type. + * \param [in] activation [static] activation type. + * \param [out] output_list [static] The output tensor data. + * \param [out] output_count [static] The output tensor number. + * \return vx_node. + * \returns A node reference \ref vx_node. Any possible errors preventing a + * successful creation should be checked using \ref vxGetStatus. + * \ingroup group_cnn + */ +VX_API_ENTRY vx_node VX_API_CALL vxGruCellResetAfterActivationLayer( + vx_graph graph, + vx_tensor* input_list, + vx_uint32 input_count, + vx_int32 recurrent_activation, + vx_int32 activation, + vx_tensor* output_list, + vx_uint32 output_count + ); + +/*! \brief [Graph] Creates a layer lstm activation Node. + * \param [in] graph The handle to the graph. + * \param [in] input_list [static] The input tensor data. + * \param [in] input_count [static] The input tensor number. + * \param [in] lstm_activation_param \ref vx_nn_lstm_activation_params_t . + * \param [out] output_list [static] The output tensor data. + * \param [out] output_count [static] The output tensor number. + * \return vx_node. + * \returns A node reference \ref vx_node. Any possible errors preventing a + * successful creation should be checked using \ref vxGetStatus. 
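+ *
+ * \par Example
+ * A minimal usage sketch (informative only). It assumes the graph and the
+ * hypothetical tensor arrays <tt>ins</tt>/<tt>outs</tt> already exist; the
+ * members of \ref vx_nn_lstm_activation_params_t are declared elsewhere in this
+ * header and must be filled according to the network being built.
+ * \code
+ * vx_nn_lstm_activation_params_t act_params = { 0 };   // set the required members here
+ * vx_node node = vxLSTMActivationLayer(
+ *     graph,
+ *     ins,  ins_count,       // input tensor list and its length
+ *     &act_params,
+ *     outs, outs_count);     // output tensor list and its length
+ * if (vxGetStatus((vx_reference)node) != VX_SUCCESS)
+ * {
+ *     // node creation failed
+ * }
+ * \endcode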
+ * \ingroup group_cnn
+ */
+VX_API_ENTRY vx_node VX_API_CALL vxLSTMActivationLayer(
+    vx_graph graph,
+    vx_tensor* input_list,
+    vx_uint32 input_count,
+    const vx_nn_lstm_activation_params_t * lstm_activation_param,
+    vx_tensor* output_list,
+    vx_uint32 output_count
+    );
+#ifdef __cplusplus
+}
+#endif
+
+
+#endif
diff --git a/unified-tina/inc/VX/vx_khr_nn_internal.h b/unified-tina/inc/VX/vx_khr_nn_internal.h
new file mode 100644
index 0000000..1929271
--- /dev/null
+++ b/unified-tina/inc/VX/vx_khr_nn_internal.h
@@ -0,0 +1,1221 @@
+/*
+
+ * Copyright (c) 2012-2017 The Khronos Group Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef _VX_KHR_NN_INTERNAL_H_
+#define _VX_KHR_NN_INTERNAL_H_
+
+/*!
+ * \file
+ * \brief The Khronos Extension for Deep Convolutional Networks Functions.
+ *
+ * \defgroup group_cnn Extension: Deep Convolutional Networks API
+ * \brief Convolutional Network Nodes.
+ */
+
+#define OPENVX_KHR_NN_INTERNAL   "vx_khr_nn_internal"
+
+#include <VX/vx.h>
+
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/*TODO: check it for OpenVX 1.2*/
+//#if defined(OPENVX_CNN_1_0)
+//#undef OPENVX_CNN_1_1
+//#endif
+
+/*! \brief [Graph] Creates a Convolutional Network Convolution and Activation(Relu) and Pooling Layer Node.
+* \details This function implements the Convolutional Network Convolution and Activation(Relu) and Pooling layer.
+* \param [in] graph The handle to the graph.
+* \param [in] inputs The input tensor data. 3 lower dimensions represent a single input, all following dimensions represent number of batches, possibly nested.
+* The dimension order is [width, height, #IFM, #batches]. \n
+* \param [in] weights_biases [static] Pointer to WeightBiasesParameter data, vx_weights_biases_parameter is an opaque reference.\n
+* \param [in] pad_x [static] Number of elements added at each side in the x dimension of the input.
+* \param [in] pad_y [static] Number of elements added at each side in the y dimension of the input. In fully connected layers this input is ignored.
+* \param [in] accumulator_bits [static] The total number of bits used during intermediate accumulation.
+* \param [in] overflow_policy [static] A VX_TYPE_ENUM of the vx_convert_policy_e enumeration.
+* \param [in] rounding_policy [static] A VX_TYPE_ENUM of the vx_round_policy_e enumeration.
+* \param [in] down_scale_size_rounding [static] Rounding method for calculating output dimensions. See \ref vx_convolutional_network_rounding_type_e
+* \param [in] enable_relu [static] If true, enable vxActivationLayer's relu function.
+* \param [in] pool_type [static] If neither max pooling nor average pooling, the pooling function is disabled (see \ref vx_convolutional_network_pooling_type_e).
+* \param [in] pool_size_x [static] Size of the pooling region in the x dimension.
+* \param [in] pool_size_y [static] Size of the pooling region in the y dimension.
+* \param [out] outputs The output tensor data. Output will have the same number and structure of dimensions as input.
+* \return vx_node.
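+*
+* \par Example
+* A minimal usage sketch (informative only). It assumes graph, inputs and outputs
+* already exist and that wb was created with one of the vxCreateWeightsBiasesParameter*
+* functions declared later in this header; the enum placeholders set to 0 must be
+* replaced with real values from the referenced enumerations.
+* \code
+* vx_enum ds_round  = 0;  // substitute a vx_convolutional_network_rounding_type_e value
+* vx_enum pool_type = 0;  // substitute a vx_convolutional_network_pooling_type_e value
+* vx_node node = vxConvolutionReluPoolingLayer(
+*     graph, inputs, wb,
+*     1, 1,                        // pad_x, pad_y
+*     16,                          // accumulator_bits
+*     VX_CONVERT_POLICY_SATURATE,  // overflow_policy
+*     VX_ROUND_POLICY_TO_ZERO,     // rounding_policy
+*     ds_round,
+*     vx_true_e,                   // enable_relu
+*     pool_type,
+*     2, 2,                        // pool_size_x, pool_size_y
+*     outputs);
+* if (vxGetStatus((vx_reference)node) != VX_SUCCESS)
+* {
+*     // node creation failed
+* }
+* \endcode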
+* \retval 0 Node could not be created. +* \retval * Node handle. +* \ingroup group_cnn +*/ +VX_API_ENTRY vx_node VX_API_CALL vxConvolutionReluPoolingLayer( + vx_graph graph, + vx_tensor inputs, + vx_weights_biases_parameter weights_biases, + vx_uint32 pad_x, + vx_uint32 pad_y, + vx_uint8 accumulator_bits, + vx_enum overflow_policy, + vx_enum rounding_policy, + vx_enum down_scale_size_rounding, + vx_bool enable_relu, + vx_enum pool_type, + vx_uint32 pool_size_x, + vx_uint32 pool_size_y, + vx_tensor outputs + ); + +/*! \brief [Graph] Creates a Convolutional Network Convolution and Activation(Relu) Layer Node. +* \details This function implement Convolutional Network Convolution and Activation(Relu) layer. +* \param [in] graph The handle to the graph. +* \param [in] inputs The input tensor data. 3 lower dimensions represent a single input, all following dimensions represent number of batches, possibly nested. + * The dimension order is [width, height, #IFM, #batches]. \n +* \param [in] weights_biases [static] Point to WeightBiasesParameter data, vx_weights_biases_parameter is an opaque reference. +* \param [in] pad_x [static] Number of elements added at each side in the x dimension of the input. +* \param [in] pad_y [static] Number of elements added at each side in the y dimension of the input. In fully connected layers this input is ignored. +* \param [in] accumulator_bits [static] Is the total number of bits used during intermediate accumulation. +* \param [in] overflow_policy [static] A VX_TYPE_ENUM of the vx_convert_policy_e enumeration. +* \param [in] rounding_policy [static] A VX_TYPE_ENUM of the vx_round_policy_e enumeration. +* \param [in] down_scale_size_rounding [static] Rounding method for calculating output dimensions. See \ref vx_convolutional_network_rounding_type_e +* \param [in] enable_relu [static] If true, enable vxActivationLayer's relu function. +* \param [out] outputs The output tensor data. Output will have the same number and structure of dimensions as input. +* \return vx_node. +* \retval 0 Node could not be created. +* \retval * Node handle. +* \ingroup group_cnn +*/ + +VX_API_ENTRY vx_node VX_API_CALL vxConvolutionReluLayer( + vx_graph graph, + vx_tensor inputs, + vx_weights_biases_parameter weights_biases, + vx_uint32 pad_x, + vx_uint32 pad_y, + vx_uint8 accumulator_bits, + vx_enum overflow_policy, + vx_enum rounding_policy, + vx_enum down_scale_size_rounding, + vx_bool enable_relu, + vx_tensor outputs + ); + +/*! \brief [Graph] Creates a Fully connected and Activation(Relu) Convolutional Network Layer Node. +* \details This function implement Fully connected and Activation(Relu) Convolutional Network layers. +* \param [in] graph The handle to the graph. +* \param [in] inputs The input tensor data. There two possible input layouts: +* 1. [#IFM, #batches]. See \ref vxCreateTensor and \ref vxCreateVirtualTensor. +* 2. [width, height, #IFM, #batches]. See \ref vxCreateTensor and \ref vxCreateVirtualTensor\n +* In both cases number of batches are optional and may be multidimensional. +* The second option is a special case to deal with convolution layer followed by fully connected. +* The dimension order is [#IFM, #batches]. See \ref vxCreateTensor and \ref vxCreateVirtualTensor. Note that batch may be multidimensional. +* \param [in] weights_biases [static] Point to WeightBiasesParameter data, vx_weights_biases_parameter is an opaque reference.\n +* \param [in] pad [static] Number of elements added at each side in the input. 
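+*
+* \par Example
+* A minimal usage sketch (informative only); it assumes graph, inputs, outputs and a
+* weights_biases object wb already exist, and that the enum placeholder set to 0 is
+* replaced with a real vx_convolutional_network_rounding_type_e value.
+* \code
+* vx_enum ds_round = 0;  // substitute a vx_convolutional_network_rounding_type_e value
+* vx_node node = vxFullyConnectedReluLayer(
+*     graph, inputs, wb,
+*     0,                           // pad
+*     16,                          // accumulator_bits
+*     VX_CONVERT_POLICY_SATURATE,  // overflow_policy
+*     VX_ROUND_POLICY_TO_ZERO,     // rounding_policy
+*     ds_round,
+*     vx_true_e,                   // enable_relu
+*     outputs);
+* \endcode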
+* \param [in] accumulator_bits [static] Is the total number of bits used during intermediate accumulation. +* \param [in] overflow_policy [static] A VX_TYPE_ENUM of the vx_convert_policy_e enumeration. +* \param [in] rounding_policy [static] A VX_TYPE_ENUM of the vx_round_policy_e enumeration. +* \param [in] down_scale_size_rounding [static] Rounding method for calculating output dimensions. See \ref vx_convolutional_network_rounding_type_e +* \param [in] enable_relu [static] If true, enable vxActivationLayer's relu function. +* \param [out] outputs The output tensor data. Output dimension layout is [#OFM,#batches]. See \ref vxCreateTensor and \ref vxCreateVirtualTensor, where #batches may be multidimensional. +* \return vx_node. +* \retval 0 Node could not be created. +* \retval * Node handle. +* \ingroup group_cnn +*/ +VX_API_ENTRY vx_node VX_API_CALL vxFullyConnectedReluLayer( + vx_graph graph, + vx_tensor inputs, + vx_weights_biases_parameter weights_biases, + vx_uint32 pad, + vx_uint8 accumulator_bits, + vx_enum overflow_policy, + vx_enum rounding_policy, + vx_enum down_scale_size_rounding, + vx_bool enable_relu, + vx_tensor outputs + ); + +/*! \brief Input parameter for convolutionReluPooling2 + * \ingroup group_cnn + */ +typedef struct _vx_nn_convolution_relu_pooling_params_t +{ + vx_size dilation_x; /*!< \brief "inflate" the kernel by inserting zeros between the kernel elements in the x direction. + The value is the number of zeros to insert. */ + vx_size dilation_y; /*!< \brief "inflate" the kernel by inserting zeros between the kernel elements in the y direction. + The value is the number of zeros to insert. */ + vx_uint32 pad_x_left; /*!< \brief Number of elements added at each side in the left of x dimension of the input. */ + vx_uint32 pad_x_right; /*!< \brief Number of elements added at each side in the right of x dimension of the input. */ + vx_uint32 pad_y_top; /*!< \brief Number of elements added at each side in the top of y dimension of the input. */ + vx_uint32 pad_y_bottom; /*!< \brief Number of elements added at each side in the bottom of y dimension of the input. */ + vx_uint8 accumulator_bits; /*!< \brief Is the total number of bits used during intermediate accumulation. */ + vx_enum overflow_policy; /*!< \brief A VX_TYPE_ENUM of the vx_convert_policy_e enumeration. */ + vx_enum rounding_policy; /*!< \brief A VX_TYPE_ENUM of the vx_round_policy_e enumeration. */ + vx_enum down_scale_size_rounding; /*!< \brief Rounding method for calculating output dimensions. See vx_convolutional_network_rounding_type_e */ + vx_bool enable_relu; /*!< \brief Enable Relu layer function or not. */ + vx_enum pool_type; /*!< \brief neither max pooling nor average pooling, disable pooling function (see vx_convolutional_network_pooling_type_e). */ + vx_uint32 pool_size_x; /*!< \brief Size of the pooling region in the x dimension */ + vx_uint32 pool_size_y; /*!< \brief Size of the pooling region in the y dimension. */ + vx_enum pad_mode; /*!< \brief A VX_TYPE_ENUM of the \ref vx_pad_mode_e enumeration. */ + vx_scalar pad_const; /*!< \brief The order const value if setting pad mode to const, the const value is base value, not quantized value. */ +} vx_nn_convolution_relu_pooling_params_t, * vx_nn_convolution_relu_pooling_params; + +/*! \brief Extended input parameter for a convolutionReluPooling2 operation. 
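+ *
+ * \par Example
+ * A minimal sketch (informative only) of how the extended structure wraps the base
+ * structure and is passed to \ref vxConvolutionReluPoolingLayer2, which distinguishes
+ * the variants by the size argument. It assumes graph, inputs, wb and outputs already
+ * exist; only a few members are set and the rest are left zero for brevity.
+ * \code
+ * vx_nn_convolution_relu_pooling_params_ext_t p = { 0 };
+ * p.base.pad_x_left  = 1;  p.base.pad_x_right  = 1;
+ * p.base.pad_y_top   = 1;  p.base.pad_y_bottom = 1;
+ * p.base.enable_relu = vx_true_e;
+ * p.stride_x = 2;          // down-scale skip in x
+ * p.stride_y = 2;          // down-scale skip in y
+ * vx_node node = vxConvolutionReluPoolingLayer2(
+ *     graph, inputs, wb,
+ *     (const vx_nn_convolution_relu_pooling_params_t *)&p, sizeof(p),
+ *     outputs);
+ * \endcode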
+ * \ingroup group_cnn
+ *\version 0.3
+ */
+typedef struct _vx_nn_convolution_relu_pooling_params_ext_t
+{
+    vx_nn_convolution_relu_pooling_params_t base; /*!< \brief convolution relu pooling params \ref vx_nn_convolution_relu_pooling_params_t */
+    vx_uint32 stride_x; /*!< \brief skip x jump for down scale. */
+    vx_uint32 stride_y; /*!< \brief skip y jump for down scale. */
+} vx_nn_convolution_relu_pooling_params_ext_t, * vx_nn_convolution_relu_pooling_params_ext;
+
+/*! \brief The 2nd version of extended input parameter for a convolutionReluPooling2 operation.
+ *\ingroup group_cnn
+ *\version 0.4
+ */
+typedef struct _vx_nn_convolution_relu_pooling_params_ext2_t
+{
+    vx_nn_convolution_relu_pooling_params_ext_t ext; /*!< \brief convolution relu pooling params \ref vx_nn_convolution_relu_pooling_params_ext_t */
+    vx_int32 depth_multiplier; /*!< \brief specifying the depthwise multiplier for depthwise convolution. */
+    vx_enum src_rank_mode; /*!< \brief source rank mode A VX_TYPE_ENUM of the \ref vx_tensor_rank_type_e enumeration. */
+    vx_enum convert_dst_format; /*!< \brief The convert target format. */
+} vx_nn_convolution_relu_pooling_params_ext2_t, * vx_nn_convolution_relu_pooling_params_ext2;
+
+#define MERGED_NODE_COUNT_MAX 4
+
+typedef struct _vx_nn_convolution_relu_pooling_params_ext3_t
+{
+    vx_nn_convolution_relu_pooling_params_ext2_t ext2; /*!< \brief convolution relu pooling params \ref vx_nn_convolution_relu_pooling_params_ext2_t */
+    vx_uint32 mergedNodeCount; /*!< \brief Number of merged nodes, up to \ref MERGED_NODE_COUNT_MAX. */
+    vx_float32* interScale; /*!< \brief Scale values of the intermediate tensors between the merged nodes. */
+    vx_int32* interZeroPoint; /*!< \brief Zero points of the intermediate tensors between the merged nodes. */
+    vx_enum* interDataType; /*!< \brief Data types of the intermediate tensors between the merged nodes. */
+} vx_nn_convolution_relu_pooling_params_ext3_t, * vx_nn_convolution_relu_pooling_params_ext3;
+
+typedef struct _vx_nn_convolution_relu_pooling_params_ext4_t
+{
+    vx_nn_convolution_relu_pooling_params_ext3_t ext3; /*!< \brief convolution relu pooling params \ref vx_nn_convolution_relu_pooling_params_ext3_t */
+    vx_uint32 poolingStrideX;
+    vx_uint32 poolingStrideY;
+    vx_uint32 poolingPadLeft;
+    vx_uint32 poolingPadRight;
+    vx_uint32 poolingPadTop;
+    vx_uint32 poolingPadBottom;
+    vx_bool enable_nn_tensor_add_relu; /*!< \brief Enable Relu function after tensor add. */
+} vx_nn_convolution_relu_pooling_params_ext4_t, * vx_nn_convolution_relu_pooling_params_ext4;
+
+typedef struct _vx_nn_convolution_relu_pooling_params_ext5_t
+{
+    vx_nn_convolution_relu_pooling_params_ext4_t ext4; /*!< \brief convolution relu pooling params \ref vx_nn_convolution_relu_pooling_params_ext_t */
+
+    vx_object_array inputs_list;
+    vx_object_array outputs_list;
+    vx_spinst spinst_obj;
+} vx_nn_convolution_relu_pooling_params_ext5_t, * vx_nn_convolution_relu_pooling_params_ext5;
+
+typedef struct _vx_nn_convolution_relu_pooling_params_ext6_t
+{
+    vx_nn_convolution_relu_pooling_params_ext5_t ext5; /*!< \brief convolution relu pooling params \ref vx_nn_convolution_relu_pooling_params_ext_t */
+    vx_uint32 depth2space_block_x; /*!< \brief hw limitation: value between 2 and 16. 2, 16 included. */
+    vx_uint32 depth2space_block_y; /*!< \brief hw limitation: equals value of depth2space_block_x.
*/
+
+} vx_nn_convolution_relu_pooling_params_ext6_t, * vx_nn_convolution_relu_pooling_params_ext6;
+
+typedef struct _vx_nn_convolution_relu_pooling_params_ext7_t
+{
+    vx_nn_convolution_relu_pooling_params_ext6_t ext6; /*!< \brief convolution relu pooling params \ref vx_nn_convolution_relu_pooling_params_ext_t */
+    vx_bool isSub;
+} vx_nn_convolution_relu_pooling_params_ext7_t, * vx_nn_convolution_relu_pooling_params_ext7;
+
+typedef struct _vx_nn_fused_sp_params_t
+{
+    vx_enum multi_sp_kernel_type;
+    /*!*/
+    vx_scalar mul_scale;
+    /*!*/
+    union
+    {
+        struct
+        {
+            vx_scalar linear_a, linear_b;
+        } linear;
+        struct
+        {
+            vx_scalar tanh_a, tanh_b;
+            float a_v, b_v;
+        } tanh_linear;
+        struct
+        {
+            vx_scalar hsigmoid_a, hsigmoid_b;
+        } hsigmoid;
+        struct
+        {
+            vx_scalar clip_a, clip_b;
+        } clip;
+        struct
+        {
+            vx_scalar scalar_a, scalar_b, scalar_c, scalar_d;
+        } params;
+    } scalar_params;
+    /*!*/
+} vx_nn_fused_sp_params_t, * vx_nn_fused_sp_params;
+
+typedef struct _vx_nn_convolution_relu_pooling_params_sp_ext_t
+{
+    vx_nn_convolution_relu_pooling_params_ext4_t ext4; /*!< \brief convolution relu pooling params \ref vx_nn_convolution_relu_pooling_params_ext_t */
+    vx_object_array inputs_list;
+    vx_object_array outputs_list;
+    vx_nn_fused_sp_params_t sp_param;
+
+} vx_nn_convolution_relu_pooling_params_sp_ext_t, * vx_nn_convolution_relu_pooling_params_sp_ext;
+
+/*! \brief [Graph] Creates a Convolutional Network Convolution and Activation(Relu) and Pooling Layer Node; this function matches the Khronos NN Extension 1.2 version.
+ * \details This function implements the Convolutional Network Convolution and Activation(Relu) and Pooling layer.
+ * For fixed-point data types, a fixed point calculation is performed with round and saturate according to the number of accumulator bits. The number of accumulator bits is implementation defined,
+ * and should be at least 16.\n
+ * round: rounding according to the vx_round_policy_e enumeration. \n
+ * saturate: saturation according to the vx_convert_policy_e enumeration.
+ * The following equation is implemented: \n
+ * \f$ outputs[j,k,i] = saturate(round(\sum_{l} (\sum_{m,n} inputs[j-m,k-n,l] \times weights[m,n,l,i])+biases[j,k,i])) \f$\n
+ * Where \f$m,n\f$ are indexes on the convolution matrices. \f$ l\f$ is an index on all the convolutions per input. \f$ i\f$ is an index per output.
+ * \f$ j,k \f$ are the inputs/outputs spatial indexes.
+ * Convolution is done on the width and height dimensions of the \ref vx_tensor. Therefore, we use here the term x for index along the width dimension and y for index along the height dimension.\n
+ * Before the Convolution is done, a padding of the width and height input dimensions with zeros is performed.
+ * Then down scaling is done by picking the results according to a skip jump. The skip in the x and y directions is determined by the output size dimensions.
+ * The relation between input and output is as follows: \n
+ * \f$ width_{output} = round(\frac{(width_{input} + paddingleft_x + paddingright_x - kernel_x - (kernel_x -1) * dilation_x)}{skip_x} + 1) \f$\n
+ * and \n
+ * \f$ height_{output} = round(\frac{(height + paddingtop_y + paddingbottom_y - kernel_y - (kernel_y -1) * dilation_y)}{skip_y} + 1) \f$\n
+ * where \f$width\f$ is the size of the input width dimension. \f$height\f$ is the size of the input height dimension.
+ * \f$width_{output}\f$ is the size of the output width dimension. \f$height_{output}\f$ is the size of the output height dimension.
+ * \f$kernel_x\f$ and \f$kernel_y\f$ are the convolution sizes in width and height dimensions.
+ * The skip is calculated by the relation between input and output.
+ * Rounding is done according to \ref vx_convolutional_network_rounding_type_e.
+ * \param [in] graph The handle to the graph.
+ * \param [in] inputs The input tensor data. 3 lower dimensions represent a single input, all following dimensions represent number of batches, possibly nested.
+ * The dimension order is [width, height, #IFM, #batches]. \n
+ * \param [in] weights_biases [static] Pointer to WeightBiasesParameter data, vx_weights_biases_parameter is an opaque reference.
+ * \param [in] convolution_relu_pooling_params [static] Pointer to parameters of type \ref vx_nn_convolution_relu_pooling_params_t
+ * \param [in] size_of_convolution_relu_pooling_params [static] Size in bytes of convolution_relu_pooling_params.
+ * \param [out] outputs The output tensor data. Output will have the same number and structure of dimensions as input.
+ * \return vx_node.
+ * \returns A node reference \ref vx_node. Any possible errors preventing a
+ * successful creation should be checked using \ref vxGetStatus.
+ * \ingroup group_cnn
+ */
+VX_API_ENTRY vx_node VX_API_CALL vxConvolutionReluPoolingLayer2(
+    vx_graph graph,
+    vx_tensor inputs,
+    vx_weights_biases_parameter weights_biases,
+    const vx_nn_convolution_relu_pooling_params_t * convolution_relu_pooling_params,
+    vx_size size_of_convolution_relu_pooling_params,
+    vx_tensor outputs);
+
+/*! \brief The optimization directive for weights_biases_parameter creation.
+ * \ingroup group_cnn
+ */
+typedef struct _vx_weights_biases_parameter_optimizations_t {
+    vx_int8  zrl; /*!< \brief The zero run length. Set a negative value to disable.*/
+    vx_enum  outputFormat; /*!< \brief The output format. */
+    vx_int32 inputZeroPoint; /*!< \brief Zero point of the input. A 32 bit integer, in range [0, 255]. Set a zero value to disable. */
+} vx_weights_biases_parameter_optimizations_t;
+
+typedef struct _vx_weights_biases_parameter_optimizations_ext_t {
+    vx_int8  zrl; /*!< \brief The zero run length. Set a negative value to disable.*/
+    vx_enum  outputFormat; /*!< \brief The output format. */
+    vx_int32 inputZeroPoint; /*!< \brief Zero point of the input. A 32 bit integer, in range [0, 255]. Set a zero value to disable. */
+    vx_uint32 num_of_input_dims; /*!< \brief The input dimension number*/
+    vx_uint32 num_of_output_dims; /*!< \brief The output dimension number*/
+} vx_weights_biases_parameter_optimizations_ext_t;
+
+
+typedef struct _vx_weights_biases_parameter_optimizations_ext2_t {
+    vx_weights_biases_parameter_optimizations_ext_t ext;
+    vx_float32 inputScale;
+    vx_float32 outputScale;
+    vx_enum    inputFormat;
+    vx_int32   output_ZP_dw; /* depthwise conv output zero point */
+    vx_float32 output_scale_dw; /* depthwise conv output scale */
+    vx_int8    output_fpp_dw; /* depthwise conv output fixed-point position */
+} vx_weights_biases_parameter_optimizations_ext2_t;
+
+#if VX_VA40_EXT_SUPPORT
+/*!
+ * \brief Creates a reference to a vx_weights_biases_parameter opaque object.
+ *
+ * \param [in] layer_type The network type of objects to hold. Types allowed are:
+ * \arg VX_CONVOLUTIONAL_NETWORK_CONVOLUTION_LAYER for convolution layer.
+ * \arg VX_CONVOLUTIONAL_NETWORK_FULLYCONNECTED_LAYER for fullyconnected layer.
+ * \param [in] num_of_dims The dimension number of the input & output image tensor.
+ * \param [in] inputs_dims The input tensor's dimension size.
+ * \param [in] pad_x The number of elements subtracted at each side in the x dimension of the input.
+ * \param [in] pad_y The number of elements subtracted at each side in the y dimension of the input. + * \param [in] pooling_size_x The size of the pooling region in the x dimension, 0 means no pooling operation. + * \param [in] pooling_size_y The size of the pooling region in the y dimension, 0 means no pooling operation. + * \param [in] down_scale_size_rounding A VX_TYPE_ENUM of the vx_round_policy_e enumeration. + * \param [in] convolution_outputs_dims The output's dimension size after covolution operation. + * \param [in] pool_outputs_dims The output's dimension size after pooling operation. + * \param [in] optimizations A optional param for \ref vx_weights_biases_parameter_optimizations_t. + * \param [in] weights The weights tensor which need be compressed. + * \param [in] biases The biases tensor which need be compressed. + * + * \returns An opaque vx_weights_biases_parameter reference with compressed kernel data. Any possible errors preventing a + * successful creation should be checked using \ref vxGetStatus. + * + * \ingroup group_cnn + */ +VX_API_ENTRY vx_weights_biases_parameter VX_API_CALL +vxCreateWeightsBiasesParameterFromTensors( + vx_enum layer_type, + vx_size num_of_dims, + vx_size * inputs_dims, + vx_uint32 pad_x, + vx_uint32 pad_y, + vx_uint32 pooling_size_x, + vx_uint32 pooling_size_y, + vx_enum down_scale_size_rounding, + vx_size * convolution_outputs_dims, + vx_size * pool_outputs_dims, + vx_weights_biases_parameter_optimizations_t *optimizations, + vx_tensor weights, + vx_tensor biases); + +/*! + * \brief Creates a reference to an opaque vx_weights_biases_parameter object. + * + * \param [in] layer_type The network type of objects to hold. Types allowed are: + * \arg VX_CONVOLUTIONAL_NETWORK_CONVOLUTION_LAYER for convolution layer. + * \arg VX_CONVOLUTIONAL_NETWORK_FULLYCONNECTED_LAYER for fullyconnected layer. + * \param [in] num_of_dims The dimention number of input & output image tensor. + * \param [in] inputs_dims The input tensor's dimension size. + * \param [in] convolution_outputs_dims The output's dimension size after covolution operation. + * \param [in] pool_outputs_dims The output's dimension size after pooling operation. + * \param [in] output_format The output tensor element type. + * \param [in] convolution_relu_pooling_params The convolution_relu_pooling_params Pointer to parameters of type \ref vx_nn_convolution_relu_pooling_params_t + * \param [in] size_of_convolution_relu_pooling_params The size in bytes of convolution_relu_pooling_params. + * \param [in] optimizations A optional param for \ref vx_weights_biases_parameter_optimizations_t. + * \param [in] weights The weights tensor which need be compressed. + * \param [in] biases The biases tensor which need be compressed. + * + * \returns An opaque vx_weights_biases_parameter reference with compressed kernel data. Any possible errors preventing a + * successful creation should be checked using \ref vxGetStatus. + * + * \ingroup group_cnn + */ +VX_API_ENTRY vx_weights_biases_parameter VX_API_CALL vxCreateWeightsBiasesParameterFromTensors2( + vx_enum layer_type, + vx_size num_of_dims, + vx_size * inputs_dims, + vx_size * convolution_outputs_dims, + vx_size * pool_outputs_dims, + vx_enum output_format, + const vx_nn_convolution_relu_pooling_params convolution_relu_pooling_params, + vx_size size_of_convolution_relu_pooling_params, + vx_weights_biases_parameter_optimizations_t *optimizations, + vx_tensor weights, + vx_tensor biases); + +/*! 
+ * \brief Creates a reference to an opaque vx_weights_biases_parameter object. + * + * \param [in] layer_type The network type of objects to hold. Types allowed are: + * \arg VX_CONVOLUTIONAL_NETWORK_CONVOLUTION_LAYER for convolution layer. + * \arg VX_CONVOLUTIONAL_NETWORK_FULLYCONNECTED_LAYER for fullyconnected layer. + * \param [in] inputs_dims The input tensor's dimension size. + * \param [in] convolution_outputs_dims The output's dimension size after covolution operation. + * \param [in] pool_outputs_dims The output's dimension size after pooling operation. + * \param [in] convolution_relu_pooling_params The convolution_relu_pooling_params Pointer to parameters of type \ref vx_nn_convolution_relu_pooling_params_t + * \param [in] size_of_convolution_relu_pooling_params The size in bytes of convolution_relu_pooling_params. + * \param [in] optimizations A optional param for \ref vx_weights_biases_parameter_optimizations_t. + * \param [in] size_of_optimizations The size in bytes of optimizations. + * \param [in] weights The weights tensor which need be compressed. + * \param [in] biases The biases tensor which need be compressed. + * + * \returns An opaque vx_weights_biases_parameter reference with compressed kernel data. Any possible errors preventing a + * successful creation should be checked using \ref vxGetStatus. + * + * \ingroup group_cnn + */ +VX_API_ENTRY vx_weights_biases_parameter VX_API_CALL vxCreateWeightsBiasesParameterFromTensors3( + vx_enum layer_type, + vx_size * inputs_dims, + vx_size * convolution_outputs_dims, + vx_size * pool_outputs_dims, + const vx_nn_convolution_relu_pooling_params convolution_relu_pooling_params, + vx_size size_of_convolution_relu_pooling_params, + vx_weights_biases_parameter_optimizations_t *optimizations, + vx_size size_of_optimizations, + vx_tensor weights, + vx_tensor biases); + +/*! + * \brief Creates a reference to an vx_weights_biases_parameter object. + * \param [in] context The OpenVX context object. + * \param [in] layer_type The network type of objects to hold. Types allowed are: + * \arg VX_CONVOLUTIONAL_NETWORK_CONVOLUTION_LAYER for convolution layer. + * \arg VX_CONVOLUTIONAL_NETWORK_FULLYCONNECTED_LAYER for fullyconnected layer. + * \param [in] num_of_dims The dimention number of input & output image tensor. + * \param [in] inputs_dims The input tensor's dimension size. + * \param [in] pad_x The number of elements subtracted at each side in the x dimension of the input. + * \param [in] pad_y The number of elements subtracted at each side in the y dimension of the input. + * \param [in] pooling_size_x The size of the pooling region in the x dimension, 0 means no pooling operation. + * \param [in] pooling_size_y The size of the pooling region in the y dimension, 0 means no pooling operation. + * \param [in] down_scale_size_rounding A VX_TYPE_ENUM of the vx_round_policy_e enumeration. + * \param [in] convolution_outputs_dims The output's dimension size after covolution operation. + * \param [in] pool_outputs_dims The output's dimension size after pooling operation. + * \param [in] weights_num_of_dims The dimention number of weights tensor. + * \param [in] weights_dims The dimention size of weights tensor. + * \param [in] weights_data_format The format of weights tensor. + * \param [in] weights_fixed_point_pos The fixed point position when the weights element type is int16/int8, if 0 calculations are performed in integer math. + * \param [in] biases_num_of_dims The dimention number of biases tensor. 
+ * \param [in] biases_dims The dimention size of biases tensor. + * \param [in] biases_data_format The format of biases tensor. + * \param [in] biases_fixed_point_pos The fixed point position when the biases element type is int16/int8, if 0 calculations are performed in integer math. + * \param [in] raw_data_size The data size of compressed data. + * + * \returns A weightsbiases reference without compressed kernel data vx_weights_biases_parameter. Any possible errors preventing a + * successful creation should be checked using \ref vxGetStatus. + * + * \ingroup group_cnn + */ +VX_API_ENTRY vx_weights_biases_parameter VX_API_CALL +vxCreateWeightsBiasesParameter( + vx_context context, + vx_enum layer_type, + vx_size num_of_dims, + vx_size * inputs_dims, + vx_uint32 pad_x, + vx_uint32 pad_y, + vx_uint32 pooling_size_x, + vx_uint32 pooling_size_y, + vx_enum down_scale_size_rounding, + vx_size * convolution_outputs_dims, + vx_size * pool_outputs_dims, + vx_size weights_num_of_dims, + vx_size * weights_dims, + vx_enum weights_data_format, + vx_int8 weights_fixed_point_pos, + vx_size biases_num_of_dims, + vx_size * biases_dims, + vx_enum biases_data_format, + vx_int8 biases_fixed_point_pos, + vx_uint32 raw_data_size + ); +#else +/*! + * \brief Creates a reference to a vx_weights_biases_parameter opaque object. + * + * \param [in] layer_type The network type of objects to hold. Types allowed are: + * \arg VX_CONVOLUTIONAL_NETWORK_CONVOLUTION_LAYER for convolution layer. + * \arg VX_CONVOLUTIONAL_NETWORK_FULLYCONNECTED_LAYER for fullyconnected layer. + * \param [in] num_of_dims The dimention number of input & output image tensor. + * \param [in] inputs_dims The input tensor's dimension size. + * \param [in] pad_x The number of elements subtracted at each side in the x dimension of the input. + * \param [in] pad_y The number of elements subtracted at each side in the y dimension of the input. + * \param [in] pooling_size_x The size of the pooling region in the x dimension, 0 means no pooling operation. + * \param [in] pooling_size_y The size of the pooling region in the y dimension, 0 means no pooling operation. + * \param [in] down_scale_size_rounding A VX_TYPE_ENUM of the vx_round_policy_e enumeration. + * \param [in] convolution_outputs_dims The output's dimension size after covolution operation. + * \param [in] pool_outputs_dims The output's dimension size after pooling operation. + * \param [in] optimizations A optional param for \ref vx_weights_biases_parameter_optimizations_t. + * \param [in] weights The weights tensor which need be compressed. + * \param [in] biases The biases tensor which need be compressed. + * + * \returns An opaque vx_weights_biases_parameter reference with compressed kernel data. Any possible errors preventing a + * successful creation should be checked using \ref vxGetStatus. + * + * \ingroup group_cnn + */ +VX_API_ENTRY vx_weights_biases_parameter VX_API_CALL +vxCreateWeightsBiasesParameterFromTensors( + vx_enum layer_type, + vx_uint32 num_of_dims, + vx_uint32 * inputs_dims, + vx_uint32 pad_x, + vx_uint32 pad_y, + vx_uint32 pooling_size_x, + vx_uint32 pooling_size_y, + vx_enum down_scale_size_rounding, + vx_uint32 * convolution_outputs_dims, + vx_uint32 * pool_outputs_dims, + vx_weights_biases_parameter_optimizations_t *optimizations, + vx_tensor weights, + vx_tensor biases); + +/*! + * \brief Creates a reference to an opaque vx_weights_biases_parameter object. + * + * \param [in] layer_type The network type of objects to hold. 
Types allowed are: + * \arg VX_CONVOLUTIONAL_NETWORK_CONVOLUTION_LAYER for convolution layer. + * \arg VX_CONVOLUTIONAL_NETWORK_FULLYCONNECTED_LAYER for fullyconnected layer. + * \param [in] num_of_dims The dimention number of input & output image tensor. + * \param [in] inputs_dims The input tensor's dimension size. + * \param [in] convolution_outputs_dims The output's dimension size after covolution operation. + * \param [in] pool_outputs_dims The output's dimension size after pooling operation. + * \param [in] output_format The output tensor element type. + * \param [in] convolution_relu_pooling_params The convolution_relu_pooling_params Pointer to parameters of type \ref vx_nn_convolution_relu_pooling_params_t + * \param [in] size_of_convolution_relu_pooling_params The size in bytes of convolution_relu_pooling_params. + * \param [in] optimizations A optional param for \ref vx_weights_biases_parameter_optimizations_t. + * \param [in] weights The weights tensor which need be compressed. + * \param [in] biases The biases tensor which need be compressed. + * + * \returns An opaque vx_weights_biases_parameter reference with compressed kernel data. Any possible errors preventing a + * successful creation should be checked using \ref vxGetStatus. + * + * \ingroup group_cnn + */ +VX_API_ENTRY vx_weights_biases_parameter VX_API_CALL vxCreateWeightsBiasesParameterFromTensors2( + vx_enum layer_type, + vx_uint32 num_of_dims, + vx_uint32 * inputs_dims, + vx_uint32 * convolution_outputs_dims, + vx_uint32 * pool_outputs_dims, + vx_enum output_format, + const vx_nn_convolution_relu_pooling_params convolution_relu_pooling_params, + vx_size size_of_convolution_relu_pooling_params, + vx_weights_biases_parameter_optimizations_t *optimizations, + vx_tensor weights, + vx_tensor biases); + +/*! + * \brief Creates a reference to an opaque vx_weights_biases_parameter object. + * + * \param [in] layer_type The network type of objects to hold. Types allowed are: + * \arg VX_CONVOLUTIONAL_NETWORK_CONVOLUTION_LAYER for convolution layer. + * \arg VX_CONVOLUTIONAL_NETWORK_FULLYCONNECTED_LAYER for fullyconnected layer. + * \param [in] inputs_dims The input tensor's dimension size. + * \param [in] convolution_outputs_dims The output's dimension size after covolution operation. + * \param [in] pool_outputs_dims The output's dimension size after pooling operation. + * \param [in] convolution_relu_pooling_params The convolution_relu_pooling_params Pointer to parameters of type \ref vx_nn_convolution_relu_pooling_params_t + * \param [in] size_of_convolution_relu_pooling_params The size in bytes of convolution_relu_pooling_params. + * \param [in] optimizations A optional param for \ref vx_weights_biases_parameter_optimizations_t. + * \param [in] size_of_optimizations The size in bytes of optimizations. + * \param [in] weights The weights tensor which need be compressed. + * \param [in] biases The biases tensor which need be compressed. + * + * \returns An opaque vx_weights_biases_parameter reference with compressed kernel data. Any possible errors preventing a + * successful creation should be checked using \ref vxGetStatus. 
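+ *
+ * \par Example
+ * A minimal sketch (informative only) for the convolution-layer case. It assumes the
+ * weight and bias tensors already exist, uses illustrative dimensions, and applies no
+ * pooling, so the pooled output dimensions equal the convolution output dimensions;
+ * most members of the parameter structure are left zero for brevity.
+ * \code
+ * vx_uint32 in_dims[4]   = { 224, 224, 3, 1 };    // [width, height, #IFM, #batches]
+ * vx_uint32 conv_dims[4] = { 224, 224, 64, 1 };   // convolution output dimensions
+ * vx_nn_convolution_relu_pooling_params_t p = { 0 };
+ * p.pad_x_left = 1; p.pad_x_right = 1; p.pad_y_top = 1; p.pad_y_bottom = 1;
+ * p.enable_relu = vx_true_e;
+ * vx_weights_biases_parameter wb = vxCreateWeightsBiasesParameterFromTensors3(
+ *     VX_CONVOLUTIONAL_NETWORK_CONVOLUTION_LAYER,
+ *     in_dims, conv_dims, conv_dims,
+ *     &p, sizeof(p),
+ *     NULL, 0,                                    // no optimizations
+ *     weights, biases);
+ * if (vxGetStatus((vx_reference)wb) != VX_SUCCESS)
+ * {
+ *     // creation failed
+ * }
+ * \endcode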
+ * + * \ingroup group_cnn + */ +VX_API_ENTRY vx_weights_biases_parameter VX_API_CALL vxCreateWeightsBiasesParameterFromTensors3( + vx_enum layer_type, + vx_uint32 * inputs_dims, + vx_uint32 * convolution_outputs_dims, + vx_uint32 * pool_outputs_dims, + const vx_nn_convolution_relu_pooling_params convolution_relu_pooling_params, + vx_size size_of_convolution_relu_pooling_params, + vx_weights_biases_parameter_optimizations_t *optimizations, + vx_size size_of_optimizations, + vx_tensor weights, + vx_tensor biases); + +/*! + * \brief Creates a reference to an vx_weights_biases_parameter object. + * \param [in] context The OpenVX context object. + * \param [in] layer_type The network type of objects to hold. Types allowed are: + * \arg VX_CONVOLUTIONAL_NETWORK_CONVOLUTION_LAYER for convolution layer. + * \arg VX_CONVOLUTIONAL_NETWORK_FULLYCONNECTED_LAYER for fullyconnected layer. + * \param [in] num_of_dims The dimention number of input & output image tensor. + * \param [in] inputs_dims The input tensor's dimension size. + * \param [in] pad_x The number of elements subtracted at each side in the x dimension of the input. + * \param [in] pad_y The number of elements subtracted at each side in the y dimension of the input. + * \param [in] pooling_size_x The size of the pooling region in the x dimension, 0 means no pooling operation. + * \param [in] pooling_size_y The size of the pooling region in the y dimension, 0 means no pooling operation. + * \param [in] down_scale_size_rounding A VX_TYPE_ENUM of the vx_round_policy_e enumeration. + * \param [in] convolution_outputs_dims The output's dimension size after covolution operation. + * \param [in] pool_outputs_dims The output's dimension size after pooling operation. + * \param [in] weights_num_of_dims The dimention number of weights tensor. + * \param [in] weights_dims The dimention size of weights tensor. + * \param [in] weights_data_format The format of weights tensor. + * \param [in] weights_fixed_point_pos The fixed point position when the weights element type is int16/int8, if 0 calculations are performed in integer math. + * \param [in] biases_num_of_dims The dimention number of biases tensor. + * \param [in] biases_dims The dimention size of biases tensor. + * \param [in] biases_data_format The format of biases tensor. + * \param [in] biases_fixed_point_pos The fixed point position when the biases element type is int16/int8, if 0 calculations are performed in integer math. + * \param [in] raw_data_size The data size of compressed data. + * + * \returns A weightsbiases reference without compressed kernel data vx_weights_biases_parameter. Any possible errors preventing a + * successful creation should be checked using \ref vxGetStatus. + * + * \ingroup group_cnn + */ +VX_API_ENTRY vx_weights_biases_parameter VX_API_CALL +vxCreateWeightsBiasesParameter( + vx_context context, + vx_enum layer_type, + vx_uint32 num_of_dims, + vx_uint32 * inputs_dims, + vx_uint32 pad_x, + vx_uint32 pad_y, + vx_uint32 pooling_size_x, + vx_uint32 pooling_size_y, + vx_enum down_scale_size_rounding, + vx_uint32 * convolution_outputs_dims, + vx_uint32 * pool_outputs_dims, + vx_uint32 weights_num_of_dims, + vx_uint32 * weights_dims, + vx_enum weights_data_format, + vx_int8 weights_fixed_point_pos, + vx_uint32 biases_num_of_dims, + vx_uint32 * biases_dims, + vx_enum biases_data_format, + vx_int8 biases_fixed_point_pos, + vx_uint32 raw_data_size + ); +#endif + +/*! \brief Releases the OpenVX object vx_weights_biases_parameter. 
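+ *
+ * \par Example
+ * A minimal sketch (informative only) of the create/release pairing, assuming a
+ * weights-biases object <tt>wb</tt> obtained from one of the creation functions above:
+ * \code
+ * // ... build and process the graph that consumes wb ...
+ * vx_status status = vxReleaseWeightsBiasesParameter(&wb);
+ * if (status == VX_SUCCESS)
+ * {
+ *     // wb is now zeroed and must not be used again
+ * }
+ * \endcode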
+ * \param [in] weights_bias The pointer to the reference to the vx_weights_biases_parameter. + * \post After returning from this function the reference is zeroed. + * \return A \ref vx_status_e enumeration. + * \retval VX_SUCCESS No errors. + * \retval VX_ERROR_INVALID_REFERENCE If weights_bias is not a vx_weights_biases_parameter. + * \pre \ref vxCreateWeightsBiasesParameterFromTensors / vxCreateWeightsBiasesParameterFromTensors2/ vxCreateWeightsBiasesParameter / vxCreateWeightsBiasesParameterFromStream + * \ingroup group_cnn + */ +VX_API_ENTRY vx_status VX_API_CALL vxReleaseWeightsBiasesParameter(vx_weights_biases_parameter *weights_bias); +/*! \brief Input parameters for a gru operation. + * \ingroup group_cnn + * \version 0.5 + */ +typedef struct _vx_nn_gru_params_t +{ + vx_tensor reset2input_weights; /*!< \brief [static] Weight matrix for the reset gate with input. A 2-D tensor of type T, of shape [input_size, cell_size]. where "cell_size" corresponds to the number of cell units.*/ + vx_tensor update2input_weights; /*!< \brief [static] Weight matrix for the update gate with input. A 2-D tensor of type T, of shape [input_size, cell_size]. */ + vx_tensor reset2recurrent_weights; /*!< \brief [static] Weight matrix for the reset gate with recurrent(h_prev). A 2-D tensor of type T, of shape [cell_size, cell_size]. */ + vx_tensor update2recurrent_weights; /*!< \brief [static] Weight matrix for the update gate with recurrent(h_prev). A 2-D tensor of type T, of shape [cell_size, cell_size]. */ + + vx_tensor connection2input_weights; /*!< \brief [static] Weight matrix for the cell connection gate with input. A 2-D tensor of type T, of shape [input_size, cell_size]. */ + vx_tensor connection2recurrent_weights; /*!< \brief [static] Weight matrix for the cell connection gate with recurrent(h_prev). A 2-D tensor of type T, of shape [cell_size, cell_size]. */ + + vx_tensor gate_input_bias; /*!< \brief [static] Bias vector for the reset and update gate for input. A 1-D tensor of type T, of shape [cell_size].*/ + vx_tensor gate_recurrent_bias; /*!< \brief [static] Bias vector for the reset and update gate for recurrent. A 1-D tensor of type T, of shape [cell_size].*/ + + vx_tensor connection_bias; /*!< \brief [static] Bias vector for the cell connection gate. A 1-D tensor of type T, of shape [cell_size].*/ + +} vx_nn_gru_params_t; + + +/*! \brief [Graph] Creates a Long short-term memory unit (gru) Unit Networks Layer Node. not implement yet. + * \details + * The implementation is based on: http://arxiv.org/abs/1406.1078 + * Computes the GRU cell forward propagation for 1 time step. + * This kernel op implements the following mathematical equations: + * Biases are initialized with: + * * `b_ru` - constant_initializer(1.0) + * * `b_c` - constant_initializer(0.0) + * + * x_h_prev = [x, h_prev] + * [r_bar u_bar] = x_h_prev * w_ru + b_ru + * r = sigmoid(r_bar) + * u = sigmoid(u_bar) + * h_prevr = h_prev x r + * x_h_prevr = [x h_prevr] + * c_bar = x_h_prevr * w_c + b_c + * c = tanh(c_bar) + * h = (1-u) x c + u x h_prev + * + * \param [in] graph The handle to the graph. + * \param [in] input A 2-D tensor of type T, of shape [input_size, batch_size], where + * "batch_size" corresponds to the batching dimension, and "input_size" + * is the size of the input. + * \param [in] h_prev A 2-D tensor of type T, of shape [cell_size, batch_size]. + * \param [in] gru_params gru paraments \ref vx_nn_gru_params_t . + * \param [in] size_of_gru_params [static] The size of the gru_params. 
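+ *
+ * \par Example
+ * A minimal sketch (informative only). It assumes the graph, the input/state/output
+ * tensors and the individual weight and bias tensors (the <tt>w_*</tt>/<tt>b_*</tt>
+ * names below are placeholders) have already been created with the shapes documented
+ * in \ref vx_nn_gru_params_t.
+ * \code
+ * vx_nn_gru_params_t gru = { 0 };
+ * gru.reset2input_weights          = w_r_x;   // [input_size, cell_size]
+ * gru.update2input_weights         = w_u_x;
+ * gru.reset2recurrent_weights      = w_r_h;   // [cell_size, cell_size]
+ * gru.update2recurrent_weights     = w_u_h;
+ * gru.connection2input_weights     = w_c_x;
+ * gru.connection2recurrent_weights = w_c_h;
+ * gru.gate_input_bias     = b_ru_x;           // [cell_size]
+ * gru.gate_recurrent_bias = b_ru_h;
+ * gru.connection_bias     = b_c;
+ * vx_node node = vxGRUUnitLayer(graph, input, h_prev, &gru, sizeof(gru), output);
+ * \endcode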
+ * \param [out] output A 2-D tensor of type T, of shape [cell_size, batch_size].
+ * This is effectively the same as the current "output_state" value.
+ * \return vx_node.
+ * \returns A node reference \ref vx_node. Any possible errors preventing a
+ * successful creation should be checked using \ref vxGetStatus.
+ * \ingroup group_cnn
+ * \version 0.5
+ */
+VX_API_ENTRY vx_node VX_API_CALL vxGRUUnitLayer(
+    vx_graph graph,
+    vx_tensor input,
+    vx_tensor h_prev,
+    const vx_nn_gru_params_t * gru_params,
+    vx_size size_of_gru_params,
+    vx_tensor output);
+
+/*! \brief [Graph] Creates a Gated Recurrent Unit (GRU) Networks Layer Node. Not implemented yet.
+ * \details
+ *
+ * \param [in] graph The handle to the graph.
+ * \param [in] input A 3-D tensor of type T, of shape [input_size, batch_size, time_step], where
+ * "input_size" corresponds to the size of the input, "batch_size"
+ * is the batching dimension, and time_step is the time length actually used by the input.
+ * \param [in] h_prev Optional. A 2-D tensor of type T, of shape [cell_size, batch_size], where
+ * "cell_size" corresponds to the number of cell units, and "batch_size"
+ * is the batching dimension.
+ * \param [in] gru_layer_params The GRU parameters, see \ref vx_nn_gru_params_t.
+ * \param [in] size_of_gru_layer_params [static] The size of the gru_layer_params.
+ * \param [out] output A 2-D tensor of type T, of shape [cell_size, batch_size].
+ * This is effectively the same as the current "output_state" value.
+ * \return vx_node.
+ * \returns A node reference \ref vx_node. Any possible errors preventing a
+ * successful creation should be checked using \ref vxGetStatus.
+ * \ingroup group_cnn
+ * \version 0.5
+ */
+VX_API_ENTRY vx_node VX_API_CALL vxGRULayer(
+    vx_graph graph,
+    vx_tensor input,
+    vx_tensor h_prev,
+    const vx_nn_gru_params_t * gru_layer_params,
+    vx_size size_of_gru_layer_params,
+    vx_tensor output
+    );
+
+
+/*! \brief Input parameters for a convolution LSTM operation.
+ * \ingroup group_cnn
+ * \version 0.5
+ */
+typedef struct _vx_nn_convlstm_params_t
+{
+    vx_tensor input2input_weight;  /*!< \brief Optional. A 2-D tensor of type T, of shape [num_units, input_size], where "num_units" corresponds to the number of cell units.*/
+    vx_tensor input2forget_weight; /*!< \brief A 2-D tensor of type T, of shape [num_units, input_size].*/
+    vx_tensor input2cell_weight;   /*!< \brief A 2-D tensor of type T, of shape [num_units, input_size].*/
+    vx_tensor input2output_weight; /*!< \brief A 2-D tensor of type T, of shape [num_units, input_size].*/
+
+    vx_tensor recurrent2input_weight;  /*!< \brief Optional. A 2-D tensor of type T, of shape [num_units, output_size], where "output_size" corresponds to either the number of cell units (i.e., "num_units") or the second dimension of the "projection_weights", if defined.*/
+    vx_tensor recurrent2forget_weight; /*!< \brief A 2-D tensor of type T, of shape [num_units, output_size].*/
+    vx_tensor recurrent2cell_weight;   /*!< \brief A 2-D tensor of type T, of shape [num_units, output_size].*/
+    vx_tensor recurrent2output_weight; /*!< \brief A 2-D tensor of type T, of shape [num_units, output_size].*/
+
+    vx_tensor input_gate_bias;  /*!< \brief Optional. A 1-D tensor of type T, of shape [num_units].*/
+    vx_tensor forget_gate_bias; /*!< \brief A 1-D tensor of type T, of shape [num_units].*/
+    vx_tensor cell_bias;        /*!< \brief A 1-D tensor of type T, of shape [num_units].*/
+    vx_tensor output_gate_bias; /*!< \brief A 1-D tensor of type T, of shape [num_units].*/
+
+    vx_tensor activation; /*!< \brief Optional. An ActivationFunctionType indicating the activation function. If "NONE" is specified then it results in a linear activation.*/
+
+    vx_float32 forget_bias;  /*!< \brief Float32 [static] A bias for the forget gate. If set to 0.0f (by default) then bias is ignored.*/
+    vx_bool skip_connection; /*!< \brief If set to `vx_true_e`, concatenate the input to the output of the conv LSTM. Default: `vx_false_e`.*/
+
+} vx_nn_convlstm_params_t;
+
+/*! \brief Input parameters for a convolution LSTM layer operation.
+ * \ingroup group_cnn
+ */
+typedef struct _vx_nn_convlstm_layer_params_t
+{
+    vx_nn_convlstm_params_t convlstm_param; /*!< \brief The convolution LSTM input parameters, see \ref vx_nn_convlstm_params_t.*/
+    vx_enum convlstm_layer_type;            /*!< \brief The convolution LSTM layer type.*/
+} vx_nn_convlstm_layer_params_t;
+
+
+/*! \brief [Graph] Creates a Convolutional Long Short-Term Memory (ConvLSTM) Unit Networks Layer Node. Not implemented yet.
+ * \details
+ *
+ * https://arxiv.org/pdf/1506.04214v1.pdf
+ *
+ * \param [in] graph The handle to the graph.
+ * \param [in] input A 2-D tensor of type T, of shape [input_size, batch_size], where
+ * "batch_size" corresponds to the batching dimension, and "input_size"
+ * is the size of the input.
+ * \param [in] output_state_in A 2-D tensor of type T, of shape [output_size, batch_size].
+ * \param [in] cell_state_in A 2-D tensor of type T, of shape [num_units, batch_size].
+ * \param [in] convlstm_params The ConvLSTM parameters, see \ref vx_nn_convlstm_params_t.
+ * \param [in] size_of_convlstm_params [static] The size of the convlstm_params.
+ * \param [out] output_state_out A 2-D tensor of type T, of shape [output_size, batch_size].
+ * \param [out] cell_state_out A 2-D tensor of type T, of shape [num_units, batch_size].
+ * \param [out] output A 2-D tensor of type T, of shape [output_size, batch_size].
+ * This is effectively the same as the current "output_state" value.
+ * \return vx_node.
+ * \returns A node reference \ref vx_node. Any possible errors preventing a
+ * successful creation should be checked using \ref vxGetStatus.
+ * \ingroup group_cnn
+ * \version 0.5
+ */
+VX_API_ENTRY vx_node VX_API_CALL vxConvLSTMUnitLayer(
+    vx_graph graph,
+    vx_tensor input,
+    vx_tensor output_state_in,
+    vx_tensor cell_state_in,
+    const vx_nn_convlstm_params_t * convlstm_params,
+    vx_size size_of_convlstm_params,
+    vx_tensor output_state_out,
+    vx_tensor cell_state_out,
+    vx_tensor output);
+
+/*! \brief [Graph] Creates a Convolutional Long Short-Term Memory (ConvLSTM) Networks Layer Node. Not implemented yet.
+ * \details
+ *
+ * \param [in] graph The handle to the graph.
+ * \param [in] input A 3-D tensor of type T, of shape [input_size, batch_size, time_step], where
+ * "input_size" corresponds to the size of the input, "batch_size"
+ * is the batching dimension, and time_step is the time length actually used by the input.
+ * \param [in] static_input Optional. A 2-D tensor of type T, of shape [input_size, batch_size], where
+ * "input_size" corresponds to the size of the input, and "batch_size"
+ * is the batching dimension.
+ * \param [in] cont Optional. A 2-D tensor of type T, of shape [input_size, batch_size], where
+ * "input_size" corresponds to the size of the input, and "batch_size"
+ * is the batching dimension.
+ * \param [in] convlstm_layer_params The ConvLSTM layer parameters, see \ref vx_nn_convlstm_layer_params_t.
+ * \param [in] size_of_convlstm_layer_params [static] The size of the convlstm_layer_params.
+ * \param [out] output A 2-D tensor of type T, of shape [output_size, batch_size].
+ * This is effectively the same as the current "output_state" value.
+ * \return vx_node.
+ * \returns A node reference \ref vx_node. Any possible errors preventing a
+ * successful creation should be checked using \ref vxGetStatus.
+ * \ingroup group_cnn
+ * \version 0.5
+ */
+VX_API_ENTRY vx_node VX_API_CALL vxConvLSTMLayer(
+    vx_graph graph,
+    vx_tensor input,
+    vx_tensor static_input,
+    vx_tensor cont,
+    const vx_nn_convlstm_layer_params_t * convlstm_layer_params,
+    vx_size size_of_convlstm_layer_params,
+    vx_tensor output
+    );
+
+/*! \brief [Graph] Creates a Convolutional Network Pooling Layer Node.
+ * \details Pooling is done on the first 2 dimensions of the \ref vx_tensor. Therefore, we use here the term x for the first dimension and y for the second.\n
+ * The pooling operation is a function applied over a rectangular region, followed by a nearest-neighbour down scale.
+ * Here we use pooling_size_x and pooling_size_y to specify the rectangle size on which the operation
+ * is performed. \n
+ * Before the operation (average or maximum value) is done, the data is padded in the first 2 dimensions with zeros.
+ * The down scale is done by picking the results according to a skip jump. The skip in the x and y dimension is determined by the output size dimensions.
+* \param [in] graph The handle to the graph.
+* \param [in] inputs The input tensor data. 3 lower dimensions represent a single input, the 4th dimension for a batch of inputs is optional. Dimension layout is [width, height, #IFM, #batches].
+* See \ref vxCreateTensor and \ref vxCreateVirtualTensor.
+* \param [in] pooling_type [static] Either max pooling or average pooling (see \ref vx_convolutional_network_pooling_type_e).
+* \param [in] pooling_size_x [static] Size of the pooling region in the x dimension.
+* \param [in] pooling_size_y [static] Size of the pooling region in the y dimension.
+* \param [in] pooling_padding_x [static] Padding size in the x dimension.
+* \param [in] pooling_padding_y [static] Padding size in the y dimension.
+* \param [in] rounding [static] The rounding method for calculating output dimensions. See \ref vx_convolutional_network_rounding_type_e.
+* \param [out] outputs The output tensor data. Output will have the same number of dimensions as input.
+* \return vx_node.
+* \retval 0 Node could not be created.
+* \retval * Node handle.
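+* A minimal usage sketch (hedged: the graph and the in/out tensors are assumed to be created
+* elsewhere, and pool_type / rounding stand for values of
+* \ref vx_convolutional_network_pooling_type_e and \ref vx_convolutional_network_rounding_type_e):
+* \code
+* vx_node add_max_pool_2x2(vx_graph graph, vx_tensor in, vx_tensor out,
+*                          vx_enum pool_type, vx_enum rounding)
+* {
+*     vx_node node = vxPoolingLayer(graph, in, pool_type,
+*                                   2, 2,   // pooling_size_x, pooling_size_y
+*                                   0, 0,   // pooling_padding_x, pooling_padding_y
+*                                   rounding, out);
+*     // check with vxGetStatus((vx_reference)node) before using the node
+*     return node;
+* }
+* \endcode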
+* \ingroup group_cnn
+*/
+VX_API_ENTRY vx_node VX_API_CALL vxPoolingLayer(vx_graph graph, vx_tensor inputs, vx_enum pooling_type,
+    vx_size pooling_size_x,
+    vx_size pooling_size_y,
+    vx_size pooling_padding_x,
+    vx_size pooling_padding_y,
+    vx_enum rounding,
+    vx_tensor outputs);
+
+/*! \brief [Graph] Creates a Convolutional Network Softmax Layer Node.
+ * \details The softmax function is a generalization of the logistic function that "squashes" a K-dimensional vector \f$ z \f$ of arbitrary real values to a K-dimensional vector
+ * \f$ \sigma(z) \f$ of real values in the range (0, 1) that add up to 1. The function is given by:
+ * \f$ \sigma(z)_j = \frac{e^{z_j}}{\sum_i e^{z_i}} \f$
+ * \param [in] graph The handle to the graph.
+ * \param [in] inputs The input tensor, with the number of dimensions according to the following scheme.
+ * In case the IFM dimension is 1, softmax is calculated on that dimension.
+ * In case the IFM dimension is 2, softmax is calculated on the first dimension. The second dimension is batching.
+ * In case the IFM dimension is 3, dimensions are [Width, Height, Classes] and softmax is calculated on the third dimension.
+ * In case the IFM dimension is 4, dimensions are [Width, Height, Classes, batching] and softmax is calculated on the third dimension.
+ * Regarding the layout specification, see \ref vxCreateTensor and \ref vxCreateVirtualTensor.
+ * \param [out] outputs The output tensor. Output will have the same number of dimensions as input. The output tensor data type must be the same as the input's.
+ * \ingroup group_cnn
+ * \return vx_node.
+ * \returns A node reference \ref vx_node. Any possible errors preventing a
+ * successful creation should be checked using \ref vxGetStatus.
+ */
+VX_API_ENTRY vx_node VX_API_CALL vxSoftmaxLayer(vx_graph graph, vx_tensor inputs, vx_tensor outputs);
+
+/* vxCopyTensorPatchForNN11 is for backward compatibility with spec 1.1, which is used in nn*/
+VX_API_ENTRY vx_status VX_API_CALL vxCopyTensorPatchForNN11(
+    vx_tensor tensor,
+    vx_tensor_view view,
+    vx_tensor_addressing user_addr,
+    void *user_ptr,
+    vx_enum usage,
+    vx_enum user_mem_type
+    );
+
+/* vxCreateTensorForNN11 is for backward compatibility with spec 1.1, which is used in nn*/
+VX_API_ENTRY vx_tensor VX_API_CALL
+vxCreateTensorForNN11(
+    vx_context context,
+    vx_uint32 num_of_dims,
+    vx_uint32 *sizes,
+    vx_enum data_format,
+    vx_int8 fixed_point_pos
+    );
+
+/*! \brief [Graph] Creates a Convolutional Network Normalization Layer Node.
+* \details Normalizing over local input regions. Each input value is divided by \f$ (1+\frac{\alpha}{n}\sum_i x^2_i)^\beta \f$, where n is the number of elements to normalize across,
+* and the sum is taken over the region centred at that value (zero padding is added where necessary).
+* \param [in] graph The handle to the graph.
+* \param [in] inputs The input tensor data. 3 lower dimensions represent a single input, the 4th dimension for a batch of inputs is optional. Dimension layout is [width, height, IFM, #batches].
+* See \ref vxCreateTensor2 and \ref vxCreateVirtualTensor2.
+* \param [in] type [static] Either same map or across maps (see vx_convolutional_network_norm_type_e).
+* \param [in] normalization_size [static] Number of elements to normalize across.
+* \param [in] alpha [static] Alpha parameter in the normalization equation.
+* \param [in] beta [static] Beta parameter in the normalization equation.
+* \param [out] outputs The output tensor data. Output will have the same number of dimensions as input.
+* \ingroup group_cnn
+* \return vx_node.
+* \retval 0 Node could not be created. +* \retval * Node handle. +*/ +VX_API_ENTRY vx_node VX_API_CALL vxNormalizationLayer(vx_graph graph, vx_tensor inputs, vx_enum type, + vx_size normalization_size, + vx_float32 alpha, + vx_float32 beta, + vx_tensor outputs); + +/*! \brief [Graph] Creates a Reorgnization Layer Node. + * \details Reorganize the layer. Picking up pixels from input tensor according to the rule \n + * dimension 1: i * stride + (k / out_c) % stride \n + * dimension 2: j * stride + (k / out_c) / stride \n + * dimension 3: k % out_c \n + * out_c = input_c / (stride * stride), i is in range (0, input_w-1), j is in range (0, input_h-1), k is in range (0, input_c-1) + * Output value is in order sequence. + * \param [in] graph The reference to the parent graph. + * \param [in] inputs The input tensor data to reorg. + * \param [in] stride [static] Delta size of two pixels in each dimensions to do a reorg operation. + * \param [out] outputs The output tensor data. Output will have different number of each dimensions as input. + * \returns vx_node. + * \retval 0 Node could not be created. + * \retval * Node handle. + * \ingroup group_cnn + */ +VX_API_ENTRY vx_node VX_API_CALL vxReorgLayer( + vx_graph graph, + vx_tensor inputs, + vx_uint32 stride, + vx_tensor outputs + ); + +/*! \brief [Graph] Creates a Convolutional Network L2Normalize Layer Node. +* \param [in] graph The handle to the graph. +* \param [in] inputs The input tensor. 3 lower dimensions represent a single input, and an optional 4th dimension for batch of inputs. Dimension layout is [width, height, #IFM, #batches]. + * See \ref vxCreateTensor2 and \ref vxCreateVirtualTensor2. +* \param [out] outputs The output tensor data. Output will have the same number of dimensions as input. +* \ingroup group_cnn +* \return vx_node. +* \retval 0 Node could not be created. +* \retval * Node handle. +*/ +VX_API_ENTRY vx_node VX_API_CALL vxL2NormalizeLayer(vx_graph graph, vx_tensor inputs, vx_tensor outputs); + +/*! \brief [Graph] Creates a Convolutional Network Convolution and Activation(Relu) and Pooling and Add Layer Node. + * \details This function implement Convolutional Network Convolution and Activation(Relu) and Pooling and Add layer. + * For fixed-point data types, a fixed point calculation is performed with round and saturate according to the number of accumulator bits. The number of the accumulator bits are implementation defined, + * and should be at least 16.\n + * round: rounding according the vx_round_policy_e enumeration. \n + * saturate: A saturation according the vx_convert_policy_e enumeration. + * The following equation is implemented: \n + * \f$ outputs[j,k,i] = saturate(round(\sum_{l} (\sum_{m,n} inputs[j-m,k-n,l] \times weights[m,n,l,i])+biasses[j,k,i])) \f$\n + * Where \f$m,n\f$ are indexes on the convolution matrices. \f$ l\f$ is an index on all the convolutions per input.\f$ i\f$ is an index per output. + * \f$ j,k \f$ are the inputs/outputs spatial indexes. + * Convolution is done on the width and height dimensions of the \ref vx_tensor. Therefore, we use here the term x for index along the width dimension and y for index along the height dimension.\n + * before the Convolution is done, a padding with zeros of the width and height input dimensions is performed. + * Then down scale is done by picking the results according to a skip jump. The skip in the x and y is determined by the output size dimensions. 
+ * The relation between input to output is as follows: \n + * \f$ width_{output} = round(\frac{(width_{input} + paddingleft_x + paddingright_x - kernel_x - (kernel_x -1) * dilation_x)}{skip_x} + 1) \f$\n + * and \n + * \f$ height_{output} = round(\frac{(height + paddingtop_y + paddingbottom_y - kernel_y - (kernel_y -1) * dilation_y)}{skip_y} + 1) \f$\n + * where \f$width\f$ is the size of the input width dimension. \f$height\f$ is the size of the input height dimension. + * \f$width_{output}\f$ is the size of the output width dimension. \f$height_{output}\f$ is the size of the output height dimension. + * \f$kernel_x\f$ and \f$kernel_y\f$ are the convolution sizes in width and height dimensions. + * skip is calculated by the relation between input and output. + * rounding is done according to \ref vx_convolutional_network_rounding_type_e. + * \param [in] graph The handle to the graph. + * \param [in] inputs_conv The input tensor data for convolution. 3 lower dimensions represent a single input, all following dimensions represent number of batches, possibly nested. + * \param [in] inputs_add The input tensor data for add. 3 lower dimensions represent a single input, all following dimensions represent number of batches, possibly nested. + * The dimension order is [width, height, #IFM, #batches]. \n + * \param [in] weights_biases [static] Point to WeightBiasesParameter data, vx_weights_biases_parameter is an opaque reference. + * \param [in] convolution_relu_pooling_params [static] Pointer to parameters of type \ref vx_nn_convolution_relu_pooling_params_t + * \param [in] size_of_convolution_relu_pooling_params [static] Size in bytes of convolution_relu_pooling_params. + * \param [in] outputs_conv The convolution output tensor data. Output will have the same number and structure of dimensions as inputs_conv. + * We uses this tensor to provide format information of convolution output data to hardware, don't really return convolution output data. + * \param [out] outputs_add The final add output tensor data. Output will have the same number and structure of dimensions as input. + * \return vx_node. + * \returns A node reference \ref vx_node. Any possible errors preventing a + * successful creation should be checked using \ref vxGetStatus. + * \ingroup group_cnn + */ +VX_API_ENTRY vx_node VX_API_CALL vxConvolutionReluPoolingAddLayer2( + vx_graph graph, + vx_tensor inputs_conv, + vx_tensor inputs_add, + vx_weights_biases_parameter weights_biases, + const vx_nn_convolution_relu_pooling_params_t * convolution_relu_pooling_params, + vx_size size_of_convolution_relu_pooling_params, + vx_tensor outputs_conv, + vx_tensor outputs_add); + +/*! \brief [Graph] Creates a Convolutional Network Convolution and Activation(Relu) and Pooling and Multiply Layer Node. + * \details This function implement Convolutional Network Convolution and Activation(Relu) and Pooling and Multiply layer. + * For fixed-point data types, a fixed point calculation is performed with round and saturate according to the number of accumulator bits. The number of the accumulator bits are implementation defined, + * and should be at least 16.\n + * round: rounding according the vx_round_policy_e enumeration. \n + * saturate: A saturation according the vx_convert_policy_e enumeration. + * The following equation is implemented: \n + * \f$ outputs[j,k,i] = saturate(round(\sum_{l} (\sum_{m,n} inputs[j-m,k-n,l] \times weights[m,n,l,i])+biasses[j,k,i])) \f$\n + * Where \f$m,n\f$ are indexes on the convolution matrices. 
\f$ l\f$ is an index on all the convolutions per input.\f$ i\f$ is an index per output. + * \f$ j,k \f$ are the inputs/outputs spatial indexes. + * Convolution is done on the width and height dimensions of the \ref vx_tensor. Therefore, we use here the term x for index along the width dimension and y for index along the height dimension.\n + * before the Convolution is done, a padding with zeros of the width and height input dimensions is performed. + * Then down scale is done by picking the results according to a skip jump. The skip in the x and y is determined by the output size dimensions. + * The relation between input to output is as follows: \n + * \f$ width_{output} = round(\frac{(width_{input} + paddingleft_x + paddingright_x - kernel_x - (kernel_x -1) * dilation_x)}{skip_x} + 1) \f$\n + * and \n + * \f$ height_{output} = round(\frac{(height + paddingtop_y + paddingbottom_y - kernel_y - (kernel_y -1) * dilation_y)}{skip_y} + 1) \f$\n + * where \f$width\f$ is the size of the input width dimension. \f$height\f$ is the size of the input height dimension. + * \f$width_{output}\f$ is the size of the output width dimension. \f$height_{output}\f$ is the size of the output height dimension. + * \f$kernel_x\f$ and \f$kernel_y\f$ are the convolution sizes in width and height dimensions. + * skip is calculated by the relation between input and output. + * rounding is done according to \ref vx_convolutional_network_rounding_type_e. + * \param [in] graph The handle to the graph. + * \param [in] inputs_conv The input tensor data for convolution. 3 lower dimensions represent a single input, all following dimensions represent number of batches, possibly nested. + * \param [in] inputs_mul The input tensor data for mul. 3 lower dimensions represent a single input, all following dimensions represent number of batches, possibly nested. + * The dimension order is [width, height, #IFM, #batches]. \n + * \param [in] scale A non-negative \ref VX_TYPE_FLOAT32 multiplied to each product before overflow handling. + * \param [in] weights_biases [static] Point to WeightBiasesParameter data, vx_weights_biases_parameter is an opaque reference. + * \param [in] convolution_relu_pooling_params [static] Pointer to parameters of type \ref vx_nn_convolution_relu_pooling_params_t + * \param [in] size_of_convolution_relu_pooling_params [static] Size in bytes of convolution_relu_pooling_params. + * \param [in] outputs_conv The convolution output tensor data. Output will have the same number and structure of dimensions as inputs_conv. + * We uses this tensor to provide format information of convolution output data to hardware, don't really return convolution output data. + * \param [out] outputs_mul The final mul output tensor data. Output will have the same number and structure of dimensions as input. + * \return vx_node. + * \returns A node reference \ref vx_node. Any possible errors preventing a + * successful creation should be checked using \ref vxGetStatus. + * \ingroup group_cnn + */ +VX_API_ENTRY vx_node VX_API_CALL vxConvolutionReluPoolingMultiplyLayer2( + vx_graph graph, + vx_tensor inputs_conv, + vx_tensor inputs_mul, + vx_float32 input_scale, + vx_weights_biases_parameter weights_biases, + const vx_nn_convolution_relu_pooling_params_t * convolution_relu_pooling_params, + vx_size size_of_convolution_relu_pooling_params, + vx_tensor outputs_conv, + vx_tensor outputs_mul); +/*! \brief [Graph] Performs LUT on element values in the input tensor data's. + * \param [in] graph The handle to the graph. 
+ * \param [in] input input tensor data. + * \param [in] InLut The look-up table of x value, of type \ref vx_lut. + * \param [in] OutLut The look-up table of y value, of type \ref vx_lut. + * \param [out] output The output tensor data with the same dimensions as the input tensor data's. + * \ingroup group_tensor + * \return vx_node. + * \retval 0 Node could not be created. + * \retval * Node handle. + */ +VX_API_ENTRY vx_node VX_API_CALL vxTensorTableLookupLayer( + vx_graph graph, + vx_tensor input, + vx_lut InLut, + vx_lut OutLut, + vx_tensor output); + +typedef struct _vx_nn_gemm_relu_pooling_params_t +{ + vx_bool enable_relu; /*!< \brief Enable Relu layer function or not. */ + vx_bool enable_leaky_relu; /*!< \brief Enable LeakyRelu layer function or not. */ + vx_float32 alpha; /*!< \brief Alpha value for Activation */ + vx_float32 beta; /*!< \brief Beta value for Activation */ + vx_uint32 node_count; /*!< \brief node count to merge */ + vx_float32 merged_scale[MERGED_NODE_COUNT_MAX]; /*!< \brief scale of merged node output */ + vx_int32 merged_zero_point[MERGED_NODE_COUNT_MAX]; /*!< \brief zero point of merged node output */ + vx_enum merged_data_type[MERGED_NODE_COUNT_MAX]; /*!< \brief data type of merged node output */ + vx_enum act_func; /*!< \brief nn activation function */ + vx_lut lut_in; /*!< \brief LUT in */ + vx_lut lut_out; /*!< \brief LUT out */ + vx_bool enbale_const_multiplier; /*!< \brief tensor mul with one of inputs as a single pixel const tensor */ + vx_float32 const_multiplier; /*!< \brief const multiplier */ +} vx_nn_gemm_relu_pooling_params_t, * vx_nn_gemm_relu_pooling_params; + +/*! \brief Create a batch gemm node, the calcution formula is output = matrix_a * matrix_b + matrix_c. + * \param [in] graph The reference to the graph. + * \param [in] matrix_a The first input tensor. + * \param [in] matrix_b The second input tensor. Must be in the same data type and batch count as first input tensor. + * \param [in] matrix_c The third input tensor. Must be in the same data type and batch count as first input tensor. [optional] + * \param [in] trans_a If true, the matrix_a has been transposed before calcution. + * \param [in] trans_b If true, the matrix_b has been transposed before calcution. + * \param [in] trans_c If true, the matrix_c has been transposed before calcution. [optional] + * \param [in] merge_param the parameters for gemm + op merging + * \param [out] output The output tensor. Output dimension must agree the formula in the description. + * \return \ref vx_node. + * \retval vx_node A node reference. Any possible errors preventing a successful creation + * should be checked using \ref vxGetStatus + * \ingroup group_vision_function_gemm + */ +VX_API_ENTRY vx_node VX_API_CALL vxBatchGemmReluPoolingLayer(vx_graph graph, + vx_tensor matrix_a, + vx_tensor matrix_b, + vx_tensor matrix_c, + vx_scalar trans_a, + vx_scalar trans_b, + vx_scalar trans_c, + const vx_nn_gemm_relu_pooling_params merge_param, + vx_tensor output); + +/*! \brief Create a fuse stream process node. + * \param [in] graph The handle to the graph. + * \param [in] input_list input tensor list. + * \param [in] input_count input tensor number. + * \param [in] output_list output tensor list. + * \param [in] output_count output tensor number. + * \param [in] params the parameters for multi streamprocessor merging. + * \return \ref vx_node. + * \retval vx_node A node reference. 
Any possible errors preventing a successful creation + * should be checked using \ref vxGetStatus + * \ingroup group_vision_function_sp + */ +VX_API_ENTRY vx_node VX_API_CALL vxFusedSpNode( + vx_graph graph, + vx_tensor* input_list, + vx_uint32 input_count, + vx_tensor* output_list, + vx_uint32 output_count, + const vx_nn_fused_sp_params_t * params + ); + +/*! \brief Create a conv fuse stream process node. + * \param [in] graph The handle to the graph. + * \param [in] inputs input tensor. + * \param [in] weights_biases [static] Point to WeightBiasesParameter data, vx_weights_biases_parameter is an opaque reference. + * \param [in] convolution_relu_pooling_params [static] Pointer to parameters of type \ref vx_nn_convolution_relu_pooling_params_t + * \param [in] size_of_convolution_relu_pooling_params [static] Size in bytes of convolution_relu_pooling_params. + * \param [in] outputs output tensor. + * \return \ref vx_node. + * \retval vx_node A node reference. Any possible errors preventing a successful creation + * should be checked using \ref vxGetStatus + * \ingroup group_vision_function_sp + */ +VX_API_ENTRY vx_node VX_API_CALL vxConvSpNode( + vx_graph graph, + vx_tensor inputs, + vx_weights_biases_parameter weights_biases, + const vx_nn_convolution_relu_pooling_params_t * convolution_relu_pooling_params, + vx_size size_of_convolution_relu_pooling_params, + vx_tensor outputs +); + +#ifdef __cplusplus +} +#endif + + +#endif diff --git a/unified-tina/inc/VX/vx_khr_node_memory.h b/unified-tina/inc/VX/vx_khr_node_memory.h new file mode 100644 index 0000000..f94af4f --- /dev/null +++ b/unified-tina/inc/VX/vx_khr_node_memory.h @@ -0,0 +1,61 @@ +/* + + * Copyright (c) 2012-2017 The Khronos Group Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef _VX_KHR_NODE_MEMORY_H_ +#define _VX_KHR_NODE_MEMORY_H_ + +/*! \brief The Node Memory Extension. + * \file + */ + +#define OPENVX_KHR_NODE_MEMORY "vx_khr_node_memory" + +#include + +/*! \brief The kernel object attributes for global and local memory. + * \ingroup group_kernel + */ +enum vx_kernel_attribute_memory_e { + /*! \brief The global data pointer size to be shared across all instances of + * the kernel (nodes are instances of kernels). + * Use a \ref vx_size parameter. + * \note If not set it will default to zero. + */ + VX_KERNEL_GLOBAL_DATA_SIZE = VX_ATTRIBUTE_BASE(VX_ID_KHRONOS, VX_TYPE_KERNEL) + 0x5, + /*! \brief The global data pointer to the shared across all the instances of + * the kernel (nodes are instances of the kernels). + * Use a \ref void * parameter. + */ + VX_KERNEL_GLOBAL_DATA_PTR = VX_ATTRIBUTE_BASE(VX_ID_KHRONOS, VX_TYPE_KERNEL) + 0x6, +}; + +/*! \brief The node object attributes for global and local memory. + * \ingroup group_node + */ +enum vx_node_attribute_memory_e { + /*! \brief Used to indicate the size of the shared kernel global memory area. + * Use a \ref vx_size parameter. + */ + VX_NODE_GLOBAL_DATA_SIZE = VX_ATTRIBUTE_BASE(VX_ID_KHRONOS, VX_TYPE_NODE) + 0x9, + /*! 
\brief Used to indicate the pointer to the shared kernel global memory area. + * Use a void * parameter. + */ + VX_NODE_GLOBAL_DATA_PTR = VX_ATTRIBUTE_BASE(VX_ID_KHRONOS, VX_TYPE_NODE) + 0xA, +}; + +#endif + diff --git a/unified-tina/inc/VX/vx_khr_opencl.h b/unified-tina/inc/VX/vx_khr_opencl.h new file mode 100644 index 0000000..c764c3a --- /dev/null +++ b/unified-tina/inc/VX/vx_khr_opencl.h @@ -0,0 +1,268 @@ +/* + + * Copyright (c) 2012-2017 The Khronos Group Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef _VX_KHR_OPENCL_H_ +#define _VX_KHR_OPENCL_H_ + +#include +#include + +/*! \file + * \brief The OpenVX to OpenCL Inter-op Extension Header. + * + * \defgroup group_cl_api API + * \brief The API used by Clients to add OpenCL Kernels as vx_kernel. + * \details + * + * \defgroup group_cl_def Extension Defines + * \brief The Extension defines and constants. + * + * \defgroup group_cl_image Images + * \brief OpenVX Images + * \details Depending on whether the OpenCL implementation supports images, vx_image + * may map to an image2d_t or a OpenCL buffer. + * + * \defgroup group_cl_array Arrays + * \brief OpenVX Arrays + * + * \defgroup group_cl_convolution Convolutions + * \brief OpenVX Convolutions + * + * \defgroup group_cl_distribution Distributions + * \brief OpenVX Distributions + * + * \defgroup group_cl_matrix Matricies + * \brief OpenVX Matrix + * + * \defgroup group_cl_types OpenVX to OpenCL Atomic Types + * \brief Atomic Types + * \details OpenVX types map to OpenCL types through this table: + * | VX | OpenCL| + * |:---------|:------| + * |vx_uint8 |uchar | + * |vx_int8 |char | + * |vx_uint16 |ushort | + * |vx_int16 |short | + * |vx_uint32 |uint | + * |vx_int32 |int | + * |vx_uint64 |ulong | + * |vx_int64 |long | + * |vx_float32|float | + * |vx_float64|double | + * |vx_size |size_t | + * + * \note size_t can not be used as a parameter to a __kernel. + */ + +#ifndef VX_SCALE_UNITY +#define VX_SCALE_UNITY (1024) +#endif + +/*!\brief The maximum number of planes an image may have which is compatible across both + * API. + * \ingroup group_cl_def + */ +#define VX_CL_MAX_PLANES (4) + +#if defined(VX_CL_DOCUMENTATION) || !defined(VX_CL_KERNEL) + +#if defined(__APPLE__) || defined(DARWIN) +#include +#else +#include +#endif + +#if (!defined(__APPLE__)) && defined(CL_USE_LUMINANCE) +#define CL_USE_IMAGES +#endif + +/*! \brief The string name of this extension to match for in the extensions list + * \ingroup group_cl_def + */ +#define OPENVX_KHR_OPENCL "vx_khr_opencl" + +/*! \brief Adds an OpenCL Kernel as source code into the OpenVX implementation. + * \param [in] context The OpenVX Context. + * \param [in] name The name of the kernel in OpenVX nomenclature. + * \param [in] enumeration The OpenVX kernel enumeration used to identify this kernel. + * \param [in] source The array of source line pointers. + * \param [in] line_lengths The array of lines lengths for each line of source. 
+ * \param [in] num_lines the number of lines in both the sources array and line_lengths array. + * \param [in] symbol_name The name of the kernel to call in the program. + * \param [in] numParams The number of parameters to the OpenVX kernel. + * \param [in] input The input validator. + * \param [in] output The output validator. + * \see vxAddParameterToKernel to configure the specific parameter attributes. + * \ingroup group_cl_api + */ +VX_API_ENTRY vx_kernel VX_API_CALL vxAddOpenCLAsSourceKernel(vx_context context, + vx_char name[VX_MAX_KERNEL_NAME], + vx_enum enumeration, + char *source[], + size_t line_lengths[], + size_t num_lines, + char symbol_name[], + vx_uint32 numParams, + vx_kernel_input_validate_f input, + vx_kernel_output_validate_f output); + +/*! \brief Adds an OpenCL Kernel as binary program into the OpenVX implementation. + * \param [in] context The OpenVX Context. + * \param [in] name The name of the kernel in OpenVX nomenclature. + * \param [in] enumeration The OpenVX kernel enumeration used to identify this kernel. + * \param [in] program The OpenCL Program which contains the kernel (either pre-compiled or compiled by user). + * \param [in] symbol_name The name of the kernel to call in the program. + * \param [in] numParams The number of parameters to the OpenVX kernel. + * \param [in] input The input validator. + * \param [in] output The output validator. + * \see vxAddParameterToKernel to configure the specific parameter attributes. + * \ingroup group_cl_api + */ +VX_API_ENTRY vx_kernel VX_API_CALL vxAddOpenCLAsBinaryKernel(vx_context context, + vx_char name[VX_MAX_KERNEL_NAME], + vx_enum enumeration, + cl_program program, + char symbol_name[], + vx_uint32 numParams, + vx_kernel_input_validate_f input, + vx_kernel_output_validate_f output); + +#endif // External API + +#if defined(VX_CL_DOCUMENTATION) || defined(VX_CL_KERNEL) + +#if defined(__IMAGE_SUPPORT__) && defined(CL_USE_LUMINANCE) +#define CL_USE_IMAGES +#endif + +/*! \brief Allows access to an image pixel as a typecast pointer deference. + * \param type The OpenCL single element type + * \param ptr The __global pointer to the base of the image. + * \param x The x coordinate. + * \param y The y coordinate. + * \param sx The x stride. + * \param sy The y stride. + * \ingroup group_cl_image + */ +#define vxImagePixel(type, ptr, x, y, sx, sy) \ + (*(type *)(&((uchar *)ptr)[((y) * sy) + ((x) * sx)])) + +/*! + * \brief Allows access to an array item as a typecast pointer deference. + * \param type The OpenCL single element type or structure type. + * \param ptr The __global pointer to the base of the array. + * \param index The index of the element to access. + * \param stride The stride in bytes between two adjacent elements. + * \ingroup group_cl_array + */ +#define vxArrayItem(type, ptr, index, stride) \ + (*(type *)(&((uchar *)ptr)[index*stride])) + +/*! \brief Allows access to a matrix element \f$ M_{ij} \f$ where i is the column and j is the row. + * \param type The OpenCL single element type of the matrix. + * \param ptr The __global pointer to the base of the array. + * \param columns The number of columns in the matrix. + * \param i The column index + * \param j The row index + * \ingroup group_cl_matrix + */ +#define vxMatrixElement(type, ptr, columns, i, j) (((type *)ptr)[columns*j + i]) + +/*! \brief Allows access to a convolution element \f$ C_{ij} \f$ where i is the column and j is the row. + * \note Convolution elements are always of type short. 
+ * \param ptr The __global pointer to the base of the array. + * \param columns The number of columns in the matrix. + * \param i The column index + * \param j The row index + * \ingroup group_cl_convolution + */ +#define vxConvolveElement(ptr, columns, i, j) (((short *)ptr)[columns*j + i]) + +/*! \brief Allows access to a distribution frequency counter. + * \param ptr The __global pointer to the base of the distribution. + * \param value The value to retrive the frequency count for. + * \param offset The offset within the input domain. + * \param range The total range within the domain starting from offset. + * \param window_size The window size of the bin. + * \ingroup group_cl_distribution + */ +#define vxGetFrequency(ptr, value, offset, range, window_size) \ + ((offset <= value) && (value <= (range+offset)) ? ptr[(value-offset)/window_size] : 0) + +/*! \brief Increments a distribution frequency counter for a value. + * \param ptr The __global pointer to the base of the distribution. + * \param value The value to increment the frequency count for. + * \param offset The offset within the input domain. + * \param range The total range within the domain starting from offset. + * \param window_size The window size of the bin. + * \ingroup group_cl_distribution + */ +#define vxIncFrequency(ptr, value, offset, range, window_size) \ + ((offset <= value) && (value <= (range+offset)) ? ++ptr[(value-offset)/window_size] : 0) + +/*! \brief Decrements a distribution frequency counter for a value. + * \param ptr The __global pointer to the base of the distribution. + * \param value The value to decrement the frequency count for. + * \param offset The offset within the input domain. + * \param range The total range within the domain starting from offset. + * \param window_size The window size of the bin. + * \ingroup group_cl_distribution + */ +#define vxDecFrequency(ptr, value, offset, range, window_size) \ + ((offset <= value) && (value <= (range+offset)) ? --ptr[(value-offset)/window_size] : 0) + +#if defined(VX_VERSION_1_1) && (VX_VERSION >= VX_VERSION_1_1) + +/*! \brief Allows access to a distribution frequency counter. + * \param ptr The __global pointer to the base of the distribution. + * \param value The value to retrive the frequency count for. + * \param offset The offset within the input domain. + * \param range The total range within the domain starting from offset. + * \param num_bins The number of bins in the domain range. + * \ingroup group_cl_distribution + */ +#define vxGetFrequency2(ptr, value, offset, range, num_bins) \ + ((offset <= value) && (value <= (range+offset)) ? ptr[(value-offset)*num_bins/range] : 0) + +/*! \brief Increments a distribution frequency counter for a value. + * \param ptr The __global pointer to the base of the distribution. + * \param value The value to increment the frequency count for. + * \param offset The offset within the input domain. + * \param range The total range within the domain starting from offset. + * \param num_bins The number of bins in the domain range. + * \ingroup group_cl_distribution + */ +#define vxIncFrequency2(ptr, value, offset, range, num_bins) \ + ((offset <= value) && (value <= (range+offset)) ? ++ptr[(value-offset)*num_bins/range] : 0) + +/*! \brief Decrements a distribution frequency counter for a value. + * \param ptr The __global pointer to the base of the distribution. + * \param value The value to decrement the frequency count for. + * \param offset The offset within the input domain. 
+ * \param range The total range within the domain starting from offset. + * \param num_bins The number of bins in the domain range. + * \ingroup group_cl_distribution + */ +#define vxDecFrequency2(ptr, value, offset, range, num_bins) \ + ((offset <= value) && (value <= (range+offset)) ? --ptr[(value-offset)*num_bins/range] : 0) + +#endif /*VX_VERSION_1_1*/ + +#endif + +#endif + diff --git a/unified-tina/inc/VX/vx_khr_tiling.h b/unified-tina/inc/VX/vx_khr_tiling.h new file mode 100644 index 0000000..0c6ad7b --- /dev/null +++ b/unified-tina/inc/VX/vx_khr_tiling.h @@ -0,0 +1,347 @@ +/* + + * Copyright (c) 2012-2017 The Khronos Group Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef _VX_KHR_TILING_H_ +#define _VX_KHR_TILING_H_ + +/*! + * \file + * \brief The Khronos Extension for User Tiling Functions. + * + * \defgroup group_tiling Extension: User Tiling API + * \brief The Khronos Extension for User Tiling Functions. + */ + +#define OPENVX_KHR_TILING "vx_khr_tiling" + +#if defined(OPENVX_TILING_1_0) +#undef OPENVX_TILING_1_1 +#endif + +#include +/* For vx_kernel_input_validate_f and vx_kernel_output_validate_f: */ +#include + + +/*! \def VX_RESTRICT + * \brief A platform wrapper for the restrict keyword. + * \ingroup group_tiling + */ +#if defined(_WIN32) +#define VX_RESTRICT +#else +#if defined(__cplusplus) || defined(ANDROID) +#define VX_RESTRICT __restrict +#elif defined(__linux__) +#define VX_RESTRICT +#elif defined __QNXNTO__ +#define VX_RESTRICT +#else +#define VX_RESTRICT restrict +#endif +#endif + +/*! \brief The User Tiling Function tile block size declaration. + * \details The author of a User Tiling Kernel will use this structure to define + * the dimensionality of the tile block. + * \ingroup group_tiling + */ +typedef struct _vx_tile_block_size_t { + vx_int32 width; /*!< \brief Tile block width in pixels. */ + vx_int32 height; /*!< \brief Tile block height in pixels. */ +} vx_tile_block_size_t; + +/*! \brief The User Tiling Function Neighborhood declaration. + * \details The author of a User Tiling Kernel will use this structure to define + * the neighborhood surrounding the tile block. + * \ingroup group_tiling + */ +typedef struct _vx_neighborhood_size_t { + vx_int32 left; /*!< \brief Left of the tile block. */ + vx_int32 right; /*!< \brief Right of the tile block. */ + vx_int32 top; /*!< \brief Top of the tile block. */ + vx_int32 bottom; /*!< \brief Bottom of the tile block. */ +} vx_neighborhood_size_t; + +/*! \brief A structure which describes the tile's parent image. + * \ingroup group_tiling + */ +typedef struct _vx_image_description_t { + vx_uint32 width; /*!< \brief Width of the image */ + vx_uint32 height; /*!< \brief Height of the image */ + vx_df_image format; /*!< \brief The \ref vx_df_image_e of the image */ + vx_uint32 planes; /*!< \brief The number of planes in the image */ + vx_enum range; /*!< \brief The \ref vx_channel_range_e enumeration. */ + vx_enum space; /*!< \brief The \ref vx_color_space_e enumeration. 
*/
+} vx_image_description_t;
+
+/*! \brief The maximum number of planes in a tiled image.
+ * \ingroup group_tiling
+ */
+#define VX_MAX_TILING_PLANES (4)
+
+/*! \brief The tile structure declaration.
+ * \ingroup group_tiling
+ */
+typedef struct _vx_tile_t {
+    /*! \brief The array of pointers to the tile's image plane. */
+    vx_uint8 * VX_RESTRICT base[VX_MAX_TILING_PLANES];
+    /*! \brief The top left X pixel index within the width dimension of the image. */
+    vx_uint32 tile_x;
+    /*! \brief The top left Y pixel index within the height dimension of the image. */
+    vx_uint32 tile_y;
+    /*! \brief The array of addressing structures describing each plane. */
+    vx_imagepatch_addressing_t addr[VX_MAX_TILING_PLANES];
+    /*! \brief The output block size structure. */
+    vx_tile_block_size_t tile_block;
+    /*! \brief The neighborhood definition. */
+    vx_neighborhood_size_t neighborhood;
+    /*! \brief The description and attributes of the image. */
+    vx_image_description_t image;
+} vx_tile_t;
+
+#ifndef VX_TILE_ATTRIBUTES_DEFINITIONS
+
+/*!
+ * \brief The full height of the tile's parent image in pixels.
+ * \param [in] ptile The pointer to the \ref vx_tile_t structure.
+ * \ingroup group_tiling
+ */
+#define vxImageHeight(ptile) ((ptile)->image.height)
+
+/*!
+ * \brief The full width of the tile's parent image in pixels.
+ * \param [in] ptile The pointer to the \ref vx_tile_t structure.
+ * \ingroup group_tiling
+ */
+#define vxImageWidth(ptile) ((ptile)->image.width)
+
+/*!
+ * \brief The offset between the left edge of the image and the left edge of the tile, in pixels.
+ * \param [in] ptile The pointer to the \ref vx_tile_t structure.
+ * \ingroup group_tiling
+ */
+#define vxTileX(ptile) ((ptile)->tile_x)
+
+/*!
+ * \brief The offset between the top edge of the image and the top edge of the tile, in pixels.
+ * \param [in] ptile The pointer to the \ref vx_tile_t structure.
+ * \ingroup group_tiling
+ */
+#define vxTileY(ptile) ((ptile)->tile_y)
+
+/*!
+ * \brief The width of the tile in pixels.
+ * \param [in] ptile The pointer to the \ref vx_tile_t structure.
+ * \param [in] index The plane index.
+ * \ingroup group_tiling
+ */
+#define vxTileWidth(ptile, index) ((ptile)->addr[index].dim_x)
+
+/*!
+ * \brief The height of the tile in pixels.
+ * \param [in] ptile The pointer to the \ref vx_tile_t structure.
+ * \param [in] index The plane index.
+ * \ingroup group_tiling
+ */
+#define vxTileHeight(ptile, index) ((ptile)->addr[index].dim_y)
+
+/*!
+ * \brief The tile block height.
+ * \param [in] ptile The pointer to the \ref vx_tile_t structure.
+ * \ingroup group_tiling
+ */
+#define vxTileBlockHeight(ptile) ((ptile)->tile_block.height)
+
+/*!
+ * \brief The tile block width.
+ * \param [in] ptile The pointer to the \ref vx_tile_t structure.
+ * \ingroup group_tiling
+ */
+#define vxTileBlockWidth(ptile) ((ptile)->tile_block.width)
+
+/*!
+ * \brief The simple wrapper to access each image's neighborhood -X value.
+ * \param [in] ptile The pointer to the \ref vx_tile_t structure.
+ * \ingroup group_tiling
+ */
+#define vxNeighborhoodLeft(ptile) ((ptile)->neighborhood.left)
+
+/*!
+ * \brief The simple wrapper to access each image's neighborhood +X value.
+ * \param [in] ptile The pointer to the \ref vx_tile_t structure.
+ * \ingroup group_tiling
+ */
+#define vxNeighborhoodRight(ptile) ((ptile)->neighborhood.right)
+
+/*!
+ * \brief The simple wrapper to access each image's neighborhood -Y value.
+ * \param [in] ptile The pointer to the \ref vx_tile_t structure.
+ * \ingroup group_tiling + */ +#define vxNeighborhoodTop(ptile) ((ptile)->neighborhood.top) + +/*! + * \brief The simple wrapper to access each image's neighborhood +Y value. + * \param [in] ptile The pointer to the \ref vx_tile_t structure. + * \ingroup group_tiling + */ +#define vxNeighborhoodBottom(ptile) ((ptile)->neighborhood.bottom) + + +#endif + +/*! \brief The User Kernel Tiling Attributes. + * \ingroup group_tiling + */ +enum vx_kernel_attribute_tiling_e { + /*! \brief This allows a tiling mode kernel to set its input neighborhood. */ + VX_KERNEL_INPUT_NEIGHBORHOOD = VX_ATTRIBUTE_BASE(VX_ID_KHRONOS, VX_TYPE_KERNEL) + 0x7, + /*! \brief This allows a tiling mode kernel to set its output tile block size. */ + VX_KERNEL_OUTPUT_TILE_BLOCK_SIZE = VX_ATTRIBUTE_BASE(VX_ID_KHRONOS, VX_TYPE_KERNEL) + 0x8, + /*! \brief This allows the author to set the border mode on the tiling kernel. */ + VX_KERNEL_BORDER = VX_ATTRIBUTE_BASE(VX_ID_KHRONOS, VX_TYPE_KERNEL) + 0x9, + /*! \brief This determines the per tile memory allocation. */ + VX_KERNEL_TILE_MEMORY_SIZE = VX_ATTRIBUTE_BASE(VX_ID_KHRONOS, VX_TYPE_KERNEL) + 0xA, +#if defined(OPENVX_TILING_1_1) + /*! \brief This allows a tiling mode kernel to set its input tile block size. */ + VX_KERNEL_INPUT_TILE_BLOCK_SIZE = VX_ATTRIBUTE_BASE(VX_ID_KHRONOS, VX_TYPE_KERNEL) + 0xB, + /*! \brief This allows a tiling mode kernel to set its output neighborhood. */ + VX_KERNEL_OUTPUT_NEIGHBORHOOD = VX_ATTRIBUTE_BASE(VX_ID_KHRONOS, VX_TYPE_KERNEL) + 0xC, +#endif +}; + +/*! \brief The User Node Tiling Attributes. + * \note These are largely unusable by the tiling function, as it doesn't give you the node reference! + * \ingroup group_tiling + */ +enum vx_node_attribute_tiling_e { + /*! \brief This allows a tiling mode node to get its input neighborhood. */ + VX_NODE_INPUT_NEIGHBORHOOD = VX_ATTRIBUTE_BASE(VX_ID_KHRONOS, VX_TYPE_NODE) + 0xB, + /*! \brief This allows a tiling mode node to get its output tile block size. */ + VX_NODE_OUTPUT_TILE_BLOCK_SIZE = VX_ATTRIBUTE_BASE(VX_ID_KHRONOS, VX_TYPE_NODE) + 0xC, + /*! \brief This is the size of the tile local memory area. */ + VX_NODE_TILE_MEMORY_SIZE = VX_ATTRIBUTE_BASE(VX_ID_KHRONOS, VX_TYPE_NODE) + 0xD, +#if defined(OPENVX_TILING_1_1) + /*! \brief This allows a tiling mode node to get its input tile block size. */ + VX_NODE_INPUT_TILE_BLOCK_SIZE = VX_ATTRIBUTE_BASE(VX_ID_KHRONOS, VX_TYPE_NODE) + 0xE, + /*! \brief This allows a tiling mode node to get its output neighborhood. */ + VX_NODE_OUTPUT_NEIGHBORHOOD = VX_ATTRIBUTE_BASE(VX_ID_KHRONOS, VX_TYPE_NODE) + 0xF, +#endif +}; + +/*! \brief The tiling border mode extensions + * \ingroup group_tiling + */ +enum vx_border_tiling_e { + /*! \brief This value indicates that the author of the tiling kernel wrote + * code to handle border conditions into the kernel itself. If this mode + * is set, it can not be overriden by a call to the \ref vxSetNodeAttribute + * with \ref VX_NODE_BORDER. + */ + VX_BORDER_MODE_SELF = VX_ENUM_BASE(VX_ID_KHRONOS, VX_ENUM_BORDER) + 0x3, +}; + +/*! \typedef vx_tiling_kernel_f + * \brief Tiling Kernel function typedef for User Tiling Kernels. + * \note Tiles may come in any dimension and are not guaranteed to be delivered in + * any particular order. + * \param [in] parameters The array abstract pointers to parameters. + * \param [in] tile_memory The local tile memory pointer if requested, otherwise NULL. + * \param [in] tile_memory_size The size of the local tile memory, if not requested, 0. 
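+ * A minimal sketch of a conforming tiling function: a single-plane pixel copy built only
+ * from the accessors defined in this header (the layout of two vx_tile_t pointers in
+ * parameters[] is an assumption made for illustration):
+ * \code
+ * void example_copy_tiling(void * VX_RESTRICT parameters[],
+ *                          void * VX_RESTRICT tile_memory,
+ *                          vx_size tile_memory_size)
+ * {
+ *     vx_tile_t *in  = (vx_tile_t *)parameters[0];
+ *     vx_tile_t *out = (vx_tile_t *)parameters[1];
+ *     vx_uint32 x, y;
+ *     (void)tile_memory;       // no per-tile scratch requested
+ *     (void)tile_memory_size;
+ *     for (y = 0u; y < vxTileHeight(out, 0); y++)
+ *         for (x = 0u; x < vxTileWidth(out, 0); x++)
+ *             vxImagePixel(vx_uint8, out, 0, x, y, 0, 0) =
+ *                 vxImagePixel(vx_uint8, in, 0, x, y, 0, 0);
+ * }
+ * \endcode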
+ * \ingroup group_tiling + */ +#ifdef __cplusplus +typedef void (*vx_tiling_kernel_f)(void * VX_RESTRICT parameters[], + void * VX_RESTRICT tile_memory, + vx_size tile_memory_size); +#else +typedef void (*vx_tiling_kernel_f)(void * VX_RESTRICT parameters[VX_RESTRICT], + void * VX_RESTRICT tile_memory, + vx_size tile_memory_size); +#endif + +#ifndef VX_IMAGE_PIXEL_DEFINITION + +/*! \def vxImageOffset + * \brief Computes the offset within an image. + * \param [in] ptile The pointer to the \ref vx_tile_t structure. + * \param [in] i The plane index. + * \param [in] x The Width Coordinates. + * \param [in] y The Height Coordinates. + * \param [in] ox The X offset. + * \param [in] oy The Y offset. + * \ingroup group_tiling + */ +#define vxImageOffset(ptile, i, x, y, ox, oy) \ + ((ptile)->addr[i].stride_y * (vx_int32)(((vx_int32)((oy)+(y)) * (vx_int32)(ptile)->addr[i].scale_y)/(vx_int32)VX_SCALE_UNITY)) + \ + ((ptile)->addr[i].stride_x * (vx_int32)(((vx_int32)((ox)+(x)) * (vx_int32)(ptile)->addr[i].scale_x)/(vx_int32)VX_SCALE_UNITY)) + + +/*! \def vxImagePixel + * \brief Accesses an image pixel as a type-cast indexed pointer dereference. + * \param [in] type The type of the image pixel. Example values are \ref vx_uint8, \ref vx_uint16, \ref vx_uint32, etc. + * \param [in] ptile The pointer to the \ref vx_tile_t structure. + * \param [in] i The plane index. + * \param [in] x The Center Pixel in Width Coordinates. + * \param [in] y The Center Pixel in Height Coordinates. + * \param [in] ox The X offset. + * \param [in] oy The Y offset. + * \ingroup group_tiling + */ +#define vxImagePixel(type, ptile, i, x, y, ox, oy) \ + *((type *)(&((vx_uint8 *)(ptile)->base[i])[vxImageOffset(ptile, i, x, y, ox, oy)])) + +#endif + +#ifdef __cplusplus +extern "C" { +#endif + +/*! \brief Allows a user to add a tile-able kernel to the OpenVX system. + * \param [in] context The handle to the implementation context. + * \param [in] name The string to be used to match the kernel. + * \param [in] enumeration The enumerated value of the kernel to be used by clients. + * \param [in] flexible_func_ptr The process-local flexible function pointer to be invoked. + * \param [in] fast_func_ptr The process-local fast function pointer to be invoked. + * \param [in] num_params The number of parameters for this kernel. + * \param [in] input The pointer to a function which will validate the + * input parameters to this kernel. + * \param [in] output The pointer to a function which will validate the + * output parameters to this kernel. + * \note Tiling Kernels do not have access to any of the normal node attributes listed + * in \ref vx_node_attribute_e. + * \post Call \ref vxAddParameterToKernel for as many parameters as the function has, + * then call \ref vxFinalizeKernel. + * \retval 0 Indicates that an error occurred when adding the kernel. + * Note that the fast or flexible formula, but not both, can be NULL. 
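+ * A hedged registration sketch (the kernel name, the enumeration offset and the
+ * input_cb / output_cb validator callbacks are placeholders, and example_copy_tiling
+ * refers to the sketch shown for \ref vx_tiling_kernel_f above):
+ * \code
+ * vx_kernel kernel = vxAddTilingKernel(context, "com.example.tiling_copy",
+ *     VX_KERNEL_BASE(VX_ID_DEFAULT, 0) + 0x0,
+ *     example_copy_tiling, NULL, 2, input_cb, output_cb);
+ * if (vxGetStatus((vx_reference)kernel) == VX_SUCCESS)
+ * {
+ *     vxAddParameterToKernel(kernel, 0, VX_INPUT,  VX_TYPE_IMAGE, VX_PARAMETER_STATE_REQUIRED);
+ *     vxAddParameterToKernel(kernel, 1, VX_OUTPUT, VX_TYPE_IMAGE, VX_PARAMETER_STATE_REQUIRED);
+ *     vxFinalizeKernel(kernel);
+ * }
+ * \endcode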
+ * \ingroup group_tiling
+ */
+VX_API_ENTRY vx_kernel VX_API_CALL vxAddTilingKernel(vx_context context,
+    vx_char name[VX_MAX_KERNEL_NAME],
+    vx_enum enumeration,
+    vx_tiling_kernel_f flexible_func_ptr,
+    vx_tiling_kernel_f fast_func_ptr,
+    vx_uint32 num_params,
+    vx_kernel_input_validate_f input,
+    vx_kernel_output_validate_f output);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif
diff --git a/unified-tina/inc/VX/vx_khr_variants.h b/unified-tina/inc/VX/vx_khr_variants.h
new file mode 100644
index 0000000..29928b8
--- /dev/null
+++ b/unified-tina/inc/VX/vx_khr_variants.h
@@ -0,0 +1,96 @@
+/*
+
+ * Copyright (c) 2012-2017 The Khronos Group Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef _VX_KHR_VARIANT_H_
+#define _VX_KHR_VARIANT_H_
+
+/*!
+ * \file
+ * \brief The Khronos Extension for Kernel Variants.
+ *
+ * \defgroup group_variants Extension: Kernel Variants
+ * \brief The Khronos Extension for Kernel Variants.
+ * \details Kernel Variants allow the Client-Defined Functions to create several
+ * kernels on the same target with the same name, but with slight variations
+ * between them. Frequently these variants are expected to employ different
+ * algorithms or methodologies.
+ *
+ * All target-specific kernels and target variants must conform to the same OpenVX
+ * specification of the OpenVX Kernel in order to use the string name and enumeration.
+ * For example, a vendor may supply multiple targets,
+ * and implement the same functionality on each. Furthermore, the same
+ * vendor may offer a variant on some specific target which offers some differentiation but
+ * still conforms to the definition of the OpenVX Kernel.
+ * In this example there are 3 implementations of the same computer vision function, "Sobel3x3".
+ * \arg On "CPU" a "Sobel3x3" which is "faster". A variant which may produce slightly less accurate but still conformant results.
+ * \arg On "CPU" a "Sobel3x3" which is more "accurate". A variant which may run slower but produces bit exact results.
+ * \arg On "GPU" a "Sobel3x3" \e default variant which may run on a remote core and produce bit exact results.
+ *
+ * In each of these cases a client of OpenVX could request the kernels in nearly
+ * the same manner. There are two main approaches, which depend on the
+ * method a client calls to get the kernel reference. The first uses enumerations.
+ * This method allows the client to attempt to find other targets and variants, but if
+ * these are not present, the default node would still have been constructed.
+ * The second method depends on using fully qualified strings to get the kernel reference.
+ * This second method is more compact but it does not permit fail-safing to default versions.
+ *
+ * As part of this extension, the function vxGetKernelByName will now accept more
+ * qualifications to the string naming scheme. Kernel names can be additionally
+ * qualified in 2 separate ways, by target and by variant. A "fully" qualified name is in the format of
+ * target:kernel:variant.
+ * Both \e target and \e variant may be omitted (for an unqualified name). + * In this case, the implementation will assume the "default" value of these + * names (which could literally be "default"). Names may also be fully + * qualified with target included. + * Examples: + * \arg "khronos.c_model:org.khronos.openvx.sobel3x3:default" - fully qualified + * \arg "org.khronos.openvx.sobel3x3:default" (missing target) - partially qualified + * \arg "khronos.c_model:org.khronos.openvx.sobel3x3" (missing variant) - partially qualified. + * \arg "org.khronos.openvx.sobel3x3" - unqualified. + * + */ + +/*! \brief The string name of the extension. + * \ingroup group_variants + */ +#define OPENVX_KHR_VARIANTS "vx_khr_variants" + +/*! \brief Defines the maximum number of characters in a variant string. + * \ingroup group_variants + */ +#define VX_MAX_VARIANT_NAME (64) + +#include <VX/vx.h> + +#ifdef __cplusplus +extern "C" { +#endif + +/*! \brief Used to choose a variant of a kernel for execution on a particular node. + * \param [in] node The reference to the node. + * \param [in] variantName The name of the variant to choose. + * \return A \ref vx_status_e enumeration. + * \ingroup group_variants + */ +VX_API_ENTRY vx_status VX_API_CALL vxChooseKernelVariant(vx_node node, vx_char variantName[VX_MAX_VARIANT_NAME]); + +#ifdef __cplusplus +} +#endif + +#endif + diff --git a/unified-tina/inc/VX/vx_khr_xml.h b/unified-tina/inc/VX/vx_khr_xml.h new file mode 100644 index 0000000..c75412f --- /dev/null +++ b/unified-tina/inc/VX/vx_khr_xml.h @@ -0,0 +1,156 @@ +/* + + * Copyright (c) 2012-2017 The Khronos Group Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef _VX_KHR_XML_H_ +#define _VX_KHR_XML_H_ + +/*! \file + * \brief The OpenVX XML Schema Extension Header. + * + * \defgroup group_xml Extension: XML API + * \brief The Khronos Extension for OpenVX XML Import and Export Support. + */ + +#define OPENVX_KHR_XML "vx_khr_xml" + +#include <VX/vx.h> + +/*! \brief The Object Type Enumeration for Imports. + * \ingroup group_xml + */ +enum vx_ext_import_type_e { + VX_TYPE_IMPORT = 0x814,/*!< \brief A \ref vx_import */ +}; + +/*! \brief The import type enumeration. + * \ingroup group_xml + * \see VX_IMPORT_ATTRIBUTE_TYPE + */ +enum vx_ext_import_types_e { + VX_IMPORT_TYPE_XML = 0,/*!< \brief The XML import type */ +}; + +/*! \brief The import attributes list + * \ingroup group_xml + * \see vxQueryImport + */ +enum vx_import_attribute_e { + /*! \brief Returns the number of references in the import object. Use a \ref vx_uint32 parameter.*/ + VX_IMPORT_ATTRIBUTE_COUNT = VX_ATTRIBUTE_BASE(VX_ID_KHRONOS, VX_TYPE_IMPORT) + 0x0, + /*! \brief Returns the type of import. Use a \ref vx_ext_import_types_e parameter */ + VX_IMPORT_ATTRIBUTE_TYPE = VX_ATTRIBUTE_BASE(VX_ID_KHRONOS, VX_TYPE_IMPORT) + 0x1, +}; + +/*! \brief An abstract handle to an import object. + * \ingroup group_xml + * \extends vx_reference + */ +typedef struct _vx_import *vx_import; + + +#ifdef __cplusplus +extern "C" { +#endif + +/*!
\brief Exports all objects in the context to an XML file which uses the OpenVX + * XML Schema. + * \param [in] context The context to export. + * \param [in] xmlfile The file name to write the XML into. + * \note The reference numbers contained in the xml file can appear in any order but + * should be inclusive from index number 0 to [number of references - 1]. For example, + * if there are 20 references in the xml file, none of the reference indices should be >= 20. + * \return A \ref vx_status_e enumeration. + * \see https://www.khronos.org/registry/vx/schema/openvx-1-1.xsd + * \ingroup group_xml + */ +VX_API_ENTRY vx_status VX_API_CALL vxExportToXML(vx_context context, vx_char xmlfile[]); + + +/*! \brief Imports all framework and data objects from an XML file into the given context. + * \param [in] context The context to import into. + * \param [in] xmlfile The XML file to read. + * \note The reference indices in the import object correspond with the reference numbers in the + * XML file. It is assumed that the program has some means to know which references to use from the + * imported list (either by name: \ref vxGetImportReferenceByName, or by index from looking at the XML + * file (debug use case): \ref vxGetImportReferenceByIndex). Alternatively, the program can use + * \ref vxGetImportReferenceByIndex in a loop and query each one to understand what was imported. After + * all references of interest have been retrieved, this import object should be released using + * \ref vxReleaseImport. + * \return \ref vx_import object containing references to the imported objects in the context + * \see https://www.khronos.org/registry/vx/schema/openvx-1-1.xsd + * \ingroup group_xml + */ +VX_API_ENTRY vx_import VX_API_CALL vxImportFromXML(vx_context context, vx_char xmlfile[]); + +/*! \brief Used to retrieve a reference by name from the import when the name is known beforehand. If + * multiple references have the same name, then *any* one of them may be returned. + * \param [in] import The reference to the import object. + * \param [in] name The reference string name. + * \return \ref vx_reference + * \retval 0 Invalid import object or name does not match a reference in the import object. + * \retval * The reference matching the requested name. + * \note Use \ref vxReleaseReference to release the reference before releasing the context. + * \pre \ref vxImportFromXML + * \ingroup group_xml + */ +VX_API_ENTRY vx_reference VX_API_CALL vxGetImportReferenceByName(vx_import import, const vx_char *name); + +/*! \brief Used to retrieve a reference by the index from the import. + * \param [in] import The reference to the import object. + * \param [in] index The index of the reference in the import object to return. + * \return \ref vx_reference + * \retval 0 Invalid import object or index. + * \retval * The reference at the requested index number. + * \note Use \ref vxQueryImport with \ref VX_IMPORT_ATTRIBUTE_COUNT to retrieve + * the upper limit of references in the import. + * \note Use \ref vxReleaseReference to release the reference before releasing the context. + * \pre \ref vxImportFromXML + * \ingroup group_xml + */ +VX_API_ENTRY vx_reference VX_API_CALL vxGetImportReferenceByIndex(vx_import import, vx_uint32 index); + +/*! \brief Used to query the import about its properties. + * \param [in] import The reference to the import object. + * \param [in] attribute The \ref vx_import_attribute_e value to query for. + * \param [out] ptr The location at which the resulting value will be stored.
+ * \param [in] size The size of the container to which ptr points. + * \return A \ref vx_status_e enumeration. + * \pre \ref vxImportFromXML + * \ingroup group_xml + */ +VX_API_ENTRY vx_status VX_API_CALL vxQueryImport(vx_import import, vx_enum attribute, void *ptr, vx_size size); + +/*! \brief Releases a reference to an import object. + * Also internally releases its references to its imported objects. These + * imported objects may not be garbage collected until their total reference + * counts are zero. + * \param [in] import The pointer to the import object to release. + * \return A \ref vx_status_e enumeration. + * \retval VX_SUCCESS No errors. + * \retval VX_ERROR_INVALID_REFERENCE If import is not a \ref vx_import. + * \note After returning from this function the reference will be zeroed. + * \pre \ref vxImportFromXML + * \ingroup group_xml + */ +VX_API_ENTRY vx_status VX_API_CALL vxReleaseImport(vx_import *import); + +#ifdef __cplusplus +} +#endif + +#endif + diff --git a/unified-tina/inc/VX/vx_lib_debug.h b/unified-tina/inc/VX/vx_lib_debug.h new file mode 100644 index 0000000..65db9bf --- /dev/null +++ b/unified-tina/inc/VX/vx_lib_debug.h @@ -0,0 +1,385 @@ +/* + + * Copyright (c) 2012-2017 The Khronos Group Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef _OPENVX_EXT_DEBUG_H_ +#define _OPENVX_EXT_DEBUG_H_ + +#include + +/*! + * \file + * \brief The OpenVX Debugging Extension. + * \defgroup group_debug_ext Debugging Extension + * \defgroup group_vision_function_copy_image Kernel: Copy Image + * \defgroup group_vision_function_copy_array Kernel: Copy Array + * \defgroup group_vision_function_fwrite_image Kernel: File Write Image + * \defgroup group_vision_function_fwrite_array Kernel: File Write Array + * \defgroup group_vision_function_plus1 Kernel: Plus One Image + * \defgroup group_vision_function_fill_image Kernel: Fill Image + * \defgroup group_vision_function_check_image Kernel: Check Image + * \defgroup group_vision_function_check_array Kernel: Check Array + * \defgroup group_vision_function_compare_images Kernel: Compare Images + */ + +/*! \brief The maximum filepath name length. + * \ingroup group_debug_ext + */ +#define VX_MAX_FILE_NAME (256) + +/*! \brief The library value for the extension + * \ingroup group_debug_ext + */ +#define VX_LIBRARY_KHR_DEBUG (0xFF) + +/*! \brief The list of extensions to OpenVX from the Sample Implementation. + * \ingroup group_debug_ext + */ +enum vx_kernel_debug_ext_e { + + /*! + * \brief The Copy kernel. Output = Input. + * \param [in] vx_image The input image. + * \param [out] vx_image The output image. + * \see group_vision_function_copy_image + */ + VX_KERNEL_DEBUG_COPY_IMAGE = VX_KERNEL_BASE(VX_ID_KHRONOS, VX_LIBRARY_KHR_DEBUG) + 0x0, + + /*! + * \brief The Copy Kernel, Output = Input. + * \param [in] vx_array The input array. + * \param [out] vx_array The output array. 
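+ *
+ * A brief sketch (not from the original header) of looking this kernel up by its
+ * enumeration; error handling is kept minimal on purpose:
+ * \code
+ * vx_kernel copy_array = vxGetKernelByEnum(context, VX_KERNEL_DEBUG_COPY_ARRAY);
+ * if (vxGetStatus((vx_reference)copy_array) == VX_SUCCESS) {
+ *     // build a node with vxCreateGenericNode(), then release the kernel reference
+ *     vxReleaseKernel(&copy_array);
+ * }
+ * \endcode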
+ * \see group_vision_function_copy_array + */ + VX_KERNEL_DEBUG_COPY_ARRAY = VX_KERNEL_BASE(VX_ID_KHRONOS, VX_LIBRARY_KHR_DEBUG) + 0x1, + + /*! + * \brief The File Writing Kernel for Images. + * \param [in] vx_image The input image. + * \param [in] vx_array The name of the file. + * \see group_vision_function_fwrite_image + */ + VX_KERNEL_DEBUG_FWRITE_IMAGE = VX_KERNEL_BASE(VX_ID_KHRONOS, VX_LIBRARY_KHR_DEBUG) + 0x2, + + /*! + * \brief The File Writing Kernel for Arrays + * \param [in] vx_array The input array. + * \param [in] vx_array The name of the file. + * \see group_vision_function_fwrite_array + */ + VX_KERNEL_DEBUG_FWRITE_ARRAY = VX_KERNEL_BASE(VX_ID_KHRONOS, VX_LIBRARY_KHR_DEBUG) + 0x3, + + /*! + * \brief The File Reading Kernel for images. + * \param [in] vx_array The name of the file to read. + * \param [out] vx_image The output image. + * \see group_vision_function_fread_image + */ + VX_KERNEL_DEBUG_FREAD_IMAGE = VX_KERNEL_BASE(VX_ID_KHRONOS, VX_LIBRARY_KHR_DEBUG) + 0x4, + + /*! + * \brief The File Reading Kernel for Arrays. + * \param [in] vx_array The name of the file to read. + * \param [out] vx_image The output image. + * \see group_vision_function_fread_array + */ + VX_KERNEL_DEBUG_FREAD_ARRAY = VX_KERNEL_BASE(VX_ID_KHRONOS, VX_LIBRARY_KHR_DEBUG) + 0x5, + + /*! + * \brief Fills the image with a given value. + * \param [in] vx_uint32 + * \param [out] vx_image + * \ingroup group_vision_function_fill_image + */ + VX_KERNEL_FILL_IMAGE = VX_KERNEL_BASE(VX_ID_KHRONOS, VX_LIBRARY_KHR_DEBUG) + 0x6, + + /*! + * \brief Checks an image against a known value and returns a number of + * errors. + * \param [in] vx_image + * \param [in] vx_uint32 + * \param [out] vx_scalar + * \ingroup group_vision_function_check_image + */ + VX_KERNEL_CHECK_IMAGE = VX_KERNEL_BASE(VX_ID_KHRONOS, VX_LIBRARY_KHR_DEBUG) + 0x7, + + /*! + * \brief Checks an array against a known value and returns a number of + * errors. + * \param [in] vx_array + * \param [in] vx_uint8 + * \param [out] vx_scalar + * \ingroup group_vision_function_check_array + */ + VX_KERNEL_CHECK_ARRAY = VX_KERNEL_BASE(VX_ID_KHRONOS, VX_LIBRARY_KHR_DEBUG) + 0x8, + + /*! + * \brief Compares two images and returns the number of differences. + * \param [in] vx_image + * \param [in] vx_image + * \param [out] vx_scalar + * \ingroup group_vision_function_compare_image + */ + VX_KERNEL_COMPARE_IMAGE = VX_KERNEL_BASE(VX_ID_KHRONOS, VX_LIBRARY_KHR_DEBUG) + 0x9, + + /*! + * \brief Copies an image from a memory area. + * \param [in] void * + * \param [out] vx_image + * \see group_vision_function_copy_ptr + */ + VX_KERNEL_COPY_IMAGE_FROM_PTR = VX_KERNEL_BASE(VX_ID_KHRONOS, VX_LIBRARY_KHR_DEBUG) + 0xA, +}; + +/******************************************************************************/ +// GRAPH MODE FUNCTIONS +/******************************************************************************/ + +#ifdef __cplusplus +extern "C" { +#endif +/*! + * \brief [Graph] Creates a Copy Image Node. + * \param [in] graph The handle to the graph. + * \param [in] input The input image. + * \param [out] output The output image. + * \see VX_KERNEL_COPY_IMAGE + * \note Graph Mode Function. + * \ingroup group_vision_function_copy_image + */ +vx_node vxCopyImageNode(vx_graph graph, vx_image input, vx_image output); + +/*! + * \brief [Graph] Creates a Copy Array Node. + * \param [in] graph The handle to the graph. + * \param [in] input The input array. + * \param [out] output The output array. + * \see VX_KERNEL_COPY_ARRAY + * \note Graph Mode Function. 
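+ *
+ * A minimal graph-mode sketch (illustrative only; the item type and capacity are assumptions):
+ * \code
+ * vx_array src = vxCreateArray(context, VX_TYPE_UINT8, 1024);
+ * vx_array dst = vxCreateArray(context, VX_TYPE_UINT8, 1024);
+ * vx_graph g   = vxCreateGraph(context);
+ * vx_node  n   = vxCopyArrayNode(g, src, dst);
+ * if (vxVerifyGraph(g) == VX_SUCCESS)
+ *     vxProcessGraph(g);
+ * \endcode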
+ * \ingroup group_vision_function_copy_array + */ +vx_node vxCopyArrayNode(vx_graph graph, vx_array input, vx_array output); + +/*! \brief [Graph] Writes the source image to the file. + * \param [in] graph The handle to the graph. + * \param [in] image The input array. + * \param [in] name The name of the file. + * \note Graph Mode Function. + * \ingroup group_vision_function_fwrite_image + */ +vx_node vxFWriteImageNode(vx_graph graph, vx_image image, vx_char name[VX_MAX_FILE_NAME]); + +/*! \brief [Graph] Writes the source array to the file. + * \param [in] graph The handle to the graph. + * \param [in] array The input array. + * \param [in] name The name of the file. + * \note Graph Mode Function. + * \ingroup group_vision_function_fwrite_array + */ +vx_node vxFWriteArrayNode(vx_graph graph, vx_array array, vx_char name[VX_MAX_FILE_NAME]); + +/*! \brief [Graph] Writes the source image to the file. + * \param [in] graph The handle to the graph. + * \param [in] name The name of the file. + * \param [out] image The output image. + * \note Graph Mode Function. + * \ingroup group_vision_function_fread_image + */ +vx_node vxFReadImageNode(vx_graph graph, vx_char name[VX_MAX_FILE_NAME], vx_image image); + +/*! \brief [Graph] Writes the source array to the file. + * \param [in] graph The handle to the graph. + * \param [in] name The name of the file. + * \param [out] array The output array. + * \note Graph Mode Function. + * \ingroup group_vision_function_fread_array + */ +vx_node vxFReadArrayNode(vx_graph graph, vx_char name[VX_MAX_FILE_NAME], vx_array array); + +/*! \brief [Graph] Adds 1 to each uint8 pixel. This will clamp at 255. + * \param [in] graph The handle to the graph. + * \param [in,out] image The image to increment. + * \note Graph Mode Function + * \ingroup group_vision_function_plus1 + */ +vx_node vxPlusOneNode(vx_graph graph, vx_image image); + +/*! + * \brief [Graph] Fills an image with a known value. + * \param [in] graph The handle to the graph. + * \param [in] value The known value to fill the image with. + * \param [out] output The image to fill. + * \note Graph Mode Function + * \ingroup group_vision_function_fill_image + */ +vx_node vxFillImageNode(vx_graph graph, vx_uint32 value, vx_image output); + +/*! + * \brief [Graph] Checks an image against a known value. + * \param [in] graph The handle to the graph. + * \param [in] input The image to check. + * \param [in] value The known value to check the image against. + * \param [out] errs The handle to the number of errors found. + * \note Graph Mode Function + * \ingroup group_vision_function_check_image + */ +vx_node vxCheckImageNode(vx_graph graph, vx_image input, vx_uint32 value, vx_scalar errs); + +/*! + * \brief [Graph] Checks a array for a known value. + * \param [in] graph The handle to the graph. + * \param [in] input The array to check. + * \param [in] value The known value to check against. + * \param [out] errs An output of the number of errors. + * \note Graph Mode Function + * \ingroup group_vision_function_check_array + */ +vx_node vxCheckArrayNode(vx_graph graph, vx_array input, vx_uint8 value, vx_scalar errs); + +/*! + * \brief [Graph] Compares two images and returns the number of pixel sub-channels + * which are different. + * \param [in] graph The handle to the graph. + * \param [in] a The first image. + * \param [in] b The second image. + * \param [out] diffs The handle to scalar to hold the number of differences. 
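+ *
+ * A usage sketch (not from the original header); image_a, image_b and graph are assumed to
+ * already exist, and VX_TYPE_UINT32 for the difference-count scalar is an assumption here:
+ * \code
+ * vx_uint32 zero = 0, count = 0;
+ * vx_scalar diffs = vxCreateScalar(context, VX_TYPE_UINT32, &zero);
+ * vx_node   cmp   = vxCompareImagesNode(graph, image_a, image_b, diffs);
+ * // after vxProcessGraph(graph), read the count back:
+ * vxCopyScalar(diffs, &count, VX_READ_ONLY, VX_MEMORY_TYPE_HOST);
+ * \endcode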
+ * \note Graph Mode Function + * \ingroup group_vision_function_compare_image + */ +vx_node vxCompareImagesNode(vx_graph graph, vx_image a, vx_image b, vx_scalar diffs); + +/*! \brief [Graph] Copies a HOST memory area into an image. + * \param [in] graph The handle to the graph. + * \param [in] ptr The input pointer to the memory area to copy. + * \param [out] output The output image. + * \note Graph Mode Function + * \ingroup group_vision_function_copy_ptr + */ +vx_node vxCopyImageFromPtrNode(vx_graph graph, void *ptr, vx_image output); + +/******************************************************************************/ +// IMMEDIATE MODE FUNCTION +/******************************************************************************/ + +/*! \brief [Immediate] Copies the source image to the destination image. + * \param [in] src The input image. + * \param [in] dst The output image. + * \note Immediate Mode Function. + * \ingroup group_vision_function_copy_image + */ +vx_status vxuCopyImage(vx_context context, vx_image src, vx_image dst); + +/*! \brief [Immediate] Copies the source array to the destination array. + * \param [in] src The input array. + * \param [in] dst The output array. + * \note Immediate Mode Function. + * \ingroup group_vision_function_copy_array + */ +vx_status vxuCopyArray(vx_context context, vx_array src, vx_array dst); + +/*! \brief [Immediate] Writes the source image to the file. + * \param [in] image The input array. + * \param [in] name The name of the file. + * \note Immediate Mode Function. + * \ingroup group_vision_function_fwrite_image + */ +vx_status vxuFWriteImage(vx_context context, vx_image image, vx_char name[VX_MAX_FILE_NAME]); + +/*! \brief [Immediate] Writes the source array to the file. + * \param [in] array The input array. + * \param [in] name The name of the file. + * \note Immediate Mode Function. + * \ingroup group_vision_function_fwrite_array + */ +vx_status vxuFWriteArray(vx_context context, vx_array array, vx_char name[VX_MAX_FILE_NAME]); + +/*! \brief [Immediate] Reads the source image from the file. + * \param [in] name The name of the file. + * \param [out] image The output image. + * \note Immediate Mode Function. + * \ingroup group_vision_function_fread_image + */ +vx_status vxuFReadImage(vx_context context, vx_char name[VX_MAX_FILE_NAME], vx_image image); + +/*! \brief [Immediate] Reads the source array from the file. + * \param [in] name The name of the file. + * \param [out] array The output array. + * \note Immediate Mode Function. + * \ingroup group_vision_function_fread_array + */ +vx_status vxuFReadArray(vx_context context, vx_char name[VX_MAX_FILE_NAME], vx_array array); + +/*! \brief [Immediate] Adds 1 to each uint8 pixel. This will clamp at 255. + * \param [in,out] image The image to increment. + * \note Immediate Mode Function + * \ingroup group_vision_function_plus1 + */ +vx_node vxuPlusOneNode(vx_context context, vx_image image); + +/*! + * \brief [Immediate] Fills an image with a known value. + * \param [in] value The known value to fill the image with. + * \param [out] output The image to fill. + * \note Immediate Mode Function + * \ingroup group_vision_function_fill_image + */ +vx_status vxuFillImage(vx_context context, vx_uint32 value, vx_image output); + +/*! + * \brief [Immediate] Checks an image against a known value. + * \param [in] output The image to check. + * \param [in] value The known value to check the image against. + * \param [out] numErrors The handle to the number of errors found. 
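+ *
+ * A small immediate-mode sketch (illustrative only; the image size and fill value are arbitrary):
+ * \code
+ * vx_image  img    = vxCreateImage(context, 640, 480, VX_DF_IMAGE_U8);
+ * vx_uint32 errors = 0;
+ * vxuFillImage(context, 0x55, img);
+ * vxuCheckImage(context, img, 0x55, &errors);
+ * // errors is expected to be 0 when the fill succeeded
+ * vxReleaseImage(&img);
+ * \endcode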
+ * \note Immediate Mode Function + * \ingroup group_vision_function_check_image + */ +vx_status vxuCheckImage(vx_context context, vx_image input, vx_uint32 value, vx_uint32 *numErrors); + +/*! + * \brief [Immediate] Checks a array for a known value. + * \param [in] input The array to check. + * \param [in] value The known value to check against. + * \param [out] numErrors An output of the number of errors. + * \note Immediate Mode Function + * \ingroup group_vision_function_check_array + */ +vx_status vxuCheckArray(vx_context context, vx_array input, vx_uint8 value, vx_uint32 *numErrors); + +/*! + * \brief [Immediate] Compares two images and returns the number of pixel sub-channels + * which are different. + * \param [in] a The first image. + * \param [in] b The second image. + * \param [out] numDiffs The handle to scalar to hold the number of differences. + * \note Immediate Mode Function + * \ingroup group_vision_function_compare_image + */ +vx_status vxuCompareImages(vx_context context, vx_image a, vx_image b, vx_uint32 *numDiffs); + +/*! \brief [Immediate] Copies a HOST memory area into an image. + * \param [in] ptr The input pointer to the memory area to copy. + * \param [out] output The output image. + * \note Immediate Mode Function + * \ingroup group_vision_function_copy_ptr + */ +vx_status vxuCopyImageFromPtr(vx_context context, void *ptr, vx_image output); + +#ifdef __cplusplus +} +#endif + +#endif + diff --git a/unified-tina/inc/VX/vx_lib_extras.h b/unified-tina/inc/VX/vx_lib_extras.h new file mode 100644 index 0000000..d4339a2 --- /dev/null +++ b/unified-tina/inc/VX/vx_lib_extras.h @@ -0,0 +1,252 @@ +/* + + * Copyright (c) 2012-2017 The Khronos Group Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef _VX_EXT_EXTRAS_H_ +#define _VX_EXT_EXTRAS_H_ + +/*! \file + * \brief Extras Extension. + * + * \defgroup group_extras_ext Khronos Extras Extension. + * \brief A Set of Kernels which extend OpenVX. + * + * \defgroup group_vision_function_laplacian_image Kernel: Laplacian Filter + * \brief Computes a Laplacian filter over a window of the input image. + * \details This filter uses the follow convolution matrix: + \f[ + \mathbf{K}_{gaussian} = \begin{vmatrix} + 1 & 1 & 1\\ + 1 &-8 & 1\\ + 1 & 1 & 1 + \end{vmatrix} * \frac{1}{1} + \f] + * + * \defgroup group_vision_function_scharr3x3 Kernel: Sobel 3x3 + * \brief The Scharr Image Filter Kernel + * \details This kernel produces two output planes (one can be omitted) + * in the x and y plane. The Scharr operators \f$G_x, G_y\f$ are defined as: + \f[ + \mathbf{G}_x=\begin{vmatrix} + -3 & 0 & +3\\ + -10& 0 & +10\\ + -3 & 0 & +3 + \end{vmatrix} + , + \mathbf{G}_y=\begin{vmatrix} + -3 & -10 & -3 \\ + 0 & 0 & 0 \\ + +3 & +10 & +3 + \end{vmatrix} + + \f] + * + */ + +/*! \brief The Khronos Extras Library + * \ingroup group_extras_ext + */ +#define VX_LIBRARY_KHR_EXTRAS (0xFE) + +/*! \brief The Khronos Extras Kernels. + * \ingroup group_extras_ext + */ +enum vx_kernel_extras_ext_e { + /*! 
\brief The Non-Maximum Supression Kernel for Canny. + * \note Use "org.khronos.extra.nonmaximasuppression" to \ref vxGetKernelByName. + * \param [in] vx_image The magnitude image in VX_DF_IMAGE_U8. + * \param [in] vx_image The phase image in VX_DF_IMAGE_U8. + * \param [out] vx_image The edge image in VX_DF_IMAGE_U8. + * \ingroup group_vision_function_nonmaxsuppression + */ + VX_KERNEL_EXTRAS_NONMAXSUPPRESSION_CANNY = VX_KERNEL_BASE(VX_ID_KHRONOS, VX_LIBRARY_KHR_EXTRAS) + 0x0, + + /*! \brief The laplacian filter kernel. + * \note Use "org.khronos.extras.laplacian3x3" to \ref vxGetKernelByName. + * \param [in] vx_image The VX_DF_IMAGE_U8 input image. + * \param [out] vx_image The VX_DF_IMAGE_U8 output image. + * \see group_vision_function_laplacian_image + */ + VX_KERNEL_EXTRAS_LAPLACIAN_3x3 = VX_KERNEL_BASE(VX_ID_KHRONOS, VX_LIBRARY_KHR_EXTRAS) + 0x1, + + /*! \brief The scharr filter kernel. + * \note Use "org.khronos.extras.scharr3x3" to \ref vxGetKernelByName. + * \param [in] vx_image The VX_DF_IMAGE_U8 input image. + * \param [out] vx_image The VX_DF_IMAGE_S16 output gradient x image. + * \param [out] vx_image The VX_DF_IMAGE_S16 output gradient y image. + * \see group_vision_function_scharr3x3 + */ + VX_KERNEL_EXTRAS_SCHARR_3x3 = VX_KERNEL_BASE(VX_ID_KHRONOS, VX_LIBRARY_KHR_EXTRAS) + 0x2, + + /*! \brief The Harris Score Kernel. + * \note use "org.khronos.extras.harris_score". + * \param [in] vx_image A VX_DF_IMAGE_S16 X Gradient + * \param [in] vx_image A VX_DF_IMAGE_S16 Y Gradient + * \param [in] vx_scalar A block size. + * \param [out] vx_image A VX_DF_IMAGE_S32 corner score per pixel. + * \ingroup group_vision_function_harris_score + */ + VX_KERNEL_EXTRAS_HARRIS_SCORE = VX_KERNEL_BASE(VX_ID_KHRONOS, VX_LIBRARY_KHR_EXTRAS) + 0x3, + + /*! \brief The Sobel MxN kernel. + * \note Use "org.khronos.extras.sobelMxN" to \ref vxGetKernelByName. + * \param [in] vx_image The VX_DF_IMAGE_U8 input image. + * \param [in] vx_scalar Window Size (3,5,7) + * \param [out] vx_image The VX_DF_IMAGE_S16 output gradient x image. + * \param [out] vx_image The VX_DF_IMAGE_S16 output gradient y image. + * \see group_vision_function_sobelmxn + */ + VX_KERNEL_EXTRAS_SOBEL_MxN = VX_KERNEL_BASE(VX_ID_KHRONOS, VX_LIBRARY_KHR_EXTRAS) + 0x4, + + /*! \brief The image to list converter. + * \param [in] vx_image The VX_DF_IMAGE_U8 or VX_DF_IMAGE_S32 image. + * \param [out] vx_array The array of output + * \param [out] vx_scalar The total number of non zero points in image (optional) + * \ingroup group_vision_function_image_lister + */ + VX_KERNEL_EXTRAS_IMAGE_LISTER = VX_KERNEL_BASE(VX_ID_KHRONOS, VX_LIBRARY_KHR_EXTRAS) + 0x5, + + /*! \brief The Euclidean Non-Maximum Suppression Kernel for Harris Corners. + * \param [in] vx_image The VX_DF_IMAGE_F32 image. + * \param [in] vx_scalar The minimum threshold + * \param [in] vx_scalar The euclidean distance from the considered pixel. + * \param [out] vx_image The VX_DF_IMAGE_F32 image. + * \ingroup group_vision_function_euclidean_nonmax + */ + VX_KERNEL_EXTRAS_EUCLIDEAN_NONMAXSUPPRESSION_HARRIS = VX_KERNEL_BASE(VX_ID_KHRONOS, VX_LIBRARY_KHR_EXTRAS) + 0x6, + + /*! \brief Elementwise binary norm kernel. + * \param [in] vx_image Left image (VX_DF_IMAGE_S16). + * \param [in] vx_image Right image (VX_DF_IMAGE_S16). + * \param [in] vx_scalar Norm type (vx_norm_type_e). + * \param [in] vx_image Output image (VX_DF_IMAGE_U16). + */ + VX_KERNEL_EXTRAS_ELEMENTWISE_NORM = VX_KERNEL_BASE(VX_ID_KHRONOS, VX_LIBRARY_KHR_EXTRAS) + 0x7, + + /*! \brief Edge tracing kernel. 
+ * \param [in] vx_image Norm image (VX_DF_IMAGE_U16). + * \param [in] vx_image Phase image (VX_DF_IMAGE_U8). + * \param [in] vx_threshold Threshold (VX_THRESHOLD_TYPE_RANGE). + * \param [out] vx_image Output binary image (VX_DF_IMAGE_U8). + */ + VX_KERNEL_EXTRAS_EDGE_TRACE = VX_KERNEL_BASE(VX_ID_KHRONOS, VX_LIBRARY_KHR_EXTRAS) + 0x8 +}; + +/*! \brief Extra VX_DF_IMAGE codes supported by this extension. */ +enum _vx_extra_df_image { + /*! \brief A single plane of 32 bit float data. + * The range of the data is not specified. + */ + VX_DF_IMAGE_F32 = VX_DF_IMAGE('F','0','3','2'), +}; + +#ifdef __cplusplus +extern "C" { +#endif + +/*! \brief [Graph] Creates a Non Max Suppress Node. + * \param [in] graph The handle to the graph. + * \param [in] input The input image in VX_DF_IMAGE_U8 format. + * \param [out] output The output image in VX_DF_IMAGE_U8 format. + * \ingroup group_vision_function_laplacian_image + */ +vx_node vxNonMaxSuppressionCannyNode(vx_graph graph, vx_image mag, vx_image phase, vx_image edge); + +/*! \brief [Immediate] Creates a Non Max Suppress Node. + * \param [in] graph The handle to the graph. + * \param [in] input The input image in VX_DF_IMAGE_U8 format. + * \param [out] output The output image in VX_DF_IMAGE_U8 format. + * \ingroup group_vision_function_laplacian_image + */ +vx_status vxuNonMaxSuppressionCanny(vx_context context, vx_image mag, vx_image phase, vx_image edge); + +/*! \brief [Graph] Creates a Laplacian Filter Node. + * \param [in] graph The handle to the graph. + * \param [in] input The input image in VX_DF_IMAGE_U8 format. + * \param [out] output The output image in VX_DF_IMAGE_U8 format. + * \ingroup group_vision_function_laplacian_image + */ +vx_node vxLaplacian3x3Node(vx_graph graph, vx_image input, vx_image output); + +/*! \brief [Immediate] Computes a laplacian filter on the image by a 3x3 window. + * \param [in] input The input image in VX_DF_IMAGE_U8 format. + * \param [out] output The output image in VX_DF_IMAGE_U8 format. + * \ingroup group_vision_function_laplacian_image + */ +vx_status vxuLaplacian3x3(vx_context context, vx_image input, vx_image output); + +/*! \brief [Graph] Creates a Scharr Filter Node. + * \param [in] graph The handle to the graph. + * \param [in] input The input image in VX_DF_IMAGE_U8 format. + * \param [out] output The output image in VX_DF_IMAGE_U8 format. + * \ingroup group_vision_function_laplacian_image + */ +vx_node vxScharr3x3Node(vx_graph graph, vx_image input, vx_image output1, vx_image output2); + +/*! \brief [Immediate] Computes a Scharr filter on the image by a 3x3 window. + * \param [in] input The input image in VX_DF_IMAGE_U8 format. + * \param [out] output The output image in VX_DF_IMAGE_U8 format. 
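+ *
+ * A minimal immediate-mode sketch (sizes are illustrative; the two outputs receive the
+ * x and y gradients):
+ * \code
+ * vx_image src = vxCreateImage(context, 640, 480, VX_DF_IMAGE_U8);
+ * vx_image gx  = vxCreateImage(context, 640, 480, VX_DF_IMAGE_S16);
+ * vx_image gy  = vxCreateImage(context, 640, 480, VX_DF_IMAGE_S16);
+ * vx_status status = vxuScharr3x3(context, src, gx, gy);
+ * \endcode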
+ * \ingroup group_vision_function_laplacian_image + */ +vx_status vxuScharr3x3(vx_context context, vx_image input, vx_image output1, vx_image output2); + +vx_node vxSobelMxNNode(vx_graph graph, vx_image input, vx_scalar win, vx_image gx, vx_image gy); + +vx_status vxuSobelMxN(vx_context context, vx_image input, vx_scalar win, vx_image gx, vx_image gy); + +vx_node vxHarrisScoreNode(vx_graph graph, + vx_image gx, + vx_image gy, + vx_scalar sensitivity, + vx_scalar grad_size, + vx_scalar block_size, + vx_scalar shift, + vx_image score); + +vx_status vxuHarrisScore(vx_context context, vx_image gx, + vx_image gy, + vx_scalar sensitivity, + vx_scalar grad_size, + vx_scalar block_size, + vx_scalar shift, + vx_image score); + +vx_node vxEuclideanNonMaxHarrisNode(vx_graph graph, + vx_image input, + vx_scalar strength_thresh, + vx_scalar min_distance, + vx_image output); + +vx_status vxuEuclideanNonMaxHarris(vx_context context, vx_image input, + vx_scalar strength_thresh, + vx_scalar min_distance, + vx_image output); + +vx_node vxImageListerNode(vx_graph graph, vx_image input, vx_array arr, vx_scalar num_points); + +vx_status vxuImageLister(vx_context context, vx_image input, + vx_array arr, vx_scalar num_points); + +vx_node vxElementwiseNormNode(vx_graph graph, vx_image input_x, vx_image input_y, vx_scalar norm_type, vx_image output); + +vx_node vxEdgeTraceNode(vx_graph graph, vx_image norm, vx_threshold threshold, vx_image output); + +#ifdef __cplusplus +} +#endif + +#endif /* _VX_EXT_EXTRAS_H_ */ + diff --git a/unified-tina/inc/VX/vx_lib_xyz.h b/unified-tina/inc/VX/vx_lib_xyz.h new file mode 100644 index 0000000..8dd7e25 --- /dev/null +++ b/unified-tina/inc/VX/vx_lib_xyz.h @@ -0,0 +1,109 @@ +/* + + * Copyright (c) 2012-2017 The Khronos Group Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#pragma once +#ifndef _OPENVX_EXT_XYZ_H_ +#define _OPENVX_EXT_XYZ_H_ + +/*! + * \file + * \brief An example of how to wrap a User Extension Kernel. + * + * \defgroup group_xyz_ext The Example User Kernel Extension + * + */ + +#include + +/*! + * \file vx_ext_xyz.h + * \brief The example header for how to write a user mode extension to OpenVX. + */ + +/*! \brief The XYZ Data area in bytes + * \ingroup group_xyz_ext + */ +#define XYZ_DATA_AREA (1024) + +/*! \brief The required number of items in the temp array + * \ingroup group_xyz_ext + */ +#define XYZ_TEMP_NUMITEMS (374) + +/*! \brief The minimum value of the scalar for the XYZ Kernel. + * \ingroup group_xyz_ext + */ +#define XYZ_VALUE_MIN (-10) + +/*! \brief The maximum value of the scalar for the XYZ Kernel. + * \ingroup group_xyz_ext + */ +#define XYZ_VALUE_MAX (10) + +//! [KERNEL ENUM] +#define VX_KERNEL_NAME_KHR_XYZ "org.khronos.example.xyz" +/*! \brief The XYZ Example Library Set + * \ingroup group_xyz_ext + */ +#define VX_LIBRARY_XYZ (0x3) // assigned from Khronos, vendors control their own + +/*! \brief The list of XYZ Kernels. + * \ingroup group_xyz_ext + */ +enum vx_kernel_xyz_ext_e { + /*! 
\brief The Example User Defined Kernel */ + VX_KERNEL_KHR_XYZ = VX_KERNEL_BASE(VX_ID_DEFAULT, VX_LIBRARY_XYZ) + 0x0, + // up to 0xFFF kernel enums can be created. +}; +//! [KERNEL ENUM] + +#ifdef __cplusplus +extern "C" { +#endif + +//! [node] +/*! \brief [Graph] This is an example ISV or OEM provided node which executes + * in the Graph to call the XYZ kernel. + * \param [in] graph The handle to the graph in which to instantiate the node. + * \param [in] input The input image. + * \param [in] value The input scalar value + * \param [out] output The output image. + * \param [in,out] temp A temp array for some data which is needed for + * every iteration. + * \ingroup group_example_kernel + */ +vx_node vxXYZNode(vx_graph graph, vx_image input, vx_uint32 value, vx_image output, vx_array temp); +//! [node] + +//! [vxu] +/*! \brief [Immediate] This is an example of an immediate mode version of the XYZ node. + * \param [in] context The overall context of the implementation. + * \param [in] input The input image. + * \param [in] value The input scalar value + * \param [out] output The output image. + * \param [in,out] temp A temp array for some data which is needed for + * every iteration. + * \ingroup group_example_kernel + */ +vx_status vxuXYZ(vx_context context, vx_image input, vx_uint32 value, vx_image output, vx_array temp); +//! [vxu] + +#ifdef __cplusplus +} +#endif + +#endif diff --git a/unified-tina/inc/VX/vx_nodes.h b/unified-tina/inc/VX/vx_nodes.h new file mode 100644 index 0000000..1e0cb94 --- /dev/null +++ b/unified-tina/inc/VX/vx_nodes.h @@ -0,0 +1,1004 @@ +/* + + * Copyright (c) 2012-2017 The Khronos Group Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef _OPENVX_NODES_H_ +#define _OPENVX_NODES_H_ + +/*! + * \file vx_nodes.h + * \brief The "Simple" API interface for OpenVX. These APIs are just + * wrappers around the more verbose functions defined in \ref vx_api.h. + */ + +#ifdef __cplusplus +extern "C" { +#endif + +/*! \brief [Graph] Creates a color conversion node. + * \param [in] graph The reference to the graph. + * \param [in] input The input image from which to convert. + * \param [out] output The output image to which to convert, which must have the same dimensions as the input image. + * \see VX_KERNEL_COLOR_CONVERT + * \ingroup group_vision_function_colorconvert + * \return \ref vx_node. + * \retval vx_node A node reference. Any possible errors preventing a successful creation should be checked using \ref vxGetStatus + */ +VX_API_ENTRY vx_node VX_API_CALL vxColorConvertNode(vx_graph graph, vx_image input, vx_image output); + +/*! \brief [Graph] Creates a channel extract node. + * \param [in] graph The reference to the graph. + * \param [in] input The input image. Must be one of the defined \ref vx_df_image_e multi-channel formats. + * \param [in] channel The \ref vx_channel_e channel to extract. + * \param [out] output The output image. Must be \ref VX_DF_IMAGE_U8, and must have the same dimensions as the input image. 
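+ *
+ * A graph-mode sketch (illustrative; the NV12 input format and the image size are assumptions):
+ * \code
+ * vx_image yuv = vxCreateImage(context, 640, 480, VX_DF_IMAGE_NV12);
+ * vx_image y   = vxCreateImage(context, 640, 480, VX_DF_IMAGE_U8);
+ * vx_node  n   = vxChannelExtractNode(graph, yuv, VX_CHANNEL_Y, y);
+ * \endcode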
+ * \see VX_KERNEL_CHANNEL_EXTRACT + * \ingroup group_vision_function_channelextract + * \return \ref vx_node. + * \retval vx_node A node reference. Any possible errors preventing a successful creation should be checked using \ref vxGetStatus + */ +VX_API_ENTRY vx_node VX_API_CALL vxChannelExtractNode(vx_graph graph, + vx_image input, + vx_enum channel, + vx_image output); + +/*! \brief [Graph] Creates a channel combine node. + * \param [in] graph The graph reference. + * \param [in] plane0 The plane that forms channel 0. Must be \ref VX_DF_IMAGE_U8. + * \param [in] plane1 The plane that forms channel 1. Must be \ref VX_DF_IMAGE_U8. + * \param [in] plane2 [optional] The plane that forms channel 2. Must be \ref VX_DF_IMAGE_U8. + * \param [in] plane3 [optional] The plane that forms channel 3. Must be \ref VX_DF_IMAGE_U8. + * \param [out] output The output image. The format of the image must be defined, even if the image is virtual. Must have the same dimensions as the input images + * \see VX_KERNEL_CHANNEL_COMBINE + * \ingroup group_vision_function_channelcombine + * \return \ref vx_node. + * \retval vx_node A node reference. Any possible errors preventing a successful creation should be checked using \ref vxGetStatus + */ +VX_API_ENTRY vx_node VX_API_CALL vxChannelCombineNode(vx_graph graph, + vx_image plane0, + vx_image plane1, + vx_image plane2, + vx_image plane3, + vx_image output); + +/*! \brief [Graph] Creates a Phase node. + * \param [in] graph The reference to the graph. + * \param [in] grad_x The input x image. This must be in \ref VX_DF_IMAGE_S16 format. + * \param [in] grad_y The input y image. This must be in \ref VX_DF_IMAGE_S16 format. + * \param [out] orientation The phase image. This is in \ref VX_DF_IMAGE_U8 format, and must have the same dimensions as the input images. + * \see VX_KERNEL_PHASE + * \ingroup group_vision_function_phase + * \return \ref vx_node. + * \retval vx_node A node reference. Any possible errors preventing a successful creation should be checked using \ref vxGetStatus + */ +VX_API_ENTRY vx_node VX_API_CALL vxPhaseNode(vx_graph graph, vx_image grad_x, vx_image grad_y, vx_image orientation); + +/*! \brief [Graph] Creates a Sobel3x3 node. + * \param [in] graph The reference to the graph. + * \param [in] input The input image in \ref VX_DF_IMAGE_U8 format. + * \param [out] output_x [optional] The output gradient in the x direction in \ref VX_DF_IMAGE_S16. Must have the same dimensions as the input image. + * \param [out] output_y [optional] The output gradient in the y direction in \ref VX_DF_IMAGE_S16. Must have the same dimensions as the input image. + * \see VX_KERNEL_SOBEL_3x3 + * \ingroup group_vision_function_sobel3x3 + * \return \ref vx_node. + * \retval vx_node A node reference. Any possible errors preventing a successful creation should be checked using \ref vxGetStatus + */ +VX_API_ENTRY vx_node VX_API_CALL vxSobel3x3Node(vx_graph graph, vx_image input, vx_image output_x, vx_image output_y); + + +/*! \brief [Graph] Create a Magnitude node. + * \param [in] graph The reference to the graph. + * \param [in] grad_x The input x image. This must be in \ref VX_DF_IMAGE_S16 format. + * \param [in] grad_y The input y image. This must be in \ref VX_DF_IMAGE_S16 format. + * \param [out] mag The magnitude image. This is in \ref VX_DF_IMAGE_S16 format. Must have the same dimensions as the input image. + * \see VX_KERNEL_MAGNITUDE + * \ingroup group_vision_function_magnitude + * \return \ref vx_node. + * \retval vx_node A node reference. 
Any possible errors preventing a successful creation should be checked using \ref vxGetStatus + */ +VX_API_ENTRY vx_node VX_API_CALL vxMagnitudeNode(vx_graph graph, vx_image grad_x, vx_image grad_y, vx_image mag); + +/*! \brief [Graph] Creates a Scale Image Node. + * \param [in] graph The reference to the graph. + * \param [in] src The source image of type \ref VX_DF_IMAGE_U8. + * \param [out] dst The destination image of type \ref VX_DF_IMAGE_U8. + * \param [in] type The interpolation type to use. \see vx_interpolation_type_e. + * \ingroup group_vision_function_scale_image + * \note The destination image must have a defined size and format. The border modes + * \ref VX_NODE_BORDER value \ref VX_BORDER_UNDEFINED, + * \ref VX_BORDER_REPLICATE and \ref VX_BORDER_CONSTANT are supported. + * \return \ref vx_node. + * \retval vx_node A node reference. Any possible errors preventing a successful creation should be checked using \ref vxGetStatus + */ +VX_API_ENTRY vx_node VX_API_CALL vxScaleImageNode(vx_graph graph, vx_image src, vx_image dst, vx_enum type); + +/*! \brief [Graph] Creates a Table Lookup node. If a value from the input image is not present in the lookup table, the result is undefined. + * \param [in] graph The reference to the graph. + * \param [in] input The input image in \ref VX_DF_IMAGE_U8 or \ref VX_DF_IMAGE_S16. + * \param [in] lut The LUT which is of type \ref VX_TYPE_UINT8 if input image is \ref VX_DF_IMAGE_U8 or \ref VX_TYPE_INT16 if input image is \ref VX_DF_IMAGE_S16. + * \param [out] output The output image of the same type and size as the input image. + * \ingroup group_vision_function_lut + * \return \ref vx_node. + * \retval vx_node A node reference. Any possible errors preventing a successful creation should be checked using \ref vxGetStatus. + */ +VX_API_ENTRY vx_node VX_API_CALL vxTableLookupNode(vx_graph graph, vx_image input, vx_lut lut, vx_image output); + +/*! \brief [Graph] Creates a Histogram node. + * \param [in] graph The reference to the graph. + * \param [in] input The input image in \ref VX_DF_IMAGE_U8. + * \param [out] distribution The output distribution. + * \ingroup group_vision_function_histogram + * \return \ref vx_node. + * \retval vx_node A node reference. Any possible errors preventing a successful creation should be checked using \ref vxGetStatus + */ +VX_API_ENTRY vx_node VX_API_CALL vxHistogramNode(vx_graph graph, vx_image input, vx_distribution distribution); + +/*! \brief [Graph] Creates a Histogram Equalization node. + * \param [in] graph The reference to the graph. + * \param [in] input The grayscale input image in \ref VX_DF_IMAGE_U8. + * \param [out] output The grayscale output image of type \ref VX_DF_IMAGE_U8 with equalized brightness and contrast and same size as the input image. + * \ingroup group_vision_function_equalize_hist + * \return \ref vx_node. + * \retval vx_node A node reference. Any possible errors preventing a successful creation should be checked using \ref vxGetStatus + */ +VX_API_ENTRY vx_node VX_API_CALL vxEqualizeHistNode(vx_graph graph, vx_image input, vx_image output); + +/*! \brief [Graph] Creates an AbsDiff node. + * \param [in] graph The reference to the graph. + * \param [in] in1 An input image in \ref VX_DF_IMAGE_U8 or \ref VX_DF_IMAGE_S16 format. + * \param [in] in2 An input image in \ref VX_DF_IMAGE_U8 or \ref VX_DF_IMAGE_S16 format. + * \param [out] out The output image in \ref VX_DF_IMAGE_U8 or \ref VX_DF_IMAGE_S16 format, which must have the same dimensions as the input image. 
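+ *
+ * A short sketch (illustrative sizes; a, b and out must share dimensions):
+ * \code
+ * vx_image a   = vxCreateImage(context, 640, 480, VX_DF_IMAGE_U8);
+ * vx_image b   = vxCreateImage(context, 640, 480, VX_DF_IMAGE_U8);
+ * vx_image out = vxCreateImage(context, 640, 480, VX_DF_IMAGE_U8);
+ * vx_node  n   = vxAbsDiffNode(graph, a, b, out);
+ * if (vxGetStatus((vx_reference)n) != VX_SUCCESS) { } // handle node-creation failure
+ * \endcode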
+ * \ingroup group_vision_function_absdiff + * \retval vx_node A node reference. Any possible errors preventing a successful creation should be checked using \ref vxGetStatus + */ +VX_API_ENTRY vx_node VX_API_CALL vxAbsDiffNode(vx_graph graph, vx_image in1, vx_image in2, vx_image out); + +/*! \brief [Graph] Creates a mean value and optionally, a standard deviation node. + * \param [in] graph The reference to the graph. + * \param [in] input The input image. \ref VX_DF_IMAGE_U8 is supported. + * \param [out] mean The \ref VX_TYPE_FLOAT32 average pixel value. + * \param [out] stddev [optional] The \ref VX_TYPE_FLOAT32 standard deviation of the pixel values. + * \ingroup group_vision_function_meanstddev + * \return \ref vx_node. + * \retval vx_node A node reference. Any possible errors preventing a successful creation should be checked using \ref vxGetStatus + */ +VX_API_ENTRY vx_node VX_API_CALL vxMeanStdDevNode(vx_graph graph, vx_image input, vx_scalar mean, vx_scalar stddev); + +/*! \brief [Graph] Creates a Threshold node and returns a reference to it. + * \param [in] graph The reference to the graph in which the node is created. + * \param [in] input The input image. Only images with format \ref VX_DF_IMAGE_U8 + * and \ref VX_DF_IMAGE_S16 are supported. + * \param [in] thresh The thresholding object that defines the parameters of + * the operation. The \ref VX_THRESHOLD_INPUT_FORMAT must be the same as the input image format and + * the \ref VX_THRESHOLD_OUTPUT_FORMAT must be the same as the output image format. + * \param [out] output The output image, that will contain as pixel value + * true and false values defined by \p thresh. Only images with format + * \ref VX_DF_IMAGE_U8 are supported. The dimensions are the same as the input image. + * \ingroup group_vision_function_threshold + * \return \ref vx_node. + * \retval vx_node A node reference. Any possible errors preventing a successful creation + * should be checked using \ref vxGetStatus + */ +VX_API_ENTRY vx_node VX_API_CALL vxThresholdNode(vx_graph graph, vx_image input, vx_threshold thresh, vx_image output); + +/*! \brief [Graph] Creates a Non-Maxima Suppression node. + * \param [in] graph The reference to the graph. + * \param [in] input The input image in \ref VX_DF_IMAGE_U8 or \ref VX_DF_IMAGE_S16 format. + * \param [in] mask [optional] Constrict suppression to a ROI. The mask image is of type \ref VX_DF_IMAGE_U8 and must be the same dimensions as the input image. + * \param [in] win_size The size of window over which to perform the localized non-maxima suppression. Must be odd, and less than or equal to the smallest dimension of the input image. + * \param [out] output The output image, of the same type and size as the input, that has been non-maxima suppressed. + * \ingroup group_vision_function_nms + * \return \ref vx_node. + * \retval vx_node A node reference. Any possible errors preventing a successful creation should be checked using \ref vxGetStatus + */ +VX_API_ENTRY vx_node VX_API_CALL vxNonMaxSuppressionNode(vx_graph graph, vx_image input, vx_image mask, vx_int32 win_size, vx_image output); + +/*! \brief [Graph] Creates an Integral Image Node. + * \param [in] graph The reference to the graph. + * \param [in] input The input image in \ref VX_DF_IMAGE_U8 format. + * \param [out] output The output image in \ref VX_DF_IMAGE_U32 format, which must have the same dimensions as the input image. + * \ingroup group_vision_function_integral_image + * \return \ref vx_node. + * \retval vx_node A node reference. 
Any possible errors preventing a successful creation should be checked using \ref vxGetStatus + */ +VX_API_ENTRY vx_node VX_API_CALL vxIntegralImageNode(vx_graph graph, vx_image input, vx_image output); + +/*! \brief [Graph] Creates an Erosion Image Node. + * \param [in] graph The reference to the graph. + * \param [in] input The input image in \ref VX_DF_IMAGE_U8 format. + * \param [out] output The output image in \ref VX_DF_IMAGE_U8 format, which must have the same dimensions as the input image. + * \ingroup group_vision_function_erode_image + * \return \ref vx_node. + * \retval vx_node A node reference. Any possible errors preventing a successful creation should be checked using \ref vxGetStatus + */ +VX_API_ENTRY vx_node VX_API_CALL vxErode3x3Node(vx_graph graph, vx_image input, vx_image output); + +/*! \brief [Graph] Creates a Dilation Image Node. + * \param [in] graph The reference to the graph. + * \param [in] input The input image in \ref VX_DF_IMAGE_U8 format. + * \param [out] output The output image in \ref VX_DF_IMAGE_U8 format, which must have the same dimensions as the input image. + * \ingroup group_vision_function_dilate_image + * \return \ref vx_node. + * \retval vx_node A node reference. Any possible errors preventing a successful creation should be checked using \ref vxGetStatus + */ +VX_API_ENTRY vx_node VX_API_CALL vxDilate3x3Node(vx_graph graph, vx_image input, vx_image output); + +/*! \brief [Graph] Creates a Median Image Node. + * \param [in] graph The reference to the graph. + * \param [in] input The input image in \ref VX_DF_IMAGE_U8 format. + * \param [out] output The output image in \ref VX_DF_IMAGE_U8 format, which must have the same dimensions as the input image. + * \ingroup group_vision_function_median_image + * \return \ref vx_node. + * \retval vx_node A node reference. Any possible errors preventing a successful creation should be checked using \ref vxGetStatus + */ +VX_API_ENTRY vx_node VX_API_CALL vxMedian3x3Node(vx_graph graph, vx_image input, vx_image output); + +/*! \brief [Graph] Creates a Box Filter Node. + * \param [in] graph The reference to the graph. + * \param [in] input The input image in \ref VX_DF_IMAGE_U8 format. + * \param [out] output The output image in \ref VX_DF_IMAGE_U8 format, which must have the same dimensions as the input image. + * \ingroup group_vision_function_box_image + * \return \ref vx_node. + * \retval vx_node A node reference. Any possible errors preventing a successful creation should be checked using \ref vxGetStatus + */ +VX_API_ENTRY vx_node VX_API_CALL vxBox3x3Node(vx_graph graph, vx_image input, vx_image output); + +/*! \brief [Graph] Creates a Gaussian Filter Node. + * \param [in] graph The reference to the graph. + * \param [in] input The input image in \ref VX_DF_IMAGE_U8 format. + * \param [out] output The output image in \ref VX_DF_IMAGE_U8 format, which must have the same dimensions as the input image. + * \ingroup group_vision_function_gaussian_image + * \return \ref vx_node. + * \retval vx_node A node reference. Any possible errors preventing a successful creation should be checked using \ref vxGetStatus + */ +VX_API_ENTRY vx_node VX_API_CALL vxGaussian3x3Node(vx_graph graph, vx_image input, vx_image output); + +/*! \brief [Graph] Creates a Non-linear Filter Node. + * \param [in] graph The reference to the graph. + * \param [in] function The non-linear filter function. See \ref vx_non_linear_filter_e. + * \param [in] input The input image in \ref VX_DF_IMAGE_U8 format. 
+ * \param [in] mask The mask to be applied to the Non-linear function. \ref VX_MATRIX_ORIGIN attribute is used + * to place the mask appropriately when computing the resulting image. See \ref vxCreateMatrixFromPattern. + * \param [out] output The output image in \ref VX_DF_IMAGE_U8 format, which must have the same dimensions as the input image. + * \return \ref vx_node. + * \retval vx_node A node reference. Any possible errors preventing a successful creation should be checked using \ref vxGetStatus + * \ingroup group_vision_function_nonlinear_filter + */ +VX_API_ENTRY vx_node VX_API_CALL vxNonLinearFilterNode(vx_graph graph, vx_enum function, vx_image input, vx_matrix mask, vx_image output); + +/*! \brief [Graph] Creates a custom convolution node. + * \param [in] graph The reference to the graph. + * \param [in] input The input image in \ref VX_DF_IMAGE_U8 format. + * \param [in] conv The \ref vx_int16 convolution matrix. + * \param [out] output The output image in \ref VX_DF_IMAGE_U8 or \ref VX_DF_IMAGE_S16 format, which must have the same dimensions as the input image. + * \ingroup group_vision_function_custom_convolution + * \return \ref vx_node. + * \retval vx_node A node reference. Any possible errors preventing a successful creation should be checked using \ref vxGetStatus + */ +VX_API_ENTRY vx_node VX_API_CALL vxConvolveNode(vx_graph graph, vx_image input, vx_convolution conv, vx_image output); + +/*! \brief [Graph] Creates a node for a Gaussian Image Pyramid. + * \param [in] graph The reference to the graph. + * \param [in] input The input image in \ref VX_DF_IMAGE_U8 format. + * \param [out] gaussian The Gaussian pyramid with \ref VX_DF_IMAGE_U8 to construct. + * \ingroup group_vision_function_gaussian_pyramid + * \see group_pyramid + * \return \ref vx_node. + * \retval vx_node A node reference. Any possible errors preventing a successful creation should be checked using \ref vxGetStatus + */ +VX_API_ENTRY vx_node VX_API_CALL vxGaussianPyramidNode(vx_graph graph, vx_image input, vx_pyramid gaussian); + +/*! \brief [Graph] Creates a node for a Laplacian Image Pyramid. + * \param [in] graph The reference to the graph. + * \param [in] input The input image in \ref VX_DF_IMAGE_U8 or \ref VX_DF_IMAGE_S16 format. + * \param [out] laplacian The Laplacian pyramid with \ref VX_DF_IMAGE_S16 to construct. + * \param [out] output The lowest resolution image in \ref VX_DF_IMAGE_U8 or \ref VX_DF_IMAGE_S16 format necessary to reconstruct the input image from the pyramid. The output image format should be same as input image format. + * \ingroup group_vision_function_laplacian_pyramid + * \see group_pyramid + * \return \ref vx_node. + * \retval vx_node A node reference. Any possible errors preventing a successful creation should be checked using \ref vxGetStatus + */ +VX_API_ENTRY vx_node VX_API_CALL vxLaplacianPyramidNode(vx_graph graph, vx_image input, + vx_pyramid laplacian, vx_image output); + +/*! \brief [Graph] Reconstructs an image from a Laplacian Image pyramid. + * \param [in] graph The reference to the graph. + * \param [in] laplacian The Laplacian pyramid with \ref VX_DF_IMAGE_S16 format. + * \param [in] input The lowest resolution image in \ref VX_DF_IMAGE_U8 or \ref VX_DF_IMAGE_S16 format for the Laplacian pyramid. + * \param [out] output The output image in \ref VX_DF_IMAGE_U8 or \ref VX_DF_IMAGE_S16 format with the highest possible resolution reconstructed from the Laplacian pyramid. The output image format should be same as input image format. 
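+ *
+ * A round-trip sketch (illustrative only; 4 half-scale levels and the 640x480 size are
+ * assumptions, and input is assumed to be an existing VX_DF_IMAGE_U8 image of that size):
+ * \code
+ * vx_pyramid lap    = vxCreatePyramid(context, 4, VX_SCALE_PYRAMID_HALF, 640, 480, VX_DF_IMAGE_S16);
+ * vx_image   lowest = vxCreateVirtualImage(graph, 0, 0, VX_DF_IMAGE_VIRT); // size resolved at verify time
+ * vx_image   out    = vxCreateImage(context, 640, 480, VX_DF_IMAGE_U8);
+ * vxLaplacianPyramidNode(graph, input, lap, lowest);
+ * vxLaplacianReconstructNode(graph, lap, lowest, out);
+ * \endcode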
+ * \ingroup group_vision_function_laplacian_reconstruct + * \see group_pyramid + * \return \ref vx_node. + * \retval 0 Node could not be created. + * \retval * Node handle. + */ +VX_API_ENTRY vx_node VX_API_CALL vxLaplacianReconstructNode(vx_graph graph, vx_pyramid laplacian, vx_image input, + vx_image output); +/*! \brief [Graph] Creates a image weighted average node. + * \param [in] graph The reference to the graph. + * \param [in] img1 The first input \ref VX_DF_IMAGE_U8 image. + * \param [in] alpha The input \ref VX_TYPE_FLOAT32 scalar value with a value in the range of \f$ 0.0 \le \alpha \le 1.0 \f$. + * \param [in] img2 The second \ref VX_DF_IMAGE_U8 image, which must have the same dimensions as the img1. + * \param [out] output The output \ref VX_DF_IMAGE_U8 image, which must have the same dimensions as the img1. + * \ingroup group_vision_function_weighted_average + * \return \ref vx_node. + * \retval vx_node A node reference. Any possible errors preventing a successful creation should be checked using \ref vxGetStatus + */ +VX_API_ENTRY vx_node VX_API_CALL vxWeightedAverageNode(vx_graph graph, vx_image img1, vx_scalar alpha, vx_image img2, vx_image output); +/*! \brief [Graph] Creates a min,max,loc node. + * \param [in] graph The reference to create the graph. + * \param [in] input The input image in \ref VX_DF_IMAGE_U8 or \ref VX_DF_IMAGE_S16 format. + * \param [out] minVal The minimum value in the image, which corresponds to the type of the input. + * \param [out] maxVal The maximum value in the image, which corresponds to the type of the input. + * \param [out] minLoc [optional] The minimum \ref VX_TYPE_COORDINATES2D locations. If the input image has several minimums, the kernel will return up to the capacity of the array. + * \param [out] maxLoc [optional] The maximum \ref VX_TYPE_COORDINATES2D locations. If the input image has several maximums, the kernel will return up to the capacity of the array. + * \param [out] minCount [optional] The total number of detected minimums in image. Use a \ref VX_TYPE_SIZE scalar. + * \param [out] maxCount [optional] The total number of detected maximums in image. Use a \ref VX_TYPE_SIZE scalar. + * \ingroup group_vision_function_minmaxloc + * \return \ref vx_node. + * \retval vx_node A node reference. Any possible errors preventing a successful creation should be checked using \ref vxGetStatus + */ +VX_API_ENTRY vx_node VX_API_CALL vxMinMaxLocNode(vx_graph graph, + vx_image input, + vx_scalar minVal, vx_scalar maxVal, + vx_array minLoc, vx_array maxLoc, + vx_scalar minCount, vx_scalar maxCount); + +/*! \brief [Graph] Creates a pixel-wise minimum kernel. + * \param [in] graph The reference to the graph where to create the node. + * \param [in] in1 The first input image. Must be of type \ref VX_DF_IMAGE_U8 or \ref VX_DF_IMAGE_S16. + * \param [in] in2 The second input image. Must be of type \ref VX_DF_IMAGE_U8 or \ref VX_DF_IMAGE_S16. + * \param [out] out The output image which will hold the result of min and will have the same type and dimensions of the imput images. + * \ingroup group_vision_function_min + * \return \ref vx_node. + * \retval vx_node A node reference. Any possible errors preventing a successful creation should be checked using \ref vxGetStatus + */ +VX_API_ENTRY vx_node VX_API_CALL vxMinNode(vx_graph graph, vx_image in1, vx_image in2, vx_image out); + +/*! \brief [Graph] Creates a pixel-wise maximum kernel. + * \param [in] graph The reference to the graph where to create the node. + * \param [in] in1 The first input image. 
Must be of type \ref VX_DF_IMAGE_U8 or \ref VX_DF_IMAGE_S16. + * \param [in] in2 The second input image. Must be of type \ref VX_DF_IMAGE_U8 or \ref VX_DF_IMAGE_S16. + * \param [out] out The output image which will hold the result of max and will have the same type and dimensions of the imput images. + * \ingroup group_vision_function_max + * \return \ref vx_node. + * \retval vx_node A node reference. Any possible errors preventing a successful creation should be checked using \ref vxGetStatus + */ +VX_API_ENTRY vx_node VX_API_CALL vxMaxNode(vx_graph graph, vx_image in1, vx_image in2, vx_image out); + +/*! \brief [Graph] Creates a bitwise AND node. + * \param [in] graph The reference to the graph. + * \param [in] in1 A \ref VX_DF_IMAGE_U8 input image. + * \param [in] in2 A \ref VX_DF_IMAGE_U8 input image. + * \param [out] out The \ref VX_DF_IMAGE_U8 output image, which must have the same dimensions as the input images. + * \ingroup group_vision_function_and + * \return \ref vx_node. + * \retval vx_node A node reference. Any possible errors preventing a successful creation should be checked using \ref vxGetStatus + */ +VX_API_ENTRY vx_node VX_API_CALL vxAndNode(vx_graph graph, vx_image in1, vx_image in2, vx_image out); + +/*! \brief [Graph] Creates a bitwise INCLUSIVE OR node. + * \param [in] graph The reference to the graph. + * \param [in] in1 A \ref VX_DF_IMAGE_U8 input image. + * \param [in] in2 A \ref VX_DF_IMAGE_U8 input image. + * \param [out] out The \ref VX_DF_IMAGE_U8 output image, which must have the same dimensions as the input images. + * \ingroup group_vision_function_or + * \return \ref vx_node. + * \retval vx_node A node reference. Any possible errors preventing a successful creation should be checked using \ref vxGetStatus + */ +VX_API_ENTRY vx_node VX_API_CALL vxOrNode(vx_graph graph, vx_image in1, vx_image in2, vx_image out); + +/*! \brief [Graph] Creates a bitwise EXCLUSIVE OR node. + * \param [in] graph The reference to the graph. + * \param [in] in1 A \ref VX_DF_IMAGE_U8 input image. + * \param [in] in2 A \ref VX_DF_IMAGE_U8 input image. + * \param [out] out The \ref VX_DF_IMAGE_U8 output image, which must have the same dimensions as the input images. + * \ingroup group_vision_function_xor + * \return \ref vx_node. + * \retval vx_node A node reference. Any possible errors preventing a successful creation should be checked using \ref vxGetStatus + */ +VX_API_ENTRY vx_node VX_API_CALL vxXorNode(vx_graph graph, vx_image in1, vx_image in2, vx_image out); + +/*! \brief [Graph] Creates a bitwise NOT node. + * \param [in] graph The reference to the graph. + * \param [in] input A \ref VX_DF_IMAGE_U8 input image. + * \param [out] output The \ref VX_DF_IMAGE_U8 output image, which must have the same dimensions as the input image. + * \ingroup group_vision_function_not + * \return \ref vx_node. + * \retval vx_node A node reference. Any possible errors preventing a successful creation should be checked using \ref vxGetStatus + */ +VX_API_ENTRY vx_node VX_API_CALL vxNotNode(vx_graph graph, vx_image input, vx_image output); + +/*! \brief [Graph] Creates a scalar operation node. + * \param [in] graph The reference to the graph. + * \param [in] scalar_operation A \ref VX_TYPE_ENUM of the \ref vx_scalar_operation_e enumeration. + * \param [in] a First scalar operand. + * \param [in] b Second scalar operand. + * \param [out] output Result of the scalar operation. + * \ingroup group_control_flow + * \return \ref vx_node. + * \retval vx_node A node reference. 
Any possible errors preventing a successful creation should be checked using \ref vxGetStatus + */ +VX_API_ENTRY vx_node VX_API_CALL vxScalarOperationNode(vx_graph graph, vx_enum scalar_operation, vx_scalar a, vx_scalar b, vx_scalar output); + +/*! \brief [Graph] Selects one of two data objects depending on the the value of a condition (boolean scalar), and copies its data into another data object. + * \details This node supports predicated execution flow within a graph. All the data objects passed to this kernel shall + * have the same object type and meta data. It is important to note that an implementation may optimize away the select and copy when virtual data + * objects are used.\n + * If there is a kernel node that contribute only into virtual data objects during the graph execution due to certain data path being eliminated by not + * taken argument of select node, then the OpenVX implementation guarantees that there will not be any side effects to graph execution and node state.\n + * If the path to a select node contains non-virtual objects, user nodes, or nodes with completion callbacks, then that path may not be "optimized out" + * because the callback must be executed and the non-virtual objects must be modified. + * \param [in] graph The reference to the graph. + * \param [in] condition \ref VX_TYPE_BOOL predicate variable. + * \param [in] true_value Data object for true. + * \param [in] false_value Data object for false. + * \param [out] output Output data object. + * \return \ref vx_node. + * \retval vx_node A node reference. Any possible errors preventing a successful creation should be checked using \ref vxGetStatus + * \ingroup group_control_flow + */ +VX_API_ENTRY vx_node VX_API_CALL vxSelectNode(vx_graph graph, vx_scalar condition, vx_reference true_value, vx_reference false_value, vx_reference output); + +/*! \brief [Graph] Creates an pixelwise-multiplication node. + * \param [in] graph The reference to the graph. + * \param [in] in1 An input image, \ref VX_DF_IMAGE_U8 or \ref VX_DF_IMAGE_S16. + * \param [in] in2 An input image, \ref VX_DF_IMAGE_U8 or \ref VX_DF_IMAGE_S16. + * \param [in] scale A non-negative \ref VX_TYPE_FLOAT32 multiplied to each product before overflow handling. + * \param [in] overflow_policy A \ref VX_TYPE_ENUM of the \ref vx_convert_policy_e enumeration. + * \param [in] rounding_policy A \ref VX_TYPE_ENUM of the \ref vx_round_policy_e enumeration. + * \param [out] out The output image, a \ref VX_DF_IMAGE_U8 or \ref VX_DF_IMAGE_S16 image. Must have the same type and dimensions of the imput images. + * \ingroup group_vision_function_mult + * \return \ref vx_node. + * \retval vx_node A node reference. Any possible errors preventing a successful creation should be checked using \ref vxGetStatus + */ +VX_API_ENTRY vx_node VX_API_CALL vxMultiplyNode(vx_graph graph, + vx_image in1, vx_image in2, + vx_scalar scale, + vx_enum overflow_policy, + vx_enum rounding_policy, + vx_image out); + +/*! \brief [Graph] Creates an arithmetic addition node. + * \param [in] graph The reference to the graph. + * \param [in] in1 An input image, \ref VX_DF_IMAGE_U8 or \ref VX_DF_IMAGE_S16. + * \param [in] in2 An input image, \ref VX_DF_IMAGE_U8 or \ref VX_DF_IMAGE_S16. + * \param [in] policy A \ref VX_TYPE_ENUM of the \ref vx_convert_policy_e enumeration. + * \param [out] out The output image, a \ref VX_DF_IMAGE_U8 or \ref VX_DF_IMAGE_S16 image, which must have the same dimensions as the input images. + * \ingroup group_vision_function_add + * \return \ref vx_node. 
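+ * \par Example
+ * A minimal sketch (illustrative only): it assumes `context` and two equally sized
+ * \ref VX_DF_IMAGE_U8 images `a` and `b` already exist; 640x480 is an arbitrary example size.
+ * \code
+ * vx_graph graph = vxCreateGraph(context);
+ * vx_image sum   = vxCreateImage(context, 640, 480, VX_DF_IMAGE_S16);
+ * // VX_CONVERT_POLICY_SATURATE clamps on overflow; with an S16 output two U8 inputs cannot overflow.
+ * vxAddNode(graph, a, b, VX_CONVERT_POLICY_SATURATE, sum);
+ * if (vxVerifyGraph(graph) == VX_SUCCESS)
+ *     vxProcessGraph(graph);
+ * \endcode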
+ * \retval vx_node A node reference. Any possible errors preventing a successful creation should be checked using \ref vxGetStatus + */ +VX_API_ENTRY vx_node VX_API_CALL vxAddNode(vx_graph graph, + vx_image in1, vx_image in2, + vx_enum policy, + vx_image out); + +/*! \brief [Graph] Creates an arithmetic subtraction node. + * \param [in] graph The reference to the graph. + * \param [in] in1 An input image, \ref VX_DF_IMAGE_U8 or \ref VX_DF_IMAGE_S16, the minuend. + * \param [in] in2 An input image, \ref VX_DF_IMAGE_U8 or \ref VX_DF_IMAGE_S16, the subtrahend. + * \param [in] policy A \ref VX_TYPE_ENUM of the \ref vx_convert_policy_e enumeration. + * \param [out] out The output image, a \ref VX_DF_IMAGE_U8 or \ref VX_DF_IMAGE_S16 image, which must have the same dimensions as the input images. + * \ingroup group_vision_function_sub + * \return \ref vx_node. + * \retval vx_node A node reference. Any possible errors preventing a successful creation should be checked using \ref vxGetStatus + */ +VX_API_ENTRY vx_node VX_API_CALL vxSubtractNode(vx_graph graph, + vx_image in1, vx_image in2, + vx_enum policy, + vx_image out); + +/*! \brief [Graph] Creates a bit-depth conversion node. + * \param [in] graph The reference to the graph. + * \param [in] input The input image. + * \param [out] output The output image with the same dimensions of the input image. + * \param [in] policy A \ref VX_TYPE_ENUM of the \ref vx_convert_policy_e enumeration. + * \param [in] shift A scalar containing a \ref VX_TYPE_INT32 of the shift value. + * \ingroup group_vision_function_convertdepth + * \return \ref vx_node. + * \retval vx_node A node reference. Any possible errors preventing a successful creation should be checked using \ref vxGetStatus + */ +VX_API_ENTRY vx_node VX_API_CALL vxConvertDepthNode(vx_graph graph, vx_image input, vx_image output, vx_enum policy, vx_scalar shift); + +/*! \brief [Graph] Creates a Canny Edge Detection Node. + * \param [in] graph The reference to the graph. + * \param [in] input The input \ref VX_DF_IMAGE_U8 image. + * \param [in] hyst The double threshold for hysteresis. The \ref VX_THRESHOLD_INPUT_FORMAT shall be either + * \ref VX_DF_IMAGE_U8 or \ref VX_DF_IMAGE_S16. The \ref VX_THRESHOLD_OUTPUT_FORMAT is ignored. + * \param [in] gradient_size The size of the Sobel filter window, must support at least 3, 5, and 7. + * \param [in] norm_type A flag indicating the norm used to compute the gradient, \ref VX_NORM_L1 or \ref VX_NORM_L2. + * \param [out] output The output image in \ref VX_DF_IMAGE_U8 format with values either 0 or 255. + * \ingroup group_vision_function_canny + * \return \ref vx_node. + * \retval vx_node A node reference. Any possible errors preventing a successful creation should be checked using \ref vxGetStatus + */ +VX_API_ENTRY vx_node VX_API_CALL vxCannyEdgeDetectorNode(vx_graph graph, vx_image input, vx_threshold hyst, + vx_int32 gradient_size, vx_enum norm_type, + vx_image output); + +/*! \brief [Graph] Creates an Affine Warp Node. + * \param [in] graph The reference to the graph. + * \param [in] input The input \ref VX_DF_IMAGE_U8 image. + * \param [in] matrix The affine matrix. Must be 2x3 of type \ref VX_TYPE_FLOAT32. + * \param [in] type The interpolation type from \ref vx_interpolation_type_e. + * \ref VX_INTERPOLATION_AREA is not supported. + * \param [out] output The output \ref VX_DF_IMAGE_U8 image and the same dimensions as the input image. 
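+ * \par Example
+ * A short sketch (illustrative only): it assumes `context` and a 640x480 \ref VX_DF_IMAGE_U8
+ * image `input` already exist; the identity transform is used purely for brevity.
+ * \code
+ * vx_float32 coeffs[3][2] = { {1.0f, 0.0f}, {0.0f, 1.0f}, {0.0f, 0.0f} };  // identity mapping
+ * vx_matrix  matrix = vxCreateMatrix(context, VX_TYPE_FLOAT32, 2, 3);
+ * vxCopyMatrix(matrix, coeffs, VX_WRITE_ONLY, VX_MEMORY_TYPE_HOST);
+ * vx_graph graph  = vxCreateGraph(context);
+ * vx_image warped = vxCreateImage(context, 640, 480, VX_DF_IMAGE_U8);
+ * vxWarpAffineNode(graph, input, matrix, VX_INTERPOLATION_BILINEAR, warped);
+ * \endcode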
+ * \ingroup group_vision_function_warp_affine + * \note The border modes \ref VX_NODE_BORDER value \ref VX_BORDER_UNDEFINED and + * \ref VX_BORDER_CONSTANT are supported. + * \return \ref vx_node. + * \retval vx_node A node reference. Any possible errors preventing a successful creation should be checked using \ref vxGetStatus + */ +VX_API_ENTRY vx_node VX_API_CALL vxWarpAffineNode(vx_graph graph, vx_image input, vx_matrix matrix, vx_enum type, vx_image output); + +/*! \brief [Graph] Creates a Perspective Warp Node. + * \param [in] graph The reference to the graph. + * \param [in] input The input \ref VX_DF_IMAGE_U8 image. + * \param [in] matrix The perspective matrix. Must be 3x3 of type \ref VX_TYPE_FLOAT32. + * \param [in] type The interpolation type from \ref vx_interpolation_type_e. + * \ref VX_INTERPOLATION_AREA is not supported. + * \param [out] output The output \ref VX_DF_IMAGE_U8 image with the same dimensions as the input image. + * \ingroup group_vision_function_warp_perspective + * \note The border modes \ref VX_NODE_BORDER value \ref VX_BORDER_UNDEFINED and + * \ref VX_BORDER_CONSTANT are supported. + * \return \ref vx_node. + * \retval vx_node A node reference. Any possible errors preventing a successful creation should be checked using \ref vxGetStatus + */ +VX_API_ENTRY vx_node VX_API_CALL vxWarpPerspectiveNode(vx_graph graph, vx_image input, vx_matrix matrix, vx_enum type, vx_image output); + +/*! \brief [Graph] Creates a Harris Corners Node. + * \param [in] graph The reference to the graph. + * \param [in] input The input \ref VX_DF_IMAGE_U8 image. + * \param [in] strength_thresh The \ref VX_TYPE_FLOAT32 minimum threshold with which to eliminate Harris Corner scores (computed using the normalized Sobel kernel). + * \param [in] min_distance The \ref VX_TYPE_FLOAT32 radial Euclidean distance for non-maximum suppression. + * \param [in] sensitivity The \ref VX_TYPE_FLOAT32 scalar sensitivity threshold \f$ k \f$ from the Harris-Stephens equation. + * \param [in] gradient_size The gradient window size to use on the input. The + * implementation must support at least 3, 5, and 7. + * \param [in] block_size The block window size used to compute the Harris Corner score. + * The implementation must support at least 3, 5, and 7. + * \param [out] corners The array of \ref VX_TYPE_KEYPOINT objects. The order of the keypoints in this array is implementation dependent. + * \param [out] num_corners [optional] The total number of detected corners in image. Use a \ref VX_TYPE_SIZE scalar. + * \ingroup group_vision_function_harris + * \return \ref vx_node. + * \retval vx_node A node reference. Any possible errors preventing a successful creation should be checked using \ref vxGetStatus + */ +VX_API_ENTRY vx_node VX_API_CALL vxHarrisCornersNode(vx_graph graph, + vx_image input, + vx_scalar strength_thresh, + vx_scalar min_distance, + vx_scalar sensitivity, + vx_int32 gradient_size, + vx_int32 block_size, + vx_array corners, + vx_scalar num_corners); + +/*! \brief [Graph] Creates a FAST Corners Node. + * \param [in] graph The reference to the graph. + * \param [in] input The input \ref VX_DF_IMAGE_U8 image. + * \param [in] strength_thresh Threshold on difference between intensity of the central pixel and pixels on Bresenham's circle + * of radius 3 (\ref VX_TYPE_FLOAT32 scalar), with a value in the range of 0.0 \f$\le\f$ strength_thresh < 256.0. + * Any fractional value will be truncated to an integer. 
+ * \param [in] nonmax_suppression If true, non-maximum suppression is applied to + * detected corners before being placed in the \ref vx_array of \ref VX_TYPE_KEYPOINT objects. + * \param [out] corners Output corner \ref vx_array of \ref VX_TYPE_KEYPOINT. The order of the + * keypoints in this array is implementation dependent. + * \param [out] num_corners [optional] The total number of detected corners in image. Use a \ref VX_TYPE_SIZE scalar. + * \ingroup group_vision_function_fast + * \return \ref vx_node. + * \retval vx_node A node reference. Any possible errors preventing a successful creation should be checked using \ref vxGetStatus + */ +VX_API_ENTRY vx_node VX_API_CALL vxFastCornersNode(vx_graph graph, vx_image input, vx_scalar strength_thresh, vx_bool nonmax_suppression, vx_array corners, vx_scalar num_corners); + +/*! \brief [Graph] Creates a Lucas Kanade Tracking Node. + * \param [in] graph The reference to the graph. + * \param [in] old_images Input of first (old) image pyramid in \ref VX_DF_IMAGE_U8. + * \param [in] new_images Input of destination (new) image pyramid \ref VX_DF_IMAGE_U8. + * \param [in] old_points An array of key points in a \ref vx_array of \ref VX_TYPE_KEYPOINT; those key points are defined at + * the \a old_images high resolution pyramid. + * \param [in] new_points_estimates An array of estimation on what is the output key points in a \ref vx_array of + * \ref VX_TYPE_KEYPOINT; those keypoints are defined at the \a new_images high resolution pyramid. + * \param [out] new_points An output array of key points in a \ref vx_array of \ref VX_TYPE_KEYPOINT; those key points are + * defined at the \a new_images high resolution pyramid. + * \param [in] termination The termination can be \ref VX_TERM_CRITERIA_ITERATIONS or \ref VX_TERM_CRITERIA_EPSILON or + * \ref VX_TERM_CRITERIA_BOTH. + * \param [in] epsilon The \ref vx_float32 error for terminating the algorithm. + * \param [in] num_iterations The number of iterations. Use a \ref VX_TYPE_UINT32 scalar. + * \param [in] use_initial_estimate Use a \ref VX_TYPE_BOOL scalar. + * \param [in] window_dimension The size of the window on which to perform the algorithm. See + * \ref VX_CONTEXT_OPTICAL_FLOW_MAX_WINDOW_DIMENSION + * \ingroup group_vision_function_opticalflowpyrlk + * \return \ref vx_node. + * \retval vx_node A node reference. Any possible errors preventing a successful creation should be checked using \ref vxGetStatus + */ +VX_API_ENTRY vx_node VX_API_CALL vxOpticalFlowPyrLKNode(vx_graph graph, + vx_pyramid old_images, + vx_pyramid new_images, + vx_array old_points, + vx_array new_points_estimates, + vx_array new_points, + vx_enum termination, + vx_scalar epsilon, + vx_scalar num_iterations, + vx_scalar use_initial_estimate, + vx_size window_dimension); + +/*! \brief [Graph] Creates a Remap Node. + * \param [in] graph The reference to the graph that will contain the node. + * \param [in] input The input \ref VX_DF_IMAGE_U8 image. + * \param [in] table The remap table object. + * \param [in] policy An interpolation type from \ref vx_interpolation_type_e. + * \ref VX_INTERPOLATION_AREA is not supported. + * \param [out] output The output \ref VX_DF_IMAGE_U8 image with the same dimensions as the input image. + * \note The border modes \ref VX_NODE_BORDER value \ref VX_BORDER_UNDEFINED and + * \ref VX_BORDER_CONSTANT are supported. + * \return A \ref vx_node. + * \retval vx_node A node reference. 
Any possible errors preventing a successful creation should be checked using \ref vxGetStatus + * \ingroup group_vision_function_remap + */ +VX_API_ENTRY vx_node VX_API_CALL vxRemapNode(vx_graph graph, + vx_image input, + vx_remap table, + vx_enum policy, + vx_image output); + +/*! \brief [Graph] Performs a Gaussian blur on an image and then half-scales it. The interpolation mode used is nearest-neighbor. + * \details The output image size is determined by: + * \f[ + * W_{output} = \frac{W_{input} + 1}{2} \\ + * , + * H_{output} = \frac{H_{input} + 1}{2} + * \f] + * \param [in] graph The reference to the graph. + * \param [in] input The input \ref VX_DF_IMAGE_U8 image. + * \param [out] output The output \ref VX_DF_IMAGE_U8 image. + * \param [in] kernel_size The input size of the Gaussian filter. Supported values are 1, 3 and 5. + * \ingroup group_vision_function_scale_image + * \return \ref vx_node. + * \retval vx_node A node reference. Any possible errors preventing a successful creation should be checked using \ref vxGetStatus + */ +VX_API_ENTRY vx_node VX_API_CALL vxHalfScaleGaussianNode(vx_graph graph, vx_image input, vx_image output, vx_int32 kernel_size); + +VX_API_ENTRY vx_node VX_API_CALL vxCensus3x3Node(vx_graph graph, vx_image src, vx_image dst); + +/*! \brief [Graph] The node compares an image template against overlapped image regions. + * \details The detailed equation of the matching can be found in \ref vx_comp_metric_e. + * The output of the template matching node is a comparison map as described in \ref vx_comp_metric_e. + * The node has a limitation on the template image size (width*height): it must not be larger than 65535. + * If the valid region of the template image is smaller than the entire template image, the result in the destination image is implementation-dependent. + * \param [in] graph The reference to the graph. + * \param [in] src The input image of type \ref VX_DF_IMAGE_U8. + * \param [in] templateImage Searched template of type \ref VX_DF_IMAGE_U8. + * \param [in] matchingMethod Attribute specifying the comparison method \ref vx_comp_metric_e. This function supports only \ref VX_COMPARE_CCORR_NORM and \ref VX_COMPARE_L2. + * \param [out] output Map of comparison results. The output is an image of type \ref VX_DF_IMAGE_S16. + * \return \ref vx_node. + * \retval vx_node A node reference. Any possible errors preventing a successful creation should be checked using \ref vxGetStatus + * \ingroup group_vision_function_match_template + */ + VX_API_ENTRY vx_node VX_API_CALL vxMatchTemplateNode(vx_graph graph, vx_image src, vx_image templateImage, vx_enum matchingMethod, vx_image output); + + /*! \brief [Graph] Creates a node that extracts an LBP image from an input image. +* \param [in] graph The reference to the graph. +* \param [in] in An input image in vx_image, or \f$ SrcImg\f$ in the equations. The image is of type \ref VX_DF_IMAGE_U8 +* \param [in] format A variation of LBP, such as original LBP or mLBP. See \ref vx_lbp_format_e +* \param [in] kernel_size Kernel size. Only sizes of 3 and 5 are supported +* \param [out] out An output image in vx_image, or \f$ DstImg\f$ in the equations. The image is of type \ref VX_DF_IMAGE_U8 with the same dimensions as the input image. + * \return \ref vx_node. + * \retval vx_node A node reference.
Any possible errors preventing a successful creation should be checked using \ref vxGetStatus +* \ingroup group_vision_function_lbp +*/ +VX_API_ENTRY vx_node VX_API_CALL vxLBPNode(vx_graph graph, vx_image in, vx_enum format, vx_int8 kernel_size, vx_image out); + +/*! \brief [Graph] Performs cell calculations for the average gradient magnitude and gradient orientation histograms. + * \details Firstly, the gradient magnitude and gradient orientation are computed for each pixel in the input image. + * Two 1-D centred, point discrete derivative masks are applied to the input image in the horizontal and vertical directions. + * \f[ M_h = [-1, 0, 1] \f] and \f[ M_v = [-1, 0, 1]^T \f] + * \f$G_v\f$ is the result of applying mask \f$M_v\f$ to the input image, and \f$G_h\f$ is the result of applying mask \f$M_h\f$ to the input image. + * The border mode used for the gradient calculation is implementation dependent. Its behavior should be similar to \ref VX_BORDER_UNDEFINED. + * The gradient magnitudes and gradient orientations for each pixel are then calculated in the following manner. + * \f[ G(x,y) = \sqrt{G_v(x,y)^2 + G_h(x,y)^2} \f] + * \f[ \theta(x,y) = arctan(G_v(x,y), G_h(x,y)) \f] + * where \f$arctan(v, h)\f$ + * is \f$ tan^{-1}(v/h)\f$ when \f$h!=0\f$, + * + * \f$ -pi/2 \f$ if \f$v<0\f$ and \f$h==0\f$, + * + * \f$ pi/2 \f$ if \f$v>0\f$ and \f$h==0\f$ + * + * and \f$ 0 \f$ if \f$v==0\f$ and \f$h==0\f$ + * + * Secondly, the gradient magnitudes and orientations are used to compute the bins output tensor and optional magnitudes output tensor. + * These tensors are computed on a cell level where the cells are rectangular in shape. + * The magnitudes tensor contains the average gradient magnitude for each cell. + * \f[magnitudes(c) = \frac{1}{(cell\_width * cell\_height)}\sum\limits_{w=0}^{cell\_width} \sum\limits_{h=0}^{cell\_height} G_c(w,h)\f] + * where \f$G_c\f$ is the gradient magnitudes related to cell \f$c\f$. + * The bins tensor contains histograms of gradient orientations for each cell. + * The gradient orientations at each pixel range from 0 to 360 degrees. These are quantised into a set of histogram bins based on the num_bins parameter. + * Each pixel votes for a specific cell histogram bin based on its gradient orientation. The vote itself is the pixel's gradient magnitude. + * \f[bins(c, n) = \sum\limits_{w=0}^{cell\_width} \sum\limits_{h=0}^{cell\_height} G_c(w,h) * 1[B_c(w, h, num\_bins) == n]\f] + * where \f$B_c\f$ produces the histogram bin number based on the gradient orientation of the pixel at location (\f$w\f$, \f$h\f$) in cell \f$c\f$ based on + * the \f$num\_bins\f$ and \f[1[B_c(w, h, num\_bins) == n]\f] is a delta-function with value 1 when \f$B_c(w, h, num\_bins) == n\f$ or 0 otherwise. + * \param [in] graph The reference to the graph. + * \param [in] input The input image of type \ref VX_DF_IMAGE_U8. + * \param [in] cell_width The histogram cell width of type \ref VX_TYPE_INT32. + * \param [in] cell_height The histogram cell height of type \ref VX_TYPE_INT32. + * \param [in] num_bins The histogram size of type \ref VX_TYPE_INT32. + * \param [out] magnitudes (Optional) The output average gradient magnitudes per cell of \ref vx_tensor of type \ref VX_TYPE_INT16 of size \f$ [floor(image_{width}/cell_{width}) ,floor(image_{height}/cell_{height}) ] \f$. + * \param [out] bins The output gradient orientation histograms per cell of \ref vx_tensor of type \ref VX_TYPE_INT16 of size \f$ [floor(image_{width}/cell_{width}) ,floor(image_{height}/cell_{height}), num_{bins}] \f$. 
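+ * \par Example
+ * An illustrative sketch only: it assumes `context` and a 640x480 \ref VX_DF_IMAGE_U8 image
+ * `input` already exist; 8x8 cells, 9 bins and a fixed-point position of 8 are example choices,
+ * not requirements of this header.
+ * \code
+ * vx_size   mag_dims[2]  = { 640 / 8, 480 / 8 };
+ * vx_size   bins_dims[3] = { 640 / 8, 480 / 8, 9 };
+ * vx_tensor magnitudes   = vxCreateTensor(context, 2, mag_dims,  VX_TYPE_INT16, 8);
+ * vx_tensor bins         = vxCreateTensor(context, 3, bins_dims, VX_TYPE_INT16, 8);
+ * vx_graph  graph        = vxCreateGraph(context);
+ * vxHOGCellsNode(graph, input, 8, 8, 9, magnitudes, bins);
+ * \endcode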
+ * \return \ref vx_node. + * \retval 0 Node could not be created. + * \retval * Node handle. + * \ingroup group_vision_function_hog + */ +VX_API_ENTRY vx_node VX_API_CALL vxHOGCellsNode(vx_graph graph, vx_image input, vx_int32 cell_width, vx_int32 cell_height, vx_int32 num_bins, vx_tensor magnitudes, vx_tensor bins); + +/*! \brief [Graph] The node produces HOG features for the W1xW2 window in a sliding window fashion over the whole input image. Each position produces a HOG feature vector. + * \details Firstly if a magnitudes tensor is provided the cell histograms in the bins tensor are normalised by the average cell gradient magnitudes. + \f[bins(c,n) = \frac{bins(c,n)}{magnitudes(c)}\f] + * To account for changes in illumination and contrast the cell histograms must be locally normalized which requires grouping the cell histograms together into larger spatially connected blocks. + * Blocks are rectangular grids represented by three parameters: the number of cells per block, the number of pixels per cell, and the number of bins per cell histogram. + * These blocks typically overlap, meaning that each cell histogram contributes more than once to the final descriptor. + * To normalize a block its cell histograms \f$h\f$ are grouped together to form a vector \f$v = [h_1, h_2, h_3, ... , h_n]\f$. + * This vector is normalised using L2-Hys which means performing L2-norm on this vector; clipping the result (by limiting the maximum values of v to be threshold) and renormalizing again. If the threshold is equal to zero then L2-Hys normalization is not performed. + * \f[L2norm(v) = \frac{v}{\sqrt{\|v\|_2^2 + \epsilon^2}}\f] + * where \f$ \|v\|_k \f$ be its k-norm for k=1, 2, and \f$ \epsilon \f$ be a small constant. + * For a specific window its HOG descriptor is then the concatenated vector of the components of the normalized cell histograms from all of the block regions contained in the window. + * The W1xW2 window starting position is at coordinates 0x0. + * If the input image has dimensions that are not an integer multiple of W1xW2 blocks with the specified stride, then the last positions that contain only a partial W1xW2 window + * will be calculated with the remaining part of the W1xW2 window padded with zeroes. + * The Window W1xW2 must also have a size so that it contains an integer number of cells, otherwise the node is not well-defined. + * The final output tensor will contain HOG descriptors equal to the number of windows in the input image. + * The output features tensor has 3 dimensions, given by:\n + * \f[[ (floor((image_{width}-window_{width})/window_{stride}) + 1),\f] + * \f[ (floor((image_{height}-window_{height})/window_{stride}) + 1),\f] + * \f[ floor((window_{width} - block_{width})/block_{stride} + 1) * floor((window_{height} - block_{height})/block_{stride} + 1) *\f] + * \f[ (((block_{width} * block_{height}) / (cell_{width} * cell_{height})) * num_{bins})] \f] + * See \ref vxCreateTensor and \ref vxCreateVirtualTensor. + * We recommend the output tensors always be *virtual* objects, with this node connected directly to the classifier. + * The output tensor will be very large, and using non-virtual tensors will result in a poorly optimized implementation. + * Merging of this node with a classifier node such as that described in the classifier extension will result in better performance. + * Notice that this node creation function has more parameters than the corresponding kernel. 
Numbering of kernel parameters (required if you create this node using the generic interface) is explicitly specified here. + * \param [in] graph The reference to the graph. + * \param [in] input The input image of type \ref VX_DF_IMAGE_U8. (Kernel parameter #0) + * \param [in] magnitudes (Optional) The gradient magnitudes per cell of \ref vx_tensor of type \ref VX_TYPE_INT16. It is the output of \ref vxHOGCellsNode. (Kernel parameter #1) + * \param [in] bins The gradient orientation histograms per cell of \ref vx_tensor of type \ref VX_TYPE_INT16. It is the output of \ref vxHOGCellsNode. (Kernel parameter #2) + * \param [in] params The parameters of type \ref vx_hog_t. (Kernel parameter #3) + * \param [in] hog_param_size Size of \ref vx_hog_t in bytes. Note that this parameter is not counted as one of the kernel parameters. + * \param [out] features The output HOG features of \ref vx_tensor of type \ref VX_TYPE_INT16. (Kernel parameter #4) + * \return \ref vx_node. + * \retval 0 Node could not be created. + * \retval * Node handle. + * \ingroup group_vision_function_hog + */ +VX_API_ENTRY vx_node VX_API_CALL vxHOGFeaturesNode(vx_graph graph, vx_image input, vx_tensor magnitudes, vx_tensor bins, const vx_hog_t *params, vx_size hog_param_size, vx_tensor features); + +/*! \brief [Graph] Finds the probabilistic Hough lines detected in the input binary image; each line is stored in the output array as a set of points (x1, y1, x2, y2). + * \details Some implementations of the algorithm may have a random or non-deterministic element. If the target application is in a safety-critical environment this + * should be borne in mind and steps taken in the implementation, the application or both to achieve the level of determinism required by the system design. + * \param [in] graph The graph handle. + * \param [in] input The 8-bit, single-channel binary source image. + * \param [in] params Parameters of the struct \ref vx_hough_lines_p_t. + * \param [out] lines_array Contains the array of lines; see \ref vx_line2d_t. The order of lines is implementation dependent. + * \param [out] num_lines [optional] The total number of detected lines in the image. Use a \ref VX_TYPE_SIZE scalar. + * \return \ref vx_node. + * \retval vx_node A node reference. Any possible errors preventing a successful creation should be checked using \ref vxGetStatus + * \ingroup group_vision_function_hough_lines_p + */ +VX_API_ENTRY vx_node VX_API_CALL vxHoughLinesPNode(vx_graph graph, vx_image input, const vx_hough_lines_p_t *params, vx_array lines_array, vx_scalar num_lines); + +/*! \brief [Graph] The function applies bilateral filtering to the input tensor. +* \param [in] graph The reference to the graph. +* \param [in] src The input data, a \ref vx_tensor with a maximum of 3 dimensions and a minimum of 2. The tensor is of type \ref VX_TYPE_UINT8 or \ref VX_TYPE_INT16. +* Dimensions are [radiometric, width, height] or [width, height]. See \ref vxCreateTensor and \ref vxCreateVirtualTensor. +* \param [in] diameter The diameter of each pixel neighbourhood that is used during filtering. Values of diameter must be odd, bigger than 3 and smaller than 10. +* \param [in] sigmaValues Filter sigma in the radiometric space. Supported values are bigger than 0 and smaller than or equal to 20. +* \param [in] sigmaSpace Filter sigma in the spatial space. Supported values are bigger than 0 and smaller than or equal to 20. +* \param [out] dst The output data, a \ref vx_tensor of type \ref VX_TYPE_UINT8 or \ref VX_TYPE_INT16. It must be the same type and size as the input.
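+* \par Example
+* A minimal sketch (illustrative only): it assumes `context` already exists; the 640x480 size
+* and the diameter/sigma values are example choices within the documented ranges.
+* \code
+* vx_size   dims[2] = { 640, 480 };
+* vx_tensor src     = vxCreateTensor(context, 2, dims, VX_TYPE_UINT8, 0);
+* vx_tensor dst     = vxCreateTensor(context, 2, dims, VX_TYPE_UINT8, 0);
+* vx_graph  graph   = vxCreateGraph(context);
+* // Declaration order is (diameter, sigmaSpace, sigmaValues).
+* vxBilateralFilterNode(graph, src, 5, 3.0f, 10.0f, dst);
+* \endcode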
+* \note The border modes +* \ref VX_NODE_BORDER value +* \ref VX_BORDER_REPLICATE and \ref VX_BORDER_CONSTANT are supported. +* \return vx_node. +* \retval vx_node A node reference. Any possible errors preventing a successful creation should be checked using vxGetStatus +* \ingroup group_vision_function_bilateral_filter +*/ +VX_API_ENTRY vx_node VX_API_CALL vxBilateralFilterNode(vx_graph graph, vx_tensor src, vx_int32 diameter, vx_float32 sigmaSpace, vx_float32 sigmaValues, vx_tensor dst); + +/*! \brief [Graph] Performs element wise multiplications on element values in the input tensor data with a scale. + * \param [in] graph The handle to the graph. + * \param [in] input1 Input tensor data. Implementations must support input tensor data type \ref VX_TYPE_INT16 with fixed_point_position 8, + * and tensor data types \ref VX_TYPE_UINT8 and \ref VX_TYPE_INT8, with fixed_point_position 0. + * \param [in] input2 Input tensor data. The dimensions and sizes of input2 match those of input1, unless the vx_tensor of one or more dimensions in input2 is 1. + * In this case, those dimensions are treated as if this tensor was expanded to match the size of the corresponding dimension of input1, + * and data was duplicated on all terms in that dimension. After this expansion, the dimensions will be equal. + * The data type must match the data type of Input1. + * \param [in] scale A non-negative \ref VX_TYPE_FLOAT32 multiplied to each product before overflow handling. + * \param [in] overflow_policy A \ref vx_convert_policy_e enumeration. + * \param [in] rounding_policy A \ref vx_round_policy_e enumeration. + * \param [out] output The output tensor data with the same dimensions as the input tensor data. + * \ingroup group_vision_function_tensor_multiply + * \return \ref vx_node. + * \returns A node reference \ref vx_node. Any possible errors preventing a + * successful creation should be checked using \ref vxGetStatus. + */ +VX_API_ENTRY vx_node VX_API_CALL vxTensorMultiplyNode(vx_graph graph, vx_tensor input1, vx_tensor input2, vx_scalar scale, vx_enum overflow_policy, + vx_enum rounding_policy, vx_tensor output); + +/*! \brief [Graph] Performs arithmetic addition on element values in the input tensor data. + * \param [in] graph The handle to the graph. + * \param [in] input1 Input tensor data. Implementations must support input tensor data type \ref VX_TYPE_INT16 with fixed_point_position 8, + * and tensor data types \ref VX_TYPE_UINT8 and \ref VX_TYPE_INT8, with fixed_point_position 0. + * \param [in] input2 Input tensor data. The dimensions and sizes of input2 match those of input1, unless the vx_tensor of one or more dimensions in input2 is 1. + * In this case, those dimensions are treated as if this tensor was expanded to match the size of the corresponding dimension of input1, + * and data was duplicated on all terms in that dimension. After this expansion, the dimensions will be equal. + * The data type must match the data type of Input1. + * \param [in] policy A \ref vx_convert_policy_e enumeration. + * \param [out] output The output tensor data with the same dimensions as the input tensor data. + * \ingroup group_vision_function_tensor_add + * \return \ref vx_node. + * \returns A node reference \ref vx_node. Any possible errors preventing a + * successful creation should be checked using \ref vxGetStatus. + */ +VX_API_ENTRY vx_node VX_API_CALL vxTensorAddNode(vx_graph graph, vx_tensor input1, vx_tensor input2, vx_enum policy, vx_tensor output); + +/*! 
\brief [Graph] Performs arithmetic subtraction on element values in the input tensor data. + * \param [in] graph The handle to the graph. + * \param [in] input1 Input tensor data. Implementations must support input tensor data type \ref VX_TYPE_INT16 with fixed_point_position 8, + * and tensor data types \ref VX_TYPE_UINT8 and \ref VX_TYPE_INT8, with fixed_point_position 0. + * \param [in] input2 Input tensor data. The dimensions and sizes of input2 match those of input1, unless the vx_tensor of one or more dimensions in input2 is 1. + * In this case, those dimensions are treated as if this tensor was expanded to match the size of the corresponding dimension of input1, + * and data was duplicated on all terms in that dimension. After this expansion, the dimensions will be equal. + * The data type must match the data type of Input1. + * \param [in] policy A \ref vx_convert_policy_e enumeration. + * \param [out] output The output tensor data with the same dimensions as the input tensor data. + * \ingroup group_vision_function_tensor_subtract + * \return \ref vx_node. + * \returns A node reference \ref vx_node. Any possible errors preventing a + * successful creation should be checked using \ref vxGetStatus. + */ +VX_API_ENTRY vx_node VX_API_CALL vxTensorSubtractNode(vx_graph graph, vx_tensor input1, vx_tensor input2, vx_enum policy, vx_tensor output); + +/*! \brief [Graph] Performs LUT on element values in the input tensor data. + * \param [in] graph The handle to the graph. + * \param [in] input1 Input tensor data. Implementations must support input tensor data type \ref VX_TYPE_INT16 with fixed_point_position 8, + * and tensor data types \ref VX_TYPE_UINT8, with fixed_point_position 0. + * \param [in] lut The look-up table to use, of type \ref vx_lut. + * The elements of input1 are treated as unsigned integers to determine an index into the look-up table. + * The data type of the items in the look-up table must match that of the output tensor. + * \param [out] output The output tensor data with the same dimensions as the input tensor data. + * \ingroup group_vision_function_tensor_tablelookup + * \return \ref vx_node. + * \returns A node reference \ref vx_node. Any possible errors preventing a + * successful creation should be checked using \ref vxGetStatus. + */ +VX_API_ENTRY vx_node VX_API_CALL vxTensorTableLookupNode(vx_graph graph, vx_tensor input1, vx_lut lut, vx_tensor output); + +/*! \brief [Graph] Performs transpose on the input tensor. + * The node transpose the tensor according to a specified 2 indexes in the tensor (0-based indexing) + * \param [in] graph The handle to the graph. + * \param [in] input Input tensor data, Implementations must support input tensor data type \ref VX_TYPE_INT16 with fixed_point_position 8, + * and tensor data types \ref VX_TYPE_UINT8 and \ref VX_TYPE_INT8, with fixed_point_position 0. + * \param [out] output output tensor data, + * \param [in] dimension1 Dimension index that is transposed with dim 2. + * \param [in] dimension2 Dimension index that is transposed with dim 1. + * \ingroup group_vision_function_tensor_transpose + * \return \ref vx_node. + * \returns A node reference \ref vx_node. Any possible errors preventing a + * successful creation should be checked using \ref vxGetStatus. + */ +VX_API_ENTRY vx_node VX_API_CALL vxTensorTransposeNode(vx_graph graph, vx_tensor input, vx_tensor output, vx_size dimension1, vx_size dimension2); +/*! \brief [Graph] Creates a bit-depth conversion node. + * \param [in] graph The reference to the graph. 
+ * \param [in] input The input tensor. Implementations must support input tensor data type \ref VX_TYPE_INT16 with fixed_point_position 8, + * and tensor data types \ref VX_TYPE_UINT8 and \ref VX_TYPE_INT8, with fixed_point_position 0. + * \param [in] policy A \ref VX_TYPE_ENUM of the \ref vx_convert_policy_e enumeration. + * \param [in] norm A scalar containing a \ref VX_TYPE_FLOAT32 of the normalization value. + * \param [in] offset A scalar containing a \ref VX_TYPE_FLOAT32 of the offset value subtracted before normalization. + * \param [out] output The output tensor. Implementations must support tensor data type \ref VX_TYPE_INT16 with fixed_point_position 8, + * and \ref VX_TYPE_UINT8 with fixed_point_position 0. + * \ingroup group_vision_function_tensor_convert_depth + * \return \ref vx_node. + * \retval vx_node A node reference. Any possible errors preventing a successful creation should be checked using \ref vxGetStatus + */ +VX_API_ENTRY vx_node VX_API_CALL vxTensorConvertDepthNode(vx_graph graph, vx_tensor input, vx_enum policy, vx_scalar norm, vx_scalar offset, vx_tensor output); + +/*! \brief [Graph] Creates a generalized matrix multiplication node. + * \param [in] graph The reference to the graph. + * \param [in] input1 The first input 2D tensor of type \ref VX_TYPE_INT16 with fixed_point_pos 8, or tensor data types \ref VX_TYPE_UINT8 or \ref VX_TYPE_INT8, with fixed_point_pos 0. + * \param [in] input2 The second 2D tensor. Must be in the same data type as input1. + * \param [in] input3 The third 2D tensor. Must be in the same data type as input1. [optional]. + * \param [in] matrix_multiply_params Matrix multiply parameters, see \ref vx_tensor_matrix_multiply_params_t. + * \param [out] output The output 2D tensor. Must be in the same data type as input1. Output dimensions must agree with the formula in the description. + * \ingroup group_vision_function_tensor_matrix_multiply + * \return \ref vx_node. + * \returns A node reference \ref vx_node. Any possible errors preventing a + * successful creation should be checked using \ref vxGetStatus. + */ +VX_API_ENTRY vx_node VX_API_CALL vxTensorMatrixMultiplyNode(vx_graph graph, vx_tensor input1, vx_tensor input2, vx_tensor input3, + const vx_tensor_matrix_multiply_params_t *matrix_multiply_params, vx_tensor output); + +/*! \brief Copies data from one object to another. + * \note An implementation may optimize away the copy when virtual data objects are used. + * \param [in] graph The reference to the graph. + * \param [in] input The input data object. + * \param [out] output The output data object with meta-data identical to the input data object. + * \return \ref vx_node. + * \retval vx_node A node reference. Any possible errors preventing a successful creation + * should be checked using \ref vxGetStatus + * \ingroup group_vision_function_copy + */ +VX_API_ENTRY vx_node VX_API_CALL vxCopyNode(vx_graph graph, vx_reference input, vx_reference output); + +/*! \brief Creates a batch GEMM node; the calculation formula is output = matrix_a * matrix_b + matrix_c. + * \param [in] graph The reference to the graph. + * \param [in] matrix_a The first input tensor. + * \param [in] matrix_b The second input tensor. Must be in the same data type and batch count as the first input tensor. + * \param [in] matrix_c The third input tensor. Must be in the same data type and batch count as the first input tensor. [optional] + * \param [in] trans_a If true, matrix_a is transposed before the calculation.
+ * \param [in] trans_b If true, matrix_b is transposed before the calculation. + * \param [in] trans_c If true, matrix_c is transposed before the calculation. [optional] + * \param [out] output The output tensor. Output dimensions must agree with the formula in the description. + * \return \ref vx_node. + * \retval vx_node A node reference. Any possible errors preventing a successful creation + * should be checked using \ref vxGetStatus + * \ingroup group_vision_function_gemm + */ +VX_API_ENTRY vx_node VX_API_CALL vxBatchGemmNode(vx_graph graph, + vx_tensor matrix_a, + vx_tensor matrix_b, + vx_tensor matrix_c, + vx_scalar trans_a, + vx_scalar trans_b, + vx_scalar trans_c, + vx_tensor output); + +typedef struct _vx_lut_params_s +{ + vx_enum lut_function; /*!< \brief Set VX_NN_ACTIVATION_NONE to disable the LUT table, set VX_NN_ACTIVATION_CUSTOM to customize the LUT table, or set other values to use a fixed LUT table */ + vx_float32 float_values[4]; /*!< \brief Float parameters of the fixed LUT table */ + vx_uint32 fvalues_count; /*!< \brief Count of float_values */ + vx_int32 int_values[4]; /*!< \brief Int parameters of the fixed LUT table */ + vx_uint32 ivalues_count; /*!< \brief Count of int_values */ + vx_lut in_lut; /*!< \brief Only valid when lut_function is VX_NN_ACTIVATION_CUSTOM */ + vx_lut out_lut; /*!< \brief Only valid when lut_function is VX_NN_ACTIVATION_CUSTOM */ +} vx_lut_params_s, * vx_lut_params; + +/*! \brief Creates a stream processor node. + * \param [in] graph The reference to the graph. + * \param [in] input_list The input tensor list. + * \param [in] input_count The input tensor count. + * \param [in] output_list The output tensor list. + * \param [in] output_count The output tensor count. + * \param [in] spinst_obj The stream processor instruction object. Use vxCreateSPINST() to create it. + * \param [in] lut_params The LUT parameters. Refer to \ref vx_lut_params_s. + * \return \ref vx_node. + * \retval vx_node A node reference. Any possible errors preventing a successful creation + * should be checked using \ref vxGetStatus + * \ingroup group_vision_function_sp + */ +VX_API_ENTRY vx_node VX_API_CALL vxStreamProcessorNode( + vx_graph graph, + vx_tensor* input_list, + vx_uint32 input_count, + vx_tensor* output_list, + vx_uint32 output_count, + vx_spinst spinst_obj, + vx_lut_params lut_params + ); + +#ifdef __cplusplus +} +#endif + +#endif diff --git a/unified-tina/inc/VX/vx_spinst.h b/unified-tina/inc/VX/vx_spinst.h new file mode 100644 index 0000000..b8766a6 --- /dev/null +++ b/unified-tina/inc/VX/vx_spinst.h @@ -0,0 +1,377 @@ +#ifndef _VX_SPINST_H_ +#define _VX_SPINST_H_ + +#ifdef __cplusplus +extern "C" { +#endif + +typedef enum _vx_sp_inst_type_e +{ + VX_SP_INST_TYPE_FADD, + VX_SP_INST_TYPE_FMULT, + VX_SP_INST_TYPE_MOVE, + VX_SP_INST_TYPE_PWL, + + VX_SP_INST_TYPE_COUNT, +} +vx_sp_inst_type_e; + +typedef enum _vx_sp_inst_type_fadd_e +{ + VX_SP_INST_TYPE_FADD_IDLE, // FADD-IDLE + VX_SP_INST_TYPE_FADD_ADD, // dst = src0 + src1 + VX_SP_INST_TYPE_FADD_SUB, // dst = src0 - src1 + + VX_SP_INST_TYPE_FADD_COUNT, +} +vx_sp_inst_type_fadd_e; + +typedef enum _vx_sp_inst_type_fmult_e +{ + VX_SP_INST_TYPE_FMULT_IDLE, /* FMULT-IDLE */ + VX_SP_INST_TYPE_FMULT_MUL, /* dst = src0 * src1 */ + VX_SP_INST_TYPE_FMULT_MUL_CLAMP, /* dst = clamp (src0, src1, R6, R7) */ + + VX_SP_INST_TYPE_FMULT_COUNT, +} +vx_sp_inst_type_fmult_e; + +typedef enum _vx_sp_inst_type_move_e +{ + VX_SP_INST_TYPE_MOVE_IDLE, + VX_SP_INST_TYPE_MOVE_MOVE, // dst = src1 + VX_SP_INST_TYPE_MOVE_SEL0, // dst = (src0 > 0) ?
src1[0] : src1[1] + VX_SP_INST_TYPE_MOVE_SEL1, // dst = (src0 > 0) ? src1 : FA-src0 // use FA's SRC0 + VX_SP_INST_TYPE_MOVE_IMMD, // dst = Constant assign immmediate + VX_SP_INST_TYPE_MOVE_ABS, // dst = abs(src1) + + VX_SP_INST_TYPE_MOVE_COUNT, +} +vx_sp_inst_type_move_e; + +typedef enum _vx_sp_inst_type_pwl_e +{ + VX_SP_INST_TYPE_PWL_IDLE, + VX_SP_INST_TYPE_PWL_SETUP_0, /* PWL ID = 0 */ + VX_SP_INST_TYPE_PWL_SETUP_1, /* Sigmode() */ + VX_SP_INST_TYPE_PWL_SETUP_2, /* Tanh() */ + + VX_SP_INST_TYPE_PWL_COUNT, +} +vx_sp_inst_type_pwl_e; + +typedef enum _vx_sp_inst_src_dst_e +{ + VX_SP_INST_SPINOUT, + VX_SP_INST_SR1, + VX_SP_INST_SR2, + VX_SP_INST_SR3, + VX_SP_INST_SR4, + VX_SP_INST_SR5, + VX_SP_INST_SR6, /* nn_clamp_min */ + VX_SP_INST_SR7, /* nn_clamp_max */ + VX_SP_INST_SR8, + VX_SP_INST_SR9, + VX_SP_INST_SR10, + VX_SP_INST_VR11, + VX_SP_INST_VR12, + VX_SP_INST_VR13, + VX_SP_INST_VR14, + VX_SP_INST_SETUPOUT, /* Input of PWL Mult and Add: FMInA, FMInB, FAInA, FAInB */ +} +vx_sp_inst_src_dst_e; + +typedef struct _vx_spinst_unit_param +{ + vx_enum op; /* vx_sp_inst_type_e */ + + struct + { + vx_enum op; /* vx_sp_inst_type_fadd/fmult/move/pwl_e */ + + struct + { + vx_uint8 src0; /* vx_sp_inst_src_dst_e */ + vx_uint8 src1; /* vx_sp_inst_src_dst_e */ + vx_uint8 dst; /* vx_sp_inst_src_dst_e */ + vx_float32 constant; + } var; + + } sub; + +} +vx_spinst_unit_param; + +/**********************************************************************************************/ + +typedef enum _vx_sp_attribute_e +{ + VX_SP_ATTRIBUTE_NONE, + + VX_SP_ATTRIBUTE_INPUT_TILE_MAPPING, + VX_SP_ATTRIBUTE_OUTPUT_COLLAPSE_X, + VX_SP_ATTRIBUTE_OUTPUT_COLLAPSE_Y, + VX_SP_ATTRIBUTE_OUTPUT_COLLAPSE_Z, + + VX_SP_ATTRIBUTE_PROG_INIT_INSTR_NUM, + VX_SP_ATTRIBUTE_PROG_LOOP_INSTR_NUM, + VX_SP_ATTRIBUTE_PROG_COMPLETE_INSTR_NUM, + VX_SP_ATTRIBUTE_PROG_ROUNDING_MODE, + VX_SP_ATTRIBUTE_INPUT_SETUP, + + VX_SP_ATTRIBUTE_IGNORED_LEADING_OUTPUTS, + VX_SP_ATTRIBUTE_FLUSH_CYCLE_NUM, + VX_SP_ATTRIBUTE_IGNORED_LEADING_V11_WR, + VX_SP_ATTRIBUTE_IGNORED_LEADING_V12_WR, + VX_SP_ATTRIBUTE_IGNORED_LEADING_V11_RD, + VX_SP_ATTRIBUTE_IGNORED_LEADING_V12_RD, + + VX_SP_ATTRIBUTE_CH0_POST_REDISTRIBUTE, + VX_SP_ATTRIBUTE_CH1_POST_REDISTRIBUTE, + VX_SP_ATTRIBUTE_V11_RESET_AT_START, + VX_SP_ATTRIBUTE_V12_RESET_AT_START, + VX_SP_ATTRIBUTE_V11_POP_CONFIG, + VX_SP_ATTRIBUTE_V12_POP_CONFIG, + VX_SP_ATTRIBUTE_ACCELERATOR_INPUT_SELECT, + VX_SP_ATTRIBUTE_IGNORED_LEADING_ACC_OUT, + VX_SP_ATTRIBUTE_SUM_ENGINE_RESET, + VX_SP_ATTRIBUTE_SUM_ENGINE_CONTROL, + VX_SP_ATTRIBUTE_SUM_ENGINE_NUM_CH_MINUS_ONE, + VX_SP_ATTRIBUTE_SUM_ENGINE_2D_ACCUM_STORAGE, + VX_SP_ATTRIBUTE_SUM_ENGINE_OP_SELECT, + + VX_SP_ATTRIBUTE_NUM_OF_ELEMENTS_PER_LOOP_PER_INPUT, + + VX_SP_ATTRIBUTE_NUM_OF_V11_RD_IN_FLUSH_CYCLE, + VX_SP_ATTRIBUTE_NUM_OF_V12_RD_IN_FLUSH_CYCLE, + VX_SP_ATTRIBUTE_NUM_OF_V11_WR_IN_FLUSH_CYCLE, + VX_SP_ATTRIBUTE_NUM_OF_V12_WR_IN_FLUSH_CYCLE, + + VX_SP_ATTRIBUTE_GENERAL_COUNT, + + VX_SP_ATTRIBUTE_CONST0, /* NN post multiplier */ + VX_SP_ATTRIBUTE_CONST1, /* NN neg pos multiplier */ + VX_SP_ATTRIBUTE_CONST2, /* NN tensor add const */ + VX_SP_ATTRIBUTE_CONST3, /* NN clamp max */ + VX_SP_ATTRIBUTE_CONST4, /* NN clmap min */ + + VX_SP_ATTRIBUTE_CONST_COUNT, + + VX_SP_ATTRIBUTE_SPLIT_AXIS, + VX_SP_ATTRIBUTE_SPLIT_MAX_SIZE, + VX_SP_ATTRIBUTE_SPLIT_TILEX_EQUAL_INIMAGEX, + + VX_SP_ATTRIBUTE_NOT_MERGE_CONVSP, + VX_SP_ATTRIBUTE_UPDATE_CONST0_TO_PCQ_COEF_TENSOR, + VX_SP_ATTRIBUTE_RESHAPE_ARRAY, /* bit layout | output:24-29 | input3:18-23 | input2:12-17 | input1:6-11 | input0:0-5 | */ + 
VX_SP_ATTRIBUTE_ALIGN_SP_CORE_AXIS, + VX_SP_ATTRIBUTE_KEEP_TILE_SIZE, + + VX_SP_ATTRIBUTE_TOTAL_COUNT, +} +vx_sp_attribute_e; + +typedef enum _vx_sp_attribute_input_tile_mapping_e +{ + VX_SP_ATTRIBUTE_INPUT_TILE_MAPPING_XYMERGE, + VX_SP_ATTRIBUTE_INPUT_TILE_MAPPING_YZMERGE, +} +vx_sp_attribute_input_tile_mapping_e; + +typedef enum _vx_sp_attribute_output_collapse_e +{ + VX_SP_ATTRIBUTE_OUTPUT_COLLAPSE_DISABLED, + VX_SP_ATTRIBUTE_OUTPUT_COLLAPSE_ENABLED, +} +vx_sp_attribute_output_collapse_e; + +typedef enum _vx_sp_attribute_rounding_mode_e +{ + VX_SP_ATTRIBUTE_PROG_ROUNDING_MODE_RTNE, + VX_SP_ATTRIBUTE_PROG_ROUNDING_MODE_STICKY, +} +vx_sp_attribute_rounding_mode_e; + +typedef enum _vx_sp_attribute_input_setup_e +{ + VX_SP_ATTRIBUTE_INPUT_SETUP_SINGLE_INPUT, + VX_SP_ATTRIBUTE_INPUT_SETUP_INTERLEAVE_TWO_INPUTS, + VX_SP_ATTRIBUTE_INPUT_SETUP_V11, + VX_SP_ATTRIBUTE_INPUT_SETUP_V12, +} +vx_sp_attribute_input_setup_e; + +typedef enum _vx_sp_attribute_ch_post_redistribute_e +{ + VX_SP_ATTRIBUTE_CH_POST_REDISTRIBUTE_DISABLED, + VX_SP_ATTRIBUTE_CH_POST_REDISTRIBUTE_SCALAR_GATHER, + VX_SP_ATTRIBUTE_CH_POST_REDISTRIBUTE_VECTOR_GATHER, + VX_SP_ATTRIBUTE_CH_POST_REDISTRIBUTE_VECTOR_SCATTER, +} +vx_sp_attribute_ch_post_redistribute_e; + +typedef enum _vx_sp_attribute_v_reset_at_start_e +{ + VX_SP_ATTRIBUTE_V_RESET_AT_START_NONE, + VX_SP_ATTRIBUTE_V_RESET_AT_START_RESET, +} +vx_sp_attribute_v_reset_at_start_e; + +typedef enum _vx_sp_attribute_v_pop_config_e +{ + VX_SP_ATTRIBUTE_V_POP_CONFIG_EVERY_READ, + VX_SP_ATTRIBUTE_V_POP_CONFIG_EVERY_ROW, +} +vx_sp_attribute_v_pop_config_e; + +typedef enum _vx_sp_attribute_accelerator_input_select_e +{ + VX_SP_ATTRIBUTE_ACCELERATOR_INPUT_SELECT_FROM_OUTPUT, + VX_SP_ATTRIBUTE_ACCELERATOR_INPUT_SELECT_FROM_ACCLERATOR, +} +vx_sp_attribute_accelerator_input_select_e; + +typedef enum _vx_sp_attribute_sum_engine_reset_e +{ + VX_SP_ATTRIBUTE_SUM_ENGINE_RESET_NONE, + VX_SP_ATTRIBUTE_SUM_ENGINE_RESET_RESET, +} +vx_sp_attribute_sum_engine_reset_e; + +typedef enum _vx_sp_attribute_sum_engine_control_e +{ + VX_SP_ATTRIBUTE_SUM_ENGINE_CONTROL_ACCUM_INTERNAL, + VX_SP_ATTRIBUTE_SUM_ENGINE_CONTROL_ACCUM_1D, + VX_SP_ATTRIBUTE_SUM_ENGINE_CONTROL_ACCUM_2D, +} +vx_sp_attribute_sum_engine_control_e; + +typedef enum _vx_sp_attribute_sum_engine_num_ch_minus_one_e +{ + VX_SP_ATTRIBUTE_SUM_ENGINE_NUM_CH_MINUS_ONE_ONE_CH, + VX_SP_ATTRIBUTE_SUM_ENGINE_NUM_CH_MINUS_ONE_TWO_CH, +} +vx_sp_attribute_sum_engine_num_ch_minus_one_e; + +typedef enum _vx_sp_attribute_sum_engine_2d_accum_storage_e +{ + VX_SP_ATTRIBUTE_SUM_ENGINE_2D_ACCUM_STORAGE_SAME, + VX_SP_ATTRIBUTE_SUM_ENGINE_2D_ACCUM_STORAGE_DIFFERENT, +} +vx_sp_attribute_sum_engine_2d_accum_storage_e; + +typedef enum _vx_sp_attribute_sum_engine_op_select_e +{ + VX_SP_ATTRIBUTE_SUM_ENGINE_SUM_OP, + VX_SP_ATTRIBUTE_SUM_ENGINE_MAX_OP +} vx_sp_attribute_sum_engine_op_select_e; + +typedef enum _vx_sp_attribute_reshape_e +{ + VX_SP_ATTRIBUTE_RESHAPE_CHW2CHW = 0x00, + VX_SP_ATTRIBUTE_RESHAPE_CHW2WHC = 0x06, + VX_SP_ATTRIBUTE_RESHAPE_CHW2WCH = 0x09, + VX_SP_ATTRIBUTE_RESHAPE_CHW2HWC = 0x12, + VX_SP_ATTRIBUTE_RESHAPE_CHW2HCW = 0x18, + VX_SP_ATTRIBUTE_RESHAPE_CHW2CWH = 0x21, +} +vx_sp_attribute_reshape_e; + +typedef enum _vx_sp_attribute_split_axis_e +{ + VX_SP_ATTRIBUTE_SPLIT_ON_AXIS_X, + VX_SP_ATTRIBUTE_SPLIT_ON_AXIS_Y, + VX_SP_ATTRIBUTE_SPLIT_ON_AXIS_Z, + VX_SP_ATTRIBUTE_SPLIT_ON_AXIS_XY, + VX_SP_ATTRIBUTE_SPLIT_ON_AXIS_YZ, + VX_SP_ATTRIBUTE_SPLIT_ON_AXIS_XYZ, +} +vx_sp_attribute_split_axis_e; + +typedef enum _vx_sp_attribute_tile_align_sp_core_e +{ + 
VX_SP_ATTRIBUTE_TILE_ALIGN_SP_CORE_NONE = 0, + VX_SP_ATTRIBUTE_TILE_ALIGN_SP_CORE_WITH_AXIS_X, + VX_SP_ATTRIBUTE_TILE_ALIGN_SP_CORE_WITH_AXIS_Y, + VX_SP_ATTRIBUTE_TILE_ALIGN_SP_CORE_WITH_AXIS_XY, +} +vx_sp_attribute_tile_align_sp_core_e; + +typedef enum _vx_sp_attribute_keep_tile_size_e +{ + VX_SP_ATTRIBUTE_KEEP_TILE_SIZE_NONE = 0, + VX_SP_ATTRIBUTE_KEEP_TILE_SIZE_WITH_AXIS_X, + VX_SP_ATTRIBUTE_KEEP_TILE_SIZE_WITH_AXIS_Y, + VX_SP_ATTRIBUTE_KEEP_TILE_SIZE_WITH_AXIS_XY, +} +vx_sp_attribute_keep_tile_size_e; + +/**********************************************************************************************/ + +/*! \brief Creates an external reference to a spinst data. + * \param [in] context The reference to the implementation context. + * \return A spinst data reference. + * \Any possible errors preventing a successful creation should be checked using \ref vxGetStatus. + * \ingroup group_object_spinst + */ +VX_API_ENTRY vx_spinst VX_API_CALL vxCreateSPINST( + vx_context context + ); + +/*! \brief Releases a reference to a external spinst object. + * The object may not be garbage collected until its total reference count is zero. + * \param [in] spinst_obj The pointer to the spinst data to release. + * \post After returning from this function the reference is zeroed. + * \return A \ref vx_status_e enumeration. + * \retval VX_SUCCESS No errors; all other values indicate failure + * \retval * An error occurred. See \ref vx_status_e. + * \ingroup group_object_spinst + */ +VX_API_ENTRY vx_status VX_API_CALL vxReleaseSPINST( + vx_spinst *spinst_obj + ); + +/*! \brief Add a instruction to spinst object. + * \param [in] spinst_obj The reference to the spinst object. + * \param [in] inst_unit_array The units of one instruction. Use a \ref vx_spinst_unit_param. + * \param [in] inst_unit_count The count of instruction units. + * \return A \ref vx_status_e enumeration. + * \retval VX_SUCCESS No errors. + * \retval VX_ERROR_INVALID_REFERENCE If data is not a \ref spinst_obj. + * \retval VX_ERROR_INVALID_PARAMETERS If any of parameters is incorrect. + * \retval VX_ERROR_NO_MEMORY If fail to allocate internal instruction memory. + * \ingroup group_object_spinst + */ +VX_API_ENTRY vx_status VX_API_CALL vxAddOneInstToSPINST( + vx_spinst spinst_obj, + vx_spinst_unit_param* inst_unit_array, + vx_uint8 inst_unit_count + ); + +/*! \brief Set various attributes of a spinst data. + * \param [in] spinst_obj The reference to the vx_spinst object to set. + * \param [in] attribute The attribute to set. Use a \ref vx_sp_attribute_e. + * \param [in] value The value of attribute. + * \return A \ref vx_status_e enumeration. + * \retval VX_SUCCESS No errors. + * \retval VX_ERROR_INVALID_REFERENCE If data is not a \ref vx_spinst. + * \retval VX_ERROR_INVALID_PARAMETERS If any of attribute is incorrect. + * \ingroup group_object_spinst + */ +VX_API_ENTRY vx_status VX_API_CALL vxSetAttributeToSPINST( + vx_spinst spinst_obj, + vx_enum attribute, + vx_uint32 value + ); + +VX_API_ENTRY vx_status VX_API_CALL vxGetAttributeToSPINST( + vx_spinst spinst_obj, + vx_enum attribute, + vx_uint32* value +); + +#ifdef __cplusplus +} +#endif + +#endif diff --git a/unified-tina/inc/VX/vx_types.h b/unified-tina/inc/VX/vx_types.h new file mode 100644 index 0000000..e75b353 --- /dev/null +++ b/unified-tina/inc/VX/vx_types.h @@ -0,0 +1,2014 @@ +/* + + * Copyright (c) 2012-2017 The Khronos Group Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef _OPENVX_TYPES_H_ +#define _OPENVX_TYPES_H_ + +/*! + * \file vx_types.h + * \brief The type definitions required by OpenVX Library. + */ + +#include +#include +#include + +/*! + * \internal + * \def VX_API_ENTRY + * \brief This is a tag used to identify exported, public API functions as + * distinct from internal functions, helpers, and other non-public interfaces. + * It can optionally be defined in the make system according the the compiler and intent. + * \ingroup group_basic_features + */ +#ifndef VX_API_ENTRY +#if defined(_WIN32) +#define VX_API_ENTRY __declspec(dllexport) +#else +#define VX_API_ENTRY __attribute__((visibility("default"))) +#endif +#endif +#ifndef VX_INTERNAL_ENTRY +#if defined(_WIN32) +#define VX_INTERNAL_ENTRY __declspec(dllexport) +#else +#define VX_INTERNAL_ENTRY __attribute__((visibility("default"))) +#endif +#endif +#ifndef VX_API_CALL +#if defined(_WIN32) +#define VX_API_CALL __stdcall +#else +#define VX_API_CALL +#endif +#endif +#ifndef VX_INTERNAL_CALL +#if defined(_WIN32) +#define VX_INTERNAL_CALL __stdcall +#else +#define VX_INTERNAL_CALL +#endif +#endif +#ifndef VX_CALLBACK +#if defined(_WIN32) +#define VX_CALLBACK __stdcall +#else +#define VX_CALLBACK +#endif + +#endif + +/*! \brief An 8 bit ASCII character. + * \ingroup group_basic_features + */ +typedef char vx_char; + +/*! \brief An 8-bit unsigned value. + * \ingroup group_basic_features + */ +typedef uint8_t vx_uint8; + +/*! \brief A 16-bit unsigned value. + * \ingroup group_basic_features + */ +typedef uint16_t vx_uint16; + +/*! \brief A 32-bit unsigned value. + * \ingroup group_basic_features + */ +typedef uint32_t vx_uint32; + +/*! \brief A 64-bit unsigned value. + * \ingroup group_basic_features + */ +typedef uint64_t vx_uint64; + +/*! \brief An 8-bit signed value. + * \ingroup group_basic_features + */ +typedef int8_t vx_int8; + +/*! \brief A 16-bit signed value. + * \ingroup group_basic_features + */ +typedef int16_t vx_int16; + +/*! \brief A 32-bit signed value. + * \ingroup group_basic_features + */ +typedef int32_t vx_int32; + +/*! \brief A 64-bit signed value. + * \ingroup group_basic_features + */ +typedef int64_t vx_int64; + +typedef uint32_t vx_bitfield; + +#if defined(EXPERIMENTAL_PLATFORM_SUPPORTS_16_FLOAT) + +/*! \brief A 16-bit float value. + * \ingroup group_basic_features + */ +typedef hfloat vx_float16; +#endif + +/*! \brief A 32-bit float value. + * \ingroup group_basic_features + */ +typedef float vx_float32; + +/*! \brief A 64-bit float value (aka double). + * \ingroup group_basic_features + */ +typedef double vx_float64; + +/*! \brief A generic opaque reference to any object within OpenVX. + * \details A user of OpenVX should not assume that this can be cast directly to anything; + * however, any object in OpenVX can be cast back to this for the purposes of + * querying attributes of the object or for passing the object as a parameter to + * functions that take a \ref vx_reference type. + * If the API does not take that specific type but may take others, an + * error may be returned from the API. 
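+ * \par Example
+ * A small sketch of the generic-reference pattern (illustrative only; `image` is assumed to be
+ * an already created \ref vx_image):
+ * \code
+ * vx_reference ref  = (vx_reference)image;
+ * vx_enum      type = VX_TYPE_INVALID;
+ * if (vxGetStatus(ref) == VX_SUCCESS)
+ *     vxQueryReference(ref, VX_REFERENCE_TYPE, &type, sizeof(type));
+ * // For a valid vx_image, type now holds VX_TYPE_IMAGE.
+ * \endcode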
+ * \ingroup group_reference + */ +typedef struct _vx_reference *vx_reference; + +/*! \brief Sets the standard enumeration type size to be a fixed quantity. + * \details All enumerable fields must use this type as the container to + * enforce enumeration ranges and sizeof() operations. + * \ingroup group_basic_features + */ +typedef int32_t vx_enum; + +/*! \brief A wrapper of size_t to keep the naming convention uniform. + * \ingroup group_basic_features + */ +typedef size_t vx_size; + +/*! \brief Used to hold a VX_DF_IMAGE code to describe the pixel format and color space. + * \ingroup group_basic_features + */ +typedef uint32_t vx_df_image; + +/*! \brief Holds the address of a variable where the map/unmap functions return a map identifier. + * \ingroup group_image + */ +typedef uintptr_t vx_map_id; + +/*! \brief An opaque reference to a scalar. + * \details A scalar can be up to 64 bits wide. + * \see vxCreateScalar + * \ingroup group_scalar + * \extends vx_reference + */ +typedef struct _vx_scalar *vx_scalar; + +/*! \brief An opaque reference to an image. + * \see vxCreateImage + * \ingroup group_image + * \extends vx_reference + */ +typedef struct _vx_image *vx_image; + +/*! \brief An opaque reference to the descriptor of a kernel. + * \see vxGetKernelByName + * \see vxGetKernelByEnum + * \ingroup group_kernel + * \extends vx_reference + */ +typedef struct _vx_kernel *vx_kernel; + +/*! \brief An opaque reference to a single parameter. + * \see vxGetParameterByIndex + * \ingroup group_parameter + * \extends vx_reference + */ +typedef struct _vx_parameter *vx_parameter; + +/*! \brief An opaque reference to a kernel node. + * \see vxCreateGenericNode + * \ingroup group_node + * \extends vx_reference + */ +typedef struct _vx_node *vx_node; + +/*! \brief An opaque reference to a graph + * \see vxCreateGraph + * \ingroup group_graph + * \extends vx_reference + */ +typedef struct _vx_graph *vx_graph; + +/*! \brief An opaque reference to the implementation context. + * \see vxCreateContext + * \ingroup group_context + * \extends vx_reference + */ +typedef struct _vx_context *vx_context; + +/*! \brief The delay object. This is like a ring buffer of objects that is + * maintained by the OpenVX implementation. + * \see vxCreateDelay + * \extends vx_reference + * \ingroup group_delay + */ +typedef struct _vx_delay *vx_delay; + +/*! \brief The Look-Up Table (LUT) Object. + * \extends vx_reference + * \ingroup group_lut + */ +typedef struct _vx_lut *vx_lut; + +/*! \brief The Distribution object. This has a user-defined number of bins over + * a user-defined range (within a uint32_t range). + * \extends vx_reference + * \ingroup group_distribution + */ +typedef struct _vx_distribution *vx_distribution; + +/*! \brief The Matrix Object. An MxN matrix of some unit type. + * \extends vx_reference + * \ingroup group_matrix + */ +typedef struct _vx_matrix *vx_matrix; + +/*! \brief The Image Pyramid object. A set of scaled images. + * \extends vx_reference + * \ingroup group_pyramid + */ +typedef struct _vx_pyramid *vx_pyramid; + +/*! \brief The Threshold Object. A thresholding object contains the types and + * limit values of the thresholding required. + * \extends vx_reference + * \ingroup group_threshold + */ +typedef struct _vx_threshold *vx_threshold; + +/*! \brief The Convolution Object. A user-defined convolution kernel of MxM elements. + * \extends vx_reference + * \ingroup group_convolution + */ +typedef struct _vx_convolution *vx_convolution; + +/*! \brief The remap table Object. 
A remap table contains per-pixel mapping of + * output pixels to input pixels. + * \ingroup group_remap + */ +typedef struct _vx_remap *vx_remap; + +/*! \brief The Array Object. Array is a strongly-typed container for other data structures. + * \ingroup group_array + */ +typedef struct _vx_array *vx_array; + +/*! \brief The ObjectArray Object. ObjectArray is a strongly-typed container of OpenVX data-objects. + * \ingroup group_object_array + */ +typedef struct _vx_object_array *vx_object_array; + + /*! \brief The multidimensional data object (Tensor). + * \see vxCreateTensor + * \ingroup group_object_tensor + * \extends vx_reference + */ +typedef struct _vx_tensor_t * vx_tensor; + +/*! \brief The multi dimensional view data structure. +* \details Used to split tensors into several views. Or concatenate several view into one tensor. +* \see vxCreateTensorFromView +* \ingroup group_tensor +*/ +typedef struct _vx_tensor_view_t * vx_tensor_view; + +/*! \brief The addressing of a tensor view patch structure is used by the Host only +* to address elements in a tensor view patch. +* \see \ref vxCopyTensorPatch +* \ingroup group_tensor +*/ +typedef struct _vx_tensor_addressing_t * vx_tensor_addressing; + +/*! + * \brief The addressing image patch structure is used by the Host only + * to address pixels in an image patch. The fields of the structure are defined as: + * \arg dim - The dimensions of the image in logical pixel units in the x & y direction. + * \arg stride - The physical byte distance from a logical pixel to the next + * logically adjacent pixel in the positive x or y direction. + * \arg scale - The relationship of scaling from the primary plane (typically + * the zero indexed plane) to this plane. An integer down-scaling factor of \f$ f \f$ shall be + * set to a value equal to \f$ scale = \frac{unity}{f} \f$ and an integer up-scaling factor of \f$ f \f$ + * shall be set to a value of \f$ scale = unity * f \f$. \f$ unity \f$ is defined as \ref VX_SCALE_UNITY. + * \arg step - The step is the number of logical pixel units to skip to + * arrive at the next physically unique pixel. For example, on a plane that is + * half-scaled in a dimension, the step in that dimension is 2 to indicate that + * every other pixel in that dimension is an alias. This is useful in situations + * where iteration over unique pixels is required, such as in serializing + * or de-serializing the image patch information. + * \see \ref vxMapImagePatch + * \ingroup group_image + */ +typedef struct _vx_tensorpatch_addressing_t { + vx_uint32 num_of_dims; /*!< \brief Width of patch in X dimension in pixels. */ + vx_size *dim_sizes; /*!< \brief Pointer to dimensions array */ + vx_size *strides; /*!< \brief Pointer to strides array */ + vx_uint16 stride_x_bits; /*!< \brief Stride in X dimension in bits. Used when stride_x is not an integer number of bytes. */ +} vx_tensorpatch_addressing_t; + +/*! \brief The addressing of a tensor patch structure is used by the Host only +* to address elements in a tensor view patch. +* \see \ref vxCopyTensorPatch2 +* \ingroup group_tensor +*/ +typedef struct _vx_tensorpatch_addressing_t * vx_trensor_addressing; + +/*! \brief The weight bias parameter for fused layers + * \ingroup group_cnn + */ +typedef struct _vx_weights_biases_parameter_s * vx_weights_biases_parameter; + +/*! \brief The object for stream processor + * \ingroup group_spinst + */ +typedef struct _vx_spinst_s * vx_spinst; + +/*! \brief A Boolean value. 
+ * This allows 0 to be FALSE, as it is in C, and any non-zero to be TRUE. + * \code + * vx_bool ret = vx_true_e; + * if (ret) printf("true!\n"); + * ret = vx_false_e; + * if (!ret) printf("false!\n"); + * \endcode + * This would print both strings. + * \see vx_bool + * \ingroup group_basic_features + */ +typedef enum _vx_bool_e { + /*! \brief The "false" value. */ + vx_false_e = 0, + /*! \brief The "true" value. */ + vx_true_e, +} vx_bool_e; + +/*! \brief A formal boolean type with known fixed size. + * \see vx_bool_e + * \ingroup group_basic_features + */ +typedef vx_enum vx_bool; + +/*! + * \brief This object is used by output validation functions to specify the meta data + * of the expected output data object. + * \note When the actual output object of the user node is virtual, the information + * given through the vx_meta_format object allows the OpenVX framework to automatically + * create the data object when meta data were not specified by the application at object + * creation time. + * \ingroup group_user_kernels + */ +typedef struct _vx_meta_format* vx_meta_format; + +/*! \brief The type enumeration lists all the known types in OpenVX. + * \ingroup group_basic_features + */ +enum vx_type_e { + VX_TYPE_INVALID = 0x000,/*!< \brief An invalid type value. When passed an error must be returned. */ + VX_TYPE_CHAR = 0x001,/*!< \brief A \ref vx_char. */ + VX_TYPE_INT8 = 0x002,/*!< \brief A \ref vx_int8. */ + VX_TYPE_UINT8 = 0x003,/*!< \brief A \ref vx_uint8. */ + VX_TYPE_INT16 = 0x004,/*!< \brief A \ref vx_int16. */ + VX_TYPE_UINT16 = 0x005,/*!< \brief A \ref vx_uint16. */ + VX_TYPE_INT32 = 0x006,/*!< \brief A \ref vx_int32. */ + VX_TYPE_UINT32 = 0x007,/*!< \brief A \ref vx_uint32. */ + VX_TYPE_INT64 = 0x008,/*!< \brief A \ref vx_int64. */ + VX_TYPE_UINT64 = 0x009,/*!< \brief A \ref vx_uint64. */ + VX_TYPE_FLOAT32 = 0x00A,/*!< \brief A \ref vx_float32. */ + VX_TYPE_FLOAT64 = 0x00B,/*!< \brief A \ref vx_float64. */ + VX_TYPE_ENUM = 0x00C,/*!< \brief A \ref vx_enum. Equivalent in size to a \ref vx_int32. */ + VX_TYPE_SIZE = 0x00D,/*!< \brief A \ref vx_size. */ + VX_TYPE_DF_IMAGE = 0x00E,/*!< \brief A \ref vx_df_image. */ + VX_TYPE_FLOAT16 = 0x00F,/*!< \brief A \ref vx_float16. */ + VX_TYPE_BOOL = 0x010,/*!< \brief A \ref vx_bool. */ + VX_TYPE_BOOL8 = 0x011,/*!< \brief A \ref vx_bool8. */ + + VX_TYPE_RECTANGLE = 0x020,/*!< \brief A \ref vx_rectangle_t. */ + VX_TYPE_KEYPOINT = 0x021,/*!< \brief A \ref vx_keypoint_t. */ + VX_TYPE_COORDINATES2D = 0x022,/*!< \brief A \ref vx_coordinates2d_t. */ + VX_TYPE_COORDINATES3D = 0x023,/*!< \brief A \ref vx_coordinates3d_t. */ + VX_TYPE_COORDINATES2DF = 0x024,/*!< \brief A \ref vx_coordinates2df_t. */ + + /* Reserve enums that are defined in khronos extensions + NN extensions: + VX_TYPE_NN_CONVOLUTION_PARAMS = 0x025, + VX_TYPE_NN_DECONVOLUTION_PARAMS = 0x026, + VX_TYPE_NN_ROI_POOL_PARAMS = 0x027, + Classifier extension: + VX_TYPE_CLASSIFER_MODEL = 0x02C, + */ + VX_TYPE_HOG_PARAMS = 0x028, /*!< \brief A \ref vx_hog_t. */ + VX_TYPE_HOUGH_LINES_PARAMS = 0x029, /*!< \brief A \ref vx_hough_lines_p_t. */ + VX_TYPE_LINE_2D = 0x02A, /*!< \brief A \ref vx_line2d_t. */ + VX_TYPE_TENSOR_MATRIX_MULTIPLY_PARAMS = 0x02B, /*!< \brief A \ref vx_tensor_matrix_multiply_params_t. */ + + + VX_TYPE_USER_STRUCT_START = 0x100,/*!< \brief A user-defined struct base index.*/ + VX_TYPE_VENDOR_STRUCT_START = 0x400,/*!< \brief A vendor-defined struct base index.*/ + VX_TYPE_KHRONOS_OBJECT_START = 0x800,/*!< \brief A Khronos defined object base index. 
*/ + VX_TYPE_VENDOR_OBJECT_START = 0xC00,/*!< \brief A vendor defined object base index. */ + + VX_TYPE_WEIGHTS_BIASES_PARAMETER = VX_TYPE_VENDOR_OBJECT_START, + VX_TYPE_WEIGHTS_BIASES_PARAMETER_BASE = VX_TYPE_VENDOR_OBJECT_START+1, + + VX_TYPE_KHRONOS_STRUCT_MAX = VX_TYPE_USER_STRUCT_START - 1,/*!< \brief A value for comparison between Khronos defined structs and user structs. */ + + VX_TYPE_USER_STRUCT_END = VX_TYPE_VENDOR_STRUCT_START - 1,/*!< \brief A value for comparison between user structs and vendor structs. */ + VX_TYPE_VENDOR_STRUCT_END = VX_TYPE_KHRONOS_OBJECT_START - 1,/*!< \brief A value for comparison between vendor structs and Khronos defined objects. */ + VX_TYPE_KHRONOS_OBJECT_END = VX_TYPE_VENDOR_OBJECT_START - 1,/*!< \brief A value for comparison between Khronos defined objects and vendor structs. */ + VX_TYPE_VENDOR_OBJECT_END = 0xFFF,/*!< \brief A value used for bound checking of vendor objects */ + + + VX_TYPE_REFERENCE = 0x800,/*!< \brief A \ref vx_reference. */ + VX_TYPE_CONTEXT = 0x801,/*!< \brief A \ref vx_context. */ + VX_TYPE_GRAPH = 0x802,/*!< \brief A \ref vx_graph. */ + VX_TYPE_NODE = 0x803,/*!< \brief A \ref vx_node. */ + VX_TYPE_KERNEL = 0x804,/*!< \brief A \ref vx_kernel. */ + VX_TYPE_PARAMETER = 0x805,/*!< \brief A \ref vx_parameter. */ + VX_TYPE_DELAY = 0x806,/*!< \brief A \ref vx_delay. */ + VX_TYPE_LUT = 0x807,/*!< \brief A \ref vx_lut. */ + VX_TYPE_DISTRIBUTION = 0x808,/*!< \brief A \ref vx_distribution. */ + VX_TYPE_PYRAMID = 0x809,/*!< \brief A \ref vx_pyramid. */ + VX_TYPE_THRESHOLD = 0x80A,/*!< \brief A \ref vx_threshold. */ + VX_TYPE_MATRIX = 0x80B,/*!< \brief A \ref vx_matrix. */ + VX_TYPE_CONVOLUTION = 0x80C,/*!< \brief A \ref vx_convolution. */ + VX_TYPE_SCALAR = 0x80D,/*!< \brief A \ref vx_scalar. when needed to be completely generic for kernel validation. */ + VX_TYPE_ARRAY = 0x80E,/*!< \brief A \ref vx_array. */ + VX_TYPE_IMAGE = 0x80F,/*!< \brief A \ref vx_image. */ + VX_TYPE_REMAP = 0x810,/*!< \brief A \ref vx_remap. */ + VX_TYPE_ERROR = 0x811,/*!< \brief An error object which has no type. */ + VX_TYPE_META_FORMAT = 0x812,/*!< \brief A \ref vx_meta_format. */ + VX_TYPE_OBJECT_ARRAY = 0x813,/*!< \brief A \ref vx_object_array. */ + /* Reserved for IX and XML extensions */ + /* VX_TYPE_IMPORT = 0x814, !< \brief A \ref vx_import. */ + VX_TYPE_TENSOR = 0x815,/*!< \brief A \ref vx_tensor. */ + /* Reserved for VX_TYPE_TARGET extensions*/ + VX_TYPE_TARGET = 0x816,/*!< \brief A \ref vx_target */ + VX_TYPE_TENSOR_VIEW = 0x817,/*!< \brief A \ref vx_tensor_view. */ + VX_TYPE_TENSOR_ADDRESS = 0x818,/*!< \brief A \ref vx_tensor_addressing. */ + VX_TYPE_TENSOR_MEM = 0x819,/*!< \brief A \ref vx_tensor_alloc_info. */ + + /* \todo add new object types here */ + VX_TYPE_BFLOAT16 = 0x81A,/*!< \brief A \ref vx_bfloat16. */ + + VX_TYPE_SPINST = 0x81B,/*!< \brief A \ref vx_spinst. */ + VX_TYPE_INT4 = 0x81C,/*!< \brief A \ref signed 4bits tensor.. */ + VX_TYPE_UINT4 = 0x81D,/*!< \brief A \ref unsigned 4bits tensor.. */ + VX_TYPE_FLOAT8_E4M3 = 0x81E,/*!< \brief A \ref vx_float8_e4m3. */ + VX_TYPE_FLOAT8_E5M2 = 0x81F,/*!< \brief A \ref vx_float8_e5m2. */ +}; + +/*! \brief The enumeration of all status codes. + * \see vx_status. + * \ingroup group_basic_features + */ +enum vx_status_e { + VX_ERROR_VENDOR_VSI_END = -2000, /*!< \brief A vendor defined error status end base. */ + /* add new error here*/ + VX_ERROR_CANCEL_JOB = -1001, /*!< \brief Indicates that a VIP job was cancelled. 
*/ + VX_ERROR_VENDOR_VSI_START = -1000, /*!< \brief A vendor defined error status start base. */ + + VX_STATUS_MIN = -25,/*!< \brief Indicates the lower bound of status codes in VX. Used for bounds checks only. */ + /* add new codes here */ + VX_ERROR_REFERENCE_NONZERO = -24,/*!< \brief Indicates that an operation did not complete due to a reference count being non-zero. */ + VX_ERROR_MULTIPLE_WRITERS = -23,/*!< \brief Indicates that the graph has more than one node outputting to the same data object. This is an invalid graph structure. */ + VX_ERROR_GRAPH_ABANDONED = -22,/*!< \brief Indicates that the graph is stopped due to an error or a callback that abandoned execution. */ + VX_ERROR_GRAPH_SCHEDULED = -21,/*!< \brief Indicates that the supplied graph already has been scheduled and may be currently executing. */ + VX_ERROR_INVALID_SCOPE = -20,/*!< \brief Indicates that the supplied parameter is from another scope and cannot be used in the current scope. */ + VX_ERROR_INVALID_NODE = -19,/*!< \brief Indicates that the supplied node could not be created.*/ + VX_ERROR_INVALID_GRAPH = -18,/*!< \brief Indicates that the supplied graph has invalid connections (cycles). */ + VX_ERROR_INVALID_TYPE = -17,/*!< \brief Indicates that the supplied type parameter is incorrect. */ + VX_ERROR_INVALID_VALUE = -16,/*!< \brief Indicates that the supplied parameter has an incorrect value. */ + VX_ERROR_INVALID_DIMENSION = -15,/*!< \brief Indicates that the supplied parameter is too big or too small in dimension. */ + VX_ERROR_INVALID_FORMAT = -14,/*!< \brief Indicates that the supplied parameter is in an invalid format. */ + VX_ERROR_INVALID_LINK = -13,/*!< \brief Indicates that the link is not possible as specified. The parameters are incompatible. */ + VX_ERROR_INVALID_REFERENCE = -12,/*!< \brief Indicates that the reference provided is not valid. */ + VX_ERROR_INVALID_MODULE = -11,/*!< \brief This is returned from \ref vxLoadKernels when the module does not contain the entry point. */ + VX_ERROR_INVALID_PARAMETERS = -10,/*!< \brief Indicates that the supplied parameter information does not match the kernel contract. */ + VX_ERROR_OPTIMIZED_AWAY = -9,/*!< \brief Indicates that the object refered to has been optimized out of existence. */ + VX_ERROR_NO_MEMORY = -8,/*!< \brief Indicates that an internal or implicit allocation failed. Typically catastrophic. After detection, deconstruct the context. \see vxVerifyGraph. */ + VX_ERROR_NO_RESOURCES = -7,/*!< \brief Indicates that an internal or implicit resource can not be acquired (not memory). This is typically catastrophic. After detection, deconstruct the context. \see vxVerifyGraph. */ + VX_ERROR_NOT_COMPATIBLE = -6,/*!< \brief Indicates that the attempt to link two parameters together failed due to type incompatibilty. */ + VX_ERROR_NOT_ALLOCATED = -5,/*!< \brief Indicates to the system that the parameter must be allocated by the system. */ + VX_ERROR_NOT_SUFFICIENT = -4,/*!< \brief Indicates that the given graph has failed verification due to an insufficient number of required parameters, which cannot be automatically created. Typically this indicates required atomic parameters. \see vxVerifyGraph. */ + VX_ERROR_NOT_SUPPORTED = -3,/*!< \brief Indicates that the requested set of parameters produce a configuration that cannot be supported. Refer to the supplied documentation on the configured kernels. \see vx_kernel_e. 
This is also returned if a function to set an attribute is called on a Read-only attribute.*/ + VX_ERROR_NOT_IMPLEMENTED = -2,/*!< \brief Indicates that the requested kernel is missing. \see vx_kernel_e vxGetKernelByName. */ + VX_FAILURE = -1,/*!< \brief Indicates a generic error code, used when no other describes the error. */ + VX_SUCCESS = 0,/*!< \brief No error. */ +}; + +/*! \brief A formal status type with known fixed size. + * \see vx_status_e + * \ingroup group_basic_features + */ +typedef vx_enum vx_status; + +/*! \brief The formal typedef of the response from the callback. + * \see vx_action_e + * \ingroup group_node_callback + */ +typedef vx_enum vx_action; + +/*! \brief A callback to the client after a particular node has completed. + * \see vx_action + * \see vxAssignNodeCallback + * \param [in] node The node to which the callback was attached. + * \return An action code from \ref vx_action_e. + * \ingroup group_node_callback + */ +typedef vx_action (VX_CALLBACK *vx_nodecomplete_f)(vx_node node); + +/*! \brief A callback to the client for querying information of a node. + * \see vx_action + * \see vxAssignNodeCallback + * \param [in] node The node to which the callback was attached. + * \return An action code from \ref vx_action_e. + * \ingroup group_node_callback + */ +typedef vx_status (VX_CALLBACK *vx_nodequery_f)(vx_node node); + +/*! \brief Vendor IDs are 2 nibbles in size and are located in the upper byte of + * the 4 bytes of an enumeration. + * \ingroup group_basic_features + */ +#define VX_VENDOR_MASK (0xFFF00000) + +/*! \brief A type mask removes the scalar/object type from the attribute. + * It is 3 nibbles in size and is contained between the third and second byte. + * \see vx_type_e + * \ingroup group_basic_features + */ +#define VX_TYPE_MASK (0x000FFF00) + +/*! \brief A library is a set of vision kernels with its own ID supplied by a vendor. + * The vendor defines the library ID. The range is \f$ [0,2^{8}-1] \f$ inclusive. + * \ingroup group_basic_features + */ +#define VX_LIBRARY_MASK (0x000FF000) + +/*! \brief An individual kernel in a library has its own unique ID within \f$ [0,2^{12}-1] \f$ (inclusive). + * \ingroup group_basic_features + */ +#define VX_KERNEL_MASK (0x00000FFF) + +/*! \brief An object's attribute ID is within the range of \f$ [0,2^{8}-1] \f$ (inclusive). + * \ingroup group_basic_features + */ +#define VX_ATTRIBUTE_ID_MASK (0x000000FF) + +/*! \brief A type of enumeration. The valid range is between \f$ [0,2^{8}-1] \f$ (inclusive). + * \ingroup group_basic_features + */ +#define VX_ENUM_TYPE_MASK (0x000FF000) + +/*! \brief A generic enumeration list can have values between \f$ [0,2^{12}-1] \f$ (inclusive). + * \ingroup group_basic_features + */ +#define VX_ENUM_MASK (0x00000FFF) + +/*! \brief A macro to extract the vendor ID from the enumerated value. + * \ingroup group_basic_features + */ +#define VX_VENDOR(e) (((vx_uint32)e & VX_VENDOR_MASK) >> 20) + +/*! \brief A macro to extract the type from an enumerated attribute value. + * \ingroup group_basic_features + */ +#define VX_TYPE(e) (((vx_uint32)e & VX_TYPE_MASK) >> 8) + +/*! \brief A macro to extract the enum type from an enumerated value. + * \ingroup group_basic_features + */ +#define VX_ENUM_TYPE(e) (((vx_uint32)e & VX_ENUM_TYPE_MASK) >> 12) + +/*! \brief A macro to extract the kernel library enumeration from a enumerated kernel value. 
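 * Worked example (editorial note, not part of the original header), using the
 * unconditional definitions of VX_KERNEL_BASE and VX_ATTRIBUTE_BASE given below:
 * for an enumerated kernel value e = VX_KERNEL_BASE(vendor, lib) + k with k < 0x1000,
 * VX_VENDOR(e) == vendor, VX_LIBRARY(e) == lib and (e & VX_KERNEL_MASK) == k;
 * likewise, for an attribute e = VX_ATTRIBUTE_BASE(vendor, object) + id with id < 0x100,
 * VX_TYPE(e) == object and (e & VX_ATTRIBUTE_ID_MASK) == id.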
+ * \ingroup group_basic_features + */ +#define VX_LIBRARY(e) (((vx_uint32)e & VX_LIBRARY_MASK) >> 12) + +#if defined(_LITTLE_ENDIAN_) || (__BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__) || defined(_WIN32) +#define VX_DF_IMAGE(a,b,c,d) ((a) | (b << 8) | (c << 16) | (d << 24)) +#define VX_ATTRIBUTE_BASE(vendor, object) (((vendor) << 20) | (object << 8)) +#define VX_KERNEL_BASE(vendor, lib) (((vendor) << 20) | (lib << 12)) +#define VX_ENUM_BASE(vendor, id) (((vendor) << 20) | (id << 12)) +#elif defined(_BIG_ENDIAN_) || (__BYTE_ORDER__ == __ORDER_BIG_ENDIAN__) +#define VX_DF_IMAGE(a,b,c,d) ((d) | (c << 8) | (b << 16) | (a << 24)) +#define VX_ATTRIBUTE_BASE(vendor, object) ((vendor) | (object << 12)) +#define VX_KERNEL_BASE(vendor, lib) ((vendor) | (lib << 12)) +#define VX_ENUM_BASE(vendor, id) ((vendor) | (id << 12)) +#else +#error "Endian-ness must be defined!" +#endif + +/*! \def VX_DF_IMAGE + * \brief Converts a set of four chars into a \c uint32_t container of a VX_DF_IMAGE code. + * \note Use a \ref vx_df_image variable to hold the value. + * \ingroup group_basic_features + */ +#define VX_DF_IMAGE(a,b,c,d) ((a) | (b << 8) | (c << 16) | (d << 24)) + +/*! \def VX_ATTRIBUTE_BASE + * \brief Defines the manner in which to combine the Vendor and Object IDs to get + * the base value of the enumeration. + * \ingroup group_basic_features + */ +#define VX_ATTRIBUTE_BASE(vendor, object) (((vendor) << 20) | (object << 8)) + +/*! \def VX_KERNEL_BASE + * \brief Defines the manner in which to combine the Vendor and Library IDs to get + * the base value of the enumeration. + * \ingroup group_basic_features + */ +#define VX_KERNEL_BASE(vendor, lib) (((vendor) << 20) | (lib << 12)) + +/*! \def VX_ENUM_BASE + * \brief Defines the manner in which to combine the Vendor and Object IDs to get + * the base value of the enumeration. + * \details From any enumerated value (with exceptions), the vendor, and enumeration + * type should be extractable. Those types that are exceptions are + * \ref vx_vendor_id_e, \ref vx_type_e, \ref vx_enum_e, \ref vx_df_image_e, and \c vx_bool. + * \ingroup group_basic_features + */ +#define VX_ENUM_BASE(vendor, id) (((vendor) << 20) | (id << 12)) + +/*! \brief The set of supported enumerations in OpenVX. + * \details These can be extracted from enumerated values using \ref VX_ENUM_TYPE. + * \ingroup group_basic_features + */ +enum vx_enum_e { + VX_ENUM_DIRECTION = 0x00, /*!< \brief Parameter Direction. */ + VX_ENUM_ACTION = 0x01, /*!< \brief Action Codes. */ + VX_ENUM_HINT = 0x02, /*!< \brief Hint Values. */ + VX_ENUM_DIRECTIVE = 0x03, /*!< \brief Directive Values. */ + VX_ENUM_INTERPOLATION = 0x04, /*!< \brief Interpolation Types. */ + VX_ENUM_OVERFLOW = 0x05, /*!< \brief Overflow Policies. */ + VX_ENUM_COLOR_SPACE = 0x06, /*!< \brief Color Space. */ + VX_ENUM_COLOR_RANGE = 0x07, /*!< \brief Color Space Range. */ + VX_ENUM_PARAMETER_STATE = 0x08, /*!< \brief Parameter State. */ + VX_ENUM_CHANNEL = 0x09, /*!< \brief Channel Name. */ + VX_ENUM_CONVERT_POLICY = 0x0A, /*!< \brief Convert Policy. */ + VX_ENUM_THRESHOLD_TYPE = 0x0B, /*!< \brief Threshold Type List. */ + VX_ENUM_BORDER = 0x0C, /*!< \brief Border Mode List. */ + VX_ENUM_COMPARISON = 0x0D, /*!< \brief Comparison Values. */ + VX_ENUM_MEMORY_TYPE = 0x0E, /*!< \brief The memory type enumeration. */ + VX_ENUM_TERM_CRITERIA = 0x0F, /*!< \brief A termination criteria. */ + VX_ENUM_NORM_TYPE = 0x10, /*!< \brief A norm type. */ + VX_ENUM_ACCESSOR = 0x11, /*!< \brief An accessor flag type. 
*/ + VX_ENUM_ROUND_POLICY = 0x12, /*!< \brief Rounding Policy. */ + VX_ENUM_TARGET = 0x13, /*!< \brief Target. */ + VX_ENUM_BORDER_POLICY = 0x14, /*!< \brief Unsupported Border Mode Policy List. */ + VX_ENUM_GRAPH_STATE = 0x15, /*!< \brief Graph attribute states. */ + VX_ENUM_NONLINEAR = 0x16, /*!< \brief Non-linear function list. */ + VX_ENUM_PATTERN = 0x17, /*!< \brief Matrix pattern enumeration. */ + VX_ENUM_LBP_FORMAT = 0x18, /*!< \brief Lbp format. */ + VX_ENUM_COMP_METRIC = 0x19, /*!< \brief Compare metric. */ + +/* NN extension + VX_ENUM_NN_ROUNDING_TYPE = 0x1A, + VX_ENUM_NN_POOLING_TYPE = 0x1B, + VX_ENUM_NN_NORMALIZATION_TYPE = 0x1C, + VX_ENUM_NN_ACTIVATION_FUNCTION_TYPE = 0x1D, +*/ + +/* Classifier extension + VX_ENUM_CLASSIFIER_MODEL= 0x1E, +*/ +/* IX extension + VX_ENUM_IX_USE = 0x1F, !< \brief How to use references in import and export. */ + VX_ENUM_SCALAR_OPERATION= 0X20 /*!< \brief Scalar operation list. */ + }; + +/*! \brief A return code enumeration from a \ref vx_nodecomplete_f during execution. + * \see vxAssignNodeCallback + * \ingroup group_node_callback + */ +enum vx_action_e { + /*! \brief Continue executing the graph with no changes. */ + VX_ACTION_CONTINUE = VX_ENUM_BASE(VX_ID_KHRONOS, VX_ENUM_ACTION) + 0x0, + /*! \brief Stop executing the graph. */ + VX_ACTION_ABANDON = VX_ENUM_BASE(VX_ID_KHRONOS, VX_ENUM_ACTION) + 0x1, +}; + +/*! \brief An indication of how a kernel will treat the given parameter. + * \ingroup group_parameter + */ +enum vx_direction_e { + /*! \brief The parameter is an input only. */ + VX_INPUT = VX_ENUM_BASE(VX_ID_KHRONOS, VX_ENUM_DIRECTION) + 0x0, + /*! \brief The parameter is an output only. */ + VX_OUTPUT = VX_ENUM_BASE(VX_ID_KHRONOS, VX_ENUM_DIRECTION) + 0x1, +}; + +/*! \brief These enumerations are given to the \ref vxHint API to enable/disable platform + * optimizations and/or features. Hints are optional and usually are vendor-specific. + * \see \ref vxHint + * \ingroup group_hint + */ +enum vx_hint_e { + /*! \brief Indicates to the implementation that user do not apply any specific + * requirements for performance. + */ + VX_HINT_PERFORMANCE_DEFAULT = VX_ENUM_BASE(VX_ID_KHRONOS, VX_ENUM_HINT) + 0x1, + /*! \brief Indicates the user preference is low power consumption versus + * highest performance. + */ + VX_HINT_PERFORMANCE_LOW_POWER = VX_ENUM_BASE(VX_ID_KHRONOS, VX_ENUM_HINT) + 0x2, + /*! \brief Indicates the user preference for highest performance over + * low power consumption. + */ + VX_HINT_PERFORMANCE_HIGH_SPEED = VX_ENUM_BASE(VX_ID_KHRONOS, VX_ENUM_HINT) + 0x3, +}; + +/*! \brief These enumerations are given to the \c vxDirective API to enable/disable + * platform optimizations and/or features. Directives are not optional and + * usually are vendor-specific, by defining a vendor range of directives and + * starting their enumeration from there. + * \see vxDirective + * \ingroup group_directive + */ +enum vx_directive_e { + /*! \brief Disables recording information for graph debugging. */ + VX_DIRECTIVE_DISABLE_LOGGING = VX_ENUM_BASE(VX_ID_KHRONOS, VX_ENUM_DIRECTIVE) + 0x0, + /*! \brief Enables recording information for graph debugging. */ + VX_DIRECTIVE_ENABLE_LOGGING = VX_ENUM_BASE(VX_ID_KHRONOS, VX_ENUM_DIRECTIVE) + 0x1, + /*! \brief Disables performance counters for the context. By default performance counters are disabled */ + VX_DIRECTIVE_DISABLE_PERFORMANCE = VX_ENUM_BASE(VX_ID_KHRONOS, VX_ENUM_DIRECTIVE) + 0x2, + /*! \brief Enables performance counters for the context. 
*/ + VX_DIRECTIVE_ENABLE_PERFORMANCE = VX_ENUM_BASE(VX_ID_KHRONOS, VX_ENUM_DIRECTIVE) + 0x3, +}; + +/*! \brief The Graph State Enumeration. + * \ingroup group_graph + */ +enum vx_graph_state_e { + /*! \brief The graph should be verified before execution */ + VX_GRAPH_STATE_UNVERIFIED = VX_ENUM_BASE(VX_ID_KHRONOS, VX_ENUM_GRAPH_STATE) + 0x0, + /*! \brief The graph has been verified and has not been executed or scheduled for execution yet */ + VX_GRAPH_STATE_VERIFIED = VX_ENUM_BASE(VX_ID_KHRONOS, VX_ENUM_GRAPH_STATE) + 0x1, + /*! \brief The graph either has been scheduled and not completed, or is being executed */ + VX_GRAPH_STATE_RUNNING = VX_ENUM_BASE(VX_ID_KHRONOS, VX_ENUM_GRAPH_STATE) + 0x2, + /*! \brief The graph execution was abandoned */ + VX_GRAPH_STATE_ABANDONED = VX_ENUM_BASE(VX_ID_KHRONOS, VX_ENUM_GRAPH_STATE) + 0x3, + /*! \brief The graph execution is completed and the graph is not scheduled for execution */ + VX_GRAPH_STATE_COMPLETED = VX_ENUM_BASE(VX_ID_KHRONOS, VX_ENUM_GRAPH_STATE) + 0x4, + /*! \brief The graph execution was cancelled */ + VX_GRAPH_STATE_CANCELLED = VX_ENUM_BASE(VX_ID_KHRONOS, VX_ENUM_GRAPH_STATE) + 0x5, +}; + +/*! \brief The graph attributes list. + * \ingroup group_graph + */ +enum vx_graph_attribute_e { + /*! \brief Returns the number of nodes in a graph. Read-only. Use a \ref vx_uint32 parameter.*/ + VX_GRAPH_NUMNODES = VX_ATTRIBUTE_BASE(VX_ID_KHRONOS, VX_TYPE_GRAPH) + 0x0, + /*! \brief Returns the overall performance of the graph. Read-only. Use a \ref vx_perf_t parameter. + * The accuracy of timing information is platform dependent. + * \note Performance tracking must have been enabled. See \ref vx_directive_e + */ + VX_GRAPH_PERFORMANCE = VX_ATTRIBUTE_BASE(VX_ID_KHRONOS, VX_TYPE_GRAPH) + 0x2, + /*! \brief Returns the number of explicitly declared parameters on the graph. Read-only. Use a \ref vx_uint32 parameter. */ + VX_GRAPH_NUMPARAMETERS = VX_ATTRIBUTE_BASE(VX_ID_KHRONOS, VX_TYPE_GRAPH) + 0x3, + /*! \brief Returns the state of the graph. See \ref vx_graph_state_e enum. */ + VX_GRAPH_STATE = VX_ATTRIBUTE_BASE(VX_ID_KHRONOS, VX_TYPE_GRAPH) + 0x4, +}; + +/*! \brief The Conversion Policy Enumeration. + * \ingroup group_basic_features + */ +enum vx_convert_policy_e { + /*! \brief Results are the least significant bits of the output operand, as if + * stored in two's complement binary format in the size of its bit-depth. + */ + VX_CONVERT_POLICY_WRAP = VX_ENUM_BASE(VX_ID_KHRONOS, VX_ENUM_CONVERT_POLICY) + 0x0, + /*! \brief Results are saturated to the bit depth of the output operand. */ + VX_CONVERT_POLICY_SATURATE = VX_ENUM_BASE(VX_ID_KHRONOS, VX_ENUM_CONVERT_POLICY) + 0x1, + /*! \brief Results preserve infinity and nan value. */ + VX_CONVERT_POLICY_INF = VX_ENUM_BASE(VX_ID_VIVANTE, VX_ENUM_CONVERT_POLICY) + 0x0, +}; + +/*! \brief Based on the VX_DF_IMAGE definition. + * \note Use \ref vx_df_image to contain these values. + * \ingroup group_basic_features + */ +enum vx_df_image_e { + /*! \brief A virtual image of no defined type. */ + VX_DF_IMAGE_VIRT = VX_DF_IMAGE('V','I','R','T'), + /*! \brief A single plane of 24-bit pixel as 3 interleaved 8-bit units of + * R then G then B data. This uses the BT709 full range by default. + */ + VX_DF_IMAGE_RGB = VX_DF_IMAGE('R','G','B','2'), + /*! \brief A single plane of 32-bit pixel as 4 interleaved 8-bit units of + * R then G then B data, then a don't care byte. + * This uses the BT709 full range by default. + */ + VX_DF_IMAGE_RGBX = VX_DF_IMAGE('R','G','B','A'), + /*! 
\brief A 2-plane YUV format of Luma (Y) and interleaved UV data at + * 4:2:0 sampling. This uses the BT709 full range by default. + */ + VX_DF_IMAGE_NV12 = VX_DF_IMAGE('N','V','1','2'), + /*! \brief A 2-plane YUV format of Luma (Y) and interleaved VU data at + * 4:2:0 sampling. This uses the BT709 full range by default. + */ + VX_DF_IMAGE_NV21 = VX_DF_IMAGE('N','V','2','1'), + /*! \brief A single plane of 32-bit macro pixel of U0, Y0, V0, Y1 bytes. + * This uses the BT709 full range by default. + */ + VX_DF_IMAGE_UYVY = VX_DF_IMAGE('U','Y','V','Y'), + /*! \brief A single plane of 32-bit macro pixel of Y0, U0, Y1, V0 bytes. + * This uses the BT709 full range by default. + */ + VX_DF_IMAGE_YUYV = VX_DF_IMAGE('Y','U','Y','V'), + /*! \brief A 3 plane of 8-bit 4:2:0 sampled Y, U, V planes. + * This uses the BT709 full range by default. + */ + VX_DF_IMAGE_IYUV = VX_DF_IMAGE('I','Y','U','V'), + /*! \brief A 3 plane of 8 bit 4:4:4 sampled Y, U, V planes. + * This uses the BT709 full range by default. + */ + VX_DF_IMAGE_YUV4 = VX_DF_IMAGE('Y','U','V','4'), + /*! \brief A single plane of unsigned 1-bit data packed eight pixels per byte. + * The least significant bit is the first pixel in each byte. + * See \ref vx_imagepatch_addressing_t for more details. + */ + VX_DF_IMAGE_U1 = VX_DF_IMAGE('U','0','0','1'), + /*! \brief A single plane of unsigned 8-bit data. + * The range of data is not specified, as it may be extracted from a YUV or + * generated. + */ + VX_DF_IMAGE_U8 = VX_DF_IMAGE('U','0','0','8'), + /*! \brief A single plane of unsigned 16-bit data. + * The range of data is not specified, as it may be extracted from a YUV or + * generated. + */ + VX_DF_IMAGE_U16 = VX_DF_IMAGE('U','0','1','6'), + /*! \brief A single plane of signed 16-bit data. + * The range of data is not specified, as it may be extracted from a YUV or + * generated. + */ + VX_DF_IMAGE_S16 = VX_DF_IMAGE('S','0','1','6'), + /*! \brief A single plane of unsigned 32-bit data. + * The range of data is not specified, as it may be extracted from a YUV or + * generated. + */ + VX_DF_IMAGE_U32 = VX_DF_IMAGE('U','0','3','2'), + /*! \brief A single plane of unsigned 32-bit data. + * The range of data is not specified, as it may be extracted from a YUV or + * generated. + */ + VX_DF_IMAGE_S32 = VX_DF_IMAGE('S','0','3','2'), +}; + +/*! \brief The Target Enumeration. + * \ingroup group_basic_features + */ +enum vx_target_e { + /*! \brief Any available target. An OpenVX implementation must support at least one target associated with this value */ + VX_TARGET_ANY = VX_ENUM_BASE(VX_ID_KHRONOS, VX_ENUM_TARGET) + 0x0000, + /*! \brief Target, explicitly specified by its (case-insensitive) name string. */ + VX_TARGET_STRING = VX_ENUM_BASE(VX_ID_KHRONOS, VX_ENUM_TARGET) + 0x0001, + /*! \brief Start of Vendor specific target enumerates. */ + VX_TARGET_VENDOR_BEGIN = VX_ENUM_BASE(VX_ID_KHRONOS, VX_ENUM_TARGET) + 0x1000, +}; + +/*! \brief The reference attributes list. + * \ingroup group_reference + */ +enum vx_reference_attribute_e { + /*! \brief Returns the reference count of the object. Read-only. Use a \ref vx_uint32 parameter. */ + VX_REFERENCE_COUNT = VX_ATTRIBUTE_BASE(VX_ID_KHRONOS, VX_TYPE_REFERENCE) + 0x0, + /*! \brief Returns the \ref vx_type_e of the reference. Read-only. Use a \ref vx_enum parameter. */ + VX_REFERENCE_TYPE = VX_ATTRIBUTE_BASE(VX_ID_KHRONOS, VX_TYPE_REFERENCE) + 0x1, + /*! \brief Used to query the reference for its name. Read-write. Use a *\ref vx_char parameter. 
*/ + VX_REFERENCE_NAME = VX_ATTRIBUTE_BASE(VX_ID_KHRONOS, VX_TYPE_REFERENCE) + 0x2, +}; + +/*! \brief A list of context attributes. + * \ingroup group_context + */ +enum vx_context_attribute_e { + /*! \brief Queries the unique vendor ID. Read-only. Use a \ref vx_uint16. */ + VX_CONTEXT_VENDOR_ID = VX_ATTRIBUTE_BASE(VX_ID_KHRONOS, VX_TYPE_CONTEXT) + 0x0, + /*! \brief Queries the OpenVX Version Number. Read-only. Use a \ref vx_uint16 */ + VX_CONTEXT_VERSION = VX_ATTRIBUTE_BASE(VX_ID_KHRONOS, VX_TYPE_CONTEXT) + 0x1, + /*! \brief Queries the context for the number of \e unique kernels. Read-only. Use a \ref vx_uint32 parameter. */ + VX_CONTEXT_UNIQUE_KERNELS = VX_ATTRIBUTE_BASE(VX_ID_KHRONOS, VX_TYPE_CONTEXT) + 0x2, + /*! \brief Queries the context for the number of active modules. Read-only. Use a \ref vx_uint32 parameter. */ + VX_CONTEXT_MODULES = VX_ATTRIBUTE_BASE(VX_ID_KHRONOS, VX_TYPE_CONTEXT) + 0x3, + /*! \brief Queries the context for the number of active references. Read-only. Use a \ref vx_uint32 parameter. */ + VX_CONTEXT_REFERENCES = VX_ATTRIBUTE_BASE(VX_ID_KHRONOS, VX_TYPE_CONTEXT) + 0x4, + /*! \brief Queries the context for it's implementation name. Read-only. Use a \ref vx_char[\ref VX_MAX_IMPLEMENTATION_NAME] array */ + VX_CONTEXT_IMPLEMENTATION = VX_ATTRIBUTE_BASE(VX_ID_KHRONOS, VX_TYPE_CONTEXT) + 0x5, + /*! \brief Queries the number of bytes in the extensions string. Read-only. Use a \ref vx_size parameter. */ + VX_CONTEXT_EXTENSIONS_SIZE = VX_ATTRIBUTE_BASE(VX_ID_KHRONOS, VX_TYPE_CONTEXT) + 0x6, + /*! \brief Retrieves the extensions string. Read-only. + * This is a space-separated string of extension names. Each OpenVX official extension has a unique identifier, + * comprised of capital letters, numbers and the underscore character, prefixed with "KHR_", for example "KHR_NEW_FEATURE". + * Use a \ref vx_char pointer allocated to the size returned from \ref VX_CONTEXT_EXTENSIONS_SIZE. + */ + VX_CONTEXT_EXTENSIONS = VX_ATTRIBUTE_BASE(VX_ID_KHRONOS, VX_TYPE_CONTEXT) + 0x7, + /*! \brief The maximum width or height of a convolution matrix. + * Read-only. Use a \ref vx_size parameter. + * Each vendor must support centered kernels of size w X h, where both w + * and h are odd numbers, 3 <= w <= n and 3 <= h <= n, where n is the value of the + * \ref VX_CONTEXT_CONVOLUTION_MAX_DIMENSION attribute. n is an odd + * number that should not be smaller than 9. w and h may or may not be equal to + * each other. All combinations of w and h meeting the conditions above must be + * supported. The behavior of \ref vxCreateConvolution is undefined for values + * larger than the value returned by this attribute. + */ + VX_CONTEXT_CONVOLUTION_MAX_DIMENSION = VX_ATTRIBUTE_BASE(VX_ID_KHRONOS, VX_TYPE_CONTEXT) + 0x8, + /*! \brief The maximum window dimension of the OpticalFlowPyrLK kernel. The value of this attribute shall be equal to or greater than '9'. + * \see \ref VX_KERNEL_OPTICAL_FLOW_PYR_LK. Read-only. Use a \ref vx_size parameter. + */ + VX_CONTEXT_OPTICAL_FLOW_MAX_WINDOW_DIMENSION = VX_ATTRIBUTE_BASE(VX_ID_KHRONOS, VX_TYPE_CONTEXT) + 0x9, + /*! \brief The border mode for immediate mode functions. + * \details Graph mode functions are unaffected by this attribute. Read-write. Use a pointer to a \ref vx_border_t structure as parameter. + * \note The assumed default value for immediate mode functions is \ref VX_BORDER_UNDEFINED. + */ + VX_CONTEXT_IMMEDIATE_BORDER = VX_ATTRIBUTE_BASE(VX_ID_KHRONOS, VX_TYPE_CONTEXT) + 0xA, + /*! 
\brief Returns the table of all unique the kernels that exist in the context. + * Read-only. Use a \ref vx_kernel_info_t array. + * \pre You must call \ref vxQueryContext with \ref VX_CONTEXT_UNIQUE_KERNELS + * to compute the necessary size of the array. + */ + VX_CONTEXT_UNIQUE_KERNEL_TABLE = VX_ATTRIBUTE_BASE(VX_ID_KHRONOS, VX_TYPE_CONTEXT) + 0xB, + /*! \brief The unsupported border mode policy for immediate mode functions. Read-Write. + * \details Graph mode functions are unaffected by this attribute. Use a \ref vx_enum as parameter. Will contain a \ref vx_border_policy_e. + * \note The assumed default value for immediate mode functions is \ref VX_BORDER_POLICY_DEFAULT_TO_UNDEFINED. Users should refer to the documentation of their implementation to determine what border modes are supported by each kernel. + */ + VX_CONTEXT_IMMEDIATE_BORDER_POLICY = VX_ATTRIBUTE_BASE(VX_ID_KHRONOS, VX_TYPE_CONTEXT) + 0xC, + /*! \brief The dimension of the largest nonlinear filter supported. See \ref vxNonLinearFilterNode. + * \details The implementation must support all dimensions (height or width, not necessarily the same) + * up to the value of this attribute. The lowest value that must be supported for this attribute is 9. + * Read-only. Use a \ref vx_size parameter. + */ + VX_CONTEXT_NONLINEAR_MAX_DIMENSION = VX_ATTRIBUTE_BASE(VX_ID_KHRONOS, VX_TYPE_CONTEXT) + 0xd, + /*! \brief tensor Data maximal number of dimensions supported by the implementation. */ + VX_CONTEXT_MAX_TENSOR_DIMS = VX_ATTRIBUTE_BASE(VX_ID_KHRONOS, VX_TYPE_CONTEXT) + 0xE, +}; + +/*! \brief The kernel attributes list + * \ingroup group_kernel + */ +enum vx_kernel_attribute_e { + /*! \brief Queries a kernel for the number of parameters the kernel + * supports. Read-only. Use a \ref vx_uint32 parameter. + */ + VX_KERNEL_PARAMETERS = VX_ATTRIBUTE_BASE(VX_ID_KHRONOS, VX_TYPE_KERNEL) + 0x0, + /*! \brief Queries the name of the kernel. Not settable. + * Read-only. Use a \ref vx_char[\ref VX_MAX_KERNEL_NAME] array (not a \ref vx_array). + */ + VX_KERNEL_NAME = VX_ATTRIBUTE_BASE(VX_ID_KHRONOS, VX_TYPE_KERNEL) + 0x1, + /*! \brief Queries the enum of the kernel. Not settable. + * Read-only. Use a \ref vx_enum parameter. + */ + VX_KERNEL_ENUM = VX_ATTRIBUTE_BASE(VX_ID_KHRONOS, VX_TYPE_KERNEL) + 0x2, + /*! \brief The local data area allocated with each kernel when it becomes a + * node. Read-write. Can be written only before user-kernel finalization. + * Use a \ref vx_size parameter. + * \note If not set it will default to zero. + */ + VX_KERNEL_LOCAL_DATA_SIZE = VX_ATTRIBUTE_BASE(VX_ID_KHRONOS, VX_TYPE_KERNEL) + 0x3, +}; + +/*! \brief The node attributes list. + * \ingroup group_node + */ +enum vx_node_attribute_e { + /*! \brief Queries the status of node execution. Read-only. Use a \ref vx_status parameter. */ + VX_NODE_STATUS = VX_ATTRIBUTE_BASE(VX_ID_KHRONOS, VX_TYPE_NODE) + 0x0, + /*! \brief Queries the performance of the node execution. + * The accuracy of timing information is platform dependent and also depends on the graph + * optimizations. Read-only. + * \note Performance tracking must have been enabled. See \ref vx_directive_e. + */ + VX_NODE_PERFORMANCE = VX_ATTRIBUTE_BASE(VX_ID_KHRONOS, VX_TYPE_NODE) + 0x1, + /*! \brief Gets or sets the border mode of the node. + * Read-write. Use a \ref vx_border_t structure with a default value of VX_BORDER_UNDEFINED. + */ + VX_NODE_BORDER = VX_ATTRIBUTE_BASE(VX_ID_KHRONOS, VX_TYPE_NODE) + 0x2, + /*! \brief Indicates the size of the kernel local memory area. + * Read-only. 
Can be written only at user-node (de)initialization if VX_KERNEL_LOCAL_DATA_SIZE==0. + * Use a \ref vx_size parameter. + */ + VX_NODE_LOCAL_DATA_SIZE = VX_ATTRIBUTE_BASE(VX_ID_KHRONOS, VX_TYPE_NODE) + 0x3, + /*! \brief Indicates the pointer kernel local memory area. + * Read-Write. Can be written only at user-node (de)initialization if VX_KERNEL_LOCAL_DATA_SIZE==0. + * Use a void * parameter. + */ + VX_NODE_LOCAL_DATA_PTR = VX_ATTRIBUTE_BASE(VX_ID_KHRONOS, VX_TYPE_NODE) + 0x4, + /*! \brief Indicates the number of node parameters, including optional parameters that are not passed. + * Read-only. Use a \ref vx_uint32 parameter. + */ + VX_NODE_PARAMETERS = VX_ATTRIBUTE_BASE(VX_ID_KHRONOS, VX_TYPE_NODE) + 0x5, + /*! \brief Indicates whether the node is replicated. Read-only. + * Use a \ref vx_bool parameter. + */ + VX_NODE_IS_REPLICATED = VX_ATTRIBUTE_BASE(VX_ID_KHRONOS, VX_TYPE_NODE) + 0x6, + /*! \brief Indicates the replicated parameters. Read-only. + * Use a \ref vx_bool* parameter. + */ + VX_NODE_REPLICATE_FLAGS = VX_ATTRIBUTE_BASE(VX_ID_KHRONOS, VX_TYPE_NODE) + 0x7, + /*! \brief Indicates the behavior with respect to the valid rectangle. Read-only. + * Use a \ref vx_bool parameter. + */ + VX_NODE_VALID_RECT_RESET = VX_ATTRIBUTE_BASE(VX_ID_KHRONOS, VX_TYPE_NODE) + 0x8, + + VX_NODE_ATTRIBUTE_CONST_TENSOR_CACHE = VX_ATTRIBUTE_BASE(VX_ID_KHRONOS, VX_TYPE_NODE) + 0x9, + + VX_NODE_ATTRIBUTE_FOR_HW_QUALITY = VX_ATTRIBUTE_BASE(VX_ID_KHRONOS, VX_TYPE_NODE) + 0xA, + + VX_NODE_SWTILING_TILE_XY = VX_ATTRIBUTE_BASE(VX_ID_KHRONOS, VX_TYPE_NODE) + 0x10, + VX_NODE_SPINST_INDEX = VX_ATTRIBUTE_BASE(VX_ID_KHRONOS, VX_TYPE_NODE) + 0x11, + VX_NODE_SPCONV_PCQ_REPLACE_SPINST = VX_ATTRIBUTE_BASE(VX_ID_KHRONOS, VX_TYPE_NODE) + 0x12, + VX_NODE_SP_NAME = VX_ATTRIBUTE_BASE(VX_ID_KHRONOS, VX_TYPE_NODE) + 0x13, + VX_NODE_SPINST = VX_ATTRIBUTE_BASE(VX_ID_KHRONOS, VX_TYPE_NODE) + 0x14, +}; + +/*! \brief The parameter attributes list + * \ingroup group_parameter + */ +enum vx_parameter_attribute_e { + /*! \brief Queries a parameter for its index value on the kernel with which it is associated. Read-only. Use a \ref vx_uint32 parameter. */ + VX_PARAMETER_INDEX = VX_ATTRIBUTE_BASE(VX_ID_KHRONOS, VX_TYPE_PARAMETER) + 0x0, + /*! \brief Queries a parameter for its direction value on the kernel with which it is associated. Read-only. Use a \ref vx_enum parameter. */ + VX_PARAMETER_DIRECTION = VX_ATTRIBUTE_BASE(VX_ID_KHRONOS, VX_TYPE_PARAMETER) + 0x1, + /*! \brief Queries a parameter for its type, \ref vx_type_e is returned. Read-only. The size of the parameter is implied for plain data objects. For opaque data objects like images and arrays a query to their attributes has to be called to determine the size. */ + VX_PARAMETER_TYPE = VX_ATTRIBUTE_BASE(VX_ID_KHRONOS, VX_TYPE_PARAMETER) + 0x2, + /*! \brief Queries a parameter for its state. A value in \ref vx_parameter_state_e is returned. Read-only. Use a \ref vx_enum parameter. */ + VX_PARAMETER_STATE = VX_ATTRIBUTE_BASE(VX_ID_KHRONOS, VX_TYPE_PARAMETER) + 0x3, + /*! \brief Use to extract the reference contained in the parameter. Read-only. Use a \ref vx_reference parameter. */ + VX_PARAMETER_REF = VX_ATTRIBUTE_BASE(VX_ID_KHRONOS, VX_TYPE_PARAMETER) + 0x4, + /*! \brief Use to extract the meta format contained in the parameter. Read-only. Use a \ref vx_meta_format parameter. */ + VX_PARAMETER_META_FORMAT = VX_ATTRIBUTE_BASE(VX_ID_KHRONOS, VX_TYPE_PARAMETER) + 0x5, +}; + +/*! \brief The image attributes list. + * \ingroup group_image + */ +enum vx_image_attribute_e { + /*! 
\brief Queries an image for its width. Read-only. Use a \ref vx_uint32 parameter. */ + VX_IMAGE_WIDTH = VX_ATTRIBUTE_BASE(VX_ID_KHRONOS, VX_TYPE_IMAGE) + 0x0, + /*! \brief Queries an image for its height. Read-only. Use a \ref vx_uint32 parameter. */ + VX_IMAGE_HEIGHT = VX_ATTRIBUTE_BASE(VX_ID_KHRONOS, VX_TYPE_IMAGE) + 0x1, + /*! \brief Queries an image for its format. Read-only. Use a \ref vx_df_image parameter. */ + VX_IMAGE_FORMAT = VX_ATTRIBUTE_BASE(VX_ID_KHRONOS, VX_TYPE_IMAGE) + 0x2, + /*! \brief Queries an image for its number of planes. Read-only. Use a \ref vx_size parameter. */ + VX_IMAGE_PLANES = VX_ATTRIBUTE_BASE(VX_ID_KHRONOS, VX_TYPE_IMAGE) + 0x3, + /*! \brief Queries an image for its color space (see \ref vx_color_space_e). Read-write. Use a \ref vx_enum parameter. */ + VX_IMAGE_SPACE = VX_ATTRIBUTE_BASE(VX_ID_KHRONOS, VX_TYPE_IMAGE) + 0x4, + /*! \brief Queries an image for its channel range (see \ref vx_channel_range_e). Read-only. Use a \ref vx_enum parameter. */ + VX_IMAGE_RANGE = VX_ATTRIBUTE_BASE(VX_ID_KHRONOS, VX_TYPE_IMAGE) + 0x5, + /*! \brief Queries memory type if created using vxCreateImageFromHandle. If vx_image was not created using + vxCreateImageFromHandle, VX_MEMORY_TYPE_NONE is returned. Use a \ref vx_memory_type_e parameter. */ + VX_IMAGE_MEMORY_TYPE = VX_ATTRIBUTE_BASE(VX_ID_KHRONOS, VX_TYPE_IMAGE) + 0x7, + /*! \brief Queries if an image is uniform. Read-only. Use a \ref vx_bool parameter */ + VX_IMAGE_IS_UNIFORM = VX_ATTRIBUTE_BASE(VX_ID_KHRONOS, VX_TYPE_IMAGE) + 0x8, + /*! \brief Queries the image uniform value if any. Read-only. Use a \ref vx_pixel_value_t parameter. */ + VX_IMAGE_UNIFORM_VALUE = VX_ATTRIBUTE_BASE(VX_ID_KHRONOS, VX_TYPE_IMAGE) + 0x9, +}; + +/*! \brief The scalar attributes list. + * \ingroup group_scalar + */ +enum vx_scalar_attribute_e { + /*! \brief Queries the type of atomic that is contained in the scalar. Read-only. Use a \ref vx_enum parameter.*/ + VX_SCALAR_TYPE = VX_ATTRIBUTE_BASE(VX_ID_KHRONOS, VX_TYPE_SCALAR) + 0x0, +}; + +/*! \brief A type of operation in which both operands are scalars. + * \see group_scalar + * \ingroup group_scalar + */ +enum vx_scalar_operation_e { + /*! \brief logical and. */ + VX_SCALAR_OP_AND = VX_ENUM_BASE(VX_ID_KHRONOS, VX_ENUM_SCALAR_OPERATION) + 0x0, + /*! \brief logical or. */ + VX_SCALAR_OP_OR = VX_ENUM_BASE(VX_ID_KHRONOS, VX_ENUM_SCALAR_OPERATION) + 0x1, + /*! \brief logical exclusive or. */ + VX_SCALAR_OP_XOR = VX_ENUM_BASE(VX_ID_KHRONOS, VX_ENUM_SCALAR_OPERATION) + 0x2, + /*! \brief logical nand. */ + VX_SCALAR_OP_NAND = VX_ENUM_BASE(VX_ID_KHRONOS, VX_ENUM_SCALAR_OPERATION) + 0x3, + /*! \brief comparison (equal). */ + VX_SCALAR_OP_EQUAL = VX_ENUM_BASE(VX_ID_KHRONOS, VX_ENUM_SCALAR_OPERATION) + 0x4, + /*! \brief comparison (not equal). */ + VX_SCALAR_OP_NOTEQUAL = VX_ENUM_BASE(VX_ID_KHRONOS, VX_ENUM_SCALAR_OPERATION) + 0x5, + /*! \brief comparison (less than). */ + VX_SCALAR_OP_LESS = VX_ENUM_BASE(VX_ID_KHRONOS, VX_ENUM_SCALAR_OPERATION) + 0x6, + /*! \brief comparison (less than or equal to). */ + VX_SCALAR_OP_LESSEQ = VX_ENUM_BASE(VX_ID_KHRONOS, VX_ENUM_SCALAR_OPERATION) + 0x7, + /*! \brief comparison (greater than). */ + VX_SCALAR_OP_GREATER = VX_ENUM_BASE(VX_ID_KHRONOS, VX_ENUM_SCALAR_OPERATION) + 0x8, + /*! \brief comparison (greater than or equal to). */ + VX_SCALAR_OP_GREATEREQ = VX_ENUM_BASE(VX_ID_KHRONOS, VX_ENUM_SCALAR_OPERATION) + 0x9, + /*! \brief arithmetic addition. */ + VX_SCALAR_OP_ADD = VX_ENUM_BASE(VX_ID_KHRONOS, VX_ENUM_SCALAR_OPERATION) + 0xA, + /*! 
\brief arithmetic subtraction. */ + VX_SCALAR_OP_SUBTRACT = VX_ENUM_BASE(VX_ID_KHRONOS, VX_ENUM_SCALAR_OPERATION) + 0xB, + /*! \brief arithmetic multiplication. */ + VX_SCALAR_OP_MULTIPLY = VX_ENUM_BASE(VX_ID_KHRONOS, VX_ENUM_SCALAR_OPERATION) + 0xC, + /*! \brief arithmetic division. */ + VX_SCALAR_OP_DIVIDE = VX_ENUM_BASE(VX_ID_KHRONOS, VX_ENUM_SCALAR_OPERATION) + 0xD, + /*! \brief arithmetic (modulo operator). */ + VX_SCALAR_OP_MODULUS = VX_ENUM_BASE(VX_ID_KHRONOS, VX_ENUM_SCALAR_OPERATION) + 0xE, + /*! \brief minimum of two scalars. */ + VX_SCALAR_OP_MIN = VX_ENUM_BASE(VX_ID_KHRONOS, VX_ENUM_SCALAR_OPERATION) + 0xF, + /*! \brief maximum of two scalars. */ + VX_SCALAR_OP_MAX = VX_ENUM_BASE(VX_ID_KHRONOS, VX_ENUM_SCALAR_OPERATION) + 0x10, +}; + +/*! \brief The Look-Up Table (LUT) attribute list. + * \ingroup group_lut + */ +enum vx_lut_attribute_e { + /*! \brief Indicates the value type of the LUT. Read-only. Use a \ref vx_enum. */ + VX_LUT_TYPE = VX_ATTRIBUTE_BASE(VX_ID_KHRONOS,VX_TYPE_LUT) + 0x0, + /*! \brief Indicates the number of elements in the LUT. Read-only. Use a \ref vx_size. */ + VX_LUT_COUNT = VX_ATTRIBUTE_BASE(VX_ID_KHRONOS,VX_TYPE_LUT) + 0x1, + /*! \brief Indicates the total size of the LUT in bytes. Read-only. Uses a \ref vx_size. */ + VX_LUT_SIZE = VX_ATTRIBUTE_BASE(VX_ID_KHRONOS,VX_TYPE_LUT) + 0x2, + /*! \brief Indicates the index of the input value = 0. Read-only. Uses a \ref vx_uint32. */ + VX_LUT_OFFSET = VX_ATTRIBUTE_BASE(VX_ID_KHRONOS,VX_TYPE_LUT) + 0x3, +}; + +/*! \brief The distribution attribute list. + * \ingroup group_distribution + */ +enum vx_distribution_attribute_e { + /*! \brief Indicates the number of dimensions in the distribution. Read-only. Use a \ref vx_size parameter. */ + VX_DISTRIBUTION_DIMENSIONS = VX_ATTRIBUTE_BASE(VX_ID_KHRONOS, VX_TYPE_DISTRIBUTION) + 0x0, + /*! \brief Indicates the start of the values to use (inclusive). Read-only. Use a \ref vx_int32 parameter. */ + VX_DISTRIBUTION_OFFSET = VX_ATTRIBUTE_BASE(VX_ID_KHRONOS, VX_TYPE_DISTRIBUTION) + 0x1, + /*! \brief Indicates the total number of the consecutive values of the distribution interval. */ + VX_DISTRIBUTION_RANGE = VX_ATTRIBUTE_BASE(VX_ID_KHRONOS, VX_TYPE_DISTRIBUTION) + 0x2, + /*! \brief Indicates the number of bins. Read-only. Use a \ref vx_size parameter. */ + VX_DISTRIBUTION_BINS = VX_ATTRIBUTE_BASE(VX_ID_KHRONOS, VX_TYPE_DISTRIBUTION) + 0x3, + /*! \brief Indicates the width of a bin. Equal to the range divided by the number of bins. If the range is not a + * multiple of the number of bins, it is not valid. Read-only. Use a \ref vx_uint32 parameter. */ + VX_DISTRIBUTION_WINDOW = VX_ATTRIBUTE_BASE(VX_ID_KHRONOS, VX_TYPE_DISTRIBUTION) + 0x4, + /*! \brief Indicates the total size of the distribution in bytes. Read-only. Use a \ref vx_size parameter. */ + VX_DISTRIBUTION_SIZE = VX_ATTRIBUTE_BASE(VX_ID_KHRONOS, VX_TYPE_DISTRIBUTION) + 0x5, +}; + +/*! \brief The Threshold types. + * \ingroup group_threshold + */ +enum vx_threshold_type_e { + /*! \brief A threshold with only 1 value. */ + VX_THRESHOLD_TYPE_BINARY = VX_ENUM_BASE(VX_ID_KHRONOS, VX_ENUM_THRESHOLD_TYPE) + 0x0, + /*! \brief A threshold with 2 values (upper/lower). Use with Canny Edge Detection. */ + VX_THRESHOLD_TYPE_RANGE = VX_ENUM_BASE(VX_ID_KHRONOS, VX_ENUM_THRESHOLD_TYPE) + 0x1, +}; + +/*! \brief The threshold attributes. + * \ingroup group_threshold + */ +enum vx_threshold_attribute_e { + /*! \brief The value type of the threshold. Read-only. Use a \ref vx_enum parameter. Will contain a \ref vx_threshold_type_e. 
*/ + VX_THRESHOLD_TYPE = VX_ATTRIBUTE_BASE(VX_ID_KHRONOS, VX_TYPE_THRESHOLD) + 0x0, + /*! \brief The input image format the threshold was created for. Read-only. Use a \ref vx_enum parameter. Will contain a \ref vx_df_image_e.*/ + VX_THRESHOLD_INPUT_FORMAT = VX_ATTRIBUTE_BASE(VX_ID_KHRONOS, VX_TYPE_THRESHOLD) + 0x7, + /*! \brief The output image format the threshold was created for. Read-only. Use a \ref vx_enum parameter. Will contain a \ref vx_df_image_e.*/ + VX_THRESHOLD_OUTPUT_FORMAT = VX_ATTRIBUTE_BASE(VX_ID_KHRONOS, VX_TYPE_THRESHOLD) + 0x8 +}; + +/*! \brief The matrix attributes. + * \ingroup group_matrix + */ +enum vx_matrix_attribute_e { + /*! \brief The value type of the matrix. Read-only. Use a \ref vx_enum parameter. */ + VX_MATRIX_TYPE = VX_ATTRIBUTE_BASE(VX_ID_KHRONOS, VX_TYPE_MATRIX) + 0x0, + /*! \brief The M dimension of the matrix. Read-only. Use a \ref vx_size parameter. */ + VX_MATRIX_ROWS = VX_ATTRIBUTE_BASE(VX_ID_KHRONOS, VX_TYPE_MATRIX) + 0x1, + /*! \brief The N dimension of the matrix. Read-only. Use a \ref vx_size parameter. */ + VX_MATRIX_COLUMNS = VX_ATTRIBUTE_BASE(VX_ID_KHRONOS, VX_TYPE_MATRIX) + 0x2, + /*! \brief The total size of the matrix in bytes. Read-only. Use a \ref vx_size parameter. */ + VX_MATRIX_SIZE = VX_ATTRIBUTE_BASE(VX_ID_KHRONOS, VX_TYPE_MATRIX) + 0x3, + /*! \brief The origin of the matrix with a default value of [floor(VX_MATRIX_COLUMNS/2), + floor(VX_MATRIX_ROWS/2)]. Read-only. Use a \ref vx_coordinates2d_t parameter. */ + VX_MATRIX_ORIGIN = VX_ATTRIBUTE_BASE(VX_ID_KHRONOS, VX_TYPE_MATRIX) + 0x4, + /*! \brief The pattern of the matrix. See \ref vx_pattern_e . Read-only. Use a + * \ref vx_enum parameter. If the matrix was created via \ref vxCreateMatrixFromPattern + * or \ref vxCreateMatrixFromPatternAndOrigin, the attribute corresponds to the given pattern. + * Otherwise the attribute is \ref VX_PATTERN_OTHER. */ + VX_MATRIX_PATTERN = VX_ATTRIBUTE_BASE(VX_ID_KHRONOS, VX_TYPE_MATRIX) + 0x5, +}; + +/*! \brief The convolution attributes. + * \ingroup group_convolution + */ +enum vx_convolution_attribute_e { + /*! \brief The number of rows of the convolution matrix. Read-only. Use a \ref vx_size parameter. */ + VX_CONVOLUTION_ROWS = VX_ATTRIBUTE_BASE(VX_ID_KHRONOS, VX_TYPE_CONVOLUTION) + 0x0, + /*! \brief The number of columns of the convolution matrix. Read-only. Use a \ref vx_size parameter. */ + VX_CONVOLUTION_COLUMNS = VX_ATTRIBUTE_BASE(VX_ID_KHRONOS, VX_TYPE_CONVOLUTION) + 0x1, + /*! \brief The scale of the convolution matrix. Read-write. Use a \ref vx_uint32 parameter. + * \if OPENVX_STRICT_1_0 + * \note For 1.0, only powers of 2 are supported up to 2^31. + * \endif + */ + VX_CONVOLUTION_SCALE = VX_ATTRIBUTE_BASE(VX_ID_KHRONOS, VX_TYPE_CONVOLUTION) + 0x2, + /*! \brief The total size of the convolution matrix in bytes. Read-only. Use a \ref vx_size parameter. */ + VX_CONVOLUTION_SIZE = VX_ATTRIBUTE_BASE(VX_ID_KHRONOS, VX_TYPE_CONVOLUTION) + 0x3, +}; + +/*! \brief The pyramid object attributes. + * \ingroup group_pyramid + */ +enum vx_pyramid_attribute_e { + /*! \brief The number of levels of the pyramid. Read-only. Use a \ref vx_size parameter. */ + VX_PYRAMID_LEVELS = VX_ATTRIBUTE_BASE(VX_ID_KHRONOS, VX_TYPE_PYRAMID) + 0x0, + /*! \brief The scale factor between each level of the pyramid. Read-only. Use a \ref vx_float32 parameter. */ + VX_PYRAMID_SCALE = VX_ATTRIBUTE_BASE(VX_ID_KHRONOS, VX_TYPE_PYRAMID) + 0x1, + /*! \brief The width of the 0th image in pixels. Read-only. Use a \ref vx_uint32 parameter. 
*/ + VX_PYRAMID_WIDTH = VX_ATTRIBUTE_BASE(VX_ID_KHRONOS, VX_TYPE_PYRAMID) + 0x2, + /*! \brief The height of the 0th image in pixels. Read-only. Use a \ref vx_uint32 parameter. */ + VX_PYRAMID_HEIGHT = VX_ATTRIBUTE_BASE(VX_ID_KHRONOS, VX_TYPE_PYRAMID) + 0x3, + /*! \brief The \ref vx_df_image_e format of the image. Read-only. Use a \ref vx_df_image parameter. */ + VX_PYRAMID_FORMAT = VX_ATTRIBUTE_BASE(VX_ID_KHRONOS, VX_TYPE_PYRAMID) + 0x4, +}; + +/*! \brief The remap object attributes. + * \ingroup group_remap + */ +enum vx_remap_attribute_e { + /*! \brief The source width. Read-only. Use a \ref vx_uint32 parameter. */ + VX_REMAP_SOURCE_WIDTH = VX_ATTRIBUTE_BASE(VX_ID_KHRONOS, VX_TYPE_REMAP) + 0x0, + /*! \brief The source height. Read-only. Use a \ref vx_uint32 parameter. */ + VX_REMAP_SOURCE_HEIGHT = VX_ATTRIBUTE_BASE(VX_ID_KHRONOS, VX_TYPE_REMAP) + 0x1, + /*! \brief The destination width. Read-only. Use a \ref vx_uint32 parameter. */ + VX_REMAP_DESTINATION_WIDTH = VX_ATTRIBUTE_BASE(VX_ID_KHRONOS, VX_TYPE_REMAP) + 0x2, + /*! \brief The destination height. Read-only. Use a \ref vx_uint32 parameter. */ + VX_REMAP_DESTINATION_HEIGHT = VX_ATTRIBUTE_BASE(VX_ID_KHRONOS, VX_TYPE_REMAP) + 0x3, +}; + +/*! \brief The array object attributes. + * \ingroup group_array + */ +enum vx_array_attribute_e { + /*! \brief The type of the Array items. Read-only. Use a \ref vx_enum parameter. */ + VX_ARRAY_ITEMTYPE = VX_ATTRIBUTE_BASE(VX_ID_KHRONOS, VX_TYPE_ARRAY) + 0x0, + /*! \brief The number of items in the Array. Read-only. Use a \ref vx_size parameter. */ + VX_ARRAY_NUMITEMS = VX_ATTRIBUTE_BASE(VX_ID_KHRONOS, VX_TYPE_ARRAY) + 0x1, + /*! \brief The maximal number of items that the Array can hold. Read-only. Use a \ref vx_size parameter. */ + VX_ARRAY_CAPACITY = VX_ATTRIBUTE_BASE(VX_ID_KHRONOS, VX_TYPE_ARRAY) + 0x2, + /*! \brief Queries an array item size. Read-only. Use a \ref vx_size parameter. */ + VX_ARRAY_ITEMSIZE = VX_ATTRIBUTE_BASE(VX_ID_KHRONOS, VX_TYPE_ARRAY) + 0x3, +}; + +/*! \brief The ObjectArray object attributes. + * \ingroup group_object_array + */ +enum vx_object_array_attribute_e { + /*! \brief The type of the ObjectArray items. Read-only. Use a \ref vx_enum parameter. */ + VX_OBJECT_ARRAY_ITEMTYPE = VX_ATTRIBUTE_BASE(VX_ID_KHRONOS, VX_TYPE_OBJECT_ARRAY) + 0x0, + /*! \brief The number of items in the ObjectArray. Read-only. Use a \ref vx_size parameter. */ + VX_OBJECT_ARRAY_NUMITEMS = VX_ATTRIBUTE_BASE(VX_ID_KHRONOS, VX_TYPE_OBJECT_ARRAY) + 0x1, +}; +/*! \brief tensor Data attributes. + * \ingroup group_object_tensor + */ +enum vx_tensor_attribute_e +{ + /*! \brief Number of dimensions. */ + VX_TENSOR_NUMBER_OF_DIMS = VX_ATTRIBUTE_BASE( VX_ID_KHRONOS, VX_TYPE_TENSOR ) + 0x0, + /*! \brief Dimension sizes. */ + VX_TENSOR_DIMS = VX_ATTRIBUTE_BASE( VX_ID_KHRONOS, VX_TYPE_TENSOR ) + 0x1, + /*! \brief tensor Data element data type. vx_type_e */ + VX_TENSOR_DATA_TYPE = VX_ATTRIBUTE_BASE( VX_ID_KHRONOS, VX_TYPE_TENSOR ) + 0x2, + /*! \brief fixed point position when the input element type is integer. */ + VX_TENSOR_FIXED_POINT_POSITION = VX_ATTRIBUTE_BASE(VX_ID_KHRONOS, VX_TYPE_TENSOR) + 0x3, + /*! \brief tensor quantization data type. */ + VX_TENSOR_QUANT_FORMAT = VX_ATTRIBUTE_BASE(VX_ID_VIVANTE, VX_TYPE_TENSOR) + 0x0, + /*! \brief tensor quantization zero point. */ + VX_TENSOR_ZERO_POINT = VX_ATTRIBUTE_BASE(VX_ID_VIVANTE, VX_TYPE_TENSOR) + 0x1, + /*! \brief tensor quantization scale value. */ + VX_TENSOR_SCALE = VX_ATTRIBUTE_BASE(VX_ID_VIVANTE, VX_TYPE_TENSOR) + 0x2, + /*! \brief the rank of tensor. 
*/ + VX_TENSOR_RANK = VX_ATTRIBUTE_BASE(VX_ID_VIVANTE, VX_TYPE_TENSOR) + 0x3, + /*! \brief the precision of tensor. */ + VX_TENSOR_PRECISION = VX_ATTRIBUTE_BASE(VX_ID_VIVANTE, VX_TYPE_TENSOR) + 0x4, + /*! \brief the data lifetime of tensor. */ + VX_TENSOR_LIFETIME = VX_ATTRIBUTE_BASE(VX_ID_VIVANTE, VX_TYPE_TENSOR) + 0x5, + /*! \brief the value status of tensor. */ + VX_TENSOR_VALUE = VX_ATTRIBUTE_BASE(VX_ID_VIVANTE, VX_TYPE_TENSOR) + 0x6, + /*XiaoMi project*/ + VX_TENSOR_INPUT_FOR_REFERENCE = VX_ATTRIBUTE_BASE(VX_ID_VIVANTE, VX_TYPE_TENSOR) + 0x7, + VX_TENSOR_MEMORY_ATTRIBUTE = VX_ATTRIBUTE_BASE(VX_ID_VIVANTE, VX_TYPE_TENSOR) + 0x8, +}; + +/*! \brief The meta valid rectangle attributes. + * \ingroup group_user_kernels + */ +enum vx_meta_valid_rect_attribute_e { + /*! \brief Valid rectangle callback during output parameter validation. Write-only. */ + VX_VALID_RECT_CALLBACK = VX_ATTRIBUTE_BASE(VX_ID_KHRONOS, VX_TYPE_META_FORMAT) + 0x1, +}; + +/*! \brief The channel enumerations for channel extractions. + * \see vxChannelExtractNode + * \see vxuChannelExtract + * \see VX_KERNEL_CHANNEL_EXTRACT + * \ingroup group_basic_features + */ +enum vx_channel_e { + /*! \brief Used by formats with unknown channel types. */ + VX_CHANNEL_0 = VX_ENUM_BASE(VX_ID_KHRONOS, VX_ENUM_CHANNEL) + 0x0, + /*! \brief Used by formats with unknown channel types. */ + VX_CHANNEL_1 = VX_ENUM_BASE(VX_ID_KHRONOS, VX_ENUM_CHANNEL) + 0x1, + /*! \brief Used by formats with unknown channel types. */ + VX_CHANNEL_2 = VX_ENUM_BASE(VX_ID_KHRONOS, VX_ENUM_CHANNEL) + 0x2, + /*! \brief Used by formats with unknown channel types. */ + VX_CHANNEL_3 = VX_ENUM_BASE(VX_ID_KHRONOS, VX_ENUM_CHANNEL) + 0x3, + + /*! \brief Use to extract the RED channel, no matter the byte or packing order. */ + VX_CHANNEL_R = VX_ENUM_BASE(VX_ID_KHRONOS, VX_ENUM_CHANNEL) + 0x10, + /*! \brief Use to extract the GREEN channel, no matter the byte or packing order. */ + VX_CHANNEL_G = VX_ENUM_BASE(VX_ID_KHRONOS, VX_ENUM_CHANNEL) + 0x11, + /*! \brief Use to extract the BLUE channel, no matter the byte or packing order. */ + VX_CHANNEL_B = VX_ENUM_BASE(VX_ID_KHRONOS, VX_ENUM_CHANNEL) + 0x12, + /*! \brief Use to extract the ALPHA channel, no matter the byte or packing order. */ + VX_CHANNEL_A = VX_ENUM_BASE(VX_ID_KHRONOS, VX_ENUM_CHANNEL) + 0x13, + /*! \brief Use to extract the LUMA channel, no matter the byte or packing order. */ + VX_CHANNEL_Y = VX_ENUM_BASE(VX_ID_KHRONOS, VX_ENUM_CHANNEL) + 0x14, + /*! \brief Use to extract the Cb/U channel, no matter the byte or packing order. */ + VX_CHANNEL_U = VX_ENUM_BASE(VX_ID_KHRONOS, VX_ENUM_CHANNEL) + 0x15, + /*! \brief Use to extract the Cr/V/Value channel, no matter the byte or packing order. */ + VX_CHANNEL_V = VX_ENUM_BASE(VX_ID_KHRONOS, VX_ENUM_CHANNEL) + 0x16, + + VX_CHANNEL_UV = VX_ENUM_BASE(VX_ID_VIVANTE, VX_ENUM_CHANNEL) + 0x0, +}; + +/*! \brief An enumeration of memory import types. + * \ingroup group_context + */ +enum vx_memory_type_e { + /*! \brief For memory allocated through OpenVX, this is the import type. */ + VX_MEMORY_TYPE_NONE = VX_ENUM_BASE(VX_ID_KHRONOS, VX_ENUM_MEMORY_TYPE) + 0x0, + + /*! \brief The default memory type to import from the Host. 
*/ + VX_MEMORY_TYPE_HOST = VX_ENUM_BASE(VX_ID_KHRONOS, VX_ENUM_MEMORY_TYPE) + 0x1, + + VX_MEMORY_TYPE_DMABUF = VX_ENUM_BASE(VX_ID_VIVANTE, VX_ENUM_MEMORY_TYPE) + 0x0, + + VX_MEMORY_TYPE_INTERNAL = VX_ENUM_BASE(VX_ID_VIVANTE, VX_ENUM_MEMORY_TYPE) + 0x1, + + VX_MEMORY_TYPE_HOST_UNCACHED = VX_ENUM_BASE(VX_ID_VIVANTE, VX_ENUM_MEMORY_TYPE) + 0x2, + + VX_MEMORY_TYPE_HOST_PHYSICAL = VX_ENUM_BASE(VX_ID_VIVANTE, VX_ENUM_MEMORY_TYPE) + 0x3, +}; + +/*! \brief The image reconstruction filters supported by image resampling operations. + * + * The edge of a pixel is interpreted as being aligned to the edge of the image. + * The value for an output pixel is evaluated at the center of that pixel. + * + * This means, for example, that an even enlargement of a factor of two in nearest-neighbor + * interpolation will replicate every source pixel into a 2x2 quad in the destination, and that + * an even shrink by a factor of two in bilinear interpolation will create each destination pixel + * by average a 2x2 quad of source pixels. + * + * Samples that cross the boundary of the source image have values determined by the border + * mode - see \ref vx_border_e and \ref VX_NODE_BORDER. + * \see vxuScaleImage + * \see vxScaleImageNode + * \see VX_KERNEL_SCALE_IMAGE + * \see vxuWarpAffine + * \see vxWarpAffineNode + * \see VX_KERNEL_WARP_AFFINE + * \see vxuWarpPerspective + * \see vxWarpPerspectiveNode + * \see VX_KERNEL_WARP_PERSPECTIVE + * \ingroup group_basic_features + */ +enum vx_interpolation_type_e { + /*! \brief Output values are defined to match the source pixel whose center is nearest to the sample position. */ + VX_INTERPOLATION_NEAREST_NEIGHBOR = VX_ENUM_BASE(VX_ID_KHRONOS, VX_ENUM_INTERPOLATION) + 0x0, + /*! \brief Output values are defined by bilinear interpolation between the pixels whose centers are closest + * to the sample position, weighted linearly by the distance of the sample from the pixel centers. */ + VX_INTERPOLATION_BILINEAR = VX_ENUM_BASE(VX_ID_KHRONOS, VX_ENUM_INTERPOLATION) + 0x1, + /*! \brief Output values are determined by averaging the source pixels whose areas fall under the + * area of the destination pixel, projected onto the source image. */ + VX_INTERPOLATION_AREA = VX_ENUM_BASE(VX_ID_KHRONOS, VX_ENUM_INTERPOLATION) + 0x2, +}; + +/*! \brief An enumeration of non-linear filter functions. + * \ingroup group_basic_features + */ +enum vx_non_linear_filter_e { + /*! \brief Nonlinear median filter. */ + VX_NONLINEAR_FILTER_MEDIAN = VX_ENUM_BASE(VX_ID_KHRONOS, VX_ENUM_NONLINEAR) + 0x0, + /*! \brief Nonlinear Erode. */ + VX_NONLINEAR_FILTER_MIN = VX_ENUM_BASE(VX_ID_KHRONOS, VX_ENUM_NONLINEAR) + 0x1 , + /*! \brief Nonlinear Dilate. */ + VX_NONLINEAR_FILTER_MAX = VX_ENUM_BASE(VX_ID_KHRONOS, VX_ENUM_NONLINEAR) + 0x2, +}; + +/*! \brief An enumeration of matrix patterns. See \ref vxCreateMatrixFromPattern + * and \ref vxCreateMatrixFromPatternAndOrigin + * \ingroup group_basic_features + */ +enum vx_pattern_e { + /*! \brief Box pattern matrix */ + VX_PATTERN_BOX = VX_ENUM_BASE(VX_ID_KHRONOS, VX_ENUM_PATTERN) + 0x0, + /*! \brief Cross pattern matrix */ + VX_PATTERN_CROSS = VX_ENUM_BASE(VX_ID_KHRONOS, VX_ENUM_PATTERN) + 0x1 , + /*! \brief A square matrix (rows = columns = size) */ + VX_PATTERN_DISK = VX_ENUM_BASE(VX_ID_KHRONOS, VX_ENUM_PATTERN) + 0x2, + /*! \brief Matrix with any pattern other than above. */ + VX_PATTERN_OTHER = VX_ENUM_BASE(VX_ID_KHRONOS, VX_ENUM_PATTERN) + 0x3, +}; + +/*! \brief The image color space list used by the \ref VX_IMAGE_SPACE attribute of a \ref vx_image. 
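The Vivante tensor attributes introduced above (VX_TENSOR_QUANT_FORMAT, VX_TENSOR_ZERO_POINT, VX_TENSOR_SCALE) can be read back with the standard vxQueryTensor call. A minimal sketch, assuming the quantization format is reported as a vx_enum, the zero point as a vx_int32 and the scale as a vx_float32 (the value types are not documented in this header), and assuming the usual affine convention real = scale * (q - zero_point):

    #include <VX/vx.h>

    /* Read the quantization parameters of an existing tensor (sketch; value
     * types for the vendor attributes are assumed, not taken from this header). */
    static vx_status query_quant_params(vx_tensor tensor,
                                        vx_enum *quant_format,
                                        vx_int32 *zero_point,
                                        vx_float32 *scale)
    {
        vx_status status = vxQueryTensor(tensor, VX_TENSOR_QUANT_FORMAT,
                                         quant_format, sizeof(*quant_format));
        if (status == VX_SUCCESS)
            status = vxQueryTensor(tensor, VX_TENSOR_ZERO_POINT,
                                   zero_point, sizeof(*zero_point));
        if (status == VX_SUCCESS)
            status = vxQueryTensor(tensor, VX_TENSOR_SCALE,
                                   scale, sizeof(*scale));
        /* Assumed affine dequantization: real_value = *scale * (q - *zero_point). */
        return status;
    }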
+ * \ingroup group_image + */ +enum vx_color_space_e { + /*! \brief Use to indicate that no color space is used. */ + VX_COLOR_SPACE_NONE = VX_ENUM_BASE(VX_ID_KHRONOS, VX_ENUM_COLOR_SPACE) + 0x0, + /*! \brief Use to indicate that the BT.601 coefficients and SMPTE C primaries are used for conversions. */ + VX_COLOR_SPACE_BT601_525 = VX_ENUM_BASE(VX_ID_KHRONOS, VX_ENUM_COLOR_SPACE) + 0x1, + /*! \brief Use to indicate that the BT.601 coefficients and BTU primaries are used for conversions. */ + VX_COLOR_SPACE_BT601_625 = VX_ENUM_BASE(VX_ID_KHRONOS, VX_ENUM_COLOR_SPACE) + 0x2, + /*! \brief Use to indicate that the BT.709 coefficients are used for conversions. */ + VX_COLOR_SPACE_BT709 = VX_ENUM_BASE(VX_ID_KHRONOS, VX_ENUM_COLOR_SPACE) + 0x3, + + /*! \brief All images in VX are by default BT.709 */ + VX_COLOR_SPACE_DEFAULT = VX_COLOR_SPACE_BT709, +}; + +/*! \brief The image channel range list used by the \ref VX_IMAGE_RANGE attribute of a \ref vx_image. + * \ingroup group_image + */ +enum vx_channel_range_e { + /*! \brief Full range of the unit of the channel */ + VX_CHANNEL_RANGE_FULL = VX_ENUM_BASE(VX_ID_KHRONOS, VX_ENUM_COLOR_RANGE) + 0x0, + /*! \brief Restricted range of the unit of the channel based on the space given */ + VX_CHANNEL_RANGE_RESTRICTED = VX_ENUM_BASE(VX_ID_KHRONOS, VX_ENUM_COLOR_RANGE) + 0x1, +}; + +/*! \brief The parameter state type. + * \ingroup group_parameter + */ +enum vx_parameter_state_e { + /*! \brief Default. The parameter must be supplied. If not set, during + * Verify, an error is returned. + */ + VX_PARAMETER_STATE_REQUIRED = VX_ENUM_BASE(VX_ID_KHRONOS, VX_ENUM_PARAMETER_STATE) + 0x0, + /*! \brief The parameter may be unspecified. The kernel takes care not + * to dereference optional parameters until it is certain they are valid. + */ + VX_PARAMETER_STATE_OPTIONAL = VX_ENUM_BASE(VX_ID_KHRONOS, VX_ENUM_PARAMETER_STATE) + 0x1, +}; + +/*! \brief The border mode list. + * \ingroup group_borders + */ +enum vx_border_e { + /*! \brief No defined border mode behavior is given. */ + VX_BORDER_UNDEFINED = VX_ENUM_BASE(VX_ID_KHRONOS, VX_ENUM_BORDER) + 0x0, + /*! \brief For nodes that support this behavior, a constant value is + * \e filled-in when accessing out-of-bounds pixels. + */ + VX_BORDER_CONSTANT = VX_ENUM_BASE(VX_ID_KHRONOS, VX_ENUM_BORDER) + 0x1, + /*! \brief For nodes that support this behavior, a replication of the nearest + * edge pixels value is given for out-of-bounds pixels. + */ + VX_BORDER_REPLICATE = VX_ENUM_BASE(VX_ID_KHRONOS, VX_ENUM_BORDER) + 0x2, +}; + +/*! \brief The unsupported border mode policy list. + * \ingroup group_borders + */ +enum vx_border_policy_e { + /*! \brief Use VX_BORDER_UNDEFINED instead of unsupported border modes. */ + VX_BORDER_POLICY_DEFAULT_TO_UNDEFINED = VX_ENUM_BASE(VX_ID_KHRONOS, VX_ENUM_BORDER_POLICY) + 0x0, + /*! \brief Return VX_ERROR_NOT_SUPPORTED for unsupported border modes. */ + VX_BORDER_POLICY_RETURN_ERROR = VX_ENUM_BASE(VX_ID_KHRONOS, VX_ENUM_BORDER_POLICY) + 0x1, +}; + +/*! \brief The termination criteria list. + * \see group_vision_function_opticalflowpyrlk + * \ingroup group_context + */ +enum vx_termination_criteria_e { + /*! \brief Indicates a termination after a set number of iterations. */ + VX_TERM_CRITERIA_ITERATIONS = VX_ENUM_BASE(VX_ID_KHRONOS, VX_ENUM_TERM_CRITERIA) + 0x0, + /*! \brief Indicates a termination after matching against the value of epsilon provided to the function. */ + VX_TERM_CRITERIA_EPSILON = VX_ENUM_BASE(VX_ID_KHRONOS, VX_ENUM_TERM_CRITERIA) + 0x1, + /*!
\brief Indicates that both the iterations and epsilon methods are employed. Whichever one matches first + * causes the termination. + */ + VX_TERM_CRITERIA_BOTH = VX_ENUM_BASE(VX_ID_KHRONOS, VX_ENUM_TERM_CRITERIA) + 0x2, +}; + +/*! \brief A normalization type. + * \see group_vision_function_canny + * \ingroup group_vision_function_canny + */ +enum vx_norm_type_e { + /*! \brief The L1 normalization. */ + VX_NORM_L1 = VX_ENUM_BASE(VX_ID_KHRONOS, VX_ENUM_NORM_TYPE) + 0x0, + /*! \brief The L2 normalization. */ + VX_NORM_L2 = VX_ENUM_BASE(VX_ID_KHRONOS, VX_ENUM_NORM_TYPE) + 0x1, +}; + +/*! \brief The delay attribute list. + * \ingroup group_delay + */ +enum vx_delay_attribute_e { + /*! \brief The type of objects in the delay. Read-only. Use a \ref vx_enum parameter. */ + VX_DELAY_TYPE = VX_ATTRIBUTE_BASE(VX_ID_KHRONOS, VX_TYPE_DELAY) + 0x0, + /*! \brief The number of items in the delay. Read-only. Use a \ref vx_size parameter.*/ + VX_DELAY_SLOTS = VX_ATTRIBUTE_BASE(VX_ID_KHRONOS, VX_TYPE_DELAY) + 0x1, +}; + +/*! \brief The memory accessor hint flags. + * These enumeration values are used to indicate desired \e system behavior, + * not the \b User intent. For example: these can be interpreted as hints to the + * system about cache operations or marshalling operations. + * \ingroup group_context + */ +enum vx_accessor_e { + /*! \brief The memory shall be treated by the system as if it were read-only. + * If the User writes to this memory, the results are implementation defined. + */ + VX_READ_ONLY = VX_ENUM_BASE(VX_ID_KHRONOS, VX_ENUM_ACCESSOR) + 0x1, + /*! \brief The memory shall be treated by the system as if it were write-only. + * If the User reads from this memory, the results are implementation defined. + */ + VX_WRITE_ONLY = VX_ENUM_BASE(VX_ID_KHRONOS, VX_ENUM_ACCESSOR) + 0x2, + /*! \brief The memory shall be treated by the system as if it were readable and writeable. + */ + VX_READ_AND_WRITE = VX_ENUM_BASE(VX_ID_KHRONOS, VX_ENUM_ACCESSOR) + 0x3, +}; + +/*! \brief The Round Policy Enumeration. + * \ingroup group_context + */ +enum vx_round_policy_e { + /*! \brief When scaling, this truncates the least significant values that are lost in operations. */ + VX_ROUND_POLICY_TO_ZERO = VX_ENUM_BASE(VX_ID_KHRONOS, VX_ENUM_ROUND_POLICY) + 0x1, + /*! \brief When scaling, this rounds to nearest even output value. */ + VX_ROUND_POLICY_TO_NEAREST_EVEN = VX_ENUM_BASE(VX_ID_KHRONOS, VX_ENUM_ROUND_POLICY) + 0x2, +}; + +/*! \brief Local binary pattern supported. + * \ingroup group_vision_function_lbp + */ +enum vx_lbp_format_e +{ + /*! \brief local binary pattern + */ + VX_LBP = VX_ENUM_BASE( VX_ID_KHRONOS, VX_ENUM_LBP_FORMAT ) + 0x0, + /*! \brief Modified Local Binary Patterns + */ + + VX_MLBP = VX_ENUM_BASE( VX_ID_KHRONOS, VX_ENUM_LBP_FORMAT ) + 0x1, + /*! \brief Uniform local binary pattern + */ + VX_ULBP = VX_ENUM_BASE( VX_ID_KHRONOS, VX_ENUM_LBP_FORMAT ) + 0x2 +}; + +/*! \brief Comparing metrics. + * \details In all the equations below w and h are width and height of the template image respectively. + * \f$ R \f$ is the compare map. \f$ T \f$ is the template image. \f$ I \f$ is the image on which the template is searched. + * \ingroup group_vision_function_match_template + */ +enum vx_comp_metric_e +{ + /*! \brief hamming distance \f$ R(x,y) = \frac{1}{w*h}\sum_{\grave{x},\grave{y}}^{w,h} XOR(T(\grave{x},\grave{y}),I(x+\grave{x},y+\grave{y}))\f$ */ + VX_COMPARE_HAMMING = VX_ENUM_BASE( VX_ID_KHRONOS, VX_ENUM_COMP_METRIC ) + 0x0, + /*!
\brief L1 distance \f$ R(x,y) = \frac{1}{w*h}\sum_{\grave{x},\grave{y}}^{w,h} ABS(T(\grave{x},\grave{y}) - I(x+\grave{x},y+\grave{y}))\f$ */ + VX_COMPARE_L1 = VX_ENUM_BASE( VX_ID_KHRONOS, VX_ENUM_COMP_METRIC ) + 0x1, + /*! \brief L2 distance normalized by image size \f$ R(x,y) = \frac{1}{w*h}\sum_{\grave{x},\grave{y}}^{w,h} (T(\grave{x},\grave{y}) - I(x+\grave{x},y+\grave{y}))^2\f$*/ + VX_COMPARE_L2 = VX_ENUM_BASE( VX_ID_KHRONOS, VX_ENUM_COMP_METRIC ) + 0x2, + /*! \brief cross correlation distance \f$ R(x,y) = \frac{1}{w*h}\sum_{\grave{x},\grave{y}}^{w,h} (T(\grave{x},\grave{y})*I(x+\grave{x},y+\grave{y}))\f$*/ + VX_COMPARE_CCORR = VX_ENUM_BASE( VX_ID_KHRONOS, VX_ENUM_COMP_METRIC ) + 0x3, + /*! \brief L2 normalized distance \f$ R(x,y) = \frac{\sum_{\grave{x},\grave{y}}^{w,h} (T(\grave{x},\grave{y}) - I(x+\grave{x},y+\grave{y}))^2} + * {\sqrt{\sum_{\grave{x},\grave{y}}^{w,h} T(\grave{x},\grave{y})^2 * I(x+\grave{x},y+\grave{y})^2}} \f$*/ + VX_COMPARE_L2_NORM = VX_ENUM_BASE( VX_ID_KHRONOS, VX_ENUM_COMP_METRIC ) + 0x4, + /*! \brief cross correlation normalized distance \f$ R(x,y) = \frac{\sum_{\grave{x},\grave{y}}^{w,h} T(\grave{x},\grave{y}) * I(x+\grave{x},y+\grave{y})*2^{15}} + * {\sqrt{\sum_{\grave{x},\grave{y}}^{w,h} T(\grave{x},\grave{y})^2 * I(x+\grave{x},y+\grave{y})^2}} \f$*/ + VX_COMPARE_CCORR_NORM = VX_ENUM_BASE( VX_ID_KHRONOS, VX_ENUM_COMP_METRIC ) + 0x5 +}; + +#if defined(_WIN32) || defined(UNDER_CE) +#if defined(_WIN64) +/*! \brief Use to aid in debugging values in OpenVX. + * \ingroup group_basic_features + */ +#define VX_FMT_REF "%I64u" +/*! \brief Use to aid in debugging values in OpenVX. + * \ingroup group_basic_features + */ +#define VX_FMT_SIZE "%I64u" +#else +/*! \brief Use to aid in debugging values in OpenVX. + * \ingroup group_basic_features + */ +#define VX_FMT_REF "%lu" +/*! \brief Use to aid in debugging values in OpenVX. + * \ingroup group_basic_features + */ +#define VX_FMT_SIZE "%lu" +#endif +#else +/*! \brief Use to aid in debugging values in OpenVX. + * \ingroup group_basic_features + */ +#define VX_FMT_REF "%p" +/*! \brief Use to aid in debugging values in OpenVX. + * \ingroup group_basic_features + */ +#define VX_FMT_SIZE "%zu" +#endif +/*! \brief Use to indicate the 1:1 ratio in Q22.10 format. + * \ingroup group_basic_features + */ +#define VX_SCALE_UNITY (1024u) + +/*! + * \brief The addressing image patch structure is used by the Host only + * to address pixels in an image patch. The fields of the structure are defined as: + * \arg dim - The dimensions of the image in logical pixel units in the x & y direction. + * \arg stride - The physical byte distance from a logical pixel to the next + * logically adjacent pixel in the positive x or y direction. + * \arg scale - The relationship of scaling from the primary plane (typically + * the zero indexed plane) to this plane. An integer down-scaling factor of \f$ f \f$ shall be + * set to a value equal to \f$ scale = \frac{unity}{f} \f$ and an integer up-scaling factor of \f$ f \f$ + * shall be set to a value of \f$ scale = unity * f \f$. \f$ unity \f$ is defined as \ref VX_SCALE_UNITY. + * \arg step - The step is the number of logical pixel units to skip to + * arrive at the next physically unique pixel. For example, on a plane that is + * half-scaled in a dimension, the step in that dimension is 2 to indicate that + * every other pixel in that dimension is an alias. 
This is useful in situations + * where iteration over unique pixels is required, such as in serializing + * or de-serializing the image patch information. + * \see \ref vxMapImagePatch + * \ingroup group_image + */ +typedef struct _vx_imagepatch_addressing_t { + vx_uint32 dim_x; /*!< \brief Width of patch in X dimension in pixels. */ + vx_uint32 dim_y; /*!< \brief Height of patch in Y dimension in pixels. */ + vx_int32 stride_x; /*!< \brief Stride in X dimension in bytes. */ + vx_int32 stride_y; /*!< \brief Stride in Y dimension in bytes. */ + vx_uint32 scale_x; /*!< \brief Scale of X dimension. For sub-sampled planes this is the scaling factor of the dimension of the plane in relation to the zero plane. Use \ref VX_SCALE_UNITY in the numerator. */ + vx_uint32 scale_y; /*!< \brief Scale of Y dimension. For sub-sampled planes this is the scaling factor of the dimension of the plane in relation to the zero plane. Use \ref VX_SCALE_UNITY in the numerator. */ + vx_uint32 step_x; /*!< \brief Step of X dimension in pixels. */ + vx_uint32 step_y; /*!< \brief Step of Y dimension in pixels. */ + vx_uint16 stride_x_bits; /*!< \brief Stride in X dimension in bits. Used when stride_x is not an integer number of bytes. */ +} vx_imagepatch_addressing_t; + +/*! \brief Use to initialize a \ref vx_imagepatch_addressing_t structure on the stack. + * \ingroup group_image + */ +#define VX_IMAGEPATCH_ADDR_INIT {0u, 0u, 0, 0, 0u, 0u, 0u, 0u, 0u} + +/*! \brief The performance measurement structure. The time or durations are in units of nano seconds. + * \ingroup group_performance + */ +typedef struct _vx_perf_t { + vx_uint64 tmp; /*!< \brief Holds the last measurement. */ + vx_uint64 beg; /*!< \brief Holds the first measurement in a set. */ + vx_uint64 end; /*!< \brief Holds the last measurement in a set. */ + vx_uint64 sum; /*!< \brief Holds the summation of durations. */ + vx_uint64 avg; /*!< \brief Holds the average of the durations. */ + vx_uint64 min; /*!< \brief Holds the minimum of the durations. */ + vx_uint64 num; /*!< \brief Holds the number of measurements. */ + vx_uint64 max; /*!< \brief Holds the maximum of the durations. */ +} vx_perf_t; + +/*! \brief Hough lines probability parameters. + * \ingroup group_vision_function_hough_lines_p + */ +typedef struct _vx_hough_lines_p_t +{ + /*! \brief Distance resolution of the parameter in pixels. */ + vx_float32 rho; + /*! \brief Angle resolution of the parameter in radians. */ + vx_float32 theta; + /*! \brief The minimum number of intersections to detect a line. */ + vx_int32 threshold; + /*! \brief The minimum number of points that can form a line. Line segments shorter than that are rejected. */ + vx_int32 line_length; + /*! \brief The maximum allowed gap between points on the same line to link them. */ + vx_int32 line_gap; + /*! \brief Optional restriction on theta. The max allowed value. */ + vx_float32 theta_max; + /*! \brief Optional restriction on theta. The min allowed value. */ + vx_float32 theta_min; +} vx_hough_lines_p_t; + +/*! \brief line struct + * \ingroup group_basic_features + */ +typedef struct _vx_line2d_t +{ + /*! \brief x index of line start */ + vx_float32 start_x; + /*! \brief y index of line start */ + vx_float32 start_y; + /*! \brief x index of line end*/ + vx_float32 end_x; + /*! \brief y index of line end*/ + vx_float32 end_y; +} vx_line2d_t; + +/*! \brief Matrix Multiply Parameters + * + * transpose_input1/input2/input3 : if True the matrix is transposed before the operation, otherwise the matrix is used as is. 
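The stride/scale/step fields above turn into plain pointer arithmetic once a patch has been mapped. A minimal sketch for a single-plane VX_DF_IMAGE_U8 patch, using the scale-aware offset formula; the standard helper vxFormatImagePatchAddress2d performs an equivalent computation:

    #include <VX/vx.h>

    /* Address of pixel (x, y) in a mapped U8 patch described by addr (sketch). */
    static vx_uint8 *u8_pixel_addr(void *base, vx_uint32 x, vx_uint32 y,
                                   const vx_imagepatch_addressing_t *addr)
    {
        vx_uint32 offset =
            addr->stride_y * ((addr->scale_y * y) / VX_SCALE_UNITY) +
            addr->stride_x * ((addr->scale_x * x) / VX_SCALE_UNITY);
        return (vx_uint8 *)base + offset;
    }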
\n + * \ingroup group_vision_function_tensor_matrix_multiply + */ +typedef struct _vx_tensor_matrix_multiply_params_t{ + /*! \brief if True the matrix is transposed before the operation, otherwise the matrix is used as is*/ + vx_bool transpose_input1; + /*! \brief if True the matrix is transposed before the operation, otherwise the matrix is used as is*/ + vx_bool transpose_input2; + /*! \brief if True the matrix is transposed before the operation, otherwise the matrix is used as is*/ + vx_bool transpose_input3; +} vx_tensor_matrix_multiply_params_t; + +/*! \brief Initializes a \ref vx_perf_t on the stack. + * \ingroup group performance + */ +#define VX_PERF_INIT {0ul, 0ul, 0ul, 0ul, 0ul, 0ul} + +/*! \brief The Kernel Information Structure. This is returned by the Context + * to indicate which kernels are available in the OpenVX implementation. + * \ingroup group_kernel + */ +typedef struct _vx_kernel_info_t { + /*! \brief The kernel enumeration value from \ref vx_kernel_e (or an + * extension thereof). + * \see vxGetKernelByEnum + */ + vx_enum enumeration; + + /*! \brief The kernel name in dotted hierarchical format. + * e.g. "org.khronos.openvx.sobel_3x3" + * \see vxGetKernelByName + */ + vx_char name[VX_MAX_KERNEL_NAME]; +} vx_kernel_info_t; + +/*! \brief Use to indicate a half-scale pyramid. + * \ingroup group_pyramid + */ +#define VX_SCALE_PYRAMID_HALF (0.5f) + +/*! \brief Use to indicate a ORB scaled pyramid whose scaling factor is \f$ \frac{1}{\root 4 \of {2}} \f$. + * \ingroup group_pyramid + */ +#define VX_SCALE_PYRAMID_ORB ((vx_float32)0.8408964f) + +/*! \brief The keypoint data structure. + * \ingroup group_basic_features + */ +typedef struct _vx_keypoint_t { + vx_int32 x; /*!< \brief The x coordinate. */ + vx_int32 y; /*!< \brief The y coordinate. */ + vx_float32 strength; /*!< \brief The strength of the keypoint. Its definition is specific to the corner detector. */ + vx_float32 scale; /*!< \brief Initialized to 0 by corner detectors. */ + vx_float32 orientation; /*!< \brief Initialized to 0 by corner detectors. */ + vx_int32 tracking_status; /*!< \brief A zero indicates a lost point. Initialized to 1 by corner detectors. */ + vx_float32 error; /*!< \brief A tracking method specific error. Initialized to 0 by corner detectors. */ +} vx_keypoint_t; + +/*! \brief The rectangle data structure that is shared with the users. The area of the rectangle can be computed as (end_x-start_x)*(end_y-start_y). + * \ingroup group_basic_features + */ +typedef struct _vx_rectangle_t { + vx_uint32 start_x; /*!< \brief The Start X coordinate. */ + vx_uint32 start_y; /*!< \brief The Start Y coordinate. */ + vx_uint32 end_x; /*!< \brief The End X coordinate. */ + vx_uint32 end_y; /*!< \brief The End Y coordinate. */ +} vx_rectangle_t; + +/*! \brief The 2D Coordinates structure. + * \ingroup group_basic_features + */ +typedef struct _vx_coordinates2d_t { + vx_uint32 x; /*!< \brief The X coordinate. */ + vx_uint32 y; /*!< \brief The Y coordinate. */ +} vx_coordinates2d_t; + +/*! \brief The floating-point 2D Coordinates structure. + * \ingroup group_basic_features + */ +typedef struct _vx_coordinates2df_t { + vx_float32 x; /*!< \brief The X coordinate. */ + vx_float32 y; /*!< \brief The Y coordinate. */ +} vx_coordinates2df_t; + +/*! \brief The 3D Coordinates structure. + * \ingroup group_basic_features + */ +typedef struct _vx_coordinates3d_t { + vx_uint32 x; /*!< \brief The X coordinate. */ + vx_uint32 y; /*!< \brief The Y coordinate. */ + vx_uint32 z; /*!< \brief The Z coordinate. 
*/ +} vx_coordinates3d_t; + +/*! \brief Union that describes the value of a pixel for any image format. Use the field +* corresponding to the image format. +* \ingroup group_image +*/ +typedef union _vx_pixel_value_t { + vx_uint8 RGB[3]; /*!< \brief \ref VX_DF_IMAGE_RGB format in the R,G,B order */ + vx_uint8 RGBX[4]; /*!< \brief \ref VX_DF_IMAGE_RGBX format in the R,G,B,X order */ + vx_uint8 YUV[3]; /*!< \brief All YUV formats in the Y,U,V order */ + vx_bool U1; /*!< \brief \ref VX_DF_IMAGE_U1 */ + vx_uint8 U8; /*!< \brief \ref VX_DF_IMAGE_U8 */ + vx_uint16 U16; /*!< \brief \ref VX_DF_IMAGE_U16 */ + vx_int16 S16; /*!< \brief \ref VX_DF_IMAGE_S16 */ + vx_uint32 U32; /*!< \brief \ref VX_DF_IMAGE_U32 */ + vx_int32 S32; /*!< \brief \ref VX_DF_IMAGE_S32 */ + vx_uint8 reserved[16]; +} vx_pixel_value_t; + +/*! \brief The HOG descriptor structure. + * \ingroup group_vision_function_hog + */ +typedef struct { + /*! \brief The histogram cell width of type \ref VX_TYPE_INT32.*/ + vx_int32 cell_width; + /*! \brief The histogram cell height of type \ref VX_TYPE_INT32.*/ + vx_int32 cell_height; + /*! \brief The histogram block width of type \ref VX_TYPE_INT32. Must be divisible by cell_width. */ + vx_int32 block_width; + /*! \brief The histogram block height of type \ref VX_TYPE_INT32. Must be divisible by cell_height. */ + vx_int32 block_height; + /*! \brief The histogram block stride within the window of type \ref VX_TYPE_INT32. Must be an integral number of cell_width and cell_height.*/ + vx_int32 block_stride; + /*! \brief The histogram size of type \ref VX_TYPE_INT32.*/ + vx_int32 num_bins; + /*! \brief The feature descriptor window width of type \ref VX_TYPE_INT32*/ + vx_int32 window_width; + /*! \brief The feature descriptor window height of type \ref VX_TYPE_INT32*/ + vx_int32 window_height; + /*! \brief The feature descriptor window stride of type \ref VX_TYPE_INT32*/ + vx_int32 window_stride; + /*! \brief The threshold for the maximum L2-norm value for a histogram bin. It is used as part of block normalization. It defaults to 0.2. */ + vx_float32 threshold; +} vx_hog_t; + +/*! \brief Use with the enumeration \ref VX_NODE_BORDER to set the +* border mode behavior of a node that supports borders. +* +* If the indicated border mode is not supported, an error \ref VX_ERROR_NOT_SUPPORTED will be reported +* either at the time the \ref VX_NODE_BORDER is set or at the time of graph verification. +* \ingroup group_borders +*/ +typedef struct _vx_border_t { + /*! \brief See \ref vx_border_e. */ + vx_enum mode; + /*! \brief For the mode \ref VX_BORDER_CONSTANT, this union contains the + * value of out-of-bound pixels. + */ + vx_pixel_value_t constant_value; +} vx_border_t; + +/*! +* \brief The type of the vxPublishKernels entry function of modules loaded +* by \ref vxLoadKernels and unloaded by \ref vxUnloadKernels. +* \param [in] context The reference to the context kernels must be added to. +* \ingroup group_user_kernels +*/ +typedef vx_status(VX_API_CALL *vx_publish_kernels_f)(vx_context context); + +/*! +* \brief The type of the vxUnpublishKernels entry function of modules loaded +* by \ref vxLoadKernels and unloaded by \ref vxUnloadKernels. +* \param [in] context The reference to the context kernels have been added to. +* \ingroup group_user_kernels +*/ +typedef vx_status(VX_API_CALL *vx_unpublish_kernels_f)(vx_context context); + +/*! +* \brief The pointer to the Host side kernel. +* \param [in] node The handle to the node that contains this kernel. 
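vx_border_t pairs a mode from vx_border_e with the constant fill value. A minimal graph-mode sketch, assuming node is an existing filter node that supports borders; VX_NODE_BORDER is the node attribute named in the comment above:

    #include <VX/vx.h>

    /* Ask a node to fill out-of-bounds pixels with the constant value 0 (sketch). */
    static vx_status use_constant_border(vx_node node)
    {
        vx_border_t border;
        border.mode = VX_BORDER_CONSTANT;
        border.constant_value.U8 = 0;  /* only used for VX_BORDER_CONSTANT */
        return vxSetNodeAttribute(node, VX_NODE_BORDER, &border, sizeof(border));
    }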
+* \param [in] parameters The array of parameter references. +* \param [in] num The number of parameters. +* \ingroup group_user_kernels +*/ +typedef vx_status(VX_CALLBACK *vx_kernel_f)(vx_node node, const vx_reference *parameters, vx_uint32 num); + +/*! +* \brief The pointer to the kernel initializer. If the host code requires a call +* to initialize data once all the parameters have been validated, this function is called +* if not NULL. +* \param [in] node The handle to the node that contains this kernel. +* \param [in] parameters The array of parameter references. +* \param [in] num The number of parameters. +* \ingroup group_user_kernels +*/ +typedef vx_status(VX_CALLBACK *vx_kernel_initialize_f)(vx_node node, const vx_reference *parameters, vx_uint32 num); + +/*! +* \brief The pointer to the kernel deinitializer. If the host code requires a call +* to deinitialize data during a node garbage collection, this function is called +* if not NULL. +* \param [in] node The handle to the node that contains this kernel. +* \param [in] parameters The array of parameter references. +* \param [in] num The number of parameters. +* \ingroup group_user_kernels +*/ +typedef vx_status(VX_CALLBACK *vx_kernel_deinitialize_f)(vx_node node, const vx_reference *parameters, vx_uint32 num); + +/*! +* \brief The user-defined kernel node parameters validation function. The function only +* needs to fill in the meta data structure(s). +* \note This function is called once for whole set of parameters. +* \param [in] node The handle to the node that is being validated. +* \param [in] parameters The array of parameters to be validated. +* \param [in] num Number of parameters to be validated. +* \param [in] metas A pointer to a pre-allocated array of structure references that the system holds. +* The system pre-allocates a number of vx_meta_format structures for the output parameters only, +* indexed by the same indices as parameters[]. The validation function fills in the correct type, format, +* and dimensionality for the system to use either to create memory or to check against existing memory. +* \return An error code describing the validation status on parameters. +* \ingroup group_user_kernels +*/ +typedef vx_status(VX_CALLBACK *vx_kernel_validate_f)(vx_node node, const vx_reference parameters[], vx_uint32 num, vx_meta_format metas[]); + +/*! +* \brief A user-defined callback function to set the valid rectangle of an output image. +* +* The \ref VX_VALID_RECT_CALLBACK attribute in the \ref vx_meta_format object should be +* set to the desired callback during user node's output validator. The callback must not call +* \ref vxGetValidRegionImage or \ref vxSetImageValidRectangle. Instead, an array of the +* valid rectangles of all the input images is supplied to the callback to calculate the output +* valid rectangle. The output of the user node may be a pyramid, or just an image. If it is just an +* image, the 'Out' array associated with that output only has one element. If the output is a +* pyramid, the array size is equal to the number of pyramid levels. Notice that the array memory +* allocation passed to the callback is managed by the framework, the application must not allocate or +* deallocate those pointers. +* +* The behavior of the callback function vx_kernel_image_valid_rectangle_f is undefined +* if one of the following is true: +* - One of the input arguments of a user node is a pyramid or an array of images. +* - Either input or output argument of a user node is an array of pyramids. 
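These callback types are what a user kernel hands to vxAddUserKernel. A minimal registration sketch; the kernel name "com.example.my_kernel", the two-parameter layout and the empty callback bodies are placeholders for illustration only:

    #include <stddef.h>
    #include <VX/vx.h>

    /* Host-side processing callback (vx_kernel_f); real work would go here. */
    static vx_status VX_CALLBACK my_kernel_func(vx_node node,
                                                const vx_reference *parameters,
                                                vx_uint32 num)
    {
        (void)node; (void)parameters; (void)num;
        return VX_SUCCESS;
    }

    /* Output validator (vx_kernel_validate_f); fills the vx_meta_format entries. */
    static vx_status VX_CALLBACK my_kernel_validate(vx_node node,
                                                    const vx_reference parameters[],
                                                    vx_uint32 num,
                                                    vx_meta_format metas[])
    {
        (void)node; (void)parameters; (void)num; (void)metas;
        return VX_SUCCESS;
    }

    /* Obtain a free kernel enum and publish the kernel (sketch). */
    static vx_kernel register_my_kernel(vx_context context)
    {
        vx_enum kernel_id = 0;
        if (vxAllocateUserKernelId(context, &kernel_id) != VX_SUCCESS)
            return NULL;
        return vxAddUserKernel(context, "com.example.my_kernel", kernel_id,
                               my_kernel_func, 2, my_kernel_validate,
                               NULL /* initialize */, NULL /* deinitialize */);
    }

A complete registration would also describe each parameter with vxAddParameterToKernel and then call vxFinalizeKernel before the kernel can be used in a graph.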
+* +* \param [in,out] node The handle to the node that is being validated. +* \param [in] index The index of the output parameter for which a valid region should be set. +* \param [in] input_valid A pointer to an array of valid regions of input images or images +* contained in image container (e.g. pyramids). They are provided in same order as the parameter +* list of the kernel's declaration. +* \param [out] output_valid An array of valid regions that should be set for the output images or +* image containers (e.g. pyramid) after graph processing. The length of the array should be equal +* to the size of the image container (e.g. number of levels in the pyramid). For a simple output +* image the array size is always one. Each rectangle supplies the valid region for one image. The +* array memory allocation is managed by the framework. +* \return An error code describing the validation status on parameters. +* \ingroup group_user_kernels +*/ +typedef vx_status(VX_CALLBACK *vx_kernel_image_valid_rectangle_f)(vx_node node, vx_uint32 index, const vx_rectangle_t* const input_valid[], vx_rectangle_t* const output_valid[]); + +/*! \brief The log callback function. + * \ingroup group_log + */ +typedef void (VX_CALLBACK *vx_log_callback_f)(vx_context context, + vx_reference ref, + vx_status status, + const vx_char string[]); + +/*! \brief The Map/Unmap operation enumeration. + * \ingroup group_image + */ +enum vx_map_flag_e { + VX_NOGAP_X = 1, /*!< \brief No Gap. */ +}; + + +enum vx_const_tensor_cache_mode +{ + VX_PRELOAD_NULL = 0, + VX_PRELOAD_CONST_TENSOR_VIPSRAM = 1, + VX_PRELOAD_CONST_TENSOR_AXISRAM = 2, + VX_KERNEL_CACHE_PARTIAL_MODE = 3, + VX_KERNEL_CACHE_STREAM_MODE = 4, + VX_PRELOAD_TYPE_COUNT +}; +#endif diff --git a/unified-tina/inc/VX/vx_vendors.h b/unified-tina/inc/VX/vx_vendors.h new file mode 100644 index 0000000..9d49f95 --- /dev/null +++ b/unified-tina/inc/VX/vx_vendors.h @@ -0,0 +1,67 @@ +/* + + * Copyright (c) 2012-2017 The Khronos Group Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef _OPENVX_VENDORS_H_ +#define _OPENVX_VENDORS_H_ + +/*! + * \file + * \brief The Vendor ID list for OpenVX. + */ + +/*! \brief The Vendor ID of the Implementation. As new vendors submit their + * implementations, this enumeration will grow. + * \ingroup group_basic_features + */ +enum vx_vendor_id_e { + VX_ID_KHRONOS = 0x000, /*!< \brief The Khronos Group */ + VX_ID_TI = 0x001, /*!< \brief Texas Instruments, Inc. */ + VX_ID_QUALCOMM = 0x002, /*!< \brief Qualcomm, Inc. */ + VX_ID_NVIDIA = 0x003, /*!< \brief NVIDIA Corporation */ + VX_ID_ARM = 0x004, /*!< \brief ARM Ltd. */ + VX_ID_BDTI = 0x005, /*!< \brief Berkley Design Technology, Inc. */ + VX_ID_RENESAS = 0x006, /*!< \brief Renasas Electronics */ + VX_ID_VIVANTE = 0x007, /*!< \brief Vivante Corporation */ + VX_ID_XILINX = 0x008, /*!< \brief Xilinx Inc. */ + VX_ID_AXIS = 0x009, /*!< \brief Axis Communications */ + VX_ID_MOVIDIUS = 0x00A, /*!< \brief Movidius Ltd. 
*/ + VX_ID_SAMSUNG = 0x00B, /*!< \brief Samsung Electronics */ + VX_ID_FREESCALE = 0x00C, /*!< \brief Freescale Semiconductor */ + VX_ID_AMD = 0x00D, /*!< \brief Advanced Micro Devices */ + VX_ID_BROADCOM = 0x00E, /*!< \brief Broadcom Corporation */ + VX_ID_INTEL = 0x00F, /*!< \brief Intel Corporation */ + VX_ID_MARVELL = 0x010, /*!< \brief Marvell Technology Group Ltd. */ + VX_ID_MEDIATEK = 0x011, /*!< \brief MediaTek, Inc. */ + VX_ID_ST = 0x012, /*!< \brief STMicroelectronics */ + VX_ID_CEVA = 0x013, /*!< \brief CEVA DSP */ + VX_ID_ITSEEZ = 0x014, /*!< \brief Itseez, Inc. */ + VX_ID_IMAGINATION=0x015, /*!< \brief Imagination Technologies */ + VX_ID_NXP = 0x016, /*!< \brief NXP Semiconductors */ + VX_ID_VIDEANTIS = 0x017, /*!< \brief Videantis */ + VX_ID_SYNOPSYS = 0x018, /*!< \brief Synopsys */ + VX_ID_CADENCE = 0x019, /*!< \brief Cadence */ + VX_ID_HUAWEI = 0x01A, /*!< \brief Huawei */ + VX_ID_SOCIONEXT = 0x01B, /*!< \brief Socionext */ + /* Add new vendor code above this line */ + VX_ID_USER = 0xFFE, /*!< \brief For use by vxAllocateUserKernelId and vxAllocateUserKernelLibraryId */ + VX_ID_MAX = 0xFFF, + /*! \brief For use by all Kernel authors until they can obtain an assigned ID. */ + VX_ID_DEFAULT = VX_ID_MAX, +}; + +#endif + diff --git a/unified-tina/inc/VX/vx_viv_sys.h b/unified-tina/inc/VX/vx_viv_sys.h new file mode 100644 index 0000000..fe0e4f9 --- /dev/null +++ b/unified-tina/inc/VX/vx_viv_sys.h @@ -0,0 +1,50 @@ +#ifndef _VX_VIV_SYS_H_ +#define _VX_VIV_SYS_H_ + +#include <VX/vx.h> + +#ifdef __cplusplus +extern "C" { +#endif + +/*! \brief Set clock fscale value to change core and shader frequency. + * \param [in] coreIndex Global core index to set the specific core clock frequency. + * If the value is 0xFFFFFFFF, all the cores will be set. + * \param [in] vipFscaleValue Set core frequency scale size. Value can be 64, 32, 16, 8, 4, 2, 1. + * 64 means 64/64 full frequency, 1 means 1/64 frequency. + * \param [in] shaderFscaleValue Set shader frequency scale size. Value can be 64, 32, 16, 8, 4, 2, 1. + * 64 means 64/64 full frequency, 1 means 1/64 frequency. + * + * \return A \ref vx_status_e enumeration. + * \retval VX_SUCCESS No errors. + * \retval VX_ERROR_INVALID_PARAMETERS Invalid frequency scale values. + * \retval VX_FAILURE Failed to change core and shader frequency. + */ +VX_API_ENTRY vx_status VX_API_CALL vxSysSetVipFrequency( + vx_uint32 coreIndex, + vx_uint32 vipFscaleValue, + vx_uint32 shaderFscaleValue + ); + +/*! \brief Cancel all VIP processing jobs on a device. + * \param [in] context The reference to the implementation context. + * \param [in] deviceID The device ID bound to the graph. + * \return A \ref vx_status_e enumeration. + * \retval VX_SUCCESS Cancelled all VIP processing jobs successfully on a device, + * and the user can check the return of vxProcessGraph() to get the cancelled status. + * \retval VX_ERROR_INVALID_PARAMETERS Invalid context reference. + * \retval VX_ERROR_NOT_SUPPORTED Hardware does not support job cancellation. + * \retval VX_FAILURE Failed to cancel VIP processing jobs on a device. + */ +VX_API_ENTRY vx_status VX_API_CALL vxSysCancelJob( + vx_context context, + vx_uint32 deviceID + ); + +#ifdef __cplusplus +} +#endif + + +#endif + diff --git a/unified-tina/inc/VX/vxu.h b/unified-tina/inc/VX/vxu.h new file mode 100644 index 0000000..8c6971d --- /dev/null +++ b/unified-tina/inc/VX/vxu.h @@ -0,0 +1,924 @@ +/* + + * Copyright (c) 2012-2017 The Khronos Group Inc.
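The two vendor entry points declared in vx_viv_sys.h can be driven as below. A minimal sketch, assuming the header is reachable as <VX/vx_viv_sys.h> and that device 0 is the device of interest; the fscale values follow the comment above (64 = full clock):

    #include <VX/vx.h>
    #include <VX/vx_viv_sys.h>

    /* Halve the VIP core and shader clocks, then cancel outstanding jobs (sketch). */
    static void throttle_then_cancel(vx_context context)
    {
        /* 0xFFFFFFFF selects all cores; 32 out of 64 is half of the full frequency. */
        if (vxSysSetVipFrequency(0xFFFFFFFF, 32, 32) != VX_SUCCESS)
            return;

        /* Affected vxProcessGraph() calls then report the cancelled status. */
        (void)vxSysCancelJob(context, 0 /* deviceID, assumed */);
    }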
+ * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef _OPENVX_UTILITY_H_ +#define _OPENVX_UTILITY_H_ + +/*! + * \file + * \brief The OpenVX Utility Library. + */ + +#ifdef __cplusplus +extern "C" { +#endif + +/*! \brief [Immediate] Invokes an immediate Color Conversion. + * \param [in] context The reference to the overall context. + * \param [in] input The input image. + * \param [out] output The output image. + * \ingroup group_vision_function_colorconvert + * \return A \ref vx_status_e enumeration. + * \retval VX_SUCCESS Success + * \retval * An error occurred. See \ref vx_status_e. + */ +VX_API_ENTRY vx_status VX_API_CALL vxuColorConvert(vx_context context, vx_image input, vx_image output); + +/*! \brief [Immediate] Invokes an immediate Channel Extract. + * \param [in] context The reference to the overall context. + * \param [in] input The input image. Must be one of the defined \ref vx_df_image_e multi-channel formats. + * \param [in] channel The \ref vx_channel_e enumeration to extract. + * \param [out] output The output image. Must be \ref VX_DF_IMAGE_U8. + * \ingroup group_vision_function_channelextract + * \return A \ref vx_status_e enumeration. + * \retval VX_SUCCESS Success + * \retval * An error occurred. See \ref vx_status_e. + */ +VX_API_ENTRY vx_status VX_API_CALL vxuChannelExtract(vx_context context, vx_image input, vx_enum channel, vx_image output); + +/*! \brief [Immediate] Invokes an immediate Channel Combine. + * \param [in] context The reference to the overall context. + * \param [in] plane0 The plane that forms channel 0. Must be \ref VX_DF_IMAGE_U8. + * \param [in] plane1 The plane that forms channel 1. Must be \ref VX_DF_IMAGE_U8. + * \param [in] plane2 [optional] The plane that forms channel 2. Must be \ref VX_DF_IMAGE_U8. + * \param [in] plane3 [optional] The plane that forms channel 3. Must be \ref VX_DF_IMAGE_U8. + * \param [out] output The output image. + * \ingroup group_vision_function_channelcombine + * \return A \ref vx_status_e enumeration. + * \retval VX_SUCCESS Success + * \retval * An error occurred. See \ref vx_status_e. + */ +VX_API_ENTRY vx_status VX_API_CALL vxuChannelCombine(vx_context context, vx_image plane0, vx_image plane1, vx_image plane2, vx_image plane3, vx_image output); + +/*! \brief [Immediate] Invokes an immediate Sobel 3x3. + * \param [in] context The reference to the overall context. + * \param [in] input The input image in \ref VX_DF_IMAGE_U8 format. + * \param [out] output_x [optional] The output gradient in the x direction in \ref VX_DF_IMAGE_S16. + * \param [out] output_y [optional] The output gradient in the y direction in \ref VX_DF_IMAGE_S16. + * \ingroup group_vision_function_sobel3x3 + * \return A \ref vx_status_e enumeration. + * \retval VX_SUCCESS Success + * \retval * An error occurred. See \ref vx_status_e. + */ +VX_API_ENTRY vx_status VX_API_CALL vxuSobel3x3(vx_context context, vx_image input, vx_image output_x, vx_image output_y); + +/*! \brief [Immediate] Invokes an immediate Magnitude. 
+ * \param [in] context The reference to the overall context. + * \param [in] grad_x The input x image. This must be in \ref VX_DF_IMAGE_S16 format. + * \param [in] grad_y The input y image. This must be in \ref VX_DF_IMAGE_S16 format. + * \param [out] mag The magnitude image. This will be in \ref VX_DF_IMAGE_S16 format. + * \ingroup group_vision_function_magnitude + * \return A \ref vx_status_e enumeration. + * \retval VX_SUCCESS Success + * \retval * An error occurred. See \ref vx_status_e. + */ +VX_API_ENTRY vx_status VX_API_CALL vxuMagnitude(vx_context context, vx_image grad_x, vx_image grad_y, vx_image mag); + +/*! \brief [Immediate] Invokes an immediate Phase. + * \param [in] context The reference to the overall context. + * \param [in] grad_x The input x image. This must be in \ref VX_DF_IMAGE_S16 format. + * \param [in] grad_y The input y image. This must be in \ref VX_DF_IMAGE_S16 format. + * \param [out] orientation The phase image. This will be in \ref VX_DF_IMAGE_U8 format. + * \ingroup group_vision_function_phase + * \return A \ref vx_status_e enumeration. + * \retval VX_SUCCESS Success + * \retval * An error occurred. See \ref vx_status_e. + */ +VX_API_ENTRY vx_status VX_API_CALL vxuPhase(vx_context context, vx_image grad_x, vx_image grad_y, vx_image orientation); + +/*! \brief [Immediate] Scales an input image to an output image. + * \param [in] context The reference to the overall context. + * \param [in] src The source image of type \ref VX_DF_IMAGE_U8. + * \param [out] dst The destination image of type \ref VX_DF_IMAGE_U8. + * \param [in] type The interpolation type. \see vx_interpolation_type_e. + * \ingroup group_vision_function_scale_image + * \return A \ref vx_status_e enumeration. + * \retval VX_SUCCESS Success + * \retval * An error occurred. See \ref vx_status_e. + */ +VX_API_ENTRY vx_status VX_API_CALL vxuScaleImage(vx_context context, vx_image src, vx_image dst, vx_enum type); + +/*! \brief [Immediate] Processes the image through the LUT. + * \param [in] context The reference to the overall context. + * \param [in] input The input image in \ref VX_DF_IMAGE_U8 or \ref VX_DF_IMAGE_S16. + * \param [in] lut The LUT which is of type \ref VX_TYPE_UINT8 if input image is \ref VX_DF_IMAGE_U8 or \ref VX_TYPE_INT16 if input image is \ref VX_DF_IMAGE_S16. + * \param [out] output The output image of the same type as the input image. + * \ingroup group_vision_function_lut + * \return A \ref vx_status_e enumeration. + * \retval VX_SUCCESS Success + * \retval * An error occurred. See \ref vx_status_e. + */ +VX_API_ENTRY vx_status VX_API_CALL vxuTableLookup(vx_context context, vx_image input, vx_lut lut, vx_image output); + +/*! \brief [Immediate] Generates a distribution from an image. + * \param [in] context The reference to the overall context. + * \param [in] input The input image in \ref VX_DF_IMAGE_U8 + * \param [out] distribution The output distribution. + * \ingroup group_vision_function_histogram + * \return A \ref vx_status_e enumeration. + * \retval VX_SUCCESS Success + * \retval * An error occurred. See \ref vx_status_e. + */ +VX_API_ENTRY vx_status VX_API_CALL vxuHistogram(vx_context context, vx_image input, vx_distribution distribution); + +/*! \brief [Immediate] Equalizes the Histogram of a grayscale image. + * \param [in] context The reference to the overall context. + * \param [in] input The grayscale input image in \ref VX_DF_IMAGE_U8 + * \param [out] output The grayscale output image of type \ref VX_DF_IMAGE_U8 with equalized brightness and contrast.
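The immediate-mode ("vxu") entry points operate on images directly, without building a graph. A minimal sketch chaining vxuSobel3x3 and vxuMagnitude; width and height are whatever the caller's input image uses:

    #include <VX/vx.h>
    #include <VX/vxu.h>

    /* Gradient magnitude of a U8 input using immediate-mode calls (sketch). */
    static vx_status gradient_magnitude(vx_context context, vx_image input,
                                        vx_image mag, /* VX_DF_IMAGE_S16 output */
                                        vx_uint32 width, vx_uint32 height)
    {
        vx_image gx = vxCreateImage(context, width, height, VX_DF_IMAGE_S16);
        vx_image gy = vxCreateImage(context, width, height, VX_DF_IMAGE_S16);

        vx_status status = vxuSobel3x3(context, input, gx, gy);
        if (status == VX_SUCCESS)
            status = vxuMagnitude(context, gx, gy, mag);

        vxReleaseImage(&gx);
        vxReleaseImage(&gy);
        return status;
    }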
+ * \ingroup group_vision_function_equalize_hist + * \return A \ref vx_status_e enumeration. + * \retval VX_SUCCESS Success + * \retval * An error occurred. See \ref vx_status_e. + */ +VX_API_ENTRY vx_status VX_API_CALL vxuEqualizeHist(vx_context context, vx_image input, vx_image output); + +/*! \brief [Immediate] Computes the absolute difference between two images. + * \param [in] context The reference to the overall context. + * \param [in] in1 An input image in \ref VX_DF_IMAGE_U8 or \ref VX_DF_IMAGE_S16 format. + * \param [in] in2 An input image in \ref VX_DF_IMAGE_U8 or \ref VX_DF_IMAGE_S16 format. + * \param [out] out The output image in \ref VX_DF_IMAGE_U8 or \ref VX_DF_IMAGE_S16 format. + * \ingroup group_vision_function_absdiff + * \return A \ref vx_status_e enumeration. + * \retval VX_SUCCESS Success + * \retval * An error occurred. See \ref vx_status_e. + */ +VX_API_ENTRY vx_status VX_API_CALL vxuAbsDiff(vx_context context, vx_image in1, vx_image in2, vx_image out); + +/*! \brief [Immediate] Computes the mean value and optionally the standard deviation. + * \param [in] context The reference to the overall context. + * \param [in] input The input image. \ref VX_DF_IMAGE_U8 is supported. + * \param [out] mean The average pixel value. + * \param [out] stddev [optional] The standard deviation of the pixel values. + * \ingroup group_vision_function_meanstddev + * \return A \ref vx_status_e enumeration. + * \retval VX_SUCCESS Success + * \retval * An error occurred. See \ref vx_status_e. + */ +VX_API_ENTRY vx_status VX_API_CALL vxuMeanStdDev(vx_context context, vx_image input, vx_float32 *mean, vx_float32 *stddev); + +/*! \brief [Immediate] Threshold's an input image and produces a \ref VX_DF_IMAGE_U8 boolean image. + * \param [in] context The reference to the overall context. + * \param [in] input The input image. Only images with format \ref VX_DF_IMAGE_U8 + * and \ref VX_DF_IMAGE_S16 are supported. + * \param [in] thresh The thresholding object that defines the parameters of + * the operation. The \ref VX_THRESHOLD_INPUT_FORMAT must be the same as the input image format and + * the \ref VX_THRESHOLD_OUTPUT_FORMAT must be the same as the output image format. + * \param [out] output The output image, that will contain as pixel value + * true and false values defined by \p thresh. Only images with format + * \ref VX_DF_IMAGE_U8 are supported. + * \ingroup group_vision_function_threshold + * \return A \ref vx_status_e enumeration. + * \retval VX_SUCCESS Success + * \retval * An error occurred. See \ref vx_status_e. + */ +VX_API_ENTRY vx_status VX_API_CALL vxuThreshold(vx_context context, vx_image input, vx_threshold thresh, vx_image output); + +/*! \brief [Immediate] Performs Non-Maxima Suppression on an image, producing an image of the same type. + * \param [in] context The reference to the overall context. + * \param [in] input The input image in \ref VX_DF_IMAGE_U8 or \ref VX_DF_IMAGE_S16 format. + * \param [in] mask [optional] Constrict suppression to a ROI. The mask image is of type \ref VX_DF_IMAGE_U8 and must be the same dimensions as the input image. + * \param [in] win_size The size of window over which to perform the localized non-maxima suppression. Must be odd, and less than or equal to the smallest dimension of the input image. + * \param [out] output The output image, of the same type as the input, that has been non-maxima suppressed. + * \ingroup group_vision_function_nms + * \return A \ref vx_status_e enumeration. 
+ * \retval VX_SUCCESS Success + * \retval * An error occurred. See \ref vx_status_e. + */ +VX_API_ENTRY vx_status VX_API_CALL vxuNonMaxSuppression(vx_context context, vx_image input, vx_image mask, vx_int32 win_size, vx_image output); + +/*! \brief [Immediate] Computes the integral image of the input. + * \param [in] context The reference to the overall context. + * \param [in] input The input image in \ref VX_DF_IMAGE_U8 format. + * \param [out] output The output image in \ref VX_DF_IMAGE_U32 format. + * \ingroup group_vision_function_integral_image + * \return A \ref vx_status_e enumeration. + * \retval VX_SUCCESS Success + * \retval * An error occurred. See \ref vx_status_e. + */ +VX_API_ENTRY vx_status VX_API_CALL vxuIntegralImage(vx_context context, vx_image input, vx_image output); + +/*! \brief [Immediate] Erodes an image by a 3x3 window. + * \param [in] context The reference to the overall context. + * \param [in] input The input image in \ref VX_DF_IMAGE_U8 format. + * \param [out] output The output image in \ref VX_DF_IMAGE_U8 format. + * \ingroup group_vision_function_erode_image + * \return A \ref vx_status_e enumeration. + * \retval VX_SUCCESS Success + * \retval * An error occurred. See \ref vx_status_e. + */ +VX_API_ENTRY vx_status VX_API_CALL vxuErode3x3(vx_context context, vx_image input, vx_image output); + +/*! \brief [Immediate] Dilates an image by a 3x3 window. + * \param [in] context The reference to the overall context. + * \param [in] input The input image in \ref VX_DF_IMAGE_U8 format. + * \param [out] output The output image in \ref VX_DF_IMAGE_U8 format. + * \ingroup group_vision_function_dilate_image + * \return A \ref vx_status_e enumeration. + * \retval VX_SUCCESS Success + * \retval * An error occurred. See \ref vx_status_e. + */ +VX_API_ENTRY vx_status VX_API_CALL vxuDilate3x3(vx_context context, vx_image input, vx_image output); + +/*! \brief [Immediate] Computes a median filter on the image by a 3x3 window. + * \param [in] context The reference to the overall context. + * \param [in] input The input image in \ref VX_DF_IMAGE_U8 format. + * \param [out] output The output image in \ref VX_DF_IMAGE_U8 format. + * \ingroup group_vision_function_median_image + * \return A \ref vx_status_e enumeration. + * \retval VX_SUCCESS Success + * \retval * An error occurred. See \ref vx_status_e. + */ +VX_API_ENTRY vx_status VX_API_CALL vxuMedian3x3(vx_context context, vx_image input, vx_image output); + +/*! \brief [Immediate] Computes a box filter on the image by a 3x3 window. + * \param [in] context The reference to the overall context. + * \param [in] input The input image in \ref VX_DF_IMAGE_U8 format. + * \param [out] output The output image in \ref VX_DF_IMAGE_U8 format. + * \ingroup group_vision_function_box_image + * \return A \ref vx_status_e enumeration. + * \retval VX_SUCCESS Success + * \retval * An error occurred. See \ref vx_status_e. + */ +VX_API_ENTRY vx_status VX_API_CALL vxuBox3x3(vx_context context, vx_image input, vx_image output); + +/*! \brief [Immediate] Computes a gaussian filter on the image by a 3x3 window. + * \param [in] context The reference to the overall context. + * \param [in] input The input image in \ref VX_DF_IMAGE_U8 format. + * \param [out] output The output image in \ref VX_DF_IMAGE_U8 format. + * \ingroup group_vision_function_gaussian_image + * \return A \ref vx_status_e enumeration. + * \retval VX_SUCCESS Success + * \retval * An error occurred. See \ref vx_status_e. 
+ */ +VX_API_ENTRY vx_status VX_API_CALL vxuGaussian3x3(vx_context context, vx_image input, vx_image output); + +/*! \brief [Immediate] Performs Non-linear Filtering. + * \param [in] context The reference to the overall context. + * \param [in] function The non-linear filter function. See \ref vx_non_linear_filter_e. + * \param [in] input The input image in \ref VX_DF_IMAGE_U8 format. + * \param [in] mask The mask to be applied to the Non-linear function. \ref VX_MATRIX_ORIGIN attribute is used + * to place the mask appropriately when computing the resulting image. See \ref vxCreateMatrixFromPattern and \ref vxCreateMatrixFromPatternAndOrigin. + * \param [out] output The output image in \ref VX_DF_IMAGE_U8 format. + * \return A \ref vx_status_e enumeration. + * \retval VX_SUCCESS Success + * \retval * An error occurred. See \ref vx_status_e. + * \ingroup group_vision_function_nonlinear_filter + */ +VX_API_ENTRY vx_status VX_API_CALL vxuNonLinearFilter(vx_context context, vx_enum function, vx_image input, vx_matrix mask, vx_image output); + + +/*! \brief [Immediate] Computes a convolution on the input image with the supplied + * matrix. + * \param [in] context The reference to the overall context. + * \param [in] input The input image in \ref VX_DF_IMAGE_U8 format. + * \param [in] conv The \ref vx_int16 convolution matrix. + * \param [out] output The output image in \ref VX_DF_IMAGE_U8 or \ref VX_DF_IMAGE_S16 format. + * \ingroup group_vision_function_custom_convolution + * \return A \ref vx_status_e enumeration. + * \retval VX_SUCCESS Success + * \retval * An error occurred. See \ref vx_status_e. + */ +VX_API_ENTRY vx_status VX_API_CALL vxuConvolve(vx_context context, vx_image input, vx_convolution conv, vx_image output); + +/*! \brief [Immediate] Computes a Gaussian pyramid from an input image. + * \param [in] context The reference to the overall context. + * \param [in] input The input image in \ref VX_DF_IMAGE_U8 + * \param [out] gaussian The Gaussian pyramid with \ref VX_DF_IMAGE_U8 to construct. + * \ingroup group_vision_function_gaussian_pyramid + * \return A \ref vx_status_e enumeration. + * \retval VX_SUCCESS Success + * \retval * An error occurred. See \ref vx_status_e. + */ +VX_API_ENTRY vx_status VX_API_CALL vxuGaussianPyramid(vx_context context, vx_image input, vx_pyramid gaussian); + +/*! \brief [Immediate] Computes a Laplacian pyramid from an input image. + * \param [in] context The reference to the overall context. + * \param [in] input The input image in \ref VX_DF_IMAGE_U8 or \ref VX_DF_IMAGE_S16 format. + * \param [out] laplacian The Laplacian pyramid with \ref VX_DF_IMAGE_S16 to construct. + * \param [out] output The lowest resolution image in \ref VX_DF_IMAGE_U8 or \ref VX_DF_IMAGE_S16 format necessary to reconstruct the input image from the pyramid. The output image format should be same as input image format. + * \ingroup group_vision_function_laplacian_pyramid + * \see group_pyramid + * \return A \ref vx_status enumeration. + * \retval VX_SUCCESS Success. + * \retval * An error occured. See \ref vx_status_e + */ +VX_API_ENTRY vx_status VX_API_CALL vxuLaplacianPyramid(vx_context context, vx_image input, vx_pyramid laplacian, vx_image output); + +/*! \brief [Immediate] Reconstructs an image from a Laplacian Image pyramid. + * \param [in] context The reference to the overall context. + * \param [in] laplacian The Laplacian pyramid with \ref VX_DF_IMAGE_S16 format. 
+ * \param [in] input The lowest resolution image in \ref VX_DF_IMAGE_U8 or \ref VX_DF_IMAGE_S16 format for the Laplacian pyramid. + * \param [out] output The output image in \ref VX_DF_IMAGE_U8 or \ref VX_DF_IMAGE_S16 format with the highest possible resolution reconstructed from the Laplacian pyramid. The output image format should be same as input image format. + * \ingroup group_vision_function_laplacian_reconstruct + * \see group_pyramid + * \return A \ref vx_status enumeration. + * \retval VX_SUCCESS Success. + * \retval * An error occured. See \ref vx_status_e + */ +VX_API_ENTRY vx_status VX_API_CALL vxuLaplacianReconstruct(vx_context context, vx_pyramid laplacian, vx_image input, + vx_image output); + +/*! \brief [Immediate] Computes a weighted average image. + * \param [in] context The reference to the overall context. + * \param [in] img1 The first \ref VX_DF_IMAGE_U8 image. + * \param [in] alpha A \ref VX_TYPE_FLOAT32 type, the input value with the range \f$ 0.0 \le \alpha \le 1.0 \f$. + * \param [in] img2 The second \ref VX_DF_IMAGE_U8 image. + * \param [out] output The output \ref VX_DF_IMAGE_U8 image. + * \ingroup group_vision_function_weighted_average + * \return A \ref vx_status_e enumeration. + * \retval VX_SUCCESS Success + * \retval * An error occurred. See \ref vx_status_e. + */ +VX_API_ENTRY vx_status VX_API_CALL vxuWeightedAverage(vx_context context, vx_image img1, vx_scalar alpha, vx_image img2, vx_image output); + +/*! \brief [Immediate] Computes the minimum and maximum values of the image. + * \param [in] context The reference to the overall context. + * \param [in] input The input image in \ref VX_DF_IMAGE_U8 or \ref VX_DF_IMAGE_S16 format. + * \param [out] minVal The minimum value in the image, which corresponds to the type of the input. + * \param [out] maxVal The maximum value in the image, which corresponds to the type of the input. + * \param [out] minLoc [optional] The minimum \ref VX_TYPE_COORDINATES2D locations. If the input image has several minimums, the kernel will return up to the capacity of the array. + * \param [out] maxLoc [optional] The maximum \ref VX_TYPE_COORDINATES2D locations. If the input image has several maximums, the kernel will return up to the capacity of the array. + * \param [out] minCount [optional] The total number of detected minimums in image. Use a \ref VX_TYPE_SIZE scalar. + * \param [out] maxCount [optional] The total number of detected maximums in image. Use a \ref VX_TYPE_SIZE scalar. + * \ingroup group_vision_function_minmaxloc + * \return A \ref vx_status_e enumeration. + * \retval VX_SUCCESS Success + * \retval * An error occurred. See \ref vx_status_e. + */ +VX_API_ENTRY vx_status VX_API_CALL vxuMinMaxLoc(vx_context context, vx_image input, + vx_scalar minVal, vx_scalar maxVal, + vx_array minLoc, vx_array maxLoc, + vx_scalar minCount, vx_scalar maxCount); + +/*! \brief [Immediate] Computes pixel-wise minimum values between two images. + * \param [in] context The reference to the overall context. + * \param [in] in1 The first input image. Must be of type \ref VX_DF_IMAGE_U8 or \ref VX_DF_IMAGE_S16. + * \param [in] in2 The second input image. Must be of type \ref VX_DF_IMAGE_U8 or \ref VX_DF_IMAGE_S16. + * \param [out] out The output image which will hold the result of min. + * \ingroup group_vision_function_min + * \return A \ref vx_status_e enumeration. + * \retval VX_SUCCESS Success + * \retval * An error occurred. See \ref vx_status_e. 
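Results of vxuMinMaxLoc come back through vx_scalar objects. A minimal sketch for a VX_DF_IMAGE_U8 input, leaving the optional location and count outputs unset:

    #include <stddef.h>
    #include <VX/vx.h>
    #include <VX/vxu.h>

    /* Global minimum and maximum of a U8 image (sketch). */
    static vx_status image_min_max(vx_context context, vx_image input,
                                   vx_uint8 *min_out, vx_uint8 *max_out)
    {
        vx_uint8  init    = 0;
        vx_scalar min_val = vxCreateScalar(context, VX_TYPE_UINT8, &init);
        vx_scalar max_val = vxCreateScalar(context, VX_TYPE_UINT8, &init);

        vx_status status = vxuMinMaxLoc(context, input, min_val, max_val,
                                        NULL, NULL, NULL, NULL);
        if (status == VX_SUCCESS)
            status = vxCopyScalar(min_val, min_out, VX_READ_ONLY, VX_MEMORY_TYPE_HOST);
        if (status == VX_SUCCESS)
            status = vxCopyScalar(max_val, max_out, VX_READ_ONLY, VX_MEMORY_TYPE_HOST);

        vxReleaseScalar(&min_val);
        vxReleaseScalar(&max_val);
        return status;
    }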
+ */ +VX_API_ENTRY vx_status VX_API_CALL vxuMin(vx_context context, vx_image in1, vx_image in2, vx_image out); + +/*! \brief [Immediate] Computes pixel-wise maximum values between two images. + * \param [in] context The reference to the overall context. + * \param [in] in1 The first input image. Must be of type \ref VX_DF_IMAGE_U8 or \ref VX_DF_IMAGE_S16. + * \param [in] in2 The second input image. Must be of type \ref VX_DF_IMAGE_U8 or \ref VX_DF_IMAGE_S16. + * \param [out] out The output image which will hold the result of max. + * \ingroup group_vision_function_max + * \return A \ref vx_status_e enumeration. + * \retval VX_SUCCESS Success + * \retval * An error occurred. See \ref vx_status_e. + */ +VX_API_ENTRY vx_status VX_API_CALL vxuMax(vx_context context, vx_image in1, vx_image in2, vx_image out); + +/*! \brief [Immediate] Converts the input images bit-depth into the output image. + * \param [in] context The reference to the overall context. + * \param [in] input The input image. + * \param [out] output The output image. + * \param [in] policy A \ref VX_TYPE_ENUM of the \ref vx_convert_policy_e enumeration. + * \param [in] shift A scalar containing a \ref VX_TYPE_INT32 of the shift value. + * \ingroup group_vision_function_convertdepth + * \return A \ref vx_status_e enumeration. + * \retval VX_SUCCESS Success + * \retval * An error occurred. See \ref vx_status_e.. + */ +VX_API_ENTRY vx_status VX_API_CALL vxuConvertDepth(vx_context context, vx_image input, vx_image output, vx_enum policy, vx_int32 shift); + +/*! \brief [Immediate] Computes Canny Edges on the input image into the output image. + * \param [in] context The reference to the overall context. + * \param [in] input The input \ref VX_DF_IMAGE_U8 image. + * \param [in] hyst The double threshold for hysteresis. The \ref VX_THRESHOLD_INPUT_FORMAT shall be either + * \ref VX_DF_IMAGE_U8 or \ref VX_DF_IMAGE_S16. The \ref VX_THRESHOLD_OUTPUT_FORMAT is ignored. + * \param [in] gradient_size The size of the Sobel filter window, must support at least 3, 5 and 7. + * \param [in] norm_type A flag indicating the norm used to compute the gradient, \ref VX_NORM_L1 or \ref VX_NORM_L2. + * \param [out] output The output image in \ref VX_DF_IMAGE_U8 format with values either 0 or 255. + * \ingroup group_vision_function_canny + * \return A \ref vx_status_e enumeration. + * \retval VX_SUCCESS Success + * \retval * An error occurred. See \ref vx_status_e. + */ +VX_API_ENTRY vx_status VX_API_CALL vxuCannyEdgeDetector(vx_context context, vx_image input, vx_threshold hyst, + vx_int32 gradient_size, vx_enum norm_type, + vx_image output); + +/*! \brief [Immediate] Performs a Gaussian Blur on an image then half-scales it. The interpolation mode used is nearest-neighbor. + * \param [in] context The reference to the overall context. + * \param [in] input The input \ref VX_DF_IMAGE_U8 image. + * \param [out] output The output \ref VX_DF_IMAGE_U8 image. + * \param [in] kernel_size The input size of the Gaussian filter. Supported values are 1, 3 and 5. + * \ingroup group_vision_function_scale_image + * \return A \ref vx_status_e enumeration. + * \retval VX_SUCCESS Success + * \retval * An error occurred. See \ref vx_status_e. + */ +VX_API_ENTRY vx_status VX_API_CALL vxuHalfScaleGaussian(vx_context context, vx_image input, vx_image output, vx_int32 kernel_size); + +/*! \brief [Immediate] Computes the bitwise and between two images. + * \param [in] context The reference to the overall context. 
+ * \param [in] in1 A \ref VX_DF_IMAGE_U8 input image + * \param [in] in2 A \ref VX_DF_IMAGE_U8 input image + * \param [out] out The \ref VX_DF_IMAGE_U8 output image. + * \ingroup group_vision_function_and + * \return A \ref vx_status_e enumeration. + * \retval VX_SUCCESS Success + * \retval * An error occurred. See \ref vx_status_e. + */ +VX_API_ENTRY vx_status VX_API_CALL vxuAnd(vx_context context, vx_image in1, vx_image in2, vx_image out); + +/*! \brief [Immediate] Computes the bitwise inclusive-or between two images. + * \param [in] context The reference to the overall context. + * \param [in] in1 A \ref VX_DF_IMAGE_U8 input image + * \param [in] in2 A \ref VX_DF_IMAGE_U8 input image + * \param [out] out The \ref VX_DF_IMAGE_U8 output image. + * \ingroup group_vision_function_or + * \return A \ref vx_status_e enumeration. + * \retval VX_SUCCESS Success + * \retval * An error occurred. See \ref vx_status_e. + */ +VX_API_ENTRY vx_status VX_API_CALL vxuOr(vx_context context, vx_image in1, vx_image in2, vx_image out); + +/*! \brief [Immediate] Computes the bitwise exclusive-or between two images. + * \param [in] context The reference to the overall context. + * \param [in] in1 A \ref VX_DF_IMAGE_U8 input image + * \param [in] in2 A \ref VX_DF_IMAGE_U8 input image + * \param [out] out The \ref VX_DF_IMAGE_U8 output image. + * \ingroup group_vision_function_xor + * \return A \ref vx_status_e enumeration. + * \retval VX_SUCCESS Success + * \retval * An error occurred. See \ref vx_status_e. + */ +VX_API_ENTRY vx_status VX_API_CALL vxuXor(vx_context context, vx_image in1, vx_image in2, vx_image out); + +/*! \brief [Immediate] Computes the bitwise not of an image. + * \param [in] context The reference to the overall context. + * \param [in] input The \ref VX_DF_IMAGE_U8 input image + * \param [out] output The \ref VX_DF_IMAGE_U8 output image. + * \ingroup group_vision_function_not + * \return A \ref vx_status_e enumeration. + * \retval VX_SUCCESS Success + * \retval * An error occurred. See \ref vx_status_e. + */ +VX_API_ENTRY vx_status VX_API_CALL vxuNot(vx_context context, vx_image input, vx_image output); + +/*! \brief [Immediate] Performs elementwise multiplications on pixel values in the input images and a scale. + * \param [in] context The reference to the overall context. + * \param [in] in1 A \ref VX_DF_IMAGE_U8 or \ref VX_DF_IMAGE_S16 input image. + * \param [in] in2 A \ref VX_DF_IMAGE_U8 or \ref VX_DF_IMAGE_S16 input image. + * \param [in] scale A non-negative \ref VX_TYPE_FLOAT32 multiplied to each product before overflow handling. + * \param [in] overflow_policy A \ref vx_convert_policy_e enumeration. + * \param [in] rounding_policy A \ref vx_round_policy_e enumeration. + * \param [out] out The output image in \ref VX_DF_IMAGE_U8 or \ref VX_DF_IMAGE_S16 format. + * \ingroup group_vision_function_mult + * \return A \ref vx_status_e enumeration. + * \retval VX_SUCCESS Success + * \retval * An error occurred. See \ref vx_status_e. + */ +VX_API_ENTRY vx_status VX_API_CALL vxuMultiply(vx_context context, vx_image in1, vx_image in2, vx_float32 scale, vx_enum overflow_policy, vx_enum rounding_policy, vx_image out); + +/*! \brief [Immediate] Performs arithmetic addition on pixel values in the input images. + * \param [in] context The reference to the overall context. + * \param [in] in1 A \ref VX_DF_IMAGE_U8 or \ref VX_DF_IMAGE_S16 input image. + * \param [in] in2 A \ref VX_DF_IMAGE_U8 or \ref VX_DF_IMAGE_S16 input image. + * \param [in] policy A \ref vx_convert_policy_e enumeration. 
+ * \param [out] out The output image in \ref VX_DF_IMAGE_U8 or \ref VX_DF_IMAGE_S16 format. + * \ingroup group_vision_function_add + * \return A \ref vx_status_e enumeration. + * \retval VX_SUCCESS Success + * \retval * An error occurred. See \ref vx_status_e. + */ +VX_API_ENTRY vx_status VX_API_CALL vxuAdd(vx_context context, vx_image in1, vx_image in2, vx_enum policy, vx_image out); + +/*! \brief [Immediate] Performs arithmetic subtraction on pixel values in the input images. + * \param [in] context The reference to the overall context. + * \param [in] in1 A \ref VX_DF_IMAGE_U8 or \ref VX_DF_IMAGE_S16 input image, the minuend. + * \param [in] in2 A \ref VX_DF_IMAGE_U8 or \ref VX_DF_IMAGE_S16 input image, the subtrahend. + * \param [in] policy A \ref vx_convert_policy_e enumeration. + * \param [out] out The output image in \ref VX_DF_IMAGE_U8 or \ref VX_DF_IMAGE_S16 format. + * \ingroup group_vision_function_sub + * \return A \ref vx_status_e enumeration. + * \retval VX_SUCCESS Success + * \retval * An error occurred. See \ref vx_status_e. + */ +VX_API_ENTRY vx_status VX_API_CALL vxuSubtract(vx_context context, vx_image in1, vx_image in2, vx_enum policy, vx_image out); + +/*! \brief [Immediate] Performs an Affine warp on an image. + * \param [in] context The reference to the overall context. + * \param [in] input The input \ref VX_DF_IMAGE_U8 image. + * \param [in] matrix The affine matrix. Must be 2x3 of type \ref VX_TYPE_FLOAT32. + * \param [in] type The interpolation type from \ref vx_interpolation_type_e. + * \ref VX_INTERPOLATION_AREA is not supported. + * \param [out] output The output \ref VX_DF_IMAGE_U8 image. + * \ingroup group_vision_function_warp_affine + * \return A \ref vx_status_e enumeration. + * \retval VX_SUCCESS Success + * \retval * An error occurred. See \ref vx_status_e. + */ +VX_API_ENTRY vx_status VX_API_CALL vxuWarpAffine(vx_context context, vx_image input, vx_matrix matrix, vx_enum type, vx_image output); + +/*! \brief [Immediate] Performs an Perspective warp on an image. + * \param [in] context The reference to the overall context. + * \param [in] input The input \ref VX_DF_IMAGE_U8 image. + * \param [in] matrix The perspective matrix. Must be 3x3 of type \ref VX_TYPE_FLOAT32. + * \param [in] type The interpolation type from \ref vx_interpolation_type_e. + * \ref VX_INTERPOLATION_AREA is not supported. + * \param [out] output The output \ref VX_DF_IMAGE_U8 image. + * \ingroup group_vision_function_warp_perspective + * \return A \ref vx_status_e enumeration. + * \retval VX_SUCCESS Success + * \retval * An error occurred. See \ref vx_status_e. + */ +VX_API_ENTRY vx_status VX_API_CALL vxuWarpPerspective(vx_context context, vx_image input, vx_matrix matrix, vx_enum type, vx_image output); + +/*! \brief [Immediate] Computes the Harris Corners over an image and produces the array of scored points. + * \param [in] context The reference to the overall context. + * \param [in] input The input \ref VX_DF_IMAGE_U8 image. + * \param [in] strength_thresh The \ref VX_TYPE_FLOAT32 minimum threshold which to eliminate Harris Corner scores (computed using the normalized Sobel kernel). + * \param [in] min_distance The \ref VX_TYPE_FLOAT32 radial Euclidean distance for non-maximum suppression. + * \param [in] sensitivity The \ref VX_TYPE_FLOAT32 scalar sensitivity threshold \f$ k \f$ from the Harris-Stephens equation. + * \param [in] gradient_size The gradient window size to use on the input. The + * implementation must support at least 3, 5, and 7. 
+ * \param [in] block_size The block window size used to compute the harris corner score. + * The implementation must support at least 3, 5, and 7. + * \param [out] corners The array of \ref VX_TYPE_KEYPOINT structs. The order of the keypoints in this array is implementation dependent. + * \param [out] num_corners [optional] The total number of detected corners in image. Use a \ref VX_TYPE_SIZE scalar + * \ingroup group_vision_function_harris + * \return A \ref vx_status_e enumeration. + * \retval VX_SUCCESS Success + * \retval * An error occurred. See \ref vx_status_e. + */ +VX_API_ENTRY vx_status VX_API_CALL vxuHarrisCorners(vx_context context, + vx_image input, + vx_scalar strength_thresh, + vx_scalar min_distance, + vx_scalar sensitivity, + vx_int32 gradient_size, + vx_int32 block_size, + vx_array corners, + vx_scalar num_corners); + + +/*! \brief [Immediate] Computes corners on an image using FAST algorithm and produces the array of feature points. + * \param [in] context The reference to the overall context. + * \param [in] input The input \ref VX_DF_IMAGE_U8 image. + * \param [in] strength_thresh Threshold on difference between intensity of the central pixel and pixels on Bresenham's circle + * of radius 3 (\ref VX_TYPE_FLOAT32 scalar), with a value in the range of 0.0 \f$\le\f$ strength_thresh < 256.0. + * Any fractional value will be truncated to an integer. + * \param [in] nonmax_suppression If true, non-maximum suppression is applied to + * detected corners before being places in the \ref vx_array of \ref VX_TYPE_KEYPOINT structs. + * \param [out] corners Output corner \ref vx_array of \ref VX_TYPE_KEYPOINT. The order of the keypoints in this array is implementation dependent. + * \param [out] num_corners [optional] The total number of detected corners in image. Use a \ref VX_TYPE_SIZE scalar. + * \ingroup group_vision_function_fast + * \return A \ref vx_status_e enumeration. + * \retval VX_SUCCESS Success + * \retval * An error occurred. See \ref vx_status_e. + */ +VX_API_ENTRY vx_status VX_API_CALL vxuFastCorners(vx_context context, vx_image input, vx_scalar strength_thresh, vx_bool nonmax_suppression, vx_array corners, vx_scalar num_corners); + +/*! \brief [Immediate] Computes an optical flow on two images. + * \param [in] context The reference to the overall context. + * \param [in] old_images Input of first (old) image pyramid in \ref VX_DF_IMAGE_U8. + * \param [in] new_images Input of destination (new) image pyramid in \ref VX_DF_IMAGE_U8 + * \param [in] old_points an array of key points in a vx_array of \ref VX_TYPE_KEYPOINT those key points are defined at + * the old_images high resolution pyramid + * \param [in] new_points_estimates an array of estimation on what is the output key points in a \ref vx_array of + * \ref VX_TYPE_KEYPOINT those keypoints are defined at the new_images high resolution pyramid + * \param [out] new_points an output array of key points in a \ref vx_array of \ref VX_TYPE_KEYPOINT those key points are + * defined at the new_images high resolution pyramid + * \param [in] termination termination can be \ref VX_TERM_CRITERIA_ITERATIONS or \ref VX_TERM_CRITERIA_EPSILON or + * \ref VX_TERM_CRITERIA_BOTH + * \param [in] epsilon is the \ref vx_float32 error for terminating the algorithm + * \param [in] num_iterations is the number of iterations. Use a \ref VX_TYPE_UINT32 scalar. + * \param [in] use_initial_estimate Can be set to either \ref vx_false_e or \ref vx_true_e. 
+ * \param [in] window_dimension The size of the window on which to perform the algorithm. See
+ * \ref VX_CONTEXT_OPTICAL_FLOW_MAX_WINDOW_DIMENSION
+ *
+ * \ingroup group_vision_function_opticalflowpyrlk
+ * \return A \ref vx_status_e enumeration.
+ * \retval VX_SUCCESS Success
+ * \retval * An error occurred. See \ref vx_status_e.
+ */
+VX_API_ENTRY vx_status VX_API_CALL vxuOpticalFlowPyrLK(vx_context context,
+                                                       vx_pyramid old_images,
+                                                       vx_pyramid new_images,
+                                                       vx_array old_points,
+                                                       vx_array new_points_estimates,
+                                                       vx_array new_points,
+                                                       vx_enum termination,
+                                                       vx_scalar epsilon,
+                                                       vx_scalar num_iterations,
+                                                       vx_scalar use_initial_estimate,
+                                                       vx_size window_dimension);
+
+/*! \brief [Immediate] The function compares an image template against overlapped image regions.
+ * \details The detailed equation of the matching can be found in \ref vx_comp_metric_e.
+ * The output of the template matching node is a comparison map as described in \ref vx_comp_metric_e.
+ * The node has a limitation on the template image size (width*height): it must not be larger than 65535.
+ * If the valid region of the template image is smaller than the entire template image, the result in the destination image is implementation-dependent.
+ * \param [in] context The reference to the overall context.
+ * \param [in] src The input image of type \ref VX_DF_IMAGE_U8.
+ * \param [in] templateImage Searched template of type \ref VX_DF_IMAGE_U8.
+ * \param [in] matchingMethod Attribute specifying the comparison method \ref vx_comp_metric_e. This function supports only \ref VX_COMPARE_CCORR_NORM and \ref VX_COMPARE_L2.
+ * \param [out] output Map of comparison results. The output is an image of type \ref VX_DF_IMAGE_S16.
+ * \return A \ref vx_status_e enumeration.
+ * \retval VX_SUCCESS Success
+ * \retval * An error occurred. See \ref vx_status_e.
+ * \ingroup group_vision_function_match_template
+ */
+VX_API_ENTRY vx_status VX_API_CALL vxuMatchTemplate(vx_context context, vx_image src, vx_image templateImage, vx_enum matchingMethod, vx_image output);
+
+/*! \brief [Immediate] The function extracts an LBP image from an input image.
+ * \param [in] context The reference to the overall context.
+ * \param [in] in An input image in vx_image (\f$ SrcImg \f$ in the equations). The image is of type \ref VX_DF_IMAGE_U8.
+ * \param [in] format A variation of LBP, such as original LBP or mLBP. See \ref vx_lbp_format_e.
+ * \param [in] kernel_size Kernel size. Only sizes of 3 and 5 are supported.
+ * \param [out] out An output image in vx_image (\f$ DstImg \f$ in the equations). The image is of type \ref VX_DF_IMAGE_U8.
+ * \return A \ref vx_status_e enumeration.
+ * \retval VX_SUCCESS Success
+ * \retval * An error occurred. See \ref vx_status_e.
+ * \ingroup group_vision_function_lbp
+ */
+VX_API_ENTRY vx_status VX_API_CALL vxuLBP(vx_context context,
+    vx_image in, vx_enum format, vx_int8 kernel_size, vx_image out);
+
+/*! \brief [Immediate] Performs cell calculations for the average gradient magnitude and gradient orientation histograms.
+ * \details Firstly, the gradient magnitude and gradient orientation are computed for each pixel in the input image.
+ * Two 1-D centred, point discrete derivative masks are applied to the input image in the horizontal and vertical directions.
+ * \f[ M_h = [-1, 0, 1] \f] and \f[ M_v = [-1, 0, 1]^T \f]
+ * \f$G_v\f$ is the result of applying mask \f$M_v\f$ to the input image, and \f$G_h\f$ is the result of applying mask \f$M_h\f$ to the input image.
+ * The border mode used for the gradient calculation is implementation dependent. Its behavior should be similar to \ref VX_BORDER_UNDEFINED. + * The gradient magnitudes and gradient orientations for each pixel are then calculated in the following manner. + * \f[ G(x,y) = \sqrt{G_v(x,y)^2 + G_h(x,y)^2} \f] + * \f[ \theta(x,y) = arctan(G_v(x,y), G_h(x,y)) \f] + * where \f$arctan(v, h)\f$ + * is \f$ tan^{-1}(v/h)\f$ when \f$h!=0\f$, + * + * \f$ -pi/2 \f$ if \f$v<0\f$ and \f$h==0\f$, + * + * \f$ pi/2 \f$ if \f$v>0\f$ and \f$h==0\f$ + * + * and \f$ 0 \f$ if \f$v==0\f$ and \f$h==0\f$ + * + * Secondly, the gradient magnitudes and orientations are used to compute the bins output tensor and optional magnitudes output tensor. + * These tensors are computed on a cell level where the cells are rectangular in shape. + * The magnitudes tensor contains the average gradient magnitude for each cell. + * \f[magnitudes(c) = \frac{1}{(cell\_width * cell\_height)}\sum\limits_{w=0}^{cell\_width} \sum\limits_{h=0}^{cell\_height} G_c(w,h)\f] + * where \f$G_c\f$ is the gradient magnitudes related to cell \f$c\f$. + * The bins tensor contains histograms of gradient orientations for each cell. + * The gradient orientations at each pixel range from 0 to 360 degrees. These are quantised into a set of histogram bins based on the num_bins parameter. + * Each pixel votes for a specific cell histogram bin based on its gradient orientation. The vote itself is the pixel's gradient magnitude. + * \f[bins(c, n) = \sum\limits_{w=0}^{cell\_width} \sum\limits_{h=0}^{cell\_height} G_c(w,h) * 1[B_c(w, h, num\_bins) == n]\f] + * where \f$B_c\f$ produces the histogram bin number based on the gradient orientation of the pixel at location (\f$w\f$, \f$h\f$) in cell \f$c\f$ based on + * the \f$num\_bins\f$ and \f[1[B_c(w, h, num\_bins) == n]\f] is a delta-function with value 1 when \f$B_c(w, h, num\_bins) == n\f$ or 0 otherwise. + * \param [in] context The reference to the overall context. + * \param [in] input The input image of type \ref VX_DF_IMAGE_U8. + * \param [in] cell_width The histogram cell width of type \ref VX_TYPE_INT32. + * \param [in] cell_height The histogram cell height of type \ref VX_TYPE_INT32. + * \param [in] num_bins The histogram size of type \ref VX_TYPE_INT32. + * \param [out] magnitudes The output average gradient magnitudes per cell of \ref vx_tensor of type \ref VX_TYPE_INT16 of size \f$ [floor(image_{width}/cell_{width}) ,floor(image_{height}/cell_{height}) ] \f$. + * \param [out] bins The output gradient orientation histograms per cell of \ref vx_tensor of type \ref VX_TYPE_INT16 of size \f$ [floor(image_{width}/cell_{width}) ,floor(image_{height}/cell_{height}), num_{bins}] \f$. + * + * \ingroup group_vision_function_hog + * \return A \ref vx_status_e enumeration. + * \retval VX_SUCCESS Success + * \retval * An error occurred. See \ref vx_status_e. + */ +VX_API_ENTRY vx_status VX_API_CALL vxuHOGCells(vx_context context, vx_image input, vx_int32 cell_width, vx_int32 cell_height, vx_int32 num_bins, vx_tensor magnitudes, vx_tensor bins); + +/*! \brief [Immediate] Computes Histogram of Oriented Gradients features for the W1xW2 window in a sliding window fashion over the whole input image. + * \details Firstly if a magnitudes tensor is provided the cell histograms in the bins tensor are normalised by the average cell gradient magnitudes. 
\f[bins(c,n) = \frac{bins(c,n)}{magnitudes(c)}\f]
+ * To account for changes in illumination and contrast, the cell histograms must be locally normalized, which requires grouping the cell histograms together into larger, spatially connected blocks.
+ * Blocks are rectangular grids represented by three parameters: the number of cells per block, the number of pixels per cell, and the number of bins per cell histogram.
+ * These blocks typically overlap, meaning that each cell histogram contributes more than once to the final descriptor.
+ * To normalize a block, its cell histograms \f$h\f$ are grouped together to form a vector \f$v = [h_1, h_2, h_3, ... , h_n]\f$.
+ * This vector is normalised using L2-Hys, which means performing an L2-norm on this vector, clipping the result (by limiting the maximum values of v to the threshold) and renormalizing again. If the threshold is equal to zero, then L2-Hys normalization is not performed.
+ * \f[L2norm(v) = \frac{v}{\sqrt{\|v\|_2^2 + \epsilon^2}}\f]
+ * where \f$ \|v\|_k \f$ is its k-norm for k=1, 2, and \f$ \epsilon \f$ is a small constant.
+ * For a specific window, its HOG descriptor is then the concatenated vector of the components of the normalized cell histograms from all of the block regions contained in the window.
+ * The W1xW2 window starting position is at coordinates 0x0.
+ * If the input image has dimensions that are not an integer multiple of W1xW2 blocks with the specified stride, then the last positions that contain only a partial W1xW2 window
+ * will be calculated with the remaining part of the W1xW2 window padded with zeroes.
+ * The window W1xW2 must also have a size such that it contains an integer number of cells; otherwise the node is not well-defined.
+ * The final output tensor will contain HOG descriptors equal to the number of windows in the input image.
+ * The output features tensor has 3 dimensions, given by:\n
+ * \f[[ (floor((image_{width}-window_{width})/window_{stride}) + 1),\f]
+ * \f[ (floor((image_{height}-window_{height})/window_{stride}) + 1),\f]
+ * \f[ floor((window_{width} - block_{width})/block_{stride} + 1) * floor((window_{height} - block_{height})/block_{stride} + 1) *\f]
+ * \f[ (((block_{width} * block_{height}) / (cell_{width} * cell_{height})) * num_{bins})] \f]
+ * See \ref vxCreateTensor and \ref vxCreateVirtualTensor.
+ * The output tensor from this function may be very large. For this reason, it is not recommended that this "immediate mode" version of the function be used.
+ * The preferred method to perform this function is as a graph node with a virtual tensor as the output.
+ * \param [in] context The reference to the overall context.
+ * \param [in] input The input image of type \ref VX_DF_IMAGE_U8.
+ * \param [in] magnitudes The average gradient magnitudes per cell of \ref vx_tensor of type \ref VX_TYPE_INT16. It is the output of \ref vxuHOGCells.
+ * \param [in] bins The gradient orientation histograms per cell of \ref vx_tensor of type \ref VX_TYPE_INT16. It is the output of \ref vxuHOGCells.
+ * \param [in] params The parameters of type \ref vx_hog_t.
+ * \param [in] hog_param_size Size of \ref vx_hog_t in bytes.
+ * \param [out] features The output HOG features of \ref vx_tensor of type \ref VX_TYPE_INT16.
+ *
+ * \ingroup group_vision_function_hog
+ * \return A \ref vx_status_e enumeration.
+ * \retval VX_SUCCESS Success
+ * \retval * An error occurred. See \ref vx_status_e.
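+ *
+ * A usage sketch (illustrative; the tensors and the vx_hog_t instance are assumed
+ * to have been created and populated beforehand, e.g. from \ref vxuHOGCells):
+ * \code
+ * vx_hog_t hog_params;   // fields filled in by the application (see vx_hog_t)
+ * // 'magnitudes' and 'bins' come from a prior vxuHOGCells() call.
+ * vx_status status = vxuHOGFeatures(context, input, magnitudes, bins,
+ *                                   &hog_params, sizeof(vx_hog_t), features);
+ * \endcode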
+ */
+
+VX_API_ENTRY vx_status VX_API_CALL vxuHOGFeatures(vx_context context, vx_image input, vx_tensor magnitudes, vx_tensor bins, const vx_hog_t *params, vx_size hog_param_size, vx_tensor features);
+
+/*! \brief [Immediate] Finds the Probabilistic Hough Lines detected in the input binary image; each line is stored in the output array as a set of points (x1, y1, x2, y2).
+ * \details Some implementations of the algorithm may have a random or non-deterministic element. If the target application is in a safety-critical environment, this
+ * should be borne in mind and steps taken in the implementation, the application or both to achieve the level of determinism required by the system design.
+ * \param [in] context The reference to the overall context.
+ * \param [in] input The 8-bit, single-channel binary source image.
+ * \param [in] params The parameters of the struct \ref vx_hough_lines_p_t.
+ * \param [out] lines_array Contains the array of detected lines; see \ref vx_line2d_t. The order of lines is implementation dependent.
+ * \param [out] num_lines [optional] The total number of detected lines in the image. Use a VX_TYPE_SIZE scalar.
+ * \return A \ref vx_status_e enumeration.
+ * \retval VX_SUCCESS Success
+ * \retval * An error occurred. See \ref vx_status_e.
+ * \ingroup group_vision_function_hough_lines_p
+ */
+VX_API_ENTRY vx_status VX_API_CALL vxuHoughLinesP(vx_context context, vx_image input, const vx_hough_lines_p_t *params, vx_array lines_array, vx_scalar num_lines);
+
+/*! \brief [Immediate] Remaps an output image from an input image.
+ * \param [in] context The reference to the overall context.
+ * \param [in] input The input \ref VX_DF_IMAGE_U8 image.
+ * \param [in] table The remap table object.
+ * \param [in] policy The interpolation policy from \ref vx_interpolation_type_e.
+ * \ref VX_INTERPOLATION_AREA is not supported.
+ * \param [out] output The output \ref VX_DF_IMAGE_U8 image.
+ * \return A \ref vx_status_e enumeration.
+ * \ingroup group_vision_function_remap
+ */
+VX_API_ENTRY vx_status VX_API_CALL vxuRemap(vx_context context,
+                                            vx_image input,
+                                            vx_remap table,
+                                            vx_enum policy,
+                                            vx_image output);
+
+/*! \brief [Immediate] The function applies bilateral filtering to the input tensor.
+ * \param [in] context The reference to the overall context.
+ * \param [in] src The input data, a \ref vx_tensor with a maximum of 3 dimensions and a minimum of 2. The tensor is of type \ref VX_TYPE_UINT8 or \ref VX_TYPE_INT16.
+ * Dimensions are [radiometric, width, height] or [width, height].
+ * \param [in] diameter The diameter of each pixel neighbourhood used during filtering. Values of diameter must be odd, bigger than 3 and smaller than 10.
+ * \param [in] sigmaValues Filter sigma in the radiometric space. Supported values are bigger than 0 and smaller than or equal to 20.
+ * \param [in] sigmaSpace Filter sigma in the spatial space. Supported values are bigger than 0 and smaller than or equal to 20.
+ * \param [out] dst The output data, a \ref vx_tensor of type \ref VX_TYPE_UINT8 or \ref VX_TYPE_INT16, which must be of the same type and size as the input.
+ * \note For the border mode \ref VX_NODE_BORDER, the values
+ * \ref VX_BORDER_REPLICATE and \ref VX_BORDER_CONSTANT are supported.
+ * \return A \ref vx_status_e enumeration.
+ * \retval VX_SUCCESS Success
+ * \retval * An error occurred. See \ref vx_status_e.
+ * \ingroup group_vision_function_bilateral_filter
+ */
+VX_API_ENTRY vx_status VX_API_CALL vxuBilateralFilter(vx_context context, vx_tensor src, vx_int32 diameter, vx_float32 sigmaSpace, vx_float32 sigmaValues, vx_tensor dst);
+
+/*!
\brief [Immediate] Performs element wise multiplications on element values in the input tensor data with a scale. + * \param [in] context The reference to the overall context. + * \param [in] input1 Input tensor data. Implementations must support input tensor data type \ref VX_TYPE_INT16 with fixed_point_position 8, + * and tensor data types \ref VX_TYPE_UINT8 and \ref VX_TYPE_INT8, with fixed_point_position 0. + * \param [in] input2 Input tensor data. The dimensions and sizes of input2 match those of input1, unless the vx_tensor of one or more dimensions in input2 is 1. + * In this case, those dimensions are treated as if this tensor was expanded to match the size of the corresponding dimension of input1, + * and data was duplicated on all terms in that dimension. After this expansion, the dimensions will be equal. + * The data type must match the data type of Input1. + * \param [in] scale A non-negative \ref VX_TYPE_FLOAT32 multiplied to each product before overflow handling. + * \param [in] overflow_policy A \ref vx_convert_policy_e enumeration. + * \param [in] rounding_policy A \ref vx_round_policy_e enumeration. + * \param [out] output The output tensor data with the same dimensions as the input tensor data. + * \ingroup group_vision_function_tensor_multiply + * \return A \ref vx_status_e enumeration. + * \retval VX_SUCCESS Success + * \retval * An error occurred. See \ref vx_status_e. + */ +VX_API_ENTRY vx_status VX_API_CALL vxuTensorMultiply(vx_context context, vx_tensor input1, vx_tensor input2, vx_scalar scale, vx_enum overflow_policy, + vx_enum rounding_policy, vx_tensor output); + +/*! \brief [Immediate] Performs arithmetic addition on element values in the input tensor data. + * \param [in] context The reference to the overall context. + * \param [in] input1 Input tensor data. Implementations must support input tensor data type \ref VX_TYPE_INT16 with fixed_point_position 8, + * and tensor data types \ref VX_TYPE_UINT8 and \ref VX_TYPE_INT8, with fixed_point_position 0. + * \param [in] input2 Input tensor data. The dimensions and sizes of input2 match those of input1, unless the vx_tensor of one or more dimensions in input2 is 1. + * In this case, those dimensions are treated as if this tensor was expanded to match the size of the corresponding dimension of input1, + * and data was duplicated on all terms in that dimension. After this expansion, the dimensions will be equal. + * The data type must match the data type of Input1. + * \param [in] policy A \ref vx_convert_policy_e enumeration. + * \param [out] output The output tensor data with the same dimensions as the input tensor data. + * \ingroup group_vision_function_tensor_add + * \return A \ref vx_status_e enumeration. + * \retval VX_SUCCESS Success + * \retval * An error occurred. See \ref vx_status_e. + */ +VX_API_ENTRY vx_status VX_API_CALL vxuTensorAdd(vx_context context, vx_tensor input1, vx_tensor input2, vx_enum policy, vx_tensor output); + +/*! \brief [Immediate] Performs arithmetic subtraction on element values in the input tensor data. + * \param [in] context The reference to the overall context. + * \param [in] input1 Input tensor data. Implementations must support input tensor data type \ref VX_TYPE_INT16 with fixed_point_position 8, + * and tensor data types \ref VX_TYPE_UINT8 and \ref VX_TYPE_INT8, with fixed_point_position 0. + * \param [in] input2 Input tensor data. The dimensions and sizes of input2 match those of input1, unless the vx_tensor of one or more dimensions in input2 is 1. 
+ * In this case, those dimensions are treated as if this tensor was expanded to match the size of the corresponding dimension of input1, + * and data was duplicated on all terms in that dimension. After this expansion, the dimensions will be equal. + * The data type must match the data type of Input1. + * \param [in] policy A \ref vx_convert_policy_e enumeration. + * \param [out] output The output tensor data with the same dimensions as the input tensor data. + * \ingroup group_vision_function_tensor_subtract + * \return A \ref vx_status_e enumeration. + * \retval VX_SUCCESS Success + * \retval * An error occurred. See \ref vx_status_e. + */ +VX_API_ENTRY vx_status VX_API_CALL vxuTensorSubtract(vx_context context, vx_tensor input1, vx_tensor input2, vx_enum policy, vx_tensor output); + +/*! \brief [Immediate] Performs LUT on element values in the input tensor data. + * \param [in] context The reference to the overall context. + * \param [in] input1 Input tensor data. Implementations must support input tensor data type \ref VX_TYPE_INT16 with fixed_point_position 8, + * and tensor data types \ref VX_TYPE_UINT8, with fixed_point_position 0. + * \param [in] lut The look-up table to use, of type \ref vx_lut. + * The elements of input1 are treated as unsigned integers to determine an index into the look-up table. + * The data type of the items in the look-up table must match that of the output tensor. + * \param [out] output The output tensor data with the same dimensions as the input tensor data. + * \ingroup group_vision_function_tensor_tablelookup + * \return A \ref vx_status_e enumeration. + * \retval VX_SUCCESS Success + * \retval * An error occurred. See \ref vx_status_e. + */ +VX_API_ENTRY vx_status VX_API_CALL vxuTensorTableLookup(vx_context context, vx_tensor input1, vx_lut lut, vx_tensor output); + +/*! \brief [Immediate] Performs transpose on the input tensor. + * The tensor is transposed according to a specified 2 indexes in the tensor (0-based indexing) + * \param [in] context The reference to the overall context. + * \param [in] input Input tensor data, Implementations must support input tensor data type \ref VX_TYPE_INT16 with fixed_point_position 8, + * and tensor data types \ref VX_TYPE_UINT8 and \ref VX_TYPE_INT8, with fixed_point_position 0. + * \param [out] output output tensor data, + * \param [in] dimension1 Dimension index that is transposed with dim 2. + * \param [in] dimension2 Dimension index that is transposed with dim 1. + * \ingroup group_vision_function_tensor_transpose + * \return A \ref vx_status_e enumeration. + * \retval VX_SUCCESS Success + * \retval * An error occurred. See \ref vx_status_e. + */ +VX_API_ENTRY vx_status VX_API_CALL vxuTensorTranspose(vx_context context, vx_tensor input, vx_tensor output, vx_size dimension1, vx_size dimension2); + +/*! \brief [Immediate] Performs a bit-depth conversion. + * \param [in] context The reference to the overall context. + * \param [in] input The input tensor. Implementations must support input tensor data type \ref VX_TYPE_INT16 with fixed_point_position 8, + * and tensor data types \ref VX_TYPE_UINT8 and \ref VX_TYPE_INT8, with fixed_point_position 0. + * \param [in] policy A \ref VX_TYPE_ENUM of the \ref vx_convert_policy_e enumeration. + * \param [in] norm A scalar containing a \ref VX_TYPE_FLOAT32 of the normalization value. + * \param [in] offset A scalar containing a \ref VX_TYPE_FLOAT32 of the offset value subtracted before normalization. + * \param [out] output The output tensor. 
Implementations must support input tensor data type VX_TYPE_INT16 with fixed_point_position 8,
+ * and VX_TYPE_UINT8 with fixed_point_position 0.
+ * \ingroup group_vision_function_tensor_convert_depth
+ * \return A \ref vx_status_e enumeration.
+ * \retval VX_SUCCESS Success
+ * \retval * An error occurred. See \ref vx_status_e.
+ */
+VX_API_ENTRY vx_status VX_API_CALL vxuTensorConvertDepth(vx_context context, vx_tensor input, vx_enum policy, vx_scalar norm, vx_scalar offset, vx_tensor output);
+
+/*! \brief [Immediate] Performs a generalized matrix multiplication.
+ * \param [in] context The reference to the overall context.
+ * \param [in] input1 The first input 2D tensor of type \ref VX_TYPE_INT16 with fixed_point_pos 8, or tensor data types \ref VX_TYPE_UINT8 or \ref VX_TYPE_INT8, with fixed_point_pos 0.
+ * \param [in] input2 The second 2D tensor. Must be of the same data type as input1.
+ * \param [in] input3 [optional] The third 2D tensor. Must be of the same data type as input1.
+ * \param [in] matrix_multiply_params Matrix multiply parameters, see \ref vx_tensor_matrix_multiply_params_t.
+ * \param [out] output The output 2D tensor. Must be of the same data type as input1. The output dimensions must agree with the formula in the description.
+ * \ingroup group_vision_function_tensor_matrix_multiply
+ * \return A \ref vx_status_e enumeration.
+ * \retval VX_SUCCESS Success
+ * \retval * An error occurred. See \ref vx_status_e.
+ */
+VX_API_ENTRY vx_status VX_API_CALL vxuTensorMatrixMultiply(vx_context context, vx_tensor input1, vx_tensor input2, vx_tensor input3,
+    const vx_tensor_matrix_multiply_params_t *matrix_multiply_params, vx_tensor output);
+
+
+/*! \brief [Immediate] Copies data from one object to another.
+ * \param [in] context The reference to the overall context.
+ * \param [in] input The input data object.
+ * \param [out] output The output data object.
+ * \return A \ref vx_status_e enumeration.
+ * \retval VX_SUCCESS Success
+ * \retval * An error occurred. See \ref vx_status_e.
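+ *
+ * A usage sketch (illustrative; 'src' and 'dst' are assumed to be existing,
+ * compatible data objects such as two images of the same format and size):
+ * \code
+ * vx_status status = vxuCopy(context, (vx_reference)src, (vx_reference)dst);
+ * \endcode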
+ * \ingroup group_vision_function_copy + */ +VX_API_ENTRY vx_status VX_API_CALL vxuCopy(vx_context context, vx_reference input, vx_reference output); + +#ifdef __cplusplus +} +#endif + +#endif diff --git a/unified-tina/lib/aarch64-none-linux-gnu/libArchModelSw.so b/unified-tina/lib/aarch64-none-linux-gnu/libArchModelSw.so new file mode 100755 index 0000000..b441cd7 Binary files /dev/null and b/unified-tina/lib/aarch64-none-linux-gnu/libArchModelSw.so differ diff --git a/unified-tina/lib/aarch64-none-linux-gnu/libCLC.so b/unified-tina/lib/aarch64-none-linux-gnu/libCLC.so new file mode 100755 index 0000000..aad4f18 Binary files /dev/null and b/unified-tina/lib/aarch64-none-linux-gnu/libCLC.so differ diff --git a/unified-tina/lib/aarch64-none-linux-gnu/libGAL.so b/unified-tina/lib/aarch64-none-linux-gnu/libGAL.so new file mode 100755 index 0000000..950d1bf Binary files /dev/null and b/unified-tina/lib/aarch64-none-linux-gnu/libGAL.so differ diff --git a/unified-tina/lib/aarch64-none-linux-gnu/libGLSLC.so b/unified-tina/lib/aarch64-none-linux-gnu/libGLSLC.so new file mode 100755 index 0000000..9724938 Binary files /dev/null and b/unified-tina/lib/aarch64-none-linux-gnu/libGLSLC.so differ diff --git a/unified-tina/lib/aarch64-none-linux-gnu/libNNArchPerf.so b/unified-tina/lib/aarch64-none-linux-gnu/libNNArchPerf.so new file mode 100755 index 0000000..66573d2 Binary files /dev/null and b/unified-tina/lib/aarch64-none-linux-gnu/libNNArchPerf.so differ diff --git a/unified-tina/lib/aarch64-none-linux-gnu/libNNGPUBinary.so b/unified-tina/lib/aarch64-none-linux-gnu/libNNGPUBinary.so new file mode 100755 index 0000000..80cc8b3 Binary files /dev/null and b/unified-tina/lib/aarch64-none-linux-gnu/libNNGPUBinary.so differ diff --git a/unified-tina/lib/aarch64-none-linux-gnu/libNNVXCBinary.so b/unified-tina/lib/aarch64-none-linux-gnu/libNNVXCBinary.so new file mode 100755 index 0000000..0c1f4a5 Binary files /dev/null and b/unified-tina/lib/aarch64-none-linux-gnu/libNNVXCBinary.so differ diff --git a/unified-tina/lib/aarch64-none-linux-gnu/libOpenVX.so b/unified-tina/lib/aarch64-none-linux-gnu/libOpenVX.so new file mode 100755 index 0000000..55baa2b Binary files /dev/null and b/unified-tina/lib/aarch64-none-linux-gnu/libOpenVX.so differ diff --git a/unified-tina/lib/aarch64-none-linux-gnu/libOpenVX.so.1 b/unified-tina/lib/aarch64-none-linux-gnu/libOpenVX.so.1 new file mode 100755 index 0000000..55baa2b Binary files /dev/null and b/unified-tina/lib/aarch64-none-linux-gnu/libOpenVX.so.1 differ diff --git a/unified-tina/lib/aarch64-none-linux-gnu/libOpenVX.so.1.3.0 b/unified-tina/lib/aarch64-none-linux-gnu/libOpenVX.so.1.3.0 new file mode 100755 index 0000000..55baa2b Binary files /dev/null and b/unified-tina/lib/aarch64-none-linux-gnu/libOpenVX.so.1.3.0 differ diff --git a/unified-tina/lib/aarch64-none-linux-gnu/libOpenVXU.so b/unified-tina/lib/aarch64-none-linux-gnu/libOpenVXU.so new file mode 100755 index 0000000..0fc9afe Binary files /dev/null and b/unified-tina/lib/aarch64-none-linux-gnu/libOpenVXU.so differ diff --git a/unified-tina/lib/aarch64-none-linux-gnu/libOvx12VXCBinary.so b/unified-tina/lib/aarch64-none-linux-gnu/libOvx12VXCBinary.so new file mode 100755 index 0000000..53d09b2 Binary files /dev/null and b/unified-tina/lib/aarch64-none-linux-gnu/libOvx12VXCBinary.so differ diff --git a/unified-tina/lib/aarch64-none-linux-gnu/libOvxGPUVXCBinary.so b/unified-tina/lib/aarch64-none-linux-gnu/libOvxGPUVXCBinary.so new file mode 100755 index 0000000..d0cc08c Binary files /dev/null and 
b/unified-tina/lib/aarch64-none-linux-gnu/libOvxGPUVXCBinary.so differ diff --git a/unified-tina/lib/aarch64-none-linux-gnu/libVSC.so b/unified-tina/lib/aarch64-none-linux-gnu/libVSC.so new file mode 100755 index 0000000..7296772 Binary files /dev/null and b/unified-tina/lib/aarch64-none-linux-gnu/libVSC.so differ diff --git a/unified-tina/lib/aarch64-none-linux-gnu/libovxlib.so b/unified-tina/lib/aarch64-none-linux-gnu/libovxlib.so new file mode 100755 index 0000000..d036de1 Binary files /dev/null and b/unified-tina/lib/aarch64-none-linux-gnu/libovxlib.so differ diff --git a/unified-tina/lib/glibc-gcc10_2_0/libArchModelSw.so b/unified-tina/lib/glibc-gcc10_2_0/libArchModelSw.so new file mode 100644 index 0000000..6902cd9 Binary files /dev/null and b/unified-tina/lib/glibc-gcc10_2_0/libArchModelSw.so differ diff --git a/unified-tina/lib/glibc-gcc10_2_0/libCLC.so b/unified-tina/lib/glibc-gcc10_2_0/libCLC.so new file mode 100644 index 0000000..45e3551 Binary files /dev/null and b/unified-tina/lib/glibc-gcc10_2_0/libCLC.so differ diff --git a/unified-tina/lib/glibc-gcc10_2_0/libGAL.so b/unified-tina/lib/glibc-gcc10_2_0/libGAL.so new file mode 100644 index 0000000..b2420a6 Binary files /dev/null and b/unified-tina/lib/glibc-gcc10_2_0/libGAL.so differ diff --git a/unified-tina/lib/glibc-gcc10_2_0/libGLSLC.so b/unified-tina/lib/glibc-gcc10_2_0/libGLSLC.so new file mode 100644 index 0000000..a7500b5 Binary files /dev/null and b/unified-tina/lib/glibc-gcc10_2_0/libGLSLC.so differ diff --git a/unified-tina/lib/glibc-gcc10_2_0/libNNArchPerf.so b/unified-tina/lib/glibc-gcc10_2_0/libNNArchPerf.so new file mode 100644 index 0000000..261b56c Binary files /dev/null and b/unified-tina/lib/glibc-gcc10_2_0/libNNArchPerf.so differ diff --git a/unified-tina/lib/glibc-gcc10_2_0/libNNGPUBinary.so b/unified-tina/lib/glibc-gcc10_2_0/libNNGPUBinary.so new file mode 100644 index 0000000..66418c5 Binary files /dev/null and b/unified-tina/lib/glibc-gcc10_2_0/libNNGPUBinary.so differ diff --git a/unified-tina/lib/glibc-gcc10_2_0/libNNVXCBinary.so b/unified-tina/lib/glibc-gcc10_2_0/libNNVXCBinary.so new file mode 100644 index 0000000..1716532 Binary files /dev/null and b/unified-tina/lib/glibc-gcc10_2_0/libNNVXCBinary.so differ diff --git a/unified-tina/lib/glibc-gcc10_2_0/libOpenVX.so b/unified-tina/lib/glibc-gcc10_2_0/libOpenVX.so new file mode 100644 index 0000000..0240c27 Binary files /dev/null and b/unified-tina/lib/glibc-gcc10_2_0/libOpenVX.so differ diff --git a/unified-tina/lib/glibc-gcc10_2_0/libOpenVX.so.1 b/unified-tina/lib/glibc-gcc10_2_0/libOpenVX.so.1 new file mode 100644 index 0000000..0240c27 Binary files /dev/null and b/unified-tina/lib/glibc-gcc10_2_0/libOpenVX.so.1 differ diff --git a/unified-tina/lib/glibc-gcc10_2_0/libOpenVX.so.1.3.0 b/unified-tina/lib/glibc-gcc10_2_0/libOpenVX.so.1.3.0 new file mode 100644 index 0000000..0240c27 Binary files /dev/null and b/unified-tina/lib/glibc-gcc10_2_0/libOpenVX.so.1.3.0 differ diff --git a/unified-tina/lib/glibc-gcc10_2_0/libOpenVXU.so b/unified-tina/lib/glibc-gcc10_2_0/libOpenVXU.so new file mode 100644 index 0000000..8741dc0 Binary files /dev/null and b/unified-tina/lib/glibc-gcc10_2_0/libOpenVXU.so differ diff --git a/unified-tina/lib/glibc-gcc10_2_0/libOvx12VXCBinary.so b/unified-tina/lib/glibc-gcc10_2_0/libOvx12VXCBinary.so new file mode 100644 index 0000000..f335ef1 Binary files /dev/null and b/unified-tina/lib/glibc-gcc10_2_0/libOvx12VXCBinary.so differ diff --git a/unified-tina/lib/glibc-gcc10_2_0/libOvxGPUVXCBinary.so 
b/unified-tina/lib/glibc-gcc10_2_0/libOvxGPUVXCBinary.so new file mode 100644 index 0000000..4d5b9d5 Binary files /dev/null and b/unified-tina/lib/glibc-gcc10_2_0/libOvxGPUVXCBinary.so differ diff --git a/unified-tina/lib/glibc-gcc10_2_0/libVSC.so b/unified-tina/lib/glibc-gcc10_2_0/libVSC.so new file mode 100644 index 0000000..38433a4 Binary files /dev/null and b/unified-tina/lib/glibc-gcc10_2_0/libVSC.so differ diff --git a/unified-tina/lib/glibc-gcc10_2_0/libovxlib.so b/unified-tina/lib/glibc-gcc10_2_0/libovxlib.so new file mode 100644 index 0000000..b733f60 Binary files /dev/null and b/unified-tina/lib/glibc-gcc10_2_0/libovxlib.so differ diff --git a/unified-tina/lib/glibc-gcc13_2_0/libArchModelSw.so b/unified-tina/lib/glibc-gcc13_2_0/libArchModelSw.so new file mode 100644 index 0000000..6902cd9 Binary files /dev/null and b/unified-tina/lib/glibc-gcc13_2_0/libArchModelSw.so differ diff --git a/unified-tina/lib/glibc-gcc13_2_0/libCLC.so b/unified-tina/lib/glibc-gcc13_2_0/libCLC.so new file mode 100644 index 0000000..45e3551 Binary files /dev/null and b/unified-tina/lib/glibc-gcc13_2_0/libCLC.so differ diff --git a/unified-tina/lib/glibc-gcc13_2_0/libGAL.so b/unified-tina/lib/glibc-gcc13_2_0/libGAL.so new file mode 100644 index 0000000..1c90393 Binary files /dev/null and b/unified-tina/lib/glibc-gcc13_2_0/libGAL.so differ diff --git a/unified-tina/lib/glibc-gcc13_2_0/libGLSLC.so b/unified-tina/lib/glibc-gcc13_2_0/libGLSLC.so new file mode 100644 index 0000000..a7500b5 Binary files /dev/null and b/unified-tina/lib/glibc-gcc13_2_0/libGLSLC.so differ diff --git a/unified-tina/lib/glibc-gcc13_2_0/libNNArchPerf.so b/unified-tina/lib/glibc-gcc13_2_0/libNNArchPerf.so new file mode 100644 index 0000000..261b56c Binary files /dev/null and b/unified-tina/lib/glibc-gcc13_2_0/libNNArchPerf.so differ diff --git a/unified-tina/lib/glibc-gcc13_2_0/libNNGPUBinary.so b/unified-tina/lib/glibc-gcc13_2_0/libNNGPUBinary.so new file mode 100644 index 0000000..fe3e3cb Binary files /dev/null and b/unified-tina/lib/glibc-gcc13_2_0/libNNGPUBinary.so differ diff --git a/unified-tina/lib/glibc-gcc13_2_0/libNNVXCBinary.so b/unified-tina/lib/glibc-gcc13_2_0/libNNVXCBinary.so new file mode 100644 index 0000000..0ab0c66 Binary files /dev/null and b/unified-tina/lib/glibc-gcc13_2_0/libNNVXCBinary.so differ diff --git a/unified-tina/lib/glibc-gcc13_2_0/libOpenVX.so b/unified-tina/lib/glibc-gcc13_2_0/libOpenVX.so new file mode 100644 index 0000000..a4c0235 Binary files /dev/null and b/unified-tina/lib/glibc-gcc13_2_0/libOpenVX.so differ diff --git a/unified-tina/lib/glibc-gcc13_2_0/libOpenVX.so.1 b/unified-tina/lib/glibc-gcc13_2_0/libOpenVX.so.1 new file mode 100644 index 0000000..a4c0235 Binary files /dev/null and b/unified-tina/lib/glibc-gcc13_2_0/libOpenVX.so.1 differ diff --git a/unified-tina/lib/glibc-gcc13_2_0/libOpenVX.so.1.3.0 b/unified-tina/lib/glibc-gcc13_2_0/libOpenVX.so.1.3.0 new file mode 100644 index 0000000..a4c0235 Binary files /dev/null and b/unified-tina/lib/glibc-gcc13_2_0/libOpenVX.so.1.3.0 differ diff --git a/unified-tina/lib/glibc-gcc13_2_0/libOpenVXU.so b/unified-tina/lib/glibc-gcc13_2_0/libOpenVXU.so new file mode 100644 index 0000000..e09489a Binary files /dev/null and b/unified-tina/lib/glibc-gcc13_2_0/libOpenVXU.so differ diff --git a/unified-tina/lib/glibc-gcc13_2_0/libOvx12VXCBinary.so b/unified-tina/lib/glibc-gcc13_2_0/libOvx12VXCBinary.so new file mode 100644 index 0000000..2c7ee08 Binary files /dev/null and b/unified-tina/lib/glibc-gcc13_2_0/libOvx12VXCBinary.so differ diff --git 
a/unified-tina/lib/glibc-gcc13_2_0/libOvxGPUVXCBinary.so b/unified-tina/lib/glibc-gcc13_2_0/libOvxGPUVXCBinary.so new file mode 100644 index 0000000..e6a681f Binary files /dev/null and b/unified-tina/lib/glibc-gcc13_2_0/libOvxGPUVXCBinary.so differ diff --git a/unified-tina/lib/glibc-gcc13_2_0/libVSC.so b/unified-tina/lib/glibc-gcc13_2_0/libVSC.so new file mode 100644 index 0000000..38433a4 Binary files /dev/null and b/unified-tina/lib/glibc-gcc13_2_0/libVSC.so differ diff --git a/unified-tina/lib/glibc-gcc13_2_0/libovxlib.so b/unified-tina/lib/glibc-gcc13_2_0/libovxlib.so new file mode 100644 index 0000000..2896a5a Binary files /dev/null and b/unified-tina/lib/glibc-gcc13_2_0/libovxlib.so differ diff --git a/unified-tina/makefile.linux.def b/unified-tina/makefile.linux.def new file mode 100644 index 0000000..b4230da --- /dev/null +++ b/unified-tina/makefile.linux.def @@ -0,0 +1,124 @@ +############################################################################## +# +# Copyright (c) 2005 - 2021 by Vivante Corp. All rights reserved. +# +# The material in this file is confidential and contains trade secrets +# of Vivante Corporation. This is proprietary information owned by +# Vivante Corporation. No part of this work may be disclosed, +# reproduced, copied, transmitted, or used in any way for any purpose, +# without the express written permission of Vivante Corporation. +# +############################################################################## + +################################################################ +# Arch. + +ARCH_TYPE ?= arm64 +CPU_TYPE ?= cortex-a55 +CPU_ARCH ?= +ABI ?= +ENDIANNESS ?= +FPU ?= +FLOAT_ABI ?= + +gcdSTATIC_LINK ?= 0 + + CROSS_COMPILE ?= aarch64-linux-gnu- + CC = $(CROSS_COMPILE)gcc + CXX = $(CROSS_COMPILE)g++ + AR = $(CROSS_COMPILE)ar + AS = $(CROSS_COMPILE)as + LD = $(CROSS_COMPILE)ld + RANLIB = $(CROSS_COMPILE)ranlib + STRIP = $(CROSS_COMPILE)strip + +PKG_CONFIG ?= $(CROSS_COMPILE)pkg-config + + +################################################################ +# Resource. + +VIVANTE_SDK_DIR ?= $(AQROOT) +VIVANTE_SDK_INC ?= $(VIVANTE_SDK_DIR)/include +VIVANTE_SDK_LIB ?= $(VIVANTE_SDK_DIR)/lib/$(ARCH_TYPE)/$(SOC_BOARD) + +################################################################ +# Target directory. + +ifeq ($(DEBUG),1) + OBJ_DIR ?= bin_d +else + OBJ_DIR ?= bin_r +endif + +############################################################### +# Common flags. 
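+#
+# Example invocation (illustrative; assumes the defaults above): the toolchain and
+# build type can be overridden on the make command line when this file is included,
+#     make CROSS_COMPILE=aarch64-linux-gnu- DEBUG=1 gcdSTATIC_LINK=0
+#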
+LDFLAGS += $(LFLAGS) +ifneq ($(ROOTFS),) +LDFLAGS += --sysroot=$(ROOTFS) +endif + +ifneq (,$(findstring -mcpu=,$(CC) $(CFLAGS))) +CPU_TYPE=0 +CPU_ARCH=0 +endif + + ifneq ($(ABI),) + ifneq ($(ABI),0) + CFLAGS += -mabi=$(ABI) + endif + endif + + ifneq ($(ENDIANNESS),) + CFLAGS += $(ENDIANNESS) +# LDFLAGS += $(ENDIANNESS) + PFLAGS += $(ENDIANNESS) + endif + + CFLAGS += -DLINUX + CFLAGS += -Wall -D_REENTRANT -fno-strict-aliasing + + ifneq ($(CPU_TYPE),) + ifneq ($(CPU_TYPE),0) + CFLAGS += -mtune=$(CPU_TYPE) +# LDFLAGS += -mtune=$(CPU_TYPE) + PFLAGS += -mtune=$(CPU_TYPE) + endif + endif + + ifneq ($(CPU_ARCH),) + ifneq ($(CPU_ARCH),0) + CFLAGS += -march=$(CPU_ARCH) +# LDFLAGS += -march=$(CPU_ARCH) + PFLAGS += -march=$(CPU_ARCH) + endif + endif + +ifneq ($(FPU),) + CFLAGS += -mfpu=$(FPU) + CXXFLAGS += -mfpu=$(FPU) +endif + +ifneq ($(FLOAT_ABI),) + CFLAGS += -mfloat-abi=$(FLOAT_ABI) + CXXFLAGS += -mfloat-abi=$(FLOAT_ABI) +# LDFLAGS += -mfloat-abi=$(FLOAT_ABI) + PFLAGS += -mfloat-abi=$(FLOAT_ABI) +endif + +ifeq ($(DEBUG),1) +# CFLAGS += -g3 -ggdb3 -O0 -DDEBUG -D_DEBUG -DgcdDEBUG=1 + CFLAGS += -g -O1 -DDEBUG -D_DEBUG -DgcdDEBUG=1 +else + ifeq ("$(DEBUG)", "valgrind") + CFLAGS += -g -O -DgcdBUILT_FOR_VALGRIND=1 + else + CFLAGS += -O2 + endif +endif + +ifeq ($(gcdSTATIC_LINK),1) + CFLAGS += -DgcdSTATIC_LINK=1 +else + CFLAGS += -DgcdSTATIC_LINK=0 +endif diff --git a/viplite-tina/Makefile b/viplite-tina/Makefile new file mode 100755 index 0000000..f1c770e --- /dev/null +++ b/viplite-tina/Makefile @@ -0,0 +1,15 @@ + +ifdef AI_SDK_PLATFORM +include ../machinfo/$(AI_SDK_PLATFORM)/config.mk +endif + +all:$(TARGET) install + +install: $(TARGET) + -@mkdir -p $(INSTALL_PREFIX)/usr/lib + -@mkdir -p $(INSTALL_PREFIX)/usr/include + @cp lib/$(C_LIB_TYPE)/$(NPU_SW_VERSION)/inc/*.h $(INSTALL_PREFIX)/usr/include + @cp lib/$(C_LIB_TYPE)/$(NPU_SW_VERSION)/*.so $(INSTALL_PREFIX)/usr/lib + +clean: + rm -rf *.o *~ diff --git a/viplite-tina/lib/aarch64-none-linux-gnu/v1.13/inc/vip_lite.h b/viplite-tina/lib/aarch64-none-linux-gnu/v1.13/inc/vip_lite.h new file mode 100644 index 0000000..2aa6900 --- /dev/null +++ b/viplite-tina/lib/aarch64-none-linux-gnu/v1.13/inc/vip_lite.h @@ -0,0 +1,1013 @@ +/******************************************************************************\ +|* Copyright (c) 2017-2023 by Vivante Corporation. All Rights Reserved. *| +|* *| +|* The material in this file is confidential and contains trade secrets of *| +|* of Vivante Corporation. This is proprietary information owned by Vivante *| +|* Corporation. No part of this work may be disclosed, reproduced, copied, *| +|* transmitted, or used in any way for any purpose, without the express *| +|* written permission of Vivante Corporation. *| +|* *| +\******************************************************************************/ + +#ifndef _VIP_LITE_H +#define _VIP_LITE_H + +#include "vip_lite_common.h" + +#ifdef __cplusplus +extern "C" { +#endif + +/*! + *\brief The VIP lite API for Convolution Neural Network application on CPU/MCU/DSP type of embedded environment. + *\details This VIP lite APIs is not thread-safe if vpmdENABLE_MULTIPLE_TASK is set to 0, + user must guarantee to call these APIs in a proper way. + But defines vpmdENABLE_MULTIPLE_TASK 1, VIPLite can support multiple task(multiple thread/process). + and it's thread-safe. + *Memory allocation and file io functions used inside driver internal would depend on working enviroment. 
+
+ *\defgroup group_global Data Type Definitions and Global APIs
+ *\brief Data type definitions and global APIs that are used in VIPLite.
+ *\defgroup group_buffer Buffer API
+ The API to manage input/output buffers.
+ *\defgroup group_network Network API
+ The API to manage networks.
+ */
+
+/* !\brief The data format list for buffer data.
+ * \ingroup group_buffer
+ * \version 2.0
+ */
+typedef enum _vip_buffer_format_e
+{
+    /*! \brief A float type of buffer data */
+    VIP_BUFFER_FORMAT_FP32 = 0,
+    /*! \brief A half float type of buffer data */
+    VIP_BUFFER_FORMAT_FP16 = 1,
+    /*! \brief An 8-bit unsigned integer type of buffer data */
+    VIP_BUFFER_FORMAT_UINT8 = 2,
+    /*! \brief An 8-bit signed integer type of buffer data */
+    VIP_BUFFER_FORMAT_INT8 = 3,
+    /*! \brief A 16-bit unsigned integer type of buffer data */
+    VIP_BUFFER_FORMAT_UINT16 = 4,
+    /*! \brief A 16-bit signed integer type of buffer data */
+    VIP_BUFFER_FORMAT_INT16 = 5,
+    /*! \brief A char type of data */
+    VIP_BUFFER_FORMAT_CHAR = 6,
+    /*! \brief A bfloat16 type of data */
+    VIP_BUFFER_FORMAT_BFP16 = 7,
+    /*! \brief A 32-bit integer type of data */
+    VIP_BUFFER_FORMAT_INT32 = 8,
+    /*! \brief A 32-bit unsigned integer type of buffer data */
+    VIP_BUFFER_FORMAT_UINT32 = 9,
+    /*! \brief A 64-bit signed integer type of data */
+    VIP_BUFFER_FORMAT_INT64 = 10,
+    /*! \brief A 64-bit unsigned integer type of data */
+    VIP_BUFFER_FORMAT_UINT64 = 11,
+    /*! \brief A 64-bit float type of buffer data */
+    VIP_BUFFER_FORMAT_FP64 = 12,
+    /*! \brief A signed 4-bit tensor */
+    VIP_BUFFER_FORMAT_INT4 = 13,
+    /*! \brief An unsigned 4-bit tensor */
+    VIP_BUFFER_FORMAT_UINT4 = 14,
+} vip_buffer_format_e;
+
+/* !\brief The quantization format list for buffer data.
+ * \ingroup group_buffer
+ * \version 1.0
+ */
+typedef enum _vip_buffer_quantize_format_e
+{
+    /*! \brief Not a quantized format */
+    VIP_BUFFER_QUANTIZE_NONE = 0,
+    /*! \brief A quantization data type which specifies the fixed point position for the whole tensor. */
+    VIP_BUFFER_QUANTIZE_DYNAMIC_FIXED_POINT = 1,
+    /*! \brief A quantization data type which has a scale value and zero point for the whole tensor,
+        to match the TF and Android NN APIs. */
+    VIP_BUFFER_QUANTIZE_TF_ASYMM = 2,
+    /*! \brief The maximum supported quantize format value */
+    VIP_BUFFER_QUANTIZE_MAX,
+} vip_buffer_quantize_format_e;
+
+/* !\brief The memory type for a vip buffer.
+ * \ingroup group_buffer
+ * \version 1.2.2
+ */
+typedef enum _vip_buffer_memory_type_e
+{
+    /*! \brief The default memory type.
+        Used to allocate video memory from the driver when calling vip_create_buffer.
+    */
+    VIP_BUFFER_MEMORY_TYPE_DEFAULT = 0x000,
+    /*! \brief Create a VIP buffer from the Host (logical, physical). */
+    VIP_BUFFER_MEMORY_TYPE_HOST = 0x001,
+    /*! \brief Create a VIP buffer from DMA_BUF */
+    VIP_BUFFER_MEMORY_TYPE_DMA_BUF = 0x003,
+    /*! \brief The max memory type */
+    VIP_BUFFER_MEMORY_TYPE_MAX,
+} vip_buffer_memory_type_e;
+
+/* \brief The list of network creation types.
+ * \ingroup group_network
+ * \version 1.0
+ */
+typedef enum _vip_create_network_type_e
+{
+    /*!< \brief NONE */
+    VIP_CREATE_NETWORK_FROM_NONE = 0x00,
+    /*!< \brief Create a network from a file path */
+    VIP_CREATE_NETWORK_FROM_FILE = 0x01,
+    /*!< \brief Create a network from a buffer into which the NBG has already been loaded */
+    VIP_CREATE_NETWORK_FROM_MEMORY = 0x02,
+    /*!< \brief Create a network from flash */
+    VIP_CREATE_NETWORK_FROM_FLASH = 0x04,
+
+    VIP_CREATE_NETWORK_MAX,
+} vip_create_network_type_e;
+
+/* \brief The list of duplicate network types.
+ The original network can't be destroy if the dup network is running or will be run later. + * \ingroup group_network + */ +typedef enum _vip_dup_network_type_e +{ + /*!< \brief NONE */ + VIP_DUP_NONE = 0x00, + /*!< \brief Duplicate command for sharing weight with another network + 1. Sharing weight with original network. + 2. The original network has the same input/output shape as the dup network. + 3. Only the input/output addresses of network are difference between the original network with dup network. + */ + VIP_DUP_FOR_CMD_BY_NETWORK = 0x01, + /*!< \brief Duplicate command for sharing weight with difference network(NBGs) + 1. Sharing weight with original network. + 2. Share weight between networks with the same network topology. + For example, to support different shapes of input, such as 640x480, 480x640 and 640x960. + */ + VIP_DUP_FOR_CMD_BY_NBG = 0x02, + + /*!< \brief Indicate that the dup network is duplicated from NBG file patch */ + VIP_DUP_FROM_NBG_FILE = 0x100, + /*!< \brief Indicate that the dup network is duplicated from NBG in memory */ + VIP_DUP_FROM_NBG_MEMORY = 0x200, + /*!< \brief Indicate that the dup network is duplicated from NBG in flash */ + VIP_DUP_FROM_NBG_FLASH = 0x400, + /*!< \brief Indicate that the dup network is duplicated from network object */ + VIP_DUP_FROM_NETWORK = 0x800, + + VIP_DUP_NETWORK_MAX, +} vip_dup_network_type_e; + +/* \brief An enumeration property that specifies which power management operation to execute. + * \ingroup group_global + * \version 1.2 + */ +typedef enum _vip_power_property_e +{ + VIP_POWER_PROPERTY_NONE = 0x0000, + /*!< \brief specify the VIP frequency */ + VIP_POWER_PROPERTY_SET_FREQUENCY = 0x0001, + /*!< \brief power off VIP hardware */ + VIP_POWER_PROPERTY_OFF = 0x0002, + /*!< \brief power on VIP hardware */ + VIP_POWER_PROPERTY_ON = 0x0004, + /*!< \brief stop VIP perform network */ + VIP_POWER_PROPERTY_STOP = 0x0008, + /*!< \brief start VIP perform network */ + VIP_POWER_PROPERTY_START = 0x0010, + VIP_POWER_PROPERTY_MAX +} vip_power_property_e; + +/* \brief query hardware caps property + * \ingroup group_global + */ +typedef enum _vip_query_hardware_property_e +{ + /*!< \brief the customer ID of this VIP/NPU, the returned value is vip_uint32_t */ + VIP_QUERY_HW_PROP_CID = 0, + /*!< \brief the number of deivce, the returned value is vip_uint32_t */ + VIP_QUERY_HW_PROP_DEVICE_COUNT = 1, + /*!< \brief the number of core count for each device, the returned value is vip_uint32_t * device_count */ + VIP_QUERY_HW_PROP_CORE_COUNT_EACH_DEVICE = 2, + VIP_QUERY_HW_PROP_MAX, +} vip_query_hardware_property_e; + +/* \brief The list of properties of a network. + * \ingroup group_network + * \version 1.0 + */ +typedef enum _vip_network_property_e +{ + /* query network */ + /*!< \brief The number of layers in this network, the returned value is vip_uint32_t */ + VIP_NETWORK_PROP_LAYER_COUNT = 0, + /*!< \brief The number of input in this network, the returned value is vip_uint32_t*/ + VIP_NETWORK_PROP_INPUT_COUNT = 1, + /*!< \brief The number of output in this network, the returned value is vip_uint32_t*/ + VIP_NETWORK_PROP_OUTPUT_COUNT = 2, + /*!< \brief The network name, the returned value is vip_char_t[64] */ + VIP_NETWORK_PROP_NETWORK_NAME = 3, + /*!< \brief address information of wait-link, command, input-output buffers for viplite-Agent trigger, + not used if only use viplite. the returned value is \ref vip_address_info_t + */ + VIP_NETWORK_PROP_ADDRESS_INFO = 4, + /*!< \brief read interruput irq register value for cleaning up IRQ. 
+ the returned value is vip_uint32_t*/ + VIP_NETWORK_PROP_READ_REG_IRQ = 5, + /*!< \brief The size of memory pool, the returned value is vip_uint32_t*/ + VIP_NETWORK_PROP_MEMORY_POOL_SIZE = 6, + + /*!< \brief The network profling data, the returned value is vip_inference_profile_t */ + VIP_NETWORK_PROP_PROFILING = 7, + + /*!< \brief The the number of core for this network, the returned value is vip_uint8_t */ + VIP_NETWORK_PROP_CORE_COUNT = 8, + + /* set network */ + /* set network property should be called before vip_prepare_network */ + /*!< \brief set network to enable change PPU parameters feature for this vip_network. + the vip_set_network value param used to indicates disable or enable this feature. + vip_uint32_t *value is 1, enable change ppu param. + vip_uint32_t *value is 0, disable change ppu param */ + VIP_NETWORK_PROP_CHANGE_PPU_PARAM = 64, + /*!< \brief set memory pool buffer for network. networks can share a memory pool buffer. + the set value is \ref vip_buffer */ + VIP_NETWORK_PROP_SET_MEMORY_POOL = 65, + /*!< \brief set device id for network. networks can be submitted this vip device. */ + VIP_NETWORK_PROP_SET_DEVICE_ID = 66, + /*!< \brief set priority of network. 0 ~ 255, 0 indicates the lowest priority. */ + VIP_NETWORK_PROP_SET_PRIORITY = 67, + /*!< \brief set time out of network. unit: ms */ + VIP_NETWORK_PROP_SET_TIME_OUT = 68, + /*!< \brief set a memory for partial of full pre-load coeff data to this memory. + This memory can't be freed until the network is released. */ + VIP_NETWORK_PROP_SET_COEFF_MEMORY = 69, +} vip_network_property_e; + +/* \brief The list of properties of a group. + * \ingroup group_network + * \version 1.0 + */ +typedef enum _vip_group_property_e +{ + /* query group */ + /*!< \brief The group profling data, the returned value is vip_inference_profile_t */ + VIP_GROUP_PROP_PROFILING = 0, + + /* set group */ + /* set group property should be called before vip_add_network() + and all network in group runs on same device */ + /*!< \brief set device id for group. networks in group can be submitted this vip device. + * This prop should be called before vip_prepare_network */ + VIP_GROUP_PROP_SET_DEVICE_ID = 64, + /*!< \brief setting inference timeout value for group. unit: ms */ + VIP_GROUP_PROP_SET_TIME_OUT = 68, +} vip_group_property_e; + +/* \brief The list of property of an input or output. 
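/*
 * Usage sketch (editorial): applying the "set network" properties listed
 * above, which must be configured before vip_prepare_network().
 * vip_set_network() and vip_query_network() are declared further down in this
 * file; the value types for priority and timeout are assumed to be
 * vip_uint32_t here.
 */
#include <stdio.h>
#include "vip_lite.h"

static vip_status_e configure_network(vip_network network)
{
    vip_uint32_t priority  = 255;   /* 0 ~ 255, 0 is the lowest priority */
    vip_uint32_t timeout   = 2000;  /* inference timeout, in ms */
    vip_uint32_t pool_size = 0;
    vip_status_e status;

    status = vip_set_network(network, VIP_NETWORK_PROP_SET_PRIORITY, &priority);
    if (status != VIP_SUCCESS) return status;

    status = vip_set_network(network, VIP_NETWORK_PROP_SET_TIME_OUT, &timeout);
    if (status != VIP_SUCCESS) return status;

    /* query-side property: how large a memory pool this network uses */
    status = vip_query_network(network, VIP_NETWORK_PROP_MEMORY_POOL_SIZE, &pool_size);
    if (status == VIP_SUCCESS) {
        printf("memory pool size: %u bytes\n", pool_size);
    }
    return status;
}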
+ * \ingroup group_buffer + * \version 1.0 + */ +typedef enum _vip_buffer_property_e +{ + /*!< \brief The quantization format, the returned value is \ref vip_buffer_quantize_format_e */ + VIP_BUFFER_PROP_QUANT_FORMAT = 0, + /*!< \brief The number of dimension for this input, the returned value is vip_uint32_t */ + VIP_BUFFER_PROP_NUM_OF_DIMENSION = 1, + /*!< \brief The size of each dimension for this input, the returned value is vip_uint32_t * num_of_dim */ + VIP_BUFFER_PROP_SIZES_OF_DIMENSION = 2, + /*!< \brief The data format for this input, the returned value is \ref vip_buffer_format_e */ + VIP_BUFFER_PROP_DATA_FORMAT = 3, + /*!< \brief The position of fixed point for dynamic fixed point, the returned value is vip_uint8_t */ + VIP_BUFFER_PROP_FIXED_POINT_POS = 4, + /*!< \brief The scale value for TF quantization format, the returned value is vip_float_t */ + VIP_BUFFER_PROP_TF_SCALE = 5, + /*!< \brief The zero point for TF quantization format, the returned value is vip_uint32_t */ + VIP_BUFFER_PROP_TF_ZERO_POINT = 6, + /*!< \brief The name for network's inputs and outputs, the returned value is vip_char_t[64] */ + VIP_BUFFER_PROP_NAME = 7, +} vip_buffer_property_e; + +/* \brief The list of property of operation vip_buffer type. + * \ingroup group_buffer + * \version 1.3 + */ +typedef enum _vip_buffer_operation_type_e +{ + /*!< \brief None operation */ + VIP_BUFFER_OPER_TYPE_NONE = 0, + /*!< \brief Flush the vip buffer */ + VIP_BUFFER_OPER_TYPE_FLUSH = 1, + /*!< \brief invalidate the vip buffer */ + VIP_BUFFER_OPER_TYPE_INVALIDATE = 2, + VIP_BUFFER_OPER_TYPE_MAX, +} vip_buffer_operation_type_e; + +typedef struct _vip_network *vip_network; +typedef struct _vip_buffer *vip_buffer; +typedef struct _vip_group *vip_group; + + +/*! \brief Input parameter for vip_create_buffer + * \ingroup group_buffer + */ +typedef struct _vip_buffer_create_params_t +{ + /*!< \brief The number of dimensions specified in *sizes*/ + vip_uint32_t num_of_dims; + /*!< \brief The pointer to an array of dimension */ + vip_uint32_t sizes[6]; + /*!< \brief Data format for the tensor, see \ref vip_buffer_format_e */ + vip_enum data_format; + /*!< \brief Quantized format see \ref vip_buffer_quantize_format_e . */ + vip_enum quant_format; + /*\ref vip_uint32_t + * \ingroup group_global + */ +VIP_API +vip_uint32_t vip_get_version( + void + ); + +/*! \brief Initial VIP Hardware, VIP lite software environment and power on VIP hardware. + * \details when vpmdENABLE_MULTIPLE_TASK set to 0, + This function should be only called once before using VIP hardware if. + when vpmdENABLE_MULTIPLE_TASK set to 1, + vip_init can be called multiple times, but should paired with vip_destroy. + vip_init should be called in every process. + only need call vip_init once in multi-thread. + * VIP lite driver would construct some global variable for this call.Also + * it will reset VIP and initialize VIP hardware to a ready state to accept jobs. + * \return \ref vip_status_e + * \ingroup group_global + * \version 1.0 + */ +VIP_API +vip_status_e vip_init( + void + ); + +/*! \brief Terminate VIP lite driver and shut down VIP hardware. + * \details This function should be the last function called by application. + vip_destroy should paired with vip_init called. + * After it, no VIP lite API should be called except \ref vip_init + * \return \ref vip_status_e + * \ingroup group_global + * \version 1.0 + * \notes vip_destroy should be called in the same thread as vip_init. + */ +VIP_API +vip_status_e vip_destroy( + void + ); + +/*! 
\brief Queries hardware caps information. This function shold be called after calling vip_init. + *\param property, the query property enum. + *\param size, the size of value buffer. + *\param value, the value buffer of returns. + * \ingroup group_global +*/ +VIP_API +vip_status_e vip_query_hardware( + IN vip_query_hardware_property_e property, + IN vip_uint32_t size, + OUT void *value + ); + +/*! \brief Create a input or output buffer with specified parameters. + *\details The buffer object always takes [w, h, c, n] order, + there is no padding/hole between lines/slices/batches. + *\param [in] create_param The pointer to \ref vip_buffer_create_params_t structure. + *\param [in] size_of_param The size of create_param pointer. + *\param [out] buffer An opaque handle for the new buffer object if the request is executed successfully. + *\return \ref vip_status_e + *\ingroup group_buffer + *\version 1.0 + */ +VIP_API +vip_status_e vip_create_buffer( + IN vip_buffer_create_params_t *create_param, + IN vip_uint32_t size_of_param, + OUT vip_buffer *buffer + ); + +/* +@brief Create a buffer used by the network's input and output. + use vip_create_buffer_from_handle function. +@param create_param The parametes of buffer be created. +@param size_of_param The size of create paramters. +@param buffer The returns buffer object. +*/ +VIP_API +vip_status_e vip_create_buffer_cache( + IN vip_buffer_create_params_t *create_param, + IN vip_uint32_t size_of_param, + OUT vip_buffer *buffer + ); + +/*! \brief Create a buffer from user contiguous or scatter non-contiguous physical address. + the vip_buffer created by this APi doesn't support flush CPU cache in driver. + So the physical memory should be a non-cache buffer or flush CPU on Host control. + not map user space logical on Linux. + *\param [in] create_param The pointer to \ref vip_buffer_create_params_t structure. + *\param [in] physical_table Physical address table. should be wraped for VIP hardware. + *\param [in] size_table The size of physical memory for each physical_table element. + *\param [in] physical_num The number of physical table element. + physical_num is 1 when create buffer from contiguous phyiscal. + *\param [out] buffer. vip lite buffer object. + *\return \ref vip_status_e +*\ingroup group_buffer +*/ +VIP_API +vip_status_e vip_create_buffer_from_physical( + IN const vip_buffer_create_params_t *create_param, + IN const vip_address_t *physical_table, + IN const vip_uint32_t *size_table, + IN vip_uint32_t physical_num, + OUT vip_buffer *buffer + ); + +/*! \brief Create a vip buffer with specified parameters. + The vip_buffer can be used to input, output, memory pool and so on. + NOTE: driver will operation CPU cache when call vip_flush_buffer API. + application should call vip_flush_buffer API if the memory handle have CPU cache. + after write data into this buffer, APP should call vip_flush_buffer(VIP_BUFFER_OPER_TYPE_FLUSH) + before CPU read date from this buffer. APP should call vip_flush_buffer(VIP_BUFFER_OPER_TYPE_INVALIDATE) + *\ when MMU disabled, create buffer from a contiguous physical memory. + *\ when MMU enabled, create buffer from a contiguous physical memory or + logical address(convert to physical in kenrel pace). + *\details The buffer object always takes [w, h, c, n] order, + there is no padding/hole between lines/slices/batches. + *\param [in] create_param The pointer to \ref vip_buffer_create_params_t structure. + *\param [in] handle_logical The logical address of the handle. 
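/*
 * Usage sketch (editorial): the basic driver lifecycle around
 * vip_query_hardware(). vip_init() and vip_destroy() must be paired, and
 * hardware queries are only valid after vip_init() has succeeded.
 */
#include <stdio.h>
#include "vip_lite.h"

int main(void)
{
    vip_uint32_t cid = 0, device_count = 0;

    if (vip_init() != VIP_SUCCESS) {
        printf("vip_init failed\n");
        return -1;
    }
    printf("VIPLite version: 0x%x\n", vip_get_version());

    if (vip_query_hardware(VIP_QUERY_HW_PROP_CID, sizeof(cid), &cid) == VIP_SUCCESS) {
        printf("customer id: 0x%x\n", cid);
    }
    if (vip_query_hardware(VIP_QUERY_HW_PROP_DEVICE_COUNT,
                           sizeof(device_count), &device_count) == VIP_SUCCESS) {
        printf("device count: %u\n", device_count);
    }

    vip_destroy();
    return 0;
}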
+ create vip buffer from the logical address. + *\param [in] the handle_size should be aligned to 64byte(vpmdCPU_CACHE_LINE_SIZE) for easy flash CPU cache. + *\param [out] buffer. vip lite buffer object. + *\return \ref vip_status_e + *\ingroup group_buffer + *\version 1.1 +*/ +VIP_API +vip_status_e vip_create_buffer_from_handle( + IN const vip_buffer_create_params_t *create_param, + IN const vip_ptr handle_logical, + IN vip_uint32_t handle_size, + OUT vip_buffer *buffer + ); + +/*! \brief Create a vip buffer from user fd(file descriptor). + only support create buffer from dma-buf on Linux. + the vip_buffer created by this APi doesn't support flush CPU cache in driver. + So the dma-buf should be a non-cache buffer or flush CPU on Host control. + *\param [in] create_param The pointer to \ref vip_buffer_create_params_t structure. + *\param [in] fd user memory file descriptor. + *\param [in] memory_size The size of user memory. + the handle_size should be aligned to 64byte(vpmdCPU_CACHE_LINE_SIZE) for easy flash CPU cache. + *\param [out] buffer. vip lite buffer object. + *\return \ref vip_status_e + *\ingroup group_buffer +*/ +VIP_API +vip_status_e vip_create_buffer_from_fd( + IN const vip_buffer_create_params_t *create_param, + IN vip_uint32_t fd, + IN vip_uint32_t memory_size, + OUT vip_buffer *buffer + ); + +/*! \brief Destroy a buffer object which was created before. + *\param [in] buffer The opaque handle of buffer to be destroyed. + *\return \ref vip_status_e + *\ingroup group_buffer + *\version 1.0 +*/ +VIP_API +vip_status_e vip_destroy_buffer( + IN vip_buffer buffer + ); + +/*! \brief Map a buffer to get the CPU accessible address for read or write + *\param [in] buffer The handle of buffer to be mapped. + *\return A pointer that application can use to read or write the buffer data. + *\ingroup group_buffer + *\version 1.0 +*/ +VIP_API +void * vip_map_buffer( + IN vip_buffer buffer + ); + +/*! \brief Unmap a buffer which was mapped before. + *\param [in] buffer The handle of buffer to be unmapped. + *\return \ref vip_status_e + *\ingroup group_buffer + *\version 1.0 +*/ +VIP_API +vip_status_e vip_unmap_buffer( + IN vip_buffer buffer + ); + +/*! \brief Get the size of bytes allocated for the buffer. + *\param [in] buffer The handle of buffer to be queried. + *\return \ref the size of bytes + *\ingroup group_buffer + *\version 1.0 +*/ +VIP_API +vip_uint32_t vip_get_buffer_size( + IN vip_buffer buffer + ); + +/*! \brief operation the vip buffer CPU chace. flush, invalidate cache. + You should call vip_flush_buffer to flush buffer for input. + and invalidate buffer for network's output if these memories with CPU cache. +*\param buffer The vip buffer object. +*\param the type of this operation. see vip_buffer_operation_type_e. +*\ingroup group_buffer +*/ +VIP_API +vip_status_e vip_flush_buffer( + IN vip_buffer buffer, + IN vip_buffer_operation_type_e type + ); + +/*! \brief Create a network object from the given binary data. + *\details The binary is generated by the binary graph generator and it's a blob binary. + *\VIP lite Driver could interprete it to create a network object. + *\param [in] data The pointer to the binary graph. it can be a file path or a memory pointer, depending on type. + *\param [in] size_of_data The byte size of data object. the byte size of NBG buffer. + You can ignore it if create network form fil path. + *\param [in] type how to create a network object. please refer to vip_create_network_type_e enum. 
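/*
 * Usage sketch (editorial): creating an input buffer, filling it through the
 * CPU mapping, and maintaining the CPU cache with vip_flush_buffer(). The
 * tensor shape (224x224x3x1, UINT8) is only an example; fields of
 * vip_buffer_create_params_t that are not shown are zeroed.
 */
#include <string.h>
#include "vip_lite.h"

static vip_status_e fill_input(const vip_uint8_t *src, vip_uint32_t src_size,
                               vip_buffer *out_buffer)
{
    vip_buffer_create_params_t params;
    vip_buffer buffer = VIP_NULL;
    void *cpu_ptr = VIP_NULL;
    vip_uint32_t copy_size;
    vip_status_e status;

    memset(&params, 0, sizeof(params));
    params.num_of_dims  = 4;
    params.sizes[0]     = 224;  /* w */
    params.sizes[1]     = 224;  /* h */
    params.sizes[2]     = 3;    /* c */
    params.sizes[3]     = 1;    /* n */
    params.data_format  = VIP_BUFFER_FORMAT_UINT8;
    params.quant_format = VIP_BUFFER_QUANTIZE_NONE;

    status = vip_create_buffer(&params, sizeof(params), &buffer);
    if (status != VIP_SUCCESS) return status;

    cpu_ptr = vip_map_buffer(buffer);
    if (cpu_ptr == VIP_NULL) {
        vip_destroy_buffer(buffer);
        return VIP_ERROR_FAILURE;
    }
    copy_size = vip_get_buffer_size(buffer);
    if (src_size < copy_size) copy_size = src_size;
    memcpy(cpu_ptr, src, copy_size);
    vip_unmap_buffer(buffer);

    /* push the CPU-written data to memory before the NPU reads it */
    status = vip_flush_buffer(buffer, VIP_BUFFER_OPER_TYPE_FLUSH);

    *out_buffer = buffer;
    return status;
}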
+ *\param [out] network An opaque handle to the new network object if the request is executed successfully + *\return \ref vip_status_e + *\ingroup group_network + *\version 1.0 + */ +VIP_API +vip_status_e vip_create_network( + IN const void *data, + IN vip_uint32_t size_of_data, + IN vip_create_network_type_e type, + OUT vip_network *network + ); + +/* +@brief Duplicate a network for sharing weight. + The original network can't be destroy if the dup network is running or will be run later. +@param data, NBG file path, the buffer of NBG + different data according to type(vip_dup_network_type_e) is from file/memory/flash. + The data object can be set to VIP_NULL when type is VIP_DUP_NETWORK_FROM_NETWORK. +@param size_of_data, the bytes size of data. the byte size of NBG buffer. + the size can be set to 0 when *data is NBG file path. +@param type, vip_dup_network_type_e. +*\param network, original network to be dup. +*\param dup_network, network object which created by duplicated. +*/ +VIP_API +vip_status_e vip_dup_network( + IN const void *data, + IN vip_uint32_t size_of_data, + IN vip_dup_network_type_e type, + IN vip_network network, + OUT vip_network *dup_network + ); + +/*! \brief Weak dup a vip_network object. + The weak dup netowrk copy new command buffer. and share coefficient data and ppu instruction with original network. + Notes: + 1. The original network can't be destroy if the weak dup network is running or will be run later. + 2. The original network has the same input/output shape as the dup network. + 3. Only the input/output addresses of network are difference between the original network with dup network. + eg: Used to support batch network. +*\param network, original network to be dup. +*\param dup_network, output network. +*\return \ref vip_status_e +*\version 1.0 +*/ +VIP_API +vip_status_e vip_weak_dup_network( + IN vip_network network, + OUT vip_network *dup_network + ); + +/*! \brief Destroy a network object + *\details Release all resources allocated for this network. + *\param [in] network The opaque handle to the network to be destroyed + *\return \ref vip_status_e + *\ingroup group_network + *\version 1.0 + */ +VIP_API +vip_status_e vip_destroy_network( + IN vip_network network + ); + +/*! \brief Configure network property. configure network. this API should be called before calling vip_prepare_network. + *\details Configure network's layer inputs/outputs information + *\param [in] network A property \ref vip_network_property_e to be configuied. + *\return \ref vip_status_e + */ +VIP_API +vip_status_e vip_set_network( + IN vip_network network, + IN vip_enum property, + IN void *value + ); + +/*! \brief Query a property of the network object. + *\details User can use this API to get any properties from a network. + *\param [in] network The opaque handle to the network to be queried + *\param [in] property A property \ref vip_network_property_e to be queried. + *\param [out] value A pointer to memory to store the return value, + different property could return different type/size of value. + * please see comment of \ref vip_network_property_e for detail. + *\return \ref vip_status_e + *\ingroup group_network + *\version 1.0 + */ +VIP_API +vip_status_e vip_query_network( + IN vip_network network, + IN vip_enum property, + OUT void *value + ); + +/*! \brief Prepare a network to run on VIP. + *\details This function only need to be called once to prepare a network and make it ready to execute on VIP hardware. 
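/*
 * Usage sketch (editorial): creating a network object from an NBG file and
 * querying a few of the network properties documented earlier in this file.
 * size_of_data is passed as 0 because it can be ignored when creating from a
 * file path.
 */
#include <stdio.h>
#include "vip_lite.h"

static vip_status_e load_network(const char *nbg_path, vip_network *out_network)
{
    vip_network  network = VIP_NULL;
    vip_uint32_t input_count = 0, output_count = 0;
    vip_char_t   name[64] = {0};
    vip_status_e status;

    status = vip_create_network(nbg_path, 0, VIP_CREATE_NETWORK_FROM_FILE, &network);
    if (status != VIP_SUCCESS) return status;

    vip_query_network(network, VIP_NETWORK_PROP_INPUT_COUNT, &input_count);
    vip_query_network(network, VIP_NETWORK_PROP_OUTPUT_COUNT, &output_count);
    vip_query_network(network, VIP_NETWORK_PROP_NETWORK_NAME, name);
    printf("network %s: %u input(s), %u output(s)\n", name, input_count, output_count);

    *out_network = network;
    return VIP_SUCCESS;
}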
+ * It would do all heavy-duty work, including allocate internal memory resource for this network, + deploy all operation's resource + * to internal memory pool, allocate/generate command buffer for this network, + patch command buffer for the resource in the internal memory + * allocations. If this function is called more than once, driver will silently ignore it. + If this function is executed successfully, this network is prepared. + *\param [in] network The opaque handle to the network which need to be prepared. + *\return \ref vip_status_e + *\ingroup group_network + *\version 1.0 + */ +VIP_API +vip_status_e vip_prepare_network( + IN vip_network network + ); + +/*! \brief Query a property of a specific input of a given network. + *\details The specified input/property/network must be valid, otherwise VIP_ERROR_INVALID_ARGUMENTS will be returned. + *\param [in] network The opaque handle to the network to be queried + *\param [in] index Specify which input to be queried in case there are multiple inputs in the network + *\param [in] property Specify which property application wants to know, see \ref vip_buffer_property_e + *\param [out] value Returned value, the details type/size, please refer to the comment of \ref vip_input_property_e + *\return \ref vip_status_e + *\ingroup group_network + *\version 1.0 + */ +VIP_API +vip_status_e vip_query_input( + IN vip_network network, + IN vip_uint32_t index, + IN vip_enum property, + OUT void *value + ); + +/*! \brief Query a property of a specific output of a given network. + *\details The specified output/property/network must be valid, otherwise VIP_ERROR_INVALID_ARGUMENTS will be returned. + *\param [in] network The opaque handle to the network to be queried + *\param [in] index Specify which output to be queried in case there are multiple outputs in the network + *\param [in] property Specify which property application wants to know, see \ref vip_buffer_property_e + *\param [out] value Returned value, the details type/size, please refer to the comment of \ref vip_input_property_e + *\return \ref vip_status_e + *\ingroup group_network + *\version 1.0 + */ +VIP_API +vip_status_e vip_query_output( + IN vip_network network, + IN vip_uint32_t index, + IN vip_enum property, + OUT void *value + ); + +/*! \brief Attach an input buffer to the specified index of the network. + *\details All the inputs of the network need to be attached to a valid input buffer before running a network, otherwise + * VIP_ERROR_MISSING_INPUT_OUTPUT will be returned when calling \ref vip_run_network . + When attaching an input buffer + * to the network, driver would patch the network command buffer to fill in this input buffer address. + This function could be called + * multiple times to let application update the input buffers before next network execution. + The network must be prepared by \ref vip_prepare_network before + * attaching an input. + *\param [in] network The opaque handle to a network which we want to attach an input buffer + *\param [in] index The index specify which input in the network will be set + *\param [in] input The opaque handle to a buffer which will be attached to the network. + *\return \ref vip_status_e + *\ingroup group_network + *\version 1.0 + */ +VIP_API +vip_status_e vip_set_input( + IN vip_network network, + IN vip_uint32_t index, + IN vip_buffer input + ); + +/*! \brief Attach an output buffer to the specified index of the network. 
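/*
 * Usage sketch (editorial): inspecting input 0 with vip_query_input() so an
 * appropriately shaped vip_buffer can be created for it. The same pattern
 * applies to vip_query_output().
 */
#include <stdio.h>
#include "vip_lite.h"

static vip_status_e describe_input0(vip_network network)
{
    vip_uint32_t dim_count = 0, sizes[6] = {0};
    vip_enum     data_format = 0, quant_format = 0;
    vip_uint32_t i;
    vip_status_e status;

    status = vip_query_input(network, 0, VIP_BUFFER_PROP_NUM_OF_DIMENSION, &dim_count);
    if (status != VIP_SUCCESS) return status;

    vip_query_input(network, 0, VIP_BUFFER_PROP_SIZES_OF_DIMENSION, sizes);
    vip_query_input(network, 0, VIP_BUFFER_PROP_DATA_FORMAT, &data_format);
    vip_query_input(network, 0, VIP_BUFFER_PROP_QUANT_FORMAT, &quant_format);

    printf("input 0: %u dims, data format %d, quant format %d\n",
           dim_count, data_format, quant_format);
    for (i = 0; i < dim_count && i < 6; i++) {
        printf("  dim[%u] = %u\n", i, sizes[i]);
    }
    return VIP_SUCCESS;
}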
+ *\details All the outputs of the network need to be attached to a valid output buffer before running a network, otherwise + * VIP_ERROR_MISSING_INPUT_OUTPUT will be returned when calling \ref vip_run_network . + When attaching an output buffer + * to the network, driver would patch the network command buffer to fill in this output buffer address. + This function could be called + * multiple times to let application update the output buffers before next network execution. + The network must be prepared by \ref vip_prepare_network before + * attaching an output. + *\param [in] network The opaque handle to a network which we want to attach an output buffer + *\param [in] index The index specify which output in the network will be set + *\param [in] output The opaque handle to a buffer which will be attached to the network. + *\return \ref vip_status_e + *\ingroup group_network + *\version 1.0 + */ +VIP_API +vip_status_e vip_set_output( + IN vip_network network, + IN vip_uint32_t index, + IN vip_buffer output + ); + +/*! \brief. Kick off the network execution and send command buffer of this network to VIP hardware. + *\details This function can be called multiple times. + Every time it's called it would do inference with current attached + * input buffers and output buffers. It would return until VIP finish the execution. + If the network is not ready to execute + * for some reason like not be prepared by \ref vip_prepare_network , + it would fail with status reported. + *\param [in] network The opaque handle to the network to be executed. + *\return \ref vip_status_e + *\ingroup group_network + *\version 1.0 + */ +VIP_API +vip_status_e vip_run_network( + IN vip_network network + ); + +/*! \brief Finish using this network to do inference. + *\details This function is paired with \ref vip_prepare_network . + It's suggested to be called once after \ref vip_prepare_network called. + * If it's called more than that, it will be silently ignored. + If the network is not prepared but finished is called, it's silently ignored too. + * This function would release all internal memory allocations which are allocated when + the network is prepared. Since the preparation of network takes much time, + * it is suggested that if the network will be still used later, application should not + finish the network unless there is no much system resource remained for other + * networks. The network object is still alive unitl it's destroyed by \ref vip_destroy_network . + *\param [in] network The opaque handle to the network which will be finished. + *\return \ref vip_status_e + *\ingroup group_network + *\version 1.0 + */ +VIP_API +vip_status_e vip_finish_network( + IN vip_network network + ); + +/*! \brief. Kick off the network execution and send command buffer of this network to VIP hardware. +*\details This function is similar to \ref vip_run_network except that it returns + immediately without waiting for HW to complete the commands. +*\param [in] network The opaque handle to the network to be executed. +*\return \ref vip_status_e +*\ingroup group_network +*\version 1.0 +*/ +VIP_API +vip_status_e vip_trigger_network( + IN vip_network network + ); + +/*! \brief. Run tasks in group,these tasks is added by vip_add_network. + The order of executuion of tasks is call vip_add_network. +*\details This function is similar to \ref vip_run_group except that it returns + immediately without waiting for HW to complete the commands. 
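/*
 * Usage sketch (editorial): a minimal blocking inference pass once the
 * network and the input/output vip_buffer objects exist.
 * vip_trigger_network()/vip_wait_network() split the same submission into an
 * asynchronous form.
 */
#include "vip_lite.h"

static vip_status_e run_once(vip_network network, vip_buffer input, vip_buffer output)
{
    vip_status_e status;

    status = vip_prepare_network(network);
    if (status != VIP_SUCCESS) return status;

    status = vip_set_input(network, 0, input);
    if (status != VIP_SUCCESS) return status;
    status = vip_set_output(network, 0, output);
    if (status != VIP_SUCCESS) return status;

    status = vip_run_network(network);  /* blocks until the NPU has finished */
    if (status != VIP_SUCCESS) return status;

    /* drop stale CPU cache lines before reading the result on the CPU */
    return vip_flush_buffer(output, VIP_BUFFER_OPER_TYPE_INVALIDATE);
}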
+*\return \ref vip_status_e +*\param group vip_group object +*\param the number of task will be run. + eg: num is 4, the 0, 1, 2, 3 taks index in group will be run(inference). +*\ingroup group_network +*\version 1.0 +*/ +VIP_API +vip_status_e vip_trigger_group( + IN vip_group group, + IN vip_uint32_t num + ); + +/*! \brief. Explicitly wait for HW to finish executing the submitted commands. +*\details This function waits for HW to complete the commands. + This should be called once CPU needs to access the network currently being run. +*\return \ref vip_status_e +*\ingroup group_network +*\version 1.0 +*/ +VIP_API +vip_status_e vip_wait_network( + IN vip_network network + ); + +/*! \brief. Explicitly wait for HW to finish executing the submitted task in group. +*\details This function waits for HW to complete the submitted commands in group. + This should be called once CPU needs to access the group currently being run. +*\return \ref vip_status_e +*\ingroup group_network +*\version 1.0 +*/ +VIP_API +vip_status_e vip_wait_group( + IN vip_group group + ); + +/*! \brief. Cancle network running on vip hardware after network is commited. +*\details This function is cancel network running on vip hardware. +*\return \ref vip_status_e +*\ingroup group_network +*\version 1.0 +*/ +VIP_API +vip_status_e vip_cancel_network( + IN vip_network network + ); + +/*! \brief. give user applications more control over power management for VIP cores. +*\details. control VIP core frequency and power status by property. see vip_power_property_e. +*\param ID of the managed device. device_id is 0 if VIP is single core. +*\param perperty Control VIP core frequency and power status by property. see vip_power_property_e. +*\param value The value for vip_power_property_e property. + Please see vip_power_frequency_t if property is setting to VIP_POWER_PROPERTY_SET_FREQUENCY. +*\return \ref vip_status_e +*\ingroup group_network +*\version 1.0 +*/ +VIP_API +vip_status_e vip_power_management( + IN vip_uint32_t device_id, + IN vip_power_property_e property, + IN void *value + ); + +/*! \brief. Create a vip_group object to run multiple tasks(network or node) + and without interrupt between each task. +*\return \ref vip_status_e +*\param count The maximum number of tasks supports by this group. +*\param group Return vip_group object be created. +*\version 1.0 +*/ +VIP_API +vip_status_e vip_create_group( + IN vip_uint32_t count, + OUT vip_group *group + ); + +/*! \brief. Destroy group object which created by vip_create_group. +*\return \ref vip_status_e +*\param group vip_group object/ +*\version 1.0 +*/ +VIP_API +vip_status_e vip_destroy_group( + IN vip_group group + ); + +/* +@brief set group property. configure group. this API should be called before calling vip_run_group. +@param group The group object which created by vip_create_group(). +@param property The property be set. see vip_group_property_e. +@param value The set data. +*/ +VIP_API +vip_status_e vip_set_group( + IN vip_group group, + IN vip_enum property, + IN void *value + ); + +/*! \brief Query a property of the group object. + *\param [in] group The group object which created by vip_create_group(). + *\param [in] property A property \ref vip_group_property_e to be queried. + *\param [out] value A pointer to memory to store the return value, + different property could return different type/size of value. + * please see comment of \ref vip_group_property_e for detail. 
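/*
 * Usage sketch (editorial): batching two prepared networks into one vip_group
 * and submitting them with a single trigger/wait pair. vip_add_network() is
 * declared further down in this file; vip_run_group() would be the
 * single-call synchronous alternative. The timeout value type is assumed to
 * be vip_uint32_t.
 */
#include "vip_lite.h"

static vip_status_e run_as_group(vip_network net_a, vip_network net_b)
{
    vip_group    group = VIP_NULL;
    vip_uint32_t timeout = 5000; /* ms */
    vip_status_e status;

    status = vip_create_group(2, &group);
    if (status != VIP_SUCCESS) return status;

    /* group properties must be set before vip_add_network() */
    vip_set_group(group, VIP_GROUP_PROP_SET_TIME_OUT, &timeout);

    status = vip_add_network(group, net_a);
    if (status == VIP_SUCCESS) status = vip_add_network(group, net_b);

    if (status == VIP_SUCCESS) {
        /* run task indexes 0 and 1 in the order they were added */
        status = vip_trigger_group(group, 2);
        if (status == VIP_SUCCESS) {
            status = vip_wait_group(group);
        }
    }

    vip_destroy_group(group);
    return status;
}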
+ *\return \ref vip_status_e + *\ingroup group_network + *\version 1.0 + */ +VIP_API +vip_status_e vip_query_group( + IN vip_group group, + IN vip_enum property, + OUT void *value + ); + +/*! \brief. add a vip_network object into group. +*\return \ref vip_status_e +*\param group vip_group object, network be added into group. +*\param network vip_network added into group. +*\version 1.0 +*/ +VIP_API +vip_status_e vip_add_network( + IN vip_group group, + IN vip_network network + ); + +/*! \brief. run tasks in group. only issue a interrupt after tasks complete. + These tasks is added by vip_add_network. + The order of executuion of tasks is call vip_add_network. +*\return \ref vip_status_e +*\param group vip_group object +*\param the number of task will be run. + eg: num is 4, the 0, 1, 2, 3 taks index in group will be run(inference). +*\version 1.0 +*/ +VIP_API +vip_status_e vip_run_group( + IN vip_group group, + IN vip_uint32_t num + ); + +/*! \brief. change PPU engine parameters. + change local size, global size, global offset and global scale. +*\return \ref vip_status_e +*\param network The network object should be changed. +*\param param PPU parameters +*\param index The index of PPU node, not used. please set to zero. +*\ingroup group_network +*\version 1.0 +*/ +VIP_API +vip_status_e vip_set_ppu_param( + IN vip_network network, + IN vip_ppu_param_t *param, + IN vip_uint32_t index + ); + +#ifdef __cplusplus +} +#endif + +#endif diff --git a/viplite-tina/lib/aarch64-none-linux-gnu/v1.13/inc/vip_lite_common.h b/viplite-tina/lib/aarch64-none-linux-gnu/v1.13/inc/vip_lite_common.h new file mode 100644 index 0000000..e478383 --- /dev/null +++ b/viplite-tina/lib/aarch64-none-linux-gnu/v1.13/inc/vip_lite_common.h @@ -0,0 +1,236 @@ +/**************************************************************************** +* +* The MIT License (MIT) +* +* Copyright (c) 2017 - 2023 Vivante Corporation +* +* Permission is hereby granted, free of charge, to any person obtaining a +* copy of this software and associated documentation files (the "Software"), +* to deal in the Software without restriction, including without limitation +* the rights to use, copy, modify, merge, publish, distribute, sublicense, +* and/or sell copies of the Software, and to permit persons to whom the +* Software is furnished to do so, subject to the following conditions: +* +* The above copyright notice and this permission notice shall be included in +* all copies or substantial portions of the Software. +* +* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING +* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER +* DEALINGS IN THE SOFTWARE. +* +***************************************************************************** +* +* The GPL License (GPL) +* +* Copyright (C) 2017 - 2023 Vivante Corporation +* +* This program is free software; you can redistribute it and/or +* modify it under the terms of the GNU General Public License +* as published by the Free Software Foundation; either version 2 +* of the License, or (at your option) any later version. 
+* +* This program is distributed in the hope that it will be useful, +* but WITHOUT ANY WARRANTY; without even the implied warranty of +* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +* GNU General Public License for more details. +* +* You should have received a copy of the GNU General Public License +* along with this program; if not, write to the Free Software Foundation, +* Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. +* +***************************************************************************** +* +* Note: This software is released under dual MIT and GPL licenses. A +* recipient may use this file under the terms of either the MIT license or +* GPL License. If you wish to use only one license not the other, you can +* indicate your decision by deleting one of the above license notices in your +* version of this file. +* +*****************************************************************************/ + +#ifndef _VIP_LITE_COMMON_H +#define _VIP_LITE_COMMON_H + +#ifdef __cplusplus +extern "C" { +#endif + +/*! + *\brief The VIP lite API for Convolution Neural Network application on CPU/MCU/DSP type of embedded environment. + *\details This VIP lite APIs is not thread-safe if vpmdENABLE_MULTIPLE_TASK is set to 0, + user must guarantee to call these APIs in a proper way. + But defines vpmdENABLE_MULTIPLE_TASK 1, VIPLite can support multiple task(multiple thread/process). + and it's thread-safe. + *Memory allocation and file io functions used inside driver internal would depend on working enviroment. + *\defgroup group_global Data Type Definitions and Global APIs + *\brief Data type definition and global APIs that are used in the VIP lite + *\defgroup group_buffer Buffer API + *\brief The API to manage input/output buffers + *\defgroup group_network Network API + *\brief The API to manage networks + */ + +/*! \brief An 8-bit unsigned value. + * \ingroup group_global + * \version 1.0 + */ +typedef unsigned char vip_uint8_t; + +/*! \brief An 16-bit unsigned value. + * \ingroup group_global + * \version 1.0 + */ +typedef unsigned short vip_uint16_t; + +/*! \brief An 32-bit unsigned value. + * \ingroup group_global + * \version 1.0 + */ +typedef unsigned int vip_uint32_t; + +/*! \brief An 64-bit unsigned value. + * \ingroup group_global + * \version 1.0 + */ +typedef unsigned long long vip_uint64_t; + +/*! \brief An 8-bit signed value. + * \ingroup group_global + * \version 1.0 + */ +typedef signed char vip_int8_t; + +/*! \brief An 16-bit signed value. + * \ingroup group_global + * \version 1.0 + */ +typedef signed short vip_int16_t; + +/*! \brief An 32-bit signed value. + * \ingroup group_global + * \version 1.0 + */ +typedef signed int vip_int32_t; + +/*! \brief An 64-bit signed value. + * \ingroup group_global + * \version 1.0 + */ +typedef signed long long vip_int64_t; + +/*! \brief An 8 bit ASCII character. + * \ingroup group_global + * \version 1.0 + */ +typedef char vip_char_t; + +/*! \brief An 32 bit float value. + * \ingroup group_global + * \version 1.0 + */ +typedef float vip_float_t; + +/*! \brief Sets the standard enumeration type size to be a fixed quantity. + * \ingroup group_global + * \version 1.0 + */ +typedef vip_int32_t vip_enum; + +/*! \brief a void pointer. + * \ingroup group_global + * \version 1.0 + */ +typedef void* vip_ptr; + +/*! \brief A 64-bit float value (aka double). + * \ingroup group_basic_features + */ +typedef double vip_float64_t; + +/*! \brief address type. 
+ * \ingroup group_global + * \version 1.0 + */ +typedef unsigned long long vip_address_t; + +/*! \brief A zero value for pointer + *\ingroup group_global + *\version 1.0 + */ +#ifndef VIP_NULL +#define VIP_NULL 0 +#endif + +/***** Helper Macros. *****/ +#define VIP_API + +#define IN +#define OUT + +/*! \brief A invalid value if a property is not avaialbe for the query. + *\ingroup group_global + *\version 1.0 + */ +#define VIP_INVALID_VALUE ~0UL + +/*! \brief A Boolean value. + *\details This allows 0 to be FALSE, as it is in C, and any non-zero to be TRUE. + *\ingroup group_global + *\version 1.0 + */ +typedef enum _vip_bool_e { + /*! \brief The "false" value. */ + vip_false_e = 0, + /*! \brief The "true" value. */ + vip_true_e, +} vip_bool_e; + +/*! \brief The enumeration of all status codes. + * \ingroup group_global + * \version 1.0 + */ +typedef enum _vip_status +{ + /*!< \brief Indicates the network is canceld */ + VIP_ERROR_CANCELED = -15, + /*!< \brief Indicates the hardware is recovery done after hang */ + VIP_ERROR_RECOVERY = -14, + /*!< \brief Indicates the hardware is stoed */ + VIP_ERROR_POWER_STOP = -13, + /*!< \brief Indicates the hardware is in power off status */ + VIP_ERROR_POWER_OFF = -12, + /*!< \brief Indicates the failure */ + VIP_ERROR_FAILURE = -11, + /*!< \brief Indicates the binary is not compatible with the current runtime hardware */ + VIP_ERROR_NETWORK_INCOMPATIBLE = -10, + /*!< \brief Indicates the network is not prepared so current function call can't go through */ + VIP_ERROR_NETWORK_NOT_PREPARED = -9, + /*!< \brief Indicates the network misses either input or output when running the network */ + VIP_ERROR_MISSING_INPUT_OUTPUT = -8, + /*!< \brief Indicates the network binary is invalid */ + VIP_ERROR_INVALID_NETWORK = -7, + /*!< \brief Indicates driver is running out of memory of system */ + VIP_ERROR_OUT_OF_MEMORY = -6, + /*!< \brief Indicates there is no enough resource */ + VIP_ERROR_OUT_OF_RESOURCE = -5, + /*!< \brief Indicates it's supported by driver implementation */ + VIP_ERROR_NOT_SUPPORTED = -4, + /*!< \brief Indicates some arguments are not valid */ + VIP_ERROR_INVALID_ARGUMENTS = -3, + /*!< \brief Indicates there are some IO related error */ + VIP_ERROR_IO = -2, + /*!< \brief Indicates VIP timeout, could be VIP stuck somewhere */ + VIP_ERROR_TIMEOUT = -1, + /*!< \brief Indicates the execution is successfuly */ + VIP_SUCCESS = 0, +} vip_status_e; + +#ifdef __cplusplus +} +#endif + +#endif diff --git a/viplite-tina/lib/aarch64-none-linux-gnu/v1.13/libVIPlite.so b/viplite-tina/lib/aarch64-none-linux-gnu/v1.13/libVIPlite.so new file mode 100644 index 0000000..c02eddf Binary files /dev/null and b/viplite-tina/lib/aarch64-none-linux-gnu/v1.13/libVIPlite.so differ diff --git a/viplite-tina/lib/aarch64-none-linux-gnu/v1.13/libVIPuser.so b/viplite-tina/lib/aarch64-none-linux-gnu/v1.13/libVIPuser.so new file mode 100644 index 0000000..018ef0d Binary files /dev/null and b/viplite-tina/lib/aarch64-none-linux-gnu/v1.13/libVIPuser.so differ diff --git a/viplite-tina/lib/aarch64-none-linux-gnu/v2.0/inc/vip_lite.h b/viplite-tina/lib/aarch64-none-linux-gnu/v2.0/inc/vip_lite.h new file mode 100644 index 0000000..fced603 --- /dev/null +++ b/viplite-tina/lib/aarch64-none-linux-gnu/v2.0/inc/vip_lite.h @@ -0,0 +1,1112 @@ +/******************************************************************************\ +|* Copyright (c) 2017-2024 by Vivante Corporation. All Rights Reserved. 
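/*
 * Usage sketch (editorial): a small error-handling helper an application
 * might wrap around the status codes above. The macro name is illustrative,
 * not part of this header.
 */
#include <stdio.h>
#include "vip_lite_common.h"

#define CHECK_VIP(expr)                                                        \
    do {                                                                       \
        vip_status_e _s = (expr);                                              \
        if (_s != VIP_SUCCESS) {                                               \
            printf("%s failed with status %d (line %d)\n", #expr, _s, __LINE__); \
            return _s;                                                         \
        }                                                                      \
    } while (0)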
*| +|* *| +|* The material in this file is confidential and contains trade secrets of *| +|* of Vivante Corporation. This is proprietary information owned by Vivante *| +|* Corporation. No part of this work may be disclosed, reproduced, copied, *| +|* transmitted, or used in any way for any purpose, without the express *| +|* written permission of Vivante Corporation. *| +|* *| +\******************************************************************************/ + +#ifndef _VIP_LITE_H +#define _VIP_LITE_H + +#include "vip_lite_common.h" + +#ifdef __cplusplus +extern "C" { +#endif + +/*! + *\brief The VIP lite API for Convolution Neural Network application on CPU/MCU/DSP type of + embedded environment. + *\details This VIP lite APIs is not thread-safe if vpmdENABLE_MULTIPLE_TASK is set to 0, + user must guarantee to call these APIs in a proper way. + But defines vpmdENABLE_MULTIPLE_TASK 1, VIPLite can support multiple task + (multiple thread/process). + and it's thread-safe. + *Memory allocation and file io functions used inside driver internal would + depend on working enviroment. + + *\defgroup group_global Data Type Definitions and Global APIs, + *\ brief Data type definition and global APIs that are used in the VIPLite + *\defgroup group_buffer Buffer API, + The API to manage input/output buffers + *\defgroup group_network Network API + The API to manage networks + */ + +/* !\brief The data format list for buffer + * \ingroup group_buffer + * \version 2.0 + */ +typedef enum _vip_buffer_format_e +{ + /*! \brief A float type of buffer data */ + VIP_BUFFER_FORMAT_FP32 = 0, + /*! \brief A half float type of buffer data */ + VIP_BUFFER_FORMAT_FP16 = 1, + /*! \brief A 8 bit unsigned integer type of buffer data */ + VIP_BUFFER_FORMAT_UINT8 = 2, + /*! \brief A 8 bit signed integer type of buffer data */ + VIP_BUFFER_FORMAT_INT8 = 3, + /*! \brief A 16 bit unsigned integer type of buffer data */ + VIP_BUFFER_FORMAT_UINT16 = 4, + /*! \brief A 16 signed integer type of buffer data */ + VIP_BUFFER_FORMAT_INT16 = 5, + /*! \brief A char type of data */ + VIP_BUFFER_FORMAT_CHAR = 6, + /*! \brief A bfloat 16 type of data */ + VIP_BUFFER_FORMAT_BFP16 = 7, + /*! \brief A 32 bit integer type of data */ + VIP_BUFFER_FORMAT_INT32 = 8, + /*! \brief A 32 bit unsigned signed integer type of buffer */ + VIP_BUFFER_FORMAT_UINT32 = 9, + /*! \brief A 64 bit signed integer type of data */ + VIP_BUFFER_FORMAT_INT64 = 10, + /*! \brief A 64 bit unsigned integer type of data */ + VIP_BUFFER_FORMAT_UINT64 = 11, + /*! \brief A 64 bit float type of buffer data */ + VIP_BUFFER_FORMAT_FP64 = 12, + /*! \brief A signed 4bits tensor */ + VIP_BUFFER_FORMAT_INT4 = 13, + /*! \brief A unsigned 4bits tensor */ + VIP_BUFFER_FORMAT_UINT4 = 14, + /*! \brief A bool 8 bit tensor */ + VIP_BUFFER_FORMAT_BOOL8 = 16, +} vip_buffer_format_e; + +/* !\brief The quantization format list for buffer data + * \ingroup group_buffer + * \version 1.0 + */ +typedef enum _vip_buffer_quantize_format_e +{ + /*! \brief Not quantized format */ + VIP_BUFFER_QUANTIZE_NONE = 0, + /*! \brief A quantization data type which specifies + the fixed point position for whole tensor.*/ + VIP_BUFFER_QUANTIZE_DYNAMIC_FIXED_POINT = 1, + /*! \brief A quantization data type which has scale value and + zero point to match with TF and Android NN API for whole tensor. */ + VIP_BUFFER_QUANTIZE_TF_ASYMM = 2, + /*! 
\brief A max vaule support quantize format */ + VIP_BUFFER_QUANTIZE_MAX, +} vip_buffer_quantize_format_e; + +/* !\brief The memory type for vip buffer + * \ingroup group_buffer + * \version 1.2.2 + */ +typedef enum _vip_buffer_memory_type_e +{ + /*! \brief Not memory type. default memory type. + use for allocate video memory from driver calling vip_create_buffer. + */ + VIP_BUFFER_MEMORY_TYPE_DEFAULT = 0x000, + /*! \brief Create a VIP buffer from the Host (logical, physical). */ + VIP_BUFFER_MEMORY_TYPE_HOST = 0x001, + /*! \brief Create a VIP buffer from DMA_BUF */ + VIP_BUFFER_MEMORY_TYPE_DMA_BUF = 0x003, + /*! \brief The max memory type */ + VIP_BUFFER_MEMORY_TYPE_MAX, +} vip_buffer_memory_type_e; + +/* \brief The list of create network type + * \ingroup group_network + * \version 1.0 + */ +typedef enum _vip_create_network_type_e +{ + /*!< \brief NONE */ + VIP_CREATE_NETWORK_FROM_NONE = 0x00, + /*!< \brief Create network from a file path */ + VIP_CREATE_NETWORK_FROM_FILE = 0x01, + /*!< \brief Create network from buffer, NBG has been loaded in this buffer before */ + VIP_CREATE_NETWORK_FROM_MEMORY = 0x02, + /*!< \brief Create network from flash device or user memory. + The *data param of vip_create_network means are: + 1. If the NPU's MMU is enabled, the *data means that the CPU's logical address which access the memory. + 2. If the NPU's MMU is disabled, the *data means that the NPU's phyiscal address which access the memory. + This is for DDR-less project. + 1. Load NBG from flash device. The NBG file should be placed to flash device before running VIPLite. + Pass the NBG size and the location of NBG in flash device to this API. + 2. The NBG file pre-load into user memory which alloc via malloc function, or contiguous physical. + Advantage: coeff data is not copied again, save more memory than create_network_from_memory type. + Need enable VIP's MMU when works on Linux. + */ + VIP_CREATE_NETWORK_FROM_FLASH = 0x04, + + VIP_CREATE_NETWORK_MAX, +} vip_create_network_type_e; + +/* \brief The list of duplicate network type. + The original network can't be destroy if the dup network is running or will be run later. + * \ingroup group_network + */ +typedef enum _vip_dup_network_type_e +{ + /*!< \brief NONE */ + VIP_DUP_NONE = 0x00, + /*!< \brief Duplicate command for sharing weight with another network + 1. Sharing weight with original network. + 2. The original network has the same input/output shape as the dup network. + 3. Only the input/output addresses of network are difference between the + original network with dup network. + */ + VIP_DUP_FOR_CMD_BY_NETWORK = 0x01, + /*!< \brief Duplicate command for sharing weight with difference network(NBGs) + 1. Sharing weight with original network. + 2. Share weight between networks with the same network topology. + For example, to support different shapes of input, such as 640x480, 480x640 and 640x960. + */ + VIP_DUP_FOR_CMD_BY_NBG = 0x02, + + /*!< \brief Indicate that the dup network is duplicated from NBG file patch */ + VIP_DUP_FROM_NBG_FILE = 0x100, + /*!< \brief Indicate that the dup network is duplicated from NBG in memory */ + VIP_DUP_FROM_NBG_MEMORY = 0x200, + /*!< \brief Indicate that the dup network is duplicated from NBG in flash */ + VIP_DUP_FROM_NBG_FLASH = 0x400, + /*!< \brief Indicate that the dup network is duplicated from network object */ + VIP_DUP_FROM_NETWORK = 0x800, + + VIP_DUP_NETWORK_MAX, +} vip_dup_network_type_e; + +/* \brief An enumeration property that specifies which power management operation to execute. 
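/*
 * Usage sketch (editorial): the expanded VIP_CREATE_NETWORK_FROM_FLASH path
 * described above, for an NBG that is already resident in memory. With the
 * NPU MMU enabled, *data is the CPU logical address of that memory; with the
 * MMU disabled it would be an NPU physical address instead. Since the coeff
 * data is not copied again, the assumption here is that the memory must stay
 * valid for the lifetime of the network.
 */
#include "vip_lite.h"

static vip_status_e load_preloaded_nbg(void *nbg_logical, vip_uint32_t nbg_size,
                                       vip_network *out_network)
{
    /* the NBG bytes are referenced in place rather than copied */
    return vip_create_network(nbg_logical, nbg_size,
                              VIP_CREATE_NETWORK_FROM_FLASH, out_network);
}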
+ * \ingroup group_global + * \version 1.2 + */ +typedef enum _vip_power_property_e +{ + VIP_POWER_PROPERTY_NONE = 0x0000, + /*!< \brief specify the VIP frequency */ + VIP_POWER_PROPERTY_SET_FREQUENCY = 0x0001, + /*!< \brief power off VIP hardware */ + VIP_POWER_PROPERTY_OFF = 0x0002, + /*!< \brief power on VIP hardware */ + VIP_POWER_PROPERTY_ON = 0x0004, + /*!< \brief stop VIP perform network */ + VIP_POWER_PROPERTY_STOP = 0x0008, + /*!< \brief start VIP perform network */ + VIP_POWER_PROPERTY_START = 0x0010, + VIP_POWER_PROPERTY_MAX +} vip_power_property_e; + +/* \brief query hardware caps property + * \ingroup group_global + */ +typedef enum _vip_query_hardware_property_e +{ + /*!< \brief the customer ID of this VIP/NPU, the returned value is vip_uint32_t */ + VIP_QUERY_HW_PROP_CID = 0, + /*!< \brief the number of deivce, the returned value is vip_uint32_t */ + VIP_QUERY_HW_PROP_DEVICE_COUNT = 1, + /*!< \brief the number of core count for each device, the returned value is + vip_uint32_t * device_count */ + VIP_QUERY_HW_PROP_CORE_COUNT_EACH_DEVICE = 2, + VIP_QUERY_HW_PROP_MAX, +} vip_query_hardware_property_e; + +/* \brief The list of properties of a network. + * \ingroup group_network + * \version 1.0 + */ +typedef enum _vip_network_property_e +{ + /* query network */ + /*!< \brief The number of layers in this network, the returned value is vip_uint32_t */ + VIP_NETWORK_PROP_LAYER_COUNT = 0, + /*!< \brief The number of input in this network, the returned value is vip_uint32_t*/ + VIP_NETWORK_PROP_INPUT_COUNT = 1, + /*!< \brief The number of output in this network, the returned value is vip_uint32_t*/ + VIP_NETWORK_PROP_OUTPUT_COUNT = 2, + /*!< \brief The network name, the returned value is vip_char_t[64] */ + VIP_NETWORK_PROP_NETWORK_NAME = 3, + /*!< \brief address information of wait-link, command, input-output buffers for viplite-Agent trigger, + not used if only use viplite. the returned value is \ref vip_address_info_t + */ + VIP_NETWORK_PROP_ADDRESS_INFO = 4, + /*!< \brief The size of memory pool, the returned value is vip_uint32_t*/ + VIP_NETWORK_PROP_MEMORY_POOL_SIZE = 6, + /*!< \brief The network profling data, the returned value is vip_inference_profile_t */ + VIP_NETWORK_PROP_PROFILING = 7, + /*!< \brief The the number of core for this network, the returned value is vip_uint8_t */ + VIP_NETWORK_PROP_CORE_COUNT = 8, + /*!< \brief get the information of output of dumped layer. the returned value is vip_nld_output_t */ + VIP_NETWORK_PROP_GET_LAYER_DUMP_OUTPUT = 9, + + + /* set network */ + /* set network property should be called before vip_prepare_network */ + /*!< \brief set network to enable change PPU parameters feature for this vip_network. + the vip_set_network value param used to indicates disable or enable this feature. + vip_uint32_t *value is 1, enable change ppu param. + vip_uint32_t *value is 0, disable change ppu param */ + VIP_NETWORK_PROP_CHANGE_PPU_PARAM = 64, + /*!< \brief set memory pool buffer for network. networks can share a memory pool buffer. + the set value is \ref vip_buffer */ + VIP_NETWORK_PROP_SET_MEMORY_POOL = 65, + /*!< \brief set device index for network. networks can be submitted this vip device. */ + VIP_NETWORK_PROP_SET_DEVICE_ID = 66, /* will be rejected later */ + VIP_NETWORK_PROP_SET_DEVICE_INDEX = 66, + /*!< \brief set priority of network. 0 ~ 255, 0 indicates the lowest priority. */ + VIP_NETWORK_PROP_SET_PRIORITY = 67, + /*!< \brief set time out of network. unit: ms . 
the value is vip_uint32_t */ + VIP_NETWORK_PROP_SET_TIME_OUT = 68, + /*!< \brief set a memory for partial of full pre-load coeff data to this memory. + This memory can't be freed until the network is released. the value is vip_buffer */ + VIP_NETWORK_PROP_SET_COEFF_MEMORY = 69, + /*!< \brief set core index for network. network start with which core of device. + the value is vip_buffer data type */ + VIP_NETWORK_PROP_SET_CORE_INDEX = 70, + /*!< \brief enable probe mode performance function, should be called before vip_prepare_network. + * the value is vip_bool_e data type, set 1 to enable NPD */ + VIP_NETWORK_PROP_SET_ENABLE_NPD = 71, + /*!< \brief enable preload coeff into vipsram. the value is vip_bool_e data type */ + VIP_NETWORK_PROP_SET_VIPSRAM_PRELOAD = 72, + /*!< \brief set layer ids that need to be layer dumped. the value is vip_nld_layer_id_t */ + VIP_NETWORK_PROP_SET_LAYER_DUMP_ID = 73, + +} vip_network_property_e; + +/* \brief The list of properties of a group. + * \ingroup group_network + * \version 1.0 + */ +typedef enum _vip_group_property_e +{ + /* query group */ + /*!< \brief The group profling data, the returned value is vip_inference_profile_t */ + VIP_GROUP_PROP_PROFILING = 0, + + /* set group */ + /* set group property should be called before vip_add_network() + and all network in group runs on same device */ + /*!< \brief set device index for group. networks in group can be submitted this vip device. + * This prop should be called before vip_prepare_network */ + VIP_GROUP_PROP_SET_DEVICE_ID = 64, /* will be rejected later */ + VIP_GROUP_PROP_SET_DEVICE_INDEX = 64, + /*!< \brief set core index for group. networks in group start with which core of current device. + * This prop should be called before vip_prepare_network */ + VIP_GROUP_PROP_SET_CORE_INDEX = 65, + /*!< \brief setting inference timeout value for group. unit: ms */ + VIP_GROUP_PROP_SET_TIME_OUT = 68, +} vip_group_property_e; + +/* \brief The list of property of an input or output. + * \ingroup group_buffer + * \version 1.0 + */ +typedef enum _vip_buffer_property_e +{ + /*!< \brief The quantization format, the returned value is \ref + vip_buffer_quantize_format_e */ + VIP_BUFFER_PROP_QUANT_FORMAT = 0, + /*!< \brief The number of dimension for this input, the returned value is vip_uint32_t */ + VIP_BUFFER_PROP_NUM_OF_DIMENSION = 1, + /*!< \brief The size of each dimension for this input, + the returned value is vip_uint32_t * num_of_dim */ + VIP_BUFFER_PROP_SIZES_OF_DIMENSION = 2, + /*!< \brief The data format for this input, + the returned value is \ref vip_buffer_format_e */ + VIP_BUFFER_PROP_DATA_FORMAT = 3, + /*!< \brief The position of fixed point for dynamic fixed point, + the returned value is vip_uint8_t */ + VIP_BUFFER_PROP_FIXED_POINT_POS = 4, + /*!< \brief The scale value for TF quantization format, the returned value is vip_float_t */ + VIP_BUFFER_PROP_TF_SCALE = 5, + /*!< \brief The zero point for TF quantization format, the returned value is vip_uint32_t */ + VIP_BUFFER_PROP_TF_ZERO_POINT = 6, + /*!< \brief The name for network's inputs and outputs, the returned value is vip_char_t[64] */ + VIP_BUFFER_PROP_NAME = 7, +} vip_buffer_property_e; + +/* \brief The list of property of operation vip_buffer type. 
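/*
 * Usage sketch (editorial): enabling the v2.0-only probe-mode performance
 * (NPD) and VIP-SRAM preload properties listed above. Both take a vip_bool_e
 * value and, like the other set-side properties, are applied before
 * vip_prepare_network(); vip_set_network() is declared further down in this
 * file.
 */
#include "vip_lite.h"

static vip_status_e enable_v2_features(vip_network network)
{
    vip_bool_e   enable = vip_true_e;
    vip_status_e status;

    status = vip_set_network(network, VIP_NETWORK_PROP_SET_ENABLE_NPD, &enable);
    if (status != VIP_SUCCESS) return status;

    return vip_set_network(network, VIP_NETWORK_PROP_SET_VIPSRAM_PRELOAD, &enable);
}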
+ * \ingroup group_buffer + * \version 1.3 + */ +typedef enum _vip_buffer_operation_type_e +{ + /*!< \brief None operation */ + VIP_BUFFER_OPER_TYPE_NONE = 0, + /*!< \brief Flush the vip buffer */ + VIP_BUFFER_OPER_TYPE_FLUSH = 1, + /*!< \brief invalidate the vip buffer */ + VIP_BUFFER_OPER_TYPE_INVALIDATE = 2, + VIP_BUFFER_OPER_TYPE_MAX, +} vip_buffer_operation_type_e; + +typedef struct _vip_network *vip_network; +typedef struct _vip_buffer *vip_buffer; +typedef struct _vip_group *vip_group; + + +/*! \brief Input parameter for vip_create_buffer + * \ingroup group_buffer + */ +typedef struct _vip_buffer_create_params_t +{ + /*!< \brief The number of dimensions specified in *sizes*/ + vip_uint32_t num_of_dims; + /*!< \brief The pointer to an array of dimension */ + vip_uint32_t sizes[6]; + /*!< \brief Data format for the tensor, see \ref vip_buffer_format_e */ + vip_enum data_format; + /*!< \brief Quantized format see \ref vip_buffer_quantize_format_e . */ + vip_enum quant_format; + /*\ref vip_uint32_t + * \ingroup group_global + */ +VIP_API +vip_uint32_t vip_get_version( + void + ); + +/*! \brief Initial VIP Hardware, VIP lite software environment and power on VIP hardware. + * \details when vpmdENABLE_MULTIPLE_TASK set to 0, + This function should be only called once before using VIP hardware if. + when vpmdENABLE_MULTIPLE_TASK set to 1, + vip_init can be called multiple times, but should paired with vip_destroy. + vip_init should be called in every process. + only need call vip_init once in multi-thread. + * VIP lite driver would construct some global variable for this call.Also + * it will reset VIP and initialize VIP hardware to a ready state to accept jobs. + * \return \ref vip_status_e + * \ingroup group_global + * \version 1.0 + */ +VIP_API +vip_status_e vip_init( + void + ); + +/*! \brief Terminate VIP lite driver and shut down VIP hardware. + * \details This function should be the last function called by application. + vip_destroy should paired with vip_init called. + * After it, no VIP lite API should be called except \ref vip_init + * \return \ref vip_status_e + * \ingroup group_global + * \version 1.0 + * \notes vip_destroy should be called in the same thread as vip_init. + */ +VIP_API +vip_status_e vip_destroy( + void + ); + +/*! \brief Queries hardware caps information. This function shold be called after calling vip_init. + *\param property, the query property enum. + *\param size, the size of value buffer. + *\param value, the value buffer of returns. + * \ingroup group_global +*/ +VIP_API +vip_status_e vip_query_hardware( + IN vip_query_hardware_property_e property, + IN vip_uint32_t size, + OUT void *value + ); + +/*! \brief Create a input or output buffer with specified parameters. + *\details The buffer object always takes [w, h, c, n] order, + there is no padding/hole between lines/slices/batches. + *\param [in] create_param The pointer to \ref vip_buffer_create_params_t structure. + *\param [in] size_of_param The size of create_param pointer. + *\param [out] buffer An opaque handle for the new buffer object if the request is executed successfully. + *\return \ref vip_status_e + *\ingroup group_buffer + *\version 1.0 + */ +VIP_API +vip_status_e vip_create_buffer( + IN vip_buffer_create_params_t *create_param, + IN vip_uint32_t size_of_param, + OUT vip_buffer *buffer + ); + +/* +@brief Create a buffer used by the network's input and output. + use vip_create_buffer_from_handle function. +@param create_param The parametes of buffer be created. 
+@param size_of_param The size of create paramters. +@param buffer The returns buffer object. +*/ +VIP_API +vip_status_e vip_create_buffer_cache( + IN vip_buffer_create_params_t *create_param, + IN vip_uint32_t size_of_param, + OUT vip_buffer *buffer + ); + +/*! \brief Create a buffer from user contiguous or scatter non-contiguous physical address. + the vip_buffer created by this APi doesn't support flush CPU cache in driver. + So the physical memory should be a non-cache buffer or flush CPU on Host control. + not map user space logical on Linux. + *\param [in] create_param The pointer to \ref vip_buffer_create_params_t structure. + *\param [in] physical_table Physical address table of VIP. should be wraped for VIP hardware. + *\param [in] size_table The size of physical memory for each physical_table element. + *\param [in] physical_num The number of physical table element. + physical_num is 1 when create buffer from contiguous phyiscal. + *\param [out] buffer. vip lite buffer object. + *\return \ref vip_status_e +*\ingroup group_buffer +*/ +VIP_API +vip_status_e vip_create_buffer_from_physical( + IN const vip_buffer_create_params_t *create_param, + IN const vip_address_t *physical_table, + IN const vip_uint32_t *size_table, + IN vip_uint32_t physical_num, + OUT vip_buffer *buffer + ); + +/*! \brief Create a vip buffer with specified parameters. + The vip_buffer can be used to input, output, memory pool and so on. + NOTE: driver will operation CPU cache when call vip_flush_buffer API. + application should call vip_flush_buffer API if the memory handle have CPU cache. + after write data into this buffer, APP should call vip_flush_buffer(VIP_BUFFER_OPER_TYPE_FLUSH) + before CPU read date from this buffer. APP should call vip_flush_buffer(VIP_BUFFER_OPER_TYPE_INVALIDATE) + *\ when MMU disabled, create buffer from a contiguous physical memory. + *\ when MMU enabled, create buffer from a contiguous physical memory or + logical address(convert to physical in kenrel pace). + *\details The buffer object always takes [w, h, c, n] order, + there is no padding/hole between lines/slices/batches. + *\param [in] create_param The pointer to \ref vip_buffer_create_params_t structure. + *\param [in] handle_logical The logical address of the handle. + create vip buffer from the logical address. + *\param [in] the handle_size should be aligned to 64byte(vpmdCPU_CACHE_LINE_SIZE) for easy flash CPU cache. + *\param [out] buffer. vip lite buffer object. + *\return \ref vip_status_e + *\ingroup group_buffer + *\version 1.1 +*/ +VIP_API +vip_status_e vip_create_buffer_from_handle( + IN const vip_buffer_create_params_t *create_param, + IN const vip_ptr handle_logical, + IN vip_uint32_t handle_size, + OUT vip_buffer *buffer + ); + +/*! \brief Create a vip buffer from user fd(file descriptor). + only support create buffer from dma-buf on Linux. + the vip_buffer created by this APi doesn't support flush CPU cache in driver. + So the dma-buf should be a non-cache buffer or flush CPU on Host control. + *\param [in] create_param The pointer to \ref vip_buffer_create_params_t structure. + *\param [in] fd user memory file descriptor. + *\param [in] memory_size The size of user memory. + the handle_size should be aligned to 64byte(vpmdCPU_CACHE_LINE_SIZE) for easy flash CPU cache. + *\param [out] buffer. vip lite buffer object. 
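/*
 * Minimal bring-up sketch for vip_init and vip_create_buffer. The 224x224x3x1
 * UINT8 shape is an example value; VIP_BUFFER_FORMAT_UINT8 and
 * VIP_BUFFER_QUANTIZE_NONE are assumed to be defined earlier in this header
 * (they are spelled this way in the v1.13 copy of vip_lite.h), and any
 * create-params fields not set here are left zero-initialized.
 */
static vip_buffer example_create_input_buffer(void)
{
    vip_buffer buffer = VIP_NULL;
    vip_buffer_create_params_t params = { 0 };

    params.num_of_dims  = 4;                        /* buffers take [w, h, c, n] order */
    params.sizes[0]     = 224;
    params.sizes[1]     = 224;
    params.sizes[2]     = 3;
    params.sizes[3]     = 1;
    params.data_format  = VIP_BUFFER_FORMAT_UINT8;  /* see vip_buffer_format_e */
    params.quant_format = VIP_BUFFER_QUANTIZE_NONE; /* see vip_buffer_quantize_format_e */

    if (vip_init() != VIP_SUCCESS) {                /* power on VIP and set up the driver */
        return VIP_NULL;
    }
    if (vip_create_buffer(&params, sizeof(params), &buffer) != VIP_SUCCESS) {
        vip_destroy();
        return VIP_NULL;
    }
    return buffer;
}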
+ *\return \ref vip_status_e + *\ingroup group_buffer +*/ +VIP_API +vip_status_e vip_create_buffer_from_fd( + IN const vip_buffer_create_params_t *create_param, + IN vip_uint32_t fd, + IN vip_uint32_t memory_size, + OUT vip_buffer *buffer + ); + +/*! \brief Destroy a buffer object which was created before. + *\param [in] buffer The opaque handle of buffer to be destroyed. + *\return \ref vip_status_e + *\ingroup group_buffer + *\version 1.0 +*/ +VIP_API +vip_status_e vip_destroy_buffer( + IN vip_buffer buffer + ); + +/*! \brief Map a buffer to get the CPU accessible address for read or write + *\param [in] buffer The handle of buffer to be mapped. + *\return A pointer that application can use to read or write the buffer data. + *\ingroup group_buffer + *\version 1.0 +*/ +VIP_API +void * vip_map_buffer( + IN vip_buffer buffer + ); + +/*! \brief Unmap a buffer which was mapped before. + *\param [in] buffer The handle of buffer to be unmapped. + *\return \ref vip_status_e + *\ingroup group_buffer + *\version 1.0 +*/ +VIP_API +vip_status_e vip_unmap_buffer( + IN vip_buffer buffer + ); + +/*! \brief Get the size of bytes allocated for the buffer. + *\param [in] buffer The handle of buffer to be queried. + *\return \ref the size of bytes + *\ingroup group_buffer + *\version 1.0 +*/ +VIP_API +vip_uint32_t vip_get_buffer_size( + IN vip_buffer buffer + ); + +/*! \brief operation the vip buffer CPU chace. flush, invalidate cache. + You should call vip_flush_buffer to flush buffer for input. + and invalidate buffer for network's output if these memories with CPU cache. +*\param buffer The vip buffer object. +*\param the type of this operation. see vip_buffer_operation_type_e. +*\ingroup group_buffer +*/ +VIP_API +vip_status_e vip_flush_buffer( + IN vip_buffer buffer, + IN vip_buffer_operation_type_e type + ); + +/*! \brief Create a network object from the given binary data. + *\details The binary is generated by the binary graph generator and it's a blob binary. + *\VIP lite Driver could interprete it to create a network object. + *\param [in] data The pointer to the binary graph. + it can be a file path or a memory pointer, depending on type. + *\param [in] size_of_data The byte size of data object. the byte size of NBG buffer. + You can ignore it if create network form fil path. + *\param [in] type how to create a network object. please refer to vip_create_network_type_e enum. + *\param [out] network An opaque handle to the new network object if the request is executed successfully + *\return \ref vip_status_e + *\ingroup group_network + *\version 1.0 + */ +VIP_API +vip_status_e vip_create_network( + IN const void *data, + IN vip_uint32_t size_of_data, + IN vip_create_network_type_e type, + OUT vip_network *network + ); + +/* +@brief Duplicate a network for sharing weight. + The original network can't be destroy if the dup network is running or will be run later. +@param data, NBG file path, the buffer of NBG + different data according to type(vip_dup_network_type_e) is from file/memory/flash. + The data object can be set to VIP_NULL when type is VIP_DUP_NETWORK_FROM_NETWORK. +@param size_of_data, the bytes size of data. the byte size of NBG buffer. + the size can be set to 0 when *data is NBG file path. +@param type, vip_dup_network_type_e. +*\param network, original network to be dup. +*\param dup_network, network object which created by duplicated. 
+*/ +VIP_API +vip_status_e vip_dup_network( + IN const void *data, + IN vip_uint32_t size_of_data, + IN vip_dup_network_type_e type, + IN vip_network network, + OUT vip_network *dup_network + ); + +/*! \brief Destroy a network object + *\details Release all resources allocated for this network. + *\param [in] network The opaque handle to the network to be destroyed + *\return \ref vip_status_e + *\ingroup group_network + *\version 1.0 + */ +VIP_API +vip_status_e vip_destroy_network( + IN vip_network network + ); + +/*! \brief Configure network property. configure network. this API should be called before + calling vip_prepare_network. + *\details Configure network's layer inputs/outputs information + *\param [in] network A property \ref vip_network_property_e to be configuied. + *\return \ref vip_status_e + */ +VIP_API +vip_status_e vip_set_network( + IN vip_network network, + IN vip_enum property, + IN void *value + ); + +/*! \brief Query a property of the network object. + *\details User can use this API to get any properties from a network. + *\param [in] network The opaque handle to the network to be queried + *\param [in] property A property \ref vip_network_property_e to be queried. + *\param [out] value A pointer to memory to store the return value, + different property could return different type/size of value. + * please see comment of \ref vip_network_property_e for detail. + *\return \ref vip_status_e + *\ingroup group_network + *\version 1.0 + */ +VIP_API +vip_status_e vip_query_network( + IN vip_network network, + IN vip_enum property, + OUT void *value + ); + +/*! \brief Prepare a network to run on VIP. + *\details This function only need to be called once to prepare a network and + make it ready to execute on VIP hardware. + * It would do all heavy-duty work, including allocate internal memory resource for this network, + deploy all operation's resource + * to internal memory pool, allocate/generate command buffer for this network, + patch command buffer for the resource in the internal memory + * allocations. If this function is called more than once, driver will silently ignore it. + If this function is executed successfully, this network is prepared. + *\param [in] network The opaque handle to the network which need to be prepared. + *\return \ref vip_status_e + *\ingroup group_network + *\version 1.0 + */ +VIP_API +vip_status_e vip_prepare_network( + IN vip_network network + ); + +/*! \brief Query a property of a specific input of a given network. + *\details The specified input/property/network must be valid, + otherwise VIP_ERROR_INVALID_ARGUMENTS will be returned. + *\param [in] network The opaque handle to the network to be queried + *\param [in] index Specify which input to be queried in case there are multiple inputs in the network + *\param [in] property Specify which property application wants to know, see \ref vip_buffer_property_e + *\param [out] value Returned value, the details type/size, please refer to the comment of + \ref vip_input_property_e + *\return \ref vip_status_e + *\ingroup group_network + *\version 1.0 + */ +VIP_API +vip_status_e vip_query_input( + IN vip_network network, + IN vip_uint32_t index, + IN vip_enum property, + OUT void *value + ); + +/*! \brief Query a property of a specific output of a given network. + *\details The specified output/property/network must be valid, + otherwise VIP_ERROR_INVALID_ARGUMENTS will be returned. 
+ *\param [in] network The opaque handle to the network to be queried + *\param [in] index Specify which output to be queried in case there are multiple outputs in the network + *\param [in] property Specify which property application wants to know, see \ref vip_buffer_property_e + *\param [out] value Returned value, the details type/size, please refer to the comment of + \ref vip_input_property_e + *\return \ref vip_status_e + *\ingroup group_network + *\version 1.0 + */ +VIP_API +vip_status_e vip_query_output( + IN vip_network network, + IN vip_uint32_t index, + IN vip_enum property, + OUT void *value + ); + +/*! \brief Attach an input buffer to the specified index of the network. + *\details All the inputs of the network need to be attached to a valid input buffer before running a network, + otherwise + * VIP_ERROR_MISSING_INPUT_OUTPUT will be returned when calling \ref vip_run_network . + When attaching an input buffer + * to the network, driver would patch the network command buffer to fill in this input buffer address. + This function could be called + * multiple times to let application update the input buffers before next network execution. + The network must be prepared by \ref vip_prepare_network before + * attaching an input. + *\param [in] network The opaque handle to a network which we want to attach an input buffer + *\param [in] index The index specify which input in the network will be set + *\param [in] input The opaque handle to a buffer which will be attached to the network. + *\return \ref vip_status_e + *\ingroup group_network + *\version 1.0 + */ +VIP_API +vip_status_e vip_set_input( + IN vip_network network, + IN vip_uint32_t index, + IN vip_buffer input + ); + +/*! \brief Attach an output buffer to the specified index of the network. + *\details All the outputs of the network need to be attached to a + valid output buffer before running a network, otherwise + * VIP_ERROR_MISSING_INPUT_OUTPUT will be returned when calling \ref vip_run_network . + When attaching an output buffer + * to the network, driver would patch the network command buffer to fill in this output buffer address. + This function could be called + * multiple times to let application update the output buffers before next network execution. + The network must be prepared by \ref vip_prepare_network before + * attaching an output. + *\param [in] network The opaque handle to a network which we want to attach an output buffer + *\param [in] index The index specify which output in the network will be set + *\param [in] output The opaque handle to a buffer which will be attached to the network. + *\return \ref vip_status_e + *\ingroup group_network + *\version 1.0 + */ +VIP_API +vip_status_e vip_set_output( + IN vip_network network, + IN vip_uint32_t index, + IN vip_buffer output + ); + +/*! \brief. Kick off the network execution and send command buffer of this network to VIP hardware. + *\details This function can be called multiple times. + Every time it's called it would do inference with current attached + * input buffers and output buffers. It would return until VIP finish the execution. + If the network is not ready to execute + * for some reason like not be prepared by \ref vip_prepare_network , + it would fail with status reported. + *\param [in] network The opaque handle to the network to be executed. + *\return \ref vip_status_e + *\ingroup group_network + *\version 1.0 + */ +VIP_API +vip_status_e vip_run_network( + IN vip_network network + ); + +/*! 
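/*
 * End-to-end synchronous inference sketch using the declarations above.
 * "network.nbg", input/output index 0 and VIP_CREATE_NETWORK_FROM_FILE
 * (from vip_create_network_type_e) are example values; in_buf/out_buf are
 * assumed to have been created with vip_create_buffer and filled/consumed
 * by the application.
 */
static vip_status_e example_run_once(vip_buffer in_buf, vip_buffer out_buf)
{
    vip_network network = VIP_NULL;
    vip_status_e status;

    status = vip_create_network("network.nbg", 0, VIP_CREATE_NETWORK_FROM_FILE, &network);
    if (status != VIP_SUCCESS) {
        return status;
    }

    status = vip_prepare_network(network);  /* allocate command buffer and memory pool */
    if (status == VIP_SUCCESS) status = vip_set_input(network, 0, in_buf);
    if (status == VIP_SUCCESS) status = vip_set_output(network, 0, out_buf);
    if (status == VIP_SUCCESS) status = vip_run_network(network);  /* blocks until VIP finishes */

    vip_finish_network(network);   /* release resources allocated by vip_prepare_network */
    vip_destroy_network(network);
    return status;
}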
\brief Finish using this network to do inference. + *\details This function is paired with \ref vip_prepare_network . + It's suggested to be called once after \ref vip_prepare_network called. + * If it's called more than that, it will be silently ignored. + If the network is not prepared but finished is called, it's silently ignored too. + * This function would release all internal memory allocations which are allocated when + the network is prepared. Since the preparation of network takes much time, + * it is suggested that if the network will be still used later, application should not + finish the network unless there is no much system resource remained for other + * networks. The network object is still alive unitl it's destroyed by \ref vip_destroy_network . + *\param [in] network The opaque handle to the network which will be finished. + *\return \ref vip_status_e + *\ingroup group_network + *\version 1.0 + */ +VIP_API +vip_status_e vip_finish_network( + IN vip_network network + ); + +/*! \brief. Kick off the network execution and send command buffer of this network to VIP hardware. +*\details This function is similar to \ref vip_run_network except that it returns + immediately without waiting for HW to complete the commands. +*\param [in] network The opaque handle to the network to be executed. +*\return \ref vip_status_e +*\ingroup group_network +*\version 1.0 +*/ +VIP_API +vip_status_e vip_trigger_network( + IN vip_network network + ); + +/*! \brief. Run tasks in group,these tasks is added by vip_add_network. + The order of executuion of tasks is call vip_add_network. +*\details This function is similar to \ref vip_run_group except that it returns + immediately without waiting for HW to complete the commands. +*\return \ref vip_status_e +*\param group vip_group object +*\param the number of task will be run. + eg: num is 4, the 0, 1, 2, 3 taks index in group will be run(inference). +*\ingroup group_network +*\version 1.0 +*/ +VIP_API +vip_status_e vip_trigger_group( + IN vip_group group, + IN vip_uint32_t num + ); + +/*! \brief. Explicitly wait for HW to finish executing the submitted commands. +*\details This function waits for HW to complete the commands. + This should be called once CPU needs to access the network currently being run. +*\return \ref vip_status_e +*\ingroup group_network +*\version 1.0 +*/ +VIP_API +vip_status_e vip_wait_network( + IN vip_network network + ); + +/*! \brief. Explicitly wait for HW to finish executing the submitted task in group. +*\details This function waits for HW to complete the submitted commands in group. + This should be called once CPU needs to access the group currently being run. +*\return \ref vip_status_e +*\ingroup group_network +*\version 1.0 +*/ +VIP_API +vip_status_e vip_wait_group( + IN vip_group group + ); + +/*! \brief. Cancle network running on vip hardware after network is commited. +*\details This function is cancel network running on vip hardware. +*\return \ref vip_status_e +*\ingroup group_network +*\version 1.0 +*/ +VIP_API +vip_status_e vip_cancel_network( + IN vip_network network + ); + +/*! \brief. give user applications more control over power management for VIP cores. +*\details. control VIP core frequency and power status by property. see vip_power_property_e. +*\param ID of the managed device. device_index is 0 if VIP is single core. +*\param perperty Control VIP core frequency and power status by property. see vip_power_property_e. +*\param value The value for vip_power_property_e property. 
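/*
 * Asynchronous variant of the run sketch: trigger, overlap CPU-side work,
 * then wait. The network is assumed to be prepared with inputs and outputs
 * already attached; the commented-out do_cpu_work() is a hypothetical
 * placeholder.
 */
static vip_status_e example_run_async(vip_network network)
{
    vip_status_e status = vip_trigger_network(network);  /* returns without waiting for HW */
    if (status != VIP_SUCCESS) {
        return status;
    }

    /* do_cpu_work();  CPU work overlapped with the VIP execution */

    return vip_wait_network(network);  /* block until the submitted commands complete */
}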
+ Please see vip_power_frequency_t if property is setting to VIP_POWER_PROPERTY_SET_FREQUENCY. +*\return \ref vip_status_e +*\ingroup group_network +*\version 1.0 +*/ +VIP_API +vip_status_e vip_power_management( + IN vip_uint32_t device_index, + IN vip_power_property_e property, + IN void *value + ); + +/*! \brief. Create a vip_group object to run multiple tasks(network or node) + and without interrupt between each task. +*\return \ref vip_status_e +*\param count The maximum number of tasks supports by this group. +*\param group Return vip_group object be created. +*\version 1.0 +*/ +VIP_API +vip_status_e vip_create_group( + IN vip_uint32_t count, + OUT vip_group *group + ); + +/*! \brief. Destroy group object which created by vip_create_group. +*\return \ref vip_status_e +*\param group vip_group object/ +*\version 1.0 +*/ +VIP_API +vip_status_e vip_destroy_group( + IN vip_group group + ); + +/* +@brief set group property. configure group. this API should be called before calling vip_run_group. +@param group The group object which created by vip_create_group(). +@param property The property be set. see vip_group_property_e. +@param value The set data. +*/ +VIP_API +vip_status_e vip_set_group( + IN vip_group group, + IN vip_enum property, + IN void *value + ); + +/*! \brief Query a property of the group object. + *\param [in] group The group object which created by vip_create_group(). + *\param [in] property A property \ref vip_group_property_e to be queried. + *\param [out] value A pointer to memory to store the return value, + different property could return different type/size of value. + * please see comment of \ref vip_group_property_e for detail. + *\return \ref vip_status_e + *\ingroup group_network + *\version 1.0 + */ +VIP_API +vip_status_e vip_query_group( + IN vip_group group, + IN vip_enum property, + OUT void *value + ); + +/*! \brief. add a vip_network object into group. +*\return \ref vip_status_e +*\param group vip_group object, network be added into group. +*\param network vip_network added into group. +*\version 1.0 +*/ +VIP_API +vip_status_e vip_add_network( + IN vip_group group, + IN vip_network network + ); + +/*! \brief. run tasks in group. only issue a interrupt after tasks complete. + These tasks is added by vip_add_network. + The order of executuion of tasks is call vip_add_network. +*\return \ref vip_status_e +*\param group vip_group object +*\param the number of task will be run. + eg: num is 4, the 0, 1, 2, 3 taks index in group will be run(inference). +*\version 1.0 +*/ +VIP_API +vip_status_e vip_run_group( + IN vip_group group, + IN vip_uint32_t num + ); + +/*! \brief. change PPU engine parameters. + change local size, global size, global offset and global scale. +*\return \ref vip_status_e +*\param network The network object should be changed. +*\param param PPU parameters +*\param index The index of PPU node, not used. please set to zero. 
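/*
 * Group usage sketch: batch several prepared networks so that only one
 * interrupt is issued after all tasks complete, as described above. net_a
 * and net_b are assumed to be prepared networks with inputs and outputs
 * attached.
 */
static vip_status_e example_run_as_group(vip_network net_a, vip_network net_b)
{
    vip_group group = VIP_NULL;
    vip_status_e status = vip_create_group(2, &group);  /* capacity: 2 tasks */
    if (status != VIP_SUCCESS) {
        return status;
    }

    status = vip_add_network(group, net_a);                            /* task index 0 */
    if (status == VIP_SUCCESS) status = vip_add_network(group, net_b); /* task index 1 */
    if (status == VIP_SUCCESS) status = vip_run_group(group, 2);       /* run tasks 0 and 1 */

    vip_destroy_group(group);
    return status;
}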
+*\ingroup group_network +*\version 1.0 +*/ +VIP_API +vip_status_e vip_set_ppu_param( + IN vip_network network, + IN vip_ppu_param_t *param, + IN vip_uint32_t index + ); + +#ifdef __cplusplus +} +#endif + +#endif diff --git a/viplite-tina/lib/aarch64-none-linux-gnu/v2.0/inc/vip_lite_common.h b/viplite-tina/lib/aarch64-none-linux-gnu/v2.0/inc/vip_lite_common.h new file mode 100644 index 0000000..49d03a9 --- /dev/null +++ b/viplite-tina/lib/aarch64-none-linux-gnu/v2.0/inc/vip_lite_common.h @@ -0,0 +1,240 @@ +/**************************************************************************** +* +* The MIT License (MIT) +* +* Copyright (c) 2017 - 2024 Vivante Corporation +* +* Permission is hereby granted, free of charge, to any person obtaining a +* copy of this software and associated documentation files (the "Software"), +* to deal in the Software without restriction, including without limitation +* the rights to use, copy, modify, merge, publish, distribute, sublicense, +* and/or sell copies of the Software, and to permit persons to whom the +* Software is furnished to do so, subject to the following conditions: +* +* The above copyright notice and this permission notice shall be included in +* all copies or substantial portions of the Software. +* +* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING +* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER +* DEALINGS IN THE SOFTWARE. +* +***************************************************************************** +* +* The GPL License (GPL) +* +* Copyright (C) 2017 - 2024 Vivante Corporation +* +* This program is free software; you can redistribute it and/or +* modify it under the terms of the GNU General Public License +* as published by the Free Software Foundation; either version 2 +* of the License, or (at your option) any later version. +* +* This program is distributed in the hope that it will be useful, +* but WITHOUT ANY WARRANTY; without even the implied warranty of +* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +* GNU General Public License for more details. +* +* You should have received a copy of the GNU General Public License +* along with this program; if not, write to the Free Software Foundation, +* Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. +* +***************************************************************************** +* +* Note: This software is released under dual MIT and GPL licenses. A +* recipient may use this file under the terms of either the MIT license or +* GPL License. If you wish to use only one license not the other, you can +* indicate your decision by deleting one of the above license notices in your +* version of this file. +* +*****************************************************************************/ + +#ifndef _VIP_LITE_COMMON_H +#define _VIP_LITE_COMMON_H + +#ifdef __cplusplus +extern "C" { +#endif + +/*! + *\brief The VIP lite API for Convolution Neural Network application on CPU/MCU/DSP type of embedded environment. + *\details This VIP lite APIs is not thread-safe if vpmdENABLE_MULTIPLE_TASK is set to 0, + user must guarantee to call these APIs in a proper way. 
+ But defines vpmdENABLE_MULTIPLE_TASK 1, VIPLite can support multiple task(multiple thread/process). + and it's thread-safe. + *Memory allocation and file io functions used inside driver internal would depend on working enviroment. + *\defgroup group_global Data Type Definitions and Global APIs + *\brief Data type definition and global APIs that are used in the VIP lite + *\defgroup group_buffer Buffer API + *\brief The API to manage input/output buffers + *\defgroup group_network Network API + *\brief The API to manage networks + */ + +/*! \brief An 8-bit unsigned value. + * \ingroup group_global + * \version 1.0 + */ +typedef unsigned char vip_uint8_t; + +/*! \brief An 16-bit unsigned value. + * \ingroup group_global + * \version 1.0 + */ +typedef unsigned short vip_uint16_t; + +/*! \brief An 32-bit unsigned value. + * \ingroup group_global + * \version 1.0 + */ +typedef unsigned int vip_uint32_t; + +/*! \brief An 64-bit unsigned value. + * \ingroup group_global + * \version 1.0 + */ +typedef unsigned long long vip_uint64_t; + +/*! \brief An 8-bit signed value. + * \ingroup group_global + * \version 1.0 + */ +typedef signed char vip_int8_t; + +/*! \brief An 16-bit signed value. + * \ingroup group_global + * \version 1.0 + */ +typedef signed short vip_int16_t; + +/*! \brief An 32-bit signed value. + * \ingroup group_global + * \version 1.0 + */ +typedef signed int vip_int32_t; + +/*! \brief An 64-bit signed value. + * \ingroup group_global + * \version 1.0 + */ +typedef signed long long vip_int64_t; + +/*! \brief An 8 bit ASCII character. + * \ingroup group_global + * \version 1.0 + */ +typedef char vip_char_t; + +/*! \brief An 32 bit float value. + * \ingroup group_global + * \version 1.0 + */ +typedef float vip_float_t; + +/*! \brief Sets the standard enumeration type size to be a fixed quantity. + * \ingroup group_global + * \version 1.0 + */ +typedef vip_int32_t vip_enum; + +/*! \brief a void pointer. + * \ingroup group_global + * \version 1.0 + */ +typedef void* vip_ptr; + +/*! \brief A 64-bit float value (aka double). + * \ingroup group_basic_features + */ +typedef double vip_float64_t; + +/*! \brief address type. + * \ingroup group_global + * \version 1.0 + */ +typedef unsigned long long vip_address_t; + +/*! \brief A zero value for pointer + *\ingroup group_global + *\version 1.0 + */ +#ifndef VIP_NULL +#define VIP_NULL 0 +#endif + +/***** Helper Macros. *****/ +#define VIP_API + +#define IN +#define OUT + +/*! \brief A invalid value if a property is not avaialbe for the query. + *\ingroup group_global + *\version 1.0 + */ +#define VIP_INVALID_VALUE ~0UL + +/*! \brief A Boolean value. + *\details This allows 0 to be FALSE, as it is in C, and any non-zero to be TRUE. + *\ingroup group_global + *\version 1.0 + */ +typedef enum _vip_bool_e { + /*! \brief The "false" value. */ + vip_false_e = 0, + /*! \brief The "true" value. */ + vip_true_e, +} vip_bool_e; + +/*! \brief The enumeration of all status codes. 
+ * \ingroup group_global + * \version 1.0 + */ +typedef enum _vip_status +{ + /*!< \brief Indicates a FUSA error occurs */ + VIP_ERROR_FUSA = -17, + /*!< \brief Indicates the network hit Not A Number or Infinite error */ + VIP_ERROR_NAN_INF = -16, + /*!< \brief Indicates the network is canceld */ + VIP_ERROR_CANCELED = -15, + /*!< \brief Indicates the hardware is recovery done after hang */ + VIP_ERROR_RECOVERY = -14, + /*!< \brief Indicates the hardware is stoed */ + VIP_ERROR_POWER_STOP = -13, + /*!< \brief Indicates the hardware is in power off status */ + VIP_ERROR_POWER_OFF = -12, + /*!< \brief Indicates the failure */ + VIP_ERROR_FAILURE = -11, + /*!< \brief Indicates the binary is not compatible with the current runtime hardware */ + VIP_ERROR_NETWORK_INCOMPATIBLE = -10, + /*!< \brief Indicates the network is not prepared so current function call can't go through */ + VIP_ERROR_NETWORK_NOT_PREPARED = -9, + /*!< \brief Indicates the network misses either input or output when running the network */ + VIP_ERROR_MISSING_INPUT_OUTPUT = -8, + /*!< \brief Indicates the network binary is invalid */ + VIP_ERROR_INVALID_NETWORK = -7, + /*!< \brief Indicates driver is running out of memory of video memory */ + VIP_ERROR_OUT_OF_MEMORY = -6, + /*!< \brief Indicates there is no enough resource */ + VIP_ERROR_OUT_OF_RESOURCE = -5, + /*!< \brief Indicates it's supported by driver implementation */ + VIP_ERROR_NOT_SUPPORTED = -4, + /*!< \brief Indicates some arguments are not valid */ + VIP_ERROR_INVALID_ARGUMENTS = -3, + /*!< \brief Indicates there are some IO related error */ + VIP_ERROR_IO = -2, + /*!< \brief Indicates VIP timeout, could be VIP stuck somewhere */ + VIP_ERROR_TIMEOUT = -1, + /*!< \brief Indicates the execution is successfuly */ + VIP_SUCCESS = 0, +} vip_status_e; + +#ifdef __cplusplus +} +#endif + +#endif diff --git a/viplite-tina/lib/aarch64-none-linux-gnu/v2.0/libNBGlinker.so b/viplite-tina/lib/aarch64-none-linux-gnu/v2.0/libNBGlinker.so new file mode 100644 index 0000000..7b0907c Binary files /dev/null and b/viplite-tina/lib/aarch64-none-linux-gnu/v2.0/libNBGlinker.so differ diff --git a/viplite-tina/lib/aarch64-none-linux-gnu/v2.0/libVIPhal.so b/viplite-tina/lib/aarch64-none-linux-gnu/v2.0/libVIPhal.so new file mode 100644 index 0000000..a380370 Binary files /dev/null and b/viplite-tina/lib/aarch64-none-linux-gnu/v2.0/libVIPhal.so differ diff --git a/viplite-tina/lib/glibc-gcc10_2_0/v1.13/inc/vip_lite.h b/viplite-tina/lib/glibc-gcc10_2_0/v1.13/inc/vip_lite.h new file mode 100644 index 0000000..4ef809f --- /dev/null +++ b/viplite-tina/lib/glibc-gcc10_2_0/v1.13/inc/vip_lite.h @@ -0,0 +1,931 @@ +/******************************************************************************\ +|* Copyright (c) 2017-2022 by Vivante Corporation. All Rights Reserved. *| +|* *| +|* The material in this file is confidential and contains trade secrets of *| +|* of Vivante Corporation. This is proprietary information owned by Vivante *| +|* Corporation. No part of this work may be disclosed, reproduced, copied, *| +|* transmitted, or used in any way for any purpose, without the express *| +|* written permission of Vivante Corporation. *| +|* *| +\******************************************************************************/ + +#ifndef _VIP_LITE_H +#define _VIP_LITE_H + +#include "vip_lite_common.h" + +#ifdef __cplusplus +extern "C" { +#endif + +/*! + *\brief The VIP lite API for Convolution Neural Network application on CPU/MCU/DSP type of embedded environment. 
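/*
 * Status-handling sketch for the vip_status_e codes defined above in
 * vip_lite_common.h: VIP_SUCCESS is 0 and every error code is negative,
 * so callers can simply test against VIP_SUCCESS. Which specific error a
 * given call returns is driver-defined.
 */
static vip_status_e example_startup_and_shutdown(void)
{
    vip_status_e status = vip_init();
    if (status != VIP_SUCCESS) {
        return status;       /* some negative vip_status_e error code */
    }

    /* ... create buffers, networks and run inference here ... */

    return vip_destroy();    /* must be the last VIP lite call */
}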
+ *\details This VIP lite APIs is not thread-safe if vpmdENABLE_MULTIPLE_TASK is set to 0, + user must guarantee to call these APIs in a proper way. + But defines vpmdENABLE_MULTIPLE_TASK 1, VIPLite can support multiple task(multiple thread/process). + and it's thread-safe. + *Memory allocation and file io functions used inside driver internal would depend on working enviroment. + *\defgroup group_global Data Type Definitions and Global APIs + *\brief Data type definition and global APIs that are used in the VIP lite + *\defgroup group_buffer Buffer API + *\brief The API to manage input/output buffers + *\defgroup group_network Network API + *\brief The API to manage networks + */ + +/* !\brief The data format list for buffer + * \ingroup group_buffer + * \version 2.0 + */ +typedef enum _vip_buffer_format_e +{ + /*! \brief A float type of buffer data */ + VIP_BUFFER_FORMAT_FP32 = 0, + /*! \brief A half float type of buffer data */ + VIP_BUFFER_FORMAT_FP16 = 1, + /*! \brief A 8 bit unsigned integer type of buffer data */ + VIP_BUFFER_FORMAT_UINT8 = 2, + /*! \brief A 8 bit signed integer type of buffer data */ + VIP_BUFFER_FORMAT_INT8 = 3, + /*! \brief A 16 bit unsigned integer type of buffer data */ + VIP_BUFFER_FORMAT_UINT16 = 4, + /*! \brief A 16 signed integer type of buffer data */ + VIP_BUFFER_FORMAT_INT16 = 5, + /*! \brief A char type of data */ + VIP_BUFFER_FORMAT_CHAR = 6, + /*! \brief A bfloat 16 type of data */ + VIP_BUFFER_FORMAT_BFP16 = 7, + /*! \brief A 32 bit integer type of data */ + VIP_BUFFER_FORMAT_INT32 = 8, + /*! \brief A 32 bit unsigned signed integer type of buffer */ + VIP_BUFFER_FORMAT_UINT32 = 9, + /*! \brief A 64 bit signed integer type of data */ + VIP_BUFFER_FORMAT_INT64 = 10, + /*! \brief A 64 bit unsigned integer type of data */ + VIP_BUFFER_FORMAT_UINT64 = 11, + /*! \brief A 64 bit float type of buffer data */ + VIP_BUFFER_FORMAT_FP64 = 12, + /*! \brief A signed 4bits tensor */ + VIP_BUFFER_FORMAT_INT4 = 13, + /*! \brief A unsigned 4bits tensor */ + VIP_BUFFER_FORMAT_UINT4 = 14, +} vip_buffer_format_e; + +/* !\brief The quantization format list for buffer data + * \ingroup group_buffer + * \version 1.0 + */ +typedef enum _vip_buffer_quantize_format_e +{ + /*! \brief Not quantized format */ + VIP_BUFFER_QUANTIZE_NONE = 0, + /*! \brief A quantization data type which specifies the fixed point position for whole tensor. */ + VIP_BUFFER_QUANTIZE_DYNAMIC_FIXED_POINT = 1, + /*! \brief A quantization data type which has scale value and zero point to match with TF and + Android NN API for whole tensor. */ + VIP_BUFFER_QUANTIZE_TF_ASYMM = 2, + /*! \brief A max vaule support quantize format */ + VIP_BUFFER_QUANTIZE_MAX, +} vip_buffer_quantize_format_e; + +/* !\brief The memory type for vip buffer + * \ingroup group_buffer + * \version 1.2.2 + */ +typedef enum _vip_buffer_memory_type_e +{ + /*! \brief Not memory type. default memory type. + use for allocate video memory from driver calling vip_create_buffer. + */ + VIP_BUFFER_MEMORY_TYPE_DEFAULT = 0x000, + /*! \brief Create a VIP buffer from the Host (logical, physical). */ + VIP_BUFFER_MEMORY_TYPE_HOST = 0x001, + /*! \brief Create a secure memory, sucure mode is not thread safe. */ + VIP_BUFFER_MEMORY_TYPE_SECURE = 0x002, + /*! \brief Create a VIP buffer from DMA_BUF */ + VIP_BUFFER_MEMORY_TYPE_DMA_BUF = 0x003, + /*! 
\brief The max memory type */ + VIP_BUFFER_MEMORY_TYPE_MAX, +} vip_buffer_memory_type_e; + +/* \brief The list of create network type + * \ingroup group_network + * \version 1.0 + */ +typedef enum _vip_create_network_type_e +{ + /*!< \brief NONE */ + VIP_CREATE_NETWORK_FROM_NONE = 0x00, + /*!< \brief Create network from a file path */ + VIP_CREATE_NETWORK_FROM_FILE = 0x01, + /*!< \brief Create network from buffer, NBG has been loaded in this buffer before */ + VIP_CREATE_NETWORK_FROM_MEMORY = 0x02, + /*!< \brief Create network from flash */ + VIP_CREATE_NETWORK_FROM_FLASH = 0x04, + + /* network work mode is not thread safe */ + /*!< \brief specify network work in Normal mode when creating network */ + VIP_CREATE_NETWORK_MODE_NORMAL = 0x10, + /*!< \brief specify network work in Secure mode when creating network */ + VIP_CREATE_NETWORK_MODE_SECURE = 0x20, + VIP_CREATE_NETWORK_MAX, +} vip_create_network_type_e; + +/* \brief An enumeration property that specifies which power management operation to execute. + * \ingroup group_network + * \version 1.2 + */ +typedef enum _vip_power_property_e +{ + VIP_POWER_PROPERTY_NONE = 0x0000, + /*!< \brief specify the VIP frequency */ + VIP_POWER_PROPERTY_SET_FREQUENCY = 0x0001, + /*!< \brief power off VIP hardware */ + VIP_POWER_PROPERTY_OFF = 0x0002, + /*!< \brief power on VIP hardware */ + VIP_POWER_PROPERTY_ON = 0x0004, + /*!< \brief stop VIP perform network */ + VIP_POWER_PROPERTY_STOP = 0x0008, + /*!< \brief start VIP perform network */ + VIP_POWER_PROPERTY_START = 0x0010, + VIP_POWER_PROPERTY_MAX +} vip_power_property_e; + +/* \brief query hardware caps property + */ +typedef enum _vip_query_hardware_property_e +{ + /*!< \brief the customer ID of this VIP/NPU, the returned value is vip_uint32_t */ + VIP_QUERY_HW_PROP_CID = 0, + /*!< \brief the number of deivce, the returned value is vip_uint32_t */ + VIP_QUERY_HW_PROP_DEVICE_COUNT = 1, + /*!< \brief the number of core count for each device, the returned value is vip_uint32_t * device_count */ + VIP_QUERY_HW_PROP_CORE_COUNT_EACH_DEVICE = 2, + VIP_QUERY_HW_PROP_MAX, +} vip_query_hardware_property_e; + +/* \brief The list of properties of a network. + * \ingroup group_network + * \version 1.0 + */ +typedef enum _vip_network_property_e +{ + /* query network */ + /*!< \brief The number of layers in this network, the returned value is vip_uint32_t */ + VIP_NETWORK_PROP_LAYER_COUNT = 0, + /*!< \brief The number of input in this network, the returned value is vip_uint32_t*/ + VIP_NETWORK_PROP_INPUT_COUNT = 1, + /*!< \brief The number of output in this network, the returned value is vip_uint32_t*/ + VIP_NETWORK_PROP_OUTPUT_COUNT = 2, + /*!< \brief The network name, the returned value is vip_char_t[64] */ + VIP_NETWORK_PROP_NETWORK_NAME = 3, + /*!< \brief address information of wait-link, command, input-output buffers for viplite-Agent trigger, + not used if only use viplite. the returned value is \ref vip_address_info_t + */ + VIP_NETWORK_PROP_ADDRESS_INFO = 4, + /*!< \brief read interruput irq register value for supporting viplite-Broker + not used if only use viplite. 
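/*
 * Topology-query sketch using vip_query_hardware (declared later in this
 * header, to be called after vip_init): read the device count, then the
 * per-device core counts. The fixed bound of 8 devices is an arbitrary
 * example value for this sketch.
 */
static void example_query_topology(void)
{
    vip_uint32_t device_count = 0;
    vip_uint32_t core_counts[8] = { 0 };

    if (vip_query_hardware(VIP_QUERY_HW_PROP_DEVICE_COUNT,
                           sizeof(device_count), &device_count) != VIP_SUCCESS) {
        return;
    }
    if (device_count > 8) {
        device_count = 8;
    }
    /* the returned value is vip_uint32_t * device_count */
    if (vip_query_hardware(VIP_QUERY_HW_PROP_CORE_COUNT_EACH_DEVICE,
                           sizeof(vip_uint32_t) * device_count, core_counts) != VIP_SUCCESS) {
        return;
    }
    /* core_counts[i] now holds the core count of device i */
}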
the returned value is vip_uint32_t*/ + VIP_NETWORK_PROP_READ_REG_IRQ = 5, + /*!< \brief The size of memory pool, the returned value is vip_uint32_t*/ + VIP_NETWORK_PROP_MEMORY_POOL_SIZE = 6, + + /*!< \brief The network profling data, the returned value is vip_inference_profile_t */ + VIP_NETWORK_PROP_PROFILING = 7, + + /*!< \brief The the number of core for this network, the returned value is vip_uint8_t */ + VIP_NETWORK_PROP_CORE_COUNT = 8, + + /* set network */ + /* set network property should be called before vip_prepare_network */ + /*!< \brief set network to enable change PPU parameters feature for this vip_network. + the vip_set_network value param used to indicates disable or enable this feature. + vip_uint32_t *value is 1, enable change ppu param. + vip_uint32_t *value is 0, disable change ppu param */ + VIP_NETWORK_PROP_CHANGE_PPU_PARAM = 64, + /*!< \brief set memory pool buffer for network. networks can share a memory pool buffer. + the set value is \ref vip_buffer */ + VIP_NETWORK_PROP_SET_MEMORY_POOL = 65, + /*!< \brief set device id for network. networks can be submitted this vip device. */ + VIP_NETWORK_PROP_SET_DEVICE_ID = 66, + /*!< \brief set priority of network. 0 ~ 255, 0 indicates the lowest priority. */ + VIP_NETWORK_PROP_SET_PRIORITY = 67, + /*!< \brief set time out of network. unit: ms */ + VIP_NETWORK_PROP_SET_TIME_OUT = 68, +} vip_network_property_e; + +/* \brief The list of properties of a group. + * \ingroup group_network + * \version 1.0 + */ +typedef enum _vip_group_property_e +{ + /* query group */ + /*!< \brief The group profling data, the returned value is vip_inference_profile_t */ + VIP_GROUP_PROP_PROFILING = 0, + + /* set group */ + /* set group property should be called before vip_add_network */ + /*!< \brief set device id for group. networks in group can be submitted this vip device. + * This prop should be called before vip_prepare_network */ + VIP_GROUP_PROP_SET_DEVICE_ID = 64, + /*!< \brief set time out of network in group. unit: ms */ + VIP_GROUP_PROP_SET_TIME_OUT = 68, +} vip_group_property_e; + +/* \brief The list of property of an input or output. + * \ingroup group_network + * \version 1.0 + */ +typedef enum _vip_buffer_property_e +{ + /*!< \brief The quantization format, the returned value is \ref vip_buffer_quantize_format_e */ + VIP_BUFFER_PROP_QUANT_FORMAT = 0, + /*!< \brief The number of dimension for this input, the returned value is vip_uint32_t */ + VIP_BUFFER_PROP_NUM_OF_DIMENSION = 1, + /*!< \brief The size of each dimension for this input, the returned value is vip_uint32_t * num_of_dim */ + VIP_BUFFER_PROP_SIZES_OF_DIMENSION = 2, + /*!< \brief The data format for this input, the returned value is \ref vip_buffer_format_e */ + VIP_BUFFER_PROP_DATA_FORMAT = 3, + /*!< \brief The position of fixed point for dynamic fixed point, the returned value is vip_uint8_t */ + VIP_BUFFER_PROP_FIXED_POINT_POS = 4, + /*!< \brief The scale value for TF quantization format, the returned value is vip_float_t */ + VIP_BUFFER_PROP_TF_SCALE = 5, + /*!< \brief The zero point for TF quantization format, the returned value is vip_uint32_t */ + VIP_BUFFER_PROP_TF_ZERO_POINT = 6, + /*!< \brief The name for network's inputs and outputs, the returned value is vip_char_t[64] */ + VIP_BUFFER_PROP_NAME = 7, +} vip_buffer_property_e; + +/* \brief The list of property of operation vip_buffer type. 
+ * \ingroup group_network + * \version 1.3 + */ +typedef enum _vip_buffer_operation_type_e +{ + /*!< \brief None operation */ + VIP_BUFFER_OPER_TYPE_NONE = 0, + /*!< \brief Flush the vip buffer */ + VIP_BUFFER_OPER_TYPE_FLUSH = 1, + /*!< \brief invalidate the vip buffer */ + VIP_BUFFER_OPER_TYPE_INVALIDATE = 2, + VIP_BUFFER_OPER_TYPE_MAX, +} vip_buffer_operation_type_e; + +typedef struct _vip_network *vip_network; +typedef struct _vip_buffer *vip_buffer; +typedef struct _vip_group *vip_group; + + +/*! \brief Input parameter for vip_create_buffer + */ +typedef struct _vip_buffer_create_params_t +{ + /*!< \brief The number of dimensions specified in *sizes*/ + vip_uint32_t num_of_dims; + /*!< \brief The pointer to an array of dimension */ + vip_uint32_t sizes[6]; + /*!< \brief Data format for the tensor, see \ref vip_buffer_format_e */ + vip_enum data_format; + /*!< \brief Quantized format see \ref vip_buffer_quantize_format_e . */ + vip_enum quant_format; + /*\ref vip_uint32_t + */ +VIP_API +vip_uint32_t vip_get_version( + void + ); + +/*! \brief Initial VIP Hardware, VIP lite software environment and power on VIP hardware. + * \details when vpmdENABLE_MULTIPLE_TASK set to 0, + This function should be only called once before using VIP hardware if. + when vpmdENABLE_MULTIPLE_TASK set to 1, + vip_init can be called multiple times, but should paired with vip_destroy. + vip_init should be called in every process. + only need call vip_init once in multi-thread. + * VIP lite driver would construct some global variable for this call.Also + * it will reset VIP and initialize VIP hardware to a ready state to accept jobs. + * \return \ref vip_status_e + * \ingroup group_global + * \version 1.0 + */ +VIP_API +vip_status_e vip_init( + void + ); + +/*! \brief Terminate VIP lite driver and shut down VIP hardware. + * \details This function should be the last function called by application. + vip_destroy should paired with vip_init called. + * After it, no VIP lite API should be called except \ref vip_init + * \return \ref vip_status_e + * \ingroup group_global + * \version 1.0 + * \notes vip_destroy should be called in the same thread as vip_init. + */ +VIP_API +vip_status_e vip_destroy( + void + ); + +/*! \brief Queries hardware caps information. This function shold be called after calling vip_init. +*\param property, the query property enum. +*\param size, the size of value buffer. +*\param value, the value buffer of returns. +*/ +VIP_API +vip_status_e vip_query_hardware( + IN vip_query_hardware_property_e property, + IN vip_uint32_t size, + OUT void *value + ); + +/*! \brief Create a input or output buffer with specified parameters. + *\details The buffer object always takes [w, h, c, n] order, + there is no padding/hole between lines/slices/batches. + *\param [in] create_param The pointer to \ref vip_buffer_create_params_t structure. + *\param [in] size_of_param The size of create_param pointer. + *\param [out] buffer An opaque handle for the new buffer object if the request is executed successfully. + *\return \ref vip_status_e + *\ingroup group_buffer + *\version 1.0 + */ +VIP_API +vip_status_e vip_create_buffer( + IN vip_buffer_create_params_t *create_param, + IN vip_uint32_t size_of_param, + OUT vip_buffer *buffer + ); + +/*! \brief Create a buffer from user contiguous or scatter non-contiguous physical address. + the vip_buffer created by this APi doesn't support flush CPU cache in driver. + So the physical memory should be a non-cache buffer or flush CPU on Host control. 
+ not map user space logical on Linux. + *\param [in] create_param The pointer to \ref vip_buffer_create_params_t structure. + *\param [in] physical_table Physical address table. should be wraped for VIP hardware. + *\param [in] size_table The size of physical memory for each physical_table element. + *\param [in] physical_num The number of physical table element. + physical_num is 1 when create buffer from contiguous phyiscal. + *\param [out] buffer. vip lite buffer object. + *\return \ref vip_status_e +*/ +VIP_API +vip_status_e vip_create_buffer_from_physical( + IN const vip_buffer_create_params_t *create_param, + IN const vip_address_t *physical_table, + IN const vip_uint32_t *size_table, + IN vip_uint32_t physical_num, + OUT vip_buffer *buffer + ); + +/*! \brief Create a vip buffer with specified parameters. + The vip_buffer can be used to input, output, memory pool and so on. + NOTE: driver will operation CPU cache when call vip_flush_buffer API. + application should call vip_flush_buffer API if the memory handle have CPU cache. + after write data into this buffer, APP should call vip_flush_buffer(VIP_BUFFER_OPER_TYPE_FLUSH) + before CPU read date from this buffer. APP should call vip_flush_buffer(VIP_BUFFER_OPER_TYPE_INVALIDATE) + *\ when MMU disabled, create buffer from a contiguous physical memory. + *\ when MMU enabled, create buffer from a contiguous physical memory or + logical address(convert to physical in kenrel pace). + *\details The buffer object always takes [w, h, c, n] order, + there is no padding/hole between lines/slices/batches. + *\param [in] create_param The pointer to \ref vip_buffer_create_params_t structure. + *\param [in] handle_logical The logical address of the handle. + create vip buffer from the logical address. + *\param [in] the handle_size should be aligned to 64byte(vpmdCPU_CACHE_LINE_SIZE) for easy flash CPU cache. + *\param [out] buffer. vip lite buffer object. + *\return \ref vip_status_e + *\ingroup group_buffer + *\version 1.1 + */ +VIP_API +vip_status_e vip_create_buffer_from_handle( + IN const vip_buffer_create_params_t *create_param, + IN const vip_ptr handle_logical, + IN vip_uint32_t handle_size, + OUT vip_buffer *buffer + ); + +/*! \brief Create a vip buffer from user fd(file descriptor). + only support create buffer from dma-buf on Linux. + the vip_buffer created by this APi doesn't support flush CPU cache in driver. + So the dma-buf should be a non-cache buffer or flush CPU on Host control. + *\param [in] create_param The pointer to \ref vip_buffer_create_params_t structure. + *\param [in] fd user memory file descriptor. + *\param [in] memory_size The size of user memory. + the handle_size should be aligned to 64byte(vpmdCPU_CACHE_LINE_SIZE) for easy flash CPU cache. + *\param [out] buffer. vip lite buffer object. + *\return \ref vip_status_e +*/ +VIP_API +vip_status_e vip_create_buffer_from_fd( + IN const vip_buffer_create_params_t *create_param, + IN vip_uint32_t fd, + IN vip_uint32_t memory_size, + OUT vip_buffer *buffer + ); + +/*! \brief Destroy a buffer object which was created before. + *\param [in] buffer The opaque handle of buffer to be destroyed. + *\return \ref vip_status_e + *\ingroup group_buffer + *\version 1.0 + */ +VIP_API +vip_status_e vip_destroy_buffer( + IN vip_buffer buffer + ); + +/*! \brief Map a buffer to get the CPU accessible address for read or write + *\param [in] buffer The handle of buffer to be mapped. + *\return A pointer that application can use to read or write the buffer data. 
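/*
 * Sketch of wrapping application memory with vip_create_buffer_from_handle.
 * As noted above, the handle size is kept 64-byte (vpmdCPU_CACHE_LINE_SIZE)
 * aligned; the 224x224x3x1 UINT8 shape is an example value and
 * posix_memalign merely stands in for any aligned allocator.
 */
#include <stdlib.h>   /* posix_memalign, free */

static vip_buffer example_wrap_host_memory(void)
{
    vip_buffer buffer = VIP_NULL;
    vip_buffer_create_params_t params = { 0 };
    vip_uint32_t size = 224 * 224 * 3 * 1;           /* bytes of a UINT8 tensor */
    void *handle = VIP_NULL;

    params.num_of_dims  = 4;
    params.sizes[0] = 224; params.sizes[1] = 224; params.sizes[2] = 3; params.sizes[3] = 1;
    params.data_format  = VIP_BUFFER_FORMAT_UINT8;
    params.quant_format = VIP_BUFFER_QUANTIZE_NONE;

    size = (size + 63u) & ~63u;                      /* round up to the cache-line size */
    if (posix_memalign(&handle, 64, size) != 0) {
        return VIP_NULL;
    }
    if (vip_create_buffer_from_handle(&params, handle, size, &buffer) != VIP_SUCCESS) {
        free(handle);
        return VIP_NULL;
    }
    /* the application still owns `handle`; free it after vip_destroy_buffer(buffer) */
    return buffer;
}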
+ *\ingroup group_buffer + *\version 1.0 + */ +VIP_API +void * vip_map_buffer( + IN vip_buffer buffer + ); + +/*! \brief Unmap a buffer which was mapped before. + *\param [in] buffer The handle of buffer to be unmapped. + *\return \ref vip_status_e + *\ingroup group_buffer + *\version 1.0 + */ +VIP_API +vip_status_e vip_unmap_buffer( + IN vip_buffer buffer + ); + +/*! \brief Get the size of bytes allocated for the buffer. + *\param [in] buffer The handle of buffer to be queried. + *\return \ref the size of bytes + *\ingroup group_buffer + *\version 1.0 + */ +VIP_API +vip_uint32_t vip_get_buffer_size( + IN vip_buffer buffer + ); + +/*! \brief operation the vip buffer CPU chace. flush, invalidate cache. + You should call vip_flush_buffer to flush buffer for input. + and invalidate buffer for network's output if these memories with CPU cache. +*\param buffer The vip buffer object. +*\param the type of this operation. see vip_buffer_operation_type_e. +*/ +VIP_API +vip_status_e vip_flush_buffer( + IN vip_buffer buffer, + IN vip_buffer_operation_type_e type + ); + +/*! \brief Create a network object from the given binary data. + *\details The binary is generated by the binary graph generator and it's a blob binary. + *\VIP lite Driver could interprete it to create a network object. + *\param [in] data The pointer to the binary graph. it can be a file path or a memory pointer, depending on type. + *\param [in] size_of_data The byte size of data object. the byte size of NBG buffer. + You can ignore it if create network form fil path. + *\param [in] type how to create a network object. please refer to vip_create_network_type_e enum. + *\param [out] network An opaque handle to the new network object if the request is executed successfully + *\return \ref vip_status_e + *\ingroup group_network + *\version 1.0 + */ +VIP_API +vip_status_e vip_create_network( + IN const void *data, + IN vip_uint32_t size_of_data, + IN vip_enum type, + OUT vip_network *network + ); + +/*! \brief Destroy a network object + *\details Release all resources allocated for this network. + *\param [in] network The opaque handle to the network to be destroyed + *\return \ref vip_status_e + *\ingroup group_network + *\version 1.0 + */ +VIP_API +vip_status_e vip_destroy_network( + IN vip_network network + ); + +/*! \brief Configure network property. configure network. this API should be called before calling vip_prepare_network. + *\details Configure network's layer inputs/outputs information + *\param [in] network A property \ref vip_network_property_e to be configuied. + *\return \ref vip_status_e + */ +VIP_API +vip_status_e vip_set_network( + IN vip_network network, + IN vip_enum property, + IN void *value + ); + +/*! \brief Query a property of the network object. + *\details User can use this API to get any properties from a network. + *\param [in] network The opaque handle to the network to be queried + *\param [in] property A property \ref vip_network_property_e to be queried. + *\param [out] value A pointer to memory to store the return value, + different property could return different type/size of value. + * please see comment of \ref vip_network_property_e for detail. + *\return \ref vip_status_e + *\ingroup group_network + *\version 1.0 + */ +VIP_API +vip_status_e vip_query_network( + IN vip_network network, + IN vip_enum property, + OUT void *value + ); + +/*! \brief Prepare a network to run on VIP. + *\details This function only need to be called once to prepare a network and make it ready to execute on VIP hardware. 
+ * It would do all heavy-duty work, including allocate internal memory resource for this network, + deploy all operation's resource + * to internal memory pool, allocate/generate command buffer for this network, + patch command buffer for the resource in the internal memory + * allocations. If this function is called more than once, driver will silently ignore it. + If this function is executed successfully, this network is prepared. + *\param [in] network The opaque handle to the network which need to be prepared. + *\return \ref vip_status_e + *\ingroup group_network + *\version 1.0 + */ +VIP_API +vip_status_e vip_prepare_network( + IN vip_network network + ); + +/*! \brief Query a property of a specific input of a given network. + *\details The specified input/property/network must be valid, otherwise VIP_ERROR_INVALID_ARGUMENTS will be returned. + *\param [in] network The opaque handle to the network to be queried + *\param [in] index Specify which input to be queried in case there are multiple inputs in the network + *\param [in] property Specify which property application wants to know, see \ref vip_buffer_property_e + *\param [out] value Returned value, the details type/size, please refer to the comment of \ref vip_input_property_e + *\return \ref vip_status_e + *\ingroup group_network + *\version 1.0 + */ +VIP_API +vip_status_e vip_query_input( + IN vip_network network, + IN vip_uint32_t index, + IN vip_enum property, + OUT void *value + ); + +/*! \brief Query a property of a specific output of a given network. + *\details The specified output/property/network must be valid, otherwise VIP_ERROR_INVALID_ARGUMENTS will be returned. + *\param [in] network The opaque handle to the network to be queried + *\param [in] index Specify which output to be queried in case there are multiple outputs in the network + *\param [in] property Specify which property application wants to know, see \ref vip_buffer_property_e + *\param [out] value Returned value, the details type/size, please refer to the comment of \ref vip_input_property_e + *\return \ref vip_status_e + *\ingroup group_network + *\version 1.0 + */ +VIP_API +vip_status_e vip_query_output( + IN vip_network network, + IN vip_uint32_t index, + IN vip_enum property, + OUT void *value + ); + +/*! \brief Attach an input buffer to the specified index of the network. + *\details All the inputs of the network need to be attached to a valid input buffer before running a network, otherwise + * VIP_ERROR_MISSING_INPUT_OUTPUT will be returned when calling \ref vip_run_network . + When attaching an input buffer + * to the network, driver would patch the network command buffer to fill in this input buffer address. + This function could be called + * multiple times to let application update the input buffers before next network execution. + The network must be prepared by \ref vip_prepare_network before + * attaching an input. + *\param [in] network The opaque handle to a network which we want to attach an input buffer + *\param [in] index The index specify which input in the network will be set + *\param [in] input The opaque handle to a buffer which will be attached to the network. + *\return \ref vip_status_e + *\ingroup group_network + *\version 1.0 + */ +VIP_API +vip_status_e vip_set_input( + IN vip_network network, + IN vip_uint32_t index, + IN vip_buffer input + ); + +/*! \brief Attach an output buffer to the specified index of the network. 
+ *\details All the outputs of the network need to be attached to a valid output buffer before running a network, otherwise + * VIP_ERROR_MISSING_INPUT_OUTPUT will be returned when calling \ref vip_run_network . + When attaching an output buffer + * to the network, driver would patch the network command buffer to fill in this output buffer address. + This function could be called + * multiple times to let application update the output buffers before next network execution. + The network must be prepared by \ref vip_prepare_network before + * attaching an output. + *\param [in] network The opaque handle to a network which we want to attach an output buffer + *\param [in] index The index specify which output in the network will be set + *\param [in] output The opaque handle to a buffer which will be attached to the network. + *\return \ref vip_status_e + *\ingroup group_network + *\version 1.0 + */ +VIP_API +vip_status_e vip_set_output( + IN vip_network network, + IN vip_uint32_t index, + IN vip_buffer output + ); + +/*! \brief. Kick off the network execution and send command buffer of this network to VIP hardware. + *\details This function can be called multiple times. + Every time it's called it would do inference with current attached + * input buffers and output buffers. It would return until VIP finish the execution. + If the network is not ready to execute + * for some reason like not be prepared by \ref vip_prepare_network , + it would fail with status reported. + *\param [in] network The opaque handle to the network to be executed. + *\return \ref vip_status_e + *\ingroup group_network + *\version 1.0 + */ +VIP_API +vip_status_e vip_run_network( + IN vip_network network + ); + +/*! \brief Finish using this network to do inference. + *\details This function is paired with \ref vip_prepare_network . + It's suggested to be called once after \ref vip_prepare_network called. + * If it's called more than that, it will be silently ignored. + If the network is not prepared but finished is called, it's silently ignored too. + * This function would release all internal memory allocations which are allocated when + the network is prepared. Since the preparation of network takes much time, + * it is suggested that if the network will be still used later, application should not + finish the network unless there is no much system resource remained for other + * networks. The network object is still alive unitl it's destroyed by \ref vip_destroy_network . + *\param [in] network The opaque handle to the network which will be finished. + *\return \ref vip_status_e + *\ingroup group_network + *\version 1.0 + */ +VIP_API +vip_status_e vip_finish_network( + IN vip_network network + ); + +/*! \brief. Kick off the network execution and send command buffer of this network to VIP hardware. +*\details This function is similar to \ref vip_run_network except that it returns + immediately without waiting for HW to complete the commands. +*\param [in] network The opaque handle to the network to be executed. +*\return \ref vip_status_e +*\ingroup group_network +*\version 1.0 +*/ +VIP_API +vip_status_e vip_trigger_network( + IN vip_network network + ); + +/*! \brief. Run tasks in group,these tasks is added by vip_add_network. + The order of executuion of tasks is call vip_add_network. +*\details This function is similar to \ref vip_run_group except that it returns + immediately without waiting for HW to complete the commands. 
+/*! \brief Run the tasks in a group; these tasks are added by vip_add_network.
+*    Tasks are executed in the order in which vip_add_network was called.
+*\details This function is similar to \ref vip_run_group except that it returns immediately
+*    without waiting for the hardware to complete the commands.
+*\return \ref vip_status_e
+*\param group The vip_group object.
+*\param num The number of tasks to run.
+*    e.g. if num is 4, the tasks with index 0, 1, 2, 3 in the group will be run (inference).
+*\version 1.0
+*/
+VIP_API
+vip_status_e vip_trigger_group(
+    IN vip_group group,
+    IN vip_uint32_t num
+    );
+
+/*! \brief Explicitly wait for the hardware to finish executing the submitted commands.
+*\details This function waits for the hardware to complete the commands.
+*    It should be called once the CPU needs to access the network currently being run.
+*\return \ref vip_status_e
+*\ingroup group_network
+*\version 1.0
+*/
+VIP_API
+vip_status_e vip_wait_network(
+    IN vip_network network
+    );
+
+/*! \brief Explicitly wait for the hardware to finish executing the submitted tasks in a group.
+*\details This function waits for the hardware to complete the submitted commands in the group.
+*    It should be called once the CPU needs to access the group currently being run.
+*\return \ref vip_status_e
+*\ingroup group_network
+*\version 1.0
+*/
+VIP_API
+vip_status_e vip_wait_group(
+    IN vip_group group
+    );
+
+/*! \brief Cancel a network running on the VIP hardware after the network has been committed.
+*\details This function cancels a network that is running on the VIP hardware.
+*\return \ref vip_status_e
+*\ingroup group_network
+*\version 1.0
+*/
+VIP_API
+vip_status_e vip_cancel_network(
+    IN vip_network network
+    );
+
+/*! \brief Give user applications more control over power management for VIP cores.
+*\details Control the VIP core frequency and power status by property, see vip_power_property_e.
+*\param device_id ID of the managed device.
+*\param property Controls the VIP core frequency and power status, see vip_power_property_e.
+*\param value The value for the vip_power_property_e property.
+*    Please see vip_power_frequency_t if property is set to VIP_POWER_PROPERTY_SET_FREQUENCY.
+*\return \ref vip_status_e
+*\ingroup group_network
+*\version 1.0
+*/
+VIP_API
+vip_status_e vip_power_management(
+    IN vip_uint32_t device_id,
+    IN vip_power_property_e property,
+    IN void *value
+    );
+
+/*! \brief Create a vip_group object to run multiple tasks (network or node)
+*    without interrupts between the tasks.
+*\return \ref vip_status_e
+*\param count The maximum number of tasks supported by this group.
+*\param group Returns the created vip_group object.
+*\version 1.0
+*/
+VIP_API
+vip_status_e vip_create_group(
+    IN vip_uint32_t count,
+    OUT vip_group *group
+    );
+
+/*! \brief Destroy a group object which was created by vip_create_group.
+*\return \ref vip_status_e
+*\param group The vip_group object.
+*\version 1.0
+*/
+VIP_API
+vip_status_e vip_destroy_group(
+    IN vip_group group
+    );
+
+/*
+@brief Set a group property to configure the group. This API should be called before calling vip_run_group.
+@param group The group object which was created by vip_create_group().
+@param property The property to be set, see vip_group_property_e.
+@param value The data to set.
+*/
+VIP_API
+vip_status_e vip_set_group(
+    IN vip_group group,
+    IN vip_enum property,
+    IN void *value
+    );
+
+/*! \brief Query a property of the group object.
+ *\param [in] group The group object which was created by vip_create_group().
+ *\param [in] property A property \ref vip_group_property_e to be queried.
+ *\param [out] value A pointer to memory to store the returned value;
+ *    different properties can return values of different type/size,
+ *    please see the comment of \ref vip_group_property_e for details.
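A non-blocking counterpart of the run flow, sketched under the assumption that a stuck submission surfaces as VIP_ERROR_TIMEOUT from vip_wait_network; the cancel-on-timeout policy is the example's choice, not something this header mandates.

    #include "vip_lite.h"

    /* Submit without blocking, overlap CPU work, then wait for the hardware. */
    static vip_status_e run_async(vip_network net)
    {
        vip_status_e status = vip_trigger_network(net);    /* returns immediately */
        if (status != VIP_SUCCESS) return status;

        /* ... CPU work that does not touch the attached buffers ... */

        status = vip_wait_network(net);                    /* block until the commands complete */
        if (status == VIP_ERROR_TIMEOUT) {
            (void)vip_cancel_network(net);                 /* give up on a committed but stuck run */
        }
        return status;
    }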
+ *\return \ref vip_status_e + *\ingroup group_network + *\version 1.0 + */ +VIP_API +vip_status_e vip_query_group( + IN vip_group group, + IN vip_enum property, + OUT void *value + ); + +/*! \brief. add a vip_network object into group. +*\return \ref vip_status_e +*\param group vip_group object, network be added into group. +*\param network vip_network added into group. +*\version 1.0 +*/ +VIP_API +vip_status_e vip_add_network( + IN vip_group group, + IN vip_network network + ); + +/*! \brief Weak dup a vip_network object. + The weak dup netowrk copy new command buffer. and share coefficient data with original network. + The original network can't be destroy if the weak dup network is running or will be run later. +*\param network vip_network to dup. +*\param dup_network output vip_network. +*\return \ref vip_status_e +*\version 1.0 +*/ +VIP_API +vip_status_e vip_weak_dup_network( + IN vip_network network, + OUT vip_network *dup_network + ); + +/*! \brief. run tasks in group. only issue a interrupt after tasks complete. + These tasks is added by vip_add_network. + The order of executuion of tasks is call vip_add_network. +*\return \ref vip_status_e +*\param group vip_group object +*\param the number of task will be run. + eg: num is 4, the 0, 1, 2, 3 taks index in group will be run(inference). +*\version 1.0 +*/ +VIP_API +vip_status_e vip_run_group( + IN vip_group group, + IN vip_uint32_t num + ); + +/*! \brief. change PPU engine parameters. + change local size, global size, global offset and global scale. +*\return \ref vip_status_e +*\param network The network object should be changed. +*\param param PPU parameters +*\param index The index of PPU node, not used. please set to zero. +*\version 1.0 +*/ +VIP_API +vip_status_e vip_set_ppu_param( + IN vip_network network, + IN vip_ppu_param_t *param, + IN vip_uint32_t index + ); + +#ifdef __cplusplus +} +#endif + +#endif diff --git a/viplite-tina/lib/glibc-gcc10_2_0/v1.13/inc/vip_lite_common.h b/viplite-tina/lib/glibc-gcc10_2_0/v1.13/inc/vip_lite_common.h new file mode 100644 index 0000000..ea0977c --- /dev/null +++ b/viplite-tina/lib/glibc-gcc10_2_0/v1.13/inc/vip_lite_common.h @@ -0,0 +1,236 @@ +/**************************************************************************** +* +* The MIT License (MIT) +* +* Copyright (c) 2017 - 2022 Vivante Corporation +* +* Permission is hereby granted, free of charge, to any person obtaining a +* copy of this software and associated documentation files (the "Software"), +* to deal in the Software without restriction, including without limitation +* the rights to use, copy, modify, merge, publish, distribute, sublicense, +* and/or sell copies of the Software, and to permit persons to whom the +* Software is furnished to do so, subject to the following conditions: +* +* The above copyright notice and this permission notice shall be included in +* all copies or substantial portions of the Software. +* +* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING +* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER +* DEALINGS IN THE SOFTWARE. 
+* +***************************************************************************** +* +* The GPL License (GPL) +* +* Copyright (C) 2017 - 2022 Vivante Corporation +* +* This program is free software; you can redistribute it and/or +* modify it under the terms of the GNU General Public License +* as published by the Free Software Foundation; either version 2 +* of the License, or (at your option) any later version. +* +* This program is distributed in the hope that it will be useful, +* but WITHOUT ANY WARRANTY; without even the implied warranty of +* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +* GNU General Public License for more details. +* +* You should have received a copy of the GNU General Public License +* along with this program; if not, write to the Free Software Foundation, +* Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. +* +***************************************************************************** +* +* Note: This software is released under dual MIT and GPL licenses. A +* recipient may use this file under the terms of either the MIT license or +* GPL License. If you wish to use only one license not the other, you can +* indicate your decision by deleting one of the above license notices in your +* version of this file. +* +*****************************************************************************/ + +#ifndef _VIP_COMMON_H +#define _VIP_COMMON_H + +#ifdef __cplusplus +extern "C" { +#endif + +/*! + *\brief The VIP lite API for Convolution Neural Network application on CPU/MCU/DSP type of embedded environment. + *\details This VIP lite APIs is not thread-safe if vpmdENABLE_MULTIPLE_TASK is set to 0, + user must guarantee to call these APIs in a proper way. + But defines vpmdENABLE_MULTIPLE_TASK 1, VIPLite can support multiple task(multiple thread/process). + and it's thread-safe. + *Memory allocation and file io functions used inside driver internal would depend on working enviroment. + *\defgroup group_global Data Type Definitions and Global APIs + *\brief Data type definition and global APIs that are used in the VIP lite + *\defgroup group_buffer Buffer API + *\brief The API to manage input/output buffers + *\defgroup group_network Network API + *\brief The API to manage networks + */ + +/*! \brief An 8-bit unsigned value. + * \ingroup group_global + * \version 1.0 + */ +typedef unsigned char vip_uint8_t; + +/*! \brief An 16-bit unsigned value. + * \ingroup group_global + * \version 1.0 + */ +typedef unsigned short vip_uint16_t; + +/*! \brief An 32-bit unsigned value. + * \ingroup group_global + * \version 1.0 + */ +typedef unsigned int vip_uint32_t; + +/*! \brief An 64-bit unsigned value. + * \ingroup group_global + * \version 1.0 + */ +typedef unsigned long long vip_uint64_t; + +/*! \brief An 8-bit signed value. + * \ingroup group_global + * \version 1.0 + */ +typedef signed char vip_int8_t; + +/*! \brief An 16-bit signed value. + * \ingroup group_global + * \version 1.0 + */ +typedef signed short vip_int16_t; + +/*! \brief An 32-bit signed value. + * \ingroup group_global + * \version 1.0 + */ +typedef signed int vip_int32_t; + +/*! \brief An 64-bit signed value. + * \ingroup group_global + * \version 1.0 + */ +typedef signed long long vip_int64_t; + +/*! \brief An 8 bit ASCII character. + * \ingroup group_global + * \version 1.0 + */ +typedef char vip_char_t; + +/*! \brief An 32 bit float value. + * \ingroup group_global + * \version 1.0 + */ +typedef float vip_float_t; + +/*! 
\brief Sets the standard enumeration type size to be a fixed quantity. + * \ingroup group_global + * \version 1.0 + */ +typedef vip_int32_t vip_enum; + +/*! \brief a void pointer. + * \ingroup group_global + * \version 1.0 + */ +typedef void* vip_ptr; + +/*! \brief A 64-bit float value (aka double). + * \ingroup group_basic_features + */ +typedef double vip_float64_t; + +/*! \brief address type. + * \ingroup group_global + * \version 1.0 + */ +typedef unsigned long long vip_address_t; + +/*! \brief A zero value for pointer + *\ingroup group_global + *\version 1.0 + */ +#ifndef VIP_NULL +#define VIP_NULL 0 +#endif + +/***** Helper Macros. *****/ +#define VIP_API + +#define IN +#define OUT + +/*! \brief A invalid value if a property is not avaialbe for the query. + *\ingroup group_global + *\version 1.0 + */ +#define VIP_INVALID_VALUE ~0UL + +/*! \brief A Boolean value. + *\details This allows 0 to be FALSE, as it is in C, and any non-zero to be TRUE. + *\ingroup group_global + *\version 1.0 + */ +typedef enum _vip_bool_e { + /*! \brief The "false" value. */ + vip_false_e = 0, + /*! \brief The "true" value. */ + vip_true_e, +} vip_bool_e; + +/*! \brief The enumeration of all status codes. + * \ingroup group_global + * \version 1.0 + */ +typedef enum _vip_status +{ + /*!< \brief Indicates the network is canceld */ + VIP_ERROR_CANCELED = -15, + /*!< \brief Indicates the hardware is recovery done after hang */ + VIP_ERROR_RECOVERY = -14, + /*!< \brief Indicates the hardware is stoed */ + VIP_ERROR_POWER_STOP = -13, + /*!< \brief Indicates the hardware is in power off status */ + VIP_ERROR_POWER_OFF = -12, + /*!< \brief Indicates the failure */ + VIP_ERROR_FAILURE = -11, + /*!< \brief Indicates the binary is not compatible with the current runtime hardware */ + VIP_ERROR_NETWORK_INCOMPATIBLE = -10, + /*!< \brief Indicates the network is not prepared so current function call can't go through */ + VIP_ERROR_NETWORK_NOT_PREPARED = -9, + /*!< \brief Indicates the network misses either input or output when running the network */ + VIP_ERROR_MISSING_INPUT_OUTPUT = -8, + /*!< \brief Indicates the network binary is invalid */ + VIP_ERROR_INVALID_NETWORK = -7, + /*!< \brief Indicates driver is running out of memory of system */ + VIP_ERROR_OUT_OF_MEMORY = -6, + /*!< \brief Indicates there is no enough resource */ + VIP_ERROR_OUT_OF_RESOURCE = -5, + /*!< \brief Indicates it's supported by driver implementation */ + VIP_ERROR_NOT_SUPPORTED = -4, + /*!< \brief Indicates some arguments are not valid */ + VIP_ERROR_INVALID_ARGUMENTS = -3, + /*!< \brief Indicates there are some IO related error */ + VIP_ERROR_IO = -2, + /*!< \brief Indicates VIP timeout, could be VIP stuck somewhere */ + VIP_ERROR_TIMEOUT = -1, + /*!< \brief Indicates the execution is successfuly */ + VIP_SUCCESS = 0, +} vip_status_e; + +#ifdef __cplusplus +} +#endif + +#endif diff --git a/viplite-tina/lib/glibc-gcc10_2_0/v1.13/libVIPlite.so b/viplite-tina/lib/glibc-gcc10_2_0/v1.13/libVIPlite.so new file mode 100644 index 0000000..0214485 Binary files /dev/null and b/viplite-tina/lib/glibc-gcc10_2_0/v1.13/libVIPlite.so differ diff --git a/viplite-tina/lib/glibc-gcc10_2_0/v1.13/libVIPuser.so b/viplite-tina/lib/glibc-gcc10_2_0/v1.13/libVIPuser.so new file mode 100644 index 0000000..840a322 Binary files /dev/null and b/viplite-tina/lib/glibc-gcc10_2_0/v1.13/libVIPuser.so differ diff --git a/viplite-tina/lib/glibc-gcc11_3_0/v1.13/inc/vip_lite.h b/viplite-tina/lib/glibc-gcc11_3_0/v1.13/inc/vip_lite.h new file mode 100644 index 0000000..4ef809f --- 
/dev/null +++ b/viplite-tina/lib/glibc-gcc11_3_0/v1.13/inc/vip_lite.h @@ -0,0 +1,931 @@ +/******************************************************************************\ +|* Copyright (c) 2017-2022 by Vivante Corporation. All Rights Reserved. *| +|* *| +|* The material in this file is confidential and contains trade secrets of *| +|* of Vivante Corporation. This is proprietary information owned by Vivante *| +|* Corporation. No part of this work may be disclosed, reproduced, copied, *| +|* transmitted, or used in any way for any purpose, without the express *| +|* written permission of Vivante Corporation. *| +|* *| +\******************************************************************************/ + +#ifndef _VIP_LITE_H +#define _VIP_LITE_H + +#include "vip_lite_common.h" + +#ifdef __cplusplus +extern "C" { +#endif + +/*! + *\brief The VIP lite API for Convolution Neural Network application on CPU/MCU/DSP type of embedded environment. + *\details This VIP lite APIs is not thread-safe if vpmdENABLE_MULTIPLE_TASK is set to 0, + user must guarantee to call these APIs in a proper way. + But defines vpmdENABLE_MULTIPLE_TASK 1, VIPLite can support multiple task(multiple thread/process). + and it's thread-safe. + *Memory allocation and file io functions used inside driver internal would depend on working enviroment. + *\defgroup group_global Data Type Definitions and Global APIs + *\brief Data type definition and global APIs that are used in the VIP lite + *\defgroup group_buffer Buffer API + *\brief The API to manage input/output buffers + *\defgroup group_network Network API + *\brief The API to manage networks + */ + +/* !\brief The data format list for buffer + * \ingroup group_buffer + * \version 2.0 + */ +typedef enum _vip_buffer_format_e +{ + /*! \brief A float type of buffer data */ + VIP_BUFFER_FORMAT_FP32 = 0, + /*! \brief A half float type of buffer data */ + VIP_BUFFER_FORMAT_FP16 = 1, + /*! \brief A 8 bit unsigned integer type of buffer data */ + VIP_BUFFER_FORMAT_UINT8 = 2, + /*! \brief A 8 bit signed integer type of buffer data */ + VIP_BUFFER_FORMAT_INT8 = 3, + /*! \brief A 16 bit unsigned integer type of buffer data */ + VIP_BUFFER_FORMAT_UINT16 = 4, + /*! \brief A 16 signed integer type of buffer data */ + VIP_BUFFER_FORMAT_INT16 = 5, + /*! \brief A char type of data */ + VIP_BUFFER_FORMAT_CHAR = 6, + /*! \brief A bfloat 16 type of data */ + VIP_BUFFER_FORMAT_BFP16 = 7, + /*! \brief A 32 bit integer type of data */ + VIP_BUFFER_FORMAT_INT32 = 8, + /*! \brief A 32 bit unsigned signed integer type of buffer */ + VIP_BUFFER_FORMAT_UINT32 = 9, + /*! \brief A 64 bit signed integer type of data */ + VIP_BUFFER_FORMAT_INT64 = 10, + /*! \brief A 64 bit unsigned integer type of data */ + VIP_BUFFER_FORMAT_UINT64 = 11, + /*! \brief A 64 bit float type of buffer data */ + VIP_BUFFER_FORMAT_FP64 = 12, + /*! \brief A signed 4bits tensor */ + VIP_BUFFER_FORMAT_INT4 = 13, + /*! \brief A unsigned 4bits tensor */ + VIP_BUFFER_FORMAT_UINT4 = 14, +} vip_buffer_format_e; + +/* !\brief The quantization format list for buffer data + * \ingroup group_buffer + * \version 1.0 + */ +typedef enum _vip_buffer_quantize_format_e +{ + /*! \brief Not quantized format */ + VIP_BUFFER_QUANTIZE_NONE = 0, + /*! \brief A quantization data type which specifies the fixed point position for whole tensor. */ + VIP_BUFFER_QUANTIZE_DYNAMIC_FIXED_POINT = 1, + /*! \brief A quantization data type which has scale value and zero point to match with TF and + Android NN API for whole tensor. 
*/ + VIP_BUFFER_QUANTIZE_TF_ASYMM = 2, + /*! \brief A max vaule support quantize format */ + VIP_BUFFER_QUANTIZE_MAX, +} vip_buffer_quantize_format_e; + +/* !\brief The memory type for vip buffer + * \ingroup group_buffer + * \version 1.2.2 + */ +typedef enum _vip_buffer_memory_type_e +{ + /*! \brief Not memory type. default memory type. + use for allocate video memory from driver calling vip_create_buffer. + */ + VIP_BUFFER_MEMORY_TYPE_DEFAULT = 0x000, + /*! \brief Create a VIP buffer from the Host (logical, physical). */ + VIP_BUFFER_MEMORY_TYPE_HOST = 0x001, + /*! \brief Create a secure memory, sucure mode is not thread safe. */ + VIP_BUFFER_MEMORY_TYPE_SECURE = 0x002, + /*! \brief Create a VIP buffer from DMA_BUF */ + VIP_BUFFER_MEMORY_TYPE_DMA_BUF = 0x003, + /*! \brief The max memory type */ + VIP_BUFFER_MEMORY_TYPE_MAX, +} vip_buffer_memory_type_e; + +/* \brief The list of create network type + * \ingroup group_network + * \version 1.0 + */ +typedef enum _vip_create_network_type_e +{ + /*!< \brief NONE */ + VIP_CREATE_NETWORK_FROM_NONE = 0x00, + /*!< \brief Create network from a file path */ + VIP_CREATE_NETWORK_FROM_FILE = 0x01, + /*!< \brief Create network from buffer, NBG has been loaded in this buffer before */ + VIP_CREATE_NETWORK_FROM_MEMORY = 0x02, + /*!< \brief Create network from flash */ + VIP_CREATE_NETWORK_FROM_FLASH = 0x04, + + /* network work mode is not thread safe */ + /*!< \brief specify network work in Normal mode when creating network */ + VIP_CREATE_NETWORK_MODE_NORMAL = 0x10, + /*!< \brief specify network work in Secure mode when creating network */ + VIP_CREATE_NETWORK_MODE_SECURE = 0x20, + VIP_CREATE_NETWORK_MAX, +} vip_create_network_type_e; + +/* \brief An enumeration property that specifies which power management operation to execute. + * \ingroup group_network + * \version 1.2 + */ +typedef enum _vip_power_property_e +{ + VIP_POWER_PROPERTY_NONE = 0x0000, + /*!< \brief specify the VIP frequency */ + VIP_POWER_PROPERTY_SET_FREQUENCY = 0x0001, + /*!< \brief power off VIP hardware */ + VIP_POWER_PROPERTY_OFF = 0x0002, + /*!< \brief power on VIP hardware */ + VIP_POWER_PROPERTY_ON = 0x0004, + /*!< \brief stop VIP perform network */ + VIP_POWER_PROPERTY_STOP = 0x0008, + /*!< \brief start VIP perform network */ + VIP_POWER_PROPERTY_START = 0x0010, + VIP_POWER_PROPERTY_MAX +} vip_power_property_e; + +/* \brief query hardware caps property + */ +typedef enum _vip_query_hardware_property_e +{ + /*!< \brief the customer ID of this VIP/NPU, the returned value is vip_uint32_t */ + VIP_QUERY_HW_PROP_CID = 0, + /*!< \brief the number of deivce, the returned value is vip_uint32_t */ + VIP_QUERY_HW_PROP_DEVICE_COUNT = 1, + /*!< \brief the number of core count for each device, the returned value is vip_uint32_t * device_count */ + VIP_QUERY_HW_PROP_CORE_COUNT_EACH_DEVICE = 2, + VIP_QUERY_HW_PROP_MAX, +} vip_query_hardware_property_e; + +/* \brief The list of properties of a network. 
+ * \ingroup group_network + * \version 1.0 + */ +typedef enum _vip_network_property_e +{ + /* query network */ + /*!< \brief The number of layers in this network, the returned value is vip_uint32_t */ + VIP_NETWORK_PROP_LAYER_COUNT = 0, + /*!< \brief The number of input in this network, the returned value is vip_uint32_t*/ + VIP_NETWORK_PROP_INPUT_COUNT = 1, + /*!< \brief The number of output in this network, the returned value is vip_uint32_t*/ + VIP_NETWORK_PROP_OUTPUT_COUNT = 2, + /*!< \brief The network name, the returned value is vip_char_t[64] */ + VIP_NETWORK_PROP_NETWORK_NAME = 3, + /*!< \brief address information of wait-link, command, input-output buffers for viplite-Agent trigger, + not used if only use viplite. the returned value is \ref vip_address_info_t + */ + VIP_NETWORK_PROP_ADDRESS_INFO = 4, + /*!< \brief read interruput irq register value for supporting viplite-Broker + not used if only use viplite. the returned value is vip_uint32_t*/ + VIP_NETWORK_PROP_READ_REG_IRQ = 5, + /*!< \brief The size of memory pool, the returned value is vip_uint32_t*/ + VIP_NETWORK_PROP_MEMORY_POOL_SIZE = 6, + + /*!< \brief The network profling data, the returned value is vip_inference_profile_t */ + VIP_NETWORK_PROP_PROFILING = 7, + + /*!< \brief The the number of core for this network, the returned value is vip_uint8_t */ + VIP_NETWORK_PROP_CORE_COUNT = 8, + + /* set network */ + /* set network property should be called before vip_prepare_network */ + /*!< \brief set network to enable change PPU parameters feature for this vip_network. + the vip_set_network value param used to indicates disable or enable this feature. + vip_uint32_t *value is 1, enable change ppu param. + vip_uint32_t *value is 0, disable change ppu param */ + VIP_NETWORK_PROP_CHANGE_PPU_PARAM = 64, + /*!< \brief set memory pool buffer for network. networks can share a memory pool buffer. + the set value is \ref vip_buffer */ + VIP_NETWORK_PROP_SET_MEMORY_POOL = 65, + /*!< \brief set device id for network. networks can be submitted this vip device. */ + VIP_NETWORK_PROP_SET_DEVICE_ID = 66, + /*!< \brief set priority of network. 0 ~ 255, 0 indicates the lowest priority. */ + VIP_NETWORK_PROP_SET_PRIORITY = 67, + /*!< \brief set time out of network. unit: ms */ + VIP_NETWORK_PROP_SET_TIME_OUT = 68, +} vip_network_property_e; + +/* \brief The list of properties of a group. + * \ingroup group_network + * \version 1.0 + */ +typedef enum _vip_group_property_e +{ + /* query group */ + /*!< \brief The group profling data, the returned value is vip_inference_profile_t */ + VIP_GROUP_PROP_PROFILING = 0, + + /* set group */ + /* set group property should be called before vip_add_network */ + /*!< \brief set device id for group. networks in group can be submitted this vip device. + * This prop should be called before vip_prepare_network */ + VIP_GROUP_PROP_SET_DEVICE_ID = 64, + /*!< \brief set time out of network in group. unit: ms */ + VIP_GROUP_PROP_SET_TIME_OUT = 68, +} vip_group_property_e; + +/* \brief The list of property of an input or output. 
+ * \ingroup group_network + * \version 1.0 + */ +typedef enum _vip_buffer_property_e +{ + /*!< \brief The quantization format, the returned value is \ref vip_buffer_quantize_format_e */ + VIP_BUFFER_PROP_QUANT_FORMAT = 0, + /*!< \brief The number of dimension for this input, the returned value is vip_uint32_t */ + VIP_BUFFER_PROP_NUM_OF_DIMENSION = 1, + /*!< \brief The size of each dimension for this input, the returned value is vip_uint32_t * num_of_dim */ + VIP_BUFFER_PROP_SIZES_OF_DIMENSION = 2, + /*!< \brief The data format for this input, the returned value is \ref vip_buffer_format_e */ + VIP_BUFFER_PROP_DATA_FORMAT = 3, + /*!< \brief The position of fixed point for dynamic fixed point, the returned value is vip_uint8_t */ + VIP_BUFFER_PROP_FIXED_POINT_POS = 4, + /*!< \brief The scale value for TF quantization format, the returned value is vip_float_t */ + VIP_BUFFER_PROP_TF_SCALE = 5, + /*!< \brief The zero point for TF quantization format, the returned value is vip_uint32_t */ + VIP_BUFFER_PROP_TF_ZERO_POINT = 6, + /*!< \brief The name for network's inputs and outputs, the returned value is vip_char_t[64] */ + VIP_BUFFER_PROP_NAME = 7, +} vip_buffer_property_e; + +/* \brief The list of property of operation vip_buffer type. + * \ingroup group_network + * \version 1.3 + */ +typedef enum _vip_buffer_operation_type_e +{ + /*!< \brief None operation */ + VIP_BUFFER_OPER_TYPE_NONE = 0, + /*!< \brief Flush the vip buffer */ + VIP_BUFFER_OPER_TYPE_FLUSH = 1, + /*!< \brief invalidate the vip buffer */ + VIP_BUFFER_OPER_TYPE_INVALIDATE = 2, + VIP_BUFFER_OPER_TYPE_MAX, +} vip_buffer_operation_type_e; + +typedef struct _vip_network *vip_network; +typedef struct _vip_buffer *vip_buffer; +typedef struct _vip_group *vip_group; + + +/*! \brief Input parameter for vip_create_buffer + */ +typedef struct _vip_buffer_create_params_t +{ + /*!< \brief The number of dimensions specified in *sizes*/ + vip_uint32_t num_of_dims; + /*!< \brief The pointer to an array of dimension */ + vip_uint32_t sizes[6]; + /*!< \brief Data format for the tensor, see \ref vip_buffer_format_e */ + vip_enum data_format; + /*!< \brief Quantized format see \ref vip_buffer_quantize_format_e . */ + vip_enum quant_format; + /*\ref vip_uint32_t + */ +VIP_API +vip_uint32_t vip_get_version( + void + ); + +/*! \brief Initial VIP Hardware, VIP lite software environment and power on VIP hardware. + * \details when vpmdENABLE_MULTIPLE_TASK set to 0, + This function should be only called once before using VIP hardware if. + when vpmdENABLE_MULTIPLE_TASK set to 1, + vip_init can be called multiple times, but should paired with vip_destroy. + vip_init should be called in every process. + only need call vip_init once in multi-thread. + * VIP lite driver would construct some global variable for this call.Also + * it will reset VIP and initialize VIP hardware to a ready state to accept jobs. + * \return \ref vip_status_e + * \ingroup group_global + * \version 1.0 + */ +VIP_API +vip_status_e vip_init( + void + ); + +/*! \brief Terminate VIP lite driver and shut down VIP hardware. + * \details This function should be the last function called by application. + vip_destroy should paired with vip_init called. + * After it, no VIP lite API should be called except \ref vip_init + * \return \ref vip_status_e + * \ingroup group_global + * \version 1.0 + * \notes vip_destroy should be called in the same thread as vip_init. + */ +VIP_API +vip_status_e vip_destroy( + void + ); + +/*! \brief Queries hardware caps information. 
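Since vip_init and vip_destroy bracket every use of the library, a minimal bring-up sketch follows; the helper name is hypothetical.

    #include "vip_lite.h"

    /* Bring the driver up, read the runtime version, and shut it down again. */
    static vip_status_e driver_smoke_test(void)
    {
        vip_status_e status = vip_init();                  /* powers on and resets the VIP */
        if (status != VIP_SUCCESS) return status;

        vip_uint32_t version = vip_get_version();          /* packed driver version number */
        (void)version;

        return vip_destroy();                              /* must pair with vip_init */
    }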
This function shold be called after calling vip_init. +*\param property, the query property enum. +*\param size, the size of value buffer. +*\param value, the value buffer of returns. +*/ +VIP_API +vip_status_e vip_query_hardware( + IN vip_query_hardware_property_e property, + IN vip_uint32_t size, + OUT void *value + ); + +/*! \brief Create a input or output buffer with specified parameters. + *\details The buffer object always takes [w, h, c, n] order, + there is no padding/hole between lines/slices/batches. + *\param [in] create_param The pointer to \ref vip_buffer_create_params_t structure. + *\param [in] size_of_param The size of create_param pointer. + *\param [out] buffer An opaque handle for the new buffer object if the request is executed successfully. + *\return \ref vip_status_e + *\ingroup group_buffer + *\version 1.0 + */ +VIP_API +vip_status_e vip_create_buffer( + IN vip_buffer_create_params_t *create_param, + IN vip_uint32_t size_of_param, + OUT vip_buffer *buffer + ); + +/*! \brief Create a buffer from user contiguous or scatter non-contiguous physical address. + the vip_buffer created by this APi doesn't support flush CPU cache in driver. + So the physical memory should be a non-cache buffer or flush CPU on Host control. + not map user space logical on Linux. + *\param [in] create_param The pointer to \ref vip_buffer_create_params_t structure. + *\param [in] physical_table Physical address table. should be wraped for VIP hardware. + *\param [in] size_table The size of physical memory for each physical_table element. + *\param [in] physical_num The number of physical table element. + physical_num is 1 when create buffer from contiguous phyiscal. + *\param [out] buffer. vip lite buffer object. + *\return \ref vip_status_e +*/ +VIP_API +vip_status_e vip_create_buffer_from_physical( + IN const vip_buffer_create_params_t *create_param, + IN const vip_address_t *physical_table, + IN const vip_uint32_t *size_table, + IN vip_uint32_t physical_num, + OUT vip_buffer *buffer + ); + +/*! \brief Create a vip buffer with specified parameters. + The vip_buffer can be used to input, output, memory pool and so on. + NOTE: driver will operation CPU cache when call vip_flush_buffer API. + application should call vip_flush_buffer API if the memory handle have CPU cache. + after write data into this buffer, APP should call vip_flush_buffer(VIP_BUFFER_OPER_TYPE_FLUSH) + before CPU read date from this buffer. APP should call vip_flush_buffer(VIP_BUFFER_OPER_TYPE_INVALIDATE) + *\ when MMU disabled, create buffer from a contiguous physical memory. + *\ when MMU enabled, create buffer from a contiguous physical memory or + logical address(convert to physical in kenrel pace). + *\details The buffer object always takes [w, h, c, n] order, + there is no padding/hole between lines/slices/batches. + *\param [in] create_param The pointer to \ref vip_buffer_create_params_t structure. + *\param [in] handle_logical The logical address of the handle. + create vip buffer from the logical address. + *\param [in] the handle_size should be aligned to 64byte(vpmdCPU_CACHE_LINE_SIZE) for easy flash CPU cache. + *\param [out] buffer. vip lite buffer object. + *\return \ref vip_status_e + *\ingroup group_buffer + *\version 1.1 + */ +VIP_API +vip_status_e vip_create_buffer_from_handle( + IN const vip_buffer_create_params_t *create_param, + IN const vip_ptr handle_logical, + IN vip_uint32_t handle_size, + OUT vip_buffer *buffer + ); + +/*! \brief Create a vip buffer from user fd(file descriptor). 
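To make the buffer-creation parameters concrete, here is a sketch that allocates a 224x224x3x1 UINT8 buffer; only the fields of vip_buffer_create_params_t that are visible in this header are filled, the remaining members are zeroed, and the shape is an arbitrary example.

    #include <string.h>
    #include "vip_lite.h"

    /* Allocate a 4-D UINT8 buffer from driver-managed memory. */
    static vip_status_e make_input_buffer(vip_buffer *buffer)
    {
        vip_buffer_create_params_t params;
        memset(&params, 0, sizeof(params));

        params.num_of_dims  = 4;
        params.sizes[0]     = 224;                         /* w */
        params.sizes[1]     = 224;                         /* h */
        params.sizes[2]     = 3;                           /* c */
        params.sizes[3]     = 1;                           /* n */
        params.data_format  = VIP_BUFFER_FORMAT_UINT8;
        params.quant_format = VIP_BUFFER_QUANTIZE_NONE;

        return vip_create_buffer(&params, sizeof(params), buffer);
    }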
+ only support create buffer from dma-buf on Linux. + the vip_buffer created by this APi doesn't support flush CPU cache in driver. + So the dma-buf should be a non-cache buffer or flush CPU on Host control. + *\param [in] create_param The pointer to \ref vip_buffer_create_params_t structure. + *\param [in] fd user memory file descriptor. + *\param [in] memory_size The size of user memory. + the handle_size should be aligned to 64byte(vpmdCPU_CACHE_LINE_SIZE) for easy flash CPU cache. + *\param [out] buffer. vip lite buffer object. + *\return \ref vip_status_e +*/ +VIP_API +vip_status_e vip_create_buffer_from_fd( + IN const vip_buffer_create_params_t *create_param, + IN vip_uint32_t fd, + IN vip_uint32_t memory_size, + OUT vip_buffer *buffer + ); + +/*! \brief Destroy a buffer object which was created before. + *\param [in] buffer The opaque handle of buffer to be destroyed. + *\return \ref vip_status_e + *\ingroup group_buffer + *\version 1.0 + */ +VIP_API +vip_status_e vip_destroy_buffer( + IN vip_buffer buffer + ); + +/*! \brief Map a buffer to get the CPU accessible address for read or write + *\param [in] buffer The handle of buffer to be mapped. + *\return A pointer that application can use to read or write the buffer data. + *\ingroup group_buffer + *\version 1.0 + */ +VIP_API +void * vip_map_buffer( + IN vip_buffer buffer + ); + +/*! \brief Unmap a buffer which was mapped before. + *\param [in] buffer The handle of buffer to be unmapped. + *\return \ref vip_status_e + *\ingroup group_buffer + *\version 1.0 + */ +VIP_API +vip_status_e vip_unmap_buffer( + IN vip_buffer buffer + ); + +/*! \brief Get the size of bytes allocated for the buffer. + *\param [in] buffer The handle of buffer to be queried. + *\return \ref the size of bytes + *\ingroup group_buffer + *\version 1.0 + */ +VIP_API +vip_uint32_t vip_get_buffer_size( + IN vip_buffer buffer + ); + +/*! \brief operation the vip buffer CPU chace. flush, invalidate cache. + You should call vip_flush_buffer to flush buffer for input. + and invalidate buffer for network's output if these memories with CPU cache. +*\param buffer The vip buffer object. +*\param the type of this operation. see vip_buffer_operation_type_e. +*/ +VIP_API +vip_status_e vip_flush_buffer( + IN vip_buffer buffer, + IN vip_buffer_operation_type_e type + ); + +/*! \brief Create a network object from the given binary data. + *\details The binary is generated by the binary graph generator and it's a blob binary. + *\VIP lite Driver could interprete it to create a network object. + *\param [in] data The pointer to the binary graph. it can be a file path or a memory pointer, depending on type. + *\param [in] size_of_data The byte size of data object. the byte size of NBG buffer. + You can ignore it if create network form fil path. + *\param [in] type how to create a network object. please refer to vip_create_network_type_e enum. + *\param [out] network An opaque handle to the new network object if the request is executed successfully + *\return \ref vip_status_e + *\ingroup group_network + *\version 1.0 + */ +VIP_API +vip_status_e vip_create_network( + IN const void *data, + IN vip_uint32_t size_of_data, + IN vip_enum type, + OUT vip_network *network + ); + +/*! \brief Destroy a network object + *\details Release all resources allocated for this network. 
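The map/flush discipline described above can be wrapped as in the sketch below; treating a VIP_NULL mapping as VIP_ERROR_FAILURE is the example's convention, not a documented return value.

    #include <string.h>
    #include "vip_lite.h"

    /* Copy host data into a vip_buffer and make it visible to the VIP. */
    static vip_status_e upload_input(vip_buffer buffer, const void *src, vip_uint32_t size)
    {
        if (size > vip_get_buffer_size(buffer)) return VIP_ERROR_INVALID_ARGUMENTS;

        void *dst = vip_map_buffer(buffer);                /* CPU-accessible address */
        if (dst == VIP_NULL) return VIP_ERROR_FAILURE;

        memcpy(dst, src, size);
        vip_flush_buffer(buffer, VIP_BUFFER_OPER_TYPE_FLUSH);   /* flush CPU cache before the VIP reads */

        return vip_unmap_buffer(buffer);
    }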
+ *\param [in] network The opaque handle to the network to be destroyed + *\return \ref vip_status_e + *\ingroup group_network + *\version 1.0 + */ +VIP_API +vip_status_e vip_destroy_network( + IN vip_network network + ); + +/*! \brief Configure network property. configure network. this API should be called before calling vip_prepare_network. + *\details Configure network's layer inputs/outputs information + *\param [in] network A property \ref vip_network_property_e to be configuied. + *\return \ref vip_status_e + */ +VIP_API +vip_status_e vip_set_network( + IN vip_network network, + IN vip_enum property, + IN void *value + ); + +/*! \brief Query a property of the network object. + *\details User can use this API to get any properties from a network. + *\param [in] network The opaque handle to the network to be queried + *\param [in] property A property \ref vip_network_property_e to be queried. + *\param [out] value A pointer to memory to store the return value, + different property could return different type/size of value. + * please see comment of \ref vip_network_property_e for detail. + *\return \ref vip_status_e + *\ingroup group_network + *\version 1.0 + */ +VIP_API +vip_status_e vip_query_network( + IN vip_network network, + IN vip_enum property, + OUT void *value + ); + +/*! \brief Prepare a network to run on VIP. + *\details This function only need to be called once to prepare a network and make it ready to execute on VIP hardware. + * It would do all heavy-duty work, including allocate internal memory resource for this network, + deploy all operation's resource + * to internal memory pool, allocate/generate command buffer for this network, + patch command buffer for the resource in the internal memory + * allocations. If this function is called more than once, driver will silently ignore it. + If this function is executed successfully, this network is prepared. + *\param [in] network The opaque handle to the network which need to be prepared. + *\return \ref vip_status_e + *\ingroup group_network + *\version 1.0 + */ +VIP_API +vip_status_e vip_prepare_network( + IN vip_network network + ); + +/*! \brief Query a property of a specific input of a given network. + *\details The specified input/property/network must be valid, otherwise VIP_ERROR_INVALID_ARGUMENTS will be returned. + *\param [in] network The opaque handle to the network to be queried + *\param [in] index Specify which input to be queried in case there are multiple inputs in the network + *\param [in] property Specify which property application wants to know, see \ref vip_buffer_property_e + *\param [out] value Returned value, the details type/size, please refer to the comment of \ref vip_input_property_e + *\return \ref vip_status_e + *\ingroup group_network + *\version 1.0 + */ +VIP_API +vip_status_e vip_query_input( + IN vip_network network, + IN vip_uint32_t index, + IN vip_enum property, + OUT void *value + ); + +/*! \brief Query a property of a specific output of a given network. + *\details The specified output/property/network must be valid, otherwise VIP_ERROR_INVALID_ARGUMENTS will be returned. 
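Putting vip_create_network, vip_query_network and vip_prepare_network together, a sketch for loading a Network Binary Graph from a file path follows; the path parameter and helper name are placeholders, and size_of_data is passed as 0 because the documentation above says it can be ignored for the file-path case.

    #include "vip_lite.h"

    /* Load an NBG file and get the network ready to run. */
    static vip_status_e load_network(const char *nbg_path, vip_network *net)
    {
        vip_status_e status = vip_create_network(nbg_path, 0, VIP_CREATE_NETWORK_FROM_FILE, net);
        if (status != VIP_SUCCESS) return status;

        vip_char_t name[64] = {0};
        (void)vip_query_network(*net, VIP_NETWORK_PROP_NETWORK_NAME, name);

        status = vip_prepare_network(*net);                /* allocates memory, builds command buffers */
        if (status != VIP_SUCCESS) {
            vip_destroy_network(*net);
            *net = VIP_NULL;
        }
        return status;
    }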
+ *\param [in] network The opaque handle to the network to be queried + *\param [in] index Specify which output to be queried in case there are multiple outputs in the network + *\param [in] property Specify which property application wants to know, see \ref vip_buffer_property_e + *\param [out] value Returned value, the details type/size, please refer to the comment of \ref vip_input_property_e + *\return \ref vip_status_e + *\ingroup group_network + *\version 1.0 + */ +VIP_API +vip_status_e vip_query_output( + IN vip_network network, + IN vip_uint32_t index, + IN vip_enum property, + OUT void *value + ); + +/*! \brief Attach an input buffer to the specified index of the network. + *\details All the inputs of the network need to be attached to a valid input buffer before running a network, otherwise + * VIP_ERROR_MISSING_INPUT_OUTPUT will be returned when calling \ref vip_run_network . + When attaching an input buffer + * to the network, driver would patch the network command buffer to fill in this input buffer address. + This function could be called + * multiple times to let application update the input buffers before next network execution. + The network must be prepared by \ref vip_prepare_network before + * attaching an input. + *\param [in] network The opaque handle to a network which we want to attach an input buffer + *\param [in] index The index specify which input in the network will be set + *\param [in] input The opaque handle to a buffer which will be attached to the network. + *\return \ref vip_status_e + *\ingroup group_network + *\version 1.0 + */ +VIP_API +vip_status_e vip_set_input( + IN vip_network network, + IN vip_uint32_t index, + IN vip_buffer input + ); + +/*! \brief Attach an output buffer to the specified index of the network. + *\details All the outputs of the network need to be attached to a valid output buffer before running a network, otherwise + * VIP_ERROR_MISSING_INPUT_OUTPUT will be returned when calling \ref vip_run_network . + When attaching an output buffer + * to the network, driver would patch the network command buffer to fill in this output buffer address. + This function could be called + * multiple times to let application update the output buffers before next network execution. + The network must be prepared by \ref vip_prepare_network before + * attaching an output. + *\param [in] network The opaque handle to a network which we want to attach an output buffer + *\param [in] index The index specify which output in the network will be set + *\param [in] output The opaque handle to a buffer which will be attached to the network. + *\return \ref vip_status_e + *\ingroup group_network + *\version 1.0 + */ +VIP_API +vip_status_e vip_set_output( + IN vip_network network, + IN vip_uint32_t index, + IN vip_buffer output + ); + +/*! \brief. Kick off the network execution and send command buffer of this network to VIP hardware. + *\details This function can be called multiple times. + Every time it's called it would do inference with current attached + * input buffers and output buffers. It would return until VIP finish the execution. + If the network is not ready to execute + * for some reason like not be prepared by \ref vip_prepare_network , + it would fail with status reported. + *\param [in] network The opaque handle to the network to be executed. + *\return \ref vip_status_e + *\ingroup group_network + *\version 1.0 + */ +VIP_API +vip_status_e vip_run_network( + IN vip_network network + ); + +/*! \brief Finish using this network to do inference. 
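The per-output query properties above can be used to build a matching buffer before attaching it; this sketch assumes the queried enum values fit directly into the vip_enum fields of vip_buffer_create_params_t and that output index 0 is the one of interest.

    #include <string.h>
    #include "vip_lite.h"

    /* Create an output buffer whose shape and formats match output 0, then attach it. */
    static vip_status_e attach_matching_output(vip_network net, vip_buffer *out_buf)
    {
        vip_buffer_create_params_t params;
        vip_status_e status;
        memset(&params, 0, sizeof(params));

        status = vip_query_output(net, 0, VIP_BUFFER_PROP_NUM_OF_DIMENSION, &params.num_of_dims);
        if (status != VIP_SUCCESS) return status;
        status = vip_query_output(net, 0, VIP_BUFFER_PROP_SIZES_OF_DIMENSION, params.sizes);
        if (status != VIP_SUCCESS) return status;
        status = vip_query_output(net, 0, VIP_BUFFER_PROP_DATA_FORMAT, &params.data_format);
        if (status != VIP_SUCCESS) return status;
        status = vip_query_output(net, 0, VIP_BUFFER_PROP_QUANT_FORMAT, &params.quant_format);
        if (status != VIP_SUCCESS) return status;

        status = vip_create_buffer(&params, sizeof(params), out_buf);
        if (status != VIP_SUCCESS) return status;

        return vip_set_output(net, 0, *out_buf);
    }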
+ *\details This function is paired with \ref vip_prepare_network . + It's suggested to be called once after \ref vip_prepare_network called. + * If it's called more than that, it will be silently ignored. + If the network is not prepared but finished is called, it's silently ignored too. + * This function would release all internal memory allocations which are allocated when + the network is prepared. Since the preparation of network takes much time, + * it is suggested that if the network will be still used later, application should not + finish the network unless there is no much system resource remained for other + * networks. The network object is still alive unitl it's destroyed by \ref vip_destroy_network . + *\param [in] network The opaque handle to the network which will be finished. + *\return \ref vip_status_e + *\ingroup group_network + *\version 1.0 + */ +VIP_API +vip_status_e vip_finish_network( + IN vip_network network + ); + +/*! \brief. Kick off the network execution and send command buffer of this network to VIP hardware. +*\details This function is similar to \ref vip_run_network except that it returns + immediately without waiting for HW to complete the commands. +*\param [in] network The opaque handle to the network to be executed. +*\return \ref vip_status_e +*\ingroup group_network +*\version 1.0 +*/ +VIP_API +vip_status_e vip_trigger_network( + IN vip_network network + ); + +/*! \brief. Run tasks in group,these tasks is added by vip_add_network. + The order of executuion of tasks is call vip_add_network. +*\details This function is similar to \ref vip_run_group except that it returns + immediately without waiting for HW to complete the commands. +*\return \ref vip_status_e +*\param group vip_group object +*\param the number of task will be run. + eg: num is 4, the 0, 1, 2, 3 taks index in group will be run(inference). +*\version 1.0 +*/ +VIP_API +vip_status_e vip_trigger_group( + IN vip_group group, + IN vip_uint32_t num + ); + +/*! \brief. Explicitly wait for HW to finish executing the submitted commands. +*\details This function waits for HW to complete the commands. + This should be called once CPU needs to access the network currently being run. +*\return \ref vip_status_e +*\ingroup group_network +*\version 1.0 +*/ +VIP_API +vip_status_e vip_wait_network( + IN vip_network network + ); + +/*! \brief. Explicitly wait for HW to finish executing the submitted task in group. +*\details This function waits for HW to complete the submitted commands in group. + This should be called once CPU needs to access the group currently being run. +*\return \ref vip_status_e +*\ingroup group_network +*\version 1.0 +*/ +VIP_API +vip_status_e vip_wait_group( + IN vip_group group + ); + +/*! \brief. Cancle network running on vip hardware after network is commited. +*\details This function is cancel network running on vip hardware. +*\return \ref vip_status_e +*\ingroup group_network +*\version 1.0 +*/ +VIP_API +vip_status_e vip_cancel_network( + IN vip_network network + ); + +/*! \brief. give user applications more control over power management for VIP cores. +*\details. control VIP core frequency and power status by property. see vip_power_property_e. +*\param ID of the managed device. +*\param perperty Control VIP core frequency and power status by property. see vip_power_property_e. +*\param value The value for vip_power_property_e property. + Please see vip_power_frequency_t if property is setting to VIP_POWER_PROPERTY_SET_FREQUENCY. 
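As an illustration of vip_power_management, the sketch below pauses and resumes job execution on device 0; passing VIP_NULL as the value for properties that carry no payload is an assumption made for the example, since the header only documents the value for VIP_POWER_PROPERTY_SET_FREQUENCY.

    #include "vip_lite.h"

    /* Pause and later resume network execution on device 0. */
    static void pause_resume_device0(void)
    {
        (void)vip_power_management(0, VIP_POWER_PROPERTY_STOP,  VIP_NULL);
        /* ... device 0 accepts no new work here ... */
        (void)vip_power_management(0, VIP_POWER_PROPERTY_START, VIP_NULL);
    }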
+*\return \ref vip_status_e +*\ingroup group_network +*\version 1.0 +*/ +VIP_API +vip_status_e vip_power_management( + IN vip_uint32_t device_id, + IN vip_power_property_e property, + IN void *value + ); + +/*! \brief. Create a vip_group object to run multiple tasks(network or node) + and without interrupt between each task. +*\return \ref vip_status_e +*\param count The maximum number of tasks supports by this group. +*\param group Return vip_group object be created. +*\version 1.0 +*/ +VIP_API +vip_status_e vip_create_group( + IN vip_uint32_t count, + OUT vip_group *group + ); + +/*! \brief. Destroy group object which created by vip_create_group. +*\return \ref vip_status_e +*\param group vip_group object/ +*\version 1.0 +*/ +VIP_API +vip_status_e vip_destroy_group( + IN vip_group group + ); + +/* +@brief set group property. configure group. this API should be called before calling vip_run_group. +@param group The group object which created by vip_create_group(). +@param property The property be set. see vip_group_property_e. +@param value The set data. +*/ +VIP_API +vip_status_e vip_set_group( + IN vip_group group, + IN vip_enum property, + IN void *value + ); + +/*! \brief Query a property of the group object. + *\param [in] group The group object which created by vip_create_group(). + *\param [in] property A property \ref vip_group_property_e to be queried. + *\param [out] value A pointer to memory to store the return value, + different property could return different type/size of value. + * please see comment of \ref vip_group_property_e for detail. + *\return \ref vip_status_e + *\ingroup group_network + *\version 1.0 + */ +VIP_API +vip_status_e vip_query_group( + IN vip_group group, + IN vip_enum property, + OUT void *value + ); + +/*! \brief. add a vip_network object into group. +*\return \ref vip_status_e +*\param group vip_group object, network be added into group. +*\param network vip_network added into group. +*\version 1.0 +*/ +VIP_API +vip_status_e vip_add_network( + IN vip_group group, + IN vip_network network + ); + +/*! \brief Weak dup a vip_network object. + The weak dup netowrk copy new command buffer. and share coefficient data with original network. + The original network can't be destroy if the weak dup network is running or will be run later. +*\param network vip_network to dup. +*\param dup_network output vip_network. +*\return \ref vip_status_e +*\version 1.0 +*/ +VIP_API +vip_status_e vip_weak_dup_network( + IN vip_network network, + OUT vip_network *dup_network + ); + +/*! \brief. run tasks in group. only issue a interrupt after tasks complete. + These tasks is added by vip_add_network. + The order of executuion of tasks is call vip_add_network. +*\return \ref vip_status_e +*\param group vip_group object +*\param the number of task will be run. + eg: num is 4, the 0, 1, 2, 3 taks index in group will be run(inference). +*\version 1.0 +*/ +VIP_API +vip_status_e vip_run_group( + IN vip_group group, + IN vip_uint32_t num + ); + +/*! \brief. change PPU engine parameters. + change local size, global size, global offset and global scale. +*\return \ref vip_status_e +*\param network The network object should be changed. +*\param param PPU parameters +*\param index The index of PPU node, not used. please set to zero. 
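The group API above is easiest to read as a sketch: create a group sized for the tasks, add prepared networks in the desired execution order, run them as one submission, then destroy the group. The two-network scenario and helper name are assumptions.

    #include "vip_lite.h"

    /* Run two prepared networks back to back as a single group submission. */
    static vip_status_e run_pair(vip_network net_a, vip_network net_b)
    {
        vip_group group = VIP_NULL;
        vip_status_e status = vip_create_group(2, &group); /* room for two tasks */
        if (status != VIP_SUCCESS) return status;

        status = vip_add_network(group, net_a);            /* task index 0 */
        if (status == VIP_SUCCESS) status = vip_add_network(group, net_b);   /* task index 1 */
        if (status == VIP_SUCCESS) status = vip_run_group(group, 2);         /* run tasks 0 and 1 */

        vip_destroy_group(group);
        return status;
    }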
+*\version 1.0 +*/ +VIP_API +vip_status_e vip_set_ppu_param( + IN vip_network network, + IN vip_ppu_param_t *param, + IN vip_uint32_t index + ); + +#ifdef __cplusplus +} +#endif + +#endif diff --git a/viplite-tina/lib/glibc-gcc11_3_0/v1.13/inc/vip_lite_common.h b/viplite-tina/lib/glibc-gcc11_3_0/v1.13/inc/vip_lite_common.h new file mode 100644 index 0000000..ea0977c --- /dev/null +++ b/viplite-tina/lib/glibc-gcc11_3_0/v1.13/inc/vip_lite_common.h @@ -0,0 +1,236 @@ +/**************************************************************************** +* +* The MIT License (MIT) +* +* Copyright (c) 2017 - 2022 Vivante Corporation +* +* Permission is hereby granted, free of charge, to any person obtaining a +* copy of this software and associated documentation files (the "Software"), +* to deal in the Software without restriction, including without limitation +* the rights to use, copy, modify, merge, publish, distribute, sublicense, +* and/or sell copies of the Software, and to permit persons to whom the +* Software is furnished to do so, subject to the following conditions: +* +* The above copyright notice and this permission notice shall be included in +* all copies or substantial portions of the Software. +* +* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING +* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER +* DEALINGS IN THE SOFTWARE. +* +***************************************************************************** +* +* The GPL License (GPL) +* +* Copyright (C) 2017 - 2022 Vivante Corporation +* +* This program is free software; you can redistribute it and/or +* modify it under the terms of the GNU General Public License +* as published by the Free Software Foundation; either version 2 +* of the License, or (at your option) any later version. +* +* This program is distributed in the hope that it will be useful, +* but WITHOUT ANY WARRANTY; without even the implied warranty of +* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +* GNU General Public License for more details. +* +* You should have received a copy of the GNU General Public License +* along with this program; if not, write to the Free Software Foundation, +* Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. +* +***************************************************************************** +* +* Note: This software is released under dual MIT and GPL licenses. A +* recipient may use this file under the terms of either the MIT license or +* GPL License. If you wish to use only one license not the other, you can +* indicate your decision by deleting one of the above license notices in your +* version of this file. +* +*****************************************************************************/ + +#ifndef _VIP_COMMON_H +#define _VIP_COMMON_H + +#ifdef __cplusplus +extern "C" { +#endif + +/*! + *\brief The VIP lite API for Convolution Neural Network application on CPU/MCU/DSP type of embedded environment. + *\details This VIP lite APIs is not thread-safe if vpmdENABLE_MULTIPLE_TASK is set to 0, + user must guarantee to call these APIs in a proper way. + But defines vpmdENABLE_MULTIPLE_TASK 1, VIPLite can support multiple task(multiple thread/process). 
+ and it's thread-safe. + *Memory allocation and file io functions used inside driver internal would depend on working enviroment. + *\defgroup group_global Data Type Definitions and Global APIs + *\brief Data type definition and global APIs that are used in the VIP lite + *\defgroup group_buffer Buffer API + *\brief The API to manage input/output buffers + *\defgroup group_network Network API + *\brief The API to manage networks + */ + +/*! \brief An 8-bit unsigned value. + * \ingroup group_global + * \version 1.0 + */ +typedef unsigned char vip_uint8_t; + +/*! \brief An 16-bit unsigned value. + * \ingroup group_global + * \version 1.0 + */ +typedef unsigned short vip_uint16_t; + +/*! \brief An 32-bit unsigned value. + * \ingroup group_global + * \version 1.0 + */ +typedef unsigned int vip_uint32_t; + +/*! \brief An 64-bit unsigned value. + * \ingroup group_global + * \version 1.0 + */ +typedef unsigned long long vip_uint64_t; + +/*! \brief An 8-bit signed value. + * \ingroup group_global + * \version 1.0 + */ +typedef signed char vip_int8_t; + +/*! \brief An 16-bit signed value. + * \ingroup group_global + * \version 1.0 + */ +typedef signed short vip_int16_t; + +/*! \brief An 32-bit signed value. + * \ingroup group_global + * \version 1.0 + */ +typedef signed int vip_int32_t; + +/*! \brief An 64-bit signed value. + * \ingroup group_global + * \version 1.0 + */ +typedef signed long long vip_int64_t; + +/*! \brief An 8 bit ASCII character. + * \ingroup group_global + * \version 1.0 + */ +typedef char vip_char_t; + +/*! \brief An 32 bit float value. + * \ingroup group_global + * \version 1.0 + */ +typedef float vip_float_t; + +/*! \brief Sets the standard enumeration type size to be a fixed quantity. + * \ingroup group_global + * \version 1.0 + */ +typedef vip_int32_t vip_enum; + +/*! \brief a void pointer. + * \ingroup group_global + * \version 1.0 + */ +typedef void* vip_ptr; + +/*! \brief A 64-bit float value (aka double). + * \ingroup group_basic_features + */ +typedef double vip_float64_t; + +/*! \brief address type. + * \ingroup group_global + * \version 1.0 + */ +typedef unsigned long long vip_address_t; + +/*! \brief A zero value for pointer + *\ingroup group_global + *\version 1.0 + */ +#ifndef VIP_NULL +#define VIP_NULL 0 +#endif + +/***** Helper Macros. *****/ +#define VIP_API + +#define IN +#define OUT + +/*! \brief A invalid value if a property is not avaialbe for the query. + *\ingroup group_global + *\version 1.0 + */ +#define VIP_INVALID_VALUE ~0UL + +/*! \brief A Boolean value. + *\details This allows 0 to be FALSE, as it is in C, and any non-zero to be TRUE. + *\ingroup group_global + *\version 1.0 + */ +typedef enum _vip_bool_e { + /*! \brief The "false" value. */ + vip_false_e = 0, + /*! \brief The "true" value. */ + vip_true_e, +} vip_bool_e; + +/*! \brief The enumeration of all status codes. 
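For logging, it can help to map the status codes defined below to readable strings; this helper is not part of the API, only covers a handful of common codes, and falls back to a generic message.

    #include "vip_lite_common.h"

    /* Translate common vip_status_e values into strings for logs (illustrative helper). */
    static const char *vip_status_str(vip_status_e status)
    {
        switch (status) {
        case VIP_SUCCESS:                    return "success";
        case VIP_ERROR_TIMEOUT:              return "timeout";
        case VIP_ERROR_INVALID_ARGUMENTS:    return "invalid arguments";
        case VIP_ERROR_OUT_OF_MEMORY:        return "out of memory";
        case VIP_ERROR_MISSING_INPUT_OUTPUT: return "missing input/output";
        case VIP_ERROR_NETWORK_NOT_PREPARED: return "network not prepared";
        default:                             return "other error";
        }
    }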
+ * \ingroup group_global + * \version 1.0 + */ +typedef enum _vip_status +{ + /*!< \brief Indicates the network is canceld */ + VIP_ERROR_CANCELED = -15, + /*!< \brief Indicates the hardware is recovery done after hang */ + VIP_ERROR_RECOVERY = -14, + /*!< \brief Indicates the hardware is stoed */ + VIP_ERROR_POWER_STOP = -13, + /*!< \brief Indicates the hardware is in power off status */ + VIP_ERROR_POWER_OFF = -12, + /*!< \brief Indicates the failure */ + VIP_ERROR_FAILURE = -11, + /*!< \brief Indicates the binary is not compatible with the current runtime hardware */ + VIP_ERROR_NETWORK_INCOMPATIBLE = -10, + /*!< \brief Indicates the network is not prepared so current function call can't go through */ + VIP_ERROR_NETWORK_NOT_PREPARED = -9, + /*!< \brief Indicates the network misses either input or output when running the network */ + VIP_ERROR_MISSING_INPUT_OUTPUT = -8, + /*!< \brief Indicates the network binary is invalid */ + VIP_ERROR_INVALID_NETWORK = -7, + /*!< \brief Indicates driver is running out of memory of system */ + VIP_ERROR_OUT_OF_MEMORY = -6, + /*!< \brief Indicates there is no enough resource */ + VIP_ERROR_OUT_OF_RESOURCE = -5, + /*!< \brief Indicates it's supported by driver implementation */ + VIP_ERROR_NOT_SUPPORTED = -4, + /*!< \brief Indicates some arguments are not valid */ + VIP_ERROR_INVALID_ARGUMENTS = -3, + /*!< \brief Indicates there are some IO related error */ + VIP_ERROR_IO = -2, + /*!< \brief Indicates VIP timeout, could be VIP stuck somewhere */ + VIP_ERROR_TIMEOUT = -1, + /*!< \brief Indicates the execution is successfuly */ + VIP_SUCCESS = 0, +} vip_status_e; + +#ifdef __cplusplus +} +#endif + +#endif diff --git a/viplite-tina/lib/glibc-gcc11_3_0/v1.13/libVIPlite.so b/viplite-tina/lib/glibc-gcc11_3_0/v1.13/libVIPlite.so new file mode 100644 index 0000000..4614c72 Binary files /dev/null and b/viplite-tina/lib/glibc-gcc11_3_0/v1.13/libVIPlite.so differ diff --git a/viplite-tina/lib/glibc-gcc11_3_0/v1.13/libVIPuser.so b/viplite-tina/lib/glibc-gcc11_3_0/v1.13/libVIPuser.so new file mode 100644 index 0000000..b35f54f Binary files /dev/null and b/viplite-tina/lib/glibc-gcc11_3_0/v1.13/libVIPuser.so differ diff --git a/viplite-tina/lib/glibc-gcc13_2_0/v1.13/inc/vip_lite.h b/viplite-tina/lib/glibc-gcc13_2_0/v1.13/inc/vip_lite.h new file mode 100644 index 0000000..4ef809f --- /dev/null +++ b/viplite-tina/lib/glibc-gcc13_2_0/v1.13/inc/vip_lite.h @@ -0,0 +1,931 @@ +/******************************************************************************\ +|* Copyright (c) 2017-2022 by Vivante Corporation. All Rights Reserved. *| +|* *| +|* The material in this file is confidential and contains trade secrets of *| +|* of Vivante Corporation. This is proprietary information owned by Vivante *| +|* Corporation. No part of this work may be disclosed, reproduced, copied, *| +|* transmitted, or used in any way for any purpose, without the express *| +|* written permission of Vivante Corporation. *| +|* *| +\******************************************************************************/ + +#ifndef _VIP_LITE_H +#define _VIP_LITE_H + +#include "vip_lite_common.h" + +#ifdef __cplusplus +extern "C" { +#endif + +/*! + *\brief The VIP lite API for Convolution Neural Network application on CPU/MCU/DSP type of embedded environment. + *\details This VIP lite APIs is not thread-safe if vpmdENABLE_MULTIPLE_TASK is set to 0, + user must guarantee to call these APIs in a proper way. + But defines vpmdENABLE_MULTIPLE_TASK 1, VIPLite can support multiple task(multiple thread/process). 
+ and it's thread-safe. + *Memory allocation and file io functions used inside driver internal would depend on working enviroment. + *\defgroup group_global Data Type Definitions and Global APIs + *\brief Data type definition and global APIs that are used in the VIP lite + *\defgroup group_buffer Buffer API + *\brief The API to manage input/output buffers + *\defgroup group_network Network API + *\brief The API to manage networks + */ + +/* !\brief The data format list for buffer + * \ingroup group_buffer + * \version 2.0 + */ +typedef enum _vip_buffer_format_e +{ + /*! \brief A float type of buffer data */ + VIP_BUFFER_FORMAT_FP32 = 0, + /*! \brief A half float type of buffer data */ + VIP_BUFFER_FORMAT_FP16 = 1, + /*! \brief A 8 bit unsigned integer type of buffer data */ + VIP_BUFFER_FORMAT_UINT8 = 2, + /*! \brief A 8 bit signed integer type of buffer data */ + VIP_BUFFER_FORMAT_INT8 = 3, + /*! \brief A 16 bit unsigned integer type of buffer data */ + VIP_BUFFER_FORMAT_UINT16 = 4, + /*! \brief A 16 signed integer type of buffer data */ + VIP_BUFFER_FORMAT_INT16 = 5, + /*! \brief A char type of data */ + VIP_BUFFER_FORMAT_CHAR = 6, + /*! \brief A bfloat 16 type of data */ + VIP_BUFFER_FORMAT_BFP16 = 7, + /*! \brief A 32 bit integer type of data */ + VIP_BUFFER_FORMAT_INT32 = 8, + /*! \brief A 32 bit unsigned signed integer type of buffer */ + VIP_BUFFER_FORMAT_UINT32 = 9, + /*! \brief A 64 bit signed integer type of data */ + VIP_BUFFER_FORMAT_INT64 = 10, + /*! \brief A 64 bit unsigned integer type of data */ + VIP_BUFFER_FORMAT_UINT64 = 11, + /*! \brief A 64 bit float type of buffer data */ + VIP_BUFFER_FORMAT_FP64 = 12, + /*! \brief A signed 4bits tensor */ + VIP_BUFFER_FORMAT_INT4 = 13, + /*! \brief A unsigned 4bits tensor */ + VIP_BUFFER_FORMAT_UINT4 = 14, +} vip_buffer_format_e; + +/* !\brief The quantization format list for buffer data + * \ingroup group_buffer + * \version 1.0 + */ +typedef enum _vip_buffer_quantize_format_e +{ + /*! \brief Not quantized format */ + VIP_BUFFER_QUANTIZE_NONE = 0, + /*! \brief A quantization data type which specifies the fixed point position for whole tensor. */ + VIP_BUFFER_QUANTIZE_DYNAMIC_FIXED_POINT = 1, + /*! \brief A quantization data type which has scale value and zero point to match with TF and + Android NN API for whole tensor. */ + VIP_BUFFER_QUANTIZE_TF_ASYMM = 2, + /*! \brief A max vaule support quantize format */ + VIP_BUFFER_QUANTIZE_MAX, +} vip_buffer_quantize_format_e; + +/* !\brief The memory type for vip buffer + * \ingroup group_buffer + * \version 1.2.2 + */ +typedef enum _vip_buffer_memory_type_e +{ + /*! \brief Not memory type. default memory type. + use for allocate video memory from driver calling vip_create_buffer. + */ + VIP_BUFFER_MEMORY_TYPE_DEFAULT = 0x000, + /*! \brief Create a VIP buffer from the Host (logical, physical). */ + VIP_BUFFER_MEMORY_TYPE_HOST = 0x001, + /*! \brief Create a secure memory, sucure mode is not thread safe. */ + VIP_BUFFER_MEMORY_TYPE_SECURE = 0x002, + /*! \brief Create a VIP buffer from DMA_BUF */ + VIP_BUFFER_MEMORY_TYPE_DMA_BUF = 0x003, + /*! 
\brief The max memory type */ + VIP_BUFFER_MEMORY_TYPE_MAX, +} vip_buffer_memory_type_e; + +/* \brief The list of create network type + * \ingroup group_network + * \version 1.0 + */ +typedef enum _vip_create_network_type_e +{ + /*!< \brief NONE */ + VIP_CREATE_NETWORK_FROM_NONE = 0x00, + /*!< \brief Create network from a file path */ + VIP_CREATE_NETWORK_FROM_FILE = 0x01, + /*!< \brief Create network from buffer, NBG has been loaded in this buffer before */ + VIP_CREATE_NETWORK_FROM_MEMORY = 0x02, + /*!< \brief Create network from flash */ + VIP_CREATE_NETWORK_FROM_FLASH = 0x04, + + /* network work mode is not thread safe */ + /*!< \brief specify network work in Normal mode when creating network */ + VIP_CREATE_NETWORK_MODE_NORMAL = 0x10, + /*!< \brief specify network work in Secure mode when creating network */ + VIP_CREATE_NETWORK_MODE_SECURE = 0x20, + VIP_CREATE_NETWORK_MAX, +} vip_create_network_type_e; + +/* \brief An enumeration property that specifies which power management operation to execute. + * \ingroup group_network + * \version 1.2 + */ +typedef enum _vip_power_property_e +{ + VIP_POWER_PROPERTY_NONE = 0x0000, + /*!< \brief specify the VIP frequency */ + VIP_POWER_PROPERTY_SET_FREQUENCY = 0x0001, + /*!< \brief power off VIP hardware */ + VIP_POWER_PROPERTY_OFF = 0x0002, + /*!< \brief power on VIP hardware */ + VIP_POWER_PROPERTY_ON = 0x0004, + /*!< \brief stop VIP perform network */ + VIP_POWER_PROPERTY_STOP = 0x0008, + /*!< \brief start VIP perform network */ + VIP_POWER_PROPERTY_START = 0x0010, + VIP_POWER_PROPERTY_MAX +} vip_power_property_e; + +/* \brief query hardware caps property + */ +typedef enum _vip_query_hardware_property_e +{ + /*!< \brief the customer ID of this VIP/NPU, the returned value is vip_uint32_t */ + VIP_QUERY_HW_PROP_CID = 0, + /*!< \brief the number of deivce, the returned value is vip_uint32_t */ + VIP_QUERY_HW_PROP_DEVICE_COUNT = 1, + /*!< \brief the number of core count for each device, the returned value is vip_uint32_t * device_count */ + VIP_QUERY_HW_PROP_CORE_COUNT_EACH_DEVICE = 2, + VIP_QUERY_HW_PROP_MAX, +} vip_query_hardware_property_e; + +/* \brief The list of properties of a network. + * \ingroup group_network + * \version 1.0 + */ +typedef enum _vip_network_property_e +{ + /* query network */ + /*!< \brief The number of layers in this network, the returned value is vip_uint32_t */ + VIP_NETWORK_PROP_LAYER_COUNT = 0, + /*!< \brief The number of input in this network, the returned value is vip_uint32_t*/ + VIP_NETWORK_PROP_INPUT_COUNT = 1, + /*!< \brief The number of output in this network, the returned value is vip_uint32_t*/ + VIP_NETWORK_PROP_OUTPUT_COUNT = 2, + /*!< \brief The network name, the returned value is vip_char_t[64] */ + VIP_NETWORK_PROP_NETWORK_NAME = 3, + /*!< \brief address information of wait-link, command, input-output buffers for viplite-Agent trigger, + not used if only use viplite. the returned value is \ref vip_address_info_t + */ + VIP_NETWORK_PROP_ADDRESS_INFO = 4, + /*!< \brief read interruput irq register value for supporting viplite-Broker + not used if only use viplite. 
the returned value is vip_uint32_t*/ + VIP_NETWORK_PROP_READ_REG_IRQ = 5, + /*!< \brief The size of memory pool, the returned value is vip_uint32_t*/ + VIP_NETWORK_PROP_MEMORY_POOL_SIZE = 6, + + /*!< \brief The network profling data, the returned value is vip_inference_profile_t */ + VIP_NETWORK_PROP_PROFILING = 7, + + /*!< \brief The the number of core for this network, the returned value is vip_uint8_t */ + VIP_NETWORK_PROP_CORE_COUNT = 8, + + /* set network */ + /* set network property should be called before vip_prepare_network */ + /*!< \brief set network to enable change PPU parameters feature for this vip_network. + the vip_set_network value param used to indicates disable or enable this feature. + vip_uint32_t *value is 1, enable change ppu param. + vip_uint32_t *value is 0, disable change ppu param */ + VIP_NETWORK_PROP_CHANGE_PPU_PARAM = 64, + /*!< \brief set memory pool buffer for network. networks can share a memory pool buffer. + the set value is \ref vip_buffer */ + VIP_NETWORK_PROP_SET_MEMORY_POOL = 65, + /*!< \brief set device id for network. networks can be submitted this vip device. */ + VIP_NETWORK_PROP_SET_DEVICE_ID = 66, + /*!< \brief set priority of network. 0 ~ 255, 0 indicates the lowest priority. */ + VIP_NETWORK_PROP_SET_PRIORITY = 67, + /*!< \brief set time out of network. unit: ms */ + VIP_NETWORK_PROP_SET_TIME_OUT = 68, +} vip_network_property_e; + +/* \brief The list of properties of a group. + * \ingroup group_network + * \version 1.0 + */ +typedef enum _vip_group_property_e +{ + /* query group */ + /*!< \brief The group profling data, the returned value is vip_inference_profile_t */ + VIP_GROUP_PROP_PROFILING = 0, + + /* set group */ + /* set group property should be called before vip_add_network */ + /*!< \brief set device id for group. networks in group can be submitted this vip device. + * This prop should be called before vip_prepare_network */ + VIP_GROUP_PROP_SET_DEVICE_ID = 64, + /*!< \brief set time out of network in group. unit: ms */ + VIP_GROUP_PROP_SET_TIME_OUT = 68, +} vip_group_property_e; + +/* \brief The list of property of an input or output. + * \ingroup group_network + * \version 1.0 + */ +typedef enum _vip_buffer_property_e +{ + /*!< \brief The quantization format, the returned value is \ref vip_buffer_quantize_format_e */ + VIP_BUFFER_PROP_QUANT_FORMAT = 0, + /*!< \brief The number of dimension for this input, the returned value is vip_uint32_t */ + VIP_BUFFER_PROP_NUM_OF_DIMENSION = 1, + /*!< \brief The size of each dimension for this input, the returned value is vip_uint32_t * num_of_dim */ + VIP_BUFFER_PROP_SIZES_OF_DIMENSION = 2, + /*!< \brief The data format for this input, the returned value is \ref vip_buffer_format_e */ + VIP_BUFFER_PROP_DATA_FORMAT = 3, + /*!< \brief The position of fixed point for dynamic fixed point, the returned value is vip_uint8_t */ + VIP_BUFFER_PROP_FIXED_POINT_POS = 4, + /*!< \brief The scale value for TF quantization format, the returned value is vip_float_t */ + VIP_BUFFER_PROP_TF_SCALE = 5, + /*!< \brief The zero point for TF quantization format, the returned value is vip_uint32_t */ + VIP_BUFFER_PROP_TF_ZERO_POINT = 6, + /*!< \brief The name for network's inputs and outputs, the returned value is vip_char_t[64] */ + VIP_BUFFER_PROP_NAME = 7, +} vip_buffer_property_e; + +/* \brief The list of property of operation vip_buffer type. 
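+ * \details A minimal cache-maintenance sketch (illustrative only, assuming cached vip_buffer
+ * objects named input and output, a prepared network, and the vip_flush_buffer() and
+ * vip_run_network() APIs declared later in this header):
+ * \code
+ * // the CPU wrote input data: flush it out of the CPU cache before the VIP reads it
+ * vip_flush_buffer(input, VIP_BUFFER_OPER_TYPE_FLUSH);
+ * vip_run_network(network);
+ * // the VIP wrote the output: invalidate the CPU cache before the CPU reads it
+ * vip_flush_buffer(output, VIP_BUFFER_OPER_TYPE_INVALIDATE);
+ * \endcode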
+ * \ingroup group_network + * \version 1.3 + */ +typedef enum _vip_buffer_operation_type_e +{ + /*!< \brief None operation */ + VIP_BUFFER_OPER_TYPE_NONE = 0, + /*!< \brief Flush the vip buffer */ + VIP_BUFFER_OPER_TYPE_FLUSH = 1, + /*!< \brief invalidate the vip buffer */ + VIP_BUFFER_OPER_TYPE_INVALIDATE = 2, + VIP_BUFFER_OPER_TYPE_MAX, +} vip_buffer_operation_type_e; + +typedef struct _vip_network *vip_network; +typedef struct _vip_buffer *vip_buffer; +typedef struct _vip_group *vip_group; + + +/*! \brief Input parameter for vip_create_buffer + */ +typedef struct _vip_buffer_create_params_t +{ + /*!< \brief The number of dimensions specified in *sizes*/ + vip_uint32_t num_of_dims; + /*!< \brief The pointer to an array of dimension */ + vip_uint32_t sizes[6]; + /*!< \brief Data format for the tensor, see \ref vip_buffer_format_e */ + vip_enum data_format; + /*!< \brief Quantized format see \ref vip_buffer_quantize_format_e . */ + vip_enum quant_format; + /*\ref vip_uint32_t + */ +VIP_API +vip_uint32_t vip_get_version( + void + ); + +/*! \brief Initial VIP Hardware, VIP lite software environment and power on VIP hardware. + * \details when vpmdENABLE_MULTIPLE_TASK set to 0, + This function should be only called once before using VIP hardware if. + when vpmdENABLE_MULTIPLE_TASK set to 1, + vip_init can be called multiple times, but should paired with vip_destroy. + vip_init should be called in every process. + only need call vip_init once in multi-thread. + * VIP lite driver would construct some global variable for this call.Also + * it will reset VIP and initialize VIP hardware to a ready state to accept jobs. + * \return \ref vip_status_e + * \ingroup group_global + * \version 1.0 + */ +VIP_API +vip_status_e vip_init( + void + ); + +/*! \brief Terminate VIP lite driver and shut down VIP hardware. + * \details This function should be the last function called by application. + vip_destroy should paired with vip_init called. + * After it, no VIP lite API should be called except \ref vip_init + * \return \ref vip_status_e + * \ingroup group_global + * \version 1.0 + * \notes vip_destroy should be called in the same thread as vip_init. + */ +VIP_API +vip_status_e vip_destroy( + void + ); + +/*! \brief Queries hardware caps information. This function shold be called after calling vip_init. +*\param property, the query property enum. +*\param size, the size of value buffer. +*\param value, the value buffer of returns. +*/ +VIP_API +vip_status_e vip_query_hardware( + IN vip_query_hardware_property_e property, + IN vip_uint32_t size, + OUT void *value + ); + +/*! \brief Create a input or output buffer with specified parameters. + *\details The buffer object always takes [w, h, c, n] order, + there is no padding/hole between lines/slices/batches. + *\param [in] create_param The pointer to \ref vip_buffer_create_params_t structure. + *\param [in] size_of_param The size of create_param pointer. + *\param [out] buffer An opaque handle for the new buffer object if the request is executed successfully. + *\return \ref vip_status_e + *\ingroup group_buffer + *\version 1.0 + */ +VIP_API +vip_status_e vip_create_buffer( + IN vip_buffer_create_params_t *create_param, + IN vip_uint32_t size_of_param, + OUT vip_buffer *buffer + ); + +/*! \brief Create a buffer from user contiguous or scatter non-contiguous physical address. + the vip_buffer created by this APi doesn't support flush CPU cache in driver. + So the physical memory should be a non-cache buffer or flush CPU on Host control. 
+ not map user space logical on Linux. + *\param [in] create_param The pointer to \ref vip_buffer_create_params_t structure. + *\param [in] physical_table Physical address table. should be wraped for VIP hardware. + *\param [in] size_table The size of physical memory for each physical_table element. + *\param [in] physical_num The number of physical table element. + physical_num is 1 when create buffer from contiguous phyiscal. + *\param [out] buffer. vip lite buffer object. + *\return \ref vip_status_e +*/ +VIP_API +vip_status_e vip_create_buffer_from_physical( + IN const vip_buffer_create_params_t *create_param, + IN const vip_address_t *physical_table, + IN const vip_uint32_t *size_table, + IN vip_uint32_t physical_num, + OUT vip_buffer *buffer + ); + +/*! \brief Create a vip buffer with specified parameters. + The vip_buffer can be used to input, output, memory pool and so on. + NOTE: driver will operation CPU cache when call vip_flush_buffer API. + application should call vip_flush_buffer API if the memory handle have CPU cache. + after write data into this buffer, APP should call vip_flush_buffer(VIP_BUFFER_OPER_TYPE_FLUSH) + before CPU read date from this buffer. APP should call vip_flush_buffer(VIP_BUFFER_OPER_TYPE_INVALIDATE) + *\ when MMU disabled, create buffer from a contiguous physical memory. + *\ when MMU enabled, create buffer from a contiguous physical memory or + logical address(convert to physical in kenrel pace). + *\details The buffer object always takes [w, h, c, n] order, + there is no padding/hole between lines/slices/batches. + *\param [in] create_param The pointer to \ref vip_buffer_create_params_t structure. + *\param [in] handle_logical The logical address of the handle. + create vip buffer from the logical address. + *\param [in] the handle_size should be aligned to 64byte(vpmdCPU_CACHE_LINE_SIZE) for easy flash CPU cache. + *\param [out] buffer. vip lite buffer object. + *\return \ref vip_status_e + *\ingroup group_buffer + *\version 1.1 + */ +VIP_API +vip_status_e vip_create_buffer_from_handle( + IN const vip_buffer_create_params_t *create_param, + IN const vip_ptr handle_logical, + IN vip_uint32_t handle_size, + OUT vip_buffer *buffer + ); + +/*! \brief Create a vip buffer from user fd(file descriptor). + only support create buffer from dma-buf on Linux. + the vip_buffer created by this APi doesn't support flush CPU cache in driver. + So the dma-buf should be a non-cache buffer or flush CPU on Host control. + *\param [in] create_param The pointer to \ref vip_buffer_create_params_t structure. + *\param [in] fd user memory file descriptor. + *\param [in] memory_size The size of user memory. + the handle_size should be aligned to 64byte(vpmdCPU_CACHE_LINE_SIZE) for easy flash CPU cache. + *\param [out] buffer. vip lite buffer object. + *\return \ref vip_status_e +*/ +VIP_API +vip_status_e vip_create_buffer_from_fd( + IN const vip_buffer_create_params_t *create_param, + IN vip_uint32_t fd, + IN vip_uint32_t memory_size, + OUT vip_buffer *buffer + ); + +/*! \brief Destroy a buffer object which was created before. + *\param [in] buffer The opaque handle of buffer to be destroyed. + *\return \ref vip_status_e + *\ingroup group_buffer + *\version 1.0 + */ +VIP_API +vip_status_e vip_destroy_buffer( + IN vip_buffer buffer + ); + +/*! \brief Map a buffer to get the CPU accessible address for read or write + *\param [in] buffer The handle of buffer to be mapped. + *\return A pointer that application can use to read or write the buffer data. 
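+ * A minimal usage sketch (illustrative only; in_buffer, input_data and its byte size are
+ * assumptions made for the example):
+ * \code
+ * void *ptr = vip_map_buffer(in_buffer);
+ * if (ptr != VIP_NULL) {
+ *     memcpy(ptr, input_data, vip_get_buffer_size(in_buffer));   // fill the input tensor
+ *     vip_unmap_buffer(in_buffer);
+ *     vip_flush_buffer(in_buffer, VIP_BUFFER_OPER_TYPE_FLUSH);   // only needed for cached memory
+ * }
+ * \endcode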
+ *\ingroup group_buffer + *\version 1.0 + */ +VIP_API +void * vip_map_buffer( + IN vip_buffer buffer + ); + +/*! \brief Unmap a buffer which was mapped before. + *\param [in] buffer The handle of buffer to be unmapped. + *\return \ref vip_status_e + *\ingroup group_buffer + *\version 1.0 + */ +VIP_API +vip_status_e vip_unmap_buffer( + IN vip_buffer buffer + ); + +/*! \brief Get the size of bytes allocated for the buffer. + *\param [in] buffer The handle of buffer to be queried. + *\return \ref the size of bytes + *\ingroup group_buffer + *\version 1.0 + */ +VIP_API +vip_uint32_t vip_get_buffer_size( + IN vip_buffer buffer + ); + +/*! \brief operation the vip buffer CPU chace. flush, invalidate cache. + You should call vip_flush_buffer to flush buffer for input. + and invalidate buffer for network's output if these memories with CPU cache. +*\param buffer The vip buffer object. +*\param the type of this operation. see vip_buffer_operation_type_e. +*/ +VIP_API +vip_status_e vip_flush_buffer( + IN vip_buffer buffer, + IN vip_buffer_operation_type_e type + ); + +/*! \brief Create a network object from the given binary data. + *\details The binary is generated by the binary graph generator and it's a blob binary. + *\VIP lite Driver could interprete it to create a network object. + *\param [in] data The pointer to the binary graph. it can be a file path or a memory pointer, depending on type. + *\param [in] size_of_data The byte size of data object. the byte size of NBG buffer. + You can ignore it if create network form fil path. + *\param [in] type how to create a network object. please refer to vip_create_network_type_e enum. + *\param [out] network An opaque handle to the new network object if the request is executed successfully + *\return \ref vip_status_e + *\ingroup group_network + *\version 1.0 + */ +VIP_API +vip_status_e vip_create_network( + IN const void *data, + IN vip_uint32_t size_of_data, + IN vip_enum type, + OUT vip_network *network + ); + +/*! \brief Destroy a network object + *\details Release all resources allocated for this network. + *\param [in] network The opaque handle to the network to be destroyed + *\return \ref vip_status_e + *\ingroup group_network + *\version 1.0 + */ +VIP_API +vip_status_e vip_destroy_network( + IN vip_network network + ); + +/*! \brief Configure network property. configure network. this API should be called before calling vip_prepare_network. + *\details Configure network's layer inputs/outputs information + *\param [in] network A property \ref vip_network_property_e to be configuied. + *\return \ref vip_status_e + */ +VIP_API +vip_status_e vip_set_network( + IN vip_network network, + IN vip_enum property, + IN void *value + ); + +/*! \brief Query a property of the network object. + *\details User can use this API to get any properties from a network. + *\param [in] network The opaque handle to the network to be queried + *\param [in] property A property \ref vip_network_property_e to be queried. + *\param [out] value A pointer to memory to store the return value, + different property could return different type/size of value. + * please see comment of \ref vip_network_property_e for detail. + *\return \ref vip_status_e + *\ingroup group_network + *\version 1.0 + */ +VIP_API +vip_status_e vip_query_network( + IN vip_network network, + IN vip_enum property, + OUT void *value + ); + +/*! \brief Prepare a network to run on VIP. + *\details This function only need to be called once to prepare a network and make it ready to execute on VIP hardware. 
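+ * A minimal preparation sketch (illustrative only; the priority and timeout values are
+ * arbitrary, and the vip_set_network properties shown must be set before this call):
+ * \code
+ * vip_uint32_t priority = 255;   // 0 ~ 255, 0 is the lowest priority
+ * vip_uint32_t timeout  = 1000;  // VIP_NETWORK_PROP_SET_TIME_OUT is in milliseconds
+ * vip_set_network(network, VIP_NETWORK_PROP_SET_PRIORITY, &priority);
+ * vip_set_network(network, VIP_NETWORK_PROP_SET_TIME_OUT, &timeout);
+ * vip_status_e status = vip_prepare_network(network);
+ * \endcode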
+ * It would do all heavy-duty work, including allocate internal memory resource for this network, + deploy all operation's resource + * to internal memory pool, allocate/generate command buffer for this network, + patch command buffer for the resource in the internal memory + * allocations. If this function is called more than once, driver will silently ignore it. + If this function is executed successfully, this network is prepared. + *\param [in] network The opaque handle to the network which need to be prepared. + *\return \ref vip_status_e + *\ingroup group_network + *\version 1.0 + */ +VIP_API +vip_status_e vip_prepare_network( + IN vip_network network + ); + +/*! \brief Query a property of a specific input of a given network. + *\details The specified input/property/network must be valid, otherwise VIP_ERROR_INVALID_ARGUMENTS will be returned. + *\param [in] network The opaque handle to the network to be queried + *\param [in] index Specify which input to be queried in case there are multiple inputs in the network + *\param [in] property Specify which property application wants to know, see \ref vip_buffer_property_e + *\param [out] value Returned value, the details type/size, please refer to the comment of \ref vip_input_property_e + *\return \ref vip_status_e + *\ingroup group_network + *\version 1.0 + */ +VIP_API +vip_status_e vip_query_input( + IN vip_network network, + IN vip_uint32_t index, + IN vip_enum property, + OUT void *value + ); + +/*! \brief Query a property of a specific output of a given network. + *\details The specified output/property/network must be valid, otherwise VIP_ERROR_INVALID_ARGUMENTS will be returned. + *\param [in] network The opaque handle to the network to be queried + *\param [in] index Specify which output to be queried in case there are multiple outputs in the network + *\param [in] property Specify which property application wants to know, see \ref vip_buffer_property_e + *\param [out] value Returned value, the details type/size, please refer to the comment of \ref vip_input_property_e + *\return \ref vip_status_e + *\ingroup group_network + *\version 1.0 + */ +VIP_API +vip_status_e vip_query_output( + IN vip_network network, + IN vip_uint32_t index, + IN vip_enum property, + OUT void *value + ); + +/*! \brief Attach an input buffer to the specified index of the network. + *\details All the inputs of the network need to be attached to a valid input buffer before running a network, otherwise + * VIP_ERROR_MISSING_INPUT_OUTPUT will be returned when calling \ref vip_run_network . + When attaching an input buffer + * to the network, driver would patch the network command buffer to fill in this input buffer address. + This function could be called + * multiple times to let application update the input buffers before next network execution. + The network must be prepared by \ref vip_prepare_network before + * attaching an input. + *\param [in] network The opaque handle to a network which we want to attach an input buffer + *\param [in] index The index specify which input in the network will be set + *\param [in] input The opaque handle to a buffer which will be attached to the network. + *\return \ref vip_status_e + *\ingroup group_network + *\version 1.0 + */ +VIP_API +vip_status_e vip_set_input( + IN vip_network network, + IN vip_uint32_t index, + IN vip_buffer input + ); + +/*! \brief Attach an output buffer to the specified index of the network. 
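+ * A minimal attach-and-run sketch (illustrative only; network, in_buffer and out_buffer are
+ * assumed to have been created and the network prepared as described above):
+ * \code
+ * vip_set_input(network, 0, in_buffer);     // attach the first input
+ * vip_set_output(network, 0, out_buffer);   // attach the first output
+ * vip_status_e status = vip_run_network(network);   // blocks until inference completes
+ * \endcode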
+ *\details All the outputs of the network need to be attached to a valid output buffer before running a network, otherwise
+ * VIP_ERROR_MISSING_INPUT_OUTPUT will be returned when calling \ref vip_run_network .
+ When attaching an output buffer
+ * to the network, the driver patches the network command buffer to fill in this output buffer address.
+ This function can be called
+ * multiple times to let the application update the output buffers before the next network execution.
+ The network must be prepared by \ref vip_prepare_network before
+ * attaching an output.
+ *\param [in] network The opaque handle to a network to which we want to attach an output buffer
+ *\param [in] index The index specifying which output in the network will be set
+ *\param [in] output The opaque handle to a buffer which will be attached to the network.
+ *\return \ref vip_status_e
+ *\ingroup group_network
+ *\version 1.0
+ */
+VIP_API
+vip_status_e vip_set_output(
+    IN vip_network network,
+    IN vip_uint32_t index,
+    IN vip_buffer output
+    );
+
+/*! \brief Kick off the network execution and send the command buffer of this network to VIP hardware.
+ *\details This function can be called multiple times.
+ Every time it is called it performs inference with the currently attached
+ * input buffers and output buffers. It does not return until VIP finishes the execution.
+ If the network is not ready to execute
+ * for some reason, such as not having been prepared by \ref vip_prepare_network ,
+ it fails with the status reported.
+ *\param [in] network The opaque handle to the network to be executed.
+ *\return \ref vip_status_e
+ *\ingroup group_network
+ *\version 1.0
+ */
+VIP_API
+vip_status_e vip_run_network(
+    IN vip_network network
+    );
+
+/*! \brief Finish using this network to do inference.
+ *\details This function is paired with \ref vip_prepare_network .
+ It is suggested to be called once after \ref vip_prepare_network has been called.
+ * If it is called more than that, it will be silently ignored.
+ If the network is not prepared but finish is called, it is silently ignored too.
+ * This function releases all internal memory allocations which were allocated when
+ the network was prepared. Since the preparation of a network takes much time,
+ * it is suggested that if the network will still be used later, the application should not
+ finish the network unless there is not enough system resource remaining for other
+ * networks. The network object is still alive until it is destroyed by \ref vip_destroy_network .
+ *\param [in] network The opaque handle to the network which will be finished.
+ *\return \ref vip_status_e
+ *\ingroup group_network
+ *\version 1.0
+ */
+VIP_API
+vip_status_e vip_finish_network(
+    IN vip_network network
+    );
+
+/*! \brief Kick off the network execution and send the command buffer of this network to VIP hardware.
+*\details This function is similar to \ref vip_run_network except that it returns
+ immediately without waiting for HW to complete the commands.
+*\param [in] network The opaque handle to the network to be executed.
+*\return \ref vip_status_e
+*\ingroup group_network
+*\version 1.0
+*/
+VIP_API
+vip_status_e vip_trigger_network(
+    IN vip_network network
+    );
+
+/*! \brief Run tasks in a group; these tasks are added by vip_add_network.
+ Tasks execute in the order in which vip_add_network was called.
+*\details This function is similar to \ref vip_run_group except that it returns
+ immediately without waiting for HW to complete the commands.
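+ * A minimal non-blocking sketch (illustrative only; the group is assumed to have been
+ * populated with two networks via vip_add_network, and do_other_cpu_work() is a hypothetical
+ * placeholder for CPU-side work that overlaps with inference):
+ * \code
+ * vip_trigger_group(group, 2);   // submit tasks 0 and 1, return immediately
+ * do_other_cpu_work();           // hypothetical: overlap CPU work with VIP execution
+ * vip_wait_group(group);         // block until the submitted tasks complete
+ * \endcode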
+*\return \ref vip_status_e
+*\param group vip_group object
+*\param num The number of tasks that will be run.
+ e.g. if num is 4, task indexes 0, 1, 2 and 3 in the group will be run (inference).
+*\version 1.0
+*/
+VIP_API
+vip_status_e vip_trigger_group(
+    IN vip_group group,
+    IN vip_uint32_t num
+    );
+
+/*! \brief Explicitly wait for HW to finish executing the submitted commands.
+*\details This function waits for HW to complete the commands.
+ It should be called once the CPU needs to access the network currently being run.
+*\return \ref vip_status_e
+*\ingroup group_network
+*\version 1.0
+*/
+VIP_API
+vip_status_e vip_wait_network(
+    IN vip_network network
+    );
+
+/*! \brief Explicitly wait for HW to finish executing the submitted tasks in a group.
+*\details This function waits for HW to complete the submitted commands in the group.
+ It should be called once the CPU needs to access the group currently being run.
+*\return \ref vip_status_e
+*\ingroup group_network
+*\version 1.0
+*/
+VIP_API
+vip_status_e vip_wait_group(
+    IN vip_group group
+    );
+
+/*! \brief Cancel a network running on VIP hardware after the network is committed.
+*\details This function cancels a network running on VIP hardware.
+*\return \ref vip_status_e
+*\ingroup group_network
+*\version 1.0
+*/
+VIP_API
+vip_status_e vip_cancel_network(
+    IN vip_network network
+    );
+
+/*! \brief Give user applications more control over power management for VIP cores.
+*\details Control VIP core frequency and power status by property. see vip_power_property_e.
+*\param device_id ID of the managed device.
+*\param property Control VIP core frequency and power status by property. see vip_power_property_e.
+*\param value The value for the vip_power_property_e property.
+ Please see vip_power_frequency_t if property is set to VIP_POWER_PROPERTY_SET_FREQUENCY.
+*\return \ref vip_status_e
+*\ingroup group_network
+*\version 1.0
+*/
+VIP_API
+vip_status_e vip_power_management(
+    IN vip_uint32_t device_id,
+    IN vip_power_property_e property,
+    IN void *value
+    );
+
+/*! \brief Create a vip_group object to run multiple tasks (network or node)
+ without interrupt between each task.
+*\return \ref vip_status_e
+*\param count The maximum number of tasks supported by this group.
+*\param group Returns the vip_group object that was created.
+*\version 1.0
+*/
+VIP_API
+vip_status_e vip_create_group(
+    IN vip_uint32_t count,
+    OUT vip_group *group
+    );
+
+/*! \brief Destroy a group object which was created by vip_create_group.
+*\return \ref vip_status_e
+*\param group vip_group object.
+*\version 1.0
+*/
+VIP_API
+vip_status_e vip_destroy_group(
+    IN vip_group group
+    );
+
+/*
+@brief Set a group property to configure the group. This API should be called before calling vip_run_group.
+@param group The group object which was created by vip_create_group().
+@param property The property to be set. see vip_group_property_e.
+@param value The data to set.
+*/
+VIP_API
+vip_status_e vip_set_group(
+    IN vip_group group,
+    IN vip_enum property,
+    IN void *value
+    );
+
+/*! \brief Query a property of the group object.
+ *\param [in] group The group object which was created by vip_create_group().
+ *\param [in] property A property \ref vip_group_property_e to be queried.
+ *\param [out] value A pointer to memory to store the return value,
+ different properties could return different types/sizes of value.
+ * please see the comment of \ref vip_group_property_e for detail.
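+ * A minimal group lifecycle sketch (illustrative only; net_a and net_b are assumed to be
+ * prepared networks, and the group APIs used here are the ones declared in this header):
+ * \code
+ * vip_group group = VIP_NULL;
+ * vip_create_group(2, &group);     // room for two tasks
+ * vip_add_network(group, net_a);   // task 0
+ * vip_add_network(group, net_b);   // task 1
+ * vip_run_group(group, 2);         // run tasks 0 and 1 back to back
+ * vip_destroy_group(group);
+ * \endcode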
+ *\return \ref vip_status_e + *\ingroup group_network + *\version 1.0 + */ +VIP_API +vip_status_e vip_query_group( + IN vip_group group, + IN vip_enum property, + OUT void *value + ); + +/*! \brief. add a vip_network object into group. +*\return \ref vip_status_e +*\param group vip_group object, network be added into group. +*\param network vip_network added into group. +*\version 1.0 +*/ +VIP_API +vip_status_e vip_add_network( + IN vip_group group, + IN vip_network network + ); + +/*! \brief Weak dup a vip_network object. + The weak dup netowrk copy new command buffer. and share coefficient data with original network. + The original network can't be destroy if the weak dup network is running or will be run later. +*\param network vip_network to dup. +*\param dup_network output vip_network. +*\return \ref vip_status_e +*\version 1.0 +*/ +VIP_API +vip_status_e vip_weak_dup_network( + IN vip_network network, + OUT vip_network *dup_network + ); + +/*! \brief. run tasks in group. only issue a interrupt after tasks complete. + These tasks is added by vip_add_network. + The order of executuion of tasks is call vip_add_network. +*\return \ref vip_status_e +*\param group vip_group object +*\param the number of task will be run. + eg: num is 4, the 0, 1, 2, 3 taks index in group will be run(inference). +*\version 1.0 +*/ +VIP_API +vip_status_e vip_run_group( + IN vip_group group, + IN vip_uint32_t num + ); + +/*! \brief. change PPU engine parameters. + change local size, global size, global offset and global scale. +*\return \ref vip_status_e +*\param network The network object should be changed. +*\param param PPU parameters +*\param index The index of PPU node, not used. please set to zero. +*\version 1.0 +*/ +VIP_API +vip_status_e vip_set_ppu_param( + IN vip_network network, + IN vip_ppu_param_t *param, + IN vip_uint32_t index + ); + +#ifdef __cplusplus +} +#endif + +#endif diff --git a/viplite-tina/lib/glibc-gcc13_2_0/v1.13/inc/vip_lite_common.h b/viplite-tina/lib/glibc-gcc13_2_0/v1.13/inc/vip_lite_common.h new file mode 100644 index 0000000..ea0977c --- /dev/null +++ b/viplite-tina/lib/glibc-gcc13_2_0/v1.13/inc/vip_lite_common.h @@ -0,0 +1,236 @@ +/**************************************************************************** +* +* The MIT License (MIT) +* +* Copyright (c) 2017 - 2022 Vivante Corporation +* +* Permission is hereby granted, free of charge, to any person obtaining a +* copy of this software and associated documentation files (the "Software"), +* to deal in the Software without restriction, including without limitation +* the rights to use, copy, modify, merge, publish, distribute, sublicense, +* and/or sell copies of the Software, and to permit persons to whom the +* Software is furnished to do so, subject to the following conditions: +* +* The above copyright notice and this permission notice shall be included in +* all copies or substantial portions of the Software. +* +* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING +* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER +* DEALINGS IN THE SOFTWARE. 
+* +***************************************************************************** +* +* The GPL License (GPL) +* +* Copyright (C) 2017 - 2022 Vivante Corporation +* +* This program is free software; you can redistribute it and/or +* modify it under the terms of the GNU General Public License +* as published by the Free Software Foundation; either version 2 +* of the License, or (at your option) any later version. +* +* This program is distributed in the hope that it will be useful, +* but WITHOUT ANY WARRANTY; without even the implied warranty of +* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +* GNU General Public License for more details. +* +* You should have received a copy of the GNU General Public License +* along with this program; if not, write to the Free Software Foundation, +* Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. +* +***************************************************************************** +* +* Note: This software is released under dual MIT and GPL licenses. A +* recipient may use this file under the terms of either the MIT license or +* GPL License. If you wish to use only one license not the other, you can +* indicate your decision by deleting one of the above license notices in your +* version of this file. +* +*****************************************************************************/ + +#ifndef _VIP_COMMON_H +#define _VIP_COMMON_H + +#ifdef __cplusplus +extern "C" { +#endif + +/*! + *\brief The VIP lite API for Convolution Neural Network application on CPU/MCU/DSP type of embedded environment. + *\details This VIP lite APIs is not thread-safe if vpmdENABLE_MULTIPLE_TASK is set to 0, + user must guarantee to call these APIs in a proper way. + But defines vpmdENABLE_MULTIPLE_TASK 1, VIPLite can support multiple task(multiple thread/process). + and it's thread-safe. + *Memory allocation and file io functions used inside driver internal would depend on working enviroment. + *\defgroup group_global Data Type Definitions and Global APIs + *\brief Data type definition and global APIs that are used in the VIP lite + *\defgroup group_buffer Buffer API + *\brief The API to manage input/output buffers + *\defgroup group_network Network API + *\brief The API to manage networks + */ + +/*! \brief An 8-bit unsigned value. + * \ingroup group_global + * \version 1.0 + */ +typedef unsigned char vip_uint8_t; + +/*! \brief An 16-bit unsigned value. + * \ingroup group_global + * \version 1.0 + */ +typedef unsigned short vip_uint16_t; + +/*! \brief An 32-bit unsigned value. + * \ingroup group_global + * \version 1.0 + */ +typedef unsigned int vip_uint32_t; + +/*! \brief An 64-bit unsigned value. + * \ingroup group_global + * \version 1.0 + */ +typedef unsigned long long vip_uint64_t; + +/*! \brief An 8-bit signed value. + * \ingroup group_global + * \version 1.0 + */ +typedef signed char vip_int8_t; + +/*! \brief An 16-bit signed value. + * \ingroup group_global + * \version 1.0 + */ +typedef signed short vip_int16_t; + +/*! \brief An 32-bit signed value. + * \ingroup group_global + * \version 1.0 + */ +typedef signed int vip_int32_t; + +/*! \brief An 64-bit signed value. + * \ingroup group_global + * \version 1.0 + */ +typedef signed long long vip_int64_t; + +/*! \brief An 8 bit ASCII character. + * \ingroup group_global + * \version 1.0 + */ +typedef char vip_char_t; + +/*! \brief An 32 bit float value. + * \ingroup group_global + * \version 1.0 + */ +typedef float vip_float_t; + +/*! 
\brief Sets the standard enumeration type size to be a fixed quantity. + * \ingroup group_global + * \version 1.0 + */ +typedef vip_int32_t vip_enum; + +/*! \brief a void pointer. + * \ingroup group_global + * \version 1.0 + */ +typedef void* vip_ptr; + +/*! \brief A 64-bit float value (aka double). + * \ingroup group_basic_features + */ +typedef double vip_float64_t; + +/*! \brief address type. + * \ingroup group_global + * \version 1.0 + */ +typedef unsigned long long vip_address_t; + +/*! \brief A zero value for pointer + *\ingroup group_global + *\version 1.0 + */ +#ifndef VIP_NULL +#define VIP_NULL 0 +#endif + +/***** Helper Macros. *****/ +#define VIP_API + +#define IN +#define OUT + +/*! \brief A invalid value if a property is not avaialbe for the query. + *\ingroup group_global + *\version 1.0 + */ +#define VIP_INVALID_VALUE ~0UL + +/*! \brief A Boolean value. + *\details This allows 0 to be FALSE, as it is in C, and any non-zero to be TRUE. + *\ingroup group_global + *\version 1.0 + */ +typedef enum _vip_bool_e { + /*! \brief The "false" value. */ + vip_false_e = 0, + /*! \brief The "true" value. */ + vip_true_e, +} vip_bool_e; + +/*! \brief The enumeration of all status codes. + * \ingroup group_global + * \version 1.0 + */ +typedef enum _vip_status +{ + /*!< \brief Indicates the network is canceld */ + VIP_ERROR_CANCELED = -15, + /*!< \brief Indicates the hardware is recovery done after hang */ + VIP_ERROR_RECOVERY = -14, + /*!< \brief Indicates the hardware is stoed */ + VIP_ERROR_POWER_STOP = -13, + /*!< \brief Indicates the hardware is in power off status */ + VIP_ERROR_POWER_OFF = -12, + /*!< \brief Indicates the failure */ + VIP_ERROR_FAILURE = -11, + /*!< \brief Indicates the binary is not compatible with the current runtime hardware */ + VIP_ERROR_NETWORK_INCOMPATIBLE = -10, + /*!< \brief Indicates the network is not prepared so current function call can't go through */ + VIP_ERROR_NETWORK_NOT_PREPARED = -9, + /*!< \brief Indicates the network misses either input or output when running the network */ + VIP_ERROR_MISSING_INPUT_OUTPUT = -8, + /*!< \brief Indicates the network binary is invalid */ + VIP_ERROR_INVALID_NETWORK = -7, + /*!< \brief Indicates driver is running out of memory of system */ + VIP_ERROR_OUT_OF_MEMORY = -6, + /*!< \brief Indicates there is no enough resource */ + VIP_ERROR_OUT_OF_RESOURCE = -5, + /*!< \brief Indicates it's supported by driver implementation */ + VIP_ERROR_NOT_SUPPORTED = -4, + /*!< \brief Indicates some arguments are not valid */ + VIP_ERROR_INVALID_ARGUMENTS = -3, + /*!< \brief Indicates there are some IO related error */ + VIP_ERROR_IO = -2, + /*!< \brief Indicates VIP timeout, could be VIP stuck somewhere */ + VIP_ERROR_TIMEOUT = -1, + /*!< \brief Indicates the execution is successfuly */ + VIP_SUCCESS = 0, +} vip_status_e; + +#ifdef __cplusplus +} +#endif + +#endif diff --git a/viplite-tina/lib/glibc-gcc13_2_0/v1.13/libVIPlite.so b/viplite-tina/lib/glibc-gcc13_2_0/v1.13/libVIPlite.so new file mode 100644 index 0000000..a66cef4 Binary files /dev/null and b/viplite-tina/lib/glibc-gcc13_2_0/v1.13/libVIPlite.so differ diff --git a/viplite-tina/lib/glibc-gcc13_2_0/v1.13/libVIPuser.so b/viplite-tina/lib/glibc-gcc13_2_0/v1.13/libVIPuser.so new file mode 100644 index 0000000..9ed0bb7 Binary files /dev/null and b/viplite-tina/lib/glibc-gcc13_2_0/v1.13/libVIPuser.so differ diff --git a/viplite-tina/lib/glibc-gcc13_2_0/v2.0/inc/vip_lite.h b/viplite-tina/lib/glibc-gcc13_2_0/v2.0/inc/vip_lite.h new file mode 100644 index 0000000..fced603 --- 
/dev/null +++ b/viplite-tina/lib/glibc-gcc13_2_0/v2.0/inc/vip_lite.h @@ -0,0 +1,1112 @@ +/******************************************************************************\ +|* Copyright (c) 2017-2024 by Vivante Corporation. All Rights Reserved. *| +|* *| +|* The material in this file is confidential and contains trade secrets of *| +|* of Vivante Corporation. This is proprietary information owned by Vivante *| +|* Corporation. No part of this work may be disclosed, reproduced, copied, *| +|* transmitted, or used in any way for any purpose, without the express *| +|* written permission of Vivante Corporation. *| +|* *| +\******************************************************************************/ + +#ifndef _VIP_LITE_H +#define _VIP_LITE_H + +#include "vip_lite_common.h" + +#ifdef __cplusplus +extern "C" { +#endif + +/*! + *\brief The VIP lite API for Convolution Neural Network application on CPU/MCU/DSP type of + embedded environment. + *\details This VIP lite APIs is not thread-safe if vpmdENABLE_MULTIPLE_TASK is set to 0, + user must guarantee to call these APIs in a proper way. + But defines vpmdENABLE_MULTIPLE_TASK 1, VIPLite can support multiple task + (multiple thread/process). + and it's thread-safe. + *Memory allocation and file io functions used inside driver internal would + depend on working enviroment. + + *\defgroup group_global Data Type Definitions and Global APIs, + *\ brief Data type definition and global APIs that are used in the VIPLite + *\defgroup group_buffer Buffer API, + The API to manage input/output buffers + *\defgroup group_network Network API + The API to manage networks + */ + +/* !\brief The data format list for buffer + * \ingroup group_buffer + * \version 2.0 + */ +typedef enum _vip_buffer_format_e +{ + /*! \brief A float type of buffer data */ + VIP_BUFFER_FORMAT_FP32 = 0, + /*! \brief A half float type of buffer data */ + VIP_BUFFER_FORMAT_FP16 = 1, + /*! \brief A 8 bit unsigned integer type of buffer data */ + VIP_BUFFER_FORMAT_UINT8 = 2, + /*! \brief A 8 bit signed integer type of buffer data */ + VIP_BUFFER_FORMAT_INT8 = 3, + /*! \brief A 16 bit unsigned integer type of buffer data */ + VIP_BUFFER_FORMAT_UINT16 = 4, + /*! \brief A 16 signed integer type of buffer data */ + VIP_BUFFER_FORMAT_INT16 = 5, + /*! \brief A char type of data */ + VIP_BUFFER_FORMAT_CHAR = 6, + /*! \brief A bfloat 16 type of data */ + VIP_BUFFER_FORMAT_BFP16 = 7, + /*! \brief A 32 bit integer type of data */ + VIP_BUFFER_FORMAT_INT32 = 8, + /*! \brief A 32 bit unsigned signed integer type of buffer */ + VIP_BUFFER_FORMAT_UINT32 = 9, + /*! \brief A 64 bit signed integer type of data */ + VIP_BUFFER_FORMAT_INT64 = 10, + /*! \brief A 64 bit unsigned integer type of data */ + VIP_BUFFER_FORMAT_UINT64 = 11, + /*! \brief A 64 bit float type of buffer data */ + VIP_BUFFER_FORMAT_FP64 = 12, + /*! \brief A signed 4bits tensor */ + VIP_BUFFER_FORMAT_INT4 = 13, + /*! \brief A unsigned 4bits tensor */ + VIP_BUFFER_FORMAT_UINT4 = 14, + /*! \brief A bool 8 bit tensor */ + VIP_BUFFER_FORMAT_BOOL8 = 16, +} vip_buffer_format_e; + +/* !\brief The quantization format list for buffer data + * \ingroup group_buffer + * \version 1.0 + */ +typedef enum _vip_buffer_quantize_format_e +{ + /*! \brief Not quantized format */ + VIP_BUFFER_QUANTIZE_NONE = 0, + /*! \brief A quantization data type which specifies + the fixed point position for whole tensor.*/ + VIP_BUFFER_QUANTIZE_DYNAMIC_FIXED_POINT = 1, + /*! 
\brief A quantization data type which has scale value and + zero point to match with TF and Android NN API for whole tensor. */ + VIP_BUFFER_QUANTIZE_TF_ASYMM = 2, + /*! \brief A max vaule support quantize format */ + VIP_BUFFER_QUANTIZE_MAX, +} vip_buffer_quantize_format_e; + +/* !\brief The memory type for vip buffer + * \ingroup group_buffer + * \version 1.2.2 + */ +typedef enum _vip_buffer_memory_type_e +{ + /*! \brief Not memory type. default memory type. + use for allocate video memory from driver calling vip_create_buffer. + */ + VIP_BUFFER_MEMORY_TYPE_DEFAULT = 0x000, + /*! \brief Create a VIP buffer from the Host (logical, physical). */ + VIP_BUFFER_MEMORY_TYPE_HOST = 0x001, + /*! \brief Create a VIP buffer from DMA_BUF */ + VIP_BUFFER_MEMORY_TYPE_DMA_BUF = 0x003, + /*! \brief The max memory type */ + VIP_BUFFER_MEMORY_TYPE_MAX, +} vip_buffer_memory_type_e; + +/* \brief The list of create network type + * \ingroup group_network + * \version 1.0 + */ +typedef enum _vip_create_network_type_e +{ + /*!< \brief NONE */ + VIP_CREATE_NETWORK_FROM_NONE = 0x00, + /*!< \brief Create network from a file path */ + VIP_CREATE_NETWORK_FROM_FILE = 0x01, + /*!< \brief Create network from buffer, NBG has been loaded in this buffer before */ + VIP_CREATE_NETWORK_FROM_MEMORY = 0x02, + /*!< \brief Create network from flash device or user memory. + The *data param of vip_create_network means are: + 1. If the NPU's MMU is enabled, the *data means that the CPU's logical address which access the memory. + 2. If the NPU's MMU is disabled, the *data means that the NPU's phyiscal address which access the memory. + This is for DDR-less project. + 1. Load NBG from flash device. The NBG file should be placed to flash device before running VIPLite. + Pass the NBG size and the location of NBG in flash device to this API. + 2. The NBG file pre-load into user memory which alloc via malloc function, or contiguous physical. + Advantage: coeff data is not copied again, save more memory than create_network_from_memory type. + Need enable VIP's MMU when works on Linux. + */ + VIP_CREATE_NETWORK_FROM_FLASH = 0x04, + + VIP_CREATE_NETWORK_MAX, +} vip_create_network_type_e; + +/* \brief The list of duplicate network type. + The original network can't be destroy if the dup network is running or will be run later. + * \ingroup group_network + */ +typedef enum _vip_dup_network_type_e +{ + /*!< \brief NONE */ + VIP_DUP_NONE = 0x00, + /*!< \brief Duplicate command for sharing weight with another network + 1. Sharing weight with original network. + 2. The original network has the same input/output shape as the dup network. + 3. Only the input/output addresses of network are difference between the + original network with dup network. + */ + VIP_DUP_FOR_CMD_BY_NETWORK = 0x01, + /*!< \brief Duplicate command for sharing weight with difference network(NBGs) + 1. Sharing weight with original network. + 2. Share weight between networks with the same network topology. + For example, to support different shapes of input, such as 640x480, 480x640 and 640x960. 
+ */ + VIP_DUP_FOR_CMD_BY_NBG = 0x02, + + /*!< \brief Indicate that the dup network is duplicated from NBG file patch */ + VIP_DUP_FROM_NBG_FILE = 0x100, + /*!< \brief Indicate that the dup network is duplicated from NBG in memory */ + VIP_DUP_FROM_NBG_MEMORY = 0x200, + /*!< \brief Indicate that the dup network is duplicated from NBG in flash */ + VIP_DUP_FROM_NBG_FLASH = 0x400, + /*!< \brief Indicate that the dup network is duplicated from network object */ + VIP_DUP_FROM_NETWORK = 0x800, + + VIP_DUP_NETWORK_MAX, +} vip_dup_network_type_e; + +/* \brief An enumeration property that specifies which power management operation to execute. + * \ingroup group_global + * \version 1.2 + */ +typedef enum _vip_power_property_e +{ + VIP_POWER_PROPERTY_NONE = 0x0000, + /*!< \brief specify the VIP frequency */ + VIP_POWER_PROPERTY_SET_FREQUENCY = 0x0001, + /*!< \brief power off VIP hardware */ + VIP_POWER_PROPERTY_OFF = 0x0002, + /*!< \brief power on VIP hardware */ + VIP_POWER_PROPERTY_ON = 0x0004, + /*!< \brief stop VIP perform network */ + VIP_POWER_PROPERTY_STOP = 0x0008, + /*!< \brief start VIP perform network */ + VIP_POWER_PROPERTY_START = 0x0010, + VIP_POWER_PROPERTY_MAX +} vip_power_property_e; + +/* \brief query hardware caps property + * \ingroup group_global + */ +typedef enum _vip_query_hardware_property_e +{ + /*!< \brief the customer ID of this VIP/NPU, the returned value is vip_uint32_t */ + VIP_QUERY_HW_PROP_CID = 0, + /*!< \brief the number of deivce, the returned value is vip_uint32_t */ + VIP_QUERY_HW_PROP_DEVICE_COUNT = 1, + /*!< \brief the number of core count for each device, the returned value is + vip_uint32_t * device_count */ + VIP_QUERY_HW_PROP_CORE_COUNT_EACH_DEVICE = 2, + VIP_QUERY_HW_PROP_MAX, +} vip_query_hardware_property_e; + +/* \brief The list of properties of a network. + * \ingroup group_network + * \version 1.0 + */ +typedef enum _vip_network_property_e +{ + /* query network */ + /*!< \brief The number of layers in this network, the returned value is vip_uint32_t */ + VIP_NETWORK_PROP_LAYER_COUNT = 0, + /*!< \brief The number of input in this network, the returned value is vip_uint32_t*/ + VIP_NETWORK_PROP_INPUT_COUNT = 1, + /*!< \brief The number of output in this network, the returned value is vip_uint32_t*/ + VIP_NETWORK_PROP_OUTPUT_COUNT = 2, + /*!< \brief The network name, the returned value is vip_char_t[64] */ + VIP_NETWORK_PROP_NETWORK_NAME = 3, + /*!< \brief address information of wait-link, command, input-output buffers for viplite-Agent trigger, + not used if only use viplite. the returned value is \ref vip_address_info_t + */ + VIP_NETWORK_PROP_ADDRESS_INFO = 4, + /*!< \brief The size of memory pool, the returned value is vip_uint32_t*/ + VIP_NETWORK_PROP_MEMORY_POOL_SIZE = 6, + /*!< \brief The network profling data, the returned value is vip_inference_profile_t */ + VIP_NETWORK_PROP_PROFILING = 7, + /*!< \brief The the number of core for this network, the returned value is vip_uint8_t */ + VIP_NETWORK_PROP_CORE_COUNT = 8, + /*!< \brief get the information of output of dumped layer. the returned value is vip_nld_output_t */ + VIP_NETWORK_PROP_GET_LAYER_DUMP_OUTPUT = 9, + + + /* set network */ + /* set network property should be called before vip_prepare_network */ + /*!< \brief set network to enable change PPU parameters feature for this vip_network. + the vip_set_network value param used to indicates disable or enable this feature. + vip_uint32_t *value is 1, enable change ppu param. 
+ vip_uint32_t *value is 0, disable change ppu param */ + VIP_NETWORK_PROP_CHANGE_PPU_PARAM = 64, + /*!< \brief set memory pool buffer for network. networks can share a memory pool buffer. + the set value is \ref vip_buffer */ + VIP_NETWORK_PROP_SET_MEMORY_POOL = 65, + /*!< \brief set device index for network. networks can be submitted this vip device. */ + VIP_NETWORK_PROP_SET_DEVICE_ID = 66, /* will be rejected later */ + VIP_NETWORK_PROP_SET_DEVICE_INDEX = 66, + /*!< \brief set priority of network. 0 ~ 255, 0 indicates the lowest priority. */ + VIP_NETWORK_PROP_SET_PRIORITY = 67, + /*!< \brief set time out of network. unit: ms . the value is vip_uint32_t */ + VIP_NETWORK_PROP_SET_TIME_OUT = 68, + /*!< \brief set a memory for partial of full pre-load coeff data to this memory. + This memory can't be freed until the network is released. the value is vip_buffer */ + VIP_NETWORK_PROP_SET_COEFF_MEMORY = 69, + /*!< \brief set core index for network. network start with which core of device. + the value is vip_buffer data type */ + VIP_NETWORK_PROP_SET_CORE_INDEX = 70, + /*!< \brief enable probe mode performance function, should be called before vip_prepare_network. + * the value is vip_bool_e data type, set 1 to enable NPD */ + VIP_NETWORK_PROP_SET_ENABLE_NPD = 71, + /*!< \brief enable preload coeff into vipsram. the value is vip_bool_e data type */ + VIP_NETWORK_PROP_SET_VIPSRAM_PRELOAD = 72, + /*!< \brief set layer ids that need to be layer dumped. the value is vip_nld_layer_id_t */ + VIP_NETWORK_PROP_SET_LAYER_DUMP_ID = 73, + +} vip_network_property_e; + +/* \brief The list of properties of a group. + * \ingroup group_network + * \version 1.0 + */ +typedef enum _vip_group_property_e +{ + /* query group */ + /*!< \brief The group profling data, the returned value is vip_inference_profile_t */ + VIP_GROUP_PROP_PROFILING = 0, + + /* set group */ + /* set group property should be called before vip_add_network() + and all network in group runs on same device */ + /*!< \brief set device index for group. networks in group can be submitted this vip device. + * This prop should be called before vip_prepare_network */ + VIP_GROUP_PROP_SET_DEVICE_ID = 64, /* will be rejected later */ + VIP_GROUP_PROP_SET_DEVICE_INDEX = 64, + /*!< \brief set core index for group. networks in group start with which core of current device. + * This prop should be called before vip_prepare_network */ + VIP_GROUP_PROP_SET_CORE_INDEX = 65, + /*!< \brief setting inference timeout value for group. unit: ms */ + VIP_GROUP_PROP_SET_TIME_OUT = 68, +} vip_group_property_e; + +/* \brief The list of property of an input or output. 
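+ * \details A minimal query sketch (illustrative only; network is assumed to be a prepared
+ * vip_network, only its first input is inspected, and vip_query_input() is used with the
+ * same signature as in the v1.13 header), showing how these properties can fill a
+ * \ref vip_buffer_create_params_t before creating a matching buffer:
+ * \code
+ * vip_buffer buffer = VIP_NULL;
+ * vip_buffer_create_params_t params = { 0 };
+ * vip_query_input(network, 0, VIP_BUFFER_PROP_NUM_OF_DIMENSION, &params.num_of_dims);
+ * vip_query_input(network, 0, VIP_BUFFER_PROP_SIZES_OF_DIMENSION, params.sizes);
+ * vip_query_input(network, 0, VIP_BUFFER_PROP_DATA_FORMAT, &params.data_format);
+ * vip_query_input(network, 0, VIP_BUFFER_PROP_QUANT_FORMAT, &params.quant_format);
+ * vip_create_buffer(&params, sizeof(params), &buffer);
+ * \endcode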
+ * \ingroup group_buffer + * \version 1.0 + */ +typedef enum _vip_buffer_property_e +{ + /*!< \brief The quantization format, the returned value is \ref + vip_buffer_quantize_format_e */ + VIP_BUFFER_PROP_QUANT_FORMAT = 0, + /*!< \brief The number of dimension for this input, the returned value is vip_uint32_t */ + VIP_BUFFER_PROP_NUM_OF_DIMENSION = 1, + /*!< \brief The size of each dimension for this input, + the returned value is vip_uint32_t * num_of_dim */ + VIP_BUFFER_PROP_SIZES_OF_DIMENSION = 2, + /*!< \brief The data format for this input, + the returned value is \ref vip_buffer_format_e */ + VIP_BUFFER_PROP_DATA_FORMAT = 3, + /*!< \brief The position of fixed point for dynamic fixed point, + the returned value is vip_uint8_t */ + VIP_BUFFER_PROP_FIXED_POINT_POS = 4, + /*!< \brief The scale value for TF quantization format, the returned value is vip_float_t */ + VIP_BUFFER_PROP_TF_SCALE = 5, + /*!< \brief The zero point for TF quantization format, the returned value is vip_uint32_t */ + VIP_BUFFER_PROP_TF_ZERO_POINT = 6, + /*!< \brief The name for network's inputs and outputs, the returned value is vip_char_t[64] */ + VIP_BUFFER_PROP_NAME = 7, +} vip_buffer_property_e; + +/* \brief The list of property of operation vip_buffer type. + * \ingroup group_buffer + * \version 1.3 + */ +typedef enum _vip_buffer_operation_type_e +{ + /*!< \brief None operation */ + VIP_BUFFER_OPER_TYPE_NONE = 0, + /*!< \brief Flush the vip buffer */ + VIP_BUFFER_OPER_TYPE_FLUSH = 1, + /*!< \brief invalidate the vip buffer */ + VIP_BUFFER_OPER_TYPE_INVALIDATE = 2, + VIP_BUFFER_OPER_TYPE_MAX, +} vip_buffer_operation_type_e; + +typedef struct _vip_network *vip_network; +typedef struct _vip_buffer *vip_buffer; +typedef struct _vip_group *vip_group; + + +/*! \brief Input parameter for vip_create_buffer + * \ingroup group_buffer + */ +typedef struct _vip_buffer_create_params_t +{ + /*!< \brief The number of dimensions specified in *sizes*/ + vip_uint32_t num_of_dims; + /*!< \brief The pointer to an array of dimension */ + vip_uint32_t sizes[6]; + /*!< \brief Data format for the tensor, see \ref vip_buffer_format_e */ + vip_enum data_format; + /*!< \brief Quantized format see \ref vip_buffer_quantize_format_e . */ + vip_enum quant_format; + /*\ref vip_uint32_t + * \ingroup group_global + */ +VIP_API +vip_uint32_t vip_get_version( + void + ); + +/*! \brief Initial VIP Hardware, VIP lite software environment and power on VIP hardware. + * \details when vpmdENABLE_MULTIPLE_TASK set to 0, + This function should be only called once before using VIP hardware if. + when vpmdENABLE_MULTIPLE_TASK set to 1, + vip_init can be called multiple times, but should paired with vip_destroy. + vip_init should be called in every process. + only need call vip_init once in multi-thread. + * VIP lite driver would construct some global variable for this call.Also + * it will reset VIP and initialize VIP hardware to a ready state to accept jobs. + * \return \ref vip_status_e + * \ingroup group_global + * \version 1.0 + */ +VIP_API +vip_status_e vip_init( + void + ); + +/*! \brief Terminate VIP lite driver and shut down VIP hardware. + * \details This function should be the last function called by application. + vip_destroy should paired with vip_init called. + * After it, no VIP lite API should be called except \ref vip_init + * \return \ref vip_status_e + * \ingroup group_global + * \version 1.0 + * \notes vip_destroy should be called in the same thread as vip_init. + */ +VIP_API +vip_status_e vip_destroy( + void + ); + +/*! 
+/*! \brief Queries hardware capability information. This function should be called after calling vip_init.
+ *\param property The query property enum.
+ *\param size The size of the value buffer.
+ *\param value The buffer that receives the returned value.
+ *\ingroup group_global
+*/
+VIP_API
+vip_status_e vip_query_hardware(
+    IN vip_query_hardware_property_e property,
+    IN vip_uint32_t size,
+    OUT void *value
+    );
+
+/*! \brief Create an input or output buffer with the specified parameters.
+ *\details The buffer object always takes [w, h, c, n] order,
+    there is no padding/hole between lines/slices/batches.
+ *\param [in] create_param The pointer to a \ref vip_buffer_create_params_t structure.
+ *\param [in] size_of_param The size of the structure pointed to by create_param.
+ *\param [out] buffer An opaque handle for the new buffer object if the request is executed successfully.
+ *\return \ref vip_status_e
+ *\ingroup group_buffer
+ *\version 1.0
+ */
+VIP_API
+vip_status_e vip_create_buffer(
+    IN vip_buffer_create_params_t *create_param,
+    IN vip_uint32_t size_of_param,
+    OUT vip_buffer *buffer
+    );
+
+/*
+@brief Create a buffer used for the network's inputs and outputs;
+    see also the vip_create_buffer_from_handle function.
+@param create_param The parameters of the buffer to be created.
+@param size_of_param The size of the create parameters.
+@param buffer The returned buffer object.
+*/
+VIP_API
+vip_status_e vip_create_buffer_cache(
+    IN vip_buffer_create_params_t *create_param,
+    IN vip_uint32_t size_of_param,
+    OUT vip_buffer *buffer
+    );
+
+/*! \brief Create a buffer from a user contiguous or scattered non-contiguous physical address.
+    The vip_buffer created by this API doesn't support flushing the CPU cache in the driver,
+    so the physical memory should be a non-cached buffer, or the CPU cache should be flushed under host control.
+    The user space logical address is not mapped on Linux.
+ *\param [in] create_param The pointer to a \ref vip_buffer_create_params_t structure.
+ *\param [in] physical_table Physical address table for the VIP; should be wrapped for the VIP hardware.
+ *\param [in] size_table The size of the physical memory for each physical_table element.
+ *\param [in] physical_num The number of physical table elements.
+    physical_num is 1 when creating the buffer from contiguous physical memory.
+ *\param [out] buffer VIP lite buffer object.
+ *\return \ref vip_status_e
+ *\ingroup group_buffer
+*/
+VIP_API
+vip_status_e vip_create_buffer_from_physical(
+    IN const vip_buffer_create_params_t *create_param,
+    IN const vip_address_t *physical_table,
+    IN const vip_uint32_t *size_table,
+    IN vip_uint32_t physical_num,
+    OUT vip_buffer *buffer
+    );
+
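Editorial note (not part of the diff): a sketch of wrapping a contiguous physical allocation with vip_create_buffer_from_physical. The data/quantization format member names are assumptions (vip_buffer_format_e and vip_buffer_quantize_format_e are referenced above but their members are not listed in this diff), the physical address is platform specific, and vip_buffer_create_params_t may contain more members than shown above.

#include "vip_lite.h"   /* assumed header name */

/* Wrap a contiguous, non-cached, VIP-visible physical region as a 224x224x3 input (sketch). */
static vip_status_e wrap_physical_input(vip_address_t phys, vip_buffer *out)
{
    vip_buffer_create_params_t params = { 0 };
    vip_address_t physical_table[1];
    vip_uint32_t  size_table[1];

    params.num_of_dims  = 4;
    params.sizes[0]     = 224;   /* w */
    params.sizes[1]     = 224;   /* h */
    params.sizes[2]     = 3;     /* c */
    params.sizes[3]     = 1;     /* n */
    params.data_format  = VIP_BUFFER_FORMAT_UINT8;    /* assumed member of vip_buffer_format_e */
    params.quant_format = VIP_BUFFER_QUANTIZE_NONE;   /* assumed member of vip_buffer_quantize_format_e */

    physical_table[0] = phys;
    size_table[0]     = 224 * 224 * 3;

    /* physical_num is 1 because the memory is contiguous */
    return vip_create_buffer_from_physical(&params, physical_table, size_table, 1, out);
}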
+/*! \brief Create a vip buffer with the specified parameters.
+    The vip_buffer can be used as an input, an output, a memory pool and so on.
+    NOTE: The driver only operates on the CPU cache when the vip_flush_buffer API is called,
+    so the application should call the vip_flush_buffer API if the memory handle has a CPU cache:
+    after writing data into this buffer, the application should call vip_flush_buffer(VIP_BUFFER_OPER_TYPE_FLUSH);
+    before the CPU reads data from this buffer, the application should call vip_flush_buffer(VIP_BUFFER_OPER_TYPE_INVALIDATE).
+ *\ When the MMU is disabled, the buffer is created from contiguous physical memory.
+ *\ When the MMU is enabled, the buffer is created from contiguous physical memory or
+    from a logical address (converted to a physical address in kernel space).
+ *\details The buffer object always takes [w, h, c, n] order,
+    there is no padding/hole between lines/slices/batches.
+ *\param [in] create_param The pointer to a \ref vip_buffer_create_params_t structure.
+ *\param [in] handle_logical The logical address of the handle;
+    the vip buffer is created from this logical address.
+ *\param [in] handle_size The size of the handle; it should be aligned to 64 bytes (vpmdCPU_CACHE_LINE_SIZE)
+    to ease flushing the CPU cache.
+ *\param [out] buffer VIP lite buffer object.
+ *\return \ref vip_status_e
+ *\ingroup group_buffer
+ *\version 1.1
+*/
+VIP_API
+vip_status_e vip_create_buffer_from_handle(
+    IN const vip_buffer_create_params_t *create_param,
+    IN const vip_ptr handle_logical,
+    IN vip_uint32_t handle_size,
+    OUT vip_buffer *buffer
+    );
+
+/*! \brief Create a vip buffer from a user fd (file descriptor).
+    Only creating a buffer from a dma-buf is supported on Linux.
+    The vip_buffer created by this API doesn't support flushing the CPU cache in the driver,
+    so the dma-buf should be a non-cached buffer, or the CPU cache should be flushed under host control.
+ *\param [in] create_param The pointer to a \ref vip_buffer_create_params_t structure.
+ *\param [in] fd The user memory file descriptor.
+ *\param [in] memory_size The size of the user memory;
+    it should be aligned to 64 bytes (vpmdCPU_CACHE_LINE_SIZE) to ease flushing the CPU cache.
+ *\param [out] buffer VIP lite buffer object.
+ *\return \ref vip_status_e
+ *\ingroup group_buffer
+*/
+VIP_API
+vip_status_e vip_create_buffer_from_fd(
+    IN const vip_buffer_create_params_t *create_param,
+    IN vip_uint32_t fd,
+    IN vip_uint32_t memory_size,
+    OUT vip_buffer *buffer
+    );
+
+/*! \brief Destroy a buffer object which was created before.
+ *\param [in] buffer The opaque handle of the buffer to be destroyed.
+ *\return \ref vip_status_e
+ *\ingroup group_buffer
+ *\version 1.0
+*/
+VIP_API
+vip_status_e vip_destroy_buffer(
+    IN vip_buffer buffer
+    );
+
+/*! \brief Map a buffer to get a CPU accessible address for reading or writing.
+ *\param [in] buffer The handle of the buffer to be mapped.
+ *\return A pointer that the application can use to read or write the buffer data.
+ *\ingroup group_buffer
+ *\version 1.0
+*/
+VIP_API
+void * vip_map_buffer(
+    IN vip_buffer buffer
+    );
+
+/*! \brief Unmap a buffer which was mapped before.
+ *\param [in] buffer The handle of the buffer to be unmapped.
+ *\return \ref vip_status_e
+ *\ingroup group_buffer
+ *\version 1.0
+*/
+VIP_API
+vip_status_e vip_unmap_buffer(
+    IN vip_buffer buffer
+    );
+
+/*! \brief Get the size in bytes allocated for the buffer.
+ *\param [in] buffer The handle of the buffer to be queried.
+ *\return The size in bytes.
+ *\ingroup group_buffer
+ *\version 1.0
+*/
+VIP_API
+vip_uint32_t vip_get_buffer_size(
+    IN vip_buffer buffer
+    );
+
+/*! \brief Operate on the vip buffer's CPU cache: flush or invalidate the cache.
+    You should call vip_flush_buffer to flush the buffer for the network's inputs
+    and to invalidate the buffer for the network's outputs if these memories have a CPU cache.
+*\param buffer The vip buffer object.
+*\param type The type of this operation, see vip_buffer_operation_type_e.
+*\ingroup group_buffer
+*/
+VIP_API
+vip_status_e vip_flush_buffer(
+    IN vip_buffer buffer,
+    IN vip_buffer_operation_type_e type
+    );
+
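Editorial note (not part of the diff): a sketch combining vip_create_buffer_from_handle, vip_map_buffer and vip_flush_buffer for cacheable user memory; it assumes C11 aligned_alloc from <stdlib.h>, and the caller is responsible for keeping the user handle alive and freeing it after vip_destroy_buffer.

#include <stdlib.h>
#include <string.h>
#include "vip_lite.h"   /* assumed header name */

/* Create a buffer backed by 64-byte-aligned, cacheable user memory and zero-fill it (sketch). */
static vip_buffer create_cacheable_input(const vip_buffer_create_params_t *params, vip_uint32_t size)
{
    vip_uint32_t aligned_size = (size + 63u) & ~63u;   /* vpmdCPU_CACHE_LINE_SIZE alignment */
    void *handle = aligned_alloc(64, aligned_size);
    vip_buffer buffer = VIP_NULL;
    void *data;

    if (handle == NULL) {
        return VIP_NULL;
    }
    if (vip_create_buffer_from_handle(params, handle, aligned_size, &buffer) != VIP_SUCCESS) {
        free(handle);
        return VIP_NULL;
    }

    data = vip_map_buffer(buffer);
    memset(data, 0, vip_get_buffer_size(buffer));          /* fill real input data here */
    vip_flush_buffer(buffer, VIP_BUFFER_OPER_TYPE_FLUSH);  /* flush before the VIP reads the buffer */
    vip_unmap_buffer(buffer);
    return buffer;
}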
+/*! \brief Create a network object from the given binary data.
+ *\details The binary is generated by the binary graph generator and it's a blob binary.
+    The VIP lite driver can interpret it to create a network object.
+ *\param [in] data The pointer to the binary graph;
+    it can be a file path or a memory pointer, depending on type.
+ *\param [in] size_of_data The byte size of the data object, i.e. the byte size of the NBG buffer.
+    It can be ignored if the network is created from a file path.
+ *\param [in] type How to create the network object; please refer to the vip_create_network_type_e enum.
+ *\param [out] network An opaque handle to the new network object if the request is executed successfully
+ *\return \ref vip_status_e
+ *\ingroup group_network
+ *\version 1.0
+ */
+VIP_API
+vip_status_e vip_create_network(
+    IN const void *data,
+    IN vip_uint32_t size_of_data,
+    IN vip_create_network_type_e type,
+    OUT vip_network *network
+    );
+
+/*
+@brief Duplicate a network for sharing weights.
+    The original network can't be destroyed while the duplicated network is running or will be run later.
+@param data NBG file path or the buffer of the NBG;
+    the meaning of data depends on type (vip_dup_network_type_e): from file/memory/flash.
+    The data object can be set to VIP_NULL when type is VIP_DUP_NETWORK_FROM_NETWORK.
+@param size_of_data The byte size of data, i.e. the byte size of the NBG buffer.
+    The size can be set to 0 when *data is an NBG file path.
+@param type vip_dup_network_type_e.
+@param network The original network to be duplicated.
+@param dup_network The network object created by the duplication.
+*/
+VIP_API
+vip_status_e vip_dup_network(
+    IN const void *data,
+    IN vip_uint32_t size_of_data,
+    IN vip_dup_network_type_e type,
+    IN vip_network network,
+    OUT vip_network *dup_network
+    );
+
+/*! \brief Destroy a network object.
+ *\details Release all resources allocated for this network.
+ *\param [in] network The opaque handle to the network to be destroyed
+ *\return \ref vip_status_e
+ *\ingroup group_network
+ *\version 1.0
+ */
+VIP_API
+vip_status_e vip_destroy_network(
+    IN vip_network network
+    );
+
+/*! \brief Configure a network property. This API should be called before
+    calling vip_prepare_network.
+ *\details Configure the network's layer inputs/outputs information.
+ *\param [in] network The opaque handle to the network to be configured.
+ *\param [in] property A property \ref vip_network_property_e to be configured.
+ *\param [in] value A pointer to the value to set; the type/size depends on the property.
+ *\return \ref vip_status_e
+ */
+VIP_API
+vip_status_e vip_set_network(
+    IN vip_network network,
+    IN vip_enum property,
+    IN void *value
+    );
+
+/*! \brief Query a property of the network object.
+ *\details The user can use this API to get any property of a network.
+ *\param [in] network The opaque handle to the network to be queried
+ *\param [in] property A property \ref vip_network_property_e to be queried.
+ *\param [out] value A pointer to memory to store the returned value;
+    different properties may return different types/sizes of value.
+ *  Please see the comment of \ref vip_network_property_e for details.
+ *\return \ref vip_status_e
+ *\ingroup group_network
+ *\version 1.0
+ */
+VIP_API
+vip_status_e vip_query_network(
+    IN vip_network network,
+    IN vip_enum property,
+    OUT void *value
+    );
+
+/*! \brief Prepare a network to run on the VIP.
+ *\details This function only needs to be called once to prepare a network and
+    make it ready to execute on the VIP hardware.
+ * It does all the heavy-duty work, including allocating internal memory resources for this network,
+    deploying all operations' resources
+ * to the internal memory pool, allocating/generating the command buffer for this network, and
+    patching the command buffer for the resources in the internal memory
+ * allocations. If this function is called more than once, the driver will silently ignore the extra calls.
+    If this function is executed successfully, the network is prepared.
+ *\param [in] network The opaque handle to the network which needs to be prepared.
+ *\return \ref vip_status_e
+ *\ingroup group_network
+ *\version 1.0
+ */
+VIP_API
+vip_status_e vip_prepare_network(
+    IN vip_network network
+    );
+
+/*! \brief Query a property of a specific input of a given network.
+ *\details The specified input/property/network must be valid, + otherwise VIP_ERROR_INVALID_ARGUMENTS will be returned. + *\param [in] network The opaque handle to the network to be queried + *\param [in] index Specify which input to be queried in case there are multiple inputs in the network + *\param [in] property Specify which property application wants to know, see \ref vip_buffer_property_e + *\param [out] value Returned value, the details type/size, please refer to the comment of + \ref vip_input_property_e + *\return \ref vip_status_e + *\ingroup group_network + *\version 1.0 + */ +VIP_API +vip_status_e vip_query_input( + IN vip_network network, + IN vip_uint32_t index, + IN vip_enum property, + OUT void *value + ); + +/*! \brief Query a property of a specific output of a given network. + *\details The specified output/property/network must be valid, + otherwise VIP_ERROR_INVALID_ARGUMENTS will be returned. + *\param [in] network The opaque handle to the network to be queried + *\param [in] index Specify which output to be queried in case there are multiple outputs in the network + *\param [in] property Specify which property application wants to know, see \ref vip_buffer_property_e + *\param [out] value Returned value, the details type/size, please refer to the comment of + \ref vip_input_property_e + *\return \ref vip_status_e + *\ingroup group_network + *\version 1.0 + */ +VIP_API +vip_status_e vip_query_output( + IN vip_network network, + IN vip_uint32_t index, + IN vip_enum property, + OUT void *value + ); + +/*! \brief Attach an input buffer to the specified index of the network. + *\details All the inputs of the network need to be attached to a valid input buffer before running a network, + otherwise + * VIP_ERROR_MISSING_INPUT_OUTPUT will be returned when calling \ref vip_run_network . + When attaching an input buffer + * to the network, driver would patch the network command buffer to fill in this input buffer address. + This function could be called + * multiple times to let application update the input buffers before next network execution. + The network must be prepared by \ref vip_prepare_network before + * attaching an input. + *\param [in] network The opaque handle to a network which we want to attach an input buffer + *\param [in] index The index specify which input in the network will be set + *\param [in] input The opaque handle to a buffer which will be attached to the network. + *\return \ref vip_status_e + *\ingroup group_network + *\version 1.0 + */ +VIP_API +vip_status_e vip_set_input( + IN vip_network network, + IN vip_uint32_t index, + IN vip_buffer input + ); + +/*! \brief Attach an output buffer to the specified index of the network. + *\details All the outputs of the network need to be attached to a + valid output buffer before running a network, otherwise + * VIP_ERROR_MISSING_INPUT_OUTPUT will be returned when calling \ref vip_run_network . + When attaching an output buffer + * to the network, driver would patch the network command buffer to fill in this output buffer address. + This function could be called + * multiple times to let application update the output buffers before next network execution. + The network must be prepared by \ref vip_prepare_network before + * attaching an output. + *\param [in] network The opaque handle to a network which we want to attach an output buffer + *\param [in] index The index specify which output in the network will be set + *\param [in] output The opaque handle to a buffer which will be attached to the network. 
+ *\return \ref vip_status_e
+ *\ingroup group_network
+ *\version 1.0
+ */
+VIP_API
+vip_status_e vip_set_output(
+    IN vip_network network,
+    IN vip_uint32_t index,
+    IN vip_buffer output
+    );
+
+/*! \brief Kick off the network execution and send the command buffer of this network to the VIP hardware.
+ *\details This function can be called multiple times.
+    Every time it's called it performs inference with the currently attached
+ * input buffers and output buffers. It does not return until the VIP finishes the execution.
+    If the network is not ready to execute
+ * for some reason, for example it has not been prepared by \ref vip_prepare_network ,
+    it fails with the status reported.
+ *\param [in] network The opaque handle to the network to be executed.
+ *\return \ref vip_status_e
+ *\ingroup group_network
+ *\version 1.0
+ */
+VIP_API
+vip_status_e vip_run_network(
+    IN vip_network network
+    );
+
+/*! \brief Finish using this network to do inference.
+ *\details This function is paired with \ref vip_prepare_network .
+    It is suggested to call it once after \ref vip_prepare_network has been called.
+ * If it's called more than that, it is silently ignored.
+    If the network is not prepared but finish is called, it is silently ignored too.
+ * This function releases all internal memory allocations which were allocated when
+    the network was prepared. Since the preparation of a network takes much time,
+ * it is suggested that if the network will still be used later, the application should not
+    finish the network unless there is not much system resource remaining for other
+ * networks. The network object is still alive until it's destroyed by \ref vip_destroy_network .
+ *\param [in] network The opaque handle to the network which will be finished.
+ *\return \ref vip_status_e
+ *\ingroup group_network
+ *\version 1.0
+ */
+VIP_API
+vip_status_e vip_finish_network(
+    IN vip_network network
+    );
+
+/*! \brief Kick off the network execution and send the command buffer of this network to the VIP hardware.
+*\details This function is similar to \ref vip_run_network except that it returns
+    immediately without waiting for the HW to complete the commands.
+*\param [in] network The opaque handle to the network to be executed.
+*\return \ref vip_status_e
+*\ingroup group_network
+*\version 1.0
+*/
+VIP_API
+vip_status_e vip_trigger_network(
+    IN vip_network network
+    );
+
+/*! \brief Run tasks in a group; these tasks are added by vip_add_network.
+    The order of execution of the tasks is the order of the vip_add_network calls.
+*\details This function is similar to \ref vip_run_group except that it returns
+    immediately without waiting for the HW to complete the commands.
+*\return \ref vip_status_e
+*\param group vip_group object
+*\param num The number of tasks that will be run.
+    e.g. if num is 4, task indexes 0, 1, 2, 3 in the group will be run (inference).
+*\ingroup group_network
+*\version 1.0
+*/
+VIP_API
+vip_status_e vip_trigger_group(
+    IN vip_group group,
+    IN vip_uint32_t num
+    );
+
+/*! \brief Explicitly wait for the HW to finish executing the submitted commands.
+*\details This function waits for the HW to complete the commands.
+    This should be called once the CPU needs to access the network currently being run.
+*\param [in] network The opaque handle to the network to wait for.
+*\return \ref vip_status_e
+*\ingroup group_network
+*\version 1.0
+*/
+VIP_API
+vip_status_e vip_wait_network(
+    IN vip_network network
+    );
+
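Editorial note (not part of the diff): an end-to-end inference sketch using the network APIs above. VIP_CREATE_NETWORK_FROM_FILE is an assumed member of vip_create_network_type_e (the enum is referenced but not listed in this diff), and input/output are vip_buffer objects prepared as in the earlier buffer sketches.

#include "vip_lite.h"   /* assumed header name */

/* Load an NBG from a file path, run one inference and release the resources (sketch). */
static vip_status_e run_nbg(const char *nbg_path, vip_buffer input, vip_buffer output)
{
    vip_network network = VIP_NULL;
    vip_status_e status;

    /* size_of_data can be ignored when creating from a file path */
    status = vip_create_network(nbg_path, 0, VIP_CREATE_NETWORK_FROM_FILE, &network);
    if (status != VIP_SUCCESS) {
        return status;
    }

    status = vip_prepare_network(network);
    if (status == VIP_SUCCESS) status = vip_set_input(network, 0, input);
    if (status == VIP_SUCCESS) status = vip_set_output(network, 0, output);
    if (status == VIP_SUCCESS) {
        status = vip_run_network(network);   /* blocking; or vip_trigger_network() + vip_wait_network() */
    }
    if (status == VIP_SUCCESS) {
        /* invalidate the output if its memory is cacheable, then read it via vip_map_buffer() */
        vip_flush_buffer(output, VIP_BUFFER_OPER_TYPE_INVALIDATE);
    }

    vip_finish_network(network);    /* releases the resources allocated by vip_prepare_network */
    vip_destroy_network(network);
    return status;
}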
+/*! \brief Explicitly wait for the HW to finish executing the submitted tasks in a group.
+*\details This function waits for the HW to complete the submitted commands in the group.
+    This should be called once the CPU needs to access the group currently being run.
+*\param [in] group The vip_group object to wait for.
+*\return \ref vip_status_e
+*\ingroup group_network
+*\version 1.0
+*/
+VIP_API
+vip_status_e vip_wait_group(
+    IN vip_group group
+    );
+
+/*! \brief Cancel a network running on the VIP hardware after the network has been committed.
+*\details This function cancels a network running on the VIP hardware.
+*\param [in] network The opaque handle to the network to be canceled.
+*\return \ref vip_status_e
+*\ingroup group_network
+*\version 1.0
+*/
+VIP_API
+vip_status_e vip_cancel_network(
+    IN vip_network network
+    );
+
+/*! \brief Give user applications more control over power management for VIP cores.
+*\details Control the VIP core frequency and power status by property, see vip_power_property_e.
+*\param device_index ID of the managed device; device_index is 0 if the VIP is single core.
+*\param property Control the VIP core frequency and power status by property, see vip_power_property_e.
+*\param value The value for the vip_power_property_e property.
+    Please see vip_power_frequency_t if property is set to VIP_POWER_PROPERTY_SET_FREQUENCY.
+*\return \ref vip_status_e
+*\ingroup group_network
+*\version 1.0
+*/
+VIP_API
+vip_status_e vip_power_management(
+    IN vip_uint32_t device_index,
+    IN vip_power_property_e property,
+    IN void *value
+    );
+
+/*! \brief Create a vip_group object to run multiple tasks (network or node)
+    without an interrupt between each task.
+*\return \ref vip_status_e
+*\param count The maximum number of tasks supported by this group.
+*\param group Returns the vip_group object that was created.
+*\version 1.0
+*/
+VIP_API
+vip_status_e vip_create_group(
+    IN vip_uint32_t count,
+    OUT vip_group *group
+    );
+
+/*! \brief Destroy a group object which was created by vip_create_group.
+*\return \ref vip_status_e
+*\param group vip_group object.
+*\version 1.0
+*/
+VIP_API
+vip_status_e vip_destroy_group(
+    IN vip_group group
+    );
+
+/*
+@brief Set a group property and configure the group. This API should be called before calling vip_run_group.
+@param group The group object which was created by vip_create_group().
+@param property The property to be set, see vip_group_property_e.
+@param value The data to set.
+*/
+VIP_API
+vip_status_e vip_set_group(
+    IN vip_group group,
+    IN vip_enum property,
+    IN void *value
+    );
+
+/*! \brief Query a property of the group object.
+ *\param [in] group The group object which was created by vip_create_group().
+ *\param [in] property A property \ref vip_group_property_e to be queried.
+ *\param [out] value A pointer to memory to store the returned value;
+    different properties may return different types/sizes of value.
+ *  Please see the comment of \ref vip_group_property_e for details.
+ *\return \ref vip_status_e
+ *\ingroup group_network
+ *\version 1.0
+ */
+VIP_API
+vip_status_e vip_query_group(
+    IN vip_group group,
+    IN vip_enum property,
+    OUT void *value
+    );
+
+/*! \brief Add a vip_network object into a group.
+*\return \ref vip_status_e
+*\param group The vip_group object the network is added into.
+*\param network The vip_network added into the group.
+*\version 1.0
+*/
+VIP_API
+vip_status_e vip_add_network(
+    IN vip_group group,
+    IN vip_network network
+    );
+
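Editorial note (not part of the diff): a sketch of the group workflow built from vip_create_group, vip_set_group, vip_add_network and the run/trigger/wait calls documented here. The vip_uint32_t type for the device-index value is an assumption; net_a and net_b are assumed to be prepared networks with their inputs and outputs already attached.

#include "vip_lite.h"   /* assumed header name */

/* Run two prepared networks back to back as one group submission (sketch). */
static vip_status_e run_pair_in_group(vip_network net_a, vip_network net_b)
{
    vip_group group = VIP_NULL;
    vip_uint32_t device_index = 0;   /* assumed value type for VIP_GROUP_PROP_SET_DEVICE_INDEX */
    vip_status_e status;

    status = vip_create_group(2, &group);   /* at most two tasks in this group */
    if (status != VIP_SUCCESS) {
        return status;
    }

    /* group properties are configured before networks are added */
    vip_set_group(group, VIP_GROUP_PROP_SET_DEVICE_INDEX, &device_index);

    vip_add_network(group, net_a);   /* execution order follows the order of vip_add_network calls */
    vip_add_network(group, net_b);

    status = vip_run_group(group, 2);   /* or vip_trigger_group(group, 2) followed by vip_wait_group(group) */

    vip_destroy_group(group);
    return status;
}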
+/*! \brief Run tasks in a group; only one interrupt is issued after the tasks complete.
+    These tasks are added by vip_add_network.
+    The order of execution of the tasks is the order of the vip_add_network calls.
+*\return \ref vip_status_e
+*\param group vip_group object
+*\param num The number of tasks that will be run.
+    e.g. if num is 4, task indexes 0, 1, 2, 3 in the group will be run (inference).
+*\version 1.0
+*/
+VIP_API
+vip_status_e vip_run_group(
+    IN vip_group group,
+    IN vip_uint32_t num
+    );
+
+/*! \brief Change the PPU engine parameters:
+    local size, global size, global offset and global scale.
+*\return \ref vip_status_e
+*\param network The network object to be changed.
+*\param param The PPU parameters.
+*\param index The index of the PPU node; not used, please set it to zero.
+*\ingroup group_network
+*\version 1.0
+*/
+VIP_API
+vip_status_e vip_set_ppu_param(
+    IN vip_network network,
+    IN vip_ppu_param_t *param,
+    IN vip_uint32_t index
+    );
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif
diff --git a/viplite-tina/lib/glibc-gcc13_2_0/v2.0/inc/vip_lite_common.h b/viplite-tina/lib/glibc-gcc13_2_0/v2.0/inc/vip_lite_common.h
new file mode 100644
index 0000000..49d03a9
--- /dev/null
+++ b/viplite-tina/lib/glibc-gcc13_2_0/v2.0/inc/vip_lite_common.h
@@ -0,0 +1,240 @@
+/****************************************************************************
+*
+* The MIT License (MIT)
+*
+* Copyright (c) 2017 - 2024 Vivante Corporation
+*
+* Permission is hereby granted, free of charge, to any person obtaining a
+* copy of this software and associated documentation files (the "Software"),
+* to deal in the Software without restriction, including without limitation
+* the rights to use, copy, modify, merge, publish, distribute, sublicense,
+* and/or sell copies of the Software, and to permit persons to whom the
+* Software is furnished to do so, subject to the following conditions:
+*
+* The above copyright notice and this permission notice shall be included in
+* all copies or substantial portions of the Software.
+*
+* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+* DEALINGS IN THE SOFTWARE.
+*
+*****************************************************************************
+*
+* The GPL License (GPL)
+*
+* Copyright (C) 2017 - 2024 Vivante Corporation
+*
+* This program is free software; you can redistribute it and/or
+* modify it under the terms of the GNU General Public License
+* as published by the Free Software Foundation; either version 2
+* of the License, or (at your option) any later version.
+*
+* This program is distributed in the hope that it will be useful,
+* but WITHOUT ANY WARRANTY; without even the implied warranty of
+* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+* GNU General Public License for more details.
+*
+* You should have received a copy of the GNU General Public License
+* along with this program; if not, write to the Free Software Foundation,
+* Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+*
+*****************************************************************************
+*
+* Note: This software is released under dual MIT and GPL licenses. A
+* recipient may use this file under the terms of either the MIT license or
+* GPL License. If you wish to use only one license not the other, you can
+* indicate your decision by deleting one of the above license notices in your
+* version of this file.
+* +*****************************************************************************/ + +#ifndef _VIP_LITE_COMMON_H +#define _VIP_LITE_COMMON_H + +#ifdef __cplusplus +extern "C" { +#endif + +/*! + *\brief The VIP lite API for Convolution Neural Network application on CPU/MCU/DSP type of embedded environment. + *\details This VIP lite APIs is not thread-safe if vpmdENABLE_MULTIPLE_TASK is set to 0, + user must guarantee to call these APIs in a proper way. + But defines vpmdENABLE_MULTIPLE_TASK 1, VIPLite can support multiple task(multiple thread/process). + and it's thread-safe. + *Memory allocation and file io functions used inside driver internal would depend on working enviroment. + *\defgroup group_global Data Type Definitions and Global APIs + *\brief Data type definition and global APIs that are used in the VIP lite + *\defgroup group_buffer Buffer API + *\brief The API to manage input/output buffers + *\defgroup group_network Network API + *\brief The API to manage networks + */ + +/*! \brief An 8-bit unsigned value. + * \ingroup group_global + * \version 1.0 + */ +typedef unsigned char vip_uint8_t; + +/*! \brief An 16-bit unsigned value. + * \ingroup group_global + * \version 1.0 + */ +typedef unsigned short vip_uint16_t; + +/*! \brief An 32-bit unsigned value. + * \ingroup group_global + * \version 1.0 + */ +typedef unsigned int vip_uint32_t; + +/*! \brief An 64-bit unsigned value. + * \ingroup group_global + * \version 1.0 + */ +typedef unsigned long long vip_uint64_t; + +/*! \brief An 8-bit signed value. + * \ingroup group_global + * \version 1.0 + */ +typedef signed char vip_int8_t; + +/*! \brief An 16-bit signed value. + * \ingroup group_global + * \version 1.0 + */ +typedef signed short vip_int16_t; + +/*! \brief An 32-bit signed value. + * \ingroup group_global + * \version 1.0 + */ +typedef signed int vip_int32_t; + +/*! \brief An 64-bit signed value. + * \ingroup group_global + * \version 1.0 + */ +typedef signed long long vip_int64_t; + +/*! \brief An 8 bit ASCII character. + * \ingroup group_global + * \version 1.0 + */ +typedef char vip_char_t; + +/*! \brief An 32 bit float value. + * \ingroup group_global + * \version 1.0 + */ +typedef float vip_float_t; + +/*! \brief Sets the standard enumeration type size to be a fixed quantity. + * \ingroup group_global + * \version 1.0 + */ +typedef vip_int32_t vip_enum; + +/*! \brief a void pointer. + * \ingroup group_global + * \version 1.0 + */ +typedef void* vip_ptr; + +/*! \brief A 64-bit float value (aka double). + * \ingroup group_basic_features + */ +typedef double vip_float64_t; + +/*! \brief address type. + * \ingroup group_global + * \version 1.0 + */ +typedef unsigned long long vip_address_t; + +/*! \brief A zero value for pointer + *\ingroup group_global + *\version 1.0 + */ +#ifndef VIP_NULL +#define VIP_NULL 0 +#endif + +/***** Helper Macros. *****/ +#define VIP_API + +#define IN +#define OUT + +/*! \brief A invalid value if a property is not avaialbe for the query. + *\ingroup group_global + *\version 1.0 + */ +#define VIP_INVALID_VALUE ~0UL + +/*! \brief A Boolean value. + *\details This allows 0 to be FALSE, as it is in C, and any non-zero to be TRUE. + *\ingroup group_global + *\version 1.0 + */ +typedef enum _vip_bool_e { + /*! \brief The "false" value. */ + vip_false_e = 0, + /*! \brief The "true" value. */ + vip_true_e, +} vip_bool_e; + +/*! \brief The enumeration of all status codes. 
+ * \ingroup group_global
+ * \version 1.0
+ */
+typedef enum _vip_status
+{
+    /*!< \brief Indicates a FUSA error occurred */
+    VIP_ERROR_FUSA = -17,
+    /*!< \brief Indicates the network hit a Not-a-Number or Infinity error */
+    VIP_ERROR_NAN_INF = -16,
+    /*!< \brief Indicates the network is canceled */
+    VIP_ERROR_CANCELED = -15,
+    /*!< \brief Indicates the hardware has finished recovery after a hang */
+    VIP_ERROR_RECOVERY = -14,
+    /*!< \brief Indicates the hardware is stopped */
+    VIP_ERROR_POWER_STOP = -13,
+    /*!< \brief Indicates the hardware is in power-off status */
+    VIP_ERROR_POWER_OFF = -12,
+    /*!< \brief Indicates a generic failure */
+    VIP_ERROR_FAILURE = -11,
+    /*!< \brief Indicates the binary is not compatible with the current runtime hardware */
+    VIP_ERROR_NETWORK_INCOMPATIBLE = -10,
+    /*!< \brief Indicates the network is not prepared, so the current function call can't go through */
+    VIP_ERROR_NETWORK_NOT_PREPARED = -9,
+    /*!< \brief Indicates the network is missing either an input or an output when running the network */
+    VIP_ERROR_MISSING_INPUT_OUTPUT = -8,
+    /*!< \brief Indicates the network binary is invalid */
+    VIP_ERROR_INVALID_NETWORK = -7,
+    /*!< \brief Indicates the driver is running out of memory or video memory */
+    VIP_ERROR_OUT_OF_MEMORY = -6,
+    /*!< \brief Indicates there are not enough resources */
+    VIP_ERROR_OUT_OF_RESOURCE = -5,
+    /*!< \brief Indicates it's not supported by the driver implementation */
+    VIP_ERROR_NOT_SUPPORTED = -4,
+    /*!< \brief Indicates some arguments are not valid */
+    VIP_ERROR_INVALID_ARGUMENTS = -3,
+    /*!< \brief Indicates there is some IO-related error */
+    VIP_ERROR_IO = -2,
+    /*!< \brief Indicates a VIP timeout; the VIP could be stuck somewhere */
+    VIP_ERROR_TIMEOUT = -1,
+    /*!< \brief Indicates the execution is successful */
+    VIP_SUCCESS = 0,
+} vip_status_e;
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif
diff --git a/viplite-tina/lib/glibc-gcc13_2_0/v2.0/libNBGlinker.so b/viplite-tina/lib/glibc-gcc13_2_0/v2.0/libNBGlinker.so
new file mode 100644
index 0000000..4780913
Binary files /dev/null and b/viplite-tina/lib/glibc-gcc13_2_0/v2.0/libNBGlinker.so differ
diff --git a/viplite-tina/lib/glibc-gcc13_2_0/v2.0/libVIPhal.so b/viplite-tina/lib/glibc-gcc13_2_0/v2.0/libVIPhal.so
new file mode 100644
index 0000000..80de0b0
Binary files /dev/null and b/viplite-tina/lib/glibc-gcc13_2_0/v2.0/libVIPhal.so differ
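Editorial note (not part of the diff): a small helper mapping the vip_status_e codes defined in vip_lite_common.h above to printable strings, useful when logging API failures.

#include "vip_lite_common.h"

static const char *vip_status_string(vip_status_e status)
{
    switch (status) {
    case VIP_SUCCESS:                    return "success";
    case VIP_ERROR_TIMEOUT:              return "timeout";
    case VIP_ERROR_IO:                   return "I/O error";
    case VIP_ERROR_INVALID_ARGUMENTS:    return "invalid arguments";
    case VIP_ERROR_NOT_SUPPORTED:        return "not supported";
    case VIP_ERROR_OUT_OF_RESOURCE:      return "out of resource";
    case VIP_ERROR_OUT_OF_MEMORY:        return "out of memory";
    case VIP_ERROR_INVALID_NETWORK:      return "invalid network binary";
    case VIP_ERROR_MISSING_INPUT_OUTPUT: return "missing input or output";
    case VIP_ERROR_NETWORK_NOT_PREPARED: return "network not prepared";
    case VIP_ERROR_NETWORK_INCOMPATIBLE: return "network incompatible with hardware";
    case VIP_ERROR_FAILURE:              return "generic failure";
    case VIP_ERROR_POWER_OFF:            return "hardware powered off";
    case VIP_ERROR_POWER_STOP:           return "hardware stopped";
    case VIP_ERROR_RECOVERY:             return "hardware recovered after hang";
    case VIP_ERROR_CANCELED:             return "canceled";
    case VIP_ERROR_NAN_INF:              return "NaN or Inf detected";
    case VIP_ERROR_FUSA:                 return "FUSA error";
    default:                             return "unknown status";
    }
}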