/*
 * Copyright (c) 2008-2016 Allwinner Technology Co. Ltd.
 * All rights reserved.
 *
 * File    : ionAlloc.c
 * Description :
 * History :
 *    Author  : xyliu <xyliu@allwinnertech.com>
 *    Date    : 2016/04/13
 *    Comment :
 *
 */

/*
 * ion_alloc.c
 *
 * john.fu@allwinnertech.com
 *
 * ion memory allocate
 *
 */

//#define CONFIG_LOG_LEVEL OPTION_LOG_LEVEL_DETAIL
#define LOG_TAG "ion_alloc"

#include "aw_ion_alloc_list.h"
#include "aw_ion_alloc.h"
#include "aw_ion_util.h"

#include <sys/ioctl.h>
#include <sys/mman.h>
#include <fcntl.h>
#include <pthread.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <errno.h>

#define DEBUG_ION_REF 0 // just for H3 ION memory info debugging
#define ION_ALLOC_ALIGN SZ_4k
#define DEV_NAME "/dev/ion"
#define ION_IOC_SUNXI_POOL_INFO 10

#define UNUSA_PARAM(param) (void)param

enum VE_IOCTL_CMD {
    IOCTL_UNKOWN = 0x100,
    IOCTL_GET_ENV_INFO,
    IOCTL_WAIT_VE_DE,
    IOCTL_WAIT_VE_EN,
    IOCTL_RESET_VE,
    IOCTL_ENABLE_VE,
    IOCTL_DISABLE_VE,
    IOCTL_SET_VE_FREQ,

    IOCTL_CONFIG_AVS2 = 0x200,
    IOCTL_GETVALUE_AVS2,
    IOCTL_PAUSE_AVS2,
    IOCTL_START_AVS2,
    IOCTL_RESET_AVS2,
    IOCTL_ADJUST_AVS2,
    IOCTL_ENGINE_REQ,
    IOCTL_ENGINE_REL,
    IOCTL_ENGINE_CHECK_DELAY,
    IOCTL_GET_IC_VER,
    IOCTL_ADJUST_AVS2_ABS,
    IOCTL_FLUSH_CACHE,
    IOCTL_SET_REFCOUNT,
    IOCTL_FLUSH_CACHE_ALL,
    IOCTL_TEST_VERSION,

    IOCTL_GET_LOCK = 0x310,
    IOCTL_RELEASE_LOCK,

    IOCTL_SET_VOL = 0x400,

    IOCTL_WAIT_JPEG_DEC = 0x500,
    /* get the ve ref_count, so ipc can delete the semaphore */
    IOCTL_GET_REFCOUNT,

    /* for iommu */
    IOCTL_GET_IOMMU_ADDR,
    IOCTL_FREE_IOMMU_ADDR,

    /* for flush cache range, since kernel 5.4 */
    IOCTL_FLUSH_CACHE_RANGE = 0x506,

    /* debug, just for encoder */
    IOCTL_SET_PROC_INFO,
    IOCTL_STOP_PROC_INFO,
    IOCTL_COPY_PROC_INFO,

    IOCTL_SET_DRAM_HIGH_CHANNAL = 0x600,
    IOCTL_READ_DDR_VALUE,
    IOCTL_WRITE_DDR_VALUE,
    IOCTL_CLEAR_DDR_VALUE,

    /* debug for decoder and encoder */
    IOCTL_PROC_INFO_COPY = 0x610,
    IOCTL_PROC_INFO_STOP,

    IOCTL_POWER_SETUP = 0x700,
    IOCTL_POWER_SHUTDOWN,
};

struct user_iommu_param {
    int fd;
    unsigned int iommu_addr;
};

//----------------------
#if DEBUG_ION_REF==1
int cdx_use_mem = 0;
typedef struct ION_BUF_NODE_TEST
{
    unsigned long addr;
    int size;
} ion_buf_node_test;

#define ION_BUF_LEN 50
ion_buf_node_test ion_buf_nodes_test[ION_BUF_LEN];
#endif
//----------------------

struct sunxi_pool_info {
    unsigned int total;   // total size, in KiB
    unsigned int free_kb; // free size, in KiB
    unsigned int free_mb; // free size, in MiB
};

typedef struct BUFFER_NODE
{
    struct aw_mem_list_head i_list;
    unsigned long phy;        // physical address
    unsigned long vir;        // virtual address
    unsigned int size;        // buffer size
    unsigned int tee;
    unsigned long user_virt;
    ion_fd_data_t fd_data;
    struct user_iommu_param iommu_buffer;
} buffer_node;

typedef struct ION_ALLOC_CONTEXT
{
    int fd;                       // driver handle
    struct aw_mem_list_head list; // buffer list
    int ref_cnt;                  // reference count
    unsigned int phyOffset;
    int ve_fd;
} ion_alloc_context;

static ion_alloc_context *g_alloc_context = NULL;
static pthread_mutex_t g_mutex_alloc = PTHREAD_MUTEX_INITIALIZER;

int get_memory_type()
{
    MEMORY_TYPE eMemoryType = MEMORY_NORMAL;

#if CONF_USE_IOMMU
    eMemoryType = MEMORY_IOMMU;
#else

#ifdef __ANDROID__
    char prop_value[512];
    property_get("ro.kernel.iomem.type", prop_value, "0xaf01");
    logv("++++ prop_value: %s", prop_value);
    if(strcmp(prop_value, "0xaf10")==0)
    {
        eMemoryType = MEMORY_IOMMU;
    }
#endif
#endif

    /* note: this unconditionally overrides the selection above and forces IOMMU */
    eMemoryType = MEMORY_IOMMU;

    logv("get_memory_type: %d\n", eMemoryType);

    return eMemoryType;
}

static int getPhyAddr(int nIonFd, uintptr_t handle, void *pIommuBuf,
                      unsigned long *pAddr)
{
    if(get_memory_type()==MEMORY_IOMMU)
    {
        struct user_iommu_param *pIommuBuffer = (struct user_iommu_param *)pIommuBuf;

        int ret = ioctl(g_alloc_context->ve_fd, IOCTL_GET_IOMMU_ADDR, pIommuBuffer);
        if(ret)
        {
            loge("IOCTL_GET_IOMMU_ADDR err, ret %d\n", ret);
            return -1;
        }

        if(pIommuBuffer->iommu_addr & 0xff)
        {
            loge("get iommu addr maybe wrong: %x\n", pIommuBuffer->iommu_addr);
            return -1;
        }
        logv("getPhyAddr: fd:%d, iommu_addr:%x\n", pIommuBuffer->fd,
             pIommuBuffer->iommu_addr);

        *pAddr = (unsigned long)pIommuBuffer->iommu_addr;
    }
    else
    {
        struct aw_ion_custom_info custom_data;
        cdc_sunxi_phys_data phys_data;
        memset(&phys_data, 0, sizeof(cdc_sunxi_phys_data));
        custom_data.aw_cmd = ION_IOC_SUNXI_PHYS_ADDR;
        phys_data.handle = (aw_ion_user_handle_t)handle;
        custom_data.aw_arg = (unsigned long)&phys_data;
        int ret = ioctl(nIonFd, AW_MEM_ION_IOC_CUSTOM, &custom_data);
        if(ret) {
            loge("ION_IOC_CUSTOM err, ret %d\n", ret);
            return -1;
        }
        logv("getPhyAddr: phys_addr:%x, fd:%d, handle:%lu\n",
             phys_data.phys_addr, nIonFd, (unsigned long)handle);

        *pAddr = (unsigned long)phys_data.phys_addr;
    }
    return 0;
}

#if DEBUG_ION_REF == 1
static int ion_alloc_get_total_size();
#endif

/* function begin */
int aw_rt_ion_alloc_open()
{
    logv("begin aw_rt_ion_alloc_open\n");

    pthread_mutex_lock(&g_mutex_alloc);
    if (g_alloc_context != NULL)
    {
        logv("ion allocator has already been created\n");
        goto SUCCEED_OUT;
    }

    g_alloc_context = (ion_alloc_context*)malloc(sizeof(ion_alloc_context));
    if (g_alloc_context == NULL)
    {
        loge("create ion allocator failed, out of memory\n");
        goto ERROR_OUT;
    }
    else
    {
        logv("pid: %d, g_alloc_context = %p\n", getpid(), g_alloc_context);
    }

    memset((void*)g_alloc_context, 0, sizeof(ion_alloc_context));

    g_alloc_context->phyOffset = 0;
    logv("** phy offset = %x", g_alloc_context->phyOffset);

    /* Read-only should be enough. */
    g_alloc_context->fd = open(DEV_NAME, O_RDONLY, 0);
    if (g_alloc_context->fd <= 0)
    {
        loge("open %s failed\n", DEV_NAME);
        goto ERROR_OUT;
    }

    g_alloc_context->ve_fd = open("/dev/cedar_dev", O_RDWR);
    if (g_alloc_context->ve_fd <= 0)
    {
        loge("open cedar_dev failed\n");
        goto ERROR_OUT;
    }
    ioctl(g_alloc_context->ve_fd, IOCTL_ENGINE_REQ, 0);

#if DEBUG_ION_REF==1
    cdx_use_mem = 0;
    memset(&ion_buf_nodes_test, 0, sizeof(ion_buf_nodes_test));
    logd("ion_open, cdx_use_mem=[%d Byte].", cdx_use_mem);
    ion_alloc_get_total_size();
#endif

    AW_MEM_INIT_LIST_HEAD(&g_alloc_context->list);

SUCCEED_OUT:
    g_alloc_context->ref_cnt++;
    pthread_mutex_unlock(&g_mutex_alloc);
    return 0;

ERROR_OUT:
    if (g_alloc_context != NULL
        && g_alloc_context->fd > 0)
    {
        close(g_alloc_context->fd);
        g_alloc_context->fd = 0;
    }

    if (g_alloc_context != NULL)
    {
        free(g_alloc_context);
        g_alloc_context = NULL;
    }

    pthread_mutex_unlock(&g_mutex_alloc);
    return -1;
}

void aw_rt_ion_alloc_close()
{
    struct aw_mem_list_head *pos, *q;

    logv("aw_rt_ion_alloc_close\n");

    pthread_mutex_lock(&g_mutex_alloc);
    if (--g_alloc_context->ref_cnt <= 0)
    {
        logv("pid: %d, release g_alloc_context = %p\n", getpid(), g_alloc_context);

        aw_mem_list_for_each_safe(pos, q, &g_alloc_context->list)
        {
            buffer_node *tmp;
            tmp = aw_mem_list_entry(pos, buffer_node, i_list);
            logv("aw_rt_ion_alloc_close del item phy= 0x%lx vir= 0x%lx, size= %d\n", \
                 tmp->phy, tmp->vir, tmp->size);
            aw_mem_list_del(pos);
            free(tmp);
        }
#if DEBUG_ION_REF==1
        logd("ion_close, cdx_use_mem=[%d MB]", cdx_use_mem/1024/1024);
        ion_alloc_get_total_size();
#endif
        close(g_alloc_context->fd);
        g_alloc_context->fd = -1;

        ioctl(g_alloc_context->ve_fd, IOCTL_ENGINE_REL, 0);

        close(g_alloc_context->ve_fd);
        g_alloc_context->ve_fd = -1;

        free(g_alloc_context);
        g_alloc_context = NULL;
    }
    else
    {
        logv("ref cnt: %d > 0, do not free\n", g_alloc_context->ref_cnt);
    }
    pthread_mutex_unlock(&g_mutex_alloc);

    //--------------
#if DEBUG_ION_REF==1
    int i = 0;
    int counter = 0;
    for(i=0; i<ION_BUF_LEN; i++)
    {
        if(ion_buf_nodes_test[i].addr != 0 || ion_buf_nodes_test[i].size != 0){
            loge("ion memory leak? addr->[0x%lx], leaked size->[%d Byte]", \
                 ion_buf_nodes_test[i].addr, ion_buf_nodes_test[i].size);
            counter++;
        }
    }

    if(counter != 0)
    {
        loge("error: [%d] blocks of ion memory leaked!", counter);
    }
    else
    {
        logd("well done, no ion memory leak.");
    }
#endif
    //--------------
    return;
}

// returns the virtual address; 0 on failure
void* aw_rt_ion_alloc_palloc_base(int size, unsigned char bIsCache)
{
    aw_ion_allocation_info_t alloc_data;
    ion_fd_data_t fd_data;
    struct aw_ion_handle_data handle_data;

    unsigned long addr_phy = 0;
    unsigned long addr_vir = 0;
    buffer_node *alloc_buffer = NULL;
    int ret = 0;
    memset(&alloc_data, 0, sizeof(aw_ion_allocation_info_t));
    pthread_mutex_lock(&g_mutex_alloc);

    if (g_alloc_context == NULL)
    {
        loge("ion_alloc is not opened, call aw_rt_ion_alloc_open() before allocating\n");
        goto ALLOC_OUT;
    }

    if(size <= 0)
    {
        loge("can not alloc size 0\n");
        goto ALLOC_OUT;
    }

    alloc_data.aw_len = (size_t)size;
    alloc_data.aw_align = ION_ALLOC_ALIGN;

    if(get_memory_type() == MEMORY_IOMMU)
    {
        alloc_data.aw_heap_id_mask = AW_ION_SYSTEM_HEAP_MASK | AW_ION_CARVEOUT_HEAP_MASK;
    }
    else
    {
        alloc_data.aw_heap_id_mask = AW_ION_DMA_HEAP_MASK | AW_ION_CARVEOUT_HEAP_MASK;
    }

    if(bIsCache != 0)
    {
        alloc_data.flags = AW_ION_CACHED_FLAG | AW_ION_CACHED_NEEDS_SYNC_FLAG;
    }

#if 0
#ifdef CONF_KERNEL_VERSION_4_9
    alloc_data.aw_heap_id_mask = AW_ION_DMA_HEAP_MASK | AW_ION_CARVEOUT_HEAP_MASK;
    alloc_data.flags = AW_ION_CACHED_FLAG | AW_ION_CACHED_NEEDS_SYNC_FLAG;
#else
    alloc_data.flags = AW_ION_CACHED_FLAG | AW_ION_CACHED_NEEDS_SYNC_FLAG;
#endif
#endif
    ret = ioctl(g_alloc_context->fd, AW_MEM_ION_IOC_ALLOC, &alloc_data);
    if (ret)
    {
        loge("ION_IOC_ALLOC error, size = %d\n", size);
        goto ALLOC_OUT;
    }

    /* get dmabuf fd */
    fd_data.handle = alloc_data.handle;
    ret = ioctl(g_alloc_context->fd, AW_MEM_ION_IOC_MAP, &fd_data);
    if(ret)
    {
        loge("ION_IOC_MAP err, ret %d, dmabuf fd 0x%08x\n", ret, (unsigned int)fd_data.aw_fd);
        goto ALLOC_OUT;
    }

    /* mmap to user */
    addr_vir = (unsigned long)mmap(NULL, alloc_data.aw_len, \
        PROT_READ|PROT_WRITE, MAP_SHARED, fd_data.aw_fd, 0);
    if((unsigned long)MAP_FAILED == addr_vir)
    {
        loge("mmap err, ret %lx\n", (unsigned long)addr_vir);
        addr_vir = 0;
        goto ALLOC_OUT;
    }

    alloc_buffer = (buffer_node *)malloc(sizeof(buffer_node));
    if (alloc_buffer == NULL)
    {
        loge("malloc buffer node failed");

        /* unmap */
        ret = munmap((void*)addr_vir, alloc_data.aw_len);
        if(ret) {
            loge("munmap err, ret %d\n", ret);
        }

        /* close dmabuf fd */
        close(fd_data.aw_fd);

        /* free buffer */
        handle_data.handle = alloc_data.handle;
        ret = ioctl(g_alloc_context->fd, AW_MEM_ION_IOC_FREE, &handle_data);
        if(ret) {
            loge("ION_IOC_FREE err, ret %d\n", ret);
        }

        addr_phy = 0;
        addr_vir = 0; // MAP_FAILED is -1, so return 0 instead

        goto ALLOC_OUT;
    }

    struct user_iommu_param iommu_buffer;
    memset(&iommu_buffer, 0, sizeof(struct user_iommu_param));
    iommu_buffer.fd = fd_data.aw_fd;

    ret = getPhyAddr(g_alloc_context->fd, (uintptr_t)alloc_data.handle,
                     (void *)&iommu_buffer, &addr_phy);
    if(ret < 0)
    {
        loge("get phy addr error\n");
        /* roll back: drop the node, unmap and release the ion buffer */
        free(alloc_buffer);
        munmap((void*)addr_vir, alloc_data.aw_len);
        close(fd_data.aw_fd);
        handle_data.handle = alloc_data.handle;
        ioctl(g_alloc_context->fd, AW_MEM_ION_IOC_FREE, &handle_data);
        addr_vir = 0;
        goto ALLOC_OUT;
    }
    memcpy(&alloc_buffer->iommu_buffer, &iommu_buffer, sizeof(struct user_iommu_param));
    alloc_buffer->phy = addr_phy;
    alloc_buffer->vir = addr_vir;
    alloc_buffer->user_virt = addr_vir;
    alloc_buffer->size = size;
    alloc_buffer->fd_data.handle = fd_data.handle;
    alloc_buffer->fd_data.aw_fd = fd_data.aw_fd;

    logv("alloc succeed, addr_phy: 0x%lx, addr_vir: 0x%lx, size: %d", addr_phy, addr_vir, size);

    aw_mem_list_add_tail(&alloc_buffer->i_list, &g_alloc_context->list);

    //------start-----------------
#if DEBUG_ION_REF==1
    cdx_use_mem += size;
    logd("++++++cdx_use_mem = [%d MB], increase size->[%d B], addr_vir=[0x%lx], addr_phy=[0x%lx]", \
         cdx_use_mem/1024/1024, size, addr_vir, addr_phy);
    int i = 0;
    for(i=0; i<ION_BUF_LEN; i++)
    {
        if(ion_buf_nodes_test[i].addr == 0 && ion_buf_nodes_test[i].size == 0){
            ion_buf_nodes_test[i].addr = addr_vir;
            ion_buf_nodes_test[i].size = size;
            break;
        }
    }

    if(i >= ION_BUF_LEN){
        loge("error: the ion buf node table is full (len [%d])", ION_BUF_LEN);
    }
#endif
    //--------------------------------

ALLOC_OUT:
    pthread_mutex_unlock(&g_mutex_alloc);
    return (void*)addr_vir;
}

void* aw_rt_ion_alloc_palloc(int size)
{
    unsigned char bIsCache = 1;
    return aw_rt_ion_alloc_palloc_base(size, bIsCache);
}

void* aw_rt_ion_alloc_no_cache_palloc(int size)
{
    unsigned char bIsCache = 0;
    return aw_rt_ion_alloc_palloc_base(size, bIsCache);
}

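/*
 * A minimal usage sketch of the allocator lifecycle (illustrative only,
 * compiled out like the other #if 0 blocks in this file): open the
 * allocator, take a cached buffer, and release everything again. The
 * 4096-byte size is an arbitrary example value.
 */
#if 0
static int ion_alloc_usage_example(void)
{
    if (aw_rt_ion_alloc_open() != 0)
        return -1;

    void *buf = aw_rt_ion_alloc_palloc(4096); /* cached buffer, NULL on failure */
    if (buf == NULL) {
        aw_rt_ion_alloc_close();
        return -1;
    }

    /* ... use the buffer ... */

    aw_rt_ion_alloc_pfree(buf); /* unmaps and releases the ion handle */
    aw_rt_ion_alloc_close();    /* drops the allocator reference */
    return 0;
}
#endif
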
void aw_rt_ion_alloc_pfree(void *pbuf)
{
    int flag = 0;
    unsigned long addr_vir = (unsigned long)pbuf;
    buffer_node *tmp;
    int ret;
    struct aw_ion_handle_data handle_data;

    if (0 == pbuf)
    {
        loge("can not free NULL buffer\n");
        return;
    }

    pthread_mutex_lock(&g_mutex_alloc);

    if (g_alloc_context == NULL)
    {
        loge("ion_alloc is not opened, call aw_rt_ion_alloc_open() before freeing\n");
        pthread_mutex_unlock(&g_mutex_alloc);
        return;
    }

    aw_mem_list_for_each_entry(tmp, &g_alloc_context->list, i_list)
    {
        if (tmp->vir == addr_vir)
        {
            logv("ion_alloc_free item phy= 0x%lx vir= 0x%lx, size= %d\n", \
                 tmp->phy, tmp->vir, tmp->size);
            /* free the iommu address first, then unmap user space */
            if(get_memory_type() == MEMORY_IOMMU)
            {
                logv("aw_rt_ion_alloc_pfree: fd:%d, iommu_addr:%x\n",
                     tmp->iommu_buffer.fd, tmp->iommu_buffer.iommu_addr);

                ret = ioctl(g_alloc_context->ve_fd, IOCTL_FREE_IOMMU_ADDR, &tmp->iommu_buffer);
                if(ret < 0)
                    loge("VeFreeIommuAddr error\n");
            }

            /* unmap user space */
            if (munmap((void *)(tmp->user_virt), tmp->size) < 0)
            {
                loge("munmap %p, size: %d failed\n", (void*)addr_vir, tmp->size);
            }

            /* close dma buffer fd */
            close(tmp->fd_data.aw_fd);
            /* free buffer */
            handle_data.handle = tmp->fd_data.handle;

            ret = ioctl(g_alloc_context->fd, AW_MEM_ION_IOC_FREE, &handle_data);
            if (ret)
            {
                logv("ION_IOC_FREE failed\n");
            }

            aw_mem_list_del(&tmp->i_list);
            free(tmp);

            flag = 1;

            //------start-----------------
#if DEBUG_ION_REF==1
            int i = 0;
            for(i=0; i<ION_BUF_LEN; i++)
            {
                if(ion_buf_nodes_test[i].addr == addr_vir && ion_buf_nodes_test[i].size > 0){
                    cdx_use_mem -= ion_buf_nodes_test[i].size;
                    logv("--------cdx_use_mem = [%d MB], reduce size->[%d B]",\
                         cdx_use_mem/1024/1024, ion_buf_nodes_test[i].size);
                    ion_buf_nodes_test[i].addr = 0;
                    ion_buf_nodes_test[i].size = 0;
                    break;
                }
            }

            if(i >= ION_BUF_LEN){
                loge("error: buffer not found in the ion buf node table (len [%d])", ION_BUF_LEN);
            }
#endif
            //--------------------------------

            break;
        }
    }

    if (0 == flag)
    {
        loge("ion_alloc_free failed, can not find virtual address: 0x%lx\n", addr_vir);
    }

    pthread_mutex_unlock(&g_mutex_alloc);
    return;
}

void* aw_rt_ion_alloc_vir2phy(void *pbuf)
{
    int flag = 0;
    unsigned long addr_vir = (unsigned long)pbuf;
    unsigned long addr_phy = 0;
    buffer_node *tmp;

    if (0 == pbuf)
    {
        // logv("can not vir2phy NULL buffer\n");
        return 0;
    }

    pthread_mutex_lock(&g_mutex_alloc);

    if (g_alloc_context == NULL)
    {
        loge("ion_alloc is not opened, call aw_rt_ion_alloc_open() first\n");
        pthread_mutex_unlock(&g_mutex_alloc);
        return 0;
    }

    aw_mem_list_for_each_entry(tmp, &g_alloc_context->list, i_list)
    {
        if (addr_vir >= tmp->vir
            && addr_vir < tmp->vir + tmp->size)
        {
            addr_phy = tmp->phy + (addr_vir - tmp->vir);
            // logv("aw_rt_ion_alloc_vir2phy phy= 0x%lx vir= 0x%lx\n", addr_phy, addr_vir);
            flag = 1;
            break;
        }
    }

    if (0 == flag)
    {
        loge("aw_rt_ion_alloc_vir2phy failed, can not find virtual address: 0x%lx\n", addr_vir);
    }

    pthread_mutex_unlock(&g_mutex_alloc);

    return (void*)addr_phy;
}

void* aw_rt_ion_alloc_phy2vir(void *pbuf)
{
    int flag = 0;
    unsigned long addr_vir = 0;
    unsigned long addr_phy = (unsigned long)pbuf;
    buffer_node *tmp;

    if (0 == pbuf)
    {
        loge("can not phy2vir NULL buffer\n");
        return 0;
    }

    pthread_mutex_lock(&g_mutex_alloc);

    if (g_alloc_context == NULL)
    {
        loge("ion_alloc is not opened, call aw_rt_ion_alloc_open() first\n");
        pthread_mutex_unlock(&g_mutex_alloc);
        return 0;
    }

    aw_mem_list_for_each_entry(tmp, &g_alloc_context->list, i_list)
    {
        if (addr_phy >= tmp->phy
            && addr_phy < tmp->phy + tmp->size)
        {
            addr_vir = tmp->vir + (addr_phy - tmp->phy);
            flag = 1;
            break;
        }
    }

    if (0 == flag)
    {
        loge("aw_rt_ion_alloc_phy2vir failed, can not find physical address: 0x%lx\n", addr_phy);
    }

    pthread_mutex_unlock(&g_mutex_alloc);

    return (void*)addr_vir;
}

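/*
 * A small address-translation sketch (illustrative only): both lookups walk
 * the allocator's buffer list, so they only succeed for addresses that fall
 * inside a buffer allocated through this allocator.
 */
#if 0
static void ion_addr_translate_example(void *buf)
{
    void *phy = aw_rt_ion_alloc_vir2phy(buf); /* CPU address -> device address */
    void *vir = aw_rt_ion_alloc_phy2vir(phy); /* device address -> CPU address */
    /* for a tracked buffer, vir == buf holds here */
    (void)vir;
}
#endif
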
//* the flush_cache path used with CONF_KERNEL_VERSION_3_4
#if 0
void ion_alloc_flush_cache(void* startAddr, int size)
{
    sunxi_cache_range range;
    struct aw_ion_custom_info custom_data;
    int ret;

    /* clean and invalidate user cache */
    range.start = (unsigned long)startAddr;
    range.end = (unsigned long)startAddr + size;

    custom_data.aw_cmd = ION_IOC_SUNXI_FLUSH_RANGE;
    custom_data.aw_arg = (unsigned long)&range;

    ret = ioctl(g_alloc_context->fd, AW_MEM_ION_IOC_CUSTOM, &custom_data);
    if (ret)
    {
        loge("ION_IOC_CUSTOM failed\n");
    }

    return;
}
#endif

void aw_rt_ion_alloc_flush_cache(void* startAddr, int size)
{
    int ret;
    sunxi_cache_range range;

    /* clean and invalidate user cache */
    range.start = (unsigned long)startAddr;
    range.end = (unsigned long)startAddr + size;
    //logv("start:%p, end:%lx, size:%lx(%ld)\n", startAddr, range.end, (long)size, (long)size);
    ret = ioctl(g_alloc_context->fd, ION_IOC_SUNXI_FLUSH_RANGE, &range);
    if (ret)
    {
        loge("ION_IOC_SUNXI_FLUSH_RANGE failed, errno: %d, ret: %d", errno, ret);
    }

    return;
}

void aw_rt_ion_flush_cache_all()
{
    ioctl(g_alloc_context->fd, ION_IOC_SUNXI_FLUSH_ALL, 0);
}

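/*
 * A cache-maintenance sketch (illustrative only): buffers from
 * aw_rt_ion_alloc_palloc() are CPU-cached, so CPU writes must be flushed
 * before a hardware block reads the memory.
 */
#if 0
static void ion_flush_example(void *buf, int size)
{
    memset(buf, 0, size);                   /* CPU fills the cached buffer */
    aw_rt_ion_alloc_flush_cache(buf, size); /* sync the range for the hardware */
}
#endif
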
int aw_rt_ion_alloc_memset(void* buf, int value, size_t n)
{
    memset(buf, value, n);
    return -1;
}

int aw_rt_ion_alloc_copy(void* dst, void* src, size_t n)
{
    memcpy(dst, src, n);
    return -1;
}

int aw_rt_ion_alloc_read(void* dst, void* src, size_t n)
{
    memcpy(dst, src, n);
    return -1;
}

int aw_rt_ion_alloc_write(void* dst, void* src, size_t n)
{
    memcpy(dst, src, n);
    return -1;
}

int aw_rt_ion_alloc_setup()
{
    return -1;
}

int aw_rt_ion_alloc_shutdown()
{
    return -1;
}

unsigned int aw_rt_ion_alloc_get_ve_addr_offset()
{
    if(g_alloc_context != NULL)
        return g_alloc_context->phyOffset;
    else
    {
        loge("g_alloc_context is NULL, please call aw_rt_ion_alloc_open\n");
        return 0;
    }
}