Commit 9a6dad93 authored by Dmitriy Beykun

added r3p1-01rel0 libump

parent 9d0b7e62
@@ -34,23 +34,23 @@ void *ump_uk_ctx = NULL;
static volatile int ump_ref_count = 0;

/** Lock for critical section in open/close */
_ump_osu_lock_t * ump_lock_arch = NULL;

ump_result ump_arch_open(void)
{
    ump_result retval = UMP_OK;

    _ump_osu_lock_auto_init( &ump_lock_arch, 0, 0, 0 );

    /* Check that the lock was initialized */
    if (NULL == ump_lock_arch)
    {
        UMP_DEBUG_PRINT(1, ("UMP: ump_arch_open() failed to init lock\n"));
        return UMP_ERROR;
    }

    /* Attempt to obtain a lock */
    if( _UMP_OSU_ERR_OK != _ump_osu_lock_wait( ump_lock_arch, _UMP_OSU_LOCKMODE_RW ) )
    {
        UMP_DEBUG_PRINT(1, ("UMP: ump_arch_open() failed to acquire lock\n"));
        return UMP_ERROR;
@@ -73,7 +73,7 @@ ump_result ump_arch_open(void)
    }

    /* Signal the lock so someone else can use it */
    _ump_osu_lock_signal( ump_lock_arch, _UMP_OSU_LOCKMODE_RW );
    return retval;
}
@@ -82,17 +82,17 @@ ump_result ump_arch_open(void)
void ump_arch_close(void)
{
    _ump_osu_lock_auto_init( &ump_lock_arch, 0, 0, 0 );

    /* Check that the lock was initialized */
    if(NULL == ump_lock_arch)
    {
        UMP_DEBUG_PRINT(1, ("UMP: ump_arch_close() failed to init lock\n"));
        return;
    }

    /* Attempt to obtain a lock */
    if( _UMP_OSU_ERR_OK != _ump_osu_lock_wait( ump_lock_arch, _UMP_OSU_LOCKMODE_RW ) )
    {
        UMP_DEBUG_PRINT(1, ("UMP: ump_arch_close() failed to acquire lock\n"));
        return;
@@ -108,15 +108,15 @@ void ump_arch_close(void)
            UMP_DEBUG_ASSERT(retval == _UMP_OSU_ERR_OK, ("UMP: Failed to close UMP interface"));
            UMP_IGNORE(retval);
            ump_uk_ctx = NULL;
            _ump_osu_lock_signal( ump_lock_arch, _UMP_OSU_LOCKMODE_RW );
            _ump_osu_lock_term( ump_lock_arch ); /* Not 100% thread safe, since another thread can already be waiting for this lock in ump_arch_open() */
            ump_lock_arch = NULL;
            return;
        }
    }

    /* Signal the lock so someone else can use it */
    _ump_osu_lock_signal( ump_lock_arch, _UMP_OSU_LOCKMODE_RW );
}
@@ -258,3 +258,55 @@ int ump_arch_msync(ump_secure_id secure_id, void* mapping, unsigned long cookie,
    }
    return dd_msync_call_arg.is_cached;
}
/** Cache operation control. Tell when cache maintenance operations start and end.
    This will allow the kernel to merge cache operations together, thus making them faster */
int ump_arch_cache_operations_control(ump_cache_op_control op)
{
    _ump_uk_cache_operations_control_s dd_cache_control_arg;

    dd_cache_control_arg.op = (ump_uk_cache_op_control)op;
    dd_cache_control_arg.ctx = ump_uk_ctx;

    UMP_DEBUG_PRINT(4, ("Cache control op:%d",(u32)op ));
    _ump_uku_cache_operations_control( &dd_cache_control_arg );
    return 1; /* Always success */
}

int ump_arch_switch_hw_usage( ump_secure_id secure_id, ump_hw_usage new_user )
{
    _ump_uk_switch_hw_usage_s dd_switch_user_arg;

    dd_switch_user_arg.secure_id = secure_id;
    dd_switch_user_arg.new_user = (ump_uk_user)new_user;
    dd_switch_user_arg.ctx = ump_uk_ctx;

    UMP_DEBUG_PRINT(4, ("Switch user UMP:%d User:%d",secure_id, (u32)new_user ));
    _ump_uku_switch_hw_usage( &dd_switch_user_arg );
    return 1; /* Always success */
}

int ump_arch_lock( ump_secure_id secure_id, ump_lock_usage lock_usage )
{
    _ump_uk_lock_s dd_lock_arg;

    dd_lock_arg.ctx = ump_uk_ctx;
    dd_lock_arg.secure_id = secure_id;
    dd_lock_arg.lock_usage = (ump_uk_lock_usage) lock_usage;

    UMP_DEBUG_PRINT(4, ("Lock UMP:%d ",secure_id));
    _ump_uku_lock( &dd_lock_arg );
    return 1; /* Always success */
}

int ump_arch_unlock( ump_secure_id secure_id )
{
    _ump_uk_unlock_s dd_unlock_arg;

    dd_unlock_arg.ctx = ump_uk_ctx;
    dd_unlock_arg.secure_id = secure_id;

    UMP_DEBUG_PRINT(4, ("Unlock UMP:%d ",secure_id));
    _ump_uku_unlock( &dd_unlock_arg );
    return 1; /* Always success */
}
@@ -58,6 +58,20 @@ void ump_arch_unmap(void* mapping, unsigned long size, unsigned long cookie);
 * @return Is_cached: 1==True 0==NonCached */
int ump_arch_msync(ump_secure_id secure_id, void* mapping, unsigned long cookie, void * address, unsigned long size, ump_cpu_msync_op op);
/** Cache operation control. Tell when cache maintenance operations start and end.
    This will allow the kernel to merge cache operations together, thus making them faster */
int ump_arch_cache_operations_control(ump_cache_op_control op);
/** Memory synchronization - cache flushing if previous user was different hardware */
int ump_arch_switch_hw_usage( ump_secure_id secure_id, ump_hw_usage new_user );
/** Locking buffer. Blocking call if the buffer is already locked. */
int ump_arch_lock( ump_secure_id secure_id, ump_lock_usage lock_usage );
/** Unlocking buffer. Let other users lock the buffer for their usage */
int ump_arch_unlock( ump_secure_id secure_id );
#ifdef __cplusplus
}
#endif
...
@@ -45,6 +45,7 @@ ump_handle ump_ref_drv_allocate(unsigned long size, ump_alloc_constraints constr
UMP_API_EXPORT int ump_cpu_msync_now(ump_handle memh, ump_cpu_msync_op op, void* address, int size)
{
    int offset;
    ump_mem * mem = (ump_mem*)memh;
    UMP_DEBUG_ASSERT(UMP_INVALID_MEMORY_HANDLE != memh, ("Handle is invalid"));
@@ -52,16 +53,73 @@ UMP_API_EXPORT int ump_cpu_msync_now(ump_handle memh, ump_cpu_msync_op op, void*
       Else we skip flushing if the userspace handle says that it is uncached */
    if ((UMP_MSYNC_READOUT_CACHE_ENABLED!=op) && (0 == mem->is_cached) ) return 0;

    if ( NULL == address )
    {
        address = ((ump_mem*)mem)->mapped_mem;
    }
    offset = (int) ((unsigned long)address - (unsigned long)((ump_mem*)mem)->mapped_mem);

    if ( 0 == size )
    {
        size = (int)((ump_mem*)mem)->size;
    }

    UMP_DEBUG_ASSERT(0 < (((ump_mem*)mem)->ref_count), ("Reference count too low"));
    UMP_DEBUG_ASSERT((size>=0) && (size <= (int)((ump_mem*)mem)->size), ("Memory size of passed handle too low"));
    UMP_DEBUG_ASSERT(NULL != ((ump_mem*)mem)->mapped_mem, ("Error in mapping pointer (not mapped)"));

    if ( (offset+size) > (int)mem->size)
    {
        size = mem->size - offset;
    }

    mem->is_cached = ump_arch_msync(mem->secure_id, mem->mapped_mem, mem->cookie, address, size, op);
    return mem->is_cached;
}
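With the additions above, passing NULL as the address and 0 as the size now means "sync the whole mapping", while an explicit pointer/size pair restricts the operation to a sub-range that is clamped to the end of the buffer. A minimal caller-side sketch (handle and mapping names are hypothetical, and the <ump/ump.h> include path is assumed from the ump/ prefix used elsewhere in this library):

#include <ump/ump.h>  /* assumed public header path */

/* Illustrative only, not part of this commit. 'h' is a valid ump_handle and
 * 'cpu_ptr' its CPU mapping obtained earlier. */
static void flush_examples(ump_handle h, void *cpu_ptr)
{
    /* Whole buffer: NULL address and 0 size are expanded internally to
     * mapped_mem and the full allocation size. */
    ump_cpu_msync_now(h, UMP_MSYNC_CLEAN_AND_INVALIDATE, NULL, 0);

    /* Sub-range: sync 256 bytes starting 4096 bytes into the mapping; a range
     * running past the end of the buffer is clamped to the buffer size. */
    ump_cpu_msync_now(h, UMP_MSYNC_CLEAN, (char*)cpu_ptr + 4096, 256);
}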
UMP_API_EXPORT int ump_cache_operations_control(ump_cache_op_control op)
{
    return ump_arch_cache_operations_control(op);
}

UMP_API_EXPORT int ump_switch_hw_usage( ump_handle memh, ump_hw_usage new_user )
{
    ump_mem * mem = (ump_mem*)memh;
    UMP_DEBUG_ASSERT(UMP_INVALID_MEMORY_HANDLE != memh, ("Handle is invalid"));
    return ump_arch_switch_hw_usage(mem->secure_id, new_user);
}

UMP_API_EXPORT int ump_lock( ump_handle memh, ump_lock_usage lock_usage)
{
    ump_mem * mem = (ump_mem*)memh;
    UMP_DEBUG_ASSERT(UMP_INVALID_MEMORY_HANDLE != memh, ("Handle is invalid"));
    return ump_arch_lock(mem->secure_id, lock_usage);
}

UMP_API_EXPORT int ump_unlock( ump_handle memh )
{
    ump_mem * mem = (ump_mem*)memh;
    UMP_DEBUG_ASSERT(UMP_INVALID_MEMORY_HANDLE != memh, ("Handle is invalid"));
    return ump_arch_unlock(mem->secure_id);
}

UMP_API_EXPORT int ump_switch_hw_usage_secure_id( ump_secure_id ump_id, ump_hw_usage new_user )
{
    return ump_arch_switch_hw_usage(ump_id, new_user);
}

/** Locking buffer. Blocking call if the buffer is already locked. */
UMP_API_EXPORT int ump_lock_secure_id( ump_secure_id ump_id, ump_lock_usage lock_usage )
{
    return ump_arch_lock(ump_id, lock_usage);
}

/** Unlocking buffer. Let other users lock the buffer for their usage */
UMP_API_EXPORT int ump_unlock_secure_id( ump_secure_id ump_id )
{
    return ump_arch_unlock(ump_id);
}
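Taken together, the new entry points give callers a simple ownership protocol around a buffer. A hedged usage sketch (function and variable names are hypothetical; the buffer is assumed to have been allocated and mapped already):

/* Illustrative only: the CPU fills the buffer, flushes it, then hands it to Mali. */
static void cpu_fill_then_hand_to_gpu(ump_handle h, void *cpu_ptr, int nbytes)
{
    ump_lock(h, UMP_READ_WRITE);               /* blocks if another user holds the lock */

    /* ... write nbytes of data through cpu_ptr here ... */

    ump_cpu_msync_now(h, UMP_MSYNC_CLEAN, cpu_ptr, nbytes);
    ump_switch_hw_usage(h, UMP_USED_BY_MALI);  /* flushes if the previous user was different hardware */

    ump_unlock(h);                             /* let other users lock the buffer */
}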
/* Allocate a buffer which can be used directly by hardware, 4kb aligned */
static ump_handle ump_ref_drv_allocate_internal(unsigned long size, ump_alloc_constraints constraints, ump_cache_enabled cache)
{
...
@@ -188,7 +188,7 @@ UMP_API_EXPORT void ump_write(ump_handle dst, unsigned long offset, const void *
 * This function retrieves a memory mapped pointer to the specified UMP memory,
 * that can be used by the CPU. Every successful call to
 * @ref ump_mapped_pointer_get "ump_mapped_pointer_get" is reference counted,
 * and must therefore be followed by a call to
 * @ref ump_mapped_pointer_release "ump_mapped_pointer_release " when the
 * memory mapping is no longer needed.
 *
...
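As the comment notes, every successful ump_mapped_pointer_get() must be balanced by a ump_mapped_pointer_release(). A short sketch of the pairing (exact signatures assumed from the documentation above):

/* Illustrative only. */
static void touch_buffer(ump_handle h)
{
    void *ptr = ump_mapped_pointer_get(h);
    if (NULL != ptr)
    {
        /* ... CPU reads/writes through ptr ... */
        ump_mapped_pointer_release(h);  /* drop the mapping reference when done */
    }
}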
@@ -48,9 +48,16 @@ typedef enum
{
    UMP_MSYNC_CLEAN = 0,
    UMP_MSYNC_CLEAN_AND_INVALIDATE = 1,
    UMP_MSYNC_INVALIDATE = 2,
    UMP_MSYNC_READOUT_CACHE_ENABLED = 128,
} ump_cpu_msync_op;

typedef enum
{
    UMP_READ = 1,
    UMP_READ_WRITE = 3,
} ump_lock_usage;
/** Flushing cache for an ump_handle.
 * The function will always CLEAN_AND_INVALIDATE as long as the \a op is not UMP_MSYNC_READOUT_CACHE_ENABLED.
 * If so it will only report back if the given ump_handle is cacheable.
@@ -59,6 +66,42 @@ typedef enum
UMP_API_EXPORT int ump_cpu_msync_now(ump_handle mem, ump_cpu_msync_op op, void* address, int size);
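UMP_MSYNC_READOUT_CACHE_ENABLED only queries whether the handle is backed by cacheable memory (return value 1 == cached), so a caller can skip maintenance for uncached buffers. A brief sketch with a hypothetical handle h:

/* Illustrative only: probe first, flush only if the mapping is cached. */
if (ump_cpu_msync_now(h, UMP_MSYNC_READOUT_CACHE_ENABLED, NULL, 0))
{
    ump_cpu_msync_now(h, UMP_MSYNC_CLEAN_AND_INVALIDATE, NULL, 0);
}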
typedef enum
{
    UMP_USED_BY_CPU = 0,
    UMP_USED_BY_MALI = 1,
    UMP_USED_BY_UNKNOWN_DEVICE = 100,
} ump_hw_usage;

typedef enum
{
    UMP_CACHE_OP_START = 0,
    UMP_CACHE_OP_FINISH = 1,
} ump_cache_op_control;
/** Cache operation control. Tell when cache maintenance operations start and end.
    This will allow the kernel to merge cache operations together, thus making them faster */
UMP_API_EXPORT int ump_cache_operations_control(ump_cache_op_control op);
/** Memory synchronization - cache flushing if previous user was different hardware */
UMP_API_EXPORT int ump_switch_hw_usage( ump_handle mem, ump_hw_usage new_user );
/** Memory synchronization - cache flushing if previous user was different hardware */
UMP_API_EXPORT int ump_switch_hw_usage_secure_id( ump_secure_id ump_id, ump_hw_usage new_user );
/** Locking buffer. Blocking call if the buffer is already locked. */
UMP_API_EXPORT int ump_lock( ump_handle mem, ump_lock_usage lock_usage );
/** Locking buffer. Blocking call if the buffer is already locked. */
UMP_API_EXPORT int ump_lock_secure_id( ump_secure_id ump_id, ump_lock_usage lock_usage );
/** Unlocking buffer. Let other users lock the buffer for their usage */
UMP_API_EXPORT int ump_unlock( ump_handle mem );
/** Unlocking buffer. Let other users lock the buffer for their usage */
UMP_API_EXPORT int ump_unlock_secure_id( ump_secure_id ump_id );
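The START/FINISH control exists so the kernel can merge a batch of per-buffer flushes into fewer, larger cache operations. A hedged sketch of bracketing a loop of syncs (the buffer array and count are hypothetical):

/* Illustrative only. */
static void flush_all(ump_handle *buffers, int count)
{
    int i;
    ump_cache_operations_control(UMP_CACHE_OP_START);
    for (i = 0; i < count; i++)
    {
        ump_cpu_msync_now(buffers[i], UMP_MSYNC_CLEAN_AND_INVALIDATE, NULL, 0);
    }
    ump_cache_operations_control(UMP_CACHE_OP_FINISH);
}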
#ifdef __cplusplus
}
#endif
...
@@ -51,6 +51,10 @@ typedef enum
    _UMP_IOC_MAP_MEM,    /* not used in Linux */
    _UMP_IOC_UNMAP_MEM,  /* not used in Linux */
    _UMP_IOC_MSYNC,
    _UMP_IOC_CACHE_OPERATIONS_CONTROL,
    _UMP_IOC_SWITCH_HW_USAGE,
    _UMP_IOC_LOCK,
    _UMP_IOC_UNLOCK,
}_ump_uk_functions;

typedef enum
@@ -64,9 +68,30 @@ typedef enum
{
    _UMP_UK_MSYNC_CLEAN = 0,
    _UMP_UK_MSYNC_CLEAN_AND_INVALIDATE = 1,
    _UMP_UK_MSYNC_INVALIDATE = 2,
    _UMP_UK_MSYNC_FLUSH_L1 = 3,
    _UMP_UK_MSYNC_READOUT_CACHE_ENABLED = 128,
} ump_uk_msync_op;
typedef enum
{
    _UMP_UK_CACHE_OP_START = 0,
    _UMP_UK_CACHE_OP_FINISH = 1,
} ump_uk_cache_op_control;

typedef enum
{
    _UMP_UK_READ = 1,
    _UMP_UK_READ_WRITE = 3,
} ump_uk_lock_usage;

typedef enum
{
    _UMP_UK_USED_BY_CPU = 0,
    _UMP_UK_USED_BY_MALI = 1,
    _UMP_UK_USED_BY_UNKNOWN_DEVICE = 100,
} ump_uk_user;
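The user-side enums in ump.h are cast straight to these _UMP_UK_* values (see ump_arch_lock and ump_arch_switch_hw_usage above), so the numeric values must stay in sync. A compile-time guard one could add is sketched below; it is illustrative only, assumes both headers are visible in the same translation unit, and uses the negative-array-size idiom since the code base predates C11 _Static_assert:

/* Each typedef compiles only when the user-level and uk-level values match. */
typedef char ump_assert_read_matches[(UMP_READ == (int)_UMP_UK_READ) ? 1 : -1];
typedef char ump_assert_read_write_matches[(UMP_READ_WRITE == (int)_UMP_UK_READ_WRITE) ? 1 : -1];
typedef char ump_assert_mali_matches[(UMP_USED_BY_MALI == (int)_UMP_UK_USED_BY_MALI) ? 1 : -1];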
/**
 * Get API version ([in,out] u32 api_version, [out] u32 compatible)
 */
@@ -136,10 +161,38 @@ typedef struct _ump_uk_msync_s
    u32 size;            /**< [in] size to flush */
    ump_uk_msync_op op;  /**< [in] flush operation */
    u32 cookie;          /**< [in] cookie stored with reference to the kernel mapping internals */
    u32 secure_id;       /**< [in] secure_id that identifies the ump buffer */
    u32 is_cached;       /**< [out] caching of CPU mappings */
} _ump_uk_msync_s;
typedef struct _ump_uk_cache_operations_control_s
{
    void *ctx;                   /**< [in,out] user-kernel context (trashed on output) */
    ump_uk_cache_op_control op;  /**< [in] cache operations start/stop */
} _ump_uk_cache_operations_control_s;

typedef struct _ump_uk_switch_hw_usage_s
{
    void *ctx;             /**< [in,out] user-kernel context (trashed on output) */
    u32 secure_id;         /**< [in] secure_id that identifies the ump buffer */
    ump_uk_user new_user;  /**< [in] the hardware unit that will use the buffer next */
} _ump_uk_switch_hw_usage_s;

typedef struct _ump_uk_lock_s
{
    void *ctx;                     /**< [in,out] user-kernel context (trashed on output) */
    u32 secure_id;                 /**< [in] secure_id that identifies the ump buffer */
    ump_uk_lock_usage lock_usage;  /**< [in] read-only or read/write lock */
} _ump_uk_lock_s;

typedef struct _ump_uk_unlock_s
{
    void *ctx;      /**< [in,out] user-kernel context (trashed on output) */
    u32 secure_id;  /**< [in] secure_id that identifies the ump buffer */
} _ump_uk_unlock_s;
#ifdef __cplusplus
}
#endif
...
@@ -45,7 +45,12 @@ extern "C"
#define UMP_IOC_ALLOCATE _IOWR(UMP_IOCTL_NR, _UMP_IOC_ALLOCATE, _ump_uk_allocate_s)
#define UMP_IOC_RELEASE _IOR(UMP_IOCTL_NR, _UMP_IOC_RELEASE, _ump_uk_release_s)
#define UMP_IOC_SIZE_GET _IOWR(UMP_IOCTL_NR, _UMP_IOC_SIZE_GET, _ump_uk_size_get_s)
#define UMP_IOC_MSYNC _IOW(UMP_IOCTL_NR, _UMP_IOC_MSYNC, _ump_uk_msync_s)
#define UMP_IOC_CACHE_OPERATIONS_CONTROL _IOW(UMP_IOCTL_NR, _UMP_IOC_CACHE_OPERATIONS_CONTROL, _ump_uk_cache_operations_control_s)
#define UMP_IOC_SWITCH_HW_USAGE _IOW(UMP_IOCTL_NR, _UMP_IOC_SWITCH_HW_USAGE, _ump_uk_switch_hw_usage_s)
#define UMP_IOC_LOCK _IOW(UMP_IOCTL_NR, _UMP_IOC_LOCK, _ump_uk_lock_s)
#define UMP_IOC_UNLOCK _IOW(UMP_IOCTL_NR, _UMP_IOC_UNLOCK, _ump_uk_unlock_s)
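These macros resolve to ordinary ioctl request codes, so the _ump_uku_* wrappers further down boil down to an ioctl() on the UMP device. A hedged sketch of what a direct invocation of UMP_IOC_LOCK could look like; the /dev/ump path and the NULL ctx are assumptions (real code goes through the wrappers and the session context), and the UMP uk headers are assumed to be included:

#include <fcntl.h>
#include <sys/ioctl.h>
#include <unistd.h>

/* Illustrative only, not part of this commit. */
static int lock_buffer_raw(u32 secure_id)
{
    _ump_uk_lock_s args;
    int fd = open("/dev/ump", O_RDWR);  /* device node path is an assumption */
    if (fd < 0) return -1;

    args.ctx = NULL;                    /* filled in by the real wrappers */
    args.secure_id = secure_id;
    args.lock_usage = _UMP_UK_READ_WRITE;

    ioctl(fd, UMP_IOC_LOCK, &args);
    close(fd);
    return 0;
}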
#ifdef __cplusplus
...
@@ -19,8 +19,12 @@
#define _XOPEN_SOURCE 600
#endif
#ifndef _POSIX_C_SOURCE
#define _POSIX_C_SOURCE 200112L
#elif _POSIX_C_SOURCE < 200112L
#undef _POSIX_C_SOURCE
#define _POSIX_C_SOURCE 200112L
#endif
#include <ump/ump_osu.h>
#include <ump/ump_debug.h>
...
@@ -133,6 +133,26 @@ void _ump_uku_msynch(_ump_uk_msync_s *args)
    ump_driver_ioctl(args->ctx, UMP_IOC_MSYNC, args);
}
void _ump_uku_cache_operations_control( _ump_uk_cache_operations_control_s *args )
{
    ump_driver_ioctl(args->ctx, UMP_IOC_CACHE_OPERATIONS_CONTROL, args);
}

void _ump_uku_switch_hw_usage( _ump_uk_switch_hw_usage_s *args )
{
    ump_driver_ioctl(args->ctx, UMP_IOC_SWITCH_HW_USAGE, args);
}

void _ump_uku_lock( _ump_uk_lock_s *args )
{
    ump_driver_ioctl(args->ctx, UMP_IOC_LOCK, args);
}

void _ump_uku_unlock( _ump_uk_unlock_s *args )
{
    ump_driver_ioctl(args->ctx, UMP_IOC_UNLOCK, args);
}
int _ump_uku_map_mem(_ump_uk_map_mem_s *args)
{
    int flags;
...
@@ -49,6 +49,15 @@ void _ump_uku_unmap_mem( _ump_uk_unmap_mem_s *args );
void _ump_uku_msynch(_ump_uk_msync_s *args);
int _ump_uku_map_mem( _ump_uk_map_mem_s *args );
void _ump_uku_cache_operations_control( _ump_uk_cache_operations_control_s *args );
void _ump_uku_switch_hw_usage( _ump_uk_switch_hw_usage_s *args );
void _ump_uku_lock( _ump_uk_lock_s *args );
void _ump_uku_unlock( _ump_uk_unlock_s *args );
#ifdef __cplusplus
}
#endif
...