Commit 3211f013 authored by Luc Verhaegen

import r3p0 source


Signed-off-by: Luc Verhaegen <libv@skynet.be>
parent fa5ad94a
@@ -188,7 +188,7 @@ UMP_API_EXPORT void ump_write(ump_handle dst, unsigned long offset, const void *
* This function retrieves a memory mapped pointer to the specified UMP memory,
* that can be used by the CPU. Every successful call to
* @ref ump_mapped_pointer_get "ump_mapped_pointer_get" is reference counted,
- * and must therefor be followed by a call to
+ * and must therefore be followed by a call to
* @ref ump_mapped_pointer_release "ump_mapped_pointer_release " when the
* memory mapping is no longer needed.
*
@@ -258,3 +258,57 @@ int ump_arch_msync(ump_secure_id secure_id, void* mapping, unsigned long cookie, void * address, unsigned long size, ump_cpu_msync_op op)
	}
	return dd_msync_call_arg.is_cached;
}

#if UNIFIED_MEMORY_PROVIDER_VERSION > 2
/** Cache operation control. Tells the kernel when cache maintenance operations start
    and end, allowing it to merge cache operations together and thus make them faster. */
int ump_arch_cache_operations_control(ump_cache_op_control op)
{
	_ump_uk_cache_operations_control_s dd_cache_control_arg;
	dd_cache_control_arg.op = (ump_uk_cache_op_control)op;
	dd_cache_control_arg.ctx = ump_uk_ctx;
	UMP_DEBUG_PRINT(4, ("Cache control op:%d", (u32)op));
	_ump_uku_cache_operations_control( &dd_cache_control_arg );
	return 1; /* Always succeeds */
}

int ump_arch_switch_hw_usage( ump_secure_id secure_id, ump_hw_usage new_user )
{
	_ump_uk_switch_hw_usage_s dd_switch_user_arg;
	dd_switch_user_arg.secure_id = secure_id;
	dd_switch_user_arg.new_user = (ump_uk_user)new_user;
	dd_switch_user_arg.ctx = ump_uk_ctx;
	UMP_DEBUG_PRINT(4, ("Switch user UMP:%d User:%d", secure_id, (u32)new_user));
	_ump_uku_switch_hw_usage( &dd_switch_user_arg );
	return 1; /* Always succeeds */
}

int ump_arch_lock( ump_secure_id secure_id, ump_lock_usage lock_usage )
{
	_ump_uk_lock_s dd_lock_arg;
	dd_lock_arg.ctx = ump_uk_ctx;
	dd_lock_arg.secure_id = secure_id;
	dd_lock_arg.lock_usage = (ump_uk_lock_usage) lock_usage;
	UMP_DEBUG_PRINT(4, ("Lock UMP:%d", secure_id));
	_ump_uku_lock( &dd_lock_arg );
	return 1; /* Always succeeds */
}

int ump_arch_unlock( ump_secure_id secure_id )
{
	_ump_uk_unlock_s dd_unlock_arg;
	dd_unlock_arg.ctx = ump_uk_ctx;
	dd_unlock_arg.secure_id = secure_id;
	UMP_DEBUG_PRINT(4, ("Unlock UMP:%d", secure_id));
	_ump_uku_unlock( &dd_unlock_arg );
	return 1; /* Always succeeds */
}
#endif /* UNIFIED_MEMORY_PROVIDER_VERSION */
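[Annotation] The batching contract implied by ump_arch_cache_operations_control is easiest to see in use. Below is a minimal sketch using only the signatures added in this commit; the helper, its buffer arrays, and the count are hypothetical placeholders, and the public enums from ump.h are assumed to be visible:

#include "ump_arch.h"

/* Hypothetical helper: flush several already-mapped UMP buffers in one batch,
 * bracketing the msync calls so the kernel may merge the cache operations. */
static void flush_buffers(ump_secure_id *ids, void **mappings,
                          unsigned long *cookies, unsigned long *sizes, int n)
{
	int i;
	ump_arch_cache_operations_control(UMP_CACHE_OP_START);
	for (i = 0; i < n; i++)
	{
		ump_arch_msync(ids[i], mappings[i], cookies[i],
		               mappings[i], sizes[i], UMP_MSYNC_CLEAN_AND_INVALIDATE);
	}
	ump_arch_cache_operations_control(UMP_CACHE_OP_FINISH);
}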
@@ -58,6 +58,21 @@ void ump_arch_unmap(void* mapping, unsigned long size, unsigned long cookie);
* @return Is_cached: 1==True 0==NonCached */
int ump_arch_msync(ump_secure_id secure_id, void* mapping, unsigned long cookie, void * address, unsigned long size, ump_cpu_msync_op op);
#if UNIFIED_MEMORY_PROVIDER_VERSION > 2
/** Cache operation control. Tells the kernel when cache maintenance operations start
    and end, allowing it to merge cache operations together and thus make them faster. */
int ump_arch_cache_operations_control(ump_cache_op_control op);

/** Memory synchronization - cache flushing if previous user was different hardware */
int ump_arch_switch_hw_usage( ump_secure_id secure_id, ump_hw_usage new_user );

/** Lock the buffer. Blocking call if the buffer is already locked. */
int ump_arch_lock( ump_secure_id secure_id, ump_lock_usage lock_usage );

/** Unlock the buffer. Lets other users lock the buffer for their usage. */
int ump_arch_unlock( ump_secure_id secure_id );
#endif /* UNIFIED_MEMORY_PROVIDER_VERSION */
#ifdef __cplusplus
}
#endif
@@ -45,8 +45,14 @@ extern "C"
#define UMP_IOC_ALLOCATE _IOWR(UMP_IOCTL_NR, _UMP_IOC_ALLOCATE, _ump_uk_allocate_s)
#define UMP_IOC_RELEASE _IOR(UMP_IOCTL_NR, _UMP_IOC_RELEASE, _ump_uk_release_s)
#define UMP_IOC_SIZE_GET _IOWR(UMP_IOCTL_NR, _UMP_IOC_SIZE_GET, _ump_uk_size_get_s)
-#define UMP_IOC_MSYNC _IOW(UMP_IOCTL_NR, _UMP_IOC_MSYNC, _ump_uk_size_get_s)
+#define UMP_IOC_MSYNC _IOW(UMP_IOCTL_NR, _UMP_IOC_MSYNC, _ump_uk_msync_s)
#if UNIFIED_MEMORY_PROVIDER_VERSION > 2
#define UMP_IOC_CACHE_OPERATIONS_CONTROL _IOW(UMP_IOCTL_NR, _UMP_IOC_CACHE_OPERATIONS_CONTROL, _ump_uk_cache_operations_control_s)
#define UMP_IOC_SWITCH_HW_USAGE _IOW(UMP_IOCTL_NR, _UMP_IOC_SWITCH_HW_USAGE, _ump_uk_switch_hw_usage_s)
#define UMP_IOC_LOCK _IOW(UMP_IOCTL_NR, _UMP_IOC_LOCK, _ump_uk_lock_s)
#define UMP_IOC_UNLOCK _IOW(UMP_IOCTL_NR, _UMP_IOC_UNLOCK, _ump_uk_unlock_s)
#endif /* UNIFIED_MEMORY_PROVIDER_VERSION */
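[Annotation] These request codes pair with the argument structures from ump_uk_types.h through the standard Linux ioctl(2) call. A sketch of a raw invocation follows; the open UMP device file descriptor and the secure id are placeholders, and the device path is not part of this hunk:

#include <stdio.h>
#include <sys/ioctl.h>
#include "ump_ioctl.h"
#include "ump_uk_types.h"

/* Sketch only: lock one buffer through the raw ioctl interface. */
static int raw_lock(int fd, u32 secure_id)
{
	_ump_uk_lock_s args;
	args.ctx = NULL; /* the library normally passes its session context here */
	args.secure_id = secure_id;
	args.lock_usage = _UMP_UK_READ_WRITE;
	if (ioctl(fd, UMP_IOC_LOCK, &args) < 0)
	{
		perror("UMP_IOC_LOCK");
		return -1;
	}
	return 0;
}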
#ifdef __cplusplus
}
@@ -45,6 +45,7 @@ ump_handle ump_ref_drv_allocate(unsigned long size, ump_alloc_constraints constr
UMP_API_EXPORT int ump_cpu_msync_now(ump_handle memh, ump_cpu_msync_op op, void* address, int size)
{
	int offset;
	ump_mem * mem = (ump_mem*)memh;
	UMP_DEBUG_ASSERT(UMP_INVALID_MEMORY_HANDLE != memh, ("Handle is invalid"));
@@ -52,16 +53,75 @@ UMP_API_EXPORT int ump_cpu_msync_now(ump_handle memh, ump_cpu_msync_op op, void* address, int size)
	   Else we skip flushing if the userspace handle says that it is uncached */
	if ((UMP_MSYNC_READOUT_CACHE_ENABLED != op) && (0 == mem->is_cached)) return 0;

	if (NULL == address)
	{
		address = mem->mapped_mem;
	}
	offset = (int)((unsigned long)address - (unsigned long)mem->mapped_mem);

	if (0 == size)
	{
		size = (int)mem->size;
	}

	UMP_DEBUG_ASSERT(0 < (mem->ref_count), ("Reference count too low"));
	UMP_DEBUG_ASSERT((size >= 0) && (size <= (int)mem->size), ("Memory size of passed handle too low"));
	UMP_DEBUG_ASSERT(NULL != mem->mapped_mem, ("Error in mapping pointer (not mapped)"));

	if (size > (int)mem->size) size = (int)mem->size;
	if ((offset + size) > (int)mem->size)
	{
		size = (int)(mem->size - offset);
	}

	mem->is_cached = ump_arch_msync(mem->secure_id, mem->mapped_mem, mem->cookie, address, size, op);
	return mem->is_cached;
}
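[Annotation] The NULL/0 defaulting above means callers can flush an entire mapping without computing addresses or sizes themselves. Illustrative calls; the handle is assumed to come from an earlier allocation plus ump_mapped_pointer_get:

/* Illustrative: 'handle' is assumed to be an allocated and mapped UMP handle. */
static void flush_whole_buffer(ump_handle handle)
{
	int cached;

	/* NULL address and size 0 select the whole mapping, per the defaulting above. */
	cached = ump_cpu_msync_now(handle, UMP_MSYNC_CLEAN_AND_INVALIDATE, NULL, 0);

	/* Per the ump.h comment, this op only reports whether the handle is cacheable. */
	cached = ump_cpu_msync_now(handle, UMP_MSYNC_READOUT_CACHE_ENABLED, NULL, 0);
	(void)cached;
}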
#if UNIFIED_MEMORY_PROVIDER_VERSION > 2
UMP_API_EXPORT int ump_cache_operations_control(ump_cache_op_control op)
{
	return ump_arch_cache_operations_control(op);
}

UMP_API_EXPORT int ump_switch_hw_usage( ump_handle memh, ump_hw_usage new_user )
{
	ump_mem * mem = (ump_mem*)memh;
	UMP_DEBUG_ASSERT(UMP_INVALID_MEMORY_HANDLE != memh, ("Handle is invalid"));
	return ump_arch_switch_hw_usage(mem->secure_id, new_user);
}

UMP_API_EXPORT int ump_lock( ump_handle memh, ump_lock_usage lock_usage )
{
	ump_mem * mem = (ump_mem*)memh;
	UMP_DEBUG_ASSERT(UMP_INVALID_MEMORY_HANDLE != memh, ("Handle is invalid"));
	return ump_arch_lock(mem->secure_id, lock_usage);
}

UMP_API_EXPORT int ump_unlock( ump_handle memh )
{
	ump_mem * mem = (ump_mem*)memh;
	UMP_DEBUG_ASSERT(UMP_INVALID_MEMORY_HANDLE != memh, ("Handle is invalid"));
	return ump_arch_unlock(mem->secure_id);
}

UMP_API_EXPORT int ump_switch_hw_usage_secure_id( ump_secure_id ump_id, ump_hw_usage new_user )
{
	return ump_arch_switch_hw_usage(ump_id, new_user);
}

/** Lock the buffer. Blocking call if the buffer is already locked. */
UMP_API_EXPORT int ump_lock_secure_id( ump_secure_id ump_id, ump_lock_usage lock_usage )
{
	return ump_arch_lock(ump_id, lock_usage);
}

/** Unlock the buffer. Lets other users lock the buffer for their usage. */
UMP_API_EXPORT int ump_unlock_secure_id( ump_secure_id ump_id )
{
	return ump_arch_unlock(ump_id);
}
#endif /* UNIFIED_MEMORY_PROVIDER_VERSION */
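[Annotation] Taken together, these wrappers suggest a CPU-side producer sequence like the one below before handing a buffer to the GPU. The ordering is assembled for illustration from the calls in this commit, not documented by it, and fill_buffer is a hypothetical stand-in for the actual producer:

void fill_buffer(void *buf); /* hypothetical producer, not part of UMP */

static void produce(ump_handle handle)
{
	void *cpu_ptr = ump_mapped_pointer_get(handle); /* ref-counted mapping */

	ump_lock(handle, UMP_READ_WRITE);             /* blocks while another user holds the lock */
	ump_switch_hw_usage(handle, UMP_USED_BY_CPU); /* sync caches if hardware used the buffer last */
	fill_buffer(cpu_ptr);
	ump_cpu_msync_now(handle, UMP_MSYNC_CLEAN_AND_INVALIDATE, NULL, 0);
	ump_switch_hw_usage(handle, UMP_USED_BY_MALI);
	ump_unlock(handle);                           /* let the consumer take the lock */

	ump_mapped_pointer_release(handle);
}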
/* Allocate a buffer which can be used directly by hardware, 4kb aligned */
static ump_handle ump_ref_drv_allocate_internal(unsigned long size, ump_alloc_constraints constraints, ump_cache_enabled cache)
{
@@ -48,9 +48,20 @@ typedef enum
{
	UMP_MSYNC_CLEAN = 0,
	UMP_MSYNC_CLEAN_AND_INVALIDATE = 1,
#if UNIFIED_MEMORY_PROVIDER_VERSION > 2
	UMP_MSYNC_INVALIDATE = 2,
#endif
	UMP_MSYNC_READOUT_CACHE_ENABLED = 128,
} ump_cpu_msync_op;

#if UNIFIED_MEMORY_PROVIDER_VERSION > 2
typedef enum
{
	UMP_READ = 1,
	UMP_READ_WRITE = 3,
} ump_lock_usage;
#endif /* UNIFIED_MEMORY_PROVIDER_VERSION */
/** Flushing cache for an ump_handle.
 * The function will always CLEAN_AND_INVALIDATE as long as \a op is not UMP_MSYNC_READOUT_CACHE_ENABLED.
 * In that case it will only report back whether the given ump_handle is cacheable.
@@ -58,6 +69,42 @@ typedef enum
* Return value is 1 if cache is enabled, and 0 if it is disabled for the given allocation.*/
UMP_API_EXPORT int ump_cpu_msync_now(ump_handle mem, ump_cpu_msync_op op, void* address, int size);
#if UNIFIED_MEMORY_PROVIDER_VERSION > 2
typedef enum
{
	UMP_USED_BY_CPU = 0,
	UMP_USED_BY_MALI = 1,
	UMP_USED_BY_UNKNOWN_DEVICE = 100,
} ump_hw_usage;

typedef enum
{
	UMP_CACHE_OP_START = 0,
	UMP_CACHE_OP_FINISH = 1,
} ump_cache_op_control;

/** Cache operation control. Tells the kernel when cache maintenance operations start
    and end, allowing it to merge cache operations together and thus make them faster. */
UMP_API_EXPORT int ump_cache_operations_control(ump_cache_op_control op);

/** Memory synchronization - cache flushing if previous user was different hardware */
UMP_API_EXPORT int ump_switch_hw_usage( ump_handle mem, ump_hw_usage new_user );

/** Memory synchronization - cache flushing if previous user was different hardware */
UMP_API_EXPORT int ump_switch_hw_usage_secure_id( ump_secure_id ump_id, ump_hw_usage new_user );

/** Lock the buffer. Blocking call if the buffer is already locked. */
UMP_API_EXPORT int ump_lock( ump_handle mem, ump_lock_usage lock_usage );

/** Lock the buffer. Blocking call if the buffer is already locked. */
UMP_API_EXPORT int ump_lock_secure_id( ump_secure_id ump_id, ump_lock_usage lock_usage );

/** Unlock the buffer. Lets other users lock the buffer for their usage. */
UMP_API_EXPORT int ump_unlock( ump_handle mem );

/** Unlock the buffer. Lets other users lock the buffer for their usage. */
UMP_API_EXPORT int ump_unlock_secure_id( ump_secure_id ump_id );
#endif /* UNIFIED_MEMORY_PROVIDER_VERSION */
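[Annotation] The *_secure_id variants let a process that only holds the u32 secure ID (for example, one received over IPC) take part in locking without ever mapping the buffer. Note also that the ump_lock_usage values (1 and 3) look like read/write bit flags in which write implies read, though the header does not say so explicitly. A minimal consumer sketch under those assumptions:

/* Sketch: serialize with a producer using only the shared secure id. */
static void consume_by_id(ump_secure_id id)
{
	ump_lock_secure_id(id, UMP_READ); /* blocks while the producer holds the lock */
	/* ... point hardware at the buffer identified by 'id' ... */
	ump_unlock_secure_id(id);
}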
#ifdef __cplusplus
}
@@ -51,6 +51,12 @@ typedef enum
	_UMP_IOC_MAP_MEM,    /* not used in Linux */
	_UMP_IOC_UNMAP_MEM,  /* not used in Linux */
	_UMP_IOC_MSYNC,
#if UNIFIED_MEMORY_PROVIDER_VERSION > 2
	_UMP_IOC_CACHE_OPERATIONS_CONTROL,
	_UMP_IOC_SWITCH_HW_USAGE,
	_UMP_IOC_LOCK,
	_UMP_IOC_UNLOCK,
#endif /* UNIFIED_MEMORY_PROVIDER_VERSION */
} _ump_uk_functions;
typedef enum
@@ -64,9 +70,34 @@ typedef enum
{
	_UMP_UK_MSYNC_CLEAN = 0,
	_UMP_UK_MSYNC_CLEAN_AND_INVALIDATE = 1,
#if UNIFIED_MEMORY_PROVIDER_VERSION > 2
	_UMP_UK_MSYNC_INVALIDATE = 2,
	_UMP_UK_MSYNC_FLUSH_L1 = 3,
#endif /* UNIFIED_MEMORY_PROVIDER_VERSION */
	_UMP_UK_MSYNC_READOUT_CACHE_ENABLED = 128,
} ump_uk_msync_op;

#if UNIFIED_MEMORY_PROVIDER_VERSION > 2
typedef enum
{
	_UMP_UK_CACHE_OP_START = 0,
	_UMP_UK_CACHE_OP_FINISH = 1,
} ump_uk_cache_op_control;

typedef enum
{
	_UMP_UK_READ = 1,
	_UMP_UK_READ_WRITE = 3,
} ump_uk_lock_usage;

typedef enum
{
	_UMP_UK_USED_BY_CPU = 0,
	_UMP_UK_USED_BY_MALI = 1,
	_UMP_UK_USED_BY_UNKNOWN_DEVICE = 100,
} ump_uk_user;
#endif /* UNIFIED_MEMORY_PROVIDER_VERSION */
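[Annotation] ump_arch.c converts the public enums to these user-kernel enums with plain casts (e.g. (ump_uk_lock_usage) lock_usage), which silently assumes the two value sets stay identical. A compile-time guard could document that dependency; this is a sketch with both ump.h and ump_uk_types.h assumed in scope, not part of the commit:

/* Fails to compile if the public and user-kernel lock values ever diverge. */
typedef char ump_lock_values_must_match[
	(UMP_READ == _UMP_UK_READ && UMP_READ_WRITE == _UMP_UK_READ_WRITE) ? 1 : -1];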
/**
* Get API version ([in,out] u32 api_version, [out] u32 compatible)
*/
@@ -136,10 +167,40 @@ typedef struct _ump_uk_msync_s
	u32 size;            /**< [in] size to flush */
	ump_uk_msync_op op;  /**< [in] flush operation */
	u32 cookie;          /**< [in] cookie stored with reference to the kernel mapping internals */
-	u32 secure_id;       /**< [in] cookie stored with reference to the kernel mapping internals */
+	u32 secure_id;       /**< [in] secure_id that identifies the ump buffer */
	u32 is_cached;       /**< [out] caching of CPU mappings */
} _ump_uk_msync_s;

#if UNIFIED_MEMORY_PROVIDER_VERSION > 2
typedef struct _ump_uk_cache_operations_control_s
{
	void *ctx;                   /**< [in,out] user-kernel context (trashed on output) */
	ump_uk_cache_op_control op;  /**< [in] cache operations start/stop */
} _ump_uk_cache_operations_control_s;

typedef struct _ump_uk_switch_hw_usage_s
{
	void *ctx;             /**< [in,out] user-kernel context (trashed on output) */
	u32 secure_id;         /**< [in] secure_id that identifies the ump buffer */
	ump_uk_user new_user;  /**< [in] the hardware unit that will use the buffer next */
} _ump_uk_switch_hw_usage_s;

typedef struct _ump_uk_lock_s
{
	void *ctx;                     /**< [in,out] user-kernel context (trashed on output) */
	u32 secure_id;                 /**< [in] secure_id that identifies the ump buffer */
	ump_uk_lock_usage lock_usage;  /**< [in] lock for read-only or read/write access */
} _ump_uk_lock_s;

typedef struct _ump_uk_unlock_s
{
	void *ctx;      /**< [in,out] user-kernel context (trashed on output) */
	u32 secure_id;  /**< [in] secure_id that identifies the ump buffer */
} _ump_uk_unlock_s;
#endif /* UNIFIED_MEMORY_PROVIDER_VERSION */
#ifdef __cplusplus
}
#endif
@@ -19,6 +19,7 @@
* File implements the user side of the user-kernel interface
*/
#include "ump.h"
#include "ump_uku.h"
#include <stdio.h>
#include "ump_ioctl.h"
@@ -133,6 +134,28 @@ void _ump_uku_msynch(_ump_uk_msync_s *args)
	ump_driver_ioctl(args->ctx, UMP_IOC_MSYNC, args);
}

#if UNIFIED_MEMORY_PROVIDER_VERSION > 2
void _ump_uku_cache_operations_control( _ump_uk_cache_operations_control_s *args )
{
	ump_driver_ioctl(args->ctx, UMP_IOC_CACHE_OPERATIONS_CONTROL, args);
}

void _ump_uku_switch_hw_usage( _ump_uk_switch_hw_usage_s *args )
{
	ump_driver_ioctl(args->ctx, UMP_IOC_SWITCH_HW_USAGE, args);
}

void _ump_uku_lock( _ump_uk_lock_s *args )
{
	ump_driver_ioctl(args->ctx, UMP_IOC_LOCK, args);
}

void _ump_uku_unlock( _ump_uk_unlock_s *args )
{
	ump_driver_ioctl(args->ctx, UMP_IOC_UNLOCK, args);
}
#endif /* UNIFIED_MEMORY_PROVIDER_VERSION */
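[Annotation] ump_driver_ioctl itself is defined outside this hunk; from its use here it presumably resolves the session context to an open file descriptor and forwards to ioctl(2). A hypothetical shape, with the context layout invented for illustration and not taken from this diff:

#include <sys/ioctl.h>

/* Hypothetical sketch; the real context layout is not shown in this diff. */
struct ump_session { int fd; };

static int ump_driver_ioctl_sketch(void *ctx, unsigned long request, void *args)
{
	struct ump_session *session = (struct ump_session *)ctx;
	return ioctl(session->fd, request, args);
}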
int _ump_uku_map_mem(_ump_uk_map_mem_s *args)
{
	int flags;
@@ -43,12 +43,22 @@ _ump_osu_errcode_t _ump_uku_size_get( _ump_uk_size_get_s *args );
_ump_osu_errcode_t _ump_uku_get_api_version( _ump_uk_api_version_s *args );
#if UNIFIED_MEMORY_PROVIDER_VERSION > 2
-int _ump_uku_map_mem( _ump_uk_map_mem_s *args );
void _ump_uku_unmap_mem( _ump_uk_unmap_mem_s *args );
void _ump_uku_msynch(_ump_uk_msync_s *args);
+int _ump_uku_map_mem( _ump_uk_map_mem_s *args );

void _ump_uku_cache_operations_control( _ump_uk_cache_operations_control_s *args );
void _ump_uku_switch_hw_usage( _ump_uk_switch_hw_usage_s *args );
void _ump_uku_lock( _ump_uk_lock_s *args );
void _ump_uku_unlock( _ump_uk_unlock_s *args );
#endif /* UNIFIED_MEMORY_PROVIDER_VERSION */
#ifdef __cplusplus
}
#endif