Merge commit '2c563880ea' into work.xattr
pick xattr_handler conversion from lustre tree

@@ -59,7 +59,6 @@ static struct dentry *binder_debugfs_dir_entry_proc;
 static struct binder_node *binder_context_mgr_node;
 static kuid_t binder_context_mgr_uid = INVALID_UID;
 static int binder_last_id;
-static struct workqueue_struct *binder_deferred_workqueue;

 #define BINDER_DEBUG_ENTRY(name) \
 static int binder_##name##_open(struct inode *inode, struct file *file) \
@@ -3227,7 +3226,7 @@ binder_defer_work(struct binder_proc *proc, enum binder_deferred_state defer)
 	if (hlist_unhashed(&proc->deferred_work_node)) {
 		hlist_add_head(&proc->deferred_work_node,
 			       &binder_deferred_list);
-		queue_work(binder_deferred_workqueue, &binder_deferred_work);
+		schedule_work(&binder_deferred_work);
 	}
 	mutex_unlock(&binder_deferred_lock);
 }
@@ -3679,10 +3678,6 @@ static int __init binder_init(void)
 {
 	int ret;

-	binder_deferred_workqueue = create_singlethread_workqueue("binder");
-	if (!binder_deferred_workqueue)
-		return -ENOMEM;
-
 	binder_debugfs_dir_entry_root = debugfs_create_dir("binder", NULL);
 	if (binder_debugfs_dir_entry_root)
 		binder_debugfs_dir_entry_proc = debugfs_create_dir("proc",

@@ -17,4 +17,17 @@ config SYNC_FILE
	  Files fds, to the DRM driver for example. More details at
	  Documentation/sync_file.txt.

+config SW_SYNC
+	bool "Sync File Validation Framework"
+	default n
+	depends on SYNC_FILE
+	depends on DEBUG_FS
+	---help---
+	  A sync object driver that uses a 32bit counter to coordinate
+	  synchronization. Useful when there is no hardware primitive backing
+	  the synchronization.
+
+	  WARNING: improper use of this can result in deadlocking kernel
+	  drivers from userspace. Intended for test and debug only.
+
 endmenu

@@ -1,2 +1,3 @@
 obj-y := dma-buf.o fence.o reservation.o seqno-fence.o fence-array.o
 obj-$(CONFIG_SYNC_FILE) += sync_file.o
+obj-$(CONFIG_SW_SYNC) += sw_sync.o sync_debug.o

@@ -1,5 +1,5 @@
 /*
- * drivers/dma-buf/sw_sync.c
+ * Sync File validation framework
  *
  * Copyright (C) 2012 Google, Inc.
  *
@@ -23,8 +23,38 @@
 #include "sync_debug.h"

 #define CREATE_TRACE_POINTS
-#include "trace/sync.h"
+#include "sync_trace.h"

+/*
+ * SW SYNC validation framework
+ *
+ * A sync object driver that uses a 32bit counter to coordinate
+ * synchronization. Useful when there is no hardware primitive backing
+ * the synchronization.
+ *
+ * To start the framework just open:
+ *
+ * <debugfs>/sync/sw_sync
+ *
+ * That will create a sync timeline, all fences created under this timeline
+ * file descriptor will belong to the this timeline.
+ *
+ * The 'sw_sync' file can be opened many times as to create different
+ * timelines.
+ *
+ * Fences can be created with SW_SYNC_IOC_CREATE_FENCE ioctl with struct
+ * sw_sync_ioctl_create_fence as parameter.
+ *
+ * To increment the timeline counter, SW_SYNC_IOC_INC ioctl should be used
+ * with the increment as u32. This will update the last signaled value
+ * from the timeline and signal any fence that has a seqno smaller or equal
+ * to it.
+ *
+ * struct sw_sync_ioctl_create_fence
+ * @value:	the seqno to initialise the fence with
+ * @name:	the name of the new sync point
+ * @fence:	return the fd of the new sync_file with the created fence
+ */
 struct sw_sync_create_fence_data {
 	__u32 value;
 	char name[32];
@@ -35,6 +65,7 @@ struct sw_sync_create_fence_data {

 #define SW_SYNC_IOC_CREATE_FENCE	_IOWR(SW_SYNC_IOC_MAGIC, 0,\
 		struct sw_sync_create_fence_data)

 #define SW_SYNC_IOC_INC			_IOW(SW_SYNC_IOC_MAGIC, 1, __u32)

+static const struct fence_ops timeline_fence_ops;
@@ -176,7 +207,7 @@ static void timeline_fence_release(struct fence *fence)

 	spin_lock_irqsave(fence->lock, flags);
 	list_del(&pt->child_list);
-	if (WARN_ON_ONCE(!list_empty(&pt->active_list)))
+	if (!list_empty(&pt->active_list))
 		list_del(&pt->active_list);
 	spin_unlock_irqrestore(fence->lock, flags);

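
For context, the comment block added in the sw_sync hunks above documents the
debugfs ABI. Below is a minimal userspace sketch of that flow; it is an
editorial illustration, not part of this diff. The 'W' ioctl magic and the
__s32 fence return field are assumptions taken from the 4.9-era driver, and
debugfs is assumed mounted at /sys/kernel/debug.

#include <stdint.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/ioctl.h>

struct sw_sync_create_fence_data {
	uint32_t value;		/* seqno to initialise the fence with */
	char	 name[32];	/* name of the new sync point */
	int32_t	 fence;		/* returned fd of the new sync_file (assumed field) */
};

#define SW_SYNC_IOC_MAGIC	'W'	/* assumption: per the in-tree driver */
#define SW_SYNC_IOC_CREATE_FENCE \
	_IOWR(SW_SYNC_IOC_MAGIC, 0, struct sw_sync_create_fence_data)
#define SW_SYNC_IOC_INC		_IOW(SW_SYNC_IOC_MAGIC, 1, uint32_t)

int main(void)
{
	/* Each open() of sw_sync creates an independent timeline. */
	int tl = open("/sys/kernel/debug/sync/sw_sync", O_RDWR);
	struct sw_sync_create_fence_data data = { .value = 1, .name = "pt" };
	uint32_t inc = 1;

	if (tl < 0 || ioctl(tl, SW_SYNC_IOC_CREATE_FENCE, &data) < 0)
		return 1;
	/* Advance the timeline: signals every fence with seqno <= 1. */
	ioctl(tl, SW_SYNC_IOC_INC, &inc);
	close(data.fence);
	close(tl);
	return 0;
}
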

@@ -1,5 +1,5 @@
 /*
- * drivers/base/sync.c
+ * Sync File validation framework and debug information
  *
  * Copyright (C) 2012 Google, Inc.
  *

@@ -1,5 +1,5 @@
 /*
- * include/linux/sync.h
+ * Sync File validation framework and debug infomation
  *
  * Copyright (C) 2012 Google, Inc.
  *

@@ -1,11 +1,11 @@
 #undef TRACE_SYSTEM
-#define TRACE_INCLUDE_PATH ../../drivers/staging/android/trace
-#define TRACE_SYSTEM sync
+#define TRACE_INCLUDE_PATH ../../drivers/dma-buf
+#define TRACE_SYSTEM sync_trace

 #if !defined(_TRACE_SYNC_H) || defined(TRACE_HEADER_MULTI_READ)
 #define _TRACE_SYNC_H

-#include "../sync_debug.h"
+#include "sync_debug.h"
 #include <linux/tracepoint.h>

 TRACE_EVENT(sync_timeline,

@@ -24,19 +24,6 @@ config ANDROID_LOW_MEMORY_KILLER
	  scripts (/init.rc), and it defines priority values with minimum free memory size
	  for each priority.

-config SW_SYNC
-	bool "Software synchronization framework"
-	default n
-	depends on SYNC_FILE
-	depends on DEBUG_FS
-	---help---
-	  A sync object driver that uses a 32bit counter to coordinate
-	  synchronization. Useful when there is no hardware primitive backing
-	  the synchronization.
-
-	  WARNING: improper use of this can result in deadlocking kernel
-	  drivers from userspace. Intended for test and debug only.
-
 source "drivers/staging/android/ion/Kconfig"

 endif # if ANDROID

@@ -4,4 +4,3 @@ obj-y += ion/

 obj-$(CONFIG_ASHMEM) += ashmem.o
 obj-$(CONFIG_ANDROID_LOW_MEMORY_KILLER) += lowmemorykiller.o
-obj-$(CONFIG_SW_SYNC) += sw_sync.o sync_debug.o

@@ -205,19 +205,16 @@ static struct ion_buffer *ion_buffer_create(struct ion_heap *heap,
 		goto err2;
 	}

-	buffer->dev = dev;
-	buffer->size = len;
-
-	table = heap->ops->map_dma(heap, buffer);
-	if (WARN_ONCE(table == NULL,
-			"heap->ops->map_dma should return ERR_PTR on error"))
-		table = ERR_PTR(-EINVAL);
-	if (IS_ERR(table)) {
+	if (buffer->sg_table == NULL) {
+		WARN_ONCE(1, "This heap needs to set the sgtable");
 		ret = -EINVAL;
 		goto err1;
 	}

-	buffer->sg_table = table;
+	table = buffer->sg_table;
+	buffer->dev = dev;
+	buffer->size = len;

 	if (ion_buffer_fault_user_mappings(buffer)) {
 		int num_pages = PAGE_ALIGN(buffer->size) / PAGE_SIZE;
 		struct scatterlist *sg;
@@ -226,7 +223,7 @@ static struct ion_buffer *ion_buffer_create(struct ion_heap *heap,
 	buffer->pages = vmalloc(sizeof(struct page *) * num_pages);
 	if (!buffer->pages) {
 		ret = -ENOMEM;
-		goto err;
+		goto err1;
 	}

 	for_each_sg(table->sgl, sg, table->nents, i) {
@@ -260,8 +257,6 @@ static struct ion_buffer *ion_buffer_create(struct ion_heap *heap,
 	mutex_unlock(&dev->buffer_lock);
 	return buffer;

-err:
-	heap->ops->unmap_dma(heap, buffer);
 err1:
 	heap->ops->free(buffer);
 err2:
@@ -273,7 +268,6 @@ void ion_buffer_destroy(struct ion_buffer *buffer)
 {
 	if (WARN_ON(buffer->kmap_cnt > 0))
 		buffer->heap->ops->unmap_kernel(buffer->heap, buffer);
-	buffer->heap->ops->unmap_dma(buffer->heap, buffer);
 	buffer->heap->ops->free(buffer);
 	vfree(buffer->pages);
 	kfree(buffer);
@@ -551,7 +545,8 @@ struct ion_handle *ion_alloc(struct ion_client *client, size_t len,
 }
 EXPORT_SYMBOL(ion_alloc);

-static void ion_free_nolock(struct ion_client *client, struct ion_handle *handle)
+static void ion_free_nolock(struct ion_client *client,
+			    struct ion_handle *handle)
 {
 	bool valid_handle;

@@ -576,32 +571,6 @@ void ion_free(struct ion_client *client, struct ion_handle *handle)
 }
 EXPORT_SYMBOL(ion_free);

-int ion_phys(struct ion_client *client, struct ion_handle *handle,
-	     ion_phys_addr_t *addr, size_t *len)
-{
-	struct ion_buffer *buffer;
-	int ret;
-
-	mutex_lock(&client->lock);
-	if (!ion_handle_validate(client, handle)) {
-		mutex_unlock(&client->lock);
-		return -EINVAL;
-	}
-
-	buffer = handle->buffer;
-
-	if (!buffer->heap->ops->phys) {
-		pr_err("%s: ion_phys is not implemented by this heap (name=%s, type=%d).\n",
-		       __func__, buffer->heap->name, buffer->heap->type);
-		mutex_unlock(&client->lock);
-		return -ENODEV;
-	}
-	mutex_unlock(&client->lock);
-	ret = buffer->heap->ops->phys(buffer->heap, buffer, addr, len);
-	return ret;
-}
-EXPORT_SYMBOL(ion_phys);
-
 static void *ion_buffer_kmap_get(struct ion_buffer *buffer)
 {
 	void *vaddr;
@@ -917,26 +886,6 @@ void ion_client_destroy(struct ion_client *client)
 }
 EXPORT_SYMBOL(ion_client_destroy);

-struct sg_table *ion_sg_table(struct ion_client *client,
-			      struct ion_handle *handle)
-{
-	struct ion_buffer *buffer;
-	struct sg_table *table;
-
-	mutex_lock(&client->lock);
-	if (!ion_handle_validate(client, handle)) {
-		pr_err("%s: invalid handle passed to map_dma.\n",
-		       __func__);
-		mutex_unlock(&client->lock);
-		return ERR_PTR(-EINVAL);
-	}
-	buffer = handle->buffer;
-	table = buffer->sg_table;
-	mutex_unlock(&client->lock);
-	return table;
-}
-EXPORT_SYMBOL(ion_sg_table);
-
 static void ion_buffer_sync_for_device(struct ion_buffer *buffer,
				       struct device *dev,
				       enum dma_data_direction direction);
@@ -1358,7 +1307,8 @@ static long ion_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
 		struct ion_handle *handle;

 		mutex_lock(&client->lock);
-		handle = ion_handle_get_by_id_nolock(client, data.handle.handle);
+		handle = ion_handle_get_by_id_nolock(client,
+						     data.handle.handle);
 		if (IS_ERR(handle)) {
 			mutex_unlock(&client->lock);
 			return PTR_ERR(handle);
@@ -1588,8 +1538,7 @@ void ion_device_add_heap(struct ion_device *dev, struct ion_heap *heap)
 {
 	struct dentry *debug_file;

-	if (!heap->ops->allocate || !heap->ops->free || !heap->ops->map_dma ||
-	    !heap->ops->unmap_dma)
+	if (!heap->ops->allocate || !heap->ops->free)
 		pr_err("%s: can not add heap with invalid ops struct.\n",
 		       __func__);

@@ -1703,37 +1652,3 @@ void ion_device_destroy(struct ion_device *dev)
 }
 EXPORT_SYMBOL(ion_device_destroy);

-void __init ion_reserve(struct ion_platform_data *data)
-{
-	int i;
-
-	for (i = 0; i < data->nr; i++) {
-		if (data->heaps[i].size == 0)
-			continue;
-
-		if (data->heaps[i].base == 0) {
-			phys_addr_t paddr;
-
-			paddr = memblock_alloc_base(data->heaps[i].size,
-						    data->heaps[i].align,
-						    MEMBLOCK_ALLOC_ANYWHERE);
-			if (!paddr) {
-				pr_err("%s: error allocating memblock for heap %d\n",
-				       __func__, i);
-				continue;
-			}
-			data->heaps[i].base = paddr;
-		} else {
-			int ret = memblock_reserve(data->heaps[i].base,
-						   data->heaps[i].size);
-			if (ret)
-				pr_err("memblock reserve of %zx@%lx failed\n",
-				       data->heaps[i].size,
-				       data->heaps[i].base);
-		}
-		pr_info("%s: %s reserved base %lx size %zu\n", __func__,
-			data->heaps[i].name,
-			data->heaps[i].base,
-			data->heaps[i].size);
-	}
-}

@@ -72,17 +72,6 @@ struct ion_platform_data {
 	struct ion_platform_heap *heaps;
 };

-/**
- * ion_reserve() - reserve memory for ion heaps if applicable
- * @data:	platform data specifying starting physical address and
- *		size
- *
- * Calls memblock reserve to set aside memory for heaps that are
- * located at specific memory addresses or of specific sizes not
- * managed by the kernel
- */
-void ion_reserve(struct ion_platform_data *data);
-
 /**
  * ion_client_create() - allocate a client and returns it
  * @dev:	the global ion device
@@ -129,36 +118,6 @@ struct ion_handle *ion_alloc(struct ion_client *client, size_t len,
  */
 void ion_free(struct ion_client *client, struct ion_handle *handle);

-/**
- * ion_phys - returns the physical address and len of a handle
- * @client:	the client
- * @handle:	the handle
- * @addr:	a pointer to put the address in
- * @len:	a pointer to put the length in
- *
- * This function queries the heap for a particular handle to get the
- * handle's physical address.  It't output is only correct if
- * a heap returns physically contiguous memory -- in other cases
- * this api should not be implemented -- ion_sg_table should be used
- * instead.  Returns -EINVAL if the handle is invalid.  This has
- * no implications on the reference counting of the handle --
- * the returned value may not be valid if the caller is not
- * holding a reference.
- */
-int ion_phys(struct ion_client *client, struct ion_handle *handle,
-	     ion_phys_addr_t *addr, size_t *len);
-
-/**
- * ion_map_dma - return an sg_table describing a handle
- * @client:	the client
- * @handle:	the handle
- *
- * This function returns the sg_table describing
- * a particular ion handle.
- */
-struct sg_table *ion_sg_table(struct ion_client *client,
-			      struct ion_handle *handle);
-
 /**
  * ion_map_kernel - create mapping for the given handle
  * @client:	the client

@@ -25,6 +25,8 @@
 #include "ion.h"
 #include "ion_priv.h"

+#define ION_CARVEOUT_ALLOCATE_FAIL	-1
+
 struct ion_carveout_heap {
 	struct ion_heap heap;
 	struct gen_pool *pool;
@@ -56,19 +58,6 @@ void ion_carveout_free(struct ion_heap *heap, ion_phys_addr_t addr,
 	gen_pool_free(carveout_heap->pool, addr, size);
 }

-static int ion_carveout_heap_phys(struct ion_heap *heap,
-				  struct ion_buffer *buffer,
-				  ion_phys_addr_t *addr, size_t *len)
-{
-	struct sg_table *table = buffer->priv_virt;
-	struct page *page = sg_page(table->sgl);
-	ion_phys_addr_t paddr = PFN_PHYS(page_to_pfn(page));
-
-	*addr = paddr;
-	*len = buffer->size;
-	return 0;
-}
-
 static int ion_carveout_heap_allocate(struct ion_heap *heap,
				      struct ion_buffer *buffer,
				      unsigned long size, unsigned long align,
@@ -95,7 +84,7 @@ static int ion_carveout_heap_allocate(struct ion_heap *heap,
 	}

 	sg_set_page(table->sgl, pfn_to_page(PFN_DOWN(paddr)), size, 0);
-	buffer->priv_virt = table;
+	buffer->sg_table = table;

 	return 0;

@@ -109,7 +98,7 @@ err_free:
 static void ion_carveout_heap_free(struct ion_buffer *buffer)
 {
 	struct ion_heap *heap = buffer->heap;
-	struct sg_table *table = buffer->priv_virt;
+	struct sg_table *table = buffer->sg_table;
 	struct page *page = sg_page(table->sgl);
 	ion_phys_addr_t paddr = PFN_PHYS(page_to_pfn(page));

@@ -124,23 +113,9 @@ static void ion_carveout_heap_free(struct ion_buffer *buffer)
 	kfree(table);
 }

-static struct sg_table *ion_carveout_heap_map_dma(struct ion_heap *heap,
-						  struct ion_buffer *buffer)
-{
-	return buffer->priv_virt;
-}
-
-static void ion_carveout_heap_unmap_dma(struct ion_heap *heap,
-					struct ion_buffer *buffer)
-{
-}
-
 static struct ion_heap_ops carveout_heap_ops = {
 	.allocate = ion_carveout_heap_allocate,
 	.free = ion_carveout_heap_free,
-	.phys = ion_carveout_heap_phys,
-	.map_dma = ion_carveout_heap_map_dma,
-	.unmap_dma = ion_carveout_heap_unmap_dma,
 	.map_user = ion_heap_map_user,
 	.map_kernel = ion_heap_map_kernel,
 	.unmap_kernel = ion_heap_unmap_kernel,

@@ -75,7 +75,7 @@ static int ion_chunk_heap_allocate(struct ion_heap *heap,
 		sg = sg_next(sg);
 	}

-	buffer->priv_virt = table;
+	buffer->sg_table = table;
 	chunk_heap->allocated += allocated_size;
 	return 0;
 err:
@@ -95,7 +95,7 @@ static void ion_chunk_heap_free(struct ion_buffer *buffer)
 	struct ion_heap *heap = buffer->heap;
 	struct ion_chunk_heap *chunk_heap =
 		container_of(heap, struct ion_chunk_heap, heap);
-	struct sg_table *table = buffer->priv_virt;
+	struct sg_table *table = buffer->sg_table;
 	struct scatterlist *sg;
 	int i;
 	unsigned long allocated_size;
@@ -117,22 +117,9 @@ static void ion_chunk_heap_free(struct ion_buffer *buffer)
 	kfree(table);
 }

-static struct sg_table *ion_chunk_heap_map_dma(struct ion_heap *heap,
-					       struct ion_buffer *buffer)
-{
-	return buffer->priv_virt;
-}
-
-static void ion_chunk_heap_unmap_dma(struct ion_heap *heap,
-				     struct ion_buffer *buffer)
-{
-}
-
 static struct ion_heap_ops chunk_heap_ops = {
 	.allocate = ion_chunk_heap_allocate,
 	.free = ion_chunk_heap_free,
-	.map_dma = ion_chunk_heap_map_dma,
-	.unmap_dma = ion_chunk_heap_unmap_dma,
 	.map_user = ion_heap_map_user,
 	.map_kernel = ion_heap_map_kernel,
 	.unmap_kernel = ion_heap_unmap_kernel,

@@ -78,6 +78,7 @@ static int ion_cma_allocate(struct ion_heap *heap, struct ion_buffer *buffer,
 		goto free_table;
 	/* keep this for memory release */
 	buffer->priv_virt = info;
+	buffer->sg_table = info->table;
 	dev_dbg(dev, "Allocate buffer %p\n", buffer);
 	return 0;

@@ -105,36 +106,6 @@ static void ion_cma_free(struct ion_buffer *buffer)
 	kfree(info);
 }

-/* return physical address in addr */
-static int ion_cma_phys(struct ion_heap *heap, struct ion_buffer *buffer,
-			ion_phys_addr_t *addr, size_t *len)
-{
-	struct ion_cma_heap *cma_heap = to_cma_heap(buffer->heap);
-	struct device *dev = cma_heap->dev;
-	struct ion_cma_buffer_info *info = buffer->priv_virt;
-
-	dev_dbg(dev, "Return buffer %p physical address %pa\n", buffer,
-		&info->handle);
-
-	*addr = info->handle;
-	*len = buffer->size;
-
-	return 0;
-}
-
-static struct sg_table *ion_cma_heap_map_dma(struct ion_heap *heap,
-					     struct ion_buffer *buffer)
-{
-	struct ion_cma_buffer_info *info = buffer->priv_virt;
-
-	return info->table;
-}
-
-static void ion_cma_heap_unmap_dma(struct ion_heap *heap,
-				   struct ion_buffer *buffer)
-{
-}
-
 static int ion_cma_mmap(struct ion_heap *mapper, struct ion_buffer *buffer,
			struct vm_area_struct *vma)
 {
@@ -162,9 +133,6 @@ static void ion_cma_unmap_kernel(struct ion_heap *heap,
 static struct ion_heap_ops ion_cma_ops = {
 	.allocate = ion_cma_allocate,
 	.free = ion_cma_free,
-	.map_dma = ion_cma_heap_map_dma,
-	.unmap_dma = ion_cma_heap_unmap_dma,
-	.phys = ion_cma_phys,
 	.map_user = ion_cma_mmap,
 	.map_kernel = ion_cma_map_kernel,
 	.unmap_kernel = ion_cma_unmap_kernel,

@@ -42,8 +42,6 @@ struct ion_buffer *ion_handle_buffer(struct ion_handle *handle);
  * @size:		size of the buffer
  * @priv_virt:		private data to the buffer representable as
  *			a void *
- * @priv_phys:		private data to the buffer representable as
- *			an ion_phys_addr_t (and someday a phys_addr_t)
  * @lock:		protects the buffers cnt fields
  * @kmap_cnt:		number of times the buffer is mapped to the kernel
  * @vaddr:		the kernel mapping if kmap_cnt is not zero
@@ -69,10 +67,7 @@ struct ion_buffer {
 	unsigned long flags;
 	unsigned long private_flags;
 	size_t size;
-	union {
-		void *priv_virt;
-		ion_phys_addr_t priv_phys;
-	};
+	void *priv_virt;
 	struct mutex lock;
 	int kmap_cnt;
 	void *vaddr;
@@ -91,10 +86,6 @@ void ion_buffer_destroy(struct ion_buffer *buffer);
  * struct ion_heap_ops - ops to operate on a given heap
  * @allocate:		allocate memory
  * @free:		free memory
- * @phys		get physical address of a buffer (only define on
- *			physically contiguous heaps)
- * @map_dma		map the memory for dma to a scatterlist
- * @unmap_dma		unmap the memory for dma
  * @map_kernel		map memory to the kernel
  * @unmap_kernel	unmap memory to the kernel
  * @map_user		map memory to userspace
@@ -111,11 +102,6 @@ struct ion_heap_ops {
			struct ion_buffer *buffer, unsigned long len,
			unsigned long align, unsigned long flags);
 	void (*free)(struct ion_buffer *buffer);
-	int (*phys)(struct ion_heap *heap, struct ion_buffer *buffer,
-		    ion_phys_addr_t *addr, size_t *len);
-	struct sg_table * (*map_dma)(struct ion_heap *heap,
-				     struct ion_buffer *buffer);
-	void (*unmap_dma)(struct ion_heap *heap, struct ion_buffer *buffer);
 	void * (*map_kernel)(struct ion_heap *heap, struct ion_buffer *buffer);
 	void (*unmap_kernel)(struct ion_heap *heap, struct ion_buffer *buffer);
 	int (*map_user)(struct ion_heap *mapper, struct ion_buffer *buffer,
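
The net effect of the ion changes above: the phys/map_dma/unmap_dma ops are
removed, and a heap's allocate() is now expected to fill buffer->sg_table
itself, as the carveout/chunk/system heap hunks do. A hypothetical minimal
allocate() under the new contract follows (illustrative sketch, not from this
diff; "my_heap" and the single-entry table are invented for the example):

static int my_heap_allocate(struct ion_heap *heap, struct ion_buffer *buffer,
			    unsigned long len, unsigned long align,
			    unsigned long flags)
{
	struct sg_table *table;
	int ret;

	table = kmalloc(sizeof(*table), GFP_KERNEL);
	if (!table)
		return -ENOMEM;
	ret = sg_alloc_table(table, 1, GFP_KERNEL);
	if (ret) {
		kfree(table);
		return ret;
	}
	/* ...point table->sgl at the heap's backing memory here... */
	buffer->sg_table = table;	/* was: buffer->priv_virt + map_dma() */
	return 0;
}
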
@@ -327,20 +313,6 @@ void ion_chunk_heap_destroy(struct ion_heap *);
 struct ion_heap *ion_cma_heap_create(struct ion_platform_heap *);
 void ion_cma_heap_destroy(struct ion_heap *);

-/**
- * kernel api to allocate/free from carveout -- used when carveout is
- * used to back an architecture specific custom heap
- */
-ion_phys_addr_t ion_carveout_allocate(struct ion_heap *heap, unsigned long size,
-				      unsigned long align);
-void ion_carveout_free(struct ion_heap *heap, ion_phys_addr_t addr,
-		       unsigned long size);
-/**
- * The carveout heap returns physical addresses, since 0 may be a valid
- * physical address, this is used to indicate allocation failed
- */
-#define ION_CARVEOUT_ALLOCATE_FAIL -1
-
 /**
  * functions for creating and destroying a heap pool -- allows you
  * to keep a pool of pre allocated memory to use from your heap. Keeping

@@ -164,7 +164,7 @@ static int ion_system_heap_allocate(struct ion_heap *heap,
 		list_del(&page->lru);
 	}

-	buffer->priv_virt = table;
+	buffer->sg_table = table;
 	return 0;

 free_table:
@@ -199,17 +199,6 @@ static void ion_system_heap_free(struct ion_buffer *buffer)
 	kfree(table);
 }

-static struct sg_table *ion_system_heap_map_dma(struct ion_heap *heap,
-						struct ion_buffer *buffer)
-{
-	return buffer->priv_virt;
-}
-
-static void ion_system_heap_unmap_dma(struct ion_heap *heap,
-				      struct ion_buffer *buffer)
-{
-}
-
 static int ion_system_heap_shrink(struct ion_heap *heap, gfp_t gfp_mask,
				  int nr_to_scan)
 {
@@ -243,8 +232,6 @@ static int ion_system_heap_shrink(struct ion_heap *heap, gfp_t gfp_mask,
 static struct ion_heap_ops system_heap_ops = {
 	.allocate = ion_system_heap_allocate,
 	.free = ion_system_heap_free,
-	.map_dma = ion_system_heap_map_dma,
-	.unmap_dma = ion_system_heap_unmap_dma,
 	.map_kernel = ion_heap_map_kernel,
 	.unmap_kernel = ion_heap_unmap_kernel,
 	.map_user = ion_heap_map_user,
@@ -358,7 +345,7 @@ static int ion_system_contig_heap_allocate(struct ion_heap *heap,

 	sg_set_page(table->sgl, page, len, 0);

-	buffer->priv_virt = table;
+	buffer->sg_table = table;

 	ion_pages_sync_for_device(NULL, page, len, DMA_BIDIRECTIONAL);

@@ -375,7 +362,7 @@ free_pages:

 static void ion_system_contig_heap_free(struct ion_buffer *buffer)
 {
-	struct sg_table *table = buffer->priv_virt;
+	struct sg_table *table = buffer->sg_table;
 	struct page *page = sg_page(table->sgl);
 	unsigned long pages = PAGE_ALIGN(buffer->size) >> PAGE_SHIFT;
 	unsigned long i;
@@ -386,34 +373,9 @@ static void ion_system_contig_heap_free(struct ion_buffer *buffer)
 	kfree(table);
 }

-static int ion_system_contig_heap_phys(struct ion_heap *heap,
-				       struct ion_buffer *buffer,
-				       ion_phys_addr_t *addr, size_t *len)
-{
-	struct sg_table *table = buffer->priv_virt;
-	struct page *page = sg_page(table->sgl);
-	*addr = page_to_phys(page);
-	*len = buffer->size;
-	return 0;
-}
-
-static struct sg_table *ion_system_contig_heap_map_dma(struct ion_heap *heap,
-						       struct ion_buffer *buffer)
-{
-	return buffer->priv_virt;
-}
-
-static void ion_system_contig_heap_unmap_dma(struct ion_heap *heap,
-					     struct ion_buffer *buffer)
-{
-}
-
 static struct ion_heap_ops kmalloc_ops = {
 	.allocate = ion_system_contig_heap_allocate,
 	.free = ion_system_contig_heap_free,
-	.phys = ion_system_contig_heap_phys,
-	.map_dma = ion_system_contig_heap_map_dma,
-	.unmap_dma = ion_system_contig_heap_unmap_dma,
 	.map_kernel = ion_heap_map_kernel,
 	.unmap_kernel = ion_heap_unmap_kernel,
 	.map_user = ion_heap_map_user,

@@ -946,10 +946,8 @@ static int usbduxfast_auto_attach(struct comedi_device *dev,
 	}

 	devpriv->urb = usb_alloc_urb(0, GFP_KERNEL);
-	if (!devpriv->urb) {
-		dev_err(dev->class_dev, "Could not alloc. urb\n");
+	if (!devpriv->urb)
 		return -ENOMEM;
-	}

 	devpriv->inbuf = kmalloc(SIZEINBUF, GFP_KERNEL);
 	if (!devpriv->inbuf)

@@ -297,11 +297,10 @@ static int enqueue_txdev(struct ks_wlan_private *priv, unsigned char *p,
 static int write_to_device(struct ks_wlan_private *priv, unsigned char *buffer,
			   unsigned long size)
 {
-	int rc, retval;
+	int retval;
 	unsigned char rw_data;
 	struct hostif_hdr *hdr;
 	hdr = (struct hostif_hdr *)buffer;
-	rc = 0;

 	DPRINTK(4, "size=%d\n", hdr->size);
 	if (hdr->event < HIF_DATA_REQ || HIF_REQ_MAX < hdr->event) {
@@ -711,7 +710,6 @@ static int ks7010_sdio_update_index(struct ks_wlan_private *priv, u32 index)
 	int rc = 0;
 	int retval;
 	unsigned char *data_buf;
-	data_buf = NULL;

 	data_buf = kmalloc(sizeof(u32), GFP_KERNEL);
 	if (!data_buf) {
@@ -732,8 +730,7 @@ static int ks7010_sdio_update_index(struct ks_wlan_private *priv, u32 index)
 		goto error_out;
 	}
 error_out:
-	if (data_buf)
-		kfree(data_buf);
+	kfree(data_buf);
 	return rc;
 }

@@ -744,7 +741,7 @@ static int ks7010_sdio_data_compare(struct ks_wlan_private *priv, u32 address,
 	int rc = 0;
 	int retval;
 	unsigned char *read_buf;
-	read_buf = NULL;
+
 	read_buf = kmalloc(ROM_BUFF_SIZE, GFP_KERNEL);
 	if (!read_buf) {
 		rc = 1;
@@ -763,8 +760,7 @@ static int ks7010_sdio_data_compare(struct ks_wlan_private *priv, u32 address,
 		goto error_out;
 	}
 error_out:
-	if (read_buf)
-		kfree(read_buf);
+	kfree(read_buf);
 	return rc;
 }

@@ -778,8 +774,6 @@ static int ks7010_upload_firmware(struct ks_wlan_private *priv,
 	int length;
 	const struct firmware *fw_entry = NULL;

-	rom_buf = NULL;
-
 	/* buffer allocate */
 	rom_buf = kmalloc(ROM_BUFF_SIZE, GFP_KERNEL);
 	if (!rom_buf) {
@@ -879,8 +873,7 @@ static int ks7010_upload_firmware(struct ks_wlan_private *priv,
 	release_firmware(fw_entry);
 error_out0:
 	sdio_release_host(card->func);
-	if (rom_buf)
-		kfree(rom_buf);
+	kfree(rom_buf);
 	return rc;
 }

@@ -1141,7 +1134,6 @@ static void ks7010_sdio_remove(struct sdio_func *func)
 	int ret;
 	struct ks_sdio_card *card;
 	struct ks_wlan_private *priv;
-	struct net_device *netdev;
 	DPRINTK(1, "ks7010_sdio_remove()\n");

 	card = sdio_get_drvdata(func);
@@ -1151,8 +1143,9 @@ static void ks7010_sdio_remove(struct sdio_func *func)

 	DPRINTK(1, "priv = card->priv\n");
 	priv = card->priv;
-	netdev = priv->net_dev;
 	if (priv) {
+		struct net_device *netdev = priv->net_dev;
+
 		ks_wlan_net_stop(netdev);
 		DPRINTK(1, "ks_wlan_net_stop\n");

@@ -1199,9 +1192,7 @@ static void ks7010_sdio_remove(struct sdio_func *func)
 		unregister_netdev(netdev);

 		trx_device_exit(priv);
-		if (priv->ks_wlan_hw.read_buf) {
-			kfree(priv->ks_wlan_hw.read_buf);
-		}
+		kfree(priv->ks_wlan_hw.read_buf);
 		free_netdev(priv->net_dev);
 		card->priv = NULL;
 	}
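
Several of the ks7010 hunks above collapse "if (p) kfree(p);" into a bare
kfree(p): kfree() is defined to be a no-op when passed NULL, so the guard is
dead weight. A short illustration of the resulting cleanup pattern (editorial
sketch, not part of the patch):

#include <linux/slab.h>

static void example_cleanup(char *buf)
{
	/* kfree() tolerates NULL, so no "if (buf)" guard is needed;
	 * this is safe on every exit path, allocated or not. */
	kfree(buf);
}
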

@@ -20,15 +20,21 @@
 #define getUInt32( A, B ) (uint32_t)(A[B+0] << 0) + (A[B+1] << 8) + (A[B+2] << 16) + (A[B+3] << 24)

 // Convert from UInt32 to Byte[] in a portable way
-#define putUInt32( A, B, C ) A[B+0] = (uint8_t) (C & 0xff); \
-	A[B+1] = (uint8_t) ((C>>8) & 0xff); \
-	A[B+2] = (uint8_t) ((C>>16) & 0xff); \
-	A[B+3] = (uint8_t) ((C>>24) & 0xff)
+#define putUInt32(A, B, C)				\
+do {							\
+	A[B + 0] = (uint8_t)(C & 0xff);			\
+	A[B + 1] = (uint8_t)((C >> 8) & 0xff);		\
+	A[B + 2] = (uint8_t)((C >> 16) & 0xff);		\
+	A[B + 3] = (uint8_t)((C >> 24) & 0xff);		\
+} while (0)

 // Reset the state to the empty message.
-#define MichaelClear( A ) A->L = A->K0; \
-	A->R = A->K1; \
-	A->nBytesInM = 0;
+#define MichaelClear(A)			\
+do {					\
+	A->L = A->K0;			\
+	A->R = A->K1;			\
+	A->nBytesInM = 0;		\
+} while (0)

 static
 void MichaelInitializeFunction(struct michel_mic_t *Mic, uint8_t * key)
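
The macro rewrites above apply the standard do { ... } while (0) idiom so a
multi-statement macro expands to exactly one statement. A quick illustration
of the hazard the idiom avoids (editorial sketch, not part of the patch):

#define CLEAR_PAIR_BAD(a, b)	(a) = 0; (b) = 0
#define CLEAR_PAIR_OK(a, b)	do { (a) = 0; (b) = 0; } while (0)

void demo(int reset, int *x, int *y)
{
	if (reset)
		CLEAR_PAIR_OK(*x, *y);	/* one statement; if/else stays valid */
	else
		*y = 1;
	/* With CLEAR_PAIR_BAD here, only "(a) = 0;" would be guarded by the
	 * if, and the trailing "else" would be a syntax error. */
}
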

@@ -1468,11 +1468,6 @@ ksocknal_close_conn_locked(struct ksock_conn *conn, int error)

 		conn->ksnc_route = NULL;

-#if 0		/* irrelevant with only eager routes */
-		/* make route least favourite */
-		list_del(&route->ksnr_list);
-		list_add_tail(&route->ksnr_list, &peer->ksnp_routes);
-#endif
 		ksocknal_route_decref(route);	/* drop conn's ref on route */
 	}

Some files were not shown because too many files have changed in this diff.