[PATCH] slab: remove SLAB_KERNEL

SLAB_KERNEL is an alias of GFP_KERNEL.

Signed-off-by: Christoph Lameter <clameter@sgi.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
This commit is contained in:
Christoph Lameter
2006-12-06 20:33:17 -08:00
committed by Linus Torvalds
parent 54e6ecb239
commit e94b176609
114 changed files with 164 additions and 165 deletions
+1 -1
View File
@@ -132,7 +132,7 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int exstack)
 		goto up_fail;
 	}
-	vma = kmem_cache_zalloc(vm_area_cachep, SLAB_KERNEL);
+	vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
 	if (!vma) {
 		ret = -ENOMEM;
 		goto up_fail;
+4 -4
View File
@@ -91,7 +91,7 @@ ia64_elf32_init (struct pt_regs *regs)
 	 * it with privilege level 3 because the IVE uses non-privileged accesses to these
 	 * tables.  IA-32 segmentation is used to protect against IA-32 accesses to them.
 	 */
-	vma = kmem_cache_alloc(vm_area_cachep, SLAB_KERNEL);
+	vma = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
 	if (vma) {
 		memset(vma, 0, sizeof(*vma));
 		vma->vm_mm = current->mm;
@@ -117,7 +117,7 @@ ia64_elf32_init (struct pt_regs *regs)
 	 * code is locked in specific gate page, which is pointed by pretcode
 	 * when setup_frame_ia32
 	 */
-	vma = kmem_cache_alloc(vm_area_cachep, SLAB_KERNEL);
+	vma = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
 	if (vma) {
 		memset(vma, 0, sizeof(*vma));
 		vma->vm_mm = current->mm;
@@ -142,7 +142,7 @@ ia64_elf32_init (struct pt_regs *regs)
 	 * Install LDT as anonymous memory.  This gives us all-zero segment descriptors
 	 * until a task modifies them via modify_ldt().
 	 */
-	vma = kmem_cache_alloc(vm_area_cachep, SLAB_KERNEL);
+	vma = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
 	if (vma) {
 		memset(vma, 0, sizeof(*vma));
 		vma->vm_mm = current->mm;
@@ -214,7 +214,7 @@ ia32_setup_arg_pages (struct linux_binprm *bprm, int executable_stack)
 	bprm->loader += stack_base;
 	bprm->exec += stack_base;
-	mpnt = kmem_cache_alloc(vm_area_cachep, SLAB_KERNEL);
+	mpnt = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
 	if (!mpnt)
 		return -ENOMEM;
+1 -1
View File
@@ -2302,7 +2302,7 @@ pfm_smpl_buffer_alloc(struct task_struct *task, pfm_context_t *ctx, unsigned lon
 	DPRINT(("smpl_buf @%p\n", smpl_buf));
 	/* allocate vma */
-	vma = kmem_cache_alloc(vm_area_cachep, SLAB_KERNEL);
+	vma = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
 	if (!vma) {
 		DPRINT(("Cannot allocate vma\n"));
 		goto error_kmem;
+2 -2
View File
@@ -156,7 +156,7 @@ ia64_init_addr_space (void)
 	 * the problem.  When the process attempts to write to the register backing store
 	 * for the first time, it will get a SEGFAULT in this case.
 	 */
-	vma = kmem_cache_alloc(vm_area_cachep, SLAB_KERNEL);
+	vma = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
 	if (vma) {
 		memset(vma, 0, sizeof(*vma));
 		vma->vm_mm = current->mm;
@@ -175,7 +175,7 @@ ia64_init_addr_space (void)
 	/* map NaT-page at address zero to speed up speculative dereferencing of NULL: */
 	if (!(current->personality & MMAP_PAGE_ZERO)) {
-		vma = kmem_cache_alloc(vm_area_cachep, SLAB_KERNEL);
+		vma = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
 		if (vma) {
 			memset(vma, 0, sizeof(*vma));
 			vma->vm_mm = current->mm;
+1 -1
View File
@@ -264,7 +264,7 @@ int arch_setup_additional_pages(struct linux_binprm *bprm,
 	/* Allocate a VMA structure and fill it up */
-	vma = kmem_cache_zalloc(vm_area_cachep, SLAB_KERNEL);
+	vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
 	if (vma == NULL) {
 		rc = -ENOMEM;
 		goto fail_mmapsem;
+1 -1
View File
@@ -48,7 +48,7 @@ spufs_alloc_inode(struct super_block *sb)
 {
 	struct spufs_inode_info *ei;
-	ei = kmem_cache_alloc(spufs_inode_cache, SLAB_KERNEL);
+	ei = kmem_cache_alloc(spufs_inode_cache, GFP_KERNEL);
 	if (!ei)
 		return NULL;
+1 -1
View File
@@ -97,7 +97,7 @@ int arch_setup_additional_pages(struct linux_binprm *bprm,
 		goto up_fail;
 	}
-	vma = kmem_cache_zalloc(vm_area_cachep, SLAB_KERNEL);
+	vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
 	if (!vma) {
 		ret = -ENOMEM;
 		goto up_fail;
+1 -1
View File
@@ -351,7 +351,7 @@ int ia32_setup_arg_pages(struct linux_binprm *bprm, unsigned long stack_top,
 	bprm->loader += stack_base;
 	bprm->exec += stack_base;
-	mpnt = kmem_cache_alloc(vm_area_cachep, SLAB_KERNEL);
+	mpnt = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
 	if (!mpnt)
 		return -ENOMEM;
+1 -1
View File
@@ -49,7 +49,7 @@ int syscall32_setup_pages(struct linux_binprm *bprm, int exstack)
 	struct mm_struct *mm = current->mm;
 	int ret;
-	vma = kmem_cache_alloc(vm_area_cachep, SLAB_KERNEL);
+	vma = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
 	if (!vma)
 		return -ENOMEM;
+2 -2
View File
@@ -820,7 +820,7 @@ he_init_group(struct he_dev *he_dev, int group)
 		void *cpuaddr;
 #ifdef USE_RBPS_POOL
-		cpuaddr = pci_pool_alloc(he_dev->rbps_pool, SLAB_KERNEL|SLAB_DMA, &dma_handle);
+		cpuaddr = pci_pool_alloc(he_dev->rbps_pool, GFP_KERNEL|SLAB_DMA, &dma_handle);
 		if (cpuaddr == NULL)
 			return -ENOMEM;
 #else
@@ -884,7 +884,7 @@ he_init_group(struct he_dev *he_dev, int group)
 		void *cpuaddr;
 #ifdef USE_RBPL_POOL
-		cpuaddr = pci_pool_alloc(he_dev->rbpl_pool, SLAB_KERNEL|SLAB_DMA, &dma_handle);
+		cpuaddr = pci_pool_alloc(he_dev->rbpl_pool, GFP_KERNEL|SLAB_DMA, &dma_handle);
 		if (cpuaddr == NULL)
 			return -ENOMEM;
 #else
+1 -1
View File
@@ -126,7 +126,7 @@ dma_pool_create (const char *name, struct device *dev,
 	} else if (allocation < size)
 		return NULL;
-	if (!(retval = kmalloc (sizeof *retval, SLAB_KERNEL)))
+	if (!(retval = kmalloc (sizeof *retval, GFP_KERNEL)))
 		return retval;
 	strlcpy (retval->name, name, sizeof retval->name);
+2 -2
View File
@@ -636,10 +636,10 @@ static int ioat_self_test(struct ioat_device *device)
 	dma_cookie_t cookie;
 	int err = 0;
-	src = kzalloc(sizeof(u8) * IOAT_TEST_SIZE, SLAB_KERNEL);
+	src = kzalloc(sizeof(u8) * IOAT_TEST_SIZE, GFP_KERNEL);
 	if (!src)
 		return -ENOMEM;
-	dest = kzalloc(sizeof(u8) * IOAT_TEST_SIZE, SLAB_KERNEL);
+	dest = kzalloc(sizeof(u8) * IOAT_TEST_SIZE, GFP_KERNEL);
 	if (!dest) {
 		kfree(src);
 		return -ENOMEM;
+1 -1
View File
@@ -123,7 +123,7 @@ struct hpsb_host *hpsb_alloc_host(struct hpsb_host_driver *drv, size_t extra,
 	int i;
 	int hostnum = 0;
-	h = kzalloc(sizeof(*h) + extra, SLAB_KERNEL);
+	h = kzalloc(sizeof(*h) + extra, GFP_KERNEL);
 	if (!h)
 		return NULL;
+4 -4
View File
@@ -1225,7 +1225,7 @@ static int ohci_iso_recv_init(struct hpsb_iso *iso)
 	int ctx;
 	int ret = -ENOMEM;
-	recv = kmalloc(sizeof(*recv), SLAB_KERNEL);
+	recv = kmalloc(sizeof(*recv), GFP_KERNEL);
 	if (!recv)
 		return -ENOMEM;
@@ -1918,7 +1918,7 @@ static int ohci_iso_xmit_init(struct hpsb_iso *iso)
 	int ctx;
 	int ret = -ENOMEM;
-	xmit = kmalloc(sizeof(*xmit), SLAB_KERNEL);
+	xmit = kmalloc(sizeof(*xmit), GFP_KERNEL);
 	if (!xmit)
 		return -ENOMEM;
@@ -3021,7 +3021,7 @@ alloc_dma_rcv_ctx(struct ti_ohci *ohci, struct dma_rcv_ctx *d,
 			return -ENOMEM;
 		}
-		d->prg_cpu[i] = pci_pool_alloc(d->prg_pool, SLAB_KERNEL, d->prg_bus+i);
+		d->prg_cpu[i] = pci_pool_alloc(d->prg_pool, GFP_KERNEL, d->prg_bus+i);
 		OHCI_DMA_ALLOC("pool dma_rcv prg[%d]", i);
 		if (d->prg_cpu[i] != NULL) {
@@ -3117,7 +3117,7 @@ alloc_dma_trm_ctx(struct ti_ohci *ohci, struct dma_trm_ctx *d,
 	OHCI_DMA_ALLOC("dma_rcv prg pool");
 	for (i = 0; i < d->num_desc; i++) {
-		d->prg_cpu[i] = pci_pool_alloc(d->prg_pool, SLAB_KERNEL, d->prg_bus+i);
+		d->prg_cpu[i] = pci_pool_alloc(d->prg_pool, GFP_KERNEL, d->prg_bus+i);
 		OHCI_DMA_ALLOC("pool dma_trm prg[%d]", i);
 		if (d->prg_cpu[i] != NULL) {
+1 -1
View File
@@ -1428,7 +1428,7 @@ static int __devinit add_card(struct pci_dev *dev,
 	struct i2c_algo_bit_data i2c_adapter_data;
 	error = -ENOMEM;
-	i2c_ad = kmalloc(sizeof(*i2c_ad), SLAB_KERNEL);
+	i2c_ad = kmalloc(sizeof(*i2c_ad), GFP_KERNEL);
 	if (!i2c_ad) FAIL("failed to allocate I2C adapter memory");
 	memcpy(i2c_ad, &bit_ops, sizeof(struct i2c_adapter));
+5 -5
View File
@@ -112,7 +112,7 @@ static struct pending_request *__alloc_pending_request(gfp_t flags)
 static inline struct pending_request *alloc_pending_request(void)
 {
-	return __alloc_pending_request(SLAB_KERNEL);
+	return __alloc_pending_request(GFP_KERNEL);
 }
 static void free_pending_request(struct pending_request *req)
@@ -1737,7 +1737,7 @@ static int arm_register(struct file_info *fi, struct pending_request *req)
 		return (-EINVAL);
 	}
 	/* addr-list-entry for fileinfo */
-	addr = kmalloc(sizeof(*addr), SLAB_KERNEL);
+	addr = kmalloc(sizeof(*addr), GFP_KERNEL);
 	if (!addr) {
 		req->req.length = 0;
 		return (-ENOMEM);
@@ -2103,7 +2103,7 @@ static int write_phypacket(struct file_info *fi, struct pending_request *req)
 static int get_config_rom(struct file_info *fi, struct pending_request *req)
 {
 	int ret = sizeof(struct raw1394_request);
-	quadlet_t *data = kmalloc(req->req.length, SLAB_KERNEL);
+	quadlet_t *data = kmalloc(req->req.length, GFP_KERNEL);
 	int status;
 	if (!data)
@@ -2133,7 +2133,7 @@ static int get_config_rom(struct file_info *fi, struct pending_request *req)
 static int update_config_rom(struct file_info *fi, struct pending_request *req)
 {
 	int ret = sizeof(struct raw1394_request);
-	quadlet_t *data = kmalloc(req->req.length, SLAB_KERNEL);
+	quadlet_t *data = kmalloc(req->req.length, GFP_KERNEL);
 	if (!data)
 		return -ENOMEM;
 	if (copy_from_user(data, int2ptr(req->req.sendb), req->req.length)) {
@@ -2779,7 +2779,7 @@ static int raw1394_open(struct inode *inode, struct file *file)
 {
 	struct file_info *fi;
-	fi = kzalloc(sizeof(*fi), SLAB_KERNEL);
+	fi = kzalloc(sizeof(*fi), GFP_KERNEL);
 	if (!fi)
 		return -ENOMEM;
+1 -1
View File
@@ -57,7 +57,7 @@ struct ib_ah *ehca_create_ah(struct ib_pd *pd, struct ib_ah_attr *ah_attr)
 	struct ehca_shca *shca = container_of(pd->device, struct ehca_shca,
 					      ib_device);
-	av = kmem_cache_alloc(av_cache, SLAB_KERNEL);
+	av = kmem_cache_alloc(av_cache, GFP_KERNEL);
 	if (!av) {
 		ehca_err(pd->device, "Out of memory pd=%p ah_attr=%p",
 			 pd, ah_attr);
+1 -1
View File
@@ -134,7 +134,7 @@ struct ib_cq *ehca_create_cq(struct ib_device *device, int cqe,
 	if (cqe >= 0xFFFFFFFF - 64 - additional_cqe)
 		return ERR_PTR(-EINVAL);
-	my_cq = kmem_cache_alloc(cq_cache, SLAB_KERNEL);
+	my_cq = kmem_cache_alloc(cq_cache, GFP_KERNEL);
 	if (!my_cq) {
 		ehca_err(device, "Out of memory for ehca_cq struct device=%p",
 			 device);
+1 -1
View File
@@ -108,7 +108,7 @@ static struct kmem_cache *ctblk_cache = NULL;
 void *ehca_alloc_fw_ctrlblock(void)
 {
-	void *ret = kmem_cache_zalloc(ctblk_cache, SLAB_KERNEL);
+	void *ret = kmem_cache_zalloc(ctblk_cache, GFP_KERNEL);
 	if (!ret)
 		ehca_gen_err("Out of memory for ctblk");
 	return ret;
+2 -2
View File
@@ -53,7 +53,7 @@ static struct ehca_mr *ehca_mr_new(void)
 {
 	struct ehca_mr *me;
-	me = kmem_cache_alloc(mr_cache, SLAB_KERNEL);
+	me = kmem_cache_alloc(mr_cache, GFP_KERNEL);
 	if (me) {
 		memset(me, 0, sizeof(struct ehca_mr));
 		spin_lock_init(&me->mrlock);
@@ -72,7 +72,7 @@ static struct ehca_mw *ehca_mw_new(void)
 {
 	struct ehca_mw *me;
-	me = kmem_cache_alloc(mw_cache, SLAB_KERNEL);
+	me = kmem_cache_alloc(mw_cache, GFP_KERNEL);
 	if (me) {
 		memset(me, 0, sizeof(struct ehca_mw));
 		spin_lock_init(&me->mwlock);

Some files were not shown because too many files have changed in this diff Show More