Merge branch 'for-linus' of master.kernel.org:/home/rmk/linux-2.6-arm
* 'for-linus' of master.kernel.org:/home/rmk/linux-2.6-arm: (237 commits)
  ARM: 7004/1: fix traps.h compile warnings
  ARM: 6998/2: kernel: use proper memory barriers for bitops
  ARM: 6997/1: ep93xx: increase NR_BANKS to 16 for support of 128MB RAM
  ARM: Fix build errors caused by adding generic macros
  ARM: CPU hotplug: ensure we migrate all IRQs off a downed CPU
  ARM: CPU hotplug: pass in proper affinity mask on IRQ migration
  ARM: GIC: avoid routing interrupts to offline CPUs
  ARM: CPU hotplug: fix abuse of irqdesc->node
  ARM: 6981/2: mmci: adjust calculation of f_min
  ARM: 7000/1: LPAE: Use long long printk format for displaying the pud
  ARM: 6999/1: head, zImage: Always Enter the kernel in ARM state
  ARM: btc: avoid invalidating the branch target cache on kernel TLB maintanence
  ARM: ARM_DMA_ZONE_SIZE is no more
  ARM: mach-shark: move ARM_DMA_ZONE_SIZE to mdesc->dma_zone_size
  ARM: mach-sa1100: move ARM_DMA_ZONE_SIZE to mdesc->dma_zone_size
  ARM: mach-realview: move from ARM_DMA_ZONE_SIZE to mdesc->dma_zone_size
  ARM: mach-pxa: move from ARM_DMA_ZONE_SIZE to mdesc->dma_zone_size
  ARM: mach-ixp4xx: move from ARM_DMA_ZONE_SIZE to mdesc->dma_zone_size
  ARM: mach-h720x: move from ARM_DMA_ZONE_SIZE to mdesc->dma_zone_size
  ARM: mach-davinci: move from ARM_DMA_ZONE_SIZE to mdesc->dma_zone_size
  ...
@@ -164,3 +164,8 @@ In either case, the following conditions must be met:
- The boot loader is expected to call the kernel image by jumping
  directly to the first instruction of the kernel image.

  On CPUs supporting the ARM instruction set, the entry must be
  made in ARM state, even for a Thumb-2 kernel.

  On CPUs supporting only the Thumb instruction set such as
  Cortex-M class CPUs, the entry must be made in Thumb state.
@@ -0,0 +1,42 @@
ROM-able zImage boot from eSD
-----------------------------

A ROM-able zImage compiled with ZBOOT_ROM_SDHI may be written to eSD, and
SuperH Mobile ARM will boot directly from the SDHI hardware block.

This is achieved by the mask ROM loading the first portion of the image into
MERAM and then jumping to it. This portion contains loader code which
copies the entire image to SDRAM and jumps to it. From there the zImage
boot code proceeds as normal, uncompressing the image into its final
location and then jumping to it.

This code has been tested on a Mackerel board using the developer 1A eSD
boot mode, which is configured using the following jumper settings.

   8 7 6 5 4 3 2 1
   x|x|x|x| |x|x|
S4 -+-+-+-+-+-+-+-
   | | | |x| | |x on

The eSD card needs to be present in SDHI slot 1 (CN7).
As such, S1 and S33 also need to be configured as per
the notes in arch/arm/mach-shmobile/board-mackerel.c.

A partial zImage must be written to physical partition #1 (boot)
of the eSD at sector 0 in vrl4 format. A utility, vrl4, is supplied to
accomplish this.

e.g.
    vrl4 < zImage | dd of=/dev/sdX bs=512 count=17

A full copy of _the same_ zImage should be written to physical partition #1
(boot) of the eSD at sector 0. This should _not_ be in vrl4 format.

    vrl4 < zImage | dd of=/dev/sdX bs=512

Note: The commands above assume that the physical partition has been
switched. No such facility currently exists in the Linux Kernel.

Physical partitions are described in the eSD specification. At the time of
writing they are not the same as partitions that are typically configured
using fdisk and visible through /proc/partitions.
@@ -0,0 +1,267 @@
Kernel-provided User Helpers
============================

These are segments of kernel-provided user code reachable from user space
at a fixed address in kernel memory. This is used to provide user space
with some operations which require kernel help because of unimplemented
native features and/or instructions in many ARM CPUs. The idea is for this
code to be executed directly in user mode for best efficiency, even though
it is too intimate with its kernel counterpart to be left to user
libraries. In fact this code might even differ from one CPU to another
depending on the available instruction set, or whether it is an SMP system.
In other words, the kernel reserves the right to change this code as needed
without warning. Only the entry points and their results as documented here
are guaranteed to be stable.

This is different from (but doesn't preclude) a full-blown VDSO
implementation; however, a VDSO would prevent some assembly tricks with
constants that allow for efficient branching to those code segments. And
since those code segments only use a few cycles before returning to user
code, the overhead of a VDSO indirect far call would add a measurable
overhead to such minimalistic operations.

User space is expected to bypass those helpers and implement those things
inline (either in the code emitted directly by the compiler, or as part of
the implementation of a library call) when optimizing for a recent enough
processor that has the necessary native support, but only if the resulting
binaries are already going to be incompatible with earlier ARM processors
due to usage of similar native instructions for other things. In other
words, don't make binaries unable to run on earlier processors just for the
sake of not using these kernel helpers if your compiled code is not going
to use new instructions for other purposes.
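
For instance, a library might select between a native sequence and the
kernel helper at compile time. This is only an illustrative sketch (the
try_cmpxchg name and the architecture guard are assumptions for the
example, not part of the kernel interface); it relies on GCC-style
predefined macros and builtins:

    typedef int (__kuser_cmpxchg_t)(int oldval, int newval, volatile int *ptr);
    #define __kuser_cmpxchg (*(__kuser_cmpxchg_t *)0xffff0fc0)

    /* Returns zero on success, matching the helper's convention. */
    static inline int try_cmpxchg(volatile int *ptr, int oldval, int newval)
    {
    #if defined(__ARM_ARCH_7A__) || defined(__ARM_ARCH_6K__)
        /* Native exclusive loads/stores are available: no kernel help
           needed, and the binary already requires a recent CPU. */
        return !__sync_bool_compare_and_swap(ptr, oldval, newval);
    #else
        /* Portable fallback: works on any ARM CPU with a new enough
           kernel (__kuser_helper_version >= 2). */
        return __kuser_cmpxchg(oldval, newval, ptr);
    #endif
    }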

New helpers may be added over time, so an older kernel may be missing some
helpers present in a newer kernel. For this reason, programs must check
the value of __kuser_helper_version (see below) before assuming that it is
safe to call any particular helper. This check should ideally be
performed only once at process startup time, and execution aborted early
if the required helpers are not provided by the kernel version that
process is running on.

kuser_helper_version
--------------------

Location:	0xffff0ffc

Reference declaration:

    extern int32_t __kuser_helper_version;

Definition:

    This field contains the number of helpers being implemented by the
    running kernel. User space may read this to determine the availability
    of a particular helper.

Usage example:

    #define __kuser_helper_version (*(int32_t *)0xffff0ffc)

    void check_kuser_version(void)
    {
        if (__kuser_helper_version < 2) {
            fprintf(stderr, "can't do atomic operations, kernel too old\n");
            abort();
        }
    }

Notes:

    User space may assume that the value of this field never changes
    during the lifetime of any single process. This means that this
    field can be read once during the initialisation of a library or
    startup phase of a program.
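
    For instance, the value can be cached once at load time. This is
    only an illustrative sketch (the constructor attribute and the
    variable name are assumptions, not part of the kernel interface):

    #include <stdint.h>

    static int32_t kuser_version;

    /* Runs once when the program or library is loaded. */
    __attribute__((constructor))
    static void cache_kuser_version(void)
    {
        kuser_version = *(int32_t *)0xffff0ffc;
    }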

kuser_get_tls
-------------

Location:	0xffff0fe0

Reference prototype:

    void * __kuser_get_tls(void);

Input:

    lr = return address

Output:

    r0 = TLS value

Clobbered registers:

    none

Definition:

    Get the TLS value as previously set via the __ARM_NR_set_tls syscall.

Usage example:

    typedef void * (__kuser_get_tls_t)(void);
    #define __kuser_get_tls (*(__kuser_get_tls_t *)0xffff0fe0)

    void foo()
    {
        void *tls = __kuser_get_tls();
        printf("TLS = %p\n", tls);
    }

Notes:

    - Valid only if __kuser_helper_version >= 1 (from kernel version 2.6.12).

kuser_cmpxchg
-------------

Location:	0xffff0fc0

Reference prototype:

    int __kuser_cmpxchg(int32_t oldval, int32_t newval, volatile int32_t *ptr);

Input:

    r0 = oldval
    r1 = newval
    r2 = ptr
    lr = return address

Output:

    r0 = success code (zero or non-zero)
    C flag = set if r0 == 0, clear if r0 != 0

Clobbered registers:

    r3, ip, flags

Definition:

    Atomically store newval in *ptr only if *ptr is equal to oldval.
    Return zero if *ptr was changed or non-zero if no exchange happened.
    The C flag is also set if *ptr was changed to allow for assembly
    optimization in the calling code.

Usage example:

    typedef int (__kuser_cmpxchg_t)(int oldval, int newval, volatile int *ptr);
    #define __kuser_cmpxchg (*(__kuser_cmpxchg_t *)0xffff0fc0)

    int atomic_add(volatile int *ptr, int val)
    {
        int old, new;

        do {
            old = *ptr;
            new = old + val;
        } while(__kuser_cmpxchg(old, new, ptr));

        return new;
    }

Notes:

    - This routine already includes memory barriers as needed.

    - Valid only if __kuser_helper_version >= 2 (from kernel version 2.6.12).

kuser_memory_barrier
--------------------

Location:	0xffff0fa0

Reference prototype:

    void __kuser_memory_barrier(void);

Input:

    lr = return address

Output:

    none

Clobbered registers:

    none

Definition:

    Apply any needed memory barrier to preserve consistency with data
    modified manually and __kuser_cmpxchg usage.

Usage example:

    typedef void (__kuser_dmb_t)(void);
    #define __kuser_dmb (*(__kuser_dmb_t *)0xffff0fa0)
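
    A minimal usage sketch — publishing data to another thread (the
    payload/ready names are illustrative, not part of the kernel
    interface):

    int payload;
    volatile int ready;

    void publish(int value)
    {
        payload = value;
        __kuser_dmb();	/* order the payload store before the flag */
        ready = 1;
    }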

Notes:

    - Valid only if __kuser_helper_version >= 3 (from kernel version 2.6.15).

kuser_cmpxchg64
---------------

Location:	0xffff0f60

Reference prototype:

    int __kuser_cmpxchg64(const int64_t *oldval,
                          const int64_t *newval,
                          volatile int64_t *ptr);

Input:

    r0 = pointer to oldval
    r1 = pointer to newval
    r2 = pointer to target value
    lr = return address

Output:

    r0 = success code (zero or non-zero)
    C flag = set if r0 == 0, clear if r0 != 0

Clobbered registers:

    r3, lr, flags

Definition:

    Atomically store the 64-bit value pointed by *newval in *ptr only if
    *ptr is equal to the 64-bit value pointed by *oldval. Return zero if
    *ptr was changed or non-zero if no exchange happened.

    The C flag is also set if *ptr was changed to allow for assembly
    optimization in the calling code.

Usage example:

    typedef int (__kuser_cmpxchg64_t)(const int64_t *oldval,
                                      const int64_t *newval,
                                      volatile int64_t *ptr);
    #define __kuser_cmpxchg64 (*(__kuser_cmpxchg64_t *)0xffff0f60)

    int64_t atomic_add64(volatile int64_t *ptr, int64_t val)
    {
        int64_t old, new;

        do {
            old = *ptr;
            new = old + val;
        } while(__kuser_cmpxchg64(&old, &new, ptr));

        return new;
    }

Notes:

    - This routine already includes memory barriers as needed.

    - Due to the length of this sequence, this spans 2 conventional kuser
      "slots", therefore 0xffff0f80 is not used as a valid entry point.

    - Valid only if __kuser_helper_version >= 5 (from kernel version 3.1).

@@ -0,0 +1,21 @@
* ARM Performance Monitor Units

ARM cores often have a PMU for counting CPU and cache events like cache
misses and hits. The interface to the PMU is part of the ARM ARM (the ARM
Architecture Reference Manual). The ARM PMU representation in the device
tree should be done as follows:

Required properties:

- compatible : should be one of
	"arm,cortex-a9-pmu"
	"arm,cortex-a8-pmu"
	"arm,arm1176-pmu"
	"arm,arm1136-pmu"
- interrupts : 1 combined interrupt or 1 per core.

Example:

pmu {
	compatible = "arm,cortex-a9-pmu";
	interrupts = <100 101>;
};

+29 -10
@@ -10,7 +10,7 @@ config ARM
	select GENERIC_ATOMIC64 if (CPU_V6 || !CPU_32v6K || !AEABI)
	select HAVE_OPROFILE if (HAVE_PERF_EVENTS)
	select HAVE_ARCH_KGDB
	select HAVE_KPROBES if (!XIP_KERNEL && !THUMB2_KERNEL)
	select HAVE_KPROBES if !XIP_KERNEL
	select HAVE_KRETPROBES if (HAVE_KPROBES)
	select HAVE_FUNCTION_TRACER if (!XIP_KERNEL)
	select HAVE_FTRACE_MCOUNT_RECORD if (!XIP_KERNEL)
@@ -37,6 +37,9 @@ config ARM
	  Europe. There is an ARM Linux project with a web page at
	  <http://www.arm.linux.org.uk/>.

config ARM_HAS_SG_CHAIN
	bool

config HAVE_PWM
	bool

@@ -1347,7 +1350,6 @@ config SMP_ON_UP

config HAVE_ARM_SCU
	bool
	depends on SMP
	help
	  This option enables support for the ARM system coherency unit

@@ -1716,17 +1718,34 @@ config ZBOOT_ROM
	  Say Y here if you intend to execute your compressed kernel image
	  (zImage) directly from ROM or flash. If unsure, say N.

choice
	prompt "Include SD/MMC loader in zImage (EXPERIMENTAL)"
	depends on ZBOOT_ROM && ARCH_SH7372 && EXPERIMENTAL
	default ZBOOT_ROM_NONE
	help
	  Include experimental SD/MMC loading code in the ROM-able zImage.
	  With this enabled it is possible to write the ROM-able zImage
	  kernel image to an MMC or SD card and boot the kernel straight
	  from the reset vector. At reset the processor Mask ROM will load
	  the first part of the ROM-able zImage which in turn loads the
	  rest of the kernel image to RAM.

config ZBOOT_ROM_NONE
	bool "No SD/MMC loader in zImage (EXPERIMENTAL)"
	help
	  Do not load image from SD or MMC

config ZBOOT_ROM_MMCIF
	bool "Include MMCIF loader in zImage (EXPERIMENTAL)"
	depends on ZBOOT_ROM && ARCH_SH7372 && EXPERIMENTAL
	help
	  Say Y here to include experimental MMCIF loading code in the
	  ROM-able zImage. With this enabled it is possible to write the
	  ROM-able zImage kernel image to an MMC card and boot the
	  kernel straight from the reset vector. At reset the processor
	  Mask ROM will load the first part of the ROM-able zImage
	  which in turn loads the rest of the kernel image to RAM using
	  the MMCIF hardware block.
	  Load image from MMCIF hardware block.

config ZBOOT_ROM_SH_MOBILE_SDHI
	bool "Include SuperH Mobile SDHI loader in zImage (EXPERIMENTAL)"
	help
	  Load image from SDHI hardware block

endchoice

config CMDLINE
	string "Default kernel command string"

@@ -6,13 +6,19 @@

OBJS		=

# Ensure that mmcif loader code appears early in the image
# Ensure that MMCIF loader code appears early in the image
# to minimise the number of blocks that have to be read in
# order to load it.
ifeq ($(CONFIG_ZBOOT_ROM_MMCIF),y)
ifeq ($(CONFIG_ARCH_SH7372),y)
OBJS		+= mmcif-sh7372.o
endif

# Ensure that SDHI loader code appears early in the image
# to minimise the number of blocks that have to be read in
# order to load it.
ifeq ($(CONFIG_ZBOOT_ROM_SH_MOBILE_SDHI),y)
OBJS		+= sdhi-shmobile.o
OBJS		+= sdhi-sh7372.o
endif

AFLAGS_head.o += -DTEXT_OFFSET=$(TEXT_OFFSET)

@@ -25,14 +25,14 @@
	/* load board-specific initialization code */
#include <mach/zboot.h>

#ifdef CONFIG_ZBOOT_ROM_MMCIF
	/* Load image from MMC */
	adr	sp, __tmp_stack + 128
#if defined(CONFIG_ZBOOT_ROM_MMCIF) || defined(CONFIG_ZBOOT_ROM_SH_MOBILE_SDHI)
	/* Load image from MMC/SD */
	adr	sp, __tmp_stack + 256
	ldr	r0, __image_start
	ldr	r1, __image_end
	subs	r1, r1, r0
	ldr	r0, __load_base
	bl	mmcif_loader
	bl	mmc_loader

	/* Jump to loaded code */
	ldr	r0, __loaded
@@ -51,9 +51,9 @@ __loaded:
	.long	__continue
	.align
__tmp_stack:
	.space	128
	.space	256
__continue:
#endif /* CONFIG_ZBOOT_ROM_MMCIF */
#endif /* CONFIG_ZBOOT_ROM_MMC || CONFIG_ZBOOT_ROM_SH_MOBILE_SDHI */

	b	1f
__atags:@ tag #1

@@ -353,7 +353,8 @@ not_relocated: mov r0, #0
	mov	r0, #0			@ must be zero
	mov	r1, r7			@ restore architecture number
	mov	r2, r8			@ restore atags pointer
	mov	pc, r4			@ call kernel
 ARM(	mov	pc, r4	)		@ call kernel
 THUMB(	bx	r4	)		@ entry point is always ARM

	.align	2
	.type	LC0, #object

@@ -40,7 +40,7 @@
 * to an MMC card
 * # dd if=vrl4.out of=/dev/sdx bs=512 seek=1
 */
asmlinkage void mmcif_loader(unsigned char *buf, unsigned long len)
asmlinkage void mmc_loader(unsigned char *buf, unsigned long len)
{
	mmc_init_progress();
	mmc_update_progress(MMC_PROGRESS_ENTER);

@@ -0,0 +1,95 @@
/*
 * SuperH Mobile SDHI
 *
 * Copyright (C) 2010 Magnus Damm
 * Copyright (C) 2010 Kuninori Morimoto
 * Copyright (C) 2010 Simon Horman
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Parts inspired by u-boot
 */

#include <linux/io.h>
#include <mach/mmc.h>
#include <linux/mmc/boot.h>
#include <linux/mmc/tmio.h>

#include "sdhi-shmobile.h"

#define PORT179CR	0xe60520b3
#define PORT180CR	0xe60520b4
#define PORT181CR	0xe60520b5
#define PORT182CR	0xe60520b6
#define PORT183CR	0xe60520b7
#define PORT184CR	0xe60520b8

#define SMSTPCR3	0xe615013c

#define CR_INPUT_ENABLE	0x10
#define CR_FUNCTION1	0x01

#define SDHI1_BASE	(void __iomem *)0xe6860000
#define SDHI_BASE	SDHI1_BASE

/* SuperH Mobile SDHI loader
 *
 * loads the zImage from an SD card starting from block 0
 * on physical partition 1
 *
 * The image must start with a vrl4 header and
 * the zImage must start at offset 512 of the image. That is,
 * at block 1 (=byte 512) of physical partition 1
 *
 * Use the following line to write the vrl4 formatted zImage
 * to an SD card
 * # dd if=vrl4.out of=/dev/sdx bs=512
 */
asmlinkage void mmc_loader(unsigned short *buf, unsigned long len)
{
	int high_capacity;

	mmc_init_progress();

	mmc_update_progress(MMC_PROGRESS_ENTER);
	/* Initialise SDHI1 */
	/* PORT184CR: GPIO_FN_SDHICMD1 Control */
	__raw_writeb(CR_FUNCTION1, PORT184CR);
	/* PORT179CR: GPIO_FN_SDHICLK1 Control */
	__raw_writeb(CR_INPUT_ENABLE|CR_FUNCTION1, PORT179CR);
	/* PORT183CR: GPIO_FN_SDHID1_3 Control */
	__raw_writeb(CR_FUNCTION1, PORT183CR);
	/* PORT182CR: GPIO_FN_SDHID1_2 Control */
	__raw_writeb(CR_FUNCTION1, PORT182CR);
	/* PORT181CR: GPIO_FN_SDHID1_1 Control */
	__raw_writeb(CR_FUNCTION1, PORT181CR);
	/* PORT180CR: GPIO_FN_SDHID1_0 Control */
	__raw_writeb(CR_FUNCTION1, PORT180CR);

	/* Enable clock to SDHI1 hardware block */
	__raw_writel(__raw_readl(SMSTPCR3) & ~(1 << 13), SMSTPCR3);

	/* setup SDHI hardware */
	mmc_update_progress(MMC_PROGRESS_INIT);
	high_capacity = sdhi_boot_init(SDHI_BASE);
	if (high_capacity < 0)
		goto err;

	mmc_update_progress(MMC_PROGRESS_LOAD);
	/* load kernel */
	if (sdhi_boot_do_read(SDHI_BASE, high_capacity,
			      0, /* Kernel is at block 1 */
			      (len + TMIO_BBS - 1) / TMIO_BBS, buf))
		goto err;

	/* Disable clock to SDHI1 hardware block */
	__raw_writel(__raw_readl(SMSTPCR3) | (1 << 13), SMSTPCR3);

	mmc_update_progress(MMC_PROGRESS_DONE);

	return;
err:
	for(;;);
}

@@ -0,0 +1,449 @@
/*
 * SuperH Mobile SDHI
 *
 * Copyright (C) 2010 Magnus Damm
 * Copyright (C) 2010 Kuninori Morimoto
 * Copyright (C) 2010 Simon Horman
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Parts inspired by u-boot
 */

#include <linux/io.h>
#include <linux/mmc/host.h>
#include <linux/mmc/core.h>
#include <linux/mmc/mmc.h>
#include <linux/mmc/sd.h>
#include <linux/mmc/tmio.h>
#include <mach/sdhi.h>

#define OCR_FASTBOOT	(1<<29)
#define OCR_HCS		(1<<30)
#define OCR_BUSY	(1<<31)

#define RESP_CMD12	0x00000030

static inline u16 sd_ctrl_read16(void __iomem *base, int addr)
{
	return __raw_readw(base + addr);
}

static inline u32 sd_ctrl_read32(void __iomem *base, int addr)
{
	return __raw_readw(base + addr) |
	       __raw_readw(base + addr + 2) << 16;
}

static inline void sd_ctrl_write16(void __iomem *base, int addr, u16 val)
{
	__raw_writew(val, base + addr);
}

static inline void sd_ctrl_write32(void __iomem *base, int addr, u32 val)
{
	__raw_writew(val, base + addr);
	__raw_writew(val >> 16, base + addr + 2);
}

#define ALL_ERROR (TMIO_STAT_CMD_IDX_ERR | TMIO_STAT_CRCFAIL |		\
		   TMIO_STAT_STOPBIT_ERR | TMIO_STAT_DATATIMEOUT |	\
		   TMIO_STAT_RXOVERFLOW | TMIO_STAT_TXUNDERRUN |	\
		   TMIO_STAT_CMDTIMEOUT | TMIO_STAT_ILL_ACCESS |	\
		   TMIO_STAT_ILL_FUNC)

static int sdhi_intr(void __iomem *base)
{
	unsigned long state = sd_ctrl_read32(base, CTL_STATUS);

	if (state & ALL_ERROR) {
		sd_ctrl_write32(base, CTL_STATUS, ~ALL_ERROR);
		sd_ctrl_write32(base, CTL_IRQ_MASK,
				ALL_ERROR |
				sd_ctrl_read32(base, CTL_IRQ_MASK));
		return -EINVAL;
	}
	if (state & TMIO_STAT_CMDRESPEND) {
		sd_ctrl_write32(base, CTL_STATUS, ~TMIO_STAT_CMDRESPEND);
		sd_ctrl_write32(base, CTL_IRQ_MASK,
				TMIO_STAT_CMDRESPEND |
				sd_ctrl_read32(base, CTL_IRQ_MASK));
		return 0;
	}
	if (state & TMIO_STAT_RXRDY) {
		sd_ctrl_write32(base, CTL_STATUS, ~TMIO_STAT_RXRDY);
		sd_ctrl_write32(base, CTL_IRQ_MASK,
				TMIO_STAT_RXRDY | TMIO_STAT_TXUNDERRUN |
				sd_ctrl_read32(base, CTL_IRQ_MASK));
		return 0;
	}
	if (state & TMIO_STAT_DATAEND) {
		sd_ctrl_write32(base, CTL_STATUS, ~TMIO_STAT_DATAEND);
		sd_ctrl_write32(base, CTL_IRQ_MASK,
				TMIO_STAT_DATAEND |
				sd_ctrl_read32(base, CTL_IRQ_MASK));
		return 0;
	}

	return -EAGAIN;
}

static int sdhi_boot_wait_resp_end(void __iomem *base)
{
	int err = -EAGAIN, timeout = 10000000;

	while (timeout--) {
		err = sdhi_intr(base);
		if (err != -EAGAIN)
			break;
		udelay(1);
	}

	return err;
}

/* SDHI_CLK_CTRL */
#define CLK_MMC_ENABLE	(1 << 8)
#define CLK_MMC_INIT	(1 << 6)	/* clk / 256 */

static void sdhi_boot_mmc_clk_stop(void __iomem *base)
{
	sd_ctrl_write16(base, CTL_CLK_AND_WAIT_CTL, 0x0000);
	msleep(10);
	sd_ctrl_write16(base, CTL_SD_CARD_CLK_CTL, ~CLK_MMC_ENABLE &
		sd_ctrl_read16(base, CTL_SD_CARD_CLK_CTL));
	msleep(10);
}

static void sdhi_boot_mmc_clk_start(void __iomem *base)
{
	sd_ctrl_write16(base, CTL_SD_CARD_CLK_CTL, CLK_MMC_ENABLE |
		sd_ctrl_read16(base, CTL_SD_CARD_CLK_CTL));
	msleep(10);
	sd_ctrl_write16(base, CTL_CLK_AND_WAIT_CTL, CLK_MMC_ENABLE);
	msleep(10);
}

static void sdhi_boot_reset(void __iomem *base)
{
	sd_ctrl_write16(base, CTL_RESET_SD, 0x0000);
	msleep(10);
	sd_ctrl_write16(base, CTL_RESET_SD, 0x0001);
	msleep(10);
}

/* Set MMC clock / power.
 * Note: This controller uses a simple divider scheme therefore it cannot
 * run a MMC card at full speed (20MHz). The max clock is 24MHz on SD, but
 * as MMC won't run that fast, it has to be clocked at 12MHz which is the
 * next slowest setting.
 */
static int sdhi_boot_mmc_set_ios(void __iomem *base, struct mmc_ios *ios)
{
	if (sd_ctrl_read32(base, CTL_STATUS) & TMIO_STAT_CMD_BUSY)
		return -EBUSY;

	if (ios->clock)
		sd_ctrl_write16(base, CTL_SD_CARD_CLK_CTL,
				ios->clock | CLK_MMC_ENABLE);

	/* Power sequence - OFF -> ON -> UP */
	switch (ios->power_mode) {
	case MMC_POWER_OFF: /* power down SD bus */
		sdhi_boot_mmc_clk_stop(base);
		break;
	case MMC_POWER_ON: /* power up SD bus */
		break;
	case MMC_POWER_UP: /* start bus clock */
		sdhi_boot_mmc_clk_start(base);
		break;
	}

	switch (ios->bus_width) {
	case MMC_BUS_WIDTH_1:
		sd_ctrl_write16(base, CTL_SD_MEM_CARD_OPT, 0x80e0);
		break;
	case MMC_BUS_WIDTH_4:
		sd_ctrl_write16(base, CTL_SD_MEM_CARD_OPT, 0x00e0);
		break;
	}

	/* Let things settle. delay taken from winCE driver */
	udelay(140);

	return 0;
}

/* These are the bitmasks the tmio chip requires to implement the MMC response
 * types. Note that R1 and R6 are the same in this scheme. */
#define RESP_NONE	0x0300
#define RESP_R1		0x0400
#define RESP_R1B	0x0500
#define RESP_R2		0x0600
#define RESP_R3		0x0700
#define DATA_PRESENT	0x0800
#define TRANSFER_READ	0x1000

static int sdhi_boot_request(void __iomem *base, struct mmc_command *cmd)
{
	int err, c = cmd->opcode;

	switch (mmc_resp_type(cmd)) {
	case MMC_RSP_NONE: c |= RESP_NONE; break;
	case MMC_RSP_R1:   c |= RESP_R1;   break;
	case MMC_RSP_R1B:  c |= RESP_R1B;  break;
	case MMC_RSP_R2:   c |= RESP_R2;   break;
	case MMC_RSP_R3:   c |= RESP_R3;   break;
	default:
		return -EINVAL;
	}

	/* No interrupts so this may not be cleared */
	sd_ctrl_write32(base, CTL_STATUS, ~TMIO_STAT_CMDRESPEND);

	sd_ctrl_write32(base, CTL_IRQ_MASK, TMIO_STAT_CMDRESPEND |
			sd_ctrl_read32(base, CTL_IRQ_MASK));
	sd_ctrl_write32(base, CTL_ARG_REG, cmd->arg);
	sd_ctrl_write16(base, CTL_SD_CMD, c);


	sd_ctrl_write32(base, CTL_IRQ_MASK,
			~(TMIO_STAT_CMDRESPEND | ALL_ERROR) &
			sd_ctrl_read32(base, CTL_IRQ_MASK));

	err = sdhi_boot_wait_resp_end(base);
	if (err)
		return err;

	cmd->resp[0] = sd_ctrl_read32(base, CTL_RESPONSE);

	return 0;
}

static int sdhi_boot_do_read_single(void __iomem *base, int high_capacity,
				    unsigned long block, unsigned short *buf)
{
	int err, i;

	/* CMD17 - Read */
	{
		struct mmc_command cmd;

		cmd.opcode = MMC_READ_SINGLE_BLOCK | \
			     TRANSFER_READ | DATA_PRESENT;
		if (high_capacity)
			cmd.arg = block;
		else
			cmd.arg = block * TMIO_BBS;
		cmd.flags = MMC_RSP_R1;
		err = sdhi_boot_request(base, &cmd);
		if (err)
			return err;
	}

	sd_ctrl_write32(base, CTL_IRQ_MASK,
			~(TMIO_STAT_DATAEND | TMIO_STAT_RXRDY |
			  TMIO_STAT_TXUNDERRUN) &
			sd_ctrl_read32(base, CTL_IRQ_MASK));
	err = sdhi_boot_wait_resp_end(base);
	if (err)
		return err;

	sd_ctrl_write16(base, CTL_SD_XFER_LEN, TMIO_BBS);
	for (i = 0; i < TMIO_BBS / sizeof(*buf); i++)
		*buf++ = sd_ctrl_read16(base, RESP_CMD12);

	err = sdhi_boot_wait_resp_end(base);
	if (err)
		return err;

	return 0;
}

int sdhi_boot_do_read(void __iomem *base, int high_capacity,
		      unsigned long offset, unsigned short count,
		      unsigned short *buf)
{
	unsigned long i;
	int err = 0;

	for (i = 0; i < count; i++) {
		err = sdhi_boot_do_read_single(base, high_capacity, offset + i,
					       buf + (i * TMIO_BBS /
						      sizeof(*buf)));
		if (err)
			return err;
	}

	return 0;
}

#define VOLTAGES (MMC_VDD_32_33 | MMC_VDD_33_34)

int sdhi_boot_init(void __iomem *base)
{
	bool sd_v2 = false, sd_v1_0 = false;
	unsigned short cid;
	int err, high_capacity = 0;

	sdhi_boot_mmc_clk_stop(base);
	sdhi_boot_reset(base);

	/* mmc0: clock 400000Hz busmode 1 powermode 2 cs 0 Vdd 21 width 0 timing 0 */
	{
		struct mmc_ios ios;
		ios.power_mode = MMC_POWER_ON;
		ios.bus_width = MMC_BUS_WIDTH_1;
		ios.clock = CLK_MMC_INIT;
		err = sdhi_boot_mmc_set_ios(base, &ios);
		if (err)
			return err;
	}

	/* CMD0 */
	{
		struct mmc_command cmd;
		msleep(1);
		cmd.opcode = MMC_GO_IDLE_STATE;
		cmd.arg = 0;
		cmd.flags = MMC_RSP_NONE;
		err = sdhi_boot_request(base, &cmd);
		if (err)
			return err;
		msleep(2);
	}

	/* CMD8 - Test for SD version 2 */
	{
		struct mmc_command cmd;
		cmd.opcode = SD_SEND_IF_COND;
		cmd.arg = (VOLTAGES != 0) << 8 | 0xaa;
		cmd.flags = MMC_RSP_R1;
		err = sdhi_boot_request(base, &cmd); /* Ignore error */
		if ((cmd.resp[0] & 0xff) == 0xaa)
			sd_v2 = true;
	}

	/* CMD55 - Get OCR (SD) */
	{
		int timeout = 1000;
		struct mmc_command cmd;

		cmd.arg = 0;

		do {
			cmd.opcode = MMC_APP_CMD;
			cmd.flags = MMC_RSP_R1;
			cmd.arg = 0;
			err = sdhi_boot_request(base, &cmd);
			if (err)
				break;

			cmd.opcode = SD_APP_OP_COND;
			cmd.flags = MMC_RSP_R3;
			cmd.arg = (VOLTAGES & 0xff8000);
			if (sd_v2)
				cmd.arg |= OCR_HCS;
			cmd.arg |= OCR_FASTBOOT;
			err = sdhi_boot_request(base, &cmd);
			if (err)
				break;

			msleep(1);
		} while((!(cmd.resp[0] & OCR_BUSY)) && --timeout);

		if (!err && timeout) {
			if (!sd_v2)
				sd_v1_0 = true;
			high_capacity = (cmd.resp[0] & OCR_HCS) == OCR_HCS;
		}
	}

	/* CMD1 - Get OCR (MMC) */
	if (!sd_v2 && !sd_v1_0) {
		int timeout = 1000;
		struct mmc_command cmd;

		do {
			cmd.opcode = MMC_SEND_OP_COND;
			cmd.arg = VOLTAGES | OCR_HCS;
			cmd.flags = MMC_RSP_R3;
			err = sdhi_boot_request(base, &cmd);
			if (err)
				return err;

			msleep(1);
		} while((!(cmd.resp[0] & OCR_BUSY)) && --timeout);

		if (!timeout)
			return -EAGAIN;

		high_capacity = (cmd.resp[0] & OCR_HCS) == OCR_HCS;
	}

	/* CMD2 - Get CID */
	{
		struct mmc_command cmd;
		cmd.opcode = MMC_ALL_SEND_CID;
		cmd.arg = 0;
		cmd.flags = MMC_RSP_R2;
		err = sdhi_boot_request(base, &cmd);
		if (err)
			return err;
	}

	/* CMD3
	 * MMC: Set the relative address
	 * SD:  Get the relative address
	 * Also puts the card into the standby state
	 */
	{
		struct mmc_command cmd;
		cmd.opcode = MMC_SET_RELATIVE_ADDR;
		cmd.arg = 0;
		cmd.flags = MMC_RSP_R1;
		err = sdhi_boot_request(base, &cmd);
		if (err)
			return err;
		cid = cmd.resp[0] >> 16;
	}

	/* CMD9 - Get CSD */
	{
		struct mmc_command cmd;
		cmd.opcode = MMC_SEND_CSD;
		cmd.arg = cid << 16;
		cmd.flags = MMC_RSP_R2;
		err = sdhi_boot_request(base, &cmd);
		if (err)
			return err;
	}

	/* CMD7 - Select the card */
	{
		struct mmc_command cmd;
		cmd.opcode = MMC_SELECT_CARD;
		//cmd.arg = rca << 16;
		cmd.arg = cid << 16;
		//cmd.flags = MMC_RSP_R1B;
		cmd.flags = MMC_RSP_R1;
		err = sdhi_boot_request(base, &cmd);
		if (err)
			return err;
	}

	/* CMD16 - Set the block size */
	{
		struct mmc_command cmd;
		cmd.opcode = MMC_SET_BLOCKLEN;
		cmd.arg = TMIO_BBS;
		cmd.flags = MMC_RSP_R1;
		err = sdhi_boot_request(base, &cmd);
		if (err)
			return err;
	}

	return high_capacity;
}
@@ -0,0 +1,11 @@
#ifndef SDHI_MOBILE_H
#define SDHI_MOBILE_H

#include <linux/compiler.h>

int sdhi_boot_do_read(void __iomem *base, int high_capacity,
		      unsigned long offset, unsigned short count,
		      unsigned short *buf);
int sdhi_boot_init(void __iomem *base);

#endif
@@ -33,20 +33,24 @@ SECTIONS
    *(.text.*)
    *(.fixup)
    *(.gnu.warning)
    *(.glue_7t)
    *(.glue_7)
  }
  .rodata : {
    *(.rodata)
    *(.rodata.*)
    *(.glue_7)
    *(.glue_7t)
  }
  .piggydata : {
    *(.piggydata)
    . = ALIGN(4);
  }

  . = ALIGN(4);
  _etext = .;

  .got.plt : { *(.got.plt) }
  _got_start = .;
  .got : { *(.got) }
  _got_end = .;
  .got.plt : { *(.got.plt) }
  _edata = .;

  . = BSS_START;

+88 -115
@@ -79,6 +79,8 @@ struct dmabounce_device_info {
	struct dmabounce_pool	large;

	rwlock_t lock;

	int (*needs_bounce)(struct device *, dma_addr_t, size_t);
};

#ifdef STATS
@@ -210,114 +212,91 @@ static struct safe_buffer *find_safe_buffer_dev(struct device *dev,
	if (!dev || !dev->archdata.dmabounce)
		return NULL;
	if (dma_mapping_error(dev, dma_addr)) {
		if (dev)
			dev_err(dev, "Trying to %s invalid mapping\n", where);
		else
			pr_err("unknown device: Trying to %s invalid mapping\n", where);
		dev_err(dev, "Trying to %s invalid mapping\n", where);
		return NULL;
	}
	return find_safe_buffer(dev->archdata.dmabounce, dma_addr);
}

static int needs_bounce(struct device *dev, dma_addr_t dma_addr, size_t size)
{
	if (!dev || !dev->archdata.dmabounce)
		return 0;

	if (dev->dma_mask) {
		unsigned long limit, mask = *dev->dma_mask;

		limit = (mask + 1) & ~mask;
		if (limit && size > limit) {
			dev_err(dev, "DMA mapping too big (requested %#x "
				"mask %#Lx)\n", size, *dev->dma_mask);
			return -E2BIG;
		}

		/* Figure out if we need to bounce from the DMA mask. */
		if ((dma_addr | (dma_addr + size - 1)) & ~mask)
			return 1;
	}

	return !!dev->archdata.dmabounce->needs_bounce(dev, dma_addr, size);
}

static inline dma_addr_t map_single(struct device *dev, void *ptr, size_t size,
		enum dma_data_direction dir)
{
	struct dmabounce_device_info *device_info = dev->archdata.dmabounce;
	dma_addr_t dma_addr;
	int needs_bounce = 0;
	struct safe_buffer *buf;

	if (device_info)
		DO_STATS ( device_info->map_op_count++ );

	dma_addr = virt_to_dma(dev, ptr);

	if (dev->dma_mask) {
		unsigned long mask = *dev->dma_mask;
		unsigned long limit;

		limit = (mask + 1) & ~mask;
		if (limit && size > limit) {
			dev_err(dev, "DMA mapping too big (requested %#x "
				"mask %#Lx)\n", size, *dev->dma_mask);
			return ~0;
		}

		/*
		 * Figure out if we need to bounce from the DMA mask.
		 */
		needs_bounce = (dma_addr | (dma_addr + size - 1)) & ~mask;
	buf = alloc_safe_buffer(device_info, ptr, size, dir);
	if (buf == NULL) {
		dev_err(dev, "%s: unable to map unsafe buffer %p!\n",
			__func__, ptr);
		return ~0;
	}

	if (device_info && (needs_bounce || dma_needs_bounce(dev, dma_addr, size))) {
		struct safe_buffer *buf;
	dev_dbg(dev, "%s: unsafe buffer %p (dma=%#x) mapped to %p (dma=%#x)\n",
		__func__, buf->ptr, virt_to_dma(dev, buf->ptr),
		buf->safe, buf->safe_dma_addr);

		buf = alloc_safe_buffer(device_info, ptr, size, dir);
		if (buf == 0) {
			dev_err(dev, "%s: unable to map unsafe buffer %p!\n",
				__func__, ptr);
			return ~0;
		}

		dev_dbg(dev,
			"%s: unsafe buffer %p (dma=%#x) mapped to %p (dma=%#x)\n",
			__func__, buf->ptr, virt_to_dma(dev, buf->ptr),
			buf->safe, buf->safe_dma_addr);

		if ((dir == DMA_TO_DEVICE) ||
		    (dir == DMA_BIDIRECTIONAL)) {
			dev_dbg(dev, "%s: copy unsafe %p to safe %p, size %d\n",
				__func__, ptr, buf->safe, size);
			memcpy(buf->safe, ptr, size);
		}
		ptr = buf->safe;

		dma_addr = buf->safe_dma_addr;
	} else {
		/*
		 * We don't need to sync the DMA buffer since
		 * it was allocated via the coherent allocators.
		 */
		__dma_single_cpu_to_dev(ptr, size, dir);
	if (dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL) {
		dev_dbg(dev, "%s: copy unsafe %p to safe %p, size %d\n",
			__func__, ptr, buf->safe, size);
		memcpy(buf->safe, ptr, size);
	}

	return dma_addr;
	return buf->safe_dma_addr;
}

static inline void unmap_single(struct device *dev, dma_addr_t dma_addr,
static inline void unmap_single(struct device *dev, struct safe_buffer *buf,
		size_t size, enum dma_data_direction dir)
{
	struct safe_buffer *buf = find_safe_buffer_dev(dev, dma_addr, "unmap");
	BUG_ON(buf->size != size);
	BUG_ON(buf->direction != dir);

	if (buf) {
		BUG_ON(buf->size != size);
		BUG_ON(buf->direction != dir);
	dev_dbg(dev, "%s: unsafe buffer %p (dma=%#x) mapped to %p (dma=%#x)\n",
		__func__, buf->ptr, virt_to_dma(dev, buf->ptr),
		buf->safe, buf->safe_dma_addr);

		dev_dbg(dev,
			"%s: unsafe buffer %p (dma=%#x) mapped to %p (dma=%#x)\n",
			__func__, buf->ptr, virt_to_dma(dev, buf->ptr),
			buf->safe, buf->safe_dma_addr);
	DO_STATS(dev->archdata.dmabounce->bounce_count++);

		DO_STATS(dev->archdata.dmabounce->bounce_count++);
	if (dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL) {
		void *ptr = buf->ptr;

		if (dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL) {
			void *ptr = buf->ptr;
		dev_dbg(dev, "%s: copy back safe %p to unsafe %p size %d\n",
			__func__, buf->safe, ptr, size);
		memcpy(ptr, buf->safe, size);

			dev_dbg(dev,
				"%s: copy back safe %p to unsafe %p size %d\n",
				__func__, buf->safe, ptr, size);
			memcpy(ptr, buf->safe, size);

			/*
			 * Since we may have written to a page cache page,
			 * we need to ensure that the data will be coherent
			 * with user mappings.
			 */
			__cpuc_flush_dcache_area(ptr, size);
		}
		free_safe_buffer(dev->archdata.dmabounce, buf);
	} else {
		__dma_single_dev_to_cpu(dma_to_virt(dev, dma_addr), size, dir);
		/*
		 * Since we may have written to a page cache page,
		 * we need to ensure that the data will be coherent
		 * with user mappings.
		 */
		__cpuc_flush_dcache_area(ptr, size);
	}
	free_safe_buffer(dev->archdata.dmabounce, buf);
}

/* ************************************************** */
@@ -328,45 +307,28 @@ static inline void unmap_single(struct device *dev, dma_addr_t dma_addr,
 * substitute the safe buffer for the unsafe one.
 * (basically move the buffer from an unsafe area to a safe one)
 */
dma_addr_t __dma_map_single(struct device *dev, void *ptr, size_t size,
		enum dma_data_direction dir)
{
	dev_dbg(dev, "%s(ptr=%p,size=%d,dir=%x)\n",
		__func__, ptr, size, dir);

	BUG_ON(!valid_dma_direction(dir));

	return map_single(dev, ptr, size, dir);
}
EXPORT_SYMBOL(__dma_map_single);

/*
 * see if a mapped address was really a "safe" buffer and if so, copy
 * the data from the safe buffer back to the unsafe buffer and free up
 * the safe buffer. (basically return things back to the way they
 * should be)
 */
void __dma_unmap_single(struct device *dev, dma_addr_t dma_addr, size_t size,
		enum dma_data_direction dir)
{
	dev_dbg(dev, "%s(ptr=%p,size=%d,dir=%x)\n",
		__func__, (void *) dma_addr, size, dir);

	unmap_single(dev, dma_addr, size, dir);
}
EXPORT_SYMBOL(__dma_unmap_single);

dma_addr_t __dma_map_page(struct device *dev, struct page *page,
		unsigned long offset, size_t size, enum dma_data_direction dir)
{
	dma_addr_t dma_addr;
	int ret;

	dev_dbg(dev, "%s(page=%p,off=%#lx,size=%zx,dir=%x)\n",
		__func__, page, offset, size, dir);

	BUG_ON(!valid_dma_direction(dir));
	dma_addr = pfn_to_dma(dev, page_to_pfn(page)) + offset;

	ret = needs_bounce(dev, dma_addr, size);
	if (ret < 0)
		return ~0;

	if (ret == 0) {
		__dma_page_cpu_to_dev(page, offset, size, dir);
		return dma_addr;
	}

	if (PageHighMem(page)) {
		dev_err(dev, "DMA buffer bouncing of HIGHMEM pages "
			"is not supported\n");
		dev_err(dev, "DMA buffer bouncing of HIGHMEM pages is not supported\n");
		return ~0;
	}
@@ -383,10 +345,19 @@ EXPORT_SYMBOL(__dma_map_page);
void __dma_unmap_page(struct device *dev, dma_addr_t dma_addr, size_t size,
		enum dma_data_direction dir)
{
	dev_dbg(dev, "%s(ptr=%p,size=%d,dir=%x)\n",
		__func__, (void *) dma_addr, size, dir);
	struct safe_buffer *buf;

	unmap_single(dev, dma_addr, size, dir);
	dev_dbg(dev, "%s(dma=%#x,size=%d,dir=%x)\n",
		__func__, dma_addr, size, dir);

	buf = find_safe_buffer_dev(dev, dma_addr, __func__);
	if (!buf) {
		__dma_page_dev_to_cpu(pfn_to_page(dma_to_pfn(dev, dma_addr)),
			dma_addr & ~PAGE_MASK, size, dir);
		return;
	}

	unmap_single(dev, buf, size, dir);
}
EXPORT_SYMBOL(__dma_unmap_page);
@@ -461,7 +432,8 @@ static int dmabounce_init_pool(struct dmabounce_pool *pool, struct device *dev,
}

int dmabounce_register_dev(struct device *dev, unsigned long small_buffer_size,
		unsigned long large_buffer_size)
		unsigned long large_buffer_size,
		int (*needs_bounce_fn)(struct device *, dma_addr_t, size_t))
{
	struct dmabounce_device_info *device_info;
	int ret;
@@ -497,6 +469,7 @@ int dmabounce_register_dev(struct device *dev, unsigned long small_buffer_size,
	device_info->dev = dev;
	INIT_LIST_HEAD(&device_info->safe_buffers);
	rwlock_init(&device_info->lock);
	device_info->needs_bounce = needs_bounce_fn;

#ifdef STATS
	device_info->total_allocs = 0;
@@ -179,22 +179,21 @@ static int gic_set_affinity(struct irq_data *d, const struct cpumask *mask_val,
{
	void __iomem *reg = gic_dist_base(d) + GIC_DIST_TARGET + (gic_irq(d) & ~3);
	unsigned int shift = (d->irq % 4) * 8;
	unsigned int cpu = cpumask_first(mask_val);
	unsigned int cpu = cpumask_any_and(mask_val, cpu_online_mask);
	u32 val, mask, bit;

	if (cpu >= 8)
	if (cpu >= 8 || cpu >= nr_cpu_ids)
		return -EINVAL;

	mask = 0xff << shift;
	bit = 1 << (cpu + shift);

	spin_lock(&irq_controller_lock);
	d->node = cpu;
	val = readl_relaxed(reg) & ~mask;
	writel_relaxed(val | bit, reg);
	spin_unlock(&irq_controller_lock);

	return 0;
	return IRQ_SET_MASK_OK;
}
#endif

@@ -243,6 +243,12 @@ static struct resource it8152_mem = {
 * ITE8152 chip can address up to 64MByte, so all the devices
 * connected to ITE8152 (PCI and USB) should have limited DMA window
 */
static int it8152_needs_bounce(struct device *dev, dma_addr_t dma_addr, size_t size)
{
	dev_dbg(dev, "%s: dma_addr %08x, size %08x\n",
		__func__, dma_addr, size);
	return (dma_addr + size - PHYS_OFFSET) >= SZ_64M;
}

/*
 * Setup DMA mask to 64MB on devices connected to ITE8152. Ignore all
@@ -254,7 +260,7 @@ static int it8152_pci_platform_notify(struct device *dev)
		if (dev->dma_mask)
			*dev->dma_mask = (SZ_64M - 1) | PHYS_OFFSET;
		dev->coherent_dma_mask = (SZ_64M - 1) | PHYS_OFFSET;
		dmabounce_register_dev(dev, 2048, 4096);
		dmabounce_register_dev(dev, 2048, 4096, it8152_needs_bounce);
	}
	return 0;
}
@@ -267,14 +273,6 @@ static int it8152_pci_platform_notify_remove(struct device *dev)
	return 0;
}

int dma_needs_bounce(struct device *dev, dma_addr_t dma_addr, size_t size)
{
	dev_dbg(dev, "%s: dma_addr %08x, size %08x\n",
		__func__, dma_addr, size);
	return (dev->bus == &pci_bus_type) &&
		((dma_addr + size - PHYS_OFFSET) >= SZ_64M);
}

int dma_set_coherent_mask(struct device *dev, u64 mask)
{
	if (mask >= PHYS_OFFSET + SZ_64M - 1)

+31 -29
@@ -579,7 +579,36 @@ sa1111_configure_smc(struct sa1111 *sachip, int sdram, unsigned int drac,

	sachip->dev->coherent_dma_mask &= sa1111_dma_mask[drac >> 2];
}
#endif

#ifdef CONFIG_DMABOUNCE
/*
 * According to the "Intel StrongARM SA-1111 Microprocessor Companion
 * Chip Specification Update" (June 2000), erratum #7, there is a
 * significant bug in the SA1111 SDRAM shared memory controller. If
 * an access to a region of memory above 1MB relative to the bank base,
 * it is important that address bit 10 _NOT_ be asserted. Depending
 * on the configuration of the RAM, bit 10 may correspond to one
 * of several different (processor-relative) address bits.
 *
 * This routine only identifies whether or not a given DMA address
 * is susceptible to the bug.
 *
 * This should only get called for sa1111_device types due to the
 * way we configure our device dma_masks.
 */
static int sa1111_needs_bounce(struct device *dev, dma_addr_t addr, size_t size)
{
	/*
	 * Section 4.6 of the "Intel StrongARM SA-1111 Development Module
	 * User's Guide" mentions that jumpers R51 and R52 control the
	 * target of SA-1111 DMA (either SDRAM bank 0 on Assabet, or
	 * SDRAM bank 1 on Neponset). The default configuration selects
	 * Assabet, so any address in bank 1 is necessarily invalid.
	 */
	return (machine_is_assabet() || machine_is_pfs168()) &&
		(addr >= 0xc8000000 || (addr + size) >= 0xc8000000);
}
#endif

static void sa1111_dev_release(struct device *_dev)
@@ -644,7 +673,8 @@ sa1111_init_one_child(struct sa1111 *sachip, struct resource *parent,
	dev->dev.dma_mask = &dev->dma_mask;

	if (dev->dma_mask != 0xffffffffUL) {
		ret = dmabounce_register_dev(&dev->dev, 1024, 4096);
		ret = dmabounce_register_dev(&dev->dev, 1024, 4096,
				sa1111_needs_bounce);
		if (ret) {
			dev_err(&dev->dev, "SA1111: Failed to register"
				" with dmabounce\n");
@@ -818,34 +848,6 @@ static void __sa1111_remove(struct sa1111 *sachip)
	kfree(sachip);
}

/*
 * According to the "Intel StrongARM SA-1111 Microprocessor Companion
 * Chip Specification Update" (June 2000), erratum #7, there is a
 * significant bug in the SA1111 SDRAM shared memory controller. If
 * an access to a region of memory above 1MB relative to the bank base,
 * it is important that address bit 10 _NOT_ be asserted. Depending
 * on the configuration of the RAM, bit 10 may correspond to one
 * of several different (processor-relative) address bits.
 *
 * This routine only identifies whether or not a given DMA address
 * is susceptible to the bug.
 *
 * This should only get called for sa1111_device types due to the
 * way we configure our device dma_masks.
 */
int dma_needs_bounce(struct device *dev, dma_addr_t addr, size_t size)
{
	/*
	 * Section 4.6 of the "Intel StrongARM SA-1111 Development Module
	 * User's Guide" mentions that jumpers R51 and R52 control the
	 * target of SA-1111 DMA (either SDRAM bank 0 on Assabet, or
	 * SDRAM bank 1 on Neponset). The default configuration selects
	 * Assabet, so any address in bank 1 is necessarily invalid.
	 */
	return ((machine_is_assabet() || machine_is_pfs168()) &&
		(addr >= 0xc8000000 || (addr + size) >= 0xc8000000));
}

struct sa1111_save_data {
	unsigned int	skcr;
	unsigned int	skpcr;

@@ -293,4 +293,13 @@
	.macro	ldrusr, reg, ptr, inc, cond=al, rept=1, abort=9001f
	usracc	ldr, \reg, \ptr, \inc, \cond, \rept, \abort
	.endm

/* Utility macro for declaring string literals */
	.macro	string name:req, string
	.type \name , #object
\name:
	.asciz "\string"
	.size \name , . - \name
	.endm

#endif /* __ASM_ASSEMBLER_H__ */

@@ -26,8 +26,8 @@
#include <linux/compiler.h>
#include <asm/system.h>

#define smp_mb__before_clear_bit()	mb()
#define smp_mb__after_clear_bit()	mb()
#define smp_mb__before_clear_bit()	smp_mb()
#define smp_mb__after_clear_bit()	smp_mb()

/*
 * These functions are the basis of our bit ops.

@@ -115,39 +115,8 @@ static inline void __dma_page_dev_to_cpu(struct page *page, unsigned long off,
	___dma_page_dev_to_cpu(page, off, size, dir);
}

/*
 * Return whether the given device DMA address mask can be supported
 * properly. For example, if your device can only drive the low 24-bits
 * during bus mastering, then you would pass 0x00ffffff as the mask
 * to this function.
 *
 * FIXME: This should really be a platform specific issue - we should
 * return false if GFP_DMA allocations may not satisfy the supplied 'mask'.
 */
static inline int dma_supported(struct device *dev, u64 mask)
{
	if (mask < ISA_DMA_THRESHOLD)
		return 0;
	return 1;
}

static inline int dma_set_mask(struct device *dev, u64 dma_mask)
{
#ifdef CONFIG_DMABOUNCE
	if (dev->archdata.dmabounce) {
		if (dma_mask >= ISA_DMA_THRESHOLD)
			return 0;
		else
			return -EIO;
	}
#endif
	if (!dev->dma_mask || !dma_supported(dev, dma_mask))
		return -EIO;

	*dev->dma_mask = dma_mask;

	return 0;
}
extern int dma_supported(struct device *, u64);
extern int dma_set_mask(struct device *, u64);

/*
 * DMA errors are defined by all-bits-set in the DMA address.
@@ -256,14 +225,14 @@ int dma_mmap_writecombine(struct device *, struct vm_area_struct *,
 * @dev: valid struct device pointer
 * @small_buf_size: size of buffers to use with small buffer pool
 * @large_buf_size: size of buffers to use with large buffer pool (can be 0)
 * @needs_bounce_fn: called to determine whether buffer needs bouncing
 *
 * This function should be called by low-level platform code to register
 * a device as requiring DMA buffer bouncing. The function will allocate
 * appropriate DMA pools for the device.
 *
 */
extern int dmabounce_register_dev(struct device *, unsigned long,
		unsigned long);
		unsigned long, int (*)(struct device *, dma_addr_t, size_t));
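
For reference, a minimal sketch of registering a device with the reworked
interface (the names and the 64MB limit are illustrative, modelled on the
it8152 conversion in this series, not a required pattern):

    /* Bounce any mapping that ends beyond a hypothetical 64MB window. */
    static int my_needs_bounce(struct device *dev, dma_addr_t addr, size_t size)
    {
        return (addr + size) >= SZ_64M;
    }

    static int my_platform_notify(struct device *dev)
    {
        /* 2048- and 4096-byte pools, plus the per-platform bounce test. */
        return dmabounce_register_dev(dev, 2048, 4096, my_needs_bounce);
    }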

/**
 * dmabounce_unregister_dev
@@ -277,31 +246,9 @@ extern int dmabounce_register_dev(struct device *, unsigned long,
 */
extern void dmabounce_unregister_dev(struct device *);

/**
 * dma_needs_bounce
 *
 * @dev: valid struct device pointer
 * @dma_handle: dma_handle of unbounced buffer
 * @size: size of region being mapped
 *
 * Platforms that utilize the dmabounce mechanism must implement
 * this function.
 *
 * The dmabounce routines call this function whenever a dma-mapping
 * is requested to determine whether a given buffer needs to be bounced
 * or not. The function must return 0 if the buffer is OK for
 * DMA access and 1 if the buffer needs to be bounced.
 *
 */
extern int dma_needs_bounce(struct device*, dma_addr_t, size_t);

/*
 * The DMA API, implemented by dmabounce.c. See below for descriptions.
 */
extern dma_addr_t __dma_map_single(struct device *, void *, size_t,
		enum dma_data_direction);
extern void __dma_unmap_single(struct device *, dma_addr_t, size_t,
		enum dma_data_direction);
extern dma_addr_t __dma_map_page(struct device *, struct page *,
		unsigned long, size_t, enum dma_data_direction);
extern void __dma_unmap_page(struct device *, dma_addr_t, size_t,
@@ -328,13 +275,6 @@ static inline int dmabounce_sync_for_device(struct device *d, dma_addr_t addr,
}


static inline dma_addr_t __dma_map_single(struct device *dev, void *cpu_addr,
		size_t size, enum dma_data_direction dir)
{
	__dma_single_cpu_to_dev(cpu_addr, size, dir);
	return virt_to_dma(dev, cpu_addr);
}

static inline dma_addr_t __dma_map_page(struct device *dev, struct page *page,
		unsigned long offset, size_t size, enum dma_data_direction dir)
{
@@ -342,12 +282,6 @@ static inline dma_addr_t __dma_map_page(struct device *dev, struct page *page,
	return pfn_to_dma(dev, page_to_pfn(page)) + offset;
}

static inline void __dma_unmap_single(struct device *dev, dma_addr_t handle,
		size_t size, enum dma_data_direction dir)
{
	__dma_single_dev_to_cpu(dma_to_virt(dev, handle), size, dir);
}

static inline void __dma_unmap_page(struct device *dev, dma_addr_t handle,
		size_t size, enum dma_data_direction dir)
{
@@ -373,14 +307,18 @@ static inline void __dma_unmap_page(struct device *dev, dma_addr_t handle,
static inline dma_addr_t dma_map_single(struct device *dev, void *cpu_addr,
		size_t size, enum dma_data_direction dir)
{
	unsigned long offset;
	struct page *page;
	dma_addr_t addr;

	BUG_ON(!virt_addr_valid(cpu_addr));
	BUG_ON(!virt_addr_valid(cpu_addr + size - 1));
	BUG_ON(!valid_dma_direction(dir));

	addr = __dma_map_single(dev, cpu_addr, size, dir);
	debug_dma_map_page(dev, virt_to_page(cpu_addr),
			(unsigned long)cpu_addr & ~PAGE_MASK, size,
			dir, addr, true);
	page = virt_to_page(cpu_addr);
	offset = (unsigned long)cpu_addr & ~PAGE_MASK;
	addr = __dma_map_page(dev, page, offset, size, dir);
	debug_dma_map_page(dev, page, offset, size, dir, addr, true);

	return addr;
}
@@ -430,7 +368,7 @@ static inline void dma_unmap_single(struct device *dev, dma_addr_t handle,
		size_t size, enum dma_data_direction dir)
{
	debug_dma_unmap_page(dev, handle, size, dir, true);
	__dma_unmap_single(dev, handle, size, dir);
	__dma_unmap_page(dev, handle, size, dir);
}

/**
Some files were not shown because too many files have changed in this diff.