From c243efabd3707900d4c8183988efcdc796813e41 Mon Sep 17 00:00:00 2001
From: Robert Nelson <robertcnelson@gmail.com>
Date: Tue, 5 Mar 2013 16:27:03 -0600
Subject: [PATCH] add dma/mmc patches for the am335x/bone

Signed-off-by: Robert Nelson <robertcnelson@gmail.com>
---
 patch.sh                                      |   31 +-
 patches/defconfig                             |    4 +-
 ...edma-dmaengine-induced-null-pointer-.patch |   39 +
 ...-move-private-EDMA-API-to-arm-common.patch | 4021 +++++++++++++++++
 ...-unused-transfer-controller-handlers.patch |   73 +
 ...33XX-support-to-the-private-EDMA-API.patch |  406 ++
 ...aengine-edma-enable-build-for-AM33XX.patch |   28 +
 ...edma-Add-TI-EDMA-device-tree-binding.patch |   72 +
 ...0007-ARM-dts-add-AM33XX-EDMA-support.patch |   47 +
 ...convert-to-dma_request_slave_channel.patch |   91 +
 ...add-generic-DMA-request-support-to-t.patch |   58 +
 ...0-ARM-dts-add-AM33XX-SPI-DMA-support.patch |   43 +
 ...onvert-to-dma_request_slave_channel_.patch |   46 +
 ...kip-platform_get_resource_byname-for.patch |   62 +
 ...dd-generic-DMA-request-support-to-th.patch |   60 +
 patches/ref_omap2plus_defconfig               |    2 +
 version.sh                                    |    2 +-
 17 files changed, 5082 insertions(+), 3 deletions(-)
 create mode 100644 patches/dma/0001-arm-davinci-fix-edma-dmaengine-induced-null-pointer-.patch
 create mode 100644 patches/dma/0002-ARM-davinci-move-private-EDMA-API-to-arm-common.patch
 create mode 100644 patches/dma/0003-ARM-edma-remove-unused-transfer-controller-handlers.patch
 create mode 100644 patches/dma/0004-ARM-edma-add-AM33XX-support-to-the-private-EDMA-API.patch
 create mode 100644 patches/dma/0005-dmaengine-edma-enable-build-for-AM33XX.patch
 create mode 100644 patches/dma/0006-dmaengine-edma-Add-TI-EDMA-device-tree-binding.patch
 create mode 100644 patches/dma/0007-ARM-dts-add-AM33XX-EDMA-support.patch
 create mode 100644 patches/dma/0008-spi-omap2-mcspi-convert-to-dma_request_slave_channel.patch
 create mode 100644 patches/dma/0009-spi-omap2-mcspi-add-generic-DMA-request-support-to-t.patch
 create mode 100644 patches/dma/0010-ARM-dts-add-AM33XX-SPI-DMA-support.patch
 create mode 100644 patches/mmc/0001-mmc-omap_hsmmc-convert-to-dma_request_slave_channel_.patch
 create mode 100644 patches/mmc/0002-mmc-omap_hsmmc-Skip-platform_get_resource_byname-for.patch
 create mode 100644 patches/mmc/0003-mmc-omap_hsmmc-add-generic-DMA-request-support-to-th.patch

diff --git a/patch.sh b/patch.sh
index a31a6121a..1784ebf72 100644
--- a/patch.sh
+++ b/patch.sh
@@ -50,8 +50,34 @@ arm () {
 	${git} "${DIR}/patches/arm/0001-deb-pkg-Simplify-architecture-matching-for-cross-bui.patch"
 }
 
+dma () {
+	echo "dir: dma"
+
+	#[PATCH v2] arm: davinci: fix edma dmaengine induced null pointer dereference on da830
+	${git} "${DIR}/patches/dma/0001-arm-davinci-fix-edma-dmaengine-induced-null-pointer-.patch"
+
+	#[PATCH v8 0/9] DMA Engine support for AM33XX
+	${git} "${DIR}/patches/dma/0002-ARM-davinci-move-private-EDMA-API-to-arm-common.patch"
+	${git} "${DIR}/patches/dma/0003-ARM-edma-remove-unused-transfer-controller-handlers.patch"
+	${git} "${DIR}/patches/dma/0004-ARM-edma-add-AM33XX-support-to-the-private-EDMA-API.patch"
+	${git} "${DIR}/patches/dma/0005-dmaengine-edma-enable-build-for-AM33XX.patch"
+	${git} "${DIR}/patches/dma/0006-dmaengine-edma-Add-TI-EDMA-device-tree-binding.patch"
+	${git} "${DIR}/patches/dma/0007-ARM-dts-add-AM33XX-EDMA-support.patch"
+	${git} "${DIR}/patches/dma/0008-spi-omap2-mcspi-convert-to-dma_request_slave_channel.patch"
+	${git} "${DIR}/patches/dma/0009-spi-omap2-mcspi-add-generic-DMA-request-support-to-t.patch"
+	${git} "${DIR}/patches/dma/0010-ARM-dts-add-AM33XX-SPI-DMA-support.patch"
+}
+
+mmc () {
+	echo "dir: mmc"
+	#[PATCH v2 0/3] omap_hsmmc DT DMA Client support
+	${git} "${DIR}/patches/mmc/0001-mmc-omap_hsmmc-convert-to-dma_request_slave_channel_.patch"
+	${git} "${DIR}/patches/mmc/0002-mmc-omap_hsmmc-Skip-platform_get_resource_byname-for.patch"
+	${git} "${DIR}/patches/mmc/0003-mmc-omap_hsmmc-add-generic-DMA-request-support-to-th.patch"
+}
+
 imx () {
-	echo "imx patches"
+	echo "dir: imx"
 	${git} "${DIR}/patches/imx/0001-ARM-imx-Enable-UART1-for-Sabrelite.patch"
 	${git} "${DIR}/patches/imx/0002-Add-IMX6Q-AHCI-support.patch"
 	${git} "${DIR}/patches/imx/0003-imx-Add-IMX53-AHCI-support.patch"
@@ -100,6 +126,9 @@ omap () {
 	${git} "${DIR}/patches/omap_panda/0002-ti-st-st-kim-fixing-firmware-path.patch"
 }
 
+dma
+mmc
+
 arm
 imx
 omap
diff --git a/patches/defconfig b/patches/defconfig
index 840e69e04..12df0816a 100644
--- a/patches/defconfig
+++ b/patches/defconfig
@@ -510,6 +510,7 @@ CONFIG_ARM_ERRATA_764369=y
 CONFIG_PL310_ERRATA_769419=y
 CONFIG_ARM_ERRATA_775420=y
 CONFIG_ICST=y
+CONFIG_TI_PRIV_EDMA=y
 
 #
 # Bus support
@@ -4110,8 +4111,9 @@ CONFIG_DMADEVICES=y
 CONFIG_MX3_IPU=y
 CONFIG_MX3_IPU_IRQS=4
 CONFIG_TIMB_DMA=m
+CONFIG_TI_EDMA=y
 CONFIG_PL330_DMA=y
-# CONFIG_IMX_SDMA is not set
+CONFIG_IMX_SDMA=y
 # CONFIG_IMX_DMA is not set
 # CONFIG_MXS_DMA is not set
 CONFIG_DMA_OMAP=y
diff --git a/patches/dma/0001-arm-davinci-fix-edma-dmaengine-induced-null-pointer-.patch b/patches/dma/0001-arm-davinci-fix-edma-dmaengine-induced-null-pointer-.patch
new file mode 100644
index 000000000..e2d59a979
--- /dev/null
+++ b/patches/dma/0001-arm-davinci-fix-edma-dmaengine-induced-null-pointer-.patch
@@ -0,0 +1,39 @@
+From c895ff57a548dab19db4d830f47425a59d7a7dab Mon Sep 17 00:00:00 2001
+From: Matt Porter <mporter@ti.com>
+Date: Tue, 5 Mar 2013 10:58:22 -0500
+Subject: [PATCH 01/10] arm: davinci: fix edma dmaengine induced null pointer
+ dereference on da830
+
+This adds additional error checking to the private edma api implementation
+to catch the case where the edma_alloc_slot() has an invalid controller
+parameter. The edma dmaengine wrapper driver relies on this condition
+being handled in order to avoid setting up a second edma dmaengine
+instance on DA830.
+
+Verfied using a DA850 with the second EDMA controller platform instance
+removed to simulate a DA830 which only has a single EDMA controller.
+
+Reported-by: Tomas Novotny <tomas@novotny.cz>
+Signed-off-by: Matt Porter <mporter@ti.com>
+Cc: stable@vger.kernel.org # v3.7.x+
+---
+ arch/arm/mach-davinci/dma.c |    3 +++
+ 1 file changed, 3 insertions(+)
+
+diff --git a/arch/arm/mach-davinci/dma.c b/arch/arm/mach-davinci/dma.c
+index a685e97..45b7c71 100644
+--- a/arch/arm/mach-davinci/dma.c
++++ b/arch/arm/mach-davinci/dma.c
+@@ -743,6 +743,9 @@ EXPORT_SYMBOL(edma_free_channel);
+  */
+ int edma_alloc_slot(unsigned ctlr, int slot)
+ {
++	if (!edma_cc[ctlr])
++		return -EINVAL;
++
+ 	if (slot >= 0)
+ 		slot = EDMA_CHAN_SLOT(slot);
+ 
+-- 
+1.7.10.4
+
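The check above matters because DA830 has only one EDMA channel controller, so edma_cc[1] is never allocated; before this fix, a caller probing the second controller would dereference that NULL pointer inside edma_alloc_slot(). A minimal illustrative caller, shown here as a sketch (the controller index and the negative "any unused slot" argument are placeholders, not code from this series), now gets a clean error instead:

#include <mach/edma.h>	/* private EDMA API; moved to <linux/platform_data/edma.h> later in this series */

/* Illustrative only: what probing a second controller amounts to on a
 * single-CC part such as DA830. */
static int second_cc_dummy_slot(void)
{
	int slot = edma_alloc_slot(1, -1);	/* negative slot: "any unused slot" */

	if (slot < 0)
		return slot;	/* now -EINVAL on DA830 instead of a NULL dereference */

	edma_free_slot(slot);
	return 0;
}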
diff --git a/patches/dma/0002-ARM-davinci-move-private-EDMA-API-to-arm-common.patch b/patches/dma/0002-ARM-davinci-move-private-EDMA-API-to-arm-common.patch
new file mode 100644
index 000000000..48c36e8b6
--- /dev/null
+++ b/patches/dma/0002-ARM-davinci-move-private-EDMA-API-to-arm-common.patch
@@ -0,0 +1,4021 @@
+From 3c5e0b1198e05f439e8a3c83397cba09f96fc296 Mon Sep 17 00:00:00 2001
+From: Matt Porter <mporter@ti.com>
+Date: Tue, 5 Mar 2013 15:53:07 -0500
+Subject: [PATCH 02/10] ARM: davinci: move private EDMA API to arm/common
+
+Move mach-davinci/dma.c to common/edma.c so it can be used
+by OMAP (specifically AM33xx) as well.
+
+Signed-off-by: Matt Porter <mporter@ti.com>
+Acked-by: Sekhar Nori <nsekhar@ti.com>
+---
+ arch/arm/Kconfig                            |    1 +
+ arch/arm/common/Kconfig                     |    3 +
+ arch/arm/common/Makefile                    |    1 +
+ arch/arm/common/edma.c                      | 1590 ++++++++++++++++++++++++++
+ arch/arm/mach-davinci/Makefile              |    2 +-
+ arch/arm/mach-davinci/board-tnetv107x-evm.c |    2 +-
+ arch/arm/mach-davinci/davinci.h             |    2 +-
+ arch/arm/mach-davinci/devices-tnetv107x.c   |    2 +-
+ arch/arm/mach-davinci/devices.c             |    6 +-
+ arch/arm/mach-davinci/dm355.c               |    2 +-
+ arch/arm/mach-davinci/dm365.c               |    2 +-
+ arch/arm/mach-davinci/dm644x.c              |    2 +-
+ arch/arm/mach-davinci/dm646x.c              |    2 +-
+ arch/arm/mach-davinci/dma.c                 | 1591 ---------------------------
+ arch/arm/mach-davinci/include/mach/da8xx.h  |    2 +-
+ arch/arm/mach-davinci/include/mach/edma.h   |  267 -----
+ drivers/dma/edma.c                          |    2 +-
+ drivers/mmc/host/davinci_mmc.c              |    1 +
+ include/linux/mfd/davinci_voicecodec.h      |    3 +-
+ include/linux/platform_data/edma.h          |  182 +++
+ include/linux/platform_data/spi-davinci.h   |    2 +-
+ sound/soc/davinci/davinci-evm.c             |    1 +
+ sound/soc/davinci/davinci-pcm.c             |    1 +
+ sound/soc/davinci/davinci-pcm.h             |    2 +-
+ sound/soc/davinci/davinci-sffsdr.c          |    5 +-
+ 25 files changed, 1802 insertions(+), 1874 deletions(-)
+ create mode 100644 arch/arm/common/edma.c
+ delete mode 100644 arch/arm/mach-davinci/dma.c
+ delete mode 100644 arch/arm/mach-davinci/include/mach/edma.h
+ create mode 100644 include/linux/platform_data/edma.h
+
+diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
+index 5b71469..cb80a4d 100644
+--- a/arch/arm/Kconfig
++++ b/arch/arm/Kconfig
+@@ -956,6 +956,7 @@ config ARCH_DAVINCI
+ 	select GENERIC_IRQ_CHIP
+ 	select HAVE_IDE
+ 	select NEED_MACH_GPIO_H
++	select TI_PRIV_EDMA
+ 	select USE_OF
+ 	select ZONE_DMA
+ 	help
+diff --git a/arch/arm/common/Kconfig b/arch/arm/common/Kconfig
+index 9353184..c3a4e9c 100644
+--- a/arch/arm/common/Kconfig
++++ b/arch/arm/common/Kconfig
+@@ -17,3 +17,6 @@ config SHARP_PARAM
+ 
+ config SHARP_SCOOP
+ 	bool
++
++config TI_PRIV_EDMA
++	bool
+diff --git a/arch/arm/common/Makefile b/arch/arm/common/Makefile
+index dc8dd0d..9643c50 100644
+--- a/arch/arm/common/Makefile
++++ b/arch/arm/common/Makefile
+@@ -11,3 +11,4 @@ obj-$(CONFIG_SHARP_PARAM)	+= sharpsl_param.o
+ obj-$(CONFIG_SHARP_SCOOP)	+= scoop.o
+ obj-$(CONFIG_PCI_HOST_ITE8152)  += it8152.o
+ obj-$(CONFIG_ARM_TIMER_SP804)	+= timer-sp.o
++obj-$(CONFIG_TI_PRIV_EDMA)	+= edma.o
+diff --git a/arch/arm/common/edma.c b/arch/arm/common/edma.c
+new file mode 100644
+index 0000000..f112be7
+--- /dev/null
++++ b/arch/arm/common/edma.c
+@@ -0,0 +1,1590 @@
++/*
++ * EDMA3 support for DaVinci
++ *
++ * Copyright (C) 2006-2009 Texas Instruments.
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License as published by
++ * the Free Software Foundation; either version 2 of the License, or
++ * (at your option) any later version.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
++ */
++#include <linux/kernel.h>
++#include <linux/init.h>
++#include <linux/module.h>
++#include <linux/interrupt.h>
++#include <linux/platform_device.h>
++#include <linux/io.h>
++#include <linux/slab.h>
++
++#include <linux/platform_data/edma.h>
++
++/* Offsets matching "struct edmacc_param" */
++#define PARM_OPT		0x00
++#define PARM_SRC		0x04
++#define PARM_A_B_CNT		0x08
++#define PARM_DST		0x0c
++#define PARM_SRC_DST_BIDX	0x10
++#define PARM_LINK_BCNTRLD	0x14
++#define PARM_SRC_DST_CIDX	0x18
++#define PARM_CCNT		0x1c
++
++#define PARM_SIZE		0x20
++
++/* Offsets for EDMA CC global channel registers and their shadows */
++#define SH_ER		0x00	/* 64 bits */
++#define SH_ECR		0x08	/* 64 bits */
++#define SH_ESR		0x10	/* 64 bits */
++#define SH_CER		0x18	/* 64 bits */
++#define SH_EER		0x20	/* 64 bits */
++#define SH_EECR		0x28	/* 64 bits */
++#define SH_EESR		0x30	/* 64 bits */
++#define SH_SER		0x38	/* 64 bits */
++#define SH_SECR		0x40	/* 64 bits */
++#define SH_IER		0x50	/* 64 bits */
++#define SH_IECR		0x58	/* 64 bits */
++#define SH_IESR		0x60	/* 64 bits */
++#define SH_IPR		0x68	/* 64 bits */
++#define SH_ICR		0x70	/* 64 bits */
++#define SH_IEVAL	0x78
++#define SH_QER		0x80
++#define SH_QEER		0x84
++#define SH_QEECR	0x88
++#define SH_QEESR	0x8c
++#define SH_QSER		0x90
++#define SH_QSECR	0x94
++#define SH_SIZE		0x200
++
++/* Offsets for EDMA CC global registers */
++#define EDMA_REV	0x0000
++#define EDMA_CCCFG	0x0004
++#define EDMA_QCHMAP	0x0200	/* 8 registers */
++#define EDMA_DMAQNUM	0x0240	/* 8 registers (4 on OMAP-L1xx) */
++#define EDMA_QDMAQNUM	0x0260
++#define EDMA_QUETCMAP	0x0280
++#define EDMA_QUEPRI	0x0284
++#define EDMA_EMR	0x0300	/* 64 bits */
++#define EDMA_EMCR	0x0308	/* 64 bits */
++#define EDMA_QEMR	0x0310
++#define EDMA_QEMCR	0x0314
++#define EDMA_CCERR	0x0318
++#define EDMA_CCERRCLR	0x031c
++#define EDMA_EEVAL	0x0320
++#define EDMA_DRAE	0x0340	/* 4 x 64 bits*/
++#define EDMA_QRAE	0x0380	/* 4 registers */
++#define EDMA_QUEEVTENTRY	0x0400	/* 2 x 16 registers */
++#define EDMA_QSTAT	0x0600	/* 2 registers */
++#define EDMA_QWMTHRA	0x0620
++#define EDMA_QWMTHRB	0x0624
++#define EDMA_CCSTAT	0x0640
++
++#define EDMA_M		0x1000	/* global channel registers */
++#define EDMA_ECR	0x1008
++#define EDMA_ECRH	0x100C
++#define EDMA_SHADOW0	0x2000	/* 4 regions shadowing global channels */
++#define EDMA_PARM	0x4000	/* 128 param entries */
++
++#define PARM_OFFSET(param_no)	(EDMA_PARM + ((param_no) << 5))
++
++#define EDMA_DCHMAP	0x0100  /* 64 registers */
++#define CHMAP_EXIST	BIT(24)
++
++#define EDMA_MAX_DMACH           64
++#define EDMA_MAX_PARAMENTRY     512
++
++/*****************************************************************************/
++
++static void __iomem *edmacc_regs_base[EDMA_MAX_CC];
++
++static inline unsigned int edma_read(unsigned ctlr, int offset)
++{
++	return (unsigned int)__raw_readl(edmacc_regs_base[ctlr] + offset);
++}
++
++static inline void edma_write(unsigned ctlr, int offset, int val)
++{
++	__raw_writel(val, edmacc_regs_base[ctlr] + offset);
++}
++static inline void edma_modify(unsigned ctlr, int offset, unsigned and,
++		unsigned or)
++{
++	unsigned val = edma_read(ctlr, offset);
++	val &= and;
++	val |= or;
++	edma_write(ctlr, offset, val);
++}
++static inline void edma_and(unsigned ctlr, int offset, unsigned and)
++{
++	unsigned val = edma_read(ctlr, offset);
++	val &= and;
++	edma_write(ctlr, offset, val);
++}
++static inline void edma_or(unsigned ctlr, int offset, unsigned or)
++{
++	unsigned val = edma_read(ctlr, offset);
++	val |= or;
++	edma_write(ctlr, offset, val);
++}
++static inline unsigned int edma_read_array(unsigned ctlr, int offset, int i)
++{
++	return edma_read(ctlr, offset + (i << 2));
++}
++static inline void edma_write_array(unsigned ctlr, int offset, int i,
++		unsigned val)
++{
++	edma_write(ctlr, offset + (i << 2), val);
++}
++static inline void edma_modify_array(unsigned ctlr, int offset, int i,
++		unsigned and, unsigned or)
++{
++	edma_modify(ctlr, offset + (i << 2), and, or);
++}
++static inline void edma_or_array(unsigned ctlr, int offset, int i, unsigned or)
++{
++	edma_or(ctlr, offset + (i << 2), or);
++}
++static inline void edma_or_array2(unsigned ctlr, int offset, int i, int j,
++		unsigned or)
++{
++	edma_or(ctlr, offset + ((i*2 + j) << 2), or);
++}
++static inline void edma_write_array2(unsigned ctlr, int offset, int i, int j,
++		unsigned val)
++{
++	edma_write(ctlr, offset + ((i*2 + j) << 2), val);
++}
++static inline unsigned int edma_shadow0_read(unsigned ctlr, int offset)
++{
++	return edma_read(ctlr, EDMA_SHADOW0 + offset);
++}
++static inline unsigned int edma_shadow0_read_array(unsigned ctlr, int offset,
++		int i)
++{
++	return edma_read(ctlr, EDMA_SHADOW0 + offset + (i << 2));
++}
++static inline void edma_shadow0_write(unsigned ctlr, int offset, unsigned val)
++{
++	edma_write(ctlr, EDMA_SHADOW0 + offset, val);
++}
++static inline void edma_shadow0_write_array(unsigned ctlr, int offset, int i,
++		unsigned val)
++{
++	edma_write(ctlr, EDMA_SHADOW0 + offset + (i << 2), val);
++}
++static inline unsigned int edma_parm_read(unsigned ctlr, int offset,
++		int param_no)
++{
++	return edma_read(ctlr, EDMA_PARM + offset + (param_no << 5));
++}
++static inline void edma_parm_write(unsigned ctlr, int offset, int param_no,
++		unsigned val)
++{
++	edma_write(ctlr, EDMA_PARM + offset + (param_no << 5), val);
++}
++static inline void edma_parm_modify(unsigned ctlr, int offset, int param_no,
++		unsigned and, unsigned or)
++{
++	edma_modify(ctlr, EDMA_PARM + offset + (param_no << 5), and, or);
++}
++static inline void edma_parm_and(unsigned ctlr, int offset, int param_no,
++		unsigned and)
++{
++	edma_and(ctlr, EDMA_PARM + offset + (param_no << 5), and);
++}
++static inline void edma_parm_or(unsigned ctlr, int offset, int param_no,
++		unsigned or)
++{
++	edma_or(ctlr, EDMA_PARM + offset + (param_no << 5), or);
++}
++
++static inline void set_bits(int offset, int len, unsigned long *p)
++{
++	for (; len > 0; len--)
++		set_bit(offset + (len - 1), p);
++}
++
++static inline void clear_bits(int offset, int len, unsigned long *p)
++{
++	for (; len > 0; len--)
++		clear_bit(offset + (len - 1), p);
++}
++
++/*****************************************************************************/
++
++/* actual number of DMA channels and slots on this silicon */
++struct edma {
++	/* how many dma resources of each type */
++	unsigned	num_channels;
++	unsigned	num_region;
++	unsigned	num_slots;
++	unsigned	num_tc;
++	unsigned	num_cc;
++	enum dma_event_q 	default_queue;
++
++	/* list of channels with no event trigger; terminated by "-1" */
++	const s8	*noevent;
++
++	/* The edma_inuse bit for each PaRAM slot is clear unless the
++	 * channel is in use ... by ARM or DSP, for QDMA, or whatever.
++	 */
++	DECLARE_BITMAP(edma_inuse, EDMA_MAX_PARAMENTRY);
++
++	/* The edma_unused bit for each channel is clear unless
++	 * it is not being used on this platform. It uses a bit
++	 * of SOC-specific initialization code.
++	 */
++	DECLARE_BITMAP(edma_unused, EDMA_MAX_DMACH);
++
++	unsigned	irq_res_start;
++	unsigned	irq_res_end;
++
++	struct dma_interrupt_data {
++		void (*callback)(unsigned channel, unsigned short ch_status,
++				void *data);
++		void *data;
++	} intr_data[EDMA_MAX_DMACH];
++};
++
++static struct edma *edma_cc[EDMA_MAX_CC];
++static int arch_num_cc;
++
++/* dummy param set used to (re)initialize parameter RAM slots */
++static const struct edmacc_param dummy_paramset = {
++	.link_bcntrld = 0xffff,
++	.ccnt = 1,
++};
++
++/*****************************************************************************/
++
++static void map_dmach_queue(unsigned ctlr, unsigned ch_no,
++		enum dma_event_q queue_no)
++{
++	int bit = (ch_no & 0x7) * 4;
++
++	/* default to low priority queue */
++	if (queue_no == EVENTQ_DEFAULT)
++		queue_no = edma_cc[ctlr]->default_queue;
++
++	queue_no &= 7;
++	edma_modify_array(ctlr, EDMA_DMAQNUM, (ch_no >> 3),
++			~(0x7 << bit), queue_no << bit);
++}
++
++static void __init map_queue_tc(unsigned ctlr, int queue_no, int tc_no)
++{
++	int bit = queue_no * 4;
++	edma_modify(ctlr, EDMA_QUETCMAP, ~(0x7 << bit), ((tc_no & 0x7) << bit));
++}
++
++static void __init assign_priority_to_queue(unsigned ctlr, int queue_no,
++		int priority)
++{
++	int bit = queue_no * 4;
++	edma_modify(ctlr, EDMA_QUEPRI, ~(0x7 << bit),
++			((priority & 0x7) << bit));
++}
++
++/**
++ * map_dmach_param - Maps channel number to param entry number
++ *
++ * This maps the dma channel number to param entry number. In
++ * other words using the DMA channel mapping registers a param entry
++ * can be mapped to any channel
++ *
++ * Callers are responsible for ensuring the channel mapping logic is
++ * included in that particular EDMA variant (e.g. dm646x)
++ *
++ */
++static void __init map_dmach_param(unsigned ctlr)
++{
++	int i;
++	for (i = 0; i < EDMA_MAX_DMACH; i++)
++		edma_write_array(ctlr, EDMA_DCHMAP , i , (i << 5));
++}
++
++static inline void
++setup_dma_interrupt(unsigned lch,
++	void (*callback)(unsigned channel, u16 ch_status, void *data),
++	void *data)
++{
++	unsigned ctlr;
++
++	ctlr = EDMA_CTLR(lch);
++	lch = EDMA_CHAN_SLOT(lch);
++
++	if (!callback)
++		edma_shadow0_write_array(ctlr, SH_IECR, lch >> 5,
++				BIT(lch & 0x1f));
++
++	edma_cc[ctlr]->intr_data[lch].callback = callback;
++	edma_cc[ctlr]->intr_data[lch].data = data;
++
++	if (callback) {
++		edma_shadow0_write_array(ctlr, SH_ICR, lch >> 5,
++				BIT(lch & 0x1f));
++		edma_shadow0_write_array(ctlr, SH_IESR, lch >> 5,
++				BIT(lch & 0x1f));
++	}
++}
++
++static int irq2ctlr(int irq)
++{
++	if (irq >= edma_cc[0]->irq_res_start && irq <= edma_cc[0]->irq_res_end)
++		return 0;
++	else if (irq >= edma_cc[1]->irq_res_start &&
++		irq <= edma_cc[1]->irq_res_end)
++		return 1;
++
++	return -1;
++}
++
++/******************************************************************************
++ *
++ * DMA interrupt handler
++ *
++ *****************************************************************************/
++static irqreturn_t dma_irq_handler(int irq, void *data)
++{
++	int ctlr;
++	u32 sh_ier;
++	u32 sh_ipr;
++	u32 bank;
++
++	ctlr = irq2ctlr(irq);
++	if (ctlr < 0)
++		return IRQ_NONE;
++
++	dev_dbg(data, "dma_irq_handler\n");
++
++	sh_ipr = edma_shadow0_read_array(ctlr, SH_IPR, 0);
++	if (!sh_ipr) {
++		sh_ipr = edma_shadow0_read_array(ctlr, SH_IPR, 1);
++		if (!sh_ipr)
++			return IRQ_NONE;
++		sh_ier = edma_shadow0_read_array(ctlr, SH_IER, 1);
++		bank = 1;
++	} else {
++		sh_ier = edma_shadow0_read_array(ctlr, SH_IER, 0);
++		bank = 0;
++	}
++
++	do {
++		u32 slot;
++		u32 channel;
++
++		dev_dbg(data, "IPR%d %08x\n", bank, sh_ipr);
++
++		slot = __ffs(sh_ipr);
++		sh_ipr &= ~(BIT(slot));
++
++		if (sh_ier & BIT(slot)) {
++			channel = (bank << 5) | slot;
++			/* Clear the corresponding IPR bits */
++			edma_shadow0_write_array(ctlr, SH_ICR, bank,
++					BIT(slot));
++			if (edma_cc[ctlr]->intr_data[channel].callback)
++				edma_cc[ctlr]->intr_data[channel].callback(
++					channel, DMA_COMPLETE,
++					edma_cc[ctlr]->intr_data[channel].data);
++		}
++	} while (sh_ipr);
++
++	edma_shadow0_write(ctlr, SH_IEVAL, 1);
++	return IRQ_HANDLED;
++}
++
++/******************************************************************************
++ *
++ * DMA error interrupt handler
++ *
++ *****************************************************************************/
++static irqreturn_t dma_ccerr_handler(int irq, void *data)
++{
++	int i;
++	int ctlr;
++	unsigned int cnt = 0;
++
++	ctlr = irq2ctlr(irq);
++	if (ctlr < 0)
++		return IRQ_NONE;
++
++	dev_dbg(data, "dma_ccerr_handler\n");
++
++	if ((edma_read_array(ctlr, EDMA_EMR, 0) == 0) &&
++	    (edma_read_array(ctlr, EDMA_EMR, 1) == 0) &&
++	    (edma_read(ctlr, EDMA_QEMR) == 0) &&
++	    (edma_read(ctlr, EDMA_CCERR) == 0))
++		return IRQ_NONE;
++
++	while (1) {
++		int j = -1;
++		if (edma_read_array(ctlr, EDMA_EMR, 0))
++			j = 0;
++		else if (edma_read_array(ctlr, EDMA_EMR, 1))
++			j = 1;
++		if (j >= 0) {
++			dev_dbg(data, "EMR%d %08x\n", j,
++					edma_read_array(ctlr, EDMA_EMR, j));
++			for (i = 0; i < 32; i++) {
++				int k = (j << 5) + i;
++				if (edma_read_array(ctlr, EDMA_EMR, j) &
++							BIT(i)) {
++					/* Clear the corresponding EMR bits */
++					edma_write_array(ctlr, EDMA_EMCR, j,
++							BIT(i));
++					/* Clear any SER */
++					edma_shadow0_write_array(ctlr, SH_SECR,
++								j, BIT(i));
++					if (edma_cc[ctlr]->intr_data[k].
++								callback) {
++						edma_cc[ctlr]->intr_data[k].
++						callback(k,
++						DMA_CC_ERROR,
++						edma_cc[ctlr]->intr_data
++						[k].data);
++					}
++				}
++			}
++		} else if (edma_read(ctlr, EDMA_QEMR)) {
++			dev_dbg(data, "QEMR %02x\n",
++				edma_read(ctlr, EDMA_QEMR));
++			for (i = 0; i < 8; i++) {
++				if (edma_read(ctlr, EDMA_QEMR) & BIT(i)) {
++					/* Clear the corresponding IPR bits */
++					edma_write(ctlr, EDMA_QEMCR, BIT(i));
++					edma_shadow0_write(ctlr, SH_QSECR,
++								BIT(i));
++
++					/* NOTE:  not reported!! */
++				}
++			}
++		} else if (edma_read(ctlr, EDMA_CCERR)) {
++			dev_dbg(data, "CCERR %08x\n",
++				edma_read(ctlr, EDMA_CCERR));
++			/* FIXME:  CCERR.BIT(16) ignored!  much better
++			 * to just write CCERRCLR with CCERR value...
++			 */
++			for (i = 0; i < 8; i++) {
++				if (edma_read(ctlr, EDMA_CCERR) & BIT(i)) {
++					/* Clear the corresponding IPR bits */
++					edma_write(ctlr, EDMA_CCERRCLR, BIT(i));
++
++					/* NOTE:  not reported!! */
++				}
++			}
++		}
++		if ((edma_read_array(ctlr, EDMA_EMR, 0) == 0) &&
++		    (edma_read_array(ctlr, EDMA_EMR, 1) == 0) &&
++		    (edma_read(ctlr, EDMA_QEMR) == 0) &&
++		    (edma_read(ctlr, EDMA_CCERR) == 0))
++			break;
++		cnt++;
++		if (cnt > 10)
++			break;
++	}
++	edma_write(ctlr, EDMA_EEVAL, 1);
++	return IRQ_HANDLED;
++}
++
++/******************************************************************************
++ *
++ * Transfer controller error interrupt handlers
++ *
++ *****************************************************************************/
++
++#define tc_errs_handled	false	/* disabled as long as they're NOPs */
++
++static irqreturn_t dma_tc0err_handler(int irq, void *data)
++{
++	dev_dbg(data, "dma_tc0err_handler\n");
++	return IRQ_HANDLED;
++}
++
++static irqreturn_t dma_tc1err_handler(int irq, void *data)
++{
++	dev_dbg(data, "dma_tc1err_handler\n");
++	return IRQ_HANDLED;
++}
++
++static int reserve_contiguous_slots(int ctlr, unsigned int id,
++				     unsigned int num_slots,
++				     unsigned int start_slot)
++{
++	int i, j;
++	unsigned int count = num_slots;
++	int stop_slot = start_slot;
++	DECLARE_BITMAP(tmp_inuse, EDMA_MAX_PARAMENTRY);
++
++	for (i = start_slot; i < edma_cc[ctlr]->num_slots; ++i) {
++		j = EDMA_CHAN_SLOT(i);
++		if (!test_and_set_bit(j, edma_cc[ctlr]->edma_inuse)) {
++			/* Record our current beginning slot */
++			if (count == num_slots)
++				stop_slot = i;
++
++			count--;
++			set_bit(j, tmp_inuse);
++
++			if (count == 0)
++				break;
++		} else {
++			clear_bit(j, tmp_inuse);
++
++			if (id == EDMA_CONT_PARAMS_FIXED_EXACT) {
++				stop_slot = i;
++				break;
++			} else {
++				count = num_slots;
++			}
++		}
++	}
++
++	/*
++	 * We have to clear any bits that we set
++	 * if we run out of parameter RAM slots, i.e. we do find a set
++	 * of contiguous parameter RAM slots but do not find the exact number
++	 * requested as we may reach the total number of parameter RAM slots
++	 */
++	if (i == edma_cc[ctlr]->num_slots)
++		stop_slot = i;
++
++	j = start_slot;
++	for_each_set_bit_from(j, tmp_inuse, stop_slot)
++		clear_bit(j, edma_cc[ctlr]->edma_inuse);
++
++	if (count)
++		return -EBUSY;
++
++	for (j = i - num_slots + 1; j <= i; ++j)
++		memcpy_toio(edmacc_regs_base[ctlr] + PARM_OFFSET(j),
++			&dummy_paramset, PARM_SIZE);
++
++	return EDMA_CTLR_CHAN(ctlr, i - num_slots + 1);
++}
++
++static int prepare_unused_channel_list(struct device *dev, void *data)
++{
++	struct platform_device *pdev = to_platform_device(dev);
++	int i, ctlr;
++
++	for (i = 0; i < pdev->num_resources; i++) {
++		if ((pdev->resource[i].flags & IORESOURCE_DMA) &&
++				(int)pdev->resource[i].start >= 0) {
++			ctlr = EDMA_CTLR(pdev->resource[i].start);
++			clear_bit(EDMA_CHAN_SLOT(pdev->resource[i].start),
++					edma_cc[ctlr]->edma_unused);
++		}
++	}
++
++	return 0;
++}
++
++/*-----------------------------------------------------------------------*/
++
++static bool unused_chan_list_done;
++
++/* Resource alloc/free:  dma channels, parameter RAM slots */
++
++/**
++ * edma_alloc_channel - allocate DMA channel and paired parameter RAM
++ * @channel: specific channel to allocate; negative for "any unmapped channel"
++ * @callback: optional; to be issued on DMA completion or errors
++ * @data: passed to callback
++ * @eventq_no: an EVENTQ_* constant, used to choose which Transfer
++ *	Controller (TC) executes requests using this channel.  Use
++ *	EVENTQ_DEFAULT unless you really need a high priority queue.
++ *
++ * This allocates a DMA channel and its associated parameter RAM slot.
++ * The parameter RAM is initialized to hold a dummy transfer.
++ *
++ * Normal use is to pass a specific channel number as @channel, to make
++ * use of hardware events mapped to that channel.  When the channel will
++ * be used only for software triggering or event chaining, channels not
++ * mapped to hardware events (or mapped to unused events) are preferable.
++ *
++ * DMA transfers start from a channel using edma_start(), or by
++ * chaining.  When the transfer described in that channel's parameter RAM
++ * slot completes, that slot's data may be reloaded through a link.
++ *
++ * DMA errors are only reported to the @callback associated with the
++ * channel driving that transfer, but transfer completion callbacks can
++ * be sent to another channel under control of the TCC field in
++ * the option word of the transfer's parameter RAM set.  Drivers must not
++ * use DMA transfer completion callbacks for channels they did not allocate.
++ * (The same applies to TCC codes used in transfer chaining.)
++ *
++ * Returns the number of the channel, else negative errno.
++ */
++int edma_alloc_channel(int channel,
++		void (*callback)(unsigned channel, u16 ch_status, void *data),
++		void *data,
++		enum dma_event_q eventq_no)
++{
++	unsigned i, done = 0, ctlr = 0;
++	int ret = 0;
++
++	if (!unused_chan_list_done) {
++		/*
++		 * Scan all the platform devices to find out the EDMA channels
++		 * used and clear them in the unused list, making the rest
++		 * available for ARM usage.
++		 */
++		ret = bus_for_each_dev(&platform_bus_type, NULL, NULL,
++				prepare_unused_channel_list);
++		if (ret < 0)
++			return ret;
++
++		unused_chan_list_done = true;
++	}
++
++	if (channel >= 0) {
++		ctlr = EDMA_CTLR(channel);
++		channel = EDMA_CHAN_SLOT(channel);
++	}
++
++	if (channel < 0) {
++		for (i = 0; i < arch_num_cc; i++) {
++			channel = 0;
++			for (;;) {
++				channel = find_next_bit(edma_cc[i]->edma_unused,
++						edma_cc[i]->num_channels,
++						channel);
++				if (channel == edma_cc[i]->num_channels)
++					break;
++				if (!test_and_set_bit(channel,
++						edma_cc[i]->edma_inuse)) {
++					done = 1;
++					ctlr = i;
++					break;
++				}
++				channel++;
++			}
++			if (done)
++				break;
++		}
++		if (!done)
++			return -ENOMEM;
++	} else if (channel >= edma_cc[ctlr]->num_channels) {
++		return -EINVAL;
++	} else if (test_and_set_bit(channel, edma_cc[ctlr]->edma_inuse)) {
++		return -EBUSY;
++	}
++
++	/* ensure access through shadow region 0 */
++	edma_or_array2(ctlr, EDMA_DRAE, 0, channel >> 5, BIT(channel & 0x1f));
++
++	/* ensure no events are pending */
++	edma_stop(EDMA_CTLR_CHAN(ctlr, channel));
++	memcpy_toio(edmacc_regs_base[ctlr] + PARM_OFFSET(channel),
++			&dummy_paramset, PARM_SIZE);
++
++	if (callback)
++		setup_dma_interrupt(EDMA_CTLR_CHAN(ctlr, channel),
++					callback, data);
++
++	map_dmach_queue(ctlr, channel, eventq_no);
++
++	return EDMA_CTLR_CHAN(ctlr, channel);
++}
++EXPORT_SYMBOL(edma_alloc_channel);
++
++
++/**
++ * edma_free_channel - deallocate DMA channel
++ * @channel: dma channel returned from edma_alloc_channel()
++ *
++ * This deallocates the DMA channel and associated parameter RAM slot
++ * allocated by edma_alloc_channel().
++ *
++ * Callers are responsible for ensuring the channel is inactive, and
++ * will not be reactivated by linking, chaining, or software calls to
++ * edma_start().
++ */
++void edma_free_channel(unsigned channel)
++{
++	unsigned ctlr;
++
++	ctlr = EDMA_CTLR(channel);
++	channel = EDMA_CHAN_SLOT(channel);
++
++	if (channel >= edma_cc[ctlr]->num_channels)
++		return;
++
++	setup_dma_interrupt(channel, NULL, NULL);
++	/* REVISIT should probably take out of shadow region 0 */
++
++	memcpy_toio(edmacc_regs_base[ctlr] + PARM_OFFSET(channel),
++			&dummy_paramset, PARM_SIZE);
++	clear_bit(channel, edma_cc[ctlr]->edma_inuse);
++}
++EXPORT_SYMBOL(edma_free_channel);
++
++/**
++ * edma_alloc_slot - allocate DMA parameter RAM
++ * @slot: specific slot to allocate; negative for "any unused slot"
++ *
++ * This allocates a parameter RAM slot, initializing it to hold a
++ * dummy transfer.  Slots allocated using this routine have not been
++ * mapped to a hardware DMA channel, and will normally be used by
++ * linking to them from a slot associated with a DMA channel.
++ *
++ * Normal use is to pass EDMA_SLOT_ANY as the @slot, but specific
++ * slots may be allocated on behalf of DSP firmware.
++ *
++ * Returns the number of the slot, else negative errno.
++ */
++int edma_alloc_slot(unsigned ctlr, int slot)
++{
++	if (!edma_cc[ctlr])
++		return -EINVAL;
++
++	if (slot >= 0)
++		slot = EDMA_CHAN_SLOT(slot);
++
++	if (slot < 0) {
++		slot = edma_cc[ctlr]->num_channels;
++		for (;;) {
++			slot = find_next_zero_bit(edma_cc[ctlr]->edma_inuse,
++					edma_cc[ctlr]->num_slots, slot);
++			if (slot == edma_cc[ctlr]->num_slots)
++				return -ENOMEM;
++			if (!test_and_set_bit(slot, edma_cc[ctlr]->edma_inuse))
++				break;
++		}
++	} else if (slot < edma_cc[ctlr]->num_channels ||
++			slot >= edma_cc[ctlr]->num_slots) {
++		return -EINVAL;
++	} else if (test_and_set_bit(slot, edma_cc[ctlr]->edma_inuse)) {
++		return -EBUSY;
++	}
++
++	memcpy_toio(edmacc_regs_base[ctlr] + PARM_OFFSET(slot),
++			&dummy_paramset, PARM_SIZE);
++
++	return EDMA_CTLR_CHAN(ctlr, slot);
++}
++EXPORT_SYMBOL(edma_alloc_slot);
++
++/**
++ * edma_free_slot - deallocate DMA parameter RAM
++ * @slot: parameter RAM slot returned from edma_alloc_slot()
++ *
++ * This deallocates the parameter RAM slot allocated by edma_alloc_slot().
++ * Callers are responsible for ensuring the slot is inactive, and will
++ * not be activated.
++ */
++void edma_free_slot(unsigned slot)
++{
++	unsigned ctlr;
++
++	ctlr = EDMA_CTLR(slot);
++	slot = EDMA_CHAN_SLOT(slot);
++
++	if (slot < edma_cc[ctlr]->num_channels ||
++		slot >= edma_cc[ctlr]->num_slots)
++		return;
++
++	memcpy_toio(edmacc_regs_base[ctlr] + PARM_OFFSET(slot),
++			&dummy_paramset, PARM_SIZE);
++	clear_bit(slot, edma_cc[ctlr]->edma_inuse);
++}
++EXPORT_SYMBOL(edma_free_slot);
++
++
++/**
++ * edma_alloc_cont_slots - alloc contiguous parameter RAM slots
++ * The API will return the starting point of a set of
++ * contiguous parameter RAM slots that have been requested
++ *
++ * @id: can only be EDMA_CONT_PARAMS_ANY or EDMA_CONT_PARAMS_FIXED_EXACT
++ * or EDMA_CONT_PARAMS_FIXED_NOT_EXACT
++ * @count: number of contiguous Parameter RAM slots
++ * @slot  - the start value of Parameter RAM slot that should be passed if id
++ * is EDMA_CONT_PARAMS_FIXED_EXACT or EDMA_CONT_PARAMS_FIXED_NOT_EXACT
++ *
++ * If id is EDMA_CONT_PARAMS_ANY then the API starts looking for a set of
++ * contiguous Parameter RAM slots from parameter RAM 64 in the case of
++ * DaVinci SOCs and 32 in the case of DA8xx SOCs.
++ *
++ * If id is EDMA_CONT_PARAMS_FIXED_EXACT then the API starts looking for a
++ * set of contiguous parameter RAM slots from the "slot" that is passed as an
++ * argument to the API.
++ *
++ * If id is EDMA_CONT_PARAMS_FIXED_NOT_EXACT then the API initially starts
++ * looking for a set of contiguous parameter RAM slots from the "slot"
++ * that is passed as an argument to the API. On failure the API will try to
++ * find a set of contiguous Parameter RAM slots from the remaining Parameter
++ * RAM slots
++ */
++int edma_alloc_cont_slots(unsigned ctlr, unsigned int id, int slot, int count)
++{
++	/*
++	 * The start slot requested should be greater than
++	 * the number of channels and lesser than the total number
++	 * of slots
++	 */
++	if ((id != EDMA_CONT_PARAMS_ANY) &&
++		(slot < edma_cc[ctlr]->num_channels ||
++		slot >= edma_cc[ctlr]->num_slots))
++		return -EINVAL;
++
++	/*
++	 * The number of parameter RAM slots requested cannot be less than 1
++	 * and cannot be more than the number of slots minus the number of
++	 * channels
++	 */
++	if (count < 1 || count >
++		(edma_cc[ctlr]->num_slots - edma_cc[ctlr]->num_channels))
++		return -EINVAL;
++
++	switch (id) {
++	case EDMA_CONT_PARAMS_ANY:
++		return reserve_contiguous_slots(ctlr, id, count,
++						 edma_cc[ctlr]->num_channels);
++	case EDMA_CONT_PARAMS_FIXED_EXACT:
++	case EDMA_CONT_PARAMS_FIXED_NOT_EXACT:
++		return reserve_contiguous_slots(ctlr, id, count, slot);
++	default:
++		return -EINVAL;
++	}
++
++}
++EXPORT_SYMBOL(edma_alloc_cont_slots);
++
++/**
++ * edma_free_cont_slots - deallocate DMA parameter RAM slots
++ * @slot: first parameter RAM of a set of parameter RAM slots to be freed
++ * @count: the number of contiguous parameter RAM slots to be freed
++ *
++ * This deallocates the parameter RAM slots allocated by
++ * edma_alloc_cont_slots.
++ * Callers/applications need to keep track of sets of contiguous
++ * parameter RAM slots that have been allocated using the edma_alloc_cont_slots
++ * API.
++ * Callers are responsible for ensuring the slots are inactive, and will
++ * not be activated.
++ */
++int edma_free_cont_slots(unsigned slot, int count)
++{
++	unsigned ctlr, slot_to_free;
++	int i;
++
++	ctlr = EDMA_CTLR(slot);
++	slot = EDMA_CHAN_SLOT(slot);
++
++	if (slot < edma_cc[ctlr]->num_channels ||
++		slot >= edma_cc[ctlr]->num_slots ||
++		count < 1)
++		return -EINVAL;
++
++	for (i = slot; i < slot + count; ++i) {
++		ctlr = EDMA_CTLR(i);
++		slot_to_free = EDMA_CHAN_SLOT(i);
++
++		memcpy_toio(edmacc_regs_base[ctlr] + PARM_OFFSET(slot_to_free),
++			&dummy_paramset, PARM_SIZE);
++		clear_bit(slot_to_free, edma_cc[ctlr]->edma_inuse);
++	}
++
++	return 0;
++}
++EXPORT_SYMBOL(edma_free_cont_slots);
++
++/*-----------------------------------------------------------------------*/
++
++/* Parameter RAM operations (i) -- read/write partial slots */
++
++/**
++ * edma_set_src - set initial DMA source address in parameter RAM slot
++ * @slot: parameter RAM slot being configured
++ * @src_port: physical address of source (memory, controller FIFO, etc)
++ * @addressMode: INCR, except in very rare cases
++ * @fifoWidth: ignored unless @addressMode is FIFO, else specifies the
++ *	width to use when addressing the fifo (e.g. W8BIT, W32BIT)
++ *
++ * Note that the source address is modified during the DMA transfer
++ * according to edma_set_src_index().
++ */
++void edma_set_src(unsigned slot, dma_addr_t src_port,
++				enum address_mode mode, enum fifo_width width)
++{
++	unsigned ctlr;
++
++	ctlr = EDMA_CTLR(slot);
++	slot = EDMA_CHAN_SLOT(slot);
++
++	if (slot < edma_cc[ctlr]->num_slots) {
++		unsigned int i = edma_parm_read(ctlr, PARM_OPT, slot);
++
++		if (mode) {
++			/* set SAM and program FWID */
++			i = (i & ~(EDMA_FWID)) | (SAM | ((width & 0x7) << 8));
++		} else {
++			/* clear SAM */
++			i &= ~SAM;
++		}
++		edma_parm_write(ctlr, PARM_OPT, slot, i);
++
++		/* set the source port address
++		   in source register of param structure */
++		edma_parm_write(ctlr, PARM_SRC, slot, src_port);
++	}
++}
++EXPORT_SYMBOL(edma_set_src);
++
++/**
++ * edma_set_dest - set initial DMA destination address in parameter RAM slot
++ * @slot: parameter RAM slot being configured
++ * @dest_port: physical address of destination (memory, controller FIFO, etc)
++ * @addressMode: INCR, except in very rare cases
++ * @fifoWidth: ignored unless @addressMode is FIFO, else specifies the
++ *	width to use when addressing the fifo (e.g. W8BIT, W32BIT)
++ *
++ * Note that the destination address is modified during the DMA transfer
++ * according to edma_set_dest_index().
++ */
++void edma_set_dest(unsigned slot, dma_addr_t dest_port,
++				 enum address_mode mode, enum fifo_width width)
++{
++	unsigned ctlr;
++
++	ctlr = EDMA_CTLR(slot);
++	slot = EDMA_CHAN_SLOT(slot);
++
++	if (slot < edma_cc[ctlr]->num_slots) {
++		unsigned int i = edma_parm_read(ctlr, PARM_OPT, slot);
++
++		if (mode) {
++			/* set DAM and program FWID */
++			i = (i & ~(EDMA_FWID)) | (DAM | ((width & 0x7) << 8));
++		} else {
++			/* clear DAM */
++			i &= ~DAM;
++		}
++		edma_parm_write(ctlr, PARM_OPT, slot, i);
++		/* set the destination port address
++		   in dest register of param structure */
++		edma_parm_write(ctlr, PARM_DST, slot, dest_port);
++	}
++}
++EXPORT_SYMBOL(edma_set_dest);
++
++/**
++ * edma_get_position - returns the current transfer points
++ * @slot: parameter RAM slot being examined
++ * @src: pointer to source port position
++ * @dst: pointer to destination port position
++ *
++ * Returns current source and destination addresses for a particular
++ * parameter RAM slot.  Its channel should not be active when this is called.
++ */
++void edma_get_position(unsigned slot, dma_addr_t *src, dma_addr_t *dst)
++{
++	struct edmacc_param temp;
++	unsigned ctlr;
++
++	ctlr = EDMA_CTLR(slot);
++	slot = EDMA_CHAN_SLOT(slot);
++
++	edma_read_slot(EDMA_CTLR_CHAN(ctlr, slot), &temp);
++	if (src != NULL)
++		*src = temp.src;
++	if (dst != NULL)
++		*dst = temp.dst;
++}
++EXPORT_SYMBOL(edma_get_position);
++
++/**
++ * edma_set_src_index - configure DMA source address indexing
++ * @slot: parameter RAM slot being configured
++ * @src_bidx: byte offset between source arrays in a frame
++ * @src_cidx: byte offset between source frames in a block
++ *
++ * Offsets are specified to support either contiguous or discontiguous
++ * memory transfers, or repeated access to a hardware register, as needed.
++ * When accessing hardware registers, both offsets are normally zero.
++ */
++void edma_set_src_index(unsigned slot, s16 src_bidx, s16 src_cidx)
++{
++	unsigned ctlr;
++
++	ctlr = EDMA_CTLR(slot);
++	slot = EDMA_CHAN_SLOT(slot);
++
++	if (slot < edma_cc[ctlr]->num_slots) {
++		edma_parm_modify(ctlr, PARM_SRC_DST_BIDX, slot,
++				0xffff0000, src_bidx);
++		edma_parm_modify(ctlr, PARM_SRC_DST_CIDX, slot,
++				0xffff0000, src_cidx);
++	}
++}
++EXPORT_SYMBOL(edma_set_src_index);
++
++/**
++ * edma_set_dest_index - configure DMA destination address indexing
++ * @slot: parameter RAM slot being configured
++ * @dest_bidx: byte offset between destination arrays in a frame
++ * @dest_cidx: byte offset between destination frames in a block
++ *
++ * Offsets are specified to support either contiguous or discontiguous
++ * memory transfers, or repeated access to a hardware register, as needed.
++ * When accessing hardware registers, both offsets are normally zero.
++ */
++void edma_set_dest_index(unsigned slot, s16 dest_bidx, s16 dest_cidx)
++{
++	unsigned ctlr;
++
++	ctlr = EDMA_CTLR(slot);
++	slot = EDMA_CHAN_SLOT(slot);
++
++	if (slot < edma_cc[ctlr]->num_slots) {
++		edma_parm_modify(ctlr, PARM_SRC_DST_BIDX, slot,
++				0x0000ffff, dest_bidx << 16);
++		edma_parm_modify(ctlr, PARM_SRC_DST_CIDX, slot,
++				0x0000ffff, dest_cidx << 16);
++	}
++}
++EXPORT_SYMBOL(edma_set_dest_index);
++
++/**
++ * edma_set_transfer_params - configure DMA transfer parameters
++ * @slot: parameter RAM slot being configured
++ * @acnt: how many bytes per array (at least one)
++ * @bcnt: how many arrays per frame (at least one)
++ * @ccnt: how many frames per block (at least one)
++ * @bcnt_rld: used only for A-Synchronized transfers; this specifies
++ *	the value to reload into bcnt when it decrements to zero
++ * @sync_mode: ASYNC or ABSYNC
++ *
++ * See the EDMA3 documentation to understand how to configure and link
++ * transfers using the fields in PaRAM slots.  If you are not doing it
++ * all at once with edma_write_slot(), you will use this routine
++ * plus two calls each for source and destination, setting the initial
++ * address and saying how to index that address.
++ *
++ * An example of an A-Synchronized transfer is a serial link using a
++ * single word shift register.  In that case, @acnt would be equal to
++ * that word size; the serial controller issues a DMA synchronization
++ * event to transfer each word, and memory access by the DMA transfer
++ * controller will be word-at-a-time.
++ *
++ * An example of an AB-Synchronized transfer is a device using a FIFO.
++ * In that case, @acnt equals the FIFO width and @bcnt equals its depth.
++ * The controller with the FIFO issues DMA synchronization events when
++ * the FIFO threshold is reached, and the DMA transfer controller will
++ * transfer one frame to (or from) the FIFO.  It will probably use
++ * efficient burst modes to access memory.
++ */
++void edma_set_transfer_params(unsigned slot,
++		u16 acnt, u16 bcnt, u16 ccnt,
++		u16 bcnt_rld, enum sync_dimension sync_mode)
++{
++	unsigned ctlr;
++
++	ctlr = EDMA_CTLR(slot);
++	slot = EDMA_CHAN_SLOT(slot);
++
++	if (slot < edma_cc[ctlr]->num_slots) {
++		edma_parm_modify(ctlr, PARM_LINK_BCNTRLD, slot,
++				0x0000ffff, bcnt_rld << 16);
++		if (sync_mode == ASYNC)
++			edma_parm_and(ctlr, PARM_OPT, slot, ~SYNCDIM);
++		else
++			edma_parm_or(ctlr, PARM_OPT, slot, SYNCDIM);
++		/* Set the acount, bcount, ccount registers */
++		edma_parm_write(ctlr, PARM_A_B_CNT, slot, (bcnt << 16) | acnt);
++		edma_parm_write(ctlr, PARM_CCNT, slot, ccnt);
++	}
++}
++EXPORT_SYMBOL(edma_set_transfer_params);
++
++/**
++ * edma_link - link one parameter RAM slot to another
++ * @from: parameter RAM slot originating the link
++ * @to: parameter RAM slot which is the link target
++ *
++ * The originating slot should not be part of any active DMA transfer.
++ */
++void edma_link(unsigned from, unsigned to)
++{
++	unsigned ctlr_from, ctlr_to;
++
++	ctlr_from = EDMA_CTLR(from);
++	from = EDMA_CHAN_SLOT(from);
++	ctlr_to = EDMA_CTLR(to);
++	to = EDMA_CHAN_SLOT(to);
++
++	if (from >= edma_cc[ctlr_from]->num_slots)
++		return;
++	if (to >= edma_cc[ctlr_to]->num_slots)
++		return;
++	edma_parm_modify(ctlr_from, PARM_LINK_BCNTRLD, from, 0xffff0000,
++				PARM_OFFSET(to));
++}
++EXPORT_SYMBOL(edma_link);
++
++/**
++ * edma_unlink - cut link from one parameter RAM slot
++ * @from: parameter RAM slot originating the link
++ *
++ * The originating slot should not be part of any active DMA transfer.
++ * Its link is set to 0xffff.
++ */
++void edma_unlink(unsigned from)
++{
++	unsigned ctlr;
++
++	ctlr = EDMA_CTLR(from);
++	from = EDMA_CHAN_SLOT(from);
++
++	if (from >= edma_cc[ctlr]->num_slots)
++		return;
++	edma_parm_or(ctlr, PARM_LINK_BCNTRLD, from, 0xffff);
++}
++EXPORT_SYMBOL(edma_unlink);
++
++/*-----------------------------------------------------------------------*/
++
++/* Parameter RAM operations (ii) -- read/write whole parameter sets */
++
++/**
++ * edma_write_slot - write parameter RAM data for slot
++ * @slot: number of parameter RAM slot being modified
++ * @param: data to be written into parameter RAM slot
++ *
++ * Use this to assign all parameters of a transfer at once.  This
++ * allows more efficient setup of transfers than issuing multiple
++ * calls to set up those parameters in small pieces, and provides
++ * complete control over all transfer options.
++ */
++void edma_write_slot(unsigned slot, const struct edmacc_param *param)
++{
++	unsigned ctlr;
++
++	ctlr = EDMA_CTLR(slot);
++	slot = EDMA_CHAN_SLOT(slot);
++
++	if (slot >= edma_cc[ctlr]->num_slots)
++		return;
++	memcpy_toio(edmacc_regs_base[ctlr] + PARM_OFFSET(slot), param,
++			PARM_SIZE);
++}
++EXPORT_SYMBOL(edma_write_slot);
++
++/**
++ * edma_read_slot - read parameter RAM data from slot
++ * @slot: number of parameter RAM slot being copied
++ * @param: where to store copy of parameter RAM data
++ *
++ * Use this to read data from a parameter RAM slot, perhaps to
++ * save them as a template for later reuse.
++ */
++void edma_read_slot(unsigned slot, struct edmacc_param *param)
++{
++	unsigned ctlr;
++
++	ctlr = EDMA_CTLR(slot);
++	slot = EDMA_CHAN_SLOT(slot);
++
++	if (slot >= edma_cc[ctlr]->num_slots)
++		return;
++	memcpy_fromio(param, edmacc_regs_base[ctlr] + PARM_OFFSET(slot),
++			PARM_SIZE);
++}
++EXPORT_SYMBOL(edma_read_slot);
++
++/*-----------------------------------------------------------------------*/
++
++/* Various EDMA channel control operations */
++
++/**
++ * edma_pause - pause dma on a channel
++ * @channel: on which edma_start() has been called
++ *
++ * This temporarily disables EDMA hardware events on the specified channel,
++ * preventing them from triggering new transfers on its behalf
++ */
++void edma_pause(unsigned channel)
++{
++	unsigned ctlr;
++
++	ctlr = EDMA_CTLR(channel);
++	channel = EDMA_CHAN_SLOT(channel);
++
++	if (channel < edma_cc[ctlr]->num_channels) {
++		unsigned int mask = BIT(channel & 0x1f);
++
++		edma_shadow0_write_array(ctlr, SH_EECR, channel >> 5, mask);
++	}
++}
++EXPORT_SYMBOL(edma_pause);
++
++/**
++ * edma_resume - resumes dma on a paused channel
++ * @channel: on which edma_pause() has been called
++ *
++ * This re-enables EDMA hardware events on the specified channel.
++ */
++void edma_resume(unsigned channel)
++{
++	unsigned ctlr;
++
++	ctlr = EDMA_CTLR(channel);
++	channel = EDMA_CHAN_SLOT(channel);
++
++	if (channel < edma_cc[ctlr]->num_channels) {
++		unsigned int mask = BIT(channel & 0x1f);
++
++		edma_shadow0_write_array(ctlr, SH_EESR, channel >> 5, mask);
++	}
++}
++EXPORT_SYMBOL(edma_resume);
++
++/**
++ * edma_start - start dma on a channel
++ * @channel: channel being activated
++ *
++ * Channels with event associations will be triggered by their hardware
++ * events, and channels without such associations will be triggered by
++ * software.  (At this writing there is no interface for using software
++ * triggers except with channels that don't support hardware triggers.)
++ *
++ * Returns zero on success, else negative errno.
++ */
++int edma_start(unsigned channel)
++{
++	unsigned ctlr;
++
++	ctlr = EDMA_CTLR(channel);
++	channel = EDMA_CHAN_SLOT(channel);
++
++	if (channel < edma_cc[ctlr]->num_channels) {
++		int j = channel >> 5;
++		unsigned int mask = BIT(channel & 0x1f);
++
++		/* EDMA channels without event association */
++		if (test_bit(channel, edma_cc[ctlr]->edma_unused)) {
++			pr_debug("EDMA: ESR%d %08x\n", j,
++				edma_shadow0_read_array(ctlr, SH_ESR, j));
++			edma_shadow0_write_array(ctlr, SH_ESR, j, mask);
++			return 0;
++		}
++
++		/* EDMA channel with event association */
++		pr_debug("EDMA: ER%d %08x\n", j,
++			edma_shadow0_read_array(ctlr, SH_ER, j));
++		/* Clear any pending event or error */
++		edma_write_array(ctlr, EDMA_ECR, j, mask);
++		edma_write_array(ctlr, EDMA_EMCR, j, mask);
++		/* Clear any SER */
++		edma_shadow0_write_array(ctlr, SH_SECR, j, mask);
++		edma_shadow0_write_array(ctlr, SH_EESR, j, mask);
++		pr_debug("EDMA: EER%d %08x\n", j,
++			edma_shadow0_read_array(ctlr, SH_EER, j));
++		return 0;
++	}
++
++	return -EINVAL;
++}
++EXPORT_SYMBOL(edma_start);
++
++/**
++ * edma_stop - stops dma on the channel passed
++ * @channel: channel being deactivated
++ *
++ * Any active transfer on @channel is paused and
++ * all pending hardware events are cleared.  The current transfer
++ * may not be resumed, and the channel's Parameter RAM should be
++ * reinitialized before being reused.
++ */
++void edma_stop(unsigned channel)
++{
++	unsigned ctlr;
++
++	ctlr = EDMA_CTLR(channel);
++	channel = EDMA_CHAN_SLOT(channel);
++
++	if (channel < edma_cc[ctlr]->num_channels) {
++		int j = channel >> 5;
++		unsigned int mask = BIT(channel & 0x1f);
++
++		edma_shadow0_write_array(ctlr, SH_EECR, j, mask);
++		edma_shadow0_write_array(ctlr, SH_ECR, j, mask);
++		edma_shadow0_write_array(ctlr, SH_SECR, j, mask);
++		edma_write_array(ctlr, EDMA_EMCR, j, mask);
++
++		pr_debug("EDMA: EER%d %08x\n", j,
++				edma_shadow0_read_array(ctlr, SH_EER, j));
++
++		/* REVISIT:  consider guarding against inappropriate event
++		 * chaining by overwriting with dummy_paramset.
++		 */
++	}
++}
++EXPORT_SYMBOL(edma_stop);
++
++/******************************************************************************
++ *
++ * It cleans the ParamEntry and brings EDMA back to its initial state if media
++ * has been removed before EDMA has finished. It is useful for removable media.
++ * Arguments:
++ *      ch_no     - channel no
++ *
++ * Return: zero on success, or corresponding error no on failure
++ *
++ * FIXME this should not be needed ... edma_stop() should suffice.
++ *
++ *****************************************************************************/
++
++void edma_clean_channel(unsigned channel)
++{
++	unsigned ctlr;
++
++	ctlr = EDMA_CTLR(channel);
++	channel = EDMA_CHAN_SLOT(channel);
++
++	if (channel < edma_cc[ctlr]->num_channels) {
++		int j = (channel >> 5);
++		unsigned int mask = BIT(channel & 0x1f);
++
++		pr_debug("EDMA: EMR%d %08x\n", j,
++				edma_read_array(ctlr, EDMA_EMR, j));
++		edma_shadow0_write_array(ctlr, SH_ECR, j, mask);
++		/* Clear the corresponding EMR bits */
++		edma_write_array(ctlr, EDMA_EMCR, j, mask);
++		/* Clear any SER */
++		edma_shadow0_write_array(ctlr, SH_SECR, j, mask);
++		edma_write(ctlr, EDMA_CCERRCLR, BIT(16) | BIT(1) | BIT(0));
++	}
++}
++EXPORT_SYMBOL(edma_clean_channel);
++
++/*
++ * edma_clear_event - clear an outstanding event on the DMA channel
++ * Arguments:
++ *	channel - channel number
++ */
++void edma_clear_event(unsigned channel)
++{
++	unsigned ctlr;
++
++	ctlr = EDMA_CTLR(channel);
++	channel = EDMA_CHAN_SLOT(channel);
++
++	if (channel >= edma_cc[ctlr]->num_channels)
++		return;
++	if (channel < 32)
++		edma_write(ctlr, EDMA_ECR, BIT(channel));
++	else
++		edma_write(ctlr, EDMA_ECRH, BIT(channel - 32));
++}
++EXPORT_SYMBOL(edma_clear_event);
++
++/*-----------------------------------------------------------------------*/
++
++static int __init edma_probe(struct platform_device *pdev)
++{
++	struct edma_soc_info	**info = pdev->dev.platform_data;
++	const s8		(*queue_priority_mapping)[2];
++	const s8		(*queue_tc_mapping)[2];
++	int			i, j, off, ln, found = 0;
++	int			status = -1;
++	const s16		(*rsv_chans)[2];
++	const s16		(*rsv_slots)[2];
++	int			irq[EDMA_MAX_CC] = {0, 0};
++	int			err_irq[EDMA_MAX_CC] = {0, 0};
++	struct resource		*r[EDMA_MAX_CC] = {NULL};
++	resource_size_t		len[EDMA_MAX_CC];
++	char			res_name[10];
++	char			irq_name[10];
++
++	if (!info)
++		return -ENODEV;
++
++	for (j = 0; j < EDMA_MAX_CC; j++) {
++		sprintf(res_name, "edma_cc%d", j);
++		r[j] = platform_get_resource_byname(pdev, IORESOURCE_MEM,
++						res_name);
++		if (!r[j] || !info[j]) {
++			if (found)
++				break;
++			else
++				return -ENODEV;
++		} else {
++			found = 1;
++		}
++
++		len[j] = resource_size(r[j]);
++
++		r[j] = request_mem_region(r[j]->start, len[j],
++			dev_name(&pdev->dev));
++		if (!r[j]) {
++			status = -EBUSY;
++			goto fail1;
++		}
++
++		edmacc_regs_base[j] = ioremap(r[j]->start, len[j]);
++		if (!edmacc_regs_base[j]) {
++			status = -EBUSY;
++			goto fail1;
++		}
++
++		edma_cc[j] = kzalloc(sizeof(struct edma), GFP_KERNEL);
++		if (!edma_cc[j]) {
++			status = -ENOMEM;
++			goto fail1;
++		}
++
++		edma_cc[j]->num_channels = min_t(unsigned, info[j]->n_channel,
++							EDMA_MAX_DMACH);
++		edma_cc[j]->num_slots = min_t(unsigned, info[j]->n_slot,
++							EDMA_MAX_PARAMENTRY);
++		edma_cc[j]->num_cc = min_t(unsigned, info[j]->n_cc,
++							EDMA_MAX_CC);
++
++		edma_cc[j]->default_queue = info[j]->default_queue;
++
++		dev_dbg(&pdev->dev, "DMA REG BASE ADDR=%p\n",
++			edmacc_regs_base[j]);
++
++		for (i = 0; i < edma_cc[j]->num_slots; i++)
++			memcpy_toio(edmacc_regs_base[j] + PARM_OFFSET(i),
++					&dummy_paramset, PARM_SIZE);
++
++		/* Mark all channels as unused */
++		memset(edma_cc[j]->edma_unused, 0xff,
++			sizeof(edma_cc[j]->edma_unused));
++
++		if (info[j]->rsv) {
++
++			/* Clear the reserved channels in unused list */
++			rsv_chans = info[j]->rsv->rsv_chans;
++			if (rsv_chans) {
++				for (i = 0; rsv_chans[i][0] != -1; i++) {
++					off = rsv_chans[i][0];
++					ln = rsv_chans[i][1];
++					clear_bits(off, ln,
++						edma_cc[j]->edma_unused);
++				}
++			}
++
++			/* Set the reserved slots in inuse list */
++			rsv_slots = info[j]->rsv->rsv_slots;
++			if (rsv_slots) {
++				for (i = 0; rsv_slots[i][0] != -1; i++) {
++					off = rsv_slots[i][0];
++					ln = rsv_slots[i][1];
++					set_bits(off, ln,
++						edma_cc[j]->edma_inuse);
++				}
++			}
++		}
++
++		sprintf(irq_name, "edma%d", j);
++		irq[j] = platform_get_irq_byname(pdev, irq_name);
++		edma_cc[j]->irq_res_start = irq[j];
++		status = request_irq(irq[j], dma_irq_handler, 0, "edma",
++					&pdev->dev);
++		if (status < 0) {
++			dev_dbg(&pdev->dev, "request_irq %d failed --> %d\n",
++				irq[j], status);
++			goto fail;
++		}
++
++		sprintf(irq_name, "edma%d_err", j);
++		err_irq[j] = platform_get_irq_byname(pdev, irq_name);
++		edma_cc[j]->irq_res_end = err_irq[j];
++		status = request_irq(err_irq[j], dma_ccerr_handler, 0,
++					"edma_error", &pdev->dev);
++		if (status < 0) {
++			dev_dbg(&pdev->dev, "request_irq %d failed --> %d\n",
++				err_irq[j], status);
++			goto fail;
++		}
++
++		for (i = 0; i < edma_cc[j]->num_channels; i++)
++			map_dmach_queue(j, i, info[j]->default_queue);
++
++		queue_tc_mapping = info[j]->queue_tc_mapping;
++		queue_priority_mapping = info[j]->queue_priority_mapping;
++
++		/* Event queue to TC mapping */
++		for (i = 0; queue_tc_mapping[i][0] != -1; i++)
++			map_queue_tc(j, queue_tc_mapping[i][0],
++					queue_tc_mapping[i][1]);
++
++		/* Event queue priority mapping */
++		for (i = 0; queue_priority_mapping[i][0] != -1; i++)
++			assign_priority_to_queue(j,
++						queue_priority_mapping[i][0],
++						queue_priority_mapping[i][1]);
++
++		/* Map the channel to param entry if channel mapping logic
++		 * exists
++		 */
++		if (edma_read(j, EDMA_CCCFG) & CHMAP_EXIST)
++			map_dmach_param(j);
++
++		for (i = 0; i < info[j]->n_region; i++) {
++			edma_write_array2(j, EDMA_DRAE, i, 0, 0x0);
++			edma_write_array2(j, EDMA_DRAE, i, 1, 0x0);
++			edma_write_array(j, EDMA_QRAE, i, 0x0);
++		}
++		arch_num_cc++;
++	}
++
++	if (tc_errs_handled) {
++		status = request_irq(IRQ_TCERRINT0, dma_tc0err_handler, 0,
++					"edma_tc0", &pdev->dev);
++		if (status < 0) {
++			dev_dbg(&pdev->dev, "request_irq %d failed --> %d\n",
++				IRQ_TCERRINT0, status);
++			return status;
++		}
++		status = request_irq(IRQ_TCERRINT, dma_tc1err_handler, 0,
++					"edma_tc1", &pdev->dev);
++		if (status < 0) {
++			dev_dbg(&pdev->dev, "request_irq %d --> %d\n",
++				IRQ_TCERRINT, status);
++			return status;
++		}
++	}
++
++	return 0;
++
++fail:
++	for (i = 0; i < EDMA_MAX_CC; i++) {
++		if (err_irq[i])
++			free_irq(err_irq[i], &pdev->dev);
++		if (irq[i])
++			free_irq(irq[i], &pdev->dev);
++	}
++fail1:
++	for (i = 0; i < EDMA_MAX_CC; i++) {
++		if (r[i])
++			release_mem_region(r[i]->start, len[i]);
++		if (edmacc_regs_base[i])
++			iounmap(edmacc_regs_base[i]);
++		kfree(edma_cc[i]);
++	}
++	return status;
++}
++
++
++static struct platform_driver edma_driver = {
++	.driver.name	= "edma",
++};
++
++static int __init edma_init(void)
++{
++	return platform_driver_probe(&edma_driver, edma_probe);
++}
++arch_initcall(edma_init);
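The edma_probe() added above consumes board-supplied platform data: an array of edma_soc_info pointers, memory resources named "edma_cc%d", IRQs named "edma%d"/"edma%d_err", and -1-terminated mapping tables. A minimal sketch of such platform data follows, assuming only the field and resource names the probe actually reads (n_channel, n_slot, n_cc, n_region, default_queue, queue_tc_mapping, queue_priority_mapping); the register range, IRQ numbers, and the EVENTQ_1 default queue are placeholders, and the exact edma_soc_info layout comes from the new <linux/platform_data/edma.h> header.

#include <linux/platform_device.h>
#include <linux/platform_data/edma.h>

/* -1 terminates each table, matching the loops in edma_probe() */
static const s8 cc0_queue_tc_mapping[][2]       = { {0, 0}, {1, 1}, {2, 2}, {-1, -1} };
static const s8 cc0_queue_priority_mapping[][2] = { {0, 3}, {1, 7}, {2, 7}, {-1, -1} };

static struct edma_soc_info cc0_info = {
	.n_channel		= 64,
	.n_slot			= 256,
	.n_cc			= 1,
	.n_region		= 4,
	.default_queue		= EVENTQ_1,		/* placeholder queue */
	.queue_tc_mapping	= cc0_queue_tc_mapping,
	.queue_priority_mapping	= cc0_queue_priority_mapping,
};

static struct edma_soc_info *edma_info[] = { &cc0_info };

static struct resource edma_resources[] = {
	{ .name = "edma_cc0", .start = 0x01c00000, .end = 0x01c0ffff,
	  .flags = IORESOURCE_MEM },			/* placeholder range */
	{ .name = "edma0",     .start = 12, .flags = IORESOURCE_IRQ },	/* placeholder IRQs */
	{ .name = "edma0_err", .start = 13, .flags = IORESOURCE_IRQ },
};

static struct platform_device edma_device = {
	.name			= "edma",
	.id			= -1,
	.dev.platform_data	= edma_info,
	.num_resources		= ARRAY_SIZE(edma_resources),
	.resource		= edma_resources,
};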
+diff --git a/arch/arm/mach-davinci/Makefile b/arch/arm/mach-davinci/Makefile
+index fb5c1aa..493a36b 100644
+--- a/arch/arm/mach-davinci/Makefile
++++ b/arch/arm/mach-davinci/Makefile
+@@ -5,7 +5,7 @@
+ 
+ # Common objects
+ obj-y 			:= time.o clock.o serial.o psc.o \
+-			   dma.o usb.o common.o sram.o aemif.o
++			   usb.o common.o sram.o aemif.o
+ 
+ obj-$(CONFIG_DAVINCI_MUX)		+= mux.o
+ 
+diff --git a/arch/arm/mach-davinci/board-tnetv107x-evm.c b/arch/arm/mach-davinci/board-tnetv107x-evm.c
+index 4f41602..10c9efd 100644
+--- a/arch/arm/mach-davinci/board-tnetv107x-evm.c
++++ b/arch/arm/mach-davinci/board-tnetv107x-evm.c
+@@ -26,12 +26,12 @@
+ #include <linux/input.h>
+ #include <linux/input/matrix_keypad.h>
+ #include <linux/spi/spi.h>
++#include <linux/platform_data/edma.h>
+ 
+ #include <asm/mach/arch.h>
+ #include <asm/mach-types.h>
+ 
+ #include <mach/irqs.h>
+-#include <mach/edma.h>
+ #include <mach/mux.h>
+ #include <mach/cp_intc.h>
+ #include <mach/tnetv107x.h>
+diff --git a/arch/arm/mach-davinci/davinci.h b/arch/arm/mach-davinci/davinci.h
+index 12d544b..d26a6bc 100644
+--- a/arch/arm/mach-davinci/davinci.h
++++ b/arch/arm/mach-davinci/davinci.h
+@@ -23,9 +23,9 @@
+ #include <linux/platform_device.h>
+ #include <linux/spi/spi.h>
+ #include <linux/platform_data/davinci_asp.h>
++#include <linux/platform_data/edma.h>
+ #include <linux/platform_data/keyscan-davinci.h>
+ #include <mach/hardware.h>
+-#include <mach/edma.h>
+ 
+ #include <media/davinci/vpfe_capture.h>
+ #include <media/davinci/vpif_types.h>
+diff --git a/arch/arm/mach-davinci/devices-tnetv107x.c b/arch/arm/mach-davinci/devices-tnetv107x.c
+index 773ab07..ba37760 100644
+--- a/arch/arm/mach-davinci/devices-tnetv107x.c
++++ b/arch/arm/mach-davinci/devices-tnetv107x.c
+@@ -18,10 +18,10 @@
+ #include <linux/dma-mapping.h>
+ #include <linux/clk.h>
+ #include <linux/slab.h>
++#include <linux/platform_data/edma.h>
+ 
+ #include <mach/common.h>
+ #include <mach/irqs.h>
+-#include <mach/edma.h>
+ #include <mach/tnetv107x.h>
+ 
+ #include "clock.h"
+diff --git a/arch/arm/mach-davinci/devices.c b/arch/arm/mach-davinci/devices.c
+index 4c48a36..ca0c7b3 100644
+--- a/arch/arm/mach-davinci/devices.c
++++ b/arch/arm/mach-davinci/devices.c
+@@ -19,9 +19,10 @@
+ #include <mach/irqs.h>
+ #include <mach/cputype.h>
+ #include <mach/mux.h>
+-#include <mach/edma.h>
+ #include <linux/platform_data/mmc-davinci.h>
+ #include <mach/time.h>
++#include <linux/platform_data/edma.h>
++
+ 
+ #include "davinci.h"
+ #include "clock.h"
+@@ -34,6 +35,9 @@
+ #define DM365_MMCSD0_BASE	     0x01D11000
+ #define DM365_MMCSD1_BASE	     0x01D00000
+ 
++#define DAVINCI_DMA_MMCRXEVT	26
++#define DAVINCI_DMA_MMCTXEVT	27
++
+ void __iomem  *davinci_sysmod_base;
+ 
+ void davinci_map_sysmod(void)
+diff --git a/arch/arm/mach-davinci/dm355.c b/arch/arm/mach-davinci/dm355.c
+index b49c3b7..53998d8 100644
+--- a/arch/arm/mach-davinci/dm355.c
++++ b/arch/arm/mach-davinci/dm355.c
+@@ -19,7 +19,6 @@
+ #include <asm/mach/map.h>
+ 
+ #include <mach/cputype.h>
+-#include <mach/edma.h>
+ #include <mach/psc.h>
+ #include <mach/mux.h>
+ #include <mach/irqs.h>
+@@ -28,6 +27,7 @@
+ #include <mach/common.h>
+ #include <linux/platform_data/spi-davinci.h>
+ #include <mach/gpio-davinci.h>
++#include <linux/platform_data/edma.h>
+ 
+ #include "davinci.h"
+ #include "clock.h"
+diff --git a/arch/arm/mach-davinci/dm365.c b/arch/arm/mach-davinci/dm365.c
+index 6c39805..9b41d33 100644
+--- a/arch/arm/mach-davinci/dm365.c
++++ b/arch/arm/mach-davinci/dm365.c
+@@ -18,11 +18,11 @@
+ #include <linux/platform_device.h>
+ #include <linux/dma-mapping.h>
+ #include <linux/spi/spi.h>
++#include <linux/platform_data/edma.h>
+ 
+ #include <asm/mach/map.h>
+ 
+ #include <mach/cputype.h>
+-#include <mach/edma.h>
+ #include <mach/psc.h>
+ #include <mach/mux.h>
+ #include <mach/irqs.h>
+diff --git a/arch/arm/mach-davinci/dm644x.c b/arch/arm/mach-davinci/dm644x.c
+index db1dd92..e8bf21f 100644
+--- a/arch/arm/mach-davinci/dm644x.c
++++ b/arch/arm/mach-davinci/dm644x.c
+@@ -12,11 +12,11 @@
+ #include <linux/clk.h>
+ #include <linux/serial_8250.h>
+ #include <linux/platform_device.h>
++#include <linux/platform_data/edma.h>
+ 
+ #include <asm/mach/map.h>
+ 
+ #include <mach/cputype.h>
+-#include <mach/edma.h>
+ #include <mach/irqs.h>
+ #include <mach/psc.h>
+ #include <mach/mux.h>
+diff --git a/arch/arm/mach-davinci/dm646x.c b/arch/arm/mach-davinci/dm646x.c
+index ac7b431..6d52a32 100644
+--- a/arch/arm/mach-davinci/dm646x.c
++++ b/arch/arm/mach-davinci/dm646x.c
+@@ -13,11 +13,11 @@
+ #include <linux/clk.h>
+ #include <linux/serial_8250.h>
+ #include <linux/platform_device.h>
++#include <linux/platform_data/edma.h>
+ 
+ #include <asm/mach/map.h>
+ 
+ #include <mach/cputype.h>
+-#include <mach/edma.h>
+ #include <mach/irqs.h>
+ #include <mach/psc.h>
+ #include <mach/mux.h>
+diff --git a/arch/arm/mach-davinci/dma.c b/arch/arm/mach-davinci/dma.c
+deleted file mode 100644
+index 45b7c71..0000000
+--- a/arch/arm/mach-davinci/dma.c
++++ /dev/null
+@@ -1,1591 +0,0 @@
+-/*
+- * EDMA3 support for DaVinci
+- *
+- * Copyright (C) 2006-2009 Texas Instruments.
+- *
+- * This program is free software; you can redistribute it and/or modify
+- * it under the terms of the GNU General Public License as published by
+- * the Free Software Foundation; either version 2 of the License, or
+- * (at your option) any later version.
+- *
+- * This program is distributed in the hope that it will be useful,
+- * but WITHOUT ANY WARRANTY; without even the implied warranty of
+- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+- * GNU General Public License for more details.
+- *
+- * You should have received a copy of the GNU General Public License
+- * along with this program; if not, write to the Free Software
+- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+- */
+-#include <linux/kernel.h>
+-#include <linux/init.h>
+-#include <linux/module.h>
+-#include <linux/interrupt.h>
+-#include <linux/platform_device.h>
+-#include <linux/io.h>
+-#include <linux/slab.h>
+-
+-#include <mach/edma.h>
+-
+-/* Offsets matching "struct edmacc_param" */
+-#define PARM_OPT		0x00
+-#define PARM_SRC		0x04
+-#define PARM_A_B_CNT		0x08
+-#define PARM_DST		0x0c
+-#define PARM_SRC_DST_BIDX	0x10
+-#define PARM_LINK_BCNTRLD	0x14
+-#define PARM_SRC_DST_CIDX	0x18
+-#define PARM_CCNT		0x1c
+-
+-#define PARM_SIZE		0x20
+-
+-/* Offsets for EDMA CC global channel registers and their shadows */
+-#define SH_ER		0x00	/* 64 bits */
+-#define SH_ECR		0x08	/* 64 bits */
+-#define SH_ESR		0x10	/* 64 bits */
+-#define SH_CER		0x18	/* 64 bits */
+-#define SH_EER		0x20	/* 64 bits */
+-#define SH_EECR		0x28	/* 64 bits */
+-#define SH_EESR		0x30	/* 64 bits */
+-#define SH_SER		0x38	/* 64 bits */
+-#define SH_SECR		0x40	/* 64 bits */
+-#define SH_IER		0x50	/* 64 bits */
+-#define SH_IECR		0x58	/* 64 bits */
+-#define SH_IESR		0x60	/* 64 bits */
+-#define SH_IPR		0x68	/* 64 bits */
+-#define SH_ICR		0x70	/* 64 bits */
+-#define SH_IEVAL	0x78
+-#define SH_QER		0x80
+-#define SH_QEER		0x84
+-#define SH_QEECR	0x88
+-#define SH_QEESR	0x8c
+-#define SH_QSER		0x90
+-#define SH_QSECR	0x94
+-#define SH_SIZE		0x200
+-
+-/* Offsets for EDMA CC global registers */
+-#define EDMA_REV	0x0000
+-#define EDMA_CCCFG	0x0004
+-#define EDMA_QCHMAP	0x0200	/* 8 registers */
+-#define EDMA_DMAQNUM	0x0240	/* 8 registers (4 on OMAP-L1xx) */
+-#define EDMA_QDMAQNUM	0x0260
+-#define EDMA_QUETCMAP	0x0280
+-#define EDMA_QUEPRI	0x0284
+-#define EDMA_EMR	0x0300	/* 64 bits */
+-#define EDMA_EMCR	0x0308	/* 64 bits */
+-#define EDMA_QEMR	0x0310
+-#define EDMA_QEMCR	0x0314
+-#define EDMA_CCERR	0x0318
+-#define EDMA_CCERRCLR	0x031c
+-#define EDMA_EEVAL	0x0320
+-#define EDMA_DRAE	0x0340	/* 4 x 64 bits*/
+-#define EDMA_QRAE	0x0380	/* 4 registers */
+-#define EDMA_QUEEVTENTRY	0x0400	/* 2 x 16 registers */
+-#define EDMA_QSTAT	0x0600	/* 2 registers */
+-#define EDMA_QWMTHRA	0x0620
+-#define EDMA_QWMTHRB	0x0624
+-#define EDMA_CCSTAT	0x0640
+-
+-#define EDMA_M		0x1000	/* global channel registers */
+-#define EDMA_ECR	0x1008
+-#define EDMA_ECRH	0x100C
+-#define EDMA_SHADOW0	0x2000	/* 4 regions shadowing global channels */
+-#define EDMA_PARM	0x4000	/* 128 param entries */
+-
+-#define PARM_OFFSET(param_no)	(EDMA_PARM + ((param_no) << 5))
+-
+-#define EDMA_DCHMAP	0x0100  /* 64 registers */
+-#define CHMAP_EXIST	BIT(24)
+-
+-#define EDMA_MAX_DMACH           64
+-#define EDMA_MAX_PARAMENTRY     512
+-
+-/*****************************************************************************/
+-
+-static void __iomem *edmacc_regs_base[EDMA_MAX_CC];
+-
+-static inline unsigned int edma_read(unsigned ctlr, int offset)
+-{
+-	return (unsigned int)__raw_readl(edmacc_regs_base[ctlr] + offset);
+-}
+-
+-static inline void edma_write(unsigned ctlr, int offset, int val)
+-{
+-	__raw_writel(val, edmacc_regs_base[ctlr] + offset);
+-}
+-static inline void edma_modify(unsigned ctlr, int offset, unsigned and,
+-		unsigned or)
+-{
+-	unsigned val = edma_read(ctlr, offset);
+-	val &= and;
+-	val |= or;
+-	edma_write(ctlr, offset, val);
+-}
+-static inline void edma_and(unsigned ctlr, int offset, unsigned and)
+-{
+-	unsigned val = edma_read(ctlr, offset);
+-	val &= and;
+-	edma_write(ctlr, offset, val);
+-}
+-static inline void edma_or(unsigned ctlr, int offset, unsigned or)
+-{
+-	unsigned val = edma_read(ctlr, offset);
+-	val |= or;
+-	edma_write(ctlr, offset, val);
+-}
+-static inline unsigned int edma_read_array(unsigned ctlr, int offset, int i)
+-{
+-	return edma_read(ctlr, offset + (i << 2));
+-}
+-static inline void edma_write_array(unsigned ctlr, int offset, int i,
+-		unsigned val)
+-{
+-	edma_write(ctlr, offset + (i << 2), val);
+-}
+-static inline void edma_modify_array(unsigned ctlr, int offset, int i,
+-		unsigned and, unsigned or)
+-{
+-	edma_modify(ctlr, offset + (i << 2), and, or);
+-}
+-static inline void edma_or_array(unsigned ctlr, int offset, int i, unsigned or)
+-{
+-	edma_or(ctlr, offset + (i << 2), or);
+-}
+-static inline void edma_or_array2(unsigned ctlr, int offset, int i, int j,
+-		unsigned or)
+-{
+-	edma_or(ctlr, offset + ((i*2 + j) << 2), or);
+-}
+-static inline void edma_write_array2(unsigned ctlr, int offset, int i, int j,
+-		unsigned val)
+-{
+-	edma_write(ctlr, offset + ((i*2 + j) << 2), val);
+-}
+-static inline unsigned int edma_shadow0_read(unsigned ctlr, int offset)
+-{
+-	return edma_read(ctlr, EDMA_SHADOW0 + offset);
+-}
+-static inline unsigned int edma_shadow0_read_array(unsigned ctlr, int offset,
+-		int i)
+-{
+-	return edma_read(ctlr, EDMA_SHADOW0 + offset + (i << 2));
+-}
+-static inline void edma_shadow0_write(unsigned ctlr, int offset, unsigned val)
+-{
+-	edma_write(ctlr, EDMA_SHADOW0 + offset, val);
+-}
+-static inline void edma_shadow0_write_array(unsigned ctlr, int offset, int i,
+-		unsigned val)
+-{
+-	edma_write(ctlr, EDMA_SHADOW0 + offset + (i << 2), val);
+-}
+-static inline unsigned int edma_parm_read(unsigned ctlr, int offset,
+-		int param_no)
+-{
+-	return edma_read(ctlr, EDMA_PARM + offset + (param_no << 5));
+-}
+-static inline void edma_parm_write(unsigned ctlr, int offset, int param_no,
+-		unsigned val)
+-{
+-	edma_write(ctlr, EDMA_PARM + offset + (param_no << 5), val);
+-}
+-static inline void edma_parm_modify(unsigned ctlr, int offset, int param_no,
+-		unsigned and, unsigned or)
+-{
+-	edma_modify(ctlr, EDMA_PARM + offset + (param_no << 5), and, or);
+-}
+-static inline void edma_parm_and(unsigned ctlr, int offset, int param_no,
+-		unsigned and)
+-{
+-	edma_and(ctlr, EDMA_PARM + offset + (param_no << 5), and);
+-}
+-static inline void edma_parm_or(unsigned ctlr, int offset, int param_no,
+-		unsigned or)
+-{
+-	edma_or(ctlr, EDMA_PARM + offset + (param_no << 5), or);
+-}
+-
+-static inline void set_bits(int offset, int len, unsigned long *p)
+-{
+-	for (; len > 0; len--)
+-		set_bit(offset + (len - 1), p);
+-}
+-
+-static inline void clear_bits(int offset, int len, unsigned long *p)
+-{
+-	for (; len > 0; len--)
+-		clear_bit(offset + (len - 1), p);
+-}
+-
+-/*****************************************************************************/
+-
+-/* actual number of DMA channels and slots on this silicon */
+-struct edma {
+-	/* how many dma resources of each type */
+-	unsigned	num_channels;
+-	unsigned	num_region;
+-	unsigned	num_slots;
+-	unsigned	num_tc;
+-	unsigned	num_cc;
+-	enum dma_event_q 	default_queue;
+-
+-	/* list of channels with no event trigger; terminated by "-1" */
+-	const s8	*noevent;
+-
+-	/* The edma_inuse bit for each PaRAM slot is clear unless the
+-	 * channel is in use ... by ARM or DSP, for QDMA, or whatever.
+-	 */
+-	DECLARE_BITMAP(edma_inuse, EDMA_MAX_PARAMENTRY);
+-
+-	/* The edma_unused bit for each channel is clear unless
+-	 * it is not being used on this platform. It uses a bit
+-	 * of SOC-specific initialization code.
+-	 */
+-	DECLARE_BITMAP(edma_unused, EDMA_MAX_DMACH);
+-
+-	unsigned	irq_res_start;
+-	unsigned	irq_res_end;
+-
+-	struct dma_interrupt_data {
+-		void (*callback)(unsigned channel, unsigned short ch_status,
+-				void *data);
+-		void *data;
+-	} intr_data[EDMA_MAX_DMACH];
+-};
+-
+-static struct edma *edma_cc[EDMA_MAX_CC];
+-static int arch_num_cc;
+-
+-/* dummy param set used to (re)initialize parameter RAM slots */
+-static const struct edmacc_param dummy_paramset = {
+-	.link_bcntrld = 0xffff,
+-	.ccnt = 1,
+-};
+-
+-/*****************************************************************************/
+-
+-static void map_dmach_queue(unsigned ctlr, unsigned ch_no,
+-		enum dma_event_q queue_no)
+-{
+-	int bit = (ch_no & 0x7) * 4;
+-
+-	/* default to low priority queue */
+-	if (queue_no == EVENTQ_DEFAULT)
+-		queue_no = edma_cc[ctlr]->default_queue;
+-
+-	queue_no &= 7;
+-	edma_modify_array(ctlr, EDMA_DMAQNUM, (ch_no >> 3),
+-			~(0x7 << bit), queue_no << bit);
+-}
+-
+-static void __init map_queue_tc(unsigned ctlr, int queue_no, int tc_no)
+-{
+-	int bit = queue_no * 4;
+-	edma_modify(ctlr, EDMA_QUETCMAP, ~(0x7 << bit), ((tc_no & 0x7) << bit));
+-}
+-
+-static void __init assign_priority_to_queue(unsigned ctlr, int queue_no,
+-		int priority)
+-{
+-	int bit = queue_no * 4;
+-	edma_modify(ctlr, EDMA_QUEPRI, ~(0x7 << bit),
+-			((priority & 0x7) << bit));
+-}
+-
+-/**
+- * map_dmach_param - Maps channel number to param entry number
+- *
+- * This maps the DMA channel number to the param entry number. In
+- * other words, using the DMA channel mapping registers, a param entry
+- * can be mapped to any channel.
+- *
+- * Callers are responsible for ensuring the channel mapping logic is
+- * included in that particular EDMA variant (Eg : dm646x)
+- *
+- */
+-static void __init map_dmach_param(unsigned ctlr)
+-{
+-	int i;
+-	for (i = 0; i < EDMA_MAX_DMACH; i++)
+-		edma_write_array(ctlr, EDMA_DCHMAP , i , (i << 5));
+-}
+-
+-static inline void
+-setup_dma_interrupt(unsigned lch,
+-	void (*callback)(unsigned channel, u16 ch_status, void *data),
+-	void *data)
+-{
+-	unsigned ctlr;
+-
+-	ctlr = EDMA_CTLR(lch);
+-	lch = EDMA_CHAN_SLOT(lch);
+-
+-	if (!callback)
+-		edma_shadow0_write_array(ctlr, SH_IECR, lch >> 5,
+-				BIT(lch & 0x1f));
+-
+-	edma_cc[ctlr]->intr_data[lch].callback = callback;
+-	edma_cc[ctlr]->intr_data[lch].data = data;
+-
+-	if (callback) {
+-		edma_shadow0_write_array(ctlr, SH_ICR, lch >> 5,
+-				BIT(lch & 0x1f));
+-		edma_shadow0_write_array(ctlr, SH_IESR, lch >> 5,
+-				BIT(lch & 0x1f));
+-	}
+-}
+-
+-static int irq2ctlr(int irq)
+-{
+-	if (irq >= edma_cc[0]->irq_res_start && irq <= edma_cc[0]->irq_res_end)
+-		return 0;
+-	else if (irq >= edma_cc[1]->irq_res_start &&
+-		irq <= edma_cc[1]->irq_res_end)
+-		return 1;
+-
+-	return -1;
+-}
+-
+-/******************************************************************************
+- *
+- * DMA interrupt handler
+- *
+- *****************************************************************************/
+-static irqreturn_t dma_irq_handler(int irq, void *data)
+-{
+-	int ctlr;
+-	u32 sh_ier;
+-	u32 sh_ipr;
+-	u32 bank;
+-
+-	ctlr = irq2ctlr(irq);
+-	if (ctlr < 0)
+-		return IRQ_NONE;
+-
+-	dev_dbg(data, "dma_irq_handler\n");
+-
+-	sh_ipr = edma_shadow0_read_array(ctlr, SH_IPR, 0);
+-	if (!sh_ipr) {
+-		sh_ipr = edma_shadow0_read_array(ctlr, SH_IPR, 1);
+-		if (!sh_ipr)
+-			return IRQ_NONE;
+-		sh_ier = edma_shadow0_read_array(ctlr, SH_IER, 1);
+-		bank = 1;
+-	} else {
+-		sh_ier = edma_shadow0_read_array(ctlr, SH_IER, 0);
+-		bank = 0;
+-	}
+-
+-	do {
+-		u32 slot;
+-		u32 channel;
+-
+-		dev_dbg(data, "IPR%d %08x\n", bank, sh_ipr);
+-
+-		slot = __ffs(sh_ipr);
+-		sh_ipr &= ~(BIT(slot));
+-
+-		if (sh_ier & BIT(slot)) {
+-			channel = (bank << 5) | slot;
+-			/* Clear the corresponding IPR bits */
+-			edma_shadow0_write_array(ctlr, SH_ICR, bank,
+-					BIT(slot));
+-			if (edma_cc[ctlr]->intr_data[channel].callback)
+-				edma_cc[ctlr]->intr_data[channel].callback(
+-					channel, DMA_COMPLETE,
+-					edma_cc[ctlr]->intr_data[channel].data);
+-		}
+-	} while (sh_ipr);
+-
+-	edma_shadow0_write(ctlr, SH_IEVAL, 1);
+-	return IRQ_HANDLED;
+-}
+-
+-/******************************************************************************
+- *
+- * DMA error interrupt handler
+- *
+- *****************************************************************************/
+-static irqreturn_t dma_ccerr_handler(int irq, void *data)
+-{
+-	int i;
+-	int ctlr;
+-	unsigned int cnt = 0;
+-
+-	ctlr = irq2ctlr(irq);
+-	if (ctlr < 0)
+-		return IRQ_NONE;
+-
+-	dev_dbg(data, "dma_ccerr_handler\n");
+-
+-	if ((edma_read_array(ctlr, EDMA_EMR, 0) == 0) &&
+-	    (edma_read_array(ctlr, EDMA_EMR, 1) == 0) &&
+-	    (edma_read(ctlr, EDMA_QEMR) == 0) &&
+-	    (edma_read(ctlr, EDMA_CCERR) == 0))
+-		return IRQ_NONE;
+-
+-	while (1) {
+-		int j = -1;
+-		if (edma_read_array(ctlr, EDMA_EMR, 0))
+-			j = 0;
+-		else if (edma_read_array(ctlr, EDMA_EMR, 1))
+-			j = 1;
+-		if (j >= 0) {
+-			dev_dbg(data, "EMR%d %08x\n", j,
+-					edma_read_array(ctlr, EDMA_EMR, j));
+-			for (i = 0; i < 32; i++) {
+-				int k = (j << 5) + i;
+-				if (edma_read_array(ctlr, EDMA_EMR, j) &
+-							BIT(i)) {
+-					/* Clear the corresponding EMR bits */
+-					edma_write_array(ctlr, EDMA_EMCR, j,
+-							BIT(i));
+-					/* Clear any SER */
+-					edma_shadow0_write_array(ctlr, SH_SECR,
+-								j, BIT(i));
+-					if (edma_cc[ctlr]->intr_data[k].
+-								callback) {
+-						edma_cc[ctlr]->intr_data[k].
+-						callback(k,
+-						DMA_CC_ERROR,
+-						edma_cc[ctlr]->intr_data
+-						[k].data);
+-					}
+-				}
+-			}
+-		} else if (edma_read(ctlr, EDMA_QEMR)) {
+-			dev_dbg(data, "QEMR %02x\n",
+-				edma_read(ctlr, EDMA_QEMR));
+-			for (i = 0; i < 8; i++) {
+-				if (edma_read(ctlr, EDMA_QEMR) & BIT(i)) {
+-					/* Clear the corresponding IPR bits */
+-					edma_write(ctlr, EDMA_QEMCR, BIT(i));
+-					edma_shadow0_write(ctlr, SH_QSECR,
+-								BIT(i));
+-
+-					/* NOTE:  not reported!! */
+-				}
+-			}
+-		} else if (edma_read(ctlr, EDMA_CCERR)) {
+-			dev_dbg(data, "CCERR %08x\n",
+-				edma_read(ctlr, EDMA_CCERR));
+-			/* FIXME:  CCERR.BIT(16) ignored!  much better
+-			 * to just write CCERRCLR with CCERR value...
+-			 */
+-			for (i = 0; i < 8; i++) {
+-				if (edma_read(ctlr, EDMA_CCERR) & BIT(i)) {
+-					/* Clear the corresponding IPR bits */
+-					edma_write(ctlr, EDMA_CCERRCLR, BIT(i));
+-
+-					/* NOTE:  not reported!! */
+-				}
+-			}
+-		}
+-		if ((edma_read_array(ctlr, EDMA_EMR, 0) == 0) &&
+-		    (edma_read_array(ctlr, EDMA_EMR, 1) == 0) &&
+-		    (edma_read(ctlr, EDMA_QEMR) == 0) &&
+-		    (edma_read(ctlr, EDMA_CCERR) == 0))
+-			break;
+-		cnt++;
+-		if (cnt > 10)
+-			break;
+-	}
+-	edma_write(ctlr, EDMA_EEVAL, 1);
+-	return IRQ_HANDLED;
+-}
+-
+-/******************************************************************************
+- *
+- * Transfer controller error interrupt handlers
+- *
+- *****************************************************************************/
+-
+-#define tc_errs_handled	false	/* disabled as long as they're NOPs */
+-
+-static irqreturn_t dma_tc0err_handler(int irq, void *data)
+-{
+-	dev_dbg(data, "dma_tc0err_handler\n");
+-	return IRQ_HANDLED;
+-}
+-
+-static irqreturn_t dma_tc1err_handler(int irq, void *data)
+-{
+-	dev_dbg(data, "dma_tc1err_handler\n");
+-	return IRQ_HANDLED;
+-}
+-
+-static int reserve_contiguous_slots(int ctlr, unsigned int id,
+-				     unsigned int num_slots,
+-				     unsigned int start_slot)
+-{
+-	int i, j;
+-	unsigned int count = num_slots;
+-	int stop_slot = start_slot;
+-	DECLARE_BITMAP(tmp_inuse, EDMA_MAX_PARAMENTRY);
+-
+-	for (i = start_slot; i < edma_cc[ctlr]->num_slots; ++i) {
+-		j = EDMA_CHAN_SLOT(i);
+-		if (!test_and_set_bit(j, edma_cc[ctlr]->edma_inuse)) {
+-			/* Record our current beginning slot */
+-			if (count == num_slots)
+-				stop_slot = i;
+-
+-			count--;
+-			set_bit(j, tmp_inuse);
+-
+-			if (count == 0)
+-				break;
+-		} else {
+-			clear_bit(j, tmp_inuse);
+-
+-			if (id == EDMA_CONT_PARAMS_FIXED_EXACT) {
+-				stop_slot = i;
+-				break;
+-			} else {
+-				count = num_slots;
+-			}
+-		}
+-	}
+-
+-	/*
+-	 * We have to clear any bits that we set if we run out of
+-	 * parameter RAM slots, i.e. we find a set of contiguous parameter
+-	 * RAM slots but do not find the exact number requested, since we
+-	 * may reach the total number of parameter RAM slots
+-	 */
+-	if (i == edma_cc[ctlr]->num_slots)
+-		stop_slot = i;
+-
+-	j = start_slot;
+-	for_each_set_bit_from(j, tmp_inuse, stop_slot)
+-		clear_bit(j, edma_cc[ctlr]->edma_inuse);
+-
+-	if (count)
+-		return -EBUSY;
+-
+-	for (j = i - num_slots + 1; j <= i; ++j)
+-		memcpy_toio(edmacc_regs_base[ctlr] + PARM_OFFSET(j),
+-			&dummy_paramset, PARM_SIZE);
+-
+-	return EDMA_CTLR_CHAN(ctlr, i - num_slots + 1);
+-}
+-
+-static int prepare_unused_channel_list(struct device *dev, void *data)
+-{
+-	struct platform_device *pdev = to_platform_device(dev);
+-	int i, ctlr;
+-
+-	for (i = 0; i < pdev->num_resources; i++) {
+-		if ((pdev->resource[i].flags & IORESOURCE_DMA) &&
+-				(int)pdev->resource[i].start >= 0) {
+-			ctlr = EDMA_CTLR(pdev->resource[i].start);
+-			clear_bit(EDMA_CHAN_SLOT(pdev->resource[i].start),
+-					edma_cc[ctlr]->edma_unused);
+-		}
+-	}
+-
+-	return 0;
+-}
+-
+-/*-----------------------------------------------------------------------*/
+-
+-static bool unused_chan_list_done;
+-
+-/* Resource alloc/free:  dma channels, parameter RAM slots */
+-
+-/**
+- * edma_alloc_channel - allocate DMA channel and paired parameter RAM
+- * @channel: specific channel to allocate; negative for "any unmapped channel"
+- * @callback: optional; to be issued on DMA completion or errors
+- * @data: passed to callback
+- * @eventq_no: an EVENTQ_* constant, used to choose which Transfer
+- *	Controller (TC) executes requests using this channel.  Use
+- *	EVENTQ_DEFAULT unless you really need a high priority queue.
+- *
+- * This allocates a DMA channel and its associated parameter RAM slot.
+- * The parameter RAM is initialized to hold a dummy transfer.
+- *
+- * Normal use is to pass a specific channel number as @channel, to make
+- * use of hardware events mapped to that channel.  When the channel will
+- * be used only for software triggering or event chaining, channels not
+- * mapped to hardware events (or mapped to unused events) are preferable.
+- *
+- * DMA transfers start from a channel using edma_start(), or by
+- * chaining.  When the transfer described in that channel's parameter RAM
+- * slot completes, that slot's data may be reloaded through a link.
+- *
+- * DMA errors are only reported to the @callback associated with the
+- * channel driving that transfer, but transfer completion callbacks can
+- * be sent to another channel under control of the TCC field in
+- * the option word of the transfer's parameter RAM set.  Drivers must not
+- * use DMA transfer completion callbacks for channels they did not allocate.
+- * (The same applies to TCC codes used in transfer chaining.)
+- *
+- * Returns the number of the channel, else negative errno.
+- */
+-int edma_alloc_channel(int channel,
+-		void (*callback)(unsigned channel, u16 ch_status, void *data),
+-		void *data,
+-		enum dma_event_q eventq_no)
+-{
+-	unsigned i, done = 0, ctlr = 0;
+-	int ret = 0;
+-
+-	if (!unused_chan_list_done) {
+-		/*
+-		 * Scan all the platform devices to find out the EDMA channels
+-		 * used and clear them in the unused list, making the rest
+-		 * available for ARM usage.
+-		 */
+-		ret = bus_for_each_dev(&platform_bus_type, NULL, NULL,
+-				prepare_unused_channel_list);
+-		if (ret < 0)
+-			return ret;
+-
+-		unused_chan_list_done = true;
+-	}
+-
+-	if (channel >= 0) {
+-		ctlr = EDMA_CTLR(channel);
+-		channel = EDMA_CHAN_SLOT(channel);
+-	}
+-
+-	if (channel < 0) {
+-		for (i = 0; i < arch_num_cc; i++) {
+-			channel = 0;
+-			for (;;) {
+-				channel = find_next_bit(edma_cc[i]->edma_unused,
+-						edma_cc[i]->num_channels,
+-						channel);
+-				if (channel == edma_cc[i]->num_channels)
+-					break;
+-				if (!test_and_set_bit(channel,
+-						edma_cc[i]->edma_inuse)) {
+-					done = 1;
+-					ctlr = i;
+-					break;
+-				}
+-				channel++;
+-			}
+-			if (done)
+-				break;
+-		}
+-		if (!done)
+-			return -ENOMEM;
+-	} else if (channel >= edma_cc[ctlr]->num_channels) {
+-		return -EINVAL;
+-	} else if (test_and_set_bit(channel, edma_cc[ctlr]->edma_inuse)) {
+-		return -EBUSY;
+-	}
+-
+-	/* ensure access through shadow region 0 */
+-	edma_or_array2(ctlr, EDMA_DRAE, 0, channel >> 5, BIT(channel & 0x1f));
+-
+-	/* ensure no events are pending */
+-	edma_stop(EDMA_CTLR_CHAN(ctlr, channel));
+-	memcpy_toio(edmacc_regs_base[ctlr] + PARM_OFFSET(channel),
+-			&dummy_paramset, PARM_SIZE);
+-
+-	if (callback)
+-		setup_dma_interrupt(EDMA_CTLR_CHAN(ctlr, channel),
+-					callback, data);
+-
+-	map_dmach_queue(ctlr, channel, eventq_no);
+-
+-	return EDMA_CTLR_CHAN(ctlr, channel);
+-}
+-EXPORT_SYMBOL(edma_alloc_channel);
+-
+-
+-/**
+- * edma_free_channel - deallocate DMA channel
+- * @channel: dma channel returned from edma_alloc_channel()
+- *
+- * This deallocates the DMA channel and associated parameter RAM slot
+- * allocated by edma_alloc_channel().
+- *
+- * Callers are responsible for ensuring the channel is inactive, and
+- * will not be reactivated by linking, chaining, or software calls to
+- * edma_start().
+- */
+-void edma_free_channel(unsigned channel)
+-{
+-	unsigned ctlr;
+-
+-	ctlr = EDMA_CTLR(channel);
+-	channel = EDMA_CHAN_SLOT(channel);
+-
+-	if (channel >= edma_cc[ctlr]->num_channels)
+-		return;
+-
+-	setup_dma_interrupt(channel, NULL, NULL);
+-	/* REVISIT should probably take out of shadow region 0 */
+-
+-	memcpy_toio(edmacc_regs_base[ctlr] + PARM_OFFSET(channel),
+-			&dummy_paramset, PARM_SIZE);
+-	clear_bit(channel, edma_cc[ctlr]->edma_inuse);
+-}
+-EXPORT_SYMBOL(edma_free_channel);
+-
+-/**
+- * edma_alloc_slot - allocate DMA parameter RAM
+- * @slot: specific slot to allocate; negative for "any unused slot"
+- *
+- * This allocates a parameter RAM slot, initializing it to hold a
+- * dummy transfer.  Slots allocated using this routine have not been
+- * mapped to a hardware DMA channel, and will normally be used by
+- * linking to them from a slot associated with a DMA channel.
+- *
+- * Normal use is to pass EDMA_SLOT_ANY as the @slot, but specific
+- * slots may be allocated on behalf of DSP firmware.
+- *
+- * Returns the number of the slot, else negative errno.
+- */
+-int edma_alloc_slot(unsigned ctlr, int slot)
+-{
+-	if (!edma_cc[ctlr])
+-		return -EINVAL;
+-
+-	if (slot >= 0)
+-		slot = EDMA_CHAN_SLOT(slot);
+-
+-	if (slot < 0) {
+-		slot = edma_cc[ctlr]->num_channels;
+-		for (;;) {
+-			slot = find_next_zero_bit(edma_cc[ctlr]->edma_inuse,
+-					edma_cc[ctlr]->num_slots, slot);
+-			if (slot == edma_cc[ctlr]->num_slots)
+-				return -ENOMEM;
+-			if (!test_and_set_bit(slot, edma_cc[ctlr]->edma_inuse))
+-				break;
+-		}
+-	} else if (slot < edma_cc[ctlr]->num_channels ||
+-			slot >= edma_cc[ctlr]->num_slots) {
+-		return -EINVAL;
+-	} else if (test_and_set_bit(slot, edma_cc[ctlr]->edma_inuse)) {
+-		return -EBUSY;
+-	}
+-
+-	memcpy_toio(edmacc_regs_base[ctlr] + PARM_OFFSET(slot),
+-			&dummy_paramset, PARM_SIZE);
+-
+-	return EDMA_CTLR_CHAN(ctlr, slot);
+-}
+-EXPORT_SYMBOL(edma_alloc_slot);
+-
+-/**
+- * edma_free_slot - deallocate DMA parameter RAM
+- * @slot: parameter RAM slot returned from edma_alloc_slot()
+- *
+- * This deallocates the parameter RAM slot allocated by edma_alloc_slot().
+- * Callers are responsible for ensuring the slot is inactive, and will
+- * not be activated.
+- */
+-void edma_free_slot(unsigned slot)
+-{
+-	unsigned ctlr;
+-
+-	ctlr = EDMA_CTLR(slot);
+-	slot = EDMA_CHAN_SLOT(slot);
+-
+-	if (slot < edma_cc[ctlr]->num_channels ||
+-		slot >= edma_cc[ctlr]->num_slots)
+-		return;
+-
+-	memcpy_toio(edmacc_regs_base[ctlr] + PARM_OFFSET(slot),
+-			&dummy_paramset, PARM_SIZE);
+-	clear_bit(slot, edma_cc[ctlr]->edma_inuse);
+-}
+-EXPORT_SYMBOL(edma_free_slot);
+-
+-
+-/**
+- * edma_alloc_cont_slots - alloc contiguous parameter RAM slots
+- * The API will return the starting point of a set of
+- * contiguous parameter RAM slots that have been requested
+- *
+- * @id: can only be EDMA_CONT_PARAMS_ANY or EDMA_CONT_PARAMS_FIXED_EXACT
+- * or EDMA_CONT_PARAMS_FIXED_NOT_EXACT
+- * @count: number of contiguous Parameter RAM slots
+- * @slot: the start value of Parameter RAM slot that should be passed if id
+- * is EDMA_CONT_PARAMS_FIXED_EXACT or EDMA_CONT_PARAMS_FIXED_NOT_EXACT
+- *
+- * If id is EDMA_CONT_PARAMS_ANY then the API starts looking for a set of
+- * contiguous Parameter RAM slots from parameter RAM 64 in the case of
+- * DaVinci SOCs and 32 in the case of DA8xx SOCs.
+- *
+- * If id is EDMA_CONT_PARAMS_FIXED_EXACT then the API starts looking for a
+- * set of contiguous parameter RAM slots from the "slot" that is passed as an
+- * argument to the API.
+- *
+- * If id is EDMA_CONT_PARAMS_FIXED_NOT_EXACT then the API initially
+- * starts looking for a set of contiguous parameter RAM slots from the "slot"
+- * that is passed as an argument to the API. On failure the API will try to
+- * find a set of contiguous Parameter RAM slots from the remaining Parameter
+- * RAM slots
+- */
+-int edma_alloc_cont_slots(unsigned ctlr, unsigned int id, int slot, int count)
+-{
+-	/*
+-	 * The start slot requested should be greater than
+-	 * the number of channels and less than the total number
+-	 * of slots
+-	 */
+-	if ((id != EDMA_CONT_PARAMS_ANY) &&
+-		(slot < edma_cc[ctlr]->num_channels ||
+-		slot >= edma_cc[ctlr]->num_slots))
+-		return -EINVAL;
+-
+-	/*
+-	 * The number of parameter RAM slots requested cannot be less than 1
+-	 * and cannot be more than the number of slots minus the number of
+-	 * channels
+-	 */
+-	if (count < 1 || count >
+-		(edma_cc[ctlr]->num_slots - edma_cc[ctlr]->num_channels))
+-		return -EINVAL;
+-
+-	switch (id) {
+-	case EDMA_CONT_PARAMS_ANY:
+-		return reserve_contiguous_slots(ctlr, id, count,
+-						 edma_cc[ctlr]->num_channels);
+-	case EDMA_CONT_PARAMS_FIXED_EXACT:
+-	case EDMA_CONT_PARAMS_FIXED_NOT_EXACT:
+-		return reserve_contiguous_slots(ctlr, id, count, slot);
+-	default:
+-		return -EINVAL;
+-	}
+-
+-}
+-EXPORT_SYMBOL(edma_alloc_cont_slots);
+-
+-/**
+- * edma_free_cont_slots - deallocate DMA parameter RAM slots
+- * @slot: first parameter RAM of a set of parameter RAM slots to be freed
+- * @count: the number of contiguous parameter RAM slots to be freed
+- *
+- * This deallocates the parameter RAM slots allocated by
+- * edma_alloc_cont_slots.
+- * Callers/applications need to keep track of sets of contiguous
+- * parameter RAM slots that have been allocated using the edma_alloc_cont_slots
+- * API.
+- * Callers are responsible for ensuring the slots are inactive, and will
+- * not be activated.
+- */
+-int edma_free_cont_slots(unsigned slot, int count)
+-{
+-	unsigned ctlr, slot_to_free;
+-	int i;
+-
+-	ctlr = EDMA_CTLR(slot);
+-	slot = EDMA_CHAN_SLOT(slot);
+-
+-	if (slot < edma_cc[ctlr]->num_channels ||
+-		slot >= edma_cc[ctlr]->num_slots ||
+-		count < 1)
+-		return -EINVAL;
+-
+-	for (i = slot; i < slot + count; ++i) {
+-		ctlr = EDMA_CTLR(i);
+-		slot_to_free = EDMA_CHAN_SLOT(i);
+-
+-		memcpy_toio(edmacc_regs_base[ctlr] + PARM_OFFSET(slot_to_free),
+-			&dummy_paramset, PARM_SIZE);
+-		clear_bit(slot_to_free, edma_cc[ctlr]->edma_inuse);
+-	}
+-
+-	return 0;
+-}
+-EXPORT_SYMBOL(edma_free_cont_slots);
+-
+-/*-----------------------------------------------------------------------*/
+-
+-/* Parameter RAM operations (i) -- read/write partial slots */
+-
+-/**
+- * edma_set_src - set initial DMA source address in parameter RAM slot
+- * @slot: parameter RAM slot being configured
+- * @src_port: physical address of source (memory, controller FIFO, etc)
+- * @addressMode: INCR, except in very rare cases
+- * @fifoWidth: ignored unless @addressMode is FIFO, else specifies the
+- *	width to use when addressing the fifo (e.g. W8BIT, W32BIT)
+- *
+- * Note that the source address is modified during the DMA transfer
+- * according to edma_set_src_index().
+- */
+-void edma_set_src(unsigned slot, dma_addr_t src_port,
+-				enum address_mode mode, enum fifo_width width)
+-{
+-	unsigned ctlr;
+-
+-	ctlr = EDMA_CTLR(slot);
+-	slot = EDMA_CHAN_SLOT(slot);
+-
+-	if (slot < edma_cc[ctlr]->num_slots) {
+-		unsigned int i = edma_parm_read(ctlr, PARM_OPT, slot);
+-
+-		if (mode) {
+-			/* set SAM and program FWID */
+-			i = (i & ~(EDMA_FWID)) | (SAM | ((width & 0x7) << 8));
+-		} else {
+-			/* clear SAM */
+-			i &= ~SAM;
+-		}
+-		edma_parm_write(ctlr, PARM_OPT, slot, i);
+-
+-		/* set the source port address
+-		   in source register of param structure */
+-		edma_parm_write(ctlr, PARM_SRC, slot, src_port);
+-	}
+-}
+-EXPORT_SYMBOL(edma_set_src);
+-
+-/**
+- * edma_set_dest - set initial DMA destination address in parameter RAM slot
+- * @slot: parameter RAM slot being configured
+- * @dest_port: physical address of destination (memory, controller FIFO, etc)
+- * @addressMode: INCR, except in very rare cases
+- * @fifoWidth: ignored unless @addressMode is FIFO, else specifies the
+- *	width to use when addressing the fifo (e.g. W8BIT, W32BIT)
+- *
+- * Note that the destination address is modified during the DMA transfer
+- * according to edma_set_dest_index().
+- */
+-void edma_set_dest(unsigned slot, dma_addr_t dest_port,
+-				 enum address_mode mode, enum fifo_width width)
+-{
+-	unsigned ctlr;
+-
+-	ctlr = EDMA_CTLR(slot);
+-	slot = EDMA_CHAN_SLOT(slot);
+-
+-	if (slot < edma_cc[ctlr]->num_slots) {
+-		unsigned int i = edma_parm_read(ctlr, PARM_OPT, slot);
+-
+-		if (mode) {
+-			/* set DAM and program FWID */
+-			i = (i & ~(EDMA_FWID)) | (DAM | ((width & 0x7) << 8));
+-		} else {
+-			/* clear DAM */
+-			i &= ~DAM;
+-		}
+-		edma_parm_write(ctlr, PARM_OPT, slot, i);
+-		/* set the destination port address
+-		   in dest register of param structure */
+-		edma_parm_write(ctlr, PARM_DST, slot, dest_port);
+-	}
+-}
+-EXPORT_SYMBOL(edma_set_dest);
+-
+-/**
+- * edma_get_position - returns the current transfer points
+- * @slot: parameter RAM slot being examined
+- * @src: pointer to source port position
+- * @dst: pointer to destination port position
+- *
+- * Returns current source and destination addresses for a particular
+- * parameter RAM slot.  Its channel should not be active when this is called.
+- */
+-void edma_get_position(unsigned slot, dma_addr_t *src, dma_addr_t *dst)
+-{
+-	struct edmacc_param temp;
+-	unsigned ctlr;
+-
+-	ctlr = EDMA_CTLR(slot);
+-	slot = EDMA_CHAN_SLOT(slot);
+-
+-	edma_read_slot(EDMA_CTLR_CHAN(ctlr, slot), &temp);
+-	if (src != NULL)
+-		*src = temp.src;
+-	if (dst != NULL)
+-		*dst = temp.dst;
+-}
+-EXPORT_SYMBOL(edma_get_position);
+-
+-/**
+- * edma_set_src_index - configure DMA source address indexing
+- * @slot: parameter RAM slot being configured
+- * @src_bidx: byte offset between source arrays in a frame
+- * @src_cidx: byte offset between source frames in a block
+- *
+- * Offsets are specified to support either contiguous or discontiguous
+- * memory transfers, or repeated access to a hardware register, as needed.
+- * When accessing hardware registers, both offsets are normally zero.
+- */
+-void edma_set_src_index(unsigned slot, s16 src_bidx, s16 src_cidx)
+-{
+-	unsigned ctlr;
+-
+-	ctlr = EDMA_CTLR(slot);
+-	slot = EDMA_CHAN_SLOT(slot);
+-
+-	if (slot < edma_cc[ctlr]->num_slots) {
+-		edma_parm_modify(ctlr, PARM_SRC_DST_BIDX, slot,
+-				0xffff0000, src_bidx);
+-		edma_parm_modify(ctlr, PARM_SRC_DST_CIDX, slot,
+-				0xffff0000, src_cidx);
+-	}
+-}
+-EXPORT_SYMBOL(edma_set_src_index);
+-
+-/**
+- * edma_set_dest_index - configure DMA destination address indexing
+- * @slot: parameter RAM slot being configured
+- * @dest_bidx: byte offset between destination arrays in a frame
+- * @dest_cidx: byte offset between destination frames in a block
+- *
+- * Offsets are specified to support either contiguous or discontiguous
+- * memory transfers, or repeated access to a hardware register, as needed.
+- * When accessing hardware registers, both offsets are normally zero.
+- */
+-void edma_set_dest_index(unsigned slot, s16 dest_bidx, s16 dest_cidx)
+-{
+-	unsigned ctlr;
+-
+-	ctlr = EDMA_CTLR(slot);
+-	slot = EDMA_CHAN_SLOT(slot);
+-
+-	if (slot < edma_cc[ctlr]->num_slots) {
+-		edma_parm_modify(ctlr, PARM_SRC_DST_BIDX, slot,
+-				0x0000ffff, dest_bidx << 16);
+-		edma_parm_modify(ctlr, PARM_SRC_DST_CIDX, slot,
+-				0x0000ffff, dest_cidx << 16);
+-	}
+-}
+-EXPORT_SYMBOL(edma_set_dest_index);
+-
+-/**
+- * edma_set_transfer_params - configure DMA transfer parameters
+- * @slot: parameter RAM slot being configured
+- * @acnt: how many bytes per array (at least one)
+- * @bcnt: how many arrays per frame (at least one)
+- * @ccnt: how many frames per block (at least one)
+- * @bcnt_rld: used only for A-Synchronized transfers; this specifies
+- *	the value to reload into bcnt when it decrements to zero
+- * @sync_mode: ASYNC or ABSYNC
+- *
+- * See the EDMA3 documentation to understand how to configure and link
+- * transfers using the fields in PaRAM slots.  If you are not doing it
+- * all at once with edma_write_slot(), you will use this routine
+- * plus two calls each for source and destination, setting the initial
+- * address and saying how to index that address.
+- *
+- * An example of an A-Synchronized transfer is a serial link using a
+- * single word shift register.  In that case, @acnt would be equal to
+- * that word size; the serial controller issues a DMA synchronization
+- * event to transfer each word, and memory access by the DMA transfer
+- * controller will be word-at-a-time.
+- *
+- * An example of an AB-Synchronized transfer is a device using a FIFO.
+- * In that case, @acnt equals the FIFO width and @bcnt equals its depth.
+- * The controller with the FIFO issues DMA synchronization events when
+- * the FIFO threshold is reached, and the DMA transfer controller will
+- * transfer one frame to (or from) the FIFO.  It will probably use
+- * efficient burst modes to access memory.
+- */
+-void edma_set_transfer_params(unsigned slot,
+-		u16 acnt, u16 bcnt, u16 ccnt,
+-		u16 bcnt_rld, enum sync_dimension sync_mode)
+-{
+-	unsigned ctlr;
+-
+-	ctlr = EDMA_CTLR(slot);
+-	slot = EDMA_CHAN_SLOT(slot);
+-
+-	if (slot < edma_cc[ctlr]->num_slots) {
+-		edma_parm_modify(ctlr, PARM_LINK_BCNTRLD, slot,
+-				0x0000ffff, bcnt_rld << 16);
+-		if (sync_mode == ASYNC)
+-			edma_parm_and(ctlr, PARM_OPT, slot, ~SYNCDIM);
+-		else
+-			edma_parm_or(ctlr, PARM_OPT, slot, SYNCDIM);
+-		/* Set the acount, bcount, ccount registers */
+-		edma_parm_write(ctlr, PARM_A_B_CNT, slot, (bcnt << 16) | acnt);
+-		edma_parm_write(ctlr, PARM_CCNT, slot, ccnt);
+-	}
+-}
+-EXPORT_SYMBOL(edma_set_transfer_params);
+-
+-/**
+- * edma_link - link one parameter RAM slot to another
+- * @from: parameter RAM slot originating the link
+- * @to: parameter RAM slot which is the link target
+- *
+- * The originating slot should not be part of any active DMA transfer.
+- */
+-void edma_link(unsigned from, unsigned to)
+-{
+-	unsigned ctlr_from, ctlr_to;
+-
+-	ctlr_from = EDMA_CTLR(from);
+-	from = EDMA_CHAN_SLOT(from);
+-	ctlr_to = EDMA_CTLR(to);
+-	to = EDMA_CHAN_SLOT(to);
+-
+-	if (from >= edma_cc[ctlr_from]->num_slots)
+-		return;
+-	if (to >= edma_cc[ctlr_to]->num_slots)
+-		return;
+-	edma_parm_modify(ctlr_from, PARM_LINK_BCNTRLD, from, 0xffff0000,
+-				PARM_OFFSET(to));
+-}
+-EXPORT_SYMBOL(edma_link);
+-
+-/**
+- * edma_unlink - cut link from one parameter RAM slot
+- * @from: parameter RAM slot originating the link
+- *
+- * The originating slot should not be part of any active DMA transfer.
+- * Its link is set to 0xffff.
+- */
+-void edma_unlink(unsigned from)
+-{
+-	unsigned ctlr;
+-
+-	ctlr = EDMA_CTLR(from);
+-	from = EDMA_CHAN_SLOT(from);
+-
+-	if (from >= edma_cc[ctlr]->num_slots)
+-		return;
+-	edma_parm_or(ctlr, PARM_LINK_BCNTRLD, from, 0xffff);
+-}
+-EXPORT_SYMBOL(edma_unlink);
+-
+-/*-----------------------------------------------------------------------*/
+-
+-/* Parameter RAM operations (ii) -- read/write whole parameter sets */
+-
+-/**
+- * edma_write_slot - write parameter RAM data for slot
+- * @slot: number of parameter RAM slot being modified
+- * @param: data to be written into parameter RAM slot
+- *
+- * Use this to assign all parameters of a transfer at once.  This
+- * allows more efficient setup of transfers than issuing multiple
+- * calls to set up those parameters in small pieces, and provides
+- * complete control over all transfer options.
+- */
+-void edma_write_slot(unsigned slot, const struct edmacc_param *param)
+-{
+-	unsigned ctlr;
+-
+-	ctlr = EDMA_CTLR(slot);
+-	slot = EDMA_CHAN_SLOT(slot);
+-
+-	if (slot >= edma_cc[ctlr]->num_slots)
+-		return;
+-	memcpy_toio(edmacc_regs_base[ctlr] + PARM_OFFSET(slot), param,
+-			PARM_SIZE);
+-}
+-EXPORT_SYMBOL(edma_write_slot);
+-
+-/**
+- * edma_read_slot - read parameter RAM data from slot
+- * @slot: number of parameter RAM slot being copied
+- * @param: where to store copy of parameter RAM data
+- *
+- * Use this to read data from a parameter RAM slot, perhaps to
+- * save them as a template for later reuse.
+- */
+-void edma_read_slot(unsigned slot, struct edmacc_param *param)
+-{
+-	unsigned ctlr;
+-
+-	ctlr = EDMA_CTLR(slot);
+-	slot = EDMA_CHAN_SLOT(slot);
+-
+-	if (slot >= edma_cc[ctlr]->num_slots)
+-		return;
+-	memcpy_fromio(param, edmacc_regs_base[ctlr] + PARM_OFFSET(slot),
+-			PARM_SIZE);
+-}
+-EXPORT_SYMBOL(edma_read_slot);
+-
+-/*-----------------------------------------------------------------------*/
+-
+-/* Various EDMA channel control operations */
+-
+-/**
+- * edma_pause - pause dma on a channel
+- * @channel: on which edma_start() has been called
+- *
+- * This temporarily disables EDMA hardware events on the specified channel,
+- * preventing them from triggering new transfers on its behalf
+- */
+-void edma_pause(unsigned channel)
+-{
+-	unsigned ctlr;
+-
+-	ctlr = EDMA_CTLR(channel);
+-	channel = EDMA_CHAN_SLOT(channel);
+-
+-	if (channel < edma_cc[ctlr]->num_channels) {
+-		unsigned int mask = BIT(channel & 0x1f);
+-
+-		edma_shadow0_write_array(ctlr, SH_EECR, channel >> 5, mask);
+-	}
+-}
+-EXPORT_SYMBOL(edma_pause);
+-
+-/**
+- * edma_resume - resumes dma on a paused channel
+- * @channel: on which edma_pause() has been called
+- *
+- * This re-enables EDMA hardware events on the specified channel.
+- */
+-void edma_resume(unsigned channel)
+-{
+-	unsigned ctlr;
+-
+-	ctlr = EDMA_CTLR(channel);
+-	channel = EDMA_CHAN_SLOT(channel);
+-
+-	if (channel < edma_cc[ctlr]->num_channels) {
+-		unsigned int mask = BIT(channel & 0x1f);
+-
+-		edma_shadow0_write_array(ctlr, SH_EESR, channel >> 5, mask);
+-	}
+-}
+-EXPORT_SYMBOL(edma_resume);
+-
+-/**
+- * edma_start - start dma on a channel
+- * @channel: channel being activated
+- *
+- * Channels with event associations will be triggered by their hardware
+- * events, and channels without such associations will be triggered by
+- * software.  (At this writing there is no interface for using software
+- * triggers except with channels that don't support hardware triggers.)
+- *
+- * Returns zero on success, else negative errno.
+- */
+-int edma_start(unsigned channel)
+-{
+-	unsigned ctlr;
+-
+-	ctlr = EDMA_CTLR(channel);
+-	channel = EDMA_CHAN_SLOT(channel);
+-
+-	if (channel < edma_cc[ctlr]->num_channels) {
+-		int j = channel >> 5;
+-		unsigned int mask = BIT(channel & 0x1f);
+-
+-		/* EDMA channels without event association */
+-		if (test_bit(channel, edma_cc[ctlr]->edma_unused)) {
+-			pr_debug("EDMA: ESR%d %08x\n", j,
+-				edma_shadow0_read_array(ctlr, SH_ESR, j));
+-			edma_shadow0_write_array(ctlr, SH_ESR, j, mask);
+-			return 0;
+-		}
+-
+-		/* EDMA channel with event association */
+-		pr_debug("EDMA: ER%d %08x\n", j,
+-			edma_shadow0_read_array(ctlr, SH_ER, j));
+-		/* Clear any pending event or error */
+-		edma_write_array(ctlr, EDMA_ECR, j, mask);
+-		edma_write_array(ctlr, EDMA_EMCR, j, mask);
+-		/* Clear any SER */
+-		edma_shadow0_write_array(ctlr, SH_SECR, j, mask);
+-		edma_shadow0_write_array(ctlr, SH_EESR, j, mask);
+-		pr_debug("EDMA: EER%d %08x\n", j,
+-			edma_shadow0_read_array(ctlr, SH_EER, j));
+-		return 0;
+-	}
+-
+-	return -EINVAL;
+-}
+-EXPORT_SYMBOL(edma_start);
+-
+-/**
+- * edma_stop - stops dma on the channel passed
+- * @channel: channel being deactivated
+- *
+- * Any active transfer on @channel is paused and all pending
+- * hardware events are cleared.  The current transfer
+- * may not be resumed, and the channel's Parameter RAM should be
+- * reinitialized before being reused.
+- */
+-void edma_stop(unsigned channel)
+-{
+-	unsigned ctlr;
+-
+-	ctlr = EDMA_CTLR(channel);
+-	channel = EDMA_CHAN_SLOT(channel);
+-
+-	if (channel < edma_cc[ctlr]->num_channels) {
+-		int j = channel >> 5;
+-		unsigned int mask = BIT(channel & 0x1f);
+-
+-		edma_shadow0_write_array(ctlr, SH_EECR, j, mask);
+-		edma_shadow0_write_array(ctlr, SH_ECR, j, mask);
+-		edma_shadow0_write_array(ctlr, SH_SECR, j, mask);
+-		edma_write_array(ctlr, EDMA_EMCR, j, mask);
+-
+-		pr_debug("EDMA: EER%d %08x\n", j,
+-				edma_shadow0_read_array(ctlr, SH_EER, j));
+-
+-		/* REVISIT:  consider guarding against inappropriate event
+-		 * chaining by overwriting with dummy_paramset.
+-		 */
+-	}
+-}
+-EXPORT_SYMBOL(edma_stop);
+-
+-/******************************************************************************
+- *
+- * It cleans the PaRAM entry and brings EDMA back to its initial state if media
+- * has been removed before EDMA has finished. It is useful for removable media.
+- * Arguments:
+- *      ch_no     - channel no
+- *
+- * Return: zero on success, or corresponding error no on failure
+- *
+- * FIXME this should not be needed ... edma_stop() should suffice.
+- *
+- *****************************************************************************/
+-
+-void edma_clean_channel(unsigned channel)
+-{
+-	unsigned ctlr;
+-
+-	ctlr = EDMA_CTLR(channel);
+-	channel = EDMA_CHAN_SLOT(channel);
+-
+-	if (channel < edma_cc[ctlr]->num_channels) {
+-		int j = (channel >> 5);
+-		unsigned int mask = BIT(channel & 0x1f);
+-
+-		pr_debug("EDMA: EMR%d %08x\n", j,
+-				edma_read_array(ctlr, EDMA_EMR, j));
+-		edma_shadow0_write_array(ctlr, SH_ECR, j, mask);
+-		/* Clear the corresponding EMR bits */
+-		edma_write_array(ctlr, EDMA_EMCR, j, mask);
+-		/* Clear any SER */
+-		edma_shadow0_write_array(ctlr, SH_SECR, j, mask);
+-		edma_write(ctlr, EDMA_CCERRCLR, BIT(16) | BIT(1) | BIT(0));
+-	}
+-}
+-EXPORT_SYMBOL(edma_clean_channel);
+-
+-/*
+- * edma_clear_event - clear an outstanding event on the DMA channel
+- * Arguments:
+- *	channel - channel number
+- */
+-void edma_clear_event(unsigned channel)
+-{
+-	unsigned ctlr;
+-
+-	ctlr = EDMA_CTLR(channel);
+-	channel = EDMA_CHAN_SLOT(channel);
+-
+-	if (channel >= edma_cc[ctlr]->num_channels)
+-		return;
+-	if (channel < 32)
+-		edma_write(ctlr, EDMA_ECR, BIT(channel));
+-	else
+-		edma_write(ctlr, EDMA_ECRH, BIT(channel - 32));
+-}
+-EXPORT_SYMBOL(edma_clear_event);
+-
+-/*-----------------------------------------------------------------------*/
+-
+-static int __init edma_probe(struct platform_device *pdev)
+-{
+-	struct edma_soc_info	**info = pdev->dev.platform_data;
+-	const s8		(*queue_priority_mapping)[2];
+-	const s8		(*queue_tc_mapping)[2];
+-	int			i, j, off, ln, found = 0;
+-	int			status = -1;
+-	const s16		(*rsv_chans)[2];
+-	const s16		(*rsv_slots)[2];
+-	int			irq[EDMA_MAX_CC] = {0, 0};
+-	int			err_irq[EDMA_MAX_CC] = {0, 0};
+-	struct resource		*r[EDMA_MAX_CC] = {NULL};
+-	resource_size_t		len[EDMA_MAX_CC];
+-	char			res_name[10];
+-	char			irq_name[10];
+-
+-	if (!info)
+-		return -ENODEV;
+-
+-	for (j = 0; j < EDMA_MAX_CC; j++) {
+-		sprintf(res_name, "edma_cc%d", j);
+-		r[j] = platform_get_resource_byname(pdev, IORESOURCE_MEM,
+-						res_name);
+-		if (!r[j] || !info[j]) {
+-			if (found)
+-				break;
+-			else
+-				return -ENODEV;
+-		} else {
+-			found = 1;
+-		}
+-
+-		len[j] = resource_size(r[j]);
+-
+-		r[j] = request_mem_region(r[j]->start, len[j],
+-			dev_name(&pdev->dev));
+-		if (!r[j]) {
+-			status = -EBUSY;
+-			goto fail1;
+-		}
+-
+-		edmacc_regs_base[j] = ioremap(r[j]->start, len[j]);
+-		if (!edmacc_regs_base[j]) {
+-			status = -EBUSY;
+-			goto fail1;
+-		}
+-
+-		edma_cc[j] = kzalloc(sizeof(struct edma), GFP_KERNEL);
+-		if (!edma_cc[j]) {
+-			status = -ENOMEM;
+-			goto fail1;
+-		}
+-
+-		edma_cc[j]->num_channels = min_t(unsigned, info[j]->n_channel,
+-							EDMA_MAX_DMACH);
+-		edma_cc[j]->num_slots = min_t(unsigned, info[j]->n_slot,
+-							EDMA_MAX_PARAMENTRY);
+-		edma_cc[j]->num_cc = min_t(unsigned, info[j]->n_cc,
+-							EDMA_MAX_CC);
+-
+-		edma_cc[j]->default_queue = info[j]->default_queue;
+-
+-		dev_dbg(&pdev->dev, "DMA REG BASE ADDR=%p\n",
+-			edmacc_regs_base[j]);
+-
+-		for (i = 0; i < edma_cc[j]->num_slots; i++)
+-			memcpy_toio(edmacc_regs_base[j] + PARM_OFFSET(i),
+-					&dummy_paramset, PARM_SIZE);
+-
+-		/* Mark all channels as unused */
+-		memset(edma_cc[j]->edma_unused, 0xff,
+-			sizeof(edma_cc[j]->edma_unused));
+-
+-		if (info[j]->rsv) {
+-
+-			/* Clear the reserved channels in unused list */
+-			rsv_chans = info[j]->rsv->rsv_chans;
+-			if (rsv_chans) {
+-				for (i = 0; rsv_chans[i][0] != -1; i++) {
+-					off = rsv_chans[i][0];
+-					ln = rsv_chans[i][1];
+-					clear_bits(off, ln,
+-						edma_cc[j]->edma_unused);
+-				}
+-			}
+-
+-			/* Set the reserved slots in inuse list */
+-			rsv_slots = info[j]->rsv->rsv_slots;
+-			if (rsv_slots) {
+-				for (i = 0; rsv_slots[i][0] != -1; i++) {
+-					off = rsv_slots[i][0];
+-					ln = rsv_slots[i][1];
+-					set_bits(off, ln,
+-						edma_cc[j]->edma_inuse);
+-				}
+-			}
+-		}
+-
+-		sprintf(irq_name, "edma%d", j);
+-		irq[j] = platform_get_irq_byname(pdev, irq_name);
+-		edma_cc[j]->irq_res_start = irq[j];
+-		status = request_irq(irq[j], dma_irq_handler, 0, "edma",
+-					&pdev->dev);
+-		if (status < 0) {
+-			dev_dbg(&pdev->dev, "request_irq %d failed --> %d\n",
+-				irq[j], status);
+-			goto fail;
+-		}
+-
+-		sprintf(irq_name, "edma%d_err", j);
+-		err_irq[j] = platform_get_irq_byname(pdev, irq_name);
+-		edma_cc[j]->irq_res_end = err_irq[j];
+-		status = request_irq(err_irq[j], dma_ccerr_handler, 0,
+-					"edma_error", &pdev->dev);
+-		if (status < 0) {
+-			dev_dbg(&pdev->dev, "request_irq %d failed --> %d\n",
+-				err_irq[j], status);
+-			goto fail;
+-		}
+-
+-		for (i = 0; i < edma_cc[j]->num_channels; i++)
+-			map_dmach_queue(j, i, info[j]->default_queue);
+-
+-		queue_tc_mapping = info[j]->queue_tc_mapping;
+-		queue_priority_mapping = info[j]->queue_priority_mapping;
+-
+-		/* Event queue to TC mapping */
+-		for (i = 0; queue_tc_mapping[i][0] != -1; i++)
+-			map_queue_tc(j, queue_tc_mapping[i][0],
+-					queue_tc_mapping[i][1]);
+-
+-		/* Event queue priority mapping */
+-		for (i = 0; queue_priority_mapping[i][0] != -1; i++)
+-			assign_priority_to_queue(j,
+-						queue_priority_mapping[i][0],
+-						queue_priority_mapping[i][1]);
+-
+-		/* Map the channel to param entry if channel mapping logic
+-		 * exists
+-		 */
+-		if (edma_read(j, EDMA_CCCFG) & CHMAP_EXIST)
+-			map_dmach_param(j);
+-
+-		for (i = 0; i < info[j]->n_region; i++) {
+-			edma_write_array2(j, EDMA_DRAE, i, 0, 0x0);
+-			edma_write_array2(j, EDMA_DRAE, i, 1, 0x0);
+-			edma_write_array(j, EDMA_QRAE, i, 0x0);
+-		}
+-		arch_num_cc++;
+-	}
+-
+-	if (tc_errs_handled) {
+-		status = request_irq(IRQ_TCERRINT0, dma_tc0err_handler, 0,
+-					"edma_tc0", &pdev->dev);
+-		if (status < 0) {
+-			dev_dbg(&pdev->dev, "request_irq %d failed --> %d\n",
+-				IRQ_TCERRINT0, status);
+-			return status;
+-		}
+-		status = request_irq(IRQ_TCERRINT, dma_tc1err_handler, 0,
+-					"edma_tc1", &pdev->dev);
+-		if (status < 0) {
+-			dev_dbg(&pdev->dev, "request_irq %d --> %d\n",
+-				IRQ_TCERRINT, status);
+-			return status;
+-		}
+-	}
+-
+-	return 0;
+-
+-fail:
+-	for (i = 0; i < EDMA_MAX_CC; i++) {
+-		if (err_irq[i])
+-			free_irq(err_irq[i], &pdev->dev);
+-		if (irq[i])
+-			free_irq(irq[i], &pdev->dev);
+-	}
+-fail1:
+-	for (i = 0; i < EDMA_MAX_CC; i++) {
+-		if (r[i])
+-			release_mem_region(r[i]->start, len[i]);
+-		if (edmacc_regs_base[i])
+-			iounmap(edmacc_regs_base[i]);
+-		kfree(edma_cc[i]);
+-	}
+-	return status;
+-}
+-
+-
+-static struct platform_driver edma_driver = {
+-	.driver.name	= "edma",
+-};
+-
+-static int __init edma_init(void)
+-{
+-	return platform_driver_probe(&edma_driver, edma_probe);
+-}
+-arch_initcall(edma_init);
+-
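The file removed above is the private EDMA API that this series re-homes under arch/arm/common, so existing client drivers keep the same calls and only the header path changes. A short sketch of the usual client sequence, assuming the signatures documented in the kernel-doc above; the event number, buffer address, destination register, and transfer counts are placeholders.

#include <linux/completion.h>
#include <linux/platform_data/edma.h>	/* was <mach/edma.h> before this series */

static void tx_done(unsigned channel, u16 ch_status, void *data)
{
	if (ch_status == DMA_COMPLETE)
		complete(data);
}

/* Push sixteen 4-byte words to a peripheral register, one word per event */
static int edma_tx_words(int ch_event, dma_addr_t buf, dma_addr_t dst_reg,
			 struct completion *done)
{
	int channel, ret;

	channel = edma_alloc_channel(ch_event, tx_done, done, EVENTQ_DEFAULT);
	if (channel < 0)
		return channel;

	edma_set_src(channel, buf, INCR, W8BIT);
	edma_set_src_index(channel, 4, 0);	/* advance source by one word */
	edma_set_dest(channel, dst_reg, INCR, W8BIT);
	edma_set_dest_index(channel, 0, 0);	/* fixed destination register */
	edma_set_transfer_params(channel, 4, 16, 1, 0, ASYNC);

	ret = edma_start(channel);
	if (!ret)
		wait_for_completion(done);

	edma_stop(channel);
	edma_free_channel(channel);
	return ret;
}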
+diff --git a/arch/arm/mach-davinci/include/mach/da8xx.h b/arch/arm/mach-davinci/include/mach/da8xx.h
+index de439b7..311daec 100644
+--- a/arch/arm/mach-davinci/include/mach/da8xx.h
++++ b/arch/arm/mach-davinci/include/mach/da8xx.h
+@@ -20,8 +20,8 @@
+ #include <linux/videodev2.h>
+ 
+ #include <mach/serial.h>
+-#include <mach/edma.h>
+ #include <mach/pm.h>
++#include <linux/platform_data/edma.h>
+ #include <linux/platform_data/i2c-davinci.h>
+ #include <linux/platform_data/mmc-davinci.h>
+ #include <linux/platform_data/usb-davinci.h>
+diff --git a/arch/arm/mach-davinci/include/mach/edma.h b/arch/arm/mach-davinci/include/mach/edma.h
+deleted file mode 100644
+index 7e84c90..0000000
+--- a/arch/arm/mach-davinci/include/mach/edma.h
++++ /dev/null
+@@ -1,267 +0,0 @@
+-/*
+- *  TI DAVINCI dma definitions
+- *
+- *  Copyright (C) 2006-2009 Texas Instruments.
+- *
+- *  This program is free software; you can redistribute  it and/or modify it
+- *  under  the terms of  the GNU General  Public License as published by the
+- *  Free Software Foundation;  either version 2 of the  License, or (at your
+- *  option) any later version.
+- *
+- *  THIS  SOFTWARE  IS PROVIDED   ``AS  IS'' AND   ANY  EXPRESS OR IMPLIED
+- *  WARRANTIES,   INCLUDING, BUT NOT  LIMITED  TO, THE IMPLIED WARRANTIES OF
+- *  MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN
+- *  NO  EVENT  SHALL   THE AUTHOR  BE    LIABLE FOR ANY   DIRECT, INDIRECT,
+- *  INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+- *  NOT LIMITED   TO, PROCUREMENT OF  SUBSTITUTE GOODS  OR SERVICES; LOSS OF
+- *  USE, DATA,  OR PROFITS; OR  BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+- *  ANY THEORY OF LIABILITY, WHETHER IN  CONTRACT, STRICT LIABILITY, OR TORT
+- *  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+- *  THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+- *
+- *  You should have received a copy of the  GNU General Public License along
+- *  with this program; if not, write  to the Free Software Foundation, Inc.,
+- *  675 Mass Ave, Cambridge, MA 02139, USA.
+- *
+- */
+-
+-/*
+- * This EDMA3 programming framework exposes two basic kinds of resource:
+- *
+- *  Channel	Triggers transfers, usually from a hardware event but
+- *		also manually or by "chaining" from DMA completions.
+- *		Each channel is coupled to a Parameter RAM (PaRAM) slot.
+- *
+- *  Slot	Each PaRAM slot holds a DMA transfer descriptor (PaRAM
+- *		"set"), source and destination addresses, a link to a
+- *		next PaRAM slot (if any), options for the transfer, and
+- *		instructions for updating those addresses.  There are
+- *		more than twice as many slots as event channels.
+- *
+- * Each PaRAM set describes a sequence of transfers, either for one large
+- * buffer or for several discontiguous smaller buffers.  An EDMA transfer
+- * is driven only from a channel, which performs the transfers specified
+- * in its PaRAM slot until there are no more transfers.  When that last
+- * transfer completes, the "link" field may be used to reload the channel's
+- * PaRAM slot with a new transfer descriptor.
+- *
+- * The EDMA Channel Controller (CC) maps requests from channels into physical
+- * Transfer Controller (TC) requests when the channel triggers (by hardware
+- * or software events, or by chaining).  The two physical DMA channels provided
+- * by the TCs are thus shared by many logical channels.
+- *
+- * DaVinci hardware also has a "QDMA" mechanism which is not currently
+- * supported through this interface.  (DSP firmware uses it though.)
+- */
+-
+-#ifndef EDMA_H_
+-#define EDMA_H_
+-
+-/* PaRAM slots are laid out like this */
+-struct edmacc_param {
+-	unsigned int opt;
+-	unsigned int src;
+-	unsigned int a_b_cnt;
+-	unsigned int dst;
+-	unsigned int src_dst_bidx;
+-	unsigned int link_bcntrld;
+-	unsigned int src_dst_cidx;
+-	unsigned int ccnt;
+-};
+-
+-#define CCINT0_INTERRUPT     16
+-#define CCERRINT_INTERRUPT   17
+-#define TCERRINT0_INTERRUPT   18
+-#define TCERRINT1_INTERRUPT   19
+-
+-/* fields in edmacc_param.opt */
+-#define SAM		BIT(0)
+-#define DAM		BIT(1)
+-#define SYNCDIM		BIT(2)
+-#define STATIC		BIT(3)
+-#define EDMA_FWID	(0x07 << 8)
+-#define TCCMODE		BIT(11)
+-#define EDMA_TCC(t)	((t) << 12)
+-#define TCINTEN		BIT(20)
+-#define ITCINTEN	BIT(21)
+-#define TCCHEN		BIT(22)
+-#define ITCCHEN		BIT(23)
+-
+-#define TRWORD (0x7<<2)
+-#define PAENTRY (0x1ff<<5)
+-
+-/* Drivers should avoid using these symbolic names for dm644x
+- * channels, and use platform_device IORESOURCE_DMA resources
+- * instead.  (Other DaVinci chips have different peripherals
+- * and thus have different DMA channel mappings.)
+- */
+-#define DAVINCI_DMA_MCBSP_TX              2
+-#define DAVINCI_DMA_MCBSP_RX              3
+-#define DAVINCI_DMA_VPSS_HIST             4
+-#define DAVINCI_DMA_VPSS_H3A              5
+-#define DAVINCI_DMA_VPSS_PRVU             6
+-#define DAVINCI_DMA_VPSS_RSZ              7
+-#define DAVINCI_DMA_IMCOP_IMXINT          8
+-#define DAVINCI_DMA_IMCOP_VLCDINT         9
+-#define DAVINCI_DMA_IMCO_PASQINT         10
+-#define DAVINCI_DMA_IMCOP_DSQINT         11
+-#define DAVINCI_DMA_SPI_SPIX             16
+-#define DAVINCI_DMA_SPI_SPIR             17
+-#define DAVINCI_DMA_UART0_URXEVT0        18
+-#define DAVINCI_DMA_UART0_UTXEVT0        19
+-#define DAVINCI_DMA_UART1_URXEVT1        20
+-#define DAVINCI_DMA_UART1_UTXEVT1        21
+-#define DAVINCI_DMA_UART2_URXEVT2        22
+-#define DAVINCI_DMA_UART2_UTXEVT2        23
+-#define DAVINCI_DMA_MEMSTK_MSEVT         24
+-#define DAVINCI_DMA_MMCRXEVT             26
+-#define DAVINCI_DMA_MMCTXEVT             27
+-#define DAVINCI_DMA_I2C_ICREVT           28
+-#define DAVINCI_DMA_I2C_ICXEVT           29
+-#define DAVINCI_DMA_GPIO_GPINT0          32
+-#define DAVINCI_DMA_GPIO_GPINT1          33
+-#define DAVINCI_DMA_GPIO_GPINT2          34
+-#define DAVINCI_DMA_GPIO_GPINT3          35
+-#define DAVINCI_DMA_GPIO_GPINT4          36
+-#define DAVINCI_DMA_GPIO_GPINT5          37
+-#define DAVINCI_DMA_GPIO_GPINT6          38
+-#define DAVINCI_DMA_GPIO_GPINT7          39
+-#define DAVINCI_DMA_GPIO_GPBNKINT0       40
+-#define DAVINCI_DMA_GPIO_GPBNKINT1       41
+-#define DAVINCI_DMA_GPIO_GPBNKINT2       42
+-#define DAVINCI_DMA_GPIO_GPBNKINT3       43
+-#define DAVINCI_DMA_GPIO_GPBNKINT4       44
+-#define DAVINCI_DMA_TIMER0_TINT0         48
+-#define DAVINCI_DMA_TIMER1_TINT1         49
+-#define DAVINCI_DMA_TIMER2_TINT2         50
+-#define DAVINCI_DMA_TIMER3_TINT3         51
+-#define DAVINCI_DMA_PWM0                 52
+-#define DAVINCI_DMA_PWM1                 53
+-#define DAVINCI_DMA_PWM2                 54
+-
+-/* DA830 specific EDMA3 information */
+-#define EDMA_DA830_NUM_DMACH		32
+-#define EDMA_DA830_NUM_TCC		32
+-#define EDMA_DA830_NUM_PARAMENTRY	128
+-#define EDMA_DA830_NUM_EVQUE		2
+-#define EDMA_DA830_NUM_TC		2
+-#define EDMA_DA830_CHMAP_EXIST		0
+-#define EDMA_DA830_NUM_REGIONS		4
+-#define DA830_DMACH2EVENT_MAP0		0x000FC03Fu
+-#define DA830_DMACH2EVENT_MAP1		0x00000000u
+-#define DA830_EDMA_ARM_OWN		0x30FFCCFFu
+-
+-/*ch_status paramater of callback function possible values*/
+-#define DMA_COMPLETE 1
+-#define DMA_CC_ERROR 2
+-#define DMA_TC1_ERROR 3
+-#define DMA_TC2_ERROR 4
+-
+-enum address_mode {
+-	INCR = 0,
+-	FIFO = 1
+-};
+-
+-enum fifo_width {
+-	W8BIT = 0,
+-	W16BIT = 1,
+-	W32BIT = 2,
+-	W64BIT = 3,
+-	W128BIT = 4,
+-	W256BIT = 5
+-};
+-
+-enum dma_event_q {
+-	EVENTQ_0 = 0,
+-	EVENTQ_1 = 1,
+-	EVENTQ_2 = 2,
+-	EVENTQ_3 = 3,
+-	EVENTQ_DEFAULT = -1
+-};
+-
+-enum sync_dimension {
+-	ASYNC = 0,
+-	ABSYNC = 1
+-};
+-
+-#define EDMA_CTLR_CHAN(ctlr, chan)	(((ctlr) << 16) | (chan))
+-#define EDMA_CTLR(i)			((i) >> 16)
+-#define EDMA_CHAN_SLOT(i)		((i) & 0xffff)
+-
+-#define EDMA_CHANNEL_ANY		-1	/* for edma_alloc_channel() */
+-#define EDMA_SLOT_ANY			-1	/* for edma_alloc_slot() */
+-#define EDMA_CONT_PARAMS_ANY		 1001
+-#define EDMA_CONT_PARAMS_FIXED_EXACT	 1002
+-#define EDMA_CONT_PARAMS_FIXED_NOT_EXACT 1003
+-
+-#define EDMA_MAX_CC               2
+-
+-/* alloc/free DMA channels and their dedicated parameter RAM slots */
+-int edma_alloc_channel(int channel,
+-	void (*callback)(unsigned channel, u16 ch_status, void *data),
+-	void *data, enum dma_event_q);
+-void edma_free_channel(unsigned channel);
+-
+-/* alloc/free parameter RAM slots */
+-int edma_alloc_slot(unsigned ctlr, int slot);
+-void edma_free_slot(unsigned slot);
+-
+-/* alloc/free a set of contiguous parameter RAM slots */
+-int edma_alloc_cont_slots(unsigned ctlr, unsigned int id, int slot, int count);
+-int edma_free_cont_slots(unsigned slot, int count);
+-
+-/* calls that operate on part of a parameter RAM slot */
+-void edma_set_src(unsigned slot, dma_addr_t src_port,
+-				enum address_mode mode, enum fifo_width);
+-void edma_set_dest(unsigned slot, dma_addr_t dest_port,
+-				 enum address_mode mode, enum fifo_width);
+-void edma_get_position(unsigned slot, dma_addr_t *src, dma_addr_t *dst);
+-void edma_set_src_index(unsigned slot, s16 src_bidx, s16 src_cidx);
+-void edma_set_dest_index(unsigned slot, s16 dest_bidx, s16 dest_cidx);
+-void edma_set_transfer_params(unsigned slot, u16 acnt, u16 bcnt, u16 ccnt,
+-		u16 bcnt_rld, enum sync_dimension sync_mode);
+-void edma_link(unsigned from, unsigned to);
+-void edma_unlink(unsigned from);
+-
+-/* calls that operate on an entire parameter RAM slot */
+-void edma_write_slot(unsigned slot, const struct edmacc_param *params);
+-void edma_read_slot(unsigned slot, struct edmacc_param *params);
+-
+-/* channel control operations */
+-int edma_start(unsigned channel);
+-void edma_stop(unsigned channel);
+-void edma_clean_channel(unsigned channel);
+-void edma_clear_event(unsigned channel);
+-void edma_pause(unsigned channel);
+-void edma_resume(unsigned channel);
+-
+-struct edma_rsv_info {
+-
+-	const s16	(*rsv_chans)[2];
+-	const s16	(*rsv_slots)[2];
+-};
+-
+-/* platform_data for EDMA driver */
+-struct edma_soc_info {
+-
+-	/* how many dma resources of each type */
+-	unsigned	n_channel;
+-	unsigned	n_region;
+-	unsigned	n_slot;
+-	unsigned	n_tc;
+-	unsigned	n_cc;
+-	/*
+-	 * Default queue is expected to be a low-priority queue.
+-	 * This way, long transfers on the default queue started
+-	 * by the codec engine will not cause audio defects.
+-	 */
+-	enum dma_event_q	default_queue;
+-
+-	/* Resource reservation for other cores */
+-	struct edma_rsv_info	*rsv;
+-
+-	const s8	(*queue_tc_mapping)[2];
+-	const s8	(*queue_priority_mapping)[2];
+-};
+-
+-#endif
+diff --git a/drivers/dma/edma.c b/drivers/dma/edma.c
+index cd7e328..5f3e532 100644
+--- a/drivers/dma/edma.c
++++ b/drivers/dma/edma.c
+@@ -24,7 +24,7 @@
+ #include <linux/slab.h>
+ #include <linux/spinlock.h>
+ 
+-#include <mach/edma.h>
++#include <linux/platform_data/edma.h>
+ 
+ #include "dmaengine.h"
+ #include "virt-dma.h"
+diff --git a/drivers/mmc/host/davinci_mmc.c b/drivers/mmc/host/davinci_mmc.c
+index 2063677..f5d46ea 100644
+--- a/drivers/mmc/host/davinci_mmc.c
++++ b/drivers/mmc/host/davinci_mmc.c
+@@ -35,6 +35,7 @@
+ #include <linux/edma.h>
+ #include <linux/mmc/mmc.h>
+ 
++#include <linux/platform_data/edma.h>
+ #include <linux/platform_data/mmc-davinci.h>
+ 
+ /*
+diff --git a/include/linux/mfd/davinci_voicecodec.h b/include/linux/mfd/davinci_voicecodec.h
+index 0ab6132..7dd6524 100644
+--- a/include/linux/mfd/davinci_voicecodec.h
++++ b/include/linux/mfd/davinci_voicecodec.h
+@@ -26,8 +26,7 @@
+ #include <linux/kernel.h>
+ #include <linux/platform_device.h>
+ #include <linux/mfd/core.h>
+-
+-#include <mach/edma.h>
++#include <linux/platform_data/edma.h>
+ 
+ /*
+  * Register values.
+diff --git a/include/linux/platform_data/edma.h b/include/linux/platform_data/edma.h
+new file mode 100644
+index 0000000..2344ea2
+--- /dev/null
++++ b/include/linux/platform_data/edma.h
+@@ -0,0 +1,182 @@
++/*
++ *  TI EDMA definitions
++ *
++ *  Copyright (C) 2006-2013 Texas Instruments.
++ *
++ *  This program is free software; you can redistribute  it and/or modify it
++ *  under  the terms of  the GNU General  Public License as published by the
++ *  Free Software Foundation;  either version 2 of the  License, or (at your
++ *  option) any later version.
++ */
++
++/*
++ * This EDMA3 programming framework exposes two basic kinds of resource:
++ *
++ *  Channel	Triggers transfers, usually from a hardware event but
++ *		also manually or by "chaining" from DMA completions.
++ *		Each channel is coupled to a Parameter RAM (PaRAM) slot.
++ *
++ *  Slot	Each PaRAM slot holds a DMA transfer descriptor (PaRAM
++ *		"set"), source and destination addresses, a link to a
++ *		next PaRAM slot (if any), options for the transfer, and
++ *		instructions for updating those addresses.  There are
++ *		more than twice as many slots as event channels.
++ *
++ * Each PaRAM set describes a sequence of transfers, either for one large
++ * buffer or for several discontiguous smaller buffers.  An EDMA transfer
++ * is driven only from a channel, which performs the transfers specified
++ * in its PaRAM slot until there are no more transfers.  When that last
++ * transfer completes, the "link" field may be used to reload the channel's
++ * PaRAM slot with a new transfer descriptor.
++ *
++ * The EDMA Channel Controller (CC) maps requests from channels into physical
++ * Transfer Controller (TC) requests when the channel triggers (by hardware
++ * or software events, or by chaining).  The two physical DMA channels provided
++ * by the TCs are thus shared by many logical channels.
++ *
++ * DaVinci hardware also has a "QDMA" mechanism which is not currently
++ * supported through this interface.  (DSP firmware uses it though.)
++ */
++
++#ifndef EDMA_H_
++#define EDMA_H_
++
++/* PaRAM slots are laid out like this */
++struct edmacc_param {
++	unsigned int opt;
++	unsigned int src;
++	unsigned int a_b_cnt;
++	unsigned int dst;
++	unsigned int src_dst_bidx;
++	unsigned int link_bcntrld;
++	unsigned int src_dst_cidx;
++	unsigned int ccnt;
++};
++
++/* fields in edmacc_param.opt */
++#define SAM		BIT(0)
++#define DAM		BIT(1)
++#define SYNCDIM		BIT(2)
++#define STATIC		BIT(3)
++#define EDMA_FWID	(0x07 << 8)
++#define TCCMODE		BIT(11)
++#define EDMA_TCC(t)	((t) << 12)
++#define TCINTEN		BIT(20)
++#define ITCINTEN	BIT(21)
++#define TCCHEN		BIT(22)
++#define ITCCHEN		BIT(23)
++
++/*ch_status paramater of callback function possible values*/
++#define DMA_COMPLETE 1
++#define DMA_CC_ERROR 2
++#define DMA_TC1_ERROR 3
++#define DMA_TC2_ERROR 4
++
++enum address_mode {
++	INCR = 0,
++	FIFO = 1
++};
++
++enum fifo_width {
++	W8BIT = 0,
++	W16BIT = 1,
++	W32BIT = 2,
++	W64BIT = 3,
++	W128BIT = 4,
++	W256BIT = 5
++};
++
++enum dma_event_q {
++	EVENTQ_0 = 0,
++	EVENTQ_1 = 1,
++	EVENTQ_2 = 2,
++	EVENTQ_3 = 3,
++	EVENTQ_DEFAULT = -1
++};
++
++enum sync_dimension {
++	ASYNC = 0,
++	ABSYNC = 1
++};
++
++#define EDMA_CTLR_CHAN(ctlr, chan)	(((ctlr) << 16) | (chan))
++#define EDMA_CTLR(i)			((i) >> 16)
++#define EDMA_CHAN_SLOT(i)		((i) & 0xffff)
++
++#define EDMA_CHANNEL_ANY		-1	/* for edma_alloc_channel() */
++#define EDMA_SLOT_ANY			-1	/* for edma_alloc_slot() */
++#define EDMA_CONT_PARAMS_ANY		 1001
++#define EDMA_CONT_PARAMS_FIXED_EXACT	 1002
++#define EDMA_CONT_PARAMS_FIXED_NOT_EXACT 1003
++
++#define EDMA_MAX_CC               2
++
++/* alloc/free DMA channels and their dedicated parameter RAM slots */
++int edma_alloc_channel(int channel,
++	void (*callback)(unsigned channel, u16 ch_status, void *data),
++	void *data, enum dma_event_q);
++void edma_free_channel(unsigned channel);
++
++/* alloc/free parameter RAM slots */
++int edma_alloc_slot(unsigned ctlr, int slot);
++void edma_free_slot(unsigned slot);
++
++/* alloc/free a set of contiguous parameter RAM slots */
++int edma_alloc_cont_slots(unsigned ctlr, unsigned int id, int slot, int count);
++int edma_free_cont_slots(unsigned slot, int count);
++
++/* calls that operate on part of a parameter RAM slot */
++void edma_set_src(unsigned slot, dma_addr_t src_port,
++				enum address_mode mode, enum fifo_width);
++void edma_set_dest(unsigned slot, dma_addr_t dest_port,
++				 enum address_mode mode, enum fifo_width);
++void edma_get_position(unsigned slot, dma_addr_t *src, dma_addr_t *dst);
++void edma_set_src_index(unsigned slot, s16 src_bidx, s16 src_cidx);
++void edma_set_dest_index(unsigned slot, s16 dest_bidx, s16 dest_cidx);
++void edma_set_transfer_params(unsigned slot, u16 acnt, u16 bcnt, u16 ccnt,
++		u16 bcnt_rld, enum sync_dimension sync_mode);
++void edma_link(unsigned from, unsigned to);
++void edma_unlink(unsigned from);
++
++/* calls that operate on an entire parameter RAM slot */
++void edma_write_slot(unsigned slot, const struct edmacc_param *params);
++void edma_read_slot(unsigned slot, struct edmacc_param *params);
++
++/* channel control operations */
++int edma_start(unsigned channel);
++void edma_stop(unsigned channel);
++void edma_clean_channel(unsigned channel);
++void edma_clear_event(unsigned channel);
++void edma_pause(unsigned channel);
++void edma_resume(unsigned channel);
++
++struct edma_rsv_info {
++
++	const s16	(*rsv_chans)[2];
++	const s16	(*rsv_slots)[2];
++};
++
++/* platform_data for EDMA driver */
++struct edma_soc_info {
++
++	/* how many dma resources of each type */
++	unsigned	n_channel;
++	unsigned	n_region;
++	unsigned	n_slot;
++	unsigned	n_tc;
++	unsigned	n_cc;
++	/*
++	 * Default queue is expected to be a low-priority queue.
++	 * This way, long transfers on the default queue started
++	 * by the codec engine will not cause audio defects.
++	 */
++	enum dma_event_q	default_queue;
++
++	/* Resource reservation for other cores */
++	struct edma_rsv_info	*rsv;
++
++	const s8	(*queue_tc_mapping)[2];
++	const s8	(*queue_priority_mapping)[2];
++};
++
++#endif
+diff --git a/include/linux/platform_data/spi-davinci.h b/include/linux/platform_data/spi-davinci.h
+index 7af305b..8dc2fa47 100644
+--- a/include/linux/platform_data/spi-davinci.h
++++ b/include/linux/platform_data/spi-davinci.h
+@@ -19,7 +19,7 @@
+ #ifndef __ARCH_ARM_DAVINCI_SPI_H
+ #define __ARCH_ARM_DAVINCI_SPI_H
+ 
+-#include <mach/edma.h>
++#include <linux/platform_data/edma.h>
+ 
+ #define SPI_INTERN_CS	0xFF
+ 
+diff --git a/sound/soc/davinci/davinci-evm.c b/sound/soc/davinci/davinci-evm.c
+index 484b22c..fd7c45b 100644
+--- a/sound/soc/davinci/davinci-evm.c
++++ b/sound/soc/davinci/davinci-evm.c
+@@ -14,6 +14,7 @@
+ #include <linux/timer.h>
+ #include <linux/interrupt.h>
+ #include <linux/platform_device.h>
++#include <linux/platform_data/edma.h>
+ #include <linux/i2c.h>
+ #include <sound/core.h>
+ #include <sound/pcm.h>
+diff --git a/sound/soc/davinci/davinci-pcm.c b/sound/soc/davinci/davinci-pcm.c
+index afab81f..9bdd71b 100644
+--- a/sound/soc/davinci/davinci-pcm.c
++++ b/sound/soc/davinci/davinci-pcm.c
+@@ -17,6 +17,7 @@
+ #include <linux/dma-mapping.h>
+ #include <linux/kernel.h>
+ #include <linux/genalloc.h>
++#include <linux/platform_data/edma.h>
+ 
+ #include <sound/core.h>
+ #include <sound/pcm.h>
+diff --git a/sound/soc/davinci/davinci-pcm.h b/sound/soc/davinci/davinci-pcm.h
+index b6ef703..fbb710c 100644
+--- a/sound/soc/davinci/davinci-pcm.h
++++ b/sound/soc/davinci/davinci-pcm.h
+@@ -14,7 +14,7 @@
+ 
+ #include <linux/genalloc.h>
+ #include <linux/platform_data/davinci_asp.h>
+-#include <mach/edma.h>
++#include <linux/platform_data/edma.h>
+ 
+ struct davinci_pcm_dma_params {
+ 	int channel;			/* sync dma channel ID */
+diff --git a/sound/soc/davinci/davinci-sffsdr.c b/sound/soc/davinci/davinci-sffsdr.c
+index 5be65aa..76ea586 100644
+--- a/sound/soc/davinci/davinci-sffsdr.c
++++ b/sound/soc/davinci/davinci-sffsdr.c
+@@ -17,6 +17,7 @@
+ #include <linux/timer.h>
+ #include <linux/interrupt.h>
+ #include <linux/platform_device.h>
++#include <linux/platform_data/edma.h>
+ #include <linux/gpio.h>
+ #include <sound/core.h>
+ #include <sound/pcm.h>
+@@ -28,12 +29,14 @@
+ #include <asm/plat-sffsdr/sffsdr-fpga.h>
+ #endif
+ 
+-#include <mach/edma.h>
+ 
+ #include "../codecs/pcm3008.h"
+ #include "davinci-pcm.h"
+ #include "davinci-i2s.h"
+ 
++#define DAVINCI_DMA_MCBSP_TX	2
++#define DAVINCI_DMA_MCBSP_RX	3
++
+ /*
+  * CLKX and CLKR are the inputs for the Sample Rate Generator.
+  * FSX and FSR are outputs, driven by the sample Rate Generator.
+-- 
+1.7.10.4
+
diff --git a/patches/dma/0003-ARM-edma-remove-unused-transfer-controller-handlers.patch b/patches/dma/0003-ARM-edma-remove-unused-transfer-controller-handlers.patch
new file mode 100644
index 000000000..80778cbd6
--- /dev/null
+++ b/patches/dma/0003-ARM-edma-remove-unused-transfer-controller-handlers.patch
@@ -0,0 +1,73 @@
+From caf83acee1e6cb462cd0ee435e61e4a1f10b9ae8 Mon Sep 17 00:00:00 2001
+From: Matt Porter <mporter@ti.com>
+Date: Tue, 5 Mar 2013 15:53:08 -0500
+Subject: [PATCH 03/10] ARM: edma: remove unused transfer controller handlers
+
+Fix the build on OMAP: these IRQs are undefined on AM33xx.
+The transfer controller error interrupt handlers were hardcoded
+as disabled, so they are unused code; simply remove them.
+
+Signed-off-by: Matt Porter <mporter@ti.com>
+Acked-by: Sekhar Nori <nsekhar@ti.com>
+---
+ arch/arm/common/edma.c |   37 -------------------------------------
+ 1 file changed, 37 deletions(-)
+
+diff --git a/arch/arm/common/edma.c b/arch/arm/common/edma.c
+index f112be7..f6aa4fa 100644
+--- a/arch/arm/common/edma.c
++++ b/arch/arm/common/edma.c
+@@ -494,26 +494,6 @@ static irqreturn_t dma_ccerr_handler(int irq, void *data)
+ 	return IRQ_HANDLED;
+ }
+ 
+-/******************************************************************************
+- *
+- * Transfer controller error interrupt handlers
+- *
+- *****************************************************************************/
+-
+-#define tc_errs_handled	false	/* disabled as long as they're NOPs */
+-
+-static irqreturn_t dma_tc0err_handler(int irq, void *data)
+-{
+-	dev_dbg(data, "dma_tc0err_handler\n");
+-	return IRQ_HANDLED;
+-}
+-
+-static irqreturn_t dma_tc1err_handler(int irq, void *data)
+-{
+-	dev_dbg(data, "dma_tc1err_handler\n");
+-	return IRQ_HANDLED;
+-}
+-
+ static int reserve_contiguous_slots(int ctlr, unsigned int id,
+ 				     unsigned int num_slots,
+ 				     unsigned int start_slot)
+@@ -1541,23 +1521,6 @@ static int __init edma_probe(struct platform_device *pdev)
+ 		arch_num_cc++;
+ 	}
+ 
+-	if (tc_errs_handled) {
+-		status = request_irq(IRQ_TCERRINT0, dma_tc0err_handler, 0,
+-					"edma_tc0", &pdev->dev);
+-		if (status < 0) {
+-			dev_dbg(&pdev->dev, "request_irq %d failed --> %d\n",
+-				IRQ_TCERRINT0, status);
+-			return status;
+-		}
+-		status = request_irq(IRQ_TCERRINT, dma_tc1err_handler, 0,
+-					"edma_tc1", &pdev->dev);
+-		if (status < 0) {
+-			dev_dbg(&pdev->dev, "request_irq %d --> %d\n",
+-				IRQ_TCERRINT, status);
+-			return status;
+-		}
+-	}
+-
+ 	return 0;
+ 
+ fail:
+-- 
+1.7.10.4
+
diff --git a/patches/dma/0004-ARM-edma-add-AM33XX-support-to-the-private-EDMA-API.patch b/patches/dma/0004-ARM-edma-add-AM33XX-support-to-the-private-EDMA-API.patch
new file mode 100644
index 000000000..ecfbef6c2
--- /dev/null
+++ b/patches/dma/0004-ARM-edma-add-AM33XX-support-to-the-private-EDMA-API.patch
@@ -0,0 +1,406 @@
+From b67550691f20f7a6cb0050a00e53cb6eee89dfec Mon Sep 17 00:00:00 2001
+From: Matt Porter <mporter@ti.com>
+Date: Tue, 5 Mar 2013 15:53:09 -0500
+Subject: [PATCH 04/10] ARM: edma: add AM33XX support to the private EDMA API
+
+Adds support for parsing the TI EDMA DT data into the
+required EDMA private API platform data. Enables runtime
+PM support to initialize the EDMA hwmod. Adds AM33XX EDMA
+crossbar event mux support. Enables build on OMAP.
+
+Signed-off-by: Matt Porter <mporter@ti.com>
+Acked-by: Sekhar Nori <nsekhar@ti.com>
+---
+ arch/arm/common/edma.c             |  300 ++++++++++++++++++++++++++++++++++--
+ arch/arm/mach-omap2/Kconfig        |    1 +
+ include/linux/platform_data/edma.h |    1 +
+ 3 files changed, 292 insertions(+), 10 deletions(-)
+
+diff --git a/arch/arm/common/edma.c b/arch/arm/common/edma.c
+index f6aa4fa..b07b792 100644
+--- a/arch/arm/common/edma.c
++++ b/arch/arm/common/edma.c
+@@ -24,6 +24,13 @@
+ #include <linux/platform_device.h>
+ #include <linux/io.h>
+ #include <linux/slab.h>
++#include <linux/edma.h>
++#include <linux/err.h>
++#include <linux/of_address.h>
++#include <linux/of_device.h>
++#include <linux/of_dma.h>
++#include <linux/of_irq.h>
++#include <linux/pm_runtime.h>
+ 
+ #include <linux/platform_data/edma.h>
+ 
+@@ -1369,31 +1376,278 @@ void edma_clear_event(unsigned channel)
+ EXPORT_SYMBOL(edma_clear_event);
+ 
+ /*-----------------------------------------------------------------------*/
++static int edma_of_read_u32_to_s8_array(const struct device_node *np,
++					 const char *propname, s8 *out_values,
++					 size_t sz)
++{
++	int ret;
++
++	ret = of_property_read_u8_array(np, propname, out_values, sz);
++	if (ret)
++		return ret;
++
++	/* Terminate it */
++	*out_values++ = -1;
++	*out_values++ = -1;
++
++	return 0;
++}
++
++static int edma_of_read_u32_to_s16_array(const struct device_node *np,
++					 const char *propname, s16 *out_values,
++					 size_t sz)
++{
++	int ret;
++
++	ret = of_property_read_u16_array(np, propname, out_values, sz);
++	if (ret)
++		return ret;
++
++	/* Terminate it */
++	*out_values++ = -1;
++	*out_values++ = -1;
++
++	return 0;
++}
++
++static int edma_xbar_event_map(struct device *dev,
++			       struct device_node *node,
++			       struct edma_soc_info *pdata, int len)
++{
++	int ret = 0;
++	int i;
++	struct resource res;
++	void *xbar;
++	const s16 (*xbar_chans)[2];
++	u32 shift, offset, mux;
++
++	xbar_chans = devm_kzalloc(dev,
++				  len/sizeof(s16) + 2*sizeof(s16),
++				  GFP_KERNEL);
++	if (!xbar_chans)
++		return -ENOMEM;
++
++	ret = of_address_to_resource(node, 1, &res);
++	if (ret)
++		return -EIO;
++
++	xbar = devm_ioremap(dev, res.start, resource_size(&res));
++	if (!xbar)
++		return -ENOMEM;
++
++	ret = edma_of_read_u32_to_s16_array(node,
++					    "ti,edma-xbar-event-map",
++					    (s16 *)xbar_chans,
++					    len/sizeof(u32));
++	if (ret)
++		return -EIO;
++
++	for (i = 0; xbar_chans[i][0] != -1; i++) {
++		shift = (xbar_chans[i][1] % 4) * 8;
++		offset = xbar_chans[i][1] >> 2;
++		offset <<= 2;
++		mux = readl((void *)((u32)xbar + offset));
++		mux &= ~(0xff << shift);
++		mux |= xbar_chans[i][0] << shift;
++		writel(mux, (void *)((u32)xbar + offset));
++	}
++
++	pdata->xbar_chans = xbar_chans;
++
++	return 0;
++}
++
++static int edma_of_parse_dt(struct device *dev,
++			    struct device_node *node,
++			    struct edma_soc_info *pdata)
++{
++	int ret = 0;
++	u32 value;
++	struct property *prop;
++	size_t sz;
++	struct edma_rsv_info *rsv_info;
++	const s16 (*rsv_chans)[2], (*rsv_slots)[2];
++	const s8 (*queue_tc_map)[2], (*queue_priority_map)[2];
++
++	memset(pdata, 0, sizeof(struct edma_soc_info));
++
++	ret = of_property_read_u32(node, "dma-channels", &value);
++	if (ret < 0)
++		return ret;
++	pdata->n_channel = value;
++
++	ret = of_property_read_u32(node, "ti,edma-regions", &value);
++	if (ret < 0)
++		return ret;
++	pdata->n_region = value;
++
++	ret = of_property_read_u32(node, "ti,edma-slots", &value);
++	if (ret < 0)
++		return ret;
++	pdata->n_slot = value;
++
++	pdata->n_cc = 1;
++	pdata->n_tc = 3;
++
++	rsv_info =
++		devm_kzalloc(dev, sizeof(struct edma_rsv_info), GFP_KERNEL);
++	if (!rsv_info)
++		return -ENOMEM;
++	pdata->rsv = rsv_info;
++
++	/* Build the reserved channel/slots arrays */
++	prop = of_find_property(node, "ti,edma-reserved-channels", &sz);
++	if (prop) {
++		rsv_chans = devm_kzalloc(dev,
++					 sz/sizeof(s16) + 2*sizeof(s16),
++					 GFP_KERNEL);
++		if (!rsv_chans)
++			return -ENOMEM;
++		pdata->rsv->rsv_chans = rsv_chans;
++
++		ret = edma_of_read_u32_to_s16_array(node,
++						    "ti,edma-reserved-channels",
++						    (s16 *)rsv_chans,
++						    sz/sizeof(u32));
++		if (ret < 0)
++			return ret;
++	}
+ 
+-static int __init edma_probe(struct platform_device *pdev)
++	prop = of_find_property(node, "ti,edma-reserved-slots", &sz);
++	if (prop) {
++		rsv_slots = devm_kzalloc(dev,
++					 sz/sizeof(s16) + 2*sizeof(s16),
++					 GFP_KERNEL);
++		if (!rsv_slots)
++			return -ENOMEM;
++		pdata->rsv->rsv_slots = rsv_slots;
++
++		ret = edma_of_read_u32_to_s16_array(node,
++						    "ti,edma-reserved-slots",
++						    (s16 *)rsv_slots,
++						    sz/sizeof(u32));
++		if (ret < 0)
++			return ret;
++	}
++
++	prop = of_find_property(node, "ti,edma-queue-tc-map", &sz);
++	if (!prop)
++		return -EINVAL;
++
++	queue_tc_map = devm_kzalloc(dev,
++				    sz/sizeof(s8) + 2*sizeof(s8),
++				    GFP_KERNEL);
++	if (!queue_tc_map)
++		return -ENOMEM;
++	pdata->queue_tc_mapping = queue_tc_map;
++
++	ret = edma_of_read_u32_to_s8_array(node,
++					   "ti,edma-queue-tc-map",
++					   (s8 *)queue_tc_map,
++					   sz/sizeof(u32));
++	if (ret < 0)
++		return ret;
++
++	prop = of_find_property(node, "ti,edma-queue-priority-map", &sz);
++	if (!prop)
++		return -EINVAL;
++
++	queue_priority_map = devm_kzalloc(dev,
++					  sz/sizeof(s8) + 2*sizeof(s8),
++					  GFP_KERNEL);
++	if (!queue_priority_map)
++		return -ENOMEM;
++	pdata->queue_priority_mapping = queue_priority_map;
++
++	ret = edma_of_read_u32_to_s8_array(node,
++					   "ti,edma-queue-priority-map",
++					   (s8 *)queue_priority_map,
++					   sz/sizeof(u32));
++	if (ret < 0)
++		return ret;
++
++	ret = of_property_read_u32(node, "ti,edma-default-queue", &value);
++	if (ret < 0)
++		return ret;
++	pdata->default_queue = value;
++
++	prop = of_find_property(node, "ti,edma-xbar-event-map", &sz);
++	if (prop)
++		ret = edma_xbar_event_map(dev, node, pdata, sz);
++
++	return ret;
++}
++
++static struct of_dma_filter_info edma_filter_info = {
++	.filter_fn = edma_filter_fn,
++};
++
++static int edma_probe(struct platform_device *pdev)
+ {
+ 	struct edma_soc_info	**info = pdev->dev.platform_data;
++	struct edma_soc_info	*ninfo[EDMA_MAX_CC] = {NULL, NULL};
++	struct edma_soc_info	tmpinfo;
+ 	const s8		(*queue_priority_mapping)[2];
+ 	const s8		(*queue_tc_mapping)[2];
+ 	int			i, j, off, ln, found = 0;
+ 	int			status = -1;
+ 	const s16		(*rsv_chans)[2];
+ 	const s16		(*rsv_slots)[2];
++	const s16		(*xbar_chans)[2];
+ 	int			irq[EDMA_MAX_CC] = {0, 0};
+ 	int			err_irq[EDMA_MAX_CC] = {0, 0};
+-	struct resource		*r[EDMA_MAX_CC] = {NULL};
++	struct resource		*r[EDMA_MAX_CC] = {NULL, NULL};
++	struct resource		res[EDMA_MAX_CC];
+ 	resource_size_t		len[EDMA_MAX_CC];
+ 	char			res_name[10];
+ 	char			irq_name[10];
++	struct device_node	*node = pdev->dev.of_node;
++	struct device		*dev = &pdev->dev;
++	int			ret;
++
++	if (node) {
++		/* Check if this is a second instance registered */
++		if (arch_num_cc) {
++			dev_err(dev, "only one EDMA instance is supported via DT\n");
++			return -ENODEV;
++		}
++		info = ninfo;
++		edma_of_parse_dt(dev, node, &tmpinfo);
++		info[0] = &tmpinfo;
++
++		dma_cap_set(DMA_SLAVE, edma_filter_info.dma_cap);
++		of_dma_controller_register(dev->of_node,
++					   of_dma_simple_xlate,
++					   &edma_filter_info);
++	}
+ 
+ 	if (!info)
+ 		return -ENODEV;
+ 
++	pm_runtime_enable(dev);
++	ret = pm_runtime_get_sync(dev);
++	if (ret < 0) {
++		dev_err(dev, "pm_runtime_get_sync() failed\n");
++		return ret;
++	}
++
+ 	for (j = 0; j < EDMA_MAX_CC; j++) {
+-		sprintf(res_name, "edma_cc%d", j);
+-		r[j] = platform_get_resource_byname(pdev, IORESOURCE_MEM,
++		if (!info[j]) {
++			if (!found)
++				return -ENODEV;
++			break;
++		}
++		if (node) {
++			ret = of_address_to_resource(node, j, &res[j]);
++			if (!ret)
++				r[j] = &res[j];
++		} else {
++			sprintf(res_name, "edma_cc%d", j);
++			r[j] = platform_get_resource_byname(pdev,
++						IORESOURCE_MEM,
+ 						res_name);
+-		if (!r[j] || !info[j]) {
++		}
++		if (!r[j]) {
+ 			if (found)
+ 				break;
+ 			else
+@@ -1468,8 +1722,22 @@ static int __init edma_probe(struct platform_device *pdev)
+ 			}
+ 		}
+ 
+-		sprintf(irq_name, "edma%d", j);
+-		irq[j] = platform_get_irq_byname(pdev, irq_name);
++		/* Clear the xbar mapped channels in unused list */
++		xbar_chans = info[j]->xbar_chans;
++		if (xbar_chans) {
++			for (i = 0; xbar_chans[i][1] != -1; i++) {
++				off = xbar_chans[i][1];
++				clear_bits(off, 1,
++					edma_cc[j]->edma_unused);
++			}
++		}
++
++		if (node)
++			irq[j] = irq_of_parse_and_map(node, 0);
++		else {
++			sprintf(irq_name, "edma%d", j);
++			irq[j] = platform_get_irq_byname(pdev, irq_name);
++		}
+ 		edma_cc[j]->irq_res_start = irq[j];
+ 		status = request_irq(irq[j], dma_irq_handler, 0, "edma",
+ 					&pdev->dev);
+@@ -1479,8 +1747,12 @@ static int __init edma_probe(struct platform_device *pdev)
+ 			goto fail;
+ 		}
+ 
+-		sprintf(irq_name, "edma%d_err", j);
+-		err_irq[j] = platform_get_irq_byname(pdev, irq_name);
++		if (node)
++			err_irq[j] = irq_of_parse_and_map(node, 2);
++		else {
++			sprintf(irq_name, "edma%d_err", j);
++			err_irq[j] = platform_get_irq_byname(pdev, irq_name);
++		}
+ 		edma_cc[j]->irq_res_end = err_irq[j];
+ 		status = request_irq(err_irq[j], dma_ccerr_handler, 0,
+ 					"edma_error", &pdev->dev);
+@@ -1541,9 +1813,17 @@ fail1:
+ 	return status;
+ }
+ 
++static const struct of_device_id edma_of_ids[] = {
++	{ .compatible = "ti,edma3", },
++	{}
++};
+ 
+ static struct platform_driver edma_driver = {
+-	.driver.name	= "edma",
++	.driver = {
++		.name	= "edma",
++		.of_match_table = edma_of_ids,
++	},
++	.probe = edma_probe,
+ };
+ 
+ static int __init edma_init(void)
+diff --git a/arch/arm/mach-omap2/Kconfig b/arch/arm/mach-omap2/Kconfig
+index 49ac3df..d3b433d 100644
+--- a/arch/arm/mach-omap2/Kconfig
++++ b/arch/arm/mach-omap2/Kconfig
+@@ -16,6 +16,7 @@ config ARCH_OMAP2PLUS
+ 	select PINCTRL
+ 	select PROC_DEVICETREE if PROC_FS
+ 	select SPARSE_IRQ
++	select TI_PRIV_EDMA
+ 	select USE_OF
+ 	help
+ 	  Systems based on OMAP2, OMAP3, OMAP4 or OMAP5
+diff --git a/include/linux/platform_data/edma.h b/include/linux/platform_data/edma.h
+index 2344ea2..ffc1fb2 100644
+--- a/include/linux/platform_data/edma.h
++++ b/include/linux/platform_data/edma.h
+@@ -177,6 +177,7 @@ struct edma_soc_info {
+ 
+ 	const s8	(*queue_tc_mapping)[2];
+ 	const s8	(*queue_priority_mapping)[2];
++	const s16	(*xbar_chans)[2];
+ };
+ 
+ #endif
+-- 
+1.7.10.4
+
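The crossbar handling in the patch above is the densest part of the series: each crossbar-mapped DMA channel owns one byte of a 32-bit crossbar register (reached through the second reg range of the DT node), so edma_xbar_event_map() derives a register offset and a byte shift from the channel number and read-modify-writes the event number into that byte lane. The standalone C sketch below only replays that arithmetic for the <1 12>, <2 13> pairs used in the binding example; it is illustrative and does not touch hardware.

#include <stdio.h>

/*
 * Replays the offset/shift math from edma_xbar_event_map(): four
 * channels share one 32-bit crossbar register, one byte per channel.
 * The event/channel pairs follow the ti-edma binding example and are
 * purely illustrative.
 */
int main(void)
{
	static const int xbar_chans[][2] = { {1, 12}, {2, 13}, {-1, -1} };
	unsigned int shift, offset;
	int i;

	for (i = 0; xbar_chans[i][0] != -1; i++) {
		shift  = (xbar_chans[i][1] % 4) * 8;   /* byte lane in the register */
		offset = (xbar_chans[i][1] >> 2) << 2; /* 32-bit register offset    */
		printf("event %d -> channel %d: reg offset 0x%02x, bits %u..%u\n",
		       xbar_chans[i][0], xbar_chans[i][1],
		       offset, shift, shift + 7);
	}
	return 0;
}

Both example pairs land in the register at offset 0xc from the crossbar base, so channels 12 and 13 share one mux register and differ only in byte lane.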
diff --git a/patches/dma/0005-dmaengine-edma-enable-build-for-AM33XX.patch b/patches/dma/0005-dmaengine-edma-enable-build-for-AM33XX.patch
new file mode 100644
index 000000000..b489f7776
--- /dev/null
+++ b/patches/dma/0005-dmaengine-edma-enable-build-for-AM33XX.patch
@@ -0,0 +1,28 @@
+From 812977b2500481abd7a28e2726e7f6f5ce46d350 Mon Sep 17 00:00:00 2001
+From: Matt Porter <mporter@ti.com>
+Date: Tue, 5 Mar 2013 15:53:10 -0500
+Subject: [PATCH 05/10] dmaengine: edma: enable build for AM33XX
+
+Enable TI EDMA option on OMAP.
+
+Signed-off-by: Matt Porter <mporter@ti.com>
+---
+ drivers/dma/Kconfig |    2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig
+index 80b6997..3b7ea20 100644
+--- a/drivers/dma/Kconfig
++++ b/drivers/dma/Kconfig
+@@ -222,7 +222,7 @@ config SIRF_DMA
+ 
+ config TI_EDMA
+ 	tristate "TI EDMA support"
+-	depends on ARCH_DAVINCI
++	depends on ARCH_DAVINCI || ARCH_OMAP
+ 	select DMA_ENGINE
+ 	select DMA_VIRTUAL_CHANNELS
+ 	default n
+-- 
+1.7.10.4
+
diff --git a/patches/dma/0006-dmaengine-edma-Add-TI-EDMA-device-tree-binding.patch b/patches/dma/0006-dmaengine-edma-Add-TI-EDMA-device-tree-binding.patch
new file mode 100644
index 000000000..4a5f7015f
--- /dev/null
+++ b/patches/dma/0006-dmaengine-edma-Add-TI-EDMA-device-tree-binding.patch
@@ -0,0 +1,72 @@
+From 32acacca31bd20f47d78ae0608901ab15e406520 Mon Sep 17 00:00:00 2001
+From: Matt Porter <mporter@ti.com>
+Date: Tue, 5 Mar 2013 15:53:11 -0500
+Subject: [PATCH 06/10] dmaengine: edma: Add TI EDMA device tree binding
+
+The binding definition is based on the generic DMA controller
+binding.
+
+Signed-off-by: Matt Porter <mporter@ti.com>
+---
+ Documentation/devicetree/bindings/dma/ti-edma.txt |   49 +++++++++++++++++++++
+ 1 file changed, 49 insertions(+)
+ create mode 100644 Documentation/devicetree/bindings/dma/ti-edma.txt
+
+diff --git a/Documentation/devicetree/bindings/dma/ti-edma.txt b/Documentation/devicetree/bindings/dma/ti-edma.txt
+new file mode 100644
+index 0000000..075a60e3
+--- /dev/null
++++ b/Documentation/devicetree/bindings/dma/ti-edma.txt
+@@ -0,0 +1,49 @@
++TI EDMA
++
++Required properties:
++- compatible : "ti,edma3"
++- ti,hwmods: Names of the hwmods associated with the EDMA
++- ti,edma-regions: Number of regions
++- ti,edma-slots: Number of slots
++- ti,edma-queue-tc-map: List of transfer control to queue mappings
++- ti,edma-queue-priority-map: List of queue priority mappings
++- ti,edma-default-queue: Default queue value
++
++Optional properties:
++- ti,edma-reserved-channels: List of reserved channel regions
++- ti,edma-reserved-slots: List of reserved slot regions
++- ti,edma-xbar-event-map: Crossbar event to channel map
++
++Example:
++
++edma: edma@49000000 {
++	reg = <0x49000000 0x10000>;
++	interrupt-parent = <&intc>;
++	interrupts = <12 13 14>;
++	compatible = "ti,edma3";
++	ti,hwmods = "tpcc", "tptc0", "tptc1", "tptc2";
++	#dma-cells = <1>;
++	dma-channels = <64>;
++	ti,edma-regions = <4>;
++	ti,edma-slots = <256>;
++	ti,edma-reserved-channels = <0  2
++				     14 2
++				     26 6
++				     48 4
++				     56 8>;
++	ti,edma-reserved-slots = <0  2
++				  14 2
++				  26 6
++				  48 4
++				  56 8
++				  64 127>;
++	ti,edma-queue-tc-map = <0 0
++				1 1
++				2 2>;
++	ti,edma-queue-priority-map = <0 0
++				      1 1
++				      2 2>;
++	ti,edma-default-queue = <0>;
++	ti,edma-xbar-event-map = <1 12
++				  2 13>;
++};
+-- 
+1.7.10.4
+
diff --git a/patches/dma/0007-ARM-dts-add-AM33XX-EDMA-support.patch b/patches/dma/0007-ARM-dts-add-AM33XX-EDMA-support.patch
new file mode 100644
index 000000000..fcfebfbf0
--- /dev/null
+++ b/patches/dma/0007-ARM-dts-add-AM33XX-EDMA-support.patch
@@ -0,0 +1,47 @@
+From 21edef00f860c07a0b39fc92f663f3e4a12c2c7d Mon Sep 17 00:00:00 2001
+From: Matt Porter <mporter@ti.com>
+Date: Tue, 5 Mar 2013 15:53:12 -0500
+Subject: [PATCH 07/10] ARM: dts: add AM33XX EDMA support
+
+Adds AM33XX EDMA support to the am33xx.dtsi as documented in
+Documentation/devicetree/bindings/dma/ti-edma.txt
+
+Signed-off-by: Matt Porter <mporter@ti.com>
+---
+ arch/arm/boot/dts/am33xx.dtsi |   20 ++++++++++++++++++++
+ 1 file changed, 20 insertions(+)
+
+diff --git a/arch/arm/boot/dts/am33xx.dtsi b/arch/arm/boot/dts/am33xx.dtsi
+index 0957645..aaf44122 100644
+--- a/arch/arm/boot/dts/am33xx.dtsi
++++ b/arch/arm/boot/dts/am33xx.dtsi
+@@ -87,6 +87,26 @@
+ 			reg = <0x48200000 0x1000>;
+ 		};
+ 
++		edma: edma@49000000 {
++			compatible = "ti,edma3";
++			ti,hwmods = "tpcc", "tptc0", "tptc1", "tptc2";
++			reg =	<0x49000000 0x10000>,
++				<0x44e10f90 0x10>;
++			interrupt-parent = <&intc>;
++			interrupts = <12 13 14>;
++			#dma-cells = <1>;
++			dma-channels = <64>;
++			ti,edma-regions = <4>;
++			ti,edma-slots = <256>;
++			ti,edma-queue-tc-map = <0 0
++						1 1
++						2 2>;
++			ti,edma-queue-priority-map = <0 0
++						      1 1
++						      2 2>;
++			ti,edma-default-queue = <0>;
++		};
++
+ 		gpio1: gpio@44e07000 {
+ 			compatible = "ti,omap4-gpio";
+ 			ti,hwmods = "gpio1";
+-- 
+1.7.10.4
+
diff --git a/patches/dma/0008-spi-omap2-mcspi-convert-to-dma_request_slave_channel.patch b/patches/dma/0008-spi-omap2-mcspi-convert-to-dma_request_slave_channel.patch
new file mode 100644
index 000000000..dedbe223f
--- /dev/null
+++ b/patches/dma/0008-spi-omap2-mcspi-convert-to-dma_request_slave_channel.patch
@@ -0,0 +1,91 @@
+From 99a278c9dc7819035a1dda6caef027c01102e56f Mon Sep 17 00:00:00 2001
+From: Matt Porter <mporter@ti.com>
+Date: Tue, 5 Mar 2013 15:53:13 -0500
+Subject: [PATCH 08/10] spi: omap2-mcspi: convert to
+ dma_request_slave_channel_compat()
+
+Convert dmaengine channel requests to use
+dma_request_slave_channel_compat(). This supports the DT case of
+platforms requiring channel selection from either the OMAP DMA or
+the EDMA engine. AM33xx only boots from DT and is the only user
+implementing EDMA, so in the !DT case we can default to the OMAP DMA
+filter.
+
+Signed-off-by: Matt Porter <mporter@ti.com>
+Acked-by: Mark Brown <broonie@opensource.wolfsonmicro.com>
+---
+ drivers/spi/spi-omap2-mcspi.c |   27 ++++++++++++++++++++-------
+ 1 file changed, 20 insertions(+), 7 deletions(-)
+
+diff --git a/drivers/spi/spi-omap2-mcspi.c b/drivers/spi/spi-omap2-mcspi.c
+index 893c3d7..38d0915 100644
+--- a/drivers/spi/spi-omap2-mcspi.c
++++ b/drivers/spi/spi-omap2-mcspi.c
+@@ -102,6 +102,9 @@ struct omap2_mcspi_dma {
+ 
+ 	struct completion dma_tx_completion;
+ 	struct completion dma_rx_completion;
++
++	char dma_rx_ch_name[14];
++	char dma_tx_ch_name[14];
+ };
+ 
+ /* use PIO for small transfers, avoiding DMA setup/teardown overhead and
+@@ -822,14 +825,23 @@ static int omap2_mcspi_request_dma(struct spi_device *spi)
+ 	dma_cap_zero(mask);
+ 	dma_cap_set(DMA_SLAVE, mask);
+ 	sig = mcspi_dma->dma_rx_sync_dev;
+-	mcspi_dma->dma_rx = dma_request_channel(mask, omap_dma_filter_fn, &sig);
++
++	mcspi_dma->dma_rx =
++		dma_request_slave_channel_compat(mask, omap_dma_filter_fn,
++						 &sig, &master->dev,
++						 mcspi_dma->dma_rx_ch_name);
++
+ 	if (!mcspi_dma->dma_rx) {
+ 		dev_err(&spi->dev, "no RX DMA engine channel for McSPI\n");
+ 		return -EAGAIN;
+ 	}
+ 
+ 	sig = mcspi_dma->dma_tx_sync_dev;
+-	mcspi_dma->dma_tx = dma_request_channel(mask, omap_dma_filter_fn, &sig);
++	mcspi_dma->dma_tx =
++		dma_request_slave_channel_compat(mask, omap_dma_filter_fn,
++						 &sig, &master->dev,
++						 mcspi_dma->dma_tx_ch_name);
++
+ 	if (!mcspi_dma->dma_tx) {
+ 		dev_err(&spi->dev, "no TX DMA engine channel for McSPI\n");
+ 		dma_release_channel(mcspi_dma->dma_rx);
+@@ -1240,12 +1252,13 @@ static int omap2_mcspi_probe(struct platform_device *pdev)
+ 		goto free_master;
+ 
+ 	for (i = 0; i < master->num_chipselect; i++) {
+-		char dma_ch_name[14];
++		char *dma_rx_ch_name = mcspi->dma_channels[i].dma_rx_ch_name;
++		char *dma_tx_ch_name = mcspi->dma_channels[i].dma_tx_ch_name;
+ 		struct resource *dma_res;
+ 
+-		sprintf(dma_ch_name, "rx%d", i);
++		sprintf(dma_rx_ch_name, "rx%d", i);
+ 		dma_res = platform_get_resource_byname(pdev, IORESOURCE_DMA,
+-				dma_ch_name);
++				dma_rx_ch_name);
+ 		if (!dma_res) {
+ 			dev_dbg(&pdev->dev, "cannot get DMA RX channel\n");
+ 			status = -ENODEV;
+@@ -1253,9 +1266,9 @@ static int omap2_mcspi_probe(struct platform_device *pdev)
+ 		}
+ 
+ 		mcspi->dma_channels[i].dma_rx_sync_dev = dma_res->start;
+-		sprintf(dma_ch_name, "tx%d", i);
++		sprintf(dma_tx_ch_name, "tx%d", i);
+ 		dma_res = platform_get_resource_byname(pdev, IORESOURCE_DMA,
+-				dma_ch_name);
++				dma_tx_ch_name);
+ 		if (!dma_res) {
+ 			dev_dbg(&pdev->dev, "cannot get DMA TX channel\n");
+ 			status = -ENODEV;
+-- 
+1.7.10.4
+
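Both this SPI conversion and the omap_hsmmc conversion later in the series lean on dma_request_slave_channel_compat(), which must already exist in the target kernel tree. The idea, per the commit messages, is to take the channel described by the dmas/dma-names properties when booting from DT and otherwise fall back to the legacy filter-function lookup. A simplified sketch of that behaviour follows; the helper name below is made up and the body is an illustration, not a copy of the dmaengine implementation.

#include <linux/device.h>
#include <linux/dmaengine.h>
#include <linux/of.h>

/*
 * Simplified illustration of the fallback the converted drivers rely on:
 * prefer a channel described via "dmas"/"dma-names" in the device tree,
 * otherwise fall back to the legacy filter-based request. Hypothetical
 * helper name; the real compat helper lives in <linux/dmaengine.h>.
 */
static struct dma_chan *request_chan_dt_or_legacy(dma_cap_mask_t mask,
						  dma_filter_fn fn,
						  void *fn_param,
						  struct device *dev,
						  const char *name)
{
	struct dma_chan *chan = NULL;

	if (dev && dev->of_node)
		chan = dma_request_slave_channel(dev, name);	/* DT path */

	if (!chan)
		chan = dma_request_channel(mask, fn, fn_param);	/* legacy path */

	return chan;
}

With that in place the driver-side change really is just the request call; the channel numbers move from hwmod-populated IORESOURCE_DMA entries into the dmas properties added by the DTS patches.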
diff --git a/patches/dma/0009-spi-omap2-mcspi-add-generic-DMA-request-support-to-t.patch b/patches/dma/0009-spi-omap2-mcspi-add-generic-DMA-request-support-to-t.patch
new file mode 100644
index 000000000..c9dd0de1b
--- /dev/null
+++ b/patches/dma/0009-spi-omap2-mcspi-add-generic-DMA-request-support-to-t.patch
@@ -0,0 +1,58 @@
+From e3702f88813702a1d38784ee3bee6a8e133108fb Mon Sep 17 00:00:00 2001
+From: Matt Porter <mporter@ti.com>
+Date: Tue, 5 Mar 2013 15:53:14 -0500
+Subject: [PATCH 09/10] spi: omap2-mcspi: add generic DMA request support to
+ the DT binding
+
+The binding definition is based on the generic DMA request binding
+
+Signed-off-by: Matt Porter <mporter@ti.com>
+---
+ Documentation/devicetree/bindings/spi/omap-spi.txt |   27 +++++++++++++++++++-
+ 1 file changed, 26 insertions(+), 1 deletion(-)
+
+diff --git a/Documentation/devicetree/bindings/spi/omap-spi.txt b/Documentation/devicetree/bindings/spi/omap-spi.txt
+index 938809c..4c85c4c 100644
+--- a/Documentation/devicetree/bindings/spi/omap-spi.txt
++++ b/Documentation/devicetree/bindings/spi/omap-spi.txt
+@@ -10,7 +10,18 @@ Required properties:
+ 			  input. The default is D0 as input and
+ 			  D1 as output.
+ 
+-Example:
++Optional properties:
++- dmas: List of DMA specifiers with the controller specific format
++	as described in the generic DMA client binding. A tx and rx
++	specifier is required for each chip select.
++- dma-names: List of DMA request names. These strings correspond
++	1:1 with the DMA specifiers listed in dmas. The string naming
++	is to be "rxN" and "txN" for RX and TX requests,
++	respectively, where N equals the chip select number.
++
++Examples:
++
++[hwmod populated DMA resources]
+ 
+ mcspi1: mcspi@1 {
+     #address-cells = <1>;
+@@ -20,3 +31,17 @@ mcspi1: mcspi@1 {
+     ti,spi-num-cs = <4>;
+ };
+ 
++[generic DMA request binding]
++
++mcspi1: mcspi@1 {
++    #address-cells = <1>;
++    #size-cells = <0>;
++    compatible = "ti,omap4-mcspi";
++    ti,hwmods = "mcspi1";
++    ti,spi-num-cs = <2>;
++    dmas = <&edma 42
++	    &edma 43
++	    &edma 44
++	    &edma 45>;
++    dma-names = "tx0", "rx0", "tx1", "rx1";
++};
+-- 
+1.7.10.4
+
diff --git a/patches/dma/0010-ARM-dts-add-AM33XX-SPI-DMA-support.patch b/patches/dma/0010-ARM-dts-add-AM33XX-SPI-DMA-support.patch
new file mode 100644
index 000000000..d962e65ae
--- /dev/null
+++ b/patches/dma/0010-ARM-dts-add-AM33XX-SPI-DMA-support.patch
@@ -0,0 +1,43 @@
+From ec1e90d19468f3d9a8d74497169a8f5c5bb6961d Mon Sep 17 00:00:00 2001
+From: Matt Porter <mporter@ti.com>
+Date: Tue, 5 Mar 2013 15:53:15 -0500
+Subject: [PATCH 10/10] ARM: dts: add AM33XX SPI DMA support
+
+Adds DMA resources to the AM33XX SPI nodes.
+
+Signed-off-by: Matt Porter <mporter@ti.com>
+---
+ arch/arm/boot/dts/am33xx.dtsi |   10 ++++++++++
+ 1 file changed, 10 insertions(+)
+
+diff --git a/arch/arm/boot/dts/am33xx.dtsi b/arch/arm/boot/dts/am33xx.dtsi
+index aaf44122..a13d710 100644
+--- a/arch/arm/boot/dts/am33xx.dtsi
++++ b/arch/arm/boot/dts/am33xx.dtsi
+@@ -328,6 +328,11 @@
+ 			interrupt = <65>;
+ 			ti,spi-num-cs = <2>;
+ 			ti,hwmods = "spi0";
++			dmas = <&edma 16
++				&edma 17
++				&edma 18
++				&edma 19>;
++			dma-names = "tx0", "rx0", "tx1", "rx1";
+ 			status = "disabled";
+ 		};
+ 
+@@ -339,6 +344,11 @@
+ 			interrupt = <125>;
+ 			ti,spi-num-cs = <2>;
+ 			ti,hwmods = "spi1";
++			dmas = <&edma 42
++				&edma 43
++				&edma 44
++				&edma 45>;
++			dma-names = "tx0", "rx0", "tx1", "rx1";
+ 			status = "disabled";
+ 		};
+ 
+-- 
+1.7.10.4
+
diff --git a/patches/mmc/0001-mmc-omap_hsmmc-convert-to-dma_request_slave_channel_.patch b/patches/mmc/0001-mmc-omap_hsmmc-convert-to-dma_request_slave_channel_.patch
new file mode 100644
index 000000000..89d02ad9d
--- /dev/null
+++ b/patches/mmc/0001-mmc-omap_hsmmc-convert-to-dma_request_slave_channel_.patch
@@ -0,0 +1,46 @@
+From c551372a00e089c54d80ef2ca2ac384a4ba740ea Mon Sep 17 00:00:00 2001
+From: Matt Porter <mporter@ti.com>
+Date: Tue, 5 Mar 2013 16:13:22 -0500
+Subject: [PATCH 1/3] mmc: omap_hsmmc: convert to
+ dma_request_slave_channel_compat()
+
+Convert dmaengine channel requests to use
+dma_request_slave_channel_compat(). This supports platforms booting
+with or without DT populated.
+
+Signed-off-by: Matt Porter <mporter@ti.com>
+Acked-by: Tony Lindgren <tony@atomide.com>
+---
+ drivers/mmc/host/omap_hsmmc.c |   10 ++++++++--
+ 1 file changed, 8 insertions(+), 2 deletions(-)
+
+diff --git a/drivers/mmc/host/omap_hsmmc.c b/drivers/mmc/host/omap_hsmmc.c
+index bc58078..e79b12d 100644
+--- a/drivers/mmc/host/omap_hsmmc.c
++++ b/drivers/mmc/host/omap_hsmmc.c
+@@ -1915,14 +1915,20 @@ static int omap_hsmmc_probe(struct platform_device *pdev)
+ 	dma_cap_zero(mask);
+ 	dma_cap_set(DMA_SLAVE, mask);
+ 
+-	host->rx_chan = dma_request_channel(mask, omap_dma_filter_fn, &rx_req);
++	host->rx_chan =
++		dma_request_slave_channel_compat(mask, omap_dma_filter_fn,
++						 &rx_req, &pdev->dev, "rx");
++
+ 	if (!host->rx_chan) {
+ 		dev_err(mmc_dev(host->mmc), "unable to obtain RX DMA engine channel %u\n", rx_req);
+ 		ret = -ENXIO;
+ 		goto err_irq;
+ 	}
+ 
+-	host->tx_chan = dma_request_channel(mask, omap_dma_filter_fn, &tx_req);
++	host->tx_chan =
++		dma_request_slave_channel_compat(mask, omap_dma_filter_fn,
++						 &tx_req, &pdev->dev, "tx");
++
+ 	if (!host->tx_chan) {
+ 		dev_err(mmc_dev(host->mmc), "unable to obtain TX DMA engine channel %u\n", tx_req);
+ 		ret = -ENXIO;
+-- 
+1.7.10.4
+
diff --git a/patches/mmc/0002-mmc-omap_hsmmc-Skip-platform_get_resource_byname-for.patch b/patches/mmc/0002-mmc-omap_hsmmc-Skip-platform_get_resource_byname-for.patch
new file mode 100644
index 000000000..0d4e19f17
--- /dev/null
+++ b/patches/mmc/0002-mmc-omap_hsmmc-Skip-platform_get_resource_byname-for.patch
@@ -0,0 +1,62 @@
+From d60ff3aeb1a8b87cb4d9313a45d7f03fc9a081c9 Mon Sep 17 00:00:00 2001
+From: Santosh Shilimkar <santosh.shilimkar@ti.com>
+Date: Tue, 5 Mar 2013 16:13:23 -0500
+Subject: [PATCH 2/3] mmc: omap_hsmmc: Skip platform_get_resource_byname() for
+ dt case
+
+The MMC driver probe will abort in the DT case because the
+platform_get_resource_byname() lookup fails. Fix it by skipping the
+by-name resource lookup for device tree builds.
+
+The issue is currently hidden because hwmod populates the IO resources,
+which lets platform_get_resource_byname() and the probe succeed.
+
+Signed-off-by: Santosh Shilimkar <santosh.shilimkar@ti.com>
+---
+ drivers/mmc/host/omap_hsmmc.c |   28 +++++++++++++++-------------
+ 1 file changed, 15 insertions(+), 13 deletions(-)
+
+diff --git a/drivers/mmc/host/omap_hsmmc.c b/drivers/mmc/host/omap_hsmmc.c
+index e79b12d..8ae1225 100644
+--- a/drivers/mmc/host/omap_hsmmc.c
++++ b/drivers/mmc/host/omap_hsmmc.c
+@@ -1896,21 +1896,23 @@ static int omap_hsmmc_probe(struct platform_device *pdev)
+ 
+ 	omap_hsmmc_conf_bus_power(host);
+ 
+-	res = platform_get_resource_byname(pdev, IORESOURCE_DMA, "tx");
+-	if (!res) {
+-		dev_err(mmc_dev(host->mmc), "cannot get DMA TX channel\n");
+-		ret = -ENXIO;
+-		goto err_irq;
+-	}
+-	tx_req = res->start;
++	if (!pdev->dev.of_node) {
++		res = platform_get_resource_byname(pdev, IORESOURCE_DMA, "tx");
++		if (!res) {
++			dev_err(mmc_dev(host->mmc), "cannot get DMA TX channel\n");
++			ret = -ENXIO;
++			goto err_irq;
++		}
++		tx_req = res->start;
+ 
+-	res = platform_get_resource_byname(pdev, IORESOURCE_DMA, "rx");
+-	if (!res) {
+-		dev_err(mmc_dev(host->mmc), "cannot get DMA RX channel\n");
+-		ret = -ENXIO;
+-		goto err_irq;
++		res = platform_get_resource_byname(pdev, IORESOURCE_DMA, "rx");
++		if (!res) {
++			dev_err(mmc_dev(host->mmc), "cannot get DMA RX channel\n");
++			ret = -ENXIO;
++			goto err_irq;
++		}
++		rx_req = res->start;
+ 	}
+-	rx_req = res->start;
+ 
+ 	dma_cap_zero(mask);
+ 	dma_cap_set(DMA_SLAVE, mask);
+-- 
+1.7.10.4
+
diff --git a/patches/mmc/0003-mmc-omap_hsmmc-add-generic-DMA-request-support-to-th.patch b/patches/mmc/0003-mmc-omap_hsmmc-add-generic-DMA-request-support-to-th.patch
new file mode 100644
index 000000000..52f03494c
--- /dev/null
+++ b/patches/mmc/0003-mmc-omap_hsmmc-add-generic-DMA-request-support-to-th.patch
@@ -0,0 +1,60 @@
+From c9fbc12eee0da93a9f93c46a111588bd36cda7a6 Mon Sep 17 00:00:00 2001
+From: Matt Porter <mporter@ti.com>
+Date: Tue, 5 Mar 2013 16:13:24 -0500
+Subject: [PATCH 3/3] mmc: omap_hsmmc: add generic DMA request support to the
+ DT binding
+
+The binding definition is based on the generic DMA request binding.
+
+Signed-off-by: Matt Porter <mporter@ti.com>
+Acked-by: Tony Lindgren <tony@atomide.com>
+---
+ .../devicetree/bindings/mmc/ti-omap-hsmmc.txt      |   26 +++++++++++++++++++-
+ 1 file changed, 25 insertions(+), 1 deletion(-)
+
+diff --git a/Documentation/devicetree/bindings/mmc/ti-omap-hsmmc.txt b/Documentation/devicetree/bindings/mmc/ti-omap-hsmmc.txt
+index ed271fc..8c8908a 100644
+--- a/Documentation/devicetree/bindings/mmc/ti-omap-hsmmc.txt
++++ b/Documentation/devicetree/bindings/mmc/ti-omap-hsmmc.txt
+@@ -20,8 +20,29 @@ ti,dual-volt: boolean, supports dual voltage cards
+ ti,non-removable: non-removable slot (like eMMC)
+ ti,needs-special-reset: Requires a special softreset sequence
+ ti,needs-special-hs-handling: HSMMC IP needs special setting for handling High Speed
++dmas: List of DMA specifiers with the controller specific format
++as described in the generic DMA client binding. A tx and rx
++specifier is required.
++dma-names: List of DMA request names. These strings correspond
++1:1 with the DMA specifiers listed in dmas. The string naming is
++to be "rx" and "tx" for RX and TX DMA requests, respectively.
++
++Examples:
++
++[hwmod populated DMA resources]
++
++	mmc1: mmc@0x4809c000 {
++		compatible = "ti,omap4-hsmmc";
++		reg = <0x4809c000 0x400>;
++		ti,hwmods = "mmc1";
++		ti,dual-volt;
++		bus-width = <4>;
++		vmmc-supply = <&vmmc>; /* phandle to regulator node */
++		ti,non-removable;
++	};
++
++[generic DMA request binding]
+ 
+-Example:
+ 	mmc1: mmc@0x4809c000 {
+ 		compatible = "ti,omap4-hsmmc";
+ 		reg = <0x4809c000 0x400>;
+@@ -30,4 +51,7 @@ Example:
+ 		bus-width = <4>;
+ 		vmmc-supply = <&vmmc>; /* phandle to regulator node */
+ 		ti,non-removable;
++		dmas = <&edma 24
++			&edma 25>;
++		dma-names = "tx", "rx";
+ 	};
+-- 
+1.7.10.4
+
diff --git a/patches/ref_omap2plus_defconfig b/patches/ref_omap2plus_defconfig
index 94356cff8..96b74c250 100644
--- a/patches/ref_omap2plus_defconfig
+++ b/patches/ref_omap2plus_defconfig
@@ -460,6 +460,7 @@ CONFIG_PL310_ERRATA_727915=y
 # CONFIG_PL310_ERRATA_769419 is not set
 # CONFIG_ARM_ERRATA_775420 is not set
 CONFIG_ICST=y
+CONFIG_TI_PRIV_EDMA=y
 
 #
 # Bus support
@@ -2466,6 +2467,7 @@ CONFIG_DMADEVICES=y
 # CONFIG_AMBA_PL08X is not set
 # CONFIG_DW_DMAC is not set
 # CONFIG_TIMB_DMA is not set
+# CONFIG_TI_EDMA is not set
 # CONFIG_PL330_DMA is not set
 CONFIG_DMA_OMAP=y
 CONFIG_DMA_ENGINE=y
diff --git a/version.sh b/version.sh
index 5b39c3d03..c9fa1b54c 100644
--- a/version.sh
+++ b/version.sh
@@ -24,7 +24,7 @@ config="omap2plus_defconfig"
 #Kernel/Build
 KERNEL_REL=3.9
 KERNEL_TAG=${KERNEL_REL}-rc1
-BUILD=armv7-d1
+BUILD=armv7-d1.1
 
 #v3.X-rcX + upto SHA
 #KERNEL_SHA="b0af9cd9aab60ceb17d3ebabb9fdf4ff0a99cf50"
-- 
GitLab