Initial support for the SD/eMMC controller in the Amlogic S905/GXBB family of SoCs. Currently working for the SD and eMMC interfaces, but not yet tested for SDIO. Tested external SD card and internal eMMC on meson-gxbb-p200 and meson-gxbb-odroidc2. TODO items: - take full advantage of DMA-like descriptors instead of bounce buffer - add SDIO support --- drivers/mmc/host/Kconfig | 7 + drivers/mmc/host/Makefile | 1 + drivers/mmc/host/meson-gxbb.c | 909 ++++++++++++++++++++++++++++++++++++++++++ 3 files changed, 917 insertions(+) create mode 100644 drivers/mmc/host/meson-gxbb.c diff --git a/drivers/mmc/host/Kconfig b/drivers/mmc/host/Kconfig index 0aa484c10c0a..2363c0cee9cc 100644 --- a/drivers/mmc/host/Kconfig +++ b/drivers/mmc/host/Kconfig @@ -332,6 +332,13 @@ config MMC_SDHCI_IPROC If unsure, say N. +config MMC_MESON_GXBB + tristate "Amlogic S905/GXBB SD/MMC Host Controller support" + depends on ARCH_MESON && MMC + help + This selects support for the Amlogic SD/MMC Host Controller. + If you have a controller with this interface, say Y here. + config MMC_MOXART tristate "MOXART SD/MMC Host Controller support" depends on ARCH_MOXART && MMC diff --git a/drivers/mmc/host/Makefile b/drivers/mmc/host/Makefile index af918d261ff9..3e0de57f55b9 100644 --- a/drivers/mmc/host/Makefile +++ b/drivers/mmc/host/Makefile @@ -53,6 +53,7 @@ obj-$(CONFIG_MMC_JZ4740) += jz4740_mmc.o obj-$(CONFIG_MMC_VUB300) += vub300.o obj-$(CONFIG_MMC_USHC) += ushc.o obj-$(CONFIG_MMC_WMT) += wmt-sdmmc.o +obj-$(CONFIG_MMC_MESON_GXBB) += meson-gxbb.o obj-$(CONFIG_MMC_MOXART) += moxart-mmc.o obj-$(CONFIG_MMC_SUNXI) += sunxi-mmc.o obj-$(CONFIG_MMC_USDHI6ROL0) += usdhi6rol0.o diff --git a/drivers/mmc/host/meson-gxbb.c b/drivers/mmc/host/meson-gxbb.c new file mode 100644 index 000000000000..ecf7b4d2d330 --- /dev/null +++ b/drivers/mmc/host/meson-gxbb.c @@ -0,0 +1,909 @@ +/* + * This file is provided under a dual BSD/GPLv2 license. When using or + * redistributing this file, you may do so under either license. + * + * GPL LICENSE SUMMARY + * + * Copyright (c) 2016 BayLibre, SAS. + * Author: Kevin Hilman <khilman@xxxxxxxxxxxx> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, see <http://www.gnu.org/licenses/>. + * The full GNU General Public License is included in this distribution + * in the file called COPYING. + * + * BSD LICENSE + * + * Copyright (c) 2016 BayLibre, SAS. + * Author: Kevin Hilman <khilman@xxxxxxxxxxxx> + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. 
+ * * Neither the name of BayLibre, SAS nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/init.h>
+#include <linux/device.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/ioport.h>
+#include <linux/regmap.h>
+#include <linux/spinlock.h>
+#include <linux/dma-mapping.h>
+#include <linux/mmc/host.h>
+#include <linux/mmc/mmc.h>
+#include <linux/mmc/sdio.h>
+#include <linux/mmc/slot-gpio.h>
+#include <linux/io.h>
+#include <linux/clk.h>
+#include <linux/clk-provider.h>
+
+#define DRIVER_NAME "meson-gxbb-mmc"
+
+#define SD_EMMC_CLOCK 0x0
+#define CLK_DIV_SHIFT 0
+#define CLK_DIV_WIDTH 6
+#define CLK_DIV_MASK (0x3f << CLK_DIV_SHIFT)
+#define CLK_DIV_MAX 63
+#define CLK_SRC_SHIFT 6
+#define CLK_SRC_WIDTH 2
+#define CLK_SRC_MASK (0x3 << CLK_SRC_SHIFT)
+#define CLK_SRC_XTAL 0 /* external crystal */
+#define CLK_SRC_XTAL_RATE 24000000
+#define CLK_SRC_PLL 1 /* FCLK_DIV2 */
+#define CLK_SRC_PLL_RATE 1000000000
+#define CLK_PHASE_SHIFT 8
+#define CLK_PHASE_MASK (0x3 << CLK_PHASE_SHIFT)
+#define CLK_PHASE_0 0
+#define CLK_PHASE_90 1
+#define CLK_PHASE_180 2
+#define CLK_PHASE_270 3
+#define CLK_ALWAYS_ON BIT(24)
+
+#define SD_EMMC_DELAY 0x4
+#define SD_EMMC_ADJUST 0x8
+#define SD_EMMC_CALOUT 0x10
+#define SD_EMMC_START 0x40
+#define START_DESC_INIT BIT(0)
+#define START_DESC_BUSY BIT(1)
+#define START_DESC_ADDR_SHIFT 2
+#define START_DESC_ADDR_MASK (~0x3)
+
+#define SD_EMMC_CFG 0x44
+#define CFG_BUS_WIDTH_SHIFT 0
+#define CFG_BUS_WIDTH_MASK (0x3 << CFG_BUS_WIDTH_SHIFT)
+#define CFG_BUS_WIDTH_1 0x0
+#define CFG_BUS_WIDTH_4 0x1
+#define CFG_BUS_WIDTH_8 0x2
+#define CFG_DDR BIT(2)
+#define CFG_BLK_LEN_SHIFT 4
+#define CFG_BLK_LEN_MASK (0xf << CFG_BLK_LEN_SHIFT)
+#define CFG_RESP_TIMEOUT_SHIFT 8
+#define CFG_RESP_TIMEOUT_MASK (0xf << CFG_RESP_TIMEOUT_SHIFT)
+#define CFG_RC_CC_SHIFT 12
+#define CFG_RC_CC_MASK (0xf << CFG_RC_CC_SHIFT)
+#define CFG_STOP_CLOCK BIT(22)
+
+#define SD_EMMC_STATUS 0x48
+#define STATUS_BUSY BIT(31)
+
+#define SD_EMMC_IRQ_EN 0x4c
+#define IRQ_EN_MASK 0x3fff
+#define IRQ_RXD_ERR_SHIFT 0
+#define IRQ_RXD_ERR_MASK 0xff
+#define IRQ_TXD_ERR BIT(8)
+#define IRQ_DESC_ERR BIT(9)
+#define IRQ_RESP_ERR BIT(10)
+#define IRQ_RESP_TIMEOUT BIT(11)
+#define IRQ_DESC_TIMEOUT BIT(12)
+#define IRQ_END_OF_CHAIN BIT(13)
+#define IRQ_RESP_STATUS BIT(14)
+#define IRQ_SDIO BIT(15)
+
+#define SD_EMMC_CMD_CFG 0x50
+#define SD_EMMC_CMD_ARG 0x54
+#define SD_EMMC_CMD_DAT 0x58
+#define SD_EMMC_CMD_RSP 0x5c
+#define SD_EMMC_CMD_RSP1 0x60
+#define SD_EMMC_CMD_RSP2 0x64
+#define SD_EMMC_CMD_RSP3 0x68 + +#define SD_EMMC_RXD 0x94 +#define SD_EMMC_TXD 0x94 +#define SD_EMMC_LAST_REG SD_EMMC_TXD + +#define SD_EMMC_CFG_BLK_SIZE 512 /* internal buffer max: 512 bytes */ +#define SD_EMMC_CFG_RESP_TIMEOUT 256 /* in clock cycles */ +#define SD_EMMC_CFG_CMD_GAP 16 /* in clock cycles */ +#define MUX_CLK_NUM_PARENTS 2 + +struct meson_host { + struct device *dev; + struct mmc_host *mmc; + struct mmc_request *mrq; + struct mmc_command *cmd; + + spinlock_t lock; + void __iomem *regs; +#ifdef USE_REGMAP + struct regmap *regmap; +#endif + int irq; + u32 ocr_mask; + struct clk *core_clk; + struct clk_mux mux; + struct clk *mux_clk; + struct clk *mux_parent[MUX_CLK_NUM_PARENTS]; + unsigned long mux_parent_rate[MUX_CLK_NUM_PARENTS]; + + struct clk_divider cfg_div; + struct clk *cfg_div_clk; + + unsigned int bounce_buf_size; + void *bounce_buf; + dma_addr_t bounce_dma_addr; + + unsigned long clk_rate; + unsigned long clk_src_rate; + unsigned short clk_src_div; +}; + +#define reg_read(host, offset) readl(host->regs + offset) +#define reg_write(host, offset, val) writel(val, host->regs + offset) + +struct cmd_cfg { + union { + struct { + u32 length:9; + u32 block_mode:1; + u32 r1b:1; + u32 end_of_chain:1; + u32 timeout:4; + u32 no_resp:1; + u32 no_cmd:1; + u32 data_io:1; + u32 data_wr:1; + u32 resp_nocrc:1; + u32 resp_128:1; + u32 resp_num:1; + u32 data_num:1; + u32 cmd_index:6; + u32 error:1; + u32 owner:1; +#define CFG_OWNER_CPU 1 +#define CFG_OWNER_MMC 0 + }; + u32 val; + }; +}; + +struct sd_emmc_desc { + struct cmd_cfg cmd_cfg; + u32 cmd_arg; + u32 cmd_data; + u32 cmd_resp; +}; +#define CMD_DATA_MASK (~0x3) +#define CMD_DATA_BIG_ENDIAN BIT(1) +#define CMD_DATA_SRAM BIT(0) +#define CMD_RESP_MASK (~0x1) +#define CMD_RESP_SRAM BIT(0) + +static int meson_mmc_clk_set(struct meson_host *host, unsigned long clk_rate) +{ + struct mmc_host *mmc = host->mmc; + int ret = 0; + u32 cfg; + + if (clk_rate) { + if (WARN_ON(clk_rate > mmc->f_max)) + clk_rate = mmc->f_max; + else if (WARN_ON(clk_rate < mmc->f_min)) + clk_rate = mmc->f_min; + } + + if (clk_rate == host->clk_rate) + return 0; + + /* stop clock */ + cfg = reg_read(host, SD_EMMC_CFG); + if (!(cfg & CFG_STOP_CLOCK)) { + cfg |= CFG_STOP_CLOCK; + reg_write(host, SD_EMMC_CFG, cfg); + } + + dev_dbg(host->dev, "change clock rate %lu -> %lu\n", host->clk_rate, clk_rate); + ret = clk_set_rate(host->cfg_div_clk, clk_rate); + if (clk_rate && clk_rate != clk_get_rate(host->cfg_div_clk)) + dev_warn(host->dev, "divider requested rate %lu != actual rate %lu: ret=%d\n", + clk_rate, clk_get_rate(host->cfg_div_clk), ret); + else + host->clk_rate = clk_rate; + + /* (re)start clock, if non-zero */ + if (clk_rate) { + cfg = reg_read(host, SD_EMMC_CFG); + cfg &= ~CFG_STOP_CLOCK; + reg_write(host, SD_EMMC_CFG, cfg); + } + + return ret; +} + +static int meson_mmc_clk_init(struct meson_host *host) +{ + struct clk_init_data init; + char clk_name[32]; + int i, ret = 0; + const char *mux_parent_names[MUX_CLK_NUM_PARENTS]; + unsigned int mux_parent_count = 0; + const char *clk_div_parents[1]; + unsigned int f_min = UINT_MAX; + u32 clk_reg; + + /* get the mux parents from DT */ + for ( i = 0; i < MUX_CLK_NUM_PARENTS; i++) { + char name[16]; + + snprintf(name, sizeof(name), "clkin%d", i); + host->mux_parent[i] = devm_clk_get(host->dev, name); + if (IS_ERR(host->mux_parent[i])) { + ret = PTR_ERR(host->mux_parent[i]); + if (PTR_ERR(host->mux_parent[i]) != -EPROBE_DEFER) + dev_err(host->dev, "Missing clock %s\n", name); + host->mux_parent[i] = NULL; + return ret; 
+ } else {
+ host->mux_parent_rate[i] = clk_get_rate(host->mux_parent[i]);
+ mux_parent_names[i] = __clk_get_name(host->mux_parent[i]);
+ mux_parent_count++;
+ if (host->mux_parent_rate[i] < f_min)
+ f_min = host->mux_parent_rate[i];
+ }
+ }
+
+ /* calculate f_min based on available input clocks and the max divider value */
+ if (f_min != UINT_MAX)
+ f_min = DIV_ROUND_UP(f_min, CLK_DIV_MAX);
+ else
+ f_min = 4000000; /* default min: 4 MHz */
+ host->mmc->f_min = f_min;
+
+ /* create the mux */
+ snprintf(clk_name, sizeof(clk_name), "%s#mux", dev_name(host->dev));
+ init.name = clk_name;
+ init.ops = &clk_mux_ops;
+ init.flags = CLK_IS_BASIC;
+ init.parent_names = mux_parent_names;
+ init.num_parents = mux_parent_count;
+
+ host->mux.reg = host->regs + SD_EMMC_CLOCK;
+ host->mux.shift = CLK_SRC_SHIFT;
+ host->mux.mask = CLK_SRC_MASK >> CLK_SRC_SHIFT; /* FIXME: fix *_MASK to not be shifted */
+ host->mux.flags = 0;
+ host->mux.table = NULL;
+ host->mux.hw.init = &init;
+
+ host->mux_clk = devm_clk_register(host->dev, &host->mux.hw);
+ if (WARN_ON(PTR_ERR_OR_ZERO(host->mux_clk)))
+ return PTR_ERR(host->mux_clk);
+
+ /* create the divider */
+ snprintf(clk_name, sizeof(clk_name), "%s#div", dev_name(host->dev));
+ init.name = devm_kstrdup(host->dev, clk_name, GFP_KERNEL);
+ init.ops = &clk_divider_ops;
+ init.flags = CLK_IS_BASIC | CLK_SET_RATE_PARENT;
+ clk_div_parents[0] = __clk_get_name(host->mux_clk);
+ init.parent_names = clk_div_parents;
+ init.num_parents = ARRAY_SIZE(clk_div_parents);
+
+ host->cfg_div.reg = host->regs + SD_EMMC_CLOCK;
+ host->cfg_div.shift = CLK_DIV_SHIFT;
+ host->cfg_div.width = CLK_DIV_WIDTH;
+ host->cfg_div.hw.init = &init;
+ host->cfg_div.flags = CLK_DIVIDER_ONE_BASED | CLK_DIVIDER_ROUND_CLOSEST | CLK_DIVIDER_ALLOW_ZERO;
+
+ host->cfg_div_clk = devm_clk_register(host->dev, &host->cfg_div.hw);
+ if (WARN_ON(PTR_ERR_OR_ZERO(host->cfg_div_clk)))
+ return PTR_ERR(host->cfg_div_clk);
+
+ /* init SD_EMMC_CLOCK to sane defaults w/min clock rate */
+ clk_reg = 0;
+ clk_reg |= CLK_PHASE_180 << CLK_PHASE_SHIFT;
+ clk_reg |= CLK_SRC_XTAL << CLK_SRC_SHIFT;
+ clk_reg |= CLK_DIV_MAX << CLK_DIV_SHIFT;
+ reg_write(host, SD_EMMC_CLOCK, clk_reg);
+
+ clk_prepare_enable(host->cfg_div_clk);
+ meson_mmc_clk_set(host, f_min);
+
+ return ret;
+}
+
+static void meson_mmc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
+{
+ struct meson_host *host = mmc_priv(mmc);
+ u32 bus_width;
+ u32 val, orig;
+
+ /*
+ * GPIO regulator, only controls switching between 1v8 and 3v3, doesn't support
+ * MMC_POWER_OFF, MMC_POWER_ON.
+ */
+ switch (ios->power_mode) {
+ case MMC_POWER_UP:
+ if (!IS_ERR(mmc->supply.vmmc))
+ mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, ios->vdd);
+ }
+
+ meson_mmc_clk_set(host, ios->clock);
+
+ /* Bus width */
+ val = reg_read(host, SD_EMMC_CFG);
+ switch (ios->bus_width) {
+ case MMC_BUS_WIDTH_1:
+ bus_width = CFG_BUS_WIDTH_1;
+ break;
+ case MMC_BUS_WIDTH_4:
+ bus_width = CFG_BUS_WIDTH_4;
+ break;
+ case MMC_BUS_WIDTH_8:
+ bus_width = CFG_BUS_WIDTH_8;
+ break;
+ default:
+ dev_err(host->dev, "Invalid ios->bus_width: %u. Setting to 4.\n", ios->bus_width);
+ bus_width = CFG_BUS_WIDTH_4;
+ }
+
+ val = reg_read(host, SD_EMMC_CFG);
+ orig = val;
+
+ val &= ~CFG_BUS_WIDTH_MASK;
+ val |= bus_width << CFG_BUS_WIDTH_SHIFT;
+
+ val &= ~CFG_BLK_LEN_MASK;
+ val |= ilog2(SD_EMMC_CFG_BLK_SIZE) << CFG_BLK_LEN_SHIFT;
+
+ val &= ~CFG_RESP_TIMEOUT_MASK;
+ val |= ilog2(SD_EMMC_CFG_RESP_TIMEOUT) << CFG_RESP_TIMEOUT_SHIFT;
+
+ val &= ~CFG_RC_CC_MASK;
+ val |= ilog2(SD_EMMC_CFG_CMD_GAP) << CFG_RC_CC_SHIFT;
+
+ reg_write(host, SD_EMMC_CFG, val);
+
+ if (val != orig)
+ dev_dbg(host->dev, "%s: SD_EMMC_CFG: 0x%08x -> 0x%08x\n",
+ __func__, orig, val);
+
+ return;
+}
+
+static int meson_mmc_wait_busy(struct mmc_host *mmc, unsigned int timeout)
+{
+ struct meson_host *host = mmc_priv(mmc);
+ u32 status;
+ int i;
+
+ for (i = timeout; i > 0; i--) {
+ status = reg_read(host, SD_EMMC_STATUS);
+ if (!(status & STATUS_BUSY))
+ break;
+ }
+
+ return (i == 0);
+}
+
+static int meson_mmc_request_done(struct mmc_host *mmc, struct mmc_request *mrq)
+{
+ struct meson_host *host = mmc_priv(mmc);
+ struct mmc_command *cmd = host->cmd;
+
+ WARN_ON(host->mrq != mrq);
+ if (cmd && !cmd->error)
+ if (meson_mmc_wait_busy(mmc, 0xfffff)) /* FIXME: hard-coded value */
+ dev_warn(host->dev, "%s: timeout busy.\n", __func__);
+
+ host->mrq = NULL;
+ host->cmd = NULL;
+ mmc_request_done(host->mmc, mrq);
+
+ return 0;
+}
+
+static int meson_mmc_cmd_invalid(struct mmc_host *mmc, struct mmc_command *cmd)
+{
+ cmd->error = -EINVAL;
+ meson_mmc_request_done(mmc, cmd->mrq);
+
+ return -EINVAL;
+}
+
+static int meson_mmc_check_command(struct mmc_host *mmc, struct mmc_command *cmd)
+{
+ int ret = 0;
+
+ if (true) { /* FIXME: for SD card only */
+ if (cmd->opcode == SD_IO_SEND_OP_COND
+ || cmd->opcode == SD_IO_RW_DIRECT
+ || cmd->opcode == SD_IO_RW_EXTENDED) {
+ ret = meson_mmc_cmd_invalid(mmc, cmd);
+ }
+ }
+
+ return ret;
+}
+
+static void meson_mmc_start_cmd(struct mmc_host *mmc, struct mmc_command *cmd)
+{
+ struct meson_host *host = mmc_priv(mmc);
+ struct sd_emmc_desc *desc, desc_tmp;
+ u32 cfg;
+ u8 blk_len;
+ unsigned int xfer_bytes = 0;
+
+ /* Setup descriptors */
+ dma_rmb();
+ desc = &desc_tmp;
+ memset(desc, 0, sizeof(struct sd_emmc_desc));
+
+ desc->cmd_cfg.cmd_index = cmd->opcode;
+ desc->cmd_cfg.owner = CFG_OWNER_CPU;
+ desc->cmd_arg = cmd->arg;
+ /* FIXME: hard-coded timeouts from vendor BSP */
+ if (!cmd->data)
+ /* 10uS based. 2^10 = 10mS for only cmd timeout */
+ desc->cmd_cfg.timeout = 0xa;
+ else
+ desc->cmd_cfg.timeout = 0xc;
+
+ /* Response */
+ if (cmd->flags & MMC_RSP_PRESENT) {
+ desc->cmd_cfg.no_resp = 0;
+ if (cmd->flags & MMC_RSP_136)
+ desc->cmd_cfg.resp_128 = 1;
+ desc->cmd_cfg.resp_num = 1;
+ desc->cmd_resp = 0;
+
+ if (!(cmd->flags & MMC_RSP_CRC))
+ desc->cmd_cfg.resp_nocrc = 1;
+
+ if (cmd->flags & MMC_RSP_BUSY)
+ desc->cmd_cfg.r1b = 1;
+ } else {
+ desc->cmd_cfg.no_resp = 1;
+ }
+
+ /* data? 
*/ + if (cmd->data) { + desc->cmd_cfg.data_io = 1; + if (cmd->data->blocks > 1) { + desc->cmd_cfg.block_mode = 1; + desc->cmd_cfg.length = cmd->data->blocks; + + /* check if block-size matches, if not update */ + cfg = reg_read(host, SD_EMMC_CFG); + blk_len = (cfg & CFG_BLK_LEN_MASK) >> CFG_BLK_LEN_SHIFT; + if (blk_len != ilog2(cmd->data->blksz)) { + dev_warn(host->dev, "%s: update blk_len %d -> %d\n", + __func__, blk_len, ilog2(cmd->data->blksz)); + blk_len = ilog2(cmd->data->blksz); + cfg &= ~CFG_BLK_LEN_MASK; + cfg |= blk_len << CFG_BLK_LEN_SHIFT; + reg_write(host, SD_EMMC_CFG, cfg); + } + } else { + desc->cmd_cfg.block_mode = 0; + desc->cmd_cfg.length = cmd->data->blksz; + } + + cmd->data->bytes_xfered = 0; + xfer_bytes = cmd->data->blksz * cmd->data->blocks; + /* dev_dbg(host->dev, "%s: sg_len=%u, %s %u bytes\n", __func__, */ + /* cmd->data->sg_len, cmd->data->flags & MMC_DATA_WRITE ? "WRITE" : "READ", xfer_bytes); */ + if (cmd->data->flags & MMC_DATA_WRITE) { + desc->cmd_cfg.data_wr = 1; + WARN_ON(xfer_bytes > host->bounce_buf_size); + sg_copy_to_buffer(cmd->data->sg, cmd->data->sg_len, host->bounce_buf, xfer_bytes); + cmd->data->bytes_xfered = xfer_bytes; + dma_wmb(); + } else { + desc->cmd_cfg.data_wr = 0; + } + + if (xfer_bytes > 0) { + desc->cmd_cfg.data_num = 0; + desc->cmd_data = host->bounce_dma_addr & CMD_DATA_MASK; + } else { + /* write data to data_addr */ + desc->cmd_cfg.data_num = 1; + desc->cmd_data = 0; + } + + } else { + desc->cmd_cfg.data_io = 0; + + /* Current 10uS based. 2^10 = 10mS for only cmd timeout */ + desc->cmd_cfg.timeout = 0xa; + } + + host->cmd = cmd; + + /* Last descriptor */ + desc->cmd_cfg.end_of_chain = 1; + reg_write(host, SD_EMMC_CMD_CFG, desc->cmd_cfg.val); + reg_write(host, SD_EMMC_CMD_DAT, desc->cmd_data); + reg_write(host, SD_EMMC_CMD_RSP, desc->cmd_resp); + wmb(); + reg_write(host, SD_EMMC_CMD_ARG, desc->cmd_arg); /* this triggers the cmd */ +} + +static void meson_mmc_request(struct mmc_host *mmc, struct mmc_request *mrq) +{ + struct meson_host *host = mmc_priv(mmc); + + WARN_ON(host->mrq != NULL); + + /* Stop execution */ + reg_write(host, SD_EMMC_START, 0); + + /* clear, ack, enable all interrupts */ + reg_write(host, SD_EMMC_IRQ_EN, 0); + reg_write(host, SD_EMMC_STATUS, IRQ_EN_MASK); + reg_write(host, SD_EMMC_IRQ_EN, IRQ_EN_MASK); + + host->mrq = mrq; + + if (meson_mmc_check_command(mmc, mrq->cmd)) + return; + + if (mrq->sbc) + meson_mmc_start_cmd(mmc, mrq->sbc); + else + meson_mmc_start_cmd(mmc, mrq->cmd); + + return; +} + +static int meson_mmc_read_response(struct mmc_host *mmc, struct mmc_command *cmd) +{ + struct meson_host *host = mmc_priv(mmc); + + if (!cmd) { + dev_dbg(host->dev, "%s: NULL command.\n", __func__); + return -EINVAL; + } + + if (cmd->flags & MMC_RSP_136) { + cmd->resp[0] = reg_read(host, SD_EMMC_CMD_RSP3); + cmd->resp[1] = reg_read(host, SD_EMMC_CMD_RSP2); + cmd->resp[2] = reg_read(host, SD_EMMC_CMD_RSP1); + cmd->resp[3] = reg_read(host, SD_EMMC_CMD_RSP); + } else if (cmd->flags & MMC_RSP_PRESENT) { + cmd->resp[0] = reg_read(host, SD_EMMC_CMD_RSP); + } + + return 0; +} + +static irqreturn_t meson_mmc_irq(int irq, void *dev_id) +{ + struct meson_host *host = dev_id; + struct mmc_request *mrq = host->mrq; + struct mmc_command *cmd = host->cmd; + u32 irq_en, status, raw_status; + irqreturn_t ret = IRQ_HANDLED; + + if (WARN_ON(!host)) + return IRQ_NONE; + + if (WARN_ON(!mrq)) + return IRQ_NONE; + + if (WARN_ON(!cmd)) + return IRQ_NONE; + + spin_lock(&host->lock); + irq_en = reg_read(host, SD_EMMC_IRQ_EN); + raw_status = 
reg_read(host, SD_EMMC_STATUS); + status = raw_status & irq_en; + + if (!status) { + dev_warn(host->dev, "Spurious IRQ! status=0x%08x, irq_en=0x%08x\n", + raw_status, irq_en); + ret = IRQ_NONE; + goto out; + } + + cmd->error = 0; + if (status & IRQ_RXD_ERR_MASK) { + dev_dbg(host->dev, "Unhandled IRQ: RXD error\n"); + cmd->error = -EILSEQ; + } + if (status & IRQ_TXD_ERR) { + dev_dbg(host->dev, "Unhandled IRQ: TXD error\n"); + cmd->error = -EILSEQ; + } + if (status & IRQ_DESC_ERR) + dev_dbg(host->dev, "Unhandled IRQ: Descriptor error\n"); + if (status & IRQ_RESP_ERR) { + dev_dbg(host->dev, "Unhandled IRQ: Response error\n"); + cmd->error = -EILSEQ; + } + if (status & IRQ_RESP_TIMEOUT) { + dev_dbg(host->dev, "Unhandled IRQ: Response timeout\n"); + cmd->error = -ETIMEDOUT; + } + if (status & IRQ_DESC_TIMEOUT) { + dev_dbg(host->dev, "Unhandled IRQ: Descriptor timeout\n"); + cmd->error = -ETIMEDOUT; + } + if (status & IRQ_SDIO) + dev_dbg(host->dev, "Unhandled IRQ: SDIO.\n"); + + if (status & (IRQ_END_OF_CHAIN | IRQ_RESP_STATUS)) + ret = IRQ_WAKE_THREAD; + else { + dev_warn(host->dev, "Unknown IRQ! status=0x%04x: MMC CMD%u arg=0x%08x flags=0x%08x stop=%d\n", + status, cmd->opcode, cmd->arg, cmd->flags, mrq->stop ? 1 : 0); + if (cmd->data) { + struct mmc_data *data = cmd->data; + dev_warn(host->dev, "\tblksz %u blocks %u flags 0x%08x (%s%s)", data->blksz, data->blocks, data->flags, + data->flags & MMC_DATA_WRITE ? "write" : "", + data->flags & MMC_DATA_READ ? "read" : ""); + } + } + +out: + /* ack all (enabled) interrupts */ + reg_write(host, SD_EMMC_STATUS, status); + + if (ret == IRQ_HANDLED) { + meson_mmc_read_response(host->mmc, cmd); + meson_mmc_request_done(host->mmc, cmd->mrq); + } + + spin_unlock(&host->lock); + return ret; +} + +static irqreturn_t meson_mmc_irq_thread(int irq, void *dev_id) +{ + struct meson_host *host = dev_id; + struct mmc_request *mrq = host->mrq; + struct mmc_command *cmd = host->cmd; + struct mmc_data *data; + + unsigned int xfer_bytes; + int ret = IRQ_HANDLED; + + if (WARN_ON(!mrq)) + ret = IRQ_NONE; + + if (WARN_ON(!cmd)) + ret = IRQ_NONE; + + data = cmd->data; + if (data) { + xfer_bytes = data->blksz * data->blocks; + if (data->flags & MMC_DATA_READ) { + WARN_ON(xfer_bytes > host->bounce_buf_size); + sg_copy_from_buffer(data->sg, data->sg_len, + host->bounce_buf, xfer_bytes); + data->bytes_xfered = xfer_bytes; + } + } + + meson_mmc_read_response(host->mmc, cmd); + if (!data || !data->stop || mrq->sbc) + meson_mmc_request_done(host->mmc, mrq); + else + meson_mmc_start_cmd(host->mmc, data->stop); + + return ret; +} + +/* + * NOTE: we only need this until the GPIO/pinctrl driver can handle + * interrupts. For now, the MMC core will use this for polling. 
+ */
+static int meson_mmc_get_cd(struct mmc_host *mmc)
+{
+ int status = mmc_gpio_get_cd(mmc);
+
+ if (status == -ENOSYS)
+ return 1; /* assume present */
+
+ return status;
+}
+
+static struct mmc_host_ops meson_mmc_ops = {
+ .request = meson_mmc_request,
+ .set_ios = meson_mmc_set_ios,
+ .get_cd = meson_mmc_get_cd,
+};
+
+static int meson_mmc_probe(struct platform_device *pdev)
+{
+ struct device_node *np = pdev->dev.of_node;
+ struct resource *res;
+ struct meson_host *host;
+ struct mmc_host *mmc;
+ int ret;
+
+ mmc = mmc_alloc_host(sizeof(struct meson_host), &pdev->dev);
+ if (!mmc)
+ return -ENOMEM;
+ host = mmc_priv(mmc);
+ host->mmc = mmc;
+ host->dev = &pdev->dev;
+ dev_set_drvdata(&pdev->dev, host);
+
+ spin_lock_init(&host->lock);
+
+ host->core_clk = devm_clk_get(&pdev->dev, "core");
+ if (IS_ERR(host->core_clk)) {
+ ret = PTR_ERR(host->core_clk);
+ if (ret == -EPROBE_DEFER)
+ dev_dbg(&pdev->dev, "Missing core clock. EPROBE_DEFER\n");
+ else
+ dev_err(&pdev->dev, "Unable to get core clk (ret=%d).\n", ret);
+ goto free_host;
+ }
+
+ /* Voltage supply */
+ mmc_of_parse_voltage(np, &host->ocr_mask);
+ ret = mmc_regulator_get_supply(mmc);
+ if (ret == -EPROBE_DEFER) {
+ dev_dbg(&pdev->dev, "Missing regulator: EPROBE_DEFER\n");
+ goto free_host;
+ }
+
+ if (!mmc->ocr_avail)
+ mmc->ocr_avail = host->ocr_mask;
+
+ ret = mmc_of_parse(mmc);
+ if (ret) {
+ dev_warn(&pdev->dev, "error parsing DT: %d\n", ret);
+ goto free_host;
+ }
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res) {
+ ret = -ENODEV;
+ goto free_host;
+ }
+ host->regs = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(host->regs)) {
+ ret = PTR_ERR(host->regs);
+ goto free_host;
+ }
+
+ host->irq = platform_get_irq(pdev, 0);
+ if (host->irq <= 0) {
+ dev_err(&pdev->dev, "failed to get interrupt resource.\n");
+ ret = -EINVAL;
+ goto free_host;
+ }
+
+ ret = devm_request_threaded_irq(&pdev->dev, host->irq,
+ meson_mmc_irq, meson_mmc_irq_thread,
+ IRQF_SHARED, DRIVER_NAME, host);
+ if (ret)
+ goto free_host;
+
+ /* data bounce buffer */
+ host->bounce_buf_size = SZ_512K;
+ host->bounce_buf = dma_alloc_coherent(host->dev, host->bounce_buf_size,
+ &host->bounce_dma_addr, GFP_KERNEL);
+ if (host->bounce_buf == NULL) {
+ dev_err(host->dev, "Unable to allocate DMA bounce buffer.\n");
+ ret = -ENOMEM;
+ goto free_host;
+ }
+
+ clk_prepare_enable(host->core_clk);
+
+ ret = meson_mmc_clk_init(host);
+ if (ret)
+ goto free_host;
+
+ /* Stop execution */
+ reg_write(host, SD_EMMC_START, 0);
+
+ /* clear, ack, enable all interrupts */
+ reg_write(host, SD_EMMC_IRQ_EN, 0);
+ reg_write(host, SD_EMMC_STATUS, IRQ_EN_MASK);
+
+ mmc->ops = &meson_mmc_ops;
+ mmc_add_host(mmc);
+
+ return 0;
+
+free_host:
+ dev_dbg(host->dev, "Failed to probe: ret=%d\n", ret);
+ if (host->core_clk)
+ clk_disable_unprepare(host->core_clk);
+ mmc_free_host(mmc);
+ return ret;
+}
+
+static int meson_mmc_remove(struct platform_device *pdev)
+{
+ struct meson_host *host = dev_get_drvdata(&pdev->dev);
+
+ if (WARN_ON(!host))
+ return 0;
+
+ if (host->bounce_buf)
+ dma_free_coherent(host->dev, host->bounce_buf_size,
+ host->bounce_buf, host->bounce_dma_addr);
+
+ if (host->cfg_div_clk)
+ clk_disable_unprepare(host->cfg_div_clk);
+
+ if (host->core_clk)
+ clk_disable_unprepare(host->core_clk);
+
+ mmc_free_host(host->mmc);
+ return 0;
+}
+
+static const struct of_device_id meson_mmc_of_match[] = {
+ {
+ .compatible = "amlogic,meson-gxbb-mmc",
+ },
+ {}
+};
+MODULE_DEVICE_TABLE(of, meson_mmc_of_match);
+
+static struct platform_driver meson_mmc_driver = {
+ .probe = meson_mmc_probe,
+ .remove = meson_mmc_remove,
+ .driver = {
+ .name = DRIVER_NAME,
+ .of_match_table = of_match_ptr(meson_mmc_of_match),
+ },
+};
+
+module_platform_driver(meson_mmc_driver);
+
+MODULE_ALIAS("platform:" DRIVER_NAME);
+MODULE_DESCRIPTION("Amlogic S905/GXBB SD/eMMC driver");
+MODULE_AUTHOR("Kevin Hilman <khilman@xxxxxxxxxxxx>");
+MODULE_LICENSE("Dual BSD/GPL");
+
-- 
2.9.0