diff --git a/drivers/memory/Kconfig b/drivers/memory/Kconfig index fac290e48e0b..95c8a17ad9a9 100644 --- a/drivers/memory/Kconfig +++ b/drivers/memory/Kconfig @@ -236,6 +236,7 @@ config STM32_FMC2_EBI devices (like SRAM, ethernet adapters, FPGAs, LCD displays, ...) on SOCs containing the FMC2 External Bus Interface. +source "drivers/memory/rockchip/Kconfig" source "drivers/memory/samsung/Kconfig" source "drivers/memory/tegra/Kconfig" diff --git a/drivers/memory/Makefile b/drivers/memory/Makefile index e148f636c082..1a805ba1eb34 100644 --- a/drivers/memory/Makefile +++ b/drivers/memory/Makefile @@ -24,6 +24,7 @@ obj-$(CONFIG_MTK_SMI) += mtk-smi.o obj-$(CONFIG_DA8XX_DDRCTL) += da8xx-ddrctl.o obj-$(CONFIG_PL353_SMC) += pl353-smc.o obj-$(CONFIG_RENESAS_RPCIF) += renesas-rpc-if.o +obj-$(CONFIG_ROCKCHIP_DSMC) += rockchip/ obj-$(CONFIG_STM32_FMC2_EBI) += stm32-fmc2-ebi.o obj-$(CONFIG_SAMSUNG_MC) += samsung/ diff --git a/drivers/memory/rockchip/Kconfig b/drivers/memory/rockchip/Kconfig new file mode 100644 index 000000000000..42944724fb20 --- /dev/null +++ b/drivers/memory/rockchip/Kconfig @@ -0,0 +1,10 @@ +# SPDX-License-Identifier: GPL-2.0 +# +# Rockchip DSMC (Double Data Rate Serial Memory Controller) support +# + +config ROCKCHIP_DSMC + tristate "Rockchip DSMC (Double Data Rate Serial Memory Controller) driver" + depends on ARCH_ROCKCHIP + help + Say y here to enable support for the Rockchip DSMC (Double Data Rate Serial Memory Controller), which drives HYPERBUS/Xccela PSRAM and local-bus devices. diff --git a/drivers/memory/rockchip/Makefile b/drivers/memory/rockchip/Makefile new file mode 100644 index 000000000000..36fca45367cb --- /dev/null +++ b/drivers/memory/rockchip/Makefile @@ -0,0 +1,7 @@ +# SPDX-License-Identifier: GPL-2.0 +# +# Makefile for the Rockchip DSMC drivers. 
+# + +obj-$(CONFIG_ROCKCHIP_DSMC) += dsmc.o +dsmc-y += dsmc-controller.o dsmc-lb-device.o dsmc-host.o diff --git a/drivers/memory/rockchip/dsmc-controller.c b/drivers/memory/rockchip/dsmc-controller.c new file mode 100644 index 000000000000..41565b7254c6 --- /dev/null +++ b/drivers/memory/rockchip/dsmc-controller.c @@ -0,0 +1,851 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (c) 2024 Rockchip Electronics Co., Ltd. + */ +#include +#include +#include + +#include "dsmc-host.h" +#include "dsmc-lb-slave.h" + +#define MHZ (1000000) + +#define REG_CLRSETBITS(dsmc, offset, clrbits, setbits) \ + dsmc_modify_reg(dsmc, offset, clrbits, setbits) + +/* psram id */ +enum { + CYPRESS = 0x1, + ISSI = 0x3, + WINBOND = 0x6, + APM_PSRAM = 0xd, +}; + +struct dsmc_psram { + uint16_t id; + uint16_t protcl; + uint32_t mtr_timing; +}; + +/* DSMC psram support list */ +static const struct dsmc_psram psram_info[] = { + /* Only APM is Xccela psram, others are Hyper psram */ + {APM_PSRAM, OPI_XCCELA_PSRAM, MTR_CFG(2, 2, 0, 0, 0, 0, 0, 0)}, + {WINBOND, HYPERBUS_PSRAM, MTR_CFG(2, 2, 0, 0, 0, 0, 2, 2)}, + {CYPRESS, HYPERBUS_PSRAM, MTR_CFG(2, 2, 0, 0, 0, 0, 1, 1)}, + {ISSI, HYPERBUS_PSRAM, MTR_CFG(2, 2, 0, 0, 0, 0, 1, 1)}, +}; + +static inline void xccela_write_mr(struct dsmc_map *map, + uint32_t mr_num, uint8_t val) +{ + writew(XCCELA_PSRAM_MR_SET(val), map->virt + XCCELA_PSRAM_MR(mr_num)); +} + +static inline uint8_t xccela_read_mr(struct dsmc_map *map, uint32_t mr_num) +{ + return XCCELA_PSRAM_MR_GET(readw(map->virt + + XCCELA_PSRAM_MR(mr_num))); +} + +static inline void hyper_write_mr(struct dsmc_map *map, + uint32_t mr_num, uint16_t val) +{ + writew(val, map->virt + mr_num); +} + +static inline uint16_t hyper_read_mr(struct dsmc_map *map, uint32_t mr_num) +{ + return readw(map->virt + mr_num); +} + +static inline void lb_write_cmn(struct dsmc_map *map, + uint32_t cmn_reg, uint32_t val) +{ + writel(val, map->virt + cmn_reg); +} + +static inline uint32_t lb_read_cmn(struct dsmc_map 
*map, uint32_t cmn_reg) +{ + return readl(map->virt + cmn_reg); +} + +static inline void dsmc_modify_reg(struct rockchip_dsmc *dsmc, uint32_t offset, + uint32_t clrbits, uint32_t setbits) +{ + uint32_t value; + + value = readl(dsmc->regs + offset); + value &= ~clrbits; + value |= setbits; + writel(value, dsmc->regs + offset); +} + +static int find_attr_region(struct dsmc_config_cs *cfg, uint32_t attribute) +{ + int region; + + for (region = 0; region < DSMC_LB_MAX_RGN; region++) { + if (cfg->slv_rgn[region].attribute == attribute) + return region; + } + return -1; +} + +static uint32_t cap_2_dev_size(uint32_t cap) +{ + uint32_t mask = 0x80000000; + int i; + + for (i = 31; i >= 0; i--) { + if (cap & mask) + return i; + mask >>= 1; + } + return 0; +} + +static int dsmc_psram_id_detect(struct rockchip_dsmc *dsmc, uint32_t cs) +{ + uint32_t tmp, i; + int ret = -1; + struct dsmc_map *region_map = &dsmc->cs_map[cs].region_map[0]; + struct dsmc_config_cs *cfg = &dsmc->cfg.cs_cfg[cs]; + + tmp = readl(dsmc->regs + DSMC_MCR(cs)); + + /* config to CR space */ + REG_CLRSETBITS(dsmc, DSMC_MCR(cs), + (MCR_IOWIDTH_MASK << MCR_IOWIDTH_SHIFT) | + (MCR_CRT_MASK << MCR_CRT_SHIFT), + (MCR_IOWIDTH_X8 << MCR_IOWIDTH_SHIFT) | + (MCR_CRT_CR_SPACE << MCR_CRT_SHIFT)); + + if (cfg->protcl == OPI_XCCELA_PSRAM) { + uint8_t mid; + + /* reset AP memory psram */ + REG_CLRSETBITS(dsmc, DSMC_VDMC(cs), + (VDMC_RESET_CMD_MODE_MASK << VDMC_RESET_CMD_MODE_SHIFT), + (0x1 << VDMC_RESET_CMD_MODE_SHIFT)); + /* write mr any value to trigger xccela psram reset */ + xccela_write_mr(region_map, 0, 0x0); + udelay(200); + REG_CLRSETBITS(dsmc, DSMC_VDMC(cs), + (VDMC_RESET_CMD_MODE_MASK << VDMC_RESET_CMD_MODE_SHIFT), + (0x0 << VDMC_RESET_CMD_MODE_SHIFT)); + + mid = xccela_read_mr(region_map, 1); + mid &= XCCELA_DEV_ID_MASK; + + if (mid == APM_PSRAM) + ret = 0; + } else { + /* hyper psram get ID */ + uint16_t mid; + + mid = hyper_read_mr(region_map, HYPER_PSRAM_IR0); + mid &= HYPERBUS_DEV_ID_MASK; + for (i = 1; i < 
ARRAY_SIZE(psram_info); i++) { + if (mid == psram_info[i].id) { + ret = 0; + break; + } + } + } + + /* config to memory space */ + writel(tmp, dsmc->regs + DSMC_MCR(cs)); + + return ret; +} + +static void dsmc_psram_bw_detect(struct rockchip_dsmc *dsmc, uint32_t cs) +{ + uint32_t tmp, col; + uint16_t ir0_ir1; + struct dsmc_map *region_map = &dsmc->cs_map[cs].region_map[0]; + struct dsmc_config_cs *cfg = &dsmc->cfg.cs_cfg[cs]; + + if (cfg->protcl == OPI_XCCELA_PSRAM) { + col = 10; + if (dsmc->cfg.cap >= PSRAM_SIZE_16MBYTE) + cfg->io_width = MCR_IOWIDTH_X16; + else + cfg->io_width = MCR_IOWIDTH_X8; + } else { + tmp = readl(dsmc->regs + DSMC_MCR(cs)); + /* config to CR space */ + REG_CLRSETBITS(dsmc, DSMC_MCR(cs), + (MCR_IOWIDTH_MASK << MCR_IOWIDTH_SHIFT) | + (MCR_CRT_MASK << MCR_CRT_SHIFT), + (MCR_IOWIDTH_X8 << MCR_IOWIDTH_SHIFT) | + (MCR_CRT_CR_SPACE << MCR_CRT_SHIFT)); + + /* hyper psram get IR0 */ + ir0_ir1 = hyper_read_mr(region_map, HYPER_PSRAM_IR0); + col = ((ir0_ir1 >> IR0_COL_COUNT_SHIFT) & IR0_COL_COUNT_MASK) + 1; + + ir0_ir1 = hyper_read_mr(region_map, HYPER_PSRAM_IR1); + if ((ir0_ir1 & IR1_DEV_IO_WIDTH_MASK) == IR1_DEV_IO_WIDTH_X16) + cfg->io_width = MCR_IOWIDTH_X16; + else + cfg->io_width = MCR_IOWIDTH_X8; + + /* config to memory space */ + writel(tmp, dsmc->regs + DSMC_MCR(cs)); + + } + cfg->col = col; +} + +static int dsmc_psram_dectect(struct rockchip_dsmc *dsmc, uint32_t cs) +{ + uint32_t i = 0; + int ret = -1; + struct dsmc_config_cs *cfg = &dsmc->cfg.cs_cfg[cs]; + + /* axi read do not response error */ + REG_CLRSETBITS(dsmc, DSMC_AXICTL, + (AXICTL_RD_NO_ERR_MASK << AXICTL_RD_NO_ERR_SHIFT), + (0x1 << AXICTL_RD_NO_ERR_SHIFT)); + REG_CLRSETBITS(dsmc, DSMC_MCR(cs), + (MCR_DEVTYPE_MASK << MCR_DEVTYPE_SHIFT), + (MCR_DEVTYPE_HYPERRAM << MCR_DEVTYPE_SHIFT)); + + for (i = 0; i < ARRAY_SIZE(psram_info); i++) { + REG_CLRSETBITS(dsmc, DSMC_VDMC(cs), + (VDMC_MID_MASK << VDMC_MID_SHIFT) | + (VDMC_PROTOCOL_MASK << VDMC_PROTOCOL_SHIFT), + (psram_info[i].id << 
VDMC_MID_SHIFT) | + (psram_info[i].protcl << VDMC_PROTOCOL_SHIFT)); + writel(psram_info[i].mtr_timing, + dsmc->regs + DSMC_MTR(cs)); + + cfg->mid = psram_info[i].id; + cfg->protcl = psram_info[i].protcl; + cfg->mtr_timing = psram_info[i].mtr_timing; + if (!dsmc_psram_id_detect(dsmc, cs)) { + pr_info("DSMC: The cs%d %s PSRAM ID: 0x%x\n", cs, + (cfg->protcl == OPI_XCCELA_PSRAM) ? "XCCELA" : "HYPER", + psram_info[i].id); + ret = 0; + break; + } + } + if (i == ARRAY_SIZE(psram_info)) { + pr_err("DSMC: Unknown PSRAM device\n"); + ret = -1; + } else { + dsmc_psram_bw_detect(dsmc, cs); + } + + /* recovery axi read response */ + REG_CLRSETBITS(dsmc, DSMC_AXICTL, + (AXICTL_RD_NO_ERR_MASK << AXICTL_RD_NO_ERR_SHIFT), + (0x0 << AXICTL_RD_NO_ERR_SHIFT)); + + return ret; +} + +static uint32_t calc_ltcy_value(uint32_t latency) +{ + if ((latency >= 5) && (latency <= 10)) + return (latency - 5); + else + return (latency + 0xb); +} + +static int dsmc_ctrller_cfg_for_lb(struct rockchip_dsmc *dsmc, uint32_t cs) +{ + uint32_t value = 0, i; + struct regions_config *slv_rgn; + struct dsmc_config_cs *cfg = &dsmc->cfg.cs_cfg[cs]; + + writel(dsmc->cfg.clk_mode, dsmc->regs + DSMC_CLK_MD); + writel(MTR_CFG(3, 3, 1, 1, 0, 0, + calc_ltcy_value(cfg->rd_latency), + calc_ltcy_value(cfg->wr_latency)), + dsmc->regs + DSMC_MTR(cs)); + writel(cfg->rgn_num / 2, + dsmc->regs + DSMC_SLV_RGN_DIV(cs)); + for (i = 0; i < DSMC_LB_MAX_RGN; i++) { + slv_rgn = &cfg->slv_rgn[i]; + if (!slv_rgn->status) + continue; + + if (slv_rgn->dummy_clk_num >= 2) + value = (0x1 << RGNX_ATTR_DUM_CLK_EN_SHIFT) | + (0x1 << RGNX_ATTR_DUM_CLK_NUM_SHIFT); + else if (slv_rgn->dummy_clk_num >= 1) + value = (0x1 << RGNX_ATTR_DUM_CLK_EN_SHIFT) | + (0x0 << RGNX_ATTR_DUM_CLK_NUM_SHIFT); + else + value = 0x0 << RGNX_ATTR_DUM_CLK_EN_SHIFT; + writel((slv_rgn->attribute << RGNX_ATTR_SHIFT) | + (slv_rgn->cs0_ctrl << RGNX_ATTR_CTRL_SHIFT) | + (slv_rgn->cs0_be_ctrled << + RGNX_ATTR_BE_CTRLED_SHIFT) | value | + (slv_rgn->ca_addr_width << + 
RGNX_ATTR_ADDR_WIDTH_SHIFT), + dsmc->regs + DSMC_RGN0_ATTR(cs) + 4 * i); + } + /* clear and enable interrupt */ + writel(INT_STATUS(cs), dsmc->regs + DSMC_INT_STATUS); + writel(INT_EN(cs), dsmc->regs + DSMC_INT_EN); + + return 0; +} + +static int dsmc_slv_cmn_rgn_config(struct rockchip_dsmc *dsmc, + struct regions_config *slv_rgn, + uint32_t rgn, uint32_t cs) +{ + uint32_t tmp; + struct dsmc_map *region_map = &dsmc->cs_map[cs].region_map[0]; + struct dsmc_config_cs *cfg = &dsmc->cfg.cs_cfg[cs]; + + tmp = lb_read_cmn(region_map, RGN_CMN_CON(rgn, 0)); + if (slv_rgn->dummy_clk_num == 0) { + tmp &= ~(WR_DATA_CYC_EXTENDED_MASK << WR_DATA_CYC_EXTENDED_SHIFT); + } else if (slv_rgn->dummy_clk_num == 1) { + tmp |= slv_rgn->dummy_clk_num << WR_DATA_CYC_EXTENDED_SHIFT; + } else { + pr_err("DSMC: lb slave: dummy clk too large\n"); + return -1; + } + tmp &= ~(RD_LATENCY_CYC_MASK << RD_LATENCY_CYC_SHIFT); + if ((cfg->rd_latency == 1) || (cfg->rd_latency == 2)) { + tmp |= cfg->rd_latency << RD_LATENCY_CYC_SHIFT; + } else { + pr_err("DSMC: lb slave: read latency value error\n"); + return -1; + } + tmp &= ~(WR_LATENCY_CYC_MASK << WR_LATENCY_CYC_SHIFT); + if ((cfg->wr_latency == 1) || (cfg->wr_latency == 2)) { + tmp |= cfg->wr_latency << WR_LATENCY_CYC_SHIFT; + } else { + pr_err("DSMC: lb slave: write latency value error\n"); + return -1; + } + tmp &= ~(CA_CYC_MASK << CA_CYC_SHIFT); + if (slv_rgn->ca_addr_width == RGNX_ATTR_32BIT_ADDR_WIDTH) + tmp |= CA_CYC_32BIT << CA_CYC_SHIFT; + else + tmp |= CA_CYC_16BIT << CA_CYC_SHIFT; + + lb_write_cmn(region_map, RGN_CMN_CON(rgn, 0), tmp); + + return 0; +} + +static int dsmc_slv_cmn_config(struct rockchip_dsmc *dsmc, + struct regions_config *slv_rgn, uint32_t cs) +{ + uint32_t tmp; + struct dsmc_map *region_map = &dsmc->cs_map[cs].region_map[0]; + struct dsmc_config_cs *cfg = &dsmc->cfg.cs_cfg[cs]; + + tmp = lb_read_cmn(region_map, CMN_CON(0)); + if (slv_rgn->dummy_clk_num == 0) { + tmp &= ~(WR_DATA_CYC_EXTENDED_MASK << 
WR_DATA_CYC_EXTENDED_SHIFT); + } else if (slv_rgn->dummy_clk_num == 1) { + tmp |= slv_rgn->dummy_clk_num << WR_DATA_CYC_EXTENDED_SHIFT; + } else { + pr_err("DSMC: lb slave: dummy clk too large\n"); + return -1; + } + tmp &= ~(RD_LATENCY_CYC_MASK << RD_LATENCY_CYC_SHIFT); + if ((cfg->rd_latency == 1) || (cfg->rd_latency == 2)) { + tmp |= cfg->rd_latency << RD_LATENCY_CYC_SHIFT; + } else { + pr_err("DSMC: lb slave: read latency value error\n"); + return -1; + } + + tmp &= ~(CA_CYC_MASK << CA_CYC_SHIFT); + if (slv_rgn->ca_addr_width == RGNX_ATTR_32BIT_ADDR_WIDTH) + tmp |= CA_CYC_32BIT << CA_CYC_SHIFT; + else + tmp |= CA_CYC_16BIT << CA_CYC_SHIFT; + + lb_write_cmn(region_map, CMN_CON(0), tmp); + + tmp = lb_read_cmn(region_map, CMN_CON(3)); + tmp |= 0x1 << RDYN_GEN_CTRL_SHIFT; + tmp &= ~(DATA_WIDTH_MASK << DATA_WIDTH_SHIFT); + tmp |= cfg->io_width << DATA_WIDTH_SHIFT; + lb_write_cmn(region_map, CMN_CON(3), tmp); + + return 0; +} + +static int dsmc_lb_cmn_config(struct rockchip_dsmc *dsmc, uint32_t cs) +{ + uint32_t tmp, i; + struct dsmc_config_cs *cfg = &dsmc->cfg.cs_cfg[cs]; + struct regions_config *slv_rgn; + int ret = 0; + + tmp = readl(dsmc->regs + DSMC_MCR(cs)); + /* config to CR space */ + REG_CLRSETBITS(dsmc, DSMC_MCR(cs), + (MCR_IOWIDTH_MASK << MCR_IOWIDTH_SHIFT) | + (MCR_CRT_MASK << MCR_CRT_SHIFT), + (MCR_IOWIDTH_X8 << MCR_IOWIDTH_SHIFT) | + (MCR_CRT_CR_SPACE << MCR_CRT_SHIFT)); + + slv_rgn = &cfg->slv_rgn[0]; + ret = dsmc_slv_cmn_config(dsmc, slv_rgn, cs); + + for (i = 0; i < DSMC_LB_MAX_RGN; i++) { + slv_rgn = &cfg->slv_rgn[i]; + if (!slv_rgn->status) + continue; + ret = dsmc_slv_cmn_rgn_config(dsmc, slv_rgn, i, cs); + if (ret) + break; + } + + /* config to memory space */ + writel(tmp, dsmc->regs + DSMC_MCR(cs)); + + return ret; +} + +static void dsmc_lb_csr_config(struct rockchip_dsmc *dsmc, uint32_t cs) +{ + uint32_t mcr_tmp, rgn_attr_tmp; + struct dsmc_map *region_map = &dsmc->cs_map[cs].region_map[0]; + + mcr_tmp = readl(dsmc->regs + DSMC_MCR(cs)); + 
rgn_attr_tmp = readl(dsmc->regs + DSMC_RGN0_ATTR(cs)); + /* config to slave CSR space */ + REG_CLRSETBITS(dsmc, DSMC_MCR(cs), + MCR_CRT_MASK << MCR_CRT_SHIFT, + MCR_CRT_MEM_SPACE << MCR_CRT_SHIFT); + REG_CLRSETBITS(dsmc, DSMC_RGN0_ATTR(cs), + RGNX_ATTR_MASK << RGNX_ATTR_SHIFT, + RGNX_ATTR_REG << RGNX_ATTR_SHIFT); + + /* enable all s2h interrupt */ + writel(0xffffffff, region_map->virt + LBC_S2H_INT_STA_EN); + writel(0xffffffff, region_map->virt + LBC_S2H_INT_STA_SIG_EN); + + /* clear all s2h interrupt */ + writel(LBC_S2H_INT_STA_MASK << LBC_S2H_INT_STA_SHIFT, + region_map->virt + LBC_S2H_INT_STA); + + /* config to normal memory space */ + writel(mcr_tmp, dsmc->regs + DSMC_MCR(cs)); + writel(rgn_attr_tmp, dsmc->regs + DSMC_RGN0_ATTR(cs)); +} + +static void dsmc_cfg_latency(uint32_t rd_ltcy, uint32_t wr_ltcy, + struct rockchip_dsmc *dsmc, uint32_t cs) +{ + rd_ltcy = calc_ltcy_value(rd_ltcy); + wr_ltcy = calc_ltcy_value(wr_ltcy); + + REG_CLRSETBITS(dsmc, DSMC_MTR(cs), + (MTR_RLTCY_MASK << MTR_RLTCY_SHIFT) | + (MTR_WLTCY_MASK << MTR_WLTCY_SHIFT), + (rd_ltcy << MTR_RLTCY_SHIFT) | + (wr_ltcy << MTR_WLTCY_SHIFT)); +} + +static int dsmc_psram_cfg(struct rockchip_dsmc *dsmc, uint32_t cs) +{ + uint32_t latency, mcr, tmp; + struct dsmc_map *region_map = &dsmc->cs_map[cs].region_map[0]; + struct dsmc_config_cs *cs_cfg = &dsmc->cfg.cs_cfg[cs]; + + mcr = readl(dsmc->regs + DSMC_MCR(cs)); + /* config to CR space */ + REG_CLRSETBITS(dsmc, DSMC_MCR(cs), + (MCR_IOWIDTH_MASK << MCR_IOWIDTH_SHIFT) | + (MCR_CRT_MASK << MCR_CRT_SHIFT), + (MCR_IOWIDTH_X8 << MCR_IOWIDTH_SHIFT) | + (MCR_CRT_CR_SPACE << MCR_CRT_SHIFT)); + if (cs_cfg->protcl == OPI_XCCELA_PSRAM) { + /* Xccela psram init */ + uint8_t mr_tmp; + + mr_tmp = xccela_read_mr(region_map, 0); + tmp = cs_cfg->rd_latency - 3; + mr_tmp = (mr_tmp & (~(XCCELA_MR0_RL_MASK << XCCELA_MR0_RL_SHIFT))) | + (tmp << XCCELA_MR0_RL_SHIFT); + mr_tmp |= XCCELA_MR0_RL_TYPE_VARIABLE << XCCELA_MR0_RL_TYPE_SHIFT; + xccela_write_mr(region_map, 0, mr_tmp); 
+ + mr_tmp = xccela_read_mr(region_map, 4); + latency = cs_cfg->wr_latency; + if (latency == 3) + tmp = 0; + else if (latency == 5) + tmp = 2; + else if (latency == 7) + tmp = 1; + else + tmp = latency; + + mr_tmp = (mr_tmp & (~(XCCELA_MR4_WL_MASK << XCCELA_MR4_WL_SHIFT))) | + (tmp << XCCELA_MR4_WL_SHIFT); + + xccela_write_mr(region_map, 4, mr_tmp); + + dsmc_cfg_latency(cs_cfg->rd_latency, cs_cfg->wr_latency, dsmc, cs); + + mr_tmp = xccela_read_mr(region_map, 8); + + if (cs_cfg->io_width == MCR_IOWIDTH_X16) { + mr_tmp |= XCCELA_MR8_IO_TYPE_X16 << XCCELA_MR8_IO_TYPE_SHIFT; + } else { + mr_tmp &= (~(XCCELA_MR8_IO_TYPE_MASK << XCCELA_MR8_IO_TYPE_SHIFT)); + mr_tmp |= XCCELA_MR8_IO_TYPE_X8 << XCCELA_MR8_IO_TYPE_SHIFT; + } + mr_tmp &= (~(XCCELA_MR8_BL_MASK << XCCELA_MR8_BL_SHIFT)); + if (cs_cfg->wrap_size == MCR_WRAPSIZE_8_CLK) + mr_tmp |= (XCCELA_MR8_BL_8_CLK << XCCELA_MR8_BL_SHIFT); + else if (cs_cfg->wrap_size == MCR_WRAPSIZE_16_CLK) + mr_tmp |= (XCCELA_MR8_BL_16_CLK << XCCELA_MR8_BL_SHIFT); + else if (cs_cfg->wrap_size == MCR_WRAPSIZE_32_CLK) + mr_tmp |= (XCCELA_MR8_BL_32_CLK << XCCELA_MR8_BL_SHIFT); + + xccela_write_mr(region_map, 8, mr_tmp); + } else { + /* Hyper psram init */ + uint16_t cr_tmp; + + cr_tmp = hyper_read_mr(region_map, HYPER_PSRAM_CR0); + + latency = cs_cfg->wr_latency; + if (latency == 3) + tmp = 0xe; + else if (latency == 4) + tmp = 0xf; + else + tmp = latency - 5; + cr_tmp = (cr_tmp & (~(CR0_INITIAL_LATENCY_MASK << CR0_INITIAL_LATENCY_SHIFT))) | + (tmp << CR0_INITIAL_LATENCY_SHIFT); + + cr_tmp = (cr_tmp & (~(CR0_BURST_LENGTH_MASK << CR0_BURST_LENGTH_SHIFT))); + + if (cs_cfg->wrap_size == MCR_WRAPSIZE_8_CLK) + cr_tmp |= (CR0_BURST_LENGTH_8_CLK << CR0_BURST_LENGTH_SHIFT); + else if (cs_cfg->wrap_size == MCR_WRAPSIZE_16_CLK) + cr_tmp |= (CR0_BURST_LENGTH_16_CLK << CR0_BURST_LENGTH_SHIFT); + else if (cs_cfg->wrap_size == MCR_WRAPSIZE_32_CLK) + cr_tmp |= (CR0_BURST_LENGTH_32_CLK << CR0_BURST_LENGTH_SHIFT); + + hyper_write_mr(region_map, 
HYPER_PSRAM_CR0, cr_tmp); + + dsmc_cfg_latency(latency, latency, dsmc, cs); + + cr_tmp = hyper_read_mr(region_map, HYPER_PSRAM_CR1); + cr_tmp = (cr_tmp & (~(CR1_CLOCK_TYPE_MASK << CR1_CLOCK_TYPE_SHIFT))) | + (CR1_CLOCK_TYPE_DIFF_CLK << CR1_CLOCK_TYPE_SHIFT); + hyper_write_mr(region_map, HYPER_PSRAM_CR1, cr_tmp); + } + /* config to memory space */ + writel(mcr, dsmc->regs + DSMC_MCR(cs)); + + return 0; +} + + +static int dsmc_psram_init(struct rockchip_dsmc *dsmc, uint32_t cs) +{ + uint32_t latency; + struct dsmc_config_cs *cs_cfg = &dsmc->cfg.cs_cfg[cs]; + uint32_t mhz = dsmc->cfg.freq_hz / MHZ; + + if (mhz <= 66) { + latency = 3; + } else if (mhz <= 100) { + latency = 4; + } else if (mhz <= 133) { + latency = 5; + } else if (mhz <= 166) { + latency = 6; + } else if (mhz <= 200) { + latency = 7; + } else { + pr_err("DSMC: PSRAM frequency do not support!\n"); + return -1; + } + + cs_cfg->rd_latency = cs_cfg->wr_latency = latency; + + dsmc_psram_cfg(dsmc, cs); + + return 0; +} + +static int dsmc_ctrller_cfg_for_psram(struct rockchip_dsmc *dsmc, uint32_t cs) +{ + int ret = 0; + struct dsmc_config_cs *cs_cfg = &dsmc->cfg.cs_cfg[cs]; + + REG_CLRSETBITS(dsmc, DSMC_MCR(cs), + MCR_DEVTYPE_MASK << MCR_DEVTYPE_SHIFT, + MCR_DEVTYPE_HYPERRAM << MCR_DEVTYPE_SHIFT); + + REG_CLRSETBITS(dsmc, DSMC_VDMC(cs), + (VDMC_MID_MASK << VDMC_MID_SHIFT) | + (VDMC_PROTOCOL_MASK << VDMC_PROTOCOL_SHIFT), + (cs_cfg->mid << VDMC_MID_SHIFT) | + (cs_cfg->protcl << VDMC_PROTOCOL_SHIFT)); + writel(cs_cfg->mtr_timing, + dsmc->regs + DSMC_MTR(cs)); + + ret = dsmc_psram_init(dsmc, cs); + + REG_CLRSETBITS(dsmc, DSMC_MCR(cs), + (MCR_WRAPSIZE_MASK << MCR_WRAPSIZE_SHIFT), + (cs_cfg->wrap_size << MCR_WRAPSIZE_SHIFT)); + + return ret; +} + +static void dsmc_psram_remodify_timing(struct rockchip_dsmc *dsmc, uint32_t cs) +{ + uint32_t max_length = 511, tcmd = 3; + uint32_t tcsm, tmp; + uint32_t mhz = dsmc->cfg.freq_hz / MHZ; + struct dsmc_map *region_map = &dsmc->cs_map[cs].region_map[0]; + struct 
dsmc_config_cs *cs_cfg = &dsmc->cfg.cs_cfg[cs]; + + if (cs_cfg->mid == APM_PSRAM) { + /* for extended temp */ + if (region_map->size <= 0x400000) + tcsm = DSMC_DEC_TCEM_2_5U; + else if (region_map->size <= 0x1000000) + tcsm = DSMC_DEC_TCEM_3U; + else + tcsm = DSMC_DEC_TCEM_0_5U; + } else { + tcsm = DSMC_DEV_TCSM_1U; + } + + tmp = (tcsm * mhz + 999) / 1000; + tmp = tmp - tcmd - 2 * cs_cfg->wr_latency - 4; + + if (tmp > max_length) + tmp = max_length; + + REG_CLRSETBITS(dsmc, DSMC_MCR(cs), + (MCR_MAXEN_MASK << MCR_MAXEN_SHIFT) | + (MCR_MAXLEN_MASK << MCR_MAXLEN_SHIFT) | + (MCR_IOWIDTH_MASK << MCR_IOWIDTH_SHIFT), + (MCR_MAX_LENGTH_EN << MCR_MAXEN_SHIFT) | + (tmp << MCR_MAXLEN_SHIFT) | + (cs_cfg->io_width << MCR_IOWIDTH_SHIFT)); + + if (cs_cfg->io_width == MCR_IOWIDTH_X16) + tmp = cs_cfg->col - 2; + else + tmp = cs_cfg->col - 1; + REG_CLRSETBITS(dsmc, DSMC_BDRTCR(cs), + (BDRTCR_COL_BIT_NUM_MASK << BDRTCR_COL_BIT_NUM_SHIFT) | + (BDRTCR_WR_BDR_XFER_EN_MASK << BDRTCR_WR_BDR_XFER_EN_SHIFT) | + (BDRTCR_RD_BDR_XFER_EN_MASK << BDRTCR_RD_BDR_XFER_EN_SHIFT), + ((tmp - 6) << BDRTCR_COL_BIT_NUM_SHIFT) | + (BDRTCR_WR_BDR_XFER_EN << BDRTCR_WR_BDR_XFER_EN_SHIFT) | + (BDRTCR_RD_BDR_XFER_EN << BDRTCR_RD_BDR_XFER_EN_SHIFT)); +} + +static void dsmc_lb_dma_clear_s2h_intrupt(struct rockchip_dsmc *dsmc, uint32_t cs) +{ + int region, manual = 0; + uint32_t mcr_tmp, rgn_attr_tmp; + struct dsmc_map *map; + struct dsmc_config_cs *cfg = &dsmc->cfg.cs_cfg[cs]; + + region = find_attr_region(cfg, RGNX_ATTR_REG); + if (region < 0) { + manual = -1; + region = 0; + } + + if (manual) { + mcr_tmp = readl(dsmc->regs + DSMC_MCR(cs)); + rgn_attr_tmp = readl(dsmc->regs + DSMC_RGN0_ATTR(cs)); + /* config to slave CSR space */ + REG_CLRSETBITS(dsmc, DSMC_MCR(cs), + MCR_CRT_MASK << MCR_CRT_SHIFT, + MCR_CRT_MEM_SPACE << MCR_CRT_SHIFT); + REG_CLRSETBITS(dsmc, DSMC_RGN0_ATTR(cs), + RGNX_ATTR_MASK << RGNX_ATTR_SHIFT, + RGNX_ATTR_REG << RGNX_ATTR_SHIFT); + } + + map = &dsmc->cs_map[cs].region_map[region]; + + /* 
clear all s2h interrupt */ + writel(0x1 << S2H_INT_FOR_DMA_NUM, + map->virt + LBC_S2H_INT_STA); + + if (manual) { + /* config to normal memory space */ + writel(mcr_tmp, dsmc->regs + DSMC_MCR(cs)); + writel(rgn_attr_tmp, dsmc->regs + DSMC_RGN0_ATTR(cs)); + } +} + +void rockchip_dsmc_lb_dma_hw_mode_dis(struct rockchip_dsmc *dsmc) +{ + uint32_t cs = dsmc->xfer.ops_cs; + + /* clear dsmc interrupt */ + writel(INT_STATUS(cs), dsmc->regs + DSMC_INT_STATUS); + /* disable dma request */ + writel(DMA_REQ_DIS(cs), dsmc->regs + DSMC_DMA_EN); + + dsmc_lb_dma_clear_s2h_intrupt(dsmc, cs); +} +EXPORT_SYMBOL(rockchip_dsmc_lb_dma_hw_mode_dis); + +int rockchip_dsmc_lb_dma_trigger_by_host(struct rockchip_dsmc *dsmc, uint32_t cs) +{ + int region, manual = 0; + uint32_t mcr_tmp, rgn_attr_tmp, flag_tmp; + struct dsmc_map *map; + struct dsmc_config_cs *cfg = &dsmc->cfg.cs_cfg[cs]; + + region = find_attr_region(cfg, RGNX_ATTR_REG); + if (region < 0) { + manual = -1; + region = 0; + } + + if (manual) { + mcr_tmp = readl(dsmc->regs + DSMC_MCR(cs)); + rgn_attr_tmp = readl(dsmc->regs + DSMC_RGN0_ATTR(cs)); + /* config to slave CSR space */ + REG_CLRSETBITS(dsmc, DSMC_MCR(cs), + (MCR_CRT_MASK << MCR_CRT_SHIFT), + (MCR_CRT_MEM_SPACE << MCR_CRT_SHIFT)); + REG_CLRSETBITS(dsmc, DSMC_RGN0_ATTR(cs), + RGNX_ATTR_MASK << RGNX_ATTR_SHIFT, + RGNX_ATTR_REG << RGNX_ATTR_SHIFT); + } + map = &dsmc->cs_map[cs].region_map[region]; + /* + * write (readl(LBC_CON(15)) + 1) to LBC_CON15 to slave which will + * write APP_CON(S2H_INT_FOR_DMA_NUM) trigger a slave to host interrupt + */ + flag_tmp = readl(map->virt + LBC_CON(15)); + + writel(flag_tmp + 1, map->virt + LBC_CON(15)); + + if (manual) { + /* config to normal memory space */ + writel(mcr_tmp, dsmc->regs + DSMC_MCR(cs)); + writel(rgn_attr_tmp, dsmc->regs + DSMC_RGN0_ATTR(cs)); + } + + return 0; +} +EXPORT_SYMBOL(rockchip_dsmc_lb_dma_trigger_by_host); + +int rockchip_dsmc_device_dectect(struct rockchip_dsmc *dsmc, uint32_t cs) +{ + int ret = 0; + + 
rockchip_dsmc_ctrller_init(dsmc, cs); + ret = dsmc_psram_dectect(dsmc, cs); + if (ret) + return ret; + + return ret; +} +EXPORT_SYMBOL(rockchip_dsmc_device_dectect); + +static void xccela_psram_reset(struct rockchip_dsmc *dsmc, uint32_t cs) +{ + struct dsmc_map *region_map = &dsmc->cs_map[cs].region_map[0]; + + REG_CLRSETBITS(dsmc, DSMC_VDMC(cs), + (VDMC_RESET_CMD_MODE_MASK << VDMC_RESET_CMD_MODE_SHIFT), + (0x1 << VDMC_RESET_CMD_MODE_SHIFT)); + xccela_write_mr(region_map, XCCELA_PSRAM_MR(0), XCCELA_PSRAM_MR_SET(0x0)); + udelay(200); + REG_CLRSETBITS(dsmc, DSMC_VDMC(cs), + (VDMC_RESET_CMD_MODE_MASK << VDMC_RESET_CMD_MODE_SHIFT), + (0x0 << VDMC_RESET_CMD_MODE_SHIFT)); +} + +int rockchip_dsmc_psram_reinit(struct rockchip_dsmc *dsmc, uint32_t cs) +{ + int ret = 0; + + if (dsmc->cfg.cs_cfg[cs].protcl == OPI_XCCELA_PSRAM) + xccela_psram_reset(dsmc, cs); + ret = dsmc_ctrller_cfg_for_psram(dsmc, cs); + dsmc_psram_remodify_timing(dsmc, cs); + + return ret; +} +EXPORT_SYMBOL(rockchip_dsmc_psram_reinit); + +int rockchip_dsmc_ctrller_init(struct rockchip_dsmc *dsmc, uint32_t cs) +{ + struct dsmc_config_cs *cfg = &dsmc->cfg.cs_cfg[cs]; + + writel(MRGTCR_READ_WRITE_MERGE_EN, + dsmc->regs + DSMC_MRGTCR(cs)); + writel((0x1 << RDS_DLL0_CTL_RDS_0_CLK_SMP_SEL_SHIFT) | + (cfg->dll_num[0] << RDS_DLL0_CTL_RDS_0_CLK_DELAY_NUM_SHIFT), + dsmc->regs + DSMC_RDS_DLL0_CTL(cs)); + writel((0x1 << RDS_DLL1_CTL_RDS_1_CLK_SMP_SEL_SHIFT) | + (cfg->dll_num[1] << RDS_DLL1_CTL_RDS_1_CLK_DELAY_NUM_SHIFT), + dsmc->regs + DSMC_RDS_DLL1_CTL(cs)); + + REG_CLRSETBITS(dsmc, DSMC_MCR(cs), + (MCR_ACS_MASK << MCR_ACS_SHIFT) | + (MCR_DEVTYPE_MASK << MCR_DEVTYPE_SHIFT) | + (MCR_IOWIDTH_MASK << MCR_IOWIDTH_SHIFT) | + (MCR_EXCLUSIVE_DQS_MASK << MCR_EXCLUSIVE_DQS_SHIFT) | + (MCR_WRAPSIZE_MASK << MCR_WRAPSIZE_SHIFT) | + (MCR_MAXEN_MASK << MCR_MAXEN_SHIFT) | + (MCR_MAXLEN_MASK << MCR_MAXLEN_SHIFT), + (cfg->acs << MCR_ACS_SHIFT) | + (MCR_DEVTYPE_HYPERRAM << MCR_DEVTYPE_SHIFT) | + (cfg->io_width << MCR_IOWIDTH_SHIFT) | + 
(cfg->exclusive_dqs << MCR_EXCLUSIVE_DQS_SHIFT) | + (cfg->wrap_size << MCR_WRAPSIZE_SHIFT) | + (cfg->max_length_en << MCR_MAXEN_SHIFT) | + (cfg->max_length << MCR_MAXLEN_SHIFT)); + + writel(cfg->wrap2incr_en, dsmc->regs + DSMC_WRAP2INCR(cs)); + + REG_CLRSETBITS(dsmc, DSMC_VDMC(cs), + (VDMC_LATENCY_FIXED_MASK << VDMC_LATENCY_FIXED_SHIFT) | + (VDMC_PROTOCOL_MASK << VDMC_PROTOCOL_SHIFT), + (VDMC_LATENCY_VARIABLE << VDMC_LATENCY_FIXED_SHIFT) | + (cfg->device_type << VDMC_PROTOCOL_SHIFT)); + writel(cap_2_dev_size(dsmc->cfg.cap), dsmc->regs + DSMC_DEV_SIZE); + + return 0; +} +EXPORT_SYMBOL(rockchip_dsmc_ctrller_init); + +int rockchip_dsmc_lb_init(struct rockchip_dsmc *dsmc, uint32_t cs) +{ + int ret = 0; + + dsmc_ctrller_cfg_for_lb(dsmc, cs); + ret = dsmc_lb_cmn_config(dsmc, cs); + if (ret) + return ret; + dsmc_lb_csr_config(dsmc, cs); + + return ret; +} +EXPORT_SYMBOL(rockchip_dsmc_lb_init); + +MODULE_LICENSE("GPL"); +MODULE_AUTHOR("Zhihuan He "); +MODULE_DESCRIPTION("ROCKCHIP DSMC controller driver"); diff --git a/drivers/memory/rockchip/dsmc-host.c b/drivers/memory/rockchip/dsmc-host.c new file mode 100644 index 000000000000..1aafa3cff3e0 --- /dev/null +++ b/drivers/memory/rockchip/dsmc-host.c @@ -0,0 +1,1070 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (c) 2024 Rockchip Electronics Co., Ltd. 
+ */ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "dsmc-host.h" +#include "dsmc-lb-slave.h" + +/* DMA translate state flags */ +#define RXDMA (1 << 0) +#define TXDMA (1 << 1) + +#ifndef FALSE +#define FALSE 0 +#endif + +#ifndef TRUE +#define TRUE (1) +#endif + +static __maybe_unused int rk3576_dsmc_platform_init(struct platform_device *pdev) +{ + uint32_t i, val; + const struct rockchip_dsmc_device *priv; + + priv = platform_get_drvdata(pdev); + + if (IS_ERR_OR_NULL(priv->dsmc.grf)) { + dev_err(priv->dsmc.dev, "Missing rockchip,grf property\n"); + return -ENODEV; + } + + for (i = RK3576_GPIO3A_IOMUX_SEL_H; i <= RK3576_GPIO4A_IOMUX_SEL_L; i += 4) { + if (i == RK3576_GPIO4A_IOMUX_SEL_L) + val = RK3576_IOMUX_SEL(0x5, 4) | + RK3576_IOMUX_SEL(0x5, 0); + else + val = RK3576_IOMUX_SEL(0x5, 12) | + RK3576_IOMUX_SEL(0x5, 8) | + RK3576_IOMUX_SEL(0x5, 4) | + RK3576_IOMUX_SEL(0x5, 0); + regmap_write(priv->dsmc.grf, i, val); + } + + return 0; +} + +static const struct of_device_id dsmc_of_match[] = { +#if IS_ENABLED(CONFIG_CPU_RK3576) + { + .compatible = "rockchip,rk3576-dsmc", .data = rk3576_dsmc_platform_init + }, +#endif + {}, +}; +MODULE_DEVICE_TABLE(of, dsmc_of_match); + +static int rockchip_dsmc_platform_init(struct platform_device *pdev) +{ + const struct of_device_id *match; + int (*init)(struct platform_device *pdev); + int ret; + + match = of_match_node(dsmc_of_match, pdev->dev.of_node); + if (match) { + init = match->data; + if (init) { + ret = init(pdev); + if (ret) + return ret; + } + } + + return 0; +} + +#ifdef CONFIG_ARM64 +static void *rk_dsmc_map_kernel(phys_addr_t start, size_t len, uint32_t mem_attr) +{ + void *vaddr; + + if (mem_attr == DSMC_MEM_ATTRIBUTE_CACHE) + vaddr = ioremap_cache(start, len); + else if (mem_attr == DSMC_MEM_ATTRIBUTE_WR_COM) + vaddr = ioremap_wc(start, len); + else + vaddr = ioremap(start, len); + + return vaddr; +} + +static void rk_dsmc_unmap_kernel(void *vaddr) +{ + 
if (vaddr != NULL) + iounmap(vaddr); +} +#else +static void *rk_dsmc_map_kernel(phys_addr_t start, size_t len, uint32_t mem_attr) +{ + int i; + void *vaddr; + pgprot_t pgprot; + phys_addr_t phys; + int npages = PAGE_ALIGN(len) / PAGE_SIZE; + struct page **p = vmalloc(sizeof(struct page *) * npages); + + if (!p) + return NULL; + + if (mem_attr == DSMC_MEM_ATTRIBUTE_CACHE) + pgprot = PAGE_KERNEL; + else if (mem_attr == DSMC_MEM_ATTRIBUTE_WR_COM) + pgprot = pgprot_writecombine(PAGE_KERNEL); + else + pgprot = pgprot_noncached(PAGE_KERNEL); + + phys = start; + for (i = 0; i < npages; i++) { + p[i] = phys_to_page(phys); + phys += PAGE_SIZE; + } + + vaddr = vmap(p, npages, VM_MAP, pgprot); + vfree(p); + + return vaddr; +} + +static void rk_dsmc_unmap_kernel(void *vaddr) +{ + if (vaddr != NULL) + vunmap(vaddr); +} +#endif + +static int dsmc_parse_dt_regions(struct platform_device *pdev, struct device_node *lb_np, + struct dsmc_config_cs *cfg) +{ + int i; + int ret = 0; + char region_name[16]; + struct device_node *region_node, *child_node; + struct device *dev = &pdev->dev; + struct regions_config *rgn; + const char *attribute; + + region_node = of_get_child_by_name(lb_np, "region"); + if (!region_node) { + dev_err(dev, "Failed to get dsmc_slave node\n"); + return -ENODEV; + } + + for (i = 0; i < DSMC_LB_MAX_RGN; i++) { + rgn = &cfg->slv_rgn[i]; + snprintf(region_name, sizeof(region_name), "region%d", i); + child_node = of_get_child_by_name(region_node, region_name); + if (child_node) { + ret = of_property_read_string(child_node, "rockchip,attribute", + &attribute); + if (ret) { + dev_err(dev, "Failed to read region %d rockchip,attribute!\n", i); + ret = -ENODEV; + goto release_region_node; + } + if (strcmp(attribute, "Merged FIFO") == 0) { + rgn->attribute = RGNX_ATTR_MERGE_FIFO; + } else if (strcmp(attribute, "No-Merge FIFO") == 0) { + rgn->attribute = RGNX_ATTR_NO_MERGE_FIFO; + } else if (strcmp(attribute, "DPRA") == 0) { + rgn->attribute = RGNX_ATTR_DPRA; + } else if 
(strcmp(attribute, "Register") == 0) { + rgn->attribute = RGNX_ATTR_REG; + } else { + dev_err(dev, "Region %d unknown attribute: %s\n", + i, attribute); + ret = -ENODEV; + of_node_put(child_node); + goto release_region_node; + } + + if (of_property_read_u32(child_node, "rockchip,ca-addr-width", + &rgn->ca_addr_width)) { + dev_err(dev, "Failed to read rockchip,ca-addr-width!\n"); + ret = -ENODEV; + of_node_put(child_node); + goto release_region_node; + } + if (of_property_read_u32(child_node, "rockchip,dummy-clk-num", + &rgn->dummy_clk_num)) { + dev_err(dev, "Failed to read rockchip,dummy-clk-num!\n"); + ret = -ENODEV; + of_node_put(child_node); + goto release_region_node; + } + rgn->dummy_clk_num--; + + if (of_property_read_u32(child_node, "rockchip,cs0-be-ctrled", + &rgn->cs0_be_ctrled)) { + dev_err(dev, "Failed to read rockchip,cs0-be-ctrled!\n"); + ret = -ENODEV; + of_node_put(child_node); + goto release_region_node; + } + + if (of_property_read_u32(child_node, "rockchip,cs0-ctrl", + &rgn->cs0_ctrl)) { + dev_err(dev, "Failed to read rockchip,cs0-ctrl!\n"); + ret = -ENODEV; + of_node_put(child_node); + goto release_region_node; + } + rgn->status = of_device_is_available(child_node); + if (rgn->status) + cfg->rgn_num++; + of_node_put(child_node); + } else { + dev_warn(dev, "Failed to find node: %s\n", region_name); + } + } + +release_region_node: + of_node_put(region_node); + + return ret; +} + +static int dsmc_reg_remap(struct device *dev, struct dsmc_ctrl_config *cfg, + struct rockchip_dsmc *dsmc, uint64_t *mem_ranges, + uint32_t *dqs_dll) +{ + int ret = 0; + uint32_t cs, rgn_num_max; + struct dsmc_map *region_map; + + rgn_num_max = 1; + for (cs = 0; cs < DSMC_MAX_SLAVE_NUM; cs++) { + if (cfg->cs_cfg[cs].device_type == DSMC_UNKNOWN_DEVICE) + continue; + region_map = &dsmc->cs_map[cs].region_map[0]; + cfg->cap = max_t(uint32_t, cfg->cap, region_map->size); + rgn_num_max = max_t(uint32_t, rgn_num_max, cfg->cs_cfg[cs].rgn_num); + + region_map->virt = 
			rk_dsmc_map_kernel(region_map->phys,
					   region_map->size,
					   DSMC_MEM_ATTRIBUTE_NO_CACHE);
		if (!region_map->virt) {
			dev_err(dev, "Failed to remap slave cs%d memory\n", cs);
			ret = -EINVAL;
			continue;
		}
	}
	/* total capacity = biggest region size * biggest region count */
	cfg->cap *= rgn_num_max;

	return ret;
}

/*
 * Lay out the enabled local-bus regions of one chip-select back to back in
 * physical address space: region N sits one region-0-size above region N-1.
 * NOTE(review): the rgn_num 3 -> 4 bump looks like a hardware alignment
 * requirement (region count must be a power of two?) — confirm.
 */
static void dsmc_lb_memory_get(struct dsmc_config_cs *cfg, struct dsmc_cs_map *cs_map)
{
	int rgn;
	phys_addr_t cphys = cs_map->region_map[0].phys;

	if (cfg->rgn_num == 3)
		cfg->rgn_num++;

	for (rgn = 1; rgn < DSMC_LB_MAX_RGN; rgn++) {
		if (cfg->slv_rgn[rgn].status) {
			cphys += cs_map->region_map[0].size;
			cs_map->region_map[rgn].phys = cphys;
			cs_map->region_map[rgn].size = cs_map->region_map[0].size;
		}
	}
}

/*
 * Final memory mapping, done after device init: drop the temporary
 * non-cached probe mapping of region 0 and re-map each region with its
 * runtime attribute (register regions non-cached, data regions cached).
 * Local-bus chip-selects additionally get their character devices
 * registered.  Returns 0, or -EINVAL if any mapping failed.
 */
static int dsmc_mem_remap(struct device *dev, struct rockchip_dsmc *dsmc)
{
	int ret = 0;
	uint32_t cs, i;
	uint32_t mem_attr;
	struct dsmc_map *region_map;
	struct dsmc_ctrl_config *cfg = &dsmc->cfg;
	struct regions_config *slv_rgn;

	for (cs = 0; cs < DSMC_MAX_SLAVE_NUM; cs++) {
		if (cfg->cs_cfg[cs].device_type == DSMC_UNKNOWN_DEVICE)
			continue;
		region_map = &dsmc->cs_map[cs].region_map[0];
		/* unremap the register space */
		if (region_map->virt) {
			rk_dsmc_unmap_kernel(region_map->virt);
			region_map->virt = NULL;
		}

		if (cfg->cs_cfg[cs].device_type == DSMC_LB_DEVICE) {
			for (i = 0; i < DSMC_LB_MAX_RGN; i++) {
				region_map = &dsmc->cs_map[cs].region_map[i];
				slv_rgn = &cfg->cs_cfg[cs].slv_rgn[i];
				if (!slv_rgn->status)
					continue;
				if (slv_rgn->attribute == RGNX_ATTR_REG)
					mem_attr = DSMC_MEM_ATTRIBUTE_NO_CACHE;
				else
					mem_attr = DSMC_MEM_ATTRIBUTE_CACHE;

				region_map->virt = rk_dsmc_map_kernel(region_map->phys,
								      region_map->size,
								      mem_attr);
				if (!region_map->virt) {
					dev_err(dev, "Failed to remap slave cs%d memory\n", cs);
					ret = -EINVAL;
					continue;
				}
			}
			if (rockchip_dsmc_register_lb_device(dev, cs)) {
				dev_err(dev, "Failed to register local bus device\n");
				ret = -EINVAL;
				return ret;
			}

		} else {
			region_map = &dsmc->cs_map[cs].region_map[0];
			region_map->virt = rk_dsmc_map_kernel(region_map->phys,
							      region_map->size,
							      DSMC_MEM_ATTRIBUTE_CACHE);
			if (!region_map->virt) {
				dev_err(dev, "Failed to remap psram cs%d memory\n", cs);
				ret = -EINVAL;
				continue;
			}
		}
	}

	return ret;
}

/*
 * Top-level DT parsing: read the "slave" node (dqs-dll values, address
 * ranges), follow rockchip,slave-dev to the slave description and classify
 * every chip-select as PSRAM or local-bus slave (the two are mutually
 * exclusive).  Ends by mapping the probe (register) window of each cs via
 * dsmc_reg_remap().
 */
static int dsmc_parse_dt(struct platform_device *pdev, struct rockchip_dsmc *dsmc)
{
	int ret = 0;
	uint32_t cs;
	uint32_t psram = 0, lb_slave = 0;
	uint64_t mem_ranges[2];
	uint32_t dqs_dll[2 * DSMC_MAX_SLAVE_NUM];
	struct device *dev = &pdev->dev;
	struct device_node *np = dev->of_node;
	struct device_node *child_node;
	struct device_node *slave_np, *dsmc_slave_np;
	struct dsmc_ctrl_config *cfg = &dsmc->cfg;
	struct dsmc_map *region_map;
	char slave_name[16];

	slave_np = of_get_child_by_name(np, "slave");
	if (!slave_np) {
		dev_err(dev, "Failed to find slave node\n");
		return -ENODEV;
	}

	ret = of_property_read_u32_array(slave_np, "rockchip,dqs-dll", dqs_dll,
					 ARRAY_SIZE(dqs_dll));
	if (ret) {
		dev_err(dev, "Failed to read rockchip,dqs-dll!\n");
		ret = -ENODEV;
		goto release_slave_node;
	}

	/* mem_ranges[0] = base address, mem_ranges[1] = per-cs window size */
	ret = of_property_read_u64_array(slave_np, "map,ranges",
					 mem_ranges, ARRAY_SIZE(mem_ranges));
	if (ret) {
		dev_err(dev, "Failed to read map,ranges!\n");
		ret = -ENODEV;
		goto release_slave_node;
	}
	dsmc_slave_np = of_parse_phandle(slave_np, "rockchip,slave-dev", 0);
	if (!dsmc_slave_np) {
		dev_err(dev, "Failed to get rockchip,slave-dev node\n");
		ret = -ENODEV;
		goto release_slave_node;
	}
	if (of_property_read_u32(dsmc_slave_np, "rockchip,clk-mode", &cfg->clk_mode)) {
		dev_err(dev, "Failed to get rockchip,clk-mode\n");
		ret = -ENODEV;
		goto release_dsmc_slave_node;
	}

	for (cs = 0; cs < DSMC_MAX_SLAVE_NUM; cs++) {
		region_map = &dsmc->cs_map[cs].region_map[0];
		region_map->phys = mem_ranges[0] + cs * mem_ranges[1];
		region_map->size = mem_ranges[1];
		cfg->cs_cfg[cs].dll_num[0] = dqs_dll[2 * cs];
		cfg->cs_cfg[cs].dll_num[1] = dqs_dll[2 * cs + 1];

		snprintf(slave_name, sizeof(slave_name), "psram%d", cs);
		child_node = of_get_child_by_name(dsmc_slave_np, slave_name);
		if (child_node) {
			if (of_device_is_available(child_node)) {
				/* provisional type; detection may refine it */
				cfg->cs_cfg[cs].device_type = OPI_XCCELA_PSRAM;
				psram = 1;
				of_node_put(child_node);
				continue;
			}
			of_node_put(child_node);
		}
		snprintf(slave_name, sizeof(slave_name), "lb_slave%d", cs);
		child_node = of_get_child_by_name(dsmc_slave_np, slave_name);
		if (child_node) {
			if (of_device_is_available(child_node)) {
				cfg->cs_cfg[cs].device_type = DSMC_LB_DEVICE;
				lb_slave = 1;
				if (dsmc_parse_dt_regions(pdev, child_node,
							  &cfg->cs_cfg[cs])) {
					ret = -ENODEV;
					of_node_put(child_node);
					goto release_dsmc_slave_node;
				}
				dsmc_lb_memory_get(&cfg->cs_cfg[cs], &dsmc->cs_map[cs]);
			}
			of_node_put(child_node);
		}
	}
	if (psram && lb_slave) {
		dev_err(dev, "Can't have both psram and lb_slave\n");
		ret = -ENODEV;
		goto release_dsmc_slave_node;
	} else if (!(psram || lb_slave)) {
		dev_err(dev, "psram or lb_slave need open in dts\n");
		ret = -ENODEV;
		goto release_dsmc_slave_node;
	}

	ret = dsmc_reg_remap(dev, cfg, dsmc, mem_ranges, dqs_dll);

release_dsmc_slave_node:
	of_node_put(dsmc_slave_np);
release_slave_node:
	of_node_put(slave_np);

	return ret;
}

/* dsmc_ops.read: 32-bit read from a mapped region at addr_offset. */
static int dsmc_read(struct rockchip_dsmc_device *dsmc_dev, uint32_t cs, uint32_t region,
		     unsigned long addr_offset, uint32_t *data)
{
	struct dsmc_map *map = &dsmc_dev->dsmc.cs_map[cs].region_map[region];

	*data = readl_relaxed(map->virt + addr_offset);

	return 0;
}

/* dsmc_ops.write: 32-bit write to a mapped region at addr_offset. */
static int dsmc_write(struct rockchip_dsmc_device *dsmc_dev, uint32_t cs, uint32_t region,
		      unsigned long addr_offset, uint32_t val)
{
	struct dsmc_map *map = &dsmc_dev->dsmc.cs_map[cs].region_map[region];

	writel_relaxed(val, map->virt + addr_offset);

	return 0;
}

/*
 * Program the number of DMA burst requests for the transfer and enable the
 * controller's hardware DMA request line for this chip-select.
 */
static void dsmc_lb_dma_hw_mode_en(struct rockchip_dsmc *dsmc, uint32_t cs)
{
	struct dsmc_transfer *xfer = &dsmc->xfer;
	size_t
size = xfer->transfer_size; + uint32_t burst_byte = xfer->brst_len * xfer->brst_size; + uint32_t dma_req_num; + + dma_req_num = size / burst_byte; + if (size % burst_byte) { + pr_warn("DSMC: DMA size is unaligned\n"); + dma_req_num++; + } + writel(dma_req_num, dsmc->regs + DSMC_DMA_REQ_NUM(cs)); + + /* enable dma request */ + writel(DMA_REQ_EN(cs), dsmc->regs + DSMC_DMA_EN); +} + +static void rockchip_dsmc_interrupt_mask(struct rockchip_dsmc *dsmc) +{ + uint32_t cs = dsmc->xfer.ops_cs; + + /* mask dsmc interrupt */ + writel(INT_MASK(cs), dsmc->regs + DSMC_INT_MASK); +} + +static void rockchip_dsmc_interrupt_unmask(struct rockchip_dsmc *dsmc) +{ + uint32_t cs = dsmc->xfer.ops_cs; + + /* mask dsmc interrupt */ + writel(INT_UNMASK(cs), dsmc->regs + DSMC_INT_MASK); +} + +static void rockchip_dsmc_lb_dma_txcb(void *data) +{ + struct rockchip_dsmc *dsmc = data; + + atomic_fetch_andnot(TXDMA, &dsmc->xfer.state); + rockchip_dsmc_lb_dma_hw_mode_dis(dsmc); + rockchip_dsmc_interrupt_unmask(dsmc); +} + +static void rockchip_dsmc_lb_dma_rxcb(void *data) +{ + struct rockchip_dsmc *dsmc = data; + + atomic_fetch_andnot(RXDMA, &dsmc->xfer.state); + rockchip_dsmc_lb_dma_hw_mode_dis(dsmc); + rockchip_dsmc_interrupt_unmask(dsmc); +} + +static int rockchip_dsmc_lb_prepare_tx_dma(struct device *dev, + struct rockchip_dsmc *dsmc, uint32_t cs) +{ + struct dma_async_tx_descriptor *txdesc = NULL; + struct dsmc_transfer *xfer = &dsmc->xfer; + + struct dma_slave_config txconf = { + .direction = DMA_MEM_TO_DEV, + .dst_addr = xfer->dst_addr, + .dst_addr_width = xfer->brst_size, + .dst_maxburst = xfer->brst_len, + }; + + dmaengine_slave_config(dsmc->dma_req[cs], &txconf); + + txdesc = dmaengine_prep_slave_single( + dsmc->dma_req[cs], + xfer->src_addr, + xfer->transfer_size, + DMA_MEM_TO_DEV, DMA_PREP_INTERRUPT); + if (!txdesc) { + dev_err(dev, "Not able to get tx desc for DMA xfer\n"); + return -EIO; + } + + txdesc->callback = rockchip_dsmc_lb_dma_txcb; + txdesc->callback_param = dsmc; + + 
atomic_or(TXDMA, &dsmc->xfer.state); + dmaengine_submit(txdesc); + dma_async_issue_pending(dsmc->dma_req[cs]); + + /* 1 means the transfer is in progress */ + return 1; +} + +static int rockchip_dsmc_lb_prepare_rx_dma(struct device *dev, + struct rockchip_dsmc *dsmc, uint32_t cs) +{ + struct dma_async_tx_descriptor *rxdesc = NULL; + struct dsmc_transfer *xfer = &dsmc->xfer; + struct dma_slave_config rxconf = { + .direction = DMA_DEV_TO_MEM, + .src_addr = xfer->src_addr, + .src_addr_width = xfer->brst_size, + .src_maxburst = xfer->brst_len, + }; + + dmaengine_slave_config(dsmc->dma_req[cs], &rxconf); + rxdesc = dmaengine_prep_slave_single( + dsmc->dma_req[cs], + xfer->dst_addr, + xfer->transfer_size, + DMA_DEV_TO_MEM, DMA_PREP_INTERRUPT); + if (!rxdesc) { + dev_err(dev, "Not able to get rx desc for DMA xfer\n"); + return -EIO; + } + + rxdesc->callback = rockchip_dsmc_lb_dma_rxcb; + rxdesc->callback_param = dsmc; + + atomic_or(RXDMA, &dsmc->xfer.state); + dmaengine_submit(rxdesc); + dma_async_issue_pending(dsmc->dma_req[cs]); + + /* 1 means the transfer is in progress */ + return 1; + +} + +static int dsmc_copy_from(struct rockchip_dsmc_device *dsmc_dev, uint32_t cs, uint32_t region, + uint32_t from, dma_addr_t dst_phys, size_t size) +{ + struct dsmc_map *map = &dsmc_dev->dsmc.cs_map[cs].region_map[region]; + struct device *dev = dsmc_dev->dsmc.dev; + struct rockchip_dsmc *dsmc = &dsmc_dev->dsmc; + + if (atomic_read(&dsmc->xfer.state) & (RXDMA | TXDMA)) { + pr_warn("DSMC: copy_from: the transfer is busy!\n"); + return -EBUSY; + } + + dsmc->xfer.src_addr = map->phys + from; + dsmc->xfer.dst_addr = dst_phys; + dsmc->xfer.transfer_size = size; + dsmc->xfer.ops_cs = cs; + + rockchip_dsmc_interrupt_mask(dsmc); + + rockchip_dsmc_lb_prepare_rx_dma(dev, dsmc, cs); + + dsmc_lb_dma_hw_mode_en(dsmc, cs); + + rockchip_dsmc_lb_dma_trigger_by_host(dsmc, cs); + + return 0; +} + +static int dsmc_copy_from_state(struct rockchip_dsmc_device *dsmc_dev) +{ + struct rockchip_dsmc *dsmc = 
&dsmc_dev->dsmc; + + if (atomic_read(&dsmc->xfer.state) & RXDMA) + return -EBUSY; + else + return 0; +} + +static int dsmc_copy_to(struct rockchip_dsmc_device *dsmc_dev, uint32_t cs, uint32_t region, + dma_addr_t src_phys, uint32_t to, size_t size) +{ + struct dsmc_map *map = &dsmc_dev->dsmc.cs_map[cs].region_map[region]; + struct device *dev = dsmc_dev->dsmc.dev; + struct rockchip_dsmc *dsmc = &dsmc_dev->dsmc; + + if (atomic_read(&dsmc->xfer.state) & (RXDMA | TXDMA)) { + pr_warn("DSMC: copy_to: the transfer is busy!\n"); + return -EBUSY; + } + + dsmc->xfer.src_addr = src_phys; + dsmc->xfer.dst_addr = map->phys + to; + dsmc->xfer.transfer_size = size; + dsmc->xfer.ops_cs = cs; + + rockchip_dsmc_interrupt_mask(dsmc); + + rockchip_dsmc_lb_prepare_tx_dma(dev, dsmc, cs); + + dsmc_lb_dma_hw_mode_en(dsmc, cs); + + rockchip_dsmc_lb_dma_trigger_by_host(dsmc, cs); + + return 0; +} + +static int dsmc_copy_to_state(struct rockchip_dsmc_device *dsmc_dev) +{ + struct rockchip_dsmc *dsmc = &dsmc_dev->dsmc; + + if (atomic_read(&dsmc->xfer.state) & TXDMA) + return -EBUSY; + else + return 0; +} + +static void dsmc_data_init(struct rockchip_dsmc *dsmc) +{ + uint32_t cs; + struct dsmc_ctrl_config *cfg = &dsmc->cfg; + struct dsmc_config_cs *cs_cfg; + + dsmc->xfer.brst_len = 16; + dsmc->xfer.brst_size = DMA_SLAVE_BUSWIDTH_8_BYTES; + for (cs = 0; cs < DSMC_MAX_SLAVE_NUM; cs++) { + if (cfg->cs_cfg[cs].device_type == DSMC_UNKNOWN_DEVICE) + continue; + cs_cfg = &dsmc->cfg.cs_cfg[cs]; + + cs_cfg->exclusive_dqs = 0; + if (cs_cfg->device_type == OPI_XCCELA_PSRAM) { + cs_cfg->io_width = MCR_IOWIDTH_X16; + cs_cfg->wrap_size = DSMC_BURST_WRAPSIZE_32CLK; + cs_cfg->wrap2incr_en = 0; + cs_cfg->acs = 1; + cs_cfg->max_length_en = 1; + cs_cfg->max_length = 0xff; + } else { + cs_cfg->io_width = MCR_IOWIDTH_X16; + cs_cfg->wrap_size = DSMC_BURST_WRAPSIZE_16CLK; + cs_cfg->rd_latency = 2; + cs_cfg->wr_latency = 2; + cs_cfg->wrap2incr_en = 1; + cs_cfg->acs = 0; + cs_cfg->max_length_en = 0; + 
cs_cfg->max_length = 0x0; + } + } +} + +static void dsmc_reset_ctrl(struct rockchip_dsmc *dsmc) +{ + reset_control_assert(dsmc->areset); + reset_control_assert(dsmc->preset); + udelay(20); + reset_control_deassert(dsmc->areset); + reset_control_deassert(dsmc->preset); +} + +static int dsmc_init(struct rockchip_dsmc *dsmc) +{ + uint32_t cs; + struct dsmc_ctrl_config *cfg = &dsmc->cfg; + struct dsmc_config_cs *cs_cfg; + uint32_t ret = 0; + + dsmc_data_init(dsmc); + + for (cs = 0; cs < DSMC_MAX_SLAVE_NUM; cs++) { + if (cfg->cs_cfg[cs].device_type == DSMC_UNKNOWN_DEVICE) + continue; + if (cfg->cs_cfg[cs].device_type == OPI_XCCELA_PSRAM) { + ret = rockchip_dsmc_device_dectect(dsmc, cs); + if (ret) + return ret; + } + } + dsmc_reset_ctrl(dsmc); + + for (cs = 0; cs < DSMC_MAX_SLAVE_NUM; cs++) { + if (cfg->cs_cfg[cs].device_type == DSMC_UNKNOWN_DEVICE) + continue; + cs_cfg = &dsmc->cfg.cs_cfg[cs]; + pr_info("DSMC: init cs%d %s device\n", + cs, (cs_cfg->device_type == DSMC_LB_DEVICE) ? "LB" : "PSRAM"); + rockchip_dsmc_ctrller_init(dsmc, cs); + if (cs_cfg->device_type == OPI_XCCELA_PSRAM) + ret = rockchip_dsmc_psram_reinit(dsmc, cs); + else + ret = rockchip_dsmc_lb_init(dsmc, cs); + + if (ret) + break; + } + + return ret; +} + +static struct dsmc_ops rockchip_dsmc_ops = { + .read = dsmc_read, + .write = dsmc_write, + .copy_from = dsmc_copy_from, + .copy_from_state = dsmc_copy_from_state, + .copy_to = dsmc_copy_to, + .copy_to_state = dsmc_copy_to_state, +}; + +static int rockchip_dsmc_dma_request(struct device *dev, struct rockchip_dsmc *dsmc) +{ + int ret = 0; + + atomic_set(&dsmc->xfer.state, 0); + + dsmc->dma_req[0] = dma_request_chan(dev, "req0"); + if (!dsmc->dma_req[0]) { + dev_err(dev, "Failed to request DMA dsmc req0 channel!\n"); + return -ENODEV; + } + + dsmc->dma_req[1] = dma_request_chan(dev, "req1"); + if (!dsmc->dma_req[1]) { + dev_err(dev, "Failed to request DMA dsmc req1 channel!\n"); + ret = -ENODEV; + goto err; + } + return ret; +err: + 
dma_release_channel(dsmc->dma_req[0]); + return ret; +} + +const char *rockchip_dsmc_get_compat(int index) +{ + if (index < 0 || index >= ARRAY_SIZE(dsmc_of_match)) + return NULL; + + return dsmc_of_match[index].compatible; +} +EXPORT_SYMBOL(rockchip_dsmc_get_compat); + +static int match_dsmc_device(struct device *dev, const void *data) +{ + const char *compat = data; + + if (!dev->of_node) + return 0; + + return of_device_is_compatible(dev->of_node, compat); +} + +struct rockchip_dsmc_device *rockchip_dsmc_find_device_by_compat(const char *compat) +{ + struct device *dev; + struct platform_device *pdev; + const struct rockchip_dsmc_device *priv; + + dev = bus_find_device(&platform_bus_type, NULL, compat, match_dsmc_device); + if (!dev) + return NULL; + + pdev = to_platform_device(dev); + priv = platform_get_drvdata(pdev); + + return (struct rockchip_dsmc_device *)priv; +} +EXPORT_SYMBOL(rockchip_dsmc_find_device_by_compat); + +static int rk_dsmc_probe(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + struct device_node *np = pdev->dev.of_node; + struct rockchip_dsmc *dsmc; + struct rockchip_dsmc_device *priv; + struct resource *mem; + int ret = 0; + + priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL); + if (!priv) + return -ENOMEM; + + platform_set_drvdata(pdev, priv); + dsmc = &priv->dsmc; + + mem = platform_get_resource(pdev, IORESOURCE_MEM, 0); + dsmc->regs = devm_ioremap_resource(&pdev->dev, mem); + if (IS_ERR(dsmc->regs)) + return PTR_ERR(dsmc->regs); + + dsmc->grf = syscon_regmap_lookup_by_phandle(np, "rockchip,grf"); + if (IS_ERR(dsmc->grf)) { + dev_err(dev, "Missing rockchip,grf property\n"); + return ret; + } + + ret = rockchip_dsmc_platform_init(pdev); + if (ret) + return ret; + + ret = device_property_read_u32(dev, "clock-frequency", &dsmc->cfg.freq_hz); + if (ret) { + dev_err(dev, "Failed to read clock-frequency property!\n"); + return ret; + } + + dsmc->cfg.ctrl_freq_hz = dsmc->cfg.freq_hz * 2; + + if (dsmc_parse_dt(pdev, dsmc)) 
{ + ret = -ENODEV; + dev_err(dev, "The dts parameters get fail! ret = %d\n", ret); + return ret; + } + + dsmc->areset = devm_reset_control_get(dev, "dsmc"); + if (IS_ERR(dsmc->areset)) { + ret = PTR_ERR(dsmc->areset); + dev_err(dev, "failed to get dsmc areset: %d\n", ret); + return ret; + } + dsmc->preset = devm_reset_control_get(dev, "apb"); + if (IS_ERR(dsmc->preset)) { + ret = PTR_ERR(dsmc->preset); + dev_err(dev, "failed to get dsmc preset: %d\n", ret); + return ret; + } + + dsmc->clk_sys = devm_clk_get(dev, "clk_sys"); + if (IS_ERR(dsmc->clk_sys)) { + dev_err(dev, "Can't get clk_sys clk\n"); + return PTR_ERR(dsmc->clk_sys); + } + + dsmc->aclk = devm_clk_get(dev, "aclk_dsmc"); + if (IS_ERR(dsmc->aclk)) { + dev_err(dev, "Can't get aclk_dsmc clk\n"); + return PTR_ERR(dsmc->aclk); + } + + dsmc->pclk = devm_clk_get(dev, "pclk"); + if (IS_ERR(dsmc->pclk)) { + dev_err(dev, "Can't get pclk clk\n"); + return PTR_ERR(dsmc->pclk); + } + dsmc->aclk_root = devm_clk_get(dev, "aclk_root"); + if (IS_ERR(dsmc->aclk_root)) { + dev_err(dev, "Can't get aclk_root clk\n"); + return PTR_ERR(dsmc->aclk_root); + } + + ret = clk_prepare_enable(dsmc->aclk_root); + if (ret) { + dev_err(dev, "Can't prepare enable dsmc aclk_root: %d\n", ret); + goto out; + } + ret = clk_prepare_enable(dsmc->aclk); + if (ret) { + dev_err(dev, "Can't prepare enable dsmc aclk: %d\n", ret); + goto err_dis_aclk_root; + } + ret = clk_prepare_enable(dsmc->pclk); + if (ret) { + dev_err(dev, "Can't prepare enable dsmc pclk: %d\n", ret); + goto err_dis_aclk; + } + ret = clk_prepare_enable(dsmc->clk_sys); + if (ret) { + dev_err(dev, "Can't prepare enable dsmc clk_sys: %d\n", ret); + goto err_dis_pclk; + } + + ret = clk_set_rate(dsmc->aclk_root, dsmc->cfg.freq_hz); + if (ret) { + dev_err(dev, "Failed to set dsmc aclk_root rate\n"); + goto err_dis_all_clk; + } + ret = clk_set_rate(dsmc->clk_sys, dsmc->cfg.ctrl_freq_hz); + if (ret) { + dev_err(dev, "Failed to set dsmc sys rate\n"); + goto err_dis_all_clk; + } + + ret = 
	      rockchip_dsmc_dma_request(dev, dsmc);
	if (ret) {
		dev_err(dev, "Failed to request dma channel\n");
		goto err_dis_all_clk;
	}

	dsmc->dev = dev;
	priv->ops = &rockchip_dsmc_ops;

	if (dsmc_init(dsmc)) {
		ret = -ENODEV;
		dev_err(dev, "DSMC init fail!\n");
		goto err_release_dma;
	}

	if (dsmc_mem_remap(dev, dsmc)) {
		ret = -ENODEV;
		dev_err(dev, "DSMC memory remap fail!\n");
		goto err_release_dma;
	}

	return 0;

err_release_dma:
	if (dsmc->dma_req[0])
		dma_release_channel(dsmc->dma_req[0]);
	if (dsmc->dma_req[1])
		dma_release_channel(dsmc->dma_req[1]);
err_dis_all_clk:
	clk_disable_unprepare(dsmc->clk_sys);
err_dis_pclk:
	clk_disable_unprepare(dsmc->pclk);
err_dis_aclk:
	clk_disable_unprepare(dsmc->aclk);
err_dis_aclk_root:
	clk_disable_unprepare(dsmc->aclk_root);

out:
	return ret;
}

/*
 * Unmap every mapped region of every configured chip-select and, for
 * local-bus chip-selects, tear down their character devices.
 */
static void release_dsmc_mem(struct device *dev, struct rockchip_dsmc *dsmc)
{
	int i;
	uint32_t cs;
	struct dsmc_map *region_map;
	struct dsmc_ctrl_config *cfg = &dsmc->cfg;

	for (cs = 0; cs < DSMC_MAX_SLAVE_NUM; cs++) {
		if (cfg->cs_cfg[cs].device_type == DSMC_UNKNOWN_DEVICE)
			continue;
		for (i = 0; i < DSMC_LB_MAX_RGN; i++) {
			region_map = &dsmc->cs_map[cs].region_map[i];
			if (region_map->virt) {
				rk_dsmc_unmap_kernel(region_map->virt);
				region_map->virt = NULL;
			}
		}
		if (dsmc->cfg.cs_cfg[cs].device_type == DSMC_LB_DEVICE)
			rockchip_dsmc_unregister_lb_device(dev, cs);
	}
}

/*
 * Platform remove: disable clocks, release mappings/char devices and DMA
 * channels.
 * NOTE(review): clocks are disabled *before* the memory/char-device
 * teardown and DMA release — confirm no register access is needed during
 * release_dsmc_mem() with clocks gated.
 */
static int rk_dsmc_remove(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct rockchip_dsmc *dsmc;
	struct rockchip_dsmc_device *priv;

	priv = platform_get_drvdata(pdev);
	dsmc = &priv->dsmc;

	if (dsmc->aclk_root) {
		clk_disable_unprepare(dsmc->aclk_root);
		dsmc->aclk_root = NULL;
	}
	if (dsmc->aclk) {
		clk_disable_unprepare(dsmc->aclk);
		dsmc->aclk = NULL;
	}
	if (dsmc->pclk) {
		clk_disable_unprepare(dsmc->pclk);
		dsmc->pclk = NULL;
	}
	if (dsmc->clk_sys) {
		clk_disable_unprepare(dsmc->clk_sys);
		dsmc->clk_sys = NULL;
	}

	release_dsmc_mem(dev, dsmc);

	if (dsmc->dma_req[0])
		dma_release_channel(dsmc->dma_req[0]);
	if (dsmc->dma_req[1])
		dma_release_channel(dsmc->dma_req[1]);

	return 0;
}

static struct platform_driver rk_dsmc_driver = {
	.probe = rk_dsmc_probe,
	.remove = rk_dsmc_remove,
	.driver = {
		.name = "dsmc",
		.of_match_table = dsmc_of_match,
	},
};

/* Module init: the LB device class must exist before any probe runs. */
static int __init rk_dsmc_init(void)
{
	int ret;

	ret = rockchip_dsmc_lb_class_create("dsmc");
	if (ret != 0) {
		pr_err("Failed to create DSMC class\n");
		return ret;
	}

	ret = platform_driver_register(&rk_dsmc_driver);
	if (ret != 0) {
		pr_err("Failed to register rockchip dsmc driver\n");
		rockchip_dsmc_lb_class_destroy();
		return ret;
	}

	return 0;
}

static void __exit rk_dsmc_exit(void)
{
	platform_driver_unregister(&rk_dsmc_driver);
	rockchip_dsmc_lb_class_destroy();
}

module_init(rk_dsmc_init);
module_exit(rk_dsmc_exit);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Zhihuan He ");
MODULE_DESCRIPTION("ROCKCHIP DSMC host driver");
diff --git a/drivers/memory/rockchip/dsmc-host.h b/drivers/memory/rockchip/dsmc-host.h
new file mode 100644
index 000000000000..501399bdd328
--- /dev/null
+++ b/drivers/memory/rockchip/dsmc-host.h
/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Copyright (c) 2024 Rockchip Electronics Co., Ltd.
 */
#ifndef __ROCKCHIP_DSMC_HOST_H
#define __ROCKCHIP_DSMC_HOST_H

/* FPGA bring-up configuration switches */
#define DSMC_FPGA_WINBOND_X8 0
#define DSMC_FPGA_WINBOND_X16 1
#define DSMC_FPGA_APM_X8 0
#define DSMC_FPGA_APM_X16 0

#define DSMC_FPGA_DMAC_TEST 1

#define DSMC_MAX_SLAVE_NUM 4
#define DSMC_LB_MAX_RGN 4

/* kernel mapping attributes used by rk_dsmc_map_kernel() */
#define DSMC_MEM_ATTRIBUTE_NO_CACHE 0
#define DSMC_MEM_ATTRIBUTE_CACHE 1
#define DSMC_MEM_ATTRIBUTE_WR_COM 2

#define DSMC_MAP_UNCACHE_SIZE (128 * 1024)
#define DSMC_MAP_BUFFERED_SIZE (128 * 1024)

/* DSMC register */
#define DSMC_VER 0x0000
#define DSMC_CSR 0x0008
#define DSMC_TAR 0x0010
#define DSMC_AXICTL 0x0014
#define DSMC_CLK_MD 0x0020
#define DSMC_DLL_DBG_CTRL 0x0028
#define DSMC_DEV_SIZE 0x0030
#define DSMC_INT_EN 0x0040
#define DSMC_INT_STATUS 0x0044
#define DSMC_INT_MASK 0x0048
#define DSMC_DMA_EN 0x0050
#define DSMC_DMA_REQ_NUM(n) (0x0054 + (0x4 * (n)))
#define DSMC_DMA_MUX 0x005c
/* per chip-select register block: 0x1000 * (cs + 1) */
#define DSMC_VDMC(n) (0x1000 * ((n) + 1))
#define DSMC_MCR(n) (0x1000 * ((n) + 1) + 0x10)
#define DSMC_MTR(n) (0x1000 * ((n) + 1) + 0x14)
#define DSMC_BDRTCR(n) (0x1000 * ((n) + 1) + 0x20)
#define DSMC_MRGTCR(n) (0x1000 * ((n) + 1) + 0x24)
#define DSMC_WRAP2INCR(n) (0x1000 * ((n) + 1) + 0x28)
#define DSMC_RDS_DLL0_CTL(n) (0x1000 * ((n) + 1) + 0x30)
#define DSMC_RDS_DLL1_CTL(n) (0x1000 * ((n) + 1) + 0x34)
#define DSMC_SLV_RGN_DIV(n) (0x1000 * ((n) + 1) + 0x40)
#define DSMC_RGN0_ATTR(n) (0x1000 * ((n) + 1) + 0x50)
#define DSMC_RGN1_ATTR(n) (0x1000 * ((n) + 1) + 0x54)
#define DSMC_RGN2_ATTR(n) (0x1000 * ((n) + 1) + 0x58)
#define DSMC_RGN3_ATTR(n) (0x1000 * ((n) + 1) + 0x5c)

/* AXICTL */
#define AXICTL_RD_NO_ERR_SHIFT 8
#define AXICTL_RD_NO_ERR_MASK 0x1

/* INT_EN */
#define INT_EN_SHIFT 0
#define INT_EN_MASK 0xf
#define INT_EN(cs) (0x1 << (cs))

/* INT_STATUS */
#define INT_STATUS_SHIFT 0
#define INT_STATUS_MASK 0xf
#define INT_STATUS(cs) (0x1 << (cs))

/* INT_MASK */
#define INT_MASK(cs) (0x1 << (cs))
#define INT_UNMASK(cs) (0x0 << (cs))

/* DMA_EN */
#define DMA_REQ_EN_SHIFT 0
#define DMA_REQ_EN_MASK 0x1
#define DMA_REQ_EN(cs) (0x1 << (cs))
#define DMA_REQ_DIS(cs) (0x0 << (cs))

/* VDMC */
#define VDMC_MID_SHIFT 0
#define VDMC_MID_MASK 0xF
#define VDMC_PROTOCOL_SHIFT 4
#define VDMC_PROTOCOL_MASK 0xF
#define VDMC_RESET_CMD_MODE_SHIFT 8
#define VDMC_RESET_CMD_MODE_MASK 0x1
#define VDMC_LATENCY_FIXED_SHIFT 9
#define VDMC_LATENCY_FIXED_MASK 0x1
#define VDMC_LATENCY_VARIABLE 0
#define VDMC_LATENCY_FIXED 1

/* device_type values used in struct dsmc_config_cs */
#define DSMC_UNKNOWN_DEVICE 0x0
#define OPI_XCCELA_PSRAM 0x1
#define HYPERBUS_PSRAM 0x2
#define DSMC_LB_DEVICE 0x3

/* RDS_DLL0_CTL */
#define RDS_DLL0_CTL_RDS_0_CLK_DELAY_NUM_SHIFT 0
#define RDS_DLL0_CTL_RDS_0_CLK_SMP_SEL_SHIFT 31
/* RDS_DLL1_CTL */
#define RDS_DLL1_CTL_RDS_1_CLK_DELAY_NUM_SHIFT 0
#define RDS_DLL1_CTL_RDS_1_CLK_SMP_SEL_SHIFT 31

/* MCR */
#define MCR_WRAPSIZE_SHIFT 0
#define MCR_WRAPSIZE_MASK 0x3
#define MCR_WRAPSIZE_32_CLK 1
#define MCR_WRAPSIZE_8_CLK 2
#define MCR_WRAPSIZE_16_CLK 3

#define MCR_EXCLUSIVE_DQS_SHIFT 2
#define MCR_EXCLUSIVE_DQS_MASK 0x1
#define MCR_IOWIDTH_SHIFT 3
#define MCR_IOWIDTH_MASK 0x1
#define MCR_DEVTYPE_SHIFT 4
#define MCR_DEVTYPE_MASK 0x1
#define MCR_CRT_SHIFT 5
#define MCR_CRT_MASK 0x1
#define MCR_ACS_SHIFT 16
#define MCR_ACS_MASK 0x1
#define MCR_TCMO_SHIFT 17
#define MCR_TCMO_MASK 0x1
#define MCR_MAXLEN_SHIFT 18
#define MCR_MAXLEN_MASK 0x1FF
#define MCR_MAXEN_SHIFT 31
#define MCR_MAXEN_MASK 0x1

#define MCR_CRT_CR_SPACE 0x1
#define MCR_CRT_MEM_SPACE 0x0
#define MCR_IOWIDTH_X16 0x1
#define MCR_IOWIDTH_X8 0x0
#define MCR_DEVTYPE_HYPERRAM 0x1
#define MCR_MAX_LENGTH_EN 0x1
#define MCR_MAX_LENGTH 0x1ff

/* BDRTCR */
#define BDRTCR_COL_BIT_NUM_SHIFT 0
#define BDRTCR_COL_BIT_NUM_MASK 0x7
#define BDRTCR_WR_BDR_XFER_EN_SHIFT 4
#define BDRTCR_WR_BDR_XFER_EN_MASK 0x1
#define BDRTCR_WR_BDR_XFER_EN 1
#define BDRTCR_RD_BDR_XFER_EN_SHIFT 5
#define BDRTCR_RD_BDR_XFER_EN_MASK 0x1
#define BDRTCR_RD_BDR_XFER_EN 1

/* MRGTCR */
#define MRGTCR_READ_WRITE_MERGE_EN 0x3

/* MTR */
#define MTR_WLTCY_SHIFT 0
#define MTR_WLTCY_MASK 0xf
#define MTR_RLTCY_SHIFT 4
#define MTR_RLTCY_MASK 0xf
#define MTR_WCSH_SHIFT 8
#define MTR_RCSH_SHIFT 12
#define MTR_WCSS_SHIFT 16
#define MTR_RCSS_SHIFT 20
#define MTR_WCSHI_SHIFT 24
#define MTR_RCSHI_SHIFT 28

/* RGNX_ATTR */
#define RGNX_ATTR_SHIFT 0
#define RGNX_ATTR_MASK 0x3
#define RGNX_ATTR_REG 0x0
#define RGNX_ATTR_DPRA 0x1
#define RGNX_ATTR_NO_MERGE_FIFO 0x2
#define RGNX_ATTR_MERGE_FIFO 0x3
#define RGNX_ATTR_CTRL_SHIFT 4
#define RGNX_ATTR_BE_CTRLED_SHIFT 5
#define RGNX_ATTR_DUM_CLK_EN_SHIFT 6
#define RGNX_ATTR_DUM_CLK_NUM_SHIFT 7
#define RGNX_ATTR_32BIT_ADDR_WIDTH 0
#define RGNX_ATTR_16BIT_ADDR_WIDTH 1
#define RGNX_ATTR_ADDR_WIDTH_SHIFT 8

#define RGNX_STATUS_ENABLED (1)
#define RGNX_STATUS_DISABLED (0)

/* Compose an MTR timing word from its individual latency/CS fields. */
#define MTR_CFG(RCSHI, WCSHI, RCSS, WCSS, RCSH, WCSH, RLTCY, WLTCY) \
	(((RCSHI) << MTR_RCSHI_SHIFT) | \
	 ((WCSHI) << MTR_WCSHI_SHIFT) | \
	 ((RCSS) << MTR_RCSS_SHIFT) | \
	 ((WCSS) << MTR_WCSS_SHIFT) | \
	 ((RCSH) << MTR_RCSH_SHIFT) | \
	 ((WCSH) << MTR_WCSH_SHIFT) | \
	 ((RLTCY) << MTR_RLTCY_SHIFT) | \
	 ((WLTCY) << MTR_WLTCY_SHIFT))

#define APM_PSRAM_LATENCY_FIXED 0x1
#define APM_PSRAM_LATENCY_VARIABLE 0x0

#define DSMC_BURST_WRAPSIZE_32CLK 0x1
#define DSMC_BURST_WRAPSIZE_8CLK 0x2
#define DSMC_BURST_WRAPSIZE_16CLK 0x3

#define DSMC_DLL_EN 0x1

/* Hyperbus / Xccela PSRAM register addresses (byte offsets in CR space) */
#define HYPER_PSRAM_IR0 (0x00)
#define HYPER_PSRAM_IR1 (0x02)
#define HYPER_PSRAM_CR0 (0x1000)
#define HYPER_PSRAM_CR1 (0x1002)
#define XCCELA_PSRAM_MR(n) (2 * (n))
#define XCCELA_PSRAM_MR_GET(n) (((n) >> 8) & 0xff)
#define XCCELA_PSRAM_MR_SET(n) (((n) & 0xff) << 8)
/* device id bit mask */
#define HYPERBUS_DEV_ID_MASK (0xf)
#define IR0_ROW_COUNT_SHIFT (0x8)
#define IR0_ROW_COUNT_MASK (0x1f)
#define IR0_COL_COUNT_SHIFT (0x4)
#define IR0_COL_COUNT_MASK (0xf)
#define IR1_DEV_IO_WIDTH_SHIFT (0)
#define IR1_DEV_IO_WIDTH_MASK (0xf)
#define IR1_DEV_IO_WIDTH_X16 (0x9)

#define CR0_INITIAL_LATENCY_SHIFT 4
#define CR0_INITIAL_LATENCY_MASK 0xf
#define CR0_FIXED_LATENCY_ENABLE_SHIFT 3
#define CR0_FIXED_LATENCY_ENABLE_MASK 0x1
#define CR0_FIXED_LATENCY_ENABLE_VARIABLE_LATENCY 0x0
#define CR0_FIXED_LATENCY_ENABLE_FIXED_LATENCY 0x1

#define CR0_BURST_LENGTH_SHIFT 0
#define CR0_BURST_LENGTH_MASK 0x3
#define CR0_BURST_LENGTH_64_CLK 0x0
#define CR0_BURST_LENGTH_32_CLK 0x1
#define CR0_BURST_LENGTH_8_CLK 0x2
#define CR0_BURST_LENGTH_16_CLK 0x3

#define CR1_CLOCK_TYPE_SHIFT 6
#define CR1_CLOCK_TYPE_MASK 0x1
#define CR1_CLOCK_TYPE_SINGLE_CLK 0x1
#define CR1_CLOCK_TYPE_DIFF_CLK 0x0

#define XCCELA_DEV_ID_MASK (0x1f)

#define XCCELA_MR0_RL_SHIFT (2)
#define XCCELA_MR0_RL_MASK (0x7)
#define XCCELA_MR0_RL_TYPE_SHIFT (5)
#define XCCELA_MR0_RL_TYPE_MASK (0x1)
#define XCCELA_MR0_RL_TYPE_FIXED (0x1)
#define XCCELA_MR0_RL_TYPE_VARIABLE (0x0)

#define XCCELA_MR2_DEV_DENSITY_MASK (0x7)

#define XCCELA_MR4_WL_SHIFT (5)
#define XCCELA_MR4_WL_MASK (0x7)

#define XCCELA_MR8_IO_TYPE_SHIFT (6)
#define XCCELA_MR8_IO_TYPE_MASK (0x1)
#define XCCELA_MR8_IO_TYPE_X16 (0x1)
#define XCCELA_MR8_IO_TYPE_X8 (0x0)
#define XCCELA_MR8_BL_SHIFT (0)
#define XCCELA_MR8_BL_MASK (0x7)
#define XCCELA_MR8_BL_32_CLK (0x2)
#define XCCELA_MR8_BL_16_CLK (0x1)
#define XCCELA_MR8_BL_8_CLK (0x0)

#define PSRAM_SIZE_32MBYTE (0x02000000)
#define PSRAM_SIZE_16MBYTE (0x01000000)
#define PSRAM_SIZE_8MBYTE (0x00800000)
#define PSRAM_SIZE_4MBYTE (0x00400000)

/* TCSM/TCEM (in ns) */
#define DSMC_DEV_TCSM_4U (4000)
#define DSMC_DEV_TCSM_1U (1000)
#define DSMC_DEC_TCEM_2_5U (2500)
#define DSMC_DEC_TCEM_3U (3000)
#define DSMC_DEC_TCEM_0_5U (500)

/* SoC GRF muxing: high 16 bits are write-enable mask bits */
#define RK3506_GRF_SOC_CON(n) (0x4 * (n))
#define GRF_DSMC_REQ0_SEL(n) ((0x1 << (15 + 16)) | ((n) << 15))
#define GRF_DSMC_REQ1_SEL(n) ((0x1 << (14 + 16)) | ((n) << 14))
#define GRF_DMAC0_CH10_SEL(n) ((0x1 << (7 + 16)) | ((n) << 7))
#define GRF_DMAC0_CH8_SEL(n) ((0x1 << (6 + 16)) | ((n) << 6))
#define GRF_DMAC0_CH3_SEL(n) ((0x1 << (3 + 16)) | ((n) << 3))
#define GRF_DMAC0_CH2_SEL(n) ((0x1 << (2 + 16)) | ((n) << 2))

#define RK3576_TPO_IOC_OFFSET (0x4000)
#define RK3576_GPIO3A_IOMUX_SEL_H (RK3576_TPO_IOC_OFFSET + 0x64)
#define RK3576_GPIO3B_IOMUX_SEL_L (RK3576_TPO_IOC_OFFSET + 0x68)
#define RK3576_GPIO3B_IOMUX_SEL_H (RK3576_TPO_IOC_OFFSET + 0x6c)
#define RK3576_GPIO3C_IOMUX_SEL_L (RK3576_TPO_IOC_OFFSET + 0x70)
#define RK3576_GPIO3C_IOMUX_SEL_H (RK3576_TPO_IOC_OFFSET + 0x74)
#define RK3576_GPIO3D_IOMUX_SEL_L (RK3576_TPO_IOC_OFFSET + 0x78)
#define RK3576_GPIO3D_IOMUX_SEL_H (RK3576_TPO_IOC_OFFSET + 0x7c)
#define RK3576_GPIO4A_IOMUX_SEL_L (RK3576_TPO_IOC_OFFSET + 0x80)

#define RK3576_IOMUX_SEL(v, s) (((v) << (s)) | (0xf << ((s) + 16)))

/* Per-region config of a local-bus slave, parsed from DT. */
struct regions_config {
	uint32_t attribute;
	uint32_t ca_addr_width;
	uint32_t dummy_clk_num;
	uint32_t cs0_be_ctrled;
	uint32_t cs0_ctrl;
	uint32_t offset_range[2];
	uint32_t status;
};

/* Per-chip-select configuration (device type, timing, regions). */
struct dsmc_config_cs {
	uint16_t mid;
	uint16_t protcl;
	uint32_t device_type;
	uint32_t mtr_timing;
	uint32_t acs;
	uint32_t exclusive_dqs;
	uint32_t io_width;
	uint32_t wrap_size;
	uint32_t rd_latency;
	uint32_t wr_latency;
	uint32_t col;
	uint32_t wrap2incr_en;
	uint32_t max_length_en;
	uint32_t max_length;
	uint32_t rgn_num;
	uint32_t dll_num[2];
	struct regions_config slv_rgn[DSMC_LB_MAX_RGN];
};

/* Controller-wide configuration. */
struct dsmc_ctrl_config {
	uint32_t clk_mode;
	uint32_t freq_hz;
	uint32_t ctrl_freq_hz;
	uint32_t cap;
	struct dsmc_config_cs cs_cfg[DSMC_MAX_SLAVE_NUM];
};

/* One mapped window: kernel virtual address, physical base and length. */
struct dsmc_map {
	void *virt;
	phys_addr_t phys;
	size_t size;
};

struct dsmc_cs_map {
	struct dsmc_map region_map[DSMC_LB_MAX_RGN];
};

/* State of the single in-flight DMA transfer (RXDMA/TXDMA bits in state). */
struct dsmc_transfer {
	uint32_t ops_cs;
	struct dma_chan *dma_chan;
	dma_addr_t src_addr;
	dma_addr_t dst_addr;
	size_t transfer_size;
	u8 brst_size;
	u8 brst_len;
	atomic_t state;
};

struct rockchip_dsmc {
	/*
uint32_t cs); + +#endif /* __BUS_ROCKCHIP_ROCKCHIP_DSMC_HOST_H */ diff --git a/drivers/memory/rockchip/dsmc-lb-device.c b/drivers/memory/rockchip/dsmc-lb-device.c new file mode 100644 index 000000000000..8815e92c80c3 --- /dev/null +++ b/drivers/memory/rockchip/dsmc-lb-device.c @@ -0,0 +1,192 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (c) 2024 Rockchip Electronics Co., Ltd. + */ +#include +#include +#include +#include + +#include "dsmc-host.h" + +struct dsmc_cs { + struct cdev cdev[DSMC_LB_MAX_RGN]; +}; + +static struct dsmc_cs cs_info[DSMC_MAX_SLAVE_NUM]; +static struct class *dsmc_class; +static dev_t dsmc_devt; + +static inline int get_cs_index(struct inode *inode) +{ + return iminor(inode) / DSMC_LB_MAX_RGN; +} + +static inline int get_mem_region_index(struct inode *inode) +{ + return iminor(inode) % DSMC_LB_MAX_RGN; +} + +static int dsmc_open(struct inode *inode, struct file *pfile) +{ + struct rockchip_dsmc_device *dsmc_dev = NULL; + struct rockchip_dsmc *dsmc = NULL; + struct dsmc_config_cs *cfg; + struct dsmc_cs_map *map; + int cs_index, mem_region_index; + + cs_index = get_cs_index(inode); + mem_region_index = get_mem_region_index(inode); + + dsmc_dev = rockchip_dsmc_find_device_by_compat(rockchip_dsmc_get_compat(0)); + if (dsmc_dev == NULL) + return -EINVAL; + + dsmc = &dsmc_dev->dsmc; + + if (cs_index < DSMC_MAX_SLAVE_NUM) + cfg = &dsmc->cfg.cs_cfg[cs_index]; + else + return -EINVAL; + if ((cfg->device_type == DSMC_UNKNOWN_DEVICE) || + (!cfg->slv_rgn[mem_region_index].status)) + return -EINVAL; + + map = &dsmc->cs_map[cs_index]; + + pfile->private_data = (void *)&map->region_map[mem_region_index]; + + return 0; +} + +static int dsmc_release(struct inode *inode, struct file *pfile) +{ + return 0; +} + +static int dsmc_mmap(struct file *pfile, struct vm_area_struct *vma) +{ + struct dsmc_map *region = (struct dsmc_map *)pfile->private_data; + unsigned long pfn; + unsigned long vm_size = 0; + + if (!region) + return -EINVAL; + + 
vm_flags_set(vma, VM_PFNMAP | VM_DONTDUMP); + vma->vm_page_prot = vm_get_page_prot(vma->vm_flags); + + vm_size = vma->vm_end - vma->vm_start; + + pfn = __phys_to_pfn(region->phys); + + if (remap_pfn_range(vma, vma->vm_start, pfn, vm_size, vma->vm_page_prot)) + return -EAGAIN; + + return 0; +} + +static const struct file_operations dsmc_fops = { + .owner = THIS_MODULE, + .open = dsmc_open, + .release = dsmc_release, + .mmap = dsmc_mmap, +}; + +int rockchip_dsmc_lb_class_create(const char *name) +{ + int ret; + + dsmc_class = class_create(THIS_MODULE, name); + if (IS_ERR(dsmc_class)) { + ret = PTR_ERR(dsmc_class); + return ret; + } + + return 0; +} +EXPORT_SYMBOL(rockchip_dsmc_lb_class_create); + +int rockchip_dsmc_lb_class_destroy(void) +{ + if (!dsmc_class) + return 0; + + class_destroy(dsmc_class); + dsmc_class = NULL; + + return 0; +} +EXPORT_SYMBOL(rockchip_dsmc_lb_class_destroy); + +int rockchip_dsmc_register_lb_device(struct device *dev, uint32_t cs) +{ + int ret, j; + struct device *device_ret; + + if (!dev || (cs >= DSMC_MAX_SLAVE_NUM) || (!dsmc_class)) + return -EINVAL; + + ret = alloc_chrdev_region(&dsmc_devt, 0, + DSMC_LB_MAX_RGN, "dsmc"); + if (ret < 0) { + dev_err(dev, "Failed to alloc dsmc device region\n"); + return -ENODEV; + } + + for (j = 0; j < DSMC_LB_MAX_RGN; j++) { + device_ret = device_create(dsmc_class, NULL, + MKDEV(MAJOR(dsmc_devt), cs * DSMC_LB_MAX_RGN + j), + NULL, "dsmc/cs%d/region%d", cs, j); + if (IS_ERR(device_ret)) { + dev_err(dev, "Failed to create device for cs%d region%d\n", cs, j); + ret = PTR_ERR(device_ret); + goto err_device_create; + } + cdev_init(&cs_info[cs].cdev[j], &dsmc_fops); + ret = cdev_add(&cs_info[cs].cdev[j], + MKDEV(MAJOR(dsmc_devt), cs * DSMC_LB_MAX_RGN + j), 1); + if (ret) { + dev_err(dev, "Failed to add cdev for cs%d region%d\n", cs, j); + goto err_cdev_add; + } + } + + return 0; + +err_cdev_add: + device_destroy(dsmc_class, MKDEV(MAJOR(dsmc_devt), cs * DSMC_LB_MAX_RGN + j)); + +err_device_create: + while (j-- 
> 0) { + device_destroy(dsmc_class, MKDEV(MAJOR(dsmc_devt), cs * DSMC_LB_MAX_RGN + j)); + cdev_del(&cs_info[cs].cdev[j]); + } + unregister_chrdev_region(dsmc_devt, DSMC_LB_MAX_RGN); + + return ret; +} +EXPORT_SYMBOL(rockchip_dsmc_register_lb_device); + +int rockchip_dsmc_unregister_lb_device(struct device *dev, uint32_t cs) +{ + int j; + + if (!dev || (cs >= DSMC_MAX_SLAVE_NUM)) + return -EINVAL; + + for (j = 0; j < DSMC_LB_MAX_RGN; j++) { + device_destroy(dsmc_class, + MKDEV(MAJOR(dsmc_devt), + cs * DSMC_LB_MAX_RGN + j)); + cdev_del(&cs_info->cdev[j]); + } + unregister_chrdev_region(dsmc_devt, DSMC_LB_MAX_RGN); + + return 0; +} +EXPORT_SYMBOL(rockchip_dsmc_unregister_lb_device); + +MODULE_LICENSE("GPL"); +MODULE_AUTHOR("Zhihuan He "); +MODULE_DESCRIPTION("ROCKCHIP DSMC local bus device"); diff --git a/drivers/memory/rockchip/dsmc-lb-slave.h b/drivers/memory/rockchip/dsmc-lb-slave.h new file mode 100644 index 000000000000..06398577486d --- /dev/null +++ b/drivers/memory/rockchip/dsmc-lb-slave.h @@ -0,0 +1,70 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (c) 2024 Rockchip Electronics Co., Ltd. 
+ */ + +#ifndef __ROCKCHIP_DSMC_LB_SLAVE_H +#define __ROCKCHIP_DSMC_LB_SLAVE_H + +#define S2H_INT_FOR_DMA_NUM (15) + +/* LBC_SLAVE_CMN register */ +#define CMN_CON(n) (0x4 * (n)) +#define CMN_STATUS (0x80) +#define RGN_CMN_CON(rgn, com) (0x100 + 0x100 * (rgn) + 0x4 * (com)) +#define DBG_STATUS(n) (0x900 + 0x4 * (n)) + +/* LBC_SLAVE_CSR register */ +#define APP_CON(n) (0x4 * (n)) +#define APP_H2S_INT_STA (0x80) +#define APP_H2S_INT_STA_EN (0x84) +#define APP_H2S_INT_STA_SIG_EN (0x88) +#define LBC_CON(n) (0x100 + 0x4 * (n)) +#define LBC_S2H_INT_STA (0x180) +#define LBC_S2H_INT_STA_EN (0x184) +#define LBC_S2H_INT_STA_SIG_EN (0x188) +#define AXI_WR_ADDR_BASE (0x800) +#define AXI_RD_ADDR_BASE (0x804) +#define DBG_STA(n) (0x900 + 0x4 * (n)) + +/* LBC_SLAVE_CMN_CMN_CON0 */ +#define CA_CYC_16BIT (0) +#define CA_CYC_32BIT (1) +#define CA_CYC_SHIFT (0) +#define CA_CYC_MASK (0x1) +#define WR_LATENCY_CYC_SHIFT (4) +#define WR_LATENCY_CYC_MASK (0x7) +#define RD_LATENCY_CYC_SHIFT (8) +#define RD_LATENCY_CYC_MASK (0x7) +#define WR_DATA_CYC_EXTENDED_SHIFT (11) +#define WR_DATA_CYC_EXTENDED_MASK (0x1) + +/* LBC_SLAVE_CMN_CMN_CON3 */ +#define DATA_WIDTH_SHIFT (0) +#define DATA_WIDTH_MASK (0x1) +#define RDYN_GEN_CTRL_SHIFT (4) +#define RDYN_GEN_CTRL_MASK (0x1) + +/* APP_H2S_INT_STA */ +#define APP_H2S_INT_STA_SHIFT (0) +#define APP_H2S_INT_STA_MASK (0xFFFF) + +/* APP_H2S_INT_STA_EN */ +#define APP_H2S_INT_STA_EN_SHIFT (0) +#define APP_H2S_INT_STA_EN_MASK (0xFFFF) + +/* APP_H2S_INT_STA_SIG_EN */ +#define APP_H2S_INT_STA_SIG_EN_SHIFT (0) +#define APP_H2S_INT_STA_SIG_EN_MASK (0xFFFF) + +/* LBC_S2H_INT_STA */ +#define LBC_S2H_INT_STA_SHIFT (0) +#define LBC_S2H_INT_STA_MASK (0xFFFF) +/* LBC_S2H_INT_STA_EN */ +#define LBC_S2H_INT_STA_EN_SHIFT (0) +#define LBC_S2H_INT_STA_EN_MASK (0xFFFF) +/* LBC_S2H_INT_STA_SIG_EN */ +#define LBC_S2H_INT_STA_SIG_EN_SHIFT (0) +#define LBC_S2H_INT_STA_SIG_EN_MASK (0xFFFF) + +#endif /* __BUS_ROCKCHIP_ROCKCHIP_DSMC_SLAVE_H */