diff --git a/bsp/hpmicro/libraries/drivers/drv_sdio.c b/bsp/hpmicro/libraries/drivers/drv_sdio.c index 58088c14f8c..f820c7e0d3e 100644 --- a/bsp/hpmicro/libraries/drivers/drv_sdio.c +++ b/bsp/hpmicro/libraries/drivers/drv_sdio.c @@ -97,7 +97,7 @@ static hpm_stat_t hpm_sdmmc_transfer_polling(struct hpm_mmcsd *mmcsd, sdxc_adma_ static hpm_stat_t hpm_sdmmc_transfer_interrupt_driven(struct hpm_mmcsd *mmcsd, sdxc_adma_config_t *dma_config, sdxc_xfer_t *xfer); static hpm_stat_t hpm_sdmmc_transfer(struct hpm_mmcsd *mmcsd, sdxc_adma_config_t *dma_config, sdxc_xfer_t *xfer); static rt_int32_t hpm_sdmmc_execute_tuning(struct rt_mmcsd_host *host, rt_int32_t opcode); -static rt_int32_t hpm_sdmmc_switch_uhs_voltage(struct rt_mmcsd_host *host); +static rt_err_t hpm_sdmmc_signal_voltage_switch(struct rt_mmcsd_host *host, struct rt_mmcsd_io_cfg *io_cfg); static void hpm_sdmmc_power_on_via_pin(struct hpm_mmcsd *mmcsd); static void hpm_sdmmc_power_off_via_pin(struct hpm_mmcsd *mmcsd); @@ -152,7 +152,7 @@ static void hpm_sdmmc_switch_to_1v8_via_pin(struct hpm_mmcsd *mmcsd) } -static rt_int32_t hpm_sdmmc_switch_uhs_voltage(struct rt_mmcsd_host *host) +static rt_err_t hpm_sdmmc_signal_voltage_switch(struct rt_mmcsd_host *host, struct rt_mmcsd_io_cfg *io_cfg) { struct hpm_mmcsd *mmcsd = (struct hpm_mmcsd *) host->private_data; SDXC_Type *base = mmcsd->sdxc_base; @@ -174,9 +174,17 @@ static rt_int32_t hpm_sdmmc_switch_uhs_voltage(struct rt_mmcsd_host *host) return -RT_ETIMEOUT; } - /* 3. Switch to 1.8V */ - hpm_sdmmc_switch_to_1v8_via_pin(mmcsd); - sdxc_select_voltage(mmcsd->sdxc_base, sdxc_bus_voltage_sd_1v8); + /* 3. Switch to 1.8V/3.3V */ + if (ios->signal_voltage == MMCSD_SIGNAL_VOLTAGE_330) + { + hpm_sdmmc_switch_to_3v3_via_pin(mmcsd); + sdxc_select_voltage(mmcsd->sdxc_base, sdxc_bus_voltage_sd_3v3); + } + else + { + hpm_sdmmc_switch_to_1v8_via_pin(mmcsd); + sdxc_select_voltage(mmcsd->sdxc_base, sdxc_bus_voltage_sd_1v8); + } /* 4. 
spec:host delay 5ms, host: give more delay time here */ rt_thread_mdelay(10); @@ -213,7 +221,7 @@ static const struct rt_mmcsd_host_ops hpm_mmcsd_host_ops = .get_card_status = NULL, .enable_sdio_irq = hpm_sdmmc_enable_sdio_irq, .execute_tuning = hpm_sdmmc_execute_tuning, - .switch_uhs_voltage = hpm_sdmmc_switch_uhs_voltage, + .signal_voltage_switch = hpm_sdmmc_signal_voltage_switch, }; void hpm_sdmmc_isr(struct hpm_mmcsd *mmcsd) diff --git a/components/drivers/include/drivers/dev_mmcsd_core.h b/components/drivers/include/drivers/dev_mmcsd_core.h index c86e9dde481..a541bc2e2d1 100644 --- a/components/drivers/include/drivers/dev_mmcsd_core.h +++ b/components/drivers/include/drivers/dev_mmcsd_core.h @@ -242,6 +242,12 @@ void mmcsd_set_bus_width(struct rt_mmcsd_host *host, rt_uint32_t width); void mmcsd_set_timing(struct rt_mmcsd_host *host, rt_uint32_t timing); void mmcsd_set_data_timeout(struct rt_mmcsd_data *data, const struct rt_mmcsd_card *card); rt_uint32_t mmcsd_select_voltage(struct rt_mmcsd_host *host, rt_uint32_t ocr); +rt_err_t mmcsd_set_signal_voltage(struct rt_mmcsd_host *host, unsigned char signal_voltage); +void mmcsd_set_initial_signal_voltage(struct rt_mmcsd_host *host); +rt_err_t mmcsd_host_set_uhs_voltage(struct rt_mmcsd_host *host); +rt_err_t mmcsd_set_uhs_voltage(struct rt_mmcsd_host *host, rt_uint32_t ocr); +rt_err_t mmcsd_send_tuning(struct rt_mmcsd_host *host, rt_uint32_t opcode, rt_err_t *cmd_error); +rt_err_t mmcsd_send_abort_tuning(struct rt_mmcsd_host *host, rt_uint32_t opcode); void mmcsd_change(struct rt_mmcsd_host *host); void mmcsd_detect(void *param); void mmcsd_host_init(struct rt_mmcsd_host *host); diff --git a/components/drivers/include/drivers/dev_sdhci.h b/components/drivers/include/drivers/dev_sdhci.h new file mode 100755 index 00000000000..be6e4279076 --- /dev/null +++ b/components/drivers/include/drivers/dev_sdhci.h @@ -0,0 +1,668 @@ +/* + * Copyright (c) 2006-2024 RT-Thread Development Team + * + * SPDX-License-Identifier: Apache-2.0 + * + * Change Logs: + * Date Author Notes + * 2024-08-16 zhujiale first version + */ + +#ifndef __DEV_SDHCI_H__ +#define __DEV_SDHCI_H__ + +#include +#include +#include +#include +#include + +#define RT_SDHCI_MAX_TUNING_LOOP 40 +/* + * Controller registers + */ +#define RT_SDHCI_DMA_ADDRESS 0x00 +#define RT_SDHCI_ARGUMENT2 RT_SDHCI_DMA_ADDRESS +#define RT_SDHCI_32BIT_BLK_CNT RT_SDHCI_DMA_ADDRESS + +#define RT_SDHCI_BLOCK_SIZE 0x04 +#define RT_SDHCI_MAKE_BLKSZ(dma, blksz) (((dma & 0x7) << 12) | (blksz & 0xFFF)) + +#define RT_SDHCI_BLOCK_COUNT 0x06 + +#define RT_SDHCI_ARGUMENT 0x08 + +#define RT_SDHCI_TRANSFER_MODE 0x0c +#define RT_SDHCI_TRNS_DMA 0x01 +#define RT_SDHCI_TRNS_BLK_CNT_EN 0x02 +#define RT_SDHCI_TRNS_AUTO_CMD12 0x04 +#define RT_SDHCI_TRNS_AUTO_CMD23 0x08 +#define RT_SDHCI_TRNS_AUTO_SEL 0x0c +#define RT_SDHCI_TRNS_READ 0x10 +#define RT_SDHCI_TRNS_MULTI 0x20 + +#define RT_SDHCI_COMMAND 0x0e +#define RT_SDHCI_CMD_RESP_MASK 0x03 +#define RT_SDHCI_CMD_CRC 0x08 +#define RT_SDHCI_CMD_INDEX 0x10 +#define RT_SDHCI_CMD_DATA 0x20 +#define RT_SDHCI_CMD_ABORTCMD 0xc0 + +#define RT_SDHCI_CMD_RESP_NONE 0x00 +#define RT_SDHCI_CMD_RESP_LONG 0x01 +#define RT_SDHCI_CMD_RESP_SHORT 0x02 +#define RT_SDHCI_CMD_RESP_SHORT_BUSY 0x03 + +#define RT_SDHCI_MAKE_CMD(c, f) (((c & 0xff) << 8) | (f & 0xff)) +#define RT_SDHCI_GET_CMD(c) ((c >> 8) & 0x3f) + +#define RT_SDHCI_RESPONSE 0x10 + +#define RT_SDHCI_BUFFER 0x20 + +#define RT_SDHCI_PRESENT_STATE 0x24 +#define RT_SDHCI_CMD_INHIBIT 0x00000001 +#define RT_SDHCI_DATA_INHIBIT 0x00000002 
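
For context on the callback reshape above: the old `switch_uhs_voltage(host)` op could only ever move the bus to 1.8 V, while the replacement `signal_voltage_switch(host, io_cfg)` receives the requested level in `io_cfg->signal_voltage`, and the new core helper `mmcsd_set_signal_voltage()` restores the previous level if the op fails. A minimal sketch of how another host driver might implement the new op is shown below; `board_set_io_voltage()` and the `demo_` names are hypothetical placeholders, not part of this patch.

```c
#include <rtdevice.h>

/* Hypothetical board helper: switches the card IO rail to the given level. */
extern void board_set_io_voltage(int millivolt);

static rt_err_t demo_signal_voltage_switch(struct rt_mmcsd_host *host,
                                           struct rt_mmcsd_io_cfg *io_cfg)
{
    switch (io_cfg->signal_voltage)
    {
    case MMCSD_SIGNAL_VOLTAGE_330:
        board_set_io_voltage(3300);
        break;
    case MMCSD_SIGNAL_VOLTAGE_180:
        board_set_io_voltage(1800);
        break;
    default:
        /* 1.2 V signalling is not wired on this hypothetical board. */
        return -RT_EINVAL;
    }

    /* The SD spec asks for at least 5 ms with the clock gated after the
     * switch; give a little extra margin, as the hpmicro driver does. */
    rt_thread_mdelay(10);

    return RT_EOK;
}

static const struct rt_mmcsd_host_ops demo_host_ops =
{
    /* .request, .set_iocfg, ... as before */
    .signal_voltage_switch = demo_signal_voltage_switch,
};
```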
+#define RT_SDHCI_DOING_WRITE 0x00000100 +#define RT_SDHCI_DOING_READ 0x00000200 +#define RT_SDHCI_SPACE_AVAILABLE 0x00000400 +#define RT_SDHCI_DATA_AVAILABLE 0x00000800 +#define RT_SDHCI_CARD_PRESENT 0x00010000 +#define RT_SDHCI_CARD_PRES_SHIFT 16 +#define RT_SDHCI_CD_STABLE 0x00020000 +#define RT_SDHCI_CD_LVL 0x00040000 +#define RT_SDHCI_CD_LVL_SHIFT 18 +#define RT_SDHCI_WRITE_PROTECT 0x00080000 +#define RT_SDHCI_DATA_LVL_MASK 0x00f00000 +#define RT_SDHCI_DATA_LVL_SHIFT 20 +#define RT_SDHCI_DATA_0_LVL_MASK 0x00100000 +#define RT_SDHCI_CMD_LVL 0x01000000 + +#define RT_SDHCI_HOST_CONTROL 0x28 +#define RT_SDHCI_CTRL_LED 0x01 +#define RT_SDHCI_CTRL_4BITBUS 0x02 +#define RT_SDHCI_CTRL_HISPD 0x04 +#define RT_SDHCI_CTRL_DMA_MASK 0x18 +#define RT_SDHCI_CTRL_SDMA 0x00 +#define RT_SDHCI_CTRL_ADMA1 0x08 +#define RT_SDHCI_CTRL_ADMA32 0x10 +#define RT_SDHCI_CTRL_ADMA64 0x18 +#define RT_SDHCI_CTRL_ADMA3 0x18 +#define RT_SDHCI_CTRL_8BITBUS 0x20 +#define RT_SDHCI_CTRL_CDTEST_INS 0x40 +#define RT_SDHCI_CTRL_CDTEST_EN 0x80 + +#define RT_SDHCI_POWER_CONTROL 0x29 +#define RT_SDHCI_POWER_ON 0x01 +#define RT_SDHCI_POWER_180 0x0a +#define RT_SDHCI_POWER_300 0x0c +#define RT_SDHCI_POWER_330 0x0e +/* + * VDD2 - UHS2 or PCIe/NVMe + * VDD2 power on/off and voltage select + */ +#define RT_SDHCI_VDD2_POWER_ON 0x10 +#define RT_SDHCI_VDD2_POWER_120 0x80 +#define RT_SDHCI_VDD2_POWER_180 0xa0 + +#define RT_SDHCI_BLOCK_GAP_CONTROL 0x2a + +#define RT_SDHCI_WAKE_UP_CONTROL 0x2b +#define RT_SDHCI_WAKE_ON_INT 0x01 +#define RT_SDHCI_WAKE_ON_INSERT 0x02 +#define RT_SDHCI_WAKE_ON_REMOVE 0x04 + +#define RT_SDHCI_CLOCK_CONTROL 0x2c +#define RT_SDHCI_DIVIDER_SHIFT 8 +#define RT_SDHCI_DIVIDER_HI_SHIFT 6 +#define RT_SDHCI_DIV_MASK 0xff +#define RT_SDHCI_DIV_MASK_LEN 8 +#define RT_SDHCI_DIV_HI_MASK 0x300 +#define RT_SDHCI_PROG_CLOCK_MODE 0x0020 +#define RT_SDHCI_CLOCK_CARD_EN 0x0004 +#define RT_SDHCI_CLOCK_PLL_EN 0x0008 +#define RT_SDHCI_CLOCK_INT_STABLE 0x0002 +#define RT_SDHCI_CLOCK_INT_EN 0x0001 + +#define RT_SDHCI_TIMEOUT_CONTROL 0x2e + +#define RT_SDHCI_SOFTWARE_RESET 0x2f +#define RT_SDHCI_RESET_ALL 0x01 +#define RT_SDHCI_RESET_CMD 0x02 +#define RT_SDHCI_RESET_DATA 0x04 + +#define RT_SDHCI_INT_STATUS 0x30 +#define RT_SDHCI_INT_ENABLE 0x34 +#define RT_SDHCI_SIGNAL_ENABLE 0x38 +#define RT_SDHCI_INT_RESPONSE 0x00000001 +#define RT_SDHCI_INT_DATA_END 0x00000002 +#define RT_SDHCI_INT_BLK_GAP 0x00000004 +#define RT_SDHCI_INT_DMA_END 0x00000008 +#define RT_SDHCI_INT_SPACE_AVAIL 0x00000010 +#define RT_SDHCI_INT_DATA_AVAIL 0x00000020 +#define RT_SDHCI_INT_CARD_INSERT 0x00000040 +#define RT_SDHCI_INT_CARD_REMOVE 0x00000080 +#define RT_SDHCI_INT_CARD_INT 0x00000100 +#define RT_SDHCI_INT_RETUNE 0x00001000 +#define RT_SDHCI_INT_CQE 0x00004000 +#define RT_SDHCI_INT_ERROR 0x00008000 +#define RT_SDHCI_INT_TIMEOUT 0x00010000 +#define RT_SDHCI_INT_CRC 0x00020000 +#define RT_SDHCI_INT_END_BIT 0x00040000 +#define RT_SDHCI_INT_INDEX 0x00080000 +#define RT_SDHCI_INT_DATA_TIMEOUT 0x00100000 +#define RT_SDHCI_INT_DATA_CRC 0x00200000 +#define RT_SDHCI_INT_DATA_END_BIT 0x00400000 +#define RT_SDHCI_INT_BUS_POWER 0x00800000 +#define RT_SDHCI_INT_AUTO_CMD_ERR 0x01000000 +#define RT_SDHCI_INT_ADMA_ERROR 0x02000000 + +#define RT_SDHCI_INT_NORMAL_MASK 0x00007fff +#define RT_SDHCI_INT_ERROR_MASK 0xffff8000 + +#define RT_SDHCI_INT_CMD_MASK (RT_SDHCI_INT_RESPONSE | RT_SDHCI_INT_TIMEOUT | \ + RT_SDHCI_INT_CRC | RT_SDHCI_INT_END_BIT | \ + RT_SDHCI_INT_INDEX | RT_SDHCI_INT_AUTO_CMD_ERR) +#define RT_SDHCI_INT_DATA_MASK (RT_SDHCI_INT_DATA_END | RT_SDHCI_INT_DMA_END | 
\ + RT_SDHCI_INT_DATA_AVAIL | RT_SDHCI_INT_SPACE_AVAIL | \ + RT_SDHCI_INT_DATA_TIMEOUT | RT_SDHCI_INT_DATA_CRC | \ + RT_SDHCI_INT_DATA_END_BIT | RT_SDHCI_INT_ADMA_ERROR | \ + RT_SDHCI_INT_BLK_GAP) +#define RT_SDHCI_INT_ALL_MASK ((rt_uint32_t)-1) + +#define RT_SDHCI_CQE_INT_ERR_MASK (RT_SDHCI_INT_ADMA_ERROR | RT_SDHCI_INT_BUS_POWER | \ + RT_SDHCI_INT_DATA_END_BIT | RT_SDHCI_INT_DATA_CRC | \ + RT_SDHCI_INT_DATA_TIMEOUT | RT_SDHCI_INT_INDEX | \ + RT_SDHCI_INT_END_BIT | RT_SDHCI_INT_CRC | RT_SDHCI_INT_TIMEOUT) + +#define RT_SDHCI_CQE_INT_MASK (RT_SDHCI_CQE_INT_ERR_MASK | RT_SDHCI_INT_CQE) + +#define RT_SDHCI_AUTO_CMD_STATUS 0x3c +#define RT_SDHCI_AUTO_CMD_TIMEOUT 0x00000002 +#define RT_SDHCI_AUTO_CMD_CRC 0x00000004 +#define RT_SDHCI_AUTO_CMD_END_BIT 0x00000008 +#define RT_SDHCI_AUTO_CMD_INDEX 0x00000010 + +#define RT_SDHCI_HOST_CONTROL2 0x3e +#define RT_SDHCI_CTRL_UHS_MASK 0x0007 +#define RT_SDHCI_CTRL_UHS_SDR12 0x0000 +#define RT_SDHCI_CTRL_UHS_SDR25 0x0001 +#define RT_SDHCI_CTRL_UHS_SDR50 0x0002 +#define RT_SDHCI_CTRL_UHS_SDR104 0x0003 +#define RT_SDHCI_CTRL_UHS_DDR50 0x0004 +#define RT_SDHCI_CTRL_HS400 0x0005 /* Non-standard */ +#define RT_SDHCI_CTRL_VDD_180 0x0008 +#define RT_SDHCI_CTRL_DRV_TYPE_MASK 0x0030 +#define RT_SDHCI_CTRL_DRV_TYPE_B 0x0000 +#define RT_SDHCI_CTRL_DRV_TYPE_A 0x0010 +#define RT_SDHCI_CTRL_DRV_TYPE_C 0x0020 +#define RT_SDHCI_CTRL_DRV_TYPE_D 0x0030 +#define RT_SDHCI_CTRL_EXEC_TUNING 0x0040 +#define RT_SDHCI_CTRL_TUNED_CLK 0x0080 +#define RT_SDHCI_CMD23_ENABLE 0x0800 +#define RT_SDHCI_CTRL_V4_MODE 0x1000 +#define RT_SDHCI_CTRL_64BIT_ADDR 0x2000 +#define RT_SDHCI_CTRL_PRESET_VAL_ENABLE 0x8000 + +#define RT_SDHCI_CAPABILITIES 0x40 +#define RT_SDHCI_TIMEOUT_CLK_MASK RT_GENMASK(5, 0) +#define RT_SDHCI_TIMEOUT_CLK_SHIFT 0 +#define RT_SDHCI_TIMEOUT_CLK_UNIT 0x00000080 +#define RT_SDHCI_CLOCK_BASE_MASK RT_GENMASK(13, 8) +#define RT_SDHCI_CLOCK_BASE_SHIFT 8 +#define RT_SDHCI_CLOCK_V3_BASE_MASK RT_GENMASK(15, 8) +#define RT_SDHCI_MAX_BLOCK_MASK 0x00030000 +#define RT_SDHCI_MAX_BLOCK_SHIFT 16 +#define RT_SDHCI_CAN_DO_8BIT 0x00040000 +#define RT_SDHCI_CAN_DO_ADMA2 0x00080000 +#define RT_SDHCI_CAN_DO_ADMA1 0x00100000 +#define RT_SDHCI_CAN_DO_HISPD 0x00200000 +#define RT_SDHCI_CAN_DO_SDMA 0x00400000 +#define RT_SDHCI_CAN_DO_SUSPEND 0x00800000 +#define RT_SDHCI_CAN_VDD_330 0x01000000 +#define RT_SDHCI_CAN_VDD_300 0x02000000 +#define RT_SDHCI_CAN_VDD_180 0x04000000 +#define RT_SDHCI_CAN_64BIT_V4 0x08000000 +#define RT_SDHCI_CAN_64BIT 0x10000000 + +#define RT_SDHCI_CAPABILITIES_1 0x44 +#define RT_SDHCI_SUPPORT_SDR50 0x00000001 +#define RT_SDHCI_SUPPORT_SDR104 0x00000002 +#define RT_SDHCI_SUPPORT_DDR50 0x00000004 +#define RT_SDHCI_DRIVER_TYPE_A 0x00000010 +#define RT_SDHCI_DRIVER_TYPE_C 0x00000020 +#define RT_SDHCI_DRIVER_TYPE_D 0x00000040 +#define RT_SDHCI_RETUNING_TIMER_COUNT_MASK RT_GENMASK(11, 8) +#define RT_SDHCI_USE_SDR50_TUNING 0x00002000 +#define RT_SDHCI_RETUNING_MODE_MASK RT_GENMASK(15, 14) +#define RT_SDHCI_CLOCK_MUL_MASK RT_GENMASK(23, 16) +#define RT_SDHCI_CAN_DO_ADMA3 0x08000000 +#define RT_SDHCI_SUPPORT_HS400 0x80000000 /* Non-standard */ + +#define RT_SDHCI_MAX_CURRENT 0x48 +#define RT_SDHCI_MAX_CURRENT_LIMIT RT_GENMASK(7, 0) +#define RT_SDHCI_MAX_CURRENT_330_MASK RT_GENMASK(7, 0) +#define RT_SDHCI_MAX_CURRENT_300_MASK RT_GENMASK(15, 8) +#define RT_SDHCI_MAX_CURRENT_180_MASK RT_GENMASK(23, 16) +#define RT_SDHCI_MAX_CURRENT_MULTIPLIER 4 + +/* 4C-4F reserved for more max current */ + +#define RT_SDHCI_SET_ACMD12_ERROR 0x50 +#define RT_SDHCI_SET_INT_ERROR 0x52 + +#define 
RT_SDHCI_ADMA_ERROR 0x54 + +/* 55-57 reserved */ + +#define RT_SDHCI_ADMA_ADDRESS 0x58 +#define RT_SDHCI_ADMA_ADDRESS_HI 0x5c + +/* 60-FB reserved */ + +#define RT_SDHCI_PRESET_FOR_HIGH_SPEED 0x64 +#define RT_SDHCI_PRESET_FOR_SDR12 0x66 +#define RT_SDHCI_PRESET_FOR_SDR25 0x68 +#define RT_SDHCI_PRESET_FOR_SDR50 0x6a +#define RT_SDHCI_PRESET_FOR_SDR104 0x6c +#define RT_SDHCI_PRESET_FOR_DDR50 0x6e +#define RT_SDHCI_PRESET_FOR_HS400 0x74 /* Non-standard */ +#define RT_SDHCI_PRESET_DRV_MASK RT_GENMASK(15, 14) + +#define RT_SDHCI_PRESET_CLKGEN_SEL RT_BIT(10) +#define RT_SDHCI_PRESET_SDCLK_FREQ_MASK RT_GENMASK(9, 0) + +#define RT_SDHCI_SLOT_INT_STATUS 0xfc + +#define RT_SDHCI_HOST_VERSION 0xfe +#define RT_SDHCI_VENDOR_VER_MASK 0xff00 +#define RT_SDHCI_VENDOR_VER_SHIFT 8 +#define RT_SDHCI_SPEC_VER_MASK 0x00ff +#define RT_SDHCI_SPEC_VER_SHIFT 0 +#define RT_SDHCI_SPEC_100 0 +#define RT_SDHCI_SPEC_200 1 +#define RT_SDHCI_SPEC_300 2 +#define RT_SDHCI_SPEC_400 3 +#define RT_SDHCI_SPEC_410 4 +#define RT_SDHCI_SPEC_420 5 + +/* + * End of controller registers. + */ + +#define RT_SDHCI_MAX_DIV_SPEC_200 256 +#define RT_SDHCI_MAX_DIV_SPEC_300 2046 + +/* + * Host SDMA buffer boundary. Valid values from 4K to 512K in powers of 2. + */ +#define RT_SDHCI_DEFAULT_BOUNDARY_SIZE (512 * 1024) +#define RT_SDHCI_DEFAULT_BOUNDARY_ARG (rt_ilog2(RT_SDHCI_DEFAULT_BOUNDARY_SIZE) - 12) +#define RT_SDHCI_MAX_SEGS 128 + +/* Allow for a command request and a data request at the same time */ +#define RT_SDHCI_MAX_MRQS 2 +#define MMC_CMD_TRANSFER_TIME (10 * 1000000L) /* max 10 ms */ + +enum rt_sdhci_cookie +{ + RT_SDHCI_COOKIE_UNMAPPED, + RT_SDHCI_COOKIE_PRE_MAPPED, + RT_SDHCI_COOKIE_MAPPED, +}; + +struct rt_sdhci_host +{ + const char *hw_name; /* Hardware bus name */ + + unsigned int quirks; /* Deviations from spec. */ + +/* Controller doesn't honor resets unless we touch the clock register */ +#define RT_SDHCI_QUIRK_CLOCK_BEFORE_RESET (1 << 0) +/* Controller has bad caps bits, but really supports DMA */ +#define RT_SDHCI_QUIRK_FORCE_DMA (1 << 1) +/* Controller doesn't like to be reset when there is no card inserted. 
*/ +#define RT_SDHCI_QUIRK_NO_CARD_NO_RESET (1 << 2) +/* Controller doesn't like clearing the power reg before a change */ +#define RT_SDHCI_QUIRK_SINGLE_POWER_WRITE (1 << 3) +/* Controller has an unusable DMA engine */ +#define RT_SDHCI_QUIRK_BROKEN_DMA (1 << 5) +/* Controller has an unusable ADMA engine */ +#define RT_SDHCI_QUIRK_BROKEN_ADMA (1 << 6) +/* Controller can only DMA from 32-bit aligned addresses */ +#define RT_SDHCI_QUIRK_32BIT_DMA_ADDR (1 << 7) +/* Controller can only DMA chunk sizes that are a multiple of 32 bits */ +#define RT_SDHCI_QUIRK_32BIT_DMA_SIZE (1 << 8) +/* Controller can only ADMA chunks that are a multiple of 32 bits */ +#define RT_SDHCI_QUIRK_32BIT_ADMA_SIZE (1 << 9) +/* Controller needs to be reset after each request to stay stable */ +#define RT_SDHCI_QUIRK_RESET_AFTER_REQUEST (1 << 10) +/* Controller needs voltage and power writes to happen separately */ +#define RT_SDHCI_QUIRK_NO_SIMULT_VDD_AND_POWER (1 << 11) +/* Controller provides an incorrect timeout value for transfers */ +#define RT_SDHCI_QUIRK_BROKEN_TIMEOUT_VAL (1 << 12) +/* Controller has an issue with buffer bits for small transfers */ +#define RT_SDHCI_QUIRK_BROKEN_SMALL_PIO (1 << 13) +/* Controller does not provide transfer-complete interrupt when not busy */ +#define RT_SDHCI_QUIRK_NO_BUSY_IRQ (1 << 14) +/* Controller has unreliable card detection */ +#define RT_SDHCI_QUIRK_BROKEN_CARD_DETECTION (1 << 15) +/* Controller reports inverted write-protect state */ +#define RT_SDHCI_QUIRK_INVERTED_WRITE_PROTECT (1 << 16) +/* Controller has unusable command queue engine */ +#define RT_SDHCI_QUIRK_BROKEN_CQE (1 << 17) +/* Controller does not like fast PIO transfers */ +#define RT_SDHCI_QUIRK_PIO_NEEDS_DELAY (1 << 18) +/* Controller does not have a LED */ +#define RT_SDHCI_QUIRK_NO_LED (1 << 19) +/* Controller has to be forced to use block size of 2048 bytes */ +#define RT_SDHCI_QUIRK_FORCE_BLK_SZ_2048 (1 << 20) +/* Controller cannot do multi-block transfers */ +#define RT_SDHCI_QUIRK_NO_MULTIBLOCK (1 << 21) +/* Controller can only handle 1-bit data transfers */ +#define RT_SDHCI_QUIRK_FORCE_1_BIT_DATA (1 << 22) +/* Controller needs 10ms delay between applying power and clock */ +#define RT_SDHCI_QUIRK_DELAY_AFTER_POWER (1 << 23) +/* Controller uses SDCLK instead of TMCLK for data timeouts */ +#define RT_SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK (1 << 24) +/* Controller reports wrong base clock capability */ +#define RT_SDHCI_QUIRK_CAP_CLOCK_BASE_BROKEN (1 << 25) +/* Controller cannot support End Attribute in NOP ADMA descriptor */ +#define RT_SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC (1 << 26) +/* Controller uses Auto CMD12 command to stop the transfer */ +#define RT_SDHCI_QUIRK_MULTIBLOCK_READ_ACMD12 (1 << 28) +/* Controller doesn't have HISPD bit field in HI-SPEED SD card */ +#define RT_SDHCI_QUIRK_NO_HISPD_BIT (1 << 29) +/* Controller treats ADMA descriptors with length 0000h incorrectly */ +#define RT_SDHCI_QUIRK_BROKEN_ADMA_ZEROLEN_DESC (1 << 30) +/* The read-only detection via RT_SDHCI_PRESENT_STATE register is unstable */ +#define RT_SDHCI_QUIRK_UNSTABLE_RO_DETECT (1 << 31) + + unsigned int quirks2; /* More deviations from spec. 
*/ + +#define RT_SDHCI_QUIRK2_HOST_OFF_CARD_ON (1 << 0) +#define RT_SDHCI_QUIRK2_HOST_NO_CMD23 (1 << 1) +/* The system physically doesn't support 1.8v, even if the host does */ +#define RT_SDHCI_QUIRK2_NO_1_8_V (1 << 2) +#define RT_SDHCI_QUIRK2_PRESET_VALUE_BROKEN (1 << 3) +#define RT_SDHCI_QUIRK2_CARD_ON_NEEDS_BUS_ON (1 << 4) +/* Controller has a non-standard host control register */ +#define RT_SDHCI_QUIRK2_BROKEN_HOST_CONTROL (1 << 5) +/* Controller does not support HS200 */ +#define RT_SDHCI_QUIRK2_BROKEN_HS200 (1 << 6) +/* Controller does not support DDR50 */ +#define RT_SDHCI_QUIRK2_BROKEN_DDR50 (1 << 7) +/* Stop command (CMD12) can set Transfer Complete when not using MMC_RSP_BUSY */ +#define RT_SDHCI_QUIRK2_STOP_WITH_TC (1 << 8) +/* Controller does not support 64-bit DMA */ +#define RT_SDHCI_QUIRK2_BROKEN_64_BIT_DMA (1 << 9) +/* need clear transfer mode register before send cmd */ +#define RT_SDHCI_QUIRK2_CLEAR_TRANSFERMODE_REG_BEFORE_CMD (1 << 10) +/* Capability register bit-63 indicates HS400 support */ +#define RT_SDHCI_QUIRK2_CAPS_BIT63_FOR_HS400 (1 << 11) +/* forced tuned clock */ +#define RT_SDHCI_QUIRK2_TUNING_WORK_AROUND (1 << 12) +/* disable the block count for single block transactions */ +#define RT_SDHCI_QUIRK2_SUPPORT_SINGLE (1 << 13) +/* Controller broken with using ACMD23 */ +#define RT_SDHCI_QUIRK2_ACMD23_BROKEN (1 << 14) +/* Broken Clock divider zero in controller */ +#define RT_SDHCI_QUIRK2_CLOCK_DIV_ZERO_BROKEN (1 << 15) +/* Controller has CRC in 136 bit Command Response */ +#define RT_SDHCI_QUIRK2_RSP_136_HAS_CRC (1 << 16) + +#define RT_SDHCI_QUIRK2_DISABLE_HW_TIMEOUT (1 << 17) + +#define RT_SDHCI_QUIRK2_USE_32BIT_BLK_CNT (1 << 18) +/* Issue CMD and DATA reset together */ +#define RT_SDHCI_QUIRK2_ISSUE_CMD_DAT_RESET_TOGETHER (1 << 19) + + void *data_buf; + + int irq; /* Device IRQ */ + void *ioaddr; /* Mapped address */ + char *bounce_buffer; /* For packing SDMA reads/writes */ + rt_uint64_t bounce_addr; + unsigned int bounce_buffer_size; + + const struct rt_sdhci_ops *ops; /* Low level hw interface */ + + /* Internal data */ + struct rt_mmc_host *mmc; /* MMC structure */ + struct rt_mmc_host_ops mmc_host_ops; /* MMC host ops */ + rt_uint64_t dma_mask; /* custom DMA mask */ + + rt_spinlock_t lock; + int flags; /* Host attributes */ +#define RT_SDHCI_USE_SDMA (1 << 0) /* Host is SDMA capable */ +#define RT_SDHCI_USE_ADMA (1 << 1) /* Host is ADMA capable */ +#define RT_SDHCI_REQ_USE_DMA (1 << 2) /* Use DMA for this req. */ +#define RT_SDHCI_DEVICE_DEAD (1 << 3) /* Device unresponsive */ +#define RT_SDHCI_SDR50_NEEDS_TUNING (1 << 4) /* SDR50 needs tuning */ +#define RT_SDHCI_AUTO_CMD12 (1 << 6) /* Auto CMD12 support */ +#define RT_SDHCI_AUTO_CMD23 (1 << 7) /* Auto CMD23 support */ +#define RT_SDHCI_PV_ENABLED (1 << 8) /* Preset value enabled */ +#define RT_SDHCI_USE_64_BIT_DMA (1 << 12) /* Use 64-bit DMA */ +#define RT_SDHCI_HS400_TUNING (1 << 13) /* Tuning for HS400 */ +#define RT_SDHCI_SIGNALING_330 (1 << 14) /* Host is capable of 3.3V signaling */ +#define RT_SDHCI_SIGNALING_180 (1 << 15) /* Host is capable of 1.8V signaling */ +#define RT_SDHCI_SIGNALING_120 (1 << 16) /* Host is capable of 1.2V signaling */ + + unsigned int version; /* RT_SDHCI spec. 
version */ + + unsigned int max_clk; /* Max possible freq (MHz) */ + unsigned int timeout_clk; /* Timeout freq (KHz) */ + rt_uint8_t max_timeout_count; /* Vendor specific max timeout count */ + unsigned int clk_mul; /* Clock Muliplier value */ + + unsigned int clock; /* Current clock (MHz) */ + rt_uint8_t pwr; /* Current voltage */ + rt_uint8_t drv_type; /* Current UHS-I driver type */ + rt_bool_t reinit_uhs; /* Force UHS-related re-initialization */ + + rt_bool_t runtime_suspended; /* Host is runtime suspended */ + rt_bool_t bus_on; /* Bus power prevents runtime suspend */ + rt_bool_t preset_enabled; /* Preset is enabled */ + rt_bool_t pending_reset; /* Cmd/data reset is pending */ + rt_bool_t irq_wake_enabled; /* IRQ wakeup is enabled */ + rt_bool_t v4_mode; /* Host Version 4 Enable */ + rt_bool_t always_defer_done; /* Always defer to complete requests */ + + struct rt_mmcsd_req *mrqs_done[RT_SDHCI_MAX_MRQS]; /* Requests done */ + struct rt_mmcsd_cmd *cmd; /* Current command */ + struct rt_mmcsd_cmd *data_cmd; /* Current data command */ + struct rt_mmcsd_cmd *deferred_cmd; /* Deferred command */ + struct rt_mmcsd_data *data; /* Current data request */ + unsigned int data_early : 1; /* Data finished before cmd */ + + unsigned int blocks; /* remaining PIO blocks */ + rt_size_t align_buffer_sz; /* Bounce buffer size */ + rt_uint64_t align_addr; /* Mapped bounce buffer */ + + struct rt_workqueue *complete_wq; /* Request completion wq */ + struct rt_work complete_work; /* Request completion work */ + + struct rt_workqueue *irq_wq; + struct rt_work irq_work; + + struct rt_timer timer; /* Timer for timeouts */ + struct rt_timer data_timer; /* Timer for data timeouts */ + + rt_uint32_t caps; /* CAPABILITY_0 */ + rt_uint32_t caps1; /* CAPABILITY_1 */ + rt_bool_t read_caps; /* Capability flags have been read */ + + rt_bool_t sdhci_core_to_disable_vqmmc; /* sdhci core can disable vqmmc */ + unsigned int ocr_avail_sdio; /* OCR bit masks */ + unsigned int ocr_avail_sd; + unsigned int ocr_avail_mmc; + rt_uint32_t ocr_mask; /* available voltages */ + + unsigned timing; /* Current timing */ + + rt_uint32_t thread_isr; + + rt_uint32_t ier; /* Cached registers */ + + rt_bool_t cqe_on; /* CQE is operating */ + rt_uint32_t cqe_ier; /* CQE interrupt mask */ + rt_uint32_t cqe_err_ier; /* CQE error interrupt mask */ + + rt_wqueue_t buf_ready_int; /* Waitqueue for Buffer Read Ready interrupt */ + unsigned int tuning_done; /* Condition flag set when CMD19 succeeds */ + + unsigned int tuning_count; /* Timer count for re-tuning */ + unsigned int tuning_mode; /* Re-tuning mode supported by host */ + unsigned int tuning_err; /* Error code for re-tuning */ +#define RT_SDHCI_TUNING_MODE_1 0 +#define RT_SDHCI_TUNING_MODE_2 1 +#define RT_SDHCI_TUNING_MODE_3 2 + + int tuning_delay; + int tuning_loop_count; + rt_uint32_t sdma_boundary; + rt_uint64_t data_timeout; + + rt_ubase_t private[]; +}; + +struct rt_sdhci_ops +{ + rt_uint32_t (*read_l)(struct rt_sdhci_host *host, int reg); + rt_uint16_t (*read_w)(struct rt_sdhci_host *host, int reg); + rt_uint8_t (*read_b)(struct rt_sdhci_host *host, int reg); + void (*write_l)(struct rt_sdhci_host *host, rt_uint32_t val, int reg); + void (*write_w)(struct rt_sdhci_host *host, rt_uint16_t val, int reg); + void (*write_b)(struct rt_sdhci_host *host, rt_uint8_t val, int reg); + void (*set_clock)(struct rt_sdhci_host *host, unsigned int clock); + void (*set_power)(struct rt_sdhci_host *host, unsigned char mode, unsigned short vdd); + rt_uint32_t (*irq)(struct rt_sdhci_host *host, 
rt_uint32_t intmask); + int (*set_dma_mask)(struct rt_sdhci_host *host); + int (*enable_dma)(struct rt_sdhci_host *host); + unsigned int (*get_max_clock)(struct rt_sdhci_host *host); + unsigned int (*get_min_clock)(struct rt_sdhci_host *host); + unsigned int (*get_timeout_clock)(struct rt_sdhci_host *host); + unsigned int (*get_max_timeout_count)(struct rt_sdhci_host *host); + void (*set_timeout)(struct rt_sdhci_host *host, struct rt_mmcsd_cmd *cmd); + void (*set_bus_width)(struct rt_sdhci_host *host, int width); + unsigned int (*get_ro)(struct rt_sdhci_host *host); + void (*reset)(struct rt_sdhci_host *host, rt_uint8_t mask); + int (*platform_execute_tuning)(struct rt_sdhci_host *host, rt_uint32_t opcode); + void (*set_uhs_signaling)(struct rt_sdhci_host *host, unsigned int uhs); + void (*hw_reset)(struct rt_sdhci_host *host); + void (*card_event)(struct rt_sdhci_host *host); + void (*voltage_switch)(struct rt_sdhci_host *host); + void (*request_done)(struct rt_sdhci_host *host, struct rt_mmcsd_req *mrq); +}; + +rt_inline void rt_sdhci_writel(struct rt_sdhci_host *host, rt_uint32_t val, int reg) +{ + if (host->ops->write_l) + { + host->ops->write_l(host, val, reg); + } + else + { + HWREG32(host->ioaddr + reg) = val; + } +} + +rt_inline void rt_sdhci_writew(struct rt_sdhci_host *host, rt_uint16_t val, int reg) +{ + if (host->ops->write_w) + { + host->ops->write_w(host, val, reg); + } + else + { + HWREG16(host->ioaddr + reg) = val; + } +} + +rt_inline void rt_sdhci_writeb(struct rt_sdhci_host *host, rt_uint8_t val, int reg) +{ + if (host->ops->write_b) + { + host->ops->write_b(host, val, reg); + } + else + { + HWREG8(host->ioaddr + reg) = val; + } +} + +rt_inline rt_uint32_t rt_sdhci_readl(struct rt_sdhci_host *host, int reg) +{ + if (host->ops->read_l) + { + return host->ops->read_l(host, reg); + } + + return HWREG32(host->ioaddr + reg); +} + +rt_inline rt_uint16_t rt_sdhci_readw(struct rt_sdhci_host *host, int reg) +{ + if (host->ops->read_w) + { + return host->ops->read_w(host, reg); + } + + return HWREG16(host->ioaddr + reg); +} + +rt_inline rt_uint8_t rt_sdhci_readb(struct rt_sdhci_host *host, int reg) +{ + if (host->ops->read_b) + { + return host->ops->read_b(host, reg); + } + + return HWREG8(host->ioaddr + reg); +} + +struct rt_sdhci_host *rt_sdhci_alloc_host(struct rt_device *dev, rt_size_t priv_size); +void rt_sdhci_free_host(struct rt_sdhci_host *host); + +rt_inline void *rt_sdhci_priv(struct rt_sdhci_host *host) +{ + return host->private; +} + +void rt_sdhci_read_caps(struct rt_sdhci_host *host, const rt_uint16_t *ver, + const rt_uint32_t *caps, const rt_uint32_t *caps1); +int rt_sdhci_setup_host(struct rt_sdhci_host *host); +void rt_sdhci_cleanup_host(struct rt_sdhci_host *host); +int rt_sdhci_set_and_add_host(struct rt_sdhci_host *host); +int rt_sdhci_init_host(struct rt_sdhci_host *host); +void rt_sdhci_uninit_host(struct rt_sdhci_host *host, rt_bool_t dead); + +rt_uint16_t rt_sdhci_clk_set(struct rt_sdhci_host *host, unsigned int clock, unsigned int *actual_clock); +void rt_sdhci_set_clock(struct rt_sdhci_host *host, unsigned int clock); +void rt_sdhci_clk_enable(struct rt_sdhci_host *host, rt_uint16_t clk); +void rt_sdhci_set_power(struct rt_sdhci_host *host, unsigned char mode,unsigned short vdd); +void rt_read_reg(struct rt_sdhci_host* host); + +void rt_sdhci_set_power_and_bus_voltage(struct rt_sdhci_host *host, unsigned char mode, unsigned short vdd); +void rt_sdhci_set_power_with_noreg(struct rt_sdhci_host *host, unsigned char mode, unsigned short vdd); +void 
rt_sdhci_start_request(struct rt_mmc_host *mmc, struct rt_mmcsd_req *mrq); +int rt_sdhci_start_request_atomic(struct rt_mmc_host *mmc, struct rt_mmcsd_req *mrq); +void rt_sdhci_set_bus_width(struct rt_sdhci_host *host, int width); +void rt_sdhci_reset(struct rt_sdhci_host *host, rt_uint8_t mask); +void rt_sdhci_set_uhs(struct rt_sdhci_host *host, unsigned timing); +int rt_sdhci_execute_tuning(struct rt_mmc_host *mmc, rt_uint32_t opcode); +int __sdhci_execute_tuning(struct rt_sdhci_host *host, rt_uint32_t opcode); +void rt_sdhci_ios_set(struct rt_mmc_host *mmc, struct rt_mmcsd_io_cfg *ios); +int rt_sdhci_start_signal_voltage_switch(struct rt_mmc_host *mmc, struct rt_mmcsd_io_cfg *ios); +void rt_sdhci_enable_io_irq(struct rt_mmc_host *mmc, int enable); +void rt_sdhci_start_tuning(struct rt_sdhci_host *host); +void rt_sdhci_end_tuning(struct rt_sdhci_host *host); +void rt_sdhci_reset_tuning(struct rt_sdhci_host *host); +void rt_sdhci_send_tuning(struct rt_sdhci_host *host, rt_uint32_t opcode); +void rt_sdhci_abort_tuning(struct rt_sdhci_host *host, rt_uint32_t opcode); +void rt_sdhci_enable_v4_mode(struct rt_sdhci_host *host); +void rt_sdhci_data_irq_timeout(struct rt_sdhci_host *host, rt_bool_t enable); +void rt_sdhci_timeout_set(struct rt_sdhci_host *host, struct rt_mmcsd_cmd *cmd); +void rt_sdhci_read_reg_debug(struct rt_sdhci_host* host); + +#endif /* __DEV_SDHCI_H__ */ diff --git a/components/drivers/include/drivers/dev_sdhci_host.h b/components/drivers/include/drivers/dev_sdhci_host.h new file mode 100755 index 00000000000..3ba360d543a --- /dev/null +++ b/components/drivers/include/drivers/dev_sdhci_host.h @@ -0,0 +1,310 @@ +/* + * Copyright (c) 2006-2024 RT-Thread Development Team + * + * SPDX-License-Identifier: Apache-2.0 + * + * Change Logs: + * Date Author Notes + * 2024-08-16 zhujiale first version + */ + +#ifndef __DEV_SDHCI_HOST_H__ +#define __DEV_SDHCI_HOST_H__ + +#include + +#include +#include +#include +#include + +#define rt_mmc_dev(x) ((x)->parent) + +#define MMC_SEND_TUNING_BLOCK_HS200 SEND_TUNING_BLOCK_HS200 +#define MMC_SEND_TUNING_BLOCK SEND_TUNING_BLOCK +#define MMC_STOP_TRANSMISSION STOP_TRANSMISSION +#define MMC_BUS_TEST_R 14 /* adtc R1 */ +#define MMC_WRITE_MULTIPLE_BLOCK WRITE_MULTIPLE_BLOCK +#define MMC_READ_MULTIPLE_BLOCK READ_MULTIPLE_BLOCK + +#define MMC_TIMING_UHS_DDR50 MMCSD_TIMING_UHS_DDR50 +#define MMC_TIMING_UHS_SDR50 MMCSD_TIMING_UHS_SDR50 +#define MMC_TIMING_MMC_HS200 MMCSD_TIMING_MMC_HS200 +#define MMC_TIMING_MMC_HS400 MMCSD_TIMING_MMC_HS400 +#define MMC_TIMING_UHS_SDR104 MMCSD_TIMING_UHS_SDR104 +#define MMC_TIMING_UHS_SDR25 MMCSD_TIMING_UHS_SDR25 +#define MMC_TIMING_MMC_DDR52 MMCSD_TIMING_MMC_DDR52 +#define MMC_TIMING_UHS_SDR12 MMCSD_TIMING_UHS_SDR12 +#define MMC_TIMING_SD_HS MMCSD_TIMING_SD_HS +#define MMC_TIMING_MMC_HS MMCSD_TIMING_MMC_HS + +#define MMC_POWER_OFF MMCSD_POWER_OFF +#define MMC_POWER_UP MMCSD_POWER_UP +#define MMC_POWER_ON MMCSD_POWER_ON +#define MMC_POWER_UNDEFINED 3 + +#define MMC_SET_DRIVER_TYPE_B 0 +#define MMC_SET_DRIVER_TYPE_A 1 +#define MMC_SET_DRIVER_TYPE_C 2 +#define MMC_SET_DRIVER_TYPE_D 3 + +#define MMC_SIGNAL_VOLTAGE_330 0 +#define MMC_SIGNAL_VOLTAGE_180 1 +#define MMC_SIGNAL_VOLTAGE_120 2 + +#define MMC_RSP_PRESENT (1 << 16) +#define MMC_RSP_136 (1 << 17) /* 136 bit response */ +#define MMC_RSP_CRC (1 << 18) /* expect valid crc */ +#define MMC_RSP_BUSY (1 << 19) /* card may send busy */ +#define MMC_RSP_OPCODE (1 << 20) /* response contains opcode */ + +#define MMC_RSP_NONE (0) +#define MMC_RSP_R1 (MMC_RSP_PRESENT | 
MMC_RSP_CRC | MMC_RSP_OPCODE) +#define MMC_RSP_R1B (MMC_RSP_PRESENT | MMC_RSP_CRC | MMC_RSP_OPCODE | MMC_RSP_BUSY) +#define MMC_RSP_R2 (MMC_RSP_PRESENT | MMC_RSP_136 | MMC_RSP_CRC) +#define MMC_RSP_R3 (MMC_RSP_PRESENT) +#define MMC_RSP_R4 (MMC_RSP_PRESENT) +#define MMC_RSP_R5 (MMC_RSP_PRESENT | MMC_RSP_CRC | MMC_RSP_OPCODE) +#define MMC_RSP_R6 (MMC_RSP_PRESENT | MMC_RSP_CRC | MMC_RSP_OPCODE) +#define MMC_RSP_R7 (MMC_RSP_PRESENT | MMC_RSP_CRC | MMC_RSP_OPCODE) + +#define MMC_CMD_ADTC CMD_ADTC + +#define MMC_BUS_WIDTH_8 MMCSD_BUS_WIDTH_8 +#define MMC_BUS_WIDTH_4 MMCSD_BUS_WIDTH_4 +#define MMC_BUS_WIDTH_1 MMCSD_BUS_WIDTH_1 + +#define MMC_PM_KEEP_POWER (1 << 0) /* preserve card power during suspend */ +#define MMC_PM_WAKE_SDIO_IRQ (1 << 1) /* wake up host system on SDIO IRQ assertion */ + +enum mmc_blk_status +{ + MMC_BLK_SUCCESS = 0, + MMC_BLK_PARTIAL, + MMC_BLK_CMD_ERR, + MMC_BLK_RETRY, + MMC_BLK_ABORT, + MMC_BLK_DATA_ERR, + MMC_BLK_ECC_ERR, + MMC_BLK_NOMEDIUM, + MMC_BLK_NEW_REQUEST, +}; + +#define MMC_NUM_CLK_PHASES (MMC_TIMING_MMC_HS400 + 1) + +struct rt_mmc_host; + +struct rt_mmc_host_ops +{ + void (*request)(struct rt_mmc_host *host, struct rt_mmcsd_req *req); + void (*set_ios)(struct rt_mmc_host *host, struct rt_mmcsd_io_cfg *ios); + int (*get_ro)(struct rt_mmc_host *host); + int (*get_cd)(struct rt_mmc_host *host); + void (*enable_sdio_irq)(struct rt_mmc_host *host, int enable); + void (*ack_sdio_irq)(struct rt_mmc_host *host); + int (*start_signal_voltage_switch)(struct rt_mmc_host *host, struct rt_mmcsd_io_cfg *ios); + int (*card_busy)(struct rt_mmc_host *host); + int (*execute_tuning)(struct rt_mmc_host *host, unsigned opcode); + int (*prepare_hs400_tuning)(struct rt_mmc_host *host, struct rt_mmcsd_io_cfg *ios); + int (*hs400_prepare_ddr)(struct rt_mmc_host *host); + void (*hs400_downgrade)(struct rt_mmc_host *host); + void (*hs400_complete)(struct rt_mmc_host *host); + void (*hs400_enhanced_strobe)(struct rt_mmc_host *host, struct rt_mmcsd_io_cfg *ios); + void (*hw_reset)(struct rt_mmc_host *host); + void (*card_event)(struct rt_mmc_host *host); +}; + +/* VDD voltage 3.3 ~ 3.4 */ +#define MMC_VDD_34_35 0x00400000 /* VDD voltage 3.4 ~ 3.5 */ +#define MMC_VDD_35_36 0x00800000 /* VDD voltage 3.5 ~ 3.6 */ + +#define MMC_CAP2_HS200_1_8V_SDR MMCSD_SUP_HS200_1V8 +#define MMC_CAP2_HS200_1_2V_SDR MMCSD_SUP_HS200_1V2 +#define MMC_CAP_4_BIT_DATA MMCSD_BUSWIDTH_4 +#define MMC_CAP_8_BIT_DATA MMCSD_BUSWIDTH_8 +#define MMC_CAP2_HS200 MMCSD_SUP_HS200 +#define MMC_CAP_MMC_HIGHSPEED MMCSD_SUP_HIGHSPEED +#define MMC_CAP_SD_HIGHSPEED MMCSD_SUP_HIGHSPEED +#define MMC_CAP_1_8V_DDR MMCSD_SUP_DDR_1V8 +#define MMC_CAP_3_3V_DDR MMCSD_SUP_DDR_3V3 +#define MMC_CAP_1_2V_DDR MMCSD_SUP_DDR_1V2 +#define MMC_CAP_NONREMOVABLE MMCSD_SUP_NONREMOVABLE + +#define MMC_CAP_UHS_DDR50 0 +#define MMC_CAP2_HS400 0 +#define MMC_CAP_UHS_SDR50 0 +#define MMC_CAP_UHS_SDR25 0 +#define MMC_CAP_UHS_SDR12 0 +#define MMC_CAP_UHS_SDR104 0 +#define MMC_CAP_UHS 0 +#define MMC_CAP2_HSX00_1_8V 0 +#define MMC_CAP2_HS400_ES 0 +#define MMC_CAP_NEEDS_POLL 0 +#define MMC_CAP2_HSX00_1_2V 0 +#define MMC_CAP2_HS400_1_2V 0 +#define MMC_CAP2_HS400_1_8V 0 +#define MMC_CAP_DRIVER_TYPE_D 0 +#define MMC_CAP_DRIVER_TYPE_C 0 +#define MMC_SET_DRIVER_TYPE_B 0 +#define MMC_CAP_DRIVER_TYPE_A 0 +#define MMC_CAP2_SDIO_IRQ_NOTHREAD 0 +#define MMC_CAP_CMD23 0 +#define MMC_CAP_SDIO_IRQ 0 + +#define MMC_CAP2_NO_SDIO (1 << 19) +#define MMC_CAP2_NO_SD (1 << 21) +#define MMC_CAP2_NO_MMC (1 << 22) +#define MMC_CAP2_CQE (1 << 23) + +#define MMC_VDD_165_195 VDD_165_195 
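
The new dev_sdhci.h above exports both the low-level `rt_sdhci_ops` hooks and generic helpers (`rt_sdhci_set_clock`, `rt_sdhci_set_bus_width`, `rt_sdhci_reset`, `rt_sdhci_set_uhs`, ...), mirroring the Linux SDHCI split between the core and vendor glue. The sketch below shows what a platform glue driver might look like on top of it; the `demo_` names, the chosen quirk, and the assumption that `rt_sdhci_init_host()` performs the full setup-and-register sequence are illustrative only and not defined by this patch.

```c
#include <rtdevice.h>

static const struct rt_sdhci_ops demo_sdhci_ops =
{
    /* The MMIO accessor hooks may stay NULL: rt_sdhci_readl()/writel()
     * fall back to plain HWREG32/16/8 accesses when no override is set. */
    .set_clock         = rt_sdhci_set_clock,
    .set_bus_width     = rt_sdhci_set_bus_width,
    .reset             = rt_sdhci_reset,
    .set_uhs_signaling = rt_sdhci_set_uhs,
};

static int demo_sdhci_probe(struct rt_device *dev, void *mmio_base, int irq)
{
    struct rt_sdhci_host *host;
    int err;

    host = rt_sdhci_alloc_host(dev, 0);
    if (!host)
    {
        return -RT_ENOMEM;
    }

    host->ioaddr = mmio_base;
    host->irq    = irq;
    host->ops    = &demo_sdhci_ops;
    /* Example quirk for a controller with an unreliable card-detect line. */
    host->quirks = RT_SDHCI_QUIRK_BROKEN_CARD_DETECTION;

    /* Assumed to behave like Linux's sdhci_add_host(): read the capability
     * registers, size DMA, then register the host with the MMC core. */
    err = rt_sdhci_init_host(host);
    if (err)
    {
        rt_sdhci_free_host(host);
        return err;
    }

    return 0;
}
```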
+#define MMC_VDD_20_21 VDD_20_21 +#define MMC_VDD_29_30 VDD_29_30 +#define MMC_VDD_30_31 VDD_30_31 +#define MMC_VDD_32_33 VDD_32_33 +#define MMC_VDD_33_34 VDD_33_34 + +struct rt_mmc_host +{ + struct rt_mmcsd_host rthost; + struct rt_device *parent; + int index; + const struct rt_mmc_host_ops *ops; + unsigned int f_min; + unsigned int f_max; + unsigned int f_init; + rt_uint32_t ocr_avail; + rt_uint32_t ocr_avail_sdio; /* SDIO-specific OCR */ + rt_uint32_t ocr_avail_sd; /* SD-specific OCR */ + rt_uint32_t ocr_avail_mmc; /* MMC-specific OCR */ + struct wakeup_source *ws; /* Enable consume of uevents */ + rt_uint32_t max_current_330; + rt_uint32_t max_current_300; + rt_uint32_t max_current_180; + rt_uint32_t caps; /* Host capabilities */ + rt_uint32_t caps2; /* More host capabilities */ + + + /* host specific block data */ + unsigned int max_seg_size; /* see blk_queue_max_segment_size */ + unsigned short max_segs; /* see blk_queue_max_segments */ + unsigned short unused; + unsigned int max_req_size; /* maximum number of bytes in one req */ + unsigned int max_blk_size; /* maximum size of one mmc block */ + unsigned int max_blk_count; /* maximum number of blocks in one req */ + unsigned int max_busy_timeout; /* max busy timeout in ms */ + struct rt_mmcsd_io_cfg ios; /* current io bus settings */ + unsigned int retune_period; + /* group bitfields together to minimize padding */ + unsigned int use_spi_crc : 1; + unsigned int claimed : 1; /* host exclusively claimed */ + unsigned int doing_init_tune : 1; /* initial tuning in progress */ + unsigned int can_retune : 1; /* re-tuning can be used */ + unsigned int doing_retune : 1; /* re-tuning in progress */ + unsigned int retune_now : 1; /* do re-tuning at next req */ + unsigned int retune_paused : 1; /* re-tuning is temporarily disabled */ + unsigned int retune_crc_disable : 1; /* don't trigger retune upon crc */ + unsigned int can_dma_map_merge : 1; /* merging can be used */ + unsigned int vqmmc_enabled : 1; /* vqmmc regulator is enabled */ + + int need_retune; /* re-tuning is needed */ + int hold_retune; /* hold off re-tuning */ + rt_bool_t trigger_card_event; /* card_event necessary */ + unsigned int sdio_irqs; + rt_bool_t sdio_irq_pending; + + /* Ongoing data transfer that allows commands during transfer */ + struct rt_mmcsd_req *ongoing_mrq; + + rt_uint32_t actual_clock; /* Actual HC clock rate */ + rt_uint32_t pm_caps; + rt_ubase_t private[]; +}; + +rt_inline int mmc_card_is_removable(struct rt_mmc_host *host) +{ + return !(host->caps & MMC_CAP_NONREMOVABLE); +} + +struct rt_mmc_host *rt_mmc_alloc_host(int extra, struct rt_device *); +rt_err_t rt_mmc_add_host(struct rt_mmc_host *); +void rt_mmc_remove_host(struct rt_mmc_host *); +void rt_mmc_free_host(struct rt_mmc_host *); +rt_err_t rt_mmc_of_parse(struct rt_mmc_host *host); + +rt_inline void *rt_mmc_priv(struct rt_mmc_host *host) +{ + return (void *)host->private; +} + +#define mmc_host_is_spi(host) ((host)->caps & MMC_CAP_SPI) + +#define mmc_dev(x) ((x)->parent) +#define mmc_classdev(x) (&(x)->class_dev) +#define mmc_hostname(x) (x->parent->parent.name) + +void rt_mmc_detect_change(struct rt_mmc_host *host, rt_ubase_t delay); +void rt_mmc_request_done(struct rt_mmc_host *host, struct rt_mmcsd_req *req); + +rt_inline rt_bool_t sdio_irq_claimed(struct rt_mmc_host *host) +{ + return host->sdio_irqs > 0; +} + +void mmc_retune_timer_stop(struct rt_mmc_host* host); + +enum dma_data_direction +{ + DMA_BIDIRECTIONAL = 0, + DMA_TO_DEVICE = 1, + DMA_FROM_DEVICE = 2, + DMA_NONE = 3, +}; + +rt_inline 
void mmc_retune_needed(struct rt_mmc_host *host) +{ + if (host->can_retune) + { + host->need_retune = 1; + } +} + +rt_inline rt_bool_t mmc_can_retune(struct rt_mmc_host *host) +{ + return host->can_retune == 1; +} + +rt_inline rt_bool_t mmc_doing_retune(struct rt_mmc_host *host) +{ + return host->doing_retune == 1; +} + +rt_inline rt_bool_t mmc_doing_tune(struct rt_mmc_host *host) +{ + return host->doing_retune == 1 || host->doing_init_tune == 1; +} + +rt_inline int mmc_get_dma_dir(struct rt_mmcsd_data *data) +{ + return data->flags & DATA_DIR_WRITE ? DMA_TO_DEVICE : DMA_FROM_DEVICE; +} + +rt_inline rt_bool_t mmc_op_multi(rt_uint32_t opcode) +{ + return opcode == MMC_WRITE_MULTIPLE_BLOCK || + opcode == MMC_READ_MULTIPLE_BLOCK; +} + +rt_inline rt_bool_t mmc_op_tuning(rt_uint32_t opcode) +{ + return opcode == MMC_SEND_TUNING_BLOCK || + opcode == MMC_SEND_TUNING_BLOCK_HS200; +} + +rt_err_t rt_mmc_gpio_get_cd(struct rt_mmc_host *host); +void rt_mmc_detect_change(struct rt_mmc_host *host, rt_ubase_t delay); +rt_bool_t rt_mmc_can_gpio_ro(struct rt_mmc_host *host); +rt_err_t rt_mmc_gpio_get_ro(struct rt_mmc_host *host); + +rt_err_t rt_mmc_send_abort_tuning(struct rt_mmc_host *host, rt_uint32_t opcode); + +#endif /* __DEV_SDHCI_HOST_H__ */ diff --git a/components/drivers/include/drivers/mmcsd_host.h b/components/drivers/include/drivers/mmcsd_host.h index 5632ee10753..68508de6121 100644 --- a/components/drivers/include/drivers/mmcsd_host.h +++ b/components/drivers/include/drivers/mmcsd_host.h @@ -76,6 +76,8 @@ struct rt_mmcsd_io_cfg #define MMCSD_SIGNAL_VOLTAGE_330 0 #define MMCSD_SIGNAL_VOLTAGE_180 1 #define MMCSD_SIGNAL_VOLTAGE_120 2 + + rt_bool_t enhanced_strobe; }; struct rt_mmcsd_host; @@ -88,9 +90,23 @@ struct rt_mmcsd_host_ops rt_int32_t (*get_card_status)(struct rt_mmcsd_host *host); void (*enable_sdio_irq)(struct rt_mmcsd_host *host, rt_int32_t en); rt_int32_t (*execute_tuning)(struct rt_mmcsd_host *host, rt_int32_t opcode); - rt_int32_t (*switch_uhs_voltage)(struct rt_mmcsd_host *host); + rt_bool_t (*card_busy)(struct rt_mmcsd_host *host); + rt_err_t (*signal_voltage_switch)(struct rt_mmcsd_host *host, struct rt_mmcsd_io_cfg *io_cfg); }; +#ifdef RT_USING_REGULATOR +struct rt_regulator; + +struct rt_mmcsd_supply +{ + rt_bool_t vqmmc_enabled; + rt_bool_t regulator_enabled; + + struct rt_regulator *vmmc; /* Card power supply */ + struct rt_regulator *vqmmc; /* Optional Vccq supply */ +}; +#endif /* RT_USING_REGULATOR */ + struct rt_mmcsd_host { char name[RT_NAME_MAX]; @@ -158,6 +174,13 @@ struct rt_mmcsd_host struct rt_semaphore *sdio_irq_sem; struct rt_thread *sdio_irq_thread; +#ifdef RT_USING_REGULATOR + struct rt_mmcsd_supply supply; +#endif +#ifdef RT_USING_OFW + void *ofw_node; +#endif + void *private_data; }; #ifdef __cplusplus diff --git a/components/drivers/include/rtdevice.h b/components/drivers/include/rtdevice.h index ce7d7307932..50a97beee54 100644 --- a/components/drivers/include/rtdevice.h +++ b/components/drivers/include/rtdevice.h @@ -215,6 +215,10 @@ extern "C" { #include "drivers/dev_mmcsd_core.h" #include "drivers/dev_sd.h" #include "drivers/dev_sdio.h" +#if defined(RT_USING_DM) && defined(RT_USING_SDHCI) +#include "drivers/dev_sdhci.h" +#include "drivers/dev_sdhci_host.h" +#endif /* RT_USING_DM && RT_USING_SDHCI */ #endif /* RT_USING_SDIO */ diff --git a/components/drivers/sdio/Kconfig b/components/drivers/sdio/Kconfig index b0c28869db8..232a65efd91 100644 --- a/components/drivers/sdio/Kconfig +++ b/components/drivers/sdio/Kconfig @@ -1,4 +1,4 @@ -config RT_USING_SDIO 
+menuconfig RT_USING_SDIO bool "Using SD/MMC device drivers" select RT_USING_BLK default n @@ -6,7 +6,7 @@ config RT_USING_SDIO if RT_USING_SDIO config RT_SDIO_STACK_SIZE int "The stack size for sdio irq thread" - default 512 + default IDLE_THREAD_STACK_SIZE config RT_SDIO_THREAD_PRIORITY int "The priority level value of sdio irq thread" @@ -14,7 +14,7 @@ config RT_USING_SDIO config RT_MMCSD_STACK_SIZE int "The stack size for mmcsd thread" - default 1024 + default IDLE_THREAD_STACK_SIZE config RT_MMCSD_THREAD_PRIORITY int "The priority level value of mmcsd thread" @@ -30,3 +30,7 @@ config RT_USING_SDIO bool "Using sdhci for sd/mmc drivers" default n endif + +if RT_USING_DM && RT_USING_SDIO + rsource "host/Kconfig" +endif diff --git a/components/drivers/sdio/SConscript b/components/drivers/sdio/SConscript index 98d71d1394f..640c099f38c 100644 --- a/components/drivers/sdio/SConscript +++ b/components/drivers/sdio/SConscript @@ -1,23 +1,32 @@ -Import('RTT_ROOT') from building import * +group = [] +objs = [] + +if not GetDepend(['RT_USING_SDIO']): + Return('group') + cwd = GetCurrentDir() -src = Split(""" -dev_block.c -dev_mmcsd_core.c -dev_sd.c -dev_sdio.c -dev_mmc.c -""") +list = os.listdir(cwd) +CPPPATH = [cwd + '/../include'] + +src = ['dev_block.c', 'dev_mmcsd_core.c', 'dev_sd.c', 'dev_sdio.c', 'dev_mmc.c'] + +if GetDepend(['RT_USING_DM']): + src += ['dev_sdio_dm.c'] + + if GetDepend('RT_USING_SDHCI'): + src += ['dev_sdhci.c', 'dev_sdhci_dm.c', 'dev_sdhci_host.c'] -# The set of source files associated with this SConscript file. -path = [cwd + '/../include' , cwd + '/sdhci/include'] + if GetDepend(['RT_USING_REGULATOR']): + src += ['dev_regulator.c'] -if GetDepend('RT_USING_SDHCI'): - src += [os.path.join('sdhci', 'sdhci.c')] - src += [os.path.join('sdhci', 'fit-mmc.c')] - src += [os.path.join('sdhci', 'sdhci-platform.c')] +group = DefineGroup('DeviceDrivers', src, depend = [''], CPPPATH = CPPPATH) -group = DefineGroup('DeviceDrivers', src, depend = ['RT_USING_SDIO'], CPPPATH = path) +for d in list: + path = os.path.join(cwd, d) + if os.path.isfile(os.path.join(path, 'SConscript')): + objs = objs + SConscript(os.path.join(d, 'SConscript')) +objs = objs + group -Return('group') +Return('objs') diff --git a/components/drivers/sdio/dev_mmcsd_core.c b/components/drivers/sdio/dev_mmcsd_core.c index dad5fe09a33..9f80bb9b60b 100644 --- a/components/drivers/sdio/dev_mmcsd_core.c +++ b/components/drivers/sdio/dev_mmcsd_core.c @@ -549,6 +549,281 @@ rt_uint32_t mmcsd_select_voltage(struct rt_mmcsd_host *host, rt_uint32_t ocr) return ocr; } +rt_err_t mmcsd_set_signal_voltage(struct rt_mmcsd_host *host, unsigned char signal_voltage) +{ + rt_err_t err = RT_EOK; + unsigned char old_signal_voltage = host->io_cfg.signal_voltage; + + host->io_cfg.signal_voltage = signal_voltage; + if (host->ops->signal_voltage_switch) + { + err = host->ops->signal_voltage_switch(host, &host->io_cfg); + } + + if (err) + { + host->io_cfg.signal_voltage = old_signal_voltage; + } + + return err; +} + +void mmcsd_set_initial_signal_voltage(struct rt_mmcsd_host *host) +{ + /* 3.3V -> 1.8v -> 1.2v */ + if (!mmcsd_set_signal_voltage(host, MMCSD_SIGNAL_VOLTAGE_330)) + { + LOG_D("Initial signal voltage of %sv", "3.3"); + } + else if (!mmcsd_set_signal_voltage(host, MMCSD_SIGNAL_VOLTAGE_180)) + { + LOG_D("Initial signal voltage of %sv", "1.8"); + } + else if (!mmcsd_set_signal_voltage(host, MMCSD_SIGNAL_VOLTAGE_120)) + { + LOG_D("Initial signal voltage of %sv", "1.2"); + } +} + +rt_err_t mmcsd_host_set_uhs_voltage(struct rt_mmcsd_host 
*host) +{ + rt_uint32_t old_clock = host->io_cfg.clock; + + host->io_cfg.clock = 0; + mmcsd_set_iocfg(host); + + if (mmcsd_set_signal_voltage(host, MMCSD_SIGNAL_VOLTAGE_180)) + { + return -RT_ERROR; + } + + /* Keep clock gated for at least 10 ms, though spec only says 5 ms */ + rt_thread_mdelay(10); + + host->io_cfg.clock = old_clock; + mmcsd_set_iocfg(host); + + return RT_EOK; +} + +static void mmcsd_power_cycle(struct rt_mmcsd_host *host, rt_uint32_t ocr); + +rt_err_t mmcsd_set_uhs_voltage(struct rt_mmcsd_host *host, rt_uint32_t ocr) +{ + rt_err_t err = RT_EOK; + struct rt_mmcsd_cmd cmd; + + if (!host->ops->signal_voltage_switch) + { + return -RT_EINVAL; + } + + if (!host->ops->card_busy) + { + LOG_W("%s: Cannot verify signal voltage switch", host->name); + } + + rt_memset(&cmd, 0, sizeof(struct rt_mmcsd_cmd)); + + cmd.cmd_code = VOLTAGE_SWITCH; + cmd.arg = 0; + cmd.flags = RESP_R1 | CMD_AC; + + err = mmcsd_send_cmd(host, &cmd, 0); + if (err) + { + goto power_cycle; + } + + if (!controller_is_spi(host) && (cmd.resp[0] & R1_ERROR)) + { + return -RT_EIO; + } + + /* + * The card should drive cmd and dat[0:3] low immediately + * after the response of cmd11, but wait 1 ms to be sure + */ + rt_thread_mdelay(1); + if (host->ops->card_busy && !host->ops->card_busy(host)) + { + err = -RT_ERROR; + goto power_cycle; + } + + if (mmcsd_host_set_uhs_voltage(host)) + { + /* + * Voltages may not have been switched, but we've already + * sent CMD11, so a power cycle is required anyway + */ + err = -RT_ERROR; + goto power_cycle; + } + + /* Wait for at least 1 ms according to spec */ + rt_thread_mdelay(1); + + /* + * Failure to switch is indicated by the card holding + * dat[0:3] low + */ + if (host->ops->card_busy && host->ops->card_busy(host)) + { + err = -RT_ERROR; + } + +power_cycle: + if (err) + { + LOG_D("%s: Signal voltage switch failed, power cycling card", host->name); + mmcsd_power_cycle(host, ocr); + } + + return err; +} + +static const rt_uint8_t tuning_blk_pattern_4bit[] = +{ + 0xff, 0x0f, 0xff, 0x00, 0xff, 0xcc, 0xc3, 0xcc, + 0xc3, 0x3c, 0xcc, 0xff, 0xfe, 0xff, 0xfe, 0xef, + 0xff, 0xdf, 0xff, 0xdd, 0xff, 0xfb, 0xff, 0xfb, + 0xbf, 0xff, 0x7f, 0xff, 0x77, 0xf7, 0xbd, 0xef, + 0xff, 0xf0, 0xff, 0xf0, 0x0f, 0xfc, 0xcc, 0x3c, + 0xcc, 0x33, 0xcc, 0xcf, 0xff, 0xef, 0xff, 0xee, + 0xff, 0xfd, 0xff, 0xfd, 0xdf, 0xff, 0xbf, 0xff, + 0xbb, 0xff, 0xf7, 0xff, 0xf7, 0x7f, 0x7b, 0xde, +}; + +static const rt_uint8_t tuning_blk_pattern_8bit[] = +{ + 0xff, 0xff, 0x00, 0xff, 0xff, 0xff, 0x00, 0x00, + 0xff, 0xff, 0xcc, 0xcc, 0xcc, 0x33, 0xcc, 0xcc, + 0xcc, 0x33, 0x33, 0xcc, 0xcc, 0xcc, 0xff, 0xff, + 0xff, 0xee, 0xff, 0xff, 0xff, 0xee, 0xee, 0xff, + 0xff, 0xff, 0xdd, 0xff, 0xff, 0xff, 0xdd, 0xdd, + 0xff, 0xff, 0xff, 0xbb, 0xff, 0xff, 0xff, 0xbb, + 0xbb, 0xff, 0xff, 0xff, 0x77, 0xff, 0xff, 0xff, + 0x77, 0x77, 0xff, 0x77, 0xbb, 0xdd, 0xee, 0xff, + 0xff, 0xff, 0xff, 0x00, 0xff, 0xff, 0xff, 0x00, + 0x00, 0xff, 0xff, 0xcc, 0xcc, 0xcc, 0x33, 0xcc, + 0xcc, 0xcc, 0x33, 0x33, 0xcc, 0xcc, 0xcc, 0xff, + 0xff, 0xff, 0xee, 0xff, 0xff, 0xff, 0xee, 0xee, + 0xff, 0xff, 0xff, 0xdd, 0xff, 0xff, 0xff, 0xdd, + 0xdd, 0xff, 0xff, 0xff, 0xbb, 0xff, 0xff, 0xff, + 0xbb, 0xbb, 0xff, 0xff, 0xff, 0x77, 0xff, 0xff, + 0xff, 0x77, 0x77, 0xff, 0x77, 0xbb, 0xdd, 0xee, +}; + +rt_err_t mmcsd_send_tuning(struct rt_mmcsd_host *host, rt_uint32_t opcode, rt_err_t *cmd_error) +{ + rt_err_t err = RT_EOK; + int size; + rt_uint8_t *data_buf; + const rt_uint8_t *tuning_block_pattern; + struct rt_mmcsd_req req = {}; + struct rt_mmcsd_cmd cmd = {}; + struct 
rt_mmcsd_data data = {}; + struct rt_mmcsd_io_cfg *io_cfg = &host->io_cfg; + + if (io_cfg->bus_width == MMCSD_BUS_WIDTH_8) + { + tuning_block_pattern = tuning_blk_pattern_8bit; + size = sizeof(tuning_blk_pattern_8bit); + } + else if (io_cfg->bus_width == MMCSD_BUS_WIDTH_4) + { + tuning_block_pattern = tuning_blk_pattern_4bit; + size = sizeof(tuning_blk_pattern_4bit); + } + else + { + return -RT_EINVAL; + } + + data_buf = rt_malloc(size); + if (!data_buf) + { + return -RT_ENOMEM; + } + + rt_memset(data_buf, 0, size); + rt_memset(&req, 0, sizeof(struct rt_mmcsd_req)); + rt_memset(&cmd, 0, sizeof(struct rt_mmcsd_cmd)); + rt_memset(&data, 0, sizeof(struct rt_mmcsd_data)); + + req.cmd = &cmd; + req.data = &data; + + cmd.cmd_code = opcode; + cmd.flags = RESP_R1 | CMD_ADTC; + + data.blksize = size; + data.blks = 1; + data.flags = DATA_DIR_READ; + + /* + * According to the tuning specs, Tuning process + * is normally shorter 40 executions of CMD19, + * and timeout value should be shorter than 150 ms + */ + data.timeout_ns = 150 * 1000000; + + mmcsd_send_request(host, &req); + + if (cmd_error) + { + *cmd_error = cmd.err; + } + + if (cmd.err) + { + err = cmd.err; + goto out_free; + } + + if (data.err) + { + err = data.err; + goto out_free; + } + + if (rt_memcmp(data_buf, tuning_block_pattern, size)) + { + err = -RT_EIO; + } + +out_free: + rt_free(data_buf); + + return err; +} + +rt_err_t mmcsd_send_abort_tuning(struct rt_mmcsd_host *host, rt_uint32_t opcode) +{ + struct rt_mmcsd_cmd cmd = {}; + + /* + * eMMC specification specifies that CMD12 can be used to stop a tuning + * command, but SD specification does not, so do nothing unless it is eMMC. + */ + if (opcode != SEND_TUNING_BLOCK_HS200) + { + return 0; + } + + cmd.cmd_code = STOP_TRANSMISSION; + cmd.flags = RESP_SPI_R1 | RESP_R1 | CMD_AC; + + /* + * For drivers that override R1 to R1b, set an arbitrary timeout based + * on the tuning timeout i.e. 150ms. + */ + cmd.busy_timeout = 150; + + return mmcsd_send_cmd(host, &cmd, 0); +} + static void mmcsd_power_up(struct rt_mmcsd_host *host) { int bit = __rt_fls(host->valid_ocr) - 1; @@ -568,6 +843,8 @@ static void mmcsd_power_up(struct rt_mmcsd_host *host) host->io_cfg.bus_width = MMCSD_BUS_WIDTH_1; mmcsd_set_iocfg(host); + mmcsd_set_initial_signal_voltage(host); + /* * This delay should be sufficient to allow the power supply * to reach the minimum voltage. 
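
The tuning helpers added here (`mmcsd_send_tuning()` / `mmcsd_send_abort_tuning()`) give host drivers a ready-made CMD19/CMD21 pattern check. Below is a minimal sketch of an `execute_tuning` op built on them; `demo_set_sample_delay()` and `DEMO_MAX_TAPS` stand in for controller-specific delay-line knobs and are not part of this patch.

```c
#include <rtdevice.h>

#define DEMO_MAX_TAPS 32

/* Hypothetical controller knob: selects the sampling delay tap. */
extern void demo_set_sample_delay(struct rt_mmcsd_host *host, int tap);

static rt_int32_t demo_execute_tuning(struct rt_mmcsd_host *host, rt_int32_t opcode)
{
    int tap, first_ok = -1, last_ok = -1;

    for (tap = 0; tap < DEMO_MAX_TAPS; tap++)
    {
        demo_set_sample_delay(host, tap);

        if (mmcsd_send_tuning(host, opcode, RT_NULL) == RT_EOK)
        {
            if (first_ok < 0)
            {
                first_ok = tap;
            }
            last_ok = tap;
        }
        else
        {
            /* For eMMC HS200 (CMD21) the spec allows CMD12 to abort a failed
             * tuning block; for SD CMD19 this helper is a no-op. */
            mmcsd_send_abort_tuning(host, opcode);
        }
    }

    if (first_ok < 0)
    {
        return -RT_EIO;
    }

    /* Park the sampling point in the middle of the passing window. */
    demo_set_sample_delay(host, (first_ok + last_ok) / 2);

    return RT_EOK;
}
```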
@@ -599,6 +876,17 @@ static void mmcsd_power_off(struct rt_mmcsd_host *host) mmcsd_set_iocfg(host); } +static void mmcsd_power_cycle(struct rt_mmcsd_host *host, rt_uint32_t ocr) +{ + mmcsd_power_off(host); + + /* Wait at least 1 ms according to SD spec */ + rt_thread_mdelay(1); + + mmcsd_power_up(host); + mmcsd_select_voltage(host, ocr); +} + int mmcsd_wait_cd_changed(rt_int32_t timeout) { struct rt_mmcsd_host *host; diff --git a/components/drivers/sdio/dev_regulator.c b/components/drivers/sdio/dev_regulator.c new file mode 100755 index 00000000000..6b62f7b519c --- /dev/null +++ b/components/drivers/sdio/dev_regulator.c @@ -0,0 +1,262 @@ +/* + * Copyright (c) 2006-2022, RT-Thread Development Team + * + * SPDX-License-Identifier: Apache-2.0 + * + * Change Logs: + * Date Author Notes + * 2022-12-06 GuEe-GUI first version + */ + +#include "dev_sdio_dm.h" + +#define DBG_TAG "SDIO" +#ifdef RT_SDIO_DEBUG +#define DBG_LVL DBG_LOG +#else +#define DBG_LVL DBG_INFO +#endif /* RT_SDIO_DEBUG */ +#include + +static rt_err_t ocrbitnum_to_vdd(int vdd_bit, int *min_uvolt, int *max_uvolt) +{ + int tmp; + + if (!vdd_bit) + { + return -RT_EINVAL; + } + + tmp = vdd_bit - rt_ilog2(VDD_165_195); + + if (tmp == 0) + { + *min_uvolt = 1650 * 1000; + *max_uvolt = 1950 * 1000; + } + else + { + *min_uvolt = 1900 * 1000 + tmp * 100 * 1000; + *max_uvolt = *min_uvolt + 100 * 1000; + } + + return 0; +} + +rt_err_t sdio_regulator_set_ocr(struct rt_mmcsd_host *host, + struct rt_regulator *supply, rt_uint16_t vdd_bit) +{ + rt_err_t err = RT_EOK; + + if (!host) + { + return -RT_EINVAL; + } + + if (rt_is_err_or_null(supply)) + { + return RT_EOK; + } + + if (vdd_bit) + { + int min_uvolt, max_uvolt; + + ocrbitnum_to_vdd(vdd_bit, &min_uvolt, &max_uvolt); + + err = rt_regulator_set_voltage(supply, min_uvolt, max_uvolt); + + if (!err && host->supply.regulator_enabled) + { + err = rt_regulator_enable(supply); + + if (!err) + { + host->supply.regulator_enabled = RT_TRUE; + } + } + } + else if (host->supply.regulator_enabled) + { + err = rt_regulator_disable(supply); + + if (!err) + { + host->supply.regulator_enabled = RT_FALSE; + } + } + + if (err) + { + LOG_E("Set regulator OCR %d error = %s", vdd_bit, rt_strerror(err)); + } + + return err; +} + +static int regulator_set_voltage_if_supported(struct rt_regulator *regulator, + int min_uvolt, int target_uvolt, int max_uvolt) +{ + if (!rt_regulator_is_supported_voltage(regulator, min_uvolt, max_uvolt)) + { + return -RT_EINVAL; + } + + if (rt_regulator_get_voltage(regulator) == target_uvolt) + { + return RT_EOK; + } + + return rt_regulator_set_voltage_triplet(regulator, min_uvolt, target_uvolt, + max_uvolt); +} + +rt_err_t sdio_regulator_set_vqmmc(struct rt_mmcsd_host *host, + struct rt_mmcsd_io_cfg *ios) +{ + rt_err_t err; + int uvolt, min_uvolt, max_uvolt; + + if (rt_is_err_or_null(host->supply.vqmmc)) + { + return -RT_EINVAL; + } + + switch (ios->signal_voltage) + { + case MMCSD_SIGNAL_VOLTAGE_120: + return regulator_set_voltage_if_supported(host->supply.vqmmc, + 1100000, 1200000, 1300000); + + case MMCSD_SIGNAL_VOLTAGE_180: + return regulator_set_voltage_if_supported(host->supply.vqmmc, + 1700000, 1800000, 1950000); + + case MMCSD_SIGNAL_VOLTAGE_330: + err = ocrbitnum_to_vdd(host->io_cfg.vdd, &uvolt, &max_uvolt); + + if (err) + { + return err; + } + + min_uvolt = rt_max(uvolt - 300000, 2700000); + max_uvolt = rt_min(max_uvolt + 200000, 3600000); + + err = regulator_set_voltage_if_supported(host->supply.vqmmc, + min_uvolt, uvolt, max_uvolt); + if (err >= 0) + { + return err; + } + + 
return regulator_set_voltage_if_supported(host->supply.vqmmc, + 2700000, uvolt, 3600000); + + default: + return -RT_EINVAL; + } +} + +rt_err_t sdio_regulator_get_supply(struct rt_device *dev, struct rt_mmcsd_host *host) +{ + rt_err_t err; + + if (!dev || !host) + { + return -RT_EINVAL; + } + + host->supply.vmmc = rt_regulator_get(dev, "vmmc"); + host->supply.vqmmc = rt_regulator_get(dev, "vqmmc"); + + if (!rt_is_err(host->supply.vmmc)) + { + if (!host->supply.vmmc) + { + LOG_D("%s: No %s regulator found", rt_dm_dev_get_name(dev), "vmmc"); + } + } + else + { + err = rt_ptr_err(host->supply.vmmc); + goto _fail; + } + + if (!rt_is_err(host->supply.vqmmc)) + { + if (!host->supply.vqmmc) + { + LOG_D("%s: No %s regulator found", rt_dm_dev_get_name(dev), "vqmmc"); + } + } + else + { + err = rt_ptr_err(host->supply.vqmmc); + goto _fail; + } + + return RT_EOK; + +_fail: + if (!rt_is_err_or_null(host->supply.vmmc)) + { + rt_regulator_put(host->supply.vmmc); + host->supply.vmmc = RT_NULL; + } + + if (!rt_is_err_or_null(host->supply.vqmmc)) + { + rt_regulator_put(host->supply.vqmmc); + host->supply.vqmmc = RT_NULL; + } + + return err; +} + +rt_err_t sdio_regulator_enable_vqmmc(struct rt_mmcsd_host *host) +{ + struct rt_mmcsd_supply *supply; + + if (!host) + { + return -RT_EINVAL; + } + + supply = &host->supply; + + if (!rt_is_err_or_null(supply->vqmmc) && !supply->vqmmc_enabled) + { + rt_err_t err = rt_regulator_enable(supply->vqmmc); + + if (err) + { + LOG_E("Enabling vqmmc regulator failed error = %s", rt_strerror(err)); + } + else + { + supply->vqmmc_enabled = RT_TRUE; + } + } + + return RT_EOK; +} + +void sdio_regulator_disable_vqmmc(struct rt_mmcsd_host *host) +{ + struct rt_mmcsd_supply *supply; + + if (!host) + { + return; + } + + supply = &host->supply; + + if (!rt_is_err_or_null(supply->vqmmc) && supply->vqmmc_enabled) + { + rt_regulator_disable(supply->vqmmc); + + supply->vqmmc_enabled = RT_FALSE; + } +} diff --git a/components/drivers/sdio/dev_sd.c b/components/drivers/sdio/dev_sd.c index 22725dbe1d9..d0193bdd1d7 100644 --- a/components/drivers/sdio/dev_sd.c +++ b/components/drivers/sdio/dev_sd.c @@ -48,7 +48,7 @@ rt_inline rt_uint32_t GET_BITS(rt_uint32_t *resp, rt_uint32_t size) { const rt_int32_t __size = size; - const rt_uint32_t __mask = (__size < 32 ? 1 << __size : 0) - 1; + const rt_uint32_t __mask = (__size < 32 ? 
1U << __size : 0) - 1; const rt_int32_t __off = 3 - ((start) / 32); const rt_int32_t __shft = (start) & 31; rt_uint32_t __res; @@ -274,7 +274,7 @@ static rt_int32_t mmcsd_switch(struct rt_mmcsd_card *card) card->max_data_rate = 50000000; if (switch_func_timing == SD_SWITCH_FUNC_TIMING_SDR104) { - LOG_I("sd: switch to SDR104 mode\n"); + LOG_I("SD card switch to %s mode", "SDR104"); mmcsd_set_timing(card->host, MMCSD_TIMING_UHS_SDR104); mmcsd_set_clock(card->host, 208000000); err = mmcsd_excute_tuning(card); @@ -282,7 +282,7 @@ static rt_int32_t mmcsd_switch(struct rt_mmcsd_card *card) } else if (switch_func_timing == SD_SWITCH_FUNC_TIMING_SDR50) { - LOG_I("sd: switch to SDR50 mode\n"); + LOG_I("SD card switch to %s mode", "SDR50"); mmcsd_set_timing(card->host, MMCSD_TIMING_UHS_SDR50); mmcsd_set_clock(card->host, 100000000); err = mmcsd_excute_tuning(card); @@ -290,13 +290,13 @@ static rt_int32_t mmcsd_switch(struct rt_mmcsd_card *card) } else if (switch_func_timing == SD_SWITCH_FUNC_TIMING_DDR50) { - LOG_I("sd: switch to DDR50 mode\n"); + LOG_I("SD card switch to %s mode", "DDR50"); mmcsd_set_timing(card->host, MMCSD_TIMING_UHS_DDR50); mmcsd_set_clock(card->host, 50000000); } else { - LOG_I("sd: switch to High Speed / SDR25 mode \n"); + LOG_I("SD card switch to %s mode", "High Speed / SDR25"); mmcsd_set_timing(card->host, MMCSD_TIMING_SD_HS); mmcsd_set_clock(card->host, 50000000); } @@ -629,31 +629,6 @@ static rt_err_t mmcsd_read_sd_status(struct rt_mmcsd_card *card, rt_uint32_t *sd return 0; } -static rt_err_t sd_switch_voltage(struct rt_mmcsd_host *host) -{ - rt_err_t err; - struct rt_mmcsd_cmd cmd = { 0 }; - - cmd.cmd_code = VOLTAGE_SWITCH; - cmd.arg = 0; - cmd.flags = RESP_R1 | CMD_AC; - - err = mmcsd_send_cmd(host, &cmd, 0); - if (err) - return err; - - return RT_EOK; -} - -static rt_err_t sd_switch_uhs_voltage(struct rt_mmcsd_host *host) -{ - if (host->ops->switch_uhs_voltage != RT_NULL) - { - return host->ops->switch_uhs_voltage(host); - } - return -ENOSYS; -} - static rt_int32_t mmcsd_sd_init_card(struct rt_mmcsd_host *host, rt_uint32_t ocr) { @@ -675,7 +650,7 @@ static rt_int32_t mmcsd_sd_init_card(struct rt_mmcsd_host *host, ocr |= 1 << 30; /* Switch to UHS voltage if both Host and the Card support this feature */ - if (((host->valid_ocr & VDD_165_195) != 0) && (host->ops->switch_uhs_voltage != RT_NULL)) + if (((host->valid_ocr & VDD_165_195) != 0) && (host->ops->signal_voltage_switch != RT_NULL)) { ocr |= OCR_S18R; } @@ -687,10 +662,7 @@ static rt_int32_t mmcsd_sd_init_card(struct rt_mmcsd_host *host, if (ocr & OCR_S18R) { ocr = VDD_165_195; - err = sd_switch_voltage(host); - if (err) - goto err2; - err = sd_switch_uhs_voltage(host); + err = mmcsd_set_uhs_voltage(host, ocr); if (err) goto err2; } diff --git a/components/drivers/sdio/sdhci/sdhci.c b/components/drivers/sdio/dev_sdhci.c old mode 100644 new mode 100755 similarity index 73% rename from components/drivers/sdio/sdhci/sdhci.c rename to components/drivers/sdio/dev_sdhci.c index 6f52b5e9914..c8fd0646fa2 --- a/components/drivers/sdio/sdhci/sdhci.c +++ b/components/drivers/sdio/dev_sdhci.c @@ -9,94 +9,122 @@ */ #include -#include -#include "sdhci.h" -#include -#define DBG_TAG "RT_SDHCI" -#ifdef DRV_DEBUG + +#define DBG_TAG "SDHCI" +#ifdef RT_SDIO_DEBUG #define DBG_LVL DBG_LOG #else #define DBG_LVL DBG_INFO -#endif /* DRV_DEBUG */ +#endif /* RT_SDIO_DEBUG */ #include + +#include +#include +#include "dev_sdio_dm.h" + static unsigned int debug_quirks = 0; static unsigned int debug_quirks2; + 
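The dev_sd.c hunk above removes the driver-local sd_switch_voltage()/sd_switch_uhs_voltage() pair and routes UHS voltage switching through the new core helper mmcsd_set_uhs_voltage(), with the host callback renamed from switch_uhs_voltage to signal_voltage_switch. The following is a minimal sketch of what that helper plausibly looks like, reconstructed from the code removed here; the body, the recovery via the newly added mmcsd_power_cycle(), and the io_cfg handling are assumptions, so the actual dev_mmcsd_core.c implementation may differ:

    #include <drivers/dev_mmcsd_core.h>

    /* Illustrative sketch only -- not the actual RT-Thread implementation. */
    rt_err_t mmcsd_set_uhs_voltage(struct rt_mmcsd_host *host, rt_uint32_t ocr)
    {
        rt_err_t err;
        struct rt_mmcsd_cmd cmd = { 0 };

        if (host->ops->signal_voltage_switch == RT_NULL)
        {
            return -RT_ENOSYS;
        }

        /* CMD11: ask the card to start the 1.8V signalling sequence */
        cmd.cmd_code = VOLTAGE_SWITCH;
        cmd.arg      = 0;
        cmd.flags    = RESP_R1 | CMD_AC;

        err = mmcsd_send_cmd(host, &cmd, 0);
        if (err)
        {
            return err;
        }

        /* Hand the host-side switch (clock gating, pad voltage, delays)
         * to the controller driver. */
        host->io_cfg.signal_voltage = MMCSD_SIGNAL_VOLTAGE_180;
        err = host->ops->signal_voltage_switch(host, &host->io_cfg);

        if (err)
        {
            /* Assumed recovery: power-cycle so the card falls back to 3.3V
             * signalling and can be re-initialised. */
            mmcsd_power_cycle(host, ocr);
        }

        return err;
    }

Centralising the CMD11 sequencing in the core keeps the protocol side in one place, so individual host drivers such as drv_sdio.c only implement the controller-specific step behind signal_voltage_switch().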
/********************************************************* */ /* cmd */ /********************************************************* */ - - -void rt_read_reg_debug(struct rt_sdhci_host *host) -{ - rt_kprintf("0x00 addddddddddddd = %x \n", rt_sdhci_readl(host, 0x00)); - rt_kprintf("0x04 EMMC_BLOCKSIZE = %x \n", rt_sdhci_readw(host, 0x04)); - rt_kprintf("0x06 EMMC_BLOCKCOUNT = %x \n", rt_sdhci_readw(host, 0x06)); - rt_kprintf("0x08 RT_SDHCI_ARGUMENT = %x \n", rt_sdhci_readl(host, 0x08)); - rt_kprintf("0x0c EMMC_XFER_MODE = %x \n", rt_sdhci_readw(host, 0x0c)); - rt_kprintf("0x0e RT_SDHCI_COMMAND = %x \n", rt_sdhci_readw(host, 0x0e)); - rt_kprintf("0x24 RT_SDHCI_PRESENT_STATE = %x \n", rt_sdhci_readl(host, 0x24)); - rt_kprintf("0x28 RT_SDHCI_HOST_CONTROL = %x \n", rt_sdhci_readb(host, 0x28)); - rt_kprintf("0x29 RT_SDHCI_POWER_CONTROL = %x \n", rt_sdhci_readb(host, 0x29)); - rt_kprintf("0x2a EMMC_BGAP_CTRL = %x \n", rt_sdhci_readb(host, 0x2a)); - rt_kprintf("0x2c EMMC_CLK_CTRL = %x \n", rt_sdhci_readw(host, 0x2c)); - rt_kprintf("0x2e EMMC_TOUT_CTRL = %x \n", rt_sdhci_readb(host, 0x2e)); - rt_kprintf("0x2f EMMC_SW_RST = %x \n", rt_sdhci_readb(host, 0x2f)); - rt_kprintf("0x30 RT_SDHCI_INT_STATUS = %x \n", rt_sdhci_readw(host, 0x30)); - rt_kprintf("0x32 RT_SDHCI_ERR_INT_STATUS = %x \n", rt_sdhci_readw(host, 0x32)); - rt_kprintf("0x34 RT_SDHCI_INT_ENABLE = %x \n", rt_sdhci_readw(host, 0x34)); - rt_kprintf("0x36 EMMC ERROR INT STATEN = %x \n", rt_sdhci_readw(host, 0x36)); - rt_kprintf("0x38 EMMC NORMAL INT SIGNAL EN = %x \n", rt_sdhci_readw(host, 0x38)); - rt_kprintf("0x3a EMMC ERROR INT SIGNAL EN = %x \n", rt_sdhci_readw(host, 0x3a)); - rt_kprintf("0x3c EMMC_AUTO_CMD_STAT = %x \n", rt_sdhci_readw(host, 0x3c)); - rt_kprintf("0x3e EMMC_HOST_CTRL2 = %x \n", rt_sdhci_readw(host, 0x3e)); - rt_kprintf("0x40 EMMC_CAPABILITIES1 = %x \n", rt_sdhci_readl(host, 0x40)); - rt_kprintf("0x44 EMMC_CAPABILITIES2 = %x \n", rt_sdhci_readl(host, 0x44)); - rt_kprintf("0x52 EMMC_FORC_ERR_INT_STAT = %x \n", rt_sdhci_readw(host, 0x52)); - rt_kprintf("0x54 EMMC_ADMA_ERR_STAT = %x \n", rt_sdhci_readb(host, 0x54)); - rt_kprintf("0x58 EMMC_ADMA_SA = %x \n", rt_sdhci_readl(host, 0x58)); - rt_kprintf("0x66 EMMC_PRESET_SDR12 = %x \n", rt_sdhci_readw(host, 0x66)); - rt_kprintf("0x68 EMMC_PRESET_SDR25 = %x \n", rt_sdhci_readw(host, 0x68)); - rt_kprintf("0x6a EMMC_PRESET_SDR50 = %x \n", rt_sdhci_readw(host, 0x6a)); - rt_kprintf("0x6c EMMC_PRESET_SDR104 = %x \n", rt_sdhci_readw(host, 0x6c)); - rt_kprintf("0x6e EMMC_PRESET_DDR50 = %x \n", rt_sdhci_readw(host, 0x6e)); - rt_kprintf("0x78 EMMC_ADMA_ID = %x \n", rt_sdhci_readl(host, 0x78)); - rt_kprintf("0xfe EMMC_HOST_CNTRL_VERS = %x \n", rt_sdhci_readw(host, 0xfe)); - -} -static inline rt_bool_t sdhci_has_requests(struct rt_sdhci_host *host) +void rt_sdhci_read_reg_debug(struct rt_sdhci_host *host) +{ + rt_kprintf("0x00 addddddddddddd = %x\n", rt_sdhci_readl(host, 0x00)); + rt_kprintf("0x04 EMMC_BLOCKSIZE = %x\n", rt_sdhci_readw(host, 0x04)); + rt_kprintf("0x06 EMMC_BLOCKCOUNT = %x\n", rt_sdhci_readw(host, 0x06)); + rt_kprintf("0x08 RT_SDHCI_ARGUMENT = %x\n", rt_sdhci_readl(host, 0x08)); + rt_kprintf("0x0c EMMC_XFER_MODE = %x\n", rt_sdhci_readw(host, 0x0c)); + rt_kprintf("0x0e RT_SDHCI_COMMAND = %x\n", rt_sdhci_readw(host, 0x0e)); + rt_kprintf("0x24 RT_SDHCI_PRESENT_STATE = %x\n", rt_sdhci_readl(host, 0x24)); + rt_kprintf("0x28 RT_SDHCI_HOST_CONTROL = %x\n", rt_sdhci_readb(host, 0x28)); + rt_kprintf("0x29 RT_SDHCI_POWER_CONTROL = %x\n", rt_sdhci_readb(host, 0x29)); + rt_kprintf("0x2a 
EMMC_BGAP_CTRL = %x\n", rt_sdhci_readb(host, 0x2a)); + rt_kprintf("0x2c EMMC_CLK_CTRL = %x\n", rt_sdhci_readw(host, 0x2c)); + rt_kprintf("0x2e EMMC_TOUT_CTRL = %x\n", rt_sdhci_readb(host, 0x2e)); + rt_kprintf("0x2f EMMC_SW_RST = %x\n", rt_sdhci_readb(host, 0x2f)); + rt_kprintf("0x30 RT_SDHCI_INT_STATUS = %x\n", rt_sdhci_readw(host, 0x30)); + rt_kprintf("0x32 RT_SDHCI_ERR_INT_STATUS = %x\n", rt_sdhci_readw(host, 0x32)); + rt_kprintf("0x34 RT_SDHCI_INT_ENABLE = %x\n", rt_sdhci_readw(host, 0x34)); + rt_kprintf("0x36 EMMC ERROR INT STATEN = %x\n", rt_sdhci_readw(host, 0x36)); + rt_kprintf("0x38 EMMC NORMAL INT SIGNAL EN = %x\n", rt_sdhci_readw(host, 0x38)); + rt_kprintf("0x3a EMMC ERROR INT SIGNAL EN = %x\n", rt_sdhci_readw(host, 0x3a)); + rt_kprintf("0x3c EMMC_AUTO_CMD_STAT = %x\n", rt_sdhci_readw(host, 0x3c)); + rt_kprintf("0x3e EMMC_HOST_CTRL2 = %x\n", rt_sdhci_readw(host, 0x3e)); + rt_kprintf("0x40 EMMC_CAPABILITIES1 = %x\n", rt_sdhci_readl(host, 0x40)); + rt_kprintf("0x44 EMMC_CAPABILITIES2 = %x\n", rt_sdhci_readl(host, 0x44)); + rt_kprintf("0x52 EMMC_FORC_ERR_INT_STAT = %x\n", rt_sdhci_readw(host, 0x52)); + rt_kprintf("0x54 EMMC_ADMA_ERR_STAT = %x\n", rt_sdhci_readb(host, 0x54)); + rt_kprintf("0x58 EMMC_ADMA_SA = %x\n", rt_sdhci_readl(host, 0x58)); + rt_kprintf("0x66 EMMC_PRESET_SDR12 = %x\n", rt_sdhci_readw(host, 0x66)); + rt_kprintf("0x68 EMMC_PRESET_SDR25 = %x\n", rt_sdhci_readw(host, 0x68)); + rt_kprintf("0x6a EMMC_PRESET_SDR50 = %x\n", rt_sdhci_readw(host, 0x6a)); + rt_kprintf("0x6c EMMC_PRESET_SDR104 = %x\n", rt_sdhci_readw(host, 0x6c)); + rt_kprintf("0x6e EMMC_PRESET_DDR50 = %x\n", rt_sdhci_readw(host, 0x6e)); + rt_kprintf("0x78 EMMC_ADMA_ID = %x\n", rt_sdhci_readl(host, 0x78)); + rt_kprintf("0xfe EMMC_HOST_CNTRL_VERS = %x\n", rt_sdhci_readw(host, 0xfe)); +} + +rt_inline rt_bool_t sdhci_has_requests(struct rt_sdhci_host *host) { return host->cmd || host->data_cmd; } -static inline rt_bool_t sdhci_auto_cmd23(struct rt_sdhci_host *host, - struct rt_mmcsd_req *mrq) +rt_inline rt_bool_t sdhci_auto_cmd23(struct rt_sdhci_host *host, + struct rt_mmcsd_req *mrq) { return mrq->sbc && (host->flags & RT_SDHCI_AUTO_CMD23); } -static inline rt_bool_t sdhci_auto_cmd12(struct rt_sdhci_host *host, - struct rt_mmcsd_req *mrq) +rt_inline rt_bool_t sdhci_auto_cmd12(struct rt_sdhci_host *host, + struct rt_mmcsd_req *mrq) { return !mrq->sbc && (host->flags & RT_SDHCI_AUTO_CMD12) && !mrq->cap_cmd_during_tfr; } -static inline rt_bool_t sdhci_manual_cmd23(struct rt_sdhci_host *host, - struct rt_mmcsd_req *mrq) +rt_inline rt_bool_t sdhci_manual_cmd23(struct rt_sdhci_host *host, + struct rt_mmcsd_req *mrq) { return mrq->sbc && !(host->flags & RT_SDHCI_AUTO_CMD23); } -static inline rt_bool_t sdhci_data_line_cmd(struct rt_mmcsd_cmd *cmd) +rt_inline rt_bool_t sdhci_data_line_cmd(struct rt_mmcsd_cmd *cmd) { return cmd->data || cmd->flags & MMC_RSP_BUSY; } +static void sdhci_do_enable_v4_mode(struct rt_sdhci_host *host) +{ + rt_uint16_t ctrl2; + + ctrl2 = rt_sdhci_readw(host, RT_SDHCI_HOST_CONTROL2); + + if (ctrl2 & RT_SDHCI_CTRL_V4_MODE) + { + return; + } + + ctrl2 |= RT_SDHCI_CTRL_V4_MODE; + rt_sdhci_writew(host, ctrl2, RT_SDHCI_HOST_CONTROL2); +} + +void rt_sdhci_enable_v4_mode(struct rt_sdhci_host *host) +{ + host->v4_mode = RT_TRUE; + sdhci_do_enable_v4_mode(host); +} + void rt_sdhci_data_irq_timeout(struct rt_sdhci_host *host, rt_bool_t enable) { if (enable) + { host->ier |= RT_SDHCI_INT_DATA_TIMEOUT; + } else + { host->ier &= ~RT_SDHCI_INT_DATA_TIMEOUT; + } + rt_sdhci_writel(host, host->ier, 
RT_SDHCI_INT_ENABLE); rt_sdhci_writel(host, host->ier, RT_SDHCI_SIGNAL_ENABLE); } @@ -109,17 +137,30 @@ void rt_sdhci_set_uhs(struct rt_sdhci_host *host, unsigned timing) ctrl_2 &= ~RT_SDHCI_CTRL_UHS_MASK; if ((timing == MMC_TIMING_MMC_HS200) || (timing == MMC_TIMING_UHS_SDR104)) + { ctrl_2 |= RT_SDHCI_CTRL_UHS_SDR104; + } else if (timing == MMC_TIMING_UHS_SDR12) + { ctrl_2 |= RT_SDHCI_CTRL_UHS_SDR12; + } else if (timing == MMC_TIMING_UHS_SDR25) + { ctrl_2 |= RT_SDHCI_CTRL_UHS_SDR25; + } else if (timing == MMC_TIMING_UHS_SDR50) + { ctrl_2 |= RT_SDHCI_CTRL_UHS_SDR50; + } else if ((timing == MMC_TIMING_UHS_DDR50) || (timing == MMC_TIMING_MMC_DDR52)) + { ctrl_2 |= RT_SDHCI_CTRL_UHS_DDR50; + } else if (timing == MMC_TIMING_MMC_HS400) + { ctrl_2 |= RT_SDHCI_CTRL_HS400; /* Non-standard */ + } + rt_sdhci_writew(host, ctrl_2, RT_SDHCI_HOST_CONTROL2); } @@ -136,59 +177,66 @@ void rt_sdhci_set_bus_width(struct rt_sdhci_host *host, int width) else { if (host->mmc->caps & MMC_CAP_8_BIT_DATA) + { ctrl &= ~RT_SDHCI_CTRL_8BITBUS; + } if (width == MMC_BUS_WIDTH_4) + { ctrl |= RT_SDHCI_CTRL_4BITBUS; + } else + { ctrl &= ~RT_SDHCI_CTRL_4BITBUS; + } } + rt_sdhci_writeb(host, ctrl, RT_SDHCI_HOST_CONTROL); } -static inline rt_bool_t sdhci_can_64bit_dma(struct rt_sdhci_host *host) +rt_inline rt_bool_t sdhci_can_64bit_dma(struct rt_sdhci_host *host) { if (host->version >= RT_SDHCI_SPEC_410 && host->v4_mode) + { return host->caps & RT_SDHCI_CAN_64BIT_V4; + } return host->caps & RT_SDHCI_CAN_64BIT; } -static void sdhci_do_enable_v4_mode(struct rt_sdhci_host *host) -{ - rt_uint16_t ctrl2; - - ctrl2 = rt_sdhci_readw(host, RT_SDHCI_HOST_CONTROL2); - if (ctrl2 & RT_SDHCI_CTRL_V4_MODE) - return; - - ctrl2 |= RT_SDHCI_CTRL_V4_MODE; - rt_sdhci_writew(host, ctrl2, RT_SDHCI_HOST_CONTROL2); -} - void rt_sdhci_cleanup_host(struct rt_sdhci_host *host) { - return; + struct rt_mmc_host *mmc = host->mmc; + + if (host->sdhci_core_to_disable_vqmmc) + { + rt_regulator_disable(mmc->rthost.supply.vqmmc); + } } static void sdhci_set_default_irqs(struct rt_sdhci_host *host) { - host->ier = RT_SDHCI_INT_BUS_POWER | RT_SDHCI_INT_DATA_END_BIT | RT_SDHCI_INT_DATA_CRC | RT_SDHCI_INT_DATA_TIMEOUT | RT_SDHCI_INT_INDEX | RT_SDHCI_INT_END_BIT | RT_SDHCI_INT_CRC | RT_SDHCI_INT_TIMEOUT | RT_SDHCI_INT_DATA_END | RT_SDHCI_INT_RESPONSE; + host->ier = RT_SDHCI_INT_BUS_POWER | RT_SDHCI_INT_DATA_END_BIT | + RT_SDHCI_INT_DATA_CRC | RT_SDHCI_INT_DATA_TIMEOUT | + RT_SDHCI_INT_INDEX | RT_SDHCI_INT_END_BIT | RT_SDHCI_INT_CRC | + RT_SDHCI_INT_TIMEOUT | RT_SDHCI_INT_DATA_END | RT_SDHCI_INT_RESPONSE; - if (host->tuning_mode == RT_SDHCI_TUNING_MODE_2 || host->tuning_mode == RT_SDHCI_TUNING_MODE_3) + if (host->tuning_mode == RT_SDHCI_TUNING_MODE_2 || + host->tuning_mode == RT_SDHCI_TUNING_MODE_3) + { host->ier |= RT_SDHCI_INT_RETUNE; + } rt_sdhci_writel(host, host->ier, RT_SDHCI_INT_ENABLE); rt_sdhci_writel(host, host->ier, RT_SDHCI_SIGNAL_ENABLE); } - -static inline void sdhci_auto_cmd_select(struct rt_sdhci_host *host, - struct rt_mmcsd_cmd *cmd, - rt_uint16_t *mode) +rt_inline void sdhci_auto_cmd_select(struct rt_sdhci_host *host, + struct rt_mmcsd_cmd *cmd, + rt_uint16_t *mode) { - rt_bool_t use_cmd12 = sdhci_auto_cmd12(host, cmd->mrq) && (cmd->cmd_code != SD_IO_RW_EXTENDED); - rt_bool_t use_cmd23 = sdhci_auto_cmd23(host, cmd->mrq); rt_uint16_t ctrl2; + rt_bool_t use_cmd23 = sdhci_auto_cmd23(host, cmd->mrq); + rt_bool_t use_cmd12 = sdhci_auto_cmd12(host, cmd->mrq) && (cmd->cmd_code != SD_IO_RW_EXTENDED); if (host->version >= RT_SDHCI_SPEC_410 && 
host->v4_mode && (use_cmd12 || use_cmd23)) { @@ -196,22 +244,30 @@ static inline void sdhci_auto_cmd_select(struct rt_sdhci_host *host, ctrl2 = rt_sdhci_readw(host, RT_SDHCI_HOST_CONTROL2); if (use_cmd23) + { ctrl2 |= RT_SDHCI_CMD23_ENABLE; + } else + { ctrl2 &= ~RT_SDHCI_CMD23_ENABLE; + } + rt_sdhci_writew(host, ctrl2, RT_SDHCI_HOST_CONTROL2); return; } if (use_cmd12) + { *mode |= RT_SDHCI_TRNS_AUTO_CMD12; + } else if (use_cmd23) + { *mode |= RT_SDHCI_TRNS_AUTO_CMD23; + } } - -static rt_bool_t sdhci_present_error(struct rt_sdhci_host *host, +static rt_bool_t sdhci_present_error(struct rt_sdhci_host *host, struct rt_mmcsd_cmd *cmd, rt_bool_t present) { if (!present || host->flags & RT_SDHCI_DEVICE_DEAD) @@ -257,6 +313,7 @@ static rt_uint16_t sdhci_get_preset_value(struct rt_sdhci_host *host) preset = rt_sdhci_readw(host, RT_SDHCI_PRESET_FOR_SDR12); break; } + return preset; } @@ -264,8 +321,11 @@ static void sdhci_set_card_detection(struct rt_sdhci_host *host, rt_bool_t enabl { rt_uint32_t present; - if ((host->quirks & RT_SDHCI_QUIRK_BROKEN_CARD_DETECTION) || !mmc_card_is_removable(host->mmc)) + if ((host->quirks & RT_SDHCI_QUIRK_BROKEN_CARD_DETECTION) || + !mmc_card_is_removable(host->mmc)) + { return; + } if (enable) { @@ -302,7 +362,10 @@ enum sdhci_reset_reason static rt_bool_t sdhci_needs_reset(struct rt_sdhci_host *host, struct rt_mmcsd_req *mrq) { - return (!(host->flags & RT_SDHCI_DEVICE_DEAD) && ((mrq->cmd && mrq->cmd->err) || (mrq->sbc && mrq->sbc->err) || (mrq->data && mrq->data->stop && mrq->data->stop->err) || (host->quirks & RT_SDHCI_QUIRK_RESET_AFTER_REQUEST))); + return (!(host->flags & RT_SDHCI_DEVICE_DEAD) && + ((mrq->cmd && mrq->cmd->err) || (mrq->sbc && mrq->sbc->err) || + (mrq->data && mrq->data->stop && mrq->data->stop->err) || + (host->quirks & RT_SDHCI_QUIRK_RESET_AFTER_REQUEST))); } static rt_bool_t sdhci_do_reset(struct rt_sdhci_host *host, rt_uint8_t mask) @@ -312,16 +375,21 @@ static rt_bool_t sdhci_do_reset(struct rt_sdhci_host *host, rt_uint8_t mask) struct rt_mmc_host *mmc = host->mmc; if (!mmc->ops->get_cd(mmc)) + { return RT_FALSE; + } } + if (host->ops->reset) { host->ops->reset(host, mask); } + return RT_TRUE; } -static void sdhci_reset_for_reason(struct rt_sdhci_host *host, enum sdhci_reset_reason reason) +static void sdhci_reset_for_reason(struct rt_sdhci_host *host, + enum sdhci_reset_reason reason) { if (host->quirks2 & RT_SDHCI_QUIRK2_ISSUE_CMD_DAT_RESET_TOGETHER) { @@ -356,55 +424,70 @@ static void sdhci_reset_for_all(struct rt_sdhci_host *host) if (host->flags & (RT_SDHCI_USE_SDMA)) { if (host->ops->enable_dma) + { host->ops->enable_dma(host); + } } host->preset_enabled = RT_FALSE; } } - static void sdhci_runtime_pm_bus_on(struct rt_sdhci_host *host) { if (host->bus_on) + { return; + } + host->bus_on = RT_TRUE; } static void sdhci_runtime_pm_bus_off(struct rt_sdhci_host *host) { if (!host->bus_on) + { return; + } + host->bus_on = RT_FALSE; } void rt_sdhci_reset(struct rt_sdhci_host *host, rt_uint8_t mask) { - ssize_t timeout; + rt_ssize_t timeout; rt_sdhci_writeb(host, mask, RT_SDHCI_SOFTWARE_RESET); if (mask & RT_SDHCI_RESET_ALL) { host->clock = 0; + if (host->quirks2 & RT_SDHCI_QUIRK2_CARD_ON_NEEDS_BUS_ON) + { sdhci_runtime_pm_bus_off(host); + } } timeout = rt_tick_from_millisecond(150); - while (1) + + while (RT_TRUE) { timeout = timeout - rt_tick_get(); - if (!(rt_sdhci_readb(host, RT_SDHCI_SOFTWARE_RESET) & mask)) + { break; + } + if (timeout < 0) { rt_kprintf("%s: Reset 0x%x never completed.\n", mmc_hostname(host->mmc), (int)mask); - 
rt_read_reg_debug(host); + rt_sdhci_read_reg_debug(host); + return; } + rt_hw_us_delay(10); } } @@ -419,37 +502,47 @@ static rt_ubase_t sdhci_sdma_address(struct rt_sdhci_host *host) static void sdhci_set_adma_addr(struct rt_sdhci_host *host, rt_uint32_t addr) { - rt_sdhci_writel(host, lower_32_bits(addr), RT_SDHCI_ADMA_ADDRESS); + rt_sdhci_writel(host, rt_lower_32_bits(addr), RT_SDHCI_ADMA_ADDRESS); + if (host->flags & RT_SDHCI_USE_64_BIT_DMA) - rt_sdhci_writel(host, upper_32_bits(addr), RT_SDHCI_ADMA_ADDRESS_HI); + { + rt_sdhci_writel(host, rt_upper_32_bits(addr), RT_SDHCI_ADMA_ADDRESS_HI); + } } static void sdhci_set_sdma_addr(struct rt_sdhci_host *host, rt_uint32_t addr) { if (host->v4_mode) + { sdhci_set_adma_addr(host, addr); + } else + { rt_sdhci_writel(host, addr, RT_SDHCI_DMA_ADDRESS); + } } static void sdhci_config_dma(struct rt_sdhci_host *host) { - rt_uint8_t ctrl; + rt_uint8_t ctrl; rt_uint16_t ctrl2; if (host->version < RT_SDHCI_SPEC_200) + { return; + } ctrl = rt_sdhci_readb(host, RT_SDHCI_HOST_CONTROL); ctrl &= ~RT_SDHCI_CTRL_DMA_MASK; if (!(host->flags & RT_SDHCI_REQ_USE_DMA)) - goto out; + { + goto _out; + } /* Note if DMA Select is zero then SDMA is selected */ if (host->flags & RT_SDHCI_USE_64_BIT_DMA) { - if (host->v4_mode) { ctrl2 = rt_sdhci_readw(host, RT_SDHCI_HOST_CONTROL2); @@ -458,40 +551,60 @@ static void sdhci_config_dma(struct rt_sdhci_host *host) } } -out: +_out: rt_sdhci_writeb(host, ctrl, RT_SDHCI_HOST_CONTROL); } -static inline void sdhci_set_block_info(struct rt_sdhci_host *host, - struct rt_mmcsd_data *data) +rt_inline void sdhci_set_block_info(struct rt_sdhci_host *host, + struct rt_mmcsd_data *data) { int boundary; - size_t total_size = data->blks * data->blksize; + rt_size_t total_size = data->blks * data->blksize; if (total_size <= 512) + { boundary = 0; /* 4k bytes*/ + } else if (total_size <= 1024) + { boundary = 1; /* 8 KB*/ + } else if (total_size <= 2048) + { boundary = 2; /* 16 KB*/ + } else if (total_size <= 4096) + { boundary = 3; /* 32 KB*/ + } else if (total_size <= 8192) + { boundary = 4; /* 64 KB*/ + } else if (total_size <= 16384) + { boundary = 5; /* 128 KB*/ + } else if (total_size <= 32768) + { boundary = 6; /* 256 KB*/ + } else + { boundary = 7; /* 512 KB*/ - rt_sdhci_writew(host, - RT_SDHCI_MAKE_BLKSZ(boundary, data->blksize), - RT_SDHCI_BLOCK_SIZE); + } + + rt_sdhci_writew(host, RT_SDHCI_MAKE_BLKSZ(boundary, data->blksize), + RT_SDHCI_BLOCK_SIZE); - if (host->version >= RT_SDHCI_SPEC_410 && host->v4_mode && (host->quirks2 & RT_SDHCI_QUIRK2_USE_32BIT_BLK_CNT)) + if (host->version >= RT_SDHCI_SPEC_410 && + host->v4_mode && (host->quirks2 & RT_SDHCI_QUIRK2_USE_32BIT_BLK_CNT)) { if (rt_sdhci_readw(host, RT_SDHCI_BLOCK_COUNT)) + { rt_sdhci_writew(host, 0, RT_SDHCI_BLOCK_COUNT); + } + rt_sdhci_writew(host, data->blks, RT_SDHCI_32BIT_BLK_CNT); } else @@ -502,18 +615,27 @@ static inline void sdhci_set_block_info(struct rt_sdhci_host *host, static void sdhci_set_transfer_irqs(struct rt_sdhci_host *host) { - rt_uint32_t pio_irqs = RT_SDHCI_INT_DATA_AVAIL | RT_SDHCI_INT_SPACE_AVAIL; rt_uint32_t dma_irqs = RT_SDHCI_INT_DMA_END; + rt_uint32_t pio_irqs = RT_SDHCI_INT_DATA_AVAIL | RT_SDHCI_INT_SPACE_AVAIL; if (host->flags & RT_SDHCI_REQ_USE_DMA) + { host->ier = (host->ier & ~pio_irqs) | dma_irqs; + } else + { host->ier = (host->ier & ~dma_irqs) | pio_irqs; + } if (host->flags & (RT_SDHCI_AUTO_CMD23 | RT_SDHCI_AUTO_CMD12)) + { host->ier |= RT_SDHCI_INT_AUTO_CMD_ERR; + } else + { host->ier &= ~RT_SDHCI_INT_AUTO_CMD_ERR; + } + rt_sdhci_writel(host, 
host->ier, RT_SDHCI_INT_ENABLE); rt_sdhci_writel(host, host->ier, RT_SDHCI_SIGNAL_ENABLE); } @@ -522,9 +644,9 @@ static void sdhci_prepare_data(struct rt_sdhci_host *host, struct rt_mmcsd_cmd * { struct rt_mmcsd_data *data = cmd->data; - LOG_D(data->blksize * data->blks > 524288); - LOG_D(data->blksize > host->mmc->max_blk_size); - LOG_D(data->blks > 65535); + RT_ASSERT(!(data->blksize * data->blks > 524288)); + RT_ASSERT(!(data->blksize > host->mmc->max_blk_size)); + RT_ASSERT(!(data->blks > 65535)); host->data = data; host->data_early = 0; @@ -538,10 +660,15 @@ static void sdhci_prepare_data(struct rt_sdhci_host *host, struct rt_mmcsd_cmd * length_mask = 0; offset_mask = 0; + if (host->quirks & RT_SDHCI_QUIRK_32BIT_DMA_SIZE) + { length_mask = 3; + } if (host->quirks & RT_SDHCI_QUIRK_32BIT_DMA_ADDR) + { offset_mask = 3; + } if ((data->blks * data->blksize) & length_mask) { @@ -558,9 +685,13 @@ static void sdhci_prepare_data(struct rt_sdhci_host *host, struct rt_mmcsd_cmd * if (host->flags & RT_SDHCI_REQ_USE_DMA) { if (mmc_get_dma_dir(data) == DMA_FROM_DEVICE) + { rt_hw_cpu_dcache_ops(RT_HW_CACHE_INVALIDATE, data->buf, data->blks * data->blksize); + } else + { rt_hw_cpu_dcache_ops(RT_HW_CACHE_FLUSH, data->buf, data->blks * data->blksize); + } sdhci_set_sdma_addr(host, sdhci_sdma_address(host)); } @@ -579,16 +710,16 @@ static void sdhci_set_mrq_done(struct rt_sdhci_host *host, struct rt_mmcsd_req * { int i; - for (i = 0; i < RT_SDHCI_MAX_MRQS; i++) + for (i = 0; i < RT_SDHCI_MAX_MRQS; ++i) { if (host->mrqs_done[i] == mrq) { - LOG_D(1); + RT_ASSERT(0); return; } } - for (i = 0; i < RT_SDHCI_MAX_MRQS; i++) + for (i = 0; i < RT_SDHCI_MAX_MRQS; ++i) { if (!host->mrqs_done[i]) { @@ -597,97 +728,117 @@ static void sdhci_set_mrq_done(struct rt_sdhci_host *host, struct rt_mmcsd_req * } } - LOG_D(i >= RT_SDHCI_MAX_MRQS); + RT_ASSERT(!(i >= RT_SDHCI_MAX_MRQS)); } -static inline rt_bool_t sdhci_defer_done(struct rt_sdhci_host *host, - struct rt_mmcsd_req *mrq) +rt_inline rt_bool_t sdhci_defer_done(struct rt_sdhci_host *host, + struct rt_mmcsd_req *mrq) { struct rt_mmcsd_data *data = mrq->data; - return host->pending_reset || host->always_defer_done || ((host->flags & RT_SDHCI_REQ_USE_DMA) && data && data->host_cookie == COOKIE_MAPPED); + return host->pending_reset || host->always_defer_done || + ((host->flags & RT_SDHCI_REQ_USE_DMA) && data && + data->host_cookie == RT_SDHCI_COOKIE_MAPPED); } /********************************************************* */ /* pio */ /********************************************************* */ - -static void rt_sdhci_read_block_pio(struct rt_sdhci_host *host,void **buf) +static void rt_sdhci_read_block_pio(struct rt_sdhci_host *host, void **buf) { + rt_size_t len; rt_uint32_t scratch; - size_t len; - rt_uint32_t blksize = host->data->blksize; + while (blksize) { - len = min(4U, blksize); + len = rt_min_t(rt_size_t, 4U, blksize); scratch = rt_sdhci_readl(host, RT_SDHCI_BUFFER); rt_memcpy(*buf, &scratch, len); - *buf += len; + *buf += len; blksize -= len; } } static void rt_sdhci_write_block_pio(struct rt_sdhci_host *host,void **buf) { - size_t blksize, len; rt_uint32_t scratch; - LOG_D("PIO writing\n"); + rt_size_t blksize, len; - blksize = host->data->blksize; - scratch = 0; + LOG_D("PIO writing"); + + blksize = host->data->blksize; + scratch = 0; while (blksize) { - len = min(4U, blksize); + len = rt_min_t(rt_size_t, 4U, blksize); rt_memcpy(&scratch, *buf, len); - *buf += len; + + *buf += len; blksize -= len; + rt_sdhci_writel(host, scratch, RT_SDHCI_BUFFER); } } static 
void sdhci_transfer_pio(struct rt_sdhci_host *host) { + void *buf; rt_uint32_t mask; if (host->blocks == 0) + { return; + } if (host->data->flags & DATA_DIR_READ) + { mask = RT_SDHCI_DATA_AVAILABLE; + } else + { mask = RT_SDHCI_SPACE_AVAILABLE; + } - if ((host->quirks & RT_SDHCI_QUIRK_BROKEN_SMALL_PIO) && (host->data->blks == 1)) + if ((host->quirks & RT_SDHCI_QUIRK_BROKEN_SMALL_PIO) && host->data->blks == 1) { mask = ~0; } - void *buf = (void *)host->data->buf; + + buf = (void *)host->data->buf; + while (rt_sdhci_readl(host, RT_SDHCI_PRESENT_STATE) & mask) { if (host->quirks & RT_SDHCI_QUIRK_PIO_NEEDS_DELAY) + { rt_hw_us_delay(100); + } if (host->data->flags & DATA_DIR_READ) + { rt_sdhci_read_block_pio(host,&buf); + } else + { rt_sdhci_write_block_pio(host,&buf); + } + + --host->data->blks; - host->data->blks--; if (host->data->blks == 0) + { break; + } } } /********************************************************* */ /* config */ /********************************************************* */ - - static rt_bool_t sdhci_timing_has_preset(unsigned char timing) { switch (timing) @@ -700,40 +851,52 @@ static rt_bool_t sdhci_timing_has_preset(unsigned char timing) case MMC_TIMING_MMC_DDR52: return RT_TRUE; } + return RT_FALSE; } static rt_bool_t sdhci_preset_needed(struct rt_sdhci_host *host, unsigned char timing) { - return !(host->quirks2 & RT_SDHCI_QUIRK2_PRESET_VALUE_BROKEN) && sdhci_timing_has_preset(timing); + return !(host->quirks2 & RT_SDHCI_QUIRK2_PRESET_VALUE_BROKEN) && + sdhci_timing_has_preset(timing); } static rt_bool_t sdhci_presetable_values_change(struct rt_sdhci_host *host, struct rt_mmcsd_io_cfg *ios) { - return !host->preset_enabled && (sdhci_preset_needed(host, ios->timing) || host->drv_type != ios->drv_type); + return !host->preset_enabled && + (sdhci_preset_needed(host, ios->timing) || host->drv_type != ios->drv_type); } - static void sdhci_preset_value_enable(struct rt_sdhci_host *host, rt_bool_t enable) { if (host->version < RT_SDHCI_SPEC_300) + { return; + } if (host->preset_enabled != enable) { rt_uint16_t ctrl = rt_sdhci_readw(host, RT_SDHCI_HOST_CONTROL2); if (enable) + { ctrl |= RT_SDHCI_CTRL_PRESET_VAL_ENABLE; + } else + { ctrl &= ~RT_SDHCI_CTRL_PRESET_VAL_ENABLE; + } rt_sdhci_writew(host, ctrl, RT_SDHCI_HOST_CONTROL2); if (enable) + { host->flags |= RT_SDHCI_PV_ENABLED; + } else + { host->flags &= ~RT_SDHCI_PV_ENABLED; + } host->preset_enabled = enable; } @@ -744,16 +907,33 @@ static void sdhci_set_power_reg(struct rt_sdhci_host *host, unsigned char mode, { struct rt_mmc_host *mmc = host->mmc; - mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, vdd); + sdio_regulator_set_ocr(&mmc->rthost, mmc->rthost.supply.vmmc, vdd); if (mode != MMC_POWER_OFF) + { rt_sdhci_writeb(host, RT_SDHCI_POWER_ON, RT_SDHCI_POWER_CONTROL); + } else + { rt_sdhci_writeb(host, 0, RT_SDHCI_POWER_CONTROL); + } +} + +void rt_sdhci_set_power_and_bus_voltage(struct rt_sdhci_host *host, unsigned char mode, + unsigned short vdd) +{ + if (!rt_is_err_or_null(host->mmc->rthost.supply.vmmc)) + { + struct rt_mmc_host *mmc = host->mmc; + + sdio_regulator_set_ocr(&mmc->rthost, mmc->rthost.supply.vmmc, vdd); + } + + rt_sdhci_set_power_with_noreg(host, mode, vdd); } void rt_sdhci_set_power_with_noreg(struct rt_sdhci_host *host, unsigned char mode, - unsigned short vdd) + unsigned short vdd) { rt_uint8_t pwr = 0; @@ -781,55 +961,74 @@ void rt_sdhci_set_power_with_noreg(struct rt_sdhci_host *host, unsigned char mod } if (host->pwr == pwr) + { return; + } host->pwr = pwr; if (pwr == 0) { rt_sdhci_writeb(host, 0, 
RT_SDHCI_POWER_CONTROL); + if (host->quirks2 & RT_SDHCI_QUIRK2_CARD_ON_NEEDS_BUS_ON) + { sdhci_runtime_pm_bus_off(host); + } } else { if (!(host->quirks & RT_SDHCI_QUIRK_SINGLE_POWER_WRITE)) + { rt_sdhci_writeb(host, 0, RT_SDHCI_POWER_CONTROL); + } if (host->quirks & RT_SDHCI_QUIRK_NO_SIMULT_VDD_AND_POWER) + { rt_sdhci_writeb(host, pwr, RT_SDHCI_POWER_CONTROL); + } pwr |= RT_SDHCI_POWER_ON; rt_sdhci_writeb(host, pwr, RT_SDHCI_POWER_CONTROL); if (host->quirks2 & RT_SDHCI_QUIRK2_CARD_ON_NEEDS_BUS_ON) + { sdhci_runtime_pm_bus_on(host); + } if (host->quirks & RT_SDHCI_QUIRK_DELAY_AFTER_POWER) + { rt_thread_mdelay(10); + } } } void rt_sdhci_set_power(struct rt_sdhci_host *host, unsigned char mode, - unsigned short vdd) + unsigned short vdd) { - if (!host->mmc->supply.vmmc) + if (!host->mmc->rthost.supply.vmmc) + { rt_sdhci_set_power_with_noreg(host, mode, vdd); + } else + { sdhci_set_power_reg(host, mode, vdd); + } } -int rt_sdhci_start_signal_voltage_switch(struct rt_mmc_host *mmc, - struct rt_mmcsd_io_cfg *ios) +int rt_sdhci_start_signal_voltage_switch(struct rt_mmc_host *mmc, + struct rt_mmcsd_io_cfg *ios) { - struct rt_sdhci_host *host = mmc_priv(mmc); - rt_uint16_t ctrl; - int ret; + int ret; + rt_uint16_t ctrl; + struct rt_sdhci_host *host = rt_mmc_priv(mmc); if (host->version < RT_SDHCI_SPEC_300) + { return 0; + } ctrl = rt_sdhci_readw(host, RT_SDHCI_HOST_CONTROL2); @@ -837,33 +1036,44 @@ int rt_sdhci_start_signal_voltage_switch(struct rt_mmc_host *mmc, { case MMC_SIGNAL_VOLTAGE_330: if (!(host->flags & RT_SDHCI_SIGNALING_330)) + { return -EINVAL; + } + ctrl &= ~RT_SDHCI_CTRL_VDD_180; rt_sdhci_writew(host, ctrl, RT_SDHCI_HOST_CONTROL2); - if (!mmc->supply.vqmmc) + if (mmc->rthost.supply.vqmmc) { - ret = rt_mmc_regulator_set_vqmmc(mmc, ios); + ret = sdio_regulator_set_vqmmc(&mmc->rthost, ios); if (ret < 0) { return -EIO; } } + rt_thread_mdelay(5); ctrl = rt_sdhci_readw(host, RT_SDHCI_HOST_CONTROL2); if (!(ctrl & RT_SDHCI_CTRL_VDD_180)) + { return 0; + } + return -EAGAIN; + case MMC_SIGNAL_VOLTAGE_180: if (!(host->flags & RT_SDHCI_SIGNALING_180)) + { return -EINVAL; - if (!mmc->supply.vqmmc) + } + + if (mmc->rthost.supply.vqmmc) { - ret = rt_mmc_regulator_set_vqmmc(mmc, ios); + ret = sdio_regulator_set_vqmmc(&mmc->rthost, ios); if (ret < 0) { - LOG_D("%s: Switching to 1.8V signalling voltage failed\n", + LOG_D("%s: Switching to 1.8V signalling voltage failed", mmc_hostname(mmc)); return -EIO; } @@ -873,71 +1083,95 @@ int rt_sdhci_start_signal_voltage_switch(struct rt_mmc_host *mmc, rt_sdhci_writew(host, ctrl, RT_SDHCI_HOST_CONTROL2); if (host->ops->voltage_switch) + { host->ops->voltage_switch(host); + } ctrl = rt_sdhci_readw(host, RT_SDHCI_HOST_CONTROL2); if (ctrl & RT_SDHCI_CTRL_VDD_180) + { return 0; + } - LOG_D("%s: 1.8V regulator output did not become stable\n", + LOG_D("%s: 1.8V regulator output did not become stable", mmc_hostname(mmc)); return -EAGAIN; + case MMC_SIGNAL_VOLTAGE_120: if (!(host->flags & RT_SDHCI_SIGNALING_120)) + { return -EINVAL; - if (!mmc->supply.vqmmc) + } + + if (mmc->rthost.supply.vqmmc) { - ret = rt_mmc_regulator_set_vqmmc(mmc, ios); + ret = sdio_regulator_set_vqmmc(&mmc->rthost, ios); if (ret < 0) { - LOG_D("%s: Switching to 1.2V signalling voltage failed\n", + LOG_D("%s: Switching to 1.2V signalling voltage failed", mmc_hostname(mmc)); return -EIO; } } + return 0; + default: return 0; } } - static int sdhci_get_cd(struct rt_mmc_host *mmc) { - struct rt_sdhci_host *host = mmc_priv(mmc); - int gpio_cd = rt_mmc_gpio_get_cd(mmc); + int gpio_cd = 
rt_mmc_gpio_get_cd(mmc); + struct rt_sdhci_host *host = rt_mmc_priv(mmc); if (host->flags & RT_SDHCI_DEVICE_DEAD) + { return 0; + } if (!mmc_card_is_removable(mmc)) + { return 1; + } if (gpio_cd >= 0) + { return !!gpio_cd; + } if (host->quirks & RT_SDHCI_QUIRK_BROKEN_CARD_DETECTION) + { return 1; + } return !!(rt_sdhci_readl(host, RT_SDHCI_PRESENT_STATE) & RT_SDHCI_CARD_PRESENT); } static int sdhci_check_ro(struct rt_sdhci_host *host) { - int is_readonly; + int is_readonly; rt_base_t flags; flags = rt_spin_lock_irqsave(&host->lock); if (host->flags & RT_SDHCI_DEVICE_DEAD) + { is_readonly = 0; + } else if (host->ops->get_ro) + { is_readonly = host->ops->get_ro(host); + } else if (rt_mmc_can_gpio_ro(host->mmc)) + { is_readonly = rt_mmc_gpio_get_ro(host->mmc); + } else - is_readonly = !(rt_sdhci_readl(host, RT_SDHCI_PRESENT_STATE) - & RT_SDHCI_WRITE_PROTECT); + { + is_readonly = !(rt_sdhci_readl(host, RT_SDHCI_PRESENT_STATE) & RT_SDHCI_WRITE_PROTECT); + } rt_spin_unlock_irqrestore(&host->lock, flags); @@ -947,22 +1181,29 @@ static int sdhci_check_ro(struct rt_sdhci_host *host) #define SAMPLE_COUNT 5 static int rt_sdhci_ro_get(struct rt_mmc_host *mmc) { - struct rt_sdhci_host *host = mmc_priv(mmc); - int i, ro_count; + int ro_count; + struct rt_sdhci_host *host = rt_mmc_priv(mmc); if (!(host->quirks & RT_SDHCI_QUIRK_UNSTABLE_RO_DETECT)) + { return sdhci_check_ro(host); + } ro_count = 0; - for (i = 0; i < SAMPLE_COUNT; i++) + + for (int i = 0; i < SAMPLE_COUNT; i++) { if (sdhci_check_ro(host)) { if (++ro_count > SAMPLE_COUNT / 2) + { return 1; + } } + rt_thread_mdelay(30); } + return 0; } @@ -971,9 +1212,13 @@ static void rt_sdhci_enable_io_irq_nolock(struct rt_sdhci_host *host, int enable if (!(host->flags & RT_SDHCI_DEVICE_DEAD)) { if (enable) + { host->ier |= RT_SDHCI_INT_CARD_INT; + } else + { host->ier &= ~RT_SDHCI_INT_CARD_INT; + } rt_sdhci_writel(host, host->ier, RT_SDHCI_INT_ENABLE); rt_sdhci_writel(host, host->ier, RT_SDHCI_SIGNAL_ENABLE); @@ -982,9 +1227,11 @@ static void rt_sdhci_enable_io_irq_nolock(struct rt_sdhci_host *host, int enable static void sdhci_ack_sdio_irq(struct rt_mmc_host *mmc) { - rt_base_t flags; - struct rt_sdhci_host *host = mmc_priv(mmc); - flags = rt_spin_lock_irqsave(&host->lock); + rt_base_t flags; + struct rt_sdhci_host *host = rt_mmc_priv(mmc); + + flags = rt_spin_lock_irqsave(&host->lock); + rt_sdhci_enable_io_irq_nolock(host, RT_TRUE); rt_spin_unlock_irqrestore(&host->lock, flags); } @@ -992,13 +1239,17 @@ static void sdhci_ack_sdio_irq(struct rt_mmc_host *mmc) static void sdhci_del_timer(struct rt_sdhci_host *host, struct rt_mmcsd_req *mrq) { if (sdhci_data_line_cmd(mrq->cmd)) + { rt_timer_stop(&host->data_timer); + } else + { rt_timer_stop(&host->timer); + } } -static unsigned int sdhci_target_timeout(struct rt_sdhci_host *host, - struct rt_mmcsd_cmd *cmd, +static unsigned int sdhci_target_timeout(struct rt_sdhci_host *host, + struct rt_mmcsd_cmd *cmd, struct rt_mmcsd_data *data) { unsigned int target_timeout; @@ -1009,14 +1260,17 @@ static unsigned int sdhci_target_timeout(struct rt_sdhci_host *host, } else { - target_timeout = DIV_ROUND_UP(data->timeout_ns, 1000); + target_timeout = RT_DIV_ROUND_UP(data->timeout_ns, 1000); + if (host->clock && data->timeout_clks) { - rt_uint32_t val; + rt_uint32_t val = 1000000ULL * data->timeout_clks; - val = 1000000ULL * data->timeout_clks; - if (do_div(val, host->clock)) + if (rt_do_div(val, host->clock)) + { target_timeout++; + } + target_timeout += val; } } @@ -1024,24 +1278,31 @@ static unsigned int 
sdhci_target_timeout(struct rt_sdhci_host *host, return target_timeout; } -static rt_uint8_t sdhci_calc_timeout(struct rt_sdhci_host *host, struct rt_mmcsd_cmd *cmd, +static rt_uint8_t sdhci_calc_timeout(struct rt_sdhci_host *host, + struct rt_mmcsd_cmd *cmd, rt_bool_t *too_big) { - rt_uint8_t count; + rt_uint8_t count; struct rt_mmcsd_data *data; - unsigned target_timeout, current_timeout; + unsigned target_timeout, current_timeout; *too_big = RT_FALSE; if (host->quirks & RT_SDHCI_QUIRK_BROKEN_TIMEOUT_VAL) + { return host->max_timeout_count; + } - if (cmd == NULL) + if (!cmd) + { return host->max_timeout_count; + } data = cmd->data; if (!data && !cmd->busy_timeout) + { return host->max_timeout_count; + } target_timeout = sdhci_target_timeout(host, cmd, data); @@ -1049,15 +1310,20 @@ static rt_uint8_t sdhci_calc_timeout(struct rt_sdhci_host *host, struct rt_mmcsd current_timeout = (1 << 13) * 1000 / host->timeout_clk; while (current_timeout < target_timeout) { - count++; + ++count; current_timeout <<= 1; + if (count > host->max_timeout_count) { if (!(host->quirks2 & RT_SDHCI_QUIRK2_DISABLE_HW_TIMEOUT)) - LOG_D("Too large timeout 0x%x requested for CMD%d!\n", + { + LOG_D("Too large timeout 0x%x requested for CMD%d!", count, cmd->cmd_code); + } + count = host->max_timeout_count; *too_big = RT_TRUE; + break; } } @@ -1065,18 +1331,17 @@ static rt_uint8_t sdhci_calc_timeout(struct rt_sdhci_host *host, struct rt_mmcsd return count; } -static void sdhci_calc_sw_timeout(struct rt_sdhci_host *host, +static void sdhci_calc_sw_timeout(struct rt_sdhci_host *host, struct rt_mmcsd_cmd *cmd) { - struct rt_mmcsd_data *data = cmd->data; - struct rt_mmc_host *mmc = host->mmc; - struct rt_mmcsd_io_cfg *ios = &mmc->ios; - unsigned char bus_width = 1 << ios->bus_width; - unsigned int blksz; - unsigned int freq; - rt_uint64_t target_timeout; - rt_uint64_t transfer_time; + unsigned char bus_width; + unsigned int blksz, freq; + rt_uint64_t target_timeout, transfer_time; + struct rt_mmc_host *mmc = host->mmc; + struct rt_mmcsd_data *data = cmd->data; + struct rt_mmcsd_io_cfg *ios = &mmc->ios; + bus_width = 1 << ios->bus_width; target_timeout = sdhci_target_timeout(host, cmd, data); target_timeout *= 1000L; @@ -1085,7 +1350,7 @@ static void sdhci_calc_sw_timeout(struct rt_sdhci_host *host, blksz = data->blksize; freq = mmc->actual_clock ?: host->clock; transfer_time = (rt_uint64_t)blksz * 1000000000L * (8 / bus_width); - do_div(transfer_time, freq); + rt_do_div(transfer_time, freq); transfer_time = transfer_time * 2; host->data_timeout = data->blks * target_timeout + transfer_time; } @@ -1095,14 +1360,15 @@ static void sdhci_calc_sw_timeout(struct rt_sdhci_host *host, } if (host->data_timeout) + { host->data_timeout += MMC_CMD_TRANSFER_TIME; + } } - void rt_sdhci_timeout_set(struct rt_sdhci_host *host, struct rt_mmcsd_cmd *cmd) { - rt_bool_t too_big = RT_FALSE; - rt_uint8_t count = sdhci_calc_timeout(host, cmd, &too_big); + rt_bool_t too_big = RT_FALSE; + rt_uint8_t count = sdhci_calc_timeout(host, cmd, &too_big); if (too_big && host->quirks2 & RT_SDHCI_QUIRK2_DISABLE_HW_TIMEOUT) { @@ -1120,13 +1386,17 @@ void rt_sdhci_timeout_set(struct rt_sdhci_host *host, struct rt_mmcsd_cmd *cmd) static void sdhci_set_timeout(struct rt_sdhci_host *host, struct rt_mmcsd_cmd *cmd) { if (host->ops->set_timeout) + { host->ops->set_timeout(host, cmd); + } else + { rt_sdhci_timeout_set(host, cmd); + } } static void sdhci_start_timer(struct rt_sdhci_host *host, struct rt_mmcsd_req *mrq, - unsigned long timeout) + unsigned long 
timeout) { if (sdhci_data_line_cmd(mrq->cmd)) { @@ -1161,19 +1431,29 @@ static void sdhci_start_timer(struct rt_sdhci_host *host, struct rt_mmcsd_req *m static void __sdhci_finish_mrq(struct rt_sdhci_host *host, struct rt_mmcsd_req *mrq) { if (host->cmd && host->cmd->mrq == mrq) - host->cmd = NULL; + { + host->cmd = RT_NULL; + } if (host->data_cmd && host->data_cmd->mrq == mrq) - host->data_cmd = NULL; + { + host->data_cmd = RT_NULL; + } if (host->deferred_cmd && host->deferred_cmd->mrq == mrq) - host->deferred_cmd = NULL; + { + host->deferred_cmd = RT_NULL; + } if (host->data && host->data->mrq == mrq) - host->data = NULL; + { + host->data = RT_NULL; + } if (sdhci_needs_reset(host, mrq)) + { host->pending_reset = RT_TRUE; + } sdhci_set_mrq_done(host, mrq); @@ -1204,12 +1484,14 @@ static void sdhci_error_out_mrqs(struct rt_sdhci_host *host, int err) static void sdhci_card_event(struct rt_mmc_host *mmc) { - struct rt_sdhci_host *host = mmc_priv(mmc); - rt_uint32_t flags; - int present; + int present; + rt_uint32_t flags; + struct rt_sdhci_host *host = rt_mmc_priv(mmc); if (host->ops->card_event) + { host->ops->card_event(host); + } present = mmc->ops->get_cd(mmc); @@ -1217,10 +1499,8 @@ static void sdhci_card_event(struct rt_mmc_host *mmc) if (sdhci_has_requests(host) && !present) { - rt_kprintf("%s: Card removed during transfer!\n", - mmc_hostname(mmc)); - rt_kprintf("%s: Resetting controller.\n", - mmc_hostname(mmc)); + rt_kprintf("%s: Card removed during transfer!\n", mmc_hostname(mmc)); + rt_kprintf("%s: Resetting controller.\n", mmc_hostname(mmc)); sdhci_do_reset(host, RT_SDHCI_RESET_CMD); sdhci_do_reset(host, RT_SDHCI_RESET_DATA); @@ -1232,87 +1512,109 @@ static void sdhci_card_event(struct rt_mmc_host *mmc) static int sdhci_card_busy(struct rt_mmc_host *mmc) { - struct rt_sdhci_host *host = mmc_priv(mmc); - rt_uint32_t present_state; + rt_uint32_t present_state; + struct rt_sdhci_host *host = rt_mmc_priv(mmc); present_state = rt_sdhci_readl(host, RT_SDHCI_PRESENT_STATE); return !(present_state & RT_SDHCI_DATA_0_LVL_MASK); } - static int sdhci_prepare_hs400_tuning(struct rt_mmc_host *mmc, struct rt_mmcsd_io_cfg *ios) { - struct rt_sdhci_host *host = mmc_priv(mmc); - rt_uint32_t flags; + rt_uint32_t flags; + struct rt_sdhci_host *host = rt_mmc_priv(mmc); - flags = rt_spin_lock_irqsave(&host->lock); + flags = rt_spin_lock_irqsave(&host->lock); host->flags |= RT_SDHCI_HS400_TUNING; rt_spin_unlock_irqrestore(&host->lock, flags); return 0; } - static void sdhci_set_transfer_mode(struct rt_sdhci_host *host, struct rt_mmcsd_cmd *cmd) { - rt_uint16_t mode = 0; + rt_uint16_t mode = 0; struct rt_mmcsd_data *data = cmd->data; - if (data == NULL) + if (!data) { if (host->quirks2 & RT_SDHCI_QUIRK2_CLEAR_TRANSFERMODE_REG_BEFORE_CMD) { if (!mmc_op_tuning(cmd->cmd_code)) + { rt_sdhci_writew(host, 0x0, RT_SDHCI_TRANSFER_MODE); + } } else { mode = rt_sdhci_readw(host, RT_SDHCI_TRANSFER_MODE); - rt_sdhci_writew(host, mode & ~(RT_SDHCI_TRNS_AUTO_CMD12 | RT_SDHCI_TRNS_AUTO_CMD23), RT_SDHCI_TRANSFER_MODE); + rt_sdhci_writew(host, mode & ~(RT_SDHCI_TRNS_AUTO_CMD12 | RT_SDHCI_TRNS_AUTO_CMD23), + RT_SDHCI_TRANSFER_MODE); } + return; } if (!(host->quirks2 & RT_SDHCI_QUIRK2_SUPPORT_SINGLE)) + { mode = RT_SDHCI_TRNS_BLK_CNT_EN; + } if (mmc_op_multi(cmd->cmd_code) || data->blks > 1) { mode = RT_SDHCI_TRNS_BLK_CNT_EN | RT_SDHCI_TRNS_MULTI; sdhci_auto_cmd_select(host, cmd, &mode); + if (sdhci_auto_cmd23(host, cmd->mrq)) + { rt_sdhci_writel(host, cmd->mrq->sbc->arg, RT_SDHCI_ARGUMENT2); + } } if (data->flags & 
DATA_DIR_READ) + { mode |= RT_SDHCI_TRNS_READ; + } + if (host->flags & RT_SDHCI_REQ_USE_DMA) + { mode |= RT_SDHCI_TRNS_DMA; + } rt_sdhci_writew(host, mode, RT_SDHCI_TRANSFER_MODE); } static rt_bool_t sdhci_send_command(struct rt_sdhci_host *host, struct rt_mmcsd_cmd *cmd) { - int flags; - rt_uint32_t mask; + int flags; + rt_uint32_t mask; unsigned long timeout; + cmd->err = 0; - if ((host->quirks2 & RT_SDHCI_QUIRK2_STOP_WITH_TC) && cmd->cmd_code == MMC_STOP_TRANSMISSION) + if ((host->quirks2 & RT_SDHCI_QUIRK2_STOP_WITH_TC) && + cmd->cmd_code == MMC_STOP_TRANSMISSION) + { cmd->flags |= MMC_RSP_BUSY; + } mask = RT_SDHCI_CMD_INHIBIT; if (sdhci_data_line_cmd(cmd)) + { mask |= RT_SDHCI_DATA_INHIBIT; + } if (cmd->mrq->data && (cmd == cmd->mrq->data->stop)) + { mask &= ~RT_SDHCI_DATA_INHIBIT; + } if (rt_sdhci_readl(host, RT_SDHCI_PRESENT_STATE) & mask) + { return RT_FALSE; + } host->cmd = cmd; host->data_timeout = 0; @@ -1336,32 +1638,53 @@ static rt_bool_t sdhci_send_command(struct rt_sdhci_host *host, struct rt_mmcsd_ } if (!(cmd->flags & MMC_RSP_PRESENT)) + { flags = RT_SDHCI_CMD_RESP_NONE; + } else if (cmd->flags & MMC_RSP_136) + { flags = RT_SDHCI_CMD_RESP_LONG; + } else if (cmd->flags & MMC_RSP_BUSY) + { flags = RT_SDHCI_CMD_RESP_SHORT_BUSY; + } else + { flags = RT_SDHCI_CMD_RESP_SHORT; + } if (cmd->flags & MMC_RSP_CRC) + { flags |= RT_SDHCI_CMD_CRC; + } if (cmd->flags & MMC_RSP_OPCODE) + { flags |= RT_SDHCI_CMD_INDEX; + } if (cmd->data || mmc_op_tuning(cmd->cmd_code)) + { flags |= RT_SDHCI_CMD_DATA; + } timeout = rt_tick_get(); if (host->data_timeout) + { timeout += rt_tick_from_millisecond(host->data_timeout * 1000); - else if (!cmd->data && cmd->busy_timeout > 9000) - timeout += DIV_ROUND_UP(cmd->busy_timeout, 1000) * RT_TICK_PER_SECOND + RT_TICK_PER_SECOND; + } + else if (!cmd->data && (int)cmd->busy_timeout > 9000) + { + timeout += (RT_DIV_ROUND_UP(cmd->busy_timeout, 1000) + 1) * RT_TICK_PER_SECOND; + } else + { timeout += 10 * RT_TICK_PER_SECOND; + } sdhci_start_timer(host, cmd->mrq, timeout); rt_sdhci_writew(host, RT_SDHCI_MAKE_CMD(cmd->cmd_code, flags), RT_SDHCI_COMMAND); + return RT_TRUE; } @@ -1370,18 +1693,22 @@ static rt_bool_t sdhci_send_command(struct rt_sdhci_host *host, struct rt_mmcsd_ /********************************************************* */ static void __sdhci_finish_data(struct rt_sdhci_host *host, rt_bool_t sw_data_timeout) { - struct rt_mmcsd_cmd *data_cmd = host->data_cmd; - struct rt_mmcsd_data *data = host->data; + struct rt_mmcsd_data *data = host->data; + struct rt_mmcsd_cmd *data_cmd = host->data_cmd; - host->data = NULL; - host->data_cmd = NULL; + host->data = RT_NULL; + host->data_cmd = RT_NULL; if (data->err) { if (!host->cmd || host->cmd == data_cmd) + { sdhci_reset_for(host, REQUEST_ERROR); + } else + { sdhci_reset_for(host, REQUEST_ERROR_DATA_ONLY); + } } if (data->err) @@ -1401,7 +1728,8 @@ static void __sdhci_finish_data(struct rt_sdhci_host *host, rt_bool_t sw_data_ti } else { - host->cmd = NULL; + host->cmd = RT_NULL; + if (!sdhci_send_command(host, data->stop)) { if (sw_data_timeout) @@ -1438,10 +1766,12 @@ static void sdhci_data_irq(struct rt_sdhci_host *host, rt_uint32_t intmask) if (intmask & RT_SDHCI_INT_DATA_AVAIL && !host->data) { command = RT_SDHCI_GET_CMD(rt_sdhci_readw(host, RT_SDHCI_COMMAND)); + if (command == MMC_SEND_TUNING_BLOCK || command == MMC_SEND_TUNING_BLOCK_HS200) { host->tuning_done = 1; rt_wqueue_wakeup(&host->buf_ready_int, 0); + return; } } @@ -1454,43 +1784,51 @@ static void sdhci_data_irq(struct rt_sdhci_host *host, 
rt_uint32_t intmask) { if (intmask & RT_SDHCI_INT_DATA_TIMEOUT) { - host->data_cmd = NULL; + host->data_cmd = RT_NULL; data_cmd->err = -ETIMEDOUT; __sdhci_finish_mrq(host, data_cmd->mrq); return; } if (intmask & RT_SDHCI_INT_DATA_END) { - host->data_cmd = NULL; + host->data_cmd = RT_NULL; if (host->cmd == data_cmd) + { return; + } __sdhci_finish_mrq(host, data_cmd->mrq); return; } } - if (host->pending_reset) + { return; + } + rt_kprintf("%s: Got data interrupt 0x%08x even though no data operation was in progress.\n", mmc_hostname(host->mmc), (unsigned)intmask); - rt_read_reg_debug(host); + rt_sdhci_read_reg_debug(host); return; } if (intmask & RT_SDHCI_INT_DATA_TIMEOUT) + { host->data->err = -ETIMEDOUT; + } else if (intmask & RT_SDHCI_INT_DATA_END_BIT) + { host->data->err = -EILSEQ; - else if ((intmask & RT_SDHCI_INT_DATA_CRC) && RT_SDHCI_GET_CMD(rt_sdhci_readw(host, RT_SDHCI_COMMAND)) != MMC_BUS_TEST_R) + } + else if ((intmask & RT_SDHCI_INT_DATA_CRC) && + RT_SDHCI_GET_CMD(rt_sdhci_readw(host, RT_SDHCI_COMMAND)) != MMC_BUS_TEST_R) { host->data->err = -EILSEQ; } - if (host->data->err) { sdhci_finish_data(host); @@ -1498,7 +1836,9 @@ static void sdhci_data_irq(struct rt_sdhci_host *host, rt_uint32_t intmask) else { if (intmask & (RT_SDHCI_INT_DATA_AVAIL | RT_SDHCI_INT_SPACE_AVAIL)) + { sdhci_transfer_pio(host); + } if (intmask & RT_SDHCI_INT_DMA_END) { @@ -1506,25 +1846,34 @@ static void sdhci_data_irq(struct rt_sdhci_host *host, rt_uint32_t intmask) dmastart = sdhci_sdma_address(host); dmanow = dmastart + host->data->bytes_xfered; - dmanow = (dmanow & ~((rt_uint32_t)RT_SDHCI_DEFAULT_BOUNDARY_SIZE - 1)) + RT_SDHCI_DEFAULT_BOUNDARY_SIZE; + dmanow = (dmanow & ~((rt_uint32_t)RT_SDHCI_DEFAULT_BOUNDARY_SIZE - 1)); + dmanow += RT_SDHCI_DEFAULT_BOUNDARY_SIZE; host->data->bytes_xfered = dmanow - dmastart; - LOG_D("DMA base %pad, transferred 0x%06x bytes, next %pad\n", + + LOG_D("DMA base %p, transferred 0x%06x bytes, next %p", &dmastart, host->data->bytes_xfered, &dmanow); + sdhci_set_sdma_addr(host, dmanow); } if (intmask & RT_SDHCI_INT_DATA_END) { struct rt_mmcsd_data *data = host->data; + if (data->buf) { if (mmc_get_dma_dir(data) == DMA_FROM_DEVICE) { - rt_hw_cpu_dcache_ops(RT_HW_CACHE_INVALIDATE, data->buf, data->blks * data->blksize); - } else { - rt_hw_cpu_dcache_ops(RT_HW_CACHE_FLUSH, data->buf, data->blks * data->blksize); + rt_hw_cpu_dcache_ops(RT_HW_CACHE_INVALIDATE, data->buf, + data->blks * data->blksize); + } + else + { + rt_hw_cpu_dcache_ops(RT_HW_CACHE_FLUSH, data->buf, + data->blks * data->blksize); } } + if (host->cmd == host->data_cmd) { host->data_early = 1; @@ -1539,22 +1888,27 @@ static void sdhci_data_irq(struct rt_sdhci_host *host, rt_uint32_t intmask) static void rt_sdhci_read_rsp_136(struct rt_sdhci_host *host, struct rt_mmcsd_cmd *cmd) { - int i, reg; + int reg; - for (i = 0; i < 4; i++) + for (int i = 0; i < 4; ++i) { reg = RT_SDHCI_RESPONSE + (3 - i) * 4; cmd->resp[i] = rt_sdhci_readl(host, reg); } if (host->quirks2 & RT_SDHCI_QUIRK2_RSP_136_HAS_CRC) + { return; + } - for (i = 0; i < 4; i++) + for (int i = 0; i < 4; ++i) { cmd->resp[i] <<= 8; + if (i != 3) + { cmd->resp[i] |= cmd->resp[i + 1] >> 24; + } } } @@ -1562,7 +1916,7 @@ static void sdhci_command_end(struct rt_sdhci_host *host) { struct rt_mmcsd_cmd *cmd = host->cmd; - host->cmd = NULL; + host->cmd = RT_NULL; if (cmd->flags & MMC_RSP_PRESENT) { @@ -1598,21 +1952,29 @@ static void sdhci_command_end(struct rt_sdhci_host *host) else { if (host->data && host->data_early) + { sdhci_finish_data(host); + } if 
(!cmd->data) + { __sdhci_finish_mrq(host, cmd->mrq); + } } } - -static void sdhci_cmd_irq(struct rt_sdhci_host *host, rt_uint32_t intmask, rt_uint32_t *intmask_p) +static void sdhci_cmd_irq(struct rt_sdhci_host *host, + rt_uint32_t intmask, rt_uint32_t *intmask_p) { if (intmask & RT_SDHCI_INT_AUTO_CMD_ERR && host->data_cmd) { - struct rt_mmcsd_req *mrq = host->data_cmd->mrq; - rt_uint16_t auto_cmd_status = rt_sdhci_readw(host, RT_SDHCI_AUTO_CMD_STATUS); - int data_err_bit = (auto_cmd_status & RT_SDHCI_AUTO_CMD_TIMEOUT) ? RT_SDHCI_INT_DATA_TIMEOUT : RT_SDHCI_INT_DATA_CRC; + int data_err_bit; + rt_uint16_t auto_cmd_status; + struct rt_mmcsd_req *mrq = host->data_cmd->mrq; + + auto_cmd_status = rt_sdhci_readw(host, RT_SDHCI_AUTO_CMD_STATUS); + data_err_bit = (auto_cmd_status & RT_SDHCI_AUTO_CMD_TIMEOUT) ? + RT_SDHCI_INT_DATA_TIMEOUT : RT_SDHCI_INT_DATA_CRC; if (!mrq->sbc && (host->flags & RT_SDHCI_AUTO_CMD12)) { @@ -1624,18 +1986,27 @@ static void sdhci_cmd_irq(struct rt_sdhci_host *host, rt_uint32_t intmask, rt_ui if (!host->cmd) { if (host->pending_reset) + { return; + } + rt_kprintf("%s: Got command interrupt 0x%08x even though no command operation was in progress.\n", mmc_hostname(host->mmc), (unsigned)intmask); - rt_read_reg_debug(host); + rt_sdhci_read_reg_debug(host); + return; } + if (intmask & (RT_SDHCI_INT_TIMEOUT | RT_SDHCI_INT_CRC | RT_SDHCI_INT_END_BIT | RT_SDHCI_INT_INDEX)) { if (intmask & RT_SDHCI_INT_TIMEOUT) + { host->cmd->err = -ETIMEDOUT; + } else + { host->cmd->err = -EILSEQ; + } /* Treat data command CRC error the same as data CRC error */ if (host->cmd->data && (intmask & (RT_SDHCI_INT_CRC | RT_SDHCI_INT_TIMEOUT)) == RT_SDHCI_INT_CRC) @@ -1651,9 +2022,12 @@ static void sdhci_cmd_irq(struct rt_sdhci_host *host, rt_uint32_t intmask, rt_ui if (intmask & RT_SDHCI_INT_AUTO_CMD_ERR) { - struct rt_mmcsd_req *mrq = host->cmd->mrq; - rt_uint16_t auto_cmd_status = rt_sdhci_readw(host, RT_SDHCI_AUTO_CMD_STATUS); - int err = (auto_cmd_status & RT_SDHCI_AUTO_CMD_TIMEOUT) ? -ETIMEDOUT : -EILSEQ; + int err; + rt_uint16_t auto_cmd_status; + struct rt_mmcsd_req *mrq = host->cmd->mrq; + + auto_cmd_status = rt_sdhci_readw(host, RT_SDHCI_AUTO_CMD_STATUS); + err = (auto_cmd_status & RT_SDHCI_AUTO_CMD_TIMEOUT) ? -ETIMEDOUT : -EILSEQ; if (mrq->sbc && (host->flags & RT_SDHCI_AUTO_CMD23)) { @@ -1664,7 +2038,9 @@ static void sdhci_cmd_irq(struct rt_sdhci_host *host, rt_uint32_t intmask, rt_ui } if (intmask & RT_SDHCI_INT_RESPONSE) + { sdhci_command_end(host); + } } static void sdhci_irq(int irq, void *dev_id) @@ -1672,12 +2048,11 @@ static void sdhci_irq(int irq, void *dev_id) #define IRQ_NONE 0 #define IRQ_WAIT 1 #define IRQ_DONE 2 + int max_loops = 16, result = IRQ_NONE; + rt_uint32_t intmask, mask, unexpected = 0; + struct rt_sdhci_host *host = dev_id; + struct rt_mmcsd_req *mrqs_done[RT_SDHCI_MAX_MRQS] = { 0 }; - struct rt_mmcsd_req* mrqs_done[RT_SDHCI_MAX_MRQS] = { 0 }; - struct rt_sdhci_host *host = dev_id; - rt_uint32_t intmask, mask, unexpected = 0; - int max_loops = 16; - int i, result= IRQ_NONE ; rt_spin_lock(&host->lock); if (host->runtime_suspended) @@ -1690,17 +2065,19 @@ static void sdhci_irq(int irq, void *dev_id) if (!intmask || intmask == 0xffffffff) { result = IRQ_NONE; - goto out; + goto _out; } do { - LOG_D("IRQ status 0x%08x\n", intmask); + LOG_D("IRQ status 0x%08x", intmask); if (host->ops->irq) { intmask = host->ops->irq(host, intmask); if (!intmask) - goto cont; + { + goto _cont; + } } /* Clear selected interrupts. 
*/ @@ -1716,41 +2093,55 @@ static void sdhci_irq(int irq, void *dev_id) rt_sdhci_writel(host, host->ier, RT_SDHCI_INT_ENABLE); rt_sdhci_writel(host, host->ier, RT_SDHCI_SIGNAL_ENABLE); - rt_sdhci_writel(host, intmask & (RT_SDHCI_INT_CARD_INSERT | RT_SDHCI_INT_CARD_REMOVE), RT_SDHCI_INT_STATUS); + rt_sdhci_writel(host, intmask & (RT_SDHCI_INT_CARD_INSERT | RT_SDHCI_INT_CARD_REMOVE), + RT_SDHCI_INT_STATUS); host->thread_isr |= intmask & (RT_SDHCI_INT_CARD_INSERT | RT_SDHCI_INT_CARD_REMOVE); result = IRQ_WAIT; } if (intmask & RT_SDHCI_INT_CMD_MASK) + { sdhci_cmd_irq(host, intmask & RT_SDHCI_INT_CMD_MASK, &intmask); + } if (intmask & RT_SDHCI_INT_DATA_MASK) + { sdhci_data_irq(host, intmask & RT_SDHCI_INT_DATA_MASK); + } if (intmask & RT_SDHCI_INT_BUS_POWER) + { rt_kprintf("%s: Card is consuming too much power!\n", mmc_hostname(host->mmc)); + } - intmask &= ~(RT_SDHCI_INT_CARD_INSERT | RT_SDHCI_INT_CARD_REMOVE | RT_SDHCI_INT_CMD_MASK | RT_SDHCI_INT_DATA_MASK | RT_SDHCI_INT_ERROR | RT_SDHCI_INT_BUS_POWER | RT_SDHCI_INT_RETUNE | RT_SDHCI_INT_CARD_INT); + intmask &= ~(RT_SDHCI_INT_CARD_INSERT | RT_SDHCI_INT_CARD_REMOVE | + RT_SDHCI_INT_CMD_MASK | RT_SDHCI_INT_DATA_MASK | + RT_SDHCI_INT_ERROR | RT_SDHCI_INT_BUS_POWER | + RT_SDHCI_INT_RETUNE | RT_SDHCI_INT_CARD_INT); if (intmask) { unexpected |= intmask; rt_sdhci_writel(host, intmask, RT_SDHCI_INT_STATUS); } - cont: + _cont: if (result == IRQ_NONE) + { result = IRQ_WAIT; + } intmask = rt_sdhci_readl(host, RT_SDHCI_INT_STATUS); } while (intmask && --max_loops); - for (i = 0; i < RT_SDHCI_MAX_MRQS; i++) + for (int i = 0; i < RT_SDHCI_MAX_MRQS; ++i) { struct rt_mmcsd_req *mrq = host->mrqs_done[i]; if (!mrq) + { continue; + } if (sdhci_defer_done(host, mrq)) { @@ -1759,31 +2150,40 @@ static void sdhci_irq(int irq, void *dev_id) else { mrqs_done[i] = mrq; - host->mrqs_done[i] = NULL; + host->mrqs_done[i] = RT_NULL; } } -out: +_out: if (host->deferred_cmd) + { result = IRQ_WAIT; + } rt_spin_unlock(&host->lock); - for (i = 0; i < RT_SDHCI_MAX_MRQS; i++) + for (int i = 0; i < RT_SDHCI_MAX_MRQS; ++i) { if (!mrqs_done[i]) + { continue; + } if (host->ops->request_done) + { host->ops->request_done(host, mrqs_done[i]); + } else + { rt_mmc_request_done(host->mmc, mrqs_done[i]); + } } if (unexpected) { rt_kprintf("%s: Unexpected interrupt 0x%08x.\n", - mmc_hostname(host->mmc), unexpected); - rt_read_reg_debug(host); + mmc_hostname(host->mmc), unexpected); + + rt_sdhci_read_reg_debug(host); } if (result == IRQ_WAIT) @@ -1792,21 +2192,25 @@ static void sdhci_irq(int irq, void *dev_id) } } -static rt_bool_t sdhci_send_command_retry(struct rt_sdhci_host *host, +static rt_bool_t sdhci_send_command_retry(struct rt_sdhci_host *host, struct rt_mmcsd_cmd *cmd, - unsigned long flags) + unsigned long flags) { + int timeout; + rt_bool_t present; struct rt_mmcsd_cmd *deferred_cmd = host->deferred_cmd; - int timeout = 10; /* Approx. 
10 ms */ - rt_bool_t present; + + timeout = 10; + while (!sdhci_send_command(host, cmd)) { if (!timeout--) { rt_kprintf("%s: Controller never released inhibit bit(s).\n", mmc_hostname(host->mmc)); - rt_read_reg_debug(host); + rt_sdhci_read_reg_debug(host); cmd->err = -EIO; + return RT_FALSE; } @@ -1819,31 +2223,39 @@ static rt_bool_t sdhci_send_command_retry(struct rt_sdhci_host *host, flags = rt_spin_lock_irqsave(&host->lock); if (cmd == deferred_cmd && cmd != host->deferred_cmd) + { return RT_TRUE; + } if (sdhci_present_error(host, cmd, present)) + { return RT_FALSE; + } } if (cmd == host->deferred_cmd) - host->deferred_cmd = NULL; + { + host->deferred_cmd = RT_NULL; + } return RT_TRUE; } static rt_bool_t rt_sdhci_start_request_done(struct rt_sdhci_host *host) { - rt_base_t flags; + int i; + rt_base_t flags; struct rt_mmcsd_req *mrq; - int i; flags = rt_spin_lock_irqsave(&host->lock); - for (i = 0; i < RT_SDHCI_MAX_MRQS; i++) + for (i = 0; i < RT_SDHCI_MAX_MRQS; ++i) { mrq = host->mrqs_done[i]; if (mrq) + { break; + } } if (!mrq) @@ -1862,8 +2274,10 @@ static rt_bool_t rt_sdhci_start_request_done(struct rt_sdhci_host *host) /* Some controllers need this kick or reset won't work here */ if (host->quirks & RT_SDHCI_QUIRK_CLOCK_BEFORE_RESET) + { /* This is to force an update */ host->ops->set_clock(host, host->clock); + } sdhci_reset_for(host, REQUEST_ERROR); @@ -1874,67 +2288,79 @@ static rt_bool_t rt_sdhci_start_request_done(struct rt_sdhci_host *host) { struct rt_mmcsd_data *data = mrq->data; - if (data && data->host_cookie == COOKIE_MAPPED) + if (data && data->host_cookie == RT_SDHCI_COOKIE_MAPPED) { if (host->bounce_buffer) { - /* - * On reads, copy the bounced data into the - * sglist - */ + /* On reads, copy the bounced data into the sglist */ if (mmc_get_dma_dir(data) == DMA_FROM_DEVICE) { unsigned int length = data->bytes_xfered; if (length > host->bounce_buffer_size) { - LOG_E("%s: bounce buffer is %u bytes but DMA claims to have transferred %u bytes\n", + LOG_E("%s: bounce buffer is %u bytes but DMA claims to have transferred %u bytes", mmc_hostname(host->mmc), host->bounce_buffer_size, data->bytes_xfered); /* Cap it down and continue */ length = host->bounce_buffer_size; } - rt_hw_cpu_dcache_ops(RT_HW_CACHE_INVALIDATE, data->buf, data->blks * data->blksize); - } else { - /* No copying, just switch ownership */ - rt_hw_cpu_dcache_ops(RT_HW_CACHE_FLUSH, data->buf, data->blks * data->blksize); + rt_hw_cpu_dcache_ops(RT_HW_CACHE_INVALIDATE, data->buf, + data->blks * data->blksize); } - } - data->host_cookie = COOKIE_UNMAPPED; - } - else { - if (mmc_get_dma_dir(data) == DMA_FROM_DEVICE) + else { - rt_hw_cpu_dcache_ops(RT_HW_CACHE_INVALIDATE, data->buf, data->blks * data->blksize); - } else { /* No copying, just switch ownership */ - rt_hw_cpu_dcache_ops(RT_HW_CACHE_FLUSH, data->buf, data->blks * data->blksize); + rt_hw_cpu_dcache_ops(RT_HW_CACHE_FLUSH, data->buf, + data->blks * data->blksize); } + } + data->host_cookie = RT_SDHCI_COOKIE_UNMAPPED; + } + else + { + if (mmc_get_dma_dir(data) == DMA_FROM_DEVICE) + { + rt_hw_cpu_dcache_ops(RT_HW_CACHE_INVALIDATE, data->buf, + data->blks * data->blksize); + } + else + { + /* No copying, just switch ownership */ + rt_hw_cpu_dcache_ops(RT_HW_CACHE_FLUSH, data->buf, + data->blks * data->blksize); + } } } - host->mrqs_done[i] = NULL; + host->mrqs_done[i] = RT_NULL; rt_spin_unlock_irqrestore(&host->lock, flags); if (host->ops->request_done) + { host->ops->request_done(host, mrq); + } else + { rt_mmc_request_done(host->mmc, mrq); + } return 
RT_FALSE; } - static void sdhci_thread_irq(struct rt_work *work, void *work_data) { - struct rt_sdhci_host* host = work_data; + rt_base_t flags; + rt_uint32_t isr; struct rt_mmcsd_cmd *cmd; - rt_base_t flags; - rt_uint32_t isr; + struct rt_sdhci_host *host = work_data; - while (!rt_sdhci_start_request_done(host)); + while (!rt_sdhci_start_request_done(host)) + { + rt_hw_cpu_relax(); + } flags = rt_spin_lock_irqsave(&host->lock); @@ -1943,7 +2369,9 @@ static void sdhci_thread_irq(struct rt_work *work, void *work_data) cmd = host->deferred_cmd; if (cmd && !sdhci_send_command_retry(host, cmd, flags)) + { sdhci_finish_mrq(host, cmd->mrq); + } rt_spin_unlock_irqrestore(&host->lock, flags); @@ -1955,10 +2383,9 @@ static void sdhci_thread_irq(struct rt_work *work, void *work_data) } } - void rt_sdhci_enable_io_irq(struct rt_mmc_host *mmc, int enable) { - struct rt_sdhci_host *host = mmc_priv(mmc); + struct rt_sdhci_host *host = rt_mmc_priv(mmc); rt_uint32_t flags; flags = rt_spin_lock_irqsave(&host->lock); @@ -1970,13 +2397,12 @@ void rt_sdhci_enable_io_irq(struct rt_mmc_host *mmc, int enable) /********************************************************* */ /* request */ /********************************************************* */ - void rt_sdhci_start_request(struct rt_mmc_host *mmc, struct rt_mmcsd_req *mrq) { - struct rt_sdhci_host *host = mmc_priv(mmc); + rt_base_t flags; + rt_bool_t present; struct rt_mmcsd_cmd *cmd; - rt_base_t flags; - rt_bool_t present; + struct rt_sdhci_host *host = rt_mmc_priv(mmc); /* Firstly check card presence */ present = mmc->ops->get_cd(mmc); @@ -1984,38 +2410,43 @@ void rt_sdhci_start_request(struct rt_mmc_host *mmc, struct rt_mmcsd_req *mrq) flags = rt_spin_lock_irqsave(&host->lock); if (sdhci_present_error(host, mrq->cmd, present)) - goto out_finish; + { + goto _out_finish; + } cmd = sdhci_manual_cmd23(host, mrq) ? 
mrq->sbc : mrq->cmd; if (!sdhci_send_command_retry(host, cmd, flags)) - goto out_finish; + { + goto _out_finish; + } rt_spin_unlock_irqrestore(&host->lock, flags); return; -out_finish: +_out_finish: sdhci_finish_mrq(host, mrq); rt_spin_unlock_irqrestore(&host->lock, flags); } - static void sdhci_complete_work(struct rt_work *work, void *work_data) { struct rt_sdhci_host *host = work_data; - while (!rt_sdhci_start_request_done(host)); + while (!rt_sdhci_start_request_done(host)) + { + rt_hw_cpu_relax(); + } } - /********************************************************* */ /* timer */ /********************************************************* */ static void sdhci_timeout_timer(void *parameter) { + rt_base_t flags; struct rt_sdhci_host *host = parameter; - rt_base_t flags; flags = rt_spin_lock_irqsave(&host->lock); @@ -2023,7 +2454,7 @@ static void sdhci_timeout_timer(void *parameter) { rt_kprintf("%s: Timeout waiting for hardware cmd interrupt.\n", mmc_hostname(host->mmc)); - rt_read_reg_debug(host); + rt_sdhci_read_reg_debug(host); host->cmd->err = -ETIMEDOUT; sdhci_finish_mrq(host, host->cmd->mrq); @@ -2034,8 +2465,8 @@ static void sdhci_timeout_timer(void *parameter) static void sdhci_timeout_data_timer(void *parameter) { + rt_base_t flags; struct rt_sdhci_host *host = parameter; - rt_base_t flags; flags = rt_spin_lock_irqsave(&host->lock); @@ -2043,7 +2474,7 @@ static void sdhci_timeout_data_timer(void *parameter) { rt_kprintf("%s: Timeout waiting for hardware interrupt.\n", mmc_hostname(host->mmc)); - rt_read_reg_debug(host); + rt_sdhci_read_reg_debug(host); if (host->data) { @@ -2066,32 +2497,35 @@ static void sdhci_timeout_data_timer(void *parameter) rt_spin_unlock_irqrestore(&host->lock, flags); } - /********************************************************* */ /* tuning */ /********************************************************* */ int rt_sdhci_execute_tuning(struct rt_mmc_host *mmc, rt_uint32_t opcode) { - struct rt_sdhci_host *host = mmc_priv(mmc); - int err = 0; - unsigned int tuning_count = 0; - rt_bool_t hs400_tuning; + int err = 0; + rt_bool_t hs400_tuning; + unsigned int tuning_count = 0; + struct rt_sdhci_host *host = rt_mmc_priv(mmc); hs400_tuning = host->flags & RT_SDHCI_HS400_TUNING; if (host->tuning_mode == RT_SDHCI_TUNING_MODE_1) + { tuning_count = host->tuning_count; + } switch (host->timing) { /* HS400 tuning is done in HS200 mode */ case MMC_TIMING_MMC_HS400: err = -EINVAL; - goto out; + goto _out; case MMC_TIMING_MMC_HS200: if (hs400_tuning) + { tuning_count = 0; + } break; case MMC_TIMING_UHS_SDR104: @@ -2100,30 +2534,34 @@ int rt_sdhci_execute_tuning(struct rt_mmc_host *mmc, rt_uint32_t opcode) case MMC_TIMING_UHS_SDR50: if (host->flags & RT_SDHCI_SDR50_NEEDS_TUNING) + { break; - fallthrough; + } + /* Fallthrough */ default: - goto out; + goto _out; } if (host->ops->platform_execute_tuning) { err = host->ops->platform_execute_tuning(host, opcode); - goto out; + goto _out; } mmc->retune_period = tuning_count; if (host->tuning_delay < 0) + { host->tuning_delay = opcode == MMC_SEND_TUNING_BLOCK; + } rt_sdhci_start_tuning(host); host->tuning_err = __sdhci_execute_tuning(host, opcode); rt_sdhci_end_tuning(host); -out: +_out: host->flags &= ~RT_SDHCI_HS400_TUNING; return err; @@ -2131,9 +2569,7 @@ int rt_sdhci_execute_tuning(struct rt_mmc_host *mmc, rt_uint32_t opcode) int __sdhci_execute_tuning(struct rt_sdhci_host *host, rt_uint32_t opcode) { - int i; - - for (i = 0; i < host->tuning_loop_count; i++) + for (int i = 0; i < host->tuning_loop_count; ++i) { rt_uint16_t 
ctrl; @@ -2146,20 +2582,26 @@ int __sdhci_execute_tuning(struct rt_sdhci_host *host, rt_uint32_t opcode) } if (host->tuning_delay > 0) + { rt_thread_mdelay(host->tuning_delay); + } ctrl = rt_sdhci_readw(host, RT_SDHCI_HOST_CONTROL2); if (!(ctrl & RT_SDHCI_CTRL_EXEC_TUNING)) { if (ctrl & RT_SDHCI_CTRL_TUNED_CLK) + { return 0; /* Success! */ + } + break; } } - LOG_D("%s: Tuning failed, falling back to fixed sampling clock\n", + LOG_D("%s: Tuning failed, falling back to fixed sampling clock", mmc_hostname(host->mmc)); rt_sdhci_reset_tuning(host); + return -EAGAIN; } @@ -2170,7 +2612,9 @@ void rt_sdhci_start_tuning(struct rt_sdhci_host *host) ctrl = rt_sdhci_readw(host, RT_SDHCI_HOST_CONTROL2); ctrl |= RT_SDHCI_CTRL_EXEC_TUNING; if (host->quirks2 & RT_SDHCI_QUIRK2_TUNING_WORK_AROUND) + { ctrl |= RT_SDHCI_CTRL_TUNED_CLK; + } rt_sdhci_writew(host, ctrl, RT_SDHCI_HOST_CONTROL2); rt_sdhci_writel(host, RT_SDHCI_INT_DATA_AVAIL, RT_SDHCI_INT_ENABLE); @@ -2194,11 +2638,11 @@ void rt_sdhci_abort_tuning(struct rt_sdhci_host *host, rt_uint32_t opcode) void rt_sdhci_send_tuning(struct rt_sdhci_host *host, rt_uint32_t opcode) { - struct rt_mmc_host *mmc = host->mmc; + unsigned long flags; + rt_uint32_t b = host->sdma_boundary; struct rt_mmcsd_cmd cmd = {}; struct rt_mmcsd_req mrq = {}; - unsigned long flags; - rt_uint32_t b = host->sdma_boundary; + struct rt_mmc_host *mmc = host->mmc; flags = rt_spin_lock_irqsave(&host->lock); @@ -2209,9 +2653,13 @@ void rt_sdhci_send_tuning(struct rt_sdhci_host *host, rt_uint32_t opcode) mrq.cmd = &cmd; if (cmd.cmd_code == MMC_SEND_TUNING_BLOCK_HS200 && mmc->ios.bus_width == MMC_BUS_WIDTH_8) + { rt_sdhci_writew(host, RT_SDHCI_MAKE_BLKSZ(b, 128), RT_SDHCI_BLOCK_SIZE); + } else + { rt_sdhci_writew(host, RT_SDHCI_MAKE_BLKSZ(b, 64), RT_SDHCI_BLOCK_SIZE); + } rt_sdhci_writew(host, RT_SDHCI_TRNS_READ, RT_SDHCI_TRANSFER_MODE); @@ -2222,7 +2670,7 @@ void rt_sdhci_send_tuning(struct rt_sdhci_host *host, rt_uint32_t opcode) return; } - host->cmd = NULL; + host->cmd = RT_NULL; sdhci_del_timer(host, &mrq); @@ -2245,7 +2693,8 @@ void rt_sdhci_reset_tuning(struct rt_sdhci_host *host) /********************************************************* */ /* error */ /********************************************************* */ -static const struct mmc_host_ops rt_sdhci_ops = { +static const struct rt_mmc_host_ops rt_sdhci_ops = +{ .request = rt_sdhci_start_request, .set_ios = rt_sdhci_ios_set, .get_cd = sdhci_get_cd, @@ -2259,11 +2708,10 @@ static const struct mmc_host_ops rt_sdhci_ops = { .card_busy = sdhci_card_busy, }; - -void rt_sdhci_uninit_host(struct rt_sdhci_host *host, int dead) +void rt_sdhci_uninit_host(struct rt_sdhci_host *host, rt_bool_t dead) { + unsigned long flags; struct rt_mmc_host *mmc = host->mmc; - unsigned long flags; if (dead) { @@ -2273,8 +2721,7 @@ void rt_sdhci_uninit_host(struct rt_sdhci_host *host, int dead) if (sdhci_has_requests(host)) { - rt_kprintf("%s: Controller removed during " - " transfer!\n", + rt_kprintf("%s: Controller removed during transfer!\n", mmc_hostname(mmc)); sdhci_error_out_mrqs(host, -ENOMEDIUM); } @@ -2286,9 +2733,10 @@ void rt_sdhci_uninit_host(struct rt_sdhci_host *host, int dead) rt_mmc_remove_host(mmc); - if (!dead) + { sdhci_reset_for_all(host); + } rt_sdhci_writel(host, 0, RT_SDHCI_INT_ENABLE); rt_sdhci_writel(host, 0, RT_SDHCI_SIGNAL_ENABLE); @@ -2298,15 +2746,18 @@ void rt_sdhci_uninit_host(struct rt_sdhci_host *host, int dead) rt_workqueue_destroy(host->complete_wq); + if (host->sdhci_core_to_disable_vqmmc) + { + 
rt_regulator_disable(mmc->rthost.supply.vqmmc); + } } rt_uint16_t rt_sdhci_clk_set(struct rt_sdhci_host *host, unsigned int clock, - unsigned int *actual_clock) + unsigned int *actual_clock) { - int div = 0; /* Initialized for compiler warning */ - int real_div = div, clk_mul = 1; - rt_uint16_t clk = 0; - rt_bool_t switch_base_clk = RT_FALSE; + rt_uint16_t clk = 0; + rt_bool_t switch_base_clk = RT_FALSE; + int div = 0, real_div = 0, clk_mul = 1; if (host->version >= RT_SDHCI_SPEC_300) { @@ -2316,7 +2767,7 @@ rt_uint16_t rt_sdhci_clk_set(struct rt_sdhci_host *host, unsigned int clock, clk = rt_sdhci_readw(host, RT_SDHCI_CLOCK_CONTROL); pre_val = sdhci_get_preset_value(host); - div = FIELD_GET(RT_SDHCI_PRESET_SDCLK_FREQ_MASK, pre_val); + div = RT_FIELD_GET(RT_SDHCI_PRESET_SDCLK_FREQ_MASK, pre_val); if (host->clk_mul && (pre_val & RT_SDHCI_PRESET_CLKGEN_SEL)) { clk = RT_SDHCI_PROG_CLOCK_MODE; @@ -2325,22 +2776,23 @@ rt_uint16_t rt_sdhci_clk_set(struct rt_sdhci_host *host, unsigned int clock, } else { - real_div = max_t(int, 1, div << 1); + real_div = rt_max_t(int, 1, div << 1); } - goto clock_set; + + goto _clock_set; } if (host->clk_mul) { for (div = 1; div <= 1024; div++) { - if ((host->max_clk * host->clk_mul / div) - <= clock) + if ((host->max_clk * host->clk_mul / div) <= clock) + { break; + } } if ((host->max_clk * host->clk_mul / div) <= clock) { - clk = RT_SDHCI_PROG_CLOCK_MODE; real_div = div; clk_mul = host->clk_mul; @@ -2348,7 +2800,6 @@ rt_uint16_t rt_sdhci_clk_set(struct rt_sdhci_host *host, unsigned int clock, } else { - switch_base_clk = RT_TRUE; } } @@ -2356,21 +2807,26 @@ rt_uint16_t rt_sdhci_clk_set(struct rt_sdhci_host *host, unsigned int clock, if (!host->clk_mul || switch_base_clk) { if (host->max_clk <= clock) + { div = 1; + } else { - for (div = 2; div < RT_SDHCI_MAX_DIV_SPEC_300; - div += 2) + for (div = 2; div < RT_SDHCI_MAX_DIV_SPEC_300; div += 2) { if ((host->max_clk / div) <= clock) + { break; + } } } real_div = div; div >>= 1; if ((host->quirks2 & RT_SDHCI_QUIRK2_CLOCK_DIV_ZERO_BROKEN) && !div && host->max_clk <= 25000000) + { div = 1; + } } } else @@ -2378,15 +2834,19 @@ rt_uint16_t rt_sdhci_clk_set(struct rt_sdhci_host *host, unsigned int clock, for (div = 1; div < RT_SDHCI_MAX_DIV_SPEC_200; div *= 2) { if ((host->max_clk / div) <= clock) + { break; + } } real_div = div; div >>= 1; } -clock_set: +_clock_set: if (real_div) + { *actual_clock = (host->max_clk * clk_mul) / real_div; + } clk |= (div & RT_SDHCI_DIV_MASK) << RT_SDHCI_DIVIDER_SHIFT; clk |= ((div & RT_SDHCI_DIV_HI_MASK) >> RT_SDHCI_DIV_MASK_LEN) << RT_SDHCI_DIVIDER_HI_SHIFT; @@ -2402,20 +2862,25 @@ void rt_sdhci_clk_enable(struct rt_sdhci_host *host, rt_uint16_t clk) rt_sdhci_writew(host, clk, RT_SDHCI_CLOCK_CONTROL); timeout = rt_tick_from_millisecond(150); - while (1) + while (RT_TRUE) { timeout = timeout - rt_tick_get(); clk = rt_sdhci_readw(host, RT_SDHCI_CLOCK_CONTROL); if (clk & RT_SDHCI_CLOCK_INT_STABLE) + { break; + } + if (timeout < 0) { rt_kprintf("%s: Internal clock never stabilised.\n", mmc_hostname(host->mmc)); - rt_read_reg_debug(host); + rt_sdhci_read_reg_debug(host); + return; } + rt_hw_us_delay(10); } @@ -2426,20 +2891,25 @@ void rt_sdhci_clk_enable(struct rt_sdhci_host *host, rt_uint16_t clk) rt_sdhci_writew(host, clk, RT_SDHCI_CLOCK_CONTROL); timeout = rt_tick_from_millisecond(150); - while (1) + while (RT_TRUE) { timeout = timeout - rt_tick_get(); clk = rt_sdhci_readw(host, RT_SDHCI_CLOCK_CONTROL); if (clk & RT_SDHCI_CLOCK_INT_STABLE) + { break; + } + if (timeout < 0) { 
rt_kprintf("%s: PLL clock never stabilised.\n", mmc_hostname(host->mmc)); - rt_read_reg_debug(host); + rt_sdhci_read_reg_debug(host); + return; } + rt_hw_us_delay(10); } } @@ -2457,34 +2927,39 @@ void rt_sdhci_set_clock(struct rt_sdhci_host *host, unsigned int clock) rt_sdhci_writew(host, 0, RT_SDHCI_CLOCK_CONTROL); if (clock == 0) + { return; + } clk = rt_sdhci_clk_set(host, clock, &host->mmc->actual_clock); rt_sdhci_clk_enable(host, clk); } void rt_sdhci_read_caps(struct rt_sdhci_host *host, const rt_uint16_t *ver, - const rt_uint32_t *caps, const rt_uint32_t *caps1) + const rt_uint32_t *caps, const rt_uint32_t *caps1) { rt_uint16_t v; - rt_uint64_t dt_caps_mask = 0; - rt_uint64_t dt_caps = 0; + rt_uint64_t dt_caps_mask = 0, dt_caps = 0; if (host->read_caps) + { return; + } host->read_caps = RT_TRUE; if (debug_quirks) + { host->quirks = debug_quirks; + } if (debug_quirks2) + { host->quirks2 = debug_quirks2; + } sdhci_reset_for_all(host); - if (host->v4_mode) - sdhci_do_enable_v4_mode(host); #ifdef RT_USING_OFW rt_ofw_prop_read_u64(mmc_dev(host->mmc)->ofw_node, "sdhci-caps-mask", &dt_caps_mask); @@ -2501,12 +2976,14 @@ void rt_sdhci_read_caps(struct rt_sdhci_host *host, const rt_uint16_t *ver, else { host->caps = rt_sdhci_readl(host, RT_SDHCI_CAPABILITIES); - host->caps &= ~lower_32_bits(dt_caps_mask); - host->caps |= lower_32_bits(dt_caps); + host->caps &= ~rt_lower_32_bits(dt_caps_mask); + host->caps |= rt_lower_32_bits(dt_caps); } if (host->version < RT_SDHCI_SPEC_300) + { return; + } if (caps1) { @@ -2515,22 +2992,24 @@ void rt_sdhci_read_caps(struct rt_sdhci_host *host, const rt_uint16_t *ver, else { host->caps1 = rt_sdhci_readl(host, RT_SDHCI_CAPABILITIES_1); - host->caps1 &= ~upper_32_bits(dt_caps_mask); - host->caps1 |= upper_32_bits(dt_caps); + host->caps1 &= ~rt_upper_32_bits(dt_caps_mask); + host->caps1 |= rt_upper_32_bits(dt_caps); } } struct rt_sdhci_host *rt_sdhci_alloc_host(struct rt_device *dev, - size_t priv_size) + rt_size_t priv_size) { - struct rt_mmc_host *mmc; + struct rt_mmc_host *mmc; struct rt_sdhci_host *host; mmc = rt_mmc_alloc_host(sizeof(struct rt_sdhci_host) + priv_size, dev); if (!mmc) - return NULL; + { + return RT_NULL; + } - host = mmc_priv(mmc); + host = rt_mmc_priv(mmc); host->mmc = mmc; host->mmc_host_ops = rt_sdhci_ops; mmc->ops = &host->mmc_host_ops; @@ -2541,41 +3020,42 @@ struct rt_sdhci_host *rt_sdhci_alloc_host(struct rt_device *dev, host->cqe_err_ier = RT_SDHCI_CQE_INT_ERR_MASK; host->tuning_delay = -1; - host->tuning_loop_count = MAX_TUNING_LOOP; + host->tuning_loop_count = RT_SDHCI_MAX_TUNING_LOOP; host->sdma_boundary = RT_SDHCI_DEFAULT_BOUNDARY_ARG; - host->max_timeout_count = 0xE; + host->max_timeout_count = 0xe; return host; } int rt_sdhci_setup_host(struct rt_sdhci_host *host) { + int ret = 0; + rt_bool_t enable_vqmmc = RT_FALSE; + rt_size_t max_current_caps, max_clk; + unsigned int ocr_avail, override_timeout_clk; struct rt_mmc_host *mmc; - size_t max_current_caps; - unsigned int ocr_avail; - unsigned int override_timeout_clk; - size_t max_clk; - int ret = 0; - bool enable_vqmmc = RT_FALSE; - - RT_ASSERT(host != NULL); + RT_ASSERT(host != RT_NULL); mmc = host->mmc; - if (!mmc->supply.vqmmc) + if (!mmc->rthost.supply.vqmmc) { + ret = sdio_regulator_get_supply(mmc->parent,&mmc->rthost); if (ret) + { return ret; + } + enable_vqmmc = RT_TRUE; } - LOG_D("Version: 0x%08x | Present: 0x%08x\n", + LOG_D("Version: 0x%08x | Present: 0x%08x", rt_sdhci_readw(host, RT_SDHCI_HOST_VERSION), rt_sdhci_readl(host, RT_SDHCI_PRESENT_STATE)); - LOG_D("Caps: 
0x%08x | Caps_1: 0x%08x\n", + LOG_D("Caps: 0x%08x | Caps_1: 0x%08x", rt_sdhci_readl(host, RT_SDHCI_CAPABILITIES), rt_sdhci_readl(host, RT_SDHCI_CAPABILITIES_1)); @@ -2590,28 +3070,40 @@ int rt_sdhci_setup_host(struct rt_sdhci_host *host) } if (host->quirks & RT_SDHCI_QUIRK_FORCE_DMA) + { host->flags |= RT_SDHCI_USE_SDMA; + } else if (!(host->caps & RT_SDHCI_CAN_DO_SDMA)) - LOG_D("Controller doesn't have SDMA capability\n"); + { + LOG_D("Controller doesn't have SDMA capability"); + } else + { host->flags |= RT_SDHCI_USE_SDMA; + } if ((host->quirks & RT_SDHCI_QUIRK_BROKEN_DMA) && (host->flags & RT_SDHCI_USE_SDMA)) { - LOG_D("Disabling DMA as it is marked broken\n"); + LOG_D("Disabling DMA as it is marked broken"); host->flags &= ~RT_SDHCI_USE_SDMA; } if (sdhci_can_64bit_dma(host)) + { host->flags |= RT_SDHCI_USE_64_BIT_DMA; + } if (host->flags & RT_SDHCI_USE_SDMA) { if (host->ops->set_dma_mask) + { ret = host->ops->set_dma_mask(host); + } if (!ret && host->ops->enable_dma) + { ret = host->ops->enable_dma(host); + } if (ret) { @@ -2624,16 +3116,22 @@ int rt_sdhci_setup_host(struct rt_sdhci_host *host) } if ((host->flags & RT_SDHCI_USE_64_BIT_DMA) && !host->v4_mode) + { host->flags &= ~RT_SDHCI_USE_SDMA; + } if (!(host->flags & RT_SDHCI_USE_SDMA)) { - host->dma_mask = DMA_BIT_MASK(64); + host->dma_mask = RT_DMA_ADDR_MASK(64); } if (host->version >= RT_SDHCI_SPEC_300) - host->max_clk = FIELD_GET(RT_SDHCI_CLOCK_V3_BASE_MASK, host->caps); + { + host->max_clk = RT_FIELD_GET(RT_SDHCI_CLOCK_V3_BASE_MASK, host->caps); + } else - host->max_clk = FIELD_GET(RT_SDHCI_CLOCK_BASE_MASK, host->caps); + { + host->max_clk = RT_FIELD_GET(RT_SDHCI_CLOCK_BASE_MASK, host->caps); + } host->max_clk *= 1000000; if (host->max_clk == 0 || host->quirks & RT_SDHCI_QUIRK_CAP_CLOCK_BASE_BROKEN) @@ -2643,39 +3141,52 @@ int rt_sdhci_setup_host(struct rt_sdhci_host *host) rt_kprintf("%s: Hardware doesn't specify base clock frequency. %p \n", mmc_hostname(mmc), host->ops->get_max_clock); ret = -ENODEV; - goto undma; + goto _undma; } + host->max_clk = host->ops->get_max_clock(host); } - host->clk_mul = FIELD_GET(RT_SDHCI_CLOCK_MUL_MASK, host->caps1); + host->clk_mul = RT_FIELD_GET(RT_SDHCI_CLOCK_MUL_MASK, host->caps1); if (host->clk_mul) + { host->clk_mul += 1; + } max_clk = host->max_clk; if (host->ops->get_min_clock) + { mmc->f_min = host->ops->get_min_clock(host); + } else if (host->version >= RT_SDHCI_SPEC_300) { if (host->clk_mul) + { max_clk = host->max_clk * host->clk_mul; + } mmc->f_min = host->max_clk / RT_SDHCI_MAX_DIV_SPEC_300; } else + { mmc->f_min = host->max_clk / RT_SDHCI_MAX_DIV_SPEC_200; + } if (!mmc->f_max || mmc->f_max > max_clk) + { mmc->f_max = max_clk; + } if (!(host->quirks & RT_SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK)) { - host->timeout_clk = FIELD_GET(RT_SDHCI_TIMEOUT_CLK_MASK, host->caps); + host->timeout_clk = RT_FIELD_GET(RT_SDHCI_TIMEOUT_CLK_MASK, host->caps); if (host->caps & RT_SDHCI_TIMEOUT_CLK_UNIT) + { host->timeout_clk *= 1000; + } if (host->timeout_clk == 0) { @@ -2684,73 +3195,93 @@ int rt_sdhci_setup_host(struct rt_sdhci_host *host) rt_kprintf("%s: Hardware doesn't specify timeout clock frequency.\n", mmc_hostname(mmc)); ret = -ENODEV; - goto undma; + goto _undma; } - host->timeout_clk = - DIV_ROUND_UP(host->ops->get_timeout_clock(host), - 1000); + host->timeout_clk = RT_DIV_ROUND_UP(host->ops->get_timeout_clock(host), 1000); } if (override_timeout_clk) + { host->timeout_clk = override_timeout_clk; + } - mmc->max_busy_timeout = host->ops->get_max_timeout_count ? 
host->ops->get_max_timeout_count(host) : 1 << 27; + mmc->max_busy_timeout = host->ops->get_max_timeout_count ? + host->ops->get_max_timeout_count(host) : 1 << 27; mmc->max_busy_timeout /= host->timeout_clk; } if (host->quirks2 & RT_SDHCI_QUIRK2_DISABLE_HW_TIMEOUT && !host->ops->get_max_timeout_count) + { mmc->max_busy_timeout = 0; + } mmc->caps |= MMC_CAP_SDIO_IRQ | MMC_CAP_CMD23; mmc->caps2 |= MMC_CAP2_SDIO_IRQ_NOTHREAD; if (host->quirks & RT_SDHCI_QUIRK_MULTIBLOCK_READ_ACMD12) + { host->flags |= RT_SDHCI_AUTO_CMD12; + } - if ((host->version >= RT_SDHCI_SPEC_300) && (!(host->flags & RT_SDHCI_USE_SDMA) || host->v4_mode) && !(host->quirks2 & RT_SDHCI_QUIRK2_ACMD23_BROKEN)) + if ((host->version >= RT_SDHCI_SPEC_300) && + (!(host->flags & RT_SDHCI_USE_SDMA) || host->v4_mode) && + !(host->quirks2 & RT_SDHCI_QUIRK2_ACMD23_BROKEN)) { host->flags |= RT_SDHCI_AUTO_CMD23; - LOG_D("Auto-CMD23 available\n"); + LOG_D("Auto-CMD23 available"); } else { - LOG_D("Auto-CMD23 unavailable\n"); + LOG_D("Auto-CMD23 unavailable"); } if (!(host->quirks & RT_SDHCI_QUIRK_FORCE_1_BIT_DATA)) + { mmc->caps |= MMC_CAP_4_BIT_DATA; + } if (host->quirks2 & RT_SDHCI_QUIRK2_HOST_NO_CMD23) + { mmc->caps &= ~MMC_CAP_CMD23; + } if (host->caps & RT_SDHCI_CAN_DO_HISPD) + { mmc->caps |= MMC_CAP_SD_HIGHSPEED | MMC_CAP_MMC_HIGHSPEED; - if ((host->quirks & RT_SDHCI_QUIRK_BROKEN_CARD_DETECTION) && mmc_card_is_removable(mmc) && rt_mmc_gpio_get_cd(mmc) < 0) + } + if ((host->quirks & RT_SDHCI_QUIRK_BROKEN_CARD_DETECTION) && + mmc_card_is_removable(mmc) && rt_mmc_gpio_get_cd(mmc) < 0) + { mmc->caps |= MMC_CAP_NEEDS_POLL; + } - if (mmc->supply.vqmmc) + if (mmc->rthost.supply.vqmmc) { if (enable_vqmmc) { + rt_regulator_enable(mmc->rthost.supply.vqmmc); host->sdhci_core_to_disable_vqmmc = !ret; } - if (!regulator_is_supported_voltage(mmc->supply.vqmmc, 1700000, - 1950000)) + if (!rt_regulator_is_supported_voltage(mmc->rthost.supply.vqmmc, 1700000, 1950000)) + { host->caps1 &= ~(RT_SDHCI_SUPPORT_SDR104 | RT_SDHCI_SUPPORT_SDR50 | RT_SDHCI_SUPPORT_DDR50); + } - if (!regulator_is_supported_voltage(mmc->supply.vqmmc, 2700000, - 3600000)) + if (!rt_regulator_is_supported_voltage(mmc->rthost.supply.vqmmc, 2700000, 3600000)) + { host->flags &= ~RT_SDHCI_SIGNALING_330; + } if (ret) { rt_kprintf("%s: Failed to enable vqmmc regulator: %d\n", mmc_hostname(mmc), ret); - mmc->supply.vqmmc = (void *)-EINVAL; + mmc->rthost.supply.vqmmc = (void *)-EINVAL; } } + if (host->quirks2 & RT_SDHCI_QUIRK2_NO_1_8_V) { host->caps1 &= ~(RT_SDHCI_SUPPORT_SDR104 | RT_SDHCI_SUPPORT_SDR50 | RT_SDHCI_SUPPORT_DDR50); @@ -2759,61 +3290,84 @@ int rt_sdhci_setup_host(struct rt_sdhci_host *host) } if (host->caps1 & (RT_SDHCI_SUPPORT_SDR104 | RT_SDHCI_SUPPORT_SDR50 | RT_SDHCI_SUPPORT_DDR50)) + { mmc->caps |= MMC_CAP_UHS_SDR12 | MMC_CAP_UHS_SDR25; + } if (host->caps1 & RT_SDHCI_SUPPORT_SDR104) { mmc->caps |= MMC_CAP_UHS_SDR104 | MMC_CAP_UHS_SDR50; if (!(host->quirks2 & RT_SDHCI_QUIRK2_BROKEN_HS200)) + { mmc->caps2 |= MMC_CAP2_HS200; + } } else if (host->caps1 & RT_SDHCI_SUPPORT_SDR50) { mmc->caps |= MMC_CAP_UHS_SDR50; } - if (host->quirks2 & RT_SDHCI_QUIRK2_CAPS_BIT63_FOR_HS400 && (host->caps1 & RT_SDHCI_SUPPORT_HS400)) + if ((host->quirks2 & RT_SDHCI_QUIRK2_CAPS_BIT63_FOR_HS400) && + (host->caps1 & RT_SDHCI_SUPPORT_HS400)) + { mmc->caps2 |= MMC_CAP2_HS400; - if ((mmc->caps2 & MMC_CAP2_HSX00_1_2V) && (!mmc->supply.vqmmc || !regulator_is_supported_voltage(mmc->supply.vqmmc, 1100000, 1300000))) + } + if ((mmc->caps2 & MMC_CAP2_HSX00_1_2V) && (!mmc->rthost.supply.vqmmc || + 
!rt_regulator_is_supported_voltage(mmc->rthost.supply.vqmmc, 1100000, 1300000))) + { mmc->caps2 &= ~MMC_CAP2_HSX00_1_2V; - if ((host->caps1 & RT_SDHCI_SUPPORT_DDR50) && !(host->quirks2 & RT_SDHCI_QUIRK2_BROKEN_DDR50)) + } + if ((host->caps1 & RT_SDHCI_SUPPORT_DDR50) && + !(host->quirks2 & RT_SDHCI_QUIRK2_BROKEN_DDR50)) + { mmc->caps |= MMC_CAP_UHS_DDR50; + } if (host->caps1 & RT_SDHCI_USE_SDR50_TUNING) + { host->flags |= RT_SDHCI_SDR50_NEEDS_TUNING; + } if (host->caps1 & RT_SDHCI_DRIVER_TYPE_A) + { mmc->caps |= MMC_CAP_DRIVER_TYPE_A; + } if (host->caps1 & RT_SDHCI_DRIVER_TYPE_C) + { mmc->caps |= MMC_CAP_DRIVER_TYPE_C; + } if (host->caps1 & RT_SDHCI_DRIVER_TYPE_D) + { mmc->caps |= MMC_CAP_DRIVER_TYPE_D; + } - host->tuning_count = FIELD_GET(RT_SDHCI_RETUNING_TIMER_COUNT_MASK, - host->caps1); + host->tuning_count = RT_FIELD_GET(RT_SDHCI_RETUNING_TIMER_COUNT_MASK, host->caps1); if (host->tuning_count) + { host->tuning_count = 1 << (host->tuning_count - 1); + } /* Re-tuning mode supported by the Host Controller */ - host->tuning_mode = FIELD_GET(RT_SDHCI_RETUNING_MODE_MASK, host->caps1); + host->tuning_mode = RT_FIELD_GET(RT_SDHCI_RETUNING_MODE_MASK, host->caps1); ocr_avail = 0; max_current_caps = rt_sdhci_readl(host, RT_SDHCI_MAX_CURRENT); - if (!max_current_caps && mmc->supply.vmmc) + if (!max_current_caps && mmc->rthost.supply.vmmc) { - int curr = regulator_get_current_limit(mmc->supply.vmmc); + int curr = rt_regulator_get_voltage(mmc->rthost.supply.vmmc); if (curr > 0) { curr = curr / 1000; /* convert to mA */ curr = curr / RT_SDHCI_MAX_CURRENT_MULTIPLIER; - curr = min_t(rt_uint32_t, curr, RT_SDHCI_MAX_CURRENT_LIMIT); - max_current_caps = - FIELD_PREP(RT_SDHCI_MAX_CURRENT_330_MASK, curr) | FIELD_PREP(RT_SDHCI_MAX_CURRENT_300_MASK, curr) | FIELD_PREP(RT_SDHCI_MAX_CURRENT_180_MASK, curr); + curr = rt_min_t(rt_uint32_t, curr, RT_SDHCI_MAX_CURRENT_LIMIT); + max_current_caps = RT_FIELD_PREP(RT_SDHCI_MAX_CURRENT_330_MASK, curr) | + RT_FIELD_PREP(RT_SDHCI_MAX_CURRENT_300_MASK, curr) | + RT_FIELD_PREP(RT_SDHCI_MAX_CURRENT_180_MASK, curr); } } @@ -2821,59 +3375,74 @@ int rt_sdhci_setup_host(struct rt_sdhci_host *host) { ocr_avail |= MMC_VDD_32_33 | MMC_VDD_33_34; - mmc->max_current_330 = FIELD_GET(RT_SDHCI_MAX_CURRENT_330_MASK, - max_current_caps) - * RT_SDHCI_MAX_CURRENT_MULTIPLIER; + mmc->max_current_330 = RT_FIELD_GET(RT_SDHCI_MAX_CURRENT_330_MASK, max_current_caps); + mmc->max_current_330 *= RT_SDHCI_MAX_CURRENT_MULTIPLIER; } if (host->caps & RT_SDHCI_CAN_VDD_300) { ocr_avail |= MMC_VDD_29_30 | MMC_VDD_30_31; - mmc->max_current_300 = FIELD_GET(RT_SDHCI_MAX_CURRENT_300_MASK, - max_current_caps) - * RT_SDHCI_MAX_CURRENT_MULTIPLIER; + mmc->max_current_300 = RT_FIELD_GET(RT_SDHCI_MAX_CURRENT_300_MASK, max_current_caps); + mmc->max_current_300 *= RT_SDHCI_MAX_CURRENT_MULTIPLIER; } if (host->caps & RT_SDHCI_CAN_VDD_180) { ocr_avail |= MMC_VDD_165_195; - mmc->max_current_180 = FIELD_GET(RT_SDHCI_MAX_CURRENT_180_MASK, - max_current_caps) - * RT_SDHCI_MAX_CURRENT_MULTIPLIER; + mmc->max_current_180 = RT_FIELD_GET(RT_SDHCI_MAX_CURRENT_180_MASK, max_current_caps); + mmc->max_current_180 *= RT_SDHCI_MAX_CURRENT_MULTIPLIER; } if (host->ocr_mask) + { ocr_avail = host->ocr_mask; + } if (mmc->ocr_avail) + { ocr_avail = mmc->ocr_avail; + } mmc->ocr_avail = ocr_avail; mmc->ocr_avail_sdio = ocr_avail; if (host->ocr_avail_sdio) + { mmc->ocr_avail_sdio &= host->ocr_avail_sdio; + } mmc->ocr_avail_sd = ocr_avail; if (host->ocr_avail_sd) + { mmc->ocr_avail_sd &= host->ocr_avail_sd; + } else + { mmc->ocr_avail_sd &= 
~MMC_VDD_165_195; + } mmc->ocr_avail_mmc = ocr_avail; if (host->ocr_avail_mmc) + { mmc->ocr_avail_mmc &= host->ocr_avail_mmc; + } if (mmc->ocr_avail == 0) { rt_kprintf("%s: Hardware doesn't report any support voltages.\n", mmc_hostname(mmc)); ret = -ENODEV; - goto unreg; + goto _unreg; } - if ((mmc->caps & (MMC_CAP_UHS_SDR12 | MMC_CAP_UHS_SDR25 | MMC_CAP_UHS_SDR50 | MMC_CAP_UHS_SDR104 | MMC_CAP_UHS_DDR50 | MMC_CAP_1_8V_DDR)) || (mmc->caps2 & (MMC_CAP2_HS200_1_8V_SDR | MMC_CAP2_HS400_1_8V))) + if ((mmc->caps & (MMC_CAP_UHS_SDR12 | MMC_CAP_UHS_SDR25 | MMC_CAP_UHS_SDR50 | + MMC_CAP_UHS_SDR104 | MMC_CAP_UHS_DDR50 | MMC_CAP_1_8V_DDR)) || + (mmc->caps2 & (MMC_CAP2_HS200_1_8V_SDR | MMC_CAP2_HS400_1_8V))) + { host->flags |= RT_SDHCI_SIGNALING_180; + } if (mmc->caps2 & MMC_CAP2_HSX00_1_2V) + { host->flags |= RT_SDHCI_SIGNALING_120; + } rt_spin_lock_init(&host->lock); @@ -2883,7 +3452,8 @@ int rt_sdhci_setup_host(struct rt_sdhci_host *host) mmc->max_segs = 1; } else - { /* PIO */ + { + /* PIO */ mmc->max_segs = RT_SDHCI_MAX_SEGS; } @@ -2912,15 +3482,19 @@ int rt_sdhci_setup_host(struct rt_sdhci_host *host) mmc->max_blk_count = (host->quirks & RT_SDHCI_QUIRK_NO_MULTIBLOCK) ? 1 : 65535; return 0; -unreg: -undma: +_unreg: + if (host->sdhci_core_to_disable_vqmmc) + { + rt_regulator_disable(mmc->rthost.supply.vqmmc); + } +_undma: return ret; } static void sdhci_init(struct rt_sdhci_host *host, int soft) { + rt_base_t flags; struct rt_mmc_host *mmc = host->mmc; - rt_base_t flags; if (soft) { @@ -2930,10 +3504,6 @@ static void sdhci_init(struct rt_sdhci_host *host, int soft) { sdhci_do_reset(host, RT_SDHCI_RESET_ALL); } - if (host->v4_mode) - { - sdhci_do_enable_v4_mode(host); - } flags = rt_spin_lock_irqsave(&host->lock); sdhci_set_default_irqs(host); rt_spin_unlock_irqrestore(&host->lock, flags); @@ -2957,49 +3527,64 @@ static void sdhci_reinit(struct rt_sdhci_host *host) sdhci_enable_card_detection(host); if (cd != (host->ier & (RT_SDHCI_INT_CARD_REMOVE | RT_SDHCI_INT_CARD_INSERT))) + { rt_mmc_detect_change(host->mmc, rt_tick_from_millisecond(200)); + } } int rt_sdhci_init_host(struct rt_sdhci_host *host) { + int ret, len; + char dev_name[32]; struct rt_mmc_host *mmc = host->mmc; - int ret; + + len = sdio_host_set_name(&mmc->rthost, dev_name); if ((mmc->caps2 & MMC_CAP2_CQE) && (host->quirks & RT_SDHCI_QUIRK_BROKEN_CQE)) { mmc->caps2 &= ~MMC_CAP2_CQE; } - host->complete_wq = rt_workqueue_create("sdhci", 4096, 20); + host->complete_wq = rt_workqueue_create(dev_name, RT_SYSTEM_WORKQUEUE_STACKSIZE, 20); if (!host->complete_wq) + { return -ENOMEM; + } rt_work_init(&host->complete_work, sdhci_complete_work, host); - rt_timer_init(&host->timer, "sdhci_timer", sdhci_timeout_timer, host, 0, RT_TIMER_FLAG_SOFT_TIMER); - rt_timer_init(&host->data_timer, "sdhci_data_timer", sdhci_timeout_data_timer, host, 0, RT_TIMER_FLAG_SOFT_TIMER); + rt_sprintf(&dev_name[len], "-timer"); + rt_timer_init(&host->timer, dev_name, + sdhci_timeout_timer, host, 0, RT_TIMER_FLAG_SOFT_TIMER); + rt_sprintf(&dev_name[len], "-data-timer"); + rt_timer_init(&host->data_timer, dev_name, + sdhci_timeout_data_timer, host, 0, RT_TIMER_FLAG_SOFT_TIMER); rt_wqueue_init(&host->buf_ready_int); sdhci_init(host, 0); - host->irq_wq = rt_workqueue_create("sdhci_irq", 8192, 1); + rt_sprintf(&dev_name[len], "-irq"); + host->irq_wq = rt_workqueue_create(dev_name, RT_SYSTEM_WORKQUEUE_STACKSIZE, 1); rt_work_init(&host->irq_work, sdhci_thread_irq, host); rt_hw_interrupt_install(host->irq, sdhci_irq, host, mmc_hostname(mmc)); rt_pic_irq_unmask(host->irq); + 
ret = rt_mmc_add_host(mmc); if (ret) - goto unirq; + { + goto _unirq; + } - LOG_D("%s: RT_SDHCI controller on %s [%s] using %s\n", - mmc_hostname(mmc), host->hw_name, mmc_dev(mmc)->parent.name, - (host->flags & RT_SDHCI_USE_SDMA) ? "DMA" : "PIO"); + LOG_D("%s: RT_SDHCI controller on %s [%s] using %s", + mmc_hostname(mmc), host->hw_name, mmc_dev(mmc)->parent.name, + (host->flags & RT_SDHCI_USE_SDMA) ? "DMA" : "PIO"); sdhci_enable_card_detection(host); return 0; -unirq: +_unirq: sdhci_reset_for_all(host); rt_sdhci_writel(host, 0, RT_SDHCI_INT_ENABLE); rt_sdhci_writel(host, 0, RT_SDHCI_SIGNAL_ENABLE); @@ -3010,17 +3595,22 @@ int rt_sdhci_init_host(struct rt_sdhci_host *host) int rt_sdhci_set_and_add_host(struct rt_sdhci_host *host) { int ret; + ret = rt_sdhci_setup_host(host); if (ret) + { return ret; + } ret = rt_sdhci_init_host(host); if (ret) - goto cleanup; + { + goto _cleanup; + } return 0; -cleanup: +_cleanup: rt_sdhci_cleanup_host(host); return ret; @@ -3028,20 +3618,25 @@ int rt_sdhci_set_and_add_host(struct rt_sdhci_host *host) void rt_sdhci_ios_set(struct rt_mmc_host *mmc, struct rt_mmcsd_io_cfg *ios) { - struct rt_sdhci_host *host = mmc_priv(mmc); - rt_bool_t reinit_uhs = host->reinit_uhs; - rt_bool_t turning_on_clk = RT_FALSE; - rt_uint8_t ctrl; + rt_uint8_t ctrl; + rt_bool_t reinit_uhs, turning_on_clk = RT_FALSE; + struct rt_sdhci_host *host = rt_mmc_priv(mmc); + reinit_uhs = host->reinit_uhs; host->reinit_uhs = RT_FALSE; if (ios->power_mode == MMC_POWER_UNDEFINED) + { return; + } if (host->flags & RT_SDHCI_DEVICE_DEAD) { - if (mmc->supply.vmmc && ios->power_mode == MMC_POWER_OFF) - mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, 0); + if (mmc->rthost.supply.vmmc && ios->power_mode == MMC_POWER_OFF) + { + sdio_regulator_set_ocr(&mmc->rthost, mmc->rthost.supply.vmmc, 0); + } + return; } @@ -3051,8 +3646,12 @@ void rt_sdhci_ios_set(struct rt_mmc_host *mmc, struct rt_mmcsd_io_cfg *ios) sdhci_reinit(host); } - if (host->version >= RT_SDHCI_SPEC_300 && (ios->power_mode == MMC_POWER_UP) && !(host->quirks2 & RT_SDHCI_QUIRK2_PRESET_VALUE_BROKEN)) + if (host->version >= RT_SDHCI_SPEC_300 && + (ios->power_mode == MMC_POWER_UP) && + !(host->quirks2 & RT_SDHCI_QUIRK2_PRESET_VALUE_BROKEN)) + { sdhci_preset_value_enable(host, RT_FALSE); + } if (!ios->clock || ios->clock != host->clock) { @@ -3064,30 +3663,50 @@ void rt_sdhci_ios_set(struct rt_mmc_host *mmc, struct rt_mmcsd_io_cfg *ios) if (host->quirks & RT_SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK && host->clock) { host->timeout_clk = mmc->actual_clock ? mmc->actual_clock / 1000 : host->clock / 1000; - mmc->max_busy_timeout = - host->ops->get_max_timeout_count ? host->ops->get_max_timeout_count(host) : 1 << 27; + mmc->max_busy_timeout = host->ops->get_max_timeout_count ? 
+ host->ops->get_max_timeout_count(host) : 1 << 27; mmc->max_busy_timeout /= host->timeout_clk; } } if (host->ops->set_power) + { host->ops->set_power(host, ios->power_mode, ios->vdd); + } else + { rt_sdhci_set_power(host, ios->power_mode, ios->vdd); + } host->ops->set_bus_width(host, ios->bus_width); - if (!reinit_uhs && turning_on_clk && host->timing == ios->timing && host->version >= RT_SDHCI_SPEC_300 && !sdhci_presetable_values_change(host, ios)) + if (!reinit_uhs && turning_on_clk && + host->timing == ios->timing && host->version >= RT_SDHCI_SPEC_300 && + !sdhci_presetable_values_change(host, ios)) + { return; + } ctrl = rt_sdhci_readb(host, RT_SDHCI_HOST_CONTROL); if (!(host->quirks & RT_SDHCI_QUIRK_NO_HISPD_BIT)) { - if (ios->timing == MMC_TIMING_SD_HS || ios->timing == MMC_TIMING_MMC_HS || ios->timing == MMC_TIMING_MMC_HS400 || ios->timing == MMC_TIMING_MMC_HS200 || ios->timing == MMC_TIMING_MMC_DDR52 || ios->timing == MMC_TIMING_UHS_SDR50 || ios->timing == MMC_TIMING_UHS_SDR104 || ios->timing == MMC_TIMING_UHS_DDR50 || ios->timing == MMC_TIMING_UHS_SDR25) + if (ios->timing == MMC_TIMING_SD_HS || + ios->timing == MMC_TIMING_MMC_HS || + ios->timing == MMC_TIMING_MMC_HS400 || + ios->timing == MMC_TIMING_MMC_HS200 || + ios->timing == MMC_TIMING_MMC_DDR52 || + ios->timing == MMC_TIMING_UHS_SDR50 || + ios->timing == MMC_TIMING_UHS_SDR104 || + ios->timing == MMC_TIMING_UHS_DDR50 || + ios->timing == MMC_TIMING_UHS_SDR25) + { ctrl |= RT_SDHCI_CTRL_HISPD; + } else + { ctrl &= ~RT_SDHCI_CTRL_HISPD; + } } if (host->version >= RT_SDHCI_SPEC_300) @@ -3108,16 +3727,24 @@ void rt_sdhci_ios_set(struct rt_mmc_host *mmc, struct rt_mmcsd_io_cfg *ios) ctrl_2 = rt_sdhci_readw(host, RT_SDHCI_HOST_CONTROL2); ctrl_2 &= ~RT_SDHCI_CTRL_DRV_TYPE_MASK; if (ios->drv_type == MMC_SET_DRIVER_TYPE_A) + { ctrl_2 |= RT_SDHCI_CTRL_DRV_TYPE_A; + } else if (ios->drv_type == MMC_SET_DRIVER_TYPE_B) + { ctrl_2 |= RT_SDHCI_CTRL_DRV_TYPE_B; + } else if (ios->drv_type == MMC_SET_DRIVER_TYPE_C) + { ctrl_2 |= RT_SDHCI_CTRL_DRV_TYPE_C; + } else if (ios->drv_type == MMC_SET_DRIVER_TYPE_D) + { ctrl_2 |= RT_SDHCI_CTRL_DRV_TYPE_D; + } else { - LOG_D("%s: invalid driver type, default to driver type B\n", + LOG_D("%s: invalid driver type, default to driver type B", mmc_hostname(mmc)); ctrl_2 |= RT_SDHCI_CTRL_DRV_TYPE_B; } @@ -3135,16 +3762,18 @@ void rt_sdhci_ios_set(struct rt_mmc_host *mmc, struct rt_mmcsd_io_cfg *ios) sdhci_preset_value_enable(host, RT_TRUE); preset = sdhci_get_preset_value(host); - ios->drv_type = FIELD_GET(RT_SDHCI_PRESET_DRV_MASK, - preset); + ios->drv_type = RT_FIELD_GET(RT_SDHCI_PRESET_DRV_MASK, preset); host->drv_type = ios->drv_type; } host->ops->set_clock(host, host->clock); } else + { rt_sdhci_writeb(host, ctrl, RT_SDHCI_HOST_CONTROL); + } } + void rt_sdhci_free_host(struct rt_sdhci_host *host) { rt_sdhci_cleanup_host(host); diff --git a/components/drivers/sdio/sdhci/sdhci-platform.c b/components/drivers/sdio/dev_sdhci_dm.c old mode 100644 new mode 100755 similarity index 58% rename from components/drivers/sdio/sdhci/sdhci-platform.c rename to components/drivers/sdio/dev_sdhci_dm.c index d6cf4ed2a35..dd369fdbd73 --- a/components/drivers/sdio/sdhci/sdhci-platform.c +++ b/components/drivers/sdio/dev_sdhci_dm.c @@ -7,9 +7,11 @@ * Date Author Notes * 2024-08-16 zhujiale first version */ -#include "sdhci-platform.h" -static const struct rt_sdhci_ops sdhci_pltfm_ops = { +#include "dev_sdhci_dm.h" + +static const struct rt_sdhci_ops sdhci_pltfm_ops = +{ .set_clock = rt_sdhci_set_clock, .set_bus_width = 
rt_sdhci_set_bus_width, .reset = rt_sdhci_reset, @@ -18,40 +20,54 @@ static const struct rt_sdhci_ops sdhci_pltfm_ops = { void rt_sdhci_get_property(struct rt_platform_device *pdev) { - struct rt_device *dev = &pdev->parent; - struct rt_sdhci_host *host = pdev->priv; - struct rt_sdhci_pltfm_host *pltfm_host = sdhci_priv(host); - rt_uint32_t bus_width; + rt_uint32_t bus_width; + struct rt_device *dev = &pdev->parent; + struct rt_sdhci_host *host = pdev->priv; + struct rt_sdhci_pltfm_host *pltfm_host = rt_sdhci_priv(host); if (rt_dm_dev_prop_read_bool(dev, "sdhci,auto-cmd12")) + { host->quirks |= RT_SDHCI_QUIRK_MULTIBLOCK_READ_ACMD12; + } - if (rt_dm_dev_prop_read_bool(dev, "sdhci,1-bit-only") || (rt_dm_dev_prop_read_u32(dev, "bus-width", &bus_width) == 0 && bus_width == 1)) + if (rt_dm_dev_prop_read_bool(dev, "sdhci,1-bit-only") || + (!rt_dm_dev_prop_read_u32(dev, "bus-width", &bus_width) && bus_width == 1)) + { host->quirks |= RT_SDHCI_QUIRK_FORCE_1_BIT_DATA; + } if (rt_dm_dev_prop_read_bool(dev, "broken-cd")) + { host->quirks |= RT_SDHCI_QUIRK_BROKEN_CARD_DETECTION; + } if (rt_dm_dev_prop_read_bool(dev, "no-1-8-v")) + { host->quirks2 |= RT_SDHCI_QUIRK2_NO_1_8_V; + } rt_dm_dev_prop_read_u32(dev, "clock-frequency", &pltfm_host->clock); if (rt_dm_dev_prop_read_bool(dev, "keep-power-in-suspend")) + { host->mmc->pm_caps |= MMC_PM_KEEP_POWER; + } - if (rt_dm_dev_prop_read_bool(dev, "wakeup-source") || rt_dm_dev_prop_read_bool(dev, "enable-sdio-wakeup")) /* legacy */ + if (rt_dm_dev_prop_read_bool(dev, "wakeup-source") || + rt_dm_dev_prop_read_bool(dev, "enable-sdio-wakeup")) /* legacy */ + { host->mmc->pm_caps |= MMC_PM_WAKE_SDIO_IRQ; + } } -struct rt_sdhci_host *rt_sdhci_pltfm_init(struct rt_platform_device *pdev, - const struct rt_sdhci_pltfm_data *pdata, - size_t priv_size) +struct rt_sdhci_host *rt_sdhci_pltfm_init(struct rt_platform_device *pdev, + const struct rt_sdhci_pltfm_data *pdata, + rt_size_t priv_size) { - struct rt_sdhci_host *host; - struct rt_device *dev = &pdev->parent; - void *ioaddr; - int irq; + int irq; + void *ioaddr; + struct rt_sdhci_host *host; + struct rt_device *dev = &pdev->parent; ioaddr = rt_dm_dev_iomap(dev, 0); if (!ioaddr) @@ -64,19 +80,26 @@ struct rt_sdhci_host *rt_sdhci_pltfm_init(struct rt_platform_device *pdev, { return RT_NULL; } - host = rt_sdhci_alloc_host(dev,sizeof(struct rt_sdhci_pltfm_host) + priv_size); + + host = rt_sdhci_alloc_host(dev, sizeof(struct rt_sdhci_pltfm_host) + priv_size); if (!host) { return RT_NULL; } + host->irq = irq; host->ioaddr = ioaddr; host->hw_name = rt_dm_dev_get_name(dev); if (pdata && pdata->ops) + { host->ops = pdata->ops; + } else + { host->ops = &sdhci_pltfm_ops; + } + if (pdata) { host->quirks = pdata->quirks; @@ -88,22 +111,26 @@ struct rt_sdhci_host *rt_sdhci_pltfm_init(struct rt_platform_device *pdev, return host; } -int rt_sdhci_pltfm_init_and_add_host(struct rt_platform_device *pdev, - const struct rt_sdhci_pltfm_data *pdata, - size_t priv_size) +rt_err_t rt_sdhci_pltfm_init_and_add_host(struct rt_platform_device *pdev, + const struct rt_sdhci_pltfm_data *pdata, + rt_size_t priv_size) { + rt_err_t ret = RT_EOK; struct rt_sdhci_host *host; - int ret = 0; host = rt_sdhci_pltfm_init(pdev, pdata, priv_size); if (!host) + { return -RT_ERROR; + } rt_sdhci_get_property(pdev); ret = rt_sdhci_init_host(host); if (ret) + { rt_sdhci_pltfm_free(pdev); + } return ret; } @@ -115,11 +142,22 @@ void rt_sdhci_pltfm_free(struct rt_platform_device *pdev) rt_sdhci_free_host(host); } -void rt_sdhci_pltfm_remove(struct 
rt_platform_device *pdev) +rt_err_t rt_sdhci_pltfm_remove(struct rt_platform_device *pdev) { + rt_bool_t dead; struct rt_sdhci_host *host = pdev->priv; - int dead = (readl(host->ioaddr + RT_SDHCI_INT_STATUS) == 0xffffffff); + + dead = (HWREG32(host->ioaddr + RT_SDHCI_INT_STATUS) == 0xffffffff); rt_sdhci_uninit_host(host, dead); rt_sdhci_pltfm_free(pdev); + + return RT_EOK; +} + +rt_uint32_t rt_sdhci_pltfm_clk_get_max_clock(struct rt_sdhci_host *host) +{ + struct rt_sdhci_pltfm_host *pltfm_host = rt_sdhci_priv(host); + + return rt_clk_get_rate(pltfm_host->clk); } diff --git a/components/drivers/sdio/dev_sdhci_dm.h b/components/drivers/sdio/dev_sdhci_dm.h new file mode 100755 index 00000000000..c5b09980809 --- /dev/null +++ b/components/drivers/sdio/dev_sdhci_dm.h @@ -0,0 +1,68 @@ +/* + * Copyright (c) 2006-2024 RT-Thread Development Team + * + * SPDX-License-Identifier: Apache-2.0 + * + * Change Logs: + * Date Author Notes + * 2024-08-16 zhujiale first version + */ + +#ifndef __DEV_SDHCI_DM_H__ +#define __DEV_SDHCI_DM_H__ + +#include +#include + +struct rt_sdhci_pltfm_data +{ + const struct rt_sdhci_ops *ops; + rt_uint32_t quirks; + rt_uint32_t quirks2; +}; + +struct rt_sdhci_pltfm_host +{ + struct rt_clk *clk; + rt_uint32_t clock; + rt_uint16_t xfer_mode_shadow; + + rt_ubase_t private[]; +}; + +void rt_sdhci_get_property(struct rt_platform_device *pdev); + +rt_inline void rt_sdhci_get_of_property(struct rt_platform_device *pdev) +{ + return rt_sdhci_get_property(pdev); +} + +extern struct rt_sdhci_host *rt_sdhci_pltfm_init(struct rt_platform_device *pdev, + const struct rt_sdhci_pltfm_data *pdata, + rt_size_t priv_size); +extern void rt_sdhci_pltfm_free(struct rt_platform_device *pdev); + +extern rt_err_t rt_sdhci_pltfm_init_and_add_host(struct rt_platform_device *pdev, + const struct rt_sdhci_pltfm_data *pdata, + rt_size_t priv_size); + +extern rt_err_t rt_sdhci_pltfm_remove(struct rt_platform_device *pdev); + +extern rt_uint32_t rt_sdhci_pltfm_clk_get_max_clock(struct rt_sdhci_host *host); + +rt_inline void *rt_sdhci_pltfm_priv(struct rt_sdhci_pltfm_host *host) +{ + return host->private; +} + +rt_inline rt_err_t rt_sdhci_pltfm_suspend(struct rt_device *dev) +{ + return RT_EOK; +} + +rt_inline rt_err_t rt_sdhci_pltfm_resume(struct rt_device *dev) +{ + return RT_EOK; +} + +#endif /* __DEV_SDHCI_DM_H__ */ diff --git a/components/drivers/sdio/sdhci/fit-mmc.c b/components/drivers/sdio/dev_sdhci_host.c old mode 100644 new mode 100755 similarity index 55% rename from components/drivers/sdio/sdhci/fit-mmc.c rename to components/drivers/sdio/dev_sdhci_host.c index a3fe063194d..a9d31a059df --- a/components/drivers/sdio/sdhci/fit-mmc.c +++ b/components/drivers/sdio/dev_sdhci_host.c @@ -7,17 +7,25 @@ * Date Author Notes * 2024-08-16 zhujiale first version */ + #include -#include "sdhci.h" +#include + +#define DBG_TAG "SDHCI" +#ifdef RT_SDIO_DEBUG +#define DBG_LVL DBG_LOG +#else +#define DBG_LVL DBG_INFO +#endif /* RT_SDIO_DEBUG */ #include -#include -#include +#include "dev_sdio_dm.h" +#include "dev_sdhci_dm.h" static void rt_plat_request(struct rt_mmcsd_host *host, struct rt_mmcsd_req *req) { - struct rt_mmc_host *mmc = (struct rt_mmc_host *)host; - rt_uint32_t flags = req->cmd->flags; + rt_uint32_t flags = req->cmd->flags; + struct rt_mmc_host *mmc = rt_container_of(host, struct rt_mmc_host, rthost); switch (flags & RESP_MASK) { @@ -49,12 +57,13 @@ static void rt_plat_request(struct rt_mmcsd_host *host, struct rt_mmcsd_req *req flags |= MMC_RSP_R7; break; } + if (req->data) { if 
((rt_uint64_t)rt_kmem_v2p(req->data->buf) > 0xffffffff) { + void *req_buf = RT_NULL; void *dma_buffer = rt_malloc(req->data->blks * req->data->blksize); - void *req_buf = NULL; if (req->data->flags & DATA_DIR_WRITE) { @@ -76,7 +85,9 @@ static void rt_plat_request(struct rt_mmcsd_host *host, struct rt_mmcsd_req *req { rt_memcpy(req_buf, dma_buffer, req->data->blksize * req->data->blks); req->data->buf = req_buf; - }else{ + } + else + { req->data->buf = req_buf; } @@ -98,9 +109,9 @@ static void rt_plat_request(struct rt_mmcsd_host *host, struct rt_mmcsd_req *req static void rt_plat_set_ioconfig(struct rt_mmcsd_host *host, struct rt_mmcsd_io_cfg *iocfg) { - struct rt_mmc_host *mmc = (struct rt_mmc_host *)host; + struct rt_mmc_host *mmc = rt_container_of(host, struct rt_mmc_host, rthost); - LOG_D("clock:%d,width:%d,power:%d,vdd:%d,timing:%d\n", + LOG_D("clock: %u, width: %u, power: %u, vdd: %u, timing: %u", iocfg->clock, iocfg->bus_width, iocfg->power_mode, iocfg->vdd, iocfg->timing); @@ -109,66 +120,80 @@ static void rt_plat_set_ioconfig(struct rt_mmcsd_host *host, struct rt_mmcsd_io_ static rt_int32_t rt_plat_get_card_status(struct rt_mmcsd_host *host) { - struct rt_mmc_host *mmc = (struct rt_mmc_host *)host; + struct rt_mmc_host *mmc = rt_container_of(host, struct rt_mmc_host, rthost); return mmc->ops->get_cd(mmc); } static rt_int32_t rt_plat_execute_tuning(struct rt_mmcsd_host *host, rt_int32_t opcode) { - struct rt_mmc_host *mmc = (struct rt_mmc_host *)host; + struct rt_mmc_host *mmc = rt_container_of(host, struct rt_mmc_host, rthost); return mmc->ops->execute_tuning(mmc, opcode); } static void rt_plat_enable_sdio_irq(struct rt_mmcsd_host *host, rt_int32_t en) { - struct rt_mmc_host *mmc = (struct rt_mmc_host *)host; + struct rt_mmc_host *mmc = rt_container_of(host, struct rt_mmc_host, rthost); return mmc->ops->enable_sdio_irq(mmc, en); } +static rt_bool_t rt_plat_card_busy(struct rt_mmcsd_host *host) +{ + struct rt_mmc_host *mmc = rt_container_of(host, struct rt_mmc_host, rthost); -static const struct rt_mmcsd_host_ops rt_mmcsd_ops = { - .request = rt_plat_request, - .set_iocfg = rt_plat_set_ioconfig, - .get_card_status = rt_plat_get_card_status, - .enable_sdio_irq = rt_plat_enable_sdio_irq, - .execute_tuning = rt_plat_execute_tuning, -}; + return mmc->ops->card_busy(mmc); +} +static rt_err_t rt_plat_signal_voltage_switch(struct rt_mmcsd_host *host, struct rt_mmcsd_io_cfg *io_cfg) +{ + struct rt_mmc_host *mmc = rt_container_of(host, struct rt_mmc_host, rthost); + + return mmc->ops->start_signal_voltage_switch(mmc, io_cfg); +} + +static const struct rt_mmcsd_host_ops rt_mmcsd_ops = +{ + .request = rt_plat_request, + .set_iocfg = rt_plat_set_ioconfig, + .get_card_status = rt_plat_get_card_status, + .enable_sdio_irq = rt_plat_enable_sdio_irq, + .execute_tuning = rt_plat_execute_tuning, + .card_busy = rt_plat_card_busy, + .signal_voltage_switch = rt_plat_signal_voltage_switch, +}; void rt_mmc_request_done(struct rt_mmc_host *host, struct rt_mmcsd_req *mrq) { mmcsd_req_complete(&host->rthost); } -/*add host in rtt while sdhci complete*/ -int rt_mmc_add_host(struct rt_mmc_host *mmc) +/* Add host in rtt while sdhci complete */ +rt_err_t rt_mmc_add_host(struct rt_mmc_host *mmc) { mmc->rthost.ops = &rt_mmcsd_ops; mmc->rthost.flags = mmc->caps; mmc->rthost.freq_max = mmc->f_max; - mmc->rthost.freq_min = 400000; + mmc->rthost.freq_min = mmc->f_min; mmc->rthost.max_dma_segs = mmc->max_segs; mmc->rthost.max_seg_size = mmc->max_seg_size; mmc->rthost.max_blk_size = mmc->max_blk_size; 
mmc->rthost.max_blk_count = mmc->max_blk_count; - mmc->rthost.valid_ocr = VDD_165_195|VDD_20_21|VDD_21_22|VDD_22_23|VDD_24_25|VDD_25_26|VDD_26_27|VDD_27_28|VDD_28_29|VDD_29_30|VDD_30_31|VDD_32_33|VDD_33_34|VDD_34_35|VDD_35_36; - + mmc->rthost.valid_ocr = mmc->ocr_avail; mmcsd_change(&mmc->rthost); - return 0; + + return RT_EOK; } struct rt_mmc_host *rt_mmc_alloc_host(int extra, struct rt_device *dev) { struct rt_mmc_host *mmc; - mmc = rt_malloc(sizeof(*mmc) + extra); + mmc = rt_calloc(1, sizeof(*mmc) + extra); if (mmc) { - rt_memset(mmc, 0, sizeof(*mmc) + extra); mmc->parent = dev; mmcsd_host_init(&mmc->rthost); } @@ -181,86 +206,52 @@ void rt_mmc_remove_host(struct rt_mmc_host *host) rt_free(host); } -int rt_mmc_abort_tuning(struct rt_mmc_host *host, rt_uint32_t opcode) +rt_err_t rt_mmc_abort_tuning(struct rt_mmc_host *host, rt_uint32_t opcode) { - return 0; + return RT_EOK; } - -int rt_mmc_gpio_get_cd(struct rt_mmc_host *host) +rt_err_t rt_mmc_gpio_get_cd(struct rt_mmc_host *host) { - return -ENOSYS; + return -RT_ENOSYS; } -void rt_mmc_detect_change(struct rt_mmc_host *host, unsigned long delay) +void rt_mmc_detect_change(struct rt_mmc_host *host, rt_ubase_t delay) { } - -int rt_mmc_regulator_set_vqmmc(struct rt_mmc_host *mmc, struct rt_mmcsd_io_cfg *ios) -{ - return 0; -} - rt_bool_t rt_mmc_can_gpio_ro(struct rt_mmc_host *host) { return RT_FALSE; } -int rt_mmc_gpio_get_ro(struct rt_mmc_host *host) +rt_err_t rt_mmc_gpio_get_ro(struct rt_mmc_host *host) { - return 0; + return RT_EOK; } -int rt_mmc_send_abort_tuning(struct rt_mmc_host *host, rt_uint32_t opcode) +rt_err_t rt_mmc_send_abort_tuning(struct rt_mmc_host *host, rt_uint32_t opcode) { - return 0; + return RT_EOK; } -int rt_mmc_of_parse(struct rt_mmc_host *host) + +rt_err_t rt_mmc_of_parse(struct rt_mmc_host *host) { + rt_err_t err; struct rt_device *dev = host->parent; - rt_uint32_t bus_width; if (!dev || !dev->ofw_node) - return 0; - - /* "bus-width" is translated to MMC_CAP_*_BIT_DATA flags */ - if (rt_dm_dev_prop_read_u32(dev, "bus-width", &bus_width) < 0) { - bus_width = 1; + return RT_EOK; } - switch (bus_width) + err = sdio_ofw_parse(dev->ofw_node, &host->rthost); + if (err) { - case 8: - host->caps |= MMC_CAP_8_BIT_DATA; - break; /* Hosts capable of 8-bit can also do 4 bits */ - case 4: - host->caps |= MMC_CAP_4_BIT_DATA; - break; - case 1: - break; - default: - return -EINVAL; + return err; } - /* f_max is obtained from the optional "max-frequency" property */ - rt_dm_dev_prop_read_u32(dev, "max-frequency", &host->f_max); - - if (rt_dm_dev_prop_read_bool(dev, "cap-mmc-highspeed")) - { - host->caps |= MMC_CAP_MMC_HIGHSPEED; - } - - if (rt_dm_dev_prop_read_bool(dev, "mmc-hs200-1_8v")) - { - host->caps |= MMC_CAP2_HS200_1_8V_SDR; - } - - if (rt_dm_dev_prop_read_bool(dev, "non-removable")) - { - host->caps |= MMC_CAP_NONREMOVABLE; - } + host->caps |= host->rthost.flags; if (rt_dm_dev_prop_read_bool(dev, "no-sdio")) { @@ -272,25 +263,9 @@ int rt_mmc_of_parse(struct rt_mmc_host *host) host->caps2 |= MMC_CAP2_NO_SD; } - if (rt_dm_dev_prop_read_bool(dev, "mmc-ddr-3_3v")) - { - host->caps |= MMC_CAP_3_3V_DDR; - } - - if (rt_dm_dev_prop_read_bool(dev, "mmc-ddr-1_8v")) - { - host->caps |= MMC_CAP_1_8V_DDR; - } - - if (rt_dm_dev_prop_read_bool(dev, "mmc-ddr-1_2v")) - { - host->caps |= MMC_CAP_1_2V_DDR; - } - - return 0; + return RT_EOK; } - void rt_mmc_free_host(struct rt_mmc_host *host) { } @@ -299,22 +274,3 @@ rt_bool_t rt_mmc_can_gpio_cd(struct rt_mmc_host *host) { return RT_FALSE; } - -int mmc_regulator_get_supply(struct 
rt_mmc_host *mmc) -{ - mmc->supply.vmmc = -RT_NULL; - mmc->supply.vqmmc = -RT_NULL; - - return 0; -} -int regulator_get_current_limit(struct regulator *regulator) -{ - return 0; -} - -int regulator_is_supported_voltage(struct regulator *regulator, - - int min_uV, int max_uV) -{ - return 0; -} diff --git a/components/drivers/sdio/dev_sdio_dm.c b/components/drivers/sdio/dev_sdio_dm.c new file mode 100755 index 00000000000..29d40ed6a78 --- /dev/null +++ b/components/drivers/sdio/dev_sdio_dm.c @@ -0,0 +1,157 @@ +/* + * Copyright (c) 2006-2022, RT-Thread Development Team + * + * SPDX-License-Identifier: Apache-2.0 + * + * Change Logs: + * Date Author Notes + * 2022-12-06 GuEe-GUI first version + */ + +#include +#include "dev_sdio_dm.h" + +#define DBG_TAG "SDIO" +#ifdef RT_SDIO_DEBUG +#define DBG_LVL DBG_LOG +#else +#define DBG_LVL DBG_INFO +#endif /* RT_SDIO_DEBUG */ +#include + +int sdio_host_set_name(struct rt_mmcsd_host *host, char *out_devname) +{ + int id = -1, res; + static int uid_min = -1; + static volatile rt_atomic_t uid = 0; + + RT_ASSERT(host != RT_NULL); + +#ifdef RT_USING_OFW + if (host->ofw_node) + { + id = rt_ofw_get_alias_id(host->ofw_node, "mmc"); + + if (uid_min < 0) + { + uid_min = rt_ofw_get_alias_last_id("mmc"); + uid_min = uid_min < 0 ? 0 : (uid_min + 1); + + rt_atomic_store(&uid, uid_min); + } + } +#endif /* RT_USING_OFW */ + + if (id < 0) + { + id = (int)rt_atomic_add(&uid, 1); + } + + res = rt_snprintf(host->name, RT_NAME_MAX, "sd%u", id); + + if (out_devname) + { + rt_strncpy(out_devname, host->name, RT_NAME_MAX); + } + + return res; +} + +#ifdef RT_USING_OFW +rt_err_t sdio_ofw_parse(struct rt_ofw_node *dev_np, struct rt_mmcsd_host *host) +{ + rt_uint32_t bus_width = 1; + + if (!dev_np) + { + return -RT_EINVAL; + } + + host->ofw_node = host->ofw_node ? 
: dev_np; + + host->flags = MMCSD_MUTBLKWRITE; + rt_ofw_prop_read_u32(dev_np, "bus-width", &bus_width); + + switch (bus_width) + { + case 0x8: + host->flags |= MMCSD_BUSWIDTH_8; + break; + + case 0x4: + host->flags |= MMCSD_BUSWIDTH_4; + break; + + case 0x1: + break; + + default: + LOG_E("Invalid \"bus-width\" value %d", bus_width); + return -RT_EIO; + } + + rt_ofw_prop_read_u32(dev_np, "max-frequency", &host->freq_max); + + if (rt_ofw_prop_read_bool(dev_np, "non-removable")) + { + host->flags |= MMCSD_SUP_NONREMOVABLE; + } + + if (rt_ofw_prop_read_bool(dev_np, "cap-sdio-irq")) + { + host->flags |= MMCSD_SUP_SDIO_IRQ; + } + + if (rt_ofw_prop_read_bool(dev_np, "cap-sd-highspeed") || + rt_ofw_prop_read_bool(dev_np, "cap-mmc-highspeed")) + { + host->flags |= MMCSD_SUP_HIGHSPEED; + } + + if (rt_ofw_prop_read_bool(dev_np, "mmc-ddr-3_3v")) + { + host->flags |= MMCSD_SUP_DDR_3V3; + } + if (rt_ofw_prop_read_bool(dev_np, "mmc-ddr-1_8v")) + { + host->flags |= MMCSD_SUP_DDR_1V8; + } + if (rt_ofw_prop_read_bool(dev_np, "mmc-ddr-1_2v")) + { + host->flags |= MMCSD_SUP_DDR_1V2; + } + + if (rt_ofw_prop_read_bool(dev_np, "mmc-hs200-1_2v")) + { + host->flags |= MMCSD_SUP_HS200_1V2; + } + if (rt_ofw_prop_read_bool(dev_np, "mmc-hs200-1_8v")) + { + host->flags |= MMCSD_SUP_HS200_1V8; + } + + if (rt_ofw_prop_read_bool(dev_np, "mmc-hs400-1_8v")) + { + host->flags |= MMCSD_SUP_HS400_1V8; + } + if (rt_ofw_prop_read_bool(dev_np, "mmc-hs400-1_2v")) + { + host->flags |= MMCSD_SUP_HS400_1V2; + } + + if (rt_ofw_prop_read_bool(dev_np, "sd-uhs-sdr50")) + { + host->flags |= MMCSD_SUP_SDR50; + } + if (rt_ofw_prop_read_bool(dev_np, "sd-uhs-sdr104")) + { + host->flags |= MMCSD_SUP_SDR104; + } + if (rt_ofw_prop_read_bool(dev_np, "sd-uhs-ddr50")) + { + host->flags |= MMCSD_SUP_DDR50; + } + + return RT_EOK; +} +#endif /* RT_USING_OFW */ diff --git a/components/drivers/sdio/dev_sdio_dm.h b/components/drivers/sdio/dev_sdio_dm.h new file mode 100755 index 00000000000..675ff3d442c --- /dev/null +++ b/components/drivers/sdio/dev_sdio_dm.h @@ -0,0 +1,39 @@ +/* + * Copyright (c) 2006-2022, RT-Thread Development Team + * + * SPDX-License-Identifier: Apache-2.0 + * + * Change Logs: + * Date Author Notes + * 2022-12-06 GuEe-GUI first version + */ + +#ifndef __DEV_SDIO_DM_H__ +#define __DEV_SDIO_DM_H__ + +#include +#include +#include + +int sdio_host_set_name(struct rt_mmcsd_host *host, char *out_devname); + +#ifdef RT_USING_REGULATOR +rt_err_t sdio_regulator_set_ocr(struct rt_mmcsd_host *host, + struct rt_regulator *supply, rt_uint16_t vdd_bit); +rt_err_t sdio_regulator_set_vqmmc(struct rt_mmcsd_host *host, + struct rt_mmcsd_io_cfg *ios); +rt_err_t sdio_regulator_get_supply(struct rt_device *dev, struct rt_mmcsd_host *host); +rt_err_t sdio_regulator_enable_vqmmc(struct rt_mmcsd_host *host); +void sdio_regulator_disable_vqmmc(struct rt_mmcsd_host *host); +#endif /* RT_USING_REGULATOR */ + +#ifdef RT_USING_OFW +rt_err_t sdio_ofw_parse(struct rt_ofw_node *dev_np, struct rt_mmcsd_host *host); +#else +rt_inline rt_err_t sdio_ofw_parse(struct rt_ofw_node *dev_np, struct rt_mmcsd_host *host) +{ + return RT_EOK; +} +#endif /* RT_USING_OFW */ + +#endif /* __DEV_SDIO_DM_H__ */ diff --git a/components/drivers/sdio/host/Kconfig b/components/drivers/sdio/host/Kconfig new file mode 100755 index 00000000000..8576b19d8f8 --- /dev/null +++ b/components/drivers/sdio/host/Kconfig @@ -0,0 +1,22 @@ +config RT_SDIO_SDHCI_PCI + bool "SDHCI support on PCI bus" + depends on RT_USING_PCI + select RT_USING_SDHCI + default n + +config RT_SDIO_DW_MMC + bool 
"Synopsys DesignWare MMC Family" + depends on RT_USING_PINCTRL + depends on RT_USING_RESET + depends on RT_USING_REGULATOR + select RT_USING_DEVICE_IPC + select RT_USING_SYSTEM_WORKQUEUE + default n + +config RT_SDIO_DW_MMC_PCI + bool "Synopsys Designware MCI support on PCI bus" + depends on RT_SDIO_DW_MMC + depends on RT_USING_PCI + default n + +osource "$(SOC_DM_SDIO_DIR)/Kconfig" diff --git a/components/drivers/sdio/host/SConscript b/components/drivers/sdio/host/SConscript new file mode 100755 index 00000000000..242de85644c --- /dev/null +++ b/components/drivers/sdio/host/SConscript @@ -0,0 +1,24 @@ +from building import * + +group = [] + +if not GetDepend(['RT_USING_SDIO']) and not GetDepend(['RT_USING_DM']): + Return('group') + +cwd = GetCurrentDir() +CPPPATH = [cwd + '/../../include'] + +src = [] + +if GetDepend(['RT_SDIO_SDHCI_PCI']): + src += ['sdhci-pci.c'] + +if GetDepend(['RT_SDIO_DW_MMC']): + src += ['sdio-dw.c', 'sdio-dw-platform.c'] + + if GetDepend(['RT_SDIO_DW_MMC_PCI']): + src += ['sdio-dw-pci.c'] + +group = DefineGroup('DeviceDrivers', src, depend = [''], CPPPATH = CPPPATH) + +Return('group') diff --git a/components/drivers/sdio/host/sdhci-pci.c b/components/drivers/sdio/host/sdhci-pci.c new file mode 100755 index 00000000000..704b1b701b2 --- /dev/null +++ b/components/drivers/sdio/host/sdhci-pci.c @@ -0,0 +1,113 @@ +/* + * Copyright (c) 2006-2022, RT-Thread Development Team + * + * SPDX-License-Identifier: Apache-2.0 + * + * Change Logs: + * Date Author Notes + * 2023-02-25 GuEe-GUI the first version + */ + +#define SDHCI_REG_BAR 0 + +#include "../dev_sdio_dm.h" + +struct pci_sdhci_host +{ + struct rt_sdhci_host parent; +}; + +static const struct rt_sdhci_ops pci_sdhci_ops = +{ + .set_clock = rt_sdhci_set_clock, + .set_bus_width = rt_sdhci_set_bus_width, + .reset = rt_sdhci_reset, +}; + +static rt_err_t pci_sdhci_probe(struct rt_pci_device *pdev) +{ + rt_err_t err; + struct rt_sdhci_host *host; + struct pci_sdhci_host *pci_host; + + host = rt_sdhci_alloc_host(&pdev->parent, sizeof(struct pci_sdhci_host)); + + if (!host) + { + return -RT_ENOMEM; + } + pci_host = rt_container_of(host, struct pci_sdhci_host, parent); + + host->ioaddr = rt_pci_iomap(pdev, SDHCI_REG_BAR); + + if (!host->ioaddr) + { + err = -RT_EIO; + goto _fail; + } + + host->irq = pdev->irq; + host->ops = &pci_sdhci_ops; + + rt_pci_irq_unmask(pdev); + rt_pci_set_master(pdev); + + if ((err = rt_sdhci_set_and_add_host(host))) + { + goto _fail; + } + + pdev->parent.user_data = pci_host; + + return RT_EOK; + +_fail: + if (host->ioaddr) + { + rt_iounmap(host->ioaddr); + } + + rt_sdhci_free_host(host); + + return err; +} + +static rt_err_t pci_sdhci_remove(struct rt_pci_device *pdev) +{ + rt_bool_t dead; + struct rt_sdhci_host *host; + struct pci_sdhci_host *pci_host = pdev->parent.user_data; + + host = &pci_host->parent; + + /* INTx is shared, don't mask all */ + rt_hw_interrupt_umask(pdev->irq); + rt_pci_irq_mask(pdev); + rt_pci_clear_master(pdev); + + dead = (HWREG32(host->ioaddr + RT_SDHCI_INT_STATUS) == 0xffffffff); + + rt_sdhci_uninit_host(host, dead); + + rt_iounmap(host->ioaddr); + rt_sdhci_free_host(host); + + return RT_EOK; +} + +static const struct rt_pci_device_id pci_sdhci_pci_ids[] = +{ + { RT_PCI_DEVICE_ID(PCI_VENDOR_ID_REDHAT, 0x0007), }, + { RT_PCI_DEVICE_CLASS(PCIS_SYSTEM_SDHCI, ~0) }, + { /* sentinel */ } +}; + +static struct rt_pci_driver pci_sdhci_driver = +{ + .name = "sdhci-pci", + + .ids = pci_sdhci_pci_ids, + .probe = pci_sdhci_probe, + .remove = pci_sdhci_remove, +}; 
+RT_PCI_DRIVER_EXPORT(pci_sdhci_driver); diff --git a/components/drivers/sdio/host/sdio-dw-pci.c b/components/drivers/sdio/host/sdio-dw-pci.c new file mode 100755 index 00000000000..0601164f5f2 --- /dev/null +++ b/components/drivers/sdio/host/sdio-dw-pci.c @@ -0,0 +1,100 @@ +/* + * Copyright (c) 2006-2022, RT-Thread Development Team + * + * SPDX-License-Identifier: Apache-2.0 + * + * Change Logs: + * Date Author Notes + * 2022-12-06 GuEe-GUI first version + */ + +#include "dev_sdio_dw.h" + +#include + +#define SYNOPSYS_DW_MCI_VENDOR_ID 0x0700 +#define SYNOPSYS_DW_MCI_DEVICE_ID 0x1107 + +#define MCI_REG_NO 2 + +static const struct sdio_dw_drv_data sdio_dw_pci_drv_data = +{ +}; + +static rt_err_t sdio_dw_pci_probe(struct rt_pci_device *pdev) +{ + rt_err_t err; + struct sdio_dw *sd = rt_calloc(1, sizeof(*sd)); + + if (!sd) + { + return -RT_ENOMEM; + } + + sd->bus_dev = &pdev->parent; + sd->base = rt_pci_iomap(pdev, MCI_REG_NO); + + if (!sd->base) + { + goto _fail; + } + + sd->irq = pdev->irq; + rt_pci_irq_unmask(pdev); + + sd->base_phy = (rt_ubase_t)rt_kmem_v2p(sd->base); + sd->drv_data = &sdio_dw_pci_drv_data; + + /* board data */ + sd->bus_hz = 33 * 1000 * 1000; + sd->detect_delay_ms = 200; + sd->fifo_depth = 32; + + pdev->parent.user_data = sd; + + if ((err = sdio_dw_probe(sd))) + { + goto _fail; + } + + return RT_EOK; + +_fail: + if (sd->base) + { + rt_iounmap(sd->base); + } + + rt_free(sd); + + return err; +} + +static rt_err_t sdio_dw_pci_remove(struct rt_pci_device *pdev) +{ + struct sdio_dw *sd = pdev->parent.user_data; + + sdio_dw_remove(sd); + + rt_iounmap(sd->base); + + rt_free(sd); + + return RT_EOK; +} + +static const struct rt_pci_device_id sdio_dw_pci_pci_ids[] = +{ + { RT_PCI_DEVICE_ID(SYNOPSYS_DW_MCI_VENDOR_ID, SYNOPSYS_DW_MCI_DEVICE_ID) }, + { /* sentinel */ } +}; + +static struct rt_pci_driver sdio_dw_pci_driver = +{ + .name = "dw-mmc-pci", + + .ids = sdio_dw_pci_pci_ids, + .probe = sdio_dw_pci_probe, + .remove = sdio_dw_pci_remove, +}; +RT_PCI_DRIVER_EXPORT(sdio_dw_pci_driver); diff --git a/components/drivers/sdio/host/sdio-dw-platform.c b/components/drivers/sdio/host/sdio-dw-platform.c new file mode 100755 index 00000000000..936fcb8dbe8 --- /dev/null +++ b/components/drivers/sdio/host/sdio-dw-platform.c @@ -0,0 +1,110 @@ +/* + * Copyright (c) 2006-2022, RT-Thread Development Team + * + * SPDX-License-Identifier: Apache-2.0 + * + * Change Logs: + * Date Author Notes + * 2022-12-06 GuEe-GUI first version + */ + +#include "sdio-dw-platform.h" + +#include + +rt_err_t sdio_dw_platform_register(struct rt_platform_device *pdev, + const struct sdio_dw_drv_data *drv_data) +{ + rt_err_t err = RT_EOK; + struct rt_device *dev = &pdev->parent; + struct sdio_dw *sd = rt_calloc(1, sizeof(*sd)); + + if (!sd) + { + return -RT_ENOMEM; + } + + sd->bus_dev = &pdev->parent; + sd->base = rt_dm_dev_iomap(dev, 0); + + if (!sd->base) + { + err = -RT_EIO; + goto _fail; + } + + sd->irq = rt_dm_dev_get_irq(dev, 0); + + if (sd->irq < 0) + { + err = sd->irq; + + goto _fail; + } + + sd->parent.ofw_node = dev->ofw_node; + + sd->base_phy = (rt_ubase_t)rt_kmem_v2p(sd->base); + sd->drv_data = drv_data; + + pdev->parent.user_data = sd; + + if ((err = sdio_dw_probe(sd))) + { + goto _fail; + } + + return RT_EOK; + +_fail: + if (sd->base) + { + rt_iounmap(sd->base); + } + + rt_free(sd); + + return err; +} + +static rt_err_t sdio_dw_platform_probe(struct rt_platform_device *pdev) +{ + const struct sdio_dw_drv_data *drv_data = RT_NULL; + + if (pdev->parent.ofw_node) + { + drv_data = pdev->id->data; + } + + 
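    /*
     * When the device was matched via an OFW/device-tree node, pdev->id->data
     * carries the controller-specific sdio_dw_drv_data (the set_iocfg,
     * execute_tuning, switch_voltage and timeout hooks consumed by sdio-dw.c).
     * SoC glue drivers may also call sdio_dw_platform_register() directly with
     * their own drv_data, which is why it is exported via sdio-dw-platform.h.
     */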
return sdio_dw_platform_register(pdev, drv_data); +} + +static rt_err_t sdio_dw_platform_remove(struct rt_platform_device *pdev) +{ + struct sdio_dw *sd = pdev->parent.user_data; + + sdio_dw_remove(sd); + + rt_iounmap(sd->base); + + rt_free(sd); + + return RT_EOK; +} + +static const struct rt_ofw_node_id sdio_dw_platform_ofw_ids[] = +{ + { .compatible = "snps,dw-mshc", }, + { .compatible = "img,pistachio-dw-mshc", }, + { /* sentinel */ } +}; + +static struct rt_platform_driver sdio_dw_platform_driver = +{ + .name = "dw-mmc", + .ids = sdio_dw_platform_ofw_ids, + + .probe = sdio_dw_platform_probe, + .remove = sdio_dw_platform_remove, +}; +RT_PLATFORM_DRIVER_EXPORT(sdio_dw_platform_driver); diff --git a/components/drivers/sdio/host/sdio-dw-platform.h b/components/drivers/sdio/host/sdio-dw-platform.h new file mode 100755 index 00000000000..aaf432c67ba --- /dev/null +++ b/components/drivers/sdio/host/sdio-dw-platform.h @@ -0,0 +1,19 @@ +/* + * Copyright (c) 2006-2022, RT-Thread Development Team + * + * SPDX-License-Identifier: Apache-2.0 + * + * Change Logs: + * Date Author Notes + * 2022-12-06 GuEe-GUI first version + */ + +#ifndef __SDIO_DW_PLATFORM_H__ +#define __SDIO_DW_PLATFORM_H__ + +#include "sdio-dw.h" + +rt_err_t sdio_dw_platform_register(struct rt_platform_device *pdev, + const struct sdio_dw_drv_data *drv_data); + +#endif /* __SDIO_DW_PLATFORM_H__ */ diff --git a/components/drivers/sdio/host/sdio-dw.c b/components/drivers/sdio/host/sdio-dw.c new file mode 100755 index 00000000000..1980bad4ecf --- /dev/null +++ b/components/drivers/sdio/host/sdio-dw.c @@ -0,0 +1,3265 @@ +/* + * Copyright (c) 2006-2023, RT-Thread Development Team + * + * SPDX-License-Identifier: Apache-2.0 + * + * Change Logs: + * Date Author Notes + * 2022-12-06 GuEe-GUI first version + * 2023-02-25 GuEe-GUI add EDMA support + */ + +#include + +#define DBG_TAG "sdio.dw" +#define DBG_LVL DBG_INFO +#include + +#include +#include + +#include "sdio-dw.h" + +/* Common flag combinations */ +#define PINT(x) SDIO_DW_INT_##x +#define SDIO_DW_DATA_ERROR_FLAGS (PINT(DRTO) | PINT(DCRC) | PINT(HTO) | PINT(SBE) | PINT(EBE) | PINT(HLE)) +#define SDIO_DW_CMD_ERROR_FLAGS (PINT(RTO) | PINT(RCRC) | PINT(RESP_ERR) | PINT(HLE)) +#define SDIO_DW_ERROR_FLAGS (SDIO_DW_DATA_ERROR_FLAGS | SDIO_DW_CMD_ERROR_FLAGS) +#define SDIO_DW_SEND_STATUS 1 +#define SDIO_DW_RECV_STATUS 2 +#define SDIO_DW_DMA_THRESHOLD 16 + +#define SDIO_DW_FREQ_HZ_MAX 200000000 +#define SDIO_DW_FREQ_HZ_MIN 100000 + +#define PINTC(x) SDIO_DW_IDMAC_INT_##x +#define SDIO_DW_IDMAC_INT_CLR (PINTC(AI) | PINTC(NI) | PINTC(CES) | PINTC(DU) | PINTC(FBE) | PINTC(RI) | PINTC(TI)) + +#define DESC_RING_BUF_SZ ARCH_PAGE_SIZE +#define NSEC_PER_SEC 1000000000L +#define USEC_PER_MSEC 1000L +#define MSEC_PER_SEC 1000L + +struct idmac_desc64 +{ + rt_uint32_t des0; /* Control descriptor */ +#define IDMAC_OWN_CLR64(x) !((x) & rt_cpu_to_le32(IDMAC_DES0_OWN)) + + rt_uint32_t des1; /* Reserved */ + + rt_uint32_t des2; /* Buffer sizes */ +#define IDMAC_64ADDR_SET_BUFFER1_SIZE(d, s) \ + ((d)->des2 = ((d)->des2 & rt_cpu_to_le32(0x03ffe000)) | ((rt_cpu_to_le32(s)) & rt_cpu_to_le32(0x1fff))) + + rt_uint32_t des3; /* Reserved */ + + rt_uint32_t des4; /* Lower 32-bits of buffer address pointer 1 */ + rt_uint32_t des5; /* Upper 32-bits of buffer address pointer 1 */ + + rt_uint32_t des6; /* Lower 32-bits of next descriptor address */ + rt_uint32_t des7; /* Upper 32-bits of next descriptor address */ +}; + +struct idmac_desc32 +{ + rt_le32_t des0; /* Control Descriptor */ +#define IDMAC_DES0_DIC RT_BIT(1) 
+#define IDMAC_DES0_LD RT_BIT(2) +#define IDMAC_DES0_FD RT_BIT(3) +#define IDMAC_DES0_CH RT_BIT(4) +#define IDMAC_DES0_ER RT_BIT(5) +#define IDMAC_DES0_CES RT_BIT(30) +#define IDMAC_DES0_OWN RT_BIT(31) + rt_le32_t des1; /* Buffer sizes */ + +#define IDMAC_32ADDR_SET_BUFFER1_SIZE(d, s) \ + ((d)->des1 = ((d)->des1 & rt_cpu_to_le32(0x03ffe000)) | (rt_cpu_to_le32((s) & 0x1fff))) + + rt_le32_t des2; /* Buffer 1 physical address */ + rt_le32_t des3; /* Buffer 2 physical address */ +}; + +/* Each descriptor can transfer up to 4KB of data in chained mode */ +#define DW_MCI_DESC_DATA_LENGTH 0x1000 + +static rt_bool_t sdio_dw_ctrl_reset(struct sdio_dw *sd, rt_uint32_t reset) +{ + rt_uint32_t ctrl; + rt_tick_t start; + int timeout = rt_tick_from_millisecond(500); + + ctrl = sdio_dw_readl(sd, CTRL); + ctrl |= reset; + sdio_dw_writel(sd, CTRL, ctrl); + + start = rt_tick_get(); + + while ((sdio_dw_readl(sd, CTRL) & reset)) + { + if ((rt_tick_get() - start) > timeout) + { + LOG_E("Timeout resetting block (ctrl reset 0x%x)", ctrl & reset); + + return RT_FALSE; + } + + rt_hw_cpu_relax(); + } + + return RT_TRUE; +} + +static void sdio_dw_wait_while_busy(struct sdio_dw *sd, rt_uint32_t cmd_flags) +{ + if ((cmd_flags & SDIO_DW_CMD_PRV_DAT_WAIT) && !(cmd_flags & SDIO_DW_CMD_VOLT_SWITCH)) + { + rt_tick_t start = rt_tick_get(); + int timeout = rt_tick_from_millisecond(500); + + while ((sdio_dw_readl(sd, STATUS) & SDIO_DW_STATUS_BUSY)) + { + if ((rt_tick_get() - start) > timeout) + { + LOG_E("Wait busy fail"); + + break; + } + + rt_hw_cpu_relax(); + } + } +} + +static void sdio_dw_send_cmd(struct sdio_dw_slot *slot, rt_uint32_t cmd, rt_uint32_t arg) +{ + rt_tick_t start; + struct sdio_dw *sd = slot->sd; + int timeout = rt_tick_from_millisecond(500); + + sdio_dw_writel(sd, CMDARG, arg); + rt_hw_wmb(); + sdio_dw_wait_while_busy(sd, cmd); + sdio_dw_writel(sd, CMD, SDIO_DW_CMD_START | cmd); + + start = rt_tick_get(); + + while ((sdio_dw_readl(sd, CMD) & SDIO_DW_CMD_START)) + { + if ((rt_tick_get() - start) > timeout) + { + LOG_E("Wait command start fail"); + + break; + } + + rt_hw_cpu_relax(); + } +} + +rt_inline void sdio_dw_set_cto(struct sdio_dw *sd) +{ + rt_ubase_t level; + rt_uint32_t cto_clks, cto_div, cto_ms; + + cto_clks = sdio_dw_readl(sd, TMOUT) & 0xff; + cto_div = (sdio_dw_readl(sd, CLKDIV) & 0xff) * 2; + + if (cto_div == 0) + { + cto_div = 1; + } + + cto_ms = RT_DIV_ROUND_UP_ULL((rt_uint64_t)MSEC_PER_SEC * cto_clks * cto_div, + sd->bus_hz); + + /* Add a bit spare time */ + cto_ms += 10; + + /* + * The durations we're working with are fairly short so we have to be extra + * careful about synchronization here. Specifically in hardware a command + * timeout is _at most_ 5.1 ms, so that means we expect an interrupt + * (either command done or timeout) to come rather quickly after the + * sdio_dw_writel. ...but just in case we have a long interrupt latency + * let's add a bit of paranoia. + * + * In general we'll assume that at least an interrupt will be asserted in + * hardware by the time the cto_timer runs. ...and if it hasn't been + * asserted in hardware by that time then we'll assume it'll never come. 
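 * For example (illustrative numbers, not from the databook): with cto_clks =
 * 255 (TMOUT[7:0] at its maximum), cto_div = 2 and bus_hz = 50 MHz, the
 * computation above gives DIV_ROUND_UP(1000 * 255 * 2, 50000000) = 1 ms, so
 * once the 10 ms margin is added the cto_timer is armed for roughly 11 ms.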
+ */ + level = rt_spin_lock_irqsave(&sd->irq_lock); + + if (!rt_bitmap_test_bit(&sd->pending_events, EVENT_CMD_COMPLETE)) + { + rt_tick_t tick = rt_tick_from_millisecond(cto_ms) + 1; + + rt_timer_control(&sd->cto_timer, RT_TIMER_CTRL_SET_TIME, &tick); + + rt_timer_start(&sd->cto_timer); + } + + rt_spin_unlock_irqrestore(&sd->irq_lock, level); +} + +static void sdio_dw_start_cmd(struct sdio_dw_slot *slot, rt_uint32_t cmd, + rt_uint32_t arg) +{ + struct sdio_dw *sd = slot->sd; + + sdio_dw_writel(sd, CMDARG, arg); + rt_hw_wmb(); + sdio_dw_wait_while_busy(sd, cmd); + sdio_dw_writel(sd, CMD, SDIO_DW_CMD_START | cmd); + + /* Response expected command only */ + if ((cmd & SDIO_DW_CMD_RESP_EXP)) + { + sdio_dw_set_cto(sd); + } +} + +rt_inline void send_stop_abort(struct sdio_dw *sd, struct rt_mmcsd_data *data) +{ + struct rt_mmcsd_cmd *stop = &sd->stop_abort; + + sdio_dw_start_cmd(sd->slot, sd->stop_cmdr, stop->arg); +} + +/* DMA interface functions */ +static void sdio_dw_stop_dma(struct sdio_dw *sd) +{ + if (sd->using_dma) + { + sd->dma_ops->stop(sd); + sd->dma_ops->cleanup(sd); + } + + /* Data transfer was stopped by the interrupt handler */ + rt_bitmap_set_bit(&sd->pending_events, EVENT_XFER_COMPLETE); +} + +static rt_uint32_t sdio_dw_prep_stop_abort(struct sdio_dw *sd, struct rt_mmcsd_cmd *cmd) +{ + rt_uint32_t cmdr; + struct rt_mmcsd_cmd *stop; + + if (!cmd->data) + { + return 0; + } + + stop = &sd->stop_abort; + cmdr = cmd->cmd_code; + rt_memset(stop, 0, sizeof(*stop)); + + if (cmdr == READ_SINGLE_BLOCK || + cmdr == READ_MULTIPLE_BLOCK || + cmdr == WRITE_BLOCK || + cmdr == WRITE_MULTIPLE_BLOCK || + cmdr == SEND_TUNING_BLOCK || + cmdr == SEND_TUNING_BLOCK_HS200 || + cmdr == GEN_CMD) + { + stop->cmd_code = STOP_TRANSMISSION; + stop->arg = 0; + stop->flags = CMD_AC; + } + else if (cmdr == SD_IO_RW_EXTENDED) + { + stop->cmd_code = SD_IO_RW_DIRECT; + stop->arg |= (1 << 31) | (0 << 28) | ((cmd->arg >> 28) & 0x7); + stop->flags = RESP_SPI_R5 | CMD_AC; + } + else + { + return 0; + } + + cmdr = stop->cmd_code | SDIO_DW_CMD_STOP | SDIO_DW_CMD_RESP_CRC | SDIO_DW_CMD_RESP_EXP; + + if (!rt_bitmap_test_bit(&sd->slot->flags, DW_MMC_CARD_NO_USE_HOLD)) + { + cmdr |= SDIO_DW_CMD_USE_HOLD_REG; + } + + return cmdr; +} + +static void sdio_dw_idmac_reset(struct sdio_dw *sd) +{ + /* Software reset of DMA */ + sdio_dw_writel(sd, BMOD, sdio_dw_readl(sd, BMOD) | SDIO_DW_IDMAC_SWRESET); +} + +static rt_err_t sdio_dw_idmac_init(struct sdio_dw *sd) +{ + int i; + rt_err_t err = RT_EOK; + + if (sd->dma_64bit_address) + { + struct idmac_desc64 *p; + /* Number of descriptors in the ring buffer */ + sd->ring_size = DESC_RING_BUF_SZ / sizeof(struct idmac_desc64); + + /* Forward link the descriptor list */ + for (i = 0, p = sd->dma_buf; i < sd->ring_size - 1; ++i, ++p) + { + p->des6 = (sd->dma_buf_phy + (sizeof(struct idmac_desc64) * (i + 1))) & 0xffffffff; + p->des7 = (rt_uint64_t)(sd->dma_buf_phy + (sizeof(struct idmac_desc64) * (i + 1))) >> 32; + /* Initialize reserved and buffer size fields to "0" */ + p->des0 = 0; + p->des1 = 0; + p->des2 = 0; + p->des3 = 0; + } + + /* Set the last descriptor as the end-of-ring descriptor */ + p->des6 = sd->dma_buf_phy & 0xffffffff; + p->des7 = (rt_uint64_t)sd->dma_buf_phy >> 32; + p->des0 = IDMAC_DES0_ER; + } + else + { + struct idmac_desc32 *p; + /* Number of descriptors in the ring buffer */ + sd->ring_size = DESC_RING_BUF_SZ / sizeof(struct idmac_desc32); + + /* Forward link the descriptor list */ + for (i = 0, p = sd->dma_buf; i < sd->ring_size - 1; ++i, ++p) + { + p->des3 = 
rt_cpu_to_le32(sd->dma_buf_phy + (sizeof(struct idmac_desc32) * (i + 1))); + p->des0 = 0; + p->des1 = 0; + } + + /* Set the last descriptor as the end-of-ring descriptor */ + p->des3 = rt_cpu_to_le32(sd->dma_buf_phy); + p->des0 = rt_cpu_to_le32(IDMAC_DES0_ER); + } + + sdio_dw_idmac_reset(sd); + + if (sd->dma_64bit_address) + { + /* Mask out interrupts - get Tx & Rx complete only */ + sdio_dw_writel(sd, IDSTS64, SDIO_DW_IDMAC_INT_CLR); + sdio_dw_writel(sd, IDINTEN64, PINTC(NI) | PINTC(RI) | PINTC(TI)); + + /* Set the descriptor base address */ + sdio_dw_writel(sd, DBADDRL, sd->dma_buf_phy & 0xffffffff); + sdio_dw_writel(sd, DBADDRU, (rt_uint64_t)sd->dma_buf_phy >> 32); + } + else + { + /* Mask out interrupts - get Tx & Rx complete only */ + sdio_dw_writel(sd, IDSTS, SDIO_DW_IDMAC_INT_CLR); + sdio_dw_writel(sd, IDINTEN, PINTC(NI) | PINTC(RI) | PINTC(TI)); + + /* Set the descriptor base address */ + sdio_dw_writel(sd, DBADDR, sd->dma_buf_phy); + } + + return err; +} + +rt_inline rt_err_t sdio_dw_prepare_desc64(struct sdio_dw *sd, struct rt_mmcsd_data *data) +{ + rt_uint32_t desc_len; + rt_uint64_t mem_addr; + int timeout = rt_tick_from_millisecond(100); + struct idmac_desc64 *desc_first, *desc_last, *desc; + + desc_first = desc_last = desc = sd->dma_buf; + mem_addr = (rt_uint64_t)rt_kmem_v2p(sd->last_buf); + + for (rt_uint32_t length = sd->last_remain; length; ++desc) + { + rt_tick_t start = rt_tick_get(); + + desc_len = rt_min_t(rt_uint32_t, length, DW_MCI_DESC_DATA_LENGTH); + length -= desc_len; + + /* + * Wait for the former clear OWN bit operation of IDMAC to make sure + * that this descriptor isn't still owned by IDMAC as IDMAC's write ops + * and CPU's read ops are asynchronous. + */ + while ((HWREG32(&desc->des0) & IDMAC_DES0_OWN)) + { + if ((rt_tick_get() - start) > timeout) + { + goto _err_own_bit; + } + + rt_hw_cpu_relax(); + } + + /* Set the OWN bit and disable interrupts for this descriptor */ + desc->des0 = IDMAC_DES0_OWN | IDMAC_DES0_DIC | IDMAC_DES0_CH; + + /* Buffer length */ + IDMAC_64ADDR_SET_BUFFER1_SIZE(desc, desc_len); + + /* Physical address to DMA to/from */ + desc->des4 = mem_addr & 0xffffffff; + desc->des5 = mem_addr >> 32; + + /* Update physical address for the next desc */ + mem_addr += desc_len; + + /* Save pointer to the last descriptor */ + desc_last = desc; + } + + /* Set first descriptor */ + desc_first->des0 |= IDMAC_DES0_FD; + + /* Set last descriptor */ + desc_last->des0 &= ~(IDMAC_DES0_CH | IDMAC_DES0_DIC); + desc_last->des0 |= IDMAC_DES0_LD; + + return RT_EOK; + +_err_own_bit: + /* restore the descriptor chain as it's polluted */ + LOG_D("Descriptor is still owned by IDMAC"); + + rt_memset(sd->dma_buf, 0, DESC_RING_BUF_SZ); + sdio_dw_idmac_init(sd); + + return -RT_EINVAL; +} + +rt_inline rt_err_t sdio_dw_prepare_desc32(struct sdio_dw *sd, struct rt_mmcsd_data *data) +{ + rt_uint32_t desc_len, mem_addr; + int timeout = rt_tick_from_millisecond(100); + struct idmac_desc32 *desc_first, *desc_last, *desc; + + desc_first = desc_last = desc = sd->dma_buf; + mem_addr = (rt_ubase_t)rt_kmem_v2p(sd->last_buf); + + for (rt_uint32_t length = sd->last_remain; length; ++desc) + { + rt_tick_t start = rt_tick_get(); + + desc_len = rt_min_t(rt_uint32_t, length, DW_MCI_DESC_DATA_LENGTH); + length -= desc_len; + + /* + * Wait for the former clear OWN bit operation of IDMAC to make sure + * that this descriptor isn't still owned by IDMAC as IDMAC's write ops + * and CPU's read ops are asynchronous. 
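 * (Sizing note, assuming ARCH_PAGE_SIZE is 4 KiB: the ring then holds 256
 * 32-bit descriptors or 128 64-bit ones, and with DW_MCI_DESC_DATA_LENGTH
 * (4 KiB) of data per descriptor a single request can map roughly 1 MiB or
 * 512 KiB of buffer respectively.)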
+ */ + while (!IDMAC_OWN_CLR64(HWREG32(&desc->des0))) + { + if ((rt_tick_get() - start) > timeout) + { + goto _err_own_bit; + } + + rt_hw_cpu_relax(); + } + + /* Set the OWN bit and disable interrupts for this descriptor */ + desc->des0 = rt_cpu_to_le32(IDMAC_DES0_OWN | IDMAC_DES0_DIC | IDMAC_DES0_CH); + + /* Buffer length */ + IDMAC_32ADDR_SET_BUFFER1_SIZE(desc, desc_len); + + /* Physical address to DMA to/from */ + desc->des2 = rt_cpu_to_le32(mem_addr); + + /* Update physical address for the next desc */ + mem_addr += desc_len; + + /* Save pointer to the last descriptor */ + desc_last = desc; + } + + /* Set first descriptor */ + desc_first->des0 |= rt_cpu_to_le32(IDMAC_DES0_FD); + + /* Set last descriptor */ + desc_last->des0 &= rt_cpu_to_le32(~(IDMAC_DES0_CH | IDMAC_DES0_DIC)); + desc_last->des0 |= rt_cpu_to_le32(IDMAC_DES0_LD); + + return RT_EOK; + +_err_own_bit: + /* restore the descriptor chain as it's polluted */ + LOG_D("Descriptor is still owned by IDMAC"); + + rt_memset(sd->dma_buf, 0, DESC_RING_BUF_SZ); + sdio_dw_idmac_init(sd); + + return -RT_EINVAL; +} + +static rt_err_t sdio_dw_idmac_start(struct sdio_dw *sd) +{ + rt_err_t err = RT_EOK; + + if (sd->dma_64bit_address) + { + err = sdio_dw_prepare_desc64(sd, sd->data); + } + else + { + err = sdio_dw_prepare_desc32(sd, sd->data); + } + + if (err) + { + goto _out; + } + + /* Drain writebuffer */ + rt_hw_wmb(); + + /* Make sure to reset DMA in case we did PIO before this */ + sdio_dw_ctrl_reset(sd, SDIO_DW_CTRL_DMA_RESET); + sdio_dw_idmac_reset(sd); + + /* Select IDMAC interface */ + sdio_dw_writel(sd, CTRL, sdio_dw_readl(sd, CTRL) | SDIO_DW_CTRL_USE_IDMAC); + + /* Drain writebuffer */ + rt_hw_wmb(); + + /* Enable the IDMAC */ + sdio_dw_writel(sd, BMOD, sdio_dw_readl(sd, BMOD) | SDIO_DW_IDMAC_ENABLE | SDIO_DW_IDMAC_FB); + + /* Start it running */ + sdio_dw_writel(sd, PLDMND, 1); + +_out: + return err; +} + +static rt_err_t sdio_dw_idmac_complete(struct sdio_dw *sd) +{ + rt_err_t err = RT_EOK; + struct rt_mmcsd_data *data = sd->data; + + sd->dma_ops->cleanup(sd); + + if (data) + { + rt_hw_cpu_dcache_ops(RT_HW_CACHE_INVALIDATE, data->buf, data->blks * data->blksize); + rt_bitmap_set_bit(&sd->pending_events, EVENT_XFER_COMPLETE); + rt_workqueue_urgent_work(sd->state_wq, &sd->state_work); + } + + return err; +} + +static rt_err_t sdio_dw_idmac_stop(struct sdio_dw *sd) +{ + rt_uint32_t reg; + + /* Disable and reset the IDMAC interface */ + reg = sdio_dw_readl(sd, CTRL); + reg &= ~SDIO_DW_CTRL_USE_IDMAC; + reg |= SDIO_DW_CTRL_DMA_RESET; + sdio_dw_writel(sd, CTRL, reg); + + /* Stop the IDMAC running */ + reg = sdio_dw_readl(sd, BMOD); + reg &= ~(SDIO_DW_IDMAC_ENABLE | SDIO_DW_IDMAC_FB); + reg |= SDIO_DW_IDMAC_SWRESET; + sdio_dw_writel(sd, BMOD, reg); + + return RT_EOK; +} + +static rt_err_t sdio_dw_idmac_cleanup(struct sdio_dw *sd) +{ + return RT_EOK; +} + +static const struct sdio_dw_dma_ops sdio_dw_idmac_ops = +{ + .init = sdio_dw_idmac_init, + .start = sdio_dw_idmac_start, + .complete = sdio_dw_idmac_complete, + .stop = sdio_dw_idmac_stop, + .cleanup = sdio_dw_idmac_cleanup, +}; + +static void edma_callback(struct rt_dma_chan *chan, rt_size_t size) +{ + struct sdio_dw *sd = chan->priv; + + sd->dma_ops->complete(sd); +} + +static rt_err_t sdio_dw_edmac_init(struct sdio_dw *sd) +{ + rt_err_t err = RT_EOK; + + sd->edma_chan = rt_dma_chan_request(sd->bus_dev, "rx-tx"); + + if (rt_is_err(sd->edma_chan)) + { + err = rt_ptr_err(sd->edma_chan); + LOG_E("Get external DMA channel error = %s", rt_strerror(err)); + + sd->edma_chan = RT_NULL; + } + 
else if (!sd->edma_chan) + { + err = -RT_ERROR; + } + else + { + sd->edma_chan->callback = edma_callback; + sd->edma_chan->priv = sd; + } + + return err; +} + +static rt_err_t sdio_dw_edmac_start(struct sdio_dw *sd) +{ + rt_err_t err; + struct rt_dma_slave_config config; + struct rt_dma_slave_transfer transfer; + + rt_memset(&config, 0, sizeof(config)); + config.src_addr_width = RT_DMA_SLAVE_BUSWIDTH_4_BYTES; + config.dst_addr_width = RT_DMA_SLAVE_BUSWIDTH_4_BYTES; + + config.src_addr = (rt_ubase_t)rt_kmem_v2p(sd->last_buf); + config.dst_addr = (rt_ubase_t)rt_kmem_v2p(sd->fifo_base); + + config.dst_maxburst = 1 << (((sdio_dw_readl(sd, FIFOTH) >> 28) & 0x7) + 1); + config.dst_maxburst = config.dst_maxburst == 2 ? 1 : config.dst_maxburst; + config.src_maxburst = config.dst_maxburst; + + if (sd->data->flags & DATA_DIR_READ) + { + config.direction = RT_DMA_DEV_TO_MEM; + } + else + { + config.direction = RT_DMA_MEM_TO_DEV; + } + + if ((err = rt_dma_chan_config(sd->edma_chan, &config))) + { + LOG_E("Config EDMAC error = %s", rt_strerror(err)); + return err; + } + + rt_memset(&transfer, 0, sizeof(transfer)); + transfer.src_addr = config.src_addr; + transfer.dst_addr = config.dst_addr; + transfer.buffer_len = sd->last_remain; + + if ((err = rt_dma_prep_single(sd->edma_chan, &transfer))) + { + LOG_E("Prepare EDMAC error = %s", rt_strerror(err)); + return err; + } + + return rt_dma_chan_start(sd->edma_chan); +} + +static rt_err_t sdio_dw_edmac_complete(struct sdio_dw *sd) +{ + rt_err_t err = RT_EOK; + struct rt_mmcsd_data *data = sd->data; + + sd->dma_ops->cleanup(sd); + + if (data) + { + rt_hw_cpu_dcache_ops(RT_HW_CACHE_INVALIDATE, data->buf, data->blks * data->blksize); + rt_bitmap_set_bit(&sd->pending_events, EVENT_XFER_COMPLETE); + rt_workqueue_urgent_work(sd->state_wq, &sd->state_work); + } + + return err; +} + +static rt_err_t sdio_dw_edmac_stop(struct sdio_dw *sd) +{ + return rt_dma_chan_stop(sd->edma_chan); +} + +static rt_err_t sdio_dw_edmac_cleanup(struct sdio_dw *sd) +{ + return RT_EOK; +} + +static rt_err_t sdio_dw_edmac_exit(struct sdio_dw *sd) +{ + if (sd->edma_chan) + { + rt_dma_chan_release(sd->edma_chan); + sd->edma_chan = RT_NULL; + } + + return RT_EOK; +} + +static const struct sdio_dw_dma_ops sdio_dw_edmac_ops = +{ + .init = sdio_dw_edmac_init, + .start = sdio_dw_edmac_start, + .complete = sdio_dw_edmac_complete, + .stop = sdio_dw_edmac_stop, + .cleanup = sdio_dw_edmac_cleanup, + .exit = sdio_dw_edmac_exit, +}; + +static rt_bool_t sdio_dw_get_cd(struct sdio_dw_slot *slot) +{ + rt_bool_t present; + struct sdio_dw *sd = slot->sd; + + if (!controller_is_removable(slot->host)) + { + present = RT_TRUE; + } + else + { + present = (sdio_dw_readl(sd, CDETECT) & (1 << slot->id)) == 0; + } + + return present; +} + +static void sdio_dw_adjust_fifoth(struct sdio_dw *sd, struct rt_mmcsd_data *data) +{ + static const rt_uint32_t mszs[] = { 1, 4, 8, 16, 32, 64, 128, 256 }; + rt_uint32_t blksz = data->blksize; + rt_uint32_t fifo_width = 1 << sd->data_shift; + rt_uint32_t blksz_depth = blksz / fifo_width, fifoth_val; + rt_uint32_t msize = 0, rx_wmark = 1, tx_wmark, tx_wmark_invers; + int idx = RT_ARRAY_SIZE(mszs) - 1; + + /* PIO should ship this scenario */ + if (!sd->use_dma) + { + return; + } + + tx_wmark = (sd->fifo_depth) / 2; + tx_wmark_invers = sd->fifo_depth - tx_wmark; + + /* MSIZE is '1', if blksz is not a multiple of the FIFO width */ + if (blksz % fifo_width) + { + goto _done; + } + + do { + if (!((blksz_depth % mszs[idx]) || (tx_wmark_invers % mszs[idx]))) + { + msize = idx; + 
rx_wmark = mszs[idx] - 1; + + break; + } + } while (--idx > 0); + /* If idx is '0', it won't be tried Thus, initial values are uesed */ + +_done: + fifoth_val = SDIO_DW_SET_FIFOTH(msize, rx_wmark, tx_wmark); + sdio_dw_writel(sd, FIFOTH, fifoth_val); +} + +static void sdio_dw_ctrl_thld(struct sdio_dw *sd, struct rt_mmcsd_data *data) +{ + rt_uint8_t enable; + rt_uint16_t thld_size; + rt_uint32_t blksz_depth, fifo_depth; + rt_uint32_t blksz = data->blksize; + + /* + * CDTHRCTL doesn't exist prior to 240A (in fact that register offset is + * in the FIFO region, so we really shouldn't access it). + */ + if (sd->verid < SDIO_DW_240A || + (sd->verid < SDIO_DW_280A && (data->flags & DATA_DIR_WRITE))) + { + return; + } + + /* + * Card write Threshold is introduced since 2.80a + * It's used when HS400 mode is enabled. + */ + if ((data->flags & DATA_DIR_WRITE) && sd->timing != MMCSD_TIMING_MMC_HS400) + { + goto _disable; + } + + if ((data->flags & DATA_DIR_WRITE)) + { + enable = SDIO_DW_CARD_WR_THR_EN; + } + else + { + enable = SDIO_DW_CARD_RD_THR_EN; + } + + if (sd->timing != MMCSD_TIMING_MMC_HS200 && + sd->timing != MMCSD_TIMING_UHS_SDR104 && + sd->timing != MMCSD_TIMING_MMC_HS400) + { + goto _disable; + } + + blksz_depth = blksz / (1 << sd->data_shift); + fifo_depth = sd->fifo_depth; + + if (blksz_depth > fifo_depth) + { + goto _disable; + } + + /* + * If (blksz_depth) >= (fifo_depth >> 1), should be 'thld_size <= blksz' + * If (blksz_depth) < (fifo_depth >> 1), should be thld_size = blksz + * Currently just choose blksz. + */ + thld_size = blksz; + sdio_dw_writel(sd, CDTHRCTL, SDIO_DW_SET_THLD(thld_size, enable)); + + return; + +_disable: + sdio_dw_writel(sd, CDTHRCTL, 0); +} + +static rt_err_t sdio_dw_submit_data_dma(struct sdio_dw *sd, struct rt_mmcsd_data *data) +{ + rt_uint32_t temp; + rt_ubase_t level; + + sd->using_dma = RT_FALSE; + + /* If we don't have a channel, we can't do DMA */ + if (!sd->use_dma) + { + return -RT_ENOSYS; + } + + sd->using_dma = RT_TRUE; + + /* + * Decide the MSIZE and RX/TX Watermark. If current block size is same with + * previous size, no need to update fifoth. 
+ */ + if (sd->prev_blksz != data->blksize) + { + sdio_dw_adjust_fifoth(sd, data); + } + + /* Enable the DMA interface */ + temp = sdio_dw_readl(sd, CTRL); + temp |= SDIO_DW_CTRL_DMA_ENABLE; + sdio_dw_writel(sd, CTRL, temp); + + /* Disable RX/TX IRQs, let DMA handle it */ + level = rt_spin_lock_irqsave(&sd->irq_lock); + temp = sdio_dw_readl(sd, INTMASK); + temp &= ~(PINT(RXDR) | PINT(TXDR)); + sdio_dw_writel(sd, INTMASK, temp); + rt_spin_unlock_irqrestore(&sd->irq_lock, level); + + /* Flush data to memory */ + rt_hw_cpu_dcache_ops(RT_HW_CACHE_FLUSH, sd->last_buf, sd->last_remain); + + if (sd->dma_ops->start(sd)) + { + /* We can't do DMA, try PIO for this one */ + sd->dma_ops->stop(sd); + + return -RT_ENOSYS; + } + + return RT_EOK; +} + +static void sdio_dw_submit_data(struct sdio_dw *sd, struct rt_mmcsd_data *data) +{ + rt_ubase_t level; + rt_uint32_t temp; + + data->err = -RT_ERROR; + sd->data = data; + sd->last_buf = data->buf; + sd->last_remain = data->blks * data->blksize; + + if ((data->flags & DATA_DIR_READ)) + { + sd->dir_status = SDIO_DW_RECV_STATUS; + } + else + { + sd->dir_status = SDIO_DW_SEND_STATUS; + } + + sdio_dw_ctrl_thld(sd, data); + + if (sdio_dw_submit_data_dma(sd, data)) + { + sd->part_buf_start = 0; + sd->part_buf_count = 0; + + sdio_dw_writel(sd, RINTSTS, PINT(TXDR) | PINT(RXDR)); + + level = rt_spin_lock_irqsave(&sd->irq_lock); + temp = sdio_dw_readl(sd, INTMASK); + temp |= PINT(TXDR) | PINT(RXDR); + sdio_dw_writel(sd, INTMASK, temp); + rt_spin_unlock_irqrestore(&sd->irq_lock, level); + + temp = sdio_dw_readl(sd, CTRL); + temp &= ~SDIO_DW_CTRL_DMA_ENABLE; + sdio_dw_writel(sd, CTRL, temp); + + /* + * Use the initial fifoth_val for PIO mode. If wm_algined is set, we set + * watermark same as data size. If next issued data may be transfered by + * DMA mode, prev_blksz should be invalidated. + */ + if (sd->wm_aligned) + { + sdio_dw_adjust_fifoth(sd, data); + } + else + { + sdio_dw_writel(sd, FIFOTH, sd->fifoth_val); + } + sd->prev_blksz = 0; + } + else + { + /* + * Keep the current block size. + * It will be used to decide whether to update fifoth register next time. + */ + sd->prev_blksz = data->blksize; + } +} + +static void sdio_dw_setup_bus(struct sdio_dw_slot *slot, rt_bool_t force_clkinit) +{ + struct sdio_dw *sd = slot->sd; + rt_uint32_t clock = slot->clock; + rt_uint32_t cmd_bits = SDIO_DW_CMD_UPD_CLK | SDIO_DW_CMD_PRV_DAT_WAIT; + + /* We must continue to set bit 28 in CMD until the change is complete */ + if (sd->state == STATE_WAITING_CMD11_DONE) + { + cmd_bits |= SDIO_DW_CMD_VOLT_SWITCH; + } + + if (!clock) + { + sdio_dw_writel(sd, CLKENA, 0); + sdio_dw_send_cmd(slot, cmd_bits, 0); + } + else if (clock != sd->current_speed || force_clkinit) + { + rt_uint32_t clk_en_a, div = sd->bus_hz / clock; + + if (sd->bus_hz % clock && sd->bus_hz > clock) + { + /* Move the + 1 after the divide to prevent over-clocking the card */ + div += 1; + } + + div = (sd->bus_hz != clock) ? RT_DIV_ROUND_UP(div, 2) : 0; + + if (clock != slot->clk_old && + !rt_bitmap_test_bit(&slot->flags, DW_MMC_CARD_NEEDS_POLL) && + !force_clkinit) + { + LOG_D("Bus speed (slot %d) = %uHz (slot req %uHz, actual %uHZ div = %d)", + slot->id, sd->bus_hz, clock, + div ? 
((sd->bus_hz / div) >> 1) : sd->bus_hz, div); + } + + /* Disable clock */ + sdio_dw_writel(sd, CLKENA, 0); + sdio_dw_writel(sd, CLKSRC, 0); + + /* Inform CIU */ + sdio_dw_send_cmd(slot, cmd_bits, 0); + + /* Set clock to desired speed */ + sdio_dw_writel(sd, CLKDIV, div); + + /* Inform CIU */ + sdio_dw_send_cmd(slot, cmd_bits, 0); + + /* Enable clock; only low power if no SDIO */ + clk_en_a = SDIO_DW_CLKEN_ENABLE << slot->id; + if (!rt_bitmap_test_bit(&slot->flags, DW_MMC_CARD_NO_LOW_PWR)) + { + clk_en_a |= SDIO_DW_CLKEN_LOW_PWR << slot->id; + } + sdio_dw_writel(sd, CLKENA, clk_en_a); + + /* Inform CIU */ + sdio_dw_send_cmd(slot, cmd_bits, 0); + + /* Keep the last clock value that was requested from core */ + slot->clk_old = clock; + } + + sd->current_speed = clock; + + /* Set the current slot bus width */ + sdio_dw_writel(sd, CTYPE, (slot->ctype << slot->id)); +} + +static void sdio_dw_set_data_timeout(struct sdio_dw *sd, rt_uint32_t timeout_ns) +{ + rt_uint64_t tmp; + rt_uint32_t clk_div, tmout; + const struct sdio_dw_drv_data *drv_data = sd->drv_data; + + if (drv_data && drv_data->set_data_timeout) + { + drv_data->set_data_timeout(sd, timeout_ns); + + return; + } + + clk_div = (sdio_dw_readl(sd, CLKDIV) & 0xff) * 2; + + if (clk_div == 0) + { + clk_div = 1; + } + + tmp = RT_DIV_ROUND_UP_ULL((rt_uint64_t)timeout_ns * sd->bus_hz, NSEC_PER_SEC); + tmp = RT_DIV_ROUND_UP_ULL(tmp, clk_div); + + /* TMOUT[7:0] (RESPONSE_TIMEOUT): Set maximum */ + tmout = 0xff; + + /* TMOUT[31:8] (DATA_TIMEOUT) */ + if (!tmp || tmp > 0xffffff) + { + tmout |= (0xffffff << 8); + } + else + { + tmout |= (tmp & 0xffffff) << 8; + } + + sdio_dw_writel(sd, TMOUT, tmout); +} + +/* Push final bytes to part_buf, only use during push */ +static void sdio_dw_set_part_bytes(struct sdio_dw *sd, void *buf, int cnt) +{ + rt_memcpy((void *)&sd->part_buf, buf, cnt); + sd->part_buf_count = cnt; +} + +/* Append bytes to part_buf, only use during push */ +static int sdio_dw_push_part_bytes(struct sdio_dw *sd, void *buf, int cnt) +{ + cnt = rt_min(cnt, (1 << sd->data_shift) - sd->part_buf_count); + rt_memcpy((void *)&sd->part_buf + sd->part_buf_count, buf, cnt); + sd->part_buf_count += cnt; + + return cnt; +} + +/* Pull first bytes from part_buf, only use during pull */ +static int sdio_dw_pull_part_bytes(struct sdio_dw *sd, void *buf, int cnt) +{ + cnt = rt_min_t(int, cnt, sd->part_buf_count); + + if (cnt) + { + rt_memcpy(buf, (void *)&sd->part_buf + sd->part_buf_start, cnt); + sd->part_buf_count -= cnt; + sd->part_buf_start += cnt; + } + + return cnt; +} + +/* Pull final bytes from the part_buf, assuming it's just been filled */ +static void sdio_dw_pull_final_bytes(struct sdio_dw *sd, void *buf, int cnt) +{ + rt_memcpy(buf, &sd->part_buf, cnt); + sd->part_buf_start = cnt; + sd->part_buf_count = (1 << sd->data_shift) - cnt; +} + +static void sdio_dw_push_data16(struct sdio_dw *sd, void *buf, int cnt) +{ + struct rt_mmcsd_data *data = sd->data; + int init_cnt = cnt; + + /* Try and push anything in the part_buf */ + if ((sd->part_buf_count)) + { + int len = sdio_dw_push_part_bytes(sd, buf, cnt); + + buf += len; + cnt -= len; + + if (sd->part_buf_count == 2) + { + sdio_dw_fifo_writew(sd, sd->part_buf16); + sd->part_buf_count = 0; + } + } +#ifndef ARCH_HAVE_EFFICIENT_UNALIGNED_ACCESS + if (((rt_ubase_t)buf & 0x1)) + { + while (cnt >= 2) + { + rt_uint16_t aligned_buf[64]; + int len = rt_min(cnt & -2, (int)sizeof(aligned_buf)); + int items = len >> 1; + + /* rt_memcpy from input buffer into aligned buffer */ + rt_memcpy(aligned_buf, buf, 
len); + buf += len; + cnt -= len; + + /* Push data from aligned buffer into fifo */ + for (int i = 0; i < items; ++i) + { + sdio_dw_fifo_writew(sd, aligned_buf[i]); + } + } + } + else +#endif /* !ARCH_HAVE_EFFICIENT_UNALIGNED_ACCESS */ + { + rt_uint16_t *pdata = buf; + + for (; cnt >= 2; cnt -= 2) + { + sdio_dw_fifo_writew(sd, *pdata++); + } + buf = pdata; + } + /* Put anything remaining in the part_buf */ + if (cnt) + { + sdio_dw_set_part_bytes(sd, buf, cnt); + + /* Push data if we have reached the expected data length */ + if ((data->bytes_xfered + init_cnt) == (data->blksize * data->blks)) + { + sdio_dw_fifo_writew(sd, sd->part_buf16); + } + } +} + +static void sdio_dw_pull_data16(struct sdio_dw *sd, void *buf, int cnt) +{ +#ifndef ARCH_HAVE_EFFICIENT_UNALIGNED_ACCESS + if (((rt_ubase_t)buf & 0x1)) + { + while (cnt >= 2) + { + /* Pull data from fifo into aligned buffer */ + rt_uint16_t aligned_buf[64]; + int len = rt_min(cnt & -2, (int)sizeof(aligned_buf)); + int items = len >> 1; + + for (int i = 0; i < items; ++i) + { + aligned_buf[i] = sdio_dw_fifo_readw(sd); + } + + /* rt_memcpy from aligned buffer into output buffer */ + rt_memcpy(buf, aligned_buf, len); + buf += len; + cnt -= len; + } + } + else +#endif /* !ARCH_HAVE_EFFICIENT_UNALIGNED_ACCESS */ + { + rt_uint16_t *pdata = buf; + + for (; cnt >= 2; cnt -= 2) + { + *pdata++ = sdio_dw_fifo_readw(sd); + } + buf = pdata; + } + if (cnt) + { + sd->part_buf16 = sdio_dw_fifo_readw(sd); + sdio_dw_pull_final_bytes(sd, buf, cnt); + } +} + +static void sdio_dw_push_data32(struct sdio_dw *sd, void *buf, int cnt) +{ + struct rt_mmcsd_data *data = sd->data; + int init_cnt = cnt; + + /* Try and push anything in the part_buf */ + if ((sd->part_buf_count)) + { + int len = sdio_dw_push_part_bytes(sd, buf, cnt); + + buf += len; + cnt -= len; + + if (sd->part_buf_count == 4) + { + sdio_dw_fifo_writel(sd, sd->part_buf32); + sd->part_buf_count = 0; + } + } +#ifndef ARCH_HAVE_EFFICIENT_UNALIGNED_ACCESS + if (((rt_ubase_t)buf & 0x3)) + { + while (cnt >= 4) + { + rt_uint32_t aligned_buf[32]; + int len = rt_min(cnt & -4, (int)sizeof(aligned_buf)); + int items = len >> 2; + + /* rt_memcpy from input buffer into aligned buffer */ + rt_memcpy(aligned_buf, buf, len); + buf += len; + cnt -= len; + + /* Push data from aligned buffer into fifo */ + for (int i = 0; i < items; ++i) + { + sdio_dw_fifo_writel(sd, aligned_buf[i]); + } + } + } + else +#endif /* !ARCH_HAVE_EFFICIENT_UNALIGNED_ACCESS */ + { + rt_uint32_t *pdata = buf; + + for (; cnt >= 4; cnt -= 4) + { + sdio_dw_fifo_writel(sd, *pdata++); + } + buf = pdata; + } + /* Put anything remaining in the part_buf */ + if (cnt) + { + sdio_dw_set_part_bytes(sd, buf, cnt); + + /* Push data if we have reached the expected data length */ + if ((data->bytes_xfered + init_cnt) == (data->blksize * data->blks)) + { + sdio_dw_fifo_writel(sd, sd->part_buf32); + } + } +} + +static void sdio_dw_pull_data32(struct sdio_dw *sd, void *buf, int cnt) +{ +#ifndef ARCH_HAVE_EFFICIENT_UNALIGNED_ACCESS + if (((rt_ubase_t)buf & 0x3)) + { + while (cnt >= 4) + { + /* Pull data from fifo into aligned buffer */ + rt_uint32_t aligned_buf[32]; + int len = rt_min(cnt & -4, (int)sizeof(aligned_buf)); + int items = len >> 2; + + for (int i = 0; i < items; ++i) + { + aligned_buf[i] = sdio_dw_fifo_readl(sd); + } + + /* rt_memcpy from aligned buffer into output buffer */ + rt_memcpy(buf, aligned_buf, len); + buf += len; + cnt -= len; + } + } + else +#endif /* !ARCH_HAVE_EFFICIENT_UNALIGNED_ACCESS */ + { + rt_uint32_t *pdata = buf; + + for (; cnt >= 
4; cnt -= 4) + { + *pdata++ = sdio_dw_fifo_readl(sd); + } + buf = pdata; + } + if (cnt) + { + sd->part_buf32 = sdio_dw_fifo_readl(sd); + sdio_dw_pull_final_bytes(sd, buf, cnt); + } +} + +static void sdio_dw_push_data64(struct sdio_dw *sd, void *buf, int cnt) +{ + struct rt_mmcsd_data *data = sd->data; + int init_cnt = cnt; + + /* Try and push anything in the part_buf */ + if ((sd->part_buf_count)) + { + int len = sdio_dw_push_part_bytes(sd, buf, cnt); + + buf += len; + cnt -= len; + + if (sd->part_buf_count == 8) + { + sdio_dw_fifo_writeq(sd, sd->part_buf64); + sd->part_buf_count = 0; + } + } +#ifndef ARCH_HAVE_EFFICIENT_UNALIGNED_ACCESS + if (((rt_ubase_t)buf & 0x7)) + { + while (cnt >= 8) + { + rt_uint64_t aligned_buf[16]; + int len = rt_min(cnt & -8, (int)sizeof(aligned_buf)); + int items = len >> 3; + + /* rt_memcpy from input buffer into aligned buffer */ + rt_memcpy(aligned_buf, buf, len); + buf += len; + cnt -= len; + + /* Push data from aligned buffer into fifo */ + for (int i = 0; i < items; ++i) + { + sdio_dw_fifo_writeq(sd, aligned_buf[i]); + } + } + } + else +#endif /* !ARCH_HAVE_EFFICIENT_UNALIGNED_ACCESS */ + { + rt_uint64_t *pdata = buf; + + for (; cnt >= 8; cnt -= 8) + { + sdio_dw_fifo_writeq(sd, *pdata++); + } + buf = pdata; + } + /* Put anything remaining in the part_buf */ + if (cnt) + { + sdio_dw_set_part_bytes(sd, buf, cnt); + + /* Push data if we have reached the expected data length */ + if ((data->bytes_xfered + init_cnt) == (data->blksize * data->blks)) + { + sdio_dw_fifo_writeq(sd, sd->part_buf64); + } + } +} + +static void sdio_dw_pull_data64(struct sdio_dw *sd, void *buf, int cnt) +{ +#ifndef ARCH_HAVE_EFFICIENT_UNALIGNED_ACCESS + if (((rt_ubase_t)buf & 0x7)) + { + while (cnt >= 8) + { + /* Pull data from fifo into aligned buffer */ + rt_uint64_t aligned_buf[16]; + int len = rt_min(cnt & -8, (int)sizeof(aligned_buf)); + int items = len >> 3; + + for (int i = 0; i < items; ++i) + { + aligned_buf[i] = sdio_dw_fifo_readq(sd); + } + + /* rt_memcpy from aligned buffer into output buffer */ + rt_memcpy(buf, aligned_buf, len); + buf += len; + cnt -= len; + } + } + else +#endif /* !ARCH_HAVE_EFFICIENT_UNALIGNED_ACCESS */ + { + rt_uint64_t *pdata = buf; + + for (; cnt >= 8; cnt -= 8) + { + *pdata++ = sdio_dw_fifo_readq(sd); + } + buf = pdata; + } + if (cnt) + { + sd->part_buf64 = sdio_dw_fifo_readq(sd); + sdio_dw_pull_final_bytes(sd, buf, cnt); + } +} + +static void sdio_dw_pull_data(struct sdio_dw *sd, void *buf, int cnt) +{ + /* Get remaining partial bytes */ + int len = sdio_dw_pull_part_bytes(sd, buf, cnt); + + if (len != cnt) + { + buf += len; + cnt -= len; + + /* Get the rest of the data */ + sd->pull_data(sd, buf, cnt); + } +} + +static void sdio_dw_read_data_pio(struct sdio_dw *sd, rt_bool_t dto) +{ + void *buf; + int shift = sd->data_shift; + struct rt_mmcsd_data *data = sd->data; + rt_uint32_t status, remain, fcnt, len; + + buf = sd->last_buf; + remain = sd->last_remain; + + do { + if (!remain) + { + break; + } + + do { + fcnt = (SDIO_DW_GET_FCNT(sdio_dw_readl(sd, STATUS)) << shift) + sd->part_buf_count; + len = rt_min(remain, fcnt); + + if (!len) + { + break; + } + + sdio_dw_pull_data(sd, buf, len); + data->bytes_xfered += len; + buf += len; + remain -= len; + } while (remain); + + status = sdio_dw_readl(sd, MINTSTS); + sdio_dw_writel(sd, RINTSTS, PINT(RXDR)); + /* If the RXDR is ready read again */ + } while ((status & PINT(RXDR)) || (dto && SDIO_DW_GET_FCNT(sdio_dw_readl(sd, STATUS)))); + + sd->last_buf = remain ? 
buf : RT_NULL; + sd->last_remain = remain; + + rt_hw_wmb(); + rt_bitmap_set_bit(&sd->pending_events, EVENT_XFER_COMPLETE); +} + +static void sdio_dw_write_data_pio(struct sdio_dw *sd) +{ + void *buf; + int shift = sd->data_shift; + struct rt_mmcsd_data *data = sd->data; + rt_uint32_t status, remain, fcnt, len, fifo_depth; + + buf = sd->last_buf; + remain = sd->last_remain; + fifo_depth = sd->fifo_depth; + + do { + if (!remain) + { + break; + } + + do { + fcnt = ((fifo_depth - SDIO_DW_GET_FCNT(sdio_dw_readl(sd, STATUS))) << shift) - sd->part_buf_count; + len = rt_min(remain, fcnt); + + if (!len) + { + break; + } + + sd->push_data(sd, buf, len); + data->bytes_xfered += len; + buf += len; + remain -= len; + } while (remain); + + status = sdio_dw_readl(sd, MINTSTS); + sdio_dw_writel(sd, RINTSTS, PINT(TXDR)); + /* If TXDR write again */ + } while ((status & PINT(TXDR))); + + sd->last_buf = remain ? buf : RT_NULL; + sd->last_remain = remain; + + rt_hw_wmb(); + rt_bitmap_set_bit(&sd->pending_events, EVENT_XFER_COMPLETE); +} + +static void sdio_dw_init_dma(struct sdio_dw *sd) +{ + int addr_config; + + /* + * Check tansfer mode from HCON[17:16] + * Clear the ambiguous description of dw_mmc databook: + * 2b'00: No DMA Interface -> Actually means using Internal DMA block + * 2b'01: DesignWare DMA Interface -> Synopsys DW-DMA block + * 2b'10: Generic DMA Interface -> non-Synopsys generic DMA block + * 2b'11: Non DW DMA Interface -> pio only + * Compared to DesignWare DMA Interface, Generic DMA Interface has a simpler + * request/acknowledge handshake mechanism and both of them are regarded as + * external dma master for dw_mmc. + */ + sd->use_dma = SDIO_DW_GET_TRANS_MODE(sdio_dw_readl(sd, HCON)); + + if (sd->use_dma == DMA_INTERFACE_IDMA) + { + sd->use_dma = TRANS_MODE_IDMAC; + } + else if (sd->use_dma == DMA_INTERFACE_DWDMA || sd->use_dma == DMA_INTERFACE_GDMA) + { + sd->use_dma = TRANS_MODE_EDMAC; + } + else + { + goto _no_dma; + } + + /* Determine which DMA interface to use */ + if (sd->use_dma == TRANS_MODE_IDMAC) + { + /* Check ADDR_CONFIG bit in HCON to find IDMAC address bus width */ + addr_config = SDIO_DW_GET_ADDR_CONFIG(sdio_dw_readl(sd, HCON)); + + /* Supports IDMAC in 64/32-bit address mode */ + sd->dma_64bit_address = (addr_config == 1); + LOG_D("IDMAC supports %s-bit address mode", sd->dma_64bit_address ? 
"64" : "32"); + + /* Alloc memory for translation */ + sd->dma_buf = rt_dma_alloc_coherent(sd->bus_dev, DESC_RING_BUF_SZ, &sd->dma_buf_phy); + + if (!sd->dma_buf) + { + LOG_E("Could not alloc DMA memory witch cache"); + + goto _no_dma; + } + + sd->dma_ops = &sdio_dw_idmac_ops; + LOG_D("Using internal DMA controller"); + } + else + { + if (!rt_dm_dev_prop_read_bool(&sd->parent, "dma-names") || + !rt_dm_dev_prop_read_bool(&sd->parent, "dmas")) + { + goto _no_dma; + } + + sd->dma_ops = &sdio_dw_edmac_ops; + LOG_D("Using external DMA controller"); + } + + if (sd->dma_ops->init && sd->dma_ops->start && sd->dma_ops->stop && sd->dma_ops->cleanup) + { + if (sd->dma_ops->init(sd)) + { + LOG_E("Unable to initialize DMA Controller"); + goto _no_dma; + } + } + else + { + LOG_E("DMA initialization not found"); + goto _no_dma; + } + + return; + +_no_dma: + LOG_D("Using PIO mode"); + sd->use_dma = TRANS_MODE_PIO; +} + +static rt_bool_t sdio_dw_reset(struct sdio_dw *sd) +{ + rt_err_t res = RT_FALSE; + rt_uint32_t flags = SDIO_DW_CTRL_RESET | SDIO_DW_CTRL_FIFO_RESET; + + if (sd->use_dma) + { + flags |= SDIO_DW_CTRL_DMA_RESET; + } + + if (sdio_dw_ctrl_reset(sd, flags)) + { + int timeout = 500 * USEC_PER_MSEC; + /* In all cases we clear the RAWINTS register to clear any interrupts */ + sdio_dw_writel(sd, RINTSTS, 0xffffffff); + + if (!sd->use_dma) + { + res = RT_TRUE; + goto _ciu_out; + } + + /* Wait for dma_req to be cleared */ + while ((sdio_dw_readl(sd, STATUS) & SDIO_DW_STATUS_DMA_REQ) && timeout--) + { + rt_hw_cpu_relax(); + } + + if (time <= 0) + { + LOG_E("Timeout waiting for dma_req to be cleared in reset"); + goto _ciu_out; + } + + /* when using DMA next we reset the fifo again */ + if (!sdio_dw_ctrl_reset(sd, SDIO_DW_CTRL_FIFO_RESET)) + { + goto _ciu_out; + } + } + else + { + /* If the controller reset bit did clear, then set clock regs */ + if (!(sdio_dw_readl(sd, CTRL) & SDIO_DW_CTRL_RESET)) + { + LOG_E("FIFO/DMA reset bits didn't clear but ciu was reset, doing clock update"); + + goto _ciu_out; + } + } + + if (sd->use_dma == TRANS_MODE_IDMAC) + { + /* It is also required that we reinit idmac */ + sdio_dw_idmac_init(sd); + } + + res = RT_TRUE; + +_ciu_out: + /* After a CTRL reset we need to have CIU set clock registers */ + sdio_dw_send_cmd(sd->slot, SDIO_DW_CMD_UPD_CLK, 0); + + return res; +} + +static void sdio_dw_start_request(struct sdio_dw *sd, struct rt_mmcsd_cmd *cmd) +{ + rt_uint32_t cmd_flags; + struct sdio_dw_slot *slot = sd->slot; + struct rt_mmcsd_data *data = cmd->data; + + if (sd->state == STATE_WAITING_CMD11_DONE) + { + sd->state = STATE_IDLE; + } + + if (sd->state == STATE_IDLE) + { + sd->state = STATE_SENDING_CMD; + } + + sd->req = sd->slot->req; + sd->cmd = cmd; + + sd->pending_events = 0; + sd->cmd_status = 0; + sd->data_status = 0; + sd->dir_status = 0; + + if (data) + { + sdio_dw_set_data_timeout(sd, data->timeout_ns); + sdio_dw_writel(sd, BYTCNT, data->blksize * data->blks); + sdio_dw_writel(sd, BLKSIZ, data->blksize); + } + + cmd_flags = cmd->cmd_code; + + if (cmd->cmd_code == STOP_TRANSMISSION || + cmd->cmd_code == GO_IDLE_STATE || + cmd->cmd_code == GO_INACTIVE_STATE || + (cmd->cmd_code == SD_IO_RW_DIRECT && ((cmd->arg >> 9) & 0x1ffff) == SDIO_REG_CCCR_IO_ABORT)) + { + cmd_flags |= SDIO_DW_CMD_STOP; + } + else if (cmd->cmd_code != SEND_STATUS && data) + { + cmd_flags |= SDIO_DW_CMD_PRV_DAT_WAIT; + } + + if (cmd->cmd_code == VOLTAGE_SWITCH) + { + rt_uint32_t clk_en_a; + + /* Special bit makes CMD11 not die */ + cmd_flags |= SDIO_DW_CMD_VOLT_SWITCH; + + /* Change state to 
continue to handle CMD11 weirdness */ + sd->state = STATE_SENDING_CMD11; + + /* + * We need to disable low power mode (automatic clock stop) while + * doing voltage switch so we don't confuse the card, since stopping + * the clock is a specific part of the UHS voltage change dance. + * + * Note that low power mode (SDIO_DW_CLKEN_LOW_PWR) will be + * unconditionally turned back on in dw_mci_setup_bus() if it's ever + * called with a non-zero clock. That shouldn't happen until the + * voltage change is all done. + */ + clk_en_a = sdio_dw_readl(sd, CLKENA); + clk_en_a &= ~(SDIO_DW_CLKEN_LOW_PWR << slot->id); + sdio_dw_writel(sd, CLKENA, clk_en_a); + sdio_dw_send_cmd(sd->slot, SDIO_DW_CMD_UPD_CLK | SDIO_DW_CMD_PRV_DAT_WAIT, 0); + } + + switch (resp_type(cmd)) + { + case RESP_NONE: + break; + + case RESP_R1: + case RESP_R5: + case RESP_R6: + case RESP_R7: + case RESP_R1B: + cmd_flags |= SDIO_DW_CMD_RESP_EXP; + cmd_flags |= SDIO_DW_CMD_RESP_CRC; + break; + + case RESP_R2: + cmd_flags |= SDIO_DW_CMD_RESP_EXP; + cmd_flags |= SDIO_DW_CMD_RESP_CRC; + cmd_flags |= SDIO_DW_CMD_RESP_LONG; + break; + + case RESP_R3: + case RESP_R4: + cmd_flags |= SDIO_DW_CMD_RESP_EXP; + break; + + default: + LOG_D("Unsupported cmd type = %x", resp_type(cmd)); + break; + } + + if (data) + { + cmd_flags |= SDIO_DW_CMD_DAT_EXP; + + if ((data->flags & DATA_DIR_WRITE)) + { + cmd_flags |= SDIO_DW_CMD_DAT_WR; + } + } + + if (!rt_bitmap_test_bit(&slot->flags, DW_MMC_CARD_NO_USE_HOLD)) + { + cmd_flags |= SDIO_DW_CMD_USE_HOLD_REG; + } + + if (rt_bitmap_test_bit(&slot->flags, DW_MMC_CARD_NEED_INIT)) + { + cmd_flags |= SDIO_DW_CMD_INIT; + rt_bitmap_clear_bit(&slot->flags, DW_MMC_CARD_NEED_INIT); + } + + if (data) + { + sdio_dw_submit_data(sd, data); + /* Drain writebuffer */ + rt_hw_wmb(); + } + + sdio_dw_start_cmd(slot, cmd_flags, cmd->arg); + + if (cmd->cmd_code == VOLTAGE_SWITCH) + { + rt_ubase_t level = rt_spin_lock_irqsave(&sd->irq_lock); + + if (!rt_bitmap_test_bit(&sd->pending_events, EVENT_CMD_COMPLETE)) + { + rt_tick_t tick = rt_tick_from_millisecond(500) + 1; + + rt_timer_control(&sd->cmd11_timer, RT_TIMER_CTRL_SET_TIME, &tick); + + rt_timer_start(&sd->cmd11_timer); + } + + rt_spin_unlock_irqrestore(&sd->irq_lock, level); + } + + sd->stop_cmdr = sdio_dw_prep_stop_abort(sd, cmd); +} + +static void sdio_dw_end_request(struct sdio_dw *sd) +{ + sd->slot->req = RT_NULL; + sd->req = RT_NULL; + + if (sd->state == STATE_SENDING_CMD11) + { + sd->state = STATE_WAITING_CMD11_DONE; + } + else + { + sd->state = STATE_IDLE; + } + + rt_hw_spin_unlock(&sd->lock.lock); + + mmcsd_req_complete(sd->slot->host); + + rt_hw_spin_lock(&sd->lock.lock); +} + +static rt_err_t sdio_dw_cmd_complete(struct sdio_dw *sd, struct rt_mmcsd_cmd *cmd) +{ + rt_uint32_t status = sd->cmd_status; + + sd->cmd_status = 0; + + /* Read the response from the card (up to 16 bytes) */ + if (resp_type(cmd) == RESP_R2) + { + cmd->resp[0] = sdio_dw_readl(sd, RESP3); + cmd->resp[1] = sdio_dw_readl(sd, RESP2); + cmd->resp[2] = sdio_dw_readl(sd, RESP1); + cmd->resp[3] = sdio_dw_readl(sd, RESP0); + } + else + { + cmd->resp[0] = sdio_dw_readl(sd, RESP0); + } + + if ((status & PINT(RTO))) + { + cmd->err = -RT_ETIMEOUT; + } + else if ((resp_type(cmd) & (RESP_R1 | RESP_R5 | RESP_R6 | RESP_R7 | RESP_R1B)) && + (status & PINT(RCRC))) + { + cmd->err = -RT_EIO; + } + else if ((status & PINT(RESP_ERR))) + { + cmd->err = -RT_EIO; + } + else + { + cmd->err = RT_EOK; + } + + return cmd->err; +} + +static int sdio_dw_data_complete(struct sdio_dw *sd, struct rt_mmcsd_data *data) +{ + 
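    /*
     * Map the raw data interrupt status onto an error code for the core:
     * DRTO becomes -RT_ETIMEOUT, DCRC/SBE and an end-bit error on a read
     * become -RT_EIO, and an end-bit error on a write clears bytes_xfered
     * and reports -RT_ETIMEOUT because no data CRC status came back. Any
     * data error also forces sdio_dw_reset() to flush whatever is left in
     * the FIFO; on success bytes_xfered is simply blks * blksize.
     */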
rt_uint32_t status = sd->data_status; + + if (status & SDIO_DW_DATA_ERROR_FLAGS) + { + if (status & PINT(DRTO)) + { + data->err = -RT_ETIMEOUT; + } + else if (status & PINT(DCRC)) + { + data->err = -RT_EIO; + } + else if (status & PINT(EBE)) + { + if (sd->dir_status == SDIO_DW_SEND_STATUS) + { + /* + * No data CRC status was returned. The number of bytes + * transferred will be exaggerated in PIO mode. + */ + data->bytes_xfered = 0; + data->err = -RT_ETIMEOUT; + } + else if (sd->dir_status == SDIO_DW_RECV_STATUS) + { + data->err = -RT_EIO; + } + } + else + { + /* PINT(SBE) is included */ + data->err = -RT_EIO; + } + + LOG_D("Data error, status 0x%x", status); + + /* After an error, there may be data lingering in the FIFO */ + sdio_dw_reset(sd); + } + else + { + data->bytes_xfered = data->blks * data->blksize; + data->err = RT_EOK; + } + + return data->err; +} + +static void sdio_dw_mmc_request(struct rt_mmcsd_host *host, struct rt_mmcsd_req *req) +{ + struct sdio_dw_slot *slot = host->private_data; + struct sdio_dw *sd = slot->sd; + + /* + * The check for card presence and queueing of the request must be atomic, + * otherwise the card could be removed in between and the request wouldn't + * fail until another card was inserted. + */ + if (!sdio_dw_get_cd(slot)) + { + req->cmd->err = -RT_EIO; + mmcsd_req_complete(host); + + return; + } + + rt_hw_spin_lock(&sd->lock.lock); + + sd->slot->req = req; + sdio_dw_start_request(sd, req->sbc ? : req->cmd); + + rt_hw_spin_unlock(&sd->lock.lock); +} + +static void sdio_dw_mmc_set_iocfg(struct rt_mmcsd_host *host, struct rt_mmcsd_io_cfg *ios) +{ + rt_err_t err; + rt_uint32_t regs; + struct sdio_dw_slot *slot = host->private_data; + struct sdio_dw *sd = slot->sd; + const struct sdio_dw_drv_data *drv_data = sd->drv_data; + + /* Bus */ + switch (ios->bus_width) + { + case MMCSD_BUS_WIDTH_4: + slot->ctype = SDIO_DW_CTYPE_4BIT; + break; + + case MMCSD_BUS_WIDTH_8: + slot->ctype = SDIO_DW_CTYPE_8BIT; + break; + + default: + slot->ctype = SDIO_DW_CTYPE_1BIT; + break; + } + + regs = sdio_dw_readl(sd, UHS_REG); + + /* DDR mode set */ + if (ios->timing == MMCSD_TIMING_MMC_DDR52 || + ios->timing == MMCSD_TIMING_UHS_DDR50 || + ios->timing == MMCSD_TIMING_MMC_HS400) + { + regs |= ((0x1 << slot->id) << 16); + } + else + { + regs &= ~((0x1 << slot->id) << 16); + } + + sdio_dw_writel(sd, UHS_REG, regs); + sd->timing = ios->timing; + + /* + * Use mirror of ios->clock to prevent race with mmc core ios update when + * finding the minimum. 
+ */ + slot->clock = ios->clock; + + if (drv_data && drv_data->set_iocfg) + { + drv_data->set_iocfg(sd, ios); + } + + /* Power */ + switch (ios->power_mode) + { + case MMCSD_POWER_UP: + if (host->supply.vmmc) + { + err = sdio_regulator_set_ocr(host, host->supply.vmmc, ios->vdd); + + if (err) + { + LOG_E("Failed to enable vmmc regulator error = %s", rt_strerror(err)); + + return; + } + } + rt_bitmap_set_bit(&slot->flags, DW_MMC_CARD_NEED_INIT); + regs = sdio_dw_readl(sd, PWREN); + regs |= (1 << slot->id); + sdio_dw_writel(sd, PWREN, regs); + break; + + case MMCSD_POWER_ON: + if (!sd->vqmmc_enabled) + { + if (host->supply.vqmmc) + { + err = rt_regulator_enable(host->supply.vqmmc); + + if (err) + { + LOG_E("Failed to enable vqmmc error = %s", rt_strerror(err)); + } + else + { + sd->vqmmc_enabled = RT_TRUE; + } + } + else + { + sd->vqmmc_enabled = RT_TRUE; + } + + sdio_dw_ctrl_reset(sd, SDIO_DW_CTRL_ALL_RESET_FLAGS); + } + + /* Adjust clock / bus width after power is up */ + sdio_dw_setup_bus(slot, RT_FALSE); + break; + + case MMCSD_POWER_OFF: + /* Turn clock off before power goes down */ + sdio_dw_setup_bus(slot, RT_FALSE); + + if (host->supply.vmmc) + { + sdio_regulator_set_ocr(host, host->supply.vmmc, 0); + } + + if (host->supply.vqmmc && sd->vqmmc_enabled) + { + rt_regulator_disable(host->supply.vqmmc); + } + + sd->vqmmc_enabled = RT_FALSE; + + regs = sdio_dw_readl(sd, PWREN); + regs &= ~(1 << slot->id); + sdio_dw_writel(sd, PWREN, regs); + break; + + default: + LOG_E("Invalid power_mode value %x", ios->power_mode); + break; + } + + if (sd->state == STATE_WAITING_CMD11_DONE && ios->clock != 0) + { + sd->state = STATE_IDLE; + } +} + +static rt_int32_t sdio_dw_mmc_get_card_status(struct rt_mmcsd_host *host) +{ + return 0; +} + +static void sdio_dw_mmc_enable_sdio_irq(struct rt_mmcsd_host *host, rt_int32_t enable) +{ + rt_ubase_t level; + rt_uint32_t int_mask, clk_en_a_old, clk_en_a; + struct sdio_dw_slot *slot = host->private_data; + struct sdio_dw *sd = slot->sd; + const rt_uint32_t clken_low_pwr = SDIO_DW_CLKEN_LOW_PWR << slot->id; + + /* + * Low power mode will stop the card clock when idle. According to the + * description of the CLKENA register we should disable low power mode for + * SDIO cards if we need SDIO interrupts to work. 
+ */ + clk_en_a_old = sdio_dw_readl(sd, CLKENA); + + if (enable) + { + rt_bitmap_set_bit(&slot->flags, DW_MMC_CARD_NO_LOW_PWR); + clk_en_a = clk_en_a_old & ~clken_low_pwr; + } + else + { + rt_bitmap_clear_bit(&slot->flags, DW_MMC_CARD_NO_LOW_PWR); + clk_en_a = clk_en_a_old | clken_low_pwr; + } + + if (clk_en_a != clk_en_a_old) + { + sdio_dw_writel(sd, CLKENA, clk_en_a); + sdio_dw_send_cmd(slot, SDIO_DW_CMD_UPD_CLK | SDIO_DW_CMD_PRV_DAT_WAIT, 0); + } + + level = rt_spin_lock_irqsave(&sd->irq_lock); + + /* Enable/disable Slot Specific SDIO interrupt */ + int_mask = sdio_dw_readl(sd, INTMASK); + if (enable) + { + int_mask |= SDIO_DW_INT_SDIO(slot->sdio_id); + } + else + { + int_mask &= ~SDIO_DW_INT_SDIO(slot->sdio_id); + } + sdio_dw_writel(sd, INTMASK, int_mask); + + rt_spin_unlock_irqrestore(&sd->irq_lock, level); +} + +static rt_int32_t sdio_dw_mmc_execute_tuning(struct rt_mmcsd_host *host, rt_int32_t opcode) +{ + struct sdio_dw_slot *slot = host->private_data; + struct sdio_dw *sd = slot->sd; + const struct sdio_dw_drv_data *drv_data = sd->drv_data; + + if (drv_data && drv_data->execute_tuning) + { + return drv_data->execute_tuning(slot, opcode); + } + + return -RT_EINVAL; +} + +static rt_bool_t sdio_dw_mmc_card_busy(struct rt_mmcsd_host *host) +{ + rt_uint32_t status; + struct sdio_dw_slot *slot = host->private_data; + + /* Check the busy bit which is low when DAT[3:0] (the data lines) are 0000 */ + status = sdio_dw_readl(slot->sd, STATUS); + + return !!(status & SDIO_DW_STATUS_BUSY); +} + +static rt_err_t sdio_dw_mmc_signal_voltage_switch(struct rt_mmcsd_host *host, + struct rt_mmcsd_io_cfg *ios) +{ + rt_uint32_t uhs, v18; + struct sdio_dw_slot *slot = host->private_data; + struct sdio_dw *sd = slot->sd; + const struct sdio_dw_drv_data *drv_data = sd->drv_data; + + v18 = SDIO_DW_UHS_18V << slot->id; + + if (drv_data && drv_data->switch_voltage) + { + return drv_data->switch_voltage(host, ios); + } + + /* + * Program the voltage. Note that some instances of dw_mmc may use + * the UHS_REG for this. For other instances (like exynos) the UHS_REG + * does no harm but you need to set the regulator directly. Try both. + */ + uhs = sdio_dw_readl(sd, UHS_REG); + if (ios->signal_voltage == MMCSD_SIGNAL_VOLTAGE_330) + { + uhs &= ~v18; + } + else + { + uhs |= v18; + } + + if (host->supply.vqmmc) + { + rt_err_t err = sdio_regulator_set_vqmmc(host, ios); + + if (err < 0) + { + LOG_D("Regulator set error %s to %s V", rt_strerror(err), + uhs & v18 ? 
"1.8" : "3.3"); + + return err; + } + } + sdio_dw_writel(sd, UHS_REG, uhs); + + return RT_EOK; +} + +static const struct rt_mmcsd_host_ops sdio_dw_mmc_ops = +{ + .request = sdio_dw_mmc_request, + .set_iocfg = sdio_dw_mmc_set_iocfg, + .get_card_status = sdio_dw_mmc_get_card_status, + .enable_sdio_irq = sdio_dw_mmc_enable_sdio_irq, + .execute_tuning = sdio_dw_mmc_execute_tuning, + .card_busy = sdio_dw_mmc_card_busy, + .signal_voltage_switch = sdio_dw_mmc_signal_voltage_switch, +}; + +static void sdio_dw_set_drto(struct sdio_dw *sd) +{ + rt_ubase_t level; + rt_uint32_t drto_clks, drto_div, drto_ms; + const struct sdio_dw_drv_data *drv_data = sd->drv_data; + + if (drv_data && drv_data->get_drto_clks) + { + drto_clks = drv_data->get_drto_clks(sd); + } + else + { + drto_clks = sdio_dw_readl(sd, TMOUT) >> 8; + } + + drto_div = (sdio_dw_readl(sd, CLKDIV) & 0xff) * 2; + + if (drto_div == 0) + { + drto_div = 1; + } + + drto_ms = RT_DIV_ROUND_UP_ULL((rt_uint64_t)MSEC_PER_SEC * drto_clks * drto_div, + sd->bus_hz); + + /* Add a bit spare time */ + drto_ms += 10; + + level = rt_spin_lock_irqsave(&sd->irq_lock); + + if (!rt_bitmap_test_bit(&sd->pending_events, EVENT_DATA_COMPLETE)) + { + rt_tick_t tick = rt_tick_from_millisecond(drto_ms); + + rt_timer_control(&sd->dto_timer, RT_TIMER_CTRL_SET_TIME, &tick); + + rt_timer_start(&sd->dto_timer); + } + + rt_spin_unlock_irqrestore(&sd->irq_lock, level); +} + +static rt_bool_t sdio_dw_clear_pending_cmd_complete(struct sdio_dw *sd) +{ + if (!rt_bitmap_test_bit(&sd->pending_events, EVENT_CMD_COMPLETE)) + { + return RT_FALSE; + } + + rt_timer_stop(&sd->cto_timer); + rt_bitmap_clear_bit(&sd->pending_events, EVENT_CMD_COMPLETE); + + return RT_TRUE; +} + +static rt_bool_t sdio_dw_clear_pending_data_complete(struct sdio_dw *sd) +{ + if (!rt_bitmap_test_bit(&sd->pending_events, EVENT_DATA_COMPLETE)) + { + return RT_FALSE; + } + + rt_timer_stop(&sd->dto_timer); + rt_bitmap_clear_bit(&sd->pending_events, EVENT_DATA_COMPLETE); + + return RT_TRUE; +} + +static void sdio_dw_state_change(struct rt_work *work, void *work_data) +{ + rt_err_t err; + rt_uint32_t state, prev_state; + struct rt_mmcsd_cmd *cmd; + struct rt_mmcsd_req *req; + struct rt_mmcsd_data *data; + struct sdio_dw *sd = rt_container_of(work, struct sdio_dw, state_work); + + rt_hw_spin_lock(&sd->lock.lock); + + state = sd->state; + req = sd->req; + data = sd->data; + +_next_status: + prev_state = state; + + switch (state) + { + case STATE_IDLE: + case STATE_WAITING_CMD11_DONE: + break; + + case STATE_SENDING_CMD11: + case STATE_SENDING_CMD: + if (!sdio_dw_clear_pending_cmd_complete(sd)) + { + break; + } + + cmd = sd->cmd; + sd->cmd = RT_NULL; + err = sdio_dw_cmd_complete(sd, cmd); + if (cmd == req->sbc && !err) + { + sdio_dw_start_request(sd, req->cmd); + + goto _unlock; + } + + if (cmd->data && err) + { + if (err != -RT_ETIMEOUT && sd->dir_status == SDIO_DW_RECV_STATUS) + { + state = STATE_SENDING_DATA; + + goto _check_status; + } + + send_stop_abort(sd, data); + sdio_dw_stop_dma(sd); + state = STATE_SENDING_STOP; + break; + } + + if (!cmd->data || err) + { + sdio_dw_end_request(sd); + + goto _unlock; + } + + prev_state = state = STATE_SENDING_DATA; + + /* Fall through */ + case STATE_SENDING_DATA: + if (rt_bitmap_test_bit(&sd->pending_events, EVENT_DATA_ERROR)) + { + rt_bitmap_clear_bit(&sd->pending_events, EVENT_DATA_ERROR); + + if (!(sd->data_status & (PINT(DRTO) | PINT(EBE)))) + { + send_stop_abort(sd, data); + } + + sdio_dw_stop_dma(sd); + state = STATE_DATA_ERROR; + break; + } + + if 
(!rt_bitmap_test_bit(&sd->pending_events, EVENT_XFER_COMPLETE)) + { + /* + * If all data-related interrupts don't come within the given time + * in reading data state. + */ + if (sd->dir_status == SDIO_DW_RECV_STATUS) + { + sdio_dw_set_drto(sd); + } + + break; + } + rt_bitmap_clear_bit(&sd->pending_events, EVENT_XFER_COMPLETE); + + /* + * Handle an EVENT_DATA_ERROR that might have shown up before the + * transfer completed. This might not have been caught by the check + * above because the interrupt could have gone off between the previous + * check and the check for transfer complete. + * + * Technically this ought not be needed assuming we get a DATA_COMPLETE + * eventually (we'll notice the error and end the request), but it + * shouldn't hurt. + * + * This has the advantage of sending the stop command. + */ + if (rt_bitmap_test_bit(&sd->pending_events, EVENT_DATA_ERROR)) + { + rt_bitmap_clear_bit(&sd->pending_events, EVENT_DATA_ERROR); + + if (!(sd->data_status & (PINT(DRTO) | PINT(EBE)))) + { + send_stop_abort(sd, data); + } + + sdio_dw_stop_dma(sd); + state = STATE_DATA_ERROR; + break; + } + prev_state = state = STATE_DATA_BUSY; + + /* Fall through */ + case STATE_DATA_BUSY: + if (!sdio_dw_clear_pending_data_complete(sd)) + { + /* + * If data error interrupt comes but data over interrupt doesn't + * come within the given time. In reading data state. + */ + if (sd->dir_status == SDIO_DW_RECV_STATUS) + { + sdio_dw_set_drto(sd); + } + + break; + } + + sd->data = RT_NULL; + err = sdio_dw_data_complete(sd, data); + + if (!err) + { + if (!data->stop || req->sbc) + { + if (req->sbc && data->stop) + { + data->stop->err = RT_EOK; + } + + sdio_dw_end_request(sd); + + goto _unlock; + } + + /* Stop command for open-ended transfer */ + if (data->stop) + { + send_stop_abort(sd, data); + } + } + else + { + /* + * If we don't have a command complete now we'll never get one since + * we just reset everything; better end the request. + * + * If we do have a command complete we'll fall through to the + * STATE_SENDING_STOP command and everything will be peachy keen. + */ + if (!rt_bitmap_test_bit(&sd->pending_events, EVENT_CMD_COMPLETE)) + { + sd->cmd = RT_NULL; + + sdio_dw_end_request(sd); + + goto _unlock; + } + } + + /* If err has non-zero, stop-abort command has been already issued. 
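+ * It was sent by the error handling above, so the state machine only has
+ * to wait for its command-complete event in STATE_SENDING_STOP.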
*/ + prev_state = state = STATE_SENDING_STOP; + + /* Fall through */ + case STATE_SENDING_STOP: + if (!sdio_dw_clear_pending_cmd_complete(sd)) + { + break; + } + + /* CMD error in data command */ + if (req->cmd->err && req->data) + { + sdio_dw_reset(sd); + } + + sd->cmd = RT_NULL; + sd->data = RT_NULL; + + if (!req->sbc && req->stop) + { + sdio_dw_cmd_complete(sd, req->stop); + } + else + { + sd->cmd_status = 0; + } + + sdio_dw_end_request(sd); + goto _unlock; + + case STATE_DATA_ERROR: + if (!rt_bitmap_test_bit(&sd->pending_events, EVENT_XFER_COMPLETE)) + { + break; + } + rt_bitmap_clear_bit(&sd->pending_events, EVENT_XFER_COMPLETE); + + state = STATE_DATA_BUSY; + break; + + default: + break; + } + +_check_status: + if (state != prev_state) + { + goto _next_status; + } + + sd->state = state; +_unlock: + rt_hw_spin_unlock(&sd->lock.lock); +} + +static void sdio_dw_cmd11_timer(void *param) +{ + struct sdio_dw *sd = param; + + if (sd->state != STATE_SENDING_CMD11) + { + LOG_W("Unexpected CMD11 timeout"); + + return; + } + + sd->cmd_status = PINT(RTO); + rt_bitmap_set_bit(&sd->pending_events, EVENT_CMD_COMPLETE); + rt_workqueue_urgent_work(sd->state_wq, &sd->state_work); +} + +static void sdio_dw_cto_timer(void *param) +{ + rt_ubase_t level; + rt_uint32_t pending; + struct sdio_dw *sd = param; + + level = rt_spin_lock_irqsave(&sd->irq_lock); + + /* + * If somehow we have very bad interrupt latency it's remotely possible that + * the timer could fire while the interrupt is still pending or while the + * interrupt is midway through running. Let's be paranoid and detect those + * two cases. Note that this is paranoia is somewhat justified because in + * this function we don't actually cancel the pending command in the + * controller, we just assume it will never come. + */ + /* Read-only mask reg */ + pending = sdio_dw_readl(sd, MINTSTS); + + if ((pending & (SDIO_DW_CMD_ERROR_FLAGS | PINT(CMD_DONE)))) + { + /* The interrupt should fire; no need to act but we can warn */ + LOG_W("Unexpected interrupt latency"); + + goto _unlock; + } + + if (rt_bitmap_test_bit(&sd->pending_events, EVENT_CMD_COMPLETE)) + { + /* Presumably interrupt handler couldn't delete the timer */ + LOG_W("CTO timeout when already completed"); + + goto _unlock; + } + + /* + * Continued paranoia to make sure we're in the state we expect. + * This paranoia isn't really justified but it seems good to be safe. + */ + switch (sd->state) + { + case STATE_SENDING_CMD11: + case STATE_SENDING_CMD: + case STATE_SENDING_STOP: + /* + * If CMD_DONE interrupt does NOT come in sending command state, we + * should notify the driver to terminate current transfer and report a + * command timeout to the core. + */ + sd->cmd_status = PINT(RTO); + rt_bitmap_set_bit(&sd->pending_events, EVENT_CMD_COMPLETE); + rt_workqueue_urgent_work(sd->state_wq, &sd->state_work); + break; + + default: + LOG_W("Unexpected command timeout, state %d", sd->state); + break; + } + +_unlock: + rt_spin_unlock_irqrestore(&sd->irq_lock, level); +} + +static void sdio_dw_dto_timer(void *param) +{ + rt_ubase_t level; + rt_uint32_t pending; + struct sdio_dw *sd = param; + + level = rt_spin_lock_irqsave(&sd->irq_lock); + + /* + * The DTO timer is much longer than the CTO timer, so it's even less likely + * that we'll these cases, but it pays to be paranoid. 
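+ * ("These cases" are the same races described for the CTO timer above:
+ * the interrupt is still pending, or the handler is midway through
+ * running.)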
+ */ + /* Read-only mask reg */ + pending = sdio_dw_readl(sd, MINTSTS); + + if ((pending & PINT(DATA_OVER))) + { + /* The interrupt should fire; no need to act but we can warn */ + LOG_W("Unexpected data interrupt latency"); + + goto _unlock; + } + + if (rt_bitmap_test_bit(&sd->pending_events, EVENT_DATA_COMPLETE)) + { + /* Presumably interrupt handler couldn't delete the timer */ + LOG_W("DTO timeout when already completed"); + + goto _unlock; + } + + /* + * Continued paranoia to make sure we're in the state we expect. + * This paranoia isn't really justified but it seems good to be safe. + */ + switch (sd->state) + { + case STATE_SENDING_DATA: + case STATE_DATA_BUSY: + /* + * If DTO interrupt does NOT come in sending data state, we should + * notify the driver to terminate current transfer and report a data + * timeout to the core. + */ + sd->data_status = PINT(DRTO); + rt_bitmap_set_bit(&sd->pending_events, EVENT_DATA_ERROR); + rt_bitmap_set_bit(&sd->pending_events, EVENT_DATA_COMPLETE); + rt_workqueue_urgent_work(sd->state_wq, &sd->state_work); + break; + + default: + LOG_W("Unexpected data timeout, state %d", sd->state); + break; + } + +_unlock: + rt_spin_unlock_irqrestore(&sd->irq_lock, level); +} + +static void sdio_dw_cmd_interrupt(struct sdio_dw *sd, rt_uint32_t status) +{ + rt_timer_stop(&sd->cto_timer); + + if (!sd->cmd_status) + { + sd->cmd_status = status; + } + + /* Drain writebuffer */ + rt_hw_wmb(); + + rt_bitmap_set_bit(&sd->pending_events, EVENT_CMD_COMPLETE); + rt_workqueue_urgent_work(sd->state_wq, &sd->state_work); +} + +static void sdio_dw_isr(int irqno, void *param) +{ + rt_uint32_t pending; + struct sdio_dw *sd = (struct sdio_dw *)param; + struct sdio_dw_slot *slot = sd->slot; + + /* Read-only mask reg */ + pending = sdio_dw_readl(sd, MINTSTS); + + if (pending) + { + if (sd->state == STATE_SENDING_CMD11 && (pending & PINT(VOLT_SWITCH))) + { + sdio_dw_writel(sd, RINTSTS, PINT(VOLT_SWITCH)); + pending &= ~PINT(VOLT_SWITCH); + + rt_hw_spin_lock(&sd->irq_lock.lock); + sdio_dw_cmd_interrupt(sd, pending); + rt_hw_spin_unlock(&sd->irq_lock.lock); + + rt_timer_stop(&sd->cmd11_timer); + } + + if ((pending & SDIO_DW_CMD_ERROR_FLAGS)) + { + rt_hw_spin_lock(&sd->irq_lock.lock); + + rt_timer_stop(&sd->cto_timer); + sdio_dw_writel(sd, RINTSTS, SDIO_DW_CMD_ERROR_FLAGS); + sd->cmd_status = pending; + rt_hw_wmb(); + rt_bitmap_set_bit(&sd->pending_events, EVENT_CMD_COMPLETE); + + rt_hw_spin_unlock(&sd->irq_lock.lock); + } + + if ((pending & SDIO_DW_DATA_ERROR_FLAGS)) + { + rt_hw_spin_lock(&sd->irq_lock.lock); + + if ((sd->quirks & SDIO_DW_QUIRK_EXTENDED_TMOUT)) + { + rt_timer_stop(&sd->dto_timer); + } + + sdio_dw_writel(sd, RINTSTS, SDIO_DW_DATA_ERROR_FLAGS); + sd->data_status = pending; + rt_hw_wmb(); + rt_bitmap_set_bit(&sd->pending_events, EVENT_DATA_ERROR); + + if ((sd->quirks & SDIO_DW_QUIRK_EXTENDED_TMOUT)) + { + /* In case of error, we cannot expect a DTO */ + rt_bitmap_set_bit(&sd->pending_events, EVENT_DATA_COMPLETE); + } + rt_workqueue_urgent_work(sd->state_wq, &sd->state_work); + + rt_hw_spin_unlock(&sd->irq_lock.lock); + } + + if ((pending & PINT(DATA_OVER))) + { + rt_hw_spin_lock(&sd->irq_lock.lock); + + rt_timer_stop(&sd->dto_timer); + + sdio_dw_writel(sd, RINTSTS, PINT(DATA_OVER)); + if (!sd->data_status) + { + sd->data_status = pending; + } + rt_hw_wmb(); + + if (sd->dir_status == SDIO_DW_RECV_STATUS && sd->data && sd->data->buf) + { + sdio_dw_read_data_pio(sd, RT_TRUE); + } + rt_bitmap_set_bit(&sd->pending_events, EVENT_DATA_COMPLETE); + + 
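+ /*
+ * The ISR only latches the raw status and the pending events here; the
+ * request itself is completed later by sdio_dw_state_change(), which
+ * runs from the state workqueue scheduled on the next line.
+ */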
rt_workqueue_urgent_work(sd->state_wq, &sd->state_work); + + rt_hw_spin_unlock(&sd->irq_lock.lock); + } + + if ((pending & PINT(RXDR))) + { + sdio_dw_writel(sd, RINTSTS, PINT(RXDR)); + + if (sd->dir_status == SDIO_DW_RECV_STATUS && sd->data && sd->data->buf) + { + sdio_dw_read_data_pio(sd, RT_FALSE); + } + } + + if ((pending & PINT(TXDR))) + { + sdio_dw_writel(sd, RINTSTS, PINT(TXDR)); + + if (sd->dir_status == SDIO_DW_SEND_STATUS && sd->data && sd->data->buf) + { + sdio_dw_write_data_pio(sd); + } + } + + if ((pending & PINT(CMD_DONE))) + { + rt_hw_spin_lock(&sd->irq_lock.lock); + + sdio_dw_writel(sd, RINTSTS, PINT(CMD_DONE)); + sdio_dw_cmd_interrupt(sd, pending); + + rt_hw_spin_unlock(&sd->irq_lock.lock); + } + + if ((pending & PINT(CD))) + { + sdio_dw_writel(sd, RINTSTS, PINT(CD)); + mmcsd_change(slot->host); + } + + if ((pending & SDIO_DW_INT_SDIO(slot->sdio_id))) + { + sdio_dw_writel(sd, RINTSTS, SDIO_DW_INT_SDIO(slot->sdio_id)); + sdio_dw_mmc_enable_sdio_irq(slot->host, RT_FALSE); + sdio_irq_wakeup(slot->host); + } + } + + if (sd->use_dma != TRANS_MODE_IDMAC) + { + return; + } + + /* Handle IDMA interrupts */ + pending = sd->dma_64bit_address ? sdio_dw_readl(sd, IDSTS64) : sdio_dw_readl(sd, IDSTS); + + if ((pending & (PINTC(TI) | PINTC(RI)))) + { + if (sd->dma_64bit_address) + { + sdio_dw_writel(sd, IDSTS64, PINTC(TI) | PINTC(RI)); + sdio_dw_writel(sd, IDSTS64, PINTC(NI)); + } + else + { + sdio_dw_writel(sd, IDSTS, PINTC(TI) | PINTC(RI)); + sdio_dw_writel(sd, IDSTS, PINTC(NI)); + } + + if (!rt_bitmap_test_bit(&sd->pending_events, EVENT_DATA_ERROR)) + { + sd->dma_ops->complete(sd); + } + } +} + +#ifdef RT_USING_OFW +static rt_err_t sdio_dw_parse_ofw(struct sdio_dw *sd) +{ + struct rt_ofw_node *np = sd->parent.ofw_node; + const struct sdio_dw_drv_data *drv_data = sd->drv_data; + + rt_ofw_prop_read_u32(np, "fifo-depth", &sd->fifo_depth); + rt_ofw_prop_read_u32(np, "card-detect-delay", &sd->detect_delay_ms); + rt_ofw_prop_read_u32(np, "data-addr", &sd->data_addr_override); + + if (rt_ofw_prop_read_bool(np, "fifo-watermark-aligned")) + { + sd->wm_aligned = RT_TRUE; + } + + rt_ofw_prop_read_u32(np, "clock-frequency", &sd->bus_hz); + + if (drv_data && drv_data->parse_ofw) + { + return drv_data->parse_ofw(sd); + } + + return RT_EOK; +} +#else +rt_inline rt_err_t sdio_dw_parse_ofw(struct sdio_dw *sd) +{ + return -RT_ENOSYS; +} +#endif /* RT_USING_OFW */ + +static rt_err_t sdio_dw_init_slot(struct sdio_dw *sd) +{ + rt_err_t err; + struct sdio_dw_slot *slot; + struct rt_mmcsd_host *host = mmcsd_alloc_host(); + + if (!host) + { + return -RT_ENOMEM; + } + + slot = rt_calloc(1, sizeof(*slot)); + + if (!slot) + { + err = -RT_ENOMEM; + goto _free; + } + + err = sdio_regulator_get_supply(&sd->parent, host); + + if (err) + { + goto _free; + } + + host->ops = &sdio_dw_mmc_ops; + host->private_data = slot; + slot->host = host; + slot->sd = sd; + sd->slot = slot; + + slot->id = 0; + slot->sdio_id = sd->sdio_id0 + slot->id; + + err = sdio_ofw_parse(sd->parent.ofw_node, host); + + if (err) + { + goto _free; + } + + if (!host->valid_ocr) + { + host->valid_ocr = VDD_32_33 | VDD_33_34; + } + + if (sd->minimum_speed) + { + host->freq_min = sd->minimum_speed; + } + else + { + host->freq_min = SDIO_DW_FREQ_HZ_MIN; + } + + if (!host->freq_max) + { + host->freq_max = SDIO_DW_FREQ_HZ_MAX; + } + + /* Useful defaults if platform data is unset. 
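+ *
+ * The limits below follow the transfer mode detected earlier: internal
+ * DMA (IDMAC) is bounded by the descriptor ring, external DMA (EDMAC)
+ * uses fixed segment limits, and PIO is capped at 512 blocks per
+ * request.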
*/ + if (sd->use_dma == TRANS_MODE_IDMAC) + { + host->max_dma_segs = sd->ring_size; + host->max_blk_size = 65535; + host->max_seg_size = DESC_RING_BUF_SZ; + host->max_blk_count = (host->max_seg_size * sd->ring_size) / 512; + } + else if (sd->use_dma == TRANS_MODE_EDMAC) + { + host->max_dma_segs = 64; + host->max_blk_size = 65535; + host->max_blk_count = 65535; + host->max_seg_size = host->max_blk_size * host->max_blk_count; + } + else + { + /* TRANS_MODE_PIO */ + host->max_dma_segs = 64; + host->max_blk_size = 65535; + host->max_blk_count = 512; + host->max_seg_size = host->max_blk_size * host->max_blk_count; + } + + return RT_EOK; + +_free: + if (host) + { + mmcsd_free_host(host); + } + if (slot) + { + rt_free(slot); + } + return err; +} + +static void sdio_dw_free(struct sdio_dw *sd) +{ + if (!rt_is_err_or_null(sd->rstc)) + { + rt_reset_control_assert(sd->rstc); + rt_reset_control_put(sd->rstc); + } + + if (!rt_is_err_or_null(sd->ciu_clk)) + { + rt_clk_disable_unprepare(sd->ciu_clk); + rt_clk_put(sd->ciu_clk); + } + + if (!rt_is_err_or_null(sd->biu_clk)) + { + rt_clk_disable_unprepare(sd->biu_clk); + rt_clk_put(sd->biu_clk); + } + + if (sd->use_dma && sd->dma_ops->exit) + { + sd->dma_ops->exit(sd); + } + + if (sd->dma_buf) + { + rt_dma_free_coherent(sd->bus_dev, + DESC_RING_BUF_SZ, sd->dma_buf, sd->dma_buf_phy); + } +} + +rt_err_t sdio_dw_probe(struct sdio_dw *sd) +{ + int i, len; + rt_err_t err = RT_EOK; + char dev_name[RT_NAME_MAX]; + const struct sdio_dw_drv_data *drv_data = sd->drv_data; + + err = sdio_dw_parse_ofw(sd); + + if (err && err != -RT_ENOSYS) + { + goto _free_res; + } + + sd->rstc = rt_reset_control_get_by_name(&sd->parent, "reset"); + + if (rt_is_err(sd->rstc)) + { + LOG_E("Reset controller not found"); + + err = rt_ptr_err(sd->rstc); + goto _free_res; + } + + if (sd->rstc) + { + rt_reset_control_assert(sd->rstc); + rt_hw_us_delay(20); + rt_reset_control_deassert(sd->rstc); + } + + sd->biu_clk = rt_clk_get_by_name(&sd->parent, "biu"); + sd->ciu_clk = rt_clk_get_by_name(&sd->parent, "ciu"); + + if (rt_is_err(sd->biu_clk) || rt_is_err(sd->ciu_clk)) + { + /* board has init clock */ + if (sd->bus_hz) + { + goto _out_clk; + } + + err = rt_is_err(sd->biu_clk) ? rt_ptr_err(sd->biu_clk) : rt_ptr_err(sd->ciu_clk); + goto _free_res; + } + + err = rt_clk_prepare_enable(sd->ciu_clk); + + if (err) + { + goto _free_res; + } + + if (sd->bus_hz) + { + rt_clk_set_rate(sd->ciu_clk, sd->bus_hz); + } + + sd->bus_hz = rt_clk_get_rate(sd->ciu_clk); + + if (!sd->bus_hz) + { + err = -RT_EIO; + LOG_E("Bus speed not found"); + goto _free_res; + } + +_out_clk: + if (drv_data && drv_data->init) + { + err = drv_data->init(sd); + + if (err) + { + goto _free_res; + } + } + + rt_spin_lock_init(&sd->lock); + rt_spin_lock_init(&sd->irq_lock); + + /* + * Get the host data width - this assumes that HCON has been set with the + * correct values. 
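+ *
+ * HCON's HDATA_WIDTH field selects the FIFO access size used by the
+ * push/pull helpers: 0 = 16-bit, 1 = 32-bit, 2 = 64-bit; any other
+ * value is reserved and is treated as 32-bit with a warning.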
+     */
+    i = SDIO_DW_GET_HDATA_WIDTH(sdio_dw_readl(sd, HCON));
+    if (!i)
+    {
+        sd->push_data = sdio_dw_push_data16;
+        sd->pull_data = sdio_dw_pull_data16;
+        sd->data_shift = 1;
+    }
+    else if (i == 2)
+    {
+        sd->push_data = sdio_dw_push_data64;
+        sd->pull_data = sdio_dw_pull_data64;
+        sd->data_shift = 3;
+    }
+    else
+    {
+        /* Check for a reserved value, and warn if it is */
+        if (i != 1)
+        {
+            LOG_W("HCON reports a reserved host data width, defaulting to 32-bit access");
+        }
+
+        sd->push_data = sdio_dw_push_data32;
+        sd->pull_data = sdio_dw_pull_data32;
+        sd->data_shift = 2;
+    }
+
+    /* Reset all blocks */
+    if (!sdio_dw_ctrl_reset(sd, SDIO_DW_CTRL_ALL_RESET_FLAGS))
+    {
+        err = -RT_EIO;
+
+        goto _free_res;
+    }
+
+    sdio_dw_init_dma(sd);
+
+    /* Clear the interrupts for the host controller */
+    sdio_dw_writel(sd, RINTSTS, 0xffffffff);
+    /* Disable all mmc interrupts first */
+    sdio_dw_writel(sd, INTMASK, 0);
+
+    /* Put in max timeout */
+    sdio_dw_writel(sd, TMOUT, 0xffffffff);
+
+    /*
+     * FIFO threshold settings:
+     *   Rx Mark = fifo_size / 2 - 1,
+     *   Tx Mark = fifo_size / 2
+     *   DMA Size = 8
+     */
+    if (!sd->fifo_depth)
+    {
+        /*
+         * The FIFO depth was not supplied via "fifo-depth" or platform data,
+         * so derive it from the controller: the power-on value of RX_WMark is
+         * FIFO_DEPTH-1, but this may have been overwritten by the bootloader,
+         * just like we're about to do, so if you know the value for your
+         * hardware, you should put it in the platform data.
+         */
+        sd->fifo_depth = sdio_dw_readl(sd, FIFOTH);
+        sd->fifo_depth = 1 + ((sd->fifo_depth >> 16) & 0xfff);
+    }
+    sd->fifoth_val = SDIO_DW_SET_FIFOTH(0x2, sd->fifo_depth / 2 - 1, sd->fifo_depth / 2);
+    sdio_dw_writel(sd, FIFOTH, sd->fifoth_val);
+
+    /* Disable clock to CIU */
+    sdio_dw_writel(sd, CLKENA, 0);
+    sdio_dw_writel(sd, CLKSRC, 0);
+
+    /*
+     * In the 2.40a spec the data register offset changed, so check the
+     * version ID and set the DATA register offset accordingly.
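+     * Controllers older than 2.40a expose the data FIFO at DATA_OFFSET
+     * (0x100), 2.40a and newer at DATA_240A_OFFSET (0x200), and either can
+     * be overridden by the "data-addr" property parsed earlier.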
+ */ + sd->verid = SDIO_DW_GET_VERID(sdio_dw_readl(sd, VERID)); + LOG_D("Version ID is %04x", sd->verid); + + if (sd->data_addr_override) + { + sd->fifo_base = sd->base + sd->data_addr_override; + } + else if (sd->verid < SDIO_DW_240A) + { + sd->fifo_base = sd->base + DATA_OFFSET; + } + else + { + sd->fifo_base = sd->base + DATA_240A_OFFSET; + } + + /* + * Enable interrupts for command done, data over, data empty, receive ready + * and error such as transmit, receive timeout, crc error + */ + sdio_dw_writel(sd, INTMASK, PINT(CMD_DONE) | PINT(DATA_OVER) | PINT(TXDR) | PINT(RXDR) | SDIO_DW_ERROR_FLAGS); + /* Enable mci interrupt */ + sdio_dw_writel(sd, CTRL, SDIO_DW_CTRL_INT_ENABLE); + + /* Enable GPIO interrupt */ + sdio_dw_writel(sd, INTMASK, sdio_dw_readl(sd, INTMASK) | PINT(CD)); + + /* We need at least one slot to succeed */ + err = sdio_dw_init_slot(sd); + + if (err) + { + goto _free_res; + } + + /* Now that slots are all setup, we can enable card detect */ + sdio_dw_writel(sd, INTMASK, sdio_dw_readl(sd, INTMASK) | PINT(CD)); + + len = sdio_host_set_name(sd->slot->host, dev_name); + + sd->state_wq = rt_workqueue_create(dev_name, RT_SYSTEM_WORKQUEUE_STACKSIZE, + RT_MMCSD_THREAD_PRIORITY); + + if (!sd->state_wq) + { + err = -RT_ENOMEM; + + goto _free_res; + } + + rt_work_init(&sd->state_work, sdio_dw_state_change, sd); + + rt_hw_interrupt_install(sd->irq, sdio_dw_isr, sd, dev_name); + rt_hw_interrupt_umask(sd->irq); + + rt_strncpy(&dev_name[len], "-cmd11", sizeof(dev_name) - len); + rt_timer_init(&sd->cmd11_timer, dev_name, sdio_dw_cmd11_timer, sd, + 0, RT_TIMER_FLAG_PERIODIC); + + rt_strncpy(&dev_name[len], "-cto", sizeof(dev_name) - len); + rt_timer_init(&sd->cto_timer, dev_name, sdio_dw_cto_timer, sd, + 0, RT_TIMER_FLAG_PERIODIC); + + rt_strncpy(&dev_name[len], "-dto", sizeof(dev_name) - len); + rt_timer_init(&sd->dto_timer, dev_name, sdio_dw_dto_timer, sd, + 0, RT_TIMER_FLAG_PERIODIC); + + mmcsd_change(sd->slot->host); + + return err; + +_free_res: + sdio_dw_free(sd); + + return err; +} + +rt_err_t sdio_dw_remove(struct sdio_dw *sd) +{ + if (sd->slot) + { + mmcsd_free_host(sd->slot->host); + } + + sdio_dw_writel(sd, RINTSTS, 0xffffffff); + /* Disable all mmc interrupt first */ + sdio_dw_writel(sd, INTMASK, 0); + + /* Disable clock to CIU */ + sdio_dw_writel(sd, CLKENA, 0); + sdio_dw_writel(sd, CLKSRC, 0); + + rt_hw_interrupt_mask(sd->irq); + rt_pic_detach_irq(sd->irq, sd); + + rt_timer_detach(&sd->cmd11_timer); + rt_timer_detach(&sd->cto_timer); + rt_timer_detach(&sd->dto_timer); + + sdio_dw_free(sd); + + return RT_EOK; +} diff --git a/components/drivers/sdio/host/sdio-dw.h b/components/drivers/sdio/host/sdio-dw.h new file mode 100755 index 00000000000..14dd05237bb --- /dev/null +++ b/components/drivers/sdio/host/sdio-dw.h @@ -0,0 +1,362 @@ +/* + * Copyright (c) 2006-2022, RT-Thread Development Team + * + * SPDX-License-Identifier: Apache-2.0 + * + * Change Logs: + * Date Author Notes + * 2022-12-06 GuEe-GUI first version + */ + +#ifndef __SDIO_DW_H__ +#define __SDIO_DW_H__ + +#include "../dev_sdio_dm.h" + +#define SDIO_DW_CTRL 0x000 +#define SDIO_DW_PWREN 0x004 +#define SDIO_DW_CLKDIV 0x008 +#define SDIO_DW_CLKSRC 0x00c +#define SDIO_DW_CLKENA 0x010 +#define SDIO_DW_TMOUT 0x014 +#define SDIO_DW_CTYPE 0x018 +#define SDIO_DW_BLKSIZ 0x01c +#define SDIO_DW_BYTCNT 0x020 +#define SDIO_DW_INTMASK 0x024 +#define SDIO_DW_CMDARG 0x028 +#define SDIO_DW_CMD 0x02c +#define SDIO_DW_RESP0 0x030 +#define SDIO_DW_RESP1 0x034 +#define SDIO_DW_RESP2 0x038 +#define SDIO_DW_RESP3 0x03c +#define 
SDIO_DW_MINTSTS 0x040 +#define SDIO_DW_RINTSTS 0x044 +#define SDIO_DW_STATUS 0x048 +#define SDIO_DW_FIFOTH 0x04c +#define SDIO_DW_CDETECT 0x050 +#define SDIO_DW_WRTPRT 0x054 +#define SDIO_DW_GPIO 0x058 +#define SDIO_DW_TCBCNT 0x05c +#define SDIO_DW_TBBCNT 0x060 +#define SDIO_DW_DEBNCE 0x064 +#define SDIO_DW_USRID 0x068 +#define SDIO_DW_VERID 0x06c +#define SDIO_DW_HCON 0x070 +#define SDIO_DW_UHS_REG 0x074 +#define SDIO_DW_RST_N 0x078 +#define SDIO_DW_BMOD 0x080 +#define SDIO_DW_PLDMND 0x084 +#define SDIO_DW_DBADDR 0x088 +#define SDIO_DW_IDSTS 0x08c +#define SDIO_DW_IDINTEN 0x090 +#define SDIO_DW_DSCADDR 0x094 +#define SDIO_DW_BUFADDR 0x098 +#define SDIO_DW_CDTHRCTL 0x100 +#define SDIO_DW_UHS_REG_EXT 0x108 +#define SDIO_DW_DDR_REG 0x10c +#define SDIO_DW_ENABLE_SHIFT 0x110 +#define SDIO_DW_DATA(x) (x) +/* + * Registers to support idmac 64-bit address mode + */ +#define SDIO_DW_DBADDRL 0x088 +#define SDIO_DW_DBADDRU 0x08c +#define SDIO_DW_IDSTS64 0x090 +#define SDIO_DW_IDINTEN64 0x094 +#define SDIO_DW_DSCADDRL 0x098 +#define SDIO_DW_DSCADDRU 0x09c +#define SDIO_DW_BUFADDRL 0x0a0 +#define SDIO_DW_BUFADDRU 0x0a4 + +/* Support for longer data read timeout */ +#define SDIO_DW_QUIRK_EXTENDED_TMOUT RT_BIT(0) + +#define SDIO_DW_240A 0x240a +#define SDIO_DW_280A 0x280a + +/* + * Data offset is difference according to Version + * Lower than 2.40a : data register offest is 0x100 + */ +#define DATA_OFFSET 0x100 +#define DATA_240A_OFFSET 0x200 + +/* Control register defines */ +#define SDIO_DW_CTRL_USE_IDMAC RT_BIT(25) +#define SDIO_DW_CTRL_CEATA_INT_EN RT_BIT(11) +#define SDIO_DW_CTRL_SEND_AS_CCSD RT_BIT(10) +#define SDIO_DW_CTRL_SEND_CCSD RT_BIT(9) +#define SDIO_DW_CTRL_ABRT_READ_DATA RT_BIT(8) +#define SDIO_DW_CTRL_SEND_IRQ_RESP RT_BIT(7) +#define SDIO_DW_CTRL_READ_WAIT RT_BIT(6) +#define SDIO_DW_CTRL_DMA_ENABLE RT_BIT(5) +#define SDIO_DW_CTRL_INT_ENABLE RT_BIT(4) +#define SDIO_DW_CTRL_DMA_RESET RT_BIT(2) +#define SDIO_DW_CTRL_FIFO_RESET RT_BIT(1) +#define SDIO_DW_CTRL_RESET RT_BIT(0) +/* Clock Enable register defines */ +#define SDIO_DW_CLKEN_LOW_PWR RT_BIT(16) +#define SDIO_DW_CLKEN_ENABLE RT_BIT(0) +/* Time-out register defines */ +#define SDIO_DW_TMOUT_DATA(n) ((n) << 8) +#define SDIO_DW_TMOUT_DATA_MSK 0xffffff00 +#define SDIO_DW_TMOUT_RESP(n) ((n) & 0xff) +#define SDIO_DW_TMOUT_RESP_MSK 0xff +/* Card-type register defines */ +#define SDIO_DW_CTYPE_8BIT RT_BIT(16) +#define SDIO_DW_CTYPE_4BIT RT_BIT(0) +#define SDIO_DW_CTYPE_1BIT 0 +/* Interrupt status & mask register defines */ +#define SDIO_DW_INT_SDIO(n) RT_BIT(16 + (n)) +#define SDIO_DW_INT_RAW_SDIO RT_BIT(24) +#define SDIO_DW_INT_EBE RT_BIT(15) +#define SDIO_DW_INT_ACD RT_BIT(14) +#define SDIO_DW_INT_SBE RT_BIT(13) +#define SDIO_DW_INT_HLE RT_BIT(12) +#define SDIO_DW_INT_FRUN RT_BIT(11) +#define SDIO_DW_INT_HTO RT_BIT(10) +#define SDIO_DW_INT_VOLT_SWITCH RT_BIT(10) +#define SDIO_DW_INT_DRTO RT_BIT(9) +#define SDIO_DW_INT_RTO RT_BIT(8) +#define SDIO_DW_INT_DCRC RT_BIT(7) +#define SDIO_DW_INT_RCRC RT_BIT(6) +#define SDIO_DW_INT_RXDR RT_BIT(5) +#define SDIO_DW_INT_TXDR RT_BIT(4) +#define SDIO_DW_INT_DATA_OVER RT_BIT(3) +#define SDIO_DW_INT_CMD_DONE RT_BIT(2) +#define SDIO_DW_INT_RESP_ERR RT_BIT(1) +#define SDIO_DW_INT_CD RT_BIT(0) +#define SDIO_DW_INT_ERROR 0xbfc2 +/* Command register defines */ +#define SDIO_DW_CMD_START RT_BIT(31) +#define SDIO_DW_CMD_USE_HOLD_REG RT_BIT(29) +#define SDIO_DW_CMD_VOLT_SWITCH RT_BIT(28) +#define SDIO_DW_CMD_CCS_EXP RT_BIT(23) +#define SDIO_DW_CMD_CEATA_RD RT_BIT(22) +#define SDIO_DW_CMD_UPD_CLK RT_BIT(21) 
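+/*
+ * Illustrative sketch only (not used by the driver as-is): for a
+ * hypothetical single-block read with CMD17, a command word for the CMD
+ * register would typically combine the flags above and below, e.g.
+ *
+ *   cmdr = SDIO_DW_CMD_START | SDIO_DW_CMD_USE_HOLD_REG |
+ *          SDIO_DW_CMD_PRV_DAT_WAIT | SDIO_DW_CMD_DAT_EXP |
+ *          SDIO_DW_CMD_RESP_EXP | SDIO_DW_CMD_RESP_CRC |
+ *          SDIO_DW_CMD_INDX(17);
+ */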
+#define SDIO_DW_CMD_INIT RT_BIT(15) +#define SDIO_DW_CMD_STOP RT_BIT(14) +#define SDIO_DW_CMD_PRV_DAT_WAIT RT_BIT(13) +#define SDIO_DW_CMD_SEND_STOP RT_BIT(12) +#define SDIO_DW_CMD_STRM_MODE RT_BIT(11) +#define SDIO_DW_CMD_DAT_WR RT_BIT(10) +#define SDIO_DW_CMD_DAT_EXP RT_BIT(9) +#define SDIO_DW_CMD_RESP_CRC RT_BIT(8) +#define SDIO_DW_CMD_RESP_LONG RT_BIT(7) +#define SDIO_DW_CMD_RESP_EXP RT_BIT(6) +#define SDIO_DW_CMD_INDX(n) ((n) & 0x1f) +/* Status register defines */ +#define SDIO_DW_GET_FCNT(x) (((x) >> 17) & 0x1fff) +#define SDIO_DW_STATUS_DMA_REQ RT_BIT(31) +#define SDIO_DW_STATUS_BUSY RT_BIT(9) +/* FIFOTH register defines */ +#define SDIO_DW_SET_FIFOTH(m, r, t) (((m) & 0x7) << 28 | ((r) & 0xfff) << 16 | ((t) & 0xfff)) +/* HCON register defines */ +#define DMA_INTERFACE_IDMA (0x0) +#define DMA_INTERFACE_DWDMA (0x1) +#define DMA_INTERFACE_GDMA (0x2) +#define DMA_INTERFACE_NODMA (0x3) +#define SDIO_DW_GET_TRANS_MODE(x) (((x) >> 16) & 0x3) +#define SDIO_DW_GET_SLOT_NUM(x) ((((x) >> 1) & 0x1f) + 1) +#define SDIO_DW_GET_HDATA_WIDTH(x) (((x) >> 7) & 0x7) +#define SDIO_DW_GET_ADDR_CONFIG(x) (((x) >> 27) & 0x1) +/* Internal DMAC interrupt defines */ +#define SDIO_DW_IDMAC_INT_AI RT_BIT(9) +#define SDIO_DW_IDMAC_INT_NI RT_BIT(8) +#define SDIO_DW_IDMAC_INT_CES RT_BIT(5) +#define SDIO_DW_IDMAC_INT_DU RT_BIT(4) +#define SDIO_DW_IDMAC_INT_FBE RT_BIT(2) +#define SDIO_DW_IDMAC_INT_RI RT_BIT(1) +#define SDIO_DW_IDMAC_INT_TI RT_BIT(0) +/* Internal DMAC bus mode bits */ +#define SDIO_DW_IDMAC_ENABLE RT_BIT(7) +#define SDIO_DW_IDMAC_FB RT_BIT(1) +#define SDIO_DW_IDMAC_SWRESET RT_BIT(0) +/* H/W reset */ +#define SDIO_DW_RST_HWACTIVE 0x1 +/* Version ID register define */ +#define SDIO_DW_GET_VERID(x) ((x) & 0xffff) +/* Card read threshold */ +#define SDIO_DW_SET_THLD(v, x) (((v) & 0xfff) << 16 | (x)) +#define SDIO_DW_CARD_WR_THR_EN RT_BIT(2) +#define SDIO_DW_CARD_RD_THR_EN RT_BIT(0) +/* UHS-1 register defines */ +#define SDIO_DW_UHS_DDR RT_BIT(16) +#define SDIO_DW_UHS_18V RT_BIT(0) +/* DDR register defines */ +#define SDIO_DW_DDR_HS400 RT_BIT(31) +/* Enable shift register defines */ +#define SDIO_DW_ENABLE_PHASE RT_BIT(0) +/* All ctrl reset bits */ +#define SDIO_DW_CTRL_ALL_RESET_FLAGS (SDIO_DW_CTRL_RESET | SDIO_DW_CTRL_FIFO_RESET | SDIO_DW_CTRL_DMA_RESET) + +struct rt_dma_chan; + +struct sdio_dw +{ + struct rt_device parent; + struct rt_device *bus_dev; + + struct rt_workqueue *state_wq; + struct rt_work state_work; + + void *base; + void *fifo_base; + rt_ubase_t base_phy; + rt_uint32_t data_addr_override; + rt_bool_t wm_aligned; + + int irq; + int sdio_id0; + rt_uint32_t verid; + rt_uint32_t quirks; + rt_uint32_t bus_hz; + rt_bool_t fifo_mode; + rt_uint32_t fifo_depth; + rt_uint32_t fifoth_val; + rt_uint32_t detect_delay_ms; + rt_uint32_t current_speed; + rt_uint32_t minimum_speed; + void *priv; + + rt_uint32_t vqmmc_enabled; + rt_uint32_t cmd_status; + rt_uint32_t data_status; + rt_uint32_t stop_cmdr; + rt_uint32_t dir_status; +#define STATE_IDLE 0 +#define STATE_SENDING_CMD 1 +#define STATE_SENDING_DATA 2 +#define STATE_DATA_BUSY 3 +#define STATE_SENDING_STOP 4 +#define STATE_DATA_ERROR 5 +#define STATE_SENDING_CMD11 6 +#define STATE_WAITING_CMD11_DONE 7 + rt_uint32_t state; +#define EVENT_CMD_COMPLETE 0 +#define EVENT_XFER_COMPLETE 1 +#define EVENT_DATA_COMPLETE 2 +#define EVENT_DATA_ERROR 3 + rt_bitmap_t pending_events; + + struct rt_mmcsd_req *req; + struct rt_mmcsd_data *data; + struct rt_mmcsd_cmd *cmd; + struct rt_mmcsd_cmd stop_abort; + rt_uint32_t prev_blksz; + rt_uint8_t timing; + + struct 
rt_clk *biu_clk; + struct rt_clk *ciu_clk; + + void *last_buf; + rt_uint32_t last_remain; + + int data_shift; + rt_uint8_t part_buf_start; + rt_uint8_t part_buf_count; + union + { + rt_uint64_t part_buf; + rt_uint16_t part_buf16; + rt_uint32_t part_buf32; + rt_uint64_t part_buf64; + }; + void (*push_data)(struct sdio_dw *sd, void *buf, int cnt); + void (*pull_data)(struct sdio_dw *sd, void *buf, int cnt); + + /* DMA interface members */ +#define TRANS_MODE_PIO 0 +#define TRANS_MODE_IDMAC 1 +#define TRANS_MODE_EDMAC 2 + rt_bool_t use_dma; + rt_bool_t using_dma; + rt_bool_t dma_64bit_address; + rt_size_t ring_size; + void *dma_buf; + rt_ubase_t dma_buf_phy; + struct rt_dma_chan *edma_chan; + const struct sdio_dw_dma_ops *dma_ops; + + struct rt_timer cmd11_timer; + struct rt_timer cto_timer; + struct rt_timer dto_timer; + + struct rt_reset_control *rstc; + + struct sdio_dw_slot *slot; + const struct sdio_dw_drv_data *drv_data; + + struct rt_spinlock lock, irq_lock; +}; + +/* DMA ops for Internal/External DMAC interface */ +struct sdio_dw_dma_ops +{ + rt_err_t (*init)(struct sdio_dw *sd); + rt_err_t (*start)(struct sdio_dw *sd); + rt_err_t (*complete)(struct sdio_dw *sd); + rt_err_t (*stop)(struct sdio_dw *sd); + rt_err_t (*cleanup)(struct sdio_dw *sd); + rt_err_t (*exit)(struct sdio_dw *sd); +}; + +struct sdio_dw_slot +{ + struct rt_mmcsd_host *host; + struct sdio_dw *sd; + + rt_uint32_t ctype; + + struct rt_mmcsd_req *req; + + rt_uint32_t clock; + rt_uint32_t clk_old; + +#define DW_MMC_CARD_PRESENT 0 +#define DW_MMC_CARD_NEED_INIT 1 +#define DW_MMC_CARD_NO_LOW_PWR 2 +#define DW_MMC_CARD_NO_USE_HOLD 3 +#define DW_MMC_CARD_NEEDS_POLL 4 + rt_bitmap_t flags; + + int id; + int sdio_id; +}; + +struct sdio_dw_drv_data +{ + rt_ubase_t *caps; + rt_uint32_t num_caps; + rt_uint32_t common_caps; + + rt_err_t (*init)(struct sdio_dw *sd); + rt_err_t (*set_iocfg)(struct sdio_dw *sd, struct rt_mmcsd_io_cfg *ios); + rt_err_t (*parse_ofw)(struct sdio_dw *sd); + rt_err_t (*execute_tuning)(struct sdio_dw_slot *slot, rt_uint32_t opcode); + rt_err_t (*prepare_hs400_tuning)(struct sdio_dw *sd, struct rt_mmcsd_io_cfg *ios); + rt_err_t (*switch_voltage)(struct rt_mmcsd_host *host, struct rt_mmcsd_io_cfg *ios); + rt_err_t (*set_data_timeout)(struct sdio_dw *sd, rt_uint32_t timeout_ns); + rt_uint32_t (*get_drto_clks)(struct sdio_dw *sd); +}; + +#define sdio_dw_writel(sd, reg, val) HWREG32((sd)->base + SDIO_DW_##reg) = (val) +#define sdio_dw_writew(sd, reg, val) HWREG16((sd)->base + SDIO_DW_##reg) = (val) +#define sdio_dw_writeb(sd, reg, val) HWREG8((sd)->base + SDIO_DW_##reg) = (val) + +#define sdio_dw_readl(sd, reg) HWREG32((sd)->base + SDIO_DW_##reg) +#define sdio_dw_readw(sd, reg) HWREG16((sd)->base + SDIO_DW_##reg) +#define sdio_dw_readb(sd, reg) HWREG8((sd)->base + SDIO_DW_##reg) + +#define sdio_dw_fifo_writew(sd, val) HWREG16((sd)->fifo_base) = (val) +#define sdio_dw_fifo_writel(sd, val) HWREG32((sd)->fifo_base) = (val) +#define sdio_dw_fifo_writeq(sd, val) HWREG64((sd)->fifo_base) = (val) + +#define sdio_dw_fifo_readw(sd) HWREG16((sd)->fifo_base) +#define sdio_dw_fifo_readl(sd) HWREG32((sd)->fifo_base) +#define sdio_dw_fifo_readq(sd) HWREG64((sd)->fifo_base) + +rt_err_t sdio_dw_probe(struct sdio_dw *sd); +rt_err_t sdio_dw_remove(struct sdio_dw *sd); + +#endif /* __SDIO_DW_H__ */ diff --git a/components/drivers/sdio/sdhci/include/sdhci-platform.h b/components/drivers/sdio/sdhci/include/sdhci-platform.h deleted file mode 100644 index f8d96449051..00000000000 --- 
a/components/drivers/sdio/sdhci/include/sdhci-platform.h +++ /dev/null @@ -1,66 +0,0 @@ -/* - * Copyright (c) 2006-2024 RT-Thread Development Team - * - * SPDX-License-Identifier: Apache-2.0 - * - * Change Logs: - * Date Author Notes - * 2024-08-16 zhujiale first version - */ - -#ifndef _DRIVERS_MMC_RT_SDHCI_PLTFM_H -#define _DRIVERS_MMC_RT_SDHCI_PLTFM_H -#include -#include -#include -#include -#include -#include "sdhci.h" - -struct rt_sdhci_pltfm_data -{ - const struct rt_sdhci_ops *ops; - unsigned int quirks; - unsigned int quirks2; -}; - -struct rt_sdhci_pltfm_host -{ - struct rt_clk *clk; - unsigned int clock; - rt_uint64_t xfer_mode_shadow; - - unsigned long private[]; -}; -void rt_sdhci_get_property(struct rt_platform_device *pdev); - -static inline void sdhci_get_of_property(struct rt_platform_device *pdev) -{ - return rt_sdhci_get_property(pdev); -} -extern struct rt_sdhci_host *rt_sdhci_pltfm_init(struct rt_platform_device *pdev, - const struct rt_sdhci_pltfm_data *pdata, - size_t priv_size); -extern void rt_sdhci_pltfm_free(struct rt_platform_device *pdev); - -extern int rt_sdhci_pltfm_init_and_add_host(struct rt_platform_device *pdev, - const struct rt_sdhci_pltfm_data *pdata, - size_t priv_size); -extern void rt_sdhci_pltfm_remove(struct rt_platform_device *pdev); - -extern unsigned int rt_sdhci_pltfm_clk_get_max_clock(struct rt_sdhci_host *host); - -static inline void *sdhci_pltfm_priv(struct rt_sdhci_pltfm_host *host) -{ - return host->private; -} - -static inline int sdhci_pltfm_suspend(struct rt_device *dev) -{ - return 0; -} -static inline int sdhci_pltfm_resume(struct rt_device *dev) -{ - return 0; -} -#endif diff --git a/components/drivers/sdio/sdhci/include/sdhci.h b/components/drivers/sdio/sdhci/include/sdhci.h deleted file mode 100644 index 1ad3db12882..00000000000 --- a/components/drivers/sdio/sdhci/include/sdhci.h +++ /dev/null @@ -1,677 +0,0 @@ -/* - * Copyright (c) 2006-2024 RT-Thread Development Team - * - * SPDX-License-Identifier: Apache-2.0 - * - * Change Logs: - * Date Author Notes - * 2024-08-16 zhujiale first version - */ -#ifndef __RT_SDHCI_HW_H -#define __RT_SDHCI_HW_H - -#include "sdhci_host.h" -#include "sdhci_misc.h" -#include "sdhci-platform.h" -#include -#include -#include -#include - -#define lower_32_bits(n) ((rt_uint32_t)((n) & 0xffffffff)) -#define upper_32_bits(n) ((rt_uint32_t)(((n) >> 16) >> 16)) - -#define MAX_TUNING_LOOP 40 -/* - * Controller registers - */ -#define RT_SDHCI_DMA_ADDRESS 0x00 -#define RT_SDHCI_ARGUMENT2 RT_SDHCI_DMA_ADDRESS -#define RT_SDHCI_32BIT_BLK_CNT RT_SDHCI_DMA_ADDRESS - -#define RT_SDHCI_BLOCK_SIZE 0x04 -#define RT_SDHCI_MAKE_BLKSZ(dma, blksz) (((dma & 0x7) << 12) | (blksz & 0xFFF)) - -#define RT_SDHCI_BLOCK_COUNT 0x06 - -#define RT_SDHCI_ARGUMENT 0x08 - -#define RT_SDHCI_TRANSFER_MODE 0x0C -#define RT_SDHCI_TRNS_DMA 0x01 -#define RT_SDHCI_TRNS_BLK_CNT_EN 0x02 -#define RT_SDHCI_TRNS_AUTO_CMD12 0x04 -#define RT_SDHCI_TRNS_AUTO_CMD23 0x08 -#define RT_SDHCI_TRNS_AUTO_SEL 0x0C -#define RT_SDHCI_TRNS_READ 0x10 -#define RT_SDHCI_TRNS_MULTI 0x20 - -#define RT_SDHCI_COMMAND 0x0E -#define RT_SDHCI_CMD_RESP_MASK 0x03 -#define RT_SDHCI_CMD_CRC 0x08 -#define RT_SDHCI_CMD_INDEX 0x10 -#define RT_SDHCI_CMD_DATA 0x20 -#define RT_SDHCI_CMD_ABORTCMD 0xC0 - -#define RT_SDHCI_CMD_RESP_NONE 0x00 -#define RT_SDHCI_CMD_RESP_LONG 0x01 -#define RT_SDHCI_CMD_RESP_SHORT 0x02 -#define RT_SDHCI_CMD_RESP_SHORT_BUSY 0x03 - -#define RT_SDHCI_MAKE_CMD(c, f) (((c & 0xff) << 8) | (f & 0xff)) -#define RT_SDHCI_GET_CMD(c) ((c >> 8) & 0x3f) - -#define 
RT_SDHCI_RESPONSE 0x10 - -#define RT_SDHCI_BUFFER 0x20 - -#define RT_SDHCI_PRESENT_STATE 0x24 -#define RT_SDHCI_CMD_INHIBIT 0x00000001 -#define RT_SDHCI_DATA_INHIBIT 0x00000002 -#define RT_SDHCI_DOING_WRITE 0x00000100 -#define RT_SDHCI_DOING_READ 0x00000200 -#define RT_SDHCI_SPACE_AVAILABLE 0x00000400 -#define RT_SDHCI_DATA_AVAILABLE 0x00000800 -#define RT_SDHCI_CARD_PRESENT 0x00010000 -#define RT_SDHCI_CARD_PRES_SHIFT 16 -#define RT_SDHCI_CD_STABLE 0x00020000 -#define RT_SDHCI_CD_LVL 0x00040000 -#define RT_SDHCI_CD_LVL_SHIFT 18 -#define RT_SDHCI_WRITE_PROTECT 0x00080000 -#define RT_SDHCI_DATA_LVL_MASK 0x00F00000 -#define RT_SDHCI_DATA_LVL_SHIFT 20 -#define RT_SDHCI_DATA_0_LVL_MASK 0x00100000 -#define RT_SDHCI_CMD_LVL 0x01000000 - -#define RT_SDHCI_HOST_CONTROL 0x28 -#define RT_SDHCI_CTRL_LED 0x01 -#define RT_SDHCI_CTRL_4BITBUS 0x02 -#define RT_SDHCI_CTRL_HISPD 0x04 -#define RT_SDHCI_CTRL_DMA_MASK 0x18 -#define RT_SDHCI_CTRL_SDMA 0x00 -#define RT_SDHCI_CTRL_ADMA1 0x08 -#define RT_SDHCI_CTRL_ADMA32 0x10 -#define RT_SDHCI_CTRL_ADMA64 0x18 -#define RT_SDHCI_CTRL_ADMA3 0x18 -#define RT_SDHCI_CTRL_8BITBUS 0x20 -#define RT_SDHCI_CTRL_CDTEST_INS 0x40 -#define RT_SDHCI_CTRL_CDTEST_EN 0x80 - -#define RT_SDHCI_POWER_CONTROL 0x29 -#define RT_SDHCI_POWER_ON 0x01 -#define RT_SDHCI_POWER_180 0x0A -#define RT_SDHCI_POWER_300 0x0C -#define RT_SDHCI_POWER_330 0x0E -/* - * VDD2 - UHS2 or PCIe/NVMe - * VDD2 power on/off and voltage select - */ -#define RT_SDHCI_VDD2_POWER_ON 0x10 -#define RT_SDHCI_VDD2_POWER_120 0x80 -#define RT_SDHCI_VDD2_POWER_180 0xA0 - -#define RT_SDHCI_BLOCK_GAP_CONTROL 0x2A - -#define RT_SDHCI_WAKE_UP_CONTROL 0x2B -#define RT_SDHCI_WAKE_ON_INT 0x01 -#define RT_SDHCI_WAKE_ON_INSERT 0x02 -#define RT_SDHCI_WAKE_ON_REMOVE 0x04 - -#define RT_SDHCI_CLOCK_CONTROL 0x2C -#define RT_SDHCI_DIVIDER_SHIFT 8 -#define RT_SDHCI_DIVIDER_HI_SHIFT 6 -#define RT_SDHCI_DIV_MASK 0xFF -#define RT_SDHCI_DIV_MASK_LEN 8 -#define RT_SDHCI_DIV_HI_MASK 0x300 -#define RT_SDHCI_PROG_CLOCK_MODE 0x0020 -#define RT_SDHCI_CLOCK_CARD_EN 0x0004 -#define RT_SDHCI_CLOCK_PLL_EN 0x0008 -#define RT_SDHCI_CLOCK_INT_STABLE 0x0002 -#define RT_SDHCI_CLOCK_INT_EN 0x0001 - -#define RT_SDHCI_TIMEOUT_CONTROL 0x2E - -#define RT_SDHCI_SOFTWARE_RESET 0x2F -#define RT_SDHCI_RESET_ALL 0x01 -#define RT_SDHCI_RESET_CMD 0x02 -#define RT_SDHCI_RESET_DATA 0x04 - -#define RT_SDHCI_INT_STATUS 0x30 -#define RT_SDHCI_INT_ENABLE 0x34 -#define RT_SDHCI_SIGNAL_ENABLE 0x38 -#define RT_SDHCI_INT_RESPONSE 0x00000001 -#define RT_SDHCI_INT_DATA_END 0x00000002 -#define RT_SDHCI_INT_BLK_GAP 0x00000004 -#define RT_SDHCI_INT_DMA_END 0x00000008 -#define RT_SDHCI_INT_SPACE_AVAIL 0x00000010 -#define RT_SDHCI_INT_DATA_AVAIL 0x00000020 -#define RT_SDHCI_INT_CARD_INSERT 0x00000040 -#define RT_SDHCI_INT_CARD_REMOVE 0x00000080 -#define RT_SDHCI_INT_CARD_INT 0x00000100 -#define RT_SDHCI_INT_RETUNE 0x00001000 -#define RT_SDHCI_INT_CQE 0x00004000 -#define RT_SDHCI_INT_ERROR 0x00008000 -#define RT_SDHCI_INT_TIMEOUT 0x00010000 -#define RT_SDHCI_INT_CRC 0x00020000 -#define RT_SDHCI_INT_END_BIT 0x00040000 -#define RT_SDHCI_INT_INDEX 0x00080000 -#define RT_SDHCI_INT_DATA_TIMEOUT 0x00100000 -#define RT_SDHCI_INT_DATA_CRC 0x00200000 -#define RT_SDHCI_INT_DATA_END_BIT 0x00400000 -#define RT_SDHCI_INT_BUS_POWER 0x00800000 -#define RT_SDHCI_INT_AUTO_CMD_ERR 0x01000000 -#define RT_SDHCI_INT_ADMA_ERROR 0x02000000 - -#define RT_SDHCI_INT_NORMAL_MASK 0x00007FFF -#define RT_SDHCI_INT_ERROR_MASK 0xFFFF8000 - -#define RT_SDHCI_INT_CMD_MASK (RT_SDHCI_INT_RESPONSE | RT_SDHCI_INT_TIMEOUT | 
RT_SDHCI_INT_CRC | RT_SDHCI_INT_END_BIT | RT_SDHCI_INT_INDEX | RT_SDHCI_INT_AUTO_CMD_ERR) -#define RT_SDHCI_INT_DATA_MASK (RT_SDHCI_INT_DATA_END | RT_SDHCI_INT_DMA_END | RT_SDHCI_INT_DATA_AVAIL | RT_SDHCI_INT_SPACE_AVAIL | RT_SDHCI_INT_DATA_TIMEOUT | RT_SDHCI_INT_DATA_CRC | RT_SDHCI_INT_DATA_END_BIT | RT_SDHCI_INT_ADMA_ERROR | RT_SDHCI_INT_BLK_GAP) -#define RT_SDHCI_INT_ALL_MASK ((unsigned int)-1) - -#define RT_SDHCI_CQE_INT_ERR_MASK ( \ - RT_SDHCI_INT_ADMA_ERROR | RT_SDHCI_INT_BUS_POWER | RT_SDHCI_INT_DATA_END_BIT | RT_SDHCI_INT_DATA_CRC | RT_SDHCI_INT_DATA_TIMEOUT | RT_SDHCI_INT_INDEX | RT_SDHCI_INT_END_BIT | RT_SDHCI_INT_CRC | RT_SDHCI_INT_TIMEOUT) - -#define RT_SDHCI_CQE_INT_MASK (RT_SDHCI_CQE_INT_ERR_MASK | RT_SDHCI_INT_CQE) - -#define RT_SDHCI_AUTO_CMD_STATUS 0x3C -#define RT_SDHCI_AUTO_CMD_TIMEOUT 0x00000002 -#define RT_SDHCI_AUTO_CMD_CRC 0x00000004 -#define RT_SDHCI_AUTO_CMD_END_BIT 0x00000008 -#define RT_SDHCI_AUTO_CMD_INDEX 0x00000010 - -#define RT_SDHCI_HOST_CONTROL2 0x3E -#define RT_SDHCI_CTRL_UHS_MASK 0x0007 -#define RT_SDHCI_CTRL_UHS_SDR12 0x0000 -#define RT_SDHCI_CTRL_UHS_SDR25 0x0001 -#define RT_SDHCI_CTRL_UHS_SDR50 0x0002 -#define RT_SDHCI_CTRL_UHS_SDR104 0x0003 -#define RT_SDHCI_CTRL_UHS_DDR50 0x0004 -#define RT_SDHCI_CTRL_HS400 0x0005 /* Non-standard */ -#define RT_SDHCI_CTRL_VDD_180 0x0008 -#define RT_SDHCI_CTRL_DRV_TYPE_MASK 0x0030 -#define RT_SDHCI_CTRL_DRV_TYPE_B 0x0000 -#define RT_SDHCI_CTRL_DRV_TYPE_A 0x0010 -#define RT_SDHCI_CTRL_DRV_TYPE_C 0x0020 -#define RT_SDHCI_CTRL_DRV_TYPE_D 0x0030 -#define RT_SDHCI_CTRL_EXEC_TUNING 0x0040 -#define RT_SDHCI_CTRL_TUNED_CLK 0x0080 -#define RT_SDHCI_CMD23_ENABLE 0x0800 -#define RT_SDHCI_CTRL_V4_MODE 0x1000 -#define RT_SDHCI_CTRL_64BIT_ADDR 0x2000 -#define RT_SDHCI_CTRL_PRESET_VAL_ENABLE 0x8000 - -#define RT_SDHCI_CAPABILITIES 0x40 -#define RT_SDHCI_TIMEOUT_CLK_MASK RT_GENMASK(5, 0) -#define RT_SDHCI_TIMEOUT_CLK_SHIFT 0 -#define RT_SDHCI_TIMEOUT_CLK_UNIT 0x00000080 -#define RT_SDHCI_CLOCK_BASE_MASK RT_GENMASK(13, 8) -#define RT_SDHCI_CLOCK_BASE_SHIFT 8 -#define RT_SDHCI_CLOCK_V3_BASE_MASK RT_GENMASK(15, 8) -#define RT_SDHCI_MAX_BLOCK_MASK 0x00030000 -#define RT_SDHCI_MAX_BLOCK_SHIFT 16 -#define RT_SDHCI_CAN_DO_8BIT 0x00040000 -#define RT_SDHCI_CAN_DO_ADMA2 0x00080000 -#define RT_SDHCI_CAN_DO_ADMA1 0x00100000 -#define RT_SDHCI_CAN_DO_HISPD 0x00200000 -#define RT_SDHCI_CAN_DO_SDMA 0x00400000 -#define RT_SDHCI_CAN_DO_SUSPEND 0x00800000 -#define RT_SDHCI_CAN_VDD_330 0x01000000 -#define RT_SDHCI_CAN_VDD_300 0x02000000 -#define RT_SDHCI_CAN_VDD_180 0x04000000 -#define RT_SDHCI_CAN_64BIT_V4 0x08000000 -#define RT_SDHCI_CAN_64BIT 0x10000000 - -#define RT_SDHCI_CAPABILITIES_1 0x44 -#define RT_SDHCI_SUPPORT_SDR50 0x00000001 -#define RT_SDHCI_SUPPORT_SDR104 0x00000002 -#define RT_SDHCI_SUPPORT_DDR50 0x00000004 -#define RT_SDHCI_DRIVER_TYPE_A 0x00000010 -#define RT_SDHCI_DRIVER_TYPE_C 0x00000020 -#define RT_SDHCI_DRIVER_TYPE_D 0x00000040 -#define RT_SDHCI_RETUNING_TIMER_COUNT_MASK RT_GENMASK(11, 8) -#define RT_SDHCI_USE_SDR50_TUNING 0x00002000 -#define RT_SDHCI_RETUNING_MODE_MASK RT_GENMASK(15, 14) -#define RT_SDHCI_CLOCK_MUL_MASK RT_GENMASK(23, 16) -#define RT_SDHCI_CAN_DO_ADMA3 0x08000000 -#define RT_SDHCI_SUPPORT_HS400 0x80000000 /* Non-standard */ - -#define RT_SDHCI_MAX_CURRENT 0x48 -#define RT_SDHCI_MAX_CURRENT_LIMIT RT_GENMASK(7, 0) -#define RT_SDHCI_MAX_CURRENT_330_MASK RT_GENMASK(7, 0) -#define RT_SDHCI_MAX_CURRENT_300_MASK RT_GENMASK(15, 8) -#define RT_SDHCI_MAX_CURRENT_180_MASK RT_GENMASK(23, 16) -#define 
RT_SDHCI_MAX_CURRENT_MULTIPLIER 4 - -/* 4C-4F reserved for more max current */ - -#define RT_SDHCI_SET_ACMD12_ERROR 0x50 -#define RT_SDHCI_SET_INT_ERROR 0x52 - -#define RT_SDHCI_ADMA_ERROR 0x54 - -/* 55-57 reserved */ - -#define RT_SDHCI_ADMA_ADDRESS 0x58 -#define RT_SDHCI_ADMA_ADDRESS_HI 0x5C - -/* 60-FB reserved */ - -#define RT_SDHCI_PRESET_FOR_HIGH_SPEED 0x64 -#define RT_SDHCI_PRESET_FOR_SDR12 0x66 -#define RT_SDHCI_PRESET_FOR_SDR25 0x68 -#define RT_SDHCI_PRESET_FOR_SDR50 0x6A -#define RT_SDHCI_PRESET_FOR_SDR104 0x6C -#define RT_SDHCI_PRESET_FOR_DDR50 0x6E -#define RT_SDHCI_PRESET_FOR_HS400 0x74 /* Non-standard */ -#define RT_SDHCI_PRESET_DRV_MASK RT_GENMASK(15, 14) -#define BIT(nr) ((1) << (nr)) - -#define RT_SDHCI_PRESET_CLKGEN_SEL BIT(10) -#define RT_SDHCI_PRESET_SDCLK_FREQ_MASK RT_GENMASK(9, 0) - -#define RT_SDHCI_SLOT_INT_STATUS 0xFC - -#define RT_SDHCI_HOST_VERSION 0xFE -#define RT_SDHCI_VENDOR_VER_MASK 0xFF00 -#define RT_SDHCI_VENDOR_VER_SHIFT 8 -#define RT_SDHCI_SPEC_VER_MASK 0x00FF -#define RT_SDHCI_SPEC_VER_SHIFT 0 -#define RT_SDHCI_SPEC_100 0 -#define RT_SDHCI_SPEC_200 1 -#define RT_SDHCI_SPEC_300 2 -#define RT_SDHCI_SPEC_400 3 -#define RT_SDHCI_SPEC_410 4 -#define RT_SDHCI_SPEC_420 5 - -/* - * End of controller registers. - */ - -#define RT_SDHCI_MAX_DIV_SPEC_200 256 -#define RT_SDHCI_MAX_DIV_SPEC_300 2046 - -/* - * Host SDMA buffer boundary. Valid values from 4K to 512K in powers of 2. - */ -#define RT_SDHCI_DEFAULT_BOUNDARY_SIZE (512 * 1024) -#define ilog2(v) __rt_ffs(v) -#define RT_SDHCI_DEFAULT_BOUNDARY_ARG (ilog2(RT_SDHCI_DEFAULT_BOUNDARY_SIZE) - 12) -#define RT_SDHCI_MAX_SEGS 128 - -/* Allow for a command request and a data request at the same time */ -#define RT_SDHCI_MAX_MRQS 2 -#define MMC_CMD_TRANSFER_TIME (10 * 1000000L) /* max 10 ms */ - - -enum rt_sdhci_cookie -{ - COOKIE_UNMAPPED, - COOKIE_PRE_MAPPED, /* mapped by sdhci_pre_req() */ - COOKIE_MAPPED, /* mapped by sdhci_prepare_data() */ -}; - -struct rt_sdhci_host -{ - const char *hw_name; /* Hardware bus name */ - - unsigned int quirks; /* Deviations from spec. */ - - void *data_buf; -/* Controller doesn't honor resets unless we touch the clock register */ -#define RT_SDHCI_QUIRK_CLOCK_BEFORE_RESET (1 << 0) -/* Controller has bad caps bits, but really supports DMA */ -#define RT_SDHCI_QUIRK_FORCE_DMA (1 << 1) -/* Controller doesn't like to be reset when there is no card inserted. 
*/ -#define RT_SDHCI_QUIRK_NO_CARD_NO_RESET (1 << 2) -/* Controller doesn't like clearing the power reg before a change */ -#define RT_SDHCI_QUIRK_SINGLE_POWER_WRITE (1 << 3) -/* Controller has an unusable DMA engine */ -#define RT_SDHCI_QUIRK_BROKEN_DMA (1 << 5) -/* Controller has an unusable ADMA engine */ -#define RT_SDHCI_QUIRK_BROKEN_ADMA (1 << 6) -/* Controller can only DMA from 32-bit aligned addresses */ -#define RT_SDHCI_QUIRK_32BIT_DMA_ADDR (1 << 7) -/* Controller can only DMA chunk sizes that are a multiple of 32 bits */ -#define RT_SDHCI_QUIRK_32BIT_DMA_SIZE (1 << 8) -/* Controller can only ADMA chunks that are a multiple of 32 bits */ -#define RT_SDHCI_QUIRK_32BIT_ADMA_SIZE (1 << 9) -/* Controller needs to be reset after each request to stay stable */ -#define RT_SDHCI_QUIRK_RESET_AFTER_REQUEST (1 << 10) -/* Controller needs voltage and power writes to happen separately */ -#define RT_SDHCI_QUIRK_NO_SIMULT_VDD_AND_POWER (1 << 11) -/* Controller provides an incorrect timeout value for transfers */ -#define RT_SDHCI_QUIRK_BROKEN_TIMEOUT_VAL (1 << 12) -/* Controller has an issue with buffer bits for small transfers */ -#define RT_SDHCI_QUIRK_BROKEN_SMALL_PIO (1 << 13) -/* Controller does not provide transfer-complete interrupt when not busy */ -#define RT_SDHCI_QUIRK_NO_BUSY_IRQ (1 << 14) -/* Controller has unreliable card detection */ -#define RT_SDHCI_QUIRK_BROKEN_CARD_DETECTION (1 << 15) -/* Controller reports inverted write-protect state */ -#define RT_SDHCI_QUIRK_INVERTED_WRITE_PROTECT (1 << 16) -/* Controller has unusable command queue engine */ -#define RT_SDHCI_QUIRK_BROKEN_CQE (1 << 17) -/* Controller does not like fast PIO transfers */ -#define RT_SDHCI_QUIRK_PIO_NEEDS_DELAY (1 << 18) -/* Controller does not have a LED */ -#define RT_SDHCI_QUIRK_NO_LED (1 << 19) -/* Controller has to be forced to use block size of 2048 bytes */ -#define RT_SDHCI_QUIRK_FORCE_BLK_SZ_2048 (1 << 20) -/* Controller cannot do multi-block transfers */ -#define RT_SDHCI_QUIRK_NO_MULTIBLOCK (1 << 21) -/* Controller can only handle 1-bit data transfers */ -#define RT_SDHCI_QUIRK_FORCE_1_BIT_DATA (1 << 22) -/* Controller needs 10ms delay between applying power and clock */ -#define RT_SDHCI_QUIRK_DELAY_AFTER_POWER (1 << 23) -/* Controller uses SDCLK instead of TMCLK for data timeouts */ -#define RT_SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK (1 << 24) -/* Controller reports wrong base clock capability */ -#define RT_SDHCI_QUIRK_CAP_CLOCK_BASE_BROKEN (1 << 25) -/* Controller cannot support End Attribute in NOP ADMA descriptor */ -#define RT_SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC (1 << 26) -/* Controller uses Auto CMD12 command to stop the transfer */ -#define RT_SDHCI_QUIRK_MULTIBLOCK_READ_ACMD12 (1 << 28) -/* Controller doesn't have HISPD bit field in HI-SPEED SD card */ -#define RT_SDHCI_QUIRK_NO_HISPD_BIT (1 << 29) -/* Controller treats ADMA descriptors with length 0000h incorrectly */ -#define RT_SDHCI_QUIRK_BROKEN_ADMA_ZEROLEN_DESC (1 << 30) -/* The read-only detection via RT_SDHCI_PRESENT_STATE register is unstable */ -#define RT_SDHCI_QUIRK_UNSTABLE_RO_DETECT (1 << 31) - - unsigned int quirks2; /* More deviations from spec. 
*/ - -#define RT_SDHCI_QUIRK2_HOST_OFF_CARD_ON (1 << 0) -#define RT_SDHCI_QUIRK2_HOST_NO_CMD23 (1 << 1) -/* The system physically doesn't support 1.8v, even if the host does */ -#define RT_SDHCI_QUIRK2_NO_1_8_V (1 << 2) -#define RT_SDHCI_QUIRK2_PRESET_VALUE_BROKEN (1 << 3) -#define RT_SDHCI_QUIRK2_CARD_ON_NEEDS_BUS_ON (1 << 4) -/* Controller has a non-standard host control register */ -#define RT_SDHCI_QUIRK2_BROKEN_HOST_CONTROL (1 << 5) -/* Controller does not support HS200 */ -#define RT_SDHCI_QUIRK2_BROKEN_HS200 (1 << 6) -/* Controller does not support DDR50 */ -#define RT_SDHCI_QUIRK2_BROKEN_DDR50 (1 << 7) -/* Stop command (CMD12) can set Transfer Complete when not using MMC_RSP_BUSY */ -#define RT_SDHCI_QUIRK2_STOP_WITH_TC (1 << 8) -/* Controller does not support 64-bit DMA */ -#define RT_SDHCI_QUIRK2_BROKEN_64_BIT_DMA (1 << 9) -/* need clear transfer mode register before send cmd */ -#define RT_SDHCI_QUIRK2_CLEAR_TRANSFERMODE_REG_BEFORE_CMD (1 << 10) -/* Capability register bit-63 indicates HS400 support */ -#define RT_SDHCI_QUIRK2_CAPS_BIT63_FOR_HS400 (1 << 11) -/* forced tuned clock */ -#define RT_SDHCI_QUIRK2_TUNING_WORK_AROUND (1 << 12) -/* disable the block count for single block transactions */ -#define RT_SDHCI_QUIRK2_SUPPORT_SINGLE (1 << 13) -/* Controller broken with using ACMD23 */ -#define RT_SDHCI_QUIRK2_ACMD23_BROKEN (1 << 14) -/* Broken Clock divider zero in controller */ -#define RT_SDHCI_QUIRK2_CLOCK_DIV_ZERO_BROKEN (1 << 15) -/* Controller has CRC in 136 bit Command Response */ -#define RT_SDHCI_QUIRK2_RSP_136_HAS_CRC (1 << 16) - -#define RT_SDHCI_QUIRK2_DISABLE_HW_TIMEOUT (1 << 17) - -#define RT_SDHCI_QUIRK2_USE_32BIT_BLK_CNT (1 << 18) -/* Issue CMD and DATA reset together */ -#define RT_SDHCI_QUIRK2_ISSUE_CMD_DAT_RESET_TOGETHER (1 << 19) - - int irq; /* Device IRQ */ - void *ioaddr; /* Mapped address */ - char *bounce_buffer; /* For packing SDMA reads/writes */ - rt_uint64_t bounce_addr; - unsigned int bounce_buffer_size; - - const struct rt_sdhci_ops *ops; /* Low level hw interface */ - - /* Internal data */ - struct rt_mmc_host *mmc; /* MMC structure */ - struct mmc_host_ops mmc_host_ops; /* MMC host ops */ - rt_uint64_t dma_mask; /* custom DMA mask */ - - rt_spinlock_t lock; - int flags; /* Host attributes */ -#define RT_SDHCI_USE_SDMA (1 << 0) /* Host is SDMA capable */ -#define RT_SDHCI_USE_ADMA (1 << 1) /* Host is ADMA capable */ -#define RT_SDHCI_REQ_USE_DMA (1 << 2) /* Use DMA for this req. */ -#define RT_SDHCI_DEVICE_DEAD (1 << 3) /* Device unresponsive */ -#define RT_SDHCI_SDR50_NEEDS_TUNING (1 << 4) /* SDR50 needs tuning */ -#define RT_SDHCI_AUTO_CMD12 (1 << 6) /* Auto CMD12 support */ -#define RT_SDHCI_AUTO_CMD23 (1 << 7) /* Auto CMD23 support */ -#define RT_SDHCI_PV_ENABLED (1 << 8) /* Preset value enabled */ -#define RT_SDHCI_USE_64_BIT_DMA (1 << 12) /* Use 64-bit DMA */ -#define RT_SDHCI_HS400_TUNING (1 << 13) /* Tuning for HS400 */ -#define RT_SDHCI_SIGNALING_330 (1 << 14) /* Host is capable of 3.3V signaling */ -#define RT_SDHCI_SIGNALING_180 (1 << 15) /* Host is capable of 1.8V signaling */ -#define RT_SDHCI_SIGNALING_120 (1 << 16) /* Host is capable of 1.2V signaling */ - - unsigned int version; /* RT_SDHCI spec. 
version */ - - unsigned int max_clk; /* Max possible freq (MHz) */ - unsigned int timeout_clk; /* Timeout freq (KHz) */ - rt_uint8_t max_timeout_count; /* Vendor specific max timeout count */ - unsigned int clk_mul; /* Clock Muliplier value */ - - unsigned int clock; /* Current clock (MHz) */ - rt_uint8_t pwr; /* Current voltage */ - rt_uint8_t drv_type; /* Current UHS-I driver type */ - rt_bool_t reinit_uhs; /* Force UHS-related re-initialization */ - - rt_bool_t runtime_suspended; /* Host is runtime suspended */ - rt_bool_t bus_on; /* Bus power prevents runtime suspend */ - rt_bool_t preset_enabled; /* Preset is enabled */ - rt_bool_t pending_reset; /* Cmd/data reset is pending */ - rt_bool_t irq_wake_enabled; /* IRQ wakeup is enabled */ - rt_bool_t v4_mode; /* Host Version 4 Enable */ - rt_bool_t always_defer_done; /* Always defer to complete requests */ - - struct rt_mmcsd_req *mrqs_done[RT_SDHCI_MAX_MRQS]; /* Requests done */ - struct rt_mmcsd_cmd *cmd; /* Current command */ - struct rt_mmcsd_cmd *data_cmd; /* Current data command */ - struct rt_mmcsd_cmd *deferred_cmd; /* Deferred command */ - struct rt_mmcsd_data *data; /* Current data request */ - unsigned int data_early : 1; /* Data finished before cmd */ - - unsigned int blocks; /* remaining PIO blocks */ - size_t align_buffer_sz; /* Bounce buffer size */ - rt_uint64_t align_addr; /* Mapped bounce buffer */ - - struct rt_workqueue *complete_wq; /* Request completion wq */ - struct rt_work complete_work; /* Request completion work */ - - struct rt_workqueue *irq_wq; - struct rt_work irq_work; - - struct rt_timer timer; /* Timer for timeouts */ - struct rt_timer data_timer; /* Timer for data timeouts */ - - rt_uint32_t caps; /* CAPABILITY_0 */ - rt_uint32_t caps1; /* CAPABILITY_1 */ - rt_bool_t read_caps; /* Capability flags have been read */ - - rt_bool_t sdhci_core_to_disable_vqmmc; /* sdhci core can disable vqmmc */ - unsigned int ocr_avail_sdio; /* OCR bit masks */ - unsigned int ocr_avail_sd; - unsigned int ocr_avail_mmc; - rt_uint32_t ocr_mask; /* available voltages */ - - unsigned timing; /* Current timing */ - - rt_uint32_t thread_isr; - - /* cached registers */ - rt_uint32_t ier; - - rt_bool_t cqe_on; /* CQE is operating */ - rt_uint32_t cqe_ier; /* CQE interrupt mask */ - rt_uint32_t cqe_err_ier; /* CQE error interrupt mask */ - - rt_wqueue_t buf_ready_int; /* Waitqueue for Buffer Read Ready interrupt */ - unsigned int tuning_done; /* Condition flag set when CMD19 succeeds */ - - unsigned int tuning_count; /* Timer count for re-tuning */ - unsigned int tuning_mode; /* Re-tuning mode supported by host */ - unsigned int tuning_err; /* Error code for re-tuning */ -#define RT_SDHCI_TUNING_MODE_1 0 -#define RT_SDHCI_TUNING_MODE_2 1 -#define RT_SDHCI_TUNING_MODE_3 2 - /* Delay (ms) between tuning commands */ - int tuning_delay; - int tuning_loop_count; - - /* Host SDMA buffer boundary. 
*/ - rt_uint32_t sdma_boundary; - rt_uint64_t data_timeout; - - unsigned long private[]; -}; - -static inline rt_uint8_t u8_read(const volatile void *addr) -{ - return *(const volatile rt_uint8_t *)addr; -} - -static inline rt_uint16_t u16_read(const volatile void *addr) -{ - return *(const volatile rt_uint16_t *)addr; -} - -static inline rt_uint32_t u32_read(const volatile void *addr) -{ - return *(const volatile rt_uint32_t *)addr; -} - -static inline void u8_write(rt_uint8_t value, volatile void *addr) -{ - *(volatile rt_uint8_t *)addr = value; -} - -static inline void u16_write(rt_uint16_t value, volatile void *addr) -{ - *(volatile rt_uint16_t *)addr = value; -} - -static inline void u32_write(rt_uint32_t value, volatile void *addr) -{ - *(volatile rt_uint32_t *)addr = value; -} - -#define readb(c) u8_read(c) -#define readw(c) u16_read(c) -#define readl(c) u32_read(c) -#define readsb(p, d, l) ({ __raw_readsb(p,d,l); __iormb(); }) -#define readsw(p, d, l) ({ __raw_readsw(p,d,l); __iormb(); }) -#define readsl(p, d, l) ({ __raw_readsl(p,d,l); __iormb(); }) - -#define writeb(v, c) u8_write(v, c) -#define writew(v, c) u16_write(v, c) -#define writel(v, c) u32_write(v, c) -#define writesb(p, d, l) ({ __iowmb(); __raw_writesb(p,d,l); }) -#define writesw(p, d, l) ({ __iowmb(); __raw_writesw(p,d,l); }) -#define writesl(p, d, l) ({ __iowmb(); __raw_writesl(p,d,l); }) - -static inline void rt_sdhci_writel(struct rt_sdhci_host *host, rt_uint32_t val, int reg) -{ - writel(val, host->ioaddr + reg); -} - -static inline void rt_sdhci_writew(struct rt_sdhci_host *host, rt_uint16_t val, int reg) -{ - writew(val, host->ioaddr + reg); -} - -static inline void rt_sdhci_writeb(struct rt_sdhci_host *host, rt_uint8_t val, int reg) -{ - writeb(val, host->ioaddr + reg); -} - -static inline rt_uint32_t rt_sdhci_readl(struct rt_sdhci_host *host, int reg) -{ - return readl(host->ioaddr + reg); -} - -static inline rt_uint16_t rt_sdhci_readw(struct rt_sdhci_host *host, int reg) -{ - return readw(host->ioaddr + reg); -} - -static inline rt_uint8_t rt_sdhci_readb(struct rt_sdhci_host *host, int reg) -{ - return readb(host->ioaddr + reg); -} - - -struct rt_sdhci_ops -{ - void (*set_clock)(struct rt_sdhci_host *host, unsigned int clock); - void (*set_power)(struct rt_sdhci_host *host, unsigned char mode, - unsigned short vdd); - rt_uint32_t (*irq)(struct rt_sdhci_host *host, rt_uint32_t intmask); - int (*set_dma_mask)(struct rt_sdhci_host *host); - int (*enable_dma)(struct rt_sdhci_host *host); - unsigned int (*get_max_clock)(struct rt_sdhci_host *host); - unsigned int (*get_min_clock)(struct rt_sdhci_host *host); - unsigned int (*get_timeout_clock)(struct rt_sdhci_host *host); - unsigned int (*get_max_timeout_count)(struct rt_sdhci_host *host); - void (*set_timeout)(struct rt_sdhci_host *host, - struct rt_mmcsd_cmd *cmd); - void (*set_bus_width)(struct rt_sdhci_host *host, int width); - unsigned int (*get_ro)(struct rt_sdhci_host *host); - void (*reset)(struct rt_sdhci_host *host, rt_uint8_t mask); - int (*platform_execute_tuning)(struct rt_sdhci_host *host, rt_uint32_t opcode); - void (*set_uhs_signaling)(struct rt_sdhci_host *host, unsigned int uhs); - void (*hw_reset)(struct rt_sdhci_host *host); - void (*card_event)(struct rt_sdhci_host *host); - void (*voltage_switch)(struct rt_sdhci_host *host); - void (*request_done)(struct rt_sdhci_host *host, - struct rt_mmcsd_req *mrq); -}; - - -struct rt_sdhci_host *rt_sdhci_alloc_host(struct rt_device *dev, size_t priv_size); -void rt_sdhci_free_host(struct rt_sdhci_host 
*host); - -static inline void *sdhci_priv(struct rt_sdhci_host *host) -{ - return host->private; -} - -void rt_sdhci_read_caps(struct rt_sdhci_host *host, const rt_uint16_t *ver, - const rt_uint32_t *caps, const rt_uint32_t *caps1); -int rt_sdhci_setup_host(struct rt_sdhci_host *host); -void rt_sdhci_cleanup_host(struct rt_sdhci_host *host); -int rt_sdhci_set_and_add_host(struct rt_sdhci_host *host); -int rt_sdhci_init_host(struct rt_sdhci_host *host); -void rt_sdhci_uninit_host(struct rt_sdhci_host *host, int dead); - -rt_uint16_t rt_sdhci_clk_set(struct rt_sdhci_host *host, unsigned int clock, - unsigned int *actual_clock); -void rt_sdhci_set_clock(struct rt_sdhci_host *host, unsigned int clock); -void rt_sdhci_clk_enable(struct rt_sdhci_host *host, rt_uint16_t clk); -void rt_sdhci_set_power(struct rt_sdhci_host *host, unsigned char mode,unsigned short vdd); -void rt_read_reg(struct rt_sdhci_host* host); - -void rt_sdhci_set_power_with_noreg(struct rt_sdhci_host *host, unsigned char mode, - unsigned short vdd); -void rt_sdhci_start_request(struct rt_mmc_host *mmc, struct rt_mmcsd_req *mrq); -int rt_sdhci_start_request_atomic(struct rt_mmc_host *mmc, struct rt_mmcsd_req *mrq); -void rt_sdhci_set_bus_width(struct rt_sdhci_host *host, int width); -void rt_sdhci_reset(struct rt_sdhci_host *host, rt_uint8_t mask); -void rt_sdhci_set_uhs(struct rt_sdhci_host *host, unsigned timing); -int rt_sdhci_execute_tuning(struct rt_mmc_host *mmc, rt_uint32_t opcode); -int __sdhci_execute_tuning(struct rt_sdhci_host *host, rt_uint32_t opcode); -void rt_sdhci_ios_set(struct rt_mmc_host *mmc, struct rt_mmcsd_io_cfg *ios); -int rt_sdhci_start_signal_voltage_switch(struct rt_mmc_host *mmc, - struct rt_mmcsd_io_cfg *ios); -void rt_sdhci_enable_io_irq(struct rt_mmc_host *mmc, int enable); -void rt_sdhci_start_tuning(struct rt_sdhci_host *host); -void rt_sdhci_end_tuning(struct rt_sdhci_host *host); -void rt_sdhci_reset_tuning(struct rt_sdhci_host *host); -void rt_sdhci_send_tuning(struct rt_sdhci_host *host, rt_uint32_t opcode); -void rt_sdhci_abort_tuning(struct rt_sdhci_host *host, rt_uint32_t opcode); -void rt_sdhci_data_irq_timeout(struct rt_sdhci_host *host, rt_bool_t enable); -void rt_sdhci_timeout_set(struct rt_sdhci_host *host, struct rt_mmcsd_cmd *cmd); -void rt_read_reg_debug(struct rt_sdhci_host* host); - -#endif /* __RT_SDHCI_HW_H */ diff --git a/components/drivers/sdio/sdhci/include/sdhci_host.h b/components/drivers/sdio/sdhci/include/sdhci_host.h deleted file mode 100644 index 8584ab4b9d5..00000000000 --- a/components/drivers/sdio/sdhci/include/sdhci_host.h +++ /dev/null @@ -1,345 +0,0 @@ -/* - * Copyright (c) 2006-2024 RT-Thread Development Team - * - * SPDX-License-Identifier: Apache-2.0 - * - * Change Logs: - * Date Author Notes - * 2024-08-16 zhujiale first version - */ -#ifndef __RT_SDHCI_MMC_H__ -#define __RT_SDHCI_MMC_H__ - -#include -#include -#include -#include -#include -#define mmc_dev(x) ((x)->parent) - -#define MMC_SEND_TUNING_BLOCK_HS200 SEND_TUNING_BLOCK_HS200 -#define MMC_SEND_TUNING_BLOCK SEND_TUNING_BLOCK -#define MMC_STOP_TRANSMISSION STOP_TRANSMISSION -#define MMC_BUS_TEST_R 14 /* adtc R1 */ -#define MMC_WRITE_MULTIPLE_BLOCK WRITE_MULTIPLE_BLOCK -#define MMC_READ_MULTIPLE_BLOCK READ_MULTIPLE_BLOCK - -#define MMC_TIMING_UHS_DDR50 MMCSD_TIMING_UHS_DDR50 -#define MMC_TIMING_UHS_SDR50 MMCSD_TIMING_UHS_SDR50 -#define MMC_TIMING_MMC_HS200 MMCSD_TIMING_MMC_HS200 -#define MMC_TIMING_MMC_HS400 MMCSD_TIMING_MMC_HS400 -#define MMC_TIMING_UHS_SDR104 MMCSD_TIMING_UHS_SDR104 -#define 
MMC_TIMING_UHS_SDR25 MMCSD_TIMING_UHS_SDR25 -#define MMC_TIMING_MMC_DDR52 MMCSD_TIMING_MMC_DDR52 -#define MMC_TIMING_UHS_SDR12 MMCSD_TIMING_UHS_SDR12 -#define MMC_TIMING_SD_HS MMCSD_TIMING_SD_HS -#define MMC_TIMING_MMC_HS MMCSD_TIMING_MMC_HS - -#define MMC_POWER_OFF MMCSD_POWER_OFF -#define MMC_POWER_UP MMCSD_POWER_UP -#define MMC_POWER_ON MMCSD_POWER_ON -#define MMC_POWER_UNDEFINED 3 - -#define MMC_SET_DRIVER_TYPE_B 0 -#define MMC_SET_DRIVER_TYPE_A 1 -#define MMC_SET_DRIVER_TYPE_C 2 -#define MMC_SET_DRIVER_TYPE_D 3 - -#define MMC_SIGNAL_VOLTAGE_330 0 -#define MMC_SIGNAL_VOLTAGE_180 1 -#define MMC_SIGNAL_VOLTAGE_120 2 - -#define MMC_RSP_PRESENT (1 << 16) -#define MMC_RSP_136 (1 << 17) /* 136 bit response */ -#define MMC_RSP_CRC (1 << 18) /* expect valid crc */ -#define MMC_RSP_BUSY (1 << 19) /* card may send busy */ -#define MMC_RSP_OPCODE (1 << 20) /* response contains opcode */ - -#define MMC_RSP_NONE (0) -#define MMC_RSP_R1 (MMC_RSP_PRESENT | MMC_RSP_CRC | MMC_RSP_OPCODE) -#define MMC_RSP_R1B (MMC_RSP_PRESENT | MMC_RSP_CRC | MMC_RSP_OPCODE | MMC_RSP_BUSY) -#define MMC_RSP_R2 (MMC_RSP_PRESENT | MMC_RSP_136 | MMC_RSP_CRC) -#define MMC_RSP_R3 (MMC_RSP_PRESENT) -#define MMC_RSP_R4 (MMC_RSP_PRESENT) -#define MMC_RSP_R5 (MMC_RSP_PRESENT | MMC_RSP_CRC | MMC_RSP_OPCODE) -#define MMC_RSP_R6 (MMC_RSP_PRESENT | MMC_RSP_CRC | MMC_RSP_OPCODE) -#define MMC_RSP_R7 (MMC_RSP_PRESENT | MMC_RSP_CRC | MMC_RSP_OPCODE) - -#define MMC_CMD_ADTC CMD_ADTC - -#define MMC_BUS_WIDTH_8 MMCSD_BUS_WIDTH_8 -#define MMC_BUS_WIDTH_4 MMCSD_BUS_WIDTH_4 -#define MMC_BUS_WIDTH_1 MMCSD_BUS_WIDTH_1 - -#define MMC_PM_KEEP_POWER (1 << 0) /* preserve card power during suspend */ -#define MMC_PM_WAKE_SDIO_IRQ (1 << 1) /* wake up host system on SDIO IRQ assertion */ -enum mmc_blk_status -{ - MMC_BLK_SUCCESS = 0, - MMC_BLK_PARTIAL, - MMC_BLK_CMD_ERR, - MMC_BLK_RETRY, - MMC_BLK_ABORT, - MMC_BLK_DATA_ERR, - MMC_BLK_ECC_ERR, - MMC_BLK_NOMEDIUM, - MMC_BLK_NEW_REQUEST, -}; - -#define MMC_NUM_CLK_PHASES (MMC_TIMING_MMC_HS400 + 1) - -struct rt_mmc_host ; - -struct mmc_host_ops -{ - void (*request)(struct rt_mmc_host *host, struct rt_mmcsd_req *req); - void (*set_ios)(struct rt_mmc_host *host, struct rt_mmcsd_io_cfg *ios); - int (*get_ro)(struct rt_mmc_host *host); - int (*get_cd)(struct rt_mmc_host *host); - void (*enable_sdio_irq)(struct rt_mmc_host *host, int enable); - void (*ack_sdio_irq)(struct rt_mmc_host *host); - int (*start_signal_voltage_switch)(struct rt_mmc_host *host, struct rt_mmcsd_io_cfg *ios); - int (*card_busy)(struct rt_mmc_host *host); - int (*execute_tuning)(struct rt_mmc_host *host, unsigned opcode); - int (*prepare_hs400_tuning)(struct rt_mmc_host *host, struct rt_mmcsd_io_cfg *ios); - int (*hs400_prepare_ddr)(struct rt_mmc_host *host); - void (*hs400_downgrade)(struct rt_mmc_host *host); - void (*hs400_complete)(struct rt_mmc_host *host); - void (*hs400_enhanced_strobe)(struct rt_mmc_host *host, - struct rt_mmcsd_io_cfg* ios); - void (*hw_reset)(struct rt_mmc_host* host); - void (*card_event)(struct rt_mmc_host* host); -}; - -struct regulator; -struct mmc_pwrseq; - -struct mmc_supply -{ - struct regulator *vmmc; /* Card power supply */ - struct regulator *vqmmc; /* Optional Vccq supply */ -}; - -struct mmc_ctx -{ - struct task_struct *task; -}; - -/* VDD voltage 3.3 ~ 3.4 */ -#define MMC_VDD_34_35 0x00400000 /* VDD voltage 3.4 ~ 3.5 */ -#define MMC_VDD_35_36 0x00800000 /* VDD voltage 3.5 ~ 3.6 */ - -#define MMC_CAP2_HS200_1_8V_SDR MMCSD_SUP_HS200_1V8 -#define MMC_CAP_4_BIT_DATA MMCSD_BUSWIDTH_4 -#define 
MMC_CAP_8_BIT_DATA MMCSD_BUSWIDTH_8 -#define MMC_CAP2_HS200 MMCSD_SUP_HS200 -#define MMC_CAP_MMC_HIGHSPEED MMCSD_SUP_HIGHSPEED -#define MMC_CAP_SD_HIGHSPEED MMCSD_SUP_HIGHSPEED -#define MMC_CAP_1_8V_DDR MMCSD_SUP_DDR_1V8 -#define MMC_CAP_3_3V_DDR MMCSD_SUP_DDR_3V3 -#define MMC_CAP_1_2V_DDR MMCSD_SUP_DDR_1V2 -#define MMC_CAP_NONREMOVABLE MMCSD_SUP_NONREMOVABLE - - -#define MMC_CAP_UHS_DDR50 0 -#define MMC_CAP2_HS400 0 -#define MMC_CAP_UHS_SDR50 0 -#define MMC_CAP_UHS_SDR25 0 -#define MMC_CAP_UHS_SDR12 0 -#define MMC_CAP_UHS_SDR104 0 -#define MMC_CAP_UHS 0 -#define MMC_CAP2_HSX00_1_8V 0 -#define MMC_CAP2_HS400_ES 0 -#define MMC_CAP_NEEDS_POLL 0 -#define MMC_CAP2_HSX00_1_2V 0 -#define MMC_CAP2_HS400_1_8V 0 -#define MMC_CAP_DRIVER_TYPE_D 0 -#define MMC_CAP_DRIVER_TYPE_C 0 -#define MMC_SET_DRIVER_TYPE_B 0 -#define MMC_CAP_DRIVER_TYPE_A 0 -#define MMC_CAP2_SDIO_IRQ_NOTHREAD 0 -#define MMC_CAP_CMD23 0 -#define MMC_CAP_SDIO_IRQ 0 - -#define MMC_CAP2_NO_SDIO (1 << 19) -#define MMC_CAP2_NO_SD (1 << 21) -#define MMC_CAP2_NO_MMC (1 << 22) -#define MMC_CAP2_CQE (1 << 23) - -#define MMC_VDD_165_195 VDD_165_195 -#define MMC_VDD_20_21 VDD_20_21 -#define MMC_VDD_29_30 VDD_29_30 -#define MMC_VDD_30_31 VDD_30_31 -#define MMC_VDD_32_33 VDD_32_33 -#define MMC_VDD_33_34 VDD_33_34 - - -struct rt_mmc_host -{ - struct rt_mmcsd_host rthost; - struct rt_device *parent; - int index; - const struct mmc_host_ops *ops; - unsigned int f_min; - unsigned int f_max; - unsigned int f_init; - rt_uint32_t ocr_avail; - rt_uint32_t ocr_avail_sdio; /* SDIO-specific OCR */ - rt_uint32_t ocr_avail_sd; /* SD-specific OCR */ - rt_uint32_t ocr_avail_mmc; /* MMC-specific OCR */ - struct wakeup_source *ws; /* Enable consume of uevents */ - rt_uint32_t max_current_330; - rt_uint32_t max_current_300; - rt_uint32_t max_current_180; - rt_uint32_t caps; /* Host capabilities */ - - rt_uint32_t caps2; /* More host capabilities */ - - - /* host specific block data */ - unsigned int max_seg_size; /* see blk_queue_max_segment_size */ - unsigned short max_segs; /* see blk_queue_max_segments */ - unsigned short unused; - unsigned int max_req_size; /* maximum number of bytes in one req */ - unsigned int max_blk_size; /* maximum size of one mmc block */ - unsigned int max_blk_count; /* maximum number of blocks in one req */ - unsigned int max_busy_timeout; /* max busy timeout in ms */ - struct rt_mmcsd_io_cfg ios; /* current io bus settings */ - unsigned int retune_period; - /* group bitfields together to minimize padding */ - unsigned int use_spi_crc : 1; - unsigned int claimed : 1; /* host exclusively claimed */ - unsigned int doing_init_tune : 1; /* initial tuning in progress */ - unsigned int can_retune : 1; /* re-tuning can be used */ - unsigned int doing_retune : 1; /* re-tuning in progress */ - unsigned int retune_now : 1; /* do re-tuning at next req */ - unsigned int retune_paused : 1; /* re-tuning is temporarily disabled */ - unsigned int retune_crc_disable : 1; /* don't trigger retune upon crc */ - unsigned int can_dma_map_merge : 1; /* merging can be used */ - unsigned int vqmmc_enabled : 1; /* vqmmc regulator is enabled */ - - int need_retune; /* re-tuning is needed */ - int hold_retune; /* hold off re-tuning */ - rt_bool_t trigger_card_event; /* card_event necessary */ - unsigned int sdio_irqs; - rt_bool_t sdio_irq_pending; - - struct led_trigger *led; /* activity led */ - - struct mmc_supply supply; - - - /* Ongoing data transfer that allows commands during transfer */ - struct rt_mmcsd_req *ongoing_mrq; - - - unsigned int actual_clock; 
/* Actual HC clock rate */ - rt_uint32_t pm_caps; - unsigned long private[]; -}; - - -static inline int mmc_card_is_removable(struct rt_mmc_host *host) -{ - return !(host->caps & MMC_CAP_NONREMOVABLE); -} - -struct device_node; -struct rt_mmc_host *rt_mmc_alloc_host(int extra, struct rt_device *); -int rt_mmc_add_host(struct rt_mmc_host *); -void rt_mmc_remove_host(struct rt_mmc_host *); -void rt_mmc_free_host(struct rt_mmc_host *); -int rt_mmc_of_parse(struct rt_mmc_host *host); -int rt_mmc_of_parse_voltage(struct rt_mmc_host *host, rt_uint32_t *mask); - -static inline void *mmc_priv(struct rt_mmc_host *host) -{ - return (void *)host->private; -} - - -#define mmc_host_is_spi(host) ((host)->caps & MMC_CAP_SPI) - -#define mmc_dev(x) ((x)->parent) -#define mmc_classdev(x) (&(x)->class_dev) -#define mmc_hostname(x) (x->parent->parent.name) - -void rt_mmc_detect_change(struct rt_mmc_host *, unsigned long delay); -void rt_mmc_request_done(struct rt_mmc_host *, struct rt_mmcsd_req *); -void mmc_command_done(struct rt_mmc_host *host, struct rt_mmcsd_req *mrq); - -void mmc_cqe_request_done(struct rt_mmc_host *host, struct rt_mmcsd_req *mrq); - -static inline rt_bool_t sdio_irq_claimed(struct rt_mmc_host *host) -{ - return host->sdio_irqs > 0; -} - -static inline int mmc_regulator_set_ocr(struct rt_mmc_host *mmc, - struct regulator *supply, - unsigned short vdd_bit) -{ - return 0; -} - -int mmc_regulator_get_supply(struct rt_mmc_host *mmc); -int mmc_regulator_enable_vqmmc(struct rt_mmc_host *mmc); -void mmc_regulator_disable_vqmmc(struct rt_mmc_host *mmc); - -void mmc_retune_timer_stop(struct rt_mmc_host* host); - -enum dma_data_direction -{ - DMA_BIDIRECTIONAL = 0, - DMA_TO_DEVICE = 1, - DMA_FROM_DEVICE = 2, - DMA_NONE = 3, -}; -#define DMA_BIT_MASK(n) (((n) == 64) ? ~0ULL : ((1ULL << (n)) - 1)) -static inline void mmc_retune_needed(struct rt_mmc_host *host) -{ - if (host->can_retune) - host->need_retune = 1; -} - -static inline rt_bool_t mmc_can_retune(struct rt_mmc_host *host) -{ - return host->can_retune == 1; -} - -static inline rt_bool_t mmc_doing_retune(struct rt_mmc_host *host) -{ - return host->doing_retune == 1; -} - -static inline rt_bool_t mmc_doing_tune(struct rt_mmc_host *host) -{ - return host->doing_retune == 1 || host->doing_init_tune == 1; -} - -static inline int mmc_get_dma_dir(struct rt_mmcsd_data *data) -{ - return data->flags & DATA_DIR_WRITE ? 
DMA_TO_DEVICE : DMA_FROM_DEVICE; -} - -static inline rt_bool_t mmc_op_multi(rt_uint32_t opcode) -{ - return opcode == MMC_WRITE_MULTIPLE_BLOCK || opcode == MMC_READ_MULTIPLE_BLOCK; -} - -static inline rt_bool_t mmc_op_tuning(rt_uint32_t opcode) -{ - return opcode == MMC_SEND_TUNING_BLOCK || opcode == MMC_SEND_TUNING_BLOCK_HS200; -} - -int rt_mmc_gpio_get_cd(struct rt_mmc_host *host); -void rt_mmc_detect_change(struct rt_mmc_host *host, unsigned long delay); -int rt_mmc_regulator_set_vqmmc(struct rt_mmc_host *mmc, struct rt_mmcsd_io_cfg *ios); -rt_bool_t rt_mmc_can_gpio_ro(struct rt_mmc_host *host); -int rt_mmc_gpio_get_ro(struct rt_mmc_host *host); - -int rt_mmc_send_abort_tuning(struct rt_mmc_host *host, rt_uint32_t opcode); -int rt_mmc_of_parse(struct rt_mmc_host *host); - - -#endif diff --git a/components/drivers/sdio/sdhci/include/sdhci_misc.h b/components/drivers/sdio/sdhci/include/sdhci_misc.h deleted file mode 100644 index 46144581c4f..00000000000 --- a/components/drivers/sdio/sdhci/include/sdhci_misc.h +++ /dev/null @@ -1,70 +0,0 @@ -/* - * Copyright (c) 2006-2024 RT-Thread Development Team - * - * SPDX-License-Identifier: Apache-2.0 - * - * Change Logs: - * Date Author Notes - * 2024-08-16 zhujiale first version - */ -#ifndef __RT_SDHCI_MISC_H__ -#define __RT_SDHCI_MISC_H__ - - -#define __BF_FIELD_CHECK(...) -#define __bf_shf(x) (__builtin_ffsll(x) - 1) -#define FIELD_GET(_mask, _reg) \ - ({ \ - __BF_FIELD_CHECK(_mask, _reg, 0U, "FIELD_GET: "); \ - (typeof(_mask))(((_reg) & (_mask)) >> __bf_shf(_mask)); \ - }) - -#define FIELD_PREP(_mask, _val) \ - ({ \ - __BF_FIELD_CHECK(_mask, 0ULL, _val, "FIELD_PREP: "); \ - ((typeof(_mask))(_val) << __bf_shf(_mask)) & (_mask); \ - }) - -#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d)) - -#define min_t(type, x, y) (((type)x < (type)y) ? x : y) -#define max_t(type, x, y) (((type)x > (type)y) ? x : y) -#define min(x, y) ((x) < (y) ? (x) : (y)) - -#define from_timer(var, callback_timer, timer_fieldname) \ - container_of(callback_timer, typeof(*var), timer_fieldname) - - -#define le32_to_cpu(x) (x) -#define le16_to_cpu(x) (x) -#define cpu_to_le16(x) (x) -#define cpu_to_le32(x) (x) -#define lower_32_bits(n) ((rt_uint32_t)((n) & 0xffffffff)) -#define upper_32_bits(n) ((rt_uint32_t)(((n) >> 16) >> 16)) - -#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d)) - -#define do_div(n, base) ({ \ - uint32_t __base = (base); \ - uint32_t __rem; \ - __rem = ((uint64_t)(n)) % __base; \ - (n) = ((uint64_t)(n)) / __base; \ - __rem; \ -}) - -#define fallthrough \ - do { \ - } while (0) - -int regulator_is_supported_voltage(struct regulator *regulator, - int min_uV, int max_uV); -rt_bool_t rt_mmc_can_gpio_cd(struct rt_mmc_host *host); - -struct regulator -{ - const char *supply_name; -}; - -int regulator_get_current_limit(struct regulator *regulator); - -#endif
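The sdhci_misc.h deleted just above carried the FIELD_GET()/FIELD_PREP() helpers, which derive a field's shift from the lowest set bit of its mask so callers only ever name the mask. A minimal, self-contained sketch of how driver code uses them, assuming standard C types and a made-up register layout (EXAMPLE_BLKSZ_MASK and EXAMPLE_BOUNDARY_MASK are illustrative, not taken from the SDHCI register map):

    #include <stdint.h>
    #include <stdio.h>

    /* Same shape as the deleted helpers: the shift comes from the mask. */
    #define __bf_shf(x)             (__builtin_ffsll(x) - 1)
    #define FIELD_GET(_mask, _reg)  (((_reg) & (_mask)) >> __bf_shf(_mask))
    #define FIELD_PREP(_mask, _val) (((uint32_t)(_val) << __bf_shf(_mask)) & (_mask))

    /* Hypothetical 16-bit register: bits [11:0] block size, [14:12] boundary. */
    #define EXAMPLE_BLKSZ_MASK      0x0FFFu
    #define EXAMPLE_BOUNDARY_MASK   0x7000u

    int main(void)
    {
        uint16_t reg = 0;

        reg |= FIELD_PREP(EXAMPLE_BLKSZ_MASK, 512);   /* 512-byte blocks */
        reg |= FIELD_PREP(EXAMPLE_BOUNDARY_MASK, 7);  /* boundary code 7 */

        /* Unpack without remembering any shift amounts. */
        printf("blksz=%u boundary=%u reg=0x%04x\n",
               (unsigned)FIELD_GET(EXAMPLE_BLKSZ_MASK, reg),
               (unsigned)FIELD_GET(EXAMPLE_BOUNDARY_MASK, reg),
               reg);
        return 0;
    }

Deriving the shift from the mask keeps each field down to a single #define, which is why the SDHCI code can pack and unpack register values without separate *_SHIFT constants.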
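Earlier in this diff, rt_sdhci_readl()/rt_sdhci_writel() and friends reduce to plain volatile loads and stores at a byte offset from host->ioaddr. A minimal sketch of the same pattern, using standard C types and an in-memory register window in place of real MMIO (struct fake_host, fake_readl()/fake_writel() and EXAMPLE_REG_OFFSET are illustrative names, not part of the driver):

    #include <stdint.h>
    #include <stdio.h>

    /* Raw accessors in the same shape as u32_read()/u32_write(). */
    static inline uint32_t u32_read(const volatile void *addr)
    {
        return *(const volatile uint32_t *)addr;
    }

    static inline void u32_write(uint32_t value, volatile void *addr)
    {
        *(volatile uint32_t *)addr = value;
    }

    /* Stand-in for the host structure: only the mapped base address matters here. */
    struct fake_host
    {
        void *ioaddr;
    };

    static inline void fake_writel(struct fake_host *host, uint32_t val, int reg)
    {
        u32_write(val, (uint8_t *)host->ioaddr + reg);
    }

    static inline uint32_t fake_readl(struct fake_host *host, int reg)
    {
        return u32_read((uint8_t *)host->ioaddr + reg);
    }

    #define EXAMPLE_REG_OFFSET 0x08 /* hypothetical register offset */

    int main(void)
    {
        static uint32_t regs[0x40];                 /* pretend MMIO window */
        struct fake_host host = { .ioaddr = regs };

        fake_writel(&host, 0x12345678u, EXAMPLE_REG_OFFSET);
        printf("0x%08x\n", (unsigned)fake_readl(&host, EXAMPLE_REG_OFFSET));
        return 0;
    }

Keeping the offset arithmetic inside one small set of inline wrappers means the rest of the core only ever names register offsets, never raw addresses.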