drivers: mmc: add SD support for Command Queueing

Application class A2 cards require CQ to be enabled to realise their
stated performance figures. Add support to enable/disable card CQ via
the Performance Enhancement extension register, and cater for the slight
differences in command set versus eMMC.

Signed-off-by: Jonathan Bell <jonathan@raspberrypi.com>
This commit is contained in:
Jonathan Bell
2024-03-15 13:06:13 +00:00
committed by Dom Cobley
parent 125a9b0c16
commit 1379718b89
6 changed files with 116 additions and 13 deletions

View File

@@ -896,7 +896,10 @@ static int mmc_blk_part_switch_pre(struct mmc_card *card,
if ((part_type & mask) == rpmb) {
if (card->ext_csd.cmdq_en) {
ret = mmc_cmdq_disable(card);
if (mmc_card_sd(card))
ret = mmc_sd_cmdq_disable(card);
else
ret = mmc_cmdq_disable(card);
if (ret)
return ret;
}
@@ -915,8 +918,12 @@ static int mmc_blk_part_switch_post(struct mmc_card *card,
if ((part_type & mask) == rpmb) {
mmc_retune_unpause(card->host);
if (card->reenable_cmdq && !card->ext_csd.cmdq_en)
ret = mmc_cmdq_enable(card);
if (card->reenable_cmdq && !card->ext_csd.cmdq_en) {
if (mmc_card_sd(card))
ret = mmc_sd_cmdq_enable(card);
else
ret = mmc_cmdq_enable(card);
}
}
return ret;
@@ -1127,7 +1134,10 @@ static void mmc_blk_issue_drv_op(struct mmc_queue *mq, struct request *req)
switch (mq_rq->drv_op) {
case MMC_DRV_OP_IOCTL:
if (card->ext_csd.cmdq_en) {
ret = mmc_cmdq_disable(card);
if (mmc_card_sd(card))
ret = mmc_sd_cmdq_disable(card);
else
ret = mmc_cmdq_disable(card);
if (ret)
break;
}
@@ -1145,8 +1155,12 @@ static void mmc_blk_issue_drv_op(struct mmc_queue *mq, struct request *req)
/* Always switch back to main area after RPMB access */
if (rpmb_ioctl)
mmc_blk_part_switch(card, 0);
else if (card->reenable_cmdq && !card->ext_csd.cmdq_en)
mmc_cmdq_enable(card);
else if (card->reenable_cmdq && !card->ext_csd.cmdq_en) {
if (mmc_card_sd(card))
mmc_sd_cmdq_enable(card);
else
mmc_cmdq_enable(card);
}
break;
case MMC_DRV_OP_BOOT_WP:
ret = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_BOOT_WP,

View File

@@ -563,7 +563,11 @@ int mmc_cqe_recovery(struct mmc_host *host)
mmc_poll_for_busy(host->card, MMC_CQE_RECOVERY_TIMEOUT, true, MMC_BUSY_IO);
memset(&cmd, 0, sizeof(cmd));
cmd.opcode = MMC_CMDQ_TASK_MGMT;
if (mmc_card_sd(host->card))
cmd.opcode = SD_CMDQ_TASK_MGMT;
else
cmd.opcode = MMC_CMDQ_TASK_MGMT;
cmd.arg = 1; /* Discard entire queue */
cmd.flags = MMC_RSP_R1B_NO_CRC | MMC_CMD_AC; /* Ignore CRC */
cmd.busy_timeout = MMC_CQE_RECOVERY_TIMEOUT;

View File

@@ -1034,8 +1034,8 @@ static bool mmc_sd_card_using_v18(struct mmc_card *card)
(SD_MODE_UHS_SDR50 | SD_MODE_UHS_SDR104 | SD_MODE_UHS_DDR50);
}
static int sd_write_ext_reg(struct mmc_card *card, u8 fno, u8 page, u16 offset,
u8 reg_data)
int sd_write_ext_reg(struct mmc_card *card, u8 fno, u8 page, u16 offset,
u8 reg_data)
{
struct mmc_host *host = card->host;
struct mmc_request mrq = {};
@@ -1193,8 +1193,14 @@ static int sd_parse_ext_reg_perf(struct mmc_card *card, u8 fno, u8 page,
card->ext_perf.feature_support |= SD_EXT_PERF_CACHE;
/* Command queue support indicated via queue depth bits (0 to 4). */
if (reg_buf[6] & 0x1f)
if (reg_buf[6] & 0x1f) {
card->ext_perf.feature_support |= SD_EXT_PERF_CMD_QUEUE;
card->ext_csd.cmdq_depth = reg_buf[6] & 0x1f;
card->ext_csd.cmdq_support = true;
pr_debug("%s: Command Queue supported depth %u\n",
mmc_hostname(card->host),
card->ext_csd.cmdq_depth);
}
card->ext_perf.fno = fno;
card->ext_perf.page = page;
@@ -1584,13 +1590,41 @@ cont:
goto free_card;
}
/* Enable command queueing if supported */
if (card->ext_csd.cmdq_support && host->caps2 & MMC_CAP2_CQE) {
/*
* Right now the MMC block layer uses DCMDs to issue
* cache-flush commands specific to eMMC devices.
* Turning off DCMD support avoids generating Illegal Command
* errors on SD, and flushing is instead done synchronously
* by mmc_blk_issue_flush().
*/
host->caps2 &= ~MMC_CAP2_CQE_DCMD;
err = mmc_sd_cmdq_enable(card);
if (err && err != -EBADMSG)
goto free_card;
if (err) {
pr_warn("%s: Enabling CMDQ failed\n",
mmc_hostname(card->host));
card->ext_csd.cmdq_support = false;
card->ext_csd.cmdq_depth = 0;
}
}
card->reenable_cmdq = card->ext_csd.cmdq_en;
if (!mmc_card_ult_capacity(card) && host->cqe_ops && !host->cqe_enabled) {
err = host->cqe_ops->cqe_enable(host, card);
if (!err) {
host->cqe_enabled = true;
host->hsq_enabled = true;
pr_info("%s: Host Software Queue enabled\n",
mmc_hostname(host));
if (card->ext_csd.cmdq_en) {
pr_info("%s: Command Queue Engine enabled\n",
mmc_hostname(host));
} else {
host->hsq_enabled = true;
pr_info("%s: Host Software Queue enabled\n",
mmc_hostname(host));
}
}
}

View File

@@ -417,3 +417,40 @@ int mmc_app_sd_status(struct mmc_card *card, void *ssr)
return 0;
}
int sd_write_ext_reg(struct mmc_card *card, u8 fno, u8 page, u16 offset,
u8 reg_data);
/*
 * Switch SD command queueing on or off through the Performance Enhancement
 * function's extension register.
 *
 * SD defines two CQ operating modes: sequential (in-order) and voluntary
 * (out-of-order). Application Performance Class A2 figures are only
 * guaranteed for voluntary CQ (mode bit, bit 1, left clear), so only the
 * enable bit is ever written here.
 *
 * On success the new state is mirrored into card->ext_csd.cmdq_en so the
 * core block layer tracks SD CQ state the same way it does for eMMC.
 *
 * Returns 0 on success or a negative error code from the register write.
 */
static int mmc_sd_cmdq_switch(struct mmc_card *card, bool enable)
{
	/* Byte 262 of the performance enhancement register: bit 0 = CQ enable */
	u8 val = enable ? BIT(0) : 0;
	int ret;

	ret = sd_write_ext_reg(card, card->ext_perf.fno, card->ext_perf.page,
			       card->ext_perf.offset + 262, val);
	if (!ret)
		card->ext_csd.cmdq_en = enable;

	return ret;
}
/*
 * mmc_sd_cmdq_enable - enable command queueing on an SD card
 * @card: SD card to operate on
 *
 * Thin wrapper around mmc_sd_cmdq_switch(); enables voluntary-mode CQ via
 * the Performance Enhancement extension register and, on success, sets
 * card->ext_csd.cmdq_en. Returns 0 or a negative error code.
 */
int mmc_sd_cmdq_enable(struct mmc_card *card)
{
return mmc_sd_cmdq_switch(card, true);
}
EXPORT_SYMBOL_GPL(mmc_sd_cmdq_enable);
/*
 * mmc_sd_cmdq_disable - disable command queueing on an SD card
 * @card: SD card to operate on
 *
 * Thin wrapper around mmc_sd_cmdq_switch(); clears the CQ enable bit in
 * the Performance Enhancement extension register and, on success, clears
 * card->ext_csd.cmdq_en. Returns 0 or a negative error code.
 */
int mmc_sd_cmdq_disable(struct mmc_card *card)
{
return mmc_sd_cmdq_switch(card, false);
}
EXPORT_SYMBOL_GPL(mmc_sd_cmdq_disable);

View File

@@ -24,6 +24,8 @@ int mmc_app_sd_status(struct mmc_card *card, void *ssr);
int mmc_app_cmd(struct mmc_host *host, struct mmc_card *card);
int mmc_send_ext_addr(struct mmc_host *host, u32 addr);
void mmc_uhs2_prepare_cmd(struct mmc_host *host, struct mmc_request *mrq);
int mmc_sd_cmdq_enable(struct mmc_card *card);
int mmc_sd_cmdq_disable(struct mmc_card *card);
#endif

View File

@@ -32,6 +32,9 @@
#define SD_APP_OP_COND 41 /* bcr [31:0] OCR R3 */
#define SD_APP_SEND_SCR 51 /* adtc R1 */
/* class 1 */
#define SD_CMDQ_TASK_MGMT 43 /* ac See below R1b */
/* class 11 */
#define SD_READ_EXTR_SINGLE 48 /* adtc [31:0] R1 */
#define SD_WRITE_EXTR_SINGLE 49 /* adtc [31:0] R1 */
@@ -64,6 +67,15 @@
* [7:0] Check Pattern (0xAA)
*/
/*
* SD_CMDQ_TASK_MGMT argument format:
*
* [31:21] Reserved (0)
* [20:16] Task ID
* [15:4] Reserved (0)
* [3:0] Operation - 0x1 = abort all tasks, 0x2 = abort Task ID
*/
/*
* SCR field definitions
*/