mmc: Revert all packing related patches

The version of packing support accepted by the Linux community is
slightly different from the one that was merged. This revert is needed
in order to bring in the latest version from the Linux community.

This patch reverts the following commits:
1. mmc: card: Add eMMC4.5 write packed commands unit-tests
2. mmc: card: Fix packing control enabling algorithm
3. mmc: block: Add MMC write packing statistics
4. mmc: msm_sdcc: enable the write packing control
5. mmc: msm_sdcc: Enable write packing capability
6. mmc: block: Add write packing control
7. mmc: core: Support packed write command for eMMC4.5 device
8. mmc: core: Add packed command feature of eMMC4.5

(cherry picked from commit f94cf3da103b344b13fa4d6665fd21dad1b95ead)

Change-Id: I2efc6dc8d8f6d5cc7e9efa99ec74914ffff96fcd
  commit: 9b54d88c6a11ebfe069b7fdebcb521da21754c3f
  commit: e2ecb58a6c5011549aac3e86fb1c13e7b7c65104
  commit: e544d700e2dac1584a8172c4dc347d81ede203bd
  commit: 8afe8d2a98a1bbf3804162ff5c95a56226935f5a
  commit: 25e2261a556c4393f79d58bce814bb3df34b9549
  commit: 63c61d6d8b8f37c71b4162b3affffdf72ac06811
  commit: 968c774ea6466fa7adbf2eac333220132acda306
  commit: 516994eee39282b8648b509e449ff83b49833209
Signed-off-by: Tatyana Brokhman <tlinder@codeaurora.org>
(cherry picked from commit 31fe84d6edae65f9df5663538e528697897be86e)

Signed-off-by: Maya Erez <merez@codeaurora.org>
Author:    Maya Erez <merez@codeaurora.org>
Date:      2012-11-28 23:46:50 +02:00
Committer: Stephen Boyd
Parent:    0375b15d28
Commit:    821bb10c51

19 changed files with 29 additions and 3007 deletions


@@ -8,23 +8,6 @@ The following attributes are read/write.
force_ro Enforce read-only access even if write protect switch is off.
num_wr_reqs_to_start_packing This attribute is used to determine
the trigger for activating the write packing, in case the write
packing control feature is enabled.
When the MMC manages to reach a point where num_wr_reqs_to_start_packing
write requests could be packed, it enables the write packing feature.
This allows us to start the write packing only when it is beneficial
and has minimal effect on the read latency.
The number of potential packed requests that will trigger the packing
can be configured via sysfs by writing the required value to:
/sys/block/<block_dev_name>/num_wr_reqs_to_start_packing.
The default value of num_wr_reqs_to_start_packing was determined by
running parallel lmdd write and lmdd read operations and calculating
the max number of packed write requests.
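
For reference, this knob was exercised entirely from userspace via the
sysfs path above. Below is a minimal C sketch of such tuning; the device
name "mmcblk0" is an illustrative assumption, and the value 17 mirrors
DEFAULT_NUM_REQS_TO_START_PACK from the queue.c hunk further down.

	#include <stdio.h>
	#include <stdlib.h>

	int main(void)
	{
		const char *path =
			"/sys/block/mmcblk0/num_wr_reqs_to_start_packing";
		FILE *f;
		int threshold = 0;

		/* Read the current trigger value. */
		f = fopen(path, "r");
		if (!f) {
			perror("open for read");
			return EXIT_FAILURE;
		}
		if (fscanf(f, "%d", &threshold) == 1)
			printf("current trigger: %d write requests\n", threshold);
		fclose(f);

		/* Write a new trigger value (the benchmark-derived default). */
		f = fopen(path, "w");
		if (!f) {
			perror("open for write");
			return EXIT_FAILURE;
		}
		fprintf(f, "%d\n", 17);
		fclose(f);
		return EXIT_SUCCESS;
	}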
SD and MMC Device Attributes
============================


@@ -252,7 +252,6 @@ static struct mmc_platform_data sdc1_data = {
.vreg_data = &mmc_slot_vreg_data[SDCC1],
.uhs_caps = MMC_CAP_1_8V_DDR | MMC_CAP_UHS_DDR50,
.uhs_caps2 = MMC_CAP2_HS200_1_8V_SDR,
.packed_write = MMC_CAP2_PACKED_WR | MMC_CAP2_PACKED_WR_CONTROL,
.mpm_sdiowakeup_int = MSM_MPM_PIN_SDC1_DAT1,
.msm_bus_voting_data = &sps_to_ddr_bus_voting_data,
};


@@ -247,7 +247,6 @@ static struct mmc_platform_data msm8960_sdc1_data = {
.mpm_sdiowakeup_int = MSM_MPM_PIN_SDC1_DAT1,
.msm_bus_voting_data = &sps_to_ddr_bus_voting_data,
.uhs_caps2 = MMC_CAP2_HS200_1_8V_SDR,
.packed_write = MMC_CAP2_PACKED_WR | MMC_CAP2_PACKED_WR_CONTROL,
};
#endif


@@ -296,7 +296,6 @@ static struct mmc_platform_data msm8960_sdc1_data = {
.mpm_sdiowakeup_int = MSM_MPM_PIN_SDC1_DAT1,
.msm_bus_voting_data = &sps_to_ddr_bus_voting_data,
.uhs_caps2 = MMC_CAP2_HS200_1_8V_SDR,
.packed_write = MMC_CAP2_PACKED_WR | MMC_CAP2_PACKED_WR_CONTROL,
};
#endif


@@ -76,14 +76,3 @@ config MMC_TEST
This driver is only of interest to those developing or
testing a host driver. Most people should say N here.
config MMC_BLOCK_TEST
tristate "MMC block test"
depends on MMC_BLOCK && IOSCHED_TEST
default m
help
MMC block test can be used with test iosched to test the MMC block
device.
Currently used to test eMMC 4.5 features (packed commands, sanitize,
BKOPs).


@@ -8,4 +8,3 @@ obj-$(CONFIG_MMC_TEST) += mmc_test.o
obj-$(CONFIG_SDIO_UART) += sdio_uart.o
obj-$(CONFIG_MMC_BLOCK_TEST) += mmc_block_test.o


@@ -59,16 +59,6 @@ MODULE_ALIAS("mmc:block");
#define INAND_CMD38_ARG_SECTRIM2 0x88
#define MMC_SANITIZE_REQ_TIMEOUT 240000 /* msec */
#define mmc_req_rel_wr(req) (((req->cmd_flags & REQ_FUA) || \
(req->cmd_flags & REQ_META)) && \
(rq_data_dir(req) == WRITE))
#define PACKED_CMD_VER 0x01
#define PACKED_CMD_WR 0x02
#define MMC_BLK_UPDATE_STOP_REASON(stats, reason) \
do { \
if (stats->enabled) \
stats->pack_stop_reason[reason]++; \
} while (0)
static DEFINE_MUTEX(block_mutex);
@@ -120,26 +110,24 @@ struct mmc_blk_data {
struct device_attribute force_ro;
struct device_attribute power_ro_lock;
int area_type;
struct device_attribute num_wr_reqs_to_start_packing;
};
static DEFINE_MUTEX(open_lock);
enum {
MMC_PACKED_N_IDX = -1,
MMC_PACKED_N_ZERO,
MMC_PACKED_N_SINGLE,
enum mmc_blk_status {
MMC_BLK_SUCCESS = 0,
MMC_BLK_PARTIAL,
MMC_BLK_CMD_ERR,
MMC_BLK_RETRY,
MMC_BLK_ABORT,
MMC_BLK_DATA_ERR,
MMC_BLK_ECC_ERR,
MMC_BLK_NOMEDIUM,
};
module_param(perdev_minors, int, 0444);
MODULE_PARM_DESC(perdev_minors, "Minors numbers to allocate per device");
static inline void mmc_blk_clear_packed(struct mmc_queue_req *mqrq)
{
mqrq->packed_cmd = MMC_PACKED_NONE;
mqrq->packed_num = MMC_PACKED_N_ZERO;
}
static struct mmc_blk_data *mmc_blk_get(struct gendisk *disk)
{
struct mmc_blk_data *md;
@@ -273,38 +261,6 @@ out:
return ret;
}
static ssize_t
num_wr_reqs_to_start_packing_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct mmc_blk_data *md = mmc_blk_get(dev_to_disk(dev));
int num_wr_reqs_to_start_packing;
int ret;
num_wr_reqs_to_start_packing = md->queue.num_wr_reqs_to_start_packing;
ret = snprintf(buf, PAGE_SIZE, "%d\n", num_wr_reqs_to_start_packing);
mmc_blk_put(md);
return ret;
}
static ssize_t
num_wr_reqs_to_start_packing_store(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t count)
{
int value;
struct mmc_blk_data *md = mmc_blk_get(dev_to_disk(dev));
sscanf(buf, "%d", &value);
if (value >= 0)
md->queue.num_wr_reqs_to_start_packing = value;
mmc_blk_put(md);
return count;
}
static int mmc_blk_open(struct block_device *bdev, fmode_t mode)
{
struct mmc_blk_data *md = mmc_blk_get(bdev->bd_disk);
@@ -1140,60 +1096,12 @@ static int mmc_blk_err_check(struct mmc_card *card,
if (!brq->data.bytes_xfered)
return MMC_BLK_RETRY;
if (mq_mrq->packed_cmd != MMC_PACKED_NONE) {
if (unlikely(brq->data.blocks << 9 != brq->data.bytes_xfered))
return MMC_BLK_PARTIAL;
else
return MMC_BLK_SUCCESS;
}
if (blk_rq_bytes(req) != brq->data.bytes_xfered)
return MMC_BLK_PARTIAL;
return MMC_BLK_SUCCESS;
}
static int mmc_blk_packed_err_check(struct mmc_card *card,
struct mmc_async_req *areq)
{
struct mmc_queue_req *mq_rq = container_of(areq, struct mmc_queue_req,
mmc_active);
struct request *req = mq_rq->req;
int err, check, status;
u8 ext_csd[512];
check = mmc_blk_err_check(card, areq);
err = get_card_status(card, &status, 0);
if (err) {
pr_err("%s: error %d sending status command\n",
req->rq_disk->disk_name, err);
return MMC_BLK_ABORT;
}
if (status & R1_EXP_EVENT) {
err = mmc_send_ext_csd(card, ext_csd);
if (err) {
pr_err("%s: error %d sending ext_csd\n",
req->rq_disk->disk_name, err);
return MMC_BLK_ABORT;
}
if ((ext_csd[EXT_CSD_EXP_EVENTS_STATUS] &
EXT_CSD_PACKED_FAILURE) &&
(ext_csd[EXT_CSD_PACKED_CMD_STATUS] &
EXT_CSD_PACKED_GENERIC_ERROR)) {
if (ext_csd[EXT_CSD_PACKED_CMD_STATUS] &
EXT_CSD_PACKED_INDEXED_ERROR) {
mq_rq->packed_fail_idx =
ext_csd[EXT_CSD_PACKED_FAILURE_INDEX] - 1;
return MMC_BLK_PARTIAL;
}
}
}
return check;
}
static void mmc_blk_rw_rq_prep(struct mmc_queue_req *mqrq,
struct mmc_card *card,
int disable_multi,
@@ -1349,361 +1257,10 @@ static void mmc_blk_rw_rq_prep(struct mmc_queue_req *mqrq,
mmc_queue_bounce_pre(mqrq);
}
static void mmc_blk_write_packing_control(struct mmc_queue *mq,
struct request *req)
{
struct mmc_host *host = mq->card->host;
int data_dir;
/* Support for the write packing on eMMC 4.5 or later */
if (mq->card->ext_csd.rev <= 5)
return;
if (!(host->caps2 & MMC_CAP2_PACKED_WR))
return;
/*
* In case the packing control is not supported by the host, it should
* not have an effect on the write packing. Therefore we have to enable
* the write packing
*/
if (!(host->caps2 & MMC_CAP2_PACKED_WR_CONTROL)) {
mq->wr_packing_enabled = true;
return;
}
if (!req || (req && (req->cmd_flags & REQ_FLUSH))) {
if (mq->num_of_potential_packed_wr_reqs >
mq->num_wr_reqs_to_start_packing)
mq->wr_packing_enabled = true;
mq->num_of_potential_packed_wr_reqs = 0;
return;
}
data_dir = rq_data_dir(req);
if (data_dir == READ) {
mq->num_of_potential_packed_wr_reqs = 0;
mq->wr_packing_enabled = false;
return;
} else if (data_dir == WRITE) {
mq->num_of_potential_packed_wr_reqs++;
}
if (mq->num_of_potential_packed_wr_reqs >
mq->num_wr_reqs_to_start_packing)
mq->wr_packing_enabled = true;
}
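
The control function above reduces to a small state machine: consecutive
writes accumulate toward num_wr_reqs_to_start_packing, any read resets the
count and disables packing, and a flush (or an empty queue, modeled the
same way) is where the accumulated count is evaluated. A stripped-down,
host-independent sketch of that heuristic follows; the names are
hypothetical and only model the logic being reverted.

	#include <stdbool.h>

	enum req_kind { REQ_KIND_READ, REQ_KIND_WRITE, REQ_KIND_FLUSH };

	struct pack_ctl {
		bool enabled;	/* wr_packing_enabled */
		int potential;	/* num_of_potential_packed_wr_reqs */
		int threshold;	/* num_wr_reqs_to_start_packing */
	};

	static void pack_ctl_update(struct pack_ctl *pc, enum req_kind kind)
	{
		switch (kind) {
		case REQ_KIND_FLUSH:	/* also models a NULL (empty queue) request */
			if (pc->potential > pc->threshold)
				pc->enabled = true;
			pc->potential = 0;
			return;
		case REQ_KIND_READ:	/* a read breaks the packing streak */
			pc->potential = 0;
			pc->enabled = false;
			return;
		case REQ_KIND_WRITE:
			pc->potential++;
			break;
		}
		if (pc->potential > pc->threshold)
			pc->enabled = true;
	}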
struct mmc_wr_pack_stats *mmc_blk_get_packed_statistics(struct mmc_card *card)
{
if (!card)
return NULL;
return &card->wr_pack_stats;
}
EXPORT_SYMBOL(mmc_blk_get_packed_statistics);
void mmc_blk_init_packed_statistics(struct mmc_card *card)
{
int max_num_of_packed_reqs = 0;
if (!card || !card->wr_pack_stats.packing_events)
return;
max_num_of_packed_reqs = card->ext_csd.max_packed_writes;
spin_lock(&card->wr_pack_stats.lock);
memset(card->wr_pack_stats.packing_events, 0,
(max_num_of_packed_reqs + 1) *
sizeof(*card->wr_pack_stats.packing_events));
memset(&card->wr_pack_stats.pack_stop_reason, 0,
sizeof(card->wr_pack_stats.pack_stop_reason));
card->wr_pack_stats.enabled = true;
spin_unlock(&card->wr_pack_stats.lock);
}
EXPORT_SYMBOL(mmc_blk_init_packed_statistics);
void print_mmc_packing_stats(struct mmc_card *card)
{
int i;
int max_num_of_packed_reqs = 0;
if ((!card) || (!card->wr_pack_stats.packing_events))
return;
max_num_of_packed_reqs = card->ext_csd.max_packed_writes;
spin_lock(&card->wr_pack_stats.lock);
pr_info("%s: write packing statistics:\n",
mmc_hostname(card->host));
for (i = 1 ; i <= max_num_of_packed_reqs ; ++i) {
if (card->wr_pack_stats.packing_events[i] != 0)
pr_info("%s: Packed %d reqs - %d times\n",
mmc_hostname(card->host), i,
card->wr_pack_stats.packing_events[i]);
}
pr_info("%s: stopped packing due to the following reasons:\n",
mmc_hostname(card->host));
if (card->wr_pack_stats.pack_stop_reason[EXCEEDS_SEGMENTS])
pr_info("%s: %d times: exceedmax num of segments\n",
mmc_hostname(card->host),
card->wr_pack_stats.pack_stop_reason[EXCEEDS_SEGMENTS]);
if (card->wr_pack_stats.pack_stop_reason[EXCEEDS_SECTORS])
pr_info("%s: %d times: exceeding the max num of sectors\n",
mmc_hostname(card->host),
card->wr_pack_stats.pack_stop_reason[EXCEEDS_SECTORS]);
if (card->wr_pack_stats.pack_stop_reason[WRONG_DATA_DIR])
pr_info("%s: %d times: wrong data direction\n",
mmc_hostname(card->host),
card->wr_pack_stats.pack_stop_reason[WRONG_DATA_DIR]);
if (card->wr_pack_stats.pack_stop_reason[FLUSH_OR_DISCARD])
pr_info("%s: %d times: flush or discard\n",
mmc_hostname(card->host),
card->wr_pack_stats.pack_stop_reason[FLUSH_OR_DISCARD]);
if (card->wr_pack_stats.pack_stop_reason[EMPTY_QUEUE])
pr_info("%s: %d times: empty queue\n",
mmc_hostname(card->host),
card->wr_pack_stats.pack_stop_reason[EMPTY_QUEUE]);
if (card->wr_pack_stats.pack_stop_reason[REL_WRITE])
pr_info("%s: %d times: rel write\n",
mmc_hostname(card->host),
card->wr_pack_stats.pack_stop_reason[REL_WRITE]);
if (card->wr_pack_stats.pack_stop_reason[THRESHOLD])
pr_info("%s: %d times: Threshold\n",
mmc_hostname(card->host),
card->wr_pack_stats.pack_stop_reason[THRESHOLD]);
spin_unlock(&card->wr_pack_stats.lock);
}
EXPORT_SYMBOL(print_mmc_packing_stats);
static u8 mmc_blk_prep_packed_list(struct mmc_queue *mq, struct request *req)
{
struct request_queue *q = mq->queue;
struct mmc_card *card = mq->card;
struct request *cur = req, *next = NULL;
struct mmc_blk_data *md = mq->data;
bool en_rel_wr = card->ext_csd.rel_param & EXT_CSD_WR_REL_PARAM_EN;
unsigned int req_sectors = 0, phys_segments = 0;
unsigned int max_blk_count, max_phys_segs;
u8 put_back = 0;
u8 max_packed_rw = 0;
u8 reqs = 0;
struct mmc_wr_pack_stats *stats = &card->wr_pack_stats;
mmc_blk_clear_packed(mq->mqrq_cur);
if (!(md->flags & MMC_BLK_CMD23) ||
!card->ext_csd.packed_event_en)
goto no_packed;
if (!mq->wr_packing_enabled)
goto no_packed;
if ((rq_data_dir(cur) == WRITE) &&
(card->host->caps2 & MMC_CAP2_PACKED_WR))
max_packed_rw = card->ext_csd.max_packed_writes;
if (max_packed_rw == 0)
goto no_packed;
if (mmc_req_rel_wr(cur) &&
(md->flags & MMC_BLK_REL_WR) &&
!en_rel_wr) {
goto no_packed;
}
max_blk_count = min(card->host->max_blk_count,
card->host->max_req_size >> 9);
if (unlikely(max_blk_count > 0xffff))
max_blk_count = 0xffff;
max_phys_segs = queue_max_segments(q);
req_sectors += blk_rq_sectors(cur);
phys_segments += cur->nr_phys_segments;
if (rq_data_dir(cur) == WRITE) {
req_sectors++;
phys_segments++;
}
spin_lock(&stats->lock);
while (reqs < max_packed_rw - 1) {
spin_lock_irq(q->queue_lock);
next = blk_fetch_request(q);
spin_unlock_irq(q->queue_lock);
if (!next) {
MMC_BLK_UPDATE_STOP_REASON(stats, EMPTY_QUEUE);
break;
}
if (next->cmd_flags & REQ_DISCARD ||
next->cmd_flags & REQ_FLUSH) {
MMC_BLK_UPDATE_STOP_REASON(stats, FLUSH_OR_DISCARD);
put_back = 1;
break;
}
if (rq_data_dir(cur) != rq_data_dir(next)) {
MMC_BLK_UPDATE_STOP_REASON(stats, WRONG_DATA_DIR);
put_back = 1;
break;
}
if (mmc_req_rel_wr(next) &&
(md->flags & MMC_BLK_REL_WR) &&
!en_rel_wr) {
MMC_BLK_UPDATE_STOP_REASON(stats, REL_WRITE);
put_back = 1;
break;
}
req_sectors += blk_rq_sectors(next);
if (req_sectors > max_blk_count) {
if (stats->enabled)
stats->pack_stop_reason[EXCEEDS_SECTORS]++;
put_back = 1;
break;
}
phys_segments += next->nr_phys_segments;
if (phys_segments > max_phys_segs) {
MMC_BLK_UPDATE_STOP_REASON(stats, EXCEEDS_SEGMENTS);
put_back = 1;
break;
}
if (rq_data_dir(next) == WRITE)
mq->num_of_potential_packed_wr_reqs++;
list_add_tail(&next->queuelist, &mq->mqrq_cur->packed_list);
cur = next;
reqs++;
}
if (put_back) {
spin_lock_irq(q->queue_lock);
blk_requeue_request(q, next);
spin_unlock_irq(q->queue_lock);
}
if (stats->enabled) {
if (reqs + 1 <= card->ext_csd.max_packed_writes)
stats->packing_events[reqs + 1]++;
if (reqs + 1 == max_packed_rw)
MMC_BLK_UPDATE_STOP_REASON(stats, THRESHOLD);
}
spin_unlock(&stats->lock);
if (reqs > 0) {
list_add(&req->queuelist, &mq->mqrq_cur->packed_list);
mq->mqrq_cur->packed_num = ++reqs;
return reqs;
}
no_packed:
mmc_blk_clear_packed(mq->mqrq_cur);
return 0;
}
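
Stripped of queue plumbing, the loop above is a series of admission checks:
a candidate request joins the pack only if the queue is non-empty, the
request is not a flush/discard, the data direction matches, reliable-write
constraints allow it, and the accumulated sectors and segments stay within
the host limits. A condensed model of those checks (types and helper name
hypothetical; the reliable-write test is simplified relative to the
md->flags / en_rel_wr logic above):

	#include <stdbool.h>

	enum pack_stop {
		PACK_OK, STOP_EMPTY_QUEUE, STOP_FLUSH_OR_DISCARD,
		STOP_WRONG_DATA_DIR, STOP_REL_WRITE,
		STOP_EXCEEDS_SECTORS, STOP_EXCEEDS_SEGMENTS,
	};

	struct next_req {	/* stand-in for the struct request fields used */
		bool valid, flush_or_discard, is_write, rel_write;
		unsigned int sectors, segments;
	};

	static enum pack_stop can_pack_next(const struct next_req *next,
					    bool cur_is_write,
					    unsigned int req_sectors,
					    unsigned int phys_segments,
					    unsigned int max_blk_count,
					    unsigned int max_phys_segs)
	{
		if (!next->valid)
			return STOP_EMPTY_QUEUE;
		if (next->flush_or_discard)
			return STOP_FLUSH_OR_DISCARD;
		if (next->is_write != cur_is_write)
			return STOP_WRONG_DATA_DIR;
		if (next->rel_write)
			return STOP_REL_WRITE;
		if (req_sectors + next->sectors > max_blk_count)
			return STOP_EXCEEDS_SECTORS;
		if (phys_segments + next->segments > max_phys_segs)
			return STOP_EXCEEDS_SEGMENTS;
		return PACK_OK;
	}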
static void mmc_blk_packed_hdr_wrq_prep(struct mmc_queue_req *mqrq,
struct mmc_card *card,
struct mmc_queue *mq)
{
struct mmc_blk_request *brq = &mqrq->brq;
struct request *req = mqrq->req;
struct request *prq;
struct mmc_blk_data *md = mq->data;
bool do_rel_wr;
u32 *packed_cmd_hdr = mqrq->packed_cmd_hdr;
u8 i = 1;
mqrq->packed_cmd = MMC_PACKED_WRITE;
mqrq->packed_blocks = 0;
mqrq->packed_fail_idx = MMC_PACKED_N_IDX;
memset(packed_cmd_hdr, 0, sizeof(mqrq->packed_cmd_hdr));
packed_cmd_hdr[0] = (mqrq->packed_num << 16) |
(PACKED_CMD_WR << 8) | PACKED_CMD_VER;
/*
* Argument for each entry of packed group
*/
list_for_each_entry(prq, &mqrq->packed_list, queuelist) {
do_rel_wr = mmc_req_rel_wr(prq) && (md->flags & MMC_BLK_REL_WR);
/* Argument of CMD23 */
packed_cmd_hdr[(i * 2)] =
(do_rel_wr ? MMC_CMD23_ARG_REL_WR : 0) |
blk_rq_sectors(prq);
/* Argument of CMD18 or CMD25 */
packed_cmd_hdr[((i * 2)) + 1] =
mmc_card_blockaddr(card) ?
blk_rq_pos(prq) : blk_rq_pos(prq) << 9;
mqrq->packed_blocks += blk_rq_sectors(prq);
i++;
}
memset(brq, 0, sizeof(struct mmc_blk_request));
brq->mrq.cmd = &brq->cmd;
brq->mrq.data = &brq->data;
brq->mrq.sbc = &brq->sbc;
brq->mrq.stop = &brq->stop;
brq->sbc.opcode = MMC_SET_BLOCK_COUNT;
brq->sbc.arg = MMC_CMD23_ARG_PACKED | (mqrq->packed_blocks + 1);
brq->sbc.flags = MMC_RSP_R1 | MMC_CMD_AC;
brq->cmd.opcode = MMC_WRITE_MULTIPLE_BLOCK;
brq->cmd.arg = blk_rq_pos(req);
if (!mmc_card_blockaddr(card))
brq->cmd.arg <<= 9;
brq->cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_ADTC;
brq->data.blksz = 512;
brq->data.blocks = mqrq->packed_blocks + 1;
brq->data.flags |= MMC_DATA_WRITE;
brq->data.fault_injected = false;
brq->stop.opcode = MMC_STOP_TRANSMISSION;
brq->stop.arg = 0;
brq->stop.flags = MMC_RSP_SPI_R1B | MMC_RSP_R1B | MMC_CMD_AC;
mmc_set_data_timeout(&brq->data, card);
brq->data.sg = mqrq->sg;
brq->data.sg_len = mmc_queue_map_sg(mq, mqrq);
mqrq->mmc_active.mrq = &brq->mrq;
/*
* This is intended for packed commands tests usage - in case these
* functions are not in use the respective pointers are NULL
*/
if (mq->err_check_fn)
mqrq->mmc_active.err_check = mq->err_check_fn;
else
mqrq->mmc_active.err_check = mmc_blk_packed_err_check;
if (mq->packed_test_fn)
mq->packed_test_fn(mq->queue, mqrq);
mmc_queue_bounce_pre(mqrq);
}
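
For readers unfamiliar with the eMMC 4.5 packed-write format the code above
implements: the header is a single 512-byte block sent ahead of the packed
data, where 32-bit word 0 encodes the entry count, direction, and version,
and each packed request contributes a pair of words mirroring its CMD23
(block count) and CMD25 (start address) arguments. A condensed model of
that layout, derived from the function above (helper name and inputs
hypothetical):

	#include <stdint.h>
	#include <string.h>

	#define PACKED_CMD_VER	0x01
	#define PACKED_CMD_WR	0x02

	/* Fill a 128-word (512-byte) packed-write header for n entries;
	 * sectors[i] and addrs[i] stand in for blk_rq_sectors() and
	 * blk_rq_pos() of each queued request. */
	static void build_packed_hdr(uint32_t hdr[128], const uint32_t *sectors,
				     const uint32_t *addrs, int n)
	{
		int i;

		memset(hdr, 0, 128 * sizeof(hdr[0]));
		hdr[0] = (n << 16) | (PACKED_CMD_WR << 8) | PACKED_CMD_VER;
		for (i = 0; i < n; i++) {
			hdr[2 * (i + 1)] = sectors[i];		/* CMD23 arg */
			hdr[2 * (i + 1) + 1] = addrs[i];	/* CMD25 arg */
		}
	}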
static int mmc_blk_cmd_err(struct mmc_blk_data *md, struct mmc_card *card,
struct mmc_blk_request *brq, struct request *req,
int ret)
{
struct mmc_queue_req *mq_rq;
mq_rq = container_of(brq, struct mmc_queue_req, brq);
/*
* If this is an SD card and we're writing, we can first
* mark the known good sectors as ok.
@@ -1720,41 +1277,8 @@ static int mmc_blk_cmd_err(struct mmc_blk_data *md, struct mmc_card *card,
ret = blk_end_request(req, 0, blocks << 9);
} else
ret = blk_end_request(req, 0, brq->data.bytes_xfered);
} else {
if (mq_rq->packed_cmd == MMC_PACKED_NONE) {
ret = blk_end_request(req, 0, brq->data.bytes_xfered);
}
}
return ret;
}
static int mmc_blk_end_packed_req(struct mmc_queue *mq,
struct mmc_queue_req *mq_rq)
{
struct request *prq;
int idx = mq_rq->packed_fail_idx, i = 0;
int ret = 0;
while (!list_empty(&mq_rq->packed_list)) {
prq = list_entry_rq(mq_rq->packed_list.next);
if (idx == i) {
/* retry from error index */
mq_rq->packed_num -= idx;
mq_rq->req = prq;
ret = 1;
if (mq_rq->packed_num == MMC_PACKED_N_SINGLE) {
list_del_init(&prq->queuelist);
mmc_blk_clear_packed(mq_rq);
}
return ret;
}
list_del_init(&prq->queuelist);
blk_end_request(prq, 0, blk_rq_bytes(prq));
i++;
}
mmc_blk_clear_packed(mq_rq);
} else
ret = blk_end_request(req, 0, brq->data.bytes_xfered);
return ret;
}
@@ -1766,24 +1290,15 @@ static int mmc_blk_issue_rw_rq(struct mmc_queue *mq, struct request *rqc)
int ret = 1, disable_multi = 0, retry = 0, type;
enum mmc_blk_status status;
struct mmc_queue_req *mq_rq;
struct request *req, *prq;
struct request *req;
struct mmc_async_req *areq;
const u8 packed_num = 2;
u8 reqs = 0;
if (!rqc && !mq->mqrq_prev->req)
return 0;
if (rqc)
reqs = mmc_blk_prep_packed_list(mq, rqc);
do {
if (rqc) {
if (reqs >= packed_num)
mmc_blk_packed_hdr_wrq_prep(mq->mqrq_cur,
card, mq);
else
mmc_blk_rw_rq_prep(mq->mqrq_cur, card, 0, mq);
mmc_blk_rw_rq_prep(mq->mqrq_cur, card, 0, mq);
areq = &mq->mqrq_cur->mmc_active;
} else
areq = NULL;
@@ -1804,15 +1319,8 @@ static int mmc_blk_issue_rw_rq(struct mmc_queue *mq, struct request *rqc)
* A block was successfully transferred.
*/
mmc_blk_reset_success(md, type);
if (mq_rq->packed_cmd != MMC_PACKED_NONE) {
ret = mmc_blk_end_packed_req(mq, mq_rq);
break;
} else {
ret = blk_end_request(req, 0,
ret = blk_end_request(req, 0,
brq->data.bytes_xfered);
}
/*
* If the blk_end_request function returns non-zero even
* though all data has been transferred and no errors
@@ -1845,8 +1353,7 @@ static int mmc_blk_issue_rw_rq(struct mmc_queue *mq, struct request *rqc)
err = mmc_blk_reset(md, card->host, type);
if (!err)
break;
if (err == -ENODEV ||
mq_rq->packed_cmd != MMC_PACKED_NONE)
if (err == -ENODEV)
goto cmd_abort;
/* Fall through */
}
@@ -1873,62 +1380,26 @@ static int mmc_blk_issue_rw_rq(struct mmc_queue *mq, struct request *rqc)
}
if (ret) {
if (mq_rq->packed_cmd == MMC_PACKED_NONE) {
/*
* In case of an incomplete request
* prepare it again and resend.
*/
mmc_blk_rw_rq_prep(mq_rq, card,
disable_multi, mq);
mmc_start_req(card->host,
&mq_rq->mmc_active, NULL);
} else {
mmc_blk_packed_hdr_wrq_prep(mq_rq, card, mq);
mmc_start_req(card->host,
&mq_rq->mmc_active, NULL);
}
/*
* In case of an incomplete request
* prepare it again and resend.
*/
mmc_blk_rw_rq_prep(mq_rq, card, disable_multi, mq);
mmc_start_req(card->host, &mq_rq->mmc_active, NULL);
}
} while (ret);
return 1;
cmd_abort:
if (mq_rq->packed_cmd == MMC_PACKED_NONE) {
if (mmc_card_removed(card))
req->cmd_flags |= REQ_QUIET;
while (ret)
ret = blk_end_request(req, -EIO,
blk_rq_cur_bytes(req));
} else {
while (!list_empty(&mq_rq->packed_list)) {
prq = list_entry_rq(mq_rq->packed_list.next);
list_del_init(&prq->queuelist);
blk_end_request(prq, -EIO, blk_rq_bytes(prq));
}
mmc_blk_clear_packed(mq_rq);
}
if (mmc_card_removed(card))
req->cmd_flags |= REQ_QUIET;
while (ret)
ret = blk_end_request(req, -EIO,
blk_rq_cur_bytes(req));
start_new_req:
if (rqc) {
/*
* If the current request is packed, it needs to be put back.
*/
if (mq->mqrq_cur->packed_cmd != MMC_PACKED_NONE) {
while (!list_empty(&mq->mqrq_cur->packed_list)) {
prq = list_entry_rq(
mq->mqrq_cur->packed_list.prev);
if (prq->queuelist.prev !=
&mq->mqrq_cur->packed_list) {
list_del_init(&prq->queuelist);
spin_lock_irq(mq->queue->queue_lock);
blk_requeue_request(mq->queue, prq);
spin_unlock_irq(mq->queue->queue_lock);
} else {
list_del_init(&prq->queuelist);
}
}
mmc_blk_clear_packed(mq->mqrq_cur);
}
mmc_blk_rw_rq_prep(mq->mqrq_cur, card, 0, mq);
mmc_start_req(card->host, &mq->mqrq_cur->mmc_active, NULL);
}
@@ -1962,8 +1433,6 @@ static int mmc_blk_issue_rq(struct mmc_queue *mq, struct request *req)
goto out;
}
mmc_blk_write_packing_control(mq, req);
if (req && req->cmd_flags & REQ_SANITIZE) {
/* complete ongoing async transfer before issuing sanitize */
if (card->host && card->host->areq)
@@ -2195,8 +1664,6 @@ static void mmc_blk_remove_req(struct mmc_blk_data *md)
if (md) {
card = md->queue.card;
device_remove_file(disk_to_dev(md->disk),
&md->num_wr_reqs_to_start_packing);
if (md->disk->flags & GENHD_FL_UP) {
device_remove_file(disk_to_dev(md->disk), &md->force_ro);
if ((md->area_type & MMC_BLK_DATA_AREA_BOOT) &&
@@ -2264,16 +1731,6 @@ static int mmc_add_disk(struct mmc_blk_data *md)
goto power_ro_lock_fail;
}
md->num_wr_reqs_to_start_packing.show =
num_wr_reqs_to_start_packing_show;
md->num_wr_reqs_to_start_packing.store =
num_wr_reqs_to_start_packing_store;
sysfs_attr_init(&md->num_wr_reqs_to_start_packing.attr);
md->num_wr_reqs_to_start_packing.attr.name =
"num_wr_reqs_to_start_packing";
md->num_wr_reqs_to_start_packing.attr.mode = S_IRUGO | S_IWUSR;
ret = device_create_file(disk_to_dev(md->disk),
&md->num_wr_reqs_to_start_packing);
if (ret)
goto power_ro_lock_fail;

(File diff suppressed because it is too large.)


@@ -24,13 +24,6 @@
#define MMC_QUEUE_SUSPENDED (1 << 0)
/*
* Based on benchmark tests, the default number of requests that triggers
* write packing was chosen to keep read latency as low as possible while
* maintaining high write throughput.
*/
#define DEFAULT_NUM_REQS_TO_START_PACK 17
/*
* Prepare a MMC request. This just filters out odd stuff.
*/
@@ -189,12 +182,9 @@ int mmc_init_queue(struct mmc_queue *mq, struct mmc_card *card,
memset(&mq->mqrq_cur, 0, sizeof(mq->mqrq_cur));
memset(&mq->mqrq_prev, 0, sizeof(mq->mqrq_prev));
INIT_LIST_HEAD(&mqrq_cur->packed_list);
INIT_LIST_HEAD(&mqrq_prev->packed_list);
mq->mqrq_cur = mqrq_cur;
mq->mqrq_prev = mqrq_prev;
mq->queue->queuedata = mq;
mq->num_wr_reqs_to_start_packing = DEFAULT_NUM_REQS_TO_START_PACK;
blk_queue_prep_rq(mq->queue, mmc_prep_request);
queue_flag_set_unlocked(QUEUE_FLAG_NONROT, mq->queue);
@@ -395,35 +385,6 @@ void mmc_queue_resume(struct mmc_queue *mq)
}
}
static unsigned int mmc_queue_packed_map_sg(struct mmc_queue *mq,
struct mmc_queue_req *mqrq,
struct scatterlist *sg)
{
struct scatterlist *__sg;
unsigned int sg_len = 0;
struct request *req;
enum mmc_packed_cmd cmd;
cmd = mqrq->packed_cmd;
if (cmd == MMC_PACKED_WRITE) {
__sg = sg;
sg_set_buf(__sg, mqrq->packed_cmd_hdr,
sizeof(mqrq->packed_cmd_hdr));
sg_len++;
__sg->page_link &= ~0x02;
}
__sg = sg + sg_len;
list_for_each_entry(req, &mqrq->packed_list, queuelist) {
sg_len += blk_rq_map_sg(mq->queue, req, __sg);
__sg = sg + (sg_len - 1);
(__sg++)->page_link &= ~0x02;
}
sg_mark_end(sg + (sg_len - 1));
return sg_len;
}
/*
* Prepare the sg list(s) to be handed of to the host driver
*/
@@ -434,19 +395,12 @@ unsigned int mmc_queue_map_sg(struct mmc_queue *mq, struct mmc_queue_req *mqrq)
struct scatterlist *sg;
int i;
if (!mqrq->bounce_buf) {
if (!list_empty(&mqrq->packed_list))
return mmc_queue_packed_map_sg(mq, mqrq, mqrq->sg);
else
return blk_rq_map_sg(mq->queue, mqrq->req, mqrq->sg);
}
if (!mqrq->bounce_buf)
return blk_rq_map_sg(mq->queue, mqrq->req, mqrq->sg);
BUG_ON(!mqrq->bounce_sg);
if (!list_empty(&mqrq->packed_list))
sg_len = mmc_queue_packed_map_sg(mq, mqrq, mqrq->bounce_sg);
else
sg_len = blk_rq_map_sg(mq->queue, mqrq->req, mqrq->bounce_sg);
sg_len = blk_rq_map_sg(mq->queue, mqrq->req, mqrq->bounce_sg);
mqrq->bounce_sg_len = sg_len;


@@ -12,22 +12,6 @@ struct mmc_blk_request {
struct mmc_data data;
};
enum mmc_blk_status {
MMC_BLK_SUCCESS = 0,
MMC_BLK_PARTIAL,
MMC_BLK_CMD_ERR,
MMC_BLK_RETRY,
MMC_BLK_ABORT,
MMC_BLK_DATA_ERR,
MMC_BLK_ECC_ERR,
MMC_BLK_NOMEDIUM,
};
enum mmc_packed_cmd {
MMC_PACKED_NONE = 0,
MMC_PACKED_WRITE,
};
struct mmc_queue_req {
struct request *req;
struct mmc_blk_request brq;
@@ -36,12 +20,6 @@ struct mmc_queue_req {
struct scatterlist *bounce_sg;
unsigned int bounce_sg_len;
struct mmc_async_req mmc_active;
struct list_head packed_list;
u32 packed_cmd_hdr[128];
unsigned int packed_blocks;
enum mmc_packed_cmd packed_cmd;
int packed_fail_idx;
u8 packed_num;
};
struct mmc_queue {
@@ -55,11 +33,6 @@ struct mmc_queue {
struct mmc_queue_req mqrq[2];
struct mmc_queue_req *mqrq_cur;
struct mmc_queue_req *mqrq_prev;
bool wr_packing_enabled;
int num_of_potential_packed_wr_reqs;
int num_wr_reqs_to_start_packing;
int (*err_check_fn) (struct mmc_card *, struct mmc_async_req *);
void (*packed_test_fn) (struct request_queue *, struct mmc_queue_req *);
};
extern int mmc_init_queue(struct mmc_queue *, struct mmc_card *, spinlock_t *,
@@ -73,6 +46,4 @@ extern unsigned int mmc_queue_map_sg(struct mmc_queue *,
extern void mmc_queue_bounce_pre(struct mmc_queue_req *);
extern void mmc_queue_bounce_post(struct mmc_queue_req *);
extern void print_mmc_packing_stats(struct mmc_card *card);
#endif


@@ -251,8 +251,6 @@ struct mmc_card *mmc_alloc_card(struct mmc_host *host, struct device_type *type)
card->dev.release = mmc_release_card;
card->dev.type = type;
spin_lock_init(&card->wr_pack_stats.lock);
return card;
}
@@ -355,8 +353,6 @@ void mmc_remove_card(struct mmc_card *card)
device_del(&card->dev);
}
kfree(card->wr_pack_stats.packing_events);
put_device(&card->dev);
}


@@ -318,164 +318,6 @@ static const struct file_operations mmc_dbg_ext_csd_fops = {
.llseek = default_llseek,
};
static int mmc_wr_pack_stats_open(struct inode *inode, struct file *filp)
{
struct mmc_card *card = inode->i_private;
filp->private_data = card;
card->wr_pack_stats.print_in_read = 1;
return 0;
}
#define TEMP_BUF_SIZE 256
static ssize_t mmc_wr_pack_stats_read(struct file *filp, char __user *ubuf,
size_t cnt, loff_t *ppos)
{
struct mmc_card *card = filp->private_data;
struct mmc_wr_pack_stats *pack_stats;
int i;
int max_num_of_packed_reqs = 0;
char *temp_buf;
if (!card)
return cnt;
if (!card->wr_pack_stats.print_in_read)
return 0;
if (!card->wr_pack_stats.enabled) {
pr_info("%s: write packing statistics are disabled\n",
mmc_hostname(card->host));
goto exit;
}
pack_stats = &card->wr_pack_stats;
if (!pack_stats->packing_events) {
pr_info("%s: NULL packing_events\n", mmc_hostname(card->host));
goto exit;
}
max_num_of_packed_reqs = card->ext_csd.max_packed_writes;
temp_buf = kmalloc(TEMP_BUF_SIZE, GFP_KERNEL);
if (!temp_buf)
goto exit;
spin_lock(&pack_stats->lock);
snprintf(temp_buf, TEMP_BUF_SIZE, "%s: write packing statistics:\n",
mmc_hostname(card->host));
strlcat(ubuf, temp_buf, cnt);
for (i = 1 ; i <= max_num_of_packed_reqs ; ++i) {
if (pack_stats->packing_events[i]) {
snprintf(temp_buf, TEMP_BUF_SIZE,
"%s: Packed %d reqs - %d times\n",
mmc_hostname(card->host), i,
pack_stats->packing_events[i]);
strlcat(ubuf, temp_buf, cnt);
}
}
snprintf(temp_buf, TEMP_BUF_SIZE,
"%s: stopped packing due to the following reasons:\n",
mmc_hostname(card->host));
strlcat(ubuf, temp_buf, cnt);
if (pack_stats->pack_stop_reason[EXCEEDS_SEGMENTS]) {
snprintf(temp_buf, TEMP_BUF_SIZE,
"%s: %d times: exceed max num of segments\n",
mmc_hostname(card->host),
pack_stats->pack_stop_reason[EXCEEDS_SEGMENTS]);
strlcat(ubuf, temp_buf, cnt);
}
if (pack_stats->pack_stop_reason[EXCEEDS_SECTORS]) {
snprintf(temp_buf, TEMP_BUF_SIZE,
"%s: %d times: exceed max num of sectors\n",
mmc_hostname(card->host),
pack_stats->pack_stop_reason[EXCEEDS_SECTORS]);
strlcat(ubuf, temp_buf, cnt);
}
if (pack_stats->pack_stop_reason[WRONG_DATA_DIR]) {
snprintf(temp_buf, TEMP_BUF_SIZE,
"%s: %d times: wrong data direction\n",
mmc_hostname(card->host),
pack_stats->pack_stop_reason[WRONG_DATA_DIR]);
strlcat(ubuf, temp_buf, cnt);
}
if (pack_stats->pack_stop_reason[FLUSH_OR_DISCARD]) {
snprintf(temp_buf, TEMP_BUF_SIZE,
"%s: %d times: flush or discard\n",
mmc_hostname(card->host),
pack_stats->pack_stop_reason[FLUSH_OR_DISCARD]);
strlcat(ubuf, temp_buf, cnt);
}
if (pack_stats->pack_stop_reason[EMPTY_QUEUE]) {
snprintf(temp_buf, TEMP_BUF_SIZE,
"%s: %d times: empty queue\n",
mmc_hostname(card->host),
pack_stats->pack_stop_reason[EMPTY_QUEUE]);
strlcat(ubuf, temp_buf, cnt);
}
if (pack_stats->pack_stop_reason[REL_WRITE]) {
snprintf(temp_buf, TEMP_BUF_SIZE,
"%s: %d times: rel write\n",
mmc_hostname(card->host),
pack_stats->pack_stop_reason[REL_WRITE]);
strlcat(ubuf, temp_buf, cnt);
}
if (pack_stats->pack_stop_reason[THRESHOLD]) {
snprintf(temp_buf, TEMP_BUF_SIZE,
"%s: %d times: Threshold\n",
mmc_hostname(card->host),
pack_stats->pack_stop_reason[THRESHOLD]);
strlcat(ubuf, temp_buf, cnt);
}
spin_unlock(&pack_stats->lock);
kfree(temp_buf);
pr_info("%s", ubuf);
exit:
if (card->wr_pack_stats.print_in_read == 1) {
card->wr_pack_stats.print_in_read = 0;
return strnlen(ubuf, cnt);
}
return 0;
}
static ssize_t mmc_wr_pack_stats_write(struct file *filp,
const char __user *ubuf, size_t cnt,
loff_t *ppos)
{
struct mmc_card *card = filp->private_data;
int value;
if (!card)
return cnt;
sscanf(ubuf, "%d", &value);
if (value) {
mmc_blk_init_packed_statistics(card);
} else {
spin_lock(&card->wr_pack_stats.lock);
card->wr_pack_stats.enabled = false;
spin_unlock(&card->wr_pack_stats.lock);
}
return cnt;
}
static const struct file_operations mmc_dbg_wr_pack_stats_fops = {
.open = mmc_wr_pack_stats_open,
.read = mmc_wr_pack_stats_read,
.write = mmc_wr_pack_stats_write,
};
void mmc_add_card_debugfs(struct mmc_card *card)
{
struct mmc_host *host = card->host;
@@ -508,12 +350,6 @@ void mmc_add_card_debugfs(struct mmc_card *card)
&mmc_dbg_ext_csd_fops))
goto err;
if (mmc_card_mmc(card) && (card->ext_csd.rev >= 6) &&
(card->host->caps2 & MMC_CAP2_PACKED_WR))
if (!debugfs_create_file("wr_pack_stats", S_IRUSR, root, card,
&mmc_dbg_wr_pack_stats_fops))
goto err;
return;
err:


@@ -537,10 +537,6 @@ static int mmc_read_ext_csd(struct mmc_card *card, u8 *ext_csd)
} else {
card->ext_csd.data_tag_unit_size = 0;
}
card->ext_csd.max_packed_writes =
ext_csd[EXT_CSD_MAX_PACKED_WRITES];
card->ext_csd.max_packed_reads =
ext_csd[EXT_CSD_MAX_PACKED_READS];
}
out:
@@ -1278,43 +1274,6 @@ static int mmc_init_card(struct mmc_host *host, u32 ocr,
}
}
if ((host->caps2 & MMC_CAP2_PACKED_CMD) &&
(card->ext_csd.max_packed_writes > 0) &&
(card->ext_csd.max_packed_reads > 0)) {
err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
EXT_CSD_EXP_EVENTS_CTRL,
EXT_CSD_PACKED_EVENT_EN,
card->ext_csd.generic_cmd6_time);
if (err && err != -EBADMSG)
goto free_card;
if (err) {
pr_warning("%s: Enabling packed event failed\n",
mmc_hostname(card->host));
card->ext_csd.packed_event_en = 0;
err = 0;
} else {
card->ext_csd.packed_event_en = 1;
}
}
if (!oldcard) {
if ((host->caps2 & MMC_CAP2_PACKED_CMD) &&
(card->ext_csd.max_packed_writes > 0)) {
/*
* We would like to keep the statistics in an index
* that equals the num of packed requests
* (1 to max_packed_writes)
*/
card->wr_pack_stats.packing_events = kzalloc(
(card->ext_csd.max_packed_writes + 1) *
sizeof(*card->wr_pack_stats.packing_events),
GFP_KERNEL);
if (!card->wr_pack_stats.packing_events)
goto free_card;
}
}
if (!oldcard)
host->card = card;


@@ -335,7 +335,6 @@ int mmc_send_ext_csd(struct mmc_card *card, u8 *ext_csd)
return mmc_send_cxd_data(card, card->host, MMC_SEND_EXT_CSD,
ext_csd, 512);
}
EXPORT_SYMBOL_GPL(mmc_send_ext_csd);
int mmc_spi_read_ocr(struct mmc_host *host, int highcap, u32 *ocrp)
{


@@ -5755,9 +5755,6 @@ msmsdcc_probe(struct platform_device *pdev)
mmc->caps |= (MMC_CAP_SET_XPC_330 | MMC_CAP_SET_XPC_300 |
MMC_CAP_SET_XPC_180);
/* packed write */
mmc->caps2 |= plat->packed_write;
mmc->caps2 |= (MMC_CAP2_BOOTPART_NOACC | MMC_CAP2_DETECT_ON_ERR);
mmc->caps2 |= MMC_CAP2_SANITIZE;
mmc->caps2 |= MMC_CAP2_INIT_BKOPS;


@@ -53,9 +53,6 @@ struct mmc_ext_csd {
u8 part_config;
u8 cache_ctrl;
u8 rst_n_function;
u8 max_packed_writes;
u8 max_packed_reads;
u8 packed_event_en;
unsigned int part_time; /* Units: ms */
unsigned int sa_timeout; /* Units: 100ns */
unsigned int generic_cmd6_time; /* Units: 10ms */
@@ -209,25 +206,6 @@ struct mmc_part {
#define MMC_BLK_DATA_AREA_GP (1<<2)
};
enum mmc_packed_stop_reasons {
EXCEEDS_SEGMENTS = 0,
EXCEEDS_SECTORS,
WRONG_DATA_DIR,
FLUSH_OR_DISCARD,
EMPTY_QUEUE,
REL_WRITE,
THRESHOLD,
MAX_REASONS,
};
struct mmc_wr_pack_stats {
u32 *packing_events;
u32 pack_stop_reason[MAX_REASONS];
spinlock_t lock;
bool enabled;
bool print_in_read;
};
/*
* MMC device
*/
@@ -302,7 +280,6 @@ struct mmc_card {
struct mmc_part part[MMC_NUM_PHY_PARTITION]; /* physical partitions */
unsigned int nr_parts;
struct mmc_wr_pack_stats wr_pack_stats; /* packed commands stats*/
};
/*
@@ -527,8 +504,4 @@ extern void mmc_unregister_driver(struct mmc_driver *);
extern void mmc_fixup_device(struct mmc_card *card,
const struct mmc_fixup *table);
extern struct mmc_wr_pack_stats *mmc_blk_get_packed_statistics(
struct mmc_card *card);
extern void mmc_blk_init_packed_statistics(struct mmc_card *card);
#endif /* LINUX_MMC_CARD_H */


@@ -18,9 +18,6 @@ struct mmc_request;
struct mmc_command {
u32 opcode;
u32 arg;
#define MMC_CMD23_ARG_REL_WR (1 << 31)
#define MMC_CMD23_ARG_PACKED ((0 << 31) | (1 << 30))
#define MMC_CMD23_ARG_TAG_REQ (1 << 29)
u32 resp[4];
unsigned int flags; /* expected response type */
#define MMC_RSP_PRESENT (1 << 0)
@@ -151,7 +148,6 @@ extern int mmc_wait_for_app_cmd(struct mmc_host *, struct mmc_card *,
extern void mmc_start_bkops(struct mmc_card *card, bool from_exception);
extern int __mmc_switch(struct mmc_card *, u8, u8, u8, unsigned int, bool);
extern int mmc_switch(struct mmc_card *, u8, u8, u8, unsigned int);
extern int mmc_send_ext_csd(struct mmc_card *card, u8 *ext_csd);
#define MMC_ERASE_ARG 0x00000000
#define MMC_SECURE_ERASE_ARG 0x80000000


@@ -239,11 +239,6 @@ struct mmc_host {
#define MMC_CAP2_BROKEN_VOLTAGE (1 << 7) /* Use the broken voltage */
#define MMC_CAP2_DETECT_ON_ERR (1 << 8) /* On I/O err check card removal */
#define MMC_CAP2_HC_ERASE_SZ (1 << 9) /* High-capacity erase size */
#define MMC_CAP2_PACKED_RD (1 << 10) /* Allow packed read */
#define MMC_CAP2_PACKED_WR (1 << 11) /* Allow packed write */
#define MMC_CAP2_PACKED_CMD (MMC_CAP2_PACKED_RD | \
MMC_CAP2_PACKED_WR) /* Allow packed commands */
#define MMC_CAP2_PACKED_WR_CONTROL (1 << 12) /* Allow write packing control */
#define MMC_CAP2_SANITIZE (1 << 13) /* Support Sanitize */
#define MMC_CAP2_INIT_BKOPS (1 << 15) /* Need to set BKOPS_EN */
#define MMC_CAP2_POWER_OFF_VCCQ_DURING_SUSPEND (1 << 16)


@@ -141,7 +141,6 @@ static inline bool mmc_op_multi(u32 opcode)
#define R1_SWITCH_ERROR (1 << 7) /* sx, c */
#define R1_EXCEPTION_EVENT (1 << 6) /* sx, a */
#define R1_APP_CMD (1 << 5) /* sr, c */
#define R1_EXP_EVENT (1 << 6) /* sr, a */
#define R1_STATE_IDLE 0
#define R1_STATE_READY 1
@@ -277,8 +276,6 @@ struct _mmc_csd {
#define EXT_CSD_FLUSH_CACHE 32 /* W */
#define EXT_CSD_CACHE_CTRL 33 /* R/W */
#define EXT_CSD_POWER_OFF_NOTIFICATION 34 /* R/W */
#define EXT_CSD_PACKED_FAILURE_INDEX 35 /* RO */
#define EXT_CSD_PACKED_CMD_STATUS 36 /* RO */
#define EXT_CSD_EXP_EVENTS_STATUS 54 /* RO, 2 bytes */
#define EXT_CSD_EXP_EVENTS_CTRL 56 /* R/W, 2 bytes */
#define EXT_CSD_DATA_SECTOR_SIZE 61 /* R */
@@ -328,8 +325,6 @@ struct _mmc_csd {
#define EXT_CSD_CACHE_SIZE 249 /* RO, 4 bytes */
#define EXT_CSD_TAG_UNIT_SIZE 498 /* RO */
#define EXT_CSD_DATA_TAG_SUPPORT 499 /* RO */
#define EXT_CSD_MAX_PACKED_WRITES 500 /* RO */
#define EXT_CSD_MAX_PACKED_READS 501 /* RO */
#define EXT_CSD_BKOPS_SUPPORT 502 /* RO */
#define EXT_CSD_HPI_FEATURES 503 /* RO */
@@ -391,18 +386,12 @@ struct _mmc_csd {
#define EXT_CSD_PWR_CL_8BIT_SHIFT 4
#define EXT_CSD_PWR_CL_4BIT_SHIFT 0
#define EXT_CSD_PACKED_EVENT_EN (1 << 3)
#define EXT_CSD_PACKED_GENERIC_ERROR (1 << 0)
#define EXT_CSD_PACKED_INDEXED_ERROR (1 << 1)
/*
* EXCEPTION_EVENT_STATUS field
*/
#define EXT_CSD_URGENT_BKOPS BIT(0)
#define EXT_CSD_DYNCAP_NEEDED BIT(1)
#define EXT_CSD_SYSPOOL_EXHAUSTED BIT(2)
#define EXT_CSD_PACKED_FAILURE BIT(3)
/*
* BKOPS status level