tty: n_smux: Add retry queue size flow control

If the receive retry queue grows too large, automatically enable
flow control to avoid having to drop buffers.

Change-Id: If27599015b362ce013e177ee350e026933390d72
Signed-off-by: Eric Holmberg <eholmber@codeaurora.org>
Author: Eric Holmberg <eholmber@codeaurora.org>
Date: 2012-06-26 13:29:14 -06:00
Committed by: Stephen Boyd
Parent: 64431703a0
Commit: 0b5be95d8c
4 changed files with 284 additions and 31 deletions
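
Before the diff itself, a minimal sketch of the client-facing behavior this commit adds, using only the APIs and events introduced below. This is an illustrative fragment, not code from the commit: the callback and helper names and the lcid parameter are hypothetical, and it assumes the public header lands at <linux/smux.h>. Note that lch_init() now enables SMUX_CH_OPTION_AUTO_REMOTE_TX_STOP by default, so a client only calls msm_smux_set_ch_option() to opt out or to observe the watermark events.

	#include <linux/types.h>
	#include <linux/smux.h>

	/* hypothetical notify callback; both new watermark events carry NULL metadata */
	static void client_notify(void *priv, int event, const void *metadata)
	{
		switch (event) {
		case SMUX_RX_RETRY_HIGH_WM_HIT:
			/* RX retry queue reached SMUX_RX_WM_HIGH; remote TX is now flow-controlled */
			break;
		case SMUX_RX_RETRY_LOW_WM_HIT:
			/* RX retry queue drained to SMUX_RX_WM_LOW; remote TX resumed */
			break;
		}
	}

	/* hypothetical helper: clear the default-on auto flow control option */
	static int client_disable_auto_rx_flow(uint8_t lcid)
	{
		return msm_smux_set_ch_option(lcid, 0,
				SMUX_CH_OPTION_AUTO_REMOTE_TX_STOP);
	}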

drivers/tty/n_smux.c

@@ -33,8 +33,6 @@
 #define SMUX_NOTIFY_FIFO_SIZE	128
 #define SMUX_TX_QUEUE_SIZE	256
-#define SMUX_WM_LOW 2
-#define SMUX_WM_HIGH 4
 #define SMUX_PKT_LOG_SIZE	80
 
 /* Maximum size we can accept in a single RX buffer */
@@ -172,12 +170,15 @@ struct smux_lch_t {
 	unsigned local_state;
 	unsigned local_mode;
 	uint8_t local_tiocm;
+	unsigned options;
 
 	unsigned remote_state;
 	unsigned remote_mode;
 	uint8_t remote_tiocm;
 
 	int tx_flow_control;
+	int rx_flow_control_auto;
+	int rx_flow_control_client;
 
 	/* client callbacks and private data */
 	void *priv;
@@ -331,6 +332,7 @@ static int ssr_notifier_cb(struct notifier_block *this,
 				unsigned long code,
 				void *data);
 static void smux_uart_power_on_atomic(void);
+static int smux_rx_flow_control_updated(struct smux_lch_t *ch);
 
 /**
  * Convert TTY Error Flags to string for logging purposes.
@@ -403,10 +405,13 @@ static int lch_init(void)
 		ch->local_state = SMUX_LCH_LOCAL_CLOSED;
 		ch->local_mode = SMUX_LCH_MODE_NORMAL;
 		ch->local_tiocm = 0x0;
+		ch->options = SMUX_CH_OPTION_AUTO_REMOTE_TX_STOP;
 		ch->remote_state = SMUX_LCH_REMOTE_CLOSED;
 		ch->remote_mode = SMUX_LCH_MODE_NORMAL;
 		ch->remote_tiocm = 0x0;
 		ch->tx_flow_control = 0;
+		ch->rx_flow_control_auto = 0;
+		ch->rx_flow_control_client = 0;
 		ch->priv = 0;
 		ch->notify = 0;
 		ch->get_rx_buffer = 0;
@@ -487,6 +492,8 @@ static void smux_lch_purge(void)
 		ch->remote_state = SMUX_LCH_REMOTE_CLOSED;
 		ch->remote_mode = SMUX_LCH_MODE_NORMAL;
 		ch->tx_flow_control = 0;
+		ch->rx_flow_control_auto = 0;
+		ch->rx_flow_control_client = 0;
 
 		/* Purge RX retry queue */
 		if (ch->rx_retry_queue_cnt)
@@ -1352,6 +1359,7 @@ static int smux_handle_rx_data_cmd(struct smux_pkt_t *pkt)
 	uint8_t lcid;
 	int ret = 0;
 	int do_retry = 0;
+	int tx_ready = 0;
 	int tmp;
 	int rx_len;
 	struct smux_lch_t *ch;
@@ -1395,8 +1403,20 @@ static int smux_handle_rx_data_cmd(struct smux_pkt_t *pkt)
 	if (!list_empty(&ch->rx_retry_queue)) {
 		do_retry = 1;
 
+		if ((ch->options & SMUX_CH_OPTION_AUTO_REMOTE_TX_STOP) &&
+		    !ch->rx_flow_control_auto &&
+		    ((ch->rx_retry_queue_cnt + 1) >= SMUX_RX_WM_HIGH)) {
+			/* need to flow control RX */
+			ch->rx_flow_control_auto = 1;
+			tx_ready |= smux_rx_flow_control_updated(ch);
+			schedule_notify(ch->lcid, SMUX_RX_RETRY_HIGH_WM_HIT,
+					NULL);
+		}
 		if ((ch->rx_retry_queue_cnt + 1) > SMUX_RX_RETRY_MAX_PKTS) {
 			/* retry queue full */
+			pr_err("%s: ch %d RX retry queue full\n",
+					__func__, lcid);
 			schedule_notify(lcid, SMUX_READ_FAIL, NULL);
 			ret = -ENOMEM;
 			spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);
@@ -1420,7 +1440,7 @@ static int smux_handle_rx_data_cmd(struct smux_pkt_t *pkt)
 			}
 			ack_pkt->hdr.pad_len = pkt->hdr.pad_len;
 			smux_tx_queue(ack_pkt, ch, 0);
-			list_channel(ch);
+			tx_ready = 1;
 		} else {
 			pr_err("%s: Remote loopack allocation failure\n",
 					__func__);
@@ -1446,6 +1466,8 @@ static int smux_handle_rx_data_cmd(struct smux_pkt_t *pkt)
 			/* buffer allocation failed - add to retry queue */
 			do_retry = 1;
 		} else if (tmp < 0) {
+			pr_err("%s: ch %d Client RX buffer alloc failed %d\n",
+					__func__, lcid, tmp);
 			schedule_notify(lcid, SMUX_READ_FAIL, NULL);
 			ret = -ENOMEM;
 		}
@@ -1492,6 +1514,8 @@ static int smux_handle_rx_data_cmd(struct smux_pkt_t *pkt)
 		spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);
 	}
 
+	if (tx_ready)
+		list_channel(ch);
 out:
 	return ret;
 }
@@ -2304,18 +2328,32 @@ static void smux_inactivity_worker(struct work_struct *work)
 /**
  * Remove RX retry packet from channel and free it.
  *
- * Must be called with state_lock_lhb1 locked.
- *
  * @ch    Channel for retry packet
  * @retry Retry packet to remove
+ *
+ * @returns 1 if flow control updated; 0 otherwise
+ *
+ * Must be called with state_lock_lhb1 locked.
  */
-void smux_remove_rx_retry(struct smux_lch_t *ch,
+int smux_remove_rx_retry(struct smux_lch_t *ch,
 		struct smux_rx_pkt_retry *retry)
 {
+	int tx_ready = 0;
+
 	list_del(&retry->rx_retry_list);
 	--ch->rx_retry_queue_cnt;
 	smux_free_pkt(retry->pkt);
 	kfree(retry);
+
+	if ((ch->options & SMUX_CH_OPTION_AUTO_REMOTE_TX_STOP) &&
+			(ch->rx_retry_queue_cnt <= SMUX_RX_WM_LOW) &&
+			ch->rx_flow_control_auto) {
+		ch->rx_flow_control_auto = 0;
+		smux_rx_flow_control_updated(ch);
+		schedule_notify(ch->lcid, SMUX_RX_RETRY_LOW_WM_HIT, NULL);
+		tx_ready = 1;
+	}
+	return tx_ready;
 }
 
 /**
@@ -2386,6 +2424,8 @@ static void smux_rx_retry_worker(struct work_struct *work)
 	union notifier_metadata metadata;
 	int tmp;
 	unsigned long flags;
+	int immediate_retry = 0;
+	int tx_ready = 0;
 
 	ch = container_of(work, struct smux_lch_t, rx_retry_work.work);
@@ -2397,7 +2437,7 @@ static void smux_rx_retry_worker(struct work_struct *work)
 			retry = list_first_entry(&ch->rx_retry_queue,
 					struct smux_rx_pkt_retry,
 					rx_retry_list);
-			smux_remove_rx_retry(ch, retry);
+			(void)smux_remove_rx_retry(ch, retry);
 		}
 	}
@@ -2412,7 +2452,8 @@ static void smux_rx_retry_worker(struct work_struct *work)
 				rx_retry_list);
 	spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);
 
-	SMUX_DBG("%s: retrying rx pkt %p\n", __func__, retry);
+	SMUX_DBG("%s: ch %d retrying rx pkt %p\n",
+			__func__, ch->lcid, retry);
 	metadata.read.pkt_priv = 0;
 	metadata.read.buffer = 0;
 	tmp = ch->get_rx_buffer(ch->priv,
@@ -2421,33 +2462,44 @@ static void smux_rx_retry_worker(struct work_struct *work)
 			retry->pkt->hdr.payload_len);
 	if (tmp == 0 && metadata.read.buffer) {
 		/* have valid RX buffer */
 		memcpy(metadata.read.buffer, retry->pkt->payload,
 						retry->pkt->hdr.payload_len);
 		metadata.read.len = retry->pkt->hdr.payload_len;
 		spin_lock_irqsave(&ch->state_lock_lhb1, flags);
-		smux_remove_rx_retry(ch, retry);
+		tx_ready = smux_remove_rx_retry(ch, retry);
 		spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);
 		schedule_notify(ch->lcid, SMUX_READ_DONE, &metadata);
+		if (tx_ready)
+			list_channel(ch);
+
+		immediate_retry = 1;
 	} else if (tmp == -EAGAIN ||
 			(tmp == 0 && !metadata.read.buffer)) {
 		/* retry again */
 		retry->timeout_in_ms <<= 1;
 		if (retry->timeout_in_ms > SMUX_RX_RETRY_MAX_MS) {
 			/* timed out */
+			pr_err("%s: ch %d RX retry client timeout\n",
+					__func__, ch->lcid);
 			spin_lock_irqsave(&ch->state_lock_lhb1, flags);
-			smux_remove_rx_retry(ch, retry);
-			schedule_notify(ch->lcid, SMUX_READ_FAIL, NULL);
+			tx_ready = smux_remove_rx_retry(ch, retry);
 			spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);
+			schedule_notify(ch->lcid, SMUX_READ_FAIL, NULL);
+			if (tx_ready)
+				list_channel(ch);
 		}
 	} else {
 		/* client error - drop packet */
+		pr_err("%s: ch %d RX retry client failed (%d)\n",
+				__func__, ch->lcid, tmp);
 		spin_lock_irqsave(&ch->state_lock_lhb1, flags);
-		smux_remove_rx_retry(ch, retry);
+		tx_ready = smux_remove_rx_retry(ch, retry);
 		spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);
 		schedule_notify(ch->lcid, SMUX_READ_FAIL, NULL);
+		if (tx_ready)
+			list_channel(ch);
 	}
 
 	/* schedule next retry */
@@ -2456,8 +2508,12 @@ static void smux_rx_retry_worker(struct work_struct *work)
 		retry = list_first_entry(&ch->rx_retry_queue,
 					struct smux_rx_pkt_retry,
 					rx_retry_list);
-		queue_delayed_work(smux_rx_wq, &ch->rx_retry_work,
-				msecs_to_jiffies(retry->timeout_in_ms));
+
+		if (immediate_retry)
+			queue_delayed_work(smux_rx_wq, &ch->rx_retry_work, 0);
+		else
+			queue_delayed_work(smux_rx_wq, &ch->rx_retry_work,
+					msecs_to_jiffies(retry->timeout_in_ms));
 	}
 	spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);
 }
@@ -2563,7 +2619,7 @@ static void smux_tx_worker(struct work_struct *work)
 		if (smux.power_state != SMUX_PWR_ON) {
 			/* channel not ready to transmit */
-			SMUX_DBG("%s: can not tx with power state %d\n",
+			SMUX_DBG("%s: waiting for link up (state %d)\n",
 					__func__,
 					smux.power_state);
 			spin_unlock_irqrestore(&smux.tx_lock_lha2, flags);
@@ -2606,7 +2662,7 @@ static void smux_tx_worker(struct work_struct *work)
 				--ch->tx_pending_data_cnt;
 				if (ch->notify_lwm &&
 					ch->tx_pending_data_cnt
-						<= SMUX_WM_LOW) {
+						<= SMUX_TX_WM_LOW) {
 					ch->notify_lwm = 0;
 					low_wm_notif = 1;
 				}
@@ -2633,6 +2689,34 @@ static void smux_tx_worker(struct work_struct *work)
 		}
 	}
 
+/**
+ * Update the RX flow control (sent in the TIOCM Status command).
+ *
+ * @ch  Channel for update
+ *
+ * @returns 1 for updated, 0 for not updated
+ *
+ * Must be called with ch->state_lock_lhb1 locked.
+ */
+static int smux_rx_flow_control_updated(struct smux_lch_t *ch)
+{
+	int updated = 0;
+	int prev_state;
+
+	prev_state = ch->local_tiocm & SMUX_CMD_STATUS_FLOW_CNTL;
+
+	if (ch->rx_flow_control_client || ch->rx_flow_control_auto)
+		ch->local_tiocm |= SMUX_CMD_STATUS_FLOW_CNTL;
+	else
+		ch->local_tiocm &= ~SMUX_CMD_STATUS_FLOW_CNTL;
+
+	if (prev_state != (ch->local_tiocm & SMUX_CMD_STATUS_FLOW_CNTL)) {
+		smux_send_status_cmd(ch);
+		updated = 1;
+	}
+	return updated;
+}
+
 /**********************************************************************/
 /*                          Kernel API                                */
@@ -2675,17 +2759,30 @@ int msm_smux_set_ch_option(uint8_t lcid, uint32_t set, uint32_t clear)
 	if (clear & SMUX_CH_OPTION_REMOTE_LOOPBACK)
 		ch->local_mode = SMUX_LCH_MODE_NORMAL;
 
-	/* Flow control */
+	/* RX Flow control */
 	if (set & SMUX_CH_OPTION_REMOTE_TX_STOP) {
-		ch->local_tiocm |= SMUX_CMD_STATUS_FLOW_CNTL;
-		ret = smux_send_status_cmd(ch);
-		tx_ready = 1;
+		ch->rx_flow_control_client = 1;
+		tx_ready |= smux_rx_flow_control_updated(ch);
 	}
 
 	if (clear & SMUX_CH_OPTION_REMOTE_TX_STOP) {
-		ch->local_tiocm &= ~SMUX_CMD_STATUS_FLOW_CNTL;
-		ret = smux_send_status_cmd(ch);
-		tx_ready = 1;
+		ch->rx_flow_control_client = 0;
+		tx_ready |= smux_rx_flow_control_updated(ch);
+	}
+
+	/* Auto RX Flow Control */
+	if (set & SMUX_CH_OPTION_AUTO_REMOTE_TX_STOP) {
+		SMUX_DBG("%s: auto rx flow control option enabled\n",
+				__func__);
+		ch->options |= SMUX_CH_OPTION_AUTO_REMOTE_TX_STOP;
+	}
+
+	if (clear & SMUX_CH_OPTION_AUTO_REMOTE_TX_STOP) {
+		SMUX_DBG("%s: auto rx flow control option disabled\n",
+				__func__);
+		ch->options &= ~SMUX_CH_OPTION_AUTO_REMOTE_TX_STOP;
+		ch->rx_flow_control_auto = 0;
+		tx_ready |= smux_rx_flow_control_updated(ch);
 	}
 
 	spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);
@@ -2909,16 +3006,16 @@ int msm_smux_write(uint8_t lcid, void *pkt_priv, const void *data, int len)
 	/* verify high watermark */
 	SMUX_DBG("%s: pending %d", __func__, ch->tx_pending_data_cnt);
 
-	if (ch->tx_pending_data_cnt >= SMUX_WM_HIGH) {
+	if (ch->tx_pending_data_cnt >= SMUX_TX_WM_HIGH) {
 		pr_err("%s: ch %d high watermark %d exceeded %d\n",
-				__func__, lcid, SMUX_WM_HIGH,
+				__func__, lcid, SMUX_TX_WM_HIGH,
 				ch->tx_pending_data_cnt);
 		ret = -EAGAIN;
 		goto out_inner;
 	}
 
 	/* queue packet for transmit */
-	if (++ch->tx_pending_data_cnt == SMUX_WM_HIGH) {
+	if (++ch->tx_pending_data_cnt == SMUX_TX_WM_HIGH) {
 		ch->notify_lwm = 1;
 		pr_err("%s: high watermark hit\n", __func__);
 		schedule_notify(lcid, SMUX_HIGH_WM_HIT, NULL);
@@ -2965,7 +3062,7 @@ int msm_smux_is_ch_full(uint8_t lcid)
 	ch = &smux_lch[lcid];
 	spin_lock_irqsave(&ch->tx_lock_lhb2, flags);
-	if (ch->tx_pending_data_cnt >= SMUX_WM_HIGH)
+	if (ch->tx_pending_data_cnt >= SMUX_TX_WM_HIGH)
 		is_full = 1;
 	spin_unlock_irqrestore(&ch->tx_lock_lhb2, flags);
@@ -2993,7 +3090,7 @@ int msm_smux_is_ch_low(uint8_t lcid)
 	ch = &smux_lch[lcid];
 	spin_lock_irqsave(&ch->tx_lock_lhb2, flags);
-	if (ch->tx_pending_data_cnt <= SMUX_WM_LOW)
+	if (ch->tx_pending_data_cnt <= SMUX_TX_WM_LOW)
 		is_low = 1;
 	spin_unlock_irqrestore(&ch->tx_lock_lhb2, flags);

drivers/tty/smux_private.h

@@ -32,6 +32,10 @@
 /* Maximum number of packets in retry queue */
 #define SMUX_RX_RETRY_MAX_PKTS 32
+#define SMUX_RX_WM_HIGH        16
+#define SMUX_RX_WM_LOW          4
+#define SMUX_TX_WM_LOW          2
+#define SMUX_TX_WM_HIGH         4
 
 struct tty_struct;

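The four watermarks above form a hysteresis band for the new auto flow control: the driver asserts flow control when an incoming packet would bring the RX retry queue up to SMUX_RX_WM_HIGH, and releases it only once the queue drains back down to SMUX_RX_WM_LOW, so the TIOCM status command is not toggled on every packet. A standalone sketch of that predicate, assuming it mirrors the checks added in smux_handle_rx_data_cmd() and smux_remove_rx_retry() above (the helper itself is hypothetical, not part of the driver):

	#include <stdbool.h>

	#define SMUX_RX_WM_HIGH 16
	#define SMUX_RX_WM_LOW   4

	/* hypothetical model of the auto RX flow-control hysteresis */
	static bool rx_flow_stopped(int queued_pkts, bool stopped)
	{
		if (!stopped && (queued_pkts + 1) >= SMUX_RX_WM_HIGH)
			return true;	/* assert at the high watermark */
		if (stopped && queued_pkts <= SMUX_RX_WM_LOW)
			return false;	/* release at the low watermark */
		return stopped;		/* otherwise keep current state */
	}
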
drivers/tty/smux_test.c

@@ -185,6 +185,8 @@ struct smux_mock_callback {
 	int event_disconnected_ssr;
 	int event_low_wm;
 	int event_high_wm;
+	int event_rx_retry_high_wm;
+	int event_rx_retry_low_wm;
 
 	/* TIOCM changes */
 	int event_tiocm;
@@ -235,6 +237,8 @@ void mock_cb_data_reset(struct smux_mock_callback *cb)
 	cb->event_disconnected_ssr = 0;
 	cb->event_low_wm = 0;
 	cb->event_high_wm = 0;
+	cb->event_rx_retry_high_wm = 0;
+	cb->event_rx_retry_low_wm = 0;
 	cb->event_tiocm = 0;
 	cb->tiocm_meta.tiocm_old = 0;
 	cb->tiocm_meta.tiocm_new = 0;
@@ -295,6 +299,8 @@ static int mock_cb_data_print(const struct smux_mock_callback *cb,
 		"\tevent_disconnected_ssr=%d\n"
 		"\tevent_low_wm=%d\n"
 		"\tevent_high_wm=%d\n"
+		"\tevent_rx_retry_high_wm=%d\n"
+		"\tevent_rx_retry_low_wm=%d\n"
 		"\tevent_tiocm=%d\n"
 		"\tevent_read_done=%d\n"
 		"\tevent_read_failed=%d\n"
@@ -311,6 +317,8 @@ static int mock_cb_data_print(const struct smux_mock_callback *cb,
 		cb->event_disconnected_ssr,
 		cb->event_low_wm,
 		cb->event_high_wm,
+		cb->event_rx_retry_high_wm,
+		cb->event_rx_retry_low_wm,
 		cb->event_tiocm,
 		cb->event_read_done,
 		cb->event_read_failed,
@@ -429,6 +437,19 @@ void smux_mock_cb(void *priv, int event, const void *metadata)
 		spin_unlock_irqrestore(&cb_data_ptr->lock, flags);
 		break;
 
+	case SMUX_RX_RETRY_HIGH_WM_HIT:
+		spin_lock_irqsave(&cb_data_ptr->lock, flags);
+		++cb_data_ptr->event_rx_retry_high_wm;
+		spin_unlock_irqrestore(&cb_data_ptr->lock, flags);
+		break;
+
+	case SMUX_RX_RETRY_LOW_WM_HIT:
+		spin_lock_irqsave(&cb_data_ptr->lock, flags);
+		++cb_data_ptr->event_rx_retry_low_wm;
+		spin_unlock_irqrestore(&cb_data_ptr->lock, flags);
+		break;
+
 	case SMUX_TIOCM_UPDATE:
 		spin_lock_irqsave(&cb_data_ptr->lock, flags);
 		++cb_data_ptr->event_tiocm;
@@ -1328,7 +1349,7 @@ static int smux_ut_local_get_rx_buff_retry(char *buf, int max)
 		/* open port for loopback */
 		ret = msm_smux_set_ch_option(SMUX_TEST_LCID,
 				SMUX_CH_OPTION_LOCAL_LOOPBACK,
-				0);
+				SMUX_CH_OPTION_AUTO_REMOTE_TX_STOP);
 		UT_ASSERT_INT(ret, ==, 0);
 
 		ret = msm_smux_open(SMUX_TEST_LCID, &cb_data,
@@ -1581,6 +1602,132 @@ static int smux_ut_local_get_rx_buff_retry(char *buf, int max)
 	return i;
 }
 
+/**
+ * Verify get_rx_buffer callback retry for auto-rx flow control.
+ *
+ * @buf  Buffer for status message
+ * @max  Size of buffer
+ *
+ * @returns Number of bytes written to @buf
+ */
+static int smux_ut_local_get_rx_buff_retry_auto(char *buf, int max)
+{
+	static struct smux_mock_callback cb_data;
+	static int cb_initialized;
+	int i = 0;
+	int failed = 0;
+	int ret;
+	int try;
+	int try_rx_retry_wm;
+
+	i += scnprintf(buf + i, max - i, "Running %s\n", __func__);
+	pr_err("%s", buf);
+
+	if (!cb_initialized)
+		mock_cb_data_init(&cb_data);
+
+	mock_cb_data_reset(&cb_data);
+	smux_byte_loopback = SMUX_TEST_LCID;
+	while (!failed) {
+		/* open port for loopback */
+		ret = msm_smux_set_ch_option(SMUX_TEST_LCID,
+				SMUX_CH_OPTION_LOCAL_LOOPBACK
+				| SMUX_CH_OPTION_AUTO_REMOTE_TX_STOP,
+				0);
+		UT_ASSERT_INT(ret, ==, 0);
+
+		ret = msm_smux_open(SMUX_TEST_LCID, &cb_data,
+					smux_mock_cb, get_rx_buffer_mock);
+		UT_ASSERT_INT(ret, ==, 0);
+		UT_ASSERT_INT(
+			(int)wait_for_completion_timeout(
+				&cb_data.cb_completion, HZ), >, 0);
+		UT_ASSERT_INT(cb_data.cb_count, ==, 1);
+		UT_ASSERT_INT(cb_data.event_connected, ==, 1);
+		mock_cb_data_reset(&cb_data);
+
+		/* Test high rx-retry watermark */
+		get_rx_buffer_mock_fail = 1;
+		try_rx_retry_wm = 0;
+		for (try = 0; try < SMUX_RX_RETRY_MAX_PKTS; ++try) {
+			pr_err("%s: try %d\n", __func__, try);
+			ret = msm_smux_write(SMUX_TEST_LCID, (void *)1,
+					test_array, sizeof(test_array));
+			UT_ASSERT_INT(ret, ==, 0);
+			if (failed)
+				break;
+
+			if (!try_rx_retry_wm &&
+					cb_data.event_rx_retry_high_wm) {
+				/* RX high watermark hit */
+				try_rx_retry_wm = try + 1;
+				break;
+			}
+
+			while (cb_data.event_write_done <= try) {
+				UT_ASSERT_INT(
+					(int)wait_for_completion_timeout(
+						&cb_data.cb_completion, HZ),
+					>, 0);
+				INIT_COMPLETION(cb_data.cb_completion);
+			}
+			if (failed)
+				break;
+		}
+		if (failed)
+			break;
+
+		/* RX retry high watermark should have been set */
+		UT_ASSERT_INT(cb_data.event_rx_retry_high_wm, ==, 1);
+		UT_ASSERT_INT(try_rx_retry_wm, ==, SMUX_RX_WM_HIGH);
+
+		/*
+		 * Disable RX buffer allocation failures and wait for
+		 * SMUX_RX_WM_HIGH successful packets.
+		 */
+		get_rx_buffer_mock_fail = 0;
+		while (cb_data.event_read_done < SMUX_RX_WM_HIGH) {
+			UT_ASSERT_INT(
+				(int)wait_for_completion_timeout(
+					&cb_data.cb_completion, 2*HZ),
+				>, 0);
+			INIT_COMPLETION(cb_data.cb_completion);
+		}
+		if (failed)
+			break;
+
+		UT_ASSERT_INT(0, ==, cb_data.event_read_failed);
+		UT_ASSERT_INT(SMUX_RX_WM_HIGH, ==,
+				cb_data.event_read_done);
+		UT_ASSERT_INT(cb_data.event_rx_retry_low_wm, ==, 1);
+		mock_cb_data_reset(&cb_data);
+
+		/* close port */
+		ret = msm_smux_close(SMUX_TEST_LCID);
+		UT_ASSERT_INT(ret, ==, 0);
+		UT_ASSERT_INT(
+			(int)wait_for_completion_timeout(
+				&cb_data.cb_completion, HZ),
+			>, 0);
+		UT_ASSERT_INT(cb_data.cb_count, ==, 1);
+		UT_ASSERT_INT(cb_data.event_disconnected, ==, 1);
+		UT_ASSERT_INT(cb_data.event_disconnected_ssr, ==, 0);
+		break;
+	}
+
+	if (!failed) {
+		i += scnprintf(buf + i, max - i, "\tOK\n");
+	} else {
+		pr_err("%s: Failed\n", __func__);
+		i += scnprintf(buf + i, max - i, "\tFailed\n");
+		i += mock_cb_data_print(&cb_data, buf + i, max - i);
+		msm_smux_close(SMUX_TEST_LCID);
+	}
+
+	smux_byte_loopback = 0;
+	mock_cb_data_reset(&cb_data);
+	return i;
+}
+
 static char debug_buffer[DEBUG_BUFMAX];
 
 static ssize_t debug_read(struct file *file, char __user *buf,
@@ -1644,6 +1791,8 @@ static int __init smux_debugfs_init(void)
 			smux_ut_local_smuxld_receive_buf);
 	debug_create("ut_local_get_rx_buff_retry", 0444, dent,
 			smux_ut_local_get_rx_buff_retry);
+	debug_create("ut_local_get_rx_buff_retry_auto", 0444, dent,
+			smux_ut_local_get_rx_buff_retry_auto);
 
 	return 0;
 }

include/linux/smux.h

@@ -77,6 +77,8 @@ enum {
 	SMUX_TIOCM_UPDATE,
 	SMUX_LOW_WM_HIT,           /* @metadata is NULL */
 	SMUX_HIGH_WM_HIT,          /* @metadata is NULL */
+	SMUX_RX_RETRY_HIGH_WM_HIT, /* @metadata is NULL */
+	SMUX_RX_RETRY_LOW_WM_HIT,  /* @metadata is NULL */
 };
 
 /**
@@ -86,6 +88,7 @@ enum {
 	SMUX_CH_OPTION_LOCAL_LOOPBACK = 1 << 0,
 	SMUX_CH_OPTION_REMOTE_LOOPBACK = 1 << 1,
 	SMUX_CH_OPTION_REMOTE_TX_STOP = 1 << 2,
+	SMUX_CH_OPTION_AUTO_REMOTE_TX_STOP = 1 << 3,
 };
 
 /**