tty: n_smux: Add retry queue size flow control
If the receive retry queue grows too large, automatically enable flow
control so that RX buffers do not have to be dropped.  The automatic
flow control is governed by the new SMUX_CH_OPTION_AUTO_REMOTE_TX_STOP
channel option (enabled by default) and is reported to clients through
the new SMUX_RX_RETRY_HIGH_WM_HIT and SMUX_RX_RETRY_LOW_WM_HIT events.
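A minimal client sketch, assuming the callback signatures declared in
include/linux/smux.h; client_notify, client_get_rx_buffer, client_open,
and lcid are illustrative placeholders, not part of this change:

    #include <linux/smux.h>

    /* Client RX buffer allocator (placeholder). */
    static int client_get_rx_buffer(void *priv, void **pkt_priv,
                                    void **buffer, int size);

    static void client_notify(void *priv, int event, const void *metadata)
    {
            switch (event) {
            case SMUX_RX_RETRY_HIGH_WM_HIT:
                    /* RX retry queue reached SMUX_RX_WM_HIGH; the remote
                     * transmitter has been flow controlled automatically.
                     */
                    break;
            case SMUX_RX_RETRY_LOW_WM_HIT:
                    /* Retry queue drained to SMUX_RX_WM_LOW; flow control
                     * has been released.
                     */
                    break;
            }
    }

    static int client_open(uint8_t lcid, void *priv)
    {
            /* The option is on by default; set it explicitly for clarity,
             * or clear it to opt out of automatic flow control.
             */
            msm_smux_set_ch_option(lcid,
                            SMUX_CH_OPTION_AUTO_REMOTE_TX_STOP, 0);
            return msm_smux_open(lcid, priv, client_notify,
                                 client_get_rx_buffer);
    }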
Change-Id: If27599015b362ce013e177ee350e026933390d72
Signed-off-by: Eric Holmberg <eholmber@codeaurora.org>
diff --git a/drivers/tty/n_smux.c b/drivers/tty/n_smux.c
index bcfa4f6..68e3669 100644
--- a/drivers/tty/n_smux.c
+++ b/drivers/tty/n_smux.c
@@ -33,8 +33,6 @@
#define SMUX_NOTIFY_FIFO_SIZE 128
#define SMUX_TX_QUEUE_SIZE 256
-#define SMUX_WM_LOW 2
-#define SMUX_WM_HIGH 4
#define SMUX_PKT_LOG_SIZE 80
/* Maximum size we can accept in a single RX buffer */
@@ -172,12 +170,15 @@
unsigned local_state;
unsigned local_mode;
uint8_t local_tiocm;
+ unsigned options;
unsigned remote_state;
unsigned remote_mode;
uint8_t remote_tiocm;
int tx_flow_control;
+ int rx_flow_control_auto;
+ int rx_flow_control_client;
/* client callbacks and private data */
void *priv;
@@ -331,6 +332,7 @@
unsigned long code,
void *data);
static void smux_uart_power_on_atomic(void);
+static int smux_rx_flow_control_updated(struct smux_lch_t *ch);
/**
* Convert TTY Error Flags to string for logging purposes.
@@ -403,10 +405,13 @@
ch->local_state = SMUX_LCH_LOCAL_CLOSED;
ch->local_mode = SMUX_LCH_MODE_NORMAL;
ch->local_tiocm = 0x0;
+ ch->options = SMUX_CH_OPTION_AUTO_REMOTE_TX_STOP;
ch->remote_state = SMUX_LCH_REMOTE_CLOSED;
ch->remote_mode = SMUX_LCH_MODE_NORMAL;
ch->remote_tiocm = 0x0;
ch->tx_flow_control = 0;
+ ch->rx_flow_control_auto = 0;
+ ch->rx_flow_control_client = 0;
ch->priv = 0;
ch->notify = 0;
ch->get_rx_buffer = 0;
@@ -487,6 +492,8 @@
ch->remote_state = SMUX_LCH_REMOTE_CLOSED;
ch->remote_mode = SMUX_LCH_MODE_NORMAL;
ch->tx_flow_control = 0;
+ ch->rx_flow_control_auto = 0;
+ ch->rx_flow_control_client = 0;
/* Purge RX retry queue */
if (ch->rx_retry_queue_cnt)
@@ -1352,6 +1359,7 @@
uint8_t lcid;
int ret = 0;
int do_retry = 0;
+ int tx_ready = 0;
int tmp;
int rx_len;
struct smux_lch_t *ch;
@@ -1395,8 +1403,20 @@
if (!list_empty(&ch->rx_retry_queue)) {
do_retry = 1;
+
+ if ((ch->options & SMUX_CH_OPTION_AUTO_REMOTE_TX_STOP) &&
+ !ch->rx_flow_control_auto &&
+ ((ch->rx_retry_queue_cnt + 1) >= SMUX_RX_WM_HIGH)) {
+ /* need to flow control RX */
+ ch->rx_flow_control_auto = 1;
+ tx_ready |= smux_rx_flow_control_updated(ch);
+ schedule_notify(ch->lcid, SMUX_RX_RETRY_HIGH_WM_HIT,
+ NULL);
+ }
if ((ch->rx_retry_queue_cnt + 1) > SMUX_RX_RETRY_MAX_PKTS) {
/* retry queue full */
+ pr_err("%s: ch %d RX retry queue full\n",
+ __func__, lcid);
schedule_notify(lcid, SMUX_READ_FAIL, NULL);
ret = -ENOMEM;
spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);
@@ -1420,7 +1440,7 @@
}
ack_pkt->hdr.pad_len = pkt->hdr.pad_len;
smux_tx_queue(ack_pkt, ch, 0);
- list_channel(ch);
+ tx_ready = 1;
} else {
pr_err("%s: Remote loopack allocation failure\n",
__func__);
@@ -1446,6 +1466,8 @@
/* buffer allocation failed - add to retry queue */
do_retry = 1;
} else if (tmp < 0) {
+ pr_err("%s: ch %d Client RX buffer alloc failed %d\n",
+ __func__, lcid, tmp);
schedule_notify(lcid, SMUX_READ_FAIL, NULL);
ret = -ENOMEM;
}
@@ -1492,6 +1514,8 @@
spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);
}
+ if (tx_ready)
+ list_channel(ch);
out:
return ret;
}
@@ -2304,18 +2328,32 @@
/**
* Remove RX retry packet from channel and free it.
*
- * Must be called with state_lock_lhb1 locked.
- *
* @ch Channel for retry packet
* @retry Retry packet to remove
+ *
+ * @returns 1 if flow control updated; 0 otherwise
+ *
+ * Must be called with state_lock_lhb1 locked.
*/
-void smux_remove_rx_retry(struct smux_lch_t *ch,
+int smux_remove_rx_retry(struct smux_lch_t *ch,
struct smux_rx_pkt_retry *retry)
{
+ int tx_ready = 0;
+
list_del(&retry->rx_retry_list);
--ch->rx_retry_queue_cnt;
smux_free_pkt(retry->pkt);
kfree(retry);
+
+ if ((ch->options & SMUX_CH_OPTION_AUTO_REMOTE_TX_STOP) &&
+ (ch->rx_retry_queue_cnt <= SMUX_RX_WM_LOW) &&
+ ch->rx_flow_control_auto) {
+ ch->rx_flow_control_auto = 0;
+ smux_rx_flow_control_updated(ch);
+ schedule_notify(ch->lcid, SMUX_RX_RETRY_LOW_WM_HIT, NULL);
+ tx_ready = 1;
+ }
+ return tx_ready;
}
/**
@@ -2386,6 +2424,8 @@
union notifier_metadata metadata;
int tmp;
unsigned long flags;
+ int immediate_retry = 0;
+ int tx_ready = 0;
ch = container_of(work, struct smux_lch_t, rx_retry_work.work);
@@ -2397,7 +2437,7 @@
retry = list_first_entry(&ch->rx_retry_queue,
struct smux_rx_pkt_retry,
rx_retry_list);
- smux_remove_rx_retry(ch, retry);
+ (void)smux_remove_rx_retry(ch, retry);
}
}
@@ -2412,7 +2452,8 @@
rx_retry_list);
spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);
- SMUX_DBG("%s: retrying rx pkt %p\n", __func__, retry);
+ SMUX_DBG("%s: ch %d retrying rx pkt %p\n",
+ __func__, ch->lcid, retry);
metadata.read.pkt_priv = 0;
metadata.read.buffer = 0;
tmp = ch->get_rx_buffer(ch->priv,
@@ -2421,33 +2462,44 @@
retry->pkt->hdr.payload_len);
if (tmp == 0 && metadata.read.buffer) {
/* have valid RX buffer */
+
memcpy(metadata.read.buffer, retry->pkt->payload,
retry->pkt->hdr.payload_len);
metadata.read.len = retry->pkt->hdr.payload_len;
spin_lock_irqsave(&ch->state_lock_lhb1, flags);
- smux_remove_rx_retry(ch, retry);
+ tx_ready = smux_remove_rx_retry(ch, retry);
spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);
-
schedule_notify(ch->lcid, SMUX_READ_DONE, &metadata);
+ if (tx_ready)
+ list_channel(ch);
+
+ immediate_retry = 1;
} else if (tmp == -EAGAIN ||
(tmp == 0 && !metadata.read.buffer)) {
/* retry again */
retry->timeout_in_ms <<= 1;
if (retry->timeout_in_ms > SMUX_RX_RETRY_MAX_MS) {
/* timed out */
+ pr_err("%s: ch %d RX retry client timeout\n",
+ __func__, ch->lcid);
spin_lock_irqsave(&ch->state_lock_lhb1, flags);
- smux_remove_rx_retry(ch, retry);
- schedule_notify(ch->lcid, SMUX_READ_FAIL, NULL);
+ tx_ready = smux_remove_rx_retry(ch, retry);
spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);
+ schedule_notify(ch->lcid, SMUX_READ_FAIL, NULL);
+ if (tx_ready)
+ list_channel(ch);
}
} else {
/* client error - drop packet */
+ pr_err("%s: ch %d RX retry client failed (%d)\n",
+ __func__, ch->lcid, tmp);
spin_lock_irqsave(&ch->state_lock_lhb1, flags);
- smux_remove_rx_retry(ch, retry);
+ tx_ready = smux_remove_rx_retry(ch, retry);
spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);
-
schedule_notify(ch->lcid, SMUX_READ_FAIL, NULL);
+ if (tx_ready)
+ list_channel(ch);
}
/* schedule next retry */
@@ -2456,8 +2508,12 @@
retry = list_first_entry(&ch->rx_retry_queue,
struct smux_rx_pkt_retry,
rx_retry_list);
- queue_delayed_work(smux_rx_wq, &ch->rx_retry_work,
- msecs_to_jiffies(retry->timeout_in_ms));
+
+ if (immediate_retry)
+ queue_delayed_work(smux_rx_wq, &ch->rx_retry_work, 0);
+ else
+ queue_delayed_work(smux_rx_wq, &ch->rx_retry_work,
+ msecs_to_jiffies(retry->timeout_in_ms));
}
spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);
}
@@ -2563,7 +2619,7 @@
if (smux.power_state != SMUX_PWR_ON) {
/* channel not ready to transmit */
- SMUX_DBG("%s: can not tx with power state %d\n",
+ SMUX_DBG("%s: waiting for link up (state %d)\n",
__func__,
smux.power_state);
spin_unlock_irqrestore(&smux.tx_lock_lha2, flags);
@@ -2606,7 +2662,7 @@
--ch->tx_pending_data_cnt;
if (ch->notify_lwm &&
ch->tx_pending_data_cnt
- <= SMUX_WM_LOW) {
+ <= SMUX_TX_WM_LOW) {
ch->notify_lwm = 0;
low_wm_notif = 1;
}
@@ -2633,6 +2689,34 @@
}
}
+/**
+ * Update the RX flow control (sent in the TIOCM Status command).
+ *
+ * @ch Channel for update
+ *
+ * @returns 1 for updated, 0 for not updated
+ *
+ * Must be called with ch->state_lock_lhb1 locked.
+ */
+static int smux_rx_flow_control_updated(struct smux_lch_t *ch)
+{
+ int updated = 0;
+ int prev_state;
+
+ prev_state = ch->local_tiocm & SMUX_CMD_STATUS_FLOW_CNTL;
+
+ if (ch->rx_flow_control_client || ch->rx_flow_control_auto)
+ ch->local_tiocm |= SMUX_CMD_STATUS_FLOW_CNTL;
+ else
+ ch->local_tiocm &= ~SMUX_CMD_STATUS_FLOW_CNTL;
+
+ if (prev_state != (ch->local_tiocm & SMUX_CMD_STATUS_FLOW_CNTL)) {
+ smux_send_status_cmd(ch);
+ updated = 1;
+ }
+
+ return updated;
+}
/**********************************************************************/
/* Kernel API */
@@ -2675,17 +2759,30 @@
if (clear & SMUX_CH_OPTION_REMOTE_LOOPBACK)
ch->local_mode = SMUX_LCH_MODE_NORMAL;
- /* Flow control */
+ /* RX Flow control */
if (set & SMUX_CH_OPTION_REMOTE_TX_STOP) {
- ch->local_tiocm |= SMUX_CMD_STATUS_FLOW_CNTL;
- ret = smux_send_status_cmd(ch);
- tx_ready = 1;
+ ch->rx_flow_control_client = 1;
+ tx_ready |= smux_rx_flow_control_updated(ch);
}
if (clear & SMUX_CH_OPTION_REMOTE_TX_STOP) {
- ch->local_tiocm &= ~SMUX_CMD_STATUS_FLOW_CNTL;
- ret = smux_send_status_cmd(ch);
- tx_ready = 1;
+ ch->rx_flow_control_client = 0;
+ tx_ready |= smux_rx_flow_control_updated(ch);
+ }
+
+ /* Auto RX Flow Control */
+ if (set & SMUX_CH_OPTION_AUTO_REMOTE_TX_STOP) {
+ SMUX_DBG("%s: auto rx flow control option enabled\n",
+ __func__);
+ ch->options |= SMUX_CH_OPTION_AUTO_REMOTE_TX_STOP;
+ }
+
+ if (clear & SMUX_CH_OPTION_AUTO_REMOTE_TX_STOP) {
+ SMUX_DBG("%s: auto rx flow control option disabled\n",
+ __func__);
+ ch->options &= ~SMUX_CH_OPTION_AUTO_REMOTE_TX_STOP;
+ ch->rx_flow_control_auto = 0;
+ tx_ready |= smux_rx_flow_control_updated(ch);
}
spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);
@@ -2909,16 +3006,16 @@
/* verify high watermark */
SMUX_DBG("%s: pending %d", __func__, ch->tx_pending_data_cnt);
- if (ch->tx_pending_data_cnt >= SMUX_WM_HIGH) {
+ if (ch->tx_pending_data_cnt >= SMUX_TX_WM_HIGH) {
pr_err("%s: ch %d high watermark %d exceeded %d\n",
- __func__, lcid, SMUX_WM_HIGH,
+ __func__, lcid, SMUX_TX_WM_HIGH,
ch->tx_pending_data_cnt);
ret = -EAGAIN;
goto out_inner;
}
/* queue packet for transmit */
- if (++ch->tx_pending_data_cnt == SMUX_WM_HIGH) {
+ if (++ch->tx_pending_data_cnt == SMUX_TX_WM_HIGH) {
ch->notify_lwm = 1;
pr_err("%s: high watermark hit\n", __func__);
schedule_notify(lcid, SMUX_HIGH_WM_HIT, NULL);
@@ -2965,7 +3062,7 @@
ch = &smux_lch[lcid];
spin_lock_irqsave(&ch->tx_lock_lhb2, flags);
- if (ch->tx_pending_data_cnt >= SMUX_WM_HIGH)
+ if (ch->tx_pending_data_cnt >= SMUX_TX_WM_HIGH)
is_full = 1;
spin_unlock_irqrestore(&ch->tx_lock_lhb2, flags);
@@ -2993,7 +3090,7 @@
ch = &smux_lch[lcid];
spin_lock_irqsave(&ch->tx_lock_lhb2, flags);
- if (ch->tx_pending_data_cnt <= SMUX_WM_LOW)
+ if (ch->tx_pending_data_cnt <= SMUX_TX_WM_LOW)
is_low = 1;
spin_unlock_irqrestore(&ch->tx_lock_lhb2, flags);
diff --git a/drivers/tty/smux_private.h b/drivers/tty/smux_private.h
index f644ff0..353c762 100644
--- a/drivers/tty/smux_private.h
+++ b/drivers/tty/smux_private.h
@@ -32,6 +32,10 @@
/* Maximum number of packets in retry queue */
#define SMUX_RX_RETRY_MAX_PKTS 32
+#define SMUX_RX_WM_HIGH 16
+#define SMUX_RX_WM_LOW 4
+#define SMUX_TX_WM_LOW 2
+#define SMUX_TX_WM_HIGH 4
struct tty_struct;
diff --git a/drivers/tty/smux_test.c b/drivers/tty/smux_test.c
index c821c7f..e488a63 100644
--- a/drivers/tty/smux_test.c
+++ b/drivers/tty/smux_test.c
@@ -185,6 +185,8 @@
int event_disconnected_ssr;
int event_low_wm;
int event_high_wm;
+ int event_rx_retry_high_wm;
+ int event_rx_retry_low_wm;
/* TIOCM changes */
int event_tiocm;
@@ -235,6 +237,8 @@
cb->event_disconnected_ssr = 0;
cb->event_low_wm = 0;
cb->event_high_wm = 0;
+ cb->event_rx_retry_high_wm = 0;
+ cb->event_rx_retry_low_wm = 0;
cb->event_tiocm = 0;
cb->tiocm_meta.tiocm_old = 0;
cb->tiocm_meta.tiocm_new = 0;
@@ -295,6 +299,8 @@
"\tevent_disconnected_ssr=%d\n"
"\tevent_low_wm=%d\n"
"\tevent_high_wm=%d\n"
+ "\tevent_rx_retry_high_wm=%d\n"
+ "\tevent_rx_retry_low_wm=%d\n"
"\tevent_tiocm=%d\n"
"\tevent_read_done=%d\n"
"\tevent_read_failed=%d\n"
@@ -311,6 +317,8 @@
cb->event_disconnected_ssr,
cb->event_low_wm,
cb->event_high_wm,
+ cb->event_rx_retry_high_wm,
+ cb->event_rx_retry_low_wm,
cb->event_tiocm,
cb->event_read_done,
cb->event_read_failed,
@@ -429,6 +437,19 @@
spin_unlock_irqrestore(&cb_data_ptr->lock, flags);
break;
+ case SMUX_RX_RETRY_HIGH_WM_HIT:
+ spin_lock_irqsave(&cb_data_ptr->lock, flags);
+ ++cb_data_ptr->event_rx_retry_high_wm;
+ spin_unlock_irqrestore(&cb_data_ptr->lock, flags);
+ break;
+
+ case SMUX_RX_RETRY_LOW_WM_HIT:
+ spin_lock_irqsave(&cb_data_ptr->lock, flags);
+ ++cb_data_ptr->event_rx_retry_low_wm;
+ spin_unlock_irqrestore(&cb_data_ptr->lock, flags);
+ break;
+
case SMUX_TIOCM_UPDATE:
spin_lock_irqsave(&cb_data_ptr->lock, flags);
++cb_data_ptr->event_tiocm;
@@ -1328,7 +1349,7 @@
/* open port for loopback */
ret = msm_smux_set_ch_option(SMUX_TEST_LCID,
SMUX_CH_OPTION_LOCAL_LOOPBACK,
- 0);
+ SMUX_CH_OPTION_AUTO_REMOTE_TX_STOP);
UT_ASSERT_INT(ret, ==, 0);
ret = msm_smux_open(SMUX_TEST_LCID, &cb_data,
@@ -1581,6 +1602,132 @@
return i;
}
+/**
+ * Verify get_rx_buffer callback retry for auto-rx flow control.
+ *
+ * @buf Buffer for status message
+ * @max Size of buffer
+ *
+ * @returns Number of bytes written to @buf
+ */
+static int smux_ut_local_get_rx_buff_retry_auto(char *buf, int max)
+{
+ static struct smux_mock_callback cb_data;
+ static int cb_initialized;
+ int i = 0;
+ int failed = 0;
+ int ret;
+ int try;
+ int try_rx_retry_wm;
+
+ i += scnprintf(buf + i, max - i, "Running %s\n", __func__);
+ pr_err("%s", buf);
+
+ if (!cb_initialized)
+ mock_cb_data_init(&cb_data);
+
+ mock_cb_data_reset(&cb_data);
+ smux_byte_loopback = SMUX_TEST_LCID;
+ while (!failed) {
+ /* open port for loopback */
+ ret = msm_smux_set_ch_option(SMUX_TEST_LCID,
+ SMUX_CH_OPTION_LOCAL_LOOPBACK
+ | SMUX_CH_OPTION_AUTO_REMOTE_TX_STOP,
+ 0);
+ UT_ASSERT_INT(ret, ==, 0);
+
+ ret = msm_smux_open(SMUX_TEST_LCID, &cb_data,
+ smux_mock_cb, get_rx_buffer_mock);
+ UT_ASSERT_INT(ret, ==, 0);
+ UT_ASSERT_INT(
+ (int)wait_for_completion_timeout(
+ &cb_data.cb_completion, HZ), >, 0);
+ UT_ASSERT_INT(cb_data.cb_count, ==, 1);
+ UT_ASSERT_INT(cb_data.event_connected, ==, 1);
+ mock_cb_data_reset(&cb_data);
+
+ /* Test high rx-retry watermark */
+ get_rx_buffer_mock_fail = 1;
+ try_rx_retry_wm = 0;
+ for (try = 0; try < SMUX_RX_RETRY_MAX_PKTS; ++try) {
+ pr_err("%s: try %d\n", __func__, try);
+ ret = msm_smux_write(SMUX_TEST_LCID, (void *)1,
+ test_array, sizeof(test_array));
+ UT_ASSERT_INT(ret, ==, 0);
+ if (failed)
+ break;
+
+ if (!try_rx_retry_wm &&
+ cb_data.event_rx_retry_high_wm) {
+ /* RX high watermark hit */
+ try_rx_retry_wm = try + 1;
+ break;
+ }
+
+ while (cb_data.event_write_done <= try) {
+ UT_ASSERT_INT(
+ (int)wait_for_completion_timeout(
+ &cb_data.cb_completion, HZ),
+ >, 0);
+ INIT_COMPLETION(cb_data.cb_completion);
+ }
+ if (failed)
+ break;
+ }
+ if (failed)
+ break;
+
+ /* RX retry high watermark should have been set */
+ UT_ASSERT_INT(cb_data.event_rx_retry_high_wm, ==, 1);
+ UT_ASSERT_INT(try_rx_retry_wm, ==, SMUX_RX_WM_HIGH);
+
+ /*
+ * Disable RX buffer allocation failures and wait for
+ * SMUX_RX_WM_HIGH successful packets.
+ */
+ get_rx_buffer_mock_fail = 0;
+ while (cb_data.event_read_done < SMUX_RX_WM_HIGH) {
+ UT_ASSERT_INT(
+ (int)wait_for_completion_timeout(
+ &cb_data.cb_completion, 2*HZ),
+ >, 0);
+ INIT_COMPLETION(cb_data.cb_completion);
+ }
+ if (failed)
+ break;
+
+ UT_ASSERT_INT(0, ==, cb_data.event_read_failed);
+ UT_ASSERT_INT(SMUX_RX_WM_HIGH, ==,
+ cb_data.event_read_done);
+ UT_ASSERT_INT(cb_data.event_rx_retry_low_wm, ==, 1);
+ mock_cb_data_reset(&cb_data);
+
+ /* close port */
+ ret = msm_smux_close(SMUX_TEST_LCID);
+ UT_ASSERT_INT(ret, ==, 0);
+ UT_ASSERT_INT(
+ (int)wait_for_completion_timeout(
+ &cb_data.cb_completion, HZ),
+ >, 0);
+ UT_ASSERT_INT(cb_data.cb_count, ==, 1);
+ UT_ASSERT_INT(cb_data.event_disconnected, ==, 1);
+ UT_ASSERT_INT(cb_data.event_disconnected_ssr, ==, 0);
+ break;
+ }
+
+ if (!failed) {
+ i += scnprintf(buf + i, max - i, "\tOK\n");
+ } else {
+ pr_err("%s: Failed\n", __func__);
+ i += scnprintf(buf + i, max - i, "\tFailed\n");
+ i += mock_cb_data_print(&cb_data, buf + i, max - i);
+ msm_smux_close(SMUX_TEST_LCID);
+ }
+ smux_byte_loopback = 0;
+ mock_cb_data_reset(&cb_data);
+ return i;
+}
+
static char debug_buffer[DEBUG_BUFMAX];
static ssize_t debug_read(struct file *file, char __user *buf,
@@ -1644,6 +1791,8 @@
smux_ut_local_smuxld_receive_buf);
debug_create("ut_local_get_rx_buff_retry", 0444, dent,
smux_ut_local_get_rx_buff_retry);
+ debug_create("ut_local_get_rx_buff_retry_auto", 0444, dent,
+ smux_ut_local_get_rx_buff_retry_auto);
return 0;
}
diff --git a/include/linux/smux.h b/include/linux/smux.h
index 308f969..24a6371 100644
--- a/include/linux/smux.h
+++ b/include/linux/smux.h
@@ -77,6 +77,8 @@
SMUX_TIOCM_UPDATE,
SMUX_LOW_WM_HIT, /* @metadata is NULL */
SMUX_HIGH_WM_HIT, /* @metadata is NULL */
+ SMUX_RX_RETRY_HIGH_WM_HIT, /* @metadata is NULL */
+ SMUX_RX_RETRY_LOW_WM_HIT, /* @metadata is NULL */
};
/**
@@ -86,6 +88,7 @@
SMUX_CH_OPTION_LOCAL_LOOPBACK = 1 << 0,
SMUX_CH_OPTION_REMOTE_LOOPBACK = 1 << 1,
SMUX_CH_OPTION_REMOTE_TX_STOP = 1 << 2,
+ SMUX_CH_OPTION_AUTO_REMOTE_TX_STOP = 1 << 3,
};
/**