wifi: mt76: add separate tx scheduling queue for off-channel tx

Ensure that packets are not sent out to the wrong channel

Link: https://patch.msgid.link/20240827093011.18621-6-nbd@nbd.name
Signed-off-by: Felix Fietkau <nbd@nbd.name>
commit 0b3be9d1d3
parent f4fdd77162
Author: Felix Fietkau <nbd@nbd.name>
Date:   2024-08-27 11:29:53 +02:00

3 changed files with 41 additions and 26 deletions
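
The problem: scans and remain-on-channel requests briefly retune the radio away
from the operating channel, and before this change all frames for a wcid shared
a single tx_pending scheduling queue, so frames queued for the operating channel
could go out while the radio was parked elsewhere. The patch adds a second
per-wcid queue, selected at enqueue time; condensed from the mt76_tx() hunk in
tx.c below:

	/* Frames that mac80211 marks for off-channel transmission go on a
	 * separate queue; the regular queue is skipped while off-channel. */
	if ((info->flags & IEEE80211_TX_CTL_TX_OFFCHAN) ||
	    (info->control.flags & IEEE80211_TX_CTRL_DONT_USE_RATE_MASK))
		head = &wcid->tx_offchannel;
	else
		head = &wcid->tx_pending;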

diff --git a/drivers/net/wireless/mediatek/mt76/mac80211.c b/drivers/net/wireless/mediatek/mt76/mac80211.c
--- a/drivers/net/wireless/mediatek/mt76/mac80211.c
+++ b/drivers/net/wireless/mediatek/mt76/mac80211.c

@@ -941,8 +941,7 @@ int mt76_set_channel(struct mt76_phy *phy, struct cfg80211_chan_def *chandef,
 
 	mutex_lock(&dev->mutex);
 	set_bit(MT76_RESET, &phy->state);
-
-	ieee80211_stop_queues(phy->hw);
+	mt76_worker_disable(&dev->tx_worker);
 	wait_event_timeout(dev->tx_wait, !mt76_has_tx_pending(phy), timeout);
 	mt76_update_survey(phy);
 
@@ -959,12 +958,11 @@ int mt76_set_channel(struct mt76_phy *phy, struct cfg80211_chan_def *chandef,
 
 	if (chandef->chan != phy->main_chan)
 		memset(phy->chan_state, 0, sizeof(*phy->chan_state));
+	mt76_worker_enable(&dev->tx_worker);
 
 	ret = dev->drv->set_channel(phy);
 
 	clear_bit(MT76_RESET, &phy->state);
-	ieee80211_wake_queues(phy->hw);
 	mt76_worker_schedule(&dev->tx_worker);
 
 	mutex_unlock(&dev->mutex);
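
Taken together, the two hunks above stop using mac80211 queue stop/wake across
a channel switch: the mac80211 queues stay open so scan and ROC frames can
still be handed down, and mt76's own tx worker is paused instead. The resulting
sequence, condensed from the patched mt76_set_channel():

	set_bit(MT76_RESET, &phy->state);
	mt76_worker_disable(&dev->tx_worker);	/* pause the tx scheduling worker */
	wait_event_timeout(dev->tx_wait, !mt76_has_tx_pending(phy), timeout);
	/* ... update chandef, survey data and main_chan ... */
	mt76_worker_enable(&dev->tx_worker);	/* resume before the driver retunes */
	ret = dev->drv->set_channel(phy);
	clear_bit(MT76_RESET, &phy->state);
	mt76_worker_schedule(&dev->tx_worker);	/* kick scheduling on the new channel */
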
@@ -1548,6 +1546,7 @@ void mt76_wcid_init(struct mt76_wcid *wcid)
 {
 	INIT_LIST_HEAD(&wcid->tx_list);
 	skb_queue_head_init(&wcid->tx_pending);
+	skb_queue_head_init(&wcid->tx_offchannel);
 	INIT_LIST_HEAD(&wcid->list);
 	idr_init(&wcid->pktid);

diff --git a/drivers/net/wireless/mediatek/mt76/mt76.h b/drivers/net/wireless/mediatek/mt76/mt76.h
--- a/drivers/net/wireless/mediatek/mt76/mt76.h
+++ b/drivers/net/wireless/mediatek/mt76/mt76.h

@@ -361,6 +361,7 @@ struct mt76_wcid {
 	struct list_head tx_list;
 	struct sk_buff_head tx_pending;
+	struct sk_buff_head tx_offchannel;
 
 	struct list_head list;
 	struct idr pktid;
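
A struct sk_buff_head bundles the queue with its own spinlock, so the new field
needs no extra locking infrastructure; the tx.c changes below simply take
head->lock on whichever queue was picked. A minimal sketch of the generic API
(not taken from this patch):

	struct sk_buff_head q;

	skb_queue_head_init(&q);	/* initializes both the list and q.lock */

	spin_lock_bh(&q.lock);
	__skb_queue_tail(&q, skb);	/* double-underscore variant: caller holds q.lock */
	spin_unlock_bh(&q.lock);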

diff --git a/drivers/net/wireless/mediatek/mt76/tx.c b/drivers/net/wireless/mediatek/mt76/tx.c
--- a/drivers/net/wireless/mediatek/mt76/tx.c
+++ b/drivers/net/wireless/mediatek/mt76/tx.c

@@ -330,6 +330,7 @@ mt76_tx(struct mt76_phy *phy, struct ieee80211_sta *sta,
 	struct mt76_wcid *wcid, struct sk_buff *skb)
 {
 	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+	struct sk_buff_head *head;
 
 	if (mt76_testmode_enabled(phy)) {
 		ieee80211_free_txskb(phy->hw, skb);
@@ -345,9 +346,15 @@ mt76_tx(struct mt76_phy *phy, struct ieee80211_sta *sta,
 	info->hw_queue |= FIELD_PREP(MT_TX_HW_QUEUE_PHY, phy->band_idx);
 
-	spin_lock_bh(&wcid->tx_pending.lock);
-	__skb_queue_tail(&wcid->tx_pending, skb);
-	spin_unlock_bh(&wcid->tx_pending.lock);
+	if ((info->flags & IEEE80211_TX_CTL_TX_OFFCHAN) ||
+	    (info->control.flags & IEEE80211_TX_CTRL_DONT_USE_RATE_MASK))
+		head = &wcid->tx_offchannel;
+	else
+		head = &wcid->tx_pending;
+
+	spin_lock_bh(&head->lock);
+	__skb_queue_tail(head, skb);
+	spin_unlock_bh(&head->lock);
 
 	spin_lock_bh(&phy->tx_lock);
 	if (list_empty(&wcid->tx_list))
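
Two conditions route a frame to tx_offchannel: IEEE80211_TX_CTL_TX_OFFCHAN,
which mac80211 sets on frames explicitly requested to be transmitted
off-channel (remain-on-channel tx), and IEEE80211_TX_CTRL_DONT_USE_RATE_MASK,
which mac80211 uses for frames that must ignore the user-configured rate mask,
notably probe requests sent while scanning, and which therefore doubles here as
a hint that the frame belongs to the scan/off-channel context.
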
@@ -478,7 +485,7 @@ mt76_txq_send_burst(struct mt76_phy *phy, struct mt76_queue *q,
 		return idx;
 
 	do {
-		if (test_bit(MT76_RESET, &phy->state))
+		if (test_bit(MT76_RESET, &phy->state) || phy->offchannel)
 			return -EBUSY;
 
 		if (stop || mt76_txq_stopped(q))
@@ -522,7 +529,7 @@ mt76_txq_schedule_list(struct mt76_phy *phy, enum mt76_txq_id qid)
 	while (1) {
 		int n_frames = 0;
 
-		if (test_bit(MT76_RESET, &phy->state))
+		if (test_bit(MT76_RESET, &phy->state) || phy->offchannel)
 			return -EBUSY;
 
 		if (dev->queue_ops->tx_cleanup &&
@@ -568,7 +575,7 @@ void mt76_txq_schedule(struct mt76_phy *phy, enum mt76_txq_id qid)
 {
 	int len;
 
-	if (qid >= 4)
+	if (qid >= 4 || phy->offchannel)
 		return;
 
 	local_bh_disable();
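
The three guards above share one pattern: while phy->offchannel is set (or a
reset is in flight), the regular per-txq schedulers bail out, leaving the
per-wcid pending-queue scheduler below as the only transmit path, and that path
knows which of the two queues may be drained on the current channel.
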
@@ -586,7 +593,8 @@ void mt76_txq_schedule(struct mt76_phy *phy, enum mt76_txq_id qid)
 EXPORT_SYMBOL_GPL(mt76_txq_schedule);
 
 static int
-mt76_txq_schedule_pending_wcid(struct mt76_phy *phy, struct mt76_wcid *wcid)
+mt76_txq_schedule_pending_wcid(struct mt76_phy *phy, struct mt76_wcid *wcid,
+			       struct sk_buff_head *head)
 {
 	struct mt76_dev *dev = phy->dev;
 	struct ieee80211_sta *sta;
@@ -594,8 +602,8 @@ mt76_txq_schedule_pending_wcid(struct mt76_phy *phy, struct mt76_wcid *wcid)
 	struct sk_buff *skb;
 	int ret = 0;
 
-	spin_lock(&wcid->tx_pending.lock);
-	while ((skb = skb_peek(&wcid->tx_pending)) != NULL) {
+	spin_lock(&head->lock);
+	while ((skb = skb_peek(head)) != NULL) {
 		struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
 		struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
 		int qid = skb_get_queue_mapping(skb);
@@ -607,13 +615,13 @@ mt76_txq_schedule_pending_wcid(struct mt76_phy *phy, struct mt76_wcid *wcid)
 			qid = MT_TXQ_PSD;
 
 		q = phy->q_tx[qid];
-		if (mt76_txq_stopped(q)) {
+		if (mt76_txq_stopped(q) || test_bit(MT76_RESET, &phy->state)) {
 			ret = -1;
 			break;
 		}
 
-		__skb_unlink(skb, &wcid->tx_pending);
-		spin_unlock(&wcid->tx_pending.lock);
+		__skb_unlink(skb, head);
+		spin_unlock(&head->lock);
 
 		sta = wcid_to_sta(wcid);
 		spin_lock(&q->lock);
@@ -621,15 +629,17 @@ mt76_txq_schedule_pending_wcid(struct mt76_phy *phy, struct mt76_wcid *wcid)
 		dev->queue_ops->kick(dev, q);
 		spin_unlock(&q->lock);
 
-		spin_lock(&wcid->tx_pending.lock);
+		spin_lock(&head->lock);
 	}
-	spin_unlock(&wcid->tx_pending.lock);
+	spin_unlock(&head->lock);
 
 	return ret;
 }
 
 static void mt76_txq_schedule_pending(struct mt76_phy *phy)
 {
+	LIST_HEAD(tx_list);
+
 	if (list_empty(&phy->tx_list))
 		return;
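
With the sk_buff_head passed in as a parameter, a single drain loop now serves
both queues. The locking shape of that loop, condensed from the hunks above
(hardware submission elided):

	spin_lock(&head->lock);
	while ((skb = skb_peek(head)) != NULL) {
		__skb_unlink(skb, head);
		spin_unlock(&head->lock);	/* don't hold it across hw submission */

		/* ... dev->queue_ops->tx_queue_skb() and kick, under q->lock ... */

		spin_lock(&head->lock);
	}
	spin_unlock(&head->lock);
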
@@ -637,22 +647,27 @@ static void mt76_txq_schedule_pending(struct mt76_phy *phy)
 	rcu_read_lock();
 	spin_lock(&phy->tx_lock);
 
-	while (!list_empty(&phy->tx_list)) {
-		struct mt76_wcid *wcid = NULL;
+	list_splice_init(&phy->tx_list, &tx_list);
+	while (!list_empty(&tx_list)) {
+		struct mt76_wcid *wcid;
 		int ret;
 
-		wcid = list_first_entry(&phy->tx_list, struct mt76_wcid, tx_list);
+		wcid = list_first_entry(&tx_list, struct mt76_wcid, tx_list);
 		list_del_init(&wcid->tx_list);
 
 		spin_unlock(&phy->tx_lock);
-		ret = mt76_txq_schedule_pending_wcid(phy, wcid);
+		ret = mt76_txq_schedule_pending_wcid(phy, wcid, &wcid->tx_offchannel);
+		if (ret >= 0 && !phy->offchannel)
+			ret = mt76_txq_schedule_pending_wcid(phy, wcid, &wcid->tx_pending);
 		spin_lock(&phy->tx_lock);
 
-		if (ret) {
-			if (list_empty(&wcid->tx_list))
-				list_add_tail(&wcid->tx_list, &phy->tx_list);
+		if (!skb_queue_empty(&wcid->tx_pending) &&
+		    !skb_queue_empty(&wcid->tx_offchannel) &&
+		    list_empty(&wcid->tx_list))
+			list_add_tail(&wcid->tx_list, &phy->tx_list);
+
+		if (ret < 0)
 			break;
-		}
 	}
 
 	spin_unlock(&phy->tx_lock);
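
The scheduler changes in two ways: phy->tx_list is spliced onto a local list so
each wcid is visited at most once per pass even if it immediately re-queues
itself, and tx_offchannel is always drained before tx_pending, with tx_pending
skipped entirely while phy->offchannel is set. A wcid that still has frames
queued is put back on phy->tx_list for the next scheduling round.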