author     Toke Høiland-Jørgensen <toke@toke.dk>   2018-12-18 17:02:06 -0800
committer  Johannes Berg <johannes.berg@intel.com>   2019-01-19 09:24:12 +0100
commit     1866760096bf40bcf6977a9076b3026598bc12ee (patch)
tree       9ccf3723aa2f98fb6fc3acfdaf9ad69112ee3222 /net/mac80211/tx.c
parent     f04d402f2f00a0f0470b7e636b50170608b425c7 (diff)
mac80211: Add TXQ scheduling API
This adds an API to mac80211 to handle scheduling of TXQs. The interface
between driver and mac80211 for TXQ handling is changed by adding two new
functions: ieee80211_next_txq(), which will return the next TXQ to schedule
in the current round-robin rotation, and ieee80211_return_txq(), which the
driver uses to indicate that it has finished scheduling a TXQ (which will
then be put back in the scheduling rotation if it isn't empty).

The driver must call ieee80211_txq_schedule_start() at the start of each
scheduling session, and ieee80211_txq_schedule_end() at the end. The API
then guarantees that the same TXQ is not returned twice in the same session
(so a driver can loop on ieee80211_next_txq() without worrying about
breaking the loop).

Usage of the new API is optional, so drivers can be ported one at a time.
In this patch, the actual scheduling performed by mac80211 is simple
round-robin, but a subsequent commit adds airtime fairness awareness to the
scheduler.

Signed-off-by: Toke Høiland-Jørgensen <toke@toke.dk>
[minor kernel-doc fix, propagate sparse locking checks out]
Signed-off-by: Johannes Berg <johannes.berg@intel.com>
Diffstat (limited to 'net/mac80211/tx.c')
-rw-r--r--  net/mac80211/tx.c | 60
1 file changed, 59 insertions(+), 1 deletion(-)
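
For context, here is a minimal sketch (not part of the patch) of how a
converted driver's dequeue path might use the new API. The function name
my_drv_schedule_ac() and the helper my_drv_push_frames() are hypothetical
placeholders for driver-specific code that pulls frames with
ieee80211_tx_dequeue() and hands them to the hardware.

/*
 * Illustrative sketch only; my_drv_push_frames() is a hypothetical
 * stand-in for driver-specific frame handling.
 */
static void my_drv_schedule_ac(struct ieee80211_hw *hw, u8 ac)
{
	struct ieee80211_txq *txq;

	/* Begin a scheduling session; takes the per-AC scheduling lock. */
	ieee80211_txq_schedule_start(hw, ac);

	/* The loop terminates because a TXQ already handed out in this
	 * session is not returned again by ieee80211_next_txq(). */
	while ((txq = ieee80211_next_txq(hw, ac))) {
		my_drv_push_frames(hw, txq);

		/* Put the TXQ back; mac80211 re-queues it for the next
		 * rotation only if it still has traffic pending. */
		ieee80211_return_txq(hw, txq);
	}

	/* End the session; drops the per-AC scheduling lock. */
	ieee80211_txq_schedule_end(hw, ac);
}
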
diff --git a/net/mac80211/tx.c b/net/mac80211/tx.c
index f170d6c6629a..544da6411620 100644
--- a/net/mac80211/tx.c
+++ b/net/mac80211/tx.c
@@ -1449,6 +1449,7 @@ void ieee80211_txq_init(struct ieee80211_sub_if_data *sdata,
codel_vars_init(&txqi->def_cvars);
codel_stats_init(&txqi->cstats);
__skb_queue_head_init(&txqi->frags);
+ INIT_LIST_HEAD(&txqi->schedule_order);
txqi->txq.vif = &sdata->vif;
@@ -1489,6 +1490,9 @@ void ieee80211_txq_purge(struct ieee80211_local *local,
fq_tin_reset(fq, tin, fq_skb_free_func);
ieee80211_purge_tx_queue(&local->hw, &txqi->frags);
+ spin_lock_bh(&local->active_txq_lock[txqi->txq.ac]);
+ list_del_init(&txqi->schedule_order);
+ spin_unlock_bh(&local->active_txq_lock[txqi->txq.ac]);
}
void ieee80211_txq_set_params(struct ieee80211_local *local)
@@ -1605,7 +1609,7 @@ static bool ieee80211_queue_skb(struct ieee80211_local *local,
ieee80211_txq_enqueue(local, txqi, skb);
spin_unlock_bh(&fq->lock);
- drv_wake_tx_queue(local, txqi);
+ schedule_and_wake_txq(local, txqi);
return true;
}
@@ -3630,6 +3634,60 @@ out:
}
EXPORT_SYMBOL(ieee80211_tx_dequeue);
+struct ieee80211_txq *ieee80211_next_txq(struct ieee80211_hw *hw, u8 ac)
+{
+ struct ieee80211_local *local = hw_to_local(hw);
+ struct txq_info *txqi = NULL;
+
+ lockdep_assert_held(&local->active_txq_lock[ac]);
+
+ txqi = list_first_entry_or_null(&local->active_txqs[ac],
+ struct txq_info,
+ schedule_order);
+
+ if (!txqi || txqi->schedule_round == local->schedule_round[ac])
+ return NULL;
+
+ list_del_init(&txqi->schedule_order);
+ txqi->schedule_round = local->schedule_round[ac];
+ return &txqi->txq;
+}
+EXPORT_SYMBOL(ieee80211_next_txq);
+
+void ieee80211_return_txq(struct ieee80211_hw *hw,
+ struct ieee80211_txq *txq)
+{
+ struct ieee80211_local *local = hw_to_local(hw);
+ struct txq_info *txqi = to_txq_info(txq);
+
+ lockdep_assert_held(&local->active_txq_lock[txq->ac]);
+
+ if (list_empty(&txqi->schedule_order) &&
+ (!skb_queue_empty(&txqi->frags) || txqi->tin.backlog_packets))
+ list_add_tail(&txqi->schedule_order,
+ &local->active_txqs[txq->ac]);
+}
+EXPORT_SYMBOL(ieee80211_return_txq);
+
+void ieee80211_txq_schedule_start(struct ieee80211_hw *hw, u8 ac)
+ __acquires(txq_lock)
+{
+ struct ieee80211_local *local = hw_to_local(hw);
+
+ spin_lock_bh(&local->active_txq_lock[ac]);
+ local->schedule_round[ac]++;
+}
+EXPORT_SYMBOL(ieee80211_txq_schedule_start);
+
+void ieee80211_txq_schedule_end(struct ieee80211_hw *hw, u8 ac)
+ __releases(txq_lock)
+{
+ struct ieee80211_local *local = hw_to_local(hw);
+
+ spin_unlock_bh(&local->active_txq_lock[ac]);
+}
+EXPORT_SYMBOL(ieee80211_txq_schedule_end);
+
void __ieee80211_subif_start_xmit(struct sk_buff *skb,
struct net_device *dev,
u32 info_flags)