author    David S. Miller <davem@davemloft.net>  2017-08-07 11:34:41 -0700
committer David S. Miller <davem@davemloft.net>  2017-08-07 11:37:47 -0700
commit    46d4b68f891bee5d83a32508bfbd9778be6b1b63 (patch)
tree      3c05e6c6285102eef6f6756bd692e86ae86f9b30 /drivers/net/wireless/intel/iwlwifi/mvm/sta.c
parent    2a32ca138e24836d01918797018b0fd0302c6b3f (diff)
parent    9d546198705a79630cb29b1cc47a43e75b8afb89 (diff)
Merge tag 'wireless-drivers-next-for-davem-2017-08-07' of git://git.kernel.org/pub/scm/linux/kernel/git/kvalo/wireless-drivers-next
Kalle Valo says:

====================
wireless-drivers-next patches for 4.14

The first wireless-drivers-next pull request for 4.14. I'm submitting
this unusually late in the cycle because my vacation postponed it. But
even though it is late, there still aren't that many new features;
it's mostly cleanups and fixes.

Major changes:

ath10k
* preparation for wcn3990 support

iwlwifi
* reorganization of the code into separate directories continues

qtnfmac
* regulatory support updates
* add get_channel, dump_survey and channel_switch cfg80211 handlers

====================

Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'drivers/net/wireless/intel/iwlwifi/mvm/sta.c')
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/mvm/sta.c  |  452
1 file changed, 99 insertions(+), 353 deletions(-)
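The bulk of the sta.c diff below is one mechanical cleanup: with DQA
(dynamic queue allocation) now the only supported mode, every branch
guarded by iwl_mvm_is_dqa_supported() collapses to its DQA side, and
the pre-DQA fallbacks (static per-AC TDLS queues, the pending_frames
counters, the sta_drained worker) go away. A minimal sketch of that
before/after shape, where dqa_supported and the alloc_queues_*()
helpers are hypothetical stand-ins, not the driver's real API:

/* Illustrative sketch only -- not the driver's real internals. */
#include <stdbool.h>
#include <stdio.h>

/* Before: every call site branched on the transitional DQA flag. */
static void alloc_queues_old(bool dqa_supported)
{
	if (dqa_supported)
		puts("defer queue allocation until first TX (DQA)");
	else
		puts("grab a static queue per AC");	/* legacy path */
}

/* After: DQA is assumed unconditionally, so the branch and the
 * legacy path disappear, as in the hunks below. */
static void alloc_queues_new(void)
{
	puts("defer queue allocation until first TX (DQA)");
}

int main(void)
{
	alloc_queues_old(true);
	alloc_queues_new();
	return 0;
}

The same collapse explains why iwl_mvm_tdls_sta_init()/_deinit() and
iwl_mvm_sta_drained_wk() can be deleted wholesale below: they only
serviced the legacy, non-DQA branch.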
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/sta.c b/drivers/net/wireless/intel/iwlwifi/mvm/sta.c
index ab66b4394dfc..922cd5379841 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/sta.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/sta.c
@@ -296,60 +296,6 @@ unlock:
rcu_read_unlock();
}
-static int iwl_mvm_tdls_sta_init(struct iwl_mvm *mvm,
- struct ieee80211_sta *sta)
-{
- unsigned long used_hw_queues;
- struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
- unsigned int wdg_timeout =
- iwl_mvm_get_wd_timeout(mvm, NULL, true, false);
- u32 ac;
-
- lockdep_assert_held(&mvm->mutex);
-
- used_hw_queues = iwl_mvm_get_used_hw_queues(mvm, NULL);
-
- /* Find available queues, and allocate them to the ACs */
- for (ac = 0; ac < IEEE80211_NUM_ACS; ac++) {
- u8 queue = find_first_zero_bit(&used_hw_queues,
- mvm->first_agg_queue);
-
- if (queue >= mvm->first_agg_queue) {
- IWL_ERR(mvm, "Failed to allocate STA queue\n");
- return -EBUSY;
- }
-
- __set_bit(queue, &used_hw_queues);
- mvmsta->hw_queue[ac] = queue;
- }
-
- /* Found a place for all queues - enable them */
- for (ac = 0; ac < IEEE80211_NUM_ACS; ac++) {
- iwl_mvm_enable_ac_txq(mvm, mvmsta->hw_queue[ac],
- mvmsta->hw_queue[ac],
- iwl_mvm_ac_to_tx_fifo[ac], 0,
- wdg_timeout);
- mvmsta->tfd_queue_msk |= BIT(mvmsta->hw_queue[ac]);
- }
-
- return 0;
-}
-
-static void iwl_mvm_tdls_sta_deinit(struct iwl_mvm *mvm,
- struct ieee80211_sta *sta)
-{
- struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
- unsigned long sta_msk;
- int i;
-
- lockdep_assert_held(&mvm->mutex);
-
- /* disable the TDLS STA-specific queues */
- sta_msk = mvmsta->tfd_queue_msk;
- for_each_set_bit(i, &sta_msk, sizeof(sta_msk) * BITS_PER_BYTE)
- iwl_mvm_disable_txq(mvm, i, i, IWL_MAX_TID_COUNT, 0);
-}
-
/* Disable aggregations for a bitmap of TIDs for a given station */
static int iwl_mvm_invalidate_sta_queue(struct iwl_mvm *mvm, int queue,
unsigned long disable_agg_tids,
@@ -757,7 +703,7 @@ static int iwl_mvm_sta_alloc_queue(struct iwl_mvm *mvm,
{
struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
struct iwl_trans_txq_scd_cfg cfg = {
- .fifo = iwl_mvm_ac_to_tx_fifo[ac],
+ .fifo = iwl_mvm_mac_ac_to_tx_fifo(mvm, ac),
.sta_id = mvmsta->sta_id,
.tid = tid,
.frame_limit = IWL_FRAME_LIMIT,
@@ -1315,7 +1261,7 @@ static void iwl_mvm_realloc_queues_after_restart(struct iwl_mvm *mvm,
u16 seq = IEEE80211_SEQ_TO_SN(tid_data->seq_number);
cfg.tid = i;
- cfg.fifo = iwl_mvm_ac_to_tx_fifo[ac];
+ cfg.fifo = iwl_mvm_mac_ac_to_tx_fifo(mvm, ac);
cfg.aggregate = (txq_id >= IWL_MVM_DQA_MIN_DATA_QUEUE ||
txq_id ==
IWL_MVM_DQA_BSS_CLIENT_QUEUE);
@@ -1329,8 +1275,6 @@ static void iwl_mvm_realloc_queues_after_restart(struct iwl_mvm *mvm,
mvm->queue_info[txq_id].status = IWL_MVM_QUEUE_READY;
}
}
-
- atomic_set(&mvm->pending_frames[mvm_sta->sta_id], 0);
}
int iwl_mvm_add_sta(struct iwl_mvm *mvm,
@@ -1355,9 +1299,8 @@ int iwl_mvm_add_sta(struct iwl_mvm *mvm,
spin_lock_init(&mvm_sta->lock);
- /* In DQA mode, if this is a HW restart, re-alloc existing queues */
- if (iwl_mvm_is_dqa_supported(mvm) &&
- test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)) {
+ /* if this is a HW restart re-alloc existing queues */
+ if (test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)) {
iwl_mvm_realloc_queues_after_restart(mvm, mvm_sta);
goto update_fw;
}
@@ -1375,33 +1318,15 @@ int iwl_mvm_add_sta(struct iwl_mvm *mvm,
mvm_sta->sta_type = sta->tdls ? IWL_STA_TDLS_LINK : IWL_STA_LINK;
/* HW restart, don't assume the memory has been zeroed */
- atomic_set(&mvm->pending_frames[sta_id], 0);
mvm_sta->tid_disable_agg = 0xffff; /* No aggs at first */
mvm_sta->tfd_queue_msk = 0;
- /*
- * Allocate new queues for a TDLS station, unless we're in DQA mode,
- * and then they'll be allocated dynamically
- */
- if (!iwl_mvm_is_dqa_supported(mvm) && sta->tdls) {
- ret = iwl_mvm_tdls_sta_init(mvm, sta);
- if (ret)
- return ret;
- } else if (!iwl_mvm_is_dqa_supported(mvm)) {
- for (i = 0; i < IEEE80211_NUM_ACS; i++)
- if (vif->hw_queue[i] != IEEE80211_INVAL_HW_QUEUE)
- mvm_sta->tfd_queue_msk |= BIT(vif->hw_queue[i]);
- }
-
/* for HW restart - reset everything but the sequence number */
for (i = 0; i <= IWL_MAX_TID_COUNT; i++) {
u16 seq = mvm_sta->tid_data[i].seq_number;
memset(&mvm_sta->tid_data[i], 0, sizeof(mvm_sta->tid_data[i]));
mvm_sta->tid_data[i].seq_number = seq;
- if (!iwl_mvm_is_dqa_supported(mvm))
- continue;
-
/*
* Mark all queues for this STA as unallocated and defer TX
* frames until the queue is allocated
@@ -1435,7 +1360,7 @@ int iwl_mvm_add_sta(struct iwl_mvm *mvm,
mvm_sta->dup_data = dup_data;
}
- if (iwl_mvm_is_dqa_supported(mvm) && !iwl_mvm_has_new_tx_api(mvm)) {
+ if (!iwl_mvm_has_new_tx_api(mvm)) {
ret = iwl_mvm_reserve_sta_stream(mvm, sta,
ieee80211_vif_type_p2p(vif));
if (ret)
@@ -1461,8 +1386,6 @@ update_fw:
return 0;
err:
- if (!iwl_mvm_is_dqa_supported(mvm) && sta->tdls)
- iwl_mvm_tdls_sta_deinit(mvm, sta);
return ret;
}
@@ -1535,79 +1458,6 @@ static int iwl_mvm_rm_sta_common(struct iwl_mvm *mvm, u8 sta_id)
return 0;
}
-void iwl_mvm_sta_drained_wk(struct work_struct *wk)
-{
- struct iwl_mvm *mvm = container_of(wk, struct iwl_mvm, sta_drained_wk);
- u8 sta_id;
-
- /*
- * The mutex is needed because of the SYNC cmd, but not only: if the
- * work would run concurrently with iwl_mvm_rm_sta, it would run before
- * iwl_mvm_rm_sta sets the station as busy, and exit. Then
- * iwl_mvm_rm_sta would set the station as busy, and nobody will clean
- * that later.
- */
- mutex_lock(&mvm->mutex);
-
- for_each_set_bit(sta_id, mvm->sta_drained, IWL_MVM_STATION_COUNT) {
- int ret;
- struct ieee80211_sta *sta =
- rcu_dereference_protected(mvm->fw_id_to_mac_id[sta_id],
- lockdep_is_held(&mvm->mutex));
-
- /*
- * This station is in use or RCU-removed; the latter happens in
- * managed mode, where mac80211 removes the station before we
- * can remove it from firmware (we can only do that after the
- * MAC is marked unassociated), and possibly while the deauth
- * frame to disconnect from the AP is still queued. Then, the
- * station pointer is -ENOENT when the last skb is reclaimed.
- */
- if (!IS_ERR(sta) || PTR_ERR(sta) == -ENOENT)
- continue;
-
- if (PTR_ERR(sta) == -EINVAL) {
- IWL_ERR(mvm, "Drained sta %d, but it is internal?\n",
- sta_id);
- continue;
- }
-
- if (!sta) {
- IWL_ERR(mvm, "Drained sta %d, but it was NULL?\n",
- sta_id);
- continue;
- }
-
- WARN_ON(PTR_ERR(sta) != -EBUSY);
- /* This station was removed and we waited until it got drained,
- * we can now proceed and remove it.
- */
- ret = iwl_mvm_rm_sta_common(mvm, sta_id);
- if (ret) {
- IWL_ERR(mvm,
- "Couldn't remove sta %d after it was drained\n",
- sta_id);
- continue;
- }
- RCU_INIT_POINTER(mvm->fw_id_to_mac_id[sta_id], NULL);
- clear_bit(sta_id, mvm->sta_drained);
-
- if (mvm->tfd_drained[sta_id]) {
- unsigned long i, msk = mvm->tfd_drained[sta_id];
-
- for_each_set_bit(i, &msk, sizeof(msk) * BITS_PER_BYTE)
- iwl_mvm_disable_txq(mvm, i, i,
- IWL_MAX_TID_COUNT, 0);
-
- mvm->tfd_drained[sta_id] = 0;
- IWL_DEBUG_TDLS(mvm, "Drained sta %d, with queues %ld\n",
- sta_id, msk);
- }
- }
-
- mutex_unlock(&mvm->mutex);
-}
-
static void iwl_mvm_disable_sta_queues(struct iwl_mvm *mvm,
struct ieee80211_vif *vif,
struct iwl_mvm_sta *mvm_sta)
@@ -1631,10 +1481,11 @@ static void iwl_mvm_disable_sta_queues(struct iwl_mvm *mvm,
int iwl_mvm_wait_sta_queues_empty(struct iwl_mvm *mvm,
struct iwl_mvm_sta *mvm_sta)
{
- int i, ret;
+ int i;
for (i = 0; i < ARRAY_SIZE(mvm_sta->tid_data); i++) {
u16 txq_id;
+ int ret;
spin_lock_bh(&mvm_sta->lock);
txq_id = mvm_sta->tid_data[i].txq_id;
@@ -1645,10 +1496,10 @@ int iwl_mvm_wait_sta_queues_empty(struct iwl_mvm *mvm,
ret = iwl_trans_wait_txq_empty(mvm->trans, txq_id);
if (ret)
- break;
+ return ret;
}
- return ret;
+ return 0;
}
int iwl_mvm_rm_sta(struct iwl_mvm *mvm,
@@ -1665,79 +1516,65 @@ int iwl_mvm_rm_sta(struct iwl_mvm *mvm,
if (iwl_mvm_has_new_rx_api(mvm))
kfree(mvm_sta->dup_data);
- if ((vif->type == NL80211_IFTYPE_STATION &&
- mvmvif->ap_sta_id == sta_id) ||
- iwl_mvm_is_dqa_supported(mvm)){
- ret = iwl_mvm_drain_sta(mvm, mvm_sta, true);
- if (ret)
- return ret;
- /* flush its queues here since we are freeing mvm_sta */
- ret = iwl_mvm_flush_sta(mvm, mvm_sta, false, 0);
- if (ret)
- return ret;
- if (iwl_mvm_has_new_tx_api(mvm)) {
- ret = iwl_mvm_wait_sta_queues_empty(mvm, mvm_sta);
- } else {
- u32 q_mask = mvm_sta->tfd_queue_msk;
+ ret = iwl_mvm_drain_sta(mvm, mvm_sta, true);
+ if (ret)
+ return ret;
- ret = iwl_trans_wait_tx_queues_empty(mvm->trans,
- q_mask);
- }
- if (ret)
- return ret;
- ret = iwl_mvm_drain_sta(mvm, mvm_sta, false);
-
- /* If DQA is supported - the queues can be disabled now */
- if (iwl_mvm_is_dqa_supported(mvm)) {
- iwl_mvm_disable_sta_queues(mvm, vif, mvm_sta);
- /*
- * If pending_frames is set at this point - it must be
- * driver internal logic error, since queues are empty
- * and removed successfully.
- * warn on it but set it to 0 anyway to avoid station
- * not being removed later in the function
- */
- WARN_ON(atomic_xchg(&mvm->pending_frames[sta_id], 0));
- }
+ /* flush its queues here since we are freeing mvm_sta */
+ ret = iwl_mvm_flush_sta(mvm, mvm_sta, false, 0);
+ if (ret)
+ return ret;
+ if (iwl_mvm_has_new_tx_api(mvm)) {
+ ret = iwl_mvm_wait_sta_queues_empty(mvm, mvm_sta);
+ } else {
+ u32 q_mask = mvm_sta->tfd_queue_msk;
- /* If there is a TXQ still marked as reserved - free it */
- if (iwl_mvm_is_dqa_supported(mvm) &&
- mvm_sta->reserved_queue != IEEE80211_INVAL_HW_QUEUE) {
- u8 reserved_txq = mvm_sta->reserved_queue;
- enum iwl_mvm_queue_status *status;
-
- /*
- * If no traffic has gone through the reserved TXQ - it
- * is still marked as IWL_MVM_QUEUE_RESERVED, and
- * should be manually marked as free again
- */
- spin_lock_bh(&mvm->queue_info_lock);
- status = &mvm->queue_info[reserved_txq].status;
- if (WARN((*status != IWL_MVM_QUEUE_RESERVED) &&
- (*status != IWL_MVM_QUEUE_FREE),
- "sta_id %d reserved txq %d status %d",
- sta_id, reserved_txq, *status)) {
- spin_unlock_bh(&mvm->queue_info_lock);
- return -EINVAL;
- }
+ ret = iwl_trans_wait_tx_queues_empty(mvm->trans,
+ q_mask);
+ }
+ if (ret)
+ return ret;
+
+ ret = iwl_mvm_drain_sta(mvm, mvm_sta, false);
+
+ iwl_mvm_disable_sta_queues(mvm, vif, mvm_sta);
+
+ /* If there is a TXQ still marked as reserved - free it */
+ if (mvm_sta->reserved_queue != IEEE80211_INVAL_HW_QUEUE) {
+ u8 reserved_txq = mvm_sta->reserved_queue;
+ enum iwl_mvm_queue_status *status;
- *status = IWL_MVM_QUEUE_FREE;
+ /*
+ * If no traffic has gone through the reserved TXQ - it
+ * is still marked as IWL_MVM_QUEUE_RESERVED, and
+ * should be manually marked as free again
+ */
+ spin_lock_bh(&mvm->queue_info_lock);
+ status = &mvm->queue_info[reserved_txq].status;
+ if (WARN((*status != IWL_MVM_QUEUE_RESERVED) &&
+ (*status != IWL_MVM_QUEUE_FREE),
+ "sta_id %d reserved txq %d status %d",
+ sta_id, reserved_txq, *status)) {
spin_unlock_bh(&mvm->queue_info_lock);
+ return -EINVAL;
}
- if (vif->type == NL80211_IFTYPE_STATION &&
- mvmvif->ap_sta_id == sta_id) {
- /* if associated - we can't remove the AP STA now */
- if (vif->bss_conf.assoc)
- return ret;
+ *status = IWL_MVM_QUEUE_FREE;
+ spin_unlock_bh(&mvm->queue_info_lock);
+ }
+
+ if (vif->type == NL80211_IFTYPE_STATION &&
+ mvmvif->ap_sta_id == sta_id) {
+ /* if associated - we can't remove the AP STA now */
+ if (vif->bss_conf.assoc)
+ return ret;
- /* unassoc - go ahead - remove the AP STA now */
- mvmvif->ap_sta_id = IWL_MVM_INVALID_STA;
+ /* unassoc - go ahead - remove the AP STA now */
+ mvmvif->ap_sta_id = IWL_MVM_INVALID_STA;
- /* clear d0i3_ap_sta_id if no longer relevant */
- if (mvm->d0i3_ap_sta_id == sta_id)
- mvm->d0i3_ap_sta_id = IWL_MVM_INVALID_STA;
- }
+ /* clear d0i3_ap_sta_id if no longer relevant */
+ if (mvm->d0i3_ap_sta_id == sta_id)
+ mvm->d0i3_ap_sta_id = IWL_MVM_INVALID_STA;
}
/*
@@ -1754,32 +1591,10 @@ int iwl_mvm_rm_sta(struct iwl_mvm *mvm,
* calls the drain worker.
*/
spin_lock_bh(&mvm_sta->lock);
+ spin_unlock_bh(&mvm_sta->lock);
- /*
- * There are frames pending on the AC queues for this station.
- * We need to wait until all the frames are drained...
- */
- if (atomic_read(&mvm->pending_frames[sta_id])) {
- rcu_assign_pointer(mvm->fw_id_to_mac_id[sta_id],
- ERR_PTR(-EBUSY));
- spin_unlock_bh(&mvm_sta->lock);
-
- /* disable TDLS sta queues on drain complete */
- if (sta->tdls) {
- mvm->tfd_drained[sta_id] = mvm_sta->tfd_queue_msk;
- IWL_DEBUG_TDLS(mvm, "Draining TDLS sta %d\n", sta_id);
- }
-
- ret = iwl_mvm_drain_sta(mvm, mvm_sta, true);
- } else {
- spin_unlock_bh(&mvm_sta->lock);
-
- if (!iwl_mvm_is_dqa_supported(mvm) && sta->tdls)
- iwl_mvm_tdls_sta_deinit(mvm, sta);
-
- ret = iwl_mvm_rm_sta_common(mvm, mvm_sta->sta_id);
- RCU_INIT_POINTER(mvm->fw_id_to_mac_id[mvm_sta->sta_id], NULL);
- }
+ ret = iwl_mvm_rm_sta_common(mvm, mvm_sta->sta_id);
+ RCU_INIT_POINTER(mvm->fw_id_to_mac_id[mvm_sta->sta_id], NULL);
return ret;
}
@@ -1878,7 +1693,7 @@ static void iwl_mvm_enable_aux_queue(struct iwl_mvm *mvm)
IWL_MAX_TID_COUNT,
wdg_timeout);
mvm->aux_queue = queue;
- } else if (iwl_mvm_is_dqa_supported(mvm)) {
+ } else {
struct iwl_trans_txq_scd_cfg cfg = {
.fifo = IWL_MVM_TX_FIFO_MCAST,
.sta_id = mvm->aux_sta.sta_id,
@@ -1889,9 +1704,6 @@ static void iwl_mvm_enable_aux_queue(struct iwl_mvm *mvm)
iwl_mvm_enable_txq(mvm, mvm->aux_queue, mvm->aux_queue, 0, &cfg,
wdg_timeout);
- } else {
- iwl_mvm_enable_ac_txq(mvm, mvm->aux_queue, mvm->aux_queue,
- IWL_MVM_TX_FIFO_MCAST, 0, wdg_timeout);
}
}
@@ -1991,7 +1803,7 @@ int iwl_mvm_send_add_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
lockdep_assert_held(&mvm->mutex);
- if (iwl_mvm_is_dqa_supported(mvm) && !iwl_mvm_has_new_tx_api(mvm)) {
+ if (!iwl_mvm_has_new_tx_api(mvm)) {
if (vif->type == NL80211_IFTYPE_AP ||
vif->type == NL80211_IFTYPE_ADHOC)
queue = mvm->probe_queue;
@@ -2078,8 +1890,7 @@ int iwl_mvm_send_rm_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
lockdep_assert_held(&mvm->mutex);
- if (iwl_mvm_is_dqa_supported(mvm))
- iwl_mvm_free_bcast_sta_queues(mvm, vif);
+ iwl_mvm_free_bcast_sta_queues(mvm, vif);
ret = iwl_mvm_rm_sta_common(mvm, mvmvif->bcast_sta.sta_id);
if (ret)
@@ -2090,23 +1901,10 @@ int iwl_mvm_send_rm_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
int iwl_mvm_alloc_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
{
struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
- u32 qmask = 0;
lockdep_assert_held(&mvm->mutex);
- if (!iwl_mvm_is_dqa_supported(mvm)) {
- qmask = iwl_mvm_mac_get_queues_mask(vif);
-
- /*
- * The firmware defines the TFD queue mask to only be relevant
- * for *unicast* queues, so the multicast (CAB) queue shouldn't
- * be included. This only happens in NL80211_IFTYPE_AP vif type,
- * so the next line will only have an effect there.
- */
- qmask &= ~BIT(vif->cab_queue);
- }
-
- return iwl_mvm_allocate_int_sta(mvm, &mvmvif->bcast_sta, qmask,
+ return iwl_mvm_allocate_int_sta(mvm, &mvmvif->bcast_sta, 0,
ieee80211_vif_type_p2p(vif),
IWL_STA_GENERAL_PURPOSE);
}
@@ -2118,7 +1916,7 @@ int iwl_mvm_alloc_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
* @mvm: the mvm component
* @vif: the interface to which the broadcast station is added
* @bsta: the broadcast station to add. */
-int iwl_mvm_add_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
+int iwl_mvm_add_p2p_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
{
struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
struct iwl_mvm_int_sta *bsta = &mvmvif->bcast_sta;
@@ -2149,7 +1947,7 @@ void iwl_mvm_dealloc_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
* Send the FW a request to remove the station from its internal data
* structures, and in addition remove it from the local data structure.
*/
-int iwl_mvm_rm_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
+int iwl_mvm_rm_p2p_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
{
int ret;
@@ -2188,9 +1986,6 @@ int iwl_mvm_add_mcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
lockdep_assert_held(&mvm->mutex);
- if (!iwl_mvm_is_dqa_supported(mvm))
- return 0;
-
if (WARN_ON(vif->type != NL80211_IFTYPE_AP &&
vif->type != NL80211_IFTYPE_ADHOC))
return -ENOTSUPP;
@@ -2255,9 +2050,6 @@ int iwl_mvm_rm_mcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
lockdep_assert_held(&mvm->mutex);
- if (!iwl_mvm_is_dqa_supported(mvm))
- return 0;
-
iwl_mvm_flush_sta(mvm, &mvmvif->mcast_sta, true, 0);
iwl_mvm_disable_txq(mvm, mvmvif->cab_queue, vif->cab_queue,
@@ -2507,8 +2299,6 @@ int iwl_mvm_sta_tx_agg(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
mvm_sta->tid_disable_agg &= ~BIT(tid);
} else {
/* In DQA-mode the queue isn't removed on agg termination */
- if (!iwl_mvm_is_dqa_supported(mvm))
- mvm_sta->tfd_queue_msk &= ~BIT(queue);
mvm_sta->tid_disable_agg |= BIT(tid);
}
@@ -2611,19 +2401,17 @@ int iwl_mvm_sta_tx_agg_start(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
ret = -ENXIO;
goto release_locks;
}
- } else if (iwl_mvm_is_dqa_supported(mvm) &&
- unlikely(mvm->queue_info[txq_id].status ==
+ } else if (unlikely(mvm->queue_info[txq_id].status ==
IWL_MVM_QUEUE_SHARED)) {
ret = -ENXIO;
IWL_DEBUG_TX_QUEUES(mvm,
"Can't start tid %d agg on shared queue!\n",
tid);
goto release_locks;
- } else if (!iwl_mvm_is_dqa_supported(mvm) ||
- mvm->queue_info[txq_id].status != IWL_MVM_QUEUE_READY) {
+ } else if (mvm->queue_info[txq_id].status != IWL_MVM_QUEUE_READY) {
txq_id = iwl_mvm_find_free_queue(mvm, mvmsta->sta_id,
- mvm->first_agg_queue,
- mvm->last_agg_queue);
+ IWL_MVM_DQA_MIN_DATA_QUEUE,
+ IWL_MVM_DQA_MAX_DATA_QUEUE);
if (txq_id < 0) {
ret = txq_id;
IWL_ERR(mvm, "Failed to allocate agg queue\n");
@@ -2741,37 +2529,34 @@ int iwl_mvm_sta_tx_agg_oper(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
queue_status = mvm->queue_info[queue].status;
spin_unlock_bh(&mvm->queue_info_lock);
- /* In DQA mode, the existing queue might need to be reconfigured */
- if (iwl_mvm_is_dqa_supported(mvm)) {
- /* Maybe there is no need to even alloc a queue... */
- if (mvm->queue_info[queue].status == IWL_MVM_QUEUE_READY)
- alloc_queue = false;
+ /* Maybe there is no need to even alloc a queue... */
+ if (mvm->queue_info[queue].status == IWL_MVM_QUEUE_READY)
+ alloc_queue = false;
+ /*
+ * Only reconfig the SCD for the queue if the window size has
+ * changed from current (become smaller)
+ */
+ if (!alloc_queue && buf_size < mvmsta->max_agg_bufsize) {
/*
- * Only reconfig the SCD for the queue if the window size has
- * changed from current (become smaller)
+ * If reconfiguring an existing queue, it first must be
+ * drained
*/
- if (!alloc_queue && buf_size < mvmsta->max_agg_bufsize) {
- /*
- * If reconfiguring an existing queue, it first must be
- * drained
- */
- ret = iwl_trans_wait_tx_queues_empty(mvm->trans,
- BIT(queue));
- if (ret) {
- IWL_ERR(mvm,
- "Error draining queue before reconfig\n");
- return ret;
- }
+ ret = iwl_trans_wait_tx_queues_empty(mvm->trans,
+ BIT(queue));
+ if (ret) {
+ IWL_ERR(mvm,
+ "Error draining queue before reconfig\n");
+ return ret;
+ }
- ret = iwl_mvm_reconfig_scd(mvm, queue, cfg.fifo,
- mvmsta->sta_id, tid,
- buf_size, ssn);
- if (ret) {
- IWL_ERR(mvm,
- "Error reconfiguring TXQ #%d\n", queue);
- return ret;
- }
+ ret = iwl_mvm_reconfig_scd(mvm, queue, cfg.fifo,
+ mvmsta->sta_id, tid,
+ buf_size, ssn);
+ if (ret) {
+ IWL_ERR(mvm,
+ "Error reconfiguring TXQ #%d\n", queue);
+ return ret;
}
}
@@ -2867,18 +2652,6 @@ int iwl_mvm_sta_tx_agg_stop(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
"ssn = %d, next_recl = %d\n",
tid_data->ssn, tid_data->next_reclaimed);
- /*
- * There are still packets for this RA / TID in the HW.
- * Not relevant for DQA mode, since there is no need to disable
- * the queue.
- */
- if (!iwl_mvm_is_dqa_supported(mvm) &&
- tid_data->ssn != tid_data->next_reclaimed) {
- tid_data->state = IWL_EMPTYING_HW_QUEUE_DELBA;
- err = 0;
- break;
- }
-
tid_data->ssn = 0xffff;
tid_data->state = IWL_AGG_OFF;
spin_unlock_bh(&mvmsta->lock);
@@ -2886,12 +2659,6 @@ int iwl_mvm_sta_tx_agg_stop(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid);
iwl_mvm_sta_tx_agg(mvm, sta, tid, txq_id, false);
-
- if (!iwl_mvm_is_dqa_supported(mvm)) {
- int mac_queue = vif->hw_queue[tid_to_mac80211_ac[tid]];
-
- iwl_mvm_disable_txq(mvm, txq_id, mac_queue, tid, 0);
- }
return 0;
case IWL_AGG_STARTING:
case IWL_EMPTYING_HW_QUEUE_ADDBA:
@@ -2961,13 +2728,6 @@ int iwl_mvm_sta_tx_agg_flush(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
iwl_mvm_drain_sta(mvm, mvmsta, false);
iwl_mvm_sta_tx_agg(mvm, sta, tid, txq_id, false);
-
- if (!iwl_mvm_is_dqa_supported(mvm)) {
- int mac_queue = vif->hw_queue[tid_to_mac80211_ac[tid]];
-
- iwl_mvm_disable_txq(mvm, tid_data->txq_id, mac_queue,
- tid, 0);
- }
}
return 0;
@@ -3586,15 +3346,6 @@ void iwl_mvm_sta_modify_sleep_tx_count(struct iwl_mvm *mvm,
u16 n_queued;
tid_data = &mvmsta->tid_data[tid];
- if (WARN(!iwl_mvm_is_dqa_supported(mvm) &&
- tid_data->state != IWL_AGG_ON &&
- tid_data->state != IWL_EMPTYING_HW_QUEUE_DELBA,
- "TID %d state is %d\n",
- tid, tid_data->state)) {
- spin_unlock_bh(&mvmsta->lock);
- ieee80211_sta_eosp(sta);
- return;
- }
n_queued = iwl_mvm_tid_queued(mvm, tid_data);
if (n_queued > remaining) {
@@ -3688,13 +3439,8 @@ void iwl_mvm_sta_modify_disable_tx_ap(struct iwl_mvm *mvm,
mvm_sta->disable_tx = disable;
- /*
- * Tell mac80211 to start/stop queuing tx for this station,
- * but don't stop queuing if there are still pending frames
- * for this station.
- */
- if (disable || !atomic_read(&mvm->pending_frames[mvm_sta->sta_id]))
- ieee80211_sta_block_awake(mvm->hw, sta, disable);
+ /* Tell mac80211 to start/stop queuing tx for this station */
+ ieee80211_sta_block_awake(mvm->hw, sta, disable);
iwl_mvm_sta_modify_disable_tx(mvm, mvm_sta, disable);