path: root/drivers/net/wireless/intel/iwlwifi/mvm/sta.c
author	Emmanuel Grumbach <emmanuel.grumbach@intel.com>	2017-05-07 15:00:31 +0300
committer	Luca Coelho <luciano.coelho@intel.com>	2017-06-23 12:07:04 +0300
commit	dcfbd67b4b8d5223d5362aac9af267387a32f568 (patch)
tree	a4ec097c5ada5b733220ad9eafa3158a8f2ced34 /drivers/net/wireless/intel/iwlwifi/mvm/sta.c
parent	59df97f7223636399f425a5e76586218c48d791e (diff)
iwlwifi: add a W/A for a scheduler hardware bug
In case we need to move the scheduler write pointer by steps of 0x40, 0x80
or 0xc0, the scheduler gets stuck. This leads to hardware error interrupts
with a status of 0x5A5A5A5A or similar.

In order to work around this, detect in the transport layer that we are
going to hit this case and tell iwlmvm to increment the sequence number of
the packets. This allows us to keep the requirement that the WiFi sequence
number stays in sync with the index in the scheduler Tx queue, while also
avoiding the problematic step sizes.

This means that from time to time we will start a queue from ssn + 1, but
that shouldn't be a problem since we don't switch to new queues for AMPDU
now that we have DQA, which allows us to keep the same queue while toggling
the AMPDU state.

This bug has been fixed on 9000 devices and up.

Signed-off-by: Emmanuel Grumbach <emmanuel.grumbach@intel.com>
Signed-off-by: Luca Coelho <luciano.coelho@intel.com>
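[Editor's illustration, not part of the commit: the transport-layer half of this change is not in this file's diff. The sketch below is a hypothetical C helper (name and signature invented here) showing only the condition the message describes: a scheduler write-pointer jump of 0x40, 0x80 or 0xc0 entries is the case to avoid, so the op_mode is told to start the queue from ssn + 1.]

#include <stdbool.h>
#include <stdint.h>

/*
 * Hypothetical sketch, not the driver's actual transport code: returns true
 * when moving the scheduler write pointer from write_ptr to ssn would be a
 * jump of 0x40, 0x80 or 0xc0 entries, the step sizes that trigger the
 * hardware bug described above.
 */
static bool scd_move_hits_bug(uint16_t ssn, uint16_t write_ptr)
{
	uint16_t step = (ssn - write_ptr) & 0xff;	/* queue indices wrap at 256 */

	return step != 0 && !(step & 0x3f);		/* nonzero multiple of 0x40 */
}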
Diffstat (limited to 'drivers/net/wireless/intel/iwlwifi/mvm/sta.c')
-rw-r--r--	drivers/net/wireless/intel/iwlwifi/mvm/sta.c	17
1 file changed, 14 insertions, 3 deletions
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/sta.c b/drivers/net/wireless/intel/iwlwifi/mvm/sta.c
index 0249300c4600..aa41ee8ed916 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/sta.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/sta.c
@@ -758,7 +758,7 @@ static int iwl_mvm_sta_alloc_queue(struct iwl_mvm *mvm,
bool using_inactive_queue = false, same_sta = false;
unsigned long disable_agg_tids = 0;
enum iwl_mvm_agg_state queue_state;
- bool shared_queue = false;
+ bool shared_queue = false, inc_ssn;
int ssn;
unsigned long tfd_queue_mask;
int ret;
@@ -885,8 +885,12 @@ static int iwl_mvm_sta_alloc_queue(struct iwl_mvm *mvm,
}
ssn = IEEE80211_SEQ_TO_SN(le16_to_cpu(hdr->seq_ctrl));
- iwl_mvm_enable_txq(mvm, queue, mac_queue, ssn, &cfg,
- wdg_timeout);
+ inc_ssn = iwl_mvm_enable_txq(mvm, queue, mac_queue,
+ ssn, &cfg, wdg_timeout);
+ if (inc_ssn) {
+ ssn = (ssn + 1) & IEEE80211_SCTL_SEQ;
+ le16_add_cpu(&hdr->seq_ctrl, 0x10);
+ }
/*
* Mark queue as shared in transport if shared
@@ -898,6 +902,13 @@ static int iwl_mvm_sta_alloc_queue(struct iwl_mvm *mvm,
iwl_trans_txq_set_shared_mode(mvm->trans, queue, true);
spin_lock_bh(&mvmsta->lock);
+ /*
+ * This looks racy, but it is not. We have only one packet for
+ * this ra/tid in our Tx path since we stop the Qdisc when we
+ * need to allocate a new TFD queue.
+ */
+ if (inc_ssn)
+ mvmsta->tid_data[tid].seq_number += 0x10;
mvmsta->tid_data[tid].txq_id = queue;
mvmsta->tid_data[tid].is_tid_active = true;
mvmsta->tfd_queue_msk |= BIT(queue);
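[Editor's illustration, not part of the patch: the two "+ 0x10" updates above advance the 802.11 sequence number by exactly one, because the Sequence Control field keeps the fragment number in bits 0-3 and the sequence number in bits 4-15, and the driver's seq_number is held in the same shifted form. A tiny standalone sketch; SEQ_TO_SN here is assumed to mirror the kernel's IEEE80211_SEQ_TO_SN.]

#include <stdint.h>
#include <stdio.h>

/* Assumed to mirror IEEE80211_SEQ_TO_SN: the SN lives in bits 4..15. */
#define SEQ_TO_SN(seq)	(((seq) & 0xfff0) >> 4)

int main(void)
{
	uint16_t seq_ctrl = 0x0123;	/* SN = 0x012, fragment number = 3 */

	printf("SN before: 0x%03x\n", (unsigned)SEQ_TO_SN(seq_ctrl));
	seq_ctrl += 0x10;		/* the work-around's one-SN bump */
	printf("SN after:  0x%03x\n", (unsigned)SEQ_TO_SN(seq_ctrl));	/* 0x013 */
	return 0;
}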