author     Jacob Keller <jacob.e.keller@intel.com>       2017-07-14 09:10:13 -0400
committer  Jeff Kirsher <jeffrey.t.kirsher@intel.com>    2017-08-27 16:15:24 -0700
commit     742c9875759c1858c3312442a78a80f3e93d82c4 (patch)
tree       a7443df896f49bcf000baa441ecb7f2b6c859eeb /drivers/net/ethernet/intel/i40evf/i40e_txrx.c
parent     0a2c7722be1705edca34458bd9de2f97188f9636 (diff)
i40e/i40evf: avoid dynamic ITR updates when polling or low packet rate
The dynamic ITR algorithm depends on a calculation of usecs which assumes that interrupts have been firing constantly at the interrupt throttle rate. This is not guaranteed, because we could have a low packet rate or have been polling in software.

We estimate whether this is the case by using jiffies to determine whether it has been too long since the last ITR update. If the jiffies time difference is larger than the expected interval, the calculation is guaranteed to be incorrect. If it is smaller, we might have been polling for part of the window, but the difference shouldn't affect the calculation too much.

This ensures that we don't get stuck in BULK latency during certain rare situations where we receive bursts of packets that force us into NAPI polling.

Signed-off-by: Jacob Keller <jacob.e.keller@intel.com>
Tested-by: Andrew Bowers <andrewx.bowers@intel.com>
Signed-off-by: Jeff Kirsher <jeffrey.t.kirsher@intel.com>
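As a rough standalone illustration (plain userspace C, not the driver code itself; the value of ITR_COUNTDOWN_START and the helper name are assumptions for the example), the staleness test amounts to comparing the wall-clock time elapsed since the last ITR update against the time that ITR_COUNTDOWN_START interrupts at the current throttle rate should have taken:

/* Illustrative analogue of the patch's staleness check. The driver
 * compares jiffies_to_usecs(jiffies - rc->last_itr_update) against
 * (rc->itr << 1) * ITR_COUNTDOWN_START; here we model that with plain
 * integers. ITR_COUNTDOWN_START's value is assumed for the example.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define ITR_COUNTDOWN_START 100	/* assumed interrupt count per window */

/* itr_usecs: usecs per interrupt implied by the ITR setting (the ITR
 * registers hold 2-usec units, hence the "<< 1" in the driver).
 * elapsed_usecs: wall time since the counters were last reset.
 * Returns true when interrupts fired less often than the ITR assumes,
 * so bytes/usecs math over the window would be misleading.
 */
static bool itr_window_stale(uint32_t itr_usecs, uint64_t elapsed_usecs)
{
	uint64_t expected_usecs = (uint64_t)itr_usecs * ITR_COUNTDOWN_START;

	return elapsed_usecs > expected_usecs;
}

int main(void)
{
	/* 50 usecs per interrupt (~20k ints/s): a 5 ms window is on
	 * schedule, a 20 ms window means we were polling or mostly idle.
	 */
	printf("5ms window stale?  %d\n", itr_window_stale(50, 5000));
	printf("20ms window stale? %d\n", itr_window_stale(50, 20000));
	return 0;
}

When the window is stale, the patch skips the bytes/usecs bucketing entirely and falls back to I40E_LOW_LATENCY, as the diff below shows.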
Diffstat (limited to 'drivers/net/ethernet/intel/i40evf/i40e_txrx.c')
-rw-r--r--	drivers/net/ethernet/intel/i40evf/i40e_txrx.c	| 22
1 file changed, 17 insertions(+), 5 deletions(-)
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_txrx.c b/drivers/net/ethernet/intel/i40evf/i40e_txrx.c
index 2f7d9f4a6746..c32c62462c84 100644
--- a/drivers/net/ethernet/intel/i40evf/i40e_txrx.c
+++ b/drivers/net/ethernet/intel/i40evf/i40e_txrx.c
@@ -359,11 +359,25 @@ static bool i40e_set_new_dynamic_itr(struct i40e_ring_container *rc)
 	enum i40e_latency_range new_latency_range = rc->latency_range;
 	u32 new_itr = rc->itr;
 	int bytes_per_int;
-	int usecs;
+	unsigned int usecs, estimated_usecs;
 
 	if (rc->total_packets == 0 || !rc->itr)
 		return false;
 
+	usecs = (rc->itr << 1) * ITR_COUNTDOWN_START;
+	bytes_per_int = rc->total_bytes / usecs;
+
+	/* The calculations in this algorithm depend on interrupts actually
+	 * firing at the ITR rate. This may not happen if the packet rate is
+	 * really low, or if we've been napi polling. Check to make sure
+	 * that's not the case before we continue.
+	 */
+	estimated_usecs = jiffies_to_usecs(jiffies - rc->last_itr_update);
+	if (estimated_usecs > usecs) {
+		new_latency_range = I40E_LOW_LATENCY;
+		goto reset_latency;
+	}
+
 	/* simple throttlerate management
 	 *   0-10MB/s   lowest (50000 ints/s)
 	 *  10-20MB/s   low    (20000 ints/s)
@@ -375,9 +389,6 @@ static bool i40e_set_new_dynamic_itr(struct i40e_ring_container *rc)
 	 * are in 2 usec increments in the ITR registers, and make sure
 	 * to use the smoothed values that the countdown timer gives us.
 	 */
-	usecs = (rc->itr << 1) * ITR_COUNTDOWN_START;
-	bytes_per_int = rc->total_bytes / usecs;
-
 	switch (new_latency_range) {
 	case I40E_LOWEST_LATENCY:
 		if (bytes_per_int > 10)
@@ -396,6 +407,7 @@ static bool i40e_set_new_dynamic_itr(struct i40e_ring_container *rc)
 		break;
 	}
 
+reset_latency:
 	rc->latency_range = new_latency_range;
 
 	switch (new_latency_range) {
@@ -414,12 +426,12 @@ static bool i40e_set_new_dynamic_itr(struct i40e_ring_container *rc)
 
 	rc->total_bytes = 0;
 	rc->total_packets = 0;
+	rc->last_itr_update = jiffies;
 
 	if (new_itr != rc->itr) {
 		rc->itr = new_itr;
 		return true;
 	}
-
 	return false;
 }
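For reference, the throttle-rate table in the first hunk classifies observed throughput into latency ranges; bytes per usec is numerically equal to MB/s, which is why the driver can compare bytes_per_int directly against 10 and 20. A minimal sketch of that bucketing (standalone C; the enum is redefined locally, and the behavior above 20 MB/s follows the table's bulk bucket, which is only partly visible in the hunk shown):

/* Sketch of the throughput bucketing described by the comment table in
 * the first hunk. Thresholds of 10 and 20 MB/s come from the visible
 * comment lines; treating everything above 20 MB/s as bulk is an
 * assumption based on the elided remainder of that table.
 */
#include <stdio.h>

enum latency_range {
	LOWEST_LATENCY,	/* <= 10 MB/s: keep the highest interrupt rate */
	LOW_LATENCY,	/* 10-20 MB/s */
	BULK_LATENCY,	/* > 20 MB/s: back off to the lowest rate */
};

static enum latency_range classify_throughput(unsigned int bytes_per_usec)
{
	if (bytes_per_usec <= 10)
		return LOWEST_LATENCY;
	if (bytes_per_usec <= 20)
		return LOW_LATENCY;
	return BULK_LATENCY;
}

int main(void)
{
	printf("%d %d %d\n",
	       classify_throughput(5),
	       classify_throughput(15),
	       classify_throughput(200));
	return 0;
}

With this patch, that bucketing is bypassed via the reset_latency label whenever the measurement window is stale, which is what keeps a burst-then-poll workload from being pinned in BULK latency.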