author     Eric Dumazet <eric.dumazet@gmail.com>    2011-11-30 12:10:53 +0000
committer  Greg Kroah-Hartman <gregkh@suse.de>      2011-12-09 08:56:08 -0800
commit     4851c6a079350393f4a9219878e699845412bace
tree       c9e54df7559d435f0c47470b60f793ea445b499a
parent     51338aa88d3ef2d05bf8b1f8a3019ebffcea74f1
sch_red: fix red_calc_qavg_from_idle_time
[ Upstream commit ea6a5d3b97b768561db6358f15e4c84ced0f4f7e ]

Since commit a4a710c4a7490587 (pkt_sched: Change PSCHED_SHIFT from 10 to 6)
it seems RED/GRED are broken.

red_calc_qavg_from_idle_time() computes a delay in us units, but this delay
is now 16 times bigger than the real delay, so the final qavg result is
smaller than expected.

Use standard kernel time services, since there is no need to obfuscate them.

Signed-off-by: Eric Dumazet <eric.dumazet@gmail.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
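The factor of 16 follows directly from the shift change: a psched tick went from nanoseconds >> 10 (roughly 1 us) to nanoseconds >> 6 (64 ns), so a tick count read as if it were microseconds overstates the idle time by 2^(10-6) = 16. A small userspace sketch of that arithmetic (illustrative only, not kernel code; the variable names are made up):

```c
/*
 * Illustrative userspace sketch: why PSCHED_SHIFT going from 10 to 6
 * makes a tick count, misread as microseconds, about 16x too large.
 */
#include <stdio.h>
#include <stdint.h>

int main(void)
{
	int64_t idle_ns = 5000000;             /* a real 5 ms idle period   */

	/* psched time is (nanoseconds >> PSCHED_SHIFT) */
	int64_t ticks_shift10 = idle_ns >> 10; /* old: ~1.024 us per tick   */
	int64_t ticks_shift6  = idle_ns >> 6;  /* new: 64 ns per tick       */
	int64_t real_us       = idle_ns / 1000;

	printf("real idle time        : %lld us\n", (long long)real_us);
	printf("ticks, PSCHED_SHIFT=10: %lld (close to us, old behaviour)\n",
	       (long long)ticks_shift10);
	printf("ticks, PSCHED_SHIFT=6 : %lld (about 16x too big if read as us)\n",
	       (long long)ticks_shift6);
	return 0;
}
```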
 include/net/red.h | 15
 1 file changed, 6 insertions(+), 9 deletions(-)
diff --git a/include/net/red.h b/include/net/red.h
index 3319f16b3beb..b72a3b833936 100644
--- a/include/net/red.h
+++ b/include/net/red.h
@@ -116,7 +116,7 @@ struct red_parms {
 	u32		qR;		/* Cached random number */
 
 	unsigned long	qavg;		/* Average queue length: A scaled */
-	psched_time_t	qidlestart;	/* Start of current idle period */
+	ktime_t		qidlestart;	/* Start of current idle period */
 };
 
 static inline u32 red_rmask(u8 Plog)
@@ -148,17 +148,17 @@ static inline void red_set_parms(struct red_parms *p,
 
 static inline int red_is_idling(struct red_parms *p)
 {
-	return p->qidlestart != PSCHED_PASTPERFECT;
+	return p->qidlestart.tv64 != 0;
 }
 
 static inline void red_start_of_idle_period(struct red_parms *p)
 {
-	p->qidlestart = psched_get_time();
+	p->qidlestart = ktime_get();
 }
 
 static inline void red_end_of_idle_period(struct red_parms *p)
 {
-	p->qidlestart = PSCHED_PASTPERFECT;
+	p->qidlestart.tv64 = 0;
 }
 
 static inline void red_restart(struct red_parms *p)
@@ -170,13 +170,10 @@ static inline void red_restart(struct red_parms *p)
 
 static inline unsigned long red_calc_qavg_from_idle_time(struct red_parms *p)
 {
-	psched_time_t now;
-	long us_idle;
+	s64 delta = ktime_us_delta(ktime_get(), p->qidlestart);
+	long us_idle = min_t(s64, delta, p->Scell_max);
 	int shift;
 
-	now = psched_get_time();
-	us_idle = psched_tdiff_bounded(now, p->qidlestart, p->Scell_max);
-
 	/*
 	 * The problem: ideally, average length queue recalcultion should
 	 * be done over constant clock intervals. This is too expensive, so
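For reference, the pattern the patch switches to (qidlestart as a ktime_t, tv64 == 0 meaning "not idling", ktime_us_delta() bounded by Scell_max) can be sketched in userspace with clock_gettime() standing in for ktime_get(). The struct and helper names below are invented for illustration and are not kernel API:

```c
/*
 * Userspace analogue of the ktime-based idle tracking the patch adopts.
 * CLOCK_MONOTONIC stands in for ktime_get(); names are illustrative.
 */
#include <stdio.h>
#include <stdint.h>
#include <time.h>

struct idle_tracker {
	int64_t start_ns;	/* 0 means "not idling", like qidlestart.tv64 == 0 */
	int64_t max_us;		/* upper bound, analogous to Scell_max */
};

static int64_t now_ns(void)
{
	struct timespec ts;

	clock_gettime(CLOCK_MONOTONIC, &ts);
	return (int64_t)ts.tv_sec * 1000000000LL + ts.tv_nsec;
}

static void start_idle(struct idle_tracker *t)      { t->start_ns = now_ns(); }
static void end_idle(struct idle_tracker *t)        { t->start_ns = 0; }
static int  is_idling(const struct idle_tracker *t) { return t->start_ns != 0; }

/* Bounded idle time in microseconds, like ktime_us_delta() + min_t(). */
static int64_t idle_us(const struct idle_tracker *t)
{
	int64_t delta = (now_ns() - t->start_ns) / 1000;

	return delta < t->max_us ? delta : t->max_us;
}

int main(void)
{
	struct idle_tracker t = { .start_ns = 0, .max_us = 4000000 };
	struct timespec nap = { 0, 2000000 };	/* sleep for 2 ms */

	start_idle(&t);
	nanosleep(&nap, NULL);
	printf("idling=%d, idle time ~%lld us\n",
	       is_idling(&t), (long long)idle_us(&t));
	end_idle(&t);
	return 0;
}
```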