author     Robin Holt <holt@sgi.com>  2006-11-14 20:50:59 -0600
committer  Chris Wright <chrisw@sous-sol.org>  2006-12-01 16:12:37 -0800
commit     f0017a04c679278dcd6d6e042a967ea695b04eef (patch)
tree       c170a559dd1e6e8dc230b9186155097905395e37
parent     984db69c4ecafdb6b69d46641bd84e93f9152c9a (diff)
[PATCH] IA64: bte_unaligned_copy() transfers one extra cache line.
When called to do a transfer that has a start offset within the cache line which is uneven between source and destination, and a length which ends the source of the copy exactly on a cache-line boundary, one extra line gets copied into a temporary buffer. This is normally not an issue, since the buffer is a kernel buffer and only the requested information gets copied into the user buffer.

The problem arises when the source ends at the very last physical page of memory. That last cache line does not exist and results in the SHUB chip raising an MCA.

Signed-off-by: Robin Holt <holt@sgi.com>
Signed-off-by: Dean Nelson <dcn@sgi.com>
Signed-off-by: Tony Luck <tony.luck@intel.com>
Signed-off-by: Chris Wright <chrisw@sous-sol.org>
-rw-r--r--  arch/ia64/sn/kernel/bte.c  |  9
1 file changed, 4 insertions(+), 5 deletions(-)
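
For illustration only (not part of the patch), here is a minimal standalone C sketch of the headBteLen arithmetic the diff below changes. It assumes a 128-byte L1 cache line and the usual definitions L1_CACHE_MASK = L1_CACHE_BYTES - 1 and L1_CACHE_ALIGN(x) rounding x up to the next cache-line multiple; the values of src and len are made up to hit the problem case. It shows that the old two-step calculation appends a whole extra cache line whenever the padded length already ends on a line boundary, while the plain round-up does not.

/* Sketch only: contrast old vs. new headBteLen computation.
 * Assumes a 128-byte cache line (IA64) and the common macro definitions. */
#include <stdio.h>

#define L1_CACHE_BYTES    128UL
#define L1_CACHE_MASK     (L1_CACHE_BYTES - 1)
#define L1_CACHE_ALIGN(x) (((x) + L1_CACHE_MASK) & ~L1_CACHE_MASK)

int main(void)
{
	/* Source starts 8 bytes into a cache line, and the copy ends exactly
	 * on a cache-line boundary: (src + len) % L1_CACHE_BYTES == 0. */
	unsigned long src = 0x1008, len = 0x0f8;

	/* Old calculation: add the leading bytes, then unconditionally add a
	 * "trailer" to reach the end of a line.  When the length is already
	 * line-aligned this tacks on one extra, nonexistent line. */
	unsigned long old_len = len + (src & L1_CACHE_MASK);
	old_len += L1_CACHE_BYTES - (old_len & L1_CACHE_MASK);

	/* New calculation: simply round the padded length up to a line. */
	unsigned long new_len = L1_CACHE_ALIGN(len + (src & L1_CACHE_MASK));

	printf("old headBteLen = %lu\n", old_len); /* 384: one extra line   */
	printf("new headBteLen = %lu\n", new_len); /* 256: exactly two lines */
	return 0;
}

In the non-aligned case the two formulas agree; the transfer length only differs when the padded source length already falls on a cache-line boundary, which is exactly the situation described in the commit message above.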
diff --git a/arch/ia64/sn/kernel/bte.c b/arch/ia64/sn/kernel/bte.c
index 27dee4584061..c55f487b654a 100644
--- a/arch/ia64/sn/kernel/bte.c
+++ b/arch/ia64/sn/kernel/bte.c
@@ -382,14 +382,13 @@ bte_result_t bte_unaligned_copy(u64 src, u64 dest, u64 len, u64 mode)
* bcopy to the destination.
*/
- /* Add the leader from source */
- headBteLen = len + (src & L1_CACHE_MASK);
- /* Add the trailing bytes from footer. */
- headBteLen += L1_CACHE_BYTES - (headBteLen & L1_CACHE_MASK);
- headBteSource = src & ~L1_CACHE_MASK;
headBcopySrcOffset = src & L1_CACHE_MASK;
headBcopyDest = dest;
headBcopyLen = len;
+
+ headBteSource = src - headBcopySrcOffset;
+ /* Add the leading and trailing bytes from source */
+ headBteLen = L1_CACHE_ALIGN(len + headBcopySrcOffset);
}
if (headBcopyLen > 0) {