author    Dave Airlie <airlied@redhat.com>  2010-12-22 09:48:54 +1000
committer Dave Airlie <airlied@redhat.com>  2010-12-22 09:48:54 +1000
commit    ae09f09e94d755ed45c58b695675636c0ec53f9e (patch)
tree      77cb9bac7d81f5b1250b8638a007e10c17b600af  /arch/sh/mm/cache.c
parent    1d99e5c57255d188773fb437391df24fe8faf575 (diff)
parent    5909a77ac62cc042f94bd262016cf468a2f96022 (diff)
Merge remote branch 'intel/drm-intel-next' of /ssd/git/drm-next into drm-core-next
* 'intel/drm-intel-next' of /ssd/git/drm-next: (771 commits)
  drm/i915: Undo "Uncouple render/power ctx before suspending"
  drm/i915: Allow the application to choose the constant addressing mode
  drm/i915: dynamic render p-state support for Sandy Bridge
  drm/i915: Enable EI mode for RCx decision making on Sandybridge
  drm/i915/sdvo: Border and stall select became test bits in gen5
  drm/i915: Add Guess-o-matic for pageflip timestamping.
  drm/i915: Add support for precise vblank timestamping (v2)
  drm/i915: Add frame buffer compression on Sandybridge
  drm/i915: Add self-refresh support on Sandybridge
  drm/i915: Wait for vblank before unpinning old fb
  Revert "drm/i915: Avoid using PIPE_CONTROL on Ironlake"
  drm/i915: Pass clock limits down to PLL matcher
  drm/i915: Poll for seqno completion if IRQ is disabled
  drm/i915/ringbuffer: Make IRQ refcnting atomic
  agp/intel: Fix missed cached memory flags setting in i965_write_entry()
  drm/i915/sdvo: Only use the SDVO pin if it is in the valid range
  drm/i915: Enable RC6 autodownclocking on Sandybridge
  drm/i915: Terminate the FORCE WAKE after we have finished reading
  drm/i915/gtt: Clear the cachelines upon resume
  drm/i915: Restore GTT mapping first upon resume
  ...
Diffstat (limited to 'arch/sh/mm/cache.c')
-rw-r--r--  arch/sh/mm/cache.c | 14 +++++++-------
1 file changed, 7 insertions(+), 7 deletions(-)
diff --git a/arch/sh/mm/cache.c b/arch/sh/mm/cache.c
index ba401d137bb9..88d3dc3d30d5 100644
--- a/arch/sh/mm/cache.c
+++ b/arch/sh/mm/cache.c
@@ -60,14 +60,14 @@ void copy_to_user_page(struct vm_area_struct *vma, struct page *page,
 		       unsigned long len)
 {
 	if (boot_cpu_data.dcache.n_aliases && page_mapped(page) &&
-	    !test_bit(PG_dcache_dirty, &page->flags)) {
+	    test_bit(PG_dcache_clean, &page->flags)) {
 		void *vto = kmap_coherent(page, vaddr) + (vaddr & ~PAGE_MASK);
 		memcpy(vto, src, len);
 		kunmap_coherent(vto);
 	} else {
 		memcpy(dst, src, len);
 		if (boot_cpu_data.dcache.n_aliases)
-			set_bit(PG_dcache_dirty, &page->flags);
+			clear_bit(PG_dcache_clean, &page->flags);
 	}
 
 	if (vma->vm_flags & VM_EXEC)
@@ -79,14 +79,14 @@ void copy_from_user_page(struct vm_area_struct *vma, struct page *page,
 			 unsigned long len)
 {
 	if (boot_cpu_data.dcache.n_aliases && page_mapped(page) &&
-	    !test_bit(PG_dcache_dirty, &page->flags)) {
+	    test_bit(PG_dcache_clean, &page->flags)) {
 		void *vfrom = kmap_coherent(page, vaddr) + (vaddr & ~PAGE_MASK);
 		memcpy(dst, vfrom, len);
 		kunmap_coherent(vfrom);
 	} else {
 		memcpy(dst, src, len);
 		if (boot_cpu_data.dcache.n_aliases)
-			set_bit(PG_dcache_dirty, &page->flags);
+			clear_bit(PG_dcache_clean, &page->flags);
 	}
 }
@@ -98,7 +98,7 @@ void copy_user_highpage(struct page *to, struct page *from,
 	vto = kmap_atomic(to, KM_USER1);
 
 	if (boot_cpu_data.dcache.n_aliases && page_mapped(from) &&
-	    !test_bit(PG_dcache_dirty, &from->flags)) {
+	    test_bit(PG_dcache_clean, &from->flags)) {
 		vfrom = kmap_coherent(from, vaddr);
 		copy_page(vto, vfrom);
 		kunmap_coherent(vfrom);
@@ -141,7 +141,7 @@ void __update_cache(struct vm_area_struct *vma,
 	page = pfn_to_page(pfn);
 	if (pfn_valid(pfn)) {
-		int dirty = test_and_clear_bit(PG_dcache_dirty, &page->flags);
+		int dirty = !test_and_set_bit(PG_dcache_clean, &page->flags);
 		if (dirty)
 			__flush_purge_region(page_address(page), PAGE_SIZE);
 	}
@@ -153,7 +153,7 @@ void __flush_anon_page(struct page *page, unsigned long vmaddr)
 	if (pages_do_alias(addr, vmaddr)) {
 		if (boot_cpu_data.dcache.n_aliases && page_mapped(page) &&
-		    !test_bit(PG_dcache_dirty, &page->flags)) {
+		    test_bit(PG_dcache_clean, &page->flags)) {
			void *kaddr;

			kaddr = kmap_coherent(page, vmaddr);
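
The whole change to this file is a sense inversion of the SH per-page dcache flag: the old PG_dcache_dirty bit ("aliased dcache lines may be dirty") is replaced by PG_dcache_clean ("page is known to have no stale dcache lines"). Mechanically, every !test_bit(PG_dcache_dirty) becomes test_bit(PG_dcache_clean), set_bit becomes clear_bit, and test_and_clear_bit becomes !test_and_set_bit. The behavioral difference shows up in __update_cache: a freshly allocated page starts with all flag bits cleared, so under the old polarity it looked "not dirty" and skipped the flush, while under the new polarity it is "not known clean" and takes the flush path. Below is a minimal user-space sketch of that difference; the flags word and helpers are hypothetical non-atomic stand-ins for page->flags and the kernel bitops, not kernel code.

/*
 * Sketch only: models the __update_cache() predicate before and after
 * the polarity flip, with a plain unsigned long in place of page->flags.
 */
#include <stdbool.h>
#include <stdio.h>

#define PG_BIT 0x1UL	/* read as "dirty" (old scheme) or "clean" (new) */

/* Old scheme: flush if the dirty bit was set, clearing it as we go
 * (mirrors test_and_clear_bit(PG_dcache_dirty, ...)). */
static bool old_needs_flush(unsigned long *flags)
{
	bool was_dirty = *flags & PG_BIT;
	*flags &= ~PG_BIT;
	return was_dirty;
}

/* New scheme: flush unless the clean bit was already set, setting it as
 * we go (mirrors !test_and_set_bit(PG_dcache_clean, ...)). */
static bool new_needs_flush(unsigned long *flags)
{
	bool was_clean = *flags & PG_BIT;
	*flags |= PG_BIT;
	return !was_clean;
}

int main(void)
{
	unsigned long flags = 0;	/* flags of a brand-new page */

	/* Old polarity: a zeroed word reads "not dirty" -> no flush. */
	printf("old: flush fresh page? %d\n", old_needs_flush(&flags));

	flags = 0;
	/* New polarity: a zeroed word reads "not known clean" -> flush. */
	printf("new: flush fresh page? %d\n", new_needs_flush(&flags));
	return 0;
}

The inverted default is the conservative one: a page whose cache state has never been established fails the "known clean" test and gets purged, rather than being silently trusted.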