path: root/drivers/video/tegra/nvmap/nvmap_handle.c
author     Kirill Artamonov <kartamonov@nvidia.com>  2011-05-24 13:12:43 +0300
committer  Dan Willemsen <dwillemsen@nvidia.com>     2011-11-30 21:45:04 -0800
commit     b0bf5067c63be08898d33e92af416e64c421fec5 (patch)
tree       d73273c1829501163f7ff8272badc48a71024508  /drivers/video/tegra/nvmap/nvmap_handle.c
parent     431fa116af6295a15ed77bd35b560ffd56dadf52 (diff)
video: tegra: nvmap: single page allocation policy corrected
When the user does not use the default heap policy and instead selects a GART or carveout allocation, the automatic single-page-to-sysmem rule does not apply. Because of this broken rule, many single-page allocations take extra space in the carveout and create unnecessary page mappings in the GART and SMMU. The fix adds the sysmem bit to the heap mask when the allocation is a single page and GART or carveout is present in the heap mask.

bug 730124
bug 731923

The change also performs a sanity check of available system memory before adding the sysmem bit for carveout allocations.

bug 777839

Original-Change-Id: I13a62653825f6c80581adcd2682fb2608d3a284e
Reviewed-on: http://git-master/r/31383
Reviewed-by: Kirill Artamonov <kartamonov@nvidia.com>
Tested-by: Kirill Artamonov <kartamonov@nvidia.com>
Reviewed-by: Krishna Reddy <vdumpa@nvidia.com>
Reviewed-by: Bharat Nihalani <bnihalani@nvidia.com>
Rebase-Id: R278606210f20aacc885fa9eb06b3a2a3d8677b55
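To make the policy concrete, the following is a minimal, self-contained sketch of the mask adjustment described above (the actual change is in the diff below). The HEAP_* bit values, the adjust_heap_mask() helper, and the free_bytes parameter standing in for the kernel's free-page calculation are illustrative assumptions, not the driver's real interface; the 50000000-byte threshold (roughly 48 MB) is taken from the patch.

#include <stdio.h>

/* Illustrative stand-ins for the nvmap heap mask bits (values assumed). */
#define HEAP_SYSMEM            (1u << 0)
#define HEAP_IOVMM             (1u << 1)
#define HEAP_CARVEOUT_GENERIC  (1u << 2)

/* Threshold from the patch: do not fall back to sysmem below ~48 MB free. */
#define SMALL_POLICY_SYSMEM_THRESHOLD 50000000UL

/*
 * Model of the corrected policy: a single-page allocation aimed at the
 * GART (IOVMM) or the generic carveout is also allowed to come from
 * system memory, but for carveout requests only when enough free memory
 * remains to avoid pressuring the OOM killer.
 */
static unsigned int adjust_heap_mask(unsigned int heap_mask,
				     unsigned long nr_page,
				     unsigned long free_bytes)
{
	if (nr_page != 1)
		return heap_mask;

	if (heap_mask & HEAP_IOVMM)
		heap_mask |= HEAP_SYSMEM;
	else if ((heap_mask & HEAP_CARVEOUT_GENERIC) &&
		 free_bytes > SMALL_POLICY_SYSMEM_THRESHOLD)
		heap_mask |= HEAP_SYSMEM;

	return heap_mask;
}

int main(void)
{
	/* Single page, carveout requested, plenty of free memory: sysmem bit added. */
	printf("mask=%#x\n", adjust_heap_mask(HEAP_CARVEOUT_GENERIC, 1, 200UL << 20));
	/* Single page, carveout requested, memory tight: mask left unchanged. */
	printf("mask=%#x\n", adjust_heap_mask(HEAP_CARVEOUT_GENERIC, 1, 16UL << 20));
	return 0;
}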
Diffstat (limited to 'drivers/video/tegra/nvmap/nvmap_handle.c')
-rw-r--r--  drivers/video/tegra/nvmap/nvmap_handle.c | 30
1 file changed, 29 insertions(+), 1 deletion(-)
diff --git a/drivers/video/tegra/nvmap/nvmap_handle.c b/drivers/video/tegra/nvmap/nvmap_handle.c
index 3b1c2ee06b10..de47aa7a5e58 100644
--- a/drivers/video/tegra/nvmap/nvmap_handle.c
+++ b/drivers/video/tegra/nvmap/nvmap_handle.c
@@ -35,6 +35,9 @@
#include <mach/iovmm.h>
#include <mach/nvmap.h>
+#include <linux/vmstat.h>
+#include <linux/swap.h>
+
#include "nvmap.h"
#include "nvmap_mru.h"
#include "nvmap_common.h"
@@ -319,6 +322,10 @@ static const unsigned int heap_policy_large[] = {
0,
};
+/* Do not override single page policy if there is not much space to
+avoid invoking system oom killer. */
+#define NVMAP_SMALL_POLICY_SYSMEM_THRESHOLD 50000000
+
int nvmap_alloc_handle_id(struct nvmap_client *client,
unsigned long id, unsigned int heap_mask,
size_t align, unsigned int flags)
@@ -340,8 +347,29 @@ int nvmap_alloc_handle_id(struct nvmap_client *client,
h->secure = !!(flags & NVMAP_HANDLE_SECURE);
h->flags = (flags & NVMAP_HANDLE_CACHE_FLAG);
h->align = max_t(size_t, align, L1_CACHE_BYTES);
+
#ifndef CONFIG_NVMAP_CONVERT_CARVEOUT_TO_IOVMM
- /* This resriction is deprecated as alignments greater than
+#ifdef CONFIG_NVMAP_ALLOW_SYSMEM
+ /* Allow single pages allocations in system memory to save
+ * carveout space and avoid extra iovm mappings */
+ if (nr_page == 1) {
+ if (heap_mask & NVMAP_HEAP_IOVMM)
+ heap_mask |= NVMAP_HEAP_SYSMEM;
+ else if (heap_mask & NVMAP_HEAP_CARVEOUT_GENERIC) {
+ /* Calculate size of free physical pages
+ * managed by kernel */
+ unsigned long freeMem =
+ (global_page_state(NR_FREE_PAGES) +
+ global_page_state(NR_FILE_PAGES) -
+ total_swapcache_pages) << PAGE_SHIFT;
+
+ if (freeMem > NVMAP_SMALL_POLICY_SYSMEM_THRESHOLD)
+ heap_mask |= NVMAP_HEAP_SYSMEM;
+ }
+ }
+#endif
+
+ /* This restriction is deprecated as alignments greater than
PAGE_SIZE are now correctly handled, but it is retained for
AP20 compatibility. */
if (h->align > PAGE_SIZE)