Diffstat (limited to 'lib')
-rw-r--r--  lib/bust_spinlocks.c |  3
-rw-r--r--  lib/dma-debug.c      | 45
-rw-r--r--  lib/idr.c            | 80
-rw-r--r--  lib/xz/Kconfig       |  2
4 files changed, 63 insertions(+), 67 deletions(-)
diff --git a/lib/bust_spinlocks.c b/lib/bust_spinlocks.c
index 9681d54b95d1..f8e0e5367398 100644
--- a/lib/bust_spinlocks.c
+++ b/lib/bust_spinlocks.c
@@ -8,6 +8,7 @@
*/
#include <linux/kernel.h>
+#include <linux/printk.h>
#include <linux/spinlock.h>
#include <linux/tty.h>
#include <linux/wait.h>
@@ -28,5 +29,3 @@ void __attribute__((weak)) bust_spinlocks(int yes)
wake_up_klogd();
}
}
-
-
diff --git a/lib/dma-debug.c b/lib/dma-debug.c
index 5e396accd3d0..d87a17a819d0 100644
--- a/lib/dma-debug.c
+++ b/lib/dma-debug.c
@@ -862,17 +862,21 @@ static void check_unmap(struct dma_debug_entry *ref)
entry = bucket_find_exact(bucket, ref);
if (!entry) {
+ /* must drop lock before calling dma_mapping_error */
+ put_hash_bucket(bucket, &flags);
+
if (dma_mapping_error(ref->dev, ref->dev_addr)) {
err_printk(ref->dev, NULL,
- "DMA-API: device driver tries "
- "to free an invalid DMA memory address\n");
- return;
+ "DMA-API: device driver tries to free an "
+ "invalid DMA memory address\n");
+ } else {
+ err_printk(ref->dev, NULL,
+ "DMA-API: device driver tries to free DMA "
+ "memory it has not allocated [device "
+ "address=0x%016llx] [size=%llu bytes]\n",
+ ref->dev_addr, ref->size);
}
- err_printk(ref->dev, NULL, "DMA-API: device driver tries "
- "to free DMA memory it has not allocated "
- "[device address=0x%016llx] [size=%llu bytes]\n",
- ref->dev_addr, ref->size);
- goto out;
+ return;
}
if (ref->size != entry->size) {
@@ -936,7 +940,6 @@ static void check_unmap(struct dma_debug_entry *ref)
hash_bucket_del(entry);
dma_entry_free(entry);
-out:
put_hash_bucket(bucket, &flags);
}
@@ -1082,13 +1085,27 @@ void debug_dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
ref.dev = dev;
ref.dev_addr = dma_addr;
bucket = get_hash_bucket(&ref, &flags);
- entry = bucket_find_exact(bucket, &ref);
- if (!entry)
- goto out;
+ list_for_each_entry(entry, &bucket->list, list) {
+ if (!exact_match(&ref, entry))
+ continue;
+
+ /*
+ * The same physical address can be mapped multiple
+ * times. Without a hardware IOMMU this results in the
+ * same device addresses being put into the dma-debug
+ * hash multiple times too. This can result in false
+ * positives being reported. Therefore we implement a
+ * best-fit algorithm here which updates the first entry
+ * from the hash which fits the reference value and is
+ * not currently listed as being checked.
+ */
+ if (entry->map_err_type == MAP_ERR_NOT_CHECKED) {
+ entry->map_err_type = MAP_ERR_CHECKED;
+ break;
+ }
+ }
- entry->map_err_type = MAP_ERR_CHECKED;
-out:
put_hash_bucket(bucket, &flags);
}
EXPORT_SYMBOL(debug_dma_mapping_error);
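Note on the dma-debug hunks above: check_unmap() now drops the hash bucket lock before calling dma_mapping_error() (per the added comment, the lock must not be held across that call), and debug_dma_mapping_error() walks the bucket and flips only the first matching MAP_ERR_NOT_CHECKED entry to MAP_ERR_CHECKED. Below is a minimal sketch of the driver-side pattern this instrumentation validates; the helper name, device, buffer and length are illustrative placeholders, not part of this diff:

    #include <linux/dma-mapping.h>

    /* hypothetical example: map a buffer, check it, use it, unmap it */
    static int example_tx(struct device *dev, void *buf, size_t len)
    {
            dma_addr_t addr;

            addr = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
            if (dma_mapping_error(dev, addr))
                    return -ENOMEM; /* dma-debug marks this entry MAP_ERR_CHECKED */

            /* ... hand addr to the device and wait for completion ... */

            /* unmapping a never-mapped or mismatched address would trigger
             * the check_unmap() reports changed above */
            dma_unmap_single(dev, addr, len, DMA_TO_DEVICE);
            return 0;
    }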
diff --git a/lib/idr.c b/lib/idr.c
index 00739aaf95a2..322e2816f2fb 100644
--- a/lib/idr.c
+++ b/lib/idr.c
@@ -106,8 +106,14 @@ static struct idr_layer *idr_layer_alloc(gfp_t gfp_mask, struct idr *layer_idr)
if (layer_idr)
return get_from_free_list(layer_idr);
- /* try to allocate directly from kmem_cache */
- new = kmem_cache_zalloc(idr_layer_cache, gfp_mask);
+ /*
+ * Try to allocate directly from kmem_cache. We want to try this
+ * before preload buffer; otherwise, non-preloading idr_alloc()
+ * users will end up taking advantage of preloading ones. As the
+ * following is allowed to fail for preloaded cases, suppress
+ * warning this time.
+ */
+ new = kmem_cache_zalloc(idr_layer_cache, gfp_mask | __GFP_NOWARN);
if (new)
return new;
@@ -115,18 +121,24 @@ static struct idr_layer *idr_layer_alloc(gfp_t gfp_mask, struct idr *layer_idr)
* Try to fetch one from the per-cpu preload buffer if in process
* context. See idr_preload() for details.
*/
- if (in_interrupt())
- return NULL;
-
- preempt_disable();
- new = __this_cpu_read(idr_preload_head);
- if (new) {
- __this_cpu_write(idr_preload_head, new->ary[0]);
- __this_cpu_dec(idr_preload_cnt);
- new->ary[0] = NULL;
+ if (!in_interrupt()) {
+ preempt_disable();
+ new = __this_cpu_read(idr_preload_head);
+ if (new) {
+ __this_cpu_write(idr_preload_head, new->ary[0]);
+ __this_cpu_dec(idr_preload_cnt);
+ new->ary[0] = NULL;
+ }
+ preempt_enable();
+ if (new)
+ return new;
}
- preempt_enable();
- return new;
+
+ /*
+ * Both failed. Try kmem_cache again w/o adding __GFP_NOWARN so
+ * that memory allocation failure warning is printed as intended.
+ */
+ return kmem_cache_zalloc(idr_layer_cache, gfp_mask);
}
static void idr_layer_rcu_free(struct rcu_head *head)
@@ -184,20 +196,7 @@ static void idr_mark_full(struct idr_layer **pa, int id)
}
}
-/**
- * idr_pre_get - reserve resources for idr allocation
- * @idp: idr handle
- * @gfp_mask: memory allocation flags
- *
- * This function should be called prior to calling the idr_get_new* functions.
- * It preallocates enough memory to satisfy the worst possible allocation. The
- * caller should pass in GFP_KERNEL if possible. This of course requires that
- * no spinning locks be held.
- *
- * If the system is REALLY out of memory this function returns %0,
- * otherwise %1.
- */
-int idr_pre_get(struct idr *idp, gfp_t gfp_mask)
+int __idr_pre_get(struct idr *idp, gfp_t gfp_mask)
{
while (idp->id_free_cnt < MAX_IDR_FREE) {
struct idr_layer *new;
@@ -208,13 +207,12 @@ int idr_pre_get(struct idr *idp, gfp_t gfp_mask)
}
return 1;
}
-EXPORT_SYMBOL(idr_pre_get);
+EXPORT_SYMBOL(__idr_pre_get);
/**
* sub_alloc - try to allocate an id without growing the tree depth
* @idp: idr handle
* @starting_id: id to start search at
- * @id: pointer to the allocated handle
* @pa: idr_layer[MAX_IDR_LEVEL] used as backtrack buffer
* @gfp_mask: allocation mask for idr_layer_alloc()
* @layer_idr: optional idr passed to idr_layer_alloc()
@@ -376,25 +374,7 @@ static void idr_fill_slot(struct idr *idr, void *ptr, int id,
idr_mark_full(pa, id);
}
-/**
- * idr_get_new_above - allocate new idr entry above or equal to a start id
- * @idp: idr handle
- * @ptr: pointer you want associated with the id
- * @starting_id: id to start search at
- * @id: pointer to the allocated handle
- *
- * This is the allocate id function. It should be called with any
- * required locks.
- *
- * If allocation from IDR's private freelist fails, idr_get_new_above() will
- * return %-EAGAIN. The caller should retry the idr_pre_get() call to refill
- * IDR's preallocation and then retry the idr_get_new_above() call.
- *
- * If the idr is full idr_get_new_above() will return %-ENOSPC.
- *
- * @id returns a value in the range @starting_id ... %0x7fffffff
- */
-int idr_get_new_above(struct idr *idp, void *ptr, int starting_id, int *id)
+int __idr_get_new_above(struct idr *idp, void *ptr, int starting_id, int *id)
{
struct idr_layer *pa[MAX_IDR_LEVEL + 1];
int rv;
@@ -407,7 +387,7 @@ int idr_get_new_above(struct idr *idp, void *ptr, int starting_id, int *id)
*id = rv;
return 0;
}
-EXPORT_SYMBOL(idr_get_new_above);
+EXPORT_SYMBOL(__idr_get_new_above);
/**
* idr_preload - preload for idr_alloc()
@@ -908,7 +888,7 @@ static void free_bitmap(struct ida *ida, struct ida_bitmap *bitmap)
int ida_pre_get(struct ida *ida, gfp_t gfp_mask)
{
/* allocate idr_layers */
- if (!idr_pre_get(&ida->idr, gfp_mask))
+ if (!__idr_pre_get(&ida->idr, gfp_mask))
return 0;
/* allocate free_bitmap */
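Note on the idr.c hunks above: idr_layer_alloc() now tries kmem_cache with __GFP_NOWARN first, falls back to the per-cpu preload buffer in process context, and finally retries kmem_cache without __GFP_NOWARN so a genuine allocation failure still warns; idr_pre_get()/idr_get_new_above() become the internal __-prefixed helpers behind the newer preload interface. A hedged sketch of the idr_preload()/idr_alloc() usage that preload buffer serves; the idr, lock and function names are placeholders, not taken from this diff:

    #include <linux/idr.h>
    #include <linux/spinlock.h>
    #include <linux/gfp.h>

    static DEFINE_IDR(example_idr);
    static DEFINE_SPINLOCK(example_lock);

    /* hypothetical example: allocate an id for ptr under a spinlock */
    static int example_assign_id(void *ptr)
    {
            int id;

            idr_preload(GFP_KERNEL);        /* fill the per-cpu preload buffer */
            spin_lock(&example_lock);

            /* may not sleep under the lock, hence GFP_NOWAIT; idr_layer_alloc()
             * falls back to the preloaded layers if kmem_cache fails */
            id = idr_alloc(&example_idr, ptr, 0, 0, GFP_NOWAIT);

            spin_unlock(&example_lock);
            idr_preload_end();

            return id;      /* >= 0 on success, negative errno on failure */
    }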
diff --git a/lib/xz/Kconfig b/lib/xz/Kconfig
index 82a04d7ba99e..08837db52d94 100644
--- a/lib/xz/Kconfig
+++ b/lib/xz/Kconfig
@@ -15,7 +15,7 @@ config XZ_DEC_X86
config XZ_DEC_POWERPC
bool "PowerPC BCJ filter decoder"
- default y if POWERPC
+ default y if PPC
select XZ_DEC_BCJ
config XZ_DEC_IA64