author     Luis R. Rodriguez <mcgrof@do-not-panic.com>    2013-03-22 19:33:38 +0000
committer  Luis R. Rodriguez <mcgrof@do-not-panic.com>    2013-03-22 12:36:42 -0700
commit     bfd429ddd3bf54d7d6cd5d9ec64fca174e3c53eb (patch)
tree       8d42bd07f2cd431238752317e4dbf6555df825a2
parent     cc35b309c5f605aa489098897d8bd57ae2f0abe7 (diff)
compat: backport lib/scatterlist.c from 0db9299f
Backport the lib/scatterlist.c addition; this only required removing the
kmemleak calls, since kmemleak is not available on older kernels.

mcgrof@frijol ~/linux-next (git::master)$ git describe --contains 0db9299f
v2.6.25-rc1~1173^2~2

commit 0db9299f48ebd4a860d6ad4e1d36ac50671d48e7
Author: Jens Axboe <jens.axboe@oracle.com>
Date:   Fri Nov 30 09:16:50 2007 +0100

    SG: Move functions to lib/scatterlist.c and add sg chaining allocator helpers

    Manually doing chained sg lists is not trivial, so add some helpers to
    make sure that drivers get it right.

    Signed-off-by: Jens Axboe <jens.axboe@oracle.com>

 1   2.6.24   [ OK ]
 2   2.6.25   [ OK ]
 3   2.6.26   [ OK ]
 4   2.6.27   [ OK ]
 5   2.6.28   [ OK ]
 6   2.6.29   [ OK ]
 7   2.6.30   [ OK ]
 8   2.6.31   [ OK ]
 9   2.6.32   [ OK ]
10   2.6.33   [ OK ]
11   2.6.34   [ OK ]
12   2.6.35   [ OK ]
13   2.6.36   [ OK ]
14   2.6.37   [ OK ]
15   2.6.38   [ OK ]
16   2.6.39   [ OK ]
17   3.0.65   [ OK ]
18   3.1.10   [ OK ]
19   3.2.38   [ OK ]
20   3.3.8    [ OK ]
21   3.4.32   [ OK ]
22   3.5.7    [ OK ]
23   3.6.11   [ OK ]
24   3.7.9    [ OK ]
25   3.8.0    [ OK ]
26   3.9-rc1  [ OK ]

Signed-off-by: Luis R. Rodriguez <mcgrof@do-not-panic.com>
-rw-r--r--   compat/compat-2.6.25.c          191
-rw-r--r--   include/linux/compat-2.6.25.h    33
2 files changed, 224 insertions, 0 deletions
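
For context, a minimal usage sketch of the API this patch backports (not part
of the commit; the example_fill_table() helper and its buffer array are
hypothetical): a driver allocates a possibly chained table with
sg_alloc_table(), fills it entry by entry, and later releases it with
sg_free_table().

#include <linux/gfp.h>
#include <linux/scatterlist.h>

/* Hypothetical helper: describe nbufs buffers of buflen bytes each. */
static int example_fill_table(struct sg_table *st, void **bufs,
			      unsigned int nbufs, unsigned int buflen)
{
	struct scatterlist *sg;
	unsigned int i;
	int ret;

	/* Chains transparently once nbufs exceeds SG_MAX_SINGLE_ALLOC. */
	ret = sg_alloc_table(st, nbufs, GFP_KERNEL);
	if (ret)
		return ret;

	for_each_sg(st->sgl, sg, st->nents, i)
		sg_set_buf(sg, bufs[i], buflen);

	return 0;
}

/* When the mapping is no longer needed: sg_free_table(st); */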
diff --git a/compat/compat-2.6.25.c b/compat/compat-2.6.25.c
index 15500249..20cc3dbd 100644
--- a/compat/compat-2.6.25.c
+++ b/compat/compat-2.6.25.c
@@ -12,6 +12,197 @@
#include <linux/pci.h>
/*
+ * The default behaviour of sg_alloc_table() is to use these kmalloc/kfree
+ * helpers.
+ */
+static struct scatterlist *sg_kmalloc(unsigned int nents, gfp_t gfp_mask)
+{
+ if (nents == SG_MAX_SINGLE_ALLOC) {
+ return (struct scatterlist *) __get_free_page(gfp_mask);
+ } else
+ return kmalloc(nents * sizeof(struct scatterlist), gfp_mask);
+}
+
+static void sg_kfree(struct scatterlist *sg, unsigned int nents)
+{
+ if (nents == SG_MAX_SINGLE_ALLOC)
+ free_page((unsigned long) sg);
+ else
+ kfree(sg);
+}
+
+
+/**
+ * __sg_free_table - Free a previously mapped sg table
+ * @table: The sg table header to use
+ * @max_ents: The maximum number of entries per single scatterlist
+ * @free_fn: Free function
+ *
+ * Description:
+ * Free an sg table previously allocated and setup with
+ * __sg_alloc_table(). The @max_ents value must be identical to
+ * that previously used with __sg_alloc_table().
+ *
+ **/
+void __sg_free_table(struct sg_table *table, unsigned int max_ents,
+ sg_free_fn *free_fn)
+{
+ struct scatterlist *sgl, *next;
+
+ if (unlikely(!table->sgl))
+ return;
+
+ sgl = table->sgl;
+ while (table->orig_nents) {
+ unsigned int alloc_size = table->orig_nents;
+ unsigned int sg_size;
+
+ /*
+ * If we have more than max_ents segments left,
+ * then assign 'next' to the sg table after the current one.
+ * sg_size is then one less than alloc size, since the last
+ * element is the chain pointer.
+ */
+ if (alloc_size > max_ents) {
+ next = sg_chain_ptr(&sgl[max_ents - 1]);
+ alloc_size = max_ents;
+ sg_size = alloc_size - 1;
+ } else {
+ sg_size = alloc_size;
+ next = NULL;
+ }
+
+ table->orig_nents -= sg_size;
+ free_fn(sgl, alloc_size);
+ sgl = next;
+ }
+
+ table->sgl = NULL;
+}
+EXPORT_SYMBOL(__sg_free_table);
+
+/**
+ * sg_free_table - Free a previously allocated sg table
+ * @table: The mapped sg table header
+ *
+ **/
+void sg_free_table(struct sg_table *table)
+{
+ __sg_free_table(table, SG_MAX_SINGLE_ALLOC, sg_kfree);
+}
+EXPORT_SYMBOL(sg_free_table);
+
+/**
+ * __sg_alloc_table - Allocate and initialize an sg table with given allocator
+ * @table: The sg table header to use
+ * @nents: Number of entries in sg list
+ * @max_ents: The maximum number of entries the allocator returns per call
+ * @gfp_mask: GFP allocation mask
+ * @alloc_fn: Allocator to use
+ *
+ * Description:
+ * This function returns a @table @nents long. The allocator is
+ * defined to return scatterlist chunks of maximum size @max_ents.
+ * Thus if @nents is bigger than @max_ents, the scatterlists will be
+ * chained in units of @max_ents.
+ *
+ * Notes:
+ * If this function returns non-0 (eg failure), the caller must call
+ * __sg_free_table() to cleanup any leftover allocations.
+ *
+ **/
+int __sg_alloc_table(struct sg_table *table, unsigned int nents,
+ unsigned int max_ents, gfp_t gfp_mask,
+ sg_alloc_fn *alloc_fn)
+{
+ struct scatterlist *sg, *prv;
+ unsigned int left;
+
+#ifndef ARCH_HAS_SG_CHAIN
+ if (WARN_ON_ONCE(nents > max_ents))
+ return -EINVAL;
+#endif
+
+ memset(table, 0, sizeof(*table));
+
+ left = nents;
+ prv = NULL;
+ do {
+ unsigned int sg_size, alloc_size = left;
+
+ if (alloc_size > max_ents) {
+ alloc_size = max_ents;
+ sg_size = alloc_size - 1;
+ } else
+ sg_size = alloc_size;
+
+ left -= sg_size;
+
+ sg = alloc_fn(alloc_size, gfp_mask);
+ if (unlikely(!sg)) {
+ /*
+ * Adjust entry count to reflect that the last
+ * entry of the previous table won't be used for
+ * linkage. Without this, sg_kfree() may get
+ * confused.
+ */
+ if (prv)
+ table->nents = ++table->orig_nents;
+
+ return -ENOMEM;
+ }
+
+ sg_init_table(sg, alloc_size);
+ table->nents = table->orig_nents += sg_size;
+
+ /*
+ * If this is the first mapping, assign the sg table header.
+ * If this is not the first mapping, chain previous part.
+ */
+ if (prv)
+ sg_chain(prv, max_ents, sg);
+ else
+ table->sgl = sg;
+
+ /*
+ * If no more entries after this one, mark the end
+ */
+ if (!left)
+ sg_mark_end(&sg[sg_size - 1]);
+
+ prv = sg;
+ } while (left);
+
+ return 0;
+}
+EXPORT_SYMBOL(__sg_alloc_table);
+
+/**
+ * sg_alloc_table - Allocate and initialize an sg table
+ * @table: The sg table header to use
+ * @nents: Number of entries in sg list
+ * @gfp_mask: GFP allocation mask
+ *
+ * Description:
+ * Allocate and initialize an sg table. If @nents is larger than
+ * SG_MAX_SINGLE_ALLOC a chained sg table will be setup.
+ *
+ **/
+int sg_alloc_table(struct sg_table *table, unsigned int nents, gfp_t gfp_mask)
+{
+ int ret;
+
+ ret = __sg_alloc_table(table, nents, SG_MAX_SINGLE_ALLOC,
+ gfp_mask, sg_kmalloc);
+ if (unlikely(ret))
+ __sg_free_table(table, SG_MAX_SINGLE_ALLOC, sg_kfree);
+
+ return ret;
+}
+EXPORT_SYMBOL(sg_alloc_table);
+
+
+/*
* To backport b718989d correctly pcibios_enable_device()
* is required but we don't have access to it on modules
* as it's an architecture-specific routine that is not
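
(Not part of the commit.) Because the backport also exports
__sg_alloc_table()/__sg_free_table() together with the sg_alloc_fn/sg_free_fn
hooks, a caller can supply its own chunk allocator instead of the default
sg_kmalloc()/sg_kfree() shown above. A hedged sketch: my_sg_alloc(),
my_sg_free() and the max_ents value of 32 are arbitrary illustrations, and
chunking beyond max_ents still depends on ARCH_HAS_SG_CHAIN.

#include <linux/scatterlist.h>
#include <linux/slab.h>

/* Hypothetical allocator pair matching the sg_alloc_fn/sg_free_fn typedefs. */
static struct scatterlist *my_sg_alloc(unsigned int nents, gfp_t gfp_mask)
{
	return kmalloc(nents * sizeof(struct scatterlist), gfp_mask);
}

static void my_sg_free(struct scatterlist *sg, unsigned int nents)
{
	kfree(sg);
}

static int my_table_alloc(struct sg_table *st, unsigned int nents)
{
	int ret;

	/* Chunks of at most 32 entries, chained together as needed. */
	ret = __sg_alloc_table(st, nents, 32, GFP_KERNEL, my_sg_alloc);
	if (ret)
		/* On failure the caller cleans up, with the same max_ents. */
		__sg_free_table(st, 32, my_sg_free);
	return ret;
}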
diff --git a/include/linux/compat-2.6.25.h b/include/linux/compat-2.6.25.h
index a08e0b26..dd986f5c 100644
--- a/include/linux/compat-2.6.25.h
+++ b/include/linux/compat-2.6.25.h
@@ -20,10 +20,43 @@
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/pci.h>
+#include <linux/scatterlist.h>
#define __inet_lookup_established __inet_lookup_established_old
#include <net/inet_hashtables.h>
#undef __inet_lookup_established
#include <linux/compat-3.9.h>
+
+struct sg_table {
+ struct scatterlist *sgl; /* the list */
+ unsigned int nents; /* number of mapped entries */
+ unsigned int orig_nents; /* original size of list */
+};
+
+#define sg_alloc_fn LINUX_BACKPORT(sg_alloc_fn)
+typedef struct scatterlist *(sg_alloc_fn)(unsigned int, gfp_t);
+
+#define sg_free_fn LINUX_BACKPORT(sg_free_fn)
+typedef void (sg_free_fn)(struct scatterlist *, unsigned int);
+
+#define __sg_free_table LINUX_BACKPORT(__sg_free_table)
+void __sg_free_table(struct sg_table *table, unsigned int max_ents,
+ sg_free_fn *free_fn);
+#define sg_free_table LINUX_BACKPORT(sg_free_table)
+void sg_free_table(struct sg_table *);
+#define __sg_alloc_table LINUX_BACKPORT(__sg_alloc_table)
+int __sg_alloc_table(struct sg_table *table, unsigned int nents,
+ unsigned int max_ents, gfp_t gfp_mask,
+ sg_alloc_fn *alloc_fn);
+#define sg_alloc_table LINUX_BACKPORT(sg_alloc_table)
+int sg_alloc_table(struct sg_table *table, unsigned int nents, gfp_t gfp_mask);
+
+/*
+ * Maximum number of entries that will be allocated in one piece, if
+ * a list larger than this is required then chaining will be utilized.
+ */
+#define SG_MAX_SINGLE_ALLOC (PAGE_SIZE / sizeof(struct scatterlist))
+
+
/*
* Sockets in TCP_CLOSE state are _always_ taken out of the hash, so we need
* not check it for lookups anymore, thanks Alexey. -DaveM
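
A worked example of how SG_MAX_SINGLE_ALLOC drives the chaining implemented
above (the numbers are assumed for illustration; the real value is
PAGE_SIZE / sizeof(struct scatterlist) on the target kernel):

/*
 * Assume SG_MAX_SINGLE_ALLOC evaluates to 128.  Then
 * sg_alloc_table(&st, 300, GFP_KERNEL) builds three chained chunks:
 *
 *   chunk 1: 128 entries = 127 payload + 1 chain pointer to chunk 2
 *   chunk 2: 128 entries = 127 payload + 1 chain pointer to chunk 3
 *   chunk 3:  46 entries = 46 payload, last one marked with sg_mark_end()
 *
 *   payload total: 127 + 127 + 46 = 300 = nents
 *
 * st.orig_nents records the full 300 entries so that a later
 * sg_free_table(&st) can walk and free the same three chunks.
 */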