diff --git a/lib/scatterlist.c b/lib/scatterlist.c
index 02aaa27..0d475d8 100644
--- a/lib/scatterlist.c
+++ b/lib/scatterlist.c
@@ -8,6 +8,7 @@
  */
 #include <linux/module.h>
 #include <linux/scatterlist.h>
+#include <linux/highmem.h>
 
 /**
  * sg_next - return the next scatterlist entry in a list
@@ -130,13 +131,17 @@ static void sg_kfree(struct scatterlist *sg, unsigned int nents)
 /**
  * __sg_free_table - Free a previously mapped sg table
  * @table: The sg table header to use
+ * @max_ents: The maximum number of entries per single scatterlist
  * @free_fn: Free function
  *
  * Description:
- *    Free an sg table previously allocated and setup with __sg_alloc_table().
+ *    Free an sg table previously allocated and setup with
+ *    __sg_alloc_table().  The @max_ents value must be identical to
+ *    that previously used with __sg_alloc_table().
  *
  **/
-void __sg_free_table(struct sg_table *table, sg_free_fn *free_fn)
+void __sg_free_table(struct sg_table *table, unsigned int max_ents,
+		     sg_free_fn *free_fn)
 {
 	struct scatterlist *sgl, *next;
 
@@ -149,14 +154,14 @@ void __sg_free_table(struct sg_table *table, sg_free_fn *free_fn)
 		unsigned int sg_size;
 
 		/*
-		 * If we have more than SG_MAX_SINGLE_ALLOC segments left,
+		 * If we have more than max_ents segments left,
 		 * then assign 'next' to the sg table after the current one.
 		 * sg_size is then one less than alloc size, since the last
 		 * element is the chain pointer.
 		 */
-		if (alloc_size > SG_MAX_SINGLE_ALLOC) {
-			next = sg_chain_ptr(&sgl[SG_MAX_SINGLE_ALLOC - 1]);
-			alloc_size = SG_MAX_SINGLE_ALLOC;
+		if (alloc_size > max_ents) {
+			next = sg_chain_ptr(&sgl[max_ents - 1]);
+			alloc_size = max_ents;
 			sg_size = alloc_size - 1;
 		} else {
 			sg_size = alloc_size;
@@ -179,7 +184,7 @@ EXPORT_SYMBOL(__sg_free_table);
  **/
 void sg_free_table(struct sg_table *table)
 {
-	__sg_free_table(table, sg_kfree);
+	__sg_free_table(table, SG_MAX_SINGLE_ALLOC, sg_kfree);
 }
 EXPORT_SYMBOL(sg_free_table);
 
@@ -187,22 +192,30 @@ EXPORT_SYMBOL(sg_free_table);
  * __sg_alloc_table - Allocate and initialize an sg table with given allocator
  * @table: The sg table header to use
  * @nents: Number of entries in sg list
+ * @max_ents: The maximum number of entries the allocator returns per call
  * @gfp_mask: GFP allocation mask
  * @alloc_fn: Allocator to use
  *
+ * Description:
+ *   This function returns a @table @nents long. The allocator is
+ *   defined to return scatterlist chunks of maximum size @max_ents.
+ *   Thus if @nents is bigger than @max_ents, the scatterlists will be
+ *   chained in units of @max_ents.
+ *
  * Notes:
  *   If this function returns non-0 (eg failure), the caller must call
  *   __sg_free_table() to cleanup any leftover allocations.
  *
  **/
-int __sg_alloc_table(struct sg_table *table, unsigned int nents, gfp_t gfp_mask,
+int __sg_alloc_table(struct sg_table *table, unsigned int nents,
+		     unsigned int max_ents, gfp_t gfp_mask,
 		     sg_alloc_fn *alloc_fn)
 {
 	struct scatterlist *sg, *prv;
 	unsigned int left;
 
 #ifndef ARCH_HAS_SG_CHAIN
-	BUG_ON(nents > SG_MAX_SINGLE_ALLOC);
+	BUG_ON(nents > max_ents);
 #endif
 
 	memset(table, 0, sizeof(*table));
@@ -212,8 +225,8 @@ int __sg_alloc_table(struct sg_table *table, unsigned int nents, gfp_t gfp_mask,
 	do {
 		unsigned int sg_size, alloc_size = left;
 
-		if (alloc_size > SG_MAX_SINGLE_ALLOC) {
-			alloc_size = SG_MAX_SINGLE_ALLOC;
+		if (alloc_size > max_ents) {
+			alloc_size = max_ents;
 			sg_size = alloc_size - 1;
 		} else
 			sg_size = alloc_size;
@@ -232,7 +245,7 @@ int __sg_alloc_table(struct sg_table *table, unsigned int nents, gfp_t gfp_mask,
 		 * If this is not the first mapping, chain previous part.
 		 */
 		if (prv)
-			sg_chain(prv, SG_MAX_SINGLE_ALLOC, sg);
+			sg_chain(prv, max_ents, sg);
 		else
 			table->sgl = sg;
 
@@ -272,10 +285,211 @@ int sg_alloc_table(struct sg_table *table, unsigned int nents, gfp_t gfp_mask)
 {
 	int ret;
 
-	ret = __sg_alloc_table(table, nents, gfp_mask, sg_kmalloc);
+	ret = __sg_alloc_table(table, nents, SG_MAX_SINGLE_ALLOC,
+			       gfp_mask, sg_kmalloc);
 	if (unlikely(ret))
-		__sg_free_table(table, sg_kfree);
+		__sg_free_table(table, SG_MAX_SINGLE_ALLOC, sg_kfree);
 
 	return ret;
 }
 EXPORT_SYMBOL(sg_alloc_table);
+
+/**
+ * sg_miter_start - start mapping iteration over a sg list
+ * @miter: sg mapping iter to be started
+ * @sgl: sg list to iterate over
+ * @nents: number of sg entries
+ *
+ * Description:
+ *   Starts mapping iterator @miter.
+ *
+ * Context:
+ *   Don't care.
+ */
+void sg_miter_start(struct sg_mapping_iter *miter, struct scatterlist *sgl,
+		    unsigned int nents, unsigned int flags)
+{
+	memset(miter, 0, sizeof(struct sg_mapping_iter));
+
+	miter->__sg = sgl;
+	miter->__nents = nents;
+	miter->__offset = 0;
+	WARN_ON(!(flags & (SG_MITER_TO_SG | SG_MITER_FROM_SG)));
+	miter->__flags = flags;
+}
+EXPORT_SYMBOL(sg_miter_start);
+
+/**
+ * sg_miter_next - proceed mapping iterator to the next mapping
+ * @miter: sg mapping iter to proceed
+ *
+ * Description:
+ *   Proceeds @miter to the next mapping.  @miter should have been
+ *   started using sg_miter_start().  On successful return,
+ *   @miter->page, @miter->addr and @miter->length point to the
+ *   current mapping.
+ *
+ * Context:
+ *   IRQ disabled if SG_MITER_ATOMIC.  IRQ must stay disabled till
+ *   @miter is stopped.  May sleep if !SG_MITER_ATOMIC.
+ *
+ * Returns:
+ *   true if @miter contains the next mapping.  false if end of sg
+ *   list is reached.
+ */
+bool sg_miter_next(struct sg_mapping_iter *miter)
+{
+	unsigned int off, len;
+
+	/* check for end and drop resources from the last iteration */
+	if (!miter->__nents)
+		return false;
+
+	sg_miter_stop(miter);
+
+	/* get to the next sg if necessary.  __offset is adjusted by stop */
+	while (miter->__offset == miter->__sg->length) {
+		if (--miter->__nents) {
+			miter->__sg = sg_next(miter->__sg);
+			miter->__offset = 0;
+		} else
+			return false;
+	}
+
+	/* map the next page */
+	off = miter->__sg->offset + miter->__offset;
+	len = miter->__sg->length - miter->__offset;
+
+	miter->page = nth_page(sg_page(miter->__sg), off >> PAGE_SHIFT);
+	off &= ~PAGE_MASK;
+	miter->length = min_t(unsigned int, len, PAGE_SIZE - off);
+	miter->consumed = miter->length;
+
+	if (miter->__flags & SG_MITER_ATOMIC)
+		miter->addr = kmap_atomic(miter->page, KM_BIO_SRC_IRQ) + off;
+	else
+		miter->addr = kmap(miter->page) + off;
+
+	return true;
+}
+EXPORT_SYMBOL(sg_miter_next);
+
+/**
+ * sg_miter_stop - stop mapping iteration
+ * @miter: sg mapping iter to be stopped
+ *
+ * Description:
+ *   Stops mapping iterator @miter.  @miter should have been
+ *   started using sg_miter_start().  A stopped iteration can be
+ *   resumed by calling sg_miter_next() on it.  This is useful when
+ *   resources (kmap) need to be released during iteration.
+ *
+ * Context:
+ *   IRQ disabled if the SG_MITER_ATOMIC is set.  Don't care otherwise.
+ */
+void sg_miter_stop(struct sg_mapping_iter *miter)
+{
+	WARN_ON(miter->consumed > miter->length);
+
+	/* drop resources from the last iteration */
+	if (miter->addr) {
+		miter->__offset += miter->consumed;
+
+		if (miter->__flags & SG_MITER_TO_SG)
+			flush_kernel_dcache_page(miter->page);
+
+		if (miter->__flags & SG_MITER_ATOMIC) {
+			WARN_ON(!irqs_disabled());
+			kunmap_atomic(miter->addr, KM_BIO_SRC_IRQ);
+		} else
+			kunmap(miter->page);
+
+		miter->page = NULL;
+		miter->addr = NULL;
+		miter->length = 0;
+		miter->consumed = 0;
+	}
+}
+EXPORT_SYMBOL(sg_miter_stop);
+
+/**
+ * sg_copy_buffer - Copy data between a linear buffer and an SG list
+ * @sgl:	The SG list
+ * @nents:	Number of SG entries
+ * @buf:	Where to copy from
+ * @buflen:	The number of bytes to copy
+ * @to_buffer:	transfer direction (non-zero == from an sg list to a
+ *		buffer, 0 == from a buffer to an sg list)
+ *
+ * Returns the number of copied bytes.
+ *
+ **/
+static size_t sg_copy_buffer(struct scatterlist *sgl, unsigned int nents,
+			     void *buf, size_t buflen, int to_buffer)
+{
+	unsigned int offset = 0;
+	struct sg_mapping_iter miter;
+	unsigned long flags;
+	unsigned int sg_flags = SG_MITER_ATOMIC;
+
+	if (to_buffer)
+		sg_flags |= SG_MITER_FROM_SG;
+	else
+		sg_flags |= SG_MITER_TO_SG;
+
+	sg_miter_start(&miter, sgl, nents, sg_flags);
+
+	local_irq_save(flags);
+
+	while (sg_miter_next(&miter) && offset < buflen) {
+		unsigned int len;
+
+		len = min(miter.length, buflen - offset);
+
+		if (to_buffer)
+			memcpy(buf + offset, miter.addr, len);
+		else
+			memcpy(miter.addr, buf + offset, len);
+
+		offset += len;
+	}
+
+	sg_miter_stop(&miter);
+
+	local_irq_restore(flags);
+	return offset;
+}
+
+/**
+ * sg_copy_from_buffer - Copy from a linear buffer to an SG list
+ * @sgl:	The SG list
+ * @nents:	Number of SG entries
+ * @buf:	Where to copy from
+ * @buflen:	The number of bytes to copy
+ *
+ * Returns the number of copied bytes.
+ *
+ **/
+size_t sg_copy_from_buffer(struct scatterlist *sgl, unsigned int nents,
+			   void *buf, size_t buflen)
+{
+	return sg_copy_buffer(sgl, nents, buf, buflen, 0);
+}
+EXPORT_SYMBOL(sg_copy_from_buffer);
+
+/**
+ * sg_copy_to_buffer - Copy from an SG list to a linear buffer
+ * @sgl:	The SG list
+ * @nents:	Number of SG entries
+ * @buf:	Where to copy to
+ * @buflen:	The number of bytes to copy
+ *
+ * Returns the number of copied bytes.
+ *
+ **/
+size_t sg_copy_to_buffer(struct scatterlist *sgl, unsigned int nents,
+			 void *buf, size_t buflen)
+{
+	return sg_copy_buffer(sgl, nents, buf, buflen, 1);
+}
+EXPORT_SYMBOL(sg_copy_to_buffer);
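
Editor's usage notes. The three sketches below are illustrative only; they are not part of the patch, and every identifier with an example_ prefix (plus EXAMPLE_MAX_ENTS) is hypothetical.

The new max_ents parameter exists so that callers which allocate scatterlists in fixed-size chunks (a SCSI-style fixed maximum allocation, by the look of the series) can drive __sg_alloc_table() and __sg_free_table() directly. A minimal sketch of such a caller, assuming simple kmalloc-backed allocator callbacks:

#include <linux/scatterlist.h>
#include <linux/slab.h>

#define EXAMPLE_MAX_ENTS	128	/* entries per chunk; made-up value */

static struct scatterlist *example_alloc_fn(unsigned int nents, gfp_t gfp_mask)
{
	return kmalloc(nents * sizeof(struct scatterlist), gfp_mask);
}

static void example_free_fn(struct scatterlist *sg, unsigned int nents)
{
	kfree(sg);
}

static int example_alloc(struct sg_table *table, unsigned int nents)
{
	int ret;

	/* chunks come from example_alloc_fn(), at most EXAMPLE_MAX_ENTS
	 * entries each; longer lists are chained in units of that size */
	ret = __sg_alloc_table(table, nents, EXAMPLE_MAX_ENTS,
			       GFP_KERNEL, example_alloc_fn);
	if (ret)
		/* per the Notes above, a failed allocation must still be
		 * torn down by the caller, with the same max_ents value */
		__sg_free_table(table, EXAMPLE_MAX_ENTS, example_free_fn);
	return ret;
}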
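The mapping iterator added above hands back one kmapped, page-sized chunk per sg_miter_next() call, so callers never split pages across boundaries themselves. A process-context sketch that zeroes everything an sg list describes (example_zero_sg is a hypothetical name; without SG_MITER_ATOMIC the iterator uses kmap() and may sleep):

#include <linux/scatterlist.h>

static void example_zero_sg(struct scatterlist *sgl, unsigned int nents)
{
	struct sg_mapping_iter miter;

	/* SG_MITER_TO_SG: we write into the sg list's pages */
	sg_miter_start(&miter, sgl, nents, SG_MITER_TO_SG);
	while (sg_miter_next(&miter))
		memset(miter.addr, 0, miter.length);
	sg_miter_stop(&miter);	/* releases the last mapping, if any */
}

A caller that uses fewer bytes than miter.length can lower miter.consumed before the next sg_miter_next() or sg_miter_stop(); per sg_miter_stop() above, __offset advances by exactly miter.consumed.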
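sg_copy_from_buffer() and sg_copy_to_buffer() wrap that same iterator for the common linear-buffer case, with IRQs disabled around the atomic mappings. A sketch of a caller that builds a table over caller-supplied pages and scatters a buffer into it (example_fill_table and its arguments are hypothetical; len is assumed to fit in npages pages):

#include <linux/scatterlist.h>

static int example_fill_table(struct page **pages, unsigned int npages,
			      void *src, size_t len)
{
	struct sg_table table;
	struct scatterlist *sg;
	unsigned int i;
	int ret;

	/* public wrapper: chains internally in SG_MAX_SINGLE_ALLOC units */
	ret = sg_alloc_table(&table, npages, GFP_KERNEL);
	if (ret)
		return ret;

	/* point each entry at one full page */
	for_each_sg(table.sgl, sg, table.nents, i)
		sg_set_page(sg, pages[i], PAGE_SIZE, 0);

	/* scatter 'len' bytes of 'src' across the list */
	sg_copy_from_buffer(table.sgl, table.nents, src, len);

	sg_free_table(&table);
	return 0;
}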