diff --git a/include/linux/bitmap.h b/include/linux/bitmap.h
index daf8c480c78..50e2c16b20f 100644
--- a/include/linux/bitmap.h
+++ b/include/linux/bitmap.h
@@ -45,6 +45,7 @@
  * bitmap_set(dst, pos, nbits)			Set specified bit area
  * bitmap_clear(dst, pos, nbits)		Clear specified bit area
  * bitmap_find_next_zero_area(buf, len, pos, n, mask)	Find bit free area
+ * bitmap_find_next_zero_area_off(buf, len, pos, n, mask, off)	as above
  * bitmap_shift_right(dst, src, n, nbits)	*dst = *src >> n
  * bitmap_shift_left(dst, src, n, nbits)	*dst = *src << n
  * bitmap_remap(dst, src, old, new, nbits)	*dst = map(old, new)(src)
@@ -55,7 +56,8 @@
  * bitmap_parse(buf, buflen, dst, nbits)	Parse bitmap dst from kernel buf
  * bitmap_parse_user(ubuf, ulen, dst, nbits)	Parse bitmap dst from user buf
  * bitmap_scnlistprintf(buf, len, src, nbits)	Print bitmap src as list to buf
- * bitmap_parselist(buf, dst, nbits)		Parse bitmap dst from list
+ * bitmap_parselist(buf, dst, nbits)		Parse bitmap dst from kernel buf
+ * bitmap_parselist_user(ubuf, ulen, dst, nbits)	Parse bitmap dst from user buf
  * bitmap_find_free_region(bitmap, bits, order)	Find and allocate bit region
  * bitmap_release_region(bitmap, pos, order)	Free specified bit region
  * bitmap_allocate_region(bitmap, pos, order)	Allocate specified bit region
@@ -113,11 +115,24 @@ extern int __bitmap_weight(const unsigned long *bitmap, int bits);
 
 extern void bitmap_set(unsigned long *map, int i, int len);
 extern void bitmap_clear(unsigned long *map, int start, int nr);
-extern unsigned long bitmap_find_next_zero_area(unsigned long *map,
-					 unsigned long size,
-					 unsigned long start,
-					 unsigned int nr,
-					 unsigned long align_mask);
+
+extern unsigned long bitmap_find_next_zero_area_off(unsigned long *map,
+						    unsigned long size,
+						    unsigned long start,
+						    unsigned int nr,
+						    unsigned long align_mask,
+						    unsigned long align_offset);
+
+static inline unsigned long
+bitmap_find_next_zero_area(unsigned long *map,
+			   unsigned long size,
+			   unsigned long start,
+			   unsigned int nr,
+			   unsigned long align_mask)
+{
+	return bitmap_find_next_zero_area_off(map, size, start, nr,
+					      align_mask, 0);
+}
 
 extern int bitmap_scnprintf(char *buf, unsigned int len,
 			const unsigned long *src, int nbits);
@@ -129,6 +144,8 @@ extern int bitmap_scnlistprintf(char *buf, unsigned int len,
 			const unsigned long *src, int nbits);
 extern int bitmap_parselist(const char *buf, unsigned long *maskp,
 			int nmaskbits);
+extern int bitmap_parselist_user(const char __user *ubuf, unsigned int ulen,
+			unsigned long *dst, int nbits);
 extern void bitmap_remap(unsigned long *dst, const unsigned long *src,
 		const unsigned long *old, const unsigned long *new, int bits);
 extern int bitmap_bitremap(int oldbit,
diff --git a/include/linux/genalloc.h b/include/linux/genalloc.h
index 9869ef3674a..3bc7dd7394b 100644
--- a/include/linux/genalloc.h
+++ b/include/linux/genalloc.h
@@ -8,29 +8,53 @@
  * Version 2.  See the file COPYING for more details.
  */
 
+#ifndef __GENALLOC_H__
+#define __GENALLOC_H__
 
-/*
- * General purpose special memory pool descriptor.
+struct gen_pool;
+
+struct gen_pool *__must_check gen_pool_create(unsigned order, int nid);
+
+void gen_pool_destroy(struct gen_pool *pool);
+
+unsigned long __must_check
+gen_pool_alloc_aligned(struct gen_pool *pool, size_t size,
+		       unsigned alignment_order);
+
+/**
+ * gen_pool_alloc() - allocate special memory from the pool
+ * @pool:	Pool to allocate from.
+ * @size:	Number of bytes to allocate from the pool.
+ *
+ * Allocate the requested number of bytes from the specified pool.
+ * Uses a first-fit algorithm.
  */
-struct gen_pool {
-	rwlock_t lock;
-	struct list_head chunks;	/* list of chunks in this pool */
-	int min_alloc_order;		/* minimum allocation order */
-};
+static inline unsigned long __must_check
+gen_pool_alloc(struct gen_pool *pool, size_t size)
+{
+	return gen_pool_alloc_aligned(pool, size, 0);
+}
 
-/*
- * General purpose special memory pool chunk descriptor.
+void gen_pool_free(struct gen_pool *pool, unsigned long addr, size_t size);
+
+extern phys_addr_t gen_pool_virt_to_phys(struct gen_pool *pool, unsigned long);
+extern int gen_pool_add_virt(struct gen_pool *, unsigned long, phys_addr_t,
+			     size_t, int);
+/**
+ * gen_pool_add - add a new chunk of special memory to the pool
+ * @pool: pool to add new memory chunk to
+ * @addr: starting address of memory chunk to add to pool
+ * @size: size in bytes of the memory chunk to add to pool
+ * @nid: node id of the node the chunk structure and bitmap should be
+ *       allocated on, or -1
+ *
+ * Add a new chunk of special memory to the specified pool.
+ *
+ * Returns 0 on success or a -ve errno on failure.
  */
-struct gen_pool_chunk {
-	spinlock_t lock;
-	struct list_head next_chunk;	/* next chunk in pool */
-	unsigned long start_addr;	/* starting address of memory chunk */
-	unsigned long end_addr;		/* ending address of memory chunk */
-	unsigned long bits[0];		/* bitmap for allocating memory chunk */
-};
-
-extern struct gen_pool *gen_pool_create(int, int);
-extern int gen_pool_add(struct gen_pool *, unsigned long, size_t, int);
-extern void gen_pool_destroy(struct gen_pool *);
-extern unsigned long gen_pool_alloc(struct gen_pool *, size_t);
-extern void gen_pool_free(struct gen_pool *, unsigned long, size_t);
+static inline int __must_check gen_pool_add(struct gen_pool *pool, unsigned long addr,
+					    size_t size, int nid)
+{
+	return gen_pool_add_virt(pool, addr, -1, size, nid);
+}
+#endif /* __GENALLOC_H__ */
diff --git a/lib/bitmap.c b/lib/bitmap.c
index 741fae905ae..cf12bb86d7c 100644
--- a/lib/bitmap.c
+++ b/lib/bitmap.c
@@ -315,30 +315,32 @@ void bitmap_clear(unsigned long *map, int start, int nr)
 }
 EXPORT_SYMBOL(bitmap_clear);
 
-/*
+/**
  * bitmap_find_next_zero_area - find a contiguous aligned zero area
  * @map: The address to base the search on
  * @size: The bitmap size in bits
  * @start: The bitnumber to start searching at
  * @nr: The number of zeroed bits we're looking for
  * @align_mask: Alignment mask for zero area
+ * @align_offset: Alignment offset for zero area.
  *
  * The @align_mask should be one less than a power of 2; the effect is that
- * the bit offset of all zero areas this function finds is multiples of that
- * power of 2. A @align_mask of 0 means no alignment is required.
+ * the bit offset of all zero areas this function finds plus @align_offset
+ * is a multiple of that power of 2.
  */
-unsigned long bitmap_find_next_zero_area(unsigned long *map,
-					 unsigned long size,
-					 unsigned long start,
-					 unsigned int nr,
-					 unsigned long align_mask)
+unsigned long bitmap_find_next_zero_area_off(unsigned long *map,
+					     unsigned long size,
+					     unsigned long start,
+					     unsigned int nr,
+					     unsigned long align_mask,
+					     unsigned long align_offset)
 {
 	unsigned long index, end, i;
 again:
 	index = find_next_zero_bit(map, size, start);
 
 	/* Align allocation */
-	index = __ALIGN_MASK(index, align_mask);
+	index = __ALIGN_MASK(index + align_offset, align_mask) - align_offset;
 
 	end = index + nr;
 	if (end > size)
@@ -350,7 +352,7 @@ unsigned long bitmap_find_next_zero_area(unsigned long *map,
 	}
 	return index;
 }
-EXPORT_SYMBOL(bitmap_find_next_zero_area);
+EXPORT_SYMBOL(bitmap_find_next_zero_area_off);
 
 /*
  * Bitmap printing & parsing functions: first version by Bill Irwin,
@@ -571,8 +573,11 @@ int bitmap_scnlistprintf(char *buf, unsigned int buflen,
 EXPORT_SYMBOL(bitmap_scnlistprintf);
 
 /**
- * bitmap_parselist - convert list format ASCII string to bitmap
- * @bp: read nul-terminated user string from this buffer
+ * __bitmap_parselist - convert list format ASCII string to bitmap
+ * @buf: read nul-terminated user string from this buffer
+ * @buflen: buffer size in bytes.  If string is smaller than this
+ *    then it must be terminated with a \0.
+ * @is_user: location of buffer, 0 indicates kernel space
  * @maskp: write resulting mask here
  * @nmaskbits: number of bits in mask to be written
  *
@@ -587,20 +592,63 @@ EXPORT_SYMBOL(bitmap_scnlistprintf);
  * %-EINVAL: invalid character in string
  * %-ERANGE: bit number specified too large for mask
  */
-int bitmap_parselist(const char *bp, unsigned long *maskp, int nmaskbits)
+static int __bitmap_parselist(const char *buf, unsigned int buflen,
+		int is_user, unsigned long *maskp,
+		int nmaskbits)
 {
 	unsigned a, b;
+	int c, old_c, totaldigits;
+	const char __user *ubuf = buf;
+	int exp_digit, in_range;
 
+	totaldigits = c = 0;
 	bitmap_zero(maskp, nmaskbits);
 	do {
-		if (!isdigit(*bp))
-			return -EINVAL;
-		b = a = simple_strtoul(bp, (char **)&bp, BASEDEC);
-		if (*bp == '-') {
-			bp++;
-			if (!isdigit(*bp))
+		exp_digit = 1;
+		in_range = 0;
+		a = b = 0;
+
+		/* Get the next cpu# or a range of cpu#'s */
+		while (buflen) {
+			old_c = c;
+			if (is_user) {
+				if (__get_user(c, ubuf++))
+					return -EFAULT;
+			} else
+				c = *buf++;
+			buflen--;
+			if (isspace(c))
+				continue;
+
+			/*
+			 * If the last character was a space and the current
+			 * character isn't '\0', we've got embedded whitespace.
+			 * This is a no-no, so throw an error.
+			 */
+			if (totaldigits && c && isspace(old_c))
+				return -EINVAL;
+
+			/* A '\0' or a ',' signal the end of a cpu# or range */
+			if (c == '\0' || c == ',')
+				break;
+
+			if (c == '-') {
+				if (exp_digit || in_range)
+					return -EINVAL;
+				b = 0;
+				in_range = 1;
+				exp_digit = 1;
+				continue;
+			}
+
+			if (!isdigit(c))
 				return -EINVAL;
-			b = simple_strtoul(bp, (char **)&bp, BASEDEC);
+
+			b = b * 10 + (c - '0');
+			if (!in_range)
+				a = b;
+			exp_digit = 0;
+			totaldigits++;
 		}
 		if (!(a <= b))
 			return -EINVAL;
@@ -610,13 +658,52 @@ int bitmap_parselist(const char *bp, unsigned long *maskp, int nmaskbits)
 			set_bit(a, maskp);
 			a++;
 		}
-		if (*bp == ',')
-			bp++;
-	} while (*bp != '\0' && *bp != '\n');
+	} while (buflen && c == ',');
 	return 0;
 }
+
+int bitmap_parselist(const char *bp, unsigned long *maskp, int nmaskbits)
+{
+	char *nl = strchr(bp, '\n');
+	int len;
+
+	if (nl)
+		len = nl - bp;
+	else
+		len = strlen(bp);
+
+	return __bitmap_parselist(bp, len, 0, maskp, nmaskbits);
+}
 EXPORT_SYMBOL(bitmap_parselist);
+
+/**
+ * bitmap_parselist_user()
+ *
+ * @ubuf: pointer to user buffer containing string.
+ * @ulen: buffer size in bytes.  If string is smaller than this
+ *    then it must be terminated with a \0.
+ * @maskp: pointer to bitmap array that will contain result.
+ * @nmaskbits: size of bitmap, in bits.
+ *
+ * Wrapper for bitmap_parselist(), providing it with user buffer.
+ *
+ * We cannot have this as an inline function in bitmap.h because it needs
+ * linux/uaccess.h to get the access_ok() declaration and this causes
+ * cyclic dependencies.
+ */
+int bitmap_parselist_user(const char __user *ubuf,
+			unsigned int ulen, unsigned long *maskp,
+			int nmaskbits)
+{
+	if (!access_ok(VERIFY_READ, ubuf, ulen))
+		return -EFAULT;
+	return __bitmap_parselist((const char *)ubuf,
+					ulen, 1, maskp, nmaskbits);
+}
+EXPORT_SYMBOL(bitmap_parselist_user);
+
+
 
 /**
  * bitmap_pos_to_ord - find ordinal of set bit at given position in bitmap
  * @buf: pointer to a bitmap
@@ -830,7 +917,7 @@ EXPORT_SYMBOL(bitmap_bitremap);
  *	@orig (i.e. bits 3, 5, 7 and 9) were also set.
  *
  *	When bit 11 is set in @orig, it means turn on the bit in
- *	@dst corresponding to whatever is the twelth bit that is
+ *	@dst corresponding to whatever is the twelfth bit that is
  *	turned on in @relmap.  In the above example, there were
  *	only ten bits turned on in @relmap (30..39), so that bit
  *	11 was set in @orig had no affect on @dst.
diff --git a/lib/genalloc.c b/lib/genalloc.c
index 1923f1490e7..c7b9b9c41b7 100644
--- a/lib/genalloc.c
+++ b/lib/genalloc.c
@@ -16,53 +16,86 @@
 #include <linux/genalloc.h>
 
+/* General purpose special memory pool descriptor. */
+struct gen_pool {
+	rwlock_t lock;			/* protects chunks list */
+	struct list_head chunks;	/* list of chunks in this pool */
+	unsigned order;			/* minimum allocation order */
+};
+
+/* General purpose special memory pool chunk descriptor. */
+struct gen_pool_chunk {
+	spinlock_t lock;		/* protects bits */
+	struct list_head next_chunk;	/* next chunk in pool */
+	phys_addr_t phys_addr;		/* physical starting address of memory chunk */
+	unsigned long start;		/* start of memory chunk */
+	unsigned long size;		/* number of bits */
+	unsigned long bits[0];		/* bitmap for allocating memory chunk */
+};
+
 /**
- * gen_pool_create - create a new special memory pool
- * @min_alloc_order: log base 2 of number of bytes each bitmap bit represents
- * @nid: node id of the node the pool structure should be allocated on, or -1
+ * gen_pool_create() - create a new special memory pool
+ * @order:	Log base 2 of number of bytes each bitmap bit
+ *		represents.
+ * @nid:	Node id of the node the pool structure should be allocated
+ *		on, or -1.  This will also be used for other allocations.
  *
  * Create a new special memory pool that can be used to manage special purpose
  * memory not managed by the regular kmalloc/kfree interface.
  */
-struct gen_pool *gen_pool_create(int min_alloc_order, int nid)
+struct gen_pool *__must_check gen_pool_create(unsigned order, int nid)
 {
 	struct gen_pool *pool;
 
-	pool = kmalloc_node(sizeof(struct gen_pool), GFP_KERNEL, nid);
-	if (pool != NULL) {
+	if (WARN_ON(order >= BITS_PER_LONG))
+		return NULL;
+
+	pool = kmalloc_node(sizeof *pool, GFP_KERNEL, nid);
+	if (pool) {
 		rwlock_init(&pool->lock);
 		INIT_LIST_HEAD(&pool->chunks);
-		pool->min_alloc_order = min_alloc_order;
+		pool->order = order;
 	}
 	return pool;
 }
 EXPORT_SYMBOL(gen_pool_create);
 
 /**
- * gen_pool_add - add a new chunk of special memory to the pool
+ * gen_pool_add_virt - add a new chunk of special memory to the pool
  * @pool: pool to add new memory chunk to
- * @addr: starting address of memory chunk to add to pool
+ * @virt: virtual starting address of memory chunk to add to pool
+ * @phys: physical starting address of memory chunk to add to pool
  * @size: size in bytes of the memory chunk to add to pool
  * @nid: node id of the node the chunk structure and bitmap should be
  *       allocated on, or -1
  *
  * Add a new chunk of special memory to the specified pool.
+ *
+ * Returns 0 on success or a -ve errno on failure.
  */
-int gen_pool_add(struct gen_pool *pool, unsigned long addr, size_t size,
-		 int nid)
+int __must_check gen_pool_add_virt(struct gen_pool *pool, unsigned long virt, phys_addr_t phys,
+				   size_t size, int nid)
 {
 	struct gen_pool_chunk *chunk;
-	int nbits = size >> pool->min_alloc_order;
-	int nbytes = sizeof(struct gen_pool_chunk) +
-				(nbits + BITS_PER_BYTE - 1) / BITS_PER_BYTE;
+	size_t nbytes;
 
-	chunk = kmalloc_node(nbytes, GFP_KERNEL | __GFP_ZERO, nid);
-	if (unlikely(chunk == NULL))
-		return -1;
+	if (WARN_ON(!virt || virt + size < virt ||
+		    (virt & ((1 << pool->order) - 1))))
+		return -EINVAL;
+
+	size = size >> pool->order;
+	if (WARN_ON(!size))
+		return -EINVAL;
+
+	nbytes = sizeof *chunk + BITS_TO_LONGS(size) * sizeof *chunk->bits;
+	chunk = kzalloc_node(nbytes, GFP_KERNEL, nid);
+	if (!chunk)
+		return -ENOMEM;
 
 	spin_lock_init(&chunk->lock);
-	chunk->start_addr = addr;
-	chunk->end_addr = addr + size;
+	chunk->phys_addr = phys;
+	chunk->start = virt >> pool->order;
+	chunk->size = size;
 
 	write_lock(&pool->lock);
 	list_add(&chunk->next_chunk, &pool->chunks);
@@ -70,118 +103,145 @@ int gen_pool_add(struct gen_pool *pool, unsigned long addr, size_t size,
 
 	return 0;
 }
-EXPORT_SYMBOL(gen_pool_add);
+EXPORT_SYMBOL(gen_pool_add_virt);
 
 /**
- * gen_pool_destroy - destroy a special memory pool
- * @pool: pool to destroy
+ * gen_pool_virt_to_phys - return the physical address of memory
+ * @pool: pool to allocate from
+ * @addr: starting address of memory
+ *
+ * Returns the physical address on success, or -1 on error.
+ */
+phys_addr_t gen_pool_virt_to_phys(struct gen_pool *pool, unsigned long addr)
+{
+	struct list_head *_chunk;
+	struct gen_pool_chunk *chunk;
+	phys_addr_t paddr = -1;
+
+	read_lock(&pool->lock);
+	list_for_each(_chunk, &pool->chunks) {
+		unsigned long chunk_addr;
+
+		chunk = list_entry(_chunk, struct gen_pool_chunk, next_chunk);
+		chunk_addr = chunk->start << pool->order;
+
+		if (addr >= chunk_addr &&
+		    addr < chunk_addr + (chunk->size << pool->order)) {
+			paddr = chunk->phys_addr + (addr - chunk_addr);
+			break;
+		}
+	}
+	read_unlock(&pool->lock);
+
+	return paddr;
+}
+EXPORT_SYMBOL(gen_pool_virt_to_phys);
+
+/**
+ * gen_pool_destroy() - destroy a special memory pool
+ * @pool:	Pool to destroy.
  *
  * Destroy the specified special memory pool.  Verifies that there are no
  * outstanding allocations.
  */
 void gen_pool_destroy(struct gen_pool *pool)
 {
-	struct list_head *_chunk, *_next_chunk;
 	struct gen_pool_chunk *chunk;
-	int order = pool->min_alloc_order;
-	int bit, end_bit;
-
+	int bit;
 
-	list_for_each_safe(_chunk, _next_chunk, &pool->chunks) {
-		chunk = list_entry(_chunk, struct gen_pool_chunk, next_chunk);
+	while (!list_empty(&pool->chunks)) {
+		chunk = list_entry(pool->chunks.next, struct gen_pool_chunk,
+				   next_chunk);
 		list_del(&chunk->next_chunk);
 
-		end_bit = (chunk->end_addr - chunk->start_addr) >> order;
-		bit = find_next_bit(chunk->bits, end_bit, 0);
-		BUG_ON(bit < end_bit);
+		bit = find_next_bit(chunk->bits, chunk->size, 0);
+		BUG_ON(bit < chunk->size);
 
 		kfree(chunk);
 	}
 	kfree(pool);
-	return;
 }
 EXPORT_SYMBOL(gen_pool_destroy);
 
 /**
- * gen_pool_alloc - allocate special memory from the pool
- * @pool: pool to allocate from
- * @size: number of bytes to allocate from the pool
+ * gen_pool_alloc_aligned() - allocate special memory from the pool
+ * @pool:	Pool to allocate from.
+ * @size:	Number of bytes to allocate from the pool.
+ * @alignment_order:	Order the allocated space should be
+ *			aligned to (e.g. 20 means allocated space
+ *			must be aligned to 1MiB).
  *
  * Allocate the requested number of bytes from the specified pool.
  * Uses a first-fit algorithm.
  */
-unsigned long gen_pool_alloc(struct gen_pool *pool, size_t size)
+unsigned long __must_check
+gen_pool_alloc_aligned(struct gen_pool *pool, size_t size,
+		       unsigned alignment_order)
 {
-	struct list_head *_chunk;
+	unsigned long addr, align_mask = 0, flags, start;
 	struct gen_pool_chunk *chunk;
-	unsigned long addr, flags;
-	int order = pool->min_alloc_order;
-	int nbits, start_bit, end_bit;
 
 	if (size == 0)
 		return 0;
 
-	nbits = (size + (1UL << order) - 1) >> order;
+	if (alignment_order > pool->order)
+		align_mask = (1 << (alignment_order - pool->order)) - 1;
 
-	read_lock(&pool->lock);
-	list_for_each(_chunk, &pool->chunks) {
-		chunk = list_entry(_chunk, struct gen_pool_chunk, next_chunk);
+	size = (size + (1UL << pool->order) - 1) >> pool->order;
 
-		end_bit = (chunk->end_addr - chunk->start_addr) >> order;
+	read_lock(&pool->lock);
+	list_for_each_entry(chunk, &pool->chunks, next_chunk) {
+		if (chunk->size < size)
+			continue;
 
 		spin_lock_irqsave(&chunk->lock, flags);
-		start_bit = bitmap_find_next_zero_area(chunk->bits, end_bit, 0,
-						nbits, 0);
-		if (start_bit >= end_bit) {
+		start = bitmap_find_next_zero_area_off(chunk->bits, chunk->size,
+						       0, size, align_mask,
+						       chunk->start);
+		if (start >= chunk->size) {
 			spin_unlock_irqrestore(&chunk->lock, flags);
 			continue;
 		}
 
-		addr = chunk->start_addr + ((unsigned long)start_bit << order);
-
-		bitmap_set(chunk->bits, start_bit, nbits);
+		bitmap_set(chunk->bits, start, size);
 		spin_unlock_irqrestore(&chunk->lock, flags);
-		read_unlock(&pool->lock);
-		return addr;
+		addr = (chunk->start + start) << pool->order;
+		goto done;
 	}
+
+	addr = 0;
+done:
 	read_unlock(&pool->lock);
-	return 0;
+	return addr;
 }
-EXPORT_SYMBOL(gen_pool_alloc);
+EXPORT_SYMBOL(gen_pool_alloc_aligned);
 
 /**
- * gen_pool_free - free allocated special memory back to the pool
- * @pool: pool to free to
- * @addr: starting address of memory to free back to pool
- * @size: size in bytes of memory to free
+ * gen_pool_free() - free allocated special memory back to the pool
+ * @pool:	Pool to free to.
+ * @addr:	Starting address of memory to free back to pool.
+ * @size:	Size in bytes of memory to free.
  *
  * Free previously allocated special memory back to the specified pool.
  */
 void gen_pool_free(struct gen_pool *pool, unsigned long addr, size_t size)
 {
-	struct list_head *_chunk;
 	struct gen_pool_chunk *chunk;
 	unsigned long flags;
-	int order = pool->min_alloc_order;
-	int bit, nbits;
 
-	nbits = (size + (1UL << order) - 1) >> order;
+	if (!size)
+		return;
 
-	read_lock(&pool->lock);
-	list_for_each(_chunk, &pool->chunks) {
-		chunk = list_entry(_chunk, struct gen_pool_chunk, next_chunk);
+	addr = addr >> pool->order;
+	size = (size + (1UL << pool->order) - 1) >> pool->order;
+
+	BUG_ON(addr + size < addr);
 
-		if (addr >= chunk->start_addr && addr < chunk->end_addr) {
-			BUG_ON(addr + size > chunk->end_addr);
+	read_lock(&pool->lock);
+	list_for_each_entry(chunk, &pool->chunks, next_chunk)
+		if (addr >= chunk->start &&
+		    addr + size <= chunk->start + chunk->size) {
 			spin_lock_irqsave(&chunk->lock, flags);
-			bit = (addr - chunk->start_addr) >> order;
-			while (nbits--)
-				__clear_bit(bit++, chunk->bits);
+			bitmap_clear(chunk->bits, addr - chunk->start, size);
 			spin_unlock_irqrestore(&chunk->lock, flags);
-			break;
+			goto done;
 		}
-	}
-	BUG_ON(nbits > 0);
+	BUG_ON(1);
+done:
 	read_unlock(&pool->lock);
 }
 EXPORT_SYMBOL(gen_pool_free);
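
Reviewer note, not part of the patch: a usage sketch of the reworked genalloc API for anyone picking this series up. Everything below (the init function, the SRAM addresses, sizes and the chosen order) is made up for illustration, and error handling is minimal.

/* Illustrative only -- a hypothetical driver carving allocations out of a
 * 1 MiB on-chip SRAM with the reworked API: gen_pool_add_virt(),
 * gen_pool_alloc_aligned() and gen_pool_virt_to_phys().
 */
#include <linux/genalloc.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/types.h>

#define EXAMPLE_ORDER		12		/* one bitmap bit per 4 KiB */
#define EXAMPLE_SRAM_VIRT	0xfe000000UL	/* made-up ioremapped base */
#define EXAMPLE_SRAM_PHYS	0x40000000UL	/* made-up physical base */
#define EXAMPLE_SRAM_SIZE	0x100000	/* 1 MiB */

static struct gen_pool *example_pool;

static int __init example_pool_init(void)
{
	unsigned long vaddr;
	phys_addr_t paddr;
	int ret;

	example_pool = gen_pool_create(EXAMPLE_ORDER, -1);
	if (!example_pool)
		return -ENOMEM;

	ret = gen_pool_add_virt(example_pool, EXAMPLE_SRAM_VIRT,
				EXAMPLE_SRAM_PHYS, EXAMPLE_SRAM_SIZE, -1);
	if (ret) {
		gen_pool_destroy(example_pool);
		return ret;
	}

	/* 64 KiB allocation whose address is aligned to 1 << 16 bytes. */
	vaddr = gen_pool_alloc_aligned(example_pool, 0x10000, 16);
	if (!vaddr)
		return -ENOMEM;

	/* e.g. hand the physical address to a DMA engine. */
	paddr = gen_pool_virt_to_phys(example_pool, vaddr);
	(void)paddr;

	gen_pool_free(example_pool, vaddr, 0x10000);
	return 0;
}

Note that because gen_pool_alloc_aligned() passes chunk->start as the alignment offset to bitmap_find_next_zero_area_off(), the alignment constraint applies to the returned virtual address itself, not merely to the offset within the chunk.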
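Also not part of the patch: the heart of the bitmap change is the single expression __ALIGN_MASK(index + align_offset, align_mask) - align_offset. The stand-alone user-space check below (the __ALIGN_MASK definition is copied from include/linux/kernel.h; the numbers are arbitrary) shows what it computes.

/* Stand-alone check of the align_offset arithmetic used by
 * bitmap_find_next_zero_area_off().  Build with: cc -o align-check align-check.c
 */
#include <assert.h>
#include <stdio.h>

#define __ALIGN_MASK(x, mask)	(((x) + (mask)) & ~(mask))

/* Round index up so that (index + align_offset) becomes a multiple of
 * (align_mask + 1), mirroring the patched line in lib/bitmap.c.
 */
static unsigned long align_off(unsigned long index, unsigned long align_mask,
			       unsigned long align_offset)
{
	return __ALIGN_MASK(index + align_offset, align_mask) - align_offset;
}

int main(void)
{
	/* A chunk whose first bit sits at absolute offset 5; we want areas
	 * whose absolute position (align_offset + index) is a multiple of 8,
	 * i.e. align_mask = 7.
	 */
	unsigned long index = align_off(0, 7, 5);

	printf("index = %lu, absolute position = %lu\n", index, index + 5);
	assert((index + 5) % 8 == 0);	/* index rounds up from 0 to 3 */
	return 0;
}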
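Finally, a sketch of how a /proc or debugfs write handler might consume bitmap_parselist_user(); the handler name and the NR_CPUS-sized destination bitmap are hypothetical, loosely modelled on the existing bitmap_parse_user() callers.

/* Illustrative only -- hypothetical write handler parsing "0-3,8,12-15"
 * style lists straight from the user buffer.
 */
#include <linux/bitmap.h>
#include <linux/fs.h>
#include <linux/threads.h>

static DECLARE_BITMAP(example_mask, NR_CPUS);	/* made-up destination */

static ssize_t example_list_write(struct file *file, const char __user *buf,
				  size_t count, loff_t *ppos)
{
	int err;

	err = bitmap_parselist_user(buf, count, example_mask, NR_CPUS);
	if (err)
		return err;

	return count;
}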