Diffstat (limited to 'libdde-linux26/contrib/lib')
26 files changed, 10214 insertions, 0 deletions
diff --git a/libdde-linux26/contrib/lib/bitmap.c b/libdde-linux26/contrib/lib/bitmap.c new file mode 100644 index 00000000..35a1f7ff --- /dev/null +++ b/libdde-linux26/contrib/lib/bitmap.c @@ -0,0 +1,1020 @@ +/* + * lib/bitmap.c + * Helper functions for bitmap.h. + * + * This source code is licensed under the GNU General Public License, + * Version 2. See the file COPYING for more details. + */ +#include <linux/module.h> +#include <linux/ctype.h> +#include <linux/errno.h> +#include <linux/bitmap.h> +#include <linux/bitops.h> +#include <asm/uaccess.h> + +/* + * bitmaps provide an array of bits, implemented using an + * array of unsigned longs. The number of valid bits in a + * given bitmap does _not_ need to be an exact multiple of + * BITS_PER_LONG. + * + * The possible unused bits in the last, partially used word + * of a bitmap are 'don't care'. The implementation makes + * no particular effort to keep them zero. It ensures that + * their value will not affect the results of any operation. + * The bitmap operations that return Boolean (bitmap_empty, + * for example) or scalar (bitmap_weight, for example) results + * carefully filter out these unused bits from impacting their + * results. + * + * These operations actually hold to a slightly stronger rule: + * if you don't input any bitmaps to these ops that have some + * unused bits set, then they won't output any set unused bits + * in output bitmaps. + * + * The byte ordering of bitmaps is more natural on little + * endian architectures. See the big-endian headers + * include/asm-ppc64/bitops.h and include/asm-s390/bitops.h + * for the best explanations of this ordering. + */ + +int __bitmap_empty(const unsigned long *bitmap, int bits) +{ + int k, lim = bits/BITS_PER_LONG; + for (k = 0; k < lim; ++k) + if (bitmap[k]) + return 0; + + if (bits % BITS_PER_LONG) + if (bitmap[k] & BITMAP_LAST_WORD_MASK(bits)) + return 0; + + return 1; +} +EXPORT_SYMBOL(__bitmap_empty); + +int __bitmap_full(const unsigned long *bitmap, int bits) +{ + int k, lim = bits/BITS_PER_LONG; + for (k = 0; k < lim; ++k) + if (~bitmap[k]) + return 0; + + if (bits % BITS_PER_LONG) + if (~bitmap[k] & BITMAP_LAST_WORD_MASK(bits)) + return 0; + + return 1; +} +EXPORT_SYMBOL(__bitmap_full); + +int __bitmap_equal(const unsigned long *bitmap1, + const unsigned long *bitmap2, int bits) +{ + int k, lim = bits/BITS_PER_LONG; + for (k = 0; k < lim; ++k) + if (bitmap1[k] != bitmap2[k]) + return 0; + + if (bits % BITS_PER_LONG) + if ((bitmap1[k] ^ bitmap2[k]) & BITMAP_LAST_WORD_MASK(bits)) + return 0; + + return 1; +} +EXPORT_SYMBOL(__bitmap_equal); + +void __bitmap_complement(unsigned long *dst, const unsigned long *src, int bits) +{ + int k, lim = bits/BITS_PER_LONG; + for (k = 0; k < lim; ++k) + dst[k] = ~src[k]; + + if (bits % BITS_PER_LONG) + dst[k] = ~src[k] & BITMAP_LAST_WORD_MASK(bits); +} +EXPORT_SYMBOL(__bitmap_complement); + +/** + * __bitmap_shift_right - logical right shift of the bits in a bitmap + * @dst : destination bitmap + * @src : source bitmap + * @shift : shift by this many bits + * @bits : bitmap size, in bits + * + * Shifting right (dividing) means moving bits in the MS -> LS bit + * direction. Zeros are fed into the vacated MS positions and the + * LS bits shifted off the bottom are lost.
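+ * + * A small worked example (illustrative, not part of the original + * comment): on a 16-bit bitmap with bits 5 and 12 set, shifting + * right by 4 leaves bits 1 and 8 set; any set bit below position 4 + * is simply discarded.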
+ */ +void __bitmap_shift_right(unsigned long *dst, + const unsigned long *src, int shift, int bits) +{ + int k, lim = BITS_TO_LONGS(bits), left = bits % BITS_PER_LONG; + int off = shift/BITS_PER_LONG, rem = shift % BITS_PER_LONG; + unsigned long mask = (1UL << left) - 1; + for (k = 0; off + k < lim; ++k) { + unsigned long upper, lower; + + /* + * If shift is not word aligned, take lower rem bits of + * word above and make them the top rem bits of result. + */ + if (!rem || off + k + 1 >= lim) + upper = 0; + else { + upper = src[off + k + 1]; + if (off + k + 1 == lim - 1 && left) + upper &= mask; + } + lower = src[off + k]; + if (left && off + k == lim - 1) + lower &= mask; + dst[k] = upper << (BITS_PER_LONG - rem) | lower >> rem; + if (left && k == lim - 1) + dst[k] &= mask; + } + if (off) + memset(&dst[lim - off], 0, off*sizeof(unsigned long)); +} +EXPORT_SYMBOL(__bitmap_shift_right); + + +/** + * __bitmap_shift_left - logical left shift of the bits in a bitmap + * @dst : destination bitmap + * @src : source bitmap + * @shift : shift by this many bits + * @bits : bitmap size, in bits + * + * Shifting left (multiplying) means moving bits in the LS -> MS + * direction. Zeros are fed into the vacated LS bit positions + * and those MS bits shifted off the top are lost. + */ + +void __bitmap_shift_left(unsigned long *dst, + const unsigned long *src, int shift, int bits) +{ + int k, lim = BITS_TO_LONGS(bits), left = bits % BITS_PER_LONG; + int off = shift/BITS_PER_LONG, rem = shift % BITS_PER_LONG; + for (k = lim - off - 1; k >= 0; --k) { + unsigned long upper, lower; + + /* + * If shift is not word aligned, take upper rem bits of + * word below and make them the bottom rem bits of result. + */ + if (rem && k > 0) + lower = src[k - 1]; + else + lower = 0; + upper = src[k]; + if (left && k == lim - 1) + upper &= (1UL << left) - 1; + dst[k + off] = lower >> (BITS_PER_LONG - rem) | upper << rem; + if (left && k + off == lim - 1) + dst[k + off] &= (1UL << left) - 1; + } + if (off) + memset(dst, 0, off*sizeof(unsigned long)); +} +EXPORT_SYMBOL(__bitmap_shift_left); + +void __bitmap_and(unsigned long *dst, const unsigned long *bitmap1, + const unsigned long *bitmap2, int bits) +{ + int k; + int nr = BITS_TO_LONGS(bits); + + for (k = 0; k < nr; k++) + dst[k] = bitmap1[k] & bitmap2[k]; +} +EXPORT_SYMBOL(__bitmap_and); + +void __bitmap_or(unsigned long *dst, const unsigned long *bitmap1, + const unsigned long *bitmap2, int bits) +{ + int k; + int nr = BITS_TO_LONGS(bits); + + for (k = 0; k < nr; k++) + dst[k] = bitmap1[k] | bitmap2[k]; +} +EXPORT_SYMBOL(__bitmap_or); + +void __bitmap_xor(unsigned long *dst, const unsigned long *bitmap1, + const unsigned long *bitmap2, int bits) +{ + int k; + int nr = BITS_TO_LONGS(bits); + + for (k = 0; k < nr; k++) + dst[k] = bitmap1[k] ^ bitmap2[k]; +} +EXPORT_SYMBOL(__bitmap_xor); + +void __bitmap_andnot(unsigned long *dst, const unsigned long *bitmap1, + const unsigned long *bitmap2, int bits) +{ + int k; + int nr = BITS_TO_LONGS(bits); + + for (k = 0; k < nr; k++) + dst[k] = bitmap1[k] & ~bitmap2[k]; +} +EXPORT_SYMBOL(__bitmap_andnot); + +int __bitmap_intersects(const unsigned long *bitmap1, + const unsigned long *bitmap2, int bits) +{ + int k, lim = bits/BITS_PER_LONG; + for (k = 0; k < lim; ++k) + if (bitmap1[k] & bitmap2[k]) + return 1; + + if (bits % BITS_PER_LONG) + if ((bitmap1[k] & bitmap2[k]) & BITMAP_LAST_WORD_MASK(bits)) + return 1; + return 0; +} +EXPORT_SYMBOL(__bitmap_intersects); + +int __bitmap_subset(const unsigned long *bitmap1, + const unsigned 
long *bitmap2, int bits) +{ + int k, lim = bits/BITS_PER_LONG; + for (k = 0; k < lim; ++k) + if (bitmap1[k] & ~bitmap2[k]) + return 0; + + if (bits % BITS_PER_LONG) + if ((bitmap1[k] & ~bitmap2[k]) & BITMAP_LAST_WORD_MASK(bits)) + return 0; + return 1; +} +EXPORT_SYMBOL(__bitmap_subset); + +int __bitmap_weight(const unsigned long *bitmap, int bits) +{ + int k, w = 0, lim = bits/BITS_PER_LONG; + + for (k = 0; k < lim; k++) + w += hweight_long(bitmap[k]); + + if (bits % BITS_PER_LONG) + w += hweight_long(bitmap[k] & BITMAP_LAST_WORD_MASK(bits)); + + return w; +} +EXPORT_SYMBOL(__bitmap_weight); + +/* + * Bitmap printing & parsing functions: first version by Bill Irwin, + * second version by Paul Jackson, third by Joe Korty. + */ + +#define CHUNKSZ 32 +#define nbits_to_hold_value(val) fls(val) +#define unhex(c) (isdigit(c) ? (c - '0') : (toupper(c) - 'A' + 10)) +#define BASEDEC 10 /* fancier cpuset lists input in decimal */ + +/** + * bitmap_scnprintf - convert bitmap to an ASCII hex string. + * @buf: byte buffer into which string is placed + * @buflen: reserved size of @buf, in bytes + * @maskp: pointer to bitmap to convert + * @nmaskbits: size of bitmap, in bits + * + * Exactly @nmaskbits bits are displayed. Hex digits are grouped into + * comma-separated sets of eight digits per set. + */ +int bitmap_scnprintf(char *buf, unsigned int buflen, + const unsigned long *maskp, int nmaskbits) +{ + int i, word, bit, len = 0; + unsigned long val; + const char *sep = ""; + int chunksz; + u32 chunkmask; + + chunksz = nmaskbits & (CHUNKSZ - 1); + if (chunksz == 0) + chunksz = CHUNKSZ; + + i = ALIGN(nmaskbits, CHUNKSZ) - CHUNKSZ; + for (; i >= 0; i -= CHUNKSZ) { + chunkmask = ((1ULL << chunksz) - 1); + word = i / BITS_PER_LONG; + bit = i % BITS_PER_LONG; + val = (maskp[word] >> bit) & chunkmask; + len += scnprintf(buf+len, buflen-len, "%s%0*lx", sep, + (chunksz+3)/4, val); + chunksz = CHUNKSZ; + sep = ","; + } + return len; +} +EXPORT_SYMBOL(bitmap_scnprintf); + +/** + * __bitmap_parse - convert an ASCII hex string into a bitmap. + * @buf: pointer to buffer containing string. + * @buflen: buffer size in bytes. If string is smaller than this + * then it must be terminated with a \0. + * @is_user: location of buffer, 0 indicates kernel space + * @maskp: pointer to bitmap array that will contain result. + * @nmaskbits: size of bitmap, in bits. + * + * Commas group hex digits into chunks. Each chunk defines exactly 32 + * bits of the resultant bitmask. No chunk may specify a value larger + * than 32 bits (%-EOVERFLOW), and if a chunk specifies a smaller value + * then leading 0-bits are prepended. %-EINVAL is returned for illegal + * characters and for grouping errors such as "1,,5", ",44", "," and "". + * Leading and trailing whitespace accepted, but not embedded whitespace. + */ +int __bitmap_parse(const char *buf, unsigned int buflen, + int is_user, unsigned long *maskp, + int nmaskbits) +{ + int c, old_c, totaldigits, ndigits, nchunks, nbits; + u32 chunk; + const char __user *ubuf = buf; + + bitmap_zero(maskp, nmaskbits); + + nchunks = nbits = totaldigits = c = 0; + do { + chunk = ndigits = 0; + + /* Get the next chunk of the bitmap */ + while (buflen) { + old_c = c; + if (is_user) { + if (__get_user(c, ubuf++)) + return -EFAULT; + } + else + c = *buf++; + buflen--; + if (isspace(c)) + continue; + + /* + * If the last character was a space and the current + * character isn't '\0', we've got embedded whitespace. + * This is a no-no, so throw an error. 
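+ * For example (illustrative): " 1f " is accepted, while "1 f" is + * rejected with -EINVAL.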
+ */ + if (totaldigits && c && isspace(old_c)) + return -EINVAL; + + /* A '\0' or a ',' signals the end of the chunk */ + if (c == '\0' || c == ',') + break; + + if (!isxdigit(c)) + return -EINVAL; + + /* + * Make sure there are at least 4 free bits in 'chunk'. + * If not, this hexdigit will overflow 'chunk', so + * throw an error. + */ + if (chunk & ~((1UL << (CHUNKSZ - 4)) - 1)) + return -EOVERFLOW; + + chunk = (chunk << 4) | unhex(c); + ndigits++; totaldigits++; + } + if (ndigits == 0) + return -EINVAL; + if (nchunks == 0 && chunk == 0) + continue; + + __bitmap_shift_left(maskp, maskp, CHUNKSZ, nmaskbits); + *maskp |= chunk; + nchunks++; + nbits += (nchunks == 1) ? nbits_to_hold_value(chunk) : CHUNKSZ; + if (nbits > nmaskbits) + return -EOVERFLOW; + } while (buflen && c == ','); + + return 0; +} +EXPORT_SYMBOL(__bitmap_parse); + +/** + * bitmap_parse_user() + * + * @ubuf: pointer to user buffer containing string. + * @ulen: buffer size in bytes. If string is smaller than this + * then it must be terminated with a \0. + * @maskp: pointer to bitmap array that will contain result. + * @nmaskbits: size of bitmap, in bits. + * + * Wrapper for __bitmap_parse(), providing it with a user buffer. + * + * We cannot have this as an inline function in bitmap.h because it needs + * linux/uaccess.h to get the access_ok() declaration and this causes + * cyclic dependencies. + */ +int bitmap_parse_user(const char __user *ubuf, + unsigned int ulen, unsigned long *maskp, + int nmaskbits) +{ + if (!access_ok(VERIFY_READ, ubuf, ulen)) + return -EFAULT; + return __bitmap_parse((const char *)ubuf, ulen, 1, maskp, nmaskbits); +} +EXPORT_SYMBOL(bitmap_parse_user); + +/* + * bscnl_emit(buf, buflen, rbot, rtop, len) + * + * Helper routine for bitmap_scnlistprintf(). Write decimal number + * or range to buf, suppressing output past buf+buflen, with optional + * comma-prefix. Return len of what would be written to buf, if it + * all fit. + */ +static inline int bscnl_emit(char *buf, int buflen, int rbot, int rtop, int len) +{ + if (len > 0) + len += scnprintf(buf + len, buflen - len, ","); + if (rbot == rtop) + len += scnprintf(buf + len, buflen - len, "%d", rbot); + else + len += scnprintf(buf + len, buflen - len, "%d-%d", rbot, rtop); + return len; +} + +/** + * bitmap_scnlistprintf - convert bitmap to list format ASCII string + * @buf: byte buffer into which string is placed + * @buflen: reserved size of @buf, in bytes + * @maskp: pointer to bitmap to convert + * @nmaskbits: size of bitmap, in bits + * + * Output format is a comma-separated list of decimal numbers and + * ranges. Consecutively set bits are shown as two hyphen-separated + * decimal numbers, the smallest and largest bit numbers set in + * the range. Output format is compatible with the format + * accepted as input by bitmap_parselist(). + * + * The return value is the number of characters which would be + * generated for the given input, excluding the trailing '\0', as + * per ISO C99.
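+ * + * For example (illustrative), a bitmap with bits 0, 1, 2, 4, 6 and 7 + * set is emitted as "0-2,4,6-7".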
+ */ +int bitmap_scnlistprintf(char *buf, unsigned int buflen, + const unsigned long *maskp, int nmaskbits) +{ + int len = 0; + /* current bit is 'cur', most recently seen range is [rbot, rtop] */ + int cur, rbot, rtop; + + if (buflen == 0) + return 0; + buf[0] = 0; + + rbot = cur = find_first_bit(maskp, nmaskbits); + while (cur < nmaskbits) { + rtop = cur; + cur = find_next_bit(maskp, nmaskbits, cur+1); + if (cur >= nmaskbits || cur > rtop + 1) { + len = bscnl_emit(buf, buflen, rbot, rtop, len); + rbot = cur; + } + } + return len; +} +EXPORT_SYMBOL(bitmap_scnlistprintf); + +/** + * bitmap_parselist - convert list format ASCII string to bitmap + * @bp: read nul-terminated user string from this buffer + * @maskp: write resulting mask here + * @nmaskbits: number of bits in mask to be written + * + * Input format is a comma-separated list of decimal numbers and + * ranges. Consecutively set bits are shown as two hyphen-separated + * decimal numbers, the smallest and largest bit numbers set in + * the range. + * + * Returns 0 on success, -errno on invalid input strings. + * Error values: + * %-EINVAL: second number in range smaller than first + * %-EINVAL: invalid character in string + * %-ERANGE: bit number specified too large for mask + */ +int bitmap_parselist(const char *bp, unsigned long *maskp, int nmaskbits) +{ + unsigned a, b; + + bitmap_zero(maskp, nmaskbits); + do { + if (!isdigit(*bp)) + return -EINVAL; + b = a = simple_strtoul(bp, (char **)&bp, BASEDEC); + if (*bp == '-') { + bp++; + if (!isdigit(*bp)) + return -EINVAL; + b = simple_strtoul(bp, (char **)&bp, BASEDEC); + } + if (!(a <= b)) + return -EINVAL; + if (b >= nmaskbits) + return -ERANGE; + while (a <= b) { + set_bit(a, maskp); + a++; + } + if (*bp == ',') + bp++; + } while (*bp != '\0' && *bp != '\n'); + return 0; +} +EXPORT_SYMBOL(bitmap_parselist); + +/** + * bitmap_pos_to_ord(buf, pos, bits) + * @buf: pointer to a bitmap + * @pos: a bit position in @buf (0 <= @pos < @bits) + * @bits: number of valid bit positions in @buf + * + * Map the bit at position @pos in @buf (of length @bits) to the + * ordinal of which set bit it is. If it is not set or if @pos + * is not a valid bit position, map to -1. + * + * If, for example, just bits 4 through 7 are set in @buf, then @pos + * values 4 through 7 will get mapped to 0 through 3, respectively, + * and other @pos values will get mapped to -1. When @pos value 7 + * gets mapped to (returns) @ord value 3 in this example, that means + * that bit 7 is the 3rd (starting with 0th) set bit in @buf. + * + * The bit positions 0 through @bits - 1 are valid positions in @buf. + */ +static int bitmap_pos_to_ord(const unsigned long *buf, int pos, int bits) +{ + int i, ord; + + if (pos < 0 || pos >= bits || !test_bit(pos, buf)) + return -1; + + i = find_first_bit(buf, bits); + ord = 0; + while (i < pos) { + i = find_next_bit(buf, bits, i + 1); + ord++; + } + BUG_ON(i != pos); + + return ord; +} + +/** + * bitmap_ord_to_pos(buf, ord, bits) + * @buf: pointer to bitmap + * @ord: ordinal bit position (n-th set bit, n >= 0) + * @bits: number of valid bit positions in @buf + * + * Map the ordinal offset of bit @ord in @buf to its position in @buf. + * Value of @ord should be in range 0 <= @ord < weight(buf), else + * results are undefined. + * + * If, for example, just bits 4 through 7 are set in @buf, then @ord + * values 0 through 3 will get mapped to 4 through 7, respectively, + * and all other @ord values return undefined values.
When @ord value 3 + * gets mapped to (returns) @pos value 7 in this example, that means + * that the 3rd set bit (starting with 0th) is at position 7 in @buf. + * + * The bit positions 0 through @bits - 1 are valid positions in @buf. + */ +static int bitmap_ord_to_pos(const unsigned long *buf, int ord, int bits) +{ + int pos = 0; + + if (ord >= 0 && ord < bits) { + int i; + + for (i = find_first_bit(buf, bits); + i < bits && ord > 0; + i = find_next_bit(buf, bits, i + 1)) + ord--; + if (i < bits && ord == 0) + pos = i; + } + + return pos; +} + +/** + * bitmap_remap - Apply map defined by a pair of bitmaps to another bitmap + * @dst: remapped result + * @src: subset to be remapped + * @old: defines domain of map + * @new: defines range of map + * @bits: number of bits in each of these bitmaps + * + * Let @old and @new define a mapping of bit positions, such that + * whatever position is held by the n-th set bit in @old is mapped + * to the n-th set bit in @new. In the more general case, allowing + * for the possibility that the weight 'w' of @new is less than the + * weight of @old, map the position of the n-th set bit in @old to + * the position of the m-th set bit in @new, where m == n % w. + * + * If either of the @old and @new bitmaps is empty, or if @src and + * @dst point to the same location, then this routine copies @src + * to @dst. + * + * The positions of unset bits in @old are mapped to themselves + * (the identity map). + * + * Apply the above specified mapping to @src, placing the result in + * @dst, clearing any bits previously set in @dst. + * + * For example, let's say that @old has bits 4 through 7 set, and + * @new has bits 12 through 15 set. This defines the mapping of bit + * position 4 to 12, 5 to 13, 6 to 14 and 7 to 15, and of all other + * bit positions unchanged. So if, say, @src comes into this routine + * with bits 1, 5 and 7 set, then @dst should leave with bits 1, + * 13 and 15 set. + */ +void bitmap_remap(unsigned long *dst, const unsigned long *src, + const unsigned long *old, const unsigned long *new, + int bits) +{ + int oldbit, w; + + if (dst == src) /* following doesn't handle inplace remaps */ + return; + bitmap_zero(dst, bits); + + w = bitmap_weight(new, bits); + for (oldbit = find_first_bit(src, bits); + oldbit < bits; + oldbit = find_next_bit(src, bits, oldbit + 1)) { + int n = bitmap_pos_to_ord(old, oldbit, bits); + if (n < 0 || w == 0) + set_bit(oldbit, dst); /* identity map */ + else + set_bit(bitmap_ord_to_pos(new, n % w, bits), dst); + } +} +EXPORT_SYMBOL(bitmap_remap); + +/** + * bitmap_bitremap - Apply map defined by a pair of bitmaps to a single bit + * @oldbit: bit position to be mapped + * @old: defines domain of map + * @new: defines range of map + * @bits: number of bits in each of these bitmaps + * + * Let @old and @new define a mapping of bit positions, such that + * whatever position is held by the n-th set bit in @old is mapped + * to the n-th set bit in @new. In the more general case, allowing + * for the possibility that the weight 'w' of @new is less than the + * weight of @old, map the position of the n-th set bit in @old to + * the position of the m-th set bit in @new, where m == n % w. + * + * The positions of unset bits in @old are mapped to themselves + * (the identity map). + * + * Apply the above specified mapping to bit position @oldbit, returning + * the new bit position. + * + * For example, let's say that @old has bits 4 through 7 set, and + * @new has bits 12 through 15 set.
This defines the mapping of bit + * position 4 to 12, 5 to 13, 6 to 14 and 7 to 15, and of all other + * bit positions unchanged. So if, say, @oldbit is 5, then this routine + * returns 13. + */ +int bitmap_bitremap(int oldbit, const unsigned long *old, + const unsigned long *new, int bits) +{ + int w = bitmap_weight(new, bits); + int n = bitmap_pos_to_ord(old, oldbit, bits); + if (n < 0 || w == 0) + return oldbit; + else + return bitmap_ord_to_pos(new, n % w, bits); +} +EXPORT_SYMBOL(bitmap_bitremap); + +/** + * bitmap_onto - translate one bitmap relative to another + * @dst: resulting translated bitmap + * @orig: original untranslated bitmap + * @relmap: bitmap relative to which translated + * @bits: number of bits in each of these bitmaps + * + * Set the n-th bit of @dst iff there exists some m such that the + * n-th bit of @relmap is set, the m-th bit of @orig is set, and + * the n-th bit of @relmap is also the m-th _set_ bit of @relmap. + * (If you understood the previous sentence the first time you + * read it, you're overqualified for your current job.) + * + * In other words, @orig is mapped onto (surjectively) @dst, + * using the map { <n, m> | the n-th bit of @relmap is the + * m-th set bit of @relmap }. + * + * Any set bits in @orig above bit number W, where W is the + * weight of (number of set bits in) @relmap, are mapped nowhere. + * In particular, if for all bits m set in @orig, m >= W, then + * @dst will end up empty. In situations where the possibility + * of such an empty result is not desired, one way to avoid it is + * to use the bitmap_fold() operator, below, to first fold the + * @orig bitmap over itself so that all its set bits x are in the + * range 0 <= x < W. The bitmap_fold() operator does this by + * setting the bit (m % W) in @dst, for each bit (m) set in @orig. + * + * Example [1] for bitmap_onto(): + * Let's say @relmap has bits 30-39 set, and @orig has bits + * 1, 3, 5, 7, 9 and 11 set. Then on return from this routine, + * @dst will have bits 31, 33, 35, 37 and 39 set. + * + * When bit 0 is set in @orig, it means turn on the bit in + * @dst corresponding to whatever is the first bit (if any) + * that is turned on in @relmap. Since bit 0 was off in the + * above example, we leave off that bit (bit 30) in @dst. + * + * When bit 1 is set in @orig (as in the above example), it + * means turn on the bit in @dst corresponding to whatever + * is the second bit that is turned on in @relmap. The second + * bit in @relmap that was turned on in the above example was + * bit 31, so we turned on bit 31 in @dst. + * + * Similarly, we turned on bits 33, 35, 37 and 39 in @dst, + * because they were the 4th, 6th, 8th and 10th set bits + * set in @relmap, and the 4th, 6th, 8th and 10th bits of + * @orig (i.e. bits 3, 5, 7 and 9) were also set. + * + * When bit 11 is set in @orig, it means turn on the bit in + * @dst corresponding to whatever is the twelfth bit that is + * turned on in @relmap. In the above example, there were + * only ten bits turned on in @relmap (30..39), so the fact that bit + * 11 was set in @orig had no effect on @dst. + * + * Example [2] for bitmap_fold() + bitmap_onto(): + * Let's say @relmap has these ten bits set: + * 40 41 42 43 45 48 53 61 74 95 + * (for the curious, that's 40 plus the first ten terms of the + * Fibonacci sequence.)
+ * + * Further, let's say we use the following code, invoking + * bitmap_fold() then bitmap_onto(), as suggested above, to + * avoid the possibility of an empty @dst result: + * + * unsigned long *tmp; // a temporary bitmap's bits + * + * bitmap_fold(tmp, orig, bitmap_weight(relmap, bits), bits); + * bitmap_onto(dst, tmp, relmap, bits); + * + * Then this table shows what various values of @dst would be, for + * various @orig's. I list the zero-based positions of each set bit. + * The tmp column shows the intermediate result, as computed by + * using bitmap_fold() to fold the @orig bitmap modulo ten + * (the weight of @relmap). + * + * @orig tmp @dst + * 0 0 40 + * 1 1 41 + * 9 9 95 + * 10 0 40 (*) + * 1 3 5 7 1 3 5 7 41 43 48 61 + * 0 1 2 3 4 0 1 2 3 4 40 41 42 43 45 + * 0 9 18 27 0 9 8 7 40 61 74 95 + * 0 10 20 30 0 40 + * 0 11 22 33 0 1 2 3 40 41 42 43 + * 0 12 24 36 0 2 4 6 40 42 45 53 + * 78 102 211 1 2 8 41 42 74 (*) + * + * (*) For these marked lines, if we hadn't first done bitmap_fold() + * into tmp, then the @dst result would have been empty. + * + * If either of @orig or @relmap is empty (no set bits), then @dst + * will be returned empty. + * + * If (as explained above) the only set bits in @orig are in positions + * m where m >= W (where W is the weight of @relmap), then @dst will + * once again be returned empty. + * + * All bits in @dst not set by the above rule are cleared. + */ +void bitmap_onto(unsigned long *dst, const unsigned long *orig, + const unsigned long *relmap, int bits) +{ + int n, m; /* same meaning as in above comment */ + + if (dst == orig) /* following doesn't handle inplace mappings */ + return; + bitmap_zero(dst, bits); + + /* + * The following code is a more efficient, but less + * obvious, equivalent to the loop: + * for (m = 0; m < bitmap_weight(relmap, bits); m++) { + * n = bitmap_ord_to_pos(relmap, m, bits); + * if (test_bit(m, orig)) + * set_bit(n, dst); + * } + */ + + m = 0; + for (n = find_first_bit(relmap, bits); + n < bits; + n = find_next_bit(relmap, bits, n + 1)) { + /* m == bitmap_pos_to_ord(relmap, n, bits) */ + if (test_bit(m, orig)) + set_bit(n, dst); + m++; + } +} +EXPORT_SYMBOL(bitmap_onto); + +/** + * bitmap_fold - fold larger bitmap into smaller, modulo specified size + * @dst: resulting smaller bitmap + * @orig: original larger bitmap + * @sz: specified size + * @bits: number of bits in each of these bitmaps + * + * For each bit oldbit in @orig, set bit oldbit mod @sz in @dst. + * Clear all other bits in @dst. See further the comment and + * Example [2] for bitmap_onto() for why and how to use this. + */ +void bitmap_fold(unsigned long *dst, const unsigned long *orig, + int sz, int bits) +{ + int oldbit; + + if (dst == orig) /* following doesn't handle inplace mappings */ + return; + bitmap_zero(dst, bits); + + for (oldbit = find_first_bit(orig, bits); + oldbit < bits; + oldbit = find_next_bit(orig, bits, oldbit + 1)) + set_bit(oldbit % sz, dst); +} +EXPORT_SYMBOL(bitmap_fold); + +/* + * Common code for bitmap_*_region() routines. + * bitmap: array of unsigned longs corresponding to the bitmap + * pos: the beginning of the region + * order: region size (log base 2 of number of bits) + * reg_op: operation(s) to perform on that region of bitmap + * + * Can set, verify and/or release a region of bits in a bitmap, + * depending on which combination of REG_OP_* flag bits is set. + * + * A region of a bitmap is a sequence of bits in the bitmap, of + * some size '1 << order' (a power of two), aligned to that same + * '1 << order' power of two.
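+ * With @order == 2, for example, a region spans 4 bits and can only + * begin at bit 0, 4, 8, and so on (an illustrative note, following + * the alignment rule just stated).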
+ * + * Returns 1 if REG_OP_ISFREE succeeds (region is all zero bits). + * Returns 0 in all other cases and for all other reg_op values. + */ + +enum { + REG_OP_ISFREE, /* true if region is all zero bits */ + REG_OP_ALLOC, /* set all bits in region */ + REG_OP_RELEASE, /* clear all bits in region */ +}; + +static int __reg_op(unsigned long *bitmap, int pos, int order, int reg_op) +{ + int nbits_reg; /* number of bits in region */ + int index; /* index of first long of region in bitmap */ + int offset; /* bit offset of region in bitmap[index] */ + int nlongs_reg; /* num longs spanned by region in bitmap */ + int nbitsinlong; /* num bits of region in each spanned long */ + unsigned long mask; /* bitmask for one long of region */ + int i; /* scans bitmap by longs */ + int ret = 0; /* return value */ + + /* + * Either nlongs_reg == 1 (for small orders that fit in one long) + * or (offset == 0 && mask == ~0UL) (for larger multiword orders.) + */ + nbits_reg = 1 << order; + index = pos / BITS_PER_LONG; + offset = pos - (index * BITS_PER_LONG); + nlongs_reg = BITS_TO_LONGS(nbits_reg); + nbitsinlong = min(nbits_reg, BITS_PER_LONG); + + /* + * Can't do "mask = (1UL << nbitsinlong) - 1", as that + * overflows if nbitsinlong == BITS_PER_LONG. + */ + mask = (1UL << (nbitsinlong - 1)); + mask += mask - 1; + mask <<= offset; + + switch (reg_op) { + case REG_OP_ISFREE: + for (i = 0; i < nlongs_reg; i++) { + if (bitmap[index + i] & mask) + goto done; + } + ret = 1; /* all bits in region free (zero) */ + break; + + case REG_OP_ALLOC: + for (i = 0; i < nlongs_reg; i++) + bitmap[index + i] |= mask; + break; + + case REG_OP_RELEASE: + for (i = 0; i < nlongs_reg; i++) + bitmap[index + i] &= ~mask; + break; + } +done: + return ret; +} + +/** + * bitmap_find_free_region - find a contiguous aligned mem region + * @bitmap: array of unsigned longs corresponding to the bitmap + * @bits: number of bits in the bitmap + * @order: region size (log base 2 of number of bits) to find + * + * Find a region of free (zero) bits in a @bitmap of @bits bits and + * allocate them (set them to one). Only consider regions of length + * a power (@order) of two, aligned to that power of two, which + * makes the search algorithm much faster. + * + * Return the bit offset in bitmap of the allocated region, + * or -errno on failure. + */ +int bitmap_find_free_region(unsigned long *bitmap, int bits, int order) +{ + int pos, end; /* scans bitmap by regions of size order */ + + for (pos = 0 ; (end = pos + (1 << order)) <= bits; pos = end) { + if (!__reg_op(bitmap, pos, order, REG_OP_ISFREE)) + continue; + __reg_op(bitmap, pos, order, REG_OP_ALLOC); + return pos; + } + return -ENOMEM; +} +EXPORT_SYMBOL(bitmap_find_free_region); + +/** + * bitmap_release_region - release allocated bitmap region + * @bitmap: array of unsigned longs corresponding to the bitmap + * @pos: beginning of bit region to release + * @order: region size (log base 2 of number of bits) to release + * + * This is the complement to bitmap_find_free_region() and releases + * the found region (by clearing it in the bitmap). + * + * No return value.
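+ * + * A minimal pairing sketch (illustrative only): + * + * int pos = bitmap_find_free_region(bitmap, bits, order); + * if (pos >= 0) + * bitmap_release_region(bitmap, pos, order);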
+ */ +void bitmap_release_region(unsigned long *bitmap, int pos, int order) +{ + __reg_op(bitmap, pos, order, REG_OP_RELEASE); +} +EXPORT_SYMBOL(bitmap_release_region); + +/** + * bitmap_allocate_region - allocate bitmap region + * @bitmap: array of unsigned longs corresponding to the bitmap + * @pos: beginning of bit region to allocate + * @order: region size (log base 2 of number of bits) to allocate + * + * Allocate (set bits in) a specified region of a bitmap. + * + * Return 0 on success, or %-EBUSY if specified region wasn't + * free (not all bits were zero). + */ +int bitmap_allocate_region(unsigned long *bitmap, int pos, int order) +{ + if (!__reg_op(bitmap, pos, order, REG_OP_ISFREE)) + return -EBUSY; + __reg_op(bitmap, pos, order, REG_OP_ALLOC); + return 0; +} +EXPORT_SYMBOL(bitmap_allocate_region); + +/** + * bitmap_copy_le - copy a bitmap, putting the bits into little-endian order. + * @dst: destination buffer + * @src: bitmap to copy + * @nbits: number of bits in the bitmap + * + * Require nbits % BITS_PER_LONG == 0. + */ +void bitmap_copy_le(void *dst, const unsigned long *src, int nbits) +{ + unsigned long *d = dst; + int i; + + for (i = 0; i < nbits/BITS_PER_LONG; i++) { + if (BITS_PER_LONG == 64) + d[i] = cpu_to_le64(src[i]); + else + d[i] = cpu_to_le32(src[i]); + } +} +EXPORT_SYMBOL(bitmap_copy_le); diff --git a/libdde-linux26/contrib/lib/bitrev.c b/libdde-linux26/contrib/lib/bitrev.c new file mode 100644 index 00000000..39562034 --- /dev/null +++ b/libdde-linux26/contrib/lib/bitrev.c @@ -0,0 +1,59 @@ +#include <linux/types.h> +#include <linux/module.h> +#include <linux/bitrev.h> + +MODULE_AUTHOR("Akinobu Mita <akinobu.mita@gmail.com>"); +MODULE_DESCRIPTION("Bit ordering reversal functions"); +MODULE_LICENSE("GPL"); + +const u8 byte_rev_table[256] = { + 0x00, 0x80, 0x40, 0xc0, 0x20, 0xa0, 0x60, 0xe0, + 0x10, 0x90, 0x50, 0xd0, 0x30, 0xb0, 0x70, 0xf0, + 0x08, 0x88, 0x48, 0xc8, 0x28, 0xa8, 0x68, 0xe8, + 0x18, 0x98, 0x58, 0xd8, 0x38, 0xb8, 0x78, 0xf8, + 0x04, 0x84, 0x44, 0xc4, 0x24, 0xa4, 0x64, 0xe4, + 0x14, 0x94, 0x54, 0xd4, 0x34, 0xb4, 0x74, 0xf4, + 0x0c, 0x8c, 0x4c, 0xcc, 0x2c, 0xac, 0x6c, 0xec, + 0x1c, 0x9c, 0x5c, 0xdc, 0x3c, 0xbc, 0x7c, 0xfc, + 0x02, 0x82, 0x42, 0xc2, 0x22, 0xa2, 0x62, 0xe2, + 0x12, 0x92, 0x52, 0xd2, 0x32, 0xb2, 0x72, 0xf2, + 0x0a, 0x8a, 0x4a, 0xca, 0x2a, 0xaa, 0x6a, 0xea, + 0x1a, 0x9a, 0x5a, 0xda, 0x3a, 0xba, 0x7a, 0xfa, + 0x06, 0x86, 0x46, 0xc6, 0x26, 0xa6, 0x66, 0xe6, + 0x16, 0x96, 0x56, 0xd6, 0x36, 0xb6, 0x76, 0xf6, + 0x0e, 0x8e, 0x4e, 0xce, 0x2e, 0xae, 0x6e, 0xee, + 0x1e, 0x9e, 0x5e, 0xde, 0x3e, 0xbe, 0x7e, 0xfe, + 0x01, 0x81, 0x41, 0xc1, 0x21, 0xa1, 0x61, 0xe1, + 0x11, 0x91, 0x51, 0xd1, 0x31, 0xb1, 0x71, 0xf1, + 0x09, 0x89, 0x49, 0xc9, 0x29, 0xa9, 0x69, 0xe9, + 0x19, 0x99, 0x59, 0xd9, 0x39, 0xb9, 0x79, 0xf9, + 0x05, 0x85, 0x45, 0xc5, 0x25, 0xa5, 0x65, 0xe5, + 0x15, 0x95, 0x55, 0xd5, 0x35, 0xb5, 0x75, 0xf5, + 0x0d, 0x8d, 0x4d, 0xcd, 0x2d, 0xad, 0x6d, 0xed, + 0x1d, 0x9d, 0x5d, 0xdd, 0x3d, 0xbd, 0x7d, 0xfd, + 0x03, 0x83, 0x43, 0xc3, 0x23, 0xa3, 0x63, 0xe3, + 0x13, 0x93, 0x53, 0xd3, 0x33, 0xb3, 0x73, 0xf3, + 0x0b, 0x8b, 0x4b, 0xcb, 0x2b, 0xab, 0x6b, 0xeb, + 0x1b, 0x9b, 0x5b, 0xdb, 0x3b, 0xbb, 0x7b, 0xfb, + 0x07, 0x87, 0x47, 0xc7, 0x27, 0xa7, 0x67, 0xe7, + 0x17, 0x97, 0x57, 0xd7, 0x37, 0xb7, 0x77, 0xf7, + 0x0f, 0x8f, 0x4f, 0xcf, 0x2f, 0xaf, 0x6f, 0xef, + 0x1f, 0x9f, 0x5f, 0xdf, 0x3f, 0xbf, 0x7f, 0xff, +}; +EXPORT_SYMBOL_GPL(byte_rev_table); + +u16 bitrev16(u16 x) +{ + return (bitrev8(x & 0xff) << 8) | bitrev8(x >> 8); +} +EXPORT_SYMBOL(bitrev16); + +/** 
+ * bitrev32 - reverse the order of bits in a u32 value + * @x: value to be bit-reversed + */ +u32 bitrev32(u32 x) +{ + return (bitrev16(x & 0xffff) << 16) | bitrev16(x >> 16); +} +EXPORT_SYMBOL(bitrev32); diff --git a/libdde-linux26/contrib/lib/cpumask.c b/libdde-linux26/contrib/lib/cpumask.c new file mode 100644 index 00000000..3389e244 --- /dev/null +++ b/libdde-linux26/contrib/lib/cpumask.c @@ -0,0 +1,172 @@ +#include <linux/kernel.h> +#include <linux/bitops.h> +#include <linux/cpumask.h> +#include <linux/module.h> +#include <linux/bootmem.h> + +int __first_cpu(const cpumask_t *srcp) +{ + return min_t(int, NR_CPUS, find_first_bit(srcp->bits, NR_CPUS)); +} +EXPORT_SYMBOL(__first_cpu); + +int __next_cpu(int n, const cpumask_t *srcp) +{ + return min_t(int, NR_CPUS, find_next_bit(srcp->bits, NR_CPUS, n+1)); +} +EXPORT_SYMBOL(__next_cpu); + +#if NR_CPUS > 64 +int __next_cpu_nr(int n, const cpumask_t *srcp) +{ + return min_t(int, nr_cpu_ids, + find_next_bit(srcp->bits, nr_cpu_ids, n+1)); +} +EXPORT_SYMBOL(__next_cpu_nr); +#endif + +int __any_online_cpu(const cpumask_t *mask) +{ + int cpu; + + for_each_cpu_mask(cpu, *mask) { + if (cpu_online(cpu)) + break; + } + return cpu; +} +EXPORT_SYMBOL(__any_online_cpu); + +/** + * cpumask_next_and - get the next cpu in *src1p & *src2p + * @n: the cpu prior to the place to search (i.e. return will be > @n) + * @src1p: the first cpumask pointer + * @src2p: the second cpumask pointer + * + * Returns >= nr_cpu_ids if no further cpus set in both. + */ +int cpumask_next_and(int n, const struct cpumask *src1p, + const struct cpumask *src2p) +{ + while ((n = cpumask_next(n, src1p)) < nr_cpu_ids) + if (cpumask_test_cpu(n, src2p)) + break; + return n; +} +EXPORT_SYMBOL(cpumask_next_and); + +/** + * cpumask_any_but - return a "random" cpu in a cpumask, but not this one. + * @mask: the cpumask to search + * @cpu: the cpu to ignore. + * + * Often used to find any cpu but smp_processor_id() in a mask. + * Returns >= nr_cpu_ids if no cpus set. + */ +int cpumask_any_but(const struct cpumask *mask, unsigned int cpu) +{ + unsigned int i; + + cpumask_check(cpu); + for_each_cpu(i, mask) + if (i != cpu) + break; + return i; +} + +/* These are not inline because of header tangles. */ +#ifdef CONFIG_CPUMASK_OFFSTACK +/** + * alloc_cpumask_var_node - allocate a struct cpumask on a given node + * @mask: pointer to cpumask_var_t where the cpumask is returned + * @flags: GFP_ flags + * + * Only defined when CONFIG_CPUMASK_OFFSTACK=y, otherwise is + * a nop returning a constant 1 (in <linux/cpumask.h>). + * Returns TRUE if memory allocation succeeded, FALSE otherwise. + * + * In addition, mask will be NULL if this fails. Note that gcc is + * usually smart enough to know that mask can never be NULL if + * CONFIG_CPUMASK_OFFSTACK=n, so does code elimination in that case + * too. + */ +bool alloc_cpumask_var_node(cpumask_var_t *mask, gfp_t flags, int node) +{ + if (likely(slab_is_available())) + *mask = kmalloc_node(cpumask_size(), flags, node); + else { +#ifdef CONFIG_DEBUG_PER_CPU_MAPS + printk(KERN_ERR + "=> alloc_cpumask_var: kmalloc not available!\n"); +#endif + *mask = NULL; + } +#ifdef CONFIG_DEBUG_PER_CPU_MAPS + if (!*mask) { + printk(KERN_ERR "=> alloc_cpumask_var: failed!\n"); + dump_stack(); + } +#endif + /* FIXME: Bandaid to save us from old primitives which go to NR_CPUS.
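+ * The memset below clears the tail of the allocation beyond + * nr_cpumask_bits, so primitives that scan all the way to NR_CPUS + * never see stray set bits (explanatory note on the code below).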
*/ + if (*mask) { + unsigned int tail; + tail = BITS_TO_LONGS(NR_CPUS - nr_cpumask_bits) * sizeof(long); + memset(cpumask_bits(*mask) + cpumask_size() - tail, + 0, tail); + } + + return *mask != NULL; +} +EXPORT_SYMBOL(alloc_cpumask_var_node); + +/** + * alloc_cpumask_var - allocate a struct cpumask + * @mask: pointer to cpumask_var_t where the cpumask is returned + * @flags: GFP_ flags + * + * Only defined when CONFIG_CPUMASK_OFFSTACK=y, otherwise is + * a nop returning a constant 1 (in <linux/cpumask.h>). + * + * See alloc_cpumask_var_node. + */ +bool alloc_cpumask_var(cpumask_var_t *mask, gfp_t flags) +{ + return alloc_cpumask_var_node(mask, flags, numa_node_id()); +} +EXPORT_SYMBOL(alloc_cpumask_var); + +/** + * alloc_bootmem_cpumask_var - allocate a struct cpumask from the bootmem arena. + * @mask: pointer to cpumask_var_t where the cpumask is returned + * + * Only defined when CONFIG_CPUMASK_OFFSTACK=y, otherwise is + * a nop (in <linux/cpumask.h>). + * Either returns an allocated (zero-filled) cpumask, or causes the + * system to panic. + */ +void __init alloc_bootmem_cpumask_var(cpumask_var_t *mask) +{ + *mask = alloc_bootmem(cpumask_size()); +} + +/** + * free_cpumask_var - frees memory allocated for a struct cpumask. + * @mask: cpumask to free + * + * This is safe on a NULL mask. + */ +void free_cpumask_var(cpumask_var_t mask) +{ + kfree(mask); +} +EXPORT_SYMBOL(free_cpumask_var); + +/** + * free_bootmem_cpumask_var - frees result of alloc_bootmem_cpumask_var + * @mask: cpumask to free + */ +void __init free_bootmem_cpumask_var(cpumask_var_t mask) +{ + free_bootmem((unsigned long)mask, cpumask_size()); +} +#endif diff --git a/libdde-linux26/contrib/lib/crc32.c b/libdde-linux26/contrib/lib/crc32.c new file mode 100644 index 00000000..49d1c9e3 --- /dev/null +++ b/libdde-linux26/contrib/lib/crc32.c @@ -0,0 +1,501 @@ +/* + * Oct 15, 2000 Matt Domsch <Matt_Domsch@dell.com> + * Nicer crc32 functions/docs submitted by linux@horizon.com. Thanks! + * Code was from the public domain, copyright abandoned. Code was + * subsequently included in the kernel, thus was re-licensed under the + * GNU GPL v2. + * + * Oct 12, 2000 Matt Domsch <Matt_Domsch@dell.com> + * Same crc32 function was used in 5 other places in the kernel. + * I made one version, and deleted the others. + * There are various incantations of crc32(). Some use a seed of 0 or ~0. + * Some xor at the end with ~0. The generic crc32() function takes + * seed as an argument, and doesn't xor at the end. Then individual + * users can do whatever they need. + * drivers/net/smc9194.c uses seed ~0, doesn't xor with ~0. + * fs/jffs2 uses seed 0, doesn't xor with ~0. + * fs/partitions/efi.c uses seed ~0, xor's with ~0. + * + * This source code is licensed under the GNU General Public License, + * Version 2. See the file COPYING for more details. + */ + +#include <linux/crc32.h> +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/compiler.h> +#include <linux/types.h> +#include <linux/slab.h> +#include <linux/init.h> +#include <asm/atomic.h> +#include "crc32defs.h" +#if CRC_LE_BITS == 8 +#define tole(x) __constant_cpu_to_le32(x) +#define tobe(x) __constant_cpu_to_be32(x) +#else +#define tole(x) (x) +#define tobe(x) (x) +#endif +#include "crc32table.h" + +MODULE_AUTHOR("Matt Domsch <Matt_Domsch@dell.com>"); +MODULE_DESCRIPTION("Ethernet CRC32 calculations"); +MODULE_LICENSE("GPL"); + +/** + * crc32_le() - Calculate bitwise little-endian Ethernet AUTODIN II CRC32 + * @crc: seed value for computation. 
~0 for Ethernet, sometimes 0 for + * other uses, or the previous crc32 value if computing incrementally. + * @p: pointer to buffer over which CRC is run + * @len: length of buffer @p + */ +u32 __pure crc32_le(u32 crc, unsigned char const *p, size_t len); + +#if CRC_LE_BITS == 1 +/* + * In fact, the table-based code will work in this case, but it can be + * simplified by inlining the table in ?: form. + */ + +u32 __pure crc32_le(u32 crc, unsigned char const *p, size_t len) +{ + int i; + while (len--) { + crc ^= *p++; + for (i = 0; i < 8; i++) + crc = (crc >> 1) ^ ((crc & 1) ? CRCPOLY_LE : 0); + } + return crc; +} +#else /* Table-based approach */ + +u32 __pure crc32_le(u32 crc, unsigned char const *p, size_t len) +{ +# if CRC_LE_BITS == 8 + const u32 *b =(u32 *)p; + const u32 *tab = crc32table_le; + +# ifdef __LITTLE_ENDIAN +# define DO_CRC(x) crc = tab[ (crc ^ (x)) & 255 ] ^ (crc>>8) +# else +# define DO_CRC(x) crc = tab[ ((crc >> 24) ^ (x)) & 255] ^ (crc<<8) +# endif + + crc = __cpu_to_le32(crc); + /* Align it */ + if(unlikely(((long)b)&3 && len)){ + do { + u8 *p = (u8 *)b; + DO_CRC(*p++); + b = (void *)p; + } while ((--len) && ((long)b)&3 ); + } + if(likely(len >= 4)){ + /* load data 32 bits wide, xor data 32 bits wide. */ + size_t save_len = len & 3; + len = len >> 2; + --b; /* use pre increment below(*++b) for speed */ + do { + crc ^= *++b; + DO_CRC(0); + DO_CRC(0); + DO_CRC(0); + DO_CRC(0); + } while (--len); + b++; /* point to next byte(s) */ + len = save_len; + } + /* And the last few bytes */ + if(len){ + do { + u8 *p = (u8 *)b; + DO_CRC(*p++); + b = (void *)p; + } while (--len); + } + + return __le32_to_cpu(crc); +#undef ENDIAN_SHIFT +#undef DO_CRC + +# elif CRC_LE_BITS == 4 + while (len--) { + crc ^= *p++; + crc = (crc >> 4) ^ crc32table_le[crc & 15]; + crc = (crc >> 4) ^ crc32table_le[crc & 15]; + } + return crc; +# elif CRC_LE_BITS == 2 + while (len--) { + crc ^= *p++; + crc = (crc >> 2) ^ crc32table_le[crc & 3]; + crc = (crc >> 2) ^ crc32table_le[crc & 3]; + crc = (crc >> 2) ^ crc32table_le[crc & 3]; + crc = (crc >> 2) ^ crc32table_le[crc & 3]; + } + return crc; +# endif +} +#endif + +/** + * crc32_be() - Calculate bitwise big-endian Ethernet AUTODIN II CRC32 + * @crc: seed value for computation. ~0 for Ethernet, sometimes 0 for + * other uses, or the previous crc32 value if computing incrementally. + * @p: pointer to buffer over which CRC is run + * @len: length of buffer @p + */ +u32 __pure crc32_be(u32 crc, unsigned char const *p, size_t len); + +#if CRC_BE_BITS == 1 +/* + * In fact, the table-based code will work in this case, but it can be + * simplified by inlining the table in ?: form. + */ + +u32 __pure crc32_be(u32 crc, unsigned char const *p, size_t len) +{ + int i; + while (len--) { + crc ^= *p++ << 24; + for (i = 0; i < 8; i++) + crc = + (crc << 1) ^ ((crc & 0x80000000) ? CRCPOLY_BE : + 0); + } + return crc; +} + +#else /* Table-based approach */ +u32 __pure crc32_be(u32 crc, unsigned char const *p, size_t len) +{ +# if CRC_BE_BITS == 8 + const u32 *b =(u32 *)p; + const u32 *tab = crc32table_be; + +# ifdef __LITTLE_ENDIAN +# define DO_CRC(x) crc = tab[ (crc ^ (x)) & 255 ] ^ (crc>>8) +# else +# define DO_CRC(x) crc = tab[ ((crc >> 24) ^ (x)) & 255] ^ (crc<<8) +# endif + + crc = __cpu_to_be32(crc); + /* Align it */ + if(unlikely(((long)b)&3 && len)){ + do { + u8 *p = (u8 *)b; + DO_CRC(*p++); + b = (u32 *)p; + } while ((--len) && ((long)b)&3 ); + } + if(likely(len >= 4)){ + /* load data 32 bits wide, xor data 32 bits wide. 
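+ * Each DO_CRC(0) below folds one byte of the word just xor-ed + * into crc, so the four calls consume all 32 bits (explanatory + * note on the unrolled loop below).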
*/ + size_t save_len = len & 3; + len = len >> 2; + --b; /* use pre increment below(*++b) for speed */ + do { + crc ^= *++b; + DO_CRC(0); + DO_CRC(0); + DO_CRC(0); + DO_CRC(0); + } while (--len); + b++; /* point to next byte(s) */ + len = save_len; + } + /* And the last few bytes */ + if(len){ + do { + u8 *p = (u8 *)b; + DO_CRC(*p++); + b = (void *)p; + } while (--len); + } + return __be32_to_cpu(crc); +#undef ENDIAN_SHIFT +#undef DO_CRC + +# elif CRC_BE_BITS == 4 + while (len--) { + crc ^= *p++ << 24; + crc = (crc << 4) ^ crc32table_be[crc >> 28]; + crc = (crc << 4) ^ crc32table_be[crc >> 28]; + } + return crc; +# elif CRC_BE_BITS == 2 + while (len--) { + crc ^= *p++ << 24; + crc = (crc << 2) ^ crc32table_be[crc >> 30]; + crc = (crc << 2) ^ crc32table_be[crc >> 30]; + crc = (crc << 2) ^ crc32table_be[crc >> 30]; + crc = (crc << 2) ^ crc32table_be[crc >> 30]; + } + return crc; +# endif +} +#endif + +EXPORT_SYMBOL(crc32_le); +EXPORT_SYMBOL(crc32_be); + +/* + * A brief CRC tutorial. + * + * A CRC is a long-division remainder. You add the CRC to the message, + * and the whole thing (message+CRC) is a multiple of the given + * CRC polynomial. To check the CRC, you can either check that the + * CRC matches the recomputed value, *or* you can check that the + * remainder computed on the message+CRC is 0. This latter approach + * is used by a lot of hardware implementations, and is why so many + * protocols put the end-of-frame flag after the CRC. + * + * It's actually the same long division you learned in school, except that + * - We're working in binary, so the digits are only 0 and 1, and + * - When dividing polynomials, there are no carries. Rather than add and + * subtract, we just xor. Thus, we tend to get a bit sloppy about + * the difference between adding and subtracting. + * + * A 32-bit CRC polynomial is actually 33 bits long. But since it's + * 33 bits long, bit 32 is always going to be set, so usually the CRC + * is written in hex with the most significant bit omitted. (If you're + * familiar with the IEEE 754 floating-point format, it's the same idea.) + * + * Note that a CRC is computed over a string of *bits*, so you have + * to decide on the endianness of the bits within each byte. To get + * the best error-detecting properties, this should correspond to the + * order they're actually sent. For example, standard RS-232 serial is + * little-endian; the most significant bit (sometimes used for parity) + * is sent last. And when appending a CRC word to a message, you should + * do it in the right order, matching the endianness. + * + * Just like with ordinary division, the remainder is always smaller than + * the divisor (the CRC polynomial) you're dividing by. Each step of the + * division, you take one more digit (bit) of the dividend and append it + * to the current remainder. Then you figure out the appropriate multiple + * of the divisor to subtract to bring the remainder back into range. + * In binary, it's easy - it has to be either 0 or 1, and to make the + * XOR cancel, it's just a copy of bit 32 of the remainder. + * + * When computing a CRC, we don't care about the quotient, so we can + * throw the quotient bit away, but subtract the appropriate multiple of + * the polynomial from the remainder and we're back to where we started, + * ready to process the next bit. + * + * A big-endian CRC written this way would be coded like: + * for (i = 0; i < input_bits; i++) { + * multiple = remainder & 0x80000000 ?
CRCPOLY : 0; + * remainder = (remainder << 1 | next_input_bit()) ^ multiple; + * } + * Notice how, to get at bit 32 of the shifted remainder, we look + * at bit 31 of the remainder *before* shifting it. + * + * But also notice how the next_input_bit() bits we're shifting into + * the remainder don't actually affect any decision-making until + * 32 bits later. Thus, the first 32 cycles of this are pretty boring. + * Also, to add the CRC to a message, we need a 32-bit-long hole for it at + * the end, so we have to add 32 extra cycles shifting in zeros at the + * end of every message. + * + * So the standard trick is to rearrange merging in the next_input_bit() + * until the moment it's needed. Then the first 32 cycles can be precomputed, + * and merging in the final 32 zero bits to make room for the CRC can be + * skipped entirely. + * This changes the code to: + * for (i = 0; i < input_bits; i++) { + * remainder ^= next_input_bit() << 31; + * multiple = (remainder & 0x80000000) ? CRCPOLY : 0; + * remainder = (remainder << 1) ^ multiple; + * } + * With this optimization, the little-endian code is simpler: + * for (i = 0; i < input_bits; i++) { + * remainder ^= next_input_bit(); + * multiple = (remainder & 1) ? CRCPOLY : 0; + * remainder = (remainder >> 1) ^ multiple; + * } + * + * Note that the other details of endianness have been hidden in CRCPOLY + * (which must be bit-reversed) and next_input_bit(). + * + * However, as long as next_input_bit() is returning the bits in a sensible + * order, we can actually do the merging 8 or more bits at a time rather + * than one bit at a time: + * for (i = 0; i < input_bytes; i++) { + * remainder ^= next_input_byte() << 24; + * for (j = 0; j < 8; j++) { + * multiple = (remainder & 0x80000000) ? CRCPOLY : 0; + * remainder = (remainder << 1) ^ multiple; + * } + * } + * Or in little-endian: + * for (i = 0; i < input_bytes; i++) { + * remainder ^= next_input_byte(); + * for (j = 0; j < 8; j++) { + * multiple = (remainder & 1) ? CRCPOLY : 0; + * remainder = (remainder >> 1) ^ multiple; + * } + * } + * If the input is a multiple of 32 bits, you can even XOR in a 32-bit + * word at a time and increase the inner loop count to 32. + * + * You can also mix and match the two loop styles, for example doing the + * bulk of a message byte-at-a-time and adding bit-at-a-time processing + * for any fractional bytes at the end. + * + * The only remaining optimization is to the byte-at-a-time table method. + * Here, rather than just shifting one bit of the remainder to decide + * on the correct multiple to subtract, we can shift a byte at a time. + * This produces a 40-bit (rather than a 33-bit) intermediate remainder, + * but again the multiple of the polynomial to subtract depends only on + * the high bits, the high 8 bits in this case. + * + * The multiple we need in that case is the low 32 bits of a 40-bit + * value whose high 8 bits are given, and which is a multiple of the + * generator polynomial. This is simply the CRC-32 of the given + * one-byte message. + * + * Two more details: normally, appending zero bits to a message which + * is already a multiple of a polynomial produces a larger multiple of that + * polynomial. To enable a CRC to detect this condition, it's common to + * invert the CRC before appending it. This makes the remainder of the + * message+crc come out not as zero, but some fixed non-zero value. + * + * The same problem applies to zero bits prepended to the message, and + * a similar solution is used.
Instead of starting with a remainder of + * 0, an initial remainder of all ones is used. As long as you start + * the same way on decoding, it doesn't make a difference. + */ + +#ifdef UNITTEST + +#include <stdlib.h> +#include <stdio.h> + +#if 0 /*Not used at present */ +static void +buf_dump(char const *prefix, unsigned char const *buf, size_t len) +{ + fputs(prefix, stdout); + while (len--) + printf(" %02x", *buf++); + putchar('\n'); + +} +#endif + +static void bytereverse(unsigned char *buf, size_t len) +{ + while (len--) { + unsigned char x = bitrev8(*buf); + *buf++ = x; + } +} + +static void random_garbage(unsigned char *buf, size_t len) +{ + while (len--) + *buf++ = (unsigned char) random(); +} + +#if 0 /* Not used at present */ +static void store_le(u32 x, unsigned char *buf) +{ + buf[0] = (unsigned char) x; + buf[1] = (unsigned char) (x >> 8); + buf[2] = (unsigned char) (x >> 16); + buf[3] = (unsigned char) (x >> 24); +} +#endif + +static void store_be(u32 x, unsigned char *buf) +{ + buf[0] = (unsigned char) (x >> 24); + buf[1] = (unsigned char) (x >> 16); + buf[2] = (unsigned char) (x >> 8); + buf[3] = (unsigned char) x; +} + +/* + * This checks that CRC(buf + CRC(buf)) = 0, and that + * CRC commutes with bit-reversal. This has the side effect + * of bytewise bit-reversing the input buffer, and returns + * the CRC of the reversed buffer. + */ +static u32 test_step(u32 init, unsigned char *buf, size_t len) +{ + u32 crc1, crc2; + size_t i; + + crc1 = crc32_be(init, buf, len); + store_be(crc1, buf + len); + crc2 = crc32_be(init, buf, len + 4); + if (crc2) + printf("\nCRC cancellation fail: 0x%08x should be 0\n", + crc2); + + for (i = 0; i <= len + 4; i++) { + crc2 = crc32_be(init, buf, i); + crc2 = crc32_be(crc2, buf + i, len + 4 - i); + if (crc2) + printf("\nCRC split fail: 0x%08x\n", crc2); + } + + /* Now swap it around for the other test */ + + bytereverse(buf, len + 4); + init = bitrev32(init); + crc2 = bitrev32(crc1); + if (crc1 != bitrev32(crc2)) + printf("\nBit reversal fail: 0x%08x -> 0x%08x -> 0x%08x\n", + crc1, crc2, bitrev32(crc2)); + crc1 = crc32_le(init, buf, len); + if (crc1 != crc2) + printf("\nCRC endianness fail: 0x%08x != 0x%08x\n", crc1, + crc2); + crc2 = crc32_le(init, buf, len + 4); + if (crc2) + printf("\nCRC cancellation fail: 0x%08x should be 0\n", + crc2); + + for (i = 0; i <= len + 4; i++) { + crc2 = crc32_le(init, buf, i); + crc2 = crc32_le(crc2, buf + i, len + 4 - i); + if (crc2) + printf("\nCRC split fail: 0x%08x\n", crc2); + } + + return crc1; +} + +#define SIZE 64 +#define INIT1 0 +#define INIT2 0 + +int main(void) +{ + unsigned char buf1[SIZE + 4]; + unsigned char buf2[SIZE + 4]; + unsigned char buf3[SIZE + 4]; + int i, j; + u32 crc1, crc2, crc3; + + for (i = 0; i <= SIZE; i++) { + printf("\rTesting length %d...", i); + fflush(stdout); + random_garbage(buf1, i); + random_garbage(buf2, i); + for (j = 0; j < i; j++) + buf3[j] = buf1[j] ^ buf2[j]; + + crc1 = test_step(INIT1, buf1, i); + crc2 = test_step(INIT2, buf2, i); + /* Now check that CRC(buf1 ^ buf2) = CRC(buf1) ^ CRC(buf2) */ + crc3 = test_step(INIT1 ^ INIT2, buf3, i); + if (crc3 != (crc1 ^ crc2)) + printf("CRC XOR fail: 0x%08x != 0x%08x ^ 0x%08x\n", + crc3, crc1, crc2); + } + printf("\nAll test complete. 
No failures expected.\n"); + return 0; +} + +#endif /* UNITTEST */ diff --git a/libdde-linux26/contrib/lib/crc32defs.h b/libdde-linux26/contrib/lib/crc32defs.h new file mode 100644 index 00000000..9b6773d7 --- /dev/null +++ b/libdde-linux26/contrib/lib/crc32defs.h @@ -0,0 +1,32 @@ +/* + * There are multiple 16-bit CRC polynomials in common use, but this is + * *the* standard CRC-32 polynomial, first popularized by Ethernet. + * x^32+x^26+x^23+x^22+x^16+x^12+x^11+x^10+x^8+x^7+x^5+x^4+x^2+x^1+x^0 + */ +#define CRCPOLY_LE 0xedb88320 +#define CRCPOLY_BE 0x04c11db7 + +/* How many bits at a time to use. Requires a table of 4<<CRC_xx_BITS bytes. */ +/* For less performance-sensitive, use 4 */ +#ifndef CRC_LE_BITS +# define CRC_LE_BITS 8 +#endif +#ifndef CRC_BE_BITS +# define CRC_BE_BITS 8 +#endif + +/* + * Little-endian CRC computation. Used with serial bit streams sent + * lsbit-first. Be sure to use cpu_to_le32() to append the computed CRC. + */ +#if CRC_LE_BITS > 8 || CRC_LE_BITS < 1 || CRC_LE_BITS & CRC_LE_BITS-1 +# error CRC_LE_BITS must be a power of 2 between 1 and 8 +#endif + +/* + * Big-endian CRC computation. Used with serial bit streams sent + * msbit-first. Be sure to use cpu_to_be32() to append the computed CRC. + */ +#if CRC_BE_BITS > 8 || CRC_BE_BITS < 1 || CRC_BE_BITS & CRC_BE_BITS-1 +# error CRC_BE_BITS must be a power of 2 between 1 and 8 +#endif diff --git a/libdde-linux26/contrib/lib/ctype.c b/libdde-linux26/contrib/lib/ctype.c new file mode 100644 index 00000000..d02ace14 --- /dev/null +++ b/libdde-linux26/contrib/lib/ctype.c @@ -0,0 +1,36 @@ +/* + * linux/lib/ctype.c + * + * Copyright (C) 1991, 1992 Linus Torvalds + */ + +#include <linux/ctype.h> +#include <linux/module.h> + +unsigned char _ctype[] = { +_C,_C,_C,_C,_C,_C,_C,_C, /* 0-7 */ +_C,_C|_S,_C|_S,_C|_S,_C|_S,_C|_S,_C,_C, /* 8-15 */ +_C,_C,_C,_C,_C,_C,_C,_C, /* 16-23 */ +_C,_C,_C,_C,_C,_C,_C,_C, /* 24-31 */ +_S|_SP,_P,_P,_P,_P,_P,_P,_P, /* 32-39 */ +_P,_P,_P,_P,_P,_P,_P,_P, /* 40-47 */ +_D,_D,_D,_D,_D,_D,_D,_D, /* 48-55 */ +_D,_D,_P,_P,_P,_P,_P,_P, /* 56-63 */ +_P,_U|_X,_U|_X,_U|_X,_U|_X,_U|_X,_U|_X,_U, /* 64-71 */ +_U,_U,_U,_U,_U,_U,_U,_U, /* 72-79 */ +_U,_U,_U,_U,_U,_U,_U,_U, /* 80-87 */ +_U,_U,_U,_P,_P,_P,_P,_P, /* 88-95 */ +_P,_L|_X,_L|_X,_L|_X,_L|_X,_L|_X,_L|_X,_L, /* 96-103 */ +_L,_L,_L,_L,_L,_L,_L,_L, /* 104-111 */ +_L,_L,_L,_L,_L,_L,_L,_L, /* 112-119 */ +_L,_L,_L,_P,_P,_P,_P,_C, /* 120-127 */ +0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* 128-143 */ +0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* 144-159 */ +_S|_SP,_P,_P,_P,_P,_P,_P,_P,_P,_P,_P,_P,_P,_P,_P,_P, /* 160-175 */ +_P,_P,_P,_P,_P,_P,_P,_P,_P,_P,_P,_P,_P,_P,_P,_P, /* 176-191 */ +_U,_U,_U,_U,_U,_U,_U,_U,_U,_U,_U,_U,_U,_U,_U,_U, /* 192-207 */ +_U,_U,_U,_U,_U,_U,_U,_P,_U,_U,_U,_U,_U,_U,_U,_L, /* 208-223 */ +_L,_L,_L,_L,_L,_L,_L,_L,_L,_L,_L,_L,_L,_L,_L,_L, /* 224-239 */ +_L,_L,_L,_L,_L,_L,_L,_P,_L,_L,_L,_L,_L,_L,_L,_L}; /* 240-255 */ + +EXPORT_SYMBOL(_ctype); diff --git a/libdde-linux26/contrib/lib/find_next_bit.c b/libdde-linux26/contrib/lib/find_next_bit.c new file mode 100644 index 00000000..24c59ded --- /dev/null +++ b/libdde-linux26/contrib/lib/find_next_bit.c @@ -0,0 +1,275 @@ +/* find_next_bit.c: fallback find next bit implementation + * + * Copyright (C) 2004 Red Hat, Inc. All Rights Reserved. 
+ * Written by David Howells (dhowells@redhat.com) + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version + * 2 of the License, or (at your option) any later version. + */ + +#include <linux/bitops.h> +#include <linux/module.h> +#include <asm/types.h> +#include <asm/byteorder.h> + +#define BITOP_WORD(nr) ((nr) / BITS_PER_LONG) + +#ifdef CONFIG_GENERIC_FIND_NEXT_BIT +/* + * Find the next set bit in a memory region. + */ +unsigned long find_next_bit(const unsigned long *addr, unsigned long size, + unsigned long offset) +{ + const unsigned long *p = addr + BITOP_WORD(offset); + unsigned long result = offset & ~(BITS_PER_LONG-1); + unsigned long tmp; + + if (offset >= size) + return size; + size -= result; + offset %= BITS_PER_LONG; + if (offset) { + tmp = *(p++); + tmp &= (~0UL << offset); + if (size < BITS_PER_LONG) + goto found_first; + if (tmp) + goto found_middle; + size -= BITS_PER_LONG; + result += BITS_PER_LONG; + } + while (size & ~(BITS_PER_LONG-1)) { + if ((tmp = *(p++))) + goto found_middle; + result += BITS_PER_LONG; + size -= BITS_PER_LONG; + } + if (!size) + return result; + tmp = *p; + +found_first: + tmp &= (~0UL >> (BITS_PER_LONG - size)); + if (tmp == 0UL) /* Are any bits set? */ + return result + size; /* Nope. */ +found_middle: + return result + __ffs(tmp); +} +EXPORT_SYMBOL(find_next_bit); + +/* + * This implementation of find_{first,next}_zero_bit was stolen from + * Linus' asm-alpha/bitops.h. + */ +unsigned long find_next_zero_bit(const unsigned long *addr, unsigned long size, + unsigned long offset) +{ + const unsigned long *p = addr + BITOP_WORD(offset); + unsigned long result = offset & ~(BITS_PER_LONG-1); + unsigned long tmp; + + if (offset >= size) + return size; + size -= result; + offset %= BITS_PER_LONG; + if (offset) { + tmp = *(p++); + tmp |= ~0UL >> (BITS_PER_LONG - offset); + if (size < BITS_PER_LONG) + goto found_first; + if (~tmp) + goto found_middle; + size -= BITS_PER_LONG; + result += BITS_PER_LONG; + } + while (size & ~(BITS_PER_LONG-1)) { + if (~(tmp = *(p++))) + goto found_middle; + result += BITS_PER_LONG; + size -= BITS_PER_LONG; + } + if (!size) + return result; + tmp = *p; + +found_first: + tmp |= ~0UL << size; + if (tmp == ~0UL) /* Are any bits zero? */ + return result + size; /* Nope. */ +found_middle: + return result + ffz(tmp); +} +EXPORT_SYMBOL(find_next_zero_bit); +#endif /* CONFIG_GENERIC_FIND_NEXT_BIT */ + +#ifdef CONFIG_GENERIC_FIND_FIRST_BIT +/* + * Find the first set bit in a memory region. + */ +unsigned long find_first_bit(const unsigned long *addr, unsigned long size) +{ + const unsigned long *p = addr; + unsigned long result = 0; + unsigned long tmp; + + while (size & ~(BITS_PER_LONG-1)) { + if ((tmp = *(p++))) + goto found; + result += BITS_PER_LONG; + size -= BITS_PER_LONG; + } + if (!size) + return result; + + tmp = (*p) & (~0UL >> (BITS_PER_LONG - size)); + if (tmp == 0UL) /* Are any bits set? */ + return result + size; /* Nope. */ +found: + return result + __ffs(tmp); +} +EXPORT_SYMBOL(find_first_bit); + +/* + * Find the first cleared bit in a memory region. 
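The usual consumer of find_first_bit()/find_next_bit() is a scan loop over every set bit. A hedged sketch (the 128-bit map and its contents are hypothetical):

    DECLARE_BITMAP(mask, 128);      /* hypothetical 128-bit bitmap */
    unsigned long bit;

    bitmap_zero(mask, 128);
    set_bit(3, mask);
    set_bit(100, mask);

    /* Both functions return the size (here 128) when no further bit exists. */
    for (bit = find_first_bit(mask, 128);
         bit < 128;
         bit = find_next_bit(mask, 128, bit + 1))
        printk(KERN_DEBUG "bit %lu set\n", bit);    /* prints 3, then 100 */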
+ */ +unsigned long find_first_zero_bit(const unsigned long *addr, unsigned long size) +{ + const unsigned long *p = addr; + unsigned long result = 0; + unsigned long tmp; + + while (size & ~(BITS_PER_LONG-1)) { + if (~(tmp = *(p++))) + goto found; + result += BITS_PER_LONG; + size -= BITS_PER_LONG; + } + if (!size) + return result; + + tmp = (*p) | (~0UL << size); + if (tmp == ~0UL) /* Are any bits zero? */ + return result + size; /* Nope. */ +found: + return result + ffz(tmp); +} +EXPORT_SYMBOL(find_first_zero_bit); +#endif /* CONFIG_GENERIC_FIND_FIRST_BIT */ + +#ifdef __BIG_ENDIAN + +/* include/linux/byteorder does not support "unsigned long" type */ +static inline unsigned long ext2_swabp(const unsigned long * x) +{ +#if BITS_PER_LONG == 64 + return (unsigned long) __swab64p((u64 *) x); +#elif BITS_PER_LONG == 32 + return (unsigned long) __swab32p((u32 *) x); +#else +#error BITS_PER_LONG not defined +#endif +} + +/* include/linux/byteorder doesn't support "unsigned long" type */ +static inline unsigned long ext2_swab(const unsigned long y) +{ +#if BITS_PER_LONG == 64 + return (unsigned long) __swab64((u64) y); +#elif BITS_PER_LONG == 32 + return (unsigned long) __swab32((u32) y); +#else +#error BITS_PER_LONG not defined +#endif +} + +unsigned long generic_find_next_zero_le_bit(const unsigned long *addr, unsigned + long size, unsigned long offset) +{ + const unsigned long *p = addr + BITOP_WORD(offset); + unsigned long result = offset & ~(BITS_PER_LONG - 1); + unsigned long tmp; + + if (offset >= size) + return size; + size -= result; + offset &= (BITS_PER_LONG - 1UL); + if (offset) { + tmp = ext2_swabp(p++); + tmp |= (~0UL >> (BITS_PER_LONG - offset)); + if (size < BITS_PER_LONG) + goto found_first; + if (~tmp) + goto found_middle; + size -= BITS_PER_LONG; + result += BITS_PER_LONG; + } + + while (size & ~(BITS_PER_LONG - 1)) { + if (~(tmp = *(p++))) + goto found_middle_swap; + result += BITS_PER_LONG; + size -= BITS_PER_LONG; + } + if (!size) + return result; + tmp = ext2_swabp(p); +found_first: + tmp |= ~0UL << size; + if (tmp == ~0UL) /* Are any bits zero? */ + return result + size; /* Nope. Skip ffz */ +found_middle: + return result + ffz(tmp); + +found_middle_swap: + return result + ffz(ext2_swab(tmp)); +} + +EXPORT_SYMBOL(generic_find_next_zero_le_bit); + +unsigned long generic_find_next_le_bit(const unsigned long *addr, unsigned + long size, unsigned long offset) +{ + const unsigned long *p = addr + BITOP_WORD(offset); + unsigned long result = offset & ~(BITS_PER_LONG - 1); + unsigned long tmp; + + if (offset >= size) + return size; + size -= result; + offset &= (BITS_PER_LONG - 1UL); + if (offset) { + tmp = ext2_swabp(p++); + tmp &= (~0UL << offset); + if (size < BITS_PER_LONG) + goto found_first; + if (tmp) + goto found_middle; + size -= BITS_PER_LONG; + result += BITS_PER_LONG; + } + + while (size & ~(BITS_PER_LONG - 1)) { + tmp = *(p++); + if (tmp) + goto found_middle_swap; + result += BITS_PER_LONG; + size -= BITS_PER_LONG; + } + if (!size) + return result; + tmp = ext2_swabp(p); +found_first: + tmp &= (~0UL >> (BITS_PER_LONG - size)); + if (tmp == 0UL) /* Are any bits set? */ + return result + size; /* Nope. 
*/ +found_middle: + return result + __ffs(tmp); + +found_middle_swap: + return result + __ffs(ext2_swab(tmp)); +} +EXPORT_SYMBOL(generic_find_next_le_bit); +#endif /* __BIG_ENDIAN */ diff --git a/libdde-linux26/contrib/lib/gen_crc32table.c b/libdde-linux26/contrib/lib/gen_crc32table.c new file mode 100644 index 00000000..bea5d97d --- /dev/null +++ b/libdde-linux26/contrib/lib/gen_crc32table.c @@ -0,0 +1,82 @@ +#include <stdio.h> +#include "crc32defs.h" +#include <inttypes.h> + +#define ENTRIES_PER_LINE 4 + +#define LE_TABLE_SIZE (1 << CRC_LE_BITS) +#define BE_TABLE_SIZE (1 << CRC_BE_BITS) + +static uint32_t crc32table_le[LE_TABLE_SIZE]; +static uint32_t crc32table_be[BE_TABLE_SIZE]; + +/** + * crc32init_le() - allocate and initialize LE table data + * + * crc is the crc of the byte i; other entries are filled in based on the + * fact that crctable[i^j] = crctable[i] ^ crctable[j]. + * + */ +static void crc32init_le(void) +{ + unsigned i, j; + uint32_t crc = 1; + + crc32table_le[0] = 0; + + for (i = 1 << (CRC_LE_BITS - 1); i; i >>= 1) { + crc = (crc >> 1) ^ ((crc & 1) ? CRCPOLY_LE : 0); + for (j = 0; j < LE_TABLE_SIZE; j += 2 * i) + crc32table_le[i + j] = crc ^ crc32table_le[j]; + } +} + +/** + * crc32init_be() - allocate and initialize BE table data + */ +static void crc32init_be(void) +{ + unsigned i, j; + uint32_t crc = 0x80000000; + + crc32table_be[0] = 0; + + for (i = 1; i < BE_TABLE_SIZE; i <<= 1) { + crc = (crc << 1) ^ ((crc & 0x80000000) ? CRCPOLY_BE : 0); + for (j = 0; j < i; j++) + crc32table_be[i + j] = crc ^ crc32table_be[j]; + } +} + +static void output_table(uint32_t table[], int len, char *trans) +{ + int i; + + for (i = 0; i < len - 1; i++) { + if (i % ENTRIES_PER_LINE == 0) + printf("\n"); + printf("%s(0x%8.8xL), ", trans, table[i]); + } + printf("%s(0x%8.8xL)\n", trans, table[len - 1]); +} + +int main(int argc, char** argv) +{ + printf("/* this file is generated - do not edit */\n\n"); + + if (CRC_LE_BITS > 1) { + crc32init_le(); + printf("static const u32 crc32table_le[] = {"); + output_table(crc32table_le, LE_TABLE_SIZE, "tole"); + printf("};\n"); + } + + if (CRC_BE_BITS > 1) { + crc32init_be(); + printf("static const u32 crc32table_be[] = {"); + output_table(crc32table_be, BE_TABLE_SIZE, "tobe"); + printf("};\n"); + } + + return 0; +} diff --git a/libdde-linux26/contrib/lib/hexdump.c b/libdde-linux26/contrib/lib/hexdump.c new file mode 100644 index 00000000..f07c0db8 --- /dev/null +++ b/libdde-linux26/contrib/lib/hexdump.c @@ -0,0 +1,201 @@ +/* + * lib/hexdump.c + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. See README and COPYING for + * more details. + */ + +#include <linux/types.h> +#include <linux/ctype.h> +#include <linux/kernel.h> +#include <linux/module.h> + +const char hex_asc[] = "0123456789abcdef"; +EXPORT_SYMBOL(hex_asc); + +/** + * hex_dump_to_buffer - convert a blob of data to "hex ASCII" in memory + * @buf: data blob to dump + * @len: number of bytes in the @buf + * @rowsize: number of bytes to print per line; must be 16 or 32 + * @groupsize: number of bytes to print at a time (1, 2, 4, 8; default = 1) + * @linebuf: where to put the converted data + * @linebuflen: total size of @linebuf, including space for terminating NUL + * @ascii: include ASCII after the hex output + * + * hex_dump_to_buffer() works on one "line" of output at a time, i.e., + * 16 or 32 bytes of input data converted to hex + ASCII output. 
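Back to gen_crc32table.c above: crc32init_le() builds the table recursively from the identity crctable[i ^ j] = crctable[i] ^ crctable[j]. A more direct (slower) construction that should yield the same 256 entries when CRC_LE_BITS == 8, offered only as a cross-check sketch:

    #include <stdint.h>

    #define CRCPOLY_LE 0xedb88320

    /* table[i] = CRC of the single byte i, zero-seeded. */
    static void crc32init_le_naive(uint32_t table[256])
    {
        uint32_t i, crc;
        int j;

        for (i = 0; i < 256; i++) {
            crc = i;
            for (j = 0; j < 8; j++)
                crc = (crc >> 1) ^ ((crc & 1) ? CRCPOLY_LE : 0);
            table[i] = crc;
        }
    }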
+ * + * Given a buffer of u8 data, hex_dump_to_buffer() converts the input data + * to a hex + ASCII dump at the supplied memory location. + * The converted output is always NUL-terminated. + * + * E.g.: + * hex_dump_to_buffer(frame->data, frame->len, 16, 1, + * linebuf, sizeof(linebuf), 1); + * + * example output buffer: + * 40 41 42 43 44 45 46 47 48 49 4a 4b 4c 4d 4e 4f @ABCDEFGHIJKLMNO + */ +void hex_dump_to_buffer(const void *buf, size_t len, int rowsize, + int groupsize, char *linebuf, size_t linebuflen, + bool ascii) +{ + const u8 *ptr = buf; + u8 ch; + int j, lx = 0; + int ascii_column; + + if (rowsize != 16 && rowsize != 32) + rowsize = 16; + + if (!len) + goto nil; + if (len > rowsize) /* limit to one line at a time */ + len = rowsize; + if ((len % groupsize) != 0) /* no mixed size output */ + groupsize = 1; + + switch (groupsize) { + case 8: { + const u64 *ptr8 = buf; + int ngroups = len / groupsize; + + for (j = 0; j < ngroups; j++) + lx += scnprintf(linebuf + lx, linebuflen - lx, + "%16.16llx ", (unsigned long long)*(ptr8 + j)); + ascii_column = 17 * ngroups + 2; + break; + } + + case 4: { + const u32 *ptr4 = buf; + int ngroups = len / groupsize; + + for (j = 0; j < ngroups; j++) + lx += scnprintf(linebuf + lx, linebuflen - lx, + "%8.8x ", *(ptr4 + j)); + ascii_column = 9 * ngroups + 2; + break; + } + + case 2: { + const u16 *ptr2 = buf; + int ngroups = len / groupsize; + + for (j = 0; j < ngroups; j++) + lx += scnprintf(linebuf + lx, linebuflen - lx, + "%4.4x ", *(ptr2 + j)); + ascii_column = 5 * ngroups + 2; + break; + } + + default: + for (j = 0; (j < rowsize) && (j < len) && (lx + 4) < linebuflen; + j++) { + ch = ptr[j]; + linebuf[lx++] = hex_asc_hi(ch); + linebuf[lx++] = hex_asc_lo(ch); + linebuf[lx++] = ' '; + } + ascii_column = 3 * rowsize + 2; + break; + } + if (!ascii) + goto nil; + + while (lx < (linebuflen - 1) && lx < (ascii_column - 1)) + linebuf[lx++] = ' '; + for (j = 0; (j < rowsize) && (j < len) && (lx + 2) < linebuflen; j++) + linebuf[lx++] = (isascii(ptr[j]) && isprint(ptr[j])) ? ptr[j] + : '.'; +nil: + linebuf[lx++] = '\0'; +} +EXPORT_SYMBOL(hex_dump_to_buffer); + +/** + * print_hex_dump - print a text hex dump to syslog for a binary blob of data + * @level: kernel log level (e.g. KERN_DEBUG) + * @prefix_str: string to prefix each line with; + * caller supplies trailing spaces for alignment if desired + * @prefix_type: controls whether prefix of an offset, address, or none + * is printed (%DUMP_PREFIX_OFFSET, %DUMP_PREFIX_ADDRESS, %DUMP_PREFIX_NONE) + * @rowsize: number of bytes to print per line; must be 16 or 32 + * @groupsize: number of bytes to print at a time (1, 2, 4, 8; default = 1) + * @buf: data blob to dump + * @len: number of bytes in the @buf + * @ascii: include ASCII after the hex output + * + * Given a buffer of u8 data, print_hex_dump() prints a hex + ASCII dump + * to the kernel log at the specified kernel log level, with an optional + * leading prefix. + * + * print_hex_dump() works on one "line" of output at a time, i.e., + * 16 or 32 bytes of input data converted to hex + ASCII output. + * print_hex_dump() iterates over the entire input @buf, breaking it into + * "line size" chunks to format and print. 
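A worked call of hex_dump_to_buffer(), mirroring the example in the comment above; the data and the buffer size are our assumptions. With rowsize 16, groupsize 1 and ASCII enabled, a line needs at most 3*16 + 2 + 16 + 1 = 67 bytes:

    unsigned char data[16] = "@ABCDEFGHIJKLMNO";  /* bytes 0x40..0x4f; the NUL
                                                     is dropped by the
                                                     exact-size array */
    char line[80];

    hex_dump_to_buffer(data, sizeof(data), 16, 1, line, sizeof(line), true);
    /* line now reads roughly:
       "40 41 42 43 44 45 46 47 48 49 4a 4b 4c 4d 4e 4f  @ABCDEFGHIJKLMNO" */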
+ * + * E.g.: + * print_hex_dump(KERN_DEBUG, "raw data: ", DUMP_PREFIX_ADDRESS, + * 16, 1, frame->data, frame->len, 1); + * + * Example output using %DUMP_PREFIX_OFFSET and 1-byte mode: + * 0009ab42: 40 41 42 43 44 45 46 47 48 49 4a 4b 4c 4d 4e 4f @ABCDEFGHIJKLMNO + * Example output using %DUMP_PREFIX_ADDRESS and 4-byte mode: + * ffffffff88089af0: 73727170 77767574 7b7a7978 7f7e7d7c pqrstuvwxyz{|}~. + */ +void print_hex_dump(const char *level, const char *prefix_str, int prefix_type, + int rowsize, int groupsize, + const void *buf, size_t len, bool ascii) +{ + const u8 *ptr = buf; + int i, linelen, remaining = len; + unsigned char linebuf[200]; + + if (rowsize != 16 && rowsize != 32) + rowsize = 16; + + for (i = 0; i < len; i += rowsize) { + linelen = min(remaining, rowsize); + remaining -= rowsize; + hex_dump_to_buffer(ptr + i, linelen, rowsize, groupsize, + linebuf, sizeof(linebuf), ascii); + + switch (prefix_type) { + case DUMP_PREFIX_ADDRESS: + printk("%s%s%*p: %s\n", level, prefix_str, + (int)(2 * sizeof(void *)), ptr + i, linebuf); + break; + case DUMP_PREFIX_OFFSET: + printk("%s%s%.8x: %s\n", level, prefix_str, i, linebuf); + break; + default: + printk("%s%s%s\n", level, prefix_str, linebuf); + break; + } + } +} +EXPORT_SYMBOL(print_hex_dump); + +/** + * print_hex_dump_bytes - shorthand form of print_hex_dump() with default params + * @prefix_str: string to prefix each line with; + * caller supplies trailing spaces for alignment if desired + * @prefix_type: controls whether prefix of an offset, address, or none + * is printed (%DUMP_PREFIX_OFFSET, %DUMP_PREFIX_ADDRESS, %DUMP_PREFIX_NONE) + * @buf: data blob to dump + * @len: number of bytes in the @buf + * + * Calls print_hex_dump(), with log level of KERN_DEBUG, + * rowsize of 16, groupsize of 1, and ASCII output included. + */ +void print_hex_dump_bytes(const char *prefix_str, int prefix_type, + const void *buf, size_t len) +{ + print_hex_dump(KERN_DEBUG, prefix_str, prefix_type, 16, 1, + buf, len, 1); +} +EXPORT_SYMBOL(print_hex_dump_bytes); diff --git a/libdde-linux26/contrib/lib/hweight.c b/libdde-linux26/contrib/lib/hweight.c new file mode 100644 index 00000000..389424ec --- /dev/null +++ b/libdde-linux26/contrib/lib/hweight.c @@ -0,0 +1,59 @@ +#include <linux/module.h> +#include <linux/bitops.h> +#include <asm/types.h> + +/** + * hweightN - returns the hamming weight of a N-bit word + * @x: the word to weigh + * + * The Hamming Weight of a number is the total number of bits set in it. 
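The implementations below all use the same divide-and-conquer folding: pair counts, then nibble counts, then a byte count. Tracing hweight8() on one value (0xb5 = 10110101, five bits set):

    /*
     * w                      = 0xb5  10110101
     * (w >> 1) & 0x55        = 0x50  01010000
     * res = w - the above    = 0x65  01|10|01|01  per-pair counts 1,2,1,1
     * fold pairs (mask 0x33) = 0x32  0011|0010    per-nibble counts 3,2
     * fold nibbles (& 0x0f)  = 0x05               total: 5 bits set
     */
    unsigned int n = hweight8(0xb5);    /* n == 5 */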
+ */ + +unsigned int hweight32(unsigned int w) +{ + unsigned int res = w - ((w >> 1) & 0x55555555); + res = (res & 0x33333333) + ((res >> 2) & 0x33333333); + res = (res + (res >> 4)) & 0x0F0F0F0F; + res = res + (res >> 8); + return (res + (res >> 16)) & 0x000000FF; +} +EXPORT_SYMBOL(hweight32); + +unsigned int hweight16(unsigned int w) +{ + unsigned int res = w - ((w >> 1) & 0x5555); + res = (res & 0x3333) + ((res >> 2) & 0x3333); + res = (res + (res >> 4)) & 0x0F0F; + return (res + (res >> 8)) & 0x00FF; +} +EXPORT_SYMBOL(hweight16); + +unsigned int hweight8(unsigned int w) +{ + unsigned int res = w - ((w >> 1) & 0x55); + res = (res & 0x33) + ((res >> 2) & 0x33); + return (res + (res >> 4)) & 0x0F; +} +EXPORT_SYMBOL(hweight8); + +unsigned long hweight64(__u64 w) +{ +#if BITS_PER_LONG == 32 + return hweight32((unsigned int)(w >> 32)) + hweight32((unsigned int)w); +#elif BITS_PER_LONG == 64 +#ifdef ARCH_HAS_FAST_MULTIPLIER + w -= (w >> 1) & 0x5555555555555555ul; + w = (w & 0x3333333333333333ul) + ((w >> 2) & 0x3333333333333333ul); + w = (w + (w >> 4)) & 0x0f0f0f0f0f0f0f0ful; + return (w * 0x0101010101010101ul) >> 56; +#else + __u64 res = w - ((w >> 1) & 0x5555555555555555ul); + res = (res & 0x3333333333333333ul) + ((res >> 2) & 0x3333333333333333ul); + res = (res + (res >> 4)) & 0x0F0F0F0F0F0F0F0Ful; + res = res + (res >> 8); + res = res + (res >> 16); + return (res + (res >> 32)) & 0x00000000000000FFul; +#endif +#endif +} +EXPORT_SYMBOL(hweight64); diff --git a/libdde-linux26/contrib/lib/idr.c b/libdde-linux26/contrib/lib/idr.c new file mode 100644 index 00000000..dca3e14e --- /dev/null +++ b/libdde-linux26/contrib/lib/idr.c @@ -0,0 +1,890 @@ +/* + * 2002-10-18 written by Jim Houston jim.houston@ccur.com + * Copyright (C) 2002 by Concurrent Computer Corporation + * Distributed under the GNU GPL license version 2. + * + * Modified by George Anzinger to reuse immediately and to use + * find bit instructions. Also removed _irq on spinlocks. + * + * Modified by Nadia Derbey to make it RCU safe. + * + * Small id to pointer translation service. + * + * It uses a radix tree like structure as a sparse array indexed + * by the id to obtain the pointer. The bitmap makes allocating + * a new id quick. + * + * You call it to allocate an id (an int) an associate with that id a + * pointer or what ever, we treat it as a (void *). You can pass this + * id to a user for him to pass back at a later time. You then pass + * that id to this code and it returns your pointer. + + * You can release ids at any time. When all ids are released, most of + * the memory is returned (we keep IDR_FREE_MAX) in a local pool so we + * don't need to go to the memory "store" during an id allocate, just + * so you don't need to be too concerned about locking and conflicts + * with the slab allocator. + */ + +#ifndef TEST // to test in user space... 
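The allocation protocol the comment above describes, written out as a hypothetical caller (my_idr, my_lock and obj are assumptions; the -EAGAIN retry loop is the documented pairing of idr_pre_get() with idr_get_new()):

    static DEFINE_IDR(my_idr);
    static DEFINE_SPINLOCK(my_lock);

    int store_obj(void *obj)
    {
        int id, err;

        do {
            if (!idr_pre_get(&my_idr, GFP_KERNEL))
                return -ENOMEM;             /* really out of memory */
            spin_lock(&my_lock);
            err = idr_get_new(&my_idr, obj, &id);
            spin_unlock(&my_lock);
        } while (err == -EAGAIN);

        return err ? err : id;              /* id is 0 ... 0x7fffffff */
    }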
+#include <linux/slab.h> +#include <linux/init.h> +#include <linux/module.h> +#endif +#include <linux/err.h> +#include <linux/string.h> +#include <linux/idr.h> + +static struct kmem_cache *idr_layer_cache; + +static struct idr_layer *get_from_free_list(struct idr *idp) +{ + struct idr_layer *p; + unsigned long flags; + + spin_lock_irqsave(&idp->lock, flags); + if ((p = idp->id_free)) { + idp->id_free = p->ary[0]; + idp->id_free_cnt--; + p->ary[0] = NULL; + } + spin_unlock_irqrestore(&idp->lock, flags); + return(p); +} + +static void idr_layer_rcu_free(struct rcu_head *head) +{ + struct idr_layer *layer; + + layer = container_of(head, struct idr_layer, rcu_head); + kmem_cache_free(idr_layer_cache, layer); +} + +static inline void free_layer(struct idr_layer *p) +{ +#ifndef DDE_LINUX + call_rcu(&p->rcu_head, idr_layer_rcu_free); +#else + idr_layer_rcu_free(&p->rcu_head); +#endif +} + +/* only called when idp->lock is held */ +static void __move_to_free_list(struct idr *idp, struct idr_layer *p) +{ + p->ary[0] = idp->id_free; + idp->id_free = p; + idp->id_free_cnt++; +} + +static void move_to_free_list(struct idr *idp, struct idr_layer *p) +{ + unsigned long flags; + + /* + * Depends on the return element being zeroed. + */ + spin_lock_irqsave(&idp->lock, flags); + __move_to_free_list(idp, p); + spin_unlock_irqrestore(&idp->lock, flags); +} + +static void idr_mark_full(struct idr_layer **pa, int id) +{ + struct idr_layer *p = pa[0]; + int l = 0; + + __set_bit(id & IDR_MASK, &p->bitmap); + /* + * If this layer is full mark the bit in the layer above to + * show that this part of the radix tree is full. This may + * complete the layer above and require walking up the radix + * tree. + */ + while (p->bitmap == IDR_FULL) { + if (!(p = pa[++l])) + break; + id = id >> IDR_BITS; + __set_bit((id & IDR_MASK), &p->bitmap); + } +} + +/** + * idr_pre_get - reserver resources for idr allocation + * @idp: idr handle + * @gfp_mask: memory allocation flags + * + * This function should be called prior to locking and calling the + * idr_get_new* functions. It preallocates enough memory to satisfy + * the worst possible allocation. + * + * If the system is REALLY out of memory this function returns 0, + * otherwise 1. + */ +int idr_pre_get(struct idr *idp, gfp_t gfp_mask) +{ + while (idp->id_free_cnt < IDR_FREE_MAX) { + struct idr_layer *new; + new = kmem_cache_zalloc(idr_layer_cache, gfp_mask); + if (new == NULL) + return (0); + move_to_free_list(idp, new); + } + return 1; +} +EXPORT_SYMBOL(idr_pre_get); + +static int sub_alloc(struct idr *idp, int *starting_id, struct idr_layer **pa) +{ + int n, m, sh; + struct idr_layer *p, *new; + int l, id, oid; + unsigned long bm; + + id = *starting_id; + restart: + p = idp->top; + l = idp->layers; + pa[l--] = NULL; + while (1) { + /* + * We run around this while until we reach the leaf node... + */ + n = (id >> (IDR_BITS*l)) & IDR_MASK; + bm = ~p->bitmap; + m = find_next_bit(&bm, IDR_SIZE, n); + if (m == IDR_SIZE) { + /* no space available go back to previous layer. */ + l++; + oid = id; + id = (id | ((1 << (IDR_BITS * l)) - 1)) + 1; + + /* if already at the top layer, we need to grow */ + if (!(p = pa[l])) { + *starting_id = id; + return IDR_NEED_TO_GROW; + } + + /* If we need to go up one layer, continue the + * loop; otherwise, restart from the top. 
+ */ + sh = IDR_BITS * (l + 1); + if (oid >> sh == id >> sh) + continue; + else + goto restart; + } + if (m != n) { + sh = IDR_BITS*l; + id = ((id >> sh) ^ n ^ m) << sh; + } + if ((id >= MAX_ID_BIT) || (id < 0)) + return IDR_NOMORE_SPACE; + if (l == 0) + break; + /* + * Create the layer below if it is missing. + */ + if (!p->ary[m]) { + new = get_from_free_list(idp); + if (!new) + return -1; + new->layer = l-1; + rcu_assign_pointer(p->ary[m], new); + p->count++; + } + pa[l--] = p; + p = p->ary[m]; + } + + pa[l] = p; + return id; +} + +static int idr_get_empty_slot(struct idr *idp, int starting_id, + struct idr_layer **pa) +{ + struct idr_layer *p, *new; + int layers, v, id; + unsigned long flags; + + id = starting_id; +build_up: + p = idp->top; + layers = idp->layers; + if (unlikely(!p)) { + if (!(p = get_from_free_list(idp))) + return -1; + p->layer = 0; + layers = 1; + } + /* + * Add a new layer to the top of the tree if the requested + * id is larger than the currently allocated space. + */ + while ((layers < (MAX_LEVEL - 1)) && (id >= (1 << (layers*IDR_BITS)))) { + layers++; + if (!p->count) { + /* special case: if the tree is currently empty, + * then we grow the tree by moving the top node + * upwards. + */ + p->layer++; + continue; + } + if (!(new = get_from_free_list(idp))) { + /* + * The allocation failed. If we built part of + * the structure tear it down. + */ + spin_lock_irqsave(&idp->lock, flags); + for (new = p; p && p != idp->top; new = p) { + p = p->ary[0]; + new->ary[0] = NULL; + new->bitmap = new->count = 0; + __move_to_free_list(idp, new); + } + spin_unlock_irqrestore(&idp->lock, flags); + return -1; + } + new->ary[0] = p; + new->count = 1; + new->layer = layers-1; + if (p->bitmap == IDR_FULL) + __set_bit(0, &new->bitmap); + p = new; + } + rcu_assign_pointer(idp->top, p); + idp->layers = layers; + v = sub_alloc(idp, &id, pa); + if (v == IDR_NEED_TO_GROW) + goto build_up; + return(v); +} + +static int idr_get_new_above_int(struct idr *idp, void *ptr, int starting_id) +{ + struct idr_layer *pa[MAX_LEVEL]; + int id; + + id = idr_get_empty_slot(idp, starting_id, pa); + if (id >= 0) { + /* + * Successfully found an empty slot. Install the user + * pointer and mark the slot full. + */ + rcu_assign_pointer(pa[0]->ary[id & IDR_MASK], + (struct idr_layer *)ptr); + pa[0]->count++; + idr_mark_full(pa, id); + } + + return id; +} + +/** + * idr_get_new_above - allocate new idr entry above or equal to a start id + * @idp: idr handle + * @ptr: pointer you want associated with the ide + * @start_id: id to start search at + * @id: pointer to the allocated handle + * + * This is the allocate id function. It should be called with any + * required locks. + * + * If memory is required, it will return -EAGAIN, you should unlock + * and go back to the idr_pre_get() call. If the idr is full, it will + * return -ENOSPC. + * + * @id returns a value in the range @starting_id ... 0x7fffffff + */ +int idr_get_new_above(struct idr *idp, void *ptr, int starting_id, int *id) +{ + int rv; + + rv = idr_get_new_above_int(idp, ptr, starting_id); + /* + * This is a cheap hack until the IDR code can be fixed to + * return proper error values. + */ + if (rv < 0) + return _idr_rc_to_errno(rv); + *id = rv; + return 0; +} +EXPORT_SYMBOL(idr_get_new_above); + +/** + * idr_get_new - allocate new idr entry + * @idp: idr handle + * @ptr: pointer you want associated with the ide + * @id: pointer to the allocated handle + * + * This is the allocate id function. It should be called with any + * required locks. 
+ *
+ * If memory is required, it will return -EAGAIN; you should unlock
+ * and go back to the idr_pre_get() call.  If the idr is full, it will
+ * return -ENOSPC.
+ *
+ * @id returns a value in the range 0 ... 0x7fffffff
+ */
+int idr_get_new(struct idr *idp, void *ptr, int *id)
+{
+	int rv;
+
+	rv = idr_get_new_above_int(idp, ptr, 0);
+	/*
+	 * This is a cheap hack until the IDR code can be fixed to
+	 * return proper error values.
+	 */
+	if (rv < 0)
+		return _idr_rc_to_errno(rv);
+	*id = rv;
+	return 0;
+}
+EXPORT_SYMBOL(idr_get_new);
+
+static void idr_remove_warning(int id)
+{
+	printk(KERN_WARNING
+		"idr_remove called for id=%d which is not allocated.\n", id);
+	dump_stack();
+}
+
+static void sub_remove(struct idr *idp, int shift, int id)
+{
+	struct idr_layer *p = idp->top;
+	struct idr_layer **pa[MAX_LEVEL];
+	struct idr_layer ***paa = &pa[0];
+	struct idr_layer *to_free;
+	int n;
+
+	*paa = NULL;
+	*++paa = &idp->top;
+
+	while ((shift > 0) && p) {
+		n = (id >> shift) & IDR_MASK;
+		__clear_bit(n, &p->bitmap);
+		*++paa = &p->ary[n];
+		p = p->ary[n];
+		shift -= IDR_BITS;
+	}
+	n = id & IDR_MASK;
+	if (likely(p != NULL && test_bit(n, &p->bitmap))) {
+		__clear_bit(n, &p->bitmap);
+		rcu_assign_pointer(p->ary[n], NULL);
+		to_free = NULL;
+		while (*paa && !--((**paa)->count)) {
+			if (to_free)
+				free_layer(to_free);
+			to_free = **paa;
+			**paa-- = NULL;
+		}
+		if (!*paa)
+			idp->layers = 0;
+		if (to_free)
+			free_layer(to_free);
+	} else
+		idr_remove_warning(id);
+}
+
+/**
+ * idr_remove - remove the given id and free its slot
+ * @idp: idr handle
+ * @id: unique key
+ */
+void idr_remove(struct idr *idp, int id)
+{
+	struct idr_layer *p;
+	struct idr_layer *to_free;
+
+	/* Mask off upper bits we don't use for the search. */
+	id &= MAX_ID_MASK;
+
+	sub_remove(idp, (idp->layers - 1) * IDR_BITS, id);
+	if (idp->top && idp->top->count == 1 && (idp->layers > 1) &&
+	    idp->top->ary[0]) {
+		/*
+		 * Single child at leftmost slot: we can shrink the tree.
+		 * This level is not needed anymore since when layers are
+		 * inserted, they are inserted at the top of the existing
+		 * tree.
+		 */
+		to_free = idp->top;
+		p = idp->top->ary[0];
+		rcu_assign_pointer(idp->top, p);
+		--idp->layers;
+		to_free->bitmap = to_free->count = 0;
+		free_layer(to_free);
+	}
+	while (idp->id_free_cnt >= IDR_FREE_MAX) {
+		p = get_from_free_list(idp);
+		/*
+		 * Note: we don't call the rcu callback here, since the only
+		 * layers that fall into the freelist are those that have been
+		 * preallocated.
+		 */
+		kmem_cache_free(idr_layer_cache, p);
+	}
+	return;
+}
+EXPORT_SYMBOL(idr_remove);
+
+/**
+ * idr_remove_all - remove all ids from the given idr tree
+ * @idp: idr handle
+ *
+ * idr_destroy() only frees up unused, cached idr_layers, but this
+ * function will remove all id mappings and leave all idr_layers
+ * unused.
+ *
+ * A typical clean-up sequence for objects stored in an idr tree will
+ * use idr_for_each() to free all objects, if necessary, then
+ * idr_remove_all() to remove all ids, and idr_destroy() to free
+ * up the cached idr_layers.
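That clean-up sequence, spelled out (free_entry, my_idr and the kfree() destructor are assumptions):

    static int free_entry(int id, void *p, void *data)
    {
        kfree(p);       /* whatever destructor the stored objects need */
        return 0;
    }

    static void my_teardown(void)
    {
        idr_for_each(&my_idr, free_entry, NULL); /* 1: free the objects */
        idr_remove_all(&my_idr);                 /* 2: drop all id mappings */
        idr_destroy(&my_idr);                    /* 3: free cached idr_layers */
    }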
+ */ +void idr_remove_all(struct idr *idp) +{ + int n, id, max; + struct idr_layer *p; + struct idr_layer *pa[MAX_LEVEL]; + struct idr_layer **paa = &pa[0]; + + n = idp->layers * IDR_BITS; + p = idp->top; + rcu_assign_pointer(idp->top, NULL); + max = 1 << n; + + id = 0; + while (id < max) { + while (n > IDR_BITS && p) { + n -= IDR_BITS; + *paa++ = p; + p = p->ary[(id >> n) & IDR_MASK]; + } + + id += 1 << n; + while (n < fls(id)) { + if (p) + free_layer(p); + n += IDR_BITS; + p = *--paa; + } + } + idp->layers = 0; +} +EXPORT_SYMBOL(idr_remove_all); + +/** + * idr_destroy - release all cached layers within an idr tree + * idp: idr handle + */ +void idr_destroy(struct idr *idp) +{ + while (idp->id_free_cnt) { + struct idr_layer *p = get_from_free_list(idp); + kmem_cache_free(idr_layer_cache, p); + } +} +EXPORT_SYMBOL(idr_destroy); + +/** + * idr_find - return pointer for given id + * @idp: idr handle + * @id: lookup key + * + * Return the pointer given the id it has been registered with. A %NULL + * return indicates that @id is not valid or you passed %NULL in + * idr_get_new(). + * + * This function can be called under rcu_read_lock(), given that the leaf + * pointers lifetimes are correctly managed. + */ +void *idr_find(struct idr *idp, int id) +{ + int n; + struct idr_layer *p; + + p = rcu_dereference(idp->top); + if (!p) + return NULL; + n = (p->layer+1) * IDR_BITS; + + /* Mask off upper bits we don't use for the search. */ + id &= MAX_ID_MASK; + + if (id >= (1 << n)) + return NULL; + BUG_ON(n == 0); + + while (n > 0 && p) { + n -= IDR_BITS; + BUG_ON(n != p->layer*IDR_BITS); + p = rcu_dereference(p->ary[(id >> n) & IDR_MASK]); + } + return((void *)p); +} +EXPORT_SYMBOL(idr_find); + +/** + * idr_for_each - iterate through all stored pointers + * @idp: idr handle + * @fn: function to be called for each pointer + * @data: data passed back to callback function + * + * Iterate over the pointers registered with the given idr. The + * callback function will be called for each pointer currently + * registered, passing the id, the pointer and the data pointer passed + * to this function. It is not safe to modify the idr tree while in + * the callback, so functions such as idr_get_new and idr_remove are + * not allowed. + * + * We check the return of @fn each time. If it returns anything other + * than 0, we break out and return that value. + * + * The caller must serialize idr_for_each() vs idr_get_new() and idr_remove(). + */ +int idr_for_each(struct idr *idp, + int (*fn)(int id, void *p, void *data), void *data) +{ + int n, id, max, error = 0; + struct idr_layer *p; + struct idr_layer *pa[MAX_LEVEL]; + struct idr_layer **paa = &pa[0]; + + n = idp->layers * IDR_BITS; + p = rcu_dereference(idp->top); + max = 1 << n; + + id = 0; + while (id < max) { + while (n > 0 && p) { + n -= IDR_BITS; + *paa++ = p; + p = rcu_dereference(p->ary[(id >> n) & IDR_MASK]); + } + + if (p) { + error = fn(id, (void *)p, data); + if (error) + break; + } + + id += 1 << n; + while (n < fls(id)) { + n += IDR_BITS; + p = *--paa; + } + } + + return error; +} +EXPORT_SYMBOL(idr_for_each); + +/** + * idr_replace - replace pointer for given id + * @idp: idr handle + * @ptr: pointer you want associated with the id + * @id: lookup key + * + * Replace the pointer registered with an id and return the old value. + * A -ENOENT return indicates that @id was not found. + * A -EINVAL return indicates that @id was not within valid constraints. + * + * The caller must serialize with writers. 
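Because idr_replace() encodes failures with ERR_PTR(), callers must test with IS_ERR() rather than for NULL. A short sketch with assumed names:

    void *old = idr_replace(&my_idr, new_obj, id);

    if (IS_ERR(old))
        return PTR_ERR(old);    /* -EINVAL or -ENOENT, per the doc above */
    kfree(old);                 /* the previous pointer is now ours */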
+ */
+void *idr_replace(struct idr *idp, void *ptr, int id)
+{
+	int n;
+	struct idr_layer *p, *old_p;
+
+	p = idp->top;
+	if (!p)
+		return ERR_PTR(-EINVAL);
+
+	n = (p->layer+1) * IDR_BITS;
+
+	id &= MAX_ID_MASK;
+
+	if (id >= (1 << n))
+		return ERR_PTR(-EINVAL);
+
+	n -= IDR_BITS;
+	while ((n > 0) && p) {
+		p = p->ary[(id >> n) & IDR_MASK];
+		n -= IDR_BITS;
+	}
+
+	n = id & IDR_MASK;
+	if (unlikely(p == NULL || !test_bit(n, &p->bitmap)))
+		return ERR_PTR(-ENOENT);
+
+	old_p = p->ary[n];
+	rcu_assign_pointer(p->ary[n], ptr);
+
+	return old_p;
+}
+EXPORT_SYMBOL(idr_replace);
+
+void __init idr_init_cache(void)
+{
+	idr_layer_cache = kmem_cache_create("idr_layer_cache",
+				sizeof(struct idr_layer), 0, SLAB_PANIC, NULL);
+}
+
+/**
+ * idr_init - initialize idr handle
+ * @idp: idr handle
+ *
+ * This function is used to set up the handle (@idp) that you will pass
+ * to the rest of the functions.
+ */
+void idr_init(struct idr *idp)
+{
+	memset(idp, 0, sizeof(struct idr));
+	spin_lock_init(&idp->lock);
+}
+EXPORT_SYMBOL(idr_init);
+
+
+/*
+ * IDA - IDR based ID allocator
+ *
+ * This is an id allocator without id -> pointer translation.  Memory
+ * usage is much lower than full blown idr because each id only
+ * occupies a bit.  ida uses a custom leaf node which contains
+ * IDA_BITMAP_BITS slots.
+ *
+ * 2007-04-25 written by Tejun Heo <htejun@gmail.com>
+ */
+
+static void free_bitmap(struct ida *ida, struct ida_bitmap *bitmap)
+{
+	unsigned long flags;
+
+	if (!ida->free_bitmap) {
+		spin_lock_irqsave(&ida->idr.lock, flags);
+		if (!ida->free_bitmap) {
+			ida->free_bitmap = bitmap;
+			bitmap = NULL;
+		}
+		spin_unlock_irqrestore(&ida->idr.lock, flags);
+	}
+
+	kfree(bitmap);
+}
+
+/**
+ * ida_pre_get - reserve resources for ida allocation
+ * @ida: ida handle
+ * @gfp_mask: memory allocation flags
+ *
+ * This function should be called prior to locking and calling the
+ * following function.  It preallocates enough memory to satisfy the
+ * worst possible allocation.
+ *
+ * If the system is REALLY out of memory this function returns 0,
+ * otherwise 1.
+ */
+int ida_pre_get(struct ida *ida, gfp_t gfp_mask)
+{
+	/* allocate idr_layers */
+	if (!idr_pre_get(&ida->idr, gfp_mask))
+		return 0;
+
+	/* allocate free_bitmap */
+	if (!ida->free_bitmap) {
+		struct ida_bitmap *bitmap;
+
+		bitmap = kmalloc(sizeof(struct ida_bitmap), gfp_mask);
+		if (!bitmap)
+			return 0;
+
+		free_bitmap(ida, bitmap);
+	}
+
+	return 1;
+}
+EXPORT_SYMBOL(ida_pre_get);
+
+/**
+ * ida_get_new_above - allocate new ID above or equal to a start id
+ * @ida: ida handle
+ * @starting_id: id to start search at
+ * @p_id: pointer to the allocated handle
+ *
+ * Allocate new ID above or equal to @starting_id.  It should be
+ * called with any required locks.
+ *
+ * If memory is required, it will return -EAGAIN; you should unlock
+ * and go back to the ida_pre_get() call.  If the ida is full, it will
+ * return -ENOSPC.
+ *
+ * @p_id returns a value in the range @starting_id ... 0x7fffffff.
+ */ +int ida_get_new_above(struct ida *ida, int starting_id, int *p_id) +{ + struct idr_layer *pa[MAX_LEVEL]; + struct ida_bitmap *bitmap; + unsigned long flags; + int idr_id = starting_id / IDA_BITMAP_BITS; + int offset = starting_id % IDA_BITMAP_BITS; + int t, id; + + restart: + /* get vacant slot */ + t = idr_get_empty_slot(&ida->idr, idr_id, pa); + if (t < 0) + return _idr_rc_to_errno(t); + + if (t * IDA_BITMAP_BITS >= MAX_ID_BIT) + return -ENOSPC; + + if (t != idr_id) + offset = 0; + idr_id = t; + + /* if bitmap isn't there, create a new one */ + bitmap = (void *)pa[0]->ary[idr_id & IDR_MASK]; + if (!bitmap) { + spin_lock_irqsave(&ida->idr.lock, flags); + bitmap = ida->free_bitmap; + ida->free_bitmap = NULL; + spin_unlock_irqrestore(&ida->idr.lock, flags); + + if (!bitmap) + return -EAGAIN; + + memset(bitmap, 0, sizeof(struct ida_bitmap)); + rcu_assign_pointer(pa[0]->ary[idr_id & IDR_MASK], + (void *)bitmap); + pa[0]->count++; + } + + /* lookup for empty slot */ + t = find_next_zero_bit(bitmap->bitmap, IDA_BITMAP_BITS, offset); + if (t == IDA_BITMAP_BITS) { + /* no empty slot after offset, continue to the next chunk */ + idr_id++; + offset = 0; + goto restart; + } + + id = idr_id * IDA_BITMAP_BITS + t; + if (id >= MAX_ID_BIT) + return -ENOSPC; + + __set_bit(t, bitmap->bitmap); + if (++bitmap->nr_busy == IDA_BITMAP_BITS) + idr_mark_full(pa, idr_id); + + *p_id = id; + + /* Each leaf node can handle nearly a thousand slots and the + * whole idea of ida is to have small memory foot print. + * Throw away extra resources one by one after each successful + * allocation. + */ + if (ida->idr.id_free_cnt || ida->free_bitmap) { + struct idr_layer *p = get_from_free_list(&ida->idr); + if (p) + kmem_cache_free(idr_layer_cache, p); + } + + return 0; +} +EXPORT_SYMBOL(ida_get_new_above); + +/** + * ida_get_new - allocate new ID + * @ida: idr handle + * @p_id: pointer to the allocated handle + * + * Allocate new ID. It should be called with any required locks. + * + * If memory is required, it will return -EAGAIN, you should unlock + * and go back to the idr_pre_get() call. If the idr is full, it will + * return -ENOSPC. + * + * @id returns a value in the range 0 ... 0x7fffffff. 
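ida follows the same pre-get/retry protocol as idr, just without the pointer payload. A hypothetical caller (my_ida is an assumption; external locking, elided here, remains the caller's job):

    static DEFINE_IDA(my_ida);

    int grab_id(void)
    {
        int id, err;

        do {
            if (!ida_pre_get(&my_ida, GFP_KERNEL))
                return -ENOMEM;
            err = ida_get_new(&my_ida, &id);
        } while (err == -EAGAIN);

        return err ? err : id;      /* release later with ida_remove() */
    }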
+ */ +int ida_get_new(struct ida *ida, int *p_id) +{ + return ida_get_new_above(ida, 0, p_id); +} +EXPORT_SYMBOL(ida_get_new); + +/** + * ida_remove - remove the given ID + * @ida: ida handle + * @id: ID to free + */ +void ida_remove(struct ida *ida, int id) +{ + struct idr_layer *p = ida->idr.top; + int shift = (ida->idr.layers - 1) * IDR_BITS; + int idr_id = id / IDA_BITMAP_BITS; + int offset = id % IDA_BITMAP_BITS; + int n; + struct ida_bitmap *bitmap; + + /* clear full bits while looking up the leaf idr_layer */ + while ((shift > 0) && p) { + n = (idr_id >> shift) & IDR_MASK; + __clear_bit(n, &p->bitmap); + p = p->ary[n]; + shift -= IDR_BITS; + } + + if (p == NULL) + goto err; + + n = idr_id & IDR_MASK; + __clear_bit(n, &p->bitmap); + + bitmap = (void *)p->ary[n]; + if (!test_bit(offset, bitmap->bitmap)) + goto err; + + /* update bitmap and remove it if empty */ + __clear_bit(offset, bitmap->bitmap); + if (--bitmap->nr_busy == 0) { + __set_bit(n, &p->bitmap); /* to please idr_remove() */ + idr_remove(&ida->idr, idr_id); + free_bitmap(ida, bitmap); + } + + return; + + err: + printk(KERN_WARNING + "ida_remove called for id=%d which is not allocated.\n", id); +} +EXPORT_SYMBOL(ida_remove); + +/** + * ida_destroy - release all cached layers within an ida tree + * ida: ida handle + */ +void ida_destroy(struct ida *ida) +{ + idr_destroy(&ida->idr); + kfree(ida->free_bitmap); +} +EXPORT_SYMBOL(ida_destroy); + +/** + * ida_init - initialize ida handle + * @ida: ida handle + * + * This function is use to set up the handle (@ida) that you will pass + * to the rest of the functions. + */ +void ida_init(struct ida *ida) +{ + memset(ida, 0, sizeof(struct ida)); + idr_init(&ida->idr); + +} +EXPORT_SYMBOL(ida_init); diff --git a/libdde-linux26/contrib/lib/kasprintf.c b/libdde-linux26/contrib/lib/kasprintf.c new file mode 100644 index 00000000..c5ff1fd1 --- /dev/null +++ b/libdde-linux26/contrib/lib/kasprintf.c @@ -0,0 +1,44 @@ +/* + * linux/lib/kasprintf.c + * + * Copyright (C) 1991, 1992 Linus Torvalds + */ + +#include <stdarg.h> +#include <linux/module.h> +#include <linux/types.h> +#include <linux/string.h> + +/* Simplified asprintf. */ +char *kvasprintf(gfp_t gfp, const char *fmt, va_list ap) +{ + unsigned int len; + char *p; + va_list aq; + + va_copy(aq, ap); + len = vsnprintf(NULL, 0, fmt, aq); + va_end(aq); + + p = kmalloc(len+1, gfp); + if (!p) + return NULL; + + vsnprintf(p, len+1, fmt, ap); + + return p; +} +EXPORT_SYMBOL(kvasprintf); + +char *kasprintf(gfp_t gfp, const char *fmt, ...) +{ + va_list ap; + char *p; + + va_start(ap, fmt); + p = kvasprintf(gfp, fmt, ap); + va_end(ap); + + return p; +} +EXPORT_SYMBOL(kasprintf); diff --git a/libdde-linux26/contrib/lib/kernel_lock.c b/libdde-linux26/contrib/lib/kernel_lock.c new file mode 100644 index 00000000..01a3c22c --- /dev/null +++ b/libdde-linux26/contrib/lib/kernel_lock.c @@ -0,0 +1,133 @@ +/* + * lib/kernel_lock.c + * + * This is the traditional BKL - big kernel lock. Largely + * relegated to obsolescence, but used by various less + * important (or lazy) subsystems. + */ +#include <linux/smp_lock.h> +#include <linux/module.h> +#include <linux/kallsyms.h> +#include <linux/semaphore.h> + +/* + * The 'big kernel lock' + * + * This spinlock is taken and released recursively by lock_kernel() + * and unlock_kernel(). It is transparently dropped and reacquired + * over schedule(). It is used to protect legacy code that hasn't + * been migrated to a proper locking design yet. + * + * Don't use in new code. 
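Looking back at kasprintf.c above: its point is that the caller never sizes the buffer; the result is kmalloc()ed to fit and must be kfree()d. A sketch with assumed names:

    char *label = kasprintf(GFP_KERNEL, "%s-%d", dev_name, index);

    if (!label)
        return -ENOMEM;
    /* ... use label ... */
    kfree(label);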
+ */ +static __cacheline_aligned_in_smp DEFINE_SPINLOCK(kernel_flag); + + +/* + * Acquire/release the underlying lock from the scheduler. + * + * This is called with preemption disabled, and should + * return an error value if it cannot get the lock and + * TIF_NEED_RESCHED gets set. + * + * If it successfully gets the lock, it should increment + * the preemption count like any spinlock does. + * + * (This works on UP too - _raw_spin_trylock will never + * return false in that case) + */ +int __lockfunc __reacquire_kernel_lock(void) +{ + while (!_raw_spin_trylock(&kernel_flag)) { + if (test_thread_flag(TIF_NEED_RESCHED)) + return -EAGAIN; + cpu_relax(); + } + preempt_disable(); + return 0; +} + +void __lockfunc __release_kernel_lock(void) +{ + _raw_spin_unlock(&kernel_flag); + preempt_enable_no_resched(); +} + +/* + * These are the BKL spinlocks - we try to be polite about preemption. + * If SMP is not on (ie UP preemption), this all goes away because the + * _raw_spin_trylock() will always succeed. + */ +#ifdef CONFIG_PREEMPT +static inline void __lock_kernel(void) +{ + preempt_disable(); + if (unlikely(!_raw_spin_trylock(&kernel_flag))) { + /* + * If preemption was disabled even before this + * was called, there's nothing we can be polite + * about - just spin. + */ + if (preempt_count() > 1) { + _raw_spin_lock(&kernel_flag); + return; + } + + /* + * Otherwise, let's wait for the kernel lock + * with preemption enabled.. + */ + do { + preempt_enable(); + while (spin_is_locked(&kernel_flag)) + cpu_relax(); + preempt_disable(); + } while (!_raw_spin_trylock(&kernel_flag)); + } +} + +#else + +/* + * Non-preemption case - just get the spinlock + */ +static inline void __lock_kernel(void) +{ + _raw_spin_lock(&kernel_flag); +} +#endif + +static inline void __unlock_kernel(void) +{ + /* + * the BKL is not covered by lockdep, so we open-code the + * unlocking sequence (and thus avoid the dep-chain ops): + */ + _raw_spin_unlock(&kernel_flag); + preempt_enable(); +} + +/* + * Getting the big kernel lock. + * + * This cannot happen asynchronously, so we only need to + * worry about other CPU's. + */ +void __lockfunc lock_kernel(void) +{ + int depth = current->lock_depth+1; + if (likely(!depth)) + __lock_kernel(); + current->lock_depth = depth; +} + +void __lockfunc unlock_kernel(void) +{ + BUG_ON(current->lock_depth < 0); + if (likely(--current->lock_depth < 0)) + __unlock_kernel(); +} + +EXPORT_SYMBOL(lock_kernel); +EXPORT_SYMBOL(unlock_kernel); + diff --git a/libdde-linux26/contrib/lib/klist.c b/libdde-linux26/contrib/lib/klist.c new file mode 100644 index 00000000..573d6068 --- /dev/null +++ b/libdde-linux26/contrib/lib/klist.c @@ -0,0 +1,365 @@ +/* + * klist.c - Routines for manipulating klists. + * + * Copyright (C) 2005 Patrick Mochel + * + * This file is released under the GPL v2. + * + * This klist interface provides a couple of structures that wrap around + * struct list_head to provide explicit list "head" (struct klist) and list + * "node" (struct klist_node) objects. For struct klist, a spinlock is + * included that protects access to the actual list itself. struct + * klist_node provides a pointer to the klist that owns it and a kref + * reference count that indicates the number of current users of that node + * in the list. + * + * The entire point is to provide an interface for iterating over a list + * that is safe and allows for modification of the list during the + * iteration (e.g. insertion and removal), including modification of the + * current node on the list. 
+ * + * It works using a 3rd object type - struct klist_iter - that is declared + * and initialized before an iteration. klist_next() is used to acquire the + * next element in the list. It returns NULL if there are no more items. + * Internally, that routine takes the klist's lock, decrements the + * reference count of the previous klist_node and increments the count of + * the next klist_node. It then drops the lock and returns. + * + * There are primitives for adding and removing nodes to/from a klist. + * When deleting, klist_del() will simply decrement the reference count. + * Only when the count goes to 0 is the node removed from the list. + * klist_remove() will try to delete the node from the list and block until + * it is actually removed. This is useful for objects (like devices) that + * have been removed from the system and must be freed (but must wait until + * all accessors have finished). + */ + +#include <linux/klist.h> +#include <linux/module.h> +#include <linux/sched.h> + +/* + * Use the lowest bit of n_klist to mark deleted nodes and exclude + * dead ones from iteration. + */ +#define KNODE_DEAD 1LU +#define KNODE_KLIST_MASK ~KNODE_DEAD + +static struct klist *knode_klist(struct klist_node *knode) +{ + return (struct klist *) + ((unsigned long)knode->n_klist & KNODE_KLIST_MASK); +} + +static bool knode_dead(struct klist_node *knode) +{ + return (unsigned long)knode->n_klist & KNODE_DEAD; +} + +static void knode_set_klist(struct klist_node *knode, struct klist *klist) +{ + knode->n_klist = klist; + /* no knode deserves to start its life dead */ + WARN_ON(knode_dead(knode)); +} + +static void knode_kill(struct klist_node *knode) +{ + /* and no knode should die twice ever either, see we're very humane */ + WARN_ON(knode_dead(knode)); + *(unsigned long *)&knode->n_klist |= KNODE_DEAD; +} + +/** + * klist_init - Initialize a klist structure. + * @k: The klist we're initializing. + * @get: The get function for the embedding object (NULL if none) + * @put: The put function for the embedding object (NULL if none) + * + * Initialises the klist structure. If the klist_node structures are + * going to be embedded in refcounted objects (necessary for safe + * deletion) then the get/put arguments are used to initialise + * functions that take and release references on the embedding + * objects. + */ +void klist_init(struct klist *k, void (*get)(struct klist_node *), + void (*put)(struct klist_node *)) +{ + INIT_LIST_HEAD(&k->k_list); + spin_lock_init(&k->k_lock); + k->get = get; + k->put = put; +} +EXPORT_SYMBOL_GPL(klist_init); + +static void add_head(struct klist *k, struct klist_node *n) +{ + spin_lock(&k->k_lock); + list_add(&n->n_node, &k->k_list); + spin_unlock(&k->k_lock); +} + +static void add_tail(struct klist *k, struct klist_node *n) +{ + spin_lock(&k->k_lock); + list_add_tail(&n->n_node, &k->k_list); + spin_unlock(&k->k_lock); +} + +static void klist_node_init(struct klist *k, struct klist_node *n) +{ + INIT_LIST_HEAD(&n->n_node); + kref_init(&n->n_ref); + knode_set_klist(n, k); + if (k->get) + k->get(n); +} + +/** + * klist_add_head - Initialize a klist_node and add it to front. + * @n: node we're adding. + * @k: klist it's going on. + */ +void klist_add_head(struct klist_node *n, struct klist *k) +{ + klist_node_init(k, n); + add_head(k, n); +} +EXPORT_SYMBOL_GPL(klist_add_head); + +/** + * klist_add_tail - Initialize a klist_node and add it to back. + * @n: node we're adding. + * @k: klist it's going on. 
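When klist_node is embedded in a refcounted object, the get/put hooks handed to klist_init() pin the container for as long as an iterator or accessor holds the node. A hypothetical wrapper (my_obj_release is an assumed kref destructor):

    struct my_obj {
        struct kref kref;
        struct klist_node node;
    };

    static void my_obj_get(struct klist_node *n)
    {
        kref_get(&container_of(n, struct my_obj, node)->kref);
    }

    static void my_obj_put(struct klist_node *n)
    {
        kref_put(&container_of(n, struct my_obj, node)->kref,
                 my_obj_release);
    }

    static struct klist my_list;

    static void my_setup(void)
    {
        klist_init(&my_list, my_obj_get, my_obj_put);
    }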
+ */ +void klist_add_tail(struct klist_node *n, struct klist *k) +{ + klist_node_init(k, n); + add_tail(k, n); +} +EXPORT_SYMBOL_GPL(klist_add_tail); + +/** + * klist_add_after - Init a klist_node and add it after an existing node + * @n: node we're adding. + * @pos: node to put @n after + */ +void klist_add_after(struct klist_node *n, struct klist_node *pos) +{ + struct klist *k = knode_klist(pos); + + klist_node_init(k, n); + spin_lock(&k->k_lock); + list_add(&n->n_node, &pos->n_node); + spin_unlock(&k->k_lock); +} +EXPORT_SYMBOL_GPL(klist_add_after); + +/** + * klist_add_before - Init a klist_node and add it before an existing node + * @n: node we're adding. + * @pos: node to put @n after + */ +void klist_add_before(struct klist_node *n, struct klist_node *pos) +{ + struct klist *k = knode_klist(pos); + + klist_node_init(k, n); + spin_lock(&k->k_lock); + list_add_tail(&n->n_node, &pos->n_node); + spin_unlock(&k->k_lock); +} +EXPORT_SYMBOL_GPL(klist_add_before); + +struct klist_waiter { + struct list_head list; + struct klist_node *node; + struct task_struct *process; + int woken; +}; + +static DEFINE_SPINLOCK(klist_remove_lock); +static LIST_HEAD(klist_remove_waiters); + +static void klist_release(struct kref *kref) +{ + struct klist_waiter *waiter, *tmp; + struct klist_node *n = container_of(kref, struct klist_node, n_ref); + + WARN_ON(!knode_dead(n)); + list_del(&n->n_node); + spin_lock(&klist_remove_lock); + list_for_each_entry_safe(waiter, tmp, &klist_remove_waiters, list) { + if (waiter->node != n) + continue; + + waiter->woken = 1; + mb(); + wake_up_process(waiter->process); + list_del(&waiter->list); + } + spin_unlock(&klist_remove_lock); + knode_set_klist(n, NULL); +} + +static int klist_dec_and_del(struct klist_node *n) +{ + return kref_put(&n->n_ref, klist_release); +} + +static void klist_put(struct klist_node *n, bool kill) +{ + struct klist *k = knode_klist(n); + void (*put)(struct klist_node *) = k->put; + + spin_lock(&k->k_lock); + if (kill) + knode_kill(n); + if (!klist_dec_and_del(n)) + put = NULL; + spin_unlock(&k->k_lock); + if (put) + put(n); +} + +/** + * klist_del - Decrement the reference count of node and try to remove. + * @n: node we're deleting. + */ +void klist_del(struct klist_node *n) +{ + klist_put(n, true); +} +EXPORT_SYMBOL_GPL(klist_del); + +/** + * klist_remove - Decrement the refcount of node and wait for it to go away. + * @n: node we're removing. + */ +void klist_remove(struct klist_node *n) +{ + struct klist_waiter waiter; + + waiter.node = n; + waiter.process = current; + waiter.woken = 0; + spin_lock(&klist_remove_lock); + list_add(&waiter.list, &klist_remove_waiters); + spin_unlock(&klist_remove_lock); + + klist_del(n); + + for (;;) { + set_current_state(TASK_UNINTERRUPTIBLE); + if (waiter.woken) + break; + schedule(); + } + __set_current_state(TASK_RUNNING); +} +EXPORT_SYMBOL_GPL(klist_remove); + +/** + * klist_node_attached - Say whether a node is bound to a list or not. + * @n: Node that we're testing. + */ +int klist_node_attached(struct klist_node *n) +{ + return (n->n_klist != NULL); +} +EXPORT_SYMBOL_GPL(klist_node_attached); + +/** + * klist_iter_init_node - Initialize a klist_iter structure. + * @k: klist we're iterating. + * @i: klist_iter we're filling. + * @n: node to start with. + * + * Similar to klist_iter_init(), but starts the action off with @n, + * instead of with the list head. 
+ */
+void klist_iter_init_node(struct klist *k, struct klist_iter *i,
+			  struct klist_node *n)
+{
+	i->i_klist = k;
+	i->i_cur = n;
+	if (n)
+		kref_get(&n->n_ref);
+}
+EXPORT_SYMBOL_GPL(klist_iter_init_node);
+
+/**
+ * klist_iter_init - Initialize a klist_iter structure.
+ * @k: klist we're iterating.
+ * @i: klist_iter structure we're filling.
+ *
+ * Similar to klist_iter_init_node(), but starts with the list head.
+ */
+void klist_iter_init(struct klist *k, struct klist_iter *i)
+{
+	klist_iter_init_node(k, i, NULL);
+}
+EXPORT_SYMBOL_GPL(klist_iter_init);
+
+/**
+ * klist_iter_exit - Finish a list iteration.
+ * @i: Iterator structure.
+ *
+ * Must be called when done iterating over a list, as it decrements the
+ * refcount of the current node.  Necessary in case iteration exited before
+ * the end of the list was reached, and always good form.
+ */
+void klist_iter_exit(struct klist_iter *i)
+{
+	if (i->i_cur) {
+		klist_put(i->i_cur, false);
+		i->i_cur = NULL;
+	}
+}
+EXPORT_SYMBOL_GPL(klist_iter_exit);
+
+static struct klist_node *to_klist_node(struct list_head *n)
+{
+	return container_of(n, struct klist_node, n_node);
+}
+
+/**
+ * klist_next - Ante up next node in list.
+ * @i: Iterator structure.
+ *
+ * First grab list lock.  Decrement the reference count of the previous
+ * node, if there was one.  Grab the next node, increment its reference
+ * count, drop the lock, and return that next node.
+ */
+struct klist_node *klist_next(struct klist_iter *i)
+{
+	void (*put)(struct klist_node *) = i->i_klist->put;
+	struct klist_node *last = i->i_cur;
+	struct klist_node *next;
+
+	spin_lock(&i->i_klist->k_lock);
+
+	if (last) {
+		next = to_klist_node(last->n_node.next);
+		if (!klist_dec_and_del(last))
+			put = NULL;
+	} else
+		next = to_klist_node(i->i_klist->k_list.next);
+
+	i->i_cur = NULL;
+	while (next != to_klist_node(&i->i_klist->k_list)) {
+		if (likely(!knode_dead(next))) {
+			kref_get(&next->n_ref);
+			i->i_cur = next;
+			break;
+		}
+		next = to_klist_node(next->n_node.next);
+	}
+
+	spin_unlock(&i->i_klist->k_lock);
+
+	if (put && last)
+		put(last);
+	return i->i_cur;
+}
+EXPORT_SYMBOL_GPL(klist_next);
diff --git a/libdde-linux26/contrib/lib/kobject.c b/libdde-linux26/contrib/lib/kobject.c
new file mode 100644
index 00000000..0487d1f6
--- /dev/null
+++ b/libdde-linux26/contrib/lib/kobject.c
@@ -0,0 +1,850 @@
+/*
+ * kobject.c - library routines for handling generic kernel objects
+ *
+ * Copyright (c) 2002-2003 Patrick Mochel <mochel@osdl.org>
+ * Copyright (c) 2006-2007 Greg Kroah-Hartman <greg@kroah.com>
+ * Copyright (c) 2006-2007 Novell Inc.
+ *
+ * This file is released under the GPLv2.
+ *
+ *
+ * Please see the file Documentation/kobject.txt for critical information
+ * about using the kobject interface.
+ */
+
+#include <linux/kobject.h>
+#include <linux/string.h>
+#include <linux/module.h>
+#include <linux/stat.h>
+#include <linux/slab.h>
+
+/*
+ * populate_dir - populate directory with attributes.
+ * @kobj: object we're working on.
+ *
+ * Most subsystems have a set of default attributes that are associated
+ * with an object that registers with them.  This is a helper called during
+ * object registration that loops through the default attributes of the
+ * subsystem and creates attribute files for them in sysfs.
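Stepping back from kobject.c for a moment: the klist iterator trio above combines as follows; the iterator's reference keeps the current node alive even if another thread klist_del()s it mid-walk (my_list and struct my_obj as sketched earlier):

    struct klist_iter iter;
    struct klist_node *n;

    klist_iter_init(&my_list, &iter);
    while ((n = klist_next(&iter))) {
        struct my_obj *obj = container_of(n, struct my_obj, node);
        /* ... use obj; it cannot go away while we hold its node ... */
    }
    klist_iter_exit(&iter);     /* drops the ref on the last node, if any */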
+ */ +static int populate_dir(struct kobject *kobj) +{ + struct kobj_type *t = get_ktype(kobj); + struct attribute *attr; + int error = 0; + int i; + + if (t && t->default_attrs) { + for (i = 0; (attr = t->default_attrs[i]) != NULL; i++) { + error = sysfs_create_file(kobj, attr); + if (error) + break; + } + } + return error; +} + +static int create_dir(struct kobject *kobj) +{ + int error = 0; + if (kobject_name(kobj)) { + error = sysfs_create_dir(kobj); + if (!error) { + error = populate_dir(kobj); + if (error) + sysfs_remove_dir(kobj); + } + } + return error; +} + +static int get_kobj_path_length(struct kobject *kobj) +{ + int length = 1; + struct kobject *parent = kobj; + + /* walk up the ancestors until we hit the one pointing to the + * root. + * Add 1 to strlen for leading '/' of each level. + */ + do { + if (kobject_name(parent) == NULL) + return 0; + length += strlen(kobject_name(parent)) + 1; + parent = parent->parent; + } while (parent); + return length; +} + +static void fill_kobj_path(struct kobject *kobj, char *path, int length) +{ + struct kobject *parent; + + --length; + for (parent = kobj; parent; parent = parent->parent) { + int cur = strlen(kobject_name(parent)); + /* back up enough to print this name with '/' */ + length -= cur; + strncpy(path + length, kobject_name(parent), cur); + *(path + --length) = '/'; + } + + pr_debug("kobject: '%s' (%p): %s: path = '%s'\n", kobject_name(kobj), + kobj, __func__, path); +} + +/** + * kobject_get_path - generate and return the path associated with a given kobj and kset pair. + * + * @kobj: kobject in question, with which to build the path + * @gfp_mask: the allocation type used to allocate the path + * + * The result must be freed by the caller with kfree(). + */ +char *kobject_get_path(struct kobject *kobj, gfp_t gfp_mask) +{ + char *path; + int len; + + len = get_kobj_path_length(kobj); + if (len == 0) + return NULL; + path = kzalloc(len, gfp_mask); + if (!path) + return NULL; + fill_kobj_path(kobj, path, len); + + return path; +} +EXPORT_SYMBOL_GPL(kobject_get_path); + +/* add the kobject to its kset's list */ +static void kobj_kset_join(struct kobject *kobj) +{ + if (!kobj->kset) + return; + + kset_get(kobj->kset); + spin_lock(&kobj->kset->list_lock); + list_add_tail(&kobj->entry, &kobj->kset->list); + spin_unlock(&kobj->kset->list_lock); +} + +/* remove the kobject from its kset's list */ +static void kobj_kset_leave(struct kobject *kobj) +{ + if (!kobj->kset) + return; + + spin_lock(&kobj->kset->list_lock); + list_del_init(&kobj->entry); + spin_unlock(&kobj->kset->list_lock); + kset_put(kobj->kset); +} + +static void kobject_init_internal(struct kobject *kobj) +{ + if (!kobj) + return; + kref_init(&kobj->kref); + INIT_LIST_HEAD(&kobj->entry); + kobj->state_in_sysfs = 0; + kobj->state_add_uevent_sent = 0; + kobj->state_remove_uevent_sent = 0; + kobj->state_initialized = 1; +} + + +static int kobject_add_internal(struct kobject *kobj) +{ + int error = 0; + struct kobject *parent; + + if (!kobj) + return -ENOENT; + + if (!kobj->name || !kobj->name[0]) { + WARN(1, "kobject: (%p): attempted to be registered with empty " + "name!\n", kobj); + return -EINVAL; + } + + parent = kobject_get(kobj->parent); + + /* join kset if set, use it as parent if we do not already have one */ + if (kobj->kset) { + if (!parent) + parent = kobject_get(&kobj->kset->kobj); + kobj_kset_join(kobj); + kobj->parent = parent; + } + + pr_debug("kobject: '%s' (%p): %s: parent: '%s', set: '%s'\n", + kobject_name(kobj), kobj, __func__, + parent ? 
kobject_name(parent) : "<NULL>",
+                 kobj->kset ? kobject_name(&kobj->kset->kobj) : "<NULL>");
+
+        error = create_dir(kobj);
+        if (error) {
+                kobj_kset_leave(kobj);
+                kobject_put(parent);
+                kobj->parent = NULL;
+
+                /* be noisy on error issues */
+                if (error == -EEXIST)
+                        printk(KERN_ERR "%s failed for %s with "
+                               "-EEXIST, don't try to register things with "
+                               "the same name in the same directory.\n",
+                               __func__, kobject_name(kobj));
+                else
+                        printk(KERN_ERR "%s failed for %s (%d)\n",
+                               __func__, kobject_name(kobj), error);
+                dump_stack();
+        } else
+                kobj->state_in_sysfs = 1;
+
+        return error;
+}
+
+/**
+ * kobject_set_name_vargs - Set the name of a kobject
+ * @kobj: struct kobject to set the name of
+ * @fmt: format string used to build the name
+ * @vargs: vargs to format the string.
+ */
+static int kobject_set_name_vargs(struct kobject *kobj, const char *fmt,
+                                  va_list vargs)
+{
+        const char *old_name = kobj->name;
+        char *s;
+
+        kobj->name = kvasprintf(GFP_KERNEL, fmt, vargs);
+        if (!kobj->name)
+                return -ENOMEM;
+
+        /* ewww... some of these buggers have '/' in the name ... */
+        while ((s = strchr(kobj->name, '/')))
+                s[0] = '!';
+
+        kfree(old_name);
+        return 0;
+}
+
+/**
+ * kobject_set_name - Set the name of a kobject
+ * @kobj: struct kobject to set the name of
+ * @fmt: format string used to build the name
+ *
+ * This sets the name of the kobject. If you have already added the
+ * kobject to the system, you must call kobject_rename() in order to
+ * change the name of the kobject.
+ */
+int kobject_set_name(struct kobject *kobj, const char *fmt, ...)
+{
+        va_list vargs;
+        int retval;
+
+        va_start(vargs, fmt);
+        retval = kobject_set_name_vargs(kobj, fmt, vargs);
+        va_end(vargs);
+
+        return retval;
+}
+EXPORT_SYMBOL(kobject_set_name);
+
+/**
+ * kobject_init - initialize a kobject structure
+ * @kobj: pointer to the kobject to initialize
+ * @ktype: pointer to the ktype for this kobject.
+ *
+ * This function will properly initialize a kobject such that it can then
+ * be passed to the kobject_add() call.
+ *
+ * After this function is called, the kobject MUST be cleaned up by a call
+ * to kobject_put(), not by a direct call to kfree(), to ensure that all
+ * of the memory is cleaned up properly.
+ */
+void kobject_init(struct kobject *kobj, struct kobj_type *ktype)
+{
+        char *err_str;
+
+        if (!kobj) {
+                err_str = "invalid kobject pointer!";
+                goto error;
+        }
+        if (!ktype) {
+                err_str = "must have a ktype to be initialized properly!";
+                goto error;
+        }
+        if (kobj->state_initialized) {
+                /* do not error out as sometimes we can recover */
+                printk(KERN_ERR "kobject (%p): tried to init an initialized "
+                       "object, something is seriously wrong.\n", kobj);
+                dump_stack();
+        }
+
+        kobject_init_internal(kobj);
+        kobj->ktype = ktype;
+        return;
+
+error:
+        printk(KERN_ERR "kobject (%p): %s\n", kobj, err_str);
+        dump_stack();
+}
+EXPORT_SYMBOL(kobject_init);
+
+static int kobject_add_varg(struct kobject *kobj, struct kobject *parent,
+                            const char *fmt, va_list vargs)
+{
+        int retval;
+
+        retval = kobject_set_name_vargs(kobj, fmt, vargs);
+        if (retval) {
+                printk(KERN_ERR "kobject: can not set name properly!\n");
+                return retval;
+        }
+        kobj->parent = parent;
+        return kobject_add_internal(kobj);
+}
+
+/**
+ * kobject_add - the main kobject add function
+ * @kobj: the kobject to add
+ * @parent: pointer to the parent of the kobject.
+ * @fmt: format to name the kobject with.
+ *
+ * The kobject name is set and added to the kobject hierarchy in this
+ * function.
+ *
+ * If @parent is set, then the parent of the @kobj will be set to it.
+ * If @parent is NULL, then the parent of the @kobj will be set to the
+ * kobject associated with the kset assigned to this kobject. If no kset
+ * is assigned to the kobject, then the kobject will be located in the
+ * root of the sysfs tree.
+ *
+ * If this function returns an error, kobject_put() must be called to
+ * properly clean up the memory associated with the object.
+ * Under no instance should the kobject that is passed to this function
+ * be directly freed with a call to kfree(), as that can leak memory.
+ *
+ * Note that no "add" uevent will be created with this call; the caller
+ * should set up all of the necessary sysfs files for the object and then
+ * call kobject_uevent() with the UEVENT_ADD parameter to ensure that
+ * userspace is properly notified of this kobject's creation.
+ */
+int kobject_add(struct kobject *kobj, struct kobject *parent,
+                const char *fmt, ...)
+{
+        va_list args;
+        int retval;
+
+        if (!kobj)
+                return -EINVAL;
+
+        if (!kobj->state_initialized) {
+                printk(KERN_ERR "kobject '%s' (%p): tried to add an "
+                       "uninitialized object, something is seriously wrong.\n",
+                       kobject_name(kobj), kobj);
+                dump_stack();
+                return -EINVAL;
+        }
+        va_start(args, fmt);
+        retval = kobject_add_varg(kobj, parent, fmt, args);
+        va_end(args);
+
+        return retval;
+}
+EXPORT_SYMBOL(kobject_add);
+
+/**
+ * kobject_init_and_add - initialize a kobject structure and add it to the kobject hierarchy
+ * @kobj: pointer to the kobject to initialize
+ * @ktype: pointer to the ktype for this kobject.
+ * @parent: pointer to the parent of this kobject.
+ * @fmt: the name of the kobject.
+ *
+ * This function combines the call to kobject_init() and kobject_add().
+ * The error handling after the call and the kobject lifetime rules are
+ * the same as for kobject_add().
+ */
+int kobject_init_and_add(struct kobject *kobj, struct kobj_type *ktype,
+                         struct kobject *parent, const char *fmt, ...)
+{
+        va_list args;
+        int retval;
+
+        kobject_init(kobj, ktype);
+
+        va_start(args, fmt);
+        retval = kobject_add_varg(kobj, parent, fmt, args);
+        va_end(args);
+
+        return retval;
+}
+EXPORT_SYMBOL_GPL(kobject_init_and_add);
+
+/**
+ * kobject_rename - change the name of an object
+ * @kobj: object in question.
+ * @new_name: object's new name
+ *
+ * It is the responsibility of the caller to provide mutual
+ * exclusion between two different calls of kobject_rename
+ * on the same kobject and to ensure that new_name is valid and
+ * won't conflict with other kobjects.
+ */
+int kobject_rename(struct kobject *kobj, const char *new_name)
+{
+        int error = 0;
+        const char *devpath = NULL;
+        const char *dup_name = NULL, *name;
+        char *devpath_string = NULL;
+        char *envp[2];
+
+        kobj = kobject_get(kobj);
+        if (!kobj)
+                return -EINVAL;
+        if (!kobj->parent)
+                return -EINVAL;
+
+        devpath = kobject_get_path(kobj, GFP_KERNEL);
+        if (!devpath) {
+                error = -ENOMEM;
+                goto out;
+        }
+        devpath_string = kmalloc(strlen(devpath) + 15, GFP_KERNEL);
+        if (!devpath_string) {
+                error = -ENOMEM;
+                goto out;
+        }
+        sprintf(devpath_string, "DEVPATH_OLD=%s", devpath);
+        envp[0] = devpath_string;
+        envp[1] = NULL;
+
+        name = dup_name = kstrdup(new_name, GFP_KERNEL);
+        if (!name) {
+                error = -ENOMEM;
+                goto out;
+        }
+
+        error = sysfs_rename_dir(kobj, new_name);
+        if (error)
+                goto out;
+
+        /* Install the new kobject name */
+        dup_name = kobj->name;
+        kobj->name = name;
+
+        /* This function is mostly/only used for network interfaces.
+         * Some hotplug packages track interfaces by their name and
+         * therefore want to know when the name is changed by the user. */
+        kobject_uevent_env(kobj, KOBJ_MOVE, envp);
+
+out:
+        kfree(dup_name);
+        kfree(devpath_string);
+        kfree(devpath);
+        kobject_put(kobj);
+
+        return error;
+}
+EXPORT_SYMBOL_GPL(kobject_rename);
+
+/**
+ * kobject_move - move object to another parent
+ * @kobj: object in question.
+ * @new_parent: object's new parent (can be NULL)
+ */
+int kobject_move(struct kobject *kobj, struct kobject *new_parent)
+{
+        int error;
+        struct kobject *old_parent;
+        const char *devpath = NULL;
+        char *devpath_string = NULL;
+        char *envp[2];
+
+        kobj = kobject_get(kobj);
+        if (!kobj)
+                return -EINVAL;
+        new_parent = kobject_get(new_parent);
+        if (!new_parent) {
+                if (kobj->kset)
+                        new_parent = kobject_get(&kobj->kset->kobj);
+        }
+        /* old object path */
+        devpath = kobject_get_path(kobj, GFP_KERNEL);
+        if (!devpath) {
+                error = -ENOMEM;
+                goto out;
+        }
+        devpath_string = kmalloc(strlen(devpath) + 15, GFP_KERNEL);
+        if (!devpath_string) {
+                error = -ENOMEM;
+                goto out;
+        }
+        sprintf(devpath_string, "DEVPATH_OLD=%s", devpath);
+        envp[0] = devpath_string;
+        envp[1] = NULL;
+        error = sysfs_move_dir(kobj, new_parent);
+        if (error)
+                goto out;
+        old_parent = kobj->parent;
+        kobj->parent = new_parent;
+        new_parent = NULL;
+        kobject_put(old_parent);
+        kobject_uevent_env(kobj, KOBJ_MOVE, envp);
+out:
+        kobject_put(new_parent);
+        kobject_put(kobj);
+        kfree(devpath_string);
+        kfree(devpath);
+        return error;
+}
+
+/**
+ * kobject_del - unlink kobject from hierarchy.
+ * @kobj: object.
+ */
+void kobject_del(struct kobject *kobj)
+{
+        if (!kobj)
+                return;
+
+        sysfs_remove_dir(kobj);
+        kobj->state_in_sysfs = 0;
+        kobj_kset_leave(kobj);
+        kobject_put(kobj->parent);
+        kobj->parent = NULL;
+}
+
+/**
+ * kobject_get - increment refcount for object.
+ * @kobj: object.
+ */
+struct kobject *kobject_get(struct kobject *kobj)
+{
+        if (kobj)
+                kref_get(&kobj->kref);
+        return kobj;
+}
+
+/*
+ * kobject_cleanup - free kobject resources.
+ * @kobj: object to cleanup
+ */
+static void kobject_cleanup(struct kobject *kobj)
+{
+        struct kobj_type *t = get_ktype(kobj);
+        const char *name = kobj->name;
+
+        pr_debug("kobject: '%s' (%p): %s\n",
+                 kobject_name(kobj), kobj, __func__);
+
+        if (t && !t->release)
+                pr_debug("kobject: '%s' (%p): does not have a release() "
+                         "function, it is broken and must be fixed.\n",
+                         kobject_name(kobj), kobj);
+
+        /* send "remove" if the caller did not do it but sent "add" */
+        if (kobj->state_add_uevent_sent && !kobj->state_remove_uevent_sent) {
+                pr_debug("kobject: '%s' (%p): auto cleanup 'remove' event\n",
+                         kobject_name(kobj), kobj);
+                kobject_uevent(kobj, KOBJ_REMOVE);
+        }
+
+        /* remove from sysfs if the caller did not do it */
+        if (kobj->state_in_sysfs) {
+                pr_debug("kobject: '%s' (%p): auto cleanup kobject_del\n",
+                         kobject_name(kobj), kobj);
+                kobject_del(kobj);
+        }
+
+        if (t && t->release) {
+                pr_debug("kobject: '%s' (%p): calling ktype release\n",
+                         kobject_name(kobj), kobj);
+                t->release(kobj);
+        }
+
+        /* free name if we allocated it */
+        if (name) {
+                pr_debug("kobject: '%s': free name\n", name);
+                kfree(name);
+        }
+}
+
+static void kobject_release(struct kref *kref)
+{
+        kobject_cleanup(container_of(kref, struct kobject, kref));
+}
+
+/**
+ * kobject_put - decrement refcount for object.
+ * @kobj: object.
+ *
+ * Decrement the refcount, and if 0, call kobject_cleanup().
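+ *
+ * A typical dynamically allocated user of these calls therefore looks
+ * roughly like the sketch below; foo, foo_ktype, parent and id are
+ * illustrative only:
+ *
+ *        struct foo *foo = kzalloc(sizeof(*foo), GFP_KERNEL);
+ *        if (!foo)
+ *                return -ENOMEM;
+ *        kobject_init(&foo->kobj, &foo_ktype);
+ *        if (kobject_add(&foo->kobj, parent, "foo%d", id)) {
+ *                kobject_put(&foo->kobj);
+ *                return -EINVAL;
+ *        }
+ *        ...
+ *        kobject_put(&foo->kobj);
+ *
+ * The final kobject_put() drops the refcount to zero and frees the
+ * object through foo_ktype's release() method, never through kfree().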
+ */
+void kobject_put(struct kobject *kobj)
+{
+        if (kobj) {
+                if (!kobj->state_initialized)
+                        WARN(1, KERN_WARNING "kobject: '%s' (%p): is not "
+                             "initialized, yet kobject_put() is being "
+                             "called.\n", kobject_name(kobj), kobj);
+                kref_put(&kobj->kref, kobject_release);
+        }
+}
+
+static void dynamic_kobj_release(struct kobject *kobj)
+{
+        pr_debug("kobject: (%p): %s\n", kobj, __func__);
+        kfree(kobj);
+}
+
+static struct kobj_type dynamic_kobj_ktype = {
+        .release        = dynamic_kobj_release,
+        .sysfs_ops      = &kobj_sysfs_ops,
+};
+
+/**
+ * kobject_create - create a struct kobject dynamically
+ *
+ * This function creates a kobject structure dynamically and sets it up
+ * to be a "dynamic" kobject with a default release function set up.
+ *
+ * If the kobject was not able to be created, NULL will be returned.
+ * The kobject structure returned from here must be cleaned up with a
+ * call to kobject_put() and not kfree(), as kobject_init() has
+ * already been called on this structure.
+ */
+struct kobject *kobject_create(void)
+{
+        struct kobject *kobj;
+
+        kobj = kzalloc(sizeof(*kobj), GFP_KERNEL);
+        if (!kobj)
+                return NULL;
+
+        kobject_init(kobj, &dynamic_kobj_ktype);
+        return kobj;
+}
+
+/**
+ * kobject_create_and_add - create a struct kobject dynamically and register it with sysfs
+ *
+ * @name: the name for the kobject
+ * @parent: the parent kobject of this kobject, if any.
+ *
+ * This function creates a kobject structure dynamically and registers it
+ * with sysfs. When you are finished with this structure, call
+ * kobject_put() and the structure will be dynamically freed when
+ * it is no longer being used.
+ *
+ * If the kobject was not able to be created, NULL will be returned.
+ */
+struct kobject *kobject_create_and_add(const char *name, struct kobject *parent)
+{
+        struct kobject *kobj;
+        int retval;
+
+        kobj = kobject_create();
+        if (!kobj)
+                return NULL;
+
+        retval = kobject_add(kobj, parent, "%s", name);
+        if (retval) {
+                printk(KERN_WARNING "%s: kobject_add error: %d\n",
+                       __func__, retval);
+                kobject_put(kobj);
+                kobj = NULL;
+        }
+        return kobj;
+}
+EXPORT_SYMBOL_GPL(kobject_create_and_add);
+
+/**
+ * kset_init - initialize a kset for use
+ * @k: kset
+ */
+void kset_init(struct kset *k)
+{
+        kobject_init_internal(&k->kobj);
+        INIT_LIST_HEAD(&k->list);
+        spin_lock_init(&k->list_lock);
+}
+
+/* default kobject attribute operations */
+static ssize_t kobj_attr_show(struct kobject *kobj, struct attribute *attr,
+                              char *buf)
+{
+        struct kobj_attribute *kattr;
+        ssize_t ret = -EIO;
+
+        kattr = container_of(attr, struct kobj_attribute, attr);
+        if (kattr->show)
+                ret = kattr->show(kobj, kattr, buf);
+        return ret;
+}
+
+static ssize_t kobj_attr_store(struct kobject *kobj, struct attribute *attr,
+                               const char *buf, size_t count)
+{
+        struct kobj_attribute *kattr;
+        ssize_t ret = -EIO;
+
+        kattr = container_of(attr, struct kobj_attribute, attr);
+        if (kattr->store)
+                ret = kattr->store(kobj, kattr, buf, count);
+        return ret;
+}
+
+struct sysfs_ops kobj_sysfs_ops = {
+        .show   = kobj_attr_show,
+        .store  = kobj_attr_store,
+};
+
+/**
+ * kset_register - initialize and add a kset.
+ * @k: kset.
+ */
+int kset_register(struct kset *k)
+{
+        int err;
+
+        if (!k)
+                return -EINVAL;
+
+        kset_init(k);
+        err = kobject_add_internal(&k->kobj);
+        if (err)
+                return err;
+        kobject_uevent(&k->kobj, KOBJ_ADD);
+        return 0;
+}
+
+/**
+ * kset_unregister - remove a kset.
+ * @k: kset.
+ */ +void kset_unregister(struct kset *k) +{ + if (!k) + return; + kobject_put(&k->kobj); +} + +/** + * kset_find_obj - search for object in kset. + * @kset: kset we're looking in. + * @name: object's name. + * + * Lock kset via @kset->subsys, and iterate over @kset->list, + * looking for a matching kobject. If matching object is found + * take a reference and return the object. + */ +struct kobject *kset_find_obj(struct kset *kset, const char *name) +{ + struct kobject *k; + struct kobject *ret = NULL; + + spin_lock(&kset->list_lock); + list_for_each_entry(k, &kset->list, entry) { + if (kobject_name(k) && !strcmp(kobject_name(k), name)) { + ret = kobject_get(k); + break; + } + } + spin_unlock(&kset->list_lock); + return ret; +} + +static void kset_release(struct kobject *kobj) +{ + struct kset *kset = container_of(kobj, struct kset, kobj); + pr_debug("kobject: '%s' (%p): %s\n", + kobject_name(kobj), kobj, __func__); + kfree(kset); +} + +static struct kobj_type kset_ktype = { + .sysfs_ops = &kobj_sysfs_ops, + .release = kset_release, +}; + +/** + * kset_create - create a struct kset dynamically + * + * @name: the name for the kset + * @uevent_ops: a struct kset_uevent_ops for the kset + * @parent_kobj: the parent kobject of this kset, if any. + * + * This function creates a kset structure dynamically. This structure can + * then be registered with the system and show up in sysfs with a call to + * kset_register(). When you are finished with this structure, if + * kset_register() has been called, call kset_unregister() and the + * structure will be dynamically freed when it is no longer being used. + * + * If the kset was not able to be created, NULL will be returned. + */ +static struct kset *kset_create(const char *name, + struct kset_uevent_ops *uevent_ops, + struct kobject *parent_kobj) +{ + struct kset *kset; + + kset = kzalloc(sizeof(*kset), GFP_KERNEL); + if (!kset) + return NULL; + kobject_set_name(&kset->kobj, name); + kset->uevent_ops = uevent_ops; + kset->kobj.parent = parent_kobj; + + /* + * The kobject of this kset will have a type of kset_ktype and belong to + * no kset itself. That way we can properly free it when it is + * finished being used. + */ + kset->kobj.ktype = &kset_ktype; + kset->kobj.kset = NULL; + + return kset; +} + +/** + * kset_create_and_add - create a struct kset dynamically and add it to sysfs + * + * @name: the name for the kset + * @uevent_ops: a struct kset_uevent_ops for the kset + * @parent_kobj: the parent kobject of this kset, if any. + * + * This function creates a kset structure dynamically and registers it + * with sysfs. When you are finished with this structure, call + * kset_unregister() and the structure will be dynamically freed when it + * is no longer being used. + * + * If the kset was not able to be created, NULL will be returned. 
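+ *
+ * A minimal use might look like this ("example" and the use of
+ * kernel_kobj, the /sys/kernel kobject, as parent are illustrative only):
+ *
+ *        static struct kset *example_kset;
+ *
+ *        example_kset = kset_create_and_add("example", NULL, kernel_kobj);
+ *        if (!example_kset)
+ *                return -ENOMEM;
+ *        ...
+ *        kset_unregister(example_kset);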
+ */
+struct kset *kset_create_and_add(const char *name,
+                                 struct kset_uevent_ops *uevent_ops,
+                                 struct kobject *parent_kobj)
+{
+        struct kset *kset;
+        int error;
+
+        kset = kset_create(name, uevent_ops, parent_kobj);
+        if (!kset)
+                return NULL;
+        error = kset_register(kset);
+        if (error) {
+                kfree(kset);
+                return NULL;
+        }
+        return kset;
+}
+EXPORT_SYMBOL_GPL(kset_create_and_add);
+
+EXPORT_SYMBOL(kobject_get);
+EXPORT_SYMBOL(kobject_put);
+EXPORT_SYMBOL(kobject_del);
+
+EXPORT_SYMBOL(kset_register);
+EXPORT_SYMBOL(kset_unregister);
diff --git a/libdde-linux26/contrib/lib/kref.c b/libdde-linux26/contrib/lib/kref.c
new file mode 100644
index 00000000..9ecd6e86
--- /dev/null
+++ b/libdde-linux26/contrib/lib/kref.c
@@ -0,0 +1,77 @@
+/*
+ * kref.c - library routines for handling generic reference counted objects
+ *
+ * Copyright (C) 2004 Greg Kroah-Hartman <greg@kroah.com>
+ * Copyright (C) 2004 IBM Corp.
+ *
+ * based on lib/kobject.c which was:
+ * Copyright (C) 2002-2003 Patrick Mochel <mochel@osdl.org>
+ *
+ * This file is released under the GPLv2.
+ *
+ */
+
+#include <linux/kref.h>
+#include <linux/module.h>
+
+/**
+ * kref_set - initialize object and set refcount to requested number.
+ * @kref: object in question.
+ * @num: initial reference counter
+ */
+void kref_set(struct kref *kref, int num)
+{
+        atomic_set(&kref->refcount, num);
+        smp_mb();
+}
+
+/**
+ * kref_init - initialize object.
+ * @kref: object in question.
+ */
+void kref_init(struct kref *kref)
+{
+        kref_set(kref, 1);
+}
+
+/**
+ * kref_get - increment refcount for object.
+ * @kref: object.
+ */
+void kref_get(struct kref *kref)
+{
+        WARN_ON(!atomic_read(&kref->refcount));
+        atomic_inc(&kref->refcount);
+        smp_mb__after_atomic_inc();
+}
+
+/**
+ * kref_put - decrement refcount for object.
+ * @kref: object.
+ * @release: pointer to the function that will clean up the object when the
+ *           last reference to the object is released.
+ *           This pointer is required, and it is not acceptable to pass
+ *           kfree() as this function.
+ *
+ * Decrement the refcount, and if 0, call release().
+ * Return 1 if the object was removed, otherwise return 0. Beware: even if
+ * this function returns 0, you cannot count on the kref still being in
+ * memory afterwards. Use the return value only to learn whether the kref
+ * is now gone, never to conclude that it is still present.
+ */
+int kref_put(struct kref *kref, void (*release)(struct kref *kref))
+{
+        WARN_ON(release == NULL);
+        WARN_ON(release == (void (*)(struct kref *))kfree);
+
+        if (atomic_dec_and_test(&kref->refcount)) {
+                release(kref);
+                return 1;
+        }
+        return 0;
+}
+
+EXPORT_SYMBOL(kref_set);
+EXPORT_SYMBOL(kref_init);
+EXPORT_SYMBOL(kref_get);
+EXPORT_SYMBOL(kref_put);
diff --git a/libdde-linux26/contrib/lib/parser.c b/libdde-linux26/contrib/lib/parser.c
new file mode 100644
index 00000000..b00d0205
--- /dev/null
+++ b/libdde-linux26/contrib/lib/parser.c
@@ -0,0 +1,228 @@
+/*
+ * lib/parser.c - simple parser for mount, etc. options.
+ *
+ * This source code is licensed under the GNU General Public License,
+ * Version 2. See the file COPYING for more details.
+ */
+
+#include <linux/ctype.h>
+#include <linux/module.h>
+#include <linux/parser.h>
+#include <linux/slab.h>
+#include <linux/string.h>
+
+/**
+ * match_one: - Determines if a string matches a simple pattern
+ * @s: the string to examine for presence of the pattern
+ * @p: the string containing the pattern
+ * @args: array of %MAX_OPT_ARGS &substring_t elements. Used to return match
+ * locations.
+ * + * Description: Determines if the pattern @p is present in string @s. Can only + * match extremely simple token=arg style patterns. If the pattern is found, + * the location(s) of the arguments will be returned in the @args array. + */ +static int match_one(char *s, const char *p, substring_t args[]) +{ + char *meta; + int argc = 0; + + if (!p) + return 1; + + while(1) { + int len = -1; + meta = strchr(p, '%'); + if (!meta) + return strcmp(p, s) == 0; + + if (strncmp(p, s, meta-p)) + return 0; + + s += meta - p; + p = meta + 1; + + if (isdigit(*p)) + len = simple_strtoul(p, (char **) &p, 10); + else if (*p == '%') { + if (*s++ != '%') + return 0; + p++; + continue; + } + + if (argc >= MAX_OPT_ARGS) + return 0; + + args[argc].from = s; + switch (*p++) { + case 's': + if (strlen(s) == 0) + return 0; + else if (len == -1 || len > strlen(s)) + len = strlen(s); + args[argc].to = s + len; + break; + case 'd': + simple_strtol(s, &args[argc].to, 0); + goto num; + case 'u': + simple_strtoul(s, &args[argc].to, 0); + goto num; + case 'o': + simple_strtoul(s, &args[argc].to, 8); + goto num; + case 'x': + simple_strtoul(s, &args[argc].to, 16); + num: + if (args[argc].to == args[argc].from) + return 0; + break; + default: + return 0; + } + s = args[argc].to; + argc++; + } +} + +/** + * match_token: - Find a token (and optional args) in a string + * @s: the string to examine for token/argument pairs + * @table: match_table_t describing the set of allowed option tokens and the + * arguments that may be associated with them. Must be terminated with a + * &struct match_token whose pattern is set to the NULL pointer. + * @args: array of %MAX_OPT_ARGS &substring_t elements. Used to return match + * locations. + * + * Description: Detects which if any of a set of token strings has been passed + * to it. Tokens can include up to MAX_OPT_ARGS instances of basic c-style + * format identifiers which will be taken into account when matching the + * tokens, and whose locations will be returned in the @args array. + */ +int match_token(char *s, const match_table_t table, substring_t args[]) +{ + const struct match_token *p; + + for (p = table; !match_one(s, p->pattern, args) ; p++) + ; + + return p->token; +} + +/** + * match_number: scan a number in the given base from a substring_t + * @s: substring to be scanned + * @result: resulting integer on success + * @base: base to use when converting string + * + * Description: Given a &substring_t and a base, attempts to parse the substring + * as a number in that base. On success, sets @result to the integer represented + * by the string and returns 0. Returns either -ENOMEM or -EINVAL on failure. + */ +static int match_number(substring_t *s, int *result, int base) +{ + char *endp; + char *buf; + int ret; + + buf = kmalloc(s->to - s->from + 1, GFP_KERNEL); + if (!buf) + return -ENOMEM; + memcpy(buf, s->from, s->to - s->from); + buf[s->to - s->from] = '\0'; + *result = simple_strtol(buf, &endp, base); + ret = 0; + if (endp == buf) + ret = -EINVAL; + kfree(buf); + return ret; +} + +/** + * match_int: - scan a decimal representation of an integer from a substring_t + * @s: substring_t to be scanned + * @result: resulting integer on success + * + * Description: Attempts to parse the &substring_t @s as a decimal integer. On + * success, sets @result to the integer represented by the string and returns 0. + * Returns either -ENOMEM or -EINVAL on failure. 
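+ *
+ * Typically used together with match_token(); the token table and option
+ * name below are illustrative only:
+ *
+ *        enum { Opt_uid, Opt_err };
+ *        static const match_table_t tokens = {
+ *                {Opt_uid, "uid=%d"},
+ *                {Opt_err, NULL}
+ *        };
+ *
+ *        substring_t args[MAX_OPT_ARGS];
+ *        int uid;
+ *
+ *        switch (match_token(p, tokens, args)) {
+ *        case Opt_uid:
+ *                if (match_int(&args[0], &uid))
+ *                        return -EINVAL;
+ *                break;
+ *        }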
+ */ +int match_int(substring_t *s, int *result) +{ + return match_number(s, result, 0); +} + +/** + * match_octal: - scan an octal representation of an integer from a substring_t + * @s: substring_t to be scanned + * @result: resulting integer on success + * + * Description: Attempts to parse the &substring_t @s as an octal integer. On + * success, sets @result to the integer represented by the string and returns + * 0. Returns either -ENOMEM or -EINVAL on failure. + */ +int match_octal(substring_t *s, int *result) +{ + return match_number(s, result, 8); +} + +/** + * match_hex: - scan a hex representation of an integer from a substring_t + * @s: substring_t to be scanned + * @result: resulting integer on success + * + * Description: Attempts to parse the &substring_t @s as a hexadecimal integer. + * On success, sets @result to the integer represented by the string and + * returns 0. Returns either -ENOMEM or -EINVAL on failure. + */ +int match_hex(substring_t *s, int *result) +{ + return match_number(s, result, 16); +} + +/** + * match_strlcpy: - Copy the characters from a substring_t to a sized buffer + * @dest: where to copy to + * @src: &substring_t to copy + * @size: size of destination buffer + * + * Description: Copy the characters in &substring_t @src to the + * c-style string @dest. Copy no more than @size - 1 characters, plus + * the terminating NUL. Return length of @src. + */ +size_t match_strlcpy(char *dest, const substring_t *src, size_t size) +{ + size_t ret = src->to - src->from; + + if (size) { + size_t len = ret >= size ? size - 1 : ret; + memcpy(dest, src->from, len); + dest[len] = '\0'; + } + return ret; +} + +/** + * match_strdup: - allocate a new string with the contents of a substring_t + * @s: &substring_t to copy + * + * Description: Allocates and returns a string filled with the contents of + * the &substring_t @s. The caller is responsible for freeing the returned + * string with kfree(). + */ +char *match_strdup(const substring_t *s) +{ + size_t sz = s->to - s->from + 1; + char *p = kmalloc(sz, GFP_KERNEL); + if (p) + match_strlcpy(p, s, sz); + return p; +} + +EXPORT_SYMBOL(match_token); +EXPORT_SYMBOL(match_int); +EXPORT_SYMBOL(match_octal); +EXPORT_SYMBOL(match_hex); +EXPORT_SYMBOL(match_strlcpy); +EXPORT_SYMBOL(match_strdup); diff --git a/libdde-linux26/contrib/lib/proportions.c b/libdde-linux26/contrib/lib/proportions.c new file mode 100644 index 00000000..d50746a7 --- /dev/null +++ b/libdde-linux26/contrib/lib/proportions.c @@ -0,0 +1,407 @@ +/* + * Floating proportions + * + * Copyright (C) 2007 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com> + * + * Description: + * + * The floating proportion is a time derivative with an exponentially decaying + * history: + * + * p_{j} = \Sum_{i=0} (dx_{j}/dt_{-i}) / 2^(1+i) + * + * Where j is an element from {prop_local}, x_{j} is j's number of events, + * and i the time period over which the differential is taken. So d/dt_{-i} is + * the differential over the i-th last period. + * + * The decaying history gives smooth transitions. The time differential carries + * the notion of speed. + * + * The denominator is 2^(1+i) because we want the series to be normalised, ie. 
+ *
+ * \Sum_{i=0} 1/2^(1+i) = 1
+ *
+ * Furthermore, if we measure time (t) in the same events as x; so that:
+ *
+ * t = \Sum_{j} x_{j}
+ *
+ * we get that:
+ *
+ * \Sum_{j} p_{j} = 1
+ *
+ * Writing this in an iterative fashion we get (dropping the 'd's):
+ *
+ * if (++x_{j}, ++t > period)
+ *        t /= 2;
+ *        for_each (j)
+ *                x_{j} /= 2;
+ *
+ * so that:
+ *
+ * p_{j} = x_{j} / t;
+ *
+ * We optimize away the '/= 2' for the global time delta by noting that:
+ *
+ * if (++t > period) t /= 2;
+ *
+ * can be approximated by:
+ *
+ * period/2 + (++t % period/2)
+ *
+ * [ Furthermore, when we choose period to be 2^n it can be written in terms of
+ *   binary operations and wraparound artefacts disappear. ]
+ *
+ * Also note that this yields a natural counter of the elapsed periods:
+ *
+ * c = t / (period/2)
+ *
+ * [ Its monotonically increasing property can be applied to mitigate the wrap-
+ *   around issue. ]
+ *
+ * This allows us to do away with the loop over all prop_locals on each period
+ * expiration. By remembering the period count under which it was last accessed
+ * as c_{j}, we can obtain the number of 'missed' cycles from:
+ *
+ * c - c_{j}
+ *
+ * We can then lazily catch up to the global period count every time we are
+ * going to use x_{j}, by doing:
+ *
+ * x_{j} /= 2^(c - c_{j}), c_{j} = c
+ */
+
+#include <linux/proportions.h>
+#include <linux/rcupdate.h>
+
+int prop_descriptor_init(struct prop_descriptor *pd, int shift)
+{
+        int err;
+
+        if (shift > PROP_MAX_SHIFT)
+                shift = PROP_MAX_SHIFT;
+
+        pd->index = 0;
+        pd->pg[0].shift = shift;
+        mutex_init(&pd->mutex);
+        err = percpu_counter_init(&pd->pg[0].events, 0);
+        if (err)
+                goto out;
+
+        err = percpu_counter_init(&pd->pg[1].events, 0);
+        if (err)
+                percpu_counter_destroy(&pd->pg[0].events);
+
+out:
+        return err;
+}
+
+/*
+ * We have two copies, and flip between them to make it seem like an atomic
+ * update. The update is not really atomic wrt the events counter, but
+ * it is internally consistent with the bit layout depending on shift.
+ *
+ * We copy the events count, move the bits around and flip the index.
+ */
+void prop_change_shift(struct prop_descriptor *pd, int shift)
+{
+        int index;
+        int offset;
+        u64 events;
+        unsigned long flags;
+
+        if (shift > PROP_MAX_SHIFT)
+                shift = PROP_MAX_SHIFT;
+
+        mutex_lock(&pd->mutex);
+
+        index = pd->index ^ 1;
+        offset = pd->pg[pd->index].shift - shift;
+        if (!offset)
+                goto out;
+
+        pd->pg[index].shift = shift;
+
+        local_irq_save(flags);
+        events = percpu_counter_sum(&pd->pg[pd->index].events);
+        if (offset < 0)
+                events <<= -offset;
+        else
+                events >>= offset;
+        percpu_counter_set(&pd->pg[index].events, events);
+
+        /*
+         * ensure the new pg is fully written before the switch
+         */
+        smp_wmb();
+        pd->index = index;
+        local_irq_restore(flags);
+
+        synchronize_rcu();
+
+out:
+        mutex_unlock(&pd->mutex);
+}
+
+/*
+ * wrap the access to the data in an rcu_read_lock() section;
+ * this is used to track the active references.
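+ *
+ * Readers obtained through prop_get_global() pin the current prop_global;
+ * the synchronize_rcu() in prop_change_shift() then guarantees that no
+ * such reader can still be looking at the old copy once the index has
+ * been flipped.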
+ */ +static struct prop_global *prop_get_global(struct prop_descriptor *pd) +__acquires(RCU) +{ + int index; + + rcu_read_lock(); + index = pd->index; + /* + * match the wmb from vcd_flip() + */ + smp_rmb(); + return &pd->pg[index]; +} + +static void prop_put_global(struct prop_descriptor *pd, struct prop_global *pg) +__releases(RCU) +{ + rcu_read_unlock(); +} + +static void +prop_adjust_shift(int *pl_shift, unsigned long *pl_period, int new_shift) +{ + int offset = *pl_shift - new_shift; + + if (!offset) + return; + + if (offset < 0) + *pl_period <<= -offset; + else + *pl_period >>= offset; + + *pl_shift = new_shift; +} + +/* + * PERCPU + */ + +#define PROP_BATCH (8*(1+ilog2(nr_cpu_ids))) + +int prop_local_init_percpu(struct prop_local_percpu *pl) +{ + spin_lock_init(&pl->lock); + pl->shift = 0; + pl->period = 0; + return percpu_counter_init(&pl->events, 0); +} + +void prop_local_destroy_percpu(struct prop_local_percpu *pl) +{ + percpu_counter_destroy(&pl->events); +} + +/* + * Catch up with missed period expirations. + * + * until (c_{j} == c) + * x_{j} -= x_{j}/2; + * c_{j}++; + */ +static +void prop_norm_percpu(struct prop_global *pg, struct prop_local_percpu *pl) +{ + unsigned long period = 1UL << (pg->shift - 1); + unsigned long period_mask = ~(period - 1); + unsigned long global_period; + unsigned long flags; + + global_period = percpu_counter_read(&pg->events); + global_period &= period_mask; + + /* + * Fast path - check if the local and global period count still match + * outside of the lock. + */ + if (pl->period == global_period) + return; + + spin_lock_irqsave(&pl->lock, flags); + prop_adjust_shift(&pl->shift, &pl->period, pg->shift); + + /* + * For each missed period, we half the local counter. + * basically: + * pl->events >> (global_period - pl->period); + */ + period = (global_period - pl->period) >> (pg->shift - 1); + if (period < BITS_PER_LONG) { + s64 val = percpu_counter_read(&pl->events); + + if (val < (nr_cpu_ids * PROP_BATCH)) + val = percpu_counter_sum(&pl->events); + + __percpu_counter_add(&pl->events, -val + (val >> period), + PROP_BATCH); + } else + percpu_counter_set(&pl->events, 0); + + pl->period = global_period; + spin_unlock_irqrestore(&pl->lock, flags); +} + +/* + * ++x_{j}, ++t + */ +void __prop_inc_percpu(struct prop_descriptor *pd, struct prop_local_percpu *pl) +{ + struct prop_global *pg = prop_get_global(pd); + + prop_norm_percpu(pg, pl); + __percpu_counter_add(&pl->events, 1, PROP_BATCH); + percpu_counter_add(&pg->events, 1); + prop_put_global(pd, pg); +} + +/* + * identical to __prop_inc_percpu, except that it limits this pl's fraction to + * @frac/PROP_FRAC_BASE by ignoring events when this limit has been exceeded. 
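+ *
+ * E.g. with frac = PROP_FRAC_BASE / 4, events for this local counter are
+ * simply dropped (counted nowhere) whenever the counter already accounts
+ * for more than a quarter of the current period window.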
+ */ +void __prop_inc_percpu_max(struct prop_descriptor *pd, + struct prop_local_percpu *pl, long frac) +{ + struct prop_global *pg = prop_get_global(pd); + + prop_norm_percpu(pg, pl); + + if (unlikely(frac != PROP_FRAC_BASE)) { + unsigned long period_2 = 1UL << (pg->shift - 1); + unsigned long counter_mask = period_2 - 1; + unsigned long global_count; + long numerator, denominator; + + numerator = percpu_counter_read_positive(&pl->events); + global_count = percpu_counter_read(&pg->events); + denominator = period_2 + (global_count & counter_mask); + + if (numerator > ((denominator * frac) >> PROP_FRAC_SHIFT)) + goto out_put; + } + + percpu_counter_add(&pl->events, 1); + percpu_counter_add(&pg->events, 1); + +out_put: + prop_put_global(pd, pg); +} + +/* + * Obtain a fraction of this proportion + * + * p_{j} = x_{j} / (period/2 + t % period/2) + */ +void prop_fraction_percpu(struct prop_descriptor *pd, + struct prop_local_percpu *pl, + long *numerator, long *denominator) +{ + struct prop_global *pg = prop_get_global(pd); + unsigned long period_2 = 1UL << (pg->shift - 1); + unsigned long counter_mask = period_2 - 1; + unsigned long global_count; + + prop_norm_percpu(pg, pl); + *numerator = percpu_counter_read_positive(&pl->events); + + global_count = percpu_counter_read(&pg->events); + *denominator = period_2 + (global_count & counter_mask); + + prop_put_global(pd, pg); +} + +/* + * SINGLE + */ + +int prop_local_init_single(struct prop_local_single *pl) +{ + spin_lock_init(&pl->lock); + pl->shift = 0; + pl->period = 0; + pl->events = 0; + return 0; +} + +void prop_local_destroy_single(struct prop_local_single *pl) +{ +} + +/* + * Catch up with missed period expirations. + */ +static +void prop_norm_single(struct prop_global *pg, struct prop_local_single *pl) +{ + unsigned long period = 1UL << (pg->shift - 1); + unsigned long period_mask = ~(period - 1); + unsigned long global_period; + unsigned long flags; + + global_period = percpu_counter_read(&pg->events); + global_period &= period_mask; + + /* + * Fast path - check if the local and global period count still match + * outside of the lock. + */ + if (pl->period == global_period) + return; + + spin_lock_irqsave(&pl->lock, flags); + prop_adjust_shift(&pl->shift, &pl->period, pg->shift); + /* + * For each missed period, we half the local counter. 
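+         * That is, after three missed periods the counter is shifted
+         * right by three bits, i.e. divided by eight.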
+ */ + period = (global_period - pl->period) >> (pg->shift - 1); + if (likely(period < BITS_PER_LONG)) + pl->events >>= period; + else + pl->events = 0; + pl->period = global_period; + spin_unlock_irqrestore(&pl->lock, flags); +} + +/* + * ++x_{j}, ++t + */ +void __prop_inc_single(struct prop_descriptor *pd, struct prop_local_single *pl) +{ + struct prop_global *pg = prop_get_global(pd); + + prop_norm_single(pg, pl); + pl->events++; + percpu_counter_add(&pg->events, 1); + prop_put_global(pd, pg); +} + +/* + * Obtain a fraction of this proportion + * + * p_{j} = x_{j} / (period/2 + t % period/2) + */ +void prop_fraction_single(struct prop_descriptor *pd, + struct prop_local_single *pl, + long *numerator, long *denominator) +{ + struct prop_global *pg = prop_get_global(pd); + unsigned long period_2 = 1UL << (pg->shift - 1); + unsigned long counter_mask = period_2 - 1; + unsigned long global_count; + + prop_norm_single(pg, pl); + *numerator = pl->events; + + global_count = percpu_counter_read(&pg->events); + *denominator = period_2 + (global_count & counter_mask); + + prop_put_global(pd, pg); +} diff --git a/libdde-linux26/contrib/lib/radix-tree.c b/libdde-linux26/contrib/lib/radix-tree.c new file mode 100644 index 00000000..4bb42a03 --- /dev/null +++ b/libdde-linux26/contrib/lib/radix-tree.c @@ -0,0 +1,1240 @@ +/* + * Copyright (C) 2001 Momchil Velikov + * Portions Copyright (C) 2001 Christoph Hellwig + * Copyright (C) 2005 SGI, Christoph Lameter + * Copyright (C) 2006 Nick Piggin + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; either version 2, or (at + * your option) any later version. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. + */ + +#include <linux/errno.h> +#include <linux/init.h> +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/radix-tree.h> +#include <linux/percpu.h> +#include <linux/slab.h> +#include <linux/notifier.h> +#include <linux/cpu.h> +#include <linux/gfp.h> +#include <linux/string.h> +#include <linux/bitops.h> +#include <linux/rcupdate.h> + + +#ifdef __KERNEL__ +#define RADIX_TREE_MAP_SHIFT (CONFIG_BASE_SMALL ? 4 : 6) +#else +#define RADIX_TREE_MAP_SHIFT 3 /* For more stressful testing */ +#endif + +#define RADIX_TREE_MAP_SIZE (1UL << RADIX_TREE_MAP_SHIFT) +#define RADIX_TREE_MAP_MASK (RADIX_TREE_MAP_SIZE-1) + +#define RADIX_TREE_TAG_LONGS \ + ((RADIX_TREE_MAP_SIZE + BITS_PER_LONG - 1) / BITS_PER_LONG) + +struct radix_tree_node { + unsigned int height; /* Height from the bottom */ + unsigned int count; + struct rcu_head rcu_head; + void *slots[RADIX_TREE_MAP_SIZE]; + unsigned long tags[RADIX_TREE_MAX_TAGS][RADIX_TREE_TAG_LONGS]; +}; + +struct radix_tree_path { + struct radix_tree_node *node; + int offset; +}; + +#define RADIX_TREE_INDEX_BITS (8 /* CHAR_BIT */ * sizeof(unsigned long)) +#define RADIX_TREE_MAX_PATH (DIV_ROUND_UP(RADIX_TREE_INDEX_BITS, \ + RADIX_TREE_MAP_SHIFT)) + +/* + * The height_to_maxindex array needs to be one deeper than the maximum + * path as height 0 holds only 1 entry. 
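+ * With RADIX_TREE_MAP_SHIFT == 6 on a 64-bit kernel, for example, height
+ * 1 covers indices 0..63, height 2 covers 0..4095, and so on; height 0
+ * holds just the single entry at index 0.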
+ */ +static unsigned long height_to_maxindex[RADIX_TREE_MAX_PATH + 1] __read_mostly; + +/* + * Radix tree node cache. + */ +static struct kmem_cache *radix_tree_node_cachep; + +/* + * Per-cpu pool of preloaded nodes + */ +struct radix_tree_preload { + int nr; + struct radix_tree_node *nodes[RADIX_TREE_MAX_PATH]; +}; +static DEFINE_PER_CPU(struct radix_tree_preload, radix_tree_preloads) = { 0, }; + +static inline gfp_t root_gfp_mask(struct radix_tree_root *root) +{ + return root->gfp_mask & __GFP_BITS_MASK; +} + +static inline void tag_set(struct radix_tree_node *node, unsigned int tag, + int offset) +{ + __set_bit(offset, node->tags[tag]); +} + +static inline void tag_clear(struct radix_tree_node *node, unsigned int tag, + int offset) +{ + __clear_bit(offset, node->tags[tag]); +} + +static inline int tag_get(struct radix_tree_node *node, unsigned int tag, + int offset) +{ + return test_bit(offset, node->tags[tag]); +} + +static inline void root_tag_set(struct radix_tree_root *root, unsigned int tag) +{ + root->gfp_mask |= (__force gfp_t)(1 << (tag + __GFP_BITS_SHIFT)); +} + +static inline void root_tag_clear(struct radix_tree_root *root, unsigned int tag) +{ + root->gfp_mask &= (__force gfp_t)~(1 << (tag + __GFP_BITS_SHIFT)); +} + +static inline void root_tag_clear_all(struct radix_tree_root *root) +{ + root->gfp_mask &= __GFP_BITS_MASK; +} + +static inline int root_tag_get(struct radix_tree_root *root, unsigned int tag) +{ + return (__force unsigned)root->gfp_mask & (1 << (tag + __GFP_BITS_SHIFT)); +} + +/* + * Returns 1 if any slot in the node has this tag set. + * Otherwise returns 0. + */ +static inline int any_tag_set(struct radix_tree_node *node, unsigned int tag) +{ + int idx; + for (idx = 0; idx < RADIX_TREE_TAG_LONGS; idx++) { + if (node->tags[tag][idx]) + return 1; + } + return 0; +} +/* + * This assumes that the caller has performed appropriate preallocation, and + * that the caller has pinned this thread of control to the current CPU. + */ +static struct radix_tree_node * +radix_tree_node_alloc(struct radix_tree_root *root) +{ + struct radix_tree_node *ret = NULL; + gfp_t gfp_mask = root_gfp_mask(root); + + if (!(gfp_mask & __GFP_WAIT)) { + struct radix_tree_preload *rtp; + + /* + * Provided the caller has preloaded here, we will always + * succeed in getting a node here (and never reach + * kmem_cache_alloc) + */ + rtp = &__get_cpu_var(radix_tree_preloads); + if (rtp->nr) { + ret = rtp->nodes[rtp->nr - 1]; + rtp->nodes[rtp->nr - 1] = NULL; + rtp->nr--; + } + } + if (ret == NULL) + ret = kmem_cache_alloc(radix_tree_node_cachep, gfp_mask); + + BUG_ON(radix_tree_is_indirect_ptr(ret)); + return ret; +} + +static void radix_tree_node_rcu_free(struct rcu_head *head) +{ + struct radix_tree_node *node = + container_of(head, struct radix_tree_node, rcu_head); + + /* + * must only free zeroed nodes into the slab. radix_tree_shrink + * can leave us with a non-NULL entry in the first slot, so clear + * that here to make sure. + */ + tag_clear(node, 0, 0); + tag_clear(node, 1, 0); + node->slots[0] = NULL; + node->count = 0; + + kmem_cache_free(radix_tree_node_cachep, node); +} + +static inline void +radix_tree_node_free(struct radix_tree_node *node) +{ + call_rcu(&node->rcu_head, radix_tree_node_rcu_free); +} + +/* + * Load up this CPU's radix_tree_node buffer with sufficient objects to + * ensure that the addition of a single element in the tree cannot fail. On + * success, return zero, with preemption disabled. On error, return -ENOMEM + * with preemption not disabled. 
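+ *
+ * The usual calling pattern is along these lines (my_tree and my_lock
+ * are illustrative):
+ *
+ *        if (radix_tree_preload(GFP_KERNEL))
+ *                return -ENOMEM;
+ *        spin_lock(&my_lock);
+ *        error = radix_tree_insert(&my_tree, index, item);
+ *        spin_unlock(&my_lock);
+ *        radix_tree_preload_end();
+ *
+ * radix_tree_preload_end() simply re-enables preemption once the insert
+ * is done.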
+ */
+int radix_tree_preload(gfp_t gfp_mask)
+{
+        struct radix_tree_preload *rtp;
+        struct radix_tree_node *node;
+        int ret = -ENOMEM;
+
+        preempt_disable();
+        rtp = &__get_cpu_var(radix_tree_preloads);
+        while (rtp->nr < ARRAY_SIZE(rtp->nodes)) {
+                preempt_enable();
+                node = kmem_cache_alloc(radix_tree_node_cachep, gfp_mask);
+                if (node == NULL)
+                        goto out;
+                preempt_disable();
+                rtp = &__get_cpu_var(radix_tree_preloads);
+                if (rtp->nr < ARRAY_SIZE(rtp->nodes))
+                        rtp->nodes[rtp->nr++] = node;
+                else
+                        kmem_cache_free(radix_tree_node_cachep, node);
+        }
+        ret = 0;
+out:
+        return ret;
+}
+EXPORT_SYMBOL(radix_tree_preload);
+
+/*
+ * Return the maximum key which can be stored in a
+ * radix tree with height HEIGHT.
+ */
+static inline unsigned long radix_tree_maxindex(unsigned int height)
+{
+        return height_to_maxindex[height];
+}
+
+/*
+ * Extend a radix tree so it can store key @index.
+ */
+static int radix_tree_extend(struct radix_tree_root *root, unsigned long index)
+{
+        struct radix_tree_node *node;
+        unsigned int height;
+        int tag;
+
+        /* Figure out what the height should be. */
+        height = root->height + 1;
+        while (index > radix_tree_maxindex(height))
+                height++;
+
+        if (root->rnode == NULL) {
+                root->height = height;
+                goto out;
+        }
+
+        do {
+                unsigned int newheight;
+                if (!(node = radix_tree_node_alloc(root)))
+                        return -ENOMEM;
+
+                /* Increase the height. */
+                node->slots[0] = radix_tree_indirect_to_ptr(root->rnode);
+
+                /* Propagate the aggregated tag info into the new root */
+                for (tag = 0; tag < RADIX_TREE_MAX_TAGS; tag++) {
+                        if (root_tag_get(root, tag))
+                                tag_set(node, tag, 0);
+                }
+
+                newheight = root->height+1;
+                node->height = newheight;
+                node->count = 1;
+                node = radix_tree_ptr_to_indirect(node);
+                rcu_assign_pointer(root->rnode, node);
+                root->height = newheight;
+        } while (height > root->height);
+out:
+        return 0;
+}
+
+/**
+ * radix_tree_insert - insert into a radix tree
+ * @root: radix tree root
+ * @index: index key
+ * @item: item to insert
+ *
+ * Insert an item into the radix tree at position @index.
+ */
+int radix_tree_insert(struct radix_tree_root *root,
+                        unsigned long index, void *item)
+{
+        struct radix_tree_node *node = NULL, *slot;
+        unsigned int height, shift;
+        int offset;
+        int error;
+
+        BUG_ON(radix_tree_is_indirect_ptr(item));
+
+        /* Make sure the tree is high enough. */
+        if (index > radix_tree_maxindex(root->height)) {
+                error = radix_tree_extend(root, index);
+                if (error)
+                        return error;
+        }
+
+        slot = radix_tree_indirect_to_ptr(root->rnode);
+
+        height = root->height;
+        shift = (height-1) * RADIX_TREE_MAP_SHIFT;
+
+        offset = 0;                     /* uninitialised var warning */
+        while (height > 0) {
+                if (slot == NULL) {
+                        /* Have to add a child node.
*/ + if (!(slot = radix_tree_node_alloc(root))) + return -ENOMEM; + slot->height = height; + if (node) { + rcu_assign_pointer(node->slots[offset], slot); + node->count++; + } else + rcu_assign_pointer(root->rnode, + radix_tree_ptr_to_indirect(slot)); + } + + /* Go a level down */ + offset = (index >> shift) & RADIX_TREE_MAP_MASK; + node = slot; + slot = node->slots[offset]; + shift -= RADIX_TREE_MAP_SHIFT; + height--; + } + + if (slot != NULL) + return -EEXIST; + + if (node) { + node->count++; + rcu_assign_pointer(node->slots[offset], item); + BUG_ON(tag_get(node, 0, offset)); + BUG_ON(tag_get(node, 1, offset)); + } else { + rcu_assign_pointer(root->rnode, item); + BUG_ON(root_tag_get(root, 0)); + BUG_ON(root_tag_get(root, 1)); + } + + return 0; +} +EXPORT_SYMBOL(radix_tree_insert); + +/** + * radix_tree_lookup_slot - lookup a slot in a radix tree + * @root: radix tree root + * @index: index key + * + * Returns: the slot corresponding to the position @index in the + * radix tree @root. This is useful for update-if-exists operations. + * + * This function can be called under rcu_read_lock iff the slot is not + * modified by radix_tree_replace_slot, otherwise it must be called + * exclusive from other writers. Any dereference of the slot must be done + * using radix_tree_deref_slot. + */ +void **radix_tree_lookup_slot(struct radix_tree_root *root, unsigned long index) +{ + unsigned int height, shift; + struct radix_tree_node *node, **slot; + + node = rcu_dereference(root->rnode); + if (node == NULL) + return NULL; + + if (!radix_tree_is_indirect_ptr(node)) { + if (index > 0) + return NULL; + return (void **)&root->rnode; + } + node = radix_tree_indirect_to_ptr(node); + + height = node->height; + if (index > radix_tree_maxindex(height)) + return NULL; + + shift = (height-1) * RADIX_TREE_MAP_SHIFT; + + do { + slot = (struct radix_tree_node **) + (node->slots + ((index>>shift) & RADIX_TREE_MAP_MASK)); + node = rcu_dereference(*slot); + if (node == NULL) + return NULL; + + shift -= RADIX_TREE_MAP_SHIFT; + height--; + } while (height > 0); + + return (void **)slot; +} +EXPORT_SYMBOL(radix_tree_lookup_slot); + +/** + * radix_tree_lookup - perform lookup operation on a radix tree + * @root: radix tree root + * @index: index key + * + * Lookup the item at the position @index in the radix tree @root. + * + * This function can be called under rcu_read_lock, however the caller + * must manage lifetimes of leaf nodes (eg. RCU may also be used to free + * them safely). No RCU barriers are required to access or modify the + * returned item, however. 
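+ *
+ * A lockless reader would therefore look roughly like this (my_tree and
+ * do_something() are illustrative):
+ *
+ *        rcu_read_lock();
+ *        item = radix_tree_lookup(&my_tree, index);
+ *        if (item)
+ *                do_something(item);
+ *        rcu_read_unlock();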
+ */ +void *radix_tree_lookup(struct radix_tree_root *root, unsigned long index) +{ + unsigned int height, shift; + struct radix_tree_node *node, **slot; + + node = rcu_dereference(root->rnode); + if (node == NULL) + return NULL; + + if (!radix_tree_is_indirect_ptr(node)) { + if (index > 0) + return NULL; + return node; + } + node = radix_tree_indirect_to_ptr(node); + + height = node->height; + if (index > radix_tree_maxindex(height)) + return NULL; + + shift = (height-1) * RADIX_TREE_MAP_SHIFT; + + do { + slot = (struct radix_tree_node **) + (node->slots + ((index>>shift) & RADIX_TREE_MAP_MASK)); + node = rcu_dereference(*slot); + if (node == NULL) + return NULL; + + shift -= RADIX_TREE_MAP_SHIFT; + height--; + } while (height > 0); + + return node; +} +EXPORT_SYMBOL(radix_tree_lookup); + +/** + * radix_tree_tag_set - set a tag on a radix tree node + * @root: radix tree root + * @index: index key + * @tag: tag index + * + * Set the search tag (which must be < RADIX_TREE_MAX_TAGS) + * corresponding to @index in the radix tree. From + * the root all the way down to the leaf node. + * + * Returns the address of the tagged item. Setting a tag on a not-present + * item is a bug. + */ +void *radix_tree_tag_set(struct radix_tree_root *root, + unsigned long index, unsigned int tag) +{ + unsigned int height, shift; + struct radix_tree_node *slot; + + height = root->height; + BUG_ON(index > radix_tree_maxindex(height)); + + slot = radix_tree_indirect_to_ptr(root->rnode); + shift = (height - 1) * RADIX_TREE_MAP_SHIFT; + + while (height > 0) { + int offset; + + offset = (index >> shift) & RADIX_TREE_MAP_MASK; + if (!tag_get(slot, tag, offset)) + tag_set(slot, tag, offset); + slot = slot->slots[offset]; + BUG_ON(slot == NULL); + shift -= RADIX_TREE_MAP_SHIFT; + height--; + } + + /* set the root's tag bit */ + if (slot && !root_tag_get(root, tag)) + root_tag_set(root, tag); + + return slot; +} +EXPORT_SYMBOL(radix_tree_tag_set); + +/** + * radix_tree_tag_clear - clear a tag on a radix tree node + * @root: radix tree root + * @index: index key + * @tag: tag index + * + * Clear the search tag (which must be < RADIX_TREE_MAX_TAGS) + * corresponding to @index in the radix tree. If + * this causes the leaf node to have no tags set then clear the tag in the + * next-to-leaf node, etc. + * + * Returns the address of the tagged item on success, else NULL. ie: + * has the same return value and semantics as radix_tree_lookup(). + */ +void *radix_tree_tag_clear(struct radix_tree_root *root, + unsigned long index, unsigned int tag) +{ + /* + * The radix tree path needs to be one longer than the maximum path + * since the "list" is null terminated. 
+         */
+        struct radix_tree_path path[RADIX_TREE_MAX_PATH + 1], *pathp = path;
+        struct radix_tree_node *slot = NULL;
+        unsigned int height, shift;
+
+        height = root->height;
+        if (index > radix_tree_maxindex(height))
+                goto out;
+
+        shift = (height - 1) * RADIX_TREE_MAP_SHIFT;
+        pathp->node = NULL;
+        slot = radix_tree_indirect_to_ptr(root->rnode);
+
+        while (height > 0) {
+                int offset;
+
+                if (slot == NULL)
+                        goto out;
+
+                offset = (index >> shift) & RADIX_TREE_MAP_MASK;
+                pathp[1].offset = offset;
+                pathp[1].node = slot;
+                slot = slot->slots[offset];
+                pathp++;
+                shift -= RADIX_TREE_MAP_SHIFT;
+                height--;
+        }
+
+        if (slot == NULL)
+                goto out;
+
+        while (pathp->node) {
+                if (!tag_get(pathp->node, tag, pathp->offset))
+                        goto out;
+                tag_clear(pathp->node, tag, pathp->offset);
+                if (any_tag_set(pathp->node, tag))
+                        goto out;
+                pathp--;
+        }
+
+        /* clear the root's tag bit */
+        if (root_tag_get(root, tag))
+                root_tag_clear(root, tag);
+
+out:
+        return slot;
+}
+EXPORT_SYMBOL(radix_tree_tag_clear);
+
+#ifndef __KERNEL__      /* Only the test harness uses this at present */
+/**
+ * radix_tree_tag_get - get a tag on a radix tree node
+ * @root: radix tree root
+ * @index: index key
+ * @tag: tag index (< RADIX_TREE_MAX_TAGS)
+ *
+ * Return values:
+ *
+ *  0: tag not present or not set
+ *  1: tag set
+ */
+int radix_tree_tag_get(struct radix_tree_root *root,
+                        unsigned long index, unsigned int tag)
+{
+        unsigned int height, shift;
+        struct radix_tree_node *node;
+        int saw_unset_tag = 0;
+
+        /* check the root's tag bit */
+        if (!root_tag_get(root, tag))
+                return 0;
+
+        node = rcu_dereference(root->rnode);
+        if (node == NULL)
+                return 0;
+
+        if (!radix_tree_is_indirect_ptr(node))
+                return (index == 0);
+        node = radix_tree_indirect_to_ptr(node);
+
+        height = node->height;
+        if (index > radix_tree_maxindex(height))
+                return 0;
+
+        shift = (height - 1) * RADIX_TREE_MAP_SHIFT;
+
+        for ( ; ; ) {
+                int offset;
+
+                if (node == NULL)
+                        return 0;
+
+                offset = (index >> shift) & RADIX_TREE_MAP_MASK;
+
+                /*
+                 * This is just a debug check. Later, we can bail as soon as
+                 * we see an unset tag.
+                 */
+                if (!tag_get(node, tag, offset))
+                        saw_unset_tag = 1;
+                if (height == 1) {
+                        int ret = tag_get(node, tag, offset);
+
+                        BUG_ON(ret && saw_unset_tag);
+                        return !!ret;
+                }
+                node = rcu_dereference(node->slots[offset]);
+                shift -= RADIX_TREE_MAP_SHIFT;
+                height--;
+        }
+}
+EXPORT_SYMBOL(radix_tree_tag_get);
+#endif
+
+/**
+ * radix_tree_next_hole - find the next hole (not-present entry)
+ * @root: tree root
+ * @index: index key
+ * @max_scan: maximum range to search
+ *
+ * Search the set [index, min(index+max_scan-1, MAX_INDEX)] for the lowest
+ * indexed hole.
+ *
+ * Returns: the index of the hole if found, otherwise returns an index
+ * outside of the set specified (in which case 'return - index >= max_scan'
+ * will be true). In rare cases of index wrap-around, 0 will be returned.
+ *
+ * radix_tree_next_hole may be called under rcu_read_lock. However, like
+ * radix_tree_gang_lookup, this will not atomically search a snapshot of
+ * the tree at a single point in time. For example, if a hole is created
+ * at index 5, then subsequently a hole is created at index 10,
+ * radix_tree_next_hole covering both indexes may return 10 if called
+ * under rcu_read_lock.
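+ *
+ * For instance, if indices 5..7 are present and index 8 is not, then
+ * radix_tree_next_hole(root, 5, 10) returns 8; a scan that starts at an
+ * absent index returns that index itself.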
+ */ +unsigned long radix_tree_next_hole(struct radix_tree_root *root, + unsigned long index, unsigned long max_scan) +{ + unsigned long i; + + for (i = 0; i < max_scan; i++) { + if (!radix_tree_lookup(root, index)) + break; + index++; + if (index == 0) + break; + } + + return index; +} +EXPORT_SYMBOL(radix_tree_next_hole); + +static unsigned int +__lookup(struct radix_tree_node *slot, void ***results, unsigned long index, + unsigned int max_items, unsigned long *next_index) +{ + unsigned int nr_found = 0; + unsigned int shift, height; + unsigned long i; + + height = slot->height; + if (height == 0) + goto out; + shift = (height-1) * RADIX_TREE_MAP_SHIFT; + + for ( ; height > 1; height--) { + i = (index >> shift) & RADIX_TREE_MAP_MASK; + for (;;) { + if (slot->slots[i] != NULL) + break; + index &= ~((1UL << shift) - 1); + index += 1UL << shift; + if (index == 0) + goto out; /* 32-bit wraparound */ + i++; + if (i == RADIX_TREE_MAP_SIZE) + goto out; + } + + shift -= RADIX_TREE_MAP_SHIFT; + slot = rcu_dereference(slot->slots[i]); + if (slot == NULL) + goto out; + } + + /* Bottom level: grab some items */ + for (i = index & RADIX_TREE_MAP_MASK; i < RADIX_TREE_MAP_SIZE; i++) { + index++; + if (slot->slots[i]) { + results[nr_found++] = &(slot->slots[i]); + if (nr_found == max_items) + goto out; + } + } +out: + *next_index = index; + return nr_found; +} + +/** + * radix_tree_gang_lookup - perform multiple lookup on a radix tree + * @root: radix tree root + * @results: where the results of the lookup are placed + * @first_index: start the lookup from this key + * @max_items: place up to this many items at *results + * + * Performs an index-ascending scan of the tree for present items. Places + * them at *@results and returns the number of items which were placed at + * *@results. + * + * The implementation is naive. + * + * Like radix_tree_lookup, radix_tree_gang_lookup may be called under + * rcu_read_lock. In this case, rather than the returned results being + * an atomic snapshot of the tree at a single point in time, the semantics + * of an RCU protected gang lookup are as though multiple radix_tree_lookups + * have been issued in individual locks, and results stored in 'results'. 
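+ *
+ * For example, to process up to 16 present items starting at index 0
+ * (my_tree and process() are illustrative):
+ *
+ *        void *results[16];
+ *        unsigned int i, n;
+ *
+ *        n = radix_tree_gang_lookup(&my_tree, results, 0, 16);
+ *        for (i = 0; i < n; i++)
+ *                process(results[i]);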
+ */ +unsigned int +radix_tree_gang_lookup(struct radix_tree_root *root, void **results, + unsigned long first_index, unsigned int max_items) +{ + unsigned long max_index; + struct radix_tree_node *node; + unsigned long cur_index = first_index; + unsigned int ret; + + node = rcu_dereference(root->rnode); + if (!node) + return 0; + + if (!radix_tree_is_indirect_ptr(node)) { + if (first_index > 0) + return 0; + results[0] = node; + return 1; + } + node = radix_tree_indirect_to_ptr(node); + + max_index = radix_tree_maxindex(node->height); + + ret = 0; + while (ret < max_items) { + unsigned int nr_found, slots_found, i; + unsigned long next_index; /* Index of next search */ + + if (cur_index > max_index) + break; + slots_found = __lookup(node, (void ***)results + ret, cur_index, + max_items - ret, &next_index); + nr_found = 0; + for (i = 0; i < slots_found; i++) { + struct radix_tree_node *slot; + slot = *(((void ***)results)[ret + i]); + if (!slot) + continue; + results[ret + nr_found] = rcu_dereference(slot); + nr_found++; + } + ret += nr_found; + if (next_index == 0) + break; + cur_index = next_index; + } + + return ret; +} +EXPORT_SYMBOL(radix_tree_gang_lookup); + +/** + * radix_tree_gang_lookup_slot - perform multiple slot lookup on radix tree + * @root: radix tree root + * @results: where the results of the lookup are placed + * @first_index: start the lookup from this key + * @max_items: place up to this many items at *results + * + * Performs an index-ascending scan of the tree for present items. Places + * their slots at *@results and returns the number of items which were + * placed at *@results. + * + * The implementation is naive. + * + * Like radix_tree_gang_lookup as far as RCU and locking goes. Slots must + * be dereferenced with radix_tree_deref_slot, and if using only RCU + * protection, radix_tree_deref_slot may fail requiring a retry. + */ +unsigned int +radix_tree_gang_lookup_slot(struct radix_tree_root *root, void ***results, + unsigned long first_index, unsigned int max_items) +{ + unsigned long max_index; + struct radix_tree_node *node; + unsigned long cur_index = first_index; + unsigned int ret; + + node = rcu_dereference(root->rnode); + if (!node) + return 0; + + if (!radix_tree_is_indirect_ptr(node)) { + if (first_index > 0) + return 0; + results[0] = (void **)&root->rnode; + return 1; + } + node = radix_tree_indirect_to_ptr(node); + + max_index = radix_tree_maxindex(node->height); + + ret = 0; + while (ret < max_items) { + unsigned int slots_found; + unsigned long next_index; /* Index of next search */ + + if (cur_index > max_index) + break; + slots_found = __lookup(node, results + ret, cur_index, + max_items - ret, &next_index); + ret += slots_found; + if (next_index == 0) + break; + cur_index = next_index; + } + + return ret; +} +EXPORT_SYMBOL(radix_tree_gang_lookup_slot); + +/* + * FIXME: the two tag_get()s here should use find_next_bit() instead of + * open-coding the search. 
+ */ +static unsigned int +__lookup_tag(struct radix_tree_node *slot, void ***results, unsigned long index, + unsigned int max_items, unsigned long *next_index, unsigned int tag) +{ + unsigned int nr_found = 0; + unsigned int shift, height; + + height = slot->height; + if (height == 0) + goto out; + shift = (height-1) * RADIX_TREE_MAP_SHIFT; + + while (height > 0) { + unsigned long i = (index >> shift) & RADIX_TREE_MAP_MASK ; + + for (;;) { + if (tag_get(slot, tag, i)) + break; + index &= ~((1UL << shift) - 1); + index += 1UL << shift; + if (index == 0) + goto out; /* 32-bit wraparound */ + i++; + if (i == RADIX_TREE_MAP_SIZE) + goto out; + } + height--; + if (height == 0) { /* Bottom level: grab some items */ + unsigned long j = index & RADIX_TREE_MAP_MASK; + + for ( ; j < RADIX_TREE_MAP_SIZE; j++) { + index++; + if (!tag_get(slot, tag, j)) + continue; + /* + * Even though the tag was found set, we need to + * recheck that we have a non-NULL node, because + * if this lookup is lockless, it may have been + * subsequently deleted. + * + * Similar care must be taken in any place that + * lookup ->slots[x] without a lock (ie. can't + * rely on its value remaining the same). + */ + if (slot->slots[j]) { + results[nr_found++] = &(slot->slots[j]); + if (nr_found == max_items) + goto out; + } + } + } + shift -= RADIX_TREE_MAP_SHIFT; + slot = rcu_dereference(slot->slots[i]); + if (slot == NULL) + break; + } +out: + *next_index = index; + return nr_found; +} + +/** + * radix_tree_gang_lookup_tag - perform multiple lookup on a radix tree + * based on a tag + * @root: radix tree root + * @results: where the results of the lookup are placed + * @first_index: start the lookup from this key + * @max_items: place up to this many items at *results + * @tag: the tag index (< RADIX_TREE_MAX_TAGS) + * + * Performs an index-ascending scan of the tree for present items which + * have the tag indexed by @tag set. Places the items at *@results and + * returns the number of items which were placed at *@results. 
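+ *
+ * Usage mirrors radix_tree_gang_lookup(); for example (the tag value
+ * and items array are illustrative):
+ *
+ *	n = radix_tree_gang_lookup_tag(&tree, (void **)items,
+ *				       index, 16, MY_TAG);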
+ */ +unsigned int +radix_tree_gang_lookup_tag(struct radix_tree_root *root, void **results, + unsigned long first_index, unsigned int max_items, + unsigned int tag) +{ + struct radix_tree_node *node; + unsigned long max_index; + unsigned long cur_index = first_index; + unsigned int ret; + + /* check the root's tag bit */ + if (!root_tag_get(root, tag)) + return 0; + + node = rcu_dereference(root->rnode); + if (!node) + return 0; + + if (!radix_tree_is_indirect_ptr(node)) { + if (first_index > 0) + return 0; + results[0] = node; + return 1; + } + node = radix_tree_indirect_to_ptr(node); + + max_index = radix_tree_maxindex(node->height); + + ret = 0; + while (ret < max_items) { + unsigned int nr_found, slots_found, i; + unsigned long next_index; /* Index of next search */ + + if (cur_index > max_index) + break; + slots_found = __lookup_tag(node, (void ***)results + ret, + cur_index, max_items - ret, &next_index, tag); + nr_found = 0; + for (i = 0; i < slots_found; i++) { + struct radix_tree_node *slot; + slot = *(((void ***)results)[ret + i]); + if (!slot) + continue; + results[ret + nr_found] = rcu_dereference(slot); + nr_found++; + } + ret += nr_found; + if (next_index == 0) + break; + cur_index = next_index; + } + + return ret; +} +EXPORT_SYMBOL(radix_tree_gang_lookup_tag); + +/** + * radix_tree_gang_lookup_tag_slot - perform multiple slot lookup on a + * radix tree based on a tag + * @root: radix tree root + * @results: where the results of the lookup are placed + * @first_index: start the lookup from this key + * @max_items: place up to this many items at *results + * @tag: the tag index (< RADIX_TREE_MAX_TAGS) + * + * Performs an index-ascending scan of the tree for present items which + * have the tag indexed by @tag set. Places the slots at *@results and + * returns the number of slots which were placed at *@results. + */ +unsigned int +radix_tree_gang_lookup_tag_slot(struct radix_tree_root *root, void ***results, + unsigned long first_index, unsigned int max_items, + unsigned int tag) +{ + struct radix_tree_node *node; + unsigned long max_index; + unsigned long cur_index = first_index; + unsigned int ret; + + /* check the root's tag bit */ + if (!root_tag_get(root, tag)) + return 0; + + node = rcu_dereference(root->rnode); + if (!node) + return 0; + + if (!radix_tree_is_indirect_ptr(node)) { + if (first_index > 0) + return 0; + results[0] = (void **)&root->rnode; + return 1; + } + node = radix_tree_indirect_to_ptr(node); + + max_index = radix_tree_maxindex(node->height); + + ret = 0; + while (ret < max_items) { + unsigned int slots_found; + unsigned long next_index; /* Index of next search */ + + if (cur_index > max_index) + break; + slots_found = __lookup_tag(node, results + ret, + cur_index, max_items - ret, &next_index, tag); + ret += slots_found; + if (next_index == 0) + break; + cur_index = next_index; + } + + return ret; +} +EXPORT_SYMBOL(radix_tree_gang_lookup_tag_slot); + + +/** + * radix_tree_shrink - shrink height of a radix tree to minimal + * @root radix tree root + */ +static inline void radix_tree_shrink(struct radix_tree_root *root) +{ + /* try to shrink tree height */ + while (root->height > 0) { + struct radix_tree_node *to_free = root->rnode; + void *newptr; + + BUG_ON(!radix_tree_is_indirect_ptr(to_free)); + to_free = radix_tree_indirect_to_ptr(to_free); + + /* + * The candidate node has more than one child, or its child + * is not at the leftmost slot, we cannot shrink. 
+ */ + if (to_free->count != 1) + break; + if (!to_free->slots[0]) + break; + + /* + * We don't need rcu_assign_pointer(), since we are simply + * moving the node from one part of the tree to another. If + * it was safe to dereference the old pointer to it + * (to_free->slots[0]), it will be safe to dereference the new + * one (root->rnode). + */ + newptr = to_free->slots[0]; + if (root->height > 1) + newptr = radix_tree_ptr_to_indirect(newptr); + root->rnode = newptr; + root->height--; + radix_tree_node_free(to_free); + } +} + +/** + * radix_tree_delete - delete an item from a radix tree + * @root: radix tree root + * @index: index key + * + * Remove the item at @index from the radix tree rooted at @root. + * + * Returns the address of the deleted item, or NULL if it was not present. + */ +void *radix_tree_delete(struct radix_tree_root *root, unsigned long index) +{ + /* + * The radix tree path needs to be one longer than the maximum path + * since the "list" is null terminated. + */ + struct radix_tree_path path[RADIX_TREE_MAX_PATH + 1], *pathp = path; + struct radix_tree_node *slot = NULL; + struct radix_tree_node *to_free; + unsigned int height, shift; + int tag; + int offset; + + height = root->height; + if (index > radix_tree_maxindex(height)) + goto out; + + slot = root->rnode; + if (height == 0) { + root_tag_clear_all(root); + root->rnode = NULL; + goto out; + } + slot = radix_tree_indirect_to_ptr(slot); + + shift = (height - 1) * RADIX_TREE_MAP_SHIFT; + pathp->node = NULL; + + do { + if (slot == NULL) + goto out; + + pathp++; + offset = (index >> shift) & RADIX_TREE_MAP_MASK; + pathp->offset = offset; + pathp->node = slot; + slot = slot->slots[offset]; + shift -= RADIX_TREE_MAP_SHIFT; + height--; + } while (height > 0); + + if (slot == NULL) + goto out; + + /* + * Clear all tags associated with the just-deleted item + */ + for (tag = 0; tag < RADIX_TREE_MAX_TAGS; tag++) { + if (tag_get(pathp->node, tag, pathp->offset)) + radix_tree_tag_clear(root, index, tag); + } + + to_free = NULL; + /* Now free the nodes we do not need anymore */ + while (pathp->node) { + pathp->node->slots[pathp->offset] = NULL; + pathp->node->count--; + /* + * Queue the node for deferred freeing after the + * last reference to it disappears (set NULL, above). 
 + */ + if (to_free) + radix_tree_node_free(to_free); + + if (pathp->node->count) { + if (pathp->node == + radix_tree_indirect_to_ptr(root->rnode)) + radix_tree_shrink(root); + goto out; + } + + /* Node with zero slots in use so free it */ + to_free = pathp->node; + pathp--; + + } + root_tag_clear_all(root); + root->height = 0; + root->rnode = NULL; + if (to_free) + radix_tree_node_free(to_free); + +out: + return slot; +} +EXPORT_SYMBOL(radix_tree_delete); + +/** + * radix_tree_tagged - test whether any items in the tree are tagged + * @root: radix tree root + * @tag: tag to test + */ +int radix_tree_tagged(struct radix_tree_root *root, unsigned int tag) +{ + return root_tag_get(root, tag); +} +EXPORT_SYMBOL(radix_tree_tagged); + +static void +radix_tree_node_ctor(void *node) +{ + memset(node, 0, sizeof(struct radix_tree_node)); +} + +static __init unsigned long __maxindex(unsigned int height) +{ + unsigned int width = height * RADIX_TREE_MAP_SHIFT; + int shift = RADIX_TREE_INDEX_BITS - width; + + if (shift < 0) + return ~0UL; + if (shift >= BITS_PER_LONG) + return 0UL; + return ~0UL >> shift; +} + +static __init void radix_tree_init_maxindex(void) +{ + unsigned int i; + + for (i = 0; i < ARRAY_SIZE(height_to_maxindex); i++) + height_to_maxindex[i] = __maxindex(i); +} + +static int radix_tree_callback(struct notifier_block *nfb, + unsigned long action, + void *hcpu) +{ + int cpu = (long)hcpu; + struct radix_tree_preload *rtp; + + /* Free per-cpu pool of preloaded nodes */ + if (action == CPU_DEAD || action == CPU_DEAD_FROZEN) { + rtp = &per_cpu(radix_tree_preloads, cpu); + while (rtp->nr) { + kmem_cache_free(radix_tree_node_cachep, + rtp->nodes[rtp->nr-1]); + rtp->nodes[rtp->nr-1] = NULL; + rtp->nr--; + } + } + return NOTIFY_OK; +} + +void __init radix_tree_init(void) +{ + radix_tree_node_cachep = kmem_cache_create("radix_tree_node", + sizeof(struct radix_tree_node), 0, + SLAB_PANIC | SLAB_RECLAIM_ACCOUNT, + radix_tree_node_ctor); + radix_tree_init_maxindex(); + hotcpu_notifier(radix_tree_callback, 0); +} diff --git a/libdde-linux26/contrib/lib/rbtree.c b/libdde-linux26/contrib/lib/rbtree.c new file mode 100644 index 00000000..9956b996 --- /dev/null +++ b/libdde-linux26/contrib/lib/rbtree.c @@ -0,0 +1,397 @@ +/* + Red Black Trees + (C) 1999 Andrea Arcangeli <andrea@suse.de> + (C) 2002 David Woodhouse <dwmw2@infradead.org> + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 2 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. 
+ + You should have received a copy of the GNU General Public License + along with this program; if not, write to the Free Software + Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + + linux/lib/rbtree.c +*/ + +#include <linux/rbtree.h> +#include <linux/module.h> + +static void __rb_rotate_left(struct rb_node *node, struct rb_root *root) +{ + struct rb_node *right = node->rb_right; + struct rb_node *parent = rb_parent(node); + + if ((node->rb_right = right->rb_left)) + rb_set_parent(right->rb_left, node); + right->rb_left = node; + + rb_set_parent(right, parent); + + if (parent) + { + if (node == parent->rb_left) + parent->rb_left = right; + else + parent->rb_right = right; + } + else + root->rb_node = right; + rb_set_parent(node, right); +} + +static void __rb_rotate_right(struct rb_node *node, struct rb_root *root) +{ + struct rb_node *left = node->rb_left; + struct rb_node *parent = rb_parent(node); + + if ((node->rb_left = left->rb_right)) + rb_set_parent(left->rb_right, node); + left->rb_right = node; + + rb_set_parent(left, parent); + + if (parent) + { + if (node == parent->rb_right) + parent->rb_right = left; + else + parent->rb_left = left; + } + else + root->rb_node = left; + rb_set_parent(node, left); +} + +void rb_insert_color(struct rb_node *node, struct rb_root *root) +{ + struct rb_node *parent, *gparent; + + while ((parent = rb_parent(node)) && rb_is_red(parent)) + { + gparent = rb_parent(parent); + + if (parent == gparent->rb_left) + { + { + register struct rb_node *uncle = gparent->rb_right; + if (uncle && rb_is_red(uncle)) + { + rb_set_black(uncle); + rb_set_black(parent); + rb_set_red(gparent); + node = gparent; + continue; + } + } + + if (parent->rb_right == node) + { + register struct rb_node *tmp; + __rb_rotate_left(parent, root); + tmp = parent; + parent = node; + node = tmp; + } + + rb_set_black(parent); + rb_set_red(gparent); + __rb_rotate_right(gparent, root); + } else { + { + register struct rb_node *uncle = gparent->rb_left; + if (uncle && rb_is_red(uncle)) + { + rb_set_black(uncle); + rb_set_black(parent); + rb_set_red(gparent); + node = gparent; + continue; + } + } + + if (parent->rb_left == node) + { + register struct rb_node *tmp; + __rb_rotate_right(parent, root); + tmp = parent; + parent = node; + node = tmp; + } + + rb_set_black(parent); + rb_set_red(gparent); + __rb_rotate_left(gparent, root); + } + } + + rb_set_black(root->rb_node); +} +EXPORT_SYMBOL(rb_insert_color); + +static void __rb_erase_color(struct rb_node *node, struct rb_node *parent, + struct rb_root *root) +{ + struct rb_node *other; + + while ((!node || rb_is_black(node)) && node != root->rb_node) + { + if (parent->rb_left == node) + { + other = parent->rb_right; + if (rb_is_red(other)) + { + rb_set_black(other); + rb_set_red(parent); + __rb_rotate_left(parent, root); + other = parent->rb_right; + } + if ((!other->rb_left || rb_is_black(other->rb_left)) && + (!other->rb_right || rb_is_black(other->rb_right))) + { + rb_set_red(other); + node = parent; + parent = rb_parent(node); + } + else + { + if (!other->rb_right || rb_is_black(other->rb_right)) + { + struct rb_node *o_left; + if ((o_left = other->rb_left)) + rb_set_black(o_left); + rb_set_red(other); + __rb_rotate_right(other, root); + other = parent->rb_right; + } + rb_set_color(other, rb_color(parent)); + rb_set_black(parent); + if (other->rb_right) + rb_set_black(other->rb_right); + __rb_rotate_left(parent, root); + node = root->rb_node; + break; + } + } + else + { + other = parent->rb_left; + if 
(rb_is_red(other)) + { + rb_set_black(other); + rb_set_red(parent); + __rb_rotate_right(parent, root); + other = parent->rb_left; + } + if ((!other->rb_left || rb_is_black(other->rb_left)) && + (!other->rb_right || rb_is_black(other->rb_right))) + { + rb_set_red(other); + node = parent; + parent = rb_parent(node); + } + else + { + if (!other->rb_left || rb_is_black(other->rb_left)) + { + register struct rb_node *o_right; + if ((o_right = other->rb_right)) + rb_set_black(o_right); + rb_set_red(other); + __rb_rotate_left(other, root); + other = parent->rb_left; + } + rb_set_color(other, rb_color(parent)); + rb_set_black(parent); + if (other->rb_left) + rb_set_black(other->rb_left); + __rb_rotate_right(parent, root); + node = root->rb_node; + break; + } + } + } + if (node) + rb_set_black(node); +} + +void rb_erase(struct rb_node *node, struct rb_root *root) +{ + struct rb_node *child, *parent; + int color; + + if (!node->rb_left) + child = node->rb_right; + else if (!node->rb_right) + child = node->rb_left; + else + { + struct rb_node *old = node, *left; + + node = node->rb_right; + while ((left = node->rb_left) != NULL) + node = left; + child = node->rb_right; + parent = rb_parent(node); + color = rb_color(node); + + if (child) + rb_set_parent(child, parent); + if (parent == old) { + parent->rb_right = child; + parent = node; + } else + parent->rb_left = child; + + node->rb_parent_color = old->rb_parent_color; + node->rb_right = old->rb_right; + node->rb_left = old->rb_left; + + if (rb_parent(old)) + { + if (rb_parent(old)->rb_left == old) + rb_parent(old)->rb_left = node; + else + rb_parent(old)->rb_right = node; + } else + root->rb_node = node; + + rb_set_parent(old->rb_left, node); + if (old->rb_right) + rb_set_parent(old->rb_right, node); + goto color; + } + + parent = rb_parent(node); + color = rb_color(node); + + if (child) + rb_set_parent(child, parent); + if (parent) + { + if (parent->rb_left == node) + parent->rb_left = child; + else + parent->rb_right = child; + } + else + root->rb_node = child; + + color: + if (color == RB_BLACK) + __rb_erase_color(child, parent, root); +} +EXPORT_SYMBOL(rb_erase); + +/* + * This function returns the first node (in sort order) of the tree. + */ +struct rb_node *rb_first(const struct rb_root *root) +{ + struct rb_node *n; + + n = root->rb_node; + if (!n) + return NULL; + while (n->rb_left) + n = n->rb_left; + return n; +} +EXPORT_SYMBOL(rb_first); + +struct rb_node *rb_last(const struct rb_root *root) +{ + struct rb_node *n; + + n = root->rb_node; + if (!n) + return NULL; + while (n->rb_right) + n = n->rb_right; + return n; +} +EXPORT_SYMBOL(rb_last); + +struct rb_node *rb_next(const struct rb_node *node) +{ + struct rb_node *parent; + + if (rb_parent(node) == node) + return NULL; + + /* If we have a right-hand child, go down and then left as far + as we can. */ + if (node->rb_right) { + node = node->rb_right; + while (node->rb_left) + node=node->rb_left; + return (struct rb_node *)node; + } + + /* No right-hand children. Everything down and left is + smaller than us, so any 'next' node must be in the general + direction of our parent. Go up the tree; any time the + ancestor is a right-hand child of its parent, keep going + up. First time it's a left-hand child of its parent, said + parent is our 'next' node. 
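+
+	   For example, an in-order walk of an entire tree is just
+	   (the node type and visit() are the caller's own):
+
+		struct rb_node *n;
+		for (n = rb_first(&root); n; n = rb_next(n))
+			visit(rb_entry(n, struct my_type, node));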
*/ + while ((parent = rb_parent(node)) && node == parent->rb_right) + node = parent; + + return parent; +} +EXPORT_SYMBOL(rb_next); + +struct rb_node *rb_prev(const struct rb_node *node) +{ + struct rb_node *parent; + + if (rb_parent(node) == node) + return NULL; + + /* If we have a left-hand child, go down and then right as far + as we can. */ + if (node->rb_left) { + node = node->rb_left; + while (node->rb_right) + node=node->rb_right; + return (struct rb_node *)node; + } + + /* No left-hand children. Go up till we find an ancestor which + is a right-hand child of its parent */ + while ((parent = rb_parent(node)) && node == parent->rb_left) + node = parent; + + return parent; +} +EXPORT_SYMBOL(rb_prev); + +void rb_replace_node(struct rb_node *victim, struct rb_node *new, + struct rb_root *root) +{ + struct rb_node *parent = rb_parent(victim); + + /* Set the surrounding nodes to point to the replacement */ + if (parent) { + if (victim == parent->rb_left) + parent->rb_left = new; + else + parent->rb_right = new; + } else { + root->rb_node = new; + } + if (victim->rb_left) + rb_set_parent(victim->rb_left, new); + if (victim->rb_right) + rb_set_parent(victim->rb_right, new); + + /* Copy the pointers/colour from the victim to the replacement */ + *new = *victim; +} +EXPORT_SYMBOL(rb_replace_node); diff --git a/libdde-linux26/contrib/lib/rwsem-spinlock.c b/libdde-linux26/contrib/lib/rwsem-spinlock.c new file mode 100644 index 00000000..9df3ca56 --- /dev/null +++ b/libdde-linux26/contrib/lib/rwsem-spinlock.c @@ -0,0 +1,316 @@ +/* rwsem-spinlock.c: R/W semaphores: contention handling functions for + * generic spinlock implementation + * + * Copyright (c) 2001 David Howells (dhowells@redhat.com). + * - Derived partially from idea by Andrea Arcangeli <andrea@suse.de> + * - Derived also from comments by Linus + */ +#include <linux/rwsem.h> +#include <linux/sched.h> +#include <linux/module.h> + +struct rwsem_waiter { + struct list_head list; + struct task_struct *task; + unsigned int flags; +#define RWSEM_WAITING_FOR_READ 0x00000001 +#define RWSEM_WAITING_FOR_WRITE 0x00000002 +}; + +/* + * initialise the semaphore + */ +void __init_rwsem(struct rw_semaphore *sem, const char *name, + struct lock_class_key *key) +{ +#ifdef CONFIG_DEBUG_LOCK_ALLOC + /* + * Make sure we are not reinitializing a held semaphore: + */ + debug_check_no_locks_freed((void *)sem, sizeof(*sem)); + lockdep_init_map(&sem->dep_map, name, key, 0); +#endif + sem->activity = 0; + spin_lock_init(&sem->wait_lock); + INIT_LIST_HEAD(&sem->wait_list); +} + +/* + * handle the lock release when processes blocked on it that can now run + * - if we come here, then: + * - the 'active count' _reached_ zero + * - the 'waiting count' is non-zero + * - the spinlock must be held by the caller + * - woken process blocks are discarded from the list after having task zeroed + * - writers are only woken if wakewrite is non-zero + */ +static inline struct rw_semaphore * +__rwsem_do_wake(struct rw_semaphore *sem, int wakewrite) +{ + struct rwsem_waiter *waiter; + struct task_struct *tsk; + int woken; + + waiter = list_entry(sem->wait_list.next, struct rwsem_waiter, list); + + if (!wakewrite) { + if (waiter->flags & RWSEM_WAITING_FOR_WRITE) + goto out; + goto dont_wake_writers; + } + + /* if we are allowed to wake writers try to grant a single write lock + * if there's a writer at the front of the queue + * - we leave the 'waiting count' incremented to signify potential + * contention + */ + if (waiter->flags & RWSEM_WAITING_FOR_WRITE) { + sem->activity = 
-1; + list_del(&waiter->list); + tsk = waiter->task; + /* Don't touch waiter after ->task has been NULLed */ + smp_mb(); + waiter->task = NULL; + wake_up_process(tsk); + put_task_struct(tsk); + goto out; + } + + /* grant an infinite number of read locks to the front of the queue */ + dont_wake_writers: + woken = 0; + while (waiter->flags & RWSEM_WAITING_FOR_READ) { + struct list_head *next = waiter->list.next; + + list_del(&waiter->list); + tsk = waiter->task; + smp_mb(); + waiter->task = NULL; + wake_up_process(tsk); + put_task_struct(tsk); + woken++; + if (list_empty(&sem->wait_list)) + break; + waiter = list_entry(next, struct rwsem_waiter, list); + } + + sem->activity += woken; + + out: + return sem; +} + +/* + * wake a single writer + */ +static inline struct rw_semaphore * +__rwsem_wake_one_writer(struct rw_semaphore *sem) +{ + struct rwsem_waiter *waiter; + struct task_struct *tsk; + + sem->activity = -1; + + waiter = list_entry(sem->wait_list.next, struct rwsem_waiter, list); + list_del(&waiter->list); + + tsk = waiter->task; + smp_mb(); + waiter->task = NULL; + wake_up_process(tsk); + put_task_struct(tsk); + return sem; +} + +/* + * get a read lock on the semaphore + */ +void __sched __down_read(struct rw_semaphore *sem) +{ + struct rwsem_waiter waiter; + struct task_struct *tsk; + + spin_lock_irq(&sem->wait_lock); + + if (sem->activity >= 0 && list_empty(&sem->wait_list)) { + /* granted */ + sem->activity++; + spin_unlock_irq(&sem->wait_lock); + goto out; + } + + tsk = current; + set_task_state(tsk, TASK_UNINTERRUPTIBLE); + + /* set up my own style of waitqueue */ + waiter.task = tsk; + waiter.flags = RWSEM_WAITING_FOR_READ; + get_task_struct(tsk); + + list_add_tail(&waiter.list, &sem->wait_list); + + /* we don't need to touch the semaphore struct anymore */ + spin_unlock_irq(&sem->wait_lock); + + /* wait to be given the lock */ + for (;;) { + if (!waiter.task) + break; + schedule(); + set_task_state(tsk, TASK_UNINTERRUPTIBLE); + } + + tsk->state = TASK_RUNNING; + out: + ; +} + +/* + * trylock for reading -- returns 1 if successful, 0 if contention + */ +int __down_read_trylock(struct rw_semaphore *sem) +{ + unsigned long flags; + int ret = 0; + + + spin_lock_irqsave(&sem->wait_lock, flags); + + if (sem->activity >= 0 && list_empty(&sem->wait_list)) { + /* granted */ + sem->activity++; + ret = 1; + } + + spin_unlock_irqrestore(&sem->wait_lock, flags); + + return ret; +} + +/* + * get a write lock on the semaphore + * - we increment the waiting count anyway to indicate an exclusive lock + */ +void __sched __down_write_nested(struct rw_semaphore *sem, int subclass) +{ + struct rwsem_waiter waiter; + struct task_struct *tsk; + + spin_lock_irq(&sem->wait_lock); + + if (sem->activity == 0 && list_empty(&sem->wait_list)) { + /* granted */ + sem->activity = -1; + spin_unlock_irq(&sem->wait_lock); + goto out; + } + + tsk = current; + set_task_state(tsk, TASK_UNINTERRUPTIBLE); + + /* set up my own style of waitqueue */ + waiter.task = tsk; + waiter.flags = RWSEM_WAITING_FOR_WRITE; + get_task_struct(tsk); + + list_add_tail(&waiter.list, &sem->wait_list); + + /* we don't need to touch the semaphore struct anymore */ + spin_unlock_irq(&sem->wait_lock); + + /* wait to be given the lock */ + for (;;) { + if (!waiter.task) + break; + schedule(); + set_task_state(tsk, TASK_UNINTERRUPTIBLE); + } + + tsk->state = TASK_RUNNING; + out: + ; +} + +void __sched __down_write(struct rw_semaphore *sem) +{ + __down_write_nested(sem, 0); +} + +/* + * trylock for writing -- returns 1 if successful, 0 if 
contention + */ +int __down_write_trylock(struct rw_semaphore *sem) +{ + unsigned long flags; + int ret = 0; + + spin_lock_irqsave(&sem->wait_lock, flags); + + if (sem->activity == 0 && list_empty(&sem->wait_list)) { + /* granted */ + sem->activity = -1; + ret = 1; + } + + spin_unlock_irqrestore(&sem->wait_lock, flags); + + return ret; +} + +/* + * release a read lock on the semaphore + */ +void __up_read(struct rw_semaphore *sem) +{ + unsigned long flags; + + spin_lock_irqsave(&sem->wait_lock, flags); + + if (--sem->activity == 0 && !list_empty(&sem->wait_list)) + sem = __rwsem_wake_one_writer(sem); + + spin_unlock_irqrestore(&sem->wait_lock, flags); +} + +/* + * release a write lock on the semaphore + */ +void __up_write(struct rw_semaphore *sem) +{ + unsigned long flags; + + spin_lock_irqsave(&sem->wait_lock, flags); + + sem->activity = 0; + if (!list_empty(&sem->wait_list)) + sem = __rwsem_do_wake(sem, 1); + + spin_unlock_irqrestore(&sem->wait_lock, flags); +} + +/* + * downgrade a write lock into a read lock + * - just wake up any readers at the front of the queue + */ +void __downgrade_write(struct rw_semaphore *sem) +{ + unsigned long flags; + + spin_lock_irqsave(&sem->wait_lock, flags); + + sem->activity = 1; + if (!list_empty(&sem->wait_list)) + sem = __rwsem_do_wake(sem, 0); + + spin_unlock_irqrestore(&sem->wait_lock, flags); +} + +EXPORT_SYMBOL(__init_rwsem); +EXPORT_SYMBOL(__down_read); +EXPORT_SYMBOL(__down_read_trylock); +EXPORT_SYMBOL(__down_write_nested); +EXPORT_SYMBOL(__down_write); +EXPORT_SYMBOL(__down_write_trylock); +EXPORT_SYMBOL(__up_read); +EXPORT_SYMBOL(__up_write); +EXPORT_SYMBOL(__downgrade_write); diff --git a/libdde-linux26/contrib/lib/rwsem.c b/libdde-linux26/contrib/lib/rwsem.c new file mode 100644 index 00000000..3e3365e5 --- /dev/null +++ b/libdde-linux26/contrib/lib/rwsem.c @@ -0,0 +1,257 @@ +/* rwsem.c: R/W semaphores: contention handling functions + * + * Written by David Howells (dhowells@redhat.com). 
 + * Derived from arch/i386/kernel/semaphore.c + */ +#include <linux/rwsem.h> +#include <linux/sched.h> +#include <linux/init.h> +#include <linux/module.h> + +/* + * Initialize an rwsem: + */ +void __init_rwsem(struct rw_semaphore *sem, const char *name, + struct lock_class_key *key) +{ +#ifdef CONFIG_DEBUG_LOCK_ALLOC + /* + * Make sure we are not reinitializing a held semaphore: + */ + debug_check_no_locks_freed((void *)sem, sizeof(*sem)); + lockdep_init_map(&sem->dep_map, name, key, 0); +#endif + sem->count = RWSEM_UNLOCKED_VALUE; + spin_lock_init(&sem->wait_lock); + INIT_LIST_HEAD(&sem->wait_list); +} + +EXPORT_SYMBOL(__init_rwsem); + +struct rwsem_waiter { + struct list_head list; + struct task_struct *task; + unsigned int flags; +#define RWSEM_WAITING_FOR_READ 0x00000001 +#define RWSEM_WAITING_FOR_WRITE 0x00000002 +}; + +/* + * handle the lock release when processes blocked on it can now run + * - if we come here from up_xxxx(), then: + * - the 'active part' of count (&0x0000ffff) reached 0 (but may have changed) + * - the 'waiting part' of count (&0xffff0000) is -ve (and will still be so) + * - there must be someone on the queue + * - the spinlock must be held by the caller + * - woken process blocks are discarded from the list after having task zeroed + * - writers are only woken if downgrading is false + */ +static inline struct rw_semaphore * +__rwsem_do_wake(struct rw_semaphore *sem, int downgrading) +{ + struct rwsem_waiter *waiter; + struct task_struct *tsk; + struct list_head *next; + signed long oldcount, woken, loop; + + if (downgrading) + goto dont_wake_writers; + + /* if we came through an up_xxxx() call, we only wake someone up + * if we can transition the active part of the count from 0 -> 1 + */ + try_again: + oldcount = rwsem_atomic_update(RWSEM_ACTIVE_BIAS, sem) + - RWSEM_ACTIVE_BIAS; + if (oldcount & RWSEM_ACTIVE_MASK) + goto undo; + + waiter = list_entry(sem->wait_list.next, struct rwsem_waiter, list); + + /* try to grant a single write lock if there's a writer at the front + * of the queue - note we leave the 'active part' of the count + * incremented by 1 and the waiting part incremented by 0x00010000 + */ + if (!(waiter->flags & RWSEM_WAITING_FOR_WRITE)) + goto readers_only; + + /* We must be careful not to touch 'waiter' after we set ->task = NULL. + * It is allocated on the waiter's stack and may become invalid at + * any time after that point (due to a wakeup from another source). 
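+ *
+ * This pairs with the wait loop in rwsem_down_failed_common() below:
+ * the waiter spins on its own waiter->task rather than on the
+ * semaphore, so once ->task is NULL it may return and pop the stack
+ * frame holding 'waiter':
+ *
+ *	for (;;) {
+ *		if (!waiter->task)
+ *			break;
+ *		schedule();
+ *		set_task_state(tsk, TASK_UNINTERRUPTIBLE);
+ *	}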
+ */ + list_del(&waiter->list); + tsk = waiter->task; + smp_mb(); + waiter->task = NULL; + wake_up_process(tsk); + put_task_struct(tsk); + goto out; + + /* don't want to wake any writers */ + dont_wake_writers: + waiter = list_entry(sem->wait_list.next, struct rwsem_waiter, list); + if (waiter->flags & RWSEM_WAITING_FOR_WRITE) + goto out; + + /* grant an infinite number of read locks to the readers at the front + * of the queue + * - note we increment the 'active part' of the count by the number of + * readers before waking any processes up + */ + readers_only: + woken = 0; + do { + woken++; + + if (waiter->list.next == &sem->wait_list) + break; + + waiter = list_entry(waiter->list.next, + struct rwsem_waiter, list); + + } while (waiter->flags & RWSEM_WAITING_FOR_READ); + + loop = woken; + woken *= RWSEM_ACTIVE_BIAS - RWSEM_WAITING_BIAS; + if (!downgrading) + /* we'd already done one increment earlier */ + woken -= RWSEM_ACTIVE_BIAS; + + rwsem_atomic_add(woken, sem); + + next = sem->wait_list.next; + for (; loop > 0; loop--) { + waiter = list_entry(next, struct rwsem_waiter, list); + next = waiter->list.next; + tsk = waiter->task; + smp_mb(); + waiter->task = NULL; + wake_up_process(tsk); + put_task_struct(tsk); + } + + sem->wait_list.next = next; + next->prev = &sem->wait_list; + + out: + return sem; + + /* undo the change to count, but check for a transition 1->0 */ + undo: + if (rwsem_atomic_update(-RWSEM_ACTIVE_BIAS, sem) != 0) + goto out; + goto try_again; +} + +/* + * wait for a lock to be granted + */ +static struct rw_semaphore __sched * +rwsem_down_failed_common(struct rw_semaphore *sem, + struct rwsem_waiter *waiter, signed long adjustment) +{ + struct task_struct *tsk = current; + signed long count; + + set_task_state(tsk, TASK_UNINTERRUPTIBLE); + + /* set up my own style of waitqueue */ + spin_lock_irq(&sem->wait_lock); + waiter->task = tsk; + get_task_struct(tsk); + + list_add_tail(&waiter->list, &sem->wait_list); + + /* we're now waiting on the lock, but no longer actively read-locking */ + count = rwsem_atomic_update(adjustment, sem); + + /* if there are no active locks, wake the front queued process(es) up */ + if (!(count & RWSEM_ACTIVE_MASK)) + sem = __rwsem_do_wake(sem, 0); + + spin_unlock_irq(&sem->wait_lock); + + /* wait to be given the lock */ + for (;;) { + if (!waiter->task) + break; + schedule(); + set_task_state(tsk, TASK_UNINTERRUPTIBLE); + } + + tsk->state = TASK_RUNNING; + + return sem; +} + +/* + * wait for the read lock to be granted + */ +asmregparm struct rw_semaphore __sched * +rwsem_down_read_failed(struct rw_semaphore *sem) +{ + struct rwsem_waiter waiter; + + waiter.flags = RWSEM_WAITING_FOR_READ; + rwsem_down_failed_common(sem, &waiter, + RWSEM_WAITING_BIAS - RWSEM_ACTIVE_BIAS); + return sem; +} + +/* + * wait for the write lock to be granted + */ +asmregparm struct rw_semaphore __sched * +rwsem_down_write_failed(struct rw_semaphore *sem) +{ + struct rwsem_waiter waiter; + + waiter.flags = RWSEM_WAITING_FOR_WRITE; + rwsem_down_failed_common(sem, &waiter, -RWSEM_ACTIVE_BIAS); + + return sem; +} + +/* + * handle waking up a waiter on the semaphore + * - up_read/up_write has decremented the active part of count if we come here + */ +asmregparm struct rw_semaphore *rwsem_wake(struct rw_semaphore *sem) +{ + unsigned long flags; + + spin_lock_irqsave(&sem->wait_lock, flags); + + /* do nothing if list empty */ + if (!list_empty(&sem->wait_list)) + sem = __rwsem_do_wake(sem, 0); + + spin_unlock_irqrestore(&sem->wait_lock, flags); + + return sem; +} + +/* + * 
downgrade a write lock into a read lock + * - caller incremented waiting part of count and discovered it still negative + * - just wake up any readers at the front of the queue + */ +asmregparm struct rw_semaphore *rwsem_downgrade_wake(struct rw_semaphore *sem) +{ + unsigned long flags; + + spin_lock_irqsave(&sem->wait_lock, flags); + + /* do nothing if list empty */ + if (!list_empty(&sem->wait_list)) + sem = __rwsem_do_wake(sem, 1); + + spin_unlock_irqrestore(&sem->wait_lock, flags); + + return sem; +} + +EXPORT_SYMBOL(rwsem_down_read_failed); +EXPORT_SYMBOL(rwsem_down_write_failed); +EXPORT_SYMBOL(rwsem_wake); +EXPORT_SYMBOL(rwsem_downgrade_wake); diff --git a/libdde-linux26/contrib/lib/scatterlist.c b/libdde-linux26/contrib/lib/scatterlist.c new file mode 100644 index 00000000..b7b449da --- /dev/null +++ b/libdde-linux26/contrib/lib/scatterlist.c @@ -0,0 +1,484 @@ +/* + * Copyright (C) 2007 Jens Axboe <jens.axboe@oracle.com> + * + * Scatterlist handling helpers. + * + * This source code is licensed under the GNU General Public License, + * Version 2. See the file COPYING for more details. + */ +#include <linux/module.h> +#include <linux/scatterlist.h> +#include <linux/highmem.h> + +/** + * sg_next - return the next scatterlist entry in a list + * @sg: The current sg entry + * + * Description: + * Usually the next entry will be @sg@ + 1, but if this sg element is part + * of a chained scatterlist, it could jump to the start of a new + * scatterlist array. + * + **/ +struct scatterlist *sg_next(struct scatterlist *sg) +{ +#ifdef CONFIG_DEBUG_SG + BUG_ON(sg->sg_magic != SG_MAGIC); +#endif + if (sg_is_last(sg)) + return NULL; + + sg++; + if (unlikely(sg_is_chain(sg))) + sg = sg_chain_ptr(sg); + + return sg; +} +EXPORT_SYMBOL(sg_next); + +/** + * sg_last - return the last scatterlist entry in a list + * @sgl: First entry in the scatterlist + * @nents: Number of entries in the scatterlist + * + * Description: + * Should only be used casually, it (currently) scans the entire list + * to get the last entry. + * + * Note that the @sgl@ pointer passed in need not be the first one, + * the important bit is that @nents@ denotes the number of entries that + * exist from @sgl@. + * + **/ +struct scatterlist *sg_last(struct scatterlist *sgl, unsigned int nents) +{ +#ifndef ARCH_HAS_SG_CHAIN + struct scatterlist *ret = &sgl[nents - 1]; +#else + struct scatterlist *sg, *ret = NULL; + unsigned int i; + + for_each_sg(sgl, sg, nents, i) + ret = sg; + +#endif +#ifdef CONFIG_DEBUG_SG + BUG_ON(sgl[0].sg_magic != SG_MAGIC); + BUG_ON(!sg_is_last(ret)); +#endif + return ret; +} +EXPORT_SYMBOL(sg_last); + +/** + * sg_init_table - Initialize SG table + * @sgl: The SG table + * @nents: Number of entries in table + * + * Notes: + * If this is part of a chained sg table, sg_mark_end() should be + * used only on the last table part. 
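+ *
+ * A sketch of the common single-table case (the two buffers are
+ * illustrative):
+ *
+ *	struct scatterlist sgl[2];
+ *
+ *	sg_init_table(sgl, 2);
+ *	sg_set_buf(&sgl[0], hdr, hdr_len);
+ *	sg_set_buf(&sgl[1], payload, payload_len);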
+ * + **/ +void sg_init_table(struct scatterlist *sgl, unsigned int nents) +{ + memset(sgl, 0, sizeof(*sgl) * nents); +#ifdef CONFIG_DEBUG_SG + { + unsigned int i; + for (i = 0; i < nents; i++) + sgl[i].sg_magic = SG_MAGIC; + } +#endif + sg_mark_end(&sgl[nents - 1]); +} +EXPORT_SYMBOL(sg_init_table); + +/** + * sg_init_one - Initialize a single entry sg list + * @sg: SG entry + * @buf: Virtual address for IO + * @buflen: IO length + * + **/ +void sg_init_one(struct scatterlist *sg, const void *buf, unsigned int buflen) +{ + sg_init_table(sg, 1); + sg_set_buf(sg, buf, buflen); +} +EXPORT_SYMBOL(sg_init_one); + +/* + * The default behaviour of sg_alloc_table() is to use these kmalloc/kfree + * helpers. + */ +static struct scatterlist *sg_kmalloc(unsigned int nents, gfp_t gfp_mask) +{ + if (nents == SG_MAX_SINGLE_ALLOC) + return (struct scatterlist *) __get_free_page(gfp_mask); + else + return kmalloc(nents * sizeof(struct scatterlist), gfp_mask); +} + +static void sg_kfree(struct scatterlist *sg, unsigned int nents) +{ + if (nents == SG_MAX_SINGLE_ALLOC) + free_page((unsigned long) sg); + else + kfree(sg); +} + +/** + * __sg_free_table - Free a previously mapped sg table + * @table: The sg table header to use + * @max_ents: The maximum number of entries per single scatterlist + * @free_fn: Free function + * + * Description: + * Free an sg table previously allocated and setup with + * __sg_alloc_table(). The @max_ents value must be identical to + * that previously used with __sg_alloc_table(). + * + **/ +void __sg_free_table(struct sg_table *table, unsigned int max_ents, + sg_free_fn *free_fn) +{ + struct scatterlist *sgl, *next; + + if (unlikely(!table->sgl)) + return; + + sgl = table->sgl; + while (table->orig_nents) { + unsigned int alloc_size = table->orig_nents; + unsigned int sg_size; + + /* + * If we have more than max_ents segments left, + * then assign 'next' to the sg table after the current one. + * sg_size is then one less than alloc size, since the last + * element is the chain pointer. + */ + if (alloc_size > max_ents) { + next = sg_chain_ptr(&sgl[max_ents - 1]); + alloc_size = max_ents; + sg_size = alloc_size - 1; + } else { + sg_size = alloc_size; + next = NULL; + } + + table->orig_nents -= sg_size; + free_fn(sgl, alloc_size); + sgl = next; + } + + table->sgl = NULL; +} +EXPORT_SYMBOL(__sg_free_table); + +/** + * sg_free_table - Free a previously allocated sg table + * @table: The mapped sg table header + * + **/ +void sg_free_table(struct sg_table *table) +{ + __sg_free_table(table, SG_MAX_SINGLE_ALLOC, sg_kfree); +} +EXPORT_SYMBOL(sg_free_table); + +/** + * __sg_alloc_table - Allocate and initialize an sg table with given allocator + * @table: The sg table header to use + * @nents: Number of entries in sg list + * @max_ents: The maximum number of entries the allocator returns per call + * @gfp_mask: GFP allocation mask + * @alloc_fn: Allocator to use + * + * Description: + * This function returns a @table @nents long. The allocator is + * defined to return scatterlist chunks of maximum size @max_ents. + * Thus if @nents is bigger than @max_ents, the scatterlists will be + * chained in units of @max_ents. + * + * Notes: + * If this function returns non-0 (eg failure), the caller must call + * __sg_free_table() to cleanup any leftover allocations. 
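+ *
+ * Callers normally use the sg_alloc_table()/sg_free_table() wrappers
+ * below; the expected pairing looks like (error handling abbreviated):
+ *
+ *	struct sg_table table;
+ *
+ *	if (sg_alloc_table(&table, nents, GFP_KERNEL))
+ *		return -ENOMEM;
+ *	... use table.sgl ...
+ *	sg_free_table(&table);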
+ * + **/ +int __sg_alloc_table(struct sg_table *table, unsigned int nents, + unsigned int max_ents, gfp_t gfp_mask, + sg_alloc_fn *alloc_fn) +{ + struct scatterlist *sg, *prv; + unsigned int left; + +#ifndef ARCH_HAS_SG_CHAIN + BUG_ON(nents > max_ents); +#endif + + memset(table, 0, sizeof(*table)); + + left = nents; + prv = NULL; + do { + unsigned int sg_size, alloc_size = left; + + if (alloc_size > max_ents) { + alloc_size = max_ents; + sg_size = alloc_size - 1; + } else + sg_size = alloc_size; + + left -= sg_size; + + sg = alloc_fn(alloc_size, gfp_mask); + if (unlikely(!sg)) + return -ENOMEM; + + sg_init_table(sg, alloc_size); + table->nents = table->orig_nents += sg_size; + + /* + * If this is the first mapping, assign the sg table header. + * If this is not the first mapping, chain previous part. + */ + if (prv) + sg_chain(prv, max_ents, sg); + else + table->sgl = sg; + + /* + * If no more entries after this one, mark the end + */ + if (!left) + sg_mark_end(&sg[sg_size - 1]); + + /* + * only really needed for mempool backed sg allocations (like + * SCSI), a possible improvement here would be to pass the + * table pointer into the allocator and let that clear these + * flags + */ + gfp_mask &= ~__GFP_WAIT; + gfp_mask |= __GFP_HIGH; + prv = sg; + } while (left); + + return 0; +} +EXPORT_SYMBOL(__sg_alloc_table); + +/** + * sg_alloc_table - Allocate and initialize an sg table + * @table: The sg table header to use + * @nents: Number of entries in sg list + * @gfp_mask: GFP allocation mask + * + * Description: + * Allocate and initialize an sg table. If @nents@ is larger than + * SG_MAX_SINGLE_ALLOC a chained sg table will be setup. + * + **/ +int sg_alloc_table(struct sg_table *table, unsigned int nents, gfp_t gfp_mask) +{ + int ret; + + ret = __sg_alloc_table(table, nents, SG_MAX_SINGLE_ALLOC, + gfp_mask, sg_kmalloc); + if (unlikely(ret)) + __sg_free_table(table, SG_MAX_SINGLE_ALLOC, sg_kfree); + + return ret; +} +EXPORT_SYMBOL(sg_alloc_table); + +/** + * sg_miter_start - start mapping iteration over a sg list + * @miter: sg mapping iter to be started + * @sgl: sg list to iterate over + * @nents: number of sg entries + * + * Description: + * Starts mapping iterator @miter. + * + * Context: + * Don't care. + */ +void sg_miter_start(struct sg_mapping_iter *miter, struct scatterlist *sgl, + unsigned int nents, unsigned int flags) +{ + memset(miter, 0, sizeof(struct sg_mapping_iter)); + + miter->__sg = sgl; + miter->__nents = nents; + miter->__offset = 0; + miter->__flags = flags; +} +EXPORT_SYMBOL(sg_miter_start); + +/** + * sg_miter_next - proceed mapping iterator to the next mapping + * @miter: sg mapping iter to proceed + * + * Description: + * Proceeds @miter@ to the next mapping. @miter@ should have been + * started using sg_miter_start(). On successful return, + * @miter@->page, @miter@->addr and @miter@->length point to the + * current mapping. + * + * Context: + * IRQ disabled if SG_MITER_ATOMIC. IRQ must stay disabled till + * @miter@ is stopped. May sleep if !SG_MITER_ATOMIC. + * + * Returns: + * true if @miter contains the next mapping. false if end of sg + * list is reached. + */ +bool sg_miter_next(struct sg_mapping_iter *miter) +{ + unsigned int off, len; + + /* check for end and drop resources from the last iteration */ + if (!miter->__nents) + return false; + + sg_miter_stop(miter); + + /* get to the next sg if necessary. 
__offset is adjusted by stop */ + if (miter->__offset == miter->__sg->length && --miter->__nents) { + miter->__sg = sg_next(miter->__sg); + miter->__offset = 0; + } + + /* map the next page */ + off = miter->__sg->offset + miter->__offset; + len = miter->__sg->length - miter->__offset; + + miter->page = nth_page(sg_page(miter->__sg), off >> PAGE_SHIFT); + off &= ~PAGE_MASK; + miter->length = min_t(unsigned int, len, PAGE_SIZE - off); + miter->consumed = miter->length; + + if (miter->__flags & SG_MITER_ATOMIC) + miter->addr = kmap_atomic(miter->page, KM_BIO_SRC_IRQ) + off; + else + miter->addr = kmap(miter->page) + off; + + return true; +} +EXPORT_SYMBOL(sg_miter_next); + +/** + * sg_miter_stop - stop mapping iteration + * @miter: sg mapping iter to be stopped + * + * Description: + * Stops mapping iterator @miter. @miter should have been started + * using sg_miter_start(). A stopped iteration can be + * resumed by calling sg_miter_next() on it. This is useful when + * resources (kmap) need to be released during iteration. + * + * Context: + * IRQ disabled if SG_MITER_ATOMIC is set. Don't care otherwise. + */ +void sg_miter_stop(struct sg_mapping_iter *miter) +{ + WARN_ON(miter->consumed > miter->length); + + /* drop resources from the last iteration */ + if (miter->addr) { + miter->__offset += miter->consumed; + + if (miter->__flags & SG_MITER_ATOMIC) { + WARN_ON(!irqs_disabled()); + kunmap_atomic(miter->addr, KM_BIO_SRC_IRQ); + } else + kunmap(miter->page); + + miter->page = NULL; + miter->addr = NULL; + miter->length = 0; + miter->consumed = 0; + } +} +EXPORT_SYMBOL(sg_miter_stop); + +/** + * sg_copy_buffer - Copy data between a linear buffer and an SG list + * @sgl: The SG list + * @nents: Number of SG entries + * @buf: The linear buffer to copy to or from + * @buflen: The number of bytes to copy + * @to_buffer: transfer direction (non-zero == from an sg list to a + * buffer, 0 == from a buffer to an sg list) + * + * Returns the number of copied bytes. + * + **/ +static size_t sg_copy_buffer(struct scatterlist *sgl, unsigned int nents, + void *buf, size_t buflen, int to_buffer) +{ + unsigned int offset = 0; + struct sg_mapping_iter miter; + unsigned long flags; + + sg_miter_start(&miter, sgl, nents, SG_MITER_ATOMIC); + + local_irq_save(flags); + + while (sg_miter_next(&miter) && offset < buflen) { + unsigned int len; + + len = min(miter.length, buflen - offset); + + if (to_buffer) + memcpy(buf + offset, miter.addr, len); + else { + memcpy(miter.addr, buf + offset, len); + flush_kernel_dcache_page(miter.page); + } + + offset += len; + } + + sg_miter_stop(&miter); + + local_irq_restore(flags); + return offset; +} + +/** + * sg_copy_from_buffer - Copy from a linear buffer to an SG list + * @sgl: The SG list + * @nents: Number of SG entries + * @buf: Where to copy from + * @buflen: The number of bytes to copy + * + * Returns the number of copied bytes. + * + **/ +size_t sg_copy_from_buffer(struct scatterlist *sgl, unsigned int nents, + void *buf, size_t buflen) +{ + return sg_copy_buffer(sgl, nents, buf, buflen, 0); +} +EXPORT_SYMBOL(sg_copy_from_buffer); + +/** + * sg_copy_to_buffer - Copy from an SG list to a linear buffer + * @sgl: The SG list + * @nents: Number of SG entries + * @buf: Where to copy to + * @buflen: The number of bytes to copy + * + * Returns the number of copied bytes. 
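+ *
+ * For example, draining an sg list into a linear scratch buffer
+ * (scratch and scratch_len are illustrative):
+ *
+ *	size_t copied = sg_copy_to_buffer(sgl, nents, scratch,
+ *					  scratch_len);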
+ * + **/ +size_t sg_copy_to_buffer(struct scatterlist *sgl, unsigned int nents, + void *buf, size_t buflen) +{ + return sg_copy_buffer(sgl, nents, buf, buflen, 1); +} +EXPORT_SYMBOL(sg_copy_to_buffer); diff --git a/libdde-linux26/contrib/lib/sha1.c b/libdde-linux26/contrib/lib/sha1.c new file mode 100644 index 00000000..4c45fd50 --- /dev/null +++ b/libdde-linux26/contrib/lib/sha1.c @@ -0,0 +1,95 @@ +/* + * SHA transform algorithm, originally taken from code written by + * Peter Gutmann, and placed in the public domain. + */ + +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/cryptohash.h> + +/* The SHA f()-functions. */ + +#define f1(x,y,z) (z ^ (x & (y ^ z))) /* x ? y : z */ +#define f2(x,y,z) (x ^ y ^ z) /* XOR */ +#define f3(x,y,z) ((x & y) + (z & (x ^ y))) /* majority */ + +/* The SHA Mysterious Constants */ + +#define K1 0x5A827999L /* Rounds 0-19: sqrt(2) * 2^30 */ +#define K2 0x6ED9EBA1L /* Rounds 20-39: sqrt(3) * 2^30 */ +#define K3 0x8F1BBCDCL /* Rounds 40-59: sqrt(5) * 2^30 */ +#define K4 0xCA62C1D6L /* Rounds 60-79: sqrt(10) * 2^30 */ + +/** + * sha_transform - single block SHA1 transform + * + * @digest: 160 bit digest to update + * @data: 512 bits of data to hash + * @W: 80 words of workspace (see note) + * + * This function generates a SHA1 digest for a single 512-bit block. + * Be warned, it does not handle padding and message digest, do not + * confuse it with the full FIPS 180-1 digest algorithm for variable + * length messages. + * + * Note: If the hash is security sensitive, the caller should be sure + * to clear the workspace. This is left to the caller to avoid + * unnecessary clears between chained hashing operations. + */ +void sha_transform(__u32 *digest, const char *in, __u32 *W) +{ + __u32 a, b, c, d, e, t, i; + + for (i = 0; i < 16; i++) + W[i] = be32_to_cpu(((const __be32 *)in)[i]); + + for (i = 0; i < 64; i++) + W[i+16] = rol32(W[i+13] ^ W[i+8] ^ W[i+2] ^ W[i], 1); + + a = digest[0]; + b = digest[1]; + c = digest[2]; + d = digest[3]; + e = digest[4]; + + for (i = 0; i < 20; i++) { + t = f1(b, c, d) + K1 + rol32(a, 5) + e + W[i]; + e = d; d = c; c = rol32(b, 30); b = a; a = t; + } + + for (; i < 40; i ++) { + t = f2(b, c, d) + K2 + rol32(a, 5) + e + W[i]; + e = d; d = c; c = rol32(b, 30); b = a; a = t; + } + + for (; i < 60; i ++) { + t = f3(b, c, d) + K3 + rol32(a, 5) + e + W[i]; + e = d; d = c; c = rol32(b, 30); b = a; a = t; + } + + for (; i < 80; i ++) { + t = f2(b, c, d) + K4 + rol32(a, 5) + e + W[i]; + e = d; d = c; c = rol32(b, 30); b = a; a = t; + } + + digest[0] += a; + digest[1] += b; + digest[2] += c; + digest[3] += d; + digest[4] += e; +} +EXPORT_SYMBOL(sha_transform); + +/** + * sha_init - initialize the vectors for a SHA1 digest + * @buf: vector to initialize + */ +void sha_init(__u32 *buf) +{ + buf[0] = 0x67452301; + buf[1] = 0xefcdab89; + buf[2] = 0x98badcfe; + buf[3] = 0x10325476; + buf[4] = 0xc3d2e1f0; +} + diff --git a/libdde-linux26/contrib/lib/string.c b/libdde-linux26/contrib/lib/string.c new file mode 100644 index 00000000..b19b87af --- /dev/null +++ b/libdde-linux26/contrib/lib/string.c @@ -0,0 +1,689 @@ +/* + * linux/lib/string.c + * + * Copyright (C) 1991, 1992 Linus Torvalds + */ + +/* + * stupid library routines.. The optimized versions should generally be found + * as inline code in <asm-xx/string.h> + * + * These are buggy as well.. + * + * * Fri Jun 25 1999, Ingo Oeser <ioe@informatik.tu-chemnitz.de> + * - Added strsep() which will replace strtok() soon (because strsep() is + * reentrant and should be faster). 
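+ *	A typical use (handle() is illustrative; note that strsep()
+ *	also returns empty tokens):
+ *
+ *		char *s = line, *tok;
+ *		while ((tok = strsep(&s, ",")) != NULL)
+ *			handle(tok);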
Use only strsep() in new code, please. + * + * * Sat Feb 09 2002, Jason Thomas <jason@topic.com.au>, + * Matthew Hawkins <matt@mh.dropbear.id.au> + * - Kissed strtok() goodbye + */ + +#include <linux/types.h> +#include <linux/string.h> +#include <linux/ctype.h> +#include <linux/module.h> + +#ifndef __HAVE_ARCH_STRNICMP +/** + * strnicmp - Case insensitive, length-limited string comparison + * @s1: One string + * @s2: The other string + * @len: the maximum number of characters to compare + */ +int strnicmp(const char *s1, const char *s2, size_t len) +{ + /* Yes, Virginia, it had better be unsigned */ + unsigned char c1, c2; + + c1 = c2 = 0; + if (len) { + do { + c1 = *s1; + c2 = *s2; + s1++; + s2++; + if (!c1) + break; + if (!c2) + break; + if (c1 == c2) + continue; + c1 = tolower(c1); + c2 = tolower(c2); + if (c1 != c2) + break; + } while (--len); + } + return (int)c1 - (int)c2; +} +EXPORT_SYMBOL(strnicmp); +#endif + +#ifndef __HAVE_ARCH_STRCASECMP +int strcasecmp(const char *s1, const char *s2) +{ + int c1, c2; + + do { + c1 = tolower(*s1++); + c2 = tolower(*s2++); + } while (c1 == c2 && c1 != 0); + return c1 - c2; +} +EXPORT_SYMBOL(strcasecmp); +#endif + +#ifndef __HAVE_ARCH_STRNCASECMP +int strncasecmp(const char *s1, const char *s2, size_t n) +{ + int c1, c2; + + do { + c1 = tolower(*s1++); + c2 = tolower(*s2++); + } while ((--n > 0) && c1 == c2 && c1 != 0); + return c1 - c2; +} +EXPORT_SYMBOL(strncasecmp); +#endif + +#ifndef __HAVE_ARCH_STRCPY +/** + * strcpy - Copy a %NUL terminated string + * @dest: Where to copy the string to + * @src: Where to copy the string from + */ +#undef strcpy +char *strcpy(char *dest, const char *src) +{ + char *tmp = dest; + + while ((*dest++ = *src++) != '\0') + /* nothing */; + return tmp; +} +EXPORT_SYMBOL(strcpy); +#endif + +#ifndef __HAVE_ARCH_STRNCPY +/** + * strncpy - Copy a length-limited, %NUL-terminated string + * @dest: Where to copy the string to + * @src: Where to copy the string from + * @count: The maximum number of bytes to copy + * + * The result is not %NUL-terminated if the source exceeds + * @count bytes. + * + * In the case where the length of @src is less than that of + * count, the remainder of @dest will be padded with %NUL. + * + */ +char *strncpy(char *dest, const char *src, size_t count) +{ + char *tmp = dest; + + while (count) { + if ((*tmp = *src) != 0) + src++; + tmp++; + count--; + } + return dest; +} +EXPORT_SYMBOL(strncpy); +#endif + +#ifndef __HAVE_ARCH_STRLCPY +/** + * strlcpy - Copy a %NUL terminated string into a sized buffer + * @dest: Where to copy the string to + * @src: Where to copy the string from + * @size: size of destination buffer + * + * Compatible with *BSD: the result is always a valid + * NUL-terminated string that fits in the buffer (unless, + * of course, the buffer size is zero). It does not pad + * out the result like strncpy() does. + */ +size_t strlcpy(char *dest, const char *src, size_t size) +{ + size_t ret = strlen(src); + + if (size) { + size_t len = (ret >= size) ? 
size - 1 : ret; + memcpy(dest, src, len); + dest[len] = '\0'; + } + return ret; +} +EXPORT_SYMBOL(strlcpy); +#endif + +#ifndef __HAVE_ARCH_STRCAT +/** + * strcat - Append one %NUL-terminated string to another + * @dest: The string to be appended to + * @src: The string to append to it + */ +#undef strcat +char *strcat(char *dest, const char *src) +{ + char *tmp = dest; + + while (*dest) + dest++; + while ((*dest++ = *src++) != '\0') + ; + return tmp; +} +EXPORT_SYMBOL(strcat); +#endif + +#ifndef __HAVE_ARCH_STRNCAT +/** + * strncat - Append a length-limited, %NUL-terminated string to another + * @dest: The string to be appended to + * @src: The string to append to it + * @count: The maximum numbers of bytes to copy + * + * Note that in contrast to strncpy(), strncat() ensures the result is + * terminated. + */ +char *strncat(char *dest, const char *src, size_t count) +{ + char *tmp = dest; + + if (count) { + while (*dest) + dest++; + while ((*dest++ = *src++) != 0) { + if (--count == 0) { + *dest = '\0'; + break; + } + } + } + return tmp; +} +EXPORT_SYMBOL(strncat); +#endif + +#ifndef __HAVE_ARCH_STRLCAT +/** + * strlcat - Append a length-limited, %NUL-terminated string to another + * @dest: The string to be appended to + * @src: The string to append to it + * @count: The size of the destination buffer. + */ +size_t strlcat(char *dest, const char *src, size_t count) +{ + size_t dsize = strlen(dest); + size_t len = strlen(src); + size_t res = dsize + len; + + /* This would be a bug */ + BUG_ON(dsize >= count); + + dest += dsize; + count -= dsize; + if (len >= count) + len = count-1; + memcpy(dest, src, len); + dest[len] = 0; + return res; +} +EXPORT_SYMBOL(strlcat); +#endif + +#ifndef __HAVE_ARCH_STRCMP +/** + * strcmp - Compare two strings + * @cs: One string + * @ct: Another string + */ +#undef strcmp +int strcmp(const char *cs, const char *ct) +{ + signed char __res; + + while (1) { + if ((__res = *cs - *ct++) != 0 || !*cs++) + break; + } + return __res; +} +EXPORT_SYMBOL(strcmp); +#endif + +#ifndef __HAVE_ARCH_STRNCMP +/** + * strncmp - Compare two length-limited strings + * @cs: One string + * @ct: Another string + * @count: The maximum number of bytes to compare + */ +int strncmp(const char *cs, const char *ct, size_t count) +{ + signed char __res = 0; + + while (count) { + if ((__res = *cs - *ct++) != 0 || !*cs++) + break; + count--; + } + return __res; +} +EXPORT_SYMBOL(strncmp); +#endif + +#ifndef __HAVE_ARCH_STRCHR +/** + * strchr - Find the first occurrence of a character in a string + * @s: The string to be searched + * @c: The character to search for + */ +char *strchr(const char *s, int c) +{ + for (; *s != (char)c; ++s) + if (*s == '\0') + return NULL; + return (char *)s; +} +EXPORT_SYMBOL(strchr); +#endif + +#ifndef __HAVE_ARCH_STRRCHR +/** + * strrchr - Find the last occurrence of a character in a string + * @s: The string to be searched + * @c: The character to search for + */ +char *strrchr(const char *s, int c) +{ + const char *p = s + strlen(s); + do { + if (*p == (char)c) + return (char *)p; + } while (--p >= s); + return NULL; +} +EXPORT_SYMBOL(strrchr); +#endif + +#ifndef __HAVE_ARCH_STRNCHR +/** + * strnchr - Find a character in a length limited string + * @s: The string to be searched + * @count: The number of characters to be searched + * @c: The character to search for + */ +char *strnchr(const char *s, size_t count, int c) +{ + for (; count-- && *s != '\0'; ++s) + if (*s == (char)c) + return (char *)s; + return NULL; +} +EXPORT_SYMBOL(strnchr); +#endif + +/** + 
* strstrip - Removes leading and trailing whitespace from @s. + * @s: The string to be stripped. + * + * Note that the first trailing whitespace is replaced with a %NUL-terminator + * in the given string @s. Returns a pointer to the first non-whitespace + * character in @s. + */ +char *strstrip(char *s) +{ + size_t size; + char *end; + + size = strlen(s); + + if (!size) + return s; + + end = s + size - 1; + while (end >= s && isspace(*end)) + end--; + *(end + 1) = '\0'; + + while (*s && isspace(*s)) + s++; + + return s; +} +EXPORT_SYMBOL(strstrip); + +#ifndef __HAVE_ARCH_STRLEN +/** + * strlen - Find the length of a string + * @s: The string to be sized + */ +size_t strlen(const char *s) +{ + const char *sc; + + for (sc = s; *sc != '\0'; ++sc) + /* nothing */; + return sc - s; +} +EXPORT_SYMBOL(strlen); +#endif + +#ifndef __HAVE_ARCH_STRNLEN +/** + * strnlen - Find the length of a length-limited string + * @s: The string to be sized + * @count: The maximum number of bytes to search + */ +size_t strnlen(const char *s, size_t count) +{ + const char *sc; + + for (sc = s; count-- && *sc != '\0'; ++sc) + /* nothing */; + return sc - s; +} +EXPORT_SYMBOL(strnlen); +#endif + +#ifndef __HAVE_ARCH_STRSPN +/** + * strspn - Calculate the length of the initial substring of @s which only contain letters in @accept + * @s: The string to be searched + * @accept: The string to search for + */ +size_t strspn(const char *s, const char *accept) +{ + const char *p; + const char *a; + size_t count = 0; + + for (p = s; *p != '\0'; ++p) { + for (a = accept; *a != '\0'; ++a) { + if (*p == *a) + break; + } + if (*a == '\0') + return count; + ++count; + } + return count; +} + +EXPORT_SYMBOL(strspn); +#endif + +#ifndef __HAVE_ARCH_STRCSPN +/** + * strcspn - Calculate the length of the initial substring of @s which does not contain letters in @reject + * @s: The string to be searched + * @reject: The string to avoid + */ +size_t strcspn(const char *s, const char *reject) +{ + const char *p; + const char *r; + size_t count = 0; + + for (p = s; *p != '\0'; ++p) { + for (r = reject; *r != '\0'; ++r) { + if (*p == *r) + return count; + } + ++count; + } + return count; +} +EXPORT_SYMBOL(strcspn); +#endif + +#ifndef __HAVE_ARCH_STRPBRK +/** + * strpbrk - Find the first occurrence of a set of characters + * @cs: The string to be searched + * @ct: The characters to search for + */ +char *strpbrk(const char *cs, const char *ct) +{ + const char *sc1, *sc2; + + for (sc1 = cs; *sc1 != '\0'; ++sc1) { + for (sc2 = ct; *sc2 != '\0'; ++sc2) { + if (*sc1 == *sc2) + return (char *)sc1; + } + } + return NULL; +} +EXPORT_SYMBOL(strpbrk); +#endif + +#ifndef __HAVE_ARCH_STRSEP +/** + * strsep - Split a string into tokens + * @s: The string to be searched + * @ct: The characters to search for + * + * strsep() updates @s to point after the token, ready for the next call. + * + * It returns empty tokens, too, behaving exactly like the libc function + * of that name. In fact, it was stolen from glibc2 and de-fancy-fied. + * Same semantics, slimmer shape. 
;) + */ +char *strsep(char **s, const char *ct) +{ + char *sbegin = *s; + char *end; + + if (sbegin == NULL) + return NULL; + + end = strpbrk(sbegin, ct); + if (end) + *end++ = '\0'; + *s = end; + return sbegin; +} +EXPORT_SYMBOL(strsep); +#endif + +/** + * sysfs_streq - return true if strings are equal, modulo trailing newline + * @s1: one string + * @s2: another string + * + * This routine returns true iff two strings are equal, treating both + * NUL and newline-then-NUL as equivalent string terminations. It's + * geared for use with sysfs input strings, which generally terminate + * with newlines but are compared against values without newlines. + */ +bool sysfs_streq(const char *s1, const char *s2) +{ + while (*s1 && *s1 == *s2) { + s1++; + s2++; + } + + if (*s1 == *s2) + return true; + if (!*s1 && *s2 == '\n' && !s2[1]) + return true; + if (*s1 == '\n' && !s1[1] && !*s2) + return true; + return false; +} +EXPORT_SYMBOL(sysfs_streq); + +#ifndef __HAVE_ARCH_MEMSET +/** + * memset - Fill a region of memory with the given value + * @s: Pointer to the start of the area. + * @c: The byte to fill the area with + * @count: The size of the area. + * + * Do not use memset() to access IO space, use memset_io() instead. + */ +void *memset(void *s, int c, size_t count) +{ + char *xs = s; + + while (count--) + *xs++ = c; + return s; +} +EXPORT_SYMBOL(memset); +#endif + +#ifndef __HAVE_ARCH_MEMCPY +/** + * memcpy - Copy one area of memory to another + * @dest: Where to copy to + * @src: Where to copy from + * @count: The size of the area. + * + * You should not use this function to access IO space, use memcpy_toio() + * or memcpy_fromio() instead. + */ +void *memcpy(void *dest, const void *src, size_t count) +{ + char *tmp = dest; + const char *s = src; + + while (count--) + *tmp++ = *s++; + return dest; +} +EXPORT_SYMBOL(memcpy); +#endif + +#ifndef __HAVE_ARCH_MEMMOVE +/** + * memmove - Copy one area of memory to another + * @dest: Where to copy to + * @src: Where to copy from + * @count: The size of the area. + * + * Unlike memcpy(), memmove() copes with overlapping areas. + */ +void *memmove(void *dest, const void *src, size_t count) +{ + char *tmp; + const char *s; + + if (dest <= src) { + tmp = dest; + s = src; + while (count--) + *tmp++ = *s++; + } else { + tmp = dest; + tmp += count; + s = src; + s += count; + while (count--) + *--tmp = *--s; + } + return dest; +} +EXPORT_SYMBOL(memmove); +#endif + +#ifndef __HAVE_ARCH_MEMCMP +/** + * memcmp - Compare two areas of memory + * @cs: One area of memory + * @ct: Another area of memory + * @count: The size of the area. + */ +#undef memcmp +int memcmp(const void *cs, const void *ct, size_t count) +{ + const unsigned char *su1, *su2; + int res = 0; + + for (su1 = cs, su2 = ct; 0 < count; ++su1, ++su2, count--) + if ((res = *su1 - *su2) != 0) + break; + return res; +} +EXPORT_SYMBOL(memcmp); +#endif + +#ifndef __HAVE_ARCH_MEMSCAN +/** + * memscan - Find a character in an area of memory. + * @addr: The memory area + * @c: The byte to search for + * @size: The size of the area. 
+ * + * returns the address of the first occurrence of @c, or 1 byte past + * the area if @c is not found + */ +void *memscan(void *addr, int c, size_t size) +{ + unsigned char *p = addr; + + while (size) { + if (*p == c) + return (void *)p; + p++; + size--; + } + return (void *)p; +} +EXPORT_SYMBOL(memscan); +#endif + +#ifndef __HAVE_ARCH_STRSTR +/** + * strstr - Find the first substring in a %NUL terminated string + * @s1: The string to be searched + * @s2: The string to search for + */ +char *strstr(const char *s1, const char *s2) +{ + int l1, l2; + + l2 = strlen(s2); + if (!l2) + return (char *)s1; + l1 = strlen(s1); + while (l1 >= l2) { + l1--; + if (!memcmp(s1, s2, l2)) + return (char *)s1; + s1++; + } + return NULL; +} +EXPORT_SYMBOL(strstr); +#endif + +#ifndef __HAVE_ARCH_MEMCHR +/** + * memchr - Find a character in an area of memory. + * @s: The memory area + * @c: The byte to search for + * @n: The size of the area. + * + * returns the address of the first occurrence of @c, or %NULL + * if @c is not found + */ +void *memchr(const void *s, int c, size_t n) +{ + const unsigned char *p = s; + while (n-- != 0) { + if ((unsigned char)c == *p++) { + return (void *)(p - 1); + } + } + return NULL; +} +EXPORT_SYMBOL(memchr); +#endif diff --git a/libdde-linux26/contrib/lib/vsprintf.c b/libdde-linux26/contrib/lib/vsprintf.c new file mode 100644 index 00000000..0fbd0121 --- /dev/null +++ b/libdde-linux26/contrib/lib/vsprintf.c @@ -0,0 +1,1305 @@ +/* + * linux/lib/vsprintf.c + * + * Copyright (C) 1991, 1992 Linus Torvalds + */ + +/* vsprintf.c -- Lars Wirzenius & Linus Torvalds. */ +/* + * Wirzenius wrote this portably, Torvalds fucked it up :-) + */ + +/* + * Fri Jul 13 2001 Crutcher Dunnavant <crutcher+kernel@datastacks.com> + * - changed to provide snprintf and vsnprintf functions + * So Feb 1 16:51:32 CET 2004 Juergen Quade <quade@hsnr.de> + * - scnprintf and vscnprintf + */ + +#include <stdarg.h> +#include <linux/module.h> +#include <linux/types.h> +#include <linux/string.h> +#include <linux/ctype.h> +#include <linux/kernel.h> +#include <linux/kallsyms.h> +#include <linux/uaccess.h> +#include <linux/ioport.h> + +#include <asm/page.h> /* for PAGE_SIZE */ +#include <asm/div64.h> +#include <asm/sections.h> /* for dereference_function_descriptor() */ + +/* Works only for digits and letters, but small and fast */ +#define TOLOWER(x) ((x) | 0x20) + +static unsigned int simple_guess_base(const char *cp) +{ + if (cp[0] == '0') { + if (TOLOWER(cp[1]) == 'x' && isxdigit(cp[2])) + return 16; + else + return 8; + } else { + return 10; + } +} + +/** + * simple_strtoul - convert a string to an unsigned long + * @cp: The start of the string + * @endp: A pointer to the end of the parsed string will be placed here + * @base: The number base to use + */ +unsigned long simple_strtoul(const char *cp, char **endp, unsigned int base) +{ + unsigned long result = 0; + + if (!base) + base = simple_guess_base(cp); + + if (base == 16 && cp[0] == '0' && TOLOWER(cp[1]) == 'x') + cp += 2; + + while (isxdigit(*cp)) { + unsigned int value; + + value = isdigit(*cp) ? 
*cp - '0' : TOLOWER(*cp) - 'a' + 10;
+		if (value >= base)
+			break;
+		result = result * base + value;
+		cp++;
+	}
+
+	if (endp)
+		*endp = (char *)cp;
+	return result;
+}
+EXPORT_SYMBOL(simple_strtoul);
+
+/**
+ * simple_strtol - convert a string to a signed long
+ * @cp: The start of the string
+ * @endp: A pointer to the end of the parsed string will be placed here
+ * @base: The number base to use
+ */
+long simple_strtol(const char *cp, char **endp, unsigned int base)
+{
+	if (*cp == '-')
+		return -simple_strtoul(cp + 1, endp, base);
+	return simple_strtoul(cp, endp, base);
+}
+EXPORT_SYMBOL(simple_strtol);
+
+/**
+ * simple_strtoull - convert a string to an unsigned long long
+ * @cp: The start of the string
+ * @endp: A pointer to the end of the parsed string will be placed here
+ * @base: The number base to use
+ */
+unsigned long long simple_strtoull(const char *cp, char **endp, unsigned int base)
+{
+	unsigned long long result = 0;
+
+	if (!base)
+		base = simple_guess_base(cp);
+
+	if (base == 16 && cp[0] == '0' && TOLOWER(cp[1]) == 'x')
+		cp += 2;
+
+	while (isxdigit(*cp)) {
+		unsigned int value;
+
+		value = isdigit(*cp) ? *cp - '0' : TOLOWER(*cp) - 'a' + 10;
+		if (value >= base)
+			break;
+		result = result * base + value;
+		cp++;
+	}
+
+	if (endp)
+		*endp = (char *)cp;
+	return result;
+}
+EXPORT_SYMBOL(simple_strtoull);
+
+/**
+ * simple_strtoll - convert a string to a signed long long
+ * @cp: The start of the string
+ * @endp: A pointer to the end of the parsed string will be placed here
+ * @base: The number base to use
+ */
+long long simple_strtoll(const char *cp, char **endp, unsigned int base)
+{
+	if (*cp == '-')
+		return -simple_strtoull(cp + 1, endp, base);
+	return simple_strtoull(cp, endp, base);
+}
+
+/**
+ * strict_strtoul - convert a string to an unsigned long strictly
+ * @cp: The string to be converted
+ * @base: The number base to use
+ * @res: The converted result value
+ *
+ * strict_strtoul converts a string to an unsigned long only if the
+ * string is really an unsigned long string; any string with an invalid
+ * char at the tail is rejected with -EINVAL. Only a newline char at the
+ * tail is acceptable, because people generally change a module parameter
+ * in the following way:
+ *
+ *	echo 1024 > /sys/module/e1000/parameters/copybreak
+ *
+ * echo will append a newline to the tail.
+ *
+ * It returns 0 if conversion is successful and *res is set to the converted
+ * value, otherwise it returns -EINVAL and *res is set to 0.
+ *
+ * simple_strtoul just ignores the successive invalid characters and
+ * returns the converted value of the prefix part of the string.
+ */
+int strict_strtoul(const char *cp, unsigned int base, unsigned long *res)
+{
+	char *tail;
+	unsigned long val;
+	size_t len;
+
+	*res = 0;
+	len = strlen(cp);
+	if (len == 0)
+		return -EINVAL;
+
+	val = simple_strtoul(cp, &tail, base);
+	if (tail == cp)
+		return -EINVAL;
+	if ((*tail == '\0') ||
+		((len == (size_t)(tail - cp) + 1) && (*tail == '\n'))) {
+		*res = val;
+		return 0;
+	}
+
+	return -EINVAL;
+}
+EXPORT_SYMBOL(strict_strtoul);
+
+/**
+ * strict_strtol - convert a string to a long strictly
+ * @cp: The string to be converted
+ * @base: The number base to use
+ * @res: The converted result value
+ *
+ * strict_strtol is similar to strict_strtoul, but it allows the first
+ * character of the string to be '-'.
+ *
+ * It returns 0 if conversion is successful and *res is set to the converted
+ * value, otherwise it returns -EINVAL and *res is set to 0.
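+ *
+ * Example (illustrative, not from the original sources):
+ * strict_strtol("-42\n", 10, &val) succeeds with val == -42, while
+ * strict_strtol("42k", 10, &val) returns -EINVAL because of the
+ * trailing 'k'.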
+ */
+int strict_strtol(const char *cp, unsigned int base, long *res)
+{
+	int ret;
+	if (*cp == '-') {
+		ret = strict_strtoul(cp + 1, base, (unsigned long *)res);
+		if (!ret)
+			*res = -(*res);
+	} else {
+		ret = strict_strtoul(cp, base, (unsigned long *)res);
+	}
+
+	return ret;
+}
+EXPORT_SYMBOL(strict_strtol);
+
+/**
+ * strict_strtoull - convert a string to an unsigned long long strictly
+ * @cp: The string to be converted
+ * @base: The number base to use
+ * @res: The converted result value
+ *
+ * strict_strtoull converts a string to an unsigned long long only if the
+ * string is really an unsigned long long string; any string with an
+ * invalid char at the tail is rejected with -EINVAL. Only a newline char
+ * at the tail is acceptable, because people generally change a module
+ * parameter in the following way:
+ *
+ *	echo 1024 > /sys/module/e1000/parameters/copybreak
+ *
+ * echo will append a newline to the tail of the string.
+ *
+ * It returns 0 if conversion is successful and *res is set to the converted
+ * value, otherwise it returns -EINVAL and *res is set to 0.
+ *
+ * simple_strtoull just ignores the successive invalid characters and
+ * returns the converted value of the prefix part of the string.
+ */
+int strict_strtoull(const char *cp, unsigned int base, unsigned long long *res)
+{
+	char *tail;
+	unsigned long long val;
+	size_t len;
+
+	*res = 0;
+	len = strlen(cp);
+	if (len == 0)
+		return -EINVAL;
+
+	val = simple_strtoull(cp, &tail, base);
+	if (tail == cp)
+		return -EINVAL;
+	if ((*tail == '\0') ||
+		((len == (size_t)(tail - cp) + 1) && (*tail == '\n'))) {
+		*res = val;
+		return 0;
+	}
+
+	return -EINVAL;
+}
+EXPORT_SYMBOL(strict_strtoull);
+
+/**
+ * strict_strtoll - convert a string to a long long strictly
+ * @cp: The string to be converted
+ * @base: The number base to use
+ * @res: The converted result value
+ *
+ * strict_strtoll is similar to strict_strtoull, but it allows the first
+ * character of the string to be '-'.
+ *
+ * It returns 0 if conversion is successful and *res is set to the converted
+ * value, otherwise it returns -EINVAL and *res is set to 0.
+ */
+int strict_strtoll(const char *cp, unsigned int base, long long *res)
+{
+	int ret;
+	if (*cp == '-') {
+		ret = strict_strtoull(cp + 1, base, (unsigned long long *)res);
+		if (!ret)
+			*res = -(*res);
+	} else {
+		ret = strict_strtoull(cp, base, (unsigned long long *)res);
+	}
+
+	return ret;
+}
+EXPORT_SYMBOL(strict_strtoll);
+
+static int skip_atoi(const char **s)
+{
+	int i = 0;
+
+	while (isdigit(**s))
+		i = i*10 + *((*s)++) - '0';
+	return i;
+}
+
+/* Decimal conversion is by far the most typical, and is used
+ * for /proc and /sys data. This directly impacts e.g. top performance
+ * with many processes running. We optimize it for speed
+ * using code from
+ * http://www.cs.uiowa.edu/~jones/bcd/decimal.html
+ * (with permission from the author, Douglas W. Jones). */
+
+/* Formats correctly any integer in [0,99999].
+ * Outputs from one to five digits depending on input.
+ * On i386 gcc 4.1.2 -O2: ~250 bytes of code.
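+ *
+ * Worked example (added for illustration): q = 1234 = 0x4d2 has nibbles
+ * d3 = 0, d2 = 4, d1 = 13 and low nibble 2, so the steps below compute
+ *	d0 = 6*(0 + 4 + 13) + 2 = 104 = 10*10 + 4  -> emit '4'
+ *	d1 = 10 + 9*0 + 5*4 + 13 = 43 =  4*10 + 3  -> emit '3'
+ *	d2 = 4 + 2*4            =  12 =  1*10 + 2  -> emit '2'
+ *	d3 = 1 + 4*0            =   1 =  0*10 + 1  -> emit '1'
+ * i.e. the decimal digits of 1234, least-significant first. The
+ * (x * 0xcd) >> 11 and (x * 0xd) >> 7 multiplications are x/10 for the
+ * value ranges involved.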
*/ +static char* put_dec_trunc(char *buf, unsigned q) +{ + unsigned d3, d2, d1, d0; + d1 = (q>>4) & 0xf; + d2 = (q>>8) & 0xf; + d3 = (q>>12); + + d0 = 6*(d3 + d2 + d1) + (q & 0xf); + q = (d0 * 0xcd) >> 11; + d0 = d0 - 10*q; + *buf++ = d0 + '0'; /* least significant digit */ + d1 = q + 9*d3 + 5*d2 + d1; + if (d1 != 0) { + q = (d1 * 0xcd) >> 11; + d1 = d1 - 10*q; + *buf++ = d1 + '0'; /* next digit */ + + d2 = q + 2*d2; + if ((d2 != 0) || (d3 != 0)) { + q = (d2 * 0xd) >> 7; + d2 = d2 - 10*q; + *buf++ = d2 + '0'; /* next digit */ + + d3 = q + 4*d3; + if (d3 != 0) { + q = (d3 * 0xcd) >> 11; + d3 = d3 - 10*q; + *buf++ = d3 + '0'; /* next digit */ + if (q != 0) + *buf++ = q + '0'; /* most sign. digit */ + } + } + } + return buf; +} +/* Same with if's removed. Always emits five digits */ +static char* put_dec_full(char *buf, unsigned q) +{ + /* BTW, if q is in [0,9999], 8-bit ints will be enough, */ + /* but anyway, gcc produces better code with full-sized ints */ + unsigned d3, d2, d1, d0; + d1 = (q>>4) & 0xf; + d2 = (q>>8) & 0xf; + d3 = (q>>12); + + /* Possible ways to approx. divide by 10 */ + /* gcc -O2 replaces multiply with shifts and adds */ + // (x * 0xcd) >> 11: 11001101 - shorter code than * 0x67 (on i386) + // (x * 0x67) >> 10: 1100111 + // (x * 0x34) >> 9: 110100 - same + // (x * 0x1a) >> 8: 11010 - same + // (x * 0x0d) >> 7: 1101 - same, shortest code (on i386) + + d0 = 6*(d3 + d2 + d1) + (q & 0xf); + q = (d0 * 0xcd) >> 11; + d0 = d0 - 10*q; + *buf++ = d0 + '0'; + d1 = q + 9*d3 + 5*d2 + d1; + q = (d1 * 0xcd) >> 11; + d1 = d1 - 10*q; + *buf++ = d1 + '0'; + + d2 = q + 2*d2; + q = (d2 * 0xd) >> 7; + d2 = d2 - 10*q; + *buf++ = d2 + '0'; + + d3 = q + 4*d3; + q = (d3 * 0xcd) >> 11; /* - shorter code */ + /* q = (d3 * 0x67) >> 10; - would also work */ + d3 = d3 - 10*q; + *buf++ = d3 + '0'; + *buf++ = q + '0'; + return buf; +} +/* No inlining helps gcc to use registers better */ +static noinline char* put_dec(char *buf, unsigned long long num) +{ + while (1) { + unsigned rem; + if (num < 100000) + return put_dec_trunc(buf, num); + rem = do_div(num, 100000); + buf = put_dec_full(buf, rem); + } +} + +#define ZEROPAD 1 /* pad with zero */ +#define SIGN 2 /* unsigned/signed long */ +#define PLUS 4 /* show plus */ +#define SPACE 8 /* space if plus */ +#define LEFT 16 /* left justified */ +#define SMALL 32 /* Must be 32 == 0x20 */ +#define SPECIAL 64 /* 0x */ + +static char *number(char *buf, char *end, unsigned long long num, int base, int size, int precision, int type) +{ + /* we are called with base 8, 10 or 16, only, thus don't need "G..." */ + static const char digits[16] = "0123456789ABCDEF"; /* "GHIJKLMNOPQRSTUVWXYZ"; */ + + char tmp[66]; + char sign; + char locase; + int need_pfx = ((type & SPECIAL) && base != 10); + int i; + + /* locase = 0 or 0x20. 
ORing digits or letters with 'locase' + * produces same digits or (maybe lowercased) letters */ + locase = (type & SMALL); + if (type & LEFT) + type &= ~ZEROPAD; + sign = 0; + if (type & SIGN) { + if ((signed long long) num < 0) { + sign = '-'; + num = - (signed long long) num; + size--; + } else if (type & PLUS) { + sign = '+'; + size--; + } else if (type & SPACE) { + sign = ' '; + size--; + } + } + if (need_pfx) { + size--; + if (base == 16) + size--; + } + + /* generate full string in tmp[], in reverse order */ + i = 0; + if (num == 0) + tmp[i++] = '0'; + /* Generic code, for any base: + else do { + tmp[i++] = (digits[do_div(num,base)] | locase); + } while (num != 0); + */ + else if (base != 10) { /* 8 or 16 */ + int mask = base - 1; + int shift = 3; + if (base == 16) shift = 4; + do { + tmp[i++] = (digits[((unsigned char)num) & mask] | locase); + num >>= shift; + } while (num); + } else { /* base 10 */ + i = put_dec(tmp, num) - tmp; + } + + /* printing 100 using %2d gives "100", not "00" */ + if (i > precision) + precision = i; + /* leading space padding */ + size -= precision; + if (!(type & (ZEROPAD+LEFT))) { + while(--size >= 0) { + if (buf < end) + *buf = ' '; + ++buf; + } + } + /* sign */ + if (sign) { + if (buf < end) + *buf = sign; + ++buf; + } + /* "0x" / "0" prefix */ + if (need_pfx) { + if (buf < end) + *buf = '0'; + ++buf; + if (base == 16) { + if (buf < end) + *buf = ('X' | locase); + ++buf; + } + } + /* zero or space padding */ + if (!(type & LEFT)) { + char c = (type & ZEROPAD) ? '0' : ' '; + while (--size >= 0) { + if (buf < end) + *buf = c; + ++buf; + } + } + /* hmm even more zero padding? */ + while (i <= --precision) { + if (buf < end) + *buf = '0'; + ++buf; + } + /* actual digits of result */ + while (--i >= 0) { + if (buf < end) + *buf = tmp[i]; + ++buf; + } + /* trailing space padding */ + while (--size >= 0) { + if (buf < end) + *buf = ' '; + ++buf; + } + return buf; +} + +static char *string(char *buf, char *end, char *s, int field_width, int precision, int flags) +{ + int len, i; + + if ((unsigned long)s < PAGE_SIZE) + s = "<NULL>"; + + len = strnlen(s, precision); + + if (!(flags & LEFT)) { + while (len < field_width--) { + if (buf < end) + *buf = ' '; + ++buf; + } + } + for (i = 0; i < len; ++i) { + if (buf < end) + *buf = *s; + ++buf; ++s; + } + while (len < field_width--) { + if (buf < end) + *buf = ' '; + ++buf; + } + return buf; +} + +static char *symbol_string(char *buf, char *end, void *ptr, int field_width, int precision, int flags) +{ + unsigned long value = (unsigned long) ptr; +#ifdef CONFIG_KALLSYMS + char sym[KSYM_SYMBOL_LEN]; + sprint_symbol(sym, value); + return string(buf, end, sym, field_width, precision, flags); +#else + field_width = 2*sizeof(void *); + flags |= SPECIAL | SMALL | ZEROPAD; + return number(buf, end, value, 16, field_width, precision, flags); +#endif +} + +static char *resource_string(char *buf, char *end, struct resource *res, int field_width, int precision, int flags) +{ +#ifndef IO_RSRC_PRINTK_SIZE +#define IO_RSRC_PRINTK_SIZE 4 +#endif + +#ifndef MEM_RSRC_PRINTK_SIZE +#define MEM_RSRC_PRINTK_SIZE 8 +#endif + + /* room for the actual numbers, the two "0x", -, [, ] and the final zero */ + char sym[4*sizeof(resource_size_t) + 8]; + char *p = sym, *pend = sym + sizeof(sym); + int size = -1; + + if (res->flags & IORESOURCE_IO) + size = IO_RSRC_PRINTK_SIZE; + else if (res->flags & IORESOURCE_MEM) + size = MEM_RSRC_PRINTK_SIZE; + + *p++ = '['; + p = number(p, pend, res->start, 16, size, -1, SPECIAL | SMALL | ZEROPAD); + *p++ = '-'; 
+ p = number(p, pend, res->end, 16, size, -1, SPECIAL | SMALL | ZEROPAD); + *p++ = ']'; + *p = 0; + + return string(buf, end, sym, field_width, precision, flags); +} + +static char *mac_address_string(char *buf, char *end, u8 *addr, int field_width, + int precision, int flags) +{ + char mac_addr[6 * 3]; /* (6 * 2 hex digits), 5 colons and trailing zero */ + char *p = mac_addr; + int i; + + for (i = 0; i < 6; i++) { + p = pack_hex_byte(p, addr[i]); + if (!(flags & SPECIAL) && i != 5) + *p++ = ':'; + } + *p = '\0'; + + return string(buf, end, mac_addr, field_width, precision, flags & ~SPECIAL); +} + +static char *ip6_addr_string(char *buf, char *end, u8 *addr, int field_width, + int precision, int flags) +{ + char ip6_addr[8 * 5]; /* (8 * 4 hex digits), 7 colons and trailing zero */ + char *p = ip6_addr; + int i; + + for (i = 0; i < 8; i++) { + p = pack_hex_byte(p, addr[2 * i]); + p = pack_hex_byte(p, addr[2 * i + 1]); + if (!(flags & SPECIAL) && i != 7) + *p++ = ':'; + } + *p = '\0'; + + return string(buf, end, ip6_addr, field_width, precision, flags & ~SPECIAL); +} + +static char *ip4_addr_string(char *buf, char *end, u8 *addr, int field_width, + int precision, int flags) +{ + char ip4_addr[4 * 4]; /* (4 * 3 decimal digits), 3 dots and trailing zero */ + char temp[3]; /* hold each IP quad in reverse order */ + char *p = ip4_addr; + int i, digits; + + for (i = 0; i < 4; i++) { + digits = put_dec_trunc(temp, addr[i]) - temp; + /* reverse the digits in the quad */ + while (digits--) + *p++ = temp[digits]; + if (i != 3) + *p++ = '.'; + } + *p = '\0'; + + return string(buf, end, ip4_addr, field_width, precision, flags & ~SPECIAL); +} + +/* + * Show a '%p' thing. A kernel extension is that the '%p' is followed + * by an extra set of alphanumeric characters that are extended format + * specifiers. + * + * Right now we handle: + * + * - 'F' For symbolic function descriptor pointers + * - 'S' For symbolic direct pointers + * - 'R' For a struct resource pointer, it prints the range of + * addresses (not the name nor the flags) + * - 'M' For a 6-byte MAC address, it prints the address in the + * usual colon-separated hex notation + * - 'I' [46] for IPv4/IPv6 addresses printed in the usual way (dot-separated + * decimal for v4 and colon separated network-order 16 bit hex for v6) + * - 'i' [46] for 'raw' IPv4/IPv6 addresses, IPv6 omits the colons, IPv4 is + * currently the same + * + * Note: The difference between 'S' and 'F' is that on ia64 and ppc64 + * function pointers are really function descriptors, which contain a + * pointer to the real address. 
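+ *
+ * Illustrative examples (res, mac and ip are hypothetical variables of
+ * the matching types):
+ *
+ *	printk("%pR\n", &res);		prints "[0xd000-0xd07f]"
+ *	printk("%pM\n", mac);		prints "00:16:3e:12:34:56"
+ *	printk("%pI4\n", &ip);		prints "127.0.0.1"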
+ */
+static char *pointer(const char *fmt, char *buf, char *end, void *ptr, int field_width, int precision, int flags)
+{
+	if (!ptr)
+		return string(buf, end, "(null)", field_width, precision, flags);
+
+	switch (*fmt) {
+	case 'F':
+		ptr = dereference_function_descriptor(ptr);
+		/* Fallthrough */
+	case 'S':
+		return symbol_string(buf, end, ptr, field_width, precision, flags);
+	case 'R':
+		return resource_string(buf, end, ptr, field_width, precision, flags);
+	case 'm':
+		flags |= SPECIAL;
+		/* Fallthrough */
+	case 'M':
+		return mac_address_string(buf, end, ptr, field_width, precision, flags);
+	case 'i':
+		flags |= SPECIAL;
+		/* Fallthrough */
+	case 'I':
+		if (fmt[1] == '6')
+			return ip6_addr_string(buf, end, ptr, field_width, precision, flags);
+		if (fmt[1] == '4')
+			return ip4_addr_string(buf, end, ptr, field_width, precision, flags);
+		flags &= ~SPECIAL;
+		break;
+	}
+	flags |= SMALL;
+	if (field_width == -1) {
+		field_width = 2*sizeof(void *);
+		flags |= ZEROPAD;
+	}
+	return number(buf, end, (unsigned long) ptr, 16, field_width, precision, flags);
+}
+
+/**
+ * vsnprintf - Format a string and place it in a buffer
+ * @buf: The buffer to place the result into
+ * @size: The size of the buffer, including the trailing null space
+ * @fmt: The format string to use
+ * @args: Arguments for the format string
+ *
+ * This function follows C99 vsnprintf, but has some extensions:
+ * %pS output the name of a text symbol
+ * %pF output the name of a function pointer
+ * %pR output the address range in a struct resource
+ *
+ * The return value is the number of characters which would
+ * be generated for the given input, excluding the trailing
+ * '\0', as per ISO C99. If you want to have the exact
+ * number of characters written into @buf as return value
+ * (not including the trailing '\0'), use vscnprintf(). If the
+ * return is greater than or equal to @size, the resulting
+ * string is truncated.
+ *
+ * Call this function if you are already dealing with a va_list.
+ * You probably want snprintf() instead.
+ */
+int vsnprintf(char *buf, size_t size, const char *fmt, va_list args)
+{
+	unsigned long long num;
+	int base;
+	char *str, *end, c;
+
+	int flags;		/* flags to number() */
+
+	int field_width;	/* width of output field */
+	int precision;		/* min. # of digits for integers; max
+				   number of chars from string */
+	int qualifier;		/* 'h', 'l', or 'L' for integer fields */
+				/* 'z' support added 23/7/1999 S.H.    */
+				/* 'z' changed to 'Z' --davidm 1/25/99 */
+				/* 't' added for ptrdiff_t */
+
+	/* Reject out-of-range values early.  Large positive sizes are
+	   used for unknown buffer sizes. */
+	if (unlikely((int) size < 0)) {
+		/* There can be only one..
*/ + static char warn = 1; + WARN_ON(warn); + warn = 0; + return 0; + } + + str = buf; + end = buf + size; + + /* Make sure end is always >= buf */ + if (end < buf) { + end = ((void *)-1); + size = end - buf; + } + + for (; *fmt ; ++fmt) { + if (*fmt != '%') { + if (str < end) + *str = *fmt; + ++str; + continue; + } + + /* process flags */ + flags = 0; + repeat: + ++fmt; /* this also skips first '%' */ + switch (*fmt) { + case '-': flags |= LEFT; goto repeat; + case '+': flags |= PLUS; goto repeat; + case ' ': flags |= SPACE; goto repeat; + case '#': flags |= SPECIAL; goto repeat; + case '0': flags |= ZEROPAD; goto repeat; + } + + /* get field width */ + field_width = -1; + if (isdigit(*fmt)) + field_width = skip_atoi(&fmt); + else if (*fmt == '*') { + ++fmt; + /* it's the next argument */ + field_width = va_arg(args, int); + if (field_width < 0) { + field_width = -field_width; + flags |= LEFT; + } + } + + /* get the precision */ + precision = -1; + if (*fmt == '.') { + ++fmt; + if (isdigit(*fmt)) + precision = skip_atoi(&fmt); + else if (*fmt == '*') { + ++fmt; + /* it's the next argument */ + precision = va_arg(args, int); + } + if (precision < 0) + precision = 0; + } + + /* get the conversion qualifier */ + qualifier = -1; + if (*fmt == 'h' || *fmt == 'l' || *fmt == 'L' || + *fmt =='Z' || *fmt == 'z' || *fmt == 't') { + qualifier = *fmt; + ++fmt; + if (qualifier == 'l' && *fmt == 'l') { + qualifier = 'L'; + ++fmt; + } + } + + /* default base */ + base = 10; + + switch (*fmt) { + case 'c': + if (!(flags & LEFT)) { + while (--field_width > 0) { + if (str < end) + *str = ' '; + ++str; + } + } + c = (unsigned char) va_arg(args, int); + if (str < end) + *str = c; + ++str; + while (--field_width > 0) { + if (str < end) + *str = ' '; + ++str; + } + continue; + + case 's': + str = string(str, end, va_arg(args, char *), field_width, precision, flags); + continue; + + case 'p': + str = pointer(fmt+1, str, end, + va_arg(args, void *), + field_width, precision, flags); + /* Skip all alphanumeric pointer suffixes */ + while (isalnum(fmt[1])) + fmt++; + continue; + + case 'n': + /* FIXME: + * What does C99 say about the overflow case here? 
*/ + if (qualifier == 'l') { + long * ip = va_arg(args, long *); + *ip = (str - buf); + } else if (qualifier == 'Z' || qualifier == 'z') { + size_t * ip = va_arg(args, size_t *); + *ip = (str - buf); + } else { + int * ip = va_arg(args, int *); + *ip = (str - buf); + } + continue; + + case '%': + if (str < end) + *str = '%'; + ++str; + continue; + + /* integer number formats - set up the flags and "break" */ + case 'o': + base = 8; + break; + + case 'x': + flags |= SMALL; + case 'X': + base = 16; + break; + + case 'd': + case 'i': + flags |= SIGN; + case 'u': + break; + + default: + if (str < end) + *str = '%'; + ++str; + if (*fmt) { + if (str < end) + *str = *fmt; + ++str; + } else { + --fmt; + } + continue; + } + if (qualifier == 'L') + num = va_arg(args, long long); + else if (qualifier == 'l') { + num = va_arg(args, unsigned long); + if (flags & SIGN) + num = (signed long) num; + } else if (qualifier == 'Z' || qualifier == 'z') { + num = va_arg(args, size_t); + } else if (qualifier == 't') { + num = va_arg(args, ptrdiff_t); + } else if (qualifier == 'h') { + num = (unsigned short) va_arg(args, int); + if (flags & SIGN) + num = (signed short) num; + } else { + num = va_arg(args, unsigned int); + if (flags & SIGN) + num = (signed int) num; + } + str = number(str, end, num, base, + field_width, precision, flags); + } + if (size > 0) { + if (str < end) + *str = '\0'; + else + end[-1] = '\0'; + } + /* the trailing null byte doesn't count towards the total */ + return str-buf; +} +EXPORT_SYMBOL(vsnprintf); + +/** + * vscnprintf - Format a string and place it in a buffer + * @buf: The buffer to place the result into + * @size: The size of the buffer, including the trailing null space + * @fmt: The format string to use + * @args: Arguments for the format string + * + * The return value is the number of characters which have been written into + * the @buf not including the trailing '\0'. If @size is <= 0 the function + * returns 0. + * + * Call this function if you are already dealing with a va_list. + * You probably want scnprintf() instead. + * + * See the vsnprintf() documentation for format string extensions over C99. + */ +int vscnprintf(char *buf, size_t size, const char *fmt, va_list args) +{ + int i; + + i=vsnprintf(buf,size,fmt,args); + return (i >= size) ? (size - 1) : i; +} +EXPORT_SYMBOL(vscnprintf); + +/** + * snprintf - Format a string and place it in a buffer + * @buf: The buffer to place the result into + * @size: The size of the buffer, including the trailing null space + * @fmt: The format string to use + * @...: Arguments for the format string + * + * The return value is the number of characters which would be + * generated for the given input, excluding the trailing null, + * as per ISO C99. If the return is greater than or equal to + * @size, the resulting string is truncated. + * + * See the vsnprintf() documentation for format string extensions over C99. + */ +int snprintf(char * buf, size_t size, const char *fmt, ...) +{ + va_list args; + int i; + + va_start(args, fmt); + i=vsnprintf(buf,size,fmt,args); + va_end(args); + return i; +} +EXPORT_SYMBOL(snprintf); + +/** + * scnprintf - Format a string and place it in a buffer + * @buf: The buffer to place the result into + * @size: The size of the buffer, including the trailing null space + * @fmt: The format string to use + * @...: Arguments for the format string + * + * The return value is the number of characters written into @buf not including + * the trailing '\0'. If @size is <= 0 the function returns 0. 
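+ *
+ * Example (illustrative): with char buf[4], snprintf(buf, 4, "%d", 12345)
+ * returns 5 (the length that would have been needed), while
+ * scnprintf(buf, 4, "%d", 12345) returns 3, the number of characters
+ * actually stored ("123").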
+ */
+int scnprintf(char *buf, size_t size, const char *fmt, ...)
+{
+	va_list args;
+	int i;
+
+	va_start(args, fmt);
+	i = vsnprintf(buf, size, fmt, args);
+	va_end(args);
+	return (i >= size) ? (size - 1) : i;
+}
+EXPORT_SYMBOL(scnprintf);
+
+/**
+ * vsprintf - Format a string and place it in a buffer
+ * @buf: The buffer to place the result into
+ * @fmt: The format string to use
+ * @args: Arguments for the format string
+ *
+ * The function returns the number of characters written
+ * into @buf. Use vsnprintf() or vscnprintf() in order to avoid
+ * buffer overflows.
+ *
+ * Call this function if you are already dealing with a va_list.
+ * You probably want sprintf() instead.
+ *
+ * See the vsnprintf() documentation for format string extensions over C99.
+ */
+int vsprintf(char *buf, const char *fmt, va_list args)
+{
+	return vsnprintf(buf, INT_MAX, fmt, args);
+}
+EXPORT_SYMBOL(vsprintf);
+
+/**
+ * sprintf - Format a string and place it in a buffer
+ * @buf: The buffer to place the result into
+ * @fmt: The format string to use
+ * @...: Arguments for the format string
+ *
+ * The function returns the number of characters written
+ * into @buf. Use snprintf() or scnprintf() in order to avoid
+ * buffer overflows.
+ *
+ * See the vsnprintf() documentation for format string extensions over C99.
+ */
+int sprintf(char *buf, const char *fmt, ...)
+{
+	va_list args;
+	int i;
+
+	va_start(args, fmt);
+	i = vsnprintf(buf, INT_MAX, fmt, args);
+	va_end(args);
+	return i;
+}
+EXPORT_SYMBOL(sprintf);
+
+/**
+ * vsscanf - Unformat a buffer into a list of arguments
+ * @buf: input buffer
+ * @fmt: format of buffer
+ * @args: arguments
+ */
+int vsscanf(const char *buf, const char *fmt, va_list args)
+{
+	const char *str = buf;
+	char *next;
+	char digit;
+	int num = 0;
+	int qualifier;
+	int base;
+	int field_width;
+	int is_sign = 0;
+
+	while (*fmt && *str) {
+		/* skip any white space in format */
+		/* white space in format matches any amount of
+		 * white space, including none, in the input.
+		 */
+		if (isspace(*fmt)) {
+			while (isspace(*fmt))
+				++fmt;
+			while (isspace(*str))
+				++str;
+		}
+
+		/* anything that is not a conversion must match exactly */
+		if (*fmt != '%' && *fmt) {
+			if (*fmt++ != *str++)
+				break;
+			continue;
+		}
+
+		if (!*fmt)
+			break;
+		++fmt;
+
+		/* skip this conversion.
+ * advance both strings to next white space + */ + if (*fmt == '*') { + while (!isspace(*fmt) && *fmt) + fmt++; + while (!isspace(*str) && *str) + str++; + continue; + } + + /* get field width */ + field_width = -1; + if (isdigit(*fmt)) + field_width = skip_atoi(&fmt); + + /* get conversion qualifier */ + qualifier = -1; + if (*fmt == 'h' || *fmt == 'l' || *fmt == 'L' || + *fmt == 'Z' || *fmt == 'z') { + qualifier = *fmt++; + if (unlikely(qualifier == *fmt)) { + if (qualifier == 'h') { + qualifier = 'H'; + fmt++; + } else if (qualifier == 'l') { + qualifier = 'L'; + fmt++; + } + } + } + base = 10; + is_sign = 0; + + if (!*fmt || !*str) + break; + + switch(*fmt++) { + case 'c': + { + char *s = (char *) va_arg(args,char*); + if (field_width == -1) + field_width = 1; + do { + *s++ = *str++; + } while (--field_width > 0 && *str); + num++; + } + continue; + case 's': + { + char *s = (char *) va_arg(args, char *); + if(field_width == -1) + field_width = INT_MAX; + /* first, skip leading white space in buffer */ + while (isspace(*str)) + str++; + + /* now copy until next white space */ + while (*str && !isspace(*str) && field_width--) { + *s++ = *str++; + } + *s = '\0'; + num++; + } + continue; + case 'n': + /* return number of characters read so far */ + { + int *i = (int *)va_arg(args,int*); + *i = str - buf; + } + continue; + case 'o': + base = 8; + break; + case 'x': + case 'X': + base = 16; + break; + case 'i': + base = 0; + case 'd': + is_sign = 1; + case 'u': + break; + case '%': + /* looking for '%' in str */ + if (*str++ != '%') + return num; + continue; + default: + /* invalid format; stop here */ + return num; + } + + /* have some sort of integer conversion. + * first, skip white space in buffer. + */ + while (isspace(*str)) + str++; + + digit = *str; + if (is_sign && digit == '-') + digit = *(str + 1); + + if (!digit + || (base == 16 && !isxdigit(digit)) + || (base == 10 && !isdigit(digit)) + || (base == 8 && (!isdigit(digit) || digit > '7')) + || (base == 0 && !isdigit(digit))) + break; + + switch(qualifier) { + case 'H': /* that's 'hh' in format */ + if (is_sign) { + signed char *s = (signed char *) va_arg(args,signed char *); + *s = (signed char) simple_strtol(str,&next,base); + } else { + unsigned char *s = (unsigned char *) va_arg(args, unsigned char *); + *s = (unsigned char) simple_strtoul(str, &next, base); + } + break; + case 'h': + if (is_sign) { + short *s = (short *) va_arg(args,short *); + *s = (short) simple_strtol(str,&next,base); + } else { + unsigned short *s = (unsigned short *) va_arg(args, unsigned short *); + *s = (unsigned short) simple_strtoul(str, &next, base); + } + break; + case 'l': + if (is_sign) { + long *l = (long *) va_arg(args,long *); + *l = simple_strtol(str,&next,base); + } else { + unsigned long *l = (unsigned long*) va_arg(args,unsigned long*); + *l = simple_strtoul(str,&next,base); + } + break; + case 'L': + if (is_sign) { + long long *l = (long long*) va_arg(args,long long *); + *l = simple_strtoll(str,&next,base); + } else { + unsigned long long *l = (unsigned long long*) va_arg(args,unsigned long long*); + *l = simple_strtoull(str,&next,base); + } + break; + case 'Z': + case 'z': + { + size_t *s = (size_t*) va_arg(args,size_t*); + *s = (size_t) simple_strtoul(str,&next,base); + } + break; + default: + if (is_sign) { + int *i = (int *) va_arg(args, int*); + *i = (int) simple_strtol(str,&next,base); + } else { + unsigned int *i = (unsigned int*) va_arg(args, unsigned int*); + *i = (unsigned int) simple_strtoul(str,&next,base); + } + break; + } + 
num++;
+
+		if (!next)
+			break;
+		str = next;
+	}
+
+	/*
+	 * Now we've come all the way through so either the input string or the
+	 * format ended. In the former case, there can be a %n at the current
+	 * position in the format that needs to be filled.
+	 */
+	if (*fmt == '%' && *(fmt + 1) == 'n') {
+		int *p = (int *)va_arg(args, int *);
+		*p = str - buf;
+	}
+
+	return num;
+}
+EXPORT_SYMBOL(vsscanf);
+
+/**
+ * sscanf - Unformat a buffer into a list of arguments
+ * @buf: input buffer
+ * @fmt: formatting of buffer
+ * @...: resulting arguments
+ */
+int sscanf(const char *buf, const char *fmt, ...)
+{
+	va_list args;
+	int i;
+
+	va_start(args, fmt);
+	i = vsscanf(buf, fmt, args);
+	va_end(args);
+	return i;
+}
+EXPORT_SYMBOL(sscanf);
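+
+/*
+ * Usage sketch (illustrative only, not from the original sources):
+ * parsing a fixed-format line with the sscanf() implementation above.
+ *
+ *	int major, minor;
+ *	char name[16];
+ *
+ *	if (sscanf("eth0 2:7", "%15s %d:%d", name, &major, &minor) == 3)
+ *		pr_debug("%s %d %d\n", name, major, minor);
+ *
+ * The return value is the number of conversions stored (3 here:
+ * name = "eth0", major = 2, minor = 7), so callers should check it
+ * before trusting the outputs.
+ */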