On SMP, cpumasks become quite useful. Add a simple implementation,
along with implementations of the bitops it needs.

Signed-off-by: Andrew Jones <drjones@xxxxxxxxxx>
---
 config/config-arm-common.mak |   1 +
 lib/arm/asm/bitops.h         |  53 +++++++++++++++++++
 lib/arm/asm/cpumask.h        | 118 +++++++++++++++++++++++++++++++++++++++++++
 lib/arm/bitops.c             |  81 +++++++++++++++++++++++++++++
 lib/arm64/asm/bitops.h       |  51 +++++++++++++++++++
 lib/arm64/asm/cpumask.h      |   1 +
 6 files changed, 305 insertions(+)
 create mode 100644 lib/arm/asm/bitops.h
 create mode 100644 lib/arm/asm/cpumask.h
 create mode 100644 lib/arm/bitops.c
 create mode 100644 lib/arm64/asm/bitops.h
 create mode 100644 lib/arm64/asm/cpumask.h

diff --git a/config/config-arm-common.mak b/config/config-arm-common.mak
index b01e9ab836b2d..94eac8967e234 100644
--- a/config/config-arm-common.mak
+++ b/config/config-arm-common.mak
@@ -34,6 +34,7 @@ cflatobjs += lib/chr-testdev.o
 cflatobjs += lib/arm/io.o
 cflatobjs += lib/arm/setup.o
 cflatobjs += lib/arm/mmu.o
+cflatobjs += lib/arm/bitops.o
 
 libeabi = lib/arm/libeabi.a
 eabiobjs = lib/arm/eabi_compat.o
diff --git a/lib/arm/asm/bitops.h b/lib/arm/asm/bitops.h
new file mode 100644
index 0000000000000..8049634be0485
--- /dev/null
+++ b/lib/arm/asm/bitops.h
@@ -0,0 +1,53 @@
+#ifndef _ASMARM_BITOPS_H_
+#define _ASMARM_BITOPS_H_
+/*
+ * Adapted from
+ *   include/linux/bitops.h
+ *   arch/arm/lib/bitops.h
+ *
+ * Copyright (C) 2015, Red Hat Inc, Andrew Jones <drjones@xxxxxxxxxx>
+ *
+ * This work is licensed under the terms of the GNU LGPL, version 2.
+ */
+
+#define BITS_PER_LONG	32
+#define BIT(nr)		(1UL << (nr))
+#define BIT_MASK(nr)	(1UL << ((nr) % BITS_PER_LONG))
+#define BIT_WORD(nr)	((nr) / BITS_PER_LONG)
+
+#define ATOMIC_BITOP(insn, mask, word)			\
+({							\
+	unsigned long tmp1, tmp2;			\
+	asm volatile(					\
+	"1:	ldrex	%0, [%2]\n"			\
+	insn"	%0, %0, %3\n"				\
+	"	strex	%1, %0, [%2]\n"			\
+	"	cmp	%1, #0\n"			\
+	"	bne	1b\n"				\
+	: "=&r" (tmp1), "=&r" (tmp2)			\
+	: "r" (word), "r" (mask)			\
+	: "cc");					\
+})
+
+#define ATOMIC_TESTOP(insn, mask, word, old)		\
+({							\
+	unsigned long tmp1, tmp2;			\
+	asm volatile(					\
+	"1:	ldrex	%0, [%3]\n"			\
+	"	and	%1, %0, %4\n"			\
+	insn"	%0, %0, %4\n"				\
+	"	strex	%2, %0, [%3]\n"			\
+	"	cmp	%2, #0\n"			\
+	"	bne	1b\n"				\
+	: "=&r" (tmp1), "=&r" (old), "=&r" (tmp2)	\
+	: "r" (word), "r" (mask)			\
+	: "cc");					\
+})
+
+extern void set_bit(int nr, volatile unsigned long *addr);
+extern void clear_bit(int nr, volatile unsigned long *addr);
+extern int test_bit(int nr, const volatile unsigned long *addr);
+extern int test_and_set_bit(int nr, volatile unsigned long *addr);
+extern int test_and_clear_bit(int nr, volatile unsigned long *addr);
+
+#endif /* _ASMARM_BITOPS_H_ */
diff --git a/lib/arm/asm/cpumask.h b/lib/arm/asm/cpumask.h
new file mode 100644
index 0000000000000..85b8e4b51a403
--- /dev/null
+++ b/lib/arm/asm/cpumask.h
@@ -0,0 +1,118 @@
+#ifndef _ASMARM_CPUMASK_H_
+#define _ASMARM_CPUMASK_H_
+/*
+ * Simple cpumask implementation
+ *
+ * Copyright (C) 2015, Red Hat Inc, Andrew Jones <drjones@xxxxxxxxxx>
+ *
+ * This work is licensed under the terms of the GNU LGPL, version 2.
+ */
+#include <asm/setup.h>
+#include <asm/bitops.h>
+
+#define CPUMASK_NR_LONGS	((NR_CPUS + BITS_PER_LONG - 1) / BITS_PER_LONG)
+
+typedef struct cpumask {
+	unsigned long bits[CPUMASK_NR_LONGS];
+} cpumask_t;
+
+#define cpumask_bits(maskp) ((maskp)->bits)
+
+static inline void cpumask_set_cpu(int cpu, cpumask_t *mask)
+{
+	set_bit(cpu, cpumask_bits(mask));
+}
+
+static inline void cpumask_clear_cpu(int cpu, cpumask_t *mask)
+{
+	clear_bit(cpu, cpumask_bits(mask));
+}
+
+static inline int cpumask_test_cpu(int cpu, const cpumask_t *mask)
+{
+	return test_bit(cpu, cpumask_bits(mask));
+}
+
+static inline int cpumask_test_and_set_cpu(int cpu, cpumask_t *mask)
+{
+	return test_and_set_bit(cpu, cpumask_bits(mask));
+}
+
+static inline int cpumask_test_and_clear_cpu(int cpu, cpumask_t *mask)
+{
+	return test_and_clear_bit(cpu, cpumask_bits(mask));
+}
+
+static inline void cpumask_setall(cpumask_t *mask)
+{
+	int i;
+	for (i = 0; i < nr_cpus; i += BITS_PER_LONG)
+		cpumask_bits(mask)[BIT_WORD(i)] = ~0UL;
+	i -= BITS_PER_LONG;
+	if ((nr_cpus - i) < BITS_PER_LONG)
+		cpumask_bits(mask)[BIT_WORD(i)] = BIT_MASK(nr_cpus - i) - 1;
+}
+
+static inline void cpumask_clear(cpumask_t *mask)
+{
+	int i;
+	for (i = 0; i < nr_cpus; i += BITS_PER_LONG)
+		cpumask_bits(mask)[BIT_WORD(i)] = 0UL;
+}
+
+static inline bool cpumask_empty(const cpumask_t *mask)
+{
+	int i;
+	for (i = 0; i < nr_cpus; i += BITS_PER_LONG) {
+		if (i < NR_CPUS) { /* silence crazy compiler warning */
+			if (cpumask_bits(mask)[BIT_WORD(i)] != 0UL)
+				return false;
+		}
+	}
+	return true;
+}
+
+static inline bool cpumask_full(const cpumask_t *mask)
+{
+	int i;
+	for (i = 0; i < nr_cpus; i += BITS_PER_LONG) {
+		if (cpumask_bits(mask)[BIT_WORD(i)] != ~0UL) {
+			if ((nr_cpus - i) >= BITS_PER_LONG)
+				return false;
+			if (cpumask_bits(mask)[BIT_WORD(i)]
+					!= BIT_MASK(nr_cpus - i) - 1)
+				return false;
+		}
+	}
+	return true;
+}
+
+static inline int cpumask_weight(const cpumask_t *mask)
+{
+	int w = 0, i;
+
+	for (i = 0; i < nr_cpus; ++i)
+		if (cpumask_test_cpu(i, mask))
+			++w;
+	return w;
+}
+
+static inline void cpumask_copy(cpumask_t *dst, const cpumask_t *src)
+{
+	memcpy(cpumask_bits(dst), cpumask_bits(src),
+	       CPUMASK_NR_LONGS * sizeof(long));
+}
+
+static inline int cpumask_next(int cpu, const cpumask_t *mask)
+{
+	while (cpu < nr_cpus && !cpumask_test_cpu(++cpu, mask))
+		;
+	return cpu;
+}
+
+#define for_each_cpu(cpu, mask)				\
+	for ((cpu) = cpumask_next(-1, mask);		\
+			(cpu) < nr_cpus;		\
+			(cpu) = cpumask_next(cpu, mask))
+
+#endif /* _ASMARM_CPUMASK_H_ */
diff --git a/lib/arm/bitops.c b/lib/arm/bitops.c
new file mode 100644
index 0000000000000..9ad112162a29f
--- /dev/null
+++ b/lib/arm/bitops.c
@@ -0,0 +1,81 @@
+/*
+ * Adapted from
+ *   include/asm-generic/bitops/atomic.h
+ *
+ * Copyright (C) 2015, Red Hat Inc, Andrew Jones <drjones@xxxxxxxxxx>
+ *
+ * This work is licensed under the terms of the GNU LGPL, version 2.
+ */
+#include <asm/bitops.h>
+#include <asm/barrier.h>
+#include <asm/mmu.h>
+
+void set_bit(int nr, volatile unsigned long *addr)
+{
+	volatile unsigned long *word = addr + BIT_WORD(nr);
+	unsigned long mask = BIT_MASK(nr);
+
+	if (mmu_enabled())
+		ATOMIC_BITOP("orr", mask, word);
+	else
+		*word |= mask;
+	smp_mb();
+}
+
+void clear_bit(int nr, volatile unsigned long *addr)
+{
+	volatile unsigned long *word = addr + BIT_WORD(nr);
+	unsigned long mask = BIT_MASK(nr);
+
+	if (mmu_enabled())
+		ATOMIC_BITOP("bic", mask, word);
+	else
+		*word &= ~mask;
+	smp_mb();
+}
+
+int test_bit(int nr, const volatile unsigned long *addr)
+{
+	const volatile unsigned long *word = addr + BIT_WORD(nr);
+	unsigned long mask = BIT_MASK(nr);
+
+	return (*word & mask) != 0;
+}
+
+int test_and_set_bit(int nr, volatile unsigned long *addr)
+{
+	volatile unsigned long *word = addr + BIT_WORD(nr);
+	unsigned long mask = BIT_MASK(nr);
+	unsigned long old;
+
+	smp_mb();
+
+	if (mmu_enabled()) {
+		ATOMIC_TESTOP("orr", mask, word, old);
+	} else {
+		old = *word;
+		*word = old | mask;
+	}
+	smp_mb();
+
+	return (old & mask) != 0;
+}
+
+int test_and_clear_bit(int nr, volatile unsigned long *addr)
+{
+	volatile unsigned long *word = addr + BIT_WORD(nr);
+	unsigned long mask = BIT_MASK(nr);
+	unsigned long old;
+
+	smp_mb();
+
+	if (mmu_enabled()) {
+		ATOMIC_TESTOP("bic", mask, word, old);
+	} else {
+		old = *word;
+		*word = old & ~mask;
+	}
+	smp_mb();
+
+	return (old & mask) != 0;
+}
diff --git a/lib/arm64/asm/bitops.h b/lib/arm64/asm/bitops.h
new file mode 100644
index 0000000000000..3371c60bdc4f2
--- /dev/null
+++ b/lib/arm64/asm/bitops.h
@@ -0,0 +1,51 @@
+#ifndef _ASMARM64_BITOPS_H_
+#define _ASMARM64_BITOPS_H_
+/*
+ * Adapted from
+ *   include/linux/bitops.h
+ *   arch/arm64/lib/bitops.S
+ *
+ * Copyright (C) 2015, Red Hat Inc, Andrew Jones <drjones@xxxxxxxxxx>
+ *
+ * This work is licensed under the terms of the GNU LGPL, version 2.
+ */
+
+#define BITS_PER_LONG	64
+#define BIT(nr)		(1UL << (nr))
+#define BIT_MASK(nr)	(1UL << ((nr) % BITS_PER_LONG))
+#define BIT_WORD(nr)	((nr) / BITS_PER_LONG)
+
+#define ATOMIC_BITOP(insn, mask, word)			\
+({							\
+	unsigned long tmp1, tmp2;			\
+	asm volatile(					\
+	"1:	ldxr	%0, [%2]\n"			\
+	insn"	%0, %0, %3\n"				\
+	"	stxr	%w1, %0, [%2]\n"		\
+	"	cbnz	%w1, 1b\n"			\
+	: "=&r" (tmp1), "=&r" (tmp2)			\
+	: "r" (word), "r" (mask)			\
+	: "cc");					\
+})
+
+#define ATOMIC_TESTOP(insn, mask, word, old)		\
+({							\
+	unsigned long tmp1, tmp2;			\
+	asm volatile(					\
+	"1:	ldxr	%0, [%3]\n"			\
+	"	and	%1, %0, %4\n"			\
+	insn"	%0, %0, %4\n"				\
+	"	stlxr	%w2, %0, [%3]\n"		\
+	"	cbnz	%w2, 1b\n"			\
+	: "=&r" (tmp1), "=&r" (old), "=&r" (tmp2)	\
+	: "r" (word), "r" (mask)			\
+	: "cc");					\
+})
+
+extern void set_bit(int nr, volatile unsigned long *addr);
+extern void clear_bit(int nr, volatile unsigned long *addr);
+extern int test_bit(int nr, const volatile unsigned long *addr);
+extern int test_and_set_bit(int nr, volatile unsigned long *addr);
+extern int test_and_clear_bit(int nr, volatile unsigned long *addr);
+
+#endif /* _ASMARM64_BITOPS_H_ */
diff --git a/lib/arm64/asm/cpumask.h b/lib/arm64/asm/cpumask.h
new file mode 100644
index 0000000000000..d1421e7abe310
--- /dev/null
+++ b/lib/arm64/asm/cpumask.h
@@ -0,0 +1 @@
+#include "../../arm/asm/cpumask.h"
-- 
1.9.3
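
P.S.: For illustration only, not part of the patch: a minimal sketch of how a
test might use the new cpumask API. The function names mark_cpu_ready() and
show_ready_cpus() and the idea that each secondary CPU reports in with its own
index are assumptions made for the example; only cpumask_t, cpumask_set_cpu(),
for_each_cpu(), cpumask_full(), cpumask_weight() and nr_cpus come from the
patch above.

/*
 * Example only (not part of this patch): exercising the cpumask API.
 * Assumes each CPU, once up, calls mark_cpu_ready() with its index.
 */
#include <libcflat.h>
#include <asm/cpumask.h>

static cpumask_t ready;

static void mark_cpu_ready(int cpu)
{
	/* set_bit() underneath, so atomic once the MMU is enabled */
	cpumask_set_cpu(cpu, &ready);
}

static void show_ready_cpus(void)
{
	int cpu;

	/* visits only the CPUs whose bits are set, in ascending order */
	for_each_cpu(cpu, &ready)
		printf("cpu%d: ready\n", cpu);

	if (cpumask_full(&ready))
		printf("all %d cpus ready\n", nr_cpus);
	else
		printf("%d of %d cpus ready\n",
		       cpumask_weight(&ready), nr_cpus);
}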