[merged] lib-move-mathematic-helpers-to-separate-folder.patch removed from -mm tree

The patch titled
     Subject: lib: Move mathematic helpers to separate folder
has been removed from the -mm tree.  Its filename was
     lib-move-mathematic-helpers-to-separate-folder.patch

This patch was dropped because it was merged into mainline or a subsystem tree.

------------------------------------------------------
From: Andy Shevchenko <andriy.shevchenko@xxxxxxxxxxxxxxx>
Subject: lib: Move mathematic helpers to separate folder

For better maintenance and expansion, move the mathematical helpers to
a separate folder.

No functional change intended.

Note that int_sqrt() is not used as part of lib, so it is moved from lib-y to a regular obj-y object.
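
Since there is no functional change, callers are unaffected: they keep the same headers, Kconfig symbols and function signatures, and only the objects are now built from lib/math/.  As a minimal sketch, a hypothetical in-kernel user (the function name is illustrative, written only against interfaces visible in the diff below) still looks like this:

	#include <linux/kernel.h>	/* int_sqrt() */
	#include <linux/gcd.h>		/* gcd(), now built from lib/math/gcd.c */
	#include <linux/lcm.h>		/* lcm(), now built from lib/math/lcm.c */

	/* Hypothetical example: reduce a ratio to lowest terms. */
	static void example_ratio(unsigned long num, unsigned long den)
	{
		unsigned long g;

		if (!num || !den)
			return;

		g = gcd(num, den);
		pr_info("ratio %lu/%lu, lcm=%lu, int_sqrt(num)=%lu\n",
			num / g, den / g, lcm(num, den), int_sqrt(num));
	}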

Link: http://lkml.kernel.org/r/20190323172531.80025-1-andriy.shevchenko@xxxxxxxxxxxxxxx
Signed-off-by: Andy Shevchenko <andriy.shevchenko@xxxxxxxxxxxxxxx>
Signed-off-by: Mauro Carvalho Chehab <mchehab+samsung@xxxxxxxxxx>
Cc: Randy Dunlap <rdunlap@xxxxxxxxxxxxx>
Cc: Thierry Reding <thierry.reding@xxxxxxxxx>
Cc: Lee Jones <lee.jones@xxxxxxxxxx>
Cc: Daniel Thompson <daniel.thompson@xxxxxxxxxx>
Cc: Ray Jui <rjui@xxxxxxxxxxxx>
[mchehab+samsung@xxxxxxxxxx: fix broken doc references for div64.c and gcd.c]
  Link: http://lkml.kernel.org/r/734f49bae5d4052b3c25691dfefad59bea2e5843.1555580999.git.mchehab+samsung@xxxxxxxxxx
Signed-off-by: Andrew Morton <akpm@xxxxxxxxxxxxxxxxxxxx>
---

 Documentation/core-api/kernel-api.rst |    4 
 lib/Kconfig                           |   14 -
 lib/Makefile                          |   15 -
 lib/cordic.c                          |   92 -------
 lib/div64.c                           |  192 --------------
 lib/gcd.c                             |   84 ------
 lib/int_sqrt.c                        |   70 -----
 lib/lcm.c                             |   25 -
 lib/math/Kconfig                      |   11 
 lib/math/Makefile                     |    5 
 lib/math/cordic.c                     |   92 +++++++
 lib/math/div64.c                      |  192 ++++++++++++++
 lib/math/gcd.c                        |   84 ++++++
 lib/math/int_sqrt.c                   |   70 +++++
 lib/math/lcm.c                        |   25 +
 lib/math/prime_numbers.c              |  315 ++++++++++++++++++++++++
 lib/math/rational.c                   |   65 ++++
 lib/math/reciprocal_div.c             |   69 +++++
 lib/prime_numbers.c                   |  315 ------------------------
 lib/rational.c                        |   65 ----
 lib/reciprocal_div.c                  |   69 -----
 21 files changed, 938 insertions(+), 935 deletions(-)

--- a/Documentation/core-api/kernel-api.rst~lib-move-mathematic-helpers-to-separate-folder
+++ a/Documentation/core-api/kernel-api.rst
@@ -147,10 +147,10 @@ Division Functions
 .. kernel-doc:: include/linux/math64.h
    :internal:
 
-.. kernel-doc:: lib/div64.c
+.. kernel-doc:: lib/math/div64.c
    :functions: div_s64_rem div64_u64_rem div64_u64 div64_s64
 
-.. kernel-doc:: lib/gcd.c
+.. kernel-doc:: lib/math/gcd.c
    :export:
 
 UUID/GUID
--- a/lib/cordic.c
+++ /dev/null
@@ -1,92 +0,0 @@
-/*
- * Copyright (c) 2011 Broadcom Corporation
- *
- * Permission to use, copy, modify, and/or distribute this software for any
- * purpose with or without fee is hereby granted, provided that the above
- * copyright notice and this permission notice appear in all copies.
- *
- * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
- * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
- * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
- * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
- * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
- * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
- */
-#include <linux/module.h>
-#include <linux/cordic.h>
-
-static const s32 arctan_table[] = {
-	2949120,
-	1740967,
-	919879,
-	466945,
-	234379,
-	117304,
-	58666,
-	29335,
-	14668,
-	7334,
-	3667,
-	1833,
-	917,
-	458,
-	229,
-	115,
-	57,
-	29
-};
-
-/*
- * cordic_calc_iq() - calculates the i/q coordinate for given angle
- *
- * theta: angle in degrees for which i/q coordinate is to be calculated
- * coord: function output parameter holding the i/q coordinate
- */
-struct cordic_iq cordic_calc_iq(s32 theta)
-{
-	struct cordic_iq coord;
-	s32 angle, valtmp;
-	unsigned iter;
-	int signx = 1;
-	int signtheta;
-
-	coord.i = CORDIC_ANGLE_GEN;
-	coord.q = 0;
-	angle = 0;
-
-	theta = CORDIC_FIXED(theta);
-	signtheta = (theta < 0) ? -1 : 1;
-	theta = ((theta + CORDIC_FIXED(180) * signtheta) % CORDIC_FIXED(360)) -
-		CORDIC_FIXED(180) * signtheta;
-
-	if (CORDIC_FLOAT(theta) > 90) {
-		theta -= CORDIC_FIXED(180);
-		signx = -1;
-	} else if (CORDIC_FLOAT(theta) < -90) {
-		theta += CORDIC_FIXED(180);
-		signx = -1;
-	}
-
-	for (iter = 0; iter < CORDIC_NUM_ITER; iter++) {
-		if (theta > angle) {
-			valtmp = coord.i - (coord.q >> iter);
-			coord.q += (coord.i >> iter);
-			angle += arctan_table[iter];
-		} else {
-			valtmp = coord.i + (coord.q >> iter);
-			coord.q -= (coord.i >> iter);
-			angle -= arctan_table[iter];
-		}
-		coord.i = valtmp;
-	}
-
-	coord.i *= signx;
-	coord.q *= signx;
-	return coord;
-}
-EXPORT_SYMBOL(cordic_calc_iq);
-
-MODULE_DESCRIPTION("CORDIC algorithm");
-MODULE_AUTHOR("Broadcom Corporation");
-MODULE_LICENSE("Dual BSD/GPL");
--- a/lib/div64.c
+++ /dev/null
@@ -1,192 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * Copyright (C) 2003 Bernardo Innocenti <bernie@xxxxxxxxxxx>
- *
- * Based on former do_div() implementation from asm-parisc/div64.h:
- *	Copyright (C) 1999 Hewlett-Packard Co
- *	Copyright (C) 1999 David Mosberger-Tang <davidm@xxxxxxxxxx>
- *
- *
- * Generic C version of 64bit/32bit division and modulo, with
- * 64bit result and 32bit remainder.
- *
- * The fast case for (n>>32 == 0) is handled inline by do_div(). 
- *
- * Code generated for this function might be very inefficient
- * for some CPUs. __div64_32() can be overridden by linking arch-specific
- * assembly versions such as arch/ppc/lib/div64.S and arch/sh/lib/div64.S
- * or by defining a preprocessor macro in arch/include/asm/div64.h.
- */
-
-#include <linux/export.h>
-#include <linux/kernel.h>
-#include <linux/math64.h>
-
-/* Not needed on 64bit architectures */
-#if BITS_PER_LONG == 32
-
-#ifndef __div64_32
-uint32_t __attribute__((weak)) __div64_32(uint64_t *n, uint32_t base)
-{
-	uint64_t rem = *n;
-	uint64_t b = base;
-	uint64_t res, d = 1;
-	uint32_t high = rem >> 32;
-
-	/* Reduce the thing a bit first */
-	res = 0;
-	if (high >= base) {
-		high /= base;
-		res = (uint64_t) high << 32;
-		rem -= (uint64_t) (high*base) << 32;
-	}
-
-	while ((int64_t)b > 0 && b < rem) {
-		b = b+b;
-		d = d+d;
-	}
-
-	do {
-		if (rem >= b) {
-			rem -= b;
-			res += d;
-		}
-		b >>= 1;
-		d >>= 1;
-	} while (d);
-
-	*n = res;
-	return rem;
-}
-EXPORT_SYMBOL(__div64_32);
-#endif
-
-/**
- * div_s64_rem - signed 64bit divide with 64bit divisor and remainder
- * @dividend:	64bit dividend
- * @divisor:	64bit divisor
- * @remainder:  64bit remainder
- */
-#ifndef div_s64_rem
-s64 div_s64_rem(s64 dividend, s32 divisor, s32 *remainder)
-{
-	u64 quotient;
-
-	if (dividend < 0) {
-		quotient = div_u64_rem(-dividend, abs(divisor), (u32 *)remainder);
-		*remainder = -*remainder;
-		if (divisor > 0)
-			quotient = -quotient;
-	} else {
-		quotient = div_u64_rem(dividend, abs(divisor), (u32 *)remainder);
-		if (divisor < 0)
-			quotient = -quotient;
-	}
-	return quotient;
-}
-EXPORT_SYMBOL(div_s64_rem);
-#endif
-
-/**
- * div64_u64_rem - unsigned 64bit divide with 64bit divisor and remainder
- * @dividend:	64bit dividend
- * @divisor:	64bit divisor
- * @remainder:  64bit remainder
- *
- * This implementation is a comparable to algorithm used by div64_u64.
- * But this operation, which includes math for calculating the remainder,
- * is kept distinct to avoid slowing down the div64_u64 operation on 32bit
- * systems.
- */
-#ifndef div64_u64_rem
-u64 div64_u64_rem(u64 dividend, u64 divisor, u64 *remainder)
-{
-	u32 high = divisor >> 32;
-	u64 quot;
-
-	if (high == 0) {
-		u32 rem32;
-		quot = div_u64_rem(dividend, divisor, &rem32);
-		*remainder = rem32;
-	} else {
-		int n = fls(high);
-		quot = div_u64(dividend >> n, divisor >> n);
-
-		if (quot != 0)
-			quot--;
-
-		*remainder = dividend - quot * divisor;
-		if (*remainder >= divisor) {
-			quot++;
-			*remainder -= divisor;
-		}
-	}
-
-	return quot;
-}
-EXPORT_SYMBOL(div64_u64_rem);
-#endif
-
-/**
- * div64_u64 - unsigned 64bit divide with 64bit divisor
- * @dividend:	64bit dividend
- * @divisor:	64bit divisor
- *
- * This implementation is a modified version of the algorithm proposed
- * by the book 'Hacker's Delight'.  The original source and full proof
- * can be found here and is available for use without restriction.
- *
- * 'http://www.hackersdelight.org/hdcodetxt/divDouble.c.txt'
- */
-#ifndef div64_u64
-u64 div64_u64(u64 dividend, u64 divisor)
-{
-	u32 high = divisor >> 32;
-	u64 quot;
-
-	if (high == 0) {
-		quot = div_u64(dividend, divisor);
-	} else {
-		int n = fls(high);
-		quot = div_u64(dividend >> n, divisor >> n);
-
-		if (quot != 0)
-			quot--;
-		if ((dividend - quot * divisor) >= divisor)
-			quot++;
-	}
-
-	return quot;
-}
-EXPORT_SYMBOL(div64_u64);
-#endif
-
-/**
- * div64_s64 - signed 64bit divide with 64bit divisor
- * @dividend:	64bit dividend
- * @divisor:	64bit divisor
- */
-#ifndef div64_s64
-s64 div64_s64(s64 dividend, s64 divisor)
-{
-	s64 quot, t;
-
-	quot = div64_u64(abs(dividend), abs(divisor));
-	t = (dividend ^ divisor) >> 63;
-
-	return (quot ^ t) - t;
-}
-EXPORT_SYMBOL(div64_s64);
-#endif
-
-#endif /* BITS_PER_LONG == 32 */
-
-/*
- * Iterative div/mod for use when dividend is not expected to be much
- * bigger than divisor.
- */
-u32 iter_div_u64_rem(u64 dividend, u32 divisor, u64 *remainder)
-{
-	return __iter_div_u64_rem(dividend, divisor, remainder);
-}
-EXPORT_SYMBOL(iter_div_u64_rem);
--- a/lib/gcd.c
+++ /dev/null
@@ -1,84 +0,0 @@
-#include <linux/kernel.h>
-#include <linux/gcd.h>
-#include <linux/export.h>
-
-/*
- * This implements the binary GCD algorithm. (Often attributed to Stein,
- * but as Knuth has noted, appears in a first-century Chinese math text.)
- *
- * This is faster than the division-based algorithm even on x86, which
- * has decent hardware division.
- */
-
-#if !defined(CONFIG_CPU_NO_EFFICIENT_FFS)
-
-/* If __ffs is available, the even/odd algorithm benchmarks slower. */
-
-/**
- * gcd - calculate and return the greatest common divisor of 2 unsigned longs
- * @a: first value
- * @b: second value
- */
-unsigned long gcd(unsigned long a, unsigned long b)
-{
-	unsigned long r = a | b;
-
-	if (!a || !b)
-		return r;
-
-	b >>= __ffs(b);
-	if (b == 1)
-		return r & -r;
-
-	for (;;) {
-		a >>= __ffs(a);
-		if (a == 1)
-			return r & -r;
-		if (a == b)
-			return a << __ffs(r);
-
-		if (a < b)
-			swap(a, b);
-		a -= b;
-	}
-}
-
-#else
-
-/* If normalization is done by loops, the even/odd algorithm is a win. */
-unsigned long gcd(unsigned long a, unsigned long b)
-{
-	unsigned long r = a | b;
-
-	if (!a || !b)
-		return r;
-
-	/* Isolate lsbit of r */
-	r &= -r;
-
-	while (!(b & r))
-		b >>= 1;
-	if (b == r)
-		return r;
-
-	for (;;) {
-		while (!(a & r))
-			a >>= 1;
-		if (a == r)
-			return r;
-		if (a == b)
-			return a;
-
-		if (a < b)
-			swap(a, b);
-		a -= b;
-		a >>= 1;
-		if (a & r)
-			a += b;
-		a >>= 1;
-	}
-}
-
-#endif
-
-EXPORT_SYMBOL_GPL(gcd);
--- a/lib/int_sqrt.c
+++ /dev/null
@@ -1,70 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * Copyright (C) 2013 Davidlohr Bueso <davidlohr.bueso@xxxxxx>
- *
- *  Based on the shift-and-subtract algorithm for computing integer
- *  square root from Guy L. Steele.
- */
-
-#include <linux/kernel.h>
-#include <linux/export.h>
-#include <linux/bitops.h>
-
-/**
- * int_sqrt - computes the integer square root
- * @x: integer of which to calculate the sqrt
- *
- * Computes: floor(sqrt(x))
- */
-unsigned long int_sqrt(unsigned long x)
-{
-	unsigned long b, m, y = 0;
-
-	if (x <= 1)
-		return x;
-
-	m = 1UL << (__fls(x) & ~1UL);
-	while (m != 0) {
-		b = y + m;
-		y >>= 1;
-
-		if (x >= b) {
-			x -= b;
-			y += m;
-		}
-		m >>= 2;
-	}
-
-	return y;
-}
-EXPORT_SYMBOL(int_sqrt);
-
-#if BITS_PER_LONG < 64
-/**
- * int_sqrt64 - strongly typed int_sqrt function when minimum 64 bit input
- * is expected.
- * @x: 64bit integer of which to calculate the sqrt
- */
-u32 int_sqrt64(u64 x)
-{
-	u64 b, m, y = 0;
-
-	if (x <= ULONG_MAX)
-		return int_sqrt((unsigned long) x);
-
-	m = 1ULL << ((fls64(x) - 1) & ~1ULL);
-	while (m != 0) {
-		b = y + m;
-		y >>= 1;
-
-		if (x >= b) {
-			x -= b;
-			y += m;
-		}
-		m >>= 2;
-	}
-
-	return y;
-}
-EXPORT_SYMBOL(int_sqrt64);
-#endif
--- a/lib/Kconfig~lib-move-mathematic-helpers-to-separate-folder
+++ a/lib/Kconfig
@@ -46,9 +46,6 @@ config HAVE_ARCH_BITREVERSE
 	  This option enables the use of hardware bit-reversal instructions on
 	  architectures which support such operations.
 
-config RATIONAL
-	bool
-
 config GENERIC_STRNCPY_FROM_USER
 	bool
 
@@ -61,6 +58,8 @@ config GENERIC_NET_UTILS
 config GENERIC_FIND_FIRST_BIT
 	bool
 
+source "lib/math/Kconfig"
+
 config NO_GENERIC_PCI_IOPORT_MAP
 	bool
 
@@ -531,12 +530,6 @@ config LRU_CACHE
 config CLZ_TAB
 	bool
 
-config CORDIC
-	tristate "CORDIC algorithm"
-	help
-	  This option provides an implementation of the CORDIC algorithm;
-	  calculations are in fixed point. Module will be called cordic.
-
 config DDR
 	bool "JEDEC DDR data"
 	help
@@ -628,9 +621,6 @@ config SBITMAP
 config PARMAN
 	tristate "parman" if COMPILE_TEST
 
-config PRIME_NUMBERS
-	tristate
-
 config STRING_SELFTEST
 	tristate "Test string functions"
 
--- a/lib/lcm.c
+++ /dev/null
@@ -1,25 +0,0 @@
-#include <linux/compiler.h>
-#include <linux/gcd.h>
-#include <linux/export.h>
-#include <linux/lcm.h>
-
-/* Lowest common multiple */
-unsigned long lcm(unsigned long a, unsigned long b)
-{
-	if (a && b)
-		return (a / gcd(a, b)) * b;
-	else
-		return 0;
-}
-EXPORT_SYMBOL_GPL(lcm);
-
-unsigned long lcm_not_zero(unsigned long a, unsigned long b)
-{
-	unsigned long l = lcm(a, b);
-
-	if (l)
-		return l;
-
-	return (b ? : a);
-}
-EXPORT_SYMBOL_GPL(lcm_not_zero);
--- a/lib/Makefile~lib-move-mathematic-helpers-to-separate-folder
+++ a/lib/Makefile
@@ -30,7 +30,7 @@ endif
 
 lib-y := ctype.o string.o vsprintf.o cmdline.o \
 	 rbtree.o radix-tree.o timerqueue.o xarray.o \
-	 idr.o int_sqrt.o extable.o \
+	 idr.o extable.o \
 	 sha1.o chacha.o irq_regs.o argv_split.o \
 	 flex_proportions.o ratelimit.o show_mem.o \
 	 is_single_threaded.o plist.o decompress.o kobject_uevent.o \
@@ -44,11 +44,11 @@ lib-$(CONFIG_SMP) += cpumask.o
 lib-y	+= kobject.o klist.o
 obj-y	+= lockref.o
 
-obj-y += bcd.o div64.o sort.o parser.o debug_locks.o random32.o \
+obj-y += bcd.o sort.o parser.o debug_locks.o random32.o \
 	 bust_spinlocks.o kasprintf.o bitmap.o scatterlist.o \
-	 gcd.o lcm.o list_sort.o uuid.o iov_iter.o clz_ctz.o \
+	 list_sort.o uuid.o iov_iter.o clz_ctz.o \
 	 bsearch.o find_bit.o llist.o memweight.o kfifo.o \
-	 percpu-refcount.o rhashtable.o reciprocal_div.o \
+	 percpu-refcount.o rhashtable.o \
 	 once.o refcount.o usercopy.o errseq.o bucket_locks.o \
 	 generic-radix-tree.o
 obj-$(CONFIG_STRING_SELFTEST) += test_string.o
@@ -102,6 +102,8 @@ endif
 obj-$(CONFIG_DEBUG_INFO_REDUCED) += debug_info.o
 CFLAGS_debug_info.o += $(call cc-option, -femit-struct-debug-detailed=any)
 
+obj-y += math/
+
 obj-$(CONFIG_GENERIC_IOMAP) += iomap.o
 obj-$(CONFIG_GENERIC_PCI_IOMAP) += pci_iomap.o
 obj-$(CONFIG_HAS_IOMEM) += iomap_copy.o devres.o
@@ -121,7 +123,6 @@ obj-$(CONFIG_DEBUG_OBJECTS) += debugobje
 
 obj-$(CONFIG_BITREVERSE) += bitrev.o
 obj-$(CONFIG_PACKING)	+= packing.o
-obj-$(CONFIG_RATIONAL)	+= rational.o
 obj-$(CONFIG_CRC_CCITT)	+= crc-ccitt.o
 obj-$(CONFIG_CRC16)	+= crc16.o
 obj-$(CONFIG_CRC_T10DIF)+= crc-t10dif.o
@@ -195,8 +196,6 @@ obj-$(CONFIG_ATOMIC64_SELFTEST) += atomi
 
 obj-$(CONFIG_CPU_RMAP) += cpu_rmap.o
 
-obj-$(CONFIG_CORDIC) += cordic.o
-
 obj-$(CONFIG_DQL) += dynamic_queue_limits.o
 
 obj-$(CONFIG_GLOB) += glob.o
@@ -238,8 +237,6 @@ obj-$(CONFIG_ASN1) += asn1_decoder.o
 
 obj-$(CONFIG_FONT_SUPPORT) += fonts/
 
-obj-$(CONFIG_PRIME_NUMBERS) += prime_numbers.o
-
 hostprogs-y	:= gen_crc32table
 hostprogs-y	+= gen_crc64table
 clean-files	:= crc32table.h
--- /dev/null
+++ a/lib/math/cordic.c
@@ -0,0 +1,92 @@
+/*
+ * Copyright (c) 2011 Broadcom Corporation
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
+ * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
+ * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
+ * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+#include <linux/module.h>
+#include <linux/cordic.h>
+
+static const s32 arctan_table[] = {
+	2949120,
+	1740967,
+	919879,
+	466945,
+	234379,
+	117304,
+	58666,
+	29335,
+	14668,
+	7334,
+	3667,
+	1833,
+	917,
+	458,
+	229,
+	115,
+	57,
+	29
+};
+
+/*
+ * cordic_calc_iq() - calculates the i/q coordinate for given angle
+ *
+ * theta: angle in degrees for which i/q coordinate is to be calculated
+ * coord: function output parameter holding the i/q coordinate
+ */
+struct cordic_iq cordic_calc_iq(s32 theta)
+{
+	struct cordic_iq coord;
+	s32 angle, valtmp;
+	unsigned iter;
+	int signx = 1;
+	int signtheta;
+
+	coord.i = CORDIC_ANGLE_GEN;
+	coord.q = 0;
+	angle = 0;
+
+	theta = CORDIC_FIXED(theta);
+	signtheta = (theta < 0) ? -1 : 1;
+	theta = ((theta + CORDIC_FIXED(180) * signtheta) % CORDIC_FIXED(360)) -
+		CORDIC_FIXED(180) * signtheta;
+
+	if (CORDIC_FLOAT(theta) > 90) {
+		theta -= CORDIC_FIXED(180);
+		signx = -1;
+	} else if (CORDIC_FLOAT(theta) < -90) {
+		theta += CORDIC_FIXED(180);
+		signx = -1;
+	}
+
+	for (iter = 0; iter < CORDIC_NUM_ITER; iter++) {
+		if (theta > angle) {
+			valtmp = coord.i - (coord.q >> iter);
+			coord.q += (coord.i >> iter);
+			angle += arctan_table[iter];
+		} else {
+			valtmp = coord.i + (coord.q >> iter);
+			coord.q -= (coord.i >> iter);
+			angle -= arctan_table[iter];
+		}
+		coord.i = valtmp;
+	}
+
+	coord.i *= signx;
+	coord.q *= signx;
+	return coord;
+}
+EXPORT_SYMBOL(cordic_calc_iq);
+
+MODULE_DESCRIPTION("CORDIC algorithm");
+MODULE_AUTHOR("Broadcom Corporation");
+MODULE_LICENSE("Dual BSD/GPL");
--- /dev/null
+++ a/lib/math/div64.c
@@ -0,0 +1,192 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2003 Bernardo Innocenti <bernie@xxxxxxxxxxx>
+ *
+ * Based on former do_div() implementation from asm-parisc/div64.h:
+ *	Copyright (C) 1999 Hewlett-Packard Co
+ *	Copyright (C) 1999 David Mosberger-Tang <davidm@xxxxxxxxxx>
+ *
+ *
+ * Generic C version of 64bit/32bit division and modulo, with
+ * 64bit result and 32bit remainder.
+ *
+ * The fast case for (n>>32 == 0) is handled inline by do_div().
+ *
+ * Code generated for this function might be very inefficient
+ * for some CPUs. __div64_32() can be overridden by linking arch-specific
+ * assembly versions such as arch/ppc/lib/div64.S and arch/sh/lib/div64.S
+ * or by defining a preprocessor macro in arch/include/asm/div64.h.
+ */
+
+#include <linux/export.h>
+#include <linux/kernel.h>
+#include <linux/math64.h>
+
+/* Not needed on 64bit architectures */
+#if BITS_PER_LONG == 32
+
+#ifndef __div64_32
+uint32_t __attribute__((weak)) __div64_32(uint64_t *n, uint32_t base)
+{
+	uint64_t rem = *n;
+	uint64_t b = base;
+	uint64_t res, d = 1;
+	uint32_t high = rem >> 32;
+
+	/* Reduce the thing a bit first */
+	res = 0;
+	if (high >= base) {
+		high /= base;
+		res = (uint64_t) high << 32;
+		rem -= (uint64_t) (high*base) << 32;
+	}
+
+	while ((int64_t)b > 0 && b < rem) {
+		b = b+b;
+		d = d+d;
+	}
+
+	do {
+		if (rem >= b) {
+			rem -= b;
+			res += d;
+		}
+		b >>= 1;
+		d >>= 1;
+	} while (d);
+
+	*n = res;
+	return rem;
+}
+EXPORT_SYMBOL(__div64_32);
+#endif
+
+/**
+ * div_s64_rem - signed 64bit divide with 64bit divisor and remainder
+ * @dividend:	64bit dividend
+ * @divisor:	64bit divisor
+ * @remainder:  64bit remainder
+ */
+#ifndef div_s64_rem
+s64 div_s64_rem(s64 dividend, s32 divisor, s32 *remainder)
+{
+	u64 quotient;
+
+	if (dividend < 0) {
+		quotient = div_u64_rem(-dividend, abs(divisor), (u32 *)remainder);
+		*remainder = -*remainder;
+		if (divisor > 0)
+			quotient = -quotient;
+	} else {
+		quotient = div_u64_rem(dividend, abs(divisor), (u32 *)remainder);
+		if (divisor < 0)
+			quotient = -quotient;
+	}
+	return quotient;
+}
+EXPORT_SYMBOL(div_s64_rem);
+#endif
+
+/**
+ * div64_u64_rem - unsigned 64bit divide with 64bit divisor and remainder
+ * @dividend:	64bit dividend
+ * @divisor:	64bit divisor
+ * @remainder:  64bit remainder
+ *
+ * This implementation is a comparable to algorithm used by div64_u64.
+ * But this operation, which includes math for calculating the remainder,
+ * is kept distinct to avoid slowing down the div64_u64 operation on 32bit
+ * systems.
+ */
+#ifndef div64_u64_rem
+u64 div64_u64_rem(u64 dividend, u64 divisor, u64 *remainder)
+{
+	u32 high = divisor >> 32;
+	u64 quot;
+
+	if (high == 0) {
+		u32 rem32;
+		quot = div_u64_rem(dividend, divisor, &rem32);
+		*remainder = rem32;
+	} else {
+		int n = fls(high);
+		quot = div_u64(dividend >> n, divisor >> n);
+
+		if (quot != 0)
+			quot--;
+
+		*remainder = dividend - quot * divisor;
+		if (*remainder >= divisor) {
+			quot++;
+			*remainder -= divisor;
+		}
+	}
+
+	return quot;
+}
+EXPORT_SYMBOL(div64_u64_rem);
+#endif
+
+/**
+ * div64_u64 - unsigned 64bit divide with 64bit divisor
+ * @dividend:	64bit dividend
+ * @divisor:	64bit divisor
+ *
+ * This implementation is a modified version of the algorithm proposed
+ * by the book 'Hacker's Delight'.  The original source and full proof
+ * can be found here and is available for use without restriction.
+ *
+ * 'http://www.hackersdelight.org/hdcodetxt/divDouble.c.txt'
+ */
+#ifndef div64_u64
+u64 div64_u64(u64 dividend, u64 divisor)
+{
+	u32 high = divisor >> 32;
+	u64 quot;
+
+	if (high == 0) {
+		quot = div_u64(dividend, divisor);
+	} else {
+		int n = fls(high);
+		quot = div_u64(dividend >> n, divisor >> n);
+
+		if (quot != 0)
+			quot--;
+		if ((dividend - quot * divisor) >= divisor)
+			quot++;
+	}
+
+	return quot;
+}
+EXPORT_SYMBOL(div64_u64);
+#endif
+
+/**
+ * div64_s64 - signed 64bit divide with 64bit divisor
+ * @dividend:	64bit dividend
+ * @divisor:	64bit divisor
+ */
+#ifndef div64_s64
+s64 div64_s64(s64 dividend, s64 divisor)
+{
+	s64 quot, t;
+
+	quot = div64_u64(abs(dividend), abs(divisor));
+	t = (dividend ^ divisor) >> 63;
+
+	return (quot ^ t) - t;
+}
+EXPORT_SYMBOL(div64_s64);
+#endif
+
+#endif /* BITS_PER_LONG == 32 */
+
+/*
+ * Iterative div/mod for use when dividend is not expected to be much
+ * bigger than divisor.
+ */
+u32 iter_div_u64_rem(u64 dividend, u32 divisor, u64 *remainder)
+{
+	return __iter_div_u64_rem(dividend, divisor, remainder);
+}
+EXPORT_SYMBOL(iter_div_u64_rem);
--- /dev/null
+++ a/lib/math/gcd.c
@@ -0,0 +1,84 @@
+#include <linux/kernel.h>
+#include <linux/gcd.h>
+#include <linux/export.h>
+
+/*
+ * This implements the binary GCD algorithm. (Often attributed to Stein,
+ * but as Knuth has noted, appears in a first-century Chinese math text.)
+ *
+ * This is faster than the division-based algorithm even on x86, which
+ * has decent hardware division.
+ */
+
+#if !defined(CONFIG_CPU_NO_EFFICIENT_FFS)
+
+/* If __ffs is available, the even/odd algorithm benchmarks slower. */
+
+/**
+ * gcd - calculate and return the greatest common divisor of 2 unsigned longs
+ * @a: first value
+ * @b: second value
+ */
+unsigned long gcd(unsigned long a, unsigned long b)
+{
+	unsigned long r = a | b;
+
+	if (!a || !b)
+		return r;
+
+	b >>= __ffs(b);
+	if (b == 1)
+		return r & -r;
+
+	for (;;) {
+		a >>= __ffs(a);
+		if (a == 1)
+			return r & -r;
+		if (a == b)
+			return a << __ffs(r);
+
+		if (a < b)
+			swap(a, b);
+		a -= b;
+	}
+}
+
+#else
+
+/* If normalization is done by loops, the even/odd algorithm is a win. */
+unsigned long gcd(unsigned long a, unsigned long b)
+{
+	unsigned long r = a | b;
+
+	if (!a || !b)
+		return r;
+
+	/* Isolate lsbit of r */
+	r &= -r;
+
+	while (!(b & r))
+		b >>= 1;
+	if (b == r)
+		return r;
+
+	for (;;) {
+		while (!(a & r))
+			a >>= 1;
+		if (a == r)
+			return r;
+		if (a == b)
+			return a;
+
+		if (a < b)
+			swap(a, b);
+		a -= b;
+		a >>= 1;
+		if (a & r)
+			a += b;
+		a >>= 1;
+	}
+}
+
+#endif
+
+EXPORT_SYMBOL_GPL(gcd);
--- /dev/null
+++ a/lib/math/int_sqrt.c
@@ -0,0 +1,70 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2013 Davidlohr Bueso <davidlohr.bueso@xxxxxx>
+ *
+ *  Based on the shift-and-subtract algorithm for computing integer
+ *  square root from Guy L. Steele.
+ */
+
+#include <linux/kernel.h>
+#include <linux/export.h>
+#include <linux/bitops.h>
+
+/**
+ * int_sqrt - computes the integer square root
+ * @x: integer of which to calculate the sqrt
+ *
+ * Computes: floor(sqrt(x))
+ */
+unsigned long int_sqrt(unsigned long x)
+{
+	unsigned long b, m, y = 0;
+
+	if (x <= 1)
+		return x;
+
+	m = 1UL << (__fls(x) & ~1UL);
+	while (m != 0) {
+		b = y + m;
+		y >>= 1;
+
+		if (x >= b) {
+			x -= b;
+			y += m;
+		}
+		m >>= 2;
+	}
+
+	return y;
+}
+EXPORT_SYMBOL(int_sqrt);
+
+#if BITS_PER_LONG < 64
+/**
+ * int_sqrt64 - strongly typed int_sqrt function when minimum 64 bit input
+ * is expected.
+ * @x: 64bit integer of which to calculate the sqrt
+ */
+u32 int_sqrt64(u64 x)
+{
+	u64 b, m, y = 0;
+
+	if (x <= ULONG_MAX)
+		return int_sqrt((unsigned long) x);
+
+	m = 1ULL << ((fls64(x) - 1) & ~1ULL);
+	while (m != 0) {
+		b = y + m;
+		y >>= 1;
+
+		if (x >= b) {
+			x -= b;
+			y += m;
+		}
+		m >>= 2;
+	}
+
+	return y;
+}
+EXPORT_SYMBOL(int_sqrt64);
+#endif
--- /dev/null
+++ a/lib/math/Kconfig
@@ -0,0 +1,11 @@
+config CORDIC
+	tristate "CORDIC algorithm"
+	help
+	  This option provides an implementation of the CORDIC algorithm;
+	  calculations are in fixed point. Module will be called cordic.
+
+config PRIME_NUMBERS
+	tristate
+
+config RATIONAL
+	bool
--- /dev/null
+++ a/lib/math/lcm.c
@@ -0,0 +1,25 @@
+#include <linux/compiler.h>
+#include <linux/gcd.h>
+#include <linux/export.h>
+#include <linux/lcm.h>
+
+/* Lowest common multiple */
+unsigned long lcm(unsigned long a, unsigned long b)
+{
+	if (a && b)
+		return (a / gcd(a, b)) * b;
+	else
+		return 0;
+}
+EXPORT_SYMBOL_GPL(lcm);
+
+unsigned long lcm_not_zero(unsigned long a, unsigned long b)
+{
+	unsigned long l = lcm(a, b);
+
+	if (l)
+		return l;
+
+	return (b ? : a);
+}
+EXPORT_SYMBOL_GPL(lcm_not_zero);
--- /dev/null
+++ a/lib/math/Makefile
@@ -0,0 +1,5 @@
+obj-y += div64.o gcd.o lcm.o int_sqrt.o reciprocal_div.o
+
+obj-$(CONFIG_CORDIC)		+= cordic.o
+obj-$(CONFIG_PRIME_NUMBERS)	+= prime_numbers.o
+obj-$(CONFIG_RATIONAL)		+= rational.o
--- /dev/null
+++ a/lib/math/prime_numbers.c
@@ -0,0 +1,315 @@
+#define pr_fmt(fmt) "prime numbers: " fmt "\n"
+
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/prime_numbers.h>
+#include <linux/slab.h>
+
+#define bitmap_size(nbits) (BITS_TO_LONGS(nbits) * sizeof(unsigned long))
+
+struct primes {
+	struct rcu_head rcu;
+	unsigned long last, sz;
+	unsigned long primes[];
+};
+
+#if BITS_PER_LONG == 64
+static const struct primes small_primes = {
+	.last = 61,
+	.sz = 64,
+	.primes = {
+		BIT(2) |
+		BIT(3) |
+		BIT(5) |
+		BIT(7) |
+		BIT(11) |
+		BIT(13) |
+		BIT(17) |
+		BIT(19) |
+		BIT(23) |
+		BIT(29) |
+		BIT(31) |
+		BIT(37) |
+		BIT(41) |
+		BIT(43) |
+		BIT(47) |
+		BIT(53) |
+		BIT(59) |
+		BIT(61)
+	}
+};
+#elif BITS_PER_LONG == 32
+static const struct primes small_primes = {
+	.last = 31,
+	.sz = 32,
+	.primes = {
+		BIT(2) |
+		BIT(3) |
+		BIT(5) |
+		BIT(7) |
+		BIT(11) |
+		BIT(13) |
+		BIT(17) |
+		BIT(19) |
+		BIT(23) |
+		BIT(29) |
+		BIT(31)
+	}
+};
+#else
+#error "unhandled BITS_PER_LONG"
+#endif
+
+static DEFINE_MUTEX(lock);
+static const struct primes __rcu *primes = RCU_INITIALIZER(&small_primes);
+
+static unsigned long selftest_max;
+
+static bool slow_is_prime_number(unsigned long x)
+{
+	unsigned long y = int_sqrt(x);
+
+	while (y > 1) {
+		if ((x % y) == 0)
+			break;
+		y--;
+	}
+
+	return y == 1;
+}
+
+static unsigned long slow_next_prime_number(unsigned long x)
+{
+	while (x < ULONG_MAX && !slow_is_prime_number(++x))
+		;
+
+	return x;
+}
+
+static unsigned long clear_multiples(unsigned long x,
+				     unsigned long *p,
+				     unsigned long start,
+				     unsigned long end)
+{
+	unsigned long m;
+
+	m = 2 * x;
+	if (m < start)
+		m = roundup(start, x);
+
+	while (m < end) {
+		__clear_bit(m, p);
+		m += x;
+	}
+
+	return x;
+}
+
+static bool expand_to_next_prime(unsigned long x)
+{
+	const struct primes *p;
+	struct primes *new;
+	unsigned long sz, y;
+
+	/* Betrand's Postulate (or Chebyshev's theorem) states that if n > 3,
+	 * there is always at least one prime p between n and 2n - 2.
+	 * Equivalently, if n > 1, then there is always at least one prime p
+	 * such that n < p < 2n.
+	 *
+	 * http://mathworld.wolfram.com/BertrandsPostulate.html
+	 * https://en.wikipedia.org/wiki/Bertrand's_postulate
+	 */
+	sz = 2 * x;
+	if (sz < x)
+		return false;
+
+	sz = round_up(sz, BITS_PER_LONG);
+	new = kmalloc(sizeof(*new) + bitmap_size(sz),
+		      GFP_KERNEL | __GFP_NOWARN);
+	if (!new)
+		return false;
+
+	mutex_lock(&lock);
+	p = rcu_dereference_protected(primes, lockdep_is_held(&lock));
+	if (x < p->last) {
+		kfree(new);
+		goto unlock;
+	}
+
+	/* Where memory permits, track the primes using the
+	 * Sieve of Eratosthenes. The sieve is to remove all multiples of known
+	 * primes from the set, what remains in the set is therefore prime.
+	 */
+	bitmap_fill(new->primes, sz);
+	bitmap_copy(new->primes, p->primes, p->sz);
+	for (y = 2UL; y < sz; y = find_next_bit(new->primes, sz, y + 1))
+		new->last = clear_multiples(y, new->primes, p->sz, sz);
+	new->sz = sz;
+
+	BUG_ON(new->last <= x);
+
+	rcu_assign_pointer(primes, new);
+	if (p != &small_primes)
+		kfree_rcu((struct primes *)p, rcu);
+
+unlock:
+	mutex_unlock(&lock);
+	return true;
+}
+
+static void free_primes(void)
+{
+	const struct primes *p;
+
+	mutex_lock(&lock);
+	p = rcu_dereference_protected(primes, lockdep_is_held(&lock));
+	if (p != &small_primes) {
+		rcu_assign_pointer(primes, &small_primes);
+		kfree_rcu((struct primes *)p, rcu);
+	}
+	mutex_unlock(&lock);
+}
+
+/**
+ * next_prime_number - return the next prime number
+ * @x: the starting point for searching to test
+ *
+ * A prime number is an integer greater than 1 that is only divisible by
+ * itself and 1.  The set of prime numbers is computed using the Sieve of
+ * Eratoshenes (on finding a prime, all multiples of that prime are removed
+ * from the set) enabling a fast lookup of the next prime number larger than
+ * @x. If the sieve fails (memory limitation), the search falls back to using
+ * slow trial-divison, up to the value of ULONG_MAX (which is reported as the
+ * final prime as a sentinel).
+ *
+ * Returns: the next prime number larger than @x
+ */
+unsigned long next_prime_number(unsigned long x)
+{
+	const struct primes *p;
+
+	rcu_read_lock();
+	p = rcu_dereference(primes);
+	while (x >= p->last) {
+		rcu_read_unlock();
+
+		if (!expand_to_next_prime(x))
+			return slow_next_prime_number(x);
+
+		rcu_read_lock();
+		p = rcu_dereference(primes);
+	}
+	x = find_next_bit(p->primes, p->last, x + 1);
+	rcu_read_unlock();
+
+	return x;
+}
+EXPORT_SYMBOL(next_prime_number);
+
+/**
+ * is_prime_number - test whether the given number is prime
+ * @x: the number to test
+ *
+ * A prime number is an integer greater than 1 that is only divisible by
+ * itself and 1. Internally a cache of prime numbers is kept (to speed up
+ * searching for sequential primes, see next_prime_number()), but if the number
+ * falls outside of that cache, its primality is tested using trial-divison.
+ *
+ * Returns: true if @x is prime, false for composite numbers.
+ */
+bool is_prime_number(unsigned long x)
+{
+	const struct primes *p;
+	bool result;
+
+	rcu_read_lock();
+	p = rcu_dereference(primes);
+	while (x >= p->sz) {
+		rcu_read_unlock();
+
+		if (!expand_to_next_prime(x))
+			return slow_is_prime_number(x);
+
+		rcu_read_lock();
+		p = rcu_dereference(primes);
+	}
+	result = test_bit(x, p->primes);
+	rcu_read_unlock();
+
+	return result;
+}
+EXPORT_SYMBOL(is_prime_number);
+
+static void dump_primes(void)
+{
+	const struct primes *p;
+	char *buf;
+
+	buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
+
+	rcu_read_lock();
+	p = rcu_dereference(primes);
+
+	if (buf)
+		bitmap_print_to_pagebuf(true, buf, p->primes, p->sz);
+	pr_info("primes.{last=%lu, .sz=%lu, .primes[]=...x%lx} = %s",
+		p->last, p->sz, p->primes[BITS_TO_LONGS(p->sz) - 1], buf);
+
+	rcu_read_unlock();
+
+	kfree(buf);
+}
+
+static int selftest(unsigned long max)
+{
+	unsigned long x, last;
+
+	if (!max)
+		return 0;
+
+	for (last = 0, x = 2; x < max; x++) {
+		bool slow = slow_is_prime_number(x);
+		bool fast = is_prime_number(x);
+
+		if (slow != fast) {
+			pr_err("inconsistent result for is-prime(%lu): slow=%s, fast=%s!",
+			       x, slow ? "yes" : "no", fast ? "yes" : "no");
+			goto err;
+		}
+
+		if (!slow)
+			continue;
+
+		if (next_prime_number(last) != x) {
+			pr_err("incorrect result for next-prime(%lu): expected %lu, got %lu",
+			       last, x, next_prime_number(last));
+			goto err;
+		}
+		last = x;
+	}
+
+	pr_info("selftest(%lu) passed, last prime was %lu", x, last);
+	return 0;
+
+err:
+	dump_primes();
+	return -EINVAL;
+}
+
+static int __init primes_init(void)
+{
+	return selftest(selftest_max);
+}
+
+static void __exit primes_exit(void)
+{
+	free_primes();
+}
+
+module_init(primes_init);
+module_exit(primes_exit);
+
+module_param_named(selftest, selftest_max, ulong, 0400);
+
+MODULE_AUTHOR("Intel Corporation");
+MODULE_LICENSE("GPL");
--- /dev/null
+++ a/lib/math/rational.c
@@ -0,0 +1,65 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * rational fractions
+ *
+ * Copyright (C) 2009 emlix GmbH, Oskar Schirmer <oskar@xxxxxxxxx>
+ *
+ * helper functions when coping with rational numbers
+ */
+
+#include <linux/rational.h>
+#include <linux/compiler.h>
+#include <linux/export.h>
+
+/*
+ * calculate best rational approximation for a given fraction
+ * taking into account restricted register size, e.g. to find
+ * appropriate values for a pll with 5 bit denominator and
+ * 8 bit numerator register fields, trying to set up with a
+ * frequency ratio of 3.1415, one would say:
+ *
+ * rational_best_approximation(31415, 10000,
+ *		(1 << 8) - 1, (1 << 5) - 1, &n, &d);
+ *
+ * you may look at given_numerator as a fixed point number,
+ * with the fractional part size described in given_denominator.
+ *
+ * for theoretical background, see:
+ * http://en.wikipedia.org/wiki/Continued_fraction
+ */
+
+void rational_best_approximation(
+	unsigned long given_numerator, unsigned long given_denominator,
+	unsigned long max_numerator, unsigned long max_denominator,
+	unsigned long *best_numerator, unsigned long *best_denominator)
+{
+	unsigned long n, d, n0, d0, n1, d1;
+	n = given_numerator;
+	d = given_denominator;
+	n0 = d1 = 0;
+	n1 = d0 = 1;
+	for (;;) {
+		unsigned long t, a;
+		if ((n1 > max_numerator) || (d1 > max_denominator)) {
+			n1 = n0;
+			d1 = d0;
+			break;
+		}
+		if (d == 0)
+			break;
+		t = d;
+		a = n / d;
+		d = n % d;
+		n = t;
+		t = n0 + a * n1;
+		n0 = n1;
+		n1 = t;
+		t = d0 + a * d1;
+		d0 = d1;
+		d1 = t;
+	}
+	*best_numerator = n1;
+	*best_denominator = d1;
+}
+
+EXPORT_SYMBOL(rational_best_approximation);
--- /dev/null
+++ a/lib/math/reciprocal_div.c
@@ -0,0 +1,69 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <linux/bug.h>
+#include <linux/kernel.h>
+#include <asm/div64.h>
+#include <linux/reciprocal_div.h>
+#include <linux/export.h>
+
+/*
+ * For a description of the algorithm please have a look at
+ * include/linux/reciprocal_div.h
+ */
+
+struct reciprocal_value reciprocal_value(u32 d)
+{
+	struct reciprocal_value R;
+	u64 m;
+	int l;
+
+	l = fls(d - 1);
+	m = ((1ULL << 32) * ((1ULL << l) - d));
+	do_div(m, d);
+	++m;
+	R.m = (u32)m;
+	R.sh1 = min(l, 1);
+	R.sh2 = max(l - 1, 0);
+
+	return R;
+}
+EXPORT_SYMBOL(reciprocal_value);
+
+struct reciprocal_value_adv reciprocal_value_adv(u32 d, u8 prec)
+{
+	struct reciprocal_value_adv R;
+	u32 l, post_shift;
+	u64 mhigh, mlow;
+
+	/* ceil(log2(d)) */
+	l = fls(d - 1);
+	/* NOTE: mlow/mhigh could overflow u64 when l == 32. This case needs to
+	 * be handled before calling "reciprocal_value_adv", please see the
+	 * comment at include/linux/reciprocal_div.h.
+	 */
+	WARN(l == 32,
+	     "ceil(log2(0x%08x)) == 32, %s doesn't support such divisor",
+	     d, __func__);
+	post_shift = l;
+	mlow = 1ULL << (32 + l);
+	do_div(mlow, d);
+	mhigh = (1ULL << (32 + l)) + (1ULL << (32 + l - prec));
+	do_div(mhigh, d);
+
+	for (; post_shift > 0; post_shift--) {
+		u64 lo = mlow >> 1, hi = mhigh >> 1;
+
+		if (lo >= hi)
+			break;
+
+		mlow = lo;
+		mhigh = hi;
+	}
+
+	R.m = (u32)mhigh;
+	R.sh = post_shift;
+	R.exp = l;
+	R.is_wide_m = mhigh > U32_MAX;
+
+	return R;
+}
+EXPORT_SYMBOL(reciprocal_value_adv);
--- a/lib/prime_numbers.c
+++ /dev/null
@@ -1,315 +0,0 @@
-#define pr_fmt(fmt) "prime numbers: " fmt "\n"
-
-#include <linux/module.h>
-#include <linux/mutex.h>
-#include <linux/prime_numbers.h>
-#include <linux/slab.h>
-
-#define bitmap_size(nbits) (BITS_TO_LONGS(nbits) * sizeof(unsigned long))
-
-struct primes {
-	struct rcu_head rcu;
-	unsigned long last, sz;
-	unsigned long primes[];
-};
-
-#if BITS_PER_LONG == 64
-static const struct primes small_primes = {
-	.last = 61,
-	.sz = 64,
-	.primes = {
-		BIT(2) |
-		BIT(3) |
-		BIT(5) |
-		BIT(7) |
-		BIT(11) |
-		BIT(13) |
-		BIT(17) |
-		BIT(19) |
-		BIT(23) |
-		BIT(29) |
-		BIT(31) |
-		BIT(37) |
-		BIT(41) |
-		BIT(43) |
-		BIT(47) |
-		BIT(53) |
-		BIT(59) |
-		BIT(61)
-	}
-};
-#elif BITS_PER_LONG == 32
-static const struct primes small_primes = {
-	.last = 31,
-	.sz = 32,
-	.primes = {
-		BIT(2) |
-		BIT(3) |
-		BIT(5) |
-		BIT(7) |
-		BIT(11) |
-		BIT(13) |
-		BIT(17) |
-		BIT(19) |
-		BIT(23) |
-		BIT(29) |
-		BIT(31)
-	}
-};
-#else
-#error "unhandled BITS_PER_LONG"
-#endif
-
-static DEFINE_MUTEX(lock);
-static const struct primes __rcu *primes = RCU_INITIALIZER(&small_primes);
-
-static unsigned long selftest_max;
-
-static bool slow_is_prime_number(unsigned long x)
-{
-	unsigned long y = int_sqrt(x);
-
-	while (y > 1) {
-		if ((x % y) == 0)
-			break;
-		y--;
-	}
-
-	return y == 1;
-}
-
-static unsigned long slow_next_prime_number(unsigned long x)
-{
-	while (x < ULONG_MAX && !slow_is_prime_number(++x))
-		;
-
-	return x;
-}
-
-static unsigned long clear_multiples(unsigned long x,
-				     unsigned long *p,
-				     unsigned long start,
-				     unsigned long end)
-{
-	unsigned long m;
-
-	m = 2 * x;
-	if (m < start)
-		m = roundup(start, x);
-
-	while (m < end) {
-		__clear_bit(m, p);
-		m += x;
-	}
-
-	return x;
-}
-
-static bool expand_to_next_prime(unsigned long x)
-{
-	const struct primes *p;
-	struct primes *new;
-	unsigned long sz, y;
-
-	/* Betrand's Postulate (or Chebyshev's theorem) states that if n > 3,
-	 * there is always at least one prime p between n and 2n - 2.
-	 * Equivalently, if n > 1, then there is always at least one prime p
-	 * such that n < p < 2n.
-	 *
-	 * http://mathworld.wolfram.com/BertrandsPostulate.html
-	 * https://en.wikipedia.org/wiki/Bertrand's_postulate
-	 */
-	sz = 2 * x;
-	if (sz < x)
-		return false;
-
-	sz = round_up(sz, BITS_PER_LONG);
-	new = kmalloc(sizeof(*new) + bitmap_size(sz),
-		      GFP_KERNEL | __GFP_NOWARN);
-	if (!new)
-		return false;
-
-	mutex_lock(&lock);
-	p = rcu_dereference_protected(primes, lockdep_is_held(&lock));
-	if (x < p->last) {
-		kfree(new);
-		goto unlock;
-	}
-
-	/* Where memory permits, track the primes using the
-	 * Sieve of Eratosthenes. The sieve is to remove all multiples of known
-	 * primes from the set, what remains in the set is therefore prime.
-	 */
-	bitmap_fill(new->primes, sz);
-	bitmap_copy(new->primes, p->primes, p->sz);
-	for (y = 2UL; y < sz; y = find_next_bit(new->primes, sz, y + 1))
-		new->last = clear_multiples(y, new->primes, p->sz, sz);
-	new->sz = sz;
-
-	BUG_ON(new->last <= x);
-
-	rcu_assign_pointer(primes, new);
-	if (p != &small_primes)
-		kfree_rcu((struct primes *)p, rcu);
-
-unlock:
-	mutex_unlock(&lock);
-	return true;
-}
-
-static void free_primes(void)
-{
-	const struct primes *p;
-
-	mutex_lock(&lock);
-	p = rcu_dereference_protected(primes, lockdep_is_held(&lock));
-	if (p != &small_primes) {
-		rcu_assign_pointer(primes, &small_primes);
-		kfree_rcu((struct primes *)p, rcu);
-	}
-	mutex_unlock(&lock);
-}
-
-/**
- * next_prime_number - return the next prime number
- * @x: the starting point for searching to test
- *
- * A prime number is an integer greater than 1 that is only divisible by
- * itself and 1.  The set of prime numbers is computed using the Sieve of
- * Eratoshenes (on finding a prime, all multiples of that prime are removed
- * from the set) enabling a fast lookup of the next prime number larger than
- * @x. If the sieve fails (memory limitation), the search falls back to using
- * slow trial-divison, up to the value of ULONG_MAX (which is reported as the
- * final prime as a sentinel).
- *
- * Returns: the next prime number larger than @x
- */
-unsigned long next_prime_number(unsigned long x)
-{
-	const struct primes *p;
-
-	rcu_read_lock();
-	p = rcu_dereference(primes);
-	while (x >= p->last) {
-		rcu_read_unlock();
-
-		if (!expand_to_next_prime(x))
-			return slow_next_prime_number(x);
-
-		rcu_read_lock();
-		p = rcu_dereference(primes);
-	}
-	x = find_next_bit(p->primes, p->last, x + 1);
-	rcu_read_unlock();
-
-	return x;
-}
-EXPORT_SYMBOL(next_prime_number);
-
-/**
- * is_prime_number - test whether the given number is prime
- * @x: the number to test
- *
- * A prime number is an integer greater than 1 that is only divisible by
- * itself and 1. Internally a cache of prime numbers is kept (to speed up
- * searching for sequential primes, see next_prime_number()), but if the number
- * falls outside of that cache, its primality is tested using trial-divison.
- *
- * Returns: true if @x is prime, false for composite numbers.
- */
-bool is_prime_number(unsigned long x)
-{
-	const struct primes *p;
-	bool result;
-
-	rcu_read_lock();
-	p = rcu_dereference(primes);
-	while (x >= p->sz) {
-		rcu_read_unlock();
-
-		if (!expand_to_next_prime(x))
-			return slow_is_prime_number(x);
-
-		rcu_read_lock();
-		p = rcu_dereference(primes);
-	}
-	result = test_bit(x, p->primes);
-	rcu_read_unlock();
-
-	return result;
-}
-EXPORT_SYMBOL(is_prime_number);
-
-static void dump_primes(void)
-{
-	const struct primes *p;
-	char *buf;
-
-	buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
-
-	rcu_read_lock();
-	p = rcu_dereference(primes);
-
-	if (buf)
-		bitmap_print_to_pagebuf(true, buf, p->primes, p->sz);
-	pr_info("primes.{last=%lu, .sz=%lu, .primes[]=...x%lx} = %s",
-		p->last, p->sz, p->primes[BITS_TO_LONGS(p->sz) - 1], buf);
-
-	rcu_read_unlock();
-
-	kfree(buf);
-}
-
-static int selftest(unsigned long max)
-{
-	unsigned long x, last;
-
-	if (!max)
-		return 0;
-
-	for (last = 0, x = 2; x < max; x++) {
-		bool slow = slow_is_prime_number(x);
-		bool fast = is_prime_number(x);
-
-		if (slow != fast) {
-			pr_err("inconsistent result for is-prime(%lu): slow=%s, fast=%s!",
-			       x, slow ? "yes" : "no", fast ? "yes" : "no");
-			goto err;
-		}
-
-		if (!slow)
-			continue;
-
-		if (next_prime_number(last) != x) {
-			pr_err("incorrect result for next-prime(%lu): expected %lu, got %lu",
-			       last, x, next_prime_number(last));
-			goto err;
-		}
-		last = x;
-	}
-
-	pr_info("selftest(%lu) passed, last prime was %lu", x, last);
-	return 0;
-
-err:
-	dump_primes();
-	return -EINVAL;
-}
-
-static int __init primes_init(void)
-{
-	return selftest(selftest_max);
-}
-
-static void __exit primes_exit(void)
-{
-	free_primes();
-}
-
-module_init(primes_init);
-module_exit(primes_exit);
-
-module_param_named(selftest, selftest_max, ulong, 0400);
-
-MODULE_AUTHOR("Intel Corporation");
-MODULE_LICENSE("GPL");
--- a/lib/rational.c
+++ /dev/null
@@ -1,65 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * rational fractions
- *
- * Copyright (C) 2009 emlix GmbH, Oskar Schirmer <oskar@xxxxxxxxx>
- *
- * helper functions when coping with rational numbers
- */
-
-#include <linux/rational.h>
-#include <linux/compiler.h>
-#include <linux/export.h>
-
-/*
- * calculate best rational approximation for a given fraction
- * taking into account restricted register size, e.g. to find
- * appropriate values for a pll with 5 bit denominator and
- * 8 bit numerator register fields, trying to set up with a
- * frequency ratio of 3.1415, one would say:
- *
- * rational_best_approximation(31415, 10000,
- *		(1 << 8) - 1, (1 << 5) - 1, &n, &d);
- *
- * you may look at given_numerator as a fixed point number,
- * with the fractional part size described in given_denominator.
- *
- * for theoretical background, see:
- * http://en.wikipedia.org/wiki/Continued_fraction
- */
-
-void rational_best_approximation(
-	unsigned long given_numerator, unsigned long given_denominator,
-	unsigned long max_numerator, unsigned long max_denominator,
-	unsigned long *best_numerator, unsigned long *best_denominator)
-{
-	unsigned long n, d, n0, d0, n1, d1;
-	n = given_numerator;
-	d = given_denominator;
-	n0 = d1 = 0;
-	n1 = d0 = 1;
-	for (;;) {
-		unsigned long t, a;
-		if ((n1 > max_numerator) || (d1 > max_denominator)) {
-			n1 = n0;
-			d1 = d0;
-			break;
-		}
-		if (d == 0)
-			break;
-		t = d;
-		a = n / d;
-		d = n % d;
-		n = t;
-		t = n0 + a * n1;
-		n0 = n1;
-		n1 = t;
-		t = d0 + a * d1;
-		d0 = d1;
-		d1 = t;
-	}
-	*best_numerator = n1;
-	*best_denominator = d1;
-}
-
-EXPORT_SYMBOL(rational_best_approximation);
--- a/lib/reciprocal_div.c
+++ /dev/null
@@ -1,69 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-#include <linux/bug.h>
-#include <linux/kernel.h>
-#include <asm/div64.h>
-#include <linux/reciprocal_div.h>
-#include <linux/export.h>
-
-/*
- * For a description of the algorithm please have a look at
- * include/linux/reciprocal_div.h
- */
-
-struct reciprocal_value reciprocal_value(u32 d)
-{
-	struct reciprocal_value R;
-	u64 m;
-	int l;
-
-	l = fls(d - 1);
-	m = ((1ULL << 32) * ((1ULL << l) - d));
-	do_div(m, d);
-	++m;
-	R.m = (u32)m;
-	R.sh1 = min(l, 1);
-	R.sh2 = max(l - 1, 0);
-
-	return R;
-}
-EXPORT_SYMBOL(reciprocal_value);
-
-struct reciprocal_value_adv reciprocal_value_adv(u32 d, u8 prec)
-{
-	struct reciprocal_value_adv R;
-	u32 l, post_shift;
-	u64 mhigh, mlow;
-
-	/* ceil(log2(d)) */
-	l = fls(d - 1);
-	/* NOTE: mlow/mhigh could overflow u64 when l == 32. This case needs to
-	 * be handled before calling "reciprocal_value_adv", please see the
-	 * comment at include/linux/reciprocal_div.h.
-	 */
-	WARN(l == 32,
-	     "ceil(log2(0x%08x)) == 32, %s doesn't support such divisor",
-	     d, __func__);
-	post_shift = l;
-	mlow = 1ULL << (32 + l);
-	do_div(mlow, d);
-	mhigh = (1ULL << (32 + l)) + (1ULL << (32 + l - prec));
-	do_div(mhigh, d);
-
-	for (; post_shift > 0; post_shift--) {
-		u64 lo = mlow >> 1, hi = mhigh >> 1;
-
-		if (lo >= hi)
-			break;
-
-		mlow = lo;
-		mhigh = hi;
-	}
-
-	R.m = (u32)mhigh;
-	R.sh = post_shift;
-	R.exp = l;
-	R.is_wide_m = mhigh > U32_MAX;
-
-	return R;
-}
-EXPORT_SYMBOL(reciprocal_value_adv);
_

Patches currently in -mm which might be from andriy.shevchenko@xxxxxxxxxxxxxxx are




