Diffstat (limited to 'target/linux/patches/2.6.29/mips-gcc-44.patch')
-rw-r--r-- target/linux/patches/2.6.29/mips-gcc-44.patch | 215
1 file changed, 215 insertions(+), 0 deletions(-)
diff --git a/target/linux/patches/2.6.29/mips-gcc-44.patch b/target/linux/patches/2.6.29/mips-gcc-44.patch
new file mode 100644
index 000000000..ccee68b9b
--- /dev/null
+++ b/target/linux/patches/2.6.29/mips-gcc-44.patch
@@ -0,0 +1,215 @@
+diff -Nur linux-2.6.29.4.orig/arch/mips/include/asm/compiler.h linux-2.6.29.4/arch/mips/include/asm/compiler.h
+--- linux-2.6.29.4.orig/arch/mips/include/asm/compiler.h 2009-05-19 01:52:34.000000000 +0200
++++ linux-2.6.29.4/arch/mips/include/asm/compiler.h 2009-05-24 19:32:14.000000000 +0200
+@@ -16,4 +16,11 @@
+ #define GCC_REG_ACCUM "accum"
+ #endif
+
++#if __GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 4)
++#define GCC_NO_H_CONSTRAINT
++#ifdef CONFIG_64BIT
++typedef unsigned int uint128_t __attribute__((mode(TI)));
++#endif
++#endif
++
+ #endif /* _ASM_COMPILER_H */
+diff -Nur linux-2.6.29.4.orig/arch/mips/include/asm/delay.h linux-2.6.29.4/arch/mips/include/asm/delay.h
+--- linux-2.6.29.4.orig/arch/mips/include/asm/delay.h 2009-05-19 01:52:34.000000000 +0200
++++ linux-2.6.29.4/arch/mips/include/asm/delay.h 2009-05-24 19:32:14.000000000 +0200
+@@ -62,8 +62,9 @@
+
+ static inline void __udelay(unsigned long usecs, unsigned long lpj)
+ {
++#ifndef GCC_NO_H_CONSTRAINT
+ unsigned long hi, lo;
+-
++#endif
+ /*
+ * The rates of 128 is rounded wrongly by the catchall case
+ * for 64-bit. Excessive precission? Probably ...
+@@ -77,6 +78,17 @@
+ 0x80000000ULL) >> 32);
+ #endif
+
++#ifdef GCC_NO_H_CONSTRAINT
++#ifdef CONFIG_64BIT
++ usecs = ((uint128_t)usecs * lpj) >> 64;
++#else
++#define SZHALF (sizeof(long)*4)
++#define LOWERHALF ((0x1ul<<SZHALF) - 1)
++ usecs = (usecs >> SZHALF) * (lpj >> SZHALF) + ( ((usecs & LOWERHALF) * (lpj >> SZHALF) + (usecs >> SZHALF) * (lpj & LOWERHALF)) >> SZHALF );
++#undef SZHALF
++#undef LOWERHALF
++#endif
++#else
+ if (sizeof(long) == 4)
+ __asm__("multu\t%2, %3"
+ : "=h" (usecs), "=l" (lo)
+@@ -92,7 +104,7 @@
+ : "=r" (usecs), "=h" (hi), "=l" (lo)
+ : "r" (usecs), "r" (lpj)
+ : GCC_REG_ACCUM);
+-
++#endif
+ __delay(usecs);
+ }
+
+diff -Nur linux-2.6.29.4.orig/arch/mips/include/asm/div64.h linux-2.6.29.4/arch/mips/include/asm/div64.h
+--- linux-2.6.29.4.orig/arch/mips/include/asm/div64.h 2009-05-19 01:52:34.000000000 +0200
++++ linux-2.6.29.4/arch/mips/include/asm/div64.h 2009-05-22 13:38:14.000000000 +0200
+@@ -6,105 +6,63 @@
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+-#ifndef _ASM_DIV64_H
+-#define _ASM_DIV64_H
++#ifndef __ASM_DIV64_H
++#define __ASM_DIV64_H
+
+-#include <linux/types.h>
++#include <asm-generic/div64.h>
+
+-#if (_MIPS_SZLONG == 32)
++#if BITS_PER_LONG == 64
+
+-#include <asm/compiler.h>
++#include <linux/types.h>
+
+ /*
+ * No traps on overflows for any of these...
+ */
+
+-#define do_div64_32(res, high, low, base) ({ \
+- unsigned long __quot32, __mod32; \
+- unsigned long __cf, __tmp, __tmp2, __i; \
+- \
+- __asm__(".set push\n\t" \
+- ".set noat\n\t" \
+- ".set noreorder\n\t" \
+- "move %2, $0\n\t" \
+- "move %3, $0\n\t" \
+- "b 1f\n\t" \
+- " li %4, 0x21\n" \
+- "0:\n\t" \
+- "sll $1, %0, 0x1\n\t" \
+- "srl %3, %0, 0x1f\n\t" \
+- "or %0, $1, %5\n\t" \
+- "sll %1, %1, 0x1\n\t" \
+- "sll %2, %2, 0x1\n" \
+- "1:\n\t" \
+- "bnez %3, 2f\n\t" \
+- " sltu %5, %0, %z6\n\t" \
+- "bnez %5, 3f\n" \
+- "2:\n\t" \
+- " addiu %4, %4, -1\n\t" \
+- "subu %0, %0, %z6\n\t" \
+- "addiu %2, %2, 1\n" \
+- "3:\n\t" \
+- "bnez %4, 0b\n\t" \
+- " srl %5, %1, 0x1f\n\t" \
+- ".set pop" \
+- : "=&r" (__mod32), "=&r" (__tmp), \
+- "=&r" (__quot32), "=&r" (__cf), \
+- "=&r" (__i), "=&r" (__tmp2) \
+- : "Jr" (base), "0" (high), "1" (low)); \
+- \
+- (res) = __quot32; \
+- __mod32; })
+-
+-#define do_div(n, base) ({ \
+- unsigned long long __quot; \
+- unsigned long __mod; \
+- unsigned long long __div; \
+- unsigned long __upper, __low, __high, __base; \
+- \
+- __div = (n); \
+- __base = (base); \
+- \
+- __high = __div >> 32; \
+- __low = __div; \
+- __upper = __high; \
+- \
+- if (__high) \
+- __asm__("divu $0, %z2, %z3" \
+- : "=h" (__upper), "=l" (__high) \
+- : "Jr" (__high), "Jr" (__base) \
+- : GCC_REG_ACCUM); \
+- \
+- __mod = do_div64_32(__low, __upper, __low, __base); \
+- \
+- __quot = __high; \
+- __quot = __quot << 32 | __low; \
+- (n) = __quot; \
+- __mod; })
+-
+-#endif /* (_MIPS_SZLONG == 32) */
+-
+-#if (_MIPS_SZLONG == 64)
+-
+-/*
+- * Hey, we're already 64-bit, no
+- * need to play games..
+- */
+-#define do_div(n, base) ({ \
+- unsigned long __quot; \
+- unsigned int __mod; \
+- unsigned long __div; \
+- unsigned int __base; \
+- \
+- __div = (n); \
+- __base = (base); \
+- \
+- __mod = __div % __base; \
+- __quot = __div / __base; \
+- \
+- (n) = __quot; \
+- __mod; })
++#define __div64_32(n, base) \
++({ \
++ unsigned long __cf, __tmp, __tmp2, __i; \
++ unsigned long __quot32, __mod32; \
++ unsigned long __high, __low; \
++ unsigned long long __n; \
++ \
++ __high = *__n >> 32; \
++ __low = __n; \
++ __asm__( \
++ " .set push \n" \
++ " .set noat \n" \
++ " .set noreorder \n" \
++ " move %2, $0 \n" \
++ " move %3, $0 \n" \
++ " b 1f \n" \
++ " li %4, 0x21 \n" \
++ "0: \n" \
++ " sll $1, %0, 0x1 \n" \
++ " srl %3, %0, 0x1f \n" \
++ " or %0, $1, %5 \n" \
++ " sll %1, %1, 0x1 \n" \
++ " sll %2, %2, 0x1 \n" \
++ "1: \n" \
++ " bnez %3, 2f \n" \
++ " sltu %5, %0, %z6 \n" \
++ " bnez %5, 3f \n" \
++ "2: \n" \
++ " addiu %4, %4, -1 \n" \
++ " subu %0, %0, %z6 \n" \
++ " addiu %2, %2, 1 \n" \
++ "3: \n" \
++ " bnez %4, 0b\n\t" \
++ " srl %5, %1, 0x1f\n\t" \
++ " .set pop" \
++ : "=&r" (__mod32), "=&r" (__tmp), \
++ "=&r" (__quot32), "=&r" (__cf), \
++ "=&r" (__i), "=&r" (__tmp2) \
++ : "Jr" (base), "0" (__high), "1" (__low)); \
++ \
++ (__n) = __quot32; \
++ __mod32; \
++})
+
+-#endif /* (_MIPS_SZLONG == 64) */
++#endif /* BITS_PER_LONG == 64 */
+
+-#endif /* _ASM_DIV64_H */
++#endif /* __ASM_DIV64_H */
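
Note on the technique (appended for context, not part of the patch itself): GCC 4.4 removed the MIPS "h" register constraint, so inline assembly that returned a result through the hi register (the "=h" operands visible in the old code above) no longer builds; that is what the GCC_NO_H_CONSTRAINT guard added to compiler.h detects. The replacement __udelay() code does the scaling in plain C: 64-bit kernels widen to a 128-bit product via uint128_t and take the upper 64 bits, while 32-bit kernels split each operand into 16-bit halves and add the cross products. The sketch below is a standalone illustration of that half-word trick; the function name and test values are illustrative only, and it carries the full low-product carry, whereas the patch drops that term, a slight loss of precision that is harmless for a busy-wait delay.

#include <stdio.h>
#include <stdint.h>

/* Upper 32 bits of a 32x32-bit product, using only 32-bit arithmetic. */
static uint32_t mulhi32(uint32_t a, uint32_t b)
{
	uint32_t ah = a >> 16, al = a & 0xffff;
	uint32_t bh = b >> 16, bl = b & 0xffff;
	uint32_t t1 = ah * bl + ((al * bl) >> 16);	/* fits in 32 bits */
	uint32_t t2 = al * bh + (t1 & 0xffff);		/* fits in 32 bits */

	return ah * bh + (t1 >> 16) + (t2 >> 16);
}

int main(void)
{
	uint32_t usecs = 1000;		/* e.g. udelay(1000) */
	uint32_t lpj = 4999932;		/* illustrative loops-per-jiffy value */

	uint32_t hi = mulhi32(usecs, lpj);
	/* Straightforward 64-bit reference for comparison. */
	uint32_t ref = (uint32_t)(((uint64_t)usecs * lpj) >> 32);

	printf("split halves: %u, 64-bit reference: %u\n",
	       (unsigned)hi, (unsigned)ref);
	return 0;
}

In the patch the same decomposition is written with unsigned long and SZHALF = sizeof(long)*4, and 64-bit kernels avoid it entirely by using the uint128_t type defined in compiler.h.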