summaryrefslogtreecommitdiff
path: root/linux/src/arch/i386/lib/delay.c
diff options
context:
space:
mode:
Diffstat (limited to 'linux/src/arch/i386/lib/delay.c')
-rw-r--r--  linux/src/arch/i386/lib/delay.c  45
1 file changed, 45 insertions(+), 0 deletions(-)
diff --git a/linux/src/arch/i386/lib/delay.c b/linux/src/arch/i386/lib/delay.c
new file mode 100644
index 0000000..b1551b2
--- /dev/null
+++ b/linux/src/arch/i386/lib/delay.c
@@ -0,0 +1,45 @@
+/*
+ * Precise Delay Loops for i386
+ *
+ * Copyright (C) 1993 Linus Torvalds
+ * Copyright (C) 1997 Martin Mares <mj@atrey.karlin.mff.cuni.cz>
+ *
+ * The __delay function must _NOT_ be inlined as its execution time
+ * depends wildly on alignment on many x86 processors. The additional
+ * jump magic is needed to get the timing stable on all the CPU's
+ * we have to worry about.
+ */
+
+#include <linux/sched.h>
+#include <linux/delay.h>
+
+#ifdef __SMP__
+#include <asm/smp.h>
+#endif
+
/*
 * Spin for 'loops' iterations of the calibrated delay loop.
 *
 * Kept out-of-line (see header comment): execution time depends on
 * code alignment, so the .align/jmp ladder forces a stable, aligned
 * entry into the actual countdown loop on all supported CPUs.
 */
void __delay(unsigned long loops)
{
	int d0;
	__asm__ __volatile__(
		"\tjmp 1f\n"
		".align 16\n"
		"1:\tjmp 2f\n"
		".align 16\n"
		/* Count d0 down from 'loops'; loop while it is still >= 0. */
		"2:\tdecl %0\n\tjns 2b"
		:"=&a" (d0)		/* earlyclobber eax holds the counter */
		:"0" (loops));
}
+
+inline void __const_udelay(unsigned long xloops)
+{
+ int d0;
+ __asm__("mull %0"
+ :"=d" (xloops), "=&amp;a" (d0)
+ :"1" (xloops),"0" (loops_per_sec));
+ __delay(xloops);
+}
+
/*
 * Delay for 'usecs' microseconds by converting to the 2^32-scaled
 * fixed-point units that __const_udelay expects.
 */
void __udelay(unsigned long usecs)
{
	/* floor(2^32 / 1000000): microseconds -> xloops scale factor */
	const unsigned long usecs_to_xloops = 0x000010c6;

	__const_udelay(usecs * usecs_to_xloops);
}