diff -urN linux-2.4.0/arch/alpha/config.in linux-2.4.0-elsc/arch/alpha/config.in
--- linux-2.4.0/arch/alpha/config.in	Fri Dec 29 17:07:19 2000
+++ linux-2.4.0-elsc/arch/alpha/config.in	Tue May 15 13:31:39 2001
@@ -231,6 +231,7 @@
 bool 'System V IPC' CONFIG_SYSVIPC
 bool 'BSD Process Accounting' CONFIG_BSD_PROCESS_ACCT
 bool 'Sysctl support' CONFIG_SYSCTL
+bool 'Use ELSC Scheduler' CONFIG_ELSC_SCHED
 if [ "$CONFIG_PROC_FS" = "y" ]; then
    choice 'Kernel core (/proc/kcore) format' \
 	"ELF		CONFIG_KCORE_ELF	\
diff -urN linux-2.4.0/arch/arm/config.in linux-2.4.0-elsc/arch/arm/config.in
--- linux-2.4.0/arch/arm/config.in	Thu Nov 16 15:51:28 2000
+++ linux-2.4.0-elsc/arch/arm/config.in	Tue May 15 13:31:39 2001
@@ -251,6 +251,7 @@
 bool 'System V IPC' CONFIG_SYSVIPC
 bool 'BSD Process Accounting' CONFIG_BSD_PROCESS_ACCT
 bool 'Sysctl support' CONFIG_SYSCTL
+bool 'Use ELSC Scheduler' CONFIG_ELSC_SCHED
 tristate 'NWFPE math emulation' CONFIG_NWFPE
 choice 'Kernel core (/proc/kcore) format' \
 	"ELF		CONFIG_KCORE_ELF	\
diff -urN linux-2.4.0/arch/i386/config.in linux-2.4.0-elsc/arch/i386/config.in
--- linux-2.4.0/arch/i386/config.in	Fri Dec 29 17:35:47 2000
+++ linux-2.4.0-elsc/arch/i386/config.in	Tue May 15 13:31:39 2001
@@ -226,6 +226,7 @@
 bool 'System V IPC' CONFIG_SYSVIPC
 bool 'BSD Process Accounting' CONFIG_BSD_PROCESS_ACCT
 bool 'Sysctl support' CONFIG_SYSCTL
+bool 'Use ELSC Scheduler' CONFIG_ELSC_SCHED
 if [ "$CONFIG_PROC_FS" = "y" ]; then
    choice 'Kernel core (/proc/kcore) format' \
 	"ELF		CONFIG_KCORE_ELF	\
diff -urN linux-2.4.0/arch/ia64/config.in linux-2.4.0-elsc/arch/ia64/config.in
--- linux-2.4.0/arch/ia64/config.in	Thu Jan  4 15:50:17 2001
+++ linux-2.4.0-elsc/arch/ia64/config.in	Tue May 15 13:31:39 2001
@@ -93,6 +93,7 @@
 bool 'System V IPC' CONFIG_SYSVIPC
 bool 'BSD Process Accounting' CONFIG_BSD_PROCESS_ACCT
 bool 'Sysctl support' CONFIG_SYSCTL
+bool 'Use ELSC Scheduler' CONFIG_ELSC_SCHED
 tristate 'Kernel support for ELF binaries' CONFIG_BINFMT_ELF
 tristate 'Kernel support for MISC binaries' CONFIG_BINFMT_MISC
 
diff -urN linux-2.4.0/arch/m68k/config.in linux-2.4.0-elsc/arch/m68k/config.in
--- linux-2.4.0/arch/m68k/config.in	Thu Jan  4 16:00:55 2001
+++ linux-2.4.0-elsc/arch/m68k/config.in	Tue May 15 13:31:39 2001
@@ -91,6 +91,7 @@
 bool 'System V IPC' CONFIG_SYSVIPC
 bool 'BSD Process Accounting' CONFIG_BSD_PROCESS_ACCT
 bool 'Sysctl support' CONFIG_SYSCTL
+bool 'Use ELSC Scheduler' CONFIG_ELSC_SCHED
 if [ "$CONFIG_PROC_FS" = "y" ]; then
    choice 'Kernel core (/proc/kcore) format' \
 	"ELF		CONFIG_KCORE_ELF	\
diff -urN linux-2.4.0/arch/mips/config.in linux-2.4.0-elsc/arch/mips/config.in
--- linux-2.4.0/arch/mips/config.in	Thu Nov 16 15:51:28 2000
+++ linux-2.4.0-elsc/arch/mips/config.in	Tue May 15 13:31:39 2001
@@ -168,6 +168,7 @@
 bool 'System V IPC' CONFIG_SYSVIPC
 bool 'BSD Process Accounting' CONFIG_BSD_PROCESS_ACCT
 bool 'Sysctl support' CONFIG_SYSCTL
+bool 'Use ELSC Scheduler' CONFIG_ELSC_SCHED
 
 source drivers/parport/Config.in
 
diff -urN linux-2.4.0/arch/mips64/config.in linux-2.4.0-elsc/arch/mips64/config.in
--- linux-2.4.0/arch/mips64/config.in	Wed Nov 29 00:42:04 2000
+++ linux-2.4.0-elsc/arch/mips64/config.in	Tue May 15 13:31:39 2001
@@ -104,6 +104,7 @@
 bool 'System V IPC' CONFIG_SYSVIPC
 bool 'BSD Process Accounting' CONFIG_BSD_PROCESS_ACCT
 bool 'Sysctl support' CONFIG_SYSCTL
+bool 'Use ELSC Scheduler' CONFIG_ELSC_SCHED
 tristate 'Kernel support for 64-bit ELF binaries' CONFIG_BINFMT_ELF
 bool 'Kernel support for Linux/MIPS 32-bit binary compatibility' CONFIG_MIPS32_COMPAT
 if [ "$CONFIG_MIPS32_COMPAT" = "y" ]; then
diff -urN linux-2.4.0/arch/parisc/config.in linux-2.4.0-elsc/arch/parisc/config.in
--- linux-2.4.0/arch/parisc/config.in	Tue Dec  5 15:29:39 2000
+++ linux-2.4.0-elsc/arch/parisc/config.in	Tue May 15 13:31:39 2001
@@ -63,6 +63,7 @@
 bool 'System V IPC' CONFIG_SYSVIPC
 bool 'BSD Process Accounting' CONFIG_BSD_PROCESS_ACCT
 bool 'Sysctl support' CONFIG_SYSCTL
+bool 'Use ELSC Scheduler' CONFIG_ELSC_SCHED
 tristate 'Kernel support for SOM binaries' CONFIG_BINFMT_SOM
 tristate 'Kernel support for ELF binaries' CONFIG_BINFMT_ELF
 tristate 'Kernel support for MISC binaries' CONFIG_BINFMT_MISC
diff -urN linux-2.4.0/arch/ppc/config.in linux-2.4.0-elsc/arch/ppc/config.in
--- linux-2.4.0/arch/ppc/config.in	Thu Nov 16 15:51:28 2000
+++ linux-2.4.0-elsc/arch/ppc/config.in	Tue May 15 13:31:39 2001
@@ -120,6 +120,7 @@
 bool 'Sysctl support' CONFIG_SYSCTL
 bool 'System V IPC' CONFIG_SYSVIPC
 bool 'BSD Process Accounting' CONFIG_BSD_PROCESS_ACCT
+bool 'Use ELSC Scheduler' CONFIG_ELSC_SCHED
 
 # only elf supported, a.out is not -- Cort
 if [ "$CONFIG_PROC_FS" = "y" ]; then
diff -urN linux-2.4.0/arch/s390/config.in linux-2.4.0-elsc/arch/s390/config.in
--- linux-2.4.0/arch/s390/config.in	Thu Nov 16 15:51:28 2000
+++ linux-2.4.0-elsc/arch/s390/config.in	Tue May 15 13:31:39 2001
@@ -44,6 +44,7 @@
 bool 'System V IPC' CONFIG_SYSVIPC
 bool 'BSD Process Accounting' CONFIG_BSD_PROCESS_ACCT
 bool 'Sysctl support' CONFIG_SYSCTL
+bool 'Use ELSC Scheduler' CONFIG_ELSC_SCHED
 tristate 'Kernel support for ELF binaries' CONFIG_BINFMT_ELF
 
 endmenu
diff -urN linux-2.4.0/arch/s390/kernel/smp.c linux-2.4.0-elsc/arch/s390/kernel/smp.c
--- linux-2.4.0/arch/s390/kernel/smp.c	Tue Sep  5 16:50:01 2000
+++ linux-2.4.0-elsc/arch/s390/kernel/smp.c	Tue May 15 13:31:39 2001
@@ -712,6 +712,10 @@
                                 p->counter = 0;
                                 p->need_resched = 1;
                         }
+#ifdef CONFIG_ELSC_SCHED
+			if (task_on_runtable(p))
+				rtable_reinsert(p);
+#endif
                         if (p->nice > 0) {
                                 kstat.cpu_nice += user;
                                 kstat.per_cpu_nice[cpu] += user;
diff -urN linux-2.4.0/arch/sh/config.in linux-2.4.0-elsc/arch/sh/config.in
--- linux-2.4.0/arch/sh/config.in	Thu Jan  4 16:19:13 2001
+++ linux-2.4.0-elsc/arch/sh/config.in	Tue May 15 13:31:39 2001
@@ -129,6 +129,7 @@
 bool 'System V IPC' CONFIG_SYSVIPC
 bool 'BSD Process Accounting' CONFIG_BSD_PROCESS_ACCT
 bool 'Sysctl support' CONFIG_SYSCTL
+bool 'Use ELSC Scheduler' CONFIG_ELSC_SCHED
 if [ "$CONFIG_PROC_FS" = "y" ]; then
    choice 'Kernel core (/proc/kcore) format' \
 	"ELF		CONFIG_KCORE_ELF	\
diff -urN linux-2.4.0/arch/sparc/config.in linux-2.4.0-elsc/arch/sparc/config.in
--- linux-2.4.0/arch/sparc/config.in	Wed Nov 29 00:53:44 2000
+++ linux-2.4.0-elsc/arch/sparc/config.in	Tue May 15 13:31:39 2001
@@ -59,6 +59,7 @@
 bool 'System V IPC' CONFIG_SYSVIPC
 bool 'BSD Process Accounting' CONFIG_BSD_PROCESS_ACCT
 bool 'Sysctl support' CONFIG_SYSCTL
+bool 'Use ELSC Scheduler' CONFIG_ELSC_SCHED
 if [ "$CONFIG_PROC_FS" = "y" ]; then
    define_bool CONFIG_KCORE_ELF y
 fi
diff -urN linux-2.4.0/arch/sparc64/config.in linux-2.4.0-elsc/arch/sparc64/config.in
--- linux-2.4.0/arch/sparc64/config.in	Thu Nov 16 15:51:28 2000
+++ linux-2.4.0-elsc/arch/sparc64/config.in	Tue May 15 13:31:39 2001
@@ -51,6 +51,7 @@
 bool 'System V IPC' CONFIG_SYSVIPC
 bool 'BSD Process Accounting' CONFIG_BSD_PROCESS_ACCT
 bool 'Sysctl support' CONFIG_SYSCTL
+bool 'Use ELSC Scheduler' CONFIG_ELSC_SCHED
 if [ "$CONFIG_PROC_FS" = "y" ]; then
    define_bool CONFIG_KCORE_ELF y
 fi
diff -urN linux-2.4.0/drivers/net/slip.c linux-2.4.0-elsc/drivers/net/slip.c
--- linux-2.4.0/drivers/net/slip.c	Sun Dec  3 20:45:22 2000
+++ linux-2.4.0-elsc/drivers/net/slip.c	Tue May 15 13:31:39 2001
@@ -1395,6 +1395,10 @@
 		do {
 			if (busy) {
 				current->counter = 0;
+#ifdef CONFIG_ELSC_SCHED
+				if (task_on_runtable(current))
+					rtable_reinsert(current);
+#endif
 				schedule();
 			}
 
diff -urN linux-2.4.0/include/linux/elsc.h linux-2.4.0-elsc/include/linux/elsc.h
--- linux-2.4.0/include/linux/elsc.h	Wed Dec 31 19:00:00 1969
+++ linux-2.4.0-elsc/include/linux/elsc.h	Tue May 15 13:31:39 2001
@@ -0,0 +1,57 @@
+#ifndef _LINUX_ELSC_H
+#define _LINUX_ELSC_H
+
+#ifdef __KERNEL__
+
+/*
+ * 'include/linux/elsc.h' contains the constants, macros, structures
+ * and definitions needed for using the elsc scheduler.  Most of this
+ * stuff is for maintaining the rtable structure (i.e., the priority-
+ * indexed run_table).
+ */
+
+/* Four 'static goodness' values per bucket */
+#define RTABLE_MAX_INDEX	(((MAX_COUNTER + 40) >> 2) + 1)
+/* Allow for rt tasks too */
+#define RTABLE_BUCKETS		(RTABLE_MAX_INDEX + 10)
+#define RTABLE_TABLE_TOP 	(RTABLE_BUCKETS)
+/* How deep into a bucket do we search for bonuses? */
+#define RTABLE_SEARCH_LIMIT 	((NR_CPUS >> 1) + 3)
+
+/* Macros */
+#define rtable_fetch_head(b)	&(runtable.table[b])
+#define rtable_fetch_first(b)	(runtable.table[b].next)
+#define rtable_fetch_last(b)	(runtable.table[b].prev)
+#define RTABLE_TASK_INDEX(x)	(((x)->policy & ~SCHED_YIELD) ? \
+	(RTABLE_MAX_INDEX + 1 + ((x)->rt_priority / 10)) : \
+	((x)->counter ? (((20 - (x)->nice + (x)->counter) >> 2) + 1) : \
+		(((20 - (x)->nice + NICE_TO_TICKS((x)->nice)) >> 2) + 1)))
+
+/* The Structure */
+struct run_table
+{
+        /* table to index the run queue */
+        struct list_head table[RTABLE_BUCKETS + 1];
+
+        /* current (and future) highest level bucket */
+        int top;
+        int next_top;
+	int iter;
+};
+
+/* Setup Routines */
+extern void rtable_init(void);
+
+/* Testing Routines */
+inline int rtable_nonzero_empty(int bucket);
+inline int rtable_zero_empty(int bucket);
+
+/* RTable Manipulation Routines */
+inline void rtable_insert(struct task_struct *task, const unsigned int bucket);
+inline void rtable_remove(struct task_struct *task);
+inline void rtable_move_front(struct task_struct *task);
+inline void rtable_move_back(struct task_struct *task);
+
+#endif /* __KERNEL__ */
+
+#endif /* _LINUX_ELSC_H */
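
To get a feel for where tasks land in the run_table, here is a minimal
user-space sketch of the RTABLE_TASK_INDEX arithmetic.  It is not part of
the patch: it assumes HZ=100 (so NICE_TO_TICKS(nice) works out to
((20-nice)>>2)+1), assumes MAX_COUNTER is 20 ticks (the patch takes that
symbol from the kernel headers), uses the usual 2.4 values SCHED_RR=2 and
SCHED_YIELD=0x10, and "struct task" stands in for the few task_struct
fields the macro actually reads.

	/* bucket-index sketch -- NOT part of the patch */
	#include <stdio.h>

	#define SCHED_RR		2	/* assumed 2.4 values */
	#define SCHED_YIELD		0x10
	#define NICE_TO_TICKS(nice)	(((20 - (nice)) >> 2) + 1)	/* HZ=100 */
	#define MAX_COUNTER		20	/* assumed, in ticks */

	#define RTABLE_MAX_INDEX	(((MAX_COUNTER + 40) >> 2) + 1)	/* 16 */
	#define RTABLE_BUCKETS		(RTABLE_MAX_INDEX + 10)		/* 26 */

	struct task { int policy, counter, nice, rt_priority; };

	#define RTABLE_TASK_INDEX(x)	(((x)->policy & ~SCHED_YIELD) ? \
		(RTABLE_MAX_INDEX + 1 + ((x)->rt_priority / 10)) : \
		((x)->counter ? (((20 - (x)->nice + (x)->counter) >> 2) + 1) : \
			(((20 - (x)->nice + NICE_TO_TICKS((x)->nice)) >> 2) + 1)))

	int main(void)
	{
		struct task fresh   = { 0, NICE_TO_TICKS(0),   0,   0 };  /* nice 0, full slice */
		struct task spent   = { 0, 0,                  0,   0 };  /* same task, slice used up */
		struct task niced   = { 0, NICE_TO_TICKS(19),  19,  0 };  /* lowest priority */
		struct task boosted = { 0, NICE_TO_TICKS(-20), -20, 0 };  /* highest non-RT priority */
		struct task rt      = { SCHED_RR, 0,            0, 99 };  /* top real-time priority */

		printf("nice   0, fresh: bucket %d\n", RTABLE_TASK_INDEX(&fresh));   /* 7 */
		printf("nice   0, spent: bucket %d\n", RTABLE_TASK_INDEX(&spent));   /* 7, at the tail */
		printf("nice  19:        bucket %d\n", RTABLE_TASK_INDEX(&niced));   /* 1 */
		printf("nice -20:        bucket %d\n", RTABLE_TASK_INDEX(&boosted)); /* 13 */
		printf("SCHED_RR, rt_priority 99: bucket %d of %d\n",
		       RTABLE_TASK_INDEX(&rt), RTABLE_BUCKETS);                      /* 26 of 26 */
		return 0;
	}

With these assumed values the formula never returns 0, so bucket 0 only acts
as a floor for the top and next_top pointers, and real-time tasks occupy
buckets RTABLE_MAX_INDEX+1 through RTABLE_BUCKETS.
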
diff -urN linux-2.4.0/include/linux/sched.h linux-2.4.0-elsc/include/linux/sched.h
--- linux-2.4.0/include/linux/sched.h	Thu Jan  4 17:50:47 2001
+++ linux-2.4.0-elsc/include/linux/sched.h	Tue May 15 16:46:32 2001
@@ -827,6 +827,11 @@
 #define next_thread(p) \
 	list_entry((p)->thread_group.next, struct task_struct, thread_group)
 
+#ifdef CONFIG_ELSC_SCHED
+extern inline int task_on_runtable(struct task_struct *task);
+extern inline void rtable_reinsert(struct task_struct *task);
+extern inline void del_from_runqueue(struct task_struct * p);
+#else
 static inline void del_from_runqueue(struct task_struct * p)
 {
 	nr_running--;
@@ -834,6 +839,7 @@
 	list_del(&p->run_list);
 	p->run_list.next = NULL;
 }
+#endif
 
 static inline int task_on_runqueue(struct task_struct *p)
 {
diff -urN linux-2.4.0/kernel/sched.c linux-2.4.0-elsc/kernel/sched.c
--- linux-2.4.0/kernel/sched.c	Thu Jan  4 16:50:38 2001
+++ linux-2.4.0-elsc/kernel/sched.c	Tue May 15 13:52:30 2001
@@ -66,6 +66,9 @@
 
 #define NICE_TO_TICKS(nice)	(TICK_SCALE(20-(nice))+1)
 
+#ifdef CONFIG_ELSC_SCHED
+#include <linux/elsc.h>
+#endif
 
 /*
  *	Init task must be ok at boot for the ix86 as we will check its signals
@@ -86,7 +89,11 @@
 spinlock_t runqueue_lock __cacheline_aligned = SPIN_LOCK_UNLOCKED;  /* inner */
 rwlock_t tasklist_lock __cacheline_aligned = RW_LOCK_UNLOCKED;	/* outer */
 
+#ifdef CONFIG_ELSC_SCHED
+static struct run_table runtable;
+#else
 static LIST_HEAD(runqueue_head);
+#endif
 
 /*
  * We align per-CPU scheduling data on cacheline boundaries,
@@ -118,6 +125,8 @@
 
 #endif
 
+#define is_idle_task(p) (!(p)->pid)
+
 void scheduling_functions_start_here(void) { }
 
 /*
@@ -302,20 +311,42 @@
  */
 static inline void add_to_runqueue(struct task_struct * p)
 {
+#ifdef CONFIG_ELSC_SCHED
+	rtable_insert(p, RTABLE_TASK_INDEX(p));
+#else
 	list_add(&p->run_list, &runqueue_head);
+#endif
 	nr_running++;
 }
 
+#ifdef CONFIG_ELSC_SCHED
+inline void del_from_runqueue(struct task_struct * p)
+{
+	nr_running--;
+	p->sleep_time = jiffies;
+	rtable_remove(p);
+	p->run_list.next = NULL;
+}
+#endif
+
 static inline void move_last_runqueue(struct task_struct * p)
 {
+#ifdef CONFIG_ELSC_SCHED
+	rtable_move_back(p);
+#else
 	list_del(&p->run_list);
 	list_add_tail(&p->run_list, &runqueue_head);
+#endif
 }
 
 static inline void move_first_runqueue(struct task_struct * p)
 {
+#ifdef CONFIG_ELSC_SCHED
+	rtable_move_front(p);
+#else
 	list_del(&p->run_list);
 	list_add(&p->run_list, &runqueue_head);
+#endif
 }
 
 /*
@@ -509,8 +540,13 @@
 {
 	struct schedule_data * sched_data;
 	struct task_struct *prev, *next, *p;
-	struct list_head *tmp;
 	int this_cpu, c;
+#ifdef CONFIG_ELSC_SCHED
+	struct list_head *p_first, *p_iter;
+	int i, weight;
+#else
+	struct list_head *tmp;
+#endif
 
 	if (!current->active_mm) BUG();
 need_resched_back:
@@ -535,6 +571,176 @@
 
 	spin_lock_irq(&runqueue_lock);
 
+#ifdef CONFIG_ELSC_SCHED
+	/* Reinsert the previous task into the runqueue here.
+	 * If the counter has a zero value, don't reset it yet.  Just
+	 * fake what the reset value would be, then insert the task.
+	 * rtable_insert will add it to the right place in the
+	 * bucket (all done by RTABLE_TASK_INDEX and rtable_insert).
+	 * Don't worry about yielded tasks and such.  All that is
+	 * taken care of later.  Just get it in the table.
+	 */
+
+	if (is_idle_task(prev))
+		goto move_rr_back;
+
+	switch (prev->state)
+	{
+		case TASK_INTERRUPTIBLE:
+			if (signal_pending(prev))
+			{
+				prev->state = TASK_RUNNING;
+				rtable_insert(prev, RTABLE_TASK_INDEX(prev));
+				break;
+			}
+		default:
+			nr_running--;
+			prev->sleep_time = jiffies;
+			prev->run_list.next = NULL;
+			break;
+		case TASK_RUNNING:
+			rtable_insert(prev, RTABLE_TASK_INDEX(prev));
+	}
+	prev->need_resched = 0;
+
+	/* move exhausted RR processes to the end of their buckets */
+	if (prev->policy == SCHED_RR)
+		goto move_rr_last;
+
+move_rr_back:
+	/* If the top pointer is at 0, then there are no valid
+	 * tasks in the runtable (i.e., there aren't any, or they all
+	 * have 0 counters).  Check to see if we should recalculate.
+	 */
+	if (!runtable.top)
+		goto recalculate;
+
+recalculate_back:
+	/* If the bucket pointed to by top had nothing in it, we
+	 * wouldn't end up here.  So, grab the top bucket's list and
+	 * search through it.  Default to the idle_task.
+	 */
+	runtable.iter = runtable.top;
+	p_iter = p_first = rtable_fetch_first(runtable.iter);
+	next = idle_task(this_cpu);
+	c = -1;
+
+#ifdef CONFIG_SMP
+retry_search_back:
+#endif
+
+	/* This search loop attempts to emulate the goodness loop
+	 * of the old scheduler.  The first check is can_schedule()
+	 * which is wrapped around the goodness calculation in the
+	 * old scheduler.  The other checks all follow in order. By
+	 * not encapsulating this computation inside a function though,
+	 * we can break out early if we need to.  To avoid the worst
+	 * case, only search the first RTABLE_SEARCH_LIMIT tasks in the
+	 * bucket (since they're all about the same goodness anyway).
+	 */
+	for (i = 0; (p_iter->next != p_first) && (i < RTABLE_SEARCH_LIMIT); p_iter = p_iter->next)
+	{
+		p = list_entry(p_iter, struct task_struct, run_list);
+
+#ifdef CONFIG_SMP
+		/* If this process is still running on another CPU,
+		 * then we had best leave it alone.  If there is
+		 * nobody else in this bucket, we'll drop down
+		 * to the next one and everything will be ok.
+		 */
+		if (!can_schedule(p, this_cpu) && (p != prev))
+			continue;
+#endif
+
+		/* Don't worry about counter values for real-time tasks.
+		 * Just run them.
+		 */
+		if (p->policy & ~SCHED_YIELD)
+		{
+			weight = 100 + p->rt_priority;
+			i++;
+			goto weight_compare;
+		}
+
+		/* Don't schedule tasks with a zero counter.  Finding
+		 * one of these means the rest of the bucket is
+		 * actually "empty" for this run-through of the
+		 * runtable.
+		 */
+		if (!p->counter)
+			break;
+
+		/* If possible, the just-yielded task shouldn't be
+		 * run (except when it's the only task).  So send it
+		 * to be checked for the best weight now before it
+		 * gains any affinity bonuses.  Also, make sure that
+		 * other tasks that haven't yielded (but get no
+		 * bonuses) receive a better weight too.  Note that
+		 * _any_ non-yielded, runnable task in this bucket
+		 * will run instead of this task since c is left < 0.
+		 */
+		if ((p->policy & SCHED_YIELD) && (c < 0))
+		{
+			next = p;
+			continue;
+		}
+
+		/* Getting to this point means we will officially
+		 * examine a runnable task, no matter what. So
+		 * note that in i.
+		 */
+		i++;
+		weight = 0;
+
+		/* check for mm match */
+		if (p->mm == prev->active_mm || !p->mm)
+#ifndef CONFIG_SMP
+		/* Since this is the only "bonus" we give on a UP machine,
+		 * we won't find anyone better than this.  Go with him. */
+		{
+			next = p;
+			break;
+		}
+#else
+			weight += 1;
+
+		/* Give a "bonus" for processor affinity.  This is
+		 * essentially a penalty for switching CPUs.
+		 * Hopefully this will keep caches as hot as
+		 * possible.
+		 */
+		if (p->processor == this_cpu)
+			weight += PROC_CHANGE_PENALTY;
+#endif /* CONFIG_SMP */
+
+weight_compare:
+		if (weight > c)
+		{
+			c = weight;
+			next = p;
+		}
+	}
+#ifdef CONFIG_SMP
+	/* Now if we're here and i is still 0, then we didn't look at any
+	 * tasks in that bucket (they were running on another CPU or
+	 * something).  So let's drop to the next bucket and try again.
+	 * This can only happen in SMP, so don't do this on UP.
+	 */
+	if (!i)
+		goto retry_search;
+#endif
+
+	/* At this point, we should have a runnable task in hand.  Take it
+	 * off the runqueue so it won't be considered in most other calls
+	 * to schedule() (it's still possible to schedule one task on more
+	 * than one CPU though, so keep can_schedule(p) in the search loop above).
+	 */
+	if (!is_idle_task(next))
+		rtable_remove(next);
+
+sched_next:
+#else /* !CONFIG_ELSC_SCHED */
+
 	/* move an exhausted RR process to be last.. */
 	if (prev->policy == SCHED_RR)
 		goto move_rr_last;
@@ -578,6 +784,8 @@
 	/* Do we need to re-calculate counters? */
 	if (!c)
 		goto recalculate;
+#endif /* !CONFIG_ELSC_SCHED */
+
 	/*
 	 * from this point on nothing can prevent us from
 	 * switching to the next task, save this fact in
@@ -655,6 +863,56 @@
 
 	return;
 
+#ifdef CONFIG_ELSC_SCHED
+#ifdef CONFIG_SMP
+	/* Drop down one bucket at a time, looking for the next
+	 * non-"empty" bucket.  If we don't find any, then
+	 * fall through to recalculate to see if we need to do
+	 * just that.
+	 */
+retry_search:
+	do { runtable.iter--; } while ((runtable.iter > 0) && rtable_nonzero_empty(runtable.iter));
+	if (runtable.iter)
+	{
+		p_iter = p_first = rtable_fetch_first(runtable.iter);
+		goto retry_search_back;
+	}
+#endif
+
+recalculate:
+	/* If next_top is not pointing to the 0 bucket, then there are
+	 * indeed runnable tasks in the table.  They just have 0-value
+	 * counters.  So let's fix that.
+	 */
+	if (runtable.next_top)
+	{
+		struct task_struct *p;
+
+		/* Concurrency problems arise if you let go of the rq_lock
+		 * here.  We don't do this often enough, or spend enough
+		 * time in this section to merit letting go.
+		 * You shouldn't hold the tasklist_lock before getting
+		 * the rq_lock anyway.
+		 */
+		read_lock(&tasklist_lock);
+		for_each_task(p)
+			p->counter = (p->counter >> 1) + NICE_TO_TICKS(p->nice);
+		read_unlock(&tasklist_lock);
+
+		/* Now switch over to use the recalculated runtable */
+		runtable.top = runtable.next_top;
+		runtable.next_top = 0;
+		goto recalculate_back;
+	}
+	/* If, however, next_top is zero right now (and bucket 0 is
+	 * completely empty), then there are no runnable tasks anywhere
+	 * in the table, in which case we'll want to run the idle task.
+	 */
+	next = idle_task(this_cpu);
+	goto sched_next;
+
+#else /* ! CONFIG_ELSC_SCHED */
+
 recalculate:
 	{
 		struct task_struct *p;
@@ -671,6 +929,7 @@
 	c = goodness(prev, this_cpu, prev->active_mm);
 	next = prev;
 	goto still_running_back;
+#endif /* CONFIG_ELSC_SCHED */
 
 handle_softirq:
 	do_softirq();
@@ -1250,6 +1509,10 @@
 	int cpu = smp_processor_id();
 	int nr;
 
+#ifdef CONFIG_ELSC_SCHED
+	rtable_init();
+#endif
+
 	init_task.processor = cpu;
 
 	for(nr = 0; nr < PIDHASH_SZ; nr++)
@@ -1267,3 +1530,211 @@
 	atomic_inc(&init_mm.mm_count);
 	enter_lazy_tlb(&init_mm, current, cpu);
 }
+
+#ifdef CONFIG_ELSC_SCHED
+void rtable_init(void)
+{
+	int i;
+
+	/* reset the top */
+	runtable.top = 0;
+	runtable.next_top = 0;
+
+	/* init the buckets */
+	for (i = 0; i <= RTABLE_BUCKETS; i++)
+		INIT_LIST_HEAD(&(runtable.table[i]));
+}
+
+/* This is _very_ important.  If a task has a zero counter value,
+ * then it must be added to the _end_ of a bucket's list.  This is
+ * how the scheduler can tell if a bucket is empty for the
+ * current run-through of the table or not.
+ */
+inline void rtable_insert(struct task_struct *task, unsigned int bucket)
+{
+	if (bucket > RTABLE_TABLE_TOP)
+		bucket = RTABLE_MAX_INDEX;
+	if (task->counter || (task->policy & ~SCHED_YIELD))
+	{
+		list_add(&task->run_list, &(runtable.table[bucket]));
+		if (bucket > runtable.top) runtable.top = bucket;
+	}
+	else
+	{
+		list_add_tail(&task->run_list, &(runtable.table[bucket]));
+		if (bucket > runtable.next_top) runtable.next_top = bucket;
+	}
+}
+
+/* Remove a task from the runtable.  This does not necessarily mean the task
+ * is "off the runqueue" as it could be taken off to avoid examination
+ * by other CPUs while it runs (since it has to be reinserted later
+ * anyway).  Thus, don't worry about setting run_list.next to NULL.
+ * But _do_ set run_list.prev to NULL so we can use the task_on_runtable()
+ * check.  Otherwise, strange code paths can decrement the counter
+ * of a task while it is on the runtable just before it gets swapped out.
+ * Not a big deal if counter goes from 5 to 4, but a _huge_ deal if
+ * it goes from 1 to 0.
+ */
+inline void rtable_remove(struct task_struct *task)
+{
+	/* remove from its current position */
+	list_del(&task->run_list);
+	task->run_list.prev = NULL;
+
+	/* this is painful - which is why we keep the table as dense as possible */
+	while ((runtable.top > 0) && rtable_nonzero_empty(runtable.top)) (runtable.top)--;
+	while ((runtable.next_top > 0) && rtable_zero_empty(runtable.next_top)) (runtable.next_top)--;
+}
+
+/* move_last_runqueue is basically intended to make sure the task
+ * you move loses ties in the scheduler proper.  Thus, moving a task
+ * within its own bucket should suffice.  Since tasks are constantly
+ * being inserted and removed from the rtable, the effects of this
+ * function don't last very long.  That and the fact that it's more
+ * expensive to move tasks in a bucket mean that you probably shouldn't
+ * try to use this function too much.  I've tried to shortcut the
+ * real-time case somewhat since RR tasks seem to be the only
+ * case that really needs this.
+ */
+inline void rtable_move_back(struct task_struct *task)
+{
+	int bucket = RTABLE_TASK_INDEX(task);
+
+	if (!task_on_runtable(task))
+		return;
+
+	list_del(&task->run_list);
+
+	if ((task->policy & ~SCHED_YIELD) || !task->counter)
+	{
+		list_add_tail(&task->run_list, &runtable.table[bucket]);
+		if (bucket > runtable.next_top) runtable.next_top = bucket;
+	}
+	else
+	{
+		struct list_head *p_iter, *p_last;
+		struct task_struct *t;
+
+		p_last = p_iter = rtable_fetch_last(bucket);
+		while (p_iter->prev != p_last)
+		{
+			t = list_entry(p_iter, struct task_struct, run_list);
+			if (t->counter)
+				break;
+			p_iter = p_iter->prev;
+		}
+		list_add(&task->run_list, p_iter);
+		if (bucket > runtable.top) runtable.top = bucket;
+	}
+
+	while ((runtable.next_top > 0) && rtable_zero_empty(runtable.next_top)) (runtable.next_top)--;
+	while ((runtable.top > 0) && rtable_nonzero_empty(runtable.top)) (runtable.top)--;
+}
+
+/* Both rtable_move_back (above) and rtable_move_front (below) are
+ * relatively expensive compared to the non-rtable versions, since we
+ * can't blindly move tasks to the front or back anymore and the
+ * scheduler expects different counter values in those places.  The
+ * only real use for moving a task to the back is with SCHED_RR tasks,
+ * to keep things fair, and that case takes the optimal path.
+ */
+/* move_first_runqueue is basically intended to make sure the task
+ * you move wins ties in the scheduler proper.  Thus, moving a task
+ * within its own bucket should suffice.  Since tasks are constantly
+ * being inserted and removed from the rtable, the effects of this
+ * function don't last very long.  That and the fact that it's more
+ * expensive to move tasks in a bucket mean that you probably shouldn't
+ * try to use this function too much.  Just like move_back, I've tried
+ * to shortcut the real-time case.  Currently, this function is only
+ * used when an ordinary task is made into a real-time task.
+ */
+inline void rtable_move_front(struct task_struct *task)
+{
+	int bucket = RTABLE_TASK_INDEX(task);
+
+	if (!task_on_runtable(task))
+		return;
+
+	list_del(&task->run_list);
+
+	if ((task->policy & ~SCHED_YIELD) || task->counter)
+	{
+		list_add(&task->run_list, &runtable.table[bucket]);
+		if (bucket > runtable.top) runtable.top = bucket;
+	}
+	else
+	{
+		struct list_head *p_iter, *p_first;
+		struct task_struct *t;
+
+		p_first = p_iter = rtable_fetch_first(bucket);
+		while (p_iter->next != p_first)
+		{
+			t = list_entry(p_iter, struct task_struct, run_list);
+			if (!t->counter)
+				break;
+			p_iter = p_iter->next;
+		}
+		list_add_tail(&task->run_list, p_iter);
+		if (bucket > runtable.next_top) runtable.next_top = bucket;
+	}
+
+	while ((runtable.top > 0) && rtable_nonzero_empty(runtable.top)) (runtable.top)--;
+	while ((runtable.next_top > 0) && rtable_zero_empty(runtable.next_top)) (runtable.next_top)--;
+}
+
+inline void rtable_reinsert(struct task_struct *task)
+{
+	int bucket = RTABLE_TASK_INDEX(task);
+
+	/* remove from its current position */
+	list_del(&(task->run_list));
+
+	/* put it in its new position */
+	if (task->counter || (task->policy & ~SCHED_YIELD))
+	{
+		list_add(&task->run_list, &(runtable.table[bucket]));
+		if (bucket > runtable.top) runtable.top = bucket;
+	}
+	else
+	{
+		list_add_tail(&task->run_list, &(runtable.table[bucket]));
+		if (bucket > runtable.next_top) runtable.next_top = bucket;
+	}
+
+	/* perhaps we just moved the highest task down a bucket */
+	while ((runtable.top > 0) && rtable_nonzero_empty(runtable.top)) (runtable.top)--;
+	while ((runtable.next_top > 0) && rtable_zero_empty(runtable.next_top)) (runtable.next_top)--;	
+}
+
+inline int task_on_runtable(struct task_struct *task)
+{
+	return (task->run_list.prev != NULL);
+}
+
+/* Check to see if a bucket has no >0 counter tasks in it. */
+inline int rtable_nonzero_empty(int bucket)
+{
+	struct task_struct *p;
+
+	if (list_empty(&(runtable.table[bucket])))
+		return 1;
+
+	p = list_entry(runtable.table[bucket].next, struct task_struct, run_list);
+	return (!p->counter);
+}
+
+/* Check to see if a bucket has no 0 counter tasks in it. */
+inline int rtable_zero_empty(int bucket)
+{
+	struct task_struct *p;
+
+	if (list_empty(&(runtable.table[bucket])))
+		return 1;
+
+	p = list_entry(runtable.table[bucket].prev, struct task_struct, run_list);
+	return (p->counter);
+}
+
+#endif /* CONFIG_ELSC_SCHED */
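
The schedule() rewrite above leans entirely on the two bucket pointers:
top is the highest bucket that still holds a task with ticks left this
epoch (real-time tasks always count as live), next_top is the highest
bucket holding at least one expired (zero-counter) task, and the
recalculate path simply refreshes counters and flips top over to next_top
once the live side runs dry.  The toy model below is not part of the
patch; it replaces the bucket lists with two per-bucket counts purely to
show how the pointers move, and the bucket numbers 7 and 13 are the
assumed nice-0 and nice -20 indices from the earlier sketch.

	/* toy model of the top/next_top bookkeeping -- NOT part of the patch */
	#include <stdio.h>

	#define BUCKETS 27	/* RTABLE_BUCKETS + 1 with the values assumed earlier */

	static int live[BUCKETS];	/* tasks with counter > 0 (head of a real bucket) */
	static int expired[BUCKETS];	/* tasks with counter == 0 (tail of a real bucket) */
	static int top, next_top;

	static void insert(int bucket, int has_ticks)
	{
		if (has_ticks) {
			live[bucket]++;
			if (bucket > top)
				top = bucket;
		} else {
			expired[bucket]++;
			if (bucket > next_top)
				next_top = bucket;
		}
	}

	static void pick(void)
	{
		int b;

		if (!top) {
			if (!next_top) {
				printf("run the idle task\n");
				return;
			}
			/* recalculate: expired tasks get their ticks back in place */
			printf("recalculate, top <- next_top (%d)\n", next_top);
			for (b = 1; b < BUCKETS; b++) {
				live[b] += expired[b];
				expired[b] = 0;
			}
			top = next_top;
			next_top = 0;
		}
		/* a picked task just disappears here; the real scheduler
		 * reinserts prev at the top of the next schedule() call */
		printf("schedule from bucket %d\n", top);
		live[top]--;
		while (top > 0 && !live[top])	/* keep top dense, like rtable_remove */
			top--;
	}

	int main(void)
	{
		insert(7, 1);	/* fresh nice-0 task */
		insert(7, 1);	/* and another */
		insert(13, 0);	/* nice -20 task that has used up its slice */

		pick();		/* bucket 7 */
		pick();		/* bucket 7 again */
		pick();		/* live side empty -> recalculate -> bucket 13 */
		pick();		/* nothing left -> idle */
		return 0;
	}
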
diff -urN linux-2.4.0/kernel/timer.c linux-2.4.0-elsc/kernel/timer.c
--- linux-2.4.0/kernel/timer.c	Sun Dec 10 12:53:19 2000
+++ linux-2.4.0-elsc/kernel/timer.c	Tue May 15 13:31:39 2001
@@ -587,6 +587,10 @@
 			p->counter = 0;
 			p->need_resched = 1;
 		}
+#ifdef CONFIG_ELSC_SCHED
+		if (task_on_runtable(p))
+			rtable_reinsert(p);
+#endif
 		if (p->nice > 0)
 			kstat.per_cpu_nice[cpu] += user_tick;
 		else
diff -urN linux-2.4.0/mm/oom_kill.c linux-2.4.0-elsc/mm/oom_kill.c
--- linux-2.4.0/mm/oom_kill.c	Tue Nov 14 13:56:46 2000
+++ linux-2.4.0-elsc/mm/oom_kill.c	Tue May 15 13:31:39 2001
@@ -163,7 +163,13 @@
 	 * all the memory it needs. That way it should be able to
 	 * exit() and clear out its resources quickly...
 	 */
+#ifdef CONFIG_ELSC_SCHED
+	p->counter = MAX_COUNTER;
+	if (task_on_runtable(p))
+		rtable_reinsert(p);
+#else
 	p->counter = 5 * HZ;
+#endif
 	p->flags |= PF_MEMALLOC;
 
 	/* This process has hardware access, be more careful. */
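
One closing note on counter values: the recalculate loop in kernel/sched.c
keeps the stock 2.4 formula, p->counter = (p->counter >> 1) +
NICE_TO_TICKS(p->nice), so a repeatedly refreshed counter converges toward
roughly twice the task's base time slice and never grows without bound.
That bound is what lets RTABLE_MAX_INDEX, sized from MAX_COUNTER, cover
every non-real-time task, and it puts the OOM killer's p->counter =
MAX_COUNTER above in the same range as the largest counter an ordinary
task can reach.  The sketch below is not part of the patch and again
assumes HZ=100.

	/* counter recalculation sketch -- NOT part of the patch (HZ=100 assumed) */
	#include <stdio.h>

	#define NICE_TO_TICKS(nice)	(((20 - (nice)) >> 2) + 1)

	int main(void)
	{
		int nice, counter, i;

		for (nice = -20; nice <= 19; nice += 13) {
			counter = 0;	/* start from an exhausted slice */
			printf("nice %3d (base slice %2d):", nice, NICE_TO_TICKS(nice));
			for (i = 0; i < 8; i++) {
				counter = (counter >> 1) + NICE_TO_TICKS(nice);
				printf(" %2d", counter);
			}
			printf("\n");
		}
		return 0;
	}

With the values assumed earlier, a nice -20 counter settles at 21 ticks,
and ((21 + 40) >> 2) + 1 is still RTABLE_MAX_INDEX, so even a fully
boosted task never indexes past the top non-real-time bucket.
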
