4 #include <linux/math64.h>
5 #include <linux/printk.h>
6 #include <linux/rcupdate.h>
7 #include <linux/sched.h>
8 #include <linux/timer.h>
/*
 * Per-thread "current task" pointer, emulating the kernel's `current`
 * macro in userspace: each pthread sees its own value (set for the
 * initial thread by sched_init(); presumably set by the thread-creation
 * path for others — confirm against kthread code elsewhere in the tree).
 */
__thread struct task_struct *current;
12 void __put_task_struct(struct task_struct *t)
14 pthread_join(t->thread, NULL);
18 /* returns true if process was woken up, false if it was already running */
19 int wake_up_process(struct task_struct *p)
23 pthread_mutex_lock(&p->lock);
24 ret = p->state != TASK_RUNNING;
25 p->state = TASK_RUNNING;
27 pthread_cond_signal(&p->wait);
28 pthread_mutex_unlock(&p->lock);
35 rcu_quiescent_state();
37 pthread_mutex_lock(¤t->lock);
39 while (current->state != TASK_RUNNING)
40 pthread_cond_wait(¤t->wait, ¤t->lock);
42 pthread_mutex_unlock(¤t->lock);
/*
 * Timer callback for schedule_timeout(): the cookie is the sleeping
 * task_struct pointer smuggled through an unsigned long; wake it.
 */
static void process_timeout(unsigned long __data)
{
	struct task_struct *task = (struct task_struct *)__data;

	wake_up_process(task);
}
/*
 * schedule_timeout - sleep (via schedule()) for up to @timeout jiffies.
 *
 * Userspace port of the kernel primitive: arms a one-shot timer whose
 * callback, process_timeout(), calls wake_up_process() on this task.
 * Returns 0 when the full timeout elapsed, otherwise the number of
 * jiffies that were still remaining when the task was woken early.
 */
long schedule_timeout(long timeout)
struct timer_list timer;
case MAX_SCHEDULE_TIMEOUT:
* These two special cases are useful to be comfortable
* in the caller. Nothing more. We could take
* MAX_SCHEDULE_TIMEOUT from one of the negative value
* but I' d like to return a valid offset (>=0) to allow
* the caller to do everything it want with the retval.
* Another bit of PARANOID. Note that the retval will be
* 0 since no piece of kernel is supposed to do a check
* for a negative retval of schedule_timeout() (since it
* should never happens anyway). You just have the printk()
* that will tell you if something is gone wrong and where.
printk(KERN_ERR "schedule_timeout: wrong timeout "
"value %lx\n", timeout);
current->state = TASK_RUNNING;
/* absolute deadline, in jiffies */
expire = timeout + jiffies;
/* arm a one-shot timer that wakes this task at @expire */
setup_timer(&timer, process_timeout, (unsigned long)current);
mod_timer(&timer, expire);
/* woken (by the timer or early): make sure the timer is fully dead */
del_timer_sync(&timer);
/* how much of the requested interval is left? */
timeout = expire - jiffies;
/* clamp: callers must never see a negative remaining time */
return timeout < 0 ? 0 : timeout;
95 unsigned long __msecs_to_jiffies(const unsigned int m)
98 * Negative value, means infinite timeout:
101 return MAX_JIFFY_OFFSET;
102 return _msecs_to_jiffies(m);
105 u64 nsecs_to_jiffies64(u64 n)
107 #if (NSEC_PER_SEC % HZ) == 0
108 /* Common case, HZ = 100, 128, 200, 250, 256, 500, 512, 1000 etc. */
109 return div_u64(n, NSEC_PER_SEC / HZ);
110 #elif (HZ % 512) == 0
111 /* overflow after 292 years if HZ = 1024 */
112 return div_u64(n * HZ / 512, NSEC_PER_SEC / 512);
115 * Generic case - optimized for cases where HZ is a multiple of 3.
116 * overflow after 64.99 years, exact for HZ = 60, 72, 90, 120 etc.
118 return div_u64(n * 9, (9ull * NSEC_PER_SEC + HZ / 2) / HZ);
122 unsigned long nsecs_to_jiffies(u64 n)
124 return (unsigned long)nsecs_to_jiffies64(n);
127 unsigned int jiffies_to_msecs(const unsigned long j)
129 #if HZ <= MSEC_PER_SEC && !(MSEC_PER_SEC % HZ)
130 return (MSEC_PER_SEC / HZ) * j;
131 #elif HZ > MSEC_PER_SEC && !(HZ % MSEC_PER_SEC)
132 return (j + (HZ / MSEC_PER_SEC) - 1)/(HZ / MSEC_PER_SEC);
134 # if BITS_PER_LONG == 32
135 return (HZ_TO_MSEC_MUL32 * j) >> HZ_TO_MSEC_SHR32;
137 return (j * HZ_TO_MSEC_NUM) / HZ_TO_MSEC_DEN;
142 unsigned int jiffies_to_usecs(const unsigned long j)
145 * Hz usually doesn't go much further MSEC_PER_SEC.
146 * jiffies_to_usecs() and usecs_to_jiffies() depend on that.
148 BUILD_BUG_ON(HZ > USEC_PER_SEC);
150 #if !(USEC_PER_SEC % HZ)
151 return (USEC_PER_SEC / HZ) * j;
153 # if BITS_PER_LONG == 32
154 return (HZ_TO_USEC_MUL32 * j) >> HZ_TO_USEC_SHR32;
156 return (j * HZ_TO_USEC_NUM) / HZ_TO_USEC_DEN;
161 __attribute__((constructor(101)))
162 static void sched_init(void)
164 struct task_struct *p = malloc(sizeof(*p));
166 memset(p, 0, sizeof(*p));
168 p->state = TASK_RUNNING;
169 pthread_mutex_init(&p->lock, NULL);
170 pthread_cond_init(&p->wait, NULL);
171 atomic_set(&p->usage, 1);
172 init_completion(&p->exited);
177 rcu_register_thread();