28 #ifndef _XENO_NUCLEUS_POD_H
29 #define _XENO_NUCLEUS_POD_H
/* Pod status flags, tested against nkpod->status (see xnpod_fatal_p()
 * and xnpod_active_p() below). */
37 #define XNFATAL 0x00000001	/* Fatal error in progress; see xnpod_fatal_p() */
38 #define XNPEXEC 0x00000002	/* Pod is running; see xnpod_active_p() */

/* Spare status bits 24..31 — NOTE(review): their users are not visible
 * in this chunk; presumably reserved for skin/application use. Confirm. */
41 #define XNPOD_SPARE0 0x01000000
42 #define XNPOD_SPARE1 0x02000000
43 #define XNPOD_SPARE2 0x04000000
44 #define XNPOD_SPARE3 0x08000000
45 #define XNPOD_SPARE4 0x10000000
46 #define XNPOD_SPARE5 0x20000000
47 #define XNPOD_SPARE6 0x40000000
48 #define XNPOD_SPARE7 0x80000000

/* Pod exit codes. */
50 #define XNPOD_NORMAL_EXIT 0x0
51 #define XNPOD_FATAL_EXIT 0x1

/* Mask selecting every CPU known to the arch layer. */
53 #define XNPOD_ALL_CPUS XNARCH_CPU_MASK_ALL

/* Buffer size, in bytes — NOTE(review): the consumer (presumably the
 * fatal-error message formatter) is not visible in this chunk; confirm. */
55 #define XNPOD_FATAL_BUFSZ 16384

/* Convenience alias: pointer to the single global pod instance. */
57 #define nkpod (&nkpod_struct)
74 #ifdef CONFIG_XENO_OPT_VFILE
89 void (*schedhook) (xnthread_t *thread, xnflags_t mask);
/* The global nucleus lock; taken by the xnpod_lock_sched()/
 * xnpod_unlock_sched() helpers below (definition lives elsewhere). */
95 DECLARE_EXTERN_XNLOCK(nklock);

/* Core latency figure — NOTE(review): unit (presumably ns) and the
 * calibration site are not visible in this chunk; confirm. */
97 extern u_long nklatency;

/* Timer-related latency figure — NOTE(review): semantics not visible
 * here; confirm at its definition site. */
99 extern u_long nktimerlat;

/* Nucleus CPU affinity mask — NOTE(review): applied where? Its users
 * are not visible in this chunk; confirm. */
101 extern xnarch_cpumask_t nkaffinity;
105 #ifdef CONFIG_XENO_OPT_VFILE
106 int xnpod_init_proc(
void);
107 void xnpod_cleanup_proc(
void);
109 static inline int xnpod_init_proc(
void) {
return 0; }
110 static inline void xnpod_cleanup_proc(
void) {}
113 static inline int xnpod_mount(
void)
115 xnsched_register_classes();
116 return xnpod_init_proc();
119 static inline void xnpod_umount(
void)
121 xnpod_cleanup_proc();
128 int __xnpod_set_thread_schedparam(
struct xnthread *thread,
129 struct xnsched_class *sched_class,
130 const union xnsched_policy_param *sched_param,
133 #ifdef CONFIG_XENO_HW_FPU
141 #define xnpod_sched_slot(cpu) \
/* Scheduler slot of the CPU this code is currently running on. */
144 #define xnpod_current_sched() \
145 xnpod_sched_slot(xnarch_current_cpu())
/* Non-zero once the pod is executing (XNPEXEC set in the pod status). */
147 #define xnpod_active_p() \
148 testbits(nkpod->status, XNPEXEC)

/* Non-zero while a fatal error is being handled (XNFATAL set). */
150 #define xnpod_fatal_p() \
151 testbits(nkpod->status, XNFATAL)

/* Non-zero when running over interrupt context on the current CPU
 * (XNINIRQ set in the local scheduler flags). */
153 #define xnpod_interrupt_p() \
154 testbits(xnpod_current_sched()->lflags, XNINIRQ)

/* Non-zero while a nucleus callout hook is being fired on the current
 * CPU (XNKCOUT set in the scheduler status). */
156 #define xnpod_callout_p() \
157 testbits(xnpod_current_sched()->status, XNKCOUT)
159 #define xnpod_asynch_p() \
161 xnsched_t *sched = xnpod_current_sched(); \
162 testbits(sched->status | sched->lflags, XNKCOUT|XNINIRQ); \
/* Thread currently running on this CPU's scheduler slot. */
165 #define xnpod_current_thread() \
166 (xnpod_current_sched()->curr)

/* Root thread control block of the current CPU (the root thread is
 * what runs when the CPU is otherwise idle — see xnpod_idle_p()). */
168 #define xnpod_current_root() \
169 (&xnpod_current_sched()->rootcb)
171 #ifdef CONFIG_XENO_OPT_PERVASIVE
172 #define xnpod_current_p(thread) \
173 ({ int __shadow_p = xnthread_test_state(thread, XNSHADOW); \
174 int __curr_p = __shadow_p ? xnshadow_thread(current) == thread \
175 : thread == xnpod_current_thread(); \
178 #define xnpod_current_p(thread) \
179 (xnpod_current_thread() == (thread))
/* Non-zero if the current thread holds the scheduler lock (XNLOCK). */
182 #define xnpod_locked_p() \
183 xnthread_test_state(xnpod_current_thread(), XNLOCK)

/* True when blocking is not permitted: either we are in an async
 * context (IRQ/callout), or the current thread is the root thread. */
185 #define xnpod_unblockable_p() \
186 (xnpod_asynch_p() || xnthread_test_state(xnpod_current_thread(), XNROOT))

/* Current thread is the root thread of this CPU. */
188 #define xnpod_root_p() \
189 xnthread_test_state(xnpod_current_thread(),XNROOT)

/* Current thread carries the XNSHADOW state bit — NOTE(review):
 * "shadow" = Linux-task-backed thread is inferred from the name and
 * the xnshadow_thread() usage above; confirm. */
191 #define xnpod_shadow_p() \
192 xnthread_test_state(xnpod_current_thread(),XNSHADOW)

/* Current thread is either root or shadow. */
194 #define xnpod_userspace_p() \
195 xnthread_test_state(xnpod_current_thread(),XNROOT|XNSHADOW)

/* Primary mode: not in an async context and not the root thread. */
197 #define xnpod_primary_p() \
198 (!(xnpod_asynch_p() || xnpod_root_p()))

/* Secondary mode is defined as running over the root thread. */
200 #define xnpod_secondary_p() xnpod_root_p()

/* The CPU is considered idle whenever the root thread is current. */
202 #define xnpod_idle_p() xnpod_root_p()
213 const struct xnthread_init_attr *attr,
214 struct xnsched_class *sched_class,
215 const union xnsched_policy_param *sched_param);
218 const struct xnthread_start_attr *attr);
235 xntmode_t timeout_mode,
236 struct xnsynch *wchan);
244 struct xnsched_class *sched_class,
245 const union xnsched_policy_param *sched_param);
273 sched = xnpod_current_sched();
279 #if XENO_DEBUG(NUCLEUS)
281 XNKCOUT|XNINIRQ|XNINSW|XNINLOCK))
285 XNKCOUT|XNINIRQ|XNINSW|XNRESCHED|XNINLOCK) != XNRESCHED)
289 __xnpod_schedule(sched);
/*
 * Low-level scheduler lock/unlock primitives taking an explicit
 * scheduler slot. The __xnpod_*_sched() and xnpod_*_sched() inline
 * wrappers below pass the current CPU's slot; the unprefixed variants
 * additionally take nklock around the call.
 */
292 void ___xnpod_lock_sched(
xnsched_t *sched);

294 void ___xnpod_unlock_sched(
xnsched_t *sched);
296 static inline void __xnpod_lock_sched(
void)
301 sched = xnpod_current_sched();
302 ___xnpod_lock_sched(sched);
305 static inline void __xnpod_unlock_sched(
void)
310 sched = xnpod_current_sched();
311 ___xnpod_unlock_sched(sched);
314 static inline void xnpod_lock_sched(
void)
319 xnlock_get_irqsave(&nklock, s);
320 sched = xnpod_current_sched();
321 ___xnpod_lock_sched(sched);
322 xnlock_put_irqrestore(&nklock, s);
325 static inline void xnpod_unlock_sched(
void)
330 xnlock_get_irqsave(&nklock, s);
331 sched = xnpod_current_sched();
332 ___xnpod_unlock_sched(sched);
333 xnlock_put_irqrestore(&nklock, s);
336 void xnpod_fire_callouts(xnqueue_t *hookq,
339 static inline void xnpod_run_hooks(
struct xnqueue *q,
340 struct xnthread *thread,
const char *type)
343 trace_mark(xn_nucleus, thread_callout,
344 "thread %p thread_name %s hook %s",
345 thread, xnthread_name(thread), type);
346 xnpod_fire_callouts(q, thread);
359 static inline xntime_t xnpod_get_cpu_time(
void)
361 return xnarch_get_cpu_time();
368 static inline void xnpod_yield(
void)
374 static inline void xnpod_delay(xnticks_t timeout)
379 static inline void xnpod_suspend_self(
void)
384 static inline void xnpod_delete_self(
void)