@@ -12,84 +12,76 @@ void longjmp(void *buf, int val) __attribute__((noreturn));
12
12
/* Read the current value of the WebAssembly shadow stack pointer. */
static void *get_stack_pointer(void)
{
	void *sp;

	__asm__ volatile("global.get __stack_pointer\n"
			 "local.set %0"
			 : "=r"(sp));

	return sp;
}
20
20
21
21
/* Install @ptr as the WebAssembly shadow stack pointer. */
static void __always_inline set_stack_pointer(void *ptr)
{
	__asm__ volatile("local.get %0\n"
			 "global.set __stack_pointer"
			 :
			 : "r"(ptr));
}
26
26
27
- struct task_struct * __switch_to (struct task_struct * from ,
28
- struct task_struct * to )
27
+ static struct task_struct * prev = & init_task ;
28
+
29
+ inline static struct task_struct * __switch_to_inner (struct task_struct * from ,
30
+ struct task_struct * to )
29
31
{
30
32
struct pt_regs * from_regs = task_pt_regs (from );
31
33
struct pt_regs * to_regs = task_pt_regs (to );
32
34
struct thread_info * from_info = task_thread_info (from );
33
35
struct thread_info * to_info = task_thread_info (to );
34
36
35
- // the following call crashes clang:
36
- // from_info->jmpbuf = kmalloc(40, 0);
37
-
38
- // to_info->from_sched = from;
39
- current = to ;
40
- from_regs -> current_stack = get_stack_pointer ();
41
-
42
-
43
37
if (setjmp (from_info -> jmpbuf ) == 0 ) {
44
38
set_stack_pointer (to_regs -> current_stack );
45
-
46
- schedule_tail (from );
47
- pr_info ("switch %u -> %u\n" , from -> pid , to -> pid );
48
- // if (to_info->from_sched)
49
- // schedule_tail(to_info->from_sched);
50
- // to_info->from_sched = NULL;
39
+
40
+ if (to_info -> from_sched )
41
+ schedule_tail (to_info -> from_sched );
42
+ to_info -> from_sched = NULL ;
51
43
52
44
if (to_regs -> fn ) {
53
45
int (* fn )(void * ) = to_regs -> fn ;
46
+ int result ;
47
+
54
48
to_regs -> fn = NULL ;
49
+ pr_info ("call %p(%p)\n" , fn , to_regs -> fn_arg );
55
50
56
51
// callback returns if the kernel thread execs a process?
57
- fn (to_regs -> fn_arg );
52
+ result = fn (to_regs -> fn_arg );
53
+ pr_info ("call %p(%p) = %u\n" , fn , to_regs -> fn_arg ,
54
+ result );
58
55
} else {
56
+ pr_info ("longjmp %p to %u\n" , to_info -> jmpbuf , to -> pid );
59
57
longjmp (to_info -> jmpbuf , 1 );
60
58
}
59
+ } else {
60
+ pr_info ("free %p\n" , from_info -> jmpbuf );
61
+ kfree (from_info -> jmpbuf );
61
62
}
62
63
63
- kfree (from_info -> jmpbuf );
64
-
65
- // pr_info("hi %u %u %u\n", from->pid, to->pid, current->pid);
66
- // return current_thread_info()->from_sched;
67
- return from ;
68
-
69
- // from_regs->current_stack = get_stack_pointer();
70
-
71
- // if (setjmp(from_info->jmpbuf) == 0) {
72
- // pr_info("context switch %u %p\n"
73
- // " -> %u %p\n",
74
- // from->pid, from_regs->current_stack, to->pid,
75
- // to_regs->current_stack);
76
-
77
- // set_stack_pointer(to_regs->current_stack);
78
- // pr_info("before jmp %u -> %u\n", current->pid, to->pid);
79
- // current = to;
64
+ return prev ;
65
+ }
80
66
81
- // schedule_tail(from);
67
+ struct task_struct * __switch_to (struct task_struct * from ,
68
+ struct task_struct * to )
69
+ {
70
+ struct pt_regs * from_regs = task_pt_regs (from );
71
+ struct pt_regs * to_regs = task_pt_regs (to );
72
+ struct thread_info * from_info = task_thread_info (from );
73
+ struct thread_info * to_info = task_thread_info (to );
82
74
83
- // pr_info("regs: %p %p\n", to_regs, to_regs->fn);
75
+ from_regs -> current_stack = get_stack_pointer ();
76
+ from_info -> jmpbuf = kmalloc (16 , 0 );
84
77
85
- // longjmp(to_info->jmpbuf, 1);
86
- // }
78
+ pr_info ("alloc %p for %u\n" , from_info -> jmpbuf , from -> pid );
87
79
88
- // set_stack_pointer(from_regs->current_stack) ;
89
- // pr_info("resuming from jmp %u -> %u\n", current->pid, from->pid) ;
90
- // current = from;
80
+ current = to ;
81
+ to_info -> from_sched = prev ;
82
+ prev = from ;
91
83
92
- // return from;
84
+ return __switch_to_inner ( from , to ) ;
93
85
}
94
86
95
87
int copy_thread (struct task_struct * p , const struct kernel_clone_args * args )
@@ -103,8 +95,6 @@ int copy_thread(struct task_struct *p, const struct kernel_clone_args *args)
103
95
if (!args -> fn )
104
96
panic ("can't copy userspace thread" ); // yet
105
97
106
- // pr_info("copying thread %i %p\n", p->pid, childregs);
107
-
108
98
childregs -> fn = args -> fn ;
109
99
childregs -> fn_arg = args -> fn_arg ;
110
100
0 commit comments