#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/cdev.h>
#include <linux/errno.h>
#include <linux/device.h>
#include <linux/kprobes.h>
#include <linux/mutex.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/smp.h>
#include <linux/version.h>
#include <linux/interrupt.h>
#include <linux/time.h>
#include <linux/string.h>
#include <linux/vmalloc.h>
#include <linux/preempt.h>
#include <asm/atomic.h>

#if LINUX_VERSION_CODE > KERNEL_VERSION(3,3,0)
#include <asm/switch_to.h>
#else
#include <asm/system.h>
#endif

#ifndef X86_CR0_WP
#define X86_CR0_WP 0x00010000
#endif

#if LINUX_VERSION_CODE < KERNEL_VERSION(3,5,0)
#define APIC_EOI_ACK 0x0
#endif

#if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,25)
#error Unsupported Kernel Version
#endif

#define DEBUG if(1)               // change to if(0) to compile out module debug code
#define DEBUG_SCHEDULE_HOOK if(1) // change to if(0) to compile out schedule_hook debug code

unsigned long the_hook = 0;
#define PERMISSION_MASK (S_IRUSR | S_IWUSR | S_IRGRP | S_IWGRP | S_IROTH)
module_param(the_hook, ulong, PERMISSION_MASK);

unsigned int audit_counter = 0;
#undef PERMISSION_MASK
#define PERMISSION_MASK (S_IRUSR | S_IRGRP | S_IROTH)
module_param(audit_counter, uint, PERMISSION_MASK);
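/*
 * Both parameters are exposed under sysfs with the permission masks above:
 * the_hook is writable by owner/group, audit_counter is read-only. A usage
 * sketch from userspace (the module name in the path is an assumption, it
 * follows the standard /sys/module layout):
 *
 *   # cat /sys/module/<modname>/parameters/audit_counter
 *   # echo <callback address> > /sys/module/<modname>/parameters/the_hook
 */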
static void synchronize_all_slaves(void *);
static int schedule_hook_init(void);
static void schedule_unpatch(void);
static int schedule_patch(void);
static void print_bytes(char *str, unsigned char *ptr, size_t s);
extern void schedule_hook(void);
extern void schedule_hook_2(void);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Alessandro Pellegrini <pellegrini@dis.uniroma1.it>, Francesco Quaglia <quaglia@dis.uniroma1.it>");
MODULE_DESCRIPTION("Run-time patch of the Linux kernel scheduler to support the execution of a generic custom function upon thread reschedule");

module_init(schedule_hook_init);
module_exit(schedule_unpatch);
void *finish_task_switch = (void *)FTS_ADDR;
void *finish_task_switch_next = (void *)FTS_ADDR_NEXT;
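/*
 * FTS_ADDR and FTS_ADDR_NEXT are expected to be supplied at compile time:
 * the address of finish_task_switch() and of the symbol that follows it,
 * which bound the code region to disassemble and patch. A plausible way to
 * generate them at build time (an assumption, the actual Makefile is not
 * part of this excerpt) is to grep System.map:
 *
 *   awk '/ finish_task_switch$/ { print "-DFTS_ADDR=0x"$1; getline; \
 *        print "-DFTS_ADDR_NEXT=0x"$1 }' /boot/System.map-$(uname -r)
 */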
// Backup of the original bytes overwritten at each patch point, so that
// schedule_unpatch() can restore the pristine kernel text. The struct and
// array shapes are reconstructed from how b[] is used below; the struct
// name, the byte-buffer size and the array bound are assumptions.
typedef struct {
	void *init_addr;            // address the bytes were taken from
	unsigned char bytes[32];    // saved original bytes
	unsigned short int len;     // how many bytes are valid
} backup_t;

#define MAX_PATCH_POINTS 16     // assumed bound on the number of 'ret' sites
static backup_t b[MAX_PATCH_POINTS];
unsigned short int backup_count = 0;

// One entry per disassembled instruction of finish_task_switch().
typedef struct {
	void *ptr;                  // address of the instruction
	unsigned char bytecode[16]; // raw instruction bytes
	short unsigned int size;    // instruction length
} instr_t;

// Counters driving the synchronize_all()/unsynchronize_all() rendezvous.
static atomic_t synch_enter = ATOMIC_INIT(0);
static atomic_t synch_leave = ATOMIC_INIT(0);
extern void schedule_hook_end(void);
extern void schedule_hook_patch_point(void);
extern void schedule_hook_patch_point_2(void);
static void synchronize_all_slaves(void *info) {
	printk(KERN_DEBUG "%s: cpu %d entering synchronize_all_slaves\n", KBUILD_MODNAME, smp_processor_id());
	// Tell the master this CPU has parked, then spin until released.
	atomic_dec(&synch_enter);
	while (atomic_read(&synch_leave) > 0)
		cpu_relax();
	printk(KERN_DEBUG "%s: cpu %d leaving synchronize_all_slaves\n", KBUILD_MODNAME, smp_processor_id());
}
#define synchronize_all() do { \
		printk("cpu %d asking for unpreemptive synchronization\n", smp_processor_id()); \
		atomic_set(&synch_enter, num_online_cpus() - 1); \
		atomic_set(&synch_leave, 1); \
		preempt_disable(); \
		smp_call_function_many(cpu_online_mask, synchronize_all_slaves, NULL, false); \
		while (atomic_read(&synch_enter) > 0) \
			cpu_relax(); \
		printk("cpu %d all kernel threads synchronized\n", smp_processor_id()); \
	} while (0)

#define unsynchronize_all() do { \
		printk("cpu %d freeing other kernel threads\n", smp_processor_id()); \
		atomic_set(&synch_leave, 0); \
		preempt_enable(); \
	} while (0)

// Dump a byte range as hex, for debugging the patching steps.
static void print_bytes(char *str, unsigned char *ptr, size_t s) {
	size_t i;

	printk(KERN_DEBUG "%s: %s: ", KBUILD_MODNAME, str);
	for (i = 0; i < s; i++)
		printk(KERN_CONT "%02x ", ptr[i]);
	printk(KERN_CONT "\n");
}
static int schedule_patch(void) {
	int i = 0;
	int j;
	int pos;
	unsigned int count;
	unsigned int size;
	long displacement;
	unsigned long cr0;
	unsigned char bytes_to_redirect[6];
	int patch_size, patch_offset;
	void *temp;
	void *upper_bound, *lower_bound;

	// Table of the disassembled instructions of finish_task_switch().
	// One slot per byte is an over-approximation of the instruction count.
	instr_t *v = (instr_t *)kmalloc(((unsigned char *)finish_task_switch_next -
					 (unsigned char *)finish_task_switch) * sizeof(instr_t),
					GFP_KERNEL);
	if (v == NULL) {
		printk(KERN_ERR "%s: unable to allocate the instruction table\n", KBUILD_MODNAME);
		return -ENOMEM;
	}

	memset(v, 0, ((unsigned char *)finish_task_switch_next -
		      (unsigned char *)finish_task_switch) * sizeof(instr_t));

	printk(KERN_DEBUG "%s: instruction table allocated and zeroed\n", KBUILD_MODNAME);
	// Walk finish_task_switch() one instruction at a time, recording the
	// address, raw bytes and length of each one. length_disasm()/MODE_X64
	// come from the length disassembler bundled with the module (its
	// header is not part of this excerpt).
	temp = (void *)finish_task_switch;
	while (temp < finish_task_switch_next) {
		size = length_disasm(temp, MODE_X64);
		v[i].ptr = temp;
		v[i].size = size;
		memcpy(v[i].bytecode, (unsigned char *)temp, size);
		temp = (unsigned char *)temp + size;
		i++;
	}

	DEBUG {
		print_bytes("finish_task_switch before patch", finish_task_switch,
			    finish_task_switch_next - finish_task_switch);
		for (j = 0; j < i; j++)
			print_bytes("instruction:", v[j].bytecode, v[j].size);
	}
	// Quiesce all other CPUs and open a write window on kernel text.
	synchronize_all();
	cr0 = read_cr0();
	write_cr0(cr0 & ~X86_CR0_WP);

	DEBUG {
		print_bytes("finish_task_switch before self patching",
			    (unsigned char *)finish_task_switch,
			    finish_task_switch_next - finish_task_switch);
	}
	// Scan the instruction table for every 'ret' (0xc3) in
	// finish_task_switch(): each one becomes a patch point.
	for (j = 0; j < i; j++) {
		if (v[j].size == 1 && v[j].bytecode[0] == 0xc3) {
			printk(KERN_DEBUG "%s: return found at address %p, offset %p\n", KBUILD_MODNAME,
			       (unsigned char *)v[j].ptr, (void *)(v[j].ptr - finish_task_switch));

			// Walk backwards from the 'ret' until the preceding
			// instructions cover the 5 bytes needed by a jmp rel32.
			// (Reconstructed: the original selection loop is not in
			// this excerpt, but this is what the bounds require.)
			count = 0;
			size = 0;
			while (size < 5) {
				count++;
				size += v[j - count].size;
			}

			print_bytes("upper bound instruction: ", v[j - count].bytecode, v[j - count].size);
			upper_bound = v[j - count].ptr;
			lower_bound = v[j].ptr;
			size = lower_bound - upper_bound;
			printk(KERN_DEBUG "%s: size=%d\n", KBUILD_MODNAME, size);

			// Save the bytes about to be overwritten, so they can be
			// replayed in the hook and restored at unload time.
			b[backup_count].init_addr = upper_bound;
			b[backup_count].len = size;
			memcpy(b[backup_count].bytes, upper_bound, size);
			print_bytes("Backup bytes: ", b[backup_count].bytes, size);

			// Displacement of a jmp rel32 from upper_bound to the
			// backup_count-th replica of schedule_hook; the final -5
			// accounts for rel32 being relative to the instruction
			// that follows the 5-byte jump.
			displacement = ((long)((long)schedule_hook + (long)backup_count *
					((long)schedule_hook_2 - (long)schedule_hook)) - (long)upper_bound) - 5;
			// Assemble the 5-byte near jump: opcode 0xe9 followed by
			// the little-endian 32-bit displacement.
			pos = 0;
			bytes_to_redirect[pos++] = 0xe9;
			bytes_to_redirect[pos++] = (unsigned char)(displacement & 0xff);
			bytes_to_redirect[pos++] = (unsigned char)(displacement >> 8 & 0xff);
			bytes_to_redirect[pos++] = (unsigned char)(displacement >> 16 & 0xff);
			bytes_to_redirect[pos++] = (unsigned char)(displacement >> 24 & 0xff);

			print_bytes("assembled jump is", bytes_to_redirect, 5);
			// Redirect the original code towards the hook replica.
			memcpy(upper_bound, bytes_to_redirect, 5);
			backup_count++;
		}
	}

	DEBUG {
		print_bytes("schedule after patch: ", finish_task_switch,
			    finish_task_switch_next - finish_task_switch);
	}
	// Self-patch the hook replicas: copy each set of displaced original
	// instructions into the corresponding patch point inside the hook.
	patch_size = (unsigned char *)schedule_hook_2 - (unsigned char *)schedule_hook_patch_point;
	patch_offset = (unsigned char *)schedule_hook_patch_point_2 - (unsigned char *)schedule_hook_patch_point;
	printk(KERN_DEBUG "%s: schedule_hook is at address %p\n", KBUILD_MODNAME, schedule_hook);
	print_bytes("schedule_hook_patch_point: ", (unsigned char *)schedule_hook_patch_point, patch_size);

	for (j = 0; j < backup_count; j++) {
		memcpy((unsigned char *)schedule_hook_patch_point + j * patch_offset, b[j].bytes, b[j].len);
		print_bytes("schedule_hook_patch_point after patch: ",
			    (unsigned char *)schedule_hook + j * ((unsigned char *)schedule_hook_2 - (unsigned char *)schedule_hook),
			    (unsigned char *)schedule_hook_2 - (unsigned char *)schedule_hook);
	}

	// Close the write window and release the other CPUs.
	write_cr0(cr0);
	unsynchronize_all();

	kfree(v);

	return 0;
}
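/*
 * Sketch of the hook layout the code above assumes; the actual assembly
 * lives elsewhere and is not part of this excerpt, so treat this as an
 * inference from the patching logic. Each replica k looks roughly like:
 *
 *   schedule_hook + k*(schedule_hook_2 - schedule_hook):
 *       <invoke the callback stored in the_hook, if non-zero>
 *   schedule_hook_patch_point + k*patch_offset:
 *       <displaced original bytes>   ; filled in by schedule_patch()
 *       <return on behalf of the displaced 'ret'>
 *
 * Control flow is thus: finish_task_switch -> jmp replica_k -> callback ->
 * displaced instructions -> return to the original caller.
 */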
static void schedule_unpatch(void) {
	int i;
	unsigned long cr0;

	printk(KERN_DEBUG "%s: restoring standard schedule function...\n", KBUILD_MODNAME);

	// Quiesce the other CPUs and reopen the write window on kernel text.
	synchronize_all();
	cr0 = read_cr0();
	write_cr0(cr0 & ~X86_CR0_WP);

	// Put back the original bytes at every patch point.
	for (i = 0; i < backup_count; i++) {
		memcpy(b[i].init_addr, b[i].bytes, b[i].len);
	}

	write_cr0(cr0);
	unsynchronize_all();

	DEBUG {
		print_bytes("Schedule after restore: ", finish_task_switch,
			    finish_task_switch_next - finish_task_switch);
	}
	printk(KERN_INFO "%s: standard schedule function correctly restored\n", KBUILD_MODNAME);
}
static int schedule_hook_init(void) {
	int ret;

	printk(KERN_INFO "%s: mounting the module\n", KBUILD_MODNAME);

	ret = schedule_patch();

	return ret;
}
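/*
 * Usage sketch, assuming the (separate) schedule_hook assembly invokes the
 * function whose address is stored in 'the_hook' whenever it is non-zero.
 * A client module could then install a callback like this (hypothetical):
 *
 *   static void my_reschedule_callback(void) {
 *       // runs on every thread reschedule; keep it short and atomic-safe
 *   }
 *
 *   // from the client's init function:
 *   the_hook = (unsigned long)my_reschedule_callback;
 *
 *   // and from its cleanup function, before unloading:
 *   the_hook = 0;
 */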