From 01f8f8f4d16a97bbfc6c65ef29516efa4b4c86e6 Mon Sep 17 00:00:00 2001 From: Cyrill Gorcunov Date: Thu, 17 Nov 2011 00:59:08 +0400 Subject: [PATCH] restore: Bring trivial locker back Threads are better restored in a serialized way; otherwise, if an error happens, the error message will be garbled. Signed-off-by: Cyrill Gorcunov --- cr-restore.c | 3 +++ include/restorer.h | 27 +++++++++++++++++++++++++++ restorer.c | 6 +++++- 3 files changed, 35 insertions(+), 1 deletion(-) diff --git a/cr-restore.c b/cr-restore.c index 2da56bc23..792a0736a 100644 --- a/cr-restore.c +++ b/cr-restore.c @@ -1453,6 +1453,7 @@ static void sigreturn_restore(pid_t pstree_pid, pid_t pid) task_args->pid = pid; task_args->fd_core = fd_core; task_args->fd_self_vmas = fd_self_vmas; + task_args->rst_lock = 0; if (pstree_entry.nr_threads) { int i; @@ -1479,6 +1480,8 @@ static void sigreturn_restore(pid_t pstree_pid, pid_t pid) goto err; } + thread_args[i].rst_lock = &task_args->rst_lock; + pr_info("Thread %4d stack %8p heap %8p rt_sigframe %8p\n", i, (long)thread_args[i].mem_zone.stack, thread_args[i].mem_zone.heap, diff --git a/include/restorer.h b/include/restorer.h index 179a8d5c4..43b2926b6 100644 --- a/include/restorer.h +++ b/include/restorer.h @@ -54,6 +54,7 @@ struct thread_restore_args { int pid; int fd_core; + long *rst_lock; } __aligned(sizeof(long)); struct task_restore_core_args { @@ -63,6 +64,7 @@ struct task_restore_core_args { int fd_core; /* opened core file */ int fd_self_vmas; /* opened file with running VMAs to unmap */ bool restore_threads; /* if to restore threads */ + long rst_lock; /* threads restoration */ int nr_threads; /* number of threads */ @@ -222,4 +224,29 @@ static void always_inline write_hex_n(unsigned long num) sys_write(1, &c, 1); } +static always_inline void rst_lock(long *v) +{ + while (*v) { + asm volatile("lfence"); + asm volatile("pause"); + } + (*v)++; + + asm volatile("sfence"); +} + +static always_inline void rst_unlock(long *v) +{ + (*v)--; + asm 
volatile("sfence"); +} + +static always_inline void rst_wait_unlock(long *v) +{ + while (*v) { + asm volatile("lfence"); + asm volatile("pause"); + } +} + #endif /* CR_RESTORER_H__ */ diff --git a/restorer.c b/restorer.c index cdf85e549..4d5580436 100644 --- a/restorer.c +++ b/restorer.c @@ -86,6 +86,8 @@ long restore_thread(long cmd, struct thread_restore_args *args) goto core_restore_end; } + rst_unlock(args->rst_lock); + new_sp = (long)rt_sigframe + 8; asm volatile( "movq %0, %%rax \n" @@ -428,6 +430,8 @@ self_len_end: if (thread_args[i].pid == args->pid) continue; + rst_lock(&args->rst_lock); + new_sp = RESTORE_ALIGN_STACK((long)thread_args[i].mem_zone.stack, sizeof(thread_args[i].mem_zone.stack)); @@ -478,7 +482,7 @@ self_len_end: "g"(&thread_args[i]) : "rax", "rdi", "rsi", "rdx", "r10", "memory"); - //r_wait_unlock(args->lock); + rst_wait_unlock(&args->rst_lock); } }