diff --git a/fs/dlm/recoverd.c b/fs/dlm/recoverd.c
index 7010353..fd677c8 100644
--- a/fs/dlm/recoverd.c
+++ b/fs/dlm/recoverd.c
@@ -2,7 +2,7 @@
 *******************************************************************************
 **
 **  Copyright (C) Sistina Software, Inc.  1997-2003  All rights reserved.
-**  Copyright (C) 2004-2005 Red Hat, Inc.  All rights reserved.
+**  Copyright (C) 2004-2007 Red Hat, Inc.  All rights reserved.
 **
 **  This copyrighted material is made available to anyone wishing to use,
 **  modify, copy, or redistribute it subject to the terms and conditions
@@ -24,19 +24,28 @@
 
 /* If the start for which we're re-enabling locking (seq) has been superseded
-   by a newer stop (ls_recover_seq), we need to leave locking disabled. */
+   by a newer stop (ls_recover_seq), we need to leave locking disabled.
+
+   We suspend dlm_recv threads here to avoid the race where dlm_recv a) sees
+   locking stopped and b) adds a message to the requestqueue, but dlm_recoverd
+   enables locking and clears the requestqueue between a and b. */
 
 static int enable_locking(struct dlm_ls *ls, uint64_t seq)
 {
 	int error = -EINTR;
 
+	down_write(&ls->ls_recv_active);
+
 	spin_lock(&ls->ls_recover_lock);
 	if (ls->ls_recover_seq == seq) {
 		set_bit(LSFL_RUNNING, &ls->ls_flags);
+		/* unblocks processes waiting to enter the dlm */
 		up_write(&ls->ls_in_recovery);
 		error = 0;
 	}
 	spin_unlock(&ls->ls_recover_lock);
+
+	up_write(&ls->ls_recv_active);
 	return error;
 }
 
@@ -45,7 +54,7 @@ static int ls_recover(struct dlm_ls *ls, struct dlm_recover *rv)
 	unsigned long start;
 	int error, neg = 0;
 
-	log_debug(ls, "recover %llx", rv->seq);
+	log_debug(ls, "recover %llx", (unsigned long long)rv->seq);
 
 	mutex_lock(&ls->ls_recoverd_active);
 
@@ -58,17 +67,18 @@ static int ls_recover(struct dlm_ls *ls, struct dlm_recover *rv)
 	dlm_astd_resume();
 
 	/*
-	 * This list of root rsb's will be the basis of most of the recovery
-	 * routines.
+	 * Free non-master tossed rsb's.  Master rsb's are kept on toss
+	 * list and put on root list to be included in resdir recovery.
 	 */
 
-	dlm_create_root_list(ls);
+	dlm_clear_toss_list(ls);
 
 	/*
-	 * Free all the tossed rsb's so we don't have to recover them.
+	 * This list of root rsb's will be the basis of most of the recovery
+	 * routines.
 	 */
 
-	dlm_clear_toss_list(ls);
+	dlm_create_root_list(ls);
 
 	/*
 	 * Add or remove nodes from the lockspace's ls_nodes list.
@@ -77,7 +87,7 @@ static int ls_recover(struct dlm_ls *ls, struct dlm_recover *rv)
 
 	error = dlm_recover_members(ls, rv, &neg);
 	if (error) {
-		log_error(ls, "recover_members failed %d", error);
+		log_debug(ls, "recover_members failed %d", error);
 		goto fail;
 	}
 	start = jiffies;
@@ -89,25 +99,17 @@ static int ls_recover(struct dlm_ls *ls, struct dlm_recover *rv)
 
 	error = dlm_recover_directory(ls);
 	if (error) {
-		log_error(ls, "recover_directory failed %d", error);
+		log_debug(ls, "recover_directory failed %d", error);
 		goto fail;
 	}
 
 	/*
-	 * Purge directory-related requests that are saved in requestqueue.
-	 * All dir requests from before recovery are invalid now due to the dir
-	 * rebuild and will be resent by the requesting nodes.
-	 */
-
-	dlm_purge_requestqueue(ls);
-
-	/*
 	 * Wait for all nodes to complete directory rebuild.
 	 */
 
 	error = dlm_recover_directory_wait(ls);
 	if (error) {
-		log_error(ls, "recover_directory_wait failed %d", error);
+		log_debug(ls, "recover_directory_wait failed %d", error);
 		goto fail;
 	}
 
@@ -137,7 +139,7 @@ static int ls_recover(struct dlm_ls *ls, struct dlm_recover *rv)
 
 		error = dlm_recover_masters(ls);
 		if (error) {
-			log_error(ls, "recover_masters failed %d", error);
+			log_debug(ls, "recover_masters failed %d", error);
 			goto fail;
 		}
 
@@ -147,13 +149,13 @@ static int ls_recover(struct dlm_ls *ls, struct dlm_recover *rv)
 
 		error = dlm_recover_locks(ls);
 		if (error) {
-			log_error(ls, "recover_locks failed %d", error);
+			log_debug(ls, "recover_locks failed %d", error);
 			goto fail;
 		}
 
 		error = dlm_recover_locks_wait(ls);
 		if (error) {
-			log_error(ls, "recover_locks_wait failed %d", error);
+			log_debug(ls, "recover_locks_wait failed %d", error);
 			goto fail;
 		}
 
@@ -164,34 +166,57 @@ static int ls_recover(struct dlm_ls *ls, struct dlm_recover *rv)
 		 */
 
 		dlm_recover_rsbs(ls);
+	} else {
+		/*
+		 * Other lockspace members may be going through the "neg" steps
+		 * while also adding us to the lockspace, in which case they'll
+		 * be doing the recover_locks (RS_LOCKS) barrier.
+		 */
+		dlm_set_recover_status(ls, DLM_RS_LOCKS);
+
+		error = dlm_recover_locks_wait(ls);
+		if (error) {
+			log_debug(ls, "recover_locks_wait failed %d", error);
+			goto fail;
+		}
 	}
 
 	dlm_release_root_list(ls);
 
+	/*
+	 * Purge directory-related requests that are saved in requestqueue.
+	 * All dir requests from before recovery are invalid now due to the dir
+	 * rebuild and will be resent by the requesting nodes.
+	 */
+
+	dlm_purge_requestqueue(ls);
+
 	dlm_set_recover_status(ls, DLM_RS_DONE);
 	error = dlm_recover_done_wait(ls);
 	if (error) {
-		log_error(ls, "recover_done_wait failed %d", error);
+		log_debug(ls, "recover_done_wait failed %d", error);
 		goto fail;
 	}
 
 	dlm_clear_members_gone(ls);
 
+	dlm_adjust_timeouts(ls);
+
 	error = enable_locking(ls, rv->seq);
 	if (error) {
-		log_error(ls, "enable_locking failed %d", error);
+		log_debug(ls, "enable_locking failed %d", error);
 		goto fail;
 	}
 
 	error = dlm_process_requestqueue(ls);
 	if (error) {
-		log_error(ls, "process_requestqueue failed %d", error);
+		log_debug(ls, "process_requestqueue failed %d", error);
 		goto fail;
 	}
 
 	error = dlm_recover_waiters_post(ls);
 	if (error) {
-		log_error(ls, "recover_waiters_post failed %d", error);
+		log_debug(ls, "recover_waiters_post failed %d", error);
 		goto fail;
	}
 
@@ -199,7 +224,8 @@ static int ls_recover(struct dlm_ls *ls, struct dlm_recover *rv)
 
 	dlm_astd_wake();
 
-	log_debug(ls, "recover %llx done: %u ms", rv->seq,
+	log_debug(ls, "recover %llx done: %u ms",
+		  (unsigned long long)rv->seq,
 		  jiffies_to_msecs(jiffies - start));
 	mutex_unlock(&ls->ls_recoverd_active);
 
@@ -207,11 +233,16 @@ static int ls_recover(struct dlm_ls *ls, struct dlm_recover *rv)
 
  fail:
 	dlm_release_root_list(ls);
-	log_debug(ls, "recover %llx error %d", rv->seq, error);
+	log_debug(ls, "recover %llx error %d",
+		  (unsigned long long)rv->seq, error);
 	mutex_unlock(&ls->ls_recoverd_active);
 	return error;
 }
 
+/* The dlm_ls_start() that created the rv we take here may already have been
+   stopped via dlm_ls_stop(); in that case we need to leave the RECOVERY_STOP
+   flag set. */
+
 static void do_ls_recovery(struct dlm_ls *ls)
 {
 	struct dlm_recover *rv = NULL;
@@ -219,12 +250,14 @@ static void do_ls_recovery(struct dlm_ls *ls)
 	spin_lock(&ls->ls_recover_lock);
 	rv = ls->ls_recover_args;
 	ls->ls_recover_args = NULL;
-	clear_bit(LSFL_RECOVERY_STOP, &ls->ls_flags);
+	if (rv && ls->ls_recover_seq == rv->seq)
+		clear_bit(LSFL_RECOVERY_STOP, &ls->ls_flags);
 	spin_unlock(&ls->ls_recover_lock);
 
 	if (rv) {
 		ls_recover(ls, rv);
 		kfree(rv->nodeids);
+		kfree(rv->new);
 		kfree(rv);
 	}
 }
@@ -234,6 +267,10 @@ static int dlm_recoverd(void *arg)
 	struct dlm_ls *ls;
 
 	ls = dlm_find_lockspace_local(arg);
+	if (!ls) {
+		log_print("dlm_recoverd: no lockspace %p", arg);
+		return -1;
+	}
 
 	while (!kthread_should_stop()) {
 		set_current_state(TASK_INTERRUPTIBLE);
@@ -275,6 +312,7 @@ void dlm_recoverd_stop(struct dlm_ls *ls)
 
 void dlm_recoverd_suspend(struct dlm_ls *ls)
 {
+	wake_up(&ls->ls_wait_general);
 	mutex_lock(&ls->ls_recoverd_active);
 }
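
Note on the enable_locking() change in the first hunk: ls_recv_active closes the
window where dlm_recv (a) sees locking stopped, then (b) adds the message to the
requestqueue, while dlm_recoverd enables locking and clears the requestqueue in
between. The receive path holds the rwsem for read across the check-and-queue,
so the write acquisition in enable_locking() cannot succeed while any receiver
sits between (a) and (b). Below is a minimal userspace sketch of that pattern
using pthreads; struct message, queue_message(), process_message(), and
locking_stopped are illustrative stand-ins, not the kernel's symbols.

#include <pthread.h>
#include <stdbool.h>

struct message { int type; };				/* stand-in payload */

static pthread_rwlock_t recv_active = PTHREAD_RWLOCK_INITIALIZER;
static pthread_mutex_t recover_lock = PTHREAD_MUTEX_INITIALIZER;
static bool locking_stopped = true;

static void queue_message(struct message *m) { (void)m; }	/* saved for later */
static void process_message(struct message *m) { (void)m; }	/* normal path */

/* Receive side: the check (a) and the enqueue (b) both happen under
 * the read lock, so they are atomic with respect to the writer in
 * enable_locking_sketch(). */
void receive_message(struct message *m)
{
	bool stopped;

	pthread_rwlock_rdlock(&recv_active);
	pthread_mutex_lock(&recover_lock);
	stopped = locking_stopped;
	pthread_mutex_unlock(&recover_lock);

	if (stopped)
		queue_message(m);	/* (b): saved until after recovery */
	else
		process_message(m);
	pthread_rwlock_unlock(&recv_active);
}

/* Recovery side: taking the lock for write waits until no receiver is
 * between (a) and (b) before locking is re-enabled, mirroring
 * down_write(&ls->ls_recv_active) in the patch. */
void enable_locking_sketch(void)
{
	pthread_rwlock_wrlock(&recv_active);
	pthread_mutex_lock(&recover_lock);
	locking_stopped = false;
	pthread_mutex_unlock(&recover_lock);
	pthread_rwlock_unlock(&recv_active);
}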
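
On the new "} else {" branch in ls_recover(): a node with no departed members to
recover for must still publish DLM_RS_LOCKS and sit at the same barrier, because
established members running the "neg" steps wait for every member, the newcomer
included, to reach it. The shape of such a status barrier can be sketched as
below; struct member, publish_status(), and the polling check are hypothetical
stand-ins for the dlm's real status exchange and wakeups.

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

#define RS_LOCKS 0x08		/* illustrative bit value, not from this diff */

struct member {
	int nodeid;
	uint32_t status;	/* recovery-status bits this node reported */
};

/* Mark our own status so the other members' barrier can complete,
 * even when we have no recovery work of our own. */
static void publish_status(struct member *self, uint32_t bit)
{
	self->status |= bit;	/* the real dlm reports this to all nodes */
}

/* Barrier condition: true once every lockspace member has reported the
 * bit. The kernel blocks and is woken by status messages; roughly,
 * dlm_recover_locks_wait() returns once this holds across ls_nodes. */
static bool barrier_reached(const struct member *m, size_t n, uint32_t bit)
{
	for (size_t i = 0; i < n; i++)
		if (!(m[i].status & bit))
			return false;
	return true;
}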
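
Finally, the do_ls_recovery() change: per the comment added above it, a
dlm_ls_stop() issued after the dlm_ls_start() that queued rv supersedes that
start (the document's own enable_locking comment says a newer stop updates
ls_recover_seq), so LSFL_RECOVERY_STOP is now cleared only when rv->seq is still
current. The pattern in isolation, with a pthread mutex standing in for
ls_recover_lock and illustrative names throughout:

#include <pthread.h>
#include <stdbool.h>
#include <stdint.h>

struct recover_args { uint64_t seq; };

static pthread_mutex_t recover_lock = PTHREAD_MUTEX_INITIALIZER;
static uint64_t recover_seq;		/* advanced by every stop */
static bool recovery_stop = true;	/* analogue of LSFL_RECOVERY_STOP */
static struct recover_args *recover_args;

/* Stop path: supersede any start queued before this point. */
void stop_recovery(void)
{
	pthread_mutex_lock(&recover_lock);
	recovery_stop = true;
	recover_seq++;
	pthread_mutex_unlock(&recover_lock);
}

/* Recovery thread: take the queued start arguments, and drop the stop
 * flag only if no newer stop has superseded the start that made them. */
struct recover_args *take_recovery_args(void)
{
	struct recover_args *rv;

	pthread_mutex_lock(&recover_lock);
	rv = recover_args;
	recover_args = NULL;
	if (rv && recover_seq == rv->seq)
		recovery_stop = false;	/* this start is still current */
	pthread_mutex_unlock(&recover_lock);
	return rv;
}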