1 /******************************************************************************
2 *******************************************************************************
4 ** Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved.
5 ** Copyright (C) 2004-2005 Red Hat, Inc. All rights reserved.
7 ** This copyrighted material is made available to anyone wishing to use,
8 ** modify, copy, or redistribute it subject to the terms and conditions
9 ** of the GNU General Public License v.2.
11 *******************************************************************************
12 ******************************************************************************/
14 #include "dlm_internal.h"
15 #include "lockspace.h"
/*
 * Debugfs hooks: real declarations when CONFIG_DLM_DEBUG is set,
 * no-op static inline stubs otherwise.
 * NOTE(review): the #else/#endif of this conditional are elided in
 * this excerpt.
 */
26 #ifdef CONFIG_DLM_DEBUG
27 int dlm_create_debug_file(struct dlm_ls *ls);
28 void dlm_delete_debug_file(struct dlm_ls *ls);
30 static inline int dlm_create_debug_file(struct dlm_ls *ls) { return 0; }
31 static inline void dlm_delete_debug_file(struct dlm_ls *ls) { }
/*
 * File-wide state: ls_lock serializes lockspace create/release (see
 * dlm_new_lockspace/release_lockspace), lslist holds every active
 * lockspace and is guarded by lslist_lock, and scand_task is the
 * "dlm_scand" kthread created by dlm_scand_start().
 */
35 static struct mutex ls_lock;
36 static struct list_head lslist;
37 static spinlock_t lslist_lock;
38 static struct task_struct * scand_task;
/*
 * sysfs "control" store: parse an integer command written by
 * userspace.  NOTE(review): the code acting on n is elided in this
 * excerpt, so the command semantics cannot be confirmed from here.
 */
41 static ssize_t dlm_control_store(struct dlm_ls *ls, const char *buf, size_t len)
44 int n = simple_strtol(buf, NULL, 0);
/*
 * sysfs "event_done" store: record the result userspace reports for a
 * uevent and wake the waiter sleeping in do_uevent().
 */
59 static ssize_t dlm_event_store(struct dlm_ls *ls, const char *buf, size_t len)
61 ls->ls_uevent_result = simple_strtol(buf, NULL, 0);
62 set_bit(LSFL_UEVENT_WAIT, &ls->ls_flags);
63 wake_up(&ls->ls_uevent_wait);
/* sysfs "id" show: print the lockspace global id as unsigned decimal. */
67 static ssize_t dlm_id_show(struct dlm_ls *ls, char *buf)
69 return snprintf(buf, PAGE_SIZE, "%u\n", ls->ls_global_id);
/* sysfs "id" store: set the lockspace global id from userspace. */
72 static ssize_t dlm_id_store(struct dlm_ls *ls, const char *buf, size_t len)
74 ls->ls_global_id = simple_strtoul(buf, NULL, 0);
/* sysfs "recover_status" show: print the recovery status bits in hex. */
78 static ssize_t dlm_recover_status_show(struct dlm_ls *ls, char *buf)
80 uint32_t status = dlm_recover_status(ls);
81 return snprintf(buf, PAGE_SIZE, "%x\n", status);
/* sysfs "recover_nodeid" show: print the nodeid currently being
   recovered (signed decimal). */
84 static ssize_t dlm_recover_nodeid_show(struct dlm_ls *ls, char *buf)
86 return snprintf(buf, PAGE_SIZE, "%d\n", ls->ls_recover_nodeid);
/*
 * Per-file descriptor pairing a sysfs attribute with its show/store
 * callbacks; recovered via container_of() in dlm_attr_show/store.
 * NOTE(review): the "struct dlm_attr {" opening line is elided in
 * this excerpt.
 */
90 struct attribute attr;
91 ssize_t (*show)(struct dlm_ls *, char *);
92 ssize_t (*store)(struct dlm_ls *, const char *, size_t);
/*
 * Attribute instances for the per-lockspace sysfs directory:
 * write-only "control" and "event_done", read/write "id", read-only
 * "recover_status" and "recover_nodeid".
 * NOTE(review): the closing "};" lines and the .show initializer of
 * dlm_attr_id are elided in this excerpt.
 */
95 static struct dlm_attr dlm_attr_control = {
96 .attr = {.name = "control", .mode = S_IWUSR},
97 .store = dlm_control_store
100 static struct dlm_attr dlm_attr_event = {
101 .attr = {.name = "event_done", .mode = S_IWUSR},
102 .store = dlm_event_store
105 static struct dlm_attr dlm_attr_id = {
106 .attr = {.name = "id", .mode = S_IRUGO | S_IWUSR},
108 .store = dlm_id_store
111 static struct dlm_attr dlm_attr_recover_status = {
112 .attr = {.name = "recover_status", .mode = S_IRUGO},
113 .show = dlm_recover_status_show
116 static struct dlm_attr dlm_attr_recover_nodeid = {
117 .attr = {.name = "recover_nodeid", .mode = S_IRUGO},
118 .show = dlm_recover_nodeid_show
/*
 * Default-attribute array wired into dlm_ktype below.
 * NOTE(review): the dlm_attr_id entry and the NULL terminator are
 * elided in this excerpt.
 */
121 static struct attribute *dlm_attrs[] = {
122 &dlm_attr_control.attr,
123 &dlm_attr_event.attr,
125 &dlm_attr_recover_status.attr,
126 &dlm_attr_recover_nodeid.attr,
/*
 * Generic sysfs show dispatcher: recover the dlm_ls from the embedded
 * kobject and the dlm_attr from the generic attribute, then call the
 * attribute's show hook (an empty read if it has none).
 */
130 static ssize_t dlm_attr_show(struct kobject *kobj, struct attribute *attr,
133 struct dlm_ls *ls = container_of(kobj, struct dlm_ls, ls_kobj);
134 struct dlm_attr *a = container_of(attr, struct dlm_attr, attr);
135 return a->show ? a->show(ls, buf) : 0;
/*
 * Generic sysfs store dispatcher, mirror of dlm_attr_show(): forwards
 * the write to the attribute's store hook; a write to an attribute
 * without one is silently accepted (returns len).
 */
138 static ssize_t dlm_attr_store(struct kobject *kobj, struct attribute *attr,
139 const char *buf, size_t len)
141 struct dlm_ls *ls = container_of(kobj, struct dlm_ls, ls_kobj);
142 struct dlm_attr *a = container_of(attr, struct dlm_attr, attr);
143 return a->store ? a->store(ls, buf, len) : len;
/*
 * kobject release callback: runs when the lockspace kobject's
 * refcount drops to zero.  NOTE(review): the actual teardown of ls
 * (presumably kfree) is elided in this excerpt.
 */
146 static void lockspace_kobj_release(struct kobject *k)
148 struct dlm_ls *ls = container_of(k, struct dlm_ls, ls_kobj);
/* sysfs_ops vector routing reads/writes to the dispatchers above. */
152 static struct sysfs_ops dlm_attr_ops = {
153 .show = dlm_attr_show,
154 .store = dlm_attr_store,
/* kobj_type for a lockspace kobject: default attributes, dispatch
   ops, and the release callback that frees the lockspace. */
157 static struct kobj_type dlm_ktype = {
158 .default_attrs = dlm_attrs,
159 .sysfs_ops = &dlm_attr_ops,
160 .release = lockspace_kobj_release,
/* The "dlm" kset, parented under the kernel subsystem; registered in
   dlm_lockspace_init() and set as kset of every lockspace kobject. */
163 static struct kset dlm_kset = {
164 .subsys = &kernel_subsys,
165 .kobj = {.name = "dlm",},
/*
 * Prepare a lockspace's kobject before registration: name it after
 * ls_name (copied via a NUL-padded local buffer) and attach the dlm
 * kset and ktype.  Error handling after kobject_set_name() is elided
 * in this excerpt.
 */
169 static int kobject_setup(struct dlm_ls *ls)
171 char lsname[DLM_LOCKSPACE_LEN];
174 memset(lsname, 0, DLM_LOCKSPACE_LEN);
175 snprintf(lsname, DLM_LOCKSPACE_LEN, "%s", ls->ls_name);
177 error = kobject_set_name(&ls->ls_kobj, "%s", lsname);
181 ls->ls_kobj.kset = &dlm_kset;
182 ls->ls_kobj.ktype = &dlm_ktype;
/*
 * Emit a KOBJ_ONLINE (join) or KOBJ_OFFLINE (leave) uevent for the
 * lockspace, then sleep interruptibly until userspace acknowledges by
 * writing "event_done" (which sets LSFL_UEVENT_WAIT and fills in
 * ls_uevent_result — see dlm_event_store()).  On a normal wakeup the
 * userspace-supplied result becomes the return value.
 */
186 static int do_uevent(struct dlm_ls *ls, int in)
191 kobject_uevent(&ls->ls_kobj, KOBJ_ONLINE);
193 kobject_uevent(&ls->ls_kobj, KOBJ_OFFLINE);
195 error = wait_event_interruptible(ls->ls_uevent_wait,
196 test_and_clear_bit(LSFL_UEVENT_WAIT, &ls->ls_flags));
200 error = ls->ls_uevent_result;
/*
 * Module-init for this file: initialize the global mutex, lockspace
 * list and its spinlock, then register the "dlm" kset with sysfs
 * (failure is logged; the return path is elided in this excerpt).
 */
206 int dlm_lockspace_init(void)
211 mutex_init(&ls_lock);
212 INIT_LIST_HEAD(&lslist);
213 spin_lock_init(&lslist_lock);
215 error = kset_register(&dlm_kset);
217 printk("dlm_lockspace_init: cannot register kset %d\n", error);
/* Module-exit counterpart of dlm_lockspace_init(): drop the kset. */
221 void dlm_lockspace_exit(void)
223 kset_unregister(&dlm_kset);
/*
 * Body of the "dlm_scand" kthread: until asked to stop, walk every
 * lockspace (the per-lockspace scan work is elided in this excerpt)
 * and then sleep for dlm_config.scan_secs seconds.
 * NOTE(review): lslist is traversed here without taking lslist_lock —
 * presumably safe via some other serialization; confirm.
 */
226 static int dlm_scand(void *data)
230 while (!kthread_should_stop()) {
231 list_for_each_entry(ls, &lslist, ls_list)
233 schedule_timeout_interruptible(dlm_config.scan_secs * HZ);
/*
 * Create and start the scanning kthread.  NOTE(review): the error
 * check on p and the assignment into scand_task are elided in this
 * excerpt.
 */
238 static int dlm_scand_start(void)
240 struct task_struct *p;
243 p = kthread_run(dlm_scand, NULL, "dlm_scand");
/* Stop the scanning kthread started by dlm_scand_start(). */
251 static void dlm_scand_stop(void)
253 kthread_stop(scand_task);
/*
 * Find a lockspace by exact (name, namelen) match, walking lslist
 * under lslist_lock.  NOTE(review): the match exit and any reference
 * counting on the found ls are elided in this excerpt.
 */
256 static struct dlm_ls *dlm_find_lockspace_name(char *name, int namelen)
260 spin_lock(&lslist_lock);
262 list_for_each_entry(ls, &lslist, ls_list) {
263 if (ls->ls_namelen == namelen &&
264 memcmp(ls->ls_name, name, namelen) == 0)
269 spin_unlock(&lslist_lock);
/*
 * Find a lockspace by its global id, under lslist_lock.
 * NOTE(review): the hit path (refcount/break) is elided; callers are
 * expected to balance with dlm_put_lockspace().
 */
273 struct dlm_ls *dlm_find_lockspace_global(uint32_t id)
277 spin_lock(&lslist_lock);
279 list_for_each_entry(ls, &lslist, ls_list) {
280 if (ls->ls_global_id == id) {
287 spin_unlock(&lslist_lock);
/*
 * Find a lockspace by its local handle — the opaque token returned to
 * lockspace users, which new_lockspace() sets to the ls pointer
 * itself (ls_local_handle = ls).  Walks lslist under lslist_lock;
 * the hit path is elided in this excerpt.
 */
291 struct dlm_ls *dlm_find_lockspace_local(dlm_lockspace_t *lockspace)
295 spin_lock(&lslist_lock);
296 list_for_each_entry(ls, &lslist, ls_list) {
297 if (ls->ls_local_handle == lockspace) {
304 spin_unlock(&lslist_lock);
/*
 * Find a lockspace by the minor number of its character device
 * (ls_device.minor), under lslist_lock; hit path elided in this
 * excerpt.
 */
308 struct dlm_ls *dlm_find_lockspace_device(int minor)
312 spin_lock(&lslist_lock);
313 list_for_each_entry(ls, &lslist, ls_list) {
314 if (ls->ls_device.minor == minor) {
321 spin_unlock(&lslist_lock);
/*
 * Drop a reference taken by one of the dlm_find_lockspace_*()
 * lookups.  NOTE(review): the decrement itself (presumably of
 * ls_count, which remove_lockspace() tests) is elided in this
 * excerpt; only the lslist_lock bracket is visible.
 */
325 void dlm_put_lockspace(struct dlm_ls *ls)
327 spin_lock(&lslist_lock);
329 spin_unlock(&lslist_lock);
/*
 * Unlink ls from lslist, but only once ls_count has reached zero,
 * i.e. no outstanding find/put references remain.  The path taken
 * when ls_count != 0 (presumably a wait-and-retry) is elided in this
 * excerpt.
 */
332 static void remove_lockspace(struct dlm_ls *ls)
335 spin_lock(&lslist_lock);
336 if (ls->ls_count == 0) {
337 list_del(&ls->ls_list);
338 spin_unlock(&lslist_lock);
341 spin_unlock(&lslist_lock);
/*
 * Start the module-wide daemon threads used by every lockspace:
 * dlm_astd (AST/lock-request processing), dlm_scand (periodic scan)
 * and lowcomms (inter-node messaging).  The error unwinding between
 * the three steps is elided in this excerpt.
 */
346 static int threads_start(void)
350 /* Thread which processes lock requests for all lockspaces */
351 error = dlm_astd_start();
353 log_print("cannot start dlm_astd thread %d", error);
357 error = dlm_scand_start();
359 log_print("cannot start dlm_scand thread %d", error);
363 /* Thread for sending/receiving messages for all lockspaces */
364 error = dlm_lowcomms_start();
366 log_print("cannot start dlm lowcomms %d", error);
380 static void threads_stop(void)
/*
 * Allocate and bring up one lockspace: validate parameters, reuse an
 * existing lockspace of the same name, otherwise build the rsb/lkb/
 * dir hash tables and recovery state, add to lslist, start recoverd,
 * register the sysfs kobject and run the ONLINE uevent handshake.
 * Called with ls_lock held by dlm_new_lockspace().
 * NOTE(review): many lines (braces, gotos, error labels, returns) are
 * elided in this excerpt; comments describe only visible statements.
 */
387 static int new_lockspace(char *name, int namelen, void **lockspace,
388 uint32_t flags, int lvblen)
391 int i, size, error = -ENOMEM;
/* Reject oversized names, and LVB lengths that are zero or not a
   multiple of 8. */
393 if (namelen > DLM_LOCKSPACE_LEN)
396 if (!lvblen || (lvblen % 8))
/* Pin the module for the lifetime of the lockspace. */
399 if (!try_module_get(THIS_MODULE))
/* If a lockspace with this name already exists, drop the module ref
   (the reuse path itself is elided here). */
402 ls = dlm_find_lockspace_name(name, namelen);
405 module_put(THIS_MODULE);
/* Allocate the dlm_ls with namelen extra bytes so the name can be
   stored inline at ls_name. */
409 ls = kzalloc(sizeof(struct dlm_ls) + namelen, GFP_KERNEL);
412 memcpy(ls->ls_name, name, namelen);
413 ls->ls_namelen = namelen;
414 ls->ls_exflags = flags;
415 ls->ls_lvblen = lvblen;
/* Resource (rsb) hash table: per-bucket keep list, toss list and
   rwlock. */
419 size = dlm_config.rsbtbl_size;
420 ls->ls_rsbtbl_size = size;
422 ls->ls_rsbtbl = kmalloc(sizeof(struct dlm_rsbtable) * size, GFP_KERNEL);
425 for (i = 0; i < size; i++) {
426 INIT_LIST_HEAD(&ls->ls_rsbtbl[i].list);
427 INIT_LIST_HEAD(&ls->ls_rsbtbl[i].toss);
428 rwlock_init(&ls->ls_rsbtbl[i].lock);
/* Lock (lkb) hash table; each bucket's counter starts at 1
   (presumably seeding lkb id assignment — confirm). */
431 size = dlm_config.lkbtbl_size;
432 ls->ls_lkbtbl_size = size;
434 ls->ls_lkbtbl = kmalloc(sizeof(struct dlm_lkbtable) * size, GFP_KERNEL);
437 for (i = 0; i < size; i++) {
438 INIT_LIST_HEAD(&ls->ls_lkbtbl[i].list);
439 rwlock_init(&ls->ls_lkbtbl[i].lock);
440 ls->ls_lkbtbl[i].counter = 1;
/* Directory hash table. */
443 size = dlm_config.dirtbl_size;
444 ls->ls_dirtbl_size = size;
446 ls->ls_dirtbl = kmalloc(sizeof(struct dlm_dirtable) * size, GFP_KERNEL);
449 for (i = 0; i < size; i++) {
450 INIT_LIST_HEAD(&ls->ls_dirtbl[i].list);
451 rwlock_init(&ls->ls_dirtbl[i].lock);
/* Waiters list, node membership and recovery bookkeeping. */
454 INIT_LIST_HEAD(&ls->ls_waiters);
455 mutex_init(&ls->ls_waiters_mutex);
457 INIT_LIST_HEAD(&ls->ls_nodes);
458 INIT_LIST_HEAD(&ls->ls_nodes_gone);
459 ls->ls_num_nodes = 0;
460 ls->ls_low_nodeid = 0;
461 ls->ls_total_weight = 0;
462 ls->ls_node_array = NULL;
464 memset(&ls->ls_stub_rsb, 0, sizeof(struct dlm_rsb));
465 ls->ls_stub_rsb.res_ls = ls;
467 ls->ls_debug_rsb_dentry = NULL;
468 ls->ls_debug_waiters_dentry = NULL;
/* Uevent handshake state consumed by do_uevent()/dlm_event_store(). */
470 init_waitqueue_head(&ls->ls_uevent_wait);
471 ls->ls_uevent_result = 0;
473 ls->ls_recoverd_task = NULL;
474 mutex_init(&ls->ls_recoverd_active);
475 spin_lock_init(&ls->ls_recover_lock);
476 ls->ls_recover_status = 0;
477 ls->ls_recover_seq = 0;
478 ls->ls_recover_args = NULL;
479 init_rwsem(&ls->ls_in_recovery);
480 INIT_LIST_HEAD(&ls->ls_requestqueue);
481 mutex_init(&ls->ls_requestqueue_mutex);
482 mutex_init(&ls->ls_clear_proc_locks);
484 ls->ls_recover_buf = kmalloc(dlm_config.buffer_size, GFP_KERNEL);
485 if (!ls->ls_recover_buf)
488 INIT_LIST_HEAD(&ls->ls_recover_list);
489 spin_lock_init(&ls->ls_recover_list_lock);
490 ls->ls_recover_list_count = 0;
/* The opaque handle handed back to callers is the ls pointer itself;
   dlm_find_lockspace_local() matches against it. */
491 ls->ls_local_handle = ls;
492 init_waitqueue_head(&ls->ls_wait_general);
493 INIT_LIST_HEAD(&ls->ls_root_list);
494 init_rwsem(&ls->ls_root_sem);
/* Take ls_in_recovery for writing so lock activity blocks until the
   initial recovery finishes. */
496 down_write(&ls->ls_in_recovery);
498 spin_lock(&lslist_lock);
499 list_add(&ls->ls_list, &lslist);
500 spin_unlock(&lslist_lock);
502 /* needs to find ls in lslist */
503 error = dlm_recoverd_start(ls);
505 log_error(ls, "can't start dlm_recoverd %d", error);
509 dlm_create_debug_file(ls);
511 error = kobject_setup(ls);
515 error = kobject_register(&ls->ls_kobj);
/* Announce ONLINE and wait for userspace acknowledgement. */
519 error = do_uevent(ls, 1);
/* Error unwinding (labels elided): undo kobject, debug file,
   recoverd, list membership and allocations in reverse order, then
   release the module reference. */
527 kobject_unregister(&ls->ls_kobj);
529 dlm_delete_debug_file(ls);
530 dlm_recoverd_stop(ls);
532 spin_lock(&lslist_lock);
533 list_del(&ls->ls_list);
534 spin_unlock(&lslist_lock);
535 kfree(ls->ls_recover_buf);
537 kfree(ls->ls_dirtbl);
539 kfree(ls->ls_lkbtbl);
541 kfree(ls->ls_rsbtbl);
545 module_put(THIS_MODULE);
/*
 * Public entry point for creating a lockspace: serializes on ls_lock,
 * ensures the shared daemon threads are running (threads_start),
 * then delegates to new_lockspace().  Error handling between the
 * steps is elided in this excerpt.
 */
549 int dlm_new_lockspace(char *name, int namelen, void **lockspace,
550 uint32_t flags, int lvblen)
554 mutex_lock(&ls_lock);
556 error = threads_start();
560 error = new_lockspace(name, namelen, lockspace, flags, lvblen);
564 mutex_unlock(&ls_lock);
568 /* Return 1 if the lockspace still has active remote locks,
569 * 2 if the lockspace still has active local locks.
/*
 * Scan the lkb table for remaining locks; per the comment above this
 * function, nonzero means locks remain (1 remote, 2 local).
 * NOTE(review): the accounting after the nodeid test, and the final
 * return, are elided in this excerpt.
 */
571 static int lockspace_busy(struct dlm_ls *ls)
573 int i, lkb_found = 0;
576 /* NOTE: We check the lockidtbl here rather than the resource table.
577 This is because there may be LKBs queued as ASTs that have been
578 unlinked from their RSBs and are pending deletion once the AST has
/* Walk every lkbtbl bucket under its read lock. */
581 for (i = 0; i < ls->ls_lkbtbl_size; i++) {
582 read_lock(&ls->ls_lkbtbl[i].lock);
583 if (!list_empty(&ls->ls_lkbtbl[i].list)) {
585 list_for_each_entry(lkb, &ls->ls_lkbtbl[i].list,
/* lkb_nodeid == 0 presumably marks a local lock (the "return 2"
   case in the contract above) — confirm against dlm_internal.h. */
587 if (!lkb->lkb_nodeid) {
588 read_unlock(&ls->ls_lkbtbl[i].lock);
593 read_unlock(&ls->ls_lkbtbl[i].lock);
/*
 * Tear down a lockspace: stop recoverd, unlink it from lslist, then
 * free every lkb, rsb and hash table before dropping the kobject
 * (whose release callback finally frees the dlm_ls).
 * NOTE(review): the force/busy decision logic, loop-closing braces
 * and several free calls are elided in this excerpt.
 */
598 static int release_lockspace(struct dlm_ls *ls, int force)
602 struct list_head *head;
604 int busy = lockspace_busy(ls);
612 dlm_recoverd_stop(ls);
/* Drops the lslist entry once no find/put references remain. */
614 remove_lockspace(ls);
616 dlm_delete_debug_file(ls);
620 kfree(ls->ls_recover_buf);
623 * Free direntry structs.
627 kfree(ls->ls_dirtbl);
630 * Free all lkb's on lkbtbl[] lists.
633 for (i = 0; i < ls->ls_lkbtbl_size; i++) {
634 head = &ls->ls_lkbtbl[i].list;
635 while (!list_empty(head)) {
636 lkb = list_entry(head->next, struct dlm_lkb,
639 list_del(&lkb->lkb_idtbl_list);
/* Master-copy lkbs own their LVB; free it along with the lkb. */
643 if (lkb->lkb_lvbptr && lkb->lkb_flags & DLM_IFL_MSTCPY)
644 free_lvb(lkb->lkb_lvbptr);
651 kfree(ls->ls_lkbtbl);
654 * Free all rsb's on rsbtbl[] lists
657 for (i = 0; i < ls->ls_rsbtbl_size; i++) {
658 head = &ls->ls_rsbtbl[i].list;
659 while (!list_empty(head)) {
660 rsb = list_entry(head->next, struct dlm_rsb,
663 list_del(&rsb->res_hashchain);
/* Also drain this bucket's "toss" (unused-rsb) list. */
667 head = &ls->ls_rsbtbl[i].toss;
668 while (!list_empty(head)) {
669 rsb = list_entry(head->next, struct dlm_rsb,
671 list_del(&rsb->res_hashchain);
676 kfree(ls->ls_rsbtbl);
679 * Free structures on any other lists
682 kfree(ls->ls_recover_args);
683 dlm_clear_free_entries(ls);
684 dlm_clear_members(ls);
685 dlm_clear_members_gone(ls);
686 kfree(ls->ls_node_array);
687 kobject_unregister(&ls->ls_kobj);
688 /* The ls structure will be freed when the kobject is done with */
/* Under ls_lock (body elided — presumably stops the shared threads
   when this was the last lockspace), then drop the module ref taken
   in new_lockspace(). */
690 mutex_lock(&ls_lock);
694 mutex_unlock(&ls_lock);
696 module_put(THIS_MODULE);
701 * Called when a system has released all its locks and is not going to use the
702 * lockspace any longer. We free everything we're managing for this lockspace.
703 * Remaining nodes will go through the recovery process as if we'd died. The
704 * lockspace must continue to function as usual, participating in recoveries,
705 * until this returns.
707 * Force has 4 possible values:
708 * 0 - don't destroy lockspace if it has any LKBs
709 * 1 - destroy lockspace if it has remote LKBs but not if it has local LKBs
710 * 2 - destroy lockspace regardless of LKBs
711 * 3 - destroy lockspace as part of a forced shutdown
/*
 * Public entry: resolve the caller's opaque handle, drop the lookup
 * reference, and delegate to release_lockspace() with the caller's
 * force level (semantics documented in the comment above).
 */
714 int dlm_release_lockspace(void *lockspace, int force)
718 ls = dlm_find_lockspace_local(lockspace);
721 dlm_put_lockspace(ls);
722 return release_lockspace(ls, force);