1 /******************************************************************************
2 *******************************************************************************
4 ** Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved.
5 ** Copyright (C) 2004-2005 Red Hat, Inc. All rights reserved.
7 ** This copyrighted material is made available to anyone wishing to use,
8 ** modify, copy, or redistribute it subject to the terms and conditions
9 ** of the GNU General Public License v.2.
11 *******************************************************************************
12 ******************************************************************************/
14 #include "dlm_internal.h"
15 #include "lockspace.h"
/*
 * Debugfs hooks: the real implementations live in the debug code when
 * CONFIG_DLM_DEBUG is enabled; otherwise they compile away to no-ops.
 * The truncated source was missing the #else/#endif, leaving both the
 * prototypes and the inline stubs visible at once.
 */
#ifdef CONFIG_DLM_DEBUG
int dlm_create_debug_file(struct dlm_ls *ls);
void dlm_delete_debug_file(struct dlm_ls *ls);
#else
static inline int dlm_create_debug_file(struct dlm_ls *ls) { return 0; }
static inline void dlm_delete_debug_file(struct dlm_ls *ls) { }
#endif
35 static struct mutex ls_lock;
36 static struct list_head lslist;
37 static spinlock_t lslist_lock;
38 static struct task_struct * scand_task;
41 static ssize_t dlm_control_store(struct dlm_ls *ls, const char *buf, size_t len)
44 int n = simple_strtol(buf, NULL, 0);
59 static ssize_t dlm_event_store(struct dlm_ls *ls, const char *buf, size_t len)
61 ls->ls_uevent_result = simple_strtol(buf, NULL, 0);
62 set_bit(LSFL_UEVENT_WAIT, &ls->ls_flags);
63 wake_up(&ls->ls_uevent_wait);
67 static ssize_t dlm_id_show(struct dlm_ls *ls, char *buf)
69 return sprintf(buf, "%u\n", ls->ls_global_id);
72 static ssize_t dlm_id_store(struct dlm_ls *ls, const char *buf, size_t len)
74 ls->ls_global_id = simple_strtoul(buf, NULL, 0);
78 static ssize_t dlm_recover_status_show(struct dlm_ls *ls, char *buf)
80 uint32_t status = dlm_recover_status(ls);
81 return sprintf(buf, "%x\n", status);
85 struct attribute attr;
86 ssize_t (*show)(struct dlm_ls *, char *);
87 ssize_t (*store)(struct dlm_ls *, const char *, size_t);
90 static struct dlm_attr dlm_attr_control = {
91 .attr = {.name = "control", .mode = S_IWUSR},
92 .store = dlm_control_store
95 static struct dlm_attr dlm_attr_event = {
96 .attr = {.name = "event_done", .mode = S_IWUSR},
97 .store = dlm_event_store
100 static struct dlm_attr dlm_attr_id = {
101 .attr = {.name = "id", .mode = S_IRUGO | S_IWUSR},
103 .store = dlm_id_store
106 static struct dlm_attr dlm_attr_recover_status = {
107 .attr = {.name = "recover_status", .mode = S_IRUGO},
108 .show = dlm_recover_status_show
111 static struct attribute *dlm_attrs[] = {
112 &dlm_attr_control.attr,
113 &dlm_attr_event.attr,
115 &dlm_attr_recover_status.attr,
119 static ssize_t dlm_attr_show(struct kobject *kobj, struct attribute *attr,
122 struct dlm_ls *ls = container_of(kobj, struct dlm_ls, ls_kobj);
123 struct dlm_attr *a = container_of(attr, struct dlm_attr, attr);
124 return a->show ? a->show(ls, buf) : 0;
127 static ssize_t dlm_attr_store(struct kobject *kobj, struct attribute *attr,
128 const char *buf, size_t len)
130 struct dlm_ls *ls = container_of(kobj, struct dlm_ls, ls_kobj);
131 struct dlm_attr *a = container_of(attr, struct dlm_attr, attr);
132 return a->store ? a->store(ls, buf, len) : len;
135 static struct sysfs_ops dlm_attr_ops = {
136 .show = dlm_attr_show,
137 .store = dlm_attr_store,
140 static struct kobj_type dlm_ktype = {
141 .default_attrs = dlm_attrs,
142 .sysfs_ops = &dlm_attr_ops,
145 static struct kset dlm_kset = {
146 .subsys = &kernel_subsys,
147 .kobj = {.name = "dlm",},
151 static int kobject_setup(struct dlm_ls *ls)
153 char lsname[DLM_LOCKSPACE_LEN];
156 memset(lsname, 0, DLM_LOCKSPACE_LEN);
157 snprintf(lsname, DLM_LOCKSPACE_LEN, "%s", ls->ls_name);
159 error = kobject_set_name(&ls->ls_kobj, "%s", lsname);
163 ls->ls_kobj.kset = &dlm_kset;
164 ls->ls_kobj.ktype = &dlm_ktype;
168 static int do_uevent(struct dlm_ls *ls, int in)
173 kobject_uevent(&ls->ls_kobj, KOBJ_ONLINE);
175 kobject_uevent(&ls->ls_kobj, KOBJ_OFFLINE);
177 error = wait_event_interruptible(ls->ls_uevent_wait,
178 test_and_clear_bit(LSFL_UEVENT_WAIT, &ls->ls_flags));
182 error = ls->ls_uevent_result;
188 int dlm_lockspace_init(void)
193 mutex_init(&ls_lock);
194 INIT_LIST_HEAD(&lslist);
195 spin_lock_init(&lslist_lock);
197 error = kset_register(&dlm_kset);
199 printk("dlm_lockspace_init: cannot register kset %d\n", error);
203 void dlm_lockspace_exit(void)
205 kset_unregister(&dlm_kset);
208 static int dlm_scand(void *data)
212 while (!kthread_should_stop()) {
213 list_for_each_entry(ls, &lslist, ls_list)
215 schedule_timeout_interruptible(dlm_config.scan_secs * HZ);
220 static int dlm_scand_start(void)
222 struct task_struct *p;
225 p = kthread_run(dlm_scand, NULL, "dlm_scand");
233 static void dlm_scand_stop(void)
235 kthread_stop(scand_task);
238 static struct dlm_ls *dlm_find_lockspace_name(char *name, int namelen)
242 spin_lock(&lslist_lock);
244 list_for_each_entry(ls, &lslist, ls_list) {
245 if (ls->ls_namelen == namelen &&
246 memcmp(ls->ls_name, name, namelen) == 0)
251 spin_unlock(&lslist_lock);
255 struct dlm_ls *dlm_find_lockspace_global(uint32_t id)
259 spin_lock(&lslist_lock);
261 list_for_each_entry(ls, &lslist, ls_list) {
262 if (ls->ls_global_id == id) {
269 spin_unlock(&lslist_lock);
273 struct dlm_ls *dlm_find_lockspace_local(dlm_lockspace_t *lockspace)
277 spin_lock(&lslist_lock);
278 list_for_each_entry(ls, &lslist, ls_list) {
279 if (ls->ls_local_handle == lockspace) {
286 spin_unlock(&lslist_lock);
290 struct dlm_ls *dlm_find_lockspace_device(int minor)
294 spin_lock(&lslist_lock);
295 list_for_each_entry(ls, &lslist, ls_list) {
296 if (ls->ls_device.minor == minor) {
303 spin_unlock(&lslist_lock);
307 void dlm_put_lockspace(struct dlm_ls *ls)
309 spin_lock(&lslist_lock);
311 spin_unlock(&lslist_lock);
314 static void remove_lockspace(struct dlm_ls *ls)
317 spin_lock(&lslist_lock);
318 if (ls->ls_count == 0) {
319 list_del(&ls->ls_list);
320 spin_unlock(&lslist_lock);
323 spin_unlock(&lslist_lock);
/*
 * Start the helper threads shared by all lockspaces: the AST delivery
 * thread, the scanner, and the lowcomms messaging layer.  On failure the
 * already-started threads are unwound in reverse order (goto cleanup).
 */
static int threads_start(void)
{
	int error;

	/* Thread which process lock requests for all lockspace's */
	error = dlm_astd_start();
	if (error) {
		log_print("cannot start dlm_astd thread %d", error);
		goto fail;
	}

	error = dlm_scand_start();
	if (error) {
		log_print("cannot start dlm_scand thread %d", error);
		goto astd_fail;
	}

	/* Thread for sending/receiving messages for all lockspace's */
	error = dlm_lowcomms_start();
	if (error) {
		log_print("cannot start dlm lowcomms %d", error);
		goto scand_fail;
	}

	return 0;

 scand_fail:
	dlm_scand_stop();
 astd_fail:
	dlm_astd_stop();
 fail:
	return error;
}
/*
 * Stop the shared helper threads started by threads_start(), in reverse
 * order of startup.  Body reconstructed from the truncated source.
 */
static void threads_stop(void)
{
	dlm_scand_stop();
	dlm_lowcomms_stop();
	dlm_astd_stop();
}
369 static int new_lockspace(char *name, int namelen, void **lockspace,
370 uint32_t flags, int lvblen)
373 int i, size, error = -ENOMEM;
375 if (namelen > DLM_LOCKSPACE_LEN)
378 if (!lvblen || (lvblen % 8))
381 if (!try_module_get(THIS_MODULE))
384 ls = dlm_find_lockspace_name(name, namelen);
387 module_put(THIS_MODULE);
391 ls = kzalloc(sizeof(struct dlm_ls) + namelen, GFP_KERNEL);
394 memcpy(ls->ls_name, name, namelen);
395 ls->ls_namelen = namelen;
396 ls->ls_exflags = flags;
397 ls->ls_lvblen = lvblen;
401 size = dlm_config.rsbtbl_size;
402 ls->ls_rsbtbl_size = size;
404 ls->ls_rsbtbl = kmalloc(sizeof(struct dlm_rsbtable) * size, GFP_KERNEL);
407 for (i = 0; i < size; i++) {
408 INIT_LIST_HEAD(&ls->ls_rsbtbl[i].list);
409 INIT_LIST_HEAD(&ls->ls_rsbtbl[i].toss);
410 rwlock_init(&ls->ls_rsbtbl[i].lock);
413 size = dlm_config.lkbtbl_size;
414 ls->ls_lkbtbl_size = size;
416 ls->ls_lkbtbl = kmalloc(sizeof(struct dlm_lkbtable) * size, GFP_KERNEL);
419 for (i = 0; i < size; i++) {
420 INIT_LIST_HEAD(&ls->ls_lkbtbl[i].list);
421 rwlock_init(&ls->ls_lkbtbl[i].lock);
422 ls->ls_lkbtbl[i].counter = 1;
425 size = dlm_config.dirtbl_size;
426 ls->ls_dirtbl_size = size;
428 ls->ls_dirtbl = kmalloc(sizeof(struct dlm_dirtable) * size, GFP_KERNEL);
431 for (i = 0; i < size; i++) {
432 INIT_LIST_HEAD(&ls->ls_dirtbl[i].list);
433 rwlock_init(&ls->ls_dirtbl[i].lock);
436 INIT_LIST_HEAD(&ls->ls_waiters);
437 mutex_init(&ls->ls_waiters_mutex);
439 INIT_LIST_HEAD(&ls->ls_nodes);
440 INIT_LIST_HEAD(&ls->ls_nodes_gone);
441 ls->ls_num_nodes = 0;
442 ls->ls_low_nodeid = 0;
443 ls->ls_total_weight = 0;
444 ls->ls_node_array = NULL;
446 memset(&ls->ls_stub_rsb, 0, sizeof(struct dlm_rsb));
447 ls->ls_stub_rsb.res_ls = ls;
449 ls->ls_debug_dentry = NULL;
451 init_waitqueue_head(&ls->ls_uevent_wait);
452 ls->ls_uevent_result = 0;
454 ls->ls_recoverd_task = NULL;
455 mutex_init(&ls->ls_recoverd_active);
456 spin_lock_init(&ls->ls_recover_lock);
457 ls->ls_recover_status = 0;
458 ls->ls_recover_seq = 0;
459 ls->ls_recover_args = NULL;
460 init_rwsem(&ls->ls_in_recovery);
461 INIT_LIST_HEAD(&ls->ls_requestqueue);
462 mutex_init(&ls->ls_requestqueue_mutex);
463 mutex_init(&ls->ls_clear_proc_locks);
465 ls->ls_recover_buf = kmalloc(dlm_config.buffer_size, GFP_KERNEL);
466 if (!ls->ls_recover_buf)
469 INIT_LIST_HEAD(&ls->ls_recover_list);
470 spin_lock_init(&ls->ls_recover_list_lock);
471 ls->ls_recover_list_count = 0;
472 ls->ls_local_handle = ls;
473 init_waitqueue_head(&ls->ls_wait_general);
474 INIT_LIST_HEAD(&ls->ls_root_list);
475 init_rwsem(&ls->ls_root_sem);
477 down_write(&ls->ls_in_recovery);
479 error = dlm_recoverd_start(ls);
481 log_error(ls, "can't start dlm_recoverd %d", error);
485 spin_lock(&lslist_lock);
486 list_add(&ls->ls_list, &lslist);
487 spin_unlock(&lslist_lock);
489 dlm_create_debug_file(ls);
491 error = kobject_setup(ls);
495 error = kobject_register(&ls->ls_kobj);
499 error = do_uevent(ls, 1);
507 kobject_unregister(&ls->ls_kobj);
509 dlm_delete_debug_file(ls);
510 spin_lock(&lslist_lock);
511 list_del(&ls->ls_list);
512 spin_unlock(&lslist_lock);
513 dlm_recoverd_stop(ls);
515 kfree(ls->ls_recover_buf);
517 kfree(ls->ls_dirtbl);
519 kfree(ls->ls_lkbtbl);
521 kfree(ls->ls_rsbtbl);
525 module_put(THIS_MODULE);
529 int dlm_new_lockspace(char *name, int namelen, void **lockspace,
530 uint32_t flags, int lvblen)
534 mutex_lock(&ls_lock);
536 error = threads_start();
540 error = new_lockspace(name, namelen, lockspace, flags, lvblen);
544 mutex_unlock(&ls_lock);
/* Return 1 if the lockspace still has active remote locks,
 *        2 if the lockspace still has active local locks. */
551 static int lockspace_busy(struct dlm_ls *ls)
553 int i, lkb_found = 0;
556 /* NOTE: We check the lockidtbl here rather than the resource table.
557 This is because there may be LKBs queued as ASTs that have been
558 unlinked from their RSBs and are pending deletion once the AST has
561 for (i = 0; i < ls->ls_lkbtbl_size; i++) {
562 read_lock(&ls->ls_lkbtbl[i].lock);
563 if (!list_empty(&ls->ls_lkbtbl[i].list)) {
565 list_for_each_entry(lkb, &ls->ls_lkbtbl[i].list,
567 if (!lkb->lkb_nodeid) {
568 read_unlock(&ls->ls_lkbtbl[i].lock);
573 read_unlock(&ls->ls_lkbtbl[i].lock);
578 static int release_lockspace(struct dlm_ls *ls, int force)
582 struct list_head *head;
584 int busy = lockspace_busy(ls);
592 dlm_recoverd_stop(ls);
594 remove_lockspace(ls);
596 dlm_delete_debug_file(ls);
600 kfree(ls->ls_recover_buf);
603 * Free direntry structs.
607 kfree(ls->ls_dirtbl);
610 * Free all lkb's on lkbtbl[] lists.
613 for (i = 0; i < ls->ls_lkbtbl_size; i++) {
614 head = &ls->ls_lkbtbl[i].list;
615 while (!list_empty(head)) {
616 lkb = list_entry(head->next, struct dlm_lkb,
619 list_del(&lkb->lkb_idtbl_list);
623 if (lkb->lkb_lvbptr && lkb->lkb_flags & DLM_IFL_MSTCPY)
624 free_lvb(lkb->lkb_lvbptr);
631 kfree(ls->ls_lkbtbl);
634 * Free all rsb's on rsbtbl[] lists
637 for (i = 0; i < ls->ls_rsbtbl_size; i++) {
638 head = &ls->ls_rsbtbl[i].list;
639 while (!list_empty(head)) {
640 rsb = list_entry(head->next, struct dlm_rsb,
643 list_del(&rsb->res_hashchain);
647 head = &ls->ls_rsbtbl[i].toss;
648 while (!list_empty(head)) {
649 rsb = list_entry(head->next, struct dlm_rsb,
651 list_del(&rsb->res_hashchain);
656 kfree(ls->ls_rsbtbl);
659 * Free structures on any other lists
662 kfree(ls->ls_recover_args);
663 dlm_clear_free_entries(ls);
664 dlm_clear_members(ls);
665 dlm_clear_members_gone(ls);
666 kfree(ls->ls_node_array);
667 kobject_unregister(&ls->ls_kobj);
670 mutex_lock(&ls_lock);
674 mutex_unlock(&ls_lock);
676 module_put(THIS_MODULE);
681 * Called when a system has released all its locks and is not going to use the
682 * lockspace any longer. We free everything we're managing for this lockspace.
683 * Remaining nodes will go through the recovery process as if we'd died. The
684 * lockspace must continue to function as usual, participating in recoveries,
685 * until this returns.
687 * Force has 4 possible values:
 * 0 - don't destroy lockspace if it has any LKBs
689 * 1 - destroy lockspace if it has remote LKBs but not if it has local LKBs
690 * 2 - destroy lockspace regardless of LKBs
691 * 3 - destroy lockspace as part of a forced shutdown
694 int dlm_release_lockspace(void *lockspace, int force)
698 ls = dlm_find_lockspace_local(lockspace);
701 dlm_put_lockspace(ls);
702 return release_lockspace(ls, force);