[XFS] Resolve a namespace collision on vnode/vnodeops for FreeBSD porters.
fs/xfs/linux-2.6/xfs_super.c
/*
 * Copyright (c) 2000-2005 Silicon Graphics, Inc.
 * All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write the Free Software Foundation,
 * Inc.,  51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
 */
#include "xfs.h"
#include "xfs_bit.h"
#include "xfs_log.h"
#include "xfs_clnt.h"
#include "xfs_inum.h"
#include "xfs_trans.h"
#include "xfs_sb.h"
#include "xfs_ag.h"
#include "xfs_dir.h"
#include "xfs_dir2.h"
#include "xfs_alloc.h"
#include "xfs_dmapi.h"
#include "xfs_quota.h"
#include "xfs_mount.h"
#include "xfs_bmap_btree.h"
#include "xfs_alloc_btree.h"
#include "xfs_ialloc_btree.h"
#include "xfs_dir_sf.h"
#include "xfs_dir2_sf.h"
#include "xfs_attr_sf.h"
#include "xfs_dinode.h"
#include "xfs_inode.h"
#include "xfs_btree.h"
#include "xfs_ialloc.h"
#include "xfs_bmap.h"
#include "xfs_rtalloc.h"
#include "xfs_error.h"
#include "xfs_itable.h"
#include "xfs_rw.h"
#include "xfs_acl.h"
#include "xfs_cap.h"
#include "xfs_mac.h"
#include "xfs_attr.h"
#include "xfs_buf_item.h"
#include "xfs_utils.h"
#include "xfs_version.h"

#include <linux/namei.h>
#include <linux/init.h>
#include <linux/mount.h>
#include <linux/mempool.h>
#include <linux/writeback.h>
#include <linux/kthread.h>

STATIC struct quotactl_ops xfs_quotactl_operations;
STATIC struct super_operations xfs_super_operations;
STATIC kmem_zone_t *xfs_vnode_zone;
STATIC kmem_zone_t *xfs_ioend_zone;
mempool_t *xfs_ioend_pool;

STATIC struct xfs_mount_args *
xfs_args_allocate(
        struct super_block      *sb,
        int                     silent)
{
        struct xfs_mount_args   *args;

        args = kmem_zalloc(sizeof(struct xfs_mount_args), KM_SLEEP);
        args->logbufs = args->logbufsize = -1;
        strncpy(args->fsname, sb->s_id, MAXNAMELEN);

        /* Copy the already-parsed mount(2) flags we're interested in */
        if (sb->s_flags & MS_DIRSYNC)
                args->flags |= XFSMNT_DIRSYNC;
        if (sb->s_flags & MS_SYNCHRONOUS)
                args->flags |= XFSMNT_WSYNC;
        if (silent)
                args->flags |= XFSMNT_QUIET;
        args->flags |= XFSMNT_32BITINODES;

        return args;
}

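/*
 * Compute the largest file offset the VFS can address with this kernel
 * configuration.  A worked example (illustrative, not from the original
 * source): with 4K pages, 1K blocks, BITS_PER_LONG == 32 and CONFIG_LBD
 * unset, pagefactor = 4096 >> (12 - 10) = 1024 and bitshift = 31, so
 * the returned limit is (1024 << 31) - 1 = 2^41 - 1, just under 2TiB.
 */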
__uint64_t
xfs_max_file_offset(
        unsigned int            blockshift)
{
        unsigned int            pagefactor = 1;
        unsigned int            bitshift = BITS_PER_LONG - 1;

        /* Figure out maximum filesize, on Linux this can depend on
         * the filesystem blocksize (on 32 bit platforms).
         * __block_prepare_write does this in an [unsigned] long...
         *      page->index << (PAGE_CACHE_SHIFT - bbits)
         * So, for page sized blocks (4K on 32 bit platforms),
         * this wraps at around 8Tb (hence MAX_LFS_FILESIZE which is
         *      (((u64)PAGE_CACHE_SIZE << (BITS_PER_LONG-1))-1)
         * but for smaller blocksizes it is less (bbits = log2 bsize).
         * Note1: get_block_t takes a long (implicit cast from above)
         * Note2: The Large Block Device (LBD and HAVE_SECTOR_T) patch
         * can optionally convert the [unsigned] long from above into
         * an [unsigned] long long.
         */

#if BITS_PER_LONG == 32
# if defined(CONFIG_LBD)
        ASSERT(sizeof(sector_t) == 8);
        pagefactor = PAGE_CACHE_SIZE;
        bitshift = BITS_PER_LONG;
# else
        pagefactor = PAGE_CACHE_SIZE >> (PAGE_CACHE_SHIFT - blockshift);
# endif
#endif

        return (((__uint64_t)pagefactor) << bitshift) - 1;
}

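/*
 * Select the inode, file and address-space operation vectors for a new
 * Linux inode from its type bits.  Symlinks only need an a_ops vector
 * when the target is block-backed (i_blocks != 0); short targets are
 * stored inline in the inode and never touch the page cache.
 */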
STATIC __inline__ void
xfs_set_inodeops(
        struct inode            *inode)
{
        switch (inode->i_mode & S_IFMT) {
        case S_IFREG:
                inode->i_op = &xfs_inode_operations;
                inode->i_fop = &xfs_file_operations;
                inode->i_mapping->a_ops = &xfs_address_space_operations;
                break;
        case S_IFDIR:
                inode->i_op = &xfs_dir_inode_operations;
                inode->i_fop = &xfs_dir_file_operations;
                break;
        case S_IFLNK:
                inode->i_op = &xfs_symlink_inode_operations;
                if (inode->i_blocks)
                        inode->i_mapping->a_ops = &xfs_address_space_operations;
                break;
        default:
                inode->i_op = &xfs_inode_operations;
                init_special_inode(inode, inode->i_mode, inode->i_rdev);
                break;
        }
}

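/*
 * Bring the cached Linux inode back in sync with the authoritative
 * XFS incore inode: mode, ownership, device numbers, size, timestamps
 * and the immutable/append/sync/noatime flag bits.
 */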
STATIC __inline__ void
xfs_revalidate_inode(
        xfs_mount_t             *mp,
        bhv_vnode_t             *vp,
        xfs_inode_t             *ip)
{
        struct inode            *inode = vn_to_inode(vp);

        inode->i_mode   = ip->i_d.di_mode;
        inode->i_nlink  = ip->i_d.di_nlink;
        inode->i_uid    = ip->i_d.di_uid;
        inode->i_gid    = ip->i_d.di_gid;

        switch (inode->i_mode & S_IFMT) {
        case S_IFBLK:
        case S_IFCHR:
                inode->i_rdev =
                        MKDEV(sysv_major(ip->i_df.if_u2.if_rdev) & 0x1ff,
                              sysv_minor(ip->i_df.if_u2.if_rdev));
                break;
        default:
                inode->i_rdev = 0;
                break;
        }

        inode->i_blksize = xfs_preferred_iosize(mp);
        inode->i_generation = ip->i_d.di_gen;
        i_size_write(inode, ip->i_d.di_size);
        inode->i_blocks =
                XFS_FSB_TO_BB(mp, ip->i_d.di_nblocks + ip->i_delayed_blks);
        inode->i_atime.tv_sec   = ip->i_d.di_atime.t_sec;
        inode->i_atime.tv_nsec  = ip->i_d.di_atime.t_nsec;
        inode->i_mtime.tv_sec   = ip->i_d.di_mtime.t_sec;
        inode->i_mtime.tv_nsec  = ip->i_d.di_mtime.t_nsec;
        inode->i_ctime.tv_sec   = ip->i_d.di_ctime.t_sec;
        inode->i_ctime.tv_nsec  = ip->i_d.di_ctime.t_nsec;
        if (ip->i_d.di_flags & XFS_DIFLAG_IMMUTABLE)
                inode->i_flags |= S_IMMUTABLE;
        else
                inode->i_flags &= ~S_IMMUTABLE;
        if (ip->i_d.di_flags & XFS_DIFLAG_APPEND)
                inode->i_flags |= S_APPEND;
        else
                inode->i_flags &= ~S_APPEND;
        if (ip->i_d.di_flags & XFS_DIFLAG_SYNC)
                inode->i_flags |= S_SYNC;
        else
                inode->i_flags &= ~S_SYNC;
        if (ip->i_d.di_flags & XFS_DIFLAG_NOATIME)
                inode->i_flags |= S_NOATIME;
        else
                inode->i_flags &= ~S_NOATIME;
        vp->v_flag &= ~VMODIFIED;
}

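/*
 * Attach the XFS inode behavior to the vnode: the bhv_* calls below
 * link the xfs_vnodeops vector into the vnode's behavior chain.  The
 * Linux inode itself is only filled in once the on-disk fields exist;
 * see the comment below on the two-phase create path.
 */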
void
xfs_initialize_vnode(
        bhv_desc_t              *bdp,
        bhv_vnode_t             *vp,
        bhv_desc_t              *inode_bhv,
        int                     unlock)
{
        xfs_inode_t             *ip = XFS_BHVTOI(inode_bhv);
        struct inode            *inode = vn_to_inode(vp);

        if (!inode_bhv->bd_vobj) {
                vp->v_vfsp = bhvtovfs(bdp);
                bhv_desc_init(inode_bhv, ip, vp, &xfs_vnodeops);
                bhv_insert(VN_BHV_HEAD(vp), inode_bhv);
        }

        /*
         * We need to set the ops vectors, and unlock the inode, but if
         * we have been called during the new inode create process, it is
         * too early to fill in the Linux inode.  We will get called a
         * second time once the inode is properly set up, and then we can
         * finish our work.
         */
        if (ip->i_d.di_mode != 0 && unlock && (inode->i_state & I_NEW)) {
                xfs_revalidate_inode(XFS_BHVTOM(bdp), vp, ip);
                xfs_set_inodeops(inode);

                ip->i_flags &= ~XFS_INEW;
                barrier();

                unlock_new_inode(inode);
        }
}

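/*
 * Open a block device exclusively for this mount.  Note the sign
 * convention: open_bdev_excl() reports failure with a negative errno,
 * while XFS internally passes positive error codes, hence the final
 * negation before returning.
 */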
int
xfs_blkdev_get(
        xfs_mount_t             *mp,
        const char              *name,
        struct block_device     **bdevp)
{
        int                     error = 0;

        *bdevp = open_bdev_excl(name, 0, mp);
        if (IS_ERR(*bdevp)) {
                error = PTR_ERR(*bdevp);
                printk("XFS: Invalid device [%s], error=%d\n", name, error);
        }

        return -error;
}

void
xfs_blkdev_put(
        struct block_device     *bdev)
{
        if (bdev)
                close_bdev_excl(bdev);
}

/*
 * Try to write out the superblock using barriers.
 */
STATIC int
xfs_barrier_test(
        xfs_mount_t     *mp)
{
        xfs_buf_t       *sbp = xfs_getsb(mp, 0);
        int             error;

        XFS_BUF_UNDONE(sbp);
        XFS_BUF_UNREAD(sbp);
        XFS_BUF_UNDELAYWRITE(sbp);
        XFS_BUF_WRITE(sbp);
        XFS_BUF_UNASYNC(sbp);
        XFS_BUF_ORDERED(sbp);

        xfsbdstrat(mp, sbp);
        error = xfs_iowait(sbp);

        /*
         * Clear all the flags we set and possible error state in the
         * buffer.  We only did the write to try out whether barriers
         * worked and shouldn't leave any traces in the superblock
         * buffer.
         */
        XFS_BUF_DONE(sbp);
        XFS_BUF_ERROR(sbp, 0);
        XFS_BUF_UNORDERED(sbp);

        xfs_buf_relse(sbp);
        return error;
}

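/*
 * Decide whether write barriers can stay enabled for this mount.
 * Barriers are turned off when the log lives on an external device,
 * when the request queue does not implement ordered writes, or when a
 * trial barrier write to the superblock fails.
 */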
void
xfs_mountfs_check_barriers(xfs_mount_t *mp)
{
        int error;

        if (mp->m_logdev_targp != mp->m_ddev_targp) {
                xfs_fs_cmn_err(CE_NOTE, mp,
                  "Disabling barriers, not supported with external log device");
                mp->m_flags &= ~XFS_MOUNT_BARRIER;
                return;
        }

        if (mp->m_ddev_targp->bt_bdev->bd_disk->queue->ordered ==
                                        QUEUE_ORDERED_NONE) {
                xfs_fs_cmn_err(CE_NOTE, mp,
                  "Disabling barriers, not supported by the underlying device");
                mp->m_flags &= ~XFS_MOUNT_BARRIER;
                return;
        }

        error = xfs_barrier_test(mp);
        if (error) {
                xfs_fs_cmn_err(CE_NOTE, mp,
                  "Disabling barriers, trial barrier write failed");
                mp->m_flags &= ~XFS_MOUNT_BARRIER;
                return;
        }
}

void
xfs_blkdev_issue_flush(
        xfs_buftarg_t           *buftarg)
{
        blkdev_issue_flush(buftarg->bt_bdev, NULL);
}

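/*
 * The Linux inode is embedded in the vnode, so ->alloc_inode and
 * ->destroy_inode really allocate and free vnodes from xfs_vnode_zone,
 * converting between the two views with vn_to_inode()/vn_from_inode().
 */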
STATIC struct inode *
xfs_fs_alloc_inode(
        struct super_block      *sb)
{
        bhv_vnode_t             *vp;

        vp = kmem_zone_alloc(xfs_vnode_zone, KM_SLEEP);
        if (unlikely(!vp))
                return NULL;
        return vn_to_inode(vp);
}

STATIC void
xfs_fs_destroy_inode(
        struct inode            *inode)
{
        kmem_zone_free(xfs_vnode_zone, vn_from_inode(inode));
}

STATIC void
xfs_fs_inode_init_once(
        void                    *vnode,
        kmem_zone_t             *zonep,
        unsigned long           flags)
{
        if ((flags & (SLAB_CTOR_VERIFY|SLAB_CTOR_CONSTRUCTOR)) ==
                      SLAB_CTOR_CONSTRUCTOR)
                inode_init_once(vn_to_inode((bhv_vnode_t *)vnode));
}

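/*
 * Set up the vnode zone and the ioend zone/mempool.  The mempool keeps
 * a reserve of ioend structures (presumably sized at 4 * MAX_BUF_PER_PAGE
 * so writeback can still make progress under memory pressure).
 */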
STATIC int
xfs_init_zones(void)
{
        xfs_vnode_zone = kmem_zone_init_flags(sizeof(bhv_vnode_t), "xfs_vnode",
                                        KM_ZONE_HWALIGN | KM_ZONE_RECLAIM |
                                        KM_ZONE_SPREAD,
                                        xfs_fs_inode_init_once);
        if (!xfs_vnode_zone)
                goto out;

        xfs_ioend_zone = kmem_zone_init(sizeof(xfs_ioend_t), "xfs_ioend");
        if (!xfs_ioend_zone)
                goto out_destroy_vnode_zone;

        xfs_ioend_pool = mempool_create_slab_pool(4 * MAX_BUF_PER_PAGE,
                                                  xfs_ioend_zone);
        if (!xfs_ioend_pool)
                goto out_free_ioend_zone;
        return 0;

 out_free_ioend_zone:
        kmem_zone_destroy(xfs_ioend_zone);
 out_destroy_vnode_zone:
        kmem_zone_destroy(xfs_vnode_zone);
 out:
        return -ENOMEM;
}

STATIC void
xfs_destroy_zones(void)
{
        mempool_destroy(xfs_ioend_pool);
        kmem_zone_destroy(xfs_vnode_zone);
        kmem_zone_destroy(xfs_ioend_zone);
}

/*
 * Attempt to flush the inode, this will actually fail
 * if the inode is pinned, but we dirty the inode again
 * at the point when it is unpinned after a log write,
 * since this is when the inode itself becomes flushable.
 */
STATIC int
xfs_fs_write_inode(
        struct inode            *inode,
        int                     sync)
{
        bhv_vnode_t             *vp = vn_from_inode(inode);
        int                     error = 0, flags = FLUSH_INODE;

        if (vp) {
                vn_trace_entry(vp, __FUNCTION__, (inst_t *)__return_address);
                if (sync)
                        flags |= FLUSH_SYNC;
                error = bhv_vop_iflush(vp, flags);
                if (error == EAGAIN)
                        error = sync ? bhv_vop_iflush(vp, flags | FLUSH_LOG) : 0;
        }
        return -error;
}

STATIC void
xfs_fs_clear_inode(
        struct inode            *inode)
{
        bhv_vnode_t             *vp = vn_from_inode(inode);

        vn_trace_entry(vp, __FUNCTION__, (inst_t *)__return_address);

        XFS_STATS_INC(vn_rele);
        XFS_STATS_INC(vn_remove);
        XFS_STATS_INC(vn_reclaim);
        XFS_STATS_DEC(vn_active);

        /*
         * This can happen because xfs_iget_core calls xfs_idestroy if we
         * find an inode with di_mode == 0 but without IGET_CREATE set.
         */
        if (VNHEAD(vp))
                bhv_vop_inactive(vp, NULL);

        VN_LOCK(vp);
        vp->v_flag &= ~VMODIFIED;
        VN_UNLOCK(vp, 0);

        if (VNHEAD(vp))
                if (bhv_vop_reclaim(vp))
                        panic("%s: cannot reclaim 0x%p\n", __FUNCTION__, vp);

        ASSERT(VNHEAD(vp) == NULL);

#ifdef XFS_VNODE_TRACE
        ktrace_free(vp->v_trace);
#endif
}

/*
 * Enqueue a work item to be picked up by the vfs xfssyncd thread.
 * Doing this has two advantages:
 * - It saves on stack space, which is tight in certain situations
 * - It can be used (with care) as a mechanism to avoid deadlocks.
 * Flushing while allocating in a full filesystem requires both.
 */
STATIC void
xfs_syncd_queue_work(
        struct bhv_vfs  *vfs,
        void            *data,
        void            (*syncer)(bhv_vfs_t *, void *))
{
        struct bhv_vfs_sync_work *work;

        work = kmem_alloc(sizeof(struct bhv_vfs_sync_work), KM_SLEEP);
        INIT_LIST_HEAD(&work->w_list);
        work->w_syncer = syncer;
        work->w_data = data;
        work->w_vfs = vfs;
        spin_lock(&vfs->vfs_sync_lock);
        list_add_tail(&work->w_list, &vfs->vfs_sync_list);
        spin_unlock(&vfs->vfs_sync_lock);
        wake_up_process(vfs->vfs_sync_task);
}

/*
 * Flush delayed allocate data, attempting to free up reserved space
 * from existing allocations.  At this point a new allocation attempt
 * has failed with ENOSPC and we are in the process of scratching our
 * heads, looking about for more room...
 */
STATIC void
xfs_flush_inode_work(
        bhv_vfs_t       *vfs,
        void            *inode)
{
        filemap_flush(((struct inode *)inode)->i_mapping);
        iput((struct inode *)inode);
}

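/*
 * Queue a data flush for one inode on xfssyncd, then give it half a
 * second to make progress before the caller retries its allocation;
 * returning immediately would defeat the point of flushing.
 */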
void
xfs_flush_inode(
        xfs_inode_t     *ip)
{
        struct inode    *inode = vn_to_inode(XFS_ITOV(ip));
        struct bhv_vfs  *vfs = XFS_MTOVFS(ip->i_mount);

        igrab(inode);
        xfs_syncd_queue_work(vfs, inode, xfs_flush_inode_work);
        delay(msecs_to_jiffies(500));
}

/*
 * This is the "bigger hammer" version of xfs_flush_inode_work...
 * (IOW, "If at first you don't succeed, use a Bigger Hammer").
 */
STATIC void
xfs_flush_device_work(
        bhv_vfs_t       *vfs,
        void            *inode)
{
        sync_blockdev(vfs->vfs_super->s_bdev);
        iput((struct inode *)inode);
}

void
xfs_flush_device(
        xfs_inode_t     *ip)
{
        struct inode    *inode = vn_to_inode(XFS_ITOV(ip));
        struct bhv_vfs  *vfs = XFS_MTOVFS(ip->i_mount);

        igrab(inode);
        xfs_syncd_queue_work(vfs, inode, xfs_flush_device_work);
        delay(msecs_to_jiffies(500));
        xfs_log_force(ip->i_mount, (xfs_lsn_t)0, XFS_LOG_FORCE|XFS_LOG_SYNC);
}

STATIC void
vfs_sync_worker(
        bhv_vfs_t       *vfsp,
        void            *unused)
{
        int             error;

        if (!(vfsp->vfs_flag & VFS_RDONLY))
                error = bhv_vfs_sync(vfsp, SYNC_FSDATA | SYNC_BDFLUSH | \
                                        SYNC_ATTR | SYNC_REFCACHE, NULL);
        vfsp->vfs_sync_seq++;
        wmb();
        wake_up(&vfsp->vfs_wait_single_sync_task);
}

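/*
 * Main loop of the per-mount sync daemon.  It wakes up every
 * xfs_syncd_centisecs to queue the periodic vfs_sync_worker item, and
 * drains any work queued via xfs_syncd_queue_work() before exiting.
 */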
STATIC int
xfssyncd(
        void                    *arg)
{
        long                    timeleft;
        bhv_vfs_t               *vfsp = (bhv_vfs_t *) arg;
        bhv_vfs_sync_work_t     *work, *n;
        LIST_HEAD               (tmp);

        timeleft = xfs_syncd_centisecs * msecs_to_jiffies(10);
        for (;;) {
                timeleft = schedule_timeout_interruptible(timeleft);
                /* swsusp */
                try_to_freeze();
                if (kthread_should_stop() && list_empty(&vfsp->vfs_sync_list))
                        break;

                spin_lock(&vfsp->vfs_sync_lock);
                /*
                 * We can get woken by laptop mode, to do a sync -
                 * that's the (only!) case where the list would be
                 * empty with time remaining.
                 */
                if (!timeleft || list_empty(&vfsp->vfs_sync_list)) {
                        if (!timeleft)
                                timeleft = xfs_syncd_centisecs *
                                                        msecs_to_jiffies(10);
                        INIT_LIST_HEAD(&vfsp->vfs_sync_work.w_list);
                        list_add_tail(&vfsp->vfs_sync_work.w_list,
                                        &vfsp->vfs_sync_list);
                }
                list_for_each_entry_safe(work, n, &vfsp->vfs_sync_list, w_list)
                        list_move(&work->w_list, &tmp);
                spin_unlock(&vfsp->vfs_sync_lock);

                list_for_each_entry_safe(work, n, &tmp, w_list) {
                        (*work->w_syncer)(vfsp, work->w_data);
                        list_del(&work->w_list);
                        if (work == &vfsp->vfs_sync_work)
                                continue;
                        kmem_free(work, sizeof(struct bhv_vfs_sync_work));
                }
        }

        return 0;
}

STATIC int
xfs_fs_start_syncd(
        bhv_vfs_t               *vfsp)
{
        vfsp->vfs_sync_work.w_syncer = vfs_sync_worker;
        vfsp->vfs_sync_work.w_vfs = vfsp;
        vfsp->vfs_sync_task = kthread_run(xfssyncd, vfsp, "xfssyncd");
        if (IS_ERR(vfsp->vfs_sync_task))
                return -PTR_ERR(vfsp->vfs_sync_task);
        return 0;
}

STATIC void
xfs_fs_stop_syncd(
        bhv_vfs_t               *vfsp)
{
        kthread_stop(vfsp->vfs_sync_task);
}

STATIC void
xfs_fs_put_super(
        struct super_block      *sb)
{
        bhv_vfs_t               *vfsp = vfs_from_sb(sb);
        int                     error;

        xfs_fs_stop_syncd(vfsp);
        bhv_vfs_sync(vfsp, SYNC_ATTR | SYNC_DELWRI, NULL);
        error = bhv_vfs_unmount(vfsp, 0, NULL);
        if (error) {
                printk("XFS: unmount got error=%d\n", error);
                printk("%s: vfs=0x%p left dangling!\n", __FUNCTION__, vfsp);
        } else {
                vfs_deallocate(vfsp);
        }
}

STATIC void
xfs_fs_write_super(
        struct super_block      *sb)
{
        if (!(sb->s_flags & MS_RDONLY))
                bhv_vfs_sync(vfs_from_sb(sb), SYNC_FSDATA, NULL);
        sb->s_dirt = 0;
}

STATIC int
xfs_fs_sync_super(
        struct super_block      *sb,
        int                     wait)
{
        bhv_vfs_t               *vfsp = vfs_from_sb(sb);
        int                     error;
        int                     flags;

        if (unlikely(sb->s_frozen == SB_FREEZE_WRITE))
                flags = SYNC_QUIESCE;
        else
                flags = SYNC_FSDATA | (wait ? SYNC_WAIT : 0);

        error = bhv_vfs_sync(vfsp, flags, NULL);
        sb->s_dirt = 0;

        if (unlikely(laptop_mode)) {
                int     prev_sync_seq = vfsp->vfs_sync_seq;

                /*
                 * The disk must be active because we're syncing.
                 * We schedule xfssyncd now (now that the disk is
                 * active) instead of later (when it might not be).
                 */
                wake_up_process(vfsp->vfs_sync_task);
                /*
                 * We have to wait for the sync iteration to complete.
                 * If we don't, the disk activity caused by the sync
                 * will come after the sync is completed, and that
                 * triggers another sync from laptop mode.
                 */
                wait_event(vfsp->vfs_wait_single_sync_task,
                                vfsp->vfs_sync_seq != prev_sync_seq);
        }

        return -error;
}

STATIC int
xfs_fs_statfs(
        struct super_block      *sb,
        struct kstatfs          *statp)
{
        return -bhv_vfs_statvfs(vfs_from_sb(sb), statp, NULL);
}

STATIC int
xfs_fs_remount(
        struct super_block      *sb,
        int                     *flags,
        char                    *options)
{
        bhv_vfs_t               *vfsp = vfs_from_sb(sb);
        struct xfs_mount_args   *args = xfs_args_allocate(sb, 0);
        int                     error;

        error = bhv_vfs_parseargs(vfsp, options, args, 1);
        if (!error)
                error = bhv_vfs_mntupdate(vfsp, flags, args);
        kmem_free(args, sizeof(*args));
        return -error;
}

STATIC void
xfs_fs_lockfs(
        struct super_block      *sb)
{
        bhv_vfs_freeze(vfs_from_sb(sb));
}

STATIC int
xfs_fs_show_options(
        struct seq_file         *m,
        struct vfsmount         *mnt)
{
        return -bhv_vfs_showargs(vfs_from_sb(mnt->mnt_sb), m);
}

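/*
 * quotactl entry points: translate the generic Linux quota interface
 * into XFS Q_X* commands, with user, group and project quotas mapped
 * to distinct command codes.
 */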
STATIC int
xfs_fs_quotasync(
        struct super_block      *sb,
        int                     type)
{
        return -bhv_vfs_quotactl(vfs_from_sb(sb), Q_XQUOTASYNC, 0, NULL);
}

STATIC int
xfs_fs_getxstate(
        struct super_block      *sb,
        struct fs_quota_stat    *fqs)
{
        return -bhv_vfs_quotactl(vfs_from_sb(sb), Q_XGETQSTAT, 0, (caddr_t)fqs);
}

STATIC int
xfs_fs_setxstate(
        struct super_block      *sb,
        unsigned int            flags,
        int                     op)
{
        return -bhv_vfs_quotactl(vfs_from_sb(sb), op, 0, (caddr_t)&flags);
}

STATIC int
xfs_fs_getxquota(
        struct super_block      *sb,
        int                     type,
        qid_t                   id,
        struct fs_disk_quota    *fdq)
{
        return -bhv_vfs_quotactl(vfs_from_sb(sb),
                                 (type == USRQUOTA) ? Q_XGETQUOTA :
                                  ((type == GRPQUOTA) ? Q_XGETGQUOTA :
                                   Q_XGETPQUOTA), id, (caddr_t)fdq);
}

STATIC int
xfs_fs_setxquota(
        struct super_block      *sb,
        int                     type,
        qid_t                   id,
        struct fs_disk_quota    *fdq)
{
        return -bhv_vfs_quotactl(vfs_from_sb(sb),
                                 (type == USRQUOTA) ? Q_XSETQLIM :
                                  ((type == GRPQUOTA) ? Q_XSETGQLIM :
                                   Q_XSETPQLIM), id, (caddr_t)fdq);
}

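/*
 * Mount-time setup: allocate the vfs and mount args, parse options and
 * mount, then derive the superblock geometry from statvfs and wire up
 * the root dentry before starting the sync daemon.
 */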
STATIC int
xfs_fs_fill_super(
        struct super_block      *sb,
        void                    *data,
        int                     silent)
{
        struct bhv_vnode        *rootvp;
        struct bhv_vfs          *vfsp = vfs_allocate(sb);
        struct xfs_mount_args   *args = xfs_args_allocate(sb, silent);
        struct kstatfs          statvfs;
        int                     error;

        bhv_insert_all_vfsops(vfsp);

        error = bhv_vfs_parseargs(vfsp, (char *)data, args, 0);
        if (error) {
                bhv_remove_all_vfsops(vfsp, 1);
                goto fail_vfsop;
        }

        sb_min_blocksize(sb, BBSIZE);
#ifdef CONFIG_XFS_EXPORT
        sb->s_export_op = &xfs_export_operations;
#endif
        sb->s_qcop = &xfs_quotactl_operations;
        sb->s_op = &xfs_super_operations;

        error = bhv_vfs_mount(vfsp, args, NULL);
        if (error) {
                bhv_remove_all_vfsops(vfsp, 1);
                goto fail_vfsop;
        }

        error = bhv_vfs_statvfs(vfsp, &statvfs, NULL);
        if (error)
                goto fail_unmount;

        sb->s_dirt = 1;
        sb->s_magic = statvfs.f_type;
        sb->s_blocksize = statvfs.f_bsize;
        sb->s_blocksize_bits = ffs(statvfs.f_bsize) - 1;
        sb->s_maxbytes = xfs_max_file_offset(sb->s_blocksize_bits);
        sb->s_time_gran = 1;
        set_posix_acl_flag(sb);

        error = bhv_vfs_root(vfsp, &rootvp);
        if (error)
                goto fail_unmount;

        sb->s_root = d_alloc_root(vn_to_inode(rootvp));
        if (!sb->s_root) {
                error = ENOMEM;
                goto fail_vnrele;
        }
        if (is_bad_inode(sb->s_root->d_inode)) {
                error = EINVAL;
                goto fail_vnrele;
        }
        if ((error = xfs_fs_start_syncd(vfsp)))
                goto fail_vnrele;
        vn_trace_exit(rootvp, __FUNCTION__, (inst_t *)__return_address);

        kmem_free(args, sizeof(*args));
        return 0;

fail_vnrele:
        if (sb->s_root) {
                dput(sb->s_root);
                sb->s_root = NULL;
        } else {
                VN_RELE(rootvp);
        }

fail_unmount:
        bhv_vfs_unmount(vfsp, 0, NULL);

fail_vfsop:
        vfs_deallocate(vfsp);
        kmem_free(args, sizeof(*args));
        return -error;
}

STATIC struct super_block *
xfs_fs_get_sb(
        struct file_system_type *fs_type,
        int                     flags,
        const char              *dev_name,
        void                    *data)
{
        return get_sb_bdev(fs_type, flags, dev_name, data, xfs_fs_fill_super);
}

STATIC struct super_operations xfs_super_operations = {
        .alloc_inode            = xfs_fs_alloc_inode,
        .destroy_inode          = xfs_fs_destroy_inode,
        .write_inode            = xfs_fs_write_inode,
        .clear_inode            = xfs_fs_clear_inode,
        .put_super              = xfs_fs_put_super,
        .write_super            = xfs_fs_write_super,
        .sync_fs                = xfs_fs_sync_super,
        .write_super_lockfs     = xfs_fs_lockfs,
        .statfs                 = xfs_fs_statfs,
        .remount_fs             = xfs_fs_remount,
        .show_options           = xfs_fs_show_options,
};

STATIC struct quotactl_ops xfs_quotactl_operations = {
        .quota_sync             = xfs_fs_quotasync,
        .get_xstate             = xfs_fs_getxstate,
        .set_xstate             = xfs_fs_setxstate,
        .get_xquota             = xfs_fs_getxquota,
        .set_xquota             = xfs_fs_setxquota,
};

STATIC struct file_system_type xfs_fs_type = {
        .owner                  = THIS_MODULE,
        .name                   = "xfs",
        .get_sb                 = xfs_fs_get_sb,
        .kill_sb                = kill_block_super,
        .fs_flags               = FS_REQUIRES_DEV,
};


STATIC int __init
init_xfs_fs( void )
{
        int                     error;
        struct sysinfo          si;
        static char             message[] __initdata = KERN_INFO \
                XFS_VERSION_STRING " with " XFS_BUILD_OPTIONS " enabled\n";

        printk(message);

        si_meminfo(&si);
        xfs_physmem = si.totalram;

        ktrace_init(64);

        error = xfs_init_zones();
        if (error < 0)
                goto undo_zones;

        error = xfs_buf_init();
        if (error < 0)
                goto undo_buffers;

        vn_init();
        xfs_init();
        uuid_init();
        vfs_initquota();

        error = register_filesystem(&xfs_fs_type);
        if (error)
                goto undo_register;
        return 0;

undo_register:
        xfs_buf_terminate();

undo_buffers:
        xfs_destroy_zones();

undo_zones:
        return error;
}

STATIC void __exit
exit_xfs_fs( void )
{
        vfs_exitquota();
        unregister_filesystem(&xfs_fs_type);
        xfs_cleanup();
        xfs_buf_terminate();
        xfs_destroy_zones();
        ktrace_uninit();
}

module_init(init_xfs_fs);
module_exit(exit_xfs_fs);

MODULE_AUTHOR("Silicon Graphics, Inc.");
MODULE_DESCRIPTION(XFS_VERSION_STRING " with " XFS_BUILD_OPTIONS " enabled");
MODULE_LICENSE("GPL");