[CIFS] fix typo in previous
1 /*
2  *   fs/cifs/file.c
3  *
4  *   vfs operations that deal with files
5  *
6  *   Copyright (C) International Business Machines  Corp., 2002,2007
7  *   Author(s): Steve French (sfrench@us.ibm.com)
8  *              Jeremy Allison (jra@samba.org)
9  *
10  *   This library is free software; you can redistribute it and/or modify
11  *   it under the terms of the GNU Lesser General Public License as published
12  *   by the Free Software Foundation; either version 2.1 of the License, or
13  *   (at your option) any later version.
14  *
15  *   This library is distributed in the hope that it will be useful,
16  *   but WITHOUT ANY WARRANTY; without even the implied warranty of
17  *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See
18  *   the GNU Lesser General Public License for more details.
19  *
20  *   You should have received a copy of the GNU Lesser General Public License
21  *   along with this library; if not, write to the Free Software
22  *   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
23  */
24 #include <linux/fs.h>
25 #include <linux/backing-dev.h>
26 #include <linux/stat.h>
27 #include <linux/fcntl.h>
28 #include <linux/pagemap.h>
29 #include <linux/pagevec.h>
30 #include <linux/writeback.h>
31 #include <linux/task_io_accounting_ops.h>
32 #include <linux/delay.h>
33 #include <asm/div64.h>
34 #include "cifsfs.h"
35 #include "cifspdu.h"
36 #include "cifsglob.h"
37 #include "cifsproto.h"
38 #include "cifs_unicode.h"
39 #include "cifs_debug.h"
40 #include "cifs_fs_sb.h"
41
42 static inline struct cifsFileInfo *cifs_init_private(
43         struct cifsFileInfo *private_data, struct inode *inode,
44         struct file *file, __u16 netfid)
45 {
46         memset(private_data, 0, sizeof(struct cifsFileInfo));
47         private_data->netfid = netfid;
48         private_data->pid = current->tgid;
49         init_MUTEX(&private_data->fh_sem);
50         mutex_init(&private_data->lock_mutex);
51         INIT_LIST_HEAD(&private_data->llist);
52         private_data->pfile = file; /* needed for writepage */
53         private_data->pInode = inode;
54         private_data->invalidHandle = FALSE;
55         private_data->closePend = FALSE;
56         /* we have to track the number of writers to the inode, since
57         writepages does not tell us which handle the write is for, so there
58         can be a close (overlapping with a write) of the filehandle that
59         cifs_writepages chose to use */
60         atomic_set(&private_data->wrtPending, 0);
61
62         return private_data;
63 }
64
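/* Map the POSIX O_ACCMODE bits of the open flags to an SMB desired-access mask */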
65 static inline int cifs_convert_flags(unsigned int flags)
66 {
67         if ((flags & O_ACCMODE) == O_RDONLY)
68                 return GENERIC_READ;
69         else if ((flags & O_ACCMODE) == O_WRONLY)
70                 return GENERIC_WRITE;
71         else if ((flags & O_ACCMODE) == O_RDWR) {
72                 /* GENERIC_ALL is too much permission to request; it
73                    can cause an unnecessary access denied on create */
74                 /* return GENERIC_ALL; */
75                 return (GENERIC_READ | GENERIC_WRITE);
76         }
77
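        /* fallback for unexpected flag combinations: request a fixed access
           mask (0x20197 appears to be read/write data, append, write EA,
           read/write attributes, READ_CONTROL) rather than GENERIC_ALL */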
78         return 0x20197;
79 }
80
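/* Map O_CREAT/O_EXCL/O_TRUNC combinations to the SMB create disposition */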
81 static inline int cifs_get_disposition(unsigned int flags)
82 {
83         if ((flags & (O_CREAT | O_EXCL)) == (O_CREAT | O_EXCL))
84                 return FILE_CREATE;
85         else if ((flags & (O_CREAT | O_TRUNC)) == (O_CREAT | O_TRUNC))
86                 return FILE_OVERWRITE_IF;
87         else if ((flags & O_CREAT) == O_CREAT)
88                 return FILE_OPEN_IF;
89         else if ((flags & O_TRUNC) == O_TRUNC)
90                 return FILE_OVERWRITE;
91         else
92                 return FILE_OPEN;
93 }
94
95 /* all arguments to this function must be checked for validity in caller */
96 static inline int cifs_open_inode_helper(struct inode *inode, struct file *file,
97         struct cifsInodeInfo *pCifsInode, struct cifsFileInfo *pCifsFile,
98         struct cifsTconInfo *pTcon, int *oplock, FILE_ALL_INFO *buf,
99         char *full_path, int xid)
100 {
101         struct timespec temp;
102         int rc;
103
104         /* want handles we can use to read with first
105            in the list so we do not have to walk the
106            list to search for one in prepare_write */
107         if ((file->f_flags & O_ACCMODE) == O_WRONLY) {
108                 list_add_tail(&pCifsFile->flist,
109                               &pCifsInode->openFileList);
110         } else {
111                 list_add(&pCifsFile->flist,
112                          &pCifsInode->openFileList);
113         }
114         write_unlock(&GlobalSMBSeslock);
115         if (pCifsInode->clientCanCacheRead) {
116                 /* we have the inode open somewhere else
117                    no need to discard cache data */
118                 goto client_can_cache;
119         }
120
121         /* BB need same check in cifs_create too? */
122         /* if not oplocked, invalidate inode pages if mtime or file
123            size changed */
124         temp = cifs_NTtimeToUnix(le64_to_cpu(buf->LastWriteTime));
125         if (timespec_equal(&file->f_path.dentry->d_inode->i_mtime, &temp) &&
126                            (file->f_path.dentry->d_inode->i_size ==
127                             (loff_t)le64_to_cpu(buf->EndOfFile))) {
128                 cFYI(1, ("inode unchanged on server"));
129         } else {
130                 if (file->f_path.dentry->d_inode->i_mapping) {
131                 /* BB no need to lock inode until after invalidate
132                    since namei code should already have it locked? */
133                         filemap_write_and_wait(file->f_path.dentry->d_inode->i_mapping);
134                 }
135                 cFYI(1, ("invalidating remote inode since open detected it "
136                          "changed"));
137                 invalidate_remote_inode(file->f_path.dentry->d_inode);
138         }
139
140 client_can_cache:
141         if (pTcon->unix_ext)
142                 rc = cifs_get_inode_info_unix(&file->f_path.dentry->d_inode,
143                         full_path, inode->i_sb, xid);
144         else
145                 rc = cifs_get_inode_info(&file->f_path.dentry->d_inode,
146                         full_path, buf, inode->i_sb, xid);
147
148         if ((*oplock & 0xF) == OPLOCK_EXCLUSIVE) {
149                 pCifsInode->clientCanCacheAll = TRUE;
150                 pCifsInode->clientCanCacheRead = TRUE;
151                 cFYI(1, ("Exclusive Oplock granted on inode %p",
152                          file->f_path.dentry->d_inode));
153         } else if ((*oplock & 0xF) == OPLOCK_READ)
154                 pCifsInode->clientCanCacheRead = TRUE;
155
156         return rc;
157 }
158
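/*
 * Open a file.  If cifs_create() just created it, reuse the cifsFileInfo it
 * left on the inode's open-file list; otherwise send an SMB open (NT style,
 * or legacy OpenX for old servers), add the new handle to the inode and tcon
 * lists, and note any oplock granted so data can be cached locally.
 */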
159 int cifs_open(struct inode *inode, struct file *file)
160 {
161         int rc = -EACCES;
162         int xid, oplock;
163         struct cifs_sb_info *cifs_sb;
164         struct cifsTconInfo *pTcon;
165         struct cifsFileInfo *pCifsFile;
166         struct cifsInodeInfo *pCifsInode;
167         struct list_head *tmp;
168         char *full_path = NULL;
169         int desiredAccess;
170         int disposition;
171         __u16 netfid;
172         FILE_ALL_INFO *buf = NULL;
173
174         xid = GetXid();
175
176         cifs_sb = CIFS_SB(inode->i_sb);
177         pTcon = cifs_sb->tcon;
178
179         if (file->f_flags & O_CREAT) {
180                 /* search inode for this file and fill in file->private_data */
181                 pCifsInode = CIFS_I(file->f_path.dentry->d_inode);
182                 read_lock(&GlobalSMBSeslock);
183                 list_for_each(tmp, &pCifsInode->openFileList) {
184                         pCifsFile = list_entry(tmp, struct cifsFileInfo,
185                                                flist);
186                         if ((pCifsFile->pfile == NULL) &&
187                             (pCifsFile->pid == current->tgid)) {
188                                 /* mode set in cifs_create */
189
190                                 /* needed for writepage */
191                                 pCifsFile->pfile = file;
192
193                                 file->private_data = pCifsFile;
194                                 break;
195                         }
196                 }
197                 read_unlock(&GlobalSMBSeslock);
198                 if (file->private_data != NULL) {
199                         rc = 0;
200                         FreeXid(xid);
201                         return rc;
202                 } else {
203                         if (file->f_flags & O_EXCL)
204                                 cERROR(1, ("could not find file instance for "
205                                            "new file %p", file));
206                 }
207         }
208
209         full_path = build_path_from_dentry(file->f_path.dentry);
210         if (full_path == NULL) {
211                 FreeXid(xid);
212                 return -ENOMEM;
213         }
214
215         cFYI(1, ("inode = 0x%p file flags are 0x%x for %s",
216                  inode, file->f_flags, full_path));
217         desiredAccess = cifs_convert_flags(file->f_flags);
218
219 /*********************************************************************
220  *  open flag mapping table:
221  *
222  *      POSIX Flag            CIFS Disposition
223  *      ----------            ----------------
224  *      O_CREAT               FILE_OPEN_IF
225  *      O_CREAT | O_EXCL      FILE_CREATE
226  *      O_CREAT | O_TRUNC     FILE_OVERWRITE_IF
227  *      O_TRUNC               FILE_OVERWRITE
228  *      none of the above     FILE_OPEN
229  *
 230  *      Note that no POSIX flag maps directly to the FILE_SUPERSEDE
 231  *      disposition (ie create whether or not the file exists);
 232  *      O_CREAT | O_TRUNC is similar but truncates the existing
 233  *      file rather than creating a new file as FILE_SUPERSEDE does
 234  *      (which uses the attributes / metadata passed in on the open call).
 235  *
 236  *      O_SYNC is a reasonable match to the CIFS writethrough flag
 237  *      and the read/write flags match reasonably.  O_LARGEFILE
 238  *      is irrelevant because largefile support is always used
 239  *      by this client. Flags O_APPEND, O_DIRECT, O_DIRECTORY,
 240  *      O_FASYNC, O_NOFOLLOW and O_NONBLOCK need further investigation.
241  *********************************************************************/
242
243         disposition = cifs_get_disposition(file->f_flags);
244
245         if (oplockEnabled)
246                 oplock = REQ_OPLOCK;
247         else
248                 oplock = FALSE;
249
250         /* BB pass O_SYNC flag through on file attributes .. BB */
251
252         /* Also refresh inode by passing in file_info buf returned by SMBOpen
253            and calling get_inode_info with returned buf (at least helps
254            non-Unix server case) */
255
256         /* BB we can not do this if this is the second open of a file
257            and the first handle has writebehind data, we might be
258            able to simply do a filemap_fdatawrite/filemap_fdatawait first */
259         buf = kmalloc(sizeof(FILE_ALL_INFO), GFP_KERNEL);
260         if (!buf) {
261                 rc = -ENOMEM;
262                 goto out;
263         }
264
265         if (cifs_sb->tcon->ses->capabilities & CAP_NT_SMBS)
266                 rc = CIFSSMBOpen(xid, pTcon, full_path, disposition,
267                          desiredAccess, CREATE_NOT_DIR, &netfid, &oplock, buf,
268                          cifs_sb->local_nls, cifs_sb->mnt_cifs_flags
269                                  & CIFS_MOUNT_MAP_SPECIAL_CHR);
270         else
271                 rc = -EIO; /* no NT SMB support, fall through to legacy open below */
272
273         if (rc == -EIO) {
274                 /* Old server, try legacy style OpenX */
275                 rc = SMBLegacyOpen(xid, pTcon, full_path, disposition,
276                         desiredAccess, CREATE_NOT_DIR, &netfid, &oplock, buf,
277                         cifs_sb->local_nls, cifs_sb->mnt_cifs_flags
278                                 & CIFS_MOUNT_MAP_SPECIAL_CHR);
279         }
280         if (rc) {
281                 cFYI(1, ("cifs_open returned 0x%x", rc));
282                 goto out;
283         }
284         file->private_data =
285                 kmalloc(sizeof(struct cifsFileInfo), GFP_KERNEL);
286         if (file->private_data == NULL) {
287                 rc = -ENOMEM;
288                 goto out;
289         }
290         pCifsFile = cifs_init_private(file->private_data, inode, file, netfid);
291         write_lock(&GlobalSMBSeslock);
292         list_add(&pCifsFile->tlist, &pTcon->openFileList);
293
294         pCifsInode = CIFS_I(file->f_path.dentry->d_inode);
295         if (pCifsInode) {
296                 rc = cifs_open_inode_helper(inode, file, pCifsInode,
297                                             pCifsFile, pTcon,
298                                             &oplock, buf, full_path, xid);
299         } else {
300                 write_unlock(&GlobalSMBSeslock);
301         }
302
303         if (oplock & CIFS_CREATE_ACTION) {
304                 /* time to set mode which we can not set earlier due to
305                    problems creating new read-only files */
306                 if (pTcon->unix_ext) {
307                         CIFSSMBUnixSetPerms(xid, pTcon, full_path,
308                                             inode->i_mode,
309                                             (__u64)-1, (__u64)-1, 0 /* dev */,
310                                             cifs_sb->local_nls,
311                                             cifs_sb->mnt_cifs_flags &
312                                                 CIFS_MOUNT_MAP_SPECIAL_CHR);
313                 } else {
314                         /* BB implement via Windows security descriptors eg
315                            CIFSSMBWinSetPerms(xid, pTcon, full_path, mode,
316                                               -1, -1, local_nls);
317                            in the meantime could set r/o dos attribute when
318                            perms are eg: mode & 0222 == 0 */
319                 }
320         }
321
322 out:
323         kfree(buf);
324         kfree(full_path);
325         FreeXid(xid);
326         return rc;
327 }
328
329 /* Try to reacquire byte range locks that were released when the session
330    to the server was lost */
331 static int cifs_relock_file(struct cifsFileInfo *cifsFile)
332 {
333         int rc = 0;
334
335 /* BB list all locks open on this file and relock */
336
337         return rc;
338 }
339
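/*
 * Reopen a handle that was invalidated when the connection to the server was
 * lost.  Re-sends the open with FILE_OPEN disposition, updates the netfid,
 * optionally flushes and revalidates cached inode data (can_flush), restores
 * the cache flags from the newly granted oplock, and re-acquires byte-range
 * locks.
 */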
340 static int cifs_reopen_file(struct file *file, int can_flush)
341 {
342         int rc = -EACCES;
343         int xid, oplock;
344         struct cifs_sb_info *cifs_sb;
345         struct cifsTconInfo *pTcon;
346         struct cifsFileInfo *pCifsFile;
347         struct cifsInodeInfo *pCifsInode;
348         struct inode *inode;
349         char *full_path = NULL;
350         int desiredAccess;
351         int disposition = FILE_OPEN;
352         __u16 netfid;
353
354         if (file->private_data) {
355                 pCifsFile = (struct cifsFileInfo *)file->private_data;
356         } else
357                 return -EBADF;
358
359         xid = GetXid();
360         down(&pCifsFile->fh_sem);
361         if (pCifsFile->invalidHandle == FALSE) {
362                 up(&pCifsFile->fh_sem);
363                 FreeXid(xid);
364                 return 0;
365         }
366
367         if (file->f_path.dentry == NULL) {
368                 cERROR(1, ("no valid name if dentry freed"));
369                 dump_stack();
370                 rc = -EBADF;
371                 goto reopen_error_exit;
372         }
373
374         inode = file->f_path.dentry->d_inode;
375         if (inode == NULL) {
376                 cERROR(1, ("inode not valid"));
377                 dump_stack();
378                 rc = -EBADF;
379                 goto reopen_error_exit;
380         }
381
382         cifs_sb = CIFS_SB(inode->i_sb);
383         pTcon = cifs_sb->tcon;
384
385 /* can not grab rename sem here because various ops, including
386    those that already have the rename sem can end up causing writepage
387    to get called and if the server was down that means we end up here,
388    and we can never tell if the caller already has the rename_sem */
389         full_path = build_path_from_dentry(file->f_path.dentry);
390         if (full_path == NULL) {
391                 rc = -ENOMEM;
392 reopen_error_exit:
393                 up(&pCifsFile->fh_sem);
394                 FreeXid(xid);
395                 return rc;
396         }
397
398         cFYI(1, ("inode = 0x%p file flags 0x%x for %s",
399                  inode, file->f_flags, full_path));
400         desiredAccess = cifs_convert_flags(file->f_flags);
401
402         if (oplockEnabled)
403                 oplock = REQ_OPLOCK;
404         else
405                 oplock = FALSE;
406
407         /* Can not refresh inode by passing in file_info buf to be returned
408            by SMBOpen and then calling get_inode_info with returned buf
409            since file might have write behind data that needs to be flushed
410            and server version of file size can be stale. If we knew for sure
411            that inode was not dirty locally we could do this */
412
413         rc = CIFSSMBOpen(xid, pTcon, full_path, disposition, desiredAccess,
414                          CREATE_NOT_DIR, &netfid, &oplock, NULL,
415                          cifs_sb->local_nls, cifs_sb->mnt_cifs_flags &
416                                 CIFS_MOUNT_MAP_SPECIAL_CHR);
417         if (rc) {
418                 up(&pCifsFile->fh_sem);
419                 cFYI(1, ("cifs_open returned 0x%x", rc));
420                 cFYI(1, ("oplock: %d", oplock));
421         } else {
422                 pCifsFile->netfid = netfid;
423                 pCifsFile->invalidHandle = FALSE;
424                 up(&pCifsFile->fh_sem);
425                 pCifsInode = CIFS_I(inode);
426                 if (pCifsInode) {
427                         if (can_flush) {
428                                 filemap_write_and_wait(inode->i_mapping);
429                         /* temporarily disable caching while we
430                            go to server to get inode info */
431                                 pCifsInode->clientCanCacheAll = FALSE;
432                                 pCifsInode->clientCanCacheRead = FALSE;
433                                 if (pTcon->unix_ext)
434                                         rc = cifs_get_inode_info_unix(&inode,
435                                                 full_path, inode->i_sb, xid);
436                                 else
437                                         rc = cifs_get_inode_info(&inode,
438                                                 full_path, NULL, inode->i_sb,
439                                                 xid);
440                         } /* else we are writing out data to server already
441                              and could deadlock if we tried to flush data, and
442                              since we do not know if we have data that would
443                              invalidate the current end of file on the server
444                              we can not go to the server to get the new inode
445                              info */
446                         if ((oplock & 0xF) == OPLOCK_EXCLUSIVE) {
447                                 pCifsInode->clientCanCacheAll = TRUE;
448                                 pCifsInode->clientCanCacheRead = TRUE;
449                                 cFYI(1, ("Exclusive Oplock granted on inode %p",
450                                          file->f_path.dentry->d_inode));
451                         } else if ((oplock & 0xF) == OPLOCK_READ) {
452                                 pCifsInode->clientCanCacheRead = TRUE;
453                                 pCifsInode->clientCanCacheAll = FALSE;
454                         } else {
455                                 pCifsInode->clientCanCacheRead = FALSE;
456                                 pCifsInode->clientCanCacheAll = FALSE;
457                         }
458                         cifs_relock_file(pCifsFile);
459                 }
460         }
461
462         kfree(full_path);
463         FreeXid(xid);
464         return rc;
465 }
466
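/*
 * Release an open handle.  Waits briefly for in-flight writes (wrtPending)
 * to reach the server, sends SMBClose unless the tcon needs reconnect, frees
 * any recorded byte-range locks, and unlinks the handle from the inode and
 * tcon open-file lists.
 */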
467 int cifs_close(struct inode *inode, struct file *file)
468 {
469         int rc = 0;
470         int xid;
471         struct cifs_sb_info *cifs_sb;
472         struct cifsTconInfo *pTcon;
473         struct cifsFileInfo *pSMBFile =
474                 (struct cifsFileInfo *)file->private_data;
475
476         xid = GetXid();
477
478         cifs_sb = CIFS_SB(inode->i_sb);
479         pTcon = cifs_sb->tcon;
480         if (pSMBFile) {
481                 struct cifsLockInfo *li, *tmp;
482
483                 pSMBFile->closePend = TRUE;
484                 if (pTcon) {
485                         /* no sense reconnecting to close a file that is
486                            already closed */
487                         if (pTcon->tidStatus != CifsNeedReconnect) {
488                                 int timeout = 2;
489                                 while ((atomic_read(&pSMBFile->wrtPending) != 0)
490                                          && (timeout < 1000) ) {
491                                         /* Give write a better chance to get to
492                                         server ahead of the close.  We do not
493                                         want to add a wait_q here as it would
494                                         increase the memory utilization as
495                                         the struct would be in each open file,
496                                         but this should give enough time to
497                                         clear the socket */
498 #ifdef CONFIG_CIFS_DEBUG2
499                                         cFYI(1, ("close delay, write pending"));
500 #endif /* DEBUG2 */
501                                         msleep(timeout);
502                                         timeout *= 4;
503                                 }
504                                 if (atomic_read(&pSMBFile->wrtPending))
505                                         cERROR(1,
506                                                 ("close with pending writes"));
507                                 rc = CIFSSMBClose(xid, pTcon,
508                                                   pSMBFile->netfid);
509                         }
510                 }
511
512                 /* Delete any outstanding lock records.
513                    We'll lose them when the file is closed anyway. */
514                 mutex_lock(&pSMBFile->lock_mutex);
515                 list_for_each_entry_safe(li, tmp, &pSMBFile->llist, llist) {
516                         list_del(&li->llist);
517                         kfree(li);
518                 }
519                 mutex_unlock(&pSMBFile->lock_mutex);
520
521                 write_lock(&GlobalSMBSeslock);
522                 list_del(&pSMBFile->flist);
523                 list_del(&pSMBFile->tlist);
524                 write_unlock(&GlobalSMBSeslock);
525                 kfree(pSMBFile->search_resume_name);
526                 kfree(file->private_data);
527                 file->private_data = NULL;
528         } else
529                 rc = -EBADF;
530
531         if (list_empty(&(CIFS_I(inode)->openFileList))) {
532                 cFYI(1, ("closing last open instance for inode %p", inode));
533                 /* if the file is not open we do not know if we can cache info
534                    on this inode, much less write behind and read ahead */
535                 CIFS_I(inode)->clientCanCacheRead = FALSE;
536                 CIFS_I(inode)->clientCanCacheAll  = FALSE;
537         }
538         if ((rc == 0) && CIFS_I(inode)->write_behind_rc)
539                 rc = CIFS_I(inode)->write_behind_rc;
540         FreeXid(xid);
541         return rc;
542 }
543
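/* Close a directory search: end any unfinished FindFirst/FindNext on the
   server and free the cached network buffer and resume name */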
544 int cifs_closedir(struct inode *inode, struct file *file)
545 {
546         int rc = 0;
547         int xid;
548         struct cifsFileInfo *pCFileStruct =
549             (struct cifsFileInfo *)file->private_data;
550         char *ptmp;
551
552         cFYI(1, ("Closedir inode = 0x%p", inode));
553
554         xid = GetXid();
555
556         if (pCFileStruct) {
557                 struct cifsTconInfo *pTcon;
558                 struct cifs_sb_info *cifs_sb =
559                         CIFS_SB(file->f_path.dentry->d_sb);
560
561                 pTcon = cifs_sb->tcon;
562
563                 cFYI(1, ("Freeing private data in close dir"));
564                 if ((pCFileStruct->srch_inf.endOfSearch == FALSE) &&
565                    (pCFileStruct->invalidHandle == FALSE)) {
566                         pCFileStruct->invalidHandle = TRUE;
567                         rc = CIFSFindClose(xid, pTcon, pCFileStruct->netfid);
568                         cFYI(1, ("Closing uncompleted readdir with rc %d",
569                                  rc));
570                         /* not much we can do if it fails anyway, ignore rc */
571                         rc = 0;
572                 }
573                 ptmp = pCFileStruct->srch_inf.ntwrk_buf_start;
574                 if (ptmp) {
575                         cFYI(1, ("closedir free smb buf in srch struct"));
576                         pCFileStruct->srch_inf.ntwrk_buf_start = NULL;
577                         if (pCFileStruct->srch_inf.smallBuf)
578                                 cifs_small_buf_release(ptmp);
579                         else
580                                 cifs_buf_release(ptmp);
581                 }
582                 ptmp = pCFileStruct->search_resume_name;
583                 if (ptmp) {
584                         cFYI(1, ("closedir free resume name"));
585                         pCFileStruct->search_resume_name = NULL;
586                         kfree(ptmp);
587                 }
588                 kfree(file->private_data);
589                 file->private_data = NULL;
590         }
591         /* BB can we lock the filestruct while this is going on? */
592         FreeXid(xid);
593         return rc;
594 }
595
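/* Record a byte-range lock we obtained so that later unlock requests and
   cifs_close can find and release it */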
596 static int store_file_lock(struct cifsFileInfo *fid, __u64 len,
597                                 __u64 offset, __u8 lockType)
598 {
599         struct cifsLockInfo *li =
600                 kmalloc(sizeof(struct cifsLockInfo), GFP_KERNEL);
601         if (li == NULL)
602                 return -ENOMEM;
603         li->offset = offset;
604         li->length = len;
605         li->type = lockType;
606         mutex_lock(&fid->lock_mutex);
607         list_add(&li->llist, &fid->llist);
608         mutex_unlock(&fid->lock_mutex);
609         return 0;
610 }
611
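/*
 * Handle byte-range lock requests.  When the server supports the POSIX
 * (CIFS Unix extension) lock capability the request is passed through with
 * CIFSSMBPosixLock; otherwise Windows LOCKING_ANDX semantics are used, a
 * GETLK is probed by trying to set and then release the lock, and locks we
 * set are stored locally so later unlocks can release them on the server.
 */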
612 int cifs_lock(struct file *file, int cmd, struct file_lock *pfLock)
613 {
614         int rc, xid;
615         __u32 numLock = 0;
616         __u32 numUnlock = 0;
617         __u64 length;
618         int wait_flag = FALSE;
619         struct cifs_sb_info *cifs_sb;
620         struct cifsTconInfo *pTcon;
621         __u16 netfid;
622         __u8 lockType = LOCKING_ANDX_LARGE_FILES;
623         int posix_locking;
624
625         length = 1 + pfLock->fl_end - pfLock->fl_start;
626         rc = -EACCES;
627         xid = GetXid();
628
629         cFYI(1, ("Lock parm: 0x%x flockflags: "
630                  "0x%x flocktype: 0x%x start: %lld end: %lld",
631                 cmd, pfLock->fl_flags, pfLock->fl_type, pfLock->fl_start,
632                 pfLock->fl_end));
633
634         if (pfLock->fl_flags & FL_POSIX)
635                 cFYI(1, ("Posix"));
636         if (pfLock->fl_flags & FL_FLOCK)
637                 cFYI(1, ("Flock"));
638         if (pfLock->fl_flags & FL_SLEEP) {
639                 cFYI(1, ("Blocking lock"));
640                 wait_flag = TRUE;
641         }
642         if (pfLock->fl_flags & FL_ACCESS)
643                 cFYI(1, ("Process suspended by mandatory locking - "
644                          "not implemented yet"));
645         if (pfLock->fl_flags & FL_LEASE)
646                 cFYI(1, ("Lease on file - not implemented yet"));
647         if (pfLock->fl_flags &
648             (~(FL_POSIX | FL_FLOCK | FL_SLEEP | FL_ACCESS | FL_LEASE)))
649                 cFYI(1, ("Unknown lock flags 0x%x", pfLock->fl_flags));
650
651         if (pfLock->fl_type == F_WRLCK) {
652                 cFYI(1, ("F_WRLCK "));
653                 numLock = 1;
654         } else if (pfLock->fl_type == F_UNLCK) {
655                 cFYI(1, ("F_UNLCK"));
656                 numUnlock = 1;
657                 /* Check if unlock includes more than
658                 one lock range */
659         } else if (pfLock->fl_type == F_RDLCK) {
660                 cFYI(1, ("F_RDLCK"));
661                 lockType |= LOCKING_ANDX_SHARED_LOCK;
662                 numLock = 1;
663         } else if (pfLock->fl_type == F_EXLCK) {
664                 cFYI(1, ("F_EXLCK"));
665                 numLock = 1;
666         } else if (pfLock->fl_type == F_SHLCK) {
667                 cFYI(1, ("F_SHLCK"));
668                 lockType |= LOCKING_ANDX_SHARED_LOCK;
669                 numLock = 1;
670         } else
671                 cFYI(1, ("Unknown type of lock"));
672
673         cifs_sb = CIFS_SB(file->f_path.dentry->d_sb);
674         pTcon = cifs_sb->tcon;
675
676         if (file->private_data == NULL) {
677                 FreeXid(xid);
678                 return -EBADF;
679         }
680         netfid = ((struct cifsFileInfo *)file->private_data)->netfid;
681
682         posix_locking = (cifs_sb->tcon->ses->capabilities & CAP_UNIX) &&
683                         (CIFS_UNIX_FCNTL_CAP & le64_to_cpu(cifs_sb->tcon->fsUnixInfo.Capability));
684
685         /* BB add code here to normalize offset and length to
686         account for negative length which we can not accept over the
687         wire */
688         if (IS_GETLK(cmd)) {
689                 if (posix_locking) {
690                         int posix_lock_type;
691                         if (lockType & LOCKING_ANDX_SHARED_LOCK)
692                                 posix_lock_type = CIFS_RDLCK;
693                         else
694                                 posix_lock_type = CIFS_WRLCK;
695                         rc = CIFSSMBPosixLock(xid, pTcon, netfid, 1 /* get */,
696                                         length, pfLock,
697                                         posix_lock_type, wait_flag);
698                         FreeXid(xid);
699                         return rc;
700                 }
701
702                 /* BB we could chain these into one lock request BB */
703                 rc = CIFSSMBLock(xid, pTcon, netfid, length, pfLock->fl_start,
704                                  0, 1, lockType, 0 /* wait flag */ );
705                 if (rc == 0) {
706                         rc = CIFSSMBLock(xid, pTcon, netfid, length,
707                                          pfLock->fl_start, 1 /* numUnlock */ ,
708                                          0 /* numLock */ , lockType,
709                                          0 /* wait flag */ );
710                         pfLock->fl_type = F_UNLCK;
711                         if (rc != 0)
712                                 cERROR(1, ("Error unlocking previously locked "
713                                            "range %d during test of lock", rc));
714                         rc = 0;
715
716                 } else {
717                         /* if rc == ERR_SHARING_VIOLATION ? */
718                         rc = 0; /* do not change lock type to unlock
719                                    since range in use */
720                 }
721
722                 FreeXid(xid);
723                 return rc;
724         }
725
726         if (!numLock && !numUnlock) {
727                 /* if no lock or unlock then nothing
728                 to do since we do not know what it is */
729                 FreeXid(xid);
730                 return -EOPNOTSUPP;
731         }
732
733         if (posix_locking) {
734                 int posix_lock_type;
735                 if (lockType & LOCKING_ANDX_SHARED_LOCK)
736                         posix_lock_type = CIFS_RDLCK;
737                 else
738                         posix_lock_type = CIFS_WRLCK;
739
740                 if (numUnlock == 1)
741                         posix_lock_type = CIFS_UNLCK;
742
743                 rc = CIFSSMBPosixLock(xid, pTcon, netfid, 0 /* set */,
744                                       length, pfLock,
745                                       posix_lock_type, wait_flag);
746         } else {
747                 struct cifsFileInfo *fid =
748                         (struct cifsFileInfo *)file->private_data;
749
750                 if (numLock) {
751                         rc = CIFSSMBLock(xid, pTcon, netfid, length,
752                                         pfLock->fl_start,
753                                         0, numLock, lockType, wait_flag);
754
755                         if (rc == 0) {
756                                 /* For Windows locks we must store them. */
757                                 rc = store_file_lock(fid, length,
758                                                 pfLock->fl_start, lockType);
759                         }
760                 } else if (numUnlock) {
761                         /* For each stored lock that this unlock overlaps
762                            completely, unlock it. */
763                         int stored_rc = 0;
764                         struct cifsLockInfo *li, *tmp;
765
766                         rc = 0;
767                         mutex_lock(&fid->lock_mutex);
768                         list_for_each_entry_safe(li, tmp, &fid->llist, llist) {
769                                 if (pfLock->fl_start <= li->offset &&
770                                                 (pfLock->fl_start + length) >=
771                                                 (li->offset + li->length)) {
772                                         stored_rc = CIFSSMBLock(xid, pTcon,
773                                                         netfid,
774                                                         li->length, li->offset,
775                                                         1, 0, li->type, FALSE);
776                                         if (stored_rc)
777                                                 rc = stored_rc;
778
779                                         list_del(&li->llist);
780                                         kfree(li);
781                                 }
782                         }
783                         mutex_unlock(&fid->lock_mutex);
784                 }
785         }
786
787         if (pfLock->fl_flags & FL_POSIX)
788                 posix_lock_file_wait(file, pfLock);
789         FreeXid(xid);
790         return rc;
791 }
792
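/*
 * write(2) path for userspace buffers: send the data in chunks of at most
 * wsize bytes with CIFSSMBWrite, reopening the handle if it was invalidated
 * by a reconnect, and extend i_size if the write went past end of file.
 */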
793 ssize_t cifs_user_write(struct file *file, const char __user *write_data,
794         size_t write_size, loff_t *poffset)
795 {
796         int rc = 0;
797         unsigned int bytes_written = 0;
798         unsigned int total_written;
799         struct cifs_sb_info *cifs_sb;
800         struct cifsTconInfo *pTcon;
801         int xid, long_op;
802         struct cifsFileInfo *open_file;
803
804         cifs_sb = CIFS_SB(file->f_path.dentry->d_sb);
805
806         pTcon = cifs_sb->tcon;
807
808         /* cFYI(1,
809            (" write %d bytes to offset %lld of %s", write_size,
810            *poffset, file->f_path.dentry->d_name.name)); */
811
812         if (file->private_data == NULL)
813                 return -EBADF;
814         open_file = (struct cifsFileInfo *) file->private_data;
815
816         xid = GetXid();
817
818         if (*poffset > file->f_path.dentry->d_inode->i_size)
819                 long_op = 2; /* writes past end of file can take a long time */
820         else
821                 long_op = 1;
822
823         for (total_written = 0; write_size > total_written;
824              total_written += bytes_written) {
825                 rc = -EAGAIN;
826                 while (rc == -EAGAIN) {
827                         if (file->private_data == NULL) {
828                                 /* file has been closed on us */
829                                 FreeXid(xid);
830                         /* if we have gotten here we have written some data
831                            and blocked, and the file has been freed on us while
832                            we blocked so return what we managed to write */
833                                 return total_written;
834                         }
835                         if (open_file->closePend) {
836                                 FreeXid(xid);
837                                 if (total_written)
838                                         return total_written;
839                                 else
840                                         return -EBADF;
841                         }
842                         if (open_file->invalidHandle) {
843                                 /* we could deadlock if we called
844                                    filemap_fdatawait from here so tell
845                                    reopen_file not to flush data to server
846                                    now */
847                                 rc = cifs_reopen_file(file, FALSE);
848                                 if (rc != 0)
849                                         break;
850                         }
851
852                         rc = CIFSSMBWrite(xid, pTcon,
853                                 open_file->netfid,
854                                 min_t(const int, cifs_sb->wsize,
855                                       write_size - total_written),
856                                 *poffset, &bytes_written,
857                                 NULL, write_data + total_written, long_op);
858                 }
859                 if (rc || (bytes_written == 0)) {
860                         if (total_written)
861                                 break;
862                         else {
863                                 FreeXid(xid);
864                                 return rc;
865                         }
866                 } else
867                         *poffset += bytes_written;
868                 long_op = FALSE; /* subsequent writes fast -
869                                     15 seconds is plenty */
870         }
871
872         cifs_stats_bytes_written(pTcon, total_written);
873
874         /* since the write may have blocked check these pointers again */
875         if ((file->f_path.dentry) && (file->f_path.dentry->d_inode)) {
876                 struct inode *inode = file->f_path.dentry->d_inode;
877 /* Do not update local mtime - server will set its actual value on write
878  *              inode->i_ctime = inode->i_mtime =
879  *                      current_fs_time(inode->i_sb);*/
880                 if (total_written > 0) {
881                         spin_lock(&inode->i_lock);
882                         if (*poffset > file->f_path.dentry->d_inode->i_size)
883                                 i_size_write(file->f_path.dentry->d_inode,
884                                         *poffset);
885                         spin_unlock(&inode->i_lock);
886                 }
887                 mark_inode_dirty_sync(file->f_path.dentry->d_inode);
888         }
889         FreeXid(xid);
890         return total_written;
891 }
892
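/*
 * Write from a kernel buffer (used by the page-writeback paths).  Same loop
 * as cifs_user_write, but when signing is not required it can send the data
 * with CIFSSMBWrite2 using a kvec.
 */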
893 static ssize_t cifs_write(struct file *file, const char *write_data,
894         size_t write_size, loff_t *poffset)
895 {
896         int rc = 0;
897         unsigned int bytes_written = 0;
898         unsigned int total_written;
899         struct cifs_sb_info *cifs_sb;
900         struct cifsTconInfo *pTcon;
901         int xid, long_op;
902         struct cifsFileInfo *open_file;
903
904         cifs_sb = CIFS_SB(file->f_path.dentry->d_sb);
905
906         pTcon = cifs_sb->tcon;
907
908         cFYI(1, ("write %zd bytes to offset %lld of %s", write_size,
909            *poffset, file->f_path.dentry->d_name.name));
910
911         if (file->private_data == NULL)
912                 return -EBADF;
913         open_file = (struct cifsFileInfo *)file->private_data;
914
915         xid = GetXid();
916
917         if (*poffset > file->f_path.dentry->d_inode->i_size)
918                 long_op = 2; /* writes past end of file can take a long time */
919         else
920                 long_op = 1;
921
922         for (total_written = 0; write_size > total_written;
923              total_written += bytes_written) {
924                 rc = -EAGAIN;
925                 while (rc == -EAGAIN) {
926                         if (file->private_data == NULL) {
927                                 /* file has been closed on us */
928                                 FreeXid(xid);
929                         /* if we have gotten here we have written some data
930                            and blocked, and the file has been freed on us
931                            while we blocked so return what we managed to
932                            write */
933                                 return total_written;
934                         }
935                         if (open_file->closePend) {
936                                 FreeXid(xid);
937                                 if (total_written)
938                                         return total_written;
939                                 else
940                                         return -EBADF;
941                         }
942                         if (open_file->invalidHandle) {
943                                 /* we could deadlock if we called
944                                    filemap_fdatawait from here so tell
945                                    reopen_file not to flush data to
946                                    server now */
947                                 rc = cifs_reopen_file(file, FALSE);
948                                 if (rc != 0)
949                                         break;
950                         }
951                         if (experimEnabled || (pTcon->ses->server &&
952                                 ((pTcon->ses->server->secMode &
953                                 (SECMODE_SIGN_REQUIRED | SECMODE_SIGN_ENABLED))
954                                 == 0))) {
955                                 struct kvec iov[2];
956                                 unsigned int len;
957
958                                 len = min((size_t)cifs_sb->wsize,
959                                           write_size - total_written);
960                                 /* iov[0] is reserved for smb header */
961                                 iov[1].iov_base = (char *)write_data +
962                                                   total_written;
963                                 iov[1].iov_len = len;
964                                 rc = CIFSSMBWrite2(xid, pTcon,
965                                                 open_file->netfid, len,
966                                                 *poffset, &bytes_written,
967                                                 iov, 1, long_op);
968                         } else
969                                 rc = CIFSSMBWrite(xid, pTcon,
970                                          open_file->netfid,
971                                          min_t(const int, cifs_sb->wsize,
972                                                write_size - total_written),
973                                          *poffset, &bytes_written,
974                                          write_data + total_written,
975                                          NULL, long_op);
976                 }
977                 if (rc || (bytes_written == 0)) {
978                         if (total_written)
979                                 break;
980                         else {
981                                 FreeXid(xid);
982                                 return rc;
983                         }
984                 } else
985                         *poffset += bytes_written;
986                 long_op = FALSE; /* subsequent writes fast -
987                                     15 seconds is plenty */
988         }
989
990         cifs_stats_bytes_written(pTcon, total_written);
991
992         /* since the write may have blocked check these pointers again */
993         if ((file->f_path.dentry) && (file->f_path.dentry->d_inode)) {
994 /*BB We could make this contingent on superblock ATIME flag too */
995 /*              file->f_path.dentry->d_inode->i_ctime =
996                 file->f_path.dentry->d_inode->i_mtime = CURRENT_TIME;*/
997                 if (total_written > 0) {
998                         spin_lock(&file->f_path.dentry->d_inode->i_lock);
999                         if (*poffset > file->f_path.dentry->d_inode->i_size)
1000                                 i_size_write(file->f_path.dentry->d_inode,
1001                                              *poffset);
1002                         spin_unlock(&file->f_path.dentry->d_inode->i_lock);
1003                 }
1004                 mark_inode_dirty_sync(file->f_path.dentry->d_inode);
1005         }
1006         FreeXid(xid);
1007         return total_written;
1008 }
1009
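/*
 * Find an open handle on this inode that can be written to.  Bumps
 * wrtPending so the handle is not freed while the caller uses it, and
 * reopens the handle first if it has been invalidated by a reconnect.
 */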
1010 struct cifsFileInfo *find_writable_file(struct cifsInodeInfo *cifs_inode)
1011 {
1012         struct cifsFileInfo *open_file;
1013         int rc;
1014
1015         /* Having a null inode here (because mapping->host was set to zero by
1016         the VFS or MM) should not happen but we had reports of an oops (due to
1017         it being zero) during stress testcases so we need to check for it */
1018
1019         if (cifs_inode == NULL) {
1020                 cERROR(1, ("Null inode passed to cifs_writeable_file"));
1021                 dump_stack();
1022                 return NULL;
1023         }
1024
1025         read_lock(&GlobalSMBSeslock);
1026         list_for_each_entry(open_file, &cifs_inode->openFileList, flist) {
1027                 if (open_file->closePend)
1028                         continue;
1029                 if (open_file->pfile &&
1030                     ((open_file->pfile->f_flags & O_RDWR) ||
1031                      (open_file->pfile->f_flags & O_WRONLY))) {
1032                         atomic_inc(&open_file->wrtPending);
1033                         read_unlock(&GlobalSMBSeslock);
1034                         if ((open_file->invalidHandle) &&
1035                            (!open_file->closePend) /* BB fixme -since the second clause can not be true remove it BB */) {
1036                                 rc = cifs_reopen_file(open_file->pfile, FALSE);
1037                                 /* if it fails, try another handle - might be */
1038                                 /* dangerous to hold up writepages with retry */
1039                                 if (rc) {
1040                                         cFYI(1,
1041                                               ("failed on reopen file in wp"));
1042                                         read_lock(&GlobalSMBSeslock);
1043                                         /* can not use this handle, no write
1044                                         pending on this one after all */
1045                                         atomic_dec
1046                                              (&open_file->wrtPending);
1047                                         continue;
1048                                 }
1049                         }
1050                         return open_file;
1051                 }
1052         }
1053         read_unlock(&GlobalSMBSeslock);
1054         return NULL;
1055 }
1056
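/* Write the byte range [from, to) of a locked page back to the server
   through any writable handle on the inode */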
1057 static int cifs_partialpagewrite(struct page *page, unsigned from, unsigned to)
1058 {
1059         struct address_space *mapping = page->mapping;
1060         loff_t offset = (loff_t)page->index << PAGE_CACHE_SHIFT;
1061         char *write_data;
1062         int rc = -EFAULT;
1063         int bytes_written = 0;
1064         struct cifs_sb_info *cifs_sb;
1065         struct cifsTconInfo *pTcon;
1066         struct inode *inode;
1067         struct cifsFileInfo *open_file;
1068
1069         if (!mapping || !mapping->host)
1070                 return -EFAULT;
1071
1072         inode = page->mapping->host;
1073         cifs_sb = CIFS_SB(inode->i_sb);
1074         pTcon = cifs_sb->tcon;
1075
1076         offset += (loff_t)from;
1077         write_data = kmap(page);
1078         write_data += from;
1079
1080         if ((to > PAGE_CACHE_SIZE) || (from > to)) {
1081                 kunmap(page);
1082                 return -EIO;
1083         }
1084
1085         /* racing with truncate? */
1086         if (offset > mapping->host->i_size) {
1087                 kunmap(page);
1088                 return 0; /* don't care */
1089         }
1090
1091         /* check to make sure that we are not extending the file */
1092         if (mapping->host->i_size - offset < (loff_t)to)
1093                 to = (unsigned)(mapping->host->i_size - offset);
1094
1095         open_file = find_writable_file(CIFS_I(mapping->host));
1096         if (open_file) {
1097                 bytes_written = cifs_write(open_file->pfile, write_data,
1098                                            to-from, &offset);
1099                 atomic_dec(&open_file->wrtPending);
1100                 /* Does mm or vfs already set times? */
1101                 inode->i_atime = inode->i_mtime = current_fs_time(inode->i_sb);
1102                 if ((bytes_written > 0) && (offset)) {
1103                         rc = 0;
1104                 } else if (bytes_written < 0) {
1105                         if (rc != -EBADF)
1106                                 rc = bytes_written;
1107                 }
1108         } else {
1109                 cFYI(1, ("No writeable filehandles for inode"));
1110                 rc = -EIO;
1111         }
1112
1113         kunmap(page);
1114         return rc;
1115 }
1116
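/*
 * Writeback: collect runs of contiguous dirty pages (up to wsize bytes per
 * request) and send each run to the server in a single CIFSSMBWrite2 call,
 * falling back to generic_writepages() when wsize is smaller than a page or
 * when signing is required and the experimental path is disabled.
 */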
1117 static int cifs_writepages(struct address_space *mapping,
1118                            struct writeback_control *wbc)
1119 {
1120         struct backing_dev_info *bdi = mapping->backing_dev_info;
1121         unsigned int bytes_to_write;
1122         unsigned int bytes_written;
1123         struct cifs_sb_info *cifs_sb;
1124         int done = 0;
1125         pgoff_t end;
1126         pgoff_t index;
1127         int range_whole = 0;
1128         struct kvec *iov;
1129         int len;
1130         int n_iov = 0;
1131         pgoff_t next;
1132         int nr_pages;
1133         __u64 offset = 0;
1134         struct cifsFileInfo *open_file;
1135         struct page *page;
1136         struct pagevec pvec;
1137         int rc = 0;
1138         int scanned = 0;
1139         int xid;
1140
1141         cifs_sb = CIFS_SB(mapping->host->i_sb);
1142
1143         /*
1144          * If wsize is smaller than the page cache size, default to writing
1145          * one page at a time via cifs_writepage
1146          */
1147         if (cifs_sb->wsize < PAGE_CACHE_SIZE)
1148                 return generic_writepages(mapping, wbc);
1149
1150         if ((cifs_sb->tcon->ses) && (cifs_sb->tcon->ses->server))
1151                 if (cifs_sb->tcon->ses->server->secMode &
1152                                 (SECMODE_SIGN_REQUIRED | SECMODE_SIGN_ENABLED))
1153                         if (!experimEnabled)
1154                                 return generic_writepages(mapping, wbc);
1155
1156         iov = kmalloc(32 * sizeof(struct kvec), GFP_KERNEL);
1157         if (iov == NULL)
1158                 return generic_writepages(mapping, wbc);
1159
1160
1161         /*
1162          * BB: Is this meaningful for a non-block-device file system?
1163          * If it is, we should test it again after we do I/O
1164          */
1165         if (wbc->nonblocking && bdi_write_congested(bdi)) {
1166                 wbc->encountered_congestion = 1;
1167                 kfree(iov);
1168                 return 0;
1169         }
1170
1171         xid = GetXid();
1172
1173         pagevec_init(&pvec, 0);
1174         if (wbc->range_cyclic) {
1175                 index = mapping->writeback_index; /* Start from prev offset */
1176                 end = -1;
1177         } else {
1178                 index = wbc->range_start >> PAGE_CACHE_SHIFT;
1179                 end = wbc->range_end >> PAGE_CACHE_SHIFT;
1180                 if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX)
1181                         range_whole = 1;
1182                 scanned = 1;
1183         }
1184 retry:
1185         while (!done && (index <= end) &&
1186                (nr_pages = pagevec_lookup_tag(&pvec, mapping, &index,
1187                         PAGECACHE_TAG_DIRTY,
1188                         min(end - index, (pgoff_t)PAGEVEC_SIZE - 1) + 1))) {
1189                 int first;
1190                 unsigned int i;
1191
1192                 first = -1;
1193                 next = 0;
1194                 n_iov = 0;
1195                 bytes_to_write = 0;
1196
1197                 for (i = 0; i < nr_pages; i++) {
1198                         page = pvec.pages[i];
1199                         /*
1200                          * At this point we hold neither mapping->tree_lock nor
1201                          * lock on the page itself: the page may be truncated or
1202                          * invalidated (changing page->mapping to NULL), or even
1203                          * swizzled back from swapper_space to tmpfs file
1204                          * mapping
1205                          */
1206
1207                         if (first < 0)
1208                                 lock_page(page);
1209                         else if (TestSetPageLocked(page))
1210                                 break;
1211
1212                         if (unlikely(page->mapping != mapping)) {
1213                                 unlock_page(page);
1214                                 break;
1215                         }
1216
1217                         if (!wbc->range_cyclic && page->index > end) {
1218                                 done = 1;
1219                                 unlock_page(page);
1220                                 break;
1221                         }
1222
1223                         if (next && (page->index != next)) {
1224                                 /* Not next consecutive page */
1225                                 unlock_page(page);
1226                                 break;
1227                         }
1228
1229                         if (wbc->sync_mode != WB_SYNC_NONE)
1230                                 wait_on_page_writeback(page);
1231
1232                         if (PageWriteback(page) ||
1233                                         !clear_page_dirty_for_io(page)) {
1234                                 unlock_page(page);
1235                                 break;
1236                         }
1237
1238                         /*
1239                          * This actually clears the dirty bit in the radix tree.
1240                          * See cifs_writepage() for more commentary.
1241                          */
1242                         set_page_writeback(page);
1243
1244                         if (page_offset(page) >= mapping->host->i_size) {
1245                                 done = 1;
1246                                 unlock_page(page);
1247                                 end_page_writeback(page);
1248                                 break;
1249                         }
1250
1251                         /*
1252                          * BB can we get rid of this?  pages are held by pvec
1253                          */
1254                         page_cache_get(page);
1255
1256                         len = min(mapping->host->i_size - page_offset(page),
1257                                   (loff_t)PAGE_CACHE_SIZE);
1258
1259                         /* reserve iov[0] for the smb header */
1260                         n_iov++;
1261                         iov[n_iov].iov_base = kmap(page);
1262                         iov[n_iov].iov_len = len;
1263                         bytes_to_write += len;
1264
1265                         if (first < 0) {
1266                                 first = i;
1267                                 offset = page_offset(page);
1268                         }
1269                         next = page->index + 1;
1270                         if (bytes_to_write + PAGE_CACHE_SIZE > cifs_sb->wsize)
1271                                 break;
1272                 }
1273                 if (n_iov) {
1274                         /* Search for a writable handle every time we call
1275                          * CIFSSMBWrite2.  We can't rely on the last handle
1276                          * we used to still be valid
1277                          */
1278                         open_file = find_writable_file(CIFS_I(mapping->host));
1279                         if (!open_file) {
1280                                 cERROR(1, ("No writable handles for inode"));
1281                                 rc = -EBADF;
1282                         } else {
1283                                 rc = CIFSSMBWrite2(xid, cifs_sb->tcon,
1284                                                    open_file->netfid,
1285                                                    bytes_to_write, offset,
1286                                                    &bytes_written, iov, n_iov,
1287                                                    1);
1288                                 atomic_dec(&open_file->wrtPending);
1289                                 if (rc || bytes_written < bytes_to_write) {
1290                                         cERROR(1, ("Write2 ret %d, wrote %d",
1291                                                   rc, bytes_written));
1292                                         /* BB what if continued retry is
1293                                            requested via mount flags? */
1294                                         set_bit(AS_EIO, &mapping->flags);
1295                                 } else {
1296                                         cifs_stats_bytes_written(cifs_sb->tcon,
1297                                                                  bytes_written);
1298                                 }
1299                         }
1300                         for (i = 0; i < n_iov; i++) {
1301                                 page = pvec.pages[first + i];
1302                                 /* Should we also set page error on
1303                                 success rc but too little data written? */
1304                                 /* BB investigate retry logic on temporary
1305                                 server crash cases and how recovery works
1306                                 when page marked as error */
1307                                 if (rc)
1308                                         SetPageError(page);
1309                                 kunmap(page);
1310                                 unlock_page(page);
1311                                 end_page_writeback(page);
1312                                 page_cache_release(page);
1313                         }
1314                         if ((wbc->nr_to_write -= n_iov) <= 0)
1315                                 done = 1;
1316                         index = next;
1317                 }
1318                 pagevec_release(&pvec);
1319         }
1320         if (!scanned && !done) {
1321                 /*
1322                  * We hit the last page and there is more work to be done: wrap
1323                  * back to the start of the file
1324                  */
1325                 scanned = 1;
1326                 index = 0;
1327                 goto retry;
1328         }
1329         if (wbc->range_cyclic || (range_whole && wbc->nr_to_write > 0))
1330                 mapping->writeback_index = index;
1331
1332         FreeXid(xid);
1333         kfree(iov);
1334         return rc;
1335 }
1336
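/*
 * Write a single dirty page back to the server.  We set the writeback
 * tag and clear the dirty bit ourselves (see the comment below), then
 * hand the whole page to cifs_partialpagewrite().
 */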
1337 static int cifs_writepage(struct page *page, struct writeback_control *wbc)
1338 {
1339         int rc = -EFAULT;
1340         int xid;
1341
1342         xid = GetXid();
1343 /* BB add check for wbc flags */
1344         page_cache_get(page);
1345         if (!PageUptodate(page)) {
1346                 cFYI(1, ("ppw - page not up to date"));
1347         }
1348
1349         /*
1350          * Set the "writeback" flag, and clear "dirty" in the radix tree.
1351          *
1352          * A writepage() implementation always needs to do either this,
1353          * or re-dirty the page with "redirty_page_for_writepage()" in
1354          * the case of a failure.
1355          *
1356          * Just unlocking the page will cause the radix tree tag-bits
1357          * to fail to update with the state of the page correctly.
1358          */
1359         set_page_writeback(page);
1360         rc = cifs_partialpagewrite(page, 0, PAGE_CACHE_SIZE);
1361         SetPageUptodate(page); /* BB add check for error and Clearuptodate? */
1362         unlock_page(page);
1363         end_page_writeback(page);
1364         page_cache_release(page);
1365         FreeXid(xid);
1366         return rc;
1367 }
1368
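/*
 * Called by the generic write path after data has been copied into the
 * page.  Extend i_size if the write went past it; if the page is not
 * uptodate, push the just-written range to the server immediately,
 * otherwise simply mark the page dirty for later writeback.
 */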
1369 static int cifs_commit_write(struct file *file, struct page *page,
1370         unsigned offset, unsigned to)
1371 {
1372         int xid;
1373         int rc = 0;
1374         struct inode *inode = page->mapping->host;
1375         loff_t position = ((loff_t)page->index << PAGE_CACHE_SHIFT) + to;
1376         char *page_data;
1377
1378         xid = GetXid();
1379         cFYI(1, ("commit write for page %p up to position %lld for %d",
1380                  page, position, to));
1381         spin_lock(&inode->i_lock);
1382         if (position > inode->i_size) {
1383                 i_size_write(inode, position);
1384         }
1385         spin_unlock(&inode->i_lock);
1386         if (!PageUptodate(page)) {
1387                 position = ((loff_t)page->index << PAGE_CACHE_SHIFT) + offset;
1388                 /* can not rely on (or let) writepage write this data */
1389                 if (to < offset) {
1390                         cFYI(1, ("Illegal offsets, can not copy from %d to %d",
1391                                 offset, to));
1392                         FreeXid(xid);
1393                         return rc;
1394                 }
1395                 /* this is probably better than directly calling
1396                    partialpage_write since in this function the file handle is
1397                    known which we might as well leverage */
1398                 /* BB check if anything else missing out of ppw
1399                    such as updating last write time */
1400                 page_data = kmap(page);
1401                 rc = cifs_write(file, page_data + offset, to-offset,
1402                                 &position);
1403                 if (rc > 0)
1404                         rc = 0;
1405                 /* else if (rc < 0) should we set writebehind rc? */
1406                 kunmap(page);
1407         } else {
1408                 set_page_dirty(page);
1409         }
1410
1411         FreeXid(xid);
1412         return rc;
1413 }
1414
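/*
 * Start writeback of any dirty pages for this inode and, if that
 * succeeds, clear the recorded writebehind error.
 */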
1415 int cifs_fsync(struct file *file, struct dentry *dentry, int datasync)
1416 {
1417         int xid;
1418         int rc = 0;
1419         struct inode *inode = file->f_path.dentry->d_inode;
1420
1421         xid = GetXid();
1422
1423         cFYI(1, ("Sync file - name: %s datasync: 0x%x",
1424                 dentry->d_name.name, datasync));
1425
1426         rc = filemap_fdatawrite(inode->i_mapping);
1427         if (rc == 0)
1428                 CIFS_I(inode)->write_behind_rc = 0;
1429         FreeXid(xid);
1430         return rc;
1431 }
1432
1433 /* static void cifs_sync_page(struct page *page)
1434 {
1435         struct address_space *mapping;
1436         struct inode *inode;
1437         unsigned long index = page->index;
1438         unsigned int rpages = 0;
1439         int rc = 0;
1440
1441         cFYI(1, ("sync page %p", page));
1442         mapping = page->mapping;
1443         if (!mapping)
1444                 return 0;
1445         inode = mapping->host;
1446         if (!inode)
1447                 return; */
1448
1449 /*      fill in rpages then
1450         result = cifs_pagein_inode(inode, index, rpages); */ /* BB finish */
1451
1452 /*      cFYI(1, ("rpages is %d for sync page of Index %ld", rpages, index));
1453
1454 #if 0
1455         if (rc < 0)
1456                 return rc;
1457         return 0;
1458 #endif
1459 } */
1460
1461 /*
1462  * As file closes, flush all cached write data for this inode checking
1463  * for write behind errors.
1464  */
1465 int cifs_flush(struct file *file, fl_owner_t id)
1466 {
1467         struct inode *inode = file->f_path.dentry->d_inode;
1468         int rc = 0;
1469
1470         /* Rather than do the steps manually:
1471            lock the inode for writing
1472            loop through pages looking for write behind data (dirty pages)
1473            coalesce into contiguous 16K (or smaller) chunks to write to server
1474            send to server (prefer in parallel)
1475            deal with writebehind errors
1476            unlock inode for writing
1477            filemap_fdatawrite appears easier for the time being */
1478
1479         rc = filemap_fdatawrite(inode->i_mapping);
1480         if (!rc) /* reset wb rc if we were able to write out dirty pages */
1481                 CIFS_I(inode)->write_behind_rc = 0;
1482
1483         cFYI(1, ("Flush inode %p file %p rc %d", inode, file, rc));
1484
1485         return rc;
1486 }
1487
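/*
 * Read from the server straight into a user buffer, bypassing the page
 * cache: loop in chunks of at most rsize, reopening the file handle if
 * it has been invalidated, and copy_to_user each response payload.
 */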
1488 ssize_t cifs_user_read(struct file *file, char __user *read_data,
1489         size_t read_size, loff_t *poffset)
1490 {
1491         int rc = -EACCES;
1492         unsigned int bytes_read = 0;
1493         unsigned int total_read = 0;
1494         unsigned int current_read_size;
1495         struct cifs_sb_info *cifs_sb;
1496         struct cifsTconInfo *pTcon;
1497         int xid;
1498         struct cifsFileInfo *open_file;
1499         char *smb_read_data;
1500         char __user *current_offset;
1501         struct smb_com_read_rsp *pSMBr;
1502
1503         xid = GetXid();
1504         cifs_sb = CIFS_SB(file->f_path.dentry->d_sb);
1505         pTcon = cifs_sb->tcon;
1506
1507         if (file->private_data == NULL) {
1508                 FreeXid(xid);
1509                 return -EBADF;
1510         }
1511         open_file = (struct cifsFileInfo *)file->private_data;
1512
1513         if ((file->f_flags & O_ACCMODE) == O_WRONLY) {
1514                 cFYI(1, ("attempting read on write only file instance"));
1515         }
1516         for (total_read = 0, current_offset = read_data;
1517              read_size > total_read;
1518              total_read += bytes_read, current_offset += bytes_read) {
1519                 current_read_size = min_t(const int, read_size - total_read,
1520                                           cifs_sb->rsize);
1521                 rc = -EAGAIN;
1522                 smb_read_data = NULL;
1523                 while (rc == -EAGAIN) {
1524                         int buf_type = CIFS_NO_BUFFER;
1525                         if ((open_file->invalidHandle) &&
1526                             (!open_file->closePend)) {
1527                                 rc = cifs_reopen_file(file, TRUE);
1528                                 if (rc != 0)
1529                                         break;
1530                         }
1531                         rc = CIFSSMBRead(xid, pTcon,
1532                                          open_file->netfid,
1533                                          current_read_size, *poffset,
1534                                          &bytes_read, &smb_read_data,
1535                                          &buf_type);
1536                         pSMBr = (struct smb_com_read_rsp *)smb_read_data;
1537                         if (smb_read_data) {
1538                                 if (copy_to_user(current_offset,
1539                                                 smb_read_data +
1540                                                 4 /* RFC1001 length field */ +
1541                                                 le16_to_cpu(pSMBr->DataOffset),
1542                                                 bytes_read)) {
1543                                         rc = -EFAULT;
1544                                 }
1545
1546                                 if (buf_type == CIFS_SMALL_BUFFER)
1547                                         cifs_small_buf_release(smb_read_data);
1548                                 else if (buf_type == CIFS_LARGE_BUFFER)
1549                                         cifs_buf_release(smb_read_data);
1550                                 smb_read_data = NULL;
1551                         }
1552                 }
1553                 if (rc || (bytes_read == 0)) {
1554                         if (total_read) {
1555                                 break;
1556                         } else {
1557                                 FreeXid(xid);
1558                                 return rc;
1559                         }
1560                 } else {
1561                         cifs_stats_bytes_read(pTcon, bytes_read);
1562                         *poffset += bytes_read;
1563                 }
1564         }
1565         FreeXid(xid);
1566         return total_read;
1567 }
1568
1569
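/*
 * Read from the server into a kernel buffer supplied by the caller
 * (used below by cifs_readpage_worker), looping in chunks of at most
 * rsize and reopening the file handle if it has been invalidated.
 */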
1570 static ssize_t cifs_read(struct file *file, char *read_data, size_t read_size,
1571         loff_t *poffset)
1572 {
1573         int rc = -EACCES;
1574         unsigned int bytes_read = 0;
1575         unsigned int total_read;
1576         unsigned int current_read_size;
1577         struct cifs_sb_info *cifs_sb;
1578         struct cifsTconInfo *pTcon;
1579         int xid;
1580         char *current_offset;
1581         struct cifsFileInfo *open_file;
1582         int buf_type = CIFS_NO_BUFFER;
1583
1584         xid = GetXid();
1585         cifs_sb = CIFS_SB(file->f_path.dentry->d_sb);
1586         pTcon = cifs_sb->tcon;
1587
1588         if (file->private_data == NULL) {
1589                 FreeXid(xid);
1590                 return -EBADF;
1591         }
1592         open_file = (struct cifsFileInfo *)file->private_data;
1593
1594         if ((file->f_flags & O_ACCMODE) == O_WRONLY)
1595                 cFYI(1, ("attempting read on write only file instance"));
1596
1597         for (total_read = 0, current_offset = read_data;
1598              read_size > total_read;
1599              total_read += bytes_read, current_offset += bytes_read) {
1600                 current_read_size = min_t(const int, read_size - total_read,
1601                                           cifs_sb->rsize);
1602                 /* For Windows ME and 9x we do not want to request more
1603                 than the server negotiated, since it would refuse the read */
1604                 if ((pTcon->ses) &&
1605                         !(pTcon->ses->capabilities & CAP_LARGE_FILES)) {
1606                         current_read_size = min_t(const int, current_read_size,
1607                                         pTcon->ses->server->maxBuf - 128);
1608                 }
1609                 rc = -EAGAIN;
1610                 while (rc == -EAGAIN) {
1611                         if ((open_file->invalidHandle) &&
1612                             (!open_file->closePend)) {
1613                                 rc = cifs_reopen_file(file, TRUE);
1614                                 if (rc != 0)
1615                                         break;
1616                         }
1617                         rc = CIFSSMBRead(xid, pTcon,
1618                                          open_file->netfid,
1619                                          current_read_size, *poffset,
1620                                          &bytes_read, &current_offset,
1621                                          &buf_type);
1622                 }
1623                 if (rc || (bytes_read == 0)) {
1624                         if (total_read) {
1625                                 break;
1626                         } else {
1627                                 FreeXid(xid);
1628                                 return rc;
1629                         }
1630                 } else {
1631                         cifs_stats_bytes_read(pTcon, bytes_read);
1632                         *poffset += bytes_read;
1633                 }
1634         }
1635         FreeXid(xid);
1636         return total_read;
1637 }
1638
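/*
 * Revalidate the inode before allowing the mapping, then fall back to
 * the generic mmap implementation.
 */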
1639 int cifs_file_mmap(struct file *file, struct vm_area_struct *vma)
1640 {
1641         struct dentry *dentry = file->f_path.dentry;
1642         int rc, xid;
1643
1644         xid = GetXid();
1645         rc = cifs_revalidate(dentry);
1646         if (rc) {
1647                 cFYI(1, ("Validation prior to mmap failed, error=%d", rc));
1648                 FreeXid(xid);
1649                 return rc;
1650         }
1651         rc = generic_file_mmap(file, vma);
1652         FreeXid(xid);
1653         return rc;
1654 }
1655
1656
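/*
 * Copy read data into the pages on the readahead list: add each page
 * to the page cache and the LRU, zero the tail of a partially filled
 * page, and mark the page uptodate.
 */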
1657 static void cifs_copy_cache_pages(struct address_space *mapping,
1658         struct list_head *pages, int bytes_read, char *data,
1659         struct pagevec *plru_pvec)
1660 {
1661         struct page *page;
1662         char *target;
1663
1664         while (bytes_read > 0) {
1665                 if (list_empty(pages))
1666                         break;
1667
1668                 page = list_entry(pages->prev, struct page, lru);
1669                 list_del(&page->lru);
1670
1671                 if (add_to_page_cache(page, mapping, page->index,
1672                                       GFP_KERNEL)) {
1673                         page_cache_release(page);
1674                         cFYI(1, ("Add page cache failed"));
1675                         data += PAGE_CACHE_SIZE;
1676                         bytes_read -= PAGE_CACHE_SIZE;
1677                         continue;
1678                 }
1679
1680                 target = kmap_atomic(page, KM_USER0);
1681
1682                 if (PAGE_CACHE_SIZE > bytes_read) {
1683                         memcpy(target, data, bytes_read);
1684                         /* zero the tail end of this partial page */
1685                         memset(target + bytes_read, 0,
1686                                PAGE_CACHE_SIZE - bytes_read);
1687                         bytes_read = 0;
1688                 } else {
1689                         memcpy(target, data, PAGE_CACHE_SIZE);
1690                         bytes_read -= PAGE_CACHE_SIZE;
1691                 }
1692                 kunmap_atomic(target, KM_USER0);
1693
1694                 flush_dcache_page(page);
1695                 SetPageUptodate(page);
1696                 unlock_page(page);
1697                 if (!pagevec_add(plru_pvec, page))
1698                         __pagevec_lru_add(plru_pvec);
1699                 data += PAGE_CACHE_SIZE;
1700         }
1701         return;
1702 }
1703
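/*
 * Readahead: issue one read per run of contiguous pages (bounded by
 * rsize) and let cifs_copy_cache_pages() distribute the response data
 * into the page cache.
 */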
1704 static int cifs_readpages(struct file *file, struct address_space *mapping,
1705         struct list_head *page_list, unsigned num_pages)
1706 {
1707         int rc = -EACCES;
1708         int xid;
1709         loff_t offset;
1710         struct page *page;
1711         struct cifs_sb_info *cifs_sb;
1712         struct cifsTconInfo *pTcon;
1713         int bytes_read = 0;
1714         unsigned int read_size, i;
1715         char *smb_read_data = NULL;
1716         struct smb_com_read_rsp *pSMBr;
1717         struct pagevec lru_pvec;
1718         struct cifsFileInfo *open_file;
1719         int buf_type = CIFS_NO_BUFFER;
1720
1721         xid = GetXid();
1722         if (file->private_data == NULL) {
1723                 FreeXid(xid);
1724                 return -EBADF;
1725         }
1726         open_file = (struct cifsFileInfo *)file->private_data;
1727         cifs_sb = CIFS_SB(file->f_path.dentry->d_sb);
1728         pTcon = cifs_sb->tcon;
1729
1730         pagevec_init(&lru_pvec, 0);
1731 #ifdef CONFIG_CIFS_DEBUG2
1732         cFYI(1, ("rpages: num pages %d", num_pages));
1733 #endif
1734         for (i = 0; i < num_pages; ) {
1735                 unsigned contig_pages;
1736                 struct page *tmp_page;
1737                 unsigned long expected_index;
1738
1739                 if (list_empty(page_list))
1740                         break;
1741
1742                 page = list_entry(page_list->prev, struct page, lru);
1743                 offset = (loff_t)page->index << PAGE_CACHE_SHIFT;
1744
1745                 /* count adjacent pages that we will read into */
1746                 contig_pages = 0;
1747                 expected_index =
1748                         list_entry(page_list->prev, struct page, lru)->index;
1749                 list_for_each_entry_reverse(tmp_page, page_list, lru) {
1750                         if (tmp_page->index == expected_index) {
1751                                 contig_pages++;
1752                                 expected_index++;
1753                         } else
1754                                 break;
1755                 }
1756                 if (contig_pages + i > num_pages)
1757                         contig_pages = num_pages - i;
1758
1759                 /* for reads over a certain size we could initiate async
1760                    read ahead */
1761
1762                 read_size = contig_pages * PAGE_CACHE_SIZE;
1763                 /* Read size needs to be a multiple of the page size */
1764                 read_size = min_t(const unsigned int, read_size,
1765                                   cifs_sb->rsize & PAGE_CACHE_MASK);
1766 #ifdef CONFIG_CIFS_DEBUG2
1767                 cFYI(1, ("rpages: read size 0x%x  contiguous pages %d",
1768                                 read_size, contig_pages));
1769 #endif
1770                 rc = -EAGAIN;
1771                 while (rc == -EAGAIN) {
1772                         if ((open_file->invalidHandle) &&
1773                             (!open_file->closePend)) {
1774                                 rc = cifs_reopen_file(file, TRUE);
1775                                 if (rc != 0)
1776                                         break;
1777                         }
1778
1779                         rc = CIFSSMBRead(xid, pTcon,
1780                                          open_file->netfid,
1781                                          read_size, offset,
1782                                          &bytes_read, &smb_read_data,
1783                                          &buf_type);
1784                         /* BB more RC checks ? */
1785                         if (rc == -EAGAIN) {
1786                                 if (smb_read_data) {
1787                                         if (buf_type == CIFS_SMALL_BUFFER)
1788                                                 cifs_small_buf_release(smb_read_data);
1789                                         else if (buf_type == CIFS_LARGE_BUFFER)
1790                                                 cifs_buf_release(smb_read_data);
1791                                         smb_read_data = NULL;
1792                                 }
1793                         }
1794                 }
1795                 if ((rc < 0) || (smb_read_data == NULL)) {
1796                         cFYI(1, ("Read error in readpages: %d", rc));
1797                         break;
1798                 } else if (bytes_read > 0) {
1799                         task_io_account_read(bytes_read);
1800                         pSMBr = (struct smb_com_read_rsp *)smb_read_data;
1801                         cifs_copy_cache_pages(mapping, page_list, bytes_read,
1802                                 smb_read_data + 4 /* RFC1001 hdr */ +
1803                                 le16_to_cpu(pSMBr->DataOffset), &lru_pvec);
1804
1805                         i += bytes_read >> PAGE_CACHE_SHIFT;
1806                         cifs_stats_bytes_read(pTcon, bytes_read);
1807                         if ((int)(bytes_read & PAGE_CACHE_MASK) != bytes_read) {
1808                                 i++; /* account for partial page */
1809
1810                                 /* server copy of file can have smaller size
1811                                    than client */
1812                                 /* BB do we need to verify this common case ?
1813                                    this case is ok - if we are at server EOF
1814                                    we will hit it on next read */
1815
1816                                 /* break; */
1817                         }
1818                 } else {
1819                         cFYI(1, ("No bytes read (%d) at offset %lld. "
1820                                  "Cleaning remaining pages from readahead list",
1821                                  bytes_read, offset));
1822                         /* BB turn off caching and do new lookup on
1823                            file size at server? */
1824                         break;
1825                 }
1826                 if (smb_read_data) {
1827                         if (buf_type == CIFS_SMALL_BUFFER)
1828                                 cifs_small_buf_release(smb_read_data);
1829                         else if (buf_type == CIFS_LARGE_BUFFER)
1830                                 cifs_buf_release(smb_read_data);
1831                         smb_read_data = NULL;
1832                 }
1833                 bytes_read = 0;
1834         }
1835
1836         pagevec_lru_add(&lru_pvec);
1837
1838 /* need to free smb_read_data buf before exit */
1839         if (smb_read_data) {
1840                 if (buf_type == CIFS_SMALL_BUFFER)
1841                         cifs_small_buf_release(smb_read_data);
1842                 else if (buf_type == CIFS_LARGE_BUFFER)
1843                         cifs_buf_release(smb_read_data);
1844                 smb_read_data = NULL;
1845         }
1846
1847         FreeXid(xid);
1848         return rc;
1849 }
1850
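/*
 * Fill a single page from the server via cifs_read(), zero whatever
 * part of the page was not returned, and mark it uptodate on success.
 */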
1851 static int cifs_readpage_worker(struct file *file, struct page *page,
1852         loff_t *poffset)
1853 {
1854         char *read_data;
1855         int rc;
1856
1857         page_cache_get(page);
1858         read_data = kmap(page);
1859         /* for reads over a certain size could initiate async read ahead */
1860
1861         rc = cifs_read(file, read_data, PAGE_CACHE_SIZE, poffset);
1862
1863         if (rc < 0)
1864                 goto io_error;
1865         else
1866                 cFYI(1, ("Bytes read %d", rc));
1867
1868         file->f_path.dentry->d_inode->i_atime =
1869                 current_fs_time(file->f_path.dentry->d_inode->i_sb);
1870
1871         if (PAGE_CACHE_SIZE > rc)
1872                 memset(read_data + rc, 0, PAGE_CACHE_SIZE - rc);
1873
1874         flush_dcache_page(page);
1875         SetPageUptodate(page);
1876         rc = 0;
1877
1878 io_error:
1879         kunmap(page);
1880         page_cache_release(page);
1881         return rc;
1882 }
1883
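/*
 * ->readpage: read the page at the offset implied by its index and
 * unlock it; the real work is done in cifs_readpage_worker().
 */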
1884 static int cifs_readpage(struct file *file, struct page *page)
1885 {
1886         loff_t offset = (loff_t)page->index << PAGE_CACHE_SHIFT;
1887         int rc = -EACCES;
1888         int xid;
1889
1890         xid = GetXid();
1891
1892         if (file->private_data == NULL) {
1893                 FreeXid(xid);
1894                 return -EBADF;
1895         }
1896
1897         cFYI(1, ("readpage %p at offset %d 0x%x\n",
1898                  page, (int)offset, (int)offset));
1899
1900         rc = cifs_readpage_worker(file, page, &offset);
1901
1902         unlock_page(page);
1903
1904         FreeXid(xid);
1905         return rc;
1906 }
1907
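/*
 * Return 1 if any open file instance on this inode that is not pending
 * close was opened for writing.
 */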
1908 static int is_inode_writable(struct cifsInodeInfo *cifs_inode)
1909 {
1910         struct cifsFileInfo *open_file;
1911
1912         read_lock(&GlobalSMBSeslock);
1913         list_for_each_entry(open_file, &cifs_inode->openFileList, flist) {
1914                 if (open_file->closePend)
1915                         continue;
1916                 if (open_file->pfile &&
1917                     ((open_file->pfile->f_flags & O_RDWR) ||
1918                      (open_file->pfile->f_flags & O_WRONLY))) {
1919                         read_unlock(&GlobalSMBSeslock);
1920                         return 1;
1921                 }
1922         }
1923         read_unlock(&GlobalSMBSeslock);
1924         return 0;
1925 }
1926
1927 /* We do not want to update the file size from the server for inodes
1928    open for write, to avoid races with writepage extending the file.
1929    In the future we could consider allowing refreshing the inode only
1930    on increases in the file size, but this is tricky to do without
1931    racing with writebehind page caching in the current Linux kernel
1932    design */
1933 int is_size_safe_to_change(struct cifsInodeInfo *cifsInode, __u64 end_of_file)
1934 {
1935         if (!cifsInode)
1936                 return 1;
1937
1938         if (is_inode_writable(cifsInode)) {
1939                 /* This inode is open for write at least once */
1940                 struct cifs_sb_info *cifs_sb;
1941
1942                 cifs_sb = CIFS_SB(cifsInode->vfs_inode.i_sb);
1943                 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_DIRECT_IO) {
1944                         /* since no page cache to corrupt on directio
1945                         we can change size safely */
1946                         return 1;
1947                 }
1948
1949                 if (i_size_read(&cifsInode->vfs_inode) < end_of_file)
1950                         return 1;
1951
1952                 return 0;
1953         } else
1954                 return 1;
1955 }
1956
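/*
 * Make sure a page can accept a write of bytes [from, to): a full page
 * write needs no read, a page with no valid data beyond the write is
 * simply zeroed, and otherwise the existing contents are read in from
 * the server first (unless the file is open write-only).
 */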
1957 static int cifs_prepare_write(struct file *file, struct page *page,
1958         unsigned from, unsigned to)
1959 {
1960         int rc = 0;
1961         loff_t i_size;
1962         loff_t offset;
1963
1964         cFYI(1, ("prepare write for page %p from %d to %d", page, from, to));
1965         if (PageUptodate(page))
1966                 return 0;
1967
1968         /* If we are writing a full page it will be up to date,
1969            no need to read from the server */
1970         if ((to == PAGE_CACHE_SIZE) && (from == 0)) {
1971                 SetPageUptodate(page);
1972                 return 0;
1973         }
1974
1975         offset = (loff_t)page->index << PAGE_CACHE_SHIFT;
1976         i_size = i_size_read(page->mapping->host);
1977
1978         if ((offset >= i_size) ||
1979             ((from == 0) && (offset + to) >= i_size)) {
1980                 /*
1981                  * We don't need to read data beyond the end of the file.
1982                  * zero it, and set the page uptodate
1983                  */
1984                 simple_prepare_write(file, page, from, to);
1985                 SetPageUptodate(page);
1986         } else if ((file->f_flags & O_ACCMODE) != O_WRONLY) {
1987                 /* might as well read a page, it is fast enough */
1988                 rc = cifs_readpage_worker(file, page, &offset);
1989         } else {
1990                 /* we could try using another file handle if there is one -
1991                    but how would we lock it to prevent close of that handle
1992                    racing with this read? In any case
1993                    this will be written out by commit_write so is fine */
1994         }
1995
1996         /* we do not need to pass errors back
1997            e.g. if we do not have read access to the file
1998            because cifs_commit_write will do the right thing.  -- shaggy */
1999
2000         return 0;
2001 }
2002
2003 const struct address_space_operations cifs_addr_ops = {
2004         .readpage = cifs_readpage,
2005         .readpages = cifs_readpages,
2006         .writepage = cifs_writepage,
2007         .writepages = cifs_writepages,
2008         .prepare_write = cifs_prepare_write,
2009         .commit_write = cifs_commit_write,
2010         .set_page_dirty = __set_page_dirty_nobuffers,
2011         /* .sync_page = cifs_sync_page, */
2012         /* .direct_IO = */
2013 };
2014
2015 /*
2016  * cifs_readpages requires the server to support a buffer large enough to
2017  * contain the header plus one complete page of data.  Otherwise, we need
2018  * to leave cifs_readpages out of the address space operations.
2019  */
2020 const struct address_space_operations cifs_addr_ops_smallbuf = {
2021         .readpage = cifs_readpage,
2022         .writepage = cifs_writepage,
2023         .writepages = cifs_writepages,
2024         .prepare_write = cifs_prepare_write,
2025         .commit_write = cifs_commit_write,
2026         .set_page_dirty = __set_page_dirty_nobuffers,
2027         /* .sync_page = cifs_sync_page, */
2028         /* .direct_IO = */
2029 };