[CIFS] whitespace/formatting fixes
1 /*
2  *   fs/cifs/file.c
3  *
4  *   vfs operations that deal with files
5  *
6  *   Copyright (C) International Business Machines  Corp., 2002,2007
7  *   Author(s): Steve French (sfrench@us.ibm.com)
8  *              Jeremy Allison (jra@samba.org)
9  *
10  *   This library is free software; you can redistribute it and/or modify
11  *   it under the terms of the GNU Lesser General Public License as published
12  *   by the Free Software Foundation; either version 2.1 of the License, or
13  *   (at your option) any later version.
14  *
15  *   This library is distributed in the hope that it will be useful,
16  *   but WITHOUT ANY WARRANTY; without even the implied warranty of
17  *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See
18  *   the GNU Lesser General Public License for more details.
19  *
20  *   You should have received a copy of the GNU Lesser General Public License
21  *   along with this library; if not, write to the Free Software
22  *   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
23  */
24 #include <linux/fs.h>
25 #include <linux/backing-dev.h>
26 #include <linux/stat.h>
27 #include <linux/fcntl.h>
28 #include <linux/pagemap.h>
29 #include <linux/pagevec.h>
30 #include <linux/writeback.h>
31 #include <linux/task_io_accounting_ops.h>
32 #include <linux/delay.h>
33 #include <asm/div64.h>
34 #include "cifsfs.h"
35 #include "cifspdu.h"
36 #include "cifsglob.h"
37 #include "cifsproto.h"
38 #include "cifs_unicode.h"
39 #include "cifs_debug.h"
40 #include "cifs_fs_sb.h"
41
42 static inline struct cifsFileInfo *cifs_init_private(
43         struct cifsFileInfo *private_data, struct inode *inode,
44         struct file *file, __u16 netfid)
45 {
46         memset(private_data, 0, sizeof(struct cifsFileInfo));
47         private_data->netfid = netfid;
48         private_data->pid = current->tgid;
49         init_MUTEX(&private_data->fh_sem);
50         mutex_init(&private_data->lock_mutex);
51         INIT_LIST_HEAD(&private_data->llist);
52         private_data->pfile = file; /* needed for writepage */
53         private_data->pInode = inode;
54         private_data->invalidHandle = FALSE;
55         private_data->closePend = FALSE;
56         /* we have to track the number of writers to the inode, since
57            writepages does not tell us which handle the write is for, so
58            there can be a close (overlapping with a write) of the
59            filehandle that cifs_writepages chose to use */
60         atomic_set(&private_data->wrtPending, 0);
61
62         return private_data;
63 }
64
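/* Map the POSIX access mode (O_ACCMODE bits of the open flags) to the
   NT/CIFS desired access mask that will be requested from the server */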
65 static inline int cifs_convert_flags(unsigned int flags)
66 {
67         if ((flags & O_ACCMODE) == O_RDONLY)
68                 return GENERIC_READ;
69         else if ((flags & O_ACCMODE) == O_WRONLY)
70                 return GENERIC_WRITE;
71         else if ((flags & O_ACCMODE) == O_RDWR) {
72                 /* GENERIC_ALL is too much permission to request; it can
73                    cause an unnecessary access denied error on create */
74                 /* return GENERIC_ALL; */
75                 return (GENERIC_READ | GENERIC_WRITE);
76         }
77
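        /* access mode did not match any of the O_ACCMODE cases above;
           fall back to a fixed default access mask rather than failing */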
78         return 0x20197;
79 }
80
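/* Map the POSIX create/truncate flags to a CIFS create disposition; see
   the open flag mapping table in cifs_open below */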
81 static inline int cifs_get_disposition(unsigned int flags)
82 {
83         if ((flags & (O_CREAT | O_EXCL)) == (O_CREAT | O_EXCL))
84                 return FILE_CREATE;
85         else if ((flags & (O_CREAT | O_TRUNC)) == (O_CREAT | O_TRUNC))
86                 return FILE_OVERWRITE_IF;
87         else if ((flags & O_CREAT) == O_CREAT)
88                 return FILE_OPEN_IF;
89         else if ((flags & O_TRUNC) == O_TRUNC)
90                 return FILE_OVERWRITE;
91         else
92                 return FILE_OPEN;
93 }
94
95 /* all arguments to this function must be checked for validity in caller */
96 static inline int cifs_open_inode_helper(struct inode *inode, struct file *file,
97         struct cifsInodeInfo *pCifsInode, struct cifsFileInfo *pCifsFile,
98         struct cifsTconInfo *pTcon, int *oplock, FILE_ALL_INFO *buf,
99         char *full_path, int xid)
100 {
101         struct timespec temp;
102         int rc;
103
104         /* we want handles we can read from at the front of the
105            list so we do not have to walk the whole list searching
106            for one in prepare_write */
107         if ((file->f_flags & O_ACCMODE) == O_WRONLY) {
108                 list_add_tail(&pCifsFile->flist,
109                               &pCifsInode->openFileList);
110         } else {
111                 list_add(&pCifsFile->flist,
112                          &pCifsInode->openFileList);
113         }
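        /* the caller (cifs_open) acquired GlobalSMBSeslock for us; release
           it now that the new entry is on the inode's open file list */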
114         write_unlock(&GlobalSMBSeslock);
115         if (pCifsInode->clientCanCacheRead) {
116                 /* we have the inode open somewhere else
117                    no need to discard cache data */
118                 goto client_can_cache;
119         }
120
121         /* BB need same check in cifs_create too? */
122         /* if not oplocked, invalidate inode pages if mtime or file
123            size changed */
124         temp = cifs_NTtimeToUnix(le64_to_cpu(buf->LastWriteTime));
125         if (timespec_equal(&file->f_path.dentry->d_inode->i_mtime, &temp) &&
126                            (file->f_path.dentry->d_inode->i_size ==
127                             (loff_t)le64_to_cpu(buf->EndOfFile))) {
128                 cFYI(1, ("inode unchanged on server"));
129         } else {
130                 if (file->f_path.dentry->d_inode->i_mapping) {
131                 /* BB no need to lock inode until after invalidate
132                    since namei code should already have it locked? */
133                         filemap_write_and_wait(file->f_path.dentry->d_inode->i_mapping);
134                 }
135                 cFYI(1, ("invalidating remote inode since open detected it "
136                          "changed"));
137                 invalidate_remote_inode(file->f_path.dentry->d_inode);
138         }
139
140 client_can_cache:
141         if (pTcon->ses->capabilities & CAP_UNIX)
142                 rc = cifs_get_inode_info_unix(&file->f_path.dentry->d_inode,
143                         full_path, inode->i_sb, xid);
144         else
145                 rc = cifs_get_inode_info(&file->f_path.dentry->d_inode,
146                         full_path, buf, inode->i_sb, xid);
147
148         if ((*oplock & 0xF) == OPLOCK_EXCLUSIVE) {
149                 pCifsInode->clientCanCacheAll = TRUE;
150                 pCifsInode->clientCanCacheRead = TRUE;
151                 cFYI(1, ("Exclusive Oplock granted on inode %p",
152                          file->f_path.dentry->d_inode));
153         } else if ((*oplock & 0xF) == OPLOCK_READ)
154                 pCifsInode->clientCanCacheRead = TRUE;
155
156         return rc;
157 }
158
159 int cifs_open(struct inode *inode, struct file *file)
160 {
161         int rc = -EACCES;
162         int xid, oplock;
163         struct cifs_sb_info *cifs_sb;
164         struct cifsTconInfo *pTcon;
165         struct cifsFileInfo *pCifsFile;
166         struct cifsInodeInfo *pCifsInode;
167         struct list_head *tmp;
168         char *full_path = NULL;
169         int desiredAccess;
170         int disposition;
171         __u16 netfid;
172         FILE_ALL_INFO *buf = NULL;
173
174         xid = GetXid();
175
176         cifs_sb = CIFS_SB(inode->i_sb);
177         pTcon = cifs_sb->tcon;
178
179         if (file->f_flags & O_CREAT) {
180                 /* search inode for this file and fill in file->private_data */
181                 pCifsInode = CIFS_I(file->f_path.dentry->d_inode);
182                 read_lock(&GlobalSMBSeslock);
183                 list_for_each(tmp, &pCifsInode->openFileList) {
184                         pCifsFile = list_entry(tmp, struct cifsFileInfo,
185                                                flist);
186                         if ((pCifsFile->pfile == NULL) &&
187                             (pCifsFile->pid == current->tgid)) {
188                                 /* mode set in cifs_create */
189
190                                 /* needed for writepage */
191                                 pCifsFile->pfile = file;
192
193                                 file->private_data = pCifsFile;
194                                 break;
195                         }
196                 }
197                 read_unlock(&GlobalSMBSeslock);
198                 if (file->private_data != NULL) {
199                         rc = 0;
200                         FreeXid(xid);
201                         return rc;
202                 } else {
203                         if (file->f_flags & O_EXCL)
204                                 cERROR(1, ("could not find file instance for "
205                                            "new file %p", file));
206                 }
207         }
208
209         full_path = build_path_from_dentry(file->f_path.dentry);
210         if (full_path == NULL) {
211                 FreeXid(xid);
212                 return -ENOMEM;
213         }
214
215         cFYI(1, ("inode = 0x%p file flags are 0x%x for %s",
216                  inode, file->f_flags, full_path));
217         desiredAccess = cifs_convert_flags(file->f_flags);
218
219 /*********************************************************************
220  *  open flag mapping table:
221  *
222  *      POSIX Flag            CIFS Disposition
223  *      ----------            ----------------
224  *      O_CREAT               FILE_OPEN_IF
225  *      O_CREAT | O_EXCL      FILE_CREATE
226  *      O_CREAT | O_TRUNC     FILE_OVERWRITE_IF
227  *      O_TRUNC               FILE_OVERWRITE
228  *      none of the above     FILE_OPEN
229  *
230  *      Note that there is no direct match for the disposition
231  *      FILE_SUPERSEDE (ie create whether or not the file exists);
232  *      O_CREAT | O_TRUNC is similar, but it truncates an existing
233  *      file rather than creating a new file as FILE_SUPERSEDE does
234  *      (which also uses the attributes / metadata passed in on the
235  *      open call).  O_SYNC is a reasonable match to the CIFS
236  *      writethrough flag, and the read/write flags match
237  *      reasonably.  O_LARGEFILE is irrelevant because largefile
238  *      support is always used by this client.  The flags O_APPEND,
239  *      O_DIRECT, O_DIRECTORY, O_FASYNC, O_NOFOLLOW and O_NONBLOCK
240  *      need further investigation.
241  *********************************************************************/
242
243         disposition = cifs_get_disposition(file->f_flags);
244
245         if (oplockEnabled)
246                 oplock = REQ_OPLOCK;
247         else
248                 oplock = FALSE;
249
250         /* BB pass O_SYNC flag through on file attributes .. BB */
251
252         /* Also refresh inode by passing in file_info buf returned by SMBOpen
253            and calling get_inode_info with returned buf (at least helps
254            non-Unix server case) */
255
256         /* BB we can not do this if this is the second open of a file
257            and the first handle has writebehind data; we might be
258            able to simply do a filemap_fdatawrite/filemap_fdatawait first */
259         buf = kmalloc(sizeof(FILE_ALL_INFO), GFP_KERNEL);
260         if (!buf) {
261                 rc = -ENOMEM;
262                 goto out;
263         }
264
265         if (cifs_sb->tcon->ses->capabilities & CAP_NT_SMBS)
266                 rc = CIFSSMBOpen(xid, pTcon, full_path, disposition,
267                          desiredAccess, CREATE_NOT_DIR, &netfid, &oplock, buf,
268                          cifs_sb->local_nls, cifs_sb->mnt_cifs_flags
269                                  & CIFS_MOUNT_MAP_SPECIAL_CHR);
270         else
271                 rc = -EIO; /* no NT SMB support; fall into legacy open below */
272
273         if (rc == -EIO) {
274                 /* Old server, try legacy style OpenX */
275                 rc = SMBLegacyOpen(xid, pTcon, full_path, disposition,
276                         desiredAccess, CREATE_NOT_DIR, &netfid, &oplock, buf,
277                         cifs_sb->local_nls, cifs_sb->mnt_cifs_flags
278                                 & CIFS_MOUNT_MAP_SPECIAL_CHR);
279         }
280         if (rc) {
281                 cFYI(1, ("cifs_open returned 0x%x", rc));
282                 goto out;
283         }
284         file->private_data =
285                 kmalloc(sizeof(struct cifsFileInfo), GFP_KERNEL);
286         if (file->private_data == NULL) {
287                 rc = -ENOMEM;
288                 goto out;
289         }
290         pCifsFile = cifs_init_private(file->private_data, inode, file, netfid);
291         write_lock(&GlobalSMBSeslock);
292         list_add(&pCifsFile->tlist, &pTcon->openFileList);
293
294         pCifsInode = CIFS_I(file->f_path.dentry->d_inode);
295         if (pCifsInode) {
296                 rc = cifs_open_inode_helper(inode, file, pCifsInode,
297                                             pCifsFile, pTcon,
298                                             &oplock, buf, full_path, xid);
299         } else {
300                 write_unlock(&GlobalSMBSeslock);
301         }
302
303         if (oplock & CIFS_CREATE_ACTION) {
304                 /* time to set mode which we can not set earlier due to
305                    problems creating new read-only files */
306                 if (cifs_sb->tcon->ses->capabilities & CAP_UNIX) {
307                         CIFSSMBUnixSetPerms(xid, pTcon, full_path,
308                                             inode->i_mode,
309                                             (__u64)-1, (__u64)-1, 0 /* dev */,
310                                             cifs_sb->local_nls,
311                                             cifs_sb->mnt_cifs_flags &
312                                                 CIFS_MOUNT_MAP_SPECIAL_CHR);
313                 } else {
314                         /* BB implement via Windows security descriptors eg
315                            CIFSSMBWinSetPerms(xid, pTcon, full_path, mode,
316                                               -1, -1, local_nls);
317                            in the meantime could set r/o dos attribute when
318                            perms are eg: mode & 0222 == 0 */
319                 }
320         }
321
322 out:
323         kfree(buf);
324         kfree(full_path);
325         FreeXid(xid);
326         return rc;
327 }
328
329 /* Try to reacquire byte range locks that were released when the
330    session to the server was lost */
331 static int cifs_relock_file(struct cifsFileInfo *cifsFile)
332 {
333         int rc = 0;
334
335 /* BB list all locks open on this file and relock */
336
337         return rc;
338 }
339
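/* Reopen a file whose handle was marked invalid, typically after the
   connection to the server was lost and reestablished.  If can_flush is
   set it is safe to flush cached writes and refresh the inode info from
   the server. */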
340 static int cifs_reopen_file(struct file *file, int can_flush)
341 {
342         int rc = -EACCES;
343         int xid, oplock;
344         struct cifs_sb_info *cifs_sb;
345         struct cifsTconInfo *pTcon;
346         struct cifsFileInfo *pCifsFile;
347         struct cifsInodeInfo *pCifsInode;
348         struct inode *inode;
349         char *full_path = NULL;
350         int desiredAccess;
351         int disposition = FILE_OPEN;
352         __u16 netfid;
353
354         if (file->private_data) {
355                 pCifsFile = (struct cifsFileInfo *)file->private_data;
356         } else
357                 return -EBADF;
358
359         xid = GetXid();
360         down(&pCifsFile->fh_sem);
361         if (pCifsFile->invalidHandle == FALSE) {
362                 up(&pCifsFile->fh_sem);
363                 FreeXid(xid);
364                 return 0;
365         }
366
367         if (file->f_path.dentry == NULL) {
368                 cERROR(1, ("no valid name if dentry freed"));
369                 dump_stack();
370                 rc = -EBADF;
371                 goto reopen_error_exit;
372         }
373
374         inode = file->f_path.dentry->d_inode;
375         if (inode == NULL) {
376                 cERROR(1, ("inode not valid"));
377                 dump_stack();
378                 rc = -EBADF;
379                 goto reopen_error_exit;
380         }
381
382         cifs_sb = CIFS_SB(inode->i_sb);
383         pTcon = cifs_sb->tcon;
384
385 /* can not grab the rename sem here, because various ops, including those
386    that already hold the rename sem, can end up causing writepage to get
387    called, and if the server was down that means we end up here; we can
388    never tell whether the caller already holds the rename_sem */
389         full_path = build_path_from_dentry(file->f_path.dentry);
390         if (full_path == NULL) {
391                 rc = -ENOMEM;
392 reopen_error_exit:
393                 up(&pCifsFile->fh_sem);
394                 FreeXid(xid);
395                 return rc;
396         }
397
398         cFYI(1, ("inode = 0x%p file flags 0x%x for %s",
399                  inode, file->f_flags, full_path));
400         desiredAccess = cifs_convert_flags(file->f_flags);
401
402         if (oplockEnabled)
403                 oplock = REQ_OPLOCK;
404         else
405                 oplock = FALSE;
406
407         /* Can not refresh inode by passing in file_info buf to be returned
408            by SMBOpen and then calling get_inode_info with returned buf
409            since file might have write behind data that needs to be flushed
410            and server version of file size can be stale. If we knew for sure
411            that inode was not dirty locally we could do this */
412
413         rc = CIFSSMBOpen(xid, pTcon, full_path, disposition, desiredAccess,
414                          CREATE_NOT_DIR, &netfid, &oplock, NULL,
415                          cifs_sb->local_nls, cifs_sb->mnt_cifs_flags &
416                                 CIFS_MOUNT_MAP_SPECIAL_CHR);
417         if (rc) {
418                 up(&pCifsFile->fh_sem);
419                 cFYI(1, ("cifs_open returned 0x%x", rc));
420                 cFYI(1, ("oplock: %d", oplock));
421         } else {
422                 pCifsFile->netfid = netfid;
423                 pCifsFile->invalidHandle = FALSE;
424                 up(&pCifsFile->fh_sem);
425                 pCifsInode = CIFS_I(inode);
426                 if (pCifsInode) {
427                         if (can_flush) {
428                                 filemap_write_and_wait(inode->i_mapping);
429                         /* temporarily disable caching while we
430                            go to server to get inode info */
431                                 pCifsInode->clientCanCacheAll = FALSE;
432                                 pCifsInode->clientCanCacheRead = FALSE;
433                                 if (pTcon->ses->capabilities & CAP_UNIX)
434                                         rc = cifs_get_inode_info_unix(&inode,
435                                                 full_path, inode->i_sb, xid);
436                                 else
437                                         rc = cifs_get_inode_info(&inode,
438                                                 full_path, NULL, inode->i_sb,
439                                                 xid);
440                         } /* else we are writing out data to server already
441                              and could deadlock if we tried to flush data, and
442                              since we do not know if we have data that would
443                              invalidate the current end of file on the server
444                              we can not go to the server to get the new inode
445                              info */
446                         if ((oplock & 0xF) == OPLOCK_EXCLUSIVE) {
447                                 pCifsInode->clientCanCacheAll = TRUE;
448                                 pCifsInode->clientCanCacheRead = TRUE;
449                                 cFYI(1, ("Exclusive Oplock granted on inode %p",
450                                          file->f_path.dentry->d_inode));
451                         } else if ((oplock & 0xF) == OPLOCK_READ) {
452                                 pCifsInode->clientCanCacheRead = TRUE;
453                                 pCifsInode->clientCanCacheAll = FALSE;
454                         } else {
455                                 pCifsInode->clientCanCacheRead = FALSE;
456                                 pCifsInode->clientCanCacheAll = FALSE;
457                         }
458                         cifs_relock_file(pCifsFile);
459                 }
460         }
461
462         kfree(full_path);
463         FreeXid(xid);
464         return rc;
465 }
466
467 int cifs_close(struct inode *inode, struct file *file)
468 {
469         int rc = 0;
470         int xid;
471         struct cifs_sb_info *cifs_sb;
472         struct cifsTconInfo *pTcon;
473         struct cifsFileInfo *pSMBFile =
474                 (struct cifsFileInfo *)file->private_data;
475
476         xid = GetXid();
477
478         cifs_sb = CIFS_SB(inode->i_sb);
479         pTcon = cifs_sb->tcon;
480         if (pSMBFile) {
481                 struct cifsLockInfo *li, *tmp;
482
483                 pSMBFile->closePend = TRUE;
484                 if (pTcon) {
485                         /* no sense reconnecting to close a file that is
486                            already closed */
487                         if (pTcon->tidStatus != CifsNeedReconnect) {
488                                 int timeout = 2;
489                                 while ((atomic_read(&pSMBFile->wrtPending) != 0)
490                                          && (timeout < 1000)) {
491                                         /* Give write a better chance to get to
492                                         server ahead of the close.  We do not
493                                         want to add a wait_q here as it would
494                                         increase the memory utilization as
495                                         the struct would be in each open file,
496                                         but this should give enough time to
497                                         clear the socket */
498 #ifdef CONFIG_CIFS_DEBUG2
499                                         cFYI(1, ("close delay, write pending"));
500 #endif /* DEBUG2 */
501                                         msleep(timeout);
502                                         timeout *= 4;
503                                 }
504                                 if (atomic_read(&pSMBFile->wrtPending))
505                                         cERROR(1, ("close with pending writes"));
506                                 rc = CIFSSMBClose(xid, pTcon,
507                                                   pSMBFile->netfid);
508                         }
509                 }
510
511                 /* Delete any outstanding lock records.
512                    We'll lose them when the file is closed anyway. */
513                 mutex_lock(&pSMBFile->lock_mutex);
514                 list_for_each_entry_safe(li, tmp, &pSMBFile->llist, llist) {
515                         list_del(&li->llist);
516                         kfree(li);
517                 }
518                 mutex_unlock(&pSMBFile->lock_mutex);
519
520                 write_lock(&GlobalSMBSeslock);
521                 list_del(&pSMBFile->flist);
522                 list_del(&pSMBFile->tlist);
523                 write_unlock(&GlobalSMBSeslock);
524                 kfree(pSMBFile->search_resume_name);
525                 kfree(file->private_data);
526                 file->private_data = NULL;
527         } else
528                 rc = -EBADF;
529
530         if (list_empty(&(CIFS_I(inode)->openFileList))) {
531                 cFYI(1, ("closing last open instance for inode %p", inode));
532                 /* if the file is not open we do not know if we can cache info
533                    on this inode, much less write behind and read ahead */
534                 CIFS_I(inode)->clientCanCacheRead = FALSE;
535                 CIFS_I(inode)->clientCanCacheAll  = FALSE;
536         }
537         if ((rc == 0) && CIFS_I(inode)->write_behind_rc)
538                 rc = CIFS_I(inode)->write_behind_rc;
539         FreeXid(xid);
540         return rc;
541 }
542
543 int cifs_closedir(struct inode *inode, struct file *file)
544 {
545         int rc = 0;
546         int xid;
547         struct cifsFileInfo *pCFileStruct =
548             (struct cifsFileInfo *)file->private_data;
549         char *ptmp;
550
551         cFYI(1, ("Closedir inode = 0x%p", inode));
552
553         xid = GetXid();
554
555         if (pCFileStruct) {
556                 struct cifsTconInfo *pTcon;
557                 struct cifs_sb_info *cifs_sb =
558                         CIFS_SB(file->f_path.dentry->d_sb);
559
560                 pTcon = cifs_sb->tcon;
561
562                 cFYI(1, ("Freeing private data in close dir"));
563                 if ((pCFileStruct->srch_inf.endOfSearch == FALSE) &&
564                    (pCFileStruct->invalidHandle == FALSE)) {
565                         pCFileStruct->invalidHandle = TRUE;
566                         rc = CIFSFindClose(xid, pTcon, pCFileStruct->netfid);
567                         cFYI(1, ("Closing uncompleted readdir with rc %d",
568                                  rc));
569                         /* not much we can do if it fails anyway, ignore rc */
570                         rc = 0;
571                 }
572                 ptmp = pCFileStruct->srch_inf.ntwrk_buf_start;
573                 if (ptmp) {
574                         cFYI(1, ("closedir free smb buf in srch struct"));
575                         pCFileStruct->srch_inf.ntwrk_buf_start = NULL;
576                         if (pCFileStruct->srch_inf.smallBuf)
577                                 cifs_small_buf_release(ptmp);
578                         else
579                                 cifs_buf_release(ptmp);
580                 }
581                 ptmp = pCFileStruct->search_resume_name;
582                 if (ptmp) {
583                         cFYI(1, ("closedir free resume name"));
584                         pCFileStruct->search_resume_name = NULL;
585                         kfree(ptmp);
586                 }
587                 kfree(file->private_data);
588                 file->private_data = NULL;
589         }
590         /* BB can we lock the filestruct while this is going on? */
591         FreeXid(xid);
592         return rc;
593 }
594
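/* Remember a byte range lock granted by the server so that it can be
   cleaned up when the file is closed (see cifs_close); cifs_relock_file
   is the intended hook for reacquiring these after a reconnect */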
595 static int store_file_lock(struct cifsFileInfo *fid, __u64 len,
596                                 __u64 offset, __u8 lockType)
597 {
598         struct cifsLockInfo *li =
599                 kmalloc(sizeof(struct cifsLockInfo), GFP_KERNEL);
600         if (li == NULL)
601                 return -ENOMEM;
602         li->offset = offset;
603         li->length = len;
604         li->type = lockType;
605         mutex_lock(&fid->lock_mutex);
606         list_add(&li->llist, &fid->llist);
607         mutex_unlock(&fid->lock_mutex);
608         return 0;
609 }
610
611 int cifs_lock(struct file *file, int cmd, struct file_lock *pfLock)
612 {
613         int rc, xid;
614         __u32 numLock = 0;
615         __u32 numUnlock = 0;
616         __u64 length;
617         int wait_flag = FALSE;
618         struct cifs_sb_info *cifs_sb;
619         struct cifsTconInfo *pTcon;
620         __u16 netfid;
621         __u8 lockType = LOCKING_ANDX_LARGE_FILES;
622         int posix_locking;
623
624         length = 1 + pfLock->fl_end - pfLock->fl_start;
625         rc = -EACCES;
626         xid = GetXid();
627
628         cFYI(1, ("Lock parm: 0x%x flockflags: "
629                  "0x%x flocktype: 0x%x start: %lld end: %lld",
630                 cmd, pfLock->fl_flags, pfLock->fl_type, pfLock->fl_start,
631                 pfLock->fl_end));
632
633         if (pfLock->fl_flags & FL_POSIX)
634                 cFYI(1, ("Posix"));
635         if (pfLock->fl_flags & FL_FLOCK)
636                 cFYI(1, ("Flock"));
637         if (pfLock->fl_flags & FL_SLEEP) {
638                 cFYI(1, ("Blocking lock"));
639                 wait_flag = TRUE;
640         }
641         if (pfLock->fl_flags & FL_ACCESS)
642                 cFYI(1, ("Process suspended by mandatory locking - "
643                          "not implemented yet"));
644         if (pfLock->fl_flags & FL_LEASE)
645                 cFYI(1, ("Lease on file - not implemented yet"));
646         if (pfLock->fl_flags &
647             (~(FL_POSIX | FL_FLOCK | FL_SLEEP | FL_ACCESS | FL_LEASE)))
648                 cFYI(1, ("Unknown lock flags 0x%x", pfLock->fl_flags));
649
650         if (pfLock->fl_type == F_WRLCK) {
651                 cFYI(1, ("F_WRLCK "));
652                 numLock = 1;
653         } else if (pfLock->fl_type == F_UNLCK) {
654                 cFYI(1, ("F_UNLCK"));
655                 numUnlock = 1;
656                 /* Check if unlock includes more than
657                 one lock range */
658         } else if (pfLock->fl_type == F_RDLCK) {
659                 cFYI(1, ("F_RDLCK"));
660                 lockType |= LOCKING_ANDX_SHARED_LOCK;
661                 numLock = 1;
662         } else if (pfLock->fl_type == F_EXLCK) {
663                 cFYI(1, ("F_EXLCK"));
664                 numLock = 1;
665         } else if (pfLock->fl_type == F_SHLCK) {
666                 cFYI(1, ("F_SHLCK"));
667                 lockType |= LOCKING_ANDX_SHARED_LOCK;
668                 numLock = 1;
669         } else
670                 cFYI(1, ("Unknown type of lock"));
671
672         cifs_sb = CIFS_SB(file->f_path.dentry->d_sb);
673         pTcon = cifs_sb->tcon;
674
675         if (file->private_data == NULL) {
676                 FreeXid(xid);
677                 return -EBADF;
678         }
679         netfid = ((struct cifsFileInfo *)file->private_data)->netfid;
680
681         posix_locking = (cifs_sb->tcon->ses->capabilities & CAP_UNIX) &&
682                         (CIFS_UNIX_FCNTL_CAP & le64_to_cpu(cifs_sb->tcon->fsUnixInfo.Capability));
683
684         /* BB add code here to normalize offset and length to
685         account for negative length which we can not accept over the
686         wire */
687         if (IS_GETLK(cmd)) {
688                 if (posix_locking) {
689                         int posix_lock_type;
690                         if (lockType & LOCKING_ANDX_SHARED_LOCK)
691                                 posix_lock_type = CIFS_RDLCK;
692                         else
693                                 posix_lock_type = CIFS_WRLCK;
694                         rc = CIFSSMBPosixLock(xid, pTcon, netfid, 1 /* get */,
695                                         length, pfLock,
696                                         posix_lock_type, wait_flag);
697                         FreeXid(xid);
698                         return rc;
699                 }
700
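                /* no POSIX lock support: emulate the lock test by trying
                   to take the lock; if that succeeds, release it again and
                   report the range as unlocked, otherwise leave fl_type
                   alone since the range is in use */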
701                 /* BB we could chain these into one lock request BB */
702                 rc = CIFSSMBLock(xid, pTcon, netfid, length, pfLock->fl_start,
703                                  0, 1, lockType, 0 /* wait flag */ );
704                 if (rc == 0) {
705                         rc = CIFSSMBLock(xid, pTcon, netfid, length,
706                                          pfLock->fl_start, 1 /* numUnlock */ ,
707                                          0 /* numLock */ , lockType,
708                                          0 /* wait flag */ );
709                         pfLock->fl_type = F_UNLCK;
710                         if (rc != 0)
711                                 cERROR(1, ("Error unlocking previously locked "
712                                            "range %d during test of lock", rc));
713                         rc = 0;
714
715                 } else {
716                         /* if rc == ERR_SHARING_VIOLATION ? */
717                         rc = 0; /* do not change lock type to unlock
718                                    since range in use */
719                 }
720
721                 FreeXid(xid);
722                 return rc;
723         }
724
725         if (!numLock && !numUnlock) {
726                 /* if no lock or unlock then nothing
727                 to do since we do not know what it is */
728                 FreeXid(xid);
729                 return -EOPNOTSUPP;
730         }
731
732         if (posix_locking) {
733                 int posix_lock_type;
734                 if (lockType & LOCKING_ANDX_SHARED_LOCK)
735                         posix_lock_type = CIFS_RDLCK;
736                 else
737                         posix_lock_type = CIFS_WRLCK;
738
739                 if (numUnlock == 1)
740                         posix_lock_type = CIFS_UNLCK;
741
742                 rc = CIFSSMBPosixLock(xid, pTcon, netfid, 0 /* set */,
743                                       length, pfLock,
744                                       posix_lock_type, wait_flag);
745         } else {
746                 struct cifsFileInfo *fid =
747                         (struct cifsFileInfo *)file->private_data;
748
749                 if (numLock) {
750                         rc = CIFSSMBLock(xid, pTcon, netfid, length,
751                                         pfLock->fl_start,
752                                         0, numLock, lockType, wait_flag);
753
754                         if (rc == 0) {
755                                 /* For Windows locks we must store them. */
756                                 rc = store_file_lock(fid, length,
757                                                 pfLock->fl_start, lockType);
758                         }
759                 } else if (numUnlock) {
760                         /* For each stored lock that this unlock overlaps
761                            completely, unlock it. */
762                         int stored_rc = 0;
763                         struct cifsLockInfo *li, *tmp;
764
765                         rc = 0;
766                         mutex_lock(&fid->lock_mutex);
767                         list_for_each_entry_safe(li, tmp, &fid->llist, llist) {
768                                 if (pfLock->fl_start <= li->offset &&
769                                                 length >= li->length) {
770                                         stored_rc = CIFSSMBLock(xid, pTcon,
771                                                         netfid,
772                                                         li->length, li->offset,
773                                                         1, 0, li->type, FALSE);
774                                         if (stored_rc)
775                                                 rc = stored_rc;
776
777                                         list_del(&li->llist);
778                                         kfree(li);
779                                 }
780                         }
781                         mutex_unlock(&fid->lock_mutex);
782                 }
783         }
784
785         if (pfLock->fl_flags & FL_POSIX)
786                 posix_lock_file_wait(file, pfLock);
787         FreeXid(xid);
788         return rc;
789 }
790
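/* Write data from a user space buffer straight to the server, without
   going through the page cache.  At most wsize bytes are sent per SMB
   write, and the request is retried after a reconnect (-EAGAIN). */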
791 ssize_t cifs_user_write(struct file *file, const char __user *write_data,
792         size_t write_size, loff_t *poffset)
793 {
794         int rc = 0;
795         unsigned int bytes_written = 0;
796         unsigned int total_written;
797         struct cifs_sb_info *cifs_sb;
798         struct cifsTconInfo *pTcon;
799         int xid, long_op;
800         struct cifsFileInfo *open_file;
801
802         cifs_sb = CIFS_SB(file->f_path.dentry->d_sb);
803
804         pTcon = cifs_sb->tcon;
805
806         /* cFYI(1,
807            (" write %d bytes to offset %lld of %s", write_size,
808            *poffset, file->f_path.dentry->d_name.name)); */
809
810         if (file->private_data == NULL)
811                 return -EBADF;
812         open_file = (struct cifsFileInfo *) file->private_data;
813
814         xid = GetXid();
815
816         if (*poffset > file->f_path.dentry->d_inode->i_size)
817                 long_op = 2; /* writes past end of file can take a long time */
818         else
819                 long_op = 1;
820
821         for (total_written = 0; write_size > total_written;
822              total_written += bytes_written) {
823                 rc = -EAGAIN;
824                 while (rc == -EAGAIN) {
825                         if (file->private_data == NULL) {
826                                 /* file has been closed on us */
827                                 FreeXid(xid);
828                         /* if we have gotten here we have written some data
829                            and blocked, and the file has been freed on us while
830                            we blocked so return what we managed to write */
831                                 return total_written;
832                         }
833                         if (open_file->closePend) {
834                                 FreeXid(xid);
835                                 if (total_written)
836                                         return total_written;
837                                 else
838                                         return -EBADF;
839                         }
840                         if (open_file->invalidHandle) {
841                                 /* we could deadlock if we called
842                                    filemap_fdatawait from here so tell
843                                    reopen_file not to flush data to server
844                                    now */
845                                 rc = cifs_reopen_file(file, FALSE);
846                                 if (rc != 0)
847                                         break;
848                         }
849
850                         rc = CIFSSMBWrite(xid, pTcon,
851                                 open_file->netfid,
852                                 min_t(const int, cifs_sb->wsize,
853                                       write_size - total_written),
854                                 *poffset, &bytes_written,
855                                 NULL, write_data + total_written, long_op);
856                 }
857                 if (rc || (bytes_written == 0)) {
858                         if (total_written)
859                                 break;
860                         else {
861                                 FreeXid(xid);
862                                 return rc;
863                         }
864                 } else
865                         *poffset += bytes_written;
866                 long_op = FALSE; /* subsequent writes fast -
867                                     15 seconds is plenty */
868         }
869
870         cifs_stats_bytes_written(pTcon, total_written);
871
872         /* since the write may have blocked check these pointers again */
873         if ((file->f_path.dentry) && (file->f_path.dentry->d_inode)) {
874                 struct inode *inode = file->f_path.dentry->d_inode;
875 /* Do not update local mtime - server will set its actual value on write
876  *              inode->i_ctime = inode->i_mtime =
877  *                      current_fs_time(inode->i_sb);*/
878                 if (total_written > 0) {
879                         spin_lock(&inode->i_lock);
880                         if (*poffset > file->f_path.dentry->d_inode->i_size)
881                                 i_size_write(file->f_path.dentry->d_inode,
882                                         *poffset);
883                         spin_unlock(&inode->i_lock);
884                 }
885                 mark_inode_dirty_sync(file->f_path.dentry->d_inode);
886         }
887         FreeXid(xid);
888         return total_written;
889 }
890
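/* Kernel-buffer counterpart of cifs_user_write, used by the page
   writeback paths.  When signing is off (or experimental support is
   enabled) the data is sent with the vectored CIFSSMBWrite2 call
   instead of CIFSSMBWrite. */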
891 static ssize_t cifs_write(struct file *file, const char *write_data,
892         size_t write_size, loff_t *poffset)
893 {
894         int rc = 0;
895         unsigned int bytes_written = 0;
896         unsigned int total_written;
897         struct cifs_sb_info *cifs_sb;
898         struct cifsTconInfo *pTcon;
899         int xid, long_op;
900         struct cifsFileInfo *open_file;
901
902         cifs_sb = CIFS_SB(file->f_path.dentry->d_sb);
903
904         pTcon = cifs_sb->tcon;
905
906         cFYI(1, ("write %zd bytes to offset %lld of %s", write_size,
907            *poffset, file->f_path.dentry->d_name.name));
908
909         if (file->private_data == NULL)
910                 return -EBADF;
911         open_file = (struct cifsFileInfo *)file->private_data;
912
913         xid = GetXid();
914
915         if (*poffset > file->f_path.dentry->d_inode->i_size)
916                 long_op = 2; /* writes past end of file can take a long time */
917         else
918                 long_op = 1;
919
920         for (total_written = 0; write_size > total_written;
921              total_written += bytes_written) {
922                 rc = -EAGAIN;
923                 while (rc == -EAGAIN) {
924                         if (file->private_data == NULL) {
925                                 /* file has been closed on us */
926                                 FreeXid(xid);
927                         /* if we have gotten here we have written some data
928                            and blocked, and the file has been freed on us
929                            while we blocked so return what we managed to
930                            write */
931                                 return total_written;
932                         }
933                         if (open_file->closePend) {
934                                 FreeXid(xid);
935                                 if (total_written)
936                                         return total_written;
937                                 else
938                                         return -EBADF;
939                         }
940                         if (open_file->invalidHandle) {
941                                 /* we could deadlock if we called
942                                    filemap_fdatawait from here so tell
943                                    reopen_file not to flush data to
944                                    server now */
945                                 rc = cifs_reopen_file(file, FALSE);
946                                 if (rc != 0)
947                                         break;
948                         }
949                         if (experimEnabled || (pTcon->ses->server &&
950                                 ((pTcon->ses->server->secMode &
951                                 (SECMODE_SIGN_REQUIRED | SECMODE_SIGN_ENABLED))
952                                 == 0))) {
953                                 struct kvec iov[2];
954                                 unsigned int len;
955
956                                 len = min((size_t)cifs_sb->wsize,
957                                           write_size - total_written);
958                                 /* iov[0] is reserved for smb header */
959                                 iov[1].iov_base = (char *)write_data +
960                                                   total_written;
961                                 iov[1].iov_len = len;
962                                 rc = CIFSSMBWrite2(xid, pTcon,
963                                                 open_file->netfid, len,
964                                                 *poffset, &bytes_written,
965                                                 iov, 1, long_op);
966                         } else
967                                 rc = CIFSSMBWrite(xid, pTcon,
968                                          open_file->netfid,
969                                          min_t(const int, cifs_sb->wsize,
970                                                write_size - total_written),
971                                          *poffset, &bytes_written,
972                                          write_data + total_written,
973                                          NULL, long_op);
974                 }
975                 if (rc || (bytes_written == 0)) {
976                         if (total_written)
977                                 break;
978                         else {
979                                 FreeXid(xid);
980                                 return rc;
981                         }
982                 } else
983                         *poffset += bytes_written;
984                 long_op = FALSE; /* subsequent writes fast -
985                                     15 seconds is plenty */
986         }
987
988         cifs_stats_bytes_written(pTcon, total_written);
989
990         /* since the write may have blocked check these pointers again */
991         if ((file->f_path.dentry) && (file->f_path.dentry->d_inode)) {
992 /* BB We could make this contingent on superblock ATIME flag too */
993 /*              file->f_path.dentry->d_inode->i_ctime =
994                 file->f_path.dentry->d_inode->i_mtime = CURRENT_TIME;*/
995                 if (total_written > 0) {
996                         spin_lock(&file->f_path.dentry->d_inode->i_lock);
997                         if (*poffset > file->f_path.dentry->d_inode->i_size)
998                                 i_size_write(file->f_path.dentry->d_inode,
999                                              *poffset);
1000                         spin_unlock(&file->f_path.dentry->d_inode->i_lock);
1001                 }
1002                 mark_inode_dirty_sync(file->f_path.dentry->d_inode);
1003         }
1004         FreeXid(xid);
1005         return total_written;
1006 }
1007
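/* Find an open file handle for this inode that was opened for writing.
   The returned handle has its wrtPending count incremented; the caller
   must atomic_dec() it once the write is finished. */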
1008 struct cifsFileInfo *find_writable_file(struct cifsInodeInfo *cifs_inode)
1009 {
1010         struct cifsFileInfo *open_file;
1011         int rc;
1012
1013         /* Having a null inode here (because mapping->host was set to zero by
1014            the VFS or MM) should not happen, but we had reports of an oops (due
1015            to it being zero) during stress testcases, so we need to check for it */
1016
1017         if (cifs_inode == NULL) {
1018                 cERROR(1, ("Null inode passed to cifs_writeable_file"));
1019                 dump_stack();
1020                 return NULL;
1021         }
1022
1023         read_lock(&GlobalSMBSeslock);
1024         list_for_each_entry(open_file, &cifs_inode->openFileList, flist) {
1025                 if (open_file->closePend)
1026                         continue;
1027                 if (open_file->pfile &&
1028                     ((open_file->pfile->f_flags & O_RDWR) ||
1029                      (open_file->pfile->f_flags & O_WRONLY))) {
1030                         atomic_inc(&open_file->wrtPending);
1031                         read_unlock(&GlobalSMBSeslock);
1032                         if ((open_file->invalidHandle) &&
1033                            (!open_file->closePend) /* BB fixme - since the second clause can not be true, remove it BB */) {
1034                                 rc = cifs_reopen_file(open_file->pfile, FALSE);
1035                                 /* if it fails, try another handle - might be */
1036                                 /* dangerous to hold up writepages with retry */
1037                                 if (rc) {
1038                                         cFYI(1,
1039                                               ("failed on reopen file in wp"));
1040                                         read_lock(&GlobalSMBSeslock);
1041                                         /* can not use this handle, no write
1042                                         pending on this one after all */
1043                                         atomic_dec
1044                                              (&open_file->wrtPending);
1045                                         continue;
1046                                 }
1047                         }
1048                         return open_file;
1049                 }
1050         }
1051         read_unlock(&GlobalSMBSeslock);
1052         return NULL;
1053 }
1054
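/* Write the dirty bytes in [from, to) of this page back to the server
   through a writable file handle for the inode */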
1055 static int cifs_partialpagewrite(struct page *page, unsigned from, unsigned to)
1056 {
1057         struct address_space *mapping = page->mapping;
1058         loff_t offset = (loff_t)page->index << PAGE_CACHE_SHIFT;
1059         char *write_data;
1060         int rc = -EFAULT;
1061         int bytes_written = 0;
1062         struct cifs_sb_info *cifs_sb;
1063         struct cifsTconInfo *pTcon;
1064         struct inode *inode;
1065         struct cifsFileInfo *open_file;
1066
1067         if (!mapping || !mapping->host)
1068                 return -EFAULT;
1069
1070         inode = page->mapping->host;
1071         cifs_sb = CIFS_SB(inode->i_sb);
1072         pTcon = cifs_sb->tcon;
1073
1074         offset += (loff_t)from;
1075         write_data = kmap(page);
1076         write_data += from;
1077
1078         if ((to > PAGE_CACHE_SIZE) || (from > to)) {
1079                 kunmap(page);
1080                 return -EIO;
1081         }
1082
1083         /* racing with truncate? */
1084         if (offset > mapping->host->i_size) {
1085                 kunmap(page);
1086                 return 0; /* don't care */
1087         }
1088
1089         /* check to make sure that we are not extending the file */
1090         if (mapping->host->i_size - offset < (loff_t)to)
1091                 to = (unsigned)(mapping->host->i_size - offset);
1092
1093         open_file = find_writable_file(CIFS_I(mapping->host));
1094         if (open_file) {
1095                 bytes_written = cifs_write(open_file->pfile, write_data,
1096                                            to-from, &offset);
1097                 atomic_dec(&open_file->wrtPending);
1098                 /* Does mm or vfs already set times? */
1099                 inode->i_atime = inode->i_mtime = current_fs_time(inode->i_sb);
1100                 if ((bytes_written > 0) && (offset)) {
1101                         rc = 0;
1102                 } else if (bytes_written < 0) {
1103                         if (rc != -EBADF)
1104                                 rc = bytes_written;
1105                 }
1106         } else {
1107                 cFYI(1, ("No writeable filehandles for inode"));
1108                 rc = -EIO;
1109         }
1110
1111         kunmap(page);
1112         return rc;
1113 }
1114
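/* Write back dirty pages of this address space, coalescing runs of
   contiguous dirty pages (up to wsize bytes) into a single vectored
   CIFSSMBWrite2 request where possible */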
1115 static int cifs_writepages(struct address_space *mapping,
1116                            struct writeback_control *wbc)
1117 {
1118         struct backing_dev_info *bdi = mapping->backing_dev_info;
1119         unsigned int bytes_to_write;
1120         unsigned int bytes_written;
1121         struct cifs_sb_info *cifs_sb;
1122         int done = 0;
1123         pgoff_t end;
1124         pgoff_t index;
1125         int range_whole = 0;
1126         struct kvec *iov;
1127         int len;
1128         int n_iov = 0;
1129         pgoff_t next;
1130         int nr_pages;
1131         __u64 offset = 0;
1132         struct cifsFileInfo *open_file;
1133         struct page *page;
1134         struct pagevec pvec;
1135         int rc = 0;
1136         int scanned = 0;
1137         int xid;
1138
1139         cifs_sb = CIFS_SB(mapping->host->i_sb);
1140
1141         /*
1142          * If wsize is smaller than the page cache size, default to writing
1143          * one page at a time via cifs_writepage
1144          */
1145         if (cifs_sb->wsize < PAGE_CACHE_SIZE)
1146                 return generic_writepages(mapping, wbc);
1147
1148         if ((cifs_sb->tcon->ses) && (cifs_sb->tcon->ses->server))
1149                 if (cifs_sb->tcon->ses->server->secMode &
1150                                 (SECMODE_SIGN_REQUIRED | SECMODE_SIGN_ENABLED))
1151                         if (!experimEnabled)
1152                                 return generic_writepages(mapping, wbc);
1153
1154         iov = kmalloc(32 * sizeof(struct kvec), GFP_KERNEL);
1155         if (iov == NULL)
1156                 return generic_writepages(mapping, wbc);
1157
1158
1159         /*
1160          * BB: Is this meaningful for a non-block-device file system?
1161          * If it is, we should test it again after we do I/O
1162          */
1163         if (wbc->nonblocking && bdi_write_congested(bdi)) {
1164                 wbc->encountered_congestion = 1;
1165                 kfree(iov);
1166                 return 0;
1167         }
1168
1169         xid = GetXid();
1170
1171         pagevec_init(&pvec, 0);
1172         if (wbc->range_cyclic) {
1173                 index = mapping->writeback_index; /* Start from prev offset */
1174                 end = -1;
1175         } else {
1176                 index = wbc->range_start >> PAGE_CACHE_SHIFT;
1177                 end = wbc->range_end >> PAGE_CACHE_SHIFT;
1178                 if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX)
1179                         range_whole = 1;
1180                 scanned = 1;
1181         }
1182 retry:
1183         while (!done && (index <= end) &&
1184                (nr_pages = pagevec_lookup_tag(&pvec, mapping, &index,
1185                         PAGECACHE_TAG_DIRTY,
1186                         min(end - index, (pgoff_t)PAGEVEC_SIZE - 1) + 1))) {
1187                 int first;
1188                 unsigned int i;
1189
1190                 first = -1;
1191                 next = 0;
1192                 n_iov = 0;
1193                 bytes_to_write = 0;
1194
1195                 for (i = 0; i < nr_pages; i++) {
1196                         page = pvec.pages[i];
1197                         /*
1198                          * At this point we hold neither mapping->tree_lock nor
1199                          * lock on the page itself: the page may be truncated or
1200                          * invalidated (changing page->mapping to NULL), or even
1201                          * swizzled back from swapper_space to tmpfs file
1202                          * mapping
1203                          */
1204
1205                         if (first < 0)
1206                                 lock_page(page);
1207                         else if (TestSetPageLocked(page))
1208                                 break;
1209
1210                         if (unlikely(page->mapping != mapping)) {
1211                                 unlock_page(page);
1212                                 break;
1213                         }
1214
1215                         if (!wbc->range_cyclic && page->index > end) {
1216                                 done = 1;
1217                                 unlock_page(page);
1218                                 break;
1219                         }
1220
1221                         if (next && (page->index != next)) {
1222                                 /* Not next consecutive page */
1223                                 unlock_page(page);
1224                                 break;
1225                         }
1226
1227                         if (wbc->sync_mode != WB_SYNC_NONE)
1228                                 wait_on_page_writeback(page);
1229
1230                         if (PageWriteback(page) ||
1231                                         !clear_page_dirty_for_io(page)) {
1232                                 unlock_page(page);
1233                                 break;
1234                         }
1235
1236                         /*
1237                          * This actually clears the dirty bit in the radix tree.
1238                          * See cifs_writepage() for more commentary.
1239                          */
1240                         set_page_writeback(page);
1241
1242                         if (page_offset(page) >= mapping->host->i_size) {
1243                                 done = 1;
1244                                 unlock_page(page);
1245                                 end_page_writeback(page);
1246                                 break;
1247                         }
1248
1249                         /*
1250                          * BB can we get rid of this?  pages are held by pvec
1251                          */
1252                         page_cache_get(page);
1253
1254                         len = min(mapping->host->i_size - page_offset(page),
1255                                   (loff_t)PAGE_CACHE_SIZE);
1256
1257                         /* reserve iov[0] for the smb header */
1258                         n_iov++;
1259                         iov[n_iov].iov_base = kmap(page);
1260                         iov[n_iov].iov_len = len;
1261                         bytes_to_write += len;
1262
1263                         if (first < 0) {
1264                                 first = i;
1265                                 offset = page_offset(page);
1266                         }
1267                         next = page->index + 1;
1268                         if (bytes_to_write + PAGE_CACHE_SIZE > cifs_sb->wsize)
1269                                 break;
1270                 }
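                /*
                 * The pages collected above are contiguous in the file and
                 * their combined length is limited by the negotiated wsize,
                 * so they can be sent to the server as a single write.
                 */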
1271                 if (n_iov) {
1272                         /* Search for a writable handle every time we call
1273                          * CIFSSMBWrite2.  We can't rely on the last handle
1274                          * we used to still be valid
1275                          */
1276                         open_file = find_writable_file(CIFS_I(mapping->host));
1277                         if (!open_file) {
1278                                 cERROR(1, ("No writable handles for inode"));
1279                                 rc = -EBADF;
1280                         } else {
1281                                 rc = CIFSSMBWrite2(xid, cifs_sb->tcon,
1282                                                    open_file->netfid,
1283                                                    bytes_to_write, offset,
1284                                                    &bytes_written, iov, n_iov,
1285                                                    1);
1286                                 atomic_dec(&open_file->wrtPending);
1287                                 if (rc || bytes_written < bytes_to_write) {
1288                                         cERROR(1, ("Write2 ret %d, written = %d",
1289                                                    rc, bytes_written));
1290                                         /* BB what if continued retry is
1291                                            requested via mount flags? */
1292                                         set_bit(AS_EIO, &mapping->flags);
1293                                 } else {
1294                                         cifs_stats_bytes_written(cifs_sb->tcon,
1295                                                                  bytes_written);
1296                                 }
1297                         }
1298                         for (i = 0; i < n_iov; i++) {
1299                                 page = pvec.pages[first + i];
1300                                 /* Should we also set the page error flag
1301                                 when rc is 0 but too little data was written? */
1302                                 /* BB investigate retry logic on temporary
1303                                 server crash cases and how recovery works
1304                                 when page marked as error */
1305                                 if (rc)
1306                                         SetPageError(page);
1307                                 kunmap(page);
1308                                 unlock_page(page);
1309                                 end_page_writeback(page);
1310                                 page_cache_release(page);
1311                         }
1312                         if ((wbc->nr_to_write -= n_iov) <= 0)
1313                                 done = 1;
1314                         index = next;
1315                 }
1316                 pagevec_release(&pvec);
1317         }
1318         if (!scanned && !done) {
1319                 /*
1320                  * We hit the last page and there is more work to be done: wrap
1321                  * back to the start of the file
1322                  */
1323                 scanned = 1;
1324                 index = 0;
1325                 goto retry;
1326         }
1327         if (wbc->range_cyclic || (range_whole && wbc->nr_to_write > 0))
1328                 mapping->writeback_index = index;
1329
1330         FreeXid(xid);
1331         kfree(iov);
1332         return rc;
1333 }
1334
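/*
 * Write a single dirty page back to the server.  The actual send is
 * done by cifs_partialpagewrite(); this wrapper handles the writeback
 * flag, page lock and page reference bookkeeping expected of a
 * ->writepage implementation.
 */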
1335 static int cifs_writepage(struct page *page, struct writeback_control *wbc)
1336 {
1337         int rc = -EFAULT;
1338         int xid;
1339
1340         xid = GetXid();
1341 /* BB add check for wbc flags */
1342         page_cache_get(page);
1343         if (!PageUptodate(page))
1344                 cFYI(1, ("ppw - page not up to date"));
1346
1347         /*
1348          * Set the "writeback" flag, and clear "dirty" in the radix tree.
1349          *
1350          * A writepage() implementation always needs to do either this,
1351          * or re-dirty the page with "redirty_page_for_writepage()" in
1352          * the case of a failure.
1353          *
1354          * Just unlocking the page will cause the radix tree tag-bits
1355          * to fail to update with the state of the page correctly.
1356          */
1357         set_page_writeback(page);
1358         rc = cifs_partialpagewrite(page, 0, PAGE_CACHE_SIZE);
1359         SetPageUptodate(page); /* BB add check for error and Clearuptodate? */
1360         unlock_page(page);
1361         end_page_writeback(page);
1362         page_cache_release(page);
1363         FreeXid(xid);
1364         return rc;
1365 }
1366
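/*
 * Called after data has been copied into the page.  Extends i_size if
 * the write went past the current end of file.  If the page is not up
 * to date, the new bytes are pushed to the server immediately via
 * cifs_write(); otherwise the page is simply marked dirty for later
 * writeback.
 */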
1367 static int cifs_commit_write(struct file *file, struct page *page,
1368         unsigned offset, unsigned to)
1369 {
1370         int xid;
1371         int rc = 0;
1372         struct inode *inode = page->mapping->host;
1373         loff_t position = ((loff_t)page->index << PAGE_CACHE_SHIFT) + to;
1374         char *page_data;
1375
1376         xid = GetXid();
1377         cFYI(1, ("commit write for page %p up to position %lld for %d",
1378                  page, position, to));
1379         spin_lock(&inode->i_lock);
1380         if (position > inode->i_size)
1381                 i_size_write(inode, position);
1383         spin_unlock(&inode->i_lock);
1384         if (!PageUptodate(page)) {
1385                 position = ((loff_t)page->index << PAGE_CACHE_SHIFT) + offset;
1386                 /* can not rely on (or let) writepage write this data */
1387                 if (to < offset) {
1388                         cFYI(1, ("Illegal offsets, can not copy from %d to %d",
1389                                 offset, to));
1390                         FreeXid(xid);
1391                         return rc;
1392                 }
1393                 /* this is probably better than directly calling
1394                    partialpage_write since in this function the file handle is
1395                    known, which we might as well leverage */
1396                 /* BB check if anything else missing out of ppw
1397                    such as updating last write time */
1398                 page_data = kmap(page);
1399                 rc = cifs_write(file, page_data + offset, to-offset,
1400                                 &position);
1401                 if (rc > 0)
1402                         rc = 0;
1403                 /* else if (rc < 0) should we set writebehind rc? */
1404                 kunmap(page);
1405         } else {
1406                 set_page_dirty(page);
1407         }
1408
1409         FreeXid(xid);
1410         return rc;
1411 }
1412
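/*
 * Push dirty pages for this inode out to the server.  A successful
 * filemap_fdatawrite() also clears any previously recorded
 * write-behind error.
 */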
1413 int cifs_fsync(struct file *file, struct dentry *dentry, int datasync)
1414 {
1415         int xid;
1416         int rc = 0;
1417         struct inode *inode = file->f_path.dentry->d_inode;
1418
1419         xid = GetXid();
1420
1421         cFYI(1, ("Sync file - name: %s datasync: 0x%x",
1422                 dentry->d_name.name, datasync));
1423
1424         rc = filemap_fdatawrite(inode->i_mapping);
1425         if (rc == 0)
1426                 CIFS_I(inode)->write_behind_rc = 0;
1427         FreeXid(xid);
1428         return rc;
1429 }
1430
1431 /* static void cifs_sync_page(struct page *page)
1432 {
1433         struct address_space *mapping;
1434         struct inode *inode;
1435         unsigned long index = page->index;
1436         unsigned int rpages = 0;
1437         int rc = 0;
1438
1439         cFYI(1, ("sync page %p", page));
1440         mapping = page->mapping;
1441         if (!mapping)
1442                 return 0;
1443         inode = mapping->host;
1444         if (!inode)
1445                 return; */
1446
1447 /*      fill in rpages then
1448         result = cifs_pagein_inode(inode, index, rpages); */ /* BB finish */
1449
1450 /*      cFYI(1, ("rpages is %d for sync page of Index %ld", rpages, index));
1451
1452 #if 0
1453         if (rc < 0)
1454                 return rc;
1455         return 0;
1456 #endif
1457 } */
1458
1459 /*
1460  * As file closes, flush all cached write data for this inode checking
1461  * for write behind errors.
1462  */
1463 int cifs_flush(struct file *file, fl_owner_t id)
1464 {
1465         struct inode *inode = file->f_path.dentry->d_inode;
1466         int rc = 0;
1467
1468         /* Rather than do the steps manually:
1469            lock the inode for writing
1470            loop through pages looking for write behind data (dirty pages)
1471            coalesce into contiguous 16K (or smaller) chunks to write to server
1472            send to server (prefer in parallel)
1473            deal with writebehind errors
1474            unlock inode for writing
1475            filemap_fdatawrite appears easier for the time being */
1476
1477         rc = filemap_fdatawrite(inode->i_mapping);
1478         if (!rc) /* reset wb rc if we were able to write out dirty pages */
1479                 CIFS_I(inode)->write_behind_rc = 0;
1480
1481         cFYI(1, ("Flush inode %p file %p rc %d", inode, file, rc));
1482
1483         return rc;
1484 }
1485
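/*
 * Read directly into a user buffer, bypassing the page cache.  Each
 * request is capped at the negotiated rsize and the payload is copied
 * out of the SMB response buffer with copy_to_user().
 */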
1486 ssize_t cifs_user_read(struct file *file, char __user *read_data,
1487         size_t read_size, loff_t *poffset)
1488 {
1489         int rc = -EACCES;
1490         unsigned int bytes_read = 0;
1491         unsigned int total_read = 0;
1492         unsigned int current_read_size;
1493         struct cifs_sb_info *cifs_sb;
1494         struct cifsTconInfo *pTcon;
1495         int xid;
1496         struct cifsFileInfo *open_file;
1497         char *smb_read_data;
1498         char __user *current_offset;
1499         struct smb_com_read_rsp *pSMBr;
1500
1501         xid = GetXid();
1502         cifs_sb = CIFS_SB(file->f_path.dentry->d_sb);
1503         pTcon = cifs_sb->tcon;
1504
1505         if (file->private_data == NULL) {
1506                 FreeXid(xid);
1507                 return -EBADF;
1508         }
1509         open_file = (struct cifsFileInfo *)file->private_data;
1510
1511         if ((file->f_flags & O_ACCMODE) == O_WRONLY)
1512                 cFYI(1, ("attempting read on write only file instance"));
1514         for (total_read = 0, current_offset = read_data;
1515              read_size > total_read;
1516              total_read += bytes_read, current_offset += bytes_read) {
1517                 current_read_size = min_t(const int, read_size - total_read,
1518                                           cifs_sb->rsize);
1519                 rc = -EAGAIN;
1520                 smb_read_data = NULL;
1521                 while (rc == -EAGAIN) {
1522                         int buf_type = CIFS_NO_BUFFER;
1523                         if ((open_file->invalidHandle) &&
1524                             (!open_file->closePend)) {
1525                                 rc = cifs_reopen_file(file, TRUE);
1526                                 if (rc != 0)
1527                                         break;
1528                         }
1529                         rc = CIFSSMBRead(xid, pTcon,
1530                                          open_file->netfid,
1531                                          current_read_size, *poffset,
1532                                          &bytes_read, &smb_read_data,
1533                                          &buf_type);
1534                         pSMBr = (struct smb_com_read_rsp *)smb_read_data;
1535                         if (smb_read_data) {
1536                                 if (copy_to_user(current_offset,
1537                                                 smb_read_data +
1538                                                 4 /* RFC1001 length field */ +
1539                                                 le16_to_cpu(pSMBr->DataOffset),
1540                                                 bytes_read)) {
1541                                         rc = -EFAULT;
1542                                 }
1543
1544                                 if (buf_type == CIFS_SMALL_BUFFER)
1545                                         cifs_small_buf_release(smb_read_data);
1546                                 else if (buf_type == CIFS_LARGE_BUFFER)
1547                                         cifs_buf_release(smb_read_data);
1548                                 smb_read_data = NULL;
1549                         }
1550                 }
1551                 if (rc || (bytes_read == 0)) {
1552                         if (total_read) {
1553                                 break;
1554                         } else {
1555                                 FreeXid(xid);
1556                                 return rc;
1557                         }
1558                 } else {
1559                         cifs_stats_bytes_read(pTcon, bytes_read);
1560                         *poffset += bytes_read;
1561                 }
1562         }
1563         FreeXid(xid);
1564         return total_read;
1565 }
1566
1567
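/*
 * Read into a kernel buffer (used by readpage).  Unlike cifs_user_read
 * the data is received straight into the caller's buffer, so there is
 * no response buffer to copy from or release here.
 */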
1568 static ssize_t cifs_read(struct file *file, char *read_data, size_t read_size,
1569         loff_t *poffset)
1570 {
1571         int rc = -EACCES;
1572         unsigned int bytes_read = 0;
1573         unsigned int total_read;
1574         unsigned int current_read_size;
1575         struct cifs_sb_info *cifs_sb;
1576         struct cifsTconInfo *pTcon;
1577         int xid;
1578         char *current_offset;
1579         struct cifsFileInfo *open_file;
1580         int buf_type = CIFS_NO_BUFFER;
1581
1582         xid = GetXid();
1583         cifs_sb = CIFS_SB(file->f_path.dentry->d_sb);
1584         pTcon = cifs_sb->tcon;
1585
1586         if (file->private_data == NULL) {
1587                 FreeXid(xid);
1588                 return -EBADF;
1589         }
1590         open_file = (struct cifsFileInfo *)file->private_data;
1591
1592         if ((file->f_flags & O_ACCMODE) == O_WRONLY)
1593                 cFYI(1, ("attempting read on write only file instance"));
1594
1595         for (total_read = 0, current_offset = read_data;
1596              read_size > total_read;
1597              total_read += bytes_read, current_offset += bytes_read) {
1598                 current_read_size = min_t(const int, read_size - total_read,
1599                                           cifs_sb->rsize);
1600                 /* For Windows ME and 9x we do not want to request more
1601                 than was negotiated, since the server will refuse the read */
1602                 if ((pTcon->ses) &&
1603                         !(pTcon->ses->capabilities & CAP_LARGE_FILES)) {
1604                         current_read_size = min_t(const int, current_read_size,
1605                                         pTcon->ses->server->maxBuf - 128);
1606                 }
1607                 rc = -EAGAIN;
1608                 while (rc == -EAGAIN) {
1609                         if ((open_file->invalidHandle) &&
1610                             (!open_file->closePend)) {
1611                                 rc = cifs_reopen_file(file, TRUE);
1612                                 if (rc != 0)
1613                                         break;
1614                         }
1615                         rc = CIFSSMBRead(xid, pTcon,
1616                                          open_file->netfid,
1617                                          current_read_size, *poffset,
1618                                          &bytes_read, &current_offset,
1619                                          &buf_type);
1620                 }
1621                 if (rc || (bytes_read == 0)) {
1622                         if (total_read) {
1623                                 break;
1624                         } else {
1625                                 FreeXid(xid);
1626                                 return rc;
1627                         }
1628                 } else {
1629                         cifs_stats_bytes_read(pTcon, bytes_read);
1630                         *poffset += bytes_read;
1631                 }
1632         }
1633         FreeXid(xid);
1634         return total_read;
1635 }
1636
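/*
 * mmap support: revalidate the inode so cached pages are consistent
 * with the server before handing off to generic_file_mmap().
 */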
1637 int cifs_file_mmap(struct file *file, struct vm_area_struct *vma)
1638 {
1639         struct dentry *dentry = file->f_path.dentry;
1640         int rc, xid;
1641
1642         xid = GetXid();
1643         rc = cifs_revalidate(dentry);
1644         if (rc) {
1645                 cFYI(1, ("Validation prior to mmap failed, error=%d", rc));
1646                 FreeXid(xid);
1647                 return rc;
1648         }
1649         rc = generic_file_mmap(file, vma);
1650         FreeXid(xid);
1651         return rc;
1652 }
1653
1654
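/*
 * Copy one SMB read response into the pages queued on the readahead
 * list: add each page to the page cache, fill it (zeroing the tail of
 * a final partial page), mark it uptodate and queue it for the LRU.
 */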
1655 static void cifs_copy_cache_pages(struct address_space *mapping,
1656         struct list_head *pages, int bytes_read, char *data,
1657         struct pagevec *plru_pvec)
1658 {
1659         struct page *page;
1660         char *target;
1661
1662         while (bytes_read > 0) {
1663                 if (list_empty(pages))
1664                         break;
1665
1666                 page = list_entry(pages->prev, struct page, lru);
1667                 list_del(&page->lru);
1668
1669                 if (add_to_page_cache(page, mapping, page->index,
1670                                       GFP_KERNEL)) {
1671                         page_cache_release(page);
1672                         cFYI(1, ("Add page cache failed"));
1673                         data += PAGE_CACHE_SIZE;
1674                         bytes_read -= PAGE_CACHE_SIZE;
1675                         continue;
1676                 }
1677
1678                 target = kmap_atomic(page, KM_USER0);
1679
1680                 if (PAGE_CACHE_SIZE > bytes_read) {
1681                         memcpy(target, data, bytes_read);
1682                         /* zero the tail end of this partial page */
1683                         memset(target + bytes_read, 0,
1684                                PAGE_CACHE_SIZE - bytes_read);
1685                         bytes_read = 0;
1686                 } else {
1687                         memcpy(target, data, PAGE_CACHE_SIZE);
1688                         bytes_read -= PAGE_CACHE_SIZE;
1689                 }
1690                 kunmap_atomic(target, KM_USER0);
1691
1692                 flush_dcache_page(page);
1693                 SetPageUptodate(page);
1694                 unlock_page(page);
1695                 if (!pagevec_add(plru_pvec, page))
1696                         __pagevec_lru_add(plru_pvec);
1697                 data += PAGE_CACHE_SIZE;
1698         }
1699         return;
1700 }
1701
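/*
 * Readahead: find runs of adjacent pages on the readahead list and
 * issue one SMB read per run (bounded by rsize, rounded down to a page
 * multiple), then populate the page cache via cifs_copy_cache_pages().
 */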
1702 static int cifs_readpages(struct file *file, struct address_space *mapping,
1703         struct list_head *page_list, unsigned num_pages)
1704 {
1705         int rc = -EACCES;
1706         int xid;
1707         loff_t offset;
1708         struct page *page;
1709         struct cifs_sb_info *cifs_sb;
1710         struct cifsTconInfo *pTcon;
1711         int bytes_read = 0;
1712         unsigned int read_size, i;
1713         char *smb_read_data = NULL;
1714         struct smb_com_read_rsp *pSMBr;
1715         struct pagevec lru_pvec;
1716         struct cifsFileInfo *open_file;
1717         int buf_type = CIFS_NO_BUFFER;
1718
1719         xid = GetXid();
1720         if (file->private_data == NULL) {
1721                 FreeXid(xid);
1722                 return -EBADF;
1723         }
1724         open_file = (struct cifsFileInfo *)file->private_data;
1725         cifs_sb = CIFS_SB(file->f_path.dentry->d_sb);
1726         pTcon = cifs_sb->tcon;
1727
1728         pagevec_init(&lru_pvec, 0);
1729 #ifdef CONFIG_CIFS_DEBUG2
1730         cFYI(1, ("rpages: num pages %d", num_pages));
1731 #endif
1732         for (i = 0; i < num_pages; ) {
1733                 unsigned contig_pages;
1734                 struct page *tmp_page;
1735                 unsigned long expected_index;
1736
1737                 if (list_empty(page_list))
1738                         break;
1739
1740                 page = list_entry(page_list->prev, struct page, lru);
1741                 offset = (loff_t)page->index << PAGE_CACHE_SHIFT;
1742
1743                 /* count adjacent pages that we will read into */
1744                 contig_pages = 0;
1745                 expected_index =
1746                         list_entry(page_list->prev, struct page, lru)->index;
1747                 list_for_each_entry_reverse(tmp_page, page_list, lru) {
1748                         if (tmp_page->index == expected_index) {
1749                                 contig_pages++;
1750                                 expected_index++;
1751                         } else
1752                                 break;
1753                 }
1754                 if (contig_pages + i > num_pages)
1755                         contig_pages = num_pages - i;
1756
1757                 /* for reads over a certain size could initiate async
1758                    read ahead */
1759
1760                 read_size = contig_pages * PAGE_CACHE_SIZE;
1761                 /* Read size needs to be in multiples of one page */
1762                 read_size = min_t(const unsigned int, read_size,
1763                                   cifs_sb->rsize & PAGE_CACHE_MASK);
1764 #ifdef CONFIG_CIFS_DEBUG2
1765                 cFYI(1, ("rpages: read size 0x%x  contiguous pages %d",
1766                                 read_size, contig_pages));
1767 #endif
1768                 rc = -EAGAIN;
1769                 while (rc == -EAGAIN) {
1770                         if ((open_file->invalidHandle) &&
1771                             (!open_file->closePend)) {
1772                                 rc = cifs_reopen_file(file, TRUE);
1773                                 if (rc != 0)
1774                                         break;
1775                         }
1776
1777                         rc = CIFSSMBRead(xid, pTcon,
1778                                          open_file->netfid,
1779                                          read_size, offset,
1780                                          &bytes_read, &smb_read_data,
1781                                          &buf_type);
1782                         /* BB more RC checks ? */
1783                         if (rc == -EAGAIN) {
1784                                 if (smb_read_data) {
1785                                         if (buf_type == CIFS_SMALL_BUFFER)
1786                                                 cifs_small_buf_release(smb_read_data);
1787                                         else if (buf_type == CIFS_LARGE_BUFFER)
1788                                                 cifs_buf_release(smb_read_data);
1789                                         smb_read_data = NULL;
1790                                 }
1791                         }
1792                 }
1793                 if ((rc < 0) || (smb_read_data == NULL)) {
1794                         cFYI(1, ("Read error in readpages: %d", rc));
1795                         break;
1796                 } else if (bytes_read > 0) {
1797                         task_io_account_read(bytes_read);
1798                         pSMBr = (struct smb_com_read_rsp *)smb_read_data;
1799                         cifs_copy_cache_pages(mapping, page_list, bytes_read,
1800                                 smb_read_data + 4 /* RFC1001 hdr */ +
1801                                 le16_to_cpu(pSMBr->DataOffset), &lru_pvec);
1802
1803                         i += bytes_read >> PAGE_CACHE_SHIFT;
1804                         cifs_stats_bytes_read(pTcon, bytes_read);
1805                         if ((int)(bytes_read & PAGE_CACHE_MASK) != bytes_read) {
1806                                 i++; /* account for partial page */
1807
1808                                 /* server copy of file can have smaller size
1809                                    than client */
1810                                 /* BB do we need to verify this common case?
1811                                    this case is ok - if we are at server EOF
1812                                    we will hit it on next read */
1813
1814                                 /* break; */
1815                         }
1816                 } else {
1817                         cFYI(1, ("No bytes read (%d) at offset %lld. "
1818                                  "Cleaning remaining pages from readahead list",
1819                                  bytes_read, offset));
1820                         /* BB turn off caching and do new lookup on
1821                            file size at server? */
1822                         break;
1823                 }
1824                 if (smb_read_data) {
1825                         if (buf_type == CIFS_SMALL_BUFFER)
1826                                 cifs_small_buf_release(smb_read_data);
1827                         else if (buf_type == CIFS_LARGE_BUFFER)
1828                                 cifs_buf_release(smb_read_data);
1829                         smb_read_data = NULL;
1830                 }
1831                 bytes_read = 0;
1832         }
1833
1834         pagevec_lru_add(&lru_pvec);
1835
1836 /* need to free smb_read_data buf before exit */
1837         if (smb_read_data) {
1838                 if (buf_type == CIFS_SMALL_BUFFER)
1839                         cifs_small_buf_release(smb_read_data);
1840                 else if (buf_type == CIFS_LARGE_BUFFER)
1841                         cifs_buf_release(smb_read_data);
1842                 smb_read_data = NULL;
1843         }
1844
1845         FreeXid(xid);
1846         return rc;
1847 }
1848
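/*
 * Fill a single page via cifs_read(), zeroing whatever tail the server
 * did not return, and mark it uptodate.  The page is left locked for
 * the caller.
 */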
1849 static int cifs_readpage_worker(struct file *file, struct page *page,
1850         loff_t *poffset)
1851 {
1852         char *read_data;
1853         int rc;
1854
1855         page_cache_get(page);
1856         read_data = kmap(page);
1857         /* for reads over a certain size could initiate async read ahead */
1858
1859         rc = cifs_read(file, read_data, PAGE_CACHE_SIZE, poffset);
1860
1861         if (rc < 0)
1862                 goto io_error;
1863         else
1864                 cFYI(1, ("Bytes read %d", rc));
1865
1866         file->f_path.dentry->d_inode->i_atime =
1867                 current_fs_time(file->f_path.dentry->d_inode->i_sb);
1868
1869         if (PAGE_CACHE_SIZE > rc)
1870                 memset(read_data + rc, 0, PAGE_CACHE_SIZE - rc);
1871
1872         flush_dcache_page(page);
1873         SetPageUptodate(page);
1874         rc = 0;
1875
1876 io_error:
1877         kunmap(page);
1878         page_cache_release(page);
1879         return rc;
1880 }
1881
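/*
 * ->readpage entry point: validate the file handle, fill the page via
 * cifs_readpage_worker() and unlock it.
 */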
1882 static int cifs_readpage(struct file *file, struct page *page)
1883 {
1884         loff_t offset = (loff_t)page->index << PAGE_CACHE_SHIFT;
1885         int rc = -EACCES;
1886         int xid;
1887
1888         xid = GetXid();
1889
1890         if (file->private_data == NULL) {
1891                 FreeXid(xid);
1892                 return -EBADF;
1893         }
1894
1895         cFYI(1, ("readpage %p at offset %d 0x%x\n",
1896                  page, (int)offset, (int)offset));
1897
1898         rc = cifs_readpage_worker(file, page, &offset);
1899
1900         unlock_page(page);
1901
1902         FreeXid(xid);
1903         return rc;
1904 }
1905
1906 /* We do not want to update the file size from server for inodes
1907    open for write - to avoid races with writepage extending
1908    the file - in the future we could consider allowing
1909    refreshing the inode only on increases in the file size
1910    but this is tricky to do without racing with writebehind
1911    page caching in the current Linux kernel design */
1912 int is_size_safe_to_change(struct cifsInodeInfo *cifsInode, __u64 end_of_file)
1913 {
1914         struct cifsFileInfo *open_file = NULL;
1915
1916         if (cifsInode)
1917                 open_file = find_writable_file(cifsInode);
1918
1919         if (open_file) {
1920                 struct cifs_sb_info *cifs_sb;
1921
1922                 /* there is not actually a write pending so let
1923                 this handle go free and allow it to
1924                 be closable if needed */
1925                 atomic_dec(&open_file->wrtPending);
1926
1927                 cifs_sb = CIFS_SB(cifsInode->vfs_inode.i_sb);
1928                 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_DIRECT_IO) {
1929                         /* since no page cache to corrupt on directio
1930                         we can change size safely */
1931                         return 1;
1932                 }
1933
1934                 if (i_size_read(&cifsInode->vfs_inode) < end_of_file)
1935                         return 1;
1936
1937                 return 0;
1938         } else
1939                 return 1;
1940 }
1941
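/*
 * Prepare a page for a partial write.  If the write covers the whole
 * page, or the range lies at or beyond end of file, no read from the
 * server is needed; otherwise the existing contents are read in first
 * (when the file was opened with read access).
 */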
1942 static int cifs_prepare_write(struct file *file, struct page *page,
1943         unsigned from, unsigned to)
1944 {
1945         int rc = 0;
1946         loff_t i_size;
1947         loff_t offset;
1948
1949         cFYI(1, ("prepare write for page %p from %d to %d", page, from, to));
1950         if (PageUptodate(page))
1951                 return 0;
1952
1953         /* If we are writing a full page it will be up to date,
1954            no need to read from the server */
1955         if ((to == PAGE_CACHE_SIZE) && (from == 0)) {
1956                 SetPageUptodate(page);
1957                 return 0;
1958         }
1959
1960         offset = (loff_t)page->index << PAGE_CACHE_SHIFT;
1961         i_size = i_size_read(page->mapping->host);
1962
1963         if ((offset >= i_size) ||
1964             ((from == 0) && (offset + to) >= i_size)) {
1965                 /*
1966                  * We don't need to read data beyond the end of the file.
1967                  * zero it, and set the page uptodate
1968                  */
1969                 void *kaddr = kmap_atomic(page, KM_USER0);
1970
1971                 if (from)
1972                         memset(kaddr, 0, from);
1973                 if (to < PAGE_CACHE_SIZE)
1974                         memset(kaddr + to, 0, PAGE_CACHE_SIZE - to);
1975                 flush_dcache_page(page);
1976                 kunmap_atomic(kaddr, KM_USER0);
1977                 SetPageUptodate(page);
1978         } else if ((file->f_flags & O_ACCMODE) != O_WRONLY) {
1979                 /* might as well read a page, it is fast enough */
1980                 rc = cifs_readpage_worker(file, page, &offset);
1981         } else {
1982                 /* we could try using another file handle if there is one -
1983                    but how would we lock it to prevent close of that handle
1984                    racing with this read? In any case
1985                    this will be written out by commit_write so is fine */
1986         }
1987
1988         /* we do not need to pass errors back
1989            e.g. if we do not have read access to the file
1990            because cifs_commit_write will do the right thing.  -- shaggy */
1991
1992         return 0;
1993 }
1994
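/*
 * Default address space operations.  cifs_addr_ops_smallbuf below is
 * the variant without ->readpages for servers whose negotiated buffer
 * cannot hold a full page of data (see its comment).
 */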
1995 const struct address_space_operations cifs_addr_ops = {
1996         .readpage = cifs_readpage,
1997         .readpages = cifs_readpages,
1998         .writepage = cifs_writepage,
1999         .writepages = cifs_writepages,
2000         .prepare_write = cifs_prepare_write,
2001         .commit_write = cifs_commit_write,
2002         .set_page_dirty = __set_page_dirty_nobuffers,
2003         /* .sync_page = cifs_sync_page, */
2004         /* .direct_IO = */
2005 };
2006
2007 /*
2008  * cifs_readpages requires the server to support a buffer large enough to
2009  * contain the header plus one complete page of data.  Otherwise, we need
2010  * to leave cifs_readpages out of the address space operations.
2011  */
2012 const struct address_space_operations cifs_addr_ops_smallbuf = {
2013         .readpage = cifs_readpage,
2014         .writepage = cifs_writepage,
2015         .writepages = cifs_writepages,
2016         .prepare_write = cifs_prepare_write,
2017         .commit_write = cifs_commit_write,
2018         .set_page_dirty = __set_page_dirty_nobuffers,
2019         /* .sync_page = cifs_sync_page, */
2020         /* .direct_IO = */
2021 };