[CIFS] Remove unnecessary checks
1 /*
2  *   fs/cifs/file.c
3  *
4  *   vfs operations that deal with files
5  * 
6  *   Copyright (C) International Business Machines  Corp., 2002,2003
7  *   Author(s): Steve French (sfrench@us.ibm.com)
8  *              Jeremy Allison (jra@samba.org)
9  *
10  *   This library is free software; you can redistribute it and/or modify
11  *   it under the terms of the GNU Lesser General Public License as published
12  *   by the Free Software Foundation; either version 2.1 of the License, or
13  *   (at your option) any later version.
14  *
15  *   This library is distributed in the hope that it will be useful,
16  *   but WITHOUT ANY WARRANTY; without even the implied warranty of
17  *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See
18  *   the GNU Lesser General Public License for more details.
19  *
20  *   You should have received a copy of the GNU Lesser General Public License
21  *   along with this library; if not, write to the Free Software
22  *   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
23  */
24 #include <linux/fs.h>
25 #include <linux/backing-dev.h>
26 #include <linux/stat.h>
27 #include <linux/fcntl.h>
28 #include <linux/pagemap.h>
29 #include <linux/pagevec.h>
30 #include <linux/smp_lock.h>
31 #include <linux/writeback.h>
32 #include <linux/task_io_accounting_ops.h>
33 #include <linux/delay.h>
34 #include <asm/div64.h>
35 #include "cifsfs.h"
36 #include "cifspdu.h"
37 #include "cifsglob.h"
38 #include "cifsproto.h"
39 #include "cifs_unicode.h"
40 #include "cifs_debug.h"
41 #include "cifs_fs_sb.h"
42
43 static inline struct cifsFileInfo *cifs_init_private(
44         struct cifsFileInfo *private_data, struct inode *inode,
45         struct file *file, __u16 netfid)
46 {
47         memset(private_data, 0, sizeof(struct cifsFileInfo));
48         private_data->netfid = netfid;
49         private_data->pid = current->tgid;      
50         init_MUTEX(&private_data->fh_sem);
51         init_MUTEX(&private_data->lock_sem);
52         INIT_LIST_HEAD(&private_data->llist);
53         private_data->pfile = file; /* needed for writepage */
54         private_data->pInode = inode;
55         private_data->invalidHandle = FALSE;
56         private_data->closePend = FALSE;
57         /* we have to track the number of writers to the inode, since
58         writepages does not tell us which handle the write is for, so
59         there can be a close (overlapping with a write) of the filehandle
60         that cifs_writepages chose to use */
61         atomic_set(&private_data->wrtPending,0); 
62
63         return private_data;
64 }
65
66 static inline int cifs_convert_flags(unsigned int flags)
67 {
68         if ((flags & O_ACCMODE) == O_RDONLY)
69                 return GENERIC_READ;
70         else if ((flags & O_ACCMODE) == O_WRONLY)
71                 return GENERIC_WRITE;
72         else if ((flags & O_ACCMODE) == O_RDWR) {
73         /* GENERIC_ALL is too much permission to request; it
74            can cause an unnecessary access denied on create */
75                 /* return GENERIC_ALL; */
76                 return (GENERIC_READ | GENERIC_WRITE);
77         }
78
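           /* BB no O_ACCMODE case matched - 0x20197 is presumably
              FILE_READ_DATA | FILE_WRITE_DATA | FILE_APPEND_DATA |
              FILE_WRITE_EA | FILE_READ_ATTRIBUTES | FILE_WRITE_ATTRIBUTES |
              READ_CONTROL, judging from the bit values BB */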
79         return 0x20197;
80 }
81
82 static inline int cifs_get_disposition(unsigned int flags)
83 {
84         if ((flags & (O_CREAT | O_EXCL)) == (O_CREAT | O_EXCL))
85                 return FILE_CREATE;
86         else if ((flags & (O_CREAT | O_TRUNC)) == (O_CREAT | O_TRUNC))
87                 return FILE_OVERWRITE_IF;
88         else if ((flags & O_CREAT) == O_CREAT)
89                 return FILE_OPEN_IF;
90         else if ((flags & O_TRUNC) == O_TRUNC)
91                 return FILE_OVERWRITE;
92         else
93                 return FILE_OPEN;
94 }
95
96 /* all arguments to this function must be checked for validity in caller */
97 static inline int cifs_open_inode_helper(struct inode *inode, struct file *file,
98         struct cifsInodeInfo *pCifsInode, struct cifsFileInfo *pCifsFile,
99         struct cifsTconInfo *pTcon, int *oplock, FILE_ALL_INFO *buf,
100         char *full_path, int xid)
101 {
102         struct timespec temp;
103         int rc;
104
105         /* want handles we can use to read with first
106            in the list so we do not have to walk the
107            list to search for one in prepare_write */
108         if ((file->f_flags & O_ACCMODE) == O_WRONLY) {
109                 list_add_tail(&pCifsFile->flist, 
110                               &pCifsInode->openFileList);
111         } else {
112                 list_add(&pCifsFile->flist,
113                          &pCifsInode->openFileList);
114         }
115         write_unlock(&GlobalSMBSeslock);
116         if (pCifsInode->clientCanCacheRead) {
117                 /* we have the inode open somewhere else
118                    no need to discard cache data */
119                 goto client_can_cache;
120         }
121
122         /* BB need same check in cifs_create too? */
123         /* if not oplocked, invalidate inode pages if mtime or file
124            size changed */
125         temp = cifs_NTtimeToUnix(le64_to_cpu(buf->LastWriteTime));
126         if (timespec_equal(&file->f_path.dentry->d_inode->i_mtime, &temp) &&
127                            (file->f_path.dentry->d_inode->i_size ==
128                             (loff_t)le64_to_cpu(buf->EndOfFile))) {
129                 cFYI(1, ("inode unchanged on server"));
130         } else {
131                 if (file->f_path.dentry->d_inode->i_mapping) {
132                 /* BB no need to lock inode until after invalidate
133                    since namei code should already have it locked? */
134                         filemap_write_and_wait(file->f_path.dentry->d_inode->i_mapping);
135                 }
136                 cFYI(1, ("invalidating remote inode since open detected it "
137                          "changed"));
138                 invalidate_remote_inode(file->f_path.dentry->d_inode);
139         }
140
141 client_can_cache:
142         if (pTcon->ses->capabilities & CAP_UNIX)
143                 rc = cifs_get_inode_info_unix(&file->f_path.dentry->d_inode,
144                         full_path, inode->i_sb, xid);
145         else
146                 rc = cifs_get_inode_info(&file->f_path.dentry->d_inode,
147                         full_path, buf, inode->i_sb, xid);
148
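           /* the low nibble of oplock holds the oplock level granted by the
              server; the mask is presumably because CIFSSMBOpen can also set
              CIFS_CREATE_ACTION in the same word (see the check in cifs_open) */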
149         if ((*oplock & 0xF) == OPLOCK_EXCLUSIVE) {
150                 pCifsInode->clientCanCacheAll = TRUE;
151                 pCifsInode->clientCanCacheRead = TRUE;
152                 cFYI(1, ("Exclusive Oplock granted on inode %p",
153                          file->f_path.dentry->d_inode));
154         } else if ((*oplock & 0xF) == OPLOCK_READ)
155                 pCifsInode->clientCanCacheRead = TRUE;
156
157         return rc;
158 }
159
160 int cifs_open(struct inode *inode, struct file *file)
161 {
162         int rc = -EACCES;
163         int xid, oplock;
164         struct cifs_sb_info *cifs_sb;
165         struct cifsTconInfo *pTcon;
166         struct cifsFileInfo *pCifsFile;
167         struct cifsInodeInfo *pCifsInode;
168         struct list_head *tmp;
169         char *full_path = NULL;
170         int desiredAccess;
171         int disposition;
172         __u16 netfid;
173         FILE_ALL_INFO *buf = NULL;
174
175         xid = GetXid();
176
177         cifs_sb = CIFS_SB(inode->i_sb);
178         pTcon = cifs_sb->tcon;
179
180         if (file->f_flags & O_CREAT) {
181                 /* search inode for this file and fill in file->private_data */
182                 pCifsInode = CIFS_I(file->f_path.dentry->d_inode);
183                 read_lock(&GlobalSMBSeslock);
184                 list_for_each(tmp, &pCifsInode->openFileList) {
185                         pCifsFile = list_entry(tmp, struct cifsFileInfo,
186                                                flist);
187                         if ((pCifsFile->pfile == NULL) &&
188                             (pCifsFile->pid == current->tgid)) {
189                                 /* mode set in cifs_create */
190
191                                 /* needed for writepage */
192                                 pCifsFile->pfile = file;
193                                 
194                                 file->private_data = pCifsFile;
195                                 break;
196                         }
197                 }
198                 read_unlock(&GlobalSMBSeslock);
199                 if (file->private_data != NULL) {
200                         rc = 0;
201                         FreeXid(xid);
202                         return rc;
203                 } else {
204                         if (file->f_flags & O_EXCL)
205                                 cERROR(1, ("could not find file instance for "
206                                            "new file %p", file));
207                 }
208         }
209
210         full_path = build_path_from_dentry(file->f_path.dentry);
211         if (full_path == NULL) {
212                 FreeXid(xid);
213                 return -ENOMEM;
214         }
215
216         cFYI(1, (" inode = 0x%p file flags are 0x%x for %s",
217                  inode, file->f_flags, full_path));
218         desiredAccess = cifs_convert_flags(file->f_flags);
219
220 /*********************************************************************
221  *  open flag mapping table:
222  *  
223  *      POSIX Flag            CIFS Disposition
224  *      ----------            ---------------- 
225  *      O_CREAT               FILE_OPEN_IF
226  *      O_CREAT | O_EXCL      FILE_CREATE
227  *      O_CREAT | O_TRUNC     FILE_OVERWRITE_IF
228  *      O_TRUNC               FILE_OVERWRITE
229  *      none of the above     FILE_OPEN
230  *
231  *      Note that there is no direct match for the FILE_SUPERSEDE
232  *      disposition (ie create whether or not the file exists);
233  *      O_CREAT | O_TRUNC is similar, but it truncates the existing
234  *      file rather than creating a new one as FILE_SUPERSEDE does
235  *      (which uses the attributes / metadata passed in on the open call)
236  *?
237  *?  O_SYNC is a reasonable match to CIFS writethrough flag  
238  *?  and the read write flags match reasonably.  O_LARGEFILE
239  *?  is irrelevant because largefile support is always used
240  *?  by this client. Flags O_APPEND, O_DIRECT, O_DIRECTORY,
241  *       O_FASYNC, O_NOFOLLOW, O_NONBLOCK need further investigation
242  *********************************************************************/
243
244         disposition = cifs_get_disposition(file->f_flags);
245
246         if (oplockEnabled)
247                 oplock = REQ_OPLOCK;
248         else
249                 oplock = FALSE;
250
251         /* BB pass O_SYNC flag through on file attributes .. BB */
252
253         /* Also refresh inode by passing in file_info buf returned by SMBOpen
254            and calling get_inode_info with returned buf (at least helps
255            non-Unix server case) */
256
257         /* BB we can not do this if this is the second open of a file 
258            and the first handle has writebehind data, we might be 
259            able to simply do a filemap_fdatawrite/filemap_fdatawait first */
260         buf = kmalloc(sizeof(FILE_ALL_INFO), GFP_KERNEL);
261         if (!buf) {
262                 rc = -ENOMEM;
263                 goto out;
264         }
265
266         if (cifs_sb->tcon->ses->capabilities & CAP_NT_SMBS)
267                 rc = CIFSSMBOpen(xid, pTcon, full_path, disposition, 
268                          desiredAccess, CREATE_NOT_DIR, &netfid, &oplock, buf,
269                          cifs_sb->local_nls, cifs_sb->mnt_cifs_flags
270                                  & CIFS_MOUNT_MAP_SPECIAL_CHR);
271         else
272                 rc = -EIO; /* no NT SMB support; fall into legacy open below */
273
274         if (rc == -EIO) {
275                 /* Old server, try legacy style OpenX */
276                 rc = SMBLegacyOpen(xid, pTcon, full_path, disposition,
277                         desiredAccess, CREATE_NOT_DIR, &netfid, &oplock, buf,
278                         cifs_sb->local_nls, cifs_sb->mnt_cifs_flags
279                                 & CIFS_MOUNT_MAP_SPECIAL_CHR);
280         }
281         if (rc) {
282                 cFYI(1, ("cifs_open returned 0x%x", rc));
283                 goto out;
284         }
285         file->private_data =
286                 kmalloc(sizeof(struct cifsFileInfo), GFP_KERNEL);
287         if (file->private_data == NULL) {
288                 rc = -ENOMEM;
289                 goto out;
290         }
291         pCifsFile = cifs_init_private(file->private_data, inode, file, netfid);
292         write_lock(&GlobalSMBSeslock);
293         list_add(&pCifsFile->tlist, &pTcon->openFileList);
294
295         pCifsInode = CIFS_I(file->f_path.dentry->d_inode);
296         if (pCifsInode) {
297                 rc = cifs_open_inode_helper(inode, file, pCifsInode,
298                                             pCifsFile, pTcon,
299                                             &oplock, buf, full_path, xid);
300         } else {
301                 write_unlock(&GlobalSMBSeslock);
302         }
303
304         if (oplock & CIFS_CREATE_ACTION) {           
305                 /* time to set mode which we can not set earlier due to
306                    problems creating new read-only files */
307                 if (cifs_sb->tcon->ses->capabilities & CAP_UNIX) {
308                         CIFSSMBUnixSetPerms(xid, pTcon, full_path,
309                                             inode->i_mode,
310                                             (__u64)-1, (__u64)-1, 0 /* dev */,
311                                             cifs_sb->local_nls,
312                                             cifs_sb->mnt_cifs_flags & 
313                                                 CIFS_MOUNT_MAP_SPECIAL_CHR);
314                 } else {
315                         /* BB implement via Windows security descriptors eg
316                            CIFSSMBWinSetPerms(xid, pTcon, full_path, mode,
317                                               -1, -1, local_nls);
318                            in the meantime could set r/o dos attribute when
319                            perms are eg: mode & 0222 == 0 */
320                 }
321         }
322
323 out:
324         kfree(buf);
325         kfree(full_path);
326         FreeXid(xid);
327         return rc;
328 }
329
330 /* Try to reacquire byte range locks that were released when session */
331 /* to server was lost */
332 static int cifs_relock_file(struct cifsFileInfo *cifsFile)
333 {
334         int rc = 0;
335
336 /* BB list all locks open on this file and relock */
337
338         return rc;
339 }
340
341 static int cifs_reopen_file(struct inode *inode, struct file *file, 
342         int can_flush)
343 {
344         int rc = -EACCES;
345         int xid, oplock;
346         struct cifs_sb_info *cifs_sb;
347         struct cifsTconInfo *pTcon;
348         struct cifsFileInfo *pCifsFile;
349         struct cifsInodeInfo *pCifsInode;
350         char *full_path = NULL;
351         int desiredAccess;
352         int disposition = FILE_OPEN;
353         __u16 netfid;
354
355         if (inode == NULL)
356                 return -EBADF;
357         if (file->private_data) {
358                 pCifsFile = (struct cifsFileInfo *)file->private_data;
359         } else
360                 return -EBADF;
361
362         xid = GetXid();
363         down(&pCifsFile->fh_sem);
364         if (pCifsFile->invalidHandle == FALSE) {
365                 up(&pCifsFile->fh_sem);
366                 FreeXid(xid);
367                 return 0;
368         }
369
370         if (file->f_path.dentry == NULL) {
371                 up(&pCifsFile->fh_sem);
372                 cFYI(1, ("failed file reopen, no valid name if dentry freed"));
373                 FreeXid(xid);
374                 return -EBADF;
375         }
376         cifs_sb = CIFS_SB(inode->i_sb);
377         pTcon = cifs_sb->tcon;
378 /* can not grab rename sem here because various ops, including
379    those that already have the rename sem can end up causing writepage
380    to get called and if the server was down that means we end up here,
381    and we can never tell if the caller already has the rename_sem */
382         full_path = build_path_from_dentry(file->f_path.dentry);
383         if (full_path == NULL) {
384                 up(&pCifsFile->fh_sem);
385                 FreeXid(xid);
386                 return -ENOMEM;
387         }
388
389         cFYI(1, (" inode = 0x%p file flags are 0x%x for %s",
390                  inode, file->f_flags, full_path));
391         desiredAccess = cifs_convert_flags(file->f_flags);
392
393         if (oplockEnabled)
394                 oplock = REQ_OPLOCK;
395         else
396                 oplock = FALSE;
397
398         /* Can not refresh inode by passing in file_info buf to be returned
399            by SMBOpen and then calling get_inode_info with returned buf 
400            since file might have write behind data that needs to be flushed 
401            and server version of file size can be stale. If we knew for sure
402            that inode was not dirty locally we could do this */
403
404 /*      buf = kmalloc(sizeof(FILE_ALL_INFO), GFP_KERNEL);
405         if (buf == 0) {
406                 up(&pCifsFile->fh_sem);
407                 kfree(full_path);
408                 FreeXid(xid);
409                 return -ENOMEM;
410         } */
411         rc = CIFSSMBOpen(xid, pTcon, full_path, disposition, desiredAccess,
412                          CREATE_NOT_DIR, &netfid, &oplock, NULL,
413                          cifs_sb->local_nls, cifs_sb->mnt_cifs_flags & 
414                                 CIFS_MOUNT_MAP_SPECIAL_CHR);
415         if (rc) {
416                 up(&pCifsFile->fh_sem);
417                 cFYI(1, ("cifs_open returned 0x%x", rc));
418                 cFYI(1, ("oplock: %d", oplock));
419         } else {
420                 pCifsFile->netfid = netfid;
421                 pCifsFile->invalidHandle = FALSE;
422                 up(&pCifsFile->fh_sem);
423                 pCifsInode = CIFS_I(inode);
424                 if (pCifsInode) {
425                         if (can_flush) {
426                                 filemap_write_and_wait(inode->i_mapping);
427                         /* temporarily disable caching while we
428                            go to server to get inode info */
429                                 pCifsInode->clientCanCacheAll = FALSE;
430                                 pCifsInode->clientCanCacheRead = FALSE;
431                                 if (pTcon->ses->capabilities & CAP_UNIX)
432                                         rc = cifs_get_inode_info_unix(&inode,
433                                                 full_path, inode->i_sb, xid);
434                                 else
435                                         rc = cifs_get_inode_info(&inode,
436                                                 full_path, NULL, inode->i_sb,
437                                                 xid);
438                         } /* else we are writing out data to server already
439                              and could deadlock if we tried to flush data, and
440                              since we do not know if we have data that would
441                              invalidate the current end of file on the server
442                              we can not go to the server to get the new inode
443                              info */
444                         if ((oplock & 0xF) == OPLOCK_EXCLUSIVE) {
445                                 pCifsInode->clientCanCacheAll = TRUE;
446                                 pCifsInode->clientCanCacheRead = TRUE;
447                                 cFYI(1, ("Exclusive Oplock granted on inode %p",
448                                          file->f_path.dentry->d_inode));
449                         } else if ((oplock & 0xF) == OPLOCK_READ) {
450                                 pCifsInode->clientCanCacheRead = TRUE;
451                                 pCifsInode->clientCanCacheAll = FALSE;
452                         } else {
453                                 pCifsInode->clientCanCacheRead = FALSE;
454                                 pCifsInode->clientCanCacheAll = FALSE;
455                         }
456                         cifs_relock_file(pCifsFile);
457                 }
458         }
459
460         kfree(full_path);
461         FreeXid(xid);
462         return rc;
463 }
464
465 int cifs_close(struct inode *inode, struct file *file)
466 {
467         int rc = 0;
468         int xid;
469         struct cifs_sb_info *cifs_sb;
470         struct cifsTconInfo *pTcon;
471         struct cifsFileInfo *pSMBFile =
472                 (struct cifsFileInfo *)file->private_data;
473
474         xid = GetXid();
475
476         cifs_sb = CIFS_SB(inode->i_sb);
477         pTcon = cifs_sb->tcon;
478         if (pSMBFile) {
479                 struct cifsLockInfo *li, *tmp;
480
481                 pSMBFile->closePend = TRUE;
482                 if (pTcon) {
483                         /* no sense reconnecting to close a file that is
484                            already closed */
485                         if (pTcon->tidStatus != CifsNeedReconnect) {
486                                 int timeout = 2;
487                                 while((atomic_read(&pSMBFile->wrtPending) != 0)
488                                          && (timeout < 1000) ) {
489                                         /* Give write a better chance to get to
490                                         server ahead of the close.  We do not
491                                         want to add a wait_q here as it would
492                                         increase the memory utilization as
493                                         the struct would be in each open file,
494                                         but this should give enough time to 
495                                         clear the socket */
496 #ifdef CONFIG_CIFS_DEBUG2
497                                         cFYI(1,("close delay, write pending"));
498 #endif /* CONFIG_CIFS_DEBUG2 */
499                                         msleep(timeout);
500                                         timeout *= 4;
501                                 }
502                                 if(atomic_read(&pSMBFile->wrtPending))
503                                         cERROR(1,("close with pending writes"));
504                                 rc = CIFSSMBClose(xid, pTcon,
505                                                   pSMBFile->netfid);
506                         }
507                 }
508
509                 /* Delete any outstanding lock records.
510                    We'll lose them when the file is closed anyway. */
511                 down(&pSMBFile->lock_sem);
512                 list_for_each_entry_safe(li, tmp, &pSMBFile->llist, llist) {
513                         list_del(&li->llist);
514                         kfree(li);
515                 }
516                 up(&pSMBFile->lock_sem);
517
518                 write_lock(&GlobalSMBSeslock);
519                 list_del(&pSMBFile->flist);
520                 list_del(&pSMBFile->tlist);
521                 write_unlock(&GlobalSMBSeslock);
522                 kfree(pSMBFile->search_resume_name);
523                 kfree(file->private_data);
524                 file->private_data = NULL;
525         } else
526                 rc = -EBADF;
527
528         if (list_empty(&(CIFS_I(inode)->openFileList))) {
529                 cFYI(1, ("closing last open instance for inode %p", inode));
530                 /* if the file is not open we do not know if we can cache info
531                    on this inode, much less write behind and read ahead */
532                 CIFS_I(inode)->clientCanCacheRead = FALSE;
533                 CIFS_I(inode)->clientCanCacheAll  = FALSE;
534         }
535         if ((rc == 0) && CIFS_I(inode)->write_behind_rc)
536                 rc = CIFS_I(inode)->write_behind_rc;
537         FreeXid(xid);
538         return rc;
539 }
540
541 int cifs_closedir(struct inode *inode, struct file *file)
542 {
543         int rc = 0;
544         int xid;
545         struct cifsFileInfo *pCFileStruct =
546             (struct cifsFileInfo *)file->private_data;
547         char *ptmp;
548
549         cFYI(1, ("Closedir inode = 0x%p", inode));
550
551         xid = GetXid();
552
553         if (pCFileStruct) {
554                 struct cifsTconInfo *pTcon;
555                 struct cifs_sb_info *cifs_sb = CIFS_SB(file->f_path.dentry->d_sb);
556
557                 pTcon = cifs_sb->tcon;
558
559                 cFYI(1, ("Freeing private data in close dir"));
560                 if ((pCFileStruct->srch_inf.endOfSearch == FALSE) &&
561                    (pCFileStruct->invalidHandle == FALSE)) {
562                         pCFileStruct->invalidHandle = TRUE;
563                         rc = CIFSFindClose(xid, pTcon, pCFileStruct->netfid);
564                         cFYI(1, ("Closing uncompleted readdir with rc %d",
565                                  rc));
566                         /* not much we can do if it fails anyway, ignore rc */
567                         rc = 0;
568                 }
569                 ptmp = pCFileStruct->srch_inf.ntwrk_buf_start;
570                 if (ptmp) {
571                         cFYI(1, ("closedir free smb buf in srch struct"));
572                         pCFileStruct->srch_inf.ntwrk_buf_start = NULL;
573                         if(pCFileStruct->srch_inf.smallBuf)
574                                 cifs_small_buf_release(ptmp);
575                         else
576                                 cifs_buf_release(ptmp);
577                 }
578                 ptmp = pCFileStruct->search_resume_name;
579                 if (ptmp) {
580                         cFYI(1, ("closedir free resume name"));
581                         pCFileStruct->search_resume_name = NULL;
582                         kfree(ptmp);
583                 }
584                 kfree(file->private_data);
585                 file->private_data = NULL;
586         }
587         /* BB can we lock the filestruct while this is going on? */
588         FreeXid(xid);
589         return rc;
590 }
591
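/* remember a byte range lock taken on the server so that a later unlock or
   close can find and release it (see cifs_lock and cifs_close) */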
592 static int store_file_lock(struct cifsFileInfo *fid, __u64 len,
593                                 __u64 offset, __u8 lockType)
594 {
595         struct cifsLockInfo *li = kmalloc(sizeof(struct cifsLockInfo), GFP_KERNEL);
596         if (li == NULL)
597                 return -ENOMEM;
598         li->offset = offset;
599         li->length = len;
600         li->type = lockType;
601         down(&fid->lock_sem);
602         list_add(&li->llist, &fid->llist);
603         up(&fid->lock_sem);
604         return 0;
605 }
606
607 int cifs_lock(struct file *file, int cmd, struct file_lock *pfLock)
608 {
609         int rc, xid;
610         __u32 numLock = 0;
611         __u32 numUnlock = 0;
612         __u64 length;
613         int wait_flag = FALSE;
614         struct cifs_sb_info *cifs_sb;
615         struct cifsTconInfo *pTcon;
616         __u16 netfid;
617         __u8 lockType = LOCKING_ANDX_LARGE_FILES;
618         int posix_locking;
619
620         length = 1 + pfLock->fl_end - pfLock->fl_start;
621         rc = -EACCES;
622         xid = GetXid();
623
624         cFYI(1, ("Lock parm: 0x%x flockflags: "
625                  "0x%x flocktype: 0x%x start: %lld end: %lld",
626                 cmd, pfLock->fl_flags, pfLock->fl_type, pfLock->fl_start,
627                 pfLock->fl_end));
628
629         if (pfLock->fl_flags & FL_POSIX)
630                 cFYI(1, ("Posix"));
631         if (pfLock->fl_flags & FL_FLOCK)
632                 cFYI(1, ("Flock"));
633         if (pfLock->fl_flags & FL_SLEEP) {
634                 cFYI(1, ("Blocking lock"));
635                 wait_flag = TRUE;
636         }
637         if (pfLock->fl_flags & FL_ACCESS)
638                 cFYI(1, ("Process suspended by mandatory locking - "
639                          "not implemented yet"));
640         if (pfLock->fl_flags & FL_LEASE)
641                 cFYI(1, ("Lease on file - not implemented yet"));
642         if (pfLock->fl_flags & 
643             (~(FL_POSIX | FL_FLOCK | FL_SLEEP | FL_ACCESS | FL_LEASE)))
644                 cFYI(1, ("Unknown lock flags 0x%x", pfLock->fl_flags));
645
646         if (pfLock->fl_type == F_WRLCK) {
647                 cFYI(1, ("F_WRLCK "));
648                 numLock = 1;
649         } else if (pfLock->fl_type == F_UNLCK) {
650                 cFYI(1, ("F_UNLCK"));
651                 numUnlock = 1;
652                 /* Check if unlock includes more than
653                 one lock range */
654         } else if (pfLock->fl_type == F_RDLCK) {
655                 cFYI(1, ("F_RDLCK"));
656                 lockType |= LOCKING_ANDX_SHARED_LOCK;
657                 numLock = 1;
658         } else if (pfLock->fl_type == F_EXLCK) {
659                 cFYI(1, ("F_EXLCK"));
660                 numLock = 1;
661         } else if (pfLock->fl_type == F_SHLCK) {
662                 cFYI(1, ("F_SHLCK"));
663                 lockType |= LOCKING_ANDX_SHARED_LOCK;
664                 numLock = 1;
665         } else
666                 cFYI(1, ("Unknown type of lock"));
667
668         cifs_sb = CIFS_SB(file->f_path.dentry->d_sb);
669         pTcon = cifs_sb->tcon;
670
671         if (file->private_data == NULL) {
672                 FreeXid(xid);
673                 return -EBADF;
674         }
675         netfid = ((struct cifsFileInfo *)file->private_data)->netfid;
676
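        /* POSIX byte range lock semantics are used only when the server
           advertises the Unix extensions fcntl lock capability */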
677         posix_locking = (cifs_sb->tcon->ses->capabilities & CAP_UNIX) &&
678                         (CIFS_UNIX_FCNTL_CAP & le64_to_cpu(cifs_sb->tcon->fsUnixInfo.Capability));
679
680         /* BB add code here to normalize offset and length to
681         account for negative length which we can not accept over the
682         wire */
683         if (IS_GETLK(cmd)) {
684                 if(posix_locking) {
685                         int posix_lock_type;
686                         if(lockType & LOCKING_ANDX_SHARED_LOCK)
687                                 posix_lock_type = CIFS_RDLCK;
688                         else
689                                 posix_lock_type = CIFS_WRLCK;
690                         rc = CIFSSMBPosixLock(xid, pTcon, netfid, 1 /* get */,
691                                         length, pfLock,
692                                         posix_lock_type, wait_flag);
693                         FreeXid(xid);
694                         return rc;
695                 }
696
697                 /* BB we could chain these into one lock request BB */
698                 rc = CIFSSMBLock(xid, pTcon, netfid, length, pfLock->fl_start,
699                                  0, 1, lockType, 0 /* wait flag */ );
700                 if (rc == 0) {
701                         rc = CIFSSMBLock(xid, pTcon, netfid, length, 
702                                          pfLock->fl_start, 1 /* numUnlock */ ,
703                                          0 /* numLock */ , lockType,
704                                          0 /* wait flag */ );
705                         pfLock->fl_type = F_UNLCK;
706                         if (rc != 0)
707                                 cERROR(1, ("Error unlocking previously locked "
708                                            "range %d during test of lock", rc));
709                         rc = 0;
710
711                 } else {
712                         /* if rc == ERR_SHARING_VIOLATION ? */
713                         rc = 0; /* do not change lock type to unlock
714                                    since range in use */
715                 }
716
717                 FreeXid(xid);
718                 return rc;
719         }
720
721         if (!numLock && !numUnlock) {
722                 /* if no lock or unlock then nothing
723                 to do since we do not know what it is */
724                 FreeXid(xid);
725                 return -EOPNOTSUPP;
726         }
727
728         if (posix_locking) {
729                 int posix_lock_type;
730                 if(lockType & LOCKING_ANDX_SHARED_LOCK)
731                         posix_lock_type = CIFS_RDLCK;
732                 else
733                         posix_lock_type = CIFS_WRLCK;
734                 
735                 if(numUnlock == 1)
736                         posix_lock_type = CIFS_UNLCK;
737
738                 rc = CIFSSMBPosixLock(xid, pTcon, netfid, 0 /* set */,
739                                       length, pfLock,
740                                       posix_lock_type, wait_flag);
741         } else {
742                 struct cifsFileInfo *fid = (struct cifsFileInfo *)file->private_data;
743
744                 if (numLock) {
745                         rc = CIFSSMBLock(xid, pTcon, netfid, length, pfLock->fl_start,
746                                         0, numLock, lockType, wait_flag);
747
748                         if (rc == 0) {
749                                 /* For Windows locks we must store them. */
750                                 rc = store_file_lock(fid, length,
751                                                 pfLock->fl_start, lockType);
752                         }
753                 } else if (numUnlock) {
754                         /* For each stored lock that this unlock overlaps
755                            completely, unlock it. */
756                         int stored_rc = 0;
757                         struct cifsLockInfo *li, *tmp;
758
759                         rc = 0;
760                         down(&fid->lock_sem);
761                         list_for_each_entry_safe(li, tmp, &fid->llist, llist) {
762                                 if (pfLock->fl_start <= li->offset &&
763                                                 length >= li->length) {
764                                         stored_rc = CIFSSMBLock(xid, pTcon, netfid,
765                                                         li->length, li->offset,
766                                                         1, 0, li->type, FALSE);
767                                         if (stored_rc)
768                                                 rc = stored_rc;
769
770                                         list_del(&li->llist);
771                                         kfree(li);
772                                 }
773                         }
774                         up(&fid->lock_sem);
775                 }
776         }
777
778         if (pfLock->fl_flags & FL_POSIX)
779                 posix_lock_file_wait(file, pfLock);
780         FreeXid(xid);
781         return rc;
782 }
783
784 ssize_t cifs_user_write(struct file *file, const char __user *write_data,
785         size_t write_size, loff_t *poffset)
786 {
787         int rc = 0;
788         unsigned int bytes_written = 0;
789         unsigned int total_written;
790         struct cifs_sb_info *cifs_sb;
791         struct cifsTconInfo *pTcon;
792         int xid, long_op;
793         struct cifsFileInfo *open_file;
794
795         cifs_sb = CIFS_SB(file->f_path.dentry->d_sb);
796
797         pTcon = cifs_sb->tcon;
798
799         /* cFYI(1,
800            (" write %d bytes to offset %lld of %s", write_size,
801            *poffset, file->f_path.dentry->d_name.name)); */
802
803         if (file->private_data == NULL)
804                 return -EBADF;
805         open_file = (struct cifsFileInfo *) file->private_data;
806         
807         xid = GetXid();
808
809         if (*poffset > file->f_path.dentry->d_inode->i_size)
810                 long_op = 2; /* writes past end of file can take a long time */
811         else
812                 long_op = 1;
813
814         for (total_written = 0; write_size > total_written;
815              total_written += bytes_written) {
816                 rc = -EAGAIN;
817                 while (rc == -EAGAIN) {
818                         if (file->private_data == NULL) {
819                                 /* file has been closed on us */
820                                 FreeXid(xid);
821                         /* if we have gotten here we have written some data
822                            and blocked, and the file has been freed on us while
823                            we blocked so return what we managed to write */
824                                 return total_written;
825                         } 
826                         if (open_file->closePend) {
827                                 FreeXid(xid);
828                                 if (total_written)
829                                         return total_written;
830                                 else
831                                         return -EBADF;
832                         }
833                         if (open_file->invalidHandle) {
834                                 if ((file->f_path.dentry == NULL) ||
835                                     (file->f_path.dentry->d_inode == NULL)) {
836                                         FreeXid(xid);
837                                         return total_written;
838                                 }
839                                 /* we could deadlock if we called
840                                    filemap_fdatawait from here so tell
841                                    reopen_file not to flush data to server
842                                    now */
843                                 rc = cifs_reopen_file(file->f_path.dentry->d_inode,
844                                         file, FALSE);
845                                 if (rc != 0)
846                                         break;
847                         }
848
849                         rc = CIFSSMBWrite(xid, pTcon,
850                                 open_file->netfid,
851                                 min_t(const int, cifs_sb->wsize,
852                                       write_size - total_written),
853                                 *poffset, &bytes_written,
854                                 NULL, write_data + total_written, long_op);
855                 }
856                 if (rc || (bytes_written == 0)) {
857                         if (total_written)
858                                 break;
859                         else {
860                                 FreeXid(xid);
861                                 return rc;
862                         }
863                 } else
864                         *poffset += bytes_written;
865                 long_op = FALSE; /* subsequent writes fast -
866                                     15 seconds is plenty */
867         }
868
869         cifs_stats_bytes_written(pTcon, total_written);
870
871         /* since the write may have blocked check these pointers again */
872         if ((file->f_path.dentry) && (file->f_path.dentry->d_inode)) {
873                 struct inode *inode = file->f_path.dentry->d_inode;
874 /* Do not update local mtime - server will set its actual value on write                
875  *              inode->i_ctime = inode->i_mtime = 
876  *                      current_fs_time(inode->i_sb);*/
877                 if (total_written > 0) {
878                         spin_lock(&inode->i_lock);
879                         if (*poffset > file->f_path.dentry->d_inode->i_size)
880                                 i_size_write(file->f_path.dentry->d_inode,
881                                         *poffset);
882                         spin_unlock(&inode->i_lock);
883                 }
884                 mark_inode_dirty_sync(file->f_path.dentry->d_inode);    
885         }
886         FreeXid(xid);
887         return total_written;
888 }
889
890 static ssize_t cifs_write(struct file *file, const char *write_data,
891         size_t write_size, loff_t *poffset)
892 {
893         int rc = 0;
894         unsigned int bytes_written = 0;
895         unsigned int total_written;
896         struct cifs_sb_info *cifs_sb;
897         struct cifsTconInfo *pTcon;
898         int xid, long_op;
899         struct cifsFileInfo *open_file;
900
901         cifs_sb = CIFS_SB(file->f_path.dentry->d_sb);
902
903         pTcon = cifs_sb->tcon;
904
905         cFYI(1,("write %zd bytes to offset %lld of %s", write_size,
906            *poffset, file->f_path.dentry->d_name.name));
907
908         if (file->private_data == NULL)
909                 return -EBADF;
910         open_file = (struct cifsFileInfo *)file->private_data;
911         
912         xid = GetXid();
913
914         if (*poffset > file->f_path.dentry->d_inode->i_size)
915                 long_op = 2; /* writes past end of file can take a long time */
916         else
917                 long_op = 1;
918
919         for (total_written = 0; write_size > total_written;
920              total_written += bytes_written) {
921                 rc = -EAGAIN;
922                 while (rc == -EAGAIN) {
923                         if (file->private_data == NULL) {
924                                 /* file has been closed on us */
925                                 FreeXid(xid);
926                         /* if we have gotten here we have written some data
927                            and blocked, and the file has been freed on us
928                            while we blocked so return what we managed to 
929                            write */
930                                 return total_written;
931                         } 
932                         if (open_file->closePend) {
933                                 FreeXid(xid);
934                                 if (total_written)
935                                         return total_written;
936                                 else
937                                         return -EBADF;
938                         }
939                         if (open_file->invalidHandle) {
940                                 /* we could deadlock if we called
941                                    filemap_fdatawait from here so tell
942                                    reopen_file not to flush data to 
943                                    server now */
944                                 rc = cifs_reopen_file(file->f_path.dentry->d_inode,
945                                         file, FALSE);
946                                 if (rc != 0)
947                                         break;
948                         }
949                         if(experimEnabled || (pTcon->ses->server &&
950                                 ((pTcon->ses->server->secMode & 
951                                 (SECMODE_SIGN_REQUIRED | SECMODE_SIGN_ENABLED))
952                                 == 0))) {
953                                 struct kvec iov[2];
954                                 unsigned int len;
955
956                                 len = min((size_t)cifs_sb->wsize,
957                                           write_size - total_written);
958                                 /* iov[0] is reserved for smb header */
959                                 iov[1].iov_base = (char *)write_data +
960                                                   total_written;
961                                 iov[1].iov_len = len;
962                                 rc = CIFSSMBWrite2(xid, pTcon,
963                                                 open_file->netfid, len,
964                                                 *poffset, &bytes_written,
965                                                 iov, 1, long_op);
966                         } else
967                                 rc = CIFSSMBWrite(xid, pTcon,
968                                          open_file->netfid,
969                                          min_t(const int, cifs_sb->wsize,
970                                                write_size - total_written),
971                                          *poffset, &bytes_written,
972                                          write_data + total_written,
973                                          NULL, long_op);
974                 }
975                 if (rc || (bytes_written == 0)) {
976                         if (total_written)
977                                 break;
978                         else {
979                                 FreeXid(xid);
980                                 return rc;
981                         }
982                 } else
983                         *poffset += bytes_written;
984                 long_op = FALSE; /* subsequent writes fast - 
985                                     15 seconds is plenty */
986         }
987
988         cifs_stats_bytes_written(pTcon, total_written);
989
990         /* since the write may have blocked check these pointers again */
991         if ((file->f_path.dentry) && (file->f_path.dentry->d_inode)) {
992 /*BB We could make this contingent on superblock ATIME flag too */
993 /*              file->f_path.dentry->d_inode->i_ctime =
994                 file->f_path.dentry->d_inode->i_mtime = CURRENT_TIME;*/
995                 if (total_written > 0) {
996                         spin_lock(&file->f_path.dentry->d_inode->i_lock);
997                         if (*poffset > file->f_path.dentry->d_inode->i_size)
998                                 i_size_write(file->f_path.dentry->d_inode,
999                                              *poffset);
1000                         spin_unlock(&file->f_path.dentry->d_inode->i_lock);
1001                 }
1002                 mark_inode_dirty_sync(file->f_path.dentry->d_inode);
1003         }
1004         FreeXid(xid);
1005         return total_written;
1006 }
1007
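/* find an open file handle usable for writing; on success wrtPending has been
   incremented and the caller must atomic_dec it when the write is done (as
   cifs_partialpagewrite and cifs_writepages do) */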
1008 struct cifsFileInfo *find_writable_file(struct cifsInodeInfo *cifs_inode)
1009 {
1010         struct cifsFileInfo *open_file;
1011         int rc;
1012
1013         /* Having a null inode here (because mapping->host was set to zero by
1014         the VFS or MM) should not happen but we had reports of an oops (due to
1015         it being zero) during stress testcases so we need to check for it */
1016
1017         if(cifs_inode == NULL) {
1018                 cERROR(1,("Null inode passed to cifs_writeable_file"));
1019                 dump_stack();
1020                 return NULL;
1021         }
1022
1023         read_lock(&GlobalSMBSeslock);
1024         list_for_each_entry(open_file, &cifs_inode->openFileList, flist) {
1025                 if (open_file->closePend)
1026                         continue;
1027                 if (open_file->pfile &&
1028                     ((open_file->pfile->f_flags & O_RDWR) ||
1029                      (open_file->pfile->f_flags & O_WRONLY))) {
1030                         atomic_inc(&open_file->wrtPending);
1031                         read_unlock(&GlobalSMBSeslock);
1032                         if((open_file->invalidHandle) && 
1033                            (!open_file->closePend) /* BB fixme -since the second clause can not be true remove it BB */) {
1034                                 rc = cifs_reopen_file(&cifs_inode->vfs_inode, 
1035                                                       open_file->pfile, FALSE);
1036                                 /* if it fails, try another handle - might be
1037                                    dangerous to hold up writepages with retry */
1038                                 if(rc) {
1039                                         cFYI(1,("failed on reopen file in wp"));
1040                                         read_lock(&GlobalSMBSeslock);
1041                                         /* can not use this handle, no write
1042                                         pending on this one after all */
1043                                         atomic_dec
1044                                              (&open_file->wrtPending);
1045                                         continue;
1046                                 }
1047                         }
1048                         return open_file;
1049                 }
1050         }
1051         read_unlock(&GlobalSMBSeslock);
1052         return NULL;
1053 }
1054
1055 static int cifs_partialpagewrite(struct page *page, unsigned from, unsigned to)
1056 {
1057         struct address_space *mapping = page->mapping;
1058         loff_t offset = (loff_t)page->index << PAGE_CACHE_SHIFT;
1059         char *write_data;
1060         int rc = -EFAULT;
1061         int bytes_written = 0;
1062         struct cifs_sb_info *cifs_sb;
1063         struct cifsTconInfo *pTcon;
1064         struct inode *inode;
1065         struct cifsFileInfo *open_file;
1066
1067         if (!mapping || !mapping->host)
1068                 return -EFAULT;
1069
1070         inode = page->mapping->host;
1071         cifs_sb = CIFS_SB(inode->i_sb);
1072         pTcon = cifs_sb->tcon;
1073
1074         offset += (loff_t)from;
1075         write_data = kmap(page);
1076         write_data += from;
1077
1078         if ((to > PAGE_CACHE_SIZE) || (from > to)) {
1079                 kunmap(page);
1080                 return -EIO;
1081         }
1082
1083         /* racing with truncate? */
1084         if (offset > mapping->host->i_size) {
1085                 kunmap(page);
1086                 return 0; /* don't care */
1087         }
1088
1089         /* check to make sure that we are not extending the file */
1090         if (mapping->host->i_size - offset < (loff_t)to)
1091                 to = (unsigned)(mapping->host->i_size - offset); 
1092
1093         open_file = find_writable_file(CIFS_I(mapping->host));
1094         if (open_file) {
1095                 bytes_written = cifs_write(open_file->pfile, write_data,
1096                                            to-from, &offset);
1097                 atomic_dec(&open_file->wrtPending);
1098                 /* Does mm or vfs already set times? */
1099                 inode->i_atime = inode->i_mtime = current_fs_time(inode->i_sb);
1100                 if ((bytes_written > 0) && (offset)) {
1101                         rc = 0;
1102                 } else if (bytes_written < 0) {
1103                         if (rc != -EBADF)
1104                                 rc = bytes_written;
1105                 }
1106         } else {
1107                 cFYI(1, ("No writeable filehandles for inode"));
1108                 rc = -EIO;
1109         }
1110
1111         kunmap(page);
1112         return rc;
1113 }
1114
1115 static int cifs_writepages(struct address_space *mapping,
1116                            struct writeback_control *wbc)
1117 {
1118         struct backing_dev_info *bdi = mapping->backing_dev_info;
1119         unsigned int bytes_to_write;
1120         unsigned int bytes_written;
1121         struct cifs_sb_info *cifs_sb;
1122         int done = 0;
1123         pgoff_t end;
1124         pgoff_t index;
1125         int range_whole = 0;
1126         struct kvec * iov;
1127         int len;
1128         int n_iov = 0;
1129         pgoff_t next;
1130         int nr_pages;
1131         __u64 offset = 0;
1132         struct cifsFileInfo *open_file;
1133         struct page *page;
1134         struct pagevec pvec;
1135         int rc = 0;
1136         int scanned = 0;
1137         int xid;
1138
1139         cifs_sb = CIFS_SB(mapping->host->i_sb);
1140         
1141         /*
1142          * If wsize is smaller than the page cache size, default to writing
1143          * one page at a time via cifs_writepage
1144          */
1145         if (cifs_sb->wsize < PAGE_CACHE_SIZE)
1146                 return generic_writepages(mapping, wbc);
1147
1148         if((cifs_sb->tcon->ses) && (cifs_sb->tcon->ses->server))
1149                 if(cifs_sb->tcon->ses->server->secMode &
1150                           (SECMODE_SIGN_REQUIRED | SECMODE_SIGN_ENABLED))
1151                         if(!experimEnabled) 
1152                                 return generic_writepages(mapping, wbc);
1153
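        /* iov[0] is reserved for the smb header (see the loop below), so at
           most 31 data pages are coalesced per request, further bounded by
           wsize */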
1154         iov = kmalloc(32 * sizeof(struct kvec), GFP_KERNEL);
1155         if(iov == NULL)
1156                 return generic_writepages(mapping, wbc);
1157
1158
1159         /*
1160          * BB: Is this meaningful for a non-block-device file system?
1161          * If it is, we should test it again after we do I/O
1162          */
1163         if (wbc->nonblocking && bdi_write_congested(bdi)) {
1164                 wbc->encountered_congestion = 1;
1165                 kfree(iov);
1166                 return 0;
1167         }
1168
1169         xid = GetXid();
1170
1171         pagevec_init(&pvec, 0);
1172         if (wbc->range_cyclic) {
1173                 index = mapping->writeback_index; /* Start from prev offset */
1174                 end = -1;
1175         } else {
1176                 index = wbc->range_start >> PAGE_CACHE_SHIFT;
1177                 end = wbc->range_end >> PAGE_CACHE_SHIFT;
1178                 if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX)
1179                         range_whole = 1;
1180                 scanned = 1;
1181         }
1182 retry:
1183         while (!done && (index <= end) &&
1184                (nr_pages = pagevec_lookup_tag(&pvec, mapping, &index,
1185                         PAGECACHE_TAG_DIRTY,
1186                         min(end - index, (pgoff_t)PAGEVEC_SIZE - 1) + 1))) {
1187                 int first;
1188                 unsigned int i;
1189
1190                 first = -1;
1191                 next = 0;
1192                 n_iov = 0;
1193                 bytes_to_write = 0;
1194
1195                 for (i = 0; i < nr_pages; i++) {
1196                         page = pvec.pages[i];
1197                         /*
1198                          * At this point we hold neither mapping->tree_lock nor
1199                          * lock on the page itself: the page may be truncated or
1200                          * invalidated (changing page->mapping to NULL), or even
1201                          * swizzled back from swapper_space to tmpfs file
1202                          * mapping
1203                          */
1204
1205                         if (first < 0)
1206                                 lock_page(page);
1207                         else if (TestSetPageLocked(page))
1208                                 break;
1209
1210                         if (unlikely(page->mapping != mapping)) {
1211                                 unlock_page(page);
1212                                 break;
1213                         }
1214
1215                         if (!wbc->range_cyclic && page->index > end) {
1216                                 done = 1;
1217                                 unlock_page(page);
1218                                 break;
1219                         }
1220
1221                         if (next && (page->index != next)) {
1222                                 /* Not next consecutive page */
1223                                 unlock_page(page);
1224                                 break;
1225                         }
1226
1227                         if (wbc->sync_mode != WB_SYNC_NONE)
1228                                 wait_on_page_writeback(page);
1229
1230                         if (PageWriteback(page) ||
1231                                         !clear_page_dirty_for_io(page)) {
1232                                 unlock_page(page);
1233                                 break;
1234                         }
1235
1236                         /*
1237                          * This actually clears the dirty bit in the radix tree.
1238                          * See cifs_writepage() for more commentary.
1239                          */
1240                         set_page_writeback(page);
1241
1242                         if (page_offset(page) >= mapping->host->i_size) {
1243                                 done = 1;
1244                                 unlock_page(page);
1245                                 end_page_writeback(page);
1246                                 break;
1247                         }
1248
1249                         /*
1250                          * BB can we get rid of this?  pages are held by pvec
1251                          */
1252                         page_cache_get(page);
1253
1254                         len = min(mapping->host->i_size - page_offset(page),
1255                                   (loff_t)PAGE_CACHE_SIZE);
1256
1257                         /* reserve iov[0] for the smb header */
1258                         n_iov++;
1259                         iov[n_iov].iov_base = kmap(page);
1260                         iov[n_iov].iov_len = len;
1261                         bytes_to_write += len;
1262
1263                         if (first < 0) {
1264                                 first = i;
1265                                 offset = page_offset(page);
1266                         }
1267                         next = page->index + 1;
1268                         if (bytes_to_write + PAGE_CACHE_SIZE > cifs_sb->wsize)
1269                                 break;
1270                 }
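                /* send the run collected above to the server as one SMB write */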
1271                 if (n_iov) {
1272                         /* Search for a writable handle every time we call
1273                          * CIFSSMBWrite2.  We can't rely on the last handle
1274                          * we used to still be valid
1275                          */
1276                         open_file = find_writable_file(CIFS_I(mapping->host));
1277                         if (!open_file) {
1278                                 cERROR(1, ("No writable handles for inode"));
1279                                 rc = -EBADF;
1280                         } else {
1281                                 rc = CIFSSMBWrite2(xid, cifs_sb->tcon,
1282                                                    open_file->netfid,
1283                                                    bytes_to_write, offset,
1284                                                    &bytes_written, iov, n_iov,
1285                                                    1);
1286                                 atomic_dec(&open_file->wrtPending);
1287                                 if (rc || bytes_written < bytes_to_write) {
1288                                         cERROR(1, ("Write2 ret %d, written = %d",
1289                                                   rc, bytes_written));
1290                                         /* BB what if continued retry is
1291                                            requested via mount flags? */
1292                                         set_bit(AS_EIO, &mapping->flags);
1293                                 } else {
1294                                         cifs_stats_bytes_written(cifs_sb->tcon,
1295                                                                  bytes_written);
1296                                 }
1297                         }
1298                         for (i = 0; i < n_iov; i++) {
1299                                 page = pvec.pages[first + i];
1300                                 /* Should we also set the page error flag when
1301                                 rc is success but too little data was written? */
1302                                 /* BB investigate retry logic on temporary
1303                                 server crash cases and how recovery works
1304                                 when the page is marked as error */
1305                                 if (rc)
1306                                         SetPageError(page);
1307                                 kunmap(page);
1308                                 unlock_page(page);
1309                                 end_page_writeback(page);
1310                                 page_cache_release(page);
1311                         }
1312                         if ((wbc->nr_to_write -= n_iov) <= 0)
1313                                 done = 1;
1314                         index = next;
1315                 }
1316                 pagevec_release(&pvec);
1317         }
1318         if (!scanned && !done) {
1319                 /*
1320                  * We hit the last page and there is more work to be done: wrap
1321                  * back to the start of the file
1322                  */
1323                 scanned = 1;
1324                 index = 0;
1325                 goto retry;
1326         }
1327         if (wbc->range_cyclic || (range_whole && wbc->nr_to_write > 0))
1328                 mapping->writeback_index = index;
1329
1330         FreeXid(xid);
1331         kfree(iov);
1332         return rc;
1333 }
1334
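/*
 * Write a single dirty page back to the server: mark it for writeback,
 * send its contents via cifs_partialpagewrite(), then unlock the page
 * and drop the extra reference taken here.
 */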
1335 static int cifs_writepage(struct page* page, struct writeback_control *wbc)
1336 {
1337         int rc = -EFAULT;
1338         int xid;
1339
1340         xid = GetXid();
1341 /* BB add check for wbc flags */
1342         page_cache_get(page);
1343         if (!PageUptodate(page)) {
1344                 cFYI(1, ("ppw - page not up to date"));
1345         }
1346
1347         /*
1348          * Set the "writeback" flag, and clear "dirty" in the radix tree.
1349          *
1350          * A writepage() implementation always needs to do either this,
1351          * or re-dirty the page with "redirty_page_for_writepage()" in
1352          * the case of a failure.
1353          *
1354          * Just unlocking the page will cause the radix tree tag-bits
1355          * to fail to update with the state of the page correctly.
1356          */
1357         set_page_writeback(page);               
1358         rc = cifs_partialpagewrite(page, 0, PAGE_CACHE_SIZE);
1359         SetPageUptodate(page); /* BB add check for error and ClearPageUptodate? */
1360         unlock_page(page);
1361         end_page_writeback(page);
1362         page_cache_release(page);
1363         FreeXid(xid);
1364         return rc;
1365 }
1366
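/*
 * Complete a write started by cifs_prepare_write(): extend i_size if the
 * write went past the current end of file, then either push the written
 * range to the server immediately with cifs_write() (when the page is not
 * uptodate) or simply mark the page dirty for later writeback.
 */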
1367 static int cifs_commit_write(struct file *file, struct page *page,
1368         unsigned offset, unsigned to)
1369 {
1370         int xid;
1371         int rc = 0;
1372         struct inode *inode = page->mapping->host;
1373         loff_t position = ((loff_t)page->index << PAGE_CACHE_SHIFT) + to;
1374         char *page_data;
1375
1376         xid = GetXid();
1377         cFYI(1, ("commit write for page %p up to position %lld for %d", 
1378                  page, position, to));
1379         spin_lock(&inode->i_lock);
1380         if (position > inode->i_size) {
1381                 i_size_write(inode, position);
1382                 /* if (file->private_data == NULL) {
1383                         rc = -EBADF;
1384                 } else {
1385                         open_file = (struct cifsFileInfo *)file->private_data;
1386                         cifs_sb = CIFS_SB(inode->i_sb);
1387                         rc = -EAGAIN;
1388                         while (rc == -EAGAIN) {
1389                                 if ((open_file->invalidHandle) && 
1390                                     (!open_file->closePend)) {
1391                                         rc = cifs_reopen_file(
1392                                                 file->f_path.dentry->d_inode, file);
1393                                         if (rc != 0)
1394                                                 break;
1395                                 }
1396                                 if (!open_file->closePend) {
1397                                         rc = CIFSSMBSetFileSize(xid,
1398                                                 cifs_sb->tcon, position,
1399                                                 open_file->netfid,
1400                                                 open_file->pid, FALSE);
1401                                 } else {
1402                                         rc = -EBADF;
1403                                         break;
1404                                 }
1405                         }
1406                         cFYI(1, (" SetEOF (commit write) rc = %d", rc));
1407                 } */
1408         }
1409         spin_unlock(&inode->i_lock);
1410         if (!PageUptodate(page)) {
1411                 position = ((loff_t)page->index << PAGE_CACHE_SHIFT) + offset;
1412                 /* can not rely on (or let) writepage write this data */
1413                 if (to < offset) {
1414                         cFYI(1, ("Illegal offsets, can not copy from %d to %d",
1415                                 offset, to));
1416                         FreeXid(xid);
1417                         return rc;
1418                 }
1419                 /* this is probably better than directly calling
1420                    partialpage_write since in this function the file handle
1421                    is known, which we might as well leverage */
1422                 /* BB check if anything else missing out of ppw
1423                    such as updating last write time */
1424                 page_data = kmap(page);
1425                 rc = cifs_write(file, page_data + offset, to-offset,
1426                                 &position);
1427                 if (rc > 0)
1428                         rc = 0;
1429                 /* else if (rc < 0) should we set writebehind rc? */
1430                 kunmap(page);
1431         } else {        
1432                 set_page_dirty(page);
1433         }
1434
1435         FreeXid(xid);
1436         return rc;
1437 }
1438
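/*
 * Flush all dirty pages of the file to the server; on success also clear
 * any write behind error recorded for the inode.
 */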
1439 int cifs_fsync(struct file *file, struct dentry *dentry, int datasync)
1440 {
1441         int xid;
1442         int rc = 0;
1443         struct inode *inode = file->f_path.dentry->d_inode;
1444
1445         xid = GetXid();
1446
1447         cFYI(1, ("Sync file - name: %s datasync: 0x%x", 
1448                 dentry->d_name.name, datasync));
1449         
1450         rc = filemap_fdatawrite(inode->i_mapping);
1451         if (rc == 0)
1452                 CIFS_I(inode)->write_behind_rc = 0;
1453         FreeXid(xid);
1454         return rc;
1455 }
1456
1457 /* static void cifs_sync_page(struct page *page)
1458 {
1459         struct address_space *mapping;
1460         struct inode *inode;
1461         unsigned long index = page->index;
1462         unsigned int rpages = 0;
1463         int rc = 0;
1464
1465         cFYI(1, ("sync page %p",page));
1466         mapping = page->mapping;
1467         if (!mapping)
1468                 return 0;
1469         inode = mapping->host;
1470         if (!inode)
1471                 return; */
1472
1473 /*      fill in rpages then 
1474         result = cifs_pagein_inode(inode, index, rpages); */ /* BB finish */
1475
1476 /*      cFYI(1, ("rpages is %d for sync page of Index %ld", rpages, index));
1477
1478 #if 0
1479         if (rc < 0)
1480                 return rc;
1481         return 0;
1482 #endif
1483 } */
1484
1485 /*
1486  * As file closes, flush all cached write data for this inode checking
1487  * for write behind errors.
1488  */
1489 int cifs_flush(struct file *file, fl_owner_t id)
1490 {
1491         struct inode *inode = file->f_path.dentry->d_inode;
1492         int rc = 0;
1493
1494         /* Rather than do the steps manually:
1495            lock the inode for writing
1496            loop through pages looking for write behind data (dirty pages)
1497            coalesce into contiguous 16K (or smaller) chunks to write to server
1498            send to server (prefer in parallel)
1499            deal with writebehind errors
1500            unlock inode for writing
1501            filemapfdatawrite appears easier for the time being */
1502
1503         rc = filemap_fdatawrite(inode->i_mapping);
1504         if (!rc) /* reset wb rc if we were able to write out dirty pages */
1505                 CIFS_I(inode)->write_behind_rc = 0;
1506
1507         cFYI(1, ("Flush inode %p file %p rc %d", inode, file, rc));
1508
1509         return rc;
1510 }
1511
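/*
 * Read into a userspace buffer, bypassing the page cache.  Data is
 * requested in chunks of at most rsize bytes; if the file handle has been
 * invalidated it is reopened and the read retried, and each response
 * payload is copied out with copy_to_user().
 */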
1512 ssize_t cifs_user_read(struct file *file, char __user *read_data,
1513         size_t read_size, loff_t *poffset)
1514 {
1515         int rc = -EACCES;
1516         unsigned int bytes_read = 0;
1517         unsigned int total_read = 0;
1518         unsigned int current_read_size;
1519         struct cifs_sb_info *cifs_sb;
1520         struct cifsTconInfo *pTcon;
1521         int xid;
1522         struct cifsFileInfo *open_file;
1523         char *smb_read_data;
1524         char __user *current_offset;
1525         struct smb_com_read_rsp *pSMBr;
1526
1527         xid = GetXid();
1528         cifs_sb = CIFS_SB(file->f_path.dentry->d_sb);
1529         pTcon = cifs_sb->tcon;
1530
1531         if (file->private_data == NULL) {
1532                 FreeXid(xid);
1533                 return -EBADF;
1534         }
1535         open_file = (struct cifsFileInfo *)file->private_data;
1536
1537         if ((file->f_flags & O_ACCMODE) == O_WRONLY) {
1538                 cFYI(1, ("attempting read on write only file instance"));
1539         }
1540         for (total_read = 0, current_offset = read_data;
1541              read_size > total_read;
1542              total_read += bytes_read, current_offset += bytes_read) {
1543                 current_read_size = min_t(const int, read_size - total_read, 
1544                                           cifs_sb->rsize);
1545                 rc = -EAGAIN;
1546                 smb_read_data = NULL;
1547                 while (rc == -EAGAIN) {
1548                         int buf_type = CIFS_NO_BUFFER;
1549                         if ((open_file->invalidHandle) && 
1550                             (!open_file->closePend)) {
1551                                 rc = cifs_reopen_file(file->f_path.dentry->d_inode,
1552                                         file, TRUE);
1553                                 if (rc != 0)
1554                                         break;
1555                         }
1556                         rc = CIFSSMBRead(xid, pTcon,
1557                                          open_file->netfid,
1558                                          current_read_size, *poffset,
1559                                          &bytes_read, &smb_read_data,
1560                                          &buf_type);
1561                         pSMBr = (struct smb_com_read_rsp *)smb_read_data;
1562                         if (smb_read_data) {
1563                                 if (copy_to_user(current_offset,
1564                                                 smb_read_data +
1565                                                 4 /* RFC1001 length field */ +
1566                                                 le16_to_cpu(pSMBr->DataOffset),
1567                                                 bytes_read)) {
1568                                         rc = -EFAULT;
1569                                 }
1570
1571                                 if (buf_type == CIFS_SMALL_BUFFER)
1572                                         cifs_small_buf_release(smb_read_data);
1573                                 else if (buf_type == CIFS_LARGE_BUFFER)
1574                                         cifs_buf_release(smb_read_data);
1575                                 smb_read_data = NULL;
1576                         }
1577                 }
1578                 if (rc || (bytes_read == 0)) {
1579                         if (total_read) {
1580                                 break;
1581                         } else {
1582                                 FreeXid(xid);
1583                                 return rc;
1584                         }
1585                 } else {
1586                         cifs_stats_bytes_read(pTcon, bytes_read);
1587                         *poffset += bytes_read;
1588                 }
1589         }
1590         FreeXid(xid);
1591         return total_read;
1592 }
1593
1594
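/*
 * Read into a kernel buffer.  Similar to cifs_user_read() except that the
 * response data is received directly into the caller's buffer, and the
 * request size is additionally capped to the negotiated buffer size for
 * older servers (e.g. Windows ME/9x).
 */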
1595 static ssize_t cifs_read(struct file *file, char *read_data, size_t read_size,
1596         loff_t *poffset)
1597 {
1598         int rc = -EACCES;
1599         unsigned int bytes_read = 0;
1600         unsigned int total_read;
1601         unsigned int current_read_size;
1602         struct cifs_sb_info *cifs_sb;
1603         struct cifsTconInfo *pTcon;
1604         int xid;
1605         char *current_offset;
1606         struct cifsFileInfo *open_file;
1607         int buf_type = CIFS_NO_BUFFER;
1608
1609         xid = GetXid();
1610         cifs_sb = CIFS_SB(file->f_path.dentry->d_sb);
1611         pTcon = cifs_sb->tcon;
1612
1613         if (file->private_data == NULL) {
1614                 FreeXid(xid);
1615                 return -EBADF;
1616         }
1617         open_file = (struct cifsFileInfo *)file->private_data;
1618
1619         if ((file->f_flags & O_ACCMODE) == O_WRONLY)
1620                 cFYI(1, ("attempting read on write only file instance"));
1621
1622         for (total_read = 0, current_offset = read_data; 
1623              read_size > total_read;
1624              total_read += bytes_read, current_offset += bytes_read) {
1625                 current_read_size = min_t(const int, read_size - total_read,
1626                                           cifs_sb->rsize);
1627                 /* For Windows ME and 9x we do not want to request more
1628                 than was negotiated, since the server would refuse the read */
1629                 if ((pTcon->ses) &&
1630                         !(pTcon->ses->capabilities & CAP_LARGE_FILES)) {
1631                         current_read_size = min_t(const int, current_read_size,
1632                                         pTcon->ses->server->maxBuf - 128);
1633                 }
1634                 rc = -EAGAIN;
1635                 while (rc == -EAGAIN) {
1636                         if ((open_file->invalidHandle) && 
1637                             (!open_file->closePend)) {
1638                                 rc = cifs_reopen_file(file->f_path.dentry->d_inode,
1639                                         file, TRUE);
1640                                 if (rc != 0)
1641                                         break;
1642                         }
1643                         rc = CIFSSMBRead(xid, pTcon,
1644                                          open_file->netfid,
1645                                          current_read_size, *poffset,
1646                                          &bytes_read, &current_offset,
1647                                          &buf_type);
1648                 }
1649                 if (rc || (bytes_read == 0)) {
1650                         if (total_read) {
1651                                 break;
1652                         } else {
1653                                 FreeXid(xid);
1654                                 return rc;
1655                         }
1656                 } else {
1657                         cifs_stats_bytes_read(pTcon, bytes_read);
1658                         *poffset += bytes_read;
1659                 }
1660         }
1661         FreeXid(xid);
1662         return total_read;
1663 }
1664
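/* revalidate the inode before handing the mapping to generic_file_mmap() */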
1665 int cifs_file_mmap(struct file *file, struct vm_area_struct *vma)
1666 {
1667         struct dentry *dentry = file->f_path.dentry;
1668         int rc, xid;
1669
1670         xid = GetXid();
1671         rc = cifs_revalidate(dentry);
1672         if (rc) {
1673                 cFYI(1, ("Validation prior to mmap failed, error=%d", rc));
1674                 FreeXid(xid);
1675                 return rc;
1676         }
1677         rc = generic_file_mmap(file, vma);
1678         FreeXid(xid);
1679         return rc;
1680 }
1681
1682
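/*
 * Copy data returned by an SMB read into the pages on the readahead list:
 * each page is added to the page cache, filled (zeroing any tail beyond
 * the received data), marked uptodate, unlocked and queued on the
 * caller's pagevec for LRU insertion.
 */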
1683 static void cifs_copy_cache_pages(struct address_space *mapping, 
1684         struct list_head *pages, int bytes_read, char *data,
1685         struct pagevec *plru_pvec)
1686 {
1687         struct page *page;
1688         char *target;
1689
1690         while (bytes_read > 0) {
1691                 if (list_empty(pages))
1692                         break;
1693
1694                 page = list_entry(pages->prev, struct page, lru);
1695                 list_del(&page->lru);
1696
1697                 if (add_to_page_cache(page, mapping, page->index,
1698                                       GFP_KERNEL)) {
1699                         page_cache_release(page);
1700                         cFYI(1, ("Add page cache failed"));
1701                         data += PAGE_CACHE_SIZE;
1702                         bytes_read -= PAGE_CACHE_SIZE;
1703                         continue;
1704                 }
1705
1706                 target = kmap_atomic(page, KM_USER0);
1707
1708                 if (PAGE_CACHE_SIZE > bytes_read) {
1709                         memcpy(target, data, bytes_read);
1710                         /* zero the tail end of this partial page */
1711                         memset(target + bytes_read, 0, 
1712                                PAGE_CACHE_SIZE - bytes_read);
1713                         bytes_read = 0;
1714                 } else {
1715                         memcpy(target, data, PAGE_CACHE_SIZE);
1716                         bytes_read -= PAGE_CACHE_SIZE;
1717                 }
1718                 kunmap_atomic(target, KM_USER0);
1719
1720                 flush_dcache_page(page);
1721                 SetPageUptodate(page);
1722                 unlock_page(page);
1723                 if (!pagevec_add(plru_pvec, page))
1724                         __pagevec_lru_add(plru_pvec);
1725                 data += PAGE_CACHE_SIZE;
1726         }
1727         return;
1728 }
1729
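/*
 * Readahead: walk the page list looking for runs of contiguous indexes,
 * issue one SMB read of up to rsize bytes per run, and populate the page
 * cache with the returned data via cifs_copy_cache_pages().
 */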
1730 static int cifs_readpages(struct file *file, struct address_space *mapping,
1731         struct list_head *page_list, unsigned num_pages)
1732 {
1733         int rc = -EACCES;
1734         int xid;
1735         loff_t offset;
1736         struct page *page;
1737         struct cifs_sb_info *cifs_sb;
1738         struct cifsTconInfo *pTcon;
1739         int bytes_read = 0;
1740         unsigned int read_size, i;
1741         char *smb_read_data = NULL;
1742         struct smb_com_read_rsp *pSMBr;
1743         struct pagevec lru_pvec;
1744         struct cifsFileInfo *open_file;
1745         int buf_type = CIFS_NO_BUFFER;
1746
1747         xid = GetXid();
1748         if (file->private_data == NULL) {
1749                 FreeXid(xid);
1750                 return -EBADF;
1751         }
1752         open_file = (struct cifsFileInfo *)file->private_data;
1753         cifs_sb = CIFS_SB(file->f_path.dentry->d_sb);
1754         pTcon = cifs_sb->tcon;
1755
1756         pagevec_init(&lru_pvec, 0);
1757
1758         for (i = 0; i < num_pages; ) {
1759                 unsigned contig_pages;
1760                 struct page *tmp_page;
1761                 unsigned long expected_index;
1762
1763                 if (list_empty(page_list))
1764                         break;
1765
1766                 page = list_entry(page_list->prev, struct page, lru);
1767                 offset = (loff_t)page->index << PAGE_CACHE_SHIFT;
1768
1769                 /* count adjacent pages that we will read into */
1770                 contig_pages = 0;
1771                 expected_index =
1772                         list_entry(page_list->prev, struct page, lru)->index;
1773                 list_for_each_entry_reverse(tmp_page, page_list, lru) {
1774                         if (tmp_page->index == expected_index) {
1775                                 contig_pages++;
1776                                 expected_index++;
1777                         } else
1778                                 break; 
1779                 }
1780                 if (contig_pages + i > num_pages)
1781                         contig_pages = num_pages - i;
1782
1783                 /* for reads over a certain size could initiate async
1784                    read ahead */
1785
1786                 read_size = contig_pages * PAGE_CACHE_SIZE;
1787                 /* Read size needs to be in multiples of one page */
1788                 read_size = min_t(const unsigned int, read_size,
1789                                   cifs_sb->rsize & PAGE_CACHE_MASK);
1790
1791                 rc = -EAGAIN;
1792                 while (rc == -EAGAIN) {
1793                         if ((open_file->invalidHandle) && 
1794                             (!open_file->closePend)) {
1795                                 rc = cifs_reopen_file(file->f_path.dentry->d_inode,
1796                                         file, TRUE);
1797                                 if (rc != 0)
1798                                         break;
1799                         }
1800
1801                         rc = CIFSSMBRead(xid, pTcon,
1802                                          open_file->netfid,
1803                                          read_size, offset,
1804                                          &bytes_read, &smb_read_data,
1805                                          &buf_type);
1806                         /* BB more RC checks ? */
1807                         if (rc == -EAGAIN) {
1808                                 if (smb_read_data) {
1809                                         if (buf_type == CIFS_SMALL_BUFFER)
1810                                                 cifs_small_buf_release(smb_read_data);
1811                                         else if (buf_type == CIFS_LARGE_BUFFER)
1812                                                 cifs_buf_release(smb_read_data);
1813                                         smb_read_data = NULL;
1814                                 }
1815                         }
1816                 }
1817                 if ((rc < 0) || (smb_read_data == NULL)) {
1818                         cFYI(1, ("Read error in readpages: %d", rc));
1819                         break;
1820                 } else if (bytes_read > 0) {
1821                         task_io_account_read(bytes_read);
1822                         pSMBr = (struct smb_com_read_rsp *)smb_read_data;
1823                         cifs_copy_cache_pages(mapping, page_list, bytes_read,
1824                                 smb_read_data + 4 /* RFC1001 hdr */ +
1825                                 le16_to_cpu(pSMBr->DataOffset), &lru_pvec);
1826
1827                         i += bytes_read >> PAGE_CACHE_SHIFT;
1828                         cifs_stats_bytes_read(pTcon, bytes_read);
1829                         if ((int)(bytes_read & PAGE_CACHE_MASK) != bytes_read) {
1830                                 i++; /* account for partial page */
1831
1832                                 /* server copy of file can have smaller size 
1833                                    than client */
1834                                 /* BB do we need to verify this common case ? 
1835                                    this case is ok - if we are at server EOF 
1836                                    we will hit it on next read */
1837
1838                                 /* break; */
1839                         }
1840                 } else {
1841                         cFYI(1, ("No bytes read (%d) at offset %lld. "
1842                                  "Cleaning remaining pages from readahead list",
1843                                  bytes_read, offset));
1844                         /* BB turn off caching and do new lookup on 
1845                            file size at server? */
1846                         break;
1847                 }
1848                 if (smb_read_data) {
1849                         if (buf_type == CIFS_SMALL_BUFFER)
1850                                 cifs_small_buf_release(smb_read_data);
1851                         else if (buf_type == CIFS_LARGE_BUFFER)
1852                                 cifs_buf_release(smb_read_data);
1853                         smb_read_data = NULL;
1854                 }
1855                 bytes_read = 0;
1856         }
1857
1858         pagevec_lru_add(&lru_pvec);
1859
1860 /* need to free smb_read_data buf before exit */
1861         if (smb_read_data) {
1862                 if (buf_type == CIFS_SMALL_BUFFER)
1863                         cifs_small_buf_release(smb_read_data);
1864                 else if (buf_type == CIFS_LARGE_BUFFER)
1865                         cifs_buf_release(smb_read_data);
1866                 smb_read_data = NULL;
1867         } 
1868
1869         FreeXid(xid);
1870         return rc;
1871 }
1872
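/*
 * Fill one page from the server using cifs_read(), zeroing whatever part
 * of the page lies beyond the data returned, and mark it uptodate on
 * success.
 */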
1873 static int cifs_readpage_worker(struct file *file, struct page *page,
1874         loff_t *poffset)
1875 {
1876         char *read_data;
1877         int rc;
1878
1879         page_cache_get(page);
1880         read_data = kmap(page);
1881         /* for reads over a certain size could initiate async read ahead */
1882
1883         rc = cifs_read(file, read_data, PAGE_CACHE_SIZE, poffset);
1884
1885         if (rc < 0)
1886                 goto io_error;
1887         else
1888                 cFYI(1, ("Bytes read %d", rc));
1889
1890         file->f_path.dentry->d_inode->i_atime =
1891                 current_fs_time(file->f_path.dentry->d_inode->i_sb);
1892
1893         if (PAGE_CACHE_SIZE > rc)
1894                 memset(read_data + rc, 0, PAGE_CACHE_SIZE - rc);
1895
1896         flush_dcache_page(page);
1897         SetPageUptodate(page);
1898         rc = 0;
1899
1900 io_error:
1901         kunmap(page);
1902         page_cache_release(page);
1903         return rc;
1904 }
1905
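/* ->readpage: fill the locked page, then unlock it whether or not we succeeded */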
1906 static int cifs_readpage(struct file *file, struct page *page)
1907 {
1908         loff_t offset = (loff_t)page->index << PAGE_CACHE_SHIFT;
1909         int rc = -EACCES;
1910         int xid;
1911
1912         xid = GetXid();
1913
1914         if (file->private_data == NULL) {
1915                 FreeXid(xid);
1916                 return -EBADF;
1917         }
1918
1919         cFYI(1, ("readpage %p at offset %d 0x%x\n", 
1920                  page, (int)offset, (int)offset));
1921
1922         rc = cifs_readpage_worker(file, page, &offset);
1923
1924         unlock_page(page);
1925
1926         FreeXid(xid);
1927         return rc;
1928 }
1929
1930 /* We do not want to update the file size from the server for inodes
1931    open for write, to avoid races with writepage extending the file.
1932    In the future we could consider refreshing the inode only on
1933    increases in the file size, but this is tricky to do without
1934    racing with writebehind page caching in the current Linux kernel
1935    design */
1936 int is_size_safe_to_change(struct cifsInodeInfo *cifsInode, __u64 end_of_file)
1937 {
1938         struct cifsFileInfo *open_file = NULL;
1939
1940         if (cifsInode)
1941                 open_file = find_writable_file(cifsInode);
1942
1943         if (open_file) {
1944                 struct cifs_sb_info *cifs_sb;
1945
1946                 /* there is not actually a write pending so let
1947                 this handle go free and allow it to
1948                 be closable if needed */
1949                 atomic_dec(&open_file->wrtPending);
1950
1951                 cifs_sb = CIFS_SB(cifsInode->vfs_inode.i_sb);
1952                 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_DIRECT_IO) {
1953                         /* since no page cache to corrupt on directio 
1954                         we can change size safely */
1955                         return 1;
1956                 }
1957
1958                 if (i_size_read(&cifsInode->vfs_inode) < end_of_file)
1959                         return 1;
1960
1961                 return 0;
1962         } else
1963                 return 1;
1964 }
1965
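/*
 * Get the page into a known state before the caller copies data into it:
 * if the page will be completely overwritten, or if no valid data beyond
 * the written range exists on the server (page at or past EOF), just zero
 * the untouched parts and mark it uptodate; otherwise read the current
 * contents from the server unless the file was opened write-only.
 */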
1966 static int cifs_prepare_write(struct file *file, struct page *page,
1967         unsigned from, unsigned to)
1968 {
1969         int rc = 0;
1970         loff_t i_size;
1971         loff_t offset;
1972
1973         cFYI(1, ("prepare write for page %p from %d to %d", page, from, to));
1974         if (PageUptodate(page))
1975                 return 0;
1976
1977         /* If we are writing a full page it will be up to date,
1978            no need to read from the server */
1979         if ((to == PAGE_CACHE_SIZE) && (from == 0)) {
1980                 SetPageUptodate(page);
1981                 return 0;
1982         }
1983
1984         offset = (loff_t)page->index << PAGE_CACHE_SHIFT;
1985         i_size = i_size_read(page->mapping->host);
1986
1987         if ((offset >= i_size) ||
1988             ((from == 0) && (offset + to) >= i_size)) {
1989                 /*
1990                  * We don't need to read data beyond the end of the file.
1991                  * zero it, and set the page uptodate
1992                  */
1993                 void *kaddr = kmap_atomic(page, KM_USER0);
1994
1995                 if (from)
1996                         memset(kaddr, 0, from);
1997                 if (to < PAGE_CACHE_SIZE)
1998                         memset(kaddr + to, 0, PAGE_CACHE_SIZE - to);
1999                 flush_dcache_page(page);
2000                 kunmap_atomic(kaddr, KM_USER0);
2001                 SetPageUptodate(page);
2002         } else if ((file->f_flags & O_ACCMODE) != O_WRONLY) {
2003                 /* might as well read a page, it is fast enough */
2004                 rc = cifs_readpage_worker(file, page, &offset);
2005         } else {
2006                 /* we could try using another file handle if there is one -
2007                    but how would we lock it to prevent close of that handle
2008                    racing with this read? In any case
2009                    this will be written out by commit_write so is fine */
2010         }
2011
2012         /* we do not need to pass errors back 
2013            e.g. if we do not have read access to the file 
2014            because cifs_commit_write will do the right thing.  -- shaggy */
2015
2016         return 0;
2017 }
2018
2019 const struct address_space_operations cifs_addr_ops = {
2020         .readpage = cifs_readpage,
2021         .readpages = cifs_readpages,
2022         .writepage = cifs_writepage,
2023         .writepages = cifs_writepages,
2024         .prepare_write = cifs_prepare_write,
2025         .commit_write = cifs_commit_write,
2026         .set_page_dirty = __set_page_dirty_nobuffers,
2027         /* .sync_page = cifs_sync_page, */
2028         /* .direct_IO = */
2029 };
2030
2031 /*
2032  * cifs_readpages requires the server to support a buffer large enough to
2033  * contain the header plus one complete page of data.  Otherwise, we need
2034  * to leave cifs_readpages out of the address space operations.
2035  */
2036 const struct address_space_operations cifs_addr_ops_smallbuf = {
2037         .readpage = cifs_readpage,
2038         .writepage = cifs_writepage,
2039         .writepages = cifs_writepages,
2040         .prepare_write = cifs_prepare_write,
2041         .commit_write = cifs_commit_write,
2042         .set_page_dirty = __set_page_dirty_nobuffers,
2043         /* .sync_page = cifs_sync_page, */
2044         /* .direct_IO = */
2045 };