1/*
2 * fs/cifs/file.c
3 *
4 * vfs operations that deal with files
5 *
6 * Copyright (C) International Business Machines Corp., 2002,2003
7 * Author(s): Steve French (sfrench@us.ibm.com)
8 *
9 * This library is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU Lesser General Public License as published
11 * by the Free Software Foundation; either version 2.1 of the License, or
12 * (at your option) any later version.
13 *
14 * This library is distributed in the hope that it will be useful,
15 * but WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See
17 * the GNU Lesser General Public License for more details.
18 *
19 * You should have received a copy of the GNU Lesser General Public License
20 * along with this library; if not, write to the Free Software
21 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
22 */
23#include <linux/fs.h>
24#include <linux/stat.h>
25#include <linux/fcntl.h>
26#include <linux/pagemap.h>
27#include <linux/pagevec.h>
28#include <linux/smp_lock.h>
29#include <asm/div64.h>
30#include "cifsfs.h"
31#include "cifspdu.h"
32#include "cifsglob.h"
33#include "cifsproto.h"
34#include "cifs_unicode.h"
35#include "cifs_debug.h"
36#include "cifs_fs_sb.h"
37
38static inline struct cifsFileInfo *cifs_init_private(
39 struct cifsFileInfo *private_data, struct inode *inode,
40 struct file *file, __u16 netfid)
41{
42 memset(private_data, 0, sizeof(struct cifsFileInfo));
43 private_data->netfid = netfid;
44 private_data->pid = current->tgid;
45 init_MUTEX(&private_data->fh_sem);
46 private_data->pfile = file; /* needed for writepage */
47 private_data->pInode = inode;
48 private_data->invalidHandle = FALSE;
49 private_data->closePend = FALSE;
50
51 return private_data;
52}
53
54static inline int cifs_convert_flags(unsigned int flags)
55{
56 if ((flags & O_ACCMODE) == O_RDONLY)
57 return GENERIC_READ;
58 else if ((flags & O_ACCMODE) == O_WRONLY)
59 return GENERIC_WRITE;
60 else if ((flags & O_ACCMODE) == O_RDWR) {
61 /* GENERIC_ALL is too much permission to request
62 can cause unnecessary access denied on create */
63 /* return GENERIC_ALL; */
64 return (GENERIC_READ | GENERIC_WRITE);
65 }
66
67 return 0x20197;
68}
69
70static inline int cifs_get_disposition(unsigned int flags)
71{
72 if ((flags & (O_CREAT | O_EXCL)) == (O_CREAT | O_EXCL))
73 return FILE_CREATE;
74 else if ((flags & (O_CREAT | O_TRUNC)) == (O_CREAT | O_TRUNC))
75 return FILE_OVERWRITE_IF;
76 else if ((flags & O_CREAT) == O_CREAT)
77 return FILE_OPEN_IF;
78 else
79 return FILE_OPEN;
80}
81
82/* all arguments to this function must be checked for validity in caller */
83static inline int cifs_open_inode_helper(struct inode *inode, struct file *file,
84 struct cifsInodeInfo *pCifsInode, struct cifsFileInfo *pCifsFile,
85 struct cifsTconInfo *pTcon, int *oplock, FILE_ALL_INFO *buf,
86 char *full_path, int xid)
87{
88 struct timespec temp;
89 int rc;
90
91 /* want handles we can use to read with first
92 in the list so we do not have to walk the
93 list to search for one in prepare_write */
94 if ((file->f_flags & O_ACCMODE) == O_WRONLY) {
95 list_add_tail(&pCifsFile->flist,
96 &pCifsInode->openFileList);
97 } else {
98 list_add(&pCifsFile->flist,
99 &pCifsInode->openFileList);
100 }
101 write_unlock(&GlobalSMBSeslock);
102 write_unlock(&file->f_owner.lock);
103 if (pCifsInode->clientCanCacheRead) {
104 /* we have the inode open somewhere else
105 no need to discard cache data */
106 goto client_can_cache;
107 }
108
109 /* BB need same check in cifs_create too? */
110 /* if not oplocked, invalidate inode pages if mtime or file
111 size changed */
112 temp = cifs_NTtimeToUnix(le64_to_cpu(buf->LastWriteTime));
113 if (timespec_equal(&file->f_dentry->d_inode->i_mtime, &temp) &&
114 (file->f_dentry->d_inode->i_size ==
115 (loff_t)le64_to_cpu(buf->EndOfFile))) {
116 cFYI(1, ("inode unchanged on server"));
117 } else {
118 if (file->f_dentry->d_inode->i_mapping) {
119 /* BB no need to lock inode until after invalidate
120 since namei code should already have it locked? */
121 filemap_fdatawrite(file->f_dentry->d_inode->i_mapping);
122 filemap_fdatawait(file->f_dentry->d_inode->i_mapping);
123 }
124 cFYI(1, ("invalidating remote inode since open detected it "
125 "changed"));
126 invalidate_remote_inode(file->f_dentry->d_inode);
127 }
128
129client_can_cache:
130 if (pTcon->ses->capabilities & CAP_UNIX)
131 rc = cifs_get_inode_info_unix(&file->f_dentry->d_inode,
132 full_path, inode->i_sb, xid);
133 else
134 rc = cifs_get_inode_info(&file->f_dentry->d_inode,
135 full_path, buf, inode->i_sb, xid);
136
137 if ((*oplock & 0xF) == OPLOCK_EXCLUSIVE) {
138 pCifsInode->clientCanCacheAll = TRUE;
139 pCifsInode->clientCanCacheRead = TRUE;
140 cFYI(1, ("Exclusive Oplock granted on inode %p",
141 file->f_dentry->d_inode));
142 } else if ((*oplock & 0xF) == OPLOCK_READ)
143 pCifsInode->clientCanCacheRead = TRUE;
144
145 return rc;
146}
147
148int cifs_open(struct inode *inode, struct file *file)
149{
150 int rc = -EACCES;
151 int xid, oplock;
152 struct cifs_sb_info *cifs_sb;
153 struct cifsTconInfo *pTcon;
154 struct cifsFileInfo *pCifsFile;
155 struct cifsInodeInfo *pCifsInode;
156 struct list_head *tmp;
157 char *full_path = NULL;
158 int desiredAccess;
159 int disposition;
160 __u16 netfid;
161 FILE_ALL_INFO *buf = NULL;
162
163 xid = GetXid();
164
165 cifs_sb = CIFS_SB(inode->i_sb);
166 pTcon = cifs_sb->tcon;
167
168 if (file->f_flags & O_CREAT) {
169 /* search inode for this file and fill in file->private_data */
170 pCifsInode = CIFS_I(file->f_dentry->d_inode);
171 read_lock(&GlobalSMBSeslock);
172 list_for_each(tmp, &pCifsInode->openFileList) {
173 pCifsFile = list_entry(tmp, struct cifsFileInfo,
174 flist);
175 if ((pCifsFile->pfile == NULL) &&
176 (pCifsFile->pid == current->tgid)) {
177 /* mode set in cifs_create */
178
179 /* needed for writepage */
180 pCifsFile->pfile = file;
181
182 file->private_data = pCifsFile;
183 break;
184 }
185 }
186 read_unlock(&GlobalSMBSeslock);
187 if (file->private_data != NULL) {
188 rc = 0;
189 FreeXid(xid);
190 return rc;
191 } else {
192 if (file->f_flags & O_EXCL)
193 cERROR(1, ("could not find file instance for "
194 "new file %p ", file));
195 }
196 }
197
198 down(&inode->i_sb->s_vfs_rename_sem);
199 full_path = build_path_from_dentry(file->f_dentry);
200 up(&inode->i_sb->s_vfs_rename_sem);
201 if (full_path == NULL) {
202 FreeXid(xid);
203 return -ENOMEM;
204 }
205
206 cFYI(1, (" inode = 0x%p file flags are 0x%x for %s",
207 inode, file->f_flags, full_path));
208 desiredAccess = cifs_convert_flags(file->f_flags);
209
210/*********************************************************************
211 * open flag mapping table:
212 *
213 * POSIX Flag CIFS Disposition
214 * ---------- ----------------
215 * O_CREAT FILE_OPEN_IF
216 * O_CREAT | O_EXCL FILE_CREATE
217 * O_CREAT | O_TRUNC FILE_OVERWRITE_IF
218 * O_TRUNC FILE_OVERWRITE
219 * none of the above FILE_OPEN
220 *
221 * Note that there is not a direct match between disposition
 222 * FILE_SUPERSEDE (ie create whether or not file exists), although
223 * O_CREAT | O_TRUNC is similar but truncates the existing
224 * file rather than creating a new file as FILE_SUPERSEDE does
225 * (which uses the attributes / metadata passed in on open call)
226 *?
227 *? O_SYNC is a reasonable match to CIFS writethrough flag
228 *? and the read write flags match reasonably. O_LARGEFILE
229 *? is irrelevant because largefile support is always used
230 *? by this client. Flags O_APPEND, O_DIRECT, O_DIRECTORY,
231 * O_FASYNC, O_NOFOLLOW, O_NONBLOCK need further investigation
232 *********************************************************************/
233
234 disposition = cifs_get_disposition(file->f_flags);
235
236 if (oplockEnabled)
237 oplock = REQ_OPLOCK;
238 else
239 oplock = FALSE;
240
241 /* BB pass O_SYNC flag through on file attributes .. BB */
242
243 /* Also refresh inode by passing in file_info buf returned by SMBOpen
244 and calling get_inode_info with returned buf (at least helps
245 non-Unix server case) */
246
247 /* BB we can not do this if this is the second open of a file
248 and the first handle has writebehind data, we might be
249 able to simply do a filemap_fdatawrite/filemap_fdatawait first */
250 buf = kmalloc(sizeof(FILE_ALL_INFO), GFP_KERNEL);
251 if (!buf) {
252 rc = -ENOMEM;
253 goto out;
254 }
255 rc = CIFSSMBOpen(xid, pTcon, full_path, disposition, desiredAccess,
256 CREATE_NOT_DIR, &netfid, &oplock, buf,
257 cifs_sb->local_nls);
258 if (rc) {
259 cFYI(1, ("cifs_open returned 0x%x ", rc));
260 goto out;
261 }
262 file->private_data =
263 kmalloc(sizeof(struct cifsFileInfo), GFP_KERNEL);
264 if (file->private_data == NULL) {
265 rc = -ENOMEM;
266 goto out;
267 }
268 pCifsFile = cifs_init_private(file->private_data, inode, file, netfid);
269 write_lock(&file->f_owner.lock);
270 write_lock(&GlobalSMBSeslock);
271 list_add(&pCifsFile->tlist, &pTcon->openFileList);
272
273 pCifsInode = CIFS_I(file->f_dentry->d_inode);
274 if (pCifsInode) {
275 rc = cifs_open_inode_helper(inode, file, pCifsInode,
276 pCifsFile, pTcon,
277 &oplock, buf, full_path, xid);
278 } else {
279 write_unlock(&GlobalSMBSeslock);
280 write_unlock(&file->f_owner.lock);
281 }
282
283 if (oplock & CIFS_CREATE_ACTION) {
284 /* time to set mode which we can not set earlier due to
285 problems creating new read-only files */
286 if (cifs_sb->tcon->ses->capabilities & CAP_UNIX) {
287 CIFSSMBUnixSetPerms(xid, pTcon, full_path,
288 inode->i_mode,
289 (__u64)-1, (__u64)-1, 0 /* dev */,
290 cifs_sb->local_nls);
291 } else {
292 /* BB implement via Windows security descriptors eg
293 CIFSSMBWinSetPerms(xid, pTcon, full_path, mode,
294 -1, -1, local_nls);
295 in the meantime could set r/o dos attribute when
296 perms are eg: mode & 0222 == 0 */
297 }
298 }
299
300out:
301 kfree(buf);
302 kfree(full_path);
303 FreeXid(xid);
304 return rc;
305}
306
307/* Try to reacquire byte range locks that were released when session */
308/* to server was lost */
309static int cifs_relock_file(struct cifsFileInfo *cifsFile)
310{
311 int rc = 0;
312
313/* BB list all locks open on this file and relock */
314
315 return rc;
316}
317
318static int cifs_reopen_file(struct inode *inode, struct file *file,
319 int can_flush)
320{
321 int rc = -EACCES;
322 int xid, oplock;
323 struct cifs_sb_info *cifs_sb;
324 struct cifsTconInfo *pTcon;
325 struct cifsFileInfo *pCifsFile;
326 struct cifsInodeInfo *pCifsInode;
327 char *full_path = NULL;
328 int desiredAccess;
329 int disposition = FILE_OPEN;
330 __u16 netfid;
331
332 if (inode == NULL)
333 return -EBADF;
334 if (file->private_data) {
335 pCifsFile = (struct cifsFileInfo *)file->private_data;
336 } else
337 return -EBADF;
338
339 xid = GetXid();
340 down(&pCifsFile->fh_sem);
341 if (pCifsFile->invalidHandle == FALSE) {
342 up(&pCifsFile->fh_sem);
343 FreeXid(xid);
344 return 0;
345 }
346
347 if (file->f_dentry == NULL) {
348 up(&pCifsFile->fh_sem);
349 cFYI(1, ("failed file reopen, no valid name if dentry freed"));
350 FreeXid(xid);
351 return -EBADF;
352 }
353 cifs_sb = CIFS_SB(inode->i_sb);
354 pTcon = cifs_sb->tcon;
355/* can not grab rename sem here because various ops, including
356 those that already have the rename sem can end up causing writepage
357 to get called and if the server was down that means we end up here,
358 and we can never tell if the caller already has the rename_sem */
359 full_path = build_path_from_dentry(file->f_dentry);
360 if (full_path == NULL) {
361 up(&pCifsFile->fh_sem);
362 FreeXid(xid);
363 return -ENOMEM;
364 }
365
366 cFYI(1, (" inode = 0x%p file flags are 0x%x for %s",
367 inode, file->f_flags,full_path));
368 desiredAccess = cifs_convert_flags(file->f_flags);
369
370 if (oplockEnabled)
371 oplock = REQ_OPLOCK;
372 else
373 oplock = FALSE;
374
375 /* Can not refresh inode by passing in file_info buf to be returned
376 by SMBOpen and then calling get_inode_info with returned buf
377 since file might have write behind data that needs to be flushed
378 and server version of file size can be stale. If we knew for sure
379 that inode was not dirty locally we could do this */
380
381/* buf = kmalloc(sizeof(FILE_ALL_INFO), GFP_KERNEL);
382 if (buf == 0) {
383 up(&pCifsFile->fh_sem);
384 kfree(full_path);
385 FreeXid(xid);
386 return -ENOMEM;
387 } */
388 rc = CIFSSMBOpen(xid, pTcon, full_path, disposition, desiredAccess,
389 CREATE_NOT_DIR, &netfid, &oplock, NULL,
390 cifs_sb->local_nls);
391 if (rc) {
392 up(&pCifsFile->fh_sem);
393 cFYI(1, ("cifs_open returned 0x%x ", rc));
394 cFYI(1, ("oplock: %d ", oplock));
395 } else {
396 pCifsFile->netfid = netfid;
397 pCifsFile->invalidHandle = FALSE;
398 up(&pCifsFile->fh_sem);
399 pCifsInode = CIFS_I(inode);
400 if (pCifsInode) {
401 if (can_flush) {
402 filemap_fdatawrite(inode->i_mapping);
403 filemap_fdatawait(inode->i_mapping);
404 /* temporarily disable caching while we
405 go to server to get inode info */
406 pCifsInode->clientCanCacheAll = FALSE;
407 pCifsInode->clientCanCacheRead = FALSE;
408 if (pTcon->ses->capabilities & CAP_UNIX)
409 rc = cifs_get_inode_info_unix(&inode,
410 full_path, inode->i_sb, xid);
411 else
412 rc = cifs_get_inode_info(&inode,
413 full_path, NULL, inode->i_sb,
414 xid);
415 } /* else we are writing out data to server already
416 and could deadlock if we tried to flush data, and
417 since we do not know if we have data that would
418 invalidate the current end of file on the server
 419				we can not go to the server to get the new inode
420 info */
421 if ((oplock & 0xF) == OPLOCK_EXCLUSIVE) {
422 pCifsInode->clientCanCacheAll = TRUE;
423 pCifsInode->clientCanCacheRead = TRUE;
424 cFYI(1, ("Exclusive Oplock granted on inode %p",
425 file->f_dentry->d_inode));
426 } else if ((oplock & 0xF) == OPLOCK_READ) {
427 pCifsInode->clientCanCacheRead = TRUE;
428 pCifsInode->clientCanCacheAll = FALSE;
429 } else {
430 pCifsInode->clientCanCacheRead = FALSE;
431 pCifsInode->clientCanCacheAll = FALSE;
432 }
433 cifs_relock_file(pCifsFile);
434 }
435 }
436
437 kfree(full_path);
438 FreeXid(xid);
439 return rc;
440}
441
442int cifs_close(struct inode *inode, struct file *file)
443{
444 int rc = 0;
445 int xid;
446 struct cifs_sb_info *cifs_sb;
447 struct cifsTconInfo *pTcon;
448 struct cifsFileInfo *pSMBFile =
449 (struct cifsFileInfo *)file->private_data;
450
451 xid = GetXid();
452
453 cifs_sb = CIFS_SB(inode->i_sb);
454 pTcon = cifs_sb->tcon;
455 if (pSMBFile) {
456 pSMBFile->closePend = TRUE;
457 write_lock(&file->f_owner.lock);
458 if (pTcon) {
459 /* no sense reconnecting to close a file that is
460 already closed */
461 if (pTcon->tidStatus != CifsNeedReconnect) {
462 write_unlock(&file->f_owner.lock);
463 rc = CIFSSMBClose(xid, pTcon,
464 pSMBFile->netfid);
465 write_lock(&file->f_owner.lock);
466 }
467 }
 468		write_lock(&GlobalSMBSeslock);
 469		list_del(&pSMBFile->flist);
 470		list_del(&pSMBFile->tlist);
 471		write_unlock(&GlobalSMBSeslock);
 472		write_unlock(&file->f_owner.lock);
473 kfree(pSMBFile->search_resume_name);
474 kfree(file->private_data);
475 file->private_data = NULL;
476 } else
477 rc = -EBADF;
478
479 if (list_empty(&(CIFS_I(inode)->openFileList))) {
480 cFYI(1, ("closing last open instance for inode %p", inode));
481 /* if the file is not open we do not know if we can cache info
482 on this inode, much less write behind and read ahead */
483 CIFS_I(inode)->clientCanCacheRead = FALSE;
484 CIFS_I(inode)->clientCanCacheAll = FALSE;
485 }
486 if ((rc ==0) && CIFS_I(inode)->write_behind_rc)
487 rc = CIFS_I(inode)->write_behind_rc;
488 FreeXid(xid);
489 return rc;
490}
491
492int cifs_closedir(struct inode *inode, struct file *file)
493{
494 int rc = 0;
495 int xid;
496 struct cifsFileInfo *pCFileStruct =
497 (struct cifsFileInfo *)file->private_data;
498 char *ptmp;
499
500 cFYI(1, ("Closedir inode = 0x%p with ", inode));
501
502 xid = GetXid();
503
504 if (pCFileStruct) {
505 struct cifsTconInfo *pTcon;
506 struct cifs_sb_info *cifs_sb = CIFS_SB(file->f_dentry->d_sb);
507
508 pTcon = cifs_sb->tcon;
509
510 cFYI(1, ("Freeing private data in close dir"));
511 if (pCFileStruct->srch_inf.endOfSearch == FALSE) {
512 pCFileStruct->invalidHandle = TRUE;
513 rc = CIFSFindClose(xid, pTcon, pCFileStruct->netfid);
514 cFYI(1, ("Closing uncompleted readdir with rc %d",
515 rc));
516 /* not much we can do if it fails anyway, ignore rc */
517 rc = 0;
518 }
519 ptmp = pCFileStruct->srch_inf.ntwrk_buf_start;
520 if (ptmp) {
521 /* BB removeme BB */ cFYI(1, ("freeing smb buf in srch struct in closedir"));
522 pCFileStruct->srch_inf.ntwrk_buf_start = NULL;
523 cifs_buf_release(ptmp);
524 }
525 ptmp = pCFileStruct->search_resume_name;
526 if (ptmp) {
527 /* BB removeme BB */ cFYI(1, ("freeing resume name in closedir"));
528 pCFileStruct->search_resume_name = NULL;
529 kfree(ptmp);
530 }
531 kfree(file->private_data);
532 file->private_data = NULL;
533 }
534 /* BB can we lock the filestruct while this is going on? */
535 FreeXid(xid);
536 return rc;
537}
538
539int cifs_lock(struct file *file, int cmd, struct file_lock *pfLock)
540{
541 int rc, xid;
542 __u32 lockType = LOCKING_ANDX_LARGE_FILES;
543 __u32 numLock = 0;
544 __u32 numUnlock = 0;
545 __u64 length;
546 int wait_flag = FALSE;
547 struct cifs_sb_info *cifs_sb;
548 struct cifsTconInfo *pTcon;
549
550 length = 1 + pfLock->fl_end - pfLock->fl_start;
551 rc = -EACCES;
552 xid = GetXid();
553
554 cFYI(1, ("Lock parm: 0x%x flockflags: "
555 "0x%x flocktype: 0x%x start: %lld end: %lld",
556 cmd, pfLock->fl_flags, pfLock->fl_type, pfLock->fl_start,
557 pfLock->fl_end));
558
559 if (pfLock->fl_flags & FL_POSIX)
560 cFYI(1, ("Posix "));
561 if (pfLock->fl_flags & FL_FLOCK)
562 cFYI(1, ("Flock "));
563 if (pfLock->fl_flags & FL_SLEEP) {
564 cFYI(1, ("Blocking lock "));
565 wait_flag = TRUE;
566 }
567 if (pfLock->fl_flags & FL_ACCESS)
568 cFYI(1, ("Process suspended by mandatory locking - "
569 "not implemented yet "));
570 if (pfLock->fl_flags & FL_LEASE)
571 cFYI(1, ("Lease on file - not implemented yet"));
572 if (pfLock->fl_flags &
573 (~(FL_POSIX | FL_FLOCK | FL_SLEEP | FL_ACCESS | FL_LEASE)))
574 cFYI(1, ("Unknown lock flags 0x%x", pfLock->fl_flags));
575
576 if (pfLock->fl_type == F_WRLCK) {
577 cFYI(1, ("F_WRLCK "));
578 numLock = 1;
579 } else if (pfLock->fl_type == F_UNLCK) {
580 cFYI(1, ("F_UNLCK "));
581 numUnlock = 1;
582 } else if (pfLock->fl_type == F_RDLCK) {
583 cFYI(1, ("F_RDLCK "));
584 lockType |= LOCKING_ANDX_SHARED_LOCK;
585 numLock = 1;
586 } else if (pfLock->fl_type == F_EXLCK) {
587 cFYI(1, ("F_EXLCK "));
588 numLock = 1;
589 } else if (pfLock->fl_type == F_SHLCK) {
590 cFYI(1, ("F_SHLCK "));
591 lockType |= LOCKING_ANDX_SHARED_LOCK;
592 numLock = 1;
593 } else
594 cFYI(1, ("Unknown type of lock "));
595
596 cifs_sb = CIFS_SB(file->f_dentry->d_sb);
597 pTcon = cifs_sb->tcon;
598
599 if (file->private_data == NULL) {
600 FreeXid(xid);
601 return -EBADF;
602 }
603
604 if (IS_GETLK(cmd)) {
605 rc = CIFSSMBLock(xid, pTcon,
606 ((struct cifsFileInfo *)file->
607 private_data)->netfid,
608 length,
609 pfLock->fl_start, 0, 1, lockType,
610 0 /* wait flag */ );
611 if (rc == 0) {
612 rc = CIFSSMBLock(xid, pTcon,
613 ((struct cifsFileInfo *) file->
614 private_data)->netfid,
615 length,
616 pfLock->fl_start, 1 /* numUnlock */ ,
617 0 /* numLock */ , lockType,
618 0 /* wait flag */ );
619 pfLock->fl_type = F_UNLCK;
620 if (rc != 0)
621 cERROR(1, ("Error unlocking previously locked "
622 "range %d during test of lock ",
623 rc));
624 rc = 0;
625
626 } else {
627 /* if rc == ERR_SHARING_VIOLATION ? */
628 rc = 0; /* do not change lock type to unlock
629 since range in use */
630 }
631
632 FreeXid(xid);
633 return rc;
634 }
635
636 rc = CIFSSMBLock(xid, pTcon,
637 ((struct cifsFileInfo *) file->private_data)->
638 netfid, length,
639 pfLock->fl_start, numUnlock, numLock, lockType,
640 wait_flag);
641 if (rc == 0 && (pfLock->fl_flags & FL_POSIX))
642 posix_lock_file_wait(file, pfLock);
643 FreeXid(xid);
644 return rc;
645}
646
647ssize_t cifs_user_write(struct file *file, const char __user *write_data,
648 size_t write_size, loff_t *poffset)
649{
650 int rc = 0;
651 unsigned int bytes_written = 0;
652 unsigned int total_written;
653 struct cifs_sb_info *cifs_sb;
654 struct cifsTconInfo *pTcon;
655 int xid, long_op;
656 struct cifsFileInfo *open_file;
657
658 if (file->f_dentry == NULL)
659 return -EBADF;
660
661 cifs_sb = CIFS_SB(file->f_dentry->d_sb);
662 if (cifs_sb == NULL)
663 return -EBADF;
664
665 pTcon = cifs_sb->tcon;
666
667 /* cFYI(1,
668 (" write %d bytes to offset %lld of %s", write_size,
669 *poffset, file->f_dentry->d_name.name)); */
670
671 if (file->private_data == NULL)
672 return -EBADF;
673 else
674 open_file = (struct cifsFileInfo *) file->private_data;
675
676 xid = GetXid();
677 if (file->f_dentry->d_inode == NULL) {
678 FreeXid(xid);
679 return -EBADF;
680 }
681
682 if (*poffset > file->f_dentry->d_inode->i_size)
683 long_op = 2; /* writes past end of file can take a long time */
684 else
685 long_op = 1;
686
687 for (total_written = 0; write_size > total_written;
688 total_written += bytes_written) {
689 rc = -EAGAIN;
690 while (rc == -EAGAIN) {
691 if (file->private_data == NULL) {
692 /* file has been closed on us */
693 FreeXid(xid);
694 /* if we have gotten here we have written some data
695 and blocked, and the file has been freed on us while
696 we blocked so return what we managed to write */
697 return total_written;
698 }
699 if (open_file->closePend) {
700 FreeXid(xid);
701 if (total_written)
702 return total_written;
703 else
704 return -EBADF;
705 }
706 if (open_file->invalidHandle) {
707 if ((file->f_dentry == NULL) ||
708 (file->f_dentry->d_inode == NULL)) {
709 FreeXid(xid);
710 return total_written;
711 }
712 /* we could deadlock if we called
713 filemap_fdatawait from here so tell
714 reopen_file not to flush data to server
715 now */
716 rc = cifs_reopen_file(file->f_dentry->d_inode,
717 file, FALSE);
718 if (rc != 0)
719 break;
720 }
721
722 rc = CIFSSMBWrite(xid, pTcon,
723 open_file->netfid,
724 min_t(const int, cifs_sb->wsize,
725 write_size - total_written),
726 *poffset, &bytes_written,
727 NULL, write_data + total_written, long_op);
728 }
729 if (rc || (bytes_written == 0)) {
730 if (total_written)
731 break;
732 else {
733 FreeXid(xid);
734 return rc;
735 }
736 } else
737 *poffset += bytes_written;
738 long_op = FALSE; /* subsequent writes fast -
739 15 seconds is plenty */
740 }
741
742#ifdef CONFIG_CIFS_STATS
743 if (total_written > 0) {
744 atomic_inc(&pTcon->num_writes);
745 spin_lock(&pTcon->stat_lock);
746 pTcon->bytes_written += total_written;
747 spin_unlock(&pTcon->stat_lock);
748 }
749#endif
750
751 /* since the write may have blocked check these pointers again */
752 if (file->f_dentry) {
753 if (file->f_dentry->d_inode) {
754 struct inode *inode = file->f_dentry->d_inode;
755 inode->i_ctime = inode->i_mtime =
756 current_fs_time(inode->i_sb);
757 if (total_written > 0) {
758 if (*poffset > file->f_dentry->d_inode->i_size)
759 i_size_write(file->f_dentry->d_inode,
760 *poffset);
761 }
762 mark_inode_dirty_sync(file->f_dentry->d_inode);
763 }
764 }
765 FreeXid(xid);
766 return total_written;
767}
768
769static ssize_t cifs_write(struct file *file, const char *write_data,
770 size_t write_size, loff_t *poffset)
771{
772 int rc = 0;
773 unsigned int bytes_written = 0;
774 unsigned int total_written;
775 struct cifs_sb_info *cifs_sb;
776 struct cifsTconInfo *pTcon;
777 int xid, long_op;
778 struct cifsFileInfo *open_file;
779
780 if (file->f_dentry == NULL)
781 return -EBADF;
782
783 cifs_sb = CIFS_SB(file->f_dentry->d_sb);
784 if (cifs_sb == NULL)
785 return -EBADF;
786
787 pTcon = cifs_sb->tcon;
788
789 /* cFYI(1,
790 (" write %d bytes to offset %lld of %s", write_size,
791 *poffset, file->f_dentry->d_name.name)); */
792
793 if (file->private_data == NULL)
794 return -EBADF;
795 else
796 open_file = (struct cifsFileInfo *)file->private_data;
797
798 xid = GetXid();
799 if (file->f_dentry->d_inode == NULL) {
800 FreeXid(xid);
801 return -EBADF;
802 }
803
804 if (*poffset > file->f_dentry->d_inode->i_size)
805 long_op = 2; /* writes past end of file can take a long time */
806 else
807 long_op = 1;
808
809 for (total_written = 0; write_size > total_written;
810 total_written += bytes_written) {
811 rc = -EAGAIN;
812 while (rc == -EAGAIN) {
813 if (file->private_data == NULL) {
814 /* file has been closed on us */
815 FreeXid(xid);
816 /* if we have gotten here we have written some data
817 and blocked, and the file has been freed on us
818 while we blocked so return what we managed to
819 write */
820 return total_written;
821 }
822 if (open_file->closePend) {
823 FreeXid(xid);
824 if (total_written)
825 return total_written;
826 else
827 return -EBADF;
828 }
829 if (open_file->invalidHandle) {
830 if ((file->f_dentry == NULL) ||
831 (file->f_dentry->d_inode == NULL)) {
832 FreeXid(xid);
833 return total_written;
834 }
835 /* we could deadlock if we called
836 filemap_fdatawait from here so tell
837 reopen_file not to flush data to
838 server now */
839 rc = cifs_reopen_file(file->f_dentry->d_inode,
840 file, FALSE);
841 if (rc != 0)
842 break;
843 }
844
845 rc = CIFSSMBWrite(xid, pTcon,
846 open_file->netfid,
847 min_t(const int, cifs_sb->wsize,
848 write_size - total_written),
849 *poffset, &bytes_written,
850 write_data + total_written, NULL, long_op);
851 }
852 if (rc || (bytes_written == 0)) {
853 if (total_written)
854 break;
855 else {
856 FreeXid(xid);
857 return rc;
858 }
859 } else
860 *poffset += bytes_written;
861 long_op = FALSE; /* subsequent writes fast -
862 15 seconds is plenty */
863 }
864
865#ifdef CONFIG_CIFS_STATS
866 if (total_written > 0) {
867 atomic_inc(&pTcon->num_writes);
868 spin_lock(&pTcon->stat_lock);
869 pTcon->bytes_written += total_written;
870 spin_unlock(&pTcon->stat_lock);
871 }
872#endif
873
874 /* since the write may have blocked check these pointers again */
875 if (file->f_dentry) {
876 if (file->f_dentry->d_inode) {
877 file->f_dentry->d_inode->i_ctime =
878 file->f_dentry->d_inode->i_mtime = CURRENT_TIME;
879 if (total_written > 0) {
880 if (*poffset > file->f_dentry->d_inode->i_size)
881 i_size_write(file->f_dentry->d_inode,
882 *poffset);
883 }
884 mark_inode_dirty_sync(file->f_dentry->d_inode);
885 }
886 }
887 FreeXid(xid);
888 return total_written;
889}
890
891static int cifs_partialpagewrite(struct page *page, unsigned from, unsigned to)
892{
893 struct address_space *mapping = page->mapping;
894 loff_t offset = (loff_t)page->index << PAGE_CACHE_SHIFT;
895 char *write_data;
896 int rc = -EFAULT;
897 int bytes_written = 0;
898 struct cifs_sb_info *cifs_sb;
899 struct cifsTconInfo *pTcon;
900 struct inode *inode;
901 struct cifsInodeInfo *cifsInode;
902 struct cifsFileInfo *open_file = NULL;
903 struct list_head *tmp;
904 struct list_head *tmp1;
905
906 if (!mapping || !mapping->host)
907 return -EFAULT;
908
909 inode = page->mapping->host;
910 cifs_sb = CIFS_SB(inode->i_sb);
911 pTcon = cifs_sb->tcon;
912
913 offset += (loff_t)from;
914 write_data = kmap(page);
915 write_data += from;
916
917 if ((to > PAGE_CACHE_SIZE) || (from > to)) {
918 kunmap(page);
919 return -EIO;
920 }
921
922 /* racing with truncate? */
923 if (offset > mapping->host->i_size) {
924 kunmap(page);
925 return 0; /* don't care */
926 }
927
928 /* check to make sure that we are not extending the file */
929 if (mapping->host->i_size - offset < (loff_t)to)
930 to = (unsigned)(mapping->host->i_size - offset);
931
932 cifsInode = CIFS_I(mapping->host);
933 read_lock(&GlobalSMBSeslock);
934 /* BB we should start at the end */
935 list_for_each_safe(tmp, tmp1, &cifsInode->openFileList) {
936 open_file = list_entry(tmp, struct cifsFileInfo, flist);
937 if (open_file->closePend)
938 continue;
939 /* We check if file is open for writing first */
940 if ((open_file->pfile) &&
941 ((open_file->pfile->f_flags & O_RDWR) ||
942 (open_file->pfile->f_flags & O_WRONLY))) {
943 read_unlock(&GlobalSMBSeslock);
944 bytes_written = cifs_write(open_file->pfile,
945 write_data, to-from,
946 &offset);
947 read_lock(&GlobalSMBSeslock);
948 /* Does mm or vfs already set times? */
949 inode->i_atime =
950 inode->i_mtime = current_fs_time(inode->i_sb);
951 if ((bytes_written > 0) && (offset)) {
952 rc = 0;
953 } else if (bytes_written < 0) {
954 if (rc == -EBADF) {
955 /* have seen a case in which kernel seemed to
956 have closed/freed a file even with writes
957 active so we might as well see if there are
958 other file structs to try for the same
959 inode before giving up */
960 continue;
961 } else
962 rc = bytes_written;
963 }
964 break; /* now that we found a valid file handle and
965 tried to write to it we are done, no sense
966 continuing to loop looking for another */
967 }
968 if (tmp->next == NULL) {
969 cFYI(1, ("File instance %p removed", tmp));
970 break;
971 }
972 }
973 read_unlock(&GlobalSMBSeslock);
974 if (open_file == NULL) {
975 cFYI(1, ("No writeable filehandles for inode"));
976 rc = -EIO;
977 }
978
979 kunmap(page);
980 return rc;
981}
982
983#if 0
984static int cifs_writepages(struct address_space *mapping,
985 struct writeback_control *wbc)
986{
987 int rc = -EFAULT;
988 int xid;
989
990 xid = GetXid();
991
992 /* Find contiguous pages then iterate through repeating
993 call 16K write then Setpageuptodate or if LARGE_WRITE_X
994 support then send larger writes via kevec so as to eliminate
995 a memcpy */
996 FreeXid(xid);
997 return rc;
998}
999#endif
1000
1001static int cifs_writepage(struct page* page, struct writeback_control *wbc)
1002{
1003 int rc = -EFAULT;
1004 int xid;
1005
1006 xid = GetXid();
1007/* BB add check for wbc flags */
1008 page_cache_get(page);
1009 if (!PageUptodate(page)) {
1010 cFYI(1, ("ppw - page not up to date"));
1011 }
1012
1013 rc = cifs_partialpagewrite(page, 0, PAGE_CACHE_SIZE);
1014 SetPageUptodate(page); /* BB add check for error and Clearuptodate? */
1015 unlock_page(page);
1016 page_cache_release(page);
1017 FreeXid(xid);
1018 return rc;
1019}
1020
1021static int cifs_commit_write(struct file *file, struct page *page,
1022 unsigned offset, unsigned to)
1023{
1024 int xid;
1025 int rc = 0;
1026 struct inode *inode = page->mapping->host;
1027 loff_t position = ((loff_t)page->index << PAGE_CACHE_SHIFT) + to;
1028 char *page_data;
1029
1030 xid = GetXid();
1031 cFYI(1, ("commit write for page %p up to position %lld for %d",
1032 page, position, to));
1033 if (position > inode->i_size) {
1034 i_size_write(inode, position);
1035 /* if (file->private_data == NULL) {
1036 rc = -EBADF;
1037 } else {
1038 open_file = (struct cifsFileInfo *)file->private_data;
1039 cifs_sb = CIFS_SB(inode->i_sb);
1040 rc = -EAGAIN;
1041 while (rc == -EAGAIN) {
1042 if ((open_file->invalidHandle) &&
1043 (!open_file->closePend)) {
1044 rc = cifs_reopen_file(
1045 file->f_dentry->d_inode, file);
1046 if (rc != 0)
1047 break;
1048 }
1049 if (!open_file->closePend) {
1050 rc = CIFSSMBSetFileSize(xid,
1051 cifs_sb->tcon, position,
1052 open_file->netfid,
1053 open_file->pid, FALSE);
1054 } else {
1055 rc = -EBADF;
1056 break;
1057 }
1058 }
1059 cFYI(1, (" SetEOF (commit write) rc = %d", rc));
1060 } */
1061 }
1062 if (!PageUptodate(page)) {
1063 position = ((loff_t)page->index << PAGE_CACHE_SHIFT) + offset;
1064 /* can not rely on (or let) writepage write this data */
1065 if (to < offset) {
1066 cFYI(1, ("Illegal offsets, can not copy from %d to %d",
1067 offset, to));
1068 FreeXid(xid);
1069 return rc;
1070 }
1071 /* this is probably better than directly calling
1072 partialpage_write since in this function the file handle is
1073 known which we might as well leverage */
1074 /* BB check if anything else missing out of ppw
1075 such as updating last write time */
1076 page_data = kmap(page);
1077 rc = cifs_write(file, page_data + offset, to-offset,
1078 &position);
1079 if (rc > 0)
1080 rc = 0;
1081 /* else if (rc < 0) should we set writebehind rc? */
1082 kunmap(page);
1083 } else {
1084 set_page_dirty(page);
1085 }
1086
1087 FreeXid(xid);
1088 return rc;
1089}
1090
1091int cifs_fsync(struct file *file, struct dentry *dentry, int datasync)
1092{
1093 int xid;
1094 int rc = 0;
1095 struct inode *inode = file->f_dentry->d_inode;
1096
1097 xid = GetXid();
1098
1099 cFYI(1, ("Sync file - name: %s datasync: 0x%x ",
1100 dentry->d_name.name, datasync));
1101
1102 rc = filemap_fdatawrite(inode->i_mapping);
1103 if (rc == 0)
1104 CIFS_I(inode)->write_behind_rc = 0;
1105 FreeXid(xid);
1106 return rc;
1107}
1108
1109/* static int cifs_sync_page(struct page *page)
1110{
1111 struct address_space *mapping;
1112 struct inode *inode;
1113 unsigned long index = page->index;
1114 unsigned int rpages = 0;
1115 int rc = 0;
1116
1117 cFYI(1, ("sync page %p",page));
1118 mapping = page->mapping;
1119 if (!mapping)
1120 return 0;
1121 inode = mapping->host;
1122 if (!inode)
1123 return 0; */
1124
1125/* fill in rpages then
1126 result = cifs_pagein_inode(inode, index, rpages); */ /* BB finish */
1127
1128/* cFYI(1, ("rpages is %d for sync page of Index %ld ", rpages, index));
1129
1130 if (rc < 0)
1131 return rc;
1132 return 0;
1133} */
1134
1135/*
1136 * As file closes, flush all cached write data for this inode checking
1137 * for write behind errors.
1138 */
1139int cifs_flush(struct file *file)
1140{
1141 struct inode * inode = file->f_dentry->d_inode;
1142 int rc = 0;
1143
1144 /* Rather than do the steps manually:
1145 lock the inode for writing
1146 loop through pages looking for write behind data (dirty pages)
1147 coalesce into contiguous 16K (or smaller) chunks to write to server
1148 send to server (prefer in parallel)
1149 deal with writebehind errors
1150 unlock inode for writing
1151 filemapfdatawrite appears easier for the time being */
1152
1153 rc = filemap_fdatawrite(inode->i_mapping);
1154 if (!rc) /* reset wb rc if we were able to write out dirty pages */
1155 CIFS_I(inode)->write_behind_rc = 0;
1156
1157 cFYI(1, ("Flush inode %p file %p rc %d",inode,file,rc));
1158
1159 return rc;
1160}
1161
1162ssize_t cifs_user_read(struct file *file, char __user *read_data,
1163 size_t read_size, loff_t *poffset)
1164{
1165 int rc = -EACCES;
1166 unsigned int bytes_read = 0;
1167 unsigned int total_read = 0;
1168 unsigned int current_read_size;
1169 struct cifs_sb_info *cifs_sb;
1170 struct cifsTconInfo *pTcon;
1171 int xid;
1172 struct cifsFileInfo *open_file;
1173 char *smb_read_data;
1174 char __user *current_offset;
1175 struct smb_com_read_rsp *pSMBr;
1176
1177 xid = GetXid();
1178 cifs_sb = CIFS_SB(file->f_dentry->d_sb);
1179 pTcon = cifs_sb->tcon;
1180
1181 if (file->private_data == NULL) {
1182 FreeXid(xid);
1183 return -EBADF;
1184 }
1185 open_file = (struct cifsFileInfo *)file->private_data;
1186
1187 if ((file->f_flags & O_ACCMODE) == O_WRONLY) {
1188 cFYI(1, ("attempting read on write only file instance"));
1189 }
1190 for (total_read = 0, current_offset = read_data;
1191 read_size > total_read;
1192 total_read += bytes_read, current_offset += bytes_read) {
1193 current_read_size = min_t(const int, read_size - total_read,
1194 cifs_sb->rsize);
1195 rc = -EAGAIN;
1196 smb_read_data = NULL;
1197 while (rc == -EAGAIN) {
1198 if ((open_file->invalidHandle) &&
1199 (!open_file->closePend)) {
1200 rc = cifs_reopen_file(file->f_dentry->d_inode,
1201 file, TRUE);
1202 if (rc != 0)
1203 break;
1204 }
1205
1206 rc = CIFSSMBRead(xid, pTcon,
1207 open_file->netfid,
1208 current_read_size, *poffset,
1209 &bytes_read, &smb_read_data);
1210
1211 pSMBr = (struct smb_com_read_rsp *)smb_read_data;
1212 if (copy_to_user(current_offset,
1213 smb_read_data + 4 /* RFC1001 hdr */
1214 + le16_to_cpu(pSMBr->DataOffset),
1215 bytes_read)) {
1216 rc = -EFAULT;
1217 FreeXid(xid);
1218 return rc;
1219 }
1220 if (smb_read_data) {
1221 cifs_buf_release(smb_read_data);
1222 smb_read_data = NULL;
1223 }
1224 }
1225 if (rc || (bytes_read == 0)) {
1226 if (total_read) {
1227 break;
1228 } else {
1229 FreeXid(xid);
1230 return rc;
1231 }
1232 } else {
1233#ifdef CONFIG_CIFS_STATS
1234 atomic_inc(&pTcon->num_reads);
1235 spin_lock(&pTcon->stat_lock);
1236 pTcon->bytes_read += total_read;
1237 spin_unlock(&pTcon->stat_lock);
1238#endif
1239 *poffset += bytes_read;
1240 }
1241 }
1242 FreeXid(xid);
1243 return total_read;
1244}
1245
1246
1247static ssize_t cifs_read(struct file *file, char *read_data, size_t read_size,
1248 loff_t *poffset)
1249{
1250 int rc = -EACCES;
1251 unsigned int bytes_read = 0;
1252 unsigned int total_read;
1253 unsigned int current_read_size;
1254 struct cifs_sb_info *cifs_sb;
1255 struct cifsTconInfo *pTcon;
1256 int xid;
1257 char *current_offset;
1258 struct cifsFileInfo *open_file;
1259
1260 xid = GetXid();
1261 cifs_sb = CIFS_SB(file->f_dentry->d_sb);
1262 pTcon = cifs_sb->tcon;
1263
1264 if (file->private_data == NULL) {
1265 FreeXid(xid);
1266 return -EBADF;
1267 }
1268 open_file = (struct cifsFileInfo *)file->private_data;
1269
1270 if ((file->f_flags & O_ACCMODE) == O_WRONLY)
1271 cFYI(1, ("attempting read on write only file instance"));
1272
1273 for (total_read = 0, current_offset = read_data;
1274 read_size > total_read;
1275 total_read += bytes_read, current_offset += bytes_read) {
1276 current_read_size = min_t(const int, read_size - total_read,
1277 cifs_sb->rsize);
1278 rc = -EAGAIN;
1279 while (rc == -EAGAIN) {
1280 if ((open_file->invalidHandle) &&
1281 (!open_file->closePend)) {
1282 rc = cifs_reopen_file(file->f_dentry->d_inode,
1283 file, TRUE);
1284 if (rc != 0)
1285 break;
1286 }
1287
1288 rc = CIFSSMBRead(xid, pTcon,
1289 open_file->netfid,
1290 current_read_size, *poffset,
1291 &bytes_read, &current_offset);
1292 }
1293 if (rc || (bytes_read == 0)) {
1294 if (total_read) {
1295 break;
1296 } else {
1297 FreeXid(xid);
1298 return rc;
1299 }
1300 } else {
1301#ifdef CONFIG_CIFS_STATS
1302 atomic_inc(&pTcon->num_reads);
1303 spin_lock(&pTcon->stat_lock);
1304 pTcon->bytes_read += total_read;
1305 spin_unlock(&pTcon->stat_lock);
1306#endif
1307 *poffset += bytes_read;
1308 }
1309 }
1310 FreeXid(xid);
1311 return total_read;
1312}
1313
1314int cifs_file_mmap(struct file *file, struct vm_area_struct *vma)
1315{
1316 struct dentry *dentry = file->f_dentry;
1317 int rc, xid;
1318
1319 xid = GetXid();
1320 rc = cifs_revalidate(dentry);
1321 if (rc) {
1322 cFYI(1, ("Validation prior to mmap failed, error=%d", rc));
1323 FreeXid(xid);
1324 return rc;
1325 }
1326 rc = generic_file_mmap(file, vma);
1327 FreeXid(xid);
1328 return rc;
1329}
1330
1331
1332static void cifs_copy_cache_pages(struct address_space *mapping,
1333 struct list_head *pages, int bytes_read, char *data,
1334 struct pagevec *plru_pvec)
1335{
1336 struct page *page;
1337 char *target;
1338
1339 while (bytes_read > 0) {
1340 if (list_empty(pages))
1341 break;
1342
1343 page = list_entry(pages->prev, struct page, lru);
1344 list_del(&page->lru);
1345
1346 if (add_to_page_cache(page, mapping, page->index,
1347 GFP_KERNEL)) {
1348 page_cache_release(page);
1349 cFYI(1, ("Add page cache failed"));
1350 continue;
1351 }
1352
1353 target = kmap_atomic(page,KM_USER0);
1354
1355 if (PAGE_CACHE_SIZE > bytes_read) {
1356 memcpy(target, data, bytes_read);
1357 /* zero the tail end of this partial page */
1358 memset(target + bytes_read, 0,
1359 PAGE_CACHE_SIZE - bytes_read);
1360 bytes_read = 0;
1361 } else {
1362 memcpy(target, data, PAGE_CACHE_SIZE);
1363 bytes_read -= PAGE_CACHE_SIZE;
1364 }
1365 kunmap_atomic(target, KM_USER0);
1366
1367 flush_dcache_page(page);
1368 SetPageUptodate(page);
1369 unlock_page(page);
1370 if (!pagevec_add(plru_pvec, page))
1371 __pagevec_lru_add(plru_pvec);
1372 data += PAGE_CACHE_SIZE;
1373 }
1374 return;
1375}
1376
1377static int cifs_readpages(struct file *file, struct address_space *mapping,
1378 struct list_head *page_list, unsigned num_pages)
1379{
1380 int rc = -EACCES;
1381 int xid;
1382 loff_t offset;
1383 struct page *page;
1384 struct cifs_sb_info *cifs_sb;
1385 struct cifsTconInfo *pTcon;
1386 int bytes_read = 0;
1387 unsigned int read_size,i;
1388 char *smb_read_data = NULL;
1389 struct smb_com_read_rsp *pSMBr;
1390 struct pagevec lru_pvec;
1391 struct cifsFileInfo *open_file;
1392
1393 xid = GetXid();
1394 if (file->private_data == NULL) {
1395 FreeXid(xid);
1396 return -EBADF;
1397 }
1398 open_file = (struct cifsFileInfo *)file->private_data;
1399 cifs_sb = CIFS_SB(file->f_dentry->d_sb);
1400 pTcon = cifs_sb->tcon;
1401
1402 pagevec_init(&lru_pvec, 0);
1403
1404 for (i = 0; i < num_pages; ) {
1405 unsigned contig_pages;
1406 struct page *tmp_page;
1407 unsigned long expected_index;
1408
1409 if (list_empty(page_list))
1410 break;
1411
1412 page = list_entry(page_list->prev, struct page, lru);
1413 offset = (loff_t)page->index << PAGE_CACHE_SHIFT;
1414
1415 /* count adjacent pages that we will read into */
1416 contig_pages = 0;
1417 expected_index =
1418 list_entry(page_list->prev, struct page, lru)->index;
1419 list_for_each_entry_reverse(tmp_page,page_list,lru) {
1420 if (tmp_page->index == expected_index) {
1421 contig_pages++;
1422 expected_index++;
1423 } else
1424 break;
1425 }
1426 if (contig_pages + i > num_pages)
1427 contig_pages = num_pages - i;
1428
1429 /* for reads over a certain size could initiate async
1430 read ahead */
1431
1432 read_size = contig_pages * PAGE_CACHE_SIZE;
1433 /* Read size needs to be in multiples of one page */
1434 read_size = min_t(const unsigned int, read_size,
1435 cifs_sb->rsize & PAGE_CACHE_MASK);
1436
1437 rc = -EAGAIN;
1438 while (rc == -EAGAIN) {
1439 if ((open_file->invalidHandle) &&
1440 (!open_file->closePend)) {
1441 rc = cifs_reopen_file(file->f_dentry->d_inode,
1442 file, TRUE);
1443 if (rc != 0)
1444 break;
1445 }
1446
1447 rc = CIFSSMBRead(xid, pTcon,
1448 open_file->netfid,
1449 read_size, offset,
1450 &bytes_read, &smb_read_data);
1451 /* BB need to check return code here */
1452 if (rc== -EAGAIN) {
1453 if (smb_read_data) {
1454 cifs_buf_release(smb_read_data);
1455 smb_read_data = NULL;
1456 }
1457 }
1458 }
1459 if ((rc < 0) || (smb_read_data == NULL)) {
1460 cFYI(1, ("Read error in readpages: %d", rc));
1461			/* clean up remaining pages off list */
1462 while (!list_empty(page_list) && (i < num_pages)) {
1463 page = list_entry(page_list->prev, struct page,
1464 lru);
1465 list_del(&page->lru);
1466 page_cache_release(page);
1467 }
1468 break;
1469 } else if (bytes_read > 0) {
1470 pSMBr = (struct smb_com_read_rsp *)smb_read_data;
1471 cifs_copy_cache_pages(mapping, page_list, bytes_read,
1472 smb_read_data + 4 /* RFC1001 hdr */ +
1473 le16_to_cpu(pSMBr->DataOffset), &lru_pvec);
1474
1475 i += bytes_read >> PAGE_CACHE_SHIFT;
1476#ifdef CONFIG_CIFS_STATS
1477 atomic_inc(&pTcon->num_reads);
1478 spin_lock(&pTcon->stat_lock);
1479 pTcon->bytes_read += bytes_read;
1480 spin_unlock(&pTcon->stat_lock);
1481#endif
1482 if ((int)(bytes_read & PAGE_CACHE_MASK) != bytes_read) {
1483 i++; /* account for partial page */
1484
1485 /* server copy of file can have smaller size
1486 than client */
1487 /* BB do we need to verify this common case ?
1488 this case is ok - if we are at server EOF
1489 we will hit it on next read */
1490
1491 /* while (!list_empty(page_list) && (i < num_pages)) {
1492 page = list_entry(page_list->prev,
1493 struct page, list);
1494 list_del(&page->list);
1495 page_cache_release(page);
1496 }
1497 break; */
1498 }
1499 } else {
1500 cFYI(1, ("No bytes read (%d) at offset %lld . "
1501 "Cleaning remaining pages from readahead list",
1502 bytes_read, offset));
1503 /* BB turn off caching and do new lookup on
1504 file size at server? */
1505 while (!list_empty(page_list) && (i < num_pages)) {
1506 page = list_entry(page_list->prev, struct page,
1507 lru);
1508 list_del(&page->lru);
1509
1510 /* BB removeme - replace with zero of page? */
1511 page_cache_release(page);
1512 }
1513 break;
1514 }
1515 if (smb_read_data) {
1516 cifs_buf_release(smb_read_data);
1517 smb_read_data = NULL;
1518 }
1519 bytes_read = 0;
1520 }
1521
1522 pagevec_lru_add(&lru_pvec);
1523
1524/* need to free smb_read_data buf before exit */
1525 if (smb_read_data) {
1526 cifs_buf_release(smb_read_data);
1527 smb_read_data = NULL;
1528 }
1529
1530 FreeXid(xid);
1531 return rc;
1532}
1533
1534static int cifs_readpage_worker(struct file *file, struct page *page,
1535 loff_t *poffset)
1536{
1537 char *read_data;
1538 int rc;
1539
1540 page_cache_get(page);
1541 read_data = kmap(page);
1542 /* for reads over a certain size could initiate async read ahead */
1543
1544 rc = cifs_read(file, read_data, PAGE_CACHE_SIZE, poffset);
1545
1546 if (rc < 0)
1547 goto io_error;
1548 else
1549 cFYI(1, ("Bytes read %d ",rc));
1550
1551 file->f_dentry->d_inode->i_atime =
1552 current_fs_time(file->f_dentry->d_inode->i_sb);
1553
1554 if (PAGE_CACHE_SIZE > rc)
1555 memset(read_data + rc, 0, PAGE_CACHE_SIZE - rc);
1556
1557 flush_dcache_page(page);
1558 SetPageUptodate(page);
1559 rc = 0;
1560
1561io_error:
1562 kunmap(page);
1563 page_cache_release(page);
1564 return rc;
1565}
1566
1567static int cifs_readpage(struct file *file, struct page *page)
1568{
1569 loff_t offset = (loff_t)page->index << PAGE_CACHE_SHIFT;
1570 int rc = -EACCES;
1571 int xid;
1572
1573 xid = GetXid();
1574
1575 if (file->private_data == NULL) {
1576 FreeXid(xid);
1577 return -EBADF;
1578 }
1579
1580 cFYI(1, ("readpage %p at offset %d 0x%x\n",
1581 page, (int)offset, (int)offset));
1582
1583 rc = cifs_readpage_worker(file, page, &offset);
1584
1585 unlock_page(page);
1586
1587 FreeXid(xid);
1588 return rc;
1589}
1590
1591/* We do not want to update the file size from server for inodes
1592 open for write - to avoid races with writepage extending
1593 the file - in the future we could consider allowing
1594 refreshing the inode only on increases in the file size
1595 but this is tricky to do without racing with writebehind
1596 page caching in the current Linux kernel design */
1597int is_size_safe_to_change(struct cifsInodeInfo *cifsInode)
1598{
1599 struct list_head *tmp;
1600 struct list_head *tmp1;
1601 struct cifsFileInfo *open_file = NULL;
1602 int rc = TRUE;
1603
1604 if (cifsInode == NULL)
1605 return rc;
1606
1607 read_lock(&GlobalSMBSeslock);
1608 list_for_each_safe(tmp, tmp1, &cifsInode->openFileList) {
1609 open_file = list_entry(tmp, struct cifsFileInfo, flist);
1610 if (open_file == NULL)
1611 break;
1612 if (open_file->closePend)
1613 continue;
1614 /* We check if file is open for writing,
1615 BB we could supplement this with a check to see if file size
1616 changes have been flushed to server - ie inode metadata dirty */
1617 if ((open_file->pfile) &&
1618 ((open_file->pfile->f_flags & O_RDWR) ||
1619 (open_file->pfile->f_flags & O_WRONLY))) {
1620 rc = FALSE;
1621 break;
1622 }
1623 if (tmp->next == NULL) {
1624 cFYI(1, ("File instance %p removed", tmp));
1625 break;
1626 }
1627 }
1628 read_unlock(&GlobalSMBSeslock);
1629 return rc;
1630}
1631
1632
1633static int cifs_prepare_write(struct file *file, struct page *page,
1634 unsigned from, unsigned to)
1635{
1636 int rc = 0;
1637 loff_t offset = (loff_t)page->index << PAGE_CACHE_SHIFT;
1638 cFYI(1, ("prepare write for page %p from %d to %d",page,from,to));
1639 if (!PageUptodate(page)) {
1640 /* if (to - from != PAGE_CACHE_SIZE) {
1641 void *kaddr = kmap_atomic(page, KM_USER0);
1642 memset(kaddr, 0, from);
1643 memset(kaddr + to, 0, PAGE_CACHE_SIZE - to);
1644 flush_dcache_page(page);
1645 kunmap_atomic(kaddr, KM_USER0);
1646 } */
1647 /* If we are writing a full page it will be up to date,
1648 no need to read from the server */
1649 if ((to == PAGE_CACHE_SIZE) && (from == 0))
1650 SetPageUptodate(page);
1651
1652 /* might as well read a page, it is fast enough */
1653 if ((file->f_flags & O_ACCMODE) != O_WRONLY) {
1654 rc = cifs_readpage_worker(file, page, &offset);
1655 } else {
1656 /* should we try using another file handle if there is one -
1657 how would we lock it to prevent close of that handle
1658 racing with this read?
1659 In any case this will be written out by commit_write */
1660 }
1661 }
1662
1663 /* BB should we pass any errors back?
1664 e.g. if we do not have read access to the file */
1665 return 0;
1666}
1667
1668struct address_space_operations cifs_addr_ops = {
1669 .readpage = cifs_readpage,
1670 .readpages = cifs_readpages,
1671 .writepage = cifs_writepage,
1672 .prepare_write = cifs_prepare_write,
1673 .commit_write = cifs_commit_write,
1674 .set_page_dirty = __set_page_dirty_nobuffers,
1675 /* .sync_page = cifs_sync_page, */
1676 /* .direct_IO = */
1677};