blob: f36f9a7893da02ae6d38d4642a5563c8ada6ce5c [file] [log] [blame]
Linus Torvalds1da177e2005-04-16 15:20:36 -07001/*
2 * fs/cifs/file.c
3 *
4 * vfs operations that deal with files
Steve Frenchfb8c4b12007-07-10 01:16:18 +00005 *
Steve Frenchf19159d2010-04-21 04:12:10 +00006 * Copyright (C) International Business Machines Corp., 2002,2010
Linus Torvalds1da177e2005-04-16 15:20:36 -07007 * Author(s): Steve French (sfrench@us.ibm.com)
Jeremy Allison7ee1af72006-08-02 21:56:33 +00008 * Jeremy Allison (jra@samba.org)
Linus Torvalds1da177e2005-04-16 15:20:36 -07009 *
10 * This library is free software; you can redistribute it and/or modify
11 * it under the terms of the GNU Lesser General Public License as published
12 * by the Free Software Foundation; either version 2.1 of the License, or
13 * (at your option) any later version.
14 *
15 * This library is distributed in the hope that it will be useful,
16 * but WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See
18 * the GNU Lesser General Public License for more details.
19 *
20 * You should have received a copy of the GNU Lesser General Public License
21 * along with this library; if not, write to the Free Software
22 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
23 */
24#include <linux/fs.h>
Steve French37c0eb42005-10-05 14:50:29 -070025#include <linux/backing-dev.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070026#include <linux/stat.h>
27#include <linux/fcntl.h>
28#include <linux/pagemap.h>
29#include <linux/pagevec.h>
Steve French37c0eb42005-10-05 14:50:29 -070030#include <linux/writeback.h>
Andrew Morton6f88cc22006-12-10 02:19:44 -080031#include <linux/task_io_accounting_ops.h>
Steve French23e7dd72005-10-20 13:44:56 -070032#include <linux/delay.h>
Jeff Layton3bc303c2009-09-21 06:47:50 -040033#include <linux/mount.h>
Tejun Heo5a0e3ad2010-03-24 17:04:11 +090034#include <linux/slab.h>
Jeff Layton690c5e32011-10-19 15:30:16 -040035#include <linux/swap.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070036#include <asm/div64.h>
37#include "cifsfs.h"
38#include "cifspdu.h"
39#include "cifsglob.h"
40#include "cifsproto.h"
41#include "cifs_unicode.h"
42#include "cifs_debug.h"
43#include "cifs_fs_sb.h"
Suresh Jayaraman9451a9a2010-07-05 18:12:45 +053044#include "fscache.h"
Linus Torvalds1da177e2005-04-16 15:20:36 -070045
Steve French07b92d02013-02-18 10:34:26 -060046
Linus Torvalds1da177e2005-04-16 15:20:36 -070047static inline int cifs_convert_flags(unsigned int flags)
48{
49 if ((flags & O_ACCMODE) == O_RDONLY)
50 return GENERIC_READ;
51 else if ((flags & O_ACCMODE) == O_WRONLY)
52 return GENERIC_WRITE;
53 else if ((flags & O_ACCMODE) == O_RDWR) {
54 /* GENERIC_ALL is too much permission to request
55 can cause unnecessary access denied on create */
56 /* return GENERIC_ALL; */
57 return (GENERIC_READ | GENERIC_WRITE);
58 }
59
Jeff Laytone10f7b52008-05-14 10:21:33 -070060 return (READ_CONTROL | FILE_WRITE_ATTRIBUTES | FILE_READ_ATTRIBUTES |
61 FILE_WRITE_EA | FILE_APPEND_DATA | FILE_WRITE_DATA |
62 FILE_READ_DATA);
Steve French7fc8f4e2009-02-23 20:43:11 +000063}
Jeff Laytone10f7b52008-05-14 10:21:33 -070064
Jeff Layton608712f2010-10-15 15:33:56 -040065static u32 cifs_posix_convert_flags(unsigned int flags)
Steve French7fc8f4e2009-02-23 20:43:11 +000066{
Jeff Layton608712f2010-10-15 15:33:56 -040067 u32 posix_flags = 0;
Jeff Laytone10f7b52008-05-14 10:21:33 -070068
Steve French7fc8f4e2009-02-23 20:43:11 +000069 if ((flags & O_ACCMODE) == O_RDONLY)
Jeff Layton608712f2010-10-15 15:33:56 -040070 posix_flags = SMB_O_RDONLY;
Steve French7fc8f4e2009-02-23 20:43:11 +000071 else if ((flags & O_ACCMODE) == O_WRONLY)
Jeff Layton608712f2010-10-15 15:33:56 -040072 posix_flags = SMB_O_WRONLY;
73 else if ((flags & O_ACCMODE) == O_RDWR)
74 posix_flags = SMB_O_RDWR;
75
Steve French07b92d02013-02-18 10:34:26 -060076 if (flags & O_CREAT) {
Jeff Layton608712f2010-10-15 15:33:56 -040077 posix_flags |= SMB_O_CREAT;
Steve French07b92d02013-02-18 10:34:26 -060078 if (flags & O_EXCL)
79 posix_flags |= SMB_O_EXCL;
80 } else if (flags & O_EXCL)
Joe Perchesf96637b2013-05-04 22:12:25 -050081 cifs_dbg(FYI, "Application %s pid %d has incorrectly set O_EXCL flag but not O_CREAT on file open. Ignoring O_EXCL\n",
82 current->comm, current->tgid);
Steve French07b92d02013-02-18 10:34:26 -060083
Jeff Layton608712f2010-10-15 15:33:56 -040084 if (flags & O_TRUNC)
85 posix_flags |= SMB_O_TRUNC;
86 /* be safe and imply O_SYNC for O_DSYNC */
Christoph Hellwig6b2f3d12009-10-27 11:05:28 +010087 if (flags & O_DSYNC)
Jeff Layton608712f2010-10-15 15:33:56 -040088 posix_flags |= SMB_O_SYNC;
Steve French7fc8f4e2009-02-23 20:43:11 +000089 if (flags & O_DIRECTORY)
Jeff Layton608712f2010-10-15 15:33:56 -040090 posix_flags |= SMB_O_DIRECTORY;
Steve French7fc8f4e2009-02-23 20:43:11 +000091 if (flags & O_NOFOLLOW)
Jeff Layton608712f2010-10-15 15:33:56 -040092 posix_flags |= SMB_O_NOFOLLOW;
Steve French7fc8f4e2009-02-23 20:43:11 +000093 if (flags & O_DIRECT)
Jeff Layton608712f2010-10-15 15:33:56 -040094 posix_flags |= SMB_O_DIRECT;
Steve French7fc8f4e2009-02-23 20:43:11 +000095
96 return posix_flags;
Linus Torvalds1da177e2005-04-16 15:20:36 -070097}
98
99static inline int cifs_get_disposition(unsigned int flags)
100{
101 if ((flags & (O_CREAT | O_EXCL)) == (O_CREAT | O_EXCL))
102 return FILE_CREATE;
103 else if ((flags & (O_CREAT | O_TRUNC)) == (O_CREAT | O_TRUNC))
104 return FILE_OVERWRITE_IF;
105 else if ((flags & O_CREAT) == O_CREAT)
106 return FILE_OPEN_IF;
Steve French55aa2e02006-05-30 18:09:31 +0000107 else if ((flags & O_TRUNC) == O_TRUNC)
108 return FILE_OVERWRITE;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700109 else
110 return FILE_OPEN;
111}
112
/*
 * Open (and possibly create) a file using the SMB POSIX extensions.
 *
 * On success the server's FILE_UNIX_BASIC_INFO reply is used to either
 * instantiate a new inode (when *pinode is NULL) or refresh an existing
 * one; if @pinode is NULL the caller does not want inode info at all.
 *
 * @full_path: path of the file relative to the share root
 * @pinode:    in/out inode pointer (may be NULL, see above)
 * @sb:        superblock of the mount
 * @mode:      create mode; the caller's umask is applied here
 * @f_flags:   VFS open flags, translated via cifs_posix_convert_flags()
 * @poplock:   out: oplock level granted by the server
 * @pnetfid:   out: network file handle
 * @xid:       transaction id for debugging/tracing
 *
 * Returns 0 on success or a negative errno.
 */
int cifs_posix_open(char *full_path, struct inode **pinode,
		    struct super_block *sb, int mode, unsigned int f_flags,
		    __u32 *poplock, __u16 *pnetfid, unsigned int xid)
{
	int rc;
	FILE_UNIX_BASIC_INFO *presp_data;
	__u32 posix_flags = 0;
	struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
	struct cifs_fattr fattr;
	struct tcon_link *tlink;
	struct cifs_tcon *tcon;

	cifs_dbg(FYI, "posix open %s\n", full_path);

	presp_data = kzalloc(sizeof(FILE_UNIX_BASIC_INFO), GFP_KERNEL);
	if (presp_data == NULL)
		return -ENOMEM;

	/* take a reference on the tcon link for the duration of the call */
	tlink = cifs_sb_tlink(cifs_sb);
	if (IS_ERR(tlink)) {
		rc = PTR_ERR(tlink);
		goto posix_open_ret;
	}

	tcon = tlink_tcon(tlink);
	mode &= ~current_umask();

	posix_flags = cifs_posix_convert_flags(f_flags);
	rc = CIFSPOSIXCreate(xid, tcon, posix_flags, mode, pnetfid, presp_data,
			     poplock, full_path, cifs_sb->local_nls,
			     cifs_sb->mnt_cifs_flags &
					CIFS_MOUNT_MAP_SPECIAL_CHR);
	cifs_put_tlink(tlink);

	if (rc)
		goto posix_open_ret;

	/* Type of -1 means the server did not return file metadata */
	if (presp_data->Type == cpu_to_le32(-1))
		goto posix_open_ret; /* open ok, caller does qpathinfo */

	if (!pinode)
		goto posix_open_ret; /* caller does not need info */

	cifs_unix_basic_to_fattr(&fattr, presp_data, cifs_sb);

	/* get new inode and set it up */
	if (*pinode == NULL) {
		cifs_fill_uniqueid(sb, &fattr);
		*pinode = cifs_iget(sb, &fattr);
		if (!*pinode) {
			rc = -ENOMEM;
			goto posix_open_ret;
		}
	} else {
		/* existing inode - just refresh its attributes */
		cifs_fattr_to_inode(*pinode, &fattr);
	}

posix_open_ret:
	kfree(presp_data);
	return rc;
}
174
/*
 * Open a file the "NT" way via the protocol-specific server->ops->open
 * hook (used when the POSIX-extensions open is unavailable or failed).
 * On success the inode metadata is refreshed from the open reply (or via
 * a separate query for unix-extension mounts).
 *
 * Returns 0 on success, -ENOSYS if this server has no open op, or a
 * negative errno from the open / inode query.
 */
static int
cifs_nt_open(char *full_path, struct inode *inode, struct cifs_sb_info *cifs_sb,
	     struct cifs_tcon *tcon, unsigned int f_flags, __u32 *oplock,
	     struct cifs_fid *fid, unsigned int xid)
{
	int rc;
	int desired_access;
	int disposition;
	int create_options = CREATE_NOT_DIR;
	FILE_ALL_INFO *buf;
	struct TCP_Server_Info *server = tcon->ses->server;
	struct cifs_open_parms oparms;

	if (!server->ops->open)
		return -ENOSYS;

	desired_access = cifs_convert_flags(f_flags);

/*********************************************************************
 *  open flag mapping table:
 *
 *	POSIX Flag            CIFS Disposition
 *	----------            ----------------
 *	O_CREAT               FILE_OPEN_IF
 *	O_CREAT | O_EXCL      FILE_CREATE
 *	O_CREAT | O_TRUNC     FILE_OVERWRITE_IF
 *	O_TRUNC               FILE_OVERWRITE
 *	none of the above     FILE_OPEN
 *
 *	Note that there is not a direct match between disposition
 *	FILE_SUPERSEDE (ie create whether or not file exists although
 *	O_CREAT | O_TRUNC is similar but truncates the existing
 *	file rather than creating a new file as FILE_SUPERSEDE does
 *	(which uses the attributes / metadata passed in on open call)
 *?
 *?  O_SYNC is a reasonable match to CIFS writethrough flag
 *?  and the read write flags match reasonably.  O_LARGEFILE
 *?  is irrelevant because largefile support is always used
 *?  by this client. Flags O_APPEND, O_DIRECT, O_DIRECTORY,
 *	 O_FASYNC, O_NOFOLLOW, O_NONBLOCK need further investigation
 *********************************************************************/

	disposition = cifs_get_disposition(f_flags);

	/* BB pass O_SYNC flag through on file attributes .. BB */

	/* reply buffer for file metadata returned by the open call */
	buf = kmalloc(sizeof(FILE_ALL_INFO), GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	/* mounts with backup intent may open files they lack access to */
	if (backup_cred(cifs_sb))
		create_options |= CREATE_OPEN_BACKUP_INTENT;

	oparms.tcon = tcon;
	oparms.cifs_sb = cifs_sb;
	oparms.desired_access = desired_access;
	oparms.create_options = create_options;
	oparms.disposition = disposition;
	oparms.path = full_path;
	oparms.fid = fid;

	rc = server->ops->open(xid, &oparms, oplock, buf);

	if (rc)
		goto out;

	/* refresh inode attributes now that the open succeeded */
	if (tcon->unix_ext)
		rc = cifs_get_inode_info_unix(&inode, full_path, inode->i_sb,
					      xid);
	else
		rc = cifs_get_inode_info(&inode, full_path, buf, inode->i_sb,
					 xid, &fid->netfid);

out:
	kfree(buf);
	return rc;
}
252
Pavel Shilovsky63b7d3a2012-12-24 14:41:19 +0400253static bool
254cifs_has_mand_locks(struct cifsInodeInfo *cinode)
255{
256 struct cifs_fid_locks *cur;
257 bool has_locks = false;
258
259 down_read(&cinode->lock_sem);
260 list_for_each_entry(cur, &cinode->llist, llist) {
261 if (!list_empty(&cur->locks)) {
262 has_locks = true;
263 break;
264 }
265 }
266 up_read(&cinode->lock_sem);
267 return has_locks;
268}
269
/*
 * Allocate and initialize the per-open-file private data (cifsFileInfo)
 * for a freshly opened handle, link it onto the tcon and inode open-file
 * lists, consume the pending-open entry attached to @fid, and store the
 * result in file->private_data.
 *
 * Returns the new cifsFileInfo, or NULL on allocation failure (the
 * caller is responsible for closing the handle in that case).
 */
struct cifsFileInfo *
cifs_new_fileinfo(struct cifs_fid *fid, struct file *file,
		  struct tcon_link *tlink, __u32 oplock)
{
	struct dentry *dentry = file->f_path.dentry;
	struct inode *inode = dentry->d_inode;
	struct cifsInodeInfo *cinode = CIFS_I(inode);
	struct cifsFileInfo *cfile;
	struct cifs_fid_locks *fdlocks;
	struct cifs_tcon *tcon = tlink_tcon(tlink);
	struct TCP_Server_Info *server = tcon->ses->server;

	cfile = kzalloc(sizeof(struct cifsFileInfo), GFP_KERNEL);
	if (cfile == NULL)
		return cfile;

	/* per-handle byte-range lock bookkeeping */
	fdlocks = kzalloc(sizeof(struct cifs_fid_locks), GFP_KERNEL);
	if (!fdlocks) {
		kfree(cfile);
		return NULL;
	}

	INIT_LIST_HEAD(&fdlocks->locks);
	fdlocks->cfile = cfile;
	cfile->llist = fdlocks;
	/* publishing on cinode->llist requires write access to lock_sem */
	down_write(&cinode->lock_sem);
	list_add(&fdlocks->llist, &cinode->llist);
	up_write(&cinode->lock_sem);

	cfile->count = 1;	/* initial reference, dropped in cifsFileInfo_put() */
	cfile->pid = current->tgid;
	cfile->uid = current_fsuid();
	cfile->dentry = dget(dentry);
	cfile->f_flags = file->f_flags;
	cfile->invalidHandle = false;
	cfile->tlink = cifs_get_tlink(tlink);
	INIT_WORK(&cfile->oplock_break, cifs_oplock_break);
	mutex_init(&cfile->fh_mutex);

	/* hold the superblock active while this handle exists */
	cifs_sb_active(inode->i_sb);

	/*
	 * If the server returned a read oplock and we have mandatory brlocks,
	 * set oplock level to None.
	 */
	if (oplock == server->vals->oplock_read &&
						cifs_has_mand_locks(cinode)) {
		cifs_dbg(FYI, "Reset oplock val from read to None due to mand locks\n");
		oplock = 0;
	}

	spin_lock(&cifs_file_list_lock);
	/* a lease break may have changed the oplock while it was pending */
	if (fid->pending_open->oplock != CIFS_OPLOCK_NO_CHANGE && oplock)
		oplock = fid->pending_open->oplock;
	list_del(&fid->pending_open->olist);

	server->ops->set_fid(cfile, fid, oplock);

	list_add(&cfile->tlist, &tcon->openFileList);
	/* if readable file instance put first in list*/
	if (file->f_mode & FMODE_READ)
		list_add(&cfile->flist, &cinode->openFileList);
	else
		list_add_tail(&cfile->flist, &cinode->openFileList);
	spin_unlock(&cifs_file_list_lock);

	file->private_data = cfile;
	return cfile;
}
339
/*
 * Take a reference on an open file's private data.  Acquires
 * cifs_file_list_lock around the locked-variant helper; must therefore be
 * called without that lock held.  Returns its argument for convenience.
 */
struct cifsFileInfo *
cifsFileInfo_get(struct cifsFileInfo *cifs_file)
{
	spin_lock(&cifs_file_list_lock);
	cifsFileInfo_get_locked(cifs_file);
	spin_unlock(&cifs_file_list_lock);
	return cifs_file;
}
348
/*
 * Release a reference on the file private data. This may involve closing
 * the filehandle out on the server. Must be called without holding
 * cifs_file_list_lock.
 */
void cifsFileInfo_put(struct cifsFileInfo *cifs_file)
{
	struct inode *inode = cifs_file->dentry->d_inode;
	struct cifs_tcon *tcon = tlink_tcon(cifs_file->tlink);
	struct TCP_Server_Info *server = tcon->ses->server;
	struct cifsInodeInfo *cifsi = CIFS_I(inode);
	struct super_block *sb = inode->i_sb;
	struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
	struct cifsLockInfo *li, *tmp;
	struct cifs_fid fid;
	struct cifs_pending_open open;

	spin_lock(&cifs_file_list_lock);
	if (--cifs_file->count > 0) {
		/* other references remain - nothing more to do */
		spin_unlock(&cifs_file_list_lock);
		return;
	}

	if (server->ops->get_lease_key)
		server->ops->get_lease_key(inode, &fid);

	/* store open in pending opens to make sure we don't miss lease break */
	cifs_add_pending_open_locked(&fid, cifs_file->tlink, &open);

	/* remove it from the lists */
	list_del(&cifs_file->flist);
	list_del(&cifs_file->tlist);

	if (list_empty(&cifsi->openFileList)) {
		cifs_dbg(FYI, "closing last open instance for inode %p\n",
			 cifs_file->dentry->d_inode);
		/*
		 * In strict cache mode we need invalidate mapping on the last
		 * close  because it may cause a error when we open this file
		 * again and get at least level II oplock.
		 */
		if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_STRICT_IO)
			CIFS_I(inode)->invalid_mapping = true;
		cifs_set_oplock_level(cifsi, 0);
	}
	spin_unlock(&cifs_file_list_lock);

	/* wait for any in-flight oplock break work before tearing down */
	cancel_work_sync(&cifs_file->oplock_break);

	if (!tcon->need_reconnect && !cifs_file->invalidHandle) {
		struct TCP_Server_Info *server = tcon->ses->server;
		unsigned int xid;

		xid = get_xid();
		if (server->ops->close)
			server->ops->close(xid, tcon, &cifs_file->fid);
		_free_xid(xid);
	}

	cifs_del_pending_open(&open);

	/*
	 * Delete any outstanding lock records. We'll lose them when the file
	 * is closed anyway.
	 */
	down_write(&cifsi->lock_sem);
	list_for_each_entry_safe(li, tmp, &cifs_file->llist->locks, llist) {
		list_del(&li->llist);
		cifs_del_lock_waiters(li);
		kfree(li);
	}
	list_del(&cifs_file->llist->llist);
	kfree(cifs_file->llist);
	up_write(&cifsi->lock_sem);

	/* drop the references taken in cifs_new_fileinfo() */
	cifs_put_tlink(cifs_file->tlink);
	dput(cifs_file->dentry);
	cifs_sb_deactive(sb);
	kfree(cifs_file);
}
429
/*
 * VFS ->open handler for regular files on a CIFS mount.
 *
 * Tries the SMB POSIX-extensions open first when the tcon advertises
 * support for it; otherwise (or on unsupported/transient errors) falls
 * back to cifs_nt_open().  A pending-open record is registered before
 * the NT open so a lease break racing with the open is not lost; it is
 * consumed by cifs_new_fileinfo() on success or deleted on failure.
 *
 * Returns 0 on success or a negative errno.
 */
int cifs_open(struct inode *inode, struct file *file)

{
	int rc = -EACCES;
	unsigned int xid;
	__u32 oplock;
	struct cifs_sb_info *cifs_sb;
	struct TCP_Server_Info *server;
	struct cifs_tcon *tcon;
	struct tcon_link *tlink;
	struct cifsFileInfo *cfile = NULL;
	char *full_path = NULL;
	bool posix_open_ok = false;
	struct cifs_fid fid;
	struct cifs_pending_open open;

	xid = get_xid();

	cifs_sb = CIFS_SB(inode->i_sb);
	tlink = cifs_sb_tlink(cifs_sb);
	if (IS_ERR(tlink)) {
		free_xid(xid);
		return PTR_ERR(tlink);
	}
	tcon = tlink_tcon(tlink);
	server = tcon->ses->server;

	full_path = build_path_from_dentry(file->f_path.dentry);
	if (full_path == NULL) {
		rc = -ENOMEM;
		goto out;
	}

	cifs_dbg(FYI, "inode = 0x%p file flags are 0x%x for %s\n",
		 inode, file->f_flags, full_path);

	/* only request an oplock if the server supports them */
	if (server->oplocks)
		oplock = REQ_OPLOCK;
	else
		oplock = 0;

	if (!tcon->broken_posix_open && tcon->unix_ext &&
	    cap_unix(tcon->ses) && (CIFS_UNIX_POSIX_PATH_OPS_CAP &
				le64_to_cpu(tcon->fsUnixInfo.Capability))) {
		/* can not refresh inode info since size could be stale */
		rc = cifs_posix_open(full_path, &inode, inode->i_sb,
				cifs_sb->mnt_file_mode /* ignored */,
				file->f_flags, &oplock, &fid.netfid, xid);
		if (rc == 0) {
			cifs_dbg(FYI, "posix open succeeded\n");
			posix_open_ok = true;
		} else if ((rc == -EINVAL) || (rc == -EOPNOTSUPP)) {
			/* server rejects posix open - stop trying it */
			if (tcon->ses->serverNOS)
				cifs_dbg(VFS, "server %s of type %s returned unexpected error on SMB posix open, disabling posix open support. Check if server update available.\n",
					 tcon->ses->serverName,
					 tcon->ses->serverNOS);
			tcon->broken_posix_open = true;
		} else if ((rc != -EIO) && (rc != -EREMOTE) &&
			 (rc != -EOPNOTSUPP)) /* path not found or net err */
			goto out;
		/*
		 * Else fallthrough to retry open the old way on network i/o
		 * or DFS errors.
		 */
	}

	if (server->ops->get_lease_key)
		server->ops->get_lease_key(inode, &fid);

	/* register the open so a concurrent lease break is not missed */
	cifs_add_pending_open(&fid, tlink, &open);

	if (!posix_open_ok) {
		if (server->ops->get_lease_key)
			server->ops->get_lease_key(inode, &fid);

		rc = cifs_nt_open(full_path, inode, cifs_sb, tcon,
				  file->f_flags, &oplock, &fid, xid);
		if (rc) {
			cifs_del_pending_open(&open);
			goto out;
		}
	}

	cfile = cifs_new_fileinfo(&fid, file, tlink, oplock);
	if (cfile == NULL) {
		/* undo the server open and pending-open registration */
		if (server->ops->close)
			server->ops->close(xid, tcon, &fid);
		cifs_del_pending_open(&open);
		rc = -ENOMEM;
		goto out;
	}

	cifs_fscache_set_inode_cookie(inode, file);

	if ((oplock & CIFS_CREATE_ACTION) && !posix_open_ok && tcon->unix_ext) {
		/*
		 * Time to set mode which we can not set earlier due to
		 * problems creating new read-only files.
		 */
		struct cifs_unix_set_info_args args = {
			.mode	= inode->i_mode,
			.uid	= INVALID_UID, /* no change */
			.gid	= INVALID_GID, /* no change */
			.ctime	= NO_CHANGE_64,
			.atime	= NO_CHANGE_64,
			.mtime	= NO_CHANGE_64,
			.device	= 0,
		};
		CIFSSMBUnixSetFileInfo(xid, tcon, &args, fid.netfid,
				       cfile->pid);
	}

out:
	kfree(full_path);
	free_xid(xid);
	cifs_put_tlink(tlink);
	return rc;
}
548
static int cifs_push_posix_locks(struct cifsFileInfo *cfile);

/*
 * Try to reacquire byte range locks that were released when session
 * to server was lost.
 *
 * Called on reconnect/reopen; dispatches to the POSIX lock pusher when
 * the tcon supports unix fcntl locks (and the mount does not disable
 * them), otherwise to the protocol's mandatory-lock pusher.  Returns 0
 * on success or a negative errno from the push.
 */
static int
cifs_relock_file(struct cifsFileInfo *cfile)
{
	struct cifs_sb_info *cifs_sb = CIFS_SB(cfile->dentry->d_sb);
	struct cifsInodeInfo *cinode = CIFS_I(cfile->dentry->d_inode);
	struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
	int rc = 0;

	/* we are going to update can_cache_brlcks here - need a write access */
	down_write(&cinode->lock_sem);
	if (cinode->can_cache_brlcks) {
		/* can cache locks - no need to push them */
		up_write(&cinode->lock_sem);
		return rc;
	}

	if (cap_unix(tcon->ses) &&
	    (CIFS_UNIX_FCNTL_CAP & le64_to_cpu(tcon->fsUnixInfo.Capability)) &&
	    ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOPOSIXBRL) == 0))
		rc = cifs_push_posix_locks(cfile);
	else
		rc = tcon->ses->server->ops->push_mand_locks(cfile);

	up_write(&cinode->lock_sem);
	return rc;
}
581
Pavel Shilovsky2ae78ba2012-09-18 16:20:27 -0700582static int
583cifs_reopen_file(struct cifsFileInfo *cfile, bool can_flush)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700584{
585 int rc = -EACCES;
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +0400586 unsigned int xid;
Jeff Layton590a3fe2009-09-12 11:54:28 -0400587 __u32 oplock;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700588 struct cifs_sb_info *cifs_sb;
Steve French96daf2b2011-05-27 04:34:02 +0000589 struct cifs_tcon *tcon;
Pavel Shilovsky2ae78ba2012-09-18 16:20:27 -0700590 struct TCP_Server_Info *server;
591 struct cifsInodeInfo *cinode;
Steve Frenchfb8c4b12007-07-10 01:16:18 +0000592 struct inode *inode;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700593 char *full_path = NULL;
Pavel Shilovsky2ae78ba2012-09-18 16:20:27 -0700594 int desired_access;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700595 int disposition = FILE_OPEN;
Shirish Pargaonkar3d3ea8e2011-09-26 09:56:44 -0500596 int create_options = CREATE_NOT_DIR;
Pavel Shilovsky2ae78ba2012-09-18 16:20:27 -0700597 struct cifs_fid fid;
Pavel Shilovsky226730b2013-07-05 12:00:30 +0400598 struct cifs_open_parms oparms;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700599
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +0400600 xid = get_xid();
Pavel Shilovsky2ae78ba2012-09-18 16:20:27 -0700601 mutex_lock(&cfile->fh_mutex);
602 if (!cfile->invalidHandle) {
603 mutex_unlock(&cfile->fh_mutex);
Suresh Jayaraman0f3bc092009-06-25 18:12:34 +0530604 rc = 0;
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +0400605 free_xid(xid);
Suresh Jayaraman0f3bc092009-06-25 18:12:34 +0530606 return rc;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700607 }
608
Pavel Shilovsky2ae78ba2012-09-18 16:20:27 -0700609 inode = cfile->dentry->d_inode;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700610 cifs_sb = CIFS_SB(inode->i_sb);
Pavel Shilovsky2ae78ba2012-09-18 16:20:27 -0700611 tcon = tlink_tcon(cfile->tlink);
612 server = tcon->ses->server;
Steve French3a9f4622007-04-04 17:10:24 +0000613
Pavel Shilovsky2ae78ba2012-09-18 16:20:27 -0700614 /*
615 * Can not grab rename sem here because various ops, including those
616 * that already have the rename sem can end up causing writepage to get
617 * called and if the server was down that means we end up here, and we
618 * can never tell if the caller already has the rename_sem.
619 */
620 full_path = build_path_from_dentry(cfile->dentry);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700621 if (full_path == NULL) {
Steve French3a9f4622007-04-04 17:10:24 +0000622 rc = -ENOMEM;
Pavel Shilovsky2ae78ba2012-09-18 16:20:27 -0700623 mutex_unlock(&cfile->fh_mutex);
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +0400624 free_xid(xid);
Steve French3a9f4622007-04-04 17:10:24 +0000625 return rc;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700626 }
627
Joe Perchesf96637b2013-05-04 22:12:25 -0500628 cifs_dbg(FYI, "inode = 0x%p file flags 0x%x for %s\n",
629 inode, cfile->f_flags, full_path);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700630
Pavel Shilovsky10b9b982012-03-20 12:55:09 +0300631 if (tcon->ses->server->oplocks)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700632 oplock = REQ_OPLOCK;
633 else
Steve French4b18f2a2008-04-29 00:06:05 +0000634 oplock = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700635
Pavel Shilovsky29e20f92012-07-13 13:58:14 +0400636 if (tcon->unix_ext && cap_unix(tcon->ses) &&
Steve French7fc8f4e2009-02-23 20:43:11 +0000637 (CIFS_UNIX_POSIX_PATH_OPS_CAP &
Pavel Shilovsky29e20f92012-07-13 13:58:14 +0400638 le64_to_cpu(tcon->fsUnixInfo.Capability))) {
Jeff Layton608712f2010-10-15 15:33:56 -0400639 /*
640 * O_CREAT, O_EXCL and O_TRUNC already had their effect on the
641 * original open. Must mask them off for a reopen.
642 */
Pavel Shilovsky2ae78ba2012-09-18 16:20:27 -0700643 unsigned int oflags = cfile->f_flags &
Jeff Layton15886172010-10-15 15:33:59 -0400644 ~(O_CREAT | O_EXCL | O_TRUNC);
Jeff Layton608712f2010-10-15 15:33:56 -0400645
Jeff Layton2422f6762010-06-16 13:40:16 -0400646 rc = cifs_posix_open(full_path, NULL, inode->i_sb,
Pavel Shilovsky2ae78ba2012-09-18 16:20:27 -0700647 cifs_sb->mnt_file_mode /* ignored */,
648 oflags, &oplock, &fid.netfid, xid);
Steve French7fc8f4e2009-02-23 20:43:11 +0000649 if (rc == 0) {
Joe Perchesf96637b2013-05-04 22:12:25 -0500650 cifs_dbg(FYI, "posix reopen succeeded\n");
Steve French7fc8f4e2009-02-23 20:43:11 +0000651 goto reopen_success;
652 }
Pavel Shilovsky2ae78ba2012-09-18 16:20:27 -0700653 /*
654 * fallthrough to retry open the old way on errors, especially
655 * in the reconnect path it is important to retry hard
656 */
Steve French7fc8f4e2009-02-23 20:43:11 +0000657 }
658
Pavel Shilovsky2ae78ba2012-09-18 16:20:27 -0700659 desired_access = cifs_convert_flags(cfile->f_flags);
Steve French7fc8f4e2009-02-23 20:43:11 +0000660
Shirish Pargaonkar3d3ea8e2011-09-26 09:56:44 -0500661 if (backup_cred(cifs_sb))
662 create_options |= CREATE_OPEN_BACKUP_INTENT;
663
Pavel Shilovskyb8c32db2012-09-19 06:22:44 -0700664 if (server->ops->get_lease_key)
665 server->ops->get_lease_key(inode, &fid);
666
Pavel Shilovsky226730b2013-07-05 12:00:30 +0400667 oparms.tcon = tcon;
668 oparms.cifs_sb = cifs_sb;
669 oparms.desired_access = desired_access;
670 oparms.create_options = create_options;
671 oparms.disposition = disposition;
672 oparms.path = full_path;
673 oparms.fid = &fid;
674
Pavel Shilovsky2ae78ba2012-09-18 16:20:27 -0700675 /*
676 * Can not refresh inode by passing in file_info buf to be returned by
677 * CIFSSMBOpen and then calling get_inode_info with returned buf since
678 * file might have write behind data that needs to be flushed and server
679 * version of file size can be stale. If we knew for sure that inode was
680 * not dirty locally we could do this.
681 */
Pavel Shilovsky226730b2013-07-05 12:00:30 +0400682 rc = server->ops->open(xid, &oparms, &oplock, NULL);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700683 if (rc) {
Pavel Shilovsky2ae78ba2012-09-18 16:20:27 -0700684 mutex_unlock(&cfile->fh_mutex);
Joe Perchesf96637b2013-05-04 22:12:25 -0500685 cifs_dbg(FYI, "cifs_reopen returned 0x%x\n", rc);
686 cifs_dbg(FYI, "oplock: %d\n", oplock);
Jeff Layton15886172010-10-15 15:33:59 -0400687 goto reopen_error_exit;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700688 }
Jeff Layton15886172010-10-15 15:33:59 -0400689
690reopen_success:
Pavel Shilovsky2ae78ba2012-09-18 16:20:27 -0700691 cfile->invalidHandle = false;
692 mutex_unlock(&cfile->fh_mutex);
693 cinode = CIFS_I(inode);
Jeff Layton15886172010-10-15 15:33:59 -0400694
695 if (can_flush) {
696 rc = filemap_write_and_wait(inode->i_mapping);
Jeff Laytoneb4b7562010-10-22 14:52:29 -0400697 mapping_set_error(inode->i_mapping, rc);
Jeff Layton15886172010-10-15 15:33:59 -0400698
Jeff Layton15886172010-10-15 15:33:59 -0400699 if (tcon->unix_ext)
Pavel Shilovsky2ae78ba2012-09-18 16:20:27 -0700700 rc = cifs_get_inode_info_unix(&inode, full_path,
701 inode->i_sb, xid);
Jeff Layton15886172010-10-15 15:33:59 -0400702 else
Pavel Shilovsky2ae78ba2012-09-18 16:20:27 -0700703 rc = cifs_get_inode_info(&inode, full_path, NULL,
704 inode->i_sb, xid, NULL);
705 }
706 /*
707 * Else we are writing out data to server already and could deadlock if
708 * we tried to flush data, and since we do not know if we have data that
709 * would invalidate the current end of file on the server we can not go
710 * to the server to get the new inode info.
711 */
Pavel Shilovskye66673e2010-11-02 12:00:42 +0300712
Pavel Shilovsky2ae78ba2012-09-18 16:20:27 -0700713 server->ops->set_fid(cfile, &fid, oplock);
714 cifs_relock_file(cfile);
Jeff Layton15886172010-10-15 15:33:59 -0400715
716reopen_error_exit:
Linus Torvalds1da177e2005-04-16 15:20:36 -0700717 kfree(full_path);
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +0400718 free_xid(xid);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700719 return rc;
720}
721
722int cifs_close(struct inode *inode, struct file *file)
723{
Jeff Layton77970692011-04-05 16:23:47 -0700724 if (file->private_data != NULL) {
725 cifsFileInfo_put(file->private_data);
726 file->private_data = NULL;
727 }
Linus Torvalds1da177e2005-04-16 15:20:36 -0700728
Steve Frenchcdff08e2010-10-21 22:46:14 +0000729 /* return code from the ->release op is always ignored */
730 return 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700731}
732
/*
 * Release the private search state attached to an open directory.
 *
 * If a server-side FIND is still outstanding (not at end-of-search and the
 * handle is still valid), close the directory handle on the server first;
 * any failure there is only logged and then ignored.  Afterwards free the
 * cached network response buffer, drop the tlink reference and free the
 * per-file private data.  Effectively always returns 0.
 */
int cifs_closedir(struct inode *inode, struct file *file)
{
	int rc = 0;
	unsigned int xid;
	struct cifsFileInfo *cfile = file->private_data;
	struct cifs_tcon *tcon;
	struct TCP_Server_Info *server;
	char *buf;

	cifs_dbg(FYI, "Closedir inode = 0x%p\n", inode);

	/* no private data attached - nothing to tear down */
	if (cfile == NULL)
		return rc;

	xid = get_xid();
	tcon = tlink_tcon(cfile->tlink);
	server = tcon->ses->server;

	cifs_dbg(FYI, "Freeing private data in close dir\n");
	spin_lock(&cifs_file_list_lock);
	if (!cfile->srch_inf.endOfSearch && !cfile->invalidHandle) {
		/* mark the handle invalid before dropping the spinlock */
		cfile->invalidHandle = true;
		spin_unlock(&cifs_file_list_lock);
		if (server->ops->close_dir)
			rc = server->ops->close_dir(xid, tcon, &cfile->fid);
		else
			rc = -ENOSYS;
		cifs_dbg(FYI, "Closing uncompleted readdir with rc %d\n", rc);
		/* not much we can do if it fails anyway, ignore rc */
		rc = 0;
	} else
		spin_unlock(&cifs_file_list_lock);

	/* free the buffered FIND response with the matching release routine */
	buf = cfile->srch_inf.ntwrk_buf_start;
	if (buf) {
		cifs_dbg(FYI, "closedir free smb buf in srch struct\n");
		cfile->srch_inf.ntwrk_buf_start = NULL;
		if (cfile->srch_inf.smallBuf)
			cifs_small_buf_release(buf);
		else
			cifs_buf_release(buf);
	}

	cifs_put_tlink(cfile->tlink);
	kfree(file->private_data);
	file->private_data = NULL;
	/* BB can we lock the filestruct while this is going on? */
	free_xid(xid);
	return rc;
}
783
Pavel Shilovsky85160e02011-10-22 15:33:29 +0400784static struct cifsLockInfo *
Pavel Shilovskyfbd35ac2012-02-24 15:41:06 +0300785cifs_lock_init(__u64 offset, __u64 length, __u8 type)
Jeremy Allison7ee1af72006-08-02 21:56:33 +0000786{
Pavel Shilovskya88b4702011-10-29 17:17:59 +0400787 struct cifsLockInfo *lock =
Steve Frenchfb8c4b12007-07-10 01:16:18 +0000788 kmalloc(sizeof(struct cifsLockInfo), GFP_KERNEL);
Pavel Shilovskya88b4702011-10-29 17:17:59 +0400789 if (!lock)
790 return lock;
791 lock->offset = offset;
792 lock->length = length;
793 lock->type = type;
Pavel Shilovskya88b4702011-10-29 17:17:59 +0400794 lock->pid = current->tgid;
795 INIT_LIST_HEAD(&lock->blist);
796 init_waitqueue_head(&lock->block_q);
797 return lock;
Pavel Shilovsky85160e02011-10-22 15:33:29 +0400798}
799
Pavel Shilovskyf7ba7fe2012-09-19 06:22:43 -0700800void
Pavel Shilovsky85160e02011-10-22 15:33:29 +0400801cifs_del_lock_waiters(struct cifsLockInfo *lock)
802{
803 struct cifsLockInfo *li, *tmp;
804 list_for_each_entry_safe(li, tmp, &lock->blist, blist) {
805 list_del_init(&li->blist);
806 wake_up(&li->block_q);
807 }
808}
809
Pavel Shilovsky081c0412012-11-27 18:38:53 +0400810#define CIFS_LOCK_OP 0
811#define CIFS_READ_OP 1
812#define CIFS_WRITE_OP 2
813
/* @rw_check : CIFS_LOCK_OP - no op, CIFS_READ_OP - read, CIFS_WRITE_OP - write */
/*
 * Scan one fid's cached lock list for a lock overlapping
 * [offset, offset + length) that conflicts with a lock of @type requested
 * through @cfile.
 *
 * Overlapping locks held by the same process through the same fid are
 * tolerated for read/write checks, except that a shared (read) lock still
 * blocks a write op (CIFS_WRITE_OP) through that fid.  A shared request
 * does not conflict with another shared lock, nor with a lock of identical
 * type held by the same owner on the same fid.
 *
 * On conflict the blocking lock is stored in *conf_lock (when non-NULL)
 * and true is returned; otherwise false.
 */
static bool
cifs_find_fid_lock_conflict(struct cifs_fid_locks *fdlocks, __u64 offset,
			    __u64 length, __u8 type, struct cifsFileInfo *cfile,
			    struct cifsLockInfo **conf_lock, int rw_check)
{
	struct cifsLockInfo *li;
	struct cifsFileInfo *cur_cfile = fdlocks->cfile;
	struct TCP_Server_Info *server = tlink_tcon(cfile->tlink)->ses->server;

	list_for_each_entry(li, &fdlocks->locks, llist) {
		/* byte ranges do not overlap - cannot conflict */
		if (offset + length <= li->offset ||
		    offset >= li->offset + li->length)
			continue;
		if (rw_check != CIFS_LOCK_OP && current->tgid == li->pid &&
		    server->ops->compare_fids(cfile, cur_cfile)) {
			/* shared lock prevents write op through the same fid */
			if (!(li->type & server->vals->shared_lock_type) ||
			    rw_check != CIFS_WRITE_OP)
				continue;
		}
		/* shared vs shared, or same owner/fid with identical type */
		if ((type & server->vals->shared_lock_type) &&
		    ((server->ops->compare_fids(cfile, cur_cfile) &&
		     current->tgid == li->pid) || type == li->type))
			continue;
		if (conf_lock)
			*conf_lock = li;
		return true;
	}
	return false;
}
845
Pavel Shilovsky579f9052012-09-19 06:22:44 -0700846bool
Pavel Shilovsky55157df2012-02-28 14:04:17 +0300847cifs_find_lock_conflict(struct cifsFileInfo *cfile, __u64 offset, __u64 length,
Pavel Shilovsky579f9052012-09-19 06:22:44 -0700848 __u8 type, struct cifsLockInfo **conf_lock,
Pavel Shilovsky081c0412012-11-27 18:38:53 +0400849 int rw_check)
Pavel Shilovsky161ebf92011-10-29 17:17:58 +0400850{
Pavel Shilovskyfbd35ac2012-02-24 15:41:06 +0300851 bool rc = false;
Pavel Shilovskyf45d3412012-09-19 06:22:43 -0700852 struct cifs_fid_locks *cur;
Pavel Shilovsky55157df2012-02-28 14:04:17 +0300853 struct cifsInodeInfo *cinode = CIFS_I(cfile->dentry->d_inode);
Pavel Shilovskyfbd35ac2012-02-24 15:41:06 +0300854
Pavel Shilovskyf45d3412012-09-19 06:22:43 -0700855 list_for_each_entry(cur, &cinode->llist, llist) {
856 rc = cifs_find_fid_lock_conflict(cur, offset, length, type,
Pavel Shilovsky579f9052012-09-19 06:22:44 -0700857 cfile, conf_lock, rw_check);
Pavel Shilovskyfbd35ac2012-02-24 15:41:06 +0300858 if (rc)
859 break;
860 }
Pavel Shilovskyfbd35ac2012-02-24 15:41:06 +0300861
862 return rc;
Pavel Shilovsky161ebf92011-10-29 17:17:58 +0400863}
864
/*
 * Check if there is another lock that prevents us to set the lock (mandatory
 * style). If such a lock exists, update the flock structure with its
 * properties. Otherwise, set the flock type to F_UNLCK if we can cache brlocks
 * or leave it the same if we can't. Returns 0 if we don't need to request to
 * the server or 1 otherwise.
 */
static int
cifs_lock_test(struct cifsFileInfo *cfile, __u64 offset, __u64 length,
	       __u8 type, struct file_lock *flock)
{
	int rc = 0;
	struct cifsLockInfo *conf_lock;
	struct cifsInodeInfo *cinode = CIFS_I(cfile->dentry->d_inode);
	struct TCP_Server_Info *server = tlink_tcon(cfile->tlink)->ses->server;
	bool exist;

	/* read access is enough - we only inspect the cached lock lists */
	down_read(&cinode->lock_sem);

	exist = cifs_find_lock_conflict(cfile, offset, length, type,
					&conf_lock, CIFS_LOCK_OP);
	if (exist) {
		/* report the conflicting lock's range, owner and type */
		flock->fl_start = conf_lock->offset;
		flock->fl_end = conf_lock->offset + conf_lock->length - 1;
		flock->fl_pid = conf_lock->pid;
		if (conf_lock->type & server->vals->shared_lock_type)
			flock->fl_type = F_RDLCK;
		else
			flock->fl_type = F_WRLCK;
	} else if (!cinode->can_cache_brlcks)
		rc = 1; /* local cache is not authoritative - ask the server */
	else
		flock->fl_type = F_UNLCK;

	up_read(&cinode->lock_sem);
	return rc;
}
902
/*
 * Append a byte-range lock to the fid's cached lock list, taking the
 * inode's lock_sem for write around the list update.
 */
static void
cifs_lock_add(struct cifsFileInfo *cfile, struct cifsLockInfo *lock)
{
	struct cifsInodeInfo *cinode = CIFS_I(cfile->dentry->d_inode);
	down_write(&cinode->lock_sem);
	list_add_tail(&lock->llist, &cfile->llist->locks);
	up_write(&cinode->lock_sem);
}
911
/*
 * Set the byte-range lock (mandatory style). Returns:
 * 1) 0, if we set the lock and don't need to request to the server;
 * 2) 1, if no locks prevent us but we need to request to the server;
 * 3) -EACCESS, if there is a lock that prevents us and wait is false.
 */
static int
cifs_lock_add_if(struct cifsFileInfo *cfile, struct cifsLockInfo *lock,
		 bool wait)
{
	struct cifsLockInfo *conf_lock;
	struct cifsInodeInfo *cinode = CIFS_I(cfile->dentry->d_inode);
	bool exist;
	int rc = 0;

try_again:
	exist = false;
	down_write(&cinode->lock_sem);

	exist = cifs_find_lock_conflict(cfile, lock->offset, lock->length,
					lock->type, &conf_lock, CIFS_LOCK_OP);
	if (!exist && cinode->can_cache_brlcks) {
		/* no conflict and brlocks are cached - take the lock locally */
		list_add_tail(&lock->llist, &cfile->llist->locks);
		up_write(&cinode->lock_sem);
		return rc;
	}

	if (!exist)
		rc = 1; /* no local conflict, but the server must be asked */
	else if (!wait)
		rc = -EACCES;
	else {
		/* queue ourselves on the conflicting lock and sleep */
		list_add_tail(&lock->blist, &conf_lock->blist);
		up_write(&cinode->lock_sem);
		/* woken by cifs_del_lock_waiters() once blist is empty */
		rc = wait_event_interruptible(lock->block_q,
					(lock->blist.prev == &lock->blist) &&
					(lock->blist.next == &lock->blist));
		if (!rc)
			goto try_again; /* conflict gone - recheck from scratch */
		down_write(&cinode->lock_sem);
		list_del_init(&lock->blist); /* interrupted - dequeue ourselves */
	}

	up_write(&cinode->lock_sem);
	return rc;
}
958
Pavel Shilovsky9a5101c2011-11-07 16:11:24 +0300959/*
960 * Check if there is another lock that prevents us to set the lock (posix
961 * style). If such a lock exists, update the flock structure with its
962 * properties. Otherwise, set the flock type to F_UNLCK if we can cache brlocks
963 * or leave it the same if we can't. Returns 0 if we don't need to request to
964 * the server or 1 otherwise.
965 */
Pavel Shilovsky85160e02011-10-22 15:33:29 +0400966static int
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +0400967cifs_posix_lock_test(struct file *file, struct file_lock *flock)
968{
969 int rc = 0;
Al Viro496ad9a2013-01-23 17:07:38 -0500970 struct cifsInodeInfo *cinode = CIFS_I(file_inode(file));
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +0400971 unsigned char saved_type = flock->fl_type;
972
Pavel Shilovsky50792762011-10-29 17:17:57 +0400973 if ((flock->fl_flags & FL_POSIX) == 0)
974 return 1;
975
Pavel Shilovsky1b4b55a2012-09-19 06:22:44 -0700976 down_read(&cinode->lock_sem);
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +0400977 posix_test_lock(file, flock);
978
979 if (flock->fl_type == F_UNLCK && !cinode->can_cache_brlcks) {
980 flock->fl_type = saved_type;
981 rc = 1;
982 }
983
Pavel Shilovsky1b4b55a2012-09-19 06:22:44 -0700984 up_read(&cinode->lock_sem);
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +0400985 return rc;
986}
987
/*
 * Set the byte-range lock (posix style). Returns:
 * 1) 0, if we set the lock and don't need to request to the server;
 * 2) 1, if we need to request to the server;
 * 3) <0, if the error occurs while setting the lock.
 */
static int
cifs_posix_lock_set(struct file *file, struct file_lock *flock)
{
	struct cifsInodeInfo *cinode = CIFS_I(file_inode(file));
	int rc = 1;

	if ((flock->fl_flags & FL_POSIX) == 0)
		return rc;

try_again:
	down_write(&cinode->lock_sem);
	if (!cinode->can_cache_brlcks) {
		/* brlocks are not cached - the server must handle the lock */
		up_write(&cinode->lock_sem);
		return rc;
	}

	rc = posix_lock_file(file, flock, NULL);
	up_write(&cinode->lock_sem);
	if (rc == FILE_LOCK_DEFERRED) {
		/* blocked on another lock - wait for it to go away, retry */
		rc = wait_event_interruptible(flock->fl_wait, !flock->fl_next);
		if (!rc)
			goto try_again;
		posix_unblock_lock(flock); /* interrupted - stop waiting */
	}
	return rc;
}
1020
Pavel Shilovskyd39a4f72012-09-19 06:22:43 -07001021int
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001022cifs_push_mandatory_locks(struct cifsFileInfo *cfile)
Pavel Shilovsky85160e02011-10-22 15:33:29 +04001023{
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04001024 unsigned int xid;
1025 int rc = 0, stored_rc;
Pavel Shilovsky85160e02011-10-22 15:33:29 +04001026 struct cifsLockInfo *li, *tmp;
1027 struct cifs_tcon *tcon;
Pavel Shilovsky0013fb42012-05-31 13:03:26 +04001028 unsigned int num, max_num, max_buf;
Pavel Shilovsky32b9aaf2011-10-22 15:33:32 +04001029 LOCKING_ANDX_RANGE *buf, *cur;
1030 int types[] = {LOCKING_ANDX_LARGE_FILES,
1031 LOCKING_ANDX_SHARED_LOCK | LOCKING_ANDX_LARGE_FILES};
1032 int i;
Pavel Shilovsky85160e02011-10-22 15:33:29 +04001033
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04001034 xid = get_xid();
Pavel Shilovsky85160e02011-10-22 15:33:29 +04001035 tcon = tlink_tcon(cfile->tlink);
1036
Pavel Shilovsky0013fb42012-05-31 13:03:26 +04001037 /*
1038 * Accessing maxBuf is racy with cifs_reconnect - need to store value
1039 * and check it for zero before using.
1040 */
1041 max_buf = tcon->ses->server->maxBuf;
1042 if (!max_buf) {
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04001043 free_xid(xid);
Pavel Shilovsky0013fb42012-05-31 13:03:26 +04001044 return -EINVAL;
1045 }
1046
1047 max_num = (max_buf - sizeof(struct smb_hdr)) /
1048 sizeof(LOCKING_ANDX_RANGE);
Pavel Shilovsky32b9aaf2011-10-22 15:33:32 +04001049 buf = kzalloc(max_num * sizeof(LOCKING_ANDX_RANGE), GFP_KERNEL);
1050 if (!buf) {
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04001051 free_xid(xid);
Pavel Shilovskye2f28862012-08-29 21:13:38 +04001052 return -ENOMEM;
Pavel Shilovsky32b9aaf2011-10-22 15:33:32 +04001053 }
1054
1055 for (i = 0; i < 2; i++) {
1056 cur = buf;
1057 num = 0;
Pavel Shilovskyf45d3412012-09-19 06:22:43 -07001058 list_for_each_entry_safe(li, tmp, &cfile->llist->locks, llist) {
Pavel Shilovsky32b9aaf2011-10-22 15:33:32 +04001059 if (li->type != types[i])
1060 continue;
1061 cur->Pid = cpu_to_le16(li->pid);
1062 cur->LengthLow = cpu_to_le32((u32)li->length);
1063 cur->LengthHigh = cpu_to_le32((u32)(li->length>>32));
1064 cur->OffsetLow = cpu_to_le32((u32)li->offset);
1065 cur->OffsetHigh = cpu_to_le32((u32)(li->offset>>32));
1066 if (++num == max_num) {
Pavel Shilovsky4b4de762012-09-18 16:20:26 -07001067 stored_rc = cifs_lockv(xid, tcon,
1068 cfile->fid.netfid,
Pavel Shilovsky04a6aa82012-02-28 14:16:55 +03001069 (__u8)li->type, 0, num,
1070 buf);
Pavel Shilovsky32b9aaf2011-10-22 15:33:32 +04001071 if (stored_rc)
1072 rc = stored_rc;
1073 cur = buf;
1074 num = 0;
1075 } else
1076 cur++;
1077 }
1078
1079 if (num) {
Pavel Shilovsky4b4de762012-09-18 16:20:26 -07001080 stored_rc = cifs_lockv(xid, tcon, cfile->fid.netfid,
Pavel Shilovsky04a6aa82012-02-28 14:16:55 +03001081 (__u8)types[i], 0, num, buf);
Pavel Shilovsky32b9aaf2011-10-22 15:33:32 +04001082 if (stored_rc)
1083 rc = stored_rc;
1084 }
Pavel Shilovsky85160e02011-10-22 15:33:29 +04001085 }
1086
Pavel Shilovsky32b9aaf2011-10-22 15:33:32 +04001087 kfree(buf);
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04001088 free_xid(xid);
Pavel Shilovsky85160e02011-10-22 15:33:29 +04001089 return rc;
1090}
1091
/*
 * Walk the inode's i_flock list (copied from fs/locks.c with a name
 * change).  Callers in this file hold inode->i_lock while iterating.
 */
#define cifs_for_each_lock(inode, lockp) \
	for (lockp = &inode->i_flock; *lockp != NULL; \
	     lockp = &(*lockp)->fl_next)
1096
/*
 * Snapshot of one POSIX lock, captured under inode->i_lock so that the
 * network calls in cifs_push_posix_locks() can run without a spinlock.
 */
struct lock_to_push {
	struct list_head llist;	/* entry in the local locks_to_send list */
	__u64 offset;		/* start of the byte range (fl_start) */
	__u64 length;		/* length of the byte range */
	__u32 pid;		/* lock owner's pid (fl_pid) */
	__u16 netfid;		/* file handle the lock is pushed on */
	__u8 type;		/* CIFS_RDLCK or CIFS_WRLCK */
};
1105
/*
 * Push all cached POSIX byte-range locks on @cfile's inode to the server.
 *
 * Works in three phases: count the FL_POSIX locks under inode->i_lock,
 * preallocate that many lock_to_push entries with the spinlock dropped,
 * then snapshot the locks under i_lock again and finally send them with
 * CIFSSMBPosixLock() without holding any spinlock.  Returns 0, -ENOMEM on
 * allocation failure, or the last error reported by the server.
 */
static int
cifs_push_posix_locks(struct cifsFileInfo *cfile)
{
	struct inode *inode = cfile->dentry->d_inode;
	struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
	struct file_lock *flock, **before;
	unsigned int count = 0, i = 0;
	int rc = 0, xid, type;
	struct list_head locks_to_send, *el;
	struct lock_to_push *lck, *tmp;
	__u64 length;

	xid = get_xid();

	/* phase 1: count the POSIX locks we will have to push */
	spin_lock(&inode->i_lock);
	cifs_for_each_lock(inode, before) {
		if ((*before)->fl_flags & FL_POSIX)
			count++;
	}
	spin_unlock(&inode->i_lock);

	INIT_LIST_HEAD(&locks_to_send);

	/*
	 * Allocating count locks is enough because no FL_POSIX locks can be
	 * added to the list while we are holding cinode->lock_sem that
	 * protects locking operations of this inode.
	 */
	for (; i < count; i++) {
		lck = kmalloc(sizeof(struct lock_to_push), GFP_KERNEL);
		if (!lck) {
			rc = -ENOMEM;
			goto err_out;
		}
		list_add_tail(&lck->llist, &locks_to_send);
	}

	/* phase 2: snapshot each lock into a preallocated entry */
	el = locks_to_send.next;
	spin_lock(&inode->i_lock);
	cifs_for_each_lock(inode, before) {
		flock = *before;
		if ((flock->fl_flags & FL_POSIX) == 0)
			continue;
		if (el == &locks_to_send) {
			/*
			 * The list ended. We don't have enough allocated
			 * structures - something is really wrong.
			 */
			cifs_dbg(VFS, "Can't push all brlocks!\n");
			break;
		}
		length = 1 + flock->fl_end - flock->fl_start;
		if (flock->fl_type == F_RDLCK || flock->fl_type == F_SHLCK)
			type = CIFS_RDLCK;
		else
			type = CIFS_WRLCK;
		lck = list_entry(el, struct lock_to_push, llist);
		lck->pid = flock->fl_pid;
		lck->netfid = cfile->fid.netfid;
		lck->length = length;
		lck->type = type;
		lck->offset = flock->fl_start;
		el = el->next;
	}
	spin_unlock(&inode->i_lock);

	/* phase 3: send the snapshots to the server, freeing as we go */
	list_for_each_entry_safe(lck, tmp, &locks_to_send, llist) {
		int stored_rc;

		stored_rc = CIFSSMBPosixLock(xid, tcon, lck->netfid, lck->pid,
					     lck->offset, lck->length, NULL,
					     lck->type, 0);
		if (stored_rc)
			rc = stored_rc;
		list_del(&lck->llist);
		kfree(lck);
	}

out:
	free_xid(xid);
	return rc;
err_out:
	/* allocation failed part-way - free what we already queued */
	list_for_each_entry_safe(lck, tmp, &locks_to_send, llist) {
		list_del(&lck->llist);
		kfree(lck);
	}
	goto out;
}
1194
/*
 * Flush the locally cached byte-range locks for @cfile to the server,
 * choosing the POSIX variant when the server advertises unix extensions
 * with the FCNTL capability (and posix brlocks are not disabled by mount
 * option), and the mandatory variant otherwise.  Clears can_cache_brlcks
 * so subsequent lock requests go straight to the server.
 */
static int
cifs_push_locks(struct cifsFileInfo *cfile)
{
	struct cifs_sb_info *cifs_sb = CIFS_SB(cfile->dentry->d_sb);
	struct cifsInodeInfo *cinode = CIFS_I(cfile->dentry->d_inode);
	struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
	int rc = 0;

	/* we are going to update can_cache_brlcks here - need a write access */
	down_write(&cinode->lock_sem);
	if (!cinode->can_cache_brlcks) {
		/* already pushed (or never cached) - nothing to do */
		up_write(&cinode->lock_sem);
		return rc;
	}

	if (cap_unix(tcon->ses) &&
	    (CIFS_UNIX_FCNTL_CAP & le64_to_cpu(tcon->fsUnixInfo.Capability)) &&
	    ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOPOSIXBRL) == 0))
		rc = cifs_push_posix_locks(cfile);
	else
		rc = tcon->ses->server->ops->push_mand_locks(cfile);

	cinode->can_cache_brlcks = false;
	up_write(&cinode->lock_sem);
	return rc;
}
1221
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001222static void
Pavel Shilovsky04a6aa82012-02-28 14:16:55 +03001223cifs_read_flock(struct file_lock *flock, __u32 *type, int *lock, int *unlock,
Pavel Shilovsky106dc532012-02-28 14:23:34 +03001224 bool *wait_flag, struct TCP_Server_Info *server)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001225{
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001226 if (flock->fl_flags & FL_POSIX)
Joe Perchesf96637b2013-05-04 22:12:25 -05001227 cifs_dbg(FYI, "Posix\n");
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001228 if (flock->fl_flags & FL_FLOCK)
Joe Perchesf96637b2013-05-04 22:12:25 -05001229 cifs_dbg(FYI, "Flock\n");
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001230 if (flock->fl_flags & FL_SLEEP) {
Joe Perchesf96637b2013-05-04 22:12:25 -05001231 cifs_dbg(FYI, "Blocking lock\n");
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001232 *wait_flag = true;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001233 }
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001234 if (flock->fl_flags & FL_ACCESS)
Joe Perchesf96637b2013-05-04 22:12:25 -05001235 cifs_dbg(FYI, "Process suspended by mandatory locking - not implemented yet\n");
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001236 if (flock->fl_flags & FL_LEASE)
Joe Perchesf96637b2013-05-04 22:12:25 -05001237 cifs_dbg(FYI, "Lease on file - not implemented yet\n");
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001238 if (flock->fl_flags &
Jeff Layton3d6d8542012-09-19 06:22:46 -07001239 (~(FL_POSIX | FL_FLOCK | FL_SLEEP |
1240 FL_ACCESS | FL_LEASE | FL_CLOSE)))
Joe Perchesf96637b2013-05-04 22:12:25 -05001241 cifs_dbg(FYI, "Unknown lock flags 0x%x\n", flock->fl_flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001242
Pavel Shilovsky106dc532012-02-28 14:23:34 +03001243 *type = server->vals->large_lock_type;
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001244 if (flock->fl_type == F_WRLCK) {
Joe Perchesf96637b2013-05-04 22:12:25 -05001245 cifs_dbg(FYI, "F_WRLCK\n");
Pavel Shilovsky106dc532012-02-28 14:23:34 +03001246 *type |= server->vals->exclusive_lock_type;
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001247 *lock = 1;
1248 } else if (flock->fl_type == F_UNLCK) {
Joe Perchesf96637b2013-05-04 22:12:25 -05001249 cifs_dbg(FYI, "F_UNLCK\n");
Pavel Shilovsky106dc532012-02-28 14:23:34 +03001250 *type |= server->vals->unlock_lock_type;
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001251 *unlock = 1;
1252 /* Check if unlock includes more than one lock range */
1253 } else if (flock->fl_type == F_RDLCK) {
Joe Perchesf96637b2013-05-04 22:12:25 -05001254 cifs_dbg(FYI, "F_RDLCK\n");
Pavel Shilovsky106dc532012-02-28 14:23:34 +03001255 *type |= server->vals->shared_lock_type;
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001256 *lock = 1;
1257 } else if (flock->fl_type == F_EXLCK) {
Joe Perchesf96637b2013-05-04 22:12:25 -05001258 cifs_dbg(FYI, "F_EXLCK\n");
Pavel Shilovsky106dc532012-02-28 14:23:34 +03001259 *type |= server->vals->exclusive_lock_type;
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001260 *lock = 1;
1261 } else if (flock->fl_type == F_SHLCK) {
Joe Perchesf96637b2013-05-04 22:12:25 -05001262 cifs_dbg(FYI, "F_SHLCK\n");
Pavel Shilovsky106dc532012-02-28 14:23:34 +03001263 *type |= server->vals->shared_lock_type;
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001264 *lock = 1;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001265 } else
Joe Perchesf96637b2013-05-04 22:12:25 -05001266 cifs_dbg(FYI, "Unknown type of lock\n");
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001267}
Linus Torvalds1da177e2005-04-16 15:20:36 -07001268
/*
 * Handle an F_GETLK request for @flock: find out whether a conflicting
 * lock exists for the range [fl_start, fl_end]. On success the result is
 * reported back through @flock: fl_type becomes F_UNLCK if the range is
 * free, or the type of a conflicting lock (F_RDLCK/F_WRLCK) otherwise.
 * Always returns 0 on the mandatory-locking path; the POSIX path returns
 * whatever the lock test/request returns.
 */
static int
cifs_getlk(struct file *file, struct file_lock *flock, __u32 type,
	   bool wait_flag, bool posix_lck, unsigned int xid)
{
	int rc = 0;
	/* fl_end is inclusive, hence the +1 */
	__u64 length = 1 + flock->fl_end - flock->fl_start;
	struct cifsFileInfo *cfile = (struct cifsFileInfo *)file->private_data;
	struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
	struct TCP_Server_Info *server = tcon->ses->server;
	__u16 netfid = cfile->fid.netfid;

	if (posix_lck) {
		int posix_lock_type;

		/* first check the locally cached POSIX locks */
		rc = cifs_posix_lock_test(file, flock);
		if (!rc)
			return rc;

		if (type & server->vals->shared_lock_type)
			posix_lock_type = CIFS_RDLCK;
		else
			posix_lock_type = CIFS_WRLCK;
		/* ask the server to perform the lock test */
		rc = CIFSSMBPosixLock(xid, tcon, netfid, current->tgid,
				      flock->fl_start, length, flock,
				      posix_lock_type, wait_flag);
		return rc;
	}

	/* mandatory path: first test against locks cached on this file */
	rc = cifs_lock_test(cfile, flock->fl_start, length, type, flock);
	if (!rc)
		return rc;

	/* BB we could chain these into one lock request BB */
	/*
	 * Probe the server: try to take a lock of the requested type and, if
	 * that succeeds, immediately release it - no conflicting lock exists.
	 */
	rc = server->ops->mand_lock(xid, cfile, flock->fl_start, length, type,
				    1, 0, false);
	if (rc == 0) {
		rc = server->ops->mand_lock(xid, cfile, flock->fl_start, length,
					    type, 0, 1, false);
		flock->fl_type = F_UNLCK;
		if (rc != 0)
			cifs_dbg(VFS, "Error unlocking previously locked range %d during test of lock\n",
				 rc);
		return 0;
	}

	/* a shared probe failed, so the conflicting lock must be exclusive */
	if (type & server->vals->shared_lock_type) {
		flock->fl_type = F_WRLCK;
		return 0;
	}

	type &= ~server->vals->exclusive_lock_type;

	/*
	 * The exclusive probe failed; retry with a shared probe to tell a
	 * shared conflict (F_RDLCK) from an exclusive one (F_WRLCK).
	 */
	rc = server->ops->mand_lock(xid, cfile, flock->fl_start, length,
				    type | server->vals->shared_lock_type,
				    1, 0, false);
	if (rc == 0) {
		rc = server->ops->mand_lock(xid, cfile, flock->fl_start, length,
			type | server->vals->shared_lock_type, 0, 1, false);
		flock->fl_type = F_RDLCK;
		if (rc != 0)
			cifs_dbg(VFS, "Error unlocking previously locked range %d during test of lock\n",
				 rc);
	} else
		flock->fl_type = F_WRLCK;

	return 0;
}
1336
Pavel Shilovskyf7ba7fe2012-09-19 06:22:43 -07001337void
Pavel Shilovsky9ee305b2011-10-22 15:33:31 +04001338cifs_move_llist(struct list_head *source, struct list_head *dest)
1339{
1340 struct list_head *li, *tmp;
1341 list_for_each_safe(li, tmp, source)
1342 list_move(li, dest);
1343}
1344
Pavel Shilovskyf7ba7fe2012-09-19 06:22:43 -07001345void
Pavel Shilovsky9ee305b2011-10-22 15:33:31 +04001346cifs_free_llist(struct list_head *llist)
1347{
1348 struct cifsLockInfo *li, *tmp;
1349 list_for_each_entry_safe(li, tmp, llist, llist) {
1350 cifs_del_lock_waiters(li);
1351 list_del(&li->llist);
1352 kfree(li);
1353 }
1354}
1355
/*
 * Remove - both from the server and from cfile's cached lock list - every
 * lock owned by the current tgid that lies entirely inside the range
 * described by @flock. Unlock requests are batched into LOCKING_ANDX
 * calls of up to max_num ranges, with one pass per lock type.
 * Returns 0 on success or the first error encountered (locks whose
 * server-side unlock failed are restored to the file's list).
 */
int
cifs_unlock_range(struct cifsFileInfo *cfile, struct file_lock *flock,
		  unsigned int xid)
{
	int rc = 0, stored_rc;
	/* pass 0: plain (exclusive) locks, pass 1: shared locks */
	int types[] = {LOCKING_ANDX_LARGE_FILES,
		       LOCKING_ANDX_SHARED_LOCK | LOCKING_ANDX_LARGE_FILES};
	unsigned int i;
	unsigned int max_num, num, max_buf;
	LOCKING_ANDX_RANGE *buf, *cur;
	struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
	struct cifsInodeInfo *cinode = CIFS_I(cfile->dentry->d_inode);
	struct cifsLockInfo *li, *tmp;
	/* fl_end is inclusive, hence the +1 */
	__u64 length = 1 + flock->fl_end - flock->fl_start;
	/* holding area for locks whose unlock request is in flight */
	struct list_head tmp_llist;

	INIT_LIST_HEAD(&tmp_llist);

	/*
	 * Accessing maxBuf is racy with cifs_reconnect - need to store value
	 * and check it for zero before using.
	 */
	max_buf = tcon->ses->server->maxBuf;
	if (!max_buf)
		return -EINVAL;

	/* how many ranges fit in one request after the SMB header */
	max_num = (max_buf - sizeof(struct smb_hdr)) /
						sizeof(LOCKING_ANDX_RANGE);
	buf = kzalloc(max_num * sizeof(LOCKING_ANDX_RANGE), GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	/* exclude all other readers/writers of the cached brlock list */
	down_write(&cinode->lock_sem);
	for (i = 0; i < 2; i++) {
		cur = buf;
		num = 0;
		list_for_each_entry_safe(li, tmp, &cfile->llist->locks, llist) {
			/* skip locks not fully contained in the range */
			if (flock->fl_start > li->offset ||
			    (flock->fl_start + length) <
			    (li->offset + li->length))
				continue;
			if (current->tgid != li->pid)
				continue;
			if (types[i] != li->type)
				continue;
			if (cinode->can_cache_brlcks) {
				/*
				 * We can cache brlock requests - simply remove
				 * a lock from the file's list.
				 */
				list_del(&li->llist);
				cifs_del_lock_waiters(li);
				kfree(li);
				continue;
			}
			/* fill the next wire range from this cached lock */
			cur->Pid = cpu_to_le16(li->pid);
			cur->LengthLow = cpu_to_le32((u32)li->length);
			cur->LengthHigh = cpu_to_le32((u32)(li->length>>32));
			cur->OffsetLow = cpu_to_le32((u32)li->offset);
			cur->OffsetHigh = cpu_to_le32((u32)(li->offset>>32));
			/*
			 * We need to save a lock here to let us add it again to
			 * the file's list if the unlock range request fails on
			 * the server.
			 */
			list_move(&li->llist, &tmp_llist);
			if (++num == max_num) {
				/* batch is full - flush it to the server */
				stored_rc = cifs_lockv(xid, tcon,
						       cfile->fid.netfid,
						       li->type, num, 0, buf);
				if (stored_rc) {
					/*
					 * We failed on the unlock range
					 * request - add all locks from the tmp
					 * list to the head of the file's list.
					 */
					cifs_move_llist(&tmp_llist,
							&cfile->llist->locks);
					rc = stored_rc;
				} else
					/*
					 * The unlock range request succeed -
					 * free the tmp list.
					 */
					cifs_free_llist(&tmp_llist);
				cur = buf;
				num = 0;
			} else
				cur++;
		}
		/* flush any partially filled batch for this lock type */
		if (num) {
			stored_rc = cifs_lockv(xid, tcon, cfile->fid.netfid,
					       types[i], num, 0, buf);
			if (stored_rc) {
				cifs_move_llist(&tmp_llist,
						&cfile->llist->locks);
				rc = stored_rc;
			} else
				cifs_free_llist(&tmp_llist);
		}
	}

	up_write(&cinode->lock_sem);
	kfree(buf);
	return rc;
}
1462
/*
 * Handle an F_SETLK/F_SETLKW request for @flock: set or clear a byte-range
 * lock via the POSIX (Unix extensions) path when @posix_lck, otherwise via
 * mandatory locking. On success the lock is also recorded locally through
 * posix_lock_file_wait() when FL_POSIX is set, so the VFS view stays
 * consistent with the server.
 */
static int
cifs_setlk(struct file *file, struct file_lock *flock, __u32 type,
	   bool wait_flag, bool posix_lck, int lock, int unlock,
	   unsigned int xid)
{
	int rc = 0;
	/* fl_end is inclusive, hence the +1 */
	__u64 length = 1 + flock->fl_end - flock->fl_start;
	struct cifsFileInfo *cfile = (struct cifsFileInfo *)file->private_data;
	struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
	struct TCP_Server_Info *server = tcon->ses->server;
	struct inode *inode = cfile->dentry->d_inode;

	if (posix_lck) {
		int posix_lock_type;

		/*
		 * Try to satisfy the request locally first; rc <= 0 means it
		 * was fully handled (or failed) here. A positive rc presumably
		 * means the request must go to the server - confirm against
		 * cifs_posix_lock_set().
		 */
		rc = cifs_posix_lock_set(file, flock);
		if (!rc || rc < 0)
			return rc;

		if (type & server->vals->shared_lock_type)
			posix_lock_type = CIFS_RDLCK;
		else
			posix_lock_type = CIFS_WRLCK;

		if (unlock == 1)
			posix_lock_type = CIFS_UNLCK;

		rc = CIFSSMBPosixLock(xid, tcon, cfile->fid.netfid,
				      current->tgid, flock->fl_start, length,
				      NULL, posix_lock_type, wait_flag);
		goto out;
	}

	if (lock) {
		/* NOTE(review): this local shadows the 'lock' parameter */
		struct cifsLockInfo *lock;

		lock = cifs_lock_init(flock->fl_start, length, type);
		if (!lock)
			return -ENOMEM;

		/*
		 * rc < 0: error; rc == 0: handled from the local cache;
		 * rc > 0: no cached conflict, send the lock to the server.
		 */
		rc = cifs_lock_add_if(cfile, lock, wait_flag);
		if (rc < 0) {
			kfree(lock);
			return rc;
		}
		if (!rc)
			goto out;

		/*
		 * Windows 7 server can delay breaking lease from read to None
		 * if we set a byte-range lock on a file - break it explicitly
		 * before sending the lock to the server to be sure the next
		 * read won't conflict with non-overlapted locks due to
		 * pagereading.
		 */
		if (!CIFS_I(inode)->clientCanCacheAll &&
		    CIFS_I(inode)->clientCanCacheRead) {
			cifs_invalidate_mapping(inode);
			cifs_dbg(FYI, "Set no oplock for inode=%p due to mand locks\n",
				 inode);
			CIFS_I(inode)->clientCanCacheRead = false;
		}

		rc = server->ops->mand_lock(xid, cfile, flock->fl_start, length,
					    type, 1, 0, wait_flag);
		if (rc) {
			kfree(lock);
			return rc;
		}

		/* server granted the lock - cache it on the file */
		cifs_lock_add(cfile, lock);
	} else if (unlock)
		rc = server->ops->mand_unlock_range(cfile, flock, xid);

out:
	if (flock->fl_flags & FL_POSIX)
		posix_lock_file_wait(file, flock);
	return rc;
}
1542
1543int cifs_lock(struct file *file, int cmd, struct file_lock *flock)
1544{
1545 int rc, xid;
1546 int lock = 0, unlock = 0;
1547 bool wait_flag = false;
1548 bool posix_lck = false;
1549 struct cifs_sb_info *cifs_sb;
1550 struct cifs_tcon *tcon;
1551 struct cifsInodeInfo *cinode;
1552 struct cifsFileInfo *cfile;
1553 __u16 netfid;
Pavel Shilovsky04a6aa82012-02-28 14:16:55 +03001554 __u32 type;
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001555
1556 rc = -EACCES;
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04001557 xid = get_xid();
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001558
Joe Perchesf96637b2013-05-04 22:12:25 -05001559 cifs_dbg(FYI, "Lock parm: 0x%x flockflags: 0x%x flocktype: 0x%x start: %lld end: %lld\n",
1560 cmd, flock->fl_flags, flock->fl_type,
1561 flock->fl_start, flock->fl_end);
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001562
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001563 cfile = (struct cifsFileInfo *)file->private_data;
1564 tcon = tlink_tcon(cfile->tlink);
Pavel Shilovsky106dc532012-02-28 14:23:34 +03001565
1566 cifs_read_flock(flock, &type, &lock, &unlock, &wait_flag,
1567 tcon->ses->server);
1568
1569 cifs_sb = CIFS_SB(file->f_path.dentry->d_sb);
Pavel Shilovsky4b4de762012-09-18 16:20:26 -07001570 netfid = cfile->fid.netfid;
Al Viro496ad9a2013-01-23 17:07:38 -05001571 cinode = CIFS_I(file_inode(file));
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001572
Pavel Shilovsky29e20f92012-07-13 13:58:14 +04001573 if (cap_unix(tcon->ses) &&
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001574 (CIFS_UNIX_FCNTL_CAP & le64_to_cpu(tcon->fsUnixInfo.Capability)) &&
1575 ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOPOSIXBRL) == 0))
1576 posix_lck = true;
1577 /*
1578 * BB add code here to normalize offset and length to account for
1579 * negative length which we can not accept over the wire.
1580 */
1581 if (IS_GETLK(cmd)) {
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001582 rc = cifs_getlk(file, flock, type, wait_flag, posix_lck, xid);
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04001583 free_xid(xid);
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001584 return rc;
1585 }
1586
1587 if (!lock && !unlock) {
1588 /*
1589 * if no lock or unlock then nothing to do since we do not
1590 * know what it is
1591 */
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04001592 free_xid(xid);
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001593 return -EOPNOTSUPP;
1594 }
1595
1596 rc = cifs_setlk(file, flock, type, wait_flag, posix_lck, lock, unlock,
1597 xid);
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04001598 free_xid(xid);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001599 return rc;
1600}
1601
Jeff Layton597b0272012-03-23 14:40:56 -04001602/*
1603 * update the file size (if needed) after a write. Should be called with
1604 * the inode->i_lock held
1605 */
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05001606void
Jeff Laytonfbec9ab2009-04-03 13:44:00 -04001607cifs_update_eof(struct cifsInodeInfo *cifsi, loff_t offset,
1608 unsigned int bytes_written)
1609{
1610 loff_t end_of_write = offset + bytes_written;
1611
1612 if (end_of_write > cifsi->server_eof)
1613 cifsi->server_eof = end_of_write;
1614}
1615
/*
 * Synchronously write @write_size bytes from @write_data to the server at
 * file position *@offset using the open handle @open_file, on behalf of
 * process @pid. Sends at most wsize bytes per request, retries -EAGAIN
 * (reopening an invalidated handle first), updates the cached server EOF
 * and i_size under i_lock, and advances *@offset past the written bytes.
 * Returns the total number of bytes written, or a negative error when
 * nothing could be written.
 */
static ssize_t
cifs_write(struct cifsFileInfo *open_file, __u32 pid, const char *write_data,
	   size_t write_size, loff_t *offset)
{
	int rc = 0;
	unsigned int bytes_written = 0;
	unsigned int total_written;
	struct cifs_sb_info *cifs_sb;
	struct cifs_tcon *tcon;
	struct TCP_Server_Info *server;
	unsigned int xid;
	struct dentry *dentry = open_file->dentry;
	struct cifsInodeInfo *cifsi = CIFS_I(dentry->d_inode);
	struct cifs_io_parms io_parms;

	cifs_sb = CIFS_SB(dentry->d_sb);

	cifs_dbg(FYI, "write %zd bytes to offset %lld of %s\n",
		 write_size, *offset, dentry->d_name.name);

	tcon = tlink_tcon(open_file->tlink);
	server = tcon->ses->server;

	if (!server->ops->sync_write)
		return -ENOSYS;

	xid = get_xid();

	for (total_written = 0; write_size > total_written;
	     total_written += bytes_written) {
		rc = -EAGAIN;
		while (rc == -EAGAIN) {
			struct kvec iov[2];
			unsigned int len;

			if (open_file->invalidHandle) {
				/* we could deadlock if we called
				   filemap_fdatawait from here so tell
				   reopen_file not to flush data to
				   server now */
				rc = cifs_reopen_file(open_file, false);
				if (rc != 0)
					break;
			}

			/* send at most wsize bytes in one request */
			len = min((size_t)cifs_sb->wsize,
				  write_size - total_written);
			/* iov[0] is reserved for smb header */
			iov[1].iov_base = (char *)write_data + total_written;
			iov[1].iov_len = len;
			io_parms.pid = pid;
			io_parms.tcon = tcon;
			io_parms.offset = *offset;
			io_parms.length = len;
			rc = server->ops->sync_write(xid, open_file, &io_parms,
						     &bytes_written, iov, 1);
		}
		if (rc || (bytes_written == 0)) {
			/* return the error only if nothing was written yet;
			   otherwise report the partial write */
			if (total_written)
				break;
			else {
				free_xid(xid);
				return rc;
			}
		} else {
			/* i_lock protects both the cached EOF and i_size */
			spin_lock(&dentry->d_inode->i_lock);
			cifs_update_eof(cifsi, *offset, bytes_written);
			spin_unlock(&dentry->d_inode->i_lock);
			*offset += bytes_written;
		}
	}

	cifs_stats_bytes_written(tcon, total_written);

	if (total_written > 0) {
		spin_lock(&dentry->d_inode->i_lock);
		if (*offset > dentry->d_inode->i_size)
			i_size_write(dentry->d_inode, *offset);
		spin_unlock(&dentry->d_inode->i_lock);
	}
	mark_inode_dirty_sync(dentry->d_inode);
	free_xid(xid);
	return total_written;
}
1700
Jeff Layton6508d902010-09-29 19:51:11 -04001701struct cifsFileInfo *find_readable_file(struct cifsInodeInfo *cifs_inode,
1702 bool fsuid_only)
Steve French630f3f0c2007-10-25 21:17:17 +00001703{
1704 struct cifsFileInfo *open_file = NULL;
Jeff Layton6508d902010-09-29 19:51:11 -04001705 struct cifs_sb_info *cifs_sb = CIFS_SB(cifs_inode->vfs_inode.i_sb);
1706
1707 /* only filter by fsuid on multiuser mounts */
1708 if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MULTIUSER))
1709 fsuid_only = false;
Steve French630f3f0c2007-10-25 21:17:17 +00001710
Jeff Layton44772882010-10-15 15:34:03 -04001711 spin_lock(&cifs_file_list_lock);
Steve French630f3f0c2007-10-25 21:17:17 +00001712 /* we could simply get the first_list_entry since write-only entries
1713 are always at the end of the list but since the first entry might
1714 have a close pending, we go through the whole list */
1715 list_for_each_entry(open_file, &cifs_inode->openFileList, flist) {
Eric W. Biedermanfef59fd2013-02-06 02:23:02 -08001716 if (fsuid_only && !uid_eq(open_file->uid, current_fsuid()))
Jeff Layton6508d902010-09-29 19:51:11 -04001717 continue;
Jeff Layton2e396b82010-10-15 15:34:01 -04001718 if (OPEN_FMODE(open_file->f_flags) & FMODE_READ) {
Steve French630f3f0c2007-10-25 21:17:17 +00001719 if (!open_file->invalidHandle) {
1720 /* found a good file */
1721 /* lock it so it will not be closed on us */
Jeff Layton764a1b12012-07-25 14:59:54 -04001722 cifsFileInfo_get_locked(open_file);
Jeff Layton44772882010-10-15 15:34:03 -04001723 spin_unlock(&cifs_file_list_lock);
Steve French630f3f0c2007-10-25 21:17:17 +00001724 return open_file;
1725 } /* else might as well continue, and look for
1726 another, or simply have the caller reopen it
1727 again rather than trying to fix this handle */
1728 } else /* write only file */
1729 break; /* write only files are last so must be done */
1730 }
Jeff Layton44772882010-10-15 15:34:03 -04001731 spin_unlock(&cifs_file_list_lock);
Steve French630f3f0c2007-10-25 21:17:17 +00001732 return NULL;
1733}
Steve French630f3f0c2007-10-25 21:17:17 +00001734
Jeff Layton6508d902010-09-29 19:51:11 -04001735struct cifsFileInfo *find_writable_file(struct cifsInodeInfo *cifs_inode,
1736 bool fsuid_only)
Steve French6148a742005-10-05 12:23:19 -07001737{
Shirish Pargaonkar2c0c2a02012-05-21 09:20:12 -05001738 struct cifsFileInfo *open_file, *inv_file = NULL;
Jeff Laytond3892292010-11-02 16:22:50 -04001739 struct cifs_sb_info *cifs_sb;
Jeff Layton2846d382008-09-22 21:33:33 -04001740 bool any_available = false;
Steve Frenchdd99cd82005-10-05 19:32:49 -07001741 int rc;
Shirish Pargaonkar2c0c2a02012-05-21 09:20:12 -05001742 unsigned int refind = 0;
Steve French6148a742005-10-05 12:23:19 -07001743
Steve French60808232006-04-22 15:53:05 +00001744 /* Having a null inode here (because mapping->host was set to zero by
1745 the VFS or MM) should not happen but we had reports of on oops (due to
1746 it being zero) during stress testcases so we need to check for it */
1747
Steve Frenchfb8c4b12007-07-10 01:16:18 +00001748 if (cifs_inode == NULL) {
Joe Perchesf96637b2013-05-04 22:12:25 -05001749 cifs_dbg(VFS, "Null inode passed to cifs_writeable_file\n");
Steve French60808232006-04-22 15:53:05 +00001750 dump_stack();
1751 return NULL;
1752 }
1753
Jeff Laytond3892292010-11-02 16:22:50 -04001754 cifs_sb = CIFS_SB(cifs_inode->vfs_inode.i_sb);
1755
Jeff Layton6508d902010-09-29 19:51:11 -04001756 /* only filter by fsuid on multiuser mounts */
1757 if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MULTIUSER))
1758 fsuid_only = false;
1759
Jeff Layton44772882010-10-15 15:34:03 -04001760 spin_lock(&cifs_file_list_lock);
Steve French9b22b0b2007-10-02 01:11:08 +00001761refind_writable:
Shirish Pargaonkar2c0c2a02012-05-21 09:20:12 -05001762 if (refind > MAX_REOPEN_ATT) {
1763 spin_unlock(&cifs_file_list_lock);
1764 return NULL;
1765 }
Steve French6148a742005-10-05 12:23:19 -07001766 list_for_each_entry(open_file, &cifs_inode->openFileList, flist) {
Jeff Layton6508d902010-09-29 19:51:11 -04001767 if (!any_available && open_file->pid != current->tgid)
1768 continue;
Eric W. Biedermanfef59fd2013-02-06 02:23:02 -08001769 if (fsuid_only && !uid_eq(open_file->uid, current_fsuid()))
Jeff Layton6508d902010-09-29 19:51:11 -04001770 continue;
Jeff Layton2e396b82010-10-15 15:34:01 -04001771 if (OPEN_FMODE(open_file->f_flags) & FMODE_WRITE) {
Steve French9b22b0b2007-10-02 01:11:08 +00001772 if (!open_file->invalidHandle) {
1773 /* found a good writable file */
Jeff Layton764a1b12012-07-25 14:59:54 -04001774 cifsFileInfo_get_locked(open_file);
Jeff Layton44772882010-10-15 15:34:03 -04001775 spin_unlock(&cifs_file_list_lock);
Steve French9b22b0b2007-10-02 01:11:08 +00001776 return open_file;
Shirish Pargaonkar2c0c2a02012-05-21 09:20:12 -05001777 } else {
1778 if (!inv_file)
1779 inv_file = open_file;
Steve French9b22b0b2007-10-02 01:11:08 +00001780 }
Steve French6148a742005-10-05 12:23:19 -07001781 }
1782 }
Jeff Layton2846d382008-09-22 21:33:33 -04001783 /* couldn't find useable FH with same pid, try any available */
1784 if (!any_available) {
1785 any_available = true;
1786 goto refind_writable;
1787 }
Shirish Pargaonkar2c0c2a02012-05-21 09:20:12 -05001788
1789 if (inv_file) {
1790 any_available = false;
Jeff Layton764a1b12012-07-25 14:59:54 -04001791 cifsFileInfo_get_locked(inv_file);
Shirish Pargaonkar2c0c2a02012-05-21 09:20:12 -05001792 }
1793
Jeff Layton44772882010-10-15 15:34:03 -04001794 spin_unlock(&cifs_file_list_lock);
Shirish Pargaonkar2c0c2a02012-05-21 09:20:12 -05001795
1796 if (inv_file) {
1797 rc = cifs_reopen_file(inv_file, false);
1798 if (!rc)
1799 return inv_file;
1800 else {
1801 spin_lock(&cifs_file_list_lock);
1802 list_move_tail(&inv_file->flist,
1803 &cifs_inode->openFileList);
1804 spin_unlock(&cifs_file_list_lock);
1805 cifsFileInfo_put(inv_file);
1806 spin_lock(&cifs_file_list_lock);
1807 ++refind;
1808 goto refind_writable;
1809 }
1810 }
1811
Steve French6148a742005-10-05 12:23:19 -07001812 return NULL;
1813}
1814
Linus Torvalds1da177e2005-04-16 15:20:36 -07001815static int cifs_partialpagewrite(struct page *page, unsigned from, unsigned to)
1816{
1817 struct address_space *mapping = page->mapping;
1818 loff_t offset = (loff_t)page->index << PAGE_CACHE_SHIFT;
1819 char *write_data;
1820 int rc = -EFAULT;
1821 int bytes_written = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001822 struct inode *inode;
Steve French6148a742005-10-05 12:23:19 -07001823 struct cifsFileInfo *open_file;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001824
1825 if (!mapping || !mapping->host)
1826 return -EFAULT;
1827
1828 inode = page->mapping->host;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001829
1830 offset += (loff_t)from;
1831 write_data = kmap(page);
1832 write_data += from;
1833
1834 if ((to > PAGE_CACHE_SIZE) || (from > to)) {
1835 kunmap(page);
1836 return -EIO;
1837 }
1838
1839 /* racing with truncate? */
1840 if (offset > mapping->host->i_size) {
1841 kunmap(page);
1842 return 0; /* don't care */
1843 }
1844
1845 /* check to make sure that we are not extending the file */
1846 if (mapping->host->i_size - offset < (loff_t)to)
Steve Frenchfb8c4b12007-07-10 01:16:18 +00001847 to = (unsigned)(mapping->host->i_size - offset);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001848
Jeff Layton6508d902010-09-29 19:51:11 -04001849 open_file = find_writable_file(CIFS_I(mapping->host), false);
Steve French6148a742005-10-05 12:23:19 -07001850 if (open_file) {
Pavel Shilovskyfa2989f2011-05-26 10:01:59 +04001851 bytes_written = cifs_write(open_file, open_file->pid,
1852 write_data, to - from, &offset);
Dave Kleikamp6ab409b2009-08-31 11:07:12 -04001853 cifsFileInfo_put(open_file);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001854 /* Does mm or vfs already set times? */
Steve French6148a742005-10-05 12:23:19 -07001855 inode->i_atime = inode->i_mtime = current_fs_time(inode->i_sb);
Steve Frenchbb5a9a02007-12-31 04:21:29 +00001856 if ((bytes_written > 0) && (offset))
Steve French6148a742005-10-05 12:23:19 -07001857 rc = 0;
Steve Frenchbb5a9a02007-12-31 04:21:29 +00001858 else if (bytes_written < 0)
1859 rc = bytes_written;
Steve French6148a742005-10-05 12:23:19 -07001860 } else {
Joe Perchesf96637b2013-05-04 22:12:25 -05001861 cifs_dbg(FYI, "No writeable filehandles for inode\n");
Linus Torvalds1da177e2005-04-16 15:20:36 -07001862 rc = -EIO;
1863 }
1864
1865 kunmap(page);
1866 return rc;
1867}
1868
Linus Torvalds1da177e2005-04-16 15:20:36 -07001869static int cifs_writepages(struct address_space *mapping,
Steve French37c0eb42005-10-05 14:50:29 -07001870 struct writeback_control *wbc)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001871{
Jeff Laytonc3d17b62011-05-19 16:22:57 -04001872 struct cifs_sb_info *cifs_sb = CIFS_SB(mapping->host->i_sb);
1873 bool done = false, scanned = false, range_whole = false;
1874 pgoff_t end, index;
1875 struct cifs_writedata *wdata;
Pavel Shilovskyc9de5c82012-09-18 16:20:29 -07001876 struct TCP_Server_Info *server;
Steve French37c0eb42005-10-05 14:50:29 -07001877 struct page *page;
Steve French37c0eb42005-10-05 14:50:29 -07001878 int rc = 0;
Steve French50c2f752007-07-13 00:33:32 +00001879
Steve French37c0eb42005-10-05 14:50:29 -07001880 /*
Jeff Laytonc3d17b62011-05-19 16:22:57 -04001881 * If wsize is smaller than the page cache size, default to writing
Steve French37c0eb42005-10-05 14:50:29 -07001882 * one page at a time via cifs_writepage
1883 */
1884 if (cifs_sb->wsize < PAGE_CACHE_SIZE)
1885 return generic_writepages(mapping, wbc);
1886
OGAWA Hirofumi111ebb62006-06-23 02:03:26 -07001887 if (wbc->range_cyclic) {
Steve French37c0eb42005-10-05 14:50:29 -07001888 index = mapping->writeback_index; /* Start from prev offset */
OGAWA Hirofumi111ebb62006-06-23 02:03:26 -07001889 end = -1;
1890 } else {
1891 index = wbc->range_start >> PAGE_CACHE_SHIFT;
1892 end = wbc->range_end >> PAGE_CACHE_SHIFT;
1893 if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX)
Jeff Laytonc3d17b62011-05-19 16:22:57 -04001894 range_whole = true;
1895 scanned = true;
Steve French37c0eb42005-10-05 14:50:29 -07001896 }
1897retry:
Jeff Laytonc3d17b62011-05-19 16:22:57 -04001898 while (!done && index <= end) {
1899 unsigned int i, nr_pages, found_pages;
1900 pgoff_t next = 0, tofind;
1901 struct page **pages;
Steve French37c0eb42005-10-05 14:50:29 -07001902
Jeff Laytonc3d17b62011-05-19 16:22:57 -04001903 tofind = min((cifs_sb->wsize / PAGE_CACHE_SIZE) - 1,
1904 end - index) + 1;
Steve French37c0eb42005-10-05 14:50:29 -07001905
Jeff Laytonc2e87642012-03-23 14:40:55 -04001906 wdata = cifs_writedata_alloc((unsigned int)tofind,
1907 cifs_writev_complete);
Jeff Laytonc3d17b62011-05-19 16:22:57 -04001908 if (!wdata) {
1909 rc = -ENOMEM;
1910 break;
1911 }
1912
1913 /*
1914 * find_get_pages_tag seems to return a max of 256 on each
1915 * iteration, so we must call it several times in order to
1916 * fill the array or the wsize is effectively limited to
1917 * 256 * PAGE_CACHE_SIZE.
1918 */
1919 found_pages = 0;
1920 pages = wdata->pages;
1921 do {
1922 nr_pages = find_get_pages_tag(mapping, &index,
1923 PAGECACHE_TAG_DIRTY,
1924 tofind, pages);
1925 found_pages += nr_pages;
1926 tofind -= nr_pages;
1927 pages += nr_pages;
1928 } while (nr_pages && tofind && index <= end);
1929
1930 if (found_pages == 0) {
1931 kref_put(&wdata->refcount, cifs_writedata_release);
1932 break;
1933 }
1934
1935 nr_pages = 0;
1936 for (i = 0; i < found_pages; i++) {
1937 page = wdata->pages[i];
Steve French37c0eb42005-10-05 14:50:29 -07001938 /*
1939 * At this point we hold neither mapping->tree_lock nor
1940 * lock on the page itself: the page may be truncated or
1941 * invalidated (changing page->mapping to NULL), or even
1942 * swizzled back from swapper_space to tmpfs file
1943 * mapping
1944 */
1945
Jeff Laytonc3d17b62011-05-19 16:22:57 -04001946 if (nr_pages == 0)
Steve French37c0eb42005-10-05 14:50:29 -07001947 lock_page(page);
Nick Piggin529ae9a2008-08-02 12:01:03 +02001948 else if (!trylock_page(page))
Steve French37c0eb42005-10-05 14:50:29 -07001949 break;
1950
1951 if (unlikely(page->mapping != mapping)) {
1952 unlock_page(page);
1953 break;
1954 }
1955
OGAWA Hirofumi111ebb62006-06-23 02:03:26 -07001956 if (!wbc->range_cyclic && page->index > end) {
Jeff Laytonc3d17b62011-05-19 16:22:57 -04001957 done = true;
Steve French37c0eb42005-10-05 14:50:29 -07001958 unlock_page(page);
1959 break;
1960 }
1961
1962 if (next && (page->index != next)) {
1963 /* Not next consecutive page */
1964 unlock_page(page);
1965 break;
1966 }
1967
1968 if (wbc->sync_mode != WB_SYNC_NONE)
1969 wait_on_page_writeback(page);
1970
1971 if (PageWriteback(page) ||
Linus Torvaldscb876f42006-12-23 16:19:07 -08001972 !clear_page_dirty_for_io(page)) {
Steve French37c0eb42005-10-05 14:50:29 -07001973 unlock_page(page);
1974 break;
1975 }
Steve French84d2f072005-10-12 15:32:05 -07001976
Linus Torvaldscb876f42006-12-23 16:19:07 -08001977 /*
1978 * This actually clears the dirty bit in the radix tree.
1979 * See cifs_writepage() for more commentary.
1980 */
1981 set_page_writeback(page);
1982
Jeff Layton3a98b862012-11-26 09:48:41 -05001983 if (page_offset(page) >= i_size_read(mapping->host)) {
Jeff Laytonc3d17b62011-05-19 16:22:57 -04001984 done = true;
Steve French84d2f072005-10-12 15:32:05 -07001985 unlock_page(page);
Linus Torvaldscb876f42006-12-23 16:19:07 -08001986 end_page_writeback(page);
Steve French84d2f072005-10-12 15:32:05 -07001987 break;
1988 }
1989
Jeff Laytonc3d17b62011-05-19 16:22:57 -04001990 wdata->pages[i] = page;
Steve French37c0eb42005-10-05 14:50:29 -07001991 next = page->index + 1;
Jeff Laytonc3d17b62011-05-19 16:22:57 -04001992 ++nr_pages;
Steve French37c0eb42005-10-05 14:50:29 -07001993 }
Jeff Laytonc3d17b62011-05-19 16:22:57 -04001994
1995 /* reset index to refind any pages skipped */
1996 if (nr_pages == 0)
1997 index = wdata->pages[0]->index + 1;
1998
1999 /* put any pages we aren't going to use */
2000 for (i = nr_pages; i < found_pages; i++) {
2001 page_cache_release(wdata->pages[i]);
2002 wdata->pages[i] = NULL;
2003 }
2004
2005 /* nothing to write? */
2006 if (nr_pages == 0) {
2007 kref_put(&wdata->refcount, cifs_writedata_release);
2008 continue;
2009 }
2010
2011 wdata->sync_mode = wbc->sync_mode;
2012 wdata->nr_pages = nr_pages;
2013 wdata->offset = page_offset(wdata->pages[0]);
Jeff Laytoneddb0792012-09-18 16:20:35 -07002014 wdata->pagesz = PAGE_CACHE_SIZE;
2015 wdata->tailsz =
Jeff Layton3a98b862012-11-26 09:48:41 -05002016 min(i_size_read(mapping->host) -
2017 page_offset(wdata->pages[nr_pages - 1]),
Jeff Laytoneddb0792012-09-18 16:20:35 -07002018 (loff_t)PAGE_CACHE_SIZE);
2019 wdata->bytes = ((nr_pages - 1) * PAGE_CACHE_SIZE) +
2020 wdata->tailsz;
Jeff Laytonc3d17b62011-05-19 16:22:57 -04002021
2022 do {
2023 if (wdata->cfile != NULL)
2024 cifsFileInfo_put(wdata->cfile);
2025 wdata->cfile = find_writable_file(CIFS_I(mapping->host),
2026 false);
2027 if (!wdata->cfile) {
Joe Perchesf96637b2013-05-04 22:12:25 -05002028 cifs_dbg(VFS, "No writable handles for inode\n");
Steve French23e7dd72005-10-20 13:44:56 -07002029 rc = -EBADF;
Jeff Laytonc3d17b62011-05-19 16:22:57 -04002030 break;
Steve French37c0eb42005-10-05 14:50:29 -07002031 }
Jeff Laytonfe5f5d22012-03-23 14:40:55 -04002032 wdata->pid = wdata->cfile->pid;
Pavel Shilovskyc9de5c82012-09-18 16:20:29 -07002033 server = tlink_tcon(wdata->cfile->tlink)->ses->server;
2034 rc = server->ops->async_writev(wdata);
Jeff Laytonc3d17b62011-05-19 16:22:57 -04002035 } while (wbc->sync_mode == WB_SYNC_ALL && rc == -EAGAIN);
Jeff Laytonf3983c22010-09-22 16:17:40 -07002036
Jeff Laytonc3d17b62011-05-19 16:22:57 -04002037 for (i = 0; i < nr_pages; ++i)
2038 unlock_page(wdata->pages[i]);
Jeff Layton941b8532011-01-11 07:24:01 -05002039
Jeff Laytonc3d17b62011-05-19 16:22:57 -04002040 /* send failure -- clean up the mess */
2041 if (rc != 0) {
2042 for (i = 0; i < nr_pages; ++i) {
Jeff Layton941b8532011-01-11 07:24:01 -05002043 if (rc == -EAGAIN)
Jeff Laytonc3d17b62011-05-19 16:22:57 -04002044 redirty_page_for_writepage(wbc,
2045 wdata->pages[i]);
2046 else
2047 SetPageError(wdata->pages[i]);
2048 end_page_writeback(wdata->pages[i]);
2049 page_cache_release(wdata->pages[i]);
Steve French37c0eb42005-10-05 14:50:29 -07002050 }
Jeff Layton941b8532011-01-11 07:24:01 -05002051 if (rc != -EAGAIN)
2052 mapping_set_error(mapping, rc);
Jeff Laytonc3d17b62011-05-19 16:22:57 -04002053 }
2054 kref_put(&wdata->refcount, cifs_writedata_release);
Jeff Layton941b8532011-01-11 07:24:01 -05002055
Jeff Laytonc3d17b62011-05-19 16:22:57 -04002056 wbc->nr_to_write -= nr_pages;
2057 if (wbc->nr_to_write <= 0)
2058 done = true;
Dave Kleikampb066a482008-11-18 03:49:05 +00002059
Jeff Laytonc3d17b62011-05-19 16:22:57 -04002060 index = next;
Steve French37c0eb42005-10-05 14:50:29 -07002061 }
Jeff Laytonc3d17b62011-05-19 16:22:57 -04002062
Steve French37c0eb42005-10-05 14:50:29 -07002063 if (!scanned && !done) {
2064 /*
2065 * We hit the last page and there is more work to be done: wrap
2066 * back to the start of the file
2067 */
Jeff Laytonc3d17b62011-05-19 16:22:57 -04002068 scanned = true;
Steve French37c0eb42005-10-05 14:50:29 -07002069 index = 0;
2070 goto retry;
2071 }
Jeff Laytonc3d17b62011-05-19 16:22:57 -04002072
OGAWA Hirofumi111ebb62006-06-23 02:03:26 -07002073 if (wbc->range_cyclic || (range_whole && wbc->nr_to_write > 0))
Steve French37c0eb42005-10-05 14:50:29 -07002074 mapping->writeback_index = index;
2075
Linus Torvalds1da177e2005-04-16 15:20:36 -07002076 return rc;
2077}
Linus Torvalds1da177e2005-04-16 15:20:36 -07002078
Pavel Shilovsky9ad15062011-04-08 05:29:10 +04002079static int
2080cifs_writepage_locked(struct page *page, struct writeback_control *wbc)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002081{
Pavel Shilovsky9ad15062011-04-08 05:29:10 +04002082 int rc;
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04002083 unsigned int xid;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002084
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04002085 xid = get_xid();
Linus Torvalds1da177e2005-04-16 15:20:36 -07002086/* BB add check for wbc flags */
2087 page_cache_get(page);
Steve Frenchad7a2922008-02-07 23:25:02 +00002088 if (!PageUptodate(page))
Joe Perchesf96637b2013-05-04 22:12:25 -05002089 cifs_dbg(FYI, "ppw - page not up to date\n");
Linus Torvaldscb876f42006-12-23 16:19:07 -08002090
2091 /*
2092 * Set the "writeback" flag, and clear "dirty" in the radix tree.
2093 *
2094 * A writepage() implementation always needs to do either this,
2095 * or re-dirty the page with "redirty_page_for_writepage()" in
2096 * the case of a failure.
2097 *
2098 * Just unlocking the page will cause the radix tree tag-bits
2099 * to fail to update with the state of the page correctly.
2100 */
Steve Frenchfb8c4b12007-07-10 01:16:18 +00002101 set_page_writeback(page);
Pavel Shilovsky9ad15062011-04-08 05:29:10 +04002102retry_write:
Linus Torvalds1da177e2005-04-16 15:20:36 -07002103 rc = cifs_partialpagewrite(page, 0, PAGE_CACHE_SIZE);
Pavel Shilovsky9ad15062011-04-08 05:29:10 +04002104 if (rc == -EAGAIN && wbc->sync_mode == WB_SYNC_ALL)
2105 goto retry_write;
2106 else if (rc == -EAGAIN)
2107 redirty_page_for_writepage(wbc, page);
2108 else if (rc != 0)
2109 SetPageError(page);
2110 else
2111 SetPageUptodate(page);
Linus Torvaldscb876f42006-12-23 16:19:07 -08002112 end_page_writeback(page);
2113 page_cache_release(page);
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04002114 free_xid(xid);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002115 return rc;
2116}
2117
Pavel Shilovsky9ad15062011-04-08 05:29:10 +04002118static int cifs_writepage(struct page *page, struct writeback_control *wbc)
2119{
2120 int rc = cifs_writepage_locked(page, wbc);
2121 unlock_page(page);
2122 return rc;
2123}
2124
Nick Piggind9414772008-09-24 11:32:59 -04002125static int cifs_write_end(struct file *file, struct address_space *mapping,
2126 loff_t pos, unsigned len, unsigned copied,
2127 struct page *page, void *fsdata)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002128{
Nick Piggind9414772008-09-24 11:32:59 -04002129 int rc;
2130 struct inode *inode = mapping->host;
Pavel Shilovskyd4ffff12011-05-26 06:02:00 +00002131 struct cifsFileInfo *cfile = file->private_data;
2132 struct cifs_sb_info *cifs_sb = CIFS_SB(cfile->dentry->d_sb);
2133 __u32 pid;
2134
2135 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RWPIDFORWARD)
2136 pid = cfile->pid;
2137 else
2138 pid = current->tgid;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002139
Joe Perchesf96637b2013-05-04 22:12:25 -05002140 cifs_dbg(FYI, "write_end for page %p from pos %lld with %d bytes\n",
Joe Perchesb6b38f72010-04-21 03:50:45 +00002141 page, pos, copied);
Steve Frenchad7a2922008-02-07 23:25:02 +00002142
Jeff Laytona98ee8c2008-11-26 19:32:33 +00002143 if (PageChecked(page)) {
2144 if (copied == len)
2145 SetPageUptodate(page);
2146 ClearPageChecked(page);
2147 } else if (!PageUptodate(page) && copied == PAGE_CACHE_SIZE)
Nick Piggind9414772008-09-24 11:32:59 -04002148 SetPageUptodate(page);
2149
Linus Torvalds1da177e2005-04-16 15:20:36 -07002150 if (!PageUptodate(page)) {
Nick Piggind9414772008-09-24 11:32:59 -04002151 char *page_data;
2152 unsigned offset = pos & (PAGE_CACHE_SIZE - 1);
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04002153 unsigned int xid;
Nick Piggind9414772008-09-24 11:32:59 -04002154
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04002155 xid = get_xid();
Linus Torvalds1da177e2005-04-16 15:20:36 -07002156 /* this is probably better than directly calling
2157 partialpage_write since in this function the file handle is
2158 known which we might as well leverage */
2159 /* BB check if anything else missing out of ppw
2160 such as updating last write time */
2161 page_data = kmap(page);
Pavel Shilovskyd4ffff12011-05-26 06:02:00 +00002162 rc = cifs_write(cfile, pid, page_data + offset, copied, &pos);
Nick Piggind9414772008-09-24 11:32:59 -04002163 /* if (rc < 0) should we set writebehind rc? */
Linus Torvalds1da177e2005-04-16 15:20:36 -07002164 kunmap(page);
Nick Piggind9414772008-09-24 11:32:59 -04002165
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04002166 free_xid(xid);
Steve Frenchfb8c4b12007-07-10 01:16:18 +00002167 } else {
Nick Piggind9414772008-09-24 11:32:59 -04002168 rc = copied;
2169 pos += copied;
Pavel Shilovskyca8aa292012-12-21 15:05:47 +04002170 set_page_dirty(page);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002171 }
2172
Nick Piggind9414772008-09-24 11:32:59 -04002173 if (rc > 0) {
2174 spin_lock(&inode->i_lock);
2175 if (pos > inode->i_size)
2176 i_size_write(inode, pos);
2177 spin_unlock(&inode->i_lock);
2178 }
2179
2180 unlock_page(page);
2181 page_cache_release(page);
2182
Linus Torvalds1da177e2005-04-16 15:20:36 -07002183 return rc;
2184}
2185
Josef Bacik02c24a82011-07-16 20:44:56 -04002186int cifs_strict_fsync(struct file *file, loff_t start, loff_t end,
2187 int datasync)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002188{
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04002189 unsigned int xid;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002190 int rc = 0;
Steve French96daf2b2011-05-27 04:34:02 +00002191 struct cifs_tcon *tcon;
Pavel Shilovsky1d8c4c02012-09-18 16:20:27 -07002192 struct TCP_Server_Info *server;
Joe Perchesc21dfb62010-07-12 13:50:14 -07002193 struct cifsFileInfo *smbfile = file->private_data;
Al Viro496ad9a2013-01-23 17:07:38 -05002194 struct inode *inode = file_inode(file);
Pavel Shilovsky8be7e6b2010-12-12 13:11:13 +03002195 struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002196
Josef Bacik02c24a82011-07-16 20:44:56 -04002197 rc = filemap_write_and_wait_range(inode->i_mapping, start, end);
2198 if (rc)
2199 return rc;
2200 mutex_lock(&inode->i_mutex);
2201
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04002202 xid = get_xid();
Linus Torvalds1da177e2005-04-16 15:20:36 -07002203
Joe Perchesf96637b2013-05-04 22:12:25 -05002204 cifs_dbg(FYI, "Sync file - name: %s datasync: 0x%x\n",
2205 file->f_path.dentry->d_name.name, datasync);
Steve French50c2f752007-07-13 00:33:32 +00002206
Pavel Shilovsky6feb9892011-04-07 18:18:11 +04002207 if (!CIFS_I(inode)->clientCanCacheRead) {
2208 rc = cifs_invalidate_mapping(inode);
2209 if (rc) {
Joe Perchesf96637b2013-05-04 22:12:25 -05002210 cifs_dbg(FYI, "rc: %d during invalidate phase\n", rc);
Pavel Shilovsky6feb9892011-04-07 18:18:11 +04002211 rc = 0; /* don't care about it in fsync */
2212 }
2213 }
Jeff Laytoneb4b7562010-10-22 14:52:29 -04002214
Pavel Shilovsky8be7e6b2010-12-12 13:11:13 +03002215 tcon = tlink_tcon(smbfile->tlink);
Pavel Shilovsky1d8c4c02012-09-18 16:20:27 -07002216 if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOSSYNC)) {
2217 server = tcon->ses->server;
2218 if (server->ops->flush)
2219 rc = server->ops->flush(xid, tcon, &smbfile->fid);
2220 else
2221 rc = -ENOSYS;
2222 }
Pavel Shilovsky8be7e6b2010-12-12 13:11:13 +03002223
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04002224 free_xid(xid);
Josef Bacik02c24a82011-07-16 20:44:56 -04002225 mutex_unlock(&inode->i_mutex);
Pavel Shilovsky8be7e6b2010-12-12 13:11:13 +03002226 return rc;
2227}
2228
Josef Bacik02c24a82011-07-16 20:44:56 -04002229int cifs_fsync(struct file *file, loff_t start, loff_t end, int datasync)
Pavel Shilovsky8be7e6b2010-12-12 13:11:13 +03002230{
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04002231 unsigned int xid;
Pavel Shilovsky8be7e6b2010-12-12 13:11:13 +03002232 int rc = 0;
Steve French96daf2b2011-05-27 04:34:02 +00002233 struct cifs_tcon *tcon;
Pavel Shilovsky1d8c4c02012-09-18 16:20:27 -07002234 struct TCP_Server_Info *server;
Pavel Shilovsky8be7e6b2010-12-12 13:11:13 +03002235 struct cifsFileInfo *smbfile = file->private_data;
2236 struct cifs_sb_info *cifs_sb = CIFS_SB(file->f_path.dentry->d_sb);
Josef Bacik02c24a82011-07-16 20:44:56 -04002237 struct inode *inode = file->f_mapping->host;
2238
2239 rc = filemap_write_and_wait_range(inode->i_mapping, start, end);
2240 if (rc)
2241 return rc;
2242 mutex_lock(&inode->i_mutex);
Pavel Shilovsky8be7e6b2010-12-12 13:11:13 +03002243
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04002244 xid = get_xid();
Pavel Shilovsky8be7e6b2010-12-12 13:11:13 +03002245
Joe Perchesf96637b2013-05-04 22:12:25 -05002246 cifs_dbg(FYI, "Sync file - name: %s datasync: 0x%x\n",
2247 file->f_path.dentry->d_name.name, datasync);
Pavel Shilovsky8be7e6b2010-12-12 13:11:13 +03002248
2249 tcon = tlink_tcon(smbfile->tlink);
Pavel Shilovsky1d8c4c02012-09-18 16:20:27 -07002250 if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOSSYNC)) {
2251 server = tcon->ses->server;
2252 if (server->ops->flush)
2253 rc = server->ops->flush(xid, tcon, &smbfile->fid);
2254 else
2255 rc = -ENOSYS;
2256 }
Steve Frenchb298f222009-02-21 21:17:43 +00002257
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04002258 free_xid(xid);
Josef Bacik02c24a82011-07-16 20:44:56 -04002259 mutex_unlock(&inode->i_mutex);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002260 return rc;
2261}
2262
Linus Torvalds1da177e2005-04-16 15:20:36 -07002263/*
2264 * As file closes, flush all cached write data for this inode checking
2265 * for write behind errors.
2266 */
Miklos Szeredi75e1fcc2006-06-23 02:05:12 -07002267int cifs_flush(struct file *file, fl_owner_t id)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002268{
Al Viro496ad9a2013-01-23 17:07:38 -05002269 struct inode *inode = file_inode(file);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002270 int rc = 0;
2271
Jeff Laytoneb4b7562010-10-22 14:52:29 -04002272 if (file->f_mode & FMODE_WRITE)
Jeff Laytond3f13222010-10-15 15:34:07 -04002273 rc = filemap_write_and_wait(inode->i_mapping);
Steve French50c2f752007-07-13 00:33:32 +00002274
Joe Perchesf96637b2013-05-04 22:12:25 -05002275 cifs_dbg(FYI, "Flush inode %p file %p rc %d\n", inode, file, rc);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002276
2277 return rc;
2278}
2279
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05002280static int
2281cifs_write_allocate_pages(struct page **pages, unsigned long num_pages)
2282{
2283 int rc = 0;
2284 unsigned long i;
2285
2286 for (i = 0; i < num_pages; i++) {
Jeff Laytone94f7ba2012-03-23 14:40:56 -04002287 pages[i] = alloc_page(GFP_KERNEL|__GFP_HIGHMEM);
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05002288 if (!pages[i]) {
2289 /*
2290 * save number of pages we have already allocated and
2291 * return with ENOMEM error
2292 */
2293 num_pages = i;
2294 rc = -ENOMEM;
Jeff Laytone94f7ba2012-03-23 14:40:56 -04002295 break;
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05002296 }
2297 }
2298
Jeff Laytone94f7ba2012-03-23 14:40:56 -04002299 if (rc) {
2300 for (i = 0; i < num_pages; i++)
2301 put_page(pages[i]);
2302 }
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05002303 return rc;
2304}
2305
2306static inline
2307size_t get_numpages(const size_t wsize, const size_t len, size_t *cur_len)
2308{
2309 size_t num_pages;
2310 size_t clen;
2311
2312 clen = min_t(const size_t, len, wsize);
Jeff Laytona7103b92012-03-23 14:40:56 -04002313 num_pages = DIV_ROUND_UP(clen, PAGE_SIZE);
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05002314
2315 if (cur_len)
2316 *cur_len = clen;
2317
2318 return num_pages;
2319}
2320
Jeff Laytonda82f7e2012-03-23 14:40:56 -04002321static void
Jeff Laytonda82f7e2012-03-23 14:40:56 -04002322cifs_uncached_writev_complete(struct work_struct *work)
2323{
2324 int i;
2325 struct cifs_writedata *wdata = container_of(work,
2326 struct cifs_writedata, work);
2327 struct inode *inode = wdata->cfile->dentry->d_inode;
2328 struct cifsInodeInfo *cifsi = CIFS_I(inode);
2329
2330 spin_lock(&inode->i_lock);
2331 cifs_update_eof(cifsi, wdata->offset, wdata->bytes);
2332 if (cifsi->server_eof > inode->i_size)
2333 i_size_write(inode, cifsi->server_eof);
2334 spin_unlock(&inode->i_lock);
2335
2336 complete(&wdata->done);
2337
2338 if (wdata->result != -EAGAIN) {
2339 for (i = 0; i < wdata->nr_pages; i++)
2340 put_page(wdata->pages[i]);
2341 }
2342
2343 kref_put(&wdata->refcount, cifs_writedata_release);
2344}
2345
2346/* attempt to send write to server, retry on any -EAGAIN errors */
2347static int
2348cifs_uncached_retry_writev(struct cifs_writedata *wdata)
2349{
2350 int rc;
Pavel Shilovskyc9de5c82012-09-18 16:20:29 -07002351 struct TCP_Server_Info *server;
2352
2353 server = tlink_tcon(wdata->cfile->tlink)->ses->server;
Jeff Laytonda82f7e2012-03-23 14:40:56 -04002354
2355 do {
2356 if (wdata->cfile->invalidHandle) {
2357 rc = cifs_reopen_file(wdata->cfile, false);
2358 if (rc != 0)
2359 continue;
2360 }
Pavel Shilovskyc9de5c82012-09-18 16:20:29 -07002361 rc = server->ops->async_writev(wdata);
Jeff Laytonda82f7e2012-03-23 14:40:56 -04002362 } while (rc == -EAGAIN);
2363
2364 return rc;
2365}
2366
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05002367static ssize_t
2368cifs_iovec_write(struct file *file, const struct iovec *iov,
2369 unsigned long nr_segs, loff_t *poffset)
2370{
Jeff Laytonda82f7e2012-03-23 14:40:56 -04002371 unsigned long nr_pages, i;
Pavel Shilovsky76429c12011-01-31 16:03:08 +03002372 size_t copied, len, cur_len;
2373 ssize_t total_written = 0;
Jeff Layton3af9d8f2012-04-13 17:16:59 -04002374 loff_t offset;
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05002375 struct iov_iter it;
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05002376 struct cifsFileInfo *open_file;
Jeff Laytonda82f7e2012-03-23 14:40:56 -04002377 struct cifs_tcon *tcon;
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05002378 struct cifs_sb_info *cifs_sb;
Jeff Laytonda82f7e2012-03-23 14:40:56 -04002379 struct cifs_writedata *wdata, *tmp;
2380 struct list_head wdata_list;
2381 int rc;
2382 pid_t pid;
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05002383
2384 len = iov_length(iov, nr_segs);
2385 if (!len)
2386 return 0;
2387
2388 rc = generic_write_checks(file, poffset, &len, 0);
2389 if (rc)
2390 return rc;
2391
Jeff Laytonda82f7e2012-03-23 14:40:56 -04002392 INIT_LIST_HEAD(&wdata_list);
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05002393 cifs_sb = CIFS_SB(file->f_path.dentry->d_sb);
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05002394 open_file = file->private_data;
Jeff Laytonda82f7e2012-03-23 14:40:56 -04002395 tcon = tlink_tcon(open_file->tlink);
Pavel Shilovskyc9de5c82012-09-18 16:20:29 -07002396
2397 if (!tcon->ses->server->ops->async_writev)
2398 return -ENOSYS;
2399
Jeff Layton3af9d8f2012-04-13 17:16:59 -04002400 offset = *poffset;
Pavel Shilovskyd4ffff12011-05-26 06:02:00 +00002401
2402 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RWPIDFORWARD)
2403 pid = open_file->pid;
2404 else
2405 pid = current->tgid;
2406
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05002407 iov_iter_init(&it, iov, nr_segs, len, 0);
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05002408 do {
Jeff Laytonda82f7e2012-03-23 14:40:56 -04002409 size_t save_len;
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05002410
Jeff Laytonda82f7e2012-03-23 14:40:56 -04002411 nr_pages = get_numpages(cifs_sb->wsize, len, &cur_len);
2412 wdata = cifs_writedata_alloc(nr_pages,
2413 cifs_uncached_writev_complete);
2414 if (!wdata) {
2415 rc = -ENOMEM;
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05002416 break;
2417 }
2418
Jeff Laytonda82f7e2012-03-23 14:40:56 -04002419 rc = cifs_write_allocate_pages(wdata->pages, nr_pages);
2420 if (rc) {
2421 kfree(wdata);
2422 break;
2423 }
2424
2425 save_len = cur_len;
2426 for (i = 0; i < nr_pages; i++) {
2427 copied = min_t(const size_t, cur_len, PAGE_SIZE);
2428 copied = iov_iter_copy_from_user(wdata->pages[i], &it,
2429 0, copied);
2430 cur_len -= copied;
2431 iov_iter_advance(&it, copied);
2432 }
2433 cur_len = save_len - cur_len;
2434
2435 wdata->sync_mode = WB_SYNC_ALL;
2436 wdata->nr_pages = nr_pages;
2437 wdata->offset = (__u64)offset;
2438 wdata->cfile = cifsFileInfo_get(open_file);
2439 wdata->pid = pid;
2440 wdata->bytes = cur_len;
Jeff Laytoneddb0792012-09-18 16:20:35 -07002441 wdata->pagesz = PAGE_SIZE;
2442 wdata->tailsz = cur_len - ((nr_pages - 1) * PAGE_SIZE);
Jeff Laytonda82f7e2012-03-23 14:40:56 -04002443 rc = cifs_uncached_retry_writev(wdata);
2444 if (rc) {
2445 kref_put(&wdata->refcount, cifs_writedata_release);
2446 break;
2447 }
2448
2449 list_add_tail(&wdata->list, &wdata_list);
2450 offset += cur_len;
2451 len -= cur_len;
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05002452 } while (len > 0);
2453
Jeff Laytonda82f7e2012-03-23 14:40:56 -04002454 /*
2455 * If at least one write was successfully sent, then discard any rc
2456 * value from the later writes. If the other write succeeds, then
2457 * we'll end up returning whatever was written. If it fails, then
2458 * we'll get a new rc value from that.
2459 */
2460 if (!list_empty(&wdata_list))
2461 rc = 0;
2462
2463 /*
2464 * Wait for and collect replies for any successful sends in order of
2465 * increasing offset. Once an error is hit or we get a fatal signal
2466 * while waiting, then return without waiting for any more replies.
2467 */
2468restart_loop:
2469 list_for_each_entry_safe(wdata, tmp, &wdata_list, list) {
2470 if (!rc) {
2471 /* FIXME: freezable too? */
2472 rc = wait_for_completion_killable(&wdata->done);
2473 if (rc)
2474 rc = -EINTR;
2475 else if (wdata->result)
2476 rc = wdata->result;
2477 else
2478 total_written += wdata->bytes;
2479
2480 /* resend call if it's a retryable error */
2481 if (rc == -EAGAIN) {
2482 rc = cifs_uncached_retry_writev(wdata);
2483 goto restart_loop;
2484 }
2485 }
2486 list_del_init(&wdata->list);
2487 kref_put(&wdata->refcount, cifs_writedata_release);
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05002488 }
2489
Jeff Laytonda82f7e2012-03-23 14:40:56 -04002490 if (total_written > 0)
2491 *poffset += total_written;
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05002492
Jeff Laytonda82f7e2012-03-23 14:40:56 -04002493 cifs_stats_bytes_written(tcon, total_written);
2494 return total_written ? total_written : (ssize_t)rc;
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05002495}
2496
Pavel Shilovsky0b81c1c2011-03-10 10:11:05 +03002497ssize_t cifs_user_writev(struct kiocb *iocb, const struct iovec *iov,
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05002498 unsigned long nr_segs, loff_t pos)
2499{
2500 ssize_t written;
2501 struct inode *inode;
2502
Al Viro496ad9a2013-01-23 17:07:38 -05002503 inode = file_inode(iocb->ki_filp);
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05002504
2505 /*
2506 * BB - optimize the way when signing is disabled. We can drop this
2507 * extra memory-to-memory copying and use iovec buffers for constructing
2508 * write request.
2509 */
2510
2511 written = cifs_iovec_write(iocb->ki_filp, iov, nr_segs, &pos);
2512 if (written > 0) {
2513 CIFS_I(inode)->invalid_mapping = true;
2514 iocb->ki_pos = pos;
2515 }
2516
2517 return written;
2518}
2519
Pavel Shilovsky579f9052012-09-19 06:22:44 -07002520static ssize_t
Pavel Shilovskyca8aa292012-12-21 15:05:47 +04002521cifs_writev(struct kiocb *iocb, const struct iovec *iov,
2522 unsigned long nr_segs, loff_t pos)
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05002523{
Pavel Shilovsky579f9052012-09-19 06:22:44 -07002524 struct file *file = iocb->ki_filp;
2525 struct cifsFileInfo *cfile = (struct cifsFileInfo *)file->private_data;
2526 struct inode *inode = file->f_mapping->host;
2527 struct cifsInodeInfo *cinode = CIFS_I(inode);
2528 struct TCP_Server_Info *server = tlink_tcon(cfile->tlink)->ses->server;
2529 ssize_t rc = -EACCES;
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05002530
Pavel Shilovsky579f9052012-09-19 06:22:44 -07002531 BUG_ON(iocb->ki_pos != pos);
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05002532
Pavel Shilovsky579f9052012-09-19 06:22:44 -07002533 /*
2534 * We need to hold the sem to be sure nobody modifies lock list
2535 * with a brlock that prevents writing.
2536 */
2537 down_read(&cinode->lock_sem);
2538 if (!cifs_find_lock_conflict(cfile, pos, iov_length(iov, nr_segs),
2539 server->vals->exclusive_lock_type, NULL,
Pavel Shilovsky081c0412012-11-27 18:38:53 +04002540 CIFS_WRITE_OP)) {
Pavel Shilovsky579f9052012-09-19 06:22:44 -07002541 mutex_lock(&inode->i_mutex);
2542 rc = __generic_file_aio_write(iocb, iov, nr_segs,
Pavel Shilovskyca8aa292012-12-21 15:05:47 +04002543 &iocb->ki_pos);
Pavel Shilovsky579f9052012-09-19 06:22:44 -07002544 mutex_unlock(&inode->i_mutex);
2545 }
2546
2547 if (rc > 0 || rc == -EIOCBQUEUED) {
2548 ssize_t err;
2549
2550 err = generic_write_sync(file, pos, rc);
2551 if (err < 0 && rc > 0)
2552 rc = err;
2553 }
2554
2555 up_read(&cinode->lock_sem);
Pavel Shilovsky579f9052012-09-19 06:22:44 -07002556 return rc;
2557}
2558
/*
 * Strict-cache write dispatcher, chosen by oplock state:
 *  - full (write) oplock + POSIX brlock capability: plain cached write;
 *  - full oplock without POSIX brlocks: cifs_writev() (checks mandatory
 *    lock conflicts under lock_sem);
 *  - no full oplock: uncached cifs_user_writev(), after which any
 *    still-held level2 (read) oplock is dropped and the mapping
 *    invalidated so stale cached data is not read back.
 */
ssize_t
cifs_strict_writev(struct kiocb *iocb, const struct iovec *iov,
		   unsigned long nr_segs, loff_t pos)
{
	struct inode *inode = file_inode(iocb->ki_filp);
	struct cifsInodeInfo *cinode = CIFS_I(inode);
	struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
	struct cifsFileInfo *cfile = (struct cifsFileInfo *)
					iocb->ki_filp->private_data;
	struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
	ssize_t written;

	if (cinode->clientCanCacheAll) {
		if (cap_unix(tcon->ses) &&
		(CIFS_UNIX_FCNTL_CAP & le64_to_cpu(tcon->fsUnixInfo.Capability))
		  && ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOPOSIXBRL) == 0))
			return generic_file_aio_write(iocb, iov, nr_segs, pos);
		return cifs_writev(iocb, iov, nr_segs, pos);
	}
	/*
	 * For non-oplocked files in strict cache mode we need to write the data
	 * to the server exactly from the pos to pos+len-1 rather than flush all
	 * affected pages because it may cause a error with mandatory locks on
	 * these pages but not on the region from pos to ppos+len-1.
	 */
	written = cifs_user_writev(iocb, iov, nr_segs, pos);
	if (written > 0 && cinode->clientCanCacheRead) {
		/*
		 * Windows 7 server can delay breaking level2 oplock if a write
		 * request comes - break it on the client to prevent reading
		 * an old data.
		 */
		cifs_invalidate_mapping(inode);
		cifs_dbg(FYI, "Set no oplock for inode=%p after a write operation\n",
			 inode);
		cinode->clientCanCacheRead = false;
	}
	return written;
}
2598
Jeff Layton0471ca32012-05-16 07:13:16 -04002599static struct cifs_readdata *
Jeff Laytonf4e49cd2012-09-18 16:20:36 -07002600cifs_readdata_alloc(unsigned int nr_pages, work_func_t complete)
Jeff Layton0471ca32012-05-16 07:13:16 -04002601{
2602 struct cifs_readdata *rdata;
Jeff Laytonf4e49cd2012-09-18 16:20:36 -07002603
Jeff Laytonc5fab6f2012-09-19 06:22:30 -07002604 rdata = kzalloc(sizeof(*rdata) + (sizeof(struct page *) * nr_pages),
2605 GFP_KERNEL);
Jeff Layton0471ca32012-05-16 07:13:16 -04002606 if (rdata != NULL) {
Jeff Layton6993f742012-05-16 07:13:17 -04002607 kref_init(&rdata->refcount);
Jeff Layton1c892542012-05-16 07:13:17 -04002608 INIT_LIST_HEAD(&rdata->list);
2609 init_completion(&rdata->done);
Jeff Layton0471ca32012-05-16 07:13:16 -04002610 INIT_WORK(&rdata->work, complete);
Jeff Layton0471ca32012-05-16 07:13:16 -04002611 }
Jeff Laytonf4e49cd2012-09-18 16:20:36 -07002612
Jeff Layton0471ca32012-05-16 07:13:16 -04002613 return rdata;
2614}
2615
Jeff Layton6993f742012-05-16 07:13:17 -04002616void
2617cifs_readdata_release(struct kref *refcount)
Jeff Layton0471ca32012-05-16 07:13:16 -04002618{
Jeff Layton6993f742012-05-16 07:13:17 -04002619 struct cifs_readdata *rdata = container_of(refcount,
2620 struct cifs_readdata, refcount);
2621
2622 if (rdata->cfile)
2623 cifsFileInfo_put(rdata->cfile);
2624
Jeff Layton0471ca32012-05-16 07:13:16 -04002625 kfree(rdata);
2626}
2627
Jeff Layton2a1bb132012-05-16 07:13:17 -04002628static int
Jeff Laytonc5fab6f2012-09-19 06:22:30 -07002629cifs_read_allocate_pages(struct cifs_readdata *rdata, unsigned int nr_pages)
Jeff Layton1c892542012-05-16 07:13:17 -04002630{
2631 int rc = 0;
Jeff Laytonc5fab6f2012-09-19 06:22:30 -07002632 struct page *page;
Jeff Layton1c892542012-05-16 07:13:17 -04002633 unsigned int i;
2634
Jeff Laytonc5fab6f2012-09-19 06:22:30 -07002635 for (i = 0; i < nr_pages; i++) {
Jeff Layton1c892542012-05-16 07:13:17 -04002636 page = alloc_page(GFP_KERNEL|__GFP_HIGHMEM);
2637 if (!page) {
2638 rc = -ENOMEM;
2639 break;
2640 }
Jeff Laytonc5fab6f2012-09-19 06:22:30 -07002641 rdata->pages[i] = page;
Jeff Layton1c892542012-05-16 07:13:17 -04002642 }
2643
2644 if (rc) {
Jeff Laytonc5fab6f2012-09-19 06:22:30 -07002645 for (i = 0; i < nr_pages; i++) {
2646 put_page(rdata->pages[i]);
2647 rdata->pages[i] = NULL;
Jeff Layton1c892542012-05-16 07:13:17 -04002648 }
2649 }
2650 return rc;
2651}
2652
2653static void
2654cifs_uncached_readdata_release(struct kref *refcount)
2655{
Jeff Layton1c892542012-05-16 07:13:17 -04002656 struct cifs_readdata *rdata = container_of(refcount,
2657 struct cifs_readdata, refcount);
Jeff Laytonc5fab6f2012-09-19 06:22:30 -07002658 unsigned int i;
Jeff Layton1c892542012-05-16 07:13:17 -04002659
Jeff Laytonc5fab6f2012-09-19 06:22:30 -07002660 for (i = 0; i < rdata->nr_pages; i++) {
2661 put_page(rdata->pages[i]);
2662 rdata->pages[i] = NULL;
Jeff Layton1c892542012-05-16 07:13:17 -04002663 }
2664 cifs_readdata_release(refcount);
2665}
2666
/*
 * Dispatch an async read, retrying for as long as the operation comes
 * back with -EAGAIN (session reconnected mid-operation).  A handle that
 * went stale across a reconnect is reopened first.  Note that when the
 * reopen fails, the "continue" re-evaluates the while condition, so any
 * reopen error other than -EAGAIN terminates the loop and is returned.
 */
static int
cifs_retry_async_readv(struct cifs_readdata *rdata)
{
	int rc;
	struct TCP_Server_Info *server;

	server = tlink_tcon(rdata->cfile->tlink)->ses->server;

	do {
		if (rdata->cfile->invalidHandle) {
			/* handle invalidated by reconnect - reopen it */
			rc = cifs_reopen_file(rdata->cfile, true);
			if (rc != 0)
				continue;
		}
		/* hand the request to the protocol-specific sender */
		rc = server->ops->async_readv(rdata);
	} while (rc == -EAGAIN);

	return rc;
}
2686
/**
 * cifs_readdata_to_iov - copy data from pages in response to an iovec
 * @rdata:	the readdata response with list of pages holding data
 * @iov:	vector in which we should copy the data
 * @nr_segs:	number of segments in vector
 * @offset:	offset into file of the first iovec
 * @copied:	used to return the amount of data copied to the iov
 *
 * This function copies data from a list of pages in a readdata response into
 * an array of iovecs. It will first calculate where the data should go
 * based on the info in the readdata and then copy the data into that spot.
 * Returns 0 on success or the memcpy_toiovecend() error; *@copied always
 * reflects how many bytes made it into the iovec.
 */
static ssize_t
cifs_readdata_to_iov(struct cifs_readdata *rdata, const struct iovec *iov,
			unsigned long nr_segs, loff_t offset, ssize_t *copied)
{
	int rc = 0;
	struct iov_iter ii;
	/* byte position of this response within the overall user request */
	size_t pos = rdata->offset - offset;
	ssize_t remaining = rdata->bytes;
	unsigned char *pdata;
	unsigned int i;

	/* set up iov_iter and advance to the correct offset */
	iov_iter_init(&ii, iov, nr_segs, iov_length(iov, nr_segs), 0);
	iov_iter_advance(&ii, pos);

	*copied = 0;
	for (i = 0; i < rdata->nr_pages; i++) {
		ssize_t copy;
		struct page *page = rdata->pages[i];

		/* copy a whole page or whatever's left */
		copy = min_t(ssize_t, remaining, PAGE_SIZE);

		/* ...but limit it to whatever space is left in the iov */
		copy = min_t(ssize_t, copy, iov_iter_count(&ii));

		/* go while there's data to be copied and no errors */
		if (copy && !rc) {
			pdata = kmap(page);
			rc = memcpy_toiovecend(ii.iov, pdata, ii.iov_offset,
						(int)copy);
			kunmap(page);
			if (!rc) {
				/* account for the copy and move the iter */
				*copied += copy;
				remaining -= copy;
				iov_iter_advance(&ii, copy);
			}
		}
	}

	return rc;
}
2741
2742static void
2743cifs_uncached_readv_complete(struct work_struct *work)
2744{
2745 struct cifs_readdata *rdata = container_of(work,
2746 struct cifs_readdata, work);
Jeff Layton1c892542012-05-16 07:13:17 -04002747
2748 complete(&rdata->done);
2749 kref_put(&rdata->refcount, cifs_uncached_readdata_release);
2750}
2751
/*
 * Receive up to @len bytes from the server socket into the pages
 * attached to @rdata (uncached read path).  Full pages are read in
 * place; the final partial page is zero-padded past the data and its
 * valid length recorded in rdata->tailsz.  Pages beyond the data the
 * server actually returned are released and dropped from
 * rdata->pages[].
 *
 * Returns the number of bytes received, or the socket error if nothing
 * was read at all.
 */
static int
cifs_uncached_read_into_pages(struct TCP_Server_Info *server,
			      struct cifs_readdata *rdata, unsigned int len)
{
	int total_read = 0, result = 0;
	unsigned int i;
	unsigned int nr_pages = rdata->nr_pages;
	struct kvec iov;

	rdata->tailsz = PAGE_SIZE;
	for (i = 0; i < nr_pages; i++) {
		struct page *page = rdata->pages[i];

		if (len >= PAGE_SIZE) {
			/* enough data to fill the page */
			iov.iov_base = kmap(page);
			iov.iov_len = PAGE_SIZE;
			cifs_dbg(FYI, "%u: iov_base=%p iov_len=%zu\n",
				 i, iov.iov_base, iov.iov_len);
			len -= PAGE_SIZE;
		} else if (len > 0) {
			/* enough for partial page, fill and zero the rest */
			iov.iov_base = kmap(page);
			iov.iov_len = len;
			cifs_dbg(FYI, "%u: iov_base=%p iov_len=%zu\n",
				 i, iov.iov_base, iov.iov_len);
			memset(iov.iov_base + len, '\0', PAGE_SIZE - len);
			rdata->tailsz = len;
			len = 0;
		} else {
			/* no need to hold page hostage */
			rdata->pages[i] = NULL;
			rdata->nr_pages--;
			put_page(page);
			continue;
		}

		/* page stays kmapped across the socket read, then unmapped */
		result = cifs_readv_from_socket(server, &iov, 1, iov.iov_len);
		kunmap(page);
		if (result < 0)
			break;

		total_read += result;
	}

	return total_read > 0 ? total_read : result;
}
2799
Pavel Shilovskya70307e2010-12-14 11:50:41 +03002800static ssize_t
2801cifs_iovec_read(struct file *file, const struct iovec *iov,
2802 unsigned long nr_segs, loff_t *poffset)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002803{
Jeff Layton1c892542012-05-16 07:13:17 -04002804 ssize_t rc;
Pavel Shilovskya70307e2010-12-14 11:50:41 +03002805 size_t len, cur_len;
Jeff Layton1c892542012-05-16 07:13:17 -04002806 ssize_t total_read = 0;
2807 loff_t offset = *poffset;
2808 unsigned int npages;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002809 struct cifs_sb_info *cifs_sb;
Jeff Layton1c892542012-05-16 07:13:17 -04002810 struct cifs_tcon *tcon;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002811 struct cifsFileInfo *open_file;
Jeff Layton1c892542012-05-16 07:13:17 -04002812 struct cifs_readdata *rdata, *tmp;
2813 struct list_head rdata_list;
2814 pid_t pid;
Pavel Shilovskya70307e2010-12-14 11:50:41 +03002815
2816 if (!nr_segs)
2817 return 0;
2818
2819 len = iov_length(iov, nr_segs);
2820 if (!len)
2821 return 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002822
Jeff Layton1c892542012-05-16 07:13:17 -04002823 INIT_LIST_HEAD(&rdata_list);
Josef "Jeff" Sipeke6a00292006-12-08 02:36:48 -08002824 cifs_sb = CIFS_SB(file->f_path.dentry->d_sb);
Joe Perchesc21dfb62010-07-12 13:50:14 -07002825 open_file = file->private_data;
Jeff Layton1c892542012-05-16 07:13:17 -04002826 tcon = tlink_tcon(open_file->tlink);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002827
Pavel Shilovskyfc9c5962012-09-18 16:20:28 -07002828 if (!tcon->ses->server->ops->async_readv)
2829 return -ENOSYS;
2830
Pavel Shilovskyd4ffff12011-05-26 06:02:00 +00002831 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RWPIDFORWARD)
2832 pid = open_file->pid;
2833 else
2834 pid = current->tgid;
2835
Steve Frenchad7a2922008-02-07 23:25:02 +00002836 if ((file->f_flags & O_ACCMODE) == O_WRONLY)
Joe Perchesf96637b2013-05-04 22:12:25 -05002837 cifs_dbg(FYI, "attempting read on write only file instance\n");
Steve Frenchad7a2922008-02-07 23:25:02 +00002838
Jeff Layton1c892542012-05-16 07:13:17 -04002839 do {
2840 cur_len = min_t(const size_t, len - total_read, cifs_sb->rsize);
2841 npages = DIV_ROUND_UP(cur_len, PAGE_SIZE);
Pavel Shilovskya70307e2010-12-14 11:50:41 +03002842
Jeff Layton1c892542012-05-16 07:13:17 -04002843 /* allocate a readdata struct */
2844 rdata = cifs_readdata_alloc(npages,
2845 cifs_uncached_readv_complete);
2846 if (!rdata) {
2847 rc = -ENOMEM;
2848 goto error;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002849 }
Pavel Shilovskya70307e2010-12-14 11:50:41 +03002850
Jeff Laytonc5fab6f2012-09-19 06:22:30 -07002851 rc = cifs_read_allocate_pages(rdata, npages);
Jeff Layton1c892542012-05-16 07:13:17 -04002852 if (rc)
2853 goto error;
2854
2855 rdata->cfile = cifsFileInfo_get(open_file);
Jeff Laytonc5fab6f2012-09-19 06:22:30 -07002856 rdata->nr_pages = npages;
Jeff Layton1c892542012-05-16 07:13:17 -04002857 rdata->offset = offset;
2858 rdata->bytes = cur_len;
2859 rdata->pid = pid;
Jeff Layton8321fec2012-09-19 06:22:32 -07002860 rdata->pagesz = PAGE_SIZE;
2861 rdata->read_into_pages = cifs_uncached_read_into_pages;
Jeff Layton1c892542012-05-16 07:13:17 -04002862
2863 rc = cifs_retry_async_readv(rdata);
2864error:
2865 if (rc) {
2866 kref_put(&rdata->refcount,
2867 cifs_uncached_readdata_release);
2868 break;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002869 }
Jeff Layton1c892542012-05-16 07:13:17 -04002870
2871 list_add_tail(&rdata->list, &rdata_list);
2872 offset += cur_len;
2873 len -= cur_len;
2874 } while (len > 0);
2875
2876 /* if at least one read request send succeeded, then reset rc */
2877 if (!list_empty(&rdata_list))
2878 rc = 0;
2879
2880 /* the loop below should proceed in the order of increasing offsets */
2881restart_loop:
2882 list_for_each_entry_safe(rdata, tmp, &rdata_list, list) {
2883 if (!rc) {
2884 ssize_t copied;
2885
2886 /* FIXME: freezable sleep too? */
2887 rc = wait_for_completion_killable(&rdata->done);
2888 if (rc)
2889 rc = -EINTR;
2890 else if (rdata->result)
2891 rc = rdata->result;
2892 else {
2893 rc = cifs_readdata_to_iov(rdata, iov,
2894 nr_segs, *poffset,
2895 &copied);
2896 total_read += copied;
2897 }
2898
2899 /* resend call if it's a retryable error */
2900 if (rc == -EAGAIN) {
2901 rc = cifs_retry_async_readv(rdata);
2902 goto restart_loop;
2903 }
2904 }
2905 list_del_init(&rdata->list);
2906 kref_put(&rdata->refcount, cifs_uncached_readdata_release);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002907 }
Pavel Shilovskya70307e2010-12-14 11:50:41 +03002908
Jeff Layton1c892542012-05-16 07:13:17 -04002909 cifs_stats_bytes_read(tcon, total_read);
2910 *poffset += total_read;
2911
Pavel Shilovsky09a47072012-09-18 16:20:29 -07002912 /* mask nodata case */
2913 if (rc == -ENODATA)
2914 rc = 0;
2915
Jeff Layton1c892542012-05-16 07:13:17 -04002916 return total_read ? total_read : rc;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002917}
2918
Pavel Shilovsky0b81c1c2011-03-10 10:11:05 +03002919ssize_t cifs_user_readv(struct kiocb *iocb, const struct iovec *iov,
Pavel Shilovskya70307e2010-12-14 11:50:41 +03002920 unsigned long nr_segs, loff_t pos)
2921{
2922 ssize_t read;
2923
2924 read = cifs_iovec_read(iocb->ki_filp, iov, nr_segs, &pos);
2925 if (read > 0)
2926 iocb->ki_pos = pos;
2927
2928 return read;
2929}
2930
/*
 * Strict-cache read entry point: serve from the page cache only when we
 * hold a level II (read) oplock and no conflicting byte-range lock
 * covers the requested region; otherwise read from the server.
 */
ssize_t
cifs_strict_readv(struct kiocb *iocb, const struct iovec *iov,
		  unsigned long nr_segs, loff_t pos)
{
	struct inode *inode = file_inode(iocb->ki_filp);
	struct cifsInodeInfo *cinode = CIFS_I(inode);
	struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
	struct cifsFileInfo *cfile = (struct cifsFileInfo *)
						iocb->ki_filp->private_data;
	struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
	int rc = -EACCES;

	/*
	 * In strict cache mode we need to read from the server all the time
	 * if we don't have level II oplock because the server can delay mtime
	 * change - so we can't make a decision about inode invalidating.
	 * And we can also fail with pagereading if there are mandatory locks
	 * on pages affected by this read but not on the region from pos to
	 * pos+len-1.
	 */
	if (!cinode->clientCanCacheRead)
		return cifs_user_readv(iocb, iov, nr_segs, pos);

	/* POSIX locking semantics: brlocks can't conflict with reads */
	if (cap_unix(tcon->ses) &&
	    (CIFS_UNIX_FCNTL_CAP & le64_to_cpu(tcon->fsUnixInfo.Capability)) &&
	    ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOPOSIXBRL) == 0))
		return generic_file_aio_read(iocb, iov, nr_segs, pos);

	/*
	 * We need to hold the sem to be sure nobody modifies lock list
	 * with a brlock that prevents reading.
	 */
	down_read(&cinode->lock_sem);
	if (!cifs_find_lock_conflict(cfile, pos, iov_length(iov, nr_segs),
				     tcon->ses->server->vals->shared_lock_type,
				     NULL, CIFS_READ_OP))
		rc = generic_file_aio_read(iocb, iov, nr_segs, pos);
	up_read(&cinode->lock_sem);
	return rc;
}
Linus Torvalds1da177e2005-04-16 15:20:36 -07002971
/*
 * Synchronous read: pull up to @read_size bytes at *@offset from the
 * server into @read_data, one rsize-bounded chunk at a time.  Advances
 * *@offset by the bytes read and returns the running total, or a
 * negative error if the very first chunk fails.
 */
static ssize_t
cifs_read(struct file *file, char *read_data, size_t read_size, loff_t *offset)
{
	int rc = -EACCES;
	unsigned int bytes_read = 0;
	unsigned int total_read;
	unsigned int current_read_size;
	unsigned int rsize;
	struct cifs_sb_info *cifs_sb;
	struct cifs_tcon *tcon;
	struct TCP_Server_Info *server;
	unsigned int xid;
	char *cur_offset;
	struct cifsFileInfo *open_file;
	struct cifs_io_parms io_parms;
	int buf_type = CIFS_NO_BUFFER;
	__u32 pid;

	xid = get_xid();
	cifs_sb = CIFS_SB(file->f_path.dentry->d_sb);

	/* FIXME: set up handlers for larger reads and/or convert to async */
	rsize = min_t(unsigned int, cifs_sb->rsize, CIFSMaxBufSize);

	if (file->private_data == NULL) {
		rc = -EBADF;
		free_xid(xid);
		return rc;
	}
	open_file = file->private_data;
	tcon = tlink_tcon(open_file->tlink);
	server = tcon->ses->server;

	if (!server->ops->sync_read) {
		free_xid(xid);
		return -ENOSYS;
	}

	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RWPIDFORWARD)
		pid = open_file->pid;
	else
		pid = current->tgid;

	if ((file->f_flags & O_ACCMODE) == O_WRONLY)
		cifs_dbg(FYI, "attempting read on write only file instance\n");

	/* loop one chunk per iteration; counters advance in the for-step */
	for (total_read = 0, cur_offset = read_data; read_size > total_read;
	     total_read += bytes_read, cur_offset += bytes_read) {
		current_read_size = min_t(uint, read_size - total_read, rsize);
		/*
		 * For windows me and 9x we do not want to request more than it
		 * negotiated since it will refuse the read then.
		 */
		if ((tcon->ses) && !(tcon->ses->capabilities &
				tcon->ses->server->vals->cap_large_files)) {
			current_read_size = min_t(uint, current_read_size,
					CIFSMaxBufSize);
		}
		rc = -EAGAIN;
		/* retry loop: reopen a stale handle and resend on -EAGAIN */
		while (rc == -EAGAIN) {
			if (open_file->invalidHandle) {
				rc = cifs_reopen_file(open_file, true);
				if (rc != 0)
					break;
			}
			io_parms.pid = pid;
			io_parms.tcon = tcon;
			io_parms.offset = *offset;
			io_parms.length = current_read_size;
			rc = server->ops->sync_read(xid, open_file, &io_parms,
						    &bytes_read, &cur_offset,
						    &buf_type);
		}
		if (rc || (bytes_read == 0)) {
			/* error or EOF: return partial count if any data */
			if (total_read) {
				break;
			} else {
				free_xid(xid);
				return rc;
			}
		} else {
			/*
			 * NOTE(review): this feeds the cumulative total_read
			 * into the stats on every pass rather than bytes_read,
			 * which looks like over-counting - confirm intent.
			 */
			cifs_stats_bytes_read(tcon, total_read);
			*offset += bytes_read;
		}
	}
	free_xid(xid);
	return total_read;
}
3060
/*
 * If the page is mmap'ed into a process' page tables, then we need to make
 * sure that it doesn't change while being written back.
 */
static int
cifs_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct page *page = vmf->page;

	/* VM_FAULT_LOCKED tells the VM the page is returned still locked */
	lock_page(page);
	return VM_FAULT_LOCKED;
}
3073
/*
 * mmap operations: reads fault through the generic filemap path; a
 * write fault goes through cifs_page_mkwrite so the page stays locked
 * while being written back.
 */
static struct vm_operations_struct cifs_file_vm_ops = {
	.fault = filemap_fault,
	.page_mkwrite = cifs_page_mkwrite,
	.remap_pages = generic_file_remap_pages,
};
3079
Pavel Shilovsky7a6a19b2010-12-14 11:29:51 +03003080int cifs_file_strict_mmap(struct file *file, struct vm_area_struct *vma)
3081{
3082 int rc, xid;
Al Viro496ad9a2013-01-23 17:07:38 -05003083 struct inode *inode = file_inode(file);
Pavel Shilovsky7a6a19b2010-12-14 11:29:51 +03003084
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04003085 xid = get_xid();
Pavel Shilovsky7a6a19b2010-12-14 11:29:51 +03003086
Pavel Shilovsky6feb9892011-04-07 18:18:11 +04003087 if (!CIFS_I(inode)->clientCanCacheRead) {
3088 rc = cifs_invalidate_mapping(inode);
3089 if (rc)
3090 return rc;
3091 }
Pavel Shilovsky7a6a19b2010-12-14 11:29:51 +03003092
3093 rc = generic_file_mmap(file, vma);
Jeff Laytonca83ce32011-04-12 09:13:44 -04003094 if (rc == 0)
3095 vma->vm_ops = &cifs_file_vm_ops;
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04003096 free_xid(xid);
Pavel Shilovsky7a6a19b2010-12-14 11:29:51 +03003097 return rc;
3098}
3099
Linus Torvalds1da177e2005-04-16 15:20:36 -07003100int cifs_file_mmap(struct file *file, struct vm_area_struct *vma)
3101{
Linus Torvalds1da177e2005-04-16 15:20:36 -07003102 int rc, xid;
3103
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04003104 xid = get_xid();
Jeff Laytonabab0952010-02-12 07:44:18 -05003105 rc = cifs_revalidate_file(file);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003106 if (rc) {
Joe Perchesf96637b2013-05-04 22:12:25 -05003107 cifs_dbg(FYI, "Validation prior to mmap failed, error=%d\n",
3108 rc);
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04003109 free_xid(xid);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003110 return rc;
3111 }
3112 rc = generic_file_mmap(file, vma);
Jeff Laytonca83ce32011-04-12 09:13:44 -04003113 if (rc == 0)
3114 vma->vm_ops = &cifs_file_vm_ops;
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04003115 free_xid(xid);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003116 return rc;
3117}
3118
Jeff Layton0471ca32012-05-16 07:13:16 -04003119static void
3120cifs_readv_complete(struct work_struct *work)
3121{
Jeff Laytonc5fab6f2012-09-19 06:22:30 -07003122 unsigned int i;
Jeff Layton0471ca32012-05-16 07:13:16 -04003123 struct cifs_readdata *rdata = container_of(work,
3124 struct cifs_readdata, work);
Jeff Layton0471ca32012-05-16 07:13:16 -04003125
Jeff Laytonc5fab6f2012-09-19 06:22:30 -07003126 for (i = 0; i < rdata->nr_pages; i++) {
3127 struct page *page = rdata->pages[i];
3128
Jeff Layton0471ca32012-05-16 07:13:16 -04003129 lru_cache_add_file(page);
3130
3131 if (rdata->result == 0) {
Jeff Layton0471ca32012-05-16 07:13:16 -04003132 flush_dcache_page(page);
3133 SetPageUptodate(page);
3134 }
3135
3136 unlock_page(page);
3137
3138 if (rdata->result == 0)
3139 cifs_readpage_to_fscache(rdata->mapping->host, page);
3140
3141 page_cache_release(page);
Jeff Laytonc5fab6f2012-09-19 06:22:30 -07003142 rdata->pages[i] = NULL;
Jeff Layton0471ca32012-05-16 07:13:16 -04003143 }
Jeff Layton6993f742012-05-16 07:13:17 -04003144 kref_put(&rdata->refcount, cifs_readdata_release);
Jeff Layton0471ca32012-05-16 07:13:16 -04003145}
3146
/*
 * Receive up to @len bytes from the server socket into the pagecache
 * pages attached to @rdata (readpages path).  Full pages are read in
 * place; the final partial page is zero-filled past the data and its
 * valid length kept in rdata->tailsz.  Surplus pages past the server's
 * (probable) EOF are zeroed and marked up to date; other surplus pages
 * are simply released.  Dropped pages are removed from rdata->pages[].
 *
 * Returns bytes received, or the socket error if nothing was read.
 */
static int
cifs_readpages_read_into_pages(struct TCP_Server_Info *server,
			       struct cifs_readdata *rdata, unsigned int len)
{
	int total_read = 0, result = 0;
	unsigned int i;
	u64 eof;
	pgoff_t eof_index;
	unsigned int nr_pages = rdata->nr_pages;
	struct kvec iov;

	/* determine the eof that the server (probably) has */
	eof = CIFS_I(rdata->mapping->host)->server_eof;
	eof_index = eof ? (eof - 1) >> PAGE_CACHE_SHIFT : 0;
	cifs_dbg(FYI, "eof=%llu eof_index=%lu\n", eof, eof_index);

	rdata->tailsz = PAGE_CACHE_SIZE;
	for (i = 0; i < nr_pages; i++) {
		struct page *page = rdata->pages[i];

		if (len >= PAGE_CACHE_SIZE) {
			/* enough data to fill the page */
			iov.iov_base = kmap(page);
			iov.iov_len = PAGE_CACHE_SIZE;
			cifs_dbg(FYI, "%u: idx=%lu iov_base=%p iov_len=%zu\n",
				 i, page->index, iov.iov_base, iov.iov_len);
			len -= PAGE_CACHE_SIZE;
		} else if (len > 0) {
			/* enough for partial page, fill and zero the rest */
			iov.iov_base = kmap(page);
			iov.iov_len = len;
			cifs_dbg(FYI, "%u: idx=%lu iov_base=%p iov_len=%zu\n",
				 i, page->index, iov.iov_base, iov.iov_len);
			memset(iov.iov_base + len,
				'\0', PAGE_CACHE_SIZE - len);
			rdata->tailsz = len;
			len = 0;
		} else if (page->index > eof_index) {
			/*
			 * The VFS will not try to do readahead past the
			 * i_size, but it's possible that we have outstanding
			 * writes with gaps in the middle and the i_size hasn't
			 * caught up yet. Populate those with zeroed out pages
			 * to prevent the VFS from repeatedly attempting to
			 * fill them until the writes are flushed.
			 */
			zero_user(page, 0, PAGE_CACHE_SIZE);
			lru_cache_add_file(page);
			flush_dcache_page(page);
			SetPageUptodate(page);
			unlock_page(page);
			page_cache_release(page);
			rdata->pages[i] = NULL;
			rdata->nr_pages--;
			continue;
		} else {
			/* no need to hold page hostage */
			lru_cache_add_file(page);
			unlock_page(page);
			page_cache_release(page);
			rdata->pages[i] = NULL;
			rdata->nr_pages--;
			continue;
		}

		/* page stays kmapped across the socket read, then unmapped */
		result = cifs_readv_from_socket(server, &iov, 1, iov.iov_len);
		kunmap(page);
		if (result < 0)
			break;

		total_read += result;
	}

	return total_read > 0 ? total_read : result;
}
3222
/*
 * ->readpages() for CIFS: batch contiguous pages from @page_list into
 * rsize-sized async read requests.  fscache is consulted first; pages
 * that are successfully inserted into the pagecache are moved into the
 * request's rdata->pages[] in increasing index order, and completion is
 * handled by cifs_readv_complete().
 */
static int cifs_readpages(struct file *file, struct address_space *mapping,
	struct list_head *page_list, unsigned num_pages)
{
	int rc;
	struct list_head tmplist;
	struct cifsFileInfo *open_file = file->private_data;
	struct cifs_sb_info *cifs_sb = CIFS_SB(file->f_path.dentry->d_sb);
	unsigned int rsize = cifs_sb->rsize;
	pid_t pid;

	/*
	 * Give up immediately if rsize is too small to read an entire page.
	 * The VFS will fall back to readpage. We should never reach this
	 * point however since we set ra_pages to 0 when the rsize is smaller
	 * than a cache page.
	 */
	if (unlikely(rsize < PAGE_CACHE_SIZE))
		return 0;

	/*
	 * Reads as many pages as possible from fscache. Returns -ENOBUFS
	 * immediately if the cookie is negative
	 */
	rc = cifs_readpages_from_fscache(mapping->host, mapping, page_list,
					 &num_pages);
	if (rc == 0)
		return rc;

	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RWPIDFORWARD)
		pid = open_file->pid;
	else
		pid = current->tgid;

	rc = 0;
	INIT_LIST_HEAD(&tmplist);

	cifs_dbg(FYI, "%s: file=%p mapping=%p num_pages=%u\n",
		 __func__, file, mapping, num_pages);

	/*
	 * Start with the page at end of list and move it to private
	 * list. Do the same with any following pages until we hit
	 * the rsize limit, hit an index discontinuity, or run out of
	 * pages. Issue the async read and then start the loop again
	 * until the list is empty.
	 *
	 * Note that list order is important. The page_list is in
	 * the order of declining indexes. When we put the pages in
	 * the rdata->pages, then we want them in increasing order.
	 */
	while (!list_empty(page_list)) {
		unsigned int i;
		unsigned int bytes = PAGE_CACHE_SIZE;
		unsigned int expected_index;
		unsigned int nr_pages = 1;
		loff_t offset;
		struct page *page, *tpage;
		struct cifs_readdata *rdata;

		page = list_entry(page_list->prev, struct page, lru);

		/*
		 * Lock the page and put it in the cache. Since no one else
		 * should have access to this page, we're safe to simply set
		 * PG_locked without checking it first.
		 */
		__set_page_locked(page);
		rc = add_to_page_cache_locked(page, mapping,
					      page->index, GFP_KERNEL);

		/* give up if we can't stick it in the cache */
		if (rc) {
			__clear_page_locked(page);
			break;
		}

		/* move first page to the tmplist */
		offset = (loff_t)page->index << PAGE_CACHE_SHIFT;
		list_move_tail(&page->lru, &tmplist);

		/* now try and add more pages onto the request */
		expected_index = page->index + 1;
		list_for_each_entry_safe_reverse(page, tpage, page_list, lru) {
			/* discontinuity ? */
			if (page->index != expected_index)
				break;

			/* would this page push the read over the rsize? */
			if (bytes + PAGE_CACHE_SIZE > rsize)
				break;

			__set_page_locked(page);
			if (add_to_page_cache_locked(page, mapping,
						page->index, GFP_KERNEL)) {
				__clear_page_locked(page);
				break;
			}
			list_move_tail(&page->lru, &tmplist);
			bytes += PAGE_CACHE_SIZE;
			expected_index++;
			nr_pages++;
		}

		rdata = cifs_readdata_alloc(nr_pages, cifs_readv_complete);
		if (!rdata) {
			/* best to give up if we're out of mem */
			list_for_each_entry_safe(page, tpage, &tmplist, lru) {
				list_del(&page->lru);
				lru_cache_add_file(page);
				unlock_page(page);
				page_cache_release(page);
			}
			rc = -ENOMEM;
			break;
		}

		rdata->cfile = cifsFileInfo_get(open_file);
		rdata->mapping = mapping;
		rdata->offset = offset;
		rdata->bytes = bytes;
		rdata->pid = pid;
		rdata->pagesz = PAGE_CACHE_SIZE;
		rdata->read_into_pages = cifs_readpages_read_into_pages;

		/* attach the batched pages to the request, lowest index first */
		list_for_each_entry_safe(page, tpage, &tmplist, lru) {
			list_del(&page->lru);
			rdata->pages[rdata->nr_pages++] = page;
		}

		rc = cifs_retry_async_readv(rdata);
		if (rc != 0) {
			/* send failed: back out the pagecache insertions */
			for (i = 0; i < rdata->nr_pages; i++) {
				page = rdata->pages[i];
				lru_cache_add_file(page);
				unlock_page(page);
				page_cache_release(page);
			}
			kref_put(&rdata->refcount, cifs_readdata_release);
			break;
		}

		/* drop our reference; completion work holds its own */
		kref_put(&rdata->refcount, cifs_readdata_release);
	}

	return rc;
}
3369
3370static int cifs_readpage_worker(struct file *file, struct page *page,
3371 loff_t *poffset)
3372{
3373 char *read_data;
3374 int rc;
3375
Suresh Jayaraman56698232010-07-05 18:13:25 +05303376 /* Is the page cached? */
Al Viro496ad9a2013-01-23 17:07:38 -05003377 rc = cifs_readpage_from_fscache(file_inode(file), page);
Suresh Jayaraman56698232010-07-05 18:13:25 +05303378 if (rc == 0)
3379 goto read_complete;
3380
Linus Torvalds1da177e2005-04-16 15:20:36 -07003381 page_cache_get(page);
3382 read_data = kmap(page);
3383 /* for reads over a certain size could initiate async read ahead */
Steve Frenchfb8c4b12007-07-10 01:16:18 +00003384
Linus Torvalds1da177e2005-04-16 15:20:36 -07003385 rc = cifs_read(file, read_data, PAGE_CACHE_SIZE, poffset);
Steve Frenchfb8c4b12007-07-10 01:16:18 +00003386
Linus Torvalds1da177e2005-04-16 15:20:36 -07003387 if (rc < 0)
3388 goto io_error;
3389 else
Joe Perchesf96637b2013-05-04 22:12:25 -05003390 cifs_dbg(FYI, "Bytes read %d\n", rc);
Steve Frenchfb8c4b12007-07-10 01:16:18 +00003391
Al Viro496ad9a2013-01-23 17:07:38 -05003392 file_inode(file)->i_atime =
3393 current_fs_time(file_inode(file)->i_sb);
Steve Frenchfb8c4b12007-07-10 01:16:18 +00003394
Linus Torvalds1da177e2005-04-16 15:20:36 -07003395 if (PAGE_CACHE_SIZE > rc)
3396 memset(read_data + rc, 0, PAGE_CACHE_SIZE - rc);
3397
3398 flush_dcache_page(page);
3399 SetPageUptodate(page);
Suresh Jayaraman9dc06552010-07-05 18:13:11 +05303400
3401 /* send this page to the cache */
Al Viro496ad9a2013-01-23 17:07:38 -05003402 cifs_readpage_to_fscache(file_inode(file), page);
Suresh Jayaraman9dc06552010-07-05 18:13:11 +05303403
Linus Torvalds1da177e2005-04-16 15:20:36 -07003404 rc = 0;
Steve Frenchfb8c4b12007-07-10 01:16:18 +00003405
Linus Torvalds1da177e2005-04-16 15:20:36 -07003406io_error:
Steve Frenchfb8c4b12007-07-10 01:16:18 +00003407 kunmap(page);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003408 page_cache_release(page);
Suresh Jayaraman56698232010-07-05 18:13:25 +05303409
3410read_complete:
Linus Torvalds1da177e2005-04-16 15:20:36 -07003411 return rc;
3412}
3413
3414static int cifs_readpage(struct file *file, struct page *page)
3415{
3416 loff_t offset = (loff_t)page->index << PAGE_CACHE_SHIFT;
3417 int rc = -EACCES;
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04003418 unsigned int xid;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003419
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04003420 xid = get_xid();
Linus Torvalds1da177e2005-04-16 15:20:36 -07003421
3422 if (file->private_data == NULL) {
Suresh Jayaraman0f3bc092009-06-25 18:12:34 +05303423 rc = -EBADF;
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04003424 free_xid(xid);
Suresh Jayaraman0f3bc092009-06-25 18:12:34 +05303425 return rc;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003426 }
3427
Joe Perchesf96637b2013-05-04 22:12:25 -05003428 cifs_dbg(FYI, "readpage %p at offset %d 0x%x\n",
Joe Perchesb6b38f72010-04-21 03:50:45 +00003429 page, (int)offset, (int)offset);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003430
3431 rc = cifs_readpage_worker(file, page, &offset);
3432
3433 unlock_page(page);
3434
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04003435 free_xid(xid);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003436 return rc;
3437}
3438
Steve Frencha403a0a2007-07-26 15:54:16 +00003439static int is_inode_writable(struct cifsInodeInfo *cifs_inode)
3440{
3441 struct cifsFileInfo *open_file;
3442
Jeff Layton44772882010-10-15 15:34:03 -04003443 spin_lock(&cifs_file_list_lock);
Steve Frencha403a0a2007-07-26 15:54:16 +00003444 list_for_each_entry(open_file, &cifs_inode->openFileList, flist) {
Jeff Layton2e396b82010-10-15 15:34:01 -04003445 if (OPEN_FMODE(open_file->f_flags) & FMODE_WRITE) {
Jeff Layton44772882010-10-15 15:34:03 -04003446 spin_unlock(&cifs_file_list_lock);
Steve Frencha403a0a2007-07-26 15:54:16 +00003447 return 1;
3448 }
3449 }
Jeff Layton44772882010-10-15 15:34:03 -04003450 spin_unlock(&cifs_file_list_lock);
Steve Frencha403a0a2007-07-26 15:54:16 +00003451 return 0;
3452}
3453
Linus Torvalds1da177e2005-04-16 15:20:36 -07003454/* We do not want to update the file size from server for inodes
3455 open for write - to avoid races with writepage extending
3456 the file - in the future we could consider allowing
Steve Frenchfb8c4b12007-07-10 01:16:18 +00003457 refreshing the inode only on increases in the file size
Linus Torvalds1da177e2005-04-16 15:20:36 -07003458 but this is tricky to do without racing with writebehind
3459 page caching in the current Linux kernel design */
Steve French4b18f2a2008-04-29 00:06:05 +00003460bool is_size_safe_to_change(struct cifsInodeInfo *cifsInode, __u64 end_of_file)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003461{
Steve Frencha403a0a2007-07-26 15:54:16 +00003462 if (!cifsInode)
Steve French4b18f2a2008-04-29 00:06:05 +00003463 return true;
Steve French23e7dd72005-10-20 13:44:56 -07003464
Steve Frencha403a0a2007-07-26 15:54:16 +00003465 if (is_inode_writable(cifsInode)) {
3466 /* This inode is open for write at least once */
Steve Frenchc32a0b62006-01-12 14:41:28 -08003467 struct cifs_sb_info *cifs_sb;
3468
Steve Frenchc32a0b62006-01-12 14:41:28 -08003469 cifs_sb = CIFS_SB(cifsInode->vfs_inode.i_sb);
Steve Frenchad7a2922008-02-07 23:25:02 +00003470 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_DIRECT_IO) {
Steve Frenchfb8c4b12007-07-10 01:16:18 +00003471 /* since no page cache to corrupt on directio
Steve Frenchc32a0b62006-01-12 14:41:28 -08003472 we can change size safely */
Steve French4b18f2a2008-04-29 00:06:05 +00003473 return true;
Steve Frenchc32a0b62006-01-12 14:41:28 -08003474 }
3475
Steve Frenchfb8c4b12007-07-10 01:16:18 +00003476 if (i_size_read(&cifsInode->vfs_inode) < end_of_file)
Steve French4b18f2a2008-04-29 00:06:05 +00003477 return true;
Steve French7ba52632007-02-08 18:14:13 +00003478
Steve French4b18f2a2008-04-29 00:06:05 +00003479 return false;
Steve French23e7dd72005-10-20 13:44:56 -07003480 } else
Steve French4b18f2a2008-04-29 00:06:05 +00003481 return true;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003482}
3483
/*
 * ->write_begin: locate/lock the page for a buffered write and decide
 * whether it needs to be read in first. On return *pagep holds the locked
 * page. If the page is left !PageUptodate, cifs_write_end falls back to a
 * sync write. Returns 0 or -ENOMEM if the page cannot be obtained.
 */
static int cifs_write_begin(struct file *file, struct address_space *mapping,
			loff_t pos, unsigned len, unsigned flags,
			struct page **pagep, void **fsdata)
{
	pgoff_t index = pos >> PAGE_CACHE_SHIFT;
	loff_t offset = pos & (PAGE_CACHE_SIZE - 1);	/* offset within page */
	loff_t page_start = pos & PAGE_MASK;
	loff_t i_size;
	struct page *page;
	int rc = 0;

	cifs_dbg(FYI, "write_begin from %lld len %d\n", (long long)pos, len);

	page = grab_cache_page_write_begin(mapping, index, flags);
	if (!page) {
		rc = -ENOMEM;
		goto out;
	}

	/* already uptodate from a prior read/write - nothing to prepare */
	if (PageUptodate(page))
		goto out;

	/*
	 * If we write a full page it will be up to date, no need to read from
	 * the server. If the write is short, we'll end up doing a sync write
	 * instead.
	 */
	if (len == PAGE_CACHE_SIZE)
		goto out;

	/*
	 * optimize away the read when we have an oplock, and we're not
	 * expecting to use any of the data we'd be reading in. That
	 * is, when the page lies beyond the EOF, or straddles the EOF
	 * and the write will cover all of the existing data.
	 */
	if (CIFS_I(mapping->host)->clientCanCacheRead) {
		i_size = i_size_read(mapping->host);
		if (page_start >= i_size ||
		    (offset == 0 && (pos + len) >= i_size)) {
			/* zero the parts of the page we will not write */
			zero_user_segments(page, 0, offset,
					   offset + len,
					   PAGE_CACHE_SIZE);
			/*
			 * PageChecked means that the parts of the page
			 * to which we're not writing are considered up
			 * to date. Once the data is copied to the
			 * page, it can be set uptodate.
			 */
			SetPageChecked(page);
			goto out;
		}
	}

	if ((file->f_flags & O_ACCMODE) != O_WRONLY) {
		/*
		 * might as well read a page, it is fast enough. If we get
		 * an error, we don't need to return it. cifs_write_end will
		 * do a sync write instead since PG_uptodate isn't set.
		 */
		cifs_readpage_worker(file, page, &page_start);
	} else {
		/* we could try using another file handle if there is one -
		   but how would we lock it to prevent close of that handle
		   racing with this read? In any case
		   this will be written out by write_end so is fine */
	}
out:
	*pagep = page;
	return rc;
}
3555
Suresh Jayaraman85f2d6b2010-07-05 18:13:00 +05303556static int cifs_release_page(struct page *page, gfp_t gfp)
3557{
3558 if (PagePrivate(page))
3559 return 0;
3560
3561 return cifs_fscache_release_page(page, gfp);
3562}
3563
Lukas Czernerd47992f2013-05-21 23:17:23 -04003564static void cifs_invalidate_page(struct page *page, unsigned int offset,
3565 unsigned int length)
Suresh Jayaraman85f2d6b2010-07-05 18:13:00 +05303566{
3567 struct cifsInodeInfo *cifsi = CIFS_I(page->mapping->host);
3568
Lukas Czernerd47992f2013-05-21 23:17:23 -04003569 if (offset == 0 && length == PAGE_CACHE_SIZE)
Suresh Jayaraman85f2d6b2010-07-05 18:13:00 +05303570 cifs_fscache_invalidate_page(page, &cifsi->vfs_inode);
3571}
3572
Pavel Shilovsky9ad15062011-04-08 05:29:10 +04003573static int cifs_launder_page(struct page *page)
3574{
3575 int rc = 0;
3576 loff_t range_start = page_offset(page);
3577 loff_t range_end = range_start + (loff_t)(PAGE_CACHE_SIZE - 1);
3578 struct writeback_control wbc = {
3579 .sync_mode = WB_SYNC_ALL,
3580 .nr_to_write = 0,
3581 .range_start = range_start,
3582 .range_end = range_end,
3583 };
3584
Joe Perchesf96637b2013-05-04 22:12:25 -05003585 cifs_dbg(FYI, "Launder page: %p\n", page);
Pavel Shilovsky9ad15062011-04-08 05:29:10 +04003586
3587 if (clear_page_dirty_for_io(page))
3588 rc = cifs_writepage_locked(page, &wbc);
3589
3590 cifs_fscache_invalidate_page(page, page->mapping->host);
3591 return rc;
3592}
3593
/*
 * Work handler run when the server breaks our oplock. Flushes (and, when
 * read caching is lost, waits for and invalidates) cached data, pushes
 * any cached byte-range locks to the server, and finally acknowledges the
 * break unless it was cancelled (e.g. by reconnect).
 */
void cifs_oplock_break(struct work_struct *work)
{
	struct cifsFileInfo *cfile = container_of(work, struct cifsFileInfo,
						  oplock_break);
	struct inode *inode = cfile->dentry->d_inode;
	struct cifsInodeInfo *cinode = CIFS_I(inode);
	struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
	int rc = 0;

	/*
	 * Mandatory locks cannot be cached locally: drop read caching when
	 * a read-only (level II) oplock coexists with mandatory locks.
	 */
	if (!cinode->clientCanCacheAll && cinode->clientCanCacheRead &&
	    cifs_has_mand_locks(cinode)) {
		cifs_dbg(FYI, "Reset oplock to None for inode=%p due to mand locks\n",
			 inode);
		cinode->clientCanCacheRead = false;
	}

	if (inode && S_ISREG(inode->i_mode)) {
		/* mirror the oplock state into the VFS lease machinery */
		if (cinode->clientCanCacheRead)
			break_lease(inode, O_RDONLY);
		else
			break_lease(inode, O_WRONLY);
		rc = filemap_fdatawrite(inode->i_mapping);
		if (cinode->clientCanCacheRead == 0) {
			/* read caching lost: wait for writeback and drop
			   the now-stale page cache */
			rc = filemap_fdatawait(inode->i_mapping);
			mapping_set_error(inode->i_mapping, rc);
			cifs_invalidate_mapping(inode);
		}
		cifs_dbg(FYI, "Oplock flush inode %p rc %d\n", inode, rc);
	}

	/* byte-range locks can no longer be cached; send them to the server */
	rc = cifs_push_locks(cfile);
	if (rc)
		cifs_dbg(VFS, "Push locks rc = %d\n", rc);

	/*
	 * releasing stale oplock after recent reconnect of smb session using
	 * a now incorrect file handle is not a data integrity issue but do
	 * not bother sending an oplock release if session to server still is
	 * disconnected since oplock already released by the server
	 */
	if (!cfile->oplock_break_cancelled) {
		rc = tcon->ses->server->ops->oplock_response(tcon, &cfile->fid,
							cinode);
		cifs_dbg(FYI, "Oplock release rc = %d\n", rc);
	}
}
3640
/*
 * Address space operations used when the server's buffer is large enough
 * for readpages (header plus at least one full page of data). See
 * cifs_addr_ops_smallbuf below for the variant without .readpages.
 */
const struct address_space_operations cifs_addr_ops = {
	.readpage = cifs_readpage,
	.readpages = cifs_readpages,
	.writepage = cifs_writepage,
	.writepages = cifs_writepages,
	.write_begin = cifs_write_begin,
	.write_end = cifs_write_end,
	.set_page_dirty = __set_page_dirty_nobuffers,
	.releasepage = cifs_release_page,
	.invalidatepage = cifs_invalidate_page,
	.launder_page = cifs_launder_page,
};
Dave Kleikamp273d81d2006-06-01 19:41:23 +00003653
/*
 * cifs_readpages requires the server to support a buffer large enough to
 * contain the header plus one complete page of data. Otherwise, we need
 * to leave cifs_readpages out of the address space operations.
 */
const struct address_space_operations cifs_addr_ops_smallbuf = {
	.readpage = cifs_readpage,
	/* .readpages intentionally omitted - see comment above */
	.writepage = cifs_writepage,
	.writepages = cifs_writepages,
	.write_begin = cifs_write_begin,
	.write_end = cifs_write_end,
	.set_page_dirty = __set_page_dirty_nobuffers,
	.releasepage = cifs_release_page,
	.invalidatepage = cifs_invalidate_page,
	.launder_page = cifs_launder_page,
};