/*
 * Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved.
 * Copyright (C) 2004-2005 Red Hat, Inc. All rights reserved.
 *
 * This copyrighted material is made available to anyone wishing to use,
 * modify, copy, or redistribute it subject to the terms and conditions
 * of the GNU General Public License v.2.
 */

#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/completion.h>
#include <linux/buffer_head.h>
#include <linux/pagemap.h>
#include <linux/uio.h>
#include <linux/blkdev.h>
#include <linux/mm.h>
#include <linux/smp_lock.h>
#include <linux/gfs2_ioctl.h>
#include <linux/fs.h>
#include <asm/semaphore.h>
#include <asm/uaccess.h>

#include "gfs2.h"
#include "bmap.h"
#include "dir.h"
#include "glock.h"
#include "glops.h"
#include "inode.h"
#include "lm.h"
#include "log.h"
#include "meta_io.h"
#include "ops_file.h"
#include "ops_vm.h"
#include "quota.h"
#include "rgrp.h"
#include "trans.h"

40/* "bad" is for NFS support */
41struct filldir_bad_entry {
42 char *fbe_name;
43 unsigned int fbe_length;
44 uint64_t fbe_offset;
45 struct gfs2_inum fbe_inum;
46 unsigned int fbe_type;
47};
48
49struct filldir_bad {
50 struct gfs2_sbd *fdb_sbd;
51
52 struct filldir_bad_entry *fdb_entry;
53 unsigned int fdb_entry_num;
54 unsigned int fdb_entry_off;
55
56 char *fdb_name;
57 unsigned int fdb_name_size;
58 unsigned int fdb_name_off;
59};
60
61/* For regular, non-NFS */
62struct filldir_reg {
63 struct gfs2_sbd *fdr_sbd;
64 int fdr_prefetch;
65
66 filldir_t fdr_filldir;
67 void *fdr_opaque;
68};
69
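/*
 * gfs2_read_actor - copy data from a page into a kernel buffer
 *
 * Read actor used by gfs2_internal_read() below: copies at most
 * desc->count bytes from the given page into desc->arg.buf and updates
 * the read descriptor accordingly.
 */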
static int gfs2_read_actor(read_descriptor_t *desc, struct page *page,
			   unsigned long offset, unsigned long size)
{
	char *kaddr;
	unsigned long count = desc->count;

	if (size > count)
		size = count;

	kaddr = kmap(page);
	memcpy(desc->arg.buf, kaddr + offset, size);
	kunmap(page);

	desc->count = count - size;
	desc->written += size;
	desc->arg.buf += size;
	return size;
}

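/**
 * gfs2_internal_read - read file data into a kernel buffer
 * @ip: the inode to read from
 * @ra_state: readahead state for the file
 * @buf: the kernel-space buffer to fill
 * @pos: the file offset to start at (updated on return)
 * @size: the maximum number of bytes to read
 *
 * Goes through the page cache via do_generic_mapping_read(). No glock is
 * taken here, so any locking needed to make the inode size valid is
 * presumably the caller's responsibility.
 *
 * Returns: The number of bytes read, or errno
 */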
int gfs2_internal_read(struct gfs2_inode *ip, struct file_ra_state *ra_state,
		       char *buf, loff_t *pos, unsigned size)
{
	struct inode *inode = ip->i_vnode;
	read_descriptor_t desc;

	desc.written = 0;
	desc.arg.buf = buf;
	desc.count = size;
	desc.error = 0;
	do_generic_mapping_read(inode->i_mapping, ra_state, NULL, pos, &desc,
				gfs2_read_actor);
	return desc.written ? desc.written : desc.error;
}

/**
 * gfs2_llseek - seek to a location in a file
 * @file: the file
 * @offset: the offset
 * @origin: Where to seek from (SEEK_SET, SEEK_CUR, or SEEK_END)
 *
 * SEEK_END requires the glock for the file because it references the
 * file's size.
 *
 * Returns: The new offset, or errno
 */

static loff_t gfs2_llseek(struct file *file, loff_t offset, int origin)
{
	struct gfs2_inode *ip = get_v2ip(file->f_mapping->host);
	struct gfs2_holder i_gh;
	loff_t error;

	atomic_inc(&ip->i_sbd->sd_ops_file);

	if (origin == 2) { /* SEEK_END needs the glock to read i_size */
		error = gfs2_glock_nq_init(ip->i_gl, LM_ST_SHARED, LM_FLAG_ANY,
					   &i_gh);
		if (!error) {
			error = remote_llseek(file, offset, origin);
			gfs2_glock_dq_uninit(&i_gh);
		}
	} else
		error = remote_llseek(file, offset, origin);

	return error;
}

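/*
 * gfs2_direct_IO_read - perform a direct (O_DIRECT) read
 *
 * Any dirty pages for this file are written back and waited on first, so
 * the direct read sees up-to-date data; the read itself is then passed to
 * the address space's ->direct_IO() method.
 */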
136static ssize_t gfs2_direct_IO_read(struct kiocb *iocb, const struct iovec *iov,
137 loff_t offset, unsigned long nr_segs)
David Teiglandb3b94fa2006-01-16 16:50:04 +0000138{
Steven Whitehouse18ec7d52006-02-08 11:50:51 +0000139 struct file *file = iocb->ki_filp;
140 struct address_space *mapping = file->f_mapping;
141 ssize_t retval;
David Teiglandb3b94fa2006-01-16 16:50:04 +0000142
Steven Whitehouse18ec7d52006-02-08 11:50:51 +0000143 retval = filemap_write_and_wait(mapping);
144 if (retval == 0) {
145 retval = mapping->a_ops->direct_IO(READ, iocb, iov, offset,
146 nr_segs);
David Teiglandb3b94fa2006-01-16 16:50:04 +0000147 }
Steven Whitehouse18ec7d52006-02-08 11:50:51 +0000148 return retval;
David Teiglandb3b94fa2006-01-16 16:50:04 +0000149}
150
/**
 * __gfs2_file_aio_read - The main GFS2 read function
 * @iocb: the I/O control block
 * @iov: the vector of buffers to read into
 * @nr_segs: the number of segments in @iov
 * @ppos: the file position to read from (updated on return)
 *
 * N.B. This is almost, but not quite, the same as __generic_file_aio_read();
 * the important subtle difference being that inode->i_size isn't valid
 * unless we are holding a lock, and we take the lock _only_ on the O_DIRECT
 * path since otherwise locking is done entirely at the page cache
 * layer.
 *
 * Returns: The number of bytes read, -EIOCBQUEUED if the I/O was queued,
 *          or errno
 */
static ssize_t __gfs2_file_aio_read(struct kiocb *iocb,
				    const struct iovec *iov,
				    unsigned long nr_segs, loff_t *ppos)
{
	struct file *filp = iocb->ki_filp;
	struct gfs2_inode *ip = get_v2ip(filp->f_mapping->host);
	struct gfs2_holder gh;
	ssize_t retval;
	unsigned long seg;
	size_t count;

	count = 0;
	for (seg = 0; seg < nr_segs; seg++) {
		const struct iovec *iv = &iov[seg];

		/*
		 * If any segment has a negative length, or the cumulative
		 * length ever wraps negative then return -EINVAL.
		 */
		count += iv->iov_len;
		if (unlikely((ssize_t)(count|iv->iov_len) < 0))
			return -EINVAL;
		if (access_ok(VERIFY_WRITE, iv->iov_base, iv->iov_len))
			continue;
		if (seg == 0)
			return -EFAULT;
		nr_segs = seg;
		count -= iv->iov_len;	/* This segment is no good */
		break;
	}

	/* coalesce the iovecs and go direct-to-BIO for O_DIRECT */
	if (filp->f_flags & O_DIRECT) {
		loff_t pos = *ppos, size;
		struct address_space *mapping;
		struct inode *inode;

		mapping = filp->f_mapping;
		inode = mapping->host;
		retval = 0;
		if (!count)
			goto out; /* skip atime */

		gfs2_holder_init(ip->i_gl, LM_ST_SHARED, GL_ATIME, &gh);
		retval = gfs2_glock_nq_m_atime(1, &gh);
		if (retval)
			goto out;
		if (gfs2_is_stuffed(ip)) {
			gfs2_glock_dq_m(1, &gh);
			gfs2_holder_uninit(&gh);
			goto fallback_to_normal;
		}
		size = i_size_read(inode);
		if (pos < size) {
			retval = gfs2_direct_IO_read(iocb, iov, pos, nr_segs);
			if (retval > 0 && !is_sync_kiocb(iocb))
				retval = -EIOCBQUEUED;
			if (retval > 0)
				*ppos = pos + retval;
		}
		file_accessed(filp);
		gfs2_glock_dq_m(1, &gh);
		gfs2_holder_uninit(&gh);
		goto out;
	}

fallback_to_normal:
	retval = 0;
	if (count) {
		for (seg = 0; seg < nr_segs; seg++) {
			read_descriptor_t desc;

			desc.written = 0;
			desc.arg.buf = iov[seg].iov_base;
			desc.count = iov[seg].iov_len;
			if (desc.count == 0)
				continue;
			desc.error = 0;
			do_generic_file_read(filp, ppos, &desc,
					     file_read_actor);
			retval += desc.written;
			if (desc.error) {
				retval = retval ?: desc.error;
				break;
			}
		}
	}
out:
	return retval;
}

/**
 * gfs2_read - Read bytes from a file
 * @filp: The file to read from
 * @buf: The user buffer to copy into
 * @size: The amount of data requested
 * @offset: The current file offset
 *
 * Outputs: Offset - updated according to number of bytes read
 *
 * Returns: The number of bytes read, errno on failure
 */

static ssize_t gfs2_read(struct file *filp, char __user *buf, size_t size,
			 loff_t *offset)
{
	struct iovec local_iov = { .iov_base = buf, .iov_len = size };
	struct kiocb kiocb;
	ssize_t ret;

	atomic_inc(&get_v2sdp(filp->f_mapping->host->i_sb)->sd_ops_file);

	init_sync_kiocb(&kiocb, filp);
	ret = __gfs2_file_aio_read(&kiocb, &local_iov, 1, offset);
	if (-EIOCBQUEUED == ret)
		ret = wait_on_sync_kiocb(&kiocb);
	return ret;
}

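/*
 * gfs2_file_readv - vectored read; wraps a synchronous kiocb around
 * __gfs2_file_aio_read() and waits if the I/O gets queued.
 */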
static ssize_t gfs2_file_readv(struct file *filp, const struct iovec *iov,
			       unsigned long nr_segs, loff_t *ppos)
{
	struct kiocb kiocb;
	ssize_t ret;

	atomic_inc(&get_v2sdp(filp->f_mapping->host->i_sb)->sd_ops_file);

	init_sync_kiocb(&kiocb, filp);
	ret = __gfs2_file_aio_read(&kiocb, iov, nr_segs, ppos);
	if (-EIOCBQUEUED == ret)
		ret = wait_on_sync_kiocb(&kiocb);
	return ret;
}

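/*
 * gfs2_file_aio_read - asynchronous read entry point; the caller's kiocb
 * is passed straight through to __gfs2_file_aio_read().
 */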
static ssize_t gfs2_file_aio_read(struct kiocb *iocb, char __user *buf,
				  size_t count, loff_t pos)
{
	struct file *filp = iocb->ki_filp;
	struct iovec local_iov = { .iov_base = buf, .iov_len = count };

	atomic_inc(&get_v2sdp(filp->f_mapping->host->i_sb)->sd_ops_file);

	BUG_ON(iocb->ki_pos != pos);
	return __gfs2_file_aio_read(iocb, &local_iov, 1, &iocb->ki_pos);
}

/**
 * filldir_reg_func - Report a directory entry to the caller of gfs2_dir_read()
 * @opaque: opaque data used by the function
 * @name: the name of the directory entry
 * @length: the length of the name
 * @offset: the entry's offset in the directory
 * @inum: the inode number the entry points to
 * @type: the type of inode the entry points to
 *
 * Returns: 0 on success, 1 if buffer full
 */

static int filldir_reg_func(void *opaque, const char *name, unsigned int length,
			    uint64_t offset, struct gfs2_inum *inum,
			    unsigned int type)
{
	struct filldir_reg *fdr = (struct filldir_reg *)opaque;
	struct gfs2_sbd *sdp = fdr->fdr_sbd;
	int error;

	error = fdr->fdr_filldir(fdr->fdr_opaque, name, length, offset,
				 inum->no_formal_ino, type);
	if (error)
		return 1;

	if (fdr->fdr_prefetch && !(length == 1 && *name == '.')) {
		gfs2_glock_prefetch_num(sdp,
					inum->no_addr, &gfs2_inode_glops,
					LM_ST_SHARED, LM_FLAG_TRY | LM_FLAG_ANY);
		gfs2_glock_prefetch_num(sdp,
					inum->no_addr, &gfs2_iopen_glops,
					LM_ST_SHARED, LM_FLAG_TRY);
	}

	return 0;
}

/**
 * readdir_reg - Read directory entries from a directory
 * @file: The directory to read from
 * @dirent: Buffer for dirents
 * @filldir: Function used to do the copying
 *
 * Returns: errno
 */

static int readdir_reg(struct file *file, void *dirent, filldir_t filldir)
{
	struct gfs2_inode *dip = get_v2ip(file->f_mapping->host);
	struct filldir_reg fdr;
	struct gfs2_holder d_gh;
	uint64_t offset = file->f_pos;
	int error;

	fdr.fdr_sbd = dip->i_sbd;
	fdr.fdr_prefetch = 1;
	fdr.fdr_filldir = filldir;
	fdr.fdr_opaque = dirent;

	gfs2_holder_init(dip->i_gl, LM_ST_SHARED, GL_ATIME, &d_gh);
	error = gfs2_glock_nq_atime(&d_gh);
	if (error) {
		gfs2_holder_uninit(&d_gh);
		return error;
	}

	error = gfs2_dir_read(dip, &offset, &fdr, filldir_reg_func);

	gfs2_glock_dq_uninit(&d_gh);

	file->f_pos = offset;

	return error;
}

/**
 * filldir_bad_func - Report a directory entry to the caller of gfs2_dir_read()
 * @opaque: opaque data used by the function
 * @name: the name of the directory entry
 * @length: the length of the name
 * @offset: the entry's offset in the directory
 * @inum: the inode number the entry points to
 * @type: the type of inode the entry points to
 *
 * For supporting NFS.
 *
 * Returns: 0 on success, 1 if buffer full
 */

static int filldir_bad_func(void *opaque, const char *name, unsigned int length,
			    uint64_t offset, struct gfs2_inum *inum,
			    unsigned int type)
{
	struct filldir_bad *fdb = (struct filldir_bad *)opaque;
	struct gfs2_sbd *sdp = fdb->fdb_sbd;
	struct filldir_bad_entry *fbe;

	if (fdb->fdb_entry_off == fdb->fdb_entry_num ||
	    fdb->fdb_name_off + length > fdb->fdb_name_size)
		return 1;

	fbe = &fdb->fdb_entry[fdb->fdb_entry_off];
	fbe->fbe_name = fdb->fdb_name + fdb->fdb_name_off;
	memcpy(fbe->fbe_name, name, length);
	fbe->fbe_length = length;
	fbe->fbe_offset = offset;
	fbe->fbe_inum = *inum;
	fbe->fbe_type = type;

	fdb->fdb_entry_off++;
	fdb->fdb_name_off += length;

	if (!(length == 1 && *name == '.')) {
		gfs2_glock_prefetch_num(sdp,
					inum->no_addr, &gfs2_inode_glops,
					LM_ST_SHARED, LM_FLAG_TRY | LM_FLAG_ANY);
		gfs2_glock_prefetch_num(sdp,
					inum->no_addr, &gfs2_iopen_glops,
					LM_ST_SHARED, LM_FLAG_TRY);
	}

	return 0;
}

/**
 * readdir_bad - Read directory entries from a directory
 * @file: The directory to read from
 * @dirent: Buffer for dirents
 * @filldir: Function used to do the copying
 *
 * For supporting NFS.
 *
 * Returns: errno
 */

static int readdir_bad(struct file *file, void *dirent, filldir_t filldir)
{
	struct gfs2_inode *dip = get_v2ip(file->f_mapping->host);
	struct gfs2_sbd *sdp = dip->i_sbd;
	struct filldir_reg fdr;
	unsigned int entries, size;
	struct filldir_bad *fdb;
	struct gfs2_holder d_gh;
	uint64_t offset = file->f_pos;
	unsigned int x;
	struct filldir_bad_entry *fbe;
	int error;

	entries = gfs2_tune_get(sdp, gt_entries_per_readdir);
	size = sizeof(struct filldir_bad) +
	       entries * (sizeof(struct filldir_bad_entry) + GFS2_FAST_NAME_SIZE);

	fdb = kzalloc(size, GFP_KERNEL);
	if (!fdb)
		return -ENOMEM;

	fdb->fdb_sbd = sdp;
	fdb->fdb_entry = (struct filldir_bad_entry *)(fdb + 1);
	fdb->fdb_entry_num = entries;
	fdb->fdb_name = ((char *)fdb) + sizeof(struct filldir_bad) +
			entries * sizeof(struct filldir_bad_entry);
	fdb->fdb_name_size = entries * GFS2_FAST_NAME_SIZE;

	gfs2_holder_init(dip->i_gl, LM_ST_SHARED, GL_ATIME, &d_gh);
	error = gfs2_glock_nq_atime(&d_gh);
	if (error) {
		gfs2_holder_uninit(&d_gh);
		goto out;
	}

	error = gfs2_dir_read(dip, &offset, fdb, filldir_bad_func);

	gfs2_glock_dq_uninit(&d_gh);

	fdr.fdr_sbd = sdp;
	fdr.fdr_prefetch = 0;
	fdr.fdr_filldir = filldir;
	fdr.fdr_opaque = dirent;

	for (x = 0; x < fdb->fdb_entry_off; x++) {
		fbe = &fdb->fdb_entry[x];

		error = filldir_reg_func(&fdr,
					 fbe->fbe_name, fbe->fbe_length,
					 fbe->fbe_offset,
					 &fbe->fbe_inum, fbe->fbe_type);
		if (error) {
			file->f_pos = fbe->fbe_offset;
			error = 0;
			goto out;
		}
	}

	file->f_pos = offset;

out:
	kfree(fdb);

	return error;
}

/**
 * gfs2_readdir - Read directory entries from a directory
 * @file: The directory to read from
 * @dirent: Buffer for dirents
 * @filldir: Function used to do the copying
 *
 * Returns: errno
 */

static int gfs2_readdir(struct file *file, void *dirent, filldir_t filldir)
{
	int error;

	atomic_inc(&get_v2sdp(file->f_mapping->host->i_sb)->sd_ops_file);

	if (strcmp(current->comm, "nfsd") != 0)
		error = readdir_reg(file, dirent, filldir);
	else
		error = readdir_bad(file, dirent, filldir);

	return error;
}

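/**
 * gfs2_ioctl_flags - get or set the GFS2 flags on an inode
 * @ip: the inode
 * @cmd: GFS2_IOCTL_GETFLAGS or GFS2_IOCTL_SETFLAGS
 * @arg: user pointer to a __u32 flags word
 *
 * The inode glock is taken shared for GETFLAGS and exclusive for SETFLAGS.
 * Changing GFS2_DIF_IMMUTABLE or GFS2_DIF_APPENDONLY requires
 * CAP_LINUX_IMMUTABLE; the JDATA/DIRECTIO flags apply only to regular
 * files and their INHERIT_* variants only to directories.
 *
 * Returns: errno
 */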
static int gfs2_ioctl_flags(struct gfs2_inode *ip, unsigned int cmd,
			    unsigned long arg)
{
	unsigned int lmode = (cmd == GFS2_IOCTL_SETFLAGS) ? LM_ST_EXCLUSIVE :
							    LM_ST_SHARED;
	struct buffer_head *dibh;
	struct gfs2_holder i_gh;
	int error;
	__u32 flags = 0, change;

	if (cmd == GFS2_IOCTL_SETFLAGS) {
		error = get_user(flags, (__u32 __user *)arg);
		if (error)
			return -EFAULT;
	}

	error = gfs2_glock_nq_init(ip->i_gl, lmode, 0, &i_gh);
	if (error)
		return error;

	if (cmd == GFS2_IOCTL_SETFLAGS) {
		change = flags ^ ip->i_di.di_flags;
		error = -EPERM;
		if (change & (GFS2_DIF_IMMUTABLE | GFS2_DIF_APPENDONLY)) {
			if (!capable(CAP_LINUX_IMMUTABLE))
				goto out;
		}
		error = -EINVAL;
		if (flags & (GFS2_DIF_JDATA | GFS2_DIF_DIRECTIO)) {
			if (!S_ISREG(ip->i_di.di_mode))
				goto out;
		}
		if (flags & (GFS2_DIF_INHERIT_JDATA | GFS2_DIF_INHERIT_DIRECTIO)) {
			if (!S_ISDIR(ip->i_di.di_mode))
				goto out;
		}

		error = gfs2_trans_begin(ip->i_sbd, RES_DINODE, 0);
		if (error)
			goto out;

		error = gfs2_meta_inode_buffer(ip, &dibh);
		if (error)
			goto out_trans_end;

		ip->i_di.di_flags = flags;

		gfs2_trans_add_bh(ip->i_gl, dibh, 1);
		gfs2_dinode_out(&ip->i_di, dibh->b_data);

		brelse(dibh);

out_trans_end:
		gfs2_trans_end(ip->i_sbd);
	} else {
		flags = ip->i_di.di_flags;
	}
out:
	gfs2_glock_dq_uninit(&i_gh);
	if (cmd == GFS2_IOCTL_GETFLAGS) {
		if (put_user(flags, (__u32 __user *)arg))
			return -EFAULT;
	}
	return error;
}

/**
 * gfs2_ioctl - do an ioctl on a file
 * @inode: the inode
 * @file: the file pointer
 * @cmd: the ioctl command
 * @arg: the argument
 *
 * Returns: errno
 */

static int gfs2_ioctl(struct inode *inode, struct file *file, unsigned int cmd,
		      unsigned long arg)
{
	struct gfs2_inode *ip = get_v2ip(inode);

	atomic_inc(&ip->i_sbd->sd_ops_file);

	switch (cmd) {
	case GFS2_IOCTL_SETFLAGS:
	case GFS2_IOCTL_GETFLAGS:
		return gfs2_ioctl_flags(ip, cmd, arg);

	default:
		return -ENOTTY;
	}
}

/**
 * gfs2_mmap - set up a memory mapping of a file
 * @file: The file to map
 * @vma: The VMA which describes the mapping
 *
 * Returns: 0 or error code
 */

static int gfs2_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct gfs2_inode *ip = get_v2ip(file->f_mapping->host);
	struct gfs2_holder i_gh;
	int error;

	atomic_inc(&ip->i_sbd->sd_ops_file);

	gfs2_holder_init(ip->i_gl, LM_ST_SHARED, GL_ATIME, &i_gh);
	error = gfs2_glock_nq_atime(&i_gh);
	if (error) {
		gfs2_holder_uninit(&i_gh);
		return error;
	}

	/* This is VM_MAYWRITE instead of VM_WRITE because a call
	   to mprotect() can turn on VM_WRITE later. */

	if ((vma->vm_flags & (VM_MAYSHARE | VM_MAYWRITE)) ==
	    (VM_MAYSHARE | VM_MAYWRITE))
		vma->vm_ops = &gfs2_vm_ops_sharewrite;
	else
		vma->vm_ops = &gfs2_vm_ops_private;

	gfs2_glock_dq_uninit(&i_gh);

	return error;
}

/**
 * gfs2_open - open a file
 * @inode: the inode to open
 * @file: the struct file for this opening
 *
 * Returns: errno
 */

static int gfs2_open(struct inode *inode, struct file *file)
{
	struct gfs2_inode *ip = get_v2ip(inode);
	struct gfs2_holder i_gh;
	struct gfs2_file *fp;
	int error;

	atomic_inc(&ip->i_sbd->sd_ops_file);

	fp = kzalloc(sizeof(struct gfs2_file), GFP_KERNEL);
	if (!fp)
		return -ENOMEM;

	init_MUTEX(&fp->f_fl_mutex);

	fp->f_inode = ip;
	fp->f_vfile = file;

	gfs2_assert_warn(ip->i_sbd, !get_v2fp(file));
	set_v2fp(file, fp);

	if (S_ISREG(ip->i_di.di_mode)) {
		error = gfs2_glock_nq_init(ip->i_gl, LM_ST_SHARED, LM_FLAG_ANY,
					   &i_gh);
		if (error)
			goto fail;

		if (!(file->f_flags & O_LARGEFILE) &&
		    ip->i_di.di_size > MAX_NON_LFS) {
			error = -EFBIG;
			goto fail_gunlock;
		}

		/* Listen to the Direct I/O flag */

		if (ip->i_di.di_flags & GFS2_DIF_DIRECTIO)
			file->f_flags |= O_DIRECT;

		gfs2_glock_dq_uninit(&i_gh);
	}

	return 0;

fail_gunlock:
	gfs2_glock_dq_uninit(&i_gh);

fail:
	set_v2fp(file, NULL);
	kfree(fp);

	return error;
}

/**
 * gfs2_close - called to close a struct file
 * @inode: the inode the struct file belongs to
 * @file: the struct file being closed
 *
 * Returns: errno
 */

static int gfs2_close(struct inode *inode, struct file *file)
{
	struct gfs2_sbd *sdp = get_v2sdp(inode->i_sb);
	struct gfs2_file *fp;

	atomic_inc(&sdp->sd_ops_file);

	fp = get_v2fp(file);
	set_v2fp(file, NULL);

	if (gfs2_assert_warn(sdp, fp))
		return -EIO;

	kfree(fp);

	return 0;
}

/**
 * gfs2_fsync - sync the dirty data for a file (across the cluster)
 * @file: the file that points to the dentry (we ignore this)
 * @dentry: the dentry that points to the inode to sync
 * @datasync: non-zero for a datasync request (currently ignored)
 *
 * Returns: errno
 */

static int gfs2_fsync(struct file *file, struct dentry *dentry, int datasync)
{
	struct gfs2_inode *ip = get_v2ip(dentry->d_inode);

	atomic_inc(&ip->i_sbd->sd_ops_file);
	gfs2_log_flush_glock(ip->i_gl);

	return 0;
}

/**
 * gfs2_lock - acquire/release a posix lock on a file
 * @file: the file pointer
 * @cmd: either modify or retrieve lock state, possibly wait
 * @fl: type and range of lock
 *
 * Returns: errno
 */

static int gfs2_lock(struct file *file, int cmd, struct file_lock *fl)
{
	struct gfs2_inode *ip = get_v2ip(file->f_mapping->host);
	struct gfs2_sbd *sdp = ip->i_sbd;
	struct lm_lockname name =
		{ .ln_number = ip->i_num.no_addr,
		  .ln_type = LM_TYPE_PLOCK };

	atomic_inc(&sdp->sd_ops_file);

	if (!(fl->fl_flags & FL_POSIX))
		return -ENOLCK;
	if ((ip->i_di.di_mode & (S_ISGID | S_IXGRP)) == S_ISGID)
		return -ENOLCK;

	if (sdp->sd_args.ar_localflocks) {
		if (IS_GETLK(cmd)) {
			struct file_lock *tmp;
			lock_kernel();
			tmp = posix_test_lock(file, fl);
			fl->fl_type = F_UNLCK;
			if (tmp)
				memcpy(fl, tmp, sizeof(struct file_lock));
			unlock_kernel();
			return 0;
		} else {
			int error;
			lock_kernel();
			error = posix_lock_file_wait(file, fl);
			unlock_kernel();
			return error;
		}
	}

	if (IS_GETLK(cmd))
		return gfs2_lm_plock_get(sdp, &name, file, fl);
	else if (fl->fl_type == F_UNLCK)
		return gfs2_lm_punlock(sdp, &name, file, fl);
	else
		return gfs2_lm_plock(sdp, &name, file, cmd, fl);
}

/**
 * gfs2_sendfile - Send bytes to a file or socket
 * @in_file: The file to read from
 * @offset: The beginning file offset
 * @count: The amount of data
 * @actor: The actor that copies the data to the target
 * @target: The destination passed to the actor (e.g. a file or socket)
 *
 * Outputs: offset - updated according to number of bytes read
 *
 * Returns: The number of bytes sent, errno on failure
 */

static ssize_t gfs2_sendfile(struct file *in_file, loff_t *offset, size_t count,
			     read_actor_t actor, void *target)
{
	struct gfs2_inode *ip = get_v2ip(in_file->f_mapping->host);

	atomic_inc(&ip->i_sbd->sd_ops_file);

	return generic_file_sendfile(in_file, offset, count, actor, target);
}

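/*
 * do_flock - acquire or convert a flock lock
 *
 * A cluster-wide flock is implemented by holding a dedicated "flock"
 * glock for the inode, whose state mirrors the requested lock type
 * (LM_ST_EXCLUSIVE for F_WRLCK, LM_ST_SHARED otherwise). The local VFS
 * flock is only taken once the glock has been granted.
 */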
static int do_flock(struct file *file, int cmd, struct file_lock *fl)
{
	struct gfs2_file *fp = get_v2fp(file);
	struct gfs2_holder *fl_gh = &fp->f_fl_gh;
	struct gfs2_inode *ip = fp->f_inode;
	struct gfs2_glock *gl;
	unsigned int state;
	int flags;
	int error = 0;

	state = (fl->fl_type == F_WRLCK) ? LM_ST_EXCLUSIVE : LM_ST_SHARED;
	flags = ((IS_SETLKW(cmd)) ? 0 : LM_FLAG_TRY) | GL_EXACT | GL_NOCACHE;

	down(&fp->f_fl_mutex);

	gl = fl_gh->gh_gl;
	if (gl) {
		if (fl_gh->gh_state == state)
			goto out;
		gfs2_glock_hold(gl);
		flock_lock_file_wait(file,
				     &(struct file_lock){.fl_type = F_UNLCK});
		gfs2_glock_dq_uninit(fl_gh);
	} else {
		error = gfs2_glock_get(ip->i_sbd,
				       ip->i_num.no_addr, &gfs2_flock_glops,
				       CREATE, &gl);
		if (error)
			goto out;
	}

	gfs2_holder_init(gl, state, flags, fl_gh);
	gfs2_glock_put(gl);

	error = gfs2_glock_nq(fl_gh);
	if (error) {
		gfs2_holder_uninit(fl_gh);
		if (error == GLR_TRYFAILED)
			error = -EAGAIN;
	} else {
		error = flock_lock_file_wait(file, fl);
		gfs2_assert_warn(ip->i_sbd, !error);
	}

out:
	up(&fp->f_fl_mutex);

	return error;
}

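/*
 * do_unflock - release a flock lock: drop the local VFS flock and, if a
 * flock glock is held, release it as well.
 */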
static void do_unflock(struct file *file, struct file_lock *fl)
{
	struct gfs2_file *fp = get_v2fp(file);
	struct gfs2_holder *fl_gh = &fp->f_fl_gh;

	down(&fp->f_fl_mutex);
	flock_lock_file_wait(file, fl);
	if (fl_gh->gh_gl)
		gfs2_glock_dq_uninit(fl_gh);
	up(&fp->f_fl_mutex);
}

/**
 * gfs2_flock - acquire/release a flock lock on a file
 * @file: the file pointer
 * @cmd: either modify or retrieve lock state, possibly wait
 * @fl: type and range of lock
 *
 * Returns: errno
 */

static int gfs2_flock(struct file *file, int cmd, struct file_lock *fl)
{
	struct gfs2_inode *ip = get_v2ip(file->f_mapping->host);
	struct gfs2_sbd *sdp = ip->i_sbd;

	atomic_inc(&sdp->sd_ops_file);

	if (!(fl->fl_flags & FL_FLOCK))
		return -ENOLCK;
	if ((ip->i_di.di_mode & (S_ISGID | S_IXGRP)) == S_ISGID)
		return -ENOLCK;

	if (sdp->sd_args.ar_localflocks)
		return flock_lock_file_wait(file, fl);

	if (fl->fl_type == F_UNLCK) {
		do_unflock(file, fl);
		return 0;
	} else
		return do_flock(file, cmd, fl);
}

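/*
 * The VFS operation tables. Reads go through the GFS2-specific wrappers
 * above so that O_DIRECT locking is handled correctly; writes use the
 * generic page-cache paths.
 */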
struct file_operations gfs2_file_fops = {
	.llseek = gfs2_llseek,
	.read = gfs2_read,
	.readv = gfs2_file_readv,
	.aio_read = gfs2_file_aio_read,
	.write = generic_file_write,
	.writev = generic_file_writev,
	.aio_write = generic_file_aio_write,
	.ioctl = gfs2_ioctl,
	.mmap = gfs2_mmap,
	.open = gfs2_open,
	.release = gfs2_close,
	.fsync = gfs2_fsync,
	.lock = gfs2_lock,
	.sendfile = gfs2_sendfile,
	.flock = gfs2_flock,
};

struct file_operations gfs2_dir_fops = {
	.readdir = gfs2_readdir,
	.ioctl = gfs2_ioctl,
	.open = gfs2_open,
	.release = gfs2_close,
	.fsync = gfs2_fsync,
	.lock = gfs2_lock,
	.flock = gfs2_flock,
};