/*
 * blkfront.c
 *
 * XenLinux virtual block device driver.
 *
 * Copyright (c) 2003-2004, Keir Fraser & Steve Hand
 * Modifications by Mark A. Williamson are (c) Intel Research Cambridge
 * Copyright (c) 2004, Christian Limpach
 * Copyright (c) 2004, Andrew Warfield
 * Copyright (c) 2005, Christopher Clark
 * Copyright (c) 2005, XenSource Ltd
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation; or, when distributed
 * separately from the Linux kernel or incorporated into other
 * software packages, subject to the following license:
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this source file (the "Software"), to deal in the Software without
 * restriction, including without limitation the rights to use, copy, modify,
 * merge, publish, distribute, sublicense, and/or sell copies of the Software,
 * and to permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include <linux/interrupt.h>
#include <linux/blkdev.h>
#include <linux/hdreg.h>
#include <linux/cdrom.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/smp_lock.h>
#include <linux/scatterlist.h>

#include <xen/xen.h>
#include <xen/xenbus.h>
#include <xen/grant_table.h>
#include <xen/events.h>
#include <xen/page.h>

#include <xen/interface/grant_table.h>
#include <xen/interface/io/blkif.h>
#include <xen/interface/io/protocols.h>

#include <asm/xen/hypervisor.h>

enum blkif_state {
	BLKIF_STATE_DISCONNECTED,
	BLKIF_STATE_CONNECTED,
	BLKIF_STATE_SUSPENDED,
};

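/*
 * One blk_shadow per ring slot: a private copy of the in-flight request
 * and the frames it touches, kept so the request can be re-granted and
 * reissued after a suspend/resume (see blkif_recover()).
 */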
struct blk_shadow {
	struct blkif_request req;
	unsigned long request;
	unsigned long frame[BLKIF_MAX_SEGMENTS_PER_REQUEST];
};

static const struct block_device_operations xlvbd_block_fops;

#define BLK_RING_SIZE __RING_SIZE((struct blkif_sring *)0, PAGE_SIZE)
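/*
 * Worked example, assuming the classic 4 KiB page size: the shared page
 * holds the ring header plus as many request/response slots as fit,
 * rounded down to a power of two, which gives 32 entries. Each request
 * carries up to BLKIF_MAX_SEGMENTS_PER_REQUEST (11) page-sized segments,
 * so at most 32 * 11 * 4 KiB = 1408 KiB of I/O can be in flight at once.
 */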

/*
 * We have one of these per vbd, whether ide, scsi or 'other'.  They
 * hang in private_data off the gendisk structure. We may end up
 * putting all kinds of interesting stuff here :-)
 */
struct blkfront_info
{
	struct mutex mutex;
	struct xenbus_device *xbdev;
	struct gendisk *gd;
	int vdevice;
	blkif_vdev_t handle;
	enum blkif_state connected;
	int ring_ref;
	struct blkif_front_ring ring;
	struct scatterlist sg[BLKIF_MAX_SEGMENTS_PER_REQUEST];
	unsigned int evtchn, irq;
	struct request_queue *rq;
	struct work_struct work;
	struct gnttab_free_callback callback;
	struct blk_shadow shadow[BLK_RING_SIZE];
	unsigned long shadow_free;
	int feature_barrier;
	int is_ready;
};

static DEFINE_SPINLOCK(blkif_io_lock);

static unsigned int nr_minors;
static unsigned long *minors;
static DEFINE_SPINLOCK(minor_lock);

#define MAXIMUM_OUTSTANDING_BLOCK_REQS \
	(BLKIF_MAX_SEGMENTS_PER_REQUEST * BLK_RING_SIZE)
#define GRANT_INVALID_REF	0

#define PARTS_PER_DISK		16
#define PARTS_PER_EXT_DISK	256

#define BLKIF_MAJOR(dev) ((dev)>>8)
#define BLKIF_MINOR(dev) ((dev) & 0xff)

#define EXT_SHIFT 28
#define EXTENDED (1<<EXT_SHIFT)
#define VDEV_IS_EXTENDED(dev) ((dev)&(EXTENDED))
#define BLKIF_MINOR_EXT(dev) ((dev)&(~EXTENDED))

#define DEV_NAME	"xvd"	/* name in /dev */

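/*
 * The shadow entries double as a free list: while a slot is unused, its
 * req.id field links to the next free slot (terminated by the 0x0fffffff
 * sentinel set up in blkfront_probe()). Allocation and release are
 * therefore O(1) pushes and pops on shadow_free.
 */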
static int get_id_from_freelist(struct blkfront_info *info)
{
	unsigned long free = info->shadow_free;
	BUG_ON(free >= BLK_RING_SIZE);
	info->shadow_free = info->shadow[free].req.id;
	info->shadow[free].req.id = 0x0fffffee; /* debug */
	return free;
}

static void add_id_to_freelist(struct blkfront_info *info,
			       unsigned long id)
{
	info->shadow[id].req.id  = info->shadow_free;
	info->shadow[id].request = 0;
	info->shadow_free = id;
}

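/*
 * Minor numbers are tracked in a lazily grown bitmap. The replacement
 * bitmap below is allocated outside minor_lock and the size rechecked
 * under it, so a racing grower simply frees its now-unneeded allocation
 * instead of leaking it.
 */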
static int xlbd_reserve_minors(unsigned int minor, unsigned int nr)
{
	unsigned int end = minor + nr;
	int rc;

	if (end > nr_minors) {
		unsigned long *bitmap, *old;

		bitmap = kzalloc(BITS_TO_LONGS(end) * sizeof(*bitmap),
				 GFP_KERNEL);
		if (bitmap == NULL)
			return -ENOMEM;

		spin_lock(&minor_lock);
		if (end > nr_minors) {
			old = minors;
			memcpy(bitmap, minors,
			       BITS_TO_LONGS(nr_minors) * sizeof(*bitmap));
			minors = bitmap;
			nr_minors = BITS_TO_LONGS(end) * BITS_PER_LONG;
		} else
			old = bitmap;
		spin_unlock(&minor_lock);
		kfree(old);
	}

	spin_lock(&minor_lock);
	if (find_next_bit(minors, end, minor) >= end) {
		for (; minor < end; ++minor)
			__set_bit(minor, minors);
		rc = 0;
	} else
		rc = -EBUSY;
	spin_unlock(&minor_lock);

	return rc;
}

static void xlbd_release_minors(unsigned int minor, unsigned int nr)
{
	unsigned int end = minor + nr;

	BUG_ON(end > nr_minors);
	spin_lock(&minor_lock);
	for (; minor < end; ++minor)
		__clear_bit(minor, minors);
	spin_unlock(&minor_lock);
}

static void blkif_restart_queue_callback(void *arg)
{
	struct blkfront_info *info = (struct blkfront_info *)arg;
	schedule_work(&info->work);
}

static int blkif_getgeo(struct block_device *bd, struct hd_geometry *hg)
{
	/* We don't have real geometry info, but let's at least return
	   values consistent with the size of the device */
	sector_t nsect = get_capacity(bd->bd_disk);
	sector_t cylinders = nsect;

	hg->heads = 0xff;
	hg->sectors = 0x3f;
	sector_div(cylinders, hg->heads * hg->sectors);
	hg->cylinders = cylinders;
	if ((sector_t)(hg->cylinders + 1) * hg->heads * hg->sectors < nsect)
		hg->cylinders = 0xffff;
	return 0;
}

static int blkif_ioctl(struct block_device *bdev, fmode_t mode,
		       unsigned command, unsigned long argument)
{
	struct blkfront_info *info = bdev->bd_disk->private_data;
	int i;

	dev_dbg(&info->xbdev->dev, "command: 0x%x, argument: 0x%lx\n",
		command, (long)argument);

	switch (command) {
	case CDROMMULTISESSION:
		dev_dbg(&info->xbdev->dev, "FIXME: support multisession CDs later\n");
		for (i = 0; i < sizeof(struct cdrom_multisession); i++)
			if (put_user(0, (char __user *)(argument + i)))
				return -EFAULT;
		return 0;

	case CDROM_GET_CAPABILITY: {
		struct gendisk *gd = info->gd;
		if (gd->flags & GENHD_FL_CD)
			return 0;
		return -EINVAL;
	}

	default:
		/*printk(KERN_ALERT "ioctl %08x not supported by Xen blkdev\n",
		  command);*/
		return -EINVAL; /* same return as native Linux */
	}

	return 0;
}

/*
 * blkif_queue_request
 *
 * request block io
 *
 * id: for guest use only.
 * operation: BLKIF_OP_{READ,WRITE,PROBE}
 * buffer: buffer to read/write into. this should be a
 *   virtual address in the guest os.
 */
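/*
 * Each scatterlist segment is turned into a grant reference so the
 * backend can map the guest's pages directly. With 4 KiB pages the
 * upper bound per request is BLKIF_MAX_SEGMENTS_PER_REQUEST (11)
 * segments of 8 sectors each, i.e. 44 KiB per ring slot; the queue
 * limits set in xlvbd_init_blk_queue() keep merged requests within
 * that bound.
 */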
static int blkif_queue_request(struct request *req)
{
	struct blkfront_info *info = req->rq_disk->private_data;
	unsigned long buffer_mfn;
	struct blkif_request *ring_req;
	unsigned long id;
	unsigned int fsect, lsect;
	int i, ref;
	grant_ref_t gref_head;
	struct scatterlist *sg;

	if (unlikely(info->connected != BLKIF_STATE_CONNECTED))
		return 1;

	if (gnttab_alloc_grant_references(
		BLKIF_MAX_SEGMENTS_PER_REQUEST, &gref_head) < 0) {
		gnttab_request_free_callback(
			&info->callback,
			blkif_restart_queue_callback,
			info,
			BLKIF_MAX_SEGMENTS_PER_REQUEST);
		return 1;
	}

	/* Fill out a communications ring structure. */
	ring_req = RING_GET_REQUEST(&info->ring, info->ring.req_prod_pvt);
	id = get_id_from_freelist(info);
	info->shadow[id].request = (unsigned long)req;

	ring_req->id = id;
	ring_req->sector_number = (blkif_sector_t)blk_rq_pos(req);
	ring_req->handle = info->handle;

	ring_req->operation = rq_data_dir(req) ?
		BLKIF_OP_WRITE : BLKIF_OP_READ;
	if (req->cmd_flags & REQ_HARDBARRIER)
		ring_req->operation = BLKIF_OP_WRITE_BARRIER;

	ring_req->nr_segments = blk_rq_map_sg(req->q, req, info->sg);
	BUG_ON(ring_req->nr_segments > BLKIF_MAX_SEGMENTS_PER_REQUEST);

	for_each_sg(info->sg, sg, ring_req->nr_segments, i) {
		buffer_mfn = pfn_to_mfn(page_to_pfn(sg_page(sg)));
		fsect = sg->offset >> 9;
		lsect = fsect + (sg->length >> 9) - 1;
		/* install a grant reference. */
		ref = gnttab_claim_grant_reference(&gref_head);
		BUG_ON(ref == -ENOSPC);

		gnttab_grant_foreign_access_ref(
				ref,
				info->xbdev->otherend_id,
				buffer_mfn,
				rq_data_dir(req));

		info->shadow[id].frame[i] = mfn_to_pfn(buffer_mfn);
		ring_req->seg[i] =
				(struct blkif_request_segment) {
					.gref       = ref,
					.first_sect = fsect,
					.last_sect  = lsect };
	}

	info->ring.req_prod_pvt++;

	/* Keep a private copy so we can reissue requests when recovering. */
	info->shadow[id].req = *ring_req;

	gnttab_free_grant_references(gref_head);

	return 0;
}

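/*
 * RING_PUSH_REQUESTS_AND_CHECK_NOTIFY only asks for a notification when
 * the backend is actually waiting (tracked via the ring's req_event
 * field), so the event channel is not kicked for every request under
 * load.
 */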
static inline void flush_requests(struct blkfront_info *info)
{
	int notify;

	RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&info->ring, notify);

	if (notify)
		notify_remote_via_irq(info->irq);
}

/*
 * do_blkif_request
 *  read a block; request is in a request queue
 */
static void do_blkif_request(struct request_queue *rq)
{
	struct blkfront_info *info = NULL;
	struct request *req;
	int queued;

	pr_debug("Entered do_blkif_request\n");

	queued = 0;

	while ((req = blk_peek_request(rq)) != NULL) {
		info = req->rq_disk->private_data;

		if (RING_FULL(&info->ring))
			goto wait;

		blk_start_request(req);

		if (req->cmd_type != REQ_TYPE_FS) {
			__blk_end_request_all(req, -EIO);
			continue;
		}

		pr_debug("do_blk_req %p: cmd %p, sec %lx, "
			 "(%u/%u) buffer:%p [%s]\n",
			 req, req->cmd, (unsigned long)blk_rq_pos(req),
			 blk_rq_cur_sectors(req), blk_rq_sectors(req),
			 req->buffer, rq_data_dir(req) ? "write" : "read");

		if (blkif_queue_request(req)) {
			blk_requeue_request(rq, req);
wait:
			/* Avoid pointless unplugs. */
			blk_stop_queue(rq);
			break;
		}

		queued++;
	}

	if (queued != 0)
		flush_requests(info);
}

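/*
 * The queue limits below mirror the ring's own constraints: one
 * page-aligned segment per ring segment slot, sector-aligned buffers,
 * and no bounce buffering, since the backend maps guest memory
 * directly.
 */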
static int xlvbd_init_blk_queue(struct gendisk *gd, u16 sector_size)
{
	struct request_queue *rq;

	rq = blk_init_queue(do_blkif_request, &blkif_io_lock);
	if (rq == NULL)
		return -1;

	queue_flag_set_unlocked(QUEUE_FLAG_VIRT, rq);

	/* Hard sector size and max sectors impersonate the equiv. hardware. */
	blk_queue_logical_block_size(rq, sector_size);
	blk_queue_max_hw_sectors(rq, 512);

	/* Each segment in a request is up to an aligned page in size. */
	blk_queue_segment_boundary(rq, PAGE_SIZE - 1);
	blk_queue_max_segment_size(rq, PAGE_SIZE);

	/* Ensure a merged request will fit in a single I/O ring slot. */
	blk_queue_max_segments(rq, BLKIF_MAX_SEGMENTS_PER_REQUEST);

	/* Make sure buffer addresses are sector-aligned. */
	blk_queue_dma_alignment(rq, 511);

	/* Make sure we don't use bounce buffers. */
	blk_queue_bounce_limit(rq, BLK_BOUNCE_ANY);

	gd->queue = rq;

	return 0;
}


static int xlvbd_barrier(struct blkfront_info *info)
{
	int err;
	unsigned ordered = QUEUE_ORDERED_NONE;

	/*
	 * If we don't have barrier support, then there's really no
	 * way to guarantee write ordering, so we really just have to
	 * send writes to the backend and hope for the best.  If
	 * barriers are supported then we can treat them as proper
	 * ordering tags.
	 */
	if (info->feature_barrier)
		ordered = QUEUE_ORDERED_TAG;

	err = blk_queue_ordered(info->rq, ordered);

	if (err)
		return err;

	printk(KERN_INFO "blkfront: %s: barriers %s\n",
	       info->gd->disk_name,
	       info->feature_barrier ? "enabled" : "disabled");
	return 0;
}

static int xlvbd_alloc_gendisk(blkif_sector_t capacity,
			       struct blkfront_info *info,
			       u16 vdisk_info, u16 sector_size)
{
	struct gendisk *gd;
	int nr_minors = 1;
	int err = -ENODEV;
	unsigned int offset;
	int minor;
	int nr_parts;

	BUG_ON(info->gd != NULL);
	BUG_ON(info->rq != NULL);

	if ((info->vdevice>>EXT_SHIFT) > 1) {
		/* this is above the extended range; something is wrong */
		printk(KERN_WARNING "blkfront: vdevice 0x%x is above the extended range; ignoring\n", info->vdevice);
		return -ENODEV;
	}

	if (!VDEV_IS_EXTENDED(info->vdevice)) {
		minor = BLKIF_MINOR(info->vdevice);
		nr_parts = PARTS_PER_DISK;
	} else {
		minor = BLKIF_MINOR_EXT(info->vdevice);
		nr_parts = PARTS_PER_EXT_DISK;
	}

	if ((minor % nr_parts) == 0)
		nr_minors = nr_parts;

	err = xlbd_reserve_minors(minor, nr_minors);
	if (err)
		goto out;
	err = -ENODEV;

	gd = alloc_disk(nr_minors);
	if (gd == NULL)
		goto release;

	offset = minor / nr_parts;

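	/*
	 * Examples of the naming scheme below: minor 0 becomes "xvda",
	 * offset 26 wraps to "xvdaa"; a non-zero partition within a disk
	 * gets the partition number appended, e.g. "xvda1".
	 */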
	if (nr_minors > 1) {
		if (offset < 26)
			sprintf(gd->disk_name, "%s%c", DEV_NAME, 'a' + offset);
		else
			sprintf(gd->disk_name, "%s%c%c", DEV_NAME,
				'a' + ((offset / 26)-1), 'a' + (offset % 26));
	} else {
		if (offset < 26)
			sprintf(gd->disk_name, "%s%c%d", DEV_NAME,
				'a' + offset,
				minor & (nr_parts - 1));
		else
			sprintf(gd->disk_name, "%s%c%c%d", DEV_NAME,
				'a' + ((offset / 26) - 1),
				'a' + (offset % 26),
				minor & (nr_parts - 1));
	}

	gd->major = XENVBD_MAJOR;
	gd->first_minor = minor;
	gd->fops = &xlvbd_block_fops;
	gd->private_data = info;
	gd->driverfs_dev = &(info->xbdev->dev);
	set_capacity(gd, capacity);

	if (xlvbd_init_blk_queue(gd, sector_size)) {
		del_gendisk(gd);
		goto release;
	}

	info->rq = gd->queue;
	info->gd = gd;

	xlvbd_barrier(info);

	if (vdisk_info & VDISK_READONLY)
		set_disk_ro(gd, 1);

	if (vdisk_info & VDISK_REMOVABLE)
		gd->flags |= GENHD_FL_REMOVABLE;

	if (vdisk_info & VDISK_CDROM)
		gd->flags |= GENHD_FL_CD;

	return 0;

 release:
	xlbd_release_minors(minor, nr_minors);
 out:
	return err;
}

static void xlvbd_release_gendisk(struct blkfront_info *info)
{
	unsigned int minor, nr_minors;
	unsigned long flags;

	if (info->rq == NULL)
		return;

	spin_lock_irqsave(&blkif_io_lock, flags);

	/* No more blkif_request(). */
	blk_stop_queue(info->rq);

	/* No more gnttab callback work. */
	gnttab_cancel_free_callback(&info->callback);
	spin_unlock_irqrestore(&blkif_io_lock, flags);

	/* Flush gnttab callback work. Must be done with no locks held. */
	flush_scheduled_work();

	del_gendisk(info->gd);

	minor = info->gd->first_minor;
	nr_minors = info->gd->minors;
	xlbd_release_minors(minor, nr_minors);

	blk_cleanup_queue(info->rq);
	info->rq = NULL;

	put_disk(info->gd);
	info->gd = NULL;
}

static void kick_pending_request_queues(struct blkfront_info *info)
{
	if (!RING_FULL(&info->ring)) {
		/* Re-enable calldowns. */
		blk_start_queue(info->rq);
		/* Kick things off immediately. */
		do_blkif_request(info->rq);
	}
}

static void blkif_restart_queue(struct work_struct *work)
{
	struct blkfront_info *info = container_of(work, struct blkfront_info, work);

	spin_lock_irq(&blkif_io_lock);
	if (info->connected == BLKIF_STATE_CONNECTED)
		kick_pending_request_queues(info);
	spin_unlock_irq(&blkif_io_lock);
}

static void blkif_free(struct blkfront_info *info, int suspend)
{
	/* Prevent new requests being issued until we fix things up. */
	spin_lock_irq(&blkif_io_lock);
	info->connected = suspend ?
		BLKIF_STATE_SUSPENDED : BLKIF_STATE_DISCONNECTED;
	/* No more blkif_request(). */
	if (info->rq)
		blk_stop_queue(info->rq);
	/* No more gnttab callback work. */
	gnttab_cancel_free_callback(&info->callback);
	spin_unlock_irq(&blkif_io_lock);

	/* Flush gnttab callback work. Must be done with no locks held. */
	flush_scheduled_work();

	/* Free resources associated with old device channel. */
	if (info->ring_ref != GRANT_INVALID_REF) {
		gnttab_end_foreign_access(info->ring_ref, 0,
					  (unsigned long)info->ring.sring);
		info->ring_ref = GRANT_INVALID_REF;
		info->ring.sring = NULL;
	}
	if (info->irq)
		unbind_from_irqhandler(info->irq, info);
	info->evtchn = info->irq = 0;
}

static void blkif_completion(struct blk_shadow *s)
{
	int i;
	for (i = 0; i < s->req.nr_segments; i++)
		gnttab_end_foreign_access(s->req.seg[i].gref, 0, 0UL);
}

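/*
 * Response handling: walk the ring from rsp_cons to rsp_prod, revoke
 * the grants recorded in the shadow entry, recycle the id, and complete
 * the original struct request. A barrier op rejected by the backend
 * with BLKIF_RSP_EOPNOTSUPP disables feature_barrier and is reported as
 * -EOPNOTSUPP rather than treated as data loss.
 */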
static irqreturn_t blkif_interrupt(int irq, void *dev_id)
{
	struct request *req;
	struct blkif_response *bret;
	RING_IDX i, rp;
	unsigned long flags;
	struct blkfront_info *info = (struct blkfront_info *)dev_id;
	int error;

	spin_lock_irqsave(&blkif_io_lock, flags);

	if (unlikely(info->connected != BLKIF_STATE_CONNECTED)) {
		spin_unlock_irqrestore(&blkif_io_lock, flags);
		return IRQ_HANDLED;
	}

 again:
	rp = info->ring.sring->rsp_prod;
	rmb(); /* Ensure we see queued responses up to 'rp'. */

	for (i = info->ring.rsp_cons; i != rp; i++) {
		unsigned long id;

		bret = RING_GET_RESPONSE(&info->ring, i);
		id   = bret->id;
		req  = (struct request *)info->shadow[id].request;

		blkif_completion(&info->shadow[id]);

		add_id_to_freelist(info, id);

		error = (bret->status == BLKIF_RSP_OKAY) ? 0 : -EIO;
		switch (bret->operation) {
		case BLKIF_OP_WRITE_BARRIER:
			if (unlikely(bret->status == BLKIF_RSP_EOPNOTSUPP)) {
				printk(KERN_WARNING "blkfront: %s: write barrier op failed\n",
				       info->gd->disk_name);
				error = -EOPNOTSUPP;
				info->feature_barrier = 0;
				xlvbd_barrier(info);
			}
			/* fall through */
		case BLKIF_OP_READ:
		case BLKIF_OP_WRITE:
			if (unlikely(bret->status != BLKIF_RSP_OKAY))
				dev_dbg(&info->xbdev->dev, "Bad return from blkdev data "
					"request: %x\n", bret->status);

			__blk_end_request_all(req, error);
			break;
		default:
			BUG();
		}
	}

	info->ring.rsp_cons = i;

	if (i != info->ring.req_prod_pvt) {
		int more_to_do;
		RING_FINAL_CHECK_FOR_RESPONSES(&info->ring, more_to_do);
		if (more_to_do)
			goto again;
	} else
		info->ring.sring->rsp_event = i + 1;

	kick_pending_request_queues(info);

	spin_unlock_irqrestore(&blkif_io_lock, flags);

	return IRQ_HANDLED;
}

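/*
 * Allocate the shared ring page, grant the backend access to it, and
 * bind an event channel for completion interrupts. On any failure the
 * partially constructed channel is torn down via blkif_free().
 */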
static int setup_blkring(struct xenbus_device *dev,
			 struct blkfront_info *info)
{
	struct blkif_sring *sring;
	int err;

	info->ring_ref = GRANT_INVALID_REF;

	sring = (struct blkif_sring *)__get_free_page(GFP_NOIO | __GFP_HIGH);
	if (!sring) {
		xenbus_dev_fatal(dev, -ENOMEM, "allocating shared ring");
		return -ENOMEM;
	}
	SHARED_RING_INIT(sring);
	FRONT_RING_INIT(&info->ring, sring, PAGE_SIZE);

	sg_init_table(info->sg, BLKIF_MAX_SEGMENTS_PER_REQUEST);

	err = xenbus_grant_ring(dev, virt_to_mfn(info->ring.sring));
	if (err < 0) {
		free_page((unsigned long)sring);
		info->ring.sring = NULL;
		goto fail;
	}
	info->ring_ref = err;

	err = xenbus_alloc_evtchn(dev, &info->evtchn);
	if (err)
		goto fail;

	err = bind_evtchn_to_irqhandler(info->evtchn,
					blkif_interrupt,
					IRQF_SAMPLE_RANDOM, "blkif", info);
	if (err <= 0) {
		xenbus_dev_fatal(dev, err,
				 "bind_evtchn_to_irqhandler failed");
		goto fail;
	}
	info->irq = err;

	return 0;
fail:
	blkif_free(info, 0);
	return err;
}


/* Common code used when first setting up, and when resuming. */
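/*
 * The frontend advertises its half of the connection in its own
 * xenstore directory; an illustrative layout (the numeric values here
 * are made up, and the protocol string depends on the architecture):
 *
 *   device/vbd/51712/ring-ref      = "8"
 *   device/vbd/51712/event-channel = "13"
 *   device/vbd/51712/protocol      = "x86_64-abi"
 *
 * The writes are wrapped in a transaction and retried on -EAGAIN.
 */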
static int talk_to_blkback(struct xenbus_device *dev,
			   struct blkfront_info *info)
{
	const char *message = NULL;
	struct xenbus_transaction xbt;
	int err;

	/* Create shared ring, alloc event channel. */
	err = setup_blkring(dev, info);
	if (err)
		goto out;

again:
	err = xenbus_transaction_start(&xbt);
	if (err) {
		xenbus_dev_fatal(dev, err, "starting transaction");
		goto destroy_blkring;
	}

	err = xenbus_printf(xbt, dev->nodename,
			    "ring-ref", "%u", info->ring_ref);
	if (err) {
		message = "writing ring-ref";
		goto abort_transaction;
	}
	err = xenbus_printf(xbt, dev->nodename,
			    "event-channel", "%u", info->evtchn);
	if (err) {
		message = "writing event-channel";
		goto abort_transaction;
	}
	err = xenbus_printf(xbt, dev->nodename, "protocol", "%s",
			    XEN_IO_PROTO_ABI_NATIVE);
	if (err) {
		message = "writing protocol";
		goto abort_transaction;
	}

	err = xenbus_transaction_end(xbt, 0);
	if (err) {
		if (err == -EAGAIN)
			goto again;
		xenbus_dev_fatal(dev, err, "completing transaction");
		goto destroy_blkring;
	}

	xenbus_switch_state(dev, XenbusStateInitialised);

	return 0;

 abort_transaction:
	xenbus_transaction_end(xbt, 1);
	if (message)
		xenbus_dev_fatal(dev, err, "%s", message);
 destroy_blkring:
	blkif_free(info, 0);
 out:
	return err;
}

/**
 * Entry point to this code when a new device is created.  Allocate the basic
 * structures and the ring buffer for communication with the backend, and
 * inform the backend of the appropriate details for those.  Switch to
 * Initialised state.
 */
static int blkfront_probe(struct xenbus_device *dev,
			  const struct xenbus_device_id *id)
{
	int err, vdevice, i;
	struct blkfront_info *info;

	/* FIXME: Use dynamic device id if this is not set. */
	err = xenbus_scanf(XBT_NIL, dev->nodename,
			   "virtual-device", "%i", &vdevice);
	if (err != 1) {
		/* go looking in the extended area instead */
		err = xenbus_scanf(XBT_NIL, dev->nodename, "virtual-device-ext",
				   "%i", &vdevice);
		if (err != 1) {
			xenbus_dev_fatal(dev, err, "reading virtual-device");
			return err;
		}
	}

	info = kzalloc(sizeof(*info), GFP_KERNEL);
	if (!info) {
		xenbus_dev_fatal(dev, -ENOMEM, "allocating info structure");
		return -ENOMEM;
	}

	mutex_init(&info->mutex);
	info->xbdev = dev;
	info->vdevice = vdevice;
	info->connected = BLKIF_STATE_DISCONNECTED;
	INIT_WORK(&info->work, blkif_restart_queue);

	for (i = 0; i < BLK_RING_SIZE; i++)
		info->shadow[i].req.id = i+1;
	info->shadow[BLK_RING_SIZE-1].req.id = 0x0fffffff;

	/* Front end dir is a number, which is used as the id. */
	info->handle = simple_strtoul(strrchr(dev->nodename, '/')+1, NULL, 0);
	dev_set_drvdata(&dev->dev, info);

	err = talk_to_blkback(dev, info);
	if (err) {
		kfree(info);
		dev_set_drvdata(&dev->dev, NULL);
		return err;
	}

	return 0;
}

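/*
 * Replay outstanding requests after a migration or resume: snapshot the
 * shadow array, rebuild the free list, then rewrite the grant entries
 * of every in-flight request (the old grants were invalidated along
 * with the previous domain instance) and queue it on the fresh ring.
 */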
static int blkif_recover(struct blkfront_info *info)
{
	int i;
	struct blkif_request *req;
	struct blk_shadow *copy;
	int j;

	/* Stage 1: Make a safe copy of the shadow state. */
	copy = kmalloc(sizeof(info->shadow),
		       GFP_NOIO | __GFP_REPEAT | __GFP_HIGH);
	if (!copy)
		return -ENOMEM;
	memcpy(copy, info->shadow, sizeof(info->shadow));

	/* Stage 2: Set up free list. */
	memset(&info->shadow, 0, sizeof(info->shadow));
	for (i = 0; i < BLK_RING_SIZE; i++)
		info->shadow[i].req.id = i+1;
	info->shadow_free = info->ring.req_prod_pvt;
	info->shadow[BLK_RING_SIZE-1].req.id = 0x0fffffff;

	/* Stage 3: Find pending requests and requeue them. */
	for (i = 0; i < BLK_RING_SIZE; i++) {
		/* Not in use? */
		if (copy[i].request == 0)
			continue;

		/* Grab a request slot and copy shadow state into it. */
		req = RING_GET_REQUEST(&info->ring, info->ring.req_prod_pvt);
		*req = copy[i].req;

		/* We get a new request id, and must reset the shadow state. */
		req->id = get_id_from_freelist(info);
		memcpy(&info->shadow[req->id], &copy[i], sizeof(copy[i]));

		/* Rewrite any grant references invalidated by susp/resume. */
		for (j = 0; j < req->nr_segments; j++)
			gnttab_grant_foreign_access_ref(
				req->seg[j].gref,
				info->xbdev->otherend_id,
				pfn_to_mfn(info->shadow[req->id].frame[j]),
				rq_data_dir(
					(struct request *)
					info->shadow[req->id].request));
		info->shadow[req->id].req = *req;

		info->ring.req_prod_pvt++;
	}

	kfree(copy);

	xenbus_switch_state(info->xbdev, XenbusStateConnected);

	spin_lock_irq(&blkif_io_lock);

	/* Now safe for us to use the shared ring */
	info->connected = BLKIF_STATE_CONNECTED;

	/* Send off requeued requests */
	flush_requests(info);

	/* Kick any other new requests queued since we resumed */
	kick_pending_request_queues(info);

	spin_unlock_irq(&blkif_io_lock);

	return 0;
}

/**
 * We are reconnecting to the backend, due to a suspend/resume, or a backend
 * driver restart.  We tear down our blkif structure and recreate it, but
 * leave the device-layer structures intact so that this is transparent to the
 * rest of the kernel.
 */
static int blkfront_resume(struct xenbus_device *dev)
{
	struct blkfront_info *info = dev_get_drvdata(&dev->dev);
	int err;

	dev_dbg(&dev->dev, "blkfront_resume: %s\n", dev->nodename);

	blkif_free(info, info->connected == BLKIF_STATE_CONNECTED);

	err = talk_to_blkback(dev, info);
	if (info->connected == BLKIF_STATE_SUSPENDED && !err)
		err = blkif_recover(info);

	return err;
}

static void
blkfront_closing(struct blkfront_info *info)
{
	struct xenbus_device *xbdev = info->xbdev;
	struct block_device *bdev = NULL;

	mutex_lock(&info->mutex);

	if (xbdev->state == XenbusStateClosing) {
		mutex_unlock(&info->mutex);
		return;
	}

	if (info->gd)
		bdev = bdget_disk(info->gd, 0);

	mutex_unlock(&info->mutex);

	if (!bdev) {
		xenbus_frontend_closed(xbdev);
		return;
	}

	mutex_lock(&bdev->bd_mutex);

	if (bdev->bd_openers) {
		xenbus_dev_error(xbdev, -EBUSY,
				 "Device in use; refusing to close");
		xenbus_switch_state(xbdev, XenbusStateClosing);
	} else {
		xlvbd_release_gendisk(info);
		xenbus_frontend_closed(xbdev);
	}

	mutex_unlock(&bdev->bd_mutex);
	bdput(bdev);
}

/*
 * Invoked when the backend is finally 'ready' (and has produced the
 * details about the physical device - #sectors, size, etc).
 */
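/*
 * The backend's details live in its own xenstore directory
 * (info->xbdev->otherend): "sectors", "info" (VDISK_* flags),
 * "sector-size" and the optional "feature-barrier" node. A repeated
 * XenbusStateConnected notification while already connected is treated
 * purely as a capacity change.
 */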
static void blkfront_connect(struct blkfront_info *info)
{
	unsigned long long sectors;
	unsigned long sector_size;
	unsigned int binfo;
	int err;

	switch (info->connected) {
	case BLKIF_STATE_CONNECTED:
		/*
		 * Potentially, the back-end may be signalling
		 * a capacity change; update the capacity.
		 */
		err = xenbus_scanf(XBT_NIL, info->xbdev->otherend,
				   "sectors", "%Lu", &sectors);
		if (XENBUS_EXIST_ERR(err))
			return;
		printk(KERN_INFO "Setting capacity to %Lu\n",
		       sectors);
		set_capacity(info->gd, sectors);
		revalidate_disk(info->gd);

		/* fall through */
	case BLKIF_STATE_SUSPENDED:
		return;

	default:
		break;
	}

	dev_dbg(&info->xbdev->dev, "%s:%s.\n",
		__func__, info->xbdev->otherend);

	err = xenbus_gather(XBT_NIL, info->xbdev->otherend,
			    "sectors", "%llu", &sectors,
			    "info", "%u", &binfo,
			    "sector-size", "%lu", &sector_size,
			    NULL);
	if (err) {
		xenbus_dev_fatal(info->xbdev, err,
				 "reading backend fields at %s",
				 info->xbdev->otherend);
		return;
	}

	err = xenbus_gather(XBT_NIL, info->xbdev->otherend,
			    "feature-barrier", "%lu", &info->feature_barrier,
			    NULL);
	if (err)
		info->feature_barrier = 0;

	err = xlvbd_alloc_gendisk(sectors, info, binfo, sector_size);
	if (err) {
		xenbus_dev_fatal(info->xbdev, err, "xlvbd_add at %s",
				 info->xbdev->otherend);
		return;
	}

	xenbus_switch_state(info->xbdev, XenbusStateConnected);

	/* Kick pending requests. */
	spin_lock_irq(&blkif_io_lock);
	info->connected = BLKIF_STATE_CONNECTED;
	kick_pending_request_queues(info);
	spin_unlock_irq(&blkif_io_lock);

	add_disk(info->gd);

	info->is_ready = 1;
}

/**
 * Callback received when the backend's state changes.
 */
static void blkback_changed(struct xenbus_device *dev,
			    enum xenbus_state backend_state)
{
	struct blkfront_info *info = dev_get_drvdata(&dev->dev);

	dev_dbg(&dev->dev, "blkfront:blkback_changed to state %d.\n", backend_state);

	switch (backend_state) {
	case XenbusStateInitialising:
	case XenbusStateInitWait:
	case XenbusStateInitialised:
	case XenbusStateUnknown:
	case XenbusStateClosed:
		break;

	case XenbusStateConnected:
		blkfront_connect(info);
		break;

	case XenbusStateClosing:
		blkfront_closing(info);
		break;
	}
}

static int blkfront_remove(struct xenbus_device *xbdev)
{
	struct blkfront_info *info = dev_get_drvdata(&xbdev->dev);
	struct block_device *bdev = NULL;
	struct gendisk *disk;

	dev_dbg(&xbdev->dev, "%s removed", xbdev->nodename);

	blkif_free(info, 0);

	mutex_lock(&info->mutex);

	disk = info->gd;
	if (disk)
		bdev = bdget_disk(disk, 0);

	info->xbdev = NULL;
	mutex_unlock(&info->mutex);

	if (!bdev) {
		kfree(info);
		return 0;
	}

	/*
	 * The xbdev was removed before we reached the Closed
	 * state. See if it's safe to remove the disk. If the bdev
	 * isn't closed yet, we let release take care of it.
	 */

	mutex_lock(&bdev->bd_mutex);
	info = disk->private_data;

	dev_warn(disk_to_dev(disk),
		 "%s was hot-unplugged, %d stale handles\n",
		 xbdev->nodename, bdev->bd_openers);

	if (info && !bdev->bd_openers) {
		xlvbd_release_gendisk(info);
		disk->private_data = NULL;
		kfree(info);
	}

	mutex_unlock(&bdev->bd_mutex);
	bdput(bdev);

	return 0;
}

static int blkfront_is_ready(struct xenbus_device *dev)
{
	struct blkfront_info *info = dev_get_drvdata(&dev->dev);

	return info->is_ready && info->xbdev;
}

static int blkif_open(struct block_device *bdev, fmode_t mode)
{
	struct gendisk *disk = bdev->bd_disk;
	struct blkfront_info *info;
	int err = 0;

	lock_kernel();

	info = disk->private_data;
	if (!info) {
		/* xbdev gone */
		err = -ERESTARTSYS;
		goto out;
	}

	mutex_lock(&info->mutex);

	if (!info->gd)
		/* xbdev is closed */
		err = -ERESTARTSYS;

	mutex_unlock(&info->mutex);

out:
	unlock_kernel();
	return err;
}

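/*
 * Last close is where a close deferred by blkfront_closing() (the bdev
 * was still open) or a hot-unplug deferred by blkfront_remove() finally
 * takes effect: the gendisk is released and, if the xenbus device is
 * already gone, the info structure is freed here.
 */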
static int blkif_release(struct gendisk *disk, fmode_t mode)
{
	struct blkfront_info *info = disk->private_data;
	struct block_device *bdev;
	struct xenbus_device *xbdev;

	lock_kernel();

	bdev = bdget_disk(disk, 0);
	bdput(bdev);

	if (bdev->bd_openers)
		goto out;

	/*
	 * Check if we have been instructed to close. We will have
	 * deferred this request, because the bdev was still open.
	 */

	mutex_lock(&info->mutex);
	xbdev = info->xbdev;

	if (xbdev && xbdev->state == XenbusStateClosing) {
		/* pending switch to state closed */
		dev_info(disk_to_dev(bdev->bd_disk), "releasing disk\n");
		xlvbd_release_gendisk(info);
		xenbus_frontend_closed(info->xbdev);
	}

	mutex_unlock(&info->mutex);

	if (!xbdev) {
		/* sudden device removal */
		dev_info(disk_to_dev(bdev->bd_disk), "releasing disk\n");
		xlvbd_release_gendisk(info);
		disk->private_data = NULL;
		kfree(info);
	}

out:
	unlock_kernel();
	return 0;
}

static const struct block_device_operations xlvbd_block_fops =
{
	.owner = THIS_MODULE,
	.open = blkif_open,
	.release = blkif_release,
	.getgeo = blkif_getgeo,
	.ioctl = blkif_ioctl,
};


static const struct xenbus_device_id blkfront_ids[] = {
	{ "vbd" },
	{ "" }
};

static struct xenbus_driver blkfront = {
	.name = "vbd",
	.owner = THIS_MODULE,
	.ids = blkfront_ids,
	.probe = blkfront_probe,
	.remove = blkfront_remove,
	.resume = blkfront_resume,
	.otherend_changed = blkback_changed,
	.is_ready = blkfront_is_ready,
};

static int __init xlblk_init(void)
{
	if (!xen_domain())
		return -ENODEV;

	if (register_blkdev(XENVBD_MAJOR, DEV_NAME)) {
		printk(KERN_WARNING "xen_blk: can't get major %d with name %s\n",
		       XENVBD_MAJOR, DEV_NAME);
		return -ENODEV;
	}

	return xenbus_register_frontend(&blkfront);
}
module_init(xlblk_init);


static void __exit xlblk_exit(void)
{
	return xenbus_unregister_driver(&blkfront);
}
module_exit(xlblk_exit);

MODULE_DESCRIPTION("Xen virtual block device frontend");
MODULE_LICENSE("GPL");
MODULE_ALIAS_BLOCKDEV_MAJOR(XENVBD_MAJOR);
MODULE_ALIAS("xen:vbd");
MODULE_ALIAS("xenblk");