asedeno.scripts.mit.edu Git - linux.git/commitdiff
Merge branch 'for-3.3/drivers' of git://git.kernel.dk/linux-block
authorLinus Torvalds <torvalds@linux-foundation.org>
Sun, 15 Jan 2012 20:48:41 +0000 (12:48 -0800)
committerLinus Torvalds <torvalds@linux-foundation.org>
Sun, 15 Jan 2012 20:48:41 +0000 (12:48 -0800)
* 'for-3.3/drivers' of git://git.kernel.dk/linux-block:
  mtip32xx: do rebuild monitoring asynchronously
  xen-blkfront: Use kcalloc instead of kzalloc to allocate array
  mtip32xx: uninitialized variable in mtip_quiesce_io()
  mtip32xx: updates based on feedback
  xen-blkback: convert hole punching to discard request on loop devices
  xen/blkback: Move processing of BLKIF_OP_DISCARD from dispatch_rw_block_io
  xen/blk[front|back]: Enhance discard support with secure erasing support.
  xen/blk[front|back]: Squash blkif_request_rw and blkif_request_discard together
  mtip32xx: update to new ->make_request() API
  mtip32xx: add module.h include to avoid conflict with moduleh tree
  mtip32xx: mark a few more items static
  mtip32xx: ensure that all local functions are static
  mtip32xx: cleanup compat ioctl handling
  mtip32xx: fix warnings/errors on 32-bit compiles
  block: Add driver for Micron RealSSD pcie flash cards

1  2 
drivers/block/xen-blkback/xenbus.c
drivers/block/xen-blkfront.c

index 37c794d312644d30430446d32e3ed844565cc362,187fd2c1a15d077cd8cf1c8f88877b3307f8a661..24a2fb57e5d012d57c5d3e1fc1929f27ac3998ee
@@@ -338,6 -338,9 +338,9 @@@ static int xen_vbd_create(struct xen_bl
        if (q && q->flush_flags)
                vbd->flush_support = true;
  
+       if (q && blk_queue_secdiscard(q))
+               vbd->discard_secure = true;
        DPRINTK("Successful creation of handle=%04x (dom=%u)\n",
                handle, blkif->domid);
        return 0;
@@@ -420,6 -423,15 +423,15 @@@ int xen_blkbk_discard(struct xenbus_tra
                                state = 1;
                                blkif->blk_backend_type = BLKIF_BACKEND_PHY;
                        }
+                       /* Optional. */
+                       err = xenbus_printf(xbt, dev->nodename,
+                               "discard-secure", "%d",
+                               blkif->vbd.discard_secure);
+                       if (err) {
+                               xenbus_dev_fatal(dev, err,
+                                       "writting discard-secure");
+                               goto kfree;
+                       }
                }
        } else {
                err = PTR_ERR(type);
@@@ -613,7 -625,7 +625,7 @@@ static void frontend_changed(struct xen
        case XenbusStateConnected:
                /*
                 * Ensure we connect even when two watches fire in
 -               * close successsion and we miss the intermediate value
 +               * close succession and we miss the intermediate value
                 * of frontend_state.
                 */
                if (dev->state == XenbusStateConnected)
@@@ -787,14 -799,17 +799,14 @@@ static const struct xenbus_device_id xe
  };
  
  
 -static struct xenbus_driver xen_blkbk = {
 -      .name = "vbd",
 -      .owner = THIS_MODULE,
 -      .ids = xen_blkbk_ids,
 +static DEFINE_XENBUS_DRIVER(xen_blkbk, ,
        .probe = xen_blkbk_probe,
        .remove = xen_blkbk_remove,
        .otherend_changed = frontend_changed
 -};
 +);
  
  
  int xen_blkif_xenbus_init(void)
  {
 -      return xenbus_register_backend(&xen_blkbk);
 +      return xenbus_register_backend(&xen_blkbk_driver);
  }
index 9fd3ee203b1e7b5e450c3e32737e61a64f9cc6b1,8cb0c27f2654c52f1a6cd8dbc61e87c46f0911af..2f22874c0a374df53ef5f264b42901ac3fc15979
@@@ -98,7 -98,8 +98,8 @@@ struct blkfront_inf
        unsigned long shadow_free;
        unsigned int feature_flush;
        unsigned int flush_op;
-       unsigned int feature_discard;
+       unsigned int feature_discard:1;
+       unsigned int feature_secdiscard:1;
        unsigned int discard_granularity;
        unsigned int discard_alignment;
        int is_ready;
@@@ -135,15 -136,15 +136,15 @@@ static int get_id_from_freelist(struct 
  {
        unsigned long free = info->shadow_free;
        BUG_ON(free >= BLK_RING_SIZE);
-       info->shadow_free = info->shadow[free].req.id;
-       info->shadow[free].req.id = 0x0fffffee; /* debug */
+       info->shadow_free = info->shadow[free].req.u.rw.id;
+       info->shadow[free].req.u.rw.id = 0x0fffffee; /* debug */
        return free;
  }
  
  static void add_id_to_freelist(struct blkfront_info *info,
                               unsigned long id)
  {
-       info->shadow[id].req.id  = info->shadow_free;
+       info->shadow[id].req.u.rw.id  = info->shadow_free;
        info->shadow[id].request = NULL;
        info->shadow_free = id;
  }
@@@ -156,7 -157,7 +157,7 @@@ static int xlbd_reserve_minors(unsigne
        if (end > nr_minors) {
                unsigned long *bitmap, *old;
  
-               bitmap = kzalloc(BITS_TO_LONGS(end) * sizeof(*bitmap),
+               bitmap = kcalloc(BITS_TO_LONGS(end), sizeof(*bitmap),
                                 GFP_KERNEL);
                if (bitmap == NULL)
                        return -ENOMEM;
@@@ -287,9 -288,9 +288,9 @@@ static int blkif_queue_request(struct r
        id = get_id_from_freelist(info);
        info->shadow[id].request = req;
  
-       ring_req->id = id;
+       ring_req->u.rw.id = id;
        ring_req->u.rw.sector_number = (blkif_sector_t)blk_rq_pos(req);
-       ring_req->handle = info->handle;
+       ring_req->u.rw.handle = info->handle;
  
        ring_req->operation = rq_data_dir(req) ?
                BLKIF_OP_WRITE : BLKIF_OP_READ;
                ring_req->operation = info->flush_op;
        }
  
-       if (unlikely(req->cmd_flags & REQ_DISCARD)) {
+       if (unlikely(req->cmd_flags & (REQ_DISCARD | REQ_SECURE))) {
                /* id, sector_number and handle are set above. */
                ring_req->operation = BLKIF_OP_DISCARD;
-               ring_req->nr_segments = 0;
                ring_req->u.discard.nr_sectors = blk_rq_sectors(req);
+               if ((req->cmd_flags & REQ_SECURE) && info->feature_secdiscard)
+                       ring_req->u.discard.flag = BLKIF_DISCARD_SECURE;
+               else
+                       ring_req->u.discard.flag = 0;
        } else {
-               ring_req->nr_segments = blk_rq_map_sg(req->q, req, info->sg);
-               BUG_ON(ring_req->nr_segments > BLKIF_MAX_SEGMENTS_PER_REQUEST);
+               ring_req->u.rw.nr_segments = blk_rq_map_sg(req->q, req,
+                                                          info->sg);
+               BUG_ON(ring_req->u.rw.nr_segments >
+                      BLKIF_MAX_SEGMENTS_PER_REQUEST);
  
-               for_each_sg(info->sg, sg, ring_req->nr_segments, i) {
+               for_each_sg(info->sg, sg, ring_req->u.rw.nr_segments, i) {
                        buffer_mfn = pfn_to_mfn(page_to_pfn(sg_page(sg)));
                        fsect = sg->offset >> 9;
                        lsect = fsect + (sg->length >> 9) - 1;
@@@ -424,6 -430,8 +430,8 @@@ static int xlvbd_init_blk_queue(struct 
                blk_queue_max_discard_sectors(rq, get_capacity(gd));
                rq->limits.discard_granularity = info->discard_granularity;
                rq->limits.discard_alignment = info->discard_alignment;
+               if (info->feature_secdiscard)
+                       queue_flag_set_unlocked(QUEUE_FLAG_SECDISCARD, rq);
        }
  
        /* Hard sector size and max sectors impersonate the equiv. hardware. */
@@@ -705,7 -713,9 +713,9 @@@ static void blkif_free(struct blkfront_
  static void blkif_completion(struct blk_shadow *s)
  {
        int i;
-       for (i = 0; i < s->req.nr_segments; i++)
+       /* Do not let BLKIF_OP_DISCARD as nr_segment is in the same place
+        * flag. */
+       for (i = 0; i < s->req.u.rw.nr_segments; i++)
                gnttab_end_foreign_access(s->req.u.rw.seg[i].gref, 0, 0UL);
  }
  
@@@ -736,7 -746,8 +746,8 @@@ static irqreturn_t blkif_interrupt(int 
                id   = bret->id;
                req  = info->shadow[id].request;
  
-               blkif_completion(&info->shadow[id]);
+               if (bret->operation != BLKIF_OP_DISCARD)
+                       blkif_completion(&info->shadow[id]);
  
                add_id_to_freelist(info, id);
  
                                           info->gd->disk_name);
                                error = -EOPNOTSUPP;
                                info->feature_discard = 0;
+                               info->feature_secdiscard = 0;
                                queue_flag_clear(QUEUE_FLAG_DISCARD, rq);
+                               queue_flag_clear(QUEUE_FLAG_SECDISCARD, rq);
                        }
                        __blk_end_request_all(req, error);
                        break;
                                error = -EOPNOTSUPP;
                        }
                        if (unlikely(bret->status == BLKIF_RSP_ERROR &&
-                                    info->shadow[id].req.nr_segments == 0)) {
+                                    info->shadow[id].req.u.rw.nr_segments == 0)) {
                                printk(KERN_WARNING "blkfront: %s: empty write %s op failed\n",
                                       info->flush_op == BLKIF_OP_WRITE_BARRIER ?
                                       "barrier" :  "flush disk cache",
@@@ -984,8 -997,8 +997,8 @@@ static int blkfront_probe(struct xenbus
        INIT_WORK(&info->work, blkif_restart_queue);
  
        for (i = 0; i < BLK_RING_SIZE; i++)
-               info->shadow[i].req.id = i+1;
-       info->shadow[BLK_RING_SIZE-1].req.id = 0x0fffffff;
+               info->shadow[i].req.u.rw.id = i+1;
+       info->shadow[BLK_RING_SIZE-1].req.u.rw.id = 0x0fffffff;
  
        /* Front end dir is a number, which is used as the id. */
        info->handle = simple_strtoul(strrchr(dev->nodename, '/')+1, NULL, 0);
@@@ -1019,9 -1032,9 +1032,9 @@@ static int blkif_recover(struct blkfron
        /* Stage 2: Set up free list. */
        memset(&info->shadow, 0, sizeof(info->shadow));
        for (i = 0; i < BLK_RING_SIZE; i++)
-               info->shadow[i].req.id = i+1;
+               info->shadow[i].req.u.rw.id = i+1;
        info->shadow_free = info->ring.req_prod_pvt;
-       info->shadow[BLK_RING_SIZE-1].req.id = 0x0fffffff;
+       info->shadow[BLK_RING_SIZE-1].req.u.rw.id = 0x0fffffff;
  
        /* Stage 3: Find pending requests and requeue them. */
        for (i = 0; i < BLK_RING_SIZE; i++) {
                *req = copy[i].req;
  
                /* We get a new request id, and must reset the shadow state. */
-               req->id = get_id_from_freelist(info);
-               memcpy(&info->shadow[req->id], &copy[i], sizeof(copy[i]));
+               req->u.rw.id = get_id_from_freelist(info);
+               memcpy(&info->shadow[req->u.rw.id], &copy[i], sizeof(copy[i]));
  
+               if (req->operation != BLKIF_OP_DISCARD) {
                /* Rewrite any grant references invalidated by susp/resume. */
-               for (j = 0; j < req->nr_segments; j++)
-                       gnttab_grant_foreign_access_ref(
-                               req->u.rw.seg[j].gref,
-                               info->xbdev->otherend_id,
-                               pfn_to_mfn(info->shadow[req->id].frame[j]),
-                               rq_data_dir(info->shadow[req->id].request));
-               info->shadow[req->id].req = *req;
+                       for (j = 0; j < req->u.rw.nr_segments; j++)
+                               gnttab_grant_foreign_access_ref(
+                                       req->u.rw.seg[j].gref,
+                                       info->xbdev->otherend_id,
+                                       pfn_to_mfn(info->shadow[req->u.rw.id].frame[j]),
+                                       rq_data_dir(info->shadow[req->u.rw.id].request));
+               }
+               info->shadow[req->u.rw.id].req = *req;
  
                info->ring.req_prod_pvt++;
        }
@@@ -1135,11 -1150,13 +1150,13 @@@ static void blkfront_setup_discard(stru
        char *type;
        unsigned int discard_granularity;
        unsigned int discard_alignment;
+       unsigned int discard_secure;
  
        type = xenbus_read(XBT_NIL, info->xbdev->otherend, "type", NULL);
        if (IS_ERR(type))
                return;
  
+       info->feature_secdiscard = 0;
        if (strncmp(type, "phy", 3) == 0) {
                err = xenbus_gather(XBT_NIL, info->xbdev->otherend,
                        "discard-granularity", "%u", &discard_granularity,
                        info->discard_granularity = discard_granularity;
                        info->discard_alignment = discard_alignment;
                }
+               err = xenbus_gather(XBT_NIL, info->xbdev->otherend,
+                           "discard-secure", "%d", &discard_secure,
+                           NULL);
+               if (!err)
+                       info->feature_secdiscard = discard_secure;
        } else if (strncmp(type, "file", 4) == 0)
                info->feature_discard = 1;
  
@@@ -1437,13 -1460,16 +1460,13 @@@ static const struct xenbus_device_id bl
        { "" }
  };
  
 -static struct xenbus_driver blkfront = {
 -      .name = "vbd",
 -      .owner = THIS_MODULE,
 -      .ids = blkfront_ids,
 +static DEFINE_XENBUS_DRIVER(blkfront, ,
        .probe = blkfront_probe,
        .remove = blkfront_remove,
        .resume = blkfront_resume,
        .otherend_changed = blkback_changed,
        .is_ready = blkfront_is_ready,
 -};
 +);
  
  static int __init xlblk_init(void)
  {
                return -ENODEV;
        }
  
 -      ret = xenbus_register_frontend(&blkfront);
 +      ret = xenbus_register_frontend(&blkfront_driver);
        if (ret) {
                unregister_blkdev(XENVBD_MAJOR, DEV_NAME);
                return ret;
@@@ -1471,7 -1497,7 +1494,7 @@@ module_init(xlblk_init)
  
  static void __exit xlblk_exit(void)
  {
 -      return xenbus_unregister_driver(&blkfront);
 +      return xenbus_unregister_driver(&blkfront_driver);
  }
  module_exit(xlblk_exit);