Merge branch 'for-3.16/core' into for-3.16/drivers
author		Jens Axboe <axboe@fb.com>
		Wed, 28 May 2014 16:18:51 +0000 (10:18 -0600)
committer	Jens Axboe <axboe@fb.com>
		Wed, 28 May 2014 16:18:51 +0000 (10:18 -0600)
Pull in core changes (again), since we got rid of the alloc/free
hctx mq_ops hooks and mtip32xx then needed updating again.

Signed-off-by: Jens Axboe <axboe@fb.com>
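
For context ahead of the diff: the for-3.16/core branch dropped the per-hctx
alloc/free hooks from struct blk_mq_ops, so an ops table that previously named
blk_mq_alloc_single_hw_queue / blk_mq_free_single_hw_queue now omits them, and
per-request setup moves to the init_request/exit_request callbacks instead.
Below is a minimal sketch of the resulting shape, assuming the 3.16-era
signatures seen in the mtip32xx hunk further down; the mydrv_* names are
hypothetical stand-ins, not anything from this commit:

    #include <linux/blk-mq.h>

    /* Sketch only: the post-change blk_mq_ops shape. The old .alloc_hctx /
     * .free_hctx fields (see the removed lines in the diff) no longer
     * exist; the block core now allocates hardware contexts itself.
     */
    static int mydrv_queue_rq(struct blk_mq_hw_ctx *hctx, struct request *rq);
    static int mydrv_init_request(void *data, struct request *rq,
                                  unsigned int hctx_idx,
                                  unsigned int request_idx,
                                  unsigned int numa_node);
    static void mydrv_exit_request(void *data, struct request *rq,
                                   unsigned int hctx_idx,
                                   unsigned int request_idx);

    static struct blk_mq_ops mydrv_mq_ops = {
            .queue_rq       = mydrv_queue_rq,      /* hypothetical handler */
            .map_queue      = blk_mq_map_queue,    /* core-provided mapping */
            .init_request   = mydrv_init_request,  /* per-request setup */
            .exit_request   = mydrv_exit_request,  /* per-request teardown */
    };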
drivers/block/mtip32xx/mtip32xx.c
drivers/block/null_blk.c
drivers/block/virtio_blk.c

diff --cc drivers/block/mtip32xx/mtip32xx.c
index ea323e91903b30d9535767d3265f13c0e9746d56,59c5abe32f06f055e939e5290e56055d66bd4c05..74abd49fabdcfe76d780c2b2d7268c50507e136c
@@@ -3723,121 -4056,76 +3723,119 @@@ static int mtip_submit_request(struct b
                }
                if (unlikely(test_bit(MTIP_DDF_WRITE_PROTECT_BIT,
                                                        &dd->dd_flag) &&
 -                              bio_data_dir(bio))) {
 -                      bio_endio(bio, -ENODATA);
 -                      return;
 -              }
 -              if (unlikely(test_bit(MTIP_DDF_SEC_LOCK_BIT, &dd->dd_flag))) {
 -                      bio_endio(bio, -ENODATA);
 -                      return;
 -              }
 -              if (test_bit(MTIP_DDF_REBUILD_FAILED_BIT, &dd->dd_flag)) {
 -                      bio_endio(bio, -ENXIO);
 -                      return;
 +                              rq_data_dir(rq))) {
 +                      return -ENODATA;
                }
 +              if (unlikely(test_bit(MTIP_DDF_SEC_LOCK_BIT, &dd->dd_flag)))
 +                      return -ENODATA;
 +              if (test_bit(MTIP_DDF_REBUILD_FAILED_BIT, &dd->dd_flag))
 +                      return -ENXIO;
        }
  
 -      if (unlikely(bio->bi_rw & REQ_DISCARD)) {
 -              bio_endio(bio, mtip_send_trim(dd, bio->bi_iter.bi_sector,
 -                                              bio_sectors(bio)));
 -              return;
 -      }
 +      if (rq->cmd_flags & REQ_DISCARD) {
 +              int err;
  
 -      if (unlikely(!bio_has_data(bio))) {
 -              blk_queue_flush(queue, 0);
 -              bio_endio(bio, 0);
 -              return;
 +              err = mtip_send_trim(dd, blk_rq_pos(rq), blk_rq_sectors(rq));
 +              blk_mq_end_io(rq, err);
 +              return 0;
        }
  
 -      if (bio_data_dir(bio) == WRITE && bio_sectors(bio) <= 64 &&
 -                                                      dd->unal_qdepth) {
 -              if (bio->bi_iter.bi_sector % 8 != 0)
 -                      /* Unaligned on 4k boundaries */
 -                      unaligned = 1;
 -              else if (bio_sectors(bio) % 8 != 0) /* Aligned but not 4k/8k */
 -                      unaligned = 1;
 +      /* Create the scatter list for this request. */
 +      nents = blk_rq_map_sg(hctx->queue, rq, cmd->sg);
 +
 +      /* Issue the read/write. */
 +      mtip_hw_submit_io(dd, rq, cmd, nents, hctx);
 +      return 0;
 +}
 +
 +static bool mtip_check_unal_depth(struct blk_mq_hw_ctx *hctx,
 +                                struct request *rq)
 +{
 +      struct driver_data *dd = hctx->queue->queuedata;
 +      struct mtip_cmd *cmd = blk_mq_rq_to_pdu(rq);
 +
 +      if (!dd->unal_qdepth || rq_data_dir(rq) == READ)
 +              return false;
 +
 +      /*
 +       * If unaligned depth must be limited on this controller, mark it
  +       * as unaligned if the IO isn't on a 4k boundary (start or length).
 +       */
 +      if (blk_rq_sectors(rq) <= 64) {
 +              if ((blk_rq_pos(rq) & 7) || (blk_rq_sectors(rq) & 7))
 +                      cmd->unaligned = 1;
        }
  
 -      sg = mtip_hw_get_scatterlist(dd, &tag, unaligned);
 -      if (likely(sg != NULL)) {
 -              blk_queue_bounce(queue, &bio);
 +      if (cmd->unaligned && down_trylock(&dd->port->cmd_slot_unal))
 +              return true;
  
 -              if (unlikely((bio)->bi_vcnt > MTIP_MAX_SG)) {
 -                      dev_warn(&dd->pdev->dev,
 -                              "Maximum number of SGL entries exceeded\n");
 -                      bio_io_error(bio);
 -                      mtip_hw_release_scatterlist(dd, tag, unaligned);
 -                      return;
 -              }
 +      return false;
 +}
  
 -              /* Create the scatter list for this bio. */
 -              bio_for_each_segment(bvec, bio, iter) {
 -                      sg_set_page(&sg[nents],
 -                                      bvec.bv_page,
 -                                      bvec.bv_len,
 -                                      bvec.bv_offset);
 -                      nents++;
 -              }
 +static int mtip_queue_rq(struct blk_mq_hw_ctx *hctx, struct request *rq)
 +{
 +      int ret;
  
 -              /* Issue the read/write. */
 -              mtip_hw_submit_io(dd,
 -                              bio->bi_iter.bi_sector,
 -                              bio_sectors(bio),
 -                              nents,
 -                              tag,
 -                              bio_endio,
 -                              bio,
 -                              bio_data_dir(bio),
 -                              unaligned);
 -      } else
 -              bio_io_error(bio);
 +      if (mtip_check_unal_depth(hctx, rq))
 +              return BLK_MQ_RQ_QUEUE_BUSY;
 +
 +      ret = mtip_submit_request(hctx, rq);
 +      if (!ret)
 +              return BLK_MQ_RQ_QUEUE_OK;
 +
 +      rq->errors = ret;
 +      return BLK_MQ_RQ_QUEUE_ERROR;
 +}
 +
 +static void mtip_free_cmd(void *data, struct request *rq,
 +                        unsigned int hctx_idx, unsigned int request_idx)
 +{
 +      struct driver_data *dd = data;
 +      struct mtip_cmd *cmd = blk_mq_rq_to_pdu(rq);
 +
 +      if (!cmd->command)
 +              return;
 +
 +      dmam_free_coherent(&dd->pdev->dev, CMD_DMA_ALLOC_SZ,
 +                              cmd->command, cmd->command_dma);
 +}
 +
 +static int mtip_init_cmd(void *data, struct request *rq, unsigned int hctx_idx,
 +                       unsigned int request_idx, unsigned int numa_node)
 +{
 +      struct driver_data *dd = data;
 +      struct mtip_cmd *cmd = blk_mq_rq_to_pdu(rq);
 +      u32 host_cap_64 = readl(dd->mmio + HOST_CAP) & HOST_CAP_64;
 +
 +      cmd->command = dmam_alloc_coherent(&dd->pdev->dev, CMD_DMA_ALLOC_SZ,
 +                      &cmd->command_dma, GFP_KERNEL);
 +      if (!cmd->command)
 +              return -ENOMEM;
 +
 +      memset(cmd->command, 0, CMD_DMA_ALLOC_SZ);
 +
 +      /* Point the command headers at the command tables. */
 +      cmd->command_header = dd->port->command_list +
 +                              (sizeof(struct mtip_cmd_hdr) * request_idx);
 +      cmd->command_header_dma = dd->port->command_list_dma +
 +                              (sizeof(struct mtip_cmd_hdr) * request_idx);
 +
 +      if (host_cap_64)
 +              cmd->command_header->ctbau = __force_bit2int cpu_to_le32((cmd->command_dma >> 16) >> 16);
 +
 +      cmd->command_header->ctba = __force_bit2int cpu_to_le32(cmd->command_dma & 0xFFFFFFFF);
 +
 +      sg_init_table(cmd->sg, MTIP_MAX_SG);
 +      return 0;
  }
  
-       .alloc_hctx     = blk_mq_alloc_single_hw_queue,
-       .free_hctx      = blk_mq_free_single_hw_queue,
 +static struct blk_mq_ops mtip_mq_ops = {
 +      .queue_rq       = mtip_queue_rq,
 +      .map_queue      = blk_mq_map_queue,
 +      .init_request   = mtip_init_cmd,
 +      .exit_request   = mtip_free_cmd,
 +};
 +
  /*
   * Block layer initialization function.
   *
diff --cc drivers/block/null_blk.c
Simple merge
diff --cc drivers/block/virtio_blk.c
Simple merge
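
A closing note on how a converted driver wires these ops up: with the
3.16-era API, the driver fills a struct blk_mq_tag_set whose cmd_size
reserves the per-request payload that blk_mq_rq_to_pdu() hands back inside
queue_rq and init_request/exit_request, and whose driver_data becomes the
"void *data" argument of those hooks. A rough sketch under that assumption;
the mydrv_* names, queue depth, and struct layouts are hypothetical, not
mtip32xx's actual values:

    #include <linux/blk-mq.h>
    #include <linux/scatterlist.h>
    #include <linux/err.h>

    /* Hypothetical per-request payload, playing the role of struct mtip_cmd. */
    struct mydrv_cmd {
            struct scatterlist sg[64];      /* made-up SG table size */
    };

    struct mydrv_data {
            struct request_queue *queue;
            struct blk_mq_tag_set tags;
    };

    static int mydrv_init_queue(struct mydrv_data *dd)
    {
            int ret;

            dd->tags.ops            = &mydrv_mq_ops;
            dd->tags.nr_hw_queues   = 1;
            dd->tags.queue_depth    = 256;  /* made-up depth */
            dd->tags.cmd_size       = sizeof(struct mydrv_cmd); /* pdu size */
            dd->tags.numa_node      = NUMA_NO_NODE;
            dd->tags.flags          = BLK_MQ_F_SHOULD_MERGE;
            dd->tags.driver_data    = dd;   /* "data" in init/exit_request */

            ret = blk_mq_alloc_tag_set(&dd->tags);
            if (ret)
                    return ret;

            dd->queue = blk_mq_init_queue(&dd->tags);
            if (IS_ERR(dd->queue)) {
                    blk_mq_free_tag_set(&dd->tags);
                    return PTR_ERR(dd->queue);
            }

            /* Read back via hctx->queue->queuedata, as in the diff above. */
            dd->queue->queuedata = dd;
            return 0;
    }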