@@ -2278,6 +2278,24 @@ static const struct vm_operations_struct sg_mmap_vm_ops = {
 	.close = sg_vma_close,
};
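+
+/* take a reference on a bio; pairs with sg_bio_put() below */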
+static inline void
+sg_bio_get(struct bio *bio)
+{
+	bio_get(bio);
+}
+
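+/*
+ * Equivalent of bio_put() for a bio obtained from bio_kmalloc(): drop a
+ * reference if the bio is reference counted (BIO_REFFED) and, on the final
+ * put, uninitialize the bio and kfree() it rather than returning it to a
+ * bio_set. Also used as the bi_end_io callback on requests built by
+ * sg_rq_map_kern().
+ */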
+static void
+sg_bio_put(struct bio *bio)
+{
+	if (unlikely(bio_flagged(bio, BIO_REFFED))) {
+		WARN_ON(!atomic_read(&bio->__bi_cnt));
+		if (!atomic_dec_and_test(&bio->__bi_cnt))
+			return;
+	}
+	bio_uninit(bio);
+	kfree(bio);
+}
+
/*
 * Entry point for mmap(2) system call. For mmap(2) to work, the request's
 * scatter gather list needs to be order 0, which it is unlikely to be
@@ -2792,6 +2810,59 @@ exit_sg(void)
 	idr_destroy(&sg_index_idr);
}
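+
+/*
+ * Allocate a bio with room for bvec_cnt inline vectors. The caller must
+ * initialize it with bio_init(). Returns NULL if bvec_cnt exceeds
+ * BIO_MAX_VECS or if the GFP_ATOMIC allocation fails.
+ */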
+static struct bio *
+sg_mk_kern_bio(int bvec_cnt)
+{
+	if (bvec_cnt > BIO_MAX_VECS)
+		return NULL;
+	return bio_kmalloc(bvec_cnt, GFP_ATOMIC);
+}
+
+/*
+ * Set up to move data between kernel buffers managed by this driver and a SCSI device. Note
+ * that there is no corresponding 'unmap' call, as is required after blk_rq_map_user(). Uses a
+ * single bio with an appended inline bvec array, enlarged if necessary.
+ */
+static int
+sg_rq_map_kern(struct sg_request *srp, struct request_queue *q, struct request *rqq, int rw_ind)
+{
+	struct sg_scatter_hold *schp = &srp->sgat_h;
+	struct bio *bio;
+	int k, ln;
+	int op_flags = 0;
+	int num_sgat = schp->num_sgat;
+	int dlen = schp->dlen;
+	int pg_sz = 1 << (PAGE_SHIFT + schp->page_order);
+
+	SG_LOG(4, srp->parentfp, "%s: dlen=%d, pg_sz=%d\n", __func__, dlen, pg_sz);
+	if (num_sgat <= 0)
+		return 0;
+	if (rw_ind == WRITE)
+		op_flags = REQ_SYNC | REQ_IDLE;
+	bio = sg_mk_kern_bio(num_sgat);
+	if (!bio)
+		return -ENOMEM;
+	bio_init(bio, NULL, bio->bi_inline_vecs, num_sgat, (req_op(rqq) | op_flags));
+	bio->bi_end_io = sg_bio_put;
+
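+	/* add each scatter-gather element (at most pg_sz bytes) to the bio */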
+	for (k = 0; k < num_sgat && dlen > 0; ++k, dlen -= ln) {
+		ln = min_t(int, dlen, pg_sz);
+		if (bio_add_pc_page(q, bio, schp->pages[k], ln, 0) < ln) {
+			sg_bio_put(bio);
+			return -EINVAL;
+		}
+	}
+	/* earlier versions used blk_rq_append_bio(); this is simpler */
+	blk_rq_bio_prep(rqq, bio, num_sgat);
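+	/* each sgat element spans (1 << page_order) order-0 pages */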
+	rqq->nr_phys_segments = (1 << schp->page_order) * num_sgat;
+	return 0;
+}
+
static inline void
sg_set_map_data(const struct sg_scatter_hold *schp, bool up_valid,
 		struct rq_map_data *mdp)
@@ -2916,8 +2987,11 @@ sg_start_req(struct sg_request *srp, struct sg_comm_wr_t *cwrp, int dxfer_dir)
 		md->from_user = (dxfer_dir == SG_DXFER_TO_FROM_DEV);
 	}
-	res = blk_rq_map_user_io(rqq, md, up, dxfer_len, GFP_ATOMIC,
-				 iov_count, iov_count, 1, r0w);
+	if (us_xfer)
+		res = blk_rq_map_user_io(rqq, md, up, dxfer_len, GFP_ATOMIC,
+					 iov_count, iov_count, 1, r0w);
+	else	/* transfer data to/from kernel buffers */
+		res = sg_rq_map_kern(srp, q, rqq, r0w);
fini:
 	if (unlikely(res)) {	/* failure, free up resources */
 		WRITE_ONCE(srp->rqq, NULL);