@@ -178,7 +178,8 @@ static void scsifront_put_rqid(struct vscsifrnt_info *info, uint32_t id)
 		scsifront_wake_up(info);
 }
 
-static int scsifront_do_request(struct vscsifrnt_info *info,
+static int scsifront_do_request(struct scsi_device *sdev,
+				struct vscsifrnt_info *info,
 				struct vscsifrnt_shadow *shadow)
 {
 	struct vscsiif_front_ring *ring = &(info->ring);
@@ -205,17 +206,25 @@ static int scsifront_do_request(struct vscsifrnt_info *info,
 	ring_req->ref_rqid    = shadow->ref_rqid;
 	ring_req->nr_segments = shadow->nr_segments;
 
-	ring_req->id      = sc->device->id;
-	ring_req->lun     = sc->device->lun;
-	ring_req->channel = sc->device->channel;
-	ring_req->cmd_len = sc->cmd_len;
+	ring_req->id      = sdev->id;
+	ring_req->lun     = sdev->lun;
+	ring_req->channel = sdev->channel;
+
+	if (sc) {
+		ring_req->cmd_len = sc->cmd_len;
 
-	BUG_ON(sc->cmd_len > VSCSIIF_MAX_COMMAND_SIZE);
+		BUG_ON(sc->cmd_len > VSCSIIF_MAX_COMMAND_SIZE);
 
-	memcpy(ring_req->cmnd, sc->cmnd, sc->cmd_len);
+		memcpy(ring_req->cmnd, sc->cmnd, sc->cmd_len);
 
-	ring_req->sc_data_direction   = (uint8_t)sc->sc_data_direction;
-	ring_req->timeout_per_command = scsi_cmd_to_rq(sc)->timeout / HZ;
+		ring_req->sc_data_direction = (uint8_t)sc->sc_data_direction;
+		ring_req->timeout_per_command =
+			scsi_cmd_to_rq(sc)->timeout / HZ;
+	} else {
+		ring_req->cmd_len = VSCSIIF_MAX_COMMAND_SIZE;
+		memset(ring_req->cmnd, 0, VSCSIIF_MAX_COMMAND_SIZE);
+		ring_req->sc_data_direction = DMA_NONE;
+	}
 
 	for (i = 0; i < (shadow->nr_segments & ~VSCSIIF_SG_GRANT); i++)
 		ring_req->seg[i] = shadow->seg[i];
@@ -637,7 +646,7 @@ static int scsifront_queuecommand(struct Scsi_Host *shost,
 		return 0;
 	}
 
-	if (scsifront_do_request(info, shadow)) {
+	if (scsifront_do_request(sc->device, info, shadow)) {
 		scsifront_gnttab_done(info, shadow);
 		goto busy;
 	}
@@ -685,7 +694,7 @@ static int scsifront_action_handler(struct scsi_cmnd *sc, uint8_t act)
 		if (scsifront_enter(info))
 			goto fail;
 
-		if (!scsifront_do_request(info, shadow))
+		if (!scsifront_do_request(sc->device, info, shadow))
 			break;
 
 		scsifront_return(info);