@@ -144,7 +144,7 @@ struct sg_fd { /* holds the state of a file descriptor */
struct list_head sfd_entry; /* member sg_device::sfds list */
struct sg_device *parentdp; /* owning device */
wait_queue_head_t read_wait; /* queue read until command done */
- rwlock_t rq_list_lock; /* protect access to list in req_arr */
+ spinlock_t rq_list_lock; /* protect access to list in req_arr */
struct mutex f_mutex; /* protect against changes in this fd */
int timeout; /* defaults to SG_DEFAULT_TIMEOUT */
int timeout_user; /* defaults to SG_DEFAULT_TIMEOUT_USER */
@@ -815,9 +815,9 @@ srp_done(struct sg_fd *sfp, struct sg_request *srp)
unsigned long flags;
int ret;

- read_lock_irqsave(&sfp->rq_list_lock, flags);
+ spin_lock_irqsave(&sfp->rq_list_lock, flags);
ret = srp->done;
- read_unlock_irqrestore(&sfp->rq_list_lock, flags);
+ spin_unlock_irqrestore(&sfp->rq_list_lock, flags);
return ret;
}
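
Every hunk in this conversion follows the same mapping: read_lock_irqsave()/write_lock_irqsave() become spin_lock_irqsave(), the _irq and plain variants map likewise, and rwlock_init() becomes spin_lock_init(). srp_done() above is the simplest case: the old reader-side critical section only samples one int, and it becomes an ordinary exclusive section. A minimal sketch of the resulting shape, using hypothetical demo_* names rather than the driver's own types:

#include <linux/spinlock.h>

struct demo_fd {
	spinlock_t rq_lock;	/* was rwlock_t before the conversion */
	int done;
};

/* Hypothetical analogue of srp_done(): sample a flag under the lock. */
static int demo_done(struct demo_fd *fd)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&fd->rq_lock, flags);	/* was read_lock_irqsave() */
	ret = fd->done;
	spin_unlock_irqrestore(&fd->rq_lock, flags);
	return ret;
}
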
@@ -1032,15 +1032,15 @@ sg_ioctl_common(struct file *filp, struct sg_device *sdp, struct sg_fd *sfp,
return result;
result = wait_event_interruptible(sfp->read_wait,
srp_done(sfp, srp));
- write_lock_irq(&sfp->rq_list_lock);
+ spin_lock_irq(&sfp->rq_list_lock);
if (srp->done) {
srp->done = 2;
- write_unlock_irq(&sfp->rq_list_lock);
+ spin_unlock_irq(&sfp->rq_list_lock);
result = sg_new_read(sfp, p, SZ_SG_IO_HDR, srp);
return (result < 0) ? result : 0;
}
srp->orphan = 1;
- write_unlock_irq(&sfp->rq_list_lock);
+ spin_unlock_irq(&sfp->rq_list_lock);
return result; /* -ERESTARTSYS because signal hit process */
case SG_SET_TIMEOUT:
result = get_user(val, ip);
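
The SG_IO hunk above shows why the re-check after wait_event_interruptible() must happen under the lock: a signal can race with command completion, and marking the request orphaned has to be atomic with the test of srp->done so the completion side (sg_rq_end_io(), further down) sees a consistent state. A hedged sketch of that shape, again with hypothetical demo_* names:

#include <linux/spinlock.h>
#include <linux/wait.h>

struct demo_req {
	int done;	/* 0 = pending, 1 = complete, 2 = being consumed */
	int orphan;
};

/* Hypothetical analogue of the SG_IO wait-and-recheck above. */
static int demo_wait(wait_queue_head_t *wq, spinlock_t *lock,
		     struct demo_req *req)
{
	int result = wait_event_interruptible(*wq, READ_ONCE(req->done));

	spin_lock_irq(lock);	/* process context, irqs known enabled */
	if (req->done) {	/* completed, whether or not a signal hit */
		req->done = 2;
		spin_unlock_irq(lock);
		return 0;	/* caller would now copy the response out */
	}
	req->orphan = 1;	/* interrupted: let completion clean up */
	spin_unlock_irq(lock);
	return result;		/* -ERESTARTSYS */
}
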
@@ -1092,15 +1092,15 @@ sg_ioctl_common(struct file *filp, struct sg_device *sdp, struct sg_fd *sfp,
sfp->force_packid = val ? 1 : 0;
return 0;
case SG_GET_PACK_ID:
- read_lock_irqsave(&sfp->rq_list_lock, iflags);
+ spin_lock_irqsave(&sfp->rq_list_lock, iflags);
list_for_each_entry(srp, &sfp->rq_list, entry) {
if ((1 == srp->done) && (!srp->sg_io_owned)) {
- read_unlock_irqrestore(&sfp->rq_list_lock,
+ spin_unlock_irqrestore(&sfp->rq_list_lock,
iflags);
return put_user(srp->header.pack_id, ip);
}
}
- read_unlock_irqrestore(&sfp->rq_list_lock, iflags);
+ spin_unlock_irqrestore(&sfp->rq_list_lock, iflags);
return put_user(-1, ip);
case SG_GET_NUM_WAITING:
return put_user(atomic_read(&sfp->waiting), ip);
@@ -1169,9 +1169,9 @@ sg_ioctl_common(struct file *filp, struct sg_device *sdp, struct sg_fd *sfp,
GFP_KERNEL);
if (!rinfo)
return -ENOMEM;
- read_lock_irqsave(&sfp->rq_list_lock, iflags);
+ spin_lock_irqsave(&sfp->rq_list_lock, iflags);
sg_fill_request_table(sfp, rinfo);
- read_unlock_irqrestore(&sfp->rq_list_lock, iflags);
+ spin_unlock_irqrestore(&sfp->rq_list_lock, iflags);
#ifdef CONFIG_COMPAT
if (in_compat_syscall())
result = put_compat_request_table(p, rinfo);
@@ -1466,7 +1466,7 @@ sg_rq_end_io(struct request *rq, blk_status_t status)
srp->rq = NULL;
blk_mq_free_request(rq);

- write_lock_irqsave(&sfp->rq_list_lock, iflags);
+ spin_lock_irqsave(&sfp->rq_list_lock, iflags);
if (unlikely(srp->orphan)) {
if (sfp->keep_orphan)
srp->sg_io_owned = 0;
@@ -1474,7 +1474,7 @@ sg_rq_end_io(struct request *rq, blk_status_t status)
done = 0;
}
srp->done = done;
- write_unlock_irqrestore(&sfp->rq_list_lock, iflags);
+ spin_unlock_irqrestore(&sfp->rq_list_lock, iflags);

if (likely(done)) {
/* Now wake up any sg_read() that is waiting for this
@@ -2129,7 +2129,7 @@ sg_get_rq_mark(struct sg_fd *sfp, int pack_id, bool *busy)
unsigned long iflags;

*busy = false;
- write_lock_irqsave(&sfp->rq_list_lock, iflags);
+ spin_lock_irqsave(&sfp->rq_list_lock, iflags);
list_for_each_entry(resp, &sfp->rq_list, entry) {
/* look for requests that are not SG_IO owned */
if ((!resp->sg_io_owned) &&
@@ -2140,14 +2140,14 @@ sg_get_rq_mark(struct sg_fd *sfp, int pack_id, bool *busy)
break;
case 1: /* request done; response ready to return */
resp->done = 2; /* guard against other readers */
- write_unlock_irqrestore(&sfp->rq_list_lock, iflags);
+ spin_unlock_irqrestore(&sfp->rq_list_lock, iflags);
return resp;
case 2: /* response already being returned */
break;
}
}
}
- write_unlock_irqrestore(&sfp->rq_list_lock, iflags);
+ spin_unlock_irqrestore(&sfp->rq_list_lock, iflags);
return NULL;
}
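
sg_get_rq_mark() is the hunk that makes the rwlock look least useful in the first place: even the old code had to take the write side for what is logically a lookup, because a successful match immediately sets resp->done = 2 to stop other readers from returning the same response. With a spinlock the fast path costs the same single exclusive acquire. A small sketch of that claim-under-lock pattern (the demo_* names and the pack_id == -1 wildcard are illustrative assumptions):

#include <linux/list.h>
#include <linux/spinlock.h>

struct demo_req {
	struct list_head entry;
	int pack_id;
	int done;	/* 0 = active, 1 = ready, 2 = being returned */
};

/*
 * Hypothetical analogue of sg_get_rq_mark(): find a finished request and
 * claim it in the same critical section, so two concurrent readers can
 * never hand the same response back twice.
 */
static struct demo_req *demo_claim(spinlock_t *lock, struct list_head *head,
				   int pack_id)
{
	struct demo_req *req;
	unsigned long flags;

	spin_lock_irqsave(lock, flags);
	list_for_each_entry(req, head, entry) {
		if (req->done != 1)
			continue;
		if (pack_id != -1 && req->pack_id != pack_id)
			continue;
		req->done = 2;	/* claim before dropping the lock */
		spin_unlock_irqrestore(lock, flags);
		return req;
	}
	spin_unlock_irqrestore(lock, flags);
	return NULL;
}
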
@@ -2159,7 +2159,7 @@ sg_setup_req(struct sg_fd *sfp)
unsigned long iflags;
struct sg_request *rp = sfp->req_arr;

- write_lock_irqsave(&sfp->rq_list_lock, iflags);
+ spin_lock_irqsave(&sfp->rq_list_lock, iflags);
if (!list_empty(&sfp->rq_list)) {
if (!sfp->cmd_q)
goto out_unlock;
@@ -2175,10 +2175,10 @@ sg_setup_req(struct sg_fd *sfp)
rp->parentfp = sfp;
rp->header.duration = jiffies_to_msecs(jiffies);
list_add_tail(&rp->entry, &sfp->rq_list);
- write_unlock_irqrestore(&sfp->rq_list_lock, iflags);
+ spin_unlock_irqrestore(&sfp->rq_list_lock, iflags);
return rp;
out_unlock:
- write_unlock_irqrestore(&sfp->rq_list_lock, iflags);
+ spin_unlock_irqrestore(&sfp->rq_list_lock, iflags);
return NULL;
}

@@ -2191,13 +2191,13 @@ sg_remove_request(struct sg_fd *sfp, struct sg_request *srp)

if (!sfp || !srp || list_empty(&sfp->rq_list))
return res;
- write_lock_irqsave(&sfp->rq_list_lock, iflags);
+ spin_lock_irqsave(&sfp->rq_list_lock, iflags);
if (!list_empty(&srp->entry)) {
list_del(&srp->entry);
srp->parentfp = NULL;
res = 1;
}
- write_unlock_irqrestore(&sfp->rq_list_lock, iflags);
+ spin_unlock_irqrestore(&sfp->rq_list_lock, iflags);

/*
* If the device is detaching, wakeup any readers in case we just
@@ -2222,7 +2222,7 @@ sg_add_sfp(struct sg_device *sdp)
return ERR_PTR(-ENOMEM);

init_waitqueue_head(&sfp->read_wait);
- rwlock_init(&sfp->rq_list_lock);
+ spin_lock_init(&sfp->rq_list_lock);
INIT_LIST_HEAD(&sfp->rq_list);
kref_init(&sfp->f_ref);
mutex_init(&sfp->f_mutex);
@@ -2267,14 +2267,14 @@ sg_remove_sfp_usercontext(struct work_struct *work)
unsigned long iflags;

/* Cleanup any responses which were never read(). */
- write_lock_irqsave(&sfp->rq_list_lock, iflags);
+ spin_lock_irqsave(&sfp->rq_list_lock, iflags);
while (!list_empty(&sfp->rq_list)) {
srp = list_first_entry(&sfp->rq_list, struct sg_request, entry);
sg_finish_scsi_blk_rq(srp);
list_del(&srp->entry);
srp->parentfp = NULL;
}
- write_unlock_irqrestore(&sfp->rq_list_lock, iflags);
+ spin_unlock_irqrestore(&sfp->rq_list_lock, iflags);

if (sfp->reserve.buflen > 0) {
SG_LOG(6, sfp, "%s: buflen=%d, num_sgat=%d\n", __func__,
@@ -2556,9 +2556,9 @@ sg_proc_seq_show_dev(struct seq_file *s, void *v)
scsidp->host->host_no, scsidp->channel,
scsidp->id, scsidp->lun, (int) scsidp->type,
1,
- (int) scsidp->queue_depth,
- (int) scsi_device_busy(scsidp),
- (int) scsi_device_online(scsidp));
+ (int)scsidp->queue_depth,
+ (int)scsi_device_busy(scsidp),
+ (int)scsi_device_online(scsidp));
}
read_unlock_irqrestore(&sg_index_lock, iflags);
return 0;
@@ -2598,7 +2598,7 @@ sg_proc_debug_helper(struct seq_file *s, struct sg_device *sdp)
k = 0;
list_for_each_entry(fp, &sdp->sfds, sfd_entry) {
k++;
- read_lock(&fp->rq_list_lock); /* irqs already disabled */
+ spin_lock(&fp->rq_list_lock); /* irqs already disabled */
seq_printf(s, " FD(%d): timeout=%dms buflen=%d (res)sgat=%d\n",
k, jiffies_to_msecs(fp->timeout),
fp->reserve.buflen,
@@ -2646,7 +2646,7 @@ sg_proc_debug_helper(struct seq_file *s, struct sg_device *sdp)
}
if (list_empty(&fp->rq_list))
seq_puts(s, " No requests active\n");
- read_unlock(&fp->rq_list_lock);
+ spin_unlock(&fp->rq_list_lock);
}
}
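
One variant worth noting: sg_index_lock is untouched by this patch and stays an rwlock (see the read_unlock_irqrestore() above), and sg_proc_debug_helper() runs inside that irqsave section, so the converted per-fd lock is taken with the plain spin_lock()/spin_unlock() pair, exactly as the retained "irqs already disabled" comment says. A minimal sketch of that nesting, with hypothetical demo_ locks:

#include <linux/spinlock.h>

static DEFINE_RWLOCK(demo_index_lock);	/* outer lock, still an rwlock */
static DEFINE_SPINLOCK(demo_fd_lock);	/* inner lock, the converted one */

/* Hypothetical analogue of the /proc debug path above. */
static void demo_dump(void)
{
	unsigned long iflags;

	read_lock_irqsave(&demo_index_lock, iflags);
	spin_lock(&demo_fd_lock);	/* plain variant: irqs already off */
	/* ... walk the per-fd request list and print its state ... */
	spin_unlock(&demo_fd_lock);
	read_unlock_irqrestore(&demo_index_lock, iflags);
}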