@@ -4624,6 +4624,149 @@ void lpfc_vmid_assign_cs_ctl(struct lpfc_vport *vport, struct lpfc_vmid *vmid)
}
}
+/*
+ * lpfc_vmid_get_appid - get the vmid associated with the uuid
+ * @vport: The virtual port for which this call is being executed.
+ * @uuid: uuid associated with the VE
+ * @cmd: address of scsi cmnd descriptor
+ * @tag: VMID tag to be filled in for this IO
+ *
+ * Returns 0 if a vmid tag can be used for this IO, non-zero if the IO
+ * should be sent without a tag.
+ */
+static int lpfc_vmid_get_appid(struct lpfc_vport *vport, char *uuid,
+ struct scsi_cmnd *cmd, union lpfc_vmid_io_tag *tag)
+{
+ struct lpfc_vmid *vmp = NULL;
+ int hash, len, rc = 1, i;
+ u8 pending = 0;
+
+ /* check if QFPA is complete */
+ if (lpfc_vmid_is_type_priority_tag(vport) &&
+ !(vport->vmid_flag & LPFC_VMID_QFPA_CMPL))
+ return 1;
+
+ /* search if the uuid has already been mapped to the vmid */
+ len = strlen(uuid);
+ hash = lpfc_vmid_hash_fn(uuid, len);
+
+ /* search for the VMID in the table */
+ read_lock(&vport->vmid_lock);
+ vmp = lpfc_get_vmid_from_hashtable(vport, hash, uuid);
+ read_unlock(&vport->vmid_lock);
+
+ /* if found, check if it is already registered */
+ if (vmp && vmp->flag & LPFC_VMID_REGISTERED) {
+ lpfc_vmid_update_entry(vport, cmd, vmp, tag);
+ rc = 0;
+ } else if (vmp && (vmp->flag & LPFC_VMID_REQ_REGISTER ||
+ vmp->flag & LPFC_VMID_DE_REGISTER)) {
+ /* a registration or deregistration request has already been
+ * sent, so no vmid tag is added for this IO
+ */
+ rc = 1;
+ } else {
+ /* otherwise, start the process to obtain a vmid as per the
+ * type of switch connected
+ */
+ write_lock(&vport->vmid_lock);
+ vmp = lpfc_get_vmid_from_hashtable(vport, hash, uuid);
+
+ /* re-check now that the read lock was released: the entry may
+ * have been added or registered by another context meanwhile
+ */
+ if (vmp && vmp->flag & LPFC_VMID_REGISTERED) {
+ lpfc_vmid_update_entry(vport, cmd, vmp, tag);
+ write_unlock(&vport->vmid_lock);
+ return 0;
+ } else if (vmp && vmp->flag & LPFC_VMID_REQ_REGISTER) {
+ write_unlock(&vport->vmid_lock);
+ return 1;
+ }
+
+ /* else search and allocate a free slot in the hash table */
+ if (vport->cur_vmid_cnt < vport->max_vmid) {
+ for (i = 0; i < vport->max_vmid; ++i) {
+ vmp = vport->vmid + i;
+ if (vmp->flag == LPFC_VMID_SLOT_FREE)
+ break;
+ }
+ } else {
+ write_unlock(&vport->vmid_lock);
+ return 1;
+ }
+
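+ /* if no free slot was found, vmp points at the last entry and
+ * the LPFC_VMID_SLOT_FREE check below skips registration
+ */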
+ if (vmp && (vmp->flag == LPFC_VMID_SLOT_FREE)) {
+ vmp->vmid_len = len;
+
+ /* Add the vmid and register */
+ memcpy(vmp->host_vmid, uuid, vmp->vmid_len);
+ vmp->io_rd_cnt = 0;
+ vmp->io_wr_cnt = 0;
+ vmp->flag = LPFC_VMID_SLOT_USED;
+ lpfc_put_vmid_in_hashtable(vport, hash, vmp);
+
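+ /* flag the entry so the inactivity timer can reclaim it if an
+ * inactivity timeout is configured
+ */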
+ vmp->delete_inactive =
+ vport->vmid_inactivity_timeout ? 1 : 0;
+
+ /* if type priority tag, get next available vmid */
+ if (lpfc_vmid_is_type_priority_tag(vport))
+ lpfc_vmid_assign_cs_ctl(vport, vmp);
+
+ /* allocate the per-cpu variable for holding the last access
+ * time stamp, only if vmid is enabled
+ */
+ if (!vmp->last_io_time)
+ vmp->last_io_time = __alloc_percpu(sizeof(u64),
+ __alignof__(struct lpfc_vmid));
+
+ /* registration pending */
+ pending = 1;
+ rc = 1;
+ }
+ write_unlock(&vport->vmid_lock);
+
+ /* complete the transaction with the switch: priority tagging
+ * uses a UVEM request, otherwise a RAPP_IDENT CT command
+ * registers the application id
+ */
+ if (pending) {
+ if (lpfc_vmid_is_type_priority_tag(vport))
+ rc = lpfc_vmid_uvem(vport, vmp, true);
+ else
+ rc = lpfc_vmid_cmd(vport, SLI_CTAS_RAPP_IDENT, vmp);
+ if (!rc) {
+ write_lock(&vport->vmid_lock);
+ vport->cur_vmid_cnt++;
+ vmp->flag |= LPFC_VMID_REQ_REGISTER;
+ write_unlock(&vport->vmid_lock);
+ }
+ }
+
+ /* finally, enable the idle timer once */
+ if (!(vport->phba->pport->vmid_flag & LPFC_VMID_TIMER_ENBLD)) {
+ mod_timer(&vport->phba->inactive_vmid_poll,
+ jiffies +
+ msecs_to_jiffies(1000 * LPFC_VMID_TIMER));
+ vport->phba->pport->vmid_flag |= LPFC_VMID_TIMER_ENBLD;
+ }
+ }
+ return rc;
+}
+
+/*
+ * lpfc_is_command_vm_io - get the uuid from the blk cgroup
+ * @cmd: Pointer to scsi_cmnd data structure.
+ *
+ * Returns the uuid (application identifier) associated with the command's
+ * bio via its blk cgroup, or NULL if none is set.
+ */
+static char *lpfc_is_command_vm_io(struct scsi_cmnd *cmd)
+{
+ char *uuid = NULL;
+
+ if (cmd->request && cmd->request->bio)
+ uuid = blkcg_get_app_identifier(cmd->request->bio);
+ return uuid;
+}
+
/**
* lpfc_queuecommand - scsi_host_template queuecommand entry point
* @cmnd: Pointer to scsi_cmnd data structure.
@@ -4649,6 +4792,7 @@ lpfc_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *cmnd)
int err, idx;
+ u8 *uuid = NULL;
#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
uint64_t start = 0L;
if (phba->ktime_on)
start = ktime_get_ns();
@@ -4772,6 +4916,25 @@ lpfc_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *cmnd)
lpfc_scsi_prep_cmnd(vport, lpfc_cmd, ndlp);
+ /* check the necessary and sufficient condition to support VMID */
+ if (lpfc_is_vmid_enabled(phba) &&
+ (ndlp->vmid_support ||
+ phba->pport->vmid_priority_tagging ==
+ LPFC_VMID_PRIO_TAG_ALL_TARGETS)) {
+ /* if the IO is generated by a VM, get the associated
+ * virtual entity id
+ */
+ uuid = lpfc_is_command_vm_io(cmnd);
+
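+ /* if a vmid cannot be obtained for this uuid, the IO is simply
+ * sent without a vmid tag
+ */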
+ if (uuid) {
+ err = lpfc_vmid_get_appid(vport, uuid, cmnd,
+ (union lpfc_vmid_io_tag *)
+ &lpfc_cmd->cur_iocbq.vmid_tag);
+ if (!err)
+ lpfc_cmd->cur_iocbq.iocb_flag |= LPFC_IO_VMID;
+ }
+ }
+
+ atomic_inc(&ndlp->cmd_pending);
#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
if (unlikely(phba->hdwqstat_on & LPFC_CHECK_SCSI_IO))
this_cpu_inc(phba->sli4_hba.c_stat->xmt_io);