@@ -91,8 +91,10 @@ enum pfvf_compatibility_version {
ADF_PFVF_COMPAT_CAPABILITIES = 0x02,
/* In-use pattern cleared by receiver */
ADF_PFVF_COMPAT_FAST_ACK = 0x03,
+ /* Ring to service mapping support for non-standard mappings */
+ ADF_PFVF_COMPAT_RING_TO_SVC_MAP = 0x04,
/* Reference to the latest version */
- ADF_PFVF_COMPAT_THIS_VERSION = 0x03,
+ ADF_PFVF_COMPAT_THIS_VERSION = 0x04,
};
/* PF->VF Version Response */
@@ -139,6 +141,7 @@ enum pf2vf_blkmsg_error {
*/
enum vf2pf_blkmsg_req_type {
ADF_VF2PF_BLKMSG_REQ_CAP_SUMMARY = 0x02,
+ ADF_VF2PF_BLKMSG_REQ_RING_SVC_MAP = 0x03,
};
#define ADF_VF2PF_SMALL_BLOCK_TYPE_MAX \
@@ -202,4 +205,14 @@ struct capabilities_v3 {
u32 frequency;
} __packed;
+/* PF/VF Ring to service mapping values */
+enum blkmsg_ring_to_svc_versions {
+ ADF_PFVF_RING_TO_SVC_VERSION = 0x01,
+};
+
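+/*
+ * Ring to service mapping block message, v1. The map field carries the
+ * PF's hw_device->ring_to_svc_map, encoding the service assigned to each
+ * ring pair, so that a VF can adopt non-standard mappings.
+ */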
+struct ring_to_svc_map_v1 {
+ struct pfvf_blkmsg_header hdr;
+ u16 map;
+} __packed;
+
#endif /* ADF_PFVF_MSG_H */
@@ -36,3 +36,17 @@ int adf_pf_capabilities_msg_provider(struct adf_accel_dev *accel_dev,
return 0;
}
+
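+/*
+ * PF side provider for the RING_SVC_MAP block message: fill the response
+ * with the PF's current ring to service mapping for the requesting VF.
+ */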
+int adf_pf_ring_to_svc_msg_provider(struct adf_accel_dev *accel_dev,
+ u8 *buffer, u8 compat)
+{
+ struct ring_to_svc_map_v1 rts_map_msg;
+
+ rts_map_msg.map = accel_dev->hw_device->ring_to_svc_map;
+ rts_map_msg.hdr.version = ADF_PFVF_RING_TO_SVC_VERSION;
+ rts_map_msg.hdr.payload_size = ADF_PFVF_BLKMSG_PAYLOAD_SIZE(rts_map_msg);
+
+ memcpy(buffer, &rts_map_msg, sizeof(rts_map_msg));
+
+ return 0;
+}
@@ -12,5 +12,7 @@ typedef int (*adf_pf2vf_blkmsg_provider)(struct adf_accel_dev *accel_dev,
int adf_pf_capabilities_msg_provider(struct adf_accel_dev *accel_dev,
u8 *buffer, u8 comapt);
+int adf_pf_ring_to_svc_msg_provider(struct adf_accel_dev *accel_dev,
+ u8 *buffer, u8 comapt);
#endif /* ADF_PFVF_PF_MSG_H */
@@ -16,6 +16,7 @@ static const adf_pf2vf_blkmsg_provider pf2vf_blkmsg_providers[] = {
NULL, /* no message type defined for value 0 */
NULL, /* no message type defined for value 1 */
adf_pf_capabilities_msg_provider, /* ADF_VF2PF_BLKMSG_REQ_CAP_SUMMARY */
+ adf_pf_ring_to_svc_msg_provider, /* ADF_VF2PF_BLKMSG_REQ_RING_SVC_MAP */
};
/**
@@ -138,3 +138,30 @@ int adf_vf2pf_get_capabilities(struct adf_accel_dev *accel_dev)
return 0;
}
+
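+/*
+ * Ask the PF for its ring to service mapping and adopt it in place of the
+ * default one. PFs that predate ADF_PFVF_COMPAT_RING_TO_SVC_MAP cannot
+ * service this request, so the already configured defaults are kept.
+ */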
+int adf_vf2pf_get_ring_to_svc(struct adf_accel_dev *accel_dev)
+{
+ struct ring_to_svc_map_v1 rts_map_msg = { { 0 }, };
+ unsigned int len = sizeof(rts_map_msg);
+
+ if (accel_dev->vf.pf_compat_ver < ADF_PFVF_COMPAT_RING_TO_SVC_MAP)
+ /* Use already set default mappings */
+ return 0;
+
+ if (adf_send_vf2pf_blkmsg_req(accel_dev, ADF_VF2PF_BLKMSG_REQ_RING_SVC_MAP,
+ (u8 *)&rts_map_msg, &len)) {
+ dev_err(&GET_DEV(accel_dev),
+ "QAT: Failed to get block message response\n");
+ return -EFAULT;
+ }
+
+ if (unlikely(len < sizeof(struct ring_to_svc_map_v1))) {
+ dev_err(&GET_DEV(accel_dev),
+ "RING_TO_SVC message truncated to %d bytes\n", len);
+ return -EFAULT;
+ }
+
+ /* Only v1 at present */
+ accel_dev->hw_device->ring_to_svc_map = rts_map_msg.map;
+ return 0;
+}
@@ -8,6 +8,7 @@ int adf_vf2pf_notify_init(struct adf_accel_dev *accel_dev);
void adf_vf2pf_notify_shutdown(struct adf_accel_dev *accel_dev);
int adf_vf2pf_request_version(struct adf_accel_dev *accel_dev);
int adf_vf2pf_get_capabilities(struct adf_accel_dev *accel_dev);
+int adf_vf2pf_get_ring_to_svc(struct adf_accel_dev *accel_dev);
#else
static inline int adf_vf2pf_notify_init(struct adf_accel_dev *accel_dev)
{
@@ -357,6 +357,10 @@ int adf_enable_vf2pf_comms(struct adf_accel_dev *accel_dev)
return ret;
ret = adf_vf2pf_get_capabilities(accel_dev);
+ if (ret)
+ return ret;
+
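+ /* Adopt the PF's ring to service mapping, where supported */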
+ ret = adf_vf2pf_get_ring_to_svc(accel_dev);
return ret;
}