crypto: qat - re-enable sriov after pf reset
When a Physical Function (PF) is reset, SR-IOV gets disabled, making the associated Virtual Functions (VFs) unavailable. Even after the reset completes and state is restored with pci_restore_state(), the VFs remain uncreated because numvfs is still 0. SR-IOV must therefore be reconfigured to re-enable the VFs.

This commit introduces the ADF_SRIOV_ENABLED configuration flag to cache the SR-IOV enablement state; SR-IOV is re-enabled only if it was previously configured. It also introduces a dedicated workqueue without the WQ_MEM_RECLAIM flag for enabling SR-IOV during Heartbeat and CPM error resets, preventing a workqueue flushing warning.

This patch is based on earlier work done by Shashank Gupta.

Signed-off-by: Mun Chun Yep <mun.chun.yep@intel.com>
Reviewed-by: Ahsan Atta <ahsan.atta@intel.com>
Reviewed-by: Markas Rapoportas <markas.rapoportas@intel.com>
Reviewed-by: Giovanni Cabiddu <giovanni.cabiddu@intel.com>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
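Why the second workqueue must not carry WQ_MEM_RECLAIM: the "workqueue flushing warning" the message refers to is presumably check_flush_dependency(), which fires when a worker on a WQ_MEM_RECLAIM queue flushes work on a queue without that flag. The patch sidesteps the check by queuing the SR-IOV re-enable on a plain workqueue and having the reset worker wait on a completion, which is not a flush dependency. A minimal sketch of the pattern follows; the names (sriov_wq, sriov_ctx, sriov_worker, reset_worker) are illustrative, not the driver's identifiers:

/*
 * Minimal sketch of the hand-off pattern (illustrative names, not the
 * driver source). Flushing a !WQ_MEM_RECLAIM queue from a WQ_MEM_RECLAIM
 * worker triggers check_flush_dependency(); waiting on a completion
 * does not, so the reset worker can block safely with a timeout.
 */
#include <linux/completion.h>
#include <linux/jiffies.h>
#include <linux/printk.h>
#include <linux/workqueue.h>

static struct workqueue_struct *sriov_wq;	/* alloc_workqueue(name, 0, 0) */

struct sriov_ctx {
	struct completion compl;
	struct work_struct work;
};

static void sriov_worker(struct work_struct *work)
{
	struct sriov_ctx *ctx = container_of(work, struct sriov_ctx, work);

	/* re-enable SR-IOV here, then wake the waiting reset worker */
	complete(&ctx->compl);
}

/* runs on a WQ_MEM_RECLAIM queue, as the reset worker does in the patch */
static void reset_worker(struct work_struct *work)
{
	struct sriov_ctx ctx;

	init_completion(&ctx.compl);
	INIT_WORK(&ctx.work, sriov_worker);
	queue_work(sriov_wq, &ctx.work);

	/* bounded wait instead of flush_work(): no flush dependency check */
	if (!wait_for_completion_timeout(&ctx.compl, msecs_to_jiffies(10000)))
		pr_warn("sriov re-enable timed out\n");
}

As in the patch itself, the context lives on the reset worker's stack and the wait is bounded at 10 seconds.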
diff --git a/drivers/crypto/intel/qat/qat_common/adf_aer.c b/drivers/crypto/intel/qat/qat_common/adf_aer.c
@@ -15,6 +15,7 @@ struct adf_fatal_error_data {
 };
 
 static struct workqueue_struct *device_reset_wq;
+static struct workqueue_struct *device_sriov_wq;
 
 static pci_ers_result_t adf_error_detected(struct pci_dev *pdev,
 					   pci_channel_state_t state)
@@ -43,6 +44,13 @@ struct adf_reset_dev_data {
 	struct work_struct reset_work;
 };
 
+/* sriov dev data */
+struct adf_sriov_dev_data {
+	struct adf_accel_dev *accel_dev;
+	struct completion compl;
+	struct work_struct sriov_work;
+};
+
 void adf_reset_sbr(struct adf_accel_dev *accel_dev)
 {
 	struct pci_dev *pdev = accel_to_pci_dev(accel_dev);
@@ -88,11 +96,22 @@ void adf_dev_restore(struct adf_accel_dev *accel_dev)
 	}
 }
 
+static void adf_device_sriov_worker(struct work_struct *work)
+{
+	struct adf_sriov_dev_data *sriov_data =
+		container_of(work, struct adf_sriov_dev_data, sriov_work);
+
+	adf_reenable_sriov(sriov_data->accel_dev);
+	complete(&sriov_data->compl);
+}
+
 static void adf_device_reset_worker(struct work_struct *work)
 {
 	struct adf_reset_dev_data *reset_data =
 		  container_of(work, struct adf_reset_dev_data, reset_work);
 	struct adf_accel_dev *accel_dev = reset_data->accel_dev;
+	unsigned long wait_jiffies = msecs_to_jiffies(10000);
+	struct adf_sriov_dev_data sriov_data;
 
 	adf_dev_restarting_notify(accel_dev);
 	if (adf_dev_restart(accel_dev)) {
@@ -103,6 +122,14 @@ static void adf_device_reset_worker(struct work_struct *work)
 		WARN(1, "QAT: device restart failed. Device is unusable\n");
 		return;
 	}
+
+	sriov_data.accel_dev = accel_dev;
+	init_completion(&sriov_data.compl);
+	INIT_WORK(&sriov_data.sriov_work, adf_device_sriov_worker);
+	queue_work(device_sriov_wq, &sriov_data.sriov_work);
+	if (wait_for_completion_timeout(&sriov_data.compl, wait_jiffies))
+		adf_pf2vf_notify_restarted(accel_dev);
+
 	adf_dev_restarted_notify(accel_dev);
 	clear_bit(ADF_STATUS_RESTARTING, &accel_dev->status);
 
@@ -216,7 +243,14 @@ int adf_init_aer(void)
 {
 	device_reset_wq = alloc_workqueue("qat_device_reset_wq",
 					  WQ_MEM_RECLAIM, 0);
-	return !device_reset_wq ? -EFAULT : 0;
+	if (!device_reset_wq)
+		return -EFAULT;
+
+	device_sriov_wq = alloc_workqueue("qat_device_sriov_wq", 0, 0);
+	if (!device_sriov_wq)
+		return -EFAULT;
+
+	return 0;
 }
 
 void adf_exit_aer(void)
@@ -224,4 +258,8 @@ void adf_exit_aer(void)
 	if (device_reset_wq)
 		destroy_workqueue(device_reset_wq);
 	device_reset_wq = NULL;
+
+	if (device_sriov_wq)
+		destroy_workqueue(device_sriov_wq);
+	device_sriov_wq = NULL;
 }
diff --git a/drivers/crypto/intel/qat/qat_common/adf_cfg_strings.h b/drivers/crypto/intel/qat/qat_common/adf_cfg_strings.h
@@ -49,5 +49,6 @@
 	ADF_ETRMGR_BANK "%d" ADF_ETRMGR_CORE_AFFINITY
 #define ADF_ACCEL_STR "Accelerator%d"
 #define ADF_HEARTBEAT_TIMER "HeartbeatTimer"
+#define ADF_SRIOV_ENABLED "SriovEnabled"
 
 #endif
diff --git a/drivers/crypto/intel/qat/qat_common/adf_common_drv.h b/drivers/crypto/intel/qat/qat_common/adf_common_drv.h
@@ -192,6 +192,7 @@ bool adf_misc_wq_queue_delayed_work(struct delayed_work *work,
 #if defined(CONFIG_PCI_IOV)
 int adf_sriov_configure(struct pci_dev *pdev, int numvfs);
 void adf_disable_sriov(struct adf_accel_dev *accel_dev);
+void adf_reenable_sriov(struct adf_accel_dev *accel_dev);
 void adf_enable_vf2pf_interrupts(struct adf_accel_dev *accel_dev, u32 vf_mask);
 void adf_disable_all_vf2pf_interrupts(struct adf_accel_dev *accel_dev);
 bool adf_recv_and_handle_pf2vf_msg(struct adf_accel_dev *accel_dev);
@@ -212,6 +213,10 @@ static inline void adf_disable_sriov(struct adf_accel_dev *accel_dev)
 {
 }
 
+static inline void adf_reenable_sriov(struct adf_accel_dev *accel_dev)
+{
+}
+
 static inline int adf_init_pf_wq(void)
 {
 	return 0;
diff --git a/drivers/crypto/intel/qat/qat_common/adf_sriov.c b/drivers/crypto/intel/qat/qat_common/adf_sriov.c
@@ -60,7 +60,6 @@ static int adf_enable_sriov(struct adf_accel_dev *accel_dev)
 		/* This ptr will be populated when VFs will be created */
 		vf_info->accel_dev = accel_dev;
 		vf_info->vf_nr = i;
-		vf_info->vf_compat_ver = 0;
 
 		mutex_init(&vf_info->pf2vf_lock);
 		ratelimit_state_init(&vf_info->vf2pf_ratelimit,
@@ -84,6 +83,32 @@ static int adf_enable_sriov(struct adf_accel_dev *accel_dev)
 	return pci_enable_sriov(pdev, totalvfs);
 }
 
+void adf_reenable_sriov(struct adf_accel_dev *accel_dev)
+{
+	struct pci_dev *pdev = accel_to_pci_dev(accel_dev);
+	char cfg[ADF_CFG_MAX_VAL_LEN_IN_BYTES] = {0};
+	unsigned long val = 0;
+
+	if (adf_cfg_get_param_value(accel_dev, ADF_GENERAL_SEC,
+				    ADF_SRIOV_ENABLED, cfg))
+		return;
+
+	if (!accel_dev->pf.vf_info)
+		return;
+
+	if (adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC, ADF_NUM_CY,
+					&val, ADF_DEC))
+		return;
+
+	if (adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC, ADF_NUM_DC,
+					&val, ADF_DEC))
+		return;
+
+	set_bit(ADF_STATUS_CONFIGURED, &accel_dev->status);
+	dev_dbg(&pdev->dev, "Re-enabling SRIOV\n");
+	adf_enable_sriov(accel_dev);
+}
+
 /**
  * adf_disable_sriov() - Disable SRIOV for the device
  * @accel_dev:  Pointer to accel device.
@@ -116,8 +141,10 @@ void adf_disable_sriov(struct adf_accel_dev *accel_dev)
 	for (i = 0, vf = accel_dev->pf.vf_info; i < totalvfs; i++, vf++)
 		mutex_destroy(&vf->pf2vf_lock);
 
-	kfree(accel_dev->pf.vf_info);
-	accel_dev->pf.vf_info = NULL;
+	if (!test_bit(ADF_STATUS_RESTARTING, &accel_dev->status)) {
+		kfree(accel_dev->pf.vf_info);
+		accel_dev->pf.vf_info = NULL;
+	}
 }
 EXPORT_SYMBOL_GPL(adf_disable_sriov);
 
@@ -195,6 +222,10 @@ int adf_sriov_configure(struct pci_dev *pdev, int numvfs)
 	if (ret)
 		return ret;
 
+	val = 1;
+	adf_cfg_add_key_value_param(accel_dev, ADF_GENERAL_SEC, ADF_SRIOV_ENABLED,
+				    &val, ADF_DEC);
+
 	return numvfs;
 }
 EXPORT_SYMBOL_GPL(adf_sriov_configure);
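For context on when the ADF_SRIOV_ENABLED record gets written (a hedged sketch, not part of this diff): adf_sriov_configure() serves as the PF driver's PCI sriov_configure hook, which the PCI core invokes when an administrator writes to the device's sriov_numvfs sysfs attribute; that is the moment the flag is cached, and adf_reenable_sriov() finds it again after a later PF reset. A sketch of the wiring, assuming a QAT PF driver (the driver name below is illustrative):

/*
 * Hedged sketch: how adf_sriov_configure() is reached. QAT PF drivers
 * install it as the pci_driver sriov_configure callback, so the PCI
 * core calls it when "echo N > /sys/bus/pci/devices/<bdf>/sriov_numvfs"
 * is run; that is where the ADF_SRIOV_ENABLED flag is now cached.
 */
#include <linux/pci.h>
#include "adf_common_drv.h"	/* declares adf_sriov_configure() */

static struct pci_driver adf_example_driver = {
	.name = "qat_example",	/* illustrative; real drivers use their own */
	.sriov_configure = adf_sriov_configure,
	/* .id_table, .probe, .remove omitted from this sketch */
};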