Mirror of https://github.com/raspberrypi/linux.git (synced 2025-12-06 10:00:17 +00:00)

Commit: Merge remote-tracking branch 'stable/linux-6.12.y' into rpi-6.12.y
Makefile (2 changed lines):

@@ -1,7 +1,7 @@
 # SPDX-License-Identifier: GPL-2.0
 VERSION = 6
 PATCHLEVEL = 12
-SUBLEVEL = 27
+SUBLEVEL = 28
 EXTRAVERSION =
 NAME = Baby Opossum Posse
@@ -40,6 +40,9 @@
 reg = <1>;
 interrupt-parent = <&gpio4>;
 interrupts = <16 IRQ_TYPE_LEVEL_LOW>;
+micrel,led-mode = <1>;
+clocks = <&clks IMX6UL_CLK_ENET_REF>;
+clock-names = "rmii-ref";
 status = "okay";
 };
 };
@@ -1478,7 +1478,7 @@
 reg = <0 0x4c300000 0 0x10000>,
 <0 0x60100000 0 0xfe00000>,
 <0 0x4c360000 0 0x10000>,
-<0 0x4c340000 0 0x2000>;
+<0 0x4c340000 0 0x4000>;
 reg-names = "dbi", "config", "atu", "app";
 ranges = <0x81000000 0x0 0x00000000 0x0 0x6ff00000 0 0x00100000>,
 <0x82000000 0x0 0x10000000 0x9 0x10000000 0 0x10000000>;

@@ -1518,7 +1518,7 @@
 reg = <0 0x4c300000 0 0x10000>,
 <0 0x4c360000 0 0x1000>,
 <0 0x4c320000 0 0x1000>,
-<0 0x4c340000 0 0x2000>,
+<0 0x4c340000 0 0x4000>,
 <0 0x4c370000 0 0x10000>,
 <0x9 0 1 0>;
 reg-names = "dbi","atu", "dbi2", "app", "dma", "addr_space";

@@ -1545,7 +1545,7 @@
 reg = <0 0x4c380000 0 0x10000>,
 <8 0x80100000 0 0xfe00000>,
 <0 0x4c3e0000 0 0x10000>,
-<0 0x4c3c0000 0 0x2000>;
+<0 0x4c3c0000 0 0x4000>;
 reg-names = "dbi", "config", "atu", "app";
 ranges = <0x81000000 0 0x00000000 0x8 0x8ff00000 0 0x00100000>,
 <0x82000000 0 0x10000000 0xa 0x10000000 0 0x10000000>;

@@ -1585,7 +1585,7 @@
 reg = <0 0x4c380000 0 0x10000>,
 <0 0x4c3e0000 0 0x1000>,
 <0 0x4c3a0000 0 0x1000>,
-<0 0x4c3c0000 0 0x2000>,
+<0 0x4c3c0000 0 0x4000>,
 <0 0x4c3f0000 0 0x10000>,
 <0xa 0 1 0>;
 reg-names = "dbi", "atu", "dbi2", "app", "dma", "addr_space";
@@ -114,14 +114,13 @@
 };

 intc: interrupt-controller@4ac00000 {
-compatible = "arm,cortex-a7-gic";
+compatible = "arm,gic-400";
 #interrupt-cells = <3>;
-#address-cells = <1>;
 interrupt-controller;
 reg = <0x0 0x4ac10000 0x0 0x1000>,
-<0x0 0x4ac20000 0x0 0x2000>,
-<0x0 0x4ac40000 0x0 0x2000>,
-<0x0 0x4ac60000 0x0 0x2000>;
+<0x0 0x4ac20000 0x0 0x20000>,
+<0x0 0x4ac40000 0x0 0x20000>,
+<0x0 0x4ac60000 0x0 0x20000>;
 };

 psci {
@@ -879,10 +879,12 @@ static u8 spectre_bhb_loop_affected(void)
 static const struct midr_range spectre_bhb_k132_list[] = {
 MIDR_ALL_VERSIONS(MIDR_CORTEX_X3),
 MIDR_ALL_VERSIONS(MIDR_NEOVERSE_V2),
+{},
 };
 static const struct midr_range spectre_bhb_k38_list[] = {
 MIDR_ALL_VERSIONS(MIDR_CORTEX_A715),
 MIDR_ALL_VERSIONS(MIDR_CORTEX_A720),
+{},
 };
 static const struct midr_range spectre_bhb_k32_list[] = {
 MIDR_ALL_VERSIONS(MIDR_CORTEX_A78),
@@ -132,11 +132,15 @@
 #define SO_PASSPIDFD 0x404A
 #define SO_PEERPIDFD 0x404B

-#define SO_DEVMEM_LINEAR 78
+#define SCM_TS_OPT_ID 0x404C
+
+#define SO_RCVPRIORITY 0x404D
+
+#define SO_DEVMEM_LINEAR 0x404E
 #define SCM_DEVMEM_LINEAR SO_DEVMEM_LINEAR
-#define SO_DEVMEM_DMABUF 79
+#define SO_DEVMEM_DMABUF 0x404F
 #define SCM_DEVMEM_DMABUF SO_DEVMEM_DMABUF
-#define SO_DEVMEM_DONTNEED 80
+#define SO_DEVMEM_DONTNEED 0x4050

 #if !defined(__KERNEL__)
@@ -97,9 +97,19 @@ handle_fpe(struct pt_regs *regs)

 memcpy(regs->fr, frcopy, sizeof regs->fr);
 if (signalcode != 0) {
-force_sig_fault(signalcode >> 24, signalcode & 0xffffff,
-(void __user *) regs->iaoq[0]);
-return -1;
+int sig = signalcode >> 24;
+
+if (sig == SIGFPE) {
+/*
+* Clear floating point trap bit to avoid trapping
+* again on the first floating-point instruction in
+* the userspace signal handler.
+*/
+regs->fr[0] &= ~(1ULL << 38);
+}
+force_sig_fault(sig, signalcode & 0xffffff,
+(void __user *) regs->iaoq[0]);
+return -1;
 }

 return signalcode ? -1 : 0;
@@ -234,10 +234,8 @@ fi

 # suppress some warnings in recent ld versions
 nowarn="-z noexecstack"
-if ! ld_is_lld; then
-if [ "$LD_VERSION" -ge "$(echo 2.39 | ld_version)" ]; then
-nowarn="$nowarn --no-warn-rwx-segments"
-fi
+if "${CROSS}ld" -v --no-warn-rwx-segments >/dev/null 2>&1; then
+nowarn="$nowarn --no-warn-rwx-segments"
 fi

 platformo=$object/"$platform".o
@@ -1132,6 +1132,19 @@ int __meminit radix__vmemmap_populate(unsigned long start, unsigned long end, in
 pmd_t *pmd;
 pte_t *pte;

+/*
+* Make sure we align the start vmemmap addr so that we calculate
+* the correct start_pfn in altmap boundary check to decided whether
+* we should use altmap or RAM based backing memory allocation. Also
+* the address need to be aligned for set_pte operation.
+*
+* If the start addr is already PMD_SIZE aligned we will try to use
+* a pmd mapping. We don't want to be too aggressive here beacause
+* that will cause more allocations in RAM. So only if the namespace
+* vmemmap start addr is PMD_SIZE aligned we will use PMD mapping.
+*/
+
+start = ALIGN_DOWN(start, PAGE_SIZE);
 for (addr = start; addr < end; addr = next) {
 next = pmd_addr_end(addr, end);

@@ -1157,8 +1170,8 @@ int __meminit radix__vmemmap_populate(unsigned long start, unsigned long end, in
 * in altmap block allocation failures, in which case
 * we fallback to RAM for vmemmap allocation.
 */
-if (altmap && (!IS_ALIGNED(addr, PMD_SIZE) ||
+if (!IS_ALIGNED(addr, PMD_SIZE) || (altmap &&
 altmap_cross_boundary(altmap, addr, PMD_SIZE))) {
 /*
 * make sure we don't create altmap mappings
 * covering things outside the device.
@@ -34,14 +34,11 @@ static bool early_is_tdx_guest(void)

 void arch_accept_memory(phys_addr_t start, phys_addr_t end)
 {
-static bool sevsnp;
-
 /* Platform-specific memory-acceptance call goes here */
 if (early_is_tdx_guest()) {
 if (!tdx_accept_memory(start, end))
 panic("TDX: Failed to accept memory\n");
-} else if (sevsnp || (sev_get_status() & MSR_AMD64_SEV_SNP_ENABLED)) {
-sevsnp = true;
+} else if (early_is_sevsnp_guest()) {
 snp_accept_memory(start, end);
 } else {
 error("Cannot accept memory: unknown platform\n");
@@ -644,3 +644,43 @@ void sev_prep_identity_maps(unsigned long top_level_pgt)

 sev_verify_cbit(top_level_pgt);
 }
+
+bool early_is_sevsnp_guest(void)
+{
+static bool sevsnp;
+
+if (sevsnp)
+return true;
+
+if (!(sev_get_status() & MSR_AMD64_SEV_SNP_ENABLED))
+return false;
+
+sevsnp = true;
+
+if (!snp_vmpl) {
+unsigned int eax, ebx, ecx, edx;
+
+/*
+* CPUID Fn8000_001F_EAX[28] - SVSM support
+*/
+eax = 0x8000001f;
+ecx = 0;
+native_cpuid(&eax, &ebx, &ecx, &edx);
+if (eax & BIT(28)) {
+struct msr m;
+
+/* Obtain the address of the calling area to use */
+boot_rdmsr(MSR_SVSM_CAA, &m);
+boot_svsm_caa = (void *)m.q;
+boot_svsm_caa_pa = m.q;
+
+/*
+* The real VMPL level cannot be discovered, but the
+* memory acceptance routines make no use of that so
+* any non-zero value suffices here.
+*/
+snp_vmpl = U8_MAX;
+}
+}
+return true;
+}
@@ -13,12 +13,14 @@
 bool sev_snp_enabled(void);
 void snp_accept_memory(phys_addr_t start, phys_addr_t end);
 u64 sev_get_status(void);
+bool early_is_sevsnp_guest(void);

 #else

 static inline bool sev_snp_enabled(void) { return false; }
 static inline void snp_accept_memory(phys_addr_t start, phys_addr_t end) { }
 static inline u64 sev_get_status(void) { return 0; }
+static inline bool early_is_sevsnp_guest(void) { return false; }

 #endif
@@ -753,7 +753,7 @@ void x86_pmu_enable_all(int added)
 }
 }

-static inline int is_x86_event(struct perf_event *event)
+int is_x86_event(struct perf_event *event)
 {
 int i;

@@ -4333,7 +4333,7 @@ static struct perf_guest_switch_msr *intel_guest_get_msrs(int *nr, void *data)
 arr[pebs_enable] = (struct perf_guest_switch_msr){
 .msr = MSR_IA32_PEBS_ENABLE,
 .host = cpuc->pebs_enabled & ~cpuc->intel_ctrl_guest_mask,
-.guest = pebs_mask & ~cpuc->intel_ctrl_host_mask,
+.guest = pebs_mask & ~cpuc->intel_ctrl_host_mask & kvm_pmu->pebs_enable,
 };

 if (arr[pebs_enable].host) {
@@ -110,9 +110,16 @@ static inline bool is_topdown_event(struct perf_event *event)
 return is_metric_event(event) || is_slots_event(event);
 }

+int is_x86_event(struct perf_event *event);
+
+static inline bool check_leader_group(struct perf_event *leader, int flags)
+{
+return is_x86_event(leader) ? !!(leader->hw.flags & flags) : false;
+}
+
 static inline bool is_branch_counters_group(struct perf_event *event)
 {
-return event->group_leader->hw.flags & PERF_X86_EVENT_BRANCH_COUNTERS;
+return check_leader_group(event->group_leader, PERF_X86_EVENT_BRANCH_COUNTERS);
 }

 struct amd_nb {
@@ -87,7 +87,6 @@ void blk_mq_map_hw_queues(struct blk_mq_queue_map *qmap,
 return;

 fallback:
-WARN_ON_ONCE(qmap->nr_queues > 1);
-blk_mq_clear_mq_map(qmap);
+blk_mq_map_queues(qmap);
 }
 EXPORT_SYMBOL_GPL(blk_mq_map_hw_queues);
@@ -36,8 +36,6 @@
 __stringify(DRM_IVPU_DRIVER_MINOR) "."
 #endif

-static struct lock_class_key submitted_jobs_xa_lock_class_key;
-
 int ivpu_dbg_mask;
 module_param_named(dbg_mask, ivpu_dbg_mask, int, 0644);
 MODULE_PARM_DESC(dbg_mask, "Driver debug mask. See IVPU_DBG_* macros.");

@@ -260,6 +258,9 @@ static int ivpu_open(struct drm_device *dev, struct drm_file *file)
 if (ret)
 goto err_xa_erase;

+file_priv->job_limit.min = FIELD_PREP(IVPU_JOB_ID_CONTEXT_MASK, (file_priv->ctx.id - 1));
+file_priv->job_limit.max = file_priv->job_limit.min | IVPU_JOB_ID_JOB_MASK;
+
 mutex_unlock(&vdev->context_list_lock);
 drm_dev_exit(idx);

@@ -452,26 +453,6 @@ static const struct drm_driver driver = {
 .minor = DRM_IVPU_DRIVER_MINOR,
 };

-static void ivpu_context_abort_invalid(struct ivpu_device *vdev)
-{
-struct ivpu_file_priv *file_priv;
-unsigned long ctx_id;
-
-mutex_lock(&vdev->context_list_lock);
-
-xa_for_each(&vdev->context_xa, ctx_id, file_priv) {
-if (!file_priv->has_mmu_faults || file_priv->aborted)
-continue;
-
-mutex_lock(&file_priv->lock);
-ivpu_context_abort_locked(file_priv);
-file_priv->aborted = true;
-mutex_unlock(&file_priv->lock);
-}
-
-mutex_unlock(&vdev->context_list_lock);
-}
-
 static irqreturn_t ivpu_irq_thread_handler(int irq, void *arg)
 {
 struct ivpu_device *vdev = arg;

@@ -485,9 +466,6 @@ static irqreturn_t ivpu_irq_thread_handler(int irq, void *arg)
 case IVPU_HW_IRQ_SRC_IPC:
 ivpu_ipc_irq_thread_handler(vdev);
 break;
-case IVPU_HW_IRQ_SRC_MMU_EVTQ:
-ivpu_context_abort_invalid(vdev);
-break;
 case IVPU_HW_IRQ_SRC_DCT:
 ivpu_pm_dct_irq_thread_handler(vdev);
 break;

@@ -604,13 +582,21 @@ static int ivpu_dev_init(struct ivpu_device *vdev)
 xa_init_flags(&vdev->context_xa, XA_FLAGS_ALLOC | XA_FLAGS_LOCK_IRQ);
 xa_init_flags(&vdev->submitted_jobs_xa, XA_FLAGS_ALLOC1);
 xa_init_flags(&vdev->db_xa, XA_FLAGS_ALLOC1);
-lockdep_set_class(&vdev->submitted_jobs_xa.xa_lock, &submitted_jobs_xa_lock_class_key);
 INIT_LIST_HEAD(&vdev->bo_list);

+vdev->db_limit.min = IVPU_MIN_DB;
+vdev->db_limit.max = IVPU_MAX_DB;
+
+INIT_WORK(&vdev->context_abort_work, ivpu_context_abort_thread_handler);
+
 ret = drmm_mutex_init(&vdev->drm, &vdev->context_list_lock);
 if (ret)
 goto err_xa_destroy;

+ret = drmm_mutex_init(&vdev->drm, &vdev->submitted_jobs_lock);
+if (ret)
+goto err_xa_destroy;
+
 ret = drmm_mutex_init(&vdev->drm, &vdev->bo_list_lock);
 if (ret)
 goto err_xa_destroy;
@@ -46,6 +46,9 @@
 #define IVPU_MIN_DB 1
 #define IVPU_MAX_DB 255

+#define IVPU_JOB_ID_JOB_MASK GENMASK(7, 0)
+#define IVPU_JOB_ID_CONTEXT_MASK GENMASK(31, 8)
+
 #define IVPU_NUM_ENGINES 2
 #define IVPU_NUM_PRIORITIES 4
 #define IVPU_NUM_CMDQS_PER_CTX (IVPU_NUM_ENGINES * IVPU_NUM_PRIORITIES)

@@ -134,12 +137,16 @@ struct ivpu_device {
 struct mutex context_list_lock; /* Protects user context addition/removal */
 struct xarray context_xa;
 struct xa_limit context_xa_limit;
+struct work_struct context_abort_work;

 struct xarray db_xa;
+struct xa_limit db_limit;
+u32 db_next;

 struct mutex bo_list_lock; /* Protects bo_list */
 struct list_head bo_list;

+struct mutex submitted_jobs_lock; /* Protects submitted_jobs */
 struct xarray submitted_jobs_xa;
 struct ivpu_ipc_consumer job_done_consumer;

@@ -171,6 +178,8 @@ struct ivpu_file_priv {
 struct mutex ms_lock; /* Protects ms_instance_list, ms_info_bo */
 struct list_head ms_instance_list;
 struct ivpu_bo *ms_info_bo;
+struct xa_limit job_limit;
+u32 job_id_next;
 bool has_mmu_faults;
 bool bound;
 bool aborted;
@@ -14,7 +14,7 @@
 #define PLL_PROFILING_FREQ_DEFAULT 38400000
 #define PLL_PROFILING_FREQ_HIGH 400000000

-#define DCT_DEFAULT_ACTIVE_PERCENT 15u
+#define DCT_DEFAULT_ACTIVE_PERCENT 30u
 #define DCT_PERIOD_US 35300u

 int ivpu_hw_btrs_info_init(struct ivpu_device *vdev);
@@ -21,8 +21,6 @@
 #include "vpu_boot_api.h"

 #define CMD_BUF_IDX 0
-#define JOB_ID_JOB_MASK GENMASK(7, 0)
-#define JOB_ID_CONTEXT_MASK GENMASK(31, 8)
 #define JOB_MAX_BUFFER_COUNT 65535

 static void ivpu_cmdq_ring_db(struct ivpu_device *vdev, struct ivpu_cmdq *cmdq)

@@ -79,7 +77,6 @@ static void ivpu_preemption_buffers_free(struct ivpu_device *vdev,

 static struct ivpu_cmdq *ivpu_cmdq_alloc(struct ivpu_file_priv *file_priv)
 {
-struct xa_limit db_xa_limit = {.max = IVPU_MAX_DB, .min = IVPU_MIN_DB};
 struct ivpu_device *vdev = file_priv->vdev;
 struct ivpu_cmdq *cmdq;
 int ret;

@@ -88,8 +85,9 @@ static struct ivpu_cmdq *ivpu_cmdq_alloc(struct ivpu_file_priv *file_priv)
 if (!cmdq)
 return NULL;

-ret = xa_alloc(&vdev->db_xa, &cmdq->db_id, NULL, db_xa_limit, GFP_KERNEL);
-if (ret) {
+ret = xa_alloc_cyclic(&vdev->db_xa, &cmdq->db_id, NULL, vdev->db_limit, &vdev->db_next,
+GFP_KERNEL);
+if (ret < 0) {
 ivpu_err(vdev, "Failed to allocate doorbell id: %d\n", ret);
 goto err_free_cmdq;
 }

@@ -337,6 +335,8 @@ void ivpu_context_abort_locked(struct ivpu_file_priv *file_priv)

 if (vdev->fw->sched_mode == VPU_SCHEDULING_MODE_OS)
 ivpu_jsm_context_release(vdev, file_priv->ctx.id);
+
+file_priv->aborted = true;
 }

 static int ivpu_cmdq_push_job(struct ivpu_cmdq *cmdq, struct ivpu_job *job)

@@ -354,7 +354,7 @@ static int ivpu_cmdq_push_job(struct ivpu_cmdq *cmdq, struct ivpu_job *job)
 return -EBUSY;
 }

-entry = &cmdq->jobq->job[tail];
+entry = &cmdq->jobq->slot[tail].job;
 entry->batch_buf_addr = job->cmd_buf_vpu_addr;
 entry->job_id = job->job_id;
 entry->flags = 0;
@@ -469,16 +469,14 @@ static struct ivpu_job *ivpu_job_remove_from_submitted_jobs(struct ivpu_device *
 {
 struct ivpu_job *job;

-xa_lock(&vdev->submitted_jobs_xa);
-job = __xa_erase(&vdev->submitted_jobs_xa, job_id);
+lockdep_assert_held(&vdev->submitted_jobs_lock);
+
+job = xa_erase(&vdev->submitted_jobs_xa, job_id);
 if (xa_empty(&vdev->submitted_jobs_xa) && job) {
 vdev->busy_time = ktime_add(ktime_sub(ktime_get(), vdev->busy_start_ts),
 vdev->busy_time);
 }

-xa_unlock(&vdev->submitted_jobs_xa);
-
 return job;
 }

@@ -486,6 +484,28 @@ static int ivpu_job_signal_and_destroy(struct ivpu_device *vdev, u32 job_id, u32
 {
 struct ivpu_job *job;

+lockdep_assert_held(&vdev->submitted_jobs_lock);
+
+job = xa_load(&vdev->submitted_jobs_xa, job_id);
+if (!job)
+return -ENOENT;
+
+if (job_status == VPU_JSM_STATUS_MVNCI_CONTEXT_VIOLATION_HW) {
+guard(mutex)(&job->file_priv->lock);
+
+if (job->file_priv->has_mmu_faults)
+return 0;
+
+/*
+* Mark context as faulty and defer destruction of the job to jobs abort thread
+* handler to synchronize between both faults and jobs returning context violation
+* status and ensure both are handled in the same way
+*/
+job->file_priv->has_mmu_faults = true;
+queue_work(system_wq, &vdev->context_abort_work);
+return 0;
+}
+
 job = ivpu_job_remove_from_submitted_jobs(vdev, job_id);
 if (!job)
 return -ENOENT;

@@ -503,6 +523,10 @@ static int ivpu_job_signal_and_destroy(struct ivpu_device *vdev, u32 job_id, u32
 ivpu_stop_job_timeout_detection(vdev);

 ivpu_rpm_put(vdev);
+
+if (!xa_empty(&vdev->submitted_jobs_xa))
+ivpu_start_job_timeout_detection(vdev);
+
 return 0;
 }

@@ -511,15 +535,18 @@ void ivpu_jobs_abort_all(struct ivpu_device *vdev)
 struct ivpu_job *job;
 unsigned long id;

+mutex_lock(&vdev->submitted_jobs_lock);
+
 xa_for_each(&vdev->submitted_jobs_xa, id, job)
 ivpu_job_signal_and_destroy(vdev, id, DRM_IVPU_JOB_STATUS_ABORTED);
+
+mutex_unlock(&vdev->submitted_jobs_lock);
 }

 static int ivpu_job_submit(struct ivpu_job *job, u8 priority)
 {
 struct ivpu_file_priv *file_priv = job->file_priv;
 struct ivpu_device *vdev = job->vdev;
-struct xa_limit job_id_range;
 struct ivpu_cmdq *cmdq;
 bool is_first_job;
 int ret;
@@ -528,27 +555,25 @@ static int ivpu_job_submit(struct ivpu_job *job, u8 priority)
 if (ret < 0)
 return ret;

+mutex_lock(&vdev->submitted_jobs_lock);
 mutex_lock(&file_priv->lock);

-cmdq = ivpu_cmdq_acquire(job->file_priv, job->engine_idx, priority);
+cmdq = ivpu_cmdq_acquire(file_priv, job->engine_idx, priority);
 if (!cmdq) {
 ivpu_warn_ratelimited(vdev, "Failed to get job queue, ctx %d engine %d prio %d\n",
 file_priv->ctx.id, job->engine_idx, priority);
 ret = -EINVAL;
-goto err_unlock_file_priv;
+goto err_unlock;
 }

-job_id_range.min = FIELD_PREP(JOB_ID_CONTEXT_MASK, (file_priv->ctx.id - 1));
-job_id_range.max = job_id_range.min | JOB_ID_JOB_MASK;
-
-xa_lock(&vdev->submitted_jobs_xa);
 is_first_job = xa_empty(&vdev->submitted_jobs_xa);
-ret = __xa_alloc(&vdev->submitted_jobs_xa, &job->job_id, job, job_id_range, GFP_KERNEL);
-if (ret) {
+ret = xa_alloc_cyclic(&vdev->submitted_jobs_xa, &job->job_id, job, file_priv->job_limit,
+&file_priv->job_id_next, GFP_KERNEL);
+if (ret < 0) {
 ivpu_dbg(vdev, JOB, "Too many active jobs in ctx %d\n",
 file_priv->ctx.id);
 ret = -EBUSY;
-goto err_unlock_submitted_jobs_xa;
+goto err_unlock;
 }

 ret = ivpu_cmdq_push_job(cmdq, job);

@@ -570,20 +595,20 @@ static int ivpu_job_submit(struct ivpu_job *job, u8 priority)
 job->job_id, file_priv->ctx.id, job->engine_idx, priority,
 job->cmd_buf_vpu_addr, cmdq->jobq->header.tail);

-xa_unlock(&vdev->submitted_jobs_xa);
-
 mutex_unlock(&file_priv->lock);

-if (unlikely(ivpu_test_mode & IVPU_TEST_MODE_NULL_HW))
+if (unlikely(ivpu_test_mode & IVPU_TEST_MODE_NULL_HW)) {
 ivpu_job_signal_and_destroy(vdev, job->job_id, VPU_JSM_STATUS_SUCCESS);
+}
+
+mutex_unlock(&vdev->submitted_jobs_lock);

 return 0;

 err_erase_xa:
-__xa_erase(&vdev->submitted_jobs_xa, job->job_id);
-err_unlock_submitted_jobs_xa:
-xa_unlock(&vdev->submitted_jobs_xa);
-err_unlock_file_priv:
+xa_erase(&vdev->submitted_jobs_xa, job->job_id);
+err_unlock:
+mutex_unlock(&vdev->submitted_jobs_lock);
 mutex_unlock(&file_priv->lock);
 ivpu_rpm_put(vdev);
 return ret;
@@ -753,7 +778,6 @@ ivpu_job_done_callback(struct ivpu_device *vdev, struct ivpu_ipc_hdr *ipc_hdr,
 struct vpu_jsm_msg *jsm_msg)
 {
 struct vpu_ipc_msg_payload_job_done *payload;
-int ret;

 if (!jsm_msg) {
 ivpu_err(vdev, "IPC message has no JSM payload\n");

@@ -766,9 +790,10 @@ ivpu_job_done_callback(struct ivpu_device *vdev, struct ivpu_ipc_hdr *ipc_hdr,
 }

 payload = (struct vpu_ipc_msg_payload_job_done *)&jsm_msg->payload;
-ret = ivpu_job_signal_and_destroy(vdev, payload->job_id, payload->job_status);
-if (!ret && !xa_empty(&vdev->submitted_jobs_xa))
-ivpu_start_job_timeout_detection(vdev);
+
+mutex_lock(&vdev->submitted_jobs_lock);
+ivpu_job_signal_and_destroy(vdev, payload->job_id, payload->job_status);
+mutex_unlock(&vdev->submitted_jobs_lock);
 }

 void ivpu_job_done_consumer_init(struct ivpu_device *vdev)

@@ -781,3 +806,41 @@ void ivpu_job_done_consumer_fini(struct ivpu_device *vdev)
 {
 ivpu_ipc_consumer_del(vdev, &vdev->job_done_consumer);
 }
+
+void ivpu_context_abort_thread_handler(struct work_struct *work)
+{
+struct ivpu_device *vdev = container_of(work, struct ivpu_device, context_abort_work);
+struct ivpu_file_priv *file_priv;
+unsigned long ctx_id;
+struct ivpu_job *job;
+unsigned long id;
+
+if (vdev->fw->sched_mode == VPU_SCHEDULING_MODE_HW)
+ivpu_jsm_reset_engine(vdev, 0);
+
+mutex_lock(&vdev->context_list_lock);
+xa_for_each(&vdev->context_xa, ctx_id, file_priv) {
+if (!file_priv->has_mmu_faults || file_priv->aborted)
+continue;
+
+mutex_lock(&file_priv->lock);
+ivpu_context_abort_locked(file_priv);
+mutex_unlock(&file_priv->lock);
+}
+mutex_unlock(&vdev->context_list_lock);
+
+if (vdev->fw->sched_mode != VPU_SCHEDULING_MODE_HW)
+return;
+
+ivpu_jsm_hws_resume_engine(vdev, 0);
+/*
+* In hardware scheduling mode NPU already has stopped processing jobs
+* and won't send us any further notifications, thus we have to free job related resources
+* and notify userspace
+*/
+mutex_lock(&vdev->submitted_jobs_lock);
+xa_for_each(&vdev->submitted_jobs_xa, id, job)
+if (job->file_priv->aborted)
+ivpu_job_signal_and_destroy(vdev, job->job_id, DRM_IVPU_JOB_STATUS_ABORTED);
+mutex_unlock(&vdev->submitted_jobs_lock);
+}
@@ -64,6 +64,7 @@ void ivpu_cmdq_reset_all_contexts(struct ivpu_device *vdev);

 void ivpu_job_done_consumer_init(struct ivpu_device *vdev);
 void ivpu_job_done_consumer_fini(struct ivpu_device *vdev);
+void ivpu_context_abort_thread_handler(struct work_struct *work);

 void ivpu_jobs_abort_all(struct ivpu_device *vdev);
@@ -48,9 +48,10 @@ const char *ivpu_jsm_msg_type_to_str(enum vpu_ipc_msg_type type)
 IVPU_CASE_TO_STR(VPU_JSM_MSG_HWS_RESUME_ENGINE_DONE);
 IVPU_CASE_TO_STR(VPU_JSM_MSG_STATE_DUMP);
 IVPU_CASE_TO_STR(VPU_JSM_MSG_STATE_DUMP_RSP);
-IVPU_CASE_TO_STR(VPU_JSM_MSG_BLOB_DEINIT);
+IVPU_CASE_TO_STR(VPU_JSM_MSG_BLOB_DEINIT_DEPRECATED);
 IVPU_CASE_TO_STR(VPU_JSM_MSG_DYNDBG_CONTROL);
 IVPU_CASE_TO_STR(VPU_JSM_MSG_JOB_DONE);
+IVPU_CASE_TO_STR(VPU_JSM_MSG_NATIVE_FENCE_SIGNALLED);
 IVPU_CASE_TO_STR(VPU_JSM_MSG_ENGINE_RESET_DONE);
 IVPU_CASE_TO_STR(VPU_JSM_MSG_ENGINE_PREEMPT_DONE);
 IVPU_CASE_TO_STR(VPU_JSM_MSG_REGISTER_DB_DONE);
@@ -917,8 +917,7 @@ void ivpu_mmu_irq_evtq_handler(struct ivpu_device *vdev)
 REGV_WR32(IVPU_MMU_REG_EVTQ_CONS_SEC, vdev->mmu->evtq.cons);
 }

-if (!kfifo_put(&vdev->hw->irq.fifo, IVPU_HW_IRQ_SRC_MMU_EVTQ))
-ivpu_err_ratelimited(vdev, "IRQ FIFO full\n");
+queue_work(system_wq, &vdev->context_abort_work);
 }

 void ivpu_mmu_evtq_dump(struct ivpu_device *vdev)
@@ -421,16 +421,17 @@ int ivpu_pm_dct_enable(struct ivpu_device *vdev, u8 active_percent)
 active_us = (DCT_PERIOD_US * active_percent) / 100;
 inactive_us = DCT_PERIOD_US - active_us;

+vdev->pm->dct_active_percent = active_percent;
+
+ivpu_dbg(vdev, PM, "DCT requested %u%% (D0: %uus, D0i2: %uus)\n",
+active_percent, active_us, inactive_us);
+
 ret = ivpu_jsm_dct_enable(vdev, active_us, inactive_us);
 if (ret) {
 ivpu_err_ratelimited(vdev, "Filed to enable DCT: %d\n", ret);
 return ret;
 }

-vdev->pm->dct_active_percent = active_percent;
-
-ivpu_dbg(vdev, PM, "DCT set to %u%% (D0: %uus, D0i2: %uus)\n",
-active_percent, active_us, inactive_us);
 return 0;
 }

@@ -438,15 +439,16 @@ int ivpu_pm_dct_disable(struct ivpu_device *vdev)
 {
 int ret;

+vdev->pm->dct_active_percent = 0;
+
+ivpu_dbg(vdev, PM, "DCT requested to be disabled\n");
+
 ret = ivpu_jsm_dct_disable(vdev);
 if (ret) {
 ivpu_err_ratelimited(vdev, "Filed to disable DCT: %d\n", ret);
 return ret;
 }

-vdev->pm->dct_active_percent = 0;
-
-ivpu_dbg(vdev, PM, "DCT disabled\n");
 return 0;
 }

@@ -458,7 +460,7 @@ void ivpu_pm_dct_irq_thread_handler(struct ivpu_device *vdev)
 if (ivpu_hw_btrs_dct_get_request(vdev, &enable))
 return;

-if (vdev->pm->dct_active_percent)
+if (enable)
 ret = ivpu_pm_dct_enable(vdev, DCT_DEFAULT_ACTIVE_PERCENT);
 else
 ret = ivpu_pm_dct_disable(vdev);
@@ -30,11 +30,12 @@ npu_busy_time_us_show(struct device *dev, struct device_attribute *attr, char *b
 struct ivpu_device *vdev = to_ivpu_device(drm);
 ktime_t total, now = 0;

-xa_lock(&vdev->submitted_jobs_xa);
+mutex_lock(&vdev->submitted_jobs_lock);
+
 total = vdev->busy_time;
 if (!xa_empty(&vdev->submitted_jobs_xa))
 now = ktime_sub(ktime_get(), vdev->busy_start_ts);
-xa_unlock(&vdev->submitted_jobs_xa);
+mutex_unlock(&vdev->submitted_jobs_lock);

 return sysfs_emit(buf, "%lld\n", ktime_to_us(ktime_add(total, now)));
 }
@@ -1,14 +1,13 @@
 /* SPDX-License-Identifier: MIT */
 /*
-* Copyright (c) 2020-2023, Intel Corporation.
+* Copyright (c) 2020-2024, Intel Corporation.
 */

 #ifndef VPU_BOOT_API_H
 #define VPU_BOOT_API_H

 /*
-* =========== FW API version information beginning ================
-* The bellow values will be used to construct the version info this way:
+* The below values will be used to construct the version info this way:
 * fw_bin_header->api_version[VPU_BOOT_API_VER_ID] = (VPU_BOOT_API_VER_MAJOR << 16) |
 * VPU_BOOT_API_VER_MINOR;
 * VPU_BOOT_API_VER_PATCH will be ignored. KMD and compatibility is not affected if this changes

@@ -27,19 +26,18 @@
 * Minor version changes when API backward compatibility is preserved.
 * Resets to 0 if Major version is incremented.
 */
-#define VPU_BOOT_API_VER_MINOR 24
+#define VPU_BOOT_API_VER_MINOR 26

 /*
 * API header changed (field names, documentation, formatting) but API itself has not been changed
 */
-#define VPU_BOOT_API_VER_PATCH 0
+#define VPU_BOOT_API_VER_PATCH 3

 /*
 * Index in the API version table
 * Must be unique for each API
 */
 #define VPU_BOOT_API_VER_INDEX 0
-/* ------------ FW API version information end ---------------------*/

 #pragma pack(push, 4)

@@ -164,8 +162,6 @@ enum vpu_trace_destination {
 /* VPU 30xx HW component IDs are sequential, so define first and last IDs. */
 #define VPU_TRACE_PROC_BIT_30XX_FIRST VPU_TRACE_PROC_BIT_LRT
 #define VPU_TRACE_PROC_BIT_30XX_LAST VPU_TRACE_PROC_BIT_SHV_15
-#define VPU_TRACE_PROC_BIT_KMB_FIRST VPU_TRACE_PROC_BIT_30XX_FIRST
-#define VPU_TRACE_PROC_BIT_KMB_LAST VPU_TRACE_PROC_BIT_30XX_LAST

 struct vpu_boot_l2_cache_config {
 u8 use;
@@ -199,6 +195,17 @@ struct vpu_warm_boot_section {
 */
 #define POWER_PROFILE_SURVIVABILITY 0x1

+/**
+* Enum for dvfs_mode boot param.
+*/
+enum vpu_governor {
+VPU_GOV_DEFAULT = 0, /* Default Governor for the system */
+VPU_GOV_MAX_PERFORMANCE = 1, /* Maximum performance governor */
+VPU_GOV_ON_DEMAND = 2, /* On Demand frequency control governor */
+VPU_GOV_POWER_SAVE = 3, /* Power save governor */
+VPU_GOV_ON_DEMAND_PRIORITY_AWARE = 4 /* On Demand priority based governor */
+};
+
 struct vpu_boot_params {
 u32 magic;
 u32 vpu_id;

@@ -301,7 +308,14 @@ struct vpu_boot_params {
 u32 temp_sensor_period_ms;
 /** PLL ratio for efficient clock frequency */
 u32 pn_freq_pll_ratio;
-/** DVFS Mode: Default: 0, Max Performance: 1, On Demand: 2, Power Save: 3 */
+/**
+* DVFS Mode:
+* 0 - Default, DVFS mode selected by the firmware
+* 1 - Max Performance
+* 2 - On Demand
+* 3 - Power Save
+* 4 - On Demand Priority Aware
+*/
 u32 dvfs_mode;
 /**
 * Depending on DVFS Mode:

@@ -332,8 +346,8 @@ struct vpu_boot_params {
 u64 d0i3_entry_vpu_ts;
 /*
 * The system time of the host operating system in microseconds.
-* E.g the number of microseconds since 1st of January 1970, or whatever date the
-* host operating system uses to maintain system time.
+* E.g the number of microseconds since 1st of January 1970, or whatever
+* date the host operating system uses to maintain system time.
 * This value will be used to track system time on the VPU.
 * The KMD is required to update this value on every VPU reset.
 */

@@ -382,10 +396,7 @@ struct vpu_boot_params {
 u32 pad6[734];
 };

-/*
-* Magic numbers set between host and vpu to detect corruptio of tracing init
-*/
+/* Magic numbers set between host and vpu to detect corruption of tracing init */

 #define VPU_TRACING_BUFFER_CANARY (0xCAFECAFE)

 /* Tracing buffer message format definitions */

@@ -405,7 +416,9 @@ struct vpu_tracing_buffer_header {
 u32 host_canary_start;
 /* offset from start of buffer for trace entries */
 u32 read_index;
-u32 pad_to_cache_line_size_0[14];
+/* keeps track of wrapping on the reader side */
+u32 read_wrap_count;
+u32 pad_to_cache_line_size_0[13];
 /* End of first cache line */

 /**
@@ -22,7 +22,7 @@
 /*
 * Minor version changes when API backward compatibility is preserved.
 */
-#define VPU_JSM_API_VER_MINOR 16
+#define VPU_JSM_API_VER_MINOR 25

 /*
 * API header changed (field names, documentation, formatting) but API itself has not been changed

@@ -36,7 +36,7 @@

 /*
 * Number of Priority Bands for Hardware Scheduling
-* Bands: RealTime, Focus, Normal, Idle
+* Bands: Idle(0), Normal(1), Focus(2), RealTime(3)
 */
 #define VPU_HWS_NUM_PRIORITY_BANDS 4

@@ -74,6 +74,7 @@
 #define VPU_JSM_STATUS_MVNCI_INTERNAL_ERROR 0xCU
 /* Job status returned when the job was preempted mid-inference */
 #define VPU_JSM_STATUS_PREEMPTED_MID_INFERENCE 0xDU
+#define VPU_JSM_STATUS_MVNCI_CONTEXT_VIOLATION_HW 0xEU

 /*
 * Host <-> VPU IPC channels.
@@ -86,18 +87,58 @@
 /*
 * Job flags bit masks.
 */
-#define VPU_JOB_FLAGS_NULL_SUBMISSION_MASK 0x00000001
-#define VPU_JOB_FLAGS_PRIVATE_DATA_MASK 0xFF000000
+enum {
+/*
+* Null submission mask.
+* When set, batch buffer's commands are not processed but returned as
+* successful immediately, except fences and timestamps.
+* When cleared, batch buffer's commands are processed normally.
+* Used for testing and profiling purposes.
+*/
+VPU_JOB_FLAGS_NULL_SUBMISSION_MASK = (1 << 0U),
+/*
+* Inline command mask.
+* When set, the object in job queue is an inline command (see struct vpu_inline_cmd below).
+* When cleared, the object in job queue is a job (see struct vpu_job_queue_entry below).
+*/
+VPU_JOB_FLAGS_INLINE_CMD_MASK = (1 << 1U),
+/*
+* VPU private data mask.
+* Reserved for the VPU to store private data about the job (or inline command)
+* while being processed.
+*/
+VPU_JOB_FLAGS_PRIVATE_DATA_MASK = 0xFFFF0000U
+};
+
 /*
-* Sizes of the reserved areas in jobs, in bytes.
+* Job queue flags bit masks.
 */
-#define VPU_JOB_RESERVED_BYTES 8
+enum {
+/*
+* No job done notification mask.
+* When set, indicates that no job done notification should be sent for any
+* job from this queue. When cleared, indicates that job done notification
+* should be sent for every job completed from this queue.
+*/
+VPU_JOB_QUEUE_FLAGS_NO_JOB_DONE_MASK = (1 << 0U),
+/*
+* Native fence usage mask.
+* When set, indicates that job queue uses native fences (as inline commands
+* in job queue). Such queues may also use legacy fences (as commands in batch buffers).
+* When cleared, indicates the job queue only uses legacy fences.
+* NOTE: For queues using native fences, VPU expects that all jobs in the queue
+* are immediately followed by an inline command object. This object is expected
+* to be a fence signal command in most cases, but can also be a NOP in case the host
+* does not need per-job fence signalling. Other inline commands objects can be
+* inserted between "job and inline command" pairs.
+*/
+VPU_JOB_QUEUE_FLAGS_USE_NATIVE_FENCE_MASK = (1 << 1U),
+
 /*
-* Sizes of the reserved areas in job queues, in bytes.
+* Enable turbo mode for testing NPU performance; not recommended for regular usage.
 */
-#define VPU_JOB_QUEUE_RESERVED_BYTES 52
+VPU_JOB_QUEUE_FLAGS_TURBO_MODE = (1 << 2U)
+};
+
 /*
 * Max length (including trailing NULL char) of trace entity name (e.g., the
@@ -140,24 +181,113 @@
 */
 #define VPU_HWS_INVALID_CMDQ_HANDLE 0ULL

+/*
+* Inline commands types.
+*/
+/*
+* NOP.
+* VPU does nothing other than consuming the inline command object.
+*/
+#define VPU_INLINE_CMD_TYPE_NOP 0x0
+/*
+* Fence wait.
+* VPU waits for the fence current value to reach monitored value.
+* Fence wait operations are executed upon job dispatching. While waiting for
+* the fence to be satisfied, VPU blocks fetching of the next objects in the queue.
+* Jobs present in the queue prior to the fence wait object may be processed
+* concurrently.
+*/
+#define VPU_INLINE_CMD_TYPE_FENCE_WAIT 0x1
+/*
+* Fence signal.
+* VPU sets the fence current value to the provided value. If new current value
+* is equal to or higher than monitored value, VPU sends fence signalled notification
+* to the host. Fence signal operations are executed upon completion of all the jobs
+* present in the queue prior to them, and in-order relative to each other in the queue.
+* But jobs in-between them may be processed concurrently and may complete out-of-order.
+*/
+#define VPU_INLINE_CMD_TYPE_FENCE_SIGNAL 0x2
+
+/*
+* Job scheduling priority bands for both hardware scheduling and OS scheduling.
+*/
+enum vpu_job_scheduling_priority_band {
+VPU_JOB_SCHEDULING_PRIORITY_BAND_IDLE = 0,
+VPU_JOB_SCHEDULING_PRIORITY_BAND_NORMAL = 1,
+VPU_JOB_SCHEDULING_PRIORITY_BAND_FOCUS = 2,
+VPU_JOB_SCHEDULING_PRIORITY_BAND_REALTIME = 3,
+VPU_JOB_SCHEDULING_PRIORITY_BAND_COUNT = 4,
+};
+
 /*
 * Job format.
+* Jobs defines the actual workloads to be executed by a given engine.
 */
 struct vpu_job_queue_entry {
-u64 batch_buf_addr; /**< Address of VPU commands batch buffer */
-u32 job_id; /**< Job ID */
-u32 flags; /**< Flags bit field, see VPU_JOB_FLAGS_* above */
-u64 root_page_table_addr; /**< Address of root page table to use for this job */
-u64 root_page_table_update_counter; /**< Page tables update events counter */
-u64 primary_preempt_buf_addr;
+/**< Address of VPU commands batch buffer */
+u64 batch_buf_addr;
+/**< Job ID */
+u32 job_id;
+/**< Flags bit field, see VPU_JOB_FLAGS_* above */
+u32 flags;
+/**
+* Doorbell ring timestamp taken by KMD from SoC's global system clock, in
+* microseconds. NPU can convert this value to its own fixed clock's timebase,
+* to match other profiling timestamps.
+*/
+u64 doorbell_timestamp;
+/**< Extra id for job tracking, used only in the firmware perf traces */
+u64 host_tracking_id;
 /**< Address of the primary preemption buffer to use for this job */
-u32 primary_preempt_buf_size;
+u64 primary_preempt_buf_addr;
 /**< Size of the primary preemption buffer to use for this job */
-u32 secondary_preempt_buf_size;
+u32 primary_preempt_buf_size;
 /**< Size of secondary preemption buffer to use for this job */
-u64 secondary_preempt_buf_addr;
+u32 secondary_preempt_buf_size;
 /**< Address of secondary preemption buffer to use for this job */
-u8 reserved_0[VPU_JOB_RESERVED_BYTES];
+u64 secondary_preempt_buf_addr;
+u64 reserved_0;
+};
+
+/*
+* Inline command format.
+* Inline commands are the commands executed at scheduler level (typically,
+* synchronization directives). Inline command and job objects must be of
+* the same size and have flags field at same offset.
+*/
+struct vpu_inline_cmd {
+u64 reserved_0;
+/* Inline command type, see VPU_INLINE_CMD_TYPE_* defines. */
+u32 type;
+/* Flags bit field, see VPU_JOB_FLAGS_* above. */
+u32 flags;
+/* Inline command payload. Depends on inline command type. */
+union {
+/* Fence (wait and signal) commands' payload. */
+struct {
+/* Fence object handle. */
+u64 fence_handle;
+/* User VA of the current fence value. */
+u64 current_value_va;
+/* User VA of the monitored fence value (read-only). */
+u64 monitored_value_va;
+/* Value to wait for or write in fence location. */
+u64 value;
+/* User VA of the log buffer in which to add log entry on completion. */
+u64 log_buffer_va;
+} fence;
+/* Other commands do not have a payload. */
+/* Payload definition for future inline commands can be inserted here. */
+u64 reserved_1[6];
+} payload;
+};
+
+/*
+* Job queue slots can be populated either with job objects or inline command objects.
+*/
+union vpu_jobq_slot {
+struct vpu_job_queue_entry job;
+struct vpu_inline_cmd inline_cmd;
 };
@@ -167,7 +297,21 @@ struct vpu_job_queue_header {
 u32 engine_idx;
 u32 head;
 u32 tail;
-u8 reserved_0[VPU_JOB_QUEUE_RESERVED_BYTES];
+u32 flags;
+/* Set to 1 to indicate priority_band field is valid */
+u32 priority_band_valid;
+/*
+* Priority for the work of this job queue, valid only if the HWS is NOT used
+* and the `priority_band_valid` is set to 1. It is applied only during
+* the VPU_JSM_MSG_REGISTER_DB message processing.
+* The device firmware might use the `priority_band` to optimize the power
+* management logic, but it will not affect the order of jobs.
+* Available priority bands: @see enum vpu_job_scheduling_priority_band
+*/
+u32 priority_band;
+/* Inside realtime band assigns a further priority, limited to 0..31 range */
+u32 realtime_priority_level;
+u32 reserved_0[9];
 };

 /*
@@ -175,7 +319,7 @@ struct vpu_job_queue_header {
  */
 struct vpu_job_queue {
 	struct vpu_job_queue_header header;
-	struct vpu_job_queue_entry job[];
+	union vpu_jobq_slot slot[];
 };
 
 /**
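With job queue slots now a union, a submission path can place either a full job descriptor or a scheduler-level inline command in the same ring position. The fragment below is only an illustrative sketch of that idea, not code from this patch; the VPU_INLINE_CMD_TYPE_FENCE_SIGNAL constant and the helper name are assumptions made for the example.

	/* Hypothetical helper: queue a fence-signal inline command in slot "tail". */
	static void queue_fence_signal(union vpu_jobq_slot *slots, u32 tail,
				       u64 fence_handle, u64 value)
	{
		struct vpu_inline_cmd *cmd = &slots[tail].inline_cmd;

		memset(cmd, 0, sizeof(*cmd));
		cmd->type = VPU_INLINE_CMD_TYPE_FENCE_SIGNAL; /* assumed define */
		cmd->payload.fence.fence_handle = fence_handle;
		cmd->payload.fence.value = value;
	}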
@@ -197,9 +341,7 @@ enum vpu_trace_entity_type {
 struct vpu_hws_log_buffer_header {
 	/* Written by VPU after adding a log entry. Initialised by host to 0. */
 	u32 first_free_entry_index;
-	/* Incremented by VPU every time the VPU overwrites the 0th entry;
-	 * initialised by host to 0.
-	 */
+	/* Incremented by VPU every time the VPU writes the 0th entry; initialised by host to 0. */
 	u32 wraparound_count;
 	/*
 	 * This is the number of buffers that can be stored in the log buffer provided by the host.
@@ -230,14 +372,80 @@ struct vpu_hws_log_buffer_entry {
 	u64 operation_data[2];
 };
 
+/* Native fence log buffer types. */
+enum vpu_hws_native_fence_log_type {
+	VPU_HWS_NATIVE_FENCE_LOG_TYPE_WAITS = 1,
+	VPU_HWS_NATIVE_FENCE_LOG_TYPE_SIGNALS = 2
+};
+
+/* HWS native fence log buffer header. */
+struct vpu_hws_native_fence_log_header {
+	union {
+		struct {
+			/* Index of the first free entry in buffer. */
+			u32 first_free_entry_idx;
+			/* Incremented each time NPU wraps around the buffer to write next entry. */
+			u32 wraparound_count;
+		};
+		/* Field allowing atomic update of both fields above. */
+		u64 atomic_wraparound_and_entry_idx;
+	};
+	/* Log buffer type, see enum vpu_hws_native_fence_log_type. */
+	u64 type;
+	/* Allocated number of entries in the log buffer. */
+	u64 entry_nb;
+	u64 reserved[2];
+};
+
+/* Native fence log operation types. */
+enum vpu_hws_native_fence_log_op {
+	VPU_HWS_NATIVE_FENCE_LOG_OP_SIGNAL_EXECUTED = 0,
+	VPU_HWS_NATIVE_FENCE_LOG_OP_WAIT_UNBLOCKED = 1
+};
+
+/* HWS native fence log entry. */
+struct vpu_hws_native_fence_log_entry {
+	/* Newly signaled/unblocked fence value. */
+	u64 fence_value;
+	/* Native fence object handle to which this operation belongs. */
+	u64 fence_handle;
+	/* Operation type, see enum vpu_hws_native_fence_log_op. */
+	u64 op_type;
+	u64 reserved_0;
+	/*
+	 * VPU_HWS_NATIVE_FENCE_LOG_OP_WAIT_UNBLOCKED only: Timestamp at which fence
+	 * wait was started (in NPU SysTime).
+	 */
+	u64 fence_wait_start_ts;
+	u64 reserved_1;
+	/* Timestamp at which fence operation was completed (in NPU SysTime). */
+	u64 fence_end_ts;
+};
+
+/* Native fence log buffer. */
+struct vpu_hws_native_fence_log_buffer {
+	struct vpu_hws_native_fence_log_header header;
+	struct vpu_hws_native_fence_log_entry entry[];
+};
+
 /*
  * Host <-> VPU IPC messages types.
  */
 enum vpu_ipc_msg_type {
 	VPU_JSM_MSG_UNKNOWN = 0xFFFFFFFF,
 
 	/* IPC Host -> Device, Async commands */
 	VPU_JSM_MSG_ASYNC_CMD = 0x1100,
 	VPU_JSM_MSG_ENGINE_RESET = VPU_JSM_MSG_ASYNC_CMD,
+	/**
+	 * Preempt engine. The NPU stops (preempts) all the jobs currently
+	 * executing on the target engine making the engine become idle and ready to
+	 * execute new jobs.
+	 * NOTE: The NPU does not remove unstarted jobs (if any) from job queues of
+	 * the target engine, but it stops processing them (until the queue doorbell
+	 * is rung again); the host is responsible to reset the job queue, either
+	 * after preemption or when resubmitting jobs to the queue.
+	 */
 	VPU_JSM_MSG_ENGINE_PREEMPT = 0x1101,
 	VPU_JSM_MSG_REGISTER_DB = 0x1102,
 	VPU_JSM_MSG_UNREGISTER_DB = 0x1103,
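The native fence log added above is a ring buffer: the NPU advances first_free_entry_idx and bumps wraparound_count each time it wraps back to entry 0. A minimal host-side reader, sketched below purely for illustration (it is not part of this patch and the helper name is invented), would walk only the entries written so far.

	static void dump_native_fence_log(const struct vpu_hws_native_fence_log_buffer *log)
	{
		const struct vpu_hws_native_fence_log_header *hdr = &log->header;
		u64 valid = hdr->wraparound_count ? hdr->entry_nb : hdr->first_free_entry_idx;
		u64 i;

		for (i = 0; i < valid; i++) {
			const struct vpu_hws_native_fence_log_entry *e = &log->entry[i];

			pr_info("fence %#llx op %llu value %#llx end_ts %llu\n",
				e->fence_handle, e->op_type, e->fence_value, e->fence_end_ts);
		}
	}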
@@ -323,9 +531,10 @@ enum vpu_ipc_msg_type {
 	 * NOTE: Please introduce new ASYNC commands before this one. *
 	 */
 	VPU_JSM_MSG_STATE_DUMP = 0x11FF,
 
 	/* IPC Host -> Device, General commands */
 	VPU_JSM_MSG_GENERAL_CMD = 0x1200,
-	VPU_JSM_MSG_BLOB_DEINIT = VPU_JSM_MSG_GENERAL_CMD,
+	VPU_JSM_MSG_BLOB_DEINIT_DEPRECATED = VPU_JSM_MSG_GENERAL_CMD,
 	/**
 	 * Control dyndbg behavior by executing a dyndbg command; equivalent to
 	 * Linux command: `echo '<dyndbg_cmd>' > <debugfs>/dynamic_debug/control`.
@@ -335,8 +544,12 @@ enum vpu_ipc_msg_type {
 	 * Perform the save procedure for the D0i3 entry
 	 */
 	VPU_JSM_MSG_PWR_D0I3_ENTER = 0x1202,
 
 	/* IPC Device -> Host, Job completion */
 	VPU_JSM_MSG_JOB_DONE = 0x2100,
+	/* IPC Device -> Host, Fence signalled */
+	VPU_JSM_MSG_NATIVE_FENCE_SIGNALLED = 0x2101,
 
 	/* IPC Device -> Host, Async command completion */
 	VPU_JSM_MSG_ASYNC_CMD_DONE = 0x2200,
 	VPU_JSM_MSG_ENGINE_RESET_DONE = VPU_JSM_MSG_ASYNC_CMD_DONE,
@@ -422,6 +635,7 @@ enum vpu_ipc_msg_type {
 	 * NOTE: Please introduce new ASYNC responses before this one. *
 	 */
 	VPU_JSM_MSG_STATE_DUMP_RSP = 0x22FF,
 
 	/* IPC Device -> Host, General command completion */
 	VPU_JSM_MSG_GENERAL_CMD_DONE = 0x2300,
 	VPU_JSM_MSG_BLOB_DEINIT_DONE = VPU_JSM_MSG_GENERAL_CMD_DONE,
@@ -600,11 +814,6 @@ struct vpu_jsm_metric_streamer_update {
 	u64 next_buffer_size;
 };
 
-struct vpu_ipc_msg_payload_blob_deinit {
-	/* 64-bit unique ID for the blob to be de-initialized. */
-	u64 blob_id;
-};
-
 struct vpu_ipc_msg_payload_job_done {
 	/* Engine to which the job was submitted. */
 	u32 engine_idx;
@@ -622,6 +831,21 @@ struct vpu_ipc_msg_payload_job_done {
 	u64 cmdq_id;
 };
 
+/*
+ * Notification message upon native fence signalling.
+ * @see VPU_JSM_MSG_NATIVE_FENCE_SIGNALLED
+ */
+struct vpu_ipc_msg_payload_native_fence_signalled {
+	/* Engine ID. */
+	u32 engine_idx;
+	/* Host SSID. */
+	u32 host_ssid;
+	/* CMDQ ID */
+	u64 cmdq_id;
+	/* Fence object handle. */
+	u64 fence_handle;
+};
+
 struct vpu_jsm_engine_reset_context {
 	/* Host SSID */
 	u32 host_ssid;
@@ -700,11 +924,6 @@ struct vpu_ipc_msg_payload_get_power_level_count_done {
 	u8 power_limit[16];
 };
 
-struct vpu_ipc_msg_payload_blob_deinit_done {
-	/* 64-bit unique ID for the blob de-initialized. */
-	u64 blob_id;
-};
-
 /* HWS priority band setup request / response */
 struct vpu_ipc_msg_payload_hws_priority_band_setup {
 	/*
@@ -794,7 +1013,10 @@ struct vpu_ipc_msg_payload_hws_set_context_sched_properties {
 	u32 reserved_0;
 	/* Command queue id */
 	u64 cmdq_id;
-	/* Priority band to assign to work of this context */
+	/*
+	 * Priority band to assign to work of this context.
+	 * Available priority bands: @see enum vpu_job_scheduling_priority_band
+	 */
 	u32 priority_band;
 	/* Inside realtime band assigns a further priority */
 	u32 realtime_priority_level;
@@ -869,9 +1091,7 @@ struct vpu_ipc_msg_payload_hws_set_scheduling_log {
 	 */
 	u64 notify_index;
 	/*
-	 * Enable extra events to be output to log for debug of scheduling algorithm.
-	 * Interpreted by VPU as a boolean to enable or disable, expected values are
-	 * 0 and 1.
+	 * Field is now deprecated, will be removed when KMD is updated to support removal
 	 */
 	u32 enable_extra_events;
 	/* Zero Padding */
@@ -1243,10 +1463,10 @@ union vpu_ipc_msg_payload {
 	struct vpu_jsm_metric_streamer_start metric_streamer_start;
 	struct vpu_jsm_metric_streamer_stop metric_streamer_stop;
 	struct vpu_jsm_metric_streamer_update metric_streamer_update;
-	struct vpu_ipc_msg_payload_blob_deinit blob_deinit;
 	struct vpu_ipc_msg_payload_ssid_release ssid_release;
 	struct vpu_jsm_hws_register_db hws_register_db;
 	struct vpu_ipc_msg_payload_job_done job_done;
+	struct vpu_ipc_msg_payload_native_fence_signalled native_fence_signalled;
 	struct vpu_ipc_msg_payload_engine_reset_done engine_reset_done;
 	struct vpu_ipc_msg_payload_engine_preempt_done engine_preempt_done;
 	struct vpu_ipc_msg_payload_register_db_done register_db_done;

@@ -1254,7 +1474,6 @@ union vpu_ipc_msg_payload {
 	struct vpu_ipc_msg_payload_query_engine_hb_done query_engine_hb_done;
 	struct vpu_ipc_msg_payload_get_power_level_count_done get_power_level_count_done;
 	struct vpu_jsm_metric_streamer_done metric_streamer_done;
-	struct vpu_ipc_msg_payload_blob_deinit_done blob_deinit_done;
 	struct vpu_ipc_msg_payload_trace_config trace_config;
 	struct vpu_ipc_msg_payload_trace_capability_rsp trace_capability;
 	struct vpu_ipc_msg_payload_trace_get_name trace_get_name;
@@ -6374,7 +6374,7 @@ static void print_binder_transaction_ilocked(struct seq_file *m,
 		seq_printf(m, " node %d", buffer->target_node->debug_id);
 	seq_printf(m, " size %zd:%zd offset %lx\n",
 		   buffer->data_size, buffer->offsets_size,
-		   proc->alloc.buffer - buffer->user_data);
+		   buffer->user_data - proc->alloc.buffer);
 }
 
 static void print_binder_work_ilocked(struct seq_file *m,
@@ -42,16 +42,13 @@ int module_add_driver(struct module *mod, const struct device_driver *drv)
 	if (mod)
 		mk = &mod->mkobj;
 	else if (drv->mod_name) {
-		struct kobject *mkobj;
-
-		/* Lookup built-in module entry in /sys/modules */
-		mkobj = kset_find_obj(module_kset, drv->mod_name);
-		if (mkobj) {
-			mk = container_of(mkobj, struct module_kobject, kobj);
+		/* Lookup or create built-in module entry in /sys/modules */
+		mk = lookup_or_create_module_kobject(drv->mod_name);
+		if (mk) {
 			/* remember our module structure */
 			drv->p->mkobj = mk;
-			/* kset_find_obj took a reference */
-			kobject_put(mkobj);
+			/* lookup_or_create_module_kobject took a reference */
+			kobject_put(&mk->kobj);
 		}
 	}
 
@@ -581,8 +581,10 @@ static int btintel_pcie_recv_event(struct hci_dev *hdev, struct sk_buff *skb)
 		/* This is a debug event that comes from IML and OP image when it
 		 * starts execution. There is no need pass this event to stack.
 		 */
-		if (skb->data[2] == 0x97)
+		if (skb->data[2] == 0x97) {
+			hci_recv_diag(hdev, skb);
 			return 0;
+		}
 	}
 
 	return hci_recv_frame(hdev, skb);
@@ -598,7 +600,6 @@ static int btintel_pcie_recv_frame(struct btintel_pcie_data *data,
 	u8 pkt_type;
 	u16 plen;
 	u32 pcie_pkt_type;
-	struct sk_buff *new_skb;
 	void *pdata;
 	struct hci_dev *hdev = data->hdev;
 
@@ -675,24 +676,20 @@ static int btintel_pcie_recv_frame(struct btintel_pcie_data *data,
 
 	bt_dev_dbg(hdev, "pkt_type: 0x%2.2x len: %u", pkt_type, plen);
 
-	new_skb = bt_skb_alloc(plen, GFP_ATOMIC);
-	if (!new_skb) {
-		bt_dev_err(hdev, "Failed to allocate memory for skb of len: %u",
-			   skb->len);
-		ret = -ENOMEM;
-		goto exit_error;
-	}
-
-	hci_skb_pkt_type(new_skb) = pkt_type;
-	skb_put_data(new_skb, skb->data, plen);
+	hci_skb_pkt_type(skb) = pkt_type;
 	hdev->stat.byte_rx += plen;
+	skb_trim(skb, plen);
 
 	if (pcie_pkt_type == BTINTEL_PCIE_HCI_EVT_PKT)
-		ret = btintel_pcie_recv_event(hdev, new_skb);
+		ret = btintel_pcie_recv_event(hdev, skb);
 	else
-		ret = hci_recv_frame(hdev, new_skb);
+		ret = hci_recv_frame(hdev, skb);
+	skb = NULL; /* skb is freed in the callee */
 
 exit_error:
+	if (skb)
+		kfree_skb(skb);
+
 	if (ret)
 		hdev->stat.err_rx++;
 
@@ -706,16 +703,10 @@ static void btintel_pcie_rx_work(struct work_struct *work)
 	struct btintel_pcie_data *data = container_of(work,
 					struct btintel_pcie_data, rx_work);
 	struct sk_buff *skb;
-	int err;
-	struct hci_dev *hdev = data->hdev;
 
 	/* Process the sk_buf in queue and send to the HCI layer */
 	while ((skb = skb_dequeue(&data->rx_skb_q))) {
-		err = btintel_pcie_recv_frame(data, skb);
-		if (err)
-			bt_dev_err(hdev, "Failed to send received frame: %d",
-				   err);
-		kfree_skb(skb);
+		btintel_pcie_recv_frame(data, skb);
 	}
 }
 
@@ -770,10 +761,8 @@ static void btintel_pcie_msix_rx_handle(struct btintel_pcie_data *data)
 	bt_dev_dbg(hdev, "RXQ: cr_hia: %u cr_tia: %u", cr_hia, cr_tia);
 
 	/* Check CR_TIA and CR_HIA for change */
-	if (cr_tia == cr_hia) {
-		bt_dev_warn(hdev, "RXQ: no new CD found");
+	if (cr_tia == cr_hia)
 		return;
-	}
 
 	rxq = &data->rxq;
 
@@ -809,6 +798,16 @@ static irqreturn_t btintel_pcie_msix_isr(int irq, void *data)
 	return IRQ_WAKE_THREAD;
 }
 
+static inline bool btintel_pcie_is_rxq_empty(struct btintel_pcie_data *data)
+{
+	return data->ia.cr_hia[BTINTEL_PCIE_RXQ_NUM] == data->ia.cr_tia[BTINTEL_PCIE_RXQ_NUM];
+}
+
+static inline bool btintel_pcie_is_txackq_empty(struct btintel_pcie_data *data)
+{
+	return data->ia.cr_tia[BTINTEL_PCIE_TXQ_NUM] == data->ia.cr_hia[BTINTEL_PCIE_TXQ_NUM];
+}
+
 static irqreturn_t btintel_pcie_irq_msix_handler(int irq, void *dev_id)
 {
 	struct msix_entry *entry = dev_id;
@@ -836,12 +835,18 @@ static irqreturn_t btintel_pcie_irq_msix_handler(int irq, void *dev_id)
 		btintel_pcie_msix_gp0_handler(data);
 
 	/* For TX */
-	if (intr_fh & BTINTEL_PCIE_MSIX_FH_INT_CAUSES_0)
+	if (intr_fh & BTINTEL_PCIE_MSIX_FH_INT_CAUSES_0) {
 		btintel_pcie_msix_tx_handle(data);
+		if (!btintel_pcie_is_rxq_empty(data))
+			btintel_pcie_msix_rx_handle(data);
+	}
 
 	/* For RX */
-	if (intr_fh & BTINTEL_PCIE_MSIX_FH_INT_CAUSES_1)
+	if (intr_fh & BTINTEL_PCIE_MSIX_FH_INT_CAUSES_1) {
 		btintel_pcie_msix_rx_handle(data);
+		if (!btintel_pcie_is_txackq_empty(data))
+			btintel_pcie_msix_tx_handle(data);
+	}
 
 	/*
 	 * Before sending the interrupt the HW disables it to prevent a nested
@@ -371,6 +371,42 @@ static const struct usb_device_id quirks_table[] = {
 	/* QCA WCN785x chipset */
 	{ USB_DEVICE(0x0cf3, 0xe700), .driver_info = BTUSB_QCA_WCN6855 |
 						     BTUSB_WIDEBAND_SPEECH },
+	{ USB_DEVICE(0x0489, 0xe0fc), .driver_info = BTUSB_QCA_WCN6855 |
+						     BTUSB_WIDEBAND_SPEECH },
+	{ USB_DEVICE(0x0489, 0xe0f3), .driver_info = BTUSB_QCA_WCN6855 |
+						     BTUSB_WIDEBAND_SPEECH },
+	{ USB_DEVICE(0x0489, 0xe100), .driver_info = BTUSB_QCA_WCN6855 |
+						     BTUSB_WIDEBAND_SPEECH },
+	{ USB_DEVICE(0x0489, 0xe103), .driver_info = BTUSB_QCA_WCN6855 |
+						     BTUSB_WIDEBAND_SPEECH },
+	{ USB_DEVICE(0x0489, 0xe10a), .driver_info = BTUSB_QCA_WCN6855 |
+						     BTUSB_WIDEBAND_SPEECH },
+	{ USB_DEVICE(0x0489, 0xe10d), .driver_info = BTUSB_QCA_WCN6855 |
+						     BTUSB_WIDEBAND_SPEECH },
+	{ USB_DEVICE(0x0489, 0xe11b), .driver_info = BTUSB_QCA_WCN6855 |
+						     BTUSB_WIDEBAND_SPEECH },
+	{ USB_DEVICE(0x0489, 0xe11c), .driver_info = BTUSB_QCA_WCN6855 |
+						     BTUSB_WIDEBAND_SPEECH },
+	{ USB_DEVICE(0x0489, 0xe11f), .driver_info = BTUSB_QCA_WCN6855 |
+						     BTUSB_WIDEBAND_SPEECH },
+	{ USB_DEVICE(0x0489, 0xe141), .driver_info = BTUSB_QCA_WCN6855 |
+						     BTUSB_WIDEBAND_SPEECH },
+	{ USB_DEVICE(0x0489, 0xe14a), .driver_info = BTUSB_QCA_WCN6855 |
+						     BTUSB_WIDEBAND_SPEECH },
+	{ USB_DEVICE(0x0489, 0xe14b), .driver_info = BTUSB_QCA_WCN6855 |
+						     BTUSB_WIDEBAND_SPEECH },
+	{ USB_DEVICE(0x0489, 0xe14d), .driver_info = BTUSB_QCA_WCN6855 |
+						     BTUSB_WIDEBAND_SPEECH },
+	{ USB_DEVICE(0x13d3, 0x3623), .driver_info = BTUSB_QCA_WCN6855 |
+						     BTUSB_WIDEBAND_SPEECH },
+	{ USB_DEVICE(0x13d3, 0x3624), .driver_info = BTUSB_QCA_WCN6855 |
+						     BTUSB_WIDEBAND_SPEECH },
+	{ USB_DEVICE(0x2c7c, 0x0130), .driver_info = BTUSB_QCA_WCN6855 |
+						     BTUSB_WIDEBAND_SPEECH },
+	{ USB_DEVICE(0x2c7c, 0x0131), .driver_info = BTUSB_QCA_WCN6855 |
+						     BTUSB_WIDEBAND_SPEECH },
+	{ USB_DEVICE(0x2c7c, 0x0132), .driver_info = BTUSB_QCA_WCN6855 |
+						     BTUSB_WIDEBAND_SPEECH },
 
 	/* Broadcom BCM2035 */
 	{ USB_DEVICE(0x0a5c, 0x2009), .driver_info = BTUSB_BCM92035 },
@@ -2939,22 +2975,16 @@ static void btusb_coredump_qca(struct hci_dev *hdev)
 		bt_dev_err(hdev, "%s: triggle crash failed (%d)", __func__, err);
 }
 
-/*
- * ==0: not a dump pkt.
- * < 0: fails to handle a dump pkt
- * > 0: otherwise.
- */
+/* Return: 0 on success, negative errno on failure. */
 static int handle_dump_pkt_qca(struct hci_dev *hdev, struct sk_buff *skb)
 {
-	int ret = 1;
+	int ret = 0;
 	u8 pkt_type;
 	u8 *sk_ptr;
 	unsigned int sk_len;
 	u16 seqno;
 	u32 dump_size;
 
-	struct hci_event_hdr *event_hdr;
-	struct hci_acl_hdr *acl_hdr;
 	struct qca_dump_hdr *dump_hdr;
 	struct btusb_data *btdata = hci_get_drvdata(hdev);
 	struct usb_device *udev = btdata->udev;
@@ -2964,30 +2994,14 @@ static int handle_dump_pkt_qca(struct hci_dev *hdev, struct sk_buff *skb)
 	sk_len = skb->len;
 
 	if (pkt_type == HCI_ACLDATA_PKT) {
-		acl_hdr = hci_acl_hdr(skb);
-		if (le16_to_cpu(acl_hdr->handle) != QCA_MEMDUMP_ACL_HANDLE)
-			return 0;
 		sk_ptr += HCI_ACL_HDR_SIZE;
 		sk_len -= HCI_ACL_HDR_SIZE;
-		event_hdr = (struct hci_event_hdr *)sk_ptr;
-	} else {
-		event_hdr = hci_event_hdr(skb);
 	}
 
-	if ((event_hdr->evt != HCI_VENDOR_PKT)
-		|| (event_hdr->plen != (sk_len - HCI_EVENT_HDR_SIZE)))
-		return 0;
-
 	sk_ptr += HCI_EVENT_HDR_SIZE;
 	sk_len -= HCI_EVENT_HDR_SIZE;
 
 	dump_hdr = (struct qca_dump_hdr *)sk_ptr;
-	if ((sk_len < offsetof(struct qca_dump_hdr, data))
-		|| (dump_hdr->vse_class != QCA_MEMDUMP_VSE_CLASS)
-		|| (dump_hdr->msg_type != QCA_MEMDUMP_MSG_TYPE))
-		return 0;
-
-	/*it is dump pkt now*/
 	seqno = le16_to_cpu(dump_hdr->seqno);
 	if (seqno == 0) {
 		set_bit(BTUSB_HW_SSR_ACTIVE, &btdata->flags);
@@ -3061,17 +3075,84 @@ out:
 	return ret;
 }
 
+/* Return: true if the ACL packet is a dump packet, false otherwise. */
+static bool acl_pkt_is_dump_qca(struct hci_dev *hdev, struct sk_buff *skb)
+{
+	u8 *sk_ptr;
+	unsigned int sk_len;
+
+	struct hci_event_hdr *event_hdr;
+	struct hci_acl_hdr *acl_hdr;
+	struct qca_dump_hdr *dump_hdr;
+
+	sk_ptr = skb->data;
+	sk_len = skb->len;
+
+	acl_hdr = hci_acl_hdr(skb);
+	if (le16_to_cpu(acl_hdr->handle) != QCA_MEMDUMP_ACL_HANDLE)
+		return false;
+
+	sk_ptr += HCI_ACL_HDR_SIZE;
+	sk_len -= HCI_ACL_HDR_SIZE;
+	event_hdr = (struct hci_event_hdr *)sk_ptr;
+
+	if ((event_hdr->evt != HCI_VENDOR_PKT) ||
+	    (event_hdr->plen != (sk_len - HCI_EVENT_HDR_SIZE)))
+		return false;
+
+	sk_ptr += HCI_EVENT_HDR_SIZE;
+	sk_len -= HCI_EVENT_HDR_SIZE;
+
+	dump_hdr = (struct qca_dump_hdr *)sk_ptr;
+	if ((sk_len < offsetof(struct qca_dump_hdr, data)) ||
+	    (dump_hdr->vse_class != QCA_MEMDUMP_VSE_CLASS) ||
+	    (dump_hdr->msg_type != QCA_MEMDUMP_MSG_TYPE))
+		return false;
+
+	return true;
+}
+
+/* Return: true if the event packet is a dump packet, false otherwise. */
+static bool evt_pkt_is_dump_qca(struct hci_dev *hdev, struct sk_buff *skb)
+{
+	u8 *sk_ptr;
+	unsigned int sk_len;
+
+	struct hci_event_hdr *event_hdr;
+	struct qca_dump_hdr *dump_hdr;
+
+	sk_ptr = skb->data;
+	sk_len = skb->len;
+
+	event_hdr = hci_event_hdr(skb);
+
+	if ((event_hdr->evt != HCI_VENDOR_PKT)
+	    || (event_hdr->plen != (sk_len - HCI_EVENT_HDR_SIZE)))
+		return false;
+
+	sk_ptr += HCI_EVENT_HDR_SIZE;
+	sk_len -= HCI_EVENT_HDR_SIZE;
+
+	dump_hdr = (struct qca_dump_hdr *)sk_ptr;
+	if ((sk_len < offsetof(struct qca_dump_hdr, data)) ||
+	    (dump_hdr->vse_class != QCA_MEMDUMP_VSE_CLASS) ||
+	    (dump_hdr->msg_type != QCA_MEMDUMP_MSG_TYPE))
+		return false;
+
+	return true;
+}
+
 static int btusb_recv_acl_qca(struct hci_dev *hdev, struct sk_buff *skb)
 {
-	if (handle_dump_pkt_qca(hdev, skb))
-		return 0;
+	if (acl_pkt_is_dump_qca(hdev, skb))
+		return handle_dump_pkt_qca(hdev, skb);
 	return hci_recv_frame(hdev, skb);
 }
 
 static int btusb_recv_evt_qca(struct hci_dev *hdev, struct sk_buff *skb)
 {
-	if (handle_dump_pkt_qca(hdev, skb))
-		return 0;
+	if (evt_pkt_is_dump_qca(hdev, skb))
+		return handle_dump_pkt_qca(hdev, skb);
 	return hci_recv_frame(hdev, skb);
 }
 
@@ -534,16 +534,18 @@ void cpufreq_disable_fast_switch(struct cpufreq_policy *policy)
 EXPORT_SYMBOL_GPL(cpufreq_disable_fast_switch);
 
 static unsigned int __resolve_freq(struct cpufreq_policy *policy,
-				   unsigned int target_freq, unsigned int relation)
+				   unsigned int target_freq,
+				   unsigned int min, unsigned int max,
+				   unsigned int relation)
 {
 	unsigned int idx;
 
-	target_freq = clamp_val(target_freq, policy->min, policy->max);
+	target_freq = clamp_val(target_freq, min, max);
 
 	if (!policy->freq_table)
 		return target_freq;
 
-	idx = cpufreq_frequency_table_target(policy, target_freq, relation);
+	idx = cpufreq_frequency_table_target(policy, target_freq, min, max, relation);
 	policy->cached_resolved_idx = idx;
 	policy->cached_target_freq = target_freq;
 	return policy->freq_table[idx].frequency;
@@ -563,7 +565,21 @@ static unsigned int __resolve_freq(struct cpufreq_policy *policy,
 unsigned int cpufreq_driver_resolve_freq(struct cpufreq_policy *policy,
 					 unsigned int target_freq)
 {
-	return __resolve_freq(policy, target_freq, CPUFREQ_RELATION_LE);
+	unsigned int min = READ_ONCE(policy->min);
+	unsigned int max = READ_ONCE(policy->max);
+
+	/*
+	 * If this function runs in parallel with cpufreq_set_policy(), it may
+	 * read policy->min before the update and policy->max after the update
+	 * or the other way around, so there is no ordering guarantee.
+	 *
+	 * Resolve this by always honoring the max (in case it comes from
+	 * thermal throttling or similar).
+	 */
+	if (unlikely(min > max))
+		min = max;
+
+	return __resolve_freq(policy, target_freq, min, max, CPUFREQ_RELATION_LE);
 }
 EXPORT_SYMBOL_GPL(cpufreq_driver_resolve_freq);
 
|
|||||||
if (cpufreq_disabled())
|
if (cpufreq_disabled())
|
||||||
return -ENODEV;
|
return -ENODEV;
|
||||||
|
|
||||||
target_freq = __resolve_freq(policy, target_freq, relation);
|
target_freq = __resolve_freq(policy, target_freq, policy->min,
|
||||||
|
policy->max, relation);
|
||||||
|
|
||||||
pr_debug("target for CPU %u: %u kHz, relation %u, requested %u kHz\n",
|
pr_debug("target for CPU %u: %u kHz, relation %u, requested %u kHz\n",
|
||||||
policy->cpu, target_freq, relation, old_target_freq);
|
policy->cpu, target_freq, relation, old_target_freq);
|
||||||
@@ -2647,11 +2664,18 @@ static int cpufreq_set_policy(struct cpufreq_policy *policy,
|
|||||||
* Resolve policy min/max to available frequencies. It ensures
|
* Resolve policy min/max to available frequencies. It ensures
|
||||||
* no frequency resolution will neither overshoot the requested maximum
|
* no frequency resolution will neither overshoot the requested maximum
|
||||||
* nor undershoot the requested minimum.
|
* nor undershoot the requested minimum.
|
||||||
|
*
|
||||||
|
* Avoid storing intermediate values in policy->max or policy->min and
|
||||||
|
* compiler optimizations around them because they may be accessed
|
||||||
|
* concurrently by cpufreq_driver_resolve_freq() during the update.
|
||||||
*/
|
*/
|
||||||
policy->min = new_data.min;
|
WRITE_ONCE(policy->max, __resolve_freq(policy, new_data.max,
|
||||||
policy->max = new_data.max;
|
new_data.min, new_data.max,
|
||||||
policy->min = __resolve_freq(policy, policy->min, CPUFREQ_RELATION_L);
|
CPUFREQ_RELATION_H));
|
||||||
policy->max = __resolve_freq(policy, policy->max, CPUFREQ_RELATION_H);
|
new_data.min = __resolve_freq(policy, new_data.min, new_data.min,
|
||||||
|
new_data.max, CPUFREQ_RELATION_L);
|
||||||
|
WRITE_ONCE(policy->min, new_data.min > policy->max ? policy->max : new_data.min);
|
||||||
|
|
||||||
trace_cpu_frequency_limits(policy);
|
trace_cpu_frequency_limits(policy);
|
||||||
|
|
||||||
cpufreq_update_pressure(policy);
|
cpufreq_update_pressure(policy);
|
||||||
|
|||||||
@@ -76,7 +76,8 @@ static unsigned int generic_powersave_bias_target(struct cpufreq_policy *policy,
 		return freq_next;
 	}
 
-	index = cpufreq_frequency_table_target(policy, freq_next, relation);
+	index = cpufreq_frequency_table_target(policy, freq_next, policy->min,
+					       policy->max, relation);
 	freq_req = freq_table[index].frequency;
 	freq_reduc = freq_req * od_tuners->powersave_bias / 1000;
 	freq_avg = freq_req - freq_reduc;
@@ -116,8 +116,8 @@ int cpufreq_generic_frequency_table_verify(struct cpufreq_policy_data *policy)
 EXPORT_SYMBOL_GPL(cpufreq_generic_frequency_table_verify);
 
 int cpufreq_table_index_unsorted(struct cpufreq_policy *policy,
-				 unsigned int target_freq,
-				 unsigned int relation)
+				 unsigned int target_freq, unsigned int min,
+				 unsigned int max, unsigned int relation)
 {
 	struct cpufreq_frequency_table optimal = {
 		.driver_data = ~0,

@@ -148,7 +148,7 @@ int cpufreq_table_index_unsorted(struct cpufreq_policy *policy,
 	cpufreq_for_each_valid_entry_idx(pos, table, i) {
 		freq = pos->frequency;
 
-		if ((freq < policy->min) || (freq > policy->max))
+		if (freq < min || freq > max)
 			continue;
 		if (freq == target_freq) {
 			optimal.driver_data = i;
@@ -600,6 +600,9 @@ static bool turbo_is_disabled(void)
 {
 	u64 misc_en;
 
+	if (!cpu_feature_enabled(X86_FEATURE_IDA))
+		return true;
+
 	rdmsrl(MSR_IA32_MISC_ENABLE, misc_en);
 
 	return !!(misc_en & MSR_IA32_MISC_ENABLE_TURBO_DISABLE);
@@ -99,7 +99,7 @@ static irqreturn_t altr_sdram_mc_err_handler(int irq, void *dev_id)
 	if (status & priv->ecc_stat_ce_mask) {
 		regmap_read(drvdata->mc_vbase, priv->ecc_saddr_offset,
 			    &err_addr);
-		if (priv->ecc_uecnt_offset)
+		if (priv->ecc_cecnt_offset)
 			regmap_read(drvdata->mc_vbase, priv->ecc_cecnt_offset,
 				    &err_count);
 		edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci, err_count,
@@ -1005,9 +1005,6 @@ altr_init_a10_ecc_block(struct device_node *np, u32 irq_mask,
 		}
 	}
 
-	/* Interrupt mode set to every SBERR */
-	regmap_write(ecc_mgr_map, ALTR_A10_ECC_INTMODE_OFST,
-		     ALTR_A10_ECC_INTMODE);
 	/* Enable ECC */
 	ecc_set_bits(ecc_ctrl_en_mask, (ecc_block_base +
 					ALTR_A10_ECC_CTRL_OFST));
@@ -2127,6 +2124,10 @@ static int altr_edac_a10_probe(struct platform_device *pdev)
 		return PTR_ERR(edac->ecc_mgr_map);
 	}
 
+	/* Set irq mask for DDR SBE to avoid any pending irq before registration */
+	regmap_write(edac->ecc_mgr_map, A10_SYSMGR_ECC_INTMASK_SET_OFST,
+		     (A10_SYSMGR_ECC_INTMASK_SDMMCB | A10_SYSMGR_ECC_INTMASK_DDR0));
+
 	edac->irq_chip.name = pdev->dev.of_node->name;
 	edac->irq_chip.irq_mask = a10_eccmgr_irq_mask;
 	edac->irq_chip.irq_unmask = a10_eccmgr_irq_unmask;
@@ -249,6 +249,8 @@ struct altr_sdram_mc_data {
 #define A10_SYSMGR_ECC_INTMASK_SET_OFST   0x94
 #define A10_SYSMGR_ECC_INTMASK_CLR_OFST   0x98
 #define A10_SYSMGR_ECC_INTMASK_OCRAM      BIT(1)
+#define A10_SYSMGR_ECC_INTMASK_SDMMCB     BIT(16)
+#define A10_SYSMGR_ECC_INTMASK_DDR0       BIT(17)
 
 #define A10_SYSMGR_ECC_INTSTAT_SERR_OFST  0x9C
 #define A10_SYSMGR_ECC_INTSTAT_DERR_OFST  0xA0
@@ -280,7 +280,8 @@ __ffa_partition_info_get(u32 uuid0, u32 uuid1, u32 uuid2, u32 uuid3,
 				memcpy(buffer + idx, drv_info->rx_buffer + idx * sz,
 				       buf_sz);
 
-	ffa_rx_release();
+	if (!(flags & PARTITION_INFO_GET_RETURN_COUNT_ONLY))
+		ffa_rx_release();
 
 	mutex_unlock(&drv_info->rx_lock);
 
@@ -260,6 +260,9 @@ static struct scmi_device *scmi_child_dev_find(struct device *parent,
 	if (!dev)
 		return NULL;
 
+	/* Drop the refcnt bumped implicitly by device_find_child */
+	put_device(dev);
+
 	return to_scmi_dev(dev);
 }
 
@@ -185,7 +185,7 @@ config DRM_DEBUG_DP_MST_TOPOLOGY_REFS
 	bool "Enable refcount backtrace history in the DP MST helpers"
 	depends on STACKTRACE_SUPPORT
 	select STACKDEPOT
-	depends on DRM_KMS_HELPER
+	select DRM_KMS_HELPER
 	depends on DEBUG_KERNEL
 	depends on EXPERT
 	help
@@ -361,7 +361,7 @@ static void nbio_v7_11_get_clockgating_state(struct amdgpu_device *adev,
 		*flags |= AMD_CG_SUPPORT_BIF_LS;
 }
 
-#define MMIO_REG_HOLE_OFFSET (0x80000 - PAGE_SIZE)
+#define MMIO_REG_HOLE_OFFSET 0x44000
 
 static void nbio_v7_11_set_reg_remap(struct amdgpu_device *adev)
 {
@@ -1887,26 +1887,6 @@ static enum dmub_ips_disable_type dm_get_default_ips_mode(
 
 	switch (amdgpu_ip_version(adev, DCE_HWIP, 0)) {
 	case IP_VERSION(3, 5, 0):
-		/*
-		 * On DCN35 systems with Z8 enabled, it's possible for IPS2 + Z8 to
-		 * cause a hard hang. A fix exists for newer PMFW.
-		 *
-		 * As a workaround, for non-fixed PMFW, force IPS1+RCG as the deepest
-		 * IPS state in all cases, except for s0ix and all displays off (DPMS),
-		 * where IPS2 is allowed.
-		 *
-		 * When checking pmfw version, use the major and minor only.
-		 */
-		if ((adev->pm.fw_version & 0x00FFFF00) < 0x005D6300)
-			ret = DMUB_IPS_RCG_IN_ACTIVE_IPS2_IN_OFF;
-		else if (amdgpu_ip_version(adev, GC_HWIP, 0) > IP_VERSION(11, 5, 0))
-			/*
-			 * Other ASICs with DCN35 that have residency issues with
-			 * IPS2 in idle.
-			 * We want them to use IPS2 only in display off cases.
-			 */
-			ret = DMUB_IPS_RCG_IN_ACTIVE_IPS2_IN_OFF;
-		break;
 	case IP_VERSION(3, 5, 1):
 		ret = DMUB_IPS_RCG_IN_ACTIVE_IPS2_IN_OFF;
 		break;
@@ -172,7 +172,10 @@ void hdcp_update_display(struct hdcp_workqueue *hdcp_work,
 	struct mod_hdcp_display_adjustment display_adjust;
 	unsigned int conn_index = aconnector->base.index;
 
-	mutex_lock(&hdcp_w->mutex);
+	guard(mutex)(&hdcp_w->mutex);
+	drm_connector_get(&aconnector->base);
+	if (hdcp_w->aconnector[conn_index])
+		drm_connector_put(&hdcp_w->aconnector[conn_index]->base);
 	hdcp_w->aconnector[conn_index] = aconnector;
 
 	memset(&link_adjust, 0, sizeof(link_adjust));

@@ -209,7 +212,6 @@ void hdcp_update_display(struct hdcp_workqueue *hdcp_work,
 	mod_hdcp_update_display(&hdcp_w->hdcp, conn_index, &link_adjust, &display_adjust, &hdcp_w->output);
 
 	process_output(hdcp_w);
-	mutex_unlock(&hdcp_w->mutex);
 }
 
 static void hdcp_remove_display(struct hdcp_workqueue *hdcp_work,

@@ -220,8 +222,7 @@ static void hdcp_remove_display(struct hdcp_workqueue *hdcp_work,
 	struct drm_connector_state *conn_state = aconnector->base.state;
 	unsigned int conn_index = aconnector->base.index;
 
-	mutex_lock(&hdcp_w->mutex);
-	hdcp_w->aconnector[conn_index] = aconnector;
+	guard(mutex)(&hdcp_w->mutex);
 
 	/* the removal of display will invoke auth reset -> hdcp destroy and
 	 * we'd expect the Content Protection (CP) property changed back to

@@ -237,9 +238,11 @@ static void hdcp_remove_display(struct hdcp_workqueue *hdcp_work,
 	}
 
 	mod_hdcp_remove_display(&hdcp_w->hdcp, aconnector->base.index, &hdcp_w->output);
-
+	if (hdcp_w->aconnector[conn_index]) {
+		drm_connector_put(&hdcp_w->aconnector[conn_index]->base);
+		hdcp_w->aconnector[conn_index] = NULL;
+	}
 	process_output(hdcp_w);
-	mutex_unlock(&hdcp_w->mutex);
 }
 
 void hdcp_reset_display(struct hdcp_workqueue *hdcp_work, unsigned int link_index)

@@ -247,7 +250,7 @@ void hdcp_reset_display(struct hdcp_workqueue *hdcp_work, unsigned int link_inde
 	struct hdcp_workqueue *hdcp_w = &hdcp_work[link_index];
 	unsigned int conn_index;
 
-	mutex_lock(&hdcp_w->mutex);
+	guard(mutex)(&hdcp_w->mutex);
 
 	mod_hdcp_reset_connection(&hdcp_w->hdcp, &hdcp_w->output);
 

@@ -256,11 +259,13 @@ void hdcp_reset_display(struct hdcp_workqueue *hdcp_work, unsigned int link_inde
 	for (conn_index = 0; conn_index < AMDGPU_DM_MAX_DISPLAY_INDEX; conn_index++) {
 		hdcp_w->encryption_status[conn_index] =
 				MOD_HDCP_ENCRYPTION_STATUS_HDCP_OFF;
+		if (hdcp_w->aconnector[conn_index]) {
+			drm_connector_put(&hdcp_w->aconnector[conn_index]->base);
+			hdcp_w->aconnector[conn_index] = NULL;
+		}
 	}
 
 	process_output(hdcp_w);
-
-	mutex_unlock(&hdcp_w->mutex);
 }
 
 void hdcp_handle_cpirq(struct hdcp_workqueue *hdcp_work, unsigned int link_index)

@@ -277,7 +282,7 @@ static void event_callback(struct work_struct *work)
 	hdcp_work = container_of(to_delayed_work(work), struct hdcp_workqueue,
 				 callback_dwork);
 
-	mutex_lock(&hdcp_work->mutex);
+	guard(mutex)(&hdcp_work->mutex);
 
 	cancel_delayed_work(&hdcp_work->callback_dwork);
 

@@ -285,8 +290,6 @@ static void event_callback(struct work_struct *work)
 			       &hdcp_work->output);
 
 	process_output(hdcp_work);
-
-	mutex_unlock(&hdcp_work->mutex);
 }
 
 static void event_property_update(struct work_struct *work)

@@ -323,7 +326,7 @@ static void event_property_update(struct work_struct *work)
 			continue;
 
 		drm_modeset_lock(&dev->mode_config.connection_mutex, NULL);
-		mutex_lock(&hdcp_work->mutex);
+		guard(mutex)(&hdcp_work->mutex);
 
 		if (conn_state->commit) {
 			ret = wait_for_completion_interruptible_timeout(&conn_state->commit->hw_done,

@@ -355,7 +358,6 @@ static void event_property_update(struct work_struct *work)
 			drm_hdcp_update_content_protection(connector,
 							   DRM_MODE_CONTENT_PROTECTION_DESIRED);
 		}
-		mutex_unlock(&hdcp_work->mutex);
 		drm_modeset_unlock(&dev->mode_config.connection_mutex);
 	}
 }

@@ -368,7 +370,7 @@ static void event_property_validate(struct work_struct *work)
 	struct amdgpu_dm_connector *aconnector;
 	unsigned int conn_index;
 
-	mutex_lock(&hdcp_work->mutex);
+	guard(mutex)(&hdcp_work->mutex);
 
 	for (conn_index = 0; conn_index < AMDGPU_DM_MAX_DISPLAY_INDEX;
 	     conn_index++) {

@@ -408,8 +410,6 @@ static void event_property_validate(struct work_struct *work)
 			schedule_work(&hdcp_work->property_update_work);
 		}
 	}
-
-	mutex_unlock(&hdcp_work->mutex);
 }
 
 static void event_watchdog_timer(struct work_struct *work)

@@ -420,7 +420,7 @@ static void event_watchdog_timer(struct work_struct *work)
 				 struct hdcp_workqueue,
 				 watchdog_timer_dwork);
 
-	mutex_lock(&hdcp_work->mutex);
+	guard(mutex)(&hdcp_work->mutex);
 
 	cancel_delayed_work(&hdcp_work->watchdog_timer_dwork);
 

@@ -429,8 +429,6 @@ static void event_watchdog_timer(struct work_struct *work)
 			       &hdcp_work->output);
 
 	process_output(hdcp_work);
-
-	mutex_unlock(&hdcp_work->mutex);
 }
 
 static void event_cpirq(struct work_struct *work)

@@ -439,13 +437,11 @@ static void event_cpirq(struct work_struct *work)
 
 	hdcp_work = container_of(work, struct hdcp_workqueue, cpirq_work);
 
-	mutex_lock(&hdcp_work->mutex);
+	guard(mutex)(&hdcp_work->mutex);
 
 	mod_hdcp_process_event(&hdcp_work->hdcp, MOD_HDCP_EVENT_CPIRQ, &hdcp_work->output);
 
 	process_output(hdcp_work);
-
-	mutex_unlock(&hdcp_work->mutex);
 }
 
 void hdcp_destroy(struct kobject *kobj, struct hdcp_workqueue *hdcp_work)
@@ -479,7 +475,7 @@ static bool enable_assr(void *handle, struct dc_link *link)
 
 	dtm_cmd = (struct ta_dtm_shared_memory *)psp->dtm_context.context.mem_context.shared_buf;
 
-	mutex_lock(&psp->dtm_context.mutex);
+	guard(mutex)(&psp->dtm_context.mutex);
 	memset(dtm_cmd, 0, sizeof(struct ta_dtm_shared_memory));
 
 	dtm_cmd->cmd_id = TA_DTM_COMMAND__TOPOLOGY_ASSR_ENABLE;

@@ -494,8 +490,6 @@ static bool enable_assr(void *handle, struct dc_link *link)
 		res = false;
 	}
 
-	mutex_unlock(&psp->dtm_context.mutex);
-
 	return res;
 }
 

@@ -504,6 +498,7 @@ static void update_config(void *handle, struct cp_psp_stream_config *config)
 	struct hdcp_workqueue *hdcp_work = handle;
 	struct amdgpu_dm_connector *aconnector = config->dm_stream_ctx;
 	int link_index = aconnector->dc_link->link_index;
+	unsigned int conn_index = aconnector->base.index;
 	struct mod_hdcp_display *display = &hdcp_work[link_index].display;
 	struct mod_hdcp_link *link = &hdcp_work[link_index].link;
 	struct hdcp_workqueue *hdcp_w = &hdcp_work[link_index];

@@ -557,13 +552,14 @@ static void update_config(void *handle, struct cp_psp_stream_config *config)
 		 (!!aconnector->base.state) ?
 		 aconnector->base.state->hdcp_content_type : -1);
 
-	mutex_lock(&hdcp_w->mutex);
+	guard(mutex)(&hdcp_w->mutex);
 
 	mod_hdcp_add_display(&hdcp_w->hdcp, link, display, &hdcp_w->output);
-
+	drm_connector_get(&aconnector->base);
+	if (hdcp_w->aconnector[conn_index])
+		drm_connector_put(&hdcp_w->aconnector[conn_index]->base);
+	hdcp_w->aconnector[conn_index] = aconnector;
 	process_output(hdcp_w);
-	mutex_unlock(&hdcp_w->mutex);
-
 }
 
 /**
@@ -938,6 +938,10 @@ void drm_show_fdinfo(struct seq_file *m, struct file *f)
 	struct drm_file *file = f->private_data;
 	struct drm_device *dev = file->minor->dev;
 	struct drm_printer p = drm_seq_file_printer(m);
+	int idx;
+
+	if (!drm_dev_enter(dev, &idx))
+		return;
 
 	drm_printf(&p, "drm-driver:\t%s\n", dev->driver->name);
 	drm_printf(&p, "drm-client-id:\t%llu\n", file->client_id);

@@ -952,6 +956,8 @@ void drm_show_fdinfo(struct seq_file *m, struct file *f)
 
 	if (dev->driver->show_fdinfo)
 		dev->driver->show_fdinfo(&p, file);
+
+	drm_dev_exit(idx);
 }
 EXPORT_SYMBOL(drm_show_fdinfo);
 
@@ -404,12 +404,16 @@ static void mipi_dbi_blank(struct mipi_dbi_dev *dbidev)
 	u16 height = drm->mode_config.min_height;
 	u16 width = drm->mode_config.min_width;
 	struct mipi_dbi *dbi = &dbidev->dbi;
-	size_t len = width * height * 2;
+	const struct drm_format_info *dst_format;
+	size_t len;
 	int idx;
 
 	if (!drm_dev_enter(drm, &idx))
 		return;
 
+	dst_format = drm_format_info(dbidev->pixel_format);
+	len = drm_format_info_min_pitch(dst_format, 0, width) * height;
+
 	memset(dbidev->tx_buf, 0, len);
 
 	mipi_dbi_set_window_address(dbidev, 0, width - 1, 0, height - 1);
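The old code hard-coded a 16-bit pixel size when clearing the display, which is wrong once the emulated format is not RGB565. As a rough illustration of what the generic helpers compute (this is not code from the patch, just a sketch using the same DRM helpers, with an invented helper name):

	#include <drm/drm_fourcc.h>

	/* Hypothetical helper: bytes needed to blank a width x height window. */
	static size_t blank_buf_len(u32 pixel_format, u16 width, u16 height)
	{
		const struct drm_format_info *info = drm_format_info(pixel_format);

		/* For DRM_FORMAT_RGB565 this equals width * 2, matching the old code. */
		return drm_format_info_min_pitch(info, 0, width) * height;
	}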
@@ -23,6 +23,7 @@ int intel_pxp_gsccs_init(struct intel_pxp *pxp);
 
 int intel_pxp_gsccs_create_session(struct intel_pxp *pxp, int arb_session_id);
 void intel_pxp_gsccs_end_arb_fw_session(struct intel_pxp *pxp, u32 arb_session_id);
+bool intel_pxp_gsccs_is_ready_for_sessions(struct intel_pxp *pxp);
 
 #else
 static inline void intel_pxp_gsccs_fini(struct intel_pxp *pxp)

@@ -34,8 +35,11 @@ static inline int intel_pxp_gsccs_init(struct intel_pxp *pxp)
 	return 0;
 }
 
+static inline bool intel_pxp_gsccs_is_ready_for_sessions(struct intel_pxp *pxp)
+{
+	return false;
+}
+
 #endif
 
-bool intel_pxp_gsccs_is_ready_for_sessions(struct intel_pxp *pxp);
-
 #endif /*__INTEL_PXP_GSCCS_H__ */
@@ -790,13 +790,13 @@ meson_vclk_vic_supported_freq(struct meson_drm *priv, unsigned int phy_freq,
 				 FREQ_1000_1001(params[i].pixel_freq));
 		DRM_DEBUG_DRIVER("i = %d phy_freq = %d alt = %d\n",
 				 i, params[i].phy_freq,
-				 FREQ_1000_1001(params[i].phy_freq/1000)*1000);
+				 FREQ_1000_1001(params[i].phy_freq/10)*10);
 		/* Match strict frequency */
 		if (phy_freq == params[i].phy_freq &&
 		    vclk_freq == params[i].vclk_freq)
 			return MODE_OK;
 		/* Match 1000/1001 variant */
-		if (phy_freq == (FREQ_1000_1001(params[i].phy_freq/1000)*1000) &&
+		if (phy_freq == (FREQ_1000_1001(params[i].phy_freq/10)*10) &&
 		    vclk_freq == FREQ_1000_1001(params[i].vclk_freq))
 			return MODE_OK;
 	}
@@ -1070,7 +1070,7 @@ void meson_vclk_setup(struct meson_drm *priv, unsigned int target,
 
 	for (freq = 0 ; params[freq].pixel_freq ; ++freq) {
 		if ((phy_freq == params[freq].phy_freq ||
-		     phy_freq == FREQ_1000_1001(params[freq].phy_freq/1000)*1000) &&
+		     phy_freq == FREQ_1000_1001(params[freq].phy_freq/10)*10) &&
 		    (vclk_freq == params[freq].vclk_freq ||
 		     vclk_freq == FREQ_1000_1001(params[freq].vclk_freq))) {
 			if (vclk_freq != params[freq].vclk_freq)
@@ -90,7 +90,7 @@ nouveau_fence_context_kill(struct nouveau_fence_chan *fctx, int error)
 	while (!list_empty(&fctx->pending)) {
 		fence = list_entry(fctx->pending.next, typeof(*fence), head);
 
-		if (error)
+		if (error && !dma_fence_is_signaled_locked(&fence->base))
 			dma_fence_set_error(&fence->base, error);
 
 		if (nouveau_fence_signal(fence))
@@ -216,6 +216,9 @@ static void drm_gem_shmem_test_get_pages_sgt(struct kunit *test)
 	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, sgt);
 	KUNIT_EXPECT_NULL(test, shmem->sgt);
 
+	ret = kunit_add_action_or_reset(test, kfree_wrapper, sgt);
+	KUNIT_ASSERT_EQ(test, ret, 0);
+
 	ret = kunit_add_action_or_reset(test, sg_free_table_wrapper, sgt);
 	KUNIT_ASSERT_EQ(test, ret, 0);
 
@@ -381,12 +381,6 @@ xe_hw_engine_setup_default_lrc_state(struct xe_hw_engine *hwe)
 			       blit_cctl_val,
 			       XE_RTP_ACTION_FLAG(ENGINE_BASE)))
 	},
-	/* Use Fixed slice CCS mode */
-	{ XE_RTP_NAME("RCU_MODE_FIXED_SLICE_CCS_MODE"),
-	  XE_RTP_RULES(FUNC(xe_hw_engine_match_fixed_cslice_mode)),
-	  XE_RTP_ACTIONS(FIELD_SET(RCU_MODE, RCU_MODE_FIXED_SLICE_CCS_MODE,
-				   RCU_MODE_FIXED_SLICE_CCS_MODE))
-	},
 	/* Disable WMTP if HW doesn't support it */
 	{ XE_RTP_NAME("DISABLE_WMTP_ON_UNSUPPORTED_HW"),
 	  XE_RTP_RULES(FUNC(xe_rtp_cfeg_wmtp_disabled)),
@@ -454,6 +448,12 @@ hw_engine_setup_default_state(struct xe_hw_engine *hwe)
 	  XE_RTP_ACTIONS(SET(CSFE_CHICKEN1(0), CS_PRIORITY_MEM_READ,
 			     XE_RTP_ACTION_FLAG(ENGINE_BASE)))
 	},
+	/* Use Fixed slice CCS mode */
+	{ XE_RTP_NAME("RCU_MODE_FIXED_SLICE_CCS_MODE"),
+	  XE_RTP_RULES(FUNC(xe_hw_engine_match_fixed_cslice_mode)),
+	  XE_RTP_ACTIONS(FIELD_SET(RCU_MODE, RCU_MODE_FIXED_SLICE_CCS_MODE,
+				   RCU_MODE_FIXED_SLICE_CCS_MODE))
+	},
 	{}
 };
 
@@ -652,9 +652,9 @@ static int lpi2c_imx_probe(struct platform_device *pdev)
 	return 0;
 
 rpm_disable:
-	pm_runtime_put(&pdev->dev);
-	pm_runtime_disable(&pdev->dev);
 	pm_runtime_dont_use_autosuspend(&pdev->dev);
+	pm_runtime_put_sync(&pdev->dev);
+	pm_runtime_disable(&pdev->dev);
 
 	return ret;
 }
@@ -3655,6 +3655,14 @@ found:
 	while (*uid == '0' && *(uid + 1))
 		uid++;
 
+	if (strlen(hid) >= ACPIHID_HID_LEN) {
+		pr_err("Invalid command line: hid is too long\n");
+		return 1;
+	} else if (strlen(uid) >= ACPIHID_UID_LEN) {
+		pr_err("Invalid command line: uid is too long\n");
+		return 1;
+	}
+
 	i = early_acpihid_map_size++;
 	memcpy(early_acpihid_map[i].hid, hid, strlen(hid));
 	memcpy(early_acpihid_map[i].uid, uid, strlen(uid));
@@ -397,6 +397,12 @@ struct iommu_domain *arm_smmu_sva_domain_alloc(struct device *dev,
 		return ERR_CAST(smmu_domain);
 	smmu_domain->domain.type = IOMMU_DOMAIN_SVA;
 	smmu_domain->domain.ops = &arm_smmu_sva_domain_ops;
+
+	/*
+	 * Choose page_size as the leaf page size for invalidation when
+	 * ARM_SMMU_FEAT_RANGE_INV is present
+	 */
+	smmu_domain->domain.pgsize_bitmap = PAGE_SIZE;
 	smmu_domain->smmu = smmu;
 
 	ret = xa_alloc(&arm_smmu_asid_xa, &asid, smmu_domain,
@@ -3220,6 +3220,7 @@ static int arm_smmu_insert_master(struct arm_smmu_device *smmu,
 	mutex_lock(&smmu->streams_mutex);
 	for (i = 0; i < fwspec->num_ids; i++) {
 		struct arm_smmu_stream *new_stream = &master->streams[i];
+		struct rb_node *existing;
 		u32 sid = fwspec->ids[i];
 
 		new_stream->id = sid;
@@ -3230,10 +3231,20 @@ static int arm_smmu_insert_master(struct arm_smmu_device *smmu,
 			break;
 
 		/* Insert into SID tree */
-		if (rb_find_add(&new_stream->node, &smmu->streams,
-				arm_smmu_streams_cmp_node)) {
-			dev_warn(master->dev, "stream %u already in tree\n",
-				 sid);
+		existing = rb_find_add(&new_stream->node, &smmu->streams,
+				       arm_smmu_streams_cmp_node);
+		if (existing) {
+			struct arm_smmu_master *existing_master =
+				rb_entry(existing, struct arm_smmu_stream, node)
+					->master;
+
+			/* Bridged PCI devices may end up with duplicated IDs */
+			if (existing_master == master)
+				continue;
+
+			dev_warn(master->dev,
+				 "stream %u already in tree from dev %s\n", sid,
+				 dev_name(existing_master->dev));
 			ret = -EINVAL;
 			break;
 		}
@@ -4666,6 +4666,9 @@ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e30, quirk_iommu_igfx);
 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e40, quirk_iommu_igfx);
 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e90, quirk_iommu_igfx);
 
+/* QM57/QS57 integrated gfx malfunctions with dmar */
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x0044, quirk_iommu_igfx);
+
 /* Broadwell igfx malfunctions with dmar */
 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x1606, quirk_iommu_igfx);
 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x160B, quirk_iommu_igfx);
@@ -4743,7 +4746,6 @@ static void quirk_calpella_no_shadow_gtt(struct pci_dev *dev)
 	}
 }
 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x0040, quirk_calpella_no_shadow_gtt);
-DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x0044, quirk_calpella_no_shadow_gtt);
 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x0062, quirk_calpella_no_shadow_gtt);
 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x006a, quirk_calpella_no_shadow_gtt);
 
@@ -227,6 +227,9 @@ static int qcom_mpm_alloc(struct irq_domain *domain, unsigned int virq,
 	if (ret)
 		return ret;
 
+	if (pin == GPIO_NO_WAKE_IRQ)
+		return irq_domain_disconnect_hierarchy(domain, virq);
+
 	ret = irq_domain_set_hwirq_and_chip(domain, virq, pin,
 					    &qcom_mpm_chip, priv);
 	if (ret)
@@ -68,6 +68,8 @@
 #define LIST_DIRTY	1
 #define LIST_SIZE	2
 
+#define SCAN_RESCHED_CYCLE	16
+
 /*--------------------------------------------------------------*/
 
 /*
@@ -2414,7 +2416,12 @@ static void __scan(struct dm_bufio_client *c)
 
 			atomic_long_dec(&c->need_shrink);
 			freed++;
-			cond_resched();
+
+			if (unlikely(freed % SCAN_RESCHED_CYCLE == 0)) {
+				dm_bufio_unlock(c);
+				cond_resched();
+				dm_bufio_lock(c);
+			}
 		}
 	}
 }
@@ -5173,7 +5173,7 @@ static void dm_integrity_dtr(struct dm_target *ti)
 	BUG_ON(!RB_EMPTY_ROOT(&ic->in_progress));
 	BUG_ON(!list_empty(&ic->wait_list));
 
-	if (ic->mode == 'B')
+	if (ic->mode == 'B' && ic->bitmap_flush_work.work.func)
 		cancel_delayed_work_sync(&ic->bitmap_flush_work);
 	if (ic->metadata_wq)
 		destroy_workqueue(ic->metadata_wq);
@@ -523,8 +523,9 @@ static char **realloc_argv(unsigned int *size, char **old_argv)
 		gfp = GFP_NOIO;
 	}
 	argv = kmalloc_array(new_size, sizeof(*argv), gfp);
-	if (argv && old_argv) {
-		memcpy(argv, old_argv, *size * sizeof(*argv));
+	if (argv) {
+		if (old_argv)
+			memcpy(argv, old_argv, *size * sizeof(*argv));
 		*size = new_size;
 	}
 
@@ -1112,26 +1112,26 @@ int renesas_sdhi_probe(struct platform_device *pdev,
 	num_irqs = platform_irq_count(pdev);
 	if (num_irqs < 0) {
 		ret = num_irqs;
-		goto eirq;
+		goto edisclk;
 	}
 
 	/* There must be at least one IRQ source */
 	if (!num_irqs) {
 		ret = -ENXIO;
-		goto eirq;
+		goto edisclk;
 	}
 
 	for (i = 0; i < num_irqs; i++) {
 		irq = platform_get_irq(pdev, i);
 		if (irq < 0) {
 			ret = irq;
-			goto eirq;
+			goto edisclk;
 		}
 
 		ret = devm_request_irq(&pdev->dev, irq, tmio_mmc_irq, 0,
 				       dev_name(&pdev->dev), host);
 		if (ret)
-			goto eirq;
+			goto edisclk;
 	}
 
 	ret = tmio_mmc_host_probe(host);
@@ -1143,8 +1143,6 @@ int renesas_sdhi_probe(struct platform_device *pdev,
 
 	return ret;
 
-eirq:
-	tmio_mmc_host_remove(host);
 edisclk:
 	renesas_sdhi_clk_disable(host);
 efree:
@@ -1543,7 +1543,7 @@ static void vsc9959_tas_clock_adjust(struct ocelot *ocelot)
 	struct tc_taprio_qopt_offload *taprio;
 	struct ocelot_port *ocelot_port;
 	struct timespec64 base_ts;
-	int port;
+	int i, port;
 	u32 val;
 
 	mutex_lock(&ocelot->fwd_domain_lock);
@@ -1575,6 +1575,9 @@
 			   QSYS_PARAM_CFG_REG_3_BASE_TIME_SEC_MSB_M,
 			   QSYS_PARAM_CFG_REG_3);
 
+		for (i = 0; i < taprio->num_entries; i++)
+			vsc9959_tas_gcl_set(ocelot, i, &taprio->entries[i]);
+
 		ocelot_rmw(ocelot, QSYS_TAS_PARAM_CFG_CTRL_CONFIG_CHANGE,
 			   QSYS_TAS_PARAM_CFG_CTRL_CONFIG_CHANGE,
 			   QSYS_TAS_PARAM_CFG_CTRL);
@@ -172,34 +172,31 @@ static struct pds_auxiliary_dev *pdsc_auxbus_dev_register(struct pdsc *cf,
 	return padev;
 }
 
-int pdsc_auxbus_dev_del(struct pdsc *cf, struct pdsc *pf)
+void pdsc_auxbus_dev_del(struct pdsc *cf, struct pdsc *pf,
+			 struct pds_auxiliary_dev **pd_ptr)
 {
 	struct pds_auxiliary_dev *padev;
-	int err = 0;
 
-	if (!cf)
-		return -ENODEV;
+	if (!*pd_ptr)
+		return;
 
 	mutex_lock(&pf->config_lock);
 
-	padev = pf->vfs[cf->vf_id].padev;
-	if (padev) {
-		pds_client_unregister(pf, padev->client_id);
-		auxiliary_device_delete(&padev->aux_dev);
-		auxiliary_device_uninit(&padev->aux_dev);
-		padev->client_id = 0;
-	}
-	pf->vfs[cf->vf_id].padev = NULL;
+	padev = *pd_ptr;
+	pds_client_unregister(pf, padev->client_id);
+	auxiliary_device_delete(&padev->aux_dev);
+	auxiliary_device_uninit(&padev->aux_dev);
+	*pd_ptr = NULL;
 
 	mutex_unlock(&pf->config_lock);
-	return err;
 }
 
-int pdsc_auxbus_dev_add(struct pdsc *cf, struct pdsc *pf)
+int pdsc_auxbus_dev_add(struct pdsc *cf, struct pdsc *pf,
+			enum pds_core_vif_types vt,
+			struct pds_auxiliary_dev **pd_ptr)
 {
 	struct pds_auxiliary_dev *padev;
 	char devname[PDS_DEVNAME_LEN];
-	enum pds_core_vif_types vt;
 	unsigned long mask;
 	u16 vt_support;
 	int client_id;
@@ -208,6 +205,9 @@ int pdsc_auxbus_dev_add(struct pdsc *cf, struct pdsc *pf)
 	if (!cf)
 		return -ENODEV;
 
+	if (vt >= PDS_DEV_TYPE_MAX)
+		return -EINVAL;
+
 	mutex_lock(&pf->config_lock);
 
 	mask = BIT_ULL(PDSC_S_FW_DEAD) |
@@ -219,17 +219,10 @@ int pdsc_auxbus_dev_add(struct pdsc *cf, struct pdsc *pf)
 		goto out_unlock;
 	}
 
-	/* We only support vDPA so far, so it is the only one to
-	 * be verified that it is available in the Core device and
-	 * enabled in the devlink param.  In the future this might
-	 * become a loop for several VIF types.
-	 */
-
 	/* Verify that the type is supported and enabled.  It is not
 	 * an error if there is no auxbus device support for this
 	 * VF, it just means something else needs to happen with it.
 	 */
-	vt = PDS_DEV_TYPE_VDPA;
 	vt_support = !!le16_to_cpu(pf->dev_ident.vif_types[vt]);
 	if (!(vt_support &&
 	      pf->viftype_status[vt].supported &&
@@ -255,7 +248,7 @@ int pdsc_auxbus_dev_add(struct pdsc *cf, struct pdsc *pf)
 		err = PTR_ERR(padev);
 		goto out_unlock;
 	}
-	pf->vfs[cf->vf_id].padev = padev;
+	*pd_ptr = padev;
 
 out_unlock:
 	mutex_unlock(&pf->config_lock);
@@ -303,8 +303,11 @@ void pdsc_health_thread(struct work_struct *work);
 int pdsc_register_notify(struct notifier_block *nb);
 void pdsc_unregister_notify(struct notifier_block *nb);
 void pdsc_notify(unsigned long event, void *data);
-int pdsc_auxbus_dev_add(struct pdsc *cf, struct pdsc *pf);
-int pdsc_auxbus_dev_del(struct pdsc *cf, struct pdsc *pf);
+int pdsc_auxbus_dev_add(struct pdsc *cf, struct pdsc *pf,
+			enum pds_core_vif_types vt,
+			struct pds_auxiliary_dev **pd_ptr);
+void pdsc_auxbus_dev_del(struct pdsc *cf, struct pdsc *pf,
+			 struct pds_auxiliary_dev **pd_ptr);
 
 void pdsc_process_adminq(struct pdsc_qcq *qcq);
 void pdsc_work_thread(struct work_struct *work);
@@ -56,8 +56,11 @@ int pdsc_dl_enable_set(struct devlink *dl, u32 id,
 	for (vf_id = 0; vf_id < pdsc->num_vfs; vf_id++) {
 		struct pdsc *vf = pdsc->vfs[vf_id].vf;
 
-		err = ctx->val.vbool ? pdsc_auxbus_dev_add(vf, pdsc) :
-				       pdsc_auxbus_dev_del(vf, pdsc);
+		if (ctx->val.vbool)
+			err = pdsc_auxbus_dev_add(vf, pdsc, vt_entry->vif_id,
+						  &pdsc->vfs[vf_id].padev);
+		else
+			pdsc_auxbus_dev_del(vf, pdsc, &pdsc->vfs[vf_id].padev);
 	}
 
 	return err;
@@ -190,7 +190,8 @@ static int pdsc_init_vf(struct pdsc *vf)
 	devl_unlock(dl);
 
 	pf->vfs[vf->vf_id].vf = vf;
-	err = pdsc_auxbus_dev_add(vf, pf);
+	err = pdsc_auxbus_dev_add(vf, pf, PDS_DEV_TYPE_VDPA,
+				  &pf->vfs[vf->vf_id].padev);
 	if (err) {
 		devl_lock(dl);
 		devl_unregister(dl);
@@ -417,7 +418,7 @@ static void pdsc_remove(struct pci_dev *pdev)
 
 		pf = pdsc_get_pf_struct(pdsc->pdev);
 		if (!IS_ERR(pf)) {
-			pdsc_auxbus_dev_del(pdsc, pf);
+			pdsc_auxbus_dev_del(pdsc, pf, &pf->vfs[pdsc->vf_id].padev);
 			pf->vfs[pdsc->vf_id].vf = NULL;
 		}
 	} else {
@@ -482,7 +483,8 @@ static void pdsc_reset_prepare(struct pci_dev *pdev)
 
 		pf = pdsc_get_pf_struct(pdsc->pdev);
 		if (!IS_ERR(pf))
-			pdsc_auxbus_dev_del(pdsc, pf);
+			pdsc_auxbus_dev_del(pdsc, pf,
+					    &pf->vfs[pdsc->vf_id].padev);
 	}
 
 	pdsc_unmap_bars(pdsc);
@@ -527,7 +529,8 @@ static void pdsc_reset_done(struct pci_dev *pdev)
 
 		pf = pdsc_get_pf_struct(pdsc->pdev);
 		if (!IS_ERR(pf))
-			pdsc_auxbus_dev_add(pdsc, pf);
+			pdsc_auxbus_dev_add(pdsc, pf, PDS_DEV_TYPE_VDPA,
+					    &pf->vfs[pdsc->vf_id].padev);
 	}
 }
 
@@ -373,8 +373,13 @@ static int xgbe_map_rx_buffer(struct xgbe_prv_data *pdata,
 	}
 
 	/* Set up the header page info */
-	xgbe_set_buffer_data(&rdata->rx.hdr, &ring->rx_hdr_pa,
-			     XGBE_SKB_ALLOC_SIZE);
+	if (pdata->netdev->features & NETIF_F_RXCSUM) {
+		xgbe_set_buffer_data(&rdata->rx.hdr, &ring->rx_hdr_pa,
+				     XGBE_SKB_ALLOC_SIZE);
+	} else {
+		xgbe_set_buffer_data(&rdata->rx.hdr, &ring->rx_hdr_pa,
+				     pdata->rx_buf_size);
+	}
 
 	/* Set up the buffer page info */
 	xgbe_set_buffer_data(&rdata->rx.buf, &ring->rx_buf_pa,
@@ -320,6 +320,18 @@ static void xgbe_config_sph_mode(struct xgbe_prv_data *pdata)
 	XGMAC_IOWRITE_BITS(pdata, MAC_RCR, HDSMS, XGBE_SPH_HDSMS_SIZE);
 }
 
+static void xgbe_disable_sph_mode(struct xgbe_prv_data *pdata)
+{
+	unsigned int i;
+
+	for (i = 0; i < pdata->channel_count; i++) {
+		if (!pdata->channel[i]->rx_ring)
+			break;
+
+		XGMAC_DMA_IOWRITE_BITS(pdata->channel[i], DMA_CH_CR, SPH, 0);
+	}
+}
+
 static int xgbe_write_rss_reg(struct xgbe_prv_data *pdata, unsigned int type,
 			      unsigned int index, unsigned int val)
 {
@@ -3545,8 +3557,12 @@ static int xgbe_init(struct xgbe_prv_data *pdata)
 	xgbe_config_tx_coalesce(pdata);
 	xgbe_config_rx_buffer_size(pdata);
 	xgbe_config_tso_mode(pdata);
-	xgbe_config_sph_mode(pdata);
-	xgbe_config_rss(pdata);
+
+	if (pdata->netdev->features & NETIF_F_RXCSUM) {
+		xgbe_config_sph_mode(pdata);
+		xgbe_config_rss(pdata);
+	}
+
 	desc_if->wrapper_tx_desc_init(pdata);
 	desc_if->wrapper_rx_desc_init(pdata);
 	xgbe_enable_dma_interrupts(pdata);
@@ -3702,5 +3718,9 @@ void xgbe_init_function_ptrs_dev(struct xgbe_hw_if *hw_if)
 	hw_if->disable_vxlan = xgbe_disable_vxlan;
 	hw_if->set_vxlan_id = xgbe_set_vxlan_id;
 
+	/* For Split Header*/
+	hw_if->enable_sph = xgbe_config_sph_mode;
+	hw_if->disable_sph = xgbe_disable_sph_mode;
+
 	DBGPR("<--xgbe_init_function_ptrs\n");
 }
@@ -2257,10 +2257,17 @@ static int xgbe_set_features(struct net_device *netdev,
 	if (ret)
 		return ret;
 
-	if ((features & NETIF_F_RXCSUM) && !rxcsum)
+	if ((features & NETIF_F_RXCSUM) && !rxcsum) {
+		hw_if->enable_sph(pdata);
+		hw_if->enable_vxlan(pdata);
 		hw_if->enable_rx_csum(pdata);
-	else if (!(features & NETIF_F_RXCSUM) && rxcsum)
+		schedule_work(&pdata->restart_work);
+	} else if (!(features & NETIF_F_RXCSUM) && rxcsum) {
+		hw_if->disable_sph(pdata);
+		hw_if->disable_vxlan(pdata);
 		hw_if->disable_rx_csum(pdata);
+		schedule_work(&pdata->restart_work);
+	}
 
 	if ((features & NETIF_F_HW_VLAN_CTAG_RX) && !rxvlan)
 		hw_if->enable_rx_vlan_stripping(pdata);
@@ -865,6 +865,10 @@ struct xgbe_hw_if {
 	void (*enable_vxlan)(struct xgbe_prv_data *);
 	void (*disable_vxlan)(struct xgbe_prv_data *);
 	void (*set_vxlan_id)(struct xgbe_prv_data *);
+
+	/* For Split Header */
+	void (*enable_sph)(struct xgbe_prv_data *pdata);
+	void (*disable_sph)(struct xgbe_prv_data *pdata);
 };
 
 /* This structure represents implementation specific routines for an
@@ -1986,6 +1986,7 @@ static struct sk_buff *bnxt_rx_vlan(struct sk_buff *skb, u8 cmp_type,
 	}
 	return skb;
 vlan_err:
+	skb_mark_for_recycle(skb);
 	dev_kfree_skb(skb);
 	return NULL;
 }
@@ -3320,6 +3321,9 @@ static void bnxt_free_tx_skbs(struct bnxt *bp)
 		}
 		netdev_tx_reset_queue(netdev_get_tx_queue(bp->dev, i));
 	}
+
+	if (bp->ptp_cfg && !(bp->fw_cap & BNXT_FW_CAP_TX_TS_CMP))
+		bnxt_ptp_free_txts_skbs(bp->ptp_cfg);
 }
 
 static void bnxt_free_one_rx_ring(struct bnxt *bp, struct bnxt_rx_ring_info *rxr)
@@ -11076,6 +11080,9 @@ static void bnxt_init_napi(struct bnxt *bp)
 		poll_fn = bnxt_poll_p5;
 	else if (BNXT_CHIP_TYPE_NITRO_A0(bp))
 		cp_nr_rings--;
+
+	set_bit(BNXT_STATE_NAPI_DISABLED, &bp->state);
+
 	for (i = 0; i < cp_nr_rings; i++) {
 		bnapi = bp->bnapi[i];
 		netif_napi_add(bp->dev, &bnapi->napi, poll_fn);
@@ -11844,13 +11851,8 @@ static int bnxt_hwrm_if_change(struct bnxt *bp, bool up)
 				set_bit(BNXT_STATE_ABORT_ERR, &bp->state);
 				return rc;
 			}
+			/* IRQ will be initialized later in bnxt_request_irq()*/
 			bnxt_clear_int_mode(bp);
-			rc = bnxt_init_int_mode(bp);
-			if (rc) {
-				clear_bit(BNXT_STATE_FW_RESET_DET, &bp->state);
-				netdev_err(bp->dev, "init int mode failed\n");
-				return rc;
-			}
 		}
 		rc = bnxt_cancel_reservations(bp, fw_reset);
 	}
@@ -12249,8 +12251,6 @@ static int __bnxt_open_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init)
 	/* VF-reps may need to be re-opened after the PF is re-opened */
 	if (BNXT_PF(bp))
 		bnxt_vf_reps_open(bp);
-	if (bp->ptp_cfg && !(bp->fw_cap & BNXT_FW_CAP_TX_TS_CMP))
-		WRITE_ONCE(bp->ptp_cfg->tx_avail, BNXT_MAX_TX_TS);
 	bnxt_ptp_init_rtc(bp, true);
 	bnxt_ptp_cfg_tstamp_filters(bp);
 	if (BNXT_SUPPORTS_MULTI_RSS_CTX(bp))
@@ -15421,8 +15421,8 @@ static void bnxt_remove_one(struct pci_dev *pdev)
 
 	bnxt_rdma_aux_device_del(bp);
 
-	bnxt_ptp_clear(bp);
 	unregister_netdev(dev);
+	bnxt_ptp_clear(bp);
 
 	bnxt_rdma_aux_device_uninit(bp);
 
@@ -66,20 +66,30 @@ static int bnxt_hwrm_dbg_dma_data(struct bnxt *bp, void *msg,
 			}
 		}
 
-		if (info->dest_buf) {
-			if ((info->seg_start + off + len) <=
-			    BNXT_COREDUMP_BUF_LEN(info->buf_len)) {
-				memcpy(info->dest_buf + off, dma_buf, len);
-			} else {
-				rc = -ENOBUFS;
-				break;
-			}
-		}
-
 		if (cmn_req->req_type ==
 		    cpu_to_le16(HWRM_DBG_COREDUMP_RETRIEVE))
 			info->dest_buf_size += len;
 
+		if (info->dest_buf) {
+			if ((info->seg_start + off + len) <=
+			    BNXT_COREDUMP_BUF_LEN(info->buf_len)) {
+				u16 copylen = min_t(u16, len,
+						    info->dest_buf_size - off);
+
+				memcpy(info->dest_buf + off, dma_buf, copylen);
+				if (copylen < len)
+					break;
+			} else {
+				rc = -ENOBUFS;
+				if (cmn_req->req_type ==
+				    cpu_to_le16(HWRM_DBG_COREDUMP_LIST)) {
+					kfree(info->dest_buf);
+					info->dest_buf = NULL;
+				}
+				break;
+			}
+		}
+
 		if (!(cmn_resp->flags & HWRM_DBG_CMN_FLAGS_MORE))
 			break;
 
@@ -2041,6 +2041,17 @@ static int bnxt_get_regs_len(struct net_device *dev)
 	return reg_len;
 }
 
+#define BNXT_PCIE_32B_ENTRY(start, end)			\
+	{ offsetof(struct pcie_ctx_hw_stats, start),	\
+	  offsetof(struct pcie_ctx_hw_stats, end) }
+
+static const struct {
+	u16 start;
+	u16 end;
+} bnxt_pcie_32b_entries[] = {
+	BNXT_PCIE_32B_ENTRY(pcie_ltssm_histogram[0], pcie_ltssm_histogram[3]),
+};
+
 static void bnxt_get_regs(struct net_device *dev, struct ethtool_regs *regs,
 			  void *_p)
 {
@@ -2072,12 +2083,27 @@ static void bnxt_get_regs(struct net_device *dev, struct ethtool_regs *regs,
 	req->pcie_stat_host_addr = cpu_to_le64(hw_pcie_stats_addr);
 	rc = hwrm_req_send(bp, req);
 	if (!rc) {
-		__le64 *src = (__le64 *)hw_pcie_stats;
-		u64 *dst = (u64 *)(_p + BNXT_PXP_REG_LEN);
-		int i;
+		u8 *dst = (u8 *)(_p + BNXT_PXP_REG_LEN);
+		u8 *src = (u8 *)hw_pcie_stats;
+		int i, j;
 
-		for (i = 0; i < sizeof(*hw_pcie_stats) / sizeof(__le64); i++)
-			dst[i] = le64_to_cpu(src[i]);
+		for (i = 0, j = 0; i < sizeof(*hw_pcie_stats); ) {
+			if (i >= bnxt_pcie_32b_entries[j].start &&
+			    i <= bnxt_pcie_32b_entries[j].end) {
+				u32 *dst32 = (u32 *)(dst + i);
+
+				*dst32 = le32_to_cpu(*(__le32 *)(src + i));
+				i += 4;
+				if (i > bnxt_pcie_32b_entries[j].end &&
+				    j < ARRAY_SIZE(bnxt_pcie_32b_entries) - 1)
+					j++;
+			} else {
+				u64 *dst64 = (u64 *)(dst + i);
+
+				*dst64 = le64_to_cpu(*(__le64 *)(src + i));
+				i += 8;
+			}
+		}
 	}
 	hwrm_req_drop(bp, req);
 }
@@ -4848,6 +4874,7 @@ static void bnxt_self_test(struct net_device *dev, struct ethtool_test *etest,
 	if (!bp->num_tests || !BNXT_PF(bp))
 		return;
 
+	memset(buf, 0, sizeof(u64) * bp->num_tests);
 	if (etest->flags & ETH_TEST_FL_OFFLINE &&
 	    bnxt_ulp_registered(bp->edev)) {
 		etest->flags |= ETH_TEST_FL_FAILED;
@@ -4855,7 +4882,6 @@ static void bnxt_self_test(struct net_device *dev, struct ethtool_test *etest,
 		return;
 	}
 
-	memset(buf, 0, sizeof(u64) * bp->num_tests);
 	if (!netif_running(dev)) {
 		etest->flags |= ETH_TEST_FL_FAILED;
 		return;
@@ -777,6 +777,27 @@ next_slot:
 	return HZ;
 }
 
+void bnxt_ptp_free_txts_skbs(struct bnxt_ptp_cfg *ptp)
+{
+	struct bnxt_ptp_tx_req *txts_req;
+	u16 cons = ptp->txts_cons;
+
+	/* make sure ptp aux worker finished with
+	 * possible BNXT_STATE_OPEN set
+	 */
+	ptp_cancel_worker_sync(ptp->ptp_clock);
+
+	ptp->tx_avail = BNXT_MAX_TX_TS;
+	while (cons != ptp->txts_prod) {
+		txts_req = &ptp->txts_req[cons];
+		if (!IS_ERR_OR_NULL(txts_req->tx_skb))
+			dev_kfree_skb_any(txts_req->tx_skb);
+		cons = NEXT_TXTS(cons);
+	}
+	ptp->txts_cons = cons;
+	ptp_schedule_worker(ptp->ptp_clock, 0);
+}
+
 int bnxt_ptp_get_txts_prod(struct bnxt_ptp_cfg *ptp, u16 *prod)
 {
 	spin_lock_bh(&ptp->ptp_tx_lock);
@@ -1095,7 +1116,6 @@ out:
 void bnxt_ptp_clear(struct bnxt *bp)
 {
 	struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
-	int i;
 
 	if (!ptp)
 		return;
@@ -1107,12 +1127,5 @@ void bnxt_ptp_clear(struct bnxt *bp)
 	kfree(ptp->ptp_info.pin_config);
 	ptp->ptp_info.pin_config = NULL;
 
-	for (i = 0; i < BNXT_MAX_TX_TS; i++) {
-		if (ptp->txts_req[i].tx_skb) {
-			dev_kfree_skb_any(ptp->txts_req[i].tx_skb);
-			ptp->txts_req[i].tx_skb = NULL;
-		}
-	}
-
 	bnxt_unmap_ptp_regs(bp);
 }
@@ -174,6 +174,7 @@ int bnxt_ptp_cfg_tstamp_filters(struct bnxt *bp);
 void bnxt_ptp_reapply_pps(struct bnxt *bp);
 int bnxt_hwtstamp_set(struct net_device *dev, struct ifreq *ifr);
 int bnxt_hwtstamp_get(struct net_device *dev, struct ifreq *ifr);
+void bnxt_ptp_free_txts_skbs(struct bnxt_ptp_cfg *ptp);
 int bnxt_ptp_get_txts_prod(struct bnxt_ptp_cfg *ptp, u16 *prod);
 void bnxt_get_tx_ts_p5(struct bnxt *bp, struct sk_buff *skb, u16 prod);
 int bnxt_get_rx_ts_p5(struct bnxt *bp, u64 *ts, u32 pkt_ts);
@@ -352,7 +352,7 @@ parse_eeprom (struct net_device *dev)
 	eth_hw_addr_set(dev, psrom->mac_addr);
 
 	if (np->chip_id == CHIP_IP1000A) {
-		np->led_mode = psrom->led_mode;
+		np->led_mode = le16_to_cpu(psrom->led_mode);
 		return 0;
 	}
 
@@ -335,7 +335,7 @@ typedef struct t_SROM {
 	u16 sub_system_id;	/* 0x06 */
 	u16 pci_base_1;		/* 0x08 (IP1000A only) */
 	u16 pci_base_2;		/* 0x0a (IP1000A only) */
-	u16 led_mode;		/* 0x0c (IP1000A only) */
+	__le16 led_mode;	/* 0x0c (IP1000A only) */
 	u16 reserved1[9];	/* 0x0e-0x1f */
 	u8 mac_addr[6];		/* 0x20-0x25 */
 	u8 reserved2[10];	/* 0x26-0x2f */
@@ -714,7 +714,12 @@ static int fec_enet_txq_submit_skb(struct fec_enet_priv_tx_q *txq,
 	txq->bd.cur = bdp;
 
 	/* Trigger transmission start */
-	writel(0, txq->bd.reg_desc_active);
+	if (!(fep->quirks & FEC_QUIRK_ERR007885) ||
+	    !readl(txq->bd.reg_desc_active) ||
+	    !readl(txq->bd.reg_desc_active) ||
+	    !readl(txq->bd.reg_desc_active) ||
+	    !readl(txq->bd.reg_desc_active))
+		writel(0, txq->bd.reg_desc_active);
 
 	return 0;
 }
@@ -60,7 +60,7 @@ static struct hns3_dbg_cmd_info hns3_dbg_cmd[] = {
 		.name = "tm_qset",
 		.cmd = HNAE3_DBG_CMD_TM_QSET,
 		.dentry = HNS3_DBG_DENTRY_TM,
-		.buf_len = HNS3_DBG_READ_LEN,
+		.buf_len = HNS3_DBG_READ_LEN_1MB,
 		.init = hns3_dbg_common_file_init,
 	},
 	{
@@ -473,20 +473,14 @@ static void hns3_mask_vector_irq(struct hns3_enet_tqp_vector *tqp_vector,
 	writel(mask_en, tqp_vector->mask_addr);
 }
 
-static void hns3_vector_enable(struct hns3_enet_tqp_vector *tqp_vector)
+static void hns3_irq_enable(struct hns3_enet_tqp_vector *tqp_vector)
 {
 	napi_enable(&tqp_vector->napi);
 	enable_irq(tqp_vector->vector_irq);
-
-	/* enable vector */
-	hns3_mask_vector_irq(tqp_vector, 1);
 }
 
-static void hns3_vector_disable(struct hns3_enet_tqp_vector *tqp_vector)
+static void hns3_irq_disable(struct hns3_enet_tqp_vector *tqp_vector)
 {
-	/* disable vector */
-	hns3_mask_vector_irq(tqp_vector, 0);
-
 	disable_irq(tqp_vector->vector_irq);
 	napi_disable(&tqp_vector->napi);
 	cancel_work_sync(&tqp_vector->rx_group.dim.work);
@@ -707,11 +701,42 @@ static int hns3_set_rx_cpu_rmap(struct net_device *netdev)
 	return 0;
 }
 
+static void hns3_enable_irqs_and_tqps(struct net_device *netdev)
+{
+	struct hns3_nic_priv *priv = netdev_priv(netdev);
+	struct hnae3_handle *h = priv->ae_handle;
+	u16 i;
+
+	for (i = 0; i < priv->vector_num; i++)
+		hns3_irq_enable(&priv->tqp_vector[i]);
+
+	for (i = 0; i < priv->vector_num; i++)
+		hns3_mask_vector_irq(&priv->tqp_vector[i], 1);
+
+	for (i = 0; i < h->kinfo.num_tqps; i++)
+		hns3_tqp_enable(h->kinfo.tqp[i]);
+}
+
+static void hns3_disable_irqs_and_tqps(struct net_device *netdev)
+{
+	struct hns3_nic_priv *priv = netdev_priv(netdev);
+	struct hnae3_handle *h = priv->ae_handle;
+	u16 i;
+
+	for (i = 0; i < h->kinfo.num_tqps; i++)
+		hns3_tqp_disable(h->kinfo.tqp[i]);
+
+	for (i = 0; i < priv->vector_num; i++)
+		hns3_mask_vector_irq(&priv->tqp_vector[i], 0);
+
+	for (i = 0; i < priv->vector_num; i++)
+		hns3_irq_disable(&priv->tqp_vector[i]);
+}
+
 static int hns3_nic_net_up(struct net_device *netdev)
 {
 	struct hns3_nic_priv *priv = netdev_priv(netdev);
 	struct hnae3_handle *h = priv->ae_handle;
-	int i, j;
 	int ret;
 
 	ret = hns3_nic_reset_all_ring(h);
@@ -720,23 +745,13 @@
 
 	clear_bit(HNS3_NIC_STATE_DOWN, &priv->state);
 
-	/* enable the vectors */
-	for (i = 0; i < priv->vector_num; i++)
-		hns3_vector_enable(&priv->tqp_vector[i]);
-
-	/* enable rcb */
-	for (j = 0; j < h->kinfo.num_tqps; j++)
-		hns3_tqp_enable(h->kinfo.tqp[j]);
+	hns3_enable_irqs_and_tqps(netdev);
 
 	/* start the ae_dev */
 	ret = h->ae_algo->ops->start ? h->ae_algo->ops->start(h) : 0;
 	if (ret) {
 		set_bit(HNS3_NIC_STATE_DOWN, &priv->state);
-		while (j--)
-			hns3_tqp_disable(h->kinfo.tqp[j]);
-
-		for (j = i - 1; j >= 0; j--)
-			hns3_vector_disable(&priv->tqp_vector[j]);
+		hns3_disable_irqs_and_tqps(netdev);
 	}
 
 	return ret;
@@ -823,17 +838,9 @@ static void hns3_reset_tx_queue(struct hnae3_handle *h)
 static void hns3_nic_net_down(struct net_device *netdev)
 {
 	struct hns3_nic_priv *priv = netdev_priv(netdev);
-	struct hnae3_handle *h = hns3_get_handle(netdev);
 	const struct hnae3_ae_ops *ops;
-	int i;
 
-	/* disable vectors */
-	for (i = 0; i < priv->vector_num; i++)
-		hns3_vector_disable(&priv->tqp_vector[i]);
-
-	/* disable rcb */
-	for (i = 0; i < h->kinfo.num_tqps; i++)
-		hns3_tqp_disable(h->kinfo.tqp[i]);
+	hns3_disable_irqs_and_tqps(netdev);
 
 	/* stop ae_dev */
 	ops = priv->ae_handle->ae_algo->ops;
@@ -5864,8 +5871,6 @@ int hns3_set_channels(struct net_device *netdev,
 void hns3_external_lb_prepare(struct net_device *ndev, bool if_running)
 {
 	struct hns3_nic_priv *priv = netdev_priv(ndev);
-	struct hnae3_handle *h = priv->ae_handle;
-	int i;
 
 	if (!if_running)
 		return;
@@ -5876,11 +5881,7 @@ void hns3_external_lb_prepare(struct net_device *ndev, bool if_running)
 	netif_carrier_off(ndev);
 	netif_tx_disable(ndev);
 
-	for (i = 0; i < priv->vector_num; i++)
-		hns3_vector_disable(&priv->tqp_vector[i]);
-
-	for (i = 0; i < h->kinfo.num_tqps; i++)
-		hns3_tqp_disable(h->kinfo.tqp[i]);
+	hns3_disable_irqs_and_tqps(ndev);
 
 	/* delay ring buffer clearing to hns3_reset_notify_uninit_enet
 	 * during reset process, because driver may not be able
@@ -5896,7 +5897,6 @@ void hns3_external_lb_restore(struct net_device *ndev, bool if_running)
 {
 	struct hns3_nic_priv *priv = netdev_priv(ndev);
 	struct hnae3_handle *h = priv->ae_handle;
-	int i;
 
 	if (!if_running)
 		return;
@@ -5912,11 +5912,7 @@
 
 	clear_bit(HNS3_NIC_STATE_DOWN, &priv->state);
 
-	for (i = 0; i < priv->vector_num; i++)
-		hns3_vector_enable(&priv->tqp_vector[i]);
-
-	for (i = 0; i < h->kinfo.num_tqps; i++)
-		hns3_tqp_enable(h->kinfo.tqp[i]);
+	hns3_enable_irqs_and_tqps(ndev);
 
 	netif_tx_wake_all_queues(ndev);
 
@@ -439,6 +439,13 @@ static int hclge_ptp_create_clock(struct hclge_dev *hdev)
 	ptp->info.settime64 = hclge_ptp_settime;
 
 	ptp->info.n_alarm = 0;
+
+	spin_lock_init(&ptp->lock);
+	ptp->io_base = hdev->hw.hw.io_base + HCLGE_PTP_REG_OFFSET;
+	ptp->ts_cfg.rx_filter = HWTSTAMP_FILTER_NONE;
+	ptp->ts_cfg.tx_type = HWTSTAMP_TX_OFF;
+	hdev->ptp = ptp;
+
 	ptp->clock = ptp_clock_register(&ptp->info, &hdev->pdev->dev);
 	if (IS_ERR(ptp->clock)) {
 		dev_err(&hdev->pdev->dev,
@@ -450,12 +457,6 @@ static int hclge_ptp_create_clock(struct hclge_dev *hdev)
 		return -ENODEV;
 	}
 
-	spin_lock_init(&ptp->lock);
-	ptp->io_base = hdev->hw.hw.io_base + HCLGE_PTP_REG_OFFSET;
-	ptp->ts_cfg.rx_filter = HWTSTAMP_FILTER_NONE;
-	ptp->ts_cfg.tx_type = HWTSTAMP_TX_OFF;
-	hdev->ptp = ptp;
-
 	return 0;
 }
 
@@ -1294,9 +1294,8 @@ static void hclgevf_sync_vlan_filter(struct hclgevf_dev *hdev)
 	rtnl_unlock();
 }
 
-static int hclgevf_en_hw_strip_rxvtag(struct hnae3_handle *handle, bool enable)
+static int hclgevf_en_hw_strip_rxvtag_cmd(struct hclgevf_dev *hdev, bool enable)
 {
-	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
 	struct hclge_vf_to_pf_msg send_msg;
 
 	hclgevf_build_send_msg(&send_msg, HCLGE_MBX_SET_VLAN,
@@ -1305,6 +1304,19 @@ static int hclgevf_en_hw_strip_rxvtag(struct hnae3_handle *handle, bool enable)
 	return hclgevf_send_mbx_msg(hdev, &send_msg, false, NULL, 0);
 }
 
+static int hclgevf_en_hw_strip_rxvtag(struct hnae3_handle *handle, bool enable)
+{
+	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
+	int ret;
+
+	ret = hclgevf_en_hw_strip_rxvtag_cmd(hdev, enable);
+	if (ret)
+		return ret;
+
+	hdev->rxvtag_strip_en = enable;
+	return 0;
+}
+
 static int hclgevf_reset_tqp(struct hnae3_handle *handle)
 {
 #define HCLGEVF_RESET_ALL_QUEUE_DONE	1U
@@ -2206,12 +2218,13 @@ static int hclgevf_rss_init_hw(struct hclgevf_dev *hdev)
 					   tc_valid, tc_size);
 }
 
-static int hclgevf_init_vlan_config(struct hclgevf_dev *hdev)
+static int hclgevf_init_vlan_config(struct hclgevf_dev *hdev,
+				    bool rxvtag_strip_en)
 {
 	struct hnae3_handle *nic = &hdev->nic;
 	int ret;
 
-	ret = hclgevf_en_hw_strip_rxvtag(nic, true);
+	ret = hclgevf_en_hw_strip_rxvtag(nic, rxvtag_strip_en);
 	if (ret) {
 		dev_err(&hdev->pdev->dev,
 			"failed to enable rx vlan offload, ret = %d\n", ret);
@@ -2881,7 +2894,7 @@ static int hclgevf_reset_hdev(struct hclgevf_dev *hdev)
 	if (ret)
 		return ret;
 
-	ret = hclgevf_init_vlan_config(hdev);
+	ret = hclgevf_init_vlan_config(hdev, hdev->rxvtag_strip_en);
 	if (ret) {
 		dev_err(&hdev->pdev->dev,
 			"failed(%d) to initialize VLAN config\n", ret);
@@ -2996,7 +3009,7 @@ static int hclgevf_init_hdev(struct hclgevf_dev *hdev)
 		goto err_config;
 	}
 
-	ret = hclgevf_init_vlan_config(hdev);
+	ret = hclgevf_init_vlan_config(hdev, true);
 	if (ret) {
 		dev_err(&hdev->pdev->dev,
 			"failed(%d) to initialize VLAN config\n", ret);
@@ -253,6 +253,7 @@ struct hclgevf_dev {
 	int *vector_irq;
 
 	bool gro_en;
+	bool rxvtag_strip_en;
 
 	unsigned long vlan_del_fail_bmap[BITS_TO_LONGS(VLAN_N_VID)];
 
@@ -2091,6 +2091,11 @@ int ice_vc_add_fdir_fltr(struct ice_vf *vf, u8 *msg)
 	pf = vf->pf;
 	dev = ice_pf_to_dev(pf);
 	vf_vsi = ice_get_vf_vsi(vf);
+	if (!vf_vsi) {
+		dev_err(dev, "Can not get FDIR vf_vsi for VF %u\n", vf->vf_id);
+		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+		goto err_exit;
+	}
 
 #define ICE_VF_MAX_FDIR_FILTERS	128
 	if (!ice_fdir_num_avail_fltr(&pf->hw, vf_vsi) ||
@@ -629,13 +629,13 @@ bool idpf_is_capability_ena(struct idpf_adapter *adapter, bool all,
 	 VIRTCHNL2_CAP_RX_HSPLIT_AT_L4V4 |\
 	 VIRTCHNL2_CAP_RX_HSPLIT_AT_L4V6)
 
-#define IDPF_CAP_RX_CSUM_L4V4 (\
-	VIRTCHNL2_CAP_RX_CSUM_L4_IPV4_TCP |\
-	VIRTCHNL2_CAP_RX_CSUM_L4_IPV4_UDP)
+#define IDPF_CAP_TX_CSUM_L4V4 (\
+	VIRTCHNL2_CAP_TX_CSUM_L4_IPV4_TCP |\
+	VIRTCHNL2_CAP_TX_CSUM_L4_IPV4_UDP)
 
-#define IDPF_CAP_RX_CSUM_L4V6 (\
-	VIRTCHNL2_CAP_RX_CSUM_L4_IPV6_TCP |\
-	VIRTCHNL2_CAP_RX_CSUM_L4_IPV6_UDP)
+#define IDPF_CAP_TX_CSUM_L4V6 (\
+	VIRTCHNL2_CAP_TX_CSUM_L4_IPV6_TCP |\
+	VIRTCHNL2_CAP_TX_CSUM_L4_IPV6_UDP)
 
 #define IDPF_CAP_RX_CSUM (\
 	VIRTCHNL2_CAP_RX_CSUM_L3_IPV4 |\
@@ -644,11 +644,9 @@ bool idpf_is_capability_ena(struct idpf_adapter *adapter, bool all,
 	 VIRTCHNL2_CAP_RX_CSUM_L4_IPV6_TCP |\
 	 VIRTCHNL2_CAP_RX_CSUM_L4_IPV6_UDP)
 
-#define IDPF_CAP_SCTP_CSUM (\
+#define IDPF_CAP_TX_SCTP_CSUM (\
 	VIRTCHNL2_CAP_TX_CSUM_L4_IPV4_SCTP |\
-	VIRTCHNL2_CAP_TX_CSUM_L4_IPV6_SCTP |\
-	VIRTCHNL2_CAP_RX_CSUM_L4_IPV4_SCTP |\
-	VIRTCHNL2_CAP_RX_CSUM_L4_IPV6_SCTP)
+	VIRTCHNL2_CAP_TX_CSUM_L4_IPV6_SCTP)
 
 #define IDPF_CAP_TUNNEL_TX_CSUM (\
 	VIRTCHNL2_CAP_TX_CSUM_L3_SINGLE_TUNNEL |\
@@ -703,8 +703,10 @@ static int idpf_cfg_netdev(struct idpf_vport *vport)
|
|||||||
{
|
{
|
||||||
struct idpf_adapter *adapter = vport->adapter;
|
struct idpf_adapter *adapter = vport->adapter;
|
||||||
struct idpf_vport_config *vport_config;
|
struct idpf_vport_config *vport_config;
|
||||||
|
netdev_features_t other_offloads = 0;
|
||||||
|
netdev_features_t csum_offloads = 0;
|
||||||
|
netdev_features_t tso_offloads = 0;
|
||||||
netdev_features_t dflt_features;
|
netdev_features_t dflt_features;
|
||||||
netdev_features_t offloads = 0;
|
|
||||||
struct idpf_netdev_priv *np;
|
struct idpf_netdev_priv *np;
|
||||||
struct net_device *netdev;
|
struct net_device *netdev;
|
||||||
u16 idx = vport->idx;
|
u16 idx = vport->idx;
|
||||||
@@ -766,53 +768,32 @@ static int idpf_cfg_netdev(struct idpf_vport *vport)
|
|||||||
|
|
||||||
if (idpf_is_cap_ena_all(adapter, IDPF_RSS_CAPS, IDPF_CAP_RSS))
|
if (idpf_is_cap_ena_all(adapter, IDPF_RSS_CAPS, IDPF_CAP_RSS))
|
||||||
dflt_features |= NETIF_F_RXHASH;
|
dflt_features |= NETIF_F_RXHASH;
|
||||||
if (idpf_is_cap_ena_all(adapter, IDPF_CSUM_CAPS, IDPF_CAP_RX_CSUM_L4V4))
|
if (idpf_is_cap_ena_all(adapter, IDPF_CSUM_CAPS, IDPF_CAP_TX_CSUM_L4V4))
|
||||||
dflt_features |= NETIF_F_IP_CSUM;
|
csum_offloads |= NETIF_F_IP_CSUM;
|
||||||
if (idpf_is_cap_ena_all(adapter, IDPF_CSUM_CAPS, IDPF_CAP_RX_CSUM_L4V6))
|
if (idpf_is_cap_ena_all(adapter, IDPF_CSUM_CAPS, IDPF_CAP_TX_CSUM_L4V6))
|
||||||
dflt_features |= NETIF_F_IPV6_CSUM;
|
csum_offloads |= NETIF_F_IPV6_CSUM;
|
||||||
if (idpf_is_cap_ena(adapter, IDPF_CSUM_CAPS, IDPF_CAP_RX_CSUM))
|
if (idpf_is_cap_ena(adapter, IDPF_CSUM_CAPS, IDPF_CAP_RX_CSUM))
|
||||||
dflt_features |= NETIF_F_RXCSUM;
|
csum_offloads |= NETIF_F_RXCSUM;
|
||||||
if (idpf_is_cap_ena_all(adapter, IDPF_CSUM_CAPS, IDPF_CAP_SCTP_CSUM))
|
if (idpf_is_cap_ena_all(adapter, IDPF_CSUM_CAPS, IDPF_CAP_TX_SCTP_CSUM))
|
||||||
dflt_features |= NETIF_F_SCTP_CRC;
|
csum_offloads |= NETIF_F_SCTP_CRC;
|
||||||
|
|
||||||
if (idpf_is_cap_ena(adapter, IDPF_SEG_CAPS, VIRTCHNL2_CAP_SEG_IPV4_TCP))
|
if (idpf_is_cap_ena(adapter, IDPF_SEG_CAPS, VIRTCHNL2_CAP_SEG_IPV4_TCP))
|
||||||
dflt_features |= NETIF_F_TSO;
|
tso_offloads |= NETIF_F_TSO;
|
||||||
if (idpf_is_cap_ena(adapter, IDPF_SEG_CAPS, VIRTCHNL2_CAP_SEG_IPV6_TCP))
|
if (idpf_is_cap_ena(adapter, IDPF_SEG_CAPS, VIRTCHNL2_CAP_SEG_IPV6_TCP))
|
||||||
dflt_features |= NETIF_F_TSO6;
|
tso_offloads |= NETIF_F_TSO6;
|
||||||
if (idpf_is_cap_ena_all(adapter, IDPF_SEG_CAPS,
|
if (idpf_is_cap_ena_all(adapter, IDPF_SEG_CAPS,
|
||||||
VIRTCHNL2_CAP_SEG_IPV4_UDP |
|
VIRTCHNL2_CAP_SEG_IPV4_UDP |
|
||||||
VIRTCHNL2_CAP_SEG_IPV6_UDP))
|
VIRTCHNL2_CAP_SEG_IPV6_UDP))
|
||||||
dflt_features |= NETIF_F_GSO_UDP_L4;
|
tso_offloads |= NETIF_F_GSO_UDP_L4;
|
||||||
if (idpf_is_cap_ena_all(adapter, IDPF_RSC_CAPS, IDPF_CAP_RSC))
|
if (idpf_is_cap_ena_all(adapter, IDPF_RSC_CAPS, IDPF_CAP_RSC))
|
||||||
offloads |= NETIF_F_GRO_HW;
|
other_offloads |= NETIF_F_GRO_HW;
|
||||||
/* advertise to stack only if offloads for encapsulated packets is
|
|
||||||
* supported
|
|
||||||
*/
|
|
||||||
if (idpf_is_cap_ena(vport->adapter, IDPF_SEG_CAPS,
|
|
||||||
VIRTCHNL2_CAP_SEG_TX_SINGLE_TUNNEL)) {
|
|
||||||
offloads |= NETIF_F_GSO_UDP_TUNNEL |
|
|
||||||
NETIF_F_GSO_GRE |
|
|
||||||
NETIF_F_GSO_GRE_CSUM |
|
|
||||||
NETIF_F_GSO_PARTIAL |
|
|
||||||
NETIF_F_GSO_UDP_TUNNEL_CSUM |
|
|
||||||
NETIF_F_GSO_IPXIP4 |
|
|
||||||
NETIF_F_GSO_IPXIP6 |
|
|
||||||
0;
|
|
||||||
|
|
||||||
if (!idpf_is_cap_ena_all(vport->adapter, IDPF_CSUM_CAPS,
|
|
||||||
IDPF_CAP_TUNNEL_TX_CSUM))
|
|
||||||
netdev->gso_partial_features |=
|
|
||||||
NETIF_F_GSO_UDP_TUNNEL_CSUM;
|
|
||||||
|
|
||||||
netdev->gso_partial_features |= NETIF_F_GSO_GRE_CSUM;
|
|
||||||
offloads |= NETIF_F_TSO_MANGLEID;
|
|
||||||
}
|
|
||||||
if (idpf_is_cap_ena(adapter, IDPF_OTHER_CAPS, VIRTCHNL2_CAP_LOOPBACK))
|
if (idpf_is_cap_ena(adapter, IDPF_OTHER_CAPS, VIRTCHNL2_CAP_LOOPBACK))
|
||||||
offloads |= NETIF_F_LOOPBACK;
|
other_offloads |= NETIF_F_LOOPBACK;
|
||||||
|
|
||||||
netdev->features |= dflt_features;
|
netdev->features |= dflt_features | csum_offloads | tso_offloads;
|
||||||
netdev->hw_features |= dflt_features | offloads;
|
netdev->hw_features |= netdev->features | other_offloads;
|
||||||
netdev->hw_enc_features |= dflt_features | offloads;
|
netdev->vlan_features |= netdev->features | other_offloads;
|
||||||
|
netdev->hw_enc_features |= dflt_features | other_offloads;
|
||||||
idpf_set_ethtool_ops(netdev);
|
idpf_set_ethtool_ops(netdev);
|
||||||
SET_NETDEV_DEV(netdev, &adapter->pdev->dev);
|
SET_NETDEV_DEV(netdev, &adapter->pdev->dev);
|
||||||
|
|
||||||
@@ -1127,11 +1108,9 @@ static struct idpf_vport *idpf_vport_alloc(struct idpf_adapter *adapter,
 
         num_max_q = max(max_q->max_txq, max_q->max_rxq);
         vport->q_vector_idxs = kcalloc(num_max_q, sizeof(u16), GFP_KERNEL);
-        if (!vport->q_vector_idxs) {
-                kfree(vport);
+        if (!vport->q_vector_idxs)
+                goto free_vport;
 
-                return NULL;
-        }
         idpf_vport_init(vport, max_q);
 
         /* This alloc is done separate from the LUT because it's not strictly
@@ -1141,11 +1120,9 @@ static struct idpf_vport *idpf_vport_alloc(struct idpf_adapter *adapter,
          */
         rss_data = &adapter->vport_config[idx]->user_config.rss_data;
         rss_data->rss_key = kzalloc(rss_data->rss_key_size, GFP_KERNEL);
-        if (!rss_data->rss_key) {
-                kfree(vport);
+        if (!rss_data->rss_key)
+                goto free_vector_idxs;
 
-                return NULL;
-        }
         /* Initialize default rss key */
         netdev_rss_key_fill((void *)rss_data->rss_key, rss_data->rss_key_size);
 
@@ -1158,6 +1135,13 @@ static struct idpf_vport *idpf_vport_alloc(struct idpf_adapter *adapter,
         adapter->next_vport = idpf_get_free_slot(adapter);
 
         return vport;
+
+free_vector_idxs:
+        kfree(vport->q_vector_idxs);
+free_vport:
+        kfree(vport);
+
+        return NULL;
 }
 
 /**
@@ -89,6 +89,7 @@ static void idpf_shutdown(struct pci_dev *pdev)
 {
         struct idpf_adapter *adapter = pci_get_drvdata(pdev);
 
+        cancel_delayed_work_sync(&adapter->serv_task);
         cancel_delayed_work_sync(&adapter->vc_event_task);
         idpf_vc_core_deinit(adapter);
         idpf_deinit_dflt_mbx(adapter);
@@ -1290,6 +1290,8 @@ void igc_ptp_reset(struct igc_adapter *adapter)
         /* reset the tstamp_config */
         igc_ptp_set_timestamp_mode(adapter, &adapter->tstamp_config);
 
+        mutex_lock(&adapter->ptm_lock);
+
         spin_lock_irqsave(&adapter->tmreg_lock, flags);
 
         switch (adapter->hw.mac.type) {
@@ -1308,7 +1310,6 @@ void igc_ptp_reset(struct igc_adapter *adapter)
                 if (!igc_is_crosststamp_supported(adapter))
                         break;
 
-                mutex_lock(&adapter->ptm_lock);
                 wr32(IGC_PCIE_DIG_DELAY, IGC_PCIE_DIG_DELAY_DEFAULT);
                 wr32(IGC_PCIE_PHY_DELAY, IGC_PCIE_PHY_DELAY_DEFAULT);
 
@@ -1332,7 +1333,6 @@ void igc_ptp_reset(struct igc_adapter *adapter)
                         netdev_err(adapter->netdev, "Timeout reading IGC_PTM_STAT register\n");
 
                 igc_ptm_reset(hw);
-                mutex_unlock(&adapter->ptm_lock);
                 break;
         default:
                 /* No work to do. */
@@ -1349,5 +1349,7 @@ void igc_ptp_reset(struct igc_adapter *adapter)
 out:
         spin_unlock_irqrestore(&adapter->tmreg_lock, flags);
 
+        mutex_unlock(&adapter->ptm_lock);
+
         wrfl();
 }
@@ -1184,7 +1184,7 @@ static void octep_hb_timeout_task(struct work_struct *work)
                   miss_cnt);
         rtnl_lock();
         if (netif_running(oct->netdev))
-                octep_stop(oct->netdev);
+                dev_close(oct->netdev);
         rtnl_unlock();
 }
 
@@ -835,7 +835,9 @@ static void octep_vf_tx_timeout(struct net_device *netdev, unsigned int txqueue)
         struct octep_vf_device *oct = netdev_priv(netdev);
 
         netdev_hold(netdev, NULL, GFP_ATOMIC);
-        schedule_work(&oct->tx_timeout_task);
+        if (!schedule_work(&oct->tx_timeout_task))
+                netdev_put(netdev, NULL);
 }
 
 static int octep_vf_set_mac(struct net_device *netdev, void *p)
@@ -269,12 +269,8 @@ static const char * const mtk_clks_source_name[] = {
         "ethwarp_wocpu2",
         "ethwarp_wocpu1",
         "ethwarp_wocpu0",
-        "top_usxgmii0_sel",
-        "top_usxgmii1_sel",
         "top_sgm0_sel",
         "top_sgm1_sel",
-        "top_xfi_phy0_xtal_sel",
-        "top_xfi_phy1_xtal_sel",
         "top_eth_gmii_sel",
         "top_eth_refck_50m_sel",
         "top_eth_sys_200m_sel",
@@ -2206,14 +2202,18 @@ skip_rx:
                 ring->data[idx] = new_data;
                 rxd->rxd1 = (unsigned int)dma_addr;
 release_desc:
+                if (MTK_HAS_CAPS(eth->soc->caps, MTK_36BIT_DMA)) {
+                        if (unlikely(dma_addr == DMA_MAPPING_ERROR))
+                                addr64 = FIELD_GET(RX_DMA_ADDR64_MASK,
+                                                   rxd->rxd2);
+                        else
+                                addr64 = RX_DMA_PREP_ADDR64(dma_addr);
+                }
+
                 if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628))
                         rxd->rxd2 = RX_DMA_LSO;
                 else
-                        rxd->rxd2 = RX_DMA_PREP_PLEN0(ring->buf_size);
-
-                if (MTK_HAS_CAPS(eth->soc->caps, MTK_36BIT_DMA) &&
-                    likely(dma_addr != DMA_MAPPING_ERROR))
-                        rxd->rxd2 |= RX_DMA_PREP_ADDR64(dma_addr);
+                        rxd->rxd2 = RX_DMA_PREP_PLEN0(ring->buf_size) | addr64;
 
                 ring->calc_idx = idx;
                 done++;
@@ -1163,6 +1163,7 @@ static int mtk_star_tx_poll(struct napi_struct *napi, int budget)
         struct net_device *ndev = priv->ndev;
         unsigned int head = ring->head;
         unsigned int entry = ring->tail;
+        unsigned long flags;
 
         while (entry != head && count < (MTK_STAR_RING_NUM_DESCS - 1)) {
                 ret = mtk_star_tx_complete_one(priv);
@@ -1182,9 +1183,9 @@ static int mtk_star_tx_poll(struct napi_struct *napi, int budget)
                 netif_wake_queue(ndev);
 
         if (napi_complete(napi)) {
-                spin_lock(&priv->lock);
+                spin_lock_irqsave(&priv->lock, flags);
                 mtk_star_enable_dma_irq(priv, false, true);
-                spin_unlock(&priv->lock);
+                spin_unlock_irqrestore(&priv->lock, flags);
         }
 
         return 0;
@@ -1341,16 +1342,16 @@ push_new_skb:
 static int mtk_star_rx_poll(struct napi_struct *napi, int budget)
 {
         struct mtk_star_priv *priv;
+        unsigned long flags;
         int work_done = 0;
 
         priv = container_of(napi, struct mtk_star_priv, rx_napi);
 
         work_done = mtk_star_rx(priv, budget);
-        if (work_done < budget) {
-                napi_complete_done(napi, work_done);
-                spin_lock(&priv->lock);
+        if (work_done < budget && napi_complete_done(napi, work_done)) {
+                spin_lock_irqsave(&priv->lock, flags);
                 mtk_star_enable_dma_irq(priv, true, false);
-                spin_unlock(&priv->lock);
+                spin_unlock_irqrestore(&priv->lock, flags);
         }
 
         return work_done;
@@ -177,6 +177,7 @@ static int mlx5e_tx_reporter_ptpsq_unhealthy_recover(void *ctx)
 
         priv = ptpsq->txqsq.priv;
 
+        rtnl_lock();
         mutex_lock(&priv->state_lock);
         chs = &priv->channels;
         netdev = priv->netdev;
@@ -184,22 +185,19 @@ static int mlx5e_tx_reporter_ptpsq_unhealthy_recover(void *ctx)
         carrier_ok = netif_carrier_ok(netdev);
         netif_carrier_off(netdev);
 
-        rtnl_lock();
         mlx5e_deactivate_priv_channels(priv);
-        rtnl_unlock();
 
         mlx5e_ptp_close(chs->ptp);
         err = mlx5e_ptp_open(priv, &chs->params, chs->c[0]->lag_port, &chs->ptp);
 
-        rtnl_lock();
         mlx5e_activate_priv_channels(priv);
-        rtnl_unlock();
 
         /* return carrier back if needed */
         if (carrier_ok)
                 netif_carrier_on(netdev);
 
         mutex_unlock(&priv->state_lock);
+        rtnl_unlock();
 
         return err;
 }
@@ -165,9 +165,6 @@ static int mlx5e_tc_tun_parse_vxlan(struct mlx5e_priv *priv,
         struct flow_match_enc_keyid enc_keyid;
         void *misc_c, *misc_v;
 
-        misc_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, misc_parameters);
-        misc_v = MLX5_ADDR_OF(fte_match_param, spec->match_value, misc_parameters);
-
         if (!flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_KEYID))
                 return 0;
 
@@ -182,6 +179,30 @@ static int mlx5e_tc_tun_parse_vxlan(struct mlx5e_priv *priv,
                 err = mlx5e_tc_tun_parse_vxlan_gbp_option(priv, spec, f);
                 if (err)
                         return err;
+
+                /* We can't mix custom tunnel headers with symbolic ones and we
+                 * don't have a symbolic field name for GBP, so we use custom
+                 * tunnel headers in this case. We need hardware support to
+                 * match on custom tunnel headers, but we already know it's
+                 * supported because the previous call successfully checked for
+                 * that.
+                 */
+                misc_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
+                                      misc_parameters_5);
+                misc_v = MLX5_ADDR_OF(fte_match_param, spec->match_value,
+                                      misc_parameters_5);
+
+                /* Shift by 8 to account for the reserved bits in the vxlan
+                 * header after the VNI.
+                 */
+                MLX5_SET(fte_match_set_misc5, misc_c, tunnel_header_1,
+                         be32_to_cpu(enc_keyid.mask->keyid) << 8);
+                MLX5_SET(fte_match_set_misc5, misc_v, tunnel_header_1,
+                         be32_to_cpu(enc_keyid.key->keyid) << 8);
+
+                spec->match_criteria_enable |= MLX5_MATCH_MISC_PARAMETERS_5;
+
+                return 0;
         }
 
         /* match on VNI is required */
@@ -195,6 +216,11 @@ static int mlx5e_tc_tun_parse_vxlan(struct mlx5e_priv *priv,
                 return -EOPNOTSUPP;
         }
 
+        misc_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
+                              misc_parameters);
+        misc_v = MLX5_ADDR_OF(fte_match_param, spec->match_value,
+                              misc_parameters);
+
         MLX5_SET(fte_match_set_misc, misc_c, vxlan_vni,
                  be32_to_cpu(enc_keyid.mask->keyid));
         MLX5_SET(fte_match_set_misc, misc_v, vxlan_vni,
@@ -1750,9 +1750,6 @@ extra_split_attr_dests_needed(struct mlx5e_tc_flow *flow, struct mlx5_flow_attr
             !list_is_first(&attr->list, &flow->attrs))
                 return 0;
 
-        if (flow_flag_test(flow, SLOW))
-                return 0;
-
         esw_attr = attr->esw_attr;
         if (!esw_attr->split_count ||
             esw_attr->split_count == esw_attr->out_count - 1)
@@ -1766,7 +1763,7 @@ extra_split_attr_dests_needed(struct mlx5e_tc_flow *flow, struct mlx5_flow_attr
         for (i = esw_attr->split_count; i < esw_attr->out_count; i++) {
                 /* external dest with encap is considered as internal by firmware */
                 if (esw_attr->dests[i].vport == MLX5_VPORT_UPLINK &&
-                    !(esw_attr->dests[i].flags & MLX5_ESW_DEST_ENCAP_VALID))
+                    !(esw_attr->dests[i].flags & MLX5_ESW_DEST_ENCAP))
                         ext_dest = true;
                 else
                         int_dest = true;
@@ -3514,7 +3514,9 @@ int esw_offloads_enable(struct mlx5_eswitch *esw)
         int err;
 
         mutex_init(&esw->offloads.termtbl_mutex);
-        mlx5_rdma_enable_roce(esw->dev);
+        err = mlx5_rdma_enable_roce(esw->dev);
+        if (err)
+                goto err_roce;
 
         err = mlx5_esw_host_number_init(esw);
         if (err)
@@ -3575,6 +3577,7 @@ err_vport_metadata:
         esw_offloads_metadata_uninit(esw);
 err_metadata:
         mlx5_rdma_disable_roce(esw->dev);
+err_roce:
         mutex_destroy(&esw->offloads.termtbl_mutex);
         return err;
 }
@@ -118,8 +118,8 @@ static void mlx5_rdma_make_default_gid(struct mlx5_core_dev *dev, union ib_gid *
 
 static int mlx5_rdma_add_roce_addr(struct mlx5_core_dev *dev)
 {
+        u8 mac[ETH_ALEN] = {};
         union ib_gid gid;
-        u8 mac[ETH_ALEN];
 
         mlx5_rdma_make_default_gid(dev, &gid);
         return mlx5_core_roce_gid_set(dev, 0,
@@ -140,17 +140,17 @@ void mlx5_rdma_disable_roce(struct mlx5_core_dev *dev)
         mlx5_nic_vport_disable_roce(dev);
 }
 
-void mlx5_rdma_enable_roce(struct mlx5_core_dev *dev)
+int mlx5_rdma_enable_roce(struct mlx5_core_dev *dev)
 {
         int err;
 
         if (!MLX5_CAP_GEN(dev, roce))
-                return;
+                return 0;
 
         err = mlx5_nic_vport_enable_roce(dev);
         if (err) {
                 mlx5_core_err(dev, "Failed to enable RoCE: %d\n", err);
-                return;
+                return err;
         }
 
         err = mlx5_rdma_add_roce_addr(dev);
@@ -165,10 +165,11 @@ void mlx5_rdma_enable_roce(struct mlx5_core_dev *dev)
                 goto del_roce_addr;
         }
 
-        return;
+        return err;
 
 del_roce_addr:
         mlx5_rdma_del_roce_addr(dev);
 disable_roce:
         mlx5_nic_vport_disable_roce(dev);
+        return err;
 }
@@ -8,12 +8,12 @@
 
 #ifdef CONFIG_MLX5_ESWITCH
 
-void mlx5_rdma_enable_roce(struct mlx5_core_dev *dev);
+int mlx5_rdma_enable_roce(struct mlx5_core_dev *dev);
 void mlx5_rdma_disable_roce(struct mlx5_core_dev *dev);
 
 #else /* CONFIG_MLX5_ESWITCH */
 
-static inline void mlx5_rdma_enable_roce(struct mlx5_core_dev *dev) {}
+static inline int mlx5_rdma_enable_roce(struct mlx5_core_dev *dev) { return 0; }
 static inline void mlx5_rdma_disable_roce(struct mlx5_core_dev *dev) {}
 
 #endif /* CONFIG_MLX5_ESWITCH */
@@ -1815,6 +1815,7 @@ static void lan743x_tx_frame_add_lso(struct lan743x_tx *tx,
         if (nr_frags <= 0) {
                 tx->frame_data0 |= TX_DESC_DATA0_LS_;
                 tx->frame_data0 |= TX_DESC_DATA0_IOC_;
+                tx->frame_last = tx->frame_first;
         }
         tx_descriptor = &tx->ring_cpu_ptr[tx->frame_tail];
         tx_descriptor->data0 = cpu_to_le32(tx->frame_data0);
@@ -1884,6 +1885,7 @@ static int lan743x_tx_frame_add_fragment(struct lan743x_tx *tx,
                 tx->frame_first = 0;
                 tx->frame_data0 = 0;
                 tx->frame_tail = 0;
+                tx->frame_last = 0;
                 return -ENOMEM;
         }
 
@@ -1924,16 +1926,18 @@ static void lan743x_tx_frame_end(struct lan743x_tx *tx,
             TX_DESC_DATA0_DTYPE_DATA_) {
                 tx->frame_data0 |= TX_DESC_DATA0_LS_;
                 tx->frame_data0 |= TX_DESC_DATA0_IOC_;
+                tx->frame_last = tx->frame_tail;
         }
 
-        tx_descriptor = &tx->ring_cpu_ptr[tx->frame_tail];
-        buffer_info = &tx->buffer_info[tx->frame_tail];
+        tx_descriptor = &tx->ring_cpu_ptr[tx->frame_last];
+        buffer_info = &tx->buffer_info[tx->frame_last];
         buffer_info->skb = skb;
         if (time_stamp)
                 buffer_info->flags |= TX_BUFFER_INFO_FLAG_TIMESTAMP_REQUESTED;
         if (ignore_sync)
                 buffer_info->flags |= TX_BUFFER_INFO_FLAG_IGNORE_SYNC;
 
+        tx_descriptor = &tx->ring_cpu_ptr[tx->frame_tail];
         tx_descriptor->data0 = cpu_to_le32(tx->frame_data0);
         tx->frame_tail = lan743x_tx_next_index(tx, tx->frame_tail);
         tx->last_tail = tx->frame_tail;
@@ -980,6 +980,7 @@ struct lan743x_tx {
         u32 frame_first;
         u32 frame_data0;
         u32 frame_tail;
+        u32 frame_last;
 
         struct lan743x_tx_buffer_info *buffer_info;
 
@@ -830,6 +830,7 @@ EXPORT_SYMBOL(ocelot_vlan_prepare);
 int ocelot_vlan_add(struct ocelot *ocelot, int port, u16 vid, bool pvid,
                     bool untagged)
 {
+        struct ocelot_port *ocelot_port = ocelot->ports[port];
         int err;
 
         /* Ignore VID 0 added to our RX filter by the 8021q module, since
@@ -849,6 +850,11 @@ int ocelot_vlan_add(struct ocelot *ocelot, int port, u16 vid, bool pvid,
                                            ocelot_bridge_vlan_find(ocelot, vid));
                 if (err)
                         return err;
+        } else if (ocelot_port->pvid_vlan &&
+                   ocelot_bridge_vlan_find(ocelot, vid) == ocelot_port->pvid_vlan) {
+                err = ocelot_port_set_pvid(ocelot, port, NULL);
+                if (err)
+                        return err;
         }
 
         /* Untagged egress vlan clasification */
Some files were not shown because too many files have changed in this diff.