Merge remote-tracking branch 'stable/linux-6.6.y' into rpi-6.6.y

Dom Cobley
2025-01-10 11:47:21 +00:00
257 changed files with 3086 additions and 1789 deletions

View File

@@ -15,7 +15,7 @@ Please notice, however, that, if:
you should use the main media development tree ``master`` branch:
https://git.linuxtv.org/media_tree.git/
https://git.linuxtv.org/media.git/
In this case, you may find some useful information at the
`LinuxTv wiki pages <https://linuxtv.org/wiki>`_:

View File

@@ -67,7 +67,7 @@ Changes / Fixes
Please mail to linux-media AT vger.kernel.org unified diffs against
the linux media git tree:
https://git.linuxtv.org/media_tree.git/
https://git.linuxtv.org/media.git/
This is done by committing a patch at a clone of the git tree and
submitting the patch using ``git send-email``. Don't forget to

View File

@@ -244,8 +244,9 @@ stable kernels.
+----------------+-----------------+-----------------+-----------------------------+
| Hisilicon      | Hip08 SMMU PMCG | #162001800      | N/A                         |
+----------------+-----------------+-----------------+-----------------------------+
| Hisilicon      | Hip08 SMMU PMCG | #162001900      | N/A                         |
|                | Hip09 SMMU PMCG |                 |                             |
| Hisilicon      | Hip{08,09,09A,10| #162001900      | N/A                         |
|                | ,10C,11}        |                 |                             |
|                | SMMU PMCG       |                 |                             |
+----------------+-----------------+-----------------+-----------------------------+
| Qualcomm Tech. | Kryo/Falkor v1  | E1003           | QCOM_FALKOR_ERRATUM_1003    |

View File

@@ -87,7 +87,7 @@ properties:
adi,dsi-lanes:
description: Number of DSI data lanes connected to the DSI host.
$ref: /schemas/types.yaml#/definitions/uint32
enum: [ 1, 2, 3, 4 ]
enum: [ 2, 3, 4 ]
ports:
description:

View File

@@ -48,6 +48,8 @@ Supported adapters:
* Intel Raptor Lake (PCH)
* Intel Meteor Lake (SOC and PCH)
* Intel Birch Stream (SOC)
* Intel Arrow Lake (SOC)
* Intel Panther Lake (SOC)
Datasheets: Publicly available at the Intel website

View File

@@ -1,7 +1,7 @@
# SPDX-License-Identifier: GPL-2.0
VERSION = 6
PATCHLEVEL = 6
SUBLEVEL = 69
SUBLEVEL = 70
EXTRAVERSION =
NAME = Pinguïn Aangedreven

View File

@@ -6,7 +6,7 @@
KBUILD_DEFCONFIG := haps_hs_smp_defconfig
ifeq ($(CROSS_COMPILE),)
CROSS_COMPILE := $(call cc-cross-prefix, arc-linux- arceb-linux-)
CROSS_COMPILE := $(call cc-cross-prefix, arc-linux- arceb-linux- arc-linux-gnu-)
endif
cflags-y += -fno-common -pipe -fno-builtin -mmedium-calls -D__linux__

View File

@@ -226,32 +226,6 @@ static void __init node_mem_init(unsigned int node)
#ifdef CONFIG_ACPI_NUMA
/*
* Sanity check to catch more bad NUMA configurations (they are amazingly
* common). Make sure the nodes cover all memory.
*/
static bool __init numa_meminfo_cover_memory(const struct numa_meminfo *mi)
{
int i;
u64 numaram, biosram;
numaram = 0;
for (i = 0; i < mi->nr_blks; i++) {
u64 s = mi->blk[i].start >> PAGE_SHIFT;
u64 e = mi->blk[i].end >> PAGE_SHIFT;
numaram += e - s;
numaram -= __absent_pages_in_range(mi->blk[i].nid, s, e);
if ((s64)numaram < 0)
numaram = 0;
}
max_pfn = max_low_pfn;
biosram = max_pfn - absent_pages_in_range(0, max_pfn);
BUG_ON((s64)(biosram - numaram) >= (1 << (20 - PAGE_SHIFT)));
return true;
}
static void __init add_node_intersection(u32 node, u64 start, u64 size, u32 type)
{
static unsigned long num_physpages;
@@ -396,7 +370,7 @@ int __init init_numa_memory(void)
return -EINVAL;
init_node_memblock();
if (numa_meminfo_cover_memory(&numa_meminfo) == false)
if (!memblock_validate_numa_coverage(SZ_1M))
return -EINVAL;
for_each_node_mask(node, node_possible_map) {
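This hunk (and the matching x86 hunk later in this merge) replaces a per-arch numa_meminfo_cover_memory() sanity check with the generic memblock_validate_numa_coverage(SZ_1M) helper, which fails NUMA init when more than 1 MiB of memory ends up outside any node. A minimal userspace sketch of the underlying accounting, not the kernel implementation (types and sample values are invented for illustration):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Hypothetical stand-in for one NUMA memory block: [start, end) in bytes. */
struct blk { uint64_t start, end; };

/* Return true when the node blocks leave at most 'threshold' bytes of the
 * range [0, total) uncovered, the same idea as
 * memblock_validate_numa_coverage(SZ_1M). The real code also subtracts
 * holes (absent pages) inside each block. */
static bool covers_memory(const struct blk *blks, int n,
                          uint64_t total, uint64_t threshold)
{
    uint64_t covered = 0;

    for (int i = 0; i < n; i++)
        covered += blks[i].end - blks[i].start;
    return total - covered < threshold;
}

int main(void)
{
    struct blk nodes[] = { { 0, 1ull << 30 }, { 1ull << 30, 2ull << 30 } };

    printf("%s\n", covers_memory(nodes, 2, 2ull << 30, 1 << 20)
                       ? "ok" : "bad NUMA config");
    return 0;
}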

View File

@@ -601,7 +601,6 @@ struct seq_buf ppc_hw_desc __initdata = {
.buffer = ppc_hw_desc_buf,
.size = sizeof(ppc_hw_desc_buf),
.len = 0,
.readpos = 0,
};
static __init void probe_machine(void)

View File

@@ -76,7 +76,7 @@ static void warn_bad_vsyscall(const char *level, struct pt_regs *regs,
if (!show_unhandled_signals)
return;
printk_ratelimited("%s%s[%d] %s ip:%lx cs:%lx sp:%lx ax:%lx si:%lx di:%lx\n",
printk_ratelimited("%s%s[%d] %s ip:%lx cs:%x sp:%lx ax:%lx si:%lx di:%lx\n",
level, current->comm, task_pid_nr(current),
message, regs->ip, regs->cs,
regs->sp, regs->ax, regs->si, regs->di);

View File

@@ -56,18 +56,64 @@ struct pt_regs {
#else /* __i386__ */
struct fred_cs {
/* CS selector */
u64 cs : 16,
/* Stack level at event time */
sl : 2,
/* IBT in WAIT_FOR_ENDBRANCH state */
wfe : 1,
: 45;
};
struct fred_ss {
/* SS selector */
u64 ss : 16,
/* STI state */
sti : 1,
/* Set if syscall, sysenter or INT n */
swevent : 1,
/* Event is NMI type */
nmi : 1,
: 13,
/* Event vector */
vector : 8,
: 8,
/* Event type */
type : 4,
: 4,
/* Event was incident to enclave execution */
enclave : 1,
/* CPU was in long mode */
lm : 1,
/*
* Nested exception during FRED delivery, not set
* for #DF.
*/
nested : 1,
: 1,
/*
* The length of the instruction causing the event.
* Only set for INTO, INT1, INT3, INT n, SYSCALL
* and SYSENTER. 0 otherwise.
*/
insnlen : 4;
};
struct pt_regs {
/*
* C ABI says these regs are callee-preserved. They aren't saved on kernel entry
* unless syscall needs a complete, fully filled "struct pt_regs".
*/
/*
* C ABI says these regs are callee-preserved. They aren't saved on
* kernel entry unless syscall needs a complete, fully filled
* "struct pt_regs".
*/
unsigned long r15;
unsigned long r14;
unsigned long r13;
unsigned long r12;
unsigned long bp;
unsigned long bx;
/* These regs are callee-clobbered. Always saved on kernel entry. */
unsigned long r11;
unsigned long r10;
unsigned long r9;
@@ -77,18 +123,50 @@ struct pt_regs {
unsigned long dx;
unsigned long si;
unsigned long di;
/*
* On syscall entry, this is syscall#. On CPU exception, this is error code.
* On hw interrupt, it's IRQ number:
*/
/*
* orig_ax is used on entry for:
* - the syscall number (syscall, sysenter, int80)
* - error_code stored by the CPU on traps and exceptions
* - the interrupt number for device interrupts
*
* A FRED stack frame starts here:
* 1) It _always_ includes an error code;
*
* 2) The return frame for ERET[US] starts here, but
* the content of orig_ax is ignored.
*/
unsigned long orig_ax;
/* Return frame for iretq */
/* The IRETQ return frame starts here */
unsigned long ip;
unsigned long cs;
union {
/* CS selector */
u16 cs;
/* The extended 64-bit data slot containing CS */
u64 csx;
/* The FRED CS extension */
struct fred_cs fred_cs;
};
unsigned long flags;
unsigned long sp;
unsigned long ss;
/* top of stack page */
union {
/* SS selector */
u16 ss;
/* The extended 64-bit data slot containing SS */
u64 ssx;
/* The FRED SS extension */
struct fred_ss fred_ss;
};
/*
* Top of stack on IDT systems, while FRED systems have extra fields
* defined above for storing exception related information, e.g. CR2 or
* DR6.
*/
};
#endif /* !__i386__ */
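The new fred_cs/fred_ss bitfields pack the FRED event metadata (selector, vector, event type, NMI/long-mode flags, instruction length) into the 64-bit CS/SS stack slots, which is why cs and ss become unions of a 16-bit selector, a 64-bit csx/ssx slot, and the decoded struct. A standalone sketch of decoding such a slot, assuming GCC/Clang x86-64 bitfield layout; the sample value is invented, only the field layout is taken from the struct above:

#include <stdint.h>
#include <stdio.h>

/* Mirror of the fred_ss layout shown above (bit positions per the struct). */
struct fred_ss_demo {
    uint64_t ss      : 16,
             sti     : 1,
             swevent : 1,
             nmi     : 1,
                     : 13,
             vector  : 8,
                     : 8,
             type    : 4,
                     : 4,
             enclave : 1,
             lm      : 1,
             nested  : 1,
                     : 1,
             insnlen : 4;
};

int main(void)
{
    /* Invented example: selector 0x18, vector 14, event type 3, 64-bit mode. */
    union { uint64_t ssx; struct fred_ss_demo f; } u = { .ssx = 0 };

    u.f.ss = 0x18; u.f.vector = 14; u.f.type = 3; u.f.lm = 1;
    printf("ssx=%#llx ss=%#x vector=%u type=%u lm=%u\n",
           (unsigned long long)u.ssx, u.f.ss, u.f.vector, u.f.type, u.f.lm);
    return 0;
}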

View File

@@ -34,4 +34,8 @@ static inline void __tlb_remove_table(void *table)
free_page_and_swap_cache(table);
}
static inline void invlpg(unsigned long addr)
{
asm volatile("invlpg (%0)" ::"r" (addr) : "memory");
}
#endif /* _ASM_X86_TLB_H */

View File

@@ -99,9 +99,9 @@ obj-$(CONFIG_TRACING) += trace.o
obj-$(CONFIG_RETHOOK) += rethook.o
obj-$(CONFIG_CRASH_CORE) += crash_core_$(BITS).o
obj-$(CONFIG_KEXEC_CORE) += machine_kexec_$(BITS).o
obj-$(CONFIG_KEXEC_CORE) += relocate_kernel_$(BITS).o crash.o
obj-$(CONFIG_KEXEC_CORE) += relocate_kernel_$(BITS).o
obj-$(CONFIG_KEXEC_FILE) += kexec-bzimage64.o
obj-$(CONFIG_CRASH_DUMP) += crash_dump_$(BITS).o
obj-$(CONFIG_CRASH_DUMP) += crash_dump_$(BITS).o crash.o
obj-y += kprobes/
obj-$(CONFIG_MODULES) += module.o
obj-$(CONFIG_X86_32) += doublefault_32.o

View File

@@ -81,6 +81,34 @@ static void do_user_cp_fault(struct pt_regs *regs, unsigned long error_code)
static __ro_after_init bool ibt_fatal = true;
/*
* By definition, all missing-ENDBRANCH #CPs are a result of WFE && !ENDBR.
*
* For the kernel IBT no ENDBR selftest where #CPs are deliberately triggered,
* the WFE state of the interrupted context needs to be cleared to let execution
* continue. Otherwise when the CPU resumes from the instruction that just
* caused the previous #CP, another missing-ENDBRANCH #CP is raised and the CPU
* enters a dead loop.
*
* This is not a problem with IDT because it doesn't preserve WFE and IRET doesn't
* set WFE. But FRED provides space on the entry stack (in an expanded CS area)
* to save and restore the WFE state, thus the WFE state is no longer clobbered,
* so software must clear it.
*/
static void ibt_clear_fred_wfe(struct pt_regs *regs)
{
/*
* No need to do any FRED checks.
*
* For IDT event delivery, the high-order 48 bits of CS are pushed
* as 0s into the stack, and later IRET ignores these bits.
*
* For FRED, a test to check if fred_cs.wfe is set would be dropped
* by compilers.
*/
regs->fred_cs.wfe = 0;
}
static void do_kernel_cp_fault(struct pt_regs *regs, unsigned long error_code)
{
if ((error_code & CP_EC) != CP_ENDBR) {
@@ -90,6 +118,7 @@ static void do_kernel_cp_fault(struct pt_regs *regs, unsigned long error_code)
if (unlikely(regs->ip == (unsigned long)&ibt_selftest_noendbr)) {
regs->ax = 0;
ibt_clear_fred_wfe(regs);
return;
}
@@ -97,6 +126,7 @@ static void do_kernel_cp_fault(struct pt_regs *regs, unsigned long error_code)
if (!ibt_fatal) {
printk(KERN_DEFAULT CUT_HERE);
__warn(__FILE__, __LINE__, (void *)regs->ip, TAINT_WARN, regs, NULL);
ibt_clear_fred_wfe(regs);
return;
}
BUG();

View File

@@ -209,7 +209,9 @@ static void hv_machine_shutdown(void)
if (kexec_in_progress)
hyperv_cleanup();
}
#endif /* CONFIG_KEXEC_CORE */
#ifdef CONFIG_CRASH_DUMP
static void hv_machine_crash_shutdown(struct pt_regs *regs)
{
if (hv_crash_handler)
@@ -221,7 +223,64 @@ static void hv_machine_crash_shutdown(struct pt_regs *regs)
/* Disable the hypercall page when there is only 1 active CPU. */
hyperv_cleanup();
}
#endif /* CONFIG_KEXEC_CORE */
#endif /* CONFIG_CRASH_DUMP */
static u64 hv_ref_counter_at_suspend;
static void (*old_save_sched_clock_state)(void);
static void (*old_restore_sched_clock_state)(void);
/*
* Hyper-V clock counter resets during hibernation. Save and restore clock
* offset during suspend/resume, while also considering the time passed
* before suspend. This is to make sure that sched_clock using hv tsc page
* based clocksource, proceeds from where it left off during suspend and
* it shows correct time for the timestamps of kernel messages after resume.
*/
static void save_hv_clock_tsc_state(void)
{
hv_ref_counter_at_suspend = hv_read_reference_counter();
}
static void restore_hv_clock_tsc_state(void)
{
/*
* Adjust the offsets used by hv tsc clocksource to
* account for the time spent before hibernation.
* adjusted value = reference counter (time) at suspend
* - reference counter (time) now.
*/
hv_adj_sched_clock_offset(hv_ref_counter_at_suspend - hv_read_reference_counter());
}
/*
* Functions to override save_sched_clock_state and restore_sched_clock_state
* functions of x86_platform. The Hyper-V clock counter is reset during
* suspend-resume and the offset used to measure time needs to be
* corrected, post resume.
*/
static void hv_save_sched_clock_state(void)
{
old_save_sched_clock_state();
save_hv_clock_tsc_state();
}
static void hv_restore_sched_clock_state(void)
{
restore_hv_clock_tsc_state();
old_restore_sched_clock_state();
}
static void __init x86_setup_ops_for_tsc_pg_clock(void)
{
if (!(ms_hyperv.features & HV_MSR_REFERENCE_TSC_AVAILABLE))
return;
old_save_sched_clock_state = x86_platform.save_sched_clock_state;
x86_platform.save_sched_clock_state = hv_save_sched_clock_state;
old_restore_sched_clock_state = x86_platform.restore_sched_clock_state;
x86_platform.restore_sched_clock_state = hv_restore_sched_clock_state;
}
#endif /* CONFIG_HYPERV */
static uint32_t __init ms_hyperv_platform(void)
@@ -493,9 +552,13 @@ static void __init ms_hyperv_init_platform(void)
no_timer_check = 1;
#endif
#if IS_ENABLED(CONFIG_HYPERV) && defined(CONFIG_KEXEC_CORE)
#if IS_ENABLED(CONFIG_HYPERV)
#if defined(CONFIG_KEXEC_CORE)
machine_ops.shutdown = hv_machine_shutdown;
#endif
#if defined(CONFIG_CRASH_DUMP)
machine_ops.crash_shutdown = hv_machine_crash_shutdown;
#endif
#endif
if (ms_hyperv.features & HV_ACCESS_TSC_INVARIANT) {
/*
@@ -572,6 +635,7 @@ static void __init ms_hyperv_init_platform(void)
/* Register Hyper-V specific clocksource */
hv_init_clocksource();
x86_setup_ops_for_tsc_pg_clock();
hv_vtl_init_platform();
#endif
/*
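The fix works because sched_clock on a TSC-page clocksource is effectively "reference counter minus hv_sched_clock_offset"; subtracting the suspend-to-resume counter delta from the offset makes time continue from where it left off, which is also why the offset can go negative and loses __ro_after_init in the clocksource hunk below. A toy model of the arithmetic with invented counter readings, not Hyper-V code:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
    /* Invented readings of the Hyper-V reference counter (100 ns units). */
    uint64_t offset     = 1000;  /* hv_sched_clock_offset at boot           */
    uint64_t at_suspend = 5000;  /* hv_read_reference_counter() at suspend  */
    uint64_t at_resume  = 2000;  /* counter restarts low after hibernation  */

    /* hv_adj_sched_clock_offset(at_suspend - at_resume): */
    offset -= at_suspend - at_resume;  /* may wrap; unsigned math still works */

    /* sched_clock = counter - offset resumes at the pre-suspend value. */
    printf("sched_clock at resume = %llu (was %llu at suspend)\n",
           (unsigned long long)(at_resume - offset),
           (unsigned long long)(at_suspend - 1000));
    return 0;
}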

View File

@@ -263,11 +263,13 @@ setup_boot_parameters(struct kimage *image, struct boot_params *params,
memset(&params->hd0_info, 0, sizeof(params->hd0_info));
memset(&params->hd1_info, 0, sizeof(params->hd1_info));
#ifdef CONFIG_CRASH_DUMP
if (image->type == KEXEC_TYPE_CRASH) {
ret = crash_setup_memmap_entries(image, params);
if (ret)
return ret;
} else
#endif
setup_e820_entries(params);
nr_e820_entries = params->e820_entries;
@@ -428,12 +430,14 @@ static void *bzImage64_load(struct kimage *image, char *kernel,
return ERR_PTR(-EINVAL);
}
#ifdef CONFIG_CRASH_DUMP
/* Allocate and load backup region */
if (image->type == KEXEC_TYPE_CRASH) {
ret = crash_load_segments(image);
if (ret)
return ERR_PTR(ret);
}
#endif
/*
* Load purgatory. For 64bit entry point, purgatory code can be

View File

@@ -769,7 +769,7 @@ static struct notifier_block kvm_pv_reboot_nb = {
* won't be valid. In cases like kexec, in which you install a new kernel, this
* means a random memory location will be kept being written.
*/
#ifdef CONFIG_KEXEC_CORE
#ifdef CONFIG_CRASH_DUMP
static void kvm_crash_shutdown(struct pt_regs *regs)
{
kvm_guest_cpu_offline(true);
@@ -852,7 +852,7 @@ static void __init kvm_guest_init(void)
kvm_guest_cpu_init();
#endif
#ifdef CONFIG_KEXEC_CORE
#ifdef CONFIG_CRASH_DUMP
machine_ops.crash_shutdown = kvm_crash_shutdown;
#endif

View File

@@ -545,6 +545,8 @@ int arch_kimage_file_post_load_cleanup(struct kimage *image)
}
#endif /* CONFIG_KEXEC_FILE */
#ifdef CONFIG_CRASH_DUMP
static int
kexec_mark_range(unsigned long start, unsigned long end, bool protect)
{
@@ -589,6 +591,7 @@ void arch_kexec_unprotect_crashkres(void)
{
kexec_mark_crashkres(false);
}
#endif
/*
* During a traditional boot under SME, SME will encrypt the kernel,

View File

@@ -117,7 +117,7 @@ void __show_regs(struct pt_regs *regs, enum show_regs_mode mode,
printk("%sFS: %016lx(%04x) GS:%016lx(%04x) knlGS:%016lx\n",
log_lvl, fs, fsindex, gs, gsindex, shadowgs);
printk("%sCS: %04lx DS: %04x ES: %04x CR0: %016lx\n",
printk("%sCS: %04x DS: %04x ES: %04x CR0: %016lx\n",
log_lvl, regs->cs, ds, es, cr0);
printk("%sCR2: %016lx CR3: %016lx CR4: %016lx\n",
log_lvl, cr2, cr3, cr4);

View File

@@ -796,7 +796,7 @@ struct machine_ops machine_ops __ro_after_init = {
.emergency_restart = native_machine_emergency_restart,
.restart = native_machine_restart,
.halt = native_machine_halt,
#ifdef CONFIG_KEXEC_CORE
#ifdef CONFIG_CRASH_DUMP
.crash_shutdown = native_machine_crash_shutdown,
#endif
};
@@ -826,7 +826,7 @@ void machine_halt(void)
machine_ops.halt();
}
#ifdef CONFIG_KEXEC_CORE
#ifdef CONFIG_CRASH_DUMP
void machine_crash_shutdown(struct pt_regs *regs)
{
machine_ops.crash_shutdown(regs);

View File

@@ -547,7 +547,7 @@ static void __init reserve_crashkernel(void)
bool high = false;
int ret;
if (!IS_ENABLED(CONFIG_KEXEC_CORE))
if (!IS_ENABLED(CONFIG_CRASH_RESERVE))
return;
total_mem = memblock_phys_mem_size();

View File

@@ -282,7 +282,7 @@ struct smp_ops smp_ops = {
.smp_cpus_done = native_smp_cpus_done,
.stop_other_cpus = native_stop_other_cpus,
#if defined(CONFIG_KEXEC_CORE)
#if defined(CONFIG_CRASH_DUMP)
.crash_stop_other_cpus = kdump_nmi_shootdown_cpus,
#endif
.smp_send_reschedule = native_smp_send_reschedule,

View File

@@ -448,37 +448,6 @@ int __node_distance(int from, int to)
}
EXPORT_SYMBOL(__node_distance);
/*
* Sanity check to catch more bad NUMA configurations (they are amazingly
* common). Make sure the nodes cover all memory.
*/
static bool __init numa_meminfo_cover_memory(const struct numa_meminfo *mi)
{
u64 numaram, e820ram;
int i;
numaram = 0;
for (i = 0; i < mi->nr_blks; i++) {
u64 s = mi->blk[i].start >> PAGE_SHIFT;
u64 e = mi->blk[i].end >> PAGE_SHIFT;
numaram += e - s;
numaram -= __absent_pages_in_range(mi->blk[i].nid, s, e);
if ((s64)numaram < 0)
numaram = 0;
}
e820ram = max_pfn - absent_pages_in_range(0, max_pfn);
/* We seem to lose 3 pages somewhere. Allow 1M of slack. */
if ((s64)(e820ram - numaram) >= (1 << (20 - PAGE_SHIFT))) {
printk(KERN_ERR "NUMA: nodes only cover %LuMB of your %LuMB e820 RAM. Not used.\n",
(numaram << PAGE_SHIFT) >> 20,
(e820ram << PAGE_SHIFT) >> 20);
return false;
}
return true;
}
/*
* Mark all currently memblock-reserved physical memory (which covers the
* kernel's own memory ranges) as hot-unswappable.
@@ -584,7 +553,8 @@ static int __init numa_register_memblks(struct numa_meminfo *mi)
return -EINVAL;
}
}
if (!numa_meminfo_cover_memory(mi))
if (!memblock_validate_numa_coverage(SZ_1M))
return -EINVAL;
/* Finally register nodes. */

View File

@@ -19,6 +19,7 @@
#include <asm/cacheflush.h>
#include <asm/apic.h>
#include <asm/perf_event.h>
#include <asm/tlb.h>
#include "mm_internal.h"
@@ -1145,7 +1146,7 @@ STATIC_NOPV void native_flush_tlb_one_user(unsigned long addr)
bool cpu_pcide;
/* Flush 'addr' from the kernel PCID: */
asm volatile("invlpg (%0)" ::"r" (addr) : "memory");
invlpg(addr);
/* If PTI is off there is no user PCID and nothing to flush. */
if (!static_cpu_has(X86_FEATURE_PTI))

View File

@@ -141,7 +141,9 @@ static void xen_hvm_shutdown(void)
if (kexec_in_progress)
xen_reboot(SHUTDOWN_soft_reset);
}
#endif
#ifdef CONFIG_CRASH_DUMP
static void xen_hvm_crash_shutdown(struct pt_regs *regs)
{
native_machine_crash_shutdown(regs);
@@ -229,6 +231,8 @@ static void __init xen_hvm_guest_init(void)
#ifdef CONFIG_KEXEC_CORE
machine_ops.shutdown = xen_hvm_shutdown;
#endif
#ifdef CONFIG_CRASH_DUMP
machine_ops.crash_shutdown = xen_hvm_crash_shutdown;
#endif
}

View File

@@ -2517,7 +2517,7 @@ out:
}
EXPORT_SYMBOL_GPL(xen_remap_pfn);
#ifdef CONFIG_KEXEC_CORE
#ifdef CONFIG_VMCORE_INFO
phys_addr_t paddr_vmcoreinfo_note(void)
{
if (xen_pv_domain())

View File

@@ -66,6 +66,28 @@ const struct ecc_curve *ecc_get_curve(unsigned int curve_id)
}
EXPORT_SYMBOL(ecc_get_curve);
void ecc_digits_from_bytes(const u8 *in, unsigned int nbytes,
u64 *out, unsigned int ndigits)
{
int diff = ndigits - DIV_ROUND_UP(nbytes, sizeof(u64));
unsigned int o = nbytes & 7;
__be64 msd = 0;
/* diff > 0: not enough input bytes: set most significant digits to 0 */
if (diff > 0) {
ndigits -= diff;
memset(&out[ndigits - 1], 0, diff * sizeof(u64));
}
if (o) {
memcpy((u8 *)&msd + sizeof(msd) - o, in, o);
out[--ndigits] = be64_to_cpu(msd);
in += o;
}
ecc_swap_digits(in, out, ndigits);
}
EXPORT_SYMBOL(ecc_digits_from_bytes);
static u64 *ecc_alloc_digits_space(unsigned int ndigits)
{
size_t len = ndigits * sizeof(u64);
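ecc_digits_from_bytes() turns a big-endian byte string of arbitrary length into a little-endian array of 64-bit digits: it zero-fills missing high digits, assembles a partial most-significant digit from the nbytes % 8 leading bytes, then byte-swaps the remaining full digits. A userspace re-implementation of the same idea for illustration (not the kernel code; ecc_swap_digits is modelled with a plain loop):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

static uint64_t be64_load(const uint8_t *p)
{
    uint64_t v = 0;
    for (int i = 0; i < 8; i++)
        v = (v << 8) | p[i];
    return v;
}

/* Big-endian 'in[nbytes]' -> little-endian digit array 'out[ndigits]'. */
static void digits_from_bytes(const uint8_t *in, unsigned nbytes,
                              uint64_t *out, unsigned ndigits)
{
    int diff = (int)ndigits - (int)((nbytes + 7) / 8);
    unsigned o = nbytes & 7;

    if (diff > 0) {            /* not enough bytes: zero the high digits */
        ndigits -= diff;
        memset(&out[ndigits], 0, diff * sizeof(uint64_t));
    }
    if (o) {                   /* partial most-significant digit */
        uint8_t msd[8] = { 0 };
        memcpy(msd + 8 - o, in, o);
        out[--ndigits] = be64_load(msd);
        in += o;
    }
    for (unsigned i = 0; i < ndigits; i++)   /* full digits, reversed order */
        out[i] = be64_load(in + (ndigits - 1 - i) * 8);
}

int main(void)
{
    uint8_t bytes[9] = { 0x01, 2, 3, 4, 5, 6, 7, 8, 9 };
    uint64_t d[2];

    digits_from_bytes(bytes, 9, d, 2);
    printf("d[1]=%#llx d[0]=%#llx\n",
           (unsigned long long)d[1], (unsigned long long)d[0]);
    return 0;
}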

View File

@@ -35,40 +35,27 @@ struct ecdsa_signature_ctx {
static int ecdsa_get_signature_rs(u64 *dest, size_t hdrlen, unsigned char tag,
const void *value, size_t vlen, unsigned int ndigits)
{
size_t keylen = ndigits * sizeof(u64);
ssize_t diff = vlen - keylen;
size_t bufsize = ndigits * sizeof(u64);
const char *d = value;
u8 rs[ECC_MAX_BYTES];
if (!value || !vlen)
if (!value || !vlen || vlen > bufsize + 1)
return -EINVAL;
/* diff = 0: 'value' has exacly the right size
* diff > 0: 'value' has too many bytes; one leading zero is allowed that
* makes the value a positive integer; error on more
* diff < 0: 'value' is missing leading zeros, which we add
/*
* vlen may be 1 byte larger than bufsize due to a leading zero byte
* (necessary if the most significant bit of the integer is set).
*/
if (diff > 0) {
if (vlen > bufsize) {
/* skip over leading zeros that make 'value' a positive int */
if (*d == 0) {
vlen -= 1;
diff--;
d++;
}
if (diff)
} else {
return -EINVAL;
}
if (-diff >= keylen)
return -EINVAL;
if (diff) {
/* leading zeros not given in 'value' */
memset(rs, 0, -diff);
}
}
memcpy(&rs[-diff], d, vlen);
ecc_swap_digits((u64 *)rs, dest, ndigits);
ecc_digits_from_bytes(d, vlen, dest, ndigits);
return 0;
}
@@ -138,7 +125,7 @@ static int ecdsa_verify(struct akcipher_request *req)
{
struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
struct ecc_ctx *ctx = akcipher_tfm_ctx(tfm);
size_t keylen = ctx->curve->g.ndigits * sizeof(u64);
size_t bufsize = ctx->curve->g.ndigits * sizeof(u64);
struct ecdsa_signature_ctx sig_ctx = {
.curve = ctx->curve,
};
@@ -165,14 +152,14 @@ static int ecdsa_verify(struct akcipher_request *req)
goto error;
/* if the hash is shorter then we will add leading zeros to fit to ndigits */
diff = keylen - req->dst_len;
diff = bufsize - req->dst_len;
if (diff >= 0) {
if (diff)
memset(rawhash, 0, diff);
memcpy(&rawhash[diff], buffer + req->src_len, req->dst_len);
} else if (diff < 0) {
/* given hash is longer, we take the left-most bytes */
memcpy(&rawhash, buffer + req->src_len, keylen);
memcpy(&rawhash, buffer + req->src_len, bufsize);
}
ecc_swap_digits((u64 *)rawhash, hash, ctx->curve->g.ndigits);
@@ -222,9 +209,8 @@ static int ecdsa_ecc_ctx_reset(struct ecc_ctx *ctx)
static int ecdsa_set_pub_key(struct crypto_akcipher *tfm, const void *key, unsigned int keylen)
{
struct ecc_ctx *ctx = akcipher_tfm_ctx(tfm);
unsigned int digitlen, ndigits;
const unsigned char *d = key;
const u64 *digits = (const u64 *)&d[1];
unsigned int ndigits;
int ret;
ret = ecdsa_ecc_ctx_reset(ctx);
@@ -238,12 +224,17 @@ static int ecdsa_set_pub_key(struct crypto_akcipher *tfm, const void *key, unsig
return -EINVAL;
keylen--;
ndigits = (keylen >> 1) / sizeof(u64);
digitlen = keylen >> 1;
ndigits = DIV_ROUND_UP(digitlen, sizeof(u64));
if (ndigits != ctx->curve->g.ndigits)
return -EINVAL;
ecc_swap_digits(digits, ctx->pub_key.x, ndigits);
ecc_swap_digits(&digits[ndigits], ctx->pub_key.y, ndigits);
d++;
ecc_digits_from_bytes(d, digitlen, ctx->pub_key.x, ndigits);
ecc_digits_from_bytes(&d[digitlen], digitlen, ctx->pub_key.y, ndigits);
ret = ecc_is_pubkey_valid_full(ctx->curve, &ctx->pub_key);
ctx->pub_key_set = ret == 0;
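The rewritten ecdsa_get_signature_rs() replaces the open-coded diff bookkeeping with a simpler contract: a DER INTEGER for r or s may be at most one byte longer than the coordinate size, and only when that extra byte is a leading zero (required when the top bit is set so the value stays positive); anything shorter is handled by the zero-padding in ecc_digits_from_bytes(). A compact sketch of that validation rule, illustrative rather than the kernel parser:

#include <stddef.h>
#include <stdio.h>

/* Accept a big-endian DER INTEGER body 'v[vlen]' for a curve whose
 * coordinates are 'bufsize' bytes; strip the one allowed leading zero.
 * Returns the usable length, or -1 on malformed input. */
static int der_int_trim(const unsigned char **v, size_t vlen, size_t bufsize)
{
    if (!vlen || vlen > bufsize + 1)
        return -1;
    if (vlen > bufsize) {        /* only a single leading zero may overflow */
        if (**v != 0)
            return -1;
        (*v)++;
        vlen--;
    }
    return (int)vlen;            /* shorter values get zero-padded later */
}

int main(void)
{
    const unsigned char sig[] = { 0x00, 0x80, 0x01, 0x02, 0x03 };  /* made-up */
    const unsigned char *p = sig;

    printf("usable bytes: %d\n", der_int_trim(&p, sizeof(sig), 4));
    return 0;
}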

View File

@@ -1712,6 +1712,15 @@ static struct acpi_platform_list pmcg_plat_info[] __initdata = {
/* HiSilicon Hip09 Platform */
{"HISI ", "HIP09 ", 0, ACPI_SIG_IORT, greater_than_or_equal,
"Erratum #162001900", IORT_SMMU_V3_PMCG_HISI_HIP09},
{"HISI ", "HIP09A ", 0, ACPI_SIG_IORT, greater_than_or_equal,
"Erratum #162001900", IORT_SMMU_V3_PMCG_HISI_HIP09},
/* HiSilicon Hip10/11 Platform uses the same SMMU IP with Hip09 */
{"HISI ", "HIP10 ", 0, ACPI_SIG_IORT, greater_than_or_equal,
"Erratum #162001900", IORT_SMMU_V3_PMCG_HISI_HIP09},
{"HISI ", "HIP10C ", 0, ACPI_SIG_IORT, greater_than_or_equal,
"Erratum #162001900", IORT_SMMU_V3_PMCG_HISI_HIP09},
{"HISI ", "HIP11 ", 0, ACPI_SIG_IORT, greater_than_or_equal,
"Erratum #162001900", IORT_SMMU_V3_PMCG_HISI_HIP09},
{ }
};

View File

@@ -620,6 +620,9 @@ static const struct usb_device_id quirks_table[] = {
{ USB_DEVICE(0x0e8d, 0x0608), .driver_info = BTUSB_MEDIATEK |
BTUSB_WIDEBAND_SPEECH |
BTUSB_VALID_LE_STATES },
{ USB_DEVICE(0x13d3, 0x3606), .driver_info = BTUSB_MEDIATEK |
BTUSB_WIDEBAND_SPEECH |
BTUSB_VALID_LE_STATES },
/* MediaTek MT7922A Bluetooth devices */
{ USB_DEVICE(0x0489, 0xe0d8), .driver_info = BTUSB_MEDIATEK |
@@ -658,6 +661,37 @@ static const struct usb_device_id quirks_table[] = {
{ USB_DEVICE(0x04ca, 0x3804), .driver_info = BTUSB_MEDIATEK |
BTUSB_WIDEBAND_SPEECH |
BTUSB_VALID_LE_STATES },
{ USB_DEVICE(0x35f5, 0x7922), .driver_info = BTUSB_MEDIATEK |
BTUSB_WIDEBAND_SPEECH |
BTUSB_VALID_LE_STATES },
{ USB_DEVICE(0x13d3, 0x3614), .driver_info = BTUSB_MEDIATEK |
BTUSB_WIDEBAND_SPEECH |
BTUSB_VALID_LE_STATES },
{ USB_DEVICE(0x13d3, 0x3615), .driver_info = BTUSB_MEDIATEK |
BTUSB_WIDEBAND_SPEECH |
BTUSB_VALID_LE_STATES },
{ USB_DEVICE(0x04ca, 0x38e4), .driver_info = BTUSB_MEDIATEK |
BTUSB_WIDEBAND_SPEECH |
BTUSB_VALID_LE_STATES },
{ USB_DEVICE(0x13d3, 0x3605), .driver_info = BTUSB_MEDIATEK |
BTUSB_WIDEBAND_SPEECH |
BTUSB_VALID_LE_STATES },
{ USB_DEVICE(0x13d3, 0x3607), .driver_info = BTUSB_MEDIATEK |
BTUSB_WIDEBAND_SPEECH |
BTUSB_VALID_LE_STATES },
/* Additional MediaTek MT7925 Bluetooth devices */
{ USB_DEVICE(0x0489, 0xe111), .driver_info = BTUSB_MEDIATEK |
BTUSB_WIDEBAND_SPEECH },
{ USB_DEVICE(0x0489, 0xe113), .driver_info = BTUSB_MEDIATEK |
BTUSB_WIDEBAND_SPEECH |
BTUSB_VALID_LE_STATES },
{ USB_DEVICE(0x13d3, 0x3602), .driver_info = BTUSB_MEDIATEK |
BTUSB_WIDEBAND_SPEECH |
BTUSB_VALID_LE_STATES },
{ USB_DEVICE(0x13d3, 0x3603), .driver_info = BTUSB_MEDIATEK |
BTUSB_WIDEBAND_SPEECH |
BTUSB_VALID_LE_STATES },
/* Additional Realtek 8723AE Bluetooth devices */
{ USB_DEVICE(0x0930, 0x021d), .driver_info = BTUSB_REALTEK },
@@ -858,6 +892,10 @@ struct btusb_data {
int (*setup_on_usb)(struct hci_dev *hdev);
int (*suspend)(struct hci_dev *hdev);
int (*resume)(struct hci_dev *hdev);
int (*disconnect)(struct hci_dev *hdev);
int oob_wake_irq; /* irq for out-of-band wake-on-bt */
unsigned cmd_timeout_cnt;
@@ -4609,6 +4647,9 @@ static void btusb_disconnect(struct usb_interface *intf)
if (data->diag)
usb_set_intfdata(data->diag, NULL);
if (data->disconnect)
data->disconnect(hdev);
hci_unregister_dev(hdev);
if (intf == data->intf) {
@@ -4657,6 +4698,9 @@ static int btusb_suspend(struct usb_interface *intf, pm_message_t message)
cancel_work_sync(&data->work);
if (data->suspend)
data->suspend(data->hdev);
btusb_stop_traffic(data);
usb_kill_anchored_urbs(&data->tx_anchor);
@@ -4760,6 +4804,9 @@ static int btusb_resume(struct usb_interface *intf)
btusb_submit_isoc_urb(hdev, GFP_NOIO);
}
if (data->resume)
data->resume(hdev);
spin_lock_irq(&data->txlock);
play_deferred(data);
clear_bit(BTUSB_SUSPENDING, &data->flags);

View File

@@ -52,6 +52,7 @@
#define PLL_CONFIG_CTL(p) ((p)->offset + (p)->regs[PLL_OFF_CONFIG_CTL])
#define PLL_CONFIG_CTL_U(p) ((p)->offset + (p)->regs[PLL_OFF_CONFIG_CTL_U])
#define PLL_CONFIG_CTL_U1(p) ((p)->offset + (p)->regs[PLL_OFF_CONFIG_CTL_U1])
#define PLL_CONFIG_CTL_U2(p) ((p)->offset + (p)->regs[PLL_OFF_CONFIG_CTL_U2])
#define PLL_TEST_CTL(p) ((p)->offset + (p)->regs[PLL_OFF_TEST_CTL])
#define PLL_TEST_CTL_U(p) ((p)->offset + (p)->regs[PLL_OFF_TEST_CTL_U])
#define PLL_TEST_CTL_U1(p) ((p)->offset + (p)->regs[PLL_OFF_TEST_CTL_U1])
@@ -227,6 +228,32 @@ const u8 clk_alpha_pll_regs[][PLL_OFF_MAX_REGS] = {
[PLL_OFF_ALPHA_VAL] = 0x24,
[PLL_OFF_ALPHA_VAL_U] = 0x28,
},
[CLK_ALPHA_PLL_TYPE_ZONDA_OLE] = {
[PLL_OFF_L_VAL] = 0x04,
[PLL_OFF_ALPHA_VAL] = 0x08,
[PLL_OFF_USER_CTL] = 0x0c,
[PLL_OFF_USER_CTL_U] = 0x10,
[PLL_OFF_CONFIG_CTL] = 0x14,
[PLL_OFF_CONFIG_CTL_U] = 0x18,
[PLL_OFF_CONFIG_CTL_U1] = 0x1c,
[PLL_OFF_CONFIG_CTL_U2] = 0x20,
[PLL_OFF_TEST_CTL] = 0x24,
[PLL_OFF_TEST_CTL_U] = 0x28,
[PLL_OFF_TEST_CTL_U1] = 0x2c,
[PLL_OFF_OPMODE] = 0x30,
[PLL_OFF_STATUS] = 0x3c,
},
[CLK_ALPHA_PLL_TYPE_NSS_HUAYRA] = {
[PLL_OFF_L_VAL] = 0x04,
[PLL_OFF_ALPHA_VAL] = 0x08,
[PLL_OFF_TEST_CTL] = 0x0c,
[PLL_OFF_TEST_CTL_U] = 0x10,
[PLL_OFF_USER_CTL] = 0x14,
[PLL_OFF_CONFIG_CTL] = 0x18,
[PLL_OFF_CONFIG_CTL_U] = 0x1c,
[PLL_OFF_STATUS] = 0x20,
},
};
EXPORT_SYMBOL_GPL(clk_alpha_pll_regs);

View File

@@ -21,6 +21,7 @@ enum {
CLK_ALPHA_PLL_TYPE_LUCID = CLK_ALPHA_PLL_TYPE_TRION,
CLK_ALPHA_PLL_TYPE_AGERA,
CLK_ALPHA_PLL_TYPE_ZONDA,
CLK_ALPHA_PLL_TYPE_ZONDA_OLE,
CLK_ALPHA_PLL_TYPE_LUCID_EVO,
CLK_ALPHA_PLL_TYPE_LUCID_OLE,
CLK_ALPHA_PLL_TYPE_RIVIAN_EVO,
@@ -28,6 +29,7 @@ enum {
CLK_ALPHA_PLL_TYPE_BRAMMO_EVO,
CLK_ALPHA_PLL_TYPE_STROMER,
CLK_ALPHA_PLL_TYPE_STROMER_PLUS,
CLK_ALPHA_PLL_TYPE_NSS_HUAYRA,
CLK_ALPHA_PLL_TYPE_MAX,
};
@@ -42,6 +44,7 @@ enum {
PLL_OFF_CONFIG_CTL,
PLL_OFF_CONFIG_CTL_U,
PLL_OFF_CONFIG_CTL_U1,
PLL_OFF_CONFIG_CTL_U2,
PLL_OFF_TEST_CTL,
PLL_OFF_TEST_CTL_U,
PLL_OFF_TEST_CTL_U1,
@@ -119,6 +122,7 @@ struct alpha_pll_config {
u32 config_ctl_val;
u32 config_ctl_hi_val;
u32 config_ctl_hi1_val;
u32 config_ctl_hi2_val;
u32 user_ctl_val;
u32 user_ctl_hi_val;
u32 user_ctl_hi1_val;
@@ -173,6 +177,7 @@ extern const struct clk_ops clk_alpha_pll_postdiv_lucid_5lpe_ops;
extern const struct clk_ops clk_alpha_pll_zonda_ops;
#define clk_alpha_pll_postdiv_zonda_ops clk_alpha_pll_postdiv_fabia_ops
#define clk_alpha_pll_zonda_ole_ops clk_alpha_pll_zonda_ops
extern const struct clk_ops clk_alpha_pll_lucid_evo_ops;
extern const struct clk_ops clk_alpha_pll_reset_lucid_evo_ops;

View File

@@ -27,7 +27,8 @@
#include <asm/mshyperv.h>
static struct clock_event_device __percpu *hv_clock_event;
static u64 hv_sched_clock_offset __ro_after_init;
/* Note: offset can hold negative values after hibernation. */
static u64 hv_sched_clock_offset __read_mostly;
/*
* If false, we're using the old mechanism for stimer0 interrupts
@@ -456,6 +457,17 @@ static void resume_hv_clock_tsc(struct clocksource *arg)
hv_set_register(HV_REGISTER_REFERENCE_TSC, tsc_msr.as_uint64);
}
/*
* Called during resume from hibernation, from overridden
* x86_platform.restore_sched_clock_state routine. This is to adjust offsets
* used to calculate time for hv tsc page based sched_clock, to account for
* time spent before hibernation.
*/
void hv_adj_sched_clock_offset(u64 offset)
{
hv_sched_clock_offset -= offset;
}
#ifdef HAVE_VDSO_CLOCKMODE_HVCLOCK
static int hv_cs_enable(struct clocksource *cs)
{

View File

@@ -319,7 +319,7 @@ svm_migrate_copy_to_vram(struct kfd_node *node, struct svm_range *prange,
spage = migrate_pfn_to_page(migrate->src[i]);
if (spage && !is_zone_device_page(spage)) {
src[i] = dma_map_page(dev, spage, 0, PAGE_SIZE,
DMA_TO_DEVICE);
DMA_BIDIRECTIONAL);
r = dma_mapping_error(dev, src[i]);
if (r) {
dev_err(dev, "%s: fail %d dma_map_page\n",
@@ -634,7 +634,7 @@ svm_migrate_copy_to_ram(struct amdgpu_device *adev, struct svm_range *prange,
goto out_oom;
}
dst[i] = dma_map_page(dev, dpage, 0, PAGE_SIZE, DMA_FROM_DEVICE);
dst[i] = dma_map_page(dev, dpage, 0, PAGE_SIZE, DMA_BIDIRECTIONAL);
r = dma_mapping_error(dev, dst[i]);
if (r) {
dev_err(adev->dev, "%s: fail %d dma_map_page\n", __func__, r);

View File

@@ -1219,10 +1219,6 @@ static bool is_dsc_need_re_compute(
if (dc_link->type != dc_connection_mst_branch)
return false;
if (!(dc_link->dpcd_caps.dsc_caps.dsc_basic_caps.fields.dsc_support.DSC_SUPPORT ||
dc_link->dpcd_caps.dsc_caps.dsc_basic_caps.fields.dsc_support.DSC_PASSTHROUGH_SUPPORT))
return false;
for (i = 0; i < MAX_PIPES; i++)
stream_on_link[i] = NULL;
@@ -1243,6 +1239,18 @@ static bool is_dsc_need_re_compute(
if (!aconnector)
continue;
/*
* Check if cached virtual MST DSC caps are available and DSC is supported
* this change takes care of newer MST DSC capable devices that report their
* DPCD caps as per specifications in their Virtual DPCD registers.
* TODO: implement the check for older MST DSC devices that do not conform to
* specifications.
*/
if (!(aconnector->dc_sink->dsc_caps.dsc_dec_caps.is_dsc_supported ||
aconnector->dc_link->dpcd_caps.dsc_caps.dsc_basic_caps.fields.dsc_support.DSC_PASSTHROUGH_SUPPORT))
continue;
stream_on_link[new_stream_on_link_num] = aconnector;
new_stream_on_link_num++;

View File

@@ -153,7 +153,16 @@ static int adv7511_hdmi_hw_params(struct device *dev, void *data,
ADV7511_AUDIO_CFG3_LEN_MASK, len);
regmap_update_bits(adv7511->regmap, ADV7511_REG_I2C_FREQ_ID_CFG,
ADV7511_I2C_FREQ_ID_CFG_RATE_MASK, rate << 4);
regmap_write(adv7511->regmap, 0x73, 0x1);
/* send current Audio infoframe values while updating */
regmap_update_bits(adv7511->regmap, ADV7511_REG_INFOFRAME_UPDATE,
BIT(5), BIT(5));
regmap_write(adv7511->regmap, ADV7511_REG_AUDIO_INFOFRAME(0), 0x1);
/* use Audio infoframe updated info */
regmap_update_bits(adv7511->regmap, ADV7511_REG_INFOFRAME_UPDATE,
BIT(5), 0);
return 0;
}
@@ -184,8 +193,9 @@ static int audio_startup(struct device *dev, void *data)
regmap_update_bits(adv7511->regmap, ADV7511_REG_GC(0),
BIT(7) | BIT(6), BIT(7));
/* use Audio infoframe updated info */
regmap_update_bits(adv7511->regmap, ADV7511_REG_GC(1),
regmap_update_bits(adv7511->regmap, ADV7511_REG_INFOFRAME_UPDATE,
BIT(5), 0);
/* enable SPDIF receiver */
if (adv7511->audio_source == ADV7511_AUDIO_SOURCE_SPDIF)
regmap_update_bits(adv7511->regmap, ADV7511_REG_AUDIO_CONFIG,

View File

@@ -1225,8 +1225,10 @@ static int adv7511_probe(struct i2c_client *i2c)
return ret;
ret = adv7511_init_regulators(adv7511);
if (ret)
return dev_err_probe(dev, ret, "failed to init regulators\n");
if (ret) {
dev_err_probe(dev, ret, "failed to init regulators\n");
goto err_of_node_put;
}
/*
* The power down GPIO is optional. If present, toggle it from active to
@@ -1346,6 +1348,8 @@ err_i2c_unregister_edid:
i2c_unregister_device(adv7511->i2c_edid);
uninit_regulators:
adv7511_uninit_regulators(adv7511);
err_of_node_put:
of_node_put(adv7511->host_node);
return ret;
}
@@ -1354,6 +1358,8 @@ static void adv7511_remove(struct i2c_client *i2c)
{
struct adv7511 *adv7511 = i2c_get_clientdata(i2c);
of_node_put(adv7511->host_node);
adv7511_uninit_regulators(adv7511);
drm_bridge_remove(&adv7511->bridge);
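The probe fix above is the usual goto-unwind pattern: once adv7511->host_node has been taken (with of_node_put() now moved out of adv7533_parse_dt()), every later failure, including regulator init, must jump to a label that drops the reference, and remove() must drop it too. A generic sketch of the pattern with hypothetical resources, not the driver code:

#include <stdio.h>

struct res { const char *name; };

static int acquire(struct res *r, const char *n, int fail)
{ r->name = n; printf("acquire %s\n", n); return fail ? -1 : 0; }

static void release(struct res *r)
{ printf("release %s\n", r->name); }

static int probe(int fail_second)
{
    struct res node, regs;
    int ret;

    ret = acquire(&node, "host_node", 0);
    if (ret)
        return ret;

    ret = acquire(&regs, "regulators", fail_second);
    if (ret)
        goto err_put_node;     /* don't leak the first resource */

    return 0;

err_put_node:
    release(&node);
    return ret;
}

int main(void)
{
    printf("probe %s\n", probe(1) ? "failed and cleaned up" : "ok");
    return 0;
}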

View File

@@ -175,7 +175,7 @@ int adv7533_parse_dt(struct device_node *np, struct adv7511 *adv)
of_property_read_u32(np, "adi,dsi-lanes", &num_lanes);
if (num_lanes < 1 || num_lanes > 4)
if (num_lanes < 2 || num_lanes > 4)
return -EINVAL;
adv->num_dsi_lanes = num_lanes;
@@ -184,8 +184,6 @@ int adv7533_parse_dt(struct device_node *np, struct adv7511 *adv)
if (!adv->host_node)
return -ENODEV;
of_node_put(adv->host_node);
adv->use_timing_gen = !of_property_read_bool(np,
"adi,disable-timing-generator");

View File

@@ -133,7 +133,7 @@ static void gen11_rc6_enable(struct intel_rc6 *rc6)
GEN9_MEDIA_PG_ENABLE |
GEN11_MEDIA_SAMPLER_PG_ENABLE;
if (GRAPHICS_VER(gt->i915) >= 12) {
if (GRAPHICS_VER(gt->i915) >= 12 && !IS_DG1(gt->i915)) {
for (i = 0; i < I915_MAX_VCS; i++)
if (HAS_ENGINE(gt, _VCS(i)))
pg_enable |= (VDN_HCP_POWERGATE_ENABLE(i) |

View File

@@ -178,6 +178,8 @@ config I2C_I801
Raptor Lake (PCH)
Meteor Lake (SOC and PCH)
Birch Stream (SOC)
Arrow Lake (SOC)
Panther Lake (SOC)
This driver can also be built as a module. If so, the module
will be called i2c-i801.

View File

@@ -80,6 +80,9 @@
* Meteor Lake SoC-S (SOC) 0xae22 32 hard yes yes yes
* Meteor Lake PCH-S (PCH) 0x7f23 32 hard yes yes yes
* Birch Stream (SOC) 0x5796 32 hard yes yes yes
* Arrow Lake-H (SOC) 0x7722 32 hard yes yes yes
* Panther Lake-H (SOC) 0xe322 32 hard yes yes yes
* Panther Lake-P (SOC) 0xe422 32 hard yes yes yes
*
* Features supported by this driver:
* Software PEC no
@@ -234,6 +237,7 @@
#define PCI_DEVICE_ID_INTEL_ALDER_LAKE_M_SMBUS 0x54a3
#define PCI_DEVICE_ID_INTEL_BIRCH_STREAM_SMBUS 0x5796
#define PCI_DEVICE_ID_INTEL_BROXTON_SMBUS 0x5ad4
#define PCI_DEVICE_ID_INTEL_ARROW_LAKE_H_SMBUS 0x7722
#define PCI_DEVICE_ID_INTEL_RAPTOR_LAKE_S_SMBUS 0x7a23
#define PCI_DEVICE_ID_INTEL_ALDER_LAKE_S_SMBUS 0x7aa3
#define PCI_DEVICE_ID_INTEL_METEOR_LAKE_P_SMBUS 0x7e22
@@ -256,6 +260,8 @@
#define PCI_DEVICE_ID_INTEL_CANNONLAKE_H_SMBUS 0xa323
#define PCI_DEVICE_ID_INTEL_COMETLAKE_V_SMBUS 0xa3a3
#define PCI_DEVICE_ID_INTEL_METEOR_LAKE_SOC_S_SMBUS 0xae22
#define PCI_DEVICE_ID_INTEL_PANTHER_LAKE_H_SMBUS 0xe322
#define PCI_DEVICE_ID_INTEL_PANTHER_LAKE_P_SMBUS 0xe422
struct i801_mux_config {
char *gpio_chip;
@@ -1046,6 +1052,9 @@ static const struct pci_device_id i801_ids[] = {
{ PCI_DEVICE_DATA(INTEL, METEOR_LAKE_SOC_S_SMBUS, FEATURES_ICH5 | FEATURE_TCO_CNL) },
{ PCI_DEVICE_DATA(INTEL, METEOR_LAKE_PCH_S_SMBUS, FEATURES_ICH5 | FEATURE_TCO_CNL) },
{ PCI_DEVICE_DATA(INTEL, BIRCH_STREAM_SMBUS, FEATURES_ICH5 | FEATURE_TCO_CNL) },
{ PCI_DEVICE_DATA(INTEL, ARROW_LAKE_H_SMBUS, FEATURES_ICH5 | FEATURE_TCO_CNL) },
{ PCI_DEVICE_DATA(INTEL, PANTHER_LAKE_H_SMBUS, FEATURES_ICH5 | FEATURE_TCO_CNL) },
{ PCI_DEVICE_DATA(INTEL, PANTHER_LAKE_P_SMBUS, FEATURES_ICH5 | FEATURE_TCO_CNL) },
{ 0, }
};

View File

@@ -91,14 +91,6 @@
#define SLIMPRO_IIC_MSG_DWORD_COUNT 3
/* PCC related defines */
#define PCC_SIGNATURE 0x50424300
#define PCC_STS_CMD_COMPLETE BIT(0)
#define PCC_STS_SCI_DOORBELL BIT(1)
#define PCC_STS_ERR BIT(2)
#define PCC_STS_PLAT_NOTIFY BIT(3)
#define PCC_CMD_GENERATE_DB_INT BIT(15)
struct slimpro_i2c_dev {
struct i2c_adapter adapter;
struct device *dev;
@@ -160,11 +152,11 @@ static void slimpro_i2c_pcc_rx_cb(struct mbox_client *cl, void *msg)
/* Check if platform sends interrupt */
if (!xgene_word_tst_and_clr(&generic_comm_base->status,
PCC_STS_SCI_DOORBELL))
PCC_STATUS_SCI_DOORBELL))
return;
if (xgene_word_tst_and_clr(&generic_comm_base->status,
PCC_STS_CMD_COMPLETE)) {
PCC_STATUS_CMD_COMPLETE)) {
msg = generic_comm_base + 1;
/* Response message msg[1] contains the return value. */
@@ -186,10 +178,10 @@ static void slimpro_i2c_pcc_tx_prepare(struct slimpro_i2c_dev *ctx, u32 *msg)
cpu_to_le32(PCC_SIGNATURE | ctx->mbox_idx));
WRITE_ONCE(generic_comm_base->command,
cpu_to_le16(SLIMPRO_MSG_TYPE(msg[0]) | PCC_CMD_GENERATE_DB_INT));
cpu_to_le16(SLIMPRO_MSG_TYPE(msg[0]) | PCC_CMD_GENERATE_DB_INTR));
status = le16_to_cpu(READ_ONCE(generic_comm_base->status));
status &= ~PCC_STS_CMD_COMPLETE;
status &= ~PCC_STATUS_CMD_COMPLETE;
WRITE_ONCE(generic_comm_base->status, cpu_to_le16(status));
/* Copy the message to the PCC comm space */

View File

@@ -16,7 +16,9 @@
#include <linux/err.h>
#include <linux/sched.h>
#include <linux/delay.h>
#include <linux/of.h>
#include <linux/module.h>
#include <linux/mod_devicetable.h>
#include <linux/property.h>
#include <linux/iio/iio.h>
#include <linux/iio/sysfs.h>
@@ -360,19 +362,19 @@ static inline bool ad7192_valid_external_frequency(u32 freq)
freq <= AD7192_EXT_FREQ_MHZ_MAX);
}
static int ad7192_of_clock_select(struct ad7192_state *st)
static int ad7192_clock_select(struct ad7192_state *st)
{
struct device_node *np = st->sd.spi->dev.of_node;
struct device *dev = &st->sd.spi->dev;
unsigned int clock_sel;
clock_sel = AD7192_CLK_INT;
/* use internal clock */
if (!st->mclk) {
if (of_property_read_bool(np, "adi,int-clock-output-enable"))
if (device_property_read_bool(dev, "adi,int-clock-output-enable"))
clock_sel = AD7192_CLK_INT_CO;
} else {
if (of_property_read_bool(np, "adi,clock-xtal"))
if (device_property_read_bool(dev, "adi,clock-xtal"))
clock_sel = AD7192_CLK_EXT_MCLK1_2;
else
clock_sel = AD7192_CLK_EXT_MCLK2;
@@ -381,7 +383,7 @@ static int ad7192_of_clock_select(struct ad7192_state *st)
return clock_sel;
}
static int ad7192_setup(struct iio_dev *indio_dev, struct device_node *np)
static int ad7192_setup(struct iio_dev *indio_dev, struct device *dev)
{
struct ad7192_state *st = iio_priv(indio_dev);
bool rej60_en, refin2_en;
@@ -403,7 +405,7 @@ static int ad7192_setup(struct iio_dev *indio_dev, struct device_node *np)
id &= AD7192_ID_MASK;
if (id != st->chip_info->chip_id)
dev_warn(&st->sd.spi->dev, "device ID query failed (0x%X != 0x%X)\n",
dev_warn(dev, "device ID query failed (0x%X != 0x%X)\n",
id, st->chip_info->chip_id);
st->mode = AD7192_MODE_SEL(AD7192_MODE_IDLE) |
@@ -412,31 +414,31 @@ static int ad7192_setup(struct iio_dev *indio_dev, struct device_node *np)
st->conf = AD7192_CONF_GAIN(0);
rej60_en = of_property_read_bool(np, "adi,rejection-60-Hz-enable");
rej60_en = device_property_read_bool(dev, "adi,rejection-60-Hz-enable");
if (rej60_en)
st->mode |= AD7192_MODE_REJ60;
refin2_en = of_property_read_bool(np, "adi,refin2-pins-enable");
refin2_en = device_property_read_bool(dev, "adi,refin2-pins-enable");
if (refin2_en && st->chip_info->chip_id != CHIPID_AD7195)
st->conf |= AD7192_CONF_REFSEL;
st->conf &= ~AD7192_CONF_CHOP;
st->f_order = AD7192_NO_SYNC_FILTER;
buf_en = of_property_read_bool(np, "adi,buffer-enable");
buf_en = device_property_read_bool(dev, "adi,buffer-enable");
if (buf_en)
st->conf |= AD7192_CONF_BUF;
bipolar = of_property_read_bool(np, "bipolar");
bipolar = device_property_read_bool(dev, "bipolar");
if (!bipolar)
st->conf |= AD7192_CONF_UNIPOLAR;
burnout_curr_en = of_property_read_bool(np,
"adi,burnout-currents-enable");
burnout_curr_en = device_property_read_bool(dev,
"adi,burnout-currents-enable");
if (burnout_curr_en && buf_en) {
st->conf |= AD7192_CONF_BURN;
} else if (burnout_curr_en) {
dev_warn(&st->sd.spi->dev,
dev_warn(dev,
"Can't enable burnout currents: see CHOP or buffer\n");
}
@@ -1036,9 +1038,10 @@ static int ad7192_probe(struct spi_device *spi)
}
st->int_vref_mv = ret / 1000;
st->chip_info = of_device_get_match_data(&spi->dev);
st->chip_info = spi_get_device_match_data(spi);
if (!st->chip_info)
st->chip_info = (void *)spi_get_device_id(spi)->driver_data;
return -ENODEV;
indio_dev->name = st->chip_info->name;
indio_dev->modes = INDIO_DIRECT_MODE;
@@ -1065,7 +1068,7 @@ static int ad7192_probe(struct spi_device *spi)
if (IS_ERR(st->mclk))
return PTR_ERR(st->mclk);
st->clock_sel = ad7192_of_clock_select(st);
st->clock_sel = ad7192_clock_select(st);
if (st->clock_sel == AD7192_CLK_EXT_MCLK1_2 ||
st->clock_sel == AD7192_CLK_EXT_MCLK2) {
@@ -1077,7 +1080,7 @@ static int ad7192_probe(struct spi_device *spi)
}
}
ret = ad7192_setup(indio_dev, spi->dev.of_node);
ret = ad7192_setup(indio_dev, &spi->dev);
if (ret)
return ret;

View File

@@ -161,7 +161,7 @@ static const void __user *uverbs_request_next_ptr(struct uverbs_req_iter *iter,
{
const void __user *res = iter->cur;
if (iter->cur + len > iter->end)
if (len > iter->end - iter->cur)
return (void __force __user *)ERR_PTR(-ENOSPC);
iter->cur += len;
return res;
@@ -2009,11 +2009,13 @@ static int ib_uverbs_post_send(struct uverbs_attr_bundle *attrs)
ret = uverbs_request_start(attrs, &iter, &cmd, sizeof(cmd));
if (ret)
return ret;
wqes = uverbs_request_next_ptr(&iter, cmd.wqe_size * cmd.wr_count);
wqes = uverbs_request_next_ptr(&iter, size_mul(cmd.wqe_size,
cmd.wr_count));
if (IS_ERR(wqes))
return PTR_ERR(wqes);
sgls = uverbs_request_next_ptr(
&iter, cmd.sge_count * sizeof(struct ib_uverbs_sge));
sgls = uverbs_request_next_ptr(&iter,
size_mul(cmd.sge_count,
sizeof(struct ib_uverbs_sge)));
if (IS_ERR(sgls))
return PTR_ERR(sgls);
ret = uverbs_request_finish(&iter);
@@ -2199,11 +2201,11 @@ ib_uverbs_unmarshall_recv(struct uverbs_req_iter *iter, u32 wr_count,
if (wqe_size < sizeof(struct ib_uverbs_recv_wr))
return ERR_PTR(-EINVAL);
wqes = uverbs_request_next_ptr(iter, wqe_size * wr_count);
wqes = uverbs_request_next_ptr(iter, size_mul(wqe_size, wr_count));
if (IS_ERR(wqes))
return ERR_CAST(wqes);
sgls = uverbs_request_next_ptr(
iter, sge_count * sizeof(struct ib_uverbs_sge));
sgls = uverbs_request_next_ptr(iter, size_mul(sge_count,
sizeof(struct ib_uverbs_sge)));
if (IS_ERR(sgls))
return ERR_CAST(sgls);
ret = uverbs_request_finish(iter);
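Two hardening changes in this hunk: iter->cur + len can wrap for a huge user-supplied len, so the bounds test becomes "len > iter->end - iter->cur", and the user-controlled wqe_size * wr_count products go through size_mul(), which saturates on overflow so the subsequent bounds check fails safely. A sketch of both idioms against a hypothetical buffer (the saturating multiply is in the spirit of the kernel's size_mul(), which is built on check_mul_overflow()):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Saturating multiply in the spirit of the kernel's size_mul(). */
static size_t size_mul_demo(size_t a, size_t b)
{
    size_t r = a * b;
    return (b && r / b != a) ? SIZE_MAX : r;
}

/* Overflow-proof "len bytes available?" check on [cur, end). */
static bool have_room(const char *cur, const char *end, size_t len)
{
    return len <= (size_t)(end - cur);   /* never computes cur + len */
}

int main(void)
{
    char buf[64];
    size_t len = size_mul_demo(SIZE_MAX / 2, 4);   /* overflows -> SIZE_MAX */

    printf("len=%zu room=%d\n", len, have_room(buf, buf + sizeof(buf), len));
    return 0;
}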

View File

@@ -147,7 +147,7 @@ int bnxt_re_query_device(struct ib_device *ibdev,
ib_attr->vendor_id = rdev->en_dev->pdev->vendor;
ib_attr->vendor_part_id = rdev->en_dev->pdev->device;
ib_attr->hw_ver = rdev->en_dev->pdev->subsystem_device;
ib_attr->hw_ver = rdev->en_dev->pdev->revision;
ib_attr->max_qp = dev_attr->max_qp;
ib_attr->max_qp_wr = dev_attr->max_qp_wqes;
ib_attr->device_cap_flags =
@@ -992,23 +992,22 @@ static int bnxt_re_setup_swqe_size(struct bnxt_re_qp *qp,
align = sizeof(struct sq_send_hdr);
ilsize = ALIGN(init_attr->cap.max_inline_data, align);
sq->wqe_size = bnxt_re_get_wqe_size(ilsize, sq->max_sge);
if (sq->wqe_size > bnxt_re_get_swqe_size(dev_attr->max_qp_sges))
return -EINVAL;
/* For gen p4 and gen p5 backward compatibility mode
* wqe size is fixed to 128 bytes
/* For gen p4 and gen p5 fixed wqe compatibility mode
* wqe size is fixed to 128 bytes - ie 6 SGEs
*/
if (sq->wqe_size < bnxt_re_get_swqe_size(dev_attr->max_qp_sges) &&
qplqp->wqe_mode == BNXT_QPLIB_WQE_MODE_STATIC)
sq->wqe_size = bnxt_re_get_swqe_size(dev_attr->max_qp_sges);
if (qplqp->wqe_mode == BNXT_QPLIB_WQE_MODE_STATIC) {
sq->wqe_size = bnxt_re_get_swqe_size(BNXT_STATIC_MAX_SGE);
sq->max_sge = BNXT_STATIC_MAX_SGE;
} else {
sq->wqe_size = bnxt_re_get_wqe_size(ilsize, sq->max_sge);
if (sq->wqe_size > bnxt_re_get_swqe_size(dev_attr->max_qp_sges))
return -EINVAL;
}
if (init_attr->cap.max_inline_data) {
qplqp->max_inline_data = sq->wqe_size -
sizeof(struct sq_send_hdr);
init_attr->cap.max_inline_data = qplqp->max_inline_data;
if (qplqp->wqe_mode == BNXT_QPLIB_WQE_MODE_STATIC)
sq->max_sge = qplqp->max_inline_data /
sizeof(struct sq_sge);
}
return 0;
@@ -1154,6 +1153,7 @@ static struct bnxt_re_qp *bnxt_re_create_shadow_qp
/* Shadow QP SQ depth should be same as QP1 RQ depth */
qp->qplib_qp.sq.wqe_size = bnxt_re_get_wqe_size(0, 6);
qp->qplib_qp.sq.max_wqe = qp1_qp->rq.max_wqe;
qp->qplib_qp.sq.max_sw_wqe = qp1_qp->rq.max_wqe;
qp->qplib_qp.sq.max_sge = 2;
/* Q full delta can be 1 since it is internal QP */
qp->qplib_qp.sq.q_full_delta = 1;
@@ -1165,6 +1165,7 @@ static struct bnxt_re_qp *bnxt_re_create_shadow_qp
qp->qplib_qp.rq.wqe_size = bnxt_re_get_rwqe_size(6);
qp->qplib_qp.rq.max_wqe = qp1_qp->rq.max_wqe;
qp->qplib_qp.rq.max_sw_wqe = qp1_qp->rq.max_wqe;
qp->qplib_qp.rq.max_sge = qp1_qp->rq.max_sge;
/* Q full delta can be 1 since it is internal QP */
qp->qplib_qp.rq.q_full_delta = 1;
@@ -1226,6 +1227,7 @@ static int bnxt_re_init_rq_attr(struct bnxt_re_qp *qp,
*/
entries = bnxt_re_init_depth(init_attr->cap.max_recv_wr + 1, uctx);
rq->max_wqe = min_t(u32, entries, dev_attr->max_qp_wqes + 1);
rq->max_sw_wqe = rq->max_wqe;
rq->q_full_delta = 0;
rq->sg_info.pgsize = PAGE_SIZE;
rq->sg_info.pgshft = PAGE_SHIFT;
@@ -1285,6 +1287,7 @@ static int bnxt_re_init_sq_attr(struct bnxt_re_qp *qp,
0 : BNXT_QPLIB_RESERVED_QP_WRS;
entries = bnxt_re_init_depth(entries + diff + 1, uctx);
sq->max_wqe = min_t(u32, entries, dev_attr->max_qp_wqes + diff + 1);
sq->max_sw_wqe = bnxt_qplib_get_depth(sq, qplqp->wqe_mode, true);
sq->q_full_delta = diff + 1;
/*
* Reserving one slot for Phantom WQE. Application can
@@ -2055,18 +2058,20 @@ int bnxt_re_modify_qp(struct ib_qp *ib_qp, struct ib_qp_attr *qp_attr,
}
}
if (qp_attr_mask & IB_QP_PATH_MTU) {
qp->qplib_qp.modify_flags |=
CMDQ_MODIFY_QP_MODIFY_MASK_PATH_MTU;
qp->qplib_qp.path_mtu = __from_ib_mtu(qp_attr->path_mtu);
qp->qplib_qp.mtu = ib_mtu_enum_to_int(qp_attr->path_mtu);
} else if (qp_attr->qp_state == IB_QPS_RTR) {
qp->qplib_qp.modify_flags |=
CMDQ_MODIFY_QP_MODIFY_MASK_PATH_MTU;
qp->qplib_qp.path_mtu =
__from_ib_mtu(iboe_get_mtu(rdev->netdev->mtu));
qp->qplib_qp.mtu =
ib_mtu_enum_to_int(iboe_get_mtu(rdev->netdev->mtu));
if (qp_attr->qp_state == IB_QPS_RTR) {
enum ib_mtu qpmtu;
qpmtu = iboe_get_mtu(rdev->netdev->mtu);
if (qp_attr_mask & IB_QP_PATH_MTU) {
if (ib_mtu_enum_to_int(qp_attr->path_mtu) >
ib_mtu_enum_to_int(qpmtu))
return -EINVAL;
qpmtu = qp_attr->path_mtu;
}
qp->qplib_qp.modify_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_PATH_MTU;
qp->qplib_qp.path_mtu = __from_ib_mtu(qpmtu);
qp->qplib_qp.mtu = ib_mtu_enum_to_int(qpmtu);
}
if (qp_attr_mask & IB_QP_TIMEOUT) {
@@ -2153,6 +2158,7 @@ int bnxt_re_modify_qp(struct ib_qp *ib_qp, struct ib_qp_attr *qp_attr,
entries = bnxt_re_init_depth(qp_attr->cap.max_recv_wr, uctx);
qp->qplib_qp.rq.max_wqe =
min_t(u32, entries, dev_attr->max_qp_wqes + 1);
qp->qplib_qp.rq.max_sw_wqe = qp->qplib_qp.rq.max_wqe;
qp->qplib_qp.rq.q_full_delta = qp->qplib_qp.rq.max_wqe -
qp_attr->cap.max_recv_wr;
qp->qplib_qp.rq.max_sge = qp_attr->cap.max_recv_sge;
@@ -2710,7 +2716,8 @@ bad:
wr = wr->next;
}
bnxt_qplib_post_send_db(&qp->qplib_qp);
bnxt_ud_qp_hw_stall_workaround(qp);
if (!bnxt_qplib_is_chip_gen_p5_p7(qp->rdev->chip_ctx))
bnxt_ud_qp_hw_stall_workaround(qp);
spin_unlock_irqrestore(&qp->sq_lock, flags);
return rc;
}
@@ -2822,7 +2829,8 @@ bad:
wr = wr->next;
}
bnxt_qplib_post_send_db(&qp->qplib_qp);
bnxt_ud_qp_hw_stall_workaround(qp);
if (!bnxt_qplib_is_chip_gen_p5_p7(qp->rdev->chip_ctx))
bnxt_ud_qp_hw_stall_workaround(qp);
spin_unlock_irqrestore(&qp->sq_lock, flags);
return rc;
@@ -4167,9 +4175,6 @@ int bnxt_re_alloc_ucontext(struct ib_ucontext *ctx, struct ib_udata *udata)
resp.cqe_sz = sizeof(struct cq_base);
resp.max_cqd = dev_attr->max_cq_wqes;
resp.comp_mask |= BNXT_RE_UCNTX_CMASK_HAVE_MODE;
resp.mode = rdev->chip_ctx->modes.wqe_mode;
if (rdev->chip_ctx->modes.db_push)
resp.comp_mask |= BNXT_RE_UCNTX_CMASK_WC_DPI_ENABLED;
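The reworked RTR branch earlier in this file's diff derives the working MTU from the netdev via iboe_get_mtu(), and a user-supplied IB_QP_PATH_MTU is only accepted when it does not exceed that value; otherwise the modify fails with -EINVAL. A sketch of the clamp-and-validate logic with simplified, invented MTU enum values mirroring ib_mtu_enum_to_int() behaviour:

#include <stdio.h>

enum mtu { MTU_256 = 1, MTU_512, MTU_1024, MTU_2048, MTU_4096 };

static int mtu_to_int(enum mtu m) { return 128 << m; }

/* Pick the MTU for RTR: a requested value must not exceed the netdev's. */
static int pick_path_mtu(enum mtu netdev_mtu, int has_req, enum mtu req,
                         enum mtu *out)
{
    enum mtu m = netdev_mtu;

    if (has_req) {
        if (mtu_to_int(req) > mtu_to_int(netdev_mtu))
            return -1;           /* -EINVAL in the driver */
        m = req;
    }
    *out = m;
    return 0;
}

int main(void)
{
    enum mtu m;
    int rc = pick_path_mtu(MTU_1024, 1, MTU_4096, &m);

    printf("rc=%d\n", rc);       /* rejected: 4096 > 1024 */
    return 0;
}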

View File

@@ -128,13 +128,13 @@ static void bnxt_re_set_db_offset(struct bnxt_re_dev *rdev)
}
}
static void bnxt_re_set_drv_mode(struct bnxt_re_dev *rdev, u8 mode)
static void bnxt_re_set_drv_mode(struct bnxt_re_dev *rdev)
{
struct bnxt_qplib_chip_ctx *cctx;
cctx = rdev->chip_ctx;
cctx->modes.wqe_mode = bnxt_qplib_is_chip_gen_p5_p7(rdev->chip_ctx) ?
mode : BNXT_QPLIB_WQE_MODE_STATIC;
cctx->modes.wqe_mode = bnxt_qplib_is_chip_gen_p7(rdev->chip_ctx) ?
BNXT_QPLIB_WQE_MODE_VARIABLE : BNXT_QPLIB_WQE_MODE_STATIC;
if (bnxt_re_hwrm_qcaps(rdev))
dev_err(rdev_to_dev(rdev),
"Failed to query hwrm qcaps\n");
@@ -155,7 +155,7 @@ static void bnxt_re_destroy_chip_ctx(struct bnxt_re_dev *rdev)
kfree(chip_ctx);
}
static int bnxt_re_setup_chip_ctx(struct bnxt_re_dev *rdev, u8 wqe_mode)
static int bnxt_re_setup_chip_ctx(struct bnxt_re_dev *rdev)
{
struct bnxt_qplib_chip_ctx *chip_ctx;
struct bnxt_en_dev *en_dev;
@@ -177,7 +177,7 @@ static int bnxt_re_setup_chip_ctx(struct bnxt_re_dev *rdev, u8 wqe_mode)
rdev->qplib_res.dattr = &rdev->dev_attr;
rdev->qplib_res.is_vf = BNXT_EN_VF(en_dev);
bnxt_re_set_drv_mode(rdev, wqe_mode);
bnxt_re_set_drv_mode(rdev);
bnxt_re_set_db_offset(rdev);
rc = bnxt_qplib_map_db_bar(&rdev->qplib_res);
@@ -1440,7 +1440,7 @@ static void bnxt_re_worker(struct work_struct *work)
schedule_delayed_work(&rdev->worker, msecs_to_jiffies(30000));
}
static int bnxt_re_dev_init(struct bnxt_re_dev *rdev, u8 wqe_mode)
static int bnxt_re_dev_init(struct bnxt_re_dev *rdev)
{
struct bnxt_re_ring_attr rattr = {};
struct bnxt_qplib_creq_ctx *creq;
@@ -1458,7 +1458,7 @@ static int bnxt_re_dev_init(struct bnxt_re_dev *rdev, u8 wqe_mode)
}
set_bit(BNXT_RE_FLAG_NETDEV_REGISTERED, &rdev->flags);
rc = bnxt_re_setup_chip_ctx(rdev, wqe_mode);
rc = bnxt_re_setup_chip_ctx(rdev);
if (rc) {
bnxt_unregister_dev(rdev->en_dev);
clear_bit(BNXT_RE_FLAG_NETDEV_REGISTERED, &rdev->flags);
@@ -1609,7 +1609,7 @@ fail:
return rc;
}
static int bnxt_re_add_device(struct auxiliary_device *adev, u8 wqe_mode)
static int bnxt_re_add_device(struct auxiliary_device *adev)
{
struct bnxt_aux_priv *aux_priv =
container_of(adev, struct bnxt_aux_priv, aux_dev);
@@ -1626,7 +1626,7 @@ static int bnxt_re_add_device(struct auxiliary_device *adev, u8 wqe_mode)
goto exit;
}
rc = bnxt_re_dev_init(rdev, wqe_mode);
rc = bnxt_re_dev_init(rdev);
if (rc)
goto re_dev_dealloc;
@@ -1756,7 +1756,8 @@ static int bnxt_re_probe(struct auxiliary_device *adev,
int rc;
mutex_lock(&bnxt_re_mutex);
rc = bnxt_re_add_device(adev, BNXT_QPLIB_WQE_MODE_STATIC);
rc = bnxt_re_add_device(adev);
if (rc) {
mutex_unlock(&bnxt_re_mutex);
return rc;

View File

@@ -639,13 +639,6 @@ int bnxt_qplib_create_srq(struct bnxt_qplib_res *res,
rc = bnxt_qplib_alloc_init_hwq(&srq->hwq, &hwq_attr);
if (rc)
return rc;
srq->swq = kcalloc(srq->hwq.max_elements, sizeof(*srq->swq),
GFP_KERNEL);
if (!srq->swq) {
rc = -ENOMEM;
goto fail;
}
srq->dbinfo.flags = 0;
bnxt_qplib_rcfw_cmd_prep((struct cmdq_base *)&req,
CMDQ_BASE_OPCODE_CREATE_SRQ,
@@ -674,9 +667,17 @@ int bnxt_qplib_create_srq(struct bnxt_qplib_res *res,
spin_lock_init(&srq->lock);
srq->start_idx = 0;
srq->last_idx = srq->hwq.max_elements - 1;
for (idx = 0; idx < srq->hwq.max_elements; idx++)
srq->swq[idx].next_idx = idx + 1;
srq->swq[srq->last_idx].next_idx = -1;
if (!srq->hwq.is_user) {
srq->swq = kcalloc(srq->hwq.max_elements, sizeof(*srq->swq),
GFP_KERNEL);
if (!srq->swq) {
rc = -ENOMEM;
goto fail;
}
for (idx = 0; idx < srq->hwq.max_elements; idx++)
srq->swq[idx].next_idx = idx + 1;
srq->swq[srq->last_idx].next_idx = -1;
}
srq->id = le32_to_cpu(resp.xid);
srq->dbinfo.hwq = &srq->hwq;
@@ -806,13 +807,13 @@ static int bnxt_qplib_alloc_init_swq(struct bnxt_qplib_q *que)
{
int indx;
que->swq = kcalloc(que->max_wqe, sizeof(*que->swq), GFP_KERNEL);
que->swq = kcalloc(que->max_sw_wqe, sizeof(*que->swq), GFP_KERNEL);
if (!que->swq)
return -ENOMEM;
que->swq_start = 0;
que->swq_last = que->max_wqe - 1;
for (indx = 0; indx < que->max_wqe; indx++)
que->swq_last = que->max_sw_wqe - 1;
for (indx = 0; indx < que->max_sw_wqe; indx++)
que->swq[indx].next_idx = indx + 1;
que->swq[que->swq_last].next_idx = 0; /* Make it circular */
que->swq_last = 0;
@@ -848,7 +849,7 @@ int bnxt_qplib_create_qp1(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp)
hwq_attr.res = res;
hwq_attr.sginfo = &sq->sg_info;
hwq_attr.stride = sizeof(struct sq_sge);
hwq_attr.depth = bnxt_qplib_get_depth(sq);
hwq_attr.depth = bnxt_qplib_get_depth(sq, qp->wqe_mode, false);
hwq_attr.type = HWQ_TYPE_QUEUE;
rc = bnxt_qplib_alloc_init_hwq(&sq->hwq, &hwq_attr);
if (rc)
@@ -876,7 +877,7 @@ int bnxt_qplib_create_qp1(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp)
hwq_attr.res = res;
hwq_attr.sginfo = &rq->sg_info;
hwq_attr.stride = sizeof(struct sq_sge);
hwq_attr.depth = bnxt_qplib_get_depth(rq);
hwq_attr.depth = bnxt_qplib_get_depth(rq, qp->wqe_mode, false);
hwq_attr.type = HWQ_TYPE_QUEUE;
rc = bnxt_qplib_alloc_init_hwq(&rq->hwq, &hwq_attr);
if (rc)
@@ -980,9 +981,7 @@ int bnxt_qplib_create_qp(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp)
u32 tbl_indx;
u16 nsge;
if (res->dattr)
qp->dev_cap_flags = res->dattr->dev_cap_flags;
qp->is_host_msn_tbl = _is_host_msn_table(res->dattr->dev_cap_flags2);
sq->dbinfo.flags = 0;
bnxt_qplib_rcfw_cmd_prep((struct cmdq_base *)&req,
CMDQ_BASE_OPCODE_CREATE_QP,
@@ -999,7 +998,7 @@ int bnxt_qplib_create_qp(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp)
sizeof(struct sq_psn_search_ext) :
sizeof(struct sq_psn_search);
if (BNXT_RE_HW_RETX(qp->dev_cap_flags)) {
if (qp->is_host_msn_tbl) {
psn_sz = sizeof(struct sq_msn_search);
qp->msn = 0;
}
@@ -1008,13 +1007,18 @@ int bnxt_qplib_create_qp(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp)
hwq_attr.res = res;
hwq_attr.sginfo = &sq->sg_info;
hwq_attr.stride = sizeof(struct sq_sge);
hwq_attr.depth = bnxt_qplib_get_depth(sq);
hwq_attr.depth = bnxt_qplib_get_depth(sq, qp->wqe_mode, true);
hwq_attr.aux_stride = psn_sz;
hwq_attr.aux_depth = psn_sz ? bnxt_qplib_set_sq_size(sq, qp->wqe_mode)
: 0;
/* Update msn tbl size */
if (BNXT_RE_HW_RETX(qp->dev_cap_flags) && psn_sz) {
hwq_attr.aux_depth = roundup_pow_of_two(bnxt_qplib_set_sq_size(sq, qp->wqe_mode));
if (qp->is_host_msn_tbl && psn_sz) {
if (qp->wqe_mode == BNXT_QPLIB_WQE_MODE_STATIC)
hwq_attr.aux_depth =
roundup_pow_of_two(bnxt_qplib_set_sq_size(sq, qp->wqe_mode));
else
hwq_attr.aux_depth =
roundup_pow_of_two(bnxt_qplib_set_sq_size(sq, qp->wqe_mode)) / 2;
qp->msn_tbl_sz = hwq_attr.aux_depth;
qp->msn = 0;
}
@@ -1024,13 +1028,14 @@ int bnxt_qplib_create_qp(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp)
if (rc)
return rc;
rc = bnxt_qplib_alloc_init_swq(sq);
if (rc)
goto fail_sq;
if (psn_sz)
bnxt_qplib_init_psn_ptr(qp, psn_sz);
if (!sq->hwq.is_user) {
rc = bnxt_qplib_alloc_init_swq(sq);
if (rc)
goto fail_sq;
if (psn_sz)
bnxt_qplib_init_psn_ptr(qp, psn_sz);
}
req.sq_size = cpu_to_le32(bnxt_qplib_set_sq_size(sq, qp->wqe_mode));
pbl = &sq->hwq.pbl[PBL_LVL_0];
req.sq_pbl = cpu_to_le64(pbl->pg_map_arr[0]);
@@ -1049,16 +1054,18 @@ int bnxt_qplib_create_qp(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp)
hwq_attr.res = res;
hwq_attr.sginfo = &rq->sg_info;
hwq_attr.stride = sizeof(struct sq_sge);
hwq_attr.depth = bnxt_qplib_get_depth(rq);
hwq_attr.depth = bnxt_qplib_get_depth(rq, qp->wqe_mode, false);
hwq_attr.aux_stride = 0;
hwq_attr.aux_depth = 0;
hwq_attr.type = HWQ_TYPE_QUEUE;
rc = bnxt_qplib_alloc_init_hwq(&rq->hwq, &hwq_attr);
if (rc)
goto sq_swq;
rc = bnxt_qplib_alloc_init_swq(rq);
if (rc)
goto fail_rq;
if (!rq->hwq.is_user) {
rc = bnxt_qplib_alloc_init_swq(rq);
if (rc)
goto fail_rq;
}
req.rq_size = cpu_to_le32(rq->max_wqe);
pbl = &rq->hwq.pbl[PBL_LVL_0];
@@ -1154,9 +1161,11 @@ int bnxt_qplib_create_qp(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp)
rq->dbinfo.db = qp->dpi->dbr;
rq->dbinfo.max_slot = bnxt_qplib_set_rq_max_slot(rq->wqe_size);
}
spin_lock_bh(&rcfw->tbl_lock);
tbl_indx = map_qp_id_to_tbl_indx(qp->id, rcfw);
rcfw->qp_tbl[tbl_indx].qp_id = qp->id;
rcfw->qp_tbl[tbl_indx].qp_handle = (void *)qp;
spin_unlock_bh(&rcfw->tbl_lock);
return 0;
fail:
@@ -1638,7 +1647,7 @@ static void bnxt_qplib_fill_psn_search(struct bnxt_qplib_qp *qp,
if (!swq->psn_search)
return;
/* Handle MSN differently on cap flags */
if (BNXT_RE_HW_RETX(qp->dev_cap_flags)) {
if (qp->is_host_msn_tbl) {
bnxt_qplib_fill_msn_search(qp, wqe, swq);
return;
}
@@ -1820,7 +1829,7 @@ int bnxt_qplib_post_send(struct bnxt_qplib_qp *qp,
}
swq = bnxt_qplib_get_swqe(sq, &wqe_idx);
bnxt_qplib_pull_psn_buff(qp, sq, swq, BNXT_RE_HW_RETX(qp->dev_cap_flags));
bnxt_qplib_pull_psn_buff(qp, sq, swq, qp->is_host_msn_tbl);
idx = 0;
swq->slot_idx = hwq->prod;
@@ -2010,7 +2019,7 @@ int bnxt_qplib_post_send(struct bnxt_qplib_qp *qp,
rc = -EINVAL;
goto done;
}
if (!BNXT_RE_HW_RETX(qp->dev_cap_flags) || msn_update) {
if (!qp->is_host_msn_tbl || msn_update) {
swq->next_psn = sq->psn & BTH_PSN_MASK;
bnxt_qplib_fill_psn_search(qp, wqe, swq);
}
@@ -2491,7 +2500,7 @@ static int bnxt_qplib_cq_process_req(struct bnxt_qplib_cq *cq,
}
sq = &qp->sq;
cqe_sq_cons = le16_to_cpu(hwcqe->sq_cons_idx) % sq->max_wqe;
cqe_sq_cons = le16_to_cpu(hwcqe->sq_cons_idx) % sq->max_sw_wqe;
if (qp->sq.flushed) {
dev_dbg(&cq->hwq.pdev->dev,
"%s: QP in Flush QP = %p\n", __func__, qp);
@@ -2534,10 +2543,12 @@ static int bnxt_qplib_cq_process_req(struct bnxt_qplib_cq *cq,
bnxt_qplib_add_flush_qp(qp);
} else {
/* Before we complete, do WA 9060 */
if (do_wa9060(qp, cq, cq_cons, sq->swq_last,
cqe_sq_cons)) {
*lib_qp = qp;
goto out;
if (!bnxt_qplib_is_chip_gen_p5_p7(qp->cctx)) {
if (do_wa9060(qp, cq, cq_cons, sq->swq_last,
cqe_sq_cons)) {
*lib_qp = qp;
goto out;
}
}
if (swq->flags & SQ_SEND_FLAGS_SIGNAL_COMP) {
cqe->status = CQ_REQ_STATUS_OK;
@@ -2881,7 +2892,7 @@ static int bnxt_qplib_cq_process_terminal(struct bnxt_qplib_cq *cq,
cqe_cons = le16_to_cpu(hwcqe->sq_cons_idx);
if (cqe_cons == 0xFFFF)
goto do_rq;
cqe_cons %= sq->max_wqe;
cqe_cons %= sq->max_sw_wqe;
if (qp->sq.flushed) {
dev_dbg(&cq->hwq.pdev->dev,

View File

@@ -113,7 +113,6 @@ struct bnxt_qplib_sge {
u32 size;
};
#define BNXT_QPLIB_QP_MAX_SGL 6
struct bnxt_qplib_swq {
u64 wr_id;
int next_idx;
@@ -153,7 +152,7 @@ struct bnxt_qplib_swqe {
#define BNXT_QPLIB_SWQE_FLAGS_UC_FENCE BIT(2)
#define BNXT_QPLIB_SWQE_FLAGS_SOLICIT_EVENT BIT(3)
#define BNXT_QPLIB_SWQE_FLAGS_INLINE BIT(4)
struct bnxt_qplib_sge sg_list[BNXT_QPLIB_QP_MAX_SGL];
struct bnxt_qplib_sge sg_list[BNXT_VAR_MAX_SGE];
int num_sge;
/* Max inline data is 96 bytes */
u32 inline_len;
@@ -251,6 +250,7 @@ struct bnxt_qplib_q {
struct bnxt_qplib_db_info dbinfo;
struct bnxt_qplib_sg_info sg_info;
u32 max_wqe;
u32 max_sw_wqe;
u16 wqe_size;
u16 q_full_delta;
u16 max_sge;
@@ -340,7 +340,7 @@ struct bnxt_qplib_qp {
struct list_head rq_flush;
u32 msn;
u32 msn_tbl_sz;
u16 dev_cap_flags;
bool is_host_msn_tbl;
};
#define BNXT_QPLIB_MAX_CQE_ENTRY_SIZE sizeof(struct cq_base)
@@ -585,15 +585,22 @@ static inline void bnxt_qplib_swq_mod_start(struct bnxt_qplib_q *que, u32 idx)
que->swq_start = que->swq[idx].next_idx;
}
static inline u32 bnxt_qplib_get_depth(struct bnxt_qplib_q *que)
static inline u32 bnxt_qplib_get_depth(struct bnxt_qplib_q *que, u8 wqe_mode, bool is_sq)
{
return (que->wqe_size * que->max_wqe) / sizeof(struct sq_sge);
u32 slots;
/* Queue depth is the number of slots. */
slots = (que->wqe_size * que->max_wqe) / sizeof(struct sq_sge);
/* For variable WQE mode, need to align the slots to 256 */
if (wqe_mode == BNXT_QPLIB_WQE_MODE_VARIABLE && is_sq)
slots = ALIGN(slots, BNXT_VAR_MAX_SLOT_ALIGN);
return slots;
}
static inline u32 bnxt_qplib_set_sq_size(struct bnxt_qplib_q *que, u8 wqe_mode)
{
return (wqe_mode == BNXT_QPLIB_WQE_MODE_STATIC) ?
que->max_wqe : bnxt_qplib_get_depth(que);
que->max_wqe : bnxt_qplib_get_depth(que, wqe_mode, true);
}
static inline u32 bnxt_qplib_set_sq_max_slot(u8 wqe_mode)
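For reference, a minimal sketch of the slot arithmetic introduced in bnxt_qplib_get_depth() above, assuming 16-byte sq_sge slots and the 256-slot alignment constant; all names here are illustrative:

#define SLOT_SIZE      16u   /* assumed sizeof(struct sq_sge) */
#define VAR_SLOT_ALIGN 256u  /* BNXT_VAR_MAX_SLOT_ALIGN */

/* Depth in slots; variable-WQE SQs are padded to a 256-slot boundary */
static u32 queue_depth_sketch(u32 wqe_size, u32 max_wqe, bool variable_sq)
{
        u32 slots = (wqe_size * max_wqe) / SLOT_SIZE;

        if (variable_sq)
                slots = (slots + VAR_SLOT_ALIGN - 1) & ~(VAR_SLOT_ALIGN - 1);
        return slots;
}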

View File

@@ -523,6 +523,12 @@ static inline bool _is_hw_retx_supported(u16 dev_cap_flags)
#define BNXT_RE_HW_RETX(a) _is_hw_retx_supported((a))
static inline bool _is_host_msn_table(u16 dev_cap_ext_flags2)
{
return (dev_cap_ext_flags2 & CREQ_QUERY_FUNC_RESP_SB_REQ_RETRANSMISSION_SUPPORT_MASK) ==
CREQ_QUERY_FUNC_RESP_SB_REQ_RETRANSMISSION_SUPPORT_HOST_MSN_TABLE;
}
static inline u8 bnxt_qplib_dbr_pacing_en(struct bnxt_qplib_chip_ctx *cctx)
{
return cctx->modes.dbr_pacing;

View File

@@ -95,11 +95,13 @@ int bnxt_qplib_get_dev_attr(struct bnxt_qplib_rcfw *rcfw,
struct bnxt_qplib_cmdqmsg msg = {};
struct creq_query_func_resp_sb *sb;
struct bnxt_qplib_rcfw_sbuf sbuf;
struct bnxt_qplib_chip_ctx *cctx;
struct cmdq_query_func req = {};
u8 *tqm_alloc;
int i, rc;
u32 temp;
cctx = rcfw->res->cctx;
bnxt_qplib_rcfw_cmd_prep((struct cmdq_base *)&req,
CMDQ_BASE_OPCODE_QUERY_FUNC,
sizeof(req));
@@ -127,14 +129,21 @@ int bnxt_qplib_get_dev_attr(struct bnxt_qplib_rcfw *rcfw,
attr->max_qp_init_rd_atom =
sb->max_qp_init_rd_atom > BNXT_QPLIB_MAX_OUT_RD_ATOM ?
BNXT_QPLIB_MAX_OUT_RD_ATOM : sb->max_qp_init_rd_atom;
attr->max_qp_wqes = le16_to_cpu(sb->max_qp_wr);
/*
* 128 WQEs need to be reserved for the HW (8916). Prevent
* reporting the max number
*/
attr->max_qp_wqes -= BNXT_QPLIB_RESERVED_QP_WRS + 1;
attr->max_qp_sges = bnxt_qplib_is_chip_gen_p5_p7(rcfw->res->cctx) ?
6 : sb->max_sge;
attr->max_qp_wqes = le16_to_cpu(sb->max_qp_wr) - 1;
if (!bnxt_qplib_is_chip_gen_p5_p7(rcfw->res->cctx)) {
/*
* 128 WQEs need to be reserved for the HW (8916). Prevent
* reporting the max number on legacy devices
*/
attr->max_qp_wqes -= BNXT_QPLIB_RESERVED_QP_WRS + 1;
}
/* Adjust for max_qp_wqes for variable wqe */
if (cctx->modes.wqe_mode == BNXT_QPLIB_WQE_MODE_VARIABLE)
attr->max_qp_wqes = BNXT_VAR_MAX_WQE - 1;
attr->max_qp_sges = cctx->modes.wqe_mode == BNXT_QPLIB_WQE_MODE_VARIABLE ?
min_t(u32, sb->max_sge_var_wqe, BNXT_VAR_MAX_SGE) : 6;
attr->max_cq = le32_to_cpu(sb->max_cq);
attr->max_cq_wqes = le32_to_cpu(sb->max_cqe);
if (!bnxt_qplib_is_chip_gen_p7(rcfw->res->cctx))
@@ -165,6 +174,7 @@ int bnxt_qplib_get_dev_attr(struct bnxt_qplib_rcfw *rcfw,
attr->max_sgid = le32_to_cpu(sb->max_gid);
attr->max_sgid = min_t(u32, BNXT_QPLIB_NUM_GIDS_SUPPORTED, 2 * attr->max_sgid);
attr->dev_cap_flags = le16_to_cpu(sb->dev_cap_flags);
attr->dev_cap_flags2 = le16_to_cpu(sb->dev_cap_ext_flags_2);
bnxt_qplib_query_version(rcfw, attr->fw_ver);

View File

@@ -40,6 +40,7 @@
#ifndef __BNXT_QPLIB_SP_H__
#define __BNXT_QPLIB_SP_H__
#include <rdma/bnxt_re-abi.h>
#define BNXT_QPLIB_RESERVED_QP_WRS 128
struct bnxt_qplib_dev_attr {
@@ -73,6 +74,7 @@ struct bnxt_qplib_dev_attr {
u8 tqm_alloc_reqs[MAX_TQM_ALLOC_REQ];
bool is_atomic;
u16 dev_cap_flags;
u16 dev_cap_flags2;
u32 max_dpi;
};
@@ -351,4 +353,11 @@ int bnxt_qplib_qext_stat(struct bnxt_qplib_rcfw *rcfw, u32 fid,
int bnxt_qplib_modify_cc(struct bnxt_qplib_res *res,
struct bnxt_qplib_cc_param *cc_param);
#define BNXT_VAR_MAX_WQE 4352
#define BNXT_VAR_MAX_SLOT_ALIGN 256
#define BNXT_VAR_MAX_SGE 13
#define BNXT_RE_MAX_RQ_WQES 65536
#define BNXT_STATIC_MAX_SGE 6
#endif /* __BNXT_QPLIB_SP_H__*/

View File

@@ -2157,8 +2157,36 @@ struct creq_query_func_resp_sb {
__le32 tqm_alloc_reqs[12];
__le32 max_dpi;
u8 max_sge_var_wqe;
u8 reserved_8;
u8 dev_cap_ext_flags;
#define CREQ_QUERY_FUNC_RESP_SB_ATOMIC_OPS_NOT_SUPPORTED 0x1UL
#define CREQ_QUERY_FUNC_RESP_SB_DRV_VERSION_RGTR_SUPPORTED 0x2UL
#define CREQ_QUERY_FUNC_RESP_SB_CREATE_QP_BATCH_SUPPORTED 0x4UL
#define CREQ_QUERY_FUNC_RESP_SB_DESTROY_QP_BATCH_SUPPORTED 0x8UL
#define CREQ_QUERY_FUNC_RESP_SB_ROCE_STATS_EXT_CTX_SUPPORTED 0x10UL
#define CREQ_QUERY_FUNC_RESP_SB_CREATE_SRQ_SGE_SUPPORTED 0x20UL
#define CREQ_QUERY_FUNC_RESP_SB_FIXED_SIZE_WQE_DISABLED 0x40UL
#define CREQ_QUERY_FUNC_RESP_SB_DCN_SUPPORTED 0x80UL
__le16 max_inline_data_var_wqe;
__le32 start_qid;
u8 max_msn_table_size;
u8 reserved8_1;
__le16 dev_cap_ext_flags_2;
#define CREQ_QUERY_FUNC_RESP_SB_OPTIMIZE_MODIFY_QP_SUPPORTED 0x1UL
#define CREQ_QUERY_FUNC_RESP_SB_CHANGE_UDP_SRC_PORT_WQE_SUPPORTED 0x2UL
#define CREQ_QUERY_FUNC_RESP_SB_CQ_COALESCING_SUPPORTED 0x4UL
#define CREQ_QUERY_FUNC_RESP_SB_MEMORY_REGION_RO_SUPPORTED 0x8UL
#define CREQ_QUERY_FUNC_RESP_SB_REQ_RETRANSMISSION_SUPPORT_MASK 0x30UL
#define CREQ_QUERY_FUNC_RESP_SB_REQ_RETRANSMISSION_SUPPORT_SFT 4
#define CREQ_QUERY_FUNC_RESP_SB_REQ_RETRANSMISSION_SUPPORT_HOST_PSN_TABLE (0x0UL << 4)
#define CREQ_QUERY_FUNC_RESP_SB_REQ_RETRANSMISSION_SUPPORT_HOST_MSN_TABLE (0x1UL << 4)
#define CREQ_QUERY_FUNC_RESP_SB_REQ_RETRANSMISSION_SUPPORT_IQM_MSN_TABLE (0x2UL << 4)
#define CREQ_QUERY_FUNC_RESP_SB_REQ_RETRANSMISSION_SUPPORT_LAST \
CREQ_QUERY_FUNC_RESP_SB_REQ_RETRANSMISSION_SUPPORT_IQM_MSN_TABLE
__le16 max_xp_qp_size;
__le16 create_qp_batch_size;
__le16 destroy_qp_batch_size;
__le16 reserved16;
__le64 reserved64;
};
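Given the REQ_RETRANSMISSION_SUPPORT definitions above, the _is_host_msn_table() check earlier in this series reduces to decoding a 2-bit field at bits [5:4] of dev_cap_ext_flags_2. A hedged, standalone sketch of that decode (enum and function names are ours):

enum retx_scheme {              /* values per the defines above */
        RETX_HOST_PSN_TABLE = 0,
        RETX_HOST_MSN_TABLE = 1,
        RETX_IQM_MSN_TABLE  = 2,
};

static enum retx_scheme retx_decode_sketch(u16 dev_cap_ext_flags2)
{
        return (enum retx_scheme)((dev_cap_ext_flags2 & 0x30) >> 4);
}

/* e.g. is_host_msn_tbl = (retx_decode_sketch(flags2) == RETX_HOST_MSN_TABLE) */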
/* cmdq_set_func_resources (size:448b/56B) */

View File

@@ -153,8 +153,7 @@ int hns_roce_get_kmem_bufs(struct hns_roce_dev *hr_dev, dma_addr_t *bufs,
return total;
}
int hns_roce_get_umem_bufs(struct hns_roce_dev *hr_dev, dma_addr_t *bufs,
int buf_cnt, struct ib_umem *umem,
int hns_roce_get_umem_bufs(dma_addr_t *bufs, int buf_cnt, struct ib_umem *umem,
unsigned int page_shift)
{
struct ib_block_iter biter;

View File

@@ -133,14 +133,12 @@ static int alloc_cqc(struct hns_roce_dev *hr_dev, struct hns_roce_cq *hr_cq)
struct hns_roce_cq_table *cq_table = &hr_dev->cq_table;
struct ib_device *ibdev = &hr_dev->ib_dev;
u64 mtts[MTT_MIN_COUNT] = {};
dma_addr_t dma_handle;
int ret;
ret = hns_roce_mtr_find(hr_dev, &hr_cq->mtr, 0, mtts, ARRAY_SIZE(mtts),
&dma_handle);
if (!ret) {
ret = hns_roce_mtr_find(hr_dev, &hr_cq->mtr, 0, mtts, ARRAY_SIZE(mtts));
if (ret) {
ibdev_err(ibdev, "failed to find CQ mtr, ret = %d.\n", ret);
return -EINVAL;
return ret;
}
/* Get CQC memory HEM(Hardware Entry Memory) table */
@@ -157,7 +155,8 @@ static int alloc_cqc(struct hns_roce_dev *hr_dev, struct hns_roce_cq *hr_cq)
goto err_put;
}
ret = hns_roce_create_cqc(hr_dev, hr_cq, mtts, dma_handle);
ret = hns_roce_create_cqc(hr_dev, hr_cq, mtts,
hns_roce_get_mtr_ba(&hr_cq->mtr));
if (ret)
goto err_xa;

View File

@@ -892,8 +892,7 @@ struct hns_roce_hw {
int (*rereg_write_mtpt)(struct hns_roce_dev *hr_dev,
struct hns_roce_mr *mr, int flags,
void *mb_buf);
int (*frmr_write_mtpt)(struct hns_roce_dev *hr_dev, void *mb_buf,
struct hns_roce_mr *mr);
int (*frmr_write_mtpt)(void *mb_buf, struct hns_roce_mr *mr);
int (*mw_write_mtpt)(void *mb_buf, struct hns_roce_mw *mw);
void (*write_cqc)(struct hns_roce_dev *hr_dev,
struct hns_roce_cq *hr_cq, void *mb_buf, u64 *mtts,
@@ -1129,8 +1128,13 @@ void hns_roce_cmd_use_polling(struct hns_roce_dev *hr_dev);
/* hns roce hw need current block and next block addr from mtt */
#define MTT_MIN_COUNT 2
static inline dma_addr_t hns_roce_get_mtr_ba(struct hns_roce_mtr *mtr)
{
return mtr->hem_cfg.root_ba;
}
int hns_roce_mtr_find(struct hns_roce_dev *hr_dev, struct hns_roce_mtr *mtr,
u32 offset, u64 *mtt_buf, int mtt_max, u64 *base_addr);
u32 offset, u64 *mtt_buf, int mtt_max);
int hns_roce_mtr_create(struct hns_roce_dev *hr_dev, struct hns_roce_mtr *mtr,
struct hns_roce_buf_attr *buf_attr,
unsigned int page_shift, struct ib_udata *udata,
@@ -1188,7 +1192,7 @@ struct hns_roce_buf *hns_roce_buf_alloc(struct hns_roce_dev *hr_dev, u32 size,
int hns_roce_get_kmem_bufs(struct hns_roce_dev *hr_dev, dma_addr_t *bufs,
int buf_cnt, struct hns_roce_buf *buf,
unsigned int page_shift);
int hns_roce_get_umem_bufs(struct hns_roce_dev *hr_dev, dma_addr_t *bufs,
int hns_roce_get_umem_bufs(dma_addr_t *bufs,
int buf_cnt, struct ib_umem *umem,
unsigned int page_shift);

View File

@@ -986,6 +986,7 @@ struct hns_roce_hem_item {
size_t count; /* max ba numbers */
int start; /* start buf offset in this hem */
int end; /* end buf offset in this hem */
bool exist_bt;
};
/* All HEM items are linked in a tree structure */
@@ -1014,6 +1015,7 @@ hem_list_alloc_item(struct hns_roce_dev *hr_dev, int start, int end, int count,
}
}
hem->exist_bt = exist_bt;
hem->count = count;
hem->start = start;
hem->end = end;
@@ -1024,34 +1026,32 @@ hem_list_alloc_item(struct hns_roce_dev *hr_dev, int start, int end, int count,
}
static void hem_list_free_item(struct hns_roce_dev *hr_dev,
struct hns_roce_hem_item *hem, bool exist_bt)
struct hns_roce_hem_item *hem)
{
if (exist_bt)
if (hem->exist_bt)
dma_free_coherent(hr_dev->dev, hem->count * BA_BYTE_LEN,
hem->addr, hem->dma_addr);
kfree(hem);
}
static void hem_list_free_all(struct hns_roce_dev *hr_dev,
struct list_head *head, bool exist_bt)
struct list_head *head)
{
struct hns_roce_hem_item *hem, *temp_hem;
list_for_each_entry_safe(hem, temp_hem, head, list) {
list_del(&hem->list);
hem_list_free_item(hr_dev, hem, exist_bt);
hem_list_free_item(hr_dev, hem);
}
}
static void hem_list_link_bt(struct hns_roce_dev *hr_dev, void *base_addr,
u64 table_addr)
static void hem_list_link_bt(void *base_addr, u64 table_addr)
{
*(u64 *)(base_addr) = table_addr;
}
/* assign L0 table address to hem from root bt */
static void hem_list_assign_bt(struct hns_roce_dev *hr_dev,
struct hns_roce_hem_item *hem, void *cpu_addr,
static void hem_list_assign_bt(struct hns_roce_hem_item *hem, void *cpu_addr,
u64 phy_addr)
{
hem->addr = cpu_addr;
@@ -1141,6 +1141,10 @@ int hns_roce_hem_list_calc_root_ba(const struct hns_roce_buf_region *regions,
for (i = 0; i < region_cnt; i++) {
r = (struct hns_roce_buf_region *)&regions[i];
/* when r->hopnum = 0, the region should not occupy root_ba. */
if (!r->hopnum)
continue;
if (r->hopnum > 1) {
step = hem_list_calc_ba_range(r->hopnum, 1, unit);
if (step > 0)
@@ -1222,8 +1226,7 @@ static int hem_list_alloc_mid_bt(struct hns_roce_dev *hr_dev,
if (level > 1) {
pre = hem_ptrs[level - 1];
step = (cur->start - pre->start) / step * BA_BYTE_LEN;
hem_list_link_bt(hr_dev, pre->addr + step,
cur->dma_addr);
hem_list_link_bt(pre->addr + step, cur->dma_addr);
}
}
@@ -1235,7 +1238,7 @@ static int hem_list_alloc_mid_bt(struct hns_roce_dev *hr_dev,
err_exit:
for (level = 1; level < hopnum; level++)
hem_list_free_all(hr_dev, &temp_list[level], true);
hem_list_free_all(hr_dev, &temp_list[level]);
return ret;
}
@@ -1276,16 +1279,26 @@ static int alloc_fake_root_bt(struct hns_roce_dev *hr_dev, void *cpu_base,
{
struct hns_roce_hem_item *hem;
/* This is on the has_mtt branch; if r->hopnum
* is 0, there is no root_ba to reuse for the
* region's fake hem, so a dma_alloc request is
* necessary here.
*/
hem = hem_list_alloc_item(hr_dev, r->offset, r->offset + r->count - 1,
r->count, false);
r->count, !r->hopnum);
if (!hem)
return -ENOMEM;
hem_list_assign_bt(hr_dev, hem, cpu_base, phy_base);
/* The root_ba can be reused only when r->hopnum > 0. */
if (r->hopnum)
hem_list_assign_bt(hem, cpu_base, phy_base);
list_add(&hem->list, branch_head);
list_add(&hem->sibling, leaf_head);
return r->count;
/* If r->hopnum == 0, 0 is returned,
* so that the root_bt entry is not occupied.
*/
return r->hopnum ? r->count : 0;
}
static int setup_middle_bt(struct hns_roce_dev *hr_dev, void *cpu_base,
@@ -1304,7 +1317,7 @@ static int setup_middle_bt(struct hns_roce_dev *hr_dev, void *cpu_base,
/* if exist mid bt, link L1 to L0 */
list_for_each_entry_safe(hem, temp_hem, branch_head, list) {
offset = (hem->start - r->offset) / step * BA_BYTE_LEN;
hem_list_link_bt(hr_dev, cpu_base + offset, hem->dma_addr);
hem_list_link_bt(cpu_base + offset, hem->dma_addr);
total++;
}
@@ -1329,7 +1342,7 @@ setup_root_hem(struct hns_roce_dev *hr_dev, struct hns_roce_hem_list *hem_list,
return -ENOMEM;
total = 0;
for (i = 0; i < region_cnt && total < max_ba_num; i++) {
for (i = 0; i < region_cnt && total <= max_ba_num; i++) {
r = &regions[i];
if (!r->count)
continue;
@@ -1395,9 +1408,9 @@ static int hem_list_alloc_root_bt(struct hns_roce_dev *hr_dev,
region_cnt);
if (ret) {
for (i = 0; i < region_cnt; i++)
hem_list_free_all(hr_dev, &head.branch[i], false);
hem_list_free_all(hr_dev, &head.branch[i]);
hem_list_free_all(hr_dev, &head.root, true);
hem_list_free_all(hr_dev, &head.root);
}
return ret;
@@ -1460,10 +1473,9 @@ void hns_roce_hem_list_release(struct hns_roce_dev *hr_dev,
for (i = 0; i < HNS_ROCE_MAX_BT_REGION; i++)
for (j = 0; j < HNS_ROCE_MAX_BT_LEVEL; j++)
hem_list_free_all(hr_dev, &hem_list->mid_bt[i][j],
j != 0);
hem_list_free_all(hr_dev, &hem_list->mid_bt[i][j]);
hem_list_free_all(hr_dev, &hem_list->root_bt, true);
hem_list_free_all(hr_dev, &hem_list->root_bt);
INIT_LIST_HEAD(&hem_list->btm_bt);
hem_list->root_ba = 0;
}

View File

@@ -471,7 +471,7 @@ static inline int set_ud_wqe(struct hns_roce_qp *qp,
valid_num_sge = calc_wr_sge_num(wr, &msg_len);
ret = set_ud_opcode(ud_sq_wqe, wr);
if (WARN_ON(ret))
if (WARN_ON_ONCE(ret))
return ret;
ud_sq_wqe->msg_len = cpu_to_le32(msg_len);
@@ -575,7 +575,7 @@ static inline int set_rc_wqe(struct hns_roce_qp *qp,
rc_sq_wqe->msg_len = cpu_to_le32(msg_len);
ret = set_rc_opcode(hr_dev, rc_sq_wqe, wr);
if (WARN_ON(ret))
if (WARN_ON_ONCE(ret))
return ret;
hr_reg_write(rc_sq_wqe, RC_SEND_WQE_SO,
@@ -673,6 +673,10 @@ static void write_dwqe(struct hns_roce_dev *hr_dev, struct hns_roce_qp *qp,
#define HNS_ROCE_SL_SHIFT 2
struct hns_roce_v2_rc_send_wqe *rc_sq_wqe = wqe;
if (unlikely(qp->state == IB_QPS_ERR)) {
flush_cqe(hr_dev, qp);
return;
}
/* All kinds of DirectWQE have the same header field layout */
hr_reg_enable(rc_sq_wqe, RC_SEND_WQE_FLAG);
hr_reg_write(rc_sq_wqe, RC_SEND_WQE_DB_SL_L, qp->sl);
@@ -3181,21 +3185,22 @@ static int set_mtpt_pbl(struct hns_roce_dev *hr_dev,
u64 pages[HNS_ROCE_V2_MAX_INNER_MTPT_NUM] = { 0 };
struct ib_device *ibdev = &hr_dev->ib_dev;
dma_addr_t pbl_ba;
int i, count;
int ret;
int i;
count = hns_roce_mtr_find(hr_dev, &mr->pbl_mtr, 0, pages,
min_t(int, ARRAY_SIZE(pages), mr->npages),
&pbl_ba);
if (count < 1) {
ibdev_err(ibdev, "failed to find PBL mtr, count = %d.\n",
count);
return -ENOBUFS;
ret = hns_roce_mtr_find(hr_dev, &mr->pbl_mtr, 0, pages,
min_t(int, ARRAY_SIZE(pages), mr->npages));
if (ret) {
ibdev_err(ibdev, "failed to find PBL mtr, ret = %d.\n", ret);
return ret;
}
/* Aligned to the hardware address access unit */
for (i = 0; i < count; i++)
for (i = 0; i < ARRAY_SIZE(pages); i++)
pages[i] >>= 6;
pbl_ba = hns_roce_get_mtr_ba(&mr->pbl_mtr);
mpt_entry->pbl_size = cpu_to_le32(mr->npages);
mpt_entry->pbl_ba_l = cpu_to_le32(pbl_ba >> 3);
hr_reg_write(mpt_entry, MPT_PBL_BA_H, upper_32_bits(pbl_ba >> 3));
@@ -3291,21 +3296,14 @@ static int hns_roce_v2_rereg_write_mtpt(struct hns_roce_dev *hr_dev,
return ret;
}
static int hns_roce_v2_frmr_write_mtpt(struct hns_roce_dev *hr_dev,
void *mb_buf, struct hns_roce_mr *mr)
static int hns_roce_v2_frmr_write_mtpt(void *mb_buf, struct hns_roce_mr *mr)
{
struct ib_device *ibdev = &hr_dev->ib_dev;
dma_addr_t pbl_ba = hns_roce_get_mtr_ba(&mr->pbl_mtr);
struct hns_roce_v2_mpt_entry *mpt_entry;
dma_addr_t pbl_ba = 0;
mpt_entry = mb_buf;
memset(mpt_entry, 0, sizeof(*mpt_entry));
if (hns_roce_mtr_find(hr_dev, &mr->pbl_mtr, 0, NULL, 0, &pbl_ba) < 0) {
ibdev_err(ibdev, "failed to find frmr mtr.\n");
return -ENOBUFS;
}
hr_reg_write(mpt_entry, MPT_ST, V2_MPT_ST_FREE);
hr_reg_write(mpt_entry, MPT_PD, mr->pd);
@@ -4213,8 +4211,7 @@ static void set_access_flags(struct hns_roce_qp *hr_qp,
}
static void set_qpc_wqe_cnt(struct hns_roce_qp *hr_qp,
struct hns_roce_v2_qp_context *context,
struct hns_roce_v2_qp_context *qpc_mask)
struct hns_roce_v2_qp_context *context)
{
hr_reg_write(context, QPC_SGE_SHIFT,
to_hr_hem_entries_shift(hr_qp->sge.sge_cnt,
@@ -4236,7 +4233,6 @@ static inline int get_pdn(struct ib_pd *ib_pd)
}
static void modify_qp_reset_to_init(struct ib_qp *ibqp,
const struct ib_qp_attr *attr,
struct hns_roce_v2_qp_context *context,
struct hns_roce_v2_qp_context *qpc_mask)
{
@@ -4255,7 +4251,7 @@ static void modify_qp_reset_to_init(struct ib_qp *ibqp,
hr_reg_write(context, QPC_RQWS, ilog2(hr_qp->rq.max_gs));
set_qpc_wqe_cnt(hr_qp, context, qpc_mask);
set_qpc_wqe_cnt(hr_qp, context);
/* No VLAN need to set 0xFFF */
hr_reg_write(context, QPC_VLAN_ID, 0xfff);
@@ -4296,7 +4292,6 @@ static void modify_qp_reset_to_init(struct ib_qp *ibqp,
}
static void modify_qp_init_to_init(struct ib_qp *ibqp,
const struct ib_qp_attr *attr,
struct hns_roce_v2_qp_context *context,
struct hns_roce_v2_qp_context *qpc_mask)
{
@@ -4333,17 +4328,20 @@ static int config_qp_rq_buf(struct hns_roce_dev *hr_dev,
{
u64 mtts[MTT_MIN_COUNT] = { 0 };
u64 wqe_sge_ba;
int count;
int ret;
/* Search qp buf's mtts */
count = hns_roce_mtr_find(hr_dev, &hr_qp->mtr, hr_qp->rq.offset, mtts,
MTT_MIN_COUNT, &wqe_sge_ba);
if (hr_qp->rq.wqe_cnt && count < 1) {
ret = hns_roce_mtr_find(hr_dev, &hr_qp->mtr, hr_qp->rq.offset, mtts,
MTT_MIN_COUNT);
if (hr_qp->rq.wqe_cnt && ret) {
ibdev_err(&hr_dev->ib_dev,
"failed to find RQ WQE, QPN = 0x%lx.\n", hr_qp->qpn);
return -EINVAL;
"failed to find QP(0x%lx) RQ WQE buf, ret = %d.\n",
hr_qp->qpn, ret);
return ret;
}
wqe_sge_ba = hns_roce_get_mtr_ba(&hr_qp->mtr);
context->wqe_sge_ba = cpu_to_le32(wqe_sge_ba >> 3);
qpc_mask->wqe_sge_ba = 0;
@@ -4407,23 +4405,23 @@ static int config_qp_sq_buf(struct hns_roce_dev *hr_dev,
struct ib_device *ibdev = &hr_dev->ib_dev;
u64 sge_cur_blk = 0;
u64 sq_cur_blk = 0;
int count;
int ret;
/* search qp buf's mtts */
count = hns_roce_mtr_find(hr_dev, &hr_qp->mtr, 0, &sq_cur_blk, 1, NULL);
if (count < 1) {
ibdev_err(ibdev, "failed to find QP(0x%lx) SQ buf.\n",
hr_qp->qpn);
return -EINVAL;
ret = hns_roce_mtr_find(hr_dev, &hr_qp->mtr, hr_qp->sq.offset,
&sq_cur_blk, 1);
if (ret) {
ibdev_err(ibdev, "failed to find QP(0x%lx) SQ WQE buf, ret = %d.\n",
hr_qp->qpn, ret);
return ret;
}
if (hr_qp->sge.sge_cnt > 0) {
count = hns_roce_mtr_find(hr_dev, &hr_qp->mtr,
hr_qp->sge.offset,
&sge_cur_blk, 1, NULL);
if (count < 1) {
ibdev_err(ibdev, "failed to find QP(0x%lx) SGE buf.\n",
hr_qp->qpn);
return -EINVAL;
ret = hns_roce_mtr_find(hr_dev, &hr_qp->mtr,
hr_qp->sge.offset, &sge_cur_blk, 1);
if (ret) {
ibdev_err(ibdev, "failed to find QP(0x%lx) SGE buf, ret = %d.\n",
hr_qp->qpn, ret);
return ret;
}
}
@@ -4614,8 +4612,7 @@ static int modify_qp_init_to_rtr(struct ib_qp *ibqp,
return 0;
}
static int modify_qp_rtr_to_rts(struct ib_qp *ibqp,
const struct ib_qp_attr *attr, int attr_mask,
static int modify_qp_rtr_to_rts(struct ib_qp *ibqp, int attr_mask,
struct hns_roce_v2_qp_context *context,
struct hns_roce_v2_qp_context *qpc_mask)
{
@@ -4984,15 +4981,14 @@ static int hns_roce_v2_set_abs_fields(struct ib_qp *ibqp,
if (cur_state == IB_QPS_RESET && new_state == IB_QPS_INIT) {
memset(qpc_mask, 0, hr_dev->caps.qpc_sz);
modify_qp_reset_to_init(ibqp, attr, context, qpc_mask);
modify_qp_reset_to_init(ibqp, context, qpc_mask);
} else if (cur_state == IB_QPS_INIT && new_state == IB_QPS_INIT) {
modify_qp_init_to_init(ibqp, attr, context, qpc_mask);
modify_qp_init_to_init(ibqp, context, qpc_mask);
} else if (cur_state == IB_QPS_INIT && new_state == IB_QPS_RTR) {
ret = modify_qp_init_to_rtr(ibqp, attr, attr_mask, context,
qpc_mask, udata);
} else if (cur_state == IB_QPS_RTR && new_state == IB_QPS_RTS) {
ret = modify_qp_rtr_to_rts(ibqp, attr, attr_mask, context,
qpc_mask);
ret = modify_qp_rtr_to_rts(ibqp, attr_mask, context, qpc_mask);
}
return ret;
@@ -5550,18 +5546,20 @@ static int hns_roce_v2_write_srqc_index_queue(struct hns_roce_srq *srq,
struct ib_device *ibdev = srq->ibsrq.device;
struct hns_roce_dev *hr_dev = to_hr_dev(ibdev);
u64 mtts_idx[MTT_MIN_COUNT] = {};
dma_addr_t dma_handle_idx = 0;
dma_addr_t dma_handle_idx;
int ret;
/* Get physical address of idx que buf */
ret = hns_roce_mtr_find(hr_dev, &idx_que->mtr, 0, mtts_idx,
ARRAY_SIZE(mtts_idx), &dma_handle_idx);
if (ret < 1) {
ARRAY_SIZE(mtts_idx));
if (ret) {
ibdev_err(ibdev, "failed to find mtr for SRQ idx, ret = %d.\n",
ret);
return -ENOBUFS;
return ret;
}
dma_handle_idx = hns_roce_get_mtr_ba(&idx_que->mtr);
hr_reg_write(ctx, SRQC_IDX_HOP_NUM,
to_hr_hem_hopnum(hr_dev->caps.idx_hop_num, srq->wqe_cnt));
@@ -5593,20 +5591,22 @@ static int hns_roce_v2_write_srqc(struct hns_roce_srq *srq, void *mb_buf)
struct hns_roce_dev *hr_dev = to_hr_dev(ibdev);
struct hns_roce_srq_context *ctx = mb_buf;
u64 mtts_wqe[MTT_MIN_COUNT] = {};
dma_addr_t dma_handle_wqe = 0;
dma_addr_t dma_handle_wqe;
int ret;
memset(ctx, 0, sizeof(*ctx));
/* Get the physical address of srq buf */
ret = hns_roce_mtr_find(hr_dev, &srq->buf_mtr, 0, mtts_wqe,
ARRAY_SIZE(mtts_wqe), &dma_handle_wqe);
if (ret < 1) {
ARRAY_SIZE(mtts_wqe));
if (ret) {
ibdev_err(ibdev, "failed to find mtr for SRQ WQE, ret = %d.\n",
ret);
return -ENOBUFS;
return ret;
}
dma_handle_wqe = hns_roce_get_mtr_ba(&srq->buf_mtr);
hr_reg_write(ctx, SRQC_SRQ_ST, 1);
hr_reg_write_bool(ctx, SRQC_SRQ_TYPE,
srq->ibsrq.srq_type == IB_SRQT_XRC);
@@ -6327,7 +6327,7 @@ static int config_eqc(struct hns_roce_dev *hr_dev, struct hns_roce_eq *eq,
u64 eqe_ba[MTT_MIN_COUNT] = { 0 };
struct hns_roce_eq_context *eqc;
u64 bt_ba = 0;
int count;
int ret;
eqc = mb_buf;
memset(eqc, 0, sizeof(struct hns_roce_eq_context));
@@ -6335,13 +6335,15 @@ static int config_eqc(struct hns_roce_dev *hr_dev, struct hns_roce_eq *eq,
init_eq_config(hr_dev, eq);
/* if not multi-hop, eqe buffer only use one trunk */
count = hns_roce_mtr_find(hr_dev, &eq->mtr, 0, eqe_ba, MTT_MIN_COUNT,
&bt_ba);
if (count < 1) {
dev_err(hr_dev->dev, "failed to find EQE mtr\n");
return -ENOBUFS;
ret = hns_roce_mtr_find(hr_dev, &eq->mtr, 0, eqe_ba,
ARRAY_SIZE(eqe_ba));
if (ret) {
dev_err(hr_dev->dev, "failed to find EQE mtr, ret = %d\n", ret);
return ret;
}
bt_ba = hns_roce_get_mtr_ba(&eq->mtr);
hr_reg_write(eqc, EQC_EQ_ST, HNS_ROCE_V2_EQ_STATE_VALID);
hr_reg_write(eqc, EQC_EQE_HOP_NUM, eq->hop_num);
hr_reg_write(eqc, EQC_OVER_IGNORE, eq->over_ignore);

View File

@@ -154,7 +154,7 @@ static int hns_roce_mr_enable(struct hns_roce_dev *hr_dev,
if (mr->type != MR_TYPE_FRMR)
ret = hr_dev->hw->write_mtpt(hr_dev, mailbox->buf, mr);
else
ret = hr_dev->hw->frmr_write_mtpt(hr_dev, mailbox->buf, mr);
ret = hr_dev->hw->frmr_write_mtpt(mailbox->buf, mr);
if (ret) {
dev_err(dev, "failed to write mtpt, ret = %d.\n", ret);
goto err_page;
@@ -714,7 +714,7 @@ static int mtr_map_bufs(struct hns_roce_dev *hr_dev, struct hns_roce_mtr *mtr,
return -ENOMEM;
if (mtr->umem)
npage = hns_roce_get_umem_bufs(hr_dev, pages, page_count,
npage = hns_roce_get_umem_bufs(pages, page_count,
mtr->umem, page_shift);
else
npage = hns_roce_get_kmem_bufs(hr_dev, pages, page_count,
@@ -767,11 +767,6 @@ int hns_roce_mtr_map(struct hns_roce_dev *hr_dev, struct hns_roce_mtr *mtr,
for (i = 0, mapped_cnt = 0; i < mtr->hem_cfg.region_count &&
mapped_cnt < page_cnt; i++) {
r = &mtr->hem_cfg.region[i];
/* if hopnum is 0, no need to map pages in this region */
if (!r->hopnum) {
mapped_cnt += r->count;
continue;
}
if (r->offset + r->count > page_cnt) {
ret = -EINVAL;
@@ -802,47 +797,53 @@ int hns_roce_mtr_map(struct hns_roce_dev *hr_dev, struct hns_roce_mtr *mtr,
return ret;
}
int hns_roce_mtr_find(struct hns_roce_dev *hr_dev, struct hns_roce_mtr *mtr,
u32 offset, u64 *mtt_buf, int mtt_max, u64 *base_addr)
static int hns_roce_get_direct_addr_mtt(struct hns_roce_hem_cfg *cfg,
u32 start_index, u64 *mtt_buf,
int mtt_cnt)
{
struct hns_roce_hem_cfg *cfg = &mtr->hem_cfg;
int mtt_count, left;
u32 start_index;
int mtt_count;
int total = 0;
__le64 *mtts;
u32 npage;
u64 addr;
if (!mtt_buf || mtt_max < 1)
goto done;
if (mtt_cnt > cfg->region_count)
return -EINVAL;
/* no mtt memory in direct mode, so just return the buffer address */
if (cfg->is_direct) {
start_index = offset >> HNS_HW_PAGE_SHIFT;
for (mtt_count = 0; mtt_count < cfg->region_count &&
total < mtt_max; mtt_count++) {
npage = cfg->region[mtt_count].offset;
if (npage < start_index)
continue;
for (mtt_count = 0; mtt_count < cfg->region_count && total < mtt_cnt;
mtt_count++) {
npage = cfg->region[mtt_count].offset;
if (npage < start_index)
continue;
addr = cfg->root_ba + (npage << HNS_HW_PAGE_SHIFT);
mtt_buf[total] = addr;
addr = cfg->root_ba + (npage << HNS_HW_PAGE_SHIFT);
mtt_buf[total] = addr;
total++;
}
goto done;
total++;
}
start_index = offset >> cfg->buf_pg_shift;
left = mtt_max;
if (!total)
return -ENOENT;
return 0;
}
static int hns_roce_get_mhop_mtt(struct hns_roce_dev *hr_dev,
struct hns_roce_mtr *mtr, u32 start_index,
u64 *mtt_buf, int mtt_cnt)
{
int left = mtt_cnt;
int total = 0;
int mtt_count;
__le64 *mtts;
u32 npage;
while (left > 0) {
mtt_count = 0;
mtts = hns_roce_hem_list_find_mtt(hr_dev, &mtr->hem_list,
start_index + total,
&mtt_count);
if (!mtts || !mtt_count)
goto done;
break;
npage = min(mtt_count, left);
left -= npage;
@@ -850,11 +851,33 @@ int hns_roce_mtr_find(struct hns_roce_dev *hr_dev, struct hns_roce_mtr *mtr,
mtt_buf[total++] = le64_to_cpu(mtts[mtt_count]);
}
done:
if (base_addr)
*base_addr = cfg->root_ba;
if (!total)
return -ENOENT;
return total;
return 0;
}
int hns_roce_mtr_find(struct hns_roce_dev *hr_dev, struct hns_roce_mtr *mtr,
u32 offset, u64 *mtt_buf, int mtt_max)
{
struct hns_roce_hem_cfg *cfg = &mtr->hem_cfg;
u32 start_index;
int ret;
if (!mtt_buf || mtt_max < 1)
return -EINVAL;
/* no mtt memory in direct mode, so just return the buffer address */
if (cfg->is_direct) {
start_index = offset >> HNS_HW_PAGE_SHIFT;
ret = hns_roce_get_direct_addr_mtt(cfg, start_index,
mtt_buf, mtt_max);
} else {
start_index = offset >> cfg->buf_pg_shift;
ret = hns_roce_get_mhop_mtt(hr_dev, mtr, start_index,
mtt_buf, mtt_max);
}
return ret;
}
static int mtr_init_buf_cfg(struct hns_roce_dev *hr_dev,
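After this split, callers no longer receive the root base address through an output parameter; they read it with hns_roce_get_mtr_ba() instead. A hypothetical caller showing the new pattern, mirroring the alloc_cqc() hunk earlier:

static int example_read_mtr(struct hns_roce_dev *hr_dev,
                            struct hns_roce_mtr *mtr)
{
        u64 mtts[MTT_MIN_COUNT] = {};
        dma_addr_t root_ba;
        int ret;

        ret = hns_roce_mtr_find(hr_dev, mtr, 0, mtts, ARRAY_SIZE(mtts));
        if (ret)        /* now 0 or a negative errno, never a count */
                return ret;

        root_ba = hns_roce_get_mtr_ba(mtr);
        /* program mtts[] and root_ba into the hardware context here */
        return 0;
}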

View File

@@ -1075,7 +1075,6 @@ static int set_qp_param(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp,
}
static int hns_roce_create_qp_common(struct hns_roce_dev *hr_dev,
struct ib_pd *ib_pd,
struct ib_qp_init_attr *init_attr,
struct ib_udata *udata,
struct hns_roce_qp *hr_qp)
@@ -1229,7 +1228,6 @@ int hns_roce_create_qp(struct ib_qp *qp, struct ib_qp_init_attr *init_attr,
struct ib_device *ibdev = qp->device;
struct hns_roce_dev *hr_dev = to_hr_dev(ibdev);
struct hns_roce_qp *hr_qp = to_hr_qp(qp);
struct ib_pd *pd = qp->pd;
int ret;
ret = check_qp_type(hr_dev, init_attr->qp_type, !!udata);
@@ -1244,7 +1242,7 @@ int hns_roce_create_qp(struct ib_qp *qp, struct ib_qp_init_attr *init_attr,
hr_qp->phy_port = hr_dev->iboe.phy_port[hr_qp->port];
}
ret = hns_roce_create_qp_common(hr_dev, pd, init_attr, udata, hr_qp);
ret = hns_roce_create_qp_common(hr_dev, init_attr, udata, hr_qp);
if (ret)
ibdev_err(ibdev, "create QP type 0x%x failed(%d)\n",
init_attr->qp_type, ret);

View File

@@ -249,7 +249,7 @@ static void free_srq_wqe_buf(struct hns_roce_dev *hr_dev,
hns_roce_mtr_destroy(hr_dev, &srq->buf_mtr);
}
static int alloc_srq_wrid(struct hns_roce_dev *hr_dev, struct hns_roce_srq *srq)
static int alloc_srq_wrid(struct hns_roce_srq *srq)
{
srq->wrid = kvmalloc_array(srq->wqe_cnt, sizeof(u64), GFP_KERNEL);
if (!srq->wrid)
@@ -365,7 +365,7 @@ static int alloc_srq_buf(struct hns_roce_dev *hr_dev, struct hns_roce_srq *srq,
goto err_idx;
if (!udata) {
ret = alloc_srq_wrid(hr_dev, srq);
ret = alloc_srq_wrid(srq);
if (ret)
goto err_wqe_buf;
}

View File

@@ -3372,7 +3372,8 @@ static int mlx5_ib_init_multiport_master(struct mlx5_ib_dev *dev)
list_for_each_entry(mpi, &mlx5_ib_unaffiliated_port_list,
list) {
if (dev->sys_image_guid == mpi->sys_image_guid &&
(mlx5_core_native_port_num(mpi->mdev) - 1) == i) {
(mlx5_core_native_port_num(mpi->mdev) - 1) == i &&
mlx5_core_same_coredev_type(dev->mdev, mpi->mdev)) {
bound = mlx5_ib_bind_slave_port(dev, mpi);
}
@@ -4406,7 +4407,8 @@ static int mlx5r_mp_probe(struct auxiliary_device *adev,
mutex_lock(&mlx5_ib_multiport_mutex);
list_for_each_entry(dev, &mlx5_ib_dev_list, ib_dev_list) {
if (dev->sys_image_guid == mpi->sys_image_guid)
if (dev->sys_image_guid == mpi->sys_image_guid &&
mlx5_core_same_coredev_type(dev->mdev, mpi->mdev))
bound = mlx5_ib_bind_slave_port(dev, mpi);
if (bound) {

View File

@@ -346,6 +346,7 @@ static int send_io_resp_imm(struct rtrs_srv_con *con, struct rtrs_srv_op *id,
struct rtrs_srv_mr *srv_mr;
bool need_inval = false;
enum ib_send_flags flags;
struct ib_sge list;
u32 imm;
int err;
@@ -398,7 +399,6 @@ static int send_io_resp_imm(struct rtrs_srv_con *con, struct rtrs_srv_op *id,
imm = rtrs_to_io_rsp_imm(id->msg_id, errno, need_inval);
imm_wr.wr.next = NULL;
if (always_invalidate) {
struct ib_sge list;
struct rtrs_msg_rkey_rsp *msg;
srv_mr = &srv_path->mrs[id->msg_id];

View File

@@ -64,7 +64,7 @@ static void gic_check_cpu_features(void)
union gic_base {
void __iomem *common_base;
void __percpu * __iomem *percpu_base;
void __iomem * __percpu *percpu_base;
};
struct gic_chip_data {

View File

@@ -91,6 +91,14 @@ struct pcc_chan_reg {
* @cmd_update: PCC register bundle for the command complete update register
* @error: PCC register bundle for the error status register
* @plat_irq: platform interrupt
* @type: PCC subspace type
* @plat_irq_flags: platform interrupt flags
* @chan_in_use: this flag is used just to check if the interrupt needs
* handling when it is shared. Since only one transfer can occur
* at a time and mailbox takes care of locking, this flag can be
* accessed without a lock. Note: only subspace types that
* communicate from OSPM to Platform (such as type 3) use it;
* other types ignore it.
*/
struct pcc_chan_info {
struct pcc_mbox_chan chan;
@@ -100,12 +108,17 @@ struct pcc_chan_info {
struct pcc_chan_reg cmd_update;
struct pcc_chan_reg error;
int plat_irq;
u8 type;
unsigned int plat_irq_flags;
bool chan_in_use;
};
#define to_pcc_chan_info(c) container_of(c, struct pcc_chan_info, chan)
static struct pcc_chan_info *chan_info;
static int pcc_chan_count;
static int pcc_send_data(struct mbox_chan *chan, void *data);
/*
* PCC can be used with perf critical drivers such as CPPC
* So it makes sense to locally cache the virtual address and
@@ -221,6 +234,70 @@ static int pcc_map_interrupt(u32 interrupt, u32 flags)
return acpi_register_gsi(NULL, interrupt, trigger, polarity);
}
static bool pcc_chan_plat_irq_can_be_shared(struct pcc_chan_info *pchan)
{
return (pchan->plat_irq_flags & ACPI_PCCT_INTERRUPT_MODE) ==
ACPI_LEVEL_SENSITIVE;
}
static bool pcc_mbox_cmd_complete_check(struct pcc_chan_info *pchan)
{
u64 val;
int ret;
ret = pcc_chan_reg_read(&pchan->cmd_complete, &val);
if (ret)
return false;
if (!pchan->cmd_complete.gas)
return true;
/*
* Judge whether the channel responded to the interrupt based on the
* value of command complete.
*/
val &= pchan->cmd_complete.status_mask;
/*
* If this is a PCC slave subspace channel, a command complete value
* of 0 indicates that the Platform is sending a notification and OSPM
* needs to respond to this interrupt to process the command.
*/
if (pchan->type == ACPI_PCCT_TYPE_EXT_PCC_SLAVE_SUBSPACE)
return !val;
return !!val;
}
static void check_and_ack(struct pcc_chan_info *pchan, struct mbox_chan *chan)
{
struct acpi_pcct_ext_pcc_shared_memory pcc_hdr;
if (pchan->type != ACPI_PCCT_TYPE_EXT_PCC_SLAVE_SUBSPACE)
return;
/* If the memory region has not been mapped, we cannot
* determine if we need to send the message, but we still
* need to set the cmd_update flag before returning.
*/
if (pchan->chan.shmem == NULL) {
pcc_chan_reg_read_modify_write(&pchan->cmd_update);
return;
}
memcpy_fromio(&pcc_hdr, pchan->chan.shmem,
sizeof(struct acpi_pcct_ext_pcc_shared_memory));
/*
* The PCC slave subspace channel needs to set the command complete bit
* after processing message. If the PCC_ACK_FLAG is set, it should also
* ring the doorbell.
*
* The PCC master subspace channel clears chan_in_use to free channel.
*/
if (le32_to_cpup(&pcc_hdr.flags) & PCC_ACK_FLAG_MASK)
pcc_send_data(chan, NULL);
else
pcc_chan_reg_read_modify_write(&pchan->cmd_update);
}
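The polarity rule encoded above can be summarized in one line: for a type-4 (slave) subspace, a command-complete value of 0 means the Platform has posted a command, while for every other type a non-zero value means the command finished. A minimal illustrative sketch (names are ours):

static bool chan_responded_sketch(u64 cmd_complete, bool slave_subspace)
{
        return slave_subspace ? !cmd_complete : !!cmd_complete;
}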
/**
* pcc_mbox_irq - PCC mailbox interrupt handler
* @irq: interrupt number
@@ -236,16 +313,12 @@ static irqreturn_t pcc_mbox_irq(int irq, void *p)
int ret;
pchan = chan->con_priv;
ret = pcc_chan_reg_read(&pchan->cmd_complete, &val);
if (ret)
if (pchan->type == ACPI_PCCT_TYPE_EXT_PCC_MASTER_SUBSPACE &&
!pchan->chan_in_use)
return IRQ_NONE;
if (val) { /* Ensure GAS exists and value is non-zero */
val &= pchan->cmd_complete.status_mask;
if (!val)
return IRQ_NONE;
}
if (!pcc_mbox_cmd_complete_check(pchan))
return IRQ_NONE;
ret = pcc_chan_reg_read(&pchan->error, &val);
if (ret)
@@ -262,6 +335,9 @@ static irqreturn_t pcc_mbox_irq(int irq, void *p)
mbox_chan_received_data(chan, NULL);
check_and_ack(pchan, chan);
pchan->chan_in_use = false;
return IRQ_HANDLED;
}
@@ -311,14 +387,37 @@ EXPORT_SYMBOL_GPL(pcc_mbox_request_channel);
void pcc_mbox_free_channel(struct pcc_mbox_chan *pchan)
{
struct mbox_chan *chan = pchan->mchan;
struct pcc_chan_info *pchan_info;
struct pcc_mbox_chan *pcc_mbox_chan;
if (!chan || !chan->cl)
return;
pchan_info = chan->con_priv;
pcc_mbox_chan = &pchan_info->chan;
if (pcc_mbox_chan->shmem) {
iounmap(pcc_mbox_chan->shmem);
pcc_mbox_chan->shmem = NULL;
}
mbox_free_channel(chan);
}
EXPORT_SYMBOL_GPL(pcc_mbox_free_channel);
int pcc_mbox_ioremap(struct mbox_chan *chan)
{
struct pcc_chan_info *pchan_info;
struct pcc_mbox_chan *pcc_mbox_chan;
if (!chan || !chan->cl)
return -1;
pchan_info = chan->con_priv;
pcc_mbox_chan = &pchan_info->chan;
pcc_mbox_chan->shmem = ioremap(pcc_mbox_chan->shmem_base_addr,
pcc_mbox_chan->shmem_size);
return 0;
}
EXPORT_SYMBOL_GPL(pcc_mbox_ioremap);
/**
* pcc_send_data - Called from Mailbox Controller code. Used
* here only to ring the channel doorbell. The PCC client
@@ -340,7 +439,11 @@ static int pcc_send_data(struct mbox_chan *chan, void *data)
if (ret)
return ret;
return pcc_chan_reg_read_modify_write(&pchan->db);
ret = pcc_chan_reg_read_modify_write(&pchan->db);
if (!ret && pchan->plat_irq > 0)
pchan->chan_in_use = true;
return ret;
}
/**
@@ -353,11 +456,14 @@ static int pcc_send_data(struct mbox_chan *chan, void *data)
static int pcc_startup(struct mbox_chan *chan)
{
struct pcc_chan_info *pchan = chan->con_priv;
unsigned long irqflags;
int rc;
if (pchan->plat_irq > 0) {
rc = devm_request_irq(chan->mbox->dev, pchan->plat_irq, pcc_mbox_irq, 0,
MBOX_IRQ_NAME, chan);
irqflags = pcc_chan_plat_irq_can_be_shared(pchan) ?
IRQF_SHARED | IRQF_ONESHOT : 0;
rc = devm_request_irq(chan->mbox->dev, pchan->plat_irq, pcc_mbox_irq,
irqflags, MBOX_IRQ_NAME, chan);
if (unlikely(rc)) {
dev_err(chan->mbox->dev, "failed to register PCC interrupt %d\n",
pchan->plat_irq);
@@ -463,6 +569,7 @@ static int pcc_parse_subspace_irq(struct pcc_chan_info *pchan,
pcct_ss->platform_interrupt);
return -EINVAL;
}
pchan->plat_irq_flags = pcct_ss->flags;
if (pcct_ss->header.type == ACPI_PCCT_TYPE_HW_REDUCED_SUBSPACE_TYPE2) {
struct acpi_pcct_hw_reduced_type2 *pcct2_ss = (void *)pcct_ss;
@@ -484,6 +591,12 @@ static int pcc_parse_subspace_irq(struct pcc_chan_info *pchan,
"PLAT IRQ ACK");
}
if (pcc_chan_plat_irq_can_be_shared(pchan) &&
!pchan->plat_irq_ack.gas) {
pr_err("PCC subspace has level IRQ with no ACK register\n");
return -EINVAL;
}
return ret;
}
@@ -698,6 +811,7 @@ static int pcc_mbox_probe(struct platform_device *pdev)
pcc_parse_subspace_shmem(pchan, pcct_entry);
pchan->type = pcct_entry->type;
pcct_entry = (struct acpi_subtable_header *)
((unsigned long) pcct_entry + pcct_entry->length);
}

View File

@@ -2519,6 +2519,28 @@ static const struct usb_device_id uvc_ids[] = {
.bInterfaceSubClass = 1,
.bInterfaceProtocol = UVC_PC_PROTOCOL_15,
.driver_info = (kernel_ulong_t)&uvc_ctrl_power_line_limited },
/* Quanta ACER HD User Facing */
{ .match_flags = USB_DEVICE_ID_MATCH_DEVICE
| USB_DEVICE_ID_MATCH_INT_INFO,
.idVendor = 0x0408,
.idProduct = 0x4033,
.bInterfaceClass = USB_CLASS_VIDEO,
.bInterfaceSubClass = 1,
.bInterfaceProtocol = UVC_PC_PROTOCOL_15,
.driver_info = (kernel_ulong_t)&(const struct uvc_device_info){
.uvc_version = 0x010a,
} },
/* Quanta ACER HD User Facing */
{ .match_flags = USB_DEVICE_ID_MATCH_DEVICE
| USB_DEVICE_ID_MATCH_INT_INFO,
.idVendor = 0x0408,
.idProduct = 0x4035,
.bInterfaceClass = USB_CLASS_VIDEO,
.bInterfaceSubClass = 1,
.bInterfaceProtocol = UVC_PC_PROTOCOL_15,
.driver_info = (kernel_ulong_t)&(const struct uvc_device_info){
.uvc_version = 0x010a,
} },
/* LogiLink Wireless Webcam */
{ .match_flags = USB_DEVICE_ID_MATCH_DEVICE
| USB_DEVICE_ID_MATCH_INT_INFO,

View File

@@ -1867,20 +1867,20 @@ static int sdhci_msm_program_key(struct cqhci_host *cq_host,
struct sdhci_msm_host *msm_host = sdhci_pltfm_priv(pltfm_host);
union cqhci_crypto_cap_entry cap;
if (!(cfg->config_enable & CQHCI_CRYPTO_CONFIGURATION_ENABLE))
return qcom_ice_evict_key(msm_host->ice, slot);
/* Only AES-256-XTS has been tested so far. */
cap = cq_host->crypto_cap_array[cfg->crypto_cap_idx];
if (cap.algorithm_id != CQHCI_CRYPTO_ALG_AES_XTS ||
cap.key_size != CQHCI_CRYPTO_KEY_SIZE_256)
return -EINVAL;
if (cfg->config_enable & CQHCI_CRYPTO_CONFIGURATION_ENABLE)
return qcom_ice_program_key(msm_host->ice,
QCOM_ICE_CRYPTO_ALG_AES_XTS,
QCOM_ICE_CRYPTO_KEY_SIZE_256,
cfg->crypto_key,
cfg->data_unit_size, slot);
else
return qcom_ice_evict_key(msm_host->ice, slot);
return qcom_ice_program_key(msm_host->ice,
QCOM_ICE_CRYPTO_ALG_AES_XTS,
QCOM_ICE_CRYPTO_KEY_SIZE_256,
cfg->crypto_key,
cfg->data_unit_size, slot);
}
#else /* CONFIG_MMC_CRYPTO */

View File

@@ -2,7 +2,7 @@
/*
* Microchip KSZ9477 switch driver main logic
*
* Copyright (C) 2017-2019 Microchip Technology Inc.
* Copyright (C) 2017-2024 Microchip Technology Inc.
*/
#include <linux/kernel.h>
@@ -916,26 +916,51 @@ void ksz9477_get_caps(struct ksz_device *dev, int port,
int ksz9477_set_ageing_time(struct ksz_device *dev, unsigned int msecs)
{
u32 secs = msecs / 1000;
u8 value;
u8 data;
u8 data, mult, value;
u32 max_val;
int ret;
value = FIELD_GET(SW_AGE_PERIOD_7_0_M, secs);
#define MAX_TIMER_VAL ((1 << 8) - 1)
ret = ksz_write8(dev, REG_SW_LUE_CTRL_3, value);
if (ret < 0)
return ret;
/* The aging timer comprises a 3-bit multiplier and an 8-bit second
* value. Neither of them can be zero. The maximum timer is then
* 7 * 255 = 1785 seconds.
*/
if (!secs)
secs = 1;
data = FIELD_GET(SW_AGE_PERIOD_10_8_M, secs);
/* Return error if too large. */
else if (secs > 7 * MAX_TIMER_VAL)
return -EINVAL;
ret = ksz_read8(dev, REG_SW_LUE_CTRL_0, &value);
if (ret < 0)
return ret;
value &= ~SW_AGE_CNT_M;
value |= FIELD_PREP(SW_AGE_CNT_M, data);
/* Check whether there is need to update the multiplier. */
mult = FIELD_GET(SW_AGE_CNT_M, value);
max_val = MAX_TIMER_VAL;
if (mult > 0) {
/* Try to use the same multiplier already in the register as
* the hardware default uses multiplier 4 and 75 seconds for
* 300 seconds.
*/
max_val = DIV_ROUND_UP(secs, mult);
if (max_val > MAX_TIMER_VAL || max_val * mult != secs)
max_val = MAX_TIMER_VAL;
}
return ksz_write8(dev, REG_SW_LUE_CTRL_0, value);
data = DIV_ROUND_UP(secs, max_val);
if (mult != data) {
value &= ~SW_AGE_CNT_M;
value |= FIELD_PREP(SW_AGE_CNT_M, data);
ret = ksz_write8(dev, REG_SW_LUE_CTRL_0, value);
if (ret < 0)
return ret;
}
value = DIV_ROUND_UP(secs, data);
return ksz_write8(dev, REG_SW_LUE_CTRL_3, value);
}
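A worked example of the split above, under the same DIV_ROUND_UP semantics: for secs = 300 with the hardware-default multiplier 4 already programmed, max_val = 75, so the multiplier stays 4 and the period register gets 75; with no multiplier set, the result is mult = 2, period = 150. An illustrative-only sketch:

#define MAX_PERIOD 255u /* 8-bit seconds field */

static void split_ageing_sketch(u32 secs, u32 cur_mult,
                                u32 *mult, u32 *period)
{
        u32 max_val = MAX_PERIOD;

        if (cur_mult > 0) {
                /* reuse the programmed multiplier when it divides evenly */
                max_val = DIV_ROUND_UP(secs, cur_mult);
                if (max_val > MAX_PERIOD || max_val * cur_mult != secs)
                        max_val = MAX_PERIOD;
        }
        *mult = DIV_ROUND_UP(secs, max_val);
        *period = DIV_ROUND_UP(secs, *mult);
}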
void ksz9477_port_queue_split(struct ksz_device *dev, int port)

View File

@@ -2,7 +2,7 @@
/*
* Microchip KSZ9477 register definitions
*
* Copyright (C) 2017-2018 Microchip Technology Inc.
* Copyright (C) 2017-2024 Microchip Technology Inc.
*/
#ifndef __KSZ9477_REGS_H
@@ -190,8 +190,6 @@
#define SW_VLAN_ENABLE BIT(7)
#define SW_DROP_INVALID_VID BIT(6)
#define SW_AGE_CNT_M GENMASK(5, 3)
#define SW_AGE_CNT_S 3
#define SW_AGE_PERIOD_10_8_M GENMASK(10, 8)
#define SW_RESV_MCAST_ENABLE BIT(2)
#define SW_HASH_OPTION_M 0x03
#define SW_HASH_OPTION_CRC 1

View File

@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0
/* Microchip LAN937X switch driver main logic
* Copyright (C) 2019-2022 Microchip Technology Inc.
* Copyright (C) 2019-2024 Microchip Technology Inc.
*/
#include <linux/kernel.h>
#include <linux/module.h>
@@ -257,10 +257,66 @@ int lan937x_change_mtu(struct ksz_device *dev, int port, int new_mtu)
int lan937x_set_ageing_time(struct ksz_device *dev, unsigned int msecs)
{
u32 secs = msecs / 1000;
u32 value;
u8 data, mult, value8;
bool in_msec = false;
u32 max_val, value;
u32 secs = msecs;
int ret;
#define MAX_TIMER_VAL ((1 << 20) - 1)
/* The aging timer comprises a 3-bit multiplier and a 20-bit second
* value. Neither of them can be zero. The maximum timer is then
* 7 * 1048575 = 7340025 seconds. As this value is too large for
* practical use, it can be interpreted as microseconds, making the
* maximum timer 7340 seconds with finer control. This allows for a
* maximum of 122 minutes compared to 29 minutes in the KSZ9477 switch.
*/
if (msecs % 1000)
in_msec = true;
else
secs /= 1000;
if (!secs)
secs = 1;
/* Return error if too large. */
else if (secs > 7 * MAX_TIMER_VAL)
return -EINVAL;
/* Configure how to interpret the number value. */
ret = ksz_rmw8(dev, REG_SW_LUE_CTRL_2, SW_AGE_CNT_IN_MICROSEC,
in_msec ? SW_AGE_CNT_IN_MICROSEC : 0);
if (ret < 0)
return ret;
ret = ksz_read8(dev, REG_SW_LUE_CTRL_0, &value8);
if (ret < 0)
return ret;
/* Check whether there is need to update the multiplier. */
mult = FIELD_GET(SW_AGE_CNT_M, value8);
max_val = MAX_TIMER_VAL;
if (mult > 0) {
/* Try to use the same multiplier already in the register as
* the hardware default uses multiplier 4 and 75 seconds for
* 300 seconds.
*/
max_val = DIV_ROUND_UP(secs, mult);
if (max_val > MAX_TIMER_VAL || max_val * mult != secs)
max_val = MAX_TIMER_VAL;
}
data = DIV_ROUND_UP(secs, max_val);
if (mult != data) {
value8 &= ~SW_AGE_CNT_M;
value8 |= FIELD_PREP(SW_AGE_CNT_M, data);
ret = ksz_write8(dev, REG_SW_LUE_CTRL_0, value8);
if (ret < 0)
return ret;
}
secs = DIV_ROUND_UP(secs, data);
value = FIELD_GET(SW_AGE_PERIOD_7_0_M, secs);
ret = ksz_write8(dev, REG_SW_AGE_PERIOD__1, value);
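The LAN937x path reuses the same multiplier/period split but first decides the counter unit: inputs that are not whole seconds keep their finer resolution via the SW_AGE_CNT_IN_MICROSEC mode. A small illustrative sketch of that unit selection (names are ours):

static u32 age_count_sketch(unsigned int msecs, bool *finer_unit)
{
        u32 count;

        *finer_unit = (msecs % 1000) != 0;      /* e.g. 1500 -> true */
        count = *finer_unit ? msecs : msecs / 1000;
        return count ? count : 1;               /* the field must not be 0 */
}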

View File

@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0 */
/* Microchip LAN937X switch register definitions
* Copyright (C) 2019-2021 Microchip Technology Inc.
* Copyright (C) 2019-2024 Microchip Technology Inc.
*/
#ifndef __LAN937X_REG_H
#define __LAN937X_REG_H
@@ -48,8 +48,7 @@
#define SW_VLAN_ENABLE BIT(7)
#define SW_DROP_INVALID_VID BIT(6)
#define SW_AGE_CNT_M 0x7
#define SW_AGE_CNT_S 3
#define SW_AGE_CNT_M GENMASK(5, 3)
#define SW_RESV_MCAST_ENABLE BIT(2)
#define REG_SW_LUE_CTRL_1 0x0311
@@ -62,6 +61,10 @@
#define SW_FAST_AGING BIT(1)
#define SW_LINK_AUTO_AGING BIT(0)
#define REG_SW_LUE_CTRL_2 0x0312
#define SW_AGE_CNT_IN_MICROSEC BIT(7)
#define REG_SW_AGE_PERIOD__1 0x0313
#define SW_AGE_PERIOD_7_0_M GENMASK(7, 0)

View File

@@ -1967,7 +1967,11 @@ static int bcm_sysport_open(struct net_device *dev)
unsigned int i;
int ret;
clk_prepare_enable(priv->clk);
ret = clk_prepare_enable(priv->clk);
if (ret) {
netdev_err(dev, "could not enable priv clock\n");
return ret;
}
/* Reset UniMAC */
umac_reset(priv);
@@ -2625,7 +2629,11 @@ static int bcm_sysport_probe(struct platform_device *pdev)
goto err_deregister_notifier;
}
clk_prepare_enable(priv->clk);
ret = clk_prepare_enable(priv->clk);
if (ret) {
dev_err(&pdev->dev, "could not enable priv clock\n");
goto err_deregister_netdev;
}
priv->rev = topctrl_readl(priv, REV_CNTL) & REV_MASK;
dev_info(&pdev->dev,
@@ -2639,6 +2647,8 @@ static int bcm_sysport_probe(struct platform_device *pdev)
return 0;
err_deregister_netdev:
unregister_netdev(dev);
err_deregister_notifier:
unregister_netdevice_notifier(&priv->netdev_notifier);
err_deregister_fixed_link:
@@ -2810,7 +2820,12 @@ static int __maybe_unused bcm_sysport_resume(struct device *d)
if (!netif_running(dev))
return 0;
clk_prepare_enable(priv->clk);
ret = clk_prepare_enable(priv->clk);
if (ret) {
netdev_err(dev, "could not enable priv clock\n");
return ret;
}
if (priv->wolopts)
clk_disable_unprepare(priv->wol_clk);

View File

@@ -1528,8 +1528,8 @@ static int gve_xsk_pool_enable(struct net_device *dev,
if (err)
return err;
/* If XDP prog is not installed, return */
if (!priv->xdp_prog)
/* If XDP prog is not installed or interface is down, return. */
if (!priv->xdp_prog || !netif_running(dev))
return 0;
rx = &priv->rx[qid];
@@ -1574,21 +1574,16 @@ static int gve_xsk_pool_disable(struct net_device *dev,
if (qid >= priv->rx_cfg.num_queues)
return -EINVAL;
/* If XDP prog is not installed, unmap DMA and return */
if (!priv->xdp_prog)
/* If XDP prog is not installed or interface is down, unmap DMA and
* return.
*/
if (!priv->xdp_prog || !netif_running(dev))
goto done;
tx_qid = gve_xdp_tx_queue_id(priv, qid);
if (!netif_running(dev)) {
priv->rx[qid].xsk_pool = NULL;
xdp_rxq_info_unreg(&priv->rx[qid].xsk_rxq);
priv->tx[tx_qid].xsk_pool = NULL;
goto done;
}
napi_rx = &priv->ntfy_blocks[priv->rx[qid].ntfy_id].napi;
napi_disable(napi_rx); /* make sure current rx poll is done */
tx_qid = gve_xdp_tx_queue_id(priv, qid);
napi_tx = &priv->ntfy_blocks[priv->tx[tx_qid].ntfy_id].napi;
napi_disable(napi_tx); /* make sure current tx poll is done */
@@ -1616,6 +1611,9 @@ static int gve_xsk_wakeup(struct net_device *dev, u32 queue_id, u32 flags)
struct gve_priv *priv = netdev_priv(dev);
int tx_queue_id = gve_xdp_tx_queue_id(priv, queue_id);
if (!gve_get_napi_enabled(priv))
return -ENETDOWN;
if (queue_id >= priv->rx_cfg.num_queues || !priv->xdp_prog)
return -EINVAL;
@@ -1757,6 +1755,9 @@ static void gve_turndown(struct gve_priv *priv)
gve_clear_napi_enabled(priv);
gve_clear_report_stats(priv);
/* Make sure that all traffic is finished processing. */
synchronize_net();
}
static void gve_turnup(struct gve_priv *priv)

View File

@@ -777,9 +777,12 @@ int gve_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **frames,
struct gve_tx_ring *tx;
int i, err = 0, qid;
if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK))
if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK) || !priv->xdp_prog)
return -EINVAL;
if (!gve_get_napi_enabled(priv))
return -ENETDOWN;
qid = gve_xdp_tx_queue_id(priv,
smp_processor_id() % priv->num_xdp_queues);

View File

@@ -2708,9 +2708,15 @@ static struct platform_device *port_platdev[3];
static void mv643xx_eth_shared_of_remove(void)
{
struct mv643xx_eth_platform_data *pd;
int n;
for (n = 0; n < 3; n++) {
if (!port_platdev[n])
continue;
pd = dev_get_platdata(&port_platdev[n]->dev);
if (pd)
of_node_put(pd->phy_node);
platform_device_del(port_platdev[n]);
port_platdev[n] = NULL;
}
@@ -2773,8 +2779,10 @@ static int mv643xx_eth_shared_of_add_port(struct platform_device *pdev,
}
ppdev = platform_device_alloc(MV643XX_ETH_NAME, dev_num);
if (!ppdev)
return -ENOMEM;
if (!ppdev) {
ret = -ENOMEM;
goto put_err;
}
ppdev->dev.coherent_dma_mask = DMA_BIT_MASK(32);
ppdev->dev.of_node = pnp;
@@ -2796,6 +2804,8 @@ static int mv643xx_eth_shared_of_add_port(struct platform_device *pdev,
port_err:
platform_device_put(ppdev);
put_err:
of_node_put(ppd.phy_node);
return ret;
}

View File

@@ -129,6 +129,7 @@ static const struct pci_device_id sky2_id_table[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x436C) }, /* 88E8072 */
{ PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x436D) }, /* 88E8055 */
{ PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4370) }, /* 88E8075 */
{ PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4373) }, /* 88E8075 */
{ PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4380) }, /* 88E8057 */
{ PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4381) }, /* 88E8059 */
{ PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4382) }, /* 88E8079 */

View File

@@ -339,9 +339,13 @@ static int mlx5e_macsec_init_sa_fs(struct macsec_context *ctx,
{
struct mlx5e_priv *priv = macsec_netdev_priv(ctx->netdev);
struct mlx5_macsec_fs *macsec_fs = priv->mdev->macsec_fs;
const struct macsec_tx_sc *tx_sc = &ctx->secy->tx_sc;
struct mlx5_macsec_rule_attrs rule_attrs;
union mlx5_macsec_rule *macsec_rule;
if (is_tx && tx_sc->encoding_sa != sa->assoc_num)
return 0;
rule_attrs.macsec_obj_id = sa->macsec_obj_id;
rule_attrs.sci = sa->sci;
rule_attrs.assoc_num = sa->assoc_num;

View File

@@ -150,11 +150,11 @@ void mlx5_esw_ipsec_restore_dest_uplink(struct mlx5_core_dev *mdev)
unsigned long i;
int err;
xa_for_each(&esw->offloads.vport_reps, i, rep) {
rpriv = rep->rep_data[REP_ETH].priv;
if (!rpriv || !rpriv->netdev)
mlx5_esw_for_each_rep(esw, i, rep) {
if (atomic_read(&rep->rep_data[REP_ETH].state) != REP_LOADED)
continue;
rpriv = rep->rep_data[REP_ETH].priv;
rhashtable_walk_enter(&rpriv->tc_ht, &iter);
rhashtable_walk_start(&iter);
while ((flow = rhashtable_walk_next(&iter)) != NULL) {

View File

@@ -713,6 +713,9 @@ void mlx5e_tc_clean_fdb_peer_flows(struct mlx5_eswitch *esw);
MLX5_CAP_GEN_2((esw->dev), ec_vf_vport_base) +\
(last) - 1)
#define mlx5_esw_for_each_rep(esw, i, rep) \
xa_for_each(&((esw)->offloads.vport_reps), i, rep)
struct mlx5_eswitch *__must_check
mlx5_devlink_eswitch_get(struct devlink *devlink);

View File

@@ -52,9 +52,6 @@
#include "lag/lag.h"
#include "en/tc/post_meter.h"
#define mlx5_esw_for_each_rep(esw, i, rep) \
xa_for_each(&((esw)->offloads.vport_reps), i, rep)
/* There are two match-all miss flows, one for unicast dst mac and
* one for multicast.
*/

View File

@@ -3478,6 +3478,7 @@ void mlx5_fs_core_free(struct mlx5_core_dev *dev)
int mlx5_fs_core_alloc(struct mlx5_core_dev *dev)
{
struct mlx5_flow_steering *steering;
char name[80];
int err = 0;
err = mlx5_init_fc_stats(dev);
@@ -3502,10 +3503,12 @@ int mlx5_fs_core_alloc(struct mlx5_core_dev *dev)
else
steering->mode = MLX5_FLOW_STEERING_MODE_DMFS;
steering->fgs_cache = kmem_cache_create("mlx5_fs_fgs",
snprintf(name, sizeof(name), "%s-mlx5_fs_fgs", dev_name(dev->device));
steering->fgs_cache = kmem_cache_create(name,
sizeof(struct mlx5_flow_group), 0,
0, NULL);
steering->ftes_cache = kmem_cache_create("mlx5_fs_ftes", sizeof(struct fs_fte), 0,
snprintf(name, sizeof(name), "%s-mlx5_fs_ftes", dev_name(dev->device));
steering->ftes_cache = kmem_cache_create(name, sizeof(struct fs_fte), 0,
0, NULL);
if (!steering->ftes_cache || !steering->fgs_cache) {
err = -ENOMEM;

View File

@@ -1067,7 +1067,6 @@ static struct mlx5dr_cq *dr_create_cq(struct mlx5_core_dev *mdev,
int inlen, err, eqn;
void *cqc, *in;
__be64 *pas;
int vector;
u32 i;
cq = kzalloc(sizeof(*cq), GFP_KERNEL);
@@ -1096,8 +1095,7 @@ static struct mlx5dr_cq *dr_create_cq(struct mlx5_core_dev *mdev,
if (!in)
goto err_cqwq;
vector = raw_smp_processor_id() % mlx5_comp_vectors_max(mdev);
err = mlx5_comp_eqn_get(mdev, vector, &eqn);
err = mlx5_comp_eqn_get(mdev, 0, &eqn);
if (err) {
kvfree(in);
goto err_cqwq;

View File

@@ -423,8 +423,7 @@ mlxsw_sp_span_gretap4_route(const struct net_device *to_dev,
parms = mlxsw_sp_ipip_netdev_parms4(to_dev);
ip_tunnel_init_flow(&fl4, parms.iph.protocol, *daddrp, *saddrp,
0, 0, dev_net(to_dev), parms.link, tun->fwmark, 0,
0);
0, 0, tun->net, parms.link, tun->fwmark, 0, 0);
rt = ip_route_output_key(tun->net, &fl4);
if (IS_ERR(rt))

View File

@@ -1632,8 +1632,9 @@ static netdev_tx_t rswitch_start_xmit(struct sk_buff *skb, struct net_device *nd
if (dma_mapping_error(ndev->dev.parent, dma_addr_orig))
goto err_kfree;
gq->skbs[gq->cur] = skb;
gq->unmap_addrs[gq->cur] = dma_addr_orig;
/* Store the skb at the last descriptor to avoid freeing the skb before hardware completes the send */
gq->skbs[(gq->cur + nr_desc - 1) % gq->ring_size] = skb;
gq->unmap_addrs[(gq->cur + nr_desc - 1) % gq->ring_size] = dma_addr_orig;
dma_wmb();

View File

@@ -16,7 +16,7 @@ static int efx_tc_flow_block(enum tc_setup_type type, void *type_data,
void *cb_priv);
static const struct rhashtable_params efx_tc_ct_zone_ht_params = {
.key_len = offsetof(struct efx_tc_ct_zone, linkage),
.key_len = sizeof_field(struct efx_tc_ct_zone, zone),
.key_offset = 0,
.head_offset = offsetof(struct efx_tc_ct_zone, linkage),
};
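The fix above narrows the hash key to exactly the zone field instead of everything up to the rhash_head, which previously pulled unrelated members and padding into the hash. A hypothetical layout making the distinction concrete:

struct example_ct_zone {                /* illustrative only */
        u16 zone;                       /* lookup key, at offset 0 */
        /* ... other driver state, not part of the key ... */
        struct rhash_head linkage;      /* chaining, must stay out of the key */
};

/* correct: .key_len = sizeof_field(struct example_ct_zone, zone)  */
/* wrong:   .key_len = offsetof(struct example_ct_zone, linkage)   */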

View File

@@ -296,62 +296,80 @@ out:
 }
 
 /**
- * stmmac_dt_phy - parse device-tree driver parameters to allocate PHY resources
- * @plat: driver data platform structure
- * @np: device tree node
- * @dev: device pointer
- * Description:
- * The mdio bus will be allocated in case of a phy transceiver is on board;
- * it will be NULL if the fixed-link is configured.
- * If there is the "snps,dwmac-mdio" sub-node the mdio will be allocated
- * in any case (for DSA, mdio must be registered even if fixed-link).
- * The table below sums the supported configurations:
- *	-------------------------------
- *	snps,phy-addr	|     Y
- *	-------------------------------
- *	phy-handle	|     Y
- *	-------------------------------
- *	fixed-link	|     N
- *	-------------------------------
- *	snps,dwmac-mdio	|
- *	  even if	|     Y
- *	fixed-link	|
- *	-------------------------------
+ * stmmac_of_get_mdio() - Gets the MDIO bus from the devicetree.
+ * @np: devicetree node
  *
- * It returns 0 in case of success otherwise -ENODEV.
+ * The MDIO bus will be searched for in the following ways:
+ * 1. The compatible is "snps,dwc-qos-ethernet-4.10" && a "mdio" named
+ *    child node exists
+ * 2. A child node with the "snps,dwmac-mdio" compatible is present
+ *
+ * Return: The MDIO node if present otherwise NULL
 */
-static int stmmac_dt_phy(struct plat_stmmacenet_data *plat,
-			 struct device_node *np, struct device *dev)
+static struct device_node *stmmac_of_get_mdio(struct device_node *np)
 {
-	bool mdio = !of_phy_is_fixed_link(np);
 	static const struct of_device_id need_mdio_ids[] = {
 		{ .compatible = "snps,dwc-qos-ethernet-4.10" },
 		{},
 	};
+	struct device_node *mdio_node = NULL;
 
 	if (of_match_node(need_mdio_ids, np)) {
-		plat->mdio_node = of_get_child_by_name(np, "mdio");
+		mdio_node = of_get_child_by_name(np, "mdio");
 	} else {
 		/**
 		 * If snps,dwmac-mdio is passed from DT, always register
 		 * the MDIO
 		 */
-		for_each_child_of_node(np, plat->mdio_node) {
-			if (of_device_is_compatible(plat->mdio_node,
+		for_each_child_of_node(np, mdio_node) {
+			if (of_device_is_compatible(mdio_node,
 						    "snps,dwmac-mdio"))
 				break;
 		}
 	}
 
-	if (plat->mdio_node) {
-		dev_dbg(dev, "Found MDIO subnode\n");
-		mdio = true;
-	}
+	return mdio_node;
+}
 
-	if (mdio) {
-		plat->mdio_bus_data =
-			devm_kzalloc(dev, sizeof(struct stmmac_mdio_bus_data),
-				     GFP_KERNEL);
+/**
+ * stmmac_mdio_setup() - Populate platform related MDIO structures.
+ * @plat: driver data platform structure
+ * @np: devicetree node
+ * @dev: device pointer
+ *
+ * This searches for MDIO information from the devicetree.
+ * If an MDIO node is found, it's assigned to plat->mdio_node and
+ * plat->mdio_bus_data is allocated.
+ * If no connection can be determined, just plat->mdio_bus_data is allocated
+ * to indicate a bus should be created and scanned for a phy.
+ * If it's determined there's no MDIO bus needed, both are left NULL.
+ *
+ * This expects that plat->phy_node has already been searched for.
+ *
+ * Return: 0 on success, errno otherwise.
+ */
+static int stmmac_mdio_setup(struct plat_stmmacenet_data *plat,
+			     struct device_node *np, struct device *dev)
+{
+	bool legacy_mdio;
+
+	plat->mdio_node = stmmac_of_get_mdio(np);
+	if (plat->mdio_node)
+		dev_dbg(dev, "Found MDIO subnode\n");
+
+	/* Legacy devicetrees allowed for no MDIO bus description and expect
+	 * the bus to be scanned for devices. If there's no phy or fixed-link
+	 * described assume this is the case since there must be something
+	 * connected to the MAC.
+	 */
+	legacy_mdio = !of_phy_is_fixed_link(np) && !plat->phy_node;
+	if (legacy_mdio)
+		dev_info(dev, "Deprecated MDIO bus assumption used\n");
+
+	if (plat->mdio_node || legacy_mdio) {
+		plat->mdio_bus_data = devm_kzalloc(dev,
+						   sizeof(*plat->mdio_bus_data),
+						   GFP_KERNEL);
 		if (!plat->mdio_bus_data)
 			return -ENOMEM;
@@ -455,10 +473,11 @@ stmmac_probe_config_dt(struct platform_device *pdev, u8 *mac)
 	if (of_property_read_u32(np, "snps,phy-addr", &plat->phy_addr) == 0)
 		dev_warn(&pdev->dev, "snps,phy-addr property is deprecated\n");
 
 	/* To Configure PHY by using all device-tree supported properties */
-	rc = stmmac_dt_phy(plat, np, &pdev->dev);
-	if (rc)
-		return ERR_PTR(rc);
+	rc = stmmac_mdio_setup(plat, np, &pdev->dev);
+	if (rc) {
+		ret = ERR_PTR(rc);
+		goto error_put_phy;
+	}
 
 	of_property_read_u32(np, "tx-fifo-depth", &plat->tx_fifo_size);
@@ -547,8 +566,8 @@ stmmac_probe_config_dt(struct platform_device *pdev, u8 *mac)
 		dma_cfg = devm_kzalloc(&pdev->dev, sizeof(*dma_cfg),
 				       GFP_KERNEL);
 		if (!dma_cfg) {
-			stmmac_remove_config_dt(pdev, plat);
-			return ERR_PTR(-ENOMEM);
+			ret = ERR_PTR(-ENOMEM);
+			goto error_put_mdio;
 		}
 		plat->dma_cfg = dma_cfg;
@@ -576,8 +595,8 @@ stmmac_probe_config_dt(struct platform_device *pdev, u8 *mac)
 	rc = stmmac_mtl_setup(pdev, plat);
 	if (rc) {
-		stmmac_remove_config_dt(pdev, plat);
-		return ERR_PTR(rc);
+		ret = ERR_PTR(rc);
+		goto error_put_mdio;
 	}
 
 	/* clock setup */
@@ -629,6 +648,10 @@ error_hw_init:
 	clk_disable_unprepare(plat->pclk);
 error_pclk_get:
 	clk_disable_unprepare(plat->stmmac_clk);
+error_put_mdio:
+	of_node_put(plat->mdio_node);
+error_put_phy:
+	of_node_put(plat->phy_node);
 
 	return ret;
 }
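
The error-path hunks above replace ad-hoc cleanup-and-return with the kernel's goto-ladder idiom: each label releases exactly what was acquired before the failure point, and the labels fall through in reverse acquisition order (error_put_mdio runs on into error_put_phy). A self-contained sketch of the pattern, with hypothetical acquire()/release() stubs standing in for the clock and of_node reference operations:

    #include <stdio.h>

    /* Illustrative stubs; in the driver these are the clk_*() calls and
     * the implicit of_node_get() references taken during parsing. */
    static int acquire(const char *what) { printf("acquire %s\n", what); return 0; }
    static void release(const char *what) { printf("release %s\n", what); }

    static int demo_probe(void)
    {
        int ret;

        ret = acquire("phy node");
        if (ret)
            return ret;

        ret = acquire("mdio node");
        if (ret)
            goto err_put_phy;

        ret = acquire("clock");
        if (ret)
            goto err_put_mdio;

        return 0;

    err_put_mdio:
        release("mdio node");   /* later acquisitions are undone first */
    err_put_phy:
        release("phy node");
        return ret;
    }

    int main(void) { return demo_probe(); }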
@@ -637,16 +660,17 @@ static void devm_stmmac_remove_config_dt(void *data)
 {
 	struct plat_stmmacenet_data *plat = data;
 
-	/* Platform data argument is unused */
-	stmmac_remove_config_dt(NULL, plat);
+	clk_disable_unprepare(plat->stmmac_clk);
+	clk_disable_unprepare(plat->pclk);
+	of_node_put(plat->mdio_node);
+	of_node_put(plat->phy_node);
 }
 
 /**
  * devm_stmmac_probe_config_dt
  * @pdev: platform_device structure
  * @mac: MAC address to use
- * Description: Devres variant of stmmac_probe_config_dt(). Does not require
- * the user to call stmmac_remove_config_dt() at driver detach.
+ * Description: Devres variant of stmmac_probe_config_dt().
 */
 struct plat_stmmacenet_data *
 devm_stmmac_probe_config_dt(struct platform_device *pdev, u8 *mac)
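
With the devres callback above now dropping the clocks and the mdio/phy OF references itself, a glue driver no longer needs to pair the probe helper with stmmac_remove_config_dt(). A sketch of the consuming pattern (demo_dwmac_probe is illustrative; the stmmac_* calls are the platform API already used by the in-tree glue drivers):

    #include "stmmac_platform.h"

    static int demo_dwmac_probe(struct platform_device *pdev)
    {
        struct plat_stmmacenet_data *plat_dat;
        struct stmmac_resources stmmac_res;
        int ret;

        ret = stmmac_get_platform_resources(pdev, &stmmac_res);
        if (ret)
            return ret;

        plat_dat = devm_stmmac_probe_config_dt(pdev, stmmac_res.mac);
        if (IS_ERR(plat_dat))
            return PTR_ERR(plat_dat);

        /* No explicit unwind here: the devres action releases the clocks
         * and the of_node references when the device detaches. */
        return stmmac_dvr_probe(&pdev->dev, plat_dat, &stmmac_res);
    }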

View File

@@ -290,6 +290,9 @@ static void icss_iep_enable_shadow_mode(struct icss_iep *iep)
 	for (cmp = IEP_MIN_CMP; cmp < IEP_MAX_CMP; cmp++) {
 		regmap_update_bits(iep->map, ICSS_IEP_CMP_STAT_REG,
 				   IEP_CMP_STATUS(cmp), IEP_CMP_STATUS(cmp));
+
+		regmap_update_bits(iep->map, ICSS_IEP_CMP_CFG_REG,
+				   IEP_CMP_CFG_CMP_EN(cmp), 0);
 	}
 
 	/* enable reset counter on CMP0 event */
@@ -808,6 +811,11 @@
 	}
 	icss_iep_disable(iep);
 
+	if (iep->pps_enabled)
+		icss_iep_pps_enable(iep, false);
+	else if (iep->perout_enabled)
+		icss_iep_perout_enable(iep, NULL, false);
+
 	return 0;
 }
 EXPORT_SYMBOL_GPL(icss_iep_exit);
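
Both hunks hinge on regmap_update_bits(map, reg, mask, val), which read-modify-writes only the bits in mask: val == 0 clears them (parking each compare event before the IEP is re-armed), and val == mask sets them. A minimal sketch; the register offset and bit layout below are assumed placeholders, not the real IEP map:

    #include <linux/bits.h>
    #include <linux/regmap.h>

    #define DEMO_CMP_CFG_REG    0x70            /* assumed offset */
    #define DEMO_CMP_EN(n)      BIT((n) + 1)    /* assumed bit layout */

    static void demo_cmp_disable(struct regmap *map, int cmp)
    {
        /* clear only the enable bit, leaving the other CFG bits intact */
        regmap_update_bits(map, DEMO_CMP_CFG_REG, DEMO_CMP_EN(cmp), 0);
    }

    static void demo_cmp_enable(struct regmap *map, int cmp)
    {
        /* mask == val asserts the same bit */
        regmap_update_bits(map, DEMO_CMP_CFG_REG, DEMO_CMP_EN(cmp),
                           DEMO_CMP_EN(cmp));
    }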

View File

@@ -1373,6 +1373,9 @@ static const struct usb_device_id products[] = {
 	{QMI_QUIRK_SET_DTR(0x1bc7, 0x10a0, 0)},	/* Telit FN920C04 */
 	{QMI_QUIRK_SET_DTR(0x1bc7, 0x10a4, 0)},	/* Telit FN920C04 */
 	{QMI_QUIRK_SET_DTR(0x1bc7, 0x10a9, 0)},	/* Telit FN920C04 */
+	{QMI_QUIRK_SET_DTR(0x1bc7, 0x10c0, 0)},	/* Telit FE910C04 */
+	{QMI_QUIRK_SET_DTR(0x1bc7, 0x10c4, 0)},	/* Telit FE910C04 */
+	{QMI_QUIRK_SET_DTR(0x1bc7, 0x10c8, 0)},	/* Telit FE910C04 */
 	{QMI_FIXED_INTF(0x1bc7, 0x1100, 3)},	/* Telit ME910 */
 	{QMI_FIXED_INTF(0x1bc7, 0x1101, 3)},	/* Telit ME910 dual modem */
 	{QMI_FIXED_INTF(0x1bc7, 0x1200, 5)},	/* Telit LE920 */
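
Each new entry matches one USB composition: vendor 0x1bc7 (Telit), the product ID, and the interface number carrying QMI. The two macro families differ only in which driver_info they attach; the _SET_DTR variant selects a quirk that asserts DTR so the modem firmware brings up the QMI endpoint. A sketch of the pattern (close to, but not guaranteed verbatim from, qmi_wwan.c):

    #define QMI_FIXED_INTF(vend, prod, num) \
        USB_DEVICE_INTERFACE_NUMBER(vend, prod, num), \
        .driver_info = (unsigned long)&qmi_wwan_info

    #define QMI_QUIRK_SET_DTR(vend, prod, num) \
        USB_DEVICE_INTERFACE_NUMBER(vend, prod, num), \
        .driver_info = (unsigned long)&qmi_wwan_info_quirk_dtr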

View File

@@ -2,6 +2,7 @@
 /*
  * Copyright (c) 2005-2011 Atheros Communications Inc.
  * Copyright (c) 2011-2014,2016-2017 Qualcomm Atheros, Inc.
+ * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
 */
 
 #include "bmi.h"

View File

@@ -3,6 +3,7 @@
  * Copyright (c) 2005-2011 Atheros Communications Inc.
  * Copyright (c) 2011-2017 Qualcomm Atheros, Inc.
  * Copyright (c) 2018 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
 */
 
 #include "hif.h"

View File

@@ -3,6 +3,7 @@
  * Copyright (c) 2005-2011 Atheros Communications Inc.
  * Copyright (c) 2011-2017 Qualcomm Atheros, Inc.
  * Copyright (c) 2018-2019, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved.
 */
 
 #include <linux/module.h>

View File

@@ -3,6 +3,7 @@
  * Copyright (c) 2005-2011 Atheros Communications Inc.
  * Copyright (c) 2011-2017 Qualcomm Atheros, Inc.
  * Copyright (c) 2018-2019, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
 */
 
 #ifndef _CORE_H_

View File

@@ -2,6 +2,7 @@
 /*
  * Copyright (c) 2011-2017 Qualcomm Atheros, Inc.
  * Copyright (c) 2018, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
 */
 
 #include "coredump.h"

View File

@@ -1,6 +1,7 @@
 /* SPDX-License-Identifier: ISC */
 /*
  * Copyright (c) 2011-2017 Qualcomm Atheros, Inc.
+ * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
 */
 
 #ifndef _COREDUMP_H_

View File

@@ -3,6 +3,7 @@
  * Copyright (c) 2005-2011 Atheros Communications Inc.
  * Copyright (c) 2011-2017 Qualcomm Atheros, Inc.
  * Copyright (c) 2018, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
 */
 
 #include <linux/module.h>

View File

@@ -2,6 +2,7 @@
 /*
  * Copyright (c) 2014-2017 Qualcomm Atheros, Inc.
  * Copyright (c) 2018, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
 */
 
 #include "core.h"

View File

@@ -2,6 +2,7 @@
 /*
  * Copyright (c) 2005-2011 Atheros Communications Inc.
  * Copyright (c) 2011-2017 Qualcomm Atheros, Inc.
+ * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
 */
 
 #include "core.h"

View File

@@ -3,6 +3,7 @@
  * Copyright (c) 2005-2011 Atheros Communications Inc.
  * Copyright (c) 2011-2017 Qualcomm Atheros, Inc.
  * Copyright (c) 2018, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2021, 2023 Qualcomm Innovation Center, Inc. All rights reserved.
 */
 
 #ifndef _HTT_H_

View File

@@ -3,6 +3,7 @@
  * Copyright (c) 2005-2011 Atheros Communications Inc.
  * Copyright (c) 2011-2017 Qualcomm Atheros, Inc.
  * Copyright (c) 2018, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
 */
 
 #include "core.h"

View File

@@ -2,6 +2,7 @@
 /*
  * Copyright (c) 2005-2011 Atheros Communications Inc.
  * Copyright (c) 2011-2017 Qualcomm Atheros, Inc.
+ * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
 */
 
 #include <linux/etherdevice.h>

View File

@@ -1,6 +1,7 @@
 // SPDX-License-Identifier: ISC
 /*
  * Copyright (c) 2014-2017 Qualcomm Atheros, Inc.
+ * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
 */
 
 #include <linux/types.h>

Some files were not shown because too many files have changed in this diff.