Merge remote-tracking branch 'stable/linux-5.10.y' into rpi-5.10.y

Author: Dom Cobley
Date:   2021-06-08 12:12:03 +01:00
372 changed files with 3393 additions and 1405 deletions


@@ -96,6 +96,16 @@ auxiliary vector.
 scv 0 syscalls will always behave as PPC_FEATURE2_HTM_NOSC.
 
+ptrace
+------
+When ptracing system calls (PTRACE_SYSCALL), the pt_regs.trap value contains
+the system call type that can be used to distinguish between sc and scv 0
+system calls, and the different register conventions can be accounted for.
+
+If the value of (pt_regs.trap & 0xfff0) is 0xc00 then the system call was
+performed with the sc instruction, if it is 0x3000 then the system call was
+performed with the scv 0 instruction.
+
 vsyscall
 ========
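For reference, the new ptrace rule above can be applied from a tracer roughly as in the sketch below. This is not part of the patch: it assumes a 64-bit powerpc target, PTRACE_PEEKUSER access to the pt_regs area, and the PT_TRAP register index normally provided by <asm/ptrace.h> (hard-coded here as an assumption).

    #include <errno.h>
    #include <stdio.h>
    #include <sys/ptrace.h>
    #include <sys/types.h>

    /* PT_TRAP comes from <asm/ptrace.h> on powerpc; value assumed here. */
    #ifndef PT_TRAP
    #define PT_TRAP 40
    #endif

    /* Report which syscall instruction a ptrace-stopped child used. */
    static void report_syscall_flavour(pid_t child)
    {
        long trap;

        errno = 0;
        trap = ptrace(PTRACE_PEEKUSER, child,
                      (void *)(PT_TRAP * sizeof(long)), NULL);
        if (errno) {
            perror("PTRACE_PEEKUSER");
            return;
        }

        if ((trap & 0xfff0) == 0xc00)
            printf("syscall via sc (positive error in r3, cr0.SO set on failure)\n");
        else if ((trap & 0xfff0) == 0x3000)
            printf("syscall via scv 0 (negative errno returned directly in r3)\n");
    }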


@@ -250,14 +250,14 @@ Users can read via ``ioctl(SECCOMP_IOCTL_NOTIF_RECV)`` (or ``poll()``) on a
 seccomp notification fd to receive a ``struct seccomp_notif``, which contains
 five members: the input length of the structure, a unique-per-filter ``id``,
 the ``pid`` of the task which triggered this request (which may be 0 if the
-task is in a pid ns not visible from the listener's pid namespace), a ``flags``
-member which for now only has ``SECCOMP_NOTIF_FLAG_SIGNALED``, representing
-whether or not the notification is a result of a non-fatal signal, and the
-``data`` passed to seccomp. Userspace can then make a decision based on this
-information about what to do, and ``ioctl(SECCOMP_IOCTL_NOTIF_SEND)`` a
-response, indicating what should be returned to userspace. The ``id`` member of
-``struct seccomp_notif_resp`` should be the same ``id`` as in ``struct
-seccomp_notif``.
+task is in a pid ns not visible from the listener's pid namespace). The
+notification also contains the ``data`` passed to seccomp, and a filters flag.
+The structure should be zeroed out prior to calling the ioctl.
+
+Userspace can then make a decision based on this information about what to do,
+and ``ioctl(SECCOMP_IOCTL_NOTIF_SEND)`` a response, indicating what should be
+returned to userspace. The ``id`` member of ``struct seccomp_notif_resp`` should
+be the same ``id`` as in ``struct seccomp_notif``.
 
 It is worth noting that ``struct seccomp_data`` contains the values of register
 arguments to the syscall, but does not contain pointers to memory. The task's
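For reference, a listener loop that follows the rules described above (zero the structure before SECCOMP_IOCTL_NOTIF_RECV, echo the id back in the response) might look roughly like the sketch below. It is not part of the patch; it assumes a notification fd already obtained with SECCOMP_FILTER_FLAG_NEW_LISTENER and simply fails every trapped syscall with EPERM.

    #include <errno.h>
    #include <string.h>
    #include <sys/ioctl.h>
    #include <linux/seccomp.h>

    /* Deny every syscall delivered on an already-created notification fd. */
    static int deny_all(int notify_fd)
    {
        struct seccomp_notif req;
        struct seccomp_notif_resp resp;

        for (;;) {
            /* The structure should be zeroed out prior to the ioctl. */
            memset(&req, 0, sizeof(req));
            if (ioctl(notify_fd, SECCOMP_IOCTL_NOTIF_RECV, &req) < 0)
                return -1;

            memset(&resp, 0, sizeof(resp));
            resp.id = req.id;      /* must match the id in seccomp_notif */
            resp.error = -EPERM;   /* returned to the task that trapped */

            if (ioctl(notify_fd, SECCOMP_IOCTL_NOTIF_SEND, &resp) < 0)
                return -1;
        }
    }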


@@ -1,7 +1,7 @@
 # SPDX-License-Identifier: GPL-2.0
 VERSION = 5
 PATCHLEVEL = 10
-SUBLEVEL = 39
+SUBLEVEL = 42
 EXTRAVERSION =
 NAME = Dare mighty things


@@ -505,4 +505,9 @@ static __always_inline void __kvm_skip_instr(struct kvm_vcpu *vcpu)
     write_sysreg_el2(*vcpu_pc(vcpu), SYS_ELR);
 }
 
+static inline bool vcpu_has_feature(struct kvm_vcpu *vcpu, int feature)
+{
+    return test_bit(feature, vcpu->arch.features);
+}
+
 #endif /* __ARM64_KVM_EMULATE_H__ */


@@ -223,6 +223,25 @@ static int kvm_vcpu_enable_ptrauth(struct kvm_vcpu *vcpu)
     return 0;
 }
 
+static bool vcpu_allowed_register_width(struct kvm_vcpu *vcpu)
+{
+    struct kvm_vcpu *tmp;
+    bool is32bit;
+    int i;
+
+    is32bit = vcpu_has_feature(vcpu, KVM_ARM_VCPU_EL1_32BIT);
+    if (!cpus_have_const_cap(ARM64_HAS_32BIT_EL1) && is32bit)
+        return false;
+
+    /* Check that the vcpus are either all 32bit or all 64bit */
+    kvm_for_each_vcpu(i, tmp, vcpu->kvm) {
+        if (vcpu_has_feature(tmp, KVM_ARM_VCPU_EL1_32BIT) != is32bit)
+            return false;
+    }
+
+    return true;
+}
+
 /**
  * kvm_reset_vcpu - sets core registers and sys_regs to reset value
  * @vcpu: The VCPU pointer
@@ -274,13 +293,14 @@ int kvm_reset_vcpu(struct kvm_vcpu *vcpu)
         }
     }
 
+    if (!vcpu_allowed_register_width(vcpu)) {
+        ret = -EINVAL;
+        goto out;
+    }
+
     switch (vcpu->arch.target) {
     default:
         if (test_bit(KVM_ARM_VCPU_EL1_32BIT, vcpu->arch.features)) {
-            if (!cpus_have_const_cap(ARM64_HAS_32BIT_EL1)) {
-                ret = -EINVAL;
-                goto out;
-            }
             pstate = VCPU_RESET_PSTATE_SVC;
         } else {
             pstate = VCPU_RESET_PSTATE_EL1;


@@ -18,6 +18,7 @@
 #include <asm/reboot.h>
 #include <asm/setup.h>
 #include <asm/mach-au1x00/au1000.h>
+#include <asm/mach-au1x00/gpio-au1000.h>
 #include <prom.h>
 
 const char *get_system_type(void)


@@ -8,6 +8,7 @@
 #include <linux/io.h>
 #include <linux/clk.h>
+#include <linux/export.h>
 #include <linux/init.h>
 #include <linux/sizes.h>
 #include <linux/of_fdt.h>
@@ -25,6 +26,7 @@
 __iomem void *rt_sysc_membase;
 __iomem void *rt_memc_membase;
+EXPORT_SYMBOL_GPL(rt_sysc_membase);
 
 __iomem void *plat_of_remap_node(const char *node)
 {


@@ -0,0 +1,9 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __ASM_BARRIER_H
+#define __ASM_BARRIER_H
+
+#define mb() asm volatile ("l.msync" ::: "memory")
+
+#include <asm-generic/barrier.h>
+
+#endif /* __ASM_BARRIER_H */


@@ -278,6 +278,8 @@ void calibrate_delay(void)
     pr_cont("%lu.%02lu BogoMIPS (lpj=%lu)\n",
         loops_per_jiffy / (500000 / HZ),
         (loops_per_jiffy / (5000 / HZ)) % 100, loops_per_jiffy);
+
+    of_node_put(cpu);
 }
 
 void __init setup_arch(char **cmdline_p)


@@ -76,7 +76,6 @@ static void __init map_ram(void)
     /* These mark extents of read-only kernel pages...
      * ...from vmlinux.lds.S
      */
-    struct memblock_region *region;
 
     v = PAGE_OFFSET;
 
@@ -122,7 +121,7 @@ static void __init map_ram(void)
         }
 
         printk(KERN_INFO "%s: Memory: 0x%x-0x%x\n", __func__,
-               region->base, region->base + region->size);
+               start, end);
     }
 }


@@ -437,6 +437,9 @@
  */
 long plpar_hcall_norets(unsigned long opcode, ...);
 
+/* Variant which does not do hcall tracing */
+long plpar_hcall_norets_notrace(unsigned long opcode, ...);
+
 /**
  * plpar_hcall: - Make a pseries hypervisor call
  * @opcode: The hypervisor call to make.


@@ -24,19 +24,35 @@ static inline u32 yield_count_of(int cpu)
     return be32_to_cpu(yield_count);
 }
 
+/*
+ * Spinlock code confers and prods, so don't trace the hcalls because the
+ * tracing code takes spinlocks which can cause recursion deadlocks.
+ *
+ * These calls are made while the lock is not held: the lock slowpath yields if
+ * it can not acquire the lock, and unlock slow path might prod if a waiter has
+ * yielded). So this may not be a problem for simple spin locks because the
+ * tracing does not technically recurse on the lock, but we avoid it anyway.
+ *
+ * However the queued spin lock contended path is more strictly ordered: the
+ * H_CONFER hcall is made after the task has queued itself on the lock, so then
+ * recursing on that lock will cause the task to then queue up again behind the
+ * first instance (or worse: queued spinlocks use tricks that assume a context
+ * never waits on more than one spinlock, so such recursion may cause random
+ * corruption in the lock code).
+ */
 static inline void yield_to_preempted(int cpu, u32 yield_count)
 {
-    plpar_hcall_norets(H_CONFER, get_hard_smp_processor_id(cpu), yield_count);
+    plpar_hcall_norets_notrace(H_CONFER, get_hard_smp_processor_id(cpu), yield_count);
 }
 
 static inline void prod_cpu(int cpu)
 {
-    plpar_hcall_norets(H_PROD, get_hard_smp_processor_id(cpu));
+    plpar_hcall_norets_notrace(H_PROD, get_hard_smp_processor_id(cpu));
 }
 
 static inline void yield_to_any(void)
 {
-    plpar_hcall_norets(H_CONFER, -1, 0);
+    plpar_hcall_norets_notrace(H_CONFER, -1, 0);
 }
 #else
 static inline bool is_shared_processor(void)


@@ -19,6 +19,7 @@
 #ifndef _ASM_POWERPC_PTRACE_H
 #define _ASM_POWERPC_PTRACE_H
 
+#include <linux/err.h>
 #include <uapi/asm/ptrace.h>
 #include <asm/asm-const.h>
 
@@ -144,25 +145,6 @@ extern unsigned long profile_pc(struct pt_regs *regs);
 long do_syscall_trace_enter(struct pt_regs *regs);
 void do_syscall_trace_leave(struct pt_regs *regs);
 
-#define kernel_stack_pointer(regs) ((regs)->gpr[1])
-static inline int is_syscall_success(struct pt_regs *regs)
-{
-    return !(regs->ccr & 0x10000000);
-}
-
-static inline long regs_return_value(struct pt_regs *regs)
-{
-    if (is_syscall_success(regs))
-        return regs->gpr[3];
-    else
-        return -regs->gpr[3];
-}
-
-static inline void regs_set_return_value(struct pt_regs *regs, unsigned long rc)
-{
-    regs->gpr[3] = rc;
-}
-
 #ifdef __powerpc64__
 #define user_mode(regs) ((((regs)->msr) >> MSR_PR_LG) & 0x1)
 #else
@@ -245,6 +227,31 @@ static inline void set_trap_norestart(struct pt_regs *regs)
     regs->trap |= 0x10;
 }
 
+#define kernel_stack_pointer(regs) ((regs)->gpr[1])
+static inline int is_syscall_success(struct pt_regs *regs)
+{
+    if (trap_is_scv(regs))
+        return !IS_ERR_VALUE((unsigned long)regs->gpr[3]);
+    else
+        return !(regs->ccr & 0x10000000);
+}
+
+static inline long regs_return_value(struct pt_regs *regs)
+{
+    if (trap_is_scv(regs))
+        return regs->gpr[3];
+
+    if (is_syscall_success(regs))
+        return regs->gpr[3];
+    else
+        return -regs->gpr[3];
+}
+
+static inline void regs_set_return_value(struct pt_regs *regs, unsigned long rc)
+{
+    regs->gpr[3] = rc;
+}
+
 #define arch_has_single_step()	(1)
 #define arch_has_block_step()	(true)
 #define ARCH_HAS_USER_SINGLE_STEP_REPORT


@@ -41,11 +41,17 @@ static inline void syscall_rollback(struct task_struct *task,
 static inline long syscall_get_error(struct task_struct *task,
                                      struct pt_regs *regs)
 {
-    /*
-     * If the system call failed,
-     * regs->gpr[3] contains a positive ERRORCODE.
-     */
-    return (regs->ccr & 0x10000000UL) ? -regs->gpr[3] : 0;
+    if (trap_is_scv(regs)) {
+        unsigned long error = regs->gpr[3];
+
+        return IS_ERR_VALUE(error) ? error : 0;
+    } else {
+        /*
+         * If the system call failed,
+         * regs->gpr[3] contains a positive ERRORCODE.
+         */
+        return (regs->ccr & 0x10000000UL) ? -regs->gpr[3] : 0;
+    }
 }
 
 static inline long syscall_get_return_value(struct task_struct *task,
@@ -58,18 +64,22 @@ static inline void syscall_set_return_value(struct task_struct *task,
                                             struct pt_regs *regs,
                                             int error, long val)
 {
-    /*
-     * In the general case it's not obvious that we must deal with CCR
-     * here, as the syscall exit path will also do that for us. However
-     * there are some places, eg. the signal code, which check ccr to
-     * decide if the value in r3 is actually an error.
-     */
-    if (error) {
-        regs->ccr |= 0x10000000L;
-        regs->gpr[3] = error;
+    if (trap_is_scv(regs)) {
+        regs->gpr[3] = (long) error ?: val;
     } else {
-        regs->ccr &= ~0x10000000L;
-        regs->gpr[3] = val;
+        /*
+         * In the general case it's not obvious that we must deal with
+         * CCR here, as the syscall exit path will also do that for us.
+         * However there are some places, eg. the signal code, which
+         * check ccr to decide if the value in r3 is actually an error.
+         */
+        if (error) {
+            regs->ccr |= 0x10000000L;
+            regs->gpr[3] = error;
+        } else {
+            regs->ccr &= ~0x10000000L;
+            regs->gpr[3] = val;
+        }
     }
 }


@@ -368,11 +368,11 @@ void __init early_setup(unsigned long dt_ptr)
     apply_feature_fixups();
     setup_feature_keys();
 
+    early_ioremap_setup();
+
     /* Initialize the hash table or TLB handling */
     early_init_mmu();
 
-    early_ioremap_setup();
-
     /*
      * After firmware and early platform setup code has set things up,
      * we note the SPR values for configurable control/performance


@@ -102,6 +102,16 @@ END_FTR_SECTION(0, 1); \
 #define HCALL_BRANCH(LABEL)
 #endif
 
+_GLOBAL_TOC(plpar_hcall_norets_notrace)
+    HMT_MEDIUM
+
+    mfcr    r0
+    stw     r0,8(r1)
+    HVSC                /* invoke the hypervisor */
+    lwz     r0,8(r1)
+    mtcrf   0xff,r0
+    blr                 /* return r3 = status */
+
 _GLOBAL_TOC(plpar_hcall_norets)
     HMT_MEDIUM


@@ -1827,8 +1827,7 @@ void hcall_tracepoint_unregfunc(void)
 
 /*
  * Since the tracing code might execute hcalls we need to guard against
- * recursion. One example of this are spinlocks calling H_YIELD on
- * shared processor partitions.
+ * recursion.
  */
 static DEFINE_PER_CPU(unsigned int, hcall_trace_depth);


@@ -172,11 +172,21 @@ SYM_FUNC_START(startup_32)
      */
     call    get_sev_encryption_bit
     xorl    %edx, %edx
+#ifdef  CONFIG_AMD_MEM_ENCRYPT
     testl   %eax, %eax
     jz      1f
     subl    $32, %eax   /* Encryption bit is always above bit 31 */
     bts     %eax, %edx  /* Set encryption mask for page tables */
+    /*
+     * Mark SEV as active in sev_status so that startup32_check_sev_cbit()
+     * will do a check. The sev_status memory will be fully initialized
+     * with the contents of MSR_AMD_SEV_STATUS later in
+     * set_sev_encryption_mask(). For now it is sufficient to know that SEV
+     * is active.
+     */
+    movl    $1, rva(sev_status)(%ebp)
 1:
+#endif
 
     /* Initialize Page tables to 0 */
     leal    rva(pgtable)(%ebx), %edi
@@ -261,6 +271,9 @@ SYM_FUNC_START(startup_32)
     movl    %esi, %edx
 1:
 #endif
+    /* Check if the C-bit position is correct when SEV is active */
+    call    startup32_check_sev_cbit
+
     pushl   $__KERNEL_CS
     pushl   %eax
 
@@ -786,6 +799,78 @@ SYM_DATA_START_LOCAL(loaded_image_proto)
 SYM_DATA_END(loaded_image_proto)
 #endif
 
+/*
+ * Check for the correct C-bit position when the startup_32 boot-path is used.
+ *
+ * The check makes use of the fact that all memory is encrypted when paging is
+ * disabled. The function creates 64 bits of random data using the RDRAND
+ * instruction. RDRAND is mandatory for SEV guests, so always available. If the
+ * hypervisor violates that the kernel will crash right here.
+ *
+ * The 64 bits of random data are stored to a memory location and at the same
+ * time kept in the %eax and %ebx registers. Since encryption is always active
+ * when paging is off the random data will be stored encrypted in main memory.
+ *
+ * Then paging is enabled. When the C-bit position is correct all memory is
+ * still mapped encrypted and comparing the register values with memory will
+ * succeed. An incorrect C-bit position will map all memory unencrypted, so that
+ * the compare will use the encrypted random data and fail.
+ */
+    __HEAD
+    .code32
+SYM_FUNC_START(startup32_check_sev_cbit)
+#ifdef CONFIG_AMD_MEM_ENCRYPT
+    pushl   %eax
+    pushl   %ebx
+    pushl   %ecx
+    pushl   %edx
+
+    /* Check for non-zero sev_status */
+    movl    rva(sev_status)(%ebp), %eax
+    testl   %eax, %eax
+    jz      4f
+
+    /*
+     * Get two 32-bit random values - Don't bail out if RDRAND fails
+     * because it is better to prevent forward progress if no random value
+     * can be gathered.
+     */
+1:  rdrand  %eax
+    jnc     1b
+2:  rdrand  %ebx
+    jnc     2b
+
+    /* Store to memory and keep it in the registers */
+    movl    %eax, rva(sev_check_data)(%ebp)
+    movl    %ebx, rva(sev_check_data+4)(%ebp)
+
+    /* Enable paging to see if encryption is active */
+    movl    %cr0, %edx                       /* Backup %cr0 in %edx */
+    movl    $(X86_CR0_PG | X86_CR0_PE), %ecx /* Enable Paging and Protected mode */
+    movl    %ecx, %cr0
+
+    cmpl    %eax, rva(sev_check_data)(%ebp)
+    jne     3f
+    cmpl    %ebx, rva(sev_check_data+4)(%ebp)
+    jne     3f
+
+    movl    %edx, %cr0  /* Restore previous %cr0 */
+
+    jmp     4f
+
+3:  /* Check failed - hlt the machine */
+    hlt
+    jmp     3b
+
+4:
+    popl    %edx
+    popl    %ecx
+    popl    %ebx
+    popl    %eax
+#endif
+    ret
+SYM_FUNC_END(startup32_check_sev_cbit)
+
 /*
  * Stack and heap for uncompression
  */


@@ -5563,7 +5563,7 @@ __init int intel_pmu_init(void)
      * Check all LBT MSR here.
      * Disable LBR access if any LBR MSRs can not be accessed.
      */
-    if (x86_pmu.lbr_nr && !check_msr(x86_pmu.lbr_tos, 0x3UL))
+    if (x86_pmu.lbr_tos && !check_msr(x86_pmu.lbr_tos, 0x3UL))
         x86_pmu.lbr_nr = 0;
     for (i = 0; i < x86_pmu.lbr_nr; i++) {
         if (!(check_msr(x86_pmu.lbr_from + i, 0xffffUL) &&


@@ -63,6 +63,7 @@ static bool sev_es_negotiate_protocol(void)
 
 static __always_inline void vc_ghcb_invalidate(struct ghcb *ghcb)
 {
+    ghcb->save.sw_exit_code = 0;
     memset(ghcb->save.valid_bitmap, 0, sizeof(ghcb->save.valid_bitmap));
 }


@@ -191,8 +191,18 @@ static __always_inline struct ghcb *sev_es_get_ghcb(struct ghcb_state *state)
     if (unlikely(data->ghcb_active)) {
         /* GHCB is already in use - save its contents */
 
-        if (unlikely(data->backup_ghcb_active))
-            return NULL;
+        if (unlikely(data->backup_ghcb_active)) {
+            /*
+             * Backup-GHCB is also already in use. There is no way
+             * to continue here so just kill the machine. To make
+             * panic() work, mark GHCBs inactive so that messages
+             * can be printed out.
+             */
+            data->ghcb_active        = false;
+            data->backup_ghcb_active = false;
+
+            panic("Unable to handle #VC exception! GHCB and Backup GHCB are already in use");
+        }
 
         /* Mark backup_ghcb active before writing to it */
         data->backup_ghcb_active = true;
@@ -209,24 +219,6 @@ static __always_inline struct ghcb *sev_es_get_ghcb(struct ghcb_state *state)
     return ghcb;
 }
 
-static __always_inline void sev_es_put_ghcb(struct ghcb_state *state)
-{
-    struct sev_es_runtime_data *data;
-    struct ghcb *ghcb;
-
-    data = this_cpu_read(runtime_data);
-    ghcb = &data->ghcb_page;
-
-    if (state->ghcb) {
-        /* Restore GHCB from Backup */
-        *ghcb = *state->ghcb;
-        data->backup_ghcb_active = false;
-        state->ghcb = NULL;
-    } else {
-        data->ghcb_active = false;
-    }
-}
-
 /* Needed in vc_early_forward_exception */
 void do_early_exception(struct pt_regs *regs, int trapnr);
 
@@ -296,31 +288,44 @@ static enum es_result vc_write_mem(struct es_em_ctxt *ctxt,
     u16 d2;
     u8  d1;
 
-    /* If instruction ran in kernel mode and the I/O buffer is in kernel space */
-    if (!user_mode(ctxt->regs) && !access_ok(target, size)) {
-        memcpy(dst, buf, size);
-        return ES_OK;
-    }
+    /*
+     * This function uses __put_user() independent of whether kernel or user
+     * memory is accessed. This works fine because __put_user() does no
+     * sanity checks of the pointer being accessed. All that it does is
+     * to report when the access failed.
+     *
+     * Also, this function runs in atomic context, so __put_user() is not
+     * allowed to sleep. The page-fault handler detects that it is running
+     * in atomic context and will not try to take mmap_sem and handle the
+     * fault, so additional pagefault_enable()/disable() calls are not
+     * needed.
+     *
+     * The access can't be done via copy_to_user() here because
+     * vc_write_mem() must not use string instructions to access unsafe
+     * memory. The reason is that MOVS is emulated by the #VC handler by
+     * splitting the move up into a read and a write and taking a nested #VC
+     * exception on whatever of them is the MMIO access. Using string
+     * instructions here would cause infinite nesting.
+     */
     switch (size) {
     case 1:
         memcpy(&d1, buf, 1);
-        if (put_user(d1, target))
+        if (__put_user(d1, target))
             goto fault;
         break;
     case 2:
         memcpy(&d2, buf, 2);
-        if (put_user(d2, target))
+        if (__put_user(d2, target))
            goto fault;
        break;
     case 4:
        memcpy(&d4, buf, 4);
-        if (put_user(d4, target))
+        if (__put_user(d4, target))
            goto fault;
        break;
     case 8:
        memcpy(&d8, buf, 8);
-        if (put_user(d8, target))
+        if (__put_user(d8, target))
            goto fault;
        break;
     default:
@@ -351,30 +356,43 @@ static enum es_result vc_read_mem(struct es_em_ctxt *ctxt,
     u16 d2;
     u8  d1;
 
-    /* If instruction ran in kernel mode and the I/O buffer is in kernel space */
-    if (!user_mode(ctxt->regs) && !access_ok(s, size)) {
-        memcpy(buf, src, size);
-        return ES_OK;
-    }
+    /*
+     * This function uses __get_user() independent of whether kernel or user
+     * memory is accessed. This works fine because __get_user() does no
+     * sanity checks of the pointer being accessed. All that it does is
+     * to report when the access failed.
+     *
+     * Also, this function runs in atomic context, so __get_user() is not
+     * allowed to sleep. The page-fault handler detects that it is running
+     * in atomic context and will not try to take mmap_sem and handle the
+     * fault, so additional pagefault_enable()/disable() calls are not
+     * needed.
+     *
+     * The access can't be done via copy_from_user() here because
+     * vc_read_mem() must not use string instructions to access unsafe
+     * memory. The reason is that MOVS is emulated by the #VC handler by
+     * splitting the move up into a read and a write and taking a nested #VC
+     * exception on whatever of them is the MMIO access. Using string
+     * instructions here would cause infinite nesting.
+     */
     switch (size) {
     case 1:
-        if (get_user(d1, s))
+        if (__get_user(d1, s))
            goto fault;
        memcpy(buf, &d1, 1);
        break;
     case 2:
-        if (get_user(d2, s))
+        if (__get_user(d2, s))
            goto fault;
        memcpy(buf, &d2, 2);
        break;
     case 4:
-        if (get_user(d4, s))
+        if (__get_user(d4, s))
            goto fault;
        memcpy(buf, &d4, 4);
        break;
     case 8:
-        if (get_user(d8, s))
+        if (__get_user(d8, s))
            goto fault;
        memcpy(buf, &d8, 8);
        break;
@@ -434,6 +452,29 @@ static enum es_result vc_slow_virt_to_phys(struct ghcb *ghcb, struct es_em_ctxt
 /* Include code shared with pre-decompression boot stage */
 #include "sev-es-shared.c"
 
+static __always_inline void sev_es_put_ghcb(struct ghcb_state *state)
+{
+    struct sev_es_runtime_data *data;
+    struct ghcb *ghcb;
+
+    data = this_cpu_read(runtime_data);
+    ghcb = &data->ghcb_page;
+
+    if (state->ghcb) {
+        /* Restore GHCB from Backup */
+        *ghcb = *state->ghcb;
+        data->backup_ghcb_active = false;
+        state->ghcb = NULL;
+    } else {
+        /*
+         * Invalidate the GHCB so a VMGEXIT instruction issued
+         * from userspace won't appear to be valid.
+         */
+        vc_ghcb_invalidate(ghcb);
+        data->ghcb_active = false;
+    }
+}
+
 void noinstr __sev_es_nmi_complete(void)
 {
     struct ghcb_state state;
@@ -1228,6 +1269,10 @@ static __always_inline void vc_forward_exception(struct es_em_ctxt *ctxt)
     case X86_TRAP_UD:
         exc_invalid_op(ctxt->regs);
         break;
+    case X86_TRAP_PF:
+        write_cr2(ctxt->fi.cr2);
+        exc_page_fault(ctxt->regs, error_code);
+        break;
     case X86_TRAP_AC:
         exc_alignment_check(ctxt->regs, error_code);
         break;
@@ -1257,7 +1302,6 @@ static __always_inline bool on_vc_fallback_stack(struct pt_regs *regs)
  */
 DEFINE_IDTENTRY_VC_SAFE_STACK(exc_vmm_communication)
 {
-    struct sev_es_runtime_data *data = this_cpu_read(runtime_data);
     irqentry_state_t irq_state;
     struct ghcb_state state;
     struct es_em_ctxt ctxt;
@@ -1283,16 +1327,6 @@ DEFINE_IDTENTRY_VC_SAFE_STACK(exc_vmm_communication)
      */
     ghcb = sev_es_get_ghcb(&state);
 
-    if (!ghcb) {
-        /*
-         * Mark GHCBs inactive so that panic() is able to print the
-         * message.
-         */
-        data->ghcb_active        = false;
-        data->backup_ghcb_active = false;
-
-        panic("Unable to handle #VC exception! GHCB and Backup GHCB are already in use");
-    }
-
     vc_ghcb_invalidate(ghcb);
     result = vc_init_em_ctxt(&ctxt, regs, error_code);


@@ -3532,15 +3532,15 @@ static noinstr void svm_vcpu_enter_exit(struct kvm_vcpu *vcpu,
      * have them in state 'on' as recorded before entering guest mode.
      * Same as enter_from_user_mode().
      *
-     * guest_exit_irqoff() restores host context and reinstates RCU if
-     * enabled and required.
+     * context_tracking_guest_exit() restores host context and reinstates
+     * RCU if enabled and required.
      *
      * This needs to be done before the below as native_read_msr()
      * contains a tracepoint and x86_spec_ctrl_restore_host() calls
      * into world and some more.
      */
     lockdep_hardirqs_off(CALLER_ADDR0);
-    guest_exit_irqoff();
+    context_tracking_guest_exit();
 
     instrumentation_begin();
     trace_hardirqs_off_finish();


@@ -6640,15 +6640,15 @@ static noinstr void vmx_vcpu_enter_exit(struct kvm_vcpu *vcpu,
      * have them in state 'on' as recorded before entering guest mode.
      * Same as enter_from_user_mode().
      *
-     * guest_exit_irqoff() restores host context and reinstates RCU if
-     * enabled and required.
+     * context_tracking_guest_exit() restores host context and reinstates
+     * RCU if enabled and required.
      *
      * This needs to be done before the below as native_read_msr()
      * contains a tracepoint and x86_spec_ctrl_restore_host() calls
      * into world and some more.
      */
     lockdep_hardirqs_off(CALLER_ADDR0);
-    guest_exit_irqoff();
+    context_tracking_guest_exit();
 
     instrumentation_begin();
     trace_hardirqs_off_finish();


@@ -3006,6 +3006,8 @@ static void record_steal_time(struct kvm_vcpu *vcpu)
                        st->preempted & KVM_VCPU_FLUSH_TLB);
         if (xchg(&st->preempted, 0) & KVM_VCPU_FLUSH_TLB)
             kvm_vcpu_flush_tlb_guest(vcpu);
+    } else {
+        st->preempted = 0;
     }
 
     vcpu->arch.st.preempted = 0;
@@ -9063,6 +9065,15 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
     local_irq_disable();
     kvm_after_interrupt(vcpu);
 
+    /*
+     * Wait until after servicing IRQs to account guest time so that any
+     * ticks that occurred while running the guest are properly accounted
+     * to the guest.  Waiting until IRQs are enabled degrades the accuracy
+     * of accounting via context tracking, but the loss of accuracy is
+     * acceptable for all known use cases.
+     */
+    vtime_account_guest_exit();
+
     if (lapic_in_kernel(vcpu)) {
         s64 delta = vcpu->arch.apic->lapic_timer.advance_expire_delta;
         if (delta != S64_MIN) {


@@ -1262,16 +1262,16 @@ asmlinkage __visible void __init xen_start_kernel(void)
     /* Get mfn list */
     xen_build_dynamic_phys_to_machine();
 
+    /* Work out if we support NX */
+    get_cpu_cap(&boot_cpu_data);
+    x86_configure_nx();
+
     /*
      * Set up kernel GDT and segment registers, mainly so that
      * -fstack-protector code can be executed.
      */
     xen_setup_gdt(0);
 
-    /* Work out if we support NX */
-    get_cpu_cap(&boot_cpu_data);
-    x86_configure_nx();
-
     /* Determine virtual and physical address sizes */
     get_cpu_address_sizes(&boot_cpu_data);


@@ -226,6 +226,7 @@ static const struct acpi_device_id acpi_apd_device_ids[] = {
     { "AMDI0010", APD_ADDR(wt_i2c_desc) },
     { "AMD0020", APD_ADDR(cz_uart_desc) },
     { "AMDI0020", APD_ADDR(cz_uart_desc) },
+    { "AMDI0022", APD_ADDR(cz_uart_desc) },
     { "AMD0030", },
     { "AMD0040", APD_ADDR(fch_misc_desc)},
     { "HYGO0010", APD_ADDR(wt_i2c_desc) },


@@ -83,6 +83,11 @@ int device_links_read_lock_held(void)
 {
     return srcu_read_lock_held(&device_links_srcu);
 }
+
+static void device_link_synchronize_removal(void)
+{
+    synchronize_srcu(&device_links_srcu);
+}
 #else /* !CONFIG_SRCU */
 static DECLARE_RWSEM(device_links_lock);
 
@@ -113,6 +118,10 @@ int device_links_read_lock_held(void)
     return lockdep_is_held(&device_links_lock);
 }
 #endif
+
+static inline void device_link_synchronize_removal(void)
+{
+}
 #endif /* !CONFIG_SRCU */
 
 static bool device_is_ancestor(struct device *dev, struct device *target)
@@ -332,8 +341,13 @@ static struct attribute *devlink_attrs[] = {
 };
 ATTRIBUTE_GROUPS(devlink);
 
-static void device_link_free(struct device_link *link)
+static void device_link_release_fn(struct work_struct *work)
 {
+    struct device_link *link = container_of(work, struct device_link, rm_work);
+
+    /* Ensure that all references to the link object have been dropped. */
+    device_link_synchronize_removal();
+
     while (refcount_dec_not_one(&link->rpm_active))
         pm_runtime_put(link->supplier);
 
@@ -342,24 +356,19 @@ static void device_link_free(struct device_link *link)
     kfree(link);
 }
 
-#ifdef CONFIG_SRCU
-static void __device_link_free_srcu(struct rcu_head *rhead)
-{
-    device_link_free(container_of(rhead, struct device_link, rcu_head));
-}
-
 static void devlink_dev_release(struct device *dev)
 {
     struct device_link *link = to_devlink(dev);
 
-    call_srcu(&device_links_srcu, &link->rcu_head, __device_link_free_srcu);
+    INIT_WORK(&link->rm_work, device_link_release_fn);
+    /*
+     * It may take a while to complete this work because of the SRCU
+     * synchronization in device_link_release_fn() and if the consumer or
+     * supplier devices get deleted when it runs, so put it into the "long"
+     * workqueue.
+     */
+    queue_work(system_long_wq, &link->rm_work);
 }
-#else
-static void devlink_dev_release(struct device *dev)
-{
-    device_link_free(to_devlink(dev));
-}
-#endif
 
 static struct class devlink_class = {
     .name = "devlink",


@@ -743,6 +743,13 @@ static const struct blk_mq_ops gdrom_mq_ops = {
 static int probe_gdrom(struct platform_device *devptr)
 {
     int err;
+
+    /*
+     * Ensure our "one" device is initialized properly in case of previous
+     * usages of it
+     */
+    memset(&gd, 0, sizeof(gd));
+
     /* Start the device */
     if (gdrom_execute_diagnostic() != 1) {
         pr_warn("ATA Probe for GDROM failed\n");
@@ -831,6 +838,8 @@ static int remove_gdrom(struct platform_device *devptr)
     if (gdrom_major)
         unregister_blkdev(gdrom_major, GDROM_DEV_NAME);
     unregister_cdrom(gd.cd_info);
+    kfree(gd.cd_info);
+    kfree(gd.toc);
 
     return 0;
 }
@@ -846,7 +855,7 @@ static struct platform_driver gdrom_driver = {
 static int __init init_gdrom(void)
 {
     int rc;
 
-    gd.toc = NULL;
-
     rc = platform_driver_register(&gdrom_driver);
     if (rc)
         return rc;
@@ -862,8 +871,6 @@ static void __exit exit_gdrom(void)
 {
     platform_device_unregister(pd);
     platform_driver_unregister(&gdrom_driver);
-    kfree(gd.toc);
-    kfree(gd.cd_info);
 }
 
 module_init(init_gdrom);


@@ -984,6 +984,8 @@ static acpi_status hpet_resources(struct acpi_resource *res, void *data)
         hdp->hd_phys_address = fixmem32->address;
         hdp->hd_address = ioremap(fixmem32->address,
                                   HPET_RANGE_SIZE);
+        if (!hdp->hd_address)
+            return AE_ERROR;
 
         if (hpet_is_known(hdp)) {
             iounmap(hdp->hd_address);


@@ -451,7 +451,6 @@ static int nitrox_probe(struct pci_dev *pdev,
     err = pci_request_mem_regions(pdev, nitrox_driver_name);
     if (err) {
         pci_disable_device(pdev);
-        dev_err(&pdev->dev, "Failed to request mem regions!\n");
         return err;
     }
     pci_set_master(pdev);


@@ -418,8 +418,23 @@ static int __init hidma_mgmt_init(void)
         hidma_mgmt_of_populate_channels(child);
     }
 #endif
-    return platform_driver_register(&hidma_mgmt_driver);
+    /*
+     * We do not check for return value here, as it is assumed that
+     * platform_driver_register must not fail. The reason for this is that
+     * the (potential) hidma_mgmt_of_populate_channels calls above are not
+     * cleaned up if it does fail, and to do this work is quite
+     * complicated. In particular, various calls of of_address_to_resource,
+     * of_irq_to_resource, platform_device_register_full, of_dma_configure,
+     * and of_msi_configure which then call other functions and so on, must
+     * be cleaned up - this is not a trivial exercise.
+     *
+     * Currently, this module is not intended to be unloaded, and there is
+     * no module_exit function defined which does the needed cleanup. For
+     * this reason, we have to assume success here.
+     */
+    platform_driver_register(&hidma_mgmt_driver);
 
+    return 0;
 }
 module_init(hidma_mgmt_init);
 MODULE_LICENSE("GPL v2");


@@ -552,8 +552,10 @@ static unsigned long scpi_clk_get_val(u16 clk_id)
     ret = scpi_send_message(CMD_GET_CLOCK_VALUE, &le_clk_id,
                             sizeof(le_clk_id), &rate, sizeof(rate));
+    if (ret)
+        return 0;
 
-    return ret ? ret : le32_to_cpu(rate);
+    return le32_to_cpu(rate);
 }
 
 static int scpi_clk_set_val(u16 clk_id, unsigned long rate)


@@ -278,6 +278,7 @@ static const struct of_device_id cdns_of_ids[] = {
     { .compatible = "cdns,gpio-r1p02" },
     { /* sentinel */ },
 };
+MODULE_DEVICE_TABLE(of, cdns_of_ids);
 
 static struct platform_driver cdns_gpio_driver = {
     .driver = {


@@ -157,16 +157,16 @@ static uint32_t get_sdma_rlc_reg_offset(struct amdgpu_device *adev,
                 mmSDMA0_RLC0_RB_CNTL) - mmSDMA0_RLC0_RB_CNTL;
         break;
     case 1:
-        sdma_engine_reg_base = SOC15_REG_OFFSET(SDMA1, 0,
+        sdma_engine_reg_base = SOC15_REG_OFFSET(SDMA0, 0,
                 mmSDMA1_RLC0_RB_CNTL) - mmSDMA0_RLC0_RB_CNTL;
         break;
     case 2:
-        sdma_engine_reg_base = SOC15_REG_OFFSET(SDMA2, 0,
-                mmSDMA2_RLC0_RB_CNTL) - mmSDMA2_RLC0_RB_CNTL;
+        sdma_engine_reg_base = SOC15_REG_OFFSET(SDMA0, 0,
+                mmSDMA2_RLC0_RB_CNTL) - mmSDMA0_RLC0_RB_CNTL;
         break;
     case 3:
-        sdma_engine_reg_base = SOC15_REG_OFFSET(SDMA3, 0,
-                mmSDMA3_RLC0_RB_CNTL) - mmSDMA2_RLC0_RB_CNTL;
+        sdma_engine_reg_base = SOC15_REG_OFFSET(SDMA0, 0,
+                mmSDMA3_RLC0_RB_CNTL) - mmSDMA0_RLC0_RB_CNTL;
         break;
     }
 
@@ -451,7 +451,7 @@ static int hqd_sdma_dump_v10_3(struct kgd_dev *kgd,
             engine_id, queue_id);
     uint32_t i = 0, reg;
 #undef HQD_N_REGS
-#define HQD_N_REGS (19+6+7+10)
+#define HQD_N_REGS (19+6+7+12)
 
     *dump = kmalloc(HQD_N_REGS*2*sizeof(uint32_t), GFP_KERNEL);
     if (*dump == NULL)


@@ -4368,7 +4368,6 @@ out:
                 r = amdgpu_ib_ring_tests(tmp_adev);
                 if (r) {
                     dev_err(tmp_adev->dev, "ib ring test failed (%d).\n", r);
-                    r = amdgpu_device_ip_suspend(tmp_adev);
                     need_full_reset = true;
                     r = -EAGAIN;
                     goto end;


@@ -289,10 +289,13 @@ out:
 static int amdgpu_fbdev_destroy(struct drm_device *dev, struct amdgpu_fbdev *rfbdev)
 {
     struct amdgpu_framebuffer *rfb = &rfbdev->rfb;
+    int i;
 
     drm_fb_helper_unregister_fbi(&rfbdev->helper);
 
     if (rfb->base.obj[0]) {
+        for (i = 0; i < rfb->base.format->num_planes; i++)
+            drm_gem_object_put(rfb->base.obj[0]);
         amdgpufb_destroy_pinned_object(rfb->base.obj[0]);
         rfb->base.obj[0] = NULL;
         drm_framebuffer_unregister_private(&rfb->base);


@@ -267,7 +267,7 @@ static int amdgpu_ttm_map_buffer(struct ttm_buffer_object *bo,
     *addr += offset & ~PAGE_MASK;
 
     num_dw = ALIGN(adev->mman.buffer_funcs->copy_num_dw, 8);
-    num_bytes = num_pages * 8;
+    num_bytes = num_pages * 8 * AMDGPU_GPU_PAGES_IN_CPU_PAGE;
 
     r = amdgpu_job_alloc_with_ib(adev, num_dw * 4 + num_bytes,
                                  AMDGPU_IB_POOL_DELAYED, &job);
@@ -1381,6 +1381,7 @@ static void amdgpu_ttm_tt_unpopulate(struct ttm_bo_device *bdev, struct ttm_tt *
     if (gtt && gtt->userptr) {
         amdgpu_ttm_tt_set_user_pages(ttm, NULL);
         kfree(ttm->sg);
+        ttm->sg = NULL;
         ttm->page_flags &= ~TTM_PAGE_FLAG_SG;
         return;
     }


@@ -1334,9 +1334,10 @@ static const struct soc15_reg_golden golden_settings_gc_10_1_2[] =
     SOC15_REG_GOLDEN_VALUE(GC, 0, mmDB_DEBUG, 0xffffffff, 0x20000000),
     SOC15_REG_GOLDEN_VALUE(GC, 0, mmDB_DEBUG2, 0xffffffff, 0x00000420),
     SOC15_REG_GOLDEN_VALUE(GC, 0, mmDB_DEBUG3, 0xffffffff, 0x00000200),
-    SOC15_REG_GOLDEN_VALUE(GC, 0, mmDB_DEBUG4, 0xffffffff, 0x04800000),
+    SOC15_REG_GOLDEN_VALUE(GC, 0, mmDB_DEBUG4, 0xffffffff, 0x04900000),
     SOC15_REG_GOLDEN_VALUE(GC, 0, mmDB_DFSM_TILES_IN_FLIGHT, 0x0000ffff, 0x0000003f),
     SOC15_REG_GOLDEN_VALUE(GC, 0, mmDB_LAST_OF_BURST_CONFIG, 0xffffffff, 0x03860204),
+    SOC15_REG_GOLDEN_VALUE(GC, 0, mmGB_ADDR_CONFIG, 0x0c1800ff, 0x00000044),
     SOC15_REG_GOLDEN_VALUE(GC, 0, mmGCR_GENERAL_CNTL, 0x1ff0ffff, 0x00000500),
     SOC15_REG_GOLDEN_VALUE(GC, 0, mmGE_PRIV_CONTROL, 0x00007fff, 0x000001fe),
     SOC15_REG_GOLDEN_VALUE(GC, 0, mmGL1_PIPE_STEER, 0xffffffff, 0xe4e4e4e4),
@@ -1354,12 +1355,13 @@ static const struct soc15_reg_golden golden_settings_gc_10_1_2[] =
     SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_ENHANCE_2, 0x00000820, 0x00000820),
     SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_LINE_STIPPLE_STATE, 0x0000ff0f, 0x00000000),
     SOC15_REG_GOLDEN_VALUE(GC, 0, mmRMI_SPARE, 0xffffffff, 0xffff3101),
+    SOC15_REG_GOLDEN_VALUE(GC, 0, mmSPI_CONFIG_CNTL_1, 0x001f0000, 0x00070104),
     SOC15_REG_GOLDEN_VALUE(GC, 0, mmSQ_ALU_CLK_CTRL, 0xffffffff, 0xffffffff),
     SOC15_REG_GOLDEN_VALUE(GC, 0, mmSQ_ARB_CONFIG, 0x00000133, 0x00000130),
     SOC15_REG_GOLDEN_VALUE(GC, 0, mmSQ_LDS_CLK_CTRL, 0xffffffff, 0xffffffff),
     SOC15_REG_GOLDEN_VALUE(GC, 0, mmTA_CNTL_AUX, 0xfff7ffff, 0x01030000),
     SOC15_REG_GOLDEN_VALUE(GC, 0, mmTCP_CNTL, 0xffdf80ff, 0x479c0010),
-    SOC15_REG_GOLDEN_VALUE(GC, 0, mmUTCL1_CTRL, 0xffffffff, 0x00800000)
+    SOC15_REG_GOLDEN_VALUE(GC, 0, mmUTCL1_CTRL, 0xffffffff, 0x00c00000)
 };
 
 static void gfx_v10_rlcg_wreg(struct amdgpu_device *adev, u32 offset, u32 v)


@@ -4859,7 +4859,7 @@ static void gfx_v9_0_update_3d_clock_gating(struct amdgpu_device *adev,
     amdgpu_gfx_rlc_enter_safe_mode(adev);
 
     /* Enable 3D CGCG/CGLS */
-    if (enable && (adev->cg_flags & AMD_CG_SUPPORT_GFX_3D_CGCG)) {
+    if (enable) {
         /* write cmd to clear cgcg/cgls ov */
         def = data = RREG32_SOC15(GC, 0, mmRLC_CGTT_MGCG_OVERRIDE);
         /* unset CGCG override */
@@ -4871,8 +4871,12 @@ static void gfx_v9_0_update_3d_clock_gating(struct amdgpu_device *adev,
         /* enable 3Dcgcg FSM(0x0000363f) */
         def = RREG32_SOC15(GC, 0, mmRLC_CGCG_CGLS_CTRL_3D);
 
-        data = (0x36 << RLC_CGCG_CGLS_CTRL_3D__CGCG_GFX_IDLE_THRESHOLD__SHIFT) |
-            RLC_CGCG_CGLS_CTRL_3D__CGCG_EN_MASK;
+        if (adev->cg_flags & AMD_CG_SUPPORT_GFX_3D_CGCG)
+            data = (0x36 << RLC_CGCG_CGLS_CTRL_3D__CGCG_GFX_IDLE_THRESHOLD__SHIFT) |
+                RLC_CGCG_CGLS_CTRL_3D__CGCG_EN_MASK;
+        else
+            data = 0x0 << RLC_CGCG_CGLS_CTRL_3D__CGCG_GFX_IDLE_THRESHOLD__SHIFT;
+
         if (adev->cg_flags & AMD_CG_SUPPORT_GFX_3D_CGLS)
             data |= (0x000F << RLC_CGCG_CGLS_CTRL_3D__CGLS_REP_COMPANSAT_DELAY__SHIFT) |
                 RLC_CGCG_CGLS_CTRL_3D__CGLS_EN_MASK;


@@ -172,6 +172,8 @@ static int jpeg_v2_0_hw_fini(void *handle)
 {
     struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 
+    cancel_delayed_work_sync(&adev->vcn.idle_work);
+
     if (adev->jpeg.cur_state != AMD_PG_STATE_GATE &&
           RREG32_SOC15(JPEG, 0, mmUVD_JRBC_STATUS))
         jpeg_v2_0_set_powergating_state(adev, AMD_PG_STATE_GATE);


@@ -198,8 +198,6 @@ static int jpeg_v2_5_hw_fini(void *handle)
         if (adev->jpeg.cur_state != AMD_PG_STATE_GATE &&
               RREG32_SOC15(JPEG, i, mmUVD_JRBC_STATUS))
             jpeg_v2_5_set_powergating_state(adev, AMD_PG_STATE_GATE);
-
-        ring->sched.ready = false;
     }
 
     return 0;


@@ -166,8 +166,6 @@ static int jpeg_v3_0_hw_fini(void *handle)
           RREG32_SOC15(JPEG, 0, mmUVD_JRBC_STATUS))
         jpeg_v3_0_set_powergating_state(adev, AMD_PG_STATE_GATE);
 
-    ring->sched.ready = false;
-
     return 0;
 }


@@ -124,6 +124,10 @@ static const struct soc15_reg_golden golden_settings_sdma_nv14[] = {
 
 static const struct soc15_reg_golden golden_settings_sdma_nv12[] = {
     SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA0_RLC3_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
+    SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA0_GB_ADDR_CONFIG, 0x001877ff, 0x00000044),
+    SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA0_GB_ADDR_CONFIG_READ, 0x001877ff, 0x00000044),
+    SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA1_GB_ADDR_CONFIG, 0x001877ff, 0x00000044),
+    SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA1_GB_ADDR_CONFIG_READ, 0x001877ff, 0x00000044),
     SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA1_RLC3_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
 };


@@ -476,11 +476,6 @@ static void sdma_v5_2_gfx_stop(struct amdgpu_device *adev)
         ib_cntl = REG_SET_FIELD(ib_cntl, SDMA0_GFX_IB_CNTL, IB_ENABLE, 0);
         WREG32(sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_GFX_IB_CNTL), ib_cntl);
     }
-
-    sdma0->sched.ready = false;
-    sdma1->sched.ready = false;
-    sdma2->sched.ready = false;
-    sdma3->sched.ready = false;
 }
 
 /**
/** /**

View File

@@ -1183,7 +1183,6 @@ static int soc15_common_early_init(void *handle)
adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG | adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
AMD_CG_SUPPORT_GFX_MGLS | AMD_CG_SUPPORT_GFX_MGLS |
AMD_CG_SUPPORT_GFX_CP_LS | AMD_CG_SUPPORT_GFX_CP_LS |
AMD_CG_SUPPORT_GFX_3D_CGCG |
AMD_CG_SUPPORT_GFX_3D_CGLS | AMD_CG_SUPPORT_GFX_3D_CGLS |
AMD_CG_SUPPORT_GFX_CGCG | AMD_CG_SUPPORT_GFX_CGCG |
AMD_CG_SUPPORT_GFX_CGLS | AMD_CG_SUPPORT_GFX_CGLS |
@@ -1203,7 +1202,6 @@ static int soc15_common_early_init(void *handle)
AMD_CG_SUPPORT_GFX_MGLS | AMD_CG_SUPPORT_GFX_MGLS |
AMD_CG_SUPPORT_GFX_RLC_LS | AMD_CG_SUPPORT_GFX_RLC_LS |
AMD_CG_SUPPORT_GFX_CP_LS | AMD_CG_SUPPORT_GFX_CP_LS |
AMD_CG_SUPPORT_GFX_3D_CGCG |
AMD_CG_SUPPORT_GFX_3D_CGLS | AMD_CG_SUPPORT_GFX_3D_CGLS |
AMD_CG_SUPPORT_GFX_CGCG | AMD_CG_SUPPORT_GFX_CGCG |
AMD_CG_SUPPORT_GFX_CGLS | AMD_CG_SUPPORT_GFX_CGLS |


@@ -232,9 +232,13 @@ static int vcn_v1_0_hw_fini(void *handle)
 {
     struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 
+    cancel_delayed_work_sync(&adev->vcn.idle_work);
+
     if ((adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) ||
-        RREG32_SOC15(VCN, 0, mmUVD_STATUS))
+        (adev->vcn.cur_state != AMD_PG_STATE_GATE &&
+         RREG32_SOC15(VCN, 0, mmUVD_STATUS))) {
         vcn_v1_0_set_powergating_state(adev, AMD_PG_STATE_GATE);
+    }
 
     return 0;
 }


@@ -262,6 +262,8 @@ static int vcn_v2_0_hw_fini(void *handle)
 {
     struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 
+    cancel_delayed_work_sync(&adev->vcn.idle_work);
+
     if ((adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) ||
         (adev->vcn.cur_state != AMD_PG_STATE_GATE &&
           RREG32_SOC15(VCN, 0, mmUVD_STATUS)))


@@ -321,6 +321,8 @@ static int vcn_v2_5_hw_fini(void *handle)
     struct amdgpu_device *adev = (struct amdgpu_device *)handle;
     int i;
 
+    cancel_delayed_work_sync(&adev->vcn.idle_work);
+
     for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
         if (adev->vcn.harvest_config & (1 << i))
             continue;


@@ -346,7 +346,7 @@ static int vcn_v3_0_hw_fini(void *handle)
 {
     struct amdgpu_device *adev = (struct amdgpu_device *)handle;
     struct amdgpu_ring *ring;
-    int i, j;
+    int i;
 
     for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
         if (adev->vcn.harvest_config & (1 << i))
@@ -361,12 +361,6 @@ static int vcn_v3_0_hw_fini(void *handle)
                 vcn_v3_0_set_powergating_state(adev, AMD_PG_STATE_GATE);
             }
         }
-        ring->sched.ready = false;
-
-        for (j = 0; j < adev->vcn.num_enc_rings; ++j) {
-            ring = &adev->vcn.inst[i].ring_enc[j];
-            ring->sched.ready = false;
-        }
     }
 
     return 0;


@@ -1049,6 +1049,24 @@ static bool dc_link_detect_helper(struct dc_link *link,
             dc_is_dvi_signal(link->connector_signal)) {
             if (prev_sink)
                 dc_sink_release(prev_sink);
+            link_disconnect_sink(link);
+
+            return false;
+        }
+
+        /*
+         * Abort detection for DP connectors if we have
+         * no EDID and connector is active converter
+         * as there are no display downstream
+         *
+         */
+        if (dc_is_dp_sst_signal(link->connector_signal) &&
+            (link->dpcd_caps.dongle_type ==
+                    DISPLAY_DONGLE_DP_VGA_CONVERTER ||
+            link->dpcd_caps.dongle_type ==
+                    DISPLAY_DONGLE_DP_DVI_CONVERTER)) {
+            if (prev_sink)
+                dc_sink_release(prev_sink);
+            link_disconnect_sink(link);
 
             return false;
         }


@@ -2606,6 +2606,8 @@ static ssize_t navi10_get_gpu_metrics(struct smu_context *smu,
 
 static int navi10_enable_mgpu_fan_boost(struct smu_context *smu)
 {
+    struct smu_table_context *table_context = &smu->smu_table;
+    PPTable_t *smc_pptable = table_context->driver_pptable;
     struct amdgpu_device *adev = smu->adev;
     uint32_t param = 0;
 
@@ -2613,6 +2615,13 @@ static int navi10_enable_mgpu_fan_boost(struct smu_context *smu)
     if (adev->asic_type == CHIP_NAVI12)
         return 0;
 
+    /*
+     * Skip the MGpuFanBoost setting for those ASICs
+     * which do not support it
+     */
+    if (!smc_pptable->MGpuFanBoostLimitRpm)
+        return 0;
+
     /* Workaround for WS SKU */
     if (adev->pdev->device == 0x7312 &&
         adev->pdev->revision == 0)


@@ -2715,6 +2715,16 @@ static ssize_t sienna_cichlid_get_gpu_metrics(struct smu_context *smu,
 
 static int sienna_cichlid_enable_mgpu_fan_boost(struct smu_context *smu)
 {
+    struct smu_table_context *table_context = &smu->smu_table;
+    PPTable_t *smc_pptable = table_context->driver_pptable;
+
+    /*
+     * Skip the MGpuFanBoost setting for those ASICs
+     * which do not support it
+     */
+    if (!smc_pptable->MGpuFanBoostLimitRpm)
+        return 0;
+
     return smu_cmn_send_smc_msg_with_param(smu,
                                            SMU_MSG_SetMGpuFanBoostLimitRpm,
                                            0,


@@ -4136,7 +4136,7 @@ static void chv_dp_post_pll_disable(struct intel_atomic_state *state,
  * link status information
  */
 bool
-intel_dp_get_link_status(struct intel_dp *intel_dp, u8 link_status[DP_LINK_STATUS_SIZE])
+intel_dp_get_link_status(struct intel_dp *intel_dp, u8 *link_status)
 {
     return drm_dp_dpcd_read(&intel_dp->aux, DP_LANE0_1_STATUS, link_status,
                             DP_LINK_STATUS_SIZE) == DP_LINK_STATUS_SIZE;


@@ -397,7 +397,10 @@ static void emit_batch(struct i915_vma * const vma,
     gen7_emit_pipeline_invalidate(&cmds);
     batch_add(&cmds, MI_LOAD_REGISTER_IMM(2));
     batch_add(&cmds, i915_mmio_reg_offset(CACHE_MODE_0_GEN7));
-    batch_add(&cmds, 0xffff0000);
+    batch_add(&cmds, 0xffff0000 |
+            ((IS_IVB_GT1(i915) || IS_VALLEYVIEW(i915)) ?
+             HIZ_RAW_STALL_OPT_DISABLE :
+             0));
     batch_add(&cmds, i915_mmio_reg_offset(CACHE_MODE_1));
     batch_add(&cmds, 0xffff0000 | PIXEL_SUBSPAN_COLLECT_OPT_DISABLE);
     gen7_emit_pipeline_invalidate(&cmds);


@@ -485,11 +485,12 @@ static int meson_probe_remote(struct platform_device *pdev,
 static void meson_drv_shutdown(struct platform_device *pdev)
 {
     struct meson_drm *priv = dev_get_drvdata(&pdev->dev);
-    struct drm_device *drm = priv->drm;
 
-    DRM_DEBUG_DRIVER("\n");
-    drm_kms_helper_poll_fini(drm);
-    drm_atomic_helper_shutdown(drm);
+    if (!priv)
+        return;
+
+    drm_kms_helper_poll_fini(priv->drm);
+    drm_atomic_helper_shutdown(priv->drm);
 }
 
 static int meson_drv_probe(struct platform_device *pdev)


@@ -596,7 +596,6 @@ static int lm80_probe(struct i2c_client *client)
     struct device *dev = &client->dev;
     struct device *hwmon_dev;
     struct lm80_data *data;
-    int rv;
 
     data = devm_kzalloc(dev, sizeof(struct lm80_data), GFP_KERNEL);
     if (!data)
@@ -609,14 +608,8 @@ static int lm80_probe(struct i2c_client *client)
     lm80_init_client(client);
 
     /* A few vars need to be filled upon startup */
-    rv = lm80_read_value(client, LM80_REG_FAN_MIN(1));
-    if (rv < 0)
-        return rv;
-    data->fan[f_min][0] = rv;
-    rv = lm80_read_value(client, LM80_REG_FAN_MIN(2));
-    if (rv < 0)
-        return rv;
-    data->fan[f_min][1] = rv;
+    data->fan[f_min][0] = lm80_read_value(client, LM80_REG_FAN_MIN(1));
+    data->fan[f_min][1] = lm80_read_value(client, LM80_REG_FAN_MIN(2));
 
     hwmon_dev = devm_hwmon_device_register_with_groups(dev, client->name,
                                                        data, lm80_groups);


@@ -391,11 +391,9 @@ static int i801_check_post(struct i801_priv *priv, int status)
dev_err(&priv->pci_dev->dev, "Transaction timeout\n");
/* try to stop the current command */
dev_dbg(&priv->pci_dev->dev, "Terminating the current operation\n");
-outb_p(inb_p(SMBHSTCNT(priv)) | SMBHSTCNT_KILL,
-SMBHSTCNT(priv));
+outb_p(SMBHSTCNT_KILL, SMBHSTCNT(priv));
usleep_range(1000, 2000);
-outb_p(inb_p(SMBHSTCNT(priv)) & (~SMBHSTCNT_KILL),
-SMBHSTCNT(priv));
+outb_p(0, SMBHSTCNT(priv));
/* Check if it worked */
status = inb_p(SMBHSTSTS(priv));


@@ -478,6 +478,11 @@ static void mtk_i2c_clock_disable(struct mtk_i2c *i2c)
static void mtk_i2c_init_hw(struct mtk_i2c *i2c)
{
u16 control_reg;
+u16 intr_stat_reg;
+mtk_i2c_writew(i2c, I2C_CHN_CLR_FLAG, OFFSET_START);
+intr_stat_reg = mtk_i2c_readw(i2c, OFFSET_INTR_STAT);
+mtk_i2c_writew(i2c, intr_stat_reg, OFFSET_INTR_STAT);
if (i2c->dev_comp->apdma_sync) {
writel(I2C_DMA_WARM_RST, i2c->pdmabase + OFFSET_RST);


@@ -483,7 +483,10 @@ static int i2c_s3c_irq_nextbyte(struct s3c24xx_i2c *i2c, unsigned long iicstat)
* forces us to send a new START
* when we change direction
*/
+dev_dbg(i2c->dev,
+"missing START before write->read\n");
s3c24xx_i2c_stop(i2c, -EINVAL);
+break;
}
goto retry_write;


@@ -807,7 +807,7 @@ static const struct sh_mobile_dt_config r8a7740_dt_config = {
static const struct of_device_id sh_mobile_i2c_dt_ids[] = {
{ .compatible = "renesas,iic-r8a73a4", .data = &fast_clock_dt_config },
{ .compatible = "renesas,iic-r8a7740", .data = &r8a7740_dt_config },
-{ .compatible = "renesas,iic-r8a774c0", .data = &fast_clock_dt_config },
+{ .compatible = "renesas,iic-r8a774c0", .data = &v2_freq_calc_dt_config },
{ .compatible = "renesas,iic-r8a7790", .data = &v2_freq_calc_dt_config },
{ .compatible = "renesas,iic-r8a7791", .data = &v2_freq_calc_dt_config },
{ .compatible = "renesas,iic-r8a7792", .data = &v2_freq_calc_dt_config },


@@ -616,6 +616,13 @@ static int ad7124_of_parse_channel_config(struct iio_dev *indio_dev,
if (ret)
goto err;
+if (channel >= indio_dev->num_channels) {
+dev_err(indio_dev->dev.parent,
+"Channel index >= number of channels\n");
+ret = -EINVAL;
+goto err;
+}
ret = of_property_read_u32_array(child, "diff-channels",
ain, 2);
if (ret)
@@ -707,6 +714,11 @@ static int ad7124_setup(struct ad7124_state *st)
return ret;
}
+static void ad7124_reg_disable(void *r)
+{
+regulator_disable(r);
+}
static int ad7124_probe(struct spi_device *spi)
{
const struct ad7124_chip_info *info;
@@ -752,17 +764,20 @@ static int ad7124_probe(struct spi_device *spi)
ret = regulator_enable(st->vref[i]);
if (ret)
return ret;
+ret = devm_add_action_or_reset(&spi->dev, ad7124_reg_disable,
+st->vref[i]);
+if (ret)
+return ret;
}
st->mclk = devm_clk_get(&spi->dev, "mclk");
-if (IS_ERR(st->mclk)) {
-ret = PTR_ERR(st->mclk);
-goto error_regulator_disable;
-}
+if (IS_ERR(st->mclk))
+return PTR_ERR(st->mclk);
ret = clk_prepare_enable(st->mclk);
if (ret < 0)
-goto error_regulator_disable;
+return ret;
ret = ad7124_soft_reset(st);
if (ret < 0)
@@ -792,11 +807,6 @@ error_remove_trigger:
ad_sd_cleanup_buffer_and_trigger(indio_dev);
error_clk_disable_unprepare:
clk_disable_unprepare(st->mclk);
-error_regulator_disable:
-for (i = ARRAY_SIZE(st->vref) - 1; i >= 0; i--) {
-if (!IS_ERR_OR_NULL(st->vref[i]))
-regulator_disable(st->vref[i]);
-}
return ret;
}
@@ -805,17 +815,11 @@ static int ad7124_remove(struct spi_device *spi)
{
struct iio_dev *indio_dev = spi_get_drvdata(spi);
struct ad7124_state *st = iio_priv(indio_dev);
-int i;
iio_device_unregister(indio_dev);
ad_sd_cleanup_buffer_and_trigger(indio_dev);
clk_disable_unprepare(st->mclk);
-for (i = ARRAY_SIZE(st->vref) - 1; i >= 0; i--) {
-if (!IS_ERR_OR_NULL(st->vref[i]))
-regulator_disable(st->vref[i]);
-}
return 0;
}


@@ -912,7 +912,7 @@ static int ad7192_probe(struct spi_device *spi)
{
struct ad7192_state *st;
struct iio_dev *indio_dev;
-int ret, voltage_uv = 0;
+int ret;
if (!spi->irq) {
dev_err(&spi->dev, "no IRQ?\n");
@@ -949,15 +949,12 @@ static int ad7192_probe(struct spi_device *spi)
goto error_disable_avdd;
}
-voltage_uv = regulator_get_voltage(st->avdd);
-if (voltage_uv > 0) {
-st->int_vref_mv = voltage_uv / 1000;
-} else {
-ret = voltage_uv;
+ret = regulator_get_voltage(st->avdd);
+if (ret < 0) {
dev_err(&spi->dev, "Device tree error, reference voltage undefined\n");
goto error_disable_avdd;
}
+st->int_vref_mv = ret / 1000;
spi_set_drvdata(spi, indio_dev);
st->chip_info = of_device_get_match_data(&spi->dev);
@@ -1014,7 +1011,9 @@ static int ad7192_probe(struct spi_device *spi)
return 0;
error_disable_clk:
-clk_disable_unprepare(st->mclk);
+if (st->clock_sel == AD7192_CLK_EXT_MCLK1_2 ||
+st->clock_sel == AD7192_CLK_EXT_MCLK2)
+clk_disable_unprepare(st->mclk);
error_remove_trigger:
ad_sd_cleanup_buffer_and_trigger(indio_dev);
error_disable_dvdd:
@@ -1031,7 +1030,9 @@ static int ad7192_remove(struct spi_device *spi)
struct ad7192_state *st = iio_priv(indio_dev);
iio_device_unregister(indio_dev);
-clk_disable_unprepare(st->mclk);
+if (st->clock_sel == AD7192_CLK_EXT_MCLK1_2 ||
+st->clock_sel == AD7192_CLK_EXT_MCLK2)
+clk_disable_unprepare(st->mclk);
ad_sd_cleanup_buffer_and_trigger(indio_dev);
regulator_disable(st->dvdd);


@@ -166,6 +166,10 @@ struct ad7768_state {
* transfer buffers to live in their own cache lines.
*/
union {
+struct {
+__be32 chan;
+s64 timestamp;
+} scan;
__be32 d32;
u8 d8[2];
} data ____cacheline_aligned;
@@ -459,11 +463,11 @@ static irqreturn_t ad7768_trigger_handler(int irq, void *p)
mutex_lock(&st->lock);
-ret = spi_read(st->spi, &st->data.d32, 3);
+ret = spi_read(st->spi, &st->data.scan.chan, 3);
if (ret < 0)
goto err_unlock;
-iio_push_to_buffers_with_timestamp(indio_dev, &st->data.d32,
+iio_push_to_buffers_with_timestamp(indio_dev, &st->data.scan,
iio_get_time_ns(indio_dev));
iio_trigger_notify_done(indio_dev->trig);


@@ -279,6 +279,7 @@ static int ad7793_setup(struct iio_dev *indio_dev,
id &= AD7793_ID_MASK;
if (id != st->chip_info->id) {
+ret = -ENODEV;
dev_err(&st->sd.spi->dev, "device ID query failed\n");
goto out;
}


@@ -59,8 +59,10 @@ struct ad7923_state {
/*
* DMA (thus cache coherency maintenance) requires the
* transfer buffers to live in their own cache lines.
+* Ensure rx_buf can be directly used in iio_push_to_buffers_with_timetamp
+* Length = 8 channels + 4 extra for 8 byte timestamp
*/
-__be16 rx_buf[4] ____cacheline_aligned;
+__be16 rx_buf[12] ____cacheline_aligned;
__be16 tx_buf[4];
};


@@ -524,23 +524,29 @@ static int ad5770r_channel_config(struct ad5770r_state *st)
device_for_each_child_node(&st->spi->dev, child) {
ret = fwnode_property_read_u32(child, "num", &num);
if (ret)
-return ret;
+goto err_child_out;
-if (num >= AD5770R_MAX_CHANNELS)
-return -EINVAL;
+if (num >= AD5770R_MAX_CHANNELS) {
+ret = -EINVAL;
+goto err_child_out;
+}
ret = fwnode_property_read_u32_array(child,
"adi,range-microamp",
tmp, 2);
if (ret)
-return ret;
+goto err_child_out;
min = tmp[0] / 1000;
max = tmp[1] / 1000;
ret = ad5770r_store_output_range(st, min, max, num);
if (ret)
-return ret;
+goto err_child_out;
}
+return 0;
+err_child_out:
+fwnode_handle_put(child);
return ret;
}


@@ -399,6 +399,7 @@ static int fxas21002c_temp_get(struct fxas21002c_data *data, int *val)
ret = regmap_field_read(data->regmap_fields[F_TEMP], &temp);
if (ret < 0) {
dev_err(dev, "failed to read temp: %d\n", ret);
+fxas21002c_pm_put(data);
goto data_unlock;
}
@@ -432,6 +433,7 @@ static int fxas21002c_axis_get(struct fxas21002c_data *data,
&axis_be, sizeof(axis_be));
if (ret < 0) {
dev_err(dev, "failed to read axis: %d: %d\n", index, ret);
+fxas21002c_pm_put(data);
goto data_unlock;
}


@@ -482,6 +482,7 @@ static void cma_release_dev(struct rdma_id_private *id_priv)
list_del(&id_priv->list);
cma_dev_put(id_priv->cma_dev);
id_priv->cma_dev = NULL;
+id_priv->id.device = NULL;
if (id_priv->id.route.addr.dev_addr.sgid_attr) {
rdma_put_gid_attr(id_priv->id.route.addr.dev_addr.sgid_attr);
id_priv->id.route.addr.dev_addr.sgid_attr = NULL;
@@ -1864,6 +1865,7 @@ static void _destroy_id(struct rdma_id_private *id_priv,
iw_destroy_cm_id(id_priv->cm_id.iw);
}
cma_leave_mc_groups(id_priv);
+rdma_restrack_del(&id_priv->res);
cma_release_dev(id_priv);
}
@@ -1877,7 +1879,6 @@ static void _destroy_id(struct rdma_id_private *id_priv,
kfree(id_priv->id.route.path_rec);
put_net(id_priv->id.route.addr.dev_addr.net);
-rdma_restrack_del(&id_priv->res);
kfree(id_priv);
}
@@ -3740,7 +3741,7 @@ int rdma_listen(struct rdma_cm_id *id, int backlog)
}
id_priv->backlog = backlog;
-if (id->device) {
+if (id_priv->cma_dev) {
if (rdma_cap_ib_cm(id->device, 1)) {
ret = cma_ib_listen(id_priv);
if (ret)


@@ -117,8 +117,8 @@ static int UVERBS_HANDLER(UVERBS_METHOD_INFO_HANDLES)(
return ret;
uapi_object = uapi_get_object(attrs->ufile->device->uapi, object_id);
-if (!uapi_object)
-return -EINVAL;
+if (IS_ERR(uapi_object))
+return PTR_ERR(uapi_object);
handles = gather_objects_handle(attrs->ufile, uapi_object, attrs,
out_len, &total);
@@ -331,6 +331,9 @@ static int UVERBS_HANDLER(UVERBS_METHOD_QUERY_GID_TABLE)(
if (ret)
return ret;
+if (!user_entry_size)
+return -EINVAL;
max_entries = uverbs_attr_ptr_get_array_size(
attrs, UVERBS_ATTR_QUERY_GID_TABLE_RESP_ENTRIES,
user_entry_size);


@@ -559,9 +559,8 @@ static bool devx_is_valid_obj_id(struct uverbs_attr_bundle *attrs,
case UVERBS_OBJECT_QP:
{
struct mlx5_ib_qp *qp = to_mqp(uobj->object);
-enum ib_qp_type qp_type = qp->ibqp.qp_type;
-if (qp_type == IB_QPT_RAW_PACKET ||
+if (qp->type == IB_QPT_RAW_PACKET ||
(qp->flags & IB_QP_CREATE_SOURCE_QPN)) {
struct mlx5_ib_raw_packet_qp *raw_packet_qp =
&qp->raw_packet_qp;
@@ -578,10 +577,9 @@ static bool devx_is_valid_obj_id(struct uverbs_attr_bundle *attrs,
sq->tisn) == obj_id);
}
-if (qp_type == MLX5_IB_QPT_DCT)
+if (qp->type == MLX5_IB_QPT_DCT)
return get_enc_obj_id(MLX5_CMD_OP_CREATE_DCT,
qp->dct.mdct.mqp.qpn) == obj_id;
return get_enc_obj_id(MLX5_CMD_OP_CREATE_QP,
qp->ibqp.qp_num) == obj_id;
}


@@ -4762,6 +4762,7 @@ static void *mlx5_ib_add_slave_port(struct mlx5_core_dev *mdev)
if (bound) {
rdma_roce_rescan_device(&dev->ib_dev);
+mpi->ibdev->ib_active = true;
break;
}
}


@@ -231,6 +231,7 @@ static int rxe_qp_init_req(struct rxe_dev *rxe, struct rxe_qp *qp,
if (err) {
vfree(qp->sq.queue->buf);
kfree(qp->sq.queue);
+qp->sq.queue = NULL;
return err;
}
@@ -284,6 +285,7 @@ static int rxe_qp_init_resp(struct rxe_dev *rxe, struct rxe_qp *qp,
if (err) {
vfree(qp->rq.queue->buf);
kfree(qp->rq.queue);
+qp->rq.queue = NULL;
return err;
}
}
@@ -344,6 +346,11 @@ int rxe_qp_from_init(struct rxe_dev *rxe, struct rxe_qp *qp, struct rxe_pd *pd,
err2:
rxe_queue_cleanup(qp->sq.queue);
err1:
+qp->pd = NULL;
+qp->rcq = NULL;
+qp->scq = NULL;
+qp->srq = NULL;
if (srq)
rxe_drop_ref(srq);
rxe_drop_ref(scq);


@@ -300,7 +300,6 @@ struct ib_qp *siw_create_qp(struct ib_pd *pd,
struct siw_ucontext *uctx =
rdma_udata_to_drv_context(udata, struct siw_ucontext,
base_ucontext);
-struct siw_cq *scq = NULL, *rcq = NULL;
unsigned long flags;
int num_sqe, num_rqe, rv = 0;
size_t length;
@@ -340,10 +339,8 @@ struct ib_qp *siw_create_qp(struct ib_pd *pd,
rv = -EINVAL;
goto err_out;
}
-scq = to_siw_cq(attrs->send_cq);
-rcq = to_siw_cq(attrs->recv_cq);
-if (!scq || (!rcq && !attrs->srq)) {
+if (!attrs->send_cq || (!attrs->recv_cq && !attrs->srq)) {
siw_dbg(base_dev, "send CQ or receive CQ invalid\n");
rv = -EINVAL;
goto err_out;
@@ -375,7 +372,7 @@ struct ib_qp *siw_create_qp(struct ib_pd *pd,
else {
/* Zero sized SQ is not supported */
rv = -EINVAL;
-goto err_out;
+goto err_out_xa;
}
if (num_rqe)
num_rqe = roundup_pow_of_two(num_rqe);
@@ -398,8 +395,8 @@ struct ib_qp *siw_create_qp(struct ib_pd *pd,
}
}
qp->pd = pd;
-qp->scq = scq;
-qp->rcq = rcq;
+qp->scq = to_siw_cq(attrs->send_cq);
+qp->rcq = to_siw_cq(attrs->recv_cq);
if (attrs->srq) {
/*


@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0
/*
-* Copyright (c) 2020, The Linux Foundation. All rights reserved.
+* Copyright (c) 2020-2021, The Linux Foundation. All rights reserved.
*/
#include <asm/div64.h>
@@ -212,6 +212,7 @@ struct bcm_voter *of_bcm_voter_get(struct device *dev, const char *name)
}
mutex_unlock(&bcm_voter_lock);
+of_node_put(node);
return voter;
}
EXPORT_SYMBOL_GPL(of_bcm_voter_get);
@@ -369,6 +370,7 @@ static const struct of_device_id bcm_voter_of_match[] = {
{ .compatible = "qcom,bcm-voter" },
{ }
};
+MODULE_DEVICE_TABLE(of, bcm_voter_of_match);
static struct platform_driver qcom_icc_bcm_voter_driver = {
.probe = qcom_icc_bcm_voter_probe,


@@ -1137,7 +1137,7 @@ static int alloc_iommu(struct dmar_drhd_unit *drhd)
err = iommu_device_register(&iommu->iommu);
if (err)
-goto err_unmap;
+goto err_sysfs;
}
drhd->iommu = iommu;
@@ -1145,6 +1145,8 @@ static int alloc_iommu(struct dmar_drhd_unit *drhd)
return 0;
+err_sysfs:
+iommu_device_sysfs_remove(&iommu->iommu);
err_unmap:
unmap_iommu(iommu);
error_free_seq_id:


@@ -2606,9 +2606,9 @@ static int domain_setup_first_level(struct intel_iommu *iommu,
struct device *dev,
u32 pasid)
{
-int flags = PASID_FLAG_SUPERVISOR_MODE;
struct dma_pte *pgd = domain->pgd;
int agaw, level;
+int flags = 0;
/*
* Skip top levels of page tables for iommu which has
@@ -2624,7 +2624,10 @@ static int domain_setup_first_level(struct intel_iommu *iommu,
if (level != 4 && level != 5)
return -EINVAL;
-flags |= (level == 5) ? PASID_FLAG_FL5LP : 0;
+if (pasid != PASID_RID2PASID)
+flags |= PASID_FLAG_SUPERVISOR_MODE;
+if (level == 5)
+flags |= PASID_FLAG_FL5LP;
if (domain->domain.type == IOMMU_DOMAIN_UNMANAGED)
flags |= PASID_FLAG_PAGE_SNOOP;


@@ -677,7 +677,8 @@ int intel_pasid_setup_second_level(struct intel_iommu *iommu,
* Since it is a second level only translation setup, we should
* set SRE bit as well (addresses are expected to be GPAs).
*/
-pasid_set_sre(pte);
+if (pasid != PASID_RID2PASID)
+pasid_set_sre(pte);
pasid_set_present(pte);
pasid_flush_caches(iommu, pte, pasid, did);


@@ -1138,6 +1138,7 @@ static struct virtio_device_id id_table[] = {
{ VIRTIO_ID_IOMMU, VIRTIO_DEV_ANY_ID },
{ 0 },
};
+MODULE_DEVICE_TABLE(virtio, id_table);
static struct virtio_driver virtio_iommu_drv = {
.driver.name = KBUILD_MODNAME,


@@ -46,7 +46,7 @@ static void hfcsusb_start_endpoint(struct hfcsusb *hw, int channel);
static void hfcsusb_stop_endpoint(struct hfcsusb *hw, int channel);
static int hfcsusb_setup_bch(struct bchannel *bch, int protocol);
static void deactivate_bchannel(struct bchannel *bch);
-static void hfcsusb_ph_info(struct hfcsusb *hw);
+static int hfcsusb_ph_info(struct hfcsusb *hw);
/* start next background transfer for control channel */
static void
@@ -241,7 +241,7 @@ hfcusb_l2l1B(struct mISDNchannel *ch, struct sk_buff *skb)
* send full D/B channel status information
* as MPH_INFORMATION_IND
*/
-static void
+static int
hfcsusb_ph_info(struct hfcsusb *hw)
{
struct ph_info *phi;
@@ -250,7 +250,7 @@ hfcsusb_ph_info(struct hfcsusb *hw)
phi = kzalloc(struct_size(phi, bch, dch->dev.nrbchan), GFP_ATOMIC);
if (!phi)
-return;
+return -ENOMEM;
phi->dch.ch.protocol = hw->protocol;
phi->dch.ch.Flags = dch->Flags;
@@ -263,6 +263,8 @@ hfcsusb_ph_info(struct hfcsusb *hw)
_queue_data(&dch->dev.D, MPH_INFORMATION_IND, MISDN_ID_ANY,
struct_size(phi, bch, dch->dev.nrbchan), phi, GFP_ATOMIC);
kfree(phi);
+return 0;
}
/*
@@ -347,8 +349,7 @@ hfcusb_l2l1D(struct mISDNchannel *ch, struct sk_buff *skb)
ret = l1_event(dch->l1, hh->prim);
break;
case MPH_INFORMATION_REQ:
-hfcsusb_ph_info(hw);
-ret = 0;
+ret = hfcsusb_ph_info(hw);
break;
}
@@ -403,8 +404,7 @@ hfc_l1callback(struct dchannel *dch, u_int cmd)
hw->name, __func__, cmd);
return -1;
}
-hfcsusb_ph_info(hw);
-return 0;
+return hfcsusb_ph_info(hw);
}
static int
@@ -746,8 +746,7 @@ hfcsusb_setup_bch(struct bchannel *bch, int protocol)
handle_led(hw, (bch->nr == 1) ? LED_B1_OFF :
LED_B2_OFF);
}
-hfcsusb_ph_info(hw);
-return 0;
+return hfcsusb_ph_info(hw);
}
static void


@@ -630,17 +630,19 @@ static void
release_io(struct inf_hw *hw)
{
if (hw->cfg.mode) {
-if (hw->cfg.p) {
+if (hw->cfg.mode == AM_MEMIO) {
release_mem_region(hw->cfg.start, hw->cfg.size);
-iounmap(hw->cfg.p);
+if (hw->cfg.p)
+iounmap(hw->cfg.p);
} else
release_region(hw->cfg.start, hw->cfg.size);
hw->cfg.mode = AM_NONE;
}
if (hw->addr.mode) {
-if (hw->addr.p) {
+if (hw->addr.mode == AM_MEMIO) {
release_mem_region(hw->addr.start, hw->addr.size);
-iounmap(hw->addr.p);
+if (hw->addr.p)
+iounmap(hw->addr.p);
} else
release_region(hw->addr.start, hw->addr.size);
hw->addr.mode = AM_NONE;
@@ -670,9 +672,12 @@ setup_io(struct inf_hw *hw)
(ulong)hw->cfg.start, (ulong)hw->cfg.size);
return err;
}
-if (hw->ci->cfg_mode == AM_MEMIO)
-hw->cfg.p = ioremap(hw->cfg.start, hw->cfg.size);
hw->cfg.mode = hw->ci->cfg_mode;
+if (hw->ci->cfg_mode == AM_MEMIO) {
+hw->cfg.p = ioremap(hw->cfg.start, hw->cfg.size);
+if (!hw->cfg.p)
+return -ENOMEM;
+}
if (debug & DEBUG_HW)
pr_notice("%s: IO cfg %lx (%lu bytes) mode%d\n",
hw->name, (ulong)hw->cfg.start,
@@ -697,12 +702,12 @@ setup_io(struct inf_hw *hw)
(ulong)hw->addr.start, (ulong)hw->addr.size);
return err;
}
+hw->addr.mode = hw->ci->addr_mode;
if (hw->ci->addr_mode == AM_MEMIO) {
hw->addr.p = ioremap(hw->addr.start, hw->addr.size);
-if (unlikely(!hw->addr.p))
+if (!hw->addr.p)
return -ENOMEM;
}
-hw->addr.mode = hw->ci->addr_mode;
if (debug & DEBUG_HW)
pr_notice("%s: IO addr %lx (%lu bytes) mode%d\n",
hw->name, (ulong)hw->addr.start,


@@ -307,7 +307,7 @@ static int lp5523_init_program_engine(struct lp55xx_chip *chip)
usleep_range(3000, 6000);
ret = lp55xx_read(chip, LP5523_REG_STATUS, &status);
if (ret)
-return ret;
+goto out;
status &= LP5523_ENG_STATUS_MASK;
if (status != LP5523_ENG_STATUS_MASK) {


@@ -854,7 +854,7 @@ static int dm_add_exception(void *context, chunk_t old, chunk_t new)
static uint32_t __minimum_chunk_size(struct origin *o)
{
struct dm_snapshot *snap;
-unsigned chunk_size = 0;
+unsigned chunk_size = rounddown_pow_of_two(UINT_MAX);
if (o)
list_for_each_entry(snap, &o->snapshots, list)
@@ -1408,6 +1408,7 @@ static int snapshot_ctr(struct dm_target *ti, unsigned int argc, char **argv)
if (!s->store->chunk_size) {
ti->error = "Chunk size not set";
+r = -EINVAL;
goto bad_read_metadata;
}


@@ -281,7 +281,7 @@ static int sp8870_set_frontend_parameters(struct dvb_frontend *fe)
// read status reg in order to clear pending irqs
err = sp8870_readreg(state, 0x200);
-if (err)
+if (err < 0)
return err;
// system controller start


@@ -915,7 +915,6 @@ static int rcar_drif_g_fmt_sdr_cap(struct file *file, void *priv,
{
struct rcar_drif_sdr *sdr = video_drvdata(file);
-memset(f->fmt.sdr.reserved, 0, sizeof(f->fmt.sdr.reserved));
f->fmt.sdr.pixelformat = sdr->fmt->pixelformat;
f->fmt.sdr.buffersize = sdr->fmt->buffersize;


@@ -1424,7 +1424,6 @@ static int sd_config(struct gspca_dev *gspca_dev,
{
struct sd *sd = (struct sd *) gspca_dev;
struct cam *cam;
-int ret;
sd->mainsFreq = FREQ_DEF == V4L2_CID_POWER_LINE_FREQUENCY_60HZ;
reset_camera_params(gspca_dev);
@@ -1436,10 +1435,7 @@ static int sd_config(struct gspca_dev *gspca_dev,
cam->cam_mode = mode;
cam->nmodes = ARRAY_SIZE(mode);
-ret = goto_low_power(gspca_dev);
-if (ret)
-gspca_err(gspca_dev, "Cannot go to low power mode: %d\n",
-ret);
+goto_low_power(gspca_dev);
/* Check the firmware version. */
sd->params.version.firmwareVersion = 0;
get_version_information(gspca_dev);


@@ -195,7 +195,7 @@ static const struct v4l2_ctrl_config mt9m111_greenbal_cfg = {
int mt9m111_probe(struct sd *sd)
{
u8 data[2] = {0x00, 0x00};
-int i, rc = 0;
+int i, err;
struct gspca_dev *gspca_dev = (struct gspca_dev *)sd;
if (force_sensor) {
@@ -213,18 +213,18 @@ int mt9m111_probe(struct sd *sd)
/* Do the preinit */
for (i = 0; i < ARRAY_SIZE(preinit_mt9m111); i++) {
if (preinit_mt9m111[i][0] == BRIDGE) {
-rc |= m5602_write_bridge(sd,
+err = m5602_write_bridge(sd,
preinit_mt9m111[i][1],
preinit_mt9m111[i][2]);
} else {
data[0] = preinit_mt9m111[i][2];
data[1] = preinit_mt9m111[i][3];
-rc |= m5602_write_sensor(sd,
+err = m5602_write_sensor(sd,
preinit_mt9m111[i][1], data, 2);
}
+if (err < 0)
+return err;
}
-if (rc < 0)
-return rc;
if (m5602_read_sensor(sd, MT9M111_SC_CHIPVER, data, 2))
return -ENODEV;


@@ -154,8 +154,8 @@ static const struct v4l2_ctrl_config po1030_greenbal_cfg = {
int po1030_probe(struct sd *sd)
{
-int rc = 0;
u8 dev_id_h = 0, i;
+int err;
struct gspca_dev *gspca_dev = (struct gspca_dev *)sd;
if (force_sensor) {
@@ -174,14 +174,14 @@ int po1030_probe(struct sd *sd)
for (i = 0; i < ARRAY_SIZE(preinit_po1030); i++) {
u8 data = preinit_po1030[i][2];
if (preinit_po1030[i][0] == SENSOR)
-rc |= m5602_write_sensor(sd,
-preinit_po1030[i][1], &data, 1);
+err = m5602_write_sensor(sd, preinit_po1030[i][1],
+&data, 1);
else
-rc |= m5602_write_bridge(sd, preinit_po1030[i][1],
+err = m5602_write_bridge(sd, preinit_po1030[i][1],
data);
+if (err < 0)
+return err;
}
-if (rc < 0)
-return rc;
if (m5602_read_sensor(sd, PO1030_DEVID_H, &dev_id_h, 1))
return -ENODEV;


@@ -763,7 +763,8 @@ static int at24_probe(struct i2c_client *client)
at24->nvmem = devm_nvmem_register(dev, &nvmem_config);
if (IS_ERR(at24->nvmem)) {
pm_runtime_disable(dev);
-regulator_disable(at24->vcc_reg);
+if (!pm_runtime_status_suspended(dev))
+regulator_disable(at24->vcc_reg);
return PTR_ERR(at24->nvmem);
}
@@ -774,7 +775,8 @@ static int at24_probe(struct i2c_client *client)
err = at24_read(at24, 0, &test_byte, 1);
if (err) {
pm_runtime_disable(dev);
-regulator_disable(at24->vcc_reg);
+if (!pm_runtime_status_suspended(dev))
+regulator_disable(at24->vcc_reg);
return -ENODEV;
}


@@ -134,7 +134,7 @@ static struct ics932s401_data *ics932s401_update_device(struct device *dev)
for (i = 0; i < NUM_MIRRORED_REGS; i++) {
temp = i2c_smbus_read_word_data(client, regs_to_copy[i]);
if (temp < 0)
-data->regs[regs_to_copy[i]] = 0;
+temp = 0;
data->regs[regs_to_copy[i]] = temp >> 8;
}


@@ -100,8 +100,9 @@
printk(KERN_INFO a); \
} while (0)
#define v2printk(a...) do { \
-if (verbose > 1) \
+if (verbose > 1) { \
printk(KERN_INFO a); \
+} \
touch_nmi_watchdog(); \
} while (0)
#define eprintk(a...) do { \


@@ -271,6 +271,7 @@ struct lis3lv02d {
int regs_size;
u8 *reg_cache;
bool regs_stored;
+bool init_required;
u8 odr_mask; /* ODR bit mask */
u8 whoami; /* indicates measurement precision */
s16 (*read_data) (struct lis3lv02d *lis3, int reg);


@@ -277,6 +277,9 @@ static int mei_cl_irq_read(struct mei_cl *cl, struct mei_cl_cb *cb,
return ret;
}
+pm_runtime_mark_last_busy(dev->dev);
+pm_request_autosuspend(dev->dev);
list_move_tail(&cb->list, &cl->rd_pending);
return 0;


@@ -555,8 +555,13 @@ static void sdhci_gli_voltage_switch(struct sdhci_host *host)
*
* Wait 5ms after set 1.8V signal enable in Host Control 2 register
* to ensure 1.8V signal enable bit is set by GL9750/GL9755.
+*
+* ...however, the controller in the NUC10i3FNK4 (a 9755) requires
+* slightly longer than 5ms before the control register reports that
+* 1.8V is ready, and far longer still before the card will actually
+* work reliably.
*/
-usleep_range(5000, 5500);
+usleep_range(100000, 110000);
}
static void sdhci_gl9750_reset(struct sdhci_host *host, u8 mask)


@@ -270,9 +270,6 @@ static netdev_tx_t caif_xmit(struct sk_buff *skb, struct net_device *dev)
{
struct ser_device *ser;
-if (WARN_ON(!dev))
-return -EINVAL;
ser = netdev_priv(dev);
/* Send flow off once, on high water mark */


@@ -1128,14 +1128,6 @@ mt7530_port_set_vlan_aware(struct dsa_switch *ds, int port)
{
struct mt7530_priv *priv = ds->priv;
-/* The real fabric path would be decided on the membership in the
-* entry of VLAN table. PCR_MATRIX set up here with ALL_MEMBERS
-* means potential VLAN can be consisting of certain subset of all
-* ports.
-*/
-mt7530_rmw(priv, MT7530_PCR_P(port),
-PCR_MATRIX_MASK, PCR_MATRIX(MT7530_ALL_MEMBERS));
/* Trapped into security mode allows packet forwarding through VLAN
* table lookup. CPU port is set to fallback mode to let untagged
* frames pass through.


@@ -167,9 +167,10 @@ enum sja1105_hostcmd {
SJA1105_HOSTCMD_INVALIDATE = 4,
};
+/* Command and entry overlap */
static void
-sja1105_vl_lookup_cmd_packing(void *buf, struct sja1105_dyn_cmd *cmd,
+sja1105et_vl_lookup_cmd_packing(void *buf, struct sja1105_dyn_cmd *cmd,
enum packing_op op)
{
const int size = SJA1105_SIZE_DYN_CMD;
@@ -179,6 +180,20 @@ sja1105_vl_lookup_cmd_packing(void *buf, struct sja1105_dyn_cmd *cmd,
sja1105_packing(buf, &cmd->index, 9, 0, size, op);
}
+/* Command and entry are separate */
+static void
+sja1105pqrs_vl_lookup_cmd_packing(void *buf, struct sja1105_dyn_cmd *cmd,
+enum packing_op op)
+{
+u8 *p = buf + SJA1105_SIZE_VL_LOOKUP_ENTRY;
+const int size = SJA1105_SIZE_DYN_CMD;
+sja1105_packing(p, &cmd->valid, 31, 31, size, op);
+sja1105_packing(p, &cmd->errors, 30, 30, size, op);
+sja1105_packing(p, &cmd->rdwrset, 29, 29, size, op);
+sja1105_packing(p, &cmd->index, 9, 0, size, op);
+}
static size_t sja1105et_vl_lookup_entry_packing(void *buf, void *entry_ptr,
enum packing_op op)
{
@@ -641,7 +656,7 @@ static size_t sja1105pqrs_cbs_entry_packing(void *buf, void *entry_ptr,
const struct sja1105_dynamic_table_ops sja1105et_dyn_ops[BLK_IDX_MAX_DYN] = {
[BLK_IDX_VL_LOOKUP] = {
.entry_packing = sja1105et_vl_lookup_entry_packing,
-.cmd_packing = sja1105_vl_lookup_cmd_packing,
+.cmd_packing = sja1105et_vl_lookup_cmd_packing,
.access = OP_WRITE,
.max_entry_count = SJA1105_MAX_VL_LOOKUP_COUNT,
.packed_size = SJA1105ET_SIZE_VL_LOOKUP_DYN_CMD,
@@ -725,7 +740,7 @@ const struct sja1105_dynamic_table_ops sja1105et_dyn_ops[BLK_IDX_MAX_DYN] = {
const struct sja1105_dynamic_table_ops sja1105pqrs_dyn_ops[BLK_IDX_MAX_DYN] = {
[BLK_IDX_VL_LOOKUP] = {
.entry_packing = sja1105_vl_lookup_entry_packing,
-.cmd_packing = sja1105_vl_lookup_cmd_packing,
+.cmd_packing = sja1105pqrs_vl_lookup_cmd_packing,
.access = (OP_READ | OP_WRITE),
.max_entry_count = SJA1105_MAX_VL_LOOKUP_COUNT,
.packed_size = SJA1105PQRS_SIZE_VL_LOOKUP_DYN_CMD,


@@ -25,6 +25,8 @@
#include "sja1105_sgmii.h"
#include "sja1105_tas.h"
+#define SJA1105_DEFAULT_VLAN (VLAN_N_VID - 1)
static const struct dsa_switch_ops sja1105_switch_ops;
static void sja1105_hw_reset(struct gpio_desc *gpio, unsigned int pulse_len,
@@ -204,6 +206,7 @@ static int sja1105_init_mii_settings(struct sja1105_private *priv,
default:
dev_err(dev, "Unsupported PHY mode %s!\n",
phy_modes(ports[i].phy_mode));
+return -EINVAL;
}
/* Even though the SerDes port is able to drive SGMII autoneg
@@ -292,6 +295,13 @@ static int sja1105_init_l2_lookup_params(struct sja1105_private *priv)
return 0;
}
+/* Set up a default VLAN for untagged traffic injected from the CPU
+* using management routes (e.g. STP, PTP) as opposed to tag_8021q.
+* All DT-defined ports are members of this VLAN, and there are no
+* restrictions on forwarding (since the CPU selects the destination).
+* Frames from this VLAN will always be transmitted as untagged, and
+* neither the bridge nor the 8021q module cannot create this VLAN ID.
+*/
static int sja1105_init_static_vlan(struct sja1105_private *priv)
{
struct sja1105_table *table;
@@ -301,17 +311,13 @@ static int sja1105_init_static_vlan(struct sja1105_private *priv)
.vmemb_port = 0,
.vlan_bc = 0,
.tag_port = 0,
-.vlanid = 1,
+.vlanid = SJA1105_DEFAULT_VLAN,
};
struct dsa_switch *ds = priv->ds;
int port;
table = &priv->static_config.tables[BLK_IDX_VLAN_LOOKUP];
-/* The static VLAN table will only contain the initial pvid of 1.
-* All other VLANs are to be configured through dynamic entries,
-* and kept in the static configuration table as backing memory.
-*/
if (table->entry_count) {
kfree(table->entries);
table->entry_count = 0;
@@ -324,9 +330,6 @@ static int sja1105_init_static_vlan(struct sja1105_private *priv)
table->entry_count = 1;
-/* VLAN 1: all DT-defined ports are members; no restrictions on
-* forwarding; always transmit as untagged.
-*/
for (port = 0; port < ds->num_ports; port++) {
struct sja1105_bridge_vlan *v;
@@ -337,15 +340,12 @@ static int sja1105_init_static_vlan(struct sja1105_private *priv)
pvid.vlan_bc |= BIT(port);
pvid.tag_port &= ~BIT(port);
-/* Let traffic that don't need dsa_8021q (e.g. STP, PTP) be
-* transmitted as untagged.
-*/
v = kzalloc(sizeof(*v), GFP_KERNEL);
if (!v)
return -ENOMEM;
v->port = port;
-v->vid = 1;
+v->vid = SJA1105_DEFAULT_VLAN;
v->untagged = true;
if (dsa_is_cpu_port(ds, port))
v->pvid = true;
@@ -2756,11 +2756,22 @@ static int sja1105_vlan_add_one(struct dsa_switch *ds, int port, u16 vid,
bool pvid = flags & BRIDGE_VLAN_INFO_PVID;
struct sja1105_bridge_vlan *v;
-list_for_each_entry(v, vlan_list, list)
-if (v->port == port && v->vid == vid &&
-v->untagged == untagged && v->pvid == pvid)
-/* Already added */
-return 0;
+list_for_each_entry(v, vlan_list, list) {
+if (v->port == port && v->vid == vid) {
+/* Already added */
+if (v->untagged == untagged && v->pvid == pvid)
+/* Nothing changed */
+return 0;
+/* It's the same VLAN, but some of the flags changed
+* and the user did not bother to delete it first.
+* Update it and trigger sja1105_build_vlan_table.
+*/
+v->untagged = untagged;
+v->pvid = pvid;
+return 1;
+}
+}
v = kzalloc(sizeof(*v), GFP_KERNEL);
if (!v) {
@@ -2911,13 +2922,13 @@ static int sja1105_setup(struct dsa_switch *ds)
rc = sja1105_static_config_load(priv, ports);
if (rc < 0) {
dev_err(ds->dev, "Failed to load static config: %d\n", rc);
-return rc;
+goto out_ptp_clock_unregister;
}
/* Configure the CGU (PHY link modes and speeds) */
rc = sja1105_clocking_setup(priv);
if (rc < 0) {
dev_err(ds->dev, "Failed to configure MII clocking: %d\n", rc);
-return rc;
+goto out_static_config_free;
}
/* On SJA1105, VLAN filtering per se is always enabled in hardware.
* The only thing we can do to disable it is lie about what the 802.1Q
@@ -2938,7 +2949,7 @@ static int sja1105_setup(struct dsa_switch *ds)
rc = sja1105_devlink_setup(ds);
if (rc < 0)
-return rc;
+goto out_static_config_free;
/* The DSA/switchdev model brings up switch ports in standalone mode by
* default, and that means vlan_filtering is 0 since they're not under
@@ -2947,6 +2958,17 @@ static int sja1105_setup(struct dsa_switch *ds)
rtnl_lock();
rc = sja1105_setup_8021q_tagging(ds, true);
rtnl_unlock();
+if (rc)
+goto out_devlink_teardown;
+return 0;
+out_devlink_teardown:
+sja1105_devlink_teardown(ds);
+out_ptp_clock_unregister:
+sja1105_ptp_clock_unregister(ds);
+out_static_config_free:
+sja1105_static_config_free(&priv->static_config);
return rc;
}
@@ -3461,8 +3483,10 @@ static int sja1105_probe(struct spi_device *spi)
priv->cbs = devm_kcalloc(dev, priv->info->num_cbs_shapers,
sizeof(struct sja1105_cbs_entry),
GFP_KERNEL);
-if (!priv->cbs)
-return -ENOMEM;
+if (!priv->cbs) {
+rc = -ENOMEM;
+goto out_unregister_switch;
+}
}
/* Connections between dsa_port and sja1105_port */
@@ -3487,7 +3511,7 @@ static int sja1105_probe(struct spi_device *spi)
dev_err(ds->dev,
"failed to create deferred xmit thread: %d\n",
rc);
-goto out;
+goto out_destroy_workers;
}
skb_queue_head_init(&sp->xmit_queue);
sp->xmit_tpid = ETH_P_SJA1105;
@@ -3497,7 +3521,8 @@ static int sja1105_probe(struct spi_device *spi)
}
return 0;
-out:
+out_destroy_workers:
while (port-- > 0) {
struct sja1105_port *sp = &priv->ports[port];
@@ -3506,6 +3531,10 @@ out:
kthread_destroy_worker(sp->xmit_worker);
}
+out_unregister_switch:
+dsa_unregister_switch(ds);
return rc;
}


@@ -8247,9 +8247,9 @@ bnx2_init_board(struct pci_dev *pdev, struct net_device *dev)
BNX2_WR(bp, PCI_COMMAND, reg);
} else if ((BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5706_A1) &&
!(bp->flags & BNX2_FLAG_PCIX)) {
dev_err(&pdev->dev,
"5706 A1 can only be used in a PCIX bus, aborting\n");
+rc = -EPERM;
goto err_out_unmap;
}


@@ -280,7 +280,8 @@ static bool bnxt_vf_pciid(enum board_idx idx)
{
return (idx == NETXTREME_C_VF || idx == NETXTREME_E_VF ||
idx == NETXTREME_S_VF || idx == NETXTREME_C_VF_HV ||
-idx == NETXTREME_E_VF_HV || idx == NETXTREME_E_P5_VF);
+idx == NETXTREME_E_VF_HV || idx == NETXTREME_E_P5_VF ||
+idx == NETXTREME_E_P5_VF_HV);
}
#define DB_CP_REARM_FLAGS (DB_KEY_CP | DB_IDX_VALID)
@@ -6833,14 +6834,7 @@ ctx_err:
static void bnxt_hwrm_set_pg_attr(struct bnxt_ring_mem_info *rmem, u8 *pg_attr,
__le64 *pg_dir)
{
-u8 pg_size = 0;
-if (BNXT_PAGE_SHIFT == 13)
-pg_size = 1 << 4;
-else if (BNXT_PAGE_SIZE == 16)
-pg_size = 2 << 4;
-*pg_attr = pg_size;
+BNXT_SET_CTX_PAGE_ATTR(*pg_attr);
if (rmem->depth >= 1) {
if (rmem->depth == 2)
*pg_attr |= 2;


@@ -1440,6 +1440,16 @@ struct bnxt_ctx_pg_info {
#define BNXT_MAX_TQM_RINGS \
(BNXT_MAX_TQM_SP_RINGS + BNXT_MAX_TQM_FP_RINGS)
+#define BNXT_SET_CTX_PAGE_ATTR(attr) \
+do { \
+if (BNXT_PAGE_SIZE == 0x2000) \
+attr = FUNC_BACKING_STORE_CFG_REQ_SRQ_PG_SIZE_PG_8K; \
+else if (BNXT_PAGE_SIZE == 0x10000) \
+attr = FUNC_BACKING_STORE_CFG_REQ_QPC_PG_SIZE_PG_64K; \
+else \
+attr = FUNC_BACKING_STORE_CFG_REQ_QPC_PG_SIZE_PG_4K; \
+} while (0)
struct bnxt_ctx_mem_info {
u32 qp_max_entries;
u16 qp_min_qp1_entries;

Some files were not shown because too many files have changed in this diff.