Merge remote-tracking branch 'stable/linux-6.12.y' into rpi-6.12.y

Dom Cobley
2025-05-06 14:14:05 +01:00
300 changed files with 5872 additions and 4639 deletions

View File

@@ -382,6 +382,14 @@ In case of new BPF instructions, once the changes have been accepted
into the Linux kernel, please implement support into LLVM's BPF back
end. See LLVM_ section below for further information.
Q: What "BPF_INTERNAL" symbol namespace is for?
-----------------------------------------------
A: Symbols exported as BPF_INTERNAL can only be used by BPF infrastructure
like preload kernel modules with light skeleton. Most symbols outside
of BPF_INTERNAL are not expected to be used by code outside of BPF either.
Symbols may lack the designation because they predate the namespaces,
or due to an oversight.
Stable submission
=================
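For context on the BPF_INTERNAL answer above: a symbol exported into a
namespace can only be linked by modules that explicitly import that
namespace. A minimal sketch, assuming the 6.12-era macro forms (namespace
passed as a plain identifier) and a hypothetical symbol name:

/* Exporting side, somewhere in BPF core code: */
#include <linux/export.h>

void bpf_internal_helper(void)	/* hypothetical name, for illustration only */
{
}
EXPORT_SYMBOL_NS_GPL(bpf_internal_helper, BPF_INTERNAL);

/* Importing side, e.g. a preload module using a light skeleton.
 * Without this declaration, modpost rejects the reference at build time.
 */
MODULE_IMPORT_NS(BPF_INTERNAL);
MODULE_LICENSE("GPL");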

View File

@@ -1,7 +1,7 @@
# SPDX-License-Identifier: GPL-2.0
VERSION = 6
PATCHLEVEL = 12
SUBLEVEL = 25
SUBLEVEL = 27
EXTRAVERSION =
NAME = Baby Opossum Posse

View File

@@ -3,10 +3,12 @@
menu "Accelerated Cryptographic Algorithms for CPU (arm)"
config CRYPTO_CURVE25519_NEON
tristate "Public key crypto: Curve25519 (NEON)"
tristate
depends on KERNEL_MODE_NEON
select CRYPTO_KPP
select CRYPTO_LIB_CURVE25519_GENERIC
select CRYPTO_ARCH_HAVE_LIB_CURVE25519
default CRYPTO_LIB_CURVE25519_INTERNAL
help
Curve25519 algorithm
@@ -45,9 +47,10 @@ config CRYPTO_NHPOLY1305_NEON
- NEON (Advanced SIMD) extensions
config CRYPTO_POLY1305_ARM
tristate "Hash functions: Poly1305 (NEON)"
tristate
select CRYPTO_HASH
select CRYPTO_ARCH_HAVE_LIB_POLY1305
default CRYPTO_LIB_POLY1305_INTERNAL
help
Poly1305 authenticator algorithm (RFC7539)
@@ -212,9 +215,10 @@ config CRYPTO_AES_ARM_CE
- ARMv8 Crypto Extensions
config CRYPTO_CHACHA20_NEON
tristate "Ciphers: ChaCha20, XChaCha20, XChaCha12 (NEON)"
tristate
select CRYPTO_SKCIPHER
select CRYPTO_ARCH_HAVE_LIB_CHACHA
default CRYPTO_LIB_CHACHA_INTERNAL
help
Length-preserving ciphers: ChaCha20, XChaCha20, and XChaCha12
stream cipher algorithms

View File

@@ -0,0 +1,148 @@
// SPDX-License-Identifier: GPL-2.0-only OR MIT
/*
* Device Tree Source for J784S4 and J742S2 SoC Family
*
* TRM (j784s4) (SPRUJ43 JULY 2022): https://www.ti.com/lit/zip/spruj52
* TRM (j742s2): https://www.ti.com/lit/pdf/spruje3
*
* Copyright (C) 2022-2024 Texas Instruments Incorporated - https://www.ti.com/
*
*/
#include <dt-bindings/interrupt-controller/irq.h>
#include <dt-bindings/interrupt-controller/arm-gic.h>
#include <dt-bindings/soc/ti,sci_pm_domain.h>
#include "k3-pinctrl.h"
/ {
interrupt-parent = <&gic500>;
#address-cells = <2>;
#size-cells = <2>;
L2_0: l2-cache0 {
compatible = "cache";
cache-level = <2>;
cache-unified;
cache-size = <0x200000>;
cache-line-size = <64>;
cache-sets = <1024>;
next-level-cache = <&msmc_l3>;
};
L2_1: l2-cache1 {
compatible = "cache";
cache-level = <2>;
cache-unified;
cache-size = <0x200000>;
cache-line-size = <64>;
cache-sets = <1024>;
next-level-cache = <&msmc_l3>;
};
msmc_l3: l3-cache0 {
compatible = "cache";
cache-level = <3>;
cache-unified;
};
firmware {
optee {
compatible = "linaro,optee-tz";
method = "smc";
};
psci: psci {
compatible = "arm,psci-1.0";
method = "smc";
};
};
a72_timer0: timer-cl0-cpu0 {
compatible = "arm,armv8-timer";
interrupts = <GIC_PPI 13 IRQ_TYPE_LEVEL_LOW>, /* cntpsirq */
<GIC_PPI 14 IRQ_TYPE_LEVEL_LOW>, /* cntpnsirq */
<GIC_PPI 11 IRQ_TYPE_LEVEL_LOW>, /* cntvirq */
<GIC_PPI 10 IRQ_TYPE_LEVEL_LOW>; /* cnthpirq */
};
pmu: pmu {
compatible = "arm,cortex-a72-pmu";
/* Recommendation from GIC500 TRM Table A.3 */
interrupts = <GIC_PPI 7 IRQ_TYPE_LEVEL_HIGH>;
};
cbass_main: bus@100000 {
bootph-all;
compatible = "simple-bus";
#address-cells = <2>;
#size-cells = <2>;
ranges = <0x00 0x00100000 0x00 0x00100000 0x00 0x00020000>, /* ctrl mmr */
<0x00 0x00600000 0x00 0x00600000 0x00 0x00031100>, /* GPIO */
<0x00 0x00700000 0x00 0x00700000 0x00 0x00001000>, /* ESM */
<0x00 0x01000000 0x00 0x01000000 0x00 0x0d000000>, /* Most peripherals */
<0x00 0x04210000 0x00 0x04210000 0x00 0x00010000>, /* VPU0 */
<0x00 0x04220000 0x00 0x04220000 0x00 0x00010000>, /* VPU1 */
<0x00 0x0d000000 0x00 0x0d000000 0x00 0x00800000>, /* PCIe0 Core*/
<0x00 0x0d800000 0x00 0x0d800000 0x00 0x00800000>, /* PCIe1 Core*/
<0x00 0x0e000000 0x00 0x0e000000 0x00 0x00800000>, /* PCIe2 Core*/
<0x00 0x0e800000 0x00 0x0e800000 0x00 0x00800000>, /* PCIe3 Core*/
<0x00 0x10000000 0x00 0x10000000 0x00 0x08000000>, /* PCIe0 DAT0 */
<0x00 0x18000000 0x00 0x18000000 0x00 0x08000000>, /* PCIe1 DAT0 */
<0x00 0x64800000 0x00 0x64800000 0x00 0x0070c000>, /* C71_1 */
<0x00 0x65800000 0x00 0x65800000 0x00 0x0070c000>, /* C71_2 */
<0x00 0x66800000 0x00 0x66800000 0x00 0x0070c000>, /* C71_3 */
<0x00 0x67800000 0x00 0x67800000 0x00 0x0070c000>, /* C71_4 */
<0x00 0x6f000000 0x00 0x6f000000 0x00 0x00310000>, /* A72 PERIPHBASE */
<0x00 0x70000000 0x00 0x70000000 0x00 0x00400000>, /* MSMC RAM */
<0x00 0x30000000 0x00 0x30000000 0x00 0x0c400000>, /* MAIN NAVSS */
<0x40 0x00000000 0x40 0x00000000 0x01 0x00000000>, /* PCIe0 DAT1 */
<0x41 0x00000000 0x41 0x00000000 0x01 0x00000000>, /* PCIe1 DAT1 */
<0x42 0x00000000 0x42 0x00000000 0x01 0x00000000>, /* PCIe2 DAT1 */
<0x43 0x00000000 0x43 0x00000000 0x01 0x00000000>, /* PCIe3 DAT1 */
<0x44 0x00000000 0x44 0x00000000 0x00 0x08000000>, /* PCIe2 DAT0 */
<0x44 0x10000000 0x44 0x10000000 0x00 0x08000000>, /* PCIe3 DAT0 */
<0x4e 0x20000000 0x4e 0x20000000 0x00 0x00080000>, /* GPU */
/* MCUSS_WKUP Range */
<0x00 0x28380000 0x00 0x28380000 0x00 0x03880000>,
<0x00 0x40200000 0x00 0x40200000 0x00 0x00998400>,
<0x00 0x40f00000 0x00 0x40f00000 0x00 0x00020000>,
<0x00 0x41000000 0x00 0x41000000 0x00 0x00020000>,
<0x00 0x41400000 0x00 0x41400000 0x00 0x00020000>,
<0x00 0x41c00000 0x00 0x41c00000 0x00 0x00100000>,
<0x00 0x42040000 0x00 0x42040000 0x00 0x03ac2400>,
<0x00 0x45100000 0x00 0x45100000 0x00 0x00c24000>,
<0x00 0x46000000 0x00 0x46000000 0x00 0x00200000>,
<0x00 0x47000000 0x00 0x47000000 0x00 0x00068400>,
<0x00 0x50000000 0x00 0x50000000 0x00 0x10000000>,
<0x04 0x00000000 0x04 0x00000000 0x04 0x00000000>;
cbass_mcu_wakeup: bus@28380000 {
bootph-all;
compatible = "simple-bus";
#address-cells = <2>;
#size-cells = <2>;
ranges = <0x00 0x28380000 0x00 0x28380000 0x00 0x03880000>, /* MCU NAVSS*/
<0x00 0x40200000 0x00 0x40200000 0x00 0x00998400>, /* First peripheral window */
<0x00 0x40f00000 0x00 0x40f00000 0x00 0x00020000>, /* CTRL_MMR0 */
<0x00 0x41000000 0x00 0x41000000 0x00 0x00020000>, /* MCU R5F Core0 */
<0x00 0x41400000 0x00 0x41400000 0x00 0x00020000>, /* MCU R5F Core1 */
<0x00 0x41c00000 0x00 0x41c00000 0x00 0x00100000>, /* MCU SRAM */
<0x00 0x42040000 0x00 0x42040000 0x00 0x03ac2400>, /* WKUP peripheral window */
<0x00 0x45100000 0x00 0x45100000 0x00 0x00c24000>, /* MMRs, remaining NAVSS */
<0x00 0x46000000 0x00 0x46000000 0x00 0x00200000>, /* CPSW */
<0x00 0x47000000 0x00 0x47000000 0x00 0x00068400>, /* OSPI register space */
<0x00 0x50000000 0x00 0x50000000 0x00 0x10000000>, /* FSS data region 1 */
<0x04 0x00000000 0x04 0x00000000 0x04 0x00000000>; /* FSS data region 0/3 */
};
};
thermal_zones: thermal-zones {
#include "k3-j784s4-j742s2-thermal-common.dtsi"
};
};
/* Now include peripherals from each bus segment */
#include "k3-j784s4-j742s2-main-common.dtsi"
#include "k3-j784s4-j742s2-mcu-wakeup-common.dtsi"

File diff suppressed because it is too large

View File

@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0-only OR MIT
/*
* Device Tree Source for J784S4 SoC Family MCU/WAKEUP Domain peripherals
* Device Tree Source for J784S4 and J742S2 SoC Family MCU/WAKEUP Domain peripherals
*
* Copyright (C) 2022-2024 Texas Instruments Incorporated - https://www.ti.com/
*/

File diff suppressed because it is too large

View File

@@ -8,18 +8,11 @@
*
*/
#include <dt-bindings/interrupt-controller/irq.h>
#include <dt-bindings/interrupt-controller/arm-gic.h>
#include <dt-bindings/soc/ti,sci_pm_domain.h>
#include "k3-pinctrl.h"
#include "k3-j784s4-j742s2-common.dtsi"
/ {
model = "Texas Instruments K3 J784S4 SoC";
compatible = "ti,j784s4";
interrupt-parent = <&gic500>;
#address-cells = <2>;
#size-cells = <2>;
cpus {
#address-cells = <1>;
@@ -174,130 +167,6 @@
next-level-cache = <&L2_1>;
};
};
L2_0: l2-cache0 {
compatible = "cache";
cache-level = <2>;
cache-unified;
cache-size = <0x200000>;
cache-line-size = <64>;
cache-sets = <1024>;
next-level-cache = <&msmc_l3>;
};
L2_1: l2-cache1 {
compatible = "cache";
cache-level = <2>;
cache-unified;
cache-size = <0x200000>;
cache-line-size = <64>;
cache-sets = <1024>;
next-level-cache = <&msmc_l3>;
};
msmc_l3: l3-cache0 {
compatible = "cache";
cache-level = <3>;
cache-unified;
};
firmware {
optee {
compatible = "linaro,optee-tz";
method = "smc";
};
psci: psci {
compatible = "arm,psci-1.0";
method = "smc";
};
};
a72_timer0: timer-cl0-cpu0 {
compatible = "arm,armv8-timer";
interrupts = <GIC_PPI 13 IRQ_TYPE_LEVEL_LOW>, /* cntpsirq */
<GIC_PPI 14 IRQ_TYPE_LEVEL_LOW>, /* cntpnsirq */
<GIC_PPI 11 IRQ_TYPE_LEVEL_LOW>, /* cntvirq */
<GIC_PPI 10 IRQ_TYPE_LEVEL_LOW>; /* cnthpirq */
};
pmu: pmu {
compatible = "arm,cortex-a72-pmu";
/* Recommendation from GIC500 TRM Table A.3 */
interrupts = <GIC_PPI 7 IRQ_TYPE_LEVEL_HIGH>;
};
cbass_main: bus@100000 {
bootph-all;
compatible = "simple-bus";
#address-cells = <2>;
#size-cells = <2>;
ranges = <0x00 0x00100000 0x00 0x00100000 0x00 0x00020000>, /* ctrl mmr */
<0x00 0x00600000 0x00 0x00600000 0x00 0x00031100>, /* GPIO */
<0x00 0x00700000 0x00 0x00700000 0x00 0x00001000>, /* ESM */
<0x00 0x01000000 0x00 0x01000000 0x00 0x0d000000>, /* Most peripherals */
<0x00 0x04210000 0x00 0x04210000 0x00 0x00010000>, /* VPU0 */
<0x00 0x04220000 0x00 0x04220000 0x00 0x00010000>, /* VPU1 */
<0x00 0x0d000000 0x00 0x0d000000 0x00 0x00800000>, /* PCIe0 Core*/
<0x00 0x0d800000 0x00 0x0d800000 0x00 0x00800000>, /* PCIe1 Core*/
<0x00 0x0e000000 0x00 0x0e000000 0x00 0x00800000>, /* PCIe2 Core*/
<0x00 0x0e800000 0x00 0x0e800000 0x00 0x00800000>, /* PCIe3 Core*/
<0x00 0x10000000 0x00 0x10000000 0x00 0x08000000>, /* PCIe0 DAT0 */
<0x00 0x18000000 0x00 0x18000000 0x00 0x08000000>, /* PCIe1 DAT0 */
<0x00 0x64800000 0x00 0x64800000 0x00 0x0070c000>, /* C71_1 */
<0x00 0x65800000 0x00 0x65800000 0x00 0x0070c000>, /* C71_2 */
<0x00 0x66800000 0x00 0x66800000 0x00 0x0070c000>, /* C71_3 */
<0x00 0x67800000 0x00 0x67800000 0x00 0x0070c000>, /* C71_4 */
<0x00 0x6f000000 0x00 0x6f000000 0x00 0x00310000>, /* A72 PERIPHBASE */
<0x00 0x70000000 0x00 0x70000000 0x00 0x00400000>, /* MSMC RAM */
<0x00 0x30000000 0x00 0x30000000 0x00 0x0c400000>, /* MAIN NAVSS */
<0x40 0x00000000 0x40 0x00000000 0x01 0x00000000>, /* PCIe0 DAT1 */
<0x41 0x00000000 0x41 0x00000000 0x01 0x00000000>, /* PCIe1 DAT1 */
<0x42 0x00000000 0x42 0x00000000 0x01 0x00000000>, /* PCIe2 DAT1 */
<0x43 0x00000000 0x43 0x00000000 0x01 0x00000000>, /* PCIe3 DAT1 */
<0x44 0x00000000 0x44 0x00000000 0x00 0x08000000>, /* PCIe2 DAT0 */
<0x44 0x10000000 0x44 0x10000000 0x00 0x08000000>, /* PCIe3 DAT0 */
<0x4e 0x20000000 0x4e 0x20000000 0x00 0x00080000>, /* GPU */
/* MCUSS_WKUP Range */
<0x00 0x28380000 0x00 0x28380000 0x00 0x03880000>,
<0x00 0x40200000 0x00 0x40200000 0x00 0x00998400>,
<0x00 0x40f00000 0x00 0x40f00000 0x00 0x00020000>,
<0x00 0x41000000 0x00 0x41000000 0x00 0x00020000>,
<0x00 0x41400000 0x00 0x41400000 0x00 0x00020000>,
<0x00 0x41c00000 0x00 0x41c00000 0x00 0x00100000>,
<0x00 0x42040000 0x00 0x42040000 0x00 0x03ac2400>,
<0x00 0x45100000 0x00 0x45100000 0x00 0x00c24000>,
<0x00 0x46000000 0x00 0x46000000 0x00 0x00200000>,
<0x00 0x47000000 0x00 0x47000000 0x00 0x00068400>,
<0x00 0x50000000 0x00 0x50000000 0x00 0x10000000>,
<0x04 0x00000000 0x04 0x00000000 0x04 0x00000000>;
cbass_mcu_wakeup: bus@28380000 {
bootph-all;
compatible = "simple-bus";
#address-cells = <2>;
#size-cells = <2>;
ranges = <0x00 0x28380000 0x00 0x28380000 0x00 0x03880000>, /* MCU NAVSS*/
<0x00 0x40200000 0x00 0x40200000 0x00 0x00998400>, /* First peripheral window */
<0x00 0x40f00000 0x00 0x40f00000 0x00 0x00020000>, /* CTRL_MMR0 */
<0x00 0x41000000 0x00 0x41000000 0x00 0x00020000>, /* MCU R5F Core0 */
<0x00 0x41400000 0x00 0x41400000 0x00 0x00020000>, /* MCU R5F Core1 */
<0x00 0x41c00000 0x00 0x41c00000 0x00 0x00100000>, /* MCU SRAM */
<0x00 0x42040000 0x00 0x42040000 0x00 0x03ac2400>, /* WKUP peripheral window */
<0x00 0x45100000 0x00 0x45100000 0x00 0x00c24000>, /* MMRs, remaining NAVSS */
<0x00 0x46000000 0x00 0x46000000 0x00 0x00200000>, /* CPSW */
<0x00 0x47000000 0x00 0x47000000 0x00 0x00068400>, /* OSPI register space */
<0x00 0x50000000 0x00 0x50000000 0x00 0x10000000>, /* FSS data region 1 */
<0x04 0x00000000 0x04 0x00000000 0x04 0x00000000>; /* FSS data region 0/3 */
};
};
thermal_zones: thermal-zones {
#include "k3-j784s4-thermal.dtsi"
};
};
/* Now include peripherals from each bus segment */
#include "k3-j784s4-main.dtsi"
#include "k3-j784s4-mcu-wakeup.dtsi"

View File

@@ -26,10 +26,11 @@ config CRYPTO_NHPOLY1305_NEON
- NEON (Advanced SIMD) extensions
config CRYPTO_POLY1305_NEON
tristate "Hash functions: Poly1305 (NEON)"
tristate
depends on KERNEL_MODE_NEON
select CRYPTO_HASH
select CRYPTO_ARCH_HAVE_LIB_POLY1305
default CRYPTO_LIB_POLY1305_INTERNAL
help
Poly1305 authenticator algorithm (RFC7539)
@@ -186,11 +187,12 @@ config CRYPTO_AES_ARM64_NEON_BLK
- NEON (Advanced SIMD) extensions
config CRYPTO_CHACHA20_NEON
tristate "Ciphers: ChaCha (NEON)"
tristate
depends on KERNEL_MODE_NEON
select CRYPTO_SKCIPHER
select CRYPTO_LIB_CHACHA_GENERIC
select CRYPTO_ARCH_HAVE_LIB_CHACHA
default CRYPTO_LIB_CHACHA_INTERNAL
help
Length-preserving ciphers: ChaCha20, XChaCha20, and XChaCha12
stream cipher algorithms

View File

@@ -68,6 +68,7 @@ config LOONGARCH
select ARCH_SUPPORTS_NUMA_BALANCING
select ARCH_USE_BUILTIN_BSWAP
select ARCH_USE_CMPXCHG_LOCKREF
select ARCH_USE_MEMTEST
select ARCH_USE_QUEUED_RWLOCKS
select ARCH_USE_QUEUED_SPINLOCKS
select ARCH_WANT_DEFAULT_BPF_JIT

View File

@@ -22,22 +22,29 @@
struct sigcontext;
#define kernel_fpu_available() cpu_has_fpu
extern void kernel_fpu_begin(void);
extern void kernel_fpu_end(void);
extern void _init_fpu(unsigned int);
extern void _save_fp(struct loongarch_fpu *);
extern void _restore_fp(struct loongarch_fpu *);
void kernel_fpu_begin(void);
void kernel_fpu_end(void);
extern void _save_lsx(struct loongarch_fpu *fpu);
extern void _restore_lsx(struct loongarch_fpu *fpu);
extern void _init_lsx_upper(void);
extern void _restore_lsx_upper(struct loongarch_fpu *fpu);
asmlinkage void _init_fpu(unsigned int);
asmlinkage void _save_fp(struct loongarch_fpu *);
asmlinkage void _restore_fp(struct loongarch_fpu *);
asmlinkage int _save_fp_context(void __user *fpregs, void __user *fcc, void __user *csr);
asmlinkage int _restore_fp_context(void __user *fpregs, void __user *fcc, void __user *csr);
extern void _save_lasx(struct loongarch_fpu *fpu);
extern void _restore_lasx(struct loongarch_fpu *fpu);
extern void _init_lasx_upper(void);
extern void _restore_lasx_upper(struct loongarch_fpu *fpu);
asmlinkage void _save_lsx(struct loongarch_fpu *fpu);
asmlinkage void _restore_lsx(struct loongarch_fpu *fpu);
asmlinkage void _init_lsx_upper(void);
asmlinkage void _restore_lsx_upper(struct loongarch_fpu *fpu);
asmlinkage int _save_lsx_context(void __user *fpregs, void __user *fcc, void __user *fcsr);
asmlinkage int _restore_lsx_context(void __user *fpregs, void __user *fcc, void __user *fcsr);
asmlinkage void _save_lasx(struct loongarch_fpu *fpu);
asmlinkage void _restore_lasx(struct loongarch_fpu *fpu);
asmlinkage void _init_lasx_upper(void);
asmlinkage void _restore_lasx_upper(struct loongarch_fpu *fpu);
asmlinkage int _save_lasx_context(void __user *fpregs, void __user *fcc, void __user *fcsr);
asmlinkage int _restore_lasx_context(void __user *fpregs, void __user *fcc, void __user *fcsr);
static inline void enable_lsx(void);
static inline void disable_lsx(void);

View File

@@ -12,9 +12,13 @@
#include <asm/loongarch.h>
#include <asm/processor.h>
extern void _init_lbt(void);
extern void _save_lbt(struct loongarch_lbt *);
extern void _restore_lbt(struct loongarch_lbt *);
asmlinkage void _init_lbt(void);
asmlinkage void _save_lbt(struct loongarch_lbt *);
asmlinkage void _restore_lbt(struct loongarch_lbt *);
asmlinkage int _save_lbt_context(void __user *regs, void __user *eflags);
asmlinkage int _restore_lbt_context(void __user *regs, void __user *eflags);
asmlinkage int _save_ftop_context(void __user *ftop);
asmlinkage int _restore_ftop_context(void __user *ftop);
static inline int is_lbt_enabled(void)
{

View File

@@ -33,9 +33,9 @@ struct pt_regs {
unsigned long __last[];
} __aligned(8);
static inline int regs_irqs_disabled(struct pt_regs *regs)
static __always_inline bool regs_irqs_disabled(struct pt_regs *regs)
{
return arch_irqs_disabled_flags(regs->csr_prmd);
return !(regs->csr_prmd & CSR_PRMD_PIE);
}
static inline unsigned long kernel_stack_pointer(struct pt_regs *regs)

View File

@@ -458,6 +458,7 @@ SYM_FUNC_START(_save_fp_context)
li.w a0, 0 # success
jr ra
SYM_FUNC_END(_save_fp_context)
EXPORT_SYMBOL_GPL(_save_fp_context)
/*
* a0: fpregs
@@ -471,6 +472,7 @@ SYM_FUNC_START(_restore_fp_context)
li.w a0, 0 # success
jr ra
SYM_FUNC_END(_restore_fp_context)
EXPORT_SYMBOL_GPL(_restore_fp_context)
/*
* a0: fpregs
@@ -484,6 +486,7 @@ SYM_FUNC_START(_save_lsx_context)
li.w a0, 0 # success
jr ra
SYM_FUNC_END(_save_lsx_context)
EXPORT_SYMBOL_GPL(_save_lsx_context)
/*
* a0: fpregs
@@ -497,6 +500,7 @@ SYM_FUNC_START(_restore_lsx_context)
li.w a0, 0 # success
jr ra
SYM_FUNC_END(_restore_lsx_context)
EXPORT_SYMBOL_GPL(_restore_lsx_context)
/*
* a0: fpregs
@@ -510,6 +514,7 @@ SYM_FUNC_START(_save_lasx_context)
li.w a0, 0 # success
jr ra
SYM_FUNC_END(_save_lasx_context)
EXPORT_SYMBOL_GPL(_save_lasx_context)
/*
* a0: fpregs
@@ -523,6 +528,7 @@ SYM_FUNC_START(_restore_lasx_context)
li.w a0, 0 # success
jr ra
SYM_FUNC_END(_restore_lasx_context)
EXPORT_SYMBOL_GPL(_restore_lasx_context)
.L_fpu_fault:
li.w a0, -EFAULT # failure

View File

@@ -90,6 +90,7 @@ SYM_FUNC_START(_save_lbt_context)
li.w a0, 0 # success
jr ra
SYM_FUNC_END(_save_lbt_context)
EXPORT_SYMBOL_GPL(_save_lbt_context)
/*
* a0: scr
@@ -110,6 +111,7 @@ SYM_FUNC_START(_restore_lbt_context)
li.w a0, 0 # success
jr ra
SYM_FUNC_END(_restore_lbt_context)
EXPORT_SYMBOL_GPL(_restore_lbt_context)
/*
* a0: ftop
@@ -120,6 +122,7 @@ SYM_FUNC_START(_save_ftop_context)
li.w a0, 0 # success
jr ra
SYM_FUNC_END(_save_ftop_context)
EXPORT_SYMBOL_GPL(_save_ftop_context)
/*
* a0: ftop
@@ -150,6 +153,7 @@ SYM_FUNC_START(_restore_ftop_context)
li.w a0, 0 # success
jr ra
SYM_FUNC_END(_restore_ftop_context)
EXPORT_SYMBOL_GPL(_restore_ftop_context)
.L_lbt_fault:
li.w a0, -EFAULT # failure

View File

@@ -51,27 +51,6 @@
#define lock_lbt_owner() ({ preempt_disable(); pagefault_disable(); })
#define unlock_lbt_owner() ({ pagefault_enable(); preempt_enable(); })
/* Assembly functions to move context to/from the FPU */
extern asmlinkage int
_save_fp_context(void __user *fpregs, void __user *fcc, void __user *csr);
extern asmlinkage int
_restore_fp_context(void __user *fpregs, void __user *fcc, void __user *csr);
extern asmlinkage int
_save_lsx_context(void __user *fpregs, void __user *fcc, void __user *fcsr);
extern asmlinkage int
_restore_lsx_context(void __user *fpregs, void __user *fcc, void __user *fcsr);
extern asmlinkage int
_save_lasx_context(void __user *fpregs, void __user *fcc, void __user *fcsr);
extern asmlinkage int
_restore_lasx_context(void __user *fpregs, void __user *fcc, void __user *fcsr);
#ifdef CONFIG_CPU_HAS_LBT
extern asmlinkage int _save_lbt_context(void __user *regs, void __user *eflags);
extern asmlinkage int _restore_lbt_context(void __user *regs, void __user *eflags);
extern asmlinkage int _save_ftop_context(void __user *ftop);
extern asmlinkage int _restore_ftop_context(void __user *ftop);
#endif
struct rt_sigframe {
struct siginfo rs_info;
struct ucontext rs_uctx;

View File

@@ -553,9 +553,10 @@ asmlinkage void noinstr do_ale(struct pt_regs *regs)
die_if_kernel("Kernel ale access", regs);
force_sig_fault(SIGBUS, BUS_ADRALN, (void __user *)regs->csr_badvaddr);
#else
bool pie = regs_irqs_disabled(regs);
unsigned int *pc;
if (regs->csr_prmd & CSR_PRMD_PIE)
if (!pie)
local_irq_enable();
perf_sw_event(PERF_COUNT_SW_ALIGNMENT_FAULTS, 1, regs, regs->csr_badvaddr);
@@ -582,7 +583,7 @@ sigbus:
die_if_kernel("Kernel ale access", regs);
force_sig_fault(SIGBUS, BUS_ADRALN, (void __user *)regs->csr_badvaddr);
out:
if (regs->csr_prmd & CSR_PRMD_PIE)
if (!pie)
local_irq_disable();
#endif
irqentry_exit(regs, state);
@@ -614,12 +615,13 @@ static void bug_handler(struct pt_regs *regs)
asmlinkage void noinstr do_bce(struct pt_regs *regs)
{
bool user = user_mode(regs);
bool pie = regs_irqs_disabled(regs);
unsigned long era = exception_era(regs);
u64 badv = 0, lower = 0, upper = ULONG_MAX;
union loongarch_instruction insn;
irqentry_state_t state = irqentry_enter(regs);
if (regs->csr_prmd & CSR_PRMD_PIE)
if (!pie)
local_irq_enable();
current->thread.trap_nr = read_csr_excode();
@@ -685,7 +687,7 @@ asmlinkage void noinstr do_bce(struct pt_regs *regs)
force_sig_bnderr((void __user *)badv, (void __user *)lower, (void __user *)upper);
out:
if (regs->csr_prmd & CSR_PRMD_PIE)
if (!pie)
local_irq_disable();
irqentry_exit(regs, state);
@@ -703,11 +705,12 @@ bad_era:
asmlinkage void noinstr do_bp(struct pt_regs *regs)
{
bool user = user_mode(regs);
bool pie = regs_irqs_disabled(regs);
unsigned int opcode, bcode;
unsigned long era = exception_era(regs);
irqentry_state_t state = irqentry_enter(regs);
if (regs->csr_prmd & CSR_PRMD_PIE)
if (!pie)
local_irq_enable();
if (__get_inst(&opcode, (u32 *)era, user))
@@ -773,7 +776,7 @@ asmlinkage void noinstr do_bp(struct pt_regs *regs)
}
out:
if (regs->csr_prmd & CSR_PRMD_PIE)
if (!pie)
local_irq_disable();
irqentry_exit(regs, state);
@@ -1008,6 +1011,7 @@ static void init_restore_lbt(void)
asmlinkage void noinstr do_lbt(struct pt_regs *regs)
{
bool pie = regs_irqs_disabled(regs);
irqentry_state_t state = irqentry_enter(regs);
/*
@@ -1017,7 +1021,7 @@ asmlinkage void noinstr do_lbt(struct pt_regs *regs)
* (including the user using 'MOVGR2GCSR' to turn on TM, which
* will not trigger the BTE), we need to check PRMD first.
*/
if (regs->csr_prmd & CSR_PRMD_PIE)
if (!pie)
local_irq_enable();
if (!cpu_has_lbt) {
@@ -1031,7 +1035,7 @@ asmlinkage void noinstr do_lbt(struct pt_regs *regs)
preempt_enable();
out:
if (regs->csr_prmd & CSR_PRMD_PIE)
if (!pie)
local_irq_disable();
irqentry_exit(regs, state);

View File

@@ -294,6 +294,7 @@ static int kvm_pre_enter_guest(struct kvm_vcpu *vcpu)
vcpu->arch.aux_inuse &= ~KVM_LARCH_SWCSR_LATEST;
if (kvm_request_pending(vcpu) || xfer_to_guest_mode_work_pending()) {
kvm_lose_pmu(vcpu);
/* make sure the vcpu mode has been written */
smp_store_mb(vcpu->mode, OUTSIDE_GUEST_MODE);
local_irq_enable();
@@ -874,6 +875,13 @@ static int kvm_set_one_reg(struct kvm_vcpu *vcpu,
vcpu->arch.st.guest_addr = 0;
memset(&vcpu->arch.irq_pending, 0, sizeof(vcpu->arch.irq_pending));
memset(&vcpu->arch.irq_clear, 0, sizeof(vcpu->arch.irq_clear));
/*
* When the vCPU is reset, clear the ESTAT and GINTC registers.
* Other CSR registers are cleared with the function _kvm_setcsr().
*/
kvm_write_sw_gcsr(vcpu->arch.csr, LOONGARCH_CSR_GINTC, 0);
kvm_write_sw_gcsr(vcpu->arch.csr, LOONGARCH_CSR_ESTAT, 0);
break;
default:
ret = -EINVAL;

View File

@@ -47,7 +47,7 @@ pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr,
pmd = pmd_offset(pud, addr);
}
}
return (pte_t *) pmd;
return pmd_none(pmdp_get(pmd)) ? NULL : (pte_t *) pmd;
}
uint64_t pmd_to_entrylo(unsigned long pmd_val)

View File

@@ -65,9 +65,6 @@ void __init paging_init(void)
{
unsigned long max_zone_pfns[MAX_NR_ZONES];
#ifdef CONFIG_ZONE_DMA
max_zone_pfns[ZONE_DMA] = MAX_DMA_PFN;
#endif
#ifdef CONFIG_ZONE_DMA32
max_zone_pfns[ZONE_DMA32] = MAX_DMA32_PFN;
#endif

View File

@@ -12,9 +12,11 @@ config CRYPTO_CRC32_MIPS
Architecture: mips
config CRYPTO_POLY1305_MIPS
tristate "Hash functions: Poly1305"
tristate
depends on MIPS
select CRYPTO_HASH
select CRYPTO_ARCH_HAVE_LIB_POLY1305
default CRYPTO_LIB_POLY1305_INTERNAL
help
Poly1305 authenticator algorithm (RFC7539)
@@ -61,10 +63,11 @@ config CRYPTO_SHA512_OCTEON
Architecture: mips OCTEON using crypto instructions, when available
config CRYPTO_CHACHA_MIPS
tristate "Ciphers: ChaCha20, XChaCha20, XChaCha12 (MIPS32r2)"
tristate
depends on CPU_MIPS32_R2
select CRYPTO_SKCIPHER
select CRYPTO_ARCH_HAVE_LIB_CHACHA
default CRYPTO_LIB_CHACHA_INTERNAL
help
Length-preserving ciphers: ChaCha20, XChaCha20, and XChaCha12
stream cipher algorithms

View File

@@ -59,6 +59,16 @@ extern phys_addr_t mips_cm_l2sync_phys_base(void);
*/
extern int mips_cm_is64;
/*
* mips_cm_is_l2_hci_broken - determine if HCI is broken
*
* Some CM reports show that Hardware Cache Initialization is
* complete, but in reality it's not the case. They also incorrectly
* indicate that Hardware Cache Initialization is supported. This
* flag allows warning about this broken feature.
*/
extern bool mips_cm_is_l2_hci_broken;
/**
* mips_cm_error_report - Report CM cache errors
*/
@@ -97,6 +107,18 @@ static inline bool mips_cm_present(void)
#endif
}
/**
* mips_cm_update_property - update property from the device tree
*
* Retrieve the properties from the device tree if a CM node exists and
* update the internal variable based on this.
*/
#ifdef CONFIG_MIPS_CM
extern void mips_cm_update_property(void);
#else
static inline void mips_cm_update_property(void) {}
#endif
/**
* mips_cm_has_l2sync - determine whether an L2-only sync region is present
*

View File

@@ -5,6 +5,7 @@
*/
#include <linux/errno.h>
#include <linux/of.h>
#include <linux/percpu.h>
#include <linux/spinlock.h>
@@ -14,6 +15,7 @@
void __iomem *mips_gcr_base;
void __iomem *mips_cm_l2sync_base;
int mips_cm_is64;
bool mips_cm_is_l2_hci_broken;
static char *cm2_tr[8] = {
"mem", "gcr", "gic", "mmio",
@@ -237,6 +239,18 @@ static void mips_cm_probe_l2sync(void)
mips_cm_l2sync_base = ioremap(addr, MIPS_CM_L2SYNC_SIZE);
}
void mips_cm_update_property(void)
{
struct device_node *cm_node;
cm_node = of_find_compatible_node(of_root, NULL, "mobileye,eyeq6-cm");
if (!cm_node)
return;
pr_info("HCI (Hardware Cache Init for the L2 cache) in GCR_L2_RAM_CONFIG from the CM3 is broken");
mips_cm_is_l2_hci_broken = true;
of_node_put(cm_node);
}
int mips_cm_probe(void)
{
phys_addr_t addr;

View File

@@ -63,6 +63,7 @@ static unsigned long pdt_entry[MAX_PDT_ENTRIES] __page_aligned_bss;
#define PDT_ADDR_PERM_ERR (pdt_type != PDT_PDC ? 2UL : 0UL)
#define PDT_ADDR_SINGLE_ERR 1UL
#ifdef CONFIG_PROC_FS
/* report PDT entries via /proc/meminfo */
void arch_report_meminfo(struct seq_file *m)
{
@@ -74,6 +75,7 @@ void arch_report_meminfo(struct seq_file *m)
seq_printf(m, "PDT_cur_entries: %7lu\n",
pdt_status.pdt_entries);
}
#endif
static int get_info_pat_new(void)
{

View File

@@ -3,10 +3,12 @@
menu "Accelerated Cryptographic Algorithms for CPU (powerpc)"
config CRYPTO_CURVE25519_PPC64
tristate "Public key crypto: Curve25519 (PowerPC64)"
tristate
depends on PPC64 && CPU_LITTLE_ENDIAN
select CRYPTO_KPP
select CRYPTO_LIB_CURVE25519_GENERIC
select CRYPTO_ARCH_HAVE_LIB_CURVE25519
default CRYPTO_LIB_CURVE25519_INTERNAL
help
Curve25519 algorithm
@@ -124,11 +126,12 @@ config CRYPTO_AES_GCM_P10
later CPU. This module supports stitched acceleration for AES/GCM.
config CRYPTO_CHACHA20_P10
tristate "Ciphers: ChaCha20, XChacha20, XChacha12 (P10 or later)"
tristate
depends on PPC64 && CPU_LITTLE_ENDIAN && VSX
select CRYPTO_SKCIPHER
select CRYPTO_LIB_CHACHA_GENERIC
select CRYPTO_ARCH_HAVE_LIB_CHACHA
default CRYPTO_LIB_CHACHA_INTERNAL
help
Length-preserving ciphers: ChaCha20, XChaCha20, and XChaCha12
stream cipher algorithms

View File

@@ -22,7 +22,6 @@ config CRYPTO_CHACHA_RISCV64
tristate "Ciphers: ChaCha"
depends on 64BIT && RISCV_ISA_V && TOOLCHAIN_HAS_VECTOR_CRYPTO
select CRYPTO_SKCIPHER
select CRYPTO_LIB_CHACHA_GENERIC
help
Length-preserving ciphers: ChaCha20 stream cipher algorithm

View File

@@ -115,24 +115,19 @@
\old_c
.endm
#define _ALTERNATIVE_CFG(old_c, ...) \
ALTERNATIVE_CFG old_c
#define _ALTERNATIVE_CFG_2(old_c, ...) \
ALTERNATIVE_CFG old_c
#define __ALTERNATIVE_CFG(old_c, ...) ALTERNATIVE_CFG old_c
#define __ALTERNATIVE_CFG_2(old_c, ...) ALTERNATIVE_CFG old_c
#else /* !__ASSEMBLY__ */
#define __ALTERNATIVE_CFG(old_c) \
old_c "\n"
#define _ALTERNATIVE_CFG(old_c, ...) \
__ALTERNATIVE_CFG(old_c)
#define _ALTERNATIVE_CFG_2(old_c, ...) \
__ALTERNATIVE_CFG(old_c)
#define __ALTERNATIVE_CFG(old_c, ...) old_c "\n"
#define __ALTERNATIVE_CFG_2(old_c, ...) old_c "\n"
#endif /* __ASSEMBLY__ */
#define _ALTERNATIVE_CFG(old_c, ...) __ALTERNATIVE_CFG(old_c)
#define _ALTERNATIVE_CFG_2(old_c, ...) __ALTERNATIVE_CFG_2(old_c)
#endif /* CONFIG_RISCV_ALTERNATIVE */
/*

View File

@@ -34,11 +34,6 @@ static inline void flush_dcache_page(struct page *page)
flush_dcache_folio(page_folio(page));
}
/*
* RISC-V doesn't have an instruction to flush parts of the instruction cache,
* so instead we just flush the whole thing.
*/
#define flush_icache_range(start, end) flush_icache_all()
#define flush_icache_user_page(vma, pg, addr, len) \
do { \
if (vma->vm_flags & VM_EXEC) \
@@ -78,6 +73,16 @@ void flush_icache_mm(struct mm_struct *mm, bool local);
#endif /* CONFIG_SMP */
/*
* RISC-V doesn't have an instruction to flush parts of the instruction cache,
* so instead we just flush the whole thing.
*/
#define flush_icache_range flush_icache_range
static inline void flush_icache_range(unsigned long start, unsigned long end)
{
flush_icache_all();
}
extern unsigned int riscv_cbom_block_size;
extern unsigned int riscv_cboz_block_size;
void riscv_init_cbo_blocksizes(void);

View File

@@ -167,6 +167,7 @@ void arch_uprobe_copy_ixol(struct page *page, unsigned long vaddr,
/* Initialize the slot */
void *kaddr = kmap_atomic(page);
void *dst = kaddr + (vaddr & ~PAGE_MASK);
unsigned long start = (unsigned long)dst;
memcpy(dst, src, len);
@@ -176,13 +177,6 @@ void arch_uprobe_copy_ixol(struct page *page, unsigned long vaddr,
*(uprobe_opcode_t *)dst = __BUG_INSN_32;
}
flush_icache_range(start, start + len);
kunmap_atomic(kaddr);
/*
* We probably need flush_icache_user_page() but it needs vma.
* This should work on most of architectures by default. If
* architecture needs to do something different it can define
* its own version of the function.
*/
flush_dcache_page(page);
}

View File

@@ -120,11 +120,12 @@ config CRYPTO_DES_S390
As of z196 the CTR mode is hardware accelerated.
config CRYPTO_CHACHA_S390
tristate "Ciphers: ChaCha20"
tristate
depends on S390
select CRYPTO_SKCIPHER
select CRYPTO_LIB_CHACHA_GENERIC
select CRYPTO_ARCH_HAVE_LIB_CHACHA
default CRYPTO_LIB_CHACHA_INTERNAL
help
Length-preserving cipher: ChaCha20 stream cipher (RFC 7539)

View File

@@ -94,7 +94,7 @@ static int handle_validity(struct kvm_vcpu *vcpu)
vcpu->stat.exit_validity++;
trace_kvm_s390_intercept_validity(vcpu, viwhy);
KVM_EVENT(3, "validity intercept 0x%x for pid %u (kvm 0x%pK)", viwhy,
KVM_EVENT(3, "validity intercept 0x%x for pid %u (kvm 0x%p)", viwhy,
current->pid, vcpu->kvm);
/* do not warn on invalid runtime instrumentation mode */

View File

@@ -3161,7 +3161,7 @@ void kvm_s390_gisa_clear(struct kvm *kvm)
if (!gi->origin)
return;
gisa_clear_ipm(gi->origin);
VM_EVENT(kvm, 3, "gisa 0x%pK cleared", gi->origin);
VM_EVENT(kvm, 3, "gisa 0x%p cleared", gi->origin);
}
void kvm_s390_gisa_init(struct kvm *kvm)
@@ -3178,7 +3178,7 @@ void kvm_s390_gisa_init(struct kvm *kvm)
gi->timer.function = gisa_vcpu_kicker;
memset(gi->origin, 0, sizeof(struct kvm_s390_gisa));
gi->origin->next_alert = (u32)virt_to_phys(gi->origin);
VM_EVENT(kvm, 3, "gisa 0x%pK initialized", gi->origin);
VM_EVENT(kvm, 3, "gisa 0x%p initialized", gi->origin);
}
void kvm_s390_gisa_enable(struct kvm *kvm)
@@ -3219,7 +3219,7 @@ void kvm_s390_gisa_destroy(struct kvm *kvm)
process_gib_alert_list();
hrtimer_cancel(&gi->timer);
gi->origin = NULL;
VM_EVENT(kvm, 3, "gisa 0x%pK destroyed", gisa);
VM_EVENT(kvm, 3, "gisa 0x%p destroyed", gisa);
}
void kvm_s390_gisa_disable(struct kvm *kvm)
@@ -3468,7 +3468,7 @@ int __init kvm_s390_gib_init(u8 nisc)
}
}
KVM_EVENT(3, "gib 0x%pK (nisc=%d) initialized", gib, gib->nisc);
KVM_EVENT(3, "gib 0x%p (nisc=%d) initialized", gib, gib->nisc);
goto out;
out_unreg_gal:

View File

@@ -998,7 +998,7 @@ static int kvm_s390_set_mem_control(struct kvm *kvm, struct kvm_device_attr *att
}
mutex_unlock(&kvm->lock);
VM_EVENT(kvm, 3, "SET: max guest address: %lu", new_limit);
VM_EVENT(kvm, 3, "New guest asce: 0x%pK",
VM_EVENT(kvm, 3, "New guest asce: 0x%p",
(void *) kvm->arch.gmap->asce);
break;
}
@@ -3421,7 +3421,7 @@ int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
kvm_s390_gisa_init(kvm);
INIT_LIST_HEAD(&kvm->arch.pv.need_cleanup);
kvm->arch.pv.set_aside = NULL;
KVM_EVENT(3, "vm 0x%pK created by pid %u", kvm, current->pid);
KVM_EVENT(3, "vm 0x%p created by pid %u", kvm, current->pid);
return 0;
out_err:
@@ -3484,7 +3484,7 @@ void kvm_arch_destroy_vm(struct kvm *kvm)
kvm_s390_destroy_adapters(kvm);
kvm_s390_clear_float_irqs(kvm);
kvm_s390_vsie_destroy(kvm);
KVM_EVENT(3, "vm 0x%pK destroyed", kvm);
KVM_EVENT(3, "vm 0x%p destroyed", kvm);
}
/* Section: vcpu related */
@@ -3605,7 +3605,7 @@ static int sca_switch_to_extended(struct kvm *kvm)
free_page((unsigned long)old_sca);
VM_EVENT(kvm, 2, "Switched to ESCA (0x%pK -> 0x%pK)",
VM_EVENT(kvm, 2, "Switched to ESCA (0x%p -> 0x%p)",
old_sca, kvm->arch.sca);
return 0;
}
@@ -3978,7 +3978,7 @@ int kvm_arch_vcpu_create(struct kvm_vcpu *vcpu)
goto out_free_sie_block;
}
VM_EVENT(vcpu->kvm, 3, "create cpu %d at 0x%pK, sie block at 0x%pK",
VM_EVENT(vcpu->kvm, 3, "create cpu %d at 0x%p, sie block at 0x%p",
vcpu->vcpu_id, vcpu, vcpu->arch.sie_block);
trace_kvm_s390_create_vcpu(vcpu->vcpu_id, vcpu, vcpu->arch.sie_block);

View File

@@ -56,7 +56,7 @@ TRACE_EVENT(kvm_s390_create_vcpu,
__entry->sie_block = sie_block;
),
TP_printk("create cpu %d at 0x%pK, sie block at 0x%pK",
TP_printk("create cpu %d at 0x%p, sie block at 0x%p",
__entry->id, __entry->vcpu, __entry->sie_block)
);
@@ -255,7 +255,7 @@ TRACE_EVENT(kvm_s390_enable_css,
__entry->kvm = kvm;
),
TP_printk("enabling channel I/O support (kvm @ %pK)\n",
TP_printk("enabling channel I/O support (kvm @ %p)\n",
__entry->kvm)
);

View File

@@ -83,6 +83,8 @@ extern void time_travel_not_configured(void);
#define time_travel_del_event(...) time_travel_not_configured()
#endif /* CONFIG_UML_TIME_TRAVEL_SUPPORT */
extern unsigned long tt_extra_sched_jiffies;
/*
* Without CONFIG_UML_TIME_TRAVEL_SUPPORT this is a linker error if used,
* which is intentional since we really shouldn't link it in that case.

View File

@@ -31,6 +31,17 @@ void handle_syscall(struct uml_pt_regs *r)
goto out;
syscall = UPT_SYSCALL_NR(r);
/*
* If no time passes, then sched_yield may not actually yield, causing
* broken spinlock implementations in userspace (ASAN) to hang for long
* periods of time.
*/
if ((time_travel_mode == TT_MODE_INFCPU ||
time_travel_mode == TT_MODE_EXTERNAL) &&
syscall == __NR_sched_yield)
tt_extra_sched_jiffies += 1;
if (syscall >= 0 && syscall < __NR_syscalls) {
unsigned long ret = EXECUTE_SYSCALL(syscall, regs);

View File

@@ -3,10 +3,12 @@
menu "Accelerated Cryptographic Algorithms for CPU (x86)"
config CRYPTO_CURVE25519_X86
tristate "Public key crypto: Curve25519 (ADX)"
tristate
depends on X86 && 64BIT
select CRYPTO_KPP
select CRYPTO_LIB_CURVE25519_GENERIC
select CRYPTO_ARCH_HAVE_LIB_CURVE25519
default CRYPTO_LIB_CURVE25519_INTERNAL
help
Curve25519 algorithm
@@ -348,11 +350,12 @@ config CRYPTO_ARIA_GFNI_AVX512_X86_64
Processes 64 blocks in parallel.
config CRYPTO_CHACHA20_X86_64
tristate "Ciphers: ChaCha20, XChaCha20, XChaCha12 (SSSE3/AVX2/AVX-512VL)"
tristate
depends on X86 && 64BIT
select CRYPTO_SKCIPHER
select CRYPTO_LIB_CHACHA_GENERIC
select CRYPTO_ARCH_HAVE_LIB_CHACHA
default CRYPTO_LIB_CHACHA_INTERNAL
help
Length-preserving ciphers: ChaCha20, XChaCha20, and XChaCha12
stream cipher algorithms
@@ -417,10 +420,12 @@ config CRYPTO_POLYVAL_CLMUL_NI
- CLMUL-NI (carry-less multiplication new instructions)
config CRYPTO_POLY1305_X86_64
tristate "Hash functions: Poly1305 (SSE2/AVX2)"
tristate
depends on X86 && 64BIT
select CRYPTO_HASH
select CRYPTO_LIB_POLY1305_GENERIC
select CRYPTO_ARCH_HAVE_LIB_POLY1305
default CRYPTO_LIB_POLY1305_INTERNAL
help
Poly1305 authenticator algorithm (RFC7539)

View File

@@ -18,7 +18,7 @@
SYM_FUNC_START(entry_ibpb)
movl $MSR_IA32_PRED_CMD, %ecx
movl $PRED_CMD_IBPB, %eax
movl _ASM_RIP(x86_pred_cmd), %eax
xorl %edx, %edx
wrmsr

View File

@@ -628,7 +628,7 @@ int x86_pmu_hw_config(struct perf_event *event)
if (event->attr.type == event->pmu->type)
event->hw.config |= x86_pmu_get_event_config(event);
if (!event->attr.freq && x86_pmu.limit_period) {
if (is_sampling_event(event) && !event->attr.freq && x86_pmu.limit_period) {
s64 left = event->attr.sample_period;
x86_pmu.limit_period(event, &left);
if (left > event->attr.sample_period)

View File

@@ -449,6 +449,7 @@
#define X86_FEATURE_SME_COHERENT (19*32+10) /* AMD hardware-enforced cache coherency */
#define X86_FEATURE_DEBUG_SWAP (19*32+14) /* "debug_swap" AMD SEV-ES full debug state swap support */
#define X86_FEATURE_SVSM (19*32+28) /* "svsm" SVSM present */
#define X86_FEATURE_HV_INUSE_WR_ALLOWED (19*32+30) /* Allow Write to in-use hypervisor-owned pages */
/* AMD-defined Extended Feature 2 EAX, CPUID level 0x80000021 (EAX), word 20 */
#define X86_FEATURE_NO_NESTED_DATA_BP (20*32+ 0) /* No Nested Data Breakpoints */

View File

@@ -115,6 +115,8 @@
#define INTEL_GRANITERAPIDS_X IFM(6, 0xAD)
#define INTEL_GRANITERAPIDS_D IFM(6, 0xAE)
#define INTEL_BARTLETTLAKE IFM(6, 0xD7) /* Raptor Cove */
/* "Hybrid" Processors (P-Core/E-Core) */
#define INTEL_LAKEFIELD IFM(6, 0x8A) /* Sunny Cove / Tremont */

View File

@@ -1578,7 +1578,7 @@ static void __init spec_ctrl_disable_kernel_rrsba(void)
rrsba_disabled = true;
}
static void __init spectre_v2_determine_rsb_fill_type_at_vmexit(enum spectre_v2_mitigation mode)
static void __init spectre_v2_select_rsb_mitigation(enum spectre_v2_mitigation mode)
{
/*
* Similar to context switches, there are two types of RSB attacks
@@ -1602,27 +1602,30 @@ static void __init spectre_v2_determine_rsb_fill_type_at_vmexit(enum spectre_v2_
*/
switch (mode) {
case SPECTRE_V2_NONE:
return;
break;
case SPECTRE_V2_EIBRS_LFENCE:
case SPECTRE_V2_EIBRS:
if (boot_cpu_has_bug(X86_BUG_EIBRS_PBRSB)) {
setup_force_cpu_cap(X86_FEATURE_RSB_VMEXIT_LITE);
pr_info("Spectre v2 / PBRSB-eIBRS: Retire a single CALL on VMEXIT\n");
}
return;
case SPECTRE_V2_EIBRS_LFENCE:
case SPECTRE_V2_EIBRS_RETPOLINE:
if (boot_cpu_has_bug(X86_BUG_EIBRS_PBRSB)) {
pr_info("Spectre v2 / PBRSB-eIBRS: Retire a single CALL on VMEXIT\n");
setup_force_cpu_cap(X86_FEATURE_RSB_VMEXIT_LITE);
}
break;
case SPECTRE_V2_RETPOLINE:
case SPECTRE_V2_LFENCE:
case SPECTRE_V2_IBRS:
pr_info("Spectre v2 / SpectreRSB: Filling RSB on context switch and VMEXIT\n");
setup_force_cpu_cap(X86_FEATURE_RSB_CTXSW);
setup_force_cpu_cap(X86_FEATURE_RSB_VMEXIT);
pr_info("Spectre v2 / SpectreRSB : Filling RSB on VMEXIT\n");
return;
}
break;
pr_warn_once("Unknown Spectre v2 mode, disabling RSB mitigation at VM exit");
dump_stack();
default:
pr_warn_once("Unknown Spectre v2 mode, disabling RSB mitigation\n");
dump_stack();
break;
}
}
/*
@@ -1854,10 +1857,7 @@ static void __init spectre_v2_select_mitigation(void)
*
* FIXME: Is this pointless for retbleed-affected AMD?
*/
setup_force_cpu_cap(X86_FEATURE_RSB_CTXSW);
pr_info("Spectre v2 / SpectreRSB mitigation: Filling RSB on context switch\n");
spectre_v2_determine_rsb_fill_type_at_vmexit(mode);
spectre_v2_select_rsb_mitigation(mode);
/*
* Retpoline protects the kernel, but doesn't protect firmware. IBRS

View File

@@ -46,7 +46,8 @@ bool __init pit_timer_init(void)
* VMMs otherwise steal CPU time just to pointlessly waggle
* the (masked) IRQ.
*/
clockevent_i8253_disable();
scoped_guard(irq)
clockevent_i8253_disable();
return false;
}
clockevent_i8253_init(true);

View File

@@ -820,7 +820,7 @@ static int svm_ir_list_add(struct vcpu_svm *svm, struct amd_iommu_pi_data *pi)
* Allocate a new amd_iommu_pi_data, which will get
* added to the per-vcpu ir_list.
*/
ir = kzalloc(sizeof(struct amd_svm_iommu_ir), GFP_KERNEL_ACCOUNT);
ir = kzalloc(sizeof(struct amd_svm_iommu_ir), GFP_ATOMIC | __GFP_ACCOUNT);
if (!ir) {
ret = -ENOMEM;
goto out;
@@ -896,6 +896,7 @@ int avic_pi_update_irte(struct kvm *kvm, unsigned int host_irq,
{
struct kvm_kernel_irq_routing_entry *e;
struct kvm_irq_routing_table *irq_rt;
bool enable_remapped_mode = true;
int idx, ret = 0;
if (!kvm_arch_has_assigned_device(kvm) ||
@@ -933,6 +934,8 @@ int avic_pi_update_irte(struct kvm *kvm, unsigned int host_irq,
kvm_vcpu_apicv_active(&svm->vcpu)) {
struct amd_iommu_pi_data pi;
enable_remapped_mode = false;
/* Try to enable guest_mode in IRTE */
pi.base = __sme_set(page_to_phys(svm->avic_backing_page) &
AVIC_HPA_MASK);
@@ -951,33 +954,6 @@ int avic_pi_update_irte(struct kvm *kvm, unsigned int host_irq,
*/
if (!ret && pi.is_guest_mode)
svm_ir_list_add(svm, &pi);
} else {
/* Use legacy mode in IRTE */
struct amd_iommu_pi_data pi;
/**
* Here, pi is used to:
* - Tell IOMMU to use legacy mode for this interrupt.
* - Retrieve ga_tag of prior interrupt remapping data.
*/
pi.prev_ga_tag = 0;
pi.is_guest_mode = false;
ret = irq_set_vcpu_affinity(host_irq, &pi);
/**
* Check if the posted interrupt was previously
* setup with the guest_mode by checking if the ga_tag
* was cached. If so, we need to clean up the per-vcpu
* ir_list.
*/
if (!ret && pi.prev_ga_tag) {
int id = AVIC_GATAG_TO_VCPUID(pi.prev_ga_tag);
struct kvm_vcpu *vcpu;
vcpu = kvm_get_vcpu_by_id(kvm, id);
if (vcpu)
svm_ir_list_del(to_svm(vcpu), &pi);
}
}
if (!ret && svm) {
@@ -993,6 +969,34 @@ int avic_pi_update_irte(struct kvm *kvm, unsigned int host_irq,
}
ret = 0;
if (enable_remapped_mode) {
/* Use legacy mode in IRTE */
struct amd_iommu_pi_data pi;
/**
* Here, pi is used to:
* - Tell IOMMU to use legacy mode for this interrupt.
* - Retrieve ga_tag of prior interrupt remapping data.
*/
pi.prev_ga_tag = 0;
pi.is_guest_mode = false;
ret = irq_set_vcpu_affinity(host_irq, &pi);
/**
* Check if the posted interrupt was previously
* setup with the guest_mode by checking if the ga_tag
* was cached. If so, we need to clean up the per-vcpu
* ir_list.
*/
if (!ret && pi.prev_ga_tag) {
int id = AVIC_GATAG_TO_VCPUID(pi.prev_ga_tag);
struct kvm_vcpu *vcpu;
vcpu = kvm_get_vcpu_by_id(kvm, id);
if (vcpu)
svm_ir_list_del(to_svm(vcpu), &pi);
}
}
out:
srcu_read_unlock(&kvm->irq_srcu, idx);
return ret;
@@ -1199,6 +1203,12 @@ bool avic_hardware_setup(void)
return false;
}
if (cc_platform_has(CC_ATTR_HOST_SEV_SNP) &&
!boot_cpu_has(X86_FEATURE_HV_INUSE_WR_ALLOWED)) {
pr_warn("AVIC disabled: missing HvInUseWrAllowed on SNP-enabled system\n");
return false;
}
if (boot_cpu_has(X86_FEATURE_AVIC)) {
pr_info("AVIC enabled\n");
} else if (force_avic) {

View File

@@ -274,6 +274,7 @@ int vmx_pi_update_irte(struct kvm *kvm, unsigned int host_irq,
{
struct kvm_kernel_irq_routing_entry *e;
struct kvm_irq_routing_table *irq_rt;
bool enable_remapped_mode = true;
struct kvm_lapic_irq irq;
struct kvm_vcpu *vcpu;
struct vcpu_data vcpu_info;
@@ -312,21 +313,8 @@ int vmx_pi_update_irte(struct kvm *kvm, unsigned int host_irq,
kvm_set_msi_irq(kvm, e, &irq);
if (!kvm_intr_is_single_vcpu(kvm, &irq, &vcpu) ||
!kvm_irq_is_postable(&irq)) {
/*
* Make sure the IRTE is in remapped mode if
* we don't handle it in posted mode.
*/
ret = irq_set_vcpu_affinity(host_irq, NULL);
if (ret < 0) {
printk(KERN_INFO
"failed to back to remapped mode, irq: %u\n",
host_irq);
goto out;
}
!kvm_irq_is_postable(&irq))
continue;
}
vcpu_info.pi_desc_addr = __pa(vcpu_to_pi_desc(vcpu));
vcpu_info.vector = irq.vector;
@@ -334,11 +322,12 @@ int vmx_pi_update_irte(struct kvm *kvm, unsigned int host_irq,
trace_kvm_pi_irte_update(host_irq, vcpu->vcpu_id, e->gsi,
vcpu_info.vector, vcpu_info.pi_desc_addr, set);
if (set)
ret = irq_set_vcpu_affinity(host_irq, &vcpu_info);
else
ret = irq_set_vcpu_affinity(host_irq, NULL);
if (!set)
continue;
enable_remapped_mode = false;
ret = irq_set_vcpu_affinity(host_irq, &vcpu_info);
if (ret < 0) {
printk(KERN_INFO "%s: failed to update PI IRTE\n",
__func__);
@@ -346,6 +335,9 @@ int vmx_pi_update_irte(struct kvm *kvm, unsigned int host_irq,
}
}
if (enable_remapped_mode)
ret = irq_set_vcpu_affinity(host_irq, NULL);
ret = 0;
out:
srcu_read_unlock(&kvm->irq_srcu, idx);

View File

@@ -13555,15 +13555,22 @@ int kvm_arch_irq_bypass_add_producer(struct irq_bypass_consumer *cons,
{
struct kvm_kernel_irqfd *irqfd =
container_of(cons, struct kvm_kernel_irqfd, consumer);
struct kvm *kvm = irqfd->kvm;
int ret;
irqfd->producer = prod;
kvm_arch_start_assignment(irqfd->kvm);
spin_lock_irq(&kvm->irqfds.lock);
irqfd->producer = prod;
ret = kvm_x86_call(pi_update_irte)(irqfd->kvm,
prod->irq, irqfd->gsi, 1);
if (ret)
kvm_arch_end_assignment(irqfd->kvm);
spin_unlock_irq(&kvm->irqfds.lock);
return ret;
}
@@ -13573,9 +13580,9 @@ void kvm_arch_irq_bypass_del_producer(struct irq_bypass_consumer *cons,
int ret;
struct kvm_kernel_irqfd *irqfd =
container_of(cons, struct kvm_kernel_irqfd, consumer);
struct kvm *kvm = irqfd->kvm;
WARN_ON(irqfd->producer != prod);
irqfd->producer = NULL;
/*
* When the producer of a consumer is unregistered, we change back to
@@ -13583,12 +13590,18 @@ void kvm_arch_irq_bypass_del_producer(struct irq_bypass_consumer *cons,
* when the irq is masked/disabled or the consumer side (KVM
* in this case) doesn't want to receive the interrupts.
*/
spin_lock_irq(&kvm->irqfds.lock);
irqfd->producer = NULL;
ret = kvm_x86_call(pi_update_irte)(irqfd->kvm,
prod->irq, irqfd->gsi, 0);
if (ret)
printk(KERN_INFO "irq bypass consumer (token %p) unregistration"
" fails: %d\n", irqfd->consumer.token, ret);
spin_unlock_irq(&kvm->irqfds.lock);
kvm_arch_end_assignment(irqfd->kvm);
}
@@ -13601,7 +13614,8 @@ int kvm_arch_update_irqfd_routing(struct kvm *kvm, unsigned int host_irq,
bool kvm_arch_irqfd_route_changed(struct kvm_kernel_irq_routing_entry *old,
struct kvm_kernel_irq_routing_entry *new)
{
if (new->type != KVM_IRQ_ROUTING_MSI)
if (old->type != KVM_IRQ_ROUTING_MSI ||
new->type != KVM_IRQ_ROUTING_MSI)
return true;
return !!memcmp(&old->msi, &new->msi, sizeof(new->msi));

View File

@@ -996,8 +996,8 @@ AVXcode: 4
83: Grp1 Ev,Ib (1A),(es)
# CTESTSCC instructions are: CTESTB, CTESTBE, CTESTF, CTESTL, CTESTLE, CTESTNB, CTESTNBE, CTESTNL,
# CTESTNLE, CTESTNO, CTESTNS, CTESTNZ, CTESTO, CTESTS, CTESTT, CTESTZ
84: CTESTSCC (ev)
85: CTESTSCC (es) | CTESTSCC (66),(es)
84: CTESTSCC Eb,Gb (ev)
85: CTESTSCC Ev,Gv (es) | CTESTSCC Ev,Gv (66),(es)
88: POPCNT Gv,Ev (es) | POPCNT Gv,Ev (66),(es)
8f: POP2 Bq,Rq (000),(11B),(ev)
a5: SHLD Ev,Gv,CL (es) | SHLD Ev,Gv,CL (66),(es)

View File

@@ -389,9 +389,9 @@ static void cond_mitigation(struct task_struct *next)
prev_mm = this_cpu_read(cpu_tlbstate.last_user_mm_spec);
/*
* Avoid user/user BTB poisoning by flushing the branch predictor
* when switching between processes. This stops one process from
* doing Spectre-v2 attacks on another.
* Avoid user->user BTB/RSB poisoning by flushing them when switching
* between processes. This stops one process from doing Spectre-v2
* attacks on another.
*
* Both, the conditional and the always IBPB mode use the mm
* pointer to avoid the IBPB when switching between tasks of the

View File

@@ -436,7 +436,8 @@ static struct msi_domain_ops xen_pci_msi_domain_ops = {
};
static struct msi_domain_info xen_pci_msi_domain_info = {
.flags = MSI_FLAG_PCI_MSIX | MSI_FLAG_FREE_MSI_DESCS | MSI_FLAG_DEV_SYSFS,
.flags = MSI_FLAG_PCI_MSIX | MSI_FLAG_FREE_MSI_DESCS |
MSI_FLAG_DEV_SYSFS | MSI_FLAG_NO_MASK,
.ops = &xen_pci_msi_domain_ops,
};
@@ -484,11 +485,6 @@ static __init void xen_setup_pci_msi(void)
* in allocating the native domain and never use it.
*/
x86_init.irqs.create_pci_msi_domain = xen_create_pci_msi_domain;
/*
* With XEN PIRQ/Eventchannels in use PCI/MSI[-X] masking is solely
* controlled by the hypervisor.
*/
pci_msi_ignore_mask = 1;
}
#else /* CONFIG_PCI_MSI */

View File

@@ -1,5 +1,7 @@
// SPDX-License-Identifier: GPL-2.0
#include <linux/acpi.h>
#include <linux/cpufreq.h>
#include <linux/cpuidle.h>
#include <linux/export.h>
#include <linux/mm.h>
@@ -123,8 +125,23 @@ static void __init pvh_arch_setup(void)
{
pvh_reserve_extra_memory();
if (xen_initial_domain())
if (xen_initial_domain()) {
xen_add_preferred_consoles();
/*
* Disable usage of CPU idle and frequency drivers: when
* running as the hardware domain, the exposed native ACPI tables
* cause idle and/or frequency drivers to attach and
* malfunction. Xen is the entity that controls the idle and
* frequency states.
*
* For unprivileged domains the exposed ACPI tables are
* fabricated and don't contain such data.
*/
disable_cpuidle();
disable_cpufreq();
WARN_ON(xen_set_default_idle());
}
}
void __init xen_pvh_init(struct boot_params *boot_params)

View File

@@ -864,12 +864,13 @@ static struct request *attempt_merge(struct request_queue *q,
if (rq_data_dir(req) != rq_data_dir(next))
return NULL;
/* Don't merge requests with different write hints. */
if (req->write_hint != next->write_hint)
return NULL;
if (req->ioprio != next->ioprio)
return NULL;
if (req->bio && next->bio) {
/* Don't merge requests with different write hints. */
if (req->bio->bi_write_hint != next->bio->bi_write_hint)
return NULL;
if (req->bio->bi_ioprio != next->bio->bi_ioprio)
return NULL;
}
if (!blk_atomic_write_mergeable_rqs(req, next))
return NULL;
@@ -998,12 +999,13 @@ bool blk_rq_merge_ok(struct request *rq, struct bio *bio)
if (!bio_crypt_rq_ctx_compatible(rq, bio))
return false;
/* Don't merge requests with different write hints. */
if (rq->write_hint != bio->bi_write_hint)
return false;
if (rq->ioprio != bio_prio(bio))
return false;
if (rq->bio) {
/* Don't merge requests with different write hints. */
if (rq->bio->bi_write_hint != bio->bi_write_hint)
return false;
if (rq->bio->bi_ioprio != bio->bi_ioprio)
return false;
}
if (blk_atomic_write_mergeable_rq_bio(rq, bio) == false)
return false;

View File

@@ -870,7 +870,7 @@ static void blk_print_req_error(struct request *req, blk_status_t status)
blk_op_str(req_op(req)),
(__force u32)(req->cmd_flags & ~REQ_OP_MASK),
req->nr_phys_segments,
IOPRIO_PRIO_CLASS(req->ioprio));
IOPRIO_PRIO_CLASS(req_get_ioprio(req)));
}
/*
@@ -2654,7 +2654,6 @@ static void blk_mq_bio_to_request(struct request *rq, struct bio *bio,
rq->cmd_flags |= REQ_FAILFAST_MASK;
rq->__sector = bio->bi_iter.bi_sector;
rq->write_hint = bio->bi_write_hint;
blk_rq_bio_prep(rq, bio, nr_segs);
if (bio_integrity(bio))
rq->nr_integrity_segments = blk_rq_count_integrity_sg(rq->q,
@@ -3307,8 +3306,7 @@ int blk_rq_prep_clone(struct request *rq, struct request *rq_src,
rq->special_vec = rq_src->special_vec;
}
rq->nr_phys_segments = rq_src->nr_phys_segments;
rq->ioprio = rq_src->ioprio;
rq->write_hint = rq_src->write_hint;
rq->nr_integrity_segments = rq_src->nr_integrity_segments;
if (rq->bio && blk_crypto_rq_bio_prep(rq, rq->bio, gfp_mask) < 0)
goto free_and_out;

View File

@@ -61,8 +61,14 @@ void blk_apply_bdi_limits(struct backing_dev_info *bdi,
/*
* For read-ahead of large files to be effective, we need to read ahead
* at least twice the optimal I/O size.
*
* There is no hardware limitation for the read-ahead size and the user
* might have increased the read-ahead size through sysfs, so don't ever
* decrease it.
*/
bdi->ra_pages = max(lim->io_opt * 2 / PAGE_SIZE, VM_READAHEAD_PAGES);
bdi->ra_pages = max3(bdi->ra_pages,
lim->io_opt * 2 / PAGE_SIZE,
VM_READAHEAD_PAGES);
bdi->io_pages = lim->max_sectors >> PAGE_SECTORS_SHIFT;
}
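As a hedged worked example of the new max3() bound: with a device reporting
io_opt = 1 MiB and 4 KiB pages, lim->io_opt * 2 / PAGE_SIZE is 512 pages
(2 MiB) while VM_READAHEAD_PAGES is 32 pages (128 KiB), so read-ahead is
raised to 2 MiB; a larger bdi->ra_pages previously set through sysfs is now
kept rather than clamped back down.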

View File

@@ -685,10 +685,9 @@ static void dd_insert_request(struct blk_mq_hw_ctx *hctx, struct request *rq,
prio = ioprio_class_to_prio[ioprio_class];
per_prio = &dd->per_prio[prio];
if (!rq->elv.priv[0]) {
if (!rq->elv.priv[0])
per_prio->stats.inserted++;
rq->elv.priv[0] = (void *)(uintptr_t)1;
}
rq->elv.priv[0] = per_prio;
if (blk_mq_sched_try_insert_merge(q, rq, free))
return;
@@ -753,18 +752,14 @@ static void dd_prepare_request(struct request *rq)
*/
static void dd_finish_request(struct request *rq)
{
struct request_queue *q = rq->q;
struct deadline_data *dd = q->elevator->elevator_data;
const u8 ioprio_class = dd_rq_ioclass(rq);
const enum dd_prio prio = ioprio_class_to_prio[ioprio_class];
struct dd_per_prio *per_prio = &dd->per_prio[prio];
struct dd_per_prio *per_prio = rq->elv.priv[0];
/*
* The block layer core may call dd_finish_request() without having
* called dd_insert_requests(). Skip requests that bypassed I/O
* scheduling. See also blk_mq_request_bypass_insert().
*/
if (rq->elv.priv[0])
if (per_prio)
atomic_inc(&per_prio->stats.completed);
}

View File

@@ -317,6 +317,7 @@ config CRYPTO_CURVE25519
tristate "Curve25519"
select CRYPTO_KPP
select CRYPTO_LIB_CURVE25519_GENERIC
select CRYPTO_LIB_CURVE25519_INTERNAL
help
Curve25519 elliptic curve (RFC7748)
@@ -615,6 +616,7 @@ config CRYPTO_ARC4
config CRYPTO_CHACHA20
tristate "ChaCha"
select CRYPTO_LIB_CHACHA_GENERIC
select CRYPTO_LIB_CHACHA_INTERNAL
select CRYPTO_SKCIPHER
help
The ChaCha20, XChaCha20, and XChaCha12 stream cipher algorithms
@@ -944,6 +946,7 @@ config CRYPTO_POLY1305
tristate "Poly1305"
select CRYPTO_HASH
select CRYPTO_LIB_POLY1305_GENERIC
select CRYPTO_LIB_POLY1305_INTERNAL
help
Poly1305 authenticator algorithm (RFC7539)

View File

@@ -17,10 +17,10 @@
#include <crypto/internal/skcipher.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/spinlock.h>
#include <linux/string.h>
static DEFINE_MUTEX(crypto_default_null_skcipher_lock);
static DEFINE_SPINLOCK(crypto_default_null_skcipher_lock);
static struct crypto_sync_skcipher *crypto_default_null_skcipher;
static int crypto_default_null_skcipher_refcnt;
@@ -152,23 +152,32 @@ MODULE_ALIAS_CRYPTO("cipher_null");
struct crypto_sync_skcipher *crypto_get_default_null_skcipher(void)
{
struct crypto_sync_skcipher *ntfm = NULL;
struct crypto_sync_skcipher *tfm;
mutex_lock(&crypto_default_null_skcipher_lock);
spin_lock_bh(&crypto_default_null_skcipher_lock);
tfm = crypto_default_null_skcipher;
if (!tfm) {
tfm = crypto_alloc_sync_skcipher("ecb(cipher_null)", 0, 0);
if (IS_ERR(tfm))
goto unlock;
spin_unlock_bh(&crypto_default_null_skcipher_lock);
crypto_default_null_skcipher = tfm;
ntfm = crypto_alloc_sync_skcipher("ecb(cipher_null)", 0, 0);
if (IS_ERR(ntfm))
return ntfm;
spin_lock_bh(&crypto_default_null_skcipher_lock);
tfm = crypto_default_null_skcipher;
if (!tfm) {
tfm = ntfm;
ntfm = NULL;
crypto_default_null_skcipher = tfm;
}
}
crypto_default_null_skcipher_refcnt++;
spin_unlock_bh(&crypto_default_null_skcipher_lock);
unlock:
mutex_unlock(&crypto_default_null_skcipher_lock);
crypto_free_sync_skcipher(ntfm);
return tfm;
}
@@ -176,12 +185,16 @@ EXPORT_SYMBOL_GPL(crypto_get_default_null_skcipher);
void crypto_put_default_null_skcipher(void)
{
mutex_lock(&crypto_default_null_skcipher_lock);
struct crypto_sync_skcipher *tfm = NULL;
spin_lock_bh(&crypto_default_null_skcipher_lock);
if (!--crypto_default_null_skcipher_refcnt) {
crypto_free_sync_skcipher(crypto_default_null_skcipher);
tfm = crypto_default_null_skcipher;
crypto_default_null_skcipher = NULL;
}
mutex_unlock(&crypto_default_null_skcipher_lock);
spin_unlock_bh(&crypto_default_null_skcipher_lock);
crypto_free_sync_skcipher(tfm);
}
EXPORT_SYMBOL_GPL(crypto_put_default_null_skcipher);

View File

@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2020-2024 Intel Corporation
* Copyright (C) 2020-2025 Intel Corporation
*/
#include <linux/firmware.h>
@@ -54,9 +54,9 @@ u8 ivpu_pll_max_ratio = U8_MAX;
module_param_named(pll_max_ratio, ivpu_pll_max_ratio, byte, 0644);
MODULE_PARM_DESC(pll_max_ratio, "Maximum PLL ratio used to set NPU frequency");
int ivpu_sched_mode;
int ivpu_sched_mode = IVPU_SCHED_MODE_AUTO;
module_param_named(sched_mode, ivpu_sched_mode, int, 0444);
MODULE_PARM_DESC(sched_mode, "Scheduler mode: 0 - Default scheduler, 1 - Force HW scheduler");
MODULE_PARM_DESC(sched_mode, "Scheduler mode: -1 - Use default scheduler, 0 - Use OS scheduler, 1 - Use HW scheduler");
bool ivpu_disable_mmu_cont_pages;
module_param_named(disable_mmu_cont_pages, ivpu_disable_mmu_cont_pages, bool, 0444);
@@ -165,7 +165,7 @@ static int ivpu_get_param_ioctl(struct drm_device *dev, void *data, struct drm_f
args->value = vdev->platform;
break;
case DRM_IVPU_PARAM_CORE_CLOCK_RATE:
args->value = ivpu_hw_ratio_to_freq(vdev, vdev->hw->pll.max_ratio);
args->value = ivpu_hw_dpu_max_freq_get(vdev);
break;
case DRM_IVPU_PARAM_NUM_CONTEXTS:
args->value = ivpu_get_context_count(vdev);
@@ -347,7 +347,7 @@ static int ivpu_hw_sched_init(struct ivpu_device *vdev)
{
int ret = 0;
if (vdev->hw->sched_mode == VPU_SCHEDULING_MODE_HW) {
if (vdev->fw->sched_mode == VPU_SCHEDULING_MODE_HW) {
ret = ivpu_jsm_hws_setup_priority_bands(vdev);
if (ret) {
ivpu_err(vdev, "Failed to enable hw scheduler: %d", ret);

View File

@@ -57,6 +57,8 @@
#define IVPU_PLATFORM_FPGA 3
#define IVPU_PLATFORM_INVALID 8
#define IVPU_SCHED_MODE_AUTO -1
#define IVPU_DBG_REG BIT(0)
#define IVPU_DBG_IRQ BIT(1)
#define IVPU_DBG_MMU BIT(2)

View File

@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2020-2024 Intel Corporation
* Copyright (C) 2020-2025 Intel Corporation
*/
#include <linux/firmware.h>
@@ -134,6 +134,15 @@ static bool is_within_range(u64 addr, size_t size, u64 range_start, size_t range
return true;
}
static u32
ivpu_fw_sched_mode_select(struct ivpu_device *vdev, const struct vpu_firmware_header *fw_hdr)
{
if (ivpu_sched_mode != IVPU_SCHED_MODE_AUTO)
return ivpu_sched_mode;
return VPU_SCHEDULING_MODE_OS;
}
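/*
 * Note on the selection logic above: with the module parameter left at
 * its new IVPU_SCHED_MODE_AUTO (-1) default, the scheduler choice is
 * made here (currently falling back to VPU_SCHEDULING_MODE_OS), while
 * explicit values from the module parameter are passed through
 * unchanged.
 */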
static int ivpu_fw_parse(struct ivpu_device *vdev)
{
struct ivpu_fw_info *fw = vdev->fw;
@@ -215,8 +224,10 @@ static int ivpu_fw_parse(struct ivpu_device *vdev)
fw->dvfs_mode = 0;
fw->sched_mode = ivpu_fw_sched_mode_select(vdev, fw_hdr);
fw->primary_preempt_buf_size = fw_hdr->preemption_buffer_1_size;
fw->secondary_preempt_buf_size = fw_hdr->preemption_buffer_2_size;
ivpu_info(vdev, "Scheduler mode: %s\n", fw->sched_mode ? "HW" : "OS");
if (fw_hdr->ro_section_start_address && !is_within_range(fw_hdr->ro_section_start_address,
fw_hdr->ro_section_size,
@@ -545,7 +556,6 @@ void ivpu_fw_boot_params_setup(struct ivpu_device *vdev, struct vpu_boot_params
boot_params->magic = VPU_BOOT_PARAMS_MAGIC;
boot_params->vpu_id = to_pci_dev(vdev->drm.dev)->bus->number;
boot_params->frequency = ivpu_hw_pll_freq_get(vdev);
/*
* This param is a debug firmware feature. It switches default clock
@@ -605,8 +615,8 @@ void ivpu_fw_boot_params_setup(struct ivpu_device *vdev, struct vpu_boot_params
boot_params->punit_telemetry_sram_base = ivpu_hw_telemetry_offset_get(vdev);
boot_params->punit_telemetry_sram_size = ivpu_hw_telemetry_size_get(vdev);
boot_params->vpu_telemetry_enable = ivpu_hw_telemetry_enable_get(vdev);
boot_params->vpu_scheduling_mode = vdev->hw->sched_mode;
if (vdev->hw->sched_mode == VPU_SCHEDULING_MODE_HW)
boot_params->vpu_scheduling_mode = vdev->fw->sched_mode;
if (vdev->fw->sched_mode == VPU_SCHEDULING_MODE_HW)
boot_params->vpu_focus_present_timer_ms = IVPU_FOCUS_PRESENT_TIMER_MS;
boot_params->dvfs_mode = vdev->fw->dvfs_mode;
if (!IVPU_WA(disable_d0i3_msg))

View File

@@ -6,6 +6,8 @@
#ifndef __IVPU_FW_H__
#define __IVPU_FW_H__
#include "vpu_jsm_api.h"
#define FW_VERSION_HEADER_SIZE SZ_4K
#define FW_VERSION_STR_SIZE SZ_256
@@ -36,6 +38,7 @@ struct ivpu_fw_info {
u32 secondary_preempt_buf_size;
u64 read_only_addr;
u32 read_only_size;
u32 sched_mode;
};
int ivpu_fw_init(struct ivpu_device *vdev);

View File

@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (C) 2020-2024 Intel Corporation
* Copyright (C) 2020-2025 Intel Corporation
*/
#ifndef __IVPU_HW_H__
@@ -46,7 +46,6 @@ struct ivpu_hw_info {
u32 profiling_freq;
} pll;
u32 tile_fuse;
u32 sched_mode;
u32 sku;
u16 config;
int dma_bits;
@@ -87,9 +86,9 @@ static inline u64 ivpu_hw_range_size(const struct ivpu_addr_range *range)
return range->end - range->start;
}
static inline u32 ivpu_hw_ratio_to_freq(struct ivpu_device *vdev, u32 ratio)
static inline u32 ivpu_hw_dpu_max_freq_get(struct ivpu_device *vdev)
{
return ivpu_hw_btrs_ratio_to_freq(vdev, ratio);
return ivpu_hw_btrs_dpu_max_freq_get(vdev);
}
static inline void ivpu_hw_irq_clear(struct ivpu_device *vdev)
@@ -97,11 +96,6 @@ static inline void ivpu_hw_irq_clear(struct ivpu_device *vdev)
ivpu_hw_ip_irq_clear(vdev);
}
static inline u32 ivpu_hw_pll_freq_get(struct ivpu_device *vdev)
{
return ivpu_hw_btrs_pll_freq_get(vdev);
}
static inline u32 ivpu_hw_profiling_freq_get(struct ivpu_device *vdev)
{
return vdev->hw->pll.profiling_freq;

View File

@@ -1,8 +1,10 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2020-2024 Intel Corporation
* Copyright (C) 2020-2025 Intel Corporation
*/
#include <linux/units.h>
#include "ivpu_drv.h"
#include "ivpu_hw.h"
#include "ivpu_hw_btrs.h"
@@ -28,17 +30,13 @@
#define BTRS_LNL_ALL_IRQ_MASK ((u32)-1)
#define BTRS_MTL_WP_CONFIG_1_TILE_5_3_RATIO WP_CONFIG(MTL_CONFIG_1_TILE, MTL_PLL_RATIO_5_3)
#define BTRS_MTL_WP_CONFIG_1_TILE_4_3_RATIO WP_CONFIG(MTL_CONFIG_1_TILE, MTL_PLL_RATIO_4_3)
#define BTRS_MTL_WP_CONFIG_2_TILE_5_3_RATIO WP_CONFIG(MTL_CONFIG_2_TILE, MTL_PLL_RATIO_5_3)
#define BTRS_MTL_WP_CONFIG_2_TILE_4_3_RATIO WP_CONFIG(MTL_CONFIG_2_TILE, MTL_PLL_RATIO_4_3)
#define BTRS_MTL_WP_CONFIG_0_TILE_PLL_OFF WP_CONFIG(0, 0)
#define PLL_CDYN_DEFAULT 0x80
#define PLL_EPP_DEFAULT 0x80
#define PLL_CONFIG_DEFAULT 0x0
#define PLL_SIMULATION_FREQ 10000000
#define PLL_REF_CLK_FREQ 50000000
#define PLL_REF_CLK_FREQ 50000000ull
#define PLL_RATIO_TO_FREQ(x) ((x) * PLL_REF_CLK_FREQ)
#define PLL_TIMEOUT_US (1500 * USEC_PER_MSEC)
#define IDLE_TIMEOUT_US (5 * USEC_PER_MSEC)
#define TIMEOUT_US (150 * USEC_PER_MSEC)
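/*
 * Illustrative arithmetic for the "ull" suffix above (not part of the
 * patch): with a plain int constant, PLL_RATIO_TO_FREQ() multiplies in
 * 32 bits, so a ratio of 100 gives 100 * 50000000 = 5000000000, which
 * wraps past U32_MAX (4294967295) to 705032704 Hz. The unsigned long
 * long constant promotes the whole expression to 64-bit math.
 */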
@@ -62,6 +60,8 @@
#define DCT_ENABLE 0x1
#define DCT_DISABLE 0x0
static u32 pll_ratio_to_dpu_freq(struct ivpu_device *vdev, u32 ratio);
int ivpu_hw_btrs_irqs_clear_with_0_mtl(struct ivpu_device *vdev)
{
REGB_WR32(VPU_HW_BTRS_MTL_INTERRUPT_STAT, BTRS_MTL_ALL_IRQ_MASK);
@@ -162,8 +162,7 @@ static int info_init_mtl(struct ivpu_device *vdev)
hw->tile_fuse = BTRS_MTL_TILE_FUSE_ENABLE_BOTH;
hw->sku = BTRS_MTL_TILE_SKU_BOTH;
hw->config = BTRS_MTL_WP_CONFIG_2_TILE_4_3_RATIO;
hw->sched_mode = ivpu_sched_mode;
hw->config = WP_CONFIG(MTL_CONFIG_2_TILE, MTL_PLL_RATIO_4_3);
return 0;
}
@@ -178,7 +177,6 @@ static int info_init_lnl(struct ivpu_device *vdev)
if (ret)
return ret;
hw->sched_mode = ivpu_sched_mode;
hw->tile_fuse = tile_fuse_config;
hw->pll.profiling_freq = PLL_PROFILING_FREQ_DEFAULT;
@@ -346,8 +344,8 @@ int ivpu_hw_btrs_wp_drive(struct ivpu_device *vdev, bool enable)
prepare_wp_request(vdev, &wp, enable);
ivpu_dbg(vdev, PM, "PLL workpoint request: %u Hz, config: 0x%x, epp: 0x%x, cdyn: 0x%x\n",
PLL_RATIO_TO_FREQ(wp.target), wp.cfg, wp.epp, wp.cdyn);
ivpu_dbg(vdev, PM, "PLL workpoint request: %lu MHz, config: 0x%x, epp: 0x%x, cdyn: 0x%x\n",
pll_ratio_to_dpu_freq(vdev, wp.target) / HZ_PER_MHZ, wp.cfg, wp.epp, wp.cdyn);
ret = wp_request_send(vdev, &wp);
if (ret) {
@@ -588,6 +586,39 @@ int ivpu_hw_btrs_wait_for_idle(struct ivpu_device *vdev)
return REGB_POLL_FLD(VPU_HW_BTRS_LNL_VPU_STATUS, IDLE, 0x1, IDLE_TIMEOUT_US);
}
static u32 pll_config_get_mtl(struct ivpu_device *vdev)
{
return REGB_RD32(VPU_HW_BTRS_MTL_CURRENT_PLL);
}
static u32 pll_config_get_lnl(struct ivpu_device *vdev)
{
return REGB_RD32(VPU_HW_BTRS_LNL_PLL_FREQ);
}
static u32 pll_ratio_to_dpu_freq_mtl(u16 ratio)
{
return (PLL_RATIO_TO_FREQ(ratio) * 2) / 3;
}
static u32 pll_ratio_to_dpu_freq_lnl(u16 ratio)
{
return PLL_RATIO_TO_FREQ(ratio) / 2;
}
static u32 pll_ratio_to_dpu_freq(struct ivpu_device *vdev, u32 ratio)
{
if (ivpu_hw_btrs_gen(vdev) == IVPU_HW_BTRS_MTL)
return pll_ratio_to_dpu_freq_mtl(ratio);
else
return pll_ratio_to_dpu_freq_lnl(ratio);
}
u32 ivpu_hw_btrs_dpu_max_freq_get(struct ivpu_device *vdev)
{
return pll_ratio_to_dpu_freq(vdev, vdev->hw->pll.max_ratio);
}
/* Handler for IRQs from Buttress core (irqB) */
bool ivpu_hw_btrs_irq_handler_mtl(struct ivpu_device *vdev, int irq)
{
@@ -597,9 +628,12 @@ bool ivpu_hw_btrs_irq_handler_mtl(struct ivpu_device *vdev, int irq)
if (!status)
return false;
if (REG_TEST_FLD(VPU_HW_BTRS_MTL_INTERRUPT_STAT, FREQ_CHANGE, status))
ivpu_dbg(vdev, IRQ, "FREQ_CHANGE irq: %08x",
REGB_RD32(VPU_HW_BTRS_MTL_CURRENT_PLL));
if (REG_TEST_FLD(VPU_HW_BTRS_MTL_INTERRUPT_STAT, FREQ_CHANGE, status)) {
u32 pll = pll_config_get_mtl(vdev);
ivpu_dbg(vdev, IRQ, "FREQ_CHANGE irq, wp %08x, %lu MHz",
pll, pll_ratio_to_dpu_freq_mtl(pll) / HZ_PER_MHZ);
}
if (REG_TEST_FLD(VPU_HW_BTRS_MTL_INTERRUPT_STAT, ATS_ERR, status)) {
ivpu_err(vdev, "ATS_ERR irq 0x%016llx", REGB_RD64(VPU_HW_BTRS_MTL_ATS_ERR_LOG_0));
@@ -649,8 +683,12 @@ bool ivpu_hw_btrs_irq_handler_lnl(struct ivpu_device *vdev, int irq)
ivpu_err_ratelimited(vdev, "IRQ FIFO full\n");
}
if (REG_TEST_FLD(VPU_HW_BTRS_LNL_INTERRUPT_STAT, FREQ_CHANGE, status))
ivpu_dbg(vdev, IRQ, "FREQ_CHANGE irq: %08x", REGB_RD32(VPU_HW_BTRS_LNL_PLL_FREQ));
if (REG_TEST_FLD(VPU_HW_BTRS_LNL_INTERRUPT_STAT, FREQ_CHANGE, status)) {
u32 pll = pll_config_get_lnl(vdev);
ivpu_dbg(vdev, IRQ, "FREQ_CHANGE irq, wp %08x, %lu MHz",
pll, pll_ratio_to_dpu_freq_lnl(pll) / HZ_PER_MHZ);
}
if (REG_TEST_FLD(VPU_HW_BTRS_LNL_INTERRUPT_STAT, ATS_ERR, status)) {
ivpu_err(vdev, "ATS_ERR LOG1 0x%08x ATS_ERR_LOG2 0x%08x\n",
@@ -733,60 +771,6 @@ void ivpu_hw_btrs_dct_set_status(struct ivpu_device *vdev, bool enable, u32 acti
REGB_WR32(VPU_HW_BTRS_LNL_PCODE_MAILBOX_STATUS, val);
}
static u32 pll_ratio_to_freq_mtl(u32 ratio, u32 config)
{
u32 pll_clock = PLL_REF_CLK_FREQ * ratio;
u32 cpu_clock;
if ((config & 0xff) == MTL_PLL_RATIO_4_3)
cpu_clock = pll_clock * 2 / 4;
else
cpu_clock = pll_clock * 2 / 5;
return cpu_clock;
}
u32 ivpu_hw_btrs_ratio_to_freq(struct ivpu_device *vdev, u32 ratio)
{
struct ivpu_hw_info *hw = vdev->hw;
if (ivpu_hw_btrs_gen(vdev) == IVPU_HW_BTRS_MTL)
return pll_ratio_to_freq_mtl(ratio, hw->config);
else
return PLL_RATIO_TO_FREQ(ratio);
}
static u32 pll_freq_get_mtl(struct ivpu_device *vdev)
{
u32 pll_curr_ratio;
pll_curr_ratio = REGB_RD32(VPU_HW_BTRS_MTL_CURRENT_PLL);
pll_curr_ratio &= VPU_HW_BTRS_MTL_CURRENT_PLL_RATIO_MASK;
if (!ivpu_is_silicon(vdev))
return PLL_SIMULATION_FREQ;
return pll_ratio_to_freq_mtl(pll_curr_ratio, vdev->hw->config);
}
static u32 pll_freq_get_lnl(struct ivpu_device *vdev)
{
u32 pll_curr_ratio;
pll_curr_ratio = REGB_RD32(VPU_HW_BTRS_LNL_PLL_FREQ);
pll_curr_ratio &= VPU_HW_BTRS_LNL_PLL_FREQ_RATIO_MASK;
return PLL_RATIO_TO_FREQ(pll_curr_ratio);
}
u32 ivpu_hw_btrs_pll_freq_get(struct ivpu_device *vdev)
{
if (ivpu_hw_btrs_gen(vdev) == IVPU_HW_BTRS_MTL)
return pll_freq_get_mtl(vdev);
else
return pll_freq_get_lnl(vdev);
}
u32 ivpu_hw_btrs_telemetry_offset_get(struct ivpu_device *vdev)
{
if (ivpu_hw_btrs_gen(vdev) == IVPU_HW_BTRS_MTL)

View File

@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (C) 2020-2024 Intel Corporation
* Copyright (C) 2020-2025 Intel Corporation
*/
#ifndef __IVPU_HW_BTRS_H__
@@ -13,7 +13,6 @@
#define PLL_PROFILING_FREQ_DEFAULT 38400000
#define PLL_PROFILING_FREQ_HIGH 400000000
#define PLL_RATIO_TO_FREQ(x) ((x) * PLL_REF_CLK_FREQ)
#define DCT_DEFAULT_ACTIVE_PERCENT 15u
#define DCT_PERIOD_US 35300u
@@ -32,12 +31,11 @@ int ivpu_hw_btrs_ip_reset(struct ivpu_device *vdev);
void ivpu_hw_btrs_profiling_freq_reg_set_lnl(struct ivpu_device *vdev);
void ivpu_hw_btrs_ats_print_lnl(struct ivpu_device *vdev);
void ivpu_hw_btrs_clock_relinquish_disable_lnl(struct ivpu_device *vdev);
u32 ivpu_hw_btrs_dpu_max_freq_get(struct ivpu_device *vdev);
bool ivpu_hw_btrs_irq_handler_mtl(struct ivpu_device *vdev, int irq);
bool ivpu_hw_btrs_irq_handler_lnl(struct ivpu_device *vdev, int irq);
int ivpu_hw_btrs_dct_get_request(struct ivpu_device *vdev, bool *enable);
void ivpu_hw_btrs_dct_set_status(struct ivpu_device *vdev, bool enable, u32 dct_percent);
u32 ivpu_hw_btrs_pll_freq_get(struct ivpu_device *vdev);
u32 ivpu_hw_btrs_ratio_to_freq(struct ivpu_device *vdev, u32 ratio);
u32 ivpu_hw_btrs_telemetry_offset_get(struct ivpu_device *vdev);
u32 ivpu_hw_btrs_telemetry_size_get(struct ivpu_device *vdev);
u32 ivpu_hw_btrs_telemetry_enable_get(struct ivpu_device *vdev);

View File

@@ -37,7 +37,7 @@ static int ivpu_preemption_buffers_create(struct ivpu_device *vdev,
u64 secondary_size = ALIGN(vdev->fw->secondary_preempt_buf_size, PAGE_SIZE);
struct ivpu_addr_range range;
if (vdev->hw->sched_mode != VPU_SCHEDULING_MODE_HW)
if (vdev->fw->sched_mode != VPU_SCHEDULING_MODE_HW)
return 0;
range.start = vdev->hw->ranges.user.end - (primary_size * IVPU_NUM_CMDQS_PER_CTX);
@@ -68,7 +68,7 @@ err_free_primary:
static void ivpu_preemption_buffers_free(struct ivpu_device *vdev,
struct ivpu_file_priv *file_priv, struct ivpu_cmdq *cmdq)
{
if (vdev->hw->sched_mode != VPU_SCHEDULING_MODE_HW)
if (vdev->fw->sched_mode != VPU_SCHEDULING_MODE_HW)
return;
drm_WARN_ON(&vdev->drm, !cmdq->primary_preempt_buf);
@@ -149,7 +149,7 @@ static int ivpu_register_db(struct ivpu_file_priv *file_priv, struct ivpu_cmdq *
struct ivpu_device *vdev = file_priv->vdev;
int ret;
if (vdev->hw->sched_mode == VPU_SCHEDULING_MODE_HW)
if (vdev->fw->sched_mode == VPU_SCHEDULING_MODE_HW)
ret = ivpu_jsm_hws_register_db(vdev, file_priv->ctx.id, cmdq->db_id, cmdq->db_id,
cmdq->mem->vpu_addr, ivpu_bo_size(cmdq->mem));
else
@@ -184,7 +184,7 @@ ivpu_cmdq_init(struct ivpu_file_priv *file_priv, struct ivpu_cmdq *cmdq, u16 eng
jobq_header->tail = 0;
wmb(); /* Flush WC buffer for jobq->header */
if (vdev->hw->sched_mode == VPU_SCHEDULING_MODE_HW) {
if (vdev->fw->sched_mode == VPU_SCHEDULING_MODE_HW) {
ret = ivpu_hws_cmdq_init(file_priv, cmdq, engine, priority);
if (ret)
return ret;
@@ -211,7 +211,7 @@ static int ivpu_cmdq_fini(struct ivpu_file_priv *file_priv, struct ivpu_cmdq *cm
cmdq->db_registered = false;
if (vdev->hw->sched_mode == VPU_SCHEDULING_MODE_HW) {
if (vdev->fw->sched_mode == VPU_SCHEDULING_MODE_HW) {
ret = ivpu_jsm_hws_destroy_cmdq(vdev, file_priv->ctx.id, cmdq->db_id);
if (!ret)
ivpu_dbg(vdev, JOB, "Command queue %d destroyed\n", cmdq->db_id);
@@ -335,7 +335,7 @@ void ivpu_context_abort_locked(struct ivpu_file_priv *file_priv)
ivpu_cmdq_fini_all(file_priv);
if (vdev->hw->sched_mode == VPU_SCHEDULING_MODE_OS)
if (vdev->fw->sched_mode == VPU_SCHEDULING_MODE_OS)
ivpu_jsm_context_release(vdev, file_priv->ctx.id);
}
@@ -361,7 +361,7 @@ static int ivpu_cmdq_push_job(struct ivpu_cmdq *cmdq, struct ivpu_job *job)
if (unlikely(ivpu_test_mode & IVPU_TEST_MODE_NULL_SUBMISSION))
entry->flags = VPU_JOB_FLAGS_NULL_SUBMISSION_MASK;
if (vdev->hw->sched_mode == VPU_SCHEDULING_MODE_HW &&
if (vdev->fw->sched_mode == VPU_SCHEDULING_MODE_HW &&
(unlikely(!(ivpu_test_mode & IVPU_TEST_MODE_PREEMPTION_DISABLE)))) {
entry->primary_preempt_buf_addr = cmdq->primary_preempt_buf->vpu_addr;
entry->primary_preempt_buf_size = ivpu_bo_size(cmdq->primary_preempt_buf);

View File

@@ -6,6 +6,8 @@
#include <linux/device.h>
#include <linux/err.h>
#include "ivpu_drv.h"
#include "ivpu_fw.h"
#include "ivpu_hw.h"
#include "ivpu_sysfs.h"
@@ -39,8 +41,30 @@ npu_busy_time_us_show(struct device *dev, struct device_attribute *attr, char *b
static DEVICE_ATTR_RO(npu_busy_time_us);
/**
* DOC: sched_mode
*
* The sched_mode attribute reports the current NPU scheduling mode.
*
* It returns one of the following strings:
* - "HW" - Hardware Scheduler mode
* - "OS" - Operating System Scheduler mode
*
*/
static ssize_t
sched_mode_show(struct device *dev, struct device_attribute *attr, char *buf)
{
struct drm_device *drm = dev_get_drvdata(dev);
struct ivpu_device *vdev = to_ivpu_device(drm);
return sysfs_emit(buf, "%s\n", vdev->fw->sched_mode ? "HW" : "OS");
}
static DEVICE_ATTR_RO(sched_mode);
static struct attribute *ivpu_dev_attrs[] = {
&dev_attr_npu_busy_time_us.attr,
&dev_attr_sched_mode.attr,
NULL,
};
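/*
 * Reading the new attribute from userspace (path illustrative, under
 * the NPU's PCI device directory):
 *
 *	$ cat /sys/bus/pci/devices/0000:00:0b.0/sched_mode
 *	OS
 */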

View File

@@ -2301,6 +2301,34 @@ static const struct dmi_system_id acpi_ec_no_wakeup[] = {
DMI_MATCH(DMI_PRODUCT_FAMILY, "103C_5336AN HP ZHAN 66 Pro"),
},
},
/*
* Lenovo Legion Go S; touchscreen blocks HW sleep when woken up from EC
* https://gitlab.freedesktop.org/drm/amd/-/issues/3929
*/
{
.matches = {
DMI_MATCH(DMI_BOARD_VENDOR, "LENOVO"),
DMI_MATCH(DMI_PRODUCT_NAME, "83L3"),
}
},
{
.matches = {
DMI_MATCH(DMI_BOARD_VENDOR, "LENOVO"),
DMI_MATCH(DMI_PRODUCT_NAME, "83N6"),
}
},
{
.matches = {
DMI_MATCH(DMI_BOARD_VENDOR, "LENOVO"),
DMI_MATCH(DMI_PRODUCT_NAME, "83Q2"),
}
},
{
.matches = {
DMI_MATCH(DMI_BOARD_VENDOR, "LENOVO"),
DMI_MATCH(DMI_PRODUCT_NAME, "83Q3"),
}
},
{ },
};

View File

@@ -229,7 +229,7 @@ static int acpi_pptt_leaf_node(struct acpi_table_header *table_hdr,
node_entry = ACPI_PTR_DIFF(node, table_hdr);
entry = ACPI_ADD_PTR(struct acpi_subtable_header, table_hdr,
sizeof(struct acpi_table_pptt));
proc_sz = sizeof(struct acpi_pptt_processor *);
proc_sz = sizeof(struct acpi_pptt_processor);
while ((unsigned long)entry + proc_sz < table_end) {
cpu_node = (struct acpi_pptt_processor *)entry;
@@ -270,7 +270,7 @@ static struct acpi_pptt_processor *acpi_find_processor_node(struct acpi_table_he
table_end = (unsigned long)table_hdr + table_hdr->length;
entry = ACPI_ADD_PTR(struct acpi_subtable_header, table_hdr,
sizeof(struct acpi_table_pptt));
proc_sz = sizeof(struct acpi_pptt_processor *);
proc_sz = sizeof(struct acpi_pptt_processor);
/* find the processor structure associated with this cpuid */
while ((unsigned long)entry + proc_sz < table_end) {
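/*
 * The bug class in miniature (illustrative): on a 64-bit kernel
 * sizeof(struct acpi_pptt_processor *) is 8 bytes (a pointer), while
 * sizeof(struct acpi_pptt_processor) is the full subtable header, so
 * the old bound could step past table_end while parsing the last
 * processor node.
 */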

View File

@@ -2325,8 +2325,8 @@ static unsigned int ata_msense_control_ata_feature(struct ata_device *dev,
*/
put_unaligned_be16(ATA_FEATURE_SUB_MPAGE_LEN - 4, &buf[2]);
if (dev->flags & ATA_DFLAG_CDL)
buf[4] = 0x02; /* Support T2A and T2B pages */
if (dev->flags & ATA_DFLAG_CDL_ENABLED)
buf[4] = 0x02; /* T2A and T2B pages enabled */
else
buf[4] = 0;
@@ -3734,12 +3734,11 @@ static int ata_mselect_control_spg0(struct ata_queued_cmd *qc,
}
/*
* Translate MODE SELECT control mode page, sub-pages f2h (ATA feature mode
* Translate MODE SELECT control mode page, sub-page f2h (ATA feature mode
* page) into a SET FEATURES command.
*/
static unsigned int ata_mselect_control_ata_feature(struct ata_queued_cmd *qc,
const u8 *buf, int len,
u16 *fp)
static int ata_mselect_control_ata_feature(struct ata_queued_cmd *qc,
const u8 *buf, int len, u16 *fp)
{
struct ata_device *dev = qc->dev;
struct ata_taskfile *tf = &qc->tf;
@@ -3757,17 +3756,27 @@ static unsigned int ata_mselect_control_ata_feature(struct ata_queued_cmd *qc,
/* Check cdl_ctrl */
switch (buf[0] & 0x03) {
case 0:
/* Disable CDL */
/* Disable CDL if it is enabled */
if (!(dev->flags & ATA_DFLAG_CDL_ENABLED))
return 0;
ata_dev_dbg(dev, "Disabling CDL\n");
cdl_action = 0;
dev->flags &= ~ATA_DFLAG_CDL_ENABLED;
break;
case 0x02:
/* Enable CDL T2A/T2B: NCQ priority must be disabled */
/*
* Enable CDL if not already enabled. Since this is mutually
* exclusive with NCQ priority, allow this only if NCQ priority
* is disabled.
*/
if (dev->flags & ATA_DFLAG_CDL_ENABLED)
return 0;
if (dev->flags & ATA_DFLAG_NCQ_PRIO_ENABLED) {
ata_dev_err(dev,
"NCQ priority must be disabled to enable CDL\n");
return -EINVAL;
}
ata_dev_dbg(dev, "Enabling CDL\n");
cdl_action = 1;
dev->flags |= ATA_DFLAG_CDL_ENABLED;
break;

View File

@@ -73,6 +73,7 @@ static inline void subsys_put(struct subsys_private *sp)
kset_put(&sp->subsys);
}
struct subsys_private *bus_to_subsys(const struct bus_type *bus);
struct subsys_private *class_to_subsys(const struct class *class);
struct driver_private {
@@ -179,6 +180,22 @@ int driver_add_groups(const struct device_driver *drv, const struct attribute_gr
void driver_remove_groups(const struct device_driver *drv, const struct attribute_group **groups);
void device_driver_detach(struct device *dev);
static inline void device_set_driver(struct device *dev, const struct device_driver *drv)
{
/*
* Most (all?) read accesses to dev->driver happen either
* while holding device lock or in bus/driver code that is only
* invoked when the device is bound to a driver and there is no
* concern of the pointer being changed while it is being read.
* However, when reading a device's uevent file we read the driver
* pointer without taking the device lock (so we do not block there
* for an arbitrary amount of time). We use WRITE_ONCE() here to prevent
* tearing so that READ_ONCE() can safely be used in uevent code.
*/
// FIXME - this cast should not be needed "soon"
WRITE_ONCE(dev->driver, (struct device_driver *)drv);
}
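/*
 * The reader side this pairs with lives in dev_driver_uevent() in
 * core.c (part of this same series):
 *
 *	struct device_driver *drv = READ_ONCE(dev->driver);
 *
 * WRITE_ONCE()/READ_ONCE() only rule out load/store tearing; any
 * ordering still comes from the locks described above.
 */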
int devres_release_all(struct device *dev);
void device_block_probing(void);
void device_unblock_probing(void);

View File

@@ -57,7 +57,7 @@ static int __must_check bus_rescan_devices_helper(struct device *dev,
* NULL. A call to subsys_put() must be done when finished with the pointer in
* order for it to be properly freed.
*/
static struct subsys_private *bus_to_subsys(const struct bus_type *bus)
struct subsys_private *bus_to_subsys(const struct bus_type *bus)
{
struct subsys_private *sp = NULL;
struct kobject *kobj;

View File

@@ -2624,6 +2624,35 @@ static const char *dev_uevent_name(const struct kobject *kobj)
return NULL;
}
/*
* Try filling in the "DRIVER=<name>" uevent variable for a device. Because this
* function may race with binding and unbinding the device from a driver,
* we need to be careful. Binding is generally safe, at worst we miss the
* fact that the device is already bound to a driver (but the driver
* information that is delivered through uevents is best-effort, it may
* become obsolete as soon as it is generated anyway). Unbinding is more
* risky as the driver pointer is transitioning to NULL, so READ_ONCE()
* should be used to make sure we are dealing with the same pointer, and
* to ensure that the driver structure is not going to disappear from
* under us we take the bus' drivers klist lock. The assumption is that
* only a registered driver can be bound to a device, and that bus code
* will take the same lock to unregister a driver.
*/
static void dev_driver_uevent(const struct device *dev, struct kobj_uevent_env *env)
{
struct subsys_private *sp = bus_to_subsys(dev->bus);
if (sp) {
scoped_guard(spinlock, &sp->klist_drivers.k_lock) {
struct device_driver *drv = READ_ONCE(dev->driver);
if (drv)
add_uevent_var(env, "DRIVER=%s", drv->name);
}
subsys_put(sp);
}
}
static int dev_uevent(const struct kobject *kobj, struct kobj_uevent_env *env)
{
const struct device *dev = kobj_to_dev(kobj);
@@ -2655,8 +2684,8 @@ static int dev_uevent(const struct kobject *kobj, struct kobj_uevent_env *env)
if (dev->type && dev->type->name)
add_uevent_var(env, "DEVTYPE=%s", dev->type->name);
if (dev->driver)
add_uevent_var(env, "DRIVER=%s", dev->driver->name);
/* Add "DRIVER=%s" variable if the device is bound to a driver */
dev_driver_uevent(dev, env);
/* Add common DT information about the device */
of_device_uevent(dev, env);
@@ -2726,11 +2755,8 @@ static ssize_t uevent_show(struct device *dev, struct device_attribute *attr,
if (!env)
return -ENOMEM;
/* Synchronize with really_probe() */
device_lock(dev);
/* let the kset specific function add its keys */
retval = kset->uevent_ops->uevent(&dev->kobj, env);
device_unlock(dev);
if (retval)
goto out;
@@ -3700,7 +3726,7 @@ done:
device_pm_remove(dev);
dpm_sysfs_remove(dev);
DPMError:
dev->driver = NULL;
device_set_driver(dev, NULL);
bus_remove_device(dev);
BusError:
device_remove_attrs(dev);

View File

@@ -550,7 +550,7 @@ static void device_unbind_cleanup(struct device *dev)
arch_teardown_dma_ops(dev);
kfree(dev->dma_range_map);
dev->dma_range_map = NULL;
dev->driver = NULL;
device_set_driver(dev, NULL);
dev_set_drvdata(dev, NULL);
if (dev->pm_domain && dev->pm_domain->dismiss)
dev->pm_domain->dismiss(dev);
@@ -629,8 +629,7 @@ static int really_probe(struct device *dev, const struct device_driver *drv)
}
re_probe:
// FIXME - this cast should not be needed "soon"
dev->driver = (struct device_driver *)drv;
device_set_driver(dev, drv);
/* If using pinctrl, bind pins now before probing */
ret = pinctrl_bind_pins(dev);
@@ -1014,7 +1013,7 @@ static int __device_attach(struct device *dev, bool allow_async)
if (ret == 0)
ret = 1;
else {
dev->driver = NULL;
device_set_driver(dev, NULL);
ret = 0;
}
} else {

View File

@@ -315,7 +315,7 @@ static int __init misc_init(void)
goto fail_remove;
err = -EIO;
if (register_chrdev(MISC_MAJOR, "misc", &misc_fops))
if (__register_chrdev(MISC_MAJOR, 0, MINORMASK + 1, "misc", &misc_fops))
goto fail_printk;
return 0;
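/*
 * Why the change above matters (background, not from the patch text):
 * register_chrdev() is shorthand for __register_chrdev(major, 0, 256,
 * ...), so misc minors above 255 were never actually reserved for
 * MISC_MAJOR; MINORMASK + 1 (1 << 20) claims the full minor space.
 */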

View File

@@ -1579,8 +1579,8 @@ static void handle_control_message(struct virtio_device *vdev,
break;
case VIRTIO_CONSOLE_RESIZE: {
struct {
__u16 rows;
__u16 cols;
__virtio16 rows;
__virtio16 cols;
} size;
if (!is_console_port(port))
@@ -1588,7 +1588,8 @@ static void handle_control_message(struct virtio_device *vdev,
memcpy(&size, buf->buf + buf->offset + sizeof(*cpkt),
sizeof(size));
set_console_size(port, size.rows, size.cols);
set_console_size(port, virtio16_to_cpu(vdev, size.rows),
virtio16_to_cpu(vdev, size.cols));
port->cons.hvc->irq_requested = 1;
resize_console(port);
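/*
 * Background on the type change: __virtio16 fields are stored in the
 * device's negotiated endianness, not the guest's, so each field must
 * go through virtio16_to_cpu(vdev, ...) before use, e.g.:
 *
 *	u16 rows = virtio16_to_cpu(vdev, size.rows);
 *
 * A raw __u16 read is only correct when both sides happen to agree.
 */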

View File

@@ -5264,6 +5264,10 @@ of_clk_get_hw_from_clkspec(struct of_phandle_args *clkspec)
if (!clkspec)
return ERR_PTR(-EINVAL);
/* Check if node in clkspec is in disabled/fail state */
if (!of_device_is_available(clkspec->np))
return ERR_PTR(-ENOENT);
mutex_lock(&of_clk_mutex);
list_for_each_entry(provider, &of_clk_providers, link) {
if (provider->node == clkspec->np) {

View File

@@ -758,7 +758,7 @@ static void jr3_pci_detach(struct comedi_device *dev)
struct jr3_pci_dev_private *devpriv = dev->private;
if (devpriv)
del_timer_sync(&devpriv->timer);
timer_shutdown_sync(&devpriv->timer);
comedi_pci_detach(dev);
}

View File

@@ -67,7 +67,7 @@ config ARM_VEXPRESS_SPC_CPUFREQ
config ARM_BRCMSTB_AVS_CPUFREQ
tristate "Broadcom STB AVS CPUfreq driver"
depends on (ARCH_BRCMSTB && !ARM_SCMI_CPUFREQ) || COMPILE_TEST
default y
default y if ARCH_BRCMSTB && !ARM_SCMI_CPUFREQ
help
Some Broadcom STB SoCs use a co-processor running proprietary firmware
("AVS") to handle voltage and frequency scaling. This driver provides
@@ -79,7 +79,7 @@ config ARM_HIGHBANK_CPUFREQ
tristate "Calxeda Highbank-based"
depends on ARCH_HIGHBANK || COMPILE_TEST
depends on CPUFREQ_DT && REGULATOR && PL320_MBOX
default m
default m if ARCH_HIGHBANK
help
This adds the CPUFreq driver for Calxeda Highbank SoC
based boards.
@@ -124,7 +124,7 @@ config ARM_MEDIATEK_CPUFREQ
config ARM_MEDIATEK_CPUFREQ_HW
tristate "MediaTek CPUFreq HW driver"
depends on ARCH_MEDIATEK || COMPILE_TEST
default m
default m if ARCH_MEDIATEK
help
Support for the CPUFreq HW driver.
Some MediaTek chipsets have a HW engine to offload the steps
@@ -172,7 +172,7 @@ config ARM_RASPBERRYPI_CPUFREQ
config ARM_S3C64XX_CPUFREQ
bool "Samsung S3C64XX"
depends on CPU_S3C6410 || COMPILE_TEST
default y
default CPU_S3C6410
help
This adds the CPUFreq driver for Samsung S3C6410 SoC.
@@ -181,7 +181,7 @@ config ARM_S3C64XX_CPUFREQ
config ARM_S5PV210_CPUFREQ
bool "Samsung S5PV210 and S5PC110"
depends on CPU_S5PV210 || COMPILE_TEST
default y
default CPU_S5PV210
help
This adds the CPUFreq driver for Samsung S5PV210 and
S5PC110 SoCs.
@@ -205,7 +205,7 @@ config ARM_SCMI_CPUFREQ
config ARM_SPEAR_CPUFREQ
bool "SPEAr CPUFreq support"
depends on PLAT_SPEAR || COMPILE_TEST
default y
default PLAT_SPEAR
help
This adds the CPUFreq driver support for SPEAr SOCs.
@@ -224,7 +224,7 @@ config ARM_TEGRA20_CPUFREQ
tristate "Tegra20/30 CPUFreq support"
depends on ARCH_TEGRA || COMPILE_TEST
depends on CPUFREQ_DT
default y
default ARCH_TEGRA
help
This adds the CPUFreq driver support for Tegra20/30 SOCs.
@@ -232,7 +232,7 @@ config ARM_TEGRA124_CPUFREQ
bool "Tegra124 CPUFreq support"
depends on ARCH_TEGRA || COMPILE_TEST
depends on CPUFREQ_DT
default y
default ARCH_TEGRA
help
This adds the CPUFreq driver support for Tegra124 SOCs.
@@ -247,14 +247,14 @@ config ARM_TEGRA194_CPUFREQ
tristate "Tegra194 CPUFreq support"
depends on ARCH_TEGRA_194_SOC || ARCH_TEGRA_234_SOC || (64BIT && COMPILE_TEST)
depends on TEGRA_BPMP
default y
default ARCH_TEGRA_194_SOC || ARCH_TEGRA_234_SOC
help
This adds CPU frequency driver support for Tegra194 SOCs.
config ARM_TI_CPUFREQ
bool "Texas Instruments CPUFreq support"
depends on ARCH_OMAP2PLUS || ARCH_K3 || COMPILE_TEST
default y
default ARCH_OMAP2PLUS || ARCH_K3
help
This driver enables valid OPPs on the running platform based on
values contained within the SoC in use. Enable this in order to

View File

@@ -103,11 +103,17 @@ static const struct of_device_id apple_soc_cpufreq_of_match[] __maybe_unused = {
static unsigned int apple_soc_cpufreq_get_rate(unsigned int cpu)
{
struct cpufreq_policy *policy = cpufreq_cpu_get_raw(cpu);
struct apple_cpu_priv *priv = policy->driver_data;
struct cpufreq_policy *policy;
struct apple_cpu_priv *priv;
struct cpufreq_frequency_table *p;
unsigned int pstate;
policy = cpufreq_cpu_get_raw(cpu);
if (unlikely(!policy))
return 0;
priv = policy->driver_data;
if (priv->info->cur_pstate_mask) {
u64 reg = readq_relaxed(priv->reg_base + APPLE_DVFS_STATUS);

View File

@@ -767,7 +767,7 @@ static unsigned int cppc_cpufreq_get_rate(unsigned int cpu)
int ret;
if (!policy)
return -ENODEV;
return 0;
cpu_data = policy->driver_data;

View File

@@ -34,11 +34,17 @@ static struct cpufreq_driver scmi_cpufreq_driver;
static unsigned int scmi_cpufreq_get_rate(unsigned int cpu)
{
struct cpufreq_policy *policy = cpufreq_cpu_get_raw(cpu);
struct scmi_data *priv = policy->driver_data;
struct cpufreq_policy *policy;
struct scmi_data *priv;
unsigned long rate;
int ret;
policy = cpufreq_cpu_get_raw(cpu);
if (unlikely(!policy))
return 0;
priv = policy->driver_data;
ret = perf_ops->freq_get(ph, priv->domain_id, &rate, false);
if (ret)
return 0;

View File

@@ -29,9 +29,16 @@ static struct scpi_ops *scpi_ops;
static unsigned int scpi_cpufreq_get_rate(unsigned int cpu)
{
struct cpufreq_policy *policy = cpufreq_cpu_get_raw(cpu);
struct scpi_data *priv = policy->driver_data;
unsigned long rate = clk_get_rate(priv->clk);
struct cpufreq_policy *policy;
struct scpi_data *priv;
unsigned long rate;
policy = cpufreq_cpu_get_raw(cpu);
if (unlikely(!policy))
return 0;
priv = policy->driver_data;
rate = clk_get_rate(priv->clk);
return rate / 1000;
}
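/*
 * Common shape of these get_rate fixes (an illustrative sketch, not a
 * driver in this patch): the raw policy lookup can return NULL while a
 * CPU's policy is being torn down, so it must be checked before
 * driver_data is dereferenced; returning 0 means "rate unknown".
 */
static unsigned int example_cpufreq_get_rate(unsigned int cpu)
{
	struct cpufreq_policy *policy = cpufreq_cpu_get_raw(cpu);

	if (unlikely(!policy))
		return 0;	/* never a negative errno here */

	return policy->cur;
}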

View File

@@ -167,7 +167,9 @@ static int sun50i_cpufreq_get_efuse(void)
struct nvmem_cell *speedbin_nvmem;
const struct of_device_id *match;
struct device *cpu_dev;
u32 *speedbin;
void *speedbin_ptr;
u32 speedbin = 0;
size_t len;
int ret;
cpu_dev = get_cpu_device(0);
@@ -190,14 +192,18 @@ static int sun50i_cpufreq_get_efuse(void)
return dev_err_probe(cpu_dev, PTR_ERR(speedbin_nvmem),
"Could not get nvmem cell\n");
speedbin = nvmem_cell_read(speedbin_nvmem, NULL);
speedbin_ptr = nvmem_cell_read(speedbin_nvmem, &len);
nvmem_cell_put(speedbin_nvmem);
if (IS_ERR(speedbin))
return PTR_ERR(speedbin);
if (IS_ERR(speedbin_ptr))
return PTR_ERR(speedbin_ptr);
ret = opp_data->efuse_xlate(*speedbin);
if (len <= 4)
memcpy(&speedbin, speedbin_ptr, len);
speedbin = le32_to_cpu(speedbin);
kfree(speedbin);
ret = opp_data->efuse_xlate(speedbin);
kfree(speedbin_ptr);
return ret;
};
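/*
 * Note on the rework above: the speedbin cell is not guaranteed to be
 * 4 bytes, so the raw buffer is copied into a zeroed u32 (only when
 * len <= 4) and interpreted as little-endian before being handed to
 * efuse_xlate(); the nvmem buffer itself is freed either way.
 */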

View File

@@ -163,6 +163,12 @@ static int atmel_sha204a_probe(struct i2c_client *client)
i2c_priv->hwrng.name = dev_name(&client->dev);
i2c_priv->hwrng.read = atmel_sha204a_rng_read;
/*
* According to a review by Bill Cox [1], this HWRNG has very low entropy.
* [1] https://www.metzdowd.com/pipermail/cryptography/2014-December/023858.html
*/
i2c_priv->hwrng.quality = 1;
ret = devm_hwrng_register(&client->dev, &i2c_priv->hwrng);
if (ret)
dev_warn(&client->dev, "failed to register RNG (%d)\n", ret);
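/*
 * Scale note for the quality value above: hwrng quality is estimated
 * entropy per 1024 bits of output, so 1 credits roughly 0.1% entropy
 * and keeps this device from being treated as a strong seed source.
 */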

View File

@@ -532,6 +532,7 @@ static const struct pci_device_id sp_pci_table[] = {
{ PCI_VDEVICE(AMD, 0x14CA), (kernel_ulong_t)&dev_vdata[5] },
{ PCI_VDEVICE(AMD, 0x15C7), (kernel_ulong_t)&dev_vdata[6] },
{ PCI_VDEVICE(AMD, 0x1649), (kernel_ulong_t)&dev_vdata[6] },
{ PCI_VDEVICE(AMD, 0x1134), (kernel_ulong_t)&dev_vdata[7] },
{ PCI_VDEVICE(AMD, 0x17E0), (kernel_ulong_t)&dev_vdata[7] },
{ PCI_VDEVICE(AMD, 0x156E), (kernel_ulong_t)&dev_vdata[8] },
/* Last entry must be zero */

View File

@@ -513,7 +513,6 @@ resource_size_t __rcrb_to_component(struct device *dev, struct cxl_rcrb_info *ri
resource_size_t rcrb = ri->base;
void __iomem *addr;
u32 bar0, bar1;
u16 cmd;
u32 id;
if (which == CXL_RCRB_UPSTREAM)
@@ -535,7 +534,6 @@ resource_size_t __rcrb_to_component(struct device *dev, struct cxl_rcrb_info *ri
}
id = readl(addr + PCI_VENDOR_ID);
cmd = readw(addr + PCI_COMMAND);
bar0 = readl(addr + PCI_BASE_ADDRESS_0);
bar1 = readl(addr + PCI_BASE_ADDRESS_1);
iounmap(addr);
@@ -550,8 +548,6 @@ resource_size_t __rcrb_to_component(struct device *dev, struct cxl_rcrb_info *ri
dev_err(dev, "Failed to access Downstream Port RCRB\n");
return CXL_RESOURCE_NONE;
}
if (!(cmd & PCI_COMMAND_MEMORY))
return CXL_RESOURCE_NONE;
/* The RCRB is a Memory Window, and the MEM_TYPE_1M bit is obsolete */
if (bar0 & (PCI_BASE_ADDRESS_MEM_TYPE_1M | PCI_BASE_ADDRESS_SPACE_IO))
return CXL_RESOURCE_NONE;

View File

@@ -841,9 +841,9 @@ static int dmatest_func(void *data)
} else {
dma_async_issue_pending(chan);
wait_event_freezable_timeout(thread->done_wait,
done->done,
msecs_to_jiffies(params->timeout));
wait_event_timeout(thread->done_wait,
done->done,
msecs_to_jiffies(params->timeout));
status = dma_async_is_tx_complete(chan, cookie, NULL,
NULL);

View File

@@ -1227,22 +1227,28 @@ static int stratix10_svc_drv_probe(struct platform_device *pdev)
if (!svc->intel_svc_fcs) {
dev_err(dev, "failed to allocate %s device\n", INTEL_FCS);
ret = -ENOMEM;
goto err_unregister_dev;
goto err_unregister_rsu_dev;
}
ret = platform_device_add(svc->intel_svc_fcs);
if (ret) {
platform_device_put(svc->intel_svc_fcs);
goto err_unregister_dev;
goto err_unregister_rsu_dev;
}
ret = of_platform_default_populate(dev_of_node(dev), NULL, dev);
if (ret)
goto err_unregister_fcs_dev;
dev_set_drvdata(dev, svc);
pr_info("Intel Service Layer Driver Initialized\n");
return 0;
err_unregister_dev:
err_unregister_fcs_dev:
platform_device_unregister(svc->intel_svc_fcs);
err_unregister_rsu_dev:
platform_device_unregister(svc->stratix10_svc_rsu);
err_free_kfifo:
kfifo_free(&controller->svc_fifo);
@@ -1256,6 +1262,8 @@ static void stratix10_svc_drv_remove(struct platform_device *pdev)
struct stratix10_svc *svc = dev_get_drvdata(&pdev->dev);
struct stratix10_svc_controller *ctrl = platform_get_drvdata(pdev);
of_platform_depopulate(ctrl->dev);
platform_device_unregister(svc->intel_svc_fcs);
platform_device_unregister(svc->stratix10_svc_rsu);

View File

@@ -259,6 +259,9 @@ static void of_gpio_set_polarity_by_property(const struct device_node *np,
{ "fsl,imx8qm-fec", "phy-reset-gpios", "phy-reset-active-high" },
{ "fsl,s32v234-fec", "phy-reset-gpios", "phy-reset-active-high" },
#endif
#if IS_ENABLED(CONFIG_MMC_ATMELMCI)
{ "atmel,hsmci", "cd-gpios", "cd-inverted" },
#endif
#if IS_ENABLED(CONFIG_PCI_IMX6)
{ "fsl,imx6q-pcie", "reset-gpio", "reset-gpio-active-high" },
{ "fsl,imx6sx-pcie", "reset-gpio", "reset-gpio-active-high" },
@@ -284,9 +287,6 @@ static void of_gpio_set_polarity_by_property(const struct device_node *np,
#if IS_ENABLED(CONFIG_REGULATOR_GPIO)
{ "regulator-gpio", "enable-gpio", "enable-active-high" },
{ "regulator-gpio", "enable-gpios", "enable-active-high" },
#endif
#if IS_ENABLED(CONFIG_MMC_ATMELMCI)
{ "atmel,hsmci", "cd-gpios", "cd-inverted" },
#endif
};
unsigned int i;

View File

@@ -350,7 +350,6 @@ enum amdgpu_kiq_irq {
AMDGPU_CP_KIQ_IRQ_DRIVER0 = 0,
AMDGPU_CP_KIQ_IRQ_LAST
};
#define SRIOV_USEC_TIMEOUT 1200000 /* wait 12 * 100ms for SRIOV */
#define MAX_KIQ_REG_WAIT 5000 /* in usecs, 5ms */
#define MAX_KIQ_REG_BAILOUT_INTERVAL 5 /* in msecs, 5ms */
#define MAX_KIQ_REG_TRY 1000

View File

@@ -1426,9 +1426,11 @@ static int amdgpu_gfx_run_cleaner_shader_job(struct amdgpu_ring *ring)
struct amdgpu_device *adev = ring->adev;
struct drm_gpu_scheduler *sched = &ring->sched;
struct drm_sched_entity entity;
static atomic_t counter;
struct dma_fence *f;
struct amdgpu_job *job;
struct amdgpu_ib *ib;
void *owner;
int i, r;
/* Initialize the scheduler entity */
@@ -1439,9 +1441,15 @@ static int amdgpu_gfx_run_cleaner_shader_job(struct amdgpu_ring *ring)
goto err;
}
r = amdgpu_job_alloc_with_ib(ring->adev, &entity, NULL,
64, 0,
&job);
/*
* Use some unique dummy value as the owner to make sure we execute
* the cleaner shader on each submission. The value just needs to change
* for each submission and is otherwise meaningless.
*/
owner = (void *)(unsigned long)atomic_inc_return(&counter);
r = amdgpu_job_alloc_with_ib(ring->adev, &entity, owner,
64, 0, &job);
if (r)
goto err;

View File

@@ -678,12 +678,10 @@ int amdgpu_gmc_flush_gpu_tlb_pasid(struct amdgpu_device *adev, uint16_t pasid,
uint32_t flush_type, bool all_hub,
uint32_t inst)
{
u32 usec_timeout = amdgpu_sriov_vf(adev) ? SRIOV_USEC_TIMEOUT :
adev->usec_timeout;
struct amdgpu_ring *ring = &adev->gfx.kiq[inst].ring;
struct amdgpu_kiq *kiq = &adev->gfx.kiq[inst];
unsigned int ndw;
int r;
int r, cnt = 0;
uint32_t seq;
/*
@@ -740,10 +738,21 @@ int amdgpu_gmc_flush_gpu_tlb_pasid(struct amdgpu_device *adev, uint16_t pasid,
amdgpu_ring_commit(ring);
spin_unlock(&adev->gfx.kiq[inst].ring_lock);
if (amdgpu_fence_wait_polling(ring, seq, usec_timeout) < 1) {
r = amdgpu_fence_wait_polling(ring, seq, MAX_KIQ_REG_WAIT);
might_sleep();
while (r < 1 && cnt++ < MAX_KIQ_REG_TRY &&
!amdgpu_reset_pending(adev->reset_domain)) {
msleep(MAX_KIQ_REG_BAILOUT_INTERVAL);
r = amdgpu_fence_wait_polling(ring, seq, MAX_KIQ_REG_WAIT);
}
if (cnt > MAX_KIQ_REG_TRY) {
dev_err(adev->dev, "timeout waiting for kiq fence\n");
r = -ETIME;
}
} else
r = 0;
}
error_unlock_reset:
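/*
 * Worked-out retry budget for the loop above (from the constants in
 * amdgpu.h): up to MAX_KIQ_REG_TRY (1000) polls of MAX_KIQ_REG_WAIT
 * (5000 us) with MAX_KIQ_REG_BAILOUT_INTERVAL (5 ms) sleeps between
 * them, i.e. roughly 10 s worst case, with an early exit whenever a
 * GPU reset is already pending.
 */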

View File

@@ -5998,7 +5998,7 @@ static int gfx_v10_0_cp_gfx_load_pfp_microcode(struct amdgpu_device *adev)
}
if (amdgpu_emu_mode == 1)
adev->hdp.funcs->flush_hdp(adev, NULL);
amdgpu_device_flush_hdp(adev, NULL);
tmp = RREG32_SOC15(GC, 0, mmCP_PFP_IC_BASE_CNTL);
tmp = REG_SET_FIELD(tmp, CP_PFP_IC_BASE_CNTL, VMID, 0);
@@ -6076,7 +6076,7 @@ static int gfx_v10_0_cp_gfx_load_ce_microcode(struct amdgpu_device *adev)
}
if (amdgpu_emu_mode == 1)
adev->hdp.funcs->flush_hdp(adev, NULL);
amdgpu_device_flush_hdp(adev, NULL);
tmp = RREG32_SOC15(GC, 0, mmCP_CE_IC_BASE_CNTL);
tmp = REG_SET_FIELD(tmp, CP_CE_IC_BASE_CNTL, VMID, 0);
@@ -6153,7 +6153,7 @@ static int gfx_v10_0_cp_gfx_load_me_microcode(struct amdgpu_device *adev)
}
if (amdgpu_emu_mode == 1)
adev->hdp.funcs->flush_hdp(adev, NULL);
amdgpu_device_flush_hdp(adev, NULL);
tmp = RREG32_SOC15(GC, 0, mmCP_ME_IC_BASE_CNTL);
tmp = REG_SET_FIELD(tmp, CP_ME_IC_BASE_CNTL, VMID, 0);
@@ -6528,7 +6528,7 @@ static int gfx_v10_0_cp_compute_load_microcode(struct amdgpu_device *adev)
}
if (amdgpu_emu_mode == 1)
adev->hdp.funcs->flush_hdp(adev, NULL);
amdgpu_device_flush_hdp(adev, NULL);
tmp = RREG32_SOC15(GC, 0, mmCP_CPC_IC_BASE_CNTL);
tmp = REG_SET_FIELD(tmp, CP_CPC_IC_BASE_CNTL, CACHE_POLICY, 0);

View File

@@ -2327,7 +2327,7 @@ static int gfx_v11_0_config_me_cache(struct amdgpu_device *adev, uint64_t addr)
}
if (amdgpu_emu_mode == 1)
adev->hdp.funcs->flush_hdp(adev, NULL);
amdgpu_device_flush_hdp(adev, NULL);
tmp = RREG32_SOC15(GC, 0, regCP_ME_IC_BASE_CNTL);
tmp = REG_SET_FIELD(tmp, CP_ME_IC_BASE_CNTL, VMID, 0);
@@ -2371,7 +2371,7 @@ static int gfx_v11_0_config_pfp_cache(struct amdgpu_device *adev, uint64_t addr)
}
if (amdgpu_emu_mode == 1)
adev->hdp.funcs->flush_hdp(adev, NULL);
amdgpu_device_flush_hdp(adev, NULL);
tmp = RREG32_SOC15(GC, 0, regCP_PFP_IC_BASE_CNTL);
tmp = REG_SET_FIELD(tmp, CP_PFP_IC_BASE_CNTL, VMID, 0);
@@ -2416,7 +2416,7 @@ static int gfx_v11_0_config_mec_cache(struct amdgpu_device *adev, uint64_t addr)
}
if (amdgpu_emu_mode == 1)
adev->hdp.funcs->flush_hdp(adev, NULL);
amdgpu_device_flush_hdp(adev, NULL);
tmp = RREG32_SOC15(GC, 0, regCP_CPC_IC_BASE_CNTL);
tmp = REG_SET_FIELD(tmp, CP_CPC_IC_BASE_CNTL, CACHE_POLICY, 0);
@@ -3051,7 +3051,7 @@ static int gfx_v11_0_cp_gfx_load_pfp_microcode_rs64(struct amdgpu_device *adev)
amdgpu_bo_unreserve(adev->gfx.pfp.pfp_fw_data_obj);
if (amdgpu_emu_mode == 1)
adev->hdp.funcs->flush_hdp(adev, NULL);
amdgpu_device_flush_hdp(adev, NULL);
WREG32_SOC15(GC, 0, regCP_PFP_IC_BASE_LO,
lower_32_bits(adev->gfx.pfp.pfp_fw_gpu_addr));
@@ -3269,7 +3269,7 @@ static int gfx_v11_0_cp_gfx_load_me_microcode_rs64(struct amdgpu_device *adev)
amdgpu_bo_unreserve(adev->gfx.me.me_fw_data_obj);
if (amdgpu_emu_mode == 1)
adev->hdp.funcs->flush_hdp(adev, NULL);
amdgpu_device_flush_hdp(adev, NULL);
WREG32_SOC15(GC, 0, regCP_ME_IC_BASE_LO,
lower_32_bits(adev->gfx.me.me_fw_gpu_addr));
@@ -4487,7 +4487,7 @@ static int gfx_v11_0_gfxhub_enable(struct amdgpu_device *adev)
if (r)
return r;
adev->hdp.funcs->flush_hdp(adev, NULL);
amdgpu_device_flush_hdp(adev, NULL);
value = (amdgpu_vm_fault_stop == AMDGPU_VM_FAULT_STOP_ALWAYS) ?
false : true;

View File

@@ -2264,7 +2264,7 @@ static int gfx_v12_0_cp_gfx_load_pfp_microcode_rs64(struct amdgpu_device *adev)
amdgpu_bo_unreserve(adev->gfx.pfp.pfp_fw_data_obj);
if (amdgpu_emu_mode == 1)
adev->hdp.funcs->flush_hdp(adev, NULL);
amdgpu_device_flush_hdp(adev, NULL);
WREG32_SOC15(GC, 0, regCP_PFP_IC_BASE_LO,
lower_32_bits(adev->gfx.pfp.pfp_fw_gpu_addr));
@@ -2408,7 +2408,7 @@ static int gfx_v12_0_cp_gfx_load_me_microcode_rs64(struct amdgpu_device *adev)
amdgpu_bo_unreserve(adev->gfx.me.me_fw_data_obj);
if (amdgpu_emu_mode == 1)
adev->hdp.funcs->flush_hdp(adev, NULL);
amdgpu_device_flush_hdp(adev, NULL);
WREG32_SOC15(GC, 0, regCP_ME_IC_BASE_LO,
lower_32_bits(adev->gfx.me.me_fw_gpu_addr));
@@ -3429,7 +3429,7 @@ static int gfx_v12_0_gfxhub_enable(struct amdgpu_device *adev)
if (r)
return r;
adev->hdp.funcs->flush_hdp(adev, NULL);
amdgpu_device_flush_hdp(adev, NULL);
value = (amdgpu_vm_fault_stop == AMDGPU_VM_FAULT_STOP_ALWAYS) ?
false : true;

View File

@@ -265,7 +265,7 @@ static void gmc_v10_0_flush_gpu_tlb(struct amdgpu_device *adev, uint32_t vmid,
ack = hub->vm_inv_eng0_ack + hub->eng_distance * eng;
/* flush hdp cache */
adev->hdp.funcs->flush_hdp(adev, NULL);
amdgpu_device_flush_hdp(adev, NULL);
/* This is necessary for SRIOV as well as for GFXOFF to function
* properly under bare metal
@@ -966,7 +966,7 @@ static int gmc_v10_0_gart_enable(struct amdgpu_device *adev)
adev->hdp.funcs->init_registers(adev);
/* Flush HDP after it is initialized */
adev->hdp.funcs->flush_hdp(adev, NULL);
amdgpu_device_flush_hdp(adev, NULL);
value = (amdgpu_vm_fault_stop == AMDGPU_VM_FAULT_STOP_ALWAYS) ?
false : true;

View File

@@ -226,7 +226,7 @@ static void gmc_v11_0_flush_gpu_tlb(struct amdgpu_device *adev, uint32_t vmid,
ack = hub->vm_inv_eng0_ack + hub->eng_distance * eng;
/* flush hdp cache */
adev->hdp.funcs->flush_hdp(adev, NULL);
amdgpu_device_flush_hdp(adev, NULL);
/* This is necessary for SRIOV as well as for GFXOFF to function
* properly under bare metal
@@ -893,7 +893,7 @@ static int gmc_v11_0_gart_enable(struct amdgpu_device *adev)
return r;
/* Flush HDP after it is initialized */
adev->hdp.funcs->flush_hdp(adev, NULL);
amdgpu_device_flush_hdp(adev, NULL);
value = (amdgpu_vm_fault_stop == AMDGPU_VM_FAULT_STOP_ALWAYS) ?
false : true;

View File

@@ -294,7 +294,7 @@ static void gmc_v12_0_flush_gpu_tlb(struct amdgpu_device *adev, uint32_t vmid,
return;
/* flush hdp cache */
adev->hdp.funcs->flush_hdp(adev, NULL);
amdgpu_device_flush_hdp(adev, NULL);
/* This is necessary for SRIOV as well as for GFXOFF to function
* properly under bare metal
@@ -862,7 +862,7 @@ static int gmc_v12_0_gart_enable(struct amdgpu_device *adev)
return r;
/* Flush HDP after it is initialized */
adev->hdp.funcs->flush_hdp(adev, NULL);
amdgpu_device_flush_hdp(adev, NULL);
value = (amdgpu_vm_fault_stop == AMDGPU_VM_FAULT_STOP_ALWAYS) ?
false : true;

View File

@@ -2351,7 +2351,7 @@ static int gmc_v9_0_hw_init(void *handle)
adev->hdp.funcs->init_registers(adev);
/* After HDP is initialized, flush HDP.*/
adev->hdp.funcs->flush_hdp(adev, NULL);
amdgpu_device_flush_hdp(adev, NULL);
if (amdgpu_vm_fault_stop == AMDGPU_VM_FAULT_STOP_ALWAYS)
value = false;

View File

@@ -532,7 +532,7 @@ static int psp_v11_0_memory_training(struct psp_context *psp, uint32_t ops)
}
memcpy_toio(adev->mman.aper_base_kaddr, buf, sz);
adev->hdp.funcs->flush_hdp(adev, NULL);
amdgpu_device_flush_hdp(adev, NULL);
vfree(buf);
drm_dev_exit(idx);
} else {

Some files were not shown because too many files have changed in this diff.