Mirror of https://github.com/raspberrypi/linux.git
Merge remote-tracking branch 'stable/linux-5.10.y' into rpi-5.10.y
@@ -1,62 +0,0 @@
What:		/sys/bus/iio/devices/iio:deviceX/in_count0_preset
KernelVersion:	4.13
Contact:	fabrice.gasnier@st.com
Description:
		Reading returns the current preset value. Writing sets the
		preset value. Encoder counts continuously from 0 to preset
		value, depending on direction (up/down).

What:		/sys/bus/iio/devices/iio:deviceX/in_count_quadrature_mode_available
KernelVersion:	4.13
Contact:	fabrice.gasnier@st.com
Description:
		Reading returns the list of possible quadrature modes.

What:		/sys/bus/iio/devices/iio:deviceX/in_count0_quadrature_mode
KernelVersion:	4.13
Contact:	fabrice.gasnier@st.com
Description:
		Configure the device counter quadrature modes:

		- non-quadrature:
			Encoder IN1 input serves as the count input (up
			direction).

		- quadrature:
			Encoder IN1 and IN2 inputs are mixed to get direction
			and count.

What:		/sys/bus/iio/devices/iio:deviceX/in_count_polarity_available
KernelVersion:	4.13
Contact:	fabrice.gasnier@st.com
Description:
		Reading returns the list of possible active edges.

What:		/sys/bus/iio/devices/iio:deviceX/in_count0_polarity
KernelVersion:	4.13
Contact:	fabrice.gasnier@st.com
Description:
		Configure the device encoder/counter active edge:

		- rising-edge
		- falling-edge
		- both-edges

		In non-quadrature mode, device counts up on active edge.

		In quadrature mode, encoder counting scenarios are as follows:

		+---------+----------+--------------------+--------------------+
		| Active  | Level on |     IN1 signal     |     IN2 signal     |
		| edge    | opposite +----------+---------+----------+---------+
		|         | signal   |  Rising  | Falling |  Rising  | Falling |
		+---------+----------+----------+---------+----------+---------+
		| Rising  | High ->  |   Down   |    -    |    Up    |    -    |
		| edge    | Low  ->  |    Up    |    -    |   Down   |    -    |
		+---------+----------+----------+---------+----------+---------+
		| Falling | High ->  |    -     |   Up    |    -     |  Down   |
		| edge    | Low  ->  |    -     |  Down   |    -     |   Up    |
		+---------+----------+----------+---------+----------+---------+
		| Both    | High ->  |   Down   |   Up    |    Up    |  Down   |
		| edges   | Low  ->  |    Up    |  Down   |   Down   |   Up    |
		+---------+----------+----------+---------+----------+---------+
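The attributes removed above are plain sysfs files, so they can be exercised from userspace with ordinary reads and writes. Below is a minimal sketch, not part of the patch; it assumes the counter is enumerated as iio:device0 and that a driver exposes these attribute names::

    /*
     * Minimal userspace sketch for the counter sysfs ABI documented above.
     * Assumption: the encoder shows up as /sys/bus/iio/devices/iio:device0.
     */
    #include <stdio.h>

    #define DEV "/sys/bus/iio/devices/iio:device0/"

    static int write_attr(const char *name, const char *val)
    {
        char path[256];
        FILE *f;

        snprintf(path, sizeof(path), DEV "%s", name);
        f = fopen(path, "w");
        if (!f)
            return -1;
        fprintf(f, "%s\n", val);
        return fclose(f);
    }

    static int read_attr(const char *name, char *buf, size_t len)
    {
        char path[256];
        FILE *f;

        snprintf(path, sizeof(path), DEV "%s", name);
        f = fopen(path, "r");
        if (!f)
            return -1;
        if (!fgets(buf, len, f)) {
            fclose(f);
            return -1;
        }
        return fclose(f);
    }

    int main(void)
    {
        char buf[64];

        /* Count from 0 to 65535, quadrature decoding, both edges active. */
        write_attr("in_count0_preset", "65535");
        write_attr("in_count0_quadrature_mode", "quadrature");
        write_attr("in_count0_polarity", "both-edges");

        if (read_attr("in_count0_preset", buf, sizeof(buf)) == 0)
            printf("preset: %s", buf);
        return 0;
    }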
@@ -468,7 +468,7 @@ Spectre variant 2
   before invoking any firmware code to prevent Spectre variant 2 exploits
   using the firmware.

-   Using kernel address space randomization (CONFIG_RANDOMIZE_SLAB=y
+   Using kernel address space randomization (CONFIG_RANDOMIZE_BASE=y
   and CONFIG_SLAB_FREELIST_RANDOM=y in the kernel configuration) makes
   attacks on the kernel generally more difficult.

@@ -10,6 +10,9 @@ title: Amlogic specific extensions to the Synopsys Designware HDMI Controller
maintainers:
  - Neil Armstrong <narmstrong@baylibre.com>

+allOf:
+  - $ref: /schemas/sound/name-prefix.yaml#
+
description: |
  The Amlogic Meson Synopsys Designware Integration is composed of
  - A Synopsys DesignWare HDMI Controller IP
@@ -99,6 +102,8 @@ properties:
  "#sound-dai-cells":
    const: 0

+  sound-name-prefix: true
+
required:
  - compatible
  - reg

@@ -78,6 +78,10 @@ properties:
  interrupts:
    maxItems: 1

+  amlogic,canvas:
+    description: should point to a canvas provider node
+    $ref: /schemas/types.yaml#/definitions/phandle
+
  power-domains:
    maxItems: 1
    description: phandle to the associated power domain
@@ -106,6 +110,7 @@ required:
  - port@1
  - "#address-cells"
  - "#size-cells"
+  - amlogic,canvas

additionalProperties: false

@@ -118,6 +123,7 @@ examples:
        interrupts = <3>;
        #address-cells = <1>;
        #size-cells = <0>;
+        amlogic,canvas = <&canvas>;

        /* CVBS VDAC output port */
        port@0 {

@@ -199,12 +199,11 @@ patternProperties:

      contribution:
        $ref: /schemas/types.yaml#/definitions/uint32
        minimum: 0
        maximum: 100
        description:
-          The percentage contribution of the cooling devices at the
-          specific trip temperature referenced in this map
-          to this thermal zone
+          The cooling contribution to the thermal zone of the referred
+          cooling device at the referred trip point. The contribution is
+          a ratio of the sum of all cooling contributions within a
+          thermal zone.

    required:
      - trip

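The reworded description defines contribution as a share of the sum of all contributions in the zone rather than a percentage. A tiny standalone illustration of that arithmetic, with made-up values::

    /*
     * Standalone illustration of the reworded "contribution" semantics:
     * each cooling device gets its contribution divided by the sum of all
     * contributions in the zone.  The values 3 and 1 are made up.
     */
    #include <stdio.h>

    int main(void)
    {
        unsigned int contribution[] = { 3, 1 };
        unsigned int sum = 0, i;

        for (i = 0; i < 2; i++)
            sum += contribution[i];

        for (i = 0; i < 2; i++)
            printf("cooling device %u handles %u/%u of the load\n",
                   i, contribution[i], sum);
        return 0;
    }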
@@ -39,8 +39,8 @@ properties:
  samsung,syscon-phandle:
    $ref: /schemas/types.yaml#/definitions/phandle
    description:
-      Phandle to the PMU system controller node (in case of Exynos5250
-      and Exynos5420).
+      Phandle to the PMU system controller node (in case of Exynos5250,
+      Exynos5420 and Exynos7).

required:
  - compatible
@@ -58,6 +58,7 @@ allOf:
          enum:
            - samsung,exynos5250-wdt
            - samsung,exynos5420-wdt
+            - samsung,exynos7-wdt
      then:
        required:
          - samsung,syscon-phandle

@@ -143,13 +143,14 @@ Part 5 - Handling channel allocation
Allocating Channels
-------------------

-Channels are required to be configured prior to starting the test run.
-Attempting to run the test without configuring the channels will fail.
+Channels do not need to be configured prior to starting a test run. Attempting
+to run the test without configuring the channels will result in testing any
+channels that are available.

Example::

    % echo 1 > /sys/module/dmatest/parameters/run
-    dmatest: Could not start test, no channels configured
+    dmatest: No channels configured, continue with any

Channels are registered using the "channel" parameter. Channels can be requested by their
name, once requested, the channel is registered and a pending thread is added to the test list.

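As a companion to the reworded text, here is a minimal userspace sketch, not from the patch, that drives dmatest through its module parameters; the channel name dma0chan0 is only a placeholder::

    /*
     * Minimal sketch of driving dmatest from userspace, following the text
     * above: requesting a channel is optional, and starting a run with no
     * channels configured now tests whatever channels are available.
     */
    #include <stdio.h>

    static int dmatest_param(const char *name, const char *val)
    {
        char path[128];
        FILE *f;

        snprintf(path, sizeof(path), "/sys/module/dmatest/parameters/%s", name);
        f = fopen(path, "w");
        if (!f)
            return -1;
        fprintf(f, "%s\n", val);
        return fclose(f);
    }

    int main(void)
    {
        dmatest_param("channel", "dma0chan0"); /* optional: pin one channel */
        dmatest_param("run", "1");             /* no channel set: any available one is used */
        return 0;
    }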
@@ -19,7 +19,7 @@ of kernel interfaces is available via exported symbols in `firewire-core` module
Firewire char device data structures
====================================

-.. include:: /ABI/stable/firewire-cdev
+.. include:: ../ABI/stable/firewire-cdev
    :literal:

.. kernel-doc:: include/uapi/linux/firewire-cdev.h
@@ -28,7 +28,7 @@ Firewire char device data structures
Firewire device probing and sysfs interfaces
============================================

-.. include:: /ABI/stable/sysfs-bus-firewire
+.. include:: ../ABI/stable/sysfs-bus-firewire
    :literal:

.. kernel-doc:: drivers/firewire/core-device.c

@@ -5,7 +5,7 @@
Referencing hierarchical data nodes
===================================

-:Copyright: |copy| 2018 Intel Corporation
+:Copyright: |copy| 2018, 2021 Intel Corporation
:Author: Sakari Ailus <sakari.ailus@linux.intel.com>

ACPI in general allows referring to device objects in the tree only.
@@ -52,12 +52,14 @@ the ANOD object which is also the final target node of the reference.
	Name (NOD0, Package() {
		ToUUID("daffd814-6eba-4d8c-8a91-bc9bbf4aa301"),
		Package () {
+			Package () { "reg", 0 },
			Package () { "random-property", 3 },
		}
	})
	Name (NOD1, Package() {
		ToUUID("dbb8e3e6-5886-4ba6-8795-1319f52a966b"),
		Package () {
+			Package () { "reg", 1 },
			Package () { "anothernode", "ANOD" },
		}
	})
@@ -74,7 +76,11 @@ the ANOD object which is also the final target node of the reference.
	Name (_DSD, Package () {
		ToUUID("daffd814-6eba-4d8c-8a91-bc9bbf4aa301"),
		Package () {
-			Package () { "reference", ^DEV0, "node@1", "anothernode" },
+			Package () {
+				"reference", Package () {
+					^DEV0, "node@1", "anothernode"
+				}
+			},
		}
	})
}

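For context only, a hedged kernel-side sketch of how a driver could consume the "reference" property from the ASL above, assuming the generic device-property API (fwnode_property_get_reference_args()); the "some-property" read at the end is purely hypothetical::

    #include <linux/device.h>
    #include <linux/property.h>

    static int resolve_reference(struct device *dev)
    {
        struct fwnode_reference_args args;
        u32 val;
        int ret;

        /* "reference" is the _DSD property name used in the ASL example. */
        ret = fwnode_property_get_reference_args(dev_fwnode(dev), "reference",
                                                 NULL, 0, 0, &args);
        if (ret)
            return ret;

        /* args.fwnode is the final ANOD data node; the property is hypothetical. */
        ret = fwnode_property_read_u32(args.fwnode, "some-property", &val);
        fwnode_handle_put(args.fwnode);
        return ret;
    }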
Makefile
@@ -1,7 +1,7 @@
# SPDX-License-Identifier: GPL-2.0
VERSION = 5
PATCHLEVEL = 10
-SUBLEVEL = 92
+SUBLEVEL = 94
EXTRAVERSION =
NAME = Dare mighty things

@@ -1073,7 +1073,7 @@ export mod_sign_cmd
HOST_LIBELF_LIBS = $(shell pkg-config libelf --libs 2>/dev/null || echo -lelf)

has_libelf = $(call try-run,\
-		echo "int main() {}" | $(HOSTCC) -xc -o /dev/null $(HOST_LIBELF_LIBS) -,1,0)
+		echo "int main() {}" | $(HOSTCC) $(KBUILD_HOSTLDFLAGS) -xc -o /dev/null $(HOST_LIBELF_LIBS) -,1,0)

ifdef CONFIG_STACK_VALIDATION
  ifeq ($(has_libelf),1)

@@ -400,12 +400,12 @@ choice
|
||||
Say Y here if you want kernel low-level debugging support
|
||||
on i.MX25.
|
||||
|
||||
config DEBUG_IMX21_IMX27_UART
|
||||
bool "i.MX21 and i.MX27 Debug UART"
|
||||
depends on SOC_IMX21 || SOC_IMX27
|
||||
config DEBUG_IMX27_UART
|
||||
bool "i.MX27 Debug UART"
|
||||
depends on SOC_IMX27
|
||||
help
|
||||
Say Y here if you want kernel low-level debugging support
|
||||
on i.MX21 or i.MX27.
|
||||
on i.MX27.
|
||||
|
||||
config DEBUG_IMX28_UART
|
||||
bool "i.MX28 Debug UART"
|
||||
@@ -1523,7 +1523,7 @@ config DEBUG_IMX_UART_PORT
|
||||
int "i.MX Debug UART Port Selection"
|
||||
depends on DEBUG_IMX1_UART || \
|
||||
DEBUG_IMX25_UART || \
|
||||
DEBUG_IMX21_IMX27_UART || \
|
||||
DEBUG_IMX27_UART || \
|
||||
DEBUG_IMX31_UART || \
|
||||
DEBUG_IMX35_UART || \
|
||||
DEBUG_IMX50_UART || \
|
||||
@@ -1591,12 +1591,12 @@ config DEBUG_LL_INCLUDE
|
||||
default "debug/icedcc.S" if DEBUG_ICEDCC
|
||||
default "debug/imx.S" if DEBUG_IMX1_UART || \
|
||||
DEBUG_IMX25_UART || \
|
||||
DEBUG_IMX21_IMX27_UART || \
|
||||
DEBUG_IMX27_UART || \
|
||||
DEBUG_IMX31_UART || \
|
||||
DEBUG_IMX35_UART || \
|
||||
DEBUG_IMX50_UART || \
|
||||
DEBUG_IMX51_UART || \
|
||||
DEBUG_IMX53_UART ||\
|
||||
DEBUG_IMX53_UART || \
|
||||
DEBUG_IMX6Q_UART || \
|
||||
DEBUG_IMX6SL_UART || \
|
||||
DEBUG_IMX6SX_UART || \
|
||||
|
||||
@@ -9,16 +9,22 @@
|
||||
#include <linux/sizes.h>
|
||||
|
||||
.macro __nop
|
||||
#ifdef CONFIG_EFI_STUB
|
||||
@ This is almost but not quite a NOP, since it does clobber the
|
||||
@ condition flags. But it is the best we can do for EFI, since
|
||||
@ PE/COFF expects the magic string "MZ" at offset 0, while the
|
||||
@ ARM/Linux boot protocol expects an executable instruction
|
||||
@ there.
|
||||
.inst MZ_MAGIC | (0x1310 << 16) @ tstne r0, #0x4d000
|
||||
#else
|
||||
AR_CLASS( mov r0, r0 )
|
||||
M_CLASS( nop.w )
|
||||
.endm
|
||||
|
||||
.macro __initial_nops
|
||||
#ifdef CONFIG_EFI_STUB
|
||||
@ This is a two-instruction NOP, which happens to bear the
|
||||
@ PE/COFF signature "MZ" in the first two bytes, so the kernel
|
||||
@ is accepted as an EFI binary. Booting via the UEFI stub
|
||||
@ will not execute those instructions, but the ARM/Linux
|
||||
@ boot protocol does, so we need some NOPs here.
|
||||
.inst MZ_MAGIC | (0xe225 << 16) @ eor r5, r5, 0x4d000
|
||||
eor r5, r5, 0x4d000 @ undo previous insn
|
||||
#else
|
||||
__nop
|
||||
__nop
|
||||
#endif
|
||||
.endm
|
||||
|
||||
|
||||
@@ -190,7 +190,8 @@ start:
|
||||
* were patching the initial instructions of the kernel, i.e
|
||||
* had started to exploit this "patch area".
|
||||
*/
|
||||
.rept 7
|
||||
__initial_nops
|
||||
.rept 5
|
||||
__nop
|
||||
.endr
|
||||
#ifndef CONFIG_THUMB2_KERNEL
|
||||
|
||||
@@ -168,7 +168,7 @@
|
||||
};
|
||||
|
||||
uart0: serial@12000 {
|
||||
compatible = "marvell,armada-38x-uart";
|
||||
compatible = "marvell,armada-38x-uart", "ns16550a";
|
||||
reg = <0x12000 0x100>;
|
||||
reg-shift = <2>;
|
||||
interrupts = <GIC_SPI 12 IRQ_TYPE_LEVEL_HIGH>;
|
||||
@@ -178,7 +178,7 @@
|
||||
};
|
||||
|
||||
uart1: serial@12100 {
|
||||
compatible = "marvell,armada-38x-uart";
|
||||
compatible = "marvell,armada-38x-uart", "ns16550a";
|
||||
reg = <0x12100 0x100>;
|
||||
reg-shift = <2>;
|
||||
interrupts = <GIC_SPI 13 IRQ_TYPE_LEVEL_HIGH>;
|
||||
|
||||
@@ -84,7 +84,7 @@
|
||||
partitions {
|
||||
compatible = "redboot-fis";
|
||||
/* Eraseblock at 0xfe0000 */
|
||||
fis-index-block = <0x1fc>;
|
||||
fis-index-block = <0x7f>;
|
||||
};
|
||||
};
|
||||
|
||||
|
||||
@@ -8,6 +8,7 @@
|
||||
|
||||
#include "omap34xx.dtsi"
|
||||
#include <dt-bindings/input/input.h>
|
||||
#include <dt-bindings/leds/common.h>
|
||||
|
||||
/*
|
||||
* Default secure signed bootloader (Nokia X-Loader) does not enable L3 firewall
|
||||
@@ -630,63 +631,92 @@
|
||||
};
|
||||
|
||||
lp5523: lp5523@32 {
|
||||
#address-cells = <1>;
|
||||
#size-cells = <0>;
|
||||
compatible = "national,lp5523";
|
||||
reg = <0x32>;
|
||||
clock-mode = /bits/ 8 <0>; /* LP55XX_CLOCK_AUTO */
|
||||
enable-gpio = <&gpio2 9 GPIO_ACTIVE_HIGH>; /* 41 */
|
||||
enable-gpios = <&gpio2 9 GPIO_ACTIVE_HIGH>; /* 41 */
|
||||
|
||||
chan0 {
|
||||
led@0 {
|
||||
reg = <0>;
|
||||
chan-name = "lp5523:kb1";
|
||||
led-cur = /bits/ 8 <50>;
|
||||
max-cur = /bits/ 8 <100>;
|
||||
color = <LED_COLOR_ID_WHITE>;
|
||||
function = LED_FUNCTION_KBD_BACKLIGHT;
|
||||
};
|
||||
|
||||
chan1 {
|
||||
led@1 {
|
||||
reg = <1>;
|
||||
chan-name = "lp5523:kb2";
|
||||
led-cur = /bits/ 8 <50>;
|
||||
max-cur = /bits/ 8 <100>;
|
||||
color = <LED_COLOR_ID_WHITE>;
|
||||
function = LED_FUNCTION_KBD_BACKLIGHT;
|
||||
};
|
||||
|
||||
chan2 {
|
||||
led@2 {
|
||||
reg = <2>;
|
||||
chan-name = "lp5523:kb3";
|
||||
led-cur = /bits/ 8 <50>;
|
||||
max-cur = /bits/ 8 <100>;
|
||||
color = <LED_COLOR_ID_WHITE>;
|
||||
function = LED_FUNCTION_KBD_BACKLIGHT;
|
||||
};
|
||||
|
||||
chan3 {
|
||||
led@3 {
|
||||
reg = <3>;
|
||||
chan-name = "lp5523:kb4";
|
||||
led-cur = /bits/ 8 <50>;
|
||||
max-cur = /bits/ 8 <100>;
|
||||
color = <LED_COLOR_ID_WHITE>;
|
||||
function = LED_FUNCTION_KBD_BACKLIGHT;
|
||||
};
|
||||
|
||||
chan4 {
|
||||
led@4 {
|
||||
reg = <4>;
|
||||
chan-name = "lp5523:b";
|
||||
led-cur = /bits/ 8 <50>;
|
||||
max-cur = /bits/ 8 <100>;
|
||||
color = <LED_COLOR_ID_BLUE>;
|
||||
function = LED_FUNCTION_STATUS;
|
||||
};
|
||||
|
||||
chan5 {
|
||||
led@5 {
|
||||
reg = <5>;
|
||||
chan-name = "lp5523:g";
|
||||
led-cur = /bits/ 8 <50>;
|
||||
max-cur = /bits/ 8 <100>;
|
||||
color = <LED_COLOR_ID_GREEN>;
|
||||
function = LED_FUNCTION_STATUS;
|
||||
};
|
||||
|
||||
chan6 {
|
||||
led@6 {
|
||||
reg = <6>;
|
||||
chan-name = "lp5523:r";
|
||||
led-cur = /bits/ 8 <50>;
|
||||
max-cur = /bits/ 8 <100>;
|
||||
color = <LED_COLOR_ID_RED>;
|
||||
function = LED_FUNCTION_STATUS;
|
||||
};
|
||||
|
||||
chan7 {
|
||||
led@7 {
|
||||
reg = <7>;
|
||||
chan-name = "lp5523:kb5";
|
||||
led-cur = /bits/ 8 <50>;
|
||||
max-cur = /bits/ 8 <100>;
|
||||
color = <LED_COLOR_ID_WHITE>;
|
||||
function = LED_FUNCTION_KBD_BACKLIGHT;
|
||||
};
|
||||
|
||||
chan8 {
|
||||
led@8 {
|
||||
reg = <8>;
|
||||
chan-name = "lp5523:kb6";
|
||||
led-cur = /bits/ 8 <50>;
|
||||
max-cur = /bits/ 8 <100>;
|
||||
color = <LED_COLOR_ID_WHITE>;
|
||||
function = LED_FUNCTION_KBD_BACKLIGHT;
|
||||
};
|
||||
};
|
||||
|
||||
|
||||
@@ -192,7 +192,7 @@
|
||||
|
||||
display: display@1{
|
||||
/* Connect panel-ilitek-9341 to ltdc */
|
||||
compatible = "st,sf-tc240t-9370-t";
|
||||
compatible = "st,sf-tc240t-9370-t", "ilitek,ili9341";
|
||||
reg = <1>;
|
||||
spi-3wire;
|
||||
spi-max-frequency = <10000000>;
|
||||
|
||||
@@ -11,13 +11,6 @@
|
||||
#define IMX1_UART_BASE_ADDR(n) IMX1_UART##n##_BASE_ADDR
|
||||
#define IMX1_UART_BASE(n) IMX1_UART_BASE_ADDR(n)
|
||||
|
||||
#define IMX21_UART1_BASE_ADDR 0x1000a000
|
||||
#define IMX21_UART2_BASE_ADDR 0x1000b000
|
||||
#define IMX21_UART3_BASE_ADDR 0x1000c000
|
||||
#define IMX21_UART4_BASE_ADDR 0x1000d000
|
||||
#define IMX21_UART_BASE_ADDR(n) IMX21_UART##n##_BASE_ADDR
|
||||
#define IMX21_UART_BASE(n) IMX21_UART_BASE_ADDR(n)
|
||||
|
||||
#define IMX25_UART1_BASE_ADDR 0x43f90000
|
||||
#define IMX25_UART2_BASE_ADDR 0x43f94000
|
||||
#define IMX25_UART3_BASE_ADDR 0x5000c000
|
||||
@@ -26,6 +19,13 @@
|
||||
#define IMX25_UART_BASE_ADDR(n) IMX25_UART##n##_BASE_ADDR
|
||||
#define IMX25_UART_BASE(n) IMX25_UART_BASE_ADDR(n)
|
||||
|
||||
#define IMX27_UART1_BASE_ADDR 0x1000a000
|
||||
#define IMX27_UART2_BASE_ADDR 0x1000b000
|
||||
#define IMX27_UART3_BASE_ADDR 0x1000c000
|
||||
#define IMX27_UART4_BASE_ADDR 0x1000d000
|
||||
#define IMX27_UART_BASE_ADDR(n) IMX27_UART##n##_BASE_ADDR
|
||||
#define IMX27_UART_BASE(n) IMX27_UART_BASE_ADDR(n)
|
||||
|
||||
#define IMX31_UART1_BASE_ADDR 0x43f90000
|
||||
#define IMX31_UART2_BASE_ADDR 0x43f94000
|
||||
#define IMX31_UART3_BASE_ADDR 0x5000c000
|
||||
@@ -112,10 +112,10 @@
|
||||
|
||||
#ifdef CONFIG_DEBUG_IMX1_UART
|
||||
#define UART_PADDR IMX_DEBUG_UART_BASE(IMX1)
|
||||
#elif defined(CONFIG_DEBUG_IMX21_IMX27_UART)
|
||||
#define UART_PADDR IMX_DEBUG_UART_BASE(IMX21)
|
||||
#elif defined(CONFIG_DEBUG_IMX25_UART)
|
||||
#define UART_PADDR IMX_DEBUG_UART_BASE(IMX25)
|
||||
#elif defined(CONFIG_DEBUG_IMX27_UART)
|
||||
#define UART_PADDR IMX_DEBUG_UART_BASE(IMX27)
|
||||
#elif defined(CONFIG_DEBUG_IMX31_UART)
|
||||
#define UART_PADDR IMX_DEBUG_UART_BASE(IMX31)
|
||||
#elif defined(CONFIG_DEBUG_IMX35_UART)
|
||||
|
||||
@@ -62,9 +62,10 @@ user_backtrace(struct frame_tail __user *tail,
|
||||
void
|
||||
perf_callchain_user(struct perf_callchain_entry_ctx *entry, struct pt_regs *regs)
|
||||
{
|
||||
struct perf_guest_info_callbacks *guest_cbs = perf_get_guest_cbs();
|
||||
struct frame_tail __user *tail;
|
||||
|
||||
if (perf_guest_cbs && perf_guest_cbs->is_in_guest()) {
|
||||
if (guest_cbs && guest_cbs->is_in_guest()) {
|
||||
/* We don't support guest os callchain now */
|
||||
return;
|
||||
}
|
||||
@@ -98,9 +99,10 @@ callchain_trace(struct stackframe *fr,
|
||||
void
|
||||
perf_callchain_kernel(struct perf_callchain_entry_ctx *entry, struct pt_regs *regs)
|
||||
{
|
||||
struct perf_guest_info_callbacks *guest_cbs = perf_get_guest_cbs();
|
||||
struct stackframe fr;
|
||||
|
||||
if (perf_guest_cbs && perf_guest_cbs->is_in_guest()) {
|
||||
if (guest_cbs && guest_cbs->is_in_guest()) {
|
||||
/* We don't support guest os callchain now */
|
||||
return;
|
||||
}
|
||||
@@ -111,18 +113,21 @@ perf_callchain_kernel(struct perf_callchain_entry_ctx *entry, struct pt_regs *re
|
||||
|
||||
unsigned long perf_instruction_pointer(struct pt_regs *regs)
|
||||
{
|
||||
if (perf_guest_cbs && perf_guest_cbs->is_in_guest())
|
||||
return perf_guest_cbs->get_guest_ip();
|
||||
struct perf_guest_info_callbacks *guest_cbs = perf_get_guest_cbs();
|
||||
|
||||
if (guest_cbs && guest_cbs->is_in_guest())
|
||||
return guest_cbs->get_guest_ip();
|
||||
|
||||
return instruction_pointer(regs);
|
||||
}
|
||||
|
||||
unsigned long perf_misc_flags(struct pt_regs *regs)
|
||||
{
|
||||
struct perf_guest_info_callbacks *guest_cbs = perf_get_guest_cbs();
|
||||
int misc = 0;
|
||||
|
||||
if (perf_guest_cbs && perf_guest_cbs->is_in_guest()) {
|
||||
if (perf_guest_cbs->is_user_mode())
|
||||
if (guest_cbs && guest_cbs->is_in_guest()) {
|
||||
if (guest_cbs->is_user_mode())
|
||||
misc |= PERF_RECORD_MISC_GUEST_USER;
|
||||
else
|
||||
misc |= PERF_RECORD_MISC_GUEST_KERNEL;
|
||||
|
||||
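The perf_callchain/perf_misc_flags hunks above (and the arm64, csky and nds32 ones later in this diff) all apply the same pattern: take one snapshot of the guest callbacks via perf_get_guest_cbs(), NULL-check that snapshot, and only call through it. A standalone analogue, not kernel code, of why the single snapshot matters::

    /*
     * Standalone analogue of the pattern above: take one snapshot of a
     * callback table that another thread may clear, NULL-check the
     * snapshot, and only ever call through it.
     */
    #include <stdio.h>
    #include <stdatomic.h>

    struct guest_cbs {
        int (*is_in_guest)(void);
        unsigned long (*get_guest_ip)(void);
    };

    static _Atomic(struct guest_cbs *) registered_cbs;

    static unsigned long sample_ip(unsigned long host_ip)
    {
        /* One load, reused for both the check and the calls. */
        struct guest_cbs *cbs = atomic_load(&registered_cbs);

        if (cbs && cbs->is_in_guest())
            return cbs->get_guest_ip();
        return host_ip;
    }

    static int in_guest(void) { return 1; }
    static unsigned long guest_ip(void) { return 0xffff0000UL; }

    int main(void)
    {
        static struct guest_cbs demo_cbs = { in_guest, guest_ip };

        printf("no callbacks:   %#lx\n", sample_ip(0x1234));
        atomic_store(&registered_cbs, &demo_cbs);
        printf("with callbacks: %#lx\n", sample_ip(0x1234));
        return 0;
    }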
@@ -154,8 +154,10 @@ static int __init rcar_gen2_regulator_quirk(void)
|
||||
return -ENODEV;
|
||||
|
||||
for_each_matching_node_and_match(np, rcar_gen2_quirk_match, &id) {
|
||||
if (!of_device_is_available(np))
|
||||
if (!of_device_is_available(np)) {
|
||||
of_node_put(np);
|
||||
break;
|
||||
}
|
||||
|
||||
ret = of_property_read_u32(np, "reg", &addr);
|
||||
if (ret) /* Skip invalid entry and continue */
|
||||
@@ -164,6 +166,7 @@ static int __init rcar_gen2_regulator_quirk(void)
|
||||
quirk = kzalloc(sizeof(*quirk), GFP_KERNEL);
|
||||
if (!quirk) {
|
||||
ret = -ENOMEM;
|
||||
of_node_put(np);
|
||||
goto err_mem;
|
||||
}
|
||||
|
||||
|
||||
@@ -52,7 +52,7 @@
|
||||
secure-monitor = <&sm>;
|
||||
};
|
||||
|
||||
gpu_opp_table: gpu-opp-table {
|
||||
gpu_opp_table: opp-table-gpu {
|
||||
compatible = "operating-points-v2";
|
||||
|
||||
opp-124999998 {
|
||||
|
||||
@@ -543,7 +543,7 @@
|
||||
pinctrl-0 = <&nor_pins>;
|
||||
pinctrl-names = "default";
|
||||
|
||||
mx25u64: spi-flash@0 {
|
||||
mx25u64: flash@0 {
|
||||
#address-cells = <1>;
|
||||
#size-cells = <1>;
|
||||
compatible = "mxicy,mx25u6435f", "jedec,spi-nor";
|
||||
|
||||
@@ -6,6 +6,7 @@
|
||||
*/
|
||||
|
||||
#include "meson-gxbb.dtsi"
|
||||
#include <dt-bindings/gpio/gpio.h>
|
||||
|
||||
/ {
|
||||
aliases {
|
||||
@@ -64,6 +65,7 @@
|
||||
regulator-name = "VDDIO_AO18";
|
||||
regulator-min-microvolt = <1800000>;
|
||||
regulator-max-microvolt = <1800000>;
|
||||
regulator-always-on;
|
||||
};
|
||||
|
||||
vcc_3v3: regulator-vcc_3v3 {
|
||||
@@ -161,6 +163,7 @@
|
||||
status = "okay";
|
||||
pinctrl-0 = <&hdmi_hpd_pins>, <&hdmi_i2c_pins>;
|
||||
pinctrl-names = "default";
|
||||
hdmi-supply = <&vddio_ao18>;
|
||||
};
|
||||
|
||||
&hdmi_tx_tmds_port {
|
||||
|
||||
@@ -261,11 +261,6 @@
|
||||
vcc-supply = <&sb_3v3>;
|
||||
};
|
||||
|
||||
rtc@51 {
|
||||
compatible = "nxp,pcf2129";
|
||||
reg = <0x51>;
|
||||
};
|
||||
|
||||
eeprom@56 {
|
||||
compatible = "atmel,24c512";
|
||||
reg = <0x56>;
|
||||
@@ -307,6 +302,15 @@
|
||||
|
||||
};
|
||||
|
||||
&i2c1 {
|
||||
status = "okay";
|
||||
|
||||
rtc@51 {
|
||||
compatible = "nxp,pcf2129";
|
||||
reg = <0x51>;
|
||||
};
|
||||
};
|
||||
|
||||
&enetc_port1 {
|
||||
phy-handle = <&qds_phy1>;
|
||||
phy-connection-type = "rgmii-id";
|
||||
|
||||
@@ -11,6 +11,13 @@
|
||||
model = "Marvell Armada CN9130 SoC";
|
||||
compatible = "marvell,cn9130", "marvell,armada-ap807-quad",
|
||||
"marvell,armada-ap807";
|
||||
|
||||
aliases {
|
||||
gpio1 = &cp0_gpio1;
|
||||
gpio2 = &cp0_gpio2;
|
||||
spi1 = &cp0_spi0;
|
||||
spi2 = &cp0_spi1;
|
||||
};
|
||||
};
|
||||
|
||||
/*
|
||||
@@ -35,3 +42,11 @@
|
||||
#undef CP11X_PCIE0_BASE
|
||||
#undef CP11X_PCIE1_BASE
|
||||
#undef CP11X_PCIE2_BASE
|
||||
|
||||
&cp0_gpio1 {
|
||||
status = "okay";
|
||||
};
|
||||
|
||||
&cp0_gpio2 {
|
||||
status = "okay";
|
||||
};
|
||||
|
||||
@@ -985,7 +985,7 @@
|
||||
|
||||
ccplex@e000000 {
|
||||
compatible = "nvidia,tegra186-ccplex-cluster";
|
||||
reg = <0x0 0x0e000000 0x0 0x3fffff>;
|
||||
reg = <0x0 0x0e000000 0x0 0x400000>;
|
||||
|
||||
nvidia,bpmp = <&bpmp>;
|
||||
};
|
||||
|
||||
@@ -782,13 +782,12 @@
|
||||
reg = <0x3510000 0x10000>;
|
||||
interrupts = <GIC_SPI 161 IRQ_TYPE_LEVEL_HIGH>;
|
||||
clocks = <&bpmp TEGRA194_CLK_HDA>,
|
||||
<&bpmp TEGRA194_CLK_HDA2CODEC_2X>,
|
||||
<&bpmp TEGRA194_CLK_HDA2HDMICODEC>;
|
||||
clock-names = "hda", "hda2codec_2x", "hda2hdmi";
|
||||
<&bpmp TEGRA194_CLK_HDA2HDMICODEC>,
|
||||
<&bpmp TEGRA194_CLK_HDA2CODEC_2X>;
|
||||
clock-names = "hda", "hda2hdmi", "hda2codec_2x";
|
||||
resets = <&bpmp TEGRA194_RESET_HDA>,
|
||||
<&bpmp TEGRA194_RESET_HDA2CODEC_2X>,
|
||||
<&bpmp TEGRA194_RESET_HDA2HDMICODEC>;
|
||||
reset-names = "hda", "hda2codec_2x", "hda2hdmi";
|
||||
reset-names = "hda", "hda2hdmi";
|
||||
power-domains = <&bpmp TEGRA194_POWER_DOMAIN_DISP>;
|
||||
interconnects = <&mc TEGRA194_MEMORY_CLIENT_HDAR &emc>,
|
||||
<&mc TEGRA194_MEMORY_CLIENT_HDAW &emc>;
|
||||
|
||||
@@ -221,7 +221,7 @@
|
||||
interrupts = <GIC_SPI 208 IRQ_TYPE_LEVEL_HIGH>;
|
||||
gpio-controller;
|
||||
#gpio-cells = <2>;
|
||||
gpio-ranges = <&tlmm 0 80>;
|
||||
gpio-ranges = <&tlmm 0 0 80>;
|
||||
interrupt-controller;
|
||||
#interrupt-cells = <2>;
|
||||
|
||||
|
||||
@@ -18,8 +18,8 @@
|
||||
#size-cells = <2>;
|
||||
|
||||
aliases {
|
||||
sdhc1 = &sdhc_1; /* SDC1 eMMC slot */
|
||||
sdhc2 = &sdhc_2; /* SDC2 SD card slot */
|
||||
mmc0 = &sdhc_1; /* SDC1 eMMC slot */
|
||||
mmc1 = &sdhc_2; /* SDC2 SD card slot */
|
||||
};
|
||||
|
||||
chosen { };
|
||||
|
||||
@@ -645,9 +645,6 @@
|
||||
nvmem-cells = <&gpu_speed_bin>;
|
||||
nvmem-cell-names = "speed_bin";
|
||||
|
||||
qcom,gpu-quirk-two-pass-use-wfi;
|
||||
qcom,gpu-quirk-fault-detect-mask;
|
||||
|
||||
operating-points-v2 = <&gpu_opp_table>;
|
||||
|
||||
gpu_opp_table: opp-table {
|
||||
|
||||
@@ -365,6 +365,10 @@
|
||||
dai@1 {
|
||||
reg = <1>;
|
||||
};
|
||||
|
||||
dai@2 {
|
||||
reg = <2>;
|
||||
};
|
||||
};
|
||||
|
||||
&sound {
|
||||
@@ -377,6 +381,7 @@
|
||||
"SpkrLeft IN", "SPK1 OUT",
|
||||
"SpkrRight IN", "SPK2 OUT",
|
||||
"MM_DL1", "MultiMedia1 Playback",
|
||||
"MM_DL3", "MultiMedia3 Playback",
|
||||
"MultiMedia2 Capture", "MM_UL2";
|
||||
|
||||
mm1-dai-link {
|
||||
@@ -393,6 +398,13 @@
|
||||
};
|
||||
};
|
||||
|
||||
mm3-dai-link {
|
||||
link-name = "MultiMedia3";
|
||||
cpu {
|
||||
sound-dai = <&q6asmdai MSM_FRONTEND_DAI_MULTIMEDIA3>;
|
||||
};
|
||||
};
|
||||
|
||||
slim-dai-link {
|
||||
link-name = "SLIM Playback";
|
||||
cpu {
|
||||
@@ -422,6 +434,21 @@
|
||||
sound-dai = <&wcd9340 1>;
|
||||
};
|
||||
};
|
||||
|
||||
slim-wcd-dai-link {
|
||||
link-name = "SLIM WCD Playback";
|
||||
cpu {
|
||||
sound-dai = <&q6afedai SLIMBUS_1_RX>;
|
||||
};
|
||||
|
||||
platform {
|
||||
sound-dai = <&q6routing>;
|
||||
};
|
||||
|
||||
codec {
|
||||
sound-dai = <&wcd9340 2>;
|
||||
};
|
||||
};
|
||||
};
|
||||
|
||||
&tlmm {
|
||||
|
||||
@@ -18,6 +18,7 @@
|
||||
pinctrl-names = "default";
|
||||
renesas,no-ether-link;
|
||||
phy-handle = <&phy0>;
|
||||
phy-mode = "rgmii-id";
|
||||
status = "okay";
|
||||
|
||||
phy0: ethernet-phy@0 {
|
||||
|
||||
@@ -25,7 +25,7 @@
|
||||
#size-cells = <1>;
|
||||
ranges = <0x00 0x00 0x00100000 0x1c000>;
|
||||
|
||||
serdes_ln_ctrl: serdes-ln-ctrl@4080 {
|
||||
serdes_ln_ctrl: mux-controller@4080 {
|
||||
compatible = "mmio-mux";
|
||||
#mux-control-cells = <1>;
|
||||
mux-reg-masks = <0x4080 0x3>, <0x4084 0x3>, /* SERDES0 lane0/1 select */
|
||||
|
||||
@@ -60,7 +60,7 @@
|
||||
i-cache-sets = <256>;
|
||||
d-cache-size = <0x8000>;
|
||||
d-cache-line-size = <64>;
|
||||
d-cache-sets = <128>;
|
||||
d-cache-sets = <256>;
|
||||
next-level-cache = <&L2_0>;
|
||||
};
|
||||
|
||||
@@ -74,7 +74,7 @@
|
||||
i-cache-sets = <256>;
|
||||
d-cache-size = <0x8000>;
|
||||
d-cache-line-size = <64>;
|
||||
d-cache-sets = <128>;
|
||||
d-cache-sets = <256>;
|
||||
next-level-cache = <&L2_0>;
|
||||
};
|
||||
};
|
||||
@@ -84,7 +84,7 @@
|
||||
cache-level = <2>;
|
||||
cache-size = <0x100000>;
|
||||
cache-line-size = <64>;
|
||||
cache-sets = <2048>;
|
||||
cache-sets = <1024>;
|
||||
next-level-cache = <&msmc_l3>;
|
||||
};
|
||||
|
||||
|
||||
@@ -61,7 +61,7 @@
|
||||
i-cache-sets = <256>;
|
||||
d-cache-size = <0x8000>;
|
||||
d-cache-line-size = <64>;
|
||||
d-cache-sets = <128>;
|
||||
d-cache-sets = <256>;
|
||||
next-level-cache = <&L2_0>;
|
||||
};
|
||||
|
||||
@@ -75,7 +75,7 @@
|
||||
i-cache-sets = <256>;
|
||||
d-cache-size = <0x8000>;
|
||||
d-cache-line-size = <64>;
|
||||
d-cache-sets = <128>;
|
||||
d-cache-sets = <256>;
|
||||
next-level-cache = <&L2_0>;
|
||||
};
|
||||
};
|
||||
@@ -85,7 +85,7 @@
|
||||
cache-level = <2>;
|
||||
cache-size = <0x100000>;
|
||||
cache-line-size = <64>;
|
||||
cache-sets = <2048>;
|
||||
cache-sets = <1024>;
|
||||
next-level-cache = <&msmc_l3>;
|
||||
};
|
||||
|
||||
|
||||
@@ -102,7 +102,9 @@ compat_user_backtrace(struct compat_frame_tail __user *tail,
|
||||
void perf_callchain_user(struct perf_callchain_entry_ctx *entry,
|
||||
struct pt_regs *regs)
|
||||
{
|
||||
if (perf_guest_cbs && perf_guest_cbs->is_in_guest()) {
|
||||
struct perf_guest_info_callbacks *guest_cbs = perf_get_guest_cbs();
|
||||
|
||||
if (guest_cbs && guest_cbs->is_in_guest()) {
|
||||
/* We don't support guest os callchain now */
|
||||
return;
|
||||
}
|
||||
@@ -147,9 +149,10 @@ static bool callchain_trace(void *data, unsigned long pc)
|
||||
void perf_callchain_kernel(struct perf_callchain_entry_ctx *entry,
|
||||
struct pt_regs *regs)
|
||||
{
|
||||
struct perf_guest_info_callbacks *guest_cbs = perf_get_guest_cbs();
|
||||
struct stackframe frame;
|
||||
|
||||
if (perf_guest_cbs && perf_guest_cbs->is_in_guest()) {
|
||||
if (guest_cbs && guest_cbs->is_in_guest()) {
|
||||
/* We don't support guest os callchain now */
|
||||
return;
|
||||
}
|
||||
@@ -160,18 +163,21 @@ void perf_callchain_kernel(struct perf_callchain_entry_ctx *entry,
|
||||
|
||||
unsigned long perf_instruction_pointer(struct pt_regs *regs)
|
||||
{
|
||||
if (perf_guest_cbs && perf_guest_cbs->is_in_guest())
|
||||
return perf_guest_cbs->get_guest_ip();
|
||||
struct perf_guest_info_callbacks *guest_cbs = perf_get_guest_cbs();
|
||||
|
||||
if (guest_cbs && guest_cbs->is_in_guest())
|
||||
return guest_cbs->get_guest_ip();
|
||||
|
||||
return instruction_pointer(regs);
|
||||
}
|
||||
|
||||
unsigned long perf_misc_flags(struct pt_regs *regs)
|
||||
{
|
||||
struct perf_guest_info_callbacks *guest_cbs = perf_get_guest_cbs();
|
||||
int misc = 0;
|
||||
|
||||
if (perf_guest_cbs && perf_guest_cbs->is_in_guest()) {
|
||||
if (perf_guest_cbs->is_user_mode())
|
||||
if (guest_cbs && guest_cbs->is_in_guest()) {
|
||||
if (guest_cbs->is_user_mode())
|
||||
misc |= PERF_RECORD_MISC_GUEST_USER;
|
||||
else
|
||||
misc |= PERF_RECORD_MISC_GUEST_KERNEL;
|
||||
|
||||
@@ -14,8 +14,9 @@
|
||||
* Parameters:
|
||||
* x0 - dest
|
||||
*/
|
||||
SYM_FUNC_START(clear_page)
|
||||
SYM_FUNC_START_PI(clear_page)
|
||||
mrs x1, dczid_el0
|
||||
tbnz x1, #4, 2f /* Branch if DC ZVA is prohibited */
|
||||
and w1, w1, #0xf
|
||||
mov x2, #4
|
||||
lsl x1, x2, x1
|
||||
@@ -25,5 +26,14 @@ SYM_FUNC_START(clear_page)
|
||||
tst x0, #(PAGE_SIZE - 1)
|
||||
b.ne 1b
|
||||
ret
|
||||
SYM_FUNC_END(clear_page)
|
||||
|
||||
2: stnp xzr, xzr, [x0]
|
||||
stnp xzr, xzr, [x0, #16]
|
||||
stnp xzr, xzr, [x0, #32]
|
||||
stnp xzr, xzr, [x0, #48]
|
||||
add x0, x0, #64
|
||||
tst x0, #(PAGE_SIZE - 1)
|
||||
b.ne 2b
|
||||
ret
|
||||
SYM_FUNC_END_PI(clear_page)
|
||||
EXPORT_SYMBOL(clear_page)
|
||||
|
||||
@@ -17,7 +17,7 @@
|
||||
* x0 - dest
|
||||
* x1 - src
|
||||
*/
|
||||
SYM_FUNC_START(copy_page)
|
||||
SYM_FUNC_START_PI(copy_page)
|
||||
alternative_if ARM64_HAS_NO_HW_PREFETCH
|
||||
// Prefetch three cache lines ahead.
|
||||
prfm pldl1strm, [x1, #128]
|
||||
@@ -75,5 +75,5 @@ alternative_else_nop_endif
|
||||
stnp x16, x17, [x0, #112 - 256]
|
||||
|
||||
ret
|
||||
SYM_FUNC_END(copy_page)
|
||||
SYM_FUNC_END_PI(copy_page)
|
||||
EXPORT_SYMBOL(copy_page)
|
||||
|
||||
@@ -86,10 +86,11 @@ static unsigned long user_backtrace(struct perf_callchain_entry_ctx *entry,
|
||||
void perf_callchain_user(struct perf_callchain_entry_ctx *entry,
|
||||
struct pt_regs *regs)
|
||||
{
|
||||
struct perf_guest_info_callbacks *guest_cbs = perf_get_guest_cbs();
|
||||
unsigned long fp = 0;
|
||||
|
||||
/* C-SKY does not support virtualization. */
|
||||
if (perf_guest_cbs && perf_guest_cbs->is_in_guest())
|
||||
if (guest_cbs && guest_cbs->is_in_guest())
|
||||
return;
|
||||
|
||||
fp = regs->regs[4];
|
||||
@@ -110,10 +111,11 @@ void perf_callchain_user(struct perf_callchain_entry_ctx *entry,
|
||||
void perf_callchain_kernel(struct perf_callchain_entry_ctx *entry,
|
||||
struct pt_regs *regs)
|
||||
{
|
||||
struct perf_guest_info_callbacks *guest_cbs = perf_get_guest_cbs();
|
||||
struct stackframe fr;
|
||||
|
||||
/* C-SKY does not support virtualization. */
|
||||
if (perf_guest_cbs && perf_guest_cbs->is_in_guest()) {
|
||||
if (guest_cbs && guest_cbs->is_in_guest()) {
|
||||
pr_warn("C-SKY does not support perf in guest mode!");
|
||||
return;
|
||||
}
|
||||
|
||||
@@ -1985,6 +1985,10 @@ config SYS_HAS_CPU_MIPS64_R1
|
||||
config SYS_HAS_CPU_MIPS64_R2
|
||||
bool
|
||||
|
||||
config SYS_HAS_CPU_MIPS64_R5
|
||||
bool
|
||||
select ARCH_HAS_SYNC_DMA_FOR_CPU if DMA_NONCOHERENT
|
||||
|
||||
config SYS_HAS_CPU_MIPS64_R6
|
||||
bool
|
||||
select ARCH_HAS_SYNC_DMA_FOR_CPU if DMA_NONCOHERENT
|
||||
@@ -2146,7 +2150,7 @@ config CPU_SUPPORTS_ADDRWINCFG
|
||||
bool
|
||||
config CPU_SUPPORTS_HUGEPAGES
|
||||
bool
|
||||
depends on !(32BIT && (ARCH_PHYS_ADDR_T_64BIT || EVA))
|
||||
depends on !(32BIT && (PHYS_ADDR_T_64BIT || EVA))
|
||||
config MIPS_PGD_C0_CONTEXT
|
||||
bool
|
||||
default y if 64BIT && (CPU_MIPSR2 || CPU_MIPSR6) && !CPU_XLP
|
||||
|
||||
@@ -387,6 +387,12 @@ struct clk *clk_get_parent(struct clk *clk)
|
||||
}
|
||||
EXPORT_SYMBOL(clk_get_parent);
|
||||
|
||||
int clk_set_parent(struct clk *clk, struct clk *parent)
|
||||
{
|
||||
return 0;
|
||||
}
|
||||
EXPORT_SYMBOL(clk_set_parent);
|
||||
|
||||
unsigned long clk_get_rate(struct clk *clk)
|
||||
{
|
||||
if (!clk)
|
||||
|
||||
@@ -328,6 +328,7 @@ static int __init octeon_ehci_device_init(void)
|
||||
|
||||
pd->dev.platform_data = &octeon_ehci_pdata;
|
||||
octeon_ehci_hw_start(&pd->dev);
|
||||
put_device(&pd->dev);
|
||||
|
||||
return ret;
|
||||
}
|
||||
@@ -391,6 +392,7 @@ static int __init octeon_ohci_device_init(void)
|
||||
|
||||
pd->dev.platform_data = &octeon_ohci_pdata;
|
||||
octeon_ohci_hw_start(&pd->dev);
|
||||
put_device(&pd->dev);
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
@@ -544,6 +544,7 @@ static int __init dwc3_octeon_device_init(void)
|
||||
devm_iounmap(&pdev->dev, base);
|
||||
devm_release_mem_region(&pdev->dev, res->start,
|
||||
resource_size(res));
|
||||
put_device(&pdev->dev);
|
||||
}
|
||||
} while (node != NULL);
|
||||
|
||||
|
||||
@@ -36,7 +36,7 @@
|
||||
nop
|
||||
/* Loongson-3A R2/R3 */
|
||||
andi t0, (PRID_IMP_MASK | PRID_REV_MASK)
|
||||
slti t0, (PRID_IMP_LOONGSON_64C | PRID_REV_LOONGSON3A_R2_0)
|
||||
slti t0, t0, (PRID_IMP_LOONGSON_64C | PRID_REV_LOONGSON3A_R2_0)
|
||||
bnez t0, 2f
|
||||
nop
|
||||
1:
|
||||
@@ -71,7 +71,7 @@
|
||||
nop
|
||||
/* Loongson-3A R2/R3 */
|
||||
andi t0, (PRID_IMP_MASK | PRID_REV_MASK)
|
||||
slti t0, (PRID_IMP_LOONGSON_64C | PRID_REV_LOONGSON3A_R2_0)
|
||||
slti t0, t0, (PRID_IMP_LOONGSON_64C | PRID_REV_LOONGSON3A_R2_0)
|
||||
bnez t0, 2f
|
||||
nop
|
||||
1:
|
||||
|
||||
@@ -317,7 +317,7 @@ enum cvmx_chip_types_enum {
|
||||
|
||||
/* Functions to return string based on type */
|
||||
#define ENUM_BRD_TYPE_CASE(x) \
|
||||
case x: return(#x + 16); /* Skip CVMX_BOARD_TYPE_ */
|
||||
case x: return (&#x[16]); /* Skip CVMX_BOARD_TYPE_ */
|
||||
static inline const char *cvmx_board_type_to_string(enum
|
||||
cvmx_board_types_enum type)
|
||||
{
|
||||
@@ -408,7 +408,7 @@ static inline const char *cvmx_board_type_to_string(enum
|
||||
}
|
||||
|
||||
#define ENUM_CHIP_TYPE_CASE(x) \
|
||||
case x: return(#x + 15); /* Skip CVMX_CHIP_TYPE */
|
||||
case x: return (&#x[15]); /* Skip CVMX_CHIP_TYPE */
|
||||
static inline const char *cvmx_chip_type_to_string(enum
|
||||
cvmx_chip_types_enum type)
|
||||
{
|
||||
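The two hunks above only respell a stringify trick: #x expands to the enum name as a string literal, and indexing 15 or 16 characters into it skips the CVMX_CHIP_TYPE/CVMX_BOARD_TYPE_ prefix, so (#x + 16) and (&#x[16]) name the same character. A standalone demo, not the original header::

    #include <stdio.h>

    enum cvmx_board_types_enum {
        CVMX_BOARD_TYPE_SIM = 1,
    };

    /* Same construct as the header: stringify, then skip the 16-char prefix. */
    #define ENUM_BRD_TYPE_CASE(x) \
        case x: return (&#x[16]);   /* Skip CVMX_BOARD_TYPE_ */

    static const char *cvmx_board_type_to_string(enum cvmx_board_types_enum type)
    {
        switch (type) {
        ENUM_BRD_TYPE_CASE(CVMX_BOARD_TYPE_SIM)
        default:
            return "unknown";
        }
    }

    int main(void)
    {
        /* Prints "SIM": the "CVMX_BOARD_TYPE_" prefix is skipped. */
        printf("%s\n", cvmx_board_type_to_string(CVMX_BOARD_TYPE_SIM));
        return 0;
    }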
|
||||
@@ -164,6 +164,12 @@ struct clk *clk_get_parent(struct clk *clk)
|
||||
}
|
||||
EXPORT_SYMBOL(clk_get_parent);
|
||||
|
||||
int clk_set_parent(struct clk *clk, struct clk *parent)
|
||||
{
|
||||
return 0;
|
||||
}
|
||||
EXPORT_SYMBOL(clk_set_parent);
|
||||
|
||||
static inline u32 get_counter_resolution(void)
|
||||
{
|
||||
u32 res;
|
||||
|
||||
@@ -1363,6 +1363,7 @@ void
|
||||
perf_callchain_user(struct perf_callchain_entry_ctx *entry,
|
||||
struct pt_regs *regs)
|
||||
{
|
||||
struct perf_guest_info_callbacks *guest_cbs = perf_get_guest_cbs();
|
||||
unsigned long fp = 0;
|
||||
unsigned long gp = 0;
|
||||
unsigned long lp = 0;
|
||||
@@ -1371,7 +1372,7 @@ perf_callchain_user(struct perf_callchain_entry_ctx *entry,
|
||||
|
||||
leaf_fp = 0;
|
||||
|
||||
if (perf_guest_cbs && perf_guest_cbs->is_in_guest()) {
|
||||
if (guest_cbs && guest_cbs->is_in_guest()) {
|
||||
/* We don't support guest os callchain now */
|
||||
return;
|
||||
}
|
||||
@@ -1479,9 +1480,10 @@ void
|
||||
perf_callchain_kernel(struct perf_callchain_entry_ctx *entry,
|
||||
struct pt_regs *regs)
|
||||
{
|
||||
struct perf_guest_info_callbacks *guest_cbs = perf_get_guest_cbs();
|
||||
struct stackframe fr;
|
||||
|
||||
if (perf_guest_cbs && perf_guest_cbs->is_in_guest()) {
|
||||
if (guest_cbs && guest_cbs->is_in_guest()) {
|
||||
/* We don't support guest os callchain now */
|
||||
return;
|
||||
}
|
||||
@@ -1493,20 +1495,23 @@ perf_callchain_kernel(struct perf_callchain_entry_ctx *entry,
|
||||
|
||||
unsigned long perf_instruction_pointer(struct pt_regs *regs)
|
||||
{
|
||||
struct perf_guest_info_callbacks *guest_cbs = perf_get_guest_cbs();
|
||||
|
||||
/* However, NDS32 does not support virtualization */
|
||||
if (perf_guest_cbs && perf_guest_cbs->is_in_guest())
|
||||
return perf_guest_cbs->get_guest_ip();
|
||||
if (guest_cbs && guest_cbs->is_in_guest())
|
||||
return guest_cbs->get_guest_ip();
|
||||
|
||||
return instruction_pointer(regs);
|
||||
}
|
||||
|
||||
unsigned long perf_misc_flags(struct pt_regs *regs)
|
||||
{
|
||||
struct perf_guest_info_callbacks *guest_cbs = perf_get_guest_cbs();
|
||||
int misc = 0;
|
||||
|
||||
/* However, NDS32 does not support virtualization */
|
||||
if (perf_guest_cbs && perf_guest_cbs->is_in_guest()) {
|
||||
if (perf_guest_cbs->is_user_mode())
|
||||
if (guest_cbs && guest_cbs->is_in_guest()) {
|
||||
if (guest_cbs->is_user_mode())
|
||||
misc |= PERF_RECORD_MISC_GUEST_USER;
|
||||
else
|
||||
misc |= PERF_RECORD_MISC_GUEST_KERNEL;
|
||||
|
||||
@@ -22,9 +22,11 @@ asmlinkage long sys_or1k_atomic(unsigned long type, unsigned long *v1,
|
||||
|
||||
asmlinkage long __sys_clone(unsigned long clone_flags, unsigned long newsp,
|
||||
void __user *parent_tid, void __user *child_tid, int tls);
|
||||
asmlinkage long __sys_clone3(struct clone_args __user *uargs, size_t size);
|
||||
asmlinkage long __sys_fork(void);
|
||||
|
||||
#define sys_clone __sys_clone
|
||||
#define sys_clone3 __sys_clone3
|
||||
#define sys_fork __sys_fork
|
||||
|
||||
#endif /* __ASM_OPENRISC_SYSCALLS_H */
|
||||
|
||||
@@ -1170,6 +1170,11 @@ ENTRY(__sys_clone)
|
||||
l.j _fork_save_extra_regs_and_call
|
||||
l.nop
|
||||
|
||||
ENTRY(__sys_clone3)
|
||||
l.movhi r29,hi(sys_clone3)
|
||||
l.j _fork_save_extra_regs_and_call
|
||||
l.ori r29,r29,lo(sys_clone3)
|
||||
|
||||
ENTRY(__sys_fork)
|
||||
l.movhi r29,hi(sys_fork)
|
||||
l.ori r29,r29,lo(sys_fork)
|
||||
|
||||
@@ -2,28 +2,32 @@
|
||||
#ifndef __PARISC_SPECIAL_INSNS_H
|
||||
#define __PARISC_SPECIAL_INSNS_H
|
||||
|
||||
#define lpa(va) ({ \
|
||||
unsigned long pa; \
|
||||
__asm__ __volatile__( \
|
||||
"copy %%r0,%0\n\t" \
|
||||
"lpa %%r0(%1),%0" \
|
||||
: "=r" (pa) \
|
||||
: "r" (va) \
|
||||
: "memory" \
|
||||
); \
|
||||
pa; \
|
||||
#define lpa(va) ({ \
|
||||
unsigned long pa; \
|
||||
__asm__ __volatile__( \
|
||||
"copy %%r0,%0\n" \
|
||||
"8:\tlpa %%r0(%1),%0\n" \
|
||||
"9:\n" \
|
||||
ASM_EXCEPTIONTABLE_ENTRY(8b, 9b) \
|
||||
: "=&r" (pa) \
|
||||
: "r" (va) \
|
||||
: "memory" \
|
||||
); \
|
||||
pa; \
|
||||
})
|
||||
|
||||
#define lpa_user(va) ({ \
|
||||
unsigned long pa; \
|
||||
__asm__ __volatile__( \
|
||||
"copy %%r0,%0\n\t" \
|
||||
"lpa %%r0(%%sr3,%1),%0" \
|
||||
: "=r" (pa) \
|
||||
: "r" (va) \
|
||||
: "memory" \
|
||||
); \
|
||||
pa; \
|
||||
#define lpa_user(va) ({ \
|
||||
unsigned long pa; \
|
||||
__asm__ __volatile__( \
|
||||
"copy %%r0,%0\n" \
|
||||
"8:\tlpa %%r0(%%sr3,%1),%0\n" \
|
||||
"9:\n" \
|
||||
ASM_EXCEPTIONTABLE_ENTRY(8b, 9b) \
|
||||
: "=&r" (pa) \
|
||||
: "r" (va) \
|
||||
: "memory" \
|
||||
); \
|
||||
pa; \
|
||||
})
|
||||
|
||||
#define mfctl(reg) ({ \
|
||||
|
||||
@@ -784,7 +784,7 @@ void notrace handle_interruption(int code, struct pt_regs *regs)
|
||||
* unless pagefault_disable() was called before.
|
||||
*/
|
||||
|
||||
if (fault_space == 0 && !faulthandler_disabled())
|
||||
if (faulthandler_disabled() || fault_space == 0)
|
||||
{
|
||||
/* Clean up and return if in exception table. */
|
||||
if (fixup_exception(regs))
|
||||
|
||||
@@ -79,6 +79,7 @@ fman0: fman@400000 {
|
||||
#size-cells = <0>;
|
||||
compatible = "fsl,fman-memac-mdio", "fsl,fman-xmdio";
|
||||
reg = <0xfc000 0x1000>;
|
||||
fsl,erratum-a009885;
|
||||
};
|
||||
|
||||
xmdio0: mdio@fd000 {
|
||||
@@ -86,6 +87,7 @@ fman0: fman@400000 {
|
||||
#size-cells = <0>;
|
||||
compatible = "fsl,fman-memac-mdio", "fsl,fman-xmdio";
|
||||
reg = <0xfd000 0x1000>;
|
||||
fsl,erratum-a009885;
|
||||
};
|
||||
};
|
||||
|
||||
|
||||
arch/powerpc/include/asm/cpu_setup_power.h (new file, +12 lines)
@@ -0,0 +1,12 @@
|
||||
/* SPDX-License-Identifier: GPL-2.0-or-later */
|
||||
/*
|
||||
* Copyright (C) 2020 IBM Corporation
|
||||
*/
|
||||
void __setup_cpu_power7(unsigned long offset, struct cpu_spec *spec);
|
||||
void __restore_cpu_power7(void);
|
||||
void __setup_cpu_power8(unsigned long offset, struct cpu_spec *spec);
|
||||
void __restore_cpu_power8(void);
|
||||
void __setup_cpu_power9(unsigned long offset, struct cpu_spec *spec);
|
||||
void __restore_cpu_power9(void);
|
||||
void __setup_cpu_power10(unsigned long offset, struct cpu_spec *spec);
|
||||
void __restore_cpu_power10(void);
|
||||
@@ -382,6 +382,8 @@
|
||||
#define H_CPU_BEHAV_BNDS_CHK_SPEC_BAR (1ull << 61) // IBM bit 2
|
||||
#define H_CPU_BEHAV_FLUSH_COUNT_CACHE (1ull << 58) // IBM bit 5
|
||||
#define H_CPU_BEHAV_FLUSH_LINK_STACK (1ull << 57) // IBM bit 6
|
||||
#define H_CPU_BEHAV_NO_L1D_FLUSH_ENTRY (1ull << 56) // IBM bit 7
|
||||
#define H_CPU_BEHAV_NO_L1D_FLUSH_UACCESS (1ull << 55) // IBM bit 8
|
||||
|
||||
/* Flag values used in H_REGISTER_PROC_TBL hcall */
|
||||
#define PROC_TABLE_OP_MASK 0x18
|
||||
|
||||
@@ -38,6 +38,8 @@
|
||||
#define PACA_IRQ_MUST_HARD_MASK (PACA_IRQ_EE)
|
||||
#endif
|
||||
|
||||
#endif /* CONFIG_PPC64 */
|
||||
|
||||
/*
|
||||
* flags for paca->irq_soft_mask
|
||||
*/
|
||||
@@ -46,8 +48,6 @@
|
||||
#define IRQS_PMI_DISABLED 2
|
||||
#define IRQS_ALL_DISABLED (IRQS_DISABLED | IRQS_PMI_DISABLED)
|
||||
|
||||
#endif /* CONFIG_PPC64 */
|
||||
|
||||
#ifndef __ASSEMBLY__
|
||||
|
||||
extern void replay_system_reset(void);
|
||||
@@ -175,6 +175,42 @@ static inline bool arch_irqs_disabled(void)
|
||||
return arch_irqs_disabled_flags(arch_local_save_flags());
|
||||
}
|
||||
|
||||
static inline void set_pmi_irq_pending(void)
|
||||
{
|
||||
/*
|
||||
* Invoked from PMU callback functions to set PMI bit in the paca.
|
||||
* This has to be called with irq's disabled (via hard_irq_disable()).
|
||||
*/
|
||||
if (IS_ENABLED(CONFIG_PPC_IRQ_SOFT_MASK_DEBUG))
|
||||
WARN_ON_ONCE(mfmsr() & MSR_EE);
|
||||
|
||||
get_paca()->irq_happened |= PACA_IRQ_PMI;
|
||||
}
|
||||
|
||||
static inline void clear_pmi_irq_pending(void)
|
||||
{
|
||||
/*
|
||||
* Invoked from PMU callback functions to clear the pending PMI bit
|
||||
* in the paca.
|
||||
*/
|
||||
if (IS_ENABLED(CONFIG_PPC_IRQ_SOFT_MASK_DEBUG))
|
||||
WARN_ON_ONCE(mfmsr() & MSR_EE);
|
||||
|
||||
get_paca()->irq_happened &= ~PACA_IRQ_PMI;
|
||||
}
|
||||
|
||||
static inline bool pmi_irq_pending(void)
|
||||
{
|
||||
/*
|
||||
* Invoked from PMU callback functions to check if there is a pending
|
||||
* PMI bit in the paca.
|
||||
*/
|
||||
if (get_paca()->irq_happened & PACA_IRQ_PMI)
|
||||
return true;
|
||||
|
||||
return false;
|
||||
}
|
||||
|
||||
#ifdef CONFIG_PPC_BOOK3S
|
||||
/*
|
||||
* To support disabling and enabling of irq with PMI, set of
|
||||
@@ -296,6 +332,10 @@ extern void irq_set_pending_from_srr1(unsigned long srr1);
|
||||
|
||||
extern void force_external_irq_replay(void);
|
||||
|
||||
static inline void irq_soft_mask_regs_set_state(struct pt_regs *regs, unsigned long val)
|
||||
{
|
||||
regs->softe = val;
|
||||
}
|
||||
#else /* CONFIG_PPC64 */
|
||||
|
||||
static inline unsigned long arch_local_save_flags(void)
|
||||
@@ -364,6 +404,13 @@ static inline bool arch_irq_disabled_regs(struct pt_regs *regs)
|
||||
|
||||
static inline void may_hard_irq_enable(void) { }
|
||||
|
||||
static inline void clear_pmi_irq_pending(void) { }
|
||||
static inline void set_pmi_irq_pending(void) { }
|
||||
static inline bool pmi_irq_pending(void) { return false; }
|
||||
|
||||
static inline void irq_soft_mask_regs_set_state(struct pt_regs *regs, unsigned long val)
|
||||
{
|
||||
}
|
||||
#endif /* CONFIG_PPC64 */
|
||||
|
||||
#define ARCH_IRQ_INIT_FLAGS IRQ_NOREQUEST
|
||||
|
||||
@@ -865,6 +865,7 @@
|
||||
#define MMCR0_BHRBA 0x00200000UL /* BHRB Access allowed in userspace */
|
||||
#define MMCR0_EBE 0x00100000UL /* Event based branch enable */
|
||||
#define MMCR0_PMCC 0x000c0000UL /* PMC control */
|
||||
#define MMCR0_PMCCEXT ASM_CONST(0x00000200) /* PMCCEXT control */
|
||||
#define MMCR0_PMCC_U6 0x00080000UL /* PMC1-6 are R/W by user (PR) */
|
||||
#define MMCR0_PMC1CE 0x00008000UL /* PMC1 count enable*/
|
||||
#define MMCR0_PMCjCE ASM_CONST(0x00004000) /* PMCj count enable*/
|
||||
|
||||
@@ -241,8 +241,10 @@ int __init btext_find_display(int allow_nonstdout)
|
||||
rc = btext_initialize(np);
|
||||
printk("result: %d\n", rc);
|
||||
}
|
||||
if (rc == 0)
|
||||
if (rc == 0) {
|
||||
of_node_put(np);
|
||||
break;
|
||||
}
|
||||
}
|
||||
return rc;
|
||||
}
|
||||
|
||||
@@ -1,252 +0,0 @@
|
||||
/* SPDX-License-Identifier: GPL-2.0-or-later */
|
||||
/*
|
||||
* This file contains low level CPU setup functions.
|
||||
* Copyright (C) 2003 Benjamin Herrenschmidt (benh@kernel.crashing.org)
|
||||
*/
|
||||
|
||||
#include <asm/processor.h>
|
||||
#include <asm/page.h>
|
||||
#include <asm/cputable.h>
|
||||
#include <asm/ppc_asm.h>
|
||||
#include <asm/asm-offsets.h>
|
||||
#include <asm/cache.h>
|
||||
#include <asm/book3s/64/mmu-hash.h>
|
||||
|
||||
/* Entry: r3 = crap, r4 = ptr to cputable entry
|
||||
*
|
||||
* Note that we can be called twice for pseudo-PVRs
|
||||
*/
|
||||
_GLOBAL(__setup_cpu_power7)
|
||||
mflr r11
|
||||
bl __init_hvmode_206
|
||||
mtlr r11
|
||||
beqlr
|
||||
li r0,0
|
||||
mtspr SPRN_LPID,r0
|
||||
LOAD_REG_IMMEDIATE(r0, PCR_MASK)
|
||||
mtspr SPRN_PCR,r0
|
||||
mfspr r3,SPRN_LPCR
|
||||
li r4,(LPCR_LPES1 >> LPCR_LPES_SH)
|
||||
bl __init_LPCR_ISA206
|
||||
mtlr r11
|
||||
blr
|
||||
|
||||
_GLOBAL(__restore_cpu_power7)
|
||||
mflr r11
|
||||
mfmsr r3
|
||||
rldicl. r0,r3,4,63
|
||||
beqlr
|
||||
li r0,0
|
||||
mtspr SPRN_LPID,r0
|
||||
LOAD_REG_IMMEDIATE(r0, PCR_MASK)
|
||||
mtspr SPRN_PCR,r0
|
||||
mfspr r3,SPRN_LPCR
|
||||
li r4,(LPCR_LPES1 >> LPCR_LPES_SH)
|
||||
bl __init_LPCR_ISA206
|
||||
mtlr r11
|
||||
blr
|
||||
|
||||
_GLOBAL(__setup_cpu_power8)
|
||||
mflr r11
|
||||
bl __init_FSCR
|
||||
bl __init_PMU
|
||||
bl __init_PMU_ISA207
|
||||
bl __init_hvmode_206
|
||||
mtlr r11
|
||||
beqlr
|
||||
li r0,0
|
||||
mtspr SPRN_LPID,r0
|
||||
LOAD_REG_IMMEDIATE(r0, PCR_MASK)
|
||||
mtspr SPRN_PCR,r0
|
||||
mfspr r3,SPRN_LPCR
|
||||
ori r3, r3, LPCR_PECEDH
|
||||
li r4,0 /* LPES = 0 */
|
||||
bl __init_LPCR_ISA206
|
||||
bl __init_HFSCR
|
||||
bl __init_PMU_HV
|
||||
bl __init_PMU_HV_ISA207
|
||||
mtlr r11
|
||||
blr
|
||||
|
||||
_GLOBAL(__restore_cpu_power8)
|
||||
mflr r11
|
||||
bl __init_FSCR
|
||||
bl __init_PMU
|
||||
bl __init_PMU_ISA207
|
||||
mfmsr r3
|
||||
rldicl. r0,r3,4,63
|
||||
mtlr r11
|
||||
beqlr
|
||||
li r0,0
|
||||
mtspr SPRN_LPID,r0
|
||||
LOAD_REG_IMMEDIATE(r0, PCR_MASK)
|
||||
mtspr SPRN_PCR,r0
|
||||
mfspr r3,SPRN_LPCR
|
||||
ori r3, r3, LPCR_PECEDH
|
||||
li r4,0 /* LPES = 0 */
|
||||
bl __init_LPCR_ISA206
|
||||
bl __init_HFSCR
|
||||
bl __init_PMU_HV
|
||||
bl __init_PMU_HV_ISA207
|
||||
mtlr r11
|
||||
blr
|
||||
|
||||
_GLOBAL(__setup_cpu_power10)
|
||||
mflr r11
|
||||
bl __init_FSCR_power10
|
||||
bl __init_PMU
|
||||
bl __init_PMU_ISA31
|
||||
b 1f
|
||||
|
||||
_GLOBAL(__setup_cpu_power9)
|
||||
mflr r11
|
||||
bl __init_FSCR_power9
|
||||
bl __init_PMU
|
||||
1: bl __init_hvmode_206
|
||||
mtlr r11
|
||||
beqlr
|
||||
li r0,0
|
||||
mtspr SPRN_PSSCR,r0
|
||||
mtspr SPRN_LPID,r0
|
||||
mtspr SPRN_PID,r0
|
||||
LOAD_REG_IMMEDIATE(r0, PCR_MASK)
|
||||
mtspr SPRN_PCR,r0
|
||||
mfspr r3,SPRN_LPCR
|
||||
LOAD_REG_IMMEDIATE(r4, LPCR_PECEDH | LPCR_PECE_HVEE | LPCR_HVICE | LPCR_HEIC)
|
||||
or r3, r3, r4
|
||||
LOAD_REG_IMMEDIATE(r4, LPCR_UPRT | LPCR_HR)
|
||||
andc r3, r3, r4
|
||||
li r4,0 /* LPES = 0 */
|
||||
bl __init_LPCR_ISA300
|
||||
bl __init_HFSCR
|
||||
bl __init_PMU_HV
|
||||
mtlr r11
|
||||
blr
|
||||
|
||||
_GLOBAL(__restore_cpu_power10)
|
||||
mflr r11
|
||||
bl __init_FSCR_power10
|
||||
bl __init_PMU
|
||||
bl __init_PMU_ISA31
|
||||
b 1f
|
||||
|
||||
_GLOBAL(__restore_cpu_power9)
|
||||
mflr r11
|
||||
bl __init_FSCR_power9
|
||||
bl __init_PMU
|
||||
1: mfmsr r3
|
||||
rldicl. r0,r3,4,63
|
||||
mtlr r11
|
||||
beqlr
|
||||
li r0,0
|
||||
mtspr SPRN_PSSCR,r0
|
||||
mtspr SPRN_LPID,r0
|
||||
mtspr SPRN_PID,r0
|
||||
LOAD_REG_IMMEDIATE(r0, PCR_MASK)
|
||||
mtspr SPRN_PCR,r0
|
||||
mfspr r3,SPRN_LPCR
|
||||
LOAD_REG_IMMEDIATE(r4, LPCR_PECEDH | LPCR_PECE_HVEE | LPCR_HVICE | LPCR_HEIC)
|
||||
or r3, r3, r4
|
||||
LOAD_REG_IMMEDIATE(r4, LPCR_UPRT | LPCR_HR)
|
||||
andc r3, r3, r4
|
||||
li r4,0 /* LPES = 0 */
|
||||
bl __init_LPCR_ISA300
|
||||
bl __init_HFSCR
|
||||
bl __init_PMU_HV
|
||||
mtlr r11
|
||||
blr
|
||||
|
||||
__init_hvmode_206:
|
||||
/* Disable CPU_FTR_HVMODE and exit if MSR:HV is not set */
|
||||
mfmsr r3
|
||||
rldicl. r0,r3,4,63
|
||||
bnelr
|
||||
ld r5,CPU_SPEC_FEATURES(r4)
|
||||
LOAD_REG_IMMEDIATE(r6,CPU_FTR_HVMODE | CPU_FTR_P9_TM_HV_ASSIST)
|
||||
andc r5,r5,r6
|
||||
std r5,CPU_SPEC_FEATURES(r4)
|
||||
blr
|
||||
|
||||
__init_LPCR_ISA206:
|
||||
/* Setup a sane LPCR:
|
||||
* Called with initial LPCR in R3 and desired LPES 2-bit value in R4
|
||||
*
|
||||
* LPES = 0b01 (HSRR0/1 used for 0x500)
|
||||
* PECE = 0b111
|
||||
* DPFD = 4
|
||||
* HDICE = 0
|
||||
* VC = 0b100 (VPM0=1, VPM1=0, ISL=0)
|
||||
* VRMASD = 0b10000 (L=1, LP=00)
|
||||
*
|
||||
* Other bits untouched for now
|
||||
*/
|
||||
li r5,0x10
|
||||
rldimi r3,r5, LPCR_VRMASD_SH, 64-LPCR_VRMASD_SH-5
|
||||
|
||||
/* POWER9 has no VRMASD */
|
||||
__init_LPCR_ISA300:
|
||||
rldimi r3,r4, LPCR_LPES_SH, 64-LPCR_LPES_SH-2
|
||||
ori r3,r3,(LPCR_PECE0|LPCR_PECE1|LPCR_PECE2)
|
||||
li r5,4
|
||||
rldimi r3,r5, LPCR_DPFD_SH, 64-LPCR_DPFD_SH-3
|
||||
clrrdi r3,r3,1 /* clear HDICE */
|
||||
li r5,4
|
||||
rldimi r3,r5, LPCR_VC_SH, 0
|
||||
mtspr SPRN_LPCR,r3
|
||||
isync
|
||||
blr
|
||||
|
||||
__init_FSCR_power10:
|
||||
mfspr r3, SPRN_FSCR
|
||||
ori r3, r3, FSCR_PREFIX
|
||||
mtspr SPRN_FSCR, r3
|
||||
// fall through
|
||||
|
||||
__init_FSCR_power9:
|
||||
mfspr r3, SPRN_FSCR
|
||||
ori r3, r3, FSCR_SCV
|
||||
mtspr SPRN_FSCR, r3
|
||||
// fall through
|
||||
|
||||
__init_FSCR:
|
||||
mfspr r3,SPRN_FSCR
|
||||
ori r3,r3,FSCR_TAR|FSCR_EBB
|
||||
mtspr SPRN_FSCR,r3
|
||||
blr
|
||||
|
||||
__init_HFSCR:
|
||||
mfspr r3,SPRN_HFSCR
|
||||
ori r3,r3,HFSCR_TAR|HFSCR_TM|HFSCR_BHRB|HFSCR_PM|\
|
||||
HFSCR_DSCR|HFSCR_VECVSX|HFSCR_FP|HFSCR_EBB|HFSCR_MSGP
|
||||
mtspr SPRN_HFSCR,r3
|
||||
blr
|
||||
|
||||
__init_PMU_HV:
|
||||
li r5,0
|
||||
mtspr SPRN_MMCRC,r5
|
||||
blr
|
||||
|
||||
__init_PMU_HV_ISA207:
|
||||
li r5,0
|
||||
mtspr SPRN_MMCRH,r5
|
||||
blr
|
||||
|
||||
__init_PMU:
|
||||
li r5,0
|
||||
mtspr SPRN_MMCRA,r5
|
||||
mtspr SPRN_MMCR0,r5
|
||||
mtspr SPRN_MMCR1,r5
|
||||
mtspr SPRN_MMCR2,r5
|
||||
blr
|
||||
|
||||
__init_PMU_ISA207:
|
||||
li r5,0
|
||||
mtspr SPRN_MMCRS,r5
|
||||
blr
|
||||
|
||||
__init_PMU_ISA31:
|
||||
li r5,0
|
||||
mtspr SPRN_MMCR3,r5
|
||||
LOAD_REG_IMMEDIATE(r5, MMCRA_BHRB_DISABLE)
|
||||
mtspr SPRN_MMCRA,r5
|
||||
blr
|
||||
arch/powerpc/kernel/cpu_setup_power.c (new file, +272 lines)
@@ -0,0 +1,272 @@
|
||||
// SPDX-License-Identifier: GPL-2.0-or-later
|
||||
/*
|
||||
* Copyright 2020, Jordan Niethe, IBM Corporation.
|
||||
*
|
||||
* This file contains low level CPU setup functions.
|
||||
* Originally written in assembly by Benjamin Herrenschmidt & various other
|
||||
* authors.
|
||||
*/
|
||||
|
||||
#include <asm/reg.h>
|
||||
#include <asm/synch.h>
|
||||
#include <linux/bitops.h>
|
||||
#include <asm/cputable.h>
|
||||
#include <asm/cpu_setup_power.h>
|
||||
|
||||
/* Disable CPU_FTR_HVMODE and return false if MSR:HV is not set */
|
||||
static bool init_hvmode_206(struct cpu_spec *t)
|
||||
{
|
||||
u64 msr;
|
||||
|
||||
msr = mfmsr();
|
||||
if (msr & MSR_HV)
|
||||
return true;
|
||||
|
||||
t->cpu_features &= ~(CPU_FTR_HVMODE | CPU_FTR_P9_TM_HV_ASSIST);
|
||||
return false;
|
||||
}
|
||||
|
||||
static void init_LPCR_ISA300(u64 lpcr, u64 lpes)
|
||||
{
|
||||
/* POWER9 has no VRMASD */
|
||||
lpcr |= (lpes << LPCR_LPES_SH) & LPCR_LPES;
|
||||
lpcr |= LPCR_PECE0|LPCR_PECE1|LPCR_PECE2;
|
||||
lpcr |= (4ull << LPCR_DPFD_SH) & LPCR_DPFD;
|
||||
lpcr &= ~LPCR_HDICE; /* clear HDICE */
|
||||
lpcr |= (4ull << LPCR_VC_SH);
|
||||
mtspr(SPRN_LPCR, lpcr);
|
||||
isync();
|
||||
}
|
||||
|
||||
/*
|
||||
* Setup a sane LPCR:
|
||||
* Called with initial LPCR and desired LPES 2-bit value
|
||||
*
|
||||
* LPES = 0b01 (HSRR0/1 used for 0x500)
|
||||
* PECE = 0b111
|
||||
* DPFD = 4
|
||||
* HDICE = 0
|
||||
* VC = 0b100 (VPM0=1, VPM1=0, ISL=0)
 * VRMASD = 0b10000 (L=1, LP=00)
 *
 * Other bits untouched for now
 */
static void init_LPCR_ISA206(u64 lpcr, u64 lpes)
{
        lpcr |= (0x10ull << LPCR_VRMASD_SH) & LPCR_VRMASD;
        init_LPCR_ISA300(lpcr, lpes);
}

static void init_FSCR(void)
{
        u64 fscr;

        fscr = mfspr(SPRN_FSCR);
        fscr |= FSCR_TAR|FSCR_EBB;
        mtspr(SPRN_FSCR, fscr);
}

static void init_FSCR_power9(void)
{
        u64 fscr;

        fscr = mfspr(SPRN_FSCR);
        fscr |= FSCR_SCV;
        mtspr(SPRN_FSCR, fscr);
        init_FSCR();
}

static void init_FSCR_power10(void)
{
        u64 fscr;

        fscr = mfspr(SPRN_FSCR);
        fscr |= FSCR_PREFIX;
        mtspr(SPRN_FSCR, fscr);
        init_FSCR_power9();
}

static void init_HFSCR(void)
{
        u64 hfscr;

        hfscr = mfspr(SPRN_HFSCR);
        hfscr |= HFSCR_TAR|HFSCR_TM|HFSCR_BHRB|HFSCR_PM|HFSCR_DSCR|\
                 HFSCR_VECVSX|HFSCR_FP|HFSCR_EBB|HFSCR_MSGP;
        mtspr(SPRN_HFSCR, hfscr);
}

static void init_PMU_HV(void)
{
        mtspr(SPRN_MMCRC, 0);
}

static void init_PMU_HV_ISA207(void)
{
        mtspr(SPRN_MMCRH, 0);
}

static void init_PMU(void)
{
        mtspr(SPRN_MMCRA, 0);
        mtspr(SPRN_MMCR0, 0);
        mtspr(SPRN_MMCR1, 0);
        mtspr(SPRN_MMCR2, 0);
}

static void init_PMU_ISA207(void)
{
        mtspr(SPRN_MMCRS, 0);
}

static void init_PMU_ISA31(void)
{
        mtspr(SPRN_MMCR3, 0);
        mtspr(SPRN_MMCRA, MMCRA_BHRB_DISABLE);
        mtspr(SPRN_MMCR0, MMCR0_PMCCEXT);
}

/*
 * Note that we can be called twice of pseudo-PVRs.
 * The parameter offset is not used.
 */

void __setup_cpu_power7(unsigned long offset, struct cpu_spec *t)
{
        if (!init_hvmode_206(t))
                return;

        mtspr(SPRN_LPID, 0);
        mtspr(SPRN_PCR, PCR_MASK);
        init_LPCR_ISA206(mfspr(SPRN_LPCR), LPCR_LPES1 >> LPCR_LPES_SH);
}

void __restore_cpu_power7(void)
{
        u64 msr;

        msr = mfmsr();
        if (!(msr & MSR_HV))
                return;

        mtspr(SPRN_LPID, 0);
        mtspr(SPRN_PCR, PCR_MASK);
        init_LPCR_ISA206(mfspr(SPRN_LPCR), LPCR_LPES1 >> LPCR_LPES_SH);
}

void __setup_cpu_power8(unsigned long offset, struct cpu_spec *t)
{
        init_FSCR();
        init_PMU();
        init_PMU_ISA207();

        if (!init_hvmode_206(t))
                return;

        mtspr(SPRN_LPID, 0);
        mtspr(SPRN_PCR, PCR_MASK);
        init_LPCR_ISA206(mfspr(SPRN_LPCR) | LPCR_PECEDH, 0); /* LPES = 0 */
        init_HFSCR();
        init_PMU_HV();
        init_PMU_HV_ISA207();
}

void __restore_cpu_power8(void)
{
        u64 msr;

        init_FSCR();
        init_PMU();
        init_PMU_ISA207();

        msr = mfmsr();
        if (!(msr & MSR_HV))
                return;

        mtspr(SPRN_LPID, 0);
        mtspr(SPRN_PCR, PCR_MASK);
        init_LPCR_ISA206(mfspr(SPRN_LPCR) | LPCR_PECEDH, 0); /* LPES = 0 */
        init_HFSCR();
        init_PMU_HV();
        init_PMU_HV_ISA207();
}

void __setup_cpu_power9(unsigned long offset, struct cpu_spec *t)
{
        init_FSCR_power9();
        init_PMU();

        if (!init_hvmode_206(t))
                return;

        mtspr(SPRN_PSSCR, 0);
        mtspr(SPRN_LPID, 0);
        mtspr(SPRN_PID, 0);
        mtspr(SPRN_PCR, PCR_MASK);
        init_LPCR_ISA300((mfspr(SPRN_LPCR) | LPCR_PECEDH | LPCR_PECE_HVEE |\
                          LPCR_HVICE | LPCR_HEIC) & ~(LPCR_UPRT | LPCR_HR), 0);
        init_HFSCR();
        init_PMU_HV();
}

void __restore_cpu_power9(void)
{
        u64 msr;

        init_FSCR_power9();
        init_PMU();

        msr = mfmsr();
        if (!(msr & MSR_HV))
                return;

        mtspr(SPRN_PSSCR, 0);
        mtspr(SPRN_LPID, 0);
        mtspr(SPRN_PID, 0);
        mtspr(SPRN_PCR, PCR_MASK);
        init_LPCR_ISA300((mfspr(SPRN_LPCR) | LPCR_PECEDH | LPCR_PECE_HVEE |\
                          LPCR_HVICE | LPCR_HEIC) & ~(LPCR_UPRT | LPCR_HR), 0);
        init_HFSCR();
        init_PMU_HV();
}

void __setup_cpu_power10(unsigned long offset, struct cpu_spec *t)
{
        init_FSCR_power10();
        init_PMU();
        init_PMU_ISA31();

        if (!init_hvmode_206(t))
                return;

        mtspr(SPRN_PSSCR, 0);
        mtspr(SPRN_LPID, 0);
        mtspr(SPRN_PID, 0);
        mtspr(SPRN_PCR, PCR_MASK);
        init_LPCR_ISA300((mfspr(SPRN_LPCR) | LPCR_PECEDH | LPCR_PECE_HVEE |\
                          LPCR_HVICE | LPCR_HEIC) & ~(LPCR_UPRT | LPCR_HR), 0);
        init_HFSCR();
        init_PMU_HV();
}

void __restore_cpu_power10(void)
{
        u64 msr;

        init_FSCR_power10();
        init_PMU();
        init_PMU_ISA31();

        msr = mfmsr();
        if (!(msr & MSR_HV))
                return;

        mtspr(SPRN_PSSCR, 0);
        mtspr(SPRN_LPID, 0);
        mtspr(SPRN_PID, 0);
        mtspr(SPRN_PCR, PCR_MASK);
        init_LPCR_ISA300((mfspr(SPRN_LPCR) | LPCR_PECEDH | LPCR_PECE_HVEE |\
                          LPCR_HVICE | LPCR_HEIC) & ~(LPCR_UPRT | LPCR_HR), 0);
        init_HFSCR();
        init_PMU_HV();
}

@@ -60,19 +60,15 @@ extern void __setup_cpu_7410(unsigned long offset, struct cpu_spec* spec);
extern void __setup_cpu_745x(unsigned long offset, struct cpu_spec* spec);
#endif /* CONFIG_PPC32 */
#ifdef CONFIG_PPC64
#include <asm/cpu_setup_power.h>
extern void __setup_cpu_ppc970(unsigned long offset, struct cpu_spec* spec);
extern void __setup_cpu_ppc970MP(unsigned long offset, struct cpu_spec* spec);
extern void __setup_cpu_pa6t(unsigned long offset, struct cpu_spec* spec);
extern void __restore_cpu_pa6t(void);
extern void __restore_cpu_ppc970(void);
extern void __setup_cpu_power7(unsigned long offset, struct cpu_spec* spec);
extern void __restore_cpu_power7(void);
extern void __setup_cpu_power8(unsigned long offset, struct cpu_spec* spec);
extern void __restore_cpu_power8(void);
extern void __setup_cpu_power9(unsigned long offset, struct cpu_spec* spec);
extern void __restore_cpu_power9(void);
extern void __setup_cpu_power10(unsigned long offset, struct cpu_spec* spec);
extern void __restore_cpu_power10(void);
extern long __machine_check_early_realmode_p7(struct pt_regs *regs);
extern long __machine_check_early_realmode_p8(struct pt_regs *regs);
extern long __machine_check_early_realmode_p9(struct pt_regs *regs);
#endif /* CONFIG_PPC64 */
#if defined(CONFIG_E500)
extern void __setup_cpu_e5500(unsigned long offset, struct cpu_spec* spec);

@@ -454,6 +454,7 @@ static void init_pmu_power10(void)

        mtspr(SPRN_MMCR3, 0);
        mtspr(SPRN_MMCRA, MMCRA_BHRB_DISABLE);
        mtspr(SPRN_MMCR0, MMCR0_PMCCEXT);
}

static int __init feat_enable_pmu_power10(struct dt_cpu_feature *f)

@@ -1641,6 +1641,14 @@ int __init setup_fadump(void)
        else if (fw_dump.reserve_dump_area_size)
                fw_dump.ops->fadump_init_mem_struct(&fw_dump);

        /*
         * In case of panic, fadump is triggered via ppc_panic_event()
         * panic notifier. Setting crash_kexec_post_notifiers to 'true'
         * lets panic() function take crash friendly path before panic
         * notifiers are invoked.
         */
        crash_kexec_post_notifiers = true;

        return 1;
}
subsys_initcall(setup_fadump);

@@ -27,6 +27,7 @@

#include <linux/init.h>
#include <linux/pgtable.h>
#include <linux/sizes.h>
#include <asm/processor.h>
#include <asm/page.h>
#include <asm/mmu.h>
@@ -626,7 +627,7 @@ start_here:
        b .     /* prevent prefetch past rfi */

/* Set up the initial MMU state so we can do the first level of
 * kernel initialization. This maps the first 16 MBytes of memory 1:1
 * kernel initialization. This maps the first 32 MBytes of memory 1:1
 * virtual to physical and more importantly sets the cache mode.
 */
initial_mmu:
@@ -663,6 +664,12 @@ initial_mmu:
        tlbwe   r4,r0,TLB_DATA          /* Load the data portion of the entry */
        tlbwe   r3,r0,TLB_TAG           /* Load the tag portion of the entry */

        li      r0,62                   /* TLB slot 62 */
        addis   r4,r4,SZ_16M@h
        addis   r3,r3,SZ_16M@h
        tlbwe   r4,r0,TLB_DATA          /* Load the data portion of the entry */
        tlbwe   r3,r0,TLB_TAG           /* Load the tag portion of the entry */

        isync

/* Establish the exception vector base

@@ -2956,7 +2956,7 @@ static void __init fixup_device_tree_efika_add_phy(void)

        /* Check if the phy-handle property exists - bail if it does */
        rv = prom_getprop(node, "phy-handle", prop, sizeof(prop));
        if (!rv)
        if (rv <= 0)
                return;

        /*

@@ -60,6 +60,7 @@
#include <asm/cpu_has_feature.h>
#include <asm/ftrace.h>
#include <asm/kup.h>
#include <asm/fadump.h>

#ifdef DEBUG
#include <asm/udbg.h>
@@ -594,6 +595,45 @@ void crash_send_ipi(void (*crash_ipi_callback)(struct pt_regs *))
}
#endif

#ifdef CONFIG_NMI_IPI
static void crash_stop_this_cpu(struct pt_regs *regs)
#else
static void crash_stop_this_cpu(void *dummy)
#endif
{
        /*
         * Just busy wait here and avoid marking CPU as offline to ensure
         * register data is captured appropriately.
         */
        while (1)
                cpu_relax();
}

void crash_smp_send_stop(void)
{
        static bool stopped = false;

        /*
         * In case of fadump, register data for all CPUs is captured by f/w
         * on ibm,os-term rtas call. Skip IPI callbacks to other CPUs before
         * this rtas call to avoid tricky post processing of those CPUs'
         * backtraces.
         */
        if (should_fadump_crash())
                return;

        if (stopped)
                return;

        stopped = true;

#ifdef CONFIG_NMI_IPI
        smp_send_nmi_ipi(NMI_IPI_ALL_OTHERS, crash_stop_this_cpu, 1000000);
#else
        smp_call_function(crash_stop_this_cpu, NULL, 0);
#endif /* CONFIG_NMI_IPI */
}

#ifdef CONFIG_NMI_IPI
static void nmi_stop_this_cpu(struct pt_regs *regs)
{
@@ -1488,10 +1528,12 @@ void start_secondary(void *unused)
        BUG();
}

#ifdef CONFIG_PROFILING
int setup_profiling_timer(unsigned int multiplier)
{
        return 0;
}
#endif

static void fixup_topology(void)
{

@@ -1922,11 +1922,40 @@ void vsx_unavailable_tm(struct pt_regs *regs)
}
#endif /* CONFIG_PPC_TRANSACTIONAL_MEM */

void performance_monitor_exception(struct pt_regs *regs)
static void performance_monitor_exception_nmi(struct pt_regs *regs)
{
        nmi_enter();

        __this_cpu_inc(irq_stat.pmu_irqs);

        perf_irq(regs);

        nmi_exit();
}

static void performance_monitor_exception_async(struct pt_regs *regs)
{
        irq_enter();

        __this_cpu_inc(irq_stat.pmu_irqs);

        perf_irq(regs);

        irq_exit();
}

void performance_monitor_exception(struct pt_regs *regs)
{
        /*
         * On 64-bit, if perf interrupts hit in a local_irq_disable
         * (soft-masked) region, we consider them as NMIs. This is required to
         * prevent hash faults on user addresses when reading callchains (and
         * looks better from an irq tracing perspective).
         */
        if (IS_ENABLED(CONFIG_PPC64) && unlikely(arch_irq_disabled_regs(regs)))
                performance_monitor_exception_nmi(regs);
        else
                performance_monitor_exception_async(regs);
}

#ifdef CONFIG_PPC_ADV_DEBUG_REGS

@@ -132,6 +132,10 @@ static void set_cpumask_stuck(const struct cpumask *cpumask, u64 tb)
{
        cpumask_or(&wd_smp_cpus_stuck, &wd_smp_cpus_stuck, cpumask);
        cpumask_andnot(&wd_smp_cpus_pending, &wd_smp_cpus_pending, cpumask);
        /*
         * See wd_smp_clear_cpu_pending()
         */
        smp_mb();
        if (cpumask_empty(&wd_smp_cpus_pending)) {
                wd_smp_last_reset_tb = tb;
                cpumask_andnot(&wd_smp_cpus_pending,
@@ -217,13 +221,44 @@ static void wd_smp_clear_cpu_pending(int cpu, u64 tb)

                        cpumask_clear_cpu(cpu, &wd_smp_cpus_stuck);
                        wd_smp_unlock(&flags);
                } else {
                        /*
                         * The last CPU to clear pending should have reset the
                         * watchdog so we generally should not find it empty
                         * here if our CPU was clear. However it could happen
                         * due to a rare race with another CPU taking the
                         * last CPU out of the mask concurrently.
                         *
                         * We can't add a warning for it. But just in case
                         * there is a problem with the watchdog that is causing
                         * the mask to not be reset, try to kick it along here.
                         */
                        if (unlikely(cpumask_empty(&wd_smp_cpus_pending)))
                                goto none_pending;
                }
                return;
        }

        cpumask_clear_cpu(cpu, &wd_smp_cpus_pending);

        /*
         * Order the store to clear pending with the load(s) to check all
         * words in the pending mask to check they are all empty. This orders
         * with the same barrier on another CPU. This prevents two CPUs
         * clearing the last 2 pending bits, but neither seeing the other's
         * store when checking if the mask is empty, and missing an empty
         * mask, which ends with a false positive.
         */
        smp_mb();
        if (cpumask_empty(&wd_smp_cpus_pending)) {
                unsigned long flags;

none_pending:
                /*
                 * Double check under lock because more than one CPU could see
                 * a clear mask with the lockless check after clearing their
                 * pending bits.
                 */
                wd_smp_lock(&flags);
                if (cpumask_empty(&wd_smp_cpus_pending)) {
                        wd_smp_last_reset_tb = tb;
@@ -314,8 +349,12 @@ void arch_touch_nmi_watchdog(void)
{
        unsigned long ticks = tb_ticks_per_usec * wd_timer_period_ms * 1000;
        int cpu = smp_processor_id();
        u64 tb = get_tb();
        u64 tb;

        if (!cpumask_test_cpu(cpu, &watchdog_cpumask))
                return;

        tb = get_tb();
        if (tb - per_cpu(wd_timer_tb, cpu) >= ticks) {
                per_cpu(wd_timer_tb, cpu) = tb;
                wd_smp_clear_cpu_pending(cpu, tb);

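A minimal user-space sketch of the ordering rule the wd_smp_clear_cpu_pending() comment above describes, written with POSIX threads and C11 atomics rather than kernel primitives; all names here are invented for the illustration. The pending mask can span several words, so "clear my bit" and "is the whole mask empty?" touch different memory; each thread clears its own flag, issues a full fence (the analogue of smp_mb()), and only then checks the other flag. Without the fences, both threads could still see the other flag as set and neither would notice that the mask just became empty.

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

static atomic_int pending[2] = { 1, 1 };        /* one "word" per CPU */

static void *clear_and_check(void *arg)
{
        int me = *(int *)arg;
        int other = 1 - me;

        /* clear my own bit ... */
        atomic_store_explicit(&pending[me], 0, memory_order_relaxed);
        /* ... full barrier, the analogue of smp_mb() ... */
        atomic_thread_fence(memory_order_seq_cst);
        /* ... then check the other word of the mask */
        if (atomic_load_explicit(&pending[other], memory_order_relaxed) == 0)
                printf("thread %d saw the mask go empty\n", me);
        return NULL;
}

int main(void)
{
        int ids[2] = { 0, 1 };
        pthread_t t[2];

        for (int i = 0; i < 2; i++)
                pthread_create(&t[i], NULL, clear_and_check, &ids[i]);
        for (int i = 0; i < 2; i++)
                pthread_join(t[i], NULL);
        return 0;
}

With the fences in place at least one of the two threads is guaranteed to observe the empty mask, which is exactly the guarantee the watchdog relies on to reset wd_smp_last_reset_tb.
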
@@ -4557,8 +4557,12 @@ static int kvmppc_core_prepare_memory_region_hv(struct kvm *kvm,
        unsigned long npages = mem->memory_size >> PAGE_SHIFT;

        if (change == KVM_MR_CREATE) {
                slot->arch.rmap = vzalloc(array_size(npages,
                                          sizeof(*slot->arch.rmap)));
                unsigned long size = array_size(npages, sizeof(*slot->arch.rmap));

                if ((size >> PAGE_SHIFT) > totalram_pages())
                        return -ENOMEM;

                slot->arch.rmap = vzalloc(size);
                if (!slot->arch.rmap)
                        return -ENOMEM;
        }

@@ -510,7 +510,7 @@ long kvmhv_copy_tofrom_guest_nested(struct kvm_vcpu *vcpu)
        if (eaddr & (0xFFFUL << 52))
                return H_PARAMETER;

        buf = kzalloc(n, GFP_KERNEL);
        buf = kzalloc(n, GFP_KERNEL | __GFP_NOWARN);
        if (!buf)
                return H_NO_MEM;

@@ -1152,7 +1152,7 @@ int pud_set_huge(pud_t *pud, phys_addr_t addr, pgprot_t prot)

int pud_clear_huge(pud_t *pud)
{
        if (pud_huge(*pud)) {
        if (pud_is_leaf(*pud)) {
                pud_clear(pud);
                return 1;
        }
@@ -1199,7 +1199,7 @@ int pmd_set_huge(pmd_t *pmd, phys_addr_t addr, pgprot_t prot)

int pmd_clear_huge(pmd_t *pmd)
{
        if (pmd_huge(*pmd)) {
        if (pmd_is_leaf(*pmd)) {
                pmd_clear(pmd);
                return 1;
        }

@@ -19,7 +19,8 @@ int __init kasan_init_region(void *start, size_t size)
        block = memblock_alloc(k_size, k_size_base);

        if (block && k_size_base >= SZ_128K && k_start == ALIGN(k_start, k_size_base)) {
                int k_size_more = 1 << (ffs(k_size - k_size_base) - 1);
                int shift = ffs(k_size - k_size_base);
                int k_size_more = shift ? 1 << (shift - 1) : 0;

                setbat(-1, k_start, __pa(block), k_size_base, PAGE_KERNEL);
                if (k_size_more >= SZ_128K)

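A small user-space sketch of why the guard in the kasan hunk above is needed: ffs(0) returns 0, so the old expression "1 << (ffs(x) - 1)" turns into a shift by -1 (undefined behaviour) whenever the two sizes are equal. Checking the ffs() result first keeps the leftover size at 0 in that case. The variable names only mirror the diff; this is not the kernel code itself.

#include <stdio.h>
#include <strings.h>            /* ffs() */

static unsigned int remainder_block(unsigned int k_size, unsigned int k_size_base)
{
        int shift = ffs(k_size - k_size_base);

        /* 0 when nothing is left over, instead of an undefined 1 << -1 */
        return shift ? 1u << (shift - 1) : 0;
}

int main(void)
{
        printf("%u\n", remainder_block(768 * 1024, 512 * 1024)); /* 262144 */
        printf("%u\n", remainder_block(512 * 1024, 512 * 1024)); /* 0, no UB */
        return 0;
}
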
@@ -102,7 +102,8 @@ EXPORT_SYMBOL(__pte_frag_size_shift);
struct page *p4d_page(p4d_t p4d)
{
        if (p4d_is_leaf(p4d)) {
                VM_WARN_ON(!p4d_huge(p4d));
                if (!IS_ENABLED(CONFIG_HAVE_ARCH_HUGE_VMAP))
                        VM_WARN_ON(!p4d_huge(p4d));
                return pte_page(p4d_pte(p4d));
        }
        return virt_to_page(p4d_page_vaddr(p4d));
@@ -112,7 +113,8 @@ struct page *p4d_page(p4d_t p4d)
struct page *pud_page(pud_t pud)
{
        if (pud_is_leaf(pud)) {
                VM_WARN_ON(!pud_huge(pud));
                if (!IS_ENABLED(CONFIG_HAVE_ARCH_HUGE_VMAP))
                        VM_WARN_ON(!pud_huge(pud));
                return pte_page(pud_pte(pud));
        }
        return virt_to_page(pud_page_vaddr(pud));
@@ -125,7 +127,13 @@ struct page *pud_page(pud_t pud)
struct page *pmd_page(pmd_t pmd)
{
        if (pmd_is_leaf(pmd)) {
                VM_WARN_ON(!(pmd_large(pmd) || pmd_huge(pmd)));
                /*
                 * vmalloc_to_page may be called on any vmap address (not only
                 * vmalloc), and it uses pmd_page() etc., when huge vmap is
                 * enabled so these checks can't be used.
                 */
                if (!IS_ENABLED(CONFIG_HAVE_ARCH_HUGE_VMAP))
                        VM_WARN_ON(!(pmd_large(pmd) || pmd_huge(pmd)));
                return pte_page(pmd_pte(pmd));
        }
        return virt_to_page(pmd_page_vaddr(pmd));

@@ -95,6 +95,7 @@ static unsigned int freeze_events_kernel = MMCR0_FCS;
#define SPRN_SIER3              0
#define MMCRA_SAMPLE_ENABLE     0
#define MMCRA_BHRB_DISABLE      0
#define MMCR0_PMCCEXT           0

static inline unsigned long perf_ip_adjust(struct pt_regs *regs)
{
@@ -109,10 +110,6 @@ static inline void perf_read_regs(struct pt_regs *regs)
{
        regs->result = 0;
}
static inline int perf_intr_is_nmi(struct pt_regs *regs)
{
        return 0;
}

static inline int siar_valid(struct pt_regs *regs)
{
@@ -331,15 +328,6 @@ static inline void perf_read_regs(struct pt_regs *regs)
        regs->result = use_siar;
}

/*
 * If interrupts were soft-disabled when a PMU interrupt occurs, treat
 * it as an NMI.
 */
static inline int perf_intr_is_nmi(struct pt_regs *regs)
{
        return (regs->softe & IRQS_DISABLED);
}

/*
 * On processors like P7+ that have the SIAR-Valid bit, marked instructions
 * must be sampled only if the SIAR-valid bit is set.
@@ -817,6 +805,19 @@ static void write_pmc(int idx, unsigned long val)
        }
}

static int any_pmc_overflown(struct cpu_hw_events *cpuhw)
{
        int i, idx;

        for (i = 0; i < cpuhw->n_events; i++) {
                idx = cpuhw->event[i]->hw.idx;
                if ((idx) && ((int)read_pmc(idx) < 0))
                        return idx;
        }

        return 0;
}

/* Called from sysrq_handle_showregs() */
void perf_event_print_debug(void)
{
@@ -1240,11 +1241,16 @@ static void power_pmu_disable(struct pmu *pmu)

                /*
                 * Set the 'freeze counters' bit, clear EBE/BHRBA/PMCC/PMAO/FC56
                 * Also clear PMXE to disable PMI's getting triggered in some
                 * corner cases during PMU disable.
                 */
                val = mmcr0 = mfspr(SPRN_MMCR0);
                val |= MMCR0_FC;
                val &= ~(MMCR0_EBE | MMCR0_BHRBA | MMCR0_PMCC | MMCR0_PMAO |
                         MMCR0_FC56);
                         MMCR0_PMXE | MMCR0_FC56);
                /* Set mmcr0 PMCCEXT for p10 */
                if (ppmu->flags & PPMU_ARCH_31)
                        val |= MMCR0_PMCCEXT;

                /*
                 * The barrier is to make sure the mtspr has been
@@ -1255,6 +1261,23 @@ static void power_pmu_disable(struct pmu *pmu)
                mb();
                isync();

                /*
                 * Some corner cases could clear the PMU counter overflow
                 * while a masked PMI is pending. One such case is when
                 * a PMI happens during interrupt replay and perf counter
                 * values are cleared by PMU callbacks before replay.
                 *
                 * If any PMC corresponding to the active PMU events are
                 * overflown, disable the interrupt by clearing the paca
                 * bit for PMI since we are disabling the PMU now.
                 * Otherwise provide a warning if there is PMI pending, but
                 * no counter is found overflown.
                 */
                if (any_pmc_overflown(cpuhw))
                        clear_pmi_irq_pending();
                else
                        WARN_ON(pmi_irq_pending());

                val = mmcra = cpuhw->mmcr.mmcra;

                /*
@@ -1346,6 +1369,15 @@ static void power_pmu_enable(struct pmu *pmu)
         * (possibly updated for removal of events).
         */
        if (!cpuhw->n_added) {
                /*
                 * If there is any active event with an overflown PMC
                 * value, set back PACA_IRQ_PMI which would have been
                 * cleared in power_pmu_disable().
                 */
                hard_irq_disable();
                if (any_pmc_overflown(cpuhw))
                        set_pmi_irq_pending();

                mtspr(SPRN_MMCRA, cpuhw->mmcr.mmcra & ~MMCRA_SAMPLE_ENABLE);
                mtspr(SPRN_MMCR1, cpuhw->mmcr.mmcr1);
                if (ppmu->flags & PPMU_ARCH_31)
@@ -2250,7 +2282,6 @@ static void __perf_event_interrupt(struct pt_regs *regs)
        struct perf_event *event;
        unsigned long val[8];
        int found, active;
        int nmi;

        if (cpuhw->n_limited)
                freeze_limited_counters(cpuhw, mfspr(SPRN_PMC5),
@@ -2258,18 +2289,6 @@ static void __perf_event_interrupt(struct pt_regs *regs)

        perf_read_regs(regs);

        /*
         * If perf interrupts hit in a local_irq_disable (soft-masked) region,
         * we consider them as NMIs. This is required to prevent hash faults on
         * user addresses when reading callchains. See the NMI test in
         * do_hash_page.
         */
        nmi = perf_intr_is_nmi(regs);
        if (nmi)
                nmi_enter();
        else
                irq_enter();

        /* Read all the PMCs since we'll need them a bunch of times */
        for (i = 0; i < ppmu->n_counter; ++i)
                val[i] = read_pmc(i + 1);
@@ -2296,6 +2315,14 @@ static void __perf_event_interrupt(struct pt_regs *regs)
                                break;
                        }
                }

        /*
         * Clear PACA_IRQ_PMI in case it was set by
         * set_pmi_irq_pending() when PMU was enabled
         * after accounting for interrupts.
         */
        clear_pmi_irq_pending();

        if (!active)
                /* reset non active counters that have overflowed */
                write_pmc(i + 1, 0);
@@ -2315,8 +2342,15 @@ static void __perf_event_interrupt(struct pt_regs *regs)
                        }
                }
        }
        if (!found && !nmi && printk_ratelimit())
                printk(KERN_WARNING "Can't find PMC that caused IRQ\n");

        /*
         * During system wide profling or while specific CPU is monitored for an
         * event, some corner cases could cause PMC to overflow in idle path. This
         * will trigger a PMI after waking up from idle. Since counter values are _not_
         * saved/restored in idle path, can lead to below "Can't find PMC" message.
         */
        if (unlikely(!found) && !arch_irq_disabled_regs(regs))
                printk_ratelimited(KERN_WARNING "Can't find PMC that caused IRQ\n");

        /*
         * Reset MMCR0 to its normal value. This will set PMXE and
@@ -2326,11 +2360,6 @@ static void __perf_event_interrupt(struct pt_regs *regs)
         * we get back out of this interrupt.
         */
        write_mmcr0(cpuhw, cpuhw->mmcr.mmcr0);

        if (nmi)
                nmi_exit();
        else
                irq_exit();
}

static void perf_event_interrupt(struct pt_regs *regs)

@@ -31,19 +31,6 @@ static atomic_t num_events;
/* Used to avoid races in calling reserve/release_pmc_hardware */
static DEFINE_MUTEX(pmc_reserve_mutex);

/*
 * If interrupts were soft-disabled when a PMU interrupt occurs, treat
 * it as an NMI.
 */
static inline int perf_intr_is_nmi(struct pt_regs *regs)
{
#ifdef __powerpc64__
        return (regs->softe & IRQS_DISABLED);
#else
        return 0;
#endif
}

static void perf_event_interrupt(struct pt_regs *regs);

/*
@@ -659,13 +646,6 @@ static void perf_event_interrupt(struct pt_regs *regs)
        struct perf_event *event;
        unsigned long val;
        int found = 0;
        int nmi;

        nmi = perf_intr_is_nmi(regs);
        if (nmi)
                nmi_enter();
        else
                irq_enter();

        for (i = 0; i < ppmu->n_counter; ++i) {
                event = cpuhw->event[i];
@@ -690,11 +670,6 @@ static void perf_event_interrupt(struct pt_regs *regs)
        mtmsr(mfmsr() | MSR_PMM);
        mtpmr(PMRN_PMGC0, PMGC0_PMIE | PMGC0_FCECE);
        isync();

        if (nmi)
                nmi_exit();
        else
                irq_exit();
}

void hw_perf_event_setup(int cpu)

@@ -561,6 +561,14 @@ int isa207_compute_mmcr(u64 event[], int n_ev,
        if (!(pmc_inuse & 0x60))
                mmcr->mmcr0 |= MMCR0_FC56;

        /*
         * Set mmcr0 (PMCCEXT) for p10 which
         * will restrict access to group B registers
         * when MMCR0 PMCC=0b00.
         */
        if (cpu_has_feature(CPU_FTR_ARCH_31))
                mmcr->mmcr0 |= MMCR0_PMCCEXT;

        mmcr->mmcr1 = mmcr1;
        mmcr->mmcra = mmcra;
        mmcr->mmcr2 = mmcr2;

@@ -976,6 +976,7 @@ static int __init cell_iommu_fixed_mapping_init(void)
                if (hbase < dbase || (hend > (dbase + dsize))) {
                        pr_debug("iommu: hash window doesn't fit in"
                                 "real DMA window\n");
                        of_node_put(np);
                        return -1;
                }
        }

@@ -77,6 +77,7 @@ static int cbe_system_reset_exception(struct pt_regs *regs)
        switch (regs->msr & SRR1_WAKEMASK) {
        case SRR1_WAKEDEC:
                set_dec(1);
                break;
        case SRR1_WAKEEE:
                /*
                 * Handle these when interrupts get re-enabled and we take

@@ -215,6 +215,7 @@ void hlwd_pic_probe(void)
                        irq_set_chained_handler(cascade_virq,
                                                hlwd_pic_irq_cascade);
                        hlwd_irq_host = host;
                        of_node_put(np);
                        break;
                }
        }

@@ -582,6 +582,7 @@ static void __init kw_i2c_add(struct pmac_i2c_host_kw *host,
        bus->close = kw_i2c_close;
        bus->xfer = kw_i2c_xfer;
        mutex_init(&bus->mutex);
        lockdep_register_key(&bus->lock_key);
        lockdep_set_class(&bus->mutex, &bus->lock_key);
        if (controller == busnode)
                bus->flags = pmac_i2c_multibus;
@@ -810,6 +811,7 @@ static void __init pmu_i2c_probe(void)
                bus->hostdata = bus + 1;
                bus->xfer = pmu_i2c_xfer;
                mutex_init(&bus->mutex);
                lockdep_register_key(&bus->lock_key);
                lockdep_set_class(&bus->mutex, &bus->lock_key);
                bus->flags = pmac_i2c_multibus;
                list_add(&bus->link, &pmac_i2c_busses);
@@ -933,6 +935,7 @@ static void __init smu_i2c_probe(void)
                bus->hostdata = bus + 1;
                bus->xfer = smu_i2c_xfer;
                mutex_init(&bus->mutex);
                lockdep_register_key(&bus->lock_key);
                lockdep_set_class(&bus->mutex, &bus->lock_key);
                bus->flags = 0;
                list_add(&bus->link, &pmac_i2c_busses);

@@ -396,6 +396,7 @@ void __init opal_lpc_init(void)
                if (!of_get_property(np, "primary", NULL))
                        continue;
                opal_lpc_chip_id = of_get_ibm_chip_id(np);
                of_node_put(np);
                break;
        }
        if (opal_lpc_chip_id < 0)

@@ -538,6 +538,12 @@ static void init_cpu_char_feature_flags(struct h_cpu_char_result *result)
        if (!(result->behaviour & H_CPU_BEHAV_L1D_FLUSH_PR))
                security_ftr_clear(SEC_FTR_L1D_FLUSH_PR);

        if (result->behaviour & H_CPU_BEHAV_NO_L1D_FLUSH_ENTRY)
                security_ftr_clear(SEC_FTR_L1D_FLUSH_ENTRY);

        if (result->behaviour & H_CPU_BEHAV_NO_L1D_FLUSH_UACCESS)
                security_ftr_clear(SEC_FTR_L1D_FLUSH_UACCESS);

        if (!(result->behaviour & H_CPU_BEHAV_BNDS_CHK_SPEC_BAR))
                security_ftr_clear(SEC_FTR_BNDS_CHK_SPEC_BAR);
}

@@ -658,6 +658,9 @@ static int xive_spapr_debug_show(struct seq_file *m, void *private)
        struct xive_irq_bitmap *xibm;
        char *buf = kmalloc(PAGE_SIZE, GFP_KERNEL);

        if (!buf)
                return -ENOMEM;

        list_for_each_entry(xibm, &xive_irq_bitmaps, list) {
                memset(buf, 0, PAGE_SIZE);
                bitmap_print_to_pagebuf(true, buf, xibm->bitmap, xibm->count);

@@ -60,10 +60,11 @@ static unsigned long user_backtrace(struct perf_callchain_entry_ctx *entry,
void perf_callchain_user(struct perf_callchain_entry_ctx *entry,
                         struct pt_regs *regs)
{
        struct perf_guest_info_callbacks *guest_cbs = perf_get_guest_cbs();
        unsigned long fp = 0;

        /* RISC-V does not support perf in guest mode. */
        if (perf_guest_cbs && perf_guest_cbs->is_in_guest())
        if (guest_cbs && guest_cbs->is_in_guest())
                return;

        fp = regs->s0;
@@ -84,8 +85,10 @@ void notrace walk_stackframe(struct task_struct *task,
void perf_callchain_kernel(struct perf_callchain_entry_ctx *entry,
                           struct pt_regs *regs)
{
        struct perf_guest_info_callbacks *guest_cbs = perf_get_guest_cbs();

        /* RISC-V does not support perf in guest mode. */
        if (perf_guest_cbs && perf_guest_cbs->is_in_guest()) {
        if (guest_cbs && guest_cbs->is_in_guest()) {
                pr_warn("RISC-V does not support perf in guest mode!");
                return;
        }

@@ -2115,6 +2115,13 @@ int kvm_s390_is_stop_irq_pending(struct kvm_vcpu *vcpu)
        return test_bit(IRQ_PEND_SIGP_STOP, &li->pending_irqs);
}

int kvm_s390_is_restart_irq_pending(struct kvm_vcpu *vcpu)
{
        struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;

        return test_bit(IRQ_PEND_RESTART, &li->pending_irqs);
}

void kvm_s390_clear_stop_irq(struct kvm_vcpu *vcpu)
{
        struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;

@@ -4588,10 +4588,15 @@ int kvm_s390_vcpu_stop(struct kvm_vcpu *vcpu)
                }
        }

        /* SIGP STOP and SIGP STOP AND STORE STATUS has been fully processed */
        /*
         * Set the VCPU to STOPPED and THEN clear the interrupt flag,
         * now that the SIGP STOP and SIGP STOP AND STORE STATUS orders
         * have been fully processed. This will ensure that the VCPU
         * is kept BUSY if another VCPU is inquiring with SIGP SENSE.
         */
        kvm_s390_set_cpuflags(vcpu, CPUSTAT_STOPPED);
        kvm_s390_clear_stop_irq(vcpu);

        kvm_s390_set_cpuflags(vcpu, CPUSTAT_STOPPED);
        __disable_ibs_on_vcpu(vcpu);

        for (i = 0; i < online_vcpus; i++) {

@@ -418,6 +418,7 @@ void kvm_s390_destroy_adapters(struct kvm *kvm);
int kvm_s390_ext_call_pending(struct kvm_vcpu *vcpu);
extern struct kvm_device_ops kvm_flic_ops;
int kvm_s390_is_stop_irq_pending(struct kvm_vcpu *vcpu);
int kvm_s390_is_restart_irq_pending(struct kvm_vcpu *vcpu);
void kvm_s390_clear_stop_irq(struct kvm_vcpu *vcpu);
int kvm_s390_set_irq_state(struct kvm_vcpu *vcpu,
                           void __user *buf, int len);

@@ -288,6 +288,34 @@ static int handle_sigp_dst(struct kvm_vcpu *vcpu, u8 order_code,
        if (!dst_vcpu)
                return SIGP_CC_NOT_OPERATIONAL;

        /*
         * SIGP RESTART, SIGP STOP, and SIGP STOP AND STORE STATUS orders
         * are processed asynchronously. Until the affected VCPU finishes
         * its work and calls back into KVM to clear the (RESTART or STOP)
         * interrupt, we need to return any new non-reset orders "busy".
         *
         * This is important because a single VCPU could issue:
         *  1) SIGP STOP $DESTINATION
         *  2) SIGP SENSE $DESTINATION
         *
         * If the SIGP SENSE would not be rejected as "busy", it could
         * return an incorrect answer as to whether the VCPU is STOPPED
         * or OPERATING.
         */
        if (order_code != SIGP_INITIAL_CPU_RESET &&
            order_code != SIGP_CPU_RESET) {
                /*
                 * Lockless check. Both SIGP STOP and SIGP (RE)START
                 * properly synchronize everything while processing
                 * their orders, while the guest cannot observe a
                 * difference when issuing other orders from two
                 * different VCPUs.
                 */
                if (kvm_s390_is_stop_irq_pending(dst_vcpu) ||
                    kvm_s390_is_restart_irq_pending(dst_vcpu))
                        return SIGP_CC_BUSY;
        }

        switch (order_code) {
        case SIGP_SENSE:
                vcpu->stat.instruction_sigp_sense++;

@@ -253,13 +253,15 @@ void page_table_free(struct mm_struct *mm, unsigned long *table)
                /* Free 2K page table fragment of a 4K page */
                bit = (__pa(table) & ~PAGE_MASK)/(PTRS_PER_PTE*sizeof(pte_t));
                spin_lock_bh(&mm->context.lock);
                mask = atomic_xor_bits(&page->_refcount, 1U << (bit + 24));
                mask = atomic_xor_bits(&page->_refcount, 0x11U << (bit + 24));
                mask >>= 24;
                if (mask & 3)
                        list_add(&page->lru, &mm->context.pgtable_list);
                else
                        list_del(&page->lru);
                spin_unlock_bh(&mm->context.lock);
                mask = atomic_xor_bits(&page->_refcount, 0x10U << (bit + 24));
                mask >>= 24;
                if (mask != 0)
                        return;
        } else {

@@ -1076,6 +1076,8 @@ static void virtio_uml_release_dev(struct device *d)
                        container_of(d, struct virtio_device, dev);
        struct virtio_uml_device *vu_dev = to_virtio_uml_device(vdev);

        time_travel_propagate_time();

        /* might not have been opened due to not negotiating the feature */
        if (vu_dev->req_fd >= 0) {
                um_free_irq(VIRTIO_IRQ, vu_dev);
@@ -1109,6 +1111,8 @@ static int virtio_uml_probe(struct platform_device *pdev)
        vu_dev->pdev = pdev;
        vu_dev->req_fd = -1;

        time_travel_propagate_time();

        do {
                rc = os_connect_socket(pdata->socket_path);
        } while (rc == -EINTR);

@@ -14,7 +14,7 @@ static inline void um_ndelay(unsigned long nsecs)
        ndelay(nsecs);
}
#undef ndelay
#define ndelay um_ndelay
#define ndelay(n) um_ndelay(n)

static inline void um_udelay(unsigned long usecs)
{
@@ -26,5 +26,5 @@ static inline void um_udelay(unsigned long usecs)
        udelay(usecs);
}
#undef udelay
#define udelay um_udelay
#define udelay(n) um_udelay(n)
#endif /* __UM_DELAY_H */

@@ -16,8 +16,8 @@ extern int restore_fp_registers(int pid, unsigned long *fp_regs);
extern int save_fpx_registers(int pid, unsigned long *fp_regs);
extern int restore_fpx_registers(int pid, unsigned long *fp_regs);
extern int save_registers(int pid, struct uml_pt_regs *regs);
extern int restore_registers(int pid, struct uml_pt_regs *regs);
extern int init_registers(int pid);
extern int restore_pid_registers(int pid, struct uml_pt_regs *regs);
extern int init_pid_registers(int pid);
extern void get_safe_registers(unsigned long *regs, unsigned long *fp_regs);
extern unsigned long get_thread_reg(int reg, jmp_buf *buf);
extern int get_fp_registers(int pid, unsigned long *regs);

@@ -21,7 +21,7 @@ int save_registers(int pid, struct uml_pt_regs *regs)
        return 0;
}

int restore_registers(int pid, struct uml_pt_regs *regs)
int restore_pid_registers(int pid, struct uml_pt_regs *regs)
{
        int err;

@@ -36,7 +36,7 @@ int restore_registers(int pid, struct uml_pt_regs *regs)
static unsigned long exec_regs[MAX_REG_NR];
static unsigned long exec_fp_regs[FP_SIZE];

int init_registers(int pid)
int init_pid_registers(int pid)
{
        int err;

@@ -336,7 +336,7 @@ void __init os_early_checks(void)
        check_tmpexec();

        pid = start_ptraced_child();
        if (init_registers(pid))
        if (init_pid_registers(pid))
                fatal("Failed to initialize default registers");
        stop_ptraced_child(pid, 1, 1);
}

@@ -28,7 +28,11 @@ KCOV_INSTRUMENT := n
targets := vmlinux vmlinux.bin vmlinux.bin.gz vmlinux.bin.bz2 vmlinux.bin.lzma \
        vmlinux.bin.xz vmlinux.bin.lzo vmlinux.bin.lz4 vmlinux.bin.zst

KBUILD_CFLAGS := -m$(BITS) -O2
# CLANG_FLAGS must come before any cc-disable-warning or cc-option calls in
# case of cross compiling, as it has the '--target=' flag, which is needed to
# avoid errors with '-march=i386', and future flags may depend on the target to
# be valid.
KBUILD_CFLAGS := -m$(BITS) -O2 $(CLANG_FLAGS)
KBUILD_CFLAGS += -fno-strict-aliasing -fPIE
KBUILD_CFLAGS += -DDISABLE_BRANCH_PROFILING
cflags-$(CONFIG_X86_32) := -march=i386
@@ -46,7 +50,6 @@ KBUILD_CFLAGS += -D__DISABLE_EXPORTS
# Disable relocation relaxation in case the link is not PIE.
KBUILD_CFLAGS += $(call as-option,-Wa$(comma)-mrelax-relocations=no)
KBUILD_CFLAGS += -include $(srctree)/include/linux/hidden.h
KBUILD_CFLAGS += $(CLANG_FLAGS)

# sev-es.c indirectly inludes inat-table.h which is generated during
# compilation and stored in $(objtree). Add the directory to the includes so

@@ -264,3 +264,4 @@ CONFIG_BLK_DEV_IO_TRACE=y
CONFIG_PROVIDE_OHCI1394_DMA_INIT=y
CONFIG_EARLY_PRINTK_DBGP=y
CONFIG_DEBUG_BOOT_PARAMS=y
CONFIG_KALLSYMS_ALL=y

@@ -260,3 +260,4 @@ CONFIG_BLK_DEV_IO_TRACE=y
CONFIG_PROVIDE_OHCI1394_DMA_INIT=y
CONFIG_EARLY_PRINTK_DBGP=y
CONFIG_DEBUG_BOOT_PARAMS=y
CONFIG_KALLSYMS_ALL=y

@@ -2545,10 +2545,11 @@ static bool perf_hw_regs(struct pt_regs *regs)
void
perf_callchain_kernel(struct perf_callchain_entry_ctx *entry, struct pt_regs *regs)
{
        struct perf_guest_info_callbacks *guest_cbs = perf_get_guest_cbs();
        struct unwind_state state;
        unsigned long addr;

        if (perf_guest_cbs && perf_guest_cbs->is_in_guest()) {
        if (guest_cbs && guest_cbs->is_in_guest()) {
                /* TODO: We don't support guest os callchain now */
                return;
        }
@@ -2648,10 +2649,11 @@ perf_callchain_user32(struct pt_regs *regs, struct perf_callchain_entry_ctx *ent
void
perf_callchain_user(struct perf_callchain_entry_ctx *entry, struct pt_regs *regs)
{
        struct perf_guest_info_callbacks *guest_cbs = perf_get_guest_cbs();
        struct stack_frame frame;
        const struct stack_frame __user *fp;

        if (perf_guest_cbs && perf_guest_cbs->is_in_guest()) {
        if (guest_cbs && guest_cbs->is_in_guest()) {
                /* TODO: We don't support guest os callchain now */
                return;
        }
@@ -2728,18 +2730,21 @@ static unsigned long code_segment_base(struct pt_regs *regs)

unsigned long perf_instruction_pointer(struct pt_regs *regs)
{
        if (perf_guest_cbs && perf_guest_cbs->is_in_guest())
                return perf_guest_cbs->get_guest_ip();
        struct perf_guest_info_callbacks *guest_cbs = perf_get_guest_cbs();

        if (guest_cbs && guest_cbs->is_in_guest())
                return guest_cbs->get_guest_ip();

        return regs->ip + code_segment_base(regs);
}

unsigned long perf_misc_flags(struct pt_regs *regs)
{
        struct perf_guest_info_callbacks *guest_cbs = perf_get_guest_cbs();
        int misc = 0;

        if (perf_guest_cbs && perf_guest_cbs->is_in_guest()) {
                if (perf_guest_cbs->is_user_mode())
        if (guest_cbs && guest_cbs->is_in_guest()) {
                if (guest_cbs->is_user_mode())
                        misc |= PERF_RECORD_MISC_GUEST_USER;
                else
                        misc |= PERF_RECORD_MISC_GUEST_KERNEL;

@@ -2586,6 +2586,7 @@ static int handle_pmi_common(struct pt_regs *regs, u64 status)
{
        struct perf_sample_data data;
        struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
        struct perf_guest_info_callbacks *guest_cbs;
        int bit;
        int handled = 0;

@@ -2651,9 +2652,11 @@ static int handle_pmi_common(struct pt_regs *regs, u64 status)
         */
        if (__test_and_clear_bit(GLOBAL_STATUS_TRACE_TOPAPMI_BIT, (unsigned long *)&status)) {
                handled++;
                if (unlikely(perf_guest_cbs && perf_guest_cbs->is_in_guest() &&
                             perf_guest_cbs->handle_intel_pt_intr))
                        perf_guest_cbs->handle_intel_pt_intr();

                guest_cbs = perf_get_guest_cbs();
                if (unlikely(guest_cbs && guest_cbs->is_in_guest() &&
                             guest_cbs->handle_intel_pt_intr))
                        guest_cbs->handle_intel_pt_intr();
                else
                        intel_pt_interrupt();
        }

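A generic sketch (not the kernel implementation) of the pattern the x86/perf hunks above switch to: read a callbacks pointer that may be unregistered concurrently exactly once into a local, then do the NULL check and every call through that local. Re-reading the global for each use, as the old code did, lets the pointer become NULL between the check and the call. The struct and function names below are invented for illustration, and lifetime management of the callbacks structure (e.g. RCU) is out of scope for this sketch.

#include <stdatomic.h>
#include <stdio.h>

struct guest_cbs {
        int (*is_in_guest)(void);
        unsigned long (*get_guest_ip)(void);
};

static _Atomic(struct guest_cbs *) registered_cbs;

static unsigned long sample_ip(unsigned long host_ip)
{
        /* one load; every later use goes through the same local pointer */
        struct guest_cbs *cbs = atomic_load_explicit(&registered_cbs,
                                                     memory_order_acquire);

        if (cbs && cbs->is_in_guest())
                return cbs->get_guest_ip();
        return host_ip;
}

static int in_guest(void) { return 1; }
static unsigned long guest_ip(void) { return 0xc0de; }

int main(void)
{
        static struct guest_cbs cbs = { in_guest, guest_ip };

        printf("%#lx\n", sample_ip(0x1000));    /* no callbacks registered: 0x1000 */
        atomic_store_explicit(&registered_cbs, &cbs, memory_order_release);
        printf("%#lx\n", sample_ip(0x1000));    /* callbacks registered: 0xc0de */
        return 0;
}
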
@@ -1306,6 +1306,7 @@ struct kvm_x86_init_ops {
        int (*disabled_by_bios)(void);
        int (*check_processor_compatibility)(void);
        int (*hardware_setup)(void);
        bool (*intel_pt_intr_in_guest)(void);

        struct kvm_x86_ops *runtime_ops;
};

@@ -89,6 +89,7 @@ static inline void set_real_mode_mem(phys_addr_t mem)
}

void reserve_real_mode(void);
void load_trampoline_pgtable(void);

#endif /* __ASSEMBLY__ */

Some files were not shown because too many files have changed in this diff.