mirror of https://github.com/raspberrypi/linux.git
synced 2025-12-06 10:00:17 +00:00
Merge remote-tracking branch 'stable/linux-6.12.y' into rpi-6.12.y
@@ -2017,7 +2017,8 @@ attribute-sets:
attributes:
-
name: act
type: nest
type: indexed-array
sub-type: nest
nested-attributes: tc-act-attrs
-
name: police
@@ -2250,7 +2251,8 @@ attribute-sets:
attributes:
-
name: act
type: nest
type: indexed-array
sub-type: nest
nested-attributes: tc-act-attrs
-
name: police
@@ -2745,7 +2747,7 @@ attribute-sets:
type: u16
byte-order: big-endian
-
name: key-l2-tpv3-sid
name: key-l2tpv3-sid
type: u32
byte-order: big-endian
-
@@ -3504,7 +3506,7 @@ attribute-sets:
name: rate64
type: u64
-
name: prate4
name: prate64
type: u64
-
name: burst

@@ -17535,7 +17535,7 @@ F: include/uapi/linux/ppdev.h
PARAVIRT_OPS INTERFACE
M: Juergen Gross <jgross@suse.com>
R: Ajay Kaher <ajay.kaher@broadcom.com>
R: Alexey Makhalov <alexey.amakhalov@broadcom.com>
R: Alexey Makhalov <alexey.makhalov@broadcom.com>
R: Broadcom internal kernel review list <bcm-kernel-feedback-list@broadcom.com>
L: virtualization@lists.linux.dev
L: x86@kernel.org
@@ -24817,7 +24817,7 @@ F: drivers/misc/vmw_balloon.c

VMWARE HYPERVISOR INTERFACE
M: Ajay Kaher <ajay.kaher@broadcom.com>
M: Alexey Makhalov <alexey.amakhalov@broadcom.com>
M: Alexey Makhalov <alexey.makhalov@broadcom.com>
R: Broadcom internal kernel review list <bcm-kernel-feedback-list@broadcom.com>
L: virtualization@lists.linux.dev
L: x86@kernel.org
@@ -24845,7 +24845,7 @@ F: drivers/scsi/vmw_pvscsi.h
VMWARE VIRTUAL PTP CLOCK DRIVER
M: Nick Shi <nick.shi@broadcom.com>
R: Ajay Kaher <ajay.kaher@broadcom.com>
R: Alexey Makhalov <alexey.amakhalov@broadcom.com>
R: Alexey Makhalov <alexey.makhalov@broadcom.com>
R: Broadcom internal kernel review list <bcm-kernel-feedback-list@broadcom.com>
L: netdev@vger.kernel.org
S: Supported
Makefile
@@ -1,7 +1,7 @@
# SPDX-License-Identifier: GPL-2.0
VERSION = 6
PATCHLEVEL = 12
SUBLEVEL = 29
SUBLEVEL = 30
EXTRAVERSION =
NAME = Baby Opossum Posse

@@ -116,6 +116,10 @@
status = "okay";
};

&clkc_audio {
status = "okay";
};

&frddr_a {
status = "okay";
};

@@ -35,7 +35,6 @@
<0x1 0x00000000 0 0xc0000000>;
};

reg_usdhc2_vmmc: regulator-usdhc2-vmmc {
compatible = "regulator-fixed";
regulator-name = "VSD_3V3";
@@ -46,6 +45,16 @@
startup-delay-us = <100>;
off-on-delay-us = <12000>;
};

reg_usdhc2_vqmmc: regulator-usdhc2-vqmmc {
compatible = "regulator-gpio";
regulator-name = "VSD_VSEL";
regulator-min-microvolt = <1800000>;
regulator-max-microvolt = <3300000>;
gpios = <&gpio2 12 GPIO_ACTIVE_HIGH>;
states = <3300000 0x0 1800000 0x1>;
vin-supply = <&ldo5>;
};
};

&A53_0 {
@@ -205,6 +214,7 @@
pinctrl-2 = <&pinctrl_usdhc2_200mhz>, <&pinctrl_usdhc2_gpio>;
cd-gpios = <&gpio1 14 GPIO_ACTIVE_LOW>;
vmmc-supply = <&reg_usdhc2_vmmc>;
vqmmc-supply = <&reg_usdhc2_vqmmc>;
bus-width = <4>;
status = "okay";
};

@@ -222,6 +222,10 @@
compatible = "realtek,rt5616";
reg = <0x1b>;
#sound-dai-cells = <0>;
assigned-clocks = <&cru I2S0_8CH_MCLKOUT>;
assigned-clock-rates = <12288000>;
clocks = <&cru I2S0_8CH_MCLKOUT>;
clock-names = "mclk";
};
};
@@ -11,20 +11,15 @@
compatible = "operating-points-v2";
opp-shared;

opp-1416000000 {
opp-hz = /bits/ 64 <1416000000>;
opp-1200000000 {
opp-hz = /bits/ 64 <1200000000>;
opp-microvolt = <750000 750000 950000>;
clock-latency-ns = <40000>;
opp-suspend;
};
opp-1608000000 {
opp-hz = /bits/ 64 <1608000000>;
opp-microvolt = <887500 887500 950000>;
clock-latency-ns = <40000>;
};
opp-1704000000 {
opp-hz = /bits/ 64 <1704000000>;
opp-microvolt = <937500 937500 950000>;
opp-1296000000 {
opp-hz = /bits/ 64 <1296000000>;
opp-microvolt = <775000 775000 950000>;
clock-latency-ns = <40000>;
};
};
@@ -33,9 +28,14 @@
compatible = "operating-points-v2";
opp-shared;

opp-1200000000{
opp-hz = /bits/ 64 <1200000000>;
opp-microvolt = <750000 750000 950000>;
clock-latency-ns = <40000>;
};
opp-1416000000 {
opp-hz = /bits/ 64 <1416000000>;
opp-microvolt = <750000 750000 950000>;
opp-microvolt = <762500 762500 950000>;
clock-latency-ns = <40000>;
};
opp-1608000000 {
@@ -43,25 +43,20 @@
opp-microvolt = <787500 787500 950000>;
clock-latency-ns = <40000>;
};
opp-1800000000 {
opp-hz = /bits/ 64 <1800000000>;
opp-microvolt = <875000 875000 950000>;
clock-latency-ns = <40000>;
};
opp-2016000000 {
opp-hz = /bits/ 64 <2016000000>;
opp-microvolt = <950000 950000 950000>;
clock-latency-ns = <40000>;
};
};

cluster2_opp_table: opp-table-cluster2 {
compatible = "operating-points-v2";
opp-shared;

opp-1200000000{
opp-hz = /bits/ 64 <1200000000>;
opp-microvolt = <750000 750000 950000>;
clock-latency-ns = <40000>;
};
opp-1416000000 {
opp-hz = /bits/ 64 <1416000000>;
opp-microvolt = <750000 750000 950000>;
opp-microvolt = <762500 762500 950000>;
clock-latency-ns = <40000>;
};
opp-1608000000 {
@@ -69,16 +64,6 @@
opp-microvolt = <787500 787500 950000>;
clock-latency-ns = <40000>;
};
opp-1800000000 {
opp-hz = /bits/ 64 <1800000000>;
opp-microvolt = <875000 875000 950000>;
clock-latency-ns = <40000>;
};
opp-2016000000 {
opp-hz = /bits/ 64 <2016000000>;
opp-microvolt = <950000 950000 950000>;
clock-latency-ns = <40000>;
};
};

gpu_opp_table: opp-table {
@@ -104,10 +89,6 @@
opp-hz = /bits/ 64 <700000000>;
opp-microvolt = <750000 750000 850000>;
};
opp-850000000 {
opp-hz = /bits/ 64 <800000000>;
opp-microvolt = <787500 787500 850000>;
};
};
};
@@ -55,7 +55,7 @@ static inline void instruction_pointer_set(struct pt_regs *regs, unsigned long v

/* Query offset/name of register from its name/offset */
extern int regs_query_register_offset(const char *name);
#define MAX_REG_OFFSET (offsetof(struct pt_regs, __last))
#define MAX_REG_OFFSET (offsetof(struct pt_regs, __last) - sizeof(unsigned long))

/**
* regs_get_register() - get register value from its offset

@@ -15,7 +15,6 @@ typedef u32 uprobe_opcode_t;
#define UPROBE_XOLBP_INSN __emit_break(BRK_UPROBE_XOLBP)

struct arch_uprobe {
unsigned long resume_era;
u32 insn[2];
u32 ixol[2];
bool simulate;

@@ -16,6 +16,7 @@
#include <asm/stackframe.h>
#include <asm/thread_info.h>

.section .cpuidle.text, "ax"
.align 5
SYM_FUNC_START(__arch_cpu_idle)
/* start of idle interrupt region */
@@ -31,14 +32,16 @@ SYM_FUNC_START(__arch_cpu_idle)
*/
idle 0
/* end of idle interrupt region */
1: jr ra
idle_exit:
jr ra
SYM_FUNC_END(__arch_cpu_idle)
.previous

SYM_CODE_START(handle_vint)
UNWIND_HINT_UNDEFINED
BACKUP_T0T1
SAVE_ALL
la_abs t1, 1b
la_abs t1, idle_exit
LONG_L t0, sp, PT_ERA
/* 3 instructions idle interrupt region */
ori t0, t0, 0b1100

@@ -18,11 +18,28 @@ static unsigned int euen_mask = CSR_EUEN_FPEN;
static DEFINE_PER_CPU(bool, in_kernel_fpu);
static DEFINE_PER_CPU(unsigned int, euen_current);

static inline void fpregs_lock(void)
{
if (IS_ENABLED(CONFIG_PREEMPT_RT))
preempt_disable();
else
local_bh_disable();
}

static inline void fpregs_unlock(void)
{
if (IS_ENABLED(CONFIG_PREEMPT_RT))
preempt_enable();
else
local_bh_enable();
}

void kernel_fpu_begin(void)
{
unsigned int *euen_curr;

preempt_disable();
if (!irqs_disabled())
fpregs_lock();

WARN_ON(this_cpu_read(in_kernel_fpu));

@@ -73,7 +90,8 @@ void kernel_fpu_end(void)

this_cpu_write(in_kernel_fpu, false);

preempt_enable();
if (!irqs_disabled())
fpregs_unlock();
}
EXPORT_SYMBOL_GPL(kernel_fpu_end);

@@ -111,7 +111,7 @@ static unsigned long __init get_loops_per_jiffy(void)
return lpj;
}

static long init_offset __nosavedata;
static long init_offset;

void save_counter(void)
{

@@ -42,7 +42,6 @@ int arch_uprobe_pre_xol(struct arch_uprobe *auprobe, struct pt_regs *regs)
utask->autask.saved_trap_nr = current->thread.trap_nr;
current->thread.trap_nr = UPROBE_TRAP_NR;
instruction_pointer_set(regs, utask->xol_vaddr);
user_enable_single_step(current);

return 0;
}
@@ -53,13 +52,7 @@ int arch_uprobe_post_xol(struct arch_uprobe *auprobe, struct pt_regs *regs)

WARN_ON_ONCE(current->thread.trap_nr != UPROBE_TRAP_NR);
current->thread.trap_nr = utask->autask.saved_trap_nr;

if (auprobe->simulate)
instruction_pointer_set(regs, auprobe->resume_era);
else
instruction_pointer_set(regs, utask->vaddr + LOONGARCH_INSN_SIZE);

user_disable_single_step(current);
instruction_pointer_set(regs, utask->vaddr + LOONGARCH_INSN_SIZE);

return 0;
}
@@ -70,7 +63,6 @@ void arch_uprobe_abort_xol(struct arch_uprobe *auprobe, struct pt_regs *regs)

current->thread.trap_nr = utask->autask.saved_trap_nr;
instruction_pointer_set(regs, utask->vaddr);
user_disable_single_step(current);
}

bool arch_uprobe_xol_was_trapped(struct task_struct *t)
@@ -90,7 +82,6 @@ bool arch_uprobe_skip_sstep(struct arch_uprobe *auprobe, struct pt_regs *regs)

insn.word = auprobe->insn[0];
arch_simulate_insn(insn, regs);
auprobe->resume_era = regs->csr_era;

return true;
}

@@ -2,6 +2,7 @@
#include <asm/fpu.h>
#include <asm/loongson.h>
#include <asm/sections.h>
#include <asm/time.h>
#include <asm/tlbflush.h>
#include <linux/suspend.h>

@@ -14,6 +15,7 @@ struct pt_regs saved_regs;

void save_processor_state(void)
{
save_counter();
saved_crmd = csr_read32(LOONGARCH_CSR_CRMD);
saved_prmd = csr_read32(LOONGARCH_CSR_PRMD);
saved_euen = csr_read32(LOONGARCH_CSR_EUEN);
@@ -26,6 +28,7 @@ void save_processor_state(void)

void restore_processor_state(void)
{
sync_counter();
csr_write32(saved_crmd, LOONGARCH_CSR_CRMD);
csr_write32(saved_prmd, LOONGARCH_CSR_PRMD);
csr_write32(saved_euen, LOONGARCH_CSR_EUEN);

@@ -309,7 +309,7 @@
1024 1024 1024 1024>;
snps,priority = <0 1 2 3 4 5 6 7>;
snps,dma-masters = <2>;
snps,data-width = <4>;
snps,data-width = <2>;
status = "disabled";
};
@@ -7616,26 +7616,6 @@ void kvm_mmu_pre_destroy_vm(struct kvm *kvm)
}

#ifdef CONFIG_KVM_GENERIC_MEMORY_ATTRIBUTES
bool kvm_arch_pre_set_memory_attributes(struct kvm *kvm,
struct kvm_gfn_range *range)
{
/*
* Zap SPTEs even if the slot can't be mapped PRIVATE. KVM x86 only
* supports KVM_MEMORY_ATTRIBUTE_PRIVATE, and so it *seems* like KVM
* can simply ignore such slots. But if userspace is making memory
* PRIVATE, then KVM must prevent the guest from accessing the memory
* as shared. And if userspace is making memory SHARED and this point
* is reached, then at least one page within the range was previously
* PRIVATE, i.e. the slot's possible hugepage ranges are changing.
* Zapping SPTEs in this case ensures KVM will reassess whether or not
* a hugepage can be used for affected ranges.
*/
if (WARN_ON_ONCE(!kvm_arch_has_private_mem(kvm)))
return false;

return kvm_unmap_gfn_range(kvm, range);
}

static bool hugepage_test_mixed(struct kvm_memory_slot *slot, gfn_t gfn,
int level)
{
@@ -7654,6 +7634,69 @@ static void hugepage_set_mixed(struct kvm_memory_slot *slot, gfn_t gfn,
lpage_info_slot(gfn, slot, level)->disallow_lpage |= KVM_LPAGE_MIXED_FLAG;
}

bool kvm_arch_pre_set_memory_attributes(struct kvm *kvm,
struct kvm_gfn_range *range)
{
struct kvm_memory_slot *slot = range->slot;
int level;

/*
* Zap SPTEs even if the slot can't be mapped PRIVATE. KVM x86 only
* supports KVM_MEMORY_ATTRIBUTE_PRIVATE, and so it *seems* like KVM
* can simply ignore such slots. But if userspace is making memory
* PRIVATE, then KVM must prevent the guest from accessing the memory
* as shared. And if userspace is making memory SHARED and this point
* is reached, then at least one page within the range was previously
* PRIVATE, i.e. the slot's possible hugepage ranges are changing.
* Zapping SPTEs in this case ensures KVM will reassess whether or not
* a hugepage can be used for affected ranges.
*/
if (WARN_ON_ONCE(!kvm_arch_has_private_mem(kvm)))
return false;

if (WARN_ON_ONCE(range->end <= range->start))
return false;

/*
* If the head and tail pages of the range currently allow a hugepage,
* i.e. reside fully in the slot and don't have mixed attributes, then
* add each corresponding hugepage range to the ongoing invalidation,
* e.g. to prevent KVM from creating a hugepage in response to a fault
* for a gfn whose attributes aren't changing. Note, only the range
* of gfns whose attributes are being modified needs to be explicitly
* unmapped, as that will unmap any existing hugepages.
*/
for (level = PG_LEVEL_2M; level <= KVM_MAX_HUGEPAGE_LEVEL; level++) {
gfn_t start = gfn_round_for_level(range->start, level);
gfn_t end = gfn_round_for_level(range->end - 1, level);
gfn_t nr_pages = KVM_PAGES_PER_HPAGE(level);

if ((start != range->start || start + nr_pages > range->end) &&
start >= slot->base_gfn &&
start + nr_pages <= slot->base_gfn + slot->npages &&
!hugepage_test_mixed(slot, start, level))
kvm_mmu_invalidate_range_add(kvm, start, start + nr_pages);

if (end == start)
continue;

if ((end + nr_pages) > range->end &&
(end + nr_pages) <= (slot->base_gfn + slot->npages) &&
!hugepage_test_mixed(slot, end, level))
kvm_mmu_invalidate_range_add(kvm, end, end + nr_pages);
}

/* Unmap the old attribute page. */
if (range->arg.attributes & KVM_MEMORY_ATTRIBUTE_PRIVATE)
range->attr_filter = KVM_FILTER_SHARED;
else
range->attr_filter = KVM_FILTER_PRIVATE;

return kvm_unmap_gfn_range(kvm, range);
}

static bool hugepage_has_attrs(struct kvm *kvm, struct kvm_memory_slot *slot,
gfn_t gfn, int level, unsigned long attrs)
{
@@ -611,7 +611,7 @@ struct bio *bio_kmalloc(unsigned short nr_vecs, gfp_t gfp_mask)
{
struct bio *bio;

if (nr_vecs > UIO_MAXIOV)
if (nr_vecs > BIO_MAX_INLINE_VECS)
return NULL;
return kmalloc(struct_size(bio, bi_inline_vecs, nr_vecs), gfp_mask);
}

@@ -201,7 +201,7 @@ fw_log_fops_write(struct file *file, const char __user *user_buf, size_t size, l
if (!size)
return -EINVAL;

ivpu_fw_log_clear(vdev);
ivpu_fw_log_mark_read(vdev);
return size;
}

@@ -218,7 +218,7 @@ static int ivpu_fw_parse(struct ivpu_device *vdev)
fw->cold_boot_entry_point = fw_hdr->entry_point;
fw->entry_point = fw->cold_boot_entry_point;

fw->trace_level = min_t(u32, ivpu_log_level, IVPU_FW_LOG_FATAL);
fw->trace_level = min_t(u32, ivpu_fw_log_level, IVPU_FW_LOG_FATAL);
fw->trace_destination_mask = VPU_TRACE_DESTINATION_VERBOSE_TRACING;
fw->trace_hw_component_mask = -1;

@@ -323,7 +323,7 @@ static int ivpu_fw_mem_init(struct ivpu_device *vdev)
goto err_free_fw_mem;
}

if (ivpu_log_level <= IVPU_FW_LOG_INFO)
if (ivpu_fw_log_level <= IVPU_FW_LOG_INFO)
log_verb_size = IVPU_FW_VERBOSE_BUFFER_LARGE_SIZE;
else
log_verb_size = IVPU_FW_VERBOSE_BUFFER_SMALL_SIZE;

@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2020-2023 Intel Corporation
* Copyright (C) 2020-2024 Intel Corporation
*/

#include <linux/ctype.h>
@@ -15,19 +15,19 @@
#include "ivpu_fw_log.h"
#include "ivpu_gem.h"

#define IVPU_FW_LOG_LINE_LENGTH 256
#define IVPU_FW_LOG_LINE_LENGTH 256

unsigned int ivpu_log_level = IVPU_FW_LOG_ERROR;
module_param(ivpu_log_level, uint, 0444);
MODULE_PARM_DESC(ivpu_log_level,
"NPU firmware default trace level: debug=" __stringify(IVPU_FW_LOG_DEBUG)
unsigned int ivpu_fw_log_level = IVPU_FW_LOG_ERROR;
module_param_named(fw_log_level, ivpu_fw_log_level, uint, 0444);
MODULE_PARM_DESC(fw_log_level,
"NPU firmware default log level: debug=" __stringify(IVPU_FW_LOG_DEBUG)
" info=" __stringify(IVPU_FW_LOG_INFO)
" warn=" __stringify(IVPU_FW_LOG_WARN)
" error=" __stringify(IVPU_FW_LOG_ERROR)
" fatal=" __stringify(IVPU_FW_LOG_FATAL));

static int fw_log_ptr(struct ivpu_device *vdev, struct ivpu_bo *bo, u32 *offset,
struct vpu_tracing_buffer_header **log_header)
static int fw_log_from_bo(struct ivpu_device *vdev, struct ivpu_bo *bo, u32 *offset,
struct vpu_tracing_buffer_header **out_log)
{
struct vpu_tracing_buffer_header *log;

@@ -48,7 +48,7 @@ static int fw_log_ptr(struct ivpu_device *vdev, struct ivpu_bo *bo, u32 *offset,
return -EINVAL;
}

*log_header = log;
*out_log = log;
*offset += log->size;

ivpu_dbg(vdev, FW_BOOT,
@@ -59,7 +59,7 @@ static int fw_log_ptr(struct ivpu_device *vdev, struct ivpu_bo *bo, u32 *offset,
return 0;
}

static void buffer_print(char *buffer, u32 size, struct drm_printer *p)
static void fw_log_print_lines(char *buffer, u32 size, struct drm_printer *p)
{
char line[IVPU_FW_LOG_LINE_LENGTH];
u32 index = 0;
@@ -87,56 +87,89 @@ static void buffer_print(char *buffer, u32 size, struct drm_printer *p)
}
line[index] = 0;
if (index != 0)
drm_printf(p, "%s\n", line);
drm_printf(p, "%s", line);
}

static void fw_log_print_buffer(struct ivpu_device *vdev, struct vpu_tracing_buffer_header *log,
const char *prefix, bool only_new_msgs, struct drm_printer *p)
static void fw_log_print_buffer(struct vpu_tracing_buffer_header *log, const char *prefix,
bool only_new_msgs, struct drm_printer *p)
{
char *log_buffer = (void *)log + log->header_size;
u32 log_size = log->size - log->header_size;
u32 log_start = log->read_index;
u32 log_end = log->write_index;
char *log_data = (void *)log + log->header_size;
u32 data_size = log->size - log->header_size;
u32 log_start = only_new_msgs ? READ_ONCE(log->read_index) : 0;
u32 log_end = READ_ONCE(log->write_index);

if (!(log->write_index || log->wrap_count) ||
(log->write_index == log->read_index && only_new_msgs)) {
drm_printf(p, "==== %s \"%s\" log empty ====\n", prefix, log->name);
return;
if (log->wrap_count == log->read_wrap_count) {
if (log_end <= log_start) {
drm_printf(p, "==== %s \"%s\" log empty ====\n", prefix, log->name);
return;
}
} else if (log->wrap_count == log->read_wrap_count + 1) {
if (log_end > log_start)
log_start = log_end;
} else {
log_start = log_end;
}

drm_printf(p, "==== %s \"%s\" log start ====\n", prefix, log->name);
if (log->write_index > log->read_index) {
buffer_print(log_buffer + log_start, log_end - log_start, p);
if (log_end > log_start) {
fw_log_print_lines(log_data + log_start, log_end - log_start, p);
} else {
buffer_print(log_buffer + log_end, log_size - log_end, p);
buffer_print(log_buffer, log_end, p);
fw_log_print_lines(log_data + log_start, data_size - log_start, p);
fw_log_print_lines(log_data, log_end, p);
}
drm_printf(p, "\x1b[0m");
drm_printf(p, "\n\x1b[0m"); /* add new line and clear formatting */
drm_printf(p, "==== %s \"%s\" log end ====\n", prefix, log->name);
}

static void
fw_log_print_all_in_bo(struct ivpu_device *vdev, const char *name,
struct ivpu_bo *bo, bool only_new_msgs, struct drm_printer *p)
{
struct vpu_tracing_buffer_header *log;
u32 next = 0;

while (fw_log_from_bo(vdev, bo, &next, &log) == 0)
fw_log_print_buffer(log, name, only_new_msgs, p);
}

void ivpu_fw_log_print(struct ivpu_device *vdev, bool only_new_msgs, struct drm_printer *p)
{
struct vpu_tracing_buffer_header *log_header;
u32 next = 0;

while (fw_log_ptr(vdev, vdev->fw->mem_log_crit, &next, &log_header) == 0)
fw_log_print_buffer(vdev, log_header, "NPU critical", only_new_msgs, p);

next = 0;
while (fw_log_ptr(vdev, vdev->fw->mem_log_verb, &next, &log_header) == 0)
fw_log_print_buffer(vdev, log_header, "NPU verbose", only_new_msgs, p);
fw_log_print_all_in_bo(vdev, "NPU critical", vdev->fw->mem_log_crit, only_new_msgs, p);
fw_log_print_all_in_bo(vdev, "NPU verbose", vdev->fw->mem_log_verb, only_new_msgs, p);
}

void ivpu_fw_log_clear(struct ivpu_device *vdev)
void ivpu_fw_log_mark_read(struct ivpu_device *vdev)
{
struct vpu_tracing_buffer_header *log_header;
u32 next = 0;

while (fw_log_ptr(vdev, vdev->fw->mem_log_crit, &next, &log_header) == 0)
log_header->read_index = log_header->write_index;
struct vpu_tracing_buffer_header *log;
u32 next;

next = 0;
while (fw_log_ptr(vdev, vdev->fw->mem_log_verb, &next, &log_header) == 0)
log_header->read_index = log_header->write_index;
while (fw_log_from_bo(vdev, vdev->fw->mem_log_crit, &next, &log) == 0) {
log->read_index = READ_ONCE(log->write_index);
log->read_wrap_count = READ_ONCE(log->wrap_count);
}

next = 0;
while (fw_log_from_bo(vdev, vdev->fw->mem_log_verb, &next, &log) == 0) {
log->read_index = READ_ONCE(log->write_index);
log->read_wrap_count = READ_ONCE(log->wrap_count);
}
}

void ivpu_fw_log_reset(struct ivpu_device *vdev)
{
struct vpu_tracing_buffer_header *log;
u32 next;

next = 0;
while (fw_log_from_bo(vdev, vdev->fw->mem_log_crit, &next, &log) == 0) {
log->read_index = 0;
log->read_wrap_count = 0;
}

next = 0;
while (fw_log_from_bo(vdev, vdev->fw->mem_log_verb, &next, &log) == 0) {
log->read_index = 0;
log->read_wrap_count = 0;
}
}
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (C) 2020-2023 Intel Corporation
* Copyright (C) 2020-2024 Intel Corporation
*/

#ifndef __IVPU_FW_LOG_H__
@@ -17,14 +17,15 @@
#define IVPU_FW_LOG_ERROR 4
#define IVPU_FW_LOG_FATAL 5

extern unsigned int ivpu_log_level;

#define IVPU_FW_VERBOSE_BUFFER_SMALL_SIZE SZ_1M
#define IVPU_FW_VERBOSE_BUFFER_LARGE_SIZE SZ_8M
#define IVPU_FW_CRITICAL_BUFFER_SIZE SZ_512K

extern unsigned int ivpu_fw_log_level;

void ivpu_fw_log_print(struct ivpu_device *vdev, bool only_new_msgs, struct drm_printer *p);
void ivpu_fw_log_clear(struct ivpu_device *vdev);
void ivpu_fw_log_mark_read(struct ivpu_device *vdev);
void ivpu_fw_log_reset(struct ivpu_device *vdev);

#endif /* __IVPU_FW_LOG_H__ */

@@ -38,6 +38,7 @@ static void ivpu_pm_prepare_cold_boot(struct ivpu_device *vdev)

ivpu_cmdq_reset_all_contexts(vdev);
ivpu_ipc_reset(vdev);
ivpu_fw_log_reset(vdev);
ivpu_fw_load(vdev);
fw->entry_point = fw->cold_boot_entry_point;
}

@@ -231,16 +231,18 @@ static int acpi_pptt_leaf_node(struct acpi_table_header *table_hdr,
sizeof(struct acpi_table_pptt));
proc_sz = sizeof(struct acpi_pptt_processor);

while ((unsigned long)entry + proc_sz < table_end) {
/* ignore subtable types that are smaller than a processor node */
while ((unsigned long)entry + proc_sz <= table_end) {
cpu_node = (struct acpi_pptt_processor *)entry;

if (entry->type == ACPI_PPTT_TYPE_PROCESSOR &&
cpu_node->parent == node_entry)
return 0;
if (entry->length == 0)
return 0;

entry = ACPI_ADD_PTR(struct acpi_subtable_header, entry,
entry->length);

}
return 1;
}
@@ -273,15 +275,18 @@ static struct acpi_pptt_processor *acpi_find_processor_node(struct acpi_table_he
proc_sz = sizeof(struct acpi_pptt_processor);

/* find the processor structure associated with this cpuid */
while ((unsigned long)entry + proc_sz < table_end) {
while ((unsigned long)entry + proc_sz <= table_end) {
cpu_node = (struct acpi_pptt_processor *)entry;

if (entry->length == 0) {
pr_warn("Invalid zero length subtable\n");
break;
}
/* entry->length may not equal proc_sz, revalidate the processor structure length */
if (entry->type == ACPI_PPTT_TYPE_PROCESSOR &&
acpi_cpu_id == cpu_node->acpi_processor_id &&
(unsigned long)entry + entry->length <= table_end &&
entry->length == proc_sz + cpu_node->number_of_priv_resources * sizeof(u32) &&
acpi_pptt_leaf_node(table_hdr, cpu_node)) {
return (struct acpi_pptt_processor *)entry;
}

@@ -612,8 +612,10 @@ static int nxp_download_firmware(struct hci_dev *hdev)
&nxpdev->tx_state),
msecs_to_jiffies(60000));

release_firmware(nxpdev->fw);
memset(nxpdev->fw_name, 0, sizeof(nxpdev->fw_name));
if (nxpdev->fw && strlen(nxpdev->fw_name)) {
release_firmware(nxpdev->fw);
memset(nxpdev->fw_name, 0, sizeof(nxpdev->fw_name));
}

if (err == 0) {
bt_dev_err(hdev, "FW Download Timeout. offset: %d",

@@ -40,11 +40,6 @@
*
* These are the usage functions:
*
* tpm2_start_auth_session() which allocates the opaque auth structure
* and gets a session from the TPM. This must be called before
* any of the following functions. The session is protected by a
* session_key which is derived from a random salt value
* encrypted to the NULL seed.
* tpm2_end_auth_session() kills the session and frees the resources.
* Under normal operation this function is done by
* tpm_buf_check_hmac_response(), so this is only to be used on
@@ -963,16 +958,13 @@ err:
}

/**
* tpm2_start_auth_session() - create a HMAC authentication session with the TPM
* @chip: the TPM chip structure to create the session with
* tpm2_start_auth_session() - Create an a HMAC authentication session
* @chip: A TPM chip
*
* This function loads the NULL seed from its saved context and starts
* an authentication session on the null seed, fills in the
* @chip->auth structure to contain all the session details necessary
* for performing the HMAC, encrypt and decrypt operations and
* returns. The NULL seed is flushed before this function returns.
* Loads the ephemeral key (null seed), and starts an HMAC authenticated
* session. The null seed is flushed before the return.
*
* Return: zero on success or actual error encountered.
* Returns zero on success, or a POSIX error code.
*/
int tpm2_start_auth_session(struct tpm_chip *chip)
{
@@ -1024,7 +1016,7 @@ int tpm2_start_auth_session(struct tpm_chip *chip)
/* hash algorithm for session */
tpm_buf_append_u16(&buf, TPM_ALG_SHA256);

rc = tpm_transmit_cmd(chip, &buf, 0, "start auth session");
rc = tpm_ret_to_err(tpm_transmit_cmd(chip, &buf, 0, "StartAuthSession"));
tpm2_flush_context(chip, null_key);

if (rc == TPM2_RC_SUCCESS)

@@ -54,7 +54,7 @@ enum tis_int_flags {
enum tis_defaults {
TIS_MEM_LEN = 0x5000,
TIS_SHORT_TIMEOUT = 750, /* ms */
TIS_LONG_TIMEOUT = 2000, /* 2 sec */
TIS_LONG_TIMEOUT = 4000, /* 4 secs */
TIS_TIMEOUT_MIN_ATML = 14700, /* usecs */
TIS_TIMEOUT_MAX_ATML = 15000, /* usecs */
};
@@ -320,8 +320,9 @@ void dma_resv_add_fence(struct dma_resv *obj, struct dma_fence *fence,
count++;

dma_resv_list_set(fobj, i, fence, usage);
/* pointer update must be visible before we extend the num_fences */
smp_store_mb(fobj->num_fences, count);
/* fence update must be visible before we extend the num_fences */
smp_wmb();
fobj->num_fences = count;
}
EXPORT_SYMBOL(dma_resv_add_fence);

@@ -841,9 +841,9 @@ static int dmatest_func(void *data)
} else {
dma_async_issue_pending(chan);

wait_event_timeout(thread->done_wait,
done->done,
msecs_to_jiffies(params->timeout));
wait_event_freezable_timeout(thread->done_wait,
done->done,
msecs_to_jiffies(params->timeout));

status = dma_async_is_tx_complete(chan, cookie, NULL,
NULL);

@@ -153,6 +153,25 @@ static void idxd_cleanup_interrupts(struct idxd_device *idxd)
pci_free_irq_vectors(pdev);
}

static void idxd_clean_wqs(struct idxd_device *idxd)
{
struct idxd_wq *wq;
struct device *conf_dev;
int i;

for (i = 0; i < idxd->max_wqs; i++) {
wq = idxd->wqs[i];
if (idxd->hw.wq_cap.op_config)
bitmap_free(wq->opcap_bmap);
kfree(wq->wqcfg);
conf_dev = wq_confdev(wq);
put_device(conf_dev);
kfree(wq);
}
bitmap_free(idxd->wq_enable_map);
kfree(idxd->wqs);
}

static int idxd_setup_wqs(struct idxd_device *idxd)
{
struct device *dev = &idxd->pdev->dev;
@@ -167,8 +186,8 @@ static int idxd_setup_wqs(struct idxd_device *idxd)

idxd->wq_enable_map = bitmap_zalloc_node(idxd->max_wqs, GFP_KERNEL, dev_to_node(dev));
if (!idxd->wq_enable_map) {
kfree(idxd->wqs);
return -ENOMEM;
rc = -ENOMEM;
goto err_bitmap;
}

for (i = 0; i < idxd->max_wqs; i++) {
@@ -187,10 +206,8 @@ static int idxd_setup_wqs(struct idxd_device *idxd)
conf_dev->bus = &dsa_bus_type;
conf_dev->type = &idxd_wq_device_type;
rc = dev_set_name(conf_dev, "wq%d.%d", idxd->id, wq->id);
if (rc < 0) {
put_device(conf_dev);
if (rc < 0)
goto err;
}

mutex_init(&wq->wq_lock);
init_waitqueue_head(&wq->err_queue);
@@ -201,7 +218,6 @@ static int idxd_setup_wqs(struct idxd_device *idxd)
wq->enqcmds_retries = IDXD_ENQCMDS_RETRIES;
wq->wqcfg = kzalloc_node(idxd->wqcfg_size, GFP_KERNEL, dev_to_node(dev));
if (!wq->wqcfg) {
put_device(conf_dev);
rc = -ENOMEM;
goto err;
}
@@ -209,9 +225,8 @@ static int idxd_setup_wqs(struct idxd_device *idxd)
if (idxd->hw.wq_cap.op_config) {
wq->opcap_bmap = bitmap_zalloc(IDXD_MAX_OPCAP_BITS, GFP_KERNEL);
if (!wq->opcap_bmap) {
put_device(conf_dev);
rc = -ENOMEM;
goto err;
goto err_opcap_bmap;
}
bitmap_copy(wq->opcap_bmap, idxd->opcap_bmap, IDXD_MAX_OPCAP_BITS);
}
@@ -222,15 +237,46 @@ static int idxd_setup_wqs(struct idxd_device *idxd)

return 0;

err:
err_opcap_bmap:
kfree(wq->wqcfg);

err:
put_device(conf_dev);
kfree(wq);

while (--i >= 0) {
wq = idxd->wqs[i];
if (idxd->hw.wq_cap.op_config)
bitmap_free(wq->opcap_bmap);
kfree(wq->wqcfg);
conf_dev = wq_confdev(wq);
put_device(conf_dev);
kfree(wq);

}
bitmap_free(idxd->wq_enable_map);

err_bitmap:
kfree(idxd->wqs);

return rc;
}
static void idxd_clean_engines(struct idxd_device *idxd)
{
struct idxd_engine *engine;
struct device *conf_dev;
int i;

for (i = 0; i < idxd->max_engines; i++) {
engine = idxd->engines[i];
conf_dev = engine_confdev(engine);
put_device(conf_dev);
kfree(engine);
}
kfree(idxd->engines);
}

static int idxd_setup_engines(struct idxd_device *idxd)
{
struct idxd_engine *engine;
@@ -261,6 +307,7 @@ static int idxd_setup_engines(struct idxd_device *idxd)
rc = dev_set_name(conf_dev, "engine%d.%d", idxd->id, engine->id);
if (rc < 0) {
put_device(conf_dev);
kfree(engine);
goto err;
}

@@ -274,10 +321,26 @@ static int idxd_setup_engines(struct idxd_device *idxd)
engine = idxd->engines[i];
conf_dev = engine_confdev(engine);
put_device(conf_dev);
kfree(engine);
}
kfree(idxd->engines);

return rc;
}

static void idxd_clean_groups(struct idxd_device *idxd)
{
struct idxd_group *group;
int i;

for (i = 0; i < idxd->max_groups; i++) {
group = idxd->groups[i];
put_device(group_confdev(group));
kfree(group);
}
kfree(idxd->groups);
}

static int idxd_setup_groups(struct idxd_device *idxd)
{
struct device *dev = &idxd->pdev->dev;
@@ -308,6 +371,7 @@ static int idxd_setup_groups(struct idxd_device *idxd)
rc = dev_set_name(conf_dev, "group%d.%d", idxd->id, group->id);
if (rc < 0) {
put_device(conf_dev);
kfree(group);
goto err;
}

@@ -332,20 +396,18 @@ static int idxd_setup_groups(struct idxd_device *idxd)
while (--i >= 0) {
group = idxd->groups[i];
put_device(group_confdev(group));
kfree(group);
}
kfree(idxd->groups);

return rc;
}

static void idxd_cleanup_internals(struct idxd_device *idxd)
{
int i;

for (i = 0; i < idxd->max_groups; i++)
put_device(group_confdev(idxd->groups[i]));
for (i = 0; i < idxd->max_engines; i++)
put_device(engine_confdev(idxd->engines[i]));
for (i = 0; i < idxd->max_wqs; i++)
put_device(wq_confdev(idxd->wqs[i]));
idxd_clean_groups(idxd);
idxd_clean_engines(idxd);
idxd_clean_wqs(idxd);
destroy_workqueue(idxd->wq);
}

@@ -388,7 +450,7 @@ static int idxd_init_evl(struct idxd_device *idxd)
static int idxd_setup_internals(struct idxd_device *idxd)
{
struct device *dev = &idxd->pdev->dev;
int rc, i;
int rc;

init_waitqueue_head(&idxd->cmd_waitq);

@@ -419,14 +481,11 @@ static int idxd_setup_internals(struct idxd_device *idxd)
err_evl:
destroy_workqueue(idxd->wq);
err_wkq_create:
for (i = 0; i < idxd->max_groups; i++)
put_device(group_confdev(idxd->groups[i]));
idxd_clean_groups(idxd);
err_group:
for (i = 0; i < idxd->max_engines; i++)
put_device(engine_confdev(idxd->engines[i]));
idxd_clean_engines(idxd);
err_engine:
for (i = 0; i < idxd->max_wqs; i++)
put_device(wq_confdev(idxd->wqs[i]));
idxd_clean_wqs(idxd);
err_wqs:
return rc;
}
@@ -526,6 +585,17 @@ static void idxd_read_caps(struct idxd_device *idxd)
idxd->hw.iaa_cap.bits = ioread64(idxd->reg_base + IDXD_IAACAP_OFFSET);
}

static void idxd_free(struct idxd_device *idxd)
{
if (!idxd)
return;

put_device(idxd_confdev(idxd));
bitmap_free(idxd->opcap_bmap);
ida_free(&idxd_ida, idxd->id);
kfree(idxd);
}

static struct idxd_device *idxd_alloc(struct pci_dev *pdev, struct idxd_driver_data *data)
{
struct device *dev = &pdev->dev;
@@ -543,28 +613,34 @@ static struct idxd_device *idxd_alloc(struct pci_dev *pdev, struct idxd_driver_d
idxd_dev_set_type(&idxd->idxd_dev, idxd->data->type);
idxd->id = ida_alloc(&idxd_ida, GFP_KERNEL);
if (idxd->id < 0)
return NULL;
goto err_ida;

idxd->opcap_bmap = bitmap_zalloc_node(IDXD_MAX_OPCAP_BITS, GFP_KERNEL, dev_to_node(dev));
if (!idxd->opcap_bmap) {
ida_free(&idxd_ida, idxd->id);
return NULL;
}
if (!idxd->opcap_bmap)
goto err_opcap;

device_initialize(conf_dev);
conf_dev->parent = dev;
conf_dev->bus = &dsa_bus_type;
conf_dev->type = idxd->data->dev_type;
rc = dev_set_name(conf_dev, "%s%d", idxd->data->name_prefix, idxd->id);
if (rc < 0) {
put_device(conf_dev);
return NULL;
}
if (rc < 0)
goto err_name;

spin_lock_init(&idxd->dev_lock);
spin_lock_init(&idxd->cmd_lock);

return idxd;

err_name:
put_device(conf_dev);
bitmap_free(idxd->opcap_bmap);
err_opcap:
ida_free(&idxd_ida, idxd->id);
err_ida:
kfree(idxd);

return NULL;
}

static int idxd_enable_system_pasid(struct idxd_device *idxd)
@@ -792,7 +868,7 @@ static int idxd_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
err:
pci_iounmap(pdev, idxd->reg_base);
err_iomap:
put_device(idxd_confdev(idxd));
idxd_free(idxd);
err_idxd_alloc:
pci_disable_device(pdev);
return rc;
@@ -829,7 +905,6 @@ static void idxd_shutdown(struct pci_dev *pdev)
static void idxd_remove(struct pci_dev *pdev)
{
struct idxd_device *idxd = pci_get_drvdata(pdev);
struct idxd_irq_entry *irq_entry;

idxd_unregister_devices(idxd);
/*
@@ -842,20 +917,12 @@ static void idxd_remove(struct pci_dev *pdev)
get_device(idxd_confdev(idxd));
device_unregister(idxd_confdev(idxd));
idxd_shutdown(pdev);
if (device_pasid_enabled(idxd))
idxd_disable_system_pasid(idxd);
idxd_device_remove_debugfs(idxd);

irq_entry = idxd_get_ie(idxd, 0);
free_irq(irq_entry->vector, irq_entry);
pci_free_irq_vectors(pdev);
idxd_cleanup(idxd);
pci_iounmap(pdev, idxd->reg_base);
if (device_user_pasid_enabled(idxd))
idxd_disable_sva(pdev);
pci_disable_device(pdev);
destroy_workqueue(idxd->wq);
perfmon_pmu_remove(idxd);
put_device(idxd_confdev(idxd));
idxd_free(idxd);
pci_disable_device(pdev);
}

static struct pci_driver idxd_pci_driver = {
@@ -1091,8 +1091,11 @@ static void udma_check_tx_completion(struct work_struct *work)
u32 residue_diff;
ktime_t time_diff;
unsigned long delay;
unsigned long flags;

while (1) {
spin_lock_irqsave(&uc->vc.lock, flags);

if (uc->desc) {
/* Get previous residue and time stamp */
residue_diff = uc->tx_drain.residue;
@@ -1127,6 +1130,8 @@ static void udma_check_tx_completion(struct work_struct *work)
break;
}

spin_unlock_irqrestore(&uc->vc.lock, flags);

usleep_range(ktime_to_us(delay),
ktime_to_us(delay) + 10);
continue;
@@ -1143,6 +1148,8 @@ static void udma_check_tx_completion(struct work_struct *work)

break;
}

spin_unlock_irqrestore(&uc->vc.lock, flags);
}

static irqreturn_t udma_ring_irq_handler(int irq, void *data)
@@ -4246,7 +4253,6 @@ static struct dma_chan *udma_of_xlate(struct of_phandle_args *dma_spec,
struct of_dma *ofdma)
{
struct udma_dev *ud = ofdma->of_dma_data;
dma_cap_mask_t mask = ud->ddev.cap_mask;
struct udma_filter_param filter_param;
struct dma_chan *chan;

@@ -4278,7 +4284,7 @@ static struct dma_chan *udma_of_xlate(struct of_phandle_args *dma_spec,
}
}

chan = __dma_request_channel(&mask, udma_dma_filter_fn, &filter_param,
chan = __dma_request_channel(&ud->ddev.cap_mask, udma_dma_filter_fn, &filter_param,
ofdma->of_node);
if (!chan) {
dev_err(ud->dev, "get channel fail in %s.\n", __func__);

@@ -1203,6 +1203,8 @@ static int pca953x_restore_context(struct pca953x_chip *chip)

guard(mutex)(&chip->i2c_lock);

if (chip->client->irq > 0)
enable_irq(chip->client->irq);
regcache_cache_only(chip->regmap, false);
regcache_mark_dirty(chip->regmap);
ret = pca953x_regcache_sync(chip);
@@ -1215,6 +1217,10 @@ static int pca953x_restore_context(struct pca953x_chip *chip)
static void pca953x_save_context(struct pca953x_chip *chip)
{
guard(mutex)(&chip->i2c_lock);

/* Disable IRQ to prevent early triggering while regmap "cache only" is on */
if (chip->client->irq > 0)
disable_irq(chip->client->irq);
regcache_cache_only(chip->regmap, true);
}
@@ -853,6 +853,7 @@ struct amdgpu_device {
bool need_swiotlb;
bool accel_working;
struct notifier_block acpi_nb;
struct notifier_block pm_nb;
struct amdgpu_i2c_chan *i2c_bus[AMDGPU_MAX_I2C_BUS];
struct debugfs_blob_wrapper debugfs_vbios_blob;
struct debugfs_blob_wrapper debugfs_discovery_blob;
@@ -1570,11 +1571,9 @@ static inline void amdgpu_acpi_get_backlight_caps(struct amdgpu_dm_backlight_cap
#if defined(CONFIG_ACPI) && defined(CONFIG_SUSPEND)
bool amdgpu_acpi_is_s3_active(struct amdgpu_device *adev);
bool amdgpu_acpi_is_s0ix_active(struct amdgpu_device *adev);
void amdgpu_choose_low_power_state(struct amdgpu_device *adev);
#else
static inline bool amdgpu_acpi_is_s0ix_active(struct amdgpu_device *adev) { return false; }
static inline bool amdgpu_acpi_is_s3_active(struct amdgpu_device *adev) { return false; }
static inline void amdgpu_choose_low_power_state(struct amdgpu_device *adev) { }
#endif

void amdgpu_register_gpu_instance(struct amdgpu_device *adev);

@@ -1533,22 +1533,4 @@ bool amdgpu_acpi_is_s0ix_active(struct amdgpu_device *adev)
#endif /* CONFIG_AMD_PMC */
}

/**
* amdgpu_choose_low_power_state
*
* @adev: amdgpu_device_pointer
*
* Choose the target low power state for the GPU
*/
void amdgpu_choose_low_power_state(struct amdgpu_device *adev)
{
if (adev->in_runpm)
return;

if (amdgpu_acpi_is_s0ix_active(adev))
adev->in_s0ix = true;
else if (amdgpu_acpi_is_s3_active(adev))
adev->in_s3 = true;
}

#endif /* CONFIG_SUSPEND */

@@ -109,7 +109,7 @@ int amdgpu_unmap_static_csa(struct amdgpu_device *adev, struct amdgpu_vm *vm,
struct drm_exec exec;
int r;

drm_exec_init(&exec, DRM_EXEC_INTERRUPTIBLE_WAIT, 0);
drm_exec_init(&exec, 0, 0);
drm_exec_until_all_locked(&exec) {
r = amdgpu_vm_lock_pd(vm, &exec, 0);
if (likely(!r))

@@ -145,6 +145,8 @@ const char *amdgpu_asic_name[] = {
};

static inline void amdgpu_device_stop_pending_resets(struct amdgpu_device *adev);
static int amdgpu_device_pm_notifier(struct notifier_block *nb, unsigned long mode,
void *data);

/**
* DOC: pcie_replay_count
@@ -4519,6 +4521,11 @@ fence_driver_init:

amdgpu_device_check_iommu_direct_map(adev);

adev->pm_nb.notifier_call = amdgpu_device_pm_notifier;
r = register_pm_notifier(&adev->pm_nb);
if (r)
goto failed;

return 0;

release_ras_con:
@@ -4583,6 +4590,8 @@ void amdgpu_device_fini_hw(struct amdgpu_device *adev)
drain_workqueue(adev->mman.bdev.wq);
adev->shutdown = true;

unregister_pm_notifier(&adev->pm_nb);

/* make sure IB test finished before entering exclusive mode
* to avoid preemption on IB test
*/
@@ -4712,6 +4721,33 @@ static int amdgpu_device_evict_resources(struct amdgpu_device *adev)
/*
* Suspend & resume.
*/
/**
* amdgpu_device_pm_notifier - Notification block for Suspend/Hibernate events
* @nb: notifier block
* @mode: suspend mode
* @data: data
*
* This function is called when the system is about to suspend or hibernate.
* It is used to set the appropriate flags so that eviction can be optimized
* in the pm prepare callback.
*/
static int amdgpu_device_pm_notifier(struct notifier_block *nb, unsigned long mode,
void *data)
{
struct amdgpu_device *adev = container_of(nb, struct amdgpu_device, pm_nb);

switch (mode) {
case PM_HIBERNATION_PREPARE:
adev->in_s4 = true;
break;
case PM_POST_HIBERNATION:
adev->in_s4 = false;
break;
}

return NOTIFY_DONE;
}

/**
* amdgpu_device_prepare - prepare for device suspend
*
@@ -4726,15 +4762,13 @@ int amdgpu_device_prepare(struct drm_device *dev)
struct amdgpu_device *adev = drm_to_adev(dev);
int i, r;

amdgpu_choose_low_power_state(adev);

if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
return 0;

/* Evict the majority of BOs before starting suspend sequence */
r = amdgpu_device_evict_resources(adev);
if (r)
goto unprepare;
return r;

flush_delayed_work(&adev->gfx.gfx_off_delay_work);

@@ -4745,15 +4779,10 @@ int amdgpu_device_prepare(struct drm_device *dev)
continue;
r = adev->ip_blocks[i].version->funcs->prepare_suspend((void *)adev);
if (r)
goto unprepare;
return r;
}

return 0;

unprepare:
adev->in_s0ix = adev->in_s3 = false;

return r;
}

/**

@@ -2635,7 +2635,6 @@ static int amdgpu_pmops_freeze(struct device *dev)
struct amdgpu_device *adev = drm_to_adev(drm_dev);
int r;

adev->in_s4 = true;
r = amdgpu_device_suspend(drm_dev, true);
if (r)
return r;
@@ -2648,13 +2647,8 @@ static int amdgpu_pmops_freeze(struct device *dev)
static int amdgpu_pmops_thaw(struct device *dev)
{
struct drm_device *drm_dev = dev_get_drvdata(dev);
struct amdgpu_device *adev = drm_to_adev(drm_dev);
int r;

r = amdgpu_device_resume(drm_dev, true);
adev->in_s4 = false;

return r;
return amdgpu_device_resume(drm_dev, true);
}

static int amdgpu_pmops_poweroff(struct device *dev)
@@ -2667,9 +2661,6 @@ static int amdgpu_pmops_poweroff(struct device *dev)
static int amdgpu_pmops_restore(struct device *dev)
{
struct drm_device *drm_dev = dev_get_drvdata(dev);
struct amdgpu_device *adev = drm_to_adev(drm_dev);

adev->in_s4 = false;

return amdgpu_device_resume(drm_dev, true);
}

@@ -747,6 +747,18 @@ static int gmc_v11_0_sw_init(void *handle)
adev->gmc.vram_type = vram_type;
adev->gmc.vram_vendor = vram_vendor;

/* The mall_size is already calculated as mall_size_per_umc * num_umc.
* However, for gfx1151, which features a 2-to-1 UMC mapping,
* the result must be multiplied by 2 to determine the actual mall size.
*/
switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {
case IP_VERSION(11, 5, 1):
adev->gmc.mall_size *= 2;
break;
default:
break;
}

switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {
case IP_VERSION(11, 0, 0):
case IP_VERSION(11, 0, 1):
@@ -12546,7 +12546,8 @@ int amdgpu_dm_process_dmub_aux_transfer_sync(
/* The reply is stored in the top nibble of the command. */
payload->reply[0] = (adev->dm.dmub_notify->aux_reply.command >> 4) & 0xF;

if (!payload->write && p_notify->aux_reply.length)
/*write req may receive a byte indicating partially written number as well*/
if (p_notify->aux_reply.length)
memcpy(payload->data, p_notify->aux_reply.data,
p_notify->aux_reply.length);

@@ -62,6 +62,7 @@ static ssize_t dm_dp_aux_transfer(struct drm_dp_aux *aux,
enum aux_return_code_type operation_result;
struct amdgpu_device *adev;
struct ddc_service *ddc;
uint8_t copy[16];

if (WARN_ON(msg->size > 16))
return -E2BIG;
@@ -77,6 +78,11 @@ static ssize_t dm_dp_aux_transfer(struct drm_dp_aux *aux,
(msg->request & DP_AUX_I2C_WRITE_STATUS_UPDATE) != 0;
payload.defer_delay = 0;

if (payload.write) {
memcpy(copy, msg->buffer, msg->size);
payload.data = copy;
}

result = dc_link_aux_transfer_raw(TO_DM_AUX(aux)->ddc_service, &payload,
&operation_result);

@@ -100,9 +106,9 @@ static ssize_t dm_dp_aux_transfer(struct drm_dp_aux *aux,
*/
if (payload.write && result >= 0) {
if (result) {
/*one byte indicating partially written bytes. Force 0 to retry*/
drm_info(adev_to_drm(adev), "amdgpu: AUX partially written\n");
result = 0;
/*one byte indicating partially written bytes*/
drm_dbg_dp(adev_to_drm(adev), "amdgpu: AUX partially written\n");
result = payload.data[0];
} else if (!payload.reply[0])
/*I2C_ACK|AUX_ACK*/
result = msg->size;
@@ -127,11 +133,11 @@ static ssize_t dm_dp_aux_transfer(struct drm_dp_aux *aux,
break;
}

drm_info(adev_to_drm(adev), "amdgpu: DP AUX transfer fail:%d\n", operation_result);
drm_dbg_dp(adev_to_drm(adev), "amdgpu: DP AUX transfer fail:%d\n", operation_result);
}

if (payload.reply[0])
drm_info(adev_to_drm(adev), "amdgpu: AUX reply command not ACK: 0x%02x.",
drm_dbg_dp(adev_to_drm(adev), "amdgpu: AUX reply command not ACK: 0x%02x.",
payload.reply[0]);

return result;

@@ -120,10 +120,11 @@ void dpp401_set_cursor_attributes(
enum dc_cursor_color_format color_format = cursor_attributes->color_format;
int cur_rom_en = 0;

// DCN4 should always do Cursor degamma for Cursor Color modes
if (color_format == CURSOR_MODE_COLOR_PRE_MULTIPLIED_ALPHA ||
color_format == CURSOR_MODE_COLOR_UN_PRE_MULTIPLIED_ALPHA) {
cur_rom_en = 1;
if (cursor_attributes->attribute_flags.bits.ENABLE_CURSOR_DEGAMMA) {
cur_rom_en = 1;
}
}

REG_UPDATE_3(CURSOR0_CONTROL,
@@ -105,6 +105,40 @@ static const struct fb_ops drm_fbdev_dma_deferred_fb_ops = {

static int drm_fbdev_dma_helper_fb_probe(struct drm_fb_helper *fb_helper,
struct drm_fb_helper_surface_size *sizes)
{
return drm_fbdev_dma_driver_fbdev_probe(fb_helper, sizes);
}

static int drm_fbdev_dma_helper_fb_dirty(struct drm_fb_helper *helper,
struct drm_clip_rect *clip)
{
struct drm_device *dev = helper->dev;
int ret;

/* Call damage handlers only if necessary */
if (!(clip->x1 < clip->x2 && clip->y1 < clip->y2))
return 0;

if (helper->fb->funcs->dirty) {
ret = helper->fb->funcs->dirty(helper->fb, NULL, 0, 0, clip, 1);
if (drm_WARN_ONCE(dev, ret, "Dirty helper failed: ret=%d\n", ret))
return ret;
}

return 0;
}

static const struct drm_fb_helper_funcs drm_fbdev_dma_helper_funcs = {
.fb_probe = drm_fbdev_dma_helper_fb_probe,
.fb_dirty = drm_fbdev_dma_helper_fb_dirty,
};

/*
* struct drm_fb_helper
*/

int drm_fbdev_dma_driver_fbdev_probe(struct drm_fb_helper *fb_helper,
struct drm_fb_helper_surface_size *sizes)
{
struct drm_client_dev *client = &fb_helper->client;
struct drm_device *dev = fb_helper->dev;
@@ -148,6 +182,7 @@ static int drm_fbdev_dma_helper_fb_probe(struct drm_fb_helper *fb_helper,
goto err_drm_client_buffer_delete;
}

fb_helper->funcs = &drm_fbdev_dma_helper_funcs;
fb_helper->buffer = buffer;
fb_helper->fb = fb;

@@ -211,30 +246,7 @@ err_drm_client_buffer_delete:
drm_client_framebuffer_delete(buffer);
return ret;
}

static int drm_fbdev_dma_helper_fb_dirty(struct drm_fb_helper *helper,
struct drm_clip_rect *clip)
{
struct drm_device *dev = helper->dev;
int ret;

/* Call damage handlers only if necessary */
if (!(clip->x1 < clip->x2 && clip->y1 < clip->y2))
return 0;

if (helper->fb->funcs->dirty) {
ret = helper->fb->funcs->dirty(helper->fb, NULL, 0, 0, clip, 1);
if (drm_WARN_ONCE(dev, ret, "Dirty helper failed: ret=%d\n", ret))
return ret;
}

return 0;
}

static const struct drm_fb_helper_funcs drm_fbdev_dma_helper_funcs = {
.fb_probe = drm_fbdev_dma_helper_fb_probe,
.fb_dirty = drm_fbdev_dma_helper_fb_dirty,
};
EXPORT_SYMBOL(drm_fbdev_dma_driver_fbdev_probe);

/*
* struct drm_client_funcs

@@ -67,6 +67,7 @@ config DRM_OFDRM
config DRM_PANEL_MIPI_DBI
tristate "DRM support for MIPI DBI compatible panels"
depends on DRM && SPI
select DRM_CLIENT_SELECTION
select DRM_KMS_HELPER
select DRM_GEM_DMA_HELPER
select DRM_MIPI_DBI

@@ -15,6 +15,7 @@
#include <linux/spi/spi.h>

#include <drm/drm_atomic_helper.h>
#include <drm/drm_client_setup.h>
#include <drm/drm_drv.h>
#include <drm/drm_fbdev_dma.h>
#include <drm/drm_gem_atomic_helper.h>
@@ -264,6 +265,7 @@ static const struct drm_driver panel_mipi_dbi_driver = {
.driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_ATOMIC,
.fops = &panel_mipi_dbi_fops,
DRM_GEM_DMA_DRIVER_OPS_VMAP,
DRM_FBDEV_DMA_DRIVER_OPS,
.debugfs_init = mipi_dbi_debugfs_init,
.name = "panel-mipi-dbi",
.desc = "MIPI DBI compatible display panel",
@@ -388,7 +390,10 @@ static int panel_mipi_dbi_spi_probe(struct spi_device *spi)

spi_set_drvdata(spi, drm);

drm_fbdev_dma_setup(drm, 0);
if (bpp == 16)
drm_client_setup_with_fourcc(drm, DRM_FORMAT_RGB565);
else
drm_client_setup_with_fourcc(drm, DRM_FORMAT_RGB888);

return 0;
}
@@ -47,6 +47,10 @@
#define MI_LRI_FORCE_POSTED REG_BIT(12)
#define MI_LRI_LEN(x) (((x) & 0xff) + 1)

#define MI_STORE_REGISTER_MEM (__MI_INSTR(0x24) | XE_INSTR_NUM_DW(4))
#define MI_SRM_USE_GGTT REG_BIT(22)
#define MI_SRM_ADD_CS_OFFSET REG_BIT(19)

#define MI_FLUSH_DW __MI_INSTR(0x26)
#define MI_FLUSH_DW_STORE_INDEX REG_BIT(21)
#define MI_INVALIDATE_TLB REG_BIT(18)

@@ -564,6 +564,28 @@ void xe_gsc_remove(struct xe_gsc *gsc)
xe_gsc_proxy_remove(gsc);
}

void xe_gsc_stop_prepare(struct xe_gsc *gsc)
{
struct xe_gt *gt = gsc_to_gt(gsc);
int ret;

if (!xe_uc_fw_is_loadable(&gsc->fw) || xe_uc_fw_is_in_error_state(&gsc->fw))
return;

xe_force_wake_assert_held(gt_to_fw(gt), XE_FW_GSC);

/*
 * If the GSC FW load or the proxy init are interrupted, the only way
 * to recover it is to do an FLR and reload the GSC from scratch.
 * Therefore, let's wait for the init to complete before stopping
 * operations. The proxy init is the last step, so we can just wait on
 * that
 */
ret = xe_gsc_wait_for_proxy_init_done(gsc);
if (ret)
xe_gt_err(gt, "failed to wait for GSC init completion before uc stop\n");
}

/*
 * wa_14015076503: if the GSC FW is loaded, we need to alert it before doing a
 * GSC engine reset by writing a notification bit in the GS1 register and then

@@ -16,6 +16,7 @@ struct xe_hw_engine;
int xe_gsc_init(struct xe_gsc *gsc);
int xe_gsc_init_post_hwconfig(struct xe_gsc *gsc);
void xe_gsc_wait_for_worker_completion(struct xe_gsc *gsc);
void xe_gsc_stop_prepare(struct xe_gsc *gsc);
void xe_gsc_load_start(struct xe_gsc *gsc);
void xe_gsc_remove(struct xe_gsc *gsc);
void xe_gsc_hwe_irq_handler(struct xe_hw_engine *hwe, u16 intr_vec);

@@ -71,6 +71,17 @@ bool xe_gsc_proxy_init_done(struct xe_gsc *gsc)
HECI1_FWSTS1_PROXY_STATE_NORMAL;
}

int xe_gsc_wait_for_proxy_init_done(struct xe_gsc *gsc)
{
struct xe_gt *gt = gsc_to_gt(gsc);

/* Proxy init can take up to 500ms, so wait double that for safety */
return xe_mmio_wait32(gt, HECI_FWSTS1(MTL_GSC_HECI1_BASE),
HECI1_FWSTS1_CURRENT_STATE,
HECI1_FWSTS1_PROXY_STATE_NORMAL,
USEC_PER_SEC, NULL, false);
}

static void __gsc_proxy_irq_rmw(struct xe_gsc *gsc, u32 clr, u32 set)
{
struct xe_gt *gt = gsc_to_gt(gsc);

@@ -13,6 +13,7 @@ struct xe_gsc;
int xe_gsc_proxy_init(struct xe_gsc *gsc);
bool xe_gsc_proxy_init_done(struct xe_gsc *gsc);
void xe_gsc_proxy_remove(struct xe_gsc *gsc);
int xe_gsc_wait_for_proxy_init_done(struct xe_gsc *gsc);
int xe_gsc_proxy_start(struct xe_gsc *gsc);

int xe_gsc_proxy_request_handler(struct xe_gsc *gsc);

@@ -828,7 +828,7 @@ void xe_gt_suspend_prepare(struct xe_gt *gt)
{
XE_WARN_ON(xe_force_wake_get(gt_to_fw(gt), XE_FORCEWAKE_ALL));

xe_uc_stop_prepare(&gt->uc);
xe_uc_suspend_prepare(&gt->uc);

XE_WARN_ON(xe_force_wake_put(gt_to_fw(gt), XE_FORCEWAKE_ALL));
}

@@ -694,7 +694,7 @@ static inline u32 __xe_lrc_start_seqno_offset(struct xe_lrc *lrc)

static u32 __xe_lrc_ctx_job_timestamp_offset(struct xe_lrc *lrc)
{
/* The start seqno is stored in the driver-defined portion of PPHWSP */
/* This is stored in the driver-defined portion of PPHWSP */
return xe_lrc_pphwsp_offset(lrc) + LRC_CTX_JOB_TIMESTAMP_OFFSET;
}

@@ -229,13 +229,10 @@ static u32 get_ppgtt_flag(struct xe_sched_job *job)

static int emit_copy_timestamp(struct xe_lrc *lrc, u32 *dw, int i)
{
dw[i++] = MI_COPY_MEM_MEM | MI_COPY_MEM_MEM_SRC_GGTT |
MI_COPY_MEM_MEM_DST_GGTT;
dw[i++] = MI_STORE_REGISTER_MEM | MI_SRM_USE_GGTT | MI_SRM_ADD_CS_OFFSET;
dw[i++] = RING_CTX_TIMESTAMP(0).addr;
dw[i++] = xe_lrc_ctx_job_timestamp_ggtt_addr(lrc);
dw[i++] = 0;
dw[i++] = xe_lrc_ctx_timestamp_ggtt_addr(lrc);
dw[i++] = 0;
dw[i++] = MI_NOOP;

return i;
}

@@ -241,7 +241,7 @@ void xe_uc_gucrc_disable(struct xe_uc *uc)

void xe_uc_stop_prepare(struct xe_uc *uc)
{
xe_gsc_wait_for_worker_completion(&uc->gsc);
xe_gsc_stop_prepare(&uc->gsc);
xe_guc_stop_prepare(&uc->guc);
}

@@ -275,6 +275,12 @@ again:
goto again;
}

void xe_uc_suspend_prepare(struct xe_uc *uc)
{
xe_gsc_wait_for_worker_completion(&uc->gsc);
xe_guc_stop_prepare(&uc->guc);
}

int xe_uc_suspend(struct xe_uc *uc)
{
/* GuC submission not enabled, nothing to do */

@@ -18,6 +18,7 @@ int xe_uc_reset_prepare(struct xe_uc *uc);
void xe_uc_stop_prepare(struct xe_uc *uc);
void xe_uc_stop(struct xe_uc *uc);
int xe_uc_start(struct xe_uc *uc);
void xe_uc_suspend_prepare(struct xe_uc *uc);
int xe_uc_suspend(struct xe_uc *uc);
int xe_uc_sanitize_reset(struct xe_uc *uc);
void xe_uc_remove(struct xe_uc *uc);

@@ -38,6 +38,9 @@ dispatch_hid_bpf_device_event(struct hid_device *hdev, enum hid_report_type type
struct hid_bpf_ops *e;
int ret;

if (unlikely(hdev->bpf.destroyed))
return ERR_PTR(-ENODEV);

if (type >= HID_REPORT_TYPES)
return ERR_PTR(-EINVAL);

@@ -93,6 +96,9 @@ int dispatch_hid_bpf_raw_requests(struct hid_device *hdev,
struct hid_bpf_ops *e;
int ret, idx;

if (unlikely(hdev->bpf.destroyed))
return -ENODEV;

if (rtype >= HID_REPORT_TYPES)
return -EINVAL;

@@ -130,6 +136,9 @@ int dispatch_hid_bpf_output_report(struct hid_device *hdev,
struct hid_bpf_ops *e;
int ret, idx;

if (unlikely(hdev->bpf.destroyed))
return -ENODEV;

idx = srcu_read_lock(&hdev->bpf.srcu);
list_for_each_entry_srcu(e, &hdev->bpf.prog_list, list,
srcu_read_lock_held(&hdev->bpf.srcu)) {

@@ -174,6 +174,7 @@ static void thrustmaster_interrupts(struct hid_device *hdev)
u8 ep_addr[2] = {b_ep, 0};

if (!usb_check_int_endpoints(usbif, ep_addr)) {
kfree(send_buf);
hid_err(hdev, "Unexpected non-int endpoint\n");
return;
}

@@ -142,11 +142,12 @@ static int uclogic_input_configured(struct hid_device *hdev,
suffix = "System Control";
break;
}
}

if (suffix)
} else {
hi->input->name = devm_kasprintf(&hdev->dev, GFP_KERNEL,
"%s %s", hdev->name, suffix);
if (!hi->input->name)
return -ENOMEM;
}

return 0;
}

@@ -1077,68 +1077,10 @@ int vmbus_sendpacket(struct vmbus_channel *channel, void *buffer,
EXPORT_SYMBOL(vmbus_sendpacket);

/*
 * vmbus_sendpacket_pagebuffer - Send a range of single-page buffer
 * packets using a GPADL Direct packet type. This interface allows you
 * to control notifying the host. This will be useful for sending
 * batched data. Also the sender can control the send flags
 * explicitly.
 */
int vmbus_sendpacket_pagebuffer(struct vmbus_channel *channel,
struct hv_page_buffer pagebuffers[],
u32 pagecount, void *buffer, u32 bufferlen,
u64 requestid)
{
int i;
struct vmbus_channel_packet_page_buffer desc;
u32 descsize;
u32 packetlen;
u32 packetlen_aligned;
struct kvec bufferlist[3];
u64 aligned_data = 0;

if (pagecount > MAX_PAGE_BUFFER_COUNT)
return -EINVAL;

/*
 * Adjust the size down since vmbus_channel_packet_page_buffer is the
 * largest size we support
 */
descsize = sizeof(struct vmbus_channel_packet_page_buffer) -
((MAX_PAGE_BUFFER_COUNT - pagecount) *
sizeof(struct hv_page_buffer));
packetlen = descsize + bufferlen;
packetlen_aligned = ALIGN(packetlen, sizeof(u64));

/* Setup the descriptor */
desc.type = VM_PKT_DATA_USING_GPA_DIRECT;
desc.flags = VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED;
desc.dataoffset8 = descsize >> 3; /* in 8-bytes granularity */
desc.length8 = (u16)(packetlen_aligned >> 3);
desc.transactionid = VMBUS_RQST_ERROR; /* will be updated in hv_ringbuffer_write() */
desc.reserved = 0;
desc.rangecount = pagecount;

for (i = 0; i < pagecount; i++) {
desc.range[i].len = pagebuffers[i].len;
desc.range[i].offset = pagebuffers[i].offset;
desc.range[i].pfn = pagebuffers[i].pfn;
}

bufferlist[0].iov_base = &desc;
bufferlist[0].iov_len = descsize;
bufferlist[1].iov_base = buffer;
bufferlist[1].iov_len = bufferlen;
bufferlist[2].iov_base = &aligned_data;
bufferlist[2].iov_len = (packetlen_aligned - packetlen);

return hv_ringbuffer_write(channel, bufferlist, 3, requestid, NULL);
}
EXPORT_SYMBOL_GPL(vmbus_sendpacket_pagebuffer);

/*
 * vmbus_sendpacket_multipagebuffer - Send a multi-page buffer packet
 * vmbus_sendpacket_mpb_desc - Send one or more multi-page buffer packets
 * using a GPADL Direct packet type.
 * The buffer includes the vmbus descriptor.
 * The desc argument must include space for the VMBus descriptor. The
 * rangecount field must already be set.
 */
int vmbus_sendpacket_mpb_desc(struct vmbus_channel *channel,
struct vmbus_packet_mpb_array *desc,
@@ -1160,7 +1102,6 @@ int vmbus_sendpacket_mpb_desc(struct vmbus_channel *channel,
desc->length8 = (u16)(packetlen_aligned >> 3);
desc->transactionid = VMBUS_RQST_ERROR; /* will be updated in hv_ringbuffer_write() */
desc->reserved = 0;
desc->rangecount = 1;

bufferlist[0].iov_base = desc;
bufferlist[0].iov_len = desc_size;

@@ -485,4 +485,10 @@ static inline int hv_debug_add_dev_dir(struct hv_device *dev)

#endif /* CONFIG_HYPERV_TESTING */

/* Create and remove sysfs entry for memory mapped ring buffers for a channel */
int hv_create_ring_sysfs(struct vmbus_channel *channel,
int (*hv_mmap_ring_buffer)(struct vmbus_channel *channel,
struct vm_area_struct *vma));
int hv_remove_ring_sysfs(struct vmbus_channel *channel);

#endif /* _HYPERV_VMBUS_H */

@@ -1792,6 +1792,27 @@ static ssize_t subchannel_id_show(struct vmbus_channel *channel,
}
static VMBUS_CHAN_ATTR_RO(subchannel_id);

static int hv_mmap_ring_buffer_wrapper(struct file *filp, struct kobject *kobj,
struct bin_attribute *attr,
struct vm_area_struct *vma)
{
struct vmbus_channel *channel = container_of(kobj, struct vmbus_channel, kobj);

/*
 * hv_(create|remove)_ring_sysfs implementation ensures that mmap_ring_buffer
 * is not NULL.
 */
return channel->mmap_ring_buffer(channel, vma);
}

static struct bin_attribute chan_attr_ring_buffer = {
.attr = {
.name = "ring",
.mode = 0600,
},
.size = 2 * SZ_2M,
.mmap = hv_mmap_ring_buffer_wrapper,
};
static struct attribute *vmbus_chan_attrs[] = {
&chan_attr_out_mask.attr,
&chan_attr_in_mask.attr,
@@ -1811,6 +1832,11 @@ static struct attribute *vmbus_chan_attrs[] = {
NULL
};

static struct bin_attribute *vmbus_chan_bin_attrs[] = {
&chan_attr_ring_buffer,
NULL
};

/*
 * Channel-level attribute_group callback function. Returns the permission for
 * each attribute, and returns 0 if an attribute is not visible.
@@ -1831,9 +1857,24 @@ static umode_t vmbus_chan_attr_is_visible(struct kobject *kobj,
return attr->mode;
}

static umode_t vmbus_chan_bin_attr_is_visible(struct kobject *kobj,
struct bin_attribute *attr, int idx)
{
const struct vmbus_channel *channel =
container_of(kobj, struct vmbus_channel, kobj);

/* Hide ring attribute if channel's ring_sysfs_visible is set to false */
if (attr == &chan_attr_ring_buffer && !channel->ring_sysfs_visible)
return 0;

return attr->attr.mode;
}

static const struct attribute_group vmbus_chan_group = {
.attrs = vmbus_chan_attrs,
.is_visible = vmbus_chan_attr_is_visible
.bin_attrs = vmbus_chan_bin_attrs,
.is_visible = vmbus_chan_attr_is_visible,
.is_bin_visible = vmbus_chan_bin_attr_is_visible,
};

static const struct kobj_type vmbus_chan_ktype = {
@@ -1841,6 +1882,63 @@ static const struct kobj_type vmbus_chan_ktype = {
.release = vmbus_chan_release,
};

/**
 * hv_create_ring_sysfs() - create "ring" sysfs entry corresponding to ring buffers for a channel.
 * @channel: Pointer to vmbus_channel structure
 * @hv_mmap_ring_buffer: function pointer for initializing the function to be called on mmap of
 * channel's "ring" sysfs node, which is for the ring buffer of that channel.
 * Function pointer is of below type:
 * int (*hv_mmap_ring_buffer)(struct vmbus_channel *channel,
 * struct vm_area_struct *vma))
 * This has a pointer to the channel and a pointer to vm_area_struct,
 * used for mmap, as arguments.
 *
 * Sysfs node for ring buffer of a channel is created along with other fields, however its
 * visibility is disabled by default. Sysfs creation needs to be controlled when the use-case
 * is running.
 * For example, HV_NIC device is used either by uio_hv_generic or hv_netvsc at any given point of
 * time, and "ring" sysfs is needed only when uio_hv_generic is bound to that device. To avoid
 * exposing the ring buffer by default, this function is reponsible to enable visibility of
 * ring for userspace to use.
 * Note: Race conditions can happen with userspace and it is not encouraged to create new
 * use-cases for this. This was added to maintain backward compatibility, while solving
 * one of the race conditions in uio_hv_generic while creating sysfs.
 *
 * Returns 0 on success or error code on failure.
 */
int hv_create_ring_sysfs(struct vmbus_channel *channel,
int (*hv_mmap_ring_buffer)(struct vmbus_channel *channel,
struct vm_area_struct *vma))
{
struct kobject *kobj = &channel->kobj;

channel->mmap_ring_buffer = hv_mmap_ring_buffer;
channel->ring_sysfs_visible = true;

return sysfs_update_group(kobj, &vmbus_chan_group);
}
EXPORT_SYMBOL_GPL(hv_create_ring_sysfs);

/**
 * hv_remove_ring_sysfs() - remove ring sysfs entry corresponding to ring buffers for a channel.
 * @channel: Pointer to vmbus_channel structure
 *
 * Hide "ring" sysfs for a channel by changing its is_visible attribute and updating sysfs group.
 *
 * Returns 0 on success or error code on failure.
 */
int hv_remove_ring_sysfs(struct vmbus_channel *channel)
{
struct kobject *kobj = &channel->kobj;
int ret;

channel->ring_sysfs_visible = false;
ret = sysfs_update_group(kobj, &vmbus_chan_group);
channel->mmap_ring_buffer = NULL;
return ret;
}
EXPORT_SYMBOL_GPL(hv_remove_ring_sysfs);

/*
 * vmbus_add_channel_kobj - setup a sub-directory under device/channels
 */

@@ -45,7 +45,7 @@ struct ad7266_state {
 */
struct {
__be16 sample[2];
s64 timestamp;
aligned_s64 timestamp;
} data __aligned(IIO_DMA_MINALIGN);
};

@@ -169,7 +169,7 @@ struct ad7768_state {
union {
struct {
__be32 chan;
s64 timestamp;
aligned_s64 timestamp;
} scan;
__be32 d32;
u8 d8[2];

@@ -5,7 +5,6 @@
 * Copyright (c) Tomasz Duszynski <tduszyns@gmail.com>
 */

#include <linux/unaligned.h>
#include <linux/completion.h>
#include <linux/device.h>
#include <linux/errno.h>
@@ -19,6 +18,8 @@
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/serdev.h>
#include <linux/types.h>
#include <linux/unaligned.h>

#define PMS7003_DRIVER_NAME "pms7003"

@@ -76,7 +77,7 @@ struct pms7003_state {
/* Used to construct scan to push to the IIO buffer */
struct {
u16 data[3]; /* PM1, PM2P5, PM10 */
s64 ts;
aligned_s64 ts;
} scan;
};

@@ -108,7 +108,7 @@ static irqreturn_t sps30_trigger_handler(int irq, void *p)
int ret;
struct {
s32 data[4]; /* PM1, PM2P5, PM4, PM10 */
s64 ts;
aligned_s64 ts;
} scan;

mutex_lock(&state->lock);

@@ -692,8 +692,9 @@ static irqreturn_t opt3001_irq(int irq, void *_iio)
struct opt3001 *opt = iio_priv(iio);
int ret;
bool wake_result_ready_queue = false;
bool ok_to_ignore_lock = opt->ok_to_ignore_lock;

if (!opt->ok_to_ignore_lock)
if (!ok_to_ignore_lock)
mutex_lock(&opt->lock);

ret = i2c_smbus_read_word_swapped(opt->client, OPT3001_CONFIGURATION);
@@ -730,7 +731,7 @@ static irqreturn_t opt3001_irq(int irq, void *_iio)
}

out:
if (!opt->ok_to_ignore_lock)
if (!ok_to_ignore_lock)
mutex_unlock(&opt->lock);

if (wake_result_ready_queue)

@@ -34,16 +34,6 @@ struct iio_dev;
struct mpr_data;
struct mpr_ops;

/**
 * struct mpr_chan
 * @pres: pressure value
 * @ts: timestamp
 */
struct mpr_chan {
s32 pres;
s64 ts;
};

enum mpr_func_id {
MPR_FUNCTION_A,
MPR_FUNCTION_B,
@@ -69,6 +59,8 @@ enum mpr_func_id {
 * reading in a loop until data is ready
 * @completion: handshake from irq to read
 * @chan: channel values for buffered mode
 * @chan.pres: pressure value
 * @chan.ts: timestamp
 * @buffer: raw conversion data
 */
struct mpr_data {
@@ -87,7 +79,10 @@ struct mpr_data {
struct gpio_desc *gpiod_reset;
int irq;
struct completion completion;
struct mpr_chan chan;
struct {
s32 pres;
aligned_s64 ts;
} chan;
u8 buffer[MPR_MEASUREMENT_RD_SIZE] __aligned(IIO_DMA_MINALIGN);
};

@@ -1368,6 +1368,9 @@ static void ib_device_notify_register(struct ib_device *device)

down_read(&devices_rwsem);

/* Mark for userspace that device is ready */
kobject_uevent(&device->dev.kobj, KOBJ_ADD);

ret = rdma_nl_notify_event(device, 0, RDMA_REGISTER_EVENT);
if (ret)
goto out;
@@ -1484,10 +1487,9 @@ int ib_register_device(struct ib_device *device, const char *name,
return ret;
}
dev_set_uevent_suppress(&device->dev, false);
/* Mark for userspace that device is ready */
kobject_uevent(&device->dev.kobj, KOBJ_ADD);

ib_device_notify_register(device);

ib_device_put(device);

return 0;

@@ -56,11 +56,8 @@ int rxe_cq_from_init(struct rxe_dev *rxe, struct rxe_cq *cq, int cqe,

err = do_mmap_info(rxe, uresp ? &uresp->mi : NULL, udata,
cq->queue->buf, cq->queue->buf_size, &cq->queue->ip);
if (err) {
vfree(cq->queue->buf);
kfree(cq->queue);
if (err)
return err;
}

cq->is_user = uresp;

@@ -326,6 +326,26 @@ static void b53_get_vlan_entry(struct b53_device *dev, u16 vid,
}
}

static void b53_set_eap_mode(struct b53_device *dev, int port, int mode)
{
u64 eap_conf;

if (is5325(dev) || is5365(dev) || dev->chip_id == BCM5389_DEVICE_ID)
return;

b53_read64(dev, B53_EAP_PAGE, B53_PORT_EAP_CONF(port), &eap_conf);

if (is63xx(dev)) {
eap_conf &= ~EAP_MODE_MASK_63XX;
eap_conf |= (u64)mode << EAP_MODE_SHIFT_63XX;
} else {
eap_conf &= ~EAP_MODE_MASK;
eap_conf |= (u64)mode << EAP_MODE_SHIFT;
}

b53_write64(dev, B53_EAP_PAGE, B53_PORT_EAP_CONF(port), eap_conf);
}

static void b53_set_forwarding(struct b53_device *dev, int enable)
{
u8 mgmt;
@@ -586,6 +606,13 @@ int b53_setup_port(struct dsa_switch *ds, int port)
b53_port_set_mcast_flood(dev, port, true);
b53_port_set_learning(dev, port, false);

/* Force all traffic to go to the CPU port to prevent the ASIC from
 * trying to forward to bridged ports on matching FDB entries, then
 * dropping frames because it isn't allowed to forward there.
 */
if (dsa_is_user_port(ds, port))
b53_set_eap_mode(dev, port, EAP_MODE_SIMPLIFIED);

return 0;
}
EXPORT_SYMBOL(b53_setup_port);
@@ -2043,6 +2070,9 @@ int b53_br_join(struct dsa_switch *ds, int port, struct dsa_bridge bridge,
pvlan |= BIT(i);
}

/* Disable redirection of unknown SA to the CPU port */
b53_set_eap_mode(dev, port, EAP_MODE_BASIC);

/* Configure the local port VLAN control membership to include
 * remote ports and update the local port bitmask
 */
@@ -2078,6 +2108,9 @@ void b53_br_leave(struct dsa_switch *ds, int port, struct dsa_bridge bridge)
pvlan &= ~BIT(i);
}

/* Enable redirection of unknown SA to the CPU port */
b53_set_eap_mode(dev, port, EAP_MODE_SIMPLIFIED);

b53_write16(dev, B53_PVLAN_PAGE, B53_PVLAN_PORT_MASK(port), pvlan);
dev->ports[port].vlan_ctl_mask = pvlan;

@@ -50,6 +50,9 @@
/* Jumbo Frame Registers */
#define B53_JUMBO_PAGE 0x40

/* EAP Registers */
#define B53_EAP_PAGE 0x42

/* EEE Control Registers Page */
#define B53_EEE_PAGE 0x92

@@ -480,6 +483,17 @@
#define JMS_MIN_SIZE 1518
#define JMS_MAX_SIZE 9724

/*************************************************************************
 * EAP Page Registers
 *************************************************************************/
#define B53_PORT_EAP_CONF(i) (0x20 + 8 * (i))
#define EAP_MODE_SHIFT 51
#define EAP_MODE_SHIFT_63XX 50
#define EAP_MODE_MASK (0x3ull << EAP_MODE_SHIFT)
#define EAP_MODE_MASK_63XX (0x3ull << EAP_MODE_SHIFT_63XX)
#define EAP_MODE_BASIC 0
#define EAP_MODE_SIMPLIFIED 3

/*************************************************************************
 * EEE Configuration Page Registers
 *************************************************************************/

@@ -2083,6 +2083,7 @@ static void sja1105_bridge_stp_state_set(struct dsa_switch *ds, int port,
switch (state) {
case BR_STATE_DISABLED:
case BR_STATE_BLOCKING:
case BR_STATE_LISTENING:
/* From UM10944 description of DRPDTAG (why put this there?):
 * "Management traffic flows to the port regardless of the state
 * of the INGRESS flag". So BPDUs are still be allowed to pass.
@@ -2092,11 +2093,6 @@ static void sja1105_bridge_stp_state_set(struct dsa_switch *ds, int port,
mac[port].egress = false;
mac[port].dyn_learn = false;
break;
case BR_STATE_LISTENING:
mac[port].ingress = true;
mac[port].egress = false;
mac[port].dyn_learn = false;
break;
case BR_STATE_LEARNING:
mac[port].ingress = true;
mac[port].egress = false;

@@ -1033,22 +1033,15 @@ static void macb_update_stats(struct macb *bp)

static int macb_halt_tx(struct macb *bp)
{
unsigned long halt_time, timeout;
u32 status;
u32 status;

macb_writel(bp, NCR, macb_readl(bp, NCR) | MACB_BIT(THALT));

timeout = jiffies + usecs_to_jiffies(MACB_HALT_TIMEOUT);
do {
halt_time = jiffies;
status = macb_readl(bp, TSR);
if (!(status & MACB_BIT(TGO)))
return 0;

udelay(250);
} while (time_before(halt_time, timeout));

return -ETIMEDOUT;
/* Poll TSR until TGO is cleared or timeout. */
return read_poll_timeout_atomic(macb_readl, status,
!(status & MACB_BIT(TGO)),
250, MACB_HALT_TIMEOUT, false,
bp, TSR);
}

static void macb_tx_unmap(struct macb *bp, struct macb_tx_skb *tx_skb, int budget)

@@ -67,6 +67,8 @@
#define TSNEP_TX_TYPE_XDP_NDO_MAP_PAGE (TSNEP_TX_TYPE_XDP_NDO | TSNEP_TX_TYPE_MAP_PAGE)
#define TSNEP_TX_TYPE_XDP (TSNEP_TX_TYPE_XDP_TX | TSNEP_TX_TYPE_XDP_NDO)
#define TSNEP_TX_TYPE_XSK BIT(12)
#define TSNEP_TX_TYPE_TSTAMP BIT(13)
#define TSNEP_TX_TYPE_SKB_TSTAMP (TSNEP_TX_TYPE_SKB | TSNEP_TX_TYPE_TSTAMP)

#define TSNEP_XDP_TX BIT(0)
#define TSNEP_XDP_REDIRECT BIT(1)
@@ -387,8 +389,7 @@ static void tsnep_tx_activate(struct tsnep_tx *tx, int index, int length,
if (entry->skb) {
entry->properties = length & TSNEP_DESC_LENGTH_MASK;
entry->properties |= TSNEP_DESC_INTERRUPT_FLAG;
if ((entry->type & TSNEP_TX_TYPE_SKB) &&
(skb_shinfo(entry->skb)->tx_flags & SKBTX_IN_PROGRESS))
if ((entry->type & TSNEP_TX_TYPE_SKB_TSTAMP) == TSNEP_TX_TYPE_SKB_TSTAMP)
entry->properties |= TSNEP_DESC_EXTENDED_WRITEBACK_FLAG;

/* toggle user flag to prevent false acknowledge
@@ -480,7 +481,8 @@ static int tsnep_tx_map_frag(skb_frag_t *frag, struct tsnep_tx_entry *entry,
return mapped;
}

static int tsnep_tx_map(struct sk_buff *skb, struct tsnep_tx *tx, int count)
static int tsnep_tx_map(struct sk_buff *skb, struct tsnep_tx *tx, int count,
bool do_tstamp)
{
struct device *dmadev = tx->adapter->dmadev;
struct tsnep_tx_entry *entry;
@@ -506,6 +508,9 @@ static int tsnep_tx_map(struct sk_buff *skb, struct tsnep_tx *tx, int count)
entry->type = TSNEP_TX_TYPE_SKB_INLINE;
mapped = 0;
}

if (do_tstamp)
entry->type |= TSNEP_TX_TYPE_TSTAMP;
} else {
skb_frag_t *frag = &skb_shinfo(skb)->frags[i - 1];

@@ -559,11 +564,12 @@ static int tsnep_tx_unmap(struct tsnep_tx *tx, int index, int count)
static netdev_tx_t tsnep_xmit_frame_ring(struct sk_buff *skb,
struct tsnep_tx *tx)
{
int count = 1;
struct tsnep_tx_entry *entry;
bool do_tstamp = false;
int count = 1;
int length;
int i;
int retval;
int i;

if (skb_shinfo(skb)->nr_frags > 0)
count += skb_shinfo(skb)->nr_frags;
@@ -580,7 +586,13 @@ static netdev_tx_t tsnep_xmit_frame_ring(struct sk_buff *skb,
entry = &tx->entry[tx->write];
entry->skb = skb;

retval = tsnep_tx_map(skb, tx, count);
if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
tx->adapter->hwtstamp_config.tx_type == HWTSTAMP_TX_ON) {
skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
do_tstamp = true;
}

retval = tsnep_tx_map(skb, tx, count, do_tstamp);
if (retval < 0) {
tsnep_tx_unmap(tx, tx->write, count);
dev_kfree_skb_any(entry->skb);
@@ -592,9 +604,6 @@ static netdev_tx_t tsnep_xmit_frame_ring(struct sk_buff *skb,
}
length = retval;

if (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)
skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;

for (i = 0; i < count; i++)
tsnep_tx_activate(tx, (tx->write + i) & TSNEP_RING_MASK, length,
i == count - 1);
@@ -845,8 +854,7 @@ static bool tsnep_tx_poll(struct tsnep_tx *tx, int napi_budget)

length = tsnep_tx_unmap(tx, tx->read, count);

if ((entry->type & TSNEP_TX_TYPE_SKB) &&
(skb_shinfo(entry->skb)->tx_flags & SKBTX_IN_PROGRESS) &&
if (((entry->type & TSNEP_TX_TYPE_SKB_TSTAMP) == TSNEP_TX_TYPE_SKB_TSTAMP) &&
(__le32_to_cpu(entry->desc_wb->properties) &
TSNEP_DESC_EXTENDED_WRITEBACK_FLAG)) {
struct skb_shared_hwtstamps hwtstamps;

@@ -707,6 +707,11 @@ int cgx_get_rx_stats(void *cgxd, int lmac_id, int idx, u64 *rx_stat)

if (!is_lmac_valid(cgx, lmac_id))
return -ENODEV;

/* pass lmac as 0 for CGX_CMR_RX_STAT9-12 */
if (idx >= CGX_RX_STAT_GLOBAL_INDEX)
lmac_id = 0;

*rx_stat = cgx_read(cgx, lmac_id, CGXX_CMRX_RX_STAT0 + (idx * 8));
return 0;
}

@@ -533,7 +533,8 @@ static int cn10k_mcs_write_tx_secy(struct otx2_nic *pfvf,
if (sw_tx_sc->encrypt)
sectag_tci |= (MCS_TCI_E | MCS_TCI_C);

policy = FIELD_PREP(MCS_TX_SECY_PLCY_MTU, secy->netdev->mtu);
policy = FIELD_PREP(MCS_TX_SECY_PLCY_MTU,
pfvf->netdev->mtu + OTX2_ETH_HLEN);
/* Write SecTag excluding AN bits(1..0) */
policy |= FIELD_PREP(MCS_TX_SECY_PLCY_ST_TCI, sectag_tci >> 2);
policy |= FIELD_PREP(MCS_TX_SECY_PLCY_ST_OFFSET, tag_offset);

@@ -364,6 +364,7 @@ struct otx2_flow_config {
struct list_head flow_list_tc;
u8 ucast_flt_cnt;
bool ntuple;
u16 ntuple_cnt;
};

struct dev_hw_ops {

@@ -41,6 +41,7 @@ static int otx2_dl_mcam_count_set(struct devlink *devlink, u32 id,
if (!pfvf->flow_cfg)
return 0;

pfvf->flow_cfg->ntuple_cnt = ctx->val.vu16;
otx2_alloc_mcam_entries(pfvf, ctx->val.vu16);

return 0;

@@ -252,7 +252,7 @@ int otx2_mcam_entry_init(struct otx2_nic *pfvf)
mutex_unlock(&pfvf->mbox.lock);

/* Allocate entries for Ntuple filters */
count = otx2_alloc_mcam_entries(pfvf, OTX2_DEFAULT_FLOWCOUNT);
count = otx2_alloc_mcam_entries(pfvf, flow_cfg->ntuple_cnt);
if (count <= 0) {
otx2_clear_ntuple_flow_info(pfvf, flow_cfg);
return 0;
@@ -312,6 +312,7 @@ int otx2_mcam_flow_init(struct otx2_nic *pf)
INIT_LIST_HEAD(&pf->flow_cfg->flow_list_tc);

pf->flow_cfg->ucast_flt_cnt = OTX2_DEFAULT_UNICAST_FLOWS;
pf->flow_cfg->ntuple_cnt = OTX2_DEFAULT_FLOWCOUNT;

/* Allocate bare minimum number of MCAM entries needed for
 * unicast and ntuple filters.

@@ -4685,7 +4685,7 @@ static int mtk_add_mac(struct mtk_eth *eth, struct device_node *np)
}

if (mtk_is_netsys_v3_or_greater(mac->hw) &&
MTK_HAS_CAPS(mac->hw->soc->caps, MTK_ESW_BIT) &&
MTK_HAS_CAPS(mac->hw->soc->caps, MTK_ESW) &&
id == MTK_GMAC1_ID) {
mac->phylink_config.mac_capabilities = MAC_ASYM_PAUSE |
MAC_SYM_PAUSE |

@@ -4344,6 +4344,10 @@ static netdev_features_t mlx5e_fix_uplink_rep_features(struct net_device *netdev
if (netdev->features & NETIF_F_HW_VLAN_CTAG_FILTER)
netdev_warn(netdev, "Disabling HW_VLAN CTAG FILTERING, not supported in switchdev mode\n");

features &= ~NETIF_F_HW_MACSEC;
if (netdev->features & NETIF_F_HW_MACSEC)
netdev_warn(netdev, "Disabling HW MACsec offload, not supported in switchdev mode\n");

return features;
}

@@ -3014,6 +3014,9 @@ static int mlxsw_sp_neigh_rif_made_sync(struct mlxsw_sp *mlxsw_sp,
.rif = rif,
};

if (!mlxsw_sp_dev_lower_is_port(mlxsw_sp_rif_dev(rif)))
return 0;

neigh_for_each(&arp_tbl, mlxsw_sp_neigh_rif_made_sync_each, &rms);
if (rms.err)
goto err_arp;

@@ -203,7 +203,7 @@ static struct pci_driver qede_pci_driver = {
};

static struct qed_eth_cb_ops qede_ll_ops = {
{
.common = {
#ifdef CONFIG_RFS_ACCEL
.arfs_filter_op = qede_arfs_filter_op,
#endif

@@ -1484,8 +1484,11 @@ static int qlcnic_sriov_channel_cfg_cmd(struct qlcnic_adapter *adapter, u8 cmd_o
}

cmd_op = (cmd.rsp.arg[0] & 0xff);
if (cmd.rsp.arg[0] >> 25 == 2)
return 2;
if (cmd.rsp.arg[0] >> 25 == 2) {
ret = 2;
goto out;
}

if (cmd_op == QLCNIC_BC_CMD_CHANNEL_INIT)
set_bit(QLC_BC_VF_STATE, &vf->state);
else

@@ -158,7 +158,6 @@ struct hv_netvsc_packet {
u8 cp_partial; /* partial copy into send buffer */

u8 rmsg_size; /* RNDIS header and PPI size */
u8 rmsg_pgcnt; /* page count of RNDIS header and PPI */
u8 page_buf_cnt;

u16 q_idx;
@@ -893,6 +892,18 @@ struct nvsp_message {
sizeof(struct nvsp_message))
#define NETVSC_MIN_IN_MSG_SIZE sizeof(struct vmpacket_descriptor)

/* Maximum # of contiguous data ranges that can make up a trasmitted packet.
 * Typically it's the max SKB fragments plus 2 for the rndis packet and the
 * linear portion of the SKB. But if MAX_SKB_FRAGS is large, the value may
 * need to be limited to MAX_PAGE_BUFFER_COUNT, which is the max # of entries
 * in a GPA direct packet sent to netvsp over VMBus.
 */
#if MAX_SKB_FRAGS + 2 < MAX_PAGE_BUFFER_COUNT
#define MAX_DATA_RANGES (MAX_SKB_FRAGS + 2)
#else
#define MAX_DATA_RANGES MAX_PAGE_BUFFER_COUNT
#endif

/* Estimated requestor size:
 * out_ring_size/min_out_msg_size + in_ring_size/min_in_msg_size
 */

@@ -947,8 +947,7 @@ static void netvsc_copy_to_send_buf(struct netvsc_device *net_device,
+ pend_size;
int i;
u32 padding = 0;
u32 page_count = packet->cp_partial ? packet->rmsg_pgcnt :
packet->page_buf_cnt;
u32 page_count = packet->cp_partial ? 1 : packet->page_buf_cnt;
u32 remain;

/* Add padding */
@@ -1049,6 +1048,42 @@ static int netvsc_dma_map(struct hv_device *hv_dev,
return 0;
}

/* Build an "array" of mpb entries describing the data to be transferred
 * over VMBus. After the desc header fields, each "array" entry is variable
 * size, and each entry starts after the end of the previous entry. The
 * "offset" and "len" fields for each entry imply the size of the entry.
 *
 * The pfns are in HV_HYP_PAGE_SIZE, because all communication with Hyper-V
 * uses that granularity, even if the system page size of the guest is larger.
 * Each entry in the input "pb" array must describe a contiguous range of
 * guest physical memory so that the pfns are sequential if the range crosses
 * a page boundary. The offset field must be < HV_HYP_PAGE_SIZE.
 */
static inline void netvsc_build_mpb_array(struct hv_page_buffer *pb,
u32 page_buffer_count,
struct vmbus_packet_mpb_array *desc,
u32 *desc_size)
{
struct hv_mpb_array *mpb_entry = &desc->range;
int i, j;

for (i = 0; i < page_buffer_count; i++) {
u32 offset = pb[i].offset;
u32 len = pb[i].len;

mpb_entry->offset = offset;
mpb_entry->len = len;

for (j = 0; j < HVPFN_UP(offset + len); j++)
mpb_entry->pfn_array[j] = pb[i].pfn + j;

mpb_entry = (struct hv_mpb_array *)&mpb_entry->pfn_array[j];
}

desc->rangecount = page_buffer_count;
*desc_size = (char *)mpb_entry - (char *)desc;
}

static inline int netvsc_send_pkt(
struct hv_device *device,
struct hv_netvsc_packet *packet,
@@ -1091,8 +1126,11 @@ static inline int netvsc_send_pkt(

packet->dma_range = NULL;
if (packet->page_buf_cnt) {
struct vmbus_channel_packet_page_buffer desc;
u32 desc_size;

if (packet->cp_partial)
pb += packet->rmsg_pgcnt;
pb++;

ret = netvsc_dma_map(ndev_ctx->device_ctx, packet, pb);
if (ret) {
@@ -1100,11 +1138,12 @@ static inline int netvsc_send_pkt(
goto exit;
}

ret = vmbus_sendpacket_pagebuffer(out_channel,
pb, packet->page_buf_cnt,
&nvmsg, sizeof(nvmsg),
req_id);

netvsc_build_mpb_array(pb, packet->page_buf_cnt,
(struct vmbus_packet_mpb_array *)&desc,
&desc_size);
ret = vmbus_sendpacket_mpb_desc(out_channel,
(struct vmbus_packet_mpb_array *)&desc,
desc_size, &nvmsg, sizeof(nvmsg), req_id);
if (ret)
netvsc_dma_unmap(ndev_ctx->device_ctx, packet);
} else {
@@ -1253,7 +1292,7 @@ int netvsc_send(struct net_device *ndev,
packet->send_buf_index = section_index;

if (packet->cp_partial) {
packet->page_buf_cnt -= packet->rmsg_pgcnt;
packet->page_buf_cnt--;
packet->total_data_buflen = msd_len + packet->rmsg_size;
} else {
packet->page_buf_cnt = 0;

@@ -325,43 +325,10 @@ static u16 netvsc_select_queue(struct net_device *ndev, struct sk_buff *skb,
return txq;
}

static u32 fill_pg_buf(unsigned long hvpfn, u32 offset, u32 len,
struct hv_page_buffer *pb)
{
int j = 0;

hvpfn += offset >> HV_HYP_PAGE_SHIFT;
offset = offset & ~HV_HYP_PAGE_MASK;

while (len > 0) {
unsigned long bytes;

bytes = HV_HYP_PAGE_SIZE - offset;
if (bytes > len)
bytes = len;
pb[j].pfn = hvpfn;
pb[j].offset = offset;
pb[j].len = bytes;

offset += bytes;
len -= bytes;

if (offset == HV_HYP_PAGE_SIZE && len) {
hvpfn++;
offset = 0;
j++;
}
}

return j + 1;
}

static u32 init_page_array(void *hdr, u32 len, struct sk_buff *skb,
struct hv_netvsc_packet *packet,
struct hv_page_buffer *pb)
{
u32 slots_used = 0;
char *data = skb->data;
int frags = skb_shinfo(skb)->nr_frags;
int i;

@@ -370,28 +337,27 @@ static u32 init_page_array(void *hdr, u32 len, struct sk_buff *skb,
 * 2. skb linear data
 * 3. skb fragment data
 */
slots_used += fill_pg_buf(virt_to_hvpfn(hdr),
offset_in_hvpage(hdr),
len,
&pb[slots_used]);

pb[0].offset = offset_in_hvpage(hdr);
pb[0].len = len;
pb[0].pfn = virt_to_hvpfn(hdr);
packet->rmsg_size = len;
packet->rmsg_pgcnt = slots_used;

slots_used += fill_pg_buf(virt_to_hvpfn(data),
offset_in_hvpage(data),
skb_headlen(skb),
&pb[slots_used]);
pb[1].offset = offset_in_hvpage(skb->data);
pb[1].len = skb_headlen(skb);
pb[1].pfn = virt_to_hvpfn(skb->data);

for (i = 0; i < frags; i++) {
skb_frag_t *frag = skb_shinfo(skb)->frags + i;
struct hv_page_buffer *cur_pb = &pb[i + 2];
u64 pfn = page_to_hvpfn(skb_frag_page(frag));
u32 offset = skb_frag_off(frag);

slots_used += fill_pg_buf(page_to_hvpfn(skb_frag_page(frag)),
skb_frag_off(frag),
skb_frag_size(frag),
&pb[slots_used]);
cur_pb->offset = offset_in_hvpage(offset);
cur_pb->len = skb_frag_size(frag);
cur_pb->pfn = pfn + (offset >> HV_HYP_PAGE_SHIFT);
}
return slots_used;
return frags + 2;
}

static int count_skb_frag_slots(struct sk_buff *skb)
@@ -482,7 +448,7 @@ static int netvsc_xmit(struct sk_buff *skb, struct net_device *net, bool xdp_tx)
struct net_device *vf_netdev;
u32 rndis_msg_size;
u32 hash;
struct hv_page_buffer pb[MAX_PAGE_BUFFER_COUNT];
struct hv_page_buffer pb[MAX_DATA_RANGES];

/* If VF is present and up then redirect packets to it.
 * Skip the VF if it is marked down or has no carrier.

@@ -225,8 +225,7 @@ static int rndis_filter_send_request(struct rndis_device *dev,
struct rndis_request *req)
{
struct hv_netvsc_packet *packet;
struct hv_page_buffer page_buf[2];
struct hv_page_buffer *pb = page_buf;
struct hv_page_buffer pb;
int ret;

/* Setup the packet to send it */
@@ -235,27 +234,14 @@ static int rndis_filter_send_request(struct rndis_device *dev,
packet->total_data_buflen = req->request_msg.msg_len;
packet->page_buf_cnt = 1;

pb[0].pfn = virt_to_phys(&req->request_msg) >>
HV_HYP_PAGE_SHIFT;
pb[0].len = req->request_msg.msg_len;
pb[0].offset = offset_in_hvpage(&req->request_msg);

/* Add one page_buf when request_msg crossing page boundary */
if (pb[0].offset + pb[0].len > HV_HYP_PAGE_SIZE) {
packet->page_buf_cnt++;
pb[0].len = HV_HYP_PAGE_SIZE -
pb[0].offset;
pb[1].pfn = virt_to_phys((void *)&req->request_msg
+ pb[0].len) >> HV_HYP_PAGE_SHIFT;
pb[1].offset = 0;
pb[1].len = req->request_msg.msg_len -
pb[0].len;
}
pb.pfn = virt_to_phys(&req->request_msg) >> HV_HYP_PAGE_SHIFT;
pb.len = req->request_msg.msg_len;
pb.offset = offset_in_hvpage(&req->request_msg);

trace_rndis_send(dev->ndev, 0, &req->request_msg);

rcu_read_lock_bh();
ret = netvsc_send(dev->ndev, packet, NULL, pb, NULL, false);
ret = netvsc_send(dev->ndev, packet, NULL, &pb, NULL, false);
rcu_read_unlock_bh();

return ret;

@@ -5547,7 +5547,7 @@ static int virtnet_rq_bind_xsk_pool(struct virtnet_info *vi, struct receive_queu

virtnet_rx_pause(vi, rq);

err = virtqueue_reset(rq->vq, virtnet_rq_unmap_free_buf);
err = virtqueue_reset(rq->vq, virtnet_rq_unmap_free_buf, NULL);
if (err) {
netdev_err(vi->dev, "reset rx fail: rx queue index: %d err: %d\n", qindex, err);

@@ -5576,7 +5576,8 @@ static int virtnet_sq_bind_xsk_pool(struct virtnet_info *vi,

virtnet_tx_pause(vi, sq);

err = virtqueue_reset(sq->vq, virtnet_sq_free_unused_buf);
err = virtqueue_reset(sq->vq, virtnet_sq_free_unused_buf,
virtnet_sq_free_unused_buf_done);
if (err) {
netdev_err(vi->dev, "reset tx fail: tx queue index: %d err: %d\n", qindex, err);
pool = NULL;

@@ -999,6 +999,7 @@ void mt76_dma_cleanup(struct mt76_dev *dev)
int i;

mt76_worker_disable(&dev->tx_worker);
napi_disable(&dev->tx_napi);
netif_napi_del(&dev->tx_napi);

for (i = 0; i < ARRAY_SIZE(dev->phys); i++) {

@@ -390,7 +390,7 @@ static bool nvme_dbbuf_update_and_check_event(u16 value, __le32 *dbbuf_db,
 * as it only leads to a small amount of wasted memory for the lifetime of
 * the I/O.
 */
static int nvme_pci_npages_prp(void)
static __always_inline int nvme_pci_npages_prp(void)
{
unsigned max_bytes = (NVME_MAX_KB_SZ * 1024) + NVME_CTRL_PAGE_SIZE;
unsigned nprps = DIV_ROUND_UP(max_bytes, NVME_CTRL_PAGE_SIZE);
@@ -1202,7 +1202,9 @@ static void nvme_poll_irqdisable(struct nvme_queue *nvmeq)
WARN_ON_ONCE(test_bit(NVMEQ_POLLED, &nvmeq->flags));

disable_irq(pci_irq_vector(pdev, nvmeq->cq_vector));
spin_lock(&nvmeq->cq_poll_lock);
nvme_poll_cq(nvmeq, NULL);
spin_unlock(&nvmeq->cq_poll_lock);
enable_irq(pci_irq_vector(pdev, nvmeq->cq_vector));
}

@@ -107,7 +107,6 @@ struct rcar_gen3_phy {
struct rcar_gen3_chan *ch;
u32 int_enable_bits;
bool initialized;
bool otg_initialized;
bool powered;
};

@@ -320,16 +319,15 @@ static bool rcar_gen3_is_any_rphy_initialized(struct rcar_gen3_chan *ch)
return false;
}

static bool rcar_gen3_needs_init_otg(struct rcar_gen3_chan *ch)
static bool rcar_gen3_is_any_otg_rphy_initialized(struct rcar_gen3_chan *ch)
{
int i;

for (i = 0; i < NUM_OF_PHYS; i++) {
if (ch->rphys[i].otg_initialized)
return false;
for (enum rcar_gen3_phy_index i = PHY_INDEX_BOTH_HC; i <= PHY_INDEX_EHCI;
i++) {
if (ch->rphys[i].initialized)
return true;
}

return true;
return false;
}

static bool rcar_gen3_are_all_rphys_power_off(struct rcar_gen3_chan *ch)
@@ -351,7 +349,7 @@ static ssize_t role_store(struct device *dev, struct device_attribute *attr,
bool is_b_device;
enum phy_mode cur_mode, new_mode;

if (!ch->is_otg_channel || !rcar_gen3_is_any_rphy_initialized(ch))
if (!ch->is_otg_channel || !rcar_gen3_is_any_otg_rphy_initialized(ch))
return -EIO;

if (sysfs_streq(buf, "host"))
@@ -389,7 +387,7 @@ static ssize_t role_show(struct device *dev, struct device_attribute *attr,
{
struct rcar_gen3_chan *ch = dev_get_drvdata(dev);

if (!ch->is_otg_channel || !rcar_gen3_is_any_rphy_initialized(ch))
if (!ch->is_otg_channel || !rcar_gen3_is_any_otg_rphy_initialized(ch))
return -EIO;

return sprintf(buf, "%s\n", rcar_gen3_is_host(ch) ? "host" :
@@ -402,6 +400,9 @@ static void rcar_gen3_init_otg(struct rcar_gen3_chan *ch)
void __iomem *usb2_base = ch->base;
u32 val;

if (!ch->is_otg_channel || rcar_gen3_is_any_otg_rphy_initialized(ch))
return;

/* Should not use functions of read-modify-write a register */
val = readl(usb2_base + USB2_LINECTRL1);
val = (val & ~USB2_LINECTRL1_DP_RPD) | USB2_LINECTRL1_DPRPD_EN |
@@ -462,16 +463,16 @@ static int rcar_gen3_phy_usb2_init(struct phy *p)
val = readl(usb2_base + USB2_INT_ENABLE);
val |= USB2_INT_ENABLE_UCOM_INTEN | rphy->int_enable_bits;
writel(val, usb2_base + USB2_INT_ENABLE);
writel(USB2_SPD_RSM_TIMSET_INIT, usb2_base + USB2_SPD_RSM_TIMSET);
writel(USB2_OC_TIMSET_INIT, usb2_base + USB2_OC_TIMSET);

/* Initialize otg part */
if (channel->is_otg_channel) {
if (rcar_gen3_needs_init_otg(channel))
rcar_gen3_init_otg(channel);
rphy->otg_initialized = true;
if (!rcar_gen3_is_any_rphy_initialized(channel)) {
writel(USB2_SPD_RSM_TIMSET_INIT, usb2_base + USB2_SPD_RSM_TIMSET);
writel(USB2_OC_TIMSET_INIT, usb2_base + USB2_OC_TIMSET);
}

/* Initialize otg part (only if we initialize a PHY with IRQs). */
if (rphy->int_enable_bits)
rcar_gen3_init_otg(channel);

rphy->initialized = true;

return 0;
@@ -486,9 +487,6 @@ static int rcar_gen3_phy_usb2_exit(struct phy *p)

rphy->initialized = false;

if (channel->is_otg_channel)
rphy->otg_initialized = false;

val = readl(usb2_base + USB2_INT_ENABLE);
val &= ~rphy->int_enable_bits;
if (!rcar_gen3_is_any_rphy_initialized(channel))

@@ -237,6 +237,8 @@
#define DATA0_VAL_PD BIT(1)
#define USE_XUSB_AO BIT(4)

#define TEGRA_UTMI_PAD_MAX 4

#define TEGRA186_LANE(_name, _offset, _shift, _mask, _type) \
{ \
.name = _name, \
@@ -269,7 +271,7 @@ struct tegra186_xusb_padctl {

/* UTMI bias and tracking */
struct clk *usb2_trk_clk;
unsigned int bias_pad_enable;
DECLARE_BITMAP(utmi_pad_enabled, TEGRA_UTMI_PAD_MAX);

/* padctl context */
struct tegra186_xusb_padctl_context context;
@@ -603,12 +605,8 @@ static void tegra186_utmi_bias_pad_power_on(struct tegra_xusb_padctl *padctl)
u32 value;
int err;

mutex_lock(&padctl->lock);

if (priv->bias_pad_enable++ > 0) {
mutex_unlock(&padctl->lock);
if (!bitmap_empty(priv->utmi_pad_enabled, TEGRA_UTMI_PAD_MAX))
return;
}

err = clk_prepare_enable(priv->usb2_trk_clk);
if (err < 0)
@@ -658,8 +656,6 @@ static void tegra186_utmi_bias_pad_power_on(struct tegra_xusb_padctl *padctl)
} else {
clk_disable_unprepare(priv->usb2_trk_clk);
}

mutex_unlock(&padctl->lock);
}

static void tegra186_utmi_bias_pad_power_off(struct tegra_xusb_padctl *padctl)
@@ -667,17 +663,8 @@ static void tegra186_utmi_bias_pad_power_off(struct tegra_xusb_padctl *padctl)
struct tegra186_xusb_padctl *priv = to_tegra186_xusb_padctl(padctl);
u32 value;

mutex_lock(&padctl->lock);

if (WARN_ON(priv->bias_pad_enable == 0)) {
mutex_unlock(&padctl->lock);
if (!bitmap_empty(priv->utmi_pad_enabled, TEGRA_UTMI_PAD_MAX))
return;
}

if (--priv->bias_pad_enable > 0) {
mutex_unlock(&padctl->lock);
return;
}

value = padctl_readl(padctl, XUSB_PADCTL_USB2_BIAS_PAD_CTL1);
value |= USB2_PD_TRK;
@@ -690,13 +677,13 @@ static void tegra186_utmi_bias_pad_power_off(struct tegra_xusb_padctl *padctl)
clk_disable_unprepare(priv->usb2_trk_clk);
}

mutex_unlock(&padctl->lock);
}

static void tegra186_utmi_pad_power_on(struct phy *phy)
{
struct tegra_xusb_lane *lane = phy_get_drvdata(phy);
struct tegra_xusb_padctl *padctl = lane->pad->padctl;
struct tegra186_xusb_padctl *priv = to_tegra186_xusb_padctl(padctl);
struct tegra_xusb_usb2_port *port;
struct device *dev = padctl->dev;
unsigned int index = lane->index;
@@ -705,9 +692,16 @@ static void tegra186_utmi_pad_power_on(struct phy *phy)
if (!phy)
return;

mutex_lock(&padctl->lock);
if (test_bit(index, priv->utmi_pad_enabled)) {
mutex_unlock(&padctl->lock);
return;
}

port = tegra_xusb_find_usb2_port(padctl, index);
if (!port) {
dev_err(dev, "no port found for USB2 lane %u\n", index);
mutex_unlock(&padctl->lock);
return;
}

@@ -724,18 +718,28 @@ static void tegra186_utmi_pad_power_on(struct phy *phy)
value = padctl_readl(padctl, XUSB_PADCTL_USB2_OTG_PADX_CTL1(index));
value &= ~USB2_OTG_PD_DR;
padctl_writel(padctl, value, XUSB_PADCTL_USB2_OTG_PADX_CTL1(index));

set_bit(index, priv->utmi_pad_enabled);
mutex_unlock(&padctl->lock);
}

static void tegra186_utmi_pad_power_down(struct phy *phy)
{
struct tegra_xusb_lane *lane = phy_get_drvdata(phy);
struct tegra_xusb_padctl *padctl = lane->pad->padctl;
struct tegra186_xusb_padctl *priv = to_tegra186_xusb_padctl(padctl);
unsigned int index = lane->index;
u32 value;

if (!phy)
return;

mutex_lock(&padctl->lock);
if (!test_bit(index, priv->utmi_pad_enabled)) {
mutex_unlock(&padctl->lock);
return;
}

dev_dbg(padctl->dev, "power down UTMI pad %u\n", index);

value = padctl_readl(padctl, XUSB_PADCTL_USB2_OTG_PADX_CTL0(index));
@@ -748,7 +752,11 @@ static void tegra186_utmi_pad_power_down(struct phy *phy)

udelay(2);

clear_bit(index, priv->utmi_pad_enabled);

tegra186_utmi_bias_pad_power_off(padctl);

mutex_unlock(&padctl->lock);
}

static int tegra186_xusb_padctl_vbus_override(struct tegra_xusb_padctl *padctl,

@@ -548,16 +548,16 @@ static int tegra_xusb_port_init(struct tegra_xusb_port *port,

err = dev_set_name(&port->dev, "%s-%u", name, index);
if (err < 0)
goto unregister;
goto put_device;

err = device_add(&port->dev);
if (err < 0)
goto unregister;
goto put_device;

return 0;

unregister:
device_unregister(&port->dev);
put_device:
put_device(&port->dev);
return err;
}

@@ -217,6 +217,13 @@ static const struct dmi_system_id fwbug_list[] = {
DMI_MATCH(DMI_BIOS_VERSION, "03.05"),
}
},
{
.ident = "MECHREVO Wujie 14X (GX4HRXL)",
.driver_data = &quirk_spurious_8042,
.matches = {
DMI_MATCH(DMI_BOARD_NAME, "WUJIE14-GX4HRXL"),
}
},
{}
};

@@ -334,6 +334,11 @@ static int amd_pmf_start_policy_engine(struct amd_pmf_dev *dev)
return 0;
}

static inline bool amd_pmf_pb_valid(struct amd_pmf_dev *dev)
{
return memchr_inv(dev->policy_buf, 0xff, dev->policy_sz);
}

#ifdef CONFIG_AMD_PMF_DEBUG
static void amd_pmf_hex_dump_pb(struct amd_pmf_dev *dev)
{
@@ -361,12 +366,22 @@ static ssize_t amd_pmf_get_pb_data(struct file *filp, const char __user *buf,
dev->policy_buf = new_policy_buf;
dev->policy_sz = length;

if (!amd_pmf_pb_valid(dev)) {
ret = -EINVAL;
goto cleanup;
}

amd_pmf_hex_dump_pb(dev);
ret = amd_pmf_start_policy_engine(dev);
if (ret < 0)
return ret;
goto cleanup;

return length;

cleanup:
kfree(dev->policy_buf);
dev->policy_buf = NULL;
return ret;
}

static const struct file_operations pb_fops = {
@@ -528,6 +543,12 @@ int amd_pmf_init_smart_pc(struct amd_pmf_dev *dev)

memcpy_fromio(dev->policy_buf, dev->policy_base, dev->policy_sz);

if (!amd_pmf_pb_valid(dev)) {
dev_info(dev->dev, "No Smart PC policy present\n");
ret = -EINVAL;
goto err_free_policy;
}

amd_pmf_hex_dump_pb(dev);

dev->prev_data = kzalloc(sizeof(*dev->prev_data), GFP_KERNEL);

@@ -4795,7 +4795,8 @@ static int asus_wmi_add(struct platform_device *pdev)
goto fail_leds;

asus_wmi_get_devstate(asus, ASUS_WMI_DEVID_WLAN, &result);
if (result & (ASUS_WMI_DSTS_PRESENCE_BIT | ASUS_WMI_DSTS_USER_BIT))
if ((result & (ASUS_WMI_DSTS_PRESENCE_BIT | ASUS_WMI_DSTS_USER_BIT)) ==
(ASUS_WMI_DSTS_PRESENCE_BIT | ASUS_WMI_DSTS_USER_BIT))
asus->driver->wlan_ctrl_by_user = 1;

if (!(asus->driver->wlan_ctrl_by_user && ashs_present())) {

@@ -132,7 +132,7 @@ static int max20086_regulators_register(struct max20086 *chip)

static int max20086_parse_regulators_dt(struct max20086 *chip, bool *boot_on)
{
struct of_regulator_match matches[MAX20086_MAX_REGULATORS] = { };
struct of_regulator_match *matches;
struct device_node *node;
unsigned int i;
int ret;
@@ -143,6 +143,11 @@ static int max20086_parse_regulators_dt(struct max20086 *chip, bool *boot_on)
return -ENODEV;
}

matches = devm_kcalloc(chip->dev, chip->info->num_outputs,
sizeof(*matches), GFP_KERNEL);
if (!matches)
return -ENOMEM;

for (i = 0; i < chip->info->num_outputs; ++i)
matches[i].name = max20086_output_names[i];

@@ -169,6 +169,7 @@ static void *sd_zbc_alloc_report_buffer(struct scsi_disk *sdkp,
unsigned int nr_zones, size_t *buflen)
{
struct request_queue *q = sdkp->disk->queue;
unsigned int max_segments;
size_t bufsize;
void *buf;

@@ -180,12 +181,15 @@ static void *sd_zbc_alloc_report_buffer(struct scsi_disk *sdkp,
 * Furthermore, since the report zone command cannot be split, make
 * sure that the allocated buffer can always be mapped by limiting the
 * number of pages allocated to the HBA max segments limit.
 * Since max segments can be larger than the max inline bio vectors,
 * further limit the allocated buffer to BIO_MAX_INLINE_VECS.
 */
nr_zones = min(nr_zones, sdkp->zone_info.nr_zones);
bufsize = roundup((nr_zones + 1) * 64, SECTOR_SIZE);
bufsize = min_t(size_t, bufsize,
queue_max_hw_sectors(q) << SECTOR_SHIFT);
bufsize = min_t(size_t, bufsize, queue_max_segments(q) << PAGE_SHIFT);
max_segments = min(BIO_MAX_INLINE_VECS, queue_max_segments(q));
bufsize = min_t(size_t, bufsize, max_segments << PAGE_SHIFT);

while (bufsize >= SECTOR_SIZE) {
buf = kvzalloc(bufsize, GFP_KERNEL | __GFP_NORETRY);

@@ -1819,6 +1819,7 @@ static int storvsc_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *scmnd)
return SCSI_MLQUEUE_DEVICE_BUSY;
}

payload->rangecount = 1;
payload->range.len = length;
payload->range.offset = offset_in_hvpg;

@@ -420,7 +420,7 @@ MODULE_LICENSE("GPL");
static void spi_test_print_hex_dump(char *pre, const void *ptr, size_t len)
{
/* limit the hex_dump */
if (len < 1024) {
if (len <= 1024) {
print_hex_dump(KERN_INFO, pre,
DUMP_PREFIX_OFFSET, 16, 1,
ptr, len, 0);

@@ -728,9 +728,9 @@ static int tegra_spi_set_hw_cs_timing(struct spi_device *spi)
u32 inactive_cycles;
u8 cs_state;

if ((setup->unit && setup->unit != SPI_DELAY_UNIT_SCK) ||
(hold->unit && hold->unit != SPI_DELAY_UNIT_SCK) ||
(inactive->unit && inactive->unit != SPI_DELAY_UNIT_SCK)) {
if ((setup->value && setup->unit != SPI_DELAY_UNIT_SCK) ||
(hold->value && hold->unit != SPI_DELAY_UNIT_SCK) ||
(inactive->value && inactive->unit != SPI_DELAY_UNIT_SCK)) {
dev_err(&spi->dev,
"Invalid delay unit %d, should be SPI_DELAY_UNIT_SCK\n",
SPI_DELAY_UNIT_SCK);