mirror of
https://github.com/raspberrypi/linux.git
synced 2025-12-06 01:49:46 +00:00
Compare commits
187 Commits
cf8f90deed
...
9f2dfb4565
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
9f2dfb4565 | ||
|
|
d5dc97879a | ||
|
|
5c19daa93d | ||
|
|
01c7a6e25b | ||
|
|
2dbb5e9d48 | ||
|
|
c45a1db3be | ||
|
|
6dce43433e | ||
|
|
90253acae2 | ||
|
|
6393d21c6a | ||
|
|
52f2d5cf33 | ||
|
|
a6226fa652 | ||
|
|
c4476fac0c | ||
|
|
df92165dd0 | ||
|
|
5a127a4553 | ||
|
|
3b987ebe6c | ||
|
|
61f5665d84 | ||
|
|
218a8504e6 | ||
|
|
3010739f53 | ||
|
|
6db2b0eb32 | ||
|
|
c79a6d9da2 | ||
|
|
f3c8243614 | ||
|
|
1652fbe448 | ||
|
|
a8ac2bd0f9 | ||
|
|
08adc31ec7 | ||
|
|
4559d96554 | ||
|
|
34c93e96c3 | ||
|
|
c0a9c2c1b7 | ||
|
|
d4caee32a9 | ||
|
|
0206a9341e | ||
|
|
ab9d10109a | ||
|
|
6d44dd3a0a | ||
|
|
da4f2e33d3 | ||
|
|
ce01b8f005 | ||
|
|
c584a9ecae | ||
|
|
37f92c400e | ||
|
|
3e473aeca3 | ||
|
|
47d412d48b | ||
|
|
921b090841 | ||
|
|
d9db9abf66 | ||
|
|
325aa07165 | ||
|
|
12726095e2 | ||
|
|
1ab78aabdc | ||
|
|
ee492508f7 | ||
|
|
92f06abe64 | ||
|
|
862b0e6393 | ||
|
|
8dd351c412 | ||
|
|
df0f4b13df | ||
|
|
0f64b37f19 | ||
|
|
8e5aa33ef5 | ||
|
|
ad8360d5f7 | ||
|
|
a7907979a7 | ||
|
|
850c7f0537 | ||
|
|
80db91cbb7 | ||
|
|
21b7af43f1 | ||
|
|
16e33851c3 | ||
|
|
f89c5e7077 | ||
|
|
fc6acd4cdd | ||
|
|
48c1734157 | ||
|
|
98e9d5e33b | ||
|
|
8662995aaa | ||
|
|
793245afc6 | ||
|
|
dd853cf1da | ||
|
|
361e53efad | ||
|
|
623bb26127 | ||
|
|
6455948c8a | ||
|
|
6a9657ec69 | ||
|
|
36049e81dc | ||
|
|
b84f083f50 | ||
|
|
0c5579294c | ||
|
|
5476ceb41c | ||
|
|
35ca3d5445 | ||
|
|
381a60545b | ||
|
|
74f78421c9 | ||
|
|
4587a7826b | ||
|
|
592b3b203a | ||
|
|
d7be15a634 | ||
|
|
3bc33097d4 | ||
|
|
b6bc86ce39 | ||
|
|
08c8d23e2e | ||
|
|
4d9b0ea629 | ||
|
|
70c130b1cf | ||
|
|
031e00249e | ||
|
|
27f853e7ac | ||
|
|
4fe3b912f5 | ||
|
|
81c7985382 | ||
|
|
68859a92f9 | ||
|
|
77711ee769 | ||
|
|
20b72f3f4d | ||
|
|
e8d944bc7e | ||
|
|
cf23d531a9 | ||
|
|
49344aac03 | ||
|
|
13d1c96d3a | ||
|
|
ba306daa7f | ||
|
|
cf327202d9 | ||
|
|
f0bb381b07 | ||
|
|
6700c8918b | ||
|
|
0e75a098b0 | ||
|
|
d71b98f253 | ||
|
|
098927a13f | ||
|
|
54f8f38a8e | ||
|
|
04b1fd7945 | ||
|
|
883902e4a8 | ||
|
|
80dc5a2ce5 | ||
|
|
484829bc04 | ||
|
|
9944c7938c | ||
|
|
6392e5f4b1 | ||
|
|
0b4f78e27c | ||
|
|
cbdbfc756f | ||
|
|
b2e4cda71e | ||
|
|
35517f62a0 | ||
|
|
b058e49fd6 | ||
|
|
bd4064f18d | ||
|
|
7da2c13e73 | ||
|
|
8961b12d5a | ||
|
|
b8031e779a | ||
|
|
f2fcc305b4 | ||
|
|
5aea2cde03 | ||
|
|
16608e53c1 | ||
|
|
1fd2470b76 | ||
|
|
2c27dd5b1a | ||
|
|
35638c69ef | ||
|
|
540471df3d | ||
|
|
527739d51b | ||
|
|
49a1b7d2f0 | ||
|
|
944b13dade | ||
|
|
b8026a5028 | ||
|
|
57622b6b1a | ||
|
|
6e48bc7c26 | ||
|
|
4d8b5fe1a4 | ||
|
|
2250a4b79f | ||
|
|
ef70624bde | ||
|
|
e099bde268 | ||
|
|
d1dbbbe839 | ||
|
|
51cb05d4fd | ||
|
|
878c855efa | ||
|
|
db81ad20fd | ||
|
|
acd24d5092 | ||
|
|
3c5451eef2 | ||
|
|
f0104977fe | ||
|
|
b9f8712eb8 | ||
|
|
49742edce0 | ||
|
|
9d9bafbf99 | ||
|
|
35a306bb53 | ||
|
|
1cfa4eac27 | ||
|
|
f9b96218f2 | ||
|
|
13ca43480f | ||
|
|
11cd7e0683 | ||
|
|
95b9b98c93 | ||
|
|
7b6b6c077c | ||
|
|
a5728422b8 | ||
|
|
22511faf14 | ||
|
|
347bf638d3 | ||
|
|
d14c094447 | ||
|
|
50ce635ec8 | ||
|
|
45e2bc24b5 | ||
|
|
a9619d259f | ||
|
|
4d0e0bb190 | ||
|
|
53cf801b85 | ||
|
|
c18a066071 | ||
|
|
c381dd20b0 | ||
|
|
dcc51dfe6f | ||
|
|
f130558773 | ||
|
|
6c627bcc18 | ||
|
|
fe19b58b35 | ||
|
|
25fbc3c27f | ||
|
|
b219d400f4 | ||
|
|
251d0e6256 | ||
|
|
dfd7e631a7 | ||
|
|
ba6fdd9b4d | ||
|
|
43aa61c18a | ||
|
|
39a1c8c860 | ||
|
|
d033e8cf4e | ||
|
|
eaf12bffd7 | ||
|
|
d2d95c0ea6 | ||
|
|
3537f1a373 | ||
|
|
b38ec49edf | ||
|
|
39ddffc6c0 | ||
|
|
df512b40e3 | ||
|
|
008d3b0f09 | ||
|
|
006a41c935 | ||
|
|
35959ab7d1 | ||
|
|
8c364a3a76 | ||
|
|
df21a2be8a | ||
|
|
fbb9ccd574 | ||
|
|
d453865e6e | ||
|
|
4e73066e33 | ||
|
|
2de67c9e62 |
2
Makefile
2
Makefile
@@ -1,7 +1,7 @@
|
||||
# SPDX-License-Identifier: GPL-2.0
|
||||
VERSION = 6
|
||||
PATCHLEVEL = 12
|
||||
SUBLEVEL = 58
|
||||
SUBLEVEL = 59
|
||||
EXTRAVERSION =
|
||||
NAME = Baby Opossum Posse
|
||||
|
||||
|
||||
@@ -55,8 +55,8 @@
|
||||
mdio {
|
||||
/delete-node/ switch@1e;
|
||||
|
||||
bcm54210e: ethernet-phy@0 {
|
||||
reg = <0>;
|
||||
bcm54210e: ethernet-phy@25 {
|
||||
reg = <25>;
|
||||
};
|
||||
};
|
||||
};
|
||||
|
||||
@@ -259,7 +259,7 @@
|
||||
pinctrl-0 = <&pinctrl_audmux>;
|
||||
status = "okay";
|
||||
|
||||
ssi2 {
|
||||
mux-ssi2 {
|
||||
fsl,audmux-port = <1>;
|
||||
fsl,port-config = <
|
||||
(IMX_AUDMUX_V2_PTCR_SYN |
|
||||
@@ -271,7 +271,7 @@
|
||||
>;
|
||||
};
|
||||
|
||||
aud3 {
|
||||
mux-aud3 {
|
||||
fsl,audmux-port = <2>;
|
||||
fsl,port-config = <
|
||||
IMX_AUDMUX_V2_PTCR_SYN
|
||||
|
||||
@@ -4,7 +4,7 @@ menu "Accelerated Cryptographic Algorithms for CPU (arm)"
|
||||
|
||||
config CRYPTO_CURVE25519_NEON
|
||||
tristate
|
||||
depends on KERNEL_MODE_NEON
|
||||
depends on KERNEL_MODE_NEON && !CPU_BIG_ENDIAN
|
||||
select CRYPTO_KPP
|
||||
select CRYPTO_LIB_CURVE25519_GENERIC
|
||||
select CRYPTO_ARCH_HAVE_LIB_CURVE25519
|
||||
|
||||
@@ -482,6 +482,8 @@
|
||||
};
|
||||
|
||||
&i2s1_8ch {
|
||||
pinctrl-names = "default";
|
||||
pinctrl-0 = <&i2s1m0_sclktx &i2s1m0_lrcktx &i2s1m0_sdi0 &i2s1m0_sdo0>;
|
||||
rockchip,trcm-sync-tx-only;
|
||||
status = "okay";
|
||||
};
|
||||
|
||||
@@ -115,7 +115,7 @@
|
||||
};
|
||||
};
|
||||
|
||||
gpu_opp_table: opp-table {
|
||||
gpu_opp_table: opp-table-gpu {
|
||||
compatible = "operating-points-v2";
|
||||
|
||||
opp-300000000 {
|
||||
|
||||
@@ -66,7 +66,7 @@
|
||||
};
|
||||
};
|
||||
|
||||
gpu_opp_table: opp-table {
|
||||
gpu_opp_table: opp-table-gpu {
|
||||
compatible = "operating-points-v2";
|
||||
|
||||
opp-300000000 {
|
||||
|
||||
@@ -49,7 +49,10 @@ void *alloc_insn_page(void)
|
||||
addr = execmem_alloc(EXECMEM_KPROBES, PAGE_SIZE);
|
||||
if (!addr)
|
||||
return NULL;
|
||||
set_memory_rox((unsigned long)addr, 1);
|
||||
if (set_memory_rox((unsigned long)addr, 1)) {
|
||||
execmem_free(addr);
|
||||
return NULL;
|
||||
}
|
||||
return addr;
|
||||
}
|
||||
|
||||
|
||||
@@ -134,13 +134,13 @@ static inline void hw_breakpoint_thread_switch(struct task_struct *next)
|
||||
/* Determine number of BRP registers available. */
|
||||
static inline int get_num_brps(void)
|
||||
{
|
||||
return csr_read64(LOONGARCH_CSR_FWPC) & CSR_FWPC_NUM;
|
||||
return csr_read32(LOONGARCH_CSR_FWPC) & CSR_FWPC_NUM;
|
||||
}
|
||||
|
||||
/* Determine number of WRP registers available. */
|
||||
static inline int get_num_wrps(void)
|
||||
{
|
||||
return csr_read64(LOONGARCH_CSR_MWPC) & CSR_MWPC_NUM;
|
||||
return csr_read32(LOONGARCH_CSR_MWPC) & CSR_MWPC_NUM;
|
||||
}
|
||||
|
||||
#endif /* __KERNEL__ */
|
||||
|
||||
@@ -431,6 +431,9 @@ static inline unsigned long pte_accessible(struct mm_struct *mm, pte_t a)
|
||||
|
||||
static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
|
||||
{
|
||||
if (pte_val(pte) & _PAGE_DIRTY)
|
||||
pte_val(pte) |= _PAGE_MODIFIED;
|
||||
|
||||
return __pte((pte_val(pte) & _PAGE_CHG_MASK) |
|
||||
(pgprot_val(newprot) & ~_PAGE_CHG_MASK));
|
||||
}
|
||||
@@ -565,9 +568,11 @@ static inline struct page *pmd_page(pmd_t pmd)
|
||||
|
||||
static inline pmd_t pmd_modify(pmd_t pmd, pgprot_t newprot)
|
||||
{
|
||||
pmd_val(pmd) = (pmd_val(pmd) & _HPAGE_CHG_MASK) |
|
||||
(pgprot_val(newprot) & ~_HPAGE_CHG_MASK);
|
||||
return pmd;
|
||||
if (pmd_val(pmd) & _PAGE_DIRTY)
|
||||
pmd_val(pmd) |= _PAGE_MODIFIED;
|
||||
|
||||
return __pmd((pmd_val(pmd) & _HPAGE_CHG_MASK) |
|
||||
(pgprot_val(newprot) & ~_HPAGE_CHG_MASK));
|
||||
}
|
||||
|
||||
static inline pmd_t pmd_mkinvalid(pmd_t pmd)
|
||||
|
||||
@@ -1123,8 +1123,8 @@ static void configure_exception_vector(void)
|
||||
tlbrentry = (unsigned long)exception_handlers + 80*VECSIZE;
|
||||
|
||||
csr_write64(eentry, LOONGARCH_CSR_EENTRY);
|
||||
csr_write64(eentry, LOONGARCH_CSR_MERRENTRY);
|
||||
csr_write64(tlbrentry, LOONGARCH_CSR_TLBRENTRY);
|
||||
csr_write64(__pa(eentry), LOONGARCH_CSR_MERRENTRY);
|
||||
csr_write64(__pa(tlbrentry), LOONGARCH_CSR_TLBRENTRY);
|
||||
}
|
||||
|
||||
void per_cpu_trap_init(int cpu)
|
||||
|
||||
@@ -4,6 +4,7 @@
|
||||
*/
|
||||
|
||||
#include <linux/kvm_host.h>
|
||||
#include <asm/delay.h>
|
||||
#include <asm/kvm_csr.h>
|
||||
#include <asm/kvm_vcpu.h>
|
||||
|
||||
@@ -95,6 +96,7 @@ void kvm_restore_timer(struct kvm_vcpu *vcpu)
|
||||
* and set CSR TVAL with -1
|
||||
*/
|
||||
write_gcsr_timertick(0);
|
||||
__delay(2); /* Wait cycles until timer interrupt injected */
|
||||
|
||||
/*
|
||||
* Writing CSR_TINTCLR_TI to LOONGARCH_CSR_TINTCLR will clear
|
||||
|
||||
@@ -127,6 +127,9 @@ static void kvm_lose_pmu(struct kvm_vcpu *vcpu)
|
||||
* Clear KVM_LARCH_PMU if the guest is not using PMU CSRs when
|
||||
* exiting the guest, so that the next time trap into the guest.
|
||||
* We don't need to deal with PMU CSRs contexts.
|
||||
*
|
||||
* Otherwise set the request bit KVM_REQ_PMU to restore guest PMU
|
||||
* before entering guest VM
|
||||
*/
|
||||
val = kvm_read_sw_gcsr(csr, LOONGARCH_CSR_PERFCTRL0);
|
||||
val |= kvm_read_sw_gcsr(csr, LOONGARCH_CSR_PERFCTRL1);
|
||||
@@ -134,6 +137,8 @@ static void kvm_lose_pmu(struct kvm_vcpu *vcpu)
|
||||
val |= kvm_read_sw_gcsr(csr, LOONGARCH_CSR_PERFCTRL3);
|
||||
if (!(val & KVM_PMU_EVENT_ENABLED))
|
||||
vcpu->arch.aux_inuse &= ~KVM_LARCH_PMU;
|
||||
else
|
||||
kvm_make_request(KVM_REQ_PMU, vcpu);
|
||||
|
||||
kvm_restore_host_pmu(vcpu);
|
||||
}
|
||||
|
||||
@@ -161,7 +161,7 @@ boot-image-$(CONFIG_KERNEL_LZO) := Image.lzo
|
||||
boot-image-$(CONFIG_KERNEL_ZSTD) := Image.zst
|
||||
boot-image-$(CONFIG_KERNEL_XZ) := Image.xz
|
||||
ifdef CONFIG_RISCV_M_MODE
|
||||
boot-image-$(CONFIG_ARCH_CANAAN) := loader.bin
|
||||
boot-image-$(CONFIG_SOC_CANAAN_K210) := loader.bin
|
||||
endif
|
||||
boot-image-$(CONFIG_EFI_ZBOOT) := vmlinuz.efi
|
||||
boot-image-$(CONFIG_XIP_KERNEL) := xipImage
|
||||
|
||||
@@ -54,6 +54,7 @@ void arch_cpuhp_cleanup_dead_cpu(unsigned int cpu)
|
||||
|
||||
pr_notice("CPU%u: off\n", cpu);
|
||||
|
||||
clear_tasks_mm_cpumask(cpu);
|
||||
/* Verify from the firmware if the cpu is really stopped*/
|
||||
if (cpu_ops->cpu_is_stopped)
|
||||
ret = cpu_ops->cpu_is_stopped(cpu);
|
||||
|
||||
@@ -298,11 +298,14 @@ void __init setup_arch(char **cmdline_p)
|
||||
/* Parse the ACPI tables for possible boot-time configuration */
|
||||
acpi_boot_table_init();
|
||||
|
||||
if (acpi_disabled) {
|
||||
#if IS_ENABLED(CONFIG_BUILTIN_DTB)
|
||||
unflatten_and_copy_device_tree();
|
||||
unflatten_and_copy_device_tree();
|
||||
#else
|
||||
unflatten_device_tree();
|
||||
unflatten_device_tree();
|
||||
#endif
|
||||
}
|
||||
|
||||
misc_mem_init();
|
||||
|
||||
init_resources();
|
||||
|
||||
@@ -194,7 +194,7 @@ int amd_detect_prefcore(bool *detected)
|
||||
break;
|
||||
}
|
||||
|
||||
for_each_present_cpu(cpu) {
|
||||
for_each_online_cpu(cpu) {
|
||||
u32 tmp;
|
||||
int ret;
|
||||
|
||||
|
||||
@@ -212,6 +212,7 @@ static bool need_sha_check(u32 cur_rev)
|
||||
case 0xb1010: return cur_rev <= 0xb101046; break;
|
||||
case 0xb2040: return cur_rev <= 0xb204031; break;
|
||||
case 0xb4040: return cur_rev <= 0xb404031; break;
|
||||
case 0xb4041: return cur_rev <= 0xb404101; break;
|
||||
case 0xb6000: return cur_rev <= 0xb600031; break;
|
||||
case 0xb6080: return cur_rev <= 0xb608031; break;
|
||||
case 0xb7000: return cur_rev <= 0xb700031; break;
|
||||
|
||||
@@ -3257,7 +3257,11 @@ static int svm_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr)
|
||||
if (data & DEBUGCTL_RESERVED_BITS)
|
||||
return 1;
|
||||
|
||||
if (svm_get_lbr_vmcb(svm)->save.dbgctl == data)
|
||||
break;
|
||||
|
||||
svm_get_lbr_vmcb(svm)->save.dbgctl = data;
|
||||
vmcb_mark_dirty(svm->vmcb, VMCB_LBR);
|
||||
svm_update_lbrv(vcpu);
|
||||
break;
|
||||
case MSR_VM_HSAVE_PA:
|
||||
|
||||
34
arch/x86/kvm/vmx/common.h
Normal file
34
arch/x86/kvm/vmx/common.h
Normal file
@@ -0,0 +1,34 @@
|
||||
/* SPDX-License-Identifier: GPL-2.0-only */
|
||||
#ifndef __KVM_X86_VMX_COMMON_H
|
||||
#define __KVM_X86_VMX_COMMON_H
|
||||
|
||||
#include <linux/kvm_host.h>
|
||||
|
||||
#include "mmu.h"
|
||||
|
||||
static inline int __vmx_handle_ept_violation(struct kvm_vcpu *vcpu, gpa_t gpa,
|
||||
unsigned long exit_qualification)
|
||||
{
|
||||
u64 error_code;
|
||||
|
||||
/* Is it a read fault? */
|
||||
error_code = (exit_qualification & EPT_VIOLATION_ACC_READ)
|
||||
? PFERR_USER_MASK : 0;
|
||||
/* Is it a write fault? */
|
||||
error_code |= (exit_qualification & EPT_VIOLATION_ACC_WRITE)
|
||||
? PFERR_WRITE_MASK : 0;
|
||||
/* Is it a fetch fault? */
|
||||
error_code |= (exit_qualification & EPT_VIOLATION_ACC_INSTR)
|
||||
? PFERR_FETCH_MASK : 0;
|
||||
/* ept page table entry is present? */
|
||||
error_code |= (exit_qualification & EPT_VIOLATION_RWX_MASK)
|
||||
? PFERR_PRESENT_MASK : 0;
|
||||
|
||||
if (exit_qualification & EPT_VIOLATION_GVA_IS_VALID)
|
||||
error_code |= (exit_qualification & EPT_VIOLATION_GVA_TRANSLATED) ?
|
||||
PFERR_GUEST_FINAL_MASK : PFERR_GUEST_PAGE_MASK;
|
||||
|
||||
return kvm_mmu_page_fault(vcpu, gpa, error_code, NULL, 0);
|
||||
}
|
||||
|
||||
#endif /* __KVM_X86_VMX_COMMON_H */
|
||||
@@ -53,6 +53,7 @@
|
||||
#include <trace/events/ipi.h>
|
||||
|
||||
#include "capabilities.h"
|
||||
#include "common.h"
|
||||
#include "cpuid.h"
|
||||
#include "hyperv.h"
|
||||
#include "kvm_onhyperv.h"
|
||||
@@ -5777,11 +5778,8 @@ static int handle_task_switch(struct kvm_vcpu *vcpu)
|
||||
|
||||
static int handle_ept_violation(struct kvm_vcpu *vcpu)
|
||||
{
|
||||
unsigned long exit_qualification;
|
||||
unsigned long exit_qualification = vmx_get_exit_qual(vcpu);
|
||||
gpa_t gpa;
|
||||
u64 error_code;
|
||||
|
||||
exit_qualification = vmx_get_exit_qual(vcpu);
|
||||
|
||||
/*
|
||||
* EPT violation happened while executing iret from NMI,
|
||||
@@ -5797,23 +5795,6 @@ static int handle_ept_violation(struct kvm_vcpu *vcpu)
|
||||
gpa = vmcs_read64(GUEST_PHYSICAL_ADDRESS);
|
||||
trace_kvm_page_fault(vcpu, gpa, exit_qualification);
|
||||
|
||||
/* Is it a read fault? */
|
||||
error_code = (exit_qualification & EPT_VIOLATION_ACC_READ)
|
||||
? PFERR_USER_MASK : 0;
|
||||
/* Is it a write fault? */
|
||||
error_code |= (exit_qualification & EPT_VIOLATION_ACC_WRITE)
|
||||
? PFERR_WRITE_MASK : 0;
|
||||
/* Is it a fetch fault? */
|
||||
error_code |= (exit_qualification & EPT_VIOLATION_ACC_INSTR)
|
||||
? PFERR_FETCH_MASK : 0;
|
||||
/* ept page table entry is present? */
|
||||
error_code |= (exit_qualification & EPT_VIOLATION_RWX_MASK)
|
||||
? PFERR_PRESENT_MASK : 0;
|
||||
|
||||
if (error_code & EPT_VIOLATION_GVA_IS_VALID)
|
||||
error_code |= (exit_qualification & EPT_VIOLATION_GVA_TRANSLATED) ?
|
||||
PFERR_GUEST_FINAL_MASK : PFERR_GUEST_PAGE_MASK;
|
||||
|
||||
/*
|
||||
* Check that the GPA doesn't exceed physical memory limits, as that is
|
||||
* a guest page fault. We have to emulate the instruction here, because
|
||||
@@ -5825,7 +5806,7 @@ static int handle_ept_violation(struct kvm_vcpu *vcpu)
|
||||
if (unlikely(allow_smaller_maxphyaddr && !kvm_vcpu_is_legal_gpa(vcpu, gpa)))
|
||||
return kvm_emulate_instruction(vcpu, 0);
|
||||
|
||||
return kvm_mmu_page_fault(vcpu, gpa, error_code, NULL, 0);
|
||||
return __vmx_handle_ept_violation(vcpu, gpa, exit_qualification);
|
||||
}
|
||||
|
||||
static int handle_ept_misconfig(struct kvm_vcpu *vcpu)
|
||||
|
||||
@@ -447,7 +447,7 @@ bool acpi_cpc_valid(void)
|
||||
if (acpi_disabled)
|
||||
return false;
|
||||
|
||||
for_each_present_cpu(cpu) {
|
||||
for_each_online_cpu(cpu) {
|
||||
cpc_ptr = per_cpu(cpc_desc_ptr, cpu);
|
||||
if (!cpc_ptr)
|
||||
return false;
|
||||
@@ -463,7 +463,7 @@ bool cppc_allow_fast_switch(void)
|
||||
struct cpc_desc *cpc_ptr;
|
||||
int cpu;
|
||||
|
||||
for_each_present_cpu(cpu) {
|
||||
for_each_online_cpu(cpu) {
|
||||
cpc_ptr = per_cpu(cpc_desc_ptr, cpu);
|
||||
desired_reg = &cpc_ptr->cpc_regs[DESIRED_PERF];
|
||||
if (!CPC_IN_SYSTEM_MEMORY(desired_reg) &&
|
||||
@@ -1366,7 +1366,7 @@ bool cppc_perf_ctrs_in_pcc(void)
|
||||
{
|
||||
int cpu;
|
||||
|
||||
for_each_present_cpu(cpu) {
|
||||
for_each_online_cpu(cpu) {
|
||||
struct cpc_register_resource *ref_perf_reg;
|
||||
struct cpc_desc *cpc_desc;
|
||||
|
||||
|
||||
@@ -864,10 +864,32 @@ static void hmat_register_target_devices(struct memory_target *target)
|
||||
}
|
||||
}
|
||||
|
||||
static void hmat_register_target(struct memory_target *target)
|
||||
static void hmat_hotplug_target(struct memory_target *target)
|
||||
{
|
||||
int nid = pxm_to_node(target->memory_pxm);
|
||||
|
||||
/*
|
||||
* Skip offline nodes. This can happen when memory marked EFI_MEMORY_SP,
|
||||
* "specific purpose", is applied to all the memory in a proximity
|
||||
* domain leading to * the node being marked offline / unplugged, or if
|
||||
* memory-only "hotplug" node is offline.
|
||||
*/
|
||||
if (nid == NUMA_NO_NODE || !node_online(nid))
|
||||
return;
|
||||
|
||||
guard(mutex)(&target_lock);
|
||||
if (target->registered)
|
||||
return;
|
||||
|
||||
hmat_register_target_initiators(target);
|
||||
hmat_register_target_cache(target);
|
||||
hmat_register_target_perf(target, ACCESS_COORDINATE_LOCAL);
|
||||
hmat_register_target_perf(target, ACCESS_COORDINATE_CPU);
|
||||
target->registered = true;
|
||||
}
|
||||
|
||||
static void hmat_register_target(struct memory_target *target)
|
||||
{
|
||||
/*
|
||||
* Devices may belong to either an offline or online
|
||||
* node, so unconditionally add them.
|
||||
@@ -885,25 +907,7 @@ static void hmat_register_target(struct memory_target *target)
|
||||
}
|
||||
mutex_unlock(&target_lock);
|
||||
|
||||
/*
|
||||
* Skip offline nodes. This can happen when memory
|
||||
* marked EFI_MEMORY_SP, "specific purpose", is applied
|
||||
* to all the memory in a proximity domain leading to
|
||||
* the node being marked offline / unplugged, or if
|
||||
* memory-only "hotplug" node is offline.
|
||||
*/
|
||||
if (nid == NUMA_NO_NODE || !node_online(nid))
|
||||
return;
|
||||
|
||||
mutex_lock(&target_lock);
|
||||
if (!target->registered) {
|
||||
hmat_register_target_initiators(target);
|
||||
hmat_register_target_cache(target);
|
||||
hmat_register_target_perf(target, ACCESS_COORDINATE_LOCAL);
|
||||
hmat_register_target_perf(target, ACCESS_COORDINATE_CPU);
|
||||
target->registered = true;
|
||||
}
|
||||
mutex_unlock(&target_lock);
|
||||
hmat_hotplug_target(target);
|
||||
}
|
||||
|
||||
static void hmat_register_targets(void)
|
||||
@@ -929,7 +933,7 @@ static int hmat_callback(struct notifier_block *self,
|
||||
if (!target)
|
||||
return NOTIFY_OK;
|
||||
|
||||
hmat_register_target(target);
|
||||
hmat_hotplug_target(target);
|
||||
return NOTIFY_OK;
|
||||
}
|
||||
|
||||
|
||||
@@ -143,7 +143,7 @@ acpi_table_print_srat_entry(struct acpi_subtable_header *header)
|
||||
struct acpi_srat_generic_affinity *p =
|
||||
(struct acpi_srat_generic_affinity *)header;
|
||||
|
||||
if (p->device_handle_type == 0) {
|
||||
if (p->device_handle_type == 1) {
|
||||
/*
|
||||
* For pci devices this may be the only place they
|
||||
* are assigned a proximity domain
|
||||
|
||||
@@ -4179,6 +4179,11 @@ static void btusb_disconnect(struct usb_interface *intf)
|
||||
|
||||
hci_unregister_dev(hdev);
|
||||
|
||||
if (data->oob_wake_irq)
|
||||
device_init_wakeup(&data->udev->dev, false);
|
||||
if (data->reset_gpio)
|
||||
gpiod_put(data->reset_gpio);
|
||||
|
||||
if (intf == data->intf) {
|
||||
if (data->isoc)
|
||||
usb_driver_release_interface(&btusb_driver, data->isoc);
|
||||
@@ -4189,17 +4194,11 @@ static void btusb_disconnect(struct usb_interface *intf)
|
||||
usb_driver_release_interface(&btusb_driver, data->diag);
|
||||
usb_driver_release_interface(&btusb_driver, data->intf);
|
||||
} else if (intf == data->diag) {
|
||||
usb_driver_release_interface(&btusb_driver, data->intf);
|
||||
if (data->isoc)
|
||||
usb_driver_release_interface(&btusb_driver, data->isoc);
|
||||
usb_driver_release_interface(&btusb_driver, data->intf);
|
||||
}
|
||||
|
||||
if (data->oob_wake_irq)
|
||||
device_init_wakeup(&data->udev->dev, false);
|
||||
|
||||
if (data->reset_gpio)
|
||||
gpiod_put(data->reset_gpio);
|
||||
|
||||
hci_free_dev(hdev);
|
||||
}
|
||||
|
||||
|
||||
@@ -3688,10 +3688,12 @@ static ssize_t qm_get_qos_value(struct hisi_qm *qm, const char *buf,
|
||||
pdev = container_of(dev, struct pci_dev, dev);
|
||||
if (pci_physfn(pdev) != qm->pdev) {
|
||||
pci_err(qm->pdev, "the pdev input does not match the pf!\n");
|
||||
put_device(dev);
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
*fun_index = pdev->devfn;
|
||||
put_device(dev);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
@@ -1184,10 +1184,22 @@ altr_check_ocram_deps_init(struct altr_edac_device_dev *device)
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
/* Verify OCRAM has been initialized */
|
||||
/*
|
||||
* Verify that OCRAM has been initialized.
|
||||
* During a warm reset, OCRAM contents are retained, but the control
|
||||
* and status registers are reset to their default values. Therefore,
|
||||
* ECC must be explicitly re-enabled in the control register.
|
||||
* Error condition: if INITCOMPLETEA is clear and ECC_EN is already set.
|
||||
*/
|
||||
if (!ecc_test_bits(ALTR_A10_ECC_INITCOMPLETEA,
|
||||
(base + ALTR_A10_ECC_INITSTAT_OFST)))
|
||||
return -ENODEV;
|
||||
(base + ALTR_A10_ECC_INITSTAT_OFST))) {
|
||||
if (!ecc_test_bits(ALTR_A10_ECC_EN,
|
||||
(base + ALTR_A10_ECC_CTRL_OFST)))
|
||||
ecc_set_bits(ALTR_A10_ECC_EN,
|
||||
(base + ALTR_A10_ECC_CTRL_OFST));
|
||||
else
|
||||
return -ENODEV;
|
||||
}
|
||||
|
||||
/* Enable IRQ on Single Bit Error */
|
||||
writel(ALTR_A10_ECC_SERRINTEN, (base + ALTR_A10_ECC_ERRINTENS_OFST));
|
||||
@@ -1357,7 +1369,7 @@ static const struct edac_device_prv_data a10_enetecc_data = {
|
||||
.ue_set_mask = ALTR_A10_ECC_TDERRA,
|
||||
.set_err_ofst = ALTR_A10_ECC_INTTEST_OFST,
|
||||
.ecc_irq_handler = altr_edac_a10_ecc_irq,
|
||||
.inject_fops = &altr_edac_a10_device_inject2_fops,
|
||||
.inject_fops = &altr_edac_a10_device_inject_fops,
|
||||
};
|
||||
|
||||
#endif /* CONFIG_EDAC_ALTERA_ETHERNET */
|
||||
@@ -1447,7 +1459,7 @@ static const struct edac_device_prv_data a10_usbecc_data = {
|
||||
.ue_set_mask = ALTR_A10_ECC_TDERRA,
|
||||
.set_err_ofst = ALTR_A10_ECC_INTTEST_OFST,
|
||||
.ecc_irq_handler = altr_edac_a10_ecc_irq,
|
||||
.inject_fops = &altr_edac_a10_device_inject2_fops,
|
||||
.inject_fops = &altr_edac_a10_device_inject_fops,
|
||||
};
|
||||
|
||||
#endif /* CONFIG_EDAC_ALTERA_USB */
|
||||
|
||||
@@ -691,7 +691,7 @@ static void amdgpu_cs_get_threshold_for_moves(struct amdgpu_device *adev,
|
||||
*/
|
||||
const s64 us_upper_bound = 200000;
|
||||
|
||||
if (!adev->mm_stats.log2_max_MBps) {
|
||||
if ((!adev->mm_stats.log2_max_MBps) || !ttm_resource_manager_used(&adev->mman.vram_mgr.manager)) {
|
||||
*max_bytes = 0;
|
||||
*max_vis_bytes = 0;
|
||||
return;
|
||||
|
||||
@@ -81,6 +81,18 @@ static int amdgpu_dma_buf_attach(struct dma_buf *dmabuf,
|
||||
struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj);
|
||||
struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
|
||||
|
||||
/*
|
||||
* Disable peer-to-peer access for DCC-enabled VRAM surfaces on GFX12+.
|
||||
* Such buffers cannot be safely accessed over P2P due to device-local
|
||||
* compression metadata. Fallback to system-memory path instead.
|
||||
* Device supports GFX12 (GC 12.x or newer)
|
||||
* BO was created with the AMDGPU_GEM_CREATE_GFX12_DCC flag
|
||||
*
|
||||
*/
|
||||
if (amdgpu_ip_version(adev, GC_HWIP, 0) >= IP_VERSION(12, 0, 0) &&
|
||||
bo->flags & AMDGPU_GEM_CREATE_GFX12_DCC)
|
||||
attach->peer2peer = false;
|
||||
|
||||
if (!amdgpu_dmabuf_is_xgmi_accessible(attach_adev, bo) &&
|
||||
pci_p2pdma_distance(adev->pdev, attach->dev, false) < 0)
|
||||
attach->peer2peer = false;
|
||||
|
||||
@@ -707,7 +707,8 @@ int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
|
||||
ui64 = atomic64_read(&adev->num_vram_cpu_page_faults);
|
||||
return copy_to_user(out, &ui64, min(size, 8u)) ? -EFAULT : 0;
|
||||
case AMDGPU_INFO_VRAM_USAGE:
|
||||
ui64 = ttm_resource_manager_usage(&adev->mman.vram_mgr.manager);
|
||||
ui64 = ttm_resource_manager_used(&adev->mman.vram_mgr.manager) ?
|
||||
ttm_resource_manager_usage(&adev->mman.vram_mgr.manager) : 0;
|
||||
return copy_to_user(out, &ui64, min(size, 8u)) ? -EFAULT : 0;
|
||||
case AMDGPU_INFO_VIS_VRAM_USAGE:
|
||||
ui64 = amdgpu_vram_mgr_vis_usage(&adev->mman.vram_mgr);
|
||||
@@ -753,8 +754,8 @@ int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
|
||||
mem.vram.usable_heap_size = adev->gmc.real_vram_size -
|
||||
atomic64_read(&adev->vram_pin_size) -
|
||||
AMDGPU_VM_RESERVED_VRAM;
|
||||
mem.vram.heap_usage =
|
||||
ttm_resource_manager_usage(vram_man);
|
||||
mem.vram.heap_usage = ttm_resource_manager_used(&adev->mman.vram_mgr.manager) ?
|
||||
ttm_resource_manager_usage(vram_man) : 0;
|
||||
mem.vram.max_allocation = mem.vram.usable_heap_size * 3 / 4;
|
||||
|
||||
mem.cpu_accessible_vram.total_heap_size =
|
||||
|
||||
@@ -2174,8 +2174,11 @@ static int psp_securedisplay_initialize(struct psp_context *psp)
|
||||
if (!ret && !psp->securedisplay_context.context.resp_status) {
|
||||
psp->securedisplay_context.context.initialized = true;
|
||||
mutex_init(&psp->securedisplay_context.mutex);
|
||||
} else
|
||||
} else {
|
||||
/* don't try again */
|
||||
psp->securedisplay_context.context.bin_desc.size_bytes = 0;
|
||||
return ret;
|
||||
}
|
||||
|
||||
mutex_lock(&psp->securedisplay_context.mutex);
|
||||
|
||||
|
||||
@@ -595,8 +595,8 @@ static int amdgpu_virt_write_vf2pf_data(struct amdgpu_device *adev)
|
||||
vf2pf_info->driver_cert = 0;
|
||||
vf2pf_info->os_info.all = 0;
|
||||
|
||||
vf2pf_info->fb_usage =
|
||||
ttm_resource_manager_usage(&adev->mman.vram_mgr.manager) >> 20;
|
||||
vf2pf_info->fb_usage = ttm_resource_manager_used(&adev->mman.vram_mgr.manager) ?
|
||||
ttm_resource_manager_usage(&adev->mman.vram_mgr.manager) >> 20 : 0;
|
||||
vf2pf_info->fb_vis_usage =
|
||||
amdgpu_vram_mgr_vis_usage(&adev->mman.vram_mgr) >> 20;
|
||||
vf2pf_info->fb_size = adev->gmc.real_vram_size >> 20;
|
||||
|
||||
@@ -234,6 +234,9 @@ static umode_t amdgpu_vram_attrs_is_visible(struct kobject *kobj,
|
||||
!adev->gmc.vram_vendor)
|
||||
return 0;
|
||||
|
||||
if (!ttm_resource_manager_used(&adev->mman.vram_mgr.manager))
|
||||
return 0;
|
||||
|
||||
return attr->mode;
|
||||
}
|
||||
|
||||
|
||||
@@ -5632,8 +5632,6 @@ static void gfx_v11_0_ring_emit_ib_gfx(struct amdgpu_ring *ring,
|
||||
unsigned vmid = AMDGPU_JOB_GET_VMID(job);
|
||||
u32 header, control = 0;
|
||||
|
||||
BUG_ON(ib->flags & AMDGPU_IB_FLAG_CE);
|
||||
|
||||
header = PACKET3(PACKET3_INDIRECT_BUFFER, 2);
|
||||
|
||||
control |= ib->length_dw | (vmid << 24);
|
||||
|
||||
@@ -4330,8 +4330,6 @@ static void gfx_v12_0_ring_emit_ib_gfx(struct amdgpu_ring *ring,
|
||||
unsigned vmid = AMDGPU_JOB_GET_VMID(job);
|
||||
u32 header, control = 0;
|
||||
|
||||
BUG_ON(ib->flags & AMDGPU_IB_FLAG_CE);
|
||||
|
||||
header = PACKET3(PACKET3_INDIRECT_BUFFER, 2);
|
||||
|
||||
control |= ib->length_dw | (vmid << 24);
|
||||
|
||||
@@ -297,16 +297,16 @@ int kfd_queue_acquire_buffers(struct kfd_process_device *pdd, struct queue_prope
|
||||
goto out_err_unreserve;
|
||||
}
|
||||
|
||||
if (properties->ctx_save_restore_area_size != topo_dev->node_props.cwsr_size) {
|
||||
pr_debug("queue cwsr size 0x%x not equal to node cwsr size 0x%x\n",
|
||||
if (properties->ctx_save_restore_area_size < topo_dev->node_props.cwsr_size) {
|
||||
pr_debug("queue cwsr size 0x%x not sufficient for node cwsr size 0x%x\n",
|
||||
properties->ctx_save_restore_area_size,
|
||||
topo_dev->node_props.cwsr_size);
|
||||
err = -EINVAL;
|
||||
goto out_err_unreserve;
|
||||
}
|
||||
|
||||
total_cwsr_size = (topo_dev->node_props.cwsr_size + topo_dev->node_props.debug_memory_size)
|
||||
* NUM_XCC(pdd->dev->xcc_mask);
|
||||
total_cwsr_size = (properties->ctx_save_restore_area_size +
|
||||
topo_dev->node_props.debug_memory_size) * NUM_XCC(pdd->dev->xcc_mask);
|
||||
total_cwsr_size = ALIGN(total_cwsr_size, PAGE_SIZE);
|
||||
|
||||
err = kfd_queue_buffer_get(vm, (void *)properties->ctx_save_restore_area_address,
|
||||
@@ -352,8 +352,8 @@ int kfd_queue_release_buffers(struct kfd_process_device *pdd, struct queue_prope
|
||||
topo_dev = kfd_topology_device_by_id(pdd->dev->id);
|
||||
if (!topo_dev)
|
||||
return -EINVAL;
|
||||
total_cwsr_size = (topo_dev->node_props.cwsr_size + topo_dev->node_props.debug_memory_size)
|
||||
* NUM_XCC(pdd->dev->xcc_mask);
|
||||
total_cwsr_size = (properties->ctx_save_restore_area_size +
|
||||
topo_dev->node_props.debug_memory_size) * NUM_XCC(pdd->dev->xcc_mask);
|
||||
total_cwsr_size = ALIGN(total_cwsr_size, PAGE_SIZE);
|
||||
|
||||
kfd_queue_buffer_svm_put(pdd, properties->ctx_save_restore_area_address, total_cwsr_size);
|
||||
|
||||
@@ -3485,6 +3485,11 @@ static void si_apply_state_adjust_rules(struct amdgpu_device *adev,
|
||||
* for these GPUs to calculate bandwidth requirements.
|
||||
*/
|
||||
if (high_pixelclock_count) {
|
||||
/* Work around flickering lines at the bottom edge
|
||||
* of the screen when using a single 4K 60Hz monitor.
|
||||
*/
|
||||
disable_mclk_switching = true;
|
||||
|
||||
/* On Oland, we observe some flickering when two 4K 60Hz
|
||||
* displays are connected, possibly because voltage is too low.
|
||||
* Raise the voltage by requiring a higher SCLK.
|
||||
|
||||
@@ -209,7 +209,7 @@ static u64 div_u64_roundup(u64 nom, u32 den)
|
||||
|
||||
u64 intel_gt_clock_interval_to_ns(const struct intel_gt *gt, u64 count)
|
||||
{
|
||||
return div_u64_roundup(count * NSEC_PER_SEC, gt->clock_frequency);
|
||||
return mul_u64_u32_div(count, NSEC_PER_SEC, gt->clock_frequency);
|
||||
}
|
||||
|
||||
u64 intel_gt_pm_interval_to_ns(const struct intel_gt *gt, u64 count)
|
||||
@@ -219,7 +219,7 @@ u64 intel_gt_pm_interval_to_ns(const struct intel_gt *gt, u64 count)
|
||||
|
||||
u64 intel_gt_ns_to_clock_interval(const struct intel_gt *gt, u64 ns)
|
||||
{
|
||||
return div_u64_roundup(gt->clock_frequency * ns, NSEC_PER_SEC);
|
||||
return mul_u64_u32_div(ns, gt->clock_frequency, NSEC_PER_SEC);
|
||||
}
|
||||
|
||||
u64 intel_gt_ns_to_pm_interval(const struct intel_gt *gt, u64 ns)
|
||||
|
||||
@@ -1595,8 +1595,20 @@ err_unlock:
|
||||
err_vma_res:
|
||||
i915_vma_resource_free(vma_res);
|
||||
err_fence:
|
||||
if (work)
|
||||
dma_fence_work_commit_imm(&work->base);
|
||||
if (work) {
|
||||
/*
|
||||
* When pinning VMA to GGTT on CHV or BXT with VTD enabled,
|
||||
* commit VMA binding asynchronously to avoid risk of lock
|
||||
* inversion among reservation_ww locks held here and
|
||||
* cpu_hotplug_lock acquired from stop_machine(), which we
|
||||
* wrap around GGTT updates when running in those environments.
|
||||
*/
|
||||
if (i915_vma_is_ggtt(vma) &&
|
||||
intel_vm_no_concurrent_access_wa(vma->vm->i915))
|
||||
dma_fence_work_commit(&work->base);
|
||||
else
|
||||
dma_fence_work_commit_imm(&work->base);
|
||||
}
|
||||
err_rpm:
|
||||
intel_runtime_pm_put(&vma->vm->i915->runtime_pm, wakeref);
|
||||
|
||||
|
||||
@@ -283,6 +283,10 @@ static void ddp_cmdq_cb(struct mbox_client *cl, void *mssg)
|
||||
unsigned int i;
|
||||
unsigned long flags;
|
||||
|
||||
/* release GCE HW usage and start autosuspend */
|
||||
pm_runtime_mark_last_busy(cmdq_cl->chan->mbox->dev);
|
||||
pm_runtime_put_autosuspend(cmdq_cl->chan->mbox->dev);
|
||||
|
||||
if (data->sta < 0)
|
||||
return;
|
||||
|
||||
@@ -618,6 +622,9 @@ static void mtk_crtc_update_config(struct mtk_crtc *mtk_crtc, bool needs_vblank)
|
||||
mtk_crtc->config_updating = false;
|
||||
spin_unlock_irqrestore(&mtk_crtc->config_lock, flags);
|
||||
|
||||
if (pm_runtime_resume_and_get(mtk_crtc->cmdq_client.chan->mbox->dev) < 0)
|
||||
goto update_config_out;
|
||||
|
||||
mbox_send_message(mtk_crtc->cmdq_client.chan, cmdq_handle);
|
||||
mbox_client_txdone(mtk_crtc->cmdq_client.chan, 0);
|
||||
goto update_config_out;
|
||||
|
||||
@@ -3686,6 +3686,11 @@ static int vmw_cmd_check(struct vmw_private *dev_priv,
|
||||
|
||||
|
||||
cmd_id = header->id;
|
||||
if (header->size > SVGA_CMD_MAX_DATASIZE) {
|
||||
VMW_DEBUG_USER("SVGA3D command: %d is too big.\n",
|
||||
cmd_id + SVGA_3D_CMD_BASE);
|
||||
return -E2BIG;
|
||||
}
|
||||
*size = header->size + sizeof(SVGA3dCmdHeader);
|
||||
|
||||
cmd_id -= SVGA_3D_CMD_BASE;
|
||||
|
||||
@@ -814,16 +814,16 @@ void xe_device_shutdown(struct xe_device *xe)
|
||||
|
||||
drm_dbg(&xe->drm, "Shutting down device\n");
|
||||
|
||||
if (xe_driver_flr_disabled(xe)) {
|
||||
xe_display_pm_shutdown(xe);
|
||||
xe_display_pm_shutdown(xe);
|
||||
|
||||
xe_irq_suspend(xe);
|
||||
xe_irq_suspend(xe);
|
||||
|
||||
for_each_gt(gt, xe, id)
|
||||
xe_gt_shutdown(gt);
|
||||
for_each_gt(gt, xe, id)
|
||||
xe_gt_shutdown(gt);
|
||||
|
||||
xe_display_pm_shutdown_late(xe);
|
||||
} else {
|
||||
xe_display_pm_shutdown_late(xe);
|
||||
|
||||
if (!xe_driver_flr_disabled(xe)) {
|
||||
/* BOOM! */
|
||||
__xe_driver_flr(xe);
|
||||
}
|
||||
|
||||
@@ -188,6 +188,9 @@ static void guc_ct_fini(struct drm_device *drm, void *arg)
|
||||
{
|
||||
struct xe_guc_ct *ct = arg;
|
||||
|
||||
#if IS_ENABLED(CONFIG_DRM_XE_DEBUG)
|
||||
cancel_work_sync(&ct->dead.worker);
|
||||
#endif
|
||||
ct_exit_safe_mode(ct);
|
||||
destroy_workqueue(ct->g2h_wq);
|
||||
xa_destroy(&ct->fence_lookup);
|
||||
|
||||
@@ -341,6 +341,9 @@
|
||||
#define USB_DEVICE_ID_CODEMERCS_IOW_FIRST 0x1500
|
||||
#define USB_DEVICE_ID_CODEMERCS_IOW_LAST 0x15ff
|
||||
|
||||
#define USB_VENDOR_ID_COOLER_MASTER 0x2516
|
||||
#define USB_DEVICE_ID_COOLER_MASTER_MICE_DONGLE 0x01b7
|
||||
|
||||
#define USB_VENDOR_ID_CORSAIR 0x1b1c
|
||||
#define USB_DEVICE_ID_CORSAIR_K90 0x1b02
|
||||
#define USB_DEVICE_ID_CORSAIR_K70R 0x1b09
|
||||
@@ -1420,6 +1423,7 @@
|
||||
|
||||
#define USB_VENDOR_ID_VRS 0x0483
|
||||
#define USB_DEVICE_ID_VRS_DFP 0xa355
|
||||
#define USB_DEVICE_ID_VRS_R295 0xa44c
|
||||
|
||||
#define USB_VENDOR_ID_VTL 0x0306
|
||||
#define USB_DEVICE_ID_VTL_MULTITOUCH_FF3F 0xff3f
|
||||
|
||||
@@ -75,6 +75,7 @@ MODULE_PARM_DESC(disable_tap_to_click,
|
||||
#define HIDPP_QUIRK_HIDPP_CONSUMER_VENDOR_KEYS BIT(27)
|
||||
#define HIDPP_QUIRK_HI_RES_SCROLL_1P0 BIT(28)
|
||||
#define HIDPP_QUIRK_WIRELESS_STATUS BIT(29)
|
||||
#define HIDPP_QUIRK_RESET_HI_RES_SCROLL BIT(30)
|
||||
|
||||
/* These are just aliases for now */
|
||||
#define HIDPP_QUIRK_KBD_SCROLL_WHEEL HIDPP_QUIRK_HIDPP_WHEELS
|
||||
@@ -193,6 +194,7 @@ struct hidpp_device {
|
||||
void *private_data;
|
||||
|
||||
struct work_struct work;
|
||||
struct work_struct reset_hi_res_work;
|
||||
struct kfifo delayed_work_fifo;
|
||||
struct input_dev *delayed_input;
|
||||
|
||||
@@ -3864,6 +3866,7 @@ static int hidpp_raw_hidpp_event(struct hidpp_device *hidpp, u8 *data,
|
||||
struct hidpp_report *answer = hidpp->send_receive_buf;
|
||||
struct hidpp_report *report = (struct hidpp_report *)data;
|
||||
int ret;
|
||||
int last_online;
|
||||
|
||||
/*
|
||||
* If the mutex is locked then we have a pending answer from a
|
||||
@@ -3905,6 +3908,7 @@ static int hidpp_raw_hidpp_event(struct hidpp_device *hidpp, u8 *data,
|
||||
"See: https://gitlab.freedesktop.org/jwrdegoede/logitech-27mhz-keyboard-encryption-setup/\n");
|
||||
}
|
||||
|
||||
last_online = hidpp->battery.online;
|
||||
if (hidpp->capabilities & HIDPP_CAPABILITY_HIDPP20_BATTERY) {
|
||||
ret = hidpp20_battery_event_1000(hidpp, data, size);
|
||||
if (ret != 0)
|
||||
@@ -3929,6 +3933,11 @@ static int hidpp_raw_hidpp_event(struct hidpp_device *hidpp, u8 *data,
|
||||
return ret;
|
||||
}
|
||||
|
||||
if (hidpp->quirks & HIDPP_QUIRK_RESET_HI_RES_SCROLL) {
|
||||
if (last_online == 0 && hidpp->battery.online == 1)
|
||||
schedule_work(&hidpp->reset_hi_res_work);
|
||||
}
|
||||
|
||||
if (hidpp->quirks & HIDPP_QUIRK_HIDPP_WHEELS) {
|
||||
ret = hidpp10_wheel_raw_event(hidpp, data, size);
|
||||
if (ret != 0)
|
||||
@@ -4302,6 +4311,13 @@ static void hidpp_connect_event(struct work_struct *work)
|
||||
hidpp->delayed_input = input;
|
||||
}
|
||||
|
||||
static void hidpp_reset_hi_res_handler(struct work_struct *work)
|
||||
{
|
||||
struct hidpp_device *hidpp = container_of(work, struct hidpp_device, reset_hi_res_work);
|
||||
|
||||
hi_res_scroll_enable(hidpp);
|
||||
}
|
||||
|
||||
static DEVICE_ATTR(builtin_power_supply, 0000, NULL, NULL);
|
||||
|
||||
static struct attribute *sysfs_attrs[] = {
|
||||
@@ -4432,6 +4448,7 @@ static int hidpp_probe(struct hid_device *hdev, const struct hid_device_id *id)
|
||||
}
|
||||
|
||||
INIT_WORK(&hidpp->work, hidpp_connect_event);
|
||||
INIT_WORK(&hidpp->reset_hi_res_work, hidpp_reset_hi_res_handler);
|
||||
mutex_init(&hidpp->send_mutex);
|
||||
init_waitqueue_head(&hidpp->wait);
|
||||
|
||||
@@ -4527,6 +4544,7 @@ static void hidpp_remove(struct hid_device *hdev)
|
||||
|
||||
hid_hw_stop(hdev);
|
||||
cancel_work_sync(&hidpp->work);
|
||||
cancel_work_sync(&hidpp->reset_hi_res_work);
|
||||
mutex_destroy(&hidpp->send_mutex);
|
||||
}
|
||||
|
||||
@@ -4574,6 +4592,9 @@ static const struct hid_device_id hidpp_devices[] = {
|
||||
{ /* Keyboard MX5500 (Bluetooth-receiver in HID proxy mode) */
|
||||
LDJ_DEVICE(0xb30b),
|
||||
.driver_data = HIDPP_QUIRK_HIDPP_CONSUMER_VENDOR_KEYS },
|
||||
{ /* Logitech G502 Lightspeed Wireless Gaming Mouse */
|
||||
LDJ_DEVICE(0x407f),
|
||||
.driver_data = HIDPP_QUIRK_RESET_HI_RES_SCROLL },
|
||||
|
||||
{ LDJ_DEVICE(HID_ANY_ID) },
|
||||
|
||||
|
||||
@@ -2424,7 +2424,7 @@ static int joycon_read_info(struct joycon_ctlr *ctlr)
|
||||
struct joycon_input_report *report;
|
||||
|
||||
req.subcmd_id = JC_SUBCMD_REQ_DEV_INFO;
|
||||
ret = joycon_send_subcmd(ctlr, &req, 0, HZ);
|
||||
ret = joycon_send_subcmd(ctlr, &req, 0, 2 * HZ);
|
||||
if (ret) {
|
||||
hid_err(ctlr->hdev, "Failed to get joycon info; ret=%d\n", ret);
|
||||
return ret;
|
||||
|
||||
@@ -142,13 +142,13 @@ static void ntrig_report_version(struct hid_device *hdev)
|
||||
int ret;
|
||||
char buf[20];
|
||||
struct usb_device *usb_dev = hid_to_usb_dev(hdev);
|
||||
unsigned char *data = kmalloc(8, GFP_KERNEL);
|
||||
unsigned char *data __free(kfree) = kmalloc(8, GFP_KERNEL);
|
||||
|
||||
if (!hid_is_usb(hdev))
|
||||
return;
|
||||
|
||||
if (!data)
|
||||
goto err_free;
|
||||
return;
|
||||
|
||||
ret = usb_control_msg(usb_dev, usb_rcvctrlpipe(usb_dev, 0),
|
||||
USB_REQ_CLEAR_FEATURE,
|
||||
@@ -163,9 +163,6 @@ static void ntrig_report_version(struct hid_device *hdev)
|
||||
hid_info(hdev, "Firmware version: %s (%02x%02x %02x%02x)\n",
|
||||
buf, data[2], data[3], data[4], data[5]);
|
||||
}
|
||||
|
||||
err_free:
|
||||
kfree(data);
|
||||
}
|
||||
|
||||
static ssize_t show_phys_width(struct device *dev,
|
||||
|
||||
@@ -1807,6 +1807,7 @@ static int dualshock4_get_calibration_data(struct dualshock4 *ds4)
|
||||
|
||||
hid_warn(hdev, "Failed to retrieve DualShock4 calibration info: %d\n", ret);
|
||||
ret = -EILSEQ;
|
||||
kfree(buf);
|
||||
goto transfer_failed;
|
||||
} else {
|
||||
break;
|
||||
@@ -1824,6 +1825,7 @@ static int dualshock4_get_calibration_data(struct dualshock4 *ds4)
|
||||
|
||||
if (ret) {
|
||||
hid_warn(hdev, "Failed to retrieve DualShock4 calibration info: %d\n", ret);
|
||||
kfree(buf);
|
||||
goto transfer_failed;
|
||||
}
|
||||
}
|
||||
|
||||
@@ -58,6 +58,7 @@ static const struct hid_device_id hid_quirks[] = {
|
||||
{ HID_USB_DEVICE(USB_VENDOR_ID_CH, USB_DEVICE_ID_CH_FLIGHT_SIM_YOKE), HID_QUIRK_NOGET },
|
||||
{ HID_USB_DEVICE(USB_VENDOR_ID_CH, USB_DEVICE_ID_CH_PRO_PEDALS), HID_QUIRK_NOGET },
|
||||
{ HID_USB_DEVICE(USB_VENDOR_ID_CH, USB_DEVICE_ID_CH_PRO_THROTTLE), HID_QUIRK_NOGET },
|
||||
{ HID_USB_DEVICE(USB_VENDOR_ID_COOLER_MASTER, USB_DEVICE_ID_COOLER_MASTER_MICE_DONGLE), HID_QUIRK_ALWAYS_POLL },
|
||||
{ HID_USB_DEVICE(USB_VENDOR_ID_CORSAIR, USB_DEVICE_ID_CORSAIR_K65RGB), HID_QUIRK_NO_INIT_REPORTS },
|
||||
{ HID_USB_DEVICE(USB_VENDOR_ID_CORSAIR, USB_DEVICE_ID_CORSAIR_K65RGB_RAPIDFIRE), HID_QUIRK_NO_INIT_REPORTS | HID_QUIRK_ALWAYS_POLL },
|
||||
{ HID_USB_DEVICE(USB_VENDOR_ID_CORSAIR, USB_DEVICE_ID_CORSAIR_K70RGB), HID_QUIRK_NO_INIT_REPORTS },
|
||||
@@ -207,6 +208,7 @@ static const struct hid_device_id hid_quirks[] = {
|
||||
{ HID_USB_DEVICE(USB_VENDOR_ID_UCLOGIC, USB_DEVICE_ID_UCLOGIC_TABLET_KNA5), HID_QUIRK_MULTI_INPUT },
|
||||
{ HID_USB_DEVICE(USB_VENDOR_ID_UCLOGIC, USB_DEVICE_ID_UCLOGIC_TABLET_TWA60), HID_QUIRK_MULTI_INPUT },
|
||||
{ HID_USB_DEVICE(USB_VENDOR_ID_UGTIZER, USB_DEVICE_ID_UGTIZER_TABLET_WP5540), HID_QUIRK_MULTI_INPUT },
|
||||
{ HID_USB_DEVICE(USB_VENDOR_ID_VRS, USB_DEVICE_ID_VRS_R295), HID_QUIRK_ALWAYS_POLL },
|
||||
{ HID_USB_DEVICE(USB_VENDOR_ID_WALTOP, USB_DEVICE_ID_WALTOP_MEDIA_TABLET_10_6_INCH), HID_QUIRK_MULTI_INPUT },
|
||||
{ HID_USB_DEVICE(USB_VENDOR_ID_WALTOP, USB_DEVICE_ID_WALTOP_MEDIA_TABLET_14_1_INCH), HID_QUIRK_MULTI_INPUT },
|
||||
{ HID_USB_DEVICE(USB_VENDOR_ID_WALTOP, USB_DEVICE_ID_WALTOP_SIRIUS_BATTERY_FREE_TABLET), HID_QUIRK_MULTI_INPUT },
|
||||
|
||||
@@ -1367,8 +1367,10 @@ static int uclogic_params_ugee_v2_init_event_hooks(struct hid_device *hdev,
|
||||
event_hook->hdev = hdev;
|
||||
event_hook->size = ARRAY_SIZE(reconnect_event);
|
||||
event_hook->event = kmemdup(reconnect_event, event_hook->size, GFP_KERNEL);
|
||||
if (!event_hook->event)
|
||||
if (!event_hook->event) {
|
||||
kfree(event_hook);
|
||||
return -ENOMEM;
|
||||
}
|
||||
|
||||
list_add_tail(&event_hook->list, &p->event_hooks->list);
|
||||
|
||||
|
||||
@@ -660,7 +660,8 @@ static int iopt_unmap_iova_range(struct io_pagetable *iopt, unsigned long start,
|
||||
struct iopt_area *area;
|
||||
unsigned long unmapped_bytes = 0;
|
||||
unsigned int tries = 0;
|
||||
int rc = -ENOENT;
|
||||
/* If there are no mapped entries then success */
|
||||
int rc = 0;
|
||||
|
||||
/*
|
||||
* The domains_rwsem must be held in read mode any time any area->pages
|
||||
@@ -724,8 +725,6 @@ again:
|
||||
|
||||
down_write(&iopt->iova_rwsem);
|
||||
}
|
||||
if (unmapped_bytes)
|
||||
rc = 0;
|
||||
|
||||
out_unlock_iova:
|
||||
up_write(&iopt->iova_rwsem);
|
||||
@@ -762,13 +761,8 @@ int iopt_unmap_iova(struct io_pagetable *iopt, unsigned long iova,
|
||||
|
||||
int iopt_unmap_all(struct io_pagetable *iopt, unsigned long *unmapped)
|
||||
{
|
||||
int rc;
|
||||
|
||||
rc = iopt_unmap_iova_range(iopt, 0, ULONG_MAX, unmapped);
|
||||
/* If the IOVAs are empty then unmap all succeeds */
|
||||
if (rc == -ENOENT)
|
||||
return 0;
|
||||
return rc;
|
||||
return iopt_unmap_iova_range(iopt, 0, ULONG_MAX, unmapped);
|
||||
}
|
||||
|
||||
/* The caller must always free all the nodes in the allowed_iova rb_root. */
|
||||
|
||||
@@ -317,6 +317,10 @@ int iommufd_ioas_unmap(struct iommufd_ucmd *ucmd)
|
||||
&unmapped);
|
||||
if (rc)
|
||||
goto out_put;
|
||||
if (!unmapped) {
|
||||
rc = -ENOENT;
|
||||
goto out_put;
|
||||
}
|
||||
}
|
||||
|
||||
cmd->length = unmapped;
|
||||
|
||||
@@ -166,7 +166,8 @@ static int riscv_intc_domain_alloc(struct irq_domain *domain,
|
||||
static const struct irq_domain_ops riscv_intc_domain_ops = {
|
||||
.map = riscv_intc_domain_map,
|
||||
.xlate = irq_domain_xlate_onecell,
|
||||
.alloc = riscv_intc_domain_alloc
|
||||
.alloc = riscv_intc_domain_alloc,
|
||||
.free = irq_domain_free_irqs_top,
|
||||
};
|
||||
|
||||
static struct fwnode_handle *riscv_intc_hwnode(void)
|
||||
|
||||
@@ -1904,13 +1904,13 @@ out:
|
||||
mISDN_freebchannel(&hw->bch[1]);
|
||||
mISDN_freebchannel(&hw->bch[0]);
|
||||
mISDN_freedchannel(&hw->dch);
|
||||
kfree(hw);
|
||||
return err;
|
||||
}
|
||||
|
||||
static int
|
||||
hfcsusb_probe(struct usb_interface *intf, const struct usb_device_id *id)
|
||||
{
|
||||
int err;
|
||||
struct hfcsusb *hw;
|
||||
struct usb_device *dev = interface_to_usbdev(intf);
|
||||
struct usb_host_interface *iface = intf->cur_altsetting;
|
||||
@@ -2101,20 +2101,28 @@ hfcsusb_probe(struct usb_interface *intf, const struct usb_device_id *id)
|
||||
if (!hw->ctrl_urb) {
|
||||
pr_warn("%s: No memory for control urb\n",
|
||||
driver_info->vend_name);
|
||||
kfree(hw);
|
||||
return -ENOMEM;
|
||||
err = -ENOMEM;
|
||||
goto err_free_hw;
|
||||
}
|
||||
|
||||
pr_info("%s: %s: detected \"%s\" (%s, if=%d alt=%d)\n",
|
||||
hw->name, __func__, driver_info->vend_name,
|
||||
conf_str[small_match], ifnum, alt_used);
|
||||
|
||||
if (setup_instance(hw, dev->dev.parent))
|
||||
return -EIO;
|
||||
if (setup_instance(hw, dev->dev.parent)) {
|
||||
err = -EIO;
|
||||
goto err_free_urb;
|
||||
}
|
||||
|
||||
hw->intf = intf;
|
||||
usb_set_intfdata(hw->intf, hw);
|
||||
return 0;
|
||||
|
||||
err_free_urb:
|
||||
usb_free_urb(hw->ctrl_urb);
|
||||
err_free_hw:
|
||||
kfree(hw);
|
||||
return err;
|
||||
}
|
||||
|
||||
/* function called when an active device is removed */
|
||||
|
||||
@@ -43,7 +43,7 @@ struct dw_mci_rockchip_priv_data {
|
||||
*/
|
||||
static int rockchip_mmc_get_internal_phase(struct dw_mci *host, bool sample)
|
||||
{
|
||||
unsigned long rate = clk_get_rate(host->ciu_clk);
|
||||
unsigned long rate = clk_get_rate(host->ciu_clk) / RK3288_CLKGEN_DIV;
|
||||
u32 raw_value;
|
||||
u16 degrees;
|
||||
u32 delay_num = 0;
|
||||
@@ -86,7 +86,7 @@ static int rockchip_mmc_get_phase(struct dw_mci *host, bool sample)
|
||||
|
||||
static int rockchip_mmc_set_internal_phase(struct dw_mci *host, bool sample, int degrees)
|
||||
{
|
||||
unsigned long rate = clk_get_rate(host->ciu_clk);
|
||||
unsigned long rate = clk_get_rate(host->ciu_clk) / RK3288_CLKGEN_DIV;
|
||||
u8 nineties, remainder;
|
||||
u8 delay_num;
|
||||
u32 raw_value;
|
||||
|
||||
@@ -94,7 +94,7 @@
|
||||
#define DLL_TXCLK_TAPNUM_DEFAULT 0x10
|
||||
#define DLL_TXCLK_TAPNUM_90_DEGREES 0xA
|
||||
#define DLL_TXCLK_TAPNUM_FROM_SW BIT(24)
|
||||
#define DLL_STRBIN_TAPNUM_DEFAULT 0x8
|
||||
#define DLL_STRBIN_TAPNUM_DEFAULT 0x4
|
||||
#define DLL_STRBIN_TAPNUM_FROM_SW BIT(24)
|
||||
#define DLL_STRBIN_DELAY_NUM_SEL BIT(26)
|
||||
#define DLL_STRBIN_DELAY_NUM_OFFSET 16
|
||||
|
||||
@@ -906,7 +906,7 @@ static int s3c_onenand_probe(struct platform_device *pdev)
|
||||
err = devm_request_irq(&pdev->dev, r->start,
|
||||
s5pc110_onenand_irq,
|
||||
IRQF_SHARED, "onenand",
|
||||
&onenand);
|
||||
onenand);
|
||||
if (err) {
|
||||
dev_err(&pdev->dev, "failed to get irq\n");
|
||||
return err;
|
||||
|
||||
@@ -1921,8 +1921,10 @@ int sja1105_table_delete_entry(struct sja1105_table *table, int i)
|
||||
if (i > table->entry_count)
|
||||
return -ERANGE;
|
||||
|
||||
memmove(entries + i * entry_size, entries + (i + 1) * entry_size,
|
||||
(table->entry_count - i) * entry_size);
|
||||
if (i + 1 < table->entry_count) {
|
||||
memmove(entries + i * entry_size, entries + (i + 1) * entry_size,
|
||||
(table->entry_count - i - 1) * entry_size);
|
||||
}
|
||||
|
||||
table->entry_count--;
|
||||
|
||||
|
||||
@@ -1799,6 +1799,8 @@ fec_enet_rx_queue(struct net_device *ndev, int budget, u16 queue_id)
|
||||
ndev->stats.rx_packets++;
|
||||
pkt_len = fec16_to_cpu(bdp->cbd_datlen);
|
||||
ndev->stats.rx_bytes += pkt_len;
|
||||
if (fep->quirks & FEC_QUIRK_HAS_RACC)
|
||||
ndev->stats.rx_bytes -= 2;
|
||||
|
||||
index = fec_enet_get_bd_index(bdp, &rxq->bd);
|
||||
page = rxq->rx_skb_info[index].page;
|
||||
|
||||
@@ -595,32 +595,55 @@ static int mlx5e_dcbnl_ieee_setmaxrate(struct net_device *netdev,
|
||||
struct mlx5_core_dev *mdev = priv->mdev;
|
||||
u8 max_bw_value[IEEE_8021QAZ_MAX_TCS];
|
||||
u8 max_bw_unit[IEEE_8021QAZ_MAX_TCS];
|
||||
__u64 upper_limit_mbps = roundup(255 * MLX5E_100MB, MLX5E_1GB);
|
||||
__u64 upper_limit_mbps;
|
||||
__u64 upper_limit_gbps;
|
||||
int i;
|
||||
struct {
|
||||
int scale;
|
||||
const char *units_str;
|
||||
} units[] = {
|
||||
[MLX5_100_MBPS_UNIT] = {
|
||||
.scale = 100,
|
||||
.units_str = "Mbps",
|
||||
},
|
||||
[MLX5_GBPS_UNIT] = {
|
||||
.scale = 1,
|
||||
.units_str = "Gbps",
|
||||
},
|
||||
};
|
||||
|
||||
memset(max_bw_value, 0, sizeof(max_bw_value));
|
||||
memset(max_bw_unit, 0, sizeof(max_bw_unit));
|
||||
upper_limit_mbps = 255 * MLX5E_100MB;
|
||||
upper_limit_gbps = 255 * MLX5E_1GB;
|
||||
|
||||
for (i = 0; i <= mlx5_max_tc(mdev); i++) {
|
||||
if (!maxrate->tc_maxrate[i]) {
|
||||
max_bw_unit[i] = MLX5_BW_NO_LIMIT;
|
||||
continue;
|
||||
}
|
||||
if (maxrate->tc_maxrate[i] < upper_limit_mbps) {
|
||||
if (maxrate->tc_maxrate[i] <= upper_limit_mbps) {
|
||||
max_bw_value[i] = div_u64(maxrate->tc_maxrate[i],
|
||||
MLX5E_100MB);
|
||||
max_bw_value[i] = max_bw_value[i] ? max_bw_value[i] : 1;
|
||||
max_bw_unit[i] = MLX5_100_MBPS_UNIT;
|
||||
} else {
|
||||
} else if (max_bw_value[i] <= upper_limit_gbps) {
|
||||
max_bw_value[i] = div_u64(maxrate->tc_maxrate[i],
|
||||
MLX5E_1GB);
|
||||
max_bw_unit[i] = MLX5_GBPS_UNIT;
|
||||
} else {
|
||||
netdev_err(netdev,
|
||||
"tc_%d maxrate %llu Kbps exceeds limit %llu\n",
|
||||
i, maxrate->tc_maxrate[i],
|
||||
upper_limit_gbps);
|
||||
return -EINVAL;
|
||||
}
|
||||
}
|
||||
|
||||
for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
|
||||
netdev_dbg(netdev, "%s: tc_%d <=> max_bw %d Gbps\n",
|
||||
__func__, i, max_bw_value[i]);
|
||||
netdev_dbg(netdev, "%s: tc_%d <=> max_bw %u %s\n", __func__, i,
|
||||
max_bw_value[i] * units[max_bw_unit[i]].scale,
|
||||
units[max_bw_unit[i]].units_str);
|
||||
}
|
||||
|
||||
return mlx5_modify_port_ets_rate_limit(mdev, max_bw_value, max_bw_unit);
|
||||
|
||||
@@ -276,9 +276,31 @@ static int am65_cpsw_iet_set_verify_timeout_count(struct am65_cpsw_port *port)
|
||||
/* The number of wireside clocks contained in the verify
|
||||
* timeout counter. The default is 0x1312d0
|
||||
* (10ms at 125Mhz in 1G mode).
|
||||
* The frequency of the clock depends on the link speed
|
||||
* and the PHY interface.
|
||||
*/
|
||||
val = 125 * HZ_PER_MHZ; /* assuming 125MHz wireside clock */
|
||||
switch (port->slave.phy_if) {
|
||||
case PHY_INTERFACE_MODE_RGMII:
|
||||
case PHY_INTERFACE_MODE_RGMII_ID:
|
||||
case PHY_INTERFACE_MODE_RGMII_RXID:
|
||||
case PHY_INTERFACE_MODE_RGMII_TXID:
|
||||
if (port->qos.link_speed == SPEED_1000)
|
||||
val = 125 * HZ_PER_MHZ; /* 125 MHz at 1000Mbps*/
|
||||
else if (port->qos.link_speed == SPEED_100)
|
||||
val = 25 * HZ_PER_MHZ; /* 25 MHz at 100Mbps*/
|
||||
else
|
||||
val = (25 * HZ_PER_MHZ) / 10; /* 2.5 MHz at 10Mbps*/
|
||||
break;
|
||||
|
||||
case PHY_INTERFACE_MODE_QSGMII:
|
||||
case PHY_INTERFACE_MODE_SGMII:
|
||||
val = 125 * HZ_PER_MHZ; /* 125 MHz */
|
||||
break;
|
||||
|
||||
default:
|
||||
netdev_err(port->ndev, "selected mode does not supported IET\n");
|
||||
return -EOPNOTSUPP;
|
||||
}
|
||||
val /= MILLIHZ_PER_HZ; /* count per ms timeout */
|
||||
val *= verify_time_ms; /* count for timeout ms */
|
||||
|
||||
@@ -295,20 +317,21 @@ static int am65_cpsw_iet_verify_wait(struct am65_cpsw_port *port)
|
||||
u32 ctrl, status;
|
||||
int try;
|
||||
|
||||
try = 20;
|
||||
try = 3;
|
||||
|
||||
/* Reset the verify state machine by writing 1
|
||||
* to LINKFAIL
|
||||
*/
|
||||
ctrl = readl(port->port_base + AM65_CPSW_PN_REG_IET_CTRL);
|
||||
ctrl |= AM65_CPSW_PN_IET_MAC_LINKFAIL;
|
||||
writel(ctrl, port->port_base + AM65_CPSW_PN_REG_IET_CTRL);
|
||||
|
||||
/* Clear MAC_LINKFAIL bit to start Verify. */
|
||||
ctrl = readl(port->port_base + AM65_CPSW_PN_REG_IET_CTRL);
|
||||
ctrl &= ~AM65_CPSW_PN_IET_MAC_LINKFAIL;
|
||||
writel(ctrl, port->port_base + AM65_CPSW_PN_REG_IET_CTRL);
|
||||
|
||||
do {
|
||||
/* Reset the verify state machine by writing 1
|
||||
* to LINKFAIL
|
||||
*/
|
||||
ctrl = readl(port->port_base + AM65_CPSW_PN_REG_IET_CTRL);
|
||||
ctrl |= AM65_CPSW_PN_IET_MAC_LINKFAIL;
|
||||
writel(ctrl, port->port_base + AM65_CPSW_PN_REG_IET_CTRL);
|
||||
|
||||
/* Clear MAC_LINKFAIL bit to start Verify. */
|
||||
ctrl = readl(port->port_base + AM65_CPSW_PN_REG_IET_CTRL);
|
||||
ctrl &= ~AM65_CPSW_PN_IET_MAC_LINKFAIL;
|
||||
writel(ctrl, port->port_base + AM65_CPSW_PN_REG_IET_CTRL);
|
||||
|
||||
msleep(port->qos.iet.verify_time_ms);
|
||||
|
||||
status = readl(port->port_base + AM65_CPSW_PN_REG_IET_STATUS);
|
||||
@@ -330,7 +353,7 @@ static int am65_cpsw_iet_verify_wait(struct am65_cpsw_port *port)
|
||||
netdev_dbg(port->ndev, "MAC Merge verify error\n");
|
||||
return -ENODEV;
|
||||
}
|
||||
} while (try-- > 0);
|
||||
} while (--try > 0);
|
||||
|
||||
netdev_dbg(port->ndev, "MAC Merge verify timeout\n");
|
||||
return -ETIMEDOUT;
|
||||
|
||||
@@ -79,8 +79,11 @@ int mdiobus_register_device(struct mdio_device *mdiodev)
|
||||
return err;
|
||||
|
||||
err = mdiobus_register_reset(mdiodev);
|
||||
if (err)
|
||||
if (err) {
|
||||
gpiod_put(mdiodev->reset_gpio);
|
||||
mdiodev->reset_gpio = NULL;
|
||||
return err;
|
||||
}
|
||||
|
||||
/* Assert the reset signal */
|
||||
mdio_device_reset(mdiodev, 1);
|
||||
|
||||
@@ -2541,6 +2541,52 @@ static int ksz886x_cable_test_get_status(struct phy_device *phydev,
|
||||
return ret;
|
||||
}
|
||||
|
||||
/**
|
||||
* LAN8814_PAGE_AFE_PMA - Selects Extended Page 1.
|
||||
*
|
||||
* This page appears to control the Analog Front-End (AFE) and Physical
|
||||
* Medium Attachment (PMA) layers. It is used to access registers like
|
||||
* LAN8814_PD_CONTROLS and LAN8814_LINK_QUALITY.
|
||||
*/
|
||||
#define LAN8814_PAGE_AFE_PMA 1
|
||||
|
||||
/**
|
||||
* LAN8814_PAGE_PCS_DIGITAL - Selects Extended Page 2.
|
||||
*
|
||||
* This page seems dedicated to the Physical Coding Sublayer (PCS) and other
|
||||
* digital logic. It is used for MDI-X alignment (LAN8814_ALIGN_SWAP) and EEE
|
||||
* state (LAN8814_EEE_STATE) in the LAN8814, and is repurposed for statistics
|
||||
* and self-test counters in the LAN8842.
|
||||
*/
|
||||
#define LAN8814_PAGE_PCS_DIGITAL 2
|
||||
|
||||
/**
|
||||
* LAN8814_PAGE_COMMON_REGS - Selects Extended Page 4.
|
||||
*
|
||||
* This page contains device-common registers that affect the entire chip.
|
||||
* It includes controls for chip-level resets, strap status, GPIO,
|
||||
* QSGMII, the shared 1588 PTP block, and the PVT monitor.
|
||||
*/
|
||||
#define LAN8814_PAGE_COMMON_REGS 4
|
||||
|
||||
/**
|
||||
* LAN8814_PAGE_PORT_REGS - Selects Extended Page 5.
|
||||
*
|
||||
* This page contains port-specific registers that must be accessed
|
||||
* on a per-port basis. It includes controls for port LEDs, QSGMII PCS,
|
||||
* rate adaptation FIFOs, and the per-port 1588 TSU block.
|
||||
*/
|
||||
#define LAN8814_PAGE_PORT_REGS 5
|
||||
|
||||
/**
|
||||
* LAN8814_PAGE_SYSTEM_CTRL - Selects Extended Page 31.
|
||||
*
|
||||
* This page appears to hold fundamental system or global controls. In the
|
||||
* driver, it is used by the related LAN8804 to access the
|
||||
* LAN8814_CLOCK_MANAGEMENT register.
|
||||
*/
|
||||
#define LAN8814_PAGE_SYSTEM_CTRL 31
|
||||
|
||||
#define LAN_EXT_PAGE_ACCESS_CONTROL 0x16
|
||||
#define LAN_EXT_PAGE_ACCESS_ADDRESS_DATA 0x17
|
||||
#define LAN_EXT_PAGE_ACCESS_CTRL_EP_FUNC 0x4000
|
||||
@@ -2591,6 +2637,27 @@ static int lanphy_write_page_reg(struct phy_device *phydev, int page, u16 addr,
|
||||
return val;
|
||||
}
|
||||
|
||||
static int lanphy_modify_page_reg(struct phy_device *phydev, int page, u16 addr,
|
||||
u16 mask, u16 set)
|
||||
{
|
||||
int ret;
|
||||
|
||||
phy_lock_mdio_bus(phydev);
|
||||
__phy_write(phydev, LAN_EXT_PAGE_ACCESS_CONTROL, page);
|
||||
__phy_write(phydev, LAN_EXT_PAGE_ACCESS_ADDRESS_DATA, addr);
|
||||
__phy_write(phydev, LAN_EXT_PAGE_ACCESS_CONTROL,
|
||||
(page | LAN_EXT_PAGE_ACCESS_CTRL_EP_FUNC));
|
||||
ret = __phy_modify_changed(phydev, LAN_EXT_PAGE_ACCESS_ADDRESS_DATA,
|
||||
mask, set);
|
||||
phy_unlock_mdio_bus(phydev);
|
||||
|
||||
if (ret < 0)
|
||||
phydev_err(phydev, "__phy_modify_changed() failed: %pe\n",
|
||||
ERR_PTR(ret));
|
||||
|
||||
return ret;
|
||||
}
|
||||

static int lan8814_config_ts_intr(struct phy_device *phydev, bool enable)
{
	u16 val = 0;

@@ -2601,35 +2668,46 @@ static int lan8814_config_ts_intr(struct phy_device *phydev, bool enable)
		      PTP_TSU_INT_EN_PTP_RX_TS_EN_ |
		      PTP_TSU_INT_EN_PTP_RX_TS_OVRFL_EN_;

	return lanphy_write_page_reg(phydev, 5, PTP_TSU_INT_EN, val);
	return lanphy_write_page_reg(phydev, LAN8814_PAGE_PORT_REGS,
				     PTP_TSU_INT_EN, val);
}

static void lan8814_ptp_rx_ts_get(struct phy_device *phydev,
				  u32 *seconds, u32 *nano_seconds, u16 *seq_id)
{
	*seconds = lanphy_read_page_reg(phydev, 5, PTP_RX_INGRESS_SEC_HI);
	*seconds = lanphy_read_page_reg(phydev, LAN8814_PAGE_PORT_REGS,
					PTP_RX_INGRESS_SEC_HI);
	*seconds = (*seconds << 16) |
		   lanphy_read_page_reg(phydev, 5, PTP_RX_INGRESS_SEC_LO);
		   lanphy_read_page_reg(phydev, LAN8814_PAGE_PORT_REGS,
					PTP_RX_INGRESS_SEC_LO);

	*nano_seconds = lanphy_read_page_reg(phydev, 5, PTP_RX_INGRESS_NS_HI);
	*nano_seconds = lanphy_read_page_reg(phydev, LAN8814_PAGE_PORT_REGS,
					     PTP_RX_INGRESS_NS_HI);
	*nano_seconds = ((*nano_seconds & 0x3fff) << 16) |
			lanphy_read_page_reg(phydev, 5, PTP_RX_INGRESS_NS_LO);
			lanphy_read_page_reg(phydev, LAN8814_PAGE_PORT_REGS,
					     PTP_RX_INGRESS_NS_LO);

	*seq_id = lanphy_read_page_reg(phydev, 5, PTP_RX_MSG_HEADER2);
	*seq_id = lanphy_read_page_reg(phydev, LAN8814_PAGE_PORT_REGS,
				       PTP_RX_MSG_HEADER2);
}

static void lan8814_ptp_tx_ts_get(struct phy_device *phydev,
				  u32 *seconds, u32 *nano_seconds, u16 *seq_id)
{
	*seconds = lanphy_read_page_reg(phydev, 5, PTP_TX_EGRESS_SEC_HI);
	*seconds = lanphy_read_page_reg(phydev, LAN8814_PAGE_PORT_REGS,
					PTP_TX_EGRESS_SEC_HI);
	*seconds = *seconds << 16 |
		   lanphy_read_page_reg(phydev, 5, PTP_TX_EGRESS_SEC_LO);
		   lanphy_read_page_reg(phydev, LAN8814_PAGE_PORT_REGS,
					PTP_TX_EGRESS_SEC_LO);

	*nano_seconds = lanphy_read_page_reg(phydev, 5, PTP_TX_EGRESS_NS_HI);
	*nano_seconds = lanphy_read_page_reg(phydev, LAN8814_PAGE_PORT_REGS,
					     PTP_TX_EGRESS_NS_HI);
	*nano_seconds = ((*nano_seconds & 0x3fff) << 16) |
			lanphy_read_page_reg(phydev, 5, PTP_TX_EGRESS_NS_LO);
			lanphy_read_page_reg(phydev, LAN8814_PAGE_PORT_REGS,
					     PTP_TX_EGRESS_NS_LO);

	*seq_id = lanphy_read_page_reg(phydev, 5, PTP_TX_MSG_HEADER2);
	*seq_id = lanphy_read_page_reg(phydev, LAN8814_PAGE_PORT_REGS,
				       PTP_TX_MSG_HEADER2);
}
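The helpers above also document the TSU register layout: seconds come as two full 16-bit halves, while the nanosecond HI register only carries bits 29:16, hence the 0x3fff mask. A hypothetical helper (not part of the patch) that captures the combining step:

/* Illustrative only: fold a HI/LO 16-bit register pair into one value;
 * nanosecond HI words only use their low 14 bits.
 */
static u32 lan8814_combine_hi_lo(u16 hi, u16 lo, bool is_nsec)
{
	u32 val = is_nsec ? (hi & 0x3fff) : hi;

	return (val << 16) | lo;
}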
|
||||
|
||||
static int lan8814_ts_info(struct mii_timestamper *mii_ts, struct kernel_ethtool_ts_info *info)
|
||||
@@ -2664,11 +2742,11 @@ static void lan8814_flush_fifo(struct phy_device *phydev, bool egress)
|
||||
int i;
|
||||
|
||||
for (i = 0; i < FIFO_SIZE; ++i)
|
||||
lanphy_read_page_reg(phydev, 5,
|
||||
lanphy_read_page_reg(phydev, LAN8814_PAGE_PORT_REGS,
|
||||
egress ? PTP_TX_MSG_HEADER2 : PTP_RX_MSG_HEADER2);
|
||||
|
||||
/* Read to clear overflow status bit */
|
||||
lanphy_read_page_reg(phydev, 5, PTP_TSU_INT_STS);
|
||||
lanphy_read_page_reg(phydev, LAN8814_PAGE_PORT_REGS, PTP_TSU_INT_STS);
|
||||
}
|
||||
|
||||
static int lan8814_hwtstamp(struct mii_timestamper *mii_ts,
|
||||
@@ -2680,7 +2758,6 @@ static int lan8814_hwtstamp(struct mii_timestamper *mii_ts,
|
||||
struct lan8814_ptp_rx_ts *rx_ts, *tmp;
|
||||
int txcfg = 0, rxcfg = 0;
|
||||
int pkt_ts_enable;
|
||||
int tx_mod;
|
||||
|
||||
ptp_priv->hwts_tx_type = config->tx_type;
|
||||
ptp_priv->rx_filter = config->rx_filter;
|
||||
@@ -2719,21 +2796,28 @@ static int lan8814_hwtstamp(struct mii_timestamper *mii_ts,
|
||||
rxcfg |= PTP_RX_PARSE_CONFIG_IPV4_EN_ | PTP_RX_PARSE_CONFIG_IPV6_EN_;
|
||||
txcfg |= PTP_TX_PARSE_CONFIG_IPV4_EN_ | PTP_TX_PARSE_CONFIG_IPV6_EN_;
|
||||
}
|
||||
lanphy_write_page_reg(ptp_priv->phydev, 5, PTP_RX_PARSE_CONFIG, rxcfg);
|
||||
lanphy_write_page_reg(ptp_priv->phydev, 5, PTP_TX_PARSE_CONFIG, txcfg);
|
||||
lanphy_write_page_reg(ptp_priv->phydev, LAN8814_PAGE_PORT_REGS,
|
||||
PTP_RX_PARSE_CONFIG, rxcfg);
|
||||
lanphy_write_page_reg(ptp_priv->phydev, LAN8814_PAGE_PORT_REGS,
|
||||
PTP_TX_PARSE_CONFIG, txcfg);
|
||||
|
||||
pkt_ts_enable = PTP_TIMESTAMP_EN_SYNC_ | PTP_TIMESTAMP_EN_DREQ_ |
|
||||
PTP_TIMESTAMP_EN_PDREQ_ | PTP_TIMESTAMP_EN_PDRES_;
|
||||
lanphy_write_page_reg(ptp_priv->phydev, 5, PTP_RX_TIMESTAMP_EN, pkt_ts_enable);
|
||||
lanphy_write_page_reg(ptp_priv->phydev, 5, PTP_TX_TIMESTAMP_EN, pkt_ts_enable);
|
||||
lanphy_write_page_reg(ptp_priv->phydev, LAN8814_PAGE_PORT_REGS,
|
||||
PTP_RX_TIMESTAMP_EN, pkt_ts_enable);
|
||||
lanphy_write_page_reg(ptp_priv->phydev, LAN8814_PAGE_PORT_REGS,
|
||||
PTP_TX_TIMESTAMP_EN, pkt_ts_enable);
|
||||
|
||||
tx_mod = lanphy_read_page_reg(ptp_priv->phydev, 5, PTP_TX_MOD);
|
||||
if (ptp_priv->hwts_tx_type == HWTSTAMP_TX_ONESTEP_SYNC) {
|
||||
lanphy_write_page_reg(ptp_priv->phydev, 5, PTP_TX_MOD,
|
||||
tx_mod | PTP_TX_MOD_TX_PTP_SYNC_TS_INSERT_);
|
||||
lanphy_modify_page_reg(ptp_priv->phydev, LAN8814_PAGE_PORT_REGS,
|
||||
PTP_TX_MOD,
|
||||
PTP_TX_MOD_TX_PTP_SYNC_TS_INSERT_,
|
||||
PTP_TX_MOD_TX_PTP_SYNC_TS_INSERT_);
|
||||
} else if (ptp_priv->hwts_tx_type == HWTSTAMP_TX_ON) {
|
||||
lanphy_write_page_reg(ptp_priv->phydev, 5, PTP_TX_MOD,
|
||||
tx_mod & ~PTP_TX_MOD_TX_PTP_SYNC_TS_INSERT_);
|
||||
lanphy_modify_page_reg(ptp_priv->phydev, LAN8814_PAGE_PORT_REGS,
|
||||
PTP_TX_MOD,
|
||||
PTP_TX_MOD_TX_PTP_SYNC_TS_INSERT_,
|
||||
0);
|
||||
}
|
||||
|
||||
if (config->rx_filter != HWTSTAMP_FILTER_NONE)
|
||||
@@ -2855,29 +2939,41 @@ static bool lan8814_rxtstamp(struct mii_timestamper *mii_ts, struct sk_buff *skb
|
||||
static void lan8814_ptp_clock_set(struct phy_device *phydev,
|
||||
time64_t sec, u32 nsec)
|
||||
{
|
||||
lanphy_write_page_reg(phydev, 4, PTP_CLOCK_SET_SEC_LO, lower_16_bits(sec));
|
||||
lanphy_write_page_reg(phydev, 4, PTP_CLOCK_SET_SEC_MID, upper_16_bits(sec));
|
||||
lanphy_write_page_reg(phydev, 4, PTP_CLOCK_SET_SEC_HI, upper_32_bits(sec));
|
||||
lanphy_write_page_reg(phydev, 4, PTP_CLOCK_SET_NS_LO, lower_16_bits(nsec));
|
||||
lanphy_write_page_reg(phydev, 4, PTP_CLOCK_SET_NS_HI, upper_16_bits(nsec));
|
||||
lanphy_write_page_reg(phydev, LAN8814_PAGE_COMMON_REGS,
|
||||
PTP_CLOCK_SET_SEC_LO, lower_16_bits(sec));
|
||||
lanphy_write_page_reg(phydev, LAN8814_PAGE_COMMON_REGS,
|
||||
PTP_CLOCK_SET_SEC_MID, upper_16_bits(sec));
|
||||
lanphy_write_page_reg(phydev, LAN8814_PAGE_COMMON_REGS,
|
||||
PTP_CLOCK_SET_SEC_HI, upper_32_bits(sec));
|
||||
lanphy_write_page_reg(phydev, LAN8814_PAGE_COMMON_REGS,
|
||||
PTP_CLOCK_SET_NS_LO, lower_16_bits(nsec));
|
||||
lanphy_write_page_reg(phydev, LAN8814_PAGE_COMMON_REGS,
|
||||
PTP_CLOCK_SET_NS_HI, upper_16_bits(nsec));
|
||||
|
||||
lanphy_write_page_reg(phydev, 4, PTP_CMD_CTL, PTP_CMD_CTL_PTP_CLOCK_LOAD_);
|
||||
lanphy_write_page_reg(phydev, LAN8814_PAGE_COMMON_REGS, PTP_CMD_CTL,
|
||||
PTP_CMD_CTL_PTP_CLOCK_LOAD_);
|
||||
}
|
||||
|
||||
static void lan8814_ptp_clock_get(struct phy_device *phydev,
|
||||
time64_t *sec, u32 *nsec)
|
||||
{
|
||||
lanphy_write_page_reg(phydev, 4, PTP_CMD_CTL, PTP_CMD_CTL_PTP_CLOCK_READ_);
|
||||
lanphy_write_page_reg(phydev, LAN8814_PAGE_COMMON_REGS, PTP_CMD_CTL,
|
||||
PTP_CMD_CTL_PTP_CLOCK_READ_);
|
||||
|
||||
*sec = lanphy_read_page_reg(phydev, 4, PTP_CLOCK_READ_SEC_HI);
|
||||
*sec = lanphy_read_page_reg(phydev, LAN8814_PAGE_COMMON_REGS,
|
||||
PTP_CLOCK_READ_SEC_HI);
|
||||
*sec <<= 16;
|
||||
*sec |= lanphy_read_page_reg(phydev, 4, PTP_CLOCK_READ_SEC_MID);
|
||||
*sec |= lanphy_read_page_reg(phydev, LAN8814_PAGE_COMMON_REGS,
|
||||
PTP_CLOCK_READ_SEC_MID);
|
||||
*sec <<= 16;
|
||||
*sec |= lanphy_read_page_reg(phydev, 4, PTP_CLOCK_READ_SEC_LO);
|
||||
*sec |= lanphy_read_page_reg(phydev, LAN8814_PAGE_COMMON_REGS,
|
||||
PTP_CLOCK_READ_SEC_LO);
|
||||
|
||||
*nsec = lanphy_read_page_reg(phydev, 4, PTP_CLOCK_READ_NS_HI);
|
||||
*nsec = lanphy_read_page_reg(phydev, LAN8814_PAGE_COMMON_REGS,
|
||||
PTP_CLOCK_READ_NS_HI);
|
||||
*nsec <<= 16;
|
||||
*nsec |= lanphy_read_page_reg(phydev, 4, PTP_CLOCK_READ_NS_LO);
|
||||
*nsec |= lanphy_read_page_reg(phydev, LAN8814_PAGE_COMMON_REGS,
|
||||
PTP_CLOCK_READ_NS_LO);
|
||||
}
|
||||
|
||||
static int lan8814_ptpci_gettime64(struct ptp_clock_info *ptpci,
|
||||
@@ -2916,14 +3012,18 @@ static void lan8814_ptp_set_target(struct phy_device *phydev, int event,
|
||||
s64 start_sec, u32 start_nsec)
|
||||
{
|
||||
/* Set the start time */
|
||||
lanphy_write_page_reg(phydev, 4, LAN8814_PTP_CLOCK_TARGET_SEC_LO(event),
|
||||
lanphy_write_page_reg(phydev, LAN8814_PAGE_COMMON_REGS,
|
||||
LAN8814_PTP_CLOCK_TARGET_SEC_LO(event),
|
||||
lower_16_bits(start_sec));
|
||||
lanphy_write_page_reg(phydev, 4, LAN8814_PTP_CLOCK_TARGET_SEC_HI(event),
|
||||
lanphy_write_page_reg(phydev, LAN8814_PAGE_COMMON_REGS,
|
||||
LAN8814_PTP_CLOCK_TARGET_SEC_HI(event),
|
||||
upper_16_bits(start_sec));
|
||||
|
||||
lanphy_write_page_reg(phydev, 4, LAN8814_PTP_CLOCK_TARGET_NS_LO(event),
|
||||
lanphy_write_page_reg(phydev, LAN8814_PAGE_COMMON_REGS,
|
||||
LAN8814_PTP_CLOCK_TARGET_NS_LO(event),
|
||||
lower_16_bits(start_nsec));
|
||||
lanphy_write_page_reg(phydev, 4, LAN8814_PTP_CLOCK_TARGET_NS_HI(event),
|
||||
lanphy_write_page_reg(phydev, LAN8814_PAGE_COMMON_REGS,
|
||||
LAN8814_PTP_CLOCK_TARGET_NS_HI(event),
|
||||
upper_16_bits(start_nsec) & 0x3fff);
|
||||
}
|
||||
|
||||
@@ -3021,9 +3121,11 @@ static void lan8814_ptp_clock_step(struct phy_device *phydev,
|
||||
adjustment_value_lo = adjustment_value & 0xffff;
|
||||
adjustment_value_hi = (adjustment_value >> 16) & 0x3fff;
|
||||
|
||||
lanphy_write_page_reg(phydev, 4, PTP_LTC_STEP_ADJ_LO,
|
||||
lanphy_write_page_reg(phydev, LAN8814_PAGE_COMMON_REGS,
|
||||
PTP_LTC_STEP_ADJ_LO,
|
||||
adjustment_value_lo);
|
||||
lanphy_write_page_reg(phydev, 4, PTP_LTC_STEP_ADJ_HI,
|
||||
lanphy_write_page_reg(phydev, LAN8814_PAGE_COMMON_REGS,
|
||||
PTP_LTC_STEP_ADJ_HI,
|
||||
PTP_LTC_STEP_ADJ_DIR_ |
|
||||
adjustment_value_hi);
|
||||
seconds -= ((s32)adjustment_value);
|
||||
@@ -3041,9 +3143,11 @@ static void lan8814_ptp_clock_step(struct phy_device *phydev,
|
||||
adjustment_value_lo = adjustment_value & 0xffff;
|
||||
adjustment_value_hi = (adjustment_value >> 16) & 0x3fff;
|
||||
|
||||
lanphy_write_page_reg(phydev, 4, PTP_LTC_STEP_ADJ_LO,
|
||||
lanphy_write_page_reg(phydev, LAN8814_PAGE_COMMON_REGS,
|
||||
PTP_LTC_STEP_ADJ_LO,
|
||||
adjustment_value_lo);
|
||||
lanphy_write_page_reg(phydev, 4, PTP_LTC_STEP_ADJ_HI,
|
||||
lanphy_write_page_reg(phydev, LAN8814_PAGE_COMMON_REGS,
|
||||
PTP_LTC_STEP_ADJ_HI,
|
||||
adjustment_value_hi);
|
||||
seconds += ((s32)adjustment_value);
|
||||
|
||||
@@ -3051,8 +3155,8 @@ static void lan8814_ptp_clock_step(struct phy_device *phydev,
|
||||
set_seconds += adjustment_value;
|
||||
lan8814_ptp_update_target(phydev, set_seconds);
|
||||
}
|
||||
lanphy_write_page_reg(phydev, 4, PTP_CMD_CTL,
|
||||
PTP_CMD_CTL_PTP_LTC_STEP_SEC_);
|
||||
lanphy_write_page_reg(phydev, LAN8814_PAGE_COMMON_REGS,
|
||||
PTP_CMD_CTL, PTP_CMD_CTL_PTP_LTC_STEP_SEC_);
|
||||
}
|
||||
if (nano_seconds) {
|
||||
u16 nano_seconds_lo;
|
||||
@@ -3061,12 +3165,14 @@ static void lan8814_ptp_clock_step(struct phy_device *phydev,
|
||||
nano_seconds_lo = nano_seconds & 0xffff;
|
||||
nano_seconds_hi = (nano_seconds >> 16) & 0x3fff;
|
||||
|
||||
lanphy_write_page_reg(phydev, 4, PTP_LTC_STEP_ADJ_LO,
|
||||
lanphy_write_page_reg(phydev, LAN8814_PAGE_COMMON_REGS,
|
||||
PTP_LTC_STEP_ADJ_LO,
|
||||
nano_seconds_lo);
|
||||
lanphy_write_page_reg(phydev, 4, PTP_LTC_STEP_ADJ_HI,
|
||||
lanphy_write_page_reg(phydev, LAN8814_PAGE_COMMON_REGS,
|
||||
PTP_LTC_STEP_ADJ_HI,
|
||||
PTP_LTC_STEP_ADJ_DIR_ |
|
||||
nano_seconds_hi);
|
||||
lanphy_write_page_reg(phydev, 4, PTP_CMD_CTL,
|
||||
lanphy_write_page_reg(phydev, LAN8814_PAGE_COMMON_REGS, PTP_CMD_CTL,
|
||||
PTP_CMD_CTL_PTP_LTC_STEP_NSEC_);
|
||||
}
|
||||
}
|
||||
@@ -3108,8 +3214,10 @@ static int lan8814_ptpci_adjfine(struct ptp_clock_info *ptpci, long scaled_ppm)
|
||||
kszphy_rate_adj_hi |= PTP_CLOCK_RATE_ADJ_DIR_;
|
||||
|
||||
mutex_lock(&shared->shared_lock);
|
||||
lanphy_write_page_reg(phydev, 4, PTP_CLOCK_RATE_ADJ_HI, kszphy_rate_adj_hi);
|
||||
lanphy_write_page_reg(phydev, 4, PTP_CLOCK_RATE_ADJ_LO, kszphy_rate_adj_lo);
|
||||
lanphy_write_page_reg(phydev, LAN8814_PAGE_COMMON_REGS, PTP_CLOCK_RATE_ADJ_HI,
|
||||
kszphy_rate_adj_hi);
|
||||
lanphy_write_page_reg(phydev, LAN8814_PAGE_COMMON_REGS, PTP_CLOCK_RATE_ADJ_LO,
|
||||
kszphy_rate_adj_lo);
|
||||
mutex_unlock(&shared->shared_lock);
|
||||
|
||||
return 0;
|
||||
@@ -3118,17 +3226,17 @@ static int lan8814_ptpci_adjfine(struct ptp_clock_info *ptpci, long scaled_ppm)
|
||||
static void lan8814_ptp_set_reload(struct phy_device *phydev, int event,
|
||||
s64 period_sec, u32 period_nsec)
|
||||
{
|
||||
lanphy_write_page_reg(phydev, 4,
|
||||
lanphy_write_page_reg(phydev, LAN8814_PAGE_COMMON_REGS,
|
||||
LAN8814_PTP_CLOCK_TARGET_RELOAD_SEC_LO(event),
|
||||
lower_16_bits(period_sec));
|
||||
lanphy_write_page_reg(phydev, 4,
|
||||
lanphy_write_page_reg(phydev, LAN8814_PAGE_COMMON_REGS,
|
||||
LAN8814_PTP_CLOCK_TARGET_RELOAD_SEC_HI(event),
|
||||
upper_16_bits(period_sec));
|
||||
|
||||
lanphy_write_page_reg(phydev, 4,
|
||||
lanphy_write_page_reg(phydev, LAN8814_PAGE_COMMON_REGS,
|
||||
LAN8814_PTP_CLOCK_TARGET_RELOAD_NS_LO(event),
|
||||
lower_16_bits(period_nsec));
|
||||
lanphy_write_page_reg(phydev, 4,
|
||||
lanphy_write_page_reg(phydev, LAN8814_PAGE_COMMON_REGS,
|
||||
LAN8814_PTP_CLOCK_TARGET_RELOAD_NS_HI(event),
|
||||
upper_16_bits(period_nsec) & 0x3fff);
|
||||
}
|
||||
@@ -3136,73 +3244,72 @@ static void lan8814_ptp_set_reload(struct phy_device *phydev, int event,
static void lan8814_ptp_enable_event(struct phy_device *phydev, int event,
				     int pulse_width)
{
	u16 val;

	val = lanphy_read_page_reg(phydev, 4, LAN8814_PTP_GENERAL_CONFIG);
	/* Set the pulse width of the event */
	val &= ~(LAN8814_PTP_GENERAL_CONFIG_LTC_EVENT_MASK(event));
	/* Make sure that the target clock will be incremented each time when
	/* Set the pulse width of the event,
	 * Make sure that the target clock will be incremented each time when
	 * local time reaches or passes it
	 * Set the polarity high
	 */
	val |= LAN8814_PTP_GENERAL_CONFIG_LTC_EVENT_SET(event, pulse_width);
	val &= ~(LAN8814_PTP_GENERAL_CONFIG_RELOAD_ADD_X(event));
	/* Set the polarity high */
	val |= LAN8814_PTP_GENERAL_CONFIG_POLARITY_X(event);
	lanphy_write_page_reg(phydev, 4, LAN8814_PTP_GENERAL_CONFIG, val);
	lanphy_modify_page_reg(phydev, LAN8814_PAGE_COMMON_REGS, LAN8814_PTP_GENERAL_CONFIG,
			       LAN8814_PTP_GENERAL_CONFIG_LTC_EVENT_MASK(event) |
			       LAN8814_PTP_GENERAL_CONFIG_LTC_EVENT_SET(event, pulse_width) |
			       LAN8814_PTP_GENERAL_CONFIG_RELOAD_ADD_X(event) |
			       LAN8814_PTP_GENERAL_CONFIG_POLARITY_X(event),
			       LAN8814_PTP_GENERAL_CONFIG_LTC_EVENT_SET(event, pulse_width) |
			       LAN8814_PTP_GENERAL_CONFIG_POLARITY_X(event));
}

static void lan8814_ptp_disable_event(struct phy_device *phydev, int event)
{
	u16 val;

	/* Set target to too far in the future, effectively disabling it */
	lan8814_ptp_set_target(phydev, event, 0xFFFFFFFF, 0);

	/* And then reload once it reaches the target */
	val = lanphy_read_page_reg(phydev, 4, LAN8814_PTP_GENERAL_CONFIG);
	val |= LAN8814_PTP_GENERAL_CONFIG_RELOAD_ADD_X(event);
	lanphy_write_page_reg(phydev, 4, LAN8814_PTP_GENERAL_CONFIG, val);
	lanphy_modify_page_reg(phydev, LAN8814_PAGE_COMMON_REGS, LAN8814_PTP_GENERAL_CONFIG,
			       LAN8814_PTP_GENERAL_CONFIG_RELOAD_ADD_X(event),
			       LAN8814_PTP_GENERAL_CONFIG_RELOAD_ADD_X(event));
}
|
||||
|
||||
static void lan8814_ptp_perout_off(struct phy_device *phydev, int pin)
|
||||
{
|
||||
u16 val;
|
||||
|
||||
/* Disable gpio alternate function,
|
||||
* 1: select as gpio,
|
||||
* 0: select alt func
|
||||
*/
|
||||
val = lanphy_read_page_reg(phydev, 4, LAN8814_GPIO_EN_ADDR(pin));
|
||||
val |= LAN8814_GPIO_EN_BIT(pin);
|
||||
lanphy_write_page_reg(phydev, 4, LAN8814_GPIO_EN_ADDR(pin), val);
|
||||
lanphy_modify_page_reg(phydev, LAN8814_PAGE_COMMON_REGS,
|
||||
LAN8814_GPIO_EN_ADDR(pin),
|
||||
LAN8814_GPIO_EN_BIT(pin),
|
||||
LAN8814_GPIO_EN_BIT(pin));
|
||||
|
||||
val = lanphy_read_page_reg(phydev, 4, LAN8814_GPIO_DIR_ADDR(pin));
|
||||
val &= ~LAN8814_GPIO_DIR_BIT(pin);
|
||||
lanphy_write_page_reg(phydev, 4, LAN8814_GPIO_DIR_ADDR(pin), val);
|
||||
lanphy_modify_page_reg(phydev, LAN8814_PAGE_COMMON_REGS,
|
||||
LAN8814_GPIO_DIR_ADDR(pin),
|
||||
LAN8814_GPIO_DIR_BIT(pin),
|
||||
0);
|
||||
|
||||
val = lanphy_read_page_reg(phydev, 4, LAN8814_GPIO_BUF_ADDR(pin));
|
||||
val &= ~LAN8814_GPIO_BUF_BIT(pin);
|
||||
lanphy_write_page_reg(phydev, 4, LAN8814_GPIO_BUF_ADDR(pin), val);
|
||||
lanphy_modify_page_reg(phydev, LAN8814_PAGE_COMMON_REGS,
|
||||
LAN8814_GPIO_BUF_ADDR(pin),
|
||||
LAN8814_GPIO_BUF_BIT(pin),
|
||||
0);
|
||||
}
|
||||
|
||||
static void lan8814_ptp_perout_on(struct phy_device *phydev, int pin)
|
||||
{
|
||||
int val;
|
||||
|
||||
/* Set as gpio output */
|
||||
val = lanphy_read_page_reg(phydev, 4, LAN8814_GPIO_DIR_ADDR(pin));
|
||||
val |= LAN8814_GPIO_DIR_BIT(pin);
|
||||
lanphy_write_page_reg(phydev, 4, LAN8814_GPIO_DIR_ADDR(pin), val);
|
||||
lanphy_modify_page_reg(phydev, LAN8814_PAGE_COMMON_REGS,
|
||||
LAN8814_GPIO_DIR_ADDR(pin),
|
||||
LAN8814_GPIO_DIR_BIT(pin),
|
||||
LAN8814_GPIO_DIR_BIT(pin));
|
||||
|
||||
/* Enable gpio 0:for alternate function, 1:gpio */
|
||||
val = lanphy_read_page_reg(phydev, 4, LAN8814_GPIO_EN_ADDR(pin));
|
||||
val &= ~LAN8814_GPIO_EN_BIT(pin);
|
||||
lanphy_write_page_reg(phydev, 4, LAN8814_GPIO_EN_ADDR(pin), val);
|
||||
lanphy_modify_page_reg(phydev, LAN8814_PAGE_COMMON_REGS,
|
||||
LAN8814_GPIO_EN_ADDR(pin),
|
||||
LAN8814_GPIO_EN_BIT(pin),
|
||||
0);
|
||||
|
||||
/* Set buffer type to push pull */
|
||||
val = lanphy_read_page_reg(phydev, 4, LAN8814_GPIO_BUF_ADDR(pin));
|
||||
val |= LAN8814_GPIO_BUF_BIT(pin);
|
||||
lanphy_write_page_reg(phydev, 4, LAN8814_GPIO_BUF_ADDR(pin), val);
|
||||
lanphy_modify_page_reg(phydev, LAN8814_PAGE_COMMON_REGS,
|
||||
LAN8814_GPIO_BUF_ADDR(pin),
|
||||
LAN8814_GPIO_BUF_BIT(pin),
|
||||
LAN8814_GPIO_BUF_BIT(pin));
|
||||
}
|
||||
|
||||
static int lan8814_ptp_perout(struct ptp_clock_info *ptpci,
|
||||
@@ -3321,61 +3428,64 @@ static int lan8814_ptp_perout(struct ptp_clock_info *ptpci,
|
||||
|
||||
static void lan8814_ptp_extts_on(struct phy_device *phydev, int pin, u32 flags)
|
||||
{
|
||||
u16 tmp;
|
||||
|
||||
/* Set as gpio input */
|
||||
tmp = lanphy_read_page_reg(phydev, 4, LAN8814_GPIO_DIR_ADDR(pin));
|
||||
tmp &= ~LAN8814_GPIO_DIR_BIT(pin);
|
||||
lanphy_write_page_reg(phydev, 4, LAN8814_GPIO_DIR_ADDR(pin), tmp);
|
||||
lanphy_modify_page_reg(phydev, LAN8814_PAGE_COMMON_REGS,
|
||||
LAN8814_GPIO_DIR_ADDR(pin),
|
||||
LAN8814_GPIO_DIR_BIT(pin),
|
||||
0);
|
||||
|
||||
/* Map the pin to ltc pin 0 of the capture map registers */
|
||||
tmp = lanphy_read_page_reg(phydev, 4, PTP_GPIO_CAP_MAP_LO);
|
||||
tmp |= pin;
|
||||
lanphy_write_page_reg(phydev, 4, PTP_GPIO_CAP_MAP_LO, tmp);
|
||||
lanphy_modify_page_reg(phydev, LAN8814_PAGE_COMMON_REGS,
|
||||
PTP_GPIO_CAP_MAP_LO, pin, pin);
|
||||
|
||||
/* Enable capture on the edges of the ltc pin */
|
||||
tmp = lanphy_read_page_reg(phydev, 4, PTP_GPIO_CAP_EN);
|
||||
if (flags & PTP_RISING_EDGE)
|
||||
tmp |= PTP_GPIO_CAP_EN_GPIO_RE_CAPTURE_ENABLE(0);
|
||||
lanphy_modify_page_reg(phydev, LAN8814_PAGE_COMMON_REGS,
|
||||
PTP_GPIO_CAP_EN,
|
||||
PTP_GPIO_CAP_EN_GPIO_RE_CAPTURE_ENABLE(0),
|
||||
PTP_GPIO_CAP_EN_GPIO_RE_CAPTURE_ENABLE(0));
|
||||
if (flags & PTP_FALLING_EDGE)
|
||||
tmp |= PTP_GPIO_CAP_EN_GPIO_FE_CAPTURE_ENABLE(0);
|
||||
lanphy_write_page_reg(phydev, 4, PTP_GPIO_CAP_EN, tmp);
|
||||
lanphy_modify_page_reg(phydev, LAN8814_PAGE_COMMON_REGS,
|
||||
PTP_GPIO_CAP_EN,
|
||||
PTP_GPIO_CAP_EN_GPIO_FE_CAPTURE_ENABLE(0),
|
||||
PTP_GPIO_CAP_EN_GPIO_FE_CAPTURE_ENABLE(0));
|
||||
|
||||
/* Enable interrupt top interrupt */
|
||||
tmp = lanphy_read_page_reg(phydev, 4, PTP_COMMON_INT_ENA);
|
||||
tmp |= PTP_COMMON_INT_ENA_GPIO_CAP_EN;
|
||||
lanphy_write_page_reg(phydev, 4, PTP_COMMON_INT_ENA, tmp);
|
||||
lanphy_modify_page_reg(phydev, LAN8814_PAGE_COMMON_REGS, PTP_COMMON_INT_ENA,
|
||||
PTP_COMMON_INT_ENA_GPIO_CAP_EN,
|
||||
PTP_COMMON_INT_ENA_GPIO_CAP_EN);
|
||||
}
|
||||
|
||||
static void lan8814_ptp_extts_off(struct phy_device *phydev, int pin)
|
||||
{
|
||||
u16 tmp;
|
||||
|
||||
/* Set as gpio out */
|
||||
tmp = lanphy_read_page_reg(phydev, 4, LAN8814_GPIO_DIR_ADDR(pin));
|
||||
tmp |= LAN8814_GPIO_DIR_BIT(pin);
|
||||
lanphy_write_page_reg(phydev, 4, LAN8814_GPIO_DIR_ADDR(pin), tmp);
|
||||
lanphy_modify_page_reg(phydev, LAN8814_PAGE_COMMON_REGS,
|
||||
LAN8814_GPIO_DIR_ADDR(pin),
|
||||
LAN8814_GPIO_DIR_BIT(pin),
|
||||
LAN8814_GPIO_DIR_BIT(pin));
|
||||
|
||||
/* Enable alternate, 0:for alternate function, 1:gpio */
|
||||
tmp = lanphy_read_page_reg(phydev, 4, LAN8814_GPIO_EN_ADDR(pin));
|
||||
tmp &= ~LAN8814_GPIO_EN_BIT(pin);
|
||||
lanphy_write_page_reg(phydev, 4, LAN8814_GPIO_EN_ADDR(pin), tmp);
|
||||
lanphy_modify_page_reg(phydev, LAN8814_PAGE_COMMON_REGS,
|
||||
LAN8814_GPIO_EN_ADDR(pin),
|
||||
LAN8814_GPIO_EN_BIT(pin),
|
||||
0);
|
||||
|
||||
/* Clear the mapping of pin to registers 0 of the capture registers */
|
||||
tmp = lanphy_read_page_reg(phydev, 4, PTP_GPIO_CAP_MAP_LO);
|
||||
tmp &= ~GENMASK(3, 0);
|
||||
lanphy_write_page_reg(phydev, 4, PTP_GPIO_CAP_MAP_LO, tmp);
|
||||
lanphy_modify_page_reg(phydev, LAN8814_PAGE_COMMON_REGS,
|
||||
PTP_GPIO_CAP_MAP_LO,
|
||||
GENMASK(3, 0),
|
||||
0);
|
||||
|
||||
/* Disable capture on both of the edges */
|
||||
tmp = lanphy_read_page_reg(phydev, 4, PTP_GPIO_CAP_EN);
|
||||
tmp &= ~PTP_GPIO_CAP_EN_GPIO_RE_CAPTURE_ENABLE(pin);
|
||||
tmp &= ~PTP_GPIO_CAP_EN_GPIO_FE_CAPTURE_ENABLE(pin);
|
||||
lanphy_write_page_reg(phydev, 4, PTP_GPIO_CAP_EN, tmp);
|
||||
lanphy_modify_page_reg(phydev, LAN8814_PAGE_COMMON_REGS, PTP_GPIO_CAP_EN,
|
||||
PTP_GPIO_CAP_EN_GPIO_RE_CAPTURE_ENABLE(pin) |
|
||||
PTP_GPIO_CAP_EN_GPIO_FE_CAPTURE_ENABLE(pin),
|
||||
0);
|
||||
|
||||
/* Disable interrupt top interrupt */
|
||||
tmp = lanphy_read_page_reg(phydev, 4, PTP_COMMON_INT_ENA);
|
||||
tmp &= ~PTP_COMMON_INT_ENA_GPIO_CAP_EN;
|
||||
lanphy_write_page_reg(phydev, 4, PTP_COMMON_INT_ENA, tmp);
|
||||
lanphy_modify_page_reg(phydev, LAN8814_PAGE_COMMON_REGS, PTP_COMMON_INT_ENA,
|
||||
PTP_COMMON_INT_ENA_GPIO_CAP_EN,
|
||||
0);
|
||||
}
|
||||
|
||||
static int lan8814_ptp_extts(struct ptp_clock_info *ptpci,
|
||||
@@ -3510,7 +3620,8 @@ static void lan8814_get_tx_ts(struct kszphy_ptp_priv *ptp_priv)
|
||||
/* If other timestamps are available in the FIFO,
|
||||
* process them.
|
||||
*/
|
||||
reg = lanphy_read_page_reg(phydev, 5, PTP_CAP_INFO);
|
||||
reg = lanphy_read_page_reg(phydev, LAN8814_PAGE_PORT_REGS,
|
||||
PTP_CAP_INFO);
|
||||
} while (PTP_CAP_INFO_TX_TS_CNT_GET_(reg) > 0);
|
||||
}
|
||||
|
||||
@@ -3583,7 +3694,8 @@ static void lan8814_get_rx_ts(struct kszphy_ptp_priv *ptp_priv)
|
||||
/* If other timestamps are available in the FIFO,
|
||||
* process them.
|
||||
*/
|
||||
reg = lanphy_read_page_reg(phydev, 5, PTP_CAP_INFO);
|
||||
reg = lanphy_read_page_reg(phydev, LAN8814_PAGE_PORT_REGS,
|
||||
PTP_CAP_INFO);
|
||||
} while (PTP_CAP_INFO_RX_TS_CNT_GET_(reg) > 0);
|
||||
}
|
||||
|
||||
@@ -3620,31 +3732,40 @@ static int lan8814_gpio_process_cap(struct lan8814_shared_priv *shared)
|
||||
/* This is 0 because whatever was the input pin it was mapped it to
|
||||
* ltc gpio pin 0
|
||||
*/
|
||||
tmp = lanphy_read_page_reg(phydev, 4, PTP_GPIO_SEL);
|
||||
tmp |= PTP_GPIO_SEL_GPIO_SEL(0);
|
||||
lanphy_write_page_reg(phydev, 4, PTP_GPIO_SEL, tmp);
|
||||
lanphy_modify_page_reg(phydev, LAN8814_PAGE_COMMON_REGS, PTP_GPIO_SEL,
|
||||
PTP_GPIO_SEL_GPIO_SEL(0),
|
||||
PTP_GPIO_SEL_GPIO_SEL(0));
|
||||
|
||||
tmp = lanphy_read_page_reg(phydev, 4, PTP_GPIO_CAP_STS);
|
||||
tmp = lanphy_read_page_reg(phydev, LAN8814_PAGE_COMMON_REGS,
|
||||
PTP_GPIO_CAP_STS);
|
||||
if (!(tmp & PTP_GPIO_CAP_STS_PTP_GPIO_RE_STS(0)) &&
|
||||
!(tmp & PTP_GPIO_CAP_STS_PTP_GPIO_FE_STS(0)))
|
||||
return -1;
|
||||
|
||||
if (tmp & BIT(0)) {
|
||||
sec = lanphy_read_page_reg(phydev, 4, PTP_GPIO_RE_LTC_SEC_HI_CAP);
|
||||
sec = lanphy_read_page_reg(phydev, LAN8814_PAGE_COMMON_REGS,
|
||||
PTP_GPIO_RE_LTC_SEC_HI_CAP);
|
||||
sec <<= 16;
|
||||
sec |= lanphy_read_page_reg(phydev, 4, PTP_GPIO_RE_LTC_SEC_LO_CAP);
|
||||
sec |= lanphy_read_page_reg(phydev, LAN8814_PAGE_COMMON_REGS,
|
||||
PTP_GPIO_RE_LTC_SEC_LO_CAP);
|
||||
|
||||
nsec = lanphy_read_page_reg(phydev, 4, PTP_GPIO_RE_LTC_NS_HI_CAP) & 0x3fff;
|
||||
nsec = lanphy_read_page_reg(phydev, LAN8814_PAGE_COMMON_REGS,
|
||||
PTP_GPIO_RE_LTC_NS_HI_CAP) & 0x3fff;
|
||||
nsec <<= 16;
|
||||
nsec |= lanphy_read_page_reg(phydev, 4, PTP_GPIO_RE_LTC_NS_LO_CAP);
|
||||
nsec |= lanphy_read_page_reg(phydev, LAN8814_PAGE_COMMON_REGS,
|
||||
PTP_GPIO_RE_LTC_NS_LO_CAP);
|
||||
} else {
|
||||
sec = lanphy_read_page_reg(phydev, 4, PTP_GPIO_FE_LTC_SEC_HI_CAP);
|
||||
sec = lanphy_read_page_reg(phydev, LAN8814_PAGE_COMMON_REGS,
|
||||
PTP_GPIO_FE_LTC_SEC_HI_CAP);
|
||||
sec <<= 16;
|
||||
sec |= lanphy_read_page_reg(phydev, 4, PTP_GPIO_FE_LTC_SEC_LO_CAP);
|
||||
sec |= lanphy_read_page_reg(phydev, LAN8814_PAGE_COMMON_REGS,
|
||||
PTP_GPIO_FE_LTC_SEC_LO_CAP);
|
||||
|
||||
nsec = lanphy_read_page_reg(phydev, 4, PTP_GPIO_FE_LTC_NS_HI_CAP) & 0x3fff;
|
||||
nsec = lanphy_read_page_reg(phydev, LAN8814_PAGE_COMMON_REGS,
|
||||
PTP_GPIO_FE_LTC_NS_HI_CAP) & 0x3fff;
|
||||
nsec <<= 16;
|
||||
nsec |= lanphy_read_page_reg(phydev, 4, PTP_GPIO_RE_LTC_NS_LO_CAP);
|
||||
nsec |= lanphy_read_page_reg(phydev, LAN8814_PAGE_COMMON_REGS,
|
||||
PTP_GPIO_RE_LTC_NS_LO_CAP);
|
||||
}
|
||||
|
||||
ptp_event.index = 0;
|
||||
@@ -3669,19 +3790,17 @@ static int lan8814_handle_gpio_interrupt(struct phy_device *phydev, u16 status)
|
||||
|
||||
static int lan8804_config_init(struct phy_device *phydev)
|
||||
{
|
||||
int val;
|
||||
|
||||
/* MDI-X setting for swap A,B transmit */
|
||||
val = lanphy_read_page_reg(phydev, 2, LAN8804_ALIGN_SWAP);
|
||||
val &= ~LAN8804_ALIGN_TX_A_B_SWAP_MASK;
|
||||
val |= LAN8804_ALIGN_TX_A_B_SWAP;
|
||||
lanphy_write_page_reg(phydev, 2, LAN8804_ALIGN_SWAP, val);
|
||||
lanphy_modify_page_reg(phydev, LAN8814_PAGE_PCS_DIGITAL, LAN8804_ALIGN_SWAP,
|
||||
LAN8804_ALIGN_TX_A_B_SWAP_MASK,
|
||||
LAN8804_ALIGN_TX_A_B_SWAP);
|
||||
|
||||
/* Make sure that the PHY will not stop generating the clock when the
|
||||
* link partner goes down
|
||||
*/
|
||||
lanphy_write_page_reg(phydev, 31, LAN8814_CLOCK_MANAGEMENT, 0x27e);
|
||||
lanphy_read_page_reg(phydev, 1, LAN8814_LINK_QUALITY);
|
||||
lanphy_write_page_reg(phydev, LAN8814_PAGE_SYSTEM_CTRL,
|
||||
LAN8814_CLOCK_MANAGEMENT, 0x27e);
|
||||
lanphy_read_page_reg(phydev, LAN8814_PAGE_AFE_PMA, LAN8814_LINK_QUALITY);
|
||||
|
||||
return 0;
|
||||
}
|
||||
@@ -3763,7 +3882,8 @@ static irqreturn_t lan8814_handle_interrupt(struct phy_device *phydev)
|
||||
}
|
||||
|
||||
while (true) {
|
||||
irq_status = lanphy_read_page_reg(phydev, 5, PTP_TSU_INT_STS);
|
||||
irq_status = lanphy_read_page_reg(phydev, LAN8814_PAGE_PORT_REGS,
|
||||
PTP_TSU_INT_STS);
|
||||
if (!irq_status)
|
||||
break;
|
||||
|
||||
@@ -3791,7 +3911,7 @@ static int lan8814_config_intr(struct phy_device *phydev)
|
||||
{
|
||||
int err;
|
||||
|
||||
lanphy_write_page_reg(phydev, 4, LAN8814_INTR_CTRL_REG,
|
||||
lanphy_write_page_reg(phydev, LAN8814_PAGE_COMMON_REGS, LAN8814_INTR_CTRL_REG,
|
||||
LAN8814_INTR_CTRL_REG_POLARITY |
|
||||
LAN8814_INTR_CTRL_REG_INTR_ENABLE);
|
||||
|
||||
@@ -3817,35 +3937,41 @@ static void lan8814_ptp_init(struct phy_device *phydev)
|
||||
{
|
||||
struct kszphy_priv *priv = phydev->priv;
|
||||
struct kszphy_ptp_priv *ptp_priv = &priv->ptp_priv;
|
||||
u32 temp;
|
||||
|
||||
if (!IS_ENABLED(CONFIG_PTP_1588_CLOCK) ||
|
||||
!IS_ENABLED(CONFIG_NETWORK_PHY_TIMESTAMPING))
|
||||
return;
|
||||
|
||||
lanphy_write_page_reg(phydev, 5, TSU_HARD_RESET, TSU_HARD_RESET_);
|
||||
lanphy_write_page_reg(phydev, LAN8814_PAGE_PORT_REGS,
|
||||
TSU_HARD_RESET, TSU_HARD_RESET_);
|
||||
|
||||
temp = lanphy_read_page_reg(phydev, 5, PTP_TX_MOD);
|
||||
temp |= PTP_TX_MOD_BAD_UDPV4_CHKSUM_FORCE_FCS_DIS_;
|
||||
lanphy_write_page_reg(phydev, 5, PTP_TX_MOD, temp);
|
||||
lanphy_modify_page_reg(phydev, LAN8814_PAGE_PORT_REGS, PTP_TX_MOD,
|
||||
PTP_TX_MOD_BAD_UDPV4_CHKSUM_FORCE_FCS_DIS_,
|
||||
PTP_TX_MOD_BAD_UDPV4_CHKSUM_FORCE_FCS_DIS_);
|
||||
|
||||
temp = lanphy_read_page_reg(phydev, 5, PTP_RX_MOD);
|
||||
temp |= PTP_RX_MOD_BAD_UDPV4_CHKSUM_FORCE_FCS_DIS_;
|
||||
lanphy_write_page_reg(phydev, 5, PTP_RX_MOD, temp);
|
||||
lanphy_modify_page_reg(phydev, LAN8814_PAGE_PORT_REGS, PTP_RX_MOD,
|
||||
PTP_RX_MOD_BAD_UDPV4_CHKSUM_FORCE_FCS_DIS_,
|
||||
PTP_RX_MOD_BAD_UDPV4_CHKSUM_FORCE_FCS_DIS_);
|
||||
|
||||
lanphy_write_page_reg(phydev, 5, PTP_RX_PARSE_CONFIG, 0);
|
||||
lanphy_write_page_reg(phydev, 5, PTP_TX_PARSE_CONFIG, 0);
|
||||
lanphy_write_page_reg(phydev, LAN8814_PAGE_PORT_REGS,
|
||||
PTP_RX_PARSE_CONFIG, 0);
|
||||
lanphy_write_page_reg(phydev, LAN8814_PAGE_PORT_REGS,
|
||||
PTP_TX_PARSE_CONFIG, 0);
|
||||
|
||||
/* Removing default registers configs related to L2 and IP */
|
||||
lanphy_write_page_reg(phydev, 5, PTP_TX_PARSE_L2_ADDR_EN, 0);
|
||||
lanphy_write_page_reg(phydev, 5, PTP_RX_PARSE_L2_ADDR_EN, 0);
|
||||
lanphy_write_page_reg(phydev, 5, PTP_TX_PARSE_IP_ADDR_EN, 0);
|
||||
lanphy_write_page_reg(phydev, 5, PTP_RX_PARSE_IP_ADDR_EN, 0);
|
||||
lanphy_write_page_reg(phydev, LAN8814_PAGE_PORT_REGS,
|
||||
PTP_TX_PARSE_L2_ADDR_EN, 0);
|
||||
lanphy_write_page_reg(phydev, LAN8814_PAGE_PORT_REGS,
|
||||
PTP_RX_PARSE_L2_ADDR_EN, 0);
|
||||
lanphy_write_page_reg(phydev, LAN8814_PAGE_PORT_REGS,
|
||||
PTP_TX_PARSE_IP_ADDR_EN, 0);
|
||||
lanphy_write_page_reg(phydev, LAN8814_PAGE_PORT_REGS,
|
||||
PTP_RX_PARSE_IP_ADDR_EN, 0);
|
||||
|
||||
/* Disable checking for minorVersionPTP field */
|
||||
lanphy_write_page_reg(phydev, 5, PTP_RX_VERSION,
|
||||
lanphy_write_page_reg(phydev, LAN8814_PAGE_PORT_REGS, PTP_RX_VERSION,
|
||||
PTP_MAX_VERSION(0xff) | PTP_MIN_VERSION(0x0));
|
||||
lanphy_write_page_reg(phydev, 5, PTP_TX_VERSION,
|
||||
lanphy_write_page_reg(phydev, LAN8814_PAGE_PORT_REGS, PTP_TX_VERSION,
|
||||
PTP_MAX_VERSION(0xff) | PTP_MIN_VERSION(0x0));
|
||||
|
||||
skb_queue_head_init(&ptp_priv->tx_queue);
|
||||
@@ -3926,12 +4052,14 @@ static int lan8814_ptp_probe_once(struct phy_device *phydev)
|
||||
/* The EP.4 is shared between all the PHYs in the package and also it
|
||||
* can be accessed by any of the PHYs
|
||||
*/
|
||||
lanphy_write_page_reg(phydev, 4, LTC_HARD_RESET, LTC_HARD_RESET_);
|
||||
lanphy_write_page_reg(phydev, 4, PTP_OPERATING_MODE,
|
||||
lanphy_write_page_reg(phydev, LAN8814_PAGE_COMMON_REGS,
|
||||
LTC_HARD_RESET, LTC_HARD_RESET_);
|
||||
lanphy_write_page_reg(phydev, LAN8814_PAGE_COMMON_REGS, PTP_OPERATING_MODE,
|
||||
PTP_OPERATING_MODE_STANDALONE_);
|
||||
|
||||
/* Enable ptp to run LTC clock for ptp and gpio 1PPS operation */
|
||||
lanphy_write_page_reg(phydev, 4, PTP_CMD_CTL, PTP_CMD_CTL_PTP_ENABLE_);
|
||||
lanphy_write_page_reg(phydev, LAN8814_PAGE_COMMON_REGS, PTP_CMD_CTL,
|
||||
PTP_CMD_CTL_PTP_ENABLE_);
|
||||
|
||||
return 0;
|
||||
}
|
||||
@@ -3940,36 +4068,32 @@ static void lan8814_setup_led(struct phy_device *phydev, int val)
|
||||
{
|
||||
int temp;
|
||||
|
||||
temp = lanphy_read_page_reg(phydev, 5, LAN8814_LED_CTRL_1);
|
||||
temp = lanphy_read_page_reg(phydev, LAN8814_PAGE_PORT_REGS,
|
||||
LAN8814_LED_CTRL_1);
|
||||
|
||||
if (val)
|
||||
temp |= LAN8814_LED_CTRL_1_KSZ9031_LED_MODE_;
|
||||
else
|
||||
temp &= ~LAN8814_LED_CTRL_1_KSZ9031_LED_MODE_;
|
||||
|
||||
lanphy_write_page_reg(phydev, 5, LAN8814_LED_CTRL_1, temp);
|
||||
lanphy_write_page_reg(phydev, LAN8814_PAGE_PORT_REGS,
|
||||
LAN8814_LED_CTRL_1, temp);
|
||||
}
|
||||
|
||||
static int lan8814_config_init(struct phy_device *phydev)
|
||||
{
|
||||
struct kszphy_priv *lan8814 = phydev->priv;
|
||||
int val;
|
||||
|
||||
/* Reset the PHY */
|
||||
val = lanphy_read_page_reg(phydev, 4, LAN8814_QSGMII_SOFT_RESET);
|
||||
val |= LAN8814_QSGMII_SOFT_RESET_BIT;
|
||||
lanphy_write_page_reg(phydev, 4, LAN8814_QSGMII_SOFT_RESET, val);
|
||||
|
||||
/* Disable ANEG with QSGMII PCS Host side */
|
||||
val = lanphy_read_page_reg(phydev, 5, LAN8814_QSGMII_PCS1G_ANEG_CONFIG);
|
||||
val &= ~LAN8814_QSGMII_PCS1G_ANEG_CONFIG_ANEG_ENA;
|
||||
lanphy_write_page_reg(phydev, 5, LAN8814_QSGMII_PCS1G_ANEG_CONFIG, val);
|
||||
lanphy_modify_page_reg(phydev, LAN8814_PAGE_PORT_REGS,
|
||||
LAN8814_QSGMII_PCS1G_ANEG_CONFIG,
|
||||
LAN8814_QSGMII_PCS1G_ANEG_CONFIG_ANEG_ENA,
|
||||
0);
|
||||
|
||||
/* MDI-X setting for swap A,B transmit */
|
||||
val = lanphy_read_page_reg(phydev, 2, LAN8814_ALIGN_SWAP);
|
||||
val &= ~LAN8814_ALIGN_TX_A_B_SWAP_MASK;
|
||||
val |= LAN8814_ALIGN_TX_A_B_SWAP;
|
||||
lanphy_write_page_reg(phydev, 2, LAN8814_ALIGN_SWAP, val);
|
||||
lanphy_modify_page_reg(phydev, LAN8814_PAGE_PCS_DIGITAL, LAN8814_ALIGN_SWAP,
|
||||
LAN8814_ALIGN_TX_A_B_SWAP_MASK,
|
||||
LAN8814_ALIGN_TX_A_B_SWAP);
|
||||
|
||||
if (lan8814->led_mode >= 0)
|
||||
lan8814_setup_led(phydev, lan8814->led_mode);
|
||||
@@ -4000,29 +4124,24 @@ static int lan8814_release_coma_mode(struct phy_device *phydev)
|
||||
|
||||
static void lan8814_clear_2psp_bit(struct phy_device *phydev)
|
||||
{
|
||||
u16 val;
|
||||
|
||||
/* It was noticed that when traffic is passing through the PHY and the
|
||||
* cable is removed then the LED was still one even though there is no
|
||||
* link
|
||||
*/
|
||||
val = lanphy_read_page_reg(phydev, 2, LAN8814_EEE_STATE);
|
||||
val &= ~LAN8814_EEE_STATE_MASK2P5P;
|
||||
lanphy_write_page_reg(phydev, 2, LAN8814_EEE_STATE, val);
|
||||
lanphy_modify_page_reg(phydev, LAN8814_PAGE_PCS_DIGITAL, LAN8814_EEE_STATE,
|
||||
LAN8814_EEE_STATE_MASK2P5P,
|
||||
0);
|
||||
}
|
||||
|
||||
static void lan8814_update_meas_time(struct phy_device *phydev)
|
||||
{
|
||||
u16 val;
|
||||
|
||||
/* By setting the measure time to a value of 0xb this will allow cables
|
||||
* longer than 100m to be used. This configuration can be used
|
||||
* regardless of the mode of operation of the PHY
|
||||
*/
|
||||
val = lanphy_read_page_reg(phydev, 1, LAN8814_PD_CONTROLS);
|
||||
val &= ~LAN8814_PD_CONTROLS_PD_MEAS_TIME_MASK;
|
||||
val |= LAN8814_PD_CONTROLS_PD_MEAS_TIME_VAL;
|
||||
lanphy_write_page_reg(phydev, 1, LAN8814_PD_CONTROLS, val);
|
||||
lanphy_modify_page_reg(phydev, LAN8814_PAGE_AFE_PMA, LAN8814_PD_CONTROLS,
|
||||
LAN8814_PD_CONTROLS_PD_MEAS_TIME_MASK,
|
||||
LAN8814_PD_CONTROLS_PD_MEAS_TIME_VAL);
|
||||
}
|
||||
|
||||
static int lan8814_probe(struct phy_device *phydev)
|
||||
@@ -4045,11 +4164,17 @@ static int lan8814_probe(struct phy_device *phydev)
|
||||
/* Strap-in value for PHY address, below register read gives starting
|
||||
* phy address value
|
||||
*/
|
||||
addr = lanphy_read_page_reg(phydev, 4, 0) & 0x1F;
|
||||
addr = lanphy_read_page_reg(phydev, LAN8814_PAGE_COMMON_REGS, 0) & 0x1F;
|
||||
devm_phy_package_join(&phydev->mdio.dev, phydev,
|
||||
addr, sizeof(struct lan8814_shared_priv));
|
||||
|
||||
if (phy_package_init_once(phydev)) {
|
||||
/* Reset the PHY */
|
||||
lanphy_modify_page_reg(phydev, LAN8814_PAGE_COMMON_REGS,
|
||||
LAN8814_QSGMII_SOFT_RESET,
|
||||
LAN8814_QSGMII_SOFT_RESET_BIT,
|
||||
LAN8814_QSGMII_SOFT_RESET_BIT);
|
||||
|
||||
err = lan8814_release_coma_mode(phydev);
|
||||
if (err)
|
||||
return err;
|
||||
|
||||
@@ -2455,22 +2455,28 @@ static void receive_buf(struct virtnet_info *vi, struct receive_queue *rq,
		return;
	}

	/* 1. Save the flags early, as the XDP program might overwrite them.
	/* About the flags below:
	 * 1. Save the flags early, as the XDP program might overwrite them.
	 * These flags ensure packets marked as VIRTIO_NET_HDR_F_DATA_VALID
	 * stay valid after XDP processing.
	 * 2. XDP doesn't work with partially checksummed packets (refer to
	 * virtnet_xdp_set()), so packets marked as
	 * VIRTIO_NET_HDR_F_NEEDS_CSUM get dropped during XDP processing.
	 */
	flags = ((struct virtio_net_common_hdr *)buf)->hdr.flags;

	if (vi->mergeable_rx_bufs)
	if (vi->mergeable_rx_bufs) {
		flags = ((struct virtio_net_common_hdr *)buf)->hdr.flags;
		skb = receive_mergeable(dev, vi, rq, buf, ctx, len, xdp_xmit,
					stats);
	else if (vi->big_packets)
	} else if (vi->big_packets) {
		void *p = page_address((struct page *)buf);

		flags = ((struct virtio_net_common_hdr *)p)->hdr.flags;
		skb = receive_big(dev, vi, rq, buf, len, stats);
	else
	} else {
		flags = ((struct virtio_net_common_hdr *)buf)->hdr.flags;
		skb = receive_small(dev, vi, rq, buf, ctx, len, xdp_xmit, stats);
	}

	if (unlikely(!skb))
		return;
||||
|
||||
@@ -936,6 +936,8 @@ unsupported_wcn6855_soc:
|
||||
return 0;
|
||||
|
||||
err_free_irq:
|
||||
/* __free_irq() expects the caller to have cleared the affinity hint */
|
||||
ath11k_pci_set_irq_affinity_hint(ab_pci, NULL);
|
||||
ath11k_pcic_free_irq(ab);
|
||||
|
||||
err_ce_free:
|
||||
|
||||
@@ -5961,6 +5961,9 @@ static int wmi_process_mgmt_tx_comp(struct ath11k *ar,
|
||||
dma_unmap_single(ar->ab->dev, skb_cb->paddr, msdu->len, DMA_TO_DEVICE);
|
||||
|
||||
info = IEEE80211_SKB_CB(msdu);
|
||||
memset(&info->status, 0, sizeof(info->status));
|
||||
info->status.rates[0].idx = -1;
|
||||
|
||||
if ((!(info->flags & IEEE80211_TX_CTL_NO_ACK)) &&
|
||||
!tx_compl_param->status) {
|
||||
info->flags |= IEEE80211_TX_STAT_ACK;
|
||||
|
||||
@@ -54,7 +54,7 @@ static int scmi_pd_power_off(struct generic_pm_domain *domain)
|
||||
|
||||
static int scmi_pm_domain_probe(struct scmi_device *sdev)
|
||||
{
|
||||
int num_domains, i;
|
||||
int num_domains, i, ret;
|
||||
struct device *dev = &sdev->dev;
|
||||
struct device_node *np = dev->of_node;
|
||||
struct scmi_pm_domain *scmi_pd;
|
||||
@@ -113,9 +113,18 @@ static int scmi_pm_domain_probe(struct scmi_device *sdev)
|
||||
scmi_pd_data->domains = domains;
|
||||
scmi_pd_data->num_domains = num_domains;
|
||||
|
||||
ret = of_genpd_add_provider_onecell(np, scmi_pd_data);
|
||||
if (ret)
|
||||
goto err_rm_genpds;
|
||||
|
||||
dev_set_drvdata(dev, scmi_pd_data);
|
||||
|
||||
return of_genpd_add_provider_onecell(np, scmi_pd_data);
|
||||
return 0;
|
||||
err_rm_genpds:
|
||||
for (i = num_domains - 1; i >= 0; i--)
|
||||
pm_genpd_remove(domains[i]);
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
static void scmi_pm_domain_remove(struct scmi_device *sdev)
|
||||
|
||||
@@ -537,6 +537,8 @@ static void imx_gpc_remove(struct platform_device *pdev)
|
||||
return;
|
||||
}
|
||||
}
|
||||
|
||||
of_node_put(pgc_node);
|
||||
}
|
||||
|
||||
static struct platform_driver imx_gpc_driver = {
|
||||
|
||||
@@ -92,13 +92,14 @@ static const struct of_device_id exynos_pm_domain_of_match[] = {
|
||||
{ },
|
||||
};
|
||||
|
||||
static const char *exynos_get_domain_name(struct device_node *node)
|
||||
static const char *exynos_get_domain_name(struct device *dev,
|
||||
struct device_node *node)
|
||||
{
|
||||
const char *name;
|
||||
|
||||
if (of_property_read_string(node, "label", &name) < 0)
|
||||
name = kbasename(node->full_name);
|
||||
return kstrdup_const(name, GFP_KERNEL);
|
||||
return devm_kstrdup_const(dev, name, GFP_KERNEL);
|
||||
}
|
||||
|
||||
static int exynos_pd_probe(struct platform_device *pdev)
|
||||
@@ -115,15 +116,13 @@ static int exynos_pd_probe(struct platform_device *pdev)
|
||||
if (!pd)
|
||||
return -ENOMEM;
|
||||
|
||||
pd->pd.name = exynos_get_domain_name(np);
|
||||
pd->pd.name = exynos_get_domain_name(dev, np);
|
||||
if (!pd->pd.name)
|
||||
return -ENOMEM;
|
||||
|
||||
pd->base = of_iomap(np, 0);
|
||||
if (!pd->base) {
|
||||
kfree_const(pd->pd.name);
|
||||
if (!pd->base)
|
||||
return -ENODEV;
|
||||
}
|
||||
|
||||
pd->pd.power_off = exynos_pd_power_off;
|
||||
pd->pd.power_on = exynos_pd_power_on;
|
||||
|
||||
@@ -334,6 +334,7 @@ static int reg_fixed_voltage_probe(struct platform_device *pdev)
|
||||
ret = dev_err_probe(&pdev->dev, PTR_ERR(drvdata->dev),
|
||||
"Failed to register regulator: %ld\n",
|
||||
PTR_ERR(drvdata->dev));
|
||||
gpiod_put(cfg.ena_gpiod);
|
||||
return ret;
|
||||
}
|
||||
|
||||
|
||||
@@ -2879,6 +2879,16 @@ static acpi_status acpi_register_spi_device(struct spi_controller *ctlr,
|
||||
acpi_set_modalias(adev, acpi_device_hid(adev), spi->modalias,
|
||||
sizeof(spi->modalias));
|
||||
|
||||
/*
|
||||
* This gets re-tried in spi_probe() for -EPROBE_DEFER handling in case
|
||||
* the GPIO controller does not have a driver yet. This needs to be done
|
||||
* here too, because this call sets the GPIO direction and/or bias.
|
||||
* Setting these needs to be done even if there is no driver, in which
|
||||
* case spi_probe() will never get called.
|
||||
*/
|
||||
if (spi->irq < 0)
|
||||
spi->irq = acpi_dev_gpio_irq_get(adev, 0);
|
||||
|
||||
acpi_device_set_enumerated(adev);
|
||||
|
||||
adev->power.flags.ignore_parent = true;
|
||||
|
||||
@@ -65,6 +65,16 @@ struct hv_uio_private_data {
|
||||
char send_name[32];
|
||||
};
|
||||
|
||||
static void set_event(struct vmbus_channel *channel, s32 irq_state)
|
||||
{
|
||||
channel->inbound.ring_buffer->interrupt_mask = !irq_state;
|
||||
if (!channel->offermsg.monitor_allocated && irq_state) {
|
||||
/* MB is needed for host to see the interrupt mask first */
|
||||
virt_mb();
|
||||
vmbus_set_event(channel);
|
||||
}
|
||||
}
|
||||
|
||||
/*
|
||||
* This is the irqcontrol callback to be registered to uio_info.
|
||||
* It can be used to disable/enable interrupt from user space processes.
|
||||
@@ -79,12 +89,15 @@ hv_uio_irqcontrol(struct uio_info *info, s32 irq_state)
|
||||
{
|
||||
struct hv_uio_private_data *pdata = info->priv;
|
||||
struct hv_device *dev = pdata->device;
|
||||
struct vmbus_channel *primary, *sc;
|
||||
|
||||
dev->channel->inbound.ring_buffer->interrupt_mask = !irq_state;
|
||||
virt_mb();
|
||||
primary = dev->channel;
|
||||
set_event(primary, irq_state);
|
||||
|
||||
if (!dev->channel->offermsg.monitor_allocated && irq_state)
|
||||
vmbus_setevent(dev->channel);
|
||||
mutex_lock(&vmbus_connection.channel_mutex);
|
||||
list_for_each_entry(sc, &primary->sc_list, sc_list)
|
||||
set_event(sc, irq_state);
|
||||
mutex_unlock(&vmbus_connection.channel_mutex);
|
||||
|
||||
return 0;
|
||||
}
|
||||
@@ -95,11 +108,18 @@ hv_uio_irqcontrol(struct uio_info *info, s32 irq_state)
|
||||
static void hv_uio_channel_cb(void *context)
|
||||
{
|
||||
struct vmbus_channel *chan = context;
|
||||
struct hv_device *hv_dev = chan->device_obj;
|
||||
struct hv_uio_private_data *pdata = hv_get_drvdata(hv_dev);
|
||||
struct hv_device *hv_dev;
|
||||
struct hv_uio_private_data *pdata;
|
||||
|
||||
virt_mb();
|
||||
|
||||
/*
|
||||
* The callback may come from a subchannel, in which case look
|
||||
* for the hv device in the primary channel
|
||||
*/
|
||||
hv_dev = chan->primary_channel ?
|
||||
chan->primary_channel->device_obj : chan->device_obj;
|
||||
pdata = hv_get_drvdata(hv_dev);
|
||||
uio_event_notify(&pdata->info);
|
||||
}
|
||||
|
||||
|
||||
@@ -174,8 +174,10 @@ static int data_reloc_print_warning_inode(u64 inum, u64 offset, u64 num_bytes,
|
||||
return ret;
|
||||
}
|
||||
ret = paths_from_inode(inum, ipath);
|
||||
if (ret < 0)
|
||||
if (ret < 0) {
|
||||
btrfs_put_root(local_root);
|
||||
goto err;
|
||||
}
|
||||
|
||||
/*
|
||||
* We deliberately ignore the bit ipath might have been too small to
|
||||
|
||||
@@ -2091,6 +2091,7 @@ static int scrub_raid56_parity_stripe(struct scrub_ctx *sctx,
|
||||
ret = btrfs_map_block(fs_info, BTRFS_MAP_WRITE, full_stripe_start,
|
||||
&length, &bioc, NULL, NULL);
|
||||
if (ret < 0) {
|
||||
bio_put(bio);
|
||||
btrfs_put_bioc(bioc);
|
||||
btrfs_bio_counter_dec(fs_info);
|
||||
goto out;
|
||||
@@ -2100,6 +2101,7 @@ static int scrub_raid56_parity_stripe(struct scrub_ctx *sctx,
|
||||
btrfs_put_bioc(bioc);
|
||||
if (!rbio) {
|
||||
ret = -ENOMEM;
|
||||
bio_put(bio);
|
||||
btrfs_bio_counter_dec(fs_info);
|
||||
goto out;
|
||||
}
|
||||
|
||||
@@ -6795,7 +6795,7 @@ log_extents:
|
||||
* a power failure unless the log was synced as part of an fsync
|
||||
* against any other unrelated inode.
|
||||
*/
|
||||
if (inode_only != LOG_INODE_EXISTS)
|
||||
if (!ctx->logging_new_name && inode_only != LOG_INODE_EXISTS)
|
||||
inode->last_log_commit = inode->last_sub_trans;
|
||||
spin_unlock(&inode->lock);
|
||||
|
||||
|
||||
@@ -1300,6 +1300,7 @@ static int btrfs_load_zone_info(struct btrfs_fs_info *fs_info, int zone_idx,
|
||||
if (!btrfs_dev_is_sequential(device, info->physical)) {
|
||||
up_read(&dev_replace->rwsem);
|
||||
info->alloc_offset = WP_CONVENTIONAL;
|
||||
info->capacity = device->zone_info->zone_size;
|
||||
return 0;
|
||||
}
|
||||
|
||||
@@ -1598,8 +1599,6 @@ int btrfs_load_block_group_zone_info(struct btrfs_block_group *cache, bool new)
|
||||
set_bit(BLOCK_GROUP_FLAG_SEQUENTIAL_ZONE, &cache->runtime_flags);
|
||||
|
||||
if (num_conventional > 0) {
|
||||
/* Zone capacity is always zone size in emulation */
|
||||
cache->zone_capacity = cache->length;
|
||||
ret = calculate_alloc_pointer(cache, &last_alloc, new);
|
||||
if (ret) {
|
||||
btrfs_err(fs_info,
|
||||
@@ -1608,6 +1607,7 @@ int btrfs_load_block_group_zone_info(struct btrfs_block_group *cache, bool new)
|
||||
goto out;
|
||||
} else if (map->num_stripes == num_conventional) {
|
||||
cache->alloc_offset = last_alloc;
|
||||
cache->zone_capacity = cache->length;
|
||||
set_bit(BLOCK_GROUP_FLAG_ZONE_IS_ACTIVE, &cache->runtime_flags);
|
||||
goto out;
|
||||
}
|
||||
|
||||
@@ -178,7 +178,6 @@ static int z_erofs_zstd_decompress(struct z_erofs_decompress_req *rq,
|
||||
dctx.bounce = strm->bounce;
|
||||
|
||||
do {
|
||||
dctx.avail_out = out_buf.size - out_buf.pos;
|
||||
dctx.inbuf_sz = in_buf.size;
|
||||
dctx.inbuf_pos = in_buf.pos;
|
||||
err = z_erofs_stream_switch_bufs(&dctx, &out_buf.dst,
|
||||
@@ -194,14 +193,18 @@ static int z_erofs_zstd_decompress(struct z_erofs_decompress_req *rq,
|
||||
in_buf.pos = dctx.inbuf_pos;
|
||||
|
||||
zerr = zstd_decompress_stream(stream, &out_buf, &in_buf);
|
||||
if (zstd_is_error(zerr) || (!zerr && rq->outputsize)) {
|
||||
dctx.avail_out = out_buf.size - out_buf.pos;
|
||||
if (zstd_is_error(zerr) ||
|
||||
((rq->outputsize + dctx.avail_out) && (!zerr || (zerr > 0 &&
|
||||
!(rq->inputsize + in_buf.size - in_buf.pos))))) {
|
||||
erofs_err(sb, "failed to decompress in[%u] out[%u]: %s",
|
||||
rq->inputsize, rq->outputsize,
|
||||
zerr ? zstd_get_error_name(zerr) : "unexpected end of stream");
|
||||
zstd_is_error(zerr) ? zstd_get_error_name(zerr) :
|
||||
"unexpected end of stream");
|
||||
err = -EFSCORRUPTED;
|
||||
break;
|
||||
}
|
||||
} while (rq->outputsize || out_buf.pos < out_buf.size);
|
||||
} while (rq->outputsize + dctx.avail_out);
|
||||
|
||||
if (dctx.kout)
|
||||
kunmap_local(dctx.kout);
|
||||
|
||||
@@ -635,10 +635,14 @@ static int exfat_find(struct inode *dir, struct qstr *qname,
|
||||
|
||||
info->type = exfat_get_entry_type(ep);
|
||||
info->attr = le16_to_cpu(ep->dentry.file.attr);
|
||||
info->size = le64_to_cpu(ep2->dentry.stream.valid_size);
|
||||
info->valid_size = le64_to_cpu(ep2->dentry.stream.valid_size);
|
||||
info->size = le64_to_cpu(ep2->dentry.stream.size);
|
||||
|
||||
if (info->valid_size < 0) {
|
||||
exfat_fs_error(sb, "data valid size is invalid(%lld)", info->valid_size);
|
||||
return -EIO;
|
||||
}
|
||||
|
||||
if (unlikely(EXFAT_B_TO_CLU_ROUND_UP(info->size, sbi) > sbi->used_clusters)) {
|
||||
exfat_fs_error(sb, "data size is invalid(%lld)", info->size);
|
||||
return -EIO;
|
||||
|
||||
@@ -4688,6 +4688,11 @@ static inline int ext4_iget_extra_inode(struct inode *inode,
|
||||
*magic == cpu_to_le32(EXT4_XATTR_MAGIC)) {
|
||||
int err;
|
||||
|
||||
err = xattr_check_inode(inode, IHDR(inode, raw_inode),
|
||||
ITAIL(inode, raw_inode));
|
||||
if (err)
|
||||
return err;
|
||||
|
||||
ext4_set_inode_state(inode, EXT4_STATE_XATTR);
|
||||
err = ext4_find_inline_data_nolock(inode);
|
||||
if (!err && ext4_has_inline_data(inode))
|
||||
|
||||
@@ -312,7 +312,7 @@ __ext4_xattr_check_block(struct inode *inode, struct buffer_head *bh,
|
||||
__ext4_xattr_check_block((inode), (bh), __func__, __LINE__)
|
||||
|
||||
|
||||
static inline int
|
||||
int
|
||||
__xattr_check_inode(struct inode *inode, struct ext4_xattr_ibody_header *header,
|
||||
void *end, const char *function, unsigned int line)
|
||||
{
|
||||
@@ -320,9 +320,6 @@ __xattr_check_inode(struct inode *inode, struct ext4_xattr_ibody_header *header,
|
||||
function, line);
|
||||
}
|
||||
|
||||
#define xattr_check_inode(inode, header, end) \
|
||||
__xattr_check_inode((inode), (header), (end), __func__, __LINE__)
|
||||
|
||||
static int
|
||||
xattr_find_entry(struct inode *inode, struct ext4_xattr_entry **pentry,
|
||||
void *end, int name_index, const char *name, int sorted)
|
||||
@@ -653,10 +650,7 @@ ext4_xattr_ibody_get(struct inode *inode, int name_index, const char *name,
|
||||
return error;
|
||||
raw_inode = ext4_raw_inode(&iloc);
|
||||
header = IHDR(inode, raw_inode);
|
||||
end = (void *)raw_inode + EXT4_SB(inode->i_sb)->s_inode_size;
|
||||
error = xattr_check_inode(inode, header, end);
|
||||
if (error)
|
||||
goto cleanup;
|
||||
end = ITAIL(inode, raw_inode);
|
||||
entry = IFIRST(header);
|
||||
error = xattr_find_entry(inode, &entry, end, name_index, name, 0);
|
||||
if (error)
|
||||
@@ -787,7 +781,6 @@ ext4_xattr_ibody_list(struct dentry *dentry, char *buffer, size_t buffer_size)
|
||||
struct ext4_xattr_ibody_header *header;
|
||||
struct ext4_inode *raw_inode;
|
||||
struct ext4_iloc iloc;
|
||||
void *end;
|
||||
int error;
|
||||
|
||||
if (!ext4_test_inode_state(inode, EXT4_STATE_XATTR))
|
||||
@@ -797,14 +790,9 @@ ext4_xattr_ibody_list(struct dentry *dentry, char *buffer, size_t buffer_size)
|
||||
return error;
|
||||
raw_inode = ext4_raw_inode(&iloc);
|
||||
header = IHDR(inode, raw_inode);
|
||||
end = (void *)raw_inode + EXT4_SB(inode->i_sb)->s_inode_size;
|
||||
error = xattr_check_inode(inode, header, end);
|
||||
if (error)
|
||||
goto cleanup;
|
||||
error = ext4_xattr_list_entries(dentry, IFIRST(header),
|
||||
buffer, buffer_size);
|
||||
|
||||
cleanup:
|
||||
brelse(iloc.bh);
|
||||
return error;
|
||||
}
|
||||
@@ -872,7 +860,6 @@ int ext4_get_inode_usage(struct inode *inode, qsize_t *usage)
|
||||
struct ext4_xattr_ibody_header *header;
|
||||
struct ext4_xattr_entry *entry;
|
||||
qsize_t ea_inode_refs = 0;
|
||||
void *end;
|
||||
int ret;
|
||||
|
||||
lockdep_assert_held_read(&EXT4_I(inode)->xattr_sem);
|
||||
@@ -883,10 +870,6 @@ int ext4_get_inode_usage(struct inode *inode, qsize_t *usage)
|
||||
goto out;
|
||||
raw_inode = ext4_raw_inode(&iloc);
|
||||
header = IHDR(inode, raw_inode);
|
||||
end = (void *)raw_inode + EXT4_SB(inode->i_sb)->s_inode_size;
|
||||
ret = xattr_check_inode(inode, header, end);
|
||||
if (ret)
|
||||
goto out;
|
||||
|
||||
for (entry = IFIRST(header); !IS_LAST_ENTRY(entry);
|
||||
entry = EXT4_XATTR_NEXT(entry))
|
||||
@@ -2249,11 +2232,8 @@ int ext4_xattr_ibody_find(struct inode *inode, struct ext4_xattr_info *i,
|
||||
header = IHDR(inode, raw_inode);
|
||||
is->s.base = is->s.first = IFIRST(header);
|
||||
is->s.here = is->s.first;
|
||||
is->s.end = (void *)raw_inode + EXT4_SB(inode->i_sb)->s_inode_size;
|
||||
is->s.end = ITAIL(inode, raw_inode);
|
||||
if (ext4_test_inode_state(inode, EXT4_STATE_XATTR)) {
|
||||
error = xattr_check_inode(inode, header, is->s.end);
|
||||
if (error)
|
||||
return error;
|
||||
/* Find the named attribute. */
|
||||
error = xattr_find_entry(inode, &is->s.here, is->s.end,
|
||||
i->name_index, i->name, 0);
|
||||
@@ -2800,14 +2780,10 @@ retry:
|
||||
*/
|
||||
|
||||
base = IFIRST(header);
|
||||
end = (void *)raw_inode + EXT4_SB(inode->i_sb)->s_inode_size;
|
||||
end = ITAIL(inode, raw_inode);
|
||||
min_offs = end - base;
|
||||
total_ino = sizeof(struct ext4_xattr_ibody_header) + sizeof(u32);
|
||||
|
||||
error = xattr_check_inode(inode, header, end);
|
||||
if (error)
|
||||
goto cleanup;
|
||||
|
||||
ifree = ext4_xattr_free_space(base, &min_offs, base, &total_ino);
|
||||
if (ifree >= isize_diff)
|
||||
goto shift;
|
||||
|
||||
@@ -67,6 +67,9 @@ struct ext4_xattr_entry {
|
||||
((void *)raw_inode + \
|
||||
EXT4_GOOD_OLD_INODE_SIZE + \
|
||||
EXT4_I(inode)->i_extra_isize))
|
||||
#define ITAIL(inode, raw_inode) \
|
||||
((void *)(raw_inode) + \
|
||||
EXT4_SB((inode)->i_sb)->s_inode_size)
|
||||
#define IFIRST(hdr) ((struct ext4_xattr_entry *)((hdr)+1))
|
||||
|
||||
/*
|
||||
@@ -206,6 +209,13 @@ extern int ext4_xattr_ibody_set(handle_t *handle, struct inode *inode,
|
||||
extern struct mb_cache *ext4_xattr_create_cache(void);
|
||||
extern void ext4_xattr_destroy_cache(struct mb_cache *);
|
||||
|
||||
extern int
|
||||
__xattr_check_inode(struct inode *inode, struct ext4_xattr_ibody_header *header,
|
||||
void *end, const char *function, unsigned int line);
|
||||
|
||||
#define xattr_check_inode(inode, header, end) \
|
||||
__xattr_check_inode((inode), (header), (end), __func__, __LINE__)
|
||||
|
||||
#ifdef CONFIG_EXT4_FS_SECURITY
|
||||
extern int ext4_init_security(handle_t *handle, struct inode *inode,
|
||||
struct inode *dir, const struct qstr *qstr);
|
||||
|
||||
@@ -1236,7 +1236,7 @@ int f2fs_truncate_partial_cluster(struct inode *inode, u64 from, bool lock)
|
||||
int i;
|
||||
|
||||
for (i = cluster_size - 1; i >= 0; i--) {
|
||||
loff_t start = rpages[i]->index << PAGE_SHIFT;
|
||||
loff_t start = (loff_t)rpages[i]->index << PAGE_SHIFT;
|
||||
|
||||
if (from <= start) {
|
||||
zero_user_segment(rpages[i], 0, PAGE_SIZE);
|
||||
|
||||
@@ -372,7 +372,7 @@ static int virtio_fs_add_queues_sysfs(struct virtio_fs *fs)
|
||||
|
||||
sprintf(buff, "%d", i);
|
||||
fsvq->kobj = kobject_create_and_add(buff, fs->mqs_kobj);
|
||||
if (!fs->mqs_kobj) {
|
||||
if (!fsvq->kobj) {
|
||||
ret = -ENOMEM;
|
||||
goto out_del;
|
||||
}
|
||||
|
||||
@@ -972,7 +972,7 @@ static int hostfs_parse_param(struct fs_context *fc, struct fs_parameter *param)
|
||||
{
|
||||
struct hostfs_fs_info *fsi = fc->s_fs_info;
|
||||
struct fs_parse_result result;
|
||||
char *host_root;
|
||||
char *host_root, *tmp_root;
|
||||
int opt;
|
||||
|
||||
opt = fs_parse(fc, hostfs_param_specs, param, &result);
|
||||
@@ -983,11 +983,13 @@ static int hostfs_parse_param(struct fs_context *fc, struct fs_parameter *param)
|
||||
case Opt_hostfs:
|
||||
host_root = param->string;
|
||||
if (!*host_root)
|
||||
host_root = "";
|
||||
fsi->host_root_path =
|
||||
kasprintf(GFP_KERNEL, "%s/%s", root_ino, host_root);
|
||||
if (fsi->host_root_path == NULL)
|
||||
break;
|
||||
tmp_root = kasprintf(GFP_KERNEL, "%s%s",
|
||||
fsi->host_root_path, host_root);
|
||||
if (!tmp_root)
|
||||
return -ENOMEM;
|
||||
kfree(fsi->host_root_path);
|
||||
fsi->host_root_path = tmp_root;
|
||||
break;
|
||||
}
|
||||
|
||||
@@ -997,17 +999,17 @@ static int hostfs_parse_param(struct fs_context *fc, struct fs_parameter *param)
|
||||
static int hostfs_parse_monolithic(struct fs_context *fc, void *data)
|
||||
{
|
||||
struct hostfs_fs_info *fsi = fc->s_fs_info;
|
||||
char *host_root = (char *)data;
|
||||
char *tmp_root, *host_root = (char *)data;
|
||||
|
||||
/* NULL is printed as '(null)' by printf(): avoid that. */
|
||||
if (host_root == NULL)
|
||||
host_root = "";
|
||||
return 0;
|
||||
|
||||
fsi->host_root_path =
|
||||
kasprintf(GFP_KERNEL, "%s/%s", root_ino, host_root);
|
||||
if (fsi->host_root_path == NULL)
|
||||
tmp_root = kasprintf(GFP_KERNEL, "%s%s", fsi->host_root_path, host_root);
|
||||
if (!tmp_root)
|
||||
return -ENOMEM;
|
||||
|
||||
kfree(fsi->host_root_path);
|
||||
fsi->host_root_path = tmp_root;
|
||||
return 0;
|
||||
}
|
||||
|
||||
@@ -1042,6 +1044,11 @@ static int hostfs_init_fs_context(struct fs_context *fc)
|
||||
if (!fsi)
|
||||
return -ENOMEM;
|
||||
|
||||
fsi->host_root_path = kasprintf(GFP_KERNEL, "%s/", root_ino);
|
||||
if (!fsi->host_root_path) {
|
||||
kfree(fsi);
|
||||
return -ENOMEM;
|
||||
}
|
||||
fc->s_fs_info = fsi;
|
||||
fc->ops = &hostfs_context_ops;
|
||||
return 0;
|
||||
|
||||
@@ -158,7 +158,8 @@ static void mnt_ns_release(struct mnt_namespace *ns)
        kfree(ns);
    }
}
DEFINE_FREE(mnt_ns_release, struct mnt_namespace *, if (_T) mnt_ns_release(_T))
DEFINE_FREE(mnt_ns_release, struct mnt_namespace *,
        if (!IS_ERR(_T)) mnt_ns_release(_T))

static void mnt_ns_tree_remove(struct mnt_namespace *ns)
{
@@ -5325,7 +5326,7 @@ static int copy_mnt_id_req(const struct mnt_id_req __user *req,
    ret = copy_struct_from_user(kreq, sizeof(*kreq), req, usize);
    if (ret)
        return ret;
    if (kreq->spare != 0)
    if (kreq->mnt_ns_fd != 0 && kreq->mnt_ns_id)
        return -EINVAL;
    /* The first valid unique mount id is MNT_UNIQUE_ID_OFFSET + 1. */
    if (kreq->mnt_id <= MNT_UNIQUE_ID_OFFSET)
@@ -5342,16 +5343,12 @@ static struct mnt_namespace *grab_requested_mnt_ns(const struct mnt_id_req *kreq
{
    struct mnt_namespace *mnt_ns;

    if (kreq->mnt_ns_id && kreq->spare)
        return ERR_PTR(-EINVAL);

    if (kreq->mnt_ns_id)
        return lookup_mnt_ns(kreq->mnt_ns_id);

    if (kreq->spare) {
    if (kreq->mnt_ns_id) {
        mnt_ns = lookup_mnt_ns(kreq->mnt_ns_id);
    } else if (kreq->mnt_ns_fd) {
        struct ns_common *ns;

        CLASS(fd, f)(kreq->spare);
        CLASS(fd, f)(kreq->mnt_ns_fd);
        if (fd_empty(f))
            return ERR_PTR(-EBADF);

@@ -5366,6 +5363,8 @@ static struct mnt_namespace *grab_requested_mnt_ns(const struct mnt_id_req *kreq
    } else {
        mnt_ns = current->nsproxy->mnt_ns;
    }
    if (!mnt_ns)
        return ERR_PTR(-ENOENT);

    refcount_inc(&mnt_ns->passive);
    return mnt_ns;
@@ -5390,8 +5389,8 @@ SYSCALL_DEFINE4(statmount, const struct mnt_id_req __user *, req,
        return ret;

    ns = grab_requested_mnt_ns(&kreq);
    if (!ns)
        return -ENOENT;
    if (IS_ERR(ns))
        return PTR_ERR(ns);

    if (kreq.mnt_ns_id && (ns != current->nsproxy->mnt_ns) &&
        !ns_capable_noaudit(ns->user_ns, CAP_SYS_ADMIN))
@@ -5500,8 +5499,8 @@ static void __free_klistmount_free(const struct klistmount *kls)
static inline int prepare_klistmount(struct klistmount *kls, struct mnt_id_req *kreq,
                     size_t nr_mnt_ids)
{

    u64 last_mnt_id = kreq->param;
    struct mnt_namespace *ns;

    /* The first valid unique mount id is MNT_UNIQUE_ID_OFFSET + 1. */
    if (last_mnt_id != 0 && last_mnt_id <= MNT_UNIQUE_ID_OFFSET)
@@ -5515,9 +5514,10 @@ static inline int prepare_klistmount(struct klistmount *kls, struct mnt_id_req *
    if (!kls->kmnt_ids)
        return -ENOMEM;

    kls->ns = grab_requested_mnt_ns(kreq);
    if (!kls->ns)
        return -ENOENT;
    ns = grab_requested_mnt_ns(kreq);
    if (IS_ERR(ns))
        return PTR_ERR(ns);
    kls->ns = ns;

    kls->mnt_parent_id = kreq->mnt_id;
    return 0;
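grab_requested_mnt_ns() now reports failures as error pointers instead of NULL, and the statmount()/listmount() callers propagate them with IS_ERR()/PTR_ERR(). A self-contained sketch of that convention; the ERR_PTR()/IS_ERR() macros below are simplified stand-ins for the kernel's <linux/err.h>, and the lookup table and names are invented:

```c
/* Sketch of the error-pointer convention adopted above. The macros are
 * simplified stand-ins for <linux/err.h>; the table and names are made up.
 */
#include <errno.h>
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

#define MAX_ERRNO    4095
#define ERR_PTR(err) ((void *)(intptr_t)(err))
#define PTR_ERR(ptr) ((long)(intptr_t)(ptr))
#define IS_ERR(ptr)  ((uintptr_t)(ptr) >= (uintptr_t)-MAX_ERRNO)

struct mnt_ns { uint64_t id; };

static struct mnt_ns table[] = { { 1 }, { 2 } };

/* Failure is reported as an encoded errno, never as NULL. */
static struct mnt_ns *lookup_ns(uint64_t id)
{
    if (id == 0)
        return ERR_PTR(-EINVAL);
    for (size_t i = 0; i < sizeof(table) / sizeof(table[0]); i++)
        if (table[i].id == id)
            return &table[i];
    return ERR_PTR(-ENOENT);
}

/* Callers test IS_ERR() and propagate with PTR_ERR(), as the syscall
 * paths above now do. */
static int use_ns(uint64_t id)
{
    struct mnt_ns *ns = lookup_ns(id);

    if (IS_ERR(ns))
        return (int)PTR_ERR(ns);
    printf("found ns %llu\n", (unsigned long long)ns->id);
    return 0;
}

int main(void)
{
    printf("use_ns(2) -> %d\n", use_ns(2));   /* 0 */
    printf("use_ns(7) -> %d\n", use_ns(7));   /* -ENOENT */
    return 0;
}
```

Returning the encoded errno lets each failure site pick a distinct code (-EINVAL, -EBADF, -ENOENT) instead of collapsing everything into one caller-chosen value.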
fs/nfs/dir.c
@@ -2270,7 +2270,7 @@ int nfs_atomic_open_v23(struct inode *dir, struct dentry *dentry,
            struct file *file, unsigned int open_flags,
            umode_t mode)
{

    struct dentry *res = NULL;
    /* Same as look+open from lookup_open(), but with different O_TRUNC
     * handling.
     */
@@ -2280,26 +2280,21 @@ int nfs_atomic_open_v23(struct inode *dir, struct dentry *dentry,
        return -ENAMETOOLONG;

    if (open_flags & O_CREAT) {
        file->f_mode |= FMODE_CREATED;
        error = nfs_do_create(dir, dentry, mode, open_flags);
        if (error)
        if (!error) {
            file->f_mode |= FMODE_CREATED;
            return finish_open(file, dentry, NULL);
        } else if (error != -EEXIST || open_flags & O_EXCL)
            return error;
        return finish_open(file, dentry, NULL);
    } else if (d_in_lookup(dentry)) {
    }
    if (d_in_lookup(dentry)) {
        /* The only flags nfs_lookup considers are
         * LOOKUP_EXCL and LOOKUP_RENAME_TARGET, and
         * we want those to be zero so the lookup isn't skipped.
         */
        struct dentry *res = nfs_lookup(dir, dentry, 0);

        d_lookup_done(dentry);
        if (unlikely(res)) {
            if (IS_ERR(res))
                return PTR_ERR(res);
            return finish_no_open(file, res);
        }
        res = nfs_lookup(dir, dentry, 0);
    }
    return finish_no_open(file, NULL);
    return finish_no_open(file, res);

}
EXPORT_SYMBOL_GPL(nfs_atomic_open_v23);

@@ -711,6 +711,8 @@ nfs_setattr(struct mnt_idmap *idmap, struct dentry *dentry,
    struct inode *inode = d_inode(dentry);
    struct nfs_fattr *fattr;
    int error = 0;
    kuid_t task_uid = current_fsuid();
    kuid_t owner_uid = inode->i_uid;

    nfs_inc_stats(inode, NFSIOS_VFSSETATTR);

@@ -732,9 +734,11 @@ nfs_setattr(struct mnt_idmap *idmap, struct dentry *dentry,
    if (nfs_have_delegated_mtime(inode) && attr->ia_valid & ATTR_MTIME) {
        spin_lock(&inode->i_lock);
        if (attr->ia_valid & ATTR_MTIME_SET) {
            nfs_set_timestamps_to_ts(inode, attr);
            attr->ia_valid &= ~(ATTR_MTIME|ATTR_MTIME_SET|
            if (uid_eq(task_uid, owner_uid)) {
                nfs_set_timestamps_to_ts(inode, attr);
                attr->ia_valid &= ~(ATTR_MTIME|ATTR_MTIME_SET|
                        ATTR_ATIME|ATTR_ATIME_SET);
            }
        } else {
            nfs_update_timestamps(inode, attr->ia_valid);
            attr->ia_valid &= ~(ATTR_MTIME|ATTR_ATIME);
@@ -744,10 +748,12 @@ nfs_setattr(struct mnt_idmap *idmap, struct dentry *dentry,
           attr->ia_valid & ATTR_ATIME &&
           !(attr->ia_valid & ATTR_MTIME)) {
        if (attr->ia_valid & ATTR_ATIME_SET) {
            spin_lock(&inode->i_lock);
            nfs_set_timestamps_to_ts(inode, attr);
            spin_unlock(&inode->i_lock);
            attr->ia_valid &= ~(ATTR_ATIME|ATTR_ATIME_SET);
            if (uid_eq(task_uid, owner_uid)) {
                spin_lock(&inode->i_lock);
                nfs_set_timestamps_to_ts(inode, attr);
                spin_unlock(&inode->i_lock);
                attr->ia_valid &= ~(ATTR_ATIME|ATTR_ATIME_SET);
            }
        } else {
            nfs_update_delegated_atime(inode);
            attr->ia_valid &= ~ATTR_ATIME;
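With a delegation held, the hunks above only apply an explicitly requested timestamp locally when the caller's fsuid matches the inode owner (uid_eq(task_uid, owner_uid)); anyone else falls through to the normal server-side path. A userspace sketch of the same ownership gate, with utimensat() standing in for the local update and the path chosen purely for illustration:

```c
/* Sketch of the ownership gate added above: honor an explicit mtime
 * locally only when the caller's effective uid owns the file; otherwise
 * skip and let the authoritative side decide. Paths are illustrative.
 */
#define _POSIX_C_SOURCE 200809L
#include <fcntl.h>
#include <sys/stat.h>
#include <time.h>
#include <unistd.h>

static int set_mtime_if_owner(const char *path, const struct timespec *ts)
{
    struct stat st;
    struct timespec times[2];

    if (stat(path, &st) != 0)
        return -1;
    if (st.st_uid != geteuid())
        return 0;               /* not the owner: no local update */

    times[0].tv_sec = 0;
    times[0].tv_nsec = UTIME_OMIT;  /* leave atime alone */
    times[1] = *ts;                 /* explicit mtime */
    return utimensat(AT_FDCWD, path, times, 0);
}

int main(void)
{
    struct timespec ts = { .tv_sec = 0, .tv_nsec = 0 }; /* epoch */

    return set_mtime_if_owner("demo.txt", &ts) ? 1 : 0;
}
```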
@@ -2,6 +2,7 @@
#include <linux/nfs_fs.h>
#include <linux/nfs_mount.h>
#include <linux/sunrpc/addr.h>
#include <net/handshake.h>
#include "internal.h"
#include "nfs3_fs.h"
#include "netns.h"
@@ -98,7 +99,11 @@ struct nfs_client *nfs3_set_ds_client(struct nfs_server *mds_srv,
        .net = mds_clp->cl_net,
        .timeparms = &ds_timeout,
        .cred = mds_srv->cred,
        .xprtsec = mds_clp->cl_xprtsec,
        .xprtsec = {
            .policy = RPC_XPRTSEC_NONE,
            .cert_serial = TLS_NO_CERT,
            .privkey_serial = TLS_NO_PRIVKEY,
        },
        .connect_timeout = connect_timeout,
        .reconnect_timeout = connect_timeout,
    };
@@ -111,9 +116,14 @@ struct nfs_client *nfs3_set_ds_client(struct nfs_server *mds_srv,
    cl_init.hostname = buf;

    switch (ds_proto) {
    case XPRT_TRANSPORT_TCP_TLS:
        if (mds_clp->cl_xprtsec.policy != RPC_XPRTSEC_NONE)
            cl_init.xprtsec = mds_clp->cl_xprtsec;
        else
            ds_proto = XPRT_TRANSPORT_TCP;
        fallthrough;
    case XPRT_TRANSPORT_RDMA:
    case XPRT_TRANSPORT_TCP:
    case XPRT_TRANSPORT_TCP_TLS:
        if (mds_clp->cl_nconnect > 1)
            cl_init.nconnect = mds_clp->cl_nconnect;
    }

@@ -11,6 +11,7 @@
#include <linux/sunrpc/xprt.h>
#include <linux/sunrpc/bc_xprt.h>
#include <linux/sunrpc/rpc_pipe_fs.h>
#include <net/handshake.h>
#include "internal.h"
#include "callback.h"
#include "delegation.h"
@@ -222,6 +223,7 @@ struct nfs_client *nfs4_alloc_client(const struct nfs_client_initdata *cl_init)
    clp->cl_state = 1 << NFS4CLNT_LEASE_EXPIRED;
    clp->cl_mvops = nfs_v4_minor_ops[cl_init->minorversion];
    clp->cl_mig_gen = 1;
    clp->cl_last_renewal = jiffies;
#if IS_ENABLED(CONFIG_NFS_V4_1)
    init_waitqueue_head(&clp->cl_lock_waitq);
#endif
@@ -991,7 +993,11 @@ struct nfs_client *nfs4_set_ds_client(struct nfs_server *mds_srv,
        .net = mds_clp->cl_net,
        .timeparms = &ds_timeout,
        .cred = mds_srv->cred,
        .xprtsec = mds_srv->nfs_client->cl_xprtsec,
        .xprtsec = {
            .policy = RPC_XPRTSEC_NONE,
            .cert_serial = TLS_NO_CERT,
            .privkey_serial = TLS_NO_PRIVKEY,
        },
    };
    char buf[INET6_ADDRSTRLEN + 1];

@@ -1000,9 +1006,14 @@ struct nfs_client *nfs4_set_ds_client(struct nfs_server *mds_srv,
    cl_init.hostname = buf;

    switch (ds_proto) {
    case XPRT_TRANSPORT_TCP_TLS:
        if (mds_srv->nfs_client->cl_xprtsec.policy != RPC_XPRTSEC_NONE)
            cl_init.xprtsec = mds_srv->nfs_client->cl_xprtsec;
        else
            ds_proto = XPRT_TRANSPORT_TCP;
        fallthrough;
    case XPRT_TRANSPORT_RDMA:
    case XPRT_TRANSPORT_TCP:
    case XPRT_TRANSPORT_TCP_TLS:
        if (mds_clp->cl_nconnect > 1) {
            cl_init.nconnect = mds_clp->cl_nconnect;
            cl_init.max_connect = NFS_MAX_TRANSPORTS;
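Both nfs3_set_ds_client() and nfs4_set_ds_client() now start from an unsecured xprtsec and only inherit the MDS's TLS settings when the requested data-server transport is TCP_TLS and the MDS actually has a security policy; otherwise the transport quietly degrades to plain TCP before falling through to the shared nconnect handling. A compilable sketch of that switch shape, with invented enum, struct and function names:

```c
/* Sketch of the data-server transport selection above: keep TLS only when
 * there is a policy to inherit, otherwise degrade to plain TCP. All names
 * here are invented for the example.
 */
#include <stdio.h>

enum transport { XPRT_TCP, XPRT_RDMA, XPRT_TCP_TLS };
enum sec_policy { SEC_NONE, SEC_TLS_X509 };

struct client_init {
    enum sec_policy xprtsec;
    int nconnect;
};

static enum transport pick_ds_transport(enum transport requested,
                                        enum sec_policy mds_policy,
                                        struct client_init *init,
                                        int mds_nconnect)
{
    switch (requested) {
    case XPRT_TCP_TLS:
        if (mds_policy != SEC_NONE)
            init->xprtsec = mds_policy; /* inherit the MDS TLS setup */
        else
            requested = XPRT_TCP;       /* nothing to secure with */
        /* fall through */
    case XPRT_RDMA:
    case XPRT_TCP:
        if (mds_nconnect > 1)
            init->nconnect = mds_nconnect;
    }
    return requested;
}

int main(void)
{
    struct client_init init = { SEC_NONE, 1 };
    enum transport t = pick_ds_transport(XPRT_TCP_TLS, SEC_NONE, &init, 2);

    printf("transport=%d nconnect=%d\n", t, init.nconnect); /* 0 (TCP), 2 */
    return 0;
}
```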
@@ -3612,6 +3612,7 @@ struct nfs4_closedata {
    } lr;
    struct nfs_fattr fattr;
    unsigned long timestamp;
    unsigned short retrans;
};

static void nfs4_free_closedata(void *data)
@@ -3640,6 +3641,7 @@ static void nfs4_close_done(struct rpc_task *task, void *data)
        .state = state,
        .inode = calldata->inode,
        .stateid = &calldata->arg.stateid,
        .retrans = calldata->retrans,
    };

    if (!nfs4_sequence_done(task, &calldata->res.seq_res))
@@ -3687,6 +3689,7 @@ static void nfs4_close_done(struct rpc_task *task, void *data)
    default:
        task->tk_status = nfs4_async_handle_exception(task,
                server, task->tk_status, &exception);
        calldata->retrans = exception.retrans;
        if (exception.retry)
            goto out_restart;
    }
@@ -4692,16 +4695,19 @@ static int _nfs4_proc_lookupp(struct inode *inode,
    };
    unsigned short task_flags = 0;

    if (NFS_SERVER(inode)->flags & NFS_MOUNT_SOFTREVAL)
    if (server->flags & NFS_MOUNT_SOFTREVAL)
        task_flags |= RPC_TASK_TIMEOUT;
    if (server->caps & NFS_CAP_MOVEABLE)
        task_flags |= RPC_TASK_MOVEABLE;

    args.bitmask = nfs4_bitmask(server, fattr->label);

    nfs_fattr_init(fattr);
    nfs4_init_sequence(&args.seq_args, &res.seq_res, 0, 0);

    dprintk("NFS call lookupp ino=0x%lx\n", inode->i_ino);
    status = nfs4_call_sync(clnt, server, &msg, &args.seq_args,
            &res.seq_res, task_flags);
    status = nfs4_do_call_sync(clnt, server, &msg, &args.seq_args,
            &res.seq_res, task_flags);
    dprintk("NFS reply lookupp: %d\n", status);
    return status;
}
@@ -5546,9 +5552,11 @@ static int nfs4_read_done_cb(struct rpc_task *task, struct nfs_pgio_header *hdr)
        .inode = hdr->inode,
        .state = hdr->args.context->state,
        .stateid = &hdr->args.stateid,
        .retrans = hdr->retrans,
    };
    task->tk_status = nfs4_async_handle_exception(task,
            server, task->tk_status, &exception);
    hdr->retrans = exception.retrans;
    if (exception.retry) {
        rpc_restart_call_prepare(task);
        return -EAGAIN;
@@ -5662,10 +5670,12 @@ static int nfs4_write_done_cb(struct rpc_task *task,
        .inode = hdr->inode,
        .state = hdr->args.context->state,
        .stateid = &hdr->args.stateid,
        .retrans = hdr->retrans,
    };
    task->tk_status = nfs4_async_handle_exception(task,
            NFS_SERVER(inode), task->tk_status,
            &exception);
    hdr->retrans = exception.retrans;
    if (exception.retry) {
        rpc_restart_call_prepare(task);
        return -EAGAIN;
@@ -6677,6 +6687,7 @@ struct nfs4_delegreturndata {
    struct nfs_fh fh;
    nfs4_stateid stateid;
    unsigned long timestamp;
    unsigned short retrans;
    struct {
        struct nfs4_layoutreturn_args arg;
        struct nfs4_layoutreturn_res res;
@@ -6697,6 +6708,7 @@ static void nfs4_delegreturn_done(struct rpc_task *task, void *calldata)
        .inode = data->inode,
        .stateid = &data->stateid,
        .task_is_privileged = data->args.seq_args.sa_privileged,
        .retrans = data->retrans,
    };

    if (!nfs4_sequence_done(task, &data->res.seq_res))
@@ -6768,6 +6780,7 @@ static void nfs4_delegreturn_done(struct rpc_task *task, void *calldata)
        task->tk_status = nfs4_async_handle_exception(task,
                data->res.server, task->tk_status,
                &exception);
        data->retrans = exception.retrans;
        if (exception.retry)
            goto out_restart;
    }
@@ -7044,6 +7057,7 @@ struct nfs4_unlockdata {
    struct file_lock fl;
    struct nfs_server *server;
    unsigned long timestamp;
    unsigned short retrans;
};

static struct nfs4_unlockdata *nfs4_alloc_unlockdata(struct file_lock *fl,
@@ -7098,6 +7112,7 @@ static void nfs4_locku_done(struct rpc_task *task, void *data)
    struct nfs4_exception exception = {
        .inode = calldata->lsp->ls_state->inode,
        .stateid = &calldata->arg.stateid,
        .retrans = calldata->retrans,
    };

    if (!nfs4_sequence_done(task, &calldata->res.seq_res))
@@ -7131,6 +7146,7 @@ static void nfs4_locku_done(struct rpc_task *task, void *data)
        task->tk_status = nfs4_async_handle_exception(task,
                calldata->server, task->tk_status,
                &exception);
        calldata->retrans = exception.retrans;
        if (exception.retry)
            rpc_restart_call_prepare(task);
    }
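These hunks add a retrans counter to each call's private data, seed struct nfs4_exception with it, and copy the updated value back after nfs4_async_handle_exception(), so repeated retries of one operation can be tracked and bounded. A generic sketch of that pattern; the handler, limit and field names below are illustrative, not the NFS implementation:

```c
/* Generic sketch of carrying a retry count through an exception context so
 * retries of a single call can be capped. All names and the limit are
 * invented for illustration.
 */
#include <stdbool.h>
#include <stdio.h>

#define MAX_RETRANS 3

struct call_data {
    int status;
    unsigned short retrans;     /* persists across retries of this call */
};

struct exception_ctx {
    unsigned short retrans;     /* seeded from the call data */
    bool retry;
};

static void handle_exception(struct exception_ctx *exc, int status)
{
    /* Retry transient failures, but only up to MAX_RETRANS times. */
    if (status == -1 && exc->retrans < MAX_RETRANS) {
        exc->retrans++;
        exc->retry = true;
    } else {
        exc->retry = false;
    }
}

static bool complete_call(struct call_data *call)
{
    struct exception_ctx exc = { .retrans = call->retrans };

    handle_exception(&exc, call->status);
    call->retrans = exc.retrans;    /* copy the count back, as the hunks do */
    return exc.retry;               /* caller restarts the RPC if true */
}

int main(void)
{
    struct call_data call = { .status = -1 };

    while (complete_call(&call))
        printf("retrying, attempt %d\n", call.retrans);
    return 0;
}
```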
@@ -867,7 +867,10 @@ static int _nfs4_pnfs_v4_ds_connect(struct nfs_server *mds_srv,
                u32 minor_version)
{
    struct nfs_client *clp = ERR_PTR(-EIO);
    struct nfs_client *mds_clp = mds_srv->nfs_client;
    enum xprtsec_policies xprtsec_policy = mds_clp->cl_xprtsec.policy;
    struct nfs4_pnfs_ds_addr *da;
    int ds_proto;
    int status = 0;

    dprintk("--> %s DS %s\n", __func__, ds->ds_remotestr);
@@ -895,12 +898,8 @@ static int _nfs4_pnfs_v4_ds_connect(struct nfs_server *mds_srv,
            .data = &xprtdata,
        };

        if (da->da_transport != clp->cl_proto &&
            clp->cl_proto != XPRT_TRANSPORT_TCP_TLS)
            continue;
        if (da->da_transport == XPRT_TRANSPORT_TCP &&
            mds_srv->nfs_client->cl_proto ==
                XPRT_TRANSPORT_TCP_TLS) {
        if (xprt_args.ident == XPRT_TRANSPORT_TCP &&
            clp->cl_proto == XPRT_TRANSPORT_TCP_TLS) {
            struct sockaddr *addr =
                (struct sockaddr *)&da->da_addr;
            struct sockaddr_in *sin =
@@ -931,7 +930,10 @@ static int _nfs4_pnfs_v4_ds_connect(struct nfs_server *mds_srv,
            xprt_args.ident = XPRT_TRANSPORT_TCP_TLS;
            xprt_args.servername = servername;
        }
        if (da->da_addr.ss_family != clp->cl_addr.ss_family)
        if (xprt_args.ident != clp->cl_proto)
            continue;
        if (xprt_args.dstaddr->sa_family !=
            clp->cl_addr.ss_family)
            continue;

        /**
@@ -945,15 +947,14 @@ static int _nfs4_pnfs_v4_ds_connect(struct nfs_server *mds_srv,
            if (xprtdata.cred)
                put_cred(xprtdata.cred);
        } else {
            if (da->da_transport == XPRT_TRANSPORT_TCP &&
                mds_srv->nfs_client->cl_proto ==
                    XPRT_TRANSPORT_TCP_TLS)
                da->da_transport = XPRT_TRANSPORT_TCP_TLS;
            clp = nfs4_set_ds_client(mds_srv,
                        &da->da_addr,
                        da->da_addrlen,
                        da->da_transport, timeo,
                        retrans, minor_version);
            ds_proto = da->da_transport;
            if (ds_proto == XPRT_TRANSPORT_TCP &&
                xprtsec_policy != RPC_XPRTSEC_NONE)
                ds_proto = XPRT_TRANSPORT_TCP_TLS;

            clp = nfs4_set_ds_client(mds_srv, &da->da_addr,
                        da->da_addrlen, ds_proto,
                        timeo, retrans, minor_version);
            if (IS_ERR(clp))
                continue;

@@ -964,7 +965,6 @@ static int _nfs4_pnfs_v4_ds_connect(struct nfs_server *mds_srv,
                clp = ERR_PTR(-EIO);
                continue;
            }

        }
    }

@@ -189,6 +189,7 @@ static struct nfs_netns_client *nfs_netns_client_alloc(struct kobject *parent,
            return p;

        kobject_put(&p->kobject);
        kobject_put(&p->nfs_net_kobj);
    }
    return NULL;
}

@@ -1577,7 +1577,8 @@ static int nfs_writeback_done(struct rpc_task *task,
    /* Deal with the suid/sgid bit corner case */
    if (nfs_should_remove_suid(inode)) {
        spin_lock(&inode->i_lock);
        nfs_set_cache_invalid(inode, NFS_INO_INVALID_MODE);
        nfs_set_cache_invalid(inode, NFS_INO_INVALID_MODE
                | NFS_INO_REVAL_FORCED);
        spin_unlock(&inode->i_lock);
    }
    return 0;

@@ -1528,7 +1528,8 @@ static void nfs4_free_ol_stateid(struct nfs4_stid *stid)
    release_all_access(stp);
    if (stp->st_stateowner)
        nfs4_put_stateowner(stp->st_stateowner);
    WARN_ON(!list_empty(&stid->sc_cp_list));
    if (!list_empty(&stid->sc_cp_list))
        nfs4_free_cpntf_statelist(stid->sc_client->net, stid);
    kmem_cache_free(stateid_slab, stid);
}

@@ -5800,8 +5800,7 @@ nfsd4_encode_operation(struct nfsd4_compoundres *resp, struct nfsd4_op *op)
         */
        warn_on_nonidempotent_op(op);
        xdr_truncate_encode(xdr, op_status_offset + XDR_UNIT);
    }
    if (so) {
    } else if (so) {
        int len = xdr->buf->len - (op_status_offset + XDR_UNIT);

        so->so_replay.rp_status = op->status;

@@ -458,6 +458,7 @@ enum {
#define NFSD4_2_SUPPORTED_ATTRS_WORD2 \
    (NFSD4_1_SUPPORTED_ATTRS_WORD2 | \
     FATTR4_WORD2_MODE_UMASK | \
     FATTR4_WORD2_CLONE_BLKSIZE | \
     NFSD4_2_SECURITY_ATTRS | \
     FATTR4_WORD2_XATTR_SUPPORT)

@@ -268,9 +268,6 @@ static __be32 nfsd_set_fh_dentry(struct svc_rqst *rqstp, struct net *net,
                dentry);
    }

    fhp->fh_dentry = dentry;
    fhp->fh_export = exp;

    switch (fhp->fh_maxsize) {
    case NFS4_FHSIZE:
        if (dentry->d_sb->s_export_op->flags & EXPORT_OP_NOATOMIC_ATTR)
@@ -292,6 +289,9 @@ static __be32 nfsd_set_fh_dentry(struct svc_rqst *rqstp, struct net *net,
        goto out;
    }

    fhp->fh_dentry = dentry;
    fhp->fh_export = exp;

    return 0;
out:
    exp_put(exp);

@@ -2787,7 +2787,12 @@ static void nilfs_segctor_destroy(struct nilfs_sc_info *sci)

    if (sci->sc_task) {
        wake_up(&sci->sc_wait_daemon);
        kthread_stop(sci->sc_task);
        if (kthread_stop(sci->sc_task)) {
            spin_lock(&sci->sc_state_lock);
            sci->sc_task = NULL;
            timer_shutdown_sync(&sci->sc_timer);
            spin_unlock(&sci->sc_state_lock);
        }
    }

    spin_lock(&sci->sc_state_lock);

@@ -828,7 +828,13 @@ static const struct file_operations proc_single_file_operations = {
    .release    = single_release,
};

/*
 * proc_mem_open() can return errno, NULL or mm_struct*.
 *
 * - Returns NULL if the task has no mm (PF_KTHREAD or PF_EXITING)
 * - Returns mm_struct* on success
 * - Returns error code on failure
 */
struct mm_struct *proc_mem_open(struct inode *inode, unsigned int mode)
{
    struct task_struct *task = get_proc_task(inode);
@@ -853,8 +859,8 @@ static int __mem_open(struct inode *inode, struct file *file, unsigned int mode)
{
    struct mm_struct *mm = proc_mem_open(inode, mode);

    if (IS_ERR(mm))
        return PTR_ERR(mm);
    if (IS_ERR_OR_NULL(mm))
        return mm ? PTR_ERR(mm) : -ESRCH;

    file->private_data = mm;
    return 0;
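The comment block above spells out that proc_mem_open() has three outcomes: an error pointer, NULL when the task has no mm, or a valid mm_struct, and __mem_open() now folds the NULL case into -ESRCH via IS_ERR_OR_NULL(). A sketch of handling that three-way contract, again using simplified stand-ins for the <linux/err.h> macros and invented types:

```c
/* Sketch of the three-way return contract documented above: encoded error,
 * NULL ("no object"), or a valid pointer, with the caller mapping NULL to
 * its own errno (-ESRCH here). Macros are simplified stand-ins for
 * <linux/err.h>; the types are invented.
 */
#include <errno.h>
#include <stdint.h>
#include <stdio.h>

#define MAX_ERRNO           4095
#define ERR_PTR(err)        ((void *)(intptr_t)(err))
#define PTR_ERR(ptr)        ((long)(intptr_t)(ptr))
#define IS_ERR(ptr)         ((uintptr_t)(ptr) >= (uintptr_t)-MAX_ERRNO)
#define IS_ERR_OR_NULL(ptr) (!(ptr) || IS_ERR(ptr))

struct mm { int users; };

/* error pointer: no permission; NULL: task has no mm; otherwise: the mm */
static struct mm *mem_lookup(int which)
{
    static struct mm the_mm = { 1 };

    if (which == 0)
        return ERR_PTR(-EACCES);
    if (which == 1)
        return NULL;
    return &the_mm;
}

static int mem_open(int which)
{
    struct mm *mm = mem_lookup(which);

    if (IS_ERR_OR_NULL(mm))
        return mm ? (int)PTR_ERR(mm) : -ESRCH;
    return 0;
}

int main(void)
{
    printf("%d %d %d\n", mem_open(0), mem_open(1), mem_open(2));
    /* prints: -13 -3 0  (-EACCES, -ESRCH, success) */
    return 0;
}
```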
@@ -694,6 +694,12 @@ void pde_put(struct proc_dir_entry *pde)
    }
}

static void pde_erase(struct proc_dir_entry *pde, struct proc_dir_entry *parent)
{
    rb_erase(&pde->subdir_node, &parent->subdir);
    RB_CLEAR_NODE(&pde->subdir_node);
}

/*
 * Remove a /proc entry and free it if it's not currently in use.
 */
@@ -716,7 +722,7 @@ void remove_proc_entry(const char *name, struct proc_dir_entry *parent)
        WARN(1, "removing permanent /proc entry '%s'", de->name);
        de = NULL;
    } else {
        rb_erase(&de->subdir_node, &parent->subdir);
        pde_erase(de, parent);
        if (S_ISDIR(de->mode))
            parent->nlink--;
    }
@@ -760,7 +766,7 @@ int remove_proc_subtree(const char *name, struct proc_dir_entry *parent)
                root->parent->name, root->name);
        return -EINVAL;
    }
    rb_erase(&root->subdir_node, &parent->subdir);
    pde_erase(root, parent);

    de = root;
    while (1) {
@@ -772,7 +778,7 @@ int remove_proc_subtree(const char *name, struct proc_dir_entry *parent)
                next->parent->name, next->name);
            return -EINVAL;
        }
        rb_erase(&next->subdir_node, &de->subdir);
        pde_erase(next, de);
        de = next;
        continue;
    }

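The new pde_erase() helper pairs rb_erase() with RB_CLEAR_NODE() so every removal path leaves the node marked as detached. A userspace sketch of the same "unlink and mark unlinked in one helper" idea, using an invented two-pointer list instead of an rbtree:

```c
/* Sketch of the pde_erase() idea: do the unlink and the "mark as detached"
 * step in one helper so no caller can forget the latter. The tiny list type
 * and names are invented for illustration.
 */
#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

struct node {
    struct node *prev, *next;
    const char *name;
};

static bool node_linked(const struct node *n)
{
    return n->prev != NULL;         /* NULL prev means not on a list */
}

static void node_erase(struct node *n)
{
    n->prev->next = n->next;
    if (n->next)
        n->next->prev = n->prev;
    n->prev = n->next = NULL;       /* the step pde_erase() centralizes */
}

int main(void)
{
    struct node head = { NULL, NULL, "head" };
    struct node a = { &head, NULL, "a" };

    head.next = &a;
    node_erase(&a);
    printf("a linked: %d\n", node_linked(&a));  /* 0 */
    return 0;
}
```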