Merge remote-tracking branch 'stable/linux-5.10.y' into rpi-5.10.y

Dom Cobley
2021-10-28 13:19:51 +01:00
114 changed files with 960 additions and 476 deletions

View File

@@ -1,7 +1,7 @@
 # SPDX-License-Identifier: GPL-2.0
 VERSION = 5
 PATCHLEVEL = 10
-SUBLEVEL = 75
+SUBLEVEL = 76
 EXTRAVERSION =
 NAME = Dare mighty things

View File

@@ -87,6 +87,7 @@ config ARM
 select HAVE_FTRACE_MCOUNT_RECORD if !XIP_KERNEL
 select HAVE_FUNCTION_GRAPH_TRACER if !THUMB2_KERNEL && !CC_IS_CLANG
 select HAVE_FUNCTION_TRACER if !XIP_KERNEL
+select HAVE_FUTEX_CMPXCHG if FUTEX
 select HAVE_GCC_PLUGINS
 select HAVE_HW_BREAKPOINT if PERF_EVENTS && (CPU_V6 || CPU_V6K || CPU_V7)
 select HAVE_IDE if PCI || ISA || PCMCIA

View File

@@ -71,7 +71,6 @@
 isc: isc@f0008000 {
 pinctrl-names = "default";
 pinctrl-0 = <&pinctrl_isc_base &pinctrl_isc_data_8bit &pinctrl_isc_data_9_10 &pinctrl_isc_data_11_12>;
-status = "okay";
 };
 qspi1: spi@f0024000 {

View File

@@ -47,7 +47,7 @@
 };
 gmac: eth@e0800000 {
-compatible = "st,spear600-gmac";
+compatible = "snps,dwmac-3.40a";
 reg = <0xe0800000 0x8000>;
 interrupts = <23 22>;
 interrupt-names = "macirq", "eth_wake_irq";

View File

@@ -19,7 +19,7 @@
 */
 / {
-bus@4000000 {
+bus@40000000 {
 motherboard {
 model = "V2M-P1";
 arm,hbi = <0x190>;

View File

@@ -295,7 +295,7 @@
 };
 };
-smb: bus@4000000 {
+smb: bus@40000000 {
 compatible = "simple-bus";
 #address-cells = <2>;

View File

@@ -9,7 +9,7 @@
 static inline unsigned long arch_local_save_flags(void)
 {
-return RDCTL(CTL_STATUS);
+return RDCTL(CTL_FSTATUS);
 }
 /*
@@ -18,7 +18,7 @@ static inline unsigned long arch_local_save_flags(void)
 */
 static inline void arch_local_irq_restore(unsigned long flags)
 {
-WRCTL(CTL_STATUS, flags);
+WRCTL(CTL_FSTATUS, flags);
 }
 static inline void arch_local_irq_disable(void)

View File

@@ -11,7 +11,7 @@
 #endif
 /* control register numbers */
-#define CTL_STATUS 0
+#define CTL_FSTATUS 0
 #define CTL_ESTATUS 1
 #define CTL_BSTATUS 2
 #define CTL_IENABLE 3

View File

@@ -310,12 +310,15 @@ decode_0c(u_int ir, u_int class, u_int subop, u_int fpregs[])
 r1 &= ~3;
 fpregs[t+3] = fpregs[r1+3];
 fpregs[t+2] = fpregs[r1+2];
+fallthrough;
 case 1: /* double */
 fpregs[t+1] = fpregs[r1+1];
+fallthrough;
 case 0: /* single */
 fpregs[t] = fpregs[r1];
 return(NOEXCEPTION);
 }
+BUG();
 case 3: /* FABS */
 switch (fmt) {
 case 2: /* illegal */
@@ -325,13 +328,16 @@ decode_0c(u_int ir, u_int class, u_int subop, u_int fpregs[])
 r1 &= ~3;
 fpregs[t+3] = fpregs[r1+3];
 fpregs[t+2] = fpregs[r1+2];
+fallthrough;
 case 1: /* double */
 fpregs[t+1] = fpregs[r1+1];
+fallthrough;
 case 0: /* single */
 /* copy and clear sign bit */
 fpregs[t] = fpregs[r1] & 0x7fffffff;
 return(NOEXCEPTION);
 }
+BUG();
 case 6: /* FNEG */
 switch (fmt) {
 case 2: /* illegal */
@@ -341,13 +347,16 @@ decode_0c(u_int ir, u_int class, u_int subop, u_int fpregs[])
 r1 &= ~3;
 fpregs[t+3] = fpregs[r1+3];
 fpregs[t+2] = fpregs[r1+2];
+fallthrough;
 case 1: /* double */
 fpregs[t+1] = fpregs[r1+1];
+fallthrough;
 case 0: /* single */
 /* copy and invert sign bit */
 fpregs[t] = fpregs[r1] ^ 0x80000000;
 return(NOEXCEPTION);
 }
+BUG();
 case 7: /* FNEGABS */
 switch (fmt) {
 case 2: /* illegal */
@@ -357,13 +366,16 @@ decode_0c(u_int ir, u_int class, u_int subop, u_int fpregs[])
 r1 &= ~3;
 fpregs[t+3] = fpregs[r1+3];
 fpregs[t+2] = fpregs[r1+2];
+fallthrough;
 case 1: /* double */
 fpregs[t+1] = fpregs[r1+1];
+fallthrough;
 case 0: /* single */
 /* copy and set sign bit */
 fpregs[t] = fpregs[r1] | 0x80000000;
 return(NOEXCEPTION);
 }
+BUG();
 case 4: /* FSQRT */
 switch (fmt) {
 case 0:
@@ -376,6 +388,7 @@ decode_0c(u_int ir, u_int class, u_int subop, u_int fpregs[])
 case 3: /* quad not implemented */
 return(MAJOR_0C_EXCP);
 }
+BUG();
 case 5: /* FRND */
 switch (fmt) {
 case 0:
@@ -389,7 +402,7 @@ decode_0c(u_int ir, u_int class, u_int subop, u_int fpregs[])
 return(MAJOR_0C_EXCP);
 }
 } /* end of switch (subop) */
-
+BUG();
 case 1: /* class 1 */
 df = extru(ir,fpdfpos,2); /* get dest format */
 if ((df & 2) || (fmt & 2)) {
@@ -419,6 +432,7 @@ decode_0c(u_int ir, u_int class, u_int subop, u_int fpregs[])
 case 3: /* dbl/dbl */
 return(MAJOR_0C_EXCP);
 }
+BUG();
 case 1: /* FCNVXF */
 switch(fmt) {
 case 0: /* sgl/sgl */
@@ -434,6 +448,7 @@ decode_0c(u_int ir, u_int class, u_int subop, u_int fpregs[])
 return(dbl_to_dbl_fcnvxf(&fpregs[r1],0,
 &fpregs[t],status));
 }
+BUG();
 case 2: /* FCNVFX */
 switch(fmt) {
 case 0: /* sgl/sgl */
@@ -449,6 +464,7 @@ decode_0c(u_int ir, u_int class, u_int subop, u_int fpregs[])
 return(dbl_to_dbl_fcnvfx(&fpregs[r1],0,
 &fpregs[t],status));
 }
+BUG();
 case 3: /* FCNVFXT */
 switch(fmt) {
 case 0: /* sgl/sgl */
@@ -464,6 +480,7 @@ decode_0c(u_int ir, u_int class, u_int subop, u_int fpregs[])
 return(dbl_to_dbl_fcnvfxt(&fpregs[r1],0,
 &fpregs[t],status));
 }
+BUG();
 case 5: /* FCNVUF (PA2.0 only) */
 switch(fmt) {
 case 0: /* sgl/sgl */
@@ -479,6 +496,7 @@ decode_0c(u_int ir, u_int class, u_int subop, u_int fpregs[])
 return(dbl_to_dbl_fcnvuf(&fpregs[r1],0,
 &fpregs[t],status));
 }
+BUG();
 case 6: /* FCNVFU (PA2.0 only) */
 switch(fmt) {
 case 0: /* sgl/sgl */
@@ -494,6 +512,7 @@ decode_0c(u_int ir, u_int class, u_int subop, u_int fpregs[])
 return(dbl_to_dbl_fcnvfu(&fpregs[r1],0,
 &fpregs[t],status));
 }
+BUG();
 case 7: /* FCNVFUT (PA2.0 only) */
 switch(fmt) {
 case 0: /* sgl/sgl */
@@ -509,10 +528,11 @@ decode_0c(u_int ir, u_int class, u_int subop, u_int fpregs[])
 return(dbl_to_dbl_fcnvfut(&fpregs[r1],0,
 &fpregs[t],status));
 }
+BUG();
 case 4: /* undefined */
 return(MAJOR_0C_EXCP);
 } /* end of switch subop */
-
+BUG();
 case 2: /* class 2 */
 fpu_type_flags=fpregs[FPU_TYPE_FLAG_POS];
 r2 = extru(ir, fpr2pos, 5) * sizeof(double)/sizeof(u_int);
@@ -590,6 +610,7 @@ decode_0c(u_int ir, u_int class, u_int subop, u_int fpregs[])
 case 3: /* quad not implemented */
 return(MAJOR_0C_EXCP);
 }
+BUG();
 case 1: /* FTEST */
 switch (fmt) {
 case 0:
@@ -609,8 +630,10 @@ decode_0c(u_int ir, u_int class, u_int subop, u_int fpregs[])
 case 3:
 return(MAJOR_0C_EXCP);
 }
+BUG();
 } /* end of switch subop */
 } /* end of else for PA1.0 & PA1.1 */
+BUG();
 case 3: /* class 3 */
 r2 = extru(ir,fpr2pos,5) * sizeof(double)/sizeof(u_int);
 if (r2 == 0)
@@ -633,6 +656,7 @@ decode_0c(u_int ir, u_int class, u_int subop, u_int fpregs[])
 case 3: /* quad not implemented */
 return(MAJOR_0C_EXCP);
 }
+BUG();
 case 1: /* FSUB */
 switch (fmt) {
 case 0:
@@ -645,6 +669,7 @@ decode_0c(u_int ir, u_int class, u_int subop, u_int fpregs[])
 case 3: /* quad not implemented */
 return(MAJOR_0C_EXCP);
 }
+BUG();
 case 2: /* FMPY */
 switch (fmt) {
 case 0:
@@ -657,6 +682,7 @@ decode_0c(u_int ir, u_int class, u_int subop, u_int fpregs[])
 case 3: /* quad not implemented */
 return(MAJOR_0C_EXCP);
 }
+BUG();
 case 3: /* FDIV */
 switch (fmt) {
 case 0:
@@ -669,6 +695,7 @@ decode_0c(u_int ir, u_int class, u_int subop, u_int fpregs[])
 case 3: /* quad not implemented */
 return(MAJOR_0C_EXCP);
 }
+BUG();
 case 4: /* FREM */
 switch (fmt) {
 case 0:
@@ -681,6 +708,7 @@ decode_0c(u_int ir, u_int class, u_int subop, u_int fpregs[])
 case 3: /* quad not implemented */
 return(MAJOR_0C_EXCP);
 }
+BUG();
 } /* end of class 3 switch */
 } /* end of switch(class) */
@@ -736,10 +764,12 @@ u_int fpregs[];
 return(MAJOR_0E_EXCP);
 case 1: /* double */
 fpregs[t+1] = fpregs[r1+1];
+fallthrough;
 case 0: /* single */
 fpregs[t] = fpregs[r1];
 return(NOEXCEPTION);
 }
+BUG();
 case 3: /* FABS */
 switch (fmt) {
 case 2:
@@ -747,10 +777,12 @@ u_int fpregs[];
 return(MAJOR_0E_EXCP);
 case 1: /* double */
 fpregs[t+1] = fpregs[r1+1];
+fallthrough;
 case 0: /* single */
 fpregs[t] = fpregs[r1] & 0x7fffffff;
 return(NOEXCEPTION);
 }
+BUG();
 case 6: /* FNEG */
 switch (fmt) {
 case 2:
@@ -758,10 +790,12 @@ u_int fpregs[];
 return(MAJOR_0E_EXCP);
 case 1: /* double */
 fpregs[t+1] = fpregs[r1+1];
+fallthrough;
 case 0: /* single */
 fpregs[t] = fpregs[r1] ^ 0x80000000;
 return(NOEXCEPTION);
 }
+BUG();
 case 7: /* FNEGABS */
 switch (fmt) {
 case 2:
@@ -769,10 +803,12 @@ u_int fpregs[];
 return(MAJOR_0E_EXCP);
 case 1: /* double */
 fpregs[t+1] = fpregs[r1+1];
+fallthrough;
 case 0: /* single */
 fpregs[t] = fpregs[r1] | 0x80000000;
 return(NOEXCEPTION);
 }
+BUG();
 case 4: /* FSQRT */
 switch (fmt) {
 case 0:
@@ -785,6 +821,7 @@ u_int fpregs[];
 case 3:
 return(MAJOR_0E_EXCP);
 }
+BUG();
 case 5: /* FRMD */
 switch (fmt) {
 case 0:
@@ -798,7 +835,7 @@ u_int fpregs[];
 return(MAJOR_0E_EXCP);
 }
 } /* end of switch (subop */
-
+BUG();
 case 1: /* class 1 */
 df = extru(ir,fpdfpos,2); /* get dest format */
 /*
@@ -826,6 +863,7 @@ u_int fpregs[];
 case 3: /* dbl/dbl */
 return(MAJOR_0E_EXCP);
 }
+BUG();
 case 1: /* FCNVXF */
 switch(fmt) {
 case 0: /* sgl/sgl */
@@ -841,6 +879,7 @@ u_int fpregs[];
 return(dbl_to_dbl_fcnvxf(&fpregs[r1],0,
 &fpregs[t],status));
 }
+BUG();
 case 2: /* FCNVFX */
 switch(fmt) {
 case 0: /* sgl/sgl */
@@ -856,6 +895,7 @@ u_int fpregs[];
 return(dbl_to_dbl_fcnvfx(&fpregs[r1],0,
 &fpregs[t],status));
 }
+BUG();
 case 3: /* FCNVFXT */
 switch(fmt) {
 case 0: /* sgl/sgl */
@@ -871,6 +911,7 @@ u_int fpregs[];
 return(dbl_to_dbl_fcnvfxt(&fpregs[r1],0,
 &fpregs[t],status));
 }
+BUG();
 case 5: /* FCNVUF (PA2.0 only) */
 switch(fmt) {
 case 0: /* sgl/sgl */
@@ -886,6 +927,7 @@ u_int fpregs[];
 return(dbl_to_dbl_fcnvuf(&fpregs[r1],0,
 &fpregs[t],status));
 }
+BUG();
 case 6: /* FCNVFU (PA2.0 only) */
 switch(fmt) {
 case 0: /* sgl/sgl */
@@ -901,6 +943,7 @@ u_int fpregs[];
 return(dbl_to_dbl_fcnvfu(&fpregs[r1],0,
 &fpregs[t],status));
 }
+BUG();
 case 7: /* FCNVFUT (PA2.0 only) */
 switch(fmt) {
 case 0: /* sgl/sgl */
@@ -916,9 +959,11 @@ u_int fpregs[];
 return(dbl_to_dbl_fcnvfut(&fpregs[r1],0,
 &fpregs[t],status));
 }
+BUG();
 case 4: /* undefined */
 return(MAJOR_0C_EXCP);
 } /* end of switch subop */
+BUG();
 case 2: /* class 2 */
 /*
 * Be careful out there.
@@ -994,6 +1039,7 @@ u_int fpregs[];
 }
 } /* end of switch subop */
 } /* end of else for PA1.0 & PA1.1 */
+BUG();
 case 3: /* class 3 */
 /*
 * Be careful out there.
@@ -1026,6 +1072,7 @@ u_int fpregs[];
 return(dbl_fadd(&fpregs[r1],&fpregs[r2],
 &fpregs[t],status));
 }
+BUG();
 case 1: /* FSUB */
 switch (fmt) {
 case 0:
@@ -1035,6 +1082,7 @@ u_int fpregs[];
 return(dbl_fsub(&fpregs[r1],&fpregs[r2],
 &fpregs[t],status));
 }
+BUG();
 case 2: /* FMPY or XMPYU */
 /*
 * check for integer multiply (x bit set)
@@ -1071,6 +1119,7 @@ u_int fpregs[];
 &fpregs[r2],&fpregs[t],status));
 }
 }
+BUG();
 case 3: /* FDIV */
 switch (fmt) {
 case 0:
@@ -1080,6 +1129,7 @@ u_int fpregs[];
 return(dbl_fdiv(&fpregs[r1],&fpregs[r2],
 &fpregs[t],status));
 }
+BUG();
 case 4: /* FREM */
 switch (fmt) {
 case 0:

View File

@@ -52,28 +52,32 @@ _GLOBAL(isa300_idle_stop_mayloss)
 std r1,PACAR1(r13)
 mflr r4
 mfcr r5
-/* use stack red zone rather than a new frame for saving regs */
-std r2,-8*0(r1)
-std r14,-8*1(r1)
-std r15,-8*2(r1)
-std r16,-8*3(r1)
-std r17,-8*4(r1)
-std r18,-8*5(r1)
-std r19,-8*6(r1)
-std r20,-8*7(r1)
-std r21,-8*8(r1)
-std r22,-8*9(r1)
-std r23,-8*10(r1)
-std r24,-8*11(r1)
-std r25,-8*12(r1)
-std r26,-8*13(r1)
-std r27,-8*14(r1)
-std r28,-8*15(r1)
-std r29,-8*16(r1)
-std r30,-8*17(r1)
-std r31,-8*18(r1)
-std r4,-8*19(r1)
-std r5,-8*20(r1)
+/*
+* Use the stack red zone rather than a new frame for saving regs since
+* in the case of no GPR loss the wakeup code branches directly back to
+* the caller without deallocating the stack frame first.
+*/
+std r2,-8*1(r1)
+std r14,-8*2(r1)
+std r15,-8*3(r1)
+std r16,-8*4(r1)
+std r17,-8*5(r1)
+std r18,-8*6(r1)
+std r19,-8*7(r1)
+std r20,-8*8(r1)
+std r21,-8*9(r1)
+std r22,-8*10(r1)
+std r23,-8*11(r1)
+std r24,-8*12(r1)
+std r25,-8*13(r1)
+std r26,-8*14(r1)
+std r27,-8*15(r1)
+std r28,-8*16(r1)
+std r29,-8*17(r1)
+std r30,-8*18(r1)
+std r31,-8*19(r1)
+std r4,-8*20(r1)
+std r5,-8*21(r1)
 /* 168 bytes */
 PPC_STOP
 b . /* catch bugs */
@@ -89,8 +93,8 @@ _GLOBAL(isa300_idle_stop_mayloss)
 */
 _GLOBAL(idle_return_gpr_loss)
 ld r1,PACAR1(r13)
-ld r4,-8*19(r1)
-ld r5,-8*20(r1)
+ld r4,-8*20(r1)
+ld r5,-8*21(r1)
 mtlr r4
 mtcr r5
 /*
@@ -98,38 +102,40 @@ _GLOBAL(idle_return_gpr_loss)
 * from PACATOC. This could be avoided for that less common case
 * if KVM saved its r2.
 */
-ld r2,-8*0(r1)
-ld r14,-8*1(r1)
-ld r15,-8*2(r1)
-ld r16,-8*3(r1)
-ld r17,-8*4(r1)
-ld r18,-8*5(r1)
-ld r19,-8*6(r1)
-ld r20,-8*7(r1)
-ld r21,-8*8(r1)
-ld r22,-8*9(r1)
-ld r23,-8*10(r1)
-ld r24,-8*11(r1)
-ld r25,-8*12(r1)
-ld r26,-8*13(r1)
-ld r27,-8*14(r1)
-ld r28,-8*15(r1)
-ld r29,-8*16(r1)
-ld r30,-8*17(r1)
-ld r31,-8*18(r1)
+ld r2,-8*1(r1)
+ld r14,-8*2(r1)
+ld r15,-8*3(r1)
+ld r16,-8*4(r1)
+ld r17,-8*5(r1)
+ld r18,-8*6(r1)
+ld r19,-8*7(r1)
+ld r20,-8*8(r1)
+ld r21,-8*9(r1)
+ld r22,-8*10(r1)
+ld r23,-8*11(r1)
+ld r24,-8*12(r1)
+ld r25,-8*13(r1)
+ld r26,-8*14(r1)
+ld r27,-8*15(r1)
+ld r28,-8*16(r1)
+ld r29,-8*17(r1)
+ld r30,-8*18(r1)
+ld r31,-8*19(r1)
 blr
 /*
 * This is the sequence required to execute idle instructions, as
 * specified in ISA v2.07 (and earlier). MSR[IR] and MSR[DR] must be 0.
-*
-* The 0(r1) slot is used to save r2 in isa206, so use that here.
+* We have to store a GPR somewhere, ptesync, then reload it, and create
+* a false dependency on the result of the load. It doesn't matter which
+* GPR we store, or where we store it. We have already stored r2 to the
+* stack at -8(r1) in isa206_idle_insn_mayloss, so use that.
 */
 #define IDLE_STATE_ENTER_SEQ_NORET(IDLE_INST) \
 /* Magic NAP/SLEEP/WINKLE mode enter sequence */ \
-std r2,0(r1); \
+std r2,-8(r1); \
 ptesync; \
-ld r2,0(r1); \
+ld r2,-8(r1); \
 236: cmpd cr0,r2,r2; \
 bne 236b; \
 IDLE_INST; \
@@ -154,28 +160,32 @@ _GLOBAL(isa206_idle_insn_mayloss)
 std r1,PACAR1(r13)
 mflr r4
 mfcr r5
-/* use stack red zone rather than a new frame for saving regs */
-std r2,-8*0(r1)
-std r14,-8*1(r1)
-std r15,-8*2(r1)
-std r16,-8*3(r1)
-std r17,-8*4(r1)
-std r18,-8*5(r1)
-std r19,-8*6(r1)
-std r20,-8*7(r1)
-std r21,-8*8(r1)
-std r22,-8*9(r1)
-std r23,-8*10(r1)
-std r24,-8*11(r1)
-std r25,-8*12(r1)
-std r26,-8*13(r1)
-std r27,-8*14(r1)
-std r28,-8*15(r1)
-std r29,-8*16(r1)
-std r30,-8*17(r1)
-std r31,-8*18(r1)
-std r4,-8*19(r1)
-std r5,-8*20(r1)
+/*
+* Use the stack red zone rather than a new frame for saving regs since
+* in the case of no GPR loss the wakeup code branches directly back to
+* the caller without deallocating the stack frame first.
+*/
+std r2,-8*1(r1)
+std r14,-8*2(r1)
+std r15,-8*3(r1)
+std r16,-8*4(r1)
+std r17,-8*5(r1)
+std r18,-8*6(r1)
+std r19,-8*7(r1)
+std r20,-8*8(r1)
+std r21,-8*9(r1)
+std r22,-8*10(r1)
+std r23,-8*11(r1)
+std r24,-8*12(r1)
+std r25,-8*13(r1)
+std r26,-8*14(r1)
+std r27,-8*15(r1)
+std r28,-8*16(r1)
+std r29,-8*17(r1)
+std r30,-8*18(r1)
+std r31,-8*19(r1)
+std r4,-8*20(r1)
+std r5,-8*21(r1)
 cmpwi r3,PNV_THREAD_NAP
 bne 1f
 IDLE_STATE_ENTER_SEQ_NORET(PPC_NAP)

View File

@@ -1578,8 +1578,6 @@ void __cpu_die(unsigned int cpu)
 void arch_cpu_idle_dead(void)
 {
-sched_preempt_enable_no_resched();
 /*
 * Disable on the down path. This will be re-enabled by
 * start_secondary() via start_secondary_resume() below

View File

@@ -292,13 +292,16 @@ kvm_novcpu_exit:
 * r3 contains the SRR1 wakeup value, SRR1 is trashed.
 */
 _GLOBAL(idle_kvm_start_guest)
-ld r4,PACAEMERGSP(r13)
 mfcr r5
 mflr r0
-std r1,0(r4)
-std r5,8(r4)
-std r0,16(r4)
-subi r1,r4,STACK_FRAME_OVERHEAD
+std r5, 8(r1) // Save CR in caller's frame
+std r0, 16(r1) // Save LR in caller's frame
+// Create frame on emergency stack
+ld r4, PACAEMERGSP(r13)
+stdu r1, -SWITCH_FRAME_SIZE(r4)
+// Switch to new frame on emergency stack
+mr r1, r4
+std r3, 32(r1) // Save SRR1 wakeup value
 SAVE_NVGPRS(r1)
 /*
@@ -350,6 +353,10 @@ kvm_unsplit_wakeup:
 kvm_secondary_got_guest:
+// About to go to guest, clear saved SRR1
+li r0, 0
+std r0, 32(r1)
 /* Set HSTATE_DSCR(r13) to something sensible */
 ld r6, PACA_DSCR_DEFAULT(r13)
 std r6, HSTATE_DSCR(r13)
@@ -441,13 +448,12 @@ kvm_no_guest:
 mfspr r4, SPRN_LPCR
 rlwimi r4, r3, 0, LPCR_PECE0 | LPCR_PECE1
 mtspr SPRN_LPCR, r4
-/* set up r3 for return */
-mfspr r3,SPRN_SRR1
+// Return SRR1 wakeup value, or 0 if we went into the guest
+ld r3, 32(r1)
 REST_NVGPRS(r1)
-addi r1, r1, STACK_FRAME_OVERHEAD
-ld r0, 16(r1)
-ld r5, 8(r1)
-ld r1, 0(r1)
+ld r1, 0(r1) // Switch back to caller stack
+ld r0, 16(r1) // Reload LR
+ld r5, 8(r1) // Reload CR
 mtlr r0
 mtcr r5
 blr

View File

@@ -205,6 +205,9 @@ int zpci_create_device(u32 fid, u32 fh, enum zpci_state state);
 void zpci_remove_device(struct zpci_dev *zdev, bool set_error);
 int zpci_enable_device(struct zpci_dev *);
 int zpci_disable_device(struct zpci_dev *);
+void zpci_device_reserved(struct zpci_dev *zdev);
+bool zpci_is_device_configured(struct zpci_dev *zdev);
 int zpci_register_ioat(struct zpci_dev *, u8, u64, u64, u64);
 int zpci_unregister_ioat(struct zpci_dev *, u8);
 void zpci_remove_reserved_devices(void);

View File

@@ -92,7 +92,7 @@ void zpci_remove_reserved_devices(void)
 spin_unlock(&zpci_list_lock);
 list_for_each_entry_safe(zdev, tmp, &remove, entry)
-zpci_zdev_put(zdev);
+zpci_device_reserved(zdev);
 }
 int pci_domain_nr(struct pci_bus *bus)
@@ -787,6 +787,39 @@ error:
 return rc;
 }
+bool zpci_is_device_configured(struct zpci_dev *zdev)
+{
+enum zpci_state state = zdev->state;
+return state != ZPCI_FN_STATE_RESERVED &&
+state != ZPCI_FN_STATE_STANDBY;
+}
+/**
+* zpci_device_reserved() - Mark device as resverved
+* @zdev: the zpci_dev that was reserved
+*
+* Handle the case that a given zPCI function was reserved by another system.
+* After a call to this function the zpci_dev can not be found via
+* get_zdev_by_fid() anymore but may still be accessible via existing
+* references though it will not be functional anymore.
+*/
+void zpci_device_reserved(struct zpci_dev *zdev)
+{
+if (zdev->has_hp_slot)
+zpci_exit_slot(zdev);
+/*
+* Remove device from zpci_list as it is going away. This also
+* makes sure we ignore subsequent zPCI events for this device.
+*/
+spin_lock(&zpci_list_lock);
+list_del(&zdev->entry);
+spin_unlock(&zpci_list_lock);
+zdev->state = ZPCI_FN_STATE_RESERVED;
+zpci_dbg(3, "rsv fid:%x\n", zdev->fid);
+zpci_zdev_put(zdev);
+}
 void zpci_release_device(struct kref *kref)
 {
 struct zpci_dev *zdev = container_of(kref, struct zpci_dev, kref);
@@ -802,6 +835,12 @@ void zpci_release_device(struct kref *kref)
 case ZPCI_FN_STATE_STANDBY:
 if (zdev->has_hp_slot)
 zpci_exit_slot(zdev);
+spin_lock(&zpci_list_lock);
+list_del(&zdev->entry);
+spin_unlock(&zpci_list_lock);
+zpci_dbg(3, "rsv fid:%x\n", zdev->fid);
+fallthrough;
+case ZPCI_FN_STATE_RESERVED:
 zpci_cleanup_bus_resources(zdev);
 zpci_bus_device_unregister(zdev);
 zpci_destroy_iommu(zdev);
@@ -809,10 +848,6 @@ void zpci_release_device(struct kref *kref)
 default:
 break;
 }
-spin_lock(&zpci_list_lock);
-list_del(&zdev->entry);
-spin_unlock(&zpci_list_lock);
 zpci_dbg(3, "rem fid:%x\n", zdev->fid);
 kfree(zdev);
 }

View File

@@ -146,7 +146,7 @@ static void __zpci_event_availability(struct zpci_ccdf_avail *ccdf)
 zdev->state = ZPCI_FN_STATE_STANDBY;
 if (!clp_get_state(ccdf->fid, &state) &&
 state == ZPCI_FN_STATE_RESERVED) {
-zpci_zdev_put(zdev);
+zpci_device_reserved(zdev);
 }
 break;
 case 0x0306: /* 0x308 or 0x302 for multiple devices */
@@ -156,7 +156,7 @@ static void __zpci_event_availability(struct zpci_ccdf_avail *ccdf)
 case 0x0308: /* Standby -> Reserved */
 if (!zdev)
 break;
-zpci_zdev_put(zdev);
+zpci_device_reserved(zdev);
 break;
 default:
 break;

View File

@@ -68,6 +68,7 @@ static bool test_intel(int idx, void *data)
 case INTEL_FAM6_BROADWELL_D:
 case INTEL_FAM6_BROADWELL_G:
 case INTEL_FAM6_BROADWELL_X:
+case INTEL_FAM6_SAPPHIRERAPIDS_X:
 case INTEL_FAM6_ATOM_SILVERMONT:
 case INTEL_FAM6_ATOM_SILVERMONT_D:

View File

@@ -6316,18 +6316,13 @@ static int vmx_sync_pir_to_irr(struct kvm_vcpu *vcpu)
 /*
 * If we are running L2 and L1 has a new pending interrupt
-* which can be injected, we should re-evaluate
-* what should be done with this new L1 interrupt.
-* If L1 intercepts external-interrupts, we should
-* exit from L2 to L1. Otherwise, interrupt should be
-* delivered directly to L2.
+* which can be injected, this may cause a vmexit or it may
+* be injected into L2. Either way, this interrupt will be
+* processed via KVM_REQ_EVENT, not RVI, because we do not use
+* virtual interrupt delivery to inject L1 interrupts into L2.
 */
-if (is_guest_mode(vcpu) && max_irr_updated) {
-if (nested_exit_on_intr(vcpu))
-kvm_vcpu_exiting_guest_mode(vcpu);
-else
-kvm_make_request(KVM_REQ_EVENT, vcpu);
-}
+if (is_guest_mode(vcpu) && max_irr_updated)
+kvm_make_request(KVM_REQ_EVENT, vcpu);
 } else {
 max_irr = kvm_lapic_find_highest_irr(vcpu);
 }

View File

@@ -51,9 +51,6 @@ DEFINE_PER_CPU(struct vcpu_info, xen_vcpu_info);
 DEFINE_PER_CPU(uint32_t, xen_vcpu_id);
 EXPORT_PER_CPU_SYMBOL(xen_vcpu_id);
-enum xen_domain_type xen_domain_type = XEN_NATIVE;
-EXPORT_SYMBOL_GPL(xen_domain_type);
 unsigned long *machine_to_phys_mapping = (void *)MACH2PHYS_VIRT_START;
 EXPORT_SYMBOL(machine_to_phys_mapping);
 unsigned long machine_to_phys_nr;
@@ -68,9 +65,11 @@ __read_mostly int xen_have_vector_callback;
 EXPORT_SYMBOL_GPL(xen_have_vector_callback);
 /*
-* NB: needs to live in .data because it's used by xen_prepare_pvh which runs
-* before clearing the bss.
+* NB: These need to live in .data or alike because they're used by
+* xen_prepare_pvh() which runs before clearing the bss.
 */
+enum xen_domain_type __ro_after_init xen_domain_type = XEN_NATIVE;
+EXPORT_SYMBOL_GPL(xen_domain_type);
 uint32_t xen_start_flags __section(".data") = 0;
 EXPORT_SYMBOL(xen_start_flags);

View File

@@ -51,8 +51,12 @@ void platform_power_off(void)
 void platform_restart(void)
 {
-/* Flush and reset the mmu, simulate a processor reset, and
-* jump to the reset vector. */
+/* Try software reset first. */
+WRITE_ONCE(*(u32 *)XTFPGA_SWRST_VADDR, 0xdead);
+/* If software reset did not work, flush and reset the mmu,
+* simulate a processor reset, and jump to the reset vector.
+*/
 cpu_reset();
 /* control never gets here */
 }
@@ -66,7 +70,7 @@ void __init platform_calibrate_ccount(void)
 #endif
-#ifdef CONFIG_OF
+#ifdef CONFIG_USE_OF
 static void __init xtfpga_clk_setup(struct device_node *np)
 {
@@ -284,4 +288,4 @@ static int __init xtavnet_init(void)
 */
 arch_initcall(xtavnet_init);
-#endif /* CONFIG_OF */
+#endif /* CONFIG_USE_OF */

View File

@@ -129,6 +129,7 @@ static const char *const blk_queue_flag_name[] = {
 QUEUE_FLAG_NAME(PCI_P2PDMA),
 QUEUE_FLAG_NAME(ZONE_RESETALL),
 QUEUE_FLAG_NAME(RQ_ALLOC_TIME),
+QUEUE_FLAG_NAME(HCTX_ACTIVE),
 QUEUE_FLAG_NAME(NOWAIT),
 };
 #undef QUEUE_FLAG_NAME

View File

@@ -33,6 +33,8 @@ config DRM_AMD_DC_HDCP
 config DRM_AMD_DC_SI
 bool "AMD DC support for Southern Islands ASICs"
+depends on DRM_AMDGPU_SI
+depends on DRM_AMD_DC
 default n
 help
 Choose this option to enable new AMD DC support for SI asics

View File

@@ -268,7 +268,11 @@ static void mxsfb_irq_disable(struct drm_device *drm)
 struct mxsfb_drm_private *mxsfb = drm->dev_private;
 mxsfb_enable_axi_clk(mxsfb);
-mxsfb->crtc.funcs->disable_vblank(&mxsfb->crtc);
+/* Disable and clear VBLANK IRQ */
+writel(CTRL1_CUR_FRAME_DONE_IRQ_EN, mxsfb->base + LCDC_CTRL1 + REG_CLR);
+writel(CTRL1_CUR_FRAME_DONE_IRQ, mxsfb->base + LCDC_CTRL1 + REG_CLR);
 mxsfb_disable_axi_clk(mxsfb);
 }

View File

@@ -590,14 +590,14 @@ static const struct drm_display_mode k101_im2byl02_default_mode = {
 .clock = 69700,
 .hdisplay = 800,
-.hsync_start = 800 + 6,
-.hsync_end = 800 + 6 + 15,
-.htotal = 800 + 6 + 15 + 16,
+.hsync_start = 800 + 52,
+.hsync_end = 800 + 52 + 8,
+.htotal = 800 + 52 + 8 + 48,
 .vdisplay = 1280,
-.vsync_start = 1280 + 8,
-.vsync_end = 1280 + 8 + 48,
-.vtotal = 1280 + 8 + 48 + 52,
+.vsync_start = 1280 + 16,
+.vsync_end = 1280 + 16 + 6,
+.vtotal = 1280 + 16 + 6 + 15,
 .width_mm = 135,
 .height_mm = 217,

View File

@@ -3,6 +3,7 @@
 // Driver for the IMX SNVS ON/OFF Power Key
 // Copyright (C) 2015 Freescale Semiconductor, Inc. All Rights Reserved.
+#include <linux/clk.h>
 #include <linux/device.h>
 #include <linux/err.h>
 #include <linux/init.h>
@@ -99,6 +100,11 @@ static irqreturn_t imx_snvs_pwrkey_interrupt(int irq, void *dev_id)
 return IRQ_HANDLED;
 }
+static void imx_snvs_pwrkey_disable_clk(void *data)
+{
+clk_disable_unprepare(data);
+}
 static void imx_snvs_pwrkey_act(void *pdata)
 {
 struct pwrkey_drv_data *pd = pdata;
@@ -111,6 +117,7 @@ static int imx_snvs_pwrkey_probe(struct platform_device *pdev)
 struct pwrkey_drv_data *pdata;
 struct input_dev *input;
 struct device_node *np;
+struct clk *clk;
 int error;
 u32 vid;
@@ -134,6 +141,28 @@ static int imx_snvs_pwrkey_probe(struct platform_device *pdev)
 dev_warn(&pdev->dev, "KEY_POWER without setting in dts\n");
 }
+clk = devm_clk_get_optional(&pdev->dev, NULL);
+if (IS_ERR(clk)) {
+dev_err(&pdev->dev, "Failed to get snvs clock (%pe)\n", clk);
+return PTR_ERR(clk);
+}
+error = clk_prepare_enable(clk);
+if (error) {
+dev_err(&pdev->dev, "Failed to enable snvs clock (%pe)\n",
+ERR_PTR(error));
+return error;
+}
+error = devm_add_action_or_reset(&pdev->dev,
+imx_snvs_pwrkey_disable_clk, clk);
+if (error) {
+dev_err(&pdev->dev,
+"Failed to register clock cleanup handler (%pe)\n",
+ERR_PTR(error));
+return error;
+}
 pdata->wakeup = of_property_read_bool(np, "wakeup-source");
 pdata->irq = platform_get_irq(pdev, 0);

View File

@@ -480,6 +480,11 @@ int detach_capi_ctr(struct capi_ctr *ctr)
 ctr_down(ctr, CAPI_CTR_DETACHED);
+if (ctr->cnr < 1 || ctr->cnr - 1 >= CAPI_MAXCONTR) {
+err = -EINVAL;
+goto unlock_out;
+}
 if (capi_controller[ctr->cnr - 1] != ctr) {
 err = -EINVAL;
 goto unlock_out;

View File

@@ -949,8 +949,8 @@ nj_release(struct tiger_hw *card)
 nj_disable_hwirq(card);
 mode_tiger(&card->bc[0], ISDN_P_NONE);
 mode_tiger(&card->bc[1], ISDN_P_NONE);
-card->isac.release(&card->isac);
 spin_unlock_irqrestore(&card->lock, flags);
+card->isac.release(&card->isac);
 release_region(card->base, card->base_s);
 card->base_s = 0;
 }

View File

@@ -846,10 +846,12 @@ static int __maybe_unused rcar_can_suspend(struct device *dev)
 struct rcar_can_priv *priv = netdev_priv(ndev);
 u16 ctlr;
-if (netif_running(ndev)) {
-netif_stop_queue(ndev);
-netif_device_detach(ndev);
-}
+if (!netif_running(ndev))
+return 0;
+netif_stop_queue(ndev);
+netif_device_detach(ndev);
 ctlr = readw(&priv->regs->ctlr);
 ctlr |= RCAR_CAN_CTLR_CANM_HALT;
 writew(ctlr, &priv->regs->ctlr);
@@ -868,6 +870,9 @@ static int __maybe_unused rcar_can_resume(struct device *dev)
 u16 ctlr;
 int err;
+if (!netif_running(ndev))
+return 0;
 err = clk_enable(priv->clk);
 if (err) {
 netdev_err(ndev, "clk_enable() failed, error %d\n", err);
@@ -881,10 +886,9 @@ static int __maybe_unused rcar_can_resume(struct device *dev)
 writew(ctlr, &priv->regs->ctlr);
 priv->can.state = CAN_STATE_ERROR_ACTIVE;
-if (netif_running(ndev)) {
-netif_device_attach(ndev);
-netif_start_queue(ndev);
-}
+netif_device_attach(ndev);
+netif_start_queue(ndev);
 return 0;
 }

View File

@@ -731,16 +731,15 @@ static void peak_pci_remove(struct pci_dev *pdev)
 struct net_device *prev_dev = chan->prev_dev;
 dev_info(&pdev->dev, "removing device %s\n", dev->name);
+/* do that only for first channel */
+if (!prev_dev && chan->pciec_card)
+peak_pciec_remove(chan->pciec_card);
 unregister_sja1000dev(dev);
 free_sja1000dev(dev);
 dev = prev_dev;
-if (!dev) {
-/* do that only for first channel */
-if (chan->pciec_card)
-peak_pciec_remove(chan->pciec_card);
+if (!dev)
 break;
-}
 priv = netdev_priv(dev);
 chan = priv->priv;
 }

View File

@@ -551,11 +551,10 @@ static int pcan_usb_fd_decode_status(struct pcan_usb_fd_if *usb_if,
 } else if (sm->channel_p_w_b & PUCAN_BUS_WARNING) {
 new_state = CAN_STATE_ERROR_WARNING;
 } else {
-/* no error bit (so, no error skb, back to active state) */
-dev->can.state = CAN_STATE_ERROR_ACTIVE;
+/* back to (or still in) ERROR_ACTIVE state */
+new_state = CAN_STATE_ERROR_ACTIVE;
 pdev->bec.txerr = 0;
 pdev->bec.rxerr = 0;
-return 0;
 }
 /* state hasn't changed */

View File

@@ -229,7 +229,7 @@
 #define GSWIP_SDMA_PCTRLp(p) (0xBC0 + ((p) * 0x6))
 #define GSWIP_SDMA_PCTRL_EN BIT(0) /* SDMA Port Enable */
 #define GSWIP_SDMA_PCTRL_FCEN BIT(1) /* Flow Control Enable */
-#define GSWIP_SDMA_PCTRL_PAUFWD BIT(1) /* Pause Frame Forwarding */
+#define GSWIP_SDMA_PCTRL_PAUFWD BIT(3) /* Pause Frame Forwarding */
 #define GSWIP_TABLE_ACTIVE_VLAN 0x01
 #define GSWIP_TABLE_VLAN_MAPPING 0x02

View File

@@ -981,9 +981,6 @@ mt7530_port_enable(struct dsa_switch *ds, int port,
 {
 struct mt7530_priv *priv = ds->priv;
-if (!dsa_is_user_port(ds, port))
-return 0;
 mutex_lock(&priv->reg_mutex);
 /* Allow the user port gets connected to the cpu port and also
@@ -1006,9 +1003,6 @@ mt7530_port_disable(struct dsa_switch *ds, int port)
 {
 struct mt7530_priv *priv = ds->priv;
-if (!dsa_is_user_port(ds, port))
-return;
 mutex_lock(&priv->reg_mutex);
 /* Clear up all port matrix which could be restored in the next
@@ -2593,7 +2587,7 @@ mt7530_probe(struct mdio_device *mdiodev)
 return -ENOMEM;
 priv->ds->dev = &mdiodev->dev;
-priv->ds->num_ports = DSA_MAX_PORTS;
+priv->ds->num_ports = MT7530_NUM_PORTS;
 /* Use medatek,mcm property to distinguish hardware type that would
 * casues a little bit differences on power-on sequence.

View File

@@ -157,7 +157,7 @@ static const struct {
 { ENETC_PM0_TFRM, "MAC tx frames" },
 { ENETC_PM0_TFCS, "MAC tx fcs errors" },
 { ENETC_PM0_TVLAN, "MAC tx VLAN frames" },
-{ ENETC_PM0_TERR, "MAC tx frames" },
+{ ENETC_PM0_TERR, "MAC tx frame errors" },
 { ENETC_PM0_TUCA, "MAC tx unicast frames" },
 { ENETC_PM0_TMCA, "MAC tx multicast frames" },
 { ENETC_PM0_TBCA, "MAC tx broadcast frames" },

View File

@@ -10,6 +10,27 @@ static LIST_HEAD(hnae3_ae_algo_list);
 static LIST_HEAD(hnae3_client_list);
 static LIST_HEAD(hnae3_ae_dev_list);
+void hnae3_unregister_ae_algo_prepare(struct hnae3_ae_algo *ae_algo)
+{
+const struct pci_device_id *pci_id;
+struct hnae3_ae_dev *ae_dev;
+if (!ae_algo)
+return;
+list_for_each_entry(ae_dev, &hnae3_ae_dev_list, node) {
+if (!hnae3_get_bit(ae_dev->flag, HNAE3_DEV_INITED_B))
+continue;
+pci_id = pci_match_id(ae_algo->pdev_id_table, ae_dev->pdev);
+if (!pci_id)
+continue;
+if (IS_ENABLED(CONFIG_PCI_IOV))
+pci_disable_sriov(ae_dev->pdev);
+}
+}
+EXPORT_SYMBOL(hnae3_unregister_ae_algo_prepare);
 /* we are keeping things simple and using single lock for all the
 * list. This is a non-critical code so other updations, if happen
 * in parallel, can wait.

View File

@@ -754,6 +754,7 @@ struct hnae3_handle {
 int hnae3_register_ae_dev(struct hnae3_ae_dev *ae_dev);
 void hnae3_unregister_ae_dev(struct hnae3_ae_dev *ae_dev);
+void hnae3_unregister_ae_algo_prepare(struct hnae3_ae_algo *ae_algo);
 void hnae3_unregister_ae_algo(struct hnae3_ae_algo *ae_algo);
 void hnae3_register_ae_algo(struct hnae3_ae_algo *ae_algo);

View File

@@ -1283,7 +1283,6 @@ void hns3_shinfo_pack(struct skb_shared_info *shinfo, __u32 *size)
 static int hns3_skb_linearize(struct hns3_enet_ring *ring,
 struct sk_buff *skb,
-u8 max_non_tso_bd_num,
 unsigned int bd_num)
 {
 /* 'bd_num == UINT_MAX' means the skb' fraglist has a
@@ -1300,8 +1299,7 @@ static int hns3_skb_linearize(struct hns3_enet_ring *ring,
 * will not help.
 */
 if (skb->len > HNS3_MAX_TSO_SIZE ||
-(!skb_is_gso(skb) && skb->len >
-HNS3_MAX_NON_TSO_SIZE(max_non_tso_bd_num))) {
+(!skb_is_gso(skb) && skb->len > HNS3_MAX_NON_TSO_SIZE)) {
 u64_stats_update_begin(&ring->syncp);
 ring->stats.hw_limitation++;
 u64_stats_update_end(&ring->syncp);
@@ -1336,8 +1334,7 @@ static int hns3_nic_maybe_stop_tx(struct hns3_enet_ring *ring,
 goto out;
 }
-if (hns3_skb_linearize(ring, skb, max_non_tso_bd_num,
-bd_num))
+if (hns3_skb_linearize(ring, skb, bd_num))
 return -ENOMEM;
 bd_num = hns3_tx_bd_count(skb->len);
@@ -2424,6 +2421,7 @@ static void hns3_buffer_detach(struct hns3_enet_ring *ring, int i)
 {
 hns3_unmap_buffer(ring, &ring->desc_cb[i]);
 ring->desc[i].addr = 0;
+ring->desc_cb[i].refill = 0;
 }
 static void hns3_free_buffer_detach(struct hns3_enet_ring *ring, int i,
@@ -2501,6 +2499,7 @@ static int hns3_alloc_and_attach_buffer(struct hns3_enet_ring *ring, int i)
 return ret;
 ring->desc[i].addr = cpu_to_le64(ring->desc_cb[i].dma);
+ring->desc_cb[i].refill = 1;
 return 0;
 }
@@ -2531,12 +2530,14 @@ static void hns3_replace_buffer(struct hns3_enet_ring *ring, int i,
 hns3_unmap_buffer(ring, &ring->desc_cb[i]);
 ring->desc_cb[i] = *res_cb;
 ring->desc[i].addr = cpu_to_le64(ring->desc_cb[i].dma);
+ring->desc_cb[i].refill = 1;
 ring->desc[i].rx.bd_base_info = 0;
 }
 static void hns3_reuse_buffer(struct hns3_enet_ring *ring, int i)
 {
 ring->desc_cb[i].reuse_flag = 0;
+ring->desc_cb[i].refill = 1;
 ring->desc[i].addr = cpu_to_le64(ring->desc_cb[i].dma +
 ring->desc_cb[i].page_offset);
 ring->desc[i].rx.bd_base_info = 0;
@@ -2634,10 +2635,14 @@ static int hns3_desc_unused(struct hns3_enet_ring *ring)
 int ntc = ring->next_to_clean;
 int ntu = ring->next_to_use;
+if (unlikely(ntc == ntu && !ring->desc_cb[ntc].refill))
+return ring->desc_num;
 return ((ntc >= ntu) ? 0 : ring->desc_num) + ntc - ntu;
 }
-static void hns3_nic_alloc_rx_buffers(struct hns3_enet_ring *ring,
+/* Return true if there is any allocation failure */
+static bool hns3_nic_alloc_rx_buffers(struct hns3_enet_ring *ring,
 int cleand_count)
 {
 struct hns3_desc_cb *desc_cb;
@@ -2662,7 +2667,10 @@ static void hns3_nic_alloc_rx_buffers(struct hns3_enet_ring *ring,
 hns3_rl_err(ring_to_netdev(ring),
 "alloc rx buffer failed: %d\n",
 ret);
-break;
+writel(i, ring->tqp->io_base +
+HNS3_RING_RX_RING_HEAD_REG);
+return true;
 }
 hns3_replace_buffer(ring, ring->next_to_use, &res_cbs);
@@ -2675,6 +2683,7 @@ static void hns3_nic_alloc_rx_buffers(struct hns3_enet_ring *ring,
 }
 writel(i, ring->tqp->io_base + HNS3_RING_RX_RING_HEAD_REG);
+return false;
 }
 static bool hns3_page_is_reusable(struct page *page)
@@ -2905,6 +2914,7 @@ static void hns3_rx_ring_move_fw(struct hns3_enet_ring *ring)
 {
 ring->desc[ring->next_to_clean].rx.bd_base_info &=
 cpu_to_le32(~BIT(HNS3_RXD_VLD_B));
+ring->desc_cb[ring->next_to_clean].refill = 0;
 ring->next_to_clean += 1;
 if (unlikely(ring->next_to_clean == ring->desc_num))
@@ -3218,6 +3228,7 @@ int hns3_clean_rx_ring(struct hns3_enet_ring *ring, int budget,
 {
 #define RCB_NOF_ALLOC_RX_BUFF_ONCE 16
 int unused_count = hns3_desc_unused(ring);
+bool failure = false;
 int recv_pkts = 0;
 int err;
@@ -3226,9 +3237,9 @@ int hns3_clean_rx_ring(struct hns3_enet_ring *ring, int budget,
 while (recv_pkts < budget) {
 /* Reuse or realloc buffers */
 if (unused_count >= RCB_NOF_ALLOC_RX_BUFF_ONCE) {
-hns3_nic_alloc_rx_buffers(ring, unused_count);
-unused_count = hns3_desc_unused(ring) -
-ring->pending_buf;
+failure = failure ||
+hns3_nic_alloc_rx_buffers(ring, unused_count);
+unused_count = 0;
 }
 /* Poll one pkt */
@@ -3247,11 +3258,7 @@ int hns3_clean_rx_ring(struct hns3_enet_ring *ring, int budget,
 }
 out:
-/* Make all data has been write before submit */
-if (unused_count > 0)
-hns3_nic_alloc_rx_buffers(ring, unused_count);
-return recv_pkts;
+return failure ? budget : recv_pkts;
 }
 static bool hns3_get_new_flow_lvl(struct hns3_enet_ring_group *ring_group)

View File

@@ -170,11 +170,9 @@ enum hns3_nic_state {
 #define HNS3_MAX_BD_SIZE 65535
 #define HNS3_MAX_TSO_BD_NUM 63U
-#define HNS3_MAX_TSO_SIZE \
-(HNS3_MAX_BD_SIZE * HNS3_MAX_TSO_BD_NUM)
-#define HNS3_MAX_NON_TSO_SIZE(max_non_tso_bd_num) \
-(HNS3_MAX_BD_SIZE * (max_non_tso_bd_num))
+#define HNS3_MAX_TSO_SIZE 1048576U
+#define HNS3_MAX_NON_TSO_SIZE 9728U
 #define HNS3_VECTOR_GL0_OFFSET 0x100
 #define HNS3_VECTOR_GL1_OFFSET 0x200
@@ -285,6 +283,7 @@ struct hns3_desc_cb {
 u32 length; /* length of the buffer */
 u16 reuse_flag;
+u16 refill;
 /* desc type, used by the ring user to mark the type of the priv data */
 u16 type;

View File

@@ -134,6 +134,15 @@ static int hclge_ets_validate(struct hclge_dev *hdev, struct ieee_ets *ets,
 *changed = true;
 break;
 case IEEE_8021QAZ_TSA_ETS:
+/* The hardware will switch to sp mode if bandwidth is
+* 0, so limit ets bandwidth must be greater than 0.
+*/
+if (!ets->tc_tx_bw[i]) {
+dev_err(&hdev->pdev->dev,
+"tc%u ets bw cannot be 0\n", i);
+return -EINVAL;
+}
 if (hdev->tm_info.tc_info[i].tc_sch_mode !=
 HCLGE_SCH_MODE_DWRR)
 *changed = true;

View File

@@ -11518,6 +11518,7 @@ static int hclge_init(void)
 static void hclge_exit(void)
 {
+hnae3_unregister_ae_algo_prepare(&ae_algo);
 hnae3_unregister_ae_algo(&ae_algo);
 destroy_workqueue(hclge_wq);
 }

View File

@@ -671,6 +671,8 @@ static void hclge_tm_pg_info_init(struct hclge_dev *hdev)
 hdev->tm_info.pg_info[i].tc_bit_map = hdev->hw_tc_map;
 for (k = 0; k < hdev->tm_info.num_tc; k++)
 hdev->tm_info.pg_info[i].tc_dwrr[k] = BW_PERCENT;
+for (; k < HNAE3_MAX_TC; k++)
+hdev->tm_info.pg_info[i].tc_dwrr[k] = 0;
 }
 }

View File

@@ -2160,9 +2160,9 @@ static void hclgevf_reset_service_task(struct hclgevf_dev *hdev)
 hdev->reset_attempts = 0;
 hdev->last_reset_time = jiffies;
-while ((hdev->reset_type =
-hclgevf_get_reset_level(hdev, &hdev->reset_pending))
-!= HNAE3_NONE_RESET)
+hdev->reset_type =
+hclgevf_get_reset_level(hdev, &hdev->reset_pending);
+if (hdev->reset_type != HNAE3_NONE_RESET)
 hclgevf_reset(hdev);
 } else if (test_and_clear_bit(HCLGEVF_RESET_REQUESTED,
 &hdev->reset_state)) {

View File

@@ -113,7 +113,8 @@ enum e1000_boards {
 board_pch2lan,
 board_pch_lpt,
 board_pch_spt,
-board_pch_cnp
+board_pch_cnp,
+board_pch_tgp
 };
 struct e1000_ps_page {
@@ -499,6 +500,7 @@ extern const struct e1000_info e1000_pch2_info;
 extern const struct e1000_info e1000_pch_lpt_info;
 extern const struct e1000_info e1000_pch_spt_info;
 extern const struct e1000_info e1000_pch_cnp_info;
+extern const struct e1000_info e1000_pch_tgp_info;
 extern const struct e1000_info e1000_es2_info;
 void e1000e_ptp_init(struct e1000_adapter *adapter);

View File

@@ -4811,7 +4811,7 @@ static s32 e1000_reset_hw_ich8lan(struct e1000_hw *hw)
 static s32 e1000_init_hw_ich8lan(struct e1000_hw *hw)
 {
 struct e1000_mac_info *mac = &hw->mac;
-u32 ctrl_ext, txdctl, snoop;
+u32 ctrl_ext, txdctl, snoop, fflt_dbg;
 s32 ret_val;
 u16 i;
@@ -4870,6 +4870,15 @@ static s32 e1000_init_hw_ich8lan(struct e1000_hw *hw)
 snoop = (u32)~(PCIE_NO_SNOOP_ALL);
 e1000e_set_pcie_no_snoop(hw, snoop);
+/* Enable workaround for packet loss issue on TGP PCH
+* Do not gate DMA clock from the modPHY block
+*/
+if (mac->type >= e1000_pch_tgp) {
+fflt_dbg = er32(FFLT_DBG);
+fflt_dbg |= E1000_FFLT_DBG_DONT_GATE_WAKE_DMA_CLK;
+ew32(FFLT_DBG, fflt_dbg);
+}
 ctrl_ext = er32(CTRL_EXT);
 ctrl_ext |= E1000_CTRL_EXT_RO_DIS;
 ew32(CTRL_EXT, ctrl_ext);
@@ -5990,3 +5999,23 @@ const struct e1000_info e1000_pch_cnp_info = {
 .phy_ops = &ich8_phy_ops,
 .nvm_ops = &spt_nvm_ops,
 };
+const struct e1000_info e1000_pch_tgp_info = {
+.mac = e1000_pch_tgp,
+.flags = FLAG_IS_ICH
+| FLAG_HAS_WOL
+| FLAG_HAS_HW_TIMESTAMP
+| FLAG_HAS_CTRLEXT_ON_LOAD
+| FLAG_HAS_AMT
+| FLAG_HAS_FLASH
+| FLAG_HAS_JUMBO_FRAMES
+| FLAG_APME_IN_WUC,
+.flags2 = FLAG2_HAS_PHY_STATS
+| FLAG2_HAS_EEE,
+.pba = 26,
+.max_hw_frame_size = 9022,
+.get_variants = e1000_get_variants_ich8lan,
+.mac_ops = &ich8_mac_ops,
+.phy_ops = &ich8_phy_ops,
+.nvm_ops = &spt_nvm_ops,
+};

View File

@@ -286,6 +286,9 @@
 /* Proprietary Latency Tolerance Reporting PCI Capability */
 #define E1000_PCI_LTR_CAP_LPT 0xA8
+/* Don't gate wake DMA clock */
+#define E1000_FFLT_DBG_DONT_GATE_WAKE_DMA_CLK 0x1000
 void e1000e_write_protect_nvm_ich8lan(struct e1000_hw *hw);
 void e1000e_set_kmrn_lock_loss_workaround_ich8lan(struct e1000_hw *hw,
 bool state);

View File

@@ -50,6 +50,7 @@ static const struct e1000_info *e1000_info_tbl[] = {
 [board_pch_lpt] = &e1000_pch_lpt_info,
 [board_pch_spt] = &e1000_pch_spt_info,
 [board_pch_cnp] = &e1000_pch_cnp_info,
+[board_pch_tgp] = &e1000_pch_tgp_info,
 };
 struct e1000_reg_info {
@@ -7837,20 +7838,20 @@ static const struct pci_device_id e1000_pci_tbl[] = {
 { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_CMP_I219_V11), board_pch_cnp },
 { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_CMP_I219_LM12), board_pch_spt },
 { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_CMP_I219_V12), board_pch_spt },
-{ PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_TGP_I219_LM13), board_pch_cnp },
-{ PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_TGP_I219_V13), board_pch_cnp },
-{ PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_TGP_I219_LM14), board_pch_cnp },
-{ PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_TGP_I219_V14), board_pch_cnp },
-{ PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_TGP_I219_LM15), board_pch_cnp },
-{ PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_TGP_I219_V15), board_pch_cnp },
-{ PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_ADP_I219_LM16), board_pch_cnp },
-{ PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_ADP_I219_V16), board_pch_cnp },
-{ PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_ADP_I219_LM17), board_pch_cnp },
-{ PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_ADP_I219_V17), board_pch_cnp },
-{ PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_MTP_I219_LM18), board_pch_cnp },
-{ PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_MTP_I219_V18), board_pch_cnp },
-{ PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_MTP_I219_LM19), board_pch_cnp },
-{ PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_MTP_I219_V19), board_pch_cnp },
+{ PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_TGP_I219_LM13), board_pch_tgp },
+{ PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_TGP_I219_V13), board_pch_tgp },
+{ PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_TGP_I219_LM14), board_pch_tgp },
+{ PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_TGP_I219_V14), board_pch_tgp },
+{ PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_TGP_I219_LM15), board_pch_tgp },
+{ PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_TGP_I219_V15), board_pch_tgp },
+{ PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_ADP_I219_LM16), board_pch_tgp },
+{ PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_ADP_I219_V16), board_pch_tgp },
+{ PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_ADP_I219_LM17), board_pch_tgp },
+{ PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_ADP_I219_V17), board_pch_tgp },
+{ PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_MTP_I219_LM18), board_pch_tgp },
+{ PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_MTP_I219_V18), board_pch_tgp },
+{ PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_MTP_I219_LM19), board_pch_tgp },
+{ PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_MTP_I219_V19), board_pch_tgp },
 { 0, 0, 0, 0, 0, 0, 0 } /* terminate list */
 };

View File

@@ -24,6 +24,8 @@ static enum ice_status ice_set_mac_type(struct ice_hw *hw)
case ICE_DEV_ID_E810C_BACKPLANE: case ICE_DEV_ID_E810C_BACKPLANE:
case ICE_DEV_ID_E810C_QSFP: case ICE_DEV_ID_E810C_QSFP:
case ICE_DEV_ID_E810C_SFP: case ICE_DEV_ID_E810C_SFP:
case ICE_DEV_ID_E810_XXV_BACKPLANE:
case ICE_DEV_ID_E810_XXV_QSFP:
case ICE_DEV_ID_E810_XXV_SFP: case ICE_DEV_ID_E810_XXV_SFP:
hw->mac_type = ICE_MAC_E810; hw->mac_type = ICE_MAC_E810;
break; break;

View File

@@ -21,6 +21,10 @@
#define ICE_DEV_ID_E810C_QSFP 0x1592 #define ICE_DEV_ID_E810C_QSFP 0x1592
/* Intel(R) Ethernet Controller E810-C for SFP */ /* Intel(R) Ethernet Controller E810-C for SFP */
#define ICE_DEV_ID_E810C_SFP 0x1593 #define ICE_DEV_ID_E810C_SFP 0x1593
/* Intel(R) Ethernet Controller E810-XXV for backplane */
#define ICE_DEV_ID_E810_XXV_BACKPLANE 0x1599
/* Intel(R) Ethernet Controller E810-XXV for QSFP */
#define ICE_DEV_ID_E810_XXV_QSFP 0x159A
/* Intel(R) Ethernet Controller E810-XXV for SFP */ /* Intel(R) Ethernet Controller E810-XXV for SFP */
#define ICE_DEV_ID_E810_XXV_SFP 0x159B #define ICE_DEV_ID_E810_XXV_SFP 0x159B
/* Intel(R) Ethernet Connection E823-C for backplane */ /* Intel(R) Ethernet Connection E823-C for backplane */

View File

@@ -1669,7 +1669,7 @@ static u16 ice_tunnel_idx_to_entry(struct ice_hw *hw, enum ice_tunnel_type type,
for (i = 0; i < hw->tnl.count && i < ICE_TUNNEL_MAX_ENTRIES; i++) for (i = 0; i < hw->tnl.count && i < ICE_TUNNEL_MAX_ENTRIES; i++)
if (hw->tnl.tbl[i].valid && if (hw->tnl.tbl[i].valid &&
hw->tnl.tbl[i].type == type && hw->tnl.tbl[i].type == type &&
idx--) idx-- == 0)
return i; return i;
WARN_ON_ONCE(1); WARN_ON_ONCE(1);
@@ -1829,7 +1829,7 @@ int ice_udp_tunnel_set_port(struct net_device *netdev, unsigned int table,
u16 index; u16 index;
tnl_type = ti->type == UDP_TUNNEL_TYPE_VXLAN ? TNL_VXLAN : TNL_GENEVE; tnl_type = ti->type == UDP_TUNNEL_TYPE_VXLAN ? TNL_VXLAN : TNL_GENEVE;
index = ice_tunnel_idx_to_entry(&pf->hw, idx, tnl_type); index = ice_tunnel_idx_to_entry(&pf->hw, tnl_type, idx);
status = ice_create_tunnel(&pf->hw, index, tnl_type, ntohs(ti->port)); status = ice_create_tunnel(&pf->hw, index, tnl_type, ntohs(ti->port));
if (status) { if (status) {
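A minimal userspace sketch of why the condition change in ice_tunnel_idx_to_entry() matters, with made-up data: as a bare truth test, idx-- is true for any non-zero value, so the old loop returned the first valid entry no matter which index was requested; comparing the post-decrement value against zero returns the idx-th match, which is what the fixed caller (now passing tnl_type and idx in the right order) expects.

#include <stdio.h>

static int find_buggy(const int *valid, int n, int idx)
{
    for (int i = 0; i < n; i++)
        if (valid[i] && idx--)          /* true for any idx != 0 */
            return i;
    return -1;
}

static int find_fixed(const int *valid, int n, int idx)
{
    for (int i = 0; i < n; i++)
        if (valid[i] && idx-- == 0)     /* true only for the idx-th match */
            return i;
    return -1;
}

int main(void)
{
    int valid[] = { 0, 1, 1, 1 };

    /* Ask for the second valid entry (idx == 1):
     * the buggy version returns index 1 (the first match),
     * the fixed version returns index 2 (the second match). */
    printf("buggy=%d fixed=%d\n",
           find_buggy(valid, 4, 1), find_fixed(valid, 4, 1));
    return 0;
}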

View File

@@ -4773,6 +4773,8 @@ static const struct pci_device_id ice_pci_tbl[] = {
{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E810C_BACKPLANE), 0 }, { PCI_VDEVICE(INTEL, ICE_DEV_ID_E810C_BACKPLANE), 0 },
{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E810C_QSFP), 0 }, { PCI_VDEVICE(INTEL, ICE_DEV_ID_E810C_QSFP), 0 },
{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E810C_SFP), 0 }, { PCI_VDEVICE(INTEL, ICE_DEV_ID_E810C_SFP), 0 },
{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E810_XXV_BACKPLANE), 0 },
{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E810_XXV_QSFP), 0 },
{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E810_XXV_SFP), 0 }, { PCI_VDEVICE(INTEL, ICE_DEV_ID_E810_XXV_SFP), 0 },
{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E823C_BACKPLANE), 0 }, { PCI_VDEVICE(INTEL, ICE_DEV_ID_E823C_BACKPLANE), 0 },
{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E823C_QSFP), 0 }, { PCI_VDEVICE(INTEL, ICE_DEV_ID_E823C_QSFP), 0 },

View File

@@ -71,6 +71,7 @@ err_remove_config_dt:
static const struct of_device_id dwmac_generic_match[] = { static const struct of_device_id dwmac_generic_match[] = {
{ .compatible = "st,spear600-gmac"}, { .compatible = "st,spear600-gmac"},
{ .compatible = "snps,dwmac-3.40a"},
{ .compatible = "snps,dwmac-3.50a"}, { .compatible = "snps,dwmac-3.50a"},
{ .compatible = "snps,dwmac-3.610"}, { .compatible = "snps,dwmac-3.610"},
{ .compatible = "snps,dwmac-3.70a"}, { .compatible = "snps,dwmac-3.70a"},

View File

@@ -605,7 +605,7 @@ static int stmmac_hwtstamp_set(struct net_device *dev, struct ifreq *ifr)
config.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT; config.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
ptp_v2 = PTP_TCR_TSVER2ENA; ptp_v2 = PTP_TCR_TSVER2ENA;
snap_type_sel = PTP_TCR_SNAPTYPSEL_1; snap_type_sel = PTP_TCR_SNAPTYPSEL_1;
if (priv->synopsys_id != DWMAC_CORE_5_10) if (priv->synopsys_id < DWMAC_CORE_4_10)
ts_event_en = PTP_TCR_TSEVNTENA; ts_event_en = PTP_TCR_TSEVNTENA;
ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA; ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA; ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;

View File

@@ -508,6 +508,14 @@ stmmac_probe_config_dt(struct platform_device *pdev, const char **mac)
plat->pmt = 1; plat->pmt = 1;
} }
if (of_device_is_compatible(np, "snps,dwmac-3.40a")) {
plat->has_gmac = 1;
plat->enh_desc = 1;
plat->tx_coe = 1;
plat->bugged_jumbo = 1;
plat->pmt = 1;
}
if (of_device_is_compatible(np, "snps,dwmac-4.00") || if (of_device_is_compatible(np, "snps,dwmac-4.00") ||
of_device_is_compatible(np, "snps,dwmac-4.10a") || of_device_is_compatible(np, "snps,dwmac-4.10a") ||
of_device_is_compatible(np, "snps,dwmac-4.20a") || of_device_is_compatible(np, "snps,dwmac-4.20a") ||

View File

@@ -544,6 +544,7 @@ int __mdiobus_register(struct mii_bus *bus, struct module *owner)
err = device_register(&bus->dev); err = device_register(&bus->dev);
if (err) { if (err) {
pr_err("mii_bus %s failed to register\n", bus->id); pr_err("mii_bus %s failed to register\n", bus->id);
put_device(&bus->dev);
return -EINVAL; return -EINVAL;
} }

View File

@@ -117,6 +117,7 @@ config USB_LAN78XX
select PHYLIB select PHYLIB
select MICROCHIP_PHY select MICROCHIP_PHY
select FIXED_PHY select FIXED_PHY
select CRC32
help help
This option adds support for Microchip LAN78XX based USB 2 This option adds support for Microchip LAN78XX based USB 2
& USB 3 10/100/1000 Ethernet adapters. & USB 3 10/100/1000 Ethernet adapters.

View File

@@ -109,14 +109,7 @@ static int get_power_status(struct hotplug_slot *hotplug_slot, u8 *value)
struct zpci_dev *zdev = container_of(hotplug_slot, struct zpci_dev, struct zpci_dev *zdev = container_of(hotplug_slot, struct zpci_dev,
hotplug_slot); hotplug_slot);
switch (zdev->state) { *value = zpci_is_device_configured(zdev) ? 1 : 0;
case ZPCI_FN_STATE_STANDBY:
*value = 0;
break;
default:
*value = 1;
break;
}
return 0; return 0;
} }

View File

@@ -1645,8 +1645,8 @@ int __maybe_unused stm32_pinctrl_resume(struct device *dev)
struct stm32_pinctrl_group *g = pctl->groups; struct stm32_pinctrl_group *g = pctl->groups;
int i; int i;
for (i = g->pin; i < g->pin + pctl->ngroups; i++) for (i = 0; i < pctl->ngroups; i++, g++)
stm32_pinctrl_restore_gpio_regs(pctl, i); stm32_pinctrl_restore_gpio_regs(pctl, g->pin);
return 0; return 0;
} }
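A small sketch of the loop bug fixed here, with invented pin numbers: GPIO groups carry arbitrary pin numbers, so resume must walk the group array and restore each group's own pin rather than assume a contiguous range starting at the first group's pin.

#include <stdio.h>

struct group { int pin; };

static void restore(int pin)
{
    printf("restore pin %d\n", pin);
}

int main(void)
{
    struct group groups[] = { { .pin = 4 }, { .pin = 17 }, { .pin = 42 } };
    int ngroups = 3;
    struct group *g = groups;

    /* Old loop: would restore pins 4, 5, 6, and only the first one exists. */
    /* for (int i = g->pin; i < g->pin + ngroups; i++) restore(i); */

    /* Fixed loop: restores pins 4, 17, 42. */
    for (int i = 0; i < ngroups; i++, g++)
        restore(g->pin);

    return 0;
}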

View File

@@ -247,7 +247,7 @@ static inline int busy_loop(struct intel_scu_ipc_dev *scu)
return -ETIMEDOUT; return -ETIMEDOUT;
} }
/* Wait till ipc ioc interrupt is received or timeout in 3 HZ */ /* Wait till ipc ioc interrupt is received or timeout in 10 HZ */
static inline int ipc_wait_for_interrupt(struct intel_scu_ipc_dev *scu) static inline int ipc_wait_for_interrupt(struct intel_scu_ipc_dev *scu)
{ {
int status; int status;

View File

@@ -220,7 +220,8 @@ int scsi_add_host_with_dma(struct Scsi_Host *shost, struct device *dev,
goto fail; goto fail;
} }
shost->cmd_per_lun = min_t(short, shost->cmd_per_lun, /* Use min_t(int, ...) in case shost->can_queue exceeds SHRT_MAX */
shost->cmd_per_lun = min_t(int, shost->cmd_per_lun,
shost->can_queue); shost->can_queue);
error = scsi_init_sense_cache(shost); error = scsi_init_sense_cache(shost);
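A userspace sketch of the truncation the new comment warns about, using a simplified min_t (the kernel macro also type-checks; both rely on GCC statement expressions) and invented values: taking the minimum in a 16-bit type first wraps a large can_queue on a two's-complement machine and can yield a negative cmd_per_lun, so the comparison is done as int and only the result is stored in the short field.

#include <stdio.h>
#include <limits.h>

#define min_t(type, a, b) ({ type _a = (a), _b = (b); _a < _b ? _a : _b; })

int main(void)
{
    short cmd_per_lun = 3;
    int can_queue = SHRT_MAX + 10;  /* 32777: does not fit in a short */

    short bad  = min_t(short, cmd_per_lun, can_queue); /* can_queue wraps, result is negative */
    short good = min_t(int, cmd_per_lun, can_queue);   /* compare as int, then store */

    printf("bad=%d good=%d\n", bad, good);             /* bad=-32759 good=3 on typical targets */
    return 0;
}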

View File

@@ -414,7 +414,7 @@ done_unmap_sg:
goto done_free_fcport; goto done_free_fcport;
done_free_fcport: done_free_fcport:
if (bsg_request->msgcode == FC_BSG_RPT_ELS) if (bsg_request->msgcode != FC_BSG_RPT_ELS)
qla2x00_free_fcport(fcport); qla2x00_free_fcport(fcport);
done: done:
return rval; return rval;

View File

@@ -2907,8 +2907,6 @@ iscsi_set_param(struct iscsi_transport *transport, struct iscsi_uevent *ev)
session->recovery_tmo = value; session->recovery_tmo = value;
break; break;
default: default:
err = transport->set_param(conn, ev->u.set_param.param,
data, ev->u.set_param.len);
if ((conn->state == ISCSI_CONN_BOUND) || if ((conn->state == ISCSI_CONN_BOUND) ||
(conn->state == ISCSI_CONN_UP)) { (conn->state == ISCSI_CONN_UP)) {
err = transport->set_param(conn, ev->u.set_param.param, err = transport->set_param(conn, ev->u.set_param.param,

View File

@@ -894,9 +894,11 @@ out:
} }
/* /*
* helper function to see if a given name and sequence number found * See if a given name and sequence number found in an inode back reference are
* in an inode back reference are already in a directory and correctly * already in a directory and correctly point to this inode.
* point to this inode *
* Returns: < 0 on error, 0 if the directory entry does not exists and 1 if it
* exists.
*/ */
static noinline int inode_in_dir(struct btrfs_root *root, static noinline int inode_in_dir(struct btrfs_root *root,
struct btrfs_path *path, struct btrfs_path *path,
@@ -905,29 +907,35 @@ static noinline int inode_in_dir(struct btrfs_root *root,
{ {
struct btrfs_dir_item *di; struct btrfs_dir_item *di;
struct btrfs_key location; struct btrfs_key location;
int match = 0; int ret = 0;
di = btrfs_lookup_dir_index_item(NULL, root, path, dirid, di = btrfs_lookup_dir_index_item(NULL, root, path, dirid,
index, name, name_len, 0); index, name, name_len, 0);
if (di && !IS_ERR(di)) { if (IS_ERR(di)) {
if (PTR_ERR(di) != -ENOENT)
ret = PTR_ERR(di);
goto out;
} else if (di) {
btrfs_dir_item_key_to_cpu(path->nodes[0], di, &location); btrfs_dir_item_key_to_cpu(path->nodes[0], di, &location);
if (location.objectid != objectid) if (location.objectid != objectid)
goto out; goto out;
} else } else {
goto out; goto out;
btrfs_release_path(path); }
btrfs_release_path(path);
di = btrfs_lookup_dir_item(NULL, root, path, dirid, name, name_len, 0); di = btrfs_lookup_dir_item(NULL, root, path, dirid, name, name_len, 0);
if (di && !IS_ERR(di)) { if (IS_ERR(di)) {
btrfs_dir_item_key_to_cpu(path->nodes[0], di, &location); ret = PTR_ERR(di);
if (location.objectid != objectid)
goto out;
} else
goto out; goto out;
match = 1; } else if (di) {
btrfs_dir_item_key_to_cpu(path->nodes[0], di, &location);
if (location.objectid == objectid)
ret = 1;
}
out: out:
btrfs_release_path(path); btrfs_release_path(path);
return match; return ret;
} }
/* /*
@@ -1477,10 +1485,12 @@ static noinline int add_inode_ref(struct btrfs_trans_handle *trans,
if (ret) if (ret)
goto out; goto out;
/* if we already have a perfect match, we're done */ ret = inode_in_dir(root, path, btrfs_ino(BTRFS_I(dir)),
if (!inode_in_dir(root, path, btrfs_ino(BTRFS_I(dir)), btrfs_ino(BTRFS_I(inode)), ref_index,
btrfs_ino(BTRFS_I(inode)), ref_index, name, namelen);
name, namelen)) { if (ret < 0) {
goto out;
} else if (ret == 0) {
/* /*
* look for a conflicting back reference in the * look for a conflicting back reference in the
* metadata. if we find one we have to unlink that name * metadata. if we find one we have to unlink that name
@@ -1538,6 +1548,7 @@ static noinline int add_inode_ref(struct btrfs_trans_handle *trans,
btrfs_update_inode(trans, root, inode); btrfs_update_inode(trans, root, inode);
} }
/* Else, ret == 1, we already have a perfect match, we're done. */
ref_ptr = (unsigned long)(ref_ptr + ref_struct_size) + namelen; ref_ptr = (unsigned long)(ref_ptr + ref_struct_size) + namelen;
kfree(name); kfree(name);

View File

@@ -2334,7 +2334,6 @@ static int unsafe_request_wait(struct inode *inode)
int ceph_fsync(struct file *file, loff_t start, loff_t end, int datasync) int ceph_fsync(struct file *file, loff_t start, loff_t end, int datasync)
{ {
struct ceph_file_info *fi = file->private_data;
struct inode *inode = file->f_mapping->host; struct inode *inode = file->f_mapping->host;
struct ceph_inode_info *ci = ceph_inode(inode); struct ceph_inode_info *ci = ceph_inode(inode);
u64 flush_tid; u64 flush_tid;
@@ -2369,14 +2368,9 @@ int ceph_fsync(struct file *file, loff_t start, loff_t end, int datasync)
if (err < 0) if (err < 0)
ret = err; ret = err;
if (errseq_check(&ci->i_meta_err, READ_ONCE(fi->meta_err))) { err = file_check_and_advance_wb_err(file);
spin_lock(&file->f_lock); if (err < 0)
err = errseq_check_and_advance(&ci->i_meta_err, ret = err;
&fi->meta_err);
spin_unlock(&file->f_lock);
if (err < 0)
ret = err;
}
out: out:
dout("fsync %p%s result=%d\n", inode, datasync ? " datasync" : "", ret); dout("fsync %p%s result=%d\n", inode, datasync ? " datasync" : "", ret);
return ret; return ret;

View File

@@ -233,7 +233,6 @@ static int ceph_init_file_info(struct inode *inode, struct file *file,
spin_lock_init(&fi->rw_contexts_lock); spin_lock_init(&fi->rw_contexts_lock);
INIT_LIST_HEAD(&fi->rw_contexts); INIT_LIST_HEAD(&fi->rw_contexts);
fi->meta_err = errseq_sample(&ci->i_meta_err);
fi->filp_gen = READ_ONCE(ceph_inode_to_client(inode)->filp_gen); fi->filp_gen = READ_ONCE(ceph_inode_to_client(inode)->filp_gen);
return 0; return 0;

View File

@@ -529,8 +529,6 @@ struct inode *ceph_alloc_inode(struct super_block *sb)
ceph_fscache_inode_init(ci); ceph_fscache_inode_init(ci);
ci->i_meta_err = 0;
return &ci->vfs_inode; return &ci->vfs_inode;
} }

View File

@@ -1481,7 +1481,6 @@ static void cleanup_session_requests(struct ceph_mds_client *mdsc,
{ {
struct ceph_mds_request *req; struct ceph_mds_request *req;
struct rb_node *p; struct rb_node *p;
struct ceph_inode_info *ci;
dout("cleanup_session_requests mds%d\n", session->s_mds); dout("cleanup_session_requests mds%d\n", session->s_mds);
mutex_lock(&mdsc->mutex); mutex_lock(&mdsc->mutex);
@@ -1490,16 +1489,10 @@ static void cleanup_session_requests(struct ceph_mds_client *mdsc,
struct ceph_mds_request, r_unsafe_item); struct ceph_mds_request, r_unsafe_item);
pr_warn_ratelimited(" dropping unsafe request %llu\n", pr_warn_ratelimited(" dropping unsafe request %llu\n",
req->r_tid); req->r_tid);
if (req->r_target_inode) { if (req->r_target_inode)
/* dropping unsafe change of inode's attributes */ mapping_set_error(req->r_target_inode->i_mapping, -EIO);
ci = ceph_inode(req->r_target_inode); if (req->r_unsafe_dir)
errseq_set(&ci->i_meta_err, -EIO); mapping_set_error(req->r_unsafe_dir->i_mapping, -EIO);
}
if (req->r_unsafe_dir) {
/* dropping unsafe directory operation */
ci = ceph_inode(req->r_unsafe_dir);
errseq_set(&ci->i_meta_err, -EIO);
}
__unregister_request(mdsc, req); __unregister_request(mdsc, req);
} }
/* zero r_attempts, so kick_requests() will re-send requests */ /* zero r_attempts, so kick_requests() will re-send requests */
@@ -1668,7 +1661,7 @@ static int remove_session_caps_cb(struct inode *inode, struct ceph_cap *cap,
spin_unlock(&mdsc->cap_dirty_lock); spin_unlock(&mdsc->cap_dirty_lock);
if (dirty_dropped) { if (dirty_dropped) {
errseq_set(&ci->i_meta_err, -EIO); mapping_set_error(inode->i_mapping, -EIO);
if (ci->i_wrbuffer_ref_head == 0 && if (ci->i_wrbuffer_ref_head == 0 &&
ci->i_wr_ref == 0 && ci->i_wr_ref == 0 &&

View File

@@ -997,16 +997,16 @@ static int ceph_compare_super(struct super_block *sb, struct fs_context *fc)
struct ceph_fs_client *new = fc->s_fs_info; struct ceph_fs_client *new = fc->s_fs_info;
struct ceph_mount_options *fsopt = new->mount_options; struct ceph_mount_options *fsopt = new->mount_options;
struct ceph_options *opt = new->client->options; struct ceph_options *opt = new->client->options;
struct ceph_fs_client *other = ceph_sb_to_client(sb); struct ceph_fs_client *fsc = ceph_sb_to_client(sb);
dout("ceph_compare_super %p\n", sb); dout("ceph_compare_super %p\n", sb);
if (compare_mount_options(fsopt, opt, other)) { if (compare_mount_options(fsopt, opt, fsc)) {
dout("monitor(s)/mount options don't match\n"); dout("monitor(s)/mount options don't match\n");
return 0; return 0;
} }
if ((opt->flags & CEPH_OPT_FSID) && if ((opt->flags & CEPH_OPT_FSID) &&
ceph_fsid_compare(&opt->fsid, &other->client->fsid)) { ceph_fsid_compare(&opt->fsid, &fsc->client->fsid)) {
dout("fsid doesn't match\n"); dout("fsid doesn't match\n");
return 0; return 0;
} }
@@ -1014,6 +1014,17 @@ static int ceph_compare_super(struct super_block *sb, struct fs_context *fc)
dout("flags differ\n"); dout("flags differ\n");
return 0; return 0;
} }
if (fsc->blocklisted && !ceph_test_mount_opt(fsc, CLEANRECOVER)) {
dout("client is blocklisted (and CLEANRECOVER is not set)\n");
return 0;
}
if (fsc->mount_state == CEPH_MOUNT_SHUTDOWN) {
dout("client has been forcibly unmounted\n");
return 0;
}
return 1; return 1;
} }

View File

@@ -430,8 +430,6 @@ struct ceph_inode_info {
struct fscache_cookie *fscache; struct fscache_cookie *fscache;
u32 i_fscache_gen; u32 i_fscache_gen;
#endif #endif
errseq_t i_meta_err;
struct inode vfs_inode; /* at end */ struct inode vfs_inode; /* at end */
}; };
@@ -773,7 +771,6 @@ struct ceph_file_info {
spinlock_t rw_contexts_lock; spinlock_t rw_contexts_lock;
struct list_head rw_contexts; struct list_head rw_contexts;
errseq_t meta_err;
u32 filp_gen; u32 filp_gen;
atomic_t num_locks; atomic_t num_locks;
}; };

View File

@@ -5559,7 +5559,7 @@ static int io_timeout_remove_prep(struct io_kiocb *req,
return -EINVAL; return -EINVAL;
if (unlikely(req->flags & (REQ_F_FIXED_FILE | REQ_F_BUFFER_SELECT))) if (unlikely(req->flags & (REQ_F_FIXED_FILE | REQ_F_BUFFER_SELECT)))
return -EINVAL; return -EINVAL;
if (sqe->ioprio || sqe->buf_index || sqe->len || sqe->timeout_flags | if (sqe->ioprio || sqe->buf_index || sqe->len || sqe->timeout_flags ||
sqe->splice_fd_in) sqe->splice_fd_in)
return -EINVAL; return -EINVAL;

View File

@@ -178,7 +178,7 @@ int kernel_read_file_from_fd(int fd, loff_t offset, void **buf,
struct fd f = fdget(fd); struct fd f = fdget(fd);
int ret = -EBADF; int ret = -EBADF;
if (!f.file) if (!f.file || !(f.file->f_mode & FMODE_READ))
goto out; goto out;
ret = kernel_read_file(f.file, offset, buf, buf_size, file_size, id); ret = kernel_read_file(f.file, offset, buf, buf_size, file_size, id);

View File

@@ -792,7 +792,10 @@ out_close:
svc_xprt_put(xprt); svc_xprt_put(xprt);
} }
out_err: out_err:
nfsd_destroy(net); if (!list_empty(&nn->nfsd_serv->sv_permsocks))
nn->nfsd_serv->sv_nrthreads--;
else
nfsd_destroy(net);
return err; return err;
} }

View File

@@ -7047,7 +7047,7 @@ void ocfs2_set_inode_data_inline(struct inode *inode, struct ocfs2_dinode *di)
int ocfs2_convert_inline_data_to_extents(struct inode *inode, int ocfs2_convert_inline_data_to_extents(struct inode *inode,
struct buffer_head *di_bh) struct buffer_head *di_bh)
{ {
int ret, i, has_data, num_pages = 0; int ret, has_data, num_pages = 0;
int need_free = 0; int need_free = 0;
u32 bit_off, num; u32 bit_off, num;
handle_t *handle; handle_t *handle;
@@ -7056,26 +7056,17 @@ int ocfs2_convert_inline_data_to_extents(struct inode *inode,
struct ocfs2_super *osb = OCFS2_SB(inode->i_sb); struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
struct ocfs2_dinode *di = (struct ocfs2_dinode *)di_bh->b_data; struct ocfs2_dinode *di = (struct ocfs2_dinode *)di_bh->b_data;
struct ocfs2_alloc_context *data_ac = NULL; struct ocfs2_alloc_context *data_ac = NULL;
struct page **pages = NULL; struct page *page = NULL;
loff_t end = osb->s_clustersize;
struct ocfs2_extent_tree et; struct ocfs2_extent_tree et;
int did_quota = 0; int did_quota = 0;
has_data = i_size_read(inode) ? 1 : 0; has_data = i_size_read(inode) ? 1 : 0;
if (has_data) { if (has_data) {
pages = kcalloc(ocfs2_pages_per_cluster(osb->sb),
sizeof(struct page *), GFP_NOFS);
if (pages == NULL) {
ret = -ENOMEM;
mlog_errno(ret);
return ret;
}
ret = ocfs2_reserve_clusters(osb, 1, &data_ac); ret = ocfs2_reserve_clusters(osb, 1, &data_ac);
if (ret) { if (ret) {
mlog_errno(ret); mlog_errno(ret);
goto free_pages; goto out;
} }
} }
@@ -7095,7 +7086,8 @@ int ocfs2_convert_inline_data_to_extents(struct inode *inode,
} }
if (has_data) { if (has_data) {
unsigned int page_end; unsigned int page_end = min_t(unsigned, PAGE_SIZE,
osb->s_clustersize);
u64 phys; u64 phys;
ret = dquot_alloc_space_nodirty(inode, ret = dquot_alloc_space_nodirty(inode,
@@ -7119,15 +7111,8 @@ int ocfs2_convert_inline_data_to_extents(struct inode *inode,
*/ */
block = phys = ocfs2_clusters_to_blocks(inode->i_sb, bit_off); block = phys = ocfs2_clusters_to_blocks(inode->i_sb, bit_off);
/* ret = ocfs2_grab_eof_pages(inode, 0, page_end, &page,
* Non sparse file systems zero on extend, so no need &num_pages);
* to do that now.
*/
if (!ocfs2_sparse_alloc(osb) &&
PAGE_SIZE < osb->s_clustersize)
end = PAGE_SIZE;
ret = ocfs2_grab_eof_pages(inode, 0, end, pages, &num_pages);
if (ret) { if (ret) {
mlog_errno(ret); mlog_errno(ret);
need_free = 1; need_free = 1;
@@ -7138,20 +7123,15 @@ int ocfs2_convert_inline_data_to_extents(struct inode *inode,
* This should populate the 1st page for us and mark * This should populate the 1st page for us and mark
* it up to date. * it up to date.
*/ */
ret = ocfs2_read_inline_data(inode, pages[0], di_bh); ret = ocfs2_read_inline_data(inode, page, di_bh);
if (ret) { if (ret) {
mlog_errno(ret); mlog_errno(ret);
need_free = 1; need_free = 1;
goto out_unlock; goto out_unlock;
} }
page_end = PAGE_SIZE; ocfs2_map_and_dirty_page(inode, handle, 0, page_end, page, 0,
if (PAGE_SIZE > osb->s_clustersize) &phys);
page_end = osb->s_clustersize;
for (i = 0; i < num_pages; i++)
ocfs2_map_and_dirty_page(inode, handle, 0, page_end,
pages[i], i > 0, &phys);
} }
spin_lock(&oi->ip_lock); spin_lock(&oi->ip_lock);
@@ -7182,8 +7162,8 @@ int ocfs2_convert_inline_data_to_extents(struct inode *inode,
} }
out_unlock: out_unlock:
if (pages) if (page)
ocfs2_unlock_and_free_pages(pages, num_pages); ocfs2_unlock_and_free_pages(&page, num_pages);
out_commit: out_commit:
if (ret < 0 && did_quota) if (ret < 0 && did_quota)
@@ -7207,8 +7187,6 @@ out_commit:
out: out:
if (data_ac) if (data_ac)
ocfs2_free_alloc_context(data_ac); ocfs2_free_alloc_context(data_ac);
free_pages:
kfree(pages);
return ret; return ret;
} }

View File

@@ -2171,11 +2171,17 @@ static int ocfs2_initialize_super(struct super_block *sb,
} }
if (ocfs2_clusterinfo_valid(osb)) { if (ocfs2_clusterinfo_valid(osb)) {
/*
* ci_stack and ci_cluster in ocfs2_cluster_info may not be null
* terminated, so make sure no overflow happens here by using
* memcpy. Destination strings will always be null terminated
* because osb is allocated using kzalloc.
*/
osb->osb_stackflags = osb->osb_stackflags =
OCFS2_RAW_SB(di)->s_cluster_info.ci_stackflags; OCFS2_RAW_SB(di)->s_cluster_info.ci_stackflags;
strlcpy(osb->osb_cluster_stack, memcpy(osb->osb_cluster_stack,
OCFS2_RAW_SB(di)->s_cluster_info.ci_stack, OCFS2_RAW_SB(di)->s_cluster_info.ci_stack,
OCFS2_STACK_LABEL_LEN + 1); OCFS2_STACK_LABEL_LEN);
if (strlen(osb->osb_cluster_stack) != OCFS2_STACK_LABEL_LEN) { if (strlen(osb->osb_cluster_stack) != OCFS2_STACK_LABEL_LEN) {
mlog(ML_ERROR, mlog(ML_ERROR,
"couldn't mount because of an invalid " "couldn't mount because of an invalid "
@@ -2184,9 +2190,9 @@ static int ocfs2_initialize_super(struct super_block *sb,
status = -EINVAL; status = -EINVAL;
goto bail; goto bail;
} }
strlcpy(osb->osb_cluster_name, memcpy(osb->osb_cluster_name,
OCFS2_RAW_SB(di)->s_cluster_info.ci_cluster, OCFS2_RAW_SB(di)->s_cluster_info.ci_cluster,
OCFS2_CLUSTER_NAME_LEN + 1); OCFS2_CLUSTER_NAME_LEN);
} else { } else {
/* The empty string is identical with classic tools that /* The empty string is identical with classic tools that
* don't know about s_cluster_info. */ * don't know about s_cluster_info. */
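A userspace illustration of the rule stated in the new comment, with made-up field sizes: ci_stack and ci_cluster are fixed-width, possibly unterminated on-disk fields, so any str*-style copy would run strlen() on the source and read past the field, while memcpy() into a zero-initialised destination copies exactly the field width and stays NUL terminated.

#include <stdio.h>
#include <string.h>

#define LABEL_LEN 4

int main(void)
{
    /* Two adjacent fields, as in an on-disk structure; "o2cb" fills the first
     * field completely, so it carries no terminator of its own. */
    char raw[LABEL_LEN + 8] = { 'o', '2', 'c', 'b', 'X', 'X', 'X', 'X' };

    char dst[LABEL_LEN + 1] = { 0 };    /* zeroed, so it is always terminated */

    memcpy(dst, raw, LABEL_LEN);        /* copies exactly LABEL_LEN bytes */
    printf("stack label: %s\n", dst);   /* prints "o2cb" */

    /* A strcpy/strlcpy-style copy would first scan raw for a NUL and read into
     * the neighbouring field (or beyond the buffer) before truncating. */
    return 0;
}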

View File

@@ -1794,9 +1794,15 @@ static int userfaultfd_writeprotect(struct userfaultfd_ctx *ctx,
if (mode_wp && mode_dontwake) if (mode_wp && mode_dontwake)
return -EINVAL; return -EINVAL;
ret = mwriteprotect_range(ctx->mm, uffdio_wp.range.start, if (mmget_not_zero(ctx->mm)) {
uffdio_wp.range.len, mode_wp, ret = mwriteprotect_range(ctx->mm, uffdio_wp.range.start,
&ctx->mmap_changing); uffdio_wp.range.len, mode_wp,
&ctx->mmap_changing);
mmput(ctx->mm);
} else {
return -ESRCH;
}
if (ret) if (ret)
return ret; return ret;
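A userspace sketch of the guard this hunk adds, with C11 atomics standing in for mmget_not_zero()/mmput(): take a reference on the address space only if its user count is still non-zero, do the work, then drop the reference; if the owner has already exited, report -ESRCH instead of touching a dying mm.

#include <stdatomic.h>
#include <stdio.h>

static _Atomic int mm_users = 1;        /* 1 while the owning task is alive */

static int mmget_not_zero(void)
{
    int v = atomic_load(&mm_users);

    while (v != 0)
        if (atomic_compare_exchange_weak(&mm_users, &v, v + 1))
            return 1;                   /* got a reference */
    return 0;                           /* address space already torn down */
}

static void mmput(void)
{
    atomic_fetch_sub(&mm_users, 1);
}

int main(void)
{
    if (mmget_not_zero()) {
        printf("operate on the address space\n");
        mmput();
    }

    atomic_store(&mm_users, 0);         /* owner exited */
    printf("second attempt: %s\n",
           mmget_not_zero() ? "proceed" : "-ESRCH");
    return 0;
}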

View File

@@ -104,7 +104,7 @@ static inline int elf_core_copy_task_fpregs(struct task_struct *t, struct pt_reg
#endif #endif
} }
#if defined(CONFIG_UM) || defined(CONFIG_IA64) #if (defined(CONFIG_UML) && defined(CONFIG_X86_32)) || defined(CONFIG_IA64)
/* /*
* These functions parameterize elf_core_dump in fs/binfmt_elf.c to write out * These functions parameterize elf_core_dump in fs/binfmt_elf.c to write out
* extra segments containing the gate DSO contents. Dumping its * extra segments containing the gate DSO contents. Dumping its

View File

@@ -225,6 +225,7 @@ struct hda_codec {
#endif #endif
/* misc flags */ /* misc flags */
unsigned int configured:1; /* codec was configured */
unsigned int in_freeing:1; /* being released */ unsigned int in_freeing:1; /* being released */
unsigned int registered:1; /* codec was registered */ unsigned int registered:1; /* codec was registered */
unsigned int display_power_control:1; /* needs display power */ unsigned int display_power_control:1; /* needs display power */

View File

@@ -653,7 +653,7 @@ static int audit_filter_rules(struct task_struct *tsk,
result = audit_comparator(audit_loginuid_set(tsk), f->op, f->val); result = audit_comparator(audit_loginuid_set(tsk), f->op, f->val);
break; break;
case AUDIT_SADDR_FAM: case AUDIT_SADDR_FAM:
if (ctx->sockaddr) if (ctx && ctx->sockaddr)
result = audit_comparator(ctx->sockaddr->ss_family, result = audit_comparator(ctx->sockaddr->ss_family,
f->op, f->val); f->op, f->val);
break; break;

View File

@@ -1300,6 +1300,12 @@ void debug_dma_map_sg(struct device *dev, struct scatterlist *sg,
if (unlikely(dma_debug_disabled())) if (unlikely(dma_debug_disabled()))
return; return;
for_each_sg(sg, s, nents, i) {
check_for_stack(dev, sg_page(s), s->offset);
if (!PageHighMem(sg_page(s)))
check_for_illegal_area(dev, sg_virt(s), s->length);
}
for_each_sg(sg, s, mapped_ents, i) { for_each_sg(sg, s, mapped_ents, i) {
entry = dma_entry_alloc(); entry = dma_entry_alloc();
if (!entry) if (!entry)
@@ -1315,12 +1321,6 @@ void debug_dma_map_sg(struct device *dev, struct scatterlist *sg,
entry->sg_call_ents = nents; entry->sg_call_ents = nents;
entry->sg_mapped_ents = mapped_ents; entry->sg_mapped_ents = mapped_ents;
check_for_stack(dev, sg_page(s), s->offset);
if (!PageHighMem(sg_page(s))) {
check_for_illegal_area(dev, sg_virt(s), sg_dma_len(s));
}
check_sg_segment(dev, s); check_sg_segment(dev, s);
add_dma_entry(entry); add_dma_entry(entry);

View File

@@ -6677,6 +6677,7 @@ void idle_task_exit(void)
finish_arch_post_lock_switch(); finish_arch_post_lock_switch();
} }
scs_task_reset(current);
/* finish_cpu(), as ran on the BP, will clean up the active_mm state */ /* finish_cpu(), as ran on the BP, will clean up the active_mm state */
} }

View File

@@ -6985,7 +6985,7 @@ __ftrace_ops_list_func(unsigned long ip, unsigned long parent_ip,
struct ftrace_ops *op; struct ftrace_ops *op;
int bit; int bit;
bit = trace_test_and_set_recursion(TRACE_LIST_START, TRACE_LIST_MAX); bit = trace_test_and_set_recursion(TRACE_LIST_START);
if (bit < 0) if (bit < 0)
return; return;
@@ -7060,7 +7060,7 @@ static void ftrace_ops_assist_func(unsigned long ip, unsigned long parent_ip,
{ {
int bit; int bit;
bit = trace_test_and_set_recursion(TRACE_LIST_START, TRACE_LIST_MAX); bit = trace_test_and_set_recursion(TRACE_LIST_START);
if (bit < 0) if (bit < 0)
return; return;

View File

@@ -573,18 +573,6 @@ struct tracer {
* then this function calls... * then this function calls...
* The function callback, which can use the FTRACE bits to * The function callback, which can use the FTRACE bits to
* check for recursion. * check for recursion.
*
* Now if the arch does not support a feature, and it calls
* the global list function which calls the ftrace callback
* all three of these steps will do a recursion protection.
* There's no reason to do one if the previous caller already
* did. The recursion that we are protecting against will
* go through the same steps again.
*
* To prevent the multiple recursion checks, if a recursion
* bit is set that is higher than the MAX bit of the current
* check, then we know that the check was made by the previous
* caller, and we can skip the current check.
*/ */
enum { enum {
/* Function recursion bits */ /* Function recursion bits */
@@ -592,12 +580,14 @@ enum {
TRACE_FTRACE_NMI_BIT, TRACE_FTRACE_NMI_BIT,
TRACE_FTRACE_IRQ_BIT, TRACE_FTRACE_IRQ_BIT,
TRACE_FTRACE_SIRQ_BIT, TRACE_FTRACE_SIRQ_BIT,
TRACE_FTRACE_TRANSITION_BIT,
/* INTERNAL_BITs must be greater than FTRACE_BITs */ /* Internal use recursion bits */
TRACE_INTERNAL_BIT, TRACE_INTERNAL_BIT,
TRACE_INTERNAL_NMI_BIT, TRACE_INTERNAL_NMI_BIT,
TRACE_INTERNAL_IRQ_BIT, TRACE_INTERNAL_IRQ_BIT,
TRACE_INTERNAL_SIRQ_BIT, TRACE_INTERNAL_SIRQ_BIT,
TRACE_INTERNAL_TRANSITION_BIT,
TRACE_BRANCH_BIT, TRACE_BRANCH_BIT,
/* /*
@@ -637,12 +627,6 @@ enum {
* function is called to clear it. * function is called to clear it.
*/ */
TRACE_GRAPH_NOTRACE_BIT, TRACE_GRAPH_NOTRACE_BIT,
/*
* When transitioning between context, the preempt_count() may
* not be correct. Allow for a single recursion to cover this case.
*/
TRACE_TRANSITION_BIT,
}; };
#define trace_recursion_set(bit) do { (current)->trace_recursion |= (1<<(bit)); } while (0) #define trace_recursion_set(bit) do { (current)->trace_recursion |= (1<<(bit)); } while (0)
@@ -662,12 +646,18 @@ enum {
#define TRACE_CONTEXT_BITS 4 #define TRACE_CONTEXT_BITS 4
#define TRACE_FTRACE_START TRACE_FTRACE_BIT #define TRACE_FTRACE_START TRACE_FTRACE_BIT
#define TRACE_FTRACE_MAX ((1 << (TRACE_FTRACE_START + TRACE_CONTEXT_BITS)) - 1)
#define TRACE_LIST_START TRACE_INTERNAL_BIT #define TRACE_LIST_START TRACE_INTERNAL_BIT
#define TRACE_LIST_MAX ((1 << (TRACE_LIST_START + TRACE_CONTEXT_BITS)) - 1)
#define TRACE_CONTEXT_MASK TRACE_LIST_MAX #define TRACE_CONTEXT_MASK ((1 << (TRACE_LIST_START + TRACE_CONTEXT_BITS)) - 1)
enum {
TRACE_CTX_NMI,
TRACE_CTX_IRQ,
TRACE_CTX_SOFTIRQ,
TRACE_CTX_NORMAL,
TRACE_CTX_TRANSITION,
};
static __always_inline int trace_get_context_bit(void) static __always_inline int trace_get_context_bit(void)
{ {
@@ -675,59 +665,48 @@ static __always_inline int trace_get_context_bit(void)
if (in_interrupt()) { if (in_interrupt()) {
if (in_nmi()) if (in_nmi())
bit = 0; bit = TRACE_CTX_NMI;
else if (in_irq()) else if (in_irq())
bit = 1; bit = TRACE_CTX_IRQ;
else else
bit = 2; bit = TRACE_CTX_SOFTIRQ;
} else } else
bit = 3; bit = TRACE_CTX_NORMAL;
return bit; return bit;
} }
static __always_inline int trace_test_and_set_recursion(int start, int max) static __always_inline int trace_test_and_set_recursion(int start)
{ {
unsigned int val = current->trace_recursion; unsigned int val = current->trace_recursion;
int bit; int bit;
/* A previous recursion check was made */
if ((val & TRACE_CONTEXT_MASK) > max)
return 0;
bit = trace_get_context_bit() + start; bit = trace_get_context_bit() + start;
if (unlikely(val & (1 << bit))) { if (unlikely(val & (1 << bit))) {
/* /*
* It could be that preempt_count has not been updated during * It could be that preempt_count has not been updated during
* a switch between contexts. Allow for a single recursion. * a switch between contexts. Allow for a single recursion.
*/ */
bit = TRACE_TRANSITION_BIT; bit = start + TRACE_CTX_TRANSITION;
if (trace_recursion_test(bit)) if (trace_recursion_test(bit))
return -1; return -1;
trace_recursion_set(bit); trace_recursion_set(bit);
barrier(); barrier();
return bit + 1; return bit;
} }
/* Normal check passed, clear the transition to allow it again */
trace_recursion_clear(TRACE_TRANSITION_BIT);
val |= 1 << bit; val |= 1 << bit;
current->trace_recursion = val; current->trace_recursion = val;
barrier(); barrier();
return bit + 1; return bit;
} }
static __always_inline void trace_clear_recursion(int bit) static __always_inline void trace_clear_recursion(int bit)
{ {
unsigned int val = current->trace_recursion; unsigned int val = current->trace_recursion;
if (!bit)
return;
bit--;
bit = 1 << bit; bit = 1 << bit;
val &= ~bit; val &= ~bit;

View File

@@ -144,7 +144,7 @@ function_trace_call(unsigned long ip, unsigned long parent_ip,
pc = preempt_count(); pc = preempt_count();
preempt_disable_notrace(); preempt_disable_notrace();
bit = trace_test_and_set_recursion(TRACE_FTRACE_START, TRACE_FTRACE_MAX); bit = trace_test_and_set_recursion(TRACE_FTRACE_START);
if (bit < 0) if (bit < 0)
goto out; goto out;

View File

@@ -1543,7 +1543,8 @@ static __always_inline bool slab_free_hook(struct kmem_cache *s, void *x)
} }
static inline bool slab_free_freelist_hook(struct kmem_cache *s, static inline bool slab_free_freelist_hook(struct kmem_cache *s,
void **head, void **tail) void **head, void **tail,
int *cnt)
{ {
void *object; void *object;
@@ -1578,6 +1579,12 @@ static inline bool slab_free_freelist_hook(struct kmem_cache *s,
*head = object; *head = object;
if (!*tail) if (!*tail)
*tail = object; *tail = object;
} else {
/*
* Adjust the reconstructed freelist depth
* accordingly if object's reuse is delayed.
*/
--(*cnt);
} }
} while (object != old_tail); } while (object != old_tail);
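A userspace sketch of the counting fix in slab_free_freelist_hook(): objects whose reuse is delayed (decided here by a made-up quarantine() test) are left out of the reconstructed freelist, and the bulk count handed to the real free path is decremented to match, so the list length and cnt stay consistent. The types below are illustrative, not the SLUB structures.

#include <stdbool.h>
#include <stdio.h>

struct obj { struct obj *next; int id; };

static bool quarantine(struct obj *o)
{
    return o->id == 2;                  /* pretend object 2 must not be reused yet */
}

int main(void)
{
    struct obj objs[3] = { { .id = 1 }, { .id = 2 }, { .id = 3 } };
    struct obj *head = NULL, *tail = NULL;
    int cnt = 3;

    for (int i = 0; i < 3; i++) {
        struct obj *o = &objs[i];

        if (quarantine(o)) {
            --cnt;                      /* the fix: keep cnt in sync with the list */
            continue;
        }
        o->next = head;
        head = o;
        if (!tail)
            tail = o;
    }

    printf("freeing %d objects:", cnt);
    for (struct obj *o = head; o; o = o->next)
        printf(" %d", o->id);
    printf("\n");                       /* freeing 2 objects: 3 1 */
    return 0;
}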
@@ -3093,7 +3100,9 @@ static __always_inline void do_slab_free(struct kmem_cache *s,
struct kmem_cache_cpu *c; struct kmem_cache_cpu *c;
unsigned long tid; unsigned long tid;
memcg_slab_free_hook(s, &head, 1); /* memcg_slab_free_hook() is already called for bulk free. */
if (!tail)
memcg_slab_free_hook(s, &head, 1);
redo: redo:
/* /*
* Determine the currently cpus per cpu slab. * Determine the currently cpus per cpu slab.
@@ -3137,7 +3146,7 @@ static __always_inline void slab_free(struct kmem_cache *s, struct page *page,
* With KASAN enabled slab_free_freelist_hook modifies the freelist * With KASAN enabled slab_free_freelist_hook modifies the freelist
* to remove objects, whose reuse must be delayed. * to remove objects, whose reuse must be delayed.
*/ */
if (slab_free_freelist_hook(s, &head, &tail)) if (slab_free_freelist_hook(s, &head, &tail, &cnt))
do_slab_free(s, page, head, tail, cnt, addr); do_slab_free(s, page, head, tail, cnt, addr);
} }
@@ -3825,8 +3834,8 @@ static int kmem_cache_open(struct kmem_cache *s, slab_flags_t flags)
if (alloc_kmem_cache_cpus(s)) if (alloc_kmem_cache_cpus(s))
return 0; return 0;
free_kmem_cache_nodes(s);
error: error:
__kmem_cache_release(s);
return -EINVAL; return -EINVAL;
} }

View File

@@ -481,6 +481,12 @@ static void convert_skb_to___skb(struct sk_buff *skb, struct __sk_buff *__skb)
__skb->gso_segs = skb_shinfo(skb)->gso_segs; __skb->gso_segs = skb_shinfo(skb)->gso_segs;
} }
static struct proto bpf_dummy_proto = {
.name = "bpf_dummy",
.owner = THIS_MODULE,
.obj_size = sizeof(struct sock),
};
int bpf_prog_test_run_skb(struct bpf_prog *prog, const union bpf_attr *kattr, int bpf_prog_test_run_skb(struct bpf_prog *prog, const union bpf_attr *kattr,
union bpf_attr __user *uattr) union bpf_attr __user *uattr)
{ {
@@ -525,20 +531,19 @@ int bpf_prog_test_run_skb(struct bpf_prog *prog, const union bpf_attr *kattr,
break; break;
} }
sk = kzalloc(sizeof(struct sock), GFP_USER); sk = sk_alloc(net, AF_UNSPEC, GFP_USER, &bpf_dummy_proto, 1);
if (!sk) { if (!sk) {
kfree(data); kfree(data);
kfree(ctx); kfree(ctx);
return -ENOMEM; return -ENOMEM;
} }
sock_net_set(sk, net);
sock_init_data(NULL, sk); sock_init_data(NULL, sk);
skb = build_skb(data, 0); skb = build_skb(data, 0);
if (!skb) { if (!skb) {
kfree(data); kfree(data);
kfree(ctx); kfree(ctx);
kfree(sk); sk_free(sk);
return -ENOMEM; return -ENOMEM;
} }
skb->sk = sk; skb->sk = sk;
@@ -611,8 +616,7 @@ out:
if (dev && dev != net->loopback_dev) if (dev && dev != net->loopback_dev)
dev_put(dev); dev_put(dev);
kfree_skb(skb); kfree_skb(skb);
bpf_sk_storage_free(sk); sk_free(sk);
kfree(sk);
kfree(ctx); kfree(ctx);
return ret; return ret;
} }

View File

@@ -931,9 +931,7 @@ static inline unsigned long br_multicast_lmqt(const struct net_bridge *br)
static inline unsigned long br_multicast_gmi(const struct net_bridge *br) static inline unsigned long br_multicast_gmi(const struct net_bridge *br)
{ {
/* use the RFC default of 2 for QRV */ return br->multicast_membership_interval;
return 2 * br->multicast_query_interval +
br->multicast_query_response_interval;
} }
#else #else
static inline int br_multicast_rcv(struct net_bridge *br, static inline int br_multicast_rcv(struct net_bridge *br,

View File

@@ -121,7 +121,7 @@ enum {
struct tpcon { struct tpcon {
int idx; int idx;
int len; int len;
u8 state; u32 state;
u8 bs; u8 bs;
u8 sn; u8 sn;
u8 ll_dl; u8 ll_dl;
@@ -846,6 +846,7 @@ static int isotp_sendmsg(struct socket *sock, struct msghdr *msg, size_t size)
{ {
struct sock *sk = sock->sk; struct sock *sk = sock->sk;
struct isotp_sock *so = isotp_sk(sk); struct isotp_sock *so = isotp_sk(sk);
u32 old_state = so->tx.state;
struct sk_buff *skb; struct sk_buff *skb;
struct net_device *dev; struct net_device *dev;
struct canfd_frame *cf; struct canfd_frame *cf;
@@ -858,37 +859,45 @@ static int isotp_sendmsg(struct socket *sock, struct msghdr *msg, size_t size)
return -EADDRNOTAVAIL; return -EADDRNOTAVAIL;
/* we do not support multiple buffers - for now */ /* we do not support multiple buffers - for now */
if (so->tx.state != ISOTP_IDLE || wq_has_sleeper(&so->wait)) { if (cmpxchg(&so->tx.state, ISOTP_IDLE, ISOTP_SENDING) != ISOTP_IDLE ||
if (msg->msg_flags & MSG_DONTWAIT) wq_has_sleeper(&so->wait)) {
return -EAGAIN; if (msg->msg_flags & MSG_DONTWAIT) {
err = -EAGAIN;
goto err_out;
}
/* wait for complete transmission of current pdu */ /* wait for complete transmission of current pdu */
wait_event_interruptible(so->wait, so->tx.state == ISOTP_IDLE); err = wait_event_interruptible(so->wait, so->tx.state == ISOTP_IDLE);
if (err)
goto err_out;
} }
if (!size || size > MAX_MSG_LENGTH) if (!size || size > MAX_MSG_LENGTH) {
return -EINVAL; err = -EINVAL;
goto err_out;
}
err = memcpy_from_msg(so->tx.buf, msg, size); err = memcpy_from_msg(so->tx.buf, msg, size);
if (err < 0) if (err < 0)
return err; goto err_out;
dev = dev_get_by_index(sock_net(sk), so->ifindex); dev = dev_get_by_index(sock_net(sk), so->ifindex);
if (!dev) if (!dev) {
return -ENXIO; err = -ENXIO;
goto err_out;
}
skb = sock_alloc_send_skb(sk, so->ll.mtu + sizeof(struct can_skb_priv), skb = sock_alloc_send_skb(sk, so->ll.mtu + sizeof(struct can_skb_priv),
msg->msg_flags & MSG_DONTWAIT, &err); msg->msg_flags & MSG_DONTWAIT, &err);
if (!skb) { if (!skb) {
dev_put(dev); dev_put(dev);
return err; goto err_out;
} }
can_skb_reserve(skb); can_skb_reserve(skb);
can_skb_prv(skb)->ifindex = dev->ifindex; can_skb_prv(skb)->ifindex = dev->ifindex;
can_skb_prv(skb)->skbcnt = 0; can_skb_prv(skb)->skbcnt = 0;
so->tx.state = ISOTP_SENDING;
so->tx.len = size; so->tx.len = size;
so->tx.idx = 0; so->tx.idx = 0;
@@ -947,15 +956,25 @@ static int isotp_sendmsg(struct socket *sock, struct msghdr *msg, size_t size)
if (err) { if (err) {
pr_notice_once("can-isotp: %s: can_send_ret %d\n", pr_notice_once("can-isotp: %s: can_send_ret %d\n",
__func__, err); __func__, err);
return err; goto err_out;
} }
if (wait_tx_done) { if (wait_tx_done) {
/* wait for complete transmission of current pdu */ /* wait for complete transmission of current pdu */
wait_event_interruptible(so->wait, so->tx.state == ISOTP_IDLE); wait_event_interruptible(so->wait, so->tx.state == ISOTP_IDLE);
if (sk->sk_err)
return -sk->sk_err;
} }
return size; return size;
err_out:
so->tx.state = old_state;
if (so->tx.state == ISOTP_IDLE)
wake_up_interruptible(&so->wait);
return err;
} }
static int isotp_recvmsg(struct socket *sock, struct msghdr *msg, size_t size, static int isotp_recvmsg(struct socket *sock, struct msghdr *msg, size_t size,
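A userspace sketch of the state handling isotp_sendmsg() switches to, using C11 atomics in place of cmpxchg(): the sender claims the IDLE to SENDING transition atomically, so exactly one concurrent caller wins and the others either get -EAGAIN or wait; the error paths then restore the old state and wake any waiters. Only the state names are taken from the driver.

#include <stdatomic.h>
#include <stdio.h>

enum { ISOTP_IDLE, ISOTP_SENDING };

static _Atomic int tx_state = ISOTP_IDLE;

static int try_start_send(void)
{
    int expected = ISOTP_IDLE;

    /* Succeeds for exactly one caller; losers see the current state in 'expected'. */
    if (!atomic_compare_exchange_strong(&tx_state, &expected, ISOTP_SENDING))
        return -1;                      /* somebody else is already sending */
    return 0;
}

int main(void)
{
    printf("first caller:  %d\n", try_start_send());    /* 0: claimed the slot */
    printf("second caller: %d\n", try_start_send());    /* -1: would have to wait */

    atomic_store(&tx_state, ISOTP_IDLE);                /* transmission finished */
    return 0;
}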

View File

@@ -326,6 +326,7 @@ int j1939_session_activate(struct j1939_session *session);
void j1939_tp_schedule_txtimer(struct j1939_session *session, int msec); void j1939_tp_schedule_txtimer(struct j1939_session *session, int msec);
void j1939_session_timers_cancel(struct j1939_session *session); void j1939_session_timers_cancel(struct j1939_session *session);
#define J1939_MIN_TP_PACKET_SIZE 9
#define J1939_MAX_TP_PACKET_SIZE (7 * 0xff) #define J1939_MAX_TP_PACKET_SIZE (7 * 0xff)
#define J1939_MAX_ETP_PACKET_SIZE (7 * 0x00ffffff) #define J1939_MAX_ETP_PACKET_SIZE (7 * 0x00ffffff)

View File

@@ -249,11 +249,14 @@ struct j1939_priv *j1939_netdev_start(struct net_device *ndev)
struct j1939_priv *priv, *priv_new; struct j1939_priv *priv, *priv_new;
int ret; int ret;
priv = j1939_priv_get_by_ndev(ndev); spin_lock(&j1939_netdev_lock);
priv = j1939_priv_get_by_ndev_locked(ndev);
if (priv) { if (priv) {
kref_get(&priv->rx_kref); kref_get(&priv->rx_kref);
spin_unlock(&j1939_netdev_lock);
return priv; return priv;
} }
spin_unlock(&j1939_netdev_lock);
priv = j1939_priv_create(ndev); priv = j1939_priv_create(ndev);
if (!priv) if (!priv)
@@ -269,10 +272,10 @@ struct j1939_priv *j1939_netdev_start(struct net_device *ndev)
/* Someone was faster than us, use their priv and roll /* Someone was faster than us, use their priv and roll
* back our's. * back our's.
*/ */
kref_get(&priv_new->rx_kref);
spin_unlock(&j1939_netdev_lock); spin_unlock(&j1939_netdev_lock);
dev_put(ndev); dev_put(ndev);
kfree(priv); kfree(priv);
kref_get(&priv_new->rx_kref);
return priv_new; return priv_new;
} }
j1939_priv_set(ndev, priv); j1939_priv_set(ndev, priv);

View File

@@ -1230,12 +1230,11 @@ static enum hrtimer_restart j1939_tp_rxtimer(struct hrtimer *hrtimer)
session->err = -ETIME; session->err = -ETIME;
j1939_session_deactivate(session); j1939_session_deactivate(session);
} else { } else {
netdev_alert(priv->ndev, "%s: 0x%p: rx timeout, send abort\n",
__func__, session);
j1939_session_list_lock(session->priv); j1939_session_list_lock(session->priv);
if (session->state >= J1939_SESSION_ACTIVE && if (session->state >= J1939_SESSION_ACTIVE &&
session->state < J1939_SESSION_ACTIVE_MAX) { session->state < J1939_SESSION_ACTIVE_MAX) {
netdev_alert(priv->ndev, "%s: 0x%p: rx timeout, send abort\n",
__func__, session);
j1939_session_get(session); j1939_session_get(session);
hrtimer_start(&session->rxtimer, hrtimer_start(&session->rxtimer,
ms_to_ktime(J1939_XTP_ABORT_TIMEOUT_MS), ms_to_ktime(J1939_XTP_ABORT_TIMEOUT_MS),
@@ -1597,6 +1596,8 @@ j1939_session *j1939_xtp_rx_rts_session_new(struct j1939_priv *priv,
abort = J1939_XTP_ABORT_FAULT; abort = J1939_XTP_ABORT_FAULT;
else if (len > priv->tp_max_packet_size) else if (len > priv->tp_max_packet_size)
abort = J1939_XTP_ABORT_RESOURCE; abort = J1939_XTP_ABORT_RESOURCE;
else if (len < J1939_MIN_TP_PACKET_SIZE)
abort = J1939_XTP_ABORT_FAULT;
} }
if (abort != J1939_XTP_NO_ABORT) { if (abort != J1939_XTP_NO_ABORT) {
@@ -1771,6 +1772,7 @@ static void j1939_xtp_rx_dpo(struct j1939_priv *priv, struct sk_buff *skb,
static void j1939_xtp_rx_dat_one(struct j1939_session *session, static void j1939_xtp_rx_dat_one(struct j1939_session *session,
struct sk_buff *skb) struct sk_buff *skb)
{ {
enum j1939_xtp_abort abort = J1939_XTP_ABORT_FAULT;
struct j1939_priv *priv = session->priv; struct j1939_priv *priv = session->priv;
struct j1939_sk_buff_cb *skcb; struct j1939_sk_buff_cb *skcb;
struct sk_buff *se_skb = NULL; struct sk_buff *se_skb = NULL;
@@ -1785,9 +1787,11 @@ static void j1939_xtp_rx_dat_one(struct j1939_session *session,
skcb = j1939_skb_to_cb(skb); skcb = j1939_skb_to_cb(skb);
dat = skb->data; dat = skb->data;
if (skb->len <= 1) if (skb->len != 8) {
/* makes no sense */ /* makes no sense */
abort = J1939_XTP_ABORT_UNEXPECTED_DATA;
goto out_session_cancel; goto out_session_cancel;
}
switch (session->last_cmd) { switch (session->last_cmd) {
case 0xff: case 0xff:
@@ -1885,7 +1889,7 @@ static void j1939_xtp_rx_dat_one(struct j1939_session *session,
out_session_cancel: out_session_cancel:
kfree_skb(se_skb); kfree_skb(se_skb);
j1939_session_timers_cancel(session); j1939_session_timers_cancel(session);
j1939_session_cancel(session, J1939_XTP_ABORT_FAULT); j1939_session_cancel(session, abort);
j1939_session_put(session); j1939_session_put(session);
} }

View File

@@ -1022,6 +1022,20 @@ static void tcp_v4_reqsk_destructor(struct request_sock *req)
DEFINE_STATIC_KEY_FALSE(tcp_md5_needed); DEFINE_STATIC_KEY_FALSE(tcp_md5_needed);
EXPORT_SYMBOL(tcp_md5_needed); EXPORT_SYMBOL(tcp_md5_needed);
static bool better_md5_match(struct tcp_md5sig_key *old, struct tcp_md5sig_key *new)
{
if (!old)
return true;
/* l3index always overrides non-l3index */
if (old->l3index && new->l3index == 0)
return false;
if (old->l3index == 0 && new->l3index)
return true;
return old->prefixlen < new->prefixlen;
}
/* Find the Key structure for an address. */ /* Find the Key structure for an address. */
struct tcp_md5sig_key *__tcp_md5_do_lookup(const struct sock *sk, int l3index, struct tcp_md5sig_key *__tcp_md5_do_lookup(const struct sock *sk, int l3index,
const union tcp_md5_addr *addr, const union tcp_md5_addr *addr,
@@ -1059,8 +1073,7 @@ struct tcp_md5sig_key *__tcp_md5_do_lookup(const struct sock *sk, int l3index,
match = false; match = false;
} }
if (match && (!best_match || if (match && better_md5_match(best_match, key))
key->prefixlen > best_match->prefixlen))
best_match = key; best_match = key;
} }
return best_match; return best_match;
@@ -1090,7 +1103,7 @@ static struct tcp_md5sig_key *tcp_md5_do_lookup_exact(const struct sock *sk,
lockdep_sock_is_held(sk)) { lockdep_sock_is_held(sk)) {
if (key->family != family) if (key->family != family)
continue; continue;
if (key->l3index && key->l3index != l3index) if (key->l3index != l3index)
continue; continue;
if (!memcmp(&key->addr, addr, size) && if (!memcmp(&key->addr, addr, size) &&
key->prefixlen == prefixlen) key->prefixlen == prefixlen)
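A userspace rendering of the new selection rule, reusing the better_md5_match() logic over two invented keys: a key bound to an L3 device always beats an unbound one, and prefix length only breaks ties between keys of the same kind, which is why the /24 bound to a VRF wins over the unbound /32 below.

#include <stdbool.h>
#include <stdio.h>

struct key { int l3index; int prefixlen; const char *name; };

static bool better_md5_match(const struct key *old, const struct key *new)
{
    if (!old)
        return true;
    /* l3index always overrides non-l3index */
    if (old->l3index && new->l3index == 0)
        return false;
    if (old->l3index == 0 && new->l3index)
        return true;
    return old->prefixlen < new->prefixlen;
}

int main(void)
{
    struct key keys[] = {
        { .l3index = 0, .prefixlen = 32, .name = "unbound /32" },
        { .l3index = 5, .prefixlen = 24, .name = "vrf-bound /24" },
    };
    const struct key *best = NULL;

    for (unsigned i = 0; i < sizeof(keys) / sizeof(keys[0]); i++)
        if (better_md5_match(best, &keys[i]))
            best = &keys[i];

    printf("selected: %s\n", best->name);   /* vrf-bound /24 wins over unbound /32 */
    return 0;
}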

View File

@@ -487,13 +487,14 @@ static bool ip6_pkt_too_big(const struct sk_buff *skb, unsigned int mtu)
int ip6_forward(struct sk_buff *skb) int ip6_forward(struct sk_buff *skb)
{ {
struct inet6_dev *idev = __in6_dev_get_safely(skb->dev);
struct dst_entry *dst = skb_dst(skb); struct dst_entry *dst = skb_dst(skb);
struct ipv6hdr *hdr = ipv6_hdr(skb); struct ipv6hdr *hdr = ipv6_hdr(skb);
struct inet6_skb_parm *opt = IP6CB(skb); struct inet6_skb_parm *opt = IP6CB(skb);
struct net *net = dev_net(dst->dev); struct net *net = dev_net(dst->dev);
struct inet6_dev *idev;
u32 mtu; u32 mtu;
idev = __in6_dev_get_safely(dev_get_by_index_rcu(net, IP6CB(skb)->iif));
if (net->ipv6.devconf_all->forwarding == 0) if (net->ipv6.devconf_all->forwarding == 0)
goto error; goto error;

View File

@@ -25,12 +25,7 @@ MODULE_AUTHOR("Andras Kis-Szabo <kisza@sch.bme.hu>");
static inline bool static inline bool
segsleft_match(u_int32_t min, u_int32_t max, u_int32_t id, bool invert) segsleft_match(u_int32_t min, u_int32_t max, u_int32_t id, bool invert)
{ {
bool r; return (id >= min && id <= max) ^ invert;
pr_debug("segsleft_match:%c 0x%x <= 0x%x <= 0x%x\n",
invert ? '!' : ' ', min, id, max);
r = (id >= min && id <= max) ^ invert;
pr_debug(" result %s\n", r ? "PASS" : "FAILED");
return r;
} }
static bool rt_mt6(const struct sk_buff *skb, struct xt_action_param *par) static bool rt_mt6(const struct sk_buff *skb, struct xt_action_param *par)
@@ -65,30 +60,6 @@ static bool rt_mt6(const struct sk_buff *skb, struct xt_action_param *par)
return false; return false;
} }
pr_debug("IPv6 RT LEN %u %u ", hdrlen, rh->hdrlen);
pr_debug("TYPE %04X ", rh->type);
pr_debug("SGS_LEFT %u %02X\n", rh->segments_left, rh->segments_left);
pr_debug("IPv6 RT segsleft %02X ",
segsleft_match(rtinfo->segsleft[0], rtinfo->segsleft[1],
rh->segments_left,
!!(rtinfo->invflags & IP6T_RT_INV_SGS)));
pr_debug("type %02X %02X %02X ",
rtinfo->rt_type, rh->type,
(!(rtinfo->flags & IP6T_RT_TYP) ||
((rtinfo->rt_type == rh->type) ^
!!(rtinfo->invflags & IP6T_RT_INV_TYP))));
pr_debug("len %02X %04X %02X ",
rtinfo->hdrlen, hdrlen,
!(rtinfo->flags & IP6T_RT_LEN) ||
((rtinfo->hdrlen == hdrlen) ^
!!(rtinfo->invflags & IP6T_RT_INV_LEN)));
pr_debug("res %02X %02X %02X ",
rtinfo->flags & IP6T_RT_RES,
((const struct rt0_hdr *)rh)->reserved,
!((rtinfo->flags & IP6T_RT_RES) &&
(((const struct rt0_hdr *)rh)->reserved)));
ret = (segsleft_match(rtinfo->segsleft[0], rtinfo->segsleft[1], ret = (segsleft_match(rtinfo->segsleft[0], rtinfo->segsleft[1],
rh->segments_left, rh->segments_left,
!!(rtinfo->invflags & IP6T_RT_INV_SGS))) && !!(rtinfo->invflags & IP6T_RT_INV_SGS))) &&
@@ -107,22 +78,22 @@ static bool rt_mt6(const struct sk_buff *skb, struct xt_action_param *par)
reserved), reserved),
sizeof(_reserved), sizeof(_reserved),
&_reserved); &_reserved);
if (!rp) {
par->hotdrop = true;
return false;
}
ret = (*rp == 0); ret = (*rp == 0);
} }
pr_debug("#%d ", rtinfo->addrnr);
if (!(rtinfo->flags & IP6T_RT_FST)) { if (!(rtinfo->flags & IP6T_RT_FST)) {
return ret; return ret;
} else if (rtinfo->flags & IP6T_RT_FST_NSTRICT) { } else if (rtinfo->flags & IP6T_RT_FST_NSTRICT) {
pr_debug("Not strict ");
if (rtinfo->addrnr > (unsigned int)((hdrlen - 8) / 16)) { if (rtinfo->addrnr > (unsigned int)((hdrlen - 8) / 16)) {
pr_debug("There isn't enough space\n");
return false; return false;
} else { } else {
unsigned int i = 0; unsigned int i = 0;
pr_debug("#%d ", rtinfo->addrnr);
for (temp = 0; for (temp = 0;
temp < (unsigned int)((hdrlen - 8) / 16); temp < (unsigned int)((hdrlen - 8) / 16);
temp++) { temp++) {
@@ -138,26 +109,20 @@ static bool rt_mt6(const struct sk_buff *skb, struct xt_action_param *par)
return false; return false;
} }
if (ipv6_addr_equal(ap, &rtinfo->addrs[i])) { if (ipv6_addr_equal(ap, &rtinfo->addrs[i]))
pr_debug("i=%d temp=%d;\n", i, temp);
i++; i++;
}
if (i == rtinfo->addrnr) if (i == rtinfo->addrnr)
break; break;
} }
pr_debug("i=%d #%d\n", i, rtinfo->addrnr);
if (i == rtinfo->addrnr) if (i == rtinfo->addrnr)
return ret; return ret;
else else
return false; return false;
} }
} else { } else {
pr_debug("Strict ");
if (rtinfo->addrnr > (unsigned int)((hdrlen - 8) / 16)) { if (rtinfo->addrnr > (unsigned int)((hdrlen - 8) / 16)) {
pr_debug("There isn't enough space\n");
return false; return false;
} else { } else {
pr_debug("#%d ", rtinfo->addrnr);
for (temp = 0; temp < rtinfo->addrnr; temp++) { for (temp = 0; temp < rtinfo->addrnr; temp++) {
ap = skb_header_pointer(skb, ap = skb_header_pointer(skb,
ptr ptr
@@ -173,7 +138,6 @@ static bool rt_mt6(const struct sk_buff *skb, struct xt_action_param *par)
if (!ipv6_addr_equal(ap, &rtinfo->addrs[temp])) if (!ipv6_addr_equal(ap, &rtinfo->addrs[temp]))
break; break;
} }
pr_debug("temp=%d #%d\n", temp, rtinfo->addrnr);
if (temp == rtinfo->addrnr && if (temp == rtinfo->addrnr &&
temp == (unsigned int)((hdrlen - 8) / 16)) temp == (unsigned int)((hdrlen - 8) / 16))
return ret; return ret;

View File

@@ -94,7 +94,7 @@ config NF_CONNTRACK_MARK
config NF_CONNTRACK_SECMARK config NF_CONNTRACK_SECMARK
bool 'Connection tracking security mark support' bool 'Connection tracking security mark support'
depends on NETWORK_SECMARK depends on NETWORK_SECMARK
default m if NETFILTER_ADVANCED=n default y if NETFILTER_ADVANCED=n
help help
This option enables security markings to be applied to This option enables security markings to be applied to
connections. Typically they are copied to connections from connections. Typically they are copied to connections from

View File

@@ -4090,6 +4090,11 @@ static int __net_init ip_vs_control_net_init_sysctl(struct netns_ipvs *ipvs)
tbl[idx++].data = &ipvs->sysctl_conn_reuse_mode; tbl[idx++].data = &ipvs->sysctl_conn_reuse_mode;
tbl[idx++].data = &ipvs->sysctl_schedule_icmp; tbl[idx++].data = &ipvs->sysctl_schedule_icmp;
tbl[idx++].data = &ipvs->sysctl_ignore_tunneled; tbl[idx++].data = &ipvs->sysctl_ignore_tunneled;
#ifdef CONFIG_IP_VS_DEBUG
/* Global sysctls must be ro in non-init netns */
if (!net_eq(net, &init_net))
tbl[idx++].mode = 0444;
#endif
ipvs->sysctl_hdr = register_net_sysctl(net, "net/ipv4/vs", tbl); ipvs->sysctl_hdr = register_net_sysctl(net, "net/ipv4/vs", tbl);
if (ipvs->sysctl_hdr == NULL) { if (ipvs->sysctl_hdr == NULL) {

View File

@@ -137,7 +137,7 @@ static int idletimer_tg_create(struct idletimer_tg_info *info)
{ {
int ret; int ret;
info->timer = kmalloc(sizeof(*info->timer), GFP_KERNEL); info->timer = kzalloc(sizeof(*info->timer), GFP_KERNEL);
if (!info->timer) { if (!info->timer) {
ret = -ENOMEM; ret = -ENOMEM;
goto out; goto out;

View File

@@ -277,6 +277,8 @@ static void nci_core_conn_close_rsp_packet(struct nci_dev *ndev,
ndev->cur_conn_id); ndev->cur_conn_id);
if (conn_info) { if (conn_info) {
list_del(&conn_info->list); list_del(&conn_info->list);
if (conn_info == ndev->rf_conn_info)
ndev->rf_conn_info = NULL;
devm_kfree(&ndev->nfc_dev->dev, conn_info); devm_kfree(&ndev->nfc_dev->dev, conn_info);
} }
} }
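A small sketch of the use-after-free this hunk closes, with invented types: the device caches a shortcut pointer (rf_conn_info) into its connection list, so when the matching entry is deleted the shortcut must be cleared as well, otherwise later code dereferences freed memory.

#include <stdio.h>
#include <stdlib.h>

struct conn_info { int id; };

struct dev {
    struct conn_info *entries[2];
    struct conn_info *rf_conn_info;     /* cached alias into entries[] */
};

static void close_conn(struct dev *d, int i)
{
    if (d->entries[i] == d->rf_conn_info)
        d->rf_conn_info = NULL;         /* the fix: drop the alias before freeing */
    free(d->entries[i]);
    d->entries[i] = NULL;
}

int main(void)
{
    struct dev d = { 0 };

    d.entries[0] = calloc(1, sizeof(struct conn_info));
    d.rf_conn_info = d.entries[0];

    close_conn(&d, 0);
    printf("rf_conn_info is %s\n", d.rf_conn_info ? "dangling" : "NULL");
    return 0;
}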

View File

@@ -19,6 +19,10 @@ gcc-plugin-cflags-$(CONFIG_GCC_PLUGIN_STRUCTLEAK_BYREF) \
+= -fplugin-arg-structleak_plugin-byref += -fplugin-arg-structleak_plugin-byref
gcc-plugin-cflags-$(CONFIG_GCC_PLUGIN_STRUCTLEAK_BYREF_ALL) \ gcc-plugin-cflags-$(CONFIG_GCC_PLUGIN_STRUCTLEAK_BYREF_ALL) \
+= -fplugin-arg-structleak_plugin-byref-all += -fplugin-arg-structleak_plugin-byref-all
ifdef CONFIG_GCC_PLUGIN_STRUCTLEAK
DISABLE_STRUCTLEAK_PLUGIN += -fplugin-arg-structleak_plugin-disable
endif
export DISABLE_STRUCTLEAK_PLUGIN
gcc-plugin-cflags-$(CONFIG_GCC_PLUGIN_STRUCTLEAK) \ gcc-plugin-cflags-$(CONFIG_GCC_PLUGIN_STRUCTLEAK) \
+= -DSTRUCTLEAK_PLUGIN += -DSTRUCTLEAK_PLUGIN

View File

@@ -421,8 +421,9 @@ int snd_hdac_bus_reset_link(struct hdac_bus *bus, bool full_reset)
if (!full_reset) if (!full_reset)
goto skip_reset; goto skip_reset;
/* clear STATESTS */ /* clear STATESTS if not in reset */
snd_hdac_chip_writew(bus, STATESTS, STATESTS_INT_MASK); if (snd_hdac_chip_readb(bus, GCTL) & AZX_GCTL_RESET)
snd_hdac_chip_writew(bus, STATESTS, STATESTS_INT_MASK);
/* reset controller */ /* reset controller */
snd_hdac_bus_enter_link_reset(bus); snd_hdac_bus_enter_link_reset(bus);

View File

@@ -301,29 +301,31 @@ int snd_hda_codec_configure(struct hda_codec *codec)
{ {
int err; int err;
if (codec->configured)
return 0;
if (is_generic_config(codec)) if (is_generic_config(codec))
codec->probe_id = HDA_CODEC_ID_GENERIC; codec->probe_id = HDA_CODEC_ID_GENERIC;
else else
codec->probe_id = 0; codec->probe_id = 0;
err = snd_hdac_device_register(&codec->core); if (!device_is_registered(&codec->core.dev)) {
if (err < 0) err = snd_hdac_device_register(&codec->core);
return err; if (err < 0)
return err;
}
if (!codec->preset) if (!codec->preset)
codec_bind_module(codec); codec_bind_module(codec);
if (!codec->preset) { if (!codec->preset) {
err = codec_bind_generic(codec); err = codec_bind_generic(codec);
if (err < 0) { if (err < 0) {
codec_err(codec, "Unable to bind the codec\n"); codec_dbg(codec, "Unable to bind the codec\n");
goto error; return err;
} }
} }
codec->configured = 1;
return 0; return 0;
error:
snd_hdac_device_unregister(&codec->core);
return err;
} }
EXPORT_SYMBOL_GPL(snd_hda_codec_configure); EXPORT_SYMBOL_GPL(snd_hda_codec_configure);

View File

@@ -791,6 +791,7 @@ void snd_hda_codec_cleanup_for_unbind(struct hda_codec *codec)
snd_array_free(&codec->nids); snd_array_free(&codec->nids);
remove_conn_list(codec); remove_conn_list(codec);
snd_hdac_regmap_exit(&codec->core); snd_hdac_regmap_exit(&codec->core);
codec->configured = 0;
} }
EXPORT_SYMBOL_GPL(snd_hda_codec_cleanup_for_unbind); EXPORT_SYMBOL_GPL(snd_hda_codec_cleanup_for_unbind);

View File

@@ -25,6 +25,7 @@
#include <sound/core.h> #include <sound/core.h>
#include <sound/initval.h> #include <sound/initval.h>
#include "hda_controller.h" #include "hda_controller.h"
#include "hda_local.h"
#define CREATE_TRACE_POINTS #define CREATE_TRACE_POINTS
#include "hda_controller_trace.h" #include "hda_controller_trace.h"
@@ -1259,17 +1260,24 @@ EXPORT_SYMBOL_GPL(azx_probe_codecs);
int azx_codec_configure(struct azx *chip) int azx_codec_configure(struct azx *chip)
{ {
struct hda_codec *codec, *next; struct hda_codec *codec, *next;
int success = 0;
/* use _safe version here since snd_hda_codec_configure() deregisters list_for_each_codec(codec, &chip->bus) {
* the device upon error and deletes itself from the bus list. if (!snd_hda_codec_configure(codec))
*/ success++;
list_for_each_codec_safe(codec, next, &chip->bus) {
snd_hda_codec_configure(codec);
} }
if (!azx_bus(chip)->num_codecs) if (success) {
return -ENODEV; /* unregister failed codecs if any codec has been probed */
return 0; list_for_each_codec_safe(codec, next, &chip->bus) {
if (!codec->configured) {
codec_err(codec, "Unable to configure, disabling\n");
snd_hdac_device_unregister(&codec->core);
}
}
}
return success ? 0 : -ENODEV;
} }
EXPORT_SYMBOL_GPL(azx_codec_configure); EXPORT_SYMBOL_GPL(azx_codec_configure);

View File

@@ -41,7 +41,7 @@
/* 24 unused */ /* 24 unused */
#define AZX_DCAPS_COUNT_LPIB_DELAY (1 << 25) /* Take LPIB as delay */ #define AZX_DCAPS_COUNT_LPIB_DELAY (1 << 25) /* Take LPIB as delay */
#define AZX_DCAPS_PM_RUNTIME (1 << 26) /* runtime PM support */ #define AZX_DCAPS_PM_RUNTIME (1 << 26) /* runtime PM support */
/* 27 unused */ #define AZX_DCAPS_RETRY_PROBE (1 << 27) /* retry probe if no codec is configured */
#define AZX_DCAPS_CORBRP_SELF_CLEAR (1 << 28) /* CORBRP clears itself after reset */ #define AZX_DCAPS_CORBRP_SELF_CLEAR (1 << 28) /* CORBRP clears itself after reset */
#define AZX_DCAPS_NO_MSI64 (1 << 29) /* Stick to 32-bit MSIs */ #define AZX_DCAPS_NO_MSI64 (1 << 29) /* Stick to 32-bit MSIs */
#define AZX_DCAPS_SEPARATE_STREAM_TAG (1 << 30) /* capture and playback use separate stream tag */ #define AZX_DCAPS_SEPARATE_STREAM_TAG (1 << 30) /* capture and playback use separate stream tag */

Some files were not shown because too many files have changed in this diff