mirror of
https://github.com/raspberrypi/linux.git
synced 2025-12-27 12:32:50 +00:00
When running in a non-root time namespace, the global VDSO data page is replaced by a dedicated namespace data page and the global data page is mapped next to it. Detailed explanations can be found at commit 660fd04f93 ("lib/vdso: Prepare for time namespace support"). When that happens, __kernel_get_syscall_map, __kernel_get_tbfreq and __kernel_sync_dicache don't work anymore because they read 0 instead of the data they need. To address that, clock_mode has to be read. When it is set to VDSO_CLOCKMODE_TIMENS, it means it is a dedicated namespace data page and the global data is located on the following page. Add a macro called get_realdatapage which reads clock_mode and adds PAGE_SIZE to the pointer provided by the get_datapage macro when clock_mode is equal to VDSO_CLOCKMODE_TIMENS. Use this new macro instead of the get_datapage macro, except for time functions as they handle it internally. Fixes: 74205b3fc2 ("powerpc/vdso: Add support for time namespaces") Reported-by: Jason A. Donenfeld <Jason@zx2c4.com> Closes: https://lore.kernel.org/all/ZtnYqZI-nrsNslwy@zx2c4.com/ Signed-off-by: Christophe Leroy <christophe.leroy@csgroup.eu> Acked-by: Michael Ellerman <mpe@ellerman.id.au> Signed-off-by: Jason A. Donenfeld <Jason@zx2c4.com>
100 lines
1.9 KiB
ArmAsm
100 lines
1.9 KiB
ArmAsm
/* SPDX-License-Identifier: GPL-2.0-or-later */
|
|
/*
|
|
* vDSO provided cache flush routines
|
|
*
|
|
* Copyright (C) 2004 Benjamin Herrenschmuidt (benh@kernel.crashing.org),
|
|
* IBM Corp.
|
|
*/
|
|
#include <asm/processor.h>
|
|
#include <asm/ppc_asm.h>
|
|
#include <asm/vdso.h>
|
|
#include <asm/vdso_datapage.h>
|
|
#include <asm/asm-offsets.h>
|
|
#include <asm/cache.h>
|
|
|
|
	.text

/*
 * Default "generic" version of __kernel_sync_dicache.
 *
 * void __kernel_sync_dicache(unsigned long start, unsigned long end)
 *
 * Flushes the data cache & invalidate the instruction cache for the
 * provided range [start, end[
 *
 * In:    r3 = start address, r4 = end address (exclusive)
 * Out:   r3 = 0, CR0.SO cleared (vDSO success convention)
 * Note:  on PPC64 the cache geometry is read from the vDSO data page;
 *        on PPC32 the compile-time L1_CACHE_BYTES/L1_CACHE_SHIFT are used.
 */
V_FUNCTION_BEGIN(__kernel_sync_dicache)
  .cfi_startproc
	/*
	 * CPUs with a coherent icache need no per-line flush: take the
	 * short path at 3: below (patched in/out by the feature-section
	 * machinery depending on CPU_FTR_COHERENT_ICACHE).
	 */
BEGIN_FTR_SECTION
	b	3f
END_FTR_SECTION_IFSET(CPU_FTR_COHERENT_ICACHE)
#ifdef CONFIG_PPC64
	/*
	 * get_realdatapage clobbers LR, so preserve it in r12 around the
	 * call. r10 = pointer to the real vDSO data page (skips past the
	 * time-namespace page when clock_mode == VDSO_CLOCKMODE_TIMENS).
	 */
	mflr	r12
  .cfi_register lr,r12
	get_realdatapage	r10, r11
	mtlr	r12
  .cfi_restore lr
#endif

	/* r5 = dcache line size - 1 (the alignment mask) */
#ifdef CONFIG_PPC64
	lwz	r7,CFG_DCACHE_BLOCKSZ(r10)
	addi	r5,r7,-1
#else
	li	r5, L1_CACHE_BYTES - 1
#endif
	andc	r6,r3,r5		/* round low to line bdy */
	subf	r8,r6,r4		/* compute length */
	add	r8,r8,r5		/* ensure we get enough */
#ifdef CONFIG_PPC64
	lwz	r9,CFG_DCACHE_LOGBLOCKSZ(r10)
	PPC_SRL.	r8,r8,r9		/* compute line count */
#else
	srwi.	r8, r8, L1_CACHE_SHIFT
	mr	r7, r6			/* save line-aligned start for icbi loop */
#endif
	crclr	cr0*4+so		/* clear SO: report success */
	beqlr				/* nothing to do? */
	mtctr	r8
	/* Flush each dcache line in [r6, end) to memory */
1:	dcbst	0,r6
#ifdef CONFIG_PPC64
	add	r6,r6,r7		/* advance by runtime dcache line size */
#else
	addi	r6, r6, L1_CACHE_BYTES
#endif
	bdnz	1b
	sync				/* order dcbst stores before icbi */

/* Now invalidate the instruction cache */

#ifdef CONFIG_PPC64
	/* Recompute alignment and line count with the icache geometry */
	lwz	r7,CFG_ICACHE_BLOCKSZ(r10)
	addi	r5,r7,-1
	andc	r6,r3,r5		/* round low to line bdy */
	subf	r8,r6,r4		/* compute length */
	add	r8,r8,r5
	lwz	r9,CFG_ICACHE_LOGBLOCKSZ(r10)
	PPC_SRL.	r8,r8,r9		/* compute line count */
	crclr	cr0*4+so
	beqlr				/* nothing to do? */
#endif
	/* PPC32 reuses the dcache line count in CTR and start in r7 */
	mtctr	r8
#ifdef CONFIG_PPC64
2:	icbi	0,r6
	add	r6,r6,r7
#else
2:	icbi	0, r7
	addi	r7, r7, L1_CACHE_BYTES
#endif
	bdnz	2b
	isync				/* discard prefetched instructions */
	li	r3,0			/* return 0 */
	blr

	/*
	 * Coherent-icache fast path: a single sync/icbi/isync sequence is
	 * sufficient; the icbi target (r1) is arbitrary on these CPUs.
	 */
3:
	crclr	cr0*4+so
	sync
	icbi	0,r1
	isync
	li	r3,0
	blr
  .cfi_endproc
V_FUNCTION_END(__kernel_sync_dicache)