x86/vdso: Introduce helper functions for CPU and node number
Clean up the CPU/node number related code a bit, to make it more
apparent how we are encoding/extracting the CPU and node fields from
the segment limit. No change in functionality intended.

[ mingo: Wrote new changelog. ]

Suggested-by: Andy Lutomirski <luto@kernel.org>
Suggested-by: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Chang S. Bae <chang.seok.bae@intel.com>
Cc: Andy Lutomirski <luto@amacapital.net>
Cc: Borislav Petkov <bp@alien8.de>
Cc: Brian Gerst <brgerst@gmail.com>
Cc: Dave Hansen <dave.hansen@linux.intel.com>
Cc: Denys Vlasenko <dvlasenk@redhat.com>
Cc: H. Peter Anvin <hpa@zytor.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Markus T Metzger <markus.t.metzger@intel.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Ravi Shankar <ravi.v.shankar@intel.com>
Cc: Rik van Riel <riel@surriel.com>
Link: http://lkml.kernel.org/r/1537312139-5580-8-git-send-email-chang.seok.bae@intel.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
commit ffebbaedc8 (parent c4755613a1)
committed by Ingo Molnar
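Note on the encoding this patch centralizes: the CPU number lives in the
low 12 bits and the node number in the bits above, both in the segment
limit and in TSC_AUX. A minimal user-space sketch of that arithmetic
(the constants mirror VDSO_CPU_SIZE/VDSO_CPU_MASK from the patch; the
standalone program itself is illustrative, not kernel code):

#include <assert.h>
#include <stdio.h>

#define VDSO_CPU_SIZE	12	/* bits reserved for the CPU number */
#define VDSO_CPU_MASK	0xfff

static unsigned long encode_cpu_node(int cpu, unsigned long node)
{
	return (node << VDSO_CPU_SIZE) | cpu;	/* as in vdso_encode_cpu_node() */
}

int main(void)
{
	unsigned long p = encode_cpu_node(5, 3);	/* CPU 5, node 3 */

	assert((p & VDSO_CPU_MASK) == 5);	/* CPU: low 12 bits */
	assert((p >> VDSO_CPU_SIZE) == 3);	/* node: the bits above */
	printf("0x%lx -> cpu %lu, node %lu\n",
	       p, p & VDSO_CPU_MASK, p >> VDSO_CPU_SIZE);
	return 0;
}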
arch/x86/entry/vdso/vgetcpu.c
@@ -13,14 +13,7 @@
 notrace long
 __vdso_getcpu(unsigned *cpu, unsigned *node, struct getcpu_cache *unused)
 {
-	unsigned int p;
-
-	p = __getcpu();
-
-	if (cpu)
-		*cpu = p & VGETCPU_CPU_MASK;
-	if (node)
-		*node = p >> 12;
+	vdso_read_cpu_node(cpu, node);
 	return 0;
 }
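For context, user space normally reaches __vdso_getcpu() through glibc's
sched_getcpu()/getcpu() wrappers, so on x86-64 builds where glibc binds
to the vDSO the refactored path can be exercised with nothing more than:

#define _GNU_SOURCE
#include <sched.h>
#include <stdio.h>

int main(void)
{
	/* glibc typically routes this through __vdso_getcpu() on x86-64 */
	printf("running on CPU %d\n", sched_getcpu());
	return 0;
}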
arch/x86/entry/vdso/vma.c
@@ -339,20 +339,15 @@ static void vgetcpu_cpu_init(void *arg)
 {
 	int cpu = smp_processor_id();
 	struct desc_struct d = { };
-	unsigned long node = 0;
-#ifdef CONFIG_NUMA
-	node = cpu_to_node(cpu);
-#endif
+	unsigned long cpudata = vdso_encode_cpu_node(cpu, cpu_to_node(cpu));
+
 	if (static_cpu_has(X86_FEATURE_RDTSCP))
-		write_rdtscp_aux((node << 12) | cpu);
-	/*
-	 * Store cpu number in limit so that it can be loaded
-	 * quickly in user space in vgetcpu. (12 bits for the CPU
-	 * and 8 bits for the node)
-	 */
-	d.limit0 = cpu | ((node & 0xf) << 12);
-	d.limit1 = node >> 4;
+		write_rdtscp_aux(cpudata);
+
+	/* Store CPU and node number in limit */
+	d.limit0 = cpudata;
+	d.limit1 = cpudata >> 16;
 
 	d.type = 5;		/* RO data, expand down, accessed */
 	d.dpl = 3;		/* Visible to user code */
 	d.s = 1;		/* Not a system segment */
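The limit0/limit1 split above follows the x86 descriptor layout: a
segment limit is 20 bits wide, held as bits 15:0 in limit0 and bits
19:16 in limit1. A hypothetical standalone sketch of the round trip
(field widths as in struct desc_struct; not kernel code):

#include <assert.h>

int main(void)
{
	unsigned long cpudata = (17UL << 12) | 5;	/* node 17, CPU 5 */

	unsigned short limit0 = cpudata & 0xffff;	/* limit bits 15:0 */
	unsigned char  limit1 = (cpudata >> 16) & 0xf;	/* limit bits 19:16 */

	/* LSL later returns the full 20-bit limit, recombined: */
	unsigned long limit = ((unsigned long)limit1 << 16) | limit0;

	assert(limit == cpudata);
	return 0;
}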
arch/x86/include/asm/segment.h
@@ -224,6 +224,47 @@
 #define GDT_ENTRY_TLS_ENTRIES		3
 #define TLS_SIZE			(GDT_ENTRY_TLS_ENTRIES* 8)
 
+#ifdef CONFIG_X86_64
+
+/* Bit size and mask of CPU number stored in the per CPU data (and TSC_AUX) */
+#define VDSO_CPU_SIZE			12
+#define VDSO_CPU_MASK			0xfff
+
+#ifndef __ASSEMBLY__
+
+/* Helper functions to store/load CPU and node numbers */
+
+static inline unsigned long vdso_encode_cpu_node(int cpu, unsigned long node)
+{
+	return ((node << VDSO_CPU_SIZE) | cpu);
+}
+
+static inline void vdso_read_cpu_node(unsigned *cpu, unsigned *node)
+{
+	unsigned int p;
+
+	/*
+	 * Load CPU and node number from GDT. LSL is faster than RDTSCP
+	 * and works on all CPUs. This is volatile so that it orders
+	 * correctly with respect to barrier() and to keep GCC from cleverly
+	 * hoisting it out of the calling function.
+	 *
+	 * If RDPID is available, use it.
+	 */
+	alternative_io ("lsl %[seg],%[p]",
+			".byte 0xf3,0x0f,0xc7,0xf8", /* RDPID %eax/rax */
+			X86_FEATURE_RDPID,
+			[p] "=a" (p), [seg] "r" (__CPU_NUMBER_SEG));
+
+	if (cpu)
+		*cpu = (p & VDSO_CPU_MASK);
+	if (node)
+		*node = (p >> VDSO_CPU_SIZE);
+}
+
+#endif /* !__ASSEMBLY__ */
+#endif /* CONFIG_X86_64 */
+
 #ifdef __KERNEL__
 
 /*
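Because LSL is unprivileged, the same read works from plain user space:
it returns the segment limit of the CPU-number GDT entry, i.e. the
encoded value. A sketch, assuming the conventional x86-64 selector
value 0x7b (GDT entry 15, RPL 3, matching __CPU_NUMBER_SEG; verify
against your kernel's segment.h before relying on it):

#include <stdio.h>

int main(void)
{
	unsigned int p;
	unsigned int seg = 0x7b;	/* assumed __CPU_NUMBER_SEG value */

	/* LSL loads the segment limit: (node << 12) | cpu */
	__asm__ volatile ("lsl %1, %0" : "=r" (p) : "r" (seg));

	printf("cpu %u, node %u\n", p & 0xfff, p >> 12);
	return 0;
}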
arch/x86/include/asm/vgtod.h
@@ -77,30 +77,4 @@ static inline void gtod_write_end(struct vsyscall_gtod_data *s)
 	++s->seq;
 }
 
-#ifdef CONFIG_X86_64
-
-#define VGETCPU_CPU_MASK 0xfff
-
-static inline unsigned int __getcpu(void)
-{
-	unsigned int p;
-
-	/*
-	 * Load CPU (and node) number from GDT. LSL is faster than RDTSCP
-	 * and works on all CPUs. This is volatile so that it orders
-	 * correctly with respect to barrier() and to keep GCC from cleverly
-	 * hoisting it out of the calling function.
-	 *
-	 * If RDPID is available, use it.
-	 */
-	alternative_io ("lsl %[seg],%[p]",
-			".byte 0xf3,0x0f,0xc7,0xf8", /* RDPID %eax/rax */
-			X86_FEATURE_RDPID,
-			[p] "=a" (p), [seg] "r" (__CPU_NUMBER_SEG));
-
-	return p;
-}
-
-#endif /* CONFIG_X86_64 */
-
 #endif /* _ASM_X86_VGTOD_H */