about | summary | refs | log | tree | commit | diff | stats
diff options
context:
space:
mode:
Diffstat (limited to 'arch/x86/kernel/cpu/common.c')
-rw-r--r--  arch/x86/kernel/cpu/common.c  125
1 files changed, 111 insertions, 14 deletions
diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
index 637ca414d431..8eabbafff213 100644
--- a/arch/x86/kernel/cpu/common.c
+++ b/arch/x86/kernel/cpu/common.c
@@ -92,7 +92,7 @@ static const struct cpu_dev default_cpu = {
92 92
93static const struct cpu_dev *this_cpu = &default_cpu; 93static const struct cpu_dev *this_cpu = &default_cpu;
94 94
95DEFINE_PER_CPU_PAGE_ALIGNED(struct gdt_page, gdt_page) = { .gdt = { 95DEFINE_PER_CPU_PAGE_ALIGNED_USER_MAPPED(struct gdt_page, gdt_page) = { .gdt = {
96#ifdef CONFIG_X86_64 96#ifdef CONFIG_X86_64
97 /* 97 /*
98 * We need valid kernel segments for data and code in long mode too 98 * We need valid kernel segments for data and code in long mode too
@@ -162,6 +162,40 @@ static int __init x86_mpx_setup(char *s)
162} 162}
163__setup("nompx", x86_mpx_setup); 163__setup("nompx", x86_mpx_setup);
164 164
165#ifdef CONFIG_X86_64
/*
 * Handle the "nopcid" kernel command-line parameter (x86-64 only).
 *
 * Clears X86_FEATURE_PCID in the forced-caps mask so the kernel never
 * enables CR4.PCIDE later (see setup_pcid()).  Registered with __setup(),
 * so returning 1 means "parameter consumed"; returning 0 lets an argument
 * like "nopcid=foo" fall through as unrecognized.
 */
166static int __init x86_pcid_setup(char *s)
167{
168	/* require an exact match without trailing characters */
169	if (strlen(s))
170		return 0;
171
172	/* do not emit a message if the feature is not present */
173	if (!boot_cpu_has(X86_FEATURE_PCID))
174		return 1;
175
176	setup_clear_cpu_cap(X86_FEATURE_PCID);
177	pr_info("nopcid: PCID feature disabled\n");
178	return 1;
179}
180__setup("nopcid", x86_pcid_setup);
181#endif
182
/*
 * Handle the "noinvpcid" kernel command-line parameter.
 *
 * Clears X86_FEATURE_INVPCID so TLB-flush code falls back to non-INVPCID
 * paths.  Registered with early_param() (unlike x86_pcid_setup above,
 * which uses __setup()), so the return convention is inverted:
 * 0 = success, negative errno = bad usage.  NOTE(review): early_param
 * passes s == NULL when no '=' argument is given, hence the bare
 * NULL check rather than strlen().
 */
183static int __init x86_noinvpcid_setup(char *s)
184{
185	/* noinvpcid doesn't accept parameters */
186	if (s)
187		return -EINVAL;
188
189	/* do not emit a message if the feature is not present */
190	if (!boot_cpu_has(X86_FEATURE_INVPCID))
191		return 0;
192
193	setup_clear_cpu_cap(X86_FEATURE_INVPCID);
194	pr_info("noinvpcid: INVPCID feature disabled\n");
195	return 0;
196}
197early_param("noinvpcid", x86_noinvpcid_setup);
198
165#ifdef CONFIG_X86_32 199#ifdef CONFIG_X86_32
166static int cachesize_override = -1; 200static int cachesize_override = -1;
167static int disable_x86_serial_nr = 1; 201static int disable_x86_serial_nr = 1;
@@ -287,6 +321,39 @@ static __always_inline void setup_smap(struct cpuinfo_x86 *c)
287 } 321 }
288} 322}
289 323
/*
 * Enable Process-Context Identifiers (PCID) for this CPU if it is safe.
 *
 * CR4.PCIDE is set only when the CPU advertises PCID and either PGE is
 * available or KAISER (kernel page-table isolation) is enabled — KAISER
 * needs PCID for its user/kernel ASID scheme, so it justifies PCIDE even
 * without PGE.  A CPU reporting PCID but not PGE (only possible on a
 * misconfigured VM) has the PCID capability cleared instead.
 *
 * On success, X86_FEATURE_INVPCID_SINGLE is derived from INVPCID so the
 * TLB code knows single-address invalidation types are usable.
 * kaiser_setup_pcid() runs unconditionally at the end to let the KAISER
 * code record the outcome — presumably choosing its ASID strategy;
 * its body is not visible here.
 */
324static void setup_pcid(struct cpuinfo_x86 *c)
325{
326	if (cpu_has(c, X86_FEATURE_PCID)) {
327		if (cpu_has(c, X86_FEATURE_PGE) || kaiser_enabled) {
328			cr4_set_bits(X86_CR4_PCIDE);
329			/*
330			 * INVPCID has two "groups" of types:
331			 * 1/2: Invalidate an individual address
332			 * 3/4: Invalidate all contexts
333			 *
334			 * 1/2 take a PCID, but 3/4 do not. So, 3/4
335			 * ignore the PCID argument in the descriptor.
336			 * But, we have to be careful not to call 1/2
337			 * with an actual non-zero PCID in them before
338			 * we do the above cr4_set_bits().
339			 */
340			if (cpu_has(c, X86_FEATURE_INVPCID))
341				set_cpu_cap(c, X86_FEATURE_INVPCID_SINGLE);
342		} else {
343			/*
344			 * flush_tlb_all(), as currently implemented, won't
345			 * work if PCID is on but PGE is not. Since that
346			 * combination doesn't exist on real hardware, there's
347			 * no reason to try to fully support it, but it's
348			 * polite to avoid corrupting data if we're on
349			 * an improperly configured VM.
350			 */
351			clear_cpu_cap(c, X86_FEATURE_PCID);
352		}
353	}
354	kaiser_setup_pcid();
355}
356
290/* 357/*
291 * Some CPU features depend on higher CPUID levels, which may not always 358 * Some CPU features depend on higher CPUID levels, which may not always
292 * be available due to CPUID level capping or broken virtualization 359 * be available due to CPUID level capping or broken virtualization
@@ -365,8 +432,8 @@ static const char *table_lookup_model(struct cpuinfo_x86 *c)
365 return NULL; /* Not found */ 432 return NULL; /* Not found */
366} 433}
367 434
368__u32 cpu_caps_cleared[NCAPINTS]; 435__u32 cpu_caps_cleared[NCAPINTS + NBUGINTS];
369__u32 cpu_caps_set[NCAPINTS]; 436__u32 cpu_caps_set[NCAPINTS + NBUGINTS];
370 437
371void load_percpu_segment(int cpu) 438void load_percpu_segment(int cpu)
372{ 439{
@@ -597,6 +664,16 @@ void cpu_detect(struct cpuinfo_x86 *c)
597 } 664 }
598} 665}
599 666
/*
 * Apply command-line / setup-time capability overrides to one CPU.
 *
 * Masks out every bit recorded in cpu_caps_cleared[] and forces on every
 * bit in cpu_caps_set[], across all capability words — including the
 * NBUGINTS bug-flag words (the arrays were widened to NCAPINTS + NBUGINTS
 * in this same patch so forced bugs like X86_BUG_CPU_MELTDOWN survive
 * re-probing).  Replaces two identical open-coded loops in identify_cpu().
 */
667static void apply_forced_caps(struct cpuinfo_x86 *c)
668{
669	int i;
670
671	for (i = 0; i < NCAPINTS + NBUGINTS; i++) {
672		c->x86_capability[i] &= ~cpu_caps_cleared[i];
673		c->x86_capability[i] |= cpu_caps_set[i];
674	}
675}
676
600void get_cpu_cap(struct cpuinfo_x86 *c) 677void get_cpu_cap(struct cpuinfo_x86 *c)
601{ 678{
602 u32 tfms, xlvl; 679 u32 tfms, xlvl;
@@ -753,7 +830,22 @@ static void __init early_identify_cpu(struct cpuinfo_x86 *c)
753 } 830 }
754 831
755 setup_force_cpu_cap(X86_FEATURE_ALWAYS); 832 setup_force_cpu_cap(X86_FEATURE_ALWAYS);
833
834 if (c->x86_vendor != X86_VENDOR_AMD)
835 setup_force_cpu_bug(X86_BUG_CPU_MELTDOWN);
836
837 setup_force_cpu_bug(X86_BUG_SPECTRE_V1);
838 setup_force_cpu_bug(X86_BUG_SPECTRE_V2);
839
756 fpu__init_system(c); 840 fpu__init_system(c);
841
842#ifdef CONFIG_X86_32
843 /*
844 * Regardless of whether PCID is enumerated, the SDM says
845 * that it can't be enabled in 32-bit mode.
846 */
847 setup_clear_cpu_cap(X86_FEATURE_PCID);
848#endif
757} 849}
758 850
759void __init early_cpu_init(void) 851void __init early_cpu_init(void)
@@ -863,7 +955,7 @@ static void identify_cpu(struct cpuinfo_x86 *c)
863 int i; 955 int i;
864 956
865 c->loops_per_jiffy = loops_per_jiffy; 957 c->loops_per_jiffy = loops_per_jiffy;
866 c->x86_cache_size = -1; 958 c->x86_cache_size = 0;
867 c->x86_vendor = X86_VENDOR_UNKNOWN; 959 c->x86_vendor = X86_VENDOR_UNKNOWN;
868 c->x86_model = c->x86_mask = 0; /* So far unknown... */ 960 c->x86_model = c->x86_mask = 0; /* So far unknown... */
869 c->x86_vendor_id[0] = '\0'; /* Unset */ 961 c->x86_vendor_id[0] = '\0'; /* Unset */
@@ -888,11 +980,8 @@ static void identify_cpu(struct cpuinfo_x86 *c)
888 if (this_cpu->c_identify) 980 if (this_cpu->c_identify)
889 this_cpu->c_identify(c); 981 this_cpu->c_identify(c);
890 982
891 /* Clear/Set all flags overriden by options, after probe */ 983 /* Clear/Set all flags overridden by options, after probe */
892 for (i = 0; i < NCAPINTS; i++) { 984 apply_forced_caps(c);
893 c->x86_capability[i] &= ~cpu_caps_cleared[i];
894 c->x86_capability[i] |= cpu_caps_set[i];
895 }
896 985
897#ifdef CONFIG_X86_64 986#ifdef CONFIG_X86_64
898 c->apicid = apic->phys_pkg_id(c->initial_apicid, 0); 987 c->apicid = apic->phys_pkg_id(c->initial_apicid, 0);
@@ -918,6 +1007,9 @@ static void identify_cpu(struct cpuinfo_x86 *c)
918 setup_smep(c); 1007 setup_smep(c);
919 setup_smap(c); 1008 setup_smap(c);
920 1009
1010 /* Set up PCID */
1011 setup_pcid(c);
1012
921 /* 1013 /*
922 * The vendor-specific functions might have changed features. 1014 * The vendor-specific functions might have changed features.
923 * Now we do "generic changes." 1015 * Now we do "generic changes."
@@ -950,10 +1042,7 @@ static void identify_cpu(struct cpuinfo_x86 *c)
950 * Clear/Set all flags overriden by options, need do it 1042 * Clear/Set all flags overriden by options, need do it
951 * before following smp all cpus cap AND. 1043 * before following smp all cpus cap AND.
952 */ 1044 */
953 for (i = 0; i < NCAPINTS; i++) { 1045 apply_forced_caps(c);
954 c->x86_capability[i] &= ~cpu_caps_cleared[i];
955 c->x86_capability[i] |= cpu_caps_set[i];
956 }
957 1046
958 /* 1047 /*
959 * On SMP, boot_cpu_data holds the common feature set between 1048 * On SMP, boot_cpu_data holds the common feature set between
@@ -1173,7 +1262,7 @@ static const unsigned int exception_stack_sizes[N_EXCEPTION_STACKS] = {
1173 [DEBUG_STACK - 1] = DEBUG_STKSZ 1262 [DEBUG_STACK - 1] = DEBUG_STKSZ
1174}; 1263};
1175 1264
1176static DEFINE_PER_CPU_PAGE_ALIGNED(char, exception_stacks 1265DEFINE_PER_CPU_PAGE_ALIGNED_USER_MAPPED(char, exception_stacks
1177 [(N_EXCEPTION_STACKS - 1) * EXCEPTION_STKSZ + DEBUG_STKSZ]); 1266 [(N_EXCEPTION_STACKS - 1) * EXCEPTION_STKSZ + DEBUG_STKSZ]);
1178 1267
1179/* May not be marked __init: used by software suspend */ 1268/* May not be marked __init: used by software suspend */
@@ -1336,6 +1425,14 @@ void cpu_init(void)
1336 * try to read it. 1425 * try to read it.
1337 */ 1426 */
1338 cr4_init_shadow(); 1427 cr4_init_shadow();
1428 if (!kaiser_enabled) {
1429 /*
1430 * secondary_startup_64() deferred setting PGE in cr4:
1431 * probe_page_size_mask() sets it on the boot cpu,
1432 * but it needs to be set on each secondary cpu.
1433 */
1434 cr4_set_bits(X86_CR4_PGE);
1435 }
1339 1436
1340 /* 1437 /*
1341 * Load microcode on this cpu if a valid microcode is available. 1438 * Load microcode on this cpu if a valid microcode is available.