@@ -15,11 +15,27 @@ static DEFINE_PER_CPU_PAGE_ALIGNED(char, exception_stacks
1515 [(N_EXCEPTION_STACKS - 1 ) * EXCEPTION_STKSZ + DEBUG_STKSZ ]) ;
1616#endif
1717
/*
 * get_cpu_entry_area() - compute the fixed virtual address of @cpu's
 * cpu_entry_area.
 *
 * The area for CPU n lives at CPU_ENTRY_AREA_PER_CPU + n * CPU_ENTRY_AREA_SIZE,
 * so this is pure address arithmetic: no lookup and no allocation happen
 * here.  The BUILD_BUG_ON() proves at compile time that
 * struct cpu_entry_area is an exact multiple of PAGE_SIZE, which the
 * fixed per-CPU stride relies on.
 */
18+ struct cpu_entry_area * get_cpu_entry_area (int cpu )
19+ {
20+ unsigned long va = CPU_ENTRY_AREA_PER_CPU + cpu * CPU_ENTRY_AREA_SIZE ;
21+ BUILD_BUG_ON (sizeof (struct cpu_entry_area ) % PAGE_SIZE != 0 );
22+
23+ return (struct cpu_entry_area * ) va ;
24+ }
/* Exported so code outside this file (incl. modules) can locate the area. */
25+ EXPORT_SYMBOL (get_cpu_entry_area );
26+
/*
 * cea_set_pte() - install a single page mapping in the cpu entry area.
 * @cea_vaddr: virtual address inside the cpu entry area to map
 * @pa:        physical address of the backing page
 * @flags:     page protection bits for the new PTE
 *
 * Builds the PTE with pfn_pte() and installs it via set_pte_vaddr().
 * Maps exactly one page; callers loop (see cea_map_percpu_pages())
 * for multi-page ranges.
 */
27+ void cea_set_pte (void * cea_vaddr , phys_addr_t pa , pgprot_t flags )
28+ {
29+ unsigned long va = (unsigned long ) cea_vaddr ;
30+
31+ set_pte_vaddr (va , pfn_pte (pa >> PAGE_SHIFT , flags ));
32+ }
33+
/*
 * cea_map_percpu_pages() - map @pages pages of per-CPU data into the
 * cpu entry area starting at @cea_vaddr with protection @prot.
 *
 * This hunk renames set_percpu_fixmap_pages() (which walked fixmap
 * indices downwards via __set_fixmap()) to cea_map_percpu_pages(),
 * which walks plain entry-area virtual addresses upwards and installs
 * each PTE through cea_set_pte().  per_cpu_ptr_to_phys() translates
 * the per-CPU pointer into the physical address backing each page.
 *
 * NOTE(review): the `cea_vaddr += PAGE_SIZE` arithmetic on a void *
 * relies on the GCC sizeof(void)==1 extension the kernel builds with.
 */
1834static void __init
19- set_percpu_fixmap_pages ( int idx , void * ptr , int pages , pgprot_t prot )
35+ cea_map_percpu_pages ( void * cea_vaddr , void * ptr , int pages , pgprot_t prot )
2036{
21- for ( ; pages ; pages -- , idx -- , ptr += PAGE_SIZE )
22- __set_fixmap ( idx , per_cpu_ptr_to_phys (ptr ), prot );
37+ for ( ; pages ; pages -- , cea_vaddr += PAGE_SIZE , ptr += PAGE_SIZE )
38+ cea_set_pte ( cea_vaddr , per_cpu_ptr_to_phys (ptr ), prot );
2339}
2440
/*
 * NOTE(review): "fixmap" in the comment below is stale once this patch
 * lands -- the mappings now live in the dedicated cpu entry area, not
 * the fixmap.  Worth rewording in a follow-up.
 */
2541/* Setup the fixmap mappings only once per-processor */
@@ -47,10 +63,12 @@ static void __init setup_cpu_entry_area(int cpu)
4763 pgprot_t tss_prot = PAGE_KERNEL ;
4864#endif
4965
/*
 * Map this CPU's GDT and its entry stack page into the entry area.
 * gdt_prot and tss_prot are chosen in the #ifdef block above (partly
 * elided by the hunk).  Both calls switch from the fixmap-index
 * helpers to the new cea_* helpers operating on the struct layout
 * returned by get_cpu_entry_area().
 */
50- __set_fixmap (get_cpu_entry_area_index (cpu , gdt ), get_cpu_gdt_paddr (cpu ), gdt_prot );
51- set_percpu_fixmap_pages (get_cpu_entry_area_index (cpu , entry_stack_page ),
52- per_cpu_ptr (& entry_stack_storage , cpu ), 1 ,
53- PAGE_KERNEL );
66+ cea_set_pte (& get_cpu_entry_area (cpu )-> gdt , get_cpu_gdt_paddr (cpu ),
67+ gdt_prot );
68+
69+ cea_map_percpu_pages (& get_cpu_entry_area (cpu )-> entry_stack_page ,
70+ per_cpu_ptr (& entry_stack_storage , cpu ), 1 ,
71+ PAGE_KERNEL );
5472
5573 /*
5674 * The Intel SDM says (Volume 3, 7.2.1):
@@ -72,10 +90,9 @@ static void __init setup_cpu_entry_area(int cpu)
/*
 * Compile-time layout checks before mapping the TSS:
 *  - offsetof ^ offsetofend of x86_tss having no bits in PAGE_MASK
 *    means the hardware-visible x86_tss member starts and ends inside
 *    the same page (it must not straddle a page boundary);
 *  - the whole tss_struct must be a whole number of pages so it can be
 *    mapped page-by-page below.
 */
7290 BUILD_BUG_ON ((offsetof(struct tss_struct , x86_tss ) ^
7391 offsetofend (struct tss_struct , x86_tss )) & PAGE_MASK );
7492 BUILD_BUG_ON (sizeof (struct tss_struct ) % PAGE_SIZE != 0 );
/* Map the per-CPU cpu_tss_rw into the entry area's tss slot. */
75- set_percpu_fixmap_pages (get_cpu_entry_area_index (cpu , tss ),
76- & per_cpu (cpu_tss_rw , cpu ),
77- sizeof (struct tss_struct ) / PAGE_SIZE ,
78- tss_prot );
93+ cea_map_percpu_pages (& get_cpu_entry_area (cpu )-> tss ,
94+ & per_cpu (cpu_tss_rw , cpu ),
95+ sizeof (struct tss_struct ) / PAGE_SIZE , tss_prot );
7996
/* 32-bit keeps a per-CPU shortcut pointer to its entry area. */
8097#ifdef CONFIG_X86_32
8198 per_cpu (cpu_entry_area , cpu ) = get_cpu_entry_area (cpu );
@@ -85,20 +102,37 @@ static void __init setup_cpu_entry_area(int cpu)
/*
 * Presumably inside the 64-bit (#else of CONFIG_X86_32) branch -- the
 * opening #ifdef is elided by the hunk; verify against the full file.
 * The checks pin the layout: exception_stacks must be page-multiple
 * sized and exactly match the size of the cpu_entry_area slot it is
 * mapped into below.
 */
85102 BUILD_BUG_ON (sizeof (exception_stacks ) % PAGE_SIZE != 0 );
86103 BUILD_BUG_ON (sizeof (exception_stacks ) !=
87104 sizeof (((struct cpu_entry_area * )0 )-> exception_stacks ));
88- set_percpu_fixmap_pages (get_cpu_entry_area_index (cpu , exception_stacks ),
89- & per_cpu (exception_stacks , cpu ),
90- sizeof (exception_stacks ) / PAGE_SIZE ,
91- PAGE_KERNEL );
105+ cea_map_percpu_pages (& get_cpu_entry_area (cpu )-> exception_stacks ,
106+ & per_cpu (exception_stacks , cpu ),
107+ sizeof (exception_stacks ) / PAGE_SIZE , PAGE_KERNEL );
92108
/*
 * Map the entry trampoline text (kernel image symbol _entry_trampoline)
 * read-only + executable into the entry area.
 */
93- __set_fixmap ( get_cpu_entry_area_index (cpu , entry_trampoline ) ,
109+ cea_set_pte ( & get_cpu_entry_area (cpu ) -> entry_trampoline ,
94110 __pa_symbol (_entry_trampoline ), PAGE_KERNEL_RX );
95111#endif
96112}
97113
/*
 * setup_cpu_entry_area_ptes() - 32-bit only: pre-populate the kernel
 * page tables covering the entire cpu entry area range.
 *
 * BUILD_BUG_ON: the reserved CPU_ENTRY_AREA_PAGES must be large enough
 * to hold CPU_ENTRY_AREA_MAP_SIZE.  BUG_ON: the base must be
 * PMD-aligned, because the loop walks the range with PMD granularity,
 * calling populate_extra_pte() once per PMD so the PTE level exists
 * before setup_cpu_entry_area() installs individual PTEs.
 * On 64-bit the #ifdef covers the whole body, making this a no-op.
 */
114+ static __init void setup_cpu_entry_area_ptes (void )
115+ {
116+ #ifdef CONFIG_X86_32
117+ unsigned long start , end ;
118+
119+ BUILD_BUG_ON (CPU_ENTRY_AREA_PAGES * PAGE_SIZE < CPU_ENTRY_AREA_MAP_SIZE );
120+ BUG_ON (CPU_ENTRY_AREA_BASE & ~PMD_MASK );
121+
122+ start = CPU_ENTRY_AREA_BASE ;
123+ end = start + CPU_ENTRY_AREA_MAP_SIZE ;
124+
/* Careful: one call per PMD, not per page. */
125+ for (; start < end ; start += PMD_SIZE )
126+ populate_extra_pte (start );
127+ #endif
128+ }
129+
/*
 * setup_cpu_entry_areas() - boot-time entry point: build the entry
 * area for every possible CPU.  The patch adds the
 * setup_cpu_entry_area_ptes() call first, so (on 32-bit) the covering
 * page-table levels exist before per-CPU PTEs are installed.
 */
98130void __init setup_cpu_entry_areas (void )
99131{
100132 unsigned int cpu ;
101133
134+ setup_cpu_entry_area_ptes ();
135+
102136 for_each_possible_cpu (cpu )
103137 setup_cpu_entry_area (cpu );
104138}
0 commit comments