@@ -4405,13 +4405,13 @@ vm_fault_t finish_fault(struct vm_fault *vmf)
 	return ret;
 }
 
-static unsigned long fault_around_bytes __read_mostly =
-	rounddown_pow_of_two(65536);
+static unsigned long fault_around_pages __read_mostly =
+	65536 >> PAGE_SHIFT;
 
 #ifdef CONFIG_DEBUG_FS
 static int fault_around_bytes_get(void *data, u64 *val)
 {
-	*val = fault_around_bytes;
+	*val = fault_around_pages << PAGE_SHIFT;
 	return 0;
 }
 
@@ -4423,10 +4423,13 @@ static int fault_around_bytes_set(void *data, u64 val)
 {
 	if (val / PAGE_SIZE > PTRS_PER_PTE)
 		return -EINVAL;
-	if (val > PAGE_SIZE)
-		fault_around_bytes = rounddown_pow_of_two(val);
-	else
-		fault_around_bytes = PAGE_SIZE; /* rounddown_pow_of_two(0) is undefined */
+
+	/*
+	 * The minimum value is 1 page, however this results in no fault-around
+	 * at all. See should_fault_around().
+	 */
+	fault_around_pages = max(rounddown_pow_of_two(val) >> PAGE_SHIFT, 1UL);
+
 	return 0;
 }
 DEFINE_DEBUGFS_ATTRIBUTE(fault_around_bytes_fops,
@@ -4452,18 +4455,18 @@ late_initcall(fault_around_debugfs);
  * This function doesn't cross VMA or page table boundaries, in order to call
  * map_pages() and acquire a PTE lock only once.
  *
- * fault_around_bytes defines how many bytes we'll try to map.
+ * fault_around_pages defines how many pages we'll try to map.
  * do_fault_around() expects it to be set to a power of two less than or equal
  * to PTRS_PER_PTE.
  *
  * The virtual address of the area that we map is naturally aligned to
- * fault_around_bytes rounded down to the machine page size
+ * fault_around_pages * PAGE_SIZE rounded down to the machine page size
  * (and therefore to page order). This way it's easier to guarantee
  * that we don't cross page table boundaries.
  */
 static vm_fault_t do_fault_around(struct vm_fault *vmf)
 {
-	pgoff_t nr_pages = READ_ONCE(fault_around_bytes) >> PAGE_SHIFT;
+	pgoff_t nr_pages = READ_ONCE(fault_around_pages);
 	pgoff_t pte_off = pte_index(vmf->address);
 	/* The page offset of vmf->address within the VMA. */
 	pgoff_t vma_off = vmf->pgoff - vmf->vma->vm_pgoff;
@@ -4498,7 +4501,8 @@ static inline bool should_fault_around(struct vm_fault *vmf)
 	if (uffd_disable_fault_around(vmf->vma))
 		return false;
 
-	return fault_around_bytes >> PAGE_SHIFT > 1;
+	/* A single page implies no faulting 'around' at all. */
+	return fault_around_pages > 1;
 }
 
 static vm_fault_t do_read_fault(struct vm_fault *vmf)
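
For reference, the arithmetic in the new fault_around_bytes_set() can be exercised outside the kernel. The sketch below is a standalone userspace approximation, not kernel code: PAGE_SHIFT = 12 and PTRS_PER_PTE = 512 are assumed typical x86-64 values, and rounddown_pow_of_two() is reimplemented locally as a stand-in for the kernel macro. It shows how byte values written to the debugfs knob map to fault_around_pages, including the clamp to a minimum of one page, which per should_fault_around() means no fault-around at all.

/*
 * Userspace sketch of the fault_around_bytes_set() rounding (assumptions:
 * PAGE_SHIFT = 12, PTRS_PER_PTE = 512, as on typical x86-64).
 */
#include <stdio.h>

#define PAGE_SHIFT	12
#define PAGE_SIZE	(1UL << PAGE_SHIFT)
#define PTRS_PER_PTE	512UL

/* Local stand-in for the kernel's rounddown_pow_of_two() helper. */
static unsigned long rounddown_pow_of_two(unsigned long n)
{
	unsigned long p = 1;

	while (p <= n / 2)
		p *= 2;
	return p;
}

/*
 * Mirrors the new setter: round the byte count down to a power of two,
 * convert to pages, clamp to at least one page. Returns 0 for values the
 * kernel would reject with -EINVAL.
 */
static unsigned long bytes_to_fault_around_pages(unsigned long val)
{
	unsigned long pages;

	if (val / PAGE_SIZE > PTRS_PER_PTE)
		return 0;

	pages = rounddown_pow_of_two(val) >> PAGE_SHIFT;
	return pages > 1UL ? pages : 1UL;	/* max(..., 1UL) */
}

int main(void)
{
	unsigned long vals[] = { 65536, 3000, 4096, 4UL << 20 };

	for (size_t i = 0; i < sizeof(vals) / sizeof(vals[0]); i++) {
		unsigned long pages = bytes_to_fault_around_pages(vals[i]);

		if (pages)
			printf("%lu bytes -> %lu page(s)\n", vals[i], pages);
		else
			printf("%lu bytes -> rejected (-EINVAL)\n", vals[i]);
	}
	return 0;
}

Run with those sample values, the sketch reports 65536 bytes as 16 pages, 3000 and 4096 bytes as 1 page (fault-around effectively disabled), and 4 MiB as rejected, which lines up with the get side reporting fault_around_pages << PAGE_SHIFT.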