@@ -1040,20 +1040,43 @@ static int kvm_radix_test_clear_dirty(struct kvm *kvm,
 {
 	unsigned long gfn = memslot->base_gfn + pagenum;
 	unsigned long gpa = gfn << PAGE_SHIFT;
-	pte_t *ptep;
+	pte_t *ptep, pte;
 	unsigned int shift;
 	int ret = 0;
 	unsigned long old, *rmapp;
 
 	if (kvm->arch.secure_guest & KVMPPC_SECURE_INIT_DONE)
 		return ret;
 
-	ptep = find_kvm_secondary_pte(kvm, gpa, &shift);
-	if (ptep && pte_present(*ptep) && pte_dirty(*ptep)) {
-		ret = 1;
-		if (shift)
-			ret = 1 << (shift - PAGE_SHIFT);
+	/*
+	 * For performance reasons we don't hold kvm->mmu_lock while walking the
+	 * partition scoped table.
+	 */
+	ptep = find_kvm_secondary_pte_unlocked(kvm, gpa, &shift);
+	if (!ptep)
+		return 0;
+
+	pte = READ_ONCE(*ptep);
+	if (pte_present(pte) && pte_dirty(pte)) {
 		spin_lock(&kvm->mmu_lock);
+		/*
+		 * Recheck the pte again
+		 */
+		if (pte_val(pte) != pte_val(*ptep)) {
+			/*
+			 * We have KVM_MEM_LOG_DIRTY_PAGES enabled. Hence we can
+			 * only find PAGE_SIZE pte entries here. We can continue
+			 * to use the pte addr returned by above page table
+			 * walk.
+			 */
+			if (!pte_present(*ptep) || !pte_dirty(*ptep)) {
+				spin_unlock(&kvm->mmu_lock);
+				return 0;
+			}
+		}
+
+		ret = 1;
+		VM_BUG_ON(shift);
 		old = kvmppc_radix_update_pte(kvm, ptep, _PAGE_DIRTY, 0,
 					      gpa, shift);
 		kvmppc_radix_tlbie_page(kvm, gpa, shift, kvm->arch.lpid);
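
The hunk drops the locked PTE walk in favour of find_kvm_secondary_pte_unlocked(): the partition-scoped table is walked without kvm->mmu_lock, the PTE is snapshotted with READ_ONCE(), and the lock is taken only once the snapshot looks present and dirty. Because the entry can change between the lockless read and the lock acquisition, the live PTE is revalidated under the lock before kvmppc_radix_update_pte() clears _PAGE_DIRTY. The VM_BUG_ON(shift) replaces the old ret = 1 << (shift - PAGE_SHIFT) computation: with KVM_MEM_LOG_DIRTY_PAGES enabled the slot is mapped only with PAGE_SIZE PTEs, so shift must be zero and ret is always 1. Below is a minimal user-space sketch of the same lockless-read-then-revalidate pattern; the names (table, FLAG_DIRTY, test_and_clear_dirty) and the pthread mutex are stand-ins of mine, not kernel API.

/* lockless_recheck.c — self-contained sketch of the pattern, not kernel code. */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

#define FLAG_DIRTY 0x1UL

static unsigned long table[16];               /* stand-in for the page table */
static pthread_mutex_t table_lock = PTHREAD_MUTEX_INITIALIZER;

/* Stand-in for the kernel's READ_ONCE(): force a single load from memory. */
#define READ_ONCE_UL(x) (*(volatile unsigned long *)&(x))

static bool test_and_clear_dirty(unsigned int idx)
{
	unsigned long *ep = &table[idx], snapshot;

	/* 1. Lockless read: cheap, but the entry may change under us. */
	snapshot = READ_ONCE_UL(*ep);
	if (!(snapshot & FLAG_DIRTY))
		return false;

	/* 2. Take the lock and revalidate before modifying, mirroring the
	 *    pte_val(pte) != pte_val(*ptep) recheck in the hunk above. */
	pthread_mutex_lock(&table_lock);
	if (READ_ONCE_UL(*ep) != snapshot &&
	    !(READ_ONCE_UL(*ep) & FLAG_DIRTY)) {
		/* Entry changed and the dirty bit is gone: bail out. */
		pthread_mutex_unlock(&table_lock);
		return false;
	}
	*ep &= ~FLAG_DIRTY;	/* safe: lock held, dirty bit confirmed */
	pthread_mutex_unlock(&table_lock);
	return true;
}

int main(void)
{
	table[3] = FLAG_DIRTY;
	printf("cleared: %d\n", test_and_clear_dirty(3));	/* cleared: 1 */
	printf("cleared: %d\n", test_and_clear_dirty(3));	/* cleared: 0 */
	return 0;
}

The payoff is the same as in the kernel change: the common case (entry not dirty) never touches the lock, and the rare racing writer is caught by the revalidation step rather than by holding the lock across the whole walk.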