diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index 8978c1bf91e..cc2cd900732 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -1080,7 +1080,7 @@ int move_huge_pmd(struct vm_area_struct *vma, struct vm_area_struct *new_vma,
 {
 	int ret = 0;
 	pmd_t pmd;
-
+	bool force_flush = false;
 	struct mm_struct *mm = vma->vm_mm;
 
 	if ((old_addr & ~HPAGE_PMD_MASK) ||
@@ -1101,8 +1101,12 @@ int move_huge_pmd(struct vm_area_struct *vma, struct vm_area_struct *new_vma,
 	ret = __pmd_trans_huge_lock(old_pmd, vma);
 	if (ret == 1) {
 		pmd = pmdp_get_and_clear(mm, old_addr, old_pmd);
+		if (pmd_present(pmd))
+			force_flush = true;
 		VM_BUG_ON(!pmd_none(*new_pmd));
 		set_pmd_at(mm, new_addr, new_pmd, pmd);
+		if (force_flush)
+			flush_tlb_range(vma, old_addr, old_addr + PMD_SIZE);
 		spin_unlock(&mm->page_table_lock);
 	}
 out:
diff --git a/mm/mremap.c b/mm/mremap.c
index db8d983b5a7..958218123c7 100644
--- a/mm/mremap.c
+++ b/mm/mremap.c
@@ -77,6 +77,8 @@ static void move_ptes(struct vm_area_struct *vma, pmd_t *old_pmd,
 	struct mm_struct *mm = vma->vm_mm;
 	pte_t *old_pte, *new_pte, pte;
 	spinlock_t *old_ptl, *new_ptl;
+	bool force_flush = false;
+	unsigned long len = old_end - old_addr;
 
 	if (vma->vm_file) {
 		/*
@@ -105,11 +107,25 @@ static void move_ptes(struct vm_area_struct *vma, pmd_t *old_pmd,
 		if (pte_none(*old_pte))
 			continue;
 		pte = ptep_get_and_clear(mm, old_addr, old_pte);
+		/*
+		 * If we are remapping a valid PTE, make sure
+		 * to flush TLB before we drop the PTL for the PTE.
+		 *
+		 * NOTE! Both old and new PTL matter: the old one
+		 * for racing with page_mkclean(), the new one to
+		 * make sure the physical page stays valid until
+		 * the TLB entry for the old mapping has been
+		 * flushed.
+		 */
+		if (pte_present(pte))
+			force_flush = true;
 		pte = move_pte(pte, new_vma->vm_page_prot, old_addr, new_addr);
 		set_pte_at(mm, new_addr, new_pte, pte);
 	}
 
 	arch_leave_lazy_mmu_mode();
+	if (force_flush)
+		flush_tlb_range(vma, old_end - len, old_end);
 	if (new_ptl != old_ptl)
 		spin_unlock(new_ptl);
 	pte_unmap(new_pte - 1);
@@ -126,7 +142,8 @@ unsigned long move_page_tables(struct vm_area_struct *vma,
 {
 	unsigned long extent, next, old_end;
 	pmd_t *old_pmd, *new_pmd;
-	bool need_flush = false;
+	unsigned long mmun_start;	/* For mmu_notifiers */
+	unsigned long mmun_end;		/* For mmu_notifiers */
 
 	old_end = old_addr + len;
 	flush_cache_range(vma, old_addr, old_end);
@@ -153,7 +170,6 @@ unsigned long move_page_tables(struct vm_area_struct *vma,
 				    new_addr, old_end,
 				    old_pmd, new_pmd);
 			if (err > 0) {
-				need_flush = true;
 				continue;
 			} else if (!err) {
 				split_huge_page_pmd(vma->vm_mm, old_pmd);
@@ -170,10 +186,7 @@ unsigned long move_page_tables(struct vm_area_struct *vma,
 			extent = LATENCY_LIMIT;
 		move_ptes(vma, old_pmd, old_addr, old_addr + extent,
 			  new_vma, new_pmd, new_addr);
-		need_flush = true;
 	}
-	if (likely(need_flush))
-		flush_tlb_range(vma, old_end-len, old_addr);
 
 	mmu_notifier_invalidate_range_end(vma->vm_mm, old_end-len, old_end);
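
For illustration only (not part of the patch): a minimal userspace C sketch of the ordering the change enforces in move_ptes() and move_huge_pmd(). When the entry being moved was present, the stale TLB entry is flushed before the page-table lock is dropped, so a page_mkclean()-style cleaner that later takes the same lock cannot race with a CPU still writing through the old, cached translation. Every name below (ptl, old_pte_present, stale_tlb_entry, cleaner) is invented for the example; a pthread mutex stands in for the PTL and a boolean stands in for the TLB state.

/* Build with: cc -pthread sketch.c */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_mutex_t ptl = PTHREAD_MUTEX_INITIALIZER;	/* stand-in for the PTE lock */
static bool old_pte_present = true;	/* the "old" page-table entry */
static bool stale_tlb_entry = true;	/* cached translation for the old address */

/* stand-in for flush_tlb_range(): drop the stale cached translation */
static void flush_tlb_range_stub(void)
{
	stale_tlb_entry = false;
}

/* page_mkclean()-style cleaner: must never see "entry moved, TLB still live" */
static void *cleaner(void *arg)
{
	(void)arg;
	pthread_mutex_lock(&ptl);
	if (!old_pte_present && stale_tlb_entry)
		fprintf(stderr, "BUG: stale writable translation outlived the move\n");
	pthread_mutex_unlock(&ptl);
	return NULL;
}

int main(void)
{
	pthread_t t;
	bool force_flush = false;

	pthread_create(&t, NULL, cleaner, NULL);

	pthread_mutex_lock(&ptl);
	if (old_pte_present)		/* mirrors: if (pte_present(pte)) force_flush = true; */
		force_flush = true;
	old_pte_present = false;	/* mirrors: ptep_get_and_clear() + set_pte_at(new) */
	if (force_flush)
		flush_tlb_range_stub();	/* the fix: flush before dropping the lock */
	pthread_mutex_unlock(&ptl);

	pthread_join(t, NULL);
	return 0;
}

Before the patch, the flush in move_page_tables() happened only after all PTEs had been moved and the PTL dropped, which in this sketch would correspond to calling flush_tlb_range_stub() after pthread_mutex_unlock(): the cleaner could then observe the moved entry while the stale translation was still usable.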