1 --- a/include/linux/mm.h 2016-08-25 18:07:47.000000000 +0300
2 +++ b/include/linux/mm.h 2016-10-24 16:19:16.000000000 +0300
4 #define FOLL_HWPOISON 0x100 /* check page is hwpoisoned */
5 #define FOLL_NUMA 0x200 /* force NUMA hinting page fault */
6 #define FOLL_MIGRATION 0x400 /* wait for page to replace migration entry */
7 +#define FOLL_COW 0x4000 /* internal GUP flag */
9 typedef int (*pte_fn_t)(pte_t *pte, pgtable_t token, unsigned long addr,
11 diff -Naur ./mm/memory.c ../linux-2.6.32-642.6.2.el6/mm/memory.c
12 --- a/mm/memory.c 2016-08-25 18:06:57.000000000 +0300
13 +++ b/mm/memory.c 2016-10-24 16:19:16.000000000 +0300
14 @@ -1191,6 +1191,24 @@
16 EXPORT_SYMBOL_GPL(zap_vma_ptes);
18 +static inline bool can_follow_write_pte(pte_t pte, struct page *page,
25 + * Make sure that we are really following a COWed page. We do not really
26 + * have to care about exclusiveness of the page because we only want
27 + * to ensure that the once-COWed page hasn't disappeared in the meantime
28 + * or hasn't been merged into a KSM page.
30 + if ((flags & FOLL_FORCE) && (flags & FOLL_COW))
31 + return page && PageAnon(page) && !PageKsm(page);
37 * Do a quick page-table lookup for a single page.
39 @@ -1280,10 +1298,11 @@
40 migration_entry_wait(mm, pmd, address);
41 goto split_fallthrough;
43 - if ((flags & FOLL_WRITE) && !pte_write(pte))
46 page = vm_normal_page(vma, address, pte);
47 + if ((flags & FOLL_WRITE) && !can_follow_write_pte(pte, page, flags)) {
48 + pte_unmap_unlock(ptep, ptl);
51 if (unlikely(!page)) {
52 if ((flags & FOLL_DUMP) ||
53 !is_zero_pfn(pte_pfn(pte)))
56 mark_page_accessed(page);
59 pte_unmap_unlock(ptep, ptl);
62 @@ -1503,17 +1520,13 @@
63 * The VM_FAULT_WRITE bit tells us that
64 * do_wp_page has broken COW when necessary,
65 * even if maybe_mkwrite decided not to set
66 - * pte_write. We can thus safely do subsequent
67 - * page lookups as if they were reads. But only
68 - * do so when looping for pte_write is futile:
69 - * in some cases userspace may also be wanting
70 - * to write to the gotten user page, which a
71 - * read fault here might prevent (a readonly
72 - * page might get reCOWed by userspace write).
73 + * pte_write. We cannot simply drop FOLL_WRITE
74 + * here because the COWed page might be gone by
75 + * the time we do the subsequent page lookups.
77 if ((ret & VM_FAULT_WRITE) &&
78 !(vma->vm_flags & VM_WRITE))
79 - foll_flags &= ~FOLL_WRITE;
80 + foll_flags |= FOLL_COW;