Fix CVE-2016-5195 Dirty COW
--- a/include/linux/mm.h        2016-08-25 18:07:47.000000000 +0300
+++ b/include/linux/mm.h        2016-10-24 16:19:16.000000000 +0300
@@ -1401,6 +1401,7 @@
 #define FOLL_HWPOISON  0x100   /* check page is hwpoisoned */
 #define FOLL_NUMA      0x200   /* force NUMA hinting page fault */
 #define FOLL_MIGRATION 0x400   /* wait for page to replace migration entry */
+#define FOLL_COW       0x4000  /* internal GUP flag */
 
 typedef int (*pte_fn_t)(pte_t *pte, pgtable_t token, unsigned long addr,
                        void *data);
diff -Naur ./mm/memory.c ../linux-2.6.32-642.6.2.el6/mm/memory.c
--- a/mm/memory.c       2016-08-25 18:06:57.000000000 +0300
+++ b/mm/memory.c       2016-10-24 16:19:16.000000000 +0300
@@ -1191,6 +1191,24 @@
 }
 EXPORT_SYMBOL_GPL(zap_vma_ptes);
 
+static inline bool can_follow_write_pte(pte_t pte, struct page *page,
+                                       unsigned int flags)
+{
+       if (pte_write(pte))
+               return true;
+
+       /*
+        * Make sure we are really following a COWed page. We do not need to
+        * care about exclusiveness of the page; we only want to ensure that
+        * the once-COWed page has not disappeared in the meantime and has
+        * not been merged into a KSM page.
+        */
+       if ((flags & FOLL_FORCE) && (flags & FOLL_COW))
+               return page && PageAnon(page) && !PageKsm(page);
+
+       return false;
+}
+
 /*
  * Do a quick page-table lookup for a single page.
  */
@@ -1280,10 +1298,11 @@
                migration_entry_wait(mm, pmd, address);
                goto split_fallthrough;
        }
-       if ((flags & FOLL_WRITE) && !pte_write(pte))
-               goto unlock;
-
        page = vm_normal_page(vma, address, pte);
+       if ((flags & FOLL_WRITE) && !can_follow_write_pte(pte, page, flags)) {
+               pte_unmap_unlock(ptep, ptl);
+               return NULL;
+       }
        if (unlikely(!page)) {
                if ((flags & FOLL_DUMP) ||
                    !is_zero_pfn(pte_pfn(pte)))
@@ -1304,7 +1323,6 @@
                 */
                mark_page_accessed(page);
        }
-unlock:
        pte_unmap_unlock(ptep, ptl);
 out:
        return page;
@@ -1503,17 +1520,13 @@
                                 * The VM_FAULT_WRITE bit tells us that
                                 * do_wp_page has broken COW when necessary,
                                 * even if maybe_mkwrite decided not to set
-                                * pte_write. We can thus safely do subsequent
-                                * page lookups as if they were reads. But only
-                                * do so when looping for pte_write is futile:
-                                * in some cases userspace may also be wanting
-                                * to write to the gotten user page, which a
-                                * read fault here might prevent (a readonly
-                                * page might get reCOWed by userspace write).
+                                * pte_write. We cannot simply drop FOLL_WRITE
+                                * here because the COWed page might be gone by
+                                * the time we do the subsequent page lookups.
                                 */
                                if ((ret & VM_FAULT_WRITE) &&
                                    !(vma->vm_flags & VM_WRITE))
-                                       foll_flags &= ~FOLL_WRITE;
+                                       foll_flags |= FOLL_COW;
 
                                cond_resched();
                        }
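
For reference, the sketch below is a self-contained userspace model of the check this patch adds; it is not kernel code and is not part of the patch. The struct fake_pte and struct fake_page types are invented stand-ins for pte_write(), vm_normal_page(), PageAnon() and PageKsm(), and the FOLL_WRITE/FOLL_FORCE values are assumed placeholders (only FOLL_COW's 0x4000 comes from the hunk above). It shows why keeping FOLL_WRITE set and adding FOLL_COW closes the window that clearing FOLL_WRITE left open.

/*
 * Standalone model of the decision logic added by this patch.
 * The types and flag values below are simplified stand-ins, NOT kernel APIs.
 * Build and run with: cc -o follow_model follow_model.c && ./follow_model
 */
#include <stdbool.h>
#include <stdio.h>

#define FOLL_WRITE     0x01    /* caller wants a writable mapping (placeholder) */
#define FOLL_FORCE     0x10    /* override protections, e.g. ptrace (placeholder) */
#define FOLL_COW       0x4000  /* set only after a forced COW break */

struct fake_page {
	bool anon;   /* models PageAnon() */
	bool ksm;    /* models PageKsm()  */
};

struct fake_pte {
	bool writable;            /* models pte_write()              */
	struct fake_page *page;   /* models vm_normal_page()'s result */
};

/* Mirrors the can_follow_write_pte() check introduced by the patch;
 * the FOLL_WRITE test itself happens at the call site, as in follow_page(). */
static bool can_follow_write_pte(struct fake_pte pte, struct fake_page *page,
				 unsigned int flags)
{
	if (pte.writable)
		return true;
	if ((flags & FOLL_FORCE) && (flags & FOLL_COW))
		return page && page->anon && !page->ksm;
	return false;
}

int main(void)
{
	struct fake_page cow_copy  = { .anon = true,  .ksm = false };
	struct fake_page file_page = { .anon = false, .ksm = false };

	/* After do_wp_page() broke COW for a forced write, the caller now
	 * sets FOLL_COW instead of clearing FOLL_WRITE. */
	unsigned int flags = FOLL_WRITE | FOLL_FORCE | FOLL_COW;

	/* The COWed anonymous copy is still in place: the lookup may proceed. */
	struct fake_pte good = { .writable = false, .page = &cow_copy };
	printf("COW copy still present: %s\n",
	       can_follow_write_pte(good, good.page, flags) ? "follow" : "retry fault");

	/* The window Dirty COW exploited: the COW copy was dropped and the
	 * read-only file page came back. The old code, having cleared
	 * FOLL_WRITE, would hand this page out for writing; the new check
	 * forces another fault instead. */
	struct fake_pte raced = { .writable = false, .page = &file_page };
	printf("raced back to file page: %s\n",
	       can_follow_write_pte(raced, raced.page, flags) ? "follow" : "retry fault");

	return 0;
}

In the real __get_user_pages() loop, "retry fault" corresponds to follow_page() returning NULL, so handle_mm_fault() runs again with the write request still intact and the access is COWed again rather than landing on the shared, read-only page.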