--- /dev/null
+--- a/include/linux/mm.h 2016-08-25 18:07:47.000000000 +0300
++++ b/include/linux/mm.h 2016-10-24 16:19:16.000000000 +0300
+@@ -1401,6 +1401,7 @@
+ #define FOLL_HWPOISON 0x100 /* check page is hwpoisoned */
+ #define FOLL_NUMA 0x200 /* force NUMA hinting page fault */
+ #define FOLL_MIGRATION 0x400 /* wait for page to replace migration entry */
++#define FOLL_COW 0x4000 /* internal GUP flag */
+
+ typedef int (*pte_fn_t)(pte_t *pte, pgtable_t token, unsigned long addr,
+ void *data);
+diff -Naur ./mm/memory.c ../linux-2.6.32-642.6.2.el6/mm/memory.c
+--- a/mm/memory.c 2016-08-25 18:06:57.000000000 +0300
++++ b/mm/memory.c 2016-10-24 16:19:16.000000000 +0300
+@@ -1191,6 +1191,24 @@
+ }
+ EXPORT_SYMBOL_GPL(zap_vma_ptes);
+
++static inline bool can_follow_write_pte(pte_t pte, struct page *page,
++ unsigned int flags)
++{
++ if (pte_write(pte))
++ return true;
++
++ /*
++ * Make sure that we are really following CoWed page. We do not really
++ * have to care about exclusiveness of the page because we only want
++ * to ensure that once COWed page hasn't disappeared in the meantime
++ * or it hasn't been merged to a KSM page.
++ */
++ if ((flags & FOLL_FORCE) && (flags & FOLL_COW))
++ return page && PageAnon(page) && !PageKsm(page);
++
++ return false;
++}
++
+ /*
+ * Do a quick page-table lookup for a single page.
+ */
+@@ -1280,10 +1298,11 @@
+ migration_entry_wait(mm, pmd, address);
+ goto split_fallthrough;
+ }
+- if ((flags & FOLL_WRITE) && !pte_write(pte))
+- goto unlock;
+-
+ page = vm_normal_page(vma, address, pte);
++ if ((flags & FOLL_WRITE) && !can_follow_write_pte(pte, page, flags)) {
++ pte_unmap_unlock(ptep, ptl);
++ return NULL;
++ }
+ if (unlikely(!page)) {
+ if ((flags & FOLL_DUMP) ||
+ !is_zero_pfn(pte_pfn(pte)))
+@@ -1304,7 +1323,6 @@
+ */
+ mark_page_accessed(page);
+ }
+-unlock:
+ pte_unmap_unlock(ptep, ptl);
+ out:
+ return page;
+@@ -1503,17 +1520,13 @@
+ * The VM_FAULT_WRITE bit tells us that
+ * do_wp_page has broken COW when necessary,
+ * even if maybe_mkwrite decided not to set
+- * pte_write. We can thus safely do subsequent
+- * page lookups as if they were reads. But only
+- * do so when looping for pte_write is futile:
+- * in some cases userspace may also be wanting
+- * to write to the gotten user page, which a
+- * read fault here might prevent (a readonly
+- * page might get reCOWed by userspace write).
++ * pte_write. We cannot simply drop FOLL_WRITE
++ * here because the COWed page might be gone by
++ * the time we do the subsequent page lookups.
+ */
+ if ((ret & VM_FAULT_WRITE) &&
+ !(vma->vm_flags & VM_WRITE))
+- foll_flags &= ~FOLL_WRITE;
++ foll_flags |= FOLL_COW;
+
+ cond_resched();
+ }
# by setting the define to ".local" or ".bz123456"
#
# % define buildid .local
+%define buildid .mos1
%define distro_build 431.20.3
%define signmodules 1
Source85: config-powerpc64-debug-rhel
Source86: config-s390x-debug-rhel
+# LP: https://bugs.launchpad.net/mos/+bug/1636528
+Patch1: cve-2016-2016-5195.patch
+
# empty final patch file to facilitate testing of kernel patches
Patch999999: linux-kernel-test.patch
# Dynamically generate kernel .config files from config-* files
make -f %{SOURCE20} VERSION=%{version} configs
+ApplyPatch cve-2016-2016-5195.patch
+
ApplyOptionalPatch linux-kernel-test.patch
# Any further pre-build tree manipulations happen here.
%endif
%changelog
+* Mon Nov 14 2016 Anton Chevychalov <achevychalov@mirantis.com> [2.6.32-431.20.3.mos1.el6.centos]
+- [mm] Backport fix for CVE-2016-5195
+
* Thu Jun 19 2014 Johnny Hughes <johnny@centos.org> [2.6.32-431.20.3.el6.centos]
- Roll in CentOS Branding