From 5d45cb459b1f908e65f4170fcbc8cbcd9a8a0611 Mon Sep 17 00:00:00 2001
From: "Pavel Shamis (Pasha)"
Date: Wed, 2 Aug 2017 17:08:51 -0500
Subject: [PATCH] Patching XPMEM kernel code for 4.11.0

Signed-off-by: Pavel Shamis (Pasha)
---
 kernel/xpmem_attach.c | 16 +++++++++++++++-
 kernel/xpmem_main.c   |  5 +++++
 kernel/xpmem_misc.c   |  5 +++++
 kernel/xpmem_pfn.c    | 11 +++++++++--
 4 files changed, 34 insertions(+), 3 deletions(-)

diff --git a/kernel/xpmem_attach.c b/kernel/xpmem_attach.c
index 3b0fbd1..3c85f70 100644
--- a/kernel/xpmem_attach.c
+++ b/kernel/xpmem_attach.c
@@ -7,6 +7,7 @@
  * Copyright 2010,2012 Cray Inc. All Rights Reserved
  * Copyright (c) 2014-2017 Los Alamos National Security, LLC. All rights
  * reserved.
+ * Copyright 2017 ARM, Inc. All Rights Reserved
  */
 
 /*
@@ -20,6 +21,10 @@
 #include "xpmem_internal.h"
 #include "xpmem_private.h"
 
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 11, 0)
+#include <linux/sched/signal.h>
+#endif
+
 static void
 xpmem_open_handler(struct vm_area_struct *vma)
 {
@@ -152,11 +157,20 @@ xpmem_close_handler(struct vm_area_struct *vma)
 }
 
 static int
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 11, 0)
+xpmem_fault_handler(struct vm_fault *vmf)
+#else
 xpmem_fault_handler(struct vm_area_struct *vma, struct vm_fault *vmf)
+#endif
 {
 	int ret, att_locked = 0;
 	int seg_tg_mmap_sem_locked = 0, vma_verification_needed = 0;
-	u64 vaddr = (u64)(uintptr_t) vmf->virtual_address;
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 11, 0)
+	u64 vaddr = (u64)(uintptr_t) vmf->address;
+	struct vm_area_struct *vma = vmf->vma;
+#else
+	u64 vaddr = (u64)(uintptr_t) vmf->virtual_address;
+#endif
 	u64 seg_vaddr;
 	unsigned long pfn = 0, old_pfn = 0;
 	struct xpmem_thread_group *ap_tg, *seg_tg;
diff --git a/kernel/xpmem_main.c b/kernel/xpmem_main.c
index 67316f0..6402554 100644
--- a/kernel/xpmem_main.c
+++ b/kernel/xpmem_main.c
@@ -6,6 +6,7 @@
  * Copyright (c) 2004-2007 Silicon Graphics, Inc. All Rights Reserved.
  * Copyright 2010, 2014 Cray Inc. All Rights Reserved
  * Copyright 2015-2016 Los Alamos National Security, LLC. All rights reserved.
+ * Copyright 2017 ARM, Inc. All rights reserved.
  */
 
 /*
@@ -33,6 +34,10 @@
 #include "xpmem_internal.h"
 #include "xpmem_private.h"
 
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 11, 0)
+#include <linux/sched/signal.h>
+#endif
+
 #if LINUX_VERSION_CODE < KERNEL_VERSION(3,10,0)
 #define proc_set_user(_pde, _uid, _gid) \
 	do { \
diff --git a/kernel/xpmem_misc.c b/kernel/xpmem_misc.c
index c1e161c..a2099b8 100644
--- a/kernel/xpmem_misc.c
+++ b/kernel/xpmem_misc.c
@@ -5,6 +5,7 @@
  *
  * Copyright (c) 2004-2007 Silicon Graphics, Inc. All Rights Reserved.
  * Copyright 2009, 2010, 2014 Cray Inc. All Rights Reserved
+ * Copyright 2017 ARM, Inc. All Rights Reserved
  */
 
 /*
@@ -19,6 +20,10 @@
 #include "xpmem_private.h"
 #include <asm/uaccess.h>
 
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 11, 0)
+#include <linux/sched/signal.h>
+#endif
+
 uint32_t xpmem_debug_on = 0;
 
 /*
diff --git a/kernel/xpmem_pfn.c b/kernel/xpmem_pfn.c
index c9dc70c..26728fe 100644
--- a/kernel/xpmem_pfn.c
+++ b/kernel/xpmem_pfn.c
@@ -5,7 +5,7 @@
  *
  * Copyright (c) 2004-2007 Silicon Graphics, Inc. All Rights Reserved.
  * Copyright 2009, 2014 Cray Inc. All Rights Reserved
- * Copyright 2016 ARM Inc. All Rights Reserved
+ * Copyright 2016-2017 ARM Inc. All Rights Reserved
  * Copyright (c) 2016-2017 Nathan Hjelm
  */
 
@@ -20,6 +20,10 @@
 #include "xpmem_internal.h"
 #include "xpmem_private.h"
 
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 11, 0)
+#include <linux/sched/signal.h>
+#endif
+
 /* #of pages rounded up that vaddr and size occupy */
 #undef num_of_pages
 #define num_of_pages(v, s) \
@@ -223,7 +227,10 @@ xpmem_pin_page(struct xpmem_thread_group *tg, struct task_struct *src_task,
 	}
 
 	/* get_user_pages() faults and pins the page */
-#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 9, 0)
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 11, 0)
+	ret = get_user_pages_remote (src_task, src_mm, vaddr, 1, FOLL_WRITE | FOLL_FORCE,
+				     &page, NULL, NULL);
+#elif LINUX_VERSION_CODE >= KERNEL_VERSION(4, 9, 0)
 	ret = get_user_pages_remote (src_task, src_mm, vaddr, 1, FOLL_WRITE | FOLL_FORCE,
 				     &page, NULL);
 #else