@@ -162,7 +162,11 @@ xpmem_vaddr_to_pte_offset(struct mm_struct *mm, u64 vaddr, u64 *offset)
 	}
 #endif

+#if LINUX_VERSION_CODE >= KERNEL_VERSION(6, 4, 0)
+	pte = pte_offset_kernel(pmd, vaddr);
+#else
 	pte = pte_offset_map(pmd, vaddr);
+#endif
 	if (!pte_present(*pte))
 		return NULL;
@@ -216,7 +220,11 @@ xpmem_vaddr_to_pte_size(struct mm_struct *mm, u64 vaddr, u64 *size)
 		return NULL;
 	}

+#if LINUX_VERSION_CODE >= KERNEL_VERSION(6, 4, 0)
+	pte = pte_offset_kernel(pmd, vaddr);
+#else
 	pte = pte_offset_map(pmd, vaddr);
+#endif
 	if (!pte_present(*pte)) {
 		*size = PAGE_SIZE;
 		return NULL;
@@ -268,10 +276,12 @@ xpmem_pin_page(struct xpmem_thread_group *tg, struct task_struct *src_task,
 	foll_write = (vma->vm_flags & VM_WRITE) ? FOLL_WRITE : 0;

 	/* get_user_pages()/get_user_pages_remote() faults and pins the page */
-#if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 9, 0)
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(6, 5, 0)
+	ret = get_user_pages_remote(src_mm, vaddr, 1, foll_write, &page, NULL);
+#elif LINUX_VERSION_CODE >= KERNEL_VERSION(5, 9, 0)
 	ret = get_user_pages_remote(src_mm, vaddr, 1, foll_write, &page, NULL,
 				    NULL);
-#elif LINUX_VERSION_CODE >= KERNEL_VERSION(4, 10, 0)
+#elif LINUX_VERSION_CODE >= KERNEL_VERSION(4, 10, 0)
 	ret = get_user_pages_remote(src_task, src_mm, vaddr, 1, foll_write,
 				    &page, NULL, NULL);
 #elif LINUX_VERSION_CODE >= KERNEL_VERSION(4, 9, 0)
0 commit comments