Skip to content

Commit 33b95d9

Browse files
urezki authored and gregkh committed
mm/vmalloc, mm/kasan: respect gfp mask in kasan_populate_vmalloc()
commit 79357cd upstream. kasan_populate_vmalloc() and its helpers ignore the caller's gfp_mask and always allocate memory using the hardcoded GFP_KERNEL flag. This makes them inconsistent with vmalloc(), which was recently extended to support GFP_NOFS and GFP_NOIO allocations. Page table allocations performed during shadow population also ignore the external gfp_mask. To preserve the intended semantics of GFP_NOFS and GFP_NOIO, wrap the apply_to_page_range() calls into the appropriate memalloc scope. xfs calls vmalloc with GFP_NOFS, so this bug could lead to deadlock. There was a report here https://lkml.kernel.org/r/[email protected] This patch: - Extends kasan_populate_vmalloc() and helpers to take gfp_mask; - Passes gfp_mask down to alloc_pages_bulk() and __get_free_page(); - Enforces GFP_NOFS/NOIO semantics with memalloc_*_save()/restore() around apply_to_page_range(); - Updates vmalloc.c and percpu allocator call sites accordingly. Link: https://lkml.kernel.org/r/[email protected] Fixes: 451769e ("mm/vmalloc: alloc GFP_NO{FS,IO} for vmalloc") Signed-off-by: Uladzislau Rezki (Sony) <[email protected]> Reported-by: [email protected] Reviewed-by: Andrey Ryabinin <[email protected]> Cc: Baoquan He <[email protected]> Cc: Michal Hocko <[email protected]> Cc: Alexander Potapenko <[email protected]> Cc: Andrey Konovalov <[email protected]> Cc: Dmitry Vyukov <[email protected]> Cc: Vincenzo Frascino <[email protected]> Cc: <[email protected]> Signed-off-by: Andrew Morton <[email protected]> Signed-off-by: Greg Kroah-Hartman <[email protected]>
1 parent 36e84da commit 33b95d9

File tree

3 files changed

+31
-14
lines changed

3 files changed

+31
-14
lines changed

include/linux/kasan.h

Lines changed: 3 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -562,7 +562,7 @@ static inline void kasan_init_hw_tags(void) { }
562562
#if defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KASAN_SW_TAGS)
563563

564564
void kasan_populate_early_vm_area_shadow(void *start, unsigned long size);
565-
int kasan_populate_vmalloc(unsigned long addr, unsigned long size);
565+
int kasan_populate_vmalloc(unsigned long addr, unsigned long size, gfp_t gfp_mask);
566566
void kasan_release_vmalloc(unsigned long start, unsigned long end,
567567
unsigned long free_region_start,
568568
unsigned long free_region_end,
@@ -574,7 +574,7 @@ static inline void kasan_populate_early_vm_area_shadow(void *start,
574574
unsigned long size)
575575
{ }
576576
static inline int kasan_populate_vmalloc(unsigned long start,
577-
unsigned long size)
577+
unsigned long size, gfp_t gfp_mask)
578578
{
579579
return 0;
580580
}
@@ -610,7 +610,7 @@ static __always_inline void kasan_poison_vmalloc(const void *start,
610610
static inline void kasan_populate_early_vm_area_shadow(void *start,
611611
unsigned long size) { }
612612
static inline int kasan_populate_vmalloc(unsigned long start,
613-
unsigned long size)
613+
unsigned long size, gfp_t gfp_mask)
614614
{
615615
return 0;
616616
}

mm/kasan/shadow.c

Lines changed: 24 additions & 7 deletions
Original file line numberDiff line numberDiff line change
@@ -335,13 +335,13 @@ static void ___free_pages_bulk(struct page **pages, int nr_pages)
335335
}
336336
}
337337

338-
static int ___alloc_pages_bulk(struct page **pages, int nr_pages)
338+
static int ___alloc_pages_bulk(struct page **pages, int nr_pages, gfp_t gfp_mask)
339339
{
340340
unsigned long nr_populated, nr_total = nr_pages;
341341
struct page **page_array = pages;
342342

343343
while (nr_pages) {
344-
nr_populated = alloc_pages_bulk(GFP_KERNEL, nr_pages, pages);
344+
nr_populated = alloc_pages_bulk(gfp_mask, nr_pages, pages);
345345
if (!nr_populated) {
346346
___free_pages_bulk(page_array, nr_total - nr_pages);
347347
return -ENOMEM;
@@ -353,25 +353,42 @@ static int ___alloc_pages_bulk(struct page **pages, int nr_pages)
353353
return 0;
354354
}
355355

356-
static int __kasan_populate_vmalloc(unsigned long start, unsigned long end)
356+
static int __kasan_populate_vmalloc(unsigned long start, unsigned long end, gfp_t gfp_mask)
357357
{
358358
unsigned long nr_pages, nr_total = PFN_UP(end - start);
359359
struct vmalloc_populate_data data;
360+
unsigned int flags;
360361
int ret = 0;
361362

362-
data.pages = (struct page **)__get_free_page(GFP_KERNEL | __GFP_ZERO);
363+
data.pages = (struct page **)__get_free_page(gfp_mask | __GFP_ZERO);
363364
if (!data.pages)
364365
return -ENOMEM;
365366

366367
while (nr_total) {
367368
nr_pages = min(nr_total, PAGE_SIZE / sizeof(data.pages[0]));
368-
ret = ___alloc_pages_bulk(data.pages, nr_pages);
369+
ret = ___alloc_pages_bulk(data.pages, nr_pages, gfp_mask);
369370
if (ret)
370371
break;
371372

372373
data.start = start;
374+
375+
/*
376+
* page tables allocations ignore external gfp mask, enforce it
377+
* by the scope API
378+
*/
379+
if ((gfp_mask & (__GFP_FS | __GFP_IO)) == __GFP_IO)
380+
flags = memalloc_nofs_save();
381+
else if ((gfp_mask & (__GFP_FS | __GFP_IO)) == 0)
382+
flags = memalloc_noio_save();
383+
373384
ret = apply_to_page_range(&init_mm, start, nr_pages * PAGE_SIZE,
374385
kasan_populate_vmalloc_pte, &data);
386+
387+
if ((gfp_mask & (__GFP_FS | __GFP_IO)) == __GFP_IO)
388+
memalloc_nofs_restore(flags);
389+
else if ((gfp_mask & (__GFP_FS | __GFP_IO)) == 0)
390+
memalloc_noio_restore(flags);
391+
375392
___free_pages_bulk(data.pages, nr_pages);
376393
if (ret)
377394
break;
@@ -385,7 +402,7 @@ static int __kasan_populate_vmalloc(unsigned long start, unsigned long end)
385402
return ret;
386403
}
387404

388-
int kasan_populate_vmalloc(unsigned long addr, unsigned long size)
405+
int kasan_populate_vmalloc(unsigned long addr, unsigned long size, gfp_t gfp_mask)
389406
{
390407
unsigned long shadow_start, shadow_end;
391408
int ret;
@@ -414,7 +431,7 @@ int kasan_populate_vmalloc(unsigned long addr, unsigned long size)
414431
shadow_start = PAGE_ALIGN_DOWN(shadow_start);
415432
shadow_end = PAGE_ALIGN(shadow_end);
416433

417-
ret = __kasan_populate_vmalloc(shadow_start, shadow_end);
434+
ret = __kasan_populate_vmalloc(shadow_start, shadow_end, gfp_mask);
418435
if (ret)
419436
return ret;
420437

mm/vmalloc.c

Lines changed: 4 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -2026,6 +2026,8 @@ static struct vmap_area *alloc_vmap_area(unsigned long size,
20262026
if (unlikely(!vmap_initialized))
20272027
return ERR_PTR(-EBUSY);
20282028

2029+
/* Only reclaim behaviour flags are relevant. */
2030+
gfp_mask = gfp_mask & GFP_RECLAIM_MASK;
20292031
might_sleep();
20302032

20312033
/*
@@ -2038,8 +2040,6 @@ static struct vmap_area *alloc_vmap_area(unsigned long size,
20382040
*/
20392041
va = node_alloc(size, align, vstart, vend, &addr, &vn_id);
20402042
if (!va) {
2041-
gfp_mask = gfp_mask & GFP_RECLAIM_MASK;
2042-
20432043
va = kmem_cache_alloc_node(vmap_area_cachep, gfp_mask, node);
20442044
if (unlikely(!va))
20452045
return ERR_PTR(-ENOMEM);
@@ -2089,7 +2089,7 @@ static struct vmap_area *alloc_vmap_area(unsigned long size,
20892089
BUG_ON(va->va_start < vstart);
20902090
BUG_ON(va->va_end > vend);
20912091

2092-
ret = kasan_populate_vmalloc(addr, size);
2092+
ret = kasan_populate_vmalloc(addr, size, gfp_mask);
20932093
if (ret) {
20942094
free_vmap_area(va);
20952095
return ERR_PTR(ret);
@@ -4826,7 +4826,7 @@ struct vm_struct **pcpu_get_vm_areas(const unsigned long *offsets,
48264826

48274827
/* populate the kasan shadow space */
48284828
for (area = 0; area < nr_vms; area++) {
4829-
if (kasan_populate_vmalloc(vas[area]->va_start, sizes[area]))
4829+
if (kasan_populate_vmalloc(vas[area]->va_start, sizes[area], GFP_KERNEL))
48304830
goto err_free_shadow;
48314831
}
48324832

0 commit comments

Comments
 (0)