diff --git a/include/linux/gfp.h b/include/linux/gfp.h
index f78d1e89593f..194da88f128f 100644
--- a/include/linux/gfp.h
+++ b/include/linux/gfp.h
@@ -44,6 +44,7 @@ struct vm_area_struct;
 #else
 #define ___GFP_NOLOCKDEP	0
 #endif
+#define ___GFP_CMA		0x1000000u
 /* If the above are modified, __GFP_BITS_SHIFT may need updating */
 
 /*
@@ -57,6 +58,7 @@ struct vm_area_struct;
 #define __GFP_HIGHMEM	((__force gfp_t)___GFP_HIGHMEM)
 #define __GFP_DMA32	((__force gfp_t)___GFP_DMA32)
 #define __GFP_MOVABLE	((__force gfp_t)___GFP_MOVABLE)  /* ZONE_MOVABLE allowed */
+#define __GFP_CMA	((__force gfp_t)___GFP_CMA)	/* MIGRATE_CMA allowed */
 #define GFP_ZONEMASK	(__GFP_DMA|__GFP_HIGHMEM|__GFP_DMA32|__GFP_MOVABLE)
 
 /**
@@ -217,8 +219,13 @@ struct vm_area_struct;
 #define __GFP_NOLOCKDEP ((__force gfp_t)___GFP_NOLOCKDEP)
 
 /* Room for N __GFP_FOO bits */
-#define __GFP_BITS_SHIFT (23 + IS_ENABLED(CONFIG_LOCKDEP))
+#define __GFP_BITS_SHIFT (25)
+#ifdef CONFIG_LOCKDEP
 #define __GFP_BITS_MASK ((__force gfp_t)((1 << __GFP_BITS_SHIFT) - 1))
+#else
+#define __GFP_BITS_MASK (((__force gfp_t)((1 << __GFP_BITS_SHIFT) - 1)) & \
+			 ~0x800000u)
+#endif
 
 /**
  * DOC: Useful GFP flag combinations
diff --git a/include/linux/highmem.h b/include/linux/highmem.h
index 0690679832d4..e80602ee359b 100644
--- a/include/linux/highmem.h
+++ b/include/linux/highmem.h
@@ -181,7 +181,12 @@ static inline struct page *
 alloc_zeroed_user_highpage_movable(struct vm_area_struct *vma,
 					unsigned long vaddr)
 {
+#ifndef CONFIG_CMA
 	return __alloc_zeroed_user_highpage(__GFP_MOVABLE, vma, vaddr);
+#else
+	return __alloc_zeroed_user_highpage(__GFP_MOVABLE|__GFP_CMA, vma,
+					    vaddr);
+#endif
 }
 
 static inline void clear_highpage(struct page *page)
diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
index e8fbc2c74093..769bab28dc1c 100644
--- a/include/linux/mmzone.h
+++ b/include/linux/mmzone.h
@@ -387,6 +387,11 @@ struct zone {
	struct pglist_data	*zone_pgdat;
	struct per_cpu_pageset __percpu *pageset;
 
+#ifdef CONFIG_CMA
+	/* Set while alloc_contig_range() is working on this zone */
+	bool			cma_alloc;
+#endif
+
 #ifndef CONFIG_SPARSEMEM
	/*
	 * Flags for a pageblock_nr_pages block. See pageblock-flags.h.
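[Not part of the patch] A minimal usage sketch of what the header changes above enable, assuming the allocator support added below in mm/page_alloc.c: a movable allocation opts in to CMA pageblocks by OR-ing in __GFP_CMA, just as alloc_zeroed_user_highpage_movable() now does for anonymous user pages. The helper name below is hypothetical.

	#include <linux/gfp.h>
	#include <linux/mm.h>

	/* Hypothetical helper: a movable allocation that may be placed in
	 * MIGRATE_CMA pageblocks now that the allocator honours __GFP_CMA.
	 */
	static struct page *example_alloc_movable_page(void)
	{
		gfp_t gfp = GFP_HIGHUSER_MOVABLE;

	#ifdef CONFIG_CMA
		gfp |= __GFP_CMA;	/* opt in to CMA-backed pageblocks */
	#endif
		return alloc_page(gfp);
	}

Without __GFP_CMA, a movable allocation is never served from MIGRATE_CMA free lists under this scheme, which is the behavioural change the mm/page_alloc.c hunks below implement.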
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index f680a6532cc3..ee41b849d276 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -2579,14 +2579,31 @@ __rmqueue(struct zone *zone, unsigned int order, int migratetype,
 retry:
	page = __rmqueue_smallest(zone, order, migratetype);
-	if (unlikely(!page)) {
-		if (migratetype == MIGRATE_MOVABLE)
-			page = __rmqueue_cma_fallback(zone, order);
-
-		if (!page && __rmqueue_fallback(zone, order, migratetype,
-								alloc_flags))
-			goto retry;
-	}
+	if (unlikely(!page) && __rmqueue_fallback(zone, order, migratetype,
+						  alloc_flags))
+		goto retry;
+
+	trace_mm_page_alloc_zone_locked(page, order, migratetype);
+	return page;
+}
+
+/* Like __rmqueue(), but movable requests may be served from MIGRATE_CMA. */
+static struct page *__rmqueue_cma(struct zone *zone, unsigned int order,
+				  int migratetype, unsigned int alloc_flags)
+{
+	struct page *page = NULL;
+
+retry:
+#ifdef CONFIG_CMA
+	if (migratetype == MIGRATE_MOVABLE && !zone->cma_alloc)
+		page = __rmqueue_cma_fallback(zone, order);
+	else
+#endif
+		page = __rmqueue_smallest(zone, order, migratetype);
+
+	if (unlikely(!page) && __rmqueue_fallback(zone, order, migratetype,
+						  alloc_flags))
+		goto retry;
 
	trace_mm_page_alloc_zone_locked(page, order, migratetype);
	return page;
@@ -2599,14 +2616,20 @@ __rmqueue(struct zone *zone, unsigned int order, int migratetype,
  */
 static int rmqueue_bulk(struct zone *zone, unsigned int order,
			unsigned long count, struct list_head *list,
-			int migratetype, unsigned int alloc_flags)
+			int migratetype, unsigned int alloc_flags, int cma)
 {
	int i, alloced = 0;
 
	spin_lock(&zone->lock);
	for (i = 0; i < count; ++i) {
-		struct page *page = __rmqueue(zone, order, migratetype,
-								alloc_flags);
+		struct page *page;
+
+		if (cma)
+			page = __rmqueue_cma(zone, order, migratetype,
+					     alloc_flags);
+		else
+			page = __rmqueue(zone, order, migratetype, alloc_flags);
+
		if (unlikely(page == NULL))
			break;
 
@@ -3064,7 +3087,7 @@ static inline void zone_statistics(struct zone *preferred_zone, struct zone *z)
 
 static struct page *__rmqueue_pcplist(struct zone *zone, int migratetype,
			unsigned int alloc_flags,
			struct per_cpu_pages *pcp,
-			struct list_head *list)
+			struct list_head *list, gfp_t gfp_flags)
 {
	struct page *page;
 
@@ -3072,7 +3095,8 @@ static struct page *__rmqueue_pcplist(struct zone *zone, int migratetype,
		if (list_empty(list)) {
			pcp->count += rmqueue_bulk(zone, 0,
					pcp->batch, list,
-					migratetype, alloc_flags);
+					migratetype, alloc_flags,
+					gfp_flags & __GFP_CMA);
			if (unlikely(list_empty(list)))
				return NULL;
		}
@@ -3099,7 +3123,8 @@ static struct page *rmqueue_pcplist(struct zone *preferred_zone,
	local_irq_save(flags);
	pcp = &this_cpu_ptr(zone->pageset)->pcp;
	list = &pcp->lists[migratetype];
-	page = __rmqueue_pcplist(zone, migratetype, alloc_flags, pcp, list);
+	page = __rmqueue_pcplist(zone, migratetype, alloc_flags, pcp, list,
+				 gfp_flags);
	if (page) {
		__count_zid_vm_events(PGALLOC, page_zonenum(page), 1 << order);
		zone_statistics(preferred_zone, zone);
@@ -3140,8 +3165,14 @@ struct page *rmqueue(struct zone *preferred_zone,
		if (page)
			trace_mm_page_alloc_zone_locked(page, order, migratetype);
	}
-	if (!page)
-		page = __rmqueue(zone, order, migratetype, alloc_flags);
+	if (!page) {
+		if (gfp_flags & __GFP_CMA)
+			page = __rmqueue_cma(zone, order, migratetype,
+					     alloc_flags);
+		else
+			page = __rmqueue(zone, order, migratetype,
+					 alloc_flags);
+	}
	} while (page && check_new_pages(page, order));
	spin_unlock(&zone->lock);
	if (!page)
@@ -8159,6 +8190,7 @@ int alloc_contig_range(unsigned long start, unsigned long end,
	if (ret)
		return ret;
 
+	cc.zone->cma_alloc = 1;
	/*
	 * In case of -EBUSY, we'd like to know which page causes problem.
	 * So, just fall through. test_pages_isolated() has a tracepoint
@@ -8241,6 +8273,7 @@ int alloc_contig_range(unsigned long start, unsigned long end,
 
 done:
	undo_isolate_page_range(pfn_max_align_down(start),
				pfn_max_align_up(end), migratetype);
+	cc.zone->cma_alloc = 0;
	return ret;
 }
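[Not part of the patch] For context, a sketch of the interplay the last two hunks introduce; the function below is hypothetical. While alloc_contig_range() is assembling a range, zone->cma_alloc is set, so concurrent __GFP_CMA movable allocations in __rmqueue_cma() skip __rmqueue_cma_fallback() and take __rmqueue_smallest() instead, rather than competing for the CMA pageblocks being isolated.

	#include <linux/gfp.h>

	/* Hypothetical driver-side caller of alloc_contig_range(). */
	static int example_grab_contig(unsigned long start_pfn,
				       unsigned long nr_pages)
	{
		/*
		 * alloc_contig_range() now sets cc.zone->cma_alloc = 1 on
		 * entry and clears it again at the done: label, steering
		 * concurrent __GFP_CMA allocations away from the CMA area
		 * for the duration of the call.
		 */
		return alloc_contig_range(start_pfn, start_pfn + nr_pages,
					  MIGRATE_CMA, GFP_KERNEL);
	}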