mirror of
https://github.com/torvalds/linux.git
synced 2024-11-01 04:53:36 +01:00
mm/page_alloc: handle a missing case for memalloc_nocma_{save/restore} APIs
memalloc_nocma_{save/restore} APIs can be used to skip page allocation
on CMA area, but, there is a missing case and the page on CMA area could
be allocated even if APIs are used. This patch handles this case to fix
the potential issue.
For now, these APIs are used to prevent long-term pinning on the CMA
page. When the long-term pinning is requested on the CMA page, it is
migrated to the non-CMA page before pinning. This non-CMA page is
allocated by using memalloc_nocma_{save/restore} APIs. If the APIs don't
work as intended, the CMA page is allocated and it is pinned for a long
time. This long-term pin for the CMA page causes cma_alloc() failure
and it could result in wrong behaviour in the device driver that uses the
cma_alloc().
The missing case is an allocation from the pcplist. MIGRATE_MOVABLE pcplist
could have the pages on CMA area so we need to skip it if ALLOC_CMA
isn't specified.
Fixes: 8510e69c8e
(mm/page_alloc: fix memalloc_nocma_{save/restore} APIs)
Signed-off-by: Joonsoo Kim <iamjoonsoo.kim@lge.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Acked-by: Vlastimil Babka <vbabka@suse.cz>
Acked-by: Michal Hocko <mhocko@suse.com>
Cc: "Aneesh Kumar K . V" <aneesh.kumar@linux.ibm.com>
Cc: Mel Gorman <mgorman@techsingularity.net>
Link: https://lkml.kernel.org/r/1601429472-12599-1-git-send-email-iamjoonsoo.kim@lge.com
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
This commit is contained in:
parent
484cfaca95
commit
1d91df85f3
1 changed file with 16 additions and 3 deletions
|
@ -3367,10 +3367,17 @@ struct page *rmqueue(struct zone *preferred_zone,
|
||||||
struct page *page;
|
struct page *page;
|
||||||
|
|
||||||
if (likely(order == 0)) {
|
if (likely(order == 0)) {
|
||||||
|
/*
|
||||||
|
* MIGRATE_MOVABLE pcplist could have the pages on CMA area and
|
||||||
|
* we need to skip it when CMA area isn't allowed.
|
||||||
|
*/
|
||||||
|
if (!IS_ENABLED(CONFIG_CMA) || alloc_flags & ALLOC_CMA ||
|
||||||
|
migratetype != MIGRATE_MOVABLE) {
|
||||||
page = rmqueue_pcplist(preferred_zone, zone, gfp_flags,
|
page = rmqueue_pcplist(preferred_zone, zone, gfp_flags,
|
||||||
migratetype, alloc_flags);
|
migratetype, alloc_flags);
|
||||||
goto out;
|
goto out;
|
||||||
}
|
}
|
||||||
|
}
|
||||||
|
|
||||||
/*
|
/*
|
||||||
* We most definitely don't want callers attempting to
|
* We most definitely don't want callers attempting to
|
||||||
|
@ -3381,7 +3388,13 @@ struct page *rmqueue(struct zone *preferred_zone,
|
||||||
|
|
||||||
do {
|
do {
|
||||||
page = NULL;
|
page = NULL;
|
||||||
if (alloc_flags & ALLOC_HARDER) {
|
/*
|
||||||
|
* order-0 request can reach here when the pcplist is skipped
|
||||||
|
* due to non-CMA allocation context. HIGHATOMIC area is
|
||||||
|
* reserved for high-order atomic allocation, so order-0
|
||||||
|
* request should skip it.
|
||||||
|
*/
|
||||||
|
if (order > 0 && alloc_flags & ALLOC_HARDER) {
|
||||||
page = __rmqueue_smallest(zone, order, MIGRATE_HIGHATOMIC);
|
page = __rmqueue_smallest(zone, order, MIGRATE_HIGHATOMIC);
|
||||||
if (page)
|
if (page)
|
||||||
trace_mm_page_alloc_zone_locked(page, order, migratetype);
|
trace_mm_page_alloc_zone_locked(page, order, migratetype);
|
||||||
|
|
Loading…
Reference in a new issue