Commit e2f466e3 authored by Lucas Stach's avatar Lucas Stach Committed by Linus Torvalds

mm: cma_alloc: allow to specify GFP mask

Most users of this interface just want to use it with the default
GFP_KERNEL flags, but for cases where DMA memory is allocated it may be
called from a different context.

No functional change yet, just passing through the flag to the
underlying alloc_contig_range function.

Signed-off-by: Lucas Stach <>
Acked-by: Vlastimil Babka <>
Acked-by: Michal Hocko <>
Cc: Radim Krcmar <>
Cc: Catalin Marinas <>
Cc: Will Deacon <>
Cc: Chris Zankel <>
Cc: Ralf Baechle <>
Cc: Paolo Bonzini <>
Cc: Alexander Graf <>
Cc: Joonsoo Kim <>
Signed-off-by: Andrew Morton <>
Signed-off-by: Linus Torvalds <>
parent ca96b625
......@@ -56,7 +56,8 @@ struct page *kvm_alloc_hpt_cma(unsigned long nr_pages)
VM_BUG_ON(order_base_2(nr_pages) < KVM_CMA_CHUNK_ORDER - PAGE_SHIFT);
return cma_alloc(kvm_cma, nr_pages, order_base_2(HPT_ALIGN_PAGES));
return cma_alloc(kvm_cma, nr_pages, order_base_2(HPT_ALIGN_PAGES),
......@@ -193,7 +193,7 @@ struct page *dma_alloc_from_contiguous(struct device *dev, size_t count,
return cma_alloc(dev_get_cma_area(dev), count, align);
return cma_alloc(dev_get_cma_area(dev), count, align, GFP_KERNEL);
......@@ -29,6 +29,7 @@ extern int __init cma_declare_contiguous(phys_addr_t base,
extern int cma_init_reserved_mem(phys_addr_t base, phys_addr_t size,
unsigned int order_per_bit,
struct cma **res_cma);
extern struct page *cma_alloc(struct cma *cma, size_t count, unsigned int align);
extern struct page *cma_alloc(struct cma *cma, size_t count, unsigned int align,
gfp_t gfp_mask);
extern bool cma_release(struct cma *cma, const struct page *pages, unsigned int count);
......@@ -357,7 +357,8 @@ int __init cma_declare_contiguous(phys_addr_t base,
* This function allocates part of contiguous memory on specific
* contiguous memory area.
struct page *cma_alloc(struct cma *cma, size_t count, unsigned int align)
struct page *cma_alloc(struct cma *cma, size_t count, unsigned int align,
gfp_t gfp_mask)
unsigned long mask, offset;
unsigned long pfn = -1;
......@@ -403,7 +404,7 @@ struct page *cma_alloc(struct cma *cma, size_t count, unsigned int align)
pfn = cma->base_pfn + (bitmap_no << cma->order_per_bit);
ret = alloc_contig_range(pfn, pfn + count, MIGRATE_CMA,
if (ret == 0) {
page = pfn_to_page(pfn);
......@@ -138,7 +138,7 @@ static int cma_alloc_mem(struct cma *cma, int count)
if (!mem)
return -ENOMEM;
p = cma_alloc(cma, count, 0);
p = cma_alloc(cma, count, 0, GFP_KERNEL);
if (!p) {
return -ENOMEM;
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment