tracing: Use vmap_page_range() to map memmap ring buffer
The code to map the physical memory retrieved by memmap currently allocates
an array of pages to cover the physical memory and then calls vmap() to map
it to a virtual address. Instead of using this temporary array of struct page
descriptors, simply use vmap_page_range() that can directly map the
contiguous physical memory to a virtual address.

Link: https://lore.kernel.org/all/CAHk-=whUOfVucfJRt7E0AH+GV41ELmS4wJqxHDnui6Giddfkzw@mail.gmail.com/
Cc: Masami Hiramatsu <mhiramat@kernel.org>
Cc: Mark Rutland <mark.rutland@arm.com>
Cc: Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: Vincent Donnefort <vdonnefort@google.com>
Cc: Vlastimil Babka <vbabka@suse.cz>
Cc: Mike Rapoport <rppt@kernel.org>
Cc: Jann Horn <jannh@google.com>
Link: https://lore.kernel.org/20250402144953.754618481@goodmis.org
Suggested-by: Linus Torvalds <torvalds@linux-foundation.org>
Signed-off-by: Steven Rostedt (Google) <rostedt@goodmis.org>
commit 394f3f02de
parent 34ea8fa084
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -50,6 +50,7 @@
 #include <linux/irq_work.h>
 #include <linux/workqueue.h>
 #include <linux/sort.h>
+#include <linux/io.h> /* vmap_page_range() */
 
 #include <asm/setup.h> /* COMMAND_LINE_SIZE */
 
@@ -9796,29 +9797,27 @@ static int instance_mkdir(const char *name)
 	return ret;
 }
 
-static u64 map_pages(u64 start, u64 size)
+static u64 map_pages(unsigned long start, unsigned long size)
 {
-	struct page **pages;
-	phys_addr_t page_start;
-	unsigned int page_count;
-	unsigned int i;
-	void *vaddr;
-
-	page_count = DIV_ROUND_UP(size, PAGE_SIZE);
+	unsigned long vmap_start, vmap_end;
+	struct vm_struct *area;
+	int ret;
 
-	page_start = start;
-	pages = kmalloc_array(page_count, sizeof(struct page *), GFP_KERNEL);
-	if (!pages)
+	area = get_vm_area(size, VM_IOREMAP);
+	if (!area)
 		return 0;
 
-	for (i = 0; i < page_count; i++) {
-		phys_addr_t addr = page_start + i * PAGE_SIZE;
-		pages[i] = pfn_to_page(addr >> PAGE_SHIFT);
-	}
-	vaddr = vmap(pages, page_count, VM_MAP, PAGE_KERNEL);
-	kfree(pages);
+	vmap_start = (unsigned long) area->addr;
+	vmap_end = vmap_start + size;
 
-	return (u64)(unsigned long)vaddr;
+	ret = vmap_page_range(vmap_start, vmap_end,
+			      start, pgprot_nx(PAGE_KERNEL));
+	if (ret < 0) {
+		free_vm_area(area);
+		return 0;
+	}
+
+	return (u64)vmap_start;
 }
 
 /**
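For reference, below is a condensed, self-contained sketch of the two mapping
strategies contrasted above, with the old vmap()-based path and the new
vmap_page_range()-based path pulled apart into separate helpers. The helper
names and the void * return type are illustrative simplifications, not part
of the patch; the calls themselves (kmalloc_array(), pfn_to_page(), vmap(),
get_vm_area(), vmap_page_range(), pgprot_nx(), free_vm_area()) mirror the
hunk.

#include <linux/io.h>       /* vmap_page_range() */
#include <linux/mm.h>       /* pfn_to_page(), PAGE_SIZE, PAGE_SHIFT */
#include <linux/slab.h>     /* kmalloc_array(), kfree() */
#include <linux/vmalloc.h>  /* vmap(), get_vm_area(), free_vm_area() */

/* Old approach: build a throwaway struct page array, then vmap() it. */
static void *map_via_page_array(phys_addr_t start, unsigned long size)
{
	unsigned int i, page_count = DIV_ROUND_UP(size, PAGE_SIZE);
	struct page **pages;
	void *vaddr;

	pages = kmalloc_array(page_count, sizeof(struct page *), GFP_KERNEL);
	if (!pages)
		return NULL;

	/* One struct page pointer per page of the contiguous region. */
	for (i = 0; i < page_count; i++)
		pages[i] = pfn_to_page((start + i * PAGE_SIZE) >> PAGE_SHIFT);

	vaddr = vmap(pages, page_count, VM_MAP, PAGE_KERNEL);
	kfree(pages);		/* the array only existed to feed vmap() */
	return vaddr;
}

/* New approach: reserve a VA range, then map the physical range directly. */
static void *map_via_vmap_page_range(phys_addr_t start, unsigned long size)
{
	unsigned long vmap_start, vmap_end;
	struct vm_struct *area;

	area = get_vm_area(size, VM_IOREMAP);
	if (!area)
		return NULL;

	vmap_start = (unsigned long)area->addr;
	vmap_end   = vmap_start + size;

	/* Populate the page tables straight from the physical address. */
	if (vmap_page_range(vmap_start, vmap_end, start,
			    pgprot_nx(PAGE_KERNEL)) < 0) {
		free_vm_area(area);	/* drops the reserved VA range */
		return NULL;
	}

	return area->addr;
}

The gain is visible in the second helper: because the memmap region handed to
the tracer is physically contiguous, the page tables can be populated directly
from the physical start address, so the temporary struct page array and the
kmalloc_array()/kfree() round trip disappear, while pgprot_nx(PAGE_KERNEL)
requests a non-executable mapping.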