Skip to content

Commit c2a0831

Browse files
committed
ring-buffer: Allow reserve_mem persistent ring buffers to be mmapped
When the persistent ring buffer is created from the memory returned by reserve_mem there is nothing prohibiting it to be memory mapped to user space. The memory is the same as the pages allocated by alloc_page(). The way the memory is managed by the ring buffer code is slightly different though and needs to be addressed. The persistent memory uses the page->id for its own purpose whereas the user mmap buffer currently uses that for the subbuf array mapped to user space. If the buffer is a persistent buffer, use the page index into that buffer as the identifier instead of the page->id. That is, the page->id for a persistent buffer represents the order of the buffer in the linked list. ->id == 0 means it is the reader page. When a reader page is swapped, the new reader page's ->id gets zero, and the old reader page gets the ->id of the page that it swapped with. In the user space mapping, the ->id is the index of where it was mapped in user space and does not change while it is mapped. Since the persistent buffer is fixed in its location, the index of where a page is in the memory range can be used as the "id" to put in the meta page array, and it can be mapped in the same order to user space as it is in the persistent memory. A new rb_page_id() helper function is used to get and set the id depending on whether the page is a normal memory allocated buffer or a physical memory mapped buffer. Cc: Linus Torvalds <torvalds@linux-foundation.org> Cc: Masami Hiramatsu <mhiramat@kernel.org> Cc: Mark Rutland <mark.rutland@arm.com> Cc: Mathieu Desnoyers <mathieu.desnoyers@efficios.com> Cc: Andrew Morton <akpm@linux-foundation.org> Cc: Vincent Donnefort <vdonnefort@google.com> Cc: Vlastimil Babka <vbabka@suse.cz> Cc: Mike Rapoport <rppt@kernel.org> Cc: Jann Horn <jannh@google.com> Link: https://lore.kernel.org/20250401203332.246646011@goodmis.org Signed-off-by: Steven Rostedt (Google) <rostedt@goodmis.org>
1 parent 4fc78a7 commit c2a0831

File tree

2 files changed

+45
-8
lines changed

2 files changed

+45
-8
lines changed

kernel/trace/ring_buffer.c

Lines changed: 45 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -6003,6 +6003,39 @@ static void rb_clear_buffer_page(struct buffer_page *page)
60036003
page->read = 0;
60046004
}
60056005

6006+
/*
 * When the buffer is memory mapped to user space, each sub buffer
 * has a unique id that is used by the meta data to tell the user
 * where the current reader page is.
 *
 * For a normal allocated ring buffer, the id is saved in the buffer page
 * id field, and updated via this function.
 *
 * But for a fixed memory mapped buffer, the id is already assigned for
 * fixed memory ordering in the memory layout and can not be used. Instead
 * the index of where the page lies in the memory layout is used.
 *
 * For the normal pages, set the buffer page id with the passed in @id
 * value and return that.
 *
 * For fixed memory mapped pages, get the page index in the memory layout
 * and return that as the id.
 */
static int rb_page_id(struct ring_buffer_per_cpu *cpu_buffer,
		      struct buffer_page *bpage, int id)
{
	/*
	 * For boot buffers, the id is the index,
	 * otherwise, set the buffer page with this id
	 */
	if (cpu_buffer->ring_meta)
		id = rb_meta_subbuf_idx(cpu_buffer->ring_meta, bpage->page);
	else
		bpage->id = id;

	return id;
}
6038+
60066039
static void rb_update_meta_page(struct ring_buffer_per_cpu *cpu_buffer)
60076040
{
60086041
struct trace_buffer_meta *meta = cpu_buffer->meta_page;
@@ -6011,7 +6044,9 @@ static void rb_update_meta_page(struct ring_buffer_per_cpu *cpu_buffer)
60116044
return;
60126045

60136046
meta->reader.read = cpu_buffer->reader_page->read;
6014-
meta->reader.id = cpu_buffer->reader_page->id;
6047+
meta->reader.id = rb_page_id(cpu_buffer, cpu_buffer->reader_page,
6048+
cpu_buffer->reader_page->id);
6049+
60156050
meta->reader.lost_events = cpu_buffer->lost_events;
60166051

60176052
meta->entries = local_read(&cpu_buffer->entries);
@@ -6927,23 +6962,29 @@ static void rb_setup_ids_meta_page(struct ring_buffer_per_cpu *cpu_buffer,
69276962
struct trace_buffer_meta *meta = cpu_buffer->meta_page;
69286963
unsigned int nr_subbufs = cpu_buffer->nr_pages + 1;
69296964
struct buffer_page *first_subbuf, *subbuf;
6965+
int cnt = 0;
69306966
int id = 0;
69316967

6932-
subbuf_ids[id] = (unsigned long)cpu_buffer->reader_page->page;
6933-
cpu_buffer->reader_page->id = id++;
6968+
id = rb_page_id(cpu_buffer, cpu_buffer->reader_page, id);
6969+
subbuf_ids[id++] = (unsigned long)cpu_buffer->reader_page->page;
6970+
cnt++;
69346971

69356972
first_subbuf = subbuf = rb_set_head_page(cpu_buffer);
69366973
do {
6974+
id = rb_page_id(cpu_buffer, subbuf, id);
6975+
69376976
if (WARN_ON(id >= nr_subbufs))
69386977
break;
69396978

69406979
subbuf_ids[id] = (unsigned long)subbuf->page;
6941-
subbuf->id = id;
69426980

69436981
rb_inc_page(&subbuf);
69446982
id++;
6983+
cnt++;
69456984
} while (subbuf != first_subbuf);
69466985

6986+
WARN_ON(cnt != nr_subbufs);
6987+
69476988
/* install subbuf ID to kern VA translation */
69486989
cpu_buffer->subbuf_ids = subbuf_ids;
69496990

kernel/trace/trace.c

Lines changed: 0 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -8508,10 +8508,6 @@ static int tracing_buffers_mmap(struct file *filp, struct vm_area_struct *vma)
85088508
if (iter->tr->flags & TRACE_ARRAY_FL_MEMMAP)
85098509
return -ENODEV;
85108510

8511-
/* Currently the boot mapped buffer is not supported for mmap */
8512-
if (iter->tr->flags & TRACE_ARRAY_FL_BOOT)
8513-
return -ENODEV;
8514-
85158511
ret = get_snapshot_map(iter->tr);
85168512
if (ret)
85178513
return ret;

0 commit comments

Comments
 (0)