Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

llext: use the new inspection API #9831

Merged
merged 3 commits into from
Feb 24, 2025
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
2 changes: 2 additions & 0 deletions src/include/sof/lib_manager.h
Original file line number Diff line number Diff line change
Expand Up @@ -97,12 +97,14 @@ struct lib_manager_segment_desc {
};

struct llext;
struct llext_buf_loader;

/* Per-module state kept by the library manager for one loadable "module file" */
struct lib_manager_module {
unsigned int start_idx; /* Index of the first driver from this module in
* the library-global driver list */
const struct sof_man_module_manifest *mod_manifest; /* manifest, read from the module's ".module" section */
struct llext *llext; /* Zephyr loadable extension context */
struct llext_buf_loader *ebl; /* Zephyr loadable extension buffer loader */
bool mapped; /* NOTE(review): initialized false; presumably true once the
* module's segments are mapped — confirm against mapping code */
struct lib_manager_segment_desc segment[LIB_MANAGER_N_SEGMENTS];
};
Expand Down
143 changes: 95 additions & 48 deletions src/library_manager/llext_manager.c
Original file line number Diff line number Diff line change
Expand Up @@ -29,6 +29,7 @@
#include <zephyr/llext/loader.h>
#include <zephyr/llext/llext.h>
#include <zephyr/logging/log_ctrl.h>
#include <zephyr/llext/inspect.h>

#include <rimage/sof/user/manifest.h>
#include <module/module/api_ver.h>
Expand Down Expand Up @@ -70,40 +71,49 @@ static int llext_manager_align_unmap(void __sparse_cache *vma, size_t size)
return sys_mm_drv_unmap_region(aligned_vma, ALIGN_UP(pre_pad_size + size, PAGE_SZ));
}

static int llext_manager_load_data_from_storage(const struct llext *ext,
/*
* Map the memory range covered by 'vma' and 'size' as writable, copy all
* sections that belong to the specified 'region' and are contained in the
* memory range, then remap the same area according to the 'flags' parameter.
*/
static int llext_manager_load_data_from_storage(const struct llext_loader *ldr,
const struct llext *ext,
enum llext_mem region,
void __sparse_cache *vma,
const uint8_t *load_base,
size_t size, uint32_t flags)
{
unsigned int i;
const void *region_addr;
int ret = llext_manager_align_map(vma, size, SYS_MM_MEM_PERM_RW);
const elf_shdr_t *shdr;

if (ret < 0) {
tr_err(&lib_manager_tr, "cannot map %u of %p", size, (__sparse_force void *)vma);
return ret;
}

size_t init_offset = 0;
llext_get_region_info(ldr, ext, region, NULL, &region_addr, NULL);

/* Need to copy sections within regions individually, offsets may differ */
for (i = 0, shdr = llext_section_headers(ext); i < llext_section_count(ext); i++, shdr++) {
for (i = 0; i < llext_section_count(ext); i++) {
const elf_shdr_t *shdr;
enum llext_mem s_region = LLEXT_MEM_COUNT;
size_t s_offset = 0;

llext_get_section_info(ldr, ext, i, &shdr, &s_region, &s_offset);

/* skip sections not in the requested region */
if (s_region != region)
continue;

/* skip detached sections (will be outside requested VMA area) */
if ((uintptr_t)shdr->sh_addr < (uintptr_t)vma ||
(uintptr_t)shdr->sh_addr >= (uintptr_t)vma + size)
continue;

if (!init_offset)
init_offset = shdr->sh_offset;

/* found a section within the region */
size_t offset = shdr->sh_offset - init_offset;

if (shdr->sh_type != SHT_NOBITS) {
ret = memcpy_s((__sparse_force void *)shdr->sh_addr, size - offset,
load_base + offset, shdr->sh_size);
if (ret < 0)
return ret;
}
ret = memcpy_s((__sparse_force void *)shdr->sh_addr, size - s_offset,
(const uint8_t *)region_addr + s_offset, shdr->sh_size);
Copy link
Collaborator

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

we still need the .bss check, don't we?

Copy link
Contributor Author

@pillo79 pillo79 Feb 21, 2025

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Actually, that is not needed anymore. Only sections for the requested region will be considered by the check above, so BSS will always be skipped regardless of its placement in memory.

Copy link
Collaborator

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

nnno, I think this is breaking SOF. Look at

if ((uintptr_t)bss_addr + bss_size == (uintptr_t)va_base_data &&
!((uintptr_t)bss_addr & (PAGE_SZ - 1))) {
/* .bss directly in front of writable data and properly aligned, prepend */
va_base_data = bss_addr;
data_size += bss_size;
} else if ((uintptr_t)bss_addr == (uintptr_t)va_base_data +
ALIGN_UP(data_size, bss_align)) {
/* .bss directly behind writable data, append */
data_size += bss_size;
. What we do is we merge .bss and .data to map them in one go. We can map memory only in units of pages, so if we were to map them separately we would potentially waste a page of memory. By merging them we avoid that. And we can merge them because they're mapped with the same flags - as writable data. And here I think now that is broken.

Copy link
Contributor Author

@pillo79 pillo79 Feb 21, 2025

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Sorry, I do not understand how that is relevant. The changes did not impact the mapping with the vma and size parameters in any way, so while mapping writable data, the same "BSS-and-DATA" area is mapped in one go, as before.

Instead of having to rely only on the address, the copy code now checks the actual region LLEXT mapped each section to. So, yes, previously that check was needed, but now LLEXT will report .bss in LLEXT_MEM_BSS and that loop will definitely ignore it when looking for LLEXT_MEM_DATA.

EDIT: CI is flaky, but the LNL + HDA test passed again. Can you describe a possible failing situation?

Copy link
Collaborator

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

@pillo79 I think you're right. The thing is that even now calls to llext_manager_load_data_from_storage() pass two (related but) different "memory descriptors:" (1) memory that has to be mapped and (2) memory, that has to be copied. Before your PR the call for .bss + .data was

ret = llext_manager_load_data_from_storage(ext, va_base_data, ext->mem[LLEXT_MEM_DATA],
data_size, SYS_MM_MEM_PERM_RW);
where function arguments 2 and 4 referred to the memory, that had to be mapped, while argument 3 to the memory, that had to be copied. For .rodata and .text those were the same, but not for .bss + .data. After your change these different meanings remain, only the parameters change: now argument 3 describes the memory to be copied, while arguments 4 and 5 describe the memory to be mapped. We need a comment about that at least. Let's do this after this PR is merged - either you can submit it or I can do it.

if (ret < 0)
return ret;
}

/*
Expand Down Expand Up @@ -165,23 +175,29 @@ static int llext_manager_load_module(struct lib_manager_module *mctx)
}
}

const struct llext_loader *ldr = &mctx->ebl->loader;
const struct llext *ext = mctx->llext;

/* Copy Code */
ret = llext_manager_load_data_from_storage(ext, va_base_text, ext->mem[LLEXT_MEM_TEXT],
text_size, SYS_MM_MEM_PERM_EXEC);
ret = llext_manager_load_data_from_storage(ldr, ext, LLEXT_MEM_TEXT,
va_base_text, text_size, SYS_MM_MEM_PERM_EXEC);
if (ret < 0)
return ret;

/* Copy read-only data */
ret = llext_manager_load_data_from_storage(ext, va_base_rodata, ext->mem[LLEXT_MEM_RODATA],
rodata_size, 0);
ret = llext_manager_load_data_from_storage(ldr, ext, LLEXT_MEM_RODATA,
va_base_rodata, rodata_size, 0);
if (ret < 0)
goto e_text;

/* Copy writable data */
ret = llext_manager_load_data_from_storage(ext, va_base_data, ext->mem[LLEXT_MEM_DATA],
data_size, SYS_MM_MEM_PERM_RW);
/*
* NOTE: va_base_data and data_size refer to an address range that
* spans over the BSS area as well, so the mapping will cover
* both, but only LLEXT_MEM_DATA sections will be copied.
*/
ret = llext_manager_load_data_from_storage(ldr, ext, LLEXT_MEM_DATA,
va_base_data, data_size, SYS_MM_MEM_PERM_RW);
if (ret < 0)
goto e_rodata;

Expand Down Expand Up @@ -238,11 +254,13 @@ static bool llext_manager_section_detached(const elf_shdr_t *shdr)
return shdr->sh_addr < SOF_MODULE_DRAM_LINK_END;
}

static int llext_manager_link(struct llext_buf_loader *ebl, const char *name,
static int llext_manager_link(const char *name,
struct lib_manager_module *mctx, const void **buildinfo,
const struct sof_man_module_manifest **mod_manifest)
{
struct llext **llext = &mctx->llext;
struct llext_loader *ldr = &mctx->ebl->loader;
const elf_shdr_t *hdr;
int ret;

if (*llext && !mctx->mapped) {
Expand All @@ -266,56 +284,65 @@ static int llext_manager_link(struct llext_buf_loader *ebl, const char *name,
.relocate_local = !*llext,
.pre_located = true,
.section_detached = llext_manager_section_detached,
.keep_section_info = true,
};

ret = llext_load(&ebl->loader, name, llext, &ldr_parm);
ret = llext_load(ldr, name, llext, &ldr_parm);
if (ret)
return ret;
}

mctx->segment[LIB_MANAGER_TEXT].addr = ebl->loader.sects[LLEXT_MEM_TEXT].sh_addr;
mctx->segment[LIB_MANAGER_TEXT].size = ebl->loader.sects[LLEXT_MEM_TEXT].sh_size;
/* All code sections */
llext_get_region_info(ldr, *llext, LLEXT_MEM_TEXT, &hdr, NULL, NULL);
mctx->segment[LIB_MANAGER_TEXT].addr = hdr->sh_addr;
mctx->segment[LIB_MANAGER_TEXT].size = hdr->sh_size;

tr_dbg(&lib_manager_tr, ".text: start: %#lx size %#x",
mctx->segment[LIB_MANAGER_TEXT].addr,
mctx->segment[LIB_MANAGER_TEXT].size);

/* All read-only data sections */
mctx->segment[LIB_MANAGER_RODATA].addr =
ebl->loader.sects[LLEXT_MEM_RODATA].sh_addr;
mctx->segment[LIB_MANAGER_RODATA].size = ebl->loader.sects[LLEXT_MEM_RODATA].sh_size;
llext_get_region_info(ldr, *llext, LLEXT_MEM_RODATA, &hdr, NULL, NULL);
mctx->segment[LIB_MANAGER_RODATA].addr = hdr->sh_addr;
mctx->segment[LIB_MANAGER_RODATA].size = hdr->sh_size;

tr_dbg(&lib_manager_tr, ".rodata: start: %#lx size %#x",
mctx->segment[LIB_MANAGER_RODATA].addr,
mctx->segment[LIB_MANAGER_RODATA].size);

/* All writable data sections */
mctx->segment[LIB_MANAGER_DATA].addr =
ebl->loader.sects[LLEXT_MEM_DATA].sh_addr;
mctx->segment[LIB_MANAGER_DATA].size = ebl->loader.sects[LLEXT_MEM_DATA].sh_size;
llext_get_region_info(ldr, *llext, LLEXT_MEM_DATA, &hdr, NULL, NULL);
mctx->segment[LIB_MANAGER_DATA].addr = hdr->sh_addr;
mctx->segment[LIB_MANAGER_DATA].size = hdr->sh_size;

tr_dbg(&lib_manager_tr, ".data: start: %#lx size %#x",
mctx->segment[LIB_MANAGER_DATA].addr,
mctx->segment[LIB_MANAGER_DATA].size);

mctx->segment[LIB_MANAGER_BSS].addr = ebl->loader.sects[LLEXT_MEM_BSS].sh_addr;
mctx->segment[LIB_MANAGER_BSS].size = ebl->loader.sects[LLEXT_MEM_BSS].sh_size;
/* Writable uninitialized data section */
llext_get_region_info(ldr, *llext, LLEXT_MEM_BSS, &hdr, NULL, NULL);
mctx->segment[LIB_MANAGER_BSS].addr = hdr->sh_addr;
mctx->segment[LIB_MANAGER_BSS].size = hdr->sh_size;

tr_dbg(&lib_manager_tr, ".bss: start: %#lx size %#x",
mctx->segment[LIB_MANAGER_BSS].addr,
mctx->segment[LIB_MANAGER_BSS].size);

ssize_t binfo_o = llext_find_section(&ebl->loader, ".mod_buildinfo");

if (binfo_o >= 0)
*buildinfo = llext_peek(&ebl->loader, binfo_o);

ssize_t mod_o = llext_find_section(&ebl->loader, ".module");
*buildinfo = NULL;
ret = llext_section_shndx(ldr, *llext, ".mod_buildinfo");
if (ret >= 0) {
llext_get_section_info(ldr, *llext, ret, &hdr, NULL, NULL);
*buildinfo = llext_peek(ldr, hdr->sh_offset);
}

if (mod_o >= 0)
*mod_manifest = llext_peek(&ebl->loader, mod_o);
*mod_manifest = NULL;
ret = llext_section_shndx(ldr, *llext, ".module");
if (ret >= 0) {
llext_get_section_info(ldr, *llext, ret, &hdr, NULL, NULL);
*mod_manifest = llext_peek(ldr, hdr->sh_offset);
}
Copy link
Collaborator

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

why is this now better than a single call to `llext_find_section()`?

Copy link
Contributor Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

My intention for LLEXT is to ask for deprecation of llext_find_section, as it has a fundamental flaw - it does not provide the section length, so it's basically impossible to use it safely.

The new functions in the inspection API expose all details (when useful to the application) at the same runtime cost, and require the user to provide both ldr and ext parameters, allowing future freedom in Zephyr for internal LLEXT structure management.


return binfo_o >= 0 && mod_o >= 0 ? 0 : -EPROTO;
return *buildinfo && *mod_manifest ? 0 : -EPROTO;
}

/* Count "module files" in the library, allocate and initialize memory for their descriptors */
Expand Down Expand Up @@ -350,6 +377,7 @@ static int llext_manager_mod_init(struct lib_manager_mod_ctx *ctx,
offs = mod_array[i].segment[LIB_MANAGER_TEXT].file_offset;
ctx->mod[n_mod].mapped = false;
ctx->mod[n_mod].llext = NULL;
ctx->mod[n_mod].ebl = NULL;
ctx->mod[n_mod++].start_idx = i;
}

Expand Down Expand Up @@ -427,16 +455,28 @@ static int llext_manager_link_single(uint32_t module_id, const struct sof_man_fw
mod_size = ALIGN_UP(mod_array[i].segment[LIB_MANAGER_TEXT].file_offset - mod_offset,
PAGE_SZ);

uintptr_t dram_base = (uintptr_t)desc - SOF_MAN_ELF_TEXT_OFFSET;
struct llext_buf_loader ebl = LLEXT_BUF_LOADER((uint8_t *)dram_base + mod_offset, mod_size);
if (!mctx->ebl) {
/* allocate once, never freed */
mctx->ebl = rmalloc(SOF_MEM_ZONE_RUNTIME_SHARED, 0, SOF_MEM_CAPS_RAM,
sizeof(struct llext_buf_loader));
if (!mctx->ebl) {
tr_err(&lib_manager_tr, "loader alloc failed");
return 0;
}

uint8_t *dram_base = (uint8_t *)desc - SOF_MAN_ELF_TEXT_OFFSET;

*mctx->ebl = (struct llext_buf_loader)LLEXT_BUF_LOADER(dram_base + mod_offset,
mod_size);
}

/*
* LLEXT linking is only needed once for all the "drivers" in the
* module. This calls llext_load(), which also takes references to any
* dependencies, sets up sections and retrieves buildinfo and
* mod_manifest
*/
ret = llext_manager_link(&ebl, mod_array[entry_index - inst_idx].name, mctx,
ret = llext_manager_link(mod_array[entry_index - inst_idx].name, mctx,
buildinfo, mod_manifest);
if (ret < 0) {
tr_err(&lib_manager_tr, "linking failed: %d", ret);
Expand Down Expand Up @@ -602,7 +642,14 @@ int llext_manager_free_module(const uint32_t component_id)

/* Protected by IPC serialization */
if (mctx->llext->use_count > 1) {
/* llext_unload() will return a positive number */
/*
* At least 2 users: llext_unload() will never actually free
* the extension but only reduce the refcount and return its
* new value (must be a positive number).
* NOTE: if this is modified to allow extension unload, the
* inspection data in the loader must be freed as well by
* calling the llext_free_inspection_data() function.
*/
int ret = llext_unload(&mctx->llext);

if (ret <= 0) {
Expand Down
Loading