From b4c1dac0f739d5cfc9e145cb9640a18e12e3e531 Mon Sep 17 00:00:00 2001 From: Li Ruilin Date: Mon, 15 Dec 2025 00:32:58 +0800 Subject: [PATCH 1/2] obmm: Fix race condition between unexport and addr_query_by_pa euleros inclusion category: bugfix bugzilla: https://gitee.com/openeuler/kernel/issues/IDBKX6 CVE: NA ------------------------------------- When obmm_query_by_pa was iterating through regions, it could access a region that was being unexported and thus in an inconsistent state. This may cause sgt structure being accessed after it was freed. Fix this by trying to get region refcount. Fixes: 64c6ae6a06e8 ("obmm: Add memory region export functionality") Signed-off-by: Li Ruilin --- drivers/ub/obmm/obmm_core.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/drivers/ub/obmm/obmm_core.c b/drivers/ub/obmm/obmm_core.c index c01ac152e5ad..a0a681d947bc 100644 --- a/drivers/ub/obmm/obmm_core.c +++ b/drivers/ub/obmm/obmm_core.c @@ -178,6 +178,8 @@ int obmm_query_by_pa(unsigned long pa, struct obmm_ext_addr *ext_addr) spin_lock_irqsave(lock, flags); list_for_each_entry(region, &g_obmm_ctx_info.regions, node) { + if (!try_get_obmm_region(region)) + continue; if (region->type == OBMM_IMPORT_REGION) { struct obmm_import_region *i_reg; @@ -190,7 +192,7 @@ int obmm_query_by_pa(unsigned long pa, struct obmm_ext_addr *ext_addr) e_reg = container_of(region, struct obmm_export_region, region); ret = get_pa_detail_export_region(e_reg, pa, ext_addr); } - + put_obmm_region(region); if (ret == 0) break; } -- Gitee From e777551b49fb8d8dbad14dd3a44c2f9b5c8fa6d4 Mon Sep 17 00:00:00 2001 From: Li Ruilin Date: Mon, 15 Dec 2025 12:19:31 +0800 Subject: [PATCH 2/2] obmm: Remove log prints of physical address and kernelspace virtual address euleros inclusion category: bugfix bugzilla: https://gitee.com/openeuler/kernel/issues/IDBKX6 CVE: NA ------------------------------------- For security reasons, remove all redundant log prints of physical address information and 
kernelspace virtual address information. For physical address information, only keep the prints at the entry and exit points of user operations. Address information on error paths is also removed. When an error occurs, we can still locate it with the information printed at the operation entry point. All kernelspace virtual address prints have been completely removed. Userspace virtual address prints are kept. Fixes: ac78ffc6e0c3 ("obmm: Add resource management support for imported memory") Signed-off-by: Li Ruilin --- drivers/ub/obmm/conti_mem_allocator.c | 3 -- drivers/ub/obmm/obmm_addr_check.c | 23 +++---------- drivers/ub/obmm/obmm_cache.c | 8 +---- drivers/ub/obmm/obmm_export_from_user.c | 2 +- drivers/ub/obmm/obmm_export_region_ops.c | 11 +++---- drivers/ub/obmm/obmm_import.c | 26 +++++++-------- drivers/ub/obmm/obmm_preimport.c | 33 ++++++++++--------- drivers/ub/obmm/obmm_preimport_prefilled.c | 38 +++++++--------------- drivers/ub/obmm/ubmempool_allocator.c | 21 +++--------- 9 files changed, 58 insertions(+), 107 deletions(-) diff --git a/drivers/ub/obmm/conti_mem_allocator.c b/drivers/ub/obmm/conti_mem_allocator.c index 3ed9445daca6..8ca482f5dfa5 100644 --- a/drivers/ub/obmm/conti_mem_allocator.c +++ b/drivers/ub/obmm/conti_mem_allocator.c @@ -373,11 +373,8 @@ static int conti_clear_thread(void *p) list_del(&node->list); allocator->memseg_clearing = node; - pr_debug("clearing: %d: %pa + 0x%lx\n", allocator->nid, &node->addr, node->size); spin_unlock_irqrestore(&allocator->lock, flags); ret = conti_clear_memseg(allocator, node); - pr_debug("%s: nid=%d, clear done node=%p, addr=%pa\n", __func__, allocator->nid, - node, &node->addr); spin_lock_irqsave(&allocator->lock, flags); allocator->memseg_clearing = NULL; diff --git a/drivers/ub/obmm/obmm_addr_check.c b/drivers/ub/obmm/obmm_addr_check.c index 09085d008ca8..4619dd9e6c6b 100644 --- a/drivers/ub/obmm/obmm_addr_check.c +++ b/drivers/ub/obmm/obmm_addr_check.c @@ -21,12 +21,7 @@ static struct 
pa_checker g_pa_checker; static bool is_same_pa_range(const struct obmm_pa_range *l, const struct obmm_pa_range *r) { - bool same = l->start == r->start && l->end == r->end; - - if (!same) - pr_err("unmatched pa range: [%pa, %pa] vs. [%pa, %pa]\n", &l->start, &l->end, - &r->start, &r->end); - return same; + return l->start == r->start && l->end == r->end; } int occupy_pa_range(const struct obmm_pa_range *pa_range) @@ -46,14 +41,9 @@ int occupy_pa_range(const struct obmm_pa_range *pa_range) if (ret != 0) { kfree(persist_info); - pr_err("failed to occupy PA range [%pa, %pa]: ret=%pe\n", &pa_range->start, - &pa_range->end, ERR_PTR(ret)); + pr_err("failed to occupy PA range: ret=%pe\n", ERR_PTR(ret)); return ret; } - pr_debug("pa_check: add [%pa,%pa]->{user=%s,data=%p}\n", &pa_range->start, &pa_range->end, - pa_range->info.user == OBMM_ADDR_USER_DIRECT_IMPORT ? - "direct_import" : "preimport", - pa_range->info.data); return 0; } @@ -68,19 +58,17 @@ int free_pa_range(const struct obmm_pa_range *pa_range) entry = mtree_erase(&g_pa_checker.pa_ranges, (unsigned long)pa_range->start); spin_unlock_irqrestore(&g_pa_checker.lock, flags); if (!entry) { - pr_err("PA range [%pa, %pa], not found.\n", &pa_range->start, &pa_range->end); + pr_err("PA range to be freed not found.\n"); return -EFAULT; } ret = 0; if (!is_same_pa_range((const struct obmm_pa_range *)entry, pa_range)) { /* expected to be UNREACHABLE */ - pr_err("BUG: PA range does not fully match.\n"); + pr_err("BUG: PA range to be freed does not fully match.\n"); ret = -ENOTRECOVERABLE; } user = ((struct obmm_pa_range *)entry)->info.user == OBMM_ADDR_USER_DIRECT_IMPORT ? 
"import" : "preimport"; - pr_debug("pa_check: del [%pa,?]->{user=%s,data=%p}\n", &pa_range->start, user, - ((struct obmm_pa_range *)entry)->info.data); kfree(entry); return ret; } @@ -126,9 +114,6 @@ int update_pa_range(phys_addr_t addr, const struct obmm_addr_info *info) if (!retrieved) return -EFAULT; - pr_debug("pa_check: update [%pa,?]->{user=%s,data=%p}\n", &addr, - info->user == OBMM_ADDR_USER_DIRECT_IMPORT ? "direct_import" : "preimport", - info->data); return 0; } diff --git a/drivers/ub/obmm/obmm_cache.c b/drivers/ub/obmm/obmm_cache.c index 534be7788501..1909da83de4c 100644 --- a/drivers/ub/obmm/obmm_cache.c +++ b/drivers/ub/obmm/obmm_cache.c @@ -71,8 +71,7 @@ int flush_cache_by_pa(phys_addr_t addr, size_t size, unsigned long cache_ops) enum hisi_soc_cache_maint_type maint_type = hisi_maint_type[cache_ops]; if (skip_cache_maintain) { - pr_debug_ratelimited("cache maintenance request {addr=%pa, size=%#zx, cache_ops=%lu}.\n", - &addr, size, cache_ops); + pr_debug_ratelimited("cache maintenance request {cache_ops=%lu}.\n", cache_ops); return 0; } @@ -225,10 +224,5 @@ int modify_pgtable_prot(struct mm_struct *mm, void *va, size_t size, bool cachea mmap_read_unlock(mm); obmm_flush_tlb(mm); - pr_debug("scan [%p-%#lx]\n", va, (uintptr_t)va + size); - pr_debug("\tpmd: %d\n", info.pmd_cnt); - pr_debug("\tpmd leaf: %d\n", info.pmd_leaf_cnt); - pr_debug("\tpte: %d\n", info.pte_cnt); - pr_debug("\thugetlb: %d\n", info.hugetlb_cnt); return 0; } diff --git a/drivers/ub/obmm/obmm_export_from_user.c b/drivers/ub/obmm/obmm_export_from_user.c index e1cd35416e63..bd0663bb197f 100644 --- a/drivers/ub/obmm/obmm_export_from_user.c +++ b/drivers/ub/obmm/obmm_export_from_user.c @@ -100,7 +100,7 @@ static bool hisi_workarounds_check_page_list(struct obmm_export_region *reg, str nid = 0; #endif if (nid < 0 || nid >= OBMM_MAX_LOCAL_NUMA_NODES) { - pr_err("Invalid node ID %d for page %p\n", nid, p); + pr_err("Invalid node ID %d.\n", nid); return false; } diff --git 
a/drivers/ub/obmm/obmm_export_region_ops.c b/drivers/ub/obmm/obmm_export_region_ops.c index 3b9cdd6b7dac..b561224b660b 100644 --- a/drivers/ub/obmm/obmm_export_region_ops.c +++ b/drivers/ub/obmm/obmm_export_region_ops.c @@ -129,15 +129,14 @@ static int kernel_pgtable_invalid_call(phys_addr_t start, phys_addr_t end, unsigned long start_pfn = start >> PAGE_SHIFT; unsigned long end_pfn = (end + 1) >> PAGE_SHIFT; - pr_debug("call external: set_linear_mapping_invalid(start_pfn=%#lx, end_pfn=%#lx, set_inval=%d)\n", - start_pfn, end_pfn, info->set_inval); + pr_debug("call external: set_linear_mapping_invalid(set_inval=%d)\n", info->set_inval); ret = set_linear_mapping_invalid(start_pfn, end_pfn, info->set_inval); if (ret < 0) { - pr_err("error calling set_linear_mapping_invalid(start_pfn=%#lx, end_pfn=%#lx, set_inval=%d): ret=%pe\n", - start_pfn, end_pfn, info->set_inval, ERR_PTR(ret)); + pr_err("error calling set_linear_mapping_invalid(set_inval=%d): ret=%pe\n", + info->set_inval, ERR_PTR(ret)); } else { - pr_debug("external called: set_linear_mapping_invalid(start_pfn=%#lx, end_pfn=%#lx, set_inval=%d, ret=%pe)\n", - start_pfn, end_pfn, info->set_inval, ERR_PTR(ret)); + pr_debug("external called: set_linear_mapping_invalid(set_inval=%d, ret=%pe)\n", + info->set_inval, ERR_PTR(ret)); } info->ret = ret; diff --git a/drivers/ub/obmm/obmm_import.c b/drivers/ub/obmm/obmm_import.c index 2875fa9e58ec..18f59efa90c6 100644 --- a/drivers/ub/obmm/obmm_import.c +++ b/drivers/ub/obmm/obmm_import.c @@ -38,8 +38,7 @@ static unsigned long get_pa_range_mem_cap(u32 scna, phys_addr_t pa, size_t size) if (ub_memory_validate_pa(scna, pa_start, pa_end, false)) mem_cap |= OBMM_MEM_ALLOW_NONCACHEABLE_MMAP; if (mem_cap == 0) - pr_err("PA range invalid. Non-UBMEM memory cannot be mmaped as import memory: pa=%pa, size=%#zx\n", - &pa_start, size); + pr_err("PA range invalid. 
Non-UBMEM memory cannot be mmaped as import memory\n"); return mem_cap; } @@ -60,8 +59,7 @@ static int setup_pa(struct obmm_import_region *i_reg) ubmem_res = setup_ubmem_resource(i_reg->pa, i_reg->region.mem_size, false); if (IS_ERR(ubmem_res)) { - pr_err("failed to setup ubmem resource. pa=%pa, size=%#llx, ret=%pe\n", - &i_reg->pa, i_reg->region.mem_size, ubmem_res); + pr_err("failed to setup ubmem resource: ret=%pe\n", ubmem_res); return PTR_ERR(ubmem_res); } i_reg->ubmem_res = ubmem_res; @@ -99,17 +97,20 @@ static int teardown_remote_numa(struct obmm_import_region *i_reg, bool force) { int ret, this_ret; - pr_info("call external: remove_memory_remote(nid=%d, pa=%#llx, size=%#llx)\n", - i_reg->numa_id, i_reg->pa, i_reg->region.mem_size); + pr_info("call external: remove_memory_remote(nid=%d, size=%#llx)\n", + i_reg->numa_id, i_reg->region.mem_size); ret = remove_memory_remote(i_reg->numa_id, i_reg->pa, i_reg->region.mem_size); pr_debug("external called: remove_memory_remote, ret=%pe\n", ERR_PTR(ret)); /* a full rollback is still possible: check whether this is a full teardown */ - if (ret != 0 && !force) + if (ret != 0 && !force) { + pr_err("remove_memory_remote(nid=%d, size=%#llx) failed: ret=%pe.\n", + i_reg->numa_id, i_reg->region.mem_size, ERR_PTR(ret)); return ret; + } if (region_preimport(&i_reg->region)) { - pr_info("call external: add_memory_remote(nid=%d, start=0x%llx, size=0x%llx, flags=MEMORY_KEEP_ISOLATED)\n", - i_reg->numa_id, i_reg->pa, i_reg->region.mem_size); + pr_info("call external: add_memory_remote(nid=%d, size=0x%llx, flags=MEMORY_KEEP_ISOLATED)\n", + i_reg->numa_id, i_reg->region.mem_size); this_ret = add_memory_remote(i_reg->numa_id, i_reg->pa, i_reg->region.mem_size, MEMORY_KEEP_ISOLATED); pr_debug("external called: add_memory_remote() returned %d\n", this_ret); @@ -132,13 +133,12 @@ static int setup_remote_numa(struct obmm_import_region *i_reg) flags = MEMORY_DIRECT_ONLINE; if (!(i_reg->region.mem_cap & OBMM_MEM_ALLOW_CACHEABLE_MMAP)) { - 
pr_err("PA range invalid. Cacheable memory cannot be managed with numa.remote: pa=%pa, size=%#llx\n", - &i_reg->pa, i_reg->region.mem_size); + pr_err("PA range invalid. Cacheable memory cannot be managed with numa.remote\n"); return -EINVAL; } - pr_info("call external: add_memory_remote(nid=%d, start=0x%llx, size=0x%llx, flags=%d)\n", - i_reg->numa_id, i_reg->pa, i_reg->region.mem_size, flags); + pr_info("call external: add_memory_remote(nid=%d, flags=%d)\n", + i_reg->numa_id, flags); ret = add_memory_remote(i_reg->numa_id, i_reg->pa, i_reg->region.mem_size, flags); pr_debug("external called: add_memory_remote() returned %d\n", ret); if (ret < 0) { diff --git a/drivers/ub/obmm/obmm_preimport.c b/drivers/ub/obmm/obmm_preimport.c index 96daafbadcec..aac211232a88 100644 --- a/drivers/ub/obmm/obmm_preimport.c +++ b/drivers/ub/obmm/obmm_preimport.c @@ -69,17 +69,17 @@ int check_preimport_cmd_common(const struct obmm_cmd_preimport *cmd) * to check for OBMM_BASIC_GRANU here. */ if (cmd->length % memory_block_size_bytes() != 0) { - pr_err("preimport length not aligned to %#lx: %#llx + %#llx.\n", - memory_block_size_bytes(), cmd->pa, cmd->length); + pr_err("preimport length not aligned to %#lx.\n", + memory_block_size_bytes()); return -EINVAL; } if (cmd->pa % memory_block_size_bytes()) { - pr_err("preimport base PA not aligned to %#lx: %#llx + %#llx.\n", - memory_block_size_bytes(), cmd->pa, cmd->length); + pr_err("preimport base PA not aligned to %#lx.\n", + memory_block_size_bytes()); return -EINVAL; } if (cmd->length > ULLONG_MAX - cmd->pa) { - pr_err("preimport PA range overflowed: %#llx + %#llx.\n", cmd->pa, cmd->length); + pr_err("preimport PA range overflowed.\n"); return -EINVAL; } if (cmd->length == 0) { @@ -103,18 +103,20 @@ int preimport_prepare_common(struct preimport_range *pr, uint8_t base_dist) int ret, ret_err; if (!ub_memory_validate_pa(pr->scna, pr->start, pr->end, true)) { - pr_err("PA range invalid. 
Cacheable memory cannot be managed with preimport: pa=%pa, size=%#llx\n", - &pr->start, pr->end - pr->start + 1); + pr_err("PA range invalid. Cacheable memory cannot be managed with preimport\n"); return -EINVAL; } - pr_info("call external: add_memory_remote(nid=%d, start=%pa, size=%#llx, flags=MEMORY_KEEP_ISOLATED)\n", - pr->numa_id, &pr->start, pr->end - pr->start + 1); + pr_info("call external: add_memory_remote(nid=%d, flags=MEMORY_KEEP_ISOLATED)\n", + pr->numa_id); ret = add_memory_remote(pr->numa_id, pr->start, pr->end - pr->start + 1, MEMORY_KEEP_ISOLATED); pr_debug("external called: add_memory_remote() returned %d\n", ret); - if (ret < 0) + if (ret < 0) { + pr_err("failed to call add_memory_remote(nid=%d): %pe\n", + pr->numa_id, ERR_PTR(ret)); return -EPERM; + } WARN_ON(pr->numa_id != NUMA_NO_NODE && pr->numa_id != ret); pr->numa_id = ret; @@ -131,8 +133,7 @@ int preimport_prepare_common(struct preimport_range *pr, uint8_t base_dist) return 0; err_remove_memory_remote: - pr_info("call external: remove_memory_remote(nid=%d, start=%pa, size=%#llx)\n", pr->numa_id, - &pr->start, pr->end - pr->start + 1); + pr_info("call external: remove_memory_remote(nid=%d)\n", pr->numa_id); ret_err = remove_memory_remote(pr->numa_id, pr->start, pr->end - pr->start + 1); pr_debug("external called: remove_memory_remote() returned %d\n", ret_err); return ret; @@ -142,12 +143,14 @@ int preimport_release_common(struct preimport_range *pr, bool force) { int ret; - pr_info("call external: remove_memory_remote(nid=%d, start=%pa, size=%#llx)\n", pr->numa_id, - &pr->start, pr->end - pr->start + 1); + pr_info("call external: remove_memory_remote(nid=%d)\n", pr->numa_id); ret = remove_memory_remote(pr->numa_id, pr->start, pr->end - pr->start + 1); pr_debug("external called: remove_memory_remote() returned %pe\n", ERR_PTR(ret)); - if (ret && !force) + if (ret && !force) { + pr_err("failed to call remove_memory_remote(nid=%d, size=%#llx): ret=%pe.\n", + pr->numa_id, pr->end - pr->start + 
1, ERR_PTR(ret)); return ret; + } mutex_lock(&list_mutex); list_del(&pr->node); diff --git a/drivers/ub/obmm/obmm_preimport_prefilled.c b/drivers/ub/obmm/obmm_preimport_prefilled.c index f06df16892bf..50a4273d0924 100644 --- a/drivers/ub/obmm/obmm_preimport_prefilled.c +++ b/drivers/ub/obmm/obmm_preimport_prefilled.c @@ -49,8 +49,7 @@ static int create_prefilled_preimport_range(const struct obmm_cmd_preimport *cmd ppr->ubmem_res = setup_ubmem_resource(cmd->pa, cmd->length, true); if (IS_ERR(ppr->ubmem_res)) { - pr_err("failed to setup ubmem resource on preimport. pa=%pa, size=%#llx, ret=%pe\n", - &cmd->pa, cmd->length, ppr->ubmem_res); + pr_err("failed to setup ubmem resource on preimport: ret=%pe\n", ppr->ubmem_res); kfree(ppr->bitmap); kfree(ppr); return PTR_ERR(ppr->ubmem_res); @@ -74,11 +73,11 @@ static int get_pa_mapping(phys_addr_t addr, struct prefilled_preimport_range **p ret = query_pa_range(addr, &info); if (ret) { - pr_err("No information found with PA=%pa.\n", &addr); + pr_err("No information found with PA requested.\n"); return ret; } if (info.user != OBMM_ADDR_USER_PREIMPORT) { - pr_err("PA=%pa is not a preimport address.\n", &addr); + pr_err("PA requested is not a preimport address.\n"); return -EINVAL; } if (info.data == not_ready_ptr) { @@ -87,7 +86,6 @@ static int get_pa_mapping(phys_addr_t addr, struct prefilled_preimport_range **p } *p_ppr = (struct prefilled_preimport_range *)info.data; - pr_debug("prefilled preimport range found with PA %pa.\n", &addr); return 0; } @@ -166,22 +164,18 @@ int preimport_release_prefilled(phys_addr_t start, phys_addr_t end) } /* must be an exact match */ if (ppr->pr.start != start || ppr->pr.end != end) { - pr_err("requested range touches ppr<%pa> but is not an exact match.\n", - &ppr->pr.start); + pr_err("requested range touches ppr but is not an exact match.\n"); ret = -EINVAL; goto err_unlock; } if (ppr->pr.use_count != 0) { - pr_err("ppr<%pa> cannot be released: %u active users found.\n", &ppr->pr.start, - 
ppr->pr.use_count); + pr_err("preimport cannot be released: %u active users found.\n", ppr->pr.use_count); ret = -EBUSY; goto err_unlock; } ret = preimport_release_common(&ppr->pr, false); - if (ret) { - pr_err("failed to release ppr<%pa>.\n", &ppr->pr.start); + if (ret) goto err_unlock; - } /* roll back is not possible from this point */ pa_range.start = ppr->pr.start; @@ -193,7 +187,6 @@ int preimport_release_prefilled(phys_addr_t start, phys_addr_t end) mutex_unlock(&preimport_mutex); destroy_prefilled_preimport_range(ppr); - pr_debug("ppr<%pa> released.\n", &start); return ret; err_unlock: @@ -211,11 +204,10 @@ static int get_ppr(phys_addr_t pa, struct prefilled_preimport_range **p_ppr) if (ret) goto out_unlock; if (ppr == not_ready_ptr) { - pr_err("ppr <%pa> not ready yet.\n", &pa); + pr_err("preimport requested not ready yet.\n"); ret = -EAGAIN; goto out_unlock; } - pr_debug("ppr <%pa> refcount: %u -> %u.\n", &pa, ppr->pr.use_count, ppr->pr.use_count + 1); ppr->pr.use_count += 1; *p_ppr = ppr; out_unlock: @@ -227,8 +219,6 @@ static void put_ppr(struct prefilled_preimport_range *ppr) { mutex_lock(&preimport_mutex); WARN_ON(ppr->pr.use_count == 0); - pr_debug("ppr <%pa> refcount: %u -> %u.\n", &ppr->pr.start, ppr->pr.use_count, - ppr->pr.use_count - 1); ppr->pr.use_count -= 1; mutex_unlock(&preimport_mutex); } @@ -241,8 +231,7 @@ static int occupy_ppr_blocks(struct prefilled_preimport_range *ppr, phys_addr_t spin_lock_irqsave(&ppr->bitmap_lock, flags); if (start < ppr->pr.start || end > ppr->pr.end) { - pr_err("requested range [%pa, %pa] is not managed by ppr [%pa, %pa].\n", &start, - &end, &ppr->pr.start, &ppr->pr.end); + pr_err("requested range is not managed by preimport.\n"); ret = -EINVAL; goto out_unlock; } @@ -252,15 +241,13 @@ static int occupy_ppr_blocks(struct prefilled_preimport_range *ppr, phys_addr_t for (bit = init_bit; bit <= end_bit; bit++) { if (test_bit(bit, ppr->bitmap)) { ret = -EEXIST; - pr_err("conflicts on preimport block %lu of 
ppr<%pa>.\n", bit, - &ppr->pr.start); + pr_err("requested range conflicts on preimport block %lu.\n", bit); goto out_unlock; } } for (bit = init_bit; bit <= end_bit; bit++) set_bit(bit, ppr->bitmap); - pr_debug("ppr<%pa>: bitmap[%lu, %lu] set.\n", &ppr->pr.start, init_bit, end_bit); out_unlock: spin_unlock_irqrestore(&ppr->bitmap_lock, flags); @@ -275,8 +262,7 @@ static int free_ppr_blocks(struct prefilled_preimport_range *ppr, phys_addr_t st spin_lock_irqsave(&ppr->bitmap_lock, flags); if (start < ppr->pr.start || end > ppr->pr.end) { - pr_err("requested range [%pa, %pa] is not managed by ppr [%pa, %pa].\n", &start, - &end, &ppr->pr.start, &ppr->pr.end); + pr_err("requested range is not managed by preimport.\n"); ret = -EINVAL; goto out_unlock; } @@ -286,15 +272,13 @@ static int free_ppr_blocks(struct prefilled_preimport_range *ppr, phys_addr_t st for (bit = init_bit; bit <= end_bit; bit++) { if (!test_bit(bit, ppr->bitmap)) { ret = -EINVAL; - pr_err("preimport block %lu of ppr<%pa> never used.\n", bit, - &ppr->pr.start); + pr_err("preimport block %lu never used.\n", bit); goto out_unlock; } } for (bit = init_bit; bit <= end_bit; bit++) clear_bit(bit, ppr->bitmap); - pr_debug("ppr<%pa>: bitmap[%lu, %lu] cleared.\n", &ppr->pr.start, init_bit, end_bit); out_unlock: spin_unlock_irqrestore(&ppr->bitmap_lock, flags); diff --git a/drivers/ub/obmm/ubmempool_allocator.c b/drivers/ub/obmm/ubmempool_allocator.c index f687dec91ca7..da6094288f7b 100644 --- a/drivers/ub/obmm/ubmempool_allocator.c +++ b/drivers/ub/obmm/ubmempool_allocator.c @@ -201,16 +201,15 @@ static int set_memseg_linear_mapping_invalid(struct memseg_node *node, bool set_ start_pfn = PHYS_PFN(node->addr); end_pfn = PHYS_PFN(node->addr + node->size); - pr_debug("call external: set_linear_mapping_invalid(start_pfn=%#lx, end_pfn=%#lx, set_nc=%d)\n", - start_pfn, end_pfn, set_nc); + pr_debug("call external: set_linear_mapping_invalid(set_nc=%d)\n", set_nc); ret = set_linear_mapping_invalid(start_pfn, end_pfn, 
set_nc); if (ret) { - pr_err("failed to update kernel linear mapping cacheability for segment %#llx+%#lx, error=%pe.\n", - node->addr, node->size, ERR_PTR(ret)); + pr_err("failed to update kernel linear mapping cacheability: error=%pe.\n", + ERR_PTR(ret)); return ret; } - pr_debug("external called: set_linear_mapping_invalid(start_pfn=%#lx, end_pfn=%#lx, set_nc=%d, ret=%pe)\n", - start_pfn, end_pfn, set_nc, ERR_PTR(ret)); + pr_debug("external called: set_linear_mapping_invalid(set_nc=%d, ret=%pe)\n", + set_nc, ERR_PTR(ret)); return 0; } @@ -238,8 +237,6 @@ static struct memseg_node *hugetlb_pmd_alloc_memseg(struct conti_mem_allocator * goto out_free_seg; } - pr_debug("%s: node %pa+%#lx\n", __func__, &node->addr, node->size); - ret = set_memseg_linear_mapping_invalid(node, true); if (unlikely(ret)) goto out_free_seg; @@ -266,8 +263,6 @@ static void hugetlb_free_memseg(struct conti_mem_allocator *a __always_unused, return; } - pr_debug("%s: node %pa+%#lx\n", __func__, &node->addr, node->size); - folio = pfn_folio(node->addr >> PAGE_SHIFT); set_memseg_linear_mapping_invalid(node, false); @@ -303,8 +298,6 @@ static struct memseg_node *hugetlb_pud_alloc_memseg(struct conti_mem_allocator * goto out_free_seg; } - pr_debug("%s: node %pa+%#lx\n", __func__, &node->addr, node->size); - ret = set_memseg_linear_mapping_invalid(node, true); if (unlikely(ret)) goto out_free_seg; @@ -331,8 +324,6 @@ static void buddy_free_memseg(struct conti_mem_allocator *a __always_unused, return; } - pr_debug("%s: node %pa+%#lx\n", __func__, &node->addr, node->size); - folio = pfn_folio(node->addr >> PAGE_SHIFT); set_memseg_linear_mapping_invalid(node, false); @@ -369,8 +360,6 @@ static struct memseg_node *buddy_alloc_memseg(struct conti_mem_allocator *a) goto out_free_seg; } - pr_debug("%s: node %pa+%#lx\n", __func__, &node->addr, node->size); - ret = set_memseg_linear_mapping_invalid(node, true); if (unlikely(ret)) goto out_free_seg; -- Gitee