diff --git a/lib/lmb.c b/lib/lmb.c
index 83287d3..9cee171 100644
--- a/lib/lmb.c
+++ b/lib/lmb.c
@@ -19,31 +19,43 @@
 
 struct lmb lmb;
 
-void lmb_dump_all(void)
+static int lmb_debug;
+
+static int __init early_lmb(char *p)
 {
-#ifdef DEBUG
-	unsigned long i;
+	if (p && strstr(p, "debug"))
+		lmb_debug = 1;
+	return 0;
+}
+early_param("lmb", early_lmb);
 
-	pr_debug("lmb_dump_all:\n");
-	pr_debug("    memory.cnt = 0x%lx\n", lmb.memory.cnt);
-	pr_debug("    memory.size = 0x%llx\n",
-		 (unsigned long long)lmb.memory.size);
-	for (i=0; i < lmb.memory.cnt ;i++) {
-		pr_debug("    memory.region[0x%x].base = 0x%llx\n",
-			 i, (unsigned long long)lmb.memory.region[i].base);
-		pr_debug("		.size = 0x%llx\n",
-			 (unsigned long long)lmb.memory.region[i].size);
-	}
+static void lmb_dump(struct lmb_region *region, char *name)
+{
+	unsigned long long base, size;
+	int i;
+
+	pr_info(" %s.cnt = 0x%lx\n", name, region->cnt);
+
+	for (i = 0; i < region->cnt; i++) {
+		base = region->region[i].base;
+		size = region->region[i].size;
 
-	pr_debug("    reserved.cnt = 0x%lx\n", lmb.reserved.cnt);
-	pr_debug("    reserved.size = 0x%lx\n", lmb.reserved.size);
-	for (i=0; i < lmb.reserved.cnt ;i++) {
-		pr_debug("    reserved.region[0x%x].base = 0x%llx\n",
-			 i, (unsigned long long)lmb.reserved.region[i].base);
-		pr_debug("		.size = 0x%llx\n",
-			 (unsigned long long)lmb.reserved.region[i].size);
+		pr_info(" %s[0x%x]\t0x%016llx - 0x%016llx, 0x%llx bytes\n",
+			name, i, base, base + size - 1, size);
 	}
-#endif /* DEBUG */
+}
+
+void lmb_dump_all(void)
+{
+	if (!lmb_debug)
+		return;
+
+	pr_info("LMB configuration:\n");
+	pr_info(" rmo_size    = 0x%llx\n", (unsigned long long)lmb.rmo_size);
+	pr_info(" memory.size = 0x%llx\n", (unsigned long long)lmb.memory.size);
+
+	lmb_dump(&lmb.memory, "memory");
+	lmb_dump(&lmb.reserved, "reserved");
 }
 
 static unsigned long lmb_addrs_overlap(u64 base1, u64 size1, u64 base2,
@@ -251,7 +263,7 @@ long __init lmb_reserve(u64 base, u64 size)
 	return lmb_add_region(_rgn, base, size);
 }
 
-long __init lmb_overlaps_region(struct lmb_region *rgn, u64 base, u64 size)
+long lmb_overlaps_region(struct lmb_region *rgn, u64 base, u64 size)
 {
 	unsigned long i;
 
@@ -286,8 +298,7 @@ static u64 __init lmb_alloc_nid_unreserved(u64 start, u64 end,
 		j = lmb_overlaps_region(&lmb.reserved, base, size);
 		if (j < 0) {
 			/* this area isn't reserved, take it */
-			if (lmb_add_region(&lmb.reserved, base,
-					   lmb_align_up(size, align)) < 0)
+			if (lmb_add_region(&lmb.reserved, base, size) < 0)
 				base = ~(u64)0;
 			return base;
 		}
@@ -333,6 +344,10 @@ u64 __init lmb_alloc_nid(u64 size, u64 align, int nid,
 	struct lmb_region *mem = &lmb.memory;
 	int i;
 
+	BUG_ON(0 == size);
+
+	size = lmb_align_up(size, align);
+
 	for (i = 0; i < mem->cnt; i++) {
 		u64 ret = lmb_alloc_nid_region(&mem->region[i],
 					       nid_range,
@@ -370,6 +385,8 @@ u64 __init __lmb_alloc_base(u64 size, u64 align, u64 max_addr)
 
 	BUG_ON(0 == size);
 
+	size = lmb_align_up(size, align);
+
 	/* On some platforms, make sure we allocate lowmem */
 	/* Note that LMB_REAL_LIMIT may be LMB_ALLOC_ANYWHERE */
 	if (max_addr == LMB_ALLOC_ANYWHERE)
@@ -393,8 +410,7 @@ u64 __init __lmb_alloc_base(u64 size, u64 align, u64 max_addr)
 		j = lmb_overlaps_region(&lmb.reserved, base, size);
 		if (j < 0) {
 			/* this area isn't reserved, take it */
-			if (lmb_add_region(&lmb.reserved, base,
-					   lmb_align_up(size, align)) < 0)
+			if (lmb_add_region(&lmb.reserved, base, size) < 0)
 				return 0;
 			return base;
 		}
@@ -413,7 +429,7 @@ u64 __init lmb_phys_mem_size(void)
 	return lmb.memory.size;
 }
 
-u64 __init lmb_end_of_DRAM(void)
+u64 lmb_end_of_DRAM(void)
 {
 	int idx = lmb.memory.cnt - 1;
 
@@ -446,6 +462,8 @@ void __init lmb_enforce_memory_limit(u64 memory_limit)
 	if (lmb.memory.region[0].size < lmb.rmo_size)
 		lmb.rmo_size = lmb.memory.region[0].size;
 
+	memory_limit = lmb_end_of_DRAM();
+
 	/* And truncate any reserves above the limit also. */
 	for (i = 0; i < lmb.reserved.cnt; i++) {
 		p = &lmb.reserved.region[i];
@@ -475,6 +493,11 @@ int __init lmb_is_reserved(u64 addr)
 	return 0;
 }
 
+int lmb_is_region_reserved(u64 base, u64 size)
+{
+	return lmb_overlaps_region(&lmb.reserved, base, size);
+}
+
 /*
  * Given a <base, len>, find which memory regions belong to this range.
  * Adjust the request and return a contiguous chunk.
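
On why the patch hoists size = lmb_align_up(size, align); to the top of lmb_alloc_nid() and __lmb_alloc_base(): previously lmb_overlaps_region() was asked about the caller's raw size, while the subsequent lmb_add_region() recorded the aligned (larger) size, so a reservation could extend past the range that had been checked for overlap. Aligning once on entry makes the check and the bookkeeping agree. A minimal userspace sketch of the arithmetic (the helper mirrors the kernel's lmb_align_up() rounding; the 0x1800/0x1000 numbers are made up for illustration):

#include <assert.h>
#include <stdint.h>

/* Mirrors the kernel's lmb_align_up(): round addr up to a
 * multiple of size, where size is a power of two. */
static uint64_t lmb_align_up(uint64_t addr, uint64_t size)
{
	return (addr + (size - 1)) & ~(size - 1);
}

int main(void)
{
	uint64_t size = 0x1800, align = 0x1000;

	/* Pre-patch, the overlap check saw 0x1800 bytes while the
	 * reservation recorded 0x2000; post-patch both see 0x2000. */
	assert(lmb_align_up(size, align) == 0x2000);
	return 0;
}

Booting with lmb=debug on the kernel command line then prints the final memory and reserved maps through the reworked lmb_dump_all(), with no DEBUG build required.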
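
The new lmb_is_region_reserved() gives code outside lib/lmb.c a way to test a whole range against the reserved map before claiming it; lmb_overlaps_region() returns the index of the overlapping region, or a negative value when the range is free. A sketch of a possible caller (the reserve_fw_region() helper and its firmware-region arguments are hypothetical, not part of this patch):

#include <linux/lmb.h>

/* Hypothetical early-boot caller: reserve a firmware-described
 * region only if no part of it already overlaps lmb.reserved. */
static void __init reserve_fw_region(u64 base, u64 size)
{
	if (lmb_is_region_reserved(base, size) >= 0)
		return;		/* some of it is already reserved */

	lmb_reserve(base, size);
}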