X-Git-Url: http://ftp.safe.ca/?a=blobdiff_plain;f=lib%2Flmb.c;h=0343c05609f0d62f9b48b259bf3018db728b11a5;hb=0199c4e68d1f02894bdefe4b5d9e9ee4aedd8d62;hp=896e2832099ea207b36c02af276a0fd2c15516ea;hpb=d9024df02ffe74d723d97d552f86de3b34beb8cc;p=safe%2Fjmp%2Flinux-2.6

diff --git a/lib/lmb.c b/lib/lmb.c
index 896e283..0343c05 100644
--- a/lib/lmb.c
+++ b/lib/lmb.c
@@ -19,41 +19,52 @@
 
 struct lmb lmb;
 
-void lmb_dump_all(void)
+static int lmb_debug;
+
+static int __init early_lmb(char *p)
 {
-#ifdef DEBUG
-	unsigned long i;
+	if (p && strstr(p, "debug"))
+		lmb_debug = 1;
+	return 0;
+}
+early_param("lmb", early_lmb);
 
-	pr_debug("lmb_dump_all:\n");
-	pr_debug(" memory.cnt = 0x%lx\n", lmb.memory.cnt);
-	pr_debug(" memory.size = 0x%llx\n",
-	    (unsigned long long)lmb.memory.size);
-	for (i=0; i < lmb.memory.cnt ;i++) {
-		pr_debug(" memory.region[0x%x].base = 0x%llx\n",
-		    i, (unsigned long long)lmb.memory.region[i].base);
-		pr_debug(" .size = 0x%llx\n",
-		    (unsigned long long)lmb.memory.region[i].size);
-	}
+static void lmb_dump(struct lmb_region *region, char *name)
+{
+	unsigned long long base, size;
+	int i;
+
+	pr_info(" %s.cnt = 0x%lx\n", name, region->cnt);
 
-	pr_debug(" reserved.cnt = 0x%lx\n", lmb.reserved.cnt);
-	pr_debug(" reserved.size = 0x%lx\n", lmb.reserved.size);
-	for (i=0; i < lmb.reserved.cnt ;i++) {
-		pr_debug(" reserved.region[0x%x].base = 0x%llx\n",
-		    i, (unsigned long long)lmb.reserved.region[i].base);
-		pr_debug(" .size = 0x%llx\n",
-		    (unsigned long long)lmb.reserved.region[i].size);
+	for (i = 0; i < region->cnt; i++) {
+		base = region->region[i].base;
+		size = region->region[i].size;
+
+		pr_info(" %s[0x%x]\t0x%016llx - 0x%016llx, 0x%llx bytes\n",
+		    name, i, base, base + size - 1, size);
 	}
-#endif /* DEBUG */
 }
 
-static unsigned long __init lmb_addrs_overlap(u64 base1, u64 size1,
-					u64 base2, u64 size2)
+void lmb_dump_all(void)
+{
+	if (!lmb_debug)
+		return;
+
+	pr_info("LMB configuration:\n");
+	pr_info(" rmo_size = 0x%llx\n", (unsigned long long)lmb.rmo_size);
+	pr_info(" memory.size = 0x%llx\n", (unsigned long long)lmb.memory.size);
+
+	lmb_dump(&lmb.memory, "memory");
+	lmb_dump(&lmb.reserved, "reserved");
+}
+
+static unsigned long lmb_addrs_overlap(u64 base1, u64 size1, u64 base2,
+					u64 size2)
 {
 	return ((base1 < (base2 + size2)) && (base2 < (base1 + size1)));
 }
 
-static long __init lmb_addrs_adjacent(u64 base1, u64 size1,
-				u64 base2, u64 size2)
+static long lmb_addrs_adjacent(u64 base1, u64 size1, u64 base2, u64 size2)
 {
 	if (base2 == base1 + size1)
 		return 1;
@@ -63,7 +74,7 @@ static long __init lmb_addrs_adjacent(u64 base1, u64 size1,
 	return 0;
 }
 
-static long __init lmb_regions_adjacent(struct lmb_region *rgn,
+static long lmb_regions_adjacent(struct lmb_region *rgn,
 				unsigned long r1, unsigned long r2)
 {
 	u64 base1 = rgn->region[r1].base;
@@ -74,7 +85,7 @@ static long __init lmb_regions_adjacent(struct lmb_region *rgn,
 	return lmb_addrs_adjacent(base1, size1, base2, size2);
 }
 
-static void __init lmb_remove_region(struct lmb_region *rgn, unsigned long r)
+static void lmb_remove_region(struct lmb_region *rgn, unsigned long r)
 {
 	unsigned long i;
 
@@ -86,7 +97,7 @@ static void __init lmb_remove_region(struct lmb_region *rgn, unsigned long r)
 }
 
 /* Assumption: base addr of region 1 < base addr of region 2 */
-static void __init lmb_coalesce_regions(struct lmb_region *rgn,
+static void lmb_coalesce_regions(struct lmb_region *rgn,
 		unsigned long r1, unsigned long r2)
 {
 	rgn->region[r1].size += rgn->region[r2].size;
@@ -118,7 +129,7 @@ void __init lmb_analyze(void)
 		lmb.memory.size += lmb.memory.region[i].size;
 }
 
-static long __init lmb_add_region(struct lmb_region *rgn, u64 base, u64 size)
+static long lmb_add_region(struct lmb_region *rgn, u64 base, u64 size)
 {
 	unsigned long coalesced = 0;
 	long adjacent, i;
@@ -182,7 +193,7 @@ static long __init lmb_add_region(struct lmb_region *rgn, u64 base, u64 size)
 	return 0;
 }
 
-long __init lmb_add(u64 base, u64 size)
+long lmb_add(u64 base, u64 size)
 {
 	struct lmb_region *_rgn = &lmb.memory;
 
@@ -194,6 +205,55 @@ long __init lmb_add(u64 base, u64 size)
 
 }
 
+long lmb_remove(u64 base, u64 size)
+{
+	struct lmb_region *rgn = &(lmb.memory);
+	u64 rgnbegin, rgnend;
+	u64 end = base + size;
+	int i;
+
+	rgnbegin = rgnend = 0; /* suppress gcc warnings */
+
+	/* Find the region where (base, size) belongs */
+	for (i=0; i < rgn->cnt; i++) {
+		rgnbegin = rgn->region[i].base;
+		rgnend = rgnbegin + rgn->region[i].size;
+
+		if ((rgnbegin <= base) && (end <= rgnend))
+			break;
+	}
+
+	/* Didn't find the region */
+	if (i == rgn->cnt)
+		return -1;
+
+	/* Check to see if we are removing the entire region */
+	if ((rgnbegin == base) && (rgnend == end)) {
+		lmb_remove_region(rgn, i);
+		return 0;
+	}
+
+	/* Check to see if the region is matching at the front */
+	if (rgnbegin == base) {
+		rgn->region[i].base = end;
+		rgn->region[i].size -= size;
+		return 0;
+	}
+
+	/* Check to see if the region is matching at the end */
+	if (rgnend == end) {
+		rgn->region[i].size -= size;
+		return 0;
+	}
+
+	/*
+	 * We need to split the entry - adjust the current one to the
+	 * beginning of the hole and add the region after the hole.
+	 */
+	rgn->region[i].size = base - rgn->region[i].base;
+	return lmb_add_region(rgn, end, rgnend - end);
+}
+
 long __init lmb_reserve(u64 base, u64 size)
 {
 	struct lmb_region *_rgn = &lmb.reserved;
@@ -238,8 +298,7 @@ static u64 __init lmb_alloc_nid_unreserved(u64 start, u64 end,
 		j = lmb_overlaps_region(&lmb.reserved, base, size);
 		if (j < 0) {
 			/* this area isn't reserved, take it */
-			if (lmb_add_region(&lmb.reserved, base,
-					   lmb_align_up(size, align)) < 0)
+			if (lmb_add_region(&lmb.reserved, base, size) < 0)
 				base = ~(u64)0;
 			return base;
 		}
@@ -285,6 +344,10 @@ u64 __init lmb_alloc_nid(u64 size, u64 align, int nid,
 	struct lmb_region *mem = &lmb.memory;
 	int i;
 
+	BUG_ON(0 == size);
+
+	size = lmb_align_up(size, align);
+
 	for (i = 0; i < mem->cnt; i++) {
 		u64 ret = lmb_alloc_nid_region(&mem->region[i],
 					       nid_range,
@@ -322,6 +385,8 @@ u64 __init __lmb_alloc_base(u64 size, u64 align, u64 max_addr)
 
 	BUG_ON(0 == size);
 
+	size = lmb_align_up(size, align);
+
 	/* On some platforms, make sure we allocate lowmem */
 	/* Note that LMB_REAL_LIMIT may be LMB_ALLOC_ANYWHERE */
 	if (max_addr == LMB_ALLOC_ANYWHERE)
@@ -345,8 +410,7 @@ u64 __init __lmb_alloc_base(u64 size, u64 align, u64 max_addr)
 		j = lmb_overlaps_region(&lmb.reserved, base, size);
 		if (j < 0) {
 			/* this area isn't reserved, take it */
-			if (lmb_add_region(&lmb.reserved, base,
-					   size) < 0)
+			if (lmb_add_region(&lmb.reserved, base, size) < 0)
 				return 0;
 			return base;
 		}
@@ -365,7 +429,7 @@ u64 __init lmb_phys_mem_size(void)
 	return lmb.memory.size;
 }
 
-u64 __init lmb_end_of_DRAM(void)
+u64 lmb_end_of_DRAM(void)
 {
 	int idx = lmb.memory.cnt - 1;
 
@@ -398,6 +462,8 @@ void __init lmb_enforce_memory_limit(u64 memory_limit)
 	if (lmb.memory.region[0].size < lmb.rmo_size)
 		lmb.rmo_size = lmb.memory.region[0].size;
 
+	memory_limit = lmb_end_of_DRAM();
+
 	/* And truncate any reserves above the limit also. */
 	for (i = 0; i < lmb.reserved.cnt; i++) {
 		p = &lmb.reserved.region[i];
@@ -426,3 +492,36 @@ int __init lmb_is_reserved(u64 addr)
 	}
 	return 0;
 }
+
+/*
+ * Given a <base, size>, find which memory regions belong to this range.
+ * Adjust the request and return a contiguous chunk.
+ */
+int lmb_find(struct lmb_property *res)
+{
+	int i;
+	u64 rstart, rend;
+
+	rstart = res->base;
+	rend = rstart + res->size - 1;
+
+	for (i = 0; i < lmb.memory.cnt; i++) {
+		u64 start = lmb.memory.region[i].base;
+		u64 end = start + lmb.memory.region[i].size - 1;
+
+		if (start > rend)
+			return -1;
+
+		if ((end >= rstart) && (start < rend)) {
+			/* adjust the request */
+			if (rstart < start)
+				rstart = start;
+			if (rend > end)
+				rend = end;
+			res->base = rstart;
+			res->size = rend - rstart + 1;
+			return 0;
+		}
+	}
+	return -1;
+}
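
The practical effect of dropping __init from lmb_add(), lmb_remove(), lmb_end_of_DRAM() and the helpers they use is that the LMB map can be edited after early boot, for example from a memory-remove path. Below is a minimal sketch of such a caller, assuming only what this diff and <linux/lmb.h> provide (struct lmb_property with u64 base/size members, lmb_find() and lmb_remove()); the function name is hypothetical and not part of this patch.

#include <linux/lmb.h>

/*
 * Hypothetical caller, for illustration only: trim a requested range to
 * what the LMB memory map actually covers, then drop it from the map.
 */
static int example_lmb_unplug(u64 base, u64 size)
{
	struct lmb_property res = {
		.base = base,
		.size = size,
	};

	/*
	 * lmb_find() clamps res to the overlapping lmb.memory region,
	 * or returns -1 if the range hits no region at all.
	 */
	if (lmb_find(&res) < 0)
		return -1;

	/*
	 * lmb_remove() handles whole-region, front and tail removals;
	 * a hole in the middle splits the region via lmb_add_region().
	 */
	return lmb_remove(res.base, res.size);
}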