/*
 * Copyright (C) 2000, 2001, 2002 Jeff Dike (jdike@karaya.com)
 * Licensed under the GPL
 */

#include "linux/mm.h"
#include "asm/pgalloc.h"
#include "asm/pgtable.h"
#include "asm/tlbflush.h"
#include "as-layout.h"
#include "tlb.h"
#include "os.h"
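/* add_mmap() queues a host mmap of [virt, virt + len) backed by the physical
 * page(s) at phys.  If the request simply extends the previously queued MMAP
 * op (same fd and protection, contiguous address and offset), the two are
 * coalesced; when the op array is already full it is flushed through do_ops()
 * before the new entry is stored. */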
static int add_mmap(unsigned long virt, unsigned long phys, unsigned long len,
                    unsigned int prot, struct host_vm_op *ops, int *index,
                    int last_filled, union mm_context *mmu, void **flush,
                    int (*do_ops)(union mm_context *, struct host_vm_op *,
                                  int, int, void **))
        struct host_vm_op *last;
        fd = phys_mapping(phys, &offset);
        if((last->type == MMAP) &&
           (last->u.mmap.addr + last->u.mmap.len == virt) &&
           (last->u.mmap.prot == prot) && (last->u.mmap.fd == fd) &&
           (last->u.mmap.offset + last->u.mmap.len == offset)){
                last->u.mmap.len += len;
        if(*index == last_filled){
                ret = (*do_ops)(mmu, ops, last_filled, 0, flush);
        ops[++*index] = ((struct host_vm_op) { .type = MMAP,
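/* add_munmap() queues a host munmap of [addr, addr + len), merging with the
 * previous op when that op is an adjacent MUNMAP.  As with add_mmap(), a full
 * op array is flushed via do_ops() before a new entry is added. */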
static int add_munmap(unsigned long addr, unsigned long len,
                      struct host_vm_op *ops, int *index, int last_filled,
                      union mm_context *mmu, void **flush,
                      int (*do_ops)(union mm_context *, struct host_vm_op *,
                                    int, int, void **))
        struct host_vm_op *last;
        if((last->type == MUNMAP) &&
           (last->u.munmap.addr + last->u.munmap.len == addr)){
                last->u.munmap.len += len;
        if(*index == last_filled){
                ret = (*do_ops)(mmu, ops, last_filled, 0, flush);
        ops[++*index] = ((struct host_vm_op) { .type = MUNMAP,
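/* add_mprotect() queues a host mprotect of [addr, addr + len) to 'prot',
 * again merging with an adjacent MPROTECT op of the same protection and
 * flushing the op array through do_ops() when it is full. */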
static int add_mprotect(unsigned long addr, unsigned long len,
                        unsigned int prot, struct host_vm_op *ops, int *index,
                        int last_filled, union mm_context *mmu, void **flush,
                        int (*do_ops)(union mm_context *, struct host_vm_op *,
                                      int, int, void **))
        struct host_vm_op *last;
        if((last->type == MPROTECT) &&
           (last->u.mprotect.addr + last->u.mprotect.len == addr) &&
           (last->u.mprotect.prot == prot)){
                last->u.mprotect.len += len;
        if(*index == last_filled){
                ret = (*do_ops)(mmu, ops, last_filled, 0, flush);
        ops[++*index] = ((struct host_vm_op) { .type = MPROTECT,
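/* ADD_ROUND(): advance n to the next inc-aligned boundary; inc must be a
 * power of two, and an already aligned n still moves up by a full inc. */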
#define ADD_ROUND(n, inc) (((n) + (inc)) & ~((inc) - 1))
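/* update_pte_range() walks the PTEs of one PMD over [addr, end).  The host
 * protection is derived from the pte's read/write/exec bits, restricted
 * further by the young and dirty flags, and a host map, unmap or mprotect is
 * queued for every pte marked as needing a new page or new protection (or for
 * all of them when 'force' is set).  Each visited pte is marked up to date. */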
static inline int update_pte_range(pmd_t *pmd, unsigned long addr,
                                   unsigned long end, struct host_vm_op *ops,
                                   int last_op, int *op_index, int force,
                                   union mm_context *mmu, void **flush,
                                   int (*do_ops)(union mm_context *,
                                                 struct host_vm_op *, int, int,
                                                 void **))
        int r, w, x, prot, ret = 0;
        pte = pte_offset_kernel(pmd, addr);
        if (!pte_young(*pte)) {
        } else if (!pte_dirty(*pte)) {
        prot = ((r ? UM_PROT_READ : 0) | (w ? UM_PROT_WRITE : 0) |
                (x ? UM_PROT_EXEC : 0));
        if(force || pte_newpage(*pte)){
                if(pte_present(*pte))
                        ret = add_mmap(addr, pte_val(*pte) & PAGE_MASK,
                                       PAGE_SIZE, prot, ops, op_index,
                                       last_op, mmu, flush, do_ops);
                else ret = add_munmap(addr, PAGE_SIZE, ops, op_index,
                                      last_op, mmu, flush, do_ops);
        }
        else if(pte_newprot(*pte))
                ret = add_mprotect(addr, PAGE_SIZE, prot, ops, op_index,
                                   last_op, mmu, flush, do_ops);
        *pte = pte_mkuptodate(*pte);
        } while (pte++, addr += PAGE_SIZE, ((addr != end) && !ret));
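/* update_pmd_range() does the same walk one level up: a PMD that is not
 * present is unmapped on the host if it is marked new (or if 'force' is set),
 * while present ones are handed down to update_pte_range(). */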
static inline int update_pmd_range(pud_t *pud, unsigned long addr,
                                   unsigned long end, struct host_vm_op *ops,
                                   int last_op, int *op_index, int force,
                                   union mm_context *mmu, void **flush,
                                   int (*do_ops)(union mm_context *,
                                                 struct host_vm_op *, int, int,
                                                 void **))
        pmd = pmd_offset(pud, addr);
        next = pmd_addr_end(addr, end);
        if(!pmd_present(*pmd)){
                if(force || pmd_newpage(*pmd)){
                        ret = add_munmap(addr, next - addr, ops,
                                         op_index, last_op, mmu,
                                         flush, do_ops);
                        pmd_mkuptodate(*pmd);
                }
        }
        else ret = update_pte_range(pmd, addr, next, ops, last_op,
                                    op_index, force, mmu, flush,
                                    do_ops);
        } while (pmd++, addr = next, ((addr != end) && !ret));
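/* update_pud_range() is the PUD-level equivalent, recursing into
 * update_pmd_range() for present entries. */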
static inline int update_pud_range(pgd_t *pgd, unsigned long addr,
                                   unsigned long end, struct host_vm_op *ops,
                                   int last_op, int *op_index, int force,
                                   union mm_context *mmu, void **flush,
                                   int (*do_ops)(union mm_context *,
                                                 struct host_vm_op *, int, int,
                                                 void **))
        pud = pud_offset(pgd, addr);
        next = pud_addr_end(addr, end);
        if(!pud_present(*pud)){
                if(force || pud_newpage(*pud)){
                        ret = add_munmap(addr, next - addr, ops,
                                         op_index, last_op, mmu,
                                         flush, do_ops);
                        pud_mkuptodate(*pud);
                }
        }
        else ret = update_pmd_range(pud, addr, next, ops, last_op,
                                    op_index, force, mmu, flush,
                                    do_ops);
        } while (pud++, addr = next, ((addr != end) && !ret));
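/* fix_range_common() walks the page tables of 'mm' over
 * [start_addr, end_addr), collecting host operations into the small on-stack
 * 'ops' array and flushing them through the caller-supplied do_ops().  If any
 * operation fails, the current process is killed with SIGKILL, since its host
 * mappings can no longer be assumed to match its page tables. */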
void fix_range_common(struct mm_struct *mm, unsigned long start_addr,
                      unsigned long end_addr, int force,
                      int (*do_ops)(union mm_context *, struct host_vm_op *,
                                    int, int, void **))
        union mm_context *mmu = &mm->context;
        struct host_vm_op ops[1];
        unsigned long addr = start_addr, next;
        int ret = 0, last_op = ARRAY_SIZE(ops) - 1, op_index = -1;
        pgd = pgd_offset(mm, addr);
        next = pgd_addr_end(addr, end_addr);
        if(!pgd_present(*pgd)){
                if (force || pgd_newpage(*pgd)){
                        ret = add_munmap(addr, next - addr, ops,
                                         &op_index, last_op, mmu,
                                         &flush, do_ops);
                        pgd_mkuptodate(*pgd);
                }
        }
        else ret = update_pud_range(pgd, addr, next, ops, last_op,
                                    &op_index, force, mmu, &flush,
                                    do_ops);
        } while (pgd++, addr = next, ((addr != end_addr) && !ret));
        ret = (*do_ops)(mmu, ops, op_index, 1, &flush);

        /* This is not an else because ret is modified above */
        if(ret){
                printk("fix_range_common: failed, killing current process\n");
                force_sig(SIGKILL, current);
        }
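/* flush_tlb_kernel_range_common() brings the host mappings for a kernel
 * address range into line with the kernel page tables, unmapping, remapping
 * and reprotecting pages directly through the os_* helpers rather than
 * queueing ops, and recording in 'updated' whether anything was changed. */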
int flush_tlb_kernel_range_common(unsigned long start, unsigned long end)
        struct mm_struct *mm;
        unsigned long addr, last;
        int updated = 0, err;

        for(addr = start; addr < end;){
                pgd = pgd_offset(mm, addr);
                if(!pgd_present(*pgd)){
                        last = ADD_ROUND(addr, PGDIR_SIZE);
                        if(pgd_newpage(*pgd)){
                                err = os_unmap_memory((void *) addr,
                                        panic("munmap failed, errno = %d\n",
                pud = pud_offset(pgd, addr);
                if(!pud_present(*pud)){
                        last = ADD_ROUND(addr, PUD_SIZE);
                        if(pud_newpage(*pud)){
                                err = os_unmap_memory((void *) addr,
                                        panic("munmap failed, errno = %d\n",
                pmd = pmd_offset(pud, addr);
                if(!pmd_present(*pmd)){
                        last = ADD_ROUND(addr, PMD_SIZE);
                        if(pmd_newpage(*pmd)){
                                err = os_unmap_memory((void *) addr,
                                        panic("munmap failed, errno = %d\n",
                pte = pte_offset_kernel(pmd, addr);
                if(!pte_present(*pte) || pte_newpage(*pte)){
                        err = os_unmap_memory((void *) addr,
                                panic("munmap failed, errno = %d\n",
                        if(pte_present(*pte))
                                pte_val(*pte) & PAGE_MASK,
                else if(pte_newprot(*pte)){
                        os_protect_memory((void *) addr, PAGE_SIZE, 1, 1, 1);
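/* flush_tlb_page() is the single-page version: it walks to the pte for
 * 'address' and issues one host map, unmap or protect for the process'
 * address space, marking the pte up to date on success and killing the
 * process with SIGKILL on failure. */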
void flush_tlb_page(struct vm_area_struct *vma, unsigned long address)
        struct mm_struct *mm = vma->vm_mm;
        int r, w, x, prot, err = 0;
        address &= PAGE_MASK;
        pgd = pgd_offset(mm, address);
        if(!pgd_present(*pgd))
        pud = pud_offset(pgd, address);
        if(!pud_present(*pud))
        pmd = pmd_offset(pud, address);
        if(!pmd_present(*pmd))
        pte = pte_offset_kernel(pmd, address);
        if (!pte_young(*pte)) {
        } else if (!pte_dirty(*pte)) {
        mm_id = &mm->context.skas.id;
        prot = ((r ? UM_PROT_READ : 0) | (w ? UM_PROT_WRITE : 0) |
                (x ? UM_PROT_EXEC : 0));
        if(pte_newpage(*pte)){
                if(pte_present(*pte)){
                        unsigned long long offset;
                        fd = phys_mapping(pte_val(*pte) & PAGE_MASK, &offset);
                        err = map(mm_id, address, PAGE_SIZE, prot, fd, offset,
                                  1, &flush);
                }
                else err = unmap(mm_id, address, PAGE_SIZE, 1, &flush);
        }
        else if(pte_newprot(*pte))
                err = protect(mm_id, address, PAGE_SIZE, prot, 1, &flush);
        *pte = pte_mkuptodate(*pte);
        printk("Failed to flush page for address 0x%lx\n", address);
        force_sig(SIGKILL, current);
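/* Out-of-line versions of the page table accessors, apparently for callers
 * that cannot use the inline macros directly; addr_pte() walks all the way
 * down to the pte for an address in another task's address space. */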
pgd_t *pgd_offset_proc(struct mm_struct *mm, unsigned long address)
{
        return(pgd_offset(mm, address));
}

pud_t *pud_offset_proc(pgd_t *pgd, unsigned long address)
{
        return(pud_offset(pgd, address));
}

pmd_t *pmd_offset_proc(pud_t *pud, unsigned long address)
{
        return(pmd_offset(pud, address));
}

pte_t *pte_offset_proc(pmd_t *pmd, unsigned long address)
{
        return(pte_offset_kernel(pmd, address));
}

pte_t *addr_pte(struct task_struct *task, unsigned long addr)
{
        pgd_t *pgd = pgd_offset(task->mm, addr);
        pud_t *pud = pud_offset(pgd, addr);
        pmd_t *pmd = pmd_offset(pud, addr);
        return(pte_offset_map(pmd, addr));
}
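/* The kernel-side flush entry points below are thin wrappers: flush_tlb_all()
 * flushes the whole current address space, while the kernel-range variants go
 * straight to flush_tlb_kernel_range_common(). */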
void flush_tlb_all(void)
{
        flush_tlb_mm(current->mm);
}

void flush_tlb_kernel_range(unsigned long start, unsigned long end)
{
        flush_tlb_kernel_range_common(start, end);
}

void flush_tlb_kernel_vm(void)
{
        flush_tlb_kernel_range_common(start_vm, end_vm);
}

void __flush_tlb_one(unsigned long addr)
{
        flush_tlb_kernel_range_common(addr, addr + PAGE_SIZE);
}
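/* do_ops() replays a batch of queued host_vm_ops against the host address
 * space identified by mmu->skas.id, stopping at the first failure; 'finished'
 * is nonzero only for the final batch of a flush. */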
static int do_ops(union mm_context *mmu, struct host_vm_op *ops, int last,
                  int finished, void **flush)
        struct host_vm_op *op;
        for(i = 0; i <= last && !ret; i++){
                        ret = map(&mmu->skas.id, op->u.mmap.addr,
                                  op->u.mmap.len, op->u.mmap.prot,
                                  op->u.mmap.fd, op->u.mmap.offset, finished,
                                  flush);
                        ret = unmap(&mmu->skas.id, op->u.munmap.addr,
                                    op->u.munmap.len, finished, flush);
                        ret = protect(&mmu->skas.id, op->u.mprotect.addr,
                                      op->u.mprotect.len, op->u.mprotect.prot,
                                      finished, flush);
                        printk("Unknown op type %d in do_ops\n", op->type);
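/* fix_range() clamps userspace flushes below CONFIG_STUB_START when there is
 * no host /proc/mm support, presumably to keep the stub pages mapped, and
 * then defers to fix_range_common() with the skas do_ops() above. */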
static void fix_range(struct mm_struct *mm, unsigned long start_addr,
                      unsigned long end_addr, int force)
{
        if(!proc_mm && (end_addr > CONFIG_STUB_START))
                end_addr = CONFIG_STUB_START;

        fix_range_common(mm, start_addr, end_addr, force, do_ops);
}
void flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
                     unsigned long end)
{
        if(vma->vm_mm == NULL)
                flush_tlb_kernel_range_common(start, end);
        else fix_range(vma->vm_mm, start, end, 0);
}
void flush_tlb_mm(struct mm_struct *mm)
{
        unsigned long end;

        /* Don't bother flushing if this address space is about to be
         * destroyed. */
        if(atomic_read(&mm->mm_users) == 0)
                return;

        end = proc_mm ? task_size : CONFIG_STUB_START;
        fix_range(mm, 0, end, 0);
}
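/* force_flush_all() walks the current process' VMAs and remaps each one on
 * the host unconditionally (force = 1), regardless of the new-page and
 * new-protection flags. */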
void force_flush_all(void)
{
        struct mm_struct *mm = current->mm;
        struct vm_area_struct *vma = mm->mmap;

        while(vma != NULL){
                fix_range(mm, vma->vm_start, vma->vm_end, 1);
                vma = vma->vm_next;
        }
}