X-Git-Url: http://ftp.safe.ca/?a=blobdiff_plain;f=drivers%2Fchar%2Fmspec.c;h=30f095a8c2d4ff3932d7ad8db1d5ec462c33cd2d;hb=a419aef8b858a2bdb98df60336063d28df4b272f;hp=5426b1e5595f7cef7877ae7d697758b942bee182;hpb=17a3b05047119b7fc72ef03962e202becc659579;p=safe%2Fjmp%2Flinux-2.6
diff --git a/drivers/char/mspec.c b/drivers/char/mspec.c
index 5426b1e..30f095a 100644
--- a/drivers/char/mspec.c
+++ b/drivers/char/mspec.c
@@ -30,7 +30,6 @@
  * processor from ever speculating a cache line from this page.
  */
 
-#include 
 #include 
 #include 
 #include 
@@ -39,6 +38,7 @@
 #include 
 #include 
 #include 
+#include 
 #include 
 #include 
 #include 
@@ -67,27 +67,41 @@
 /*
  * Page types allocated by the device.
  */
-enum {
+enum mspec_page_type {
         MSPEC_FETCHOP = 1,
         MSPEC_CACHED,
         MSPEC_UNCACHED
 };
 
+#ifdef CONFIG_SGI_SN
 static int is_sn2;
+#else
+#define is_sn2          0
+#endif
 
 /*
  * One of these structures is allocated when an mspec region is mmaped. The
  * structure is pointed to by the vma->vm_private_data field in the vma struct.
  * This structure is used to record the addresses of the mspec pages.
+ * This structure is shared by all vma's that are split off from the
+ * original vma when split_vma()'s are done.
+ *
+ * The refcnt is incremented atomically because mm->mmap_sem does not
+ * protect in fork case where multiple tasks share the vma_data.
  */
 struct vma_data {
         atomic_t refcnt;        /* Number of vmas sharing the data. */
-        spinlock_t lock;        /* Serialize access to the vma. */
+        spinlock_t lock;        /* Serialize access to this structure. */
         int count;              /* Number of pages allocated. */
-        int type;               /* Type of pages allocated. */
+        enum mspec_page_type type; /* Type of pages allocated. */
+        int flags;              /* See VMD_xxx below. */
+        unsigned long vm_start; /* Original (unsplit) base. */
+        unsigned long vm_end;   /* Original (unsplit) end. */
         unsigned long maddr[0]; /* Array of MSPEC addresses. */
 };
 
+#define VMD_VMALLOCED 0x1       /* vmalloc'd rather than kmalloc'd */
+
 /* used on shub2 to clear FOP cache in the HUB */
 static unsigned long scratch_page[MAX_NUMNODES];
 #define SH2_AMO_CACHE_ENTRIES   4
@@ -125,8 +139,8 @@ mspec_zero_block(unsigned long addr, int len)
  * mspec_open
  *
  * Called when a device mapping is created by a means other than mmap
- * (via fork, etc.). Increments the reference count on the underlying
- * mspec data so it is not freed prematurely.
+ * (via fork, munmap, etc.). Increments the reference count on the
+ * underlying mspec data so it is not freed prematurely.
  */
 static void
 mspec_open(struct vm_area_struct *vma)
@@ -141,69 +155,68 @@ mspec_open(struct vm_area_struct *vma)
  * mspec_close
  *
  * Called when unmapping a device mapping. Frees all mspec pages
- * belonging to the vma.
+ * belonging to all the vma's sharing this vma_data structure.
  */
 static void
 mspec_close(struct vm_area_struct *vma)
 {
         struct vma_data *vdata;
-        int i, pages, result, vdata_size;
+        int index, last_index;
+        unsigned long my_page;
 
         vdata = vma->vm_private_data;
+
         if (!atomic_dec_and_test(&vdata->refcnt))
                 return;
 
-        pages = (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
-        vdata_size = sizeof(struct vma_data) + pages * sizeof(long);
-        for (i = 0; i < pages; i++) {
-                if (vdata->maddr[i] == 0)
+        last_index = (vdata->vm_end - vdata->vm_start) >> PAGE_SHIFT;
+        for (index = 0; index < last_index; index++) {
+                if (vdata->maddr[index] == 0)
                         continue;
                 /*
                  * Clear the page before sticking it back
                  * into the pool.
                  */
-                result = mspec_zero_block(vdata->maddr[i], PAGE_SIZE);
-                if (!result)
-                        uncached_free_page(vdata->maddr[i]);
+                my_page = vdata->maddr[index];
+                vdata->maddr[index] = 0;
+                if (!mspec_zero_block(my_page, PAGE_SIZE))
+                        uncached_free_page(my_page, 1);
                 else
                         printk(KERN_WARNING "mspec_close(): "
-                               "failed to zero page %i\n",
-                               result);
+                               "failed to zero page %ld\n", my_page);
         }
 
-        if (vdata_size <= PAGE_SIZE)
-                kfree(vdata);
-        else
+        if (vdata->flags & VMD_VMALLOCED)
                 vfree(vdata);
+        else
+                kfree(vdata);
 }
 
-
 /*
- * mspec_nopfn
+ * mspec_fault
  *
  * Creates a mspec page and maps it to user space.
  */
-static unsigned long
-mspec_nopfn(struct vm_area_struct *vma, unsigned long address)
+static int
+mspec_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
 {
         unsigned long paddr, maddr;
         unsigned long pfn;
-        int index;
+        pgoff_t index = vmf->pgoff;
         struct vma_data *vdata = vma->vm_private_data;
 
-        index = (address - vma->vm_start) >> PAGE_SHIFT;
         maddr = (volatile unsigned long) vdata->maddr[index];
         if (maddr == 0) {
-                maddr = uncached_alloc_page(numa_node_id());
+                maddr = uncached_alloc_page(numa_node_id(), 1);
                 if (maddr == 0)
-                        return NOPFN_OOM;
+                        return VM_FAULT_OOM;
 
                 spin_lock(&vdata->lock);
                 if (vdata->maddr[index] == 0) {
                         vdata->count++;
                         vdata->maddr[index] = maddr;
                 } else {
-                        uncached_free_page(maddr);
+                        uncached_free_page(maddr, 1);
                         maddr = vdata->maddr[index];
                 }
                 spin_unlock(&vdata->lock);
@@ -212,17 +225,24 @@ mspec_nopfn(struct vm_area_struct *vma, unsigned long address)
         if (vdata->type == MSPEC_FETCHOP)
                 paddr = TO_AMO(maddr);
         else
-                paddr = __pa(TO_CAC(maddr));
+                paddr = maddr & ~__IA64_UNCACHED_OFFSET;
 
         pfn = paddr >> PAGE_SHIFT;
 
-        return pfn;
+        /*
+         * vm_insert_pfn can fail with -EBUSY, but in that case it will
+         * be because another thread has installed the pte first, so it
+         * is no problem.
+         */
+        vm_insert_pfn(vma, (unsigned long)vmf->virtual_address, pfn);
+
+        return VM_FAULT_NOPAGE;
 }
 
 static struct vm_operations_struct mspec_vm_ops = {
         .open = mspec_open,
         .close = mspec_close,
-        .nopfn = mspec_nopfn
+        .fault = mspec_fault,
 };
 
 /*
@@ -233,10 +253,11 @@ static struct vm_operations_struct mspec_vm_ops = {
  * underlying pages.
  */
 static int
-mspec_mmap(struct file *file, struct vm_area_struct *vma, int type)
+mspec_mmap(struct file *file, struct vm_area_struct *vma,
+                                        enum mspec_page_type type)
 {
         struct vma_data *vdata;
-        int pages, vdata_size;
+        int pages, vdata_size, flags = 0;
 
         if (vma->vm_pgoff != 0)
                 return -EINVAL;
@@ -251,18 +272,23 @@ mspec_mmap(struct file *file, struct vm_area_struct *vma, int type)
         vdata_size = sizeof(struct vma_data) + pages * sizeof(long);
         if (vdata_size <= PAGE_SIZE)
                 vdata = kmalloc(vdata_size, GFP_KERNEL);
-        else
+        else {
                 vdata = vmalloc(vdata_size);
+                flags = VMD_VMALLOCED;
+        }
         if (!vdata)
                 return -ENOMEM;
         memset(vdata, 0, vdata_size);
 
+        vdata->vm_start = vma->vm_start;
+        vdata->vm_end = vma->vm_end;
+        vdata->flags = flags;
         vdata->type = type;
         spin_lock_init(&vdata->lock);
         vdata->refcnt = ATOMIC_INIT(1);
         vma->vm_private_data = vdata;
 
-        vma->vm_flags |= (VM_IO | VM_LOCKED | VM_RESERVED | VM_PFNMAP);
+        vma->vm_flags |= (VM_IO | VM_RESERVED | VM_PFNMAP | VM_DONTEXPAND);
         if (vdata->type == MSPEC_FETCHOP || vdata->type == MSPEC_UNCACHED)
                 vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
         vma->vm_ops = &mspec_vm_ops;
@@ -288,7 +314,7 @@ uncached_mmap(struct file *file, struct vm_area_struct *vma)
         return mspec_mmap(file, vma, MSPEC_UNCACHED);
 }
 
-static struct file_operations fetchop_fops = {
+static const struct file_operations fetchop_fops = {
         .owner = THIS_MODULE,
         .mmap = fetchop_mmap
 };
@@ -299,7 +325,7 @@ static struct miscdevice fetchop_miscdev = {
         .fops = &fetchop_fops
 };
 
-static struct file_operations cached_fops = {
+static const struct file_operations cached_fops = {
         .owner = THIS_MODULE,
         .mmap = cached_mmap
 };
@@ -310,7 +336,7 @@ static struct miscdevice cached_miscdev = {
         .fops = &cached_fops
 };
 
-static struct file_operations uncached_fops = {
+static const struct file_operations uncached_fops = {
         .owner = THIS_MODULE,
         .mmap = uncached_mmap
 };
@@ -336,16 +362,17 @@ mspec_init(void)
          * The fetchop device only works on SN2 hardware, uncached and cached
          * memory drivers should both be valid on all ia64 hardware
          */
+#ifdef CONFIG_SGI_SN
         if (ia64_platform_is("sn2")) {
                 is_sn2 = 1;
                 if (is_shub2()) {
                         ret = -ENOMEM;
-                        for_each_online_node(nid) {
+                        for_each_node_state(nid, N_ONLINE) {
                                 int actual_nid;
                                 int nasid;
                                 unsigned long phys;
 
-                                scratch_page[nid] = uncached_alloc_page(nid);
+                                scratch_page[nid] = uncached_alloc_page(nid, 1);
                                 if (scratch_page[nid] == 0)
                                         goto free_scratch_pages;
                                 phys = __pa(scratch_page[nid]);
@@ -364,6 +391,7 @@ mspec_init(void)
                         goto free_scratch_pages;
                 }
         }
+#endif
         ret = misc_register(&cached_miscdev);
         if (ret) {
                 printk(KERN_ERR "%s: failed to register device %i\n",
@@ -391,7 +419,7 @@ mspec_init(void)
 free_scratch_pages:
         for_each_node(nid) {
                 if (scratch_page[nid] != 0)
-                        uncached_free_page(scratch_page[nid]);
+                        uncached_free_page(scratch_page[nid], 1);
         }
         return ret;
 }
@@ -408,7 +436,7 @@ mspec_exit(void)
 
                 for_each_node(nid) {
                         if (scratch_page[nid] != 0)
-                                uncached_free_page(scratch_page[nid]);
+                                uncached_free_page(scratch_page[nid], 1);
                 }
         }
 }
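
For context, a minimal user-space sketch of how a mapping created by this driver behaves after the patch: pages are allocated lazily, so the first touch of each page enters mspec_fault(), which takes a page from the uncached allocator and installs the pfn directly (the vma is VM_PFNMAP, so no struct page is involved), and the final munmap() releases everything through mspec_close(). The device node name used here (/dev/mspec_cached) is an assumption for illustration; the miscdevice .name fields are declared outside the hunks shown above.

#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
        size_t len = 4 * (size_t)getpagesize();
        int fd = open("/dev/mspec_cached", O_RDWR);  /* assumed node name */
        void *p;

        if (fd < 0) {
                perror("open");
                return EXIT_FAILURE;
        }

        /* mspec_mmap() rejects any nonzero offset, so map from offset 0. */
        p = mmap(NULL, len, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
        if (p == MAP_FAILED) {
                perror("mmap");
                return EXIT_FAILURE;
        }

        /* Each first write faults in one uncached page via mspec_fault(). */
        memset(p, 0, len);

        /* mspec_close() zeroes and frees the pages once the last vma
         * sharing the vma_data goes away. */
        munmap(p, len);
        close(fd);
        return EXIT_SUCCESS;
}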
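The allocation path in mspec_fault() is a double-checked pattern: the uncached page is allocated before taking vdata->lock, and the lock is held only to decide whether this thread's page or a racing thread's page ends up in vdata->maddr[]; the loser frees its copy and adopts the winner's. A stand-alone sketch of the same idea, with illustrative names (slot_get, table) that are not part of the driver:

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static void *table[16];                 /* plays the role of vdata->maddr[] */

static void *slot_get(int index)
{
        void *mine, *winner;

        if (table[index] != NULL)       /* unlocked fast path, as in the driver */
                return table[index];

        mine = malloc(64);              /* stands in for uncached_alloc_page() */
        if (mine == NULL)
                return NULL;            /* the driver returns VM_FAULT_OOM here */

        pthread_mutex_lock(&lock);
        if (table[index] == NULL)
                table[index] = mine;    /* we won the race */
        else
                free(mine);             /* we lost; drop our copy */
        winner = table[index];
        pthread_mutex_unlock(&lock);

        return winner;
}

int main(void)
{
        printf("slot 3 -> %p\n", slot_get(3));
        printf("slot 3 -> %p (same pointer on the second call)\n", slot_get(3));
        return 0;
}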