/*
 *  linux/drivers/char/mem.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 *
 *  Added devfs support.
 *    Jan-11-1998, C. Scott Ananian <cananian@alumni.princeton.edu>
 *  Shared /dev/zero mmapping support, Feb 2000, Kanoj Sarcar <kanoj@sgi.com>
 */
#include <linux/config.h>
#include <linux/mm.h>
#include <linux/miscdevice.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/mman.h>
#include <linux/random.h>
#include <linux/init.h>
#include <linux/raw.h>
#include <linux/tty.h>
#include <linux/capability.h>
#include <linux/smp_lock.h>
#include <linux/devfs_fs_kernel.h>
#include <linux/ptrace.h>
#include <linux/device.h>
#include <linux/highmem.h>
#include <linux/crash_dump.h>
#include <linux/backing-dev.h>

#include <asm/uaccess.h>
#include <asm/io.h>

#ifdef CONFIG_IA64
# include <linux/efi.h>
#endif
#if defined(CONFIG_S390_TAPE) && defined(CONFIG_S390_TAPE_CHAR)
extern void tapechar_init(void);
#endif
/*
 * Architectures vary in how they handle caching for addresses
 * outside of main memory.
 */
static inline int uncached_access(struct file *file, unsigned long addr)
{
#if defined(__i386__)
        /*
         * On the PPro and successors, the MTRRs are used to set
         * memory types for physical addresses outside main memory,
         * so blindly setting PCD or PWT on those pages is wrong.
         * For Pentiums and earlier, the surround logic should disable
         * caching for the high addresses through the KEN pin, but
         * we maintain the tradition of paranoia in this code.
         */
        if (file->f_flags & O_SYNC)
                return 1;
        return !( test_bit(X86_FEATURE_MTRR, boot_cpu_data.x86_capability) ||
                  test_bit(X86_FEATURE_K6_MTRR, boot_cpu_data.x86_capability) ||
                  test_bit(X86_FEATURE_CYRIX_ARR, boot_cpu_data.x86_capability) ||
                  test_bit(X86_FEATURE_CENTAUR_MCR, boot_cpu_data.x86_capability) )
          && addr >= __pa(high_memory);
#elif defined(__x86_64__)
        /*
         * This is broken because it can generate memory type aliases,
         * which can cause cache corruption.
         * But it is only available to root and we have to be bug-for-bug
         * compatible with i386.
         */
        if (file->f_flags & O_SYNC)
                return 1;
        /* Same behaviour as i386: PAT is always set to cached and the
           MTRRs control the actual caching behaviour.
           Hopefully a full PAT implementation will fix that soon. */
        return 0;
#elif defined(CONFIG_IA64)
        /*
         * On ia64, we ignore O_SYNC because we cannot tolerate memory
         * attribute aliases.
         */
        return !(efi_mem_attributes(addr) & EFI_MEMORY_WB);
#else
        /*
         * Accessing memory above the top of memory the kernel knows about,
         * or through a file pointer that was marked O_SYNC, will be done
         * non-cached.
         */
        if (file->f_flags & O_SYNC)
                return 1;
        return addr >= __pa(high_memory);
#endif
}
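/*
 * Note (editor's addition): valid_phys_addr_range() below both validates
 * the start address and clamps *count so that [addr, addr + *count) never
 * extends past __pa(high_memory); architectures with holes in their
 * physical address space can override it by defining
 * ARCH_HAS_VALID_PHYS_ADDR_RANGE.
 */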
#ifndef ARCH_HAS_VALID_PHYS_ADDR_RANGE
static inline int valid_phys_addr_range(unsigned long addr, size_t *count)
{
        unsigned long end_mem;

        end_mem = __pa(high_memory);
        if (addr >= end_mem)
                return 0;
        if (*count > end_mem - addr)
                *count = end_mem - addr;
        return 1;
}
#endif
/*
 * This function reads the *physical* memory. The f_pos points directly
 * to the memory location.
 */
static ssize_t read_mem(struct file * file, char __user * buf,
                        size_t count, loff_t *ppos)
{
        unsigned long p = *ppos;
        ssize_t read = 0, sz;
        char *ptr;

        if (!valid_phys_addr_range(p, &count))
                return -EFAULT;
#ifdef __ARCH_HAS_NO_PAGE_ZERO_MAPPED
        /* we don't have page 0 mapped on sparc and m68k.. */
        if (p < PAGE_SIZE) {
                sz = min_t(unsigned long, PAGE_SIZE - p, count);
                if (sz > 0 && clear_user(buf, sz))
                        return -EFAULT;
                buf += sz; p += sz; count -= sz; read += sz;
        }
#endif
        while (count > 0) {
                /* Handle first page in case it's not aligned */
                if (-p & (PAGE_SIZE - 1))
                        sz = -p & (PAGE_SIZE - 1);
                else
                        sz = PAGE_SIZE;
                sz = min_t(unsigned long, sz, count);
                /*
                 * On ia64 if a page has been mapped somewhere as uncached,
                 * then it must also be accessed uncached by the kernel or
                 * data corruption may occur.
                 */
                ptr = xlate_dev_mem_ptr(p);
                if (copy_to_user(buf, ptr, sz))
                        return -EFAULT;
                buf += sz; p += sz; count -= sz; read += sz;
        }
        *ppos += read;
        return read;
}
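/*
 * Usage note (editor's addition): the file offset of /dev/mem is the
 * physical address itself, so e.g.
 *
 *	dd if=/dev/mem bs=1 skip=1024 count=4
 *
 * returns the four bytes at physical address 0x400.  Opening /dev/mem
 * requires CAP_SYS_RAWIO (see open_port() below, aliased as open_mem).
 */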
static ssize_t write_mem(struct file * file, const char __user * buf,
                         size_t count, loff_t *ppos)
{
        unsigned long p = *ppos;
        ssize_t written = 0, sz;
        unsigned long copied;
        void *ptr;

        if (!valid_phys_addr_range(p, &count))
                return -EFAULT;
#ifdef __ARCH_HAS_NO_PAGE_ZERO_MAPPED
        /* we don't have page 0 mapped on sparc and m68k.. */
        if (p < PAGE_SIZE) {
                unsigned long sz = PAGE_SIZE - p;
                if (sz > count)
                        sz = count;
                /* Hmm. Do something? */
                buf += sz; p += sz; count -= sz; written += sz;
        }
#endif
        while (count > 0) {
                /* Handle first page in case it's not aligned */
                if (-p & (PAGE_SIZE - 1))
                        sz = -p & (PAGE_SIZE - 1);
                else
                        sz = PAGE_SIZE;
                sz = min_t(unsigned long, sz, count);
                /*
                 * On ia64 if a page has been mapped somewhere as uncached,
                 * then it must also be accessed uncached by the kernel or
                 * data corruption may occur.
                 */
                ptr = xlate_dev_mem_ptr(p);
                copied = copy_from_user(ptr, buf, sz);
                if (copied) {
                        ssize_t ret = written + (sz - copied);
                        return ret ? ret : -EFAULT;
                }
                buf += sz; p += sz; count -= sz; written += sz;
        }
        *ppos += written;
        return written;
}
static int mmap_mem(struct file * file, struct vm_area_struct * vma)
{
#if defined(__HAVE_PHYS_MEM_ACCESS_PROT)
        unsigned long offset = vma->vm_pgoff << PAGE_SHIFT;

        vma->vm_page_prot = phys_mem_access_prot(file, offset,
                                                 vma->vm_end - vma->vm_start,
                                                 vma->vm_page_prot);
#elif defined(pgprot_noncached)
        unsigned long offset = vma->vm_pgoff << PAGE_SHIFT;
        int uncached = uncached_access(file, offset);

        if (uncached)
                vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
#endif
        /* Remap-pfn-range will mark the range VM_IO and VM_RESERVED */
        if (remap_pfn_range(vma,
                            vma->vm_start,
                            vma->vm_pgoff,
                            vma->vm_end-vma->vm_start,
                            vma->vm_page_prot))
                return -EAGAIN;
        return 0;
}
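/*
 * Usage sketch (editor's addition; user-space code, not part of this
 * file).  A minimal, hypothetical example of mapping one page of physical
 * memory; PHYS_ADDR stands for a page-aligned physical address of the
 * caller's choosing:
 *
 *	#include <fcntl.h>
 *	#include <sys/mman.h>
 *
 *	int fd = open("/dev/mem", O_RDWR | O_SYNC);
 *	void *p = mmap(NULL, 4096, PROT_READ | PROT_WRITE, MAP_SHARED,
 *		       fd, PHYS_ADDR);	// offset >> PAGE_SHIFT becomes vm_pgoff
 *
 * O_SYNC makes uncached_access() return 1 above, so the mapping is
 * created with caching disabled where pgprot_noncached is available.
 */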
static int mmap_kmem(struct file * file, struct vm_area_struct * vma)
{
        unsigned long long val;
        /*
         * RED-PEN: on some architectures there is more mapped memory
         * than available in mem_map which pfn_valid checks
         * for. Perhaps should add a new macro here.
         *
         * RED-PEN: vmalloc is not supported right now.
         */
        if (!pfn_valid(vma->vm_pgoff))
                return -EIO;
        val = (u64)vma->vm_pgoff << PAGE_SHIFT;
        vma->vm_pgoff = __pa(val) >> PAGE_SHIFT;
        return mmap_mem(file, vma);
}
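/*
 * Note (editor's addition): for /dev/kmem the mmap offset is interpreted
 * as a kernel *virtual* address; mmap_kmem() rewrites vm_pgoff to the
 * corresponding physical pfn with __pa() and then delegates to mmap_mem().
 */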
#ifdef CONFIG_CRASH_DUMP
/*
 * Read memory corresponding to the old kernel.
 * If we are reading from the reserved section, which is
 * actually used by the current kernel, we just return zeroes.
 * Or if we are reading from the first 640k, we return from the
 * backed up area.
 */
static ssize_t read_oldmem(struct file * file, char * buf,
                           size_t count, loff_t *ppos)
{
        unsigned long pfn;
        unsigned backup_start, backup_end, relocate_start;
        size_t read = 0, csize;

        backup_start = CRASH_BACKUP_BASE / PAGE_SIZE;
        backup_end = backup_start + (CRASH_BACKUP_SIZE / PAGE_SIZE);
        relocate_start = (CRASH_BACKUP_BASE + CRASH_BACKUP_SIZE) / PAGE_SIZE;

        while (count) {
                pfn = *ppos / PAGE_SIZE;
                csize = (count > PAGE_SIZE) ? PAGE_SIZE : count;

                /* Perform translation (see comment above) */
                if ((pfn >= backup_start) && (pfn < backup_end)) {
                        if (clear_user(buf, csize)) {
                                read = -EFAULT;
                                goto done;
                        }
                        goto copy_done;
                } else if (pfn < (CRASH_RELOCATE_SIZE / PAGE_SIZE))
                        pfn += relocate_start;

                if (pfn > saved_max_pfn) {
                        read = 0;
                        goto done;
                }

                if (copy_oldmem_page(pfn, buf, csize, 1)) {
                        read = -EFAULT;
                        goto done;
                }
copy_done:
                *ppos += csize; buf += csize;
                count -= csize; read += csize;
        }
done:
        return read;
}
#endif
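/*
 * Note (editor's addition): copy_oldmem_page() copies a single page out
 * of the old kernel's memory; the final argument (1) indicates that the
 * destination buffer is in user space.
 */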
extern long vread(char *buf, char *addr, unsigned long count);
extern long vwrite(char *buf, char *addr, unsigned long count);
/*
 * This function reads the *virtual* memory as seen by the kernel.
 */
static ssize_t read_kmem(struct file *file, char __user *buf,
                         size_t count, loff_t *ppos)
{
        unsigned long p = *ppos;
        ssize_t low_count, read, sz;
        char * kbuf; /* k-addr because vread() takes vmlist_lock rwlock */

        read = 0;
        if (p < (unsigned long) high_memory) {
                low_count = count;
                if (count > (unsigned long) high_memory - p)
                        low_count = (unsigned long) high_memory - p;

#ifdef __ARCH_HAS_NO_PAGE_ZERO_MAPPED
                /* we don't have page 0 mapped on sparc and m68k.. */
                if (p < PAGE_SIZE && low_count > 0) {
                        size_t tmp = PAGE_SIZE - p;
                        if (tmp > low_count) tmp = low_count;
                        if (clear_user(buf, tmp))
                                return -EFAULT;
                        buf += tmp; p += tmp; read += tmp;
                        low_count -= tmp; count -= tmp;
                }
#endif
                while (low_count > 0) {
                        /* Handle first page in case it's not aligned */
                        if (-p & (PAGE_SIZE - 1))
                                sz = -p & (PAGE_SIZE - 1);
                        else
                                sz = PAGE_SIZE;
                        sz = min_t(unsigned long, sz, low_count);

                        /*
                         * On ia64 if a page has been mapped somewhere as
                         * uncached, then it must also be accessed uncached
                         * by the kernel or data corruption may occur.
                         */
                        kbuf = xlate_dev_kmem_ptr((char *)p);
                        if (copy_to_user(buf, kbuf, sz))
                                return -EFAULT;
                        buf += sz; p += sz; read += sz;
                        low_count -= sz; count -= sz;
                }
        }

        if (count > 0) {
                kbuf = (char *)__get_free_page(GFP_KERNEL);
                if (!kbuf)
                        return -ENOMEM;
                while (count > 0) {
                        int len = count;

                        if (len > PAGE_SIZE)
                                len = PAGE_SIZE;
                        len = vread(kbuf, (char *)p, len);
                        if (!len)
                                break;
                        if (copy_to_user(buf, kbuf, len)) {
                                free_page((unsigned long)kbuf);
                                return -EFAULT;
                        }
                        count -= len; buf += len; read += len; p += len;
                }
                free_page((unsigned long)kbuf);
        }
        *ppos = p;
        return read;
}
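/*
 * Note (editor's addition): read_kmem() works in two phases.  Addresses
 * below high_memory are copied straight out of the kernel's direct
 * mapping; vmalloc addresses are staged through a bounce page with
 * vread(), because vread() runs under the vmlist_lock rwlock while
 * copy_to_user() may fault and sleep (hence the kbuf comment above).
 */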
static inline ssize_t
do_write_kmem(void *p, unsigned long realp, const char __user * buf,
              size_t count, loff_t *ppos)
{
        ssize_t written = 0, sz;
        unsigned long copied;
        void *ptr;

#ifdef __ARCH_HAS_NO_PAGE_ZERO_MAPPED
        /* we don't have page 0 mapped on sparc and m68k.. */
        if (realp < PAGE_SIZE) {
                unsigned long sz = PAGE_SIZE - realp;
                if (sz > count)
                        sz = count;
                /* Hmm. Do something? */
                buf += sz; p += sz; realp += sz;
                count -= sz; written += sz;
        }
#endif
        while (count > 0) {
                /* Handle first page in case it's not aligned */
                if (-realp & (PAGE_SIZE - 1))
                        sz = -realp & (PAGE_SIZE - 1);
                else
                        sz = PAGE_SIZE;
                sz = min_t(unsigned long, sz, count);
                /*
                 * On ia64 if a page has been mapped somewhere as uncached,
                 * then it must also be accessed uncached by the kernel or
                 * data corruption may occur.
                 */
                ptr = xlate_dev_kmem_ptr(p);

                copied = copy_from_user(ptr, buf, sz);
                if (copied) {
                        ssize_t ret = written + (sz - copied);
                        return ret ? ret : -EFAULT;
                }
                buf += sz; p += sz; realp += sz;
                count -= sz; written += sz;
        }

        *ppos += written;
        return written;
}
/*
 * This function writes to the *virtual* memory as seen by the kernel.
 */
static ssize_t write_kmem(struct file * file, const char __user * buf,
                          size_t count, loff_t *ppos)
{
        unsigned long p = *ppos;
        ssize_t wrote = 0;
        ssize_t virtr = 0;
        ssize_t written;
        char * kbuf; /* k-addr because vwrite() takes vmlist_lock rwlock */

        if (p < (unsigned long) high_memory) {
                wrote = count;
                if (count > (unsigned long) high_memory - p)
                        wrote = (unsigned long) high_memory - p;

                written = do_write_kmem((void*)p, p, buf, wrote, ppos);
                if (written != wrote)
                        return written;
                wrote = written;
                p += wrote; buf += wrote; count -= wrote;
        }

        if (count > 0) {
                kbuf = (char *)__get_free_page(GFP_KERNEL);
                if (!kbuf)
                        return wrote ? wrote : -ENOMEM;
                while (count > 0) {
                        int len = count;

                        if (len > PAGE_SIZE)
                                len = PAGE_SIZE;
                        if (len) {
                                written = copy_from_user(kbuf, buf, len);
                                if (written) {
                                        ssize_t ret;

                                        free_page((unsigned long)kbuf);
                                        ret = wrote + virtr + (len - written);
                                        return ret ? ret : -EFAULT;
                                }
                        }
                        len = vwrite(kbuf, (char *)p, len);
                        count -= len; buf += len; virtr += len; p += len;
                }
                free_page((unsigned long)kbuf);
        }

        *ppos = p;
        return virtr + wrote;
}
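/*
 * Note (editor's addition): write_kmem() mirrors read_kmem(): low memory
 * is written in place via do_write_kmem(), while the vmalloc range is
 * staged through a bounce page and pushed out with vwrite().
 */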
#if (defined(CONFIG_ISA) || !defined(__mc68000__)) && (!defined(CONFIG_PPC_ISERIES) || defined(CONFIG_PCI))
static ssize_t read_port(struct file * file, char __user * buf,
                         size_t count, loff_t *ppos)
{
        unsigned long i = *ppos;
        char __user *tmp = buf;

        if (!access_ok(VERIFY_WRITE, buf, count))
                return -EFAULT;
        while (count-- > 0 && i < 65536) {
                if (__put_user(inb(i),tmp) < 0)
                        return -EFAULT;
                i++;
                tmp++;
        }
        *ppos = i;
        return tmp-buf;
}

static ssize_t write_port(struct file * file, const char __user * buf,
                          size_t count, loff_t *ppos)
{
        unsigned long i = *ppos;
        const char __user * tmp = buf;

        if (!access_ok(VERIFY_READ,buf,count))
                return -EFAULT;
        while (count-- > 0 && i < 65536) {
                char c;
                if (__get_user(c, tmp))
                        return -EFAULT;
                outb(c,i);
                i++;
                tmp++;
        }
        *ppos = i;
        return tmp-buf;
}
#endif
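/*
 * Usage sketch (editor's addition; user-space code, not part of this
 * file).  The file offset of /dev/port selects the I/O port, so reading
 * one byte at offset 0x61 performs inb(0x61):
 *
 *	#include <fcntl.h>
 *	#include <unistd.h>
 *
 *	int fd = open("/dev/port", O_RDONLY);	// needs CAP_SYS_RAWIO
 *	unsigned char v;
 *	pread(fd, &v, 1, 0x61);
 */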
static ssize_t read_null(struct file * file, char __user * buf,
                         size_t count, loff_t *ppos)
{
        return 0;
}

static ssize_t write_null(struct file * file, const char __user * buf,
                          size_t count, loff_t *ppos)
{
        return count;
}
#ifdef CONFIG_MMU
/*
 * For fun, we are using the MMU for this.
 */
static inline size_t read_zero_pagealigned(char __user * buf, size_t size)
{
        struct mm_struct *mm;
        struct vm_area_struct * vma;
        unsigned long addr=(unsigned long)buf;

        mm = current->mm;
        /* Oops, this was forgotten before. -ben */
        down_read(&mm->mmap_sem);

        /* For private mappings, just map in zero pages. */
        for (vma = find_vma(mm, addr); vma; vma = vma->vm_next) {
                unsigned long count;

                if (vma->vm_start > addr || (vma->vm_flags & VM_WRITE) == 0)
                        goto out_up;
                if (vma->vm_flags & (VM_SHARED | VM_HUGETLB))
                        break;
                count = vma->vm_end - addr;
                if (count > size)
                        count = size;

                zap_page_range(vma, addr, count, NULL);
                zeromap_page_range(vma, addr, count, PAGE_COPY);

                size -= count; buf += count; addr += count;
                if (size == 0)
                        goto out_up;
        }

        up_read(&mm->mmap_sem);

        /* The shared case is hard. Let's do the conventional zeroing. */
        do {
                unsigned long unwritten = clear_user(buf, PAGE_SIZE);
                if (unwritten)
                        return size + unwritten - PAGE_SIZE;
                cond_resched();
                buf += PAGE_SIZE;
                size -= PAGE_SIZE;
        } while (size);

        return size;
out_up:
        up_read(&mm->mmap_sem);
        return size;
}
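/*
 * Note (editor's addition): rather than copying zero bytes, the function
 * above zaps the existing ptes of a private writable mapping and maps the
 * range to zero pages with zeromap_page_range(); real pages then only
 * materialize on a later write fault (copy-on-write), which is far
 * cheaper for large reads.
 */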
static ssize_t read_zero(struct file * file, char __user * buf,
                         size_t count, loff_t *ppos)
{
        unsigned long left, unwritten, written = 0;

        if (count == 0)
                return 0;

        if (!access_ok(VERIFY_WRITE, buf, count))
                return -EFAULT;

        left = count;

        /* do we want to be clever? Arbitrary cut-off */
        if (count >= PAGE_SIZE*4) {
                unsigned long partial;

                /* How much left of the page? */
                partial = (PAGE_SIZE-1) & -(unsigned long) buf;
                unwritten = clear_user(buf, partial);
                written = partial - unwritten;
                if (unwritten)
                        goto out;
                left -= partial;
                buf += partial;
                unwritten = read_zero_pagealigned(buf, left & PAGE_MASK);
                written += (left & PAGE_MASK) - unwritten;
                if (unwritten)
                        goto out;
                buf += left & PAGE_MASK;
                left &= ~PAGE_MASK;
        }
        unwritten = clear_user(buf, left);
        written += left - unwritten;
out:
        return written ? written : -EFAULT;
}
static int mmap_zero(struct file * file, struct vm_area_struct * vma)
{
        if (vma->vm_flags & VM_SHARED)
                return shmem_zero_setup(vma);
        if (zeromap_page_range(vma, vma->vm_start, vma->vm_end - vma->vm_start, vma->vm_page_prot))
                return -EAGAIN;
        return 0;
}
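/*
 * Usage sketch (editor's addition; user-space code, not part of this
 * file).  Before MAP_ANONYMOUS was universally available, mapping
 * /dev/zero was the portable way to obtain zero-filled, demand-paged
 * memory:
 *
 *	#include <fcntl.h>
 *	#include <sys/mman.h>
 *
 *	int fd = open("/dev/zero", O_RDWR);
 *	char *p = mmap(NULL, 65536, PROT_READ | PROT_WRITE,
 *		       MAP_PRIVATE, fd, 0);
 *
 * A MAP_SHARED mapping is handed to shmem_zero_setup() above, which
 * converts the vma into an anonymous shared-memory object.
 */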
#else /* CONFIG_MMU */
static ssize_t read_zero(struct file * file, char * buf,
                         size_t count, loff_t *ppos)
{
        size_t todo = count;

        while (todo) {
                size_t chunk = todo;

                if (chunk > 4096)
                        chunk = 4096;	/* Just for latency reasons */
                if (clear_user(buf, chunk))
                        return -EFAULT;
                buf += chunk;
                todo -= chunk;
                cond_resched();
        }
        return count;
}

static int mmap_zero(struct file * file, struct vm_area_struct * vma)
{
        return -ENOSYS;
}
#endif /* CONFIG_MMU */
static ssize_t write_full(struct file * file, const char __user * buf,
                          size_t count, loff_t *ppos)
{
        return -ENOSPC;
}
/*
 * Special lseek() function for /dev/null and /dev/zero.  Most notably, you
 * can fopen() both devices with "a" now.  This was previously impossible.
 * -- SRB.
 */
static loff_t null_lseek(struct file * file, loff_t offset, int orig)
{
        return file->f_pos = 0;
}
/*
 * The memory devices use the full 32/64 bits of the offset, and so we cannot
 * check against negative addresses: they are ok. The return value is weird,
 * though, in that case (0).
 *
 * also note that seeking relative to the "end of file" isn't supported:
 * it has no meaning, so it returns -EINVAL.
 */
static loff_t memory_lseek(struct file * file, loff_t offset, int orig)
{
        loff_t ret;

        down(&file->f_dentry->d_inode->i_sem);
        switch (orig) {
                case 0:
                        file->f_pos = offset;
                        ret = file->f_pos;
                        force_successful_syscall_return();
                        break;
                case 1:
                        file->f_pos += offset;
                        ret = file->f_pos;
                        force_successful_syscall_return();
                        break;
                default:
                        ret = -EINVAL;
        }
        up(&file->f_dentry->d_inode->i_sem);
        return ret;
}
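/*
 * Note (editor's addition): force_successful_syscall_return() matters on
 * architectures such as ia64 that report syscall errors out of band; a
 * large offset would otherwise look like a negative return value and be
 * misreported as an error.
 */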
static int open_port(struct inode * inode, struct file * filp)
{
        return capable(CAP_SYS_RAWIO) ? 0 : -EPERM;
}
#define zero_lseek	null_lseek
#define full_lseek	null_lseek
#define write_zero	write_null
#define read_full	read_zero
#define open_mem	open_port
#define open_kmem	open_mem
#define open_oldmem	open_mem
static struct file_operations mem_fops = {
        .llseek		= memory_lseek,
        .read		= read_mem,
        .write		= write_mem,
        .mmap		= mmap_mem,
        .open		= open_mem,
};

static struct file_operations kmem_fops = {
        .llseek		= memory_lseek,
        .read		= read_kmem,
        .write		= write_kmem,
        .mmap		= mmap_kmem,
        .open		= open_kmem,
};

static struct file_operations null_fops = {
        .llseek		= null_lseek,
        .read		= read_null,
        .write		= write_null,
};

#if (defined(CONFIG_ISA) || !defined(__mc68000__)) && (!defined(CONFIG_PPC_ISERIES) || defined(CONFIG_PCI))
static struct file_operations port_fops = {
        .llseek		= memory_lseek,
        .read		= read_port,
        .write		= write_port,
        .open		= open_port,
};
#endif

static struct file_operations zero_fops = {
        .llseek		= zero_lseek,
        .read		= read_zero,
        .write		= write_zero,
        .mmap		= mmap_zero,
};

static struct backing_dev_info zero_bdi = {
        .capabilities	= BDI_CAP_MAP_COPY,
};

static struct file_operations full_fops = {
        .llseek		= full_lseek,
        .read		= read_full,
        .write		= write_full,
};

#ifdef CONFIG_CRASH_DUMP
static struct file_operations oldmem_fops = {
        .read		= read_oldmem,
        .open		= open_oldmem,
};
#endif
static ssize_t kmsg_write(struct file * file, const char __user * buf,
                          size_t count, loff_t *ppos)
{
        char *tmp;
        ssize_t ret;

        tmp = kmalloc(count + 1, GFP_KERNEL);
        if (tmp == NULL)
                return -ENOMEM;
        ret = -EFAULT;
        if (!copy_from_user(tmp, buf, count)) {
                tmp[count] = 0;
                ret = printk("%s", tmp);
        }
        kfree(tmp);
        return ret;
}
static struct file_operations kmsg_fops = {
        .write =	kmsg_write,
};
static int memory_open(struct inode * inode, struct file * filp)
{
        switch (iminor(inode)) {
                case 1:
                        filp->f_op = &mem_fops;
                        break;
                case 2:
                        filp->f_op = &kmem_fops;
                        break;
                case 3:
                        filp->f_op = &null_fops;
                        break;
#if (defined(CONFIG_ISA) || !defined(__mc68000__)) && (!defined(CONFIG_PPC_ISERIES) || defined(CONFIG_PCI))
                case 4:
                        filp->f_op = &port_fops;
                        break;
#endif
                case 5:
                        filp->f_mapping->backing_dev_info = &zero_bdi;
                        filp->f_op = &zero_fops;
                        break;
                case 7:
                        filp->f_op = &full_fops;
                        break;
                case 8:
                        filp->f_op = &random_fops;
                        break;
                case 9:
                        filp->f_op = &urandom_fops;
                        break;
                case 11:
                        filp->f_op = &kmsg_fops;
                        break;
#ifdef CONFIG_CRASH_DUMP
                case 12:
                        filp->f_op = &oldmem_fops;
                        break;
#endif
                default:
                        return -ENXIO;
        }
        if (filp->f_op && filp->f_op->open)
                return filp->f_op->open(inode,filp);
        return 0;
}
static struct file_operations memory_fops = {
        .open		= memory_open,	/* just a selector for the real open */
};
static const struct {
        unsigned int		minor;
        char			*name;
        umode_t			mode;
        struct file_operations	*fops;
} devlist[] = { /* list of minor devices */
        {1, "mem",     S_IRUSR | S_IWUSR | S_IRGRP, &mem_fops},
        {2, "kmem",    S_IRUSR | S_IWUSR | S_IRGRP, &kmem_fops},
        {3, "null",    S_IRUGO | S_IWUGO,           &null_fops},
#if (defined(CONFIG_ISA) || !defined(__mc68000__)) && (!defined(CONFIG_PPC_ISERIES) || defined(CONFIG_PCI))
        {4, "port",    S_IRUSR | S_IWUSR | S_IRGRP, &port_fops},
#endif
        {5, "zero",    S_IRUGO | S_IWUGO,           &zero_fops},
        {7, "full",    S_IRUGO | S_IWUGO,           &full_fops},
        {8, "random",  S_IRUGO | S_IWUSR,           &random_fops},
        {9, "urandom", S_IRUGO | S_IWUSR,           &urandom_fops},
        {11,"kmsg",    S_IRUGO | S_IWUSR,           &kmsg_fops},
#ifdef CONFIG_CRASH_DUMP
        {12,"oldmem",  S_IRUSR | S_IWUSR | S_IRGRP, &oldmem_fops},
#endif
};
static struct class *mem_class;

static int __init chr_dev_init(void)
{
        int i;

        if (register_chrdev(MEM_MAJOR,"mem",&memory_fops))
                printk("unable to get major %d for memory devs\n", MEM_MAJOR);

        mem_class = class_create(THIS_MODULE, "mem");
        for (i = 0; i < ARRAY_SIZE(devlist); i++) {
                class_device_create(mem_class, MKDEV(MEM_MAJOR, devlist[i].minor),
                                    NULL, devlist[i].name);
                devfs_mk_cdev(MKDEV(MEM_MAJOR, devlist[i].minor),
                              S_IFCHR | devlist[i].mode, devlist[i].name);
        }

        return 0;
}

fs_initcall(chr_dev_init);
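/*
 * Note (editor's addition): fs_initcall() runs before the device-level
 * initcalls, so the memory devices are registered before most drivers
 * initialize.
 */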