/*
 *  linux/drivers/char/mem.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 *
 *  Added devfs support.
 *    Jan-11-1998, C. Scott Ananian <cananian@alumni.princeton.edu>
 *  Shared /dev/zero mmapping support, Feb 2000, Kanoj Sarcar <kanoj@sgi.com>
 */

#include <linux/config.h>
#include <linux/mm.h>
#include <linux/miscdevice.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/mman.h>
#include <linux/random.h>
#include <linux/init.h>
#include <linux/raw.h>
#include <linux/tty.h>
#include <linux/capability.h>
#include <linux/smp_lock.h>
#include <linux/ptrace.h>
#include <linux/device.h>
#include <linux/highmem.h>
#include <linux/crash_dump.h>
#include <linux/backing-dev.h>
#include <linux/bootmem.h>
#include <linux/pipe_fs_i.h>

#include <asm/uaccess.h>
#include <asm/io.h>

#ifdef CONFIG_IA64
# include <linux/efi.h>
#endif

/*
 * Architectures vary in how they handle caching for addresses
 * outside of main memory.
 */
static inline int uncached_access(struct file *file, unsigned long addr)
{
#if defined(__i386__)
        /*
         * On the PPro and successors, the MTRRs are used to set
         * memory types for physical addresses outside main memory,
         * so blindly setting PCD or PWT on those pages is wrong.
         * For Pentiums and earlier, the surround logic should disable
         * caching for the high addresses through the KEN pin, but
         * we maintain the tradition of paranoia in this code.
         */
        if (file->f_flags & O_SYNC)
                return 1;
        return !( test_bit(X86_FEATURE_MTRR, boot_cpu_data.x86_capability) ||
                  test_bit(X86_FEATURE_K6_MTRR, boot_cpu_data.x86_capability) ||
                  test_bit(X86_FEATURE_CYRIX_ARR, boot_cpu_data.x86_capability) ||
                  test_bit(X86_FEATURE_CENTAUR_MCR, boot_cpu_data.x86_capability) )
          && addr >= __pa(high_memory);
#elif defined(__x86_64__)
        /*
         * This is broken because it can generate memory type aliases,
         * which can cause cache corruption.  But it is only available
         * to root and we have to be bug-to-bug compatible with i386.
         */
        if (file->f_flags & O_SYNC)
                return 1;
        /*
         * Same behaviour as i386: PAT is always set to cached and the
         * MTRRs control the caching behaviour.  Hopefully a full PAT
         * implementation will fix that soon.
         */
        return 0;
#elif defined(CONFIG_IA64)
        /*
         * On ia64, we ignore O_SYNC because we cannot tolerate memory
         * attribute aliases.
         */
        return !(efi_mem_attributes(addr) & EFI_MEMORY_WB);
#else
        /*
         * Accessing memory above the top of memory the kernel knows
         * about, or through a file pointer that was marked O_SYNC,
         * will be done non-cached.
         */
        if (file->f_flags & O_SYNC)
                return 1;
        return addr >= __pa(high_memory);
#endif
}

#ifndef ARCH_HAS_VALID_PHYS_ADDR_RANGE
static inline int valid_phys_addr_range(unsigned long addr, size_t count)
{
        if (addr + count > __pa(high_memory))
                return 0;

        return 1;
}

static inline int valid_mmap_phys_addr_range(unsigned long addr, size_t size)
{
        return 1;
}
#endif

/*
 * This function reads the *physical* memory.  The f_pos points directly
 * to the memory location.
 */
static ssize_t read_mem(struct file * file, char __user * buf,
                        size_t count, loff_t *ppos)
{
        unsigned long p = *ppos;
        ssize_t read, sz;
        char *ptr;

        if (!valid_phys_addr_range(p, count))
                return -EFAULT;
        read = 0;
#ifdef __ARCH_HAS_NO_PAGE_ZERO_MAPPED
        /* we don't have page 0 mapped on sparc and m68k.. */
        if (p < PAGE_SIZE) {
                sz = PAGE_SIZE - p;
                if (sz > count)
                        sz = count;
                if (sz > 0) {
                        if (clear_user(buf, sz))
                                return -EFAULT;
                        buf += sz;
                        p += sz;
                        count -= sz;
                        read += sz;
                }
        }
#endif

        while (count > 0) {
                /*
                 * Handle first page in case it's not aligned
                 */
                if (-p & (PAGE_SIZE - 1))
                        sz = -p & (PAGE_SIZE - 1);
                else
                        sz = PAGE_SIZE;

                sz = min_t(unsigned long, sz, count);

                /*
                 * On ia64 if a page has been mapped somewhere as
                 * uncached, then it must also be accessed uncached
                 * by the kernel or data corruption may occur
                 */
                ptr = xlate_dev_mem_ptr(p);

                if (copy_to_user(buf, ptr, sz))
                        return -EFAULT;
                buf += sz;
                p += sz;
                count -= sz;
                read += sz;
        }

        *ppos += read;
        return read;
}
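
/*
 * Illustrative user-space sketch (not part of this file): reading a few
 * bytes of physical memory through /dev/mem.  The lseek() offset is the
 * physical address; 0xF0000 below (the legacy BIOS area on PCs) is just
 * an example address.  Opening the device needs CAP_SYS_RAWIO (see
 * open_port() further down).
 *
 *      int fd = open("/dev/mem", O_RDONLY);
 *      unsigned char rom[16];
 *
 *      lseek(fd, 0xF0000, SEEK_SET);
 *      if (read(fd, rom, sizeof(rom)) < 0)
 *              perror("read");         // EFAULT past __pa(high_memory)
 *      close(fd);
 */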

static ssize_t write_mem(struct file * file, const char __user * buf,
                         size_t count, loff_t *ppos)
{
        unsigned long p = *ppos;
        ssize_t written, sz;
        unsigned long copied;
        void *ptr;

        if (!valid_phys_addr_range(p, count))
                return -EFAULT;

        written = 0;

#ifdef __ARCH_HAS_NO_PAGE_ZERO_MAPPED
        /* we don't have page 0 mapped on sparc and m68k.. */
        if (p < PAGE_SIZE) {
                unsigned long sz = PAGE_SIZE - p;
                if (sz > count)
                        sz = count;
                /* Hmm. Do something? */
                buf += sz;
                p += sz;
                count -= sz;
                written += sz;
        }
#endif

        while (count > 0) {
                /*
                 * Handle first page in case it's not aligned
                 */
                if (-p & (PAGE_SIZE - 1))
                        sz = -p & (PAGE_SIZE - 1);
                else
                        sz = PAGE_SIZE;

                sz = min_t(unsigned long, sz, count);

                /*
                 * On ia64 if a page has been mapped somewhere as
                 * uncached, then it must also be accessed uncached
                 * by the kernel or data corruption may occur
                 */
                ptr = xlate_dev_mem_ptr(p);

                copied = copy_from_user(ptr, buf, sz);
                if (copied) {
                        written += sz - copied;
                        if (written)
                                break;
                        return -EFAULT;
                }
                buf += sz;
                p += sz;
                count -= sz;
                written += sz;
        }

        *ppos += written;
        return written;
}

#ifndef __HAVE_PHYS_MEM_ACCESS_PROT
static pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
                                     unsigned long size, pgprot_t vma_prot)
{
#ifdef pgprot_noncached
        unsigned long offset = pfn << PAGE_SHIFT;

        if (uncached_access(file, offset))
                return pgprot_noncached(vma_prot);
#endif
        return vma_prot;
}
#endif

static int mmap_mem(struct file * file, struct vm_area_struct * vma)
{
        size_t size = vma->vm_end - vma->vm_start;

        if (!valid_mmap_phys_addr_range(vma->vm_pgoff << PAGE_SHIFT, size))
                return -EINVAL;

        vma->vm_page_prot = phys_mem_access_prot(file, vma->vm_pgoff,
                                                 size,
                                                 vma->vm_page_prot);

        /* Remap-pfn-range will mark the range VM_IO and VM_RESERVED */
        if (remap_pfn_range(vma,
                            vma->vm_start,
                            vma->vm_pgoff,
                            size,
                            vma->vm_page_prot))
                return -EAGAIN;
        return 0;
}
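
/*
 * Illustrative user-space sketch (not part of this file): mapping a page
 * of physical address space, e.g. memory-mapped I/O registers.  The mmap()
 * offset is the page-aligned physical address; 0xFE000000 is a made-up
 * placeholder.  Opening with O_SYNC asks uncached_access() above for an
 * uncached mapping on the architectures that honour the flag.
 *
 *      int fd = open("/dev/mem", O_RDWR | O_SYNC);
 *      volatile uint32_t *regs = mmap(NULL, 4096, PROT_READ | PROT_WRITE,
 *                                     MAP_SHARED, fd, 0xFE000000);
 *
 * The call fails with EINVAL if valid_mmap_phys_addr_range() rejects the
 * range, and with EAGAIN if remap_pfn_range() does.
 */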

static int mmap_kmem(struct file * file, struct vm_area_struct * vma)
{
        unsigned long pfn;

        /* Turn a kernel-virtual address into a physical page frame */
        pfn = __pa((u64)vma->vm_pgoff << PAGE_SHIFT) >> PAGE_SHIFT;

        /*
         * RED-PEN: on some architectures there is more mapped memory
         * than available in mem_map which pfn_valid checks
         * for. Perhaps should add a new macro here.
         *
         * RED-PEN: vmalloc is not supported right now.
         */
        if (!pfn_valid(pfn))
                return -EIO;

        vma->vm_pgoff = pfn;
        return mmap_mem(file, vma);
}

#ifdef CONFIG_CRASH_DUMP
/*
 * Read memory corresponding to the old kernel.
 */
static ssize_t read_oldmem(struct file *file, char __user *buf,
                                size_t count, loff_t *ppos)
{
        unsigned long pfn, offset;
        size_t read = 0, csize;
        int rc = 0;

        while (count) {
                pfn = *ppos / PAGE_SIZE;
                if (pfn > saved_max_pfn)
                        return read;

                offset = (unsigned long)(*ppos % PAGE_SIZE);
                if (count > PAGE_SIZE - offset)
                        csize = PAGE_SIZE - offset;
                else
                        csize = count;

                rc = copy_oldmem_page(pfn, buf, csize, offset, 1);
                if (rc < 0)
                        return rc;
                buf += csize;
                *ppos += csize;
                read += csize;
                count -= csize;
        }
        return read;
}
#endif
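
/*
 * Illustrative sketch (not part of this file): after a crash, dump tools
 * in the capture kernel read the previous kernel's memory through this
 * device (minor 12); the read returns short once the offset passes
 * saved_max_pfn * PAGE_SIZE, giving ordinary EOF semantics:
 *
 *      int fd = open("/dev/oldmem", O_RDONLY);
 *      char page[4096];
 *
 *      while (read(fd, page, sizeof(page)) > 0)
 *              ;       // append each page to the dump image
 */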

extern long vread(char *buf, char *addr, unsigned long count);
extern long vwrite(char *buf, char *addr, unsigned long count);

/*
 * This function reads the *virtual* memory as seen by the kernel.
 */
static ssize_t read_kmem(struct file *file, char __user *buf,
                         size_t count, loff_t *ppos)
{
        unsigned long p = *ppos;
        ssize_t low_count, read, sz;
        char * kbuf; /* k-addr because vread() takes vmlist_lock rwlock */

        read = 0;
        if (p < (unsigned long) high_memory) {
                low_count = count;
                if (count > (unsigned long) high_memory - p)
                        low_count = (unsigned long) high_memory - p;

#ifdef __ARCH_HAS_NO_PAGE_ZERO_MAPPED
                /* we don't have page 0 mapped on sparc and m68k.. */
                if (p < PAGE_SIZE && low_count > 0) {
                        size_t tmp = PAGE_SIZE - p;
                        if (tmp > low_count)
                                tmp = low_count;
                        if (clear_user(buf, tmp))
                                return -EFAULT;
                        buf += tmp;
                        p += tmp;
                        read += tmp;
                        low_count -= tmp;
                        count -= tmp;
                }
#endif
                while (low_count > 0) {
                        /*
                         * Handle first page in case it's not aligned
                         */
                        if (-p & (PAGE_SIZE - 1))
                                sz = -p & (PAGE_SIZE - 1);
                        else
                                sz = PAGE_SIZE;

                        sz = min_t(unsigned long, sz, low_count);

                        /*
                         * On ia64 if a page has been mapped somewhere as
                         * uncached, then it must also be accessed uncached
                         * by the kernel or data corruption may occur
                         */
                        kbuf = xlate_dev_kmem_ptr((char *)p);

                        if (copy_to_user(buf, kbuf, sz))
                                return -EFAULT;
                        buf += sz;
                        p += sz;
                        read += sz;
                        low_count -= sz;
                        count -= sz;
                }
        }

        if (count > 0) {
                kbuf = (char *)__get_free_page(GFP_KERNEL);
                if (!kbuf)
                        return -ENOMEM;
                while (count > 0) {
                        int len = count;

                        if (len > PAGE_SIZE)
                                len = PAGE_SIZE;
                        len = vread(kbuf, (char *)p, len);
                        if (!len)
                                break;
                        if (copy_to_user(buf, kbuf, len)) {
                                free_page((unsigned long)kbuf);
                                return -EFAULT;
                        }
                        count -= len;
                        buf += len;
                        read += len;
                        p += len;
                }
                free_page((unsigned long)kbuf);
        }
        *ppos = p;
        return read;
}
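
/*
 * Illustrative user-space sketch (not part of this file): peeking at a
 * kernel virtual address through /dev/kmem.  The file offset is the
 * kernel virtual address itself; the address below would normally come
 * from System.map and is purely a placeholder.
 *
 *      int fd = open("/dev/kmem", O_RDONLY);
 *      unsigned long val;
 *
 *      lseek(fd, 0xC0100000UL, SEEK_SET);      // hypothetical address
 *      read(fd, &val, sizeof(val));
 *      close(fd);
 *
 * Addresses below high_memory are copied directly; anything above goes
 * through vread(), so vmalloc space is readable too.
 */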

static inline ssize_t
do_write_kmem(void *p, unsigned long realp, const char __user * buf,
              size_t count, loff_t *ppos)
{
        ssize_t written, sz;
        unsigned long copied;

        written = 0;
#ifdef __ARCH_HAS_NO_PAGE_ZERO_MAPPED
        /* we don't have page 0 mapped on sparc and m68k.. */
        if (realp < PAGE_SIZE) {
                unsigned long sz = PAGE_SIZE - realp;
                if (sz > count)
                        sz = count;
                /* Hmm. Do something? */
                buf += sz;
                p += sz;
                realp += sz;
                count -= sz;
                written += sz;
        }
#endif

        while (count > 0) {
                char *ptr;
                /*
                 * Handle first page in case it's not aligned
                 */
                if (-realp & (PAGE_SIZE - 1))
                        sz = -realp & (PAGE_SIZE - 1);
                else
                        sz = PAGE_SIZE;

                sz = min_t(unsigned long, sz, count);

                /*
                 * On ia64 if a page has been mapped somewhere as
                 * uncached, then it must also be accessed uncached
                 * by the kernel or data corruption may occur
                 */
                ptr = xlate_dev_kmem_ptr(p);

                copied = copy_from_user(ptr, buf, sz);
                if (copied) {
                        written += sz - copied;
                        if (written)
                                break;
                        return -EFAULT;
                }
                buf += sz;
                p += sz;
                realp += sz;
                count -= sz;
                written += sz;
        }

        *ppos += written;
        return written;
}

/*
 * This function writes to the *virtual* memory as seen by the kernel.
 */
static ssize_t write_kmem(struct file * file, const char __user * buf,
                          size_t count, loff_t *ppos)
{
        unsigned long p = *ppos;
        ssize_t wrote = 0;
        ssize_t virtr = 0;
        ssize_t written;
        char * kbuf; /* k-addr because vwrite() takes vmlist_lock rwlock */

        if (p < (unsigned long) high_memory) {

                wrote = count;
                if (count > (unsigned long) high_memory - p)
                        wrote = (unsigned long) high_memory - p;

                written = do_write_kmem((void*)p, p, buf, wrote, ppos);
                if (written != wrote)
                        return written;
                wrote = written;
                p += wrote;
                buf += wrote;
                count -= wrote;
        }

        if (count > 0) {
                kbuf = (char *)__get_free_page(GFP_KERNEL);
                if (!kbuf)
                        return wrote ? wrote : -ENOMEM;
                while (count > 0) {
                        int len = count;

                        if (len > PAGE_SIZE)
                                len = PAGE_SIZE;
                        if (len) {
                                written = copy_from_user(kbuf, buf, len);
                                if (written) {
                                        if (wrote + virtr)
                                                break;
                                        free_page((unsigned long)kbuf);
                                        return -EFAULT;
                                }
                        }
                        len = vwrite(kbuf, (char *)p, len);
                        count -= len;
                        buf += len;
                        virtr += len;
                        p += len;
                }
                free_page((unsigned long)kbuf);
        }

        *ppos = p;
        return virtr + wrote;
}

#if defined(CONFIG_ISA) || !defined(__mc68000__)
static ssize_t read_port(struct file * file, char __user * buf,
                         size_t count, loff_t *ppos)
{
        unsigned long i = *ppos;
        char __user *tmp = buf;

        if (!access_ok(VERIFY_WRITE, buf, count))
                return -EFAULT;
        while (count-- > 0 && i < 65536) {
                if (__put_user(inb(i), tmp) < 0)
                        return -EFAULT;
                i++;
                tmp++;
        }
        *ppos = i;
        return tmp-buf;
}

static ssize_t write_port(struct file * file, const char __user * buf,
                          size_t count, loff_t *ppos)
{
        unsigned long i = *ppos;
        const char __user * tmp = buf;

        if (!access_ok(VERIFY_READ, buf, count))
                return -EFAULT;
        while (count-- > 0 && i < 65536) {
                char c;
                if (__get_user(c, tmp)) {
                        if (tmp > buf)
                                break;
                        return -EFAULT;
                }
                outb(c, i);
                i++;
                tmp++;
        }
        *ppos = i;
        return tmp-buf;
}
#endif
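
/*
 * Illustrative user-space sketch (not part of this file): reading an I/O
 * port through /dev/port.  The file offset selects the port number
 * (0-65535); 0x61 below, the legacy PC system control port, is just an
 * example.  Opening needs CAP_SYS_RAWIO (see open_port() below).
 *
 *      int fd = open("/dev/port", O_RDONLY);
 *      unsigned char val;
 *
 *      lseek(fd, 0x61, SEEK_SET);
 *      read(fd, &val, 1);              // equivalent to inb(0x61)
 *      close(fd);
 */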

static ssize_t read_null(struct file * file, char __user * buf,
                         size_t count, loff_t *ppos)
{
        return 0;
}

static ssize_t write_null(struct file * file, const char __user * buf,
                          size_t count, loff_t *ppos)
{
        return count;
}

static int pipe_to_null(struct pipe_inode_info *info, struct pipe_buffer *buf,
                        struct splice_desc *sd)
{
        return sd->len;
}

static ssize_t splice_write_null(struct pipe_inode_info *pipe, struct file *out,
                                 loff_t *ppos, size_t len, unsigned int flags)
{
        return splice_from_pipe(pipe, out, ppos, len, flags, pipe_to_null);
}

#ifdef CONFIG_MMU
/*
 * For fun, we are using the MMU for this.
 */
static inline size_t read_zero_pagealigned(char __user * buf, size_t size)
{
        struct mm_struct *mm;
        struct vm_area_struct * vma;
        unsigned long addr = (unsigned long)buf;

        mm = current->mm;
        /* Oops, this was forgotten before. -ben */
        down_read(&mm->mmap_sem);

        /* For private mappings, just map in zero pages. */
        for (vma = find_vma(mm, addr); vma; vma = vma->vm_next) {
                unsigned long count;

                if (vma->vm_start > addr || (vma->vm_flags & VM_WRITE) == 0)
                        goto out_up;
                if (vma->vm_flags & (VM_SHARED | VM_HUGETLB))
                        break;
                count = vma->vm_end - addr;
                if (count > size)
                        count = size;

                zap_page_range(vma, addr, count, NULL);
                zeromap_page_range(vma, addr, count, PAGE_COPY);

                size -= count;
                buf += count;
                addr += count;
                if (size == 0)
                        goto out_up;
        }

        up_read(&mm->mmap_sem);

        /* The shared case is hard. Let's do the conventional zeroing. */
        do {
                unsigned long unwritten = clear_user(buf, PAGE_SIZE);
                if (unwritten)
                        return size + unwritten - PAGE_SIZE;
                cond_resched();
                buf += PAGE_SIZE;
                size -= PAGE_SIZE;
        } while (size);

        return size;
out_up:
        up_read(&mm->mmap_sem);
        return size;
}

static ssize_t read_zero(struct file * file, char __user * buf,
                         size_t count, loff_t *ppos)
{
        unsigned long left, unwritten, written = 0;

        if (!count)
                return 0;

        if (!access_ok(VERIFY_WRITE, buf, count))
                return -EFAULT;

        left = count;

        /* do we want to be clever? Arbitrary cut-off */
        if (count >= PAGE_SIZE*4) {
                unsigned long partial;

                /* How much left of the page? */
                partial = (PAGE_SIZE-1) & -(unsigned long) buf;
                unwritten = clear_user(buf, partial);
                written = partial - unwritten;
                if (unwritten)
                        goto out;
                left -= partial;
                buf += partial;
                unwritten = read_zero_pagealigned(buf, left & PAGE_MASK);
                written += (left & PAGE_MASK) - unwritten;
                if (unwritten)
                        goto out;
                buf += left & PAGE_MASK;
                left &= ~PAGE_MASK;
        }
        unwritten = clear_user(buf, left);
        written += left - unwritten;
out:
        return written ? written : -EFAULT;
}

static int mmap_zero(struct file * file, struct vm_area_struct * vma)
{
        if (vma->vm_flags & VM_SHARED)
                return shmem_zero_setup(vma);
        if (zeromap_page_range(vma, vma->vm_start,
                               vma->vm_end - vma->vm_start, vma->vm_page_prot))
                return -EAGAIN;
        return 0;
}
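
/*
 * Illustrative user-space sketch (not part of this file): the classic
 * pre-MAP_ANONYMOUS idiom of mapping /dev/zero to obtain zero-filled,
 * demand-paged memory.  MAP_SHARED mappings become shmem objects via
 * shmem_zero_setup() above; MAP_PRIVATE ones use zeromap_page_range().
 *
 *      int fd = open("/dev/zero", O_RDWR);
 *      char *buf = mmap(NULL, len, PROT_READ | PROT_WRITE,
 *                       MAP_PRIVATE, fd, 0);
 *      close(fd);      // the mapping outlives the descriptor
 */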
#else /* CONFIG_MMU */
static ssize_t read_zero(struct file * file, char __user * buf,
                         size_t count, loff_t *ppos)
{
        size_t todo = count;

        while (todo) {
                size_t chunk = todo;

                if (chunk > 4096)
                        chunk = 4096;   /* Just for latency reasons */
                if (clear_user(buf, chunk))
                        return -EFAULT;
                buf += chunk;
                todo -= chunk;
                cond_resched();
        }
        return count;
}

static int mmap_zero(struct file * file, struct vm_area_struct * vma)
{
        return -ENOSYS;
}
#endif /* CONFIG_MMU */

static ssize_t write_full(struct file * file, const char __user * buf,
                          size_t count, loff_t *ppos)
{
        return -ENOSPC;
}

/*
 * Special lseek() function for /dev/null and /dev/zero.  Most notably, you
 * can fopen() both devices with "a" now.  This was previously impossible.
 * -- SRB.
 */

static loff_t null_lseek(struct file * file, loff_t offset, int orig)
{
        return file->f_pos = 0;
}
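
/*
 * Illustrative user-space sketch (not part of this file) of the case the
 * comment above describes: stdio append mode seeks on open, and
 * null_lseek() satisfies that by pinning the position at 0 rather than
 * failing.
 *
 *      FILE *f = fopen("/dev/null", "a");      // works thanks to null_lseek
 *      fprintf(f, "discarded\n");
 *      fclose(f);
 */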

/*
 * The memory devices use the full 32/64 bits of the offset, and so we cannot
 * check against negative addresses: they are ok.  The return value is weird,
 * though, in that case (0).
 *
 * Also note that seeking relative to the "end of file" isn't supported:
 * it has no meaning, so it returns -EINVAL.
 */
static loff_t memory_lseek(struct file * file, loff_t offset, int orig)
{
        loff_t ret;

        mutex_lock(&file->f_dentry->d_inode->i_mutex);
        switch (orig) {
                case 0:
                        file->f_pos = offset;
                        ret = file->f_pos;
                        force_successful_syscall_return();
                        break;
                case 1:
                        file->f_pos += offset;
                        ret = file->f_pos;
                        force_successful_syscall_return();
                        break;
                default:
                        ret = -EINVAL;
        }
        mutex_unlock(&file->f_dentry->d_inode->i_mutex);
        return ret;
}

static int open_port(struct inode * inode, struct file * filp)
{
        return capable(CAP_SYS_RAWIO) ? 0 : -EPERM;
}

#define zero_lseek      null_lseek
#define full_lseek      null_lseek
#define write_zero      write_null
#define read_full       read_zero
#define open_mem        open_port
#define open_kmem       open_mem
#define open_oldmem     open_mem

static struct file_operations mem_fops = {
        .llseek         = memory_lseek,
        .read           = read_mem,
        .write          = write_mem,
        .mmap           = mmap_mem,
        .open           = open_mem,
};

static struct file_operations kmem_fops = {
        .llseek         = memory_lseek,
        .read           = read_kmem,
        .write          = write_kmem,
        .mmap           = mmap_kmem,
        .open           = open_kmem,
};

static struct file_operations null_fops = {
        .llseek         = null_lseek,
        .read           = read_null,
        .write          = write_null,
        .splice_write   = splice_write_null,
};

#if defined(CONFIG_ISA) || !defined(__mc68000__)
static struct file_operations port_fops = {
        .llseek         = memory_lseek,
        .read           = read_port,
        .write          = write_port,
        .open           = open_port,
};
#endif

static struct file_operations zero_fops = {
        .llseek         = zero_lseek,
        .read           = read_zero,
        .write          = write_zero,
        .mmap           = mmap_zero,
};

static struct backing_dev_info zero_bdi = {
        .capabilities   = BDI_CAP_MAP_COPY,
};

static struct file_operations full_fops = {
        .llseek         = full_lseek,
        .read           = read_full,
        .write          = write_full,
};

#ifdef CONFIG_CRASH_DUMP
static struct file_operations oldmem_fops = {
        .read   = read_oldmem,
        .open   = open_oldmem,
};
#endif

static ssize_t kmsg_write(struct file * file, const char __user * buf,
                          size_t count, loff_t *ppos)
{
        char *tmp;
        ssize_t ret;

        tmp = kmalloc(count + 1, GFP_KERNEL);
        if (tmp == NULL)
                return -ENOMEM;
        ret = -EFAULT;
        if (!copy_from_user(tmp, buf, count)) {
                tmp[count] = 0;
                ret = printk("%s", tmp);
                if (ret > count)
                        /* printk can add a prefix */
                        ret = count;
        }
        kfree(tmp);
        return ret;
}
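
/*
 * Illustrative user-space sketch (not part of this file): injecting a
 * message into the kernel log through this device (minor 11).  Since the
 * buffer is handed to printk(), it may carry a log-level prefix.
 *
 *      int fd = open("/dev/kmsg", O_WRONLY);
 *      const char msg[] = "<4>hello from user space\n";
 *
 *      write(fd, msg, sizeof(msg) - 1);        // shows up in dmesg
 *      close(fd);
 */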

static struct file_operations kmsg_fops = {
        .write =        kmsg_write,
};

static int memory_open(struct inode * inode, struct file * filp)
{
        switch (iminor(inode)) {
                case 1:
                        filp->f_op = &mem_fops;
                        break;
                case 2:
                        filp->f_op = &kmem_fops;
                        break;
                case 3:
                        filp->f_op = &null_fops;
                        break;
#if defined(CONFIG_ISA) || !defined(__mc68000__)
                case 4:
                        filp->f_op = &port_fops;
                        break;
#endif
                case 5:
                        filp->f_mapping->backing_dev_info = &zero_bdi;
                        filp->f_op = &zero_fops;
                        break;
                case 7:
                        filp->f_op = &full_fops;
                        break;
                case 8:
                        filp->f_op = &random_fops;
                        break;
                case 9:
                        filp->f_op = &urandom_fops;
                        break;
                case 11:
                        filp->f_op = &kmsg_fops;
                        break;
#ifdef CONFIG_CRASH_DUMP
                case 12:
                        filp->f_op = &oldmem_fops;
                        break;
#endif
                default:
                        return -ENXIO;
        }
        if (filp->f_op && filp->f_op->open)
                return filp->f_op->open(inode, filp);
        return 0;
}
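
/*
 * Illustrative sketch (not part of this file): it is the minor number of
 * the inode, not the name, that selects the personality above.  A node
 * created anywhere with major MEM_MAJOR (1) and minor 3 behaves exactly
 * like /dev/null:
 *
 *      mknod("/tmp/mynull", S_IFCHR | 0666, makedev(1, 3));
 *
 * memory_open() then installs null_fops on the first open().
 */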

static struct file_operations memory_fops = {
        .open           = memory_open,  /* just a selector for the real open */
};

static const struct {
        unsigned int            minor;
        char                    *name;
        umode_t                 mode;
        const struct file_operations    *fops;
} devlist[] = { /* list of minor devices */
        {1,  "mem",     S_IRUSR | S_IWUSR | S_IRGRP, &mem_fops},
        {2,  "kmem",    S_IRUSR | S_IWUSR | S_IRGRP, &kmem_fops},
        {3,  "null",    S_IRUGO | S_IWUGO,           &null_fops},
#if defined(CONFIG_ISA) || !defined(__mc68000__)
        {4,  "port",    S_IRUSR | S_IWUSR | S_IRGRP, &port_fops},
#endif
        {5,  "zero",    S_IRUGO | S_IWUGO,           &zero_fops},
        {7,  "full",    S_IRUGO | S_IWUGO,           &full_fops},
        {8,  "random",  S_IRUGO | S_IWUSR,           &random_fops},
        {9,  "urandom", S_IRUGO | S_IWUSR,           &urandom_fops},
        {11, "kmsg",    S_IRUGO | S_IWUSR,           &kmsg_fops},
#ifdef CONFIG_CRASH_DUMP
        {12, "oldmem",  S_IRUSR | S_IWUSR | S_IRGRP, &oldmem_fops},
#endif
};

static struct class *mem_class;

static int __init chr_dev_init(void)
{
        int i;

        if (register_chrdev(MEM_MAJOR, "mem", &memory_fops))
                printk("unable to get major %d for memory devs\n", MEM_MAJOR);

        mem_class = class_create(THIS_MODULE, "mem");
        for (i = 0; i < ARRAY_SIZE(devlist); i++)
                class_device_create(mem_class, NULL,
                                        MKDEV(MEM_MAJOR, devlist[i].minor),
                                        NULL, devlist[i].name);

        return 0;
}

fs_initcall(chr_dev_init);