/*
 * Extracted from gitweb snapshot: commit 7b309e7b6cefff2ec8fe3215049930573be88469
 * [safe/jmp/linux-2.6] arch/m68k/kernel/sys_m68k.c
 */
/*
 * linux/arch/m68k/kernel/sys_m68k.c
 *
 * This file contains various random system calls that
 * have a non-standard calling sequence on the Linux/m68k
 * platform.
 */

9 #include <linux/capability.h>
10 #include <linux/errno.h>
11 #include <linux/sched.h>
12 #include <linux/mm.h>
13 #include <linux/fs.h>
14 #include <linux/smp.h>
15 #include <linux/smp_lock.h>
16 #include <linux/sem.h>
17 #include <linux/msg.h>
18 #include <linux/shm.h>
19 #include <linux/stat.h>
20 #include <linux/syscalls.h>
21 #include <linux/mman.h>
22 #include <linux/file.h>
23 #include <linux/ipc.h>
24
25 #include <asm/setup.h>
26 #include <asm/uaccess.h>
27 #include <asm/cachectl.h>
28 #include <asm/traps.h>
29 #include <asm/page.h>
30 #include <asm/unistd.h>
31 #include <linux/elf.h>
32 #include <asm/tlb.h>
33
34 asmlinkage int do_page_fault(struct pt_regs *regs, unsigned long address,
35                              unsigned long error_code);
36
37 asmlinkage long sys_mmap2(unsigned long addr, unsigned long len,
38         unsigned long prot, unsigned long flags,
39         unsigned long fd, unsigned long pgoff)
40 {
41         /*
42          * This is wrong for sun3 - there PAGE_SIZE is 8Kb,
43          * so we need to shift the argument down by 1; m68k mmap64(3)
44          * (in libc) expects the last argument of mmap2 in 4Kb units.
45          */
46         return sys_mmap_pgoff(addr, len, prot, flags, fd, pgoff);
47 }
48
/*
 * sys_ipc() is the de-multiplexer for the SysV IPC calls..
 *
 * This is really horribly ugly.
 *
 * The 'call' ranges are tiered: unmatched values inside a tier return
 * -ENOSYS, values beyond the last tier return -EINVAL.  Keep that
 * distinction intact — userspace can tell them apart.
 */
asmlinkage int sys_ipc (uint call, int first, int second,
			int third, void __user *ptr, long fifth)
{
	int version, ret;

	/* Upper 16 bits select the ABI variant of the call. */
	version = call >> 16; /* hack for backward compatibility */
	call &= 0xffff;

	if (call <= SEMCTL)
		switch (call) {
		case SEMOP:
			return sys_semop (first, ptr, second);
		case SEMGET:
			return sys_semget (first, second, third);
		case SEMCTL: {
			/* Fourth argument (union semun) arrives indirectly
			   through 'ptr'; fetch the embedded pointer. */
			union semun fourth;
			if (!ptr)
				return -EINVAL;
			if (get_user(fourth.__pad, (void __user *__user *) ptr))
				return -EFAULT;
			return sys_semctl (first, second, third, fourth);
			}
		default:
			return -ENOSYS;
		}
	if (call <= MSGCTL)
		switch (call) {
		case MSGSND:
			return sys_msgsnd (first, ptr, second, third);
		case MSGRCV:
			switch (version) {
			case 0: {
				/* Old ABI: msgp and msgtyp are packed in a
				   struct ipc_kludge pointed to by 'ptr'. */
				struct ipc_kludge tmp;
				if (!ptr)
					return -EINVAL;
				if (copy_from_user (&tmp, ptr, sizeof (tmp)))
					return -EFAULT;
				return sys_msgrcv (first, tmp.msgp, second,
						   tmp.msgtyp, third);
				}
			default:
				/* New ABI: msgtyp is passed directly in 'fifth'. */
				return sys_msgrcv (first, ptr,
						   second, fifth, third);
			}
		case MSGGET:
			return sys_msgget ((key_t) first, second);
		case MSGCTL:
			return sys_msgctl (first, second, ptr);
		default:
			return -ENOSYS;
		}
	if (call <= SHMCTL)
		switch (call) {
		case SHMAT:
			switch (version) {
			default: {
				/* Attach, then write the resulting address
				   back to userspace via 'third'. */
				ulong raddr;
				ret = do_shmat (first, ptr, second, &raddr);
				if (ret)
					return ret;
				return put_user (raddr, (ulong __user *) third);
			}
			}
		case SHMDT:
			return sys_shmdt (ptr);
		case SHMGET:
			return sys_shmget (first, second, third);
		case SHMCTL:
			return sys_shmctl (first, second, ptr);
		default:
			return -ENOSYS;
		}

	return -EINVAL;
}
129
/* Convert virtual (user) address VADDR to physical address PADDR.
   Uses the 68040 PTESTR instruction to walk the translation tables;
   the result lands in the MMUSR register.  If the resident bit
   (MMU_R_040) is clear the page is not mapped and 0 is returned —
   callers treat 0 as "skip this page". */
#define virt_to_phys_040(vaddr)						\
({									\
  unsigned long _mmusr, _paddr;						\
									\
  __asm__ __volatile__ (".chip 68040\n\t"				\
			"ptestr (%1)\n\t"				\
			"movec %%mmusr,%0\n\t"				\
			".chip 68k"					\
			: "=r" (_mmusr)					\
			: "a" (vaddr));					\
  _paddr = (_mmusr & MMU_R_040) ? (_mmusr & PAGE_MASK) : 0;		\
  _paddr;								\
})
144
/*
 * Flush the 68040 caches over a user address range.
 *
 * @addr:  virtual start address (user space)
 * @scope: FLUSH_SCOPE_LINE, FLUSH_SCOPE_PAGE or FLUSH_SCOPE_ALL
 * @cache: FLUSH_CACHE_DATA, FLUSH_CACHE_INSN or FLUSH_CACHE_BOTH
 * @len:   length of the range in bytes
 *
 * The 040 CPUSHL/CPUSHP instructions operate on physical addresses,
 * so LINE and PAGE scope translate each page with virt_to_phys_040()
 * and silently skip pages that are not currently mapped.
 * Always returns 0.
 */
static inline int
cache_flush_040 (unsigned long addr, int scope, int cache, unsigned long len)
{
  unsigned long paddr, i;

  switch (scope)
    {
    case FLUSH_SCOPE_ALL:
      switch (cache)
	{
	case FLUSH_CACHE_DATA:
	  /* This nop is needed for some broken versions of the 68040.  */
	  __asm__ __volatile__ ("nop\n\t"
				".chip 68040\n\t"
				"cpusha %dc\n\t"
				".chip 68k");
	  break;
	case FLUSH_CACHE_INSN:
	  __asm__ __volatile__ ("nop\n\t"
				".chip 68040\n\t"
				"cpusha %ic\n\t"
				".chip 68k");
	  break;
	default:
	case FLUSH_CACHE_BOTH:
	  __asm__ __volatile__ ("nop\n\t"
				".chip 68040\n\t"
				"cpusha %bc\n\t"
				".chip 68k");
	  break;
	}
      break;

    case FLUSH_SCOPE_LINE:
      /* Find the physical address of the first mapped page in the
	 address range.  */
      if ((paddr = virt_to_phys_040(addr))) {
	/* Add the in-page offset, rounded down to a 16-byte line. */
	paddr += addr & ~(PAGE_MASK | 15);
	/* Convert byte length to a count of 16-byte cache lines. */
	len = (len + (addr & 15) + 15) >> 4;
      } else {
	unsigned long tmp = PAGE_SIZE - (addr & ~PAGE_MASK);

	if (len <= tmp)
	  return 0;
	addr += tmp;
	len -= tmp;
	tmp = PAGE_SIZE;
	/* Skip forward page by page until a mapped page is found. */
	for (;;)
	  {
	    if ((paddr = virt_to_phys_040(addr)))
	      break;
	    if (len <= tmp)
	      return 0;
	    addr += tmp;
	    len -= tmp;
	  }
	len = (len + 15) >> 4;
      }
      /* 'i' counts cache lines remaining before the next page boundary. */
      i = (PAGE_SIZE - (paddr & ~PAGE_MASK)) >> 4;
      while (len--)
	{
	  switch (cache)
	    {
	    case FLUSH_CACHE_DATA:
	      __asm__ __volatile__ ("nop\n\t"
				    ".chip 68040\n\t"
				    "cpushl %%dc,(%0)\n\t"
				    ".chip 68k"
				    : : "a" (paddr));
	      break;
	    case FLUSH_CACHE_INSN:
	      __asm__ __volatile__ ("nop\n\t"
				    ".chip 68040\n\t"
				    "cpushl %%ic,(%0)\n\t"
				    ".chip 68k"
				    : : "a" (paddr));
	      break;
	    default:
	    case FLUSH_CACHE_BOTH:
	      __asm__ __volatile__ ("nop\n\t"
				    ".chip 68040\n\t"
				    "cpushl %%bc,(%0)\n\t"
				    ".chip 68k"
				    : : "a" (paddr));
	      break;
	    }
	  if (!--i && len)
	    {
	      /*
	       * No need to page align here since it is done by
	       * virt_to_phys_040().
	       */
	      addr += PAGE_SIZE;
	      i = PAGE_SIZE / 16;
	      /* Recompute physical address when crossing a page
	         boundary. */
	      for (;;)
		{
		  if ((paddr = virt_to_phys_040(addr)))
		    break;
		  if (len <= i)
		    return 0;
		  len -= i;
		  addr += PAGE_SIZE;
		}
	    }
	  else
	    paddr += 16;
	}
      break;

    default:
    case FLUSH_SCOPE_PAGE:
      /* Round the range out to whole pages, then push each mapped page. */
      len += (addr & ~PAGE_MASK) + (PAGE_SIZE - 1);
      for (len >>= PAGE_SHIFT; len--; addr += PAGE_SIZE)
	{
	  if (!(paddr = virt_to_phys_040(addr)))
	    continue;
	  switch (cache)
	    {
	    case FLUSH_CACHE_DATA:
	      __asm__ __volatile__ ("nop\n\t"
				    ".chip 68040\n\t"
				    "cpushp %%dc,(%0)\n\t"
				    ".chip 68k"
				    : : "a" (paddr));
	      break;
	    case FLUSH_CACHE_INSN:
	      __asm__ __volatile__ ("nop\n\t"
				    ".chip 68040\n\t"
				    "cpushp %%ic,(%0)\n\t"
				    ".chip 68k"
				    : : "a" (paddr));
	      break;
	    default:
	    case FLUSH_CACHE_BOTH:
	      __asm__ __volatile__ ("nop\n\t"
				    ".chip 68040\n\t"
				    "cpushp %%bc,(%0)\n\t"
				    ".chip 68k"
				    : : "a" (paddr));
	      break;
	    }
	}
      break;
    }
  return 0;
}
293
/* Convert virtual (user) address VADDR to physical address on the
   68060 using PLPAR (load physical address, read access).  Unlike the
   040 variant there is no explicit "not mapped" test here — callers
   still treat a 0 result as an unmapped page (hence the XXX). */
#define virt_to_phys_060(vaddr)				\
({							\
  unsigned long paddr;					\
  __asm__ __volatile__ (".chip 68060\n\t"		\
			"plpar (%0)\n\t"		\
			".chip 68k"			\
			: "=a" (paddr)			\
			: "0" (vaddr));			\
  (paddr); /* XXX */					\
})
304
/*
 * Flush the 68060 caches over a user address range.
 *
 * @addr:  virtual start address (user space)
 * @scope: FLUSH_SCOPE_LINE, FLUSH_SCOPE_PAGE or FLUSH_SCOPE_ALL
 * @cache: FLUSH_CACHE_DATA, FLUSH_CACHE_INSN or FLUSH_CACHE_BOTH
 * @len:   length of the range in bytes
 *
 * Same structure as cache_flush_040(), but uses PLPAR-based
 * translation and needs no leading nop.  Always returns 0.
 */
static inline int
cache_flush_060 (unsigned long addr, int scope, int cache, unsigned long len)
{
  unsigned long paddr, i;

  /*
   * 68060 manual says:
   *  cpush %dc : flush DC, remains valid (with our %cacr setup)
   *  cpush %ic : invalidate IC
   *  cpush %bc : flush DC + invalidate IC
   */
  switch (scope)
    {
    case FLUSH_SCOPE_ALL:
      switch (cache)
	{
	case FLUSH_CACHE_DATA:
	  __asm__ __volatile__ (".chip 68060\n\t"
				"cpusha %dc\n\t"
				".chip 68k");
	  break;
	case FLUSH_CACHE_INSN:
	  __asm__ __volatile__ (".chip 68060\n\t"
				"cpusha %ic\n\t"
				".chip 68k");
	  break;
	default:
	case FLUSH_CACHE_BOTH:
	  __asm__ __volatile__ (".chip 68060\n\t"
				"cpusha %bc\n\t"
				".chip 68k");
	  break;
	}
      break;

    case FLUSH_SCOPE_LINE:
      /* Find the physical address of the first mapped page in the
	 address range.  */
      /* Align the start down to a 16-byte cache line and widen len
	 to compensate. */
      len += addr & 15;
      addr &= -16;
      if (!(paddr = virt_to_phys_060(addr))) {
	unsigned long tmp = PAGE_SIZE - (addr & ~PAGE_MASK);

	if (len <= tmp)
	  return 0;
	addr += tmp;
	len -= tmp;
	tmp = PAGE_SIZE;
	/* Skip forward page by page until a mapped page is found. */
	for (;;)
	  {
	    if ((paddr = virt_to_phys_060(addr)))
	      break;
	    if (len <= tmp)
	      return 0;
	    addr += tmp;
	    len -= tmp;
	  }
      }
      /* Convert byte length to a count of 16-byte cache lines. */
      len = (len + 15) >> 4;
      /* 'i' counts cache lines remaining before the next page boundary. */
      i = (PAGE_SIZE - (paddr & ~PAGE_MASK)) >> 4;
      while (len--)
	{
	  switch (cache)
	    {
	    case FLUSH_CACHE_DATA:
	      __asm__ __volatile__ (".chip 68060\n\t"
				    "cpushl %%dc,(%0)\n\t"
				    ".chip 68k"
				    : : "a" (paddr));
	      break;
	    case FLUSH_CACHE_INSN:
	      __asm__ __volatile__ (".chip 68060\n\t"
				    "cpushl %%ic,(%0)\n\t"
				    ".chip 68k"
				    : : "a" (paddr));
	      break;
	    default:
	    case FLUSH_CACHE_BOTH:
	      __asm__ __volatile__ (".chip 68060\n\t"
				    "cpushl %%bc,(%0)\n\t"
				    ".chip 68k"
				    : : "a" (paddr));
	      break;
	    }
	  if (!--i && len)
	    {

	      /*
	       * We just want to jump to the first cache line
	       * in the next page.
	       */
	      addr += PAGE_SIZE;
	      addr &= PAGE_MASK;

	      i = PAGE_SIZE / 16;
	      /* Recompute physical address when crossing a page
	         boundary. */
	      for (;;)
		{
		  if ((paddr = virt_to_phys_060(addr)))
		    break;
		  if (len <= i)
		    return 0;
		  len -= i;
		  addr += PAGE_SIZE;
		}
	    }
	  else
	    paddr += 16;
	}
      break;

    default:
    case FLUSH_SCOPE_PAGE:
      /* Round the range out to whole pages, then push each mapped page. */
      len += (addr & ~PAGE_MASK) + (PAGE_SIZE - 1);
      addr &= PAGE_MASK;	/* Workaround for bug in some
				   revisions of the 68060 */
      for (len >>= PAGE_SHIFT; len--; addr += PAGE_SIZE)
	{
	  if (!(paddr = virt_to_phys_060(addr)))
	    continue;
	  switch (cache)
	    {
	    case FLUSH_CACHE_DATA:
	      __asm__ __volatile__ (".chip 68060\n\t"
				    "cpushp %%dc,(%0)\n\t"
				    ".chip 68k"
				    : : "a" (paddr));
	      break;
	    case FLUSH_CACHE_INSN:
	      __asm__ __volatile__ (".chip 68060\n\t"
				    "cpushp %%ic,(%0)\n\t"
				    ".chip 68k"
				    : : "a" (paddr));
	      break;
	    default:
	    case FLUSH_CACHE_BOTH:
	      __asm__ __volatile__ (".chip 68060\n\t"
				    "cpushp %%bc,(%0)\n\t"
				    ".chip 68k"
				    : : "a" (paddr));
	      break;
	    }
	}
      break;
    }
  return 0;
}
453
/* sys_cacheflush -- flush (part of) the processor cache.
 *
 * @addr:  start of the user address range to flush
 * @scope: FLUSH_SCOPE_LINE / _PAGE / _ALL
 * @cache: FLUSH_CACHE_DATA / _INSN / _BOTH
 * @len:   length of the range in bytes
 *
 * Returns 0 on success, -EINVAL for bad arguments or a range outside
 * the caller's VMAs, -EPERM for a whole-cache flush without
 * CAP_SYS_ADMIN.
 */
asmlinkage int
sys_cacheflush (unsigned long addr, int scope, int cache, unsigned long len)
{
	struct vm_area_struct *vma;
	int ret = -EINVAL;

	lock_kernel();
	if (scope < FLUSH_SCOPE_LINE || scope > FLUSH_SCOPE_ALL ||
	    cache & ~FLUSH_CACHE_BOTH)
		goto out;

	if (scope == FLUSH_SCOPE_ALL) {
		/* Only the superuser may explicitly flush the whole cache. */
		ret = -EPERM;
		if (!capable(CAP_SYS_ADMIN))
			goto out;
	} else {
		/*
		 * Verify that the specified address region actually belongs
		 * to this process.
		 */
		vma = find_vma (current->mm, addr);
		ret = -EINVAL;
		/* Check for overflow.  */
		if (addr + len < addr)
			goto out;
		/* The whole range must lie within a single VMA. */
		if (vma == NULL || addr < vma->vm_start || addr + len > vma->vm_end)
			goto out;
	}

	if (CPU_IS_020_OR_030) {
		if (scope == FLUSH_SCOPE_LINE && len < 256) {
			/* Per-line flush: walk the range 4 bytes at a time,
			   loading each address into CAAR and setting the
			   clear-entry bits in CACR (68030 layout: 4 = CEI,
			   0x400 = CED). */
			unsigned long cacr;
			__asm__ ("movec %%cacr, %0" : "=r" (cacr));
			if (cache & FLUSH_CACHE_INSN)
				cacr |= 4;
			if (cache & FLUSH_CACHE_DATA)
				cacr |= 0x400;
			len >>= 2;
			while (len--) {
				__asm__ __volatile__ ("movec %1, %%caar\n\t"
						      "movec %0, %%cacr"
						      : /* no outputs */
						      : "r" (cacr), "r" (addr));
				addr += 4;
			}
		} else {
			/* Flush the whole cache, even if page granularity requested.
			   (68030 CACR: 8 = CI clears the icache, 0x800 = CD
			   clears the dcache.) */
			unsigned long cacr;
			__asm__ ("movec %%cacr, %0" : "=r" (cacr));
			if (cache & FLUSH_CACHE_INSN)
				cacr |= 8;
			if (cache & FLUSH_CACHE_DATA)
				cacr |= 0x800;
			__asm__ __volatile__ ("movec %0, %%cacr" : : "r" (cacr));
		}
		ret = 0;
		goto out;
	} else {
	    /*
	     * 040 or 060: don't blindly trust 'scope', someone could
	     * try to flush a few megs of memory.
	     */

	    /* Escalate granularity for large ranges: per-line flushing
	       of megabytes would be slower than a coarser flush. */
	    if (len>=3*PAGE_SIZE && scope<FLUSH_SCOPE_PAGE)
		scope=FLUSH_SCOPE_PAGE;
	    if (len>=10*PAGE_SIZE && scope<FLUSH_SCOPE_ALL)
		scope=FLUSH_SCOPE_ALL;
	    if (CPU_IS_040) {
		ret = cache_flush_040 (addr, scope, cache, len);
	    } else if (CPU_IS_060) {
		ret = cache_flush_060 (addr, scope, cache, len);
	    }
	}
out:
	unlock_kernel();
	return ret;
}
533
534 asmlinkage int sys_getpagesize(void)
535 {
536         return PAGE_SIZE;
537 }
538
/*
 * Do a system call from kernel instead of calling sys_execve so we
 * end up with proper pt_regs.
 *
 * The m68k syscall ABI is emulated directly: syscall number in %d0,
 * arguments in %d1-%d3, then trap #0.  The "+d" constraint on __res
 * also carries back the return value from %d0.
 */
int kernel_execve(const char *filename, char *const argv[], char *const envp[])
{
	register long __res asm ("%d0") = __NR_execve;
	register long __a asm ("%d1") = (long)(filename);
	register long __b asm ("%d2") = (long)(argv);
	register long __c asm ("%d3") = (long)(envp);
	asm volatile ("trap  #0" : "+d" (__res)
			: "d" (__a), "d" (__b), "d" (__c));
	return __res;
}
553
554 asmlinkage unsigned long sys_get_thread_area(void)
555 {
556         return current_thread_info()->tp_value;
557 }
558
559 asmlinkage int sys_set_thread_area(unsigned long tp)
560 {
561         current_thread_info()->tp_value = tp;
562         return 0;
563 }
564
/* This syscall gets its arguments in A0 (mem), D2 (oldval) and
   D1 (newval).

   Compare-and-exchange for user space: if *mem == oldval, store
   newval; either way return the value that was read.  Safe only on
   uniprocessor — the read-modify-write below is not atomic against
   another CPU, only against this CPU's own preemption via the page
   table lock.  Returns 0xdeadbeef if the fault fixup fails (a SIGSEGV
   will then be pending for the process).  */
asmlinkage int
sys_atomic_cmpxchg_32(unsigned long newval, int oldval, int d3, int d4, int d5,
		      unsigned long __user * mem)
{
	/* This was borrowed from ARM's implementation.  */
	for (;;) {
		struct mm_struct *mm = current->mm;
		pgd_t *pgd;
		pmd_t *pmd;
		pte_t *pte;
		spinlock_t *ptl;
		unsigned long mem_value;

		/* Walk the page tables to prove the page is present,
		   writable and dirty before touching it directly. */
		down_read(&mm->mmap_sem);
		pgd = pgd_offset(mm, (unsigned long)mem);
		if (!pgd_present(*pgd))
			goto bad_access;
		pmd = pmd_offset(pgd, (unsigned long)mem);
		if (!pmd_present(*pmd))
			goto bad_access;
		pte = pte_offset_map_lock(mm, pmd, (unsigned long)mem, &ptl);
		if (!pte_present(*pte) || !pte_dirty(*pte)
		    || !pte_write(*pte)) {
			pte_unmap_unlock(pte, ptl);
			goto bad_access;
		}

		/* Direct dereference of the user pointer: legitimate here
		   because the mapping was just validated under ptl. */
		mem_value = *mem;
		if (mem_value == oldval)
			*mem = newval;

		pte_unmap_unlock(pte, ptl);
		up_read(&mm->mmap_sem);
		return mem_value;

	      bad_access:
		up_read(&mm->mmap_sem);
		/* This is not necessarily a bad access, we can get here if
		   a memory we're trying to write to should be copied-on-write.
		   Make the kernel do the necessary page stuff, then re-iterate.
		   Simulate a write access fault to do that.  */
		{
			/* The first argument of the function corresponds to
			   D1, which is the first field of struct pt_regs.  */
			struct pt_regs *fp = (struct pt_regs *)&newval;

			/* '3' is an RMW flag.  */
			if (do_page_fault(fp, (unsigned long)mem, 3))
				/* If the do_page_fault() failed, we don't
				   have anything meaningful to return.
				   There should be a SIGSEGV pending for
				   the process.  */
				return 0xdeadbeef;
		}
	}
}
623
624 asmlinkage int sys_atomic_barrier(void)
625 {
626         /* no code needed for uniprocs */
627         return 0;
628 }