Merge rsync://rsync.kernel.org/pub/scm/linux/kernel/git/davem/sparc-2.6
author Linus Torvalds <torvalds@ppc970.osdl.org>
Wed, 22 Jun 2005 01:19:10 +0000 (18:19 -0700)
committer Linus Torvalds <torvalds@ppc970.osdl.org>
Wed, 22 Jun 2005 01:19:10 +0000 (18:19 -0700)
arch/sparc64/solaris/socket.c
include/asm-sparc64/processor.h

diff --git a/arch/sparc64/solaris/socket.c b/arch/sparc64/solaris/socket.c
index ec8e074..0674058 100644
--- a/arch/sparc64/solaris/socket.c
+++ b/arch/sparc64/solaris/socket.c
@@ -317,8 +317,10 @@ asmlinkage int solaris_sendmsg(int fd, struct sol_nmsghdr __user *user_msg, unsi
                unsigned long *kcmsg;
                compat_size_t cmlen;
 
-               if(kern_msg.msg_controllen > sizeof(ctl) &&
-                  kern_msg.msg_controllen <= 256) {
+               if (kern_msg.msg_controllen <= sizeof(compat_size_t))
+                       return -EINVAL;
+
+               if(kern_msg.msg_controllen > sizeof(ctl)) {
                        err = -ENOBUFS;
                        ctl_buf = kmalloc(kern_msg.msg_controllen, GFP_KERNEL);
                        if(!ctl_buf)
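
The hunk above swaps the old upper bound of 256 bytes for an explicit
lower-bound check: a msg_controllen too small to hold even a single
compat_size_t length field is rejected with -EINVAL before any buffer
is chosen, and larger control areas get an exact-size kmalloc().  A
standalone sketch of that validation pattern (a hypothetical helper,
not the kernel code; inline_buf stands in for the on-stack ctl[]):

#include <errno.h>
#include <stdint.h>
#include <stdlib.h>

/* Reject control lengths too short to hold one length field, then
 * pick an inline buffer for small areas and a heap buffer otherwise.
 * Returns 0 or a negative errno, kernel-style.
 */
static int alloc_ctl_buf(uint32_t controllen, char *inline_buf,
                         size_t inline_size, char **ctl_buf)
{
        if (controllen <= sizeof(uint32_t))     /* cf. sizeof(compat_size_t) */
                return -EINVAL;

        if (controllen > inline_size) {
                *ctl_buf = malloc(controllen);  /* exact-size heap buffer */
                if (!*ctl_buf)
                        return -ENOBUFS;
        } else {
                *ctl_buf = inline_buf;          /* reuse caller's storage */
        }
        return 0;
}
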
diff --git a/include/asm-sparc64/processor.h b/include/asm-sparc64/processor.h
index bc1445b..d0bee24 100644
--- a/include/asm-sparc64/processor.h
+++ b/include/asm-sparc64/processor.h
@@ -192,6 +192,40 @@ extern unsigned long get_wchan(struct task_struct *task);
 
 #define cpu_relax()    barrier()
 
+/* Prefetch support.  This is tuned for UltraSPARC-III and later.
+ * UltraSPARC-I will treat these as nops, and UltraSPARC-II has
+ * a shallower prefetch queue than later chips.
+ */
+#define ARCH_HAS_PREFETCH
+#define ARCH_HAS_PREFETCHW
+#define ARCH_HAS_SPINLOCK_PREFETCH
+
+static inline void prefetch(const void *x)
+{
+       /* We do not use the read prefetch mnemonic because that
+        * prefetches into the prefetch-cache, which is only accessible
+        * via floating point operations on UltraSPARC-III and later.
+        * By contrast, "#one_write" prefetches into the L2 cache
+        * in shared state.
+        */
+       __asm__ __volatile__("prefetch [%0], #one_write"
+                            : /* no outputs */
+                            : "r" (x));
+}
+
+static inline void prefetchw(const void *x)
+{
+       /* The optimal prefetch to use for writes is
+        * "#n_writes", which brings the cacheline into the
+        * L2 cache in "owned" state.
+        */
+       __asm__ __volatile__("prefetch [%0], #n_writes"
+                            : /* no outputs */
+                            : "r" (x));
+}
+
+#define spin_lock_prefetch(x)  prefetchw(x)
+
 #endif /* !(__ASSEMBLY__) */
 
 #endif /* !(__ASM_SPARC64_PROCESSOR_H) */
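
A typical consumer of these helpers is pointer-chasing code, which can
start the fetch of the next element while still working on the current
one.  A minimal sketch of that usage pattern, using GCC's
__builtin_prefetch() as a portable stand-in for the sparc64
prefetch()/prefetchw() above (the list type here is hypothetical):

#include <stddef.h>

struct node {
        struct node *next;
        long payload;
};

static long sum_list(struct node *head)
{
        long sum = 0;
        struct node *n;

        for (n = head; n != NULL; n = n->next) {
                /* Pull the next node toward the cache while we use
                 * this one; by the time the loop advances, the load
                 * has (ideally) already completed.
                 */
                if (n->next)
                        __builtin_prefetch(n->next, 0, 3);  /* read, high locality */
                sum += n->payload;
        }
        return sum;
}

Defining ARCH_HAS_PREFETCH and ARCH_HAS_PREFETCHW is what makes
linux/prefetch.h pick up these architecture implementations instead of
its generic fallbacks.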