X-Git-Url: http://ftp.safe.ca/?a=blobdiff_plain;f=fs%2Fselect.c;h=00f58c5c7e05251ef4350e762481df8b07294212;hb=305787e44ebc21d87ab4d4949da5b97d4252aa9b;hp=1815a57d225585122c22dd0a357dbdced59e1c40;hpb=74910e6c7dc7471b286a883c1a7af70483ffd2ba;p=safe%2Fjmp%2Flinux-2.6
diff --git a/fs/select.c b/fs/select.c
index 1815a57..00f58c5 100644
--- a/fs/select.c
+++ b/fs/select.c
@@ -14,10 +14,10 @@
  *     of fds to overcome nfds < 16390 descriptors limit (Tigran Aivazian).
  */
 
+#include <linux/kernel.h>
 #include <linux/syscalls.h>
 #include <linux/module.h>
 #include <linux/slab.h>
-#include <linux/smp_lock.h>
 #include <linux/poll.h>
 #include <linux/personality.h> /* for STICKY_TIMEOUTS */
 #include <linux/file.h>
@@ -26,15 +26,6 @@
 
 #include <asm/uaccess.h>
 
-#define ROUND_UP(x,y) (((x)+(y)-1)/(y))
-#define DEFAULT_POLLMASK (POLLIN | POLLOUT | POLLRDNORM | POLLWRNORM)
-
-struct poll_table_entry {
-	struct file * filp;
-	wait_queue_t wait;
-	wait_queue_head_t * wait_address;
-};
-
 struct poll_table_page {
 	struct poll_table_page * next;
 	struct poll_table_entry * entry;
@@ -64,13 +55,23 @@ void poll_initwait(struct poll_wqueues *pwq)
 	init_poll_funcptr(&pwq->pt, __pollwait);
 	pwq->error = 0;
 	pwq->table = NULL;
+	pwq->inline_index = 0;
 }
 
 EXPORT_SYMBOL(poll_initwait);
 
+static void free_poll_entry(struct poll_table_entry *entry)
+{
+	remove_wait_queue(entry->wait_address, &entry->wait);
+	fput(entry->filp);
+}
+
 void poll_freewait(struct poll_wqueues *pwq)
 {
 	struct poll_table_page * p = pwq->table;
+	int i;
+	for (i = 0; i < pwq->inline_index; i++)
+		free_poll_entry(pwq->inline_entries + i);
 	while (p) {
 		struct poll_table_entry * entry;
 		struct poll_table_page *old;
@@ -78,8 +79,7 @@ void poll_freewait(struct poll_wqueues *pwq)
 		entry = p->entry;
 		do {
 			entry--;
-			remove_wait_queue(entry->wait_address,&entry->wait);
-			fput(entry->filp);
+			free_poll_entry(entry);
 		} while (entry > p->entries);
 		old = p;
 		p = p->next;
@@ -89,12 +89,14 @@ void poll_freewait(struct poll_wqueues *pwq)
 
 EXPORT_SYMBOL(poll_freewait);
 
-static void __pollwait(struct file *filp, wait_queue_head_t *wait_address,
-		       poll_table *_p)
+static struct poll_table_entry *poll_get_entry(poll_table *_p)
 {
 	struct poll_wqueues *p = container_of(_p, struct poll_wqueues, pt);
 	struct poll_table_page *table = p->table;
 
+	if (p->inline_index < N_INLINE_POLL_ENTRIES)
+		return p->inline_entries + p->inline_index++;
+
 	if (!table || POLL_TABLE_FULL(table)) {
 		struct poll_table_page *new_table;
 
@@ -102,7 +104,7 @@ static void __pollwait(struct file *filp, wait_queue_head_t *wait_address,
 		if (!new_table) {
 			p->error = -ENOMEM;
 			__set_current_state(TASK_RUNNING);
-			return;
+			return NULL;
 		}
 		new_table->entry = new_table->entries;
 		new_table->next = table;
@@ -110,16 +112,21 @@ static void __pollwait(struct file *filp, wait_queue_head_t *wait_address,
 		table = new_table;
 	}
 
-	/* Add a new entry */
-	{
-		struct poll_table_entry * entry = table->entry;
-		table->entry = entry+1;
-		get_file(filp);
-		entry->filp = filp;
-		entry->wait_address = wait_address;
-		init_waitqueue_entry(&entry->wait, current);
-		add_wait_queue(wait_address,&entry->wait);
-	}
+	return table->entry++;
+}
+
+/* Add a new entry */
+static void __pollwait(struct file *filp, wait_queue_head_t *wait_address,
+				poll_table *p)
+{
+	struct poll_table_entry *entry = poll_get_entry(p);
+	if (!entry)
+		return;
+	get_file(filp);
+	entry->filp = filp;
+	entry->wait_address = wait_address;
+	init_waitqueue_entry(&entry->wait, current);
+	add_wait_queue(wait_address, &entry->wait);
 }
 
 #define FDS_IN(fds, n)		(fds->in + n)
@@ -170,11 +177,6 @@ get_max:
 	return max;
}
 
-#define BIT(i)		(1UL << ((i)&(__NFDBITS-1)))
-#define MEM(i,m)	((m)+(unsigned)(i)/__NFDBITS)
-#define ISSET(i,m)	(((i)&*(m)) 
!= 0) -#define SET(i,m) (*(m) |= (i)) - #define POLLIN_SET (POLLRDNORM | POLLRDBAND | POLLIN | POLLHUP | POLLERR) #define POLLOUT_SET (POLLWRBAND | POLLWRNORM | POLLOUT | POLLERR) #define POLLEX_SET (POLLPRI) @@ -210,7 +212,7 @@ int do_select(int n, fd_set_bits *fds, s64 *timeout) for (i = 0; i < n; ++rinp, ++routp, ++rexp) { unsigned long in, out, ex, all_bits, bit = 1, mask, j; unsigned long res_in = 0, res_out = 0, res_ex = 0; - struct file_operations *f_op = NULL; + const struct file_operations *f_op = NULL; struct file *file = NULL; in = *inp++; out = *outp++; ex = *exp++; @@ -221,17 +223,18 @@ int do_select(int n, fd_set_bits *fds, s64 *timeout) } for (j = 0; j < __NFDBITS; ++j, ++i, bit <<= 1) { + int fput_needed; if (i >= n) break; if (!(bit & all_bits)) continue; - file = fget(i); + file = fget_light(i, &fput_needed); if (file) { f_op = file->f_op; mask = DEFAULT_POLLMASK; if (f_op && f_op->poll) mask = (*f_op->poll)(file, retval ? NULL : wait); - fput(file); + fput_light(file, fput_needed); if ((mask & POLLIN_SET) && (in & bit)) { res_in |= bit; retval++; @@ -257,7 +260,7 @@ int do_select(int n, fd_set_bits *fds, s64 *timeout) wait = NULL; if (retval || !*timeout || signal_pending(current)) break; - if(table.error) { + if (table.error) { retval = table.error; break; } @@ -284,16 +287,6 @@ int do_select(int n, fd_set_bits *fds, s64 *timeout) return retval; } -static void *select_bits_alloc(int size) -{ - return kmalloc(6 * size, GFP_KERNEL); -} - -static void select_bits_free(void *bits, int size) -{ - kfree(bits); -} - /* * We can actually return ERESTARTSYS instead of EINTR, but I'd * like to be certain this leads to no problems. So I return @@ -309,38 +302,45 @@ static int core_sys_select(int n, fd_set __user *inp, fd_set __user *outp, fd_set __user *exp, s64 *timeout) { fd_set_bits fds; - char *bits; - int ret, size, max_fdset; + void *bits; + int ret, max_fds; + unsigned int size; struct fdtable *fdt; + /* Allocate small arguments on the stack to save memory and be faster */ + long stack_fds[SELECT_STACK_ALLOC/sizeof(long)]; ret = -EINVAL; if (n < 0) goto out_nofds; - /* max_fdset can increase, so grab it once to avoid race */ + /* max_fds can increase, so grab it once to avoid race */ rcu_read_lock(); fdt = files_fdtable(current->files); - max_fdset = fdt->max_fdset; + max_fds = fdt->max_fds; rcu_read_unlock(); - if (n > max_fdset) - n = max_fdset; + if (n > max_fds) + n = max_fds; /* * We need 6 bitmaps (in/out/ex for both incoming and outgoing), * since we used fdset we need to allocate memory in units of * long-words. 
*/ - ret = -ENOMEM; size = FDS_BYTES(n); - bits = select_bits_alloc(size); - if (!bits) - goto out_nofds; - fds.in = (unsigned long *) bits; - fds.out = (unsigned long *) (bits + size); - fds.ex = (unsigned long *) (bits + 2*size); - fds.res_in = (unsigned long *) (bits + 3*size); - fds.res_out = (unsigned long *) (bits + 4*size); - fds.res_ex = (unsigned long *) (bits + 5*size); + bits = stack_fds; + if (size > sizeof(stack_fds) / 6) { + /* Not enough space in on-stack array; must use kmalloc */ + ret = -ENOMEM; + bits = kmalloc(6 * size, GFP_KERNEL); + if (!bits) + goto out_nofds; + } + fds.in = bits; + fds.out = bits + size; + fds.ex = bits + 2*size; + fds.res_in = bits + 3*size; + fds.res_out = bits + 4*size; + fds.res_ex = bits + 5*size; if ((ret = get_fd_set(n, inp, fds.in)) || (ret = get_fd_set(n, outp, fds.out)) || @@ -367,7 +367,8 @@ static int core_sys_select(int n, fd_set __user *inp, fd_set __user *outp, ret = -EFAULT; out: - select_bits_free(bits, size); + if (bits != stack_fds) + kfree(bits); out_nofds: return ret; } @@ -390,7 +391,7 @@ asmlinkage long sys_select(int n, fd_set __user *inp, fd_set __user *outp, if ((u64)tv.tv_sec >= (u64)MAX_INT64_SECONDS) timeout = -1; /* infinite */ else { - timeout = ROUND_UP(tv.tv_usec, USEC_PER_SEC/HZ); + timeout = DIV_ROUND_UP(tv.tv_usec, USEC_PER_SEC/HZ); timeout += tv.tv_sec * HZ; } } @@ -445,7 +446,7 @@ asmlinkage long sys_pselect7(int n, fd_set __user *inp, fd_set __user *outp, if ((u64)ts.tv_sec >= (u64)MAX_INT64_SECONDS) timeout = -1; /* infinite */ else { - timeout = ROUND_UP(ts.tv_nsec, NSEC_PER_SEC/HZ); + timeout = DIV_ROUND_UP(ts.tv_nsec, NSEC_PER_SEC/HZ); timeout += ts.tv_sec * HZ; } } @@ -537,36 +538,38 @@ struct poll_list { #define POLLFD_PER_PAGE ((PAGE_SIZE-sizeof(struct poll_list)) / sizeof(struct pollfd)) -static void do_pollfd(unsigned int num, struct pollfd * fdpage, - poll_table ** pwait, int *count) +/* + * Fish for pollable events on the pollfd->fd file descriptor. We're only + * interested in events matching the pollfd->events mask, and the result + * matching that mask is both recorded in pollfd->revents and returned. The + * pwait poll_table will be used by the fd-provided poll handler for waiting, + * if non-NULL. + */ +static inline unsigned int do_pollfd(struct pollfd *pollfd, poll_table *pwait) { - int i; - - for (i = 0; i < num; i++) { - int fd; - unsigned int mask; - struct pollfd *fdp; - - mask = 0; - fdp = fdpage+i; - fd = fdp->fd; - if (fd >= 0) { - struct file * file = fget(fd); - mask = POLLNVAL; - if (file != NULL) { - mask = DEFAULT_POLLMASK; - if (file->f_op && file->f_op->poll) - mask = file->f_op->poll(file, *pwait); - mask &= fdp->events | POLLERR | POLLHUP; - fput(file); - } - if (mask) { - *pwait = NULL; - (*count)++; - } + unsigned int mask; + int fd; + + mask = 0; + fd = pollfd->fd; + if (fd >= 0) { + int fput_needed; + struct file * file; + + file = fget_light(fd, &fput_needed); + mask = POLLNVAL; + if (file != NULL) { + mask = DEFAULT_POLLMASK; + if (file->f_op && file->f_op->poll) + mask = file->f_op->poll(file, pwait); + /* Mask out unneeded events. 
*/ + mask &= pollfd->events | POLLERR | POLLHUP; + fput_light(file, fput_needed); } - fdp->revents = mask; } + pollfd->revents = mask; + + return mask; } static int do_poll(unsigned int nfds, struct poll_list *list, @@ -578,22 +581,42 @@ static int do_poll(unsigned int nfds, struct poll_list *list, /* Optimise the no-wait case */ if (!(*timeout)) pt = NULL; - + for (;;) { struct poll_list *walk; long __timeout; set_current_state(TASK_INTERRUPTIBLE); - walk = list; - while(walk != NULL) { - do_pollfd( walk->len, walk->entries, &pt, &count); - walk = walk->next; + for (walk = list; walk != NULL; walk = walk->next) { + struct pollfd * pfd, * pfd_end; + + pfd = walk->entries; + pfd_end = pfd + walk->len; + for (; pfd != pfd_end; pfd++) { + /* + * Fish for events. If we found one, record it + * and kill the poll_table, so we don't + * needlessly register any other waiters after + * this. They'll get immediately deregistered + * when we break out and return. + */ + if (do_pollfd(pfd, pt)) { + count++; + pt = NULL; + } + } } + /* + * All waiters have already been registered, so don't provide + * a poll_table to them on the next loop iteration. + */ pt = NULL; - if (count || !*timeout || signal_pending(current)) - break; - count = wait->error; - if (count) + if (!count) { + count = wait->error; + if (signal_pending(current)) + count = -EINTR; + } + if (count || !*timeout) break; if (*timeout < 0) { @@ -619,99 +642,121 @@ static int do_poll(unsigned int nfds, struct poll_list *list, return count; } +#define N_STACK_PPS ((sizeof(stack_pps) - sizeof(struct poll_list)) / \ + sizeof(struct pollfd)) + int do_sys_poll(struct pollfd __user *ufds, unsigned int nfds, s64 *timeout) { struct poll_wqueues table; - int fdcount, err; - unsigned int i; - struct poll_list *head; - struct poll_list *walk; - struct fdtable *fdt; - int max_fdset; - - /* Do a sanity check on nfds ... 
*/ - rcu_read_lock(); - fdt = files_fdtable(current->files); - max_fdset = fdt->max_fdset; - rcu_read_unlock(); - if (nfds > max_fdset && nfds > OPEN_MAX) + int err = -EFAULT, fdcount, len, size; + /* Allocate small arguments on the stack to save memory and be + faster - use long to make sure the buffer is aligned properly + on 64 bit archs to avoid unaligned access */ + long stack_pps[POLL_STACK_ALLOC/sizeof(long)]; + struct poll_list *const head = (struct poll_list *)stack_pps; + struct poll_list *walk = head; + unsigned long todo = nfds; + + if (nfds > current->signal->rlim[RLIMIT_NOFILE].rlim_cur) return -EINVAL; - poll_initwait(&table); + len = min_t(unsigned int, nfds, N_STACK_PPS); + for (;;) { + walk->next = NULL; + walk->len = len; + if (!len) + break; - head = NULL; - walk = NULL; - i = nfds; - err = -ENOMEM; - while(i!=0) { - struct poll_list *pp; - pp = kmalloc(sizeof(struct poll_list)+ - sizeof(struct pollfd)* - (i>POLLFD_PER_PAGE?POLLFD_PER_PAGE:i), - GFP_KERNEL); - if(pp==NULL) + if (copy_from_user(walk->entries, ufds + nfds-todo, + sizeof(struct pollfd) * walk->len)) goto out_fds; - pp->next=NULL; - pp->len = (i>POLLFD_PER_PAGE?POLLFD_PER_PAGE:i); - if (head == NULL) - head = pp; - else - walk->next = pp; - walk = pp; - if (copy_from_user(pp->entries, ufds + nfds-i, - sizeof(struct pollfd)*pp->len)) { - err = -EFAULT; + todo -= walk->len; + if (!todo) + break; + + len = min(todo, POLLFD_PER_PAGE); + size = sizeof(struct poll_list) + sizeof(struct pollfd) * len; + walk = walk->next = kmalloc(size, GFP_KERNEL); + if (!walk) { + err = -ENOMEM; goto out_fds; } - i -= pp->len; } + poll_initwait(&table); fdcount = do_poll(nfds, head, &table, timeout); + poll_freewait(&table); - /* OK, now copy the revents fields back to user space. */ - walk = head; - err = -EFAULT; - while(walk != NULL) { + for (walk = head; walk; walk = walk->next) { struct pollfd *fds = walk->entries; int j; - for (j=0; j < walk->len; j++, ufds++) { - if(__put_user(fds[j].revents, &ufds->revents)) + for (j = 0; j < walk->len; j++, ufds++) + if (__put_user(fds[j].revents, &ufds->revents)) goto out_fds; - } - walk = walk->next; } + err = fdcount; - if (!fdcount && signal_pending(current)) - err = -EINTR; out_fds: - walk = head; - while(walk!=NULL) { - struct poll_list *pp = walk->next; - kfree(walk); - walk = pp; + walk = head->next; + while (walk) { + struct poll_list *pos = walk; + walk = walk->next; + kfree(pos); } - poll_freewait(&table); + return err; } +static long do_restart_poll(struct restart_block *restart_block) +{ + struct pollfd __user *ufds = (struct pollfd __user*)restart_block->arg0; + int nfds = restart_block->arg1; + s64 timeout = ((s64)restart_block->arg3<<32) | (s64)restart_block->arg2; + int ret; + + ret = do_sys_poll(ufds, nfds, &timeout); + if (ret == -EINTR) { + restart_block->fn = do_restart_poll; + restart_block->arg2 = timeout & 0xFFFFFFFF; + restart_block->arg3 = (u64)timeout >> 32; + ret = -ERESTART_RESTARTBLOCK; + } + return ret; +} + asmlinkage long sys_poll(struct pollfd __user *ufds, unsigned int nfds, long timeout_msecs) { - s64 timeout_jiffies = 0; + s64 timeout_jiffies; + int ret; - if (timeout_msecs) { + if (timeout_msecs > 0) { #if HZ > 1000 /* We can only overflow if HZ > 1000 */ if (timeout_msecs / 1000 > (s64)0x7fffffffffffffffULL / (s64)HZ) timeout_jiffies = -1; else #endif - timeout_jiffies = msecs_to_jiffies(timeout_msecs); + timeout_jiffies = msecs_to_jiffies(timeout_msecs) + 1; + } else { + /* Infinite (< 0) or no (0) timeout */ + timeout_jiffies = timeout_msecs; } - 
	return do_sys_poll(ufds, nfds, &timeout_jiffies);
+	ret = do_sys_poll(ufds, nfds, &timeout_jiffies);
+	if (ret == -EINTR) {
+		struct restart_block *restart_block;
+		restart_block = &current_thread_info()->restart_block;
+		restart_block->fn = do_restart_poll;
+		restart_block->arg0 = (unsigned long)ufds;
+		restart_block->arg1 = nfds;
+		restart_block->arg2 = timeout_jiffies & 0xFFFFFFFF;
+		restart_block->arg3 = (u64)timeout_jiffies >> 32;
+		ret = -ERESTART_RESTARTBLOCK;
+	}
+	return ret;
 }
 
 #ifdef TIF_RESTORE_SIGMASK
@@ -732,7 +777,7 @@ asmlinkage long sys_ppoll(struct pollfd __user *ufds, unsigned int nfds,
 		if ((u64)ts.tv_sec >= (u64)MAX_INT64_SECONDS)
 			timeout = -1;	/* infinite */
 		else {
-			timeout = ROUND_UP(ts.tv_nsec, NSEC_PER_SEC/HZ);
+			timeout = DIV_ROUND_UP(ts.tv_nsec, NSEC_PER_SEC/HZ);
 			timeout += ts.tv_sec * HZ;
 		}
 	}
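
Side note (not part of the patch): the hunks above convert core_sys_select() and do_sys_poll() to use a small on-stack buffer (stack_fds / stack_pps, sized by SELECT_STACK_ALLOC / POLL_STACK_ALLOC) and fall back to a heap allocation only when the descriptor count is too large. The userspace sketch below illustrates that allocation pattern only; names such as STACK_BYTES and demo_alloc_fd_sets() are invented for the example, and malloc()/free() stand in for the kernel's kmalloc(GFP_KERNEL)/kfree().

/*
 * Illustrative userspace sketch (not kernel code): carve the six select
 * bitmaps out of an on-stack buffer when they fit, otherwise fall back to
 * the heap, mirroring the stack_fds fast path added by this patch.
 */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define STACK_BYTES 256			/* stand-in for SELECT_STACK_ALLOC */

struct fd_sets {
	unsigned long *in, *out, *ex;
	unsigned long *res_in, *res_out, *res_ex;
};

static int demo_alloc_fd_sets(struct fd_sets *fds, size_t size,
			      long *stack_buf, size_t stack_bytes,
			      void **to_free)
{
	char *bits = (char *)stack_buf;

	/* Six bitmaps of 'size' bytes each must fit in the stack buffer. */
	if (6 * size > stack_bytes) {
		bits = malloc(6 * size);
		if (!bits)
			return -1;
		*to_free = bits;
	} else {
		*to_free = NULL;
	}

	fds->in      = (unsigned long *)bits;
	fds->out     = (unsigned long *)(bits + size);
	fds->ex      = (unsigned long *)(bits + 2 * size);
	fds->res_in  = (unsigned long *)(bits + 3 * size);
	fds->res_out = (unsigned long *)(bits + 4 * size);
	fds->res_ex  = (unsigned long *)(bits + 5 * size);
	return 0;
}

int main(void)
{
	long stack_buf[STACK_BYTES / sizeof(long)];
	struct fd_sets fds;
	void *to_free;

	/* A small bitmap size stays on the stack; a large one takes the heap path. */
	if (demo_alloc_fd_sets(&fds, 16, stack_buf, sizeof(stack_buf), &to_free))
		return 1;
	memset(fds.in, 0, 16);
	printf("heap used: %s\n", to_free ? "yes" : "no");
	free(to_free);
	return 0;
}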
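
Side note (not part of the patch): sys_poll() and do_restart_poll() above preserve the 64-bit jiffies timeout across a restarted syscall by splitting it into two 32-bit restart_block arguments (arg2 = low word, arg3 = high word). Below is a minimal userspace sketch of just that packing arithmetic, with plain uint32_t variables standing in for the kernel's struct restart_block fields.

/*
 * Illustrative demo (not kernel code) of the arg2/arg3 timeout packing used
 * when poll() returns -ERESTART_RESTARTBLOCK and is later restarted.
 */
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	int64_t timeout_jiffies = 0x123456789abcdefLL;

	/* Pack, as sys_poll()/do_restart_poll() do before returning. */
	uint32_t arg2 = timeout_jiffies & 0xFFFFFFFF;		/* low 32 bits  */
	uint32_t arg3 = (uint64_t)timeout_jiffies >> 32;	/* high 32 bits */

	/* Unpack, as do_restart_poll() does when the syscall is restarted. */
	int64_t restored = ((int64_t)arg3 << 32) | (int64_t)arg2;

	assert(restored == timeout_jiffies);
	printf("restored timeout: %lld jiffies\n", (long long)restored);
	return 0;
}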