X-Git-Url: http://ftp.safe.ca/?a=blobdiff_plain;f=fs%2Fnfs%2Fnfs4renewd.c;h=d87f10327b72a8bdd8ea0e23f4050685b2f12a7a;hb=bef5bc2464517cbbf8f85f09b5ade46904afec9a;hp=823298561c0a9a7904695ce554899165d26f7403;hpb=65f27f38446e1976cc98fd3004b110fedcddd189;p=safe%2Fjmp%2Flinux-2.6

diff --git a/fs/nfs/nfs4renewd.c b/fs/nfs/nfs4renewd.c
index 8232985..d87f103 100644
--- a/fs/nfs/nfs4renewd.c
+++ b/fs/nfs/nfs4renewd.c
@@ -36,15 +36,8 @@
  * as an rpc_task, not a real kernel thread, so it always runs in rpciod's
  * context. There is one renewd per nfs_server.
  *
- * TODO: If the send queue gets backlogged (e.g., if the server goes down),
- * we will keep filling the queue with periodic RENEW requests. We need a
- * mechanism for ensuring that if renewd successfully sends off a request,
- * then it only wakes up when the request is finished. Maybe use the
- * child task framework of the RPC layer?
  */
 
-#include 
-#include 
 #include 
 #include 
 #include 
@@ -61,14 +54,15 @@
 void
 nfs4_renew_state(struct work_struct *work)
 {
+	struct nfs4_state_maintenance_ops *ops;
 	struct nfs_client *clp =
 		container_of(work, struct nfs_client, cl_renewd.work);
 	struct rpc_cred *cred;
-	long lease, timeout;
+	long lease;
 	unsigned long last, now;
 
-	down_read(&clp->cl_sem);
-	dprintk("%s: start\n", __FUNCTION__);
+	ops = nfs4_state_renewal_ops[clp->cl_minorversion];
+	dprintk("%s: start\n", __func__);
 	/* Are there any active superblocks? */
 	if (list_empty(&clp->cl_superblocks))
 		goto out;
@@ -76,38 +70,34 @@ nfs4_renew_state(struct work_struct *work)
 	lease = clp->cl_lease_time;
 	last = clp->cl_last_renewal;
 	now = jiffies;
-	timeout = (2 * lease) / 3 + (long)last - (long)now;
 	/* Are we close to a lease timeout? */
 	if (time_after(now, last + lease/3)) {
-		cred = nfs4_get_renew_cred(clp);
+		cred = ops->get_state_renewal_cred_locked(clp);
+		spin_unlock(&clp->cl_lock);
 		if (cred == NULL) {
-			set_bit(NFS4CLNT_LEASE_EXPIRED, &clp->cl_state);
-			spin_unlock(&clp->cl_lock);
+			if (list_empty(&clp->cl_delegations)) {
+				set_bit(NFS4CLNT_LEASE_EXPIRED, &clp->cl_state);
+				goto out;
+			}
 			nfs_expire_all_delegations(clp);
-			goto out;
+		} else {
+			/* Queue an asynchronous RENEW. */
+			ops->sched_state_renewal(clp, cred);
+			put_rpccred(cred);
+			goto out_exp;
 		}
-		spin_unlock(&clp->cl_lock);
-		/* Queue an asynchronous RENEW. */
-		nfs4_proc_async_renew(clp, cred);
-		put_rpccred(cred);
-		timeout = (2 * lease) / 3;
-		spin_lock(&clp->cl_lock);
-	} else
+	} else {
 		dprintk("%s: failed to call renewd. Reason: lease not expired \n",
-				__FUNCTION__);
-	if (timeout < 5 * HZ) /* safeguard */
-		timeout = 5 * HZ;
-	dprintk("%s: requeueing work. Lease period = %ld\n",
-			__FUNCTION__, (timeout + HZ - 1) / HZ);
-	cancel_delayed_work(&clp->cl_renewd);
-	schedule_delayed_work(&clp->cl_renewd, timeout);
-	spin_unlock(&clp->cl_lock);
+				__func__);
+		spin_unlock(&clp->cl_lock);
+	}
+	nfs4_schedule_state_renewal(clp);
+out_exp:
+	nfs_expire_unreferenced_delegations(clp);
 out:
-	up_read(&clp->cl_sem);
-	dprintk("%s: done\n", __FUNCTION__);
+	dprintk("%s: done\n", __func__);
 }
 
-/* Must be called with clp->cl_sem locked for writes */
 void
 nfs4_schedule_state_renewal(struct nfs_client *clp)
 {
@@ -119,7 +109,7 @@ nfs4_schedule_state_renewal(struct nfs_client *clp)
 	if (timeout < 5 * HZ)
 		timeout = 5 * HZ;
 	dprintk("%s: requeueing work. Lease period = %ld\n",
-			__FUNCTION__, (timeout + HZ - 1) / HZ);
+			__func__, (timeout + HZ - 1) / HZ);
 	cancel_delayed_work(&clp->cl_renewd);
 	schedule_delayed_work(&clp->cl_renewd, timeout);
 	set_bit(NFS_CS_RENEWD, &clp->cl_res_state);
@@ -127,18 +117,9 @@ nfs4_schedule_state_renewal(struct nfs_client *clp)
 }
 
 void
-nfs4_renewd_prepare_shutdown(struct nfs_server *server)
-{
-	flush_scheduled_work();
-}
-
-void
 nfs4_kill_renewd(struct nfs_client *clp)
 {
-	down_read(&clp->cl_sem);
-	cancel_delayed_work(&clp->cl_renewd);
-	up_read(&clp->cl_sem);
-	flush_scheduled_work();
+	cancel_delayed_work_sync(&clp->cl_renewd);
 }
 
 /*
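For reference, the requeue interval computed around nfs4_schedule_state_renewal() above is two thirds of the lease period, measured from the last successful renewal and clamped to a floor of 5 * HZ. The standalone C sketch below only models that arithmetic with ordinary integers; the HZ value, the sample lease, and the helper name renew_timeout() are illustrative stand-ins, not kernel code.

#include <stdio.h>

#define HZ 1000			/* stand-in for the kernel tick rate */

/*
 * Model of the renewd requeue timeout: schedule the next RENEW after
 * 2/3 of the lease, counted from the last renewal, but never sooner
 * than 5 seconds from now.
 */
static long renew_timeout(long lease, unsigned long last_renewal,
			  unsigned long now)
{
	long timeout = (2 * lease) / 3 + (long)last_renewal - (long)now;

	if (timeout < 5 * HZ)	/* safeguard, as in the kernel code */
		timeout = 5 * HZ;
	return timeout;
}

int main(void)
{
	long lease = 90 * HZ;		/* e.g. a 90-second lease */
	unsigned long last = 0;		/* "jiffies" at last renewal */
	unsigned long now = 10 * HZ;	/* 10 seconds later */
	long t = renew_timeout(lease, last, now);

	printf("requeue in %ld ticks (%ld s)\n", t, t / HZ);
	return 0;
}

With these sample numbers the next renewal is queued 50 seconds out, i.e. 60 seconds (two thirds of 90) after the last renewal.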