	nfs: prevent backlogging of renewd requests
If the renewd send queue gets backlogged (e.g., if the server goes down),
we will keep filling the queue with periodic RENEW/SEQUENCE requests.

This patch schedules a new renewd request if and only if the previous one
has returned, whether with success or failure.

Signed-off-by: Alexandros Batsakis <batsakis@netapp.com>
[Trond.Myklebust@netapp.com: moved nfs4_schedule_state_renewal() into
separate nfs4_renew_release() and nfs41_sequence_release() callbacks to
ensure correct behaviour on call setup failure]
Signed-off-by: Trond Myklebust <Trond.Myklebust@netapp.com>
commit dc96aef96a
parent 888ef2e3f8
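The pattern the patch adopts is easy to model outside the kernel. Below is a
minimal userspace C analogy (hypothetical names, not the kernel API; completion
is simulated inline, whereas in the kernel the release callback fires later,
from rpciod): the next renewal is armed only from the completion path of the
previous request, so at most one RENEW/SEQUENCE is outstanding at a time and an
unresponsive server cannot cause requests to pile up.

#include <stdio.h>
#include <stdbool.h>

struct client {
	bool in_flight;    /* is a RENEW currently outstanding? */
	int renews_left;   /* demo bound so the example terminates */
};

static void schedule_renewal(struct client *clp);

/* Analogue of .rpc_release: runs exactly once per request, on success
 * or failure, and is the only place that re-arms the renewal. */
static void renew_release(struct client *clp)
{
	clp->in_flight = false;
	schedule_renewal(clp);
}

/* Analogue of queueing the async RPC; completion is simulated inline
 * (in the kernel it fires later, from rpciod). */
static void async_renew(struct client *clp)
{
	printf("RENEW sent, %d left\n", clp->renews_left);
	renew_release(clp);
}

static void schedule_renewal(struct client *clp)
{
	if (clp->in_flight || clp->renews_left-- <= 0)
		return;    /* never queue behind a pending request */
	clp->in_flight = true;
	async_renew(clp);
}

int main(void)
{
	struct client clp = { .in_flight = false, .renews_left = 3 };

	schedule_renewal(&clp);
	return 0;
}

The demo counter merely bounds the example; in the kernel the re-arm goes
through a delayed workqueue item rather than direct recursion.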
--- a/fs/nfs/nfs4proc.c
+++ b/fs/nfs/nfs4proc.c
@@ -3147,10 +3147,17 @@ static void nfs4_proc_commit_setup(struct nfs_write_data *data, struct rpc_messa
  * nfs4_proc_async_renew(): This is not one of the nfs_rpc_ops; it is a special
  * standalone procedure for queueing an asynchronous RENEW.
  */
+static void nfs4_renew_release(void *data)
+{
+	struct nfs_client *clp = data;
+
+	nfs4_schedule_state_renewal(clp);
+}
+
 static void nfs4_renew_done(struct rpc_task *task, void *data)
 {
-	struct nfs_client *clp = (struct nfs_client *)task->tk_msg.rpc_argp;
-	unsigned long timestamp = (unsigned long)data;
+	struct nfs_client *clp = data;
+	unsigned long timestamp = task->tk_start;
 
 	if (task->tk_status < 0) {
 		/* Unless we're shutting down, schedule state recovery! */
@@ -3166,6 +3173,7 @@ static void nfs4_renew_done(struct rpc_task *task, void *data)
 
 static const struct rpc_call_ops nfs4_renew_ops = {
 	.rpc_call_done = nfs4_renew_done,
+	.rpc_release = nfs4_renew_release,
 };
 
 int nfs4_proc_async_renew(struct nfs_client *clp, struct rpc_cred *cred)
@@ -3177,7 +3185,7 @@ int nfs4_proc_async_renew(struct nfs_client *clp, struct rpc_cred *cred)
 	};
 
 	return rpc_call_async(clp->cl_rpcclient, &msg, RPC_TASK_SOFT,
-			&nfs4_renew_ops, (void *)jiffies);
+			&nfs4_renew_ops, clp);
 }
 
 int nfs4_proc_renew(struct nfs_client *clp, struct rpc_cred *cred)
@@ -5023,7 +5031,14 @@ static int nfs4_proc_sequence(struct nfs_client *clp, struct rpc_cred *cred)
 				       &res, args.sa_cache_this, 1);
 }
 
-void nfs41_sequence_call_done(struct rpc_task *task, void *data)
+static void nfs41_sequence_release(void *data)
+{
+	struct nfs_client *clp = (struct nfs_client *)data;
+
+	nfs4_schedule_state_renewal(clp);
+}
+
+static void nfs41_sequence_call_done(struct rpc_task *task, void *data)
 {
 	struct nfs_client *clp = (struct nfs_client *)data;
 
@@ -5064,6 +5079,7 @@ static void nfs41_sequence_prepare(struct rpc_task *task, void *data)
 static const struct rpc_call_ops nfs41_sequence_ops = {
 	.rpc_call_done = nfs41_sequence_call_done,
 	.rpc_call_prepare = nfs41_sequence_prepare,
+	.rpc_release = nfs41_sequence_release,
 };
 
 static int nfs41_proc_async_sequence(struct nfs_client *clp,
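Two details of the nfs4proc.c hunks above are worth noting. First, per the
bracketed note in the commit message, the re-arm lives in .rpc_release rather
than .rpc_call_done: the release callback also runs when call setup fails, so
renewd gets rescheduled in every outcome. Second, the old code smuggled the
submission time through the callback data pointer, which left nowhere to pass
the nfs_client to a release callback; the new code takes the timestamp from
task->tk_start, freeing the data slot to carry clp. The call-site change, side
by side (copied from the hunk above):

	/* before: data carries a timestamp; clp was dug out of rpc_argp */
	return rpc_call_async(clp->cl_rpcclient, &msg, RPC_TASK_SOFT,
			&nfs4_renew_ops, (void *)jiffies);

	/* after: data carries clp; task->tk_start supplies the timestamp */
	return rpc_call_async(clp->cl_rpcclient, &msg, RPC_TASK_SOFT,
			&nfs4_renew_ops, clp);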
--- a/fs/nfs/nfs4renewd.c
+++ b/fs/nfs/nfs4renewd.c
@@ -36,11 +36,6 @@
  * as an rpc_task, not a real kernel thread, so it always runs in rpciod's
  * context.  There is one renewd per nfs_server.
  *
- * TODO: If the send queue gets backlogged (e.g., if the server goes down),
- * we will keep filling the queue with periodic RENEW requests.  We need a
- * mechanism for ensuring that if renewd successfully sends off a request,
- * then it only wakes up when the request is finished.  Maybe use the
- * child task framework of the RPC layer?
  */
 
 #include <linux/mm.h>
@@ -63,7 +58,7 @@ nfs4_renew_state(struct work_struct *work)
 	struct nfs_client *clp =
 		container_of(work, struct nfs_client, cl_renewd.work);
 	struct rpc_cred *cred;
-	long lease, timeout;
+	long lease;
 	unsigned long last, now;
 
 	ops = nfs4_state_renewal_ops[clp->cl_minorversion];
@@ -75,7 +70,6 @@ nfs4_renew_state(struct work_struct *work)
 	lease = clp->cl_lease_time;
 	last = clp->cl_last_renewal;
 	now = jiffies;
-	timeout = (2 * lease) / 3 + (long)last - (long)now;
 	/* Are we close to a lease timeout? */
 	if (time_after(now, last + lease/3)) {
 		cred = ops->get_state_renewal_cred_locked(clp);
@@ -90,19 +84,15 @@ nfs4_renew_state(struct work_struct *work)
 			/* Queue an asynchronous RENEW. */
 			ops->sched_state_renewal(clp, cred);
 			put_rpccred(cred);
+			goto out_exp;
 		}
-		timeout = (2 * lease) / 3;
-		spin_lock(&clp->cl_lock);
-	} else
+	} else {
 		dprintk("%s: failed to call renewd. Reason: lease not expired \n",
 				__func__);
-	if (timeout < 5 * HZ)    /* safeguard */
-		timeout = 5 * HZ;
-	dprintk("%s: requeueing work. Lease period = %ld\n",
-			__func__, (timeout + HZ - 1) / HZ);
-	cancel_delayed_work(&clp->cl_renewd);
-	schedule_delayed_work(&clp->cl_renewd, timeout);
-	spin_unlock(&clp->cl_lock);
+		spin_unlock(&clp->cl_lock);
+	}
+	nfs4_schedule_state_renewal(clp);
+out_exp:
 	nfs_expire_unreferenced_delegations(clp);
 out:
 	dprintk("%s: done\n", __func__);
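The timeout arithmetic deleted from nfs4_renew_state() is not lost: every path
now funnels into nfs4_schedule_state_renewal() (defined in fs/nfs/nfs4state.c,
outside this diff), which performs the same computation. A rough sketch of that
function's shape in this era, reconstructed for context and not part of the
patch (the exact body may differ):

void nfs4_schedule_state_renewal(struct nfs_client *clp)
{
	long timeout;

	spin_lock(&clp->cl_lock);
	/* same formula the patch removes from nfs4_renew_state() */
	timeout = (2 * clp->cl_lease_time) / 3 +
			(long)clp->cl_last_renewal - (long)jiffies;
	if (timeout < 5 * HZ)	/* safeguard */
		timeout = 5 * HZ;
	cancel_delayed_work(&clp->cl_renewd);
	schedule_delayed_work(&clp->cl_renewd, timeout);
	spin_unlock(&clp->cl_lock);
}

Because the delayed work item is re-armed either here (lease not yet due, or no
credential available) or from the rpc_release callbacks once an in-flight
RENEW/SEQUENCE completes, renewd can never queue more than one request against
an unresponsive server.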