lib: mgmtd: only clear pending for the in-progress command
The lock/unlocks are done short-circuit, so they are never pending; however, the unlock-notification handler was resuming the command whenever the pending flag was set, and in all such cases the pending flag had been set by another command. For example, an implicit commit locks, then, when notified that it is done, unlocks; that unlock was clearing the set-config pending flag and resuming that command incorrectly.

Signed-off-by: Christian Hopps <chopps@labn.net>
commit 28c145ebd2
parent 71b0e94261
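To make the failure mode described in the commit message concrete, here is a minimal standalone C sketch (not FRR code; the struct, function, and message names are illustrative) of why resuming on any pending flag is wrong, and how gating on the short-circuit flag and the pending command name avoids it:

/*
 * Standalone sketch of the bug: a short-circuit (inline) lock/unlock
 * notification must not resume a command whose pending flag belongs to a
 * different request (e.g. SETCFG during an implicit commit).
 */
#include <assert.h>
#include <stdbool.h>
#include <stdio.h>
#include <string.h>

struct fake_vty {
	const char *pending_cmd; /* which request is awaiting a reply */
};

static void resume_response(struct fake_vty *vty)
{
	printf("resuming command: %s\n", vty->pending_cmd);
	vty->pending_cmd = NULL;
}

/* Old behavior: resume whenever anything is pending. */
static void lock_notified_old(struct fake_vty *vty)
{
	if (vty->pending_cmd)
		resume_response(vty); /* wrong: may resume SETCFG_REQ */
}

/*
 * Fixed behavior: ignore notifications for short-circuit requests and
 * insist that the pending command really is the LOCKDS request.
 */
static void lock_notified_fixed(struct fake_vty *vty, bool is_short_circuit)
{
	if (!is_short_circuit && vty->pending_cmd) {
		assert(!strcmp(vty->pending_cmd, "LOCKDS_REQ"));
		resume_response(vty);
	}
}

int main(void)
{
	/* Implicit commit: SETCFG is pending while the inline
	 * (short-circuit) lock/unlock completes immediately. */
	struct fake_vty vty = { .pending_cmd = "SETCFG_REQ" };

	lock_notified_old(&vty); /* incorrectly resumes SETCFG_REQ */

	vty.pending_cmd = "SETCFG_REQ";
	lock_notified_fixed(&vty, true); /* leaves SETCFG_REQ pending */
	printf("still pending: %s\n",
	       vty.pending_cmd ? vty.pending_cmd : "none");
	return 0;
}

Compiled on its own, the first call shows the unlock notification resuming SETCFG_REQ, while the fixed handler leaves it pending so the set-config reply can resume it.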
@@ -629,6 +629,11 @@ uint mgmt_fe_client_session_count(struct mgmt_fe_client *client)
 	return mgmt_sessions_count(&client->sessions);
 }
 
+bool mgmt_fe_client_current_msg_short_circuit(struct mgmt_fe_client *client)
+{
+	return client->client.conn.is_short_circuit;
+}
+
 /*
  * Create a new Session for a Frontend Client connection.
  */
@@ -373,6 +373,12 @@ extern void mgmt_fe_client_destroy(struct mgmt_fe_client *client);
  */
 extern uint mgmt_fe_client_session_count(struct mgmt_fe_client *client);
 
+/*
+ * True if the current handled message is being short-circuited
+ */
+extern bool
+mgmt_fe_client_current_msg_short_circuit(struct mgmt_fe_client *client);
+
 #ifdef __cplusplus
 }
 #endif
@@ -3524,6 +3524,7 @@ static void vty_mgmt_ds_lock_notified(struct mgmt_fe_client *client,
 				      char *errmsg_if_any)
 {
 	struct vty *vty;
+	bool is_short_circuit = mgmt_fe_client_current_msg_short_circuit(client);
 
 	vty = (struct vty *)session_ctx;
 
@@ -3540,8 +3541,10 @@ static void vty_mgmt_ds_lock_notified(struct mgmt_fe_client *client,
 		vty->mgmt_locked_running_ds = lock_ds;
 	}
 
-	if (vty->mgmt_req_pending_cmd)
+	if (!is_short_circuit && vty->mgmt_req_pending_cmd) {
+		assert(!strcmp(vty->mgmt_req_pending_cmd, "MESSAGE_LOCKDS_REQ"));
 		vty_mgmt_resume_response(vty, success);
+	}
 }
 
 static void vty_mgmt_set_config_result_notified(
@@ -3734,6 +3737,7 @@ int vty_mgmt_send_config_data(struct vty *vty, bool implicit_commit)
 		} else if (vty_mgmt_lock_running_inline(vty)) {
 			vty_out(vty,
 				"%% command failed, could not lock running DS\n");
+			vty_mgmt_unlock_candidate_inline(vty);
 			return -1;
 		}
 	}
@@ -593,9 +593,8 @@ mgmt_fe_session_handle_lockds_req_msg(struct mgmt_fe_session_ctx *session,
 	}
 
 	if (lockds_req->lock) {
-		if (mgmt_fe_session_write_lock_ds(lockds_req->ds_id,
-						  ds_ctx, session)
-		    != 0) {
+		if (mgmt_fe_session_write_lock_ds(lockds_req->ds_id, ds_ctx,
+						  session)) {
 			fe_adapter_send_lockds_reply(
 				session, lockds_req->ds_id, lockds_req->req_id,
 				lockds_req->lock, false,
@@ -2356,6 +2356,7 @@ static int vtysh_exit(struct vty *vty)
 	if (vty->node == CONFIG_NODE) {
 		/* resync in case one of the daemons is somewhere else */
 		vtysh_execute("end");
+		/* NOTE: a rather expensive thing to do, can we avoid it? */
 		vtysh_execute("configure terminal file-lock");
 	}
 	return CMD_SUCCESS;