diff --git a/contrib/init/lightningd.service b/contrib/init/lightningd.service index 8e46096b6497..60f76b045499 100644 --- a/contrib/init/lightningd.service +++ b/contrib/init/lightningd.service @@ -16,6 +16,10 @@ After=network-online.target [Service] ExecStart=/usr/bin/lightningd --conf /etc/lightningd/lightningd.conf --pid-file=/run/lightningd/lightningd.pid +# graceful was introduced in v26.06: before that, simply stop. +ExecStop=/usr/bin/lightning-cli --conf /etc/lightningd/lightningd.conf graceful 20 +ExecStop=/usr/bin/lightning-cli --conf /etc/lightningd/lightningd.conf stop +TimeoutStopSec=60 # Creates /run/lightningd owned by bitcoin RuntimeDirectory=lightningd diff --git a/contrib/msggen/msggen/schema.json b/contrib/msggen/msggen/schema.json index 731545456bf3..beffe2debfdd 100644 --- a/contrib/msggen/msggen/schema.json +++ b/contrib/msggen/msggen/schema.json @@ -15941,6 +15941,72 @@ } ] }, + "graceful.json": { + "$schema": "../rpc-schema-draft.json", + "type": "object", + "rpc": "graceful", + "added": "v26.06", + "title": "Command to prepare Core Lightning node for stopping.", + "description": [ + "**graceful** is a RPC command to prevent further htlcs, and disconnect all idle peers. It returns when all HTLCs are complete, and all peers disconnected: then you can shutdown. It also sends notifications about HTLC expiry, so you can judge how long it is safe to be offline.", + "With a timeout, it always returns after that many seconds: if any peer connections or HTLCs are still pending, those are returned. An empty response means nothing is pending" + ], + "request": { + "required": [], + "additionalProperties": false, + "properties": { + "timeout": { + "type": "u32", + "description": [ + "If set, the graceful command will return after this time even if not all HTLCs have terminated. Useful for scripting, where you may want to follow with `stop`." 
+ ] + } + } + }, + "response": { + "required": [], + "additionalProperties": false, + "properties": { + "pending_htlc_expiries": { + "type": "array", + "items": { + "type": "u32" + }, + "description": [ + "The (sorted) expiry blockheights of all HTLCs which are not resolved" + ] + }, + "pending_peers": { + "type": "array", + "items": { + "type": "pubkey" + }, + "description": [ + "Any peers still connected (presumably because they have outstanding HTLCs)" + ] + } + } + }, + "author": [ + "Rusty Russell [rusty@rustcorp.com.au](mailto:rusty@rustcorp.com.au) is mainly responsible." + ], + "see_also": [ + "lightning-stop(7)" + ], + "resources": [ + "Main web site: [https://github.com/ElementsProject/lightning](https://github.com/ElementsProject/lightning)" + ], + "examples": [ + { + "request": { + "id": "example:graceful#1", + "method": "graceful", + "params": {} + }, + "response": {} + } + ] + }, "help.json": { "$schema": "../rpc-schema-draft.json", "type": "object", @@ -34981,6 +35047,9 @@ "Vincenzo Palazzo [vincenzo.palazzo@protonmail.com](mailto:vincenzo.palazzo@protonmail.com) wrote the initial version of this man page,", "but many others did the hard work of actually implementing this rpc command." 
], + "see_also": [ + "lightning-graceful(7)" + ], "resources": [ "Main web site: [https://github.com/ElementsProject/lightning](https://github.com/ElementsProject/lightning)" ], diff --git a/doc/Makefile b/doc/Makefile index c430de68c693..2839fcf1a155 100644 --- a/doc/Makefile +++ b/doc/Makefile @@ -76,6 +76,7 @@ MARKDOWNPAGES := doc/addgossip.7 \ doc/getlog.7 \ doc/getroute.7 \ doc/getroutes.7 \ + doc/graceful.7 \ doc/help.7 \ doc/injectonionmessage.7 \ doc/injectpaymentonion.7 \ diff --git a/doc/index.rst b/doc/index.rst index 6ecc0a945bdb..77d06f211e1f 100644 --- a/doc/index.rst +++ b/doc/index.rst @@ -84,6 +84,7 @@ Core Lightning Documentation getlog getroute getroutes + graceful help hook-commitment_revocation hook-custommsg diff --git a/doc/schemas/graceful.json b/doc/schemas/graceful.json new file mode 100644 index 000000000000..9ff2815e96e5 --- /dev/null +++ b/doc/schemas/graceful.json @@ -0,0 +1,68 @@ +{ + "$schema": "../rpc-schema-draft.json", + "type": "object", + "rpc": "graceful", + "added": "v26.06", + "title": "Command to prepare Core Lightning node for stopping.", + "description": [ + "**graceful** is a RPC command to prevent further htlcs, and disconnect all idle peers. It returns when all HTLCs are complete, and all peers disconnected: then you can shutdown. It also sends notifications about HTLC expiry, so you can judge how long it is safe to be offline.", + "With a timeout, it always returns after that many seconds: if any peer connections or HTLCs are still pending, those are returned. An empty response means nothing is pending" + ], + "request": { + "required": [], + "additionalProperties": false, + "properties": { + "timeout": { + "type": "u32", + "description": [ + "If set, the graceful command will return after this time even if not all HTLCs have terminated. Useful for scripting, where you may want to follow with `stop`." 
+ ] + } + } + }, + "response": { + "required": [ + ], + "additionalProperties": false, + "properties": { + "pending_htlc_expiries": { + "type": "array", + "items": { + "type": "u32" + }, + "description": [ + "The (sorted) expiry blockheights of all HTLCs which are not resolved" + ] + }, + "pending_peers": { + "type": "array", + "items": { + "type": "pubkey" + }, + "description": [ + "Any peers still connected (presumably because they have outstanding HTLCs)" + ] + } + } + }, + "author": [ + "Rusty Russell [rusty@rustcorp.com.au](mailto:rusty@rustcorp.com.au) is mainly responsible." + ], + "see_also": [ + "lightning-stop(7)" + ], + "resources": [ + "Main web site: [https://github.com/ElementsProject/lightning](https://github.com/ElementsProject/lightning)" + ], + "examples": [ + { + "request": { + "id": "example:graceful#1", + "method": "graceful", + "params": {} + }, + "response": { + } + } + ] +} diff --git a/doc/schemas/stop.json b/doc/schemas/stop.json index 159f7862b033..bf52d65e8a0d 100644 --- a/doc/schemas/stop.json +++ b/doc/schemas/stop.json @@ -30,6 +30,9 @@ "Vincenzo Palazzo [vincenzo.palazzo@protonmail.com](mailto:vincenzo.palazzo@protonmail.com) wrote the initial version of this man page,", "but many others did the hard work of actually implementing this rpc command." 
], + "see_also": [ + "lightning-graceful(7)" + ], "resources": [ "Main web site: [https://github.com/ElementsProject/lightning](https://github.com/ElementsProject/lightning)" ], diff --git a/lightningd/channel.c b/lightningd/channel.c index 62d6d995c5f3..1cd40871ed2d 100644 --- a/lightningd/channel.c +++ b/lightningd/channel.c @@ -27,7 +27,7 @@ void channel_set_owner(struct channel *channel, struct subd *owner) subd_release_channel(old_owner, channel); } -struct htlc_out *channel_has_htlc_out(struct channel *channel) +struct htlc_out *channel_has_htlc_out(const struct channel *channel) { struct htlc_out_map_iter outi; struct htlc_out *hout; @@ -43,7 +43,7 @@ struct htlc_out *channel_has_htlc_out(struct channel *channel) return NULL; } -struct htlc_in *channel_has_htlc_in(struct channel *channel) +struct htlc_in *channel_has_htlc_in(const struct channel *channel) { struct htlc_in_map_iter ini; struct htlc_in *hin; diff --git a/lightningd/channel.h b/lightningd/channel.h index 13656474f8ae..46add63f51a4 100644 --- a/lightningd/channel.h +++ b/lightningd/channel.h @@ -962,8 +962,8 @@ void get_channel_basepoints(struct lightningd *ld, void channel_set_billboard(struct channel *channel, bool perm, const char *str TAKES); -struct htlc_in *channel_has_htlc_in(struct channel *channel); -struct htlc_out *channel_has_htlc_out(struct channel *channel); +struct htlc_in *channel_has_htlc_in(const struct channel *channel); +struct htlc_out *channel_has_htlc_out(const struct channel *channel); /* hin can be NULL */ const u8 *channel_update_for_error(const tal_t *ctx, diff --git a/lightningd/jsonrpc.c b/lightningd/jsonrpc.c index 6d9e83aa605e..cf8f68a20f3e 100644 --- a/lightningd/jsonrpc.c +++ b/lightningd/jsonrpc.c @@ -1156,7 +1156,8 @@ static struct io_plan *start_json_stream(struct io_conn *conn, io_wake(conn); /* Once the stop_conn conn is drained, we can shut down. 
*/ - if (jcon->ld->stop_conn == conn && jcon->ld->state == LD_STATE_RUNNING) { + if (jcon->ld->stop_conn == conn + && (jcon->ld->state == LD_STATE_RUNNING || jcon->ld->state == LD_STATE_GRACE)) { /* Return us to toplevel lightningd.c */ log_debug(jcon->ld->log, "io_break: %s", __func__); io_break(jcon->ld); diff --git a/lightningd/lightningd.c b/lightningd/lightningd.c index 3e21caa69482..cd0d8b05890f 100644 --- a/lightningd/lightningd.c +++ b/lightningd/lightningd.c @@ -238,6 +238,7 @@ static struct lightningd *new_lightningd(const tal_t *ctx) list_head_init(&ld->splice_commands); list_head_init(&ld->waitblockheight_commands); list_head_init(&ld->wait_commands); + list_head_init(&ld->graceful_commands); /*~ Tal also explicitly supports arrays: it stores the number of * elements, which can be accessed with tal_count() (or tal_bytelen() diff --git a/lightningd/lightningd.h b/lightningd/lightningd.h index 3b4e0e84d904..81a03d41261f 100644 --- a/lightningd/lightningd.h +++ b/lightningd/lightningd.h @@ -95,8 +95,13 @@ struct config { typedef STRMAP(const char *) alt_subdaemon_map; enum lightningd_state { + /* Starting up */ LD_STATE_INITIALIZING, + /* Normal */ LD_STATE_RUNNING, + /* Waiting for graceful shutdown */ + LD_STATE_GRACE, + /* Shutting down */ LD_STATE_SHUTDOWN, }; @@ -250,7 +255,8 @@ struct lightningd { struct list_head disconnect_commands; /* Outstanding wait commands */ struct list_head wait_commands; - + /* Outstanding graceful commands */ + struct list_head graceful_commands; /* Outstanding splice commands. 
*/ struct list_head splice_commands; diff --git a/lightningd/peer_control.c b/lightningd/peer_control.c index ed00651814bc..3229a30212ec 100644 --- a/lightningd/peer_control.c +++ b/lightningd/peer_control.c @@ -1382,6 +1382,14 @@ peer_connected_serialize(struct peer_connected_hook_payload *payload, json_object_end(stream); /* .peer */ } +static bool ignore_idle_channel(const struct lightningd *ld, + const struct channel *channel) +{ + return ld->state == LD_STATE_GRACE + && !channel_has_htlc_out(channel) + && !channel_has_htlc_in(channel); +} + /* Talk to connectd about an active channel */ static void connect_activate_subd(struct lightningd *ld, struct channel *channel) { @@ -1558,6 +1566,12 @@ static void peer_connected_hook_final(struct peer_connected_hook_payload *payloa list_for_each(&peer->channels, channel, list) { /* FIXME: It can race by opening a channel before this! */ if (channel_state_wants_peercomms(channel->state) && !channel->owner) { + if (ignore_idle_channel(ld, channel)) { + log_debug(channel->log, + "Peer has reconnected, but gracefully shutting down; " + "not connecting subd"); + continue; + } log_debug(channel->log, "Peer has reconnected, state %s: connecting subd", channel_state_name(channel)); @@ -2064,6 +2078,22 @@ void handle_peer_spoke(struct lightningd *ld, const u8 *msg) return; } + if (msgtype == WIRE_CHANNEL_REESTABLISH + && ignore_idle_channel(ld, channel)) { + log_debug(channel->log, + "Peer sent channel_reestablish, but gracefully shutting down; " + "sending warning and ignoring"); + error = towire_warningfmt(tmpctx, &channel_id, + "Declining to reestablish idle channel " + "because this node will be halting soon."); + /* Don't goto send_error; we don't want to disconnect. 
*/ + subd_send_msg(ld->connectd, + take(towire_connectd_peer_send_msg(NULL, &peer->id, + peer->connectd_counter, + error))); + return; + } + log_debug(channel->log, "channel already active"); if (channel->state == DUALOPEND_AWAITING_LOCKIN) { pfd = sockpair(tmpctx, channel, &other_fd, &error); @@ -2098,7 +2128,7 @@ void handle_peer_spoke(struct lightningd *ld, const u8 *msg) } if (peer->uncommitted_channel) { error = towire_errorfmt(tmpctx, &channel_id, - "Multiple simulteneous opens not supported"); + "Multiple simultaneous opens not supported"); goto send_error; } peer->uncommitted_channel = new_uncommitted_channel(peer); @@ -2239,6 +2269,9 @@ static void peer_disconnected(struct lightningd *ld, /* If connection was only thing keeping it, this will delete it. */ if (p) maybe_delete_peer(p); + + /* Maybe graceful wants to know? */ + check_graceful_shutdown(ld); } void handle_peer_disconnected(struct lightningd *ld, const u8 *msg) @@ -2868,7 +2901,8 @@ static void setup_peer(struct peer *peer) && !(channel->channel_flags & CHANNEL_FLAGS_ANNOUNCE_CHANNEL)) continue; - if (channel_state_wants_peercomms(channel->state)) + if (channel_state_wants_peercomms(channel->state) + && !ignore_idle_channel(ld, channel)) connect = true; if (channel_important_filter(channel, NULL)) important = true; @@ -2971,6 +3005,225 @@ static struct command_result *param_peer(struct command *cmd, return NULL; } +static void graceful_disconnect(struct peer *peer) +{ + force_peer_disconnect(peer->ld, peer, "graceful shutdown"); +} + +/* Returns number currently connected */ +static size_t disconnect_idle_peers(struct lightningd *ld) +{ + struct peer *peer; + struct peer_node_id_map_iter it; + size_t num_connected = 0; + + for (peer = peer_node_id_map_first(ld->peers, &it); + peer; + peer = peer_node_id_map_next(ld->peers, &it)) { + bool all_idle = true; + const struct channel *channel; + + if (peer->connected == PEER_DISCONNECTED) + continue; + + num_connected++; + 
list_for_each(&peer->channels, channel, list) { + if (!ignore_idle_channel(ld, channel)) + all_idle = false; + } + /* We can't use force_peer_disconnect here, since we must not + * free the channel: make a timer do the dirty work! */ + if (all_idle) { + new_reltimer(ld->timers, peer, time_from_sec(0), + graceful_disconnect, peer); + } + } + return num_connected; +} + +struct graceful_waiter { + struct list_node list; + struct command *cmd; + const char *last_msg; + struct oneshot *timeout; +}; + +static struct command_result *check_graceful_shutdown_progress(struct lightningd *ld, struct command *cmd) +{ + struct htlc_out_map_iter outi; + const struct htlc_out *hout, *closest_hout = NULL; + struct htlc_in_map_iter ini; + const struct htlc_in *hin, *closest_hin = NULL; + size_t num_connected; + struct graceful_waiter *w; + const char *msg; + + if (ld->state != LD_STATE_GRACE) + return NULL; + + /* Try disconnecing anyone who no longer has htlcs */ + num_connected = disconnect_idle_peers(ld); + + /* Report on any remaining htlcs */ + for (hout = htlc_out_map_first(ld->htlcs_out, &outi); + hout; + hout = htlc_out_map_next(ld->htlcs_out, &outi)) { + if (!closest_hout || hout->cltv_expiry < closest_hout->cltv_expiry) + closest_hout = hout; + } + for (hin = htlc_in_map_first(ld->htlcs_in, &ini); + hin; + hin = htlc_in_map_next(ld->htlcs_in, &ini)) { + if (!closest_hin || hin->cltv_expiry < closest_hin->cltv_expiry) + closest_hin = hin; + } + + /* Choose single closest one if both */ + if (closest_hin && closest_hout) { + if (closest_hin->cltv_expiry < closest_hout->cltv_expiry) + closest_hout = NULL; + else + closest_hin = NULL; + } + + /* If given a cmd, only do that one */ + if (closest_hin || closest_hout) { + u32 expiry = closest_hin ? closest_hin->cltv_expiry : closest_hout->cltv_expiry; + const struct channel *c = closest_hin ? 
closest_hin->key.channel : closest_hout->key.channel; + u32 blockheight = get_block_height(ld->topology); + const char *state = htlc_state_name(closest_hin ? closest_hin->hstate : closest_hout->hstate); + + msg = tal_fmt(tmpctx, + "Next HTLC %s expires at block #%u (%u blocks %s) %s peer %s (%s)", + state, expiry, + expiry >= blockheight ? expiry - blockheight : blockheight - expiry, + expiry >= blockheight ? "from now" : "ago", + closest_hin ? "coming from" : "going to", + fmt_node_id(tmpctx, &c->peer->id), + c->peer->connected == PEER_CONNECTED ? "connected" : "disconnected"); + } else if (num_connected) { + msg = tal_fmt(tmpctx, "%zu peers still connected", num_connected); + } else { + /* All finished! */ + if (cmd) + return command_success(cmd, json_stream_success(cmd)); + while ((w = list_pop(&ld->graceful_commands, struct graceful_waiter, list)) != NULL) + was_pending(command_success(w->cmd, json_stream_success(w->cmd))); + return NULL; + } + + /* Otherwise, notify everyone (iff it has changed). 
*/ + list_for_each(&ld->graceful_commands, w, list) { + if (!w->last_msg || !streq(w->last_msg, msg)) { + json_notify_fmt(w->cmd, LOG_INFORM, "%s", msg); + tal_free(w->last_msg); + w->last_msg = tal_strdup(w, msg); + } + } + if (cmd) + return command_still_pending(cmd); + return NULL; +} + +static int cmp_height(const u32 *a, + const u32 *b, + void *unused) +{ + if (*a > *b) + return 1; + if (*a < *b) + return -1; + return 0; +} + +static void graceful_timeout(struct graceful_waiter *gw) +{ + struct htlc_out_map_iter outi; + const struct htlc_out *hout; + struct htlc_in_map_iter ini; + const struct htlc_in *hin; + struct peer *peer; + struct peer_node_id_map_iter it; + struct lightningd *ld = gw->cmd->ld; + u32 *heights = tal_arr(tmpctx, u32, 0); + struct node_id *peers = tal_arr(tmpctx, struct node_id, 0); + struct json_stream *result; + + /* Report on any remaining htlcs */ + for (hout = htlc_out_map_first(ld->htlcs_out, &outi); + hout; + hout = htlc_out_map_next(ld->htlcs_out, &outi)) { + tal_arr_expand(&heights, hout->cltv_expiry); + } + for (hin = htlc_in_map_first(ld->htlcs_in, &ini); + hin; + hin = htlc_in_map_next(ld->htlcs_in, &ini)) { + tal_arr_expand(&heights, hin->cltv_expiry); + } + + asort(heights, tal_count(heights), cmp_height, NULL); + + for (peer = peer_node_id_map_first(ld->peers, &it); + peer; + peer = peer_node_id_map_next(ld->peers, &it)) { + if (peer->connected != PEER_DISCONNECTED) + tal_arr_expand(&peers, peer->id); + } + + result = json_stream_success(gw->cmd); + if (tal_count(heights)) { + json_array_start(result, "pending_htlc_expiries"); + for (size_t i = 0; i < tal_count(heights); i++) + json_add_u32(result, NULL, heights[i]); + json_array_end(result); + } + if (tal_count(peers)) { + json_array_start(result, "pending_peers"); + for (size_t i = 0; i < tal_count(peers); i++) + json_add_node_id(result, NULL, &peers[i]); + json_array_end(result); + } + list_del_from(&ld->graceful_commands, &gw->list); + was_pending(command_success(gw->cmd, 
result)); +} + +void check_graceful_shutdown(struct lightningd *ld) +{ + check_graceful_shutdown_progress(ld, NULL); +} + +static struct command_result *json_graceful(struct command *cmd, + const char *buffer, + const jsmntok_t *obj UNNEEDED, + const jsmntok_t *params) +{ + struct graceful_waiter *gw = tal(cmd, struct graceful_waiter); + u64 *timeout; + + if (!param(cmd, buffer, params, + p_opt("timeout", param_u64, &timeout), + NULL)) + return command_param_failed(); + + log_unusual(cmd->ld->log, "JSON-RPC graceful: preventing more connections"); + cmd->ld->state = LD_STATE_GRACE; + + gw->cmd = cmd; + gw->last_msg = NULL; + if (timeout) + gw->timeout = new_reltimer(cmd->ld->timers, gw, + time_from_sec(*timeout), + graceful_timeout, gw); + list_add_tail(&cmd->ld->graceful_commands, &gw->list); + return check_graceful_shutdown_progress(cmd->ld, cmd); +} + +static const struct json_command graceful_command = { + "graceful", + json_graceful, +}; +AUTODATA(json_command, &graceful_command); + static struct command_result *json_disconnect(struct command *cmd, const char *buffer, const jsmntok_t *obj UNNEEDED, diff --git a/lightningd/peer_control.h b/lightningd/peer_control.h index 279a2f91d679..e5130ddd752b 100644 --- a/lightningd/peer_control.h +++ b/lightningd/peer_control.h @@ -178,6 +178,9 @@ command_find_channel(struct command *cmd, const char *buffer, const jsmntok_t *tok, struct channel **channel); +/* Called whenever htlc closed, or peer disconnected */ +void check_graceful_shutdown(struct lightningd *ld); + /* We do this lazily, when reconnecting */ void peer_channels_cleanup(struct peer *peer); diff --git a/lightningd/peer_htlcs.c b/lightningd/peer_htlcs.c index 53b01ede1da5..acaccff8fa21 100644 --- a/lightningd/peer_htlcs.c +++ b/lightningd/peer_htlcs.c @@ -1953,6 +1953,8 @@ void onchain_failed_our_htlc(const struct channel *channel, static void remove_htlc_in(struct channel *channel, struct htlc_in *hin) { + struct lightningd *ld = channel->peer->ld; + 
htlc_in_check(hin, __func__); assert(hin->failonion || hin->preimage || hin->badonion); @@ -1998,10 +2000,13 @@ static void remove_htlc_in(struct channel *channel, struct htlc_in *hin) } tal_free(hin); + check_graceful_shutdown(ld); } static void remove_htlc_out(struct channel *channel, struct htlc_out *hout) { + struct lightningd *ld = channel->peer->ld; + htlc_out_check(hout, __func__); assert(hout->failonion || hout->preimage || hout->failmsg); log_debug(channel->log, "Removing out HTLC %"PRIu64" state %s %s", @@ -2048,6 +2053,7 @@ static void remove_htlc_out(struct channel *channel, struct htlc_out *hout) } tal_free(hout); + check_graceful_shutdown(ld); } static bool update_in_htlc(struct channel *channel, diff --git a/lightningd/test/run-invoice-select-inchan.c b/lightningd/test/run-invoice-select-inchan.c index 6c87d155ba5c..16ef2afb6252 100644 --- a/lightningd/test/run-invoice-select-inchan.c +++ b/lightningd/test/run-invoice-select-inchan.c @@ -97,10 +97,10 @@ void channel_gossip_node_announce(struct lightningd *ld UNNEEDED) void channel_gossip_startup_done(struct lightningd *ld UNNEEDED) { fprintf(stderr, "channel_gossip_startup_done called!\n"); abort(); } /* Generated stub for channel_has_htlc_in */ -struct htlc_in *channel_has_htlc_in(struct channel *channel UNNEEDED) +struct htlc_in *channel_has_htlc_in(const struct channel *channel UNNEEDED) { fprintf(stderr, "channel_has_htlc_in called!\n"); abort(); } /* Generated stub for channel_has_htlc_out */ -struct htlc_out *channel_has_htlc_out(struct channel *channel UNNEEDED) +struct htlc_out *channel_has_htlc_out(const struct channel *channel UNNEEDED) { fprintf(stderr, "channel_has_htlc_out called!\n"); abort(); } /* Generated stub for channel_important_filter */ bool channel_important_filter(const struct channel *channel UNNEEDED, void *unused UNNEEDED) @@ -453,6 +453,12 @@ void json_add_unsaved_channel(struct command *cmd UNNEEDED, const struct channel *channel UNNEEDED, const struct peer *peer 
UNNEEDED) { fprintf(stderr, "json_add_unsaved_channel called!\n"); abort(); } +/* Generated stub for json_notify_fmt */ +void json_notify_fmt(struct command *cmd UNNEEDED, + enum log_level level UNNEEDED, + const char *fmt UNNEEDED, ...) + +{ fprintf(stderr, "json_notify_fmt called!\n"); abort(); } /* Generated stub for json_stream_fail */ struct json_stream *json_stream_fail(struct command *cmd UNNEEDED, enum jsonrpc_errcode code UNNEEDED, diff --git a/tests/test_misc.py b/tests/test_misc.py index 3fcf133e9dae..804abbda9e14 100644 --- a/tests/test_misc.py +++ b/tests/test_misc.py @@ -4314,6 +4314,88 @@ def test_create_gossip_mesh(node_factory, bitcoind): assert False, "Test failed on purpose, grab the gossip store from /tmp/ltests-..." +def test_graceful_no_peers(node_factory): + """graceful with no channels returns immediately""" + l1 = node_factory.get_node() + assert l1.rpc.graceful() == {} + # Returns instantly even with timeout. + assert l1.rpc.graceful(10000) == {} + + +def test_graceful_idle_peer(node_factory, executor): + """graceful with an idle peer disconnects it and completes""" + l1, l2 = node_factory.line_graph(2) + + notifications = [] + + def run_graceful(): + def capture(message, **kwargs): + if message: + notifications.append(message) + with l1.rpc.notify(capture): + return l1.rpc.graceful() + + fut = executor.submit(run_graceful) + result = fut.result(TIMEOUT) + + assert result == {} + assert any("peers still connected" in n for n in notifications) + + +def test_graceful_htlc(node_factory, executor): + """graceful with an HTLC in flight notifies about expiry and completes after HTLC resolves""" + l1, l2, l3 = node_factory.line_graph(3, wait_for_announce=True, + opts=[{'may_reconnect': True, + 'dev-no-reconnect': None}, + {'may_reconnect': True, + 'dev-no-reconnect': None}, + {'plugin': os.path.join(os.getcwd(), 'tests/plugins/hold_invoice.py')}]) + + inv = l3.rpc.invoice(10000, 'hold', 'hold invoice') + route = l1.rpc.getroute(l3.info['id'], 
10000, 1)['route'] + l1.rpc.sendpay(route, inv['payment_hash'], payment_secret=inv['payment_secret']) + wait_for(lambda: len(only_one(l3.rpc.listpeerchannels()['channels'])['htlcs']) == 1) + + notifications = [] + + def run_graceful(): + def capture(message, **kwargs): + if message: + notifications.append(message) + with l2.rpc.notify(capture): + return l2.rpc.graceful() + + fut = executor.submit(run_graceful) + + # Wait until graceful has sent at least one HTLC expiry notification + wait_for(lambda: len(notifications) == 1) + wait_for(lambda: notifications[0] == f'Next HTLC SENT_ADD_ACK_REVOCATION expires at block #118 (10 blocks from now) going to peer {l3.info["id"]} (connected)') + + # This will tell us about htlcs and the peers (peers unordered) + ret = l2.rpc.graceful(1) + assert ret in ({'pending_htlc_expiries': [118, 124], + 'pending_peers': [l1.info['id'], l3.info['id']]}, + {'pending_htlc_expiries': [118, 124], + 'pending_peers': [l3.info['id'], l1.info['id']]}) + + # Close incoming connection, so incoming HTLC gets stuck. + l1.rpc.disconnect(l2.info['id'], force=True) + wait_for(lambda: notifications[-1] == f'Next HTLC SENT_ADD_ACK_REVOCATION expires at block #118 (10 blocks from now) going to peer {l3.info["id"]} (connected)') + + # Release the hold so the *outgoing* HTLC resolves + open(os.path.join(l3.daemon.lightning_dir, TEST_NETWORK, "unhold"), "w").close() + + wait_for(lambda: notifications[-1] == f'Next HTLC SENT_REMOVE_HTLC expires at block #124 (16 blocks from now) coming from peer {l1.info["id"]} (disconnected)') + + ret = l2.rpc.graceful(1) + assert ret == {'pending_htlc_expiries': [124]} + + # Reconnect and it will settle. 
+ l1.rpc.connect(l2.info['id'], 'localhost', l2.port) + + assert fut.result(TIMEOUT) == {} + + def test_fast_shutdown(node_factory): l1 = node_factory.get_node(start=False) diff --git a/wallet/test/run-wallet.c b/wallet/test/run-wallet.c index c37517a18ca9..0cef1f6c9ab2 100644 --- a/wallet/test/run-wallet.c +++ b/wallet/test/run-wallet.c @@ -434,6 +434,12 @@ void json_add_unsaved_channel(struct command *cmd UNNEEDED, const struct channel *channel UNNEEDED, const struct peer *peer UNNEEDED) { fprintf(stderr, "json_add_unsaved_channel called!\n"); abort(); } +/* Generated stub for json_notify_fmt */ +void json_notify_fmt(struct command *cmd UNNEEDED, + enum log_level level UNNEEDED, + const char *fmt UNNEEDED, ...) + +{ fprintf(stderr, "json_notify_fmt called!\n"); abort(); } /* Generated stub for json_stream_fail */ struct json_stream *json_stream_fail(struct command *cmd UNNEEDED, enum jsonrpc_errcode code UNNEEDED,