tokio_xmpp/stanzastream/mod.rs
// Copyright (c) 2019 Emmanuel Gil Peyrot <linkmauve@linkmauve.fr>
//
// This Source Code Form is subject to the terms of the Mozilla Public
// License, v. 2.0. If a copy of the MPL was not distributed with this
// file, You can obtain one at http://mozilla.org/MPL/2.0/.

//! # Resilient stanza stream
//!
//! This module provides the [`StanzaStream`], which is the next level up from
//! the low-level [`XmlStream`][`crate::xmlstream::XmlStream`].
//!
//! The stanza stream knows about XMPP and, most importantly, knows how to
//! fix a broken connection with a reconnect and how to do this smoothly using
//! [XEP-0198 (Stream Management)](https://xmpp.org/extensions/xep-0198.html).
//! XEP-0198 is only used if the peer supports it. If the peer does not
//! support XEP-0198, automatic reconnects are still done, but with a greater
//! risk of undetected data loss.
//!
//! The main API entrypoint for the stanza stream is, unsurprisingly,
//! [`StanzaStream`].
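//!
//! ## Example
//!
//! A minimal usage sketch. The JID, password and queue depth below are
//! placeholders, and `run` is a hypothetical function which receives a
//! ready-made [`ServerConnector`][`crate::connect::ServerConnector`] and
//! [`Timeouts`][`crate::xmlstream::Timeouts`] from its caller:
//!
//! ```no_run
//! use futures::StreamExt;
//! use tokio_xmpp::connect::ServerConnector;
//! use tokio_xmpp::stanzastream::{Event, StanzaStream, StreamEvent};
//! use tokio_xmpp::xmlstream::Timeouts;
//! use xmpp_parsers::jid::Jid;
//!
//! async fn run<C: ServerConnector>(server: C, timeouts: Timeouts) {
//!     let jid: Jid = "user@example.org".parse().unwrap();
//!     let mut stream = StanzaStream::new_c2s(
//!         server,
//!         jid,
//!         "placeholder password".to_owned(),
//!         timeouts,
//!         16, // queue depth, chosen arbitrarily for this sketch
//!     );
//!     while let Some(event) = stream.next().await {
//!         match event {
//!             Event::Stream(StreamEvent::Reset { bound_jid, .. }) => {
//!                 // (Re-)established with loss of state: resend presence,
//!                 // rejoin rooms, etc. here.
//!                 println!("now bound as {}", bound_jid);
//!             }
//!             // Suspended/Resumed are informative; no action is required.
//!             Event::Stream(_) => (),
//!             Event::Stanza(_stanza) => {
//!                 // Handle an incoming stanza.
//!             }
//!         }
//!     }
//!     // The end of the event stream signals stream closure.
//! }
//! ```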

use core::pin::Pin;
use core::task::{Context, Poll};
use core::time::Duration;

// TODO: ensure that IDs are always set on stanzas.

// TODO: figure out what to do with the mpsc::Sender<QueueEntry> on lossy
// stream reconnects. Keeping it may cause stanzas to be sent which weren't
// meant for that stream; replacing it is racy.

use futures::{SinkExt, Stream};

use tokio::sync::{mpsc, oneshot};

use xmpp_parsers::{jid::Jid, stream_features::StreamFeatures};

use crate::connect::ServerConnector;
use crate::xmlstream::Timeouts;
use crate::Stanza;

mod connected;
mod error;
mod negotiation;
mod queue;
mod stream_management;
mod worker;

use self::queue::QueueEntry;
pub use self::queue::{StanzaStage, StanzaState, StanzaToken};
pub use self::worker::{Connection, XmppStream};
use self::worker::{StanzaStreamWorker, LOCAL_SHUTDOWN_TIMEOUT};

/// Event informing about the change of the [`StanzaStream`]'s status.
#[derive(Debug)]
pub enum StreamEvent {
    /// The stream was (re-)established **with** loss of state.
    Reset {
        /// The new JID to which the stream is bound.
        bound_jid: Jid,

        /// The features reported by the stream.
        features: StreamFeatures,
    },

    /// The stream is currently inactive because a connection was lost.
    ///
    /// Resumption without loss of state is still possible. This event is
    /// merely informative and may be used to prolong timeouts or inform the
    /// user that the connection is currently unstable.
    Suspended,

    /// The stream was reestablished **without** loss of state.
    ///
    /// This is merely informative. Potentially useful to prolong timeouts.
    Resumed,
}

/// Event emitted by the [`StanzaStream`].
///
/// Note that stream closure is not an explicit event, but the end of the
/// event stream itself.
#[derive(Debug)]
pub enum Event {
    /// The stream's connectivity status has changed.
    Stream(StreamEvent),

    /// A stanza was received over the stream.
    Stanza(Stanza),
}

/// Frontend interface to a reliable, always-online stanza stream.
pub struct StanzaStream {
    rx: mpsc::Receiver<Event>,
    tx: mpsc::Sender<QueueEntry>,
}

impl StanzaStream {
    /// Establish a new client-to-server stream using the given
    /// [`ServerConnector`].
    ///
    /// `jid` and `password` must be the user account's credentials. `jid`
    /// may either be a bare JID (to let the server choose a resource) or a
    /// full JID (to request a specific resource from the server, with no
    /// guarantee of success).
    ///
    /// `timeouts` controls the responsiveness to connection interruptions
    /// on the underlying transports. Please see the [`Timeouts`] struct's
    /// documentation for hints on how to configure this correctly.
    ///
    /// The `queue_depth` controls the sizes of the incoming and outgoing
    /// stanza queues. If a queue's depth is exceeded, the corresponding
    /// direction will block until the queue can be flushed. Note that the
    /// reverse direction is not affected (i.e. if your outgoing queue is
    /// full, for example because of a slow server, you can still receive
    /// data).
    pub fn new_c2s<C: ServerConnector>(
        server: C,
        jid: Jid,
        password: String,
        timeouts: Timeouts,
        queue_depth: usize,
    ) -> Self {
        let reconnector = Box::new(
            move |_preferred_location: Option<String>, slot: oneshot::Sender<Connection>| {
                let jid = jid.clone();
                let server = server.clone();
                let password = password.clone();
                tokio::spawn(async move {
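                    // Retry with exponential backoff: start at 1s, double
                    // after each failed attempt, and cap at MAX_DELAY.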
                    const MAX_DELAY: Duration = Duration::new(30, 0);
                    let mut delay = Duration::new(1, 0);
                    loop {
                        log::debug!("Starting new connection as {}", jid);
                        match crate::client::login::client_auth(
                            server.clone(),
                            jid.clone(),
                            password.clone(),
                            timeouts,
                        )
                        .await
                        {
                            Ok((features, stream)) => {
                                log::debug!("Connection as {} established", jid);
                                let stream = stream.box_stream();
                                let Err(mut conn) = slot.send(Connection {
                                    stream,
                                    features,
                                    identity: jid,
                                }) else {
                                    // Send succeeded; we're done here.
                                    return;
                                };

                                log::debug!("StanzaStream dropped, attempting graceful termination of fresh stream.");
                                // Send failed, i.e. the StanzaStream is dead.
                                // Let's be polite and close this stream
                                // cleanly. We don't care whether that works,
                                // though; we just want to release the
                                // resources after a defined amount of time.
                                let _: Result<_, _> = tokio::time::timeout(
                                    LOCAL_SHUTDOWN_TIMEOUT,
                                    <XmppStream as SinkExt<&Stanza>>::close(&mut conn.stream),
                                )
                                .await;
                                return;
                            }
                            Err(e) => {
                                // TODO: auth errors should probably be fatal??
                                log::error!("Failed to connect: {}. Retrying in {:?}.", e, delay);
                                tokio::time::sleep(delay).await;
                                delay = (delay * 2).min(MAX_DELAY);
                            }
                        }
                    }
                });
            },
        );
        Self::new(reconnector, queue_depth)
    }

    /// Create a new stanza stream.
    ///
    /// Stanza streams operate using a `connector`, which is responsible for
    /// producing a new stream whenever necessary. It is the connector's
    /// responsibility that:
    ///
    /// - It never fails to send to the channel it is given. If the connector
    ///   drops the channel, the `StanzaStream` will consider this fatal and
    ///   fail the stream.
    ///
    /// - All streams are authenticated and secured as necessary.
    ///
    /// - All streams are authenticated for the same entity. If the connector
    ///   were to provide streams for different identities, information could
    ///   leak as queues from previous sessions are flushed on the new stream
    ///   after a reconnect.
    ///
    /// Most notably, the `connector` is **not** responsible for performing
    /// resource binding: resource binding is handled by the `StanzaStream`.
    ///
    /// `connector` will be called soon after `new()` to establish the first
    /// underlying stream for the `StanzaStream`.
    ///
    /// The `queue_depth` controls the sizes of the incoming and outgoing
    /// stanza queues. If a queue's depth is exceeded, the corresponding
    /// direction will block until the queue can be flushed. Note that the
    /// reverse direction is not affected (i.e. if your outgoing queue is
    /// full, for example because of a slow server, you can still receive
    /// data).
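    ///
    /// A sketch of the expected connector shape. The actual connection
    /// establishment, security and authentication are elided; `stream`,
    /// `features` and `identity` in the comment refer to the fields of
    /// [`Connection`]:
    ///
    /// ```no_run
    /// use tokio::sync::oneshot;
    /// use tokio_xmpp::stanzastream::{Connection, StanzaStream};
    ///
    /// let connector = Box::new(
    ///     move |_preferred_location: Option<String>, slot: oneshot::Sender<Connection>| {
    ///         tokio::spawn(async move {
    ///             // Establish, secure and authenticate a new stream here,
    ///             // then hand it over. Dropping `slot` without sending
    ///             // fails the StanzaStream.
    ///             // slot.send(Connection { stream, features, identity });
    ///             let _ = slot;
    ///         });
    ///     },
    /// );
    /// let stream = StanzaStream::new(connector, 16);
    /// ```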
    pub fn new(
        connector: Box<dyn FnMut(Option<String>, oneshot::Sender<Connection>) + Send + 'static>,
        queue_depth: usize,
    ) -> Self {
        // c2f = core to frontend, f2c = frontend to core
        let (f2c_tx, c2f_rx) = StanzaStreamWorker::spawn(connector, queue_depth);
        Self {
            tx: f2c_tx,
            rx: c2f_rx,
        }
    }

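    /// Enqueue a command with the worker, panicking if the stream has been
    /// closed or the worker has crashed.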
    async fn assert_send(&self, cmd: QueueEntry) {
        match self.tx.send(cmd).await {
            Ok(()) => (),
            Err(_) => panic!("Stream closed or the stream's background workers have crashed."),
        }
    }

    /// Close the stream.
    ///
    /// This will initiate a clean shutdown of the stream, cancelling any
    /// in-progress reconnection attempts and preventing new ones.
    pub async fn close(mut self) {
        drop(self.tx); // closes stream.
        while let Some(ev) = self.rx.recv().await {
            log::trace!("discarding event {:?} after stream closure", ev);
        }
    }

    /// Send a stanza via the stream.
    ///
    /// Note that completion of this function merely signals that the stanza
    /// has been enqueued successfully: it may be stuck in the transmission
    /// queue for quite a while if the stream is currently disconnected. The
    /// transmission progress can be observed via the returned
    /// [`StanzaToken`].
    ///
    /// # Panics
    ///
    /// If the stream has failed catastrophically (i.e. due to a software
    /// bug), this function may panic.
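    ///
    /// A usage sketch; `example` is a hypothetical caller and `stanza`
    /// stands in for any pre-built [`Stanza`]:
    ///
    /// ```no_run
    /// # async fn example(stream: &tokio_xmpp::stanzastream::StanzaStream, stanza: tokio_xmpp::Stanza) {
    /// let token = stream.send(Box::new(stanza)).await;
    /// // `token` can be used to observe the stanza's transmission
    /// // progress; see StanzaToken for the available states.
    /// # }
    /// ```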
    pub async fn send(&self, stanza: Box<Stanza>) -> StanzaToken {
        let (queue_entry, token) = QueueEntry::tracked(stanza);
        self.assert_send(queue_entry).await;
        token
    }
}

impl Stream for StanzaStream {
    type Item = Event;

    fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context) -> Poll<Option<Self::Item>> {
        self.rx.poll_recv(cx)
    }
}