Linux: Implement EventLoopExtPumpEvents and EventLoopExtRunOnDemand

Wayland:

I found the calloop abstraction a little awkward to work with while I was
trying to understand why there was surprising workaround code in the wayland
backend for manually dispatching pending events.

Investigating this further, it looks like there may currently be several issues
with the calloop WaylandSource (with how `prepare_read` is used, and with writes
not being flushed before polling).

Considering the currently minimal polling needs of all winit backends, I
personally think it would be simpler to own the responsibility for polling more
directly, so that the wayland-client `prepare_read` logic wouldn't live in a
separate crate (and, in the current situation, would also be easier to fix).

I've tried to maintain the status quo with calloop + workarounds.

X11:

I found that the recent changes (4ac2006cbc) to port the X11 backend
from mio to calloop lost the ability to check for pending events before
needing to poll/dispatch. (The `has_pending` state queried before
dispatching was based on state that was only filled in during
dispatching.)

As part of the rebase, this re-introduces `PeekableReceiver` and
`WakeSender`, which are small utilities built on top of
`std::sync::mpsc::channel()`. It also adds a calloop `PingSource`
so we can use a `Ping` as a generic event loop waker.

To account for false-positive wake-ups, the X11 source now tracks
when the file descriptor is readable, so that after polling via
calloop we can specifically check whether there are new X11 events
or pending redraw/user events when deciding whether to skip the
event loop iteration.
This commit is contained in:
Robert Bragg 2023-06-18 12:40:03 +01:00 committed by Kirill Chibisov
parent 461efaf99f
commit c47d0846fa
5 changed files with 835 additions and 578 deletions

View file

@ -11,8 +11,8 @@
//!
//! And the following platform-specific modules:
//!
//! - `run_ondemand` (available on `windows`, `macos`, `android`)
//! - `pump_events` (available on `windows`, `macos`, `android`)
//! - `run_ondemand` (available on `windows`, `unix`, `macos`, `android`)
//! - `pump_events` (available on `windows`, `unix`, `macos`, `android`)
//! - `run_return` (available on `windows`, `unix`, `macos`, and `android`)
//!
//! However only the module corresponding to the platform you're compiling to will be available.
@ -36,10 +36,22 @@ pub mod windows;
#[cfg(x11_platform)]
pub mod x11;
#[cfg(any(windows_platform, macos_platform, android_platform))]
#[cfg(any(
windows_platform,
macos_platform,
android_platform,
x11_platform,
wayland_platform
))]
pub mod run_ondemand;
#[cfg(any(windows_platform, macos_platform, android_platform,))]
#[cfg(any(
windows_platform,
macos_platform,
android_platform,
x11_platform,
wayland_platform
))]
pub mod pump_events;
#[cfg(any(

View file

@ -19,6 +19,7 @@ use std::{
use once_cell::sync::Lazy;
use raw_window_handle::{RawDisplayHandle, RawWindowHandle};
use smol_str::SmolStr;
use std::time::Duration;
#[cfg(x11_platform)]
pub use self::x11::XNotSupported;
@ -28,7 +29,7 @@ use self::x11::{ffi::XVisualInfo, util::WindowType as XWindowType, X11Error, XCo
use crate::platform::x11::XlibErrorHook;
use crate::{
dpi::{PhysicalPosition, PhysicalSize, Position, Size},
error::{ExternalError, NotSupportedError, OsError as RootOsError},
error::{ExternalError, NotSupportedError, OsError as RootOsError, RunLoopError},
event::{Event, KeyEvent},
event_loop::{
AsyncRequestSerial, ControlFlow, DeviceEvents, EventLoopClosed,
@ -36,7 +37,10 @@ use crate::{
},
icon::Icon,
keyboard::{Key, KeyCode},
platform::{modifier_supplement::KeyEventExtModifierSupplement, scancode::KeyCodeExtScancode},
platform::{
modifier_supplement::KeyEventExtModifierSupplement, pump_events::PumpStatus,
scancode::KeyCodeExtScancode,
},
window::{
ActivationToken, CursorGrabMode, CursorIcon, ImePurpose, ResizeDirection, Theme,
UserAttentionType, WindowAttributes, WindowButtons, WindowLevel,
@ -840,6 +844,20 @@ impl<T: 'static> EventLoop<T> {
x11_or_wayland!(match self; EventLoop(evlp) => evlp.run(callback))
}
pub fn run_ondemand<F>(&mut self, callback: F) -> Result<(), RunLoopError>
where
F: FnMut(crate::event::Event<'_, T>, &RootELW<T>, &mut ControlFlow),
{
x11_or_wayland!(match self; EventLoop(evlp) => evlp.run_ondemand(callback))
}
pub fn pump_events<F>(&mut self, callback: F) -> PumpStatus
where
F: FnMut(crate::event::Event<'_, T>, &RootELW<T>, &mut ControlFlow),
{
x11_or_wayland!(match self; EventLoop(evlp) => evlp.pump_events(callback))
}
pub fn window_target(&self) -> &crate::event_loop::EventLoopWindowTarget<T> {
x11_or_wayland!(match self; EventLoop(evlp) => evlp.window_target())
}
@ -933,6 +951,15 @@ fn sticky_exit_callback<T, F>(
}
}
/// Returns the minimum `Option<Duration>`, taking into account that `None`
/// equates to an infinite timeout, not a zero timeout (so can't just use
/// `Option::min`)
fn min_timeout(a: Option<Duration>, b: Option<Duration>) -> Option<Duration> {
a.map_or(b, |a_timeout| {
b.map_or(Some(a_timeout), |b_timeout| Some(a_timeout.min(b_timeout)))
})
}
#[cfg(target_os = "linux")]
fn is_main_thread() -> bool {
rustix::thread::gettid() == rustix::process::getpid()

View file

@ -5,7 +5,6 @@ use std::error::Error;
use std::io::Result as IOResult;
use std::marker::PhantomData;
use std::mem;
use std::process;
use std::rc::Rc;
use std::sync::atomic::Ordering;
use std::time::{Duration, Instant};
@ -17,8 +16,11 @@ use sctk::reexports::client::globals;
use sctk::reexports::client::{Connection, Proxy, QueueHandle, WaylandSource};
use crate::dpi::{LogicalSize, PhysicalSize};
use crate::error::RunLoopError;
use crate::event::{Event, StartCause, WindowEvent};
use crate::event_loop::{ControlFlow, EventLoopWindowTarget as RootEventLoopWindowTarget};
use crate::platform::pump_events::PumpStatus;
use crate::platform_impl::platform::min_timeout;
use crate::platform_impl::platform::sticky_exit_callback;
use crate::platform_impl::EventLoopWindowTarget as PlatformEventLoopWindowTarget;
@ -35,6 +37,16 @@ type WaylandDispatcher = calloop::Dispatcher<'static, WaylandSource<WinitState>,
/// The Wayland event loop.
pub struct EventLoop<T: 'static> {
/// Has `run` or `run_ondemand` been called or a call to `pump_events` that starts the loop
loop_running: bool,
/// The application's latest control_flow state
control_flow: ControlFlow,
buffer_sink: EventSink,
compositor_updates: Vec<WindowCompositorUpdate>,
window_ids: Vec<WindowId>,
/// Sender of user events.
user_events_sender: calloop::channel::Sender<T>,
@ -114,6 +126,11 @@ impl<T: 'static> EventLoop<T> {
};
let event_loop = Self {
loop_running: false,
control_flow: ControlFlow::default(),
compositor_updates: Vec::new(),
buffer_sink: EventSink::default(),
window_ids: Vec::new(),
connection,
wayland_dispatcher,
user_events_sender,
@ -132,41 +149,108 @@ impl<T: 'static> EventLoop<T> {
where
F: FnMut(Event<'_, T>, &RootEventLoopWindowTarget<T>, &mut ControlFlow) + 'static,
{
let exit_code = self.run_return(callback);
process::exit(exit_code);
let exit_code = match self.run_ondemand(callback) {
Err(RunLoopError::ExitFailure(code)) => code,
Err(_err) => 1,
Ok(_) => 0,
};
::std::process::exit(exit_code)
}
pub fn run_return<F>(&mut self, mut callback: F) -> i32
pub fn run_return<F>(&mut self, callback: F) -> i32
where
F: FnMut(Event<'_, T>, &RootEventLoopWindowTarget<T>, &mut ControlFlow),
{
let mut control_flow = ControlFlow::Poll;
match self.run_ondemand(callback) {
Err(RunLoopError::ExitFailure(code)) => code,
Err(_err) => 1,
Ok(_) => 0,
}
}
// XXX preallocate certian structures to avoid allocating on each loop iteration.
let mut window_ids = Vec::<WindowId>::new();
let mut compositor_updates = Vec::<WindowCompositorUpdate>::new();
let mut buffer_sink = EventSink::new();
pub fn run_ondemand<F>(&mut self, mut event_handler: F) -> Result<(), RunLoopError>
where
F: FnMut(Event<'_, T>, &RootEventLoopWindowTarget<T>, &mut ControlFlow),
{
if self.loop_running {
return Err(RunLoopError::AlreadyRunning);
}
callback(
Event::NewEvents(StartCause::Init),
&self.window_target,
&mut control_flow,
loop {
match self.pump_events_with_timeout(None, &mut event_handler) {
PumpStatus::Exit(0) => {
break Ok(());
}
PumpStatus::Exit(code) => {
break Err(RunLoopError::ExitFailure(code));
}
_ => {
continue;
}
}
}
}
pub fn pump_events<F>(&mut self, event_handler: F) -> PumpStatus
where
F: FnMut(Event<'_, T>, &RootEventLoopWindowTarget<T>, &mut ControlFlow),
{
self.pump_events_with_timeout(Some(Duration::ZERO), event_handler)
}
fn pump_events_with_timeout<F>(
&mut self,
timeout: Option<Duration>,
mut callback: F,
) -> PumpStatus
where
F: FnMut(Event<'_, T>, &RootEventLoopWindowTarget<T>, &mut ControlFlow),
{
if !self.loop_running {
self.loop_running = true;
// Reset the internal state for the loop as we start running to
// ensure consistent behaviour in case the loop runs and exits more
// than once.
self.control_flow = ControlFlow::Poll;
// Run the initial loop iteration.
self.single_iteration(&mut callback, StartCause::Init);
}
// Consider the possibility that the `StartCause::Init` iteration could
// request to Exit.
if !matches!(self.control_flow, ControlFlow::ExitWithCode(_)) {
self.poll_events_with_timeout(timeout, &mut callback);
}
if let ControlFlow::ExitWithCode(code) = self.control_flow {
self.loop_running = false;
let mut dummy = self.control_flow;
sticky_exit_callback(
Event::LoopDestroyed,
self.window_target(),
&mut dummy,
&mut callback,
);
// XXX For consistency all platforms must emit a 'Resumed' event even though Wayland
// applications don't themselves have a formal suspend/resume lifecycle.
callback(Event::Resumed, &self.window_target, &mut control_flow);
PumpStatus::Exit(code)
} else {
PumpStatus::Continue
}
}
// XXX We break on errors from dispatches, since if we've got protocol error
// libwayland-client/wayland-rs will inform us anyway, but crashing downstream is not
// really an option. Instead we inform that the event loop got destroyed. We may
// communicate an error that something was terminated, but winit doesn't provide us
// with an API to do that via some event.
// Still, we set the exit code to the error's OS error code, or to 1 if not possible.
let exit_code = loop {
// Flush the connection.
let _ = self.connection.flush();
pub fn poll_events_with_timeout<F>(&mut self, mut timeout: Option<Duration>, mut callback: F)
where
F: FnMut(Event<'_, T>, &RootEventLoopWindowTarget<T>, &mut ControlFlow),
{
let start = Instant::now();
// TODO(rib): remove this workaround and instead make sure that the calloop
// WaylandSource correctly implements the cooperative prepare_read protocol
// that support multithreaded wayland clients that may all read from the
// same socket.
//
// During the run of the user callback, some other code monitoring and reading the
// Wayland socket may have been run (mesa for example does this with vsync), if that
// is the case, some events may have been enqueued in our event queue.
@ -189,82 +273,110 @@ impl<T: 'static> EventLoop<T> {
Ok(dispatched) => dispatched > 0,
Err(error) => {
error!("Error dispatching wayland queue: {}", error);
break 1;
self.control_flow = ControlFlow::ExitWithCode(1);
return;
}
}
};
match control_flow {
ControlFlow::ExitWithCode(code) => break code,
ControlFlow::Poll => {
// Non-blocking dispatch.
let timeout = Duration::ZERO;
if let Err(error) = self.loop_dispatch(Some(timeout)) {
break error.raw_os_error().unwrap_or(1);
}
callback(
Event::NewEvents(StartCause::Poll),
&self.window_target,
&mut control_flow,
);
}
ControlFlow::Wait => {
let timeout = if instant_wakeup {
timeout = if instant_wakeup {
Some(Duration::ZERO)
} else {
None
let control_flow_timeout = match self.control_flow {
ControlFlow::Wait => None,
ControlFlow::Poll => Some(Duration::ZERO),
ControlFlow::WaitUntil(wait_deadline) => {
Some(wait_deadline.saturating_duration_since(start))
}
// This function shouldn't have to handle any requests to exit
// the application (there should be no need to poll for events
// if the application has requested to exit) so we consider
// it a bug in the backend if we ever see `ExitWithCode` here.
ControlFlow::ExitWithCode(_code) => unreachable!(),
};
min_timeout(control_flow_timeout, timeout)
};
// NOTE Ideally we should flush as the last thing we do before polling
// to wait for events, and this should be done by the calloop
// WaylandSource but we currently need to flush writes manually.
let _ = self.connection.flush();
if let Err(error) = self.loop_dispatch(timeout) {
break error.raw_os_error().unwrap_or(1);
// NOTE We exit on errors from dispatches, since if we've got protocol error
// libwayland-client/wayland-rs will inform us anyway, but crashing downstream is not
// really an option. Instead we inform that the event loop got destroyed. We may
// communicate an error that something was terminated, but winit doesn't provide us
// with an API to do that via some event.
// Still, we set the exit code to the error's OS error code, or to 1 if not possible.
let exit_code = error.raw_os_error().unwrap_or(1);
self.control_flow = ControlFlow::ExitWithCode(exit_code);
return;
}
callback(
Event::NewEvents(StartCause::WaitCancelled {
start: Instant::now(),
// NB: `StartCause::Init` is handled as a special case and doesn't need
// to be considered here
let cause = match self.control_flow {
ControlFlow::Poll => StartCause::Poll,
ControlFlow::Wait => StartCause::WaitCancelled {
start,
requested_resume: None,
}),
&self.window_target,
&mut control_flow,
);
}
},
ControlFlow::WaitUntil(deadline) => {
let start = Instant::now();
// Compute the amount of time we'll block for.
let duration = if deadline > start && !instant_wakeup {
deadline - start
} else {
Duration::ZERO
};
if let Err(error) = self.loop_dispatch(Some(duration)) {
break error.raw_os_error().unwrap_or(1);
}
let now = Instant::now();
if now < deadline {
callback(
Event::NewEvents(StartCause::WaitCancelled {
if Instant::now() < deadline {
StartCause::WaitCancelled {
start,
requested_resume: Some(deadline),
}),
&self.window_target,
&mut control_flow,
)
}
} else {
callback(
Event::NewEvents(StartCause::ResumeTimeReached {
StartCause::ResumeTimeReached {
start,
requested_resume: deadline,
}),
}
}
}
// This function shouldn't have to handle any requests to exit
// the application (there should be no need to poll for events
// if the application has requested to exit) so we consider
// it a bug in the backend if we ever see `ExitWithCode` here.
ControlFlow::ExitWithCode(_code) => unreachable!(),
};
self.single_iteration(&mut callback, cause);
}
fn single_iteration<F>(&mut self, mut callback: &mut F, cause: StartCause)
where
F: FnMut(Event<'_, T>, &RootEventLoopWindowTarget<T>, &mut ControlFlow),
{
// NOTE currently just indented to simplify the diff
let mut control_flow = self.control_flow;
// We retain these grow-only scratch buffers as part of the EventLoop
// for the sake of avoiding lots of reallocs. We take them here to avoid
// trying to mutably borrow `self` more than once and we swap them back
// when finished.
let mut compositor_updates = std::mem::take(&mut self.compositor_updates);
let mut buffer_sink = std::mem::take(&mut self.buffer_sink);
let mut window_ids = std::mem::take(&mut self.window_ids);
sticky_exit_callback(
Event::NewEvents(cause),
&self.window_target,
&mut control_flow,
)
}
}
callback,
);
// NB: For consistency all platforms must emit a 'resumed' event even though Wayland
// applications don't themselves have a formal suspend/resume lifecycle.
if cause == StartCause::Init {
sticky_exit_callback(
Event::Resumed,
&self.window_target,
&mut control_flow,
callback,
);
}
// Handle pending user events. We don't need back buffer, since we can't dispatch
@ -279,9 +391,7 @@ impl<T: 'static> EventLoop<T> {
}
// Drain the pending compositor updates.
self.with_state(|state| {
compositor_updates.append(&mut state.window_compositor_updates)
});
self.with_state(|state| compositor_updates.append(&mut state.window_compositor_updates));
for mut compositor_update in compositor_updates.drain(..) {
let window_id = compositor_update.window_id;
@ -448,10 +558,11 @@ impl<T: 'static> EventLoop<T> {
&mut control_flow,
&mut callback,
);
};
callback(Event::LoopDestroyed, &self.window_target, &mut control_flow);
exit_code
self.control_flow = control_flow;
std::mem::swap(&mut self.compositor_updates, &mut compositor_updates);
std::mem::swap(&mut self.buffer_sink, &mut buffer_sink);
std::mem::swap(&mut self.window_ids, &mut window_ids);
}
#[inline]

View file

@ -19,13 +19,13 @@ pub(crate) use self::{
pub use self::xdisplay::{XError, XNotSupported};
use calloop::channel::{channel, Channel, Event as ChanResult, Sender};
use calloop::generic::Generic;
use calloop::{Dispatcher, EventLoop as Loop};
use calloop::EventLoop as Loop;
use calloop::{ping::Ping, Readiness};
use std::{
cell::{Cell, RefCell},
collections::{HashMap, HashSet, VecDeque},
collections::{HashMap, HashSet},
ffi::CStr,
fmt,
mem::{self, MaybeUninit},
@ -37,6 +37,7 @@ use std::{
ptr,
rc::Rc,
slice,
sync::mpsc::{Receiver, Sender, TryRecvError},
sync::{mpsc, Arc, Weak},
time::{Duration, Instant},
};
@ -63,11 +64,12 @@ use self::{
};
use super::common::xkb_state::KbdState;
use crate::{
error::OsError as RootOsError,
error::{OsError as RootOsError, RunLoopError},
event::{Event, StartCause},
event_loop::{ControlFlow, DeviceEvents, EventLoopClosed, EventLoopWindowTarget as RootELW},
platform::pump_events::PumpStatus,
platform_impl::{
platform::{sticky_exit_callback, WindowId},
platform::{min_timeout, sticky_exit_callback, WindowId},
PlatformSpecificWindowBuilderAttributes,
},
window::WindowAttributes,
@ -75,6 +77,64 @@ use crate::{
type X11Source = Generic<RawFd>;
struct WakeSender<T> {
sender: Sender<T>,
waker: Ping,
}
impl<T> Clone for WakeSender<T> {
fn clone(&self) -> Self {
Self {
sender: self.sender.clone(),
waker: self.waker.clone(),
}
}
}
impl<T> WakeSender<T> {
pub fn send(&self, t: T) -> Result<(), EventLoopClosed<T>> {
let res = self.sender.send(t).map_err(|e| EventLoopClosed(e.0));
if res.is_ok() {
self.waker.ping();
}
res
}
}
struct PeekableReceiver<T> {
recv: Receiver<T>,
first: Option<T>,
}
impl<T> PeekableReceiver<T> {
pub fn from_recv(recv: Receiver<T>) -> Self {
Self { recv, first: None }
}
pub fn has_incoming(&mut self) -> bool {
if self.first.is_some() {
return true;
}
match self.recv.try_recv() {
Ok(v) => {
self.first = Some(v);
true
}
Err(TryRecvError::Empty) => false,
Err(TryRecvError::Disconnected) => {
warn!("Channel was disconnected when checking incoming");
false
}
}
}
pub fn try_recv(&mut self) -> Result<T, TryRecvError> {
if let Some(first) = self.first.take() {
return Ok(first);
}
self.recv.try_recv()
}
}
pub struct EventLoopWindowTarget<T> {
xconn: Arc<XConnection>,
wm_delete_window: xproto::Atom,
@ -83,40 +143,37 @@ pub struct EventLoopWindowTarget<T> {
root: xproto::Window,
ime: RefCell<Ime>,
windows: RefCell<HashMap<WindowId, Weak<UnownedWindow>>>,
redraw_sender: Sender<WindowId>,
activation_sender: Sender<ActivationToken>,
redraw_sender: WakeSender<WindowId>,
activation_sender: WakeSender<ActivationToken>,
device_events: Cell<DeviceEvents>,
_marker: ::std::marker::PhantomData<T>,
}
pub struct EventLoop<T: 'static> {
event_loop: Loop<'static, EventLoopState<T>>,
loop_running: bool,
control_flow: ControlFlow,
event_loop: Loop<'static, EventLoopState>,
waker: calloop::ping::Ping,
event_processor: EventProcessor<T>,
redraw_receiver: PeekableReceiver<WindowId>,
user_receiver: PeekableReceiver<T>,
activation_receiver: PeekableReceiver<ActivationToken>,
user_sender: Sender<T>,
target: Rc<RootELW<T>>,
/// The current state of the event loop.
state: EventLoopState<T>,
/// Dispatcher for redraw events.
redraw_dispatcher: Dispatcher<'static, Channel<WindowId>, EventLoopState<T>>,
state: EventLoopState,
}
type ActivationToken = (WindowId, crate::event_loop::AsyncRequestSerial);
struct EventLoopState<T> {
/// Incoming user events.
user_events: VecDeque<T>,
/// Incoming redraw events.
redraw_events: VecDeque<WindowId>,
/// Incoming activation tokens.
activation_tokens: VecDeque<ActivationToken>,
struct EventLoopState {
/// The latest readiness state for the x11 file descriptor
x11_readiness: Readiness,
}
pub struct EventLoopProxy<T: 'static> {
user_sender: Sender<T>,
user_sender: WakeSender<T>,
}
impl<T: 'static> Clone for EventLoopProxy<T> {
@ -242,7 +299,7 @@ impl<T: 'static> EventLoop<T> {
// Create an event loop.
let event_loop =
Loop::<EventLoopState<T>>::try_new().expect("Failed to initialize the event loop");
Loop::<EventLoopState>::try_new().expect("Failed to initialize the event loop");
let handle = event_loop.handle();
// Create the X11 event dispatcher.
@ -252,48 +309,29 @@ impl<T: 'static> EventLoop<T> {
calloop::Mode::Level,
);
handle
.insert_source(source, |_, _, _| Ok(calloop::PostAction::Continue))
.insert_source(source, |readiness, _, state| {
state.x11_readiness = readiness;
Ok(calloop::PostAction::Continue)
})
.expect("Failed to register the X11 event dispatcher");
// Create a channel for sending user events.
let (user_sender, user_channel) = channel();
handle
.insert_source(user_channel, |ev, _, state| {
if let ChanResult::Msg(user) = ev {
state.user_events.push_back(user);
}
let (waker, waker_source) =
calloop::ping::make_ping().expect("Failed to create event loop waker");
event_loop
.handle()
.insert_source(waker_source, move |_, _, _| {
// No extra handling is required, we just need to wake-up.
})
.expect("Failed to register the user event channel with the event loop");
.expect("Failed to register the event loop waker source");
// Create a channel for handling redraw requests.
let (redraw_sender, redraw_channel) = channel();
let (redraw_sender, redraw_channel) = mpsc::channel();
// Create a channel for sending activation tokens.
let (activation_token_sender, activation_token_channel) = channel();
let (activation_token_sender, activation_token_channel) = mpsc::channel();
// Create a dispatcher for the redraw channel such that we can dispatch it independent of the
// event loop.
let redraw_dispatcher =
Dispatcher::<_, EventLoopState<T>>::new(redraw_channel, |ev, _, state| {
if let ChanResult::Msg(window_id) = ev {
state.redraw_events.push_back(window_id);
}
});
handle
.register_dispatcher(redraw_dispatcher.clone())
.expect("Failed to register the redraw event channel with the event loop");
// Create a dispatcher for the activation token channel such that we can dispatch it
// independent of the event loop.
let activation_tokens =
Dispatcher::<_, EventLoopState<T>>::new(activation_token_channel, |ev, _, state| {
if let ChanResult::Msg(token) = ev {
state.activation_tokens.push_back(token);
}
});
handle
.register_dispatcher(activation_tokens.clone())
.expect("Failed to register the activation token channel with the event loop");
// Create a channel for sending user events.
let (user_sender, user_channel) = mpsc::channel();
let kb_state =
KbdState::from_x11_xkb(xconn.xcb_connection().get_raw_xcb_connection()).unwrap();
@ -307,8 +345,14 @@ impl<T: 'static> EventLoop<T> {
xconn,
wm_delete_window,
net_wm_ping,
redraw_sender,
activation_sender: activation_token_sender,
redraw_sender: WakeSender {
sender: redraw_sender, // not used again so no clone
waker: waker.clone(),
},
activation_sender: WakeSender {
sender: activation_token_sender, // not used again so no clone
waker: waker.clone(),
},
device_events: Default::default(),
};
@ -359,22 +403,28 @@ impl<T: 'static> EventLoop<T> {
event_processor.init_device(ffi::XIAllDevices);
EventLoop {
loop_running: false,
control_flow: ControlFlow::default(),
event_loop,
waker,
event_processor,
redraw_receiver: PeekableReceiver::from_recv(redraw_channel),
activation_receiver: PeekableReceiver::from_recv(activation_token_channel),
user_receiver: PeekableReceiver::from_recv(user_channel),
user_sender,
target,
redraw_dispatcher,
state: EventLoopState {
user_events: VecDeque::new(),
redraw_events: VecDeque::new(),
activation_tokens: VecDeque::new(),
x11_readiness: Readiness::EMPTY,
},
}
}
pub fn create_proxy(&self) -> EventLoopProxy<T> {
EventLoopProxy {
user_sender: self.user_sender.clone(),
user_sender: WakeSender {
sender: self.user_sender.clone(),
waker: self.waker.clone(),
},
}
}
@ -382,48 +432,220 @@ impl<T: 'static> EventLoop<T> {
&self.target
}
pub fn run_return<F>(&mut self, mut callback: F) -> i32
pub fn run<F>(mut self, callback: F) -> !
where
F: FnMut(Event<'_, T>, &RootELW<T>, &mut ControlFlow),
F: FnMut(Event<'_, T>, &RootELW<T>, &mut ControlFlow) + 'static,
{
struct IterationResult {
deadline: Option<Instant>,
timeout: Option<Duration>,
wait_start: Instant,
let exit_code = match self.run_ondemand(callback) {
Err(RunLoopError::ExitFailure(code)) => code,
Err(_err) => 1,
Ok(_) => 0,
};
::std::process::exit(exit_code)
}
fn single_iteration<T, F>(
this: &mut EventLoop<T>,
control_flow: &mut ControlFlow,
cause: &mut StartCause,
callback: &mut F,
) -> IterationResult
pub fn run_return<F>(&mut self, callback: F) -> i32
where
F: FnMut(Event<'_, T>, &RootELW<T>, &mut ControlFlow),
{
match self.run_ondemand(callback) {
Err(RunLoopError::ExitFailure(code)) => code,
Err(_err) => 1,
Ok(_) => 0,
}
}
pub fn run_ondemand<F>(&mut self, mut event_handler: F) -> Result<(), RunLoopError>
where
F: FnMut(Event<'_, T>, &RootELW<T>, &mut ControlFlow),
{
if self.loop_running {
return Err(RunLoopError::AlreadyRunning);
}
loop {
match self.pump_events_with_timeout(None, &mut event_handler) {
PumpStatus::Exit(0) => {
break Ok(());
}
PumpStatus::Exit(code) => {
break Err(RunLoopError::ExitFailure(code));
}
_ => {
continue;
}
}
}
}
pub fn pump_events<F>(&mut self, event_handler: F) -> PumpStatus
where
F: FnMut(Event<'_, T>, &RootELW<T>, &mut ControlFlow),
{
self.pump_events_with_timeout(Some(Duration::ZERO), event_handler)
}
fn pump_events_with_timeout<F>(
&mut self,
timeout: Option<Duration>,
mut callback: F,
) -> PumpStatus
where
F: FnMut(Event<'_, T>, &RootELW<T>, &mut ControlFlow),
{
if !self.loop_running {
self.loop_running = true;
// Reset the internal state for the loop as we start running to
// ensure consistent behaviour in case the loop runs and exits more
// than once.
self.control_flow = ControlFlow::Poll;
// run the initial loop iteration
self.single_iteration(&mut callback, StartCause::Init);
}
// Consider the possibility that the `StartCause::Init` iteration could
// request to Exit.
if !matches!(self.control_flow, ControlFlow::ExitWithCode(_)) {
self.poll_events_with_timeout(timeout, &mut callback);
}
if let ControlFlow::ExitWithCode(code) = self.control_flow {
self.loop_running = false;
let mut dummy = self.control_flow;
sticky_exit_callback(
crate::event::Event::NewEvents(*cause),
&this.target,
control_flow,
Event::LoopDestroyed,
self.window_target(),
&mut dummy,
&mut callback,
);
PumpStatus::Exit(code)
} else {
PumpStatus::Continue
}
}
fn has_pending(&mut self) -> bool {
self.event_processor.poll()
|| self.user_receiver.has_incoming()
|| self.redraw_receiver.has_incoming()
}
pub fn poll_events_with_timeout<F>(&mut self, mut timeout: Option<Duration>, mut callback: F)
where
F: FnMut(Event<'_, T>, &RootELW<T>, &mut ControlFlow),
{
let start = Instant::now();
let has_pending = self.has_pending();
timeout = if has_pending {
// If we already have work to do then we don't want to block on the next poll.
Some(Duration::ZERO)
} else {
let control_flow_timeout = match self.control_flow {
ControlFlow::Wait => None,
ControlFlow::Poll => Some(Duration::ZERO),
ControlFlow::WaitUntil(wait_deadline) => {
Some(wait_deadline.saturating_duration_since(start))
}
// This function shouldn't have to handle any requests to exit
// the application (there should be no need to poll for events
// if the application has requested to exit) so we consider
// it a bug in the backend if we ever see `ExitWithCode` here.
ControlFlow::ExitWithCode(_code) => unreachable!(),
};
min_timeout(control_flow_timeout, timeout)
};
self.state.x11_readiness = Readiness::EMPTY;
if let Err(error) = self
.event_loop
.dispatch(timeout, &mut self.state)
.map_err(std::io::Error::from)
{
log::error!("Failed to poll for events: {error:?}");
let exit_code = error.raw_os_error().unwrap_or(1);
self.control_flow = ControlFlow::ExitWithCode(exit_code);
return;
}
// False positive / spurious wake ups could lead to us spamming
// redundant iterations of the event loop with no new events to
// dispatch.
//
// If there's no readable event source then we just double check if we
// have any pending `_receiver` events and if not we return without
// running a loop iteration.
// If we don't have any pending `_receiver`
if !self.has_pending() && !self.state.x11_readiness.readable {
return;
}
// NB: `StartCause::Init` is handled as a special case and doesn't need
// to be considered here
let cause = match self.control_flow {
ControlFlow::Poll => StartCause::Poll,
ControlFlow::Wait => StartCause::WaitCancelled {
start,
requested_resume: None,
},
ControlFlow::WaitUntil(deadline) => {
if Instant::now() < deadline {
StartCause::WaitCancelled {
start,
requested_resume: Some(deadline),
}
} else {
StartCause::ResumeTimeReached {
start,
requested_resume: deadline,
}
}
}
// This function shouldn't have to handle any requests to exit
// the application (there should be no need to poll for events
// if the application has requested to exit) so we consider
// it a bug in the backend if we ever see `ExitWithCode` here.
ControlFlow::ExitWithCode(_code) => unreachable!(),
};
self.single_iteration(&mut callback, cause);
}
fn single_iteration<F>(&mut self, callback: &mut F, cause: StartCause)
where
F: FnMut(Event<'_, T>, &RootELW<T>, &mut ControlFlow),
{
let mut control_flow = self.control_flow;
sticky_exit_callback(
crate::event::Event::NewEvents(cause),
&self.target,
&mut control_flow,
callback,
);
// NB: For consistency all platforms must emit a 'resumed' event even though X11
// applications don't themselves have a formal suspend/resume lifecycle.
if *cause == StartCause::Init {
if cause == StartCause::Init {
sticky_exit_callback(
crate::event::Event::Resumed,
&this.target,
control_flow,
&self.target,
&mut control_flow,
callback,
);
}
// Process all pending events
this.drain_events(callback, control_flow);
self.drain_events(callback, &mut control_flow);
// Empty activation tokens.
while let Some((window_id, serial)) = this.state.activation_tokens.pop_front() {
let token = this
while let Ok((window_id, serial)) = self.activation_receiver.try_recv() {
let token = self
.event_processor
.with_window(window_id.0 as xproto::Window, |window| {
window.generate_activation_token()
@ -438,8 +660,8 @@ impl<T: 'static> EventLoop<T> {
token: crate::window::ActivationToken::_new(token),
},
},
&this.target,
control_flow,
&self.target,
&mut control_flow,
callback,
),
Some(Err(e)) => {
@ -451,11 +673,11 @@ impl<T: 'static> EventLoop<T> {
// Empty the user event buffer
{
while let Some(event) = this.state.user_events.pop_front() {
while let Ok(event) = self.user_receiver.try_recv() {
sticky_exit_callback(
crate::event::Event::UserEvent(event),
&this.target,
control_flow,
&self.target,
&mut control_flow,
callback,
);
}
@ -464,24 +686,16 @@ impl<T: 'static> EventLoop<T> {
{
sticky_exit_callback(
crate::event::Event::MainEventsCleared,
&this.target,
control_flow,
&self.target,
&mut control_flow,
callback,
);
}
// Quickly dispatch all redraw events to avoid buffering them.
while let Ok(event) = this.redraw_dispatcher.as_source_mut().try_recv() {
this.state.redraw_events.push_back(event);
}
// Empty the redraw requests
{
let mut windows = HashSet::new();
// Empty the channel.
while let Some(window_id) = this.state.redraw_events.pop_front() {
while let Ok(window_id) = self.redraw_receiver.try_recv() {
windows.insert(window_id);
}
@ -489,8 +703,8 @@ impl<T: 'static> EventLoop<T> {
let window_id = crate::window::WindowId(window_id);
sticky_exit_callback(
Event::RedrawRequested(window_id),
&this.target,
control_flow,
&self.target,
&mut control_flow,
callback,
);
}
@ -499,119 +713,13 @@ impl<T: 'static> EventLoop<T> {
{
sticky_exit_callback(
crate::event::Event::RedrawEventsCleared,
&this.target,
control_flow,
&self.target,
&mut control_flow,
callback,
);
}
let start = Instant::now();
let (deadline, timeout);
match control_flow {
ControlFlow::ExitWithCode(_) => {
return IterationResult {
wait_start: start,
deadline: None,
timeout: None,
};
}
ControlFlow::Poll => {
*cause = StartCause::Poll;
deadline = None;
timeout = Some(Duration::from_millis(0));
}
ControlFlow::Wait => {
*cause = StartCause::WaitCancelled {
start,
requested_resume: None,
};
deadline = None;
timeout = None;
}
ControlFlow::WaitUntil(wait_deadline) => {
*cause = StartCause::ResumeTimeReached {
start,
requested_resume: *wait_deadline,
};
timeout = if *wait_deadline > start {
Some(*wait_deadline - start)
} else {
Some(Duration::from_millis(0))
};
deadline = Some(*wait_deadline);
}
}
IterationResult {
wait_start: start,
deadline,
timeout,
}
}
let mut control_flow = ControlFlow::default();
let mut cause = StartCause::Init;
// run the initial loop iteration
let mut iter_result = single_iteration(self, &mut control_flow, &mut cause, &mut callback);
let exit_code = loop {
if let ControlFlow::ExitWithCode(code) = control_flow {
break code;
}
let has_pending = self.event_processor.poll()
|| !self.state.user_events.is_empty()
|| !self.state.redraw_events.is_empty();
if !has_pending {
// Wait until
if let Err(error) = self
.event_loop
.dispatch(iter_result.timeout, &mut self.state)
.map_err(std::io::Error::from)
{
break error.raw_os_error().unwrap_or(1);
}
if control_flow == ControlFlow::Wait {
// We don't go straight into executing the event loop iteration, we instead go
// to the start of this loop and check again if there's any pending event. We
// must do this because during the execution of the iteration we sometimes wake
// the calloop waker, and if the waker is already awaken before we call poll(),
// then poll doesn't block, but it returns immediately. This caused the event
// loop to run continuously even if the control_flow was `Wait`
continue;
}
}
let wait_cancelled = iter_result
.deadline
.map_or(false, |deadline| Instant::now() < deadline);
if wait_cancelled {
cause = StartCause::WaitCancelled {
start: iter_result.wait_start,
requested_resume: iter_result.deadline,
};
}
iter_result = single_iteration(self, &mut control_flow, &mut cause, &mut callback);
};
callback(
crate::event::Event::LoopDestroyed,
&self.target,
&mut control_flow,
);
exit_code
}
pub fn run<F>(mut self, callback: F) -> !
where
F: 'static + FnMut(Event<'_, T>, &RootELW<T>, &mut ControlFlow),
{
let exit_code = self.run_return(callback);
::std::process::exit(exit_code);
self.control_flow = control_flow;
}
fn drain_events<F>(&mut self, callback: &mut F, control_flow: &mut ControlFlow)

View file

@ -23,7 +23,7 @@ use crate::{
error::{ExternalError, NotSupportedError, OsError as RootOsError},
event_loop::AsyncRequestSerial,
platform_impl::{
x11::{atoms::*, MonitorHandle as X11MonitorHandle, X11Error},
x11::{atoms::*, MonitorHandle as X11MonitorHandle, WakeSender, X11Error},
Fullscreen, MonitorHandle as PlatformMonitorHandle, OsError,
PlatformSpecificWindowBuilderAttributes, VideoMode as PlatformVideoMode,
},
@ -37,7 +37,6 @@ use super::{
ffi, util, CookieResultExt, EventLoopWindowTarget, ImeRequest, ImeSender, VoidCookie, WindowId,
XConnection,
};
use calloop::channel::Sender;
#[derive(Debug)]
pub struct SharedState {
@ -122,8 +121,8 @@ pub(crate) struct UnownedWindow {
cursor_visible: Mutex<bool>,
ime_sender: Mutex<ImeSender>,
pub shared_state: Mutex<SharedState>,
redraw_sender: Sender<WindowId>,
activation_sender: Sender<super::ActivationToken>,
redraw_sender: WakeSender<WindowId>,
activation_sender: WakeSender<super::ActivationToken>,
}
impl UnownedWindow {