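//! An intrusive MPSC "ready to run" queue of tasks, used by
//! `FuturesUnordered` and based on the 1024cores intrusive MPSC
//! queue algorithm.
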
use crate::task::AtomicWaker;
use core::cell::UnsafeCell;
use core::ptr;
use core::sync::atomic::AtomicPtr;
use core::sync::atomic::Ordering::{Relaxed, Acquire, Release, AcqRel};
use alloc::sync::Arc;

use super::abort::abort;
use super::task::Task;

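/// The result of a single `dequeue` call: a ready task, an empty queue, or a
/// transient inconsistency caused by a concurrent, unfinished `enqueue`.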
pub(super) enum Dequeue<Fut> {
    Data(*const Task<Fut>),
    Empty,
    Inconsistent,
}

/// An MPSC queue into which the tasks containing the futures are inserted
/// whenever the future inside is scheduled for polling.
pub(super) struct ReadyToRunQueue<Fut> {
    // The waker of the task using `FuturesUnordered`.
    pub(super) waker: AtomicWaker,

    // Head/tail of the readiness queue
    pub(super) head: AtomicPtr<Task<Fut>>,
    pub(super) tail: UnsafeCell<*const Task<Fut>>,
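    // Stub node from the 1024cores algorithm; it keeps the intrusive list
    // non-empty and is never handed out by `dequeue`.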
    pub(super) stub: Arc<Task<Fut>>,
}

impl<Fut> ReadyToRunQueue<Fut> {
    /// The enqueue function from the 1024cores intrusive MPSC queue algorithm.
    pub(super) fn enqueue(&self, task: *const Task<Fut>) {
        unsafe {
            debug_assert!((*task).queued.load(Relaxed));

            // This action does not require any coordination
            (*task).next_ready_to_run.store(ptr::null_mut(), Relaxed);

            // Note that these atomic orderings come from 1024cores
            let task = task as *mut _;
            let prev = self.head.swap(task, AcqRel);
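            // Between the swap above and the store below the previous head
            // has no successor yet; a concurrent `dequeue` observes this
            // window as `Dequeue::Inconsistent`.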
            (*prev).next_ready_to_run.store(task, Release);
        }
    }

    /// The dequeue function from the 1024cores intrusive MPSC queue algorithm.
    ///
    /// Note that this is unsafe as it requires mutual exclusion (only one
    /// thread can call this) to be guaranteed elsewhere.
    pub(super) unsafe fn dequeue(&self) -> Dequeue<Fut> {
        let mut tail = *self.tail.get();
        let mut next = (*tail).next_ready_to_run.load(Acquire);

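        // The stub is never yielded to callers, so skip past it when it
        // reaches the tail of the queue.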
        if tail == self.stub() {
            if next.is_null() {
                return Dequeue::Empty;
            }

            *self.tail.get() = next;
            tail = next;
            next = (*next).next_ready_to_run.load(Acquire);
        }

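        // `tail` has a successor: advance the tail pointer and hand the old
        // tail out as data.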
        if !next.is_null() {
            *self.tail.get() = next;
            debug_assert!(tail != self.stub());
            return Dequeue::Data(tail);
        }

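        // No successor, yet `tail` is not the last node pushed: a producer
        // swapped the head but has not linked its node yet, so the queue is
        // transiently inconsistent.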
        if self.head.load(Acquire) as *const _ != tail {
            return Dequeue::Inconsistent;
        }

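        // `tail` is the only real node left; re-insert the stub so that
        // `tail` gains a successor and can be popped by the reload below.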
        self.enqueue(self.stub());

        next = (*tail).next_ready_to_run.load(Acquire);

        if !next.is_null() {
            *self.tail.get() = next;
            return Dequeue::Data(tail);
        }

        Dequeue::Inconsistent
    }

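    /// Returns a pointer to the stub node, which is owned by the queue itself
    /// and never yielded as data from `dequeue`.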
    pub(super) fn stub(&self) -> *const Task<Fut> {
        &*self.stub
    }
}

impl<Fut> Drop for ReadyToRunQueue<Fut> {
    fn drop(&mut self) {
        // Once we're in the destructor for `ReadyToRunQueue<Fut>` we need to
        // clear out the ready-to-run queue of tasks if there's anything left
        // in there.
        //
        // Note that each task has a strong reference count associated with it
        // which is owned by the ready-to-run queue. All tasks should have had
        // their futures dropped already by the `FuturesUnordered` destructor,
        // so we're just pulling out tasks and dropping their refcounts.
        unsafe {
            loop {
                match self.dequeue() {
                    Dequeue::Empty => break,
                    Dequeue::Inconsistent => abort("inconsistent in drop"),
                    Dequeue::Data(ptr) => drop(Arc::from_raw(ptr)),
                }
            }
        }
    }
}