Skip to content

Commit

Permalink
Test
Browse files Browse the repository at this point in the history
  • Loading branch information
zyma98 committed Jun 22, 2024
1 parent 2e492c4 commit f6224f6
Show file tree
Hide file tree
Showing 2 changed files with 35 additions and 58 deletions.
58 changes: 6 additions & 52 deletions src/sync/mailbox.rs
Original file line number Diff line number Diff line change
Expand Up @@ -11,15 +11,13 @@ struct Inner {
count: AtomicUsize,
pending_count: AtomicUsize,
wait_task: Spin<Option<Arc<Task>>>,
has_timeout: AtomicBool,
task_notified: AtomicBool,
}

struct InnerFullAccessor<'a> {
count: &'a AtomicUsize,
pending_count: &'a AtomicUsize,
wait_task: &'a Spin<Option<Arc<Task>>>,
has_timeout: &'a AtomicBool,
task_notified: &'a AtomicBool,
}

Expand All @@ -35,7 +33,6 @@ impl<'a> AllowPendOp<'a> for Inner {
count: &self.count,
pending_count: &self.pending_count,
wait_task: &self.wait_task,
has_timeout: &self.has_timeout,
task_notified: &self.task_notified,
}
}
Expand All @@ -53,19 +50,9 @@ impl<'a> RunPendedOp for InnerFullAccessor<'a> {
self.count.fetch_add(pending_count, Ordering::SeqCst);

if let Some(wait_task) = self.wait_task.lock_now_or_die().take() {
let has_timeout = self.has_timeout.load(Ordering::SeqCst);
if has_timeout {
let removed = time::remove_task_from_sleep_queue(&wait_task);
if removed {
schedule::make_task_ready_and_enqueue(wait_task);
self.count.fetch_sub(1, Ordering::SeqCst);
self.task_notified.store(true, Ordering::SeqCst);
}
} else {
schedule::make_task_ready_and_enqueue(wait_task);
self.count.fetch_sub(1, Ordering::SeqCst);
self.task_notified.store(true, Ordering::SeqCst);
}
time::remove_task_from_sleep_queue_allow_isr(wait_task);
self.count.fetch_sub(1, Ordering::SeqCst);
self.task_notified.store(true, Ordering::SeqCst);
}
}
}
Expand All @@ -76,7 +63,6 @@ impl Inner {
count: AtomicUsize::new(0),
pending_count: AtomicUsize::new(0),
wait_task: Spin::new(None),
has_timeout: AtomicBool::new(false),
task_notified: AtomicBool::new(false),
}
}
Expand All @@ -92,27 +78,7 @@ impl Mailbox {
pub fn wait(&self) {
die_if_in_isr();

let should_block = self.inner.lock().must_with_full_access(|full_access| {
if full_access.count.load(Ordering::SeqCst) > 0 {
full_access.count.fetch_sub(1, Ordering::SeqCst);
return false;
}

full_access.has_timeout.store(false, Ordering::SeqCst);
full_access.task_notified.store(false, Ordering::SeqCst);

schedule::with_current_task_arc(|cur_task| {
schedule::set_task_state_block(&cur_task);
let mut locked_wait_task = full_access.wait_task.lock_now_or_die();
*locked_wait_task = Some(cur_task);
});

true
});

if should_block {
svc::svc_block_current_task();
}
self.wait_until_timeout(100_000_000);
}

pub fn wait_until_timeout(&self, timeout_ms: u32) -> bool {
Expand All @@ -124,7 +90,6 @@ impl Mailbox {
return false;
}

full_access.has_timeout.store(true, Ordering::SeqCst);
full_access.task_notified.store(false, Ordering::SeqCst);

schedule::with_current_task_arc(|cur_task| {
Expand Down Expand Up @@ -153,19 +118,8 @@ impl Mailbox {
self.inner.lock().with_access(|access| match access {
Access::Full { full_access } => match full_access.wait_task.lock_now_or_die().take() {
Some(wait_task) => {
let has_timeout = full_access.has_timeout.load(Ordering::SeqCst);
if has_timeout {
let removed = time::remove_task_from_sleep_queue(&wait_task);
if removed {
schedule::make_task_ready_and_enqueue(wait_task);
full_access.task_notified.store(true, Ordering::SeqCst);
} else {
full_access.count.fetch_add(1, Ordering::SeqCst);
}
} else {
schedule::make_task_ready_and_enqueue(wait_task);
full_access.task_notified.store(true, Ordering::SeqCst);
}
time::remove_task_from_sleep_queue_allow_isr(wait_task);
full_access.task_notified.store(true, Ordering::SeqCst);
}
None => {
full_access.count.fetch_add(1, Ordering::SeqCst);
Expand Down
35 changes: 29 additions & 6 deletions src/time/mod.rs
Original file line number Diff line number Diff line change
@@ -1,4 +1,5 @@
use crate::{
config,
interrupt::svc,
schedule,
sync::{Access, AllowPendOp, Interruptable, RefCellSchedSafe, RunPendedOp, Spin},
Expand All @@ -7,17 +8,22 @@ use crate::{
};
use alloc::sync::Arc;
use core::sync::atomic::{AtomicBool, AtomicU32, Ordering};
use heapless::mpmc::MpMcQueue;
use intrusive_collections::LinkedList;

/// Shared state backing the sleep queue (wrapped in the `SleepQueue` alias).
struct Inner {
    /// Sleeping tasks; presumably kept sorted by wake-up tick, given the
    /// field name and the early-`break` scan over it — TODO confirm.
    time_sorted_queue: Spin<LinkedList<TaskListAdapter>>,
    /// Tasks whose removal was requested while only pend-level access was
    /// available (e.g. from an ISR context); drained later under full access,
    /// where each successfully removed task is made ready again.
    delete_buffer: DeleteBuffer,
    /// Atomic flag; NOTE(review): its exact semantics (who sets/clears it,
    /// and when) are not visible in this excerpt — confirm before relying on it.
    time_to_wakeup: AtomicBool,
}

/// Fixed-capacity, lock-free MPMC queue of tasks to be removed from the sleep
/// queue. Capacity is `config::MAX_TASK_NUMBER`, so an enqueue is presumably
/// never expected to fail (callers `unwrap_or_die` on it) — TODO confirm.
type DeleteBuffer = MpMcQueue<Arc<Task>, { config::MAX_TASK_NUMBER }>;

impl Inner {
const fn new() -> Self {
Self {
time_sorted_queue: Spin::new(LinkedList::new(TaskListAdapter::NEW)),
delete_buffer: DeleteBuffer::new(),
time_to_wakeup: AtomicBool::new(false),
}
}
Expand All @@ -27,10 +33,12 @@ type SleepQueue = RefCellSchedSafe<Interruptable<Inner>>;

/// Borrowed view of every `Inner` field, handed out when full access to the
/// sleep-queue state is granted (`Access::Full`).
struct InnerFullAccessor<'a> {
    time_sorted_queue: &'a Spin<LinkedList<TaskListAdapter>>,
    delete_buffer: &'a DeleteBuffer,
    time_to_wakeup: &'a AtomicBool,
}

/// Restricted view handed out under pend-only access (`Access::PendOnly`):
/// only the members that are safe to touch without taking the queue lock.
struct InnerPendAccessor<'a> {
    delete_buffer: &'a DeleteBuffer,
    time_to_wakeup: &'a AtomicBool,
}

Expand All @@ -41,11 +49,13 @@ impl<'a> AllowPendOp<'a> for Inner {
fn full_access(&'a self) -> InnerFullAccessor<'a> {
InnerFullAccessor {
time_sorted_queue: &self.time_sorted_queue,
delete_buffer: &self.delete_buffer,
time_to_wakeup: &self.time_to_wakeup,
}
}
fn pend_only_access(&'a self) -> InnerPendAccessor<'a> {
InnerPendAccessor {
delete_buffer: &self.delete_buffer,
time_to_wakeup: &self.time_to_wakeup,
}
}
Expand All @@ -66,6 +76,13 @@ impl<'a> InnerFullAccessor<'a> {
break;
}
}

while let Some(task) = self.delete_buffer.dequeue() {
let removed = locked_queue.remove_task(&task);
if removed {
schedule::make_task_ready_and_enqueue(task);
}
}
}
}

Expand Down Expand Up @@ -139,13 +156,19 @@ pub(crate) fn add_task_to_sleep_queue(task: Arc<Task>, wake_at_tick: u32) {
});
}

pub(crate) fn remove_task_from_sleep_queue(task: &Task) -> bool {
SLEEP_TASK_QUEUE
.lock()
.must_with_full_access(|full_access| {
pub(crate) fn remove_task_from_sleep_queue_allow_isr(task: Arc<Task>) {
SLEEP_TASK_QUEUE.lock().with_access(|access| match access {
Access::Full { full_access } => {
let mut locked_queue = full_access.time_sorted_queue.lock_now_or_die();
locked_queue.remove_task(task)
})
let removed = locked_queue.remove_task(&task);
if removed {
schedule::make_task_ready_and_enqueue(task);
}
}
Access::PendOnly { pend_access } => {
pend_access.delete_buffer.enqueue(task).unwrap_or_die();
}
});
}

/// A time-based task barrier that allow a task to proceed at a given interval.
Expand Down

0 comments on commit f6224f6

Please sign in to comment.