Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
15 changes: 15 additions & 0 deletions applications/tests/test_multi_prioritized_scheduler/Cargo.toml
Original file line number Diff line number Diff line change
@@ -0,0 +1,15 @@
# Test application that spawns tasks across multiple priority-based
# schedulers (PrioritizedFIFO / PrioritizedRR / GEDF) at once.
[package]
name = "test_multi_prioritized_scheduler"
version = "0.1.0"
edition = "2021"

[dependencies]
# Logging facade; output backend is provided by the kernel.
log = "0.4"

# Async runtime / scheduler library of the Awkernel project.
# default-features disabled for the no_std kernel environment.
[dependencies.awkernel_async_lib]
path = "../../../awkernel_async_lib"
default-features = false

# Low-level kernel library (CPU info, delays, etc.).
# default-features disabled for the no_std kernel environment.
[dependencies.awkernel_lib]
path = "../../../awkernel_lib"
default-features = false
51 changes: 51 additions & 0 deletions applications/tests/test_multi_prioritized_scheduler/src/lib.rs
Original file line number Diff line number Diff line change
@@ -0,0 +1,51 @@
#![no_std]

extern crate alloc;

use awkernel_async_lib::{scheduler::SchedulerType, spawn};
use awkernel_lib::{cpu::num_cpu, delay::wait_millisec};

/// Exercise all three priority-based schedulers concurrently.
///
/// Two waves of tasks are spawned, one task per CPU index `1..num_cpu()`.
/// The first wave uses the lowest priority of each scheduler, the second
/// wave a higher one (and a shorter GEDF relative deadline), so the second
/// wave should preempt / outrank the first.
/// NOTE(review): assumes GEDF's argument is a relative deadline in the same
/// time unit as `wait_millisec` — TODO confirm.
pub async fn run() {
    // Give the system a moment to settle before spawning the test load.
    wait_millisec(1000);

    // First wave: low-priority tasks, scheduler chosen round-robin by index.
    for i in 1..num_cpu() {
        let sched_type = match i % 3 {
            0 => SchedulerType::PrioritizedFIFO(0),
            1 => SchedulerType::PrioritizedRR(0),
            _ => SchedulerType::GEDF(1000),
        };

        spawn(
            "low_priority".into(),
            async move {
                log::debug!("low priority task {i} started. sched_type = {sched_type:?}");
                wait_millisec(1000);
                log::debug!("low priority task {i} finished. sched_type = {sched_type:?}");
            },
            sched_type,
        )
        .await;
    }

    // Second wave: higher-priority tasks with the same scheduler rotation.
    for i in 1..num_cpu() {
        let sched_type = match i % 3 {
            0 => SchedulerType::PrioritizedFIFO(1),
            1 => SchedulerType::PrioritizedRR(1),
            _ => SchedulerType::GEDF(500),
        };

        spawn(
            "high_priority".into(),
            async move {
                log::debug!("high priority task {i} started. sched_type = {sched_type:?}");
                wait_millisec(100);
                log::debug!("high priority task {i} finished. sched_type = {sched_type:?}");
            },
            sched_type,
        )
        .await;
    }
}
8 changes: 8 additions & 0 deletions awkernel_async_lib/src/scheduler.rs
Original file line number Diff line number Diff line change
Expand Up @@ -116,6 +116,11 @@ static PRIORITY_LIST: [SchedulerType; 4] = [
SchedulerType::Panicked,
];

/// For exclusive execution of `wake_task` and `get_next` across all schedulers.
/// In order to resolve priority inversion in multiple priority-based schedulers,
/// the decision to preempt, dequeuing, enqueuing, and updating of RUNNING must be executed exclusively.
static GLOBAL_WAKE_GET_MUTEX: Mutex<()> = Mutex::new(());

pub(crate) trait Scheduler {
/// Enqueue an executable task.
/// The enqueued task will be taken by `get_next()`.
Expand All @@ -134,6 +139,9 @@ pub(crate) trait Scheduler {
/// Get the next executable task.
#[inline]
pub(crate) fn get_next_task(execution_ensured: bool) -> Option<Arc<Task>> {
let mut node = MCSNode::new();
let _guard = GLOBAL_WAKE_GET_MUTEX.lock(&mut node);

let task = PRIORITY_LIST
.iter()
.find_map(|&scheduler_type| get_scheduler(scheduler_type).get_next(execution_ensured));
Expand Down
13 changes: 6 additions & 7 deletions awkernel_async_lib/src/scheduler/gedf.rs
Original file line number Diff line number Diff line change
Expand Up @@ -5,6 +5,7 @@ use core::cmp::max;
use super::{Scheduler, SchedulerType, Task};
use crate::{
dag::{get_dag, get_dag_absolute_deadline, set_dag_absolute_deadline, to_node_index},
scheduler::GLOBAL_WAKE_GET_MUTEX,
scheduler::{get_priority, peek_preemption_pending, push_preemption_pending},
task::{
get_task, get_tasks_running, set_current_task, set_need_preemption, DagInfo, State,
Expand Down Expand Up @@ -62,13 +63,6 @@ impl GEDFData {

impl Scheduler for GEDFScheduler {
fn wake_task(&self, task: Arc<Task>) {
let mut node = MCSNode::new();
// The reason for acquiring this lock before invoke_preemption() is to prevent priority inversion from occurring
// when invoke_preemption() is executed between the time the next task is determined and the RUNNING is updated
// within the scheduler's get_next().
let mut data = self.data.lock(&mut node);
let internal_data = data.get_or_insert_with(GEDFData::new);

let (wake_time, absolute_deadline) = {
let mut node_inner = MCSNode::new();
let mut info = task.info.lock(&mut node_inner);
Expand All @@ -94,7 +88,12 @@ impl Scheduler for GEDFScheduler {
}
};

let mut node = MCSNode::new();
let _guard = GLOBAL_WAKE_GET_MUTEX.lock(&mut node);
if !self.invoke_preemption(task.clone()) {
let mut node_inner = MCSNode::new();
let mut data = self.data.lock(&mut node_inner);
let internal_data = data.get_or_insert_with(GEDFData::new);
internal_data.queue.push(GEDFTask {
task: task.clone(),
absolute_deadline,
Expand Down
13 changes: 6 additions & 7 deletions awkernel_async_lib/src/scheduler/prioritized_fifo.rs
Original file line number Diff line number Diff line change
Expand Up @@ -3,7 +3,7 @@
use core::cmp::max;

use super::{Scheduler, SchedulerType, Task};
use crate::scheduler::{peek_preemption_pending, push_preemption_pending};
use crate::scheduler::{peek_preemption_pending, push_preemption_pending, GLOBAL_WAKE_GET_MUTEX};
use crate::task::{get_task, get_tasks_running, set_current_task, set_need_preemption};
use crate::{scheduler::get_priority, task::State};
use alloc::sync::Arc;
Expand Down Expand Up @@ -35,12 +35,6 @@ impl PrioritizedFIFOData {

impl Scheduler for PrioritizedFIFOScheduler {
fn wake_task(&self, task: Arc<Task>) {
let mut node = MCSNode::new();
// The reason for acquiring this lock before invoke_preemption() is to prevent priority inversion from occurring
// when invoke_preemption() is executed between the time the next task is determined and the RUNNING is updated
// within the scheduler's get_next().
let mut data = self.data.lock(&mut node);
let internal_data = data.get_or_insert_with(PrioritizedFIFOData::new);
let priority = {
let mut node_inner = MCSNode::new();
let info = task.info.lock(&mut node_inner);
Expand All @@ -50,7 +44,12 @@ impl Scheduler for PrioritizedFIFOScheduler {
}
};

let mut node = MCSNode::new();
let _guard = GLOBAL_WAKE_GET_MUTEX.lock(&mut node);
if !self.invoke_preemption(task.clone()) {
let mut node_inner = MCSNode::new();
let mut data = self.data.lock(&mut node_inner);
let internal_data = data.get_or_insert_with(PrioritizedFIFOData::new);
internal_data.queue.push(
priority,
PrioritizedFIFOTask {
Expand Down
16 changes: 9 additions & 7 deletions awkernel_async_lib/src/scheduler/prioritized_rr.rs
Original file line number Diff line number Diff line change
Expand Up @@ -4,7 +4,10 @@ use core::cmp::max;

use super::{Scheduler, SchedulerType, Task};
use crate::{
scheduler::{get_next_task, get_priority, peek_preemption_pending, push_preemption_pending},
scheduler::{
get_next_task, get_priority, peek_preemption_pending, push_preemption_pending,
GLOBAL_WAKE_GET_MUTEX,
},
task::{
get_last_executed_by_task_id, get_task, get_tasks_running, set_current_task,
set_need_preemption, State,
Expand Down Expand Up @@ -40,12 +43,6 @@ impl PrioritizedRRData {

impl Scheduler for PrioritizedRRScheduler {
fn wake_task(&self, task: Arc<Task>) {
let mut node = MCSNode::new();
// The reason for acquiring this lock before invoke_preemption() is to prevent priority inversion from occurring
// when invoke_preemption() is executed between the time the next task is determined and the RUNNING is updated
// within the scheduler's get_next().
let mut data = self.data.lock(&mut node);
let internal_data = data.get_or_insert_with(PrioritizedRRData::new);
let priority = {
let mut node_inner = MCSNode::new();
let info = task.info.lock(&mut node_inner);
Expand All @@ -55,7 +52,12 @@ impl Scheduler for PrioritizedRRScheduler {
}
};

let mut node = MCSNode::new();
let _guard = GLOBAL_WAKE_GET_MUTEX.lock(&mut node);
if !self.invoke_preemption_wake(task.clone()) {
let mut node_inner = MCSNode::new();
let mut data = self.data.lock(&mut node_inner);
let internal_data = data.get_or_insert_with(PrioritizedRRData::new);
internal_data.queue.push(
priority,
PrioritizedRRTask {
Expand Down
Loading