use alloc::{boxed::Box, string::String, sync::Arc};
use core::ops::Deref;
#[cfg(feature = "preempt")]
use core::sync::atomic::AtomicUsize;
use core::sync::atomic::{AtomicBool, AtomicI32, AtomicU64, AtomicU8, Ordering};
use core::{alloc::Layout, cell::UnsafeCell, fmt, ptr::NonNull};

#[cfg(feature = "monolithic")]
use axconfig::SMP;
use axhal::arch::TaskContext;
#[cfg(feature = "monolithic")]
use axhal::arch::TrapFrame;
#[cfg(feature = "monolithic")]
use axhal::KERNEL_PROCESS_ID;
#[cfg(feature = "tls")]
use axhal::tls::TlsArea;
use memory_addr::{align_up_4k, VirtAddr};

use crate::stat::TimeStat;
use crate::{AxRunQueue, AxTask, AxTaskRef, WaitQueue};
#[allow(unused)]
use crate_interface::call_interface;
/// A unique identifier for a task.
#[derive(Debug, Clone, Copy, Eq, PartialEq)]
pub struct TaskId(u64);
/// The possible states of a task.
#[repr(u8)]
#[derive(Debug, Clone, Copy, Eq, PartialEq)]
#[allow(missing_docs)]
pub enum TaskState {
Running = 1,
Ready = 2,
Blocked = 3,
Exited = 4,
}
/// The scheduling policy of a task, following the Linux `SCHED_*` numbering.
#[derive(PartialEq, Eq, Clone, Copy)]
#[allow(non_camel_case_types)]
pub enum SchedPolicy {
    /// The default time-sharing policy.
    SCHED_OTHER = 0,
    /// First-in, first-out real-time policy.
    SCHED_FIFO = 1,
    /// Round-robin real-time policy.
    SCHED_RR = 2,
    /// Batch (throughput-oriented) policy.
    SCHED_BATCH = 3,
    /// Very-low-priority background policy.
    SCHED_IDLE = 5,
    /// Any value that does not correspond to a known policy.
    SCHED_UNKNOWN,
}
impl From<usize> for SchedPolicy {
#[inline]
fn from(policy: usize) -> Self {
match policy {
0 => SchedPolicy::SCHED_OTHER,
1 => SchedPolicy::SCHED_FIFO,
2 => SchedPolicy::SCHED_RR,
3 => SchedPolicy::SCHED_BATCH,
5 => SchedPolicy::SCHED_IDLE,
_ => SchedPolicy::SCHED_UNKNOWN,
}
}
}
impl From<SchedPolicy> for isize {
#[inline]
fn from(policy: SchedPolicy) -> Self {
match policy {
SchedPolicy::SCHED_OTHER => 0,
SchedPolicy::SCHED_FIFO => 1,
SchedPolicy::SCHED_RR => 2,
SchedPolicy::SCHED_BATCH => 3,
SchedPolicy::SCHED_IDLE => 5,
SchedPolicy::SCHED_UNKNOWN => -1,
}
}
}
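
// A minimal round-trip sketch of the two conversions above (values follow the
// Linux `SCHED_*` numbering; 4 is unassigned, so it maps to `SCHED_UNKNOWN`):
//
//     let p = SchedPolicy::from(2usize);
//     assert_eq!(isize::from(p), 2);                          // SCHED_RR
//     assert_eq!(isize::from(SchedPolicy::from(4usize)), -1); // SCHED_UNKNOWN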
/// The scheduling status of a task: its policy together with its priority.
#[derive(Clone, Copy)]
pub struct SchedStatus {
    /// The scheduling policy.
    pub policy: SchedPolicy,
    /// The scheduling priority.
    pub priority: usize,
}
/// The inner task structure.
///
/// It holds the identity, state, kernel stack, saved context and
/// (feature-gated) process information of a task/thread.
pub struct TaskInner {
    id: TaskId,
    name: UnsafeCell<String>,
    is_idle: bool,
    is_init: bool,
    /// The task entry point, boxed so that it can be taken in `task_entry`.
    entry: Option<*mut dyn FnOnce()>,
    state: AtomicU8,
    in_wait_queue: AtomicBool,
    #[cfg(feature = "irq")]
    in_timer_list: AtomicBool,
    #[cfg(feature = "preempt")]
    need_resched: AtomicBool,
    #[cfg(feature = "preempt")]
    preempt_disable_count: AtomicUsize,
    exit_code: AtomicI32,
    /// Tasks calling `join` are parked on this queue until the task exits.
    wait_for_exit: WaitQueue,
    kstack: Option<TaskStack>,
    ctx: UnsafeCell<TaskContext>,
    #[cfg(feature = "tls")]
    tls: TlsArea,
    /// The ID of the process this task belongs to.
    #[cfg(feature = "monolithic")]
    process_id: AtomicU64,
    /// Whether the task is the leader (main thread) of its process.
    #[cfg(feature = "monolithic")]
    is_leader: AtomicBool,
    /// The trap frame saved when the task entered the kernel.
    #[cfg(feature = "monolithic")]
    pub trap_frame: UnsafeCell<TrapFrame>,
    /// The page table token of the task's address space.
    #[cfg(feature = "monolithic")]
    pub page_table_token: UnsafeCell<usize>,
    #[cfg(feature = "monolithic")]
    set_child_tid: AtomicU64,
    #[cfg(feature = "monolithic")]
    clear_child_tid: AtomicU64,
    /// Accumulated user/kernel time statistics of the task.
    #[allow(unused)]
    time: UnsafeCell<TimeStat>,
    /// The CPU affinity mask of the task.
    #[cfg(feature = "monolithic")]
    pub cpu_set: AtomicU64,
    /// Whether a `SIGCHLD` should be sent to the parent when this task exits.
    #[cfg(feature = "signal")]
    pub send_sigchld_when_exit: bool,
    /// The scheduling policy and priority of the task.
    #[cfg(feature = "monolithic")]
    pub sched_status: UnsafeCell<SchedStatus>,
}
/// The global counter used to allocate task IDs.
static ID_COUNTER: AtomicU64 = AtomicU64::new(1);
impl TaskId {
    /// Creates a new task ID from the global counter.
    pub fn new() -> Self {
        Self(ID_COUNTER.fetch_add(1, Ordering::Relaxed))
    }

    /// Converts the task ID to a `u64`.
    pub const fn as_u64(&self) -> u64 {
        self.0
    }

    /// Resets the ID counter so that subsequently allocated IDs start from 5.
    /// Intended for use when the task system is re-initialized.
    #[cfg(feature = "monolithic")]
    pub fn clear() {
        ID_COUNTER.store(5, Ordering::Relaxed);
    }
}
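
// Usage sketch: IDs come from a single global atomic counter, so any two
// freshly allocated IDs are distinct (no other ordering is guaranteed):
//
//     let a = TaskId::new();
//     let b = TaskId::new();
//     assert_ne!(a.as_u64(), b.as_u64());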
impl Default for TaskId {
fn default() -> Self {
Self::new()
}
}
impl From<u8> for TaskState {
#[inline]
fn from(state: u8) -> Self {
match state {
1 => Self::Running,
2 => Self::Ready,
3 => Self::Blocked,
4 => Self::Exited,
_ => unreachable!(),
}
}
}
unsafe impl Send for TaskInner {}
unsafe impl Sync for TaskInner {}
impl TaskInner {
    /// Gets the ID of the task.
    pub const fn id(&self) -> TaskId {
        self.id
    }

    /// Gets the name of the task.
    pub fn name(&self) -> &str {
        unsafe { (*self.name.get()).as_str() }
    }

    /// Sets the name of the task.
    pub fn set_name(&self, name: &str) {
        unsafe {
            *self.name.get() = String::from(name);
        }
    }

    /// Gets a combined string of the task ID and name, e.g. `Task(1, "idle")`.
    pub fn id_name(&self) -> String {
        alloc::format!("Task({}, {:?})", self.id.as_u64(), self.name())
    }

    /// Waits until the task exits and returns its exit code.
    pub fn join(&self) -> Option<i32> {
        self.wait_for_exit
            .wait_until(|| self.state() == TaskState::Exited);
        Some(self.exit_code.load(Ordering::Acquire))
    }

    /// Returns the top address of the kernel stack, or `None` if the task has
    /// no kernel stack allocated.
    #[inline]
    pub fn get_kernel_stack_top(&self) -> Option<usize> {
        self.kstack.as_ref().map(|kstack| kstack.top().as_usize())
    }
}
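
// Usage sketch (the `task` handle is a hypothetical `AxTaskRef` obtained from
// this crate's spawn API): `join` parks the caller on the task's exit wait
// queue and then reads the stored exit code.
//
//     task.set_name("worker");
//     let code = task.join(); // blocks until `task` reaches `TaskState::Exited`
//     assert_eq!(code, Some(0));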
/// A crate interface for checking whether a process is in a `vfork` state
/// (see [`TaskInner::is_vfork`]).
#[crate_interface::def_interface]
pub trait VforkCheck {
    /// Returns whether the process with the given ID is in a `vfork` state.
    fn check_vfork(&self, process_id: u64) -> bool;
}
#[cfg(feature = "monolithic")]
impl TaskInner {
    /// Stores the `set_child_tid` address of the task (cf. the `clone` and
    /// `set_tid_address` syscalls).
    pub fn set_child_tid(&self, tid: usize) {
        self.set_child_tid.store(tid as u64, Ordering::Release)
    }

    /// Stores the `clear_child_tid` address of the task.
    pub fn set_clear_child_tid(&self, tid: usize) {
        self.clear_child_tid.store(tid as u64, Ordering::Release)
    }

    /// Gets the `clear_child_tid` address of the task.
    pub fn get_clear_child_tid(&self) -> usize {
        self.clear_child_tid.load(Ordering::Acquire) as usize
    }

    /// Gets the page table token of the task's address space.
    #[inline]
    pub fn get_page_table_token(&self) -> usize {
        unsafe { *self.page_table_token.get() }
    }

    /// Sets the page table token of the task's address space.
    #[inline]
    pub fn set_page_table_token(&self, token: usize) {
        unsafe {
            *self.page_table_token.get() = token;
        }
    }
    /// Updates the time statistics when the task traps from user mode into
    /// kernel mode.
    #[inline]
    pub fn time_stat_from_user_to_kernel(&self) {
        let time = self.time.get();
        unsafe {
            (*time).switch_into_kernel_mode(self.id.as_u64() as isize);
        }
    }

    /// Updates the time statistics when the task returns from kernel mode to
    /// user mode.
    #[inline]
    pub fn time_stat_from_kernel_to_user(&self) {
        let time = self.time.get();
        unsafe {
            (*time).switch_into_user_mode(self.id.as_u64() as isize);
        }
    }

    /// Updates the time statistics when the task is switched out.
    #[inline]
    pub fn time_stat_when_switch_from(&self) {
        let time = self.time.get();
        unsafe {
            (*time).swtich_from_old_task(self.id.as_u64() as isize);
        }
    }

    /// Updates the time statistics when the task is switched in.
    #[inline]
    pub fn time_stat_when_switch_to(&self) {
        let time = self.time.get();
        unsafe {
            (*time).switch_to_new_task(self.id.as_u64() as isize);
        }
    }

    /// Returns the accumulated time statistics of the task, in microseconds.
    #[inline]
    pub fn time_stat_output(&self) -> (usize, usize, usize, usize) {
        let time = self.time.get();
        unsafe { (*time).output_as_us() }
    }

    /// Returns the interval timer values of the task, in microseconds.
    #[inline]
    pub fn timer_output(&self) -> (usize, usize) {
        let time = self.time.get();
        unsafe { (*time).output_timer_as_us() }
    }

    /// Sets an interval timer for the task, returning whether the timer was
    /// set successfully.
    #[inline]
    pub fn set_timer(
        &self,
        timer_interval_ns: usize,
        timer_remained_ns: usize,
        timer_type: usize,
    ) -> bool {
        let time = self.time.get();
        unsafe { (*time).set_timer(timer_interval_ns, timer_remained_ns, timer_type) }
    }

    /// Clears the accumulated time statistics of the task.
    #[inline]
    pub fn time_stat_clear(&self) {
        let time = self.time.get();
        unsafe {
            (*time).clear();
        }
    }
    /// Gets the ID of the process this task belongs to.
    #[inline]
    pub fn get_process_id(&self) -> u64 {
        self.process_id.load(Ordering::Acquire)
    }

    /// Sets the ID of the process this task belongs to.
    #[inline]
    pub fn set_process_id(&self, process_id: u64) {
        self.process_id.store(process_id, Ordering::Release);
    }

    /// Returns a pointer to the first trap frame, located at the top of the
    /// kernel stack.
    ///
    /// # Panics
    ///
    /// Panics if the task has no kernel stack.
    #[inline]
    pub fn get_first_trap_frame(&self) -> *mut TrapFrame {
        if let Some(kstack) = &self.kstack {
            return kstack.get_first_trap_frame();
        }
        unreachable!("get_first_trap_frame: kstack is None");
    }

    /// Sets whether the task is the leader (main thread) of its process.
    pub fn set_leader(&self, is_lead: bool) {
        self.is_leader.store(is_lead, Ordering::Release);
    }

    /// Returns whether the task is the leader (main thread) of its process.
    pub fn is_leader(&self) -> bool {
        self.is_leader.load(Ordering::Acquire)
    }

    /// Stores the given trap frame as the task's saved trap context.
    pub fn set_trap_context(&self, trap_frame: TrapFrame) {
        let now_trap_frame = self.trap_frame.get();
        unsafe {
            *now_trap_frame = trap_frame;
        }
    }

    /// Copies the saved trap frame to the top of the kernel stack, where it is
    /// expected when the task first returns to user space.
    pub fn set_trap_in_kernel_stack(&self) {
        let trap_frame_size = core::mem::size_of::<TrapFrame>();
        let frame_address = self.trap_frame.get();
        let kernel_base = self.get_kernel_stack_top().unwrap() - trap_frame_size;
        unsafe {
            *(kernel_base as *mut TrapFrame) = *frame_address;
        }
    }
    /// Sets the CPU affinity of the task.
    ///
    /// `mask` is the affinity bitmap and `set_size` is the size of the CPU set
    /// passed from user space; the mask is truncated so that it never refers
    /// to more than `axconfig::SMP` CPUs.
    pub fn set_cpu_set(&self, mask: usize, set_size: usize) {
        let len = if set_size * 4 > SMP {
            SMP
        } else {
            set_size * 4
        };
        // Keep only the low `len` bits of the mask.
        let now_mask = mask & ((1 << len) - 1);
        self.cpu_set.store(now_mask as u64, Ordering::Release)
    }

    /// Gets the CPU affinity mask of the task.
    pub fn get_cpu_set(&self) -> usize {
        self.cpu_set.load(Ordering::Acquire) as usize
    }
    /// Sets the scheduling policy and priority of the task.
    pub fn set_sched_status(&self, status: SchedStatus) {
        let prev_status = self.sched_status.get();
        unsafe {
            *prev_status = status;
        }
    }

    /// Gets the scheduling policy and priority of the task.
    pub fn get_sched_status(&self) -> SchedStatus {
        let status = self.sched_status.get();
        unsafe { *status }
    }
    /// Gets a reference to the saved task context.
    pub fn get_ctx(&self) -> &TaskContext {
        unsafe { self.ctx.get().as_ref().unwrap() }
    }

    /// Returns whether a `SIGCHLD` should be sent to the parent when this
    /// task exits.
    #[cfg(feature = "signal")]
    pub fn get_sig_child(&self) -> bool {
        self.send_sigchld_when_exit
    }

    /// Sets whether a `SIGCHLD` should be sent to the parent when this task
    /// exits.
    #[cfg(feature = "signal")]
    pub fn set_sig_child(&mut self, sig_child: bool) {
        self.send_sigchld_when_exit = sig_child;
    }

    /// Forcibly overwrites the `fs_base` field of the saved context, i.e. the
    /// thread-local storage pointer on x86_64.
    ///
    /// # Safety
    ///
    /// The caller must ensure that `value` points to a valid TLS area.
    #[cfg(target_arch = "x86_64")]
    pub unsafe fn set_tls_force(&self, value: usize) {
        self.ctx.get().as_mut().unwrap().fs_base = value;
    }

    /// Returns whether the task's process is currently in a `vfork` state, as
    /// reported by the [`VforkCheck`] interface.
    pub fn is_vfork(&self) -> bool {
        call_interface!(VforkCheck::check_vfork(self.get_process_id()))
    }
}
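
// Sketch of how the time-statistics hooks above are intended to bracket a
// user/kernel transition in the monolithic build (`curr` is a hypothetical
// current-task handle; the trap-handler placement is illustrative):
//
//     // on trap entry from user space
//     curr.time_stat_from_user_to_kernel();
//     // ... handle the syscall or exception ...
//     // on return to user space
//     curr.time_stat_from_kernel_to_user();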
impl TaskInner {
    /// Creates a new task structure with default (feature-gated) fields,
    /// leaving the entry point and kernel stack unset.
    fn new_common(id: TaskId, name: String) -> Self {
Self {
id,
name: UnsafeCell::new(name),
is_idle: false,
is_init: false,
entry: None,
state: AtomicU8::new(TaskState::Ready as u8),
in_wait_queue: AtomicBool::new(false),
#[cfg(feature = "irq")]
in_timer_list: AtomicBool::new(false),
#[cfg(feature = "preempt")]
need_resched: AtomicBool::new(false),
#[cfg(feature = "preempt")]
preempt_disable_count: AtomicUsize::new(0),
exit_code: AtomicI32::new(0),
wait_for_exit: WaitQueue::new(),
kstack: None,
ctx: UnsafeCell::new(TaskContext::new()),
#[cfg(feature = "tls")]
tls: TlsArea::alloc(),
time: UnsafeCell::new(TimeStat::new()),
#[cfg(feature = "monolithic")]
process_id: AtomicU64::new(KERNEL_PROCESS_ID),
#[cfg(feature = "monolithic")]
is_leader: AtomicBool::new(false),
#[cfg(feature = "monolithic")]
trap_frame: UnsafeCell::new(TrapFrame::default()),
#[cfg(feature = "monolithic")]
page_table_token: UnsafeCell::new(0),
#[cfg(feature = "monolithic")]
set_child_tid: AtomicU64::new(0),
#[cfg(feature = "monolithic")]
clear_child_tid: AtomicU64::new(0),
#[cfg(feature = "monolithic")]
cpu_set: AtomicU64::new(((1 << SMP) - 1) as u64),
#[cfg(feature = "monolithic")]
sched_status: UnsafeCell::new(SchedStatus {
policy: SchedPolicy::SCHED_FIFO,
priority: 1,
}),
#[cfg(feature = "signal")]
send_sigchld_when_exit: false,
}
}
    /// Creates a new task with the given entry function, name and kernel
    /// stack size, and returns a reference-counted handle to it.
    ///
    /// Under the `monolithic` feature the owning process ID and page table
    /// token must also be supplied; under the `signal` feature, whether a
    /// `SIGCHLD` should be sent to the parent on exit.
    pub fn new<F>(
        entry: F,
        name: String,
        stack_size: usize,
        #[cfg(feature = "monolithic")] process_id: u64,
        #[cfg(feature = "monolithic")] page_table_token: usize,
        #[cfg(feature = "signal")] sig_child: bool,
    ) -> AxTaskRef
    where
        F: FnOnce() + Send + 'static,
    {
let mut t = Self::new_common(TaskId::new(), name);
debug!("new task: {}", t.id_name());
let kstack = TaskStack::alloc(align_up_4k(stack_size));
#[cfg(feature = "tls")]
let tls = VirtAddr::from(t.tls.tls_ptr() as usize);
#[cfg(not(feature = "tls"))]
let tls = VirtAddr::from(0);
t.entry = Some(Box::into_raw(Box::new(entry)));
#[cfg(feature = "signal")]
t.set_sig_child(sig_child);
        #[cfg(feature = "monolithic")]
        {
            t.process_id.store(process_id, Ordering::Release);
            t.page_table_token = UnsafeCell::new(page_table_token);
            // Reserve space for the first trap frame at the top of the kernel
            // stack; the task context starts just below it.
            t.ctx.get_mut().init(
                task_entry as usize,
                kstack.top() - core::mem::size_of::<TrapFrame>(),
                tls,
            );
        }
        #[cfg(not(feature = "monolithic"))]
        t.ctx.get_mut().init(task_entry as usize, kstack.top(), tls);
t.kstack = Some(kstack);
if unsafe { &*t.name.get() }.as_str() == "idle" {
t.is_idle = true;
}
Arc::new(AxTask::new(t))
}
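
    // Creation sketch for the plain scheduler build (the monolithic/signal
    // builds take the extra feature-gated arguments declared above; the stack
    // size here is illustrative):
    //
    //     let task = TaskInner::new(
    //         || debug!("hello from a new task"),
    //         String::from("worker"),
    //         0x4000, // 16 KiB kernel stack
    //     );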
    /// Creates an "init task" that represents the context already running at
    /// boot time. It has no entry function and no kernel stack of its own.
    pub(crate) fn new_init(name: String) -> AxTaskRef {
        let mut t = Self::new_common(TaskId::new(), name);
t.is_init = true;
if unsafe { &*t.name.get() }.as_str() == "idle" {
t.is_idle = true;
}
Arc::new(AxTask::new(t))
}
    /// Gets the current state of the task.
    #[inline]
    pub fn state(&self) -> TaskState {
        self.state.load(Ordering::Acquire).into()
    }

    /// Sets the state of the task.
    #[inline]
    pub fn set_state(&self, state: TaskState) {
        self.state.store(state as u8, Ordering::Release)
    }
#[inline]
pub(crate) fn is_running(&self) -> bool {
matches!(self.state(), TaskState::Running)
}
#[inline]
pub(crate) fn is_ready(&self) -> bool {
matches!(self.state(), TaskState::Ready)
}
#[inline]
pub(crate) fn is_blocked(&self) -> bool {
matches!(self.state(), TaskState::Blocked)
}
#[inline]
pub(crate) const fn is_init(&self) -> bool {
self.is_init
}
#[inline]
pub(crate) const fn is_idle(&self) -> bool {
self.is_idle
}
#[inline]
pub(crate) fn in_wait_queue(&self) -> bool {
self.in_wait_queue.load(Ordering::Acquire)
}
#[inline]
pub(crate) fn set_in_wait_queue(&self, in_wait_queue: bool) {
self.in_wait_queue.store(in_wait_queue, Ordering::Release);
}
#[inline]
#[cfg(feature = "irq")]
pub(crate) fn in_timer_list(&self) -> bool {
self.in_timer_list.load(Ordering::Acquire)
}
#[inline]
#[cfg(feature = "irq")]
pub(crate) fn set_in_timer_list(&self, in_timer_list: bool) {
self.in_timer_list.store(in_timer_list, Ordering::Release);
}
#[inline]
#[cfg(feature = "preempt")]
pub(crate) fn set_preempt_pending(&self, pending: bool) {
self.need_resched.store(pending, Ordering::Release)
}
#[inline]
#[cfg(feature = "preempt")]
pub(crate) fn can_preempt(&self, current_disable_count: usize) -> bool {
self.preempt_disable_count.load(Ordering::Acquire) == current_disable_count
}
#[inline]
#[cfg(feature = "preempt")]
pub(crate) fn disable_preempt(&self) {
self.preempt_disable_count.fetch_add(1, Ordering::Relaxed);
}
    /// Decreases the preempt-disable count; if it drops to zero and `resched`
    /// is set, checks whether a preemption is pending.
    #[inline]
    #[cfg(feature = "preempt")]
    pub(crate) fn enable_preempt(&self, resched: bool) {
        if self.preempt_disable_count.fetch_sub(1, Ordering::Relaxed) == 1 && resched {
            Self::current_check_preempt_pending();
        }
    }

    #[cfg(feature = "preempt")]
    fn current_check_preempt_pending() {
        let curr = crate::current();
        if curr.need_resched.load(Ordering::Acquire) && curr.can_preempt(0) {
            let mut rq = crate::RUN_QUEUE.lock();
            // Re-check with the run queue locked, since the flag may have been
            // cleared by a concurrent reschedule.
            if curr.need_resched.load(Ordering::Acquire) {
                rq.preempt_resched();
            }
        }
    }
    /// Stores the exit code and wakes up all tasks waiting in `join`.
    pub(crate) fn notify_exit(&self, exit_code: i32, rq: &mut AxRunQueue) {
        self.exit_code.store(exit_code, Ordering::Release);
        self.wait_for_exit.notify_all_locked(false, rq);
    }

    /// Returns a raw pointer to the saved task context, for use during
    /// context switching.
    #[inline]
    pub(crate) const unsafe fn ctx_mut_ptr(&self) -> *mut TaskContext {
        self.ctx.get()
    }
}
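
// Sketch of the intended pairing of the preemption guards (crate-internal,
// `preempt` feature): every `disable_preempt` must be matched by an
// `enable_preempt`, and a reschedule is only attempted once the disable count
// drops back to zero.
//
//     task.disable_preempt();
//     // ... critical section that must not be preempted ...
//     task.enable_preempt(true); // may call `preempt_resched` on the run queue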
impl fmt::Debug for TaskInner {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
f.debug_struct("TaskInner")
.field("id", &self.id)
.field("name", &self.name)
.field("state", &self.state())
.finish()
}
}
impl Drop for TaskInner {
fn drop(&mut self) {
debug!("task drop: {}", self.id_name());
}
}
/// A heap-allocated kernel stack for a task.
struct TaskStack {
    ptr: NonNull<u8>,
    layout: Layout,
}
impl TaskStack {
    /// Allocates a stack of the given size with 16-byte alignment, panicking
    /// on allocation failure.
    pub fn alloc(size: usize) -> Self {
        let layout = Layout::from_size_align(size, 16).unwrap();
        Self {
            ptr: NonNull::new(unsafe { alloc::alloc::alloc(layout) }).unwrap(),
            layout,
        }
    }

    /// Returns the top (highest address) of the stack.
    pub const fn top(&self) -> VirtAddr {
        unsafe { core::mem::transmute(self.ptr.as_ptr().add(self.layout.size())) }
    }

    /// Returns a pointer to the trap frame region reserved at the top of the
    /// stack.
    #[cfg(feature = "monolithic")]
    pub fn get_first_trap_frame(&self) -> *mut TrapFrame {
        (self.top().as_usize() - core::mem::size_of::<TrapFrame>()) as *mut TrapFrame
    }
}
impl Drop for TaskStack {
fn drop(&mut self) {
unsafe { alloc::alloc::dealloc(self.ptr.as_ptr(), self.layout) }
}
}
use core::mem::ManuallyDrop;

/// A wrapper of [`AxTaskRef`] for the task currently running on this CPU.
///
/// It is wrapped in [`ManuallyDrop`] because the per-CPU current-task pointer
/// holds the reference; dropping this wrapper must not decrease the count.
pub struct CurrentTask(ManuallyDrop<AxTaskRef>);
impl CurrentTask {
pub(crate) fn try_get() -> Option<Self> {
let ptr: *const super::AxTask = axhal::cpu::current_task_ptr();
if !ptr.is_null() {
Some(Self(unsafe { ManuallyDrop::new(AxTaskRef::from_raw(ptr)) }))
} else {
None
}
}
pub(crate) fn get() -> Self {
Self::try_get().expect("current task is uninitialized")
}
    /// Converts [`CurrentTask`] to an [`AxTaskRef`] reference.
    pub fn as_task_ref(&self) -> &AxTaskRef {
        &self.0
    }
pub(crate) fn clone(&self) -> AxTaskRef {
self.0.deref().clone()
}
pub(crate) fn ptr_eq(&self, other: &AxTaskRef) -> bool {
Arc::ptr_eq(&self.0, other)
}
pub(crate) unsafe fn init_current(init_task: AxTaskRef) {
#[cfg(feature = "tls")]
axhal::arch::write_thread_pointer(init_task.tls.tls_ptr() as usize);
let ptr = Arc::into_raw(init_task);
axhal::cpu::set_current_task_ptr(ptr);
}
    pub(crate) unsafe fn set_current(prev: Self, next: AxTaskRef) {
        let Self(arc) = prev;
        // Drop the reference to the previous task that was held by the
        // per-CPU current-task pointer.
        ManuallyDrop::into_inner(arc);
        let ptr = Arc::into_raw(next);
        axhal::cpu::set_current_task_ptr(ptr);
    }
}
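
// Usage sketch: the running task is normally reached through `crate::current()`,
// which wraps `CurrentTask::get()`; the returned value derefs to `TaskInner`.
//
//     let curr = crate::current();
//     debug!("running {}", curr.id_name());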
impl Deref for CurrentTask {
type Target = TaskInner;
fn deref(&self) -> &Self::Target {
self.0.deref()
}
}
extern "C" fn task_entry() -> ! {
unsafe { crate::RUN_QUEUE.force_unlock() };
#[cfg(feature = "irq")]
axhal::arch::enable_irqs();
let task = crate::current();
if let Some(entry) = task.entry {
cfg_if::cfg_if! {
if #[cfg(feature = "monolithic")] {
use axhal::KERNEL_PROCESS_ID;
if task.get_process_id() == KERNEL_PROCESS_ID {
unsafe { Box::from_raw(entry)() };
} else {
let kernel_sp = task.get_kernel_stack_top().unwrap();
let frame_address = task.get_first_trap_frame();
task.time_stat_from_kernel_to_user();
axhal::arch::first_into_user(kernel_sp, frame_address as usize);
}
}
else {
unsafe { Box::from_raw(entry)() };
}
}
}
crate::exit(0);
}