surenyi
6 months ago
9 changed files with 165 additions and 1765 deletions
@ -1,218 +0,0 @@ |
|||
/*
|
|||
* Copyright (c) 2006-2023, RT-Thread Development Team |
|||
* |
|||
* SPDX-License-Identifier: Apache-2.0 |
|||
* |
|||
* Change Logs: |
|||
* Date Author Notes |
|||
* 2018-10-30 Bernard The first version |
|||
* 2023-09-15 xqyjlj perf rt_hw_interrupt_disable/enable |
|||
* 2023-12-10 xqyjlj spinlock should lock sched |
|||
* 2024-01-25 Shell Using rt_exit_critical_safe |
|||
*/ |
|||
#include <rthw.h> |
|||
#include <rtthread.h> |
|||
|
|||
#ifdef RT_USING_SMART |
|||
#include <lwp.h> |
|||
#endif |
|||
|
|||
#ifdef RT_USING_DEBUG
/* Critical-section nesting level recorded when _cpus_lock was taken;
 * handed back to rt_exit_critical_safe() on the matching unlock. */
rt_base_t _cpus_critical_level;
#endif /* RT_USING_DEBUG */

/* Per-CPU objects, indexed by hardware CPU id (see rt_cpu_self/rt_cpu_index). */
static struct rt_cpu _cpus[RT_CPUS_NR];
/* Global spinlock taken by rt_cpus_lock() to freeze scheduling on all CPUs. */
rt_hw_spinlock_t _cpus_lock;
#if defined(RT_DEBUGING_SPINLOCK)
/* Debug bookkeeping: owning thread and caller PC of the current
 * _cpus_lock holder (reset to __OWNER_MAGIC/RT_NULL on release). */
void *_cpus_lock_owner = 0;
void *_cpus_lock_pc = 0;

#endif /* RT_DEBUGING_SPINLOCK */
|||
|
|||
/**
|
|||
* @brief Initialize a static spinlock object. |
|||
* |
|||
* @param lock is a pointer to the spinlock to initialize. |
|||
*/ |
|||
void rt_spin_lock_init(struct rt_spinlock *lock) |
|||
{ |
|||
rt_hw_spin_lock_init(&lock->lock); |
|||
} |
|||
RTM_EXPORT(rt_spin_lock_init) |
|||
|
|||
/**
|
|||
* @brief This function will lock the spinlock, will lock the thread scheduler. |
|||
* |
|||
* @note If the spinlock is locked, the current CPU will keep polling the spinlock state |
|||
* until the spinlock is unlocked. |
|||
* |
|||
* @param lock is a pointer to the spinlock. |
|||
*/ |
|||
void rt_spin_lock(struct rt_spinlock *lock) |
|||
{ |
|||
rt_enter_critical(); |
|||
rt_hw_spin_lock(&lock->lock); |
|||
RT_SPIN_LOCK_DEBUG(lock); |
|||
} |
|||
RTM_EXPORT(rt_spin_lock) |
|||
|
|||
/**
|
|||
* @brief This function will unlock the spinlock, will unlock the thread scheduler. |
|||
* |
|||
* @param lock is a pointer to the spinlock. |
|||
*/ |
|||
void rt_spin_unlock(struct rt_spinlock *lock) |
|||
{ |
|||
rt_base_t critical_level; |
|||
RT_SPIN_UNLOCK_DEBUG(lock, critical_level); |
|||
rt_hw_spin_unlock(&lock->lock); |
|||
rt_exit_critical_safe(critical_level); |
|||
} |
|||
RTM_EXPORT(rt_spin_unlock) |
|||
|
|||
/**
|
|||
* @brief This function will disable the local interrupt and then lock the spinlock, will lock the thread scheduler. |
|||
* |
|||
* @note If the spinlock is locked, the current CPU will keep polling the spinlock state |
|||
* until the spinlock is unlocked. |
|||
* |
|||
* @param lock is a pointer to the spinlock. |
|||
* |
|||
* @return Return current cpu interrupt status. |
|||
*/ |
|||
rt_base_t rt_spin_lock_irqsave(struct rt_spinlock *lock) |
|||
{ |
|||
rt_base_t level; |
|||
|
|||
level = rt_hw_local_irq_disable(); |
|||
rt_enter_critical(); |
|||
rt_hw_spin_lock(&lock->lock); |
|||
RT_SPIN_LOCK_DEBUG(lock); |
|||
return level; |
|||
} |
|||
RTM_EXPORT(rt_spin_lock_irqsave) |
|||
|
|||
/**
|
|||
* @brief This function will unlock the spinlock and then restore current cpu interrupt status, will unlock the thread scheduler. |
|||
* |
|||
* @param lock is a pointer to the spinlock. |
|||
* |
|||
* @param level is interrupt status returned by rt_spin_lock_irqsave(). |
|||
*/ |
|||
void rt_spin_unlock_irqrestore(struct rt_spinlock *lock, rt_base_t level) |
|||
{ |
|||
rt_base_t critical_level; |
|||
|
|||
RT_SPIN_UNLOCK_DEBUG(lock, critical_level); |
|||
rt_hw_spin_unlock(&lock->lock); |
|||
rt_exit_critical_safe(critical_level); |
|||
rt_hw_local_irq_enable(level); |
|||
} |
|||
RTM_EXPORT(rt_spin_unlock_irqrestore) |
|||
|
|||
/**
|
|||
* @brief This fucntion will return current cpu object. |
|||
* |
|||
* @return Return a pointer to the current cpu object. |
|||
*/ |
|||
struct rt_cpu *rt_cpu_self(void) |
|||
{ |
|||
return &_cpus[rt_hw_cpu_id()]; |
|||
} |
|||
|
|||
/**
|
|||
* @brief This fucntion will return the cpu object corresponding to index. |
|||
* |
|||
* @param index is the index of target cpu object. |
|||
* |
|||
* @return Return a pointer to the cpu object corresponding to index. |
|||
*/ |
|||
struct rt_cpu *rt_cpu_index(int index) |
|||
{ |
|||
return &_cpus[index]; |
|||
} |
|||
|
|||
/**
|
|||
* @brief This function will lock all cpus's scheduler and disable local irq. |
|||
* |
|||
* @return Return current cpu interrupt status. |
|||
*/ |
|||
rt_base_t rt_cpus_lock(void) |
|||
{ |
|||
rt_base_t level; |
|||
struct rt_cpu* pcpu; |
|||
|
|||
level = rt_hw_local_irq_disable(); |
|||
pcpu = rt_cpu_self(); |
|||
if (pcpu->current_thread != RT_NULL) |
|||
{ |
|||
register rt_ubase_t lock_nest = rt_atomic_load(&(pcpu->current_thread->cpus_lock_nest)); |
|||
|
|||
rt_atomic_add(&(pcpu->current_thread->cpus_lock_nest), 1); |
|||
if (lock_nest == 0) |
|||
{ |
|||
rt_enter_critical(); |
|||
rt_hw_spin_lock(&_cpus_lock); |
|||
#ifdef RT_USING_DEBUG |
|||
_cpus_critical_level = rt_critical_level(); |
|||
#endif /* RT_USING_DEBUG */ |
|||
|
|||
#ifdef RT_DEBUGING_SPINLOCK |
|||
_cpus_lock_owner = pcpu->current_thread; |
|||
_cpus_lock_pc = __GET_RETURN_ADDRESS; |
|||
#endif /* RT_DEBUGING_SPINLOCK */ |
|||
} |
|||
} |
|||
|
|||
return level; |
|||
} |
|||
RTM_EXPORT(rt_cpus_lock); |
|||
|
|||
/**
|
|||
* @brief This function will restore all cpus's scheduler and restore local irq. |
|||
* |
|||
* @param level is interrupt status returned by rt_cpus_lock(). |
|||
*/ |
|||
void rt_cpus_unlock(rt_base_t level) |
|||
{ |
|||
struct rt_cpu* pcpu = rt_cpu_self(); |
|||
|
|||
if (pcpu->current_thread != RT_NULL) |
|||
{ |
|||
rt_base_t critical_level = 0; |
|||
RT_ASSERT(rt_atomic_load(&(pcpu->current_thread->cpus_lock_nest)) > 0); |
|||
rt_atomic_sub(&(pcpu->current_thread->cpus_lock_nest), 1); |
|||
|
|||
if (pcpu->current_thread->cpus_lock_nest == 0) |
|||
{ |
|||
#if defined(RT_DEBUGING_SPINLOCK) |
|||
_cpus_lock_owner = __OWNER_MAGIC; |
|||
_cpus_lock_pc = RT_NULL; |
|||
#endif /* RT_DEBUGING_SPINLOCK */ |
|||
#ifdef RT_USING_DEBUG |
|||
critical_level = _cpus_critical_level; |
|||
_cpus_critical_level = 0; |
|||
#endif /* RT_USING_DEBUG */ |
|||
rt_hw_spin_unlock(&_cpus_lock); |
|||
rt_exit_critical_safe(critical_level); |
|||
} |
|||
} |
|||
rt_hw_local_irq_enable(level); |
|||
} |
|||
RTM_EXPORT(rt_cpus_unlock); |
|||
|
|||
/**
 * This function is invoked by scheduler.
 * It will restore the lock state to whatever the thread's counter expects.
 * If target thread not locked the cpus then unlock the cpus lock.
 *
 * @param thread is a pointer to the target thread.
 */
void rt_cpus_lock_status_restore(struct rt_thread *thread)
{
#if defined(ARCH_MM_MMU) && defined(RT_USING_SMART)
    /* switch address space before finishing the context switch */
    lwp_aspace_switch(thread);
#endif
    rt_sched_post_ctx_switch(thread);
}
RTM_EXPORT(rt_cpus_lock_status_restore);
File diff suppressed because it is too large
@ -0,0 +1,145 @@ |
|||
/*
 * linker script for STM32F429xx with GNU ld
 * bernard.xiong 2009-10-14
 */

/* Program Entry, set to mark it as "used" and avoid gc */
MEMORY
{
    FLASH (rx) : ORIGIN = 0x08000000, LENGTH = 2048k /* 2048KB flash */
    SRAM (rw) : ORIGIN = 0x20000000, LENGTH = 192k /* 192KB sram */
    CCRAM (rwx) : ORIGIN = 0x10000000, LENGTH = 64K
}

ENTRY(Reset_Handler)
_system_stack_size = DEFINED(_system_stack_size) ? _system_stack_size : 0x200;

SECTIONS
{
    .text :
    {
        . = ALIGN(4);
        _stext = .;
        KEEP(*(.isr_vector))            /* Startup code */
        . = ALIGN(4);
        *(.text)                        /* remaining code */
        *(.text.*)                      /* remaining code */
        *(.rodata)                      /* read-only data (constants) */
        *(.rodata*)
        *(.glue_7)
        *(.glue_7t)
        *(.gnu.linkonce.t*)

        /* section information for finsh shell */
        . = ALIGN(4);
        __fsymtab_start = .;
        KEEP(*(FSymTab))
        __fsymtab_end = .;
        . = ALIGN(4);
        __vsymtab_start = .;
        KEEP(*(VSymTab))
        __vsymtab_end = .;
        . = ALIGN(4);

        /* section information for initial. */
        . = ALIGN(4);
        __rt_init_start = .;
        KEEP(*(SORT(.rti_fn*)))
        __rt_init_end = .;
        . = ALIGN(4);

        . = ALIGN(4);
        _etext = .;
    } > FLASH = 0

    /* .ARM.exidx is sorted, so has to go in its own output section. */
    __exidx_start = .;
    .ARM.exidx :
    {
        *(.ARM.exidx* .gnu.linkonce.armexidx.*)

        /* This is used by the startup in order to initialize the .data section */
        _sidata = .;
    } > FLASH
    __exidx_end = .;

    /* .data section which is used for initialized data */

    .data : AT (_sidata)
    {
        . = ALIGN(4);
        /* This is used by the startup in order to initialize the .data section */
        _sdata = . ;

        *(.data)
        *(.data.*)
        *(.gnu.linkonce.d*)

        . = ALIGN(4);
        /* This is used by the startup in order to initialize the .data section */
        _edata = . ;
    } >SRAM

    __bss_start = .;
    .bss :
    {
        . = ALIGN(4);
        /* This is used by the startup in order to initialize the .bss section */
        _sbss = .;

        *(.bss)
        *(.bss.*)
        *(COMMON)

        . = ALIGN(4);
        /* This is used by the startup in order to initialize the .bss section */
        _ebss = . ;

        *(.bss.init)
    } > SRAM
    __bss_end = .;

    .stack :
    {
        . = . + _system_stack_size;
        . = ALIGN(4);
        _estack = .;
    } >SRAM

    _end = .;
    __ram_end__ = ORIGIN(SRAM) + LENGTH(SRAM);

    /* Stabs debugging sections. */
    .stab 0 : { *(.stab) }
    .stabstr 0 : { *(.stabstr) }
    .stab.excl 0 : { *(.stab.excl) }
    .stab.exclstr 0 : { *(.stab.exclstr) }
    .stab.index 0 : { *(.stab.index) }
    .stab.indexstr 0 : { *(.stab.indexstr) }
    .comment 0 : { *(.comment) }
    /* DWARF debug sections.
     * Symbols in the DWARF debugging sections are relative to the beginning
     * of the section so we begin them at 0. */
    /* DWARF 1 */
    .debug 0 : { *(.debug) }
    .line 0 : { *(.line) }
    /* GNU DWARF 1 extensions */
    .debug_srcinfo 0 : { *(.debug_srcinfo) }
    .debug_sfnames 0 : { *(.debug_sfnames) }
    /* DWARF 1.1 and DWARF 2 */
    .debug_aranges 0 : { *(.debug_aranges) }
    .debug_pubnames 0 : { *(.debug_pubnames) }
    /* DWARF 2 */
    .debug_info 0 : { *(.debug_info .gnu.linkonce.wi.*) }
    .debug_abbrev 0 : { *(.debug_abbrev) }
    .debug_line 0 : { *(.debug_line) }
    .debug_frame 0 : { *(.debug_frame) }
    .debug_str 0 : { *(.debug_str) }
    .debug_loc 0 : { *(.debug_loc) }
    .debug_macinfo 0 : { *(.debug_macinfo) }
    /* SGI/MIPS DWARF 2 extensions */
    .debug_weaknames 0 : { *(.debug_weaknames) }
    .debug_funcnames 0 : { *(.debug_funcnames) }
    .debug_typenames 0 : { *(.debug_typenames) }
    .debug_varnames 0 : { *(.debug_varnames) }
}
@ -1,234 +0,0 @@ |
|||
/* vim: set ts=4 sw=4 et fdm=marker:
 *
 * Linker script to configure memory regions.
 * Need modifying for a specific board.
 *   FLASH.ORIGIN: starting address of flash
 *   FLASH.LENGTH: length of flash
 *   RAM.ORIGIN: starting address of RAM bank 0
 *   RAM.LENGTH: length of RAM bank 0
 */
MEMORY
{
    FLASH (rx) : ORIGIN = 0x08000000, LENGTH = 512K
    RAM (rwx) : ORIGIN = 0x20000000, LENGTH = 128K
    TCM (rwx) : ORIGIN = 0x10000000, LENGTH = 64K
}

/* Linker script to place sections and symbol values. Should be used together
 * with other linker script that defines memory regions FLASH and RAM.
 * It references following symbols, which must be defined in code:
 *   Reset_Handler : Entry of reset handler
 *
 * It defines following symbols, which code can use without definition:
 *   __exidx_start
 *   __exidx_end
 *   __copy_table_start__
 *   __copy_table_end__
 *   __zero_table_start__
 *   __zero_table_end__
 *   __etext
 *   __data_start__
 *   __preinit_array_start
 *   __preinit_array_end
 *   __init_array_start
 *   __init_array_end
 *   __fini_array_start
 *   __fini_array_end
 *   __data_end__
 *   __bss_start__
 *   __bss_end__
 *   __end__
 *   end
 *   __HeapLimit
 *   __StackLimit
 *   __StackTop
 *   __stack
 */
ENTRY(Reset_Handler)

SECTIONS
{
    .text :
    {
        __text_start__ = .;

        . = ALIGN(4);
        KEEP(*(.isr_vector))
        *(.text*)

        KEEP(*(.init))
        KEEP(*(.fini))

        /* .ctors */
        *crtbegin.o(.ctors)
        *crtbegin?.o(.ctors)
        *(EXCLUDE_FILE(*crtend?.o *crtend.o) .ctors)
        *(SORT(.ctors.*))
        *(.ctors)

        /* .dtors */
        *crtbegin.o(.dtors)
        *crtbegin?.o(.dtors)
        *(EXCLUDE_FILE(*crtend?.o *crtend.o) .dtors)
        *(SORT(.dtors.*))
        *(.dtors)

        /* section information for finsh shell */
        . = ALIGN(4);
        __fsymtab_start = .;
        KEEP(*(FSymTab))
        __fsymtab_end = .;

        . = ALIGN(4);
        __vsymtab_start = .;
        KEEP(*(VSymTab))
        __vsymtab_end = .;

        /* section information for initial. */
        . = ALIGN(4);
        __rt_init_start = .;
        KEEP(*(SORT(.rti_fn*)))
        __rt_init_end = .;

        . = ALIGN(4);

        /* NOTE(review): .init_array inputs are consumed here, so the
         * __init_array_start/__init_array_end bracket in .rodata below will
         * be empty — confirm which pair the startup code actually walks. */
        PROVIDE(__ctors_start__ = .);
        KEEP (*(SORT(.init_array.*)))
        KEEP (*(.init_array))
        PROVIDE(__ctors_end__ = .);

        KEEP(*(.eh_frame*))
    } > FLASH

    .ARM.extab :
    {
        *(.ARM.extab* .gnu.linkonce.armextab.*)
    } > FLASH

    __exidx_start = .;
    .ARM.exidx :
    {
        *(.ARM.exidx* .gnu.linkonce.armexidx.*)
    } > FLASH
    __exidx_end = .;


    .rodata : AT(__exidx_end)
    {
        . = ALIGN(4);
        __rodata_start = .;
        *(.rodata*)

        . = ALIGN(4);
        /* preinit data */
        PROVIDE_HIDDEN (__preinit_array_start = .);
        KEEP(*(.preinit_array))
        PROVIDE_HIDDEN (__preinit_array_end = .);

        . = ALIGN(4);
        /* init data */
        PROVIDE_HIDDEN (__init_array_start = .);
        KEEP(*(SORT(.init_array.*)))
        KEEP(*(.init_array))
        PROVIDE_HIDDEN (__init_array_end = .);

        . = ALIGN(4);
        /* fini data */
        PROVIDE_HIDDEN (__fini_array_start = .);
        KEEP(*(SORT(.fini_array.*)))
        KEEP(*(.fini_array))
        PROVIDE_HIDDEN (__fini_array_end = .);

        . = ALIGN(4);
        __rodata_end = .;
    } > FLASH


    /* To copy multiple ROM to RAM sections,
     * uncomment .copy.table section and,
     * define __STARTUP_COPY_MULTIPLE in startup_ARMCMx.S */
    /*
    .copy.table :
    {
        . = ALIGN(4);
        __copy_table_start__ = .;
        LONG (__etext)
        LONG (__data_start__)
        LONG (__data_end__ - __data_start__)
        LONG (__etext2)
        LONG (__data2_start__)
        LONG (__data2_end__ - __data2_start__)
        __copy_table_end__ = .;
    } > FLASH
    */

    /* To clear multiple BSS sections,
     * uncomment .zero.table section and,
     * define __STARTUP_CLEAR_BSS_MULTIPLE in startup_ARMCMx.S */
    /*
    .zero.table :
    {
        . = ALIGN(4);
        __zero_table_start__ = .;
        LONG (__bss_start__)
        LONG (__bss_end__ - __bss_start__)
        LONG (__bss2_start__)
        LONG (__bss2_end__ - __bss2_start__)
        __zero_table_end__ = .;
    } > FLASH
    */

    __etext = .;
    .data : AT (__etext)
    {
        __data_start__ = .;
        *(vtable)
        *(.data*)

        KEEP(*(.jcr*))
        . = ALIGN(4);
        /* All data end */
        __data_end__ = .;
    } > RAM

    .bss :
    {
        . = ALIGN(4);
        __bss_start__ = .;
        *(.bss*)
        *(COMMON)
        . = ALIGN(4);
        *(.rxdma*)
        *(.txdma*)
        *(.txbuf*)
        . = ALIGN(4);
        __bss_end__ = .;
    } > RAM

    .heap (NOLOAD):
    {
        __end__ = .;
        PROVIDE(end = .);
        *(.heap*)
        __HeapLimit = .;
    } > RAM

    /* .stack_dummy section doesn't contains any symbols. It is only
     * used for linker to calculate size of stack sections, and assign
     * values to stack symbols later */
    .stack_dummy (NOLOAD):
    {
        *(.stack*)
    } > RAM

    /* Set stack top to end of RAM, and stack limit move down by
     * size of stack_dummy section */
    __StackTop = ORIGIN(RAM) + LENGTH(RAM);
    __StackLimit = __StackTop - SIZEOF(.stack_dummy);
    PROVIDE(__stack = __StackTop);

    __ram_end__ = ORIGIN(RAM) + LENGTH(RAM) -1 ;

    /* Check if data + heap + stack exceeds RAM limit */
    ASSERT(__StackLimit >= __HeapLimit, "region RAM overflowed with stack")
}
Loading…
Reference in new issue