
update linker.ld

Signed-off-by: surenyi <surenyi82@163.com>
Branch: master
Author: surenyi (6 months ago)
Commit: 172df562ad
  1. CMakeLists.txt (2 lines changed)
  2. driver/start.c (26 lines changed)
  3. driver/vectors.c (4 lines changed)
  4. rt-thread/bsp/board.c (7 lines changed)
  5. rt-thread/bsp/rtconfig.h (1 line changed)
  6. rt-thread/src/cpu_mp.c (218 lines changed)
  7. rt-thread/src/scheduler_mp.c (1293 lines changed)
  8. scripts/stm32_rom.ld (145 lines changed)
  9. scripts/stm32f4xx_linker.ld (234 lines changed)

CMakeLists.txt (2 lines changed)

@@ -146,7 +146,7 @@ tgt_add_linker_script(${tgt_name} PRIVATE ${lds_name})
# -P: prevents preprocessor from generating linemarkers (#line directives)
# -x c: tells GCC to treat the linker script as a C source file
add_custom_target(gen_lds
${CMAKE_C_COMPILER} -E -P -x c ${PROJECT_SOURCE_DIR}/scripts/stm32f4xx_linker.ld -o ${lds_name}
${CMAKE_C_COMPILER} -E -P -x c ${PROJECT_SOURCE_DIR}/scripts/stm32_rom.ld -o ${lds_name}
COMMENT "Generate link script"
VERBATIM)
add_dependencies(${tgt_name} gen_lds)
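
The gen_lds target above runs the C preprocessor over the linker-script source, so the .ld input may use #include/#define and still produce a clean script (-P keeps #line markers out of the output). As a minimal sketch of why that is useful, a shared header such as the following could feed both C sources and the linker script; memory_layout.h and its macros are hypothetical and not part of this commit:

/* memory_layout.h -- hypothetical header shared by C code and the
 * preprocessed linker script (gcc -E -P -x c expands it before ld runs). */
#ifndef MEMORY_LAYOUT_H
#define MEMORY_LAYOUT_H

#define FLASH_ORIGIN   0x08000000
#define FLASH_LENGTH   (2048 * 1024)   /* 2 MB flash */
#define SRAM_ORIGIN    0x20000000
#define SRAM_LENGTH    (192 * 1024)    /* 192 KB SRAM */

#endif /* MEMORY_LAYOUT_H */

The linker-script source would then write ORIGIN = FLASH_ORIGIN, LENGTH = FLASH_LENGTH instead of hard-coded numbers, and the generated ${lds_name} stays a plain ld script.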

driver/start.c (26 lines changed)

@@ -45,29 +45,29 @@ void __libc_init_array (void)
/* }}} */
/* {{{ Reset_Handler */
extern unsigned int __StackTop;
extern unsigned int __etext;
extern unsigned int __data_start__;
extern unsigned int __data_end__;
extern unsigned int __bss_start__;
extern unsigned int __bss_end__;
extern unsigned int _estack;
extern unsigned int _sidata;
extern unsigned int _sdata;
extern unsigned int _edata;
extern unsigned int _sbss;
extern unsigned int _ebss;
static void __attribute__((naked)) __do_cpu_boot()
{
unsigned int *dstp, *srcp;
srcp = &__etext;
dstp = &__data_start__;
srcp = &_sidata;
dstp = &_sdata;
if (srcp != dstp) { /* need to be load */
while (dstp < (&__data_end__)) {
while (dstp < (&_edata)) {
*dstp++ = *srcp++;
}
}
/* clear the bss */
dstp = &__bss_start__;
while (dstp < (&__bss_end__)) {
dstp = &_sbss;
while (dstp < (&_ebss)) {
*dstp++ = 0;
}
@@ -85,9 +85,9 @@ Reset_Handler(void)
{
#if defined ( __CC_ARM )
register unsigned int __reg_msp __asm("msp");
__reg_msp = (unsigned int)&__StackTop;
__reg_msp = (unsigned int)&_estack;
#else /* gcc or armclang */
__asm volatile ("MSR msp, %0" : : "r" (&__StackTop) : );
__asm volatile ("MSR msp, %0" : : "r" (&_estack) : );
#endif
__do_cpu_boot();
while (1) {
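
The hunk above swaps the CMSIS-style symbols from the removed stm32f4xx_linker.ld (__etext, __data_start__, __data_end__, __bss_start__, __bss_end__, __StackTop) for the names exported by the new scripts/stm32_rom.ld (_sidata, _sdata, _edata, _sbss, _ebss, _estack). A minimal, self-contained sketch of the same boot sequence using only those symbols (illustrative, not the project's exact code; the srcp != dstp check for RAM-resident images is omitted):

/* Copy .data from its FLASH load address to SRAM, then zero .bss.
 * Symbol names come from scripts/stm32_rom.ld. */
extern unsigned int _sidata;  /* load address of .data in FLASH */
extern unsigned int _sdata;   /* run address of .data in SRAM  */
extern unsigned int _edata;   /* end of .data in SRAM          */
extern unsigned int _sbss;    /* start of .bss                 */
extern unsigned int _ebss;    /* end of .bss                   */

static void copy_data_and_clear_bss(void)
{
    unsigned int *src = &_sidata;
    unsigned int *dst = &_sdata;

    while (dst < &_edata)         /* initialized data: FLASH -> SRAM */
        *dst++ = *src++;

    for (dst = &_sbss; dst < &_ebss; )
        *dst++ = 0;               /* uninitialized data: zero-fill   */
}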

driver/vectors.c (4 lines changed)

@@ -199,12 +199,12 @@ void DMA2D_IRQHandler (void) __attribute__ ((weak, alias("Dummy_
/* }}} */
/* Initialize segments */
extern uint32_t __StackTop;
extern uint32_t _estack;
/* {{{ Exception Table */
/* clang-format off */
__attribute__((section(".isr_vector"))) const void * const __gVectors[] = {
(void *)&__StackTop , /* Top of Stack */
(void *)&_estack , /* Top of Stack */
(void *)Reset_Handler , /* 1:Reset Handler */
(void *)NMI_Handler , /* 2:NMI Handler */
(void *)HardFault_Handler , /* 3:Hard Fault Handler */
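
Only the first table entry changes here: on a Cortex-M the hardware loads the initial MSP from vector 0 and the reset address from vector 1, so entry 0 must now be &_estack (the top of the .stack region reserved in stm32_rom.ld) instead of __StackTop. A small illustrative check, not part of this commit, that ties the two together:

/* Sketch: confirm that vector 0 is the stack-top symbol exported by the
 * linker script. Purely illustrative. */
#include <stdint.h>

extern uint32_t _estack;
extern const void * const __gVectors[];

int vector_table_looks_sane(void)
{
    return __gVectors[0] == (const void *)&_estack;
}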

rt-thread/bsp/board.c (7 lines changed)

@@ -56,15 +56,16 @@ void rt_delay_microseconds(uint32_t us)
#define BSP_HEAP_SIZE 1024
#endif
static uint32_t rt_heap[BSP_HEAP_SIZE] __attribute__((section(".heap.__main__"))); // heap default size: 4K(1024 * 4)
extern int _end;
extern int __ram_end__;
rt_weak void *rt_heap_begin_get(void)
{
return rt_heap;
return &_end;
}
rt_weak void *rt_heap_end_get(void)
{
return rt_heap + BSP_HEAP_SIZE;
return &__ram_end__;
}
#endif
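
With rt_heap_begin_get()/rt_heap_end_get() now returning linker symbols instead of the static rt_heap array, the heap runs from _end (the first free address after .data/.bss/.stack in stm32_rom.ld) up to __ram_end__ (ORIGIN(SRAM) + LENGTH(SRAM)). A hedged sketch of how these bounds end up feeding the kernel allocator; the call site shown is illustrative, RT-Thread normally performs it from its own startup code:

/* Sketch: feed the linker-derived heap bounds to the RT-Thread allocator. */
#include <rtthread.h>

extern int _end;         /* defined in scripts/stm32_rom.ld */
extern int __ram_end__;  /* ORIGIN(SRAM) + LENGTH(SRAM)     */

void board_heap_init(void)
{
    /* everything between _end and the end of SRAM becomes kernel heap */
    rt_system_heap_init((void *)&_end, (void *)&__ram_end__);
}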

rt-thread/bsp/rtconfig.h (1 line changed)

@@ -261,5 +261,4 @@
#define RT_LWIP_RAW 1
#define RT_LWIP_REASSEMBLY_FRAG 1
#define BSP_HEAP_SIZE (30 * 1024)
#endif

rt-thread/src/cpu_mp.c (218 lines changed; file deleted)

@@ -1,218 +0,0 @@
/*
* Copyright (c) 2006-2023, RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
* 2018-10-30 Bernard The first version
* 2023-09-15 xqyjlj perf rt_hw_interrupt_disable/enable
* 2023-12-10 xqyjlj spinlock should lock sched
* 2024-01-25 Shell Using rt_exit_critical_safe
*/
#include <rthw.h>
#include <rtthread.h>
#ifdef RT_USING_SMART
#include <lwp.h>
#endif
#ifdef RT_USING_DEBUG
rt_base_t _cpus_critical_level;
#endif /* RT_USING_DEBUG */
static struct rt_cpu _cpus[RT_CPUS_NR];
rt_hw_spinlock_t _cpus_lock;
#if defined(RT_DEBUGING_SPINLOCK)
void *_cpus_lock_owner = 0;
void *_cpus_lock_pc = 0;
#endif /* RT_DEBUGING_SPINLOCK */
/**
* @brief Initialize a static spinlock object.
*
* @param lock is a pointer to the spinlock to initialize.
*/
void rt_spin_lock_init(struct rt_spinlock *lock)
{
rt_hw_spin_lock_init(&lock->lock);
}
RTM_EXPORT(rt_spin_lock_init)
/**
* @brief This function will lock the spinlock, will lock the thread scheduler.
*
* @note If the spinlock is locked, the current CPU will keep polling the spinlock state
* until the spinlock is unlocked.
*
* @param lock is a pointer to the spinlock.
*/
void rt_spin_lock(struct rt_spinlock *lock)
{
rt_enter_critical();
rt_hw_spin_lock(&lock->lock);
RT_SPIN_LOCK_DEBUG(lock);
}
RTM_EXPORT(rt_spin_lock)
/**
* @brief This function will unlock the spinlock, will unlock the thread scheduler.
*
* @param lock is a pointer to the spinlock.
*/
void rt_spin_unlock(struct rt_spinlock *lock)
{
rt_base_t critical_level;
RT_SPIN_UNLOCK_DEBUG(lock, critical_level);
rt_hw_spin_unlock(&lock->lock);
rt_exit_critical_safe(critical_level);
}
RTM_EXPORT(rt_spin_unlock)
/**
* @brief This function will disable the local interrupt and then lock the spinlock, will lock the thread scheduler.
*
* @note If the spinlock is locked, the current CPU will keep polling the spinlock state
* until the spinlock is unlocked.
*
* @param lock is a pointer to the spinlock.
*
* @return Return current cpu interrupt status.
*/
rt_base_t rt_spin_lock_irqsave(struct rt_spinlock *lock)
{
rt_base_t level;
level = rt_hw_local_irq_disable();
rt_enter_critical();
rt_hw_spin_lock(&lock->lock);
RT_SPIN_LOCK_DEBUG(lock);
return level;
}
RTM_EXPORT(rt_spin_lock_irqsave)
/**
* @brief This function will unlock the spinlock and then restore current cpu interrupt status, will unlock the thread scheduler.
*
* @param lock is a pointer to the spinlock.
*
* @param level is interrupt status returned by rt_spin_lock_irqsave().
*/
void rt_spin_unlock_irqrestore(struct rt_spinlock *lock, rt_base_t level)
{
rt_base_t critical_level;
RT_SPIN_UNLOCK_DEBUG(lock, critical_level);
rt_hw_spin_unlock(&lock->lock);
rt_exit_critical_safe(critical_level);
rt_hw_local_irq_enable(level);
}
RTM_EXPORT(rt_spin_unlock_irqrestore)
/**
* @brief This function will return the current cpu object.
*
* @return Return a pointer to the current cpu object.
*/
struct rt_cpu *rt_cpu_self(void)
{
return &_cpus[rt_hw_cpu_id()];
}
/**
* @brief This function will return the cpu object corresponding to index.
*
* @param index is the index of target cpu object.
*
* @return Return a pointer to the cpu object corresponding to index.
*/
struct rt_cpu *rt_cpu_index(int index)
{
return &_cpus[index];
}
/**
* @brief This function will lock all cpus's scheduler and disable local irq.
*
* @return Return current cpu interrupt status.
*/
rt_base_t rt_cpus_lock(void)
{
rt_base_t level;
struct rt_cpu* pcpu;
level = rt_hw_local_irq_disable();
pcpu = rt_cpu_self();
if (pcpu->current_thread != RT_NULL)
{
register rt_ubase_t lock_nest = rt_atomic_load(&(pcpu->current_thread->cpus_lock_nest));
rt_atomic_add(&(pcpu->current_thread->cpus_lock_nest), 1);
if (lock_nest == 0)
{
rt_enter_critical();
rt_hw_spin_lock(&_cpus_lock);
#ifdef RT_USING_DEBUG
_cpus_critical_level = rt_critical_level();
#endif /* RT_USING_DEBUG */
#ifdef RT_DEBUGING_SPINLOCK
_cpus_lock_owner = pcpu->current_thread;
_cpus_lock_pc = __GET_RETURN_ADDRESS;
#endif /* RT_DEBUGING_SPINLOCK */
}
}
return level;
}
RTM_EXPORT(rt_cpus_lock);
/**
* @brief This function will restore all cpus's scheduler and restore local irq.
*
* @param level is interrupt status returned by rt_cpus_lock().
*/
void rt_cpus_unlock(rt_base_t level)
{
struct rt_cpu* pcpu = rt_cpu_self();
if (pcpu->current_thread != RT_NULL)
{
rt_base_t critical_level = 0;
RT_ASSERT(rt_atomic_load(&(pcpu->current_thread->cpus_lock_nest)) > 0);
rt_atomic_sub(&(pcpu->current_thread->cpus_lock_nest), 1);
if (pcpu->current_thread->cpus_lock_nest == 0)
{
#if defined(RT_DEBUGING_SPINLOCK)
_cpus_lock_owner = __OWNER_MAGIC;
_cpus_lock_pc = RT_NULL;
#endif /* RT_DEBUGING_SPINLOCK */
#ifdef RT_USING_DEBUG
critical_level = _cpus_critical_level;
_cpus_critical_level = 0;
#endif /* RT_USING_DEBUG */
rt_hw_spin_unlock(&_cpus_lock);
rt_exit_critical_safe(critical_level);
}
}
rt_hw_local_irq_enable(level);
}
RTM_EXPORT(rt_cpus_unlock);
/**
* This function is invoked by scheduler.
* It will restore the lock state to whatever the thread's counter expects.
* If target thread not locked the cpus then unlock the cpus lock.
*
* @param thread is a pointer to the target thread.
*/
void rt_cpus_lock_status_restore(struct rt_thread *thread)
{
#if defined(ARCH_MM_MMU) && defined(RT_USING_SMART)
lwp_aspace_switch(thread);
#endif
rt_sched_post_ctx_switch(thread);
}
RTM_EXPORT(rt_cpus_lock_status_restore);
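
The deleted cpu_mp.c is the SMP side of RT-Thread's spinlock and cpus-lock API (rt_spin_lock also pins the scheduler, rt_spin_lock_irqsave additionally masks local interrupts, rt_cpus_lock/rt_cpus_unlock serialize all cores), so removing it is consistent with a single-core STM32F4 build where the uniprocessor fallbacks apply. For reference, a usage sketch of the calling convention the comments above describe; my_lock, my_list and my_enqueue are hypothetical:

/* Sketch: typical spinlock usage. Illustrative only; on UP builds the same
 * calls are normally provided by RT-Thread's non-SMP implementations. */
#include <rtthread.h>

static struct rt_spinlock my_lock;
static rt_list_t my_list = RT_LIST_OBJECT_INIT(my_list);

void my_module_init(void)
{
    rt_spin_lock_init(&my_lock);
}

void my_enqueue(rt_list_t *node)
{
    rt_base_t level = rt_spin_lock_irqsave(&my_lock); /* irqs off, scheduler locked */
    rt_list_insert_before(&my_list, node);
    rt_spin_unlock_irqrestore(&my_lock, level);       /* restore irq state */
}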

rt-thread/src/scheduler_mp.c (1293 lines changed)

File diff suppressed because it is too large

scripts/stm32_rom.ld (145 lines changed; new file)

@@ -0,0 +1,145 @@
/*
* linker script for STM32F429xx with GNU ld
* bernard.xiong 2009-10-14
*/
/* Program Entry, set to mark it as "used" and avoid gc */
MEMORY
{
FLASH (rx) : ORIGIN = 0x08000000, LENGTH = 2048k /* 2048KB flash */
SRAM (rw) : ORIGIN = 0x20000000, LENGTH = 192k /* 192KB sram */
CCRAM (rwx) : ORIGIN = 0x10000000, LENGTH = 64K
}
ENTRY(Reset_Handler)
_system_stack_size = DEFINED(_system_stack_size) ? _system_stack_size : 0x200;
SECTIONS
{
.text :
{
. = ALIGN(4);
_stext = .;
KEEP(*(.isr_vector)) /* Startup code */
. = ALIGN(4);
*(.text) /* remaining code */
*(.text.*) /* remaining code */
*(.rodata) /* read-only data (constants) */
*(.rodata*)
*(.glue_7)
*(.glue_7t)
*(.gnu.linkonce.t*)
/* section information for finsh shell */
. = ALIGN(4);
__fsymtab_start = .;
KEEP(*(FSymTab))
__fsymtab_end = .;
. = ALIGN(4);
__vsymtab_start = .;
KEEP(*(VSymTab))
__vsymtab_end = .;
. = ALIGN(4);
/* section information for initial. */
. = ALIGN(4);
__rt_init_start = .;
KEEP(*(SORT(.rti_fn*)))
__rt_init_end = .;
. = ALIGN(4);
. = ALIGN(4);
_etext = .;
} > FLASH = 0
/* .ARM.exidx is sorted, so has to go in its own output section. */
__exidx_start = .;
.ARM.exidx :
{
*(.ARM.exidx* .gnu.linkonce.armexidx.*)
/* This is used by the startup in order to initialize the .data section */
_sidata = .;
} > FLASH
__exidx_end = .;
/* .data section which is used for initialized data */
.data : AT (_sidata)
{
. = ALIGN(4);
/* This is used by the startup in order to initialize the .data section */
_sdata = . ;
*(.data)
*(.data.*)
*(.gnu.linkonce.d*)
. = ALIGN(4);
/* This is used by the startup in order to initialize the .data section */
_edata = . ;
} >SRAM
__bss_start = .;
.bss :
{
. = ALIGN(4);
/* This is used by the startup in order to initialize the .bss section */
_sbss = .;
*(.bss)
*(.bss.*)
*(COMMON)
. = ALIGN(4);
/* This is used by the startup in order to initialize the .bss section */
_ebss = . ;
*(.bss.init)
} > SRAM
__bss_end = .;
.stack :
{
. = . + _system_stack_size;
. = ALIGN(4);
_estack = .;
} >SRAM
_end = .;
__ram_end__ = ORIGIN(SRAM) + LENGTH(SRAM);
/* Stabs debugging sections. */
.stab 0 : { *(.stab) }
.stabstr 0 : { *(.stabstr) }
.stab.excl 0 : { *(.stab.excl) }
.stab.exclstr 0 : { *(.stab.exclstr) }
.stab.index 0 : { *(.stab.index) }
.stab.indexstr 0 : { *(.stab.indexstr) }
.comment 0 : { *(.comment) }
/* DWARF debug sections.
* Symbols in the DWARF debugging sections are relative to the beginning
* of the section so we begin them at 0. */
/* DWARF 1 */
.debug 0 : { *(.debug) }
.line 0 : { *(.line) }
/* GNU DWARF 1 extensions */
.debug_srcinfo 0 : { *(.debug_srcinfo) }
.debug_sfnames 0 : { *(.debug_sfnames) }
/* DWARF 1.1 and DWARF 2 */
.debug_aranges 0 : { *(.debug_aranges) }
.debug_pubnames 0 : { *(.debug_pubnames) }
/* DWARF 2 */
.debug_info 0 : { *(.debug_info .gnu.linkonce.wi.*) }
.debug_abbrev 0 : { *(.debug_abbrev) }
.debug_line 0 : { *(.debug_line) }
.debug_frame 0 : { *(.debug_frame) }
.debug_str 0 : { *(.debug_str) }
.debug_loc 0 : { *(.debug_loc) }
.debug_macinfo 0 : { *(.debug_macinfo) }
/* SGI/MIPS DWARF 2 extensions */
.debug_weaknames 0 : { *(.debug_weaknames) }
.debug_funcnames 0 : { *(.debug_funcnames) }
.debug_typenames 0 : { *(.debug_typenames) }
.debug_varnames 0 : { *(.debug_varnames) }
}
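
Besides the startup symbols, the script brackets the finsh symbol tables (__fsymtab_start/__fsymtab_end, __vsymtab_start/__vsymtab_end) and the auto-init table (__rt_init_start/__rt_init_end around KEEP(*(SORT(.rti_fn*)))) so they can be walked at run time. A hedged sketch of how such a linker-bracketed table is consumed from C; the loop and the init_fn_t typedef are simplified stand-ins for RT-Thread's real component-initialization code:

/* Sketch: iterate a table whose bounds are linker-defined symbols.
 * __rt_init_start/__rt_init_end come from the script above. */
typedef int (*init_fn_t)(void);

extern init_fn_t __rt_init_start;  /* first .rti_fn* entry  */
extern init_fn_t __rt_init_end;    /* one past the last one */

static void run_auto_init_table(void)
{
    for (init_fn_t *fn = &__rt_init_start; fn < &__rt_init_end; fn++)
        (*fn)();                   /* call each exported init function */
}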

scripts/stm32f4xx_linker.ld (234 lines changed; file deleted)

@@ -1,234 +0,0 @@
/* vim: set ts=4 sw=4 et fdm=marker:
*
* Linker script to configure memory regions.
* Need modifying for a specific board.
* FLASH.ORIGIN: starting address of flash
* FLASH.LENGTH: length of flash
* RAM.ORIGIN: starting address of RAM bank 0
* RAM.LENGTH: length of RAM bank 0
*/
MEMORY
{
FLASH (rx) : ORIGIN = 0x08000000, LENGTH = 512K
RAM (rwx) : ORIGIN = 0x20000000, LENGTH = 128K
TCM (rwx) : ORIGIN = 0x10000000, LENGTH = 64K
}
/* Linker script to place sections and symbol values. Should be used together
* with other linker script that defines memory regions FLASH and RAM.
* It references following symbols, which must be defined in code:
* Reset_Handler : Entry of reset handler
*
* It defines following symbols, which code can use without definition:
* __exidx_start
* __exidx_end
* __copy_table_start__
* __copy_table_end__
* __zero_table_start__
* __zero_table_end__
* __etext
* __data_start__
* __preinit_array_start
* __preinit_array_end
* __init_array_start
* __init_array_end
* __fini_array_start
* __fini_array_end
* __data_end__
* __bss_start__
* __bss_end__
* __end__
* end
* __HeapLimit
* __StackLimit
* __StackTop
* __stack
*/
ENTRY(Reset_Handler)
SECTIONS
{
.text :
{
__text_start__ = .;
. = ALIGN(4);
KEEP(*(.isr_vector))
*(.text*)
KEEP(*(.init))
KEEP(*(.fini))
/* .ctors */
*crtbegin.o(.ctors)
*crtbegin?.o(.ctors)
*(EXCLUDE_FILE(*crtend?.o *crtend.o) .ctors)
*(SORT(.ctors.*))
*(.ctors)
/* .dtors */
*crtbegin.o(.dtors)
*crtbegin?.o(.dtors)
*(EXCLUDE_FILE(*crtend?.o *crtend.o) .dtors)
*(SORT(.dtors.*))
*(.dtors)
/* section information for finsh shell */
. = ALIGN(4);
__fsymtab_start = .;
KEEP(*(FSymTab))
__fsymtab_end = .;
. = ALIGN(4);
__vsymtab_start = .;
KEEP(*(VSymTab))
__vsymtab_end = .;
/* section information for initial. */
. = ALIGN(4);
__rt_init_start = .;
KEEP(*(SORT(.rti_fn*)))
__rt_init_end = .;
. = ALIGN(4);
PROVIDE(__ctors_start__ = .);
KEEP (*(SORT(.init_array.*)))
KEEP (*(.init_array))
PROVIDE(__ctors_end__ = .);
KEEP(*(.eh_frame*))
} > FLASH
.ARM.extab :
{
*(.ARM.extab* .gnu.linkonce.armextab.*)
} > FLASH
__exidx_start = .;
.ARM.exidx :
{
*(.ARM.exidx* .gnu.linkonce.armexidx.*)
} > FLASH
__exidx_end = .;
.rodata : AT(__exidx_end)
{
. = ALIGN(4);
__rodata_start = .;
*(.rodata*)
. = ALIGN(4);
/* preinit data */
PROVIDE_HIDDEN (__preinit_array_start = .);
KEEP(*(.preinit_array))
PROVIDE_HIDDEN (__preinit_array_end = .);
. = ALIGN(4);
/* init data */
PROVIDE_HIDDEN (__init_array_start = .);
KEEP(*(SORT(.init_array.*)))
KEEP(*(.init_array))
PROVIDE_HIDDEN (__init_array_end = .);
. = ALIGN(4);
/* finit data */
PROVIDE_HIDDEN (__fini_array_start = .);
KEEP(*(SORT(.fini_array.*)))
KEEP(*(.fini_array))
PROVIDE_HIDDEN (__fini_array_end = .);
. = ALIGN(4);
__rodata_end = .;
} > FLASH
/* To copy multiple ROM to RAM sections,
* uncomment .copy.table section and,
* define __STARTUP_COPY_MULTIPLE in startup_ARMCMx.S */
/*
.copy.table :
{
. = ALIGN(4);
__copy_table_start__ = .;
LONG (__etext)
LONG (__data_start__)
LONG (__data_end__ - __data_start__)
LONG (__etext2)
LONG (__data2_start__)
LONG (__data2_end__ - __data2_start__)
__copy_table_end__ = .;
} > FLASH
*/
/* To clear multiple BSS sections,
* uncomment .zero.table section and,
* define __STARTUP_CLEAR_BSS_MULTIPLE in startup_ARMCMx.S */
/*
.zero.table :
{
. = ALIGN(4);
__zero_table_start__ = .;
LONG (__bss_start__)
LONG (__bss_end__ - __bss_start__)
LONG (__bss2_start__)
LONG (__bss2_end__ - __bss2_start__)
__zero_table_end__ = .;
} > FLASH
*/
__etext = .;
.data : AT (__etext)
{
__data_start__ = .;
*(vtable)
*(.data*)
KEEP(*(.jcr*))
. = ALIGN(4);
/* All data end */
__data_end__ = .;
} > RAM
.bss :
{
. = ALIGN(4);
__bss_start__ = .;
*(.bss*)
*(COMMON)
. = ALIGN(4);
*(.rxdma*)
*(.txdma*)
*(.txbuf*)
. = ALIGN(4);
__bss_end__ = .;
} > RAM
.heap (NOLOAD):
{
__end__ = .;
PROVIDE(end = .);
*(.heap*)
__HeapLimit = .;
} > RAM
/* .stack_dummy section doesn't contain any symbols. It is only
* used for linker to calculate size of stack sections, and assign
* values to stack symbols later */
.stack_dummy (NOLOAD):
{
*(.stack*)
} > RAM
/* Set stack top to end of RAM, and stack limit move down by
* size of stack_dummy section */
__StackTop = ORIGIN(RAM) + LENGTH(RAM);
__StackLimit = __StackTop - SIZEOF(.stack_dummy);
PROVIDE(__stack = __StackTop);
__ram_end__ = ORIGIN(RAM) + LENGTH(RAM) -1 ;
/* Check if data + heap + stack exceeds RAM limit */
ASSERT(__StackLimit >= __HeapLimit, "region RAM overflowed with stack")
}
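
The removed CMSIS-style script also carried the commented-out .copy.table/.zero.table machinery for initializing several RAM regions from tables rather than from a single .data/.bss pair; the new stm32_rom.ld drops that path entirely. For reference, a hedged sketch of the loop such tables are meant to drive, with the entry layout used in the commented sections above (copy: load address, run address, length in bytes; zero: start address, length in bytes):

/* Sketch: multi-region startup copy/zero driven by linker-built tables.
 * Illustrative only; this project now uses a single .data/.bss pair. */
#include <stdint.h>

extern uint32_t __copy_table_start__[], __copy_table_end__[];
extern uint32_t __zero_table_start__[], __zero_table_end__[];

static void init_ram_from_tables(void)
{
    for (uint32_t *e = __copy_table_start__; e < __copy_table_end__; e += 3) {
        const uint32_t *src = (const uint32_t *)e[0];
        uint32_t *dst       = (uint32_t *)e[1];
        for (uint32_t i = 0; i < e[2] / 4u; i++)   /* e[2] is a byte count */
            dst[i] = src[i];
    }
    for (uint32_t *e = __zero_table_start__; e < __zero_table_end__; e += 2) {
        uint32_t *dst = (uint32_t *)e[0];
        for (uint32_t i = 0; i < e[1] / 4u; i++)
            dst[i] = 0;
    }
}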