Skip to content

Commit e16bca4

Browse files
committed
cpu/esp32/syscalls: migration to ESP-IDF v5.4
1 parent d57fd6a commit e16bca4

File tree

2 files changed

+166
-91
lines changed

2 files changed

+166
-91
lines changed

cpu/esp32/syscalls.c

Lines changed: 162 additions & 31 deletions
Original file line numberDiff line numberDiff line change
@@ -32,10 +32,11 @@
3232
#include "sys/lock.h"
3333
#include "timex.h"
3434

35+
#include "esp_cpu.h"
36+
#include "esp_private/periph_ctrl.h"
3537
#include "esp_rom_caps.h"
36-
#include "hal/interrupt_controller_types.h"
37-
#include "hal/interrupt_controller_ll.h"
3838
#include "hal/timer_hal.h"
39+
#include "hal/timer_ll.h"
3940
#include "hal/wdt_hal.h"
4041
#include "hal/wdt_types.h"
4142
#include "rom/ets_sys.h"
@@ -92,6 +93,28 @@ void heap_stats(void)
9293
_alloc + _free, _alloc, _free);
9394
}
9495

96+
#else /* IS_USED(MODULE_ESP_IDF_HEAP) */
97+
98+
void *heap_caps_malloc_prefer(size_t size, size_t num, ...)
99+
{
100+
/* This function usually allocates a chunk of memory in descending order
101+
* of the capabilities as defined in variable parameters. However,
102+
* allocating memory according to given capabilities is only relevant
103+
* if multiple heaps use memories of different capabilities, like the
104+
* alignment, the memory type and so on. Since we only use embedded RAM with
105+
* identical capabilities, we just map this function to the standard malloc.
106+
*/
107+
return malloc(size);
108+
}
109+
110+
void *heap_caps_aligned_calloc(size_t alignment, size_t n, size_t size, uint32_t caps)
111+
{
112+
(void)alignment;
113+
(void)caps;
114+
115+
return calloc(n, size);
116+
}
117+
95118
#endif /* IS_USED(MODULE_ESP_IDF_HEAP) */
96119

97120
/**
@@ -193,10 +216,10 @@ static struct syscall_stub_table s_stub_table =
193216
{
194217
.__getreent = &__getreent,
195218

196-
._malloc_r = &_malloc_r,
197-
._free_r = &_free_r,
198-
._realloc_r = &_realloc_r,
199-
._calloc_r = &_calloc_r,
219+
._malloc_r = (void * (*)(struct _reent *, size_t))&_malloc_r,
220+
._free_r = (void (*)(struct _reent *, void *))&_free_r,
221+
._realloc_r = (void * (*)(struct _reent *, void *, size_t))&_realloc_r,
222+
._calloc_r = (void * (*)(struct _reent *, size_t, size_t))&_calloc_r,
200223
._sbrk_r = &_sbrk_r,
201224

202225
._system_r = (void*)&_no_sys_func,
@@ -256,7 +279,7 @@ static struct syscall_stub_table s_stub_table =
256279

257280
timer_hal_context_t sys_timer = {
258281
.dev = TIMER_LL_GET_HW(TIMER_SYSTEM_GROUP),
259-
.idx = TIMER_SYSTEM_INDEX,
282+
.timer_id = TIMER_SYSTEM_INDEX,
260283
};
261284

262285
#if defined(_RETARGETABLE_LOCKING)
@@ -276,16 +299,123 @@ extern struct __lock __attribute__((alias("s_shared_mutex"))) __lock___tz_mutex;
276299
extern struct __lock __attribute__((alias("s_shared_mutex"))) __lock___dd_hash_mutex;
277300
extern struct __lock __attribute__((alias("s_shared_mutex"))) __lock___arc4random_mutex;
278301

279-
#endif
302+
/* map newlib's `__retarget_*` functions to the existing `_lock_*` functions */
303+
304+
void __retarget_lock_init(_LOCK_T *lock)
305+
{
306+
_lock_init(lock);
307+
}
308+
309+
extern void __retarget_lock_init_recursive(_LOCK_T *lock)
310+
{
311+
_lock_init_recursive(lock);
312+
}
313+
314+
void __retarget_lock_close(_LOCK_T lock)
315+
{
316+
_lock_close(&lock);
317+
}
318+
319+
void __retarget_lock_close_recursive(_LOCK_T lock)
320+
{
321+
_lock_close_recursive(&lock);
322+
}
323+
324+
void __retarget_lock_acquire(_LOCK_T lock)
325+
{
326+
if (lock == NULL) {
327+
/* use the shared mutex if lock is NULL */
328+
lock = (_lock_t)&s_shared_mutex;
329+
}
330+
_lock_acquire(&lock);
331+
}
332+
333+
void __retarget_lock_acquire_recursive(_LOCK_T lock)
334+
{
335+
if (lock == NULL) {
336+
/* use the shared rmutex if lock is NULL */
337+
lock = (_lock_t)&s_shared_rmutex;
338+
}
339+
_lock_acquire_recursive(&lock);
340+
}
341+
342+
int __retarget_lock_try_acquire(_LOCK_T lock)
343+
{
344+
if (lock == NULL) {
345+
/* use the shared mutex if lock is NULL */
346+
lock = (_lock_t)&s_shared_mutex;
347+
}
348+
return _lock_try_acquire(&lock);
349+
}
350+
351+
int __retarget_lock_try_acquire_recursive(_LOCK_T lock)
352+
{
353+
if (lock == NULL) {
354+
/* use the shared rmutex if lock is NULL */
355+
lock = (_lock_t)&s_shared_rmutex;
356+
}
357+
return _lock_try_acquire_recursive(&lock);
358+
}
359+
360+
void __retarget_lock_release(_LOCK_T lock)
361+
{
362+
if (lock == NULL) {
363+
/* use the shared mutex if lock is NULL */
364+
lock = (_lock_t)&s_shared_mutex;
365+
}
366+
_lock_release(&lock);
367+
}
368+
369+
void __retarget_lock_release_recursive(_LOCK_T lock)
370+
{
371+
if (lock == NULL) {
372+
/* use the shared rmutex if lock is NULL */
373+
lock = (_lock_t)&s_shared_rmutex;
374+
}
375+
_lock_release(&lock);
376+
}
377+
378+
#endif /* _RETARGETABLE_LOCKING */
280379

281380
void IRAM syscalls_init_arch(void)
282381
{
283-
/* initialize and enable the system timer in us (TMG0 is enabled by default) */
284-
timer_hal_init(&sys_timer, TIMER_SYSTEM_GROUP, TIMER_SYSTEM_INDEX);
285-
timer_hal_set_divider(&sys_timer, rtc_clk_apb_freq_get() / MHZ);
286-
timer_hal_set_counter_increase(&sys_timer, true);
287-
timer_hal_set_auto_reload(&sys_timer, false);
288-
timer_hal_set_counter_enable(&sys_timer, true);
382+
#if 0
383+
/* In ESP-IDF, the newlibc functions in ROM are used that require some
384+
* variables that have to be set to the shared mutex/rmutex. Since we
385+
* don't use the newlib functions in ROM, we don't have to set these
386+
* variables here for the moment
387+
*/
388+
#ifdef CONFIG_IDF_TARGET_ESP32
389+
/* Newlib 2.2.0 is used in ROM, the following lock symbols are defined: */
390+
extern _lock_t __sfp_lock;
391+
__sfp_lock = (_lock_t) &s_shared_rmutex;
392+
extern _lock_t __sinit_lock;
393+
__sinit_lock = (_lock_t) &s_shared_rmutex;
394+
extern _lock_t __env_lock_object;
395+
__env_lock_object = (_lock_t) &s_shared_rmutex;
396+
extern _lock_t __tz_lock_object;
397+
__tz_lock_object = (_lock_t) &s_shared_rmutex;
398+
#elif defined(CONFIG_IDF_TARGET_ESP32S2)
399+
/* Newlib 3.0.0 is used in ROM, the following lock symbols are defined: */
400+
extern _lock_t __sinit_recursive_mutex;
401+
__sinit_recursive_mutex = (_lock_t) &s_shared_rmutex;
402+
extern _lock_t __sfp_recursive_mutex;
403+
__sfp_recursive_mutex = (_lock_t) &s_shared_rmutex;
404+
#endif
405+
#endif
406+
407+
/* initialize and enable the system timer in us */
408+
periph_module_enable(PERIPH_TIMG0_MODULE);
409+
timer_ll_set_clock_source(sys_timer.dev, sys_timer.timer_id, GPTIMER_CLK_SRC_DEFAULT);
410+
timer_ll_enable_clock(sys_timer.dev, sys_timer.timer_id, true);
411+
timer_ll_set_clock_prescale(sys_timer.dev, sys_timer.timer_id, rtc_clk_apb_freq_get() / MHZ);
412+
timer_ll_set_count_direction(sys_timer.dev, sys_timer.timer_id, GPTIMER_COUNT_UP);
413+
timer_ll_enable_auto_reload(sys_timer.dev, sys_timer.timer_id, false);
414+
timer_ll_enable_counter(sys_timer.dev, sys_timer.timer_id, true);
415+
timer_ll_enable_alarm(sys_timer.dev, sys_timer.timer_id, false);
416+
#if SOC_TIMER_SUPPORT_ETM
417+
timer_ll_enable_etm(sys_timer.dev, true);
418+
#endif
289419

290420
#if defined(CPU_FAM_ESP32)
291421
syscall_table_ptr_pro = &s_stub_table;
@@ -307,11 +437,10 @@ uint32_t system_get_time_ms(void)
307437
return system_get_time_64() / US_PER_MS;
308438
}
309439

310-
int64_t system_get_time_64(void)
440+
uint64_t system_get_time_64(void)
311441
{
312-
uint64_t ret;
313-
timer_hal_get_counter_value(&sys_timer, &ret);
314-
return ret;
442+
timer_ll_trigger_soft_capture(sys_timer.dev, sys_timer.timer_id);
443+
return timer_ll_get_counter_value(sys_timer.dev, sys_timer.timer_id);
315444
}
316445

317446
wdt_hal_context_t mwdt;
@@ -357,32 +486,23 @@ void system_wdt_init(void)
357486
wdt_hal_write_protect_enable(&mwdt);
358487
wdt_hal_write_protect_enable(&rwdt);
359488

360-
#if defined(CPU_FAM_ESP32)
361-
DEBUG("%s TIMERG0 wdtconfig0=%08"PRIx32" wdtconfig1=%08"PRIx32
362-
" wdtconfig2=%08"PRIx32" wdtconfig3=%08"PRIx32
363-
" wdtconfig4=%08"PRIx32" regclk=%08"PRIx32"\n", __func__,
364-
TIMERG0.wdt_config0.val, TIMERG0.wdt_config1.val,
365-
TIMERG0.wdt_config2, TIMERG0.wdt_config3,
366-
TIMERG0.wdt_config4, TIMERG0.clk.val);
367-
#else
368489
DEBUG("%s TIMERG0 wdtconfig0=%08"PRIx32" wdtconfig1=%08"PRIx32
369490
" wdtconfig2=%08"PRIx32" wdtconfig3=%08"PRIx32
370491
" wdtconfig4=%08"PRIx32" regclk=%08"PRIx32"\n", __func__,
371492
TIMERG0.wdtconfig0.val, TIMERG0.wdtconfig1.val,
372493
TIMERG0.wdtconfig2.val, TIMERG0.wdtconfig3.val,
373494
TIMERG0.wdtconfig4.val, TIMERG0.regclk.val);
374-
#endif
375495

376496
/* route WDT peripheral interrupt source to CPU_INUM_WDT */
377497
intr_matrix_set(PRO_CPU_NUM, ETS_TG0_WDT_LEVEL_INTR_SOURCE, CPU_INUM_WDT);
378498
/* set the interrupt handler and activate the interrupt */
379-
intr_cntrl_ll_set_int_handler(CPU_INUM_WDT, system_wdt_int_handler, NULL);
380-
intr_cntrl_ll_enable_interrupts(BIT(CPU_INUM_WDT));
499+
esp_cpu_intr_set_handler(CPU_INUM_WDT, system_wdt_int_handler, NULL);
500+
esp_cpu_intr_enable(BIT(CPU_INUM_WDT));
381501
}
382502

383503
void system_wdt_stop(void)
384504
{
385-
intr_cntrl_ll_disable_interrupts(BIT(CPU_INUM_WDT));
505+
esp_cpu_intr_disable(BIT(CPU_INUM_WDT));
386506
wdt_hal_write_protect_disable(&mwdt);
387507
wdt_hal_disable(&mwdt);
388508
wdt_hal_write_protect_enable(&mwdt);
@@ -393,5 +513,16 @@ void system_wdt_start(void)
393513
wdt_hal_write_protect_disable(&mwdt);
394514
wdt_hal_enable(&mwdt);
395515
wdt_hal_write_protect_enable(&mwdt);
396-
intr_cntrl_ll_enable_interrupts(BIT(CPU_INUM_WDT));
516+
esp_cpu_intr_enable(BIT(CPU_INUM_WDT));
397517
}
518+
519+
#ifndef MODULE_POSIX_SLEEP
520+
521+
int usleep(useconds_t us)
522+
{
523+
extern void esp_rom_delay_us(uint32_t us);
524+
esp_rom_delay_us((uint32_t) us);
525+
return 0;
526+
}
527+
528+
#endif

cpu/esp_common/syscalls.c

Lines changed: 4 additions & 60 deletions
Original file line numberDiff line numberDiff line change
@@ -90,6 +90,10 @@ static _lock_t *__malloc_static_object = NULL;
9090
#define _lock_critical_enter() uint32_t __lock_state = irq_disable();
9191
#define _lock_critical_exit() irq_restore(__lock_state);
9292

93+
/* check whether `struct __lock` is large enough to hold a recursive mutex */
94+
static_assert(sizeof(struct __lock) >= sizeof(rmutex_t),
95+
"struct __lock is too small to hold a recursive mutex of type rmutex_t");
96+
9397
#endif
9498

9599
void IRAM_ATTR _lock_init(_lock_t *lock)
@@ -318,66 +322,6 @@ void IRAM_ATTR _lock_release_recursive(_lock_t *lock)
318322
_lock_critical_exit();
319323
}
320324

321-
#if defined(_RETARGETABLE_LOCKING)
322-
323-
/* check whether `struct __lock` is large enough to hold a recursive mutex */
324-
static_assert(sizeof(struct __lock) >= sizeof(rmutex_t),
325-
"struct __lock is too small to hold a recursive mutex of type rmutex_t");
326-
327-
/* map newlib's `__retarget_*` functions to the existing `_lock_*` functions */
328-
329-
void __retarget_lock_init(_LOCK_T *lock)
330-
{
331-
_lock_init(lock);
332-
}
333-
334-
extern void __retarget_lock_init_recursive(_LOCK_T *lock)
335-
{
336-
_lock_init_recursive(lock);
337-
}
338-
339-
void __retarget_lock_close(_LOCK_T lock)
340-
{
341-
_lock_close(&lock);
342-
}
343-
344-
void __retarget_lock_close_recursive(_LOCK_T lock)
345-
{
346-
_lock_close_recursive(&lock);
347-
}
348-
349-
void __retarget_lock_acquire(_LOCK_T lock)
350-
{
351-
_lock_acquire(&lock);
352-
}
353-
354-
void __retarget_lock_acquire_recursive(_LOCK_T lock)
355-
{
356-
_lock_acquire_recursive(&lock);
357-
}
358-
359-
int __retarget_lock_try_acquire(_LOCK_T lock)
360-
{
361-
return _lock_try_acquire(&lock);
362-
}
363-
364-
int __retarget_lock_try_acquire_recursive(_LOCK_T lock)
365-
{
366-
return _lock_try_acquire_recursive(&lock);
367-
}
368-
369-
void __retarget_lock_release(_LOCK_T lock)
370-
{
371-
_lock_release(&lock);
372-
}
373-
374-
void __retarget_lock_release_recursive(_LOCK_T lock)
375-
{
376-
_lock_release(&lock);
377-
}
378-
379-
#endif /* _RETARGETABLE_LOCKING */
380-
381325
/**
382326
* @name Memory allocation functions
383327
*/

0 commit comments

Comments
 (0)