@web-font-path: "roboto-debian.css";
Loading...
Searching...
No Matches
sync.h
Go to the documentation of this file.
1/*
2 * Copyright (c) 2020 Raspberry Pi (Trading) Ltd.
3 *
4 * SPDX-License-Identifier: BSD-3-Clause
5 */
6
7#ifndef _HARDWARE_SYNC_H
8#define _HARDWARE_SYNC_H
9
10#include "pico.h"
12#include "hardware/regs/sio.h"
13
14#ifdef __cplusplus
15extern "C" {
16#endif
17
48// PICO_CONFIG: PARAM_ASSERTIONS_ENABLED_SYNC, Enable/disable assertions in the HW sync module, type=bool, default=0, group=hardware_sync
49#ifndef PARAM_ASSERTIONS_ENABLED_SYNC
50#define PARAM_ASSERTIONS_ENABLED_SYNC 0
51#endif
52
56typedef volatile uint32_t spin_lock_t;
57
58// PICO_CONFIG: PICO_SPINLOCK_ID_IRQ, Spinlock ID for IRQ protection, min=0, max=31, default=9, group=hardware_sync
59#ifndef PICO_SPINLOCK_ID_IRQ
60#define PICO_SPINLOCK_ID_IRQ 9
61#endif
62
63// PICO_CONFIG: PICO_SPINLOCK_ID_TIMER, Spinlock ID for Timer protection, min=0, max=31, default=10, group=hardware_sync
64#ifndef PICO_SPINLOCK_ID_TIMER
65#define PICO_SPINLOCK_ID_TIMER 10
66#endif
67
68// PICO_CONFIG: PICO_SPINLOCK_ID_HARDWARE_CLAIM, Spinlock ID for Hardware claim protection, min=0, max=31, default=11, group=hardware_sync
69#ifndef PICO_SPINLOCK_ID_HARDWARE_CLAIM
70#define PICO_SPINLOCK_ID_HARDWARE_CLAIM 11
71#endif
72
73// PICO_CONFIG: PICO_SPINLOCK_ID_RAND, Spinlock ID for Random Number Generator, min=0, max=31, default=12, group=hardware_sync
74#ifndef PICO_SPINLOCK_ID_RAND
75#define PICO_SPINLOCK_ID_RAND 12
76#endif
77
78// PICO_CONFIG: PICO_SPINLOCK_ID_OS1, First Spinlock ID reserved for use by low level OS style software, min=0, max=31, default=14, group=hardware_sync
79#ifndef PICO_SPINLOCK_ID_OS1
80#define PICO_SPINLOCK_ID_OS1 14
81#endif
82
83// PICO_CONFIG: PICO_SPINLOCK_ID_OS2, Second Spinlock ID reserved for use by low level OS style software, min=0, max=31, default=15, group=hardware_sync
84#ifndef PICO_SPINLOCK_ID_OS2
85#define PICO_SPINLOCK_ID_OS2 15
86#endif
87
88// PICO_CONFIG: PICO_SPINLOCK_ID_STRIPED_FIRST, Lowest Spinlock ID in the 'striped' range, min=0, max=31, default=16, group=hardware_sync
89#ifndef PICO_SPINLOCK_ID_STRIPED_FIRST
90#define PICO_SPINLOCK_ID_STRIPED_FIRST 16
91#endif
92
93// PICO_CONFIG: PICO_SPINLOCK_ID_STRIPED_LAST, Highest Spinlock ID in the 'striped' range, min=0, max=31, default=23, group=hardware_sync
94#ifndef PICO_SPINLOCK_ID_STRIPED_LAST
95#define PICO_SPINLOCK_ID_STRIPED_LAST 23
96#endif
97
98// PICO_CONFIG: PICO_SPINLOCK_ID_CLAIM_FREE_FIRST, Lowest Spinlock ID in the 'claim free' range, min=0, max=31, default=24, group=hardware_sync
99#ifndef PICO_SPINLOCK_ID_CLAIM_FREE_FIRST
100#define PICO_SPINLOCK_ID_CLAIM_FREE_FIRST 24
101#endif
102
103#ifdef PICO_SPINLOCK_ID_CLAIM_FREE_END
104#warning PICO_SPINLOCK_ID_CLAIM_FREE_END has been renamed to PICO_SPINLOCK_ID_CLAIM_FREE_LAST
105#endif
106
107// PICO_CONFIG: PICO_SPINLOCK_ID_CLAIM_FREE_LAST, Highest Spinlock ID in the 'claim free' range, min=0, max=31, default=31, group=hardware_sync
108#ifndef PICO_SPINLOCK_ID_CLAIM_FREE_LAST
109#define PICO_SPINLOCK_ID_CLAIM_FREE_LAST 31
110#endif
111
/*! \brief Insert a SEV (Send Event) instruction into the code path.
 *
 * SEV signals an event to wake any core waiting in WFE.
 * Only defined here when the compiler does not provide a builtin of the same name.
 */
117#if !__has_builtin(__sev)
118__force_inline static void __sev(void) {
119 pico_default_asm_volatile ("sev");
120}
121#endif
122
/*! \brief Insert a WFE (Wait For Event) instruction into the code path.
 *
 * Blocks until an event (e.g. a SEV from the other core) occurs.
 * Only defined here when the compiler does not provide a builtin of the same name.
 */
129#if !__has_builtin(__wfe)
130__force_inline static void __wfe(void) {
131 pico_default_asm_volatile ("wfe");
132}
133#endif
134
/*! \brief Insert a WFI (Wait For Interrupt) instruction into the code path.
 *
 * Blocks until an interrupt occurs.
 * Only defined here when the compiler does not provide a builtin of the same name.
 */
140#if !__has_builtin(__wfi)
141__force_inline static void __wfi(void) {
142 pico_default_asm_volatile("wfi");
143}
144#endif
145
/*! \brief Insert a DMB (Data Memory Barrier) instruction into the code path.
 *
 * The "memory" clobber also acts as a compiler barrier, preventing the
 * compiler from reordering memory accesses across this point.
 */
152__force_inline static void __dmb(void) {
153 pico_default_asm_volatile("dmb" : : : "memory");
154}
155
/*! \brief Insert a DSB (Data Synchronization Barrier) instruction into the code path.
 *
 * Stronger than DMB: completes when all outstanding memory accesses have
 * finished. The "memory" clobber additionally acts as a compiler barrier.
 */
163__force_inline static void __dsb(void) {
164 pico_default_asm_volatile("dsb" : : : "memory");
165}
166
/*! \brief Insert an ISB (Instruction Synchronization Barrier) instruction into the code path.
 *
 * Flushes the pipeline so subsequent instructions are re-fetched; the
 * "memory" clobber also acts as a compiler barrier.
 */
174__force_inline static void __isb(void) {
175 pico_default_asm_volatile("isb" ::: "memory");
176}
177
182 // the original code below makes it hard for us to be included from C++ via a header
183 // which itself is in an extern "C", so just use __dmb instead, which is what
184 // is required on Cortex M0+
185 __dmb();
186//#ifndef __cplusplus
187// atomic_thread_fence(memory_order_acquire);
188//#else
189// std::atomic_thread_fence(std::memory_order_acquire);
190//#endif
191}
192
198 // the original code below makes it hard for us to be included from C++ via a header
199 // which itself is in an extern "C", so just use __dmb instead, which is what
200 // is required on Cortex M0+
201 __dmb();
202//#ifndef __cplusplus
203// atomic_thread_fence(memory_order_release);
204//#else
205// std::atomic_thread_fence(std::memory_order_release);
206//#endif
207}
208
215 uint32_t status;
216 pico_default_asm_volatile(
217 "mrs %0, PRIMASK\n"
218 "cpsid i"
219 : "=r" (status) ::);
220 return status;
221}
222
/*! \brief Restore interrupts to a specified state.
 *
 * Writes the given value back to PRIMASK; pass the value previously
 * returned by save_and_disable_interrupts().
 *
 * \param status the PRIMASK state to restore
 */
228__force_inline static void restore_interrupts(uint32_t status) {
229 pico_default_asm_volatile("msr PRIMASK,%0"::"r" (status) : );
230}
231
239 invalid_params_if(SYNC, lock_num >= NUM_SPIN_LOCKS);
240 return (spin_lock_t *) (SIO_BASE + SIO_SPINLOCK0_OFFSET + lock_num * 4);
241}
242
250 invalid_params_if(SYNC, (uint) lock < SIO_BASE + SIO_SPINLOCK0_OFFSET ||
251 (uint) lock >= NUM_SPIN_LOCKS * sizeof(spin_lock_t) + SIO_BASE + SIO_SPINLOCK0_OFFSET ||
252 ((uint) lock - SIO_BASE + SIO_SPINLOCK0_OFFSET) % sizeof(spin_lock_t) != 0);
253 return (uint) (lock - (spin_lock_t *) (SIO_BASE + SIO_SPINLOCK0_OFFSET));
254}
255
262 // Note we don't do a wfe or anything, because by convention these spin_locks are VERY SHORT LIVED and NEVER BLOCK and run
263 // with INTERRUPTS disabled (to ensure that)... therefore nothing on our core could be blocking us, so we just need to wait on another core
264 // anyway which should be finished soon
265 while (__builtin_expect(!*lock, 0));
267}
268
276 *lock = 0;
277}
278
288 uint32_t save = save_and_disable_interrupts();
290 return save;
291}
292
298inline static bool is_spin_locked(spin_lock_t *lock) {
299 check_hw_size(spin_lock_t, 4);
300 uint lock_num = spin_lock_get_num(lock);
301 return 0 != (*(io_ro_32 *) (SIO_BASE + SIO_SPINLOCK_ST_OFFSET) & (1u << lock_num));
302}
303
/*! \brief Release a spin lock safely.
 *
 * Order matters: the lock must be released BEFORE interrupts are restored,
 * so the critical section fully ends while still protected.
 *
 * \param lock the spinlock instance
 * \param saved_irq the interrupt status returned by spin_lock_blocking()
 */
314__force_inline static void spin_unlock(spin_lock_t *lock, uint32_t saved_irq) {
315 spin_unlock_unsafe(lock);
316 restore_interrupts(saved_irq);
317}
318
327spin_lock_t *spin_lock_init(uint lock_num);
328
332void spin_locks_reset(void);
333
350
360void spin_lock_claim(uint lock_num);
361
371void spin_lock_claim_mask(uint32_t lock_num_mask);
372
380void spin_lock_unclaim(uint lock_num);
381
388int spin_lock_claim_unused(bool required);
389
398bool spin_lock_is_claimed(uint lock_num);
399
400// no longer use __mem_fence_acquire here, as it is overkill on cortex M0+
401#define remove_volatile_cast(t, x) ({__compiler_memory_barrier(); Clang_Pragma("clang diagnostic push"); Clang_Pragma("clang diagnostic ignored \"-Wcast-qual\""); (t)(x); Clang_Pragma("clang diagnostic pop"); })
402
403#ifdef __cplusplus
404}
405#endif
406
407#endif
static __force_inline uint32_t spin_lock_blocking(spin_lock_t *lock)
Acquire a spin lock safely.
Definition sync.h:287
int spin_lock_claim_unused(bool required)
Claim a free spin lock.
Definition sync.c:56
static __force_inline uint32_t save_and_disable_interrupts(void)
Save and disable interrupts.
Definition sync.h:214
void spin_lock_unclaim(uint lock_num)
Mark a spin lock as no longer used.
Definition sync.c:50
static __force_inline void __mem_fence_release(void)
Release a memory fence.
Definition sync.h:197
static __force_inline void spin_unlock_unsafe(spin_lock_t *lock)
Release a spin lock without re-enabling interrupts.
Definition sync.h:274
static __force_inline void spin_unlock(spin_lock_t *lock, uint32_t saved_irq)
Release a spin lock safely.
Definition sync.h:314
void spin_lock_claim_mask(uint32_t lock_num_mask)
Mark multiple spin locks as used.
Definition sync.c:44
static __force_inline void __sev(void)
Insert a SEV instruction in to the code path.
Definition sync.h:118
void spin_lock_claim(uint lock_num)
Mark a spin lock as used.
Definition sync.c:39
static __force_inline void __dmb(void)
Insert a DMB instruction in to the code path.
Definition sync.h:152
void spin_locks_reset(void)
Release all spin locks.
Definition sync.c:18
static __force_inline void __wfe(void)
Insert a WFE instruction in to the code path.
Definition sync.h:130
static __force_inline void restore_interrupts(uint32_t status)
Restore interrupts to a specified state.
Definition sync.h:228
static __force_inline spin_lock_t * spin_lock_instance(uint lock_num)
Get HW Spinlock instance from number.
Definition sync.h:238
uint next_striped_spin_lock_num(void)
Return a spin lock number from the striped range.
Definition sync.c:31
bool spin_lock_is_claimed(uint lock_num)
Determine if a spin lock is claimed.
Definition sync.c:60
static __force_inline void __mem_fence_acquire(void)
Acquire a memory fence.
Definition sync.h:181
static __force_inline void __wfi(void)
Insert a WFI instruction in to the code path.
Definition sync.h:141
static __force_inline void spin_lock_unsafe_blocking(spin_lock_t *lock)
Acquire a spin lock without disabling interrupts (hence unsafe)
Definition sync.h:261
volatile uint32_t spin_lock_t
A spin lock identifier.
Definition sync.h:56
spin_lock_t * spin_lock_init(uint lock_num)
Initialise a spin lock.
Definition sync.c:24
static __force_inline uint spin_lock_get_num(spin_lock_t *lock)
Get HW Spinlock number from instance.
Definition sync.h:249
static bool is_spin_locked(spin_lock_t *lock)
Check to see if a spinlock is currently acquired elsewhere.
Definition sync.h:298
static __force_inline void __isb(void)
Insert a ISB instruction in to the code path.
Definition sync.h:174
static __force_inline void __dsb(void)
Insert a DSB instruction in to the code path.
Definition sync.h:163
#define __force_inline
Attribute to force inlining of a function regardless of optimization level.
Definition platform.h:314