use core::ptr::NonNull;

use esp_hal::{system::Cpu, time::Instant};
use esp_sync::NonReentrantMutex;

use crate::{
    SCHEDULER,
    run_queue::Priority,
    task::{TaskExt, TaskPtr},
    wait_queue::WaitQueue,
};

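// Internal state shared by both semaphore flavours: a plain counting semaphore,
// and an (optionally recursive) mutex that tracks its owner so that priority
// inheritance can be applied while the lock is contended.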
enum SemaphoreInner {
    Counting {
        current: u32,
        max: u32,
        waiting: WaitQueue,
    },
    Mutex {
        recursive: bool,
        owner: Option<TaskPtr>,
        original_priority: Priority,
        lock_counter: u32,
        waiting: WaitQueue,
    },
}

impl SemaphoreInner {
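    // Non-blocking take. The counting variant decrements the count if it is
    // non-zero. The mutex variant records the current task as owner (or bumps the
    // lock counter on recursive re-entry); if another task already holds the
    // mutex, the holder's priority is raised to the caller's priority (priority
    // inheritance) and the take fails.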
    fn try_take(&mut self) -> bool {
        match self {
            SemaphoreInner::Counting { current, .. } => {
                if *current > 0 {
                    *current -= 1;
                    true
                } else {
                    false
                }
            }
            SemaphoreInner::Mutex {
                recursive,
                owner,
                lock_counter,
                original_priority,
                ..
            } => {
                SCHEDULER.with(|scheduler| {
                    let current = scheduler.current_task(Cpu::current());
                    if let Some(owner) = owner {
                        if *owner == current && *recursive {
                            *lock_counter += 1;
                            true
                        } else {
                            let current_priority = current.priority(&mut scheduler.run_queue);
                            if owner.priority(&mut scheduler.run_queue) < current_priority {
                                owner.set_priority(&mut scheduler.run_queue, current_priority);
                                scheduler.resume_task(*owner);
                            }
                            false
                        }
                    } else {
                        *owner = Some(current);
                        *lock_counter += 1;
                        *original_priority = current.priority(&mut scheduler.run_queue);
                        true
                    }
                })
            }
        }
    }

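    // Variant of `try_take` usable from interrupt context, where there is no
    // current task; a dangling task pointer stands in for the owner instead.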
    fn try_take_from_isr(&mut self) -> bool {
        match self {
            SemaphoreInner::Counting { current, .. } => {
                if *current > 0 {
                    *current -= 1;
                    true
                } else {
                    false
                }
            }
            SemaphoreInner::Mutex {
                recursive,
                owner,
                lock_counter,
                ..
            } => {
                let current = NonNull::dangling();
                if let Some(owner) = owner {
                    if *owner == current && *recursive {
                        *lock_counter += 1;
                        true
                    } else {
                        false
                    }
                } else {
                    *owner = Some(current);
                    *lock_counter += 1;
                    true
                }
            }
        }
    }

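    // Non-blocking give. The counting variant increments the count up to `max`.
    // The mutex variant only releases when called by the owning task; once the
    // lock counter drops to zero the owner's original priority is restored.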
    fn try_give(&mut self) -> bool {
        match self {
            SemaphoreInner::Counting { current, max, .. } => {
                if *current < *max {
                    *current += 1;
                    true
                } else {
                    false
                }
            }
            SemaphoreInner::Mutex {
                owner,
                lock_counter,
                original_priority,
                ..
            } => SCHEDULER.with(|scheduler| {
                let current_cpu = Cpu::current() as usize;
                let current = unwrap!(scheduler.per_cpu[current_cpu].current_task);

                if *owner == Some(current) && *lock_counter > 0 {
                    *lock_counter -= 1;
                    if *lock_counter == 0
                        && let Some(owner) = owner.take()
                    {
                        owner.set_priority(&mut scheduler.run_queue, *original_priority);
                    }
                    true
                } else {
                    false
                }
            }),
        }
    }

    fn try_give_from_isr(&mut self) -> bool {
        match self {
            SemaphoreInner::Counting { current, max, .. } => {
                if *current < *max {
                    *current += 1;
                    true
                } else {
                    false
                }
            }
            SemaphoreInner::Mutex {
                owner,
                lock_counter,
                ..
            } => {
                let current = NonNull::dangling();
                if *owner == Some(current) && *lock_counter > 0 {
                    *lock_counter -= 1;
                    if *lock_counter == 0 {
                        *owner = None;
                    }
                    true
                } else {
                    false
                }
            }
        }
    }

    fn current_count(&mut self) -> u32 {
        match self {
            SemaphoreInner::Counting { current, .. } => *current,
            SemaphoreInner::Mutex { .. } => {
                panic!("RecursiveMutex does not support current_count")
            }
        }
    }

    fn wait_with_deadline(&mut self, deadline: Instant) {
        trace!("Semaphore wait_with_deadline - {:?}", deadline);
        match self {
            SemaphoreInner::Counting { waiting, .. } => waiting.wait_with_deadline(deadline),
            SemaphoreInner::Mutex { waiting, .. } => waiting.wait_with_deadline(deadline),
        }
    }

    fn notify(&mut self) {
        trace!("Semaphore notify");
        match self {
            SemaphoreInner::Counting { waiting, .. } => waiting.notify(),
            SemaphoreInner::Mutex { waiting, .. } => waiting.notify(),
        }
    }
}

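/// A semaphore that behaves either as a counting semaphore or as an (optionally
/// recursive) mutex with priority inheritance, depending on how it was created.
///
/// A minimal usage sketch (assuming the code runs in tasks driven by this
/// crate's scheduler):
///
/// ```rust,ignore
/// static SEM: Semaphore = Semaphore::new_counting(0, 1);
///
/// // Producer task:
/// SEM.give();
///
/// // Consumer task: wait up to 10 ms (timeout is in microseconds).
/// if SEM.take(Some(10_000)) {
///     // ... use the shared resource ...
/// }
/// ```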
pub struct Semaphore {
    inner: NonReentrantMutex<SemaphoreInner>,
}

impl Semaphore {
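    /// Creates a counting semaphore with the given initial count and an upper
    /// bound of `max`.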
    pub const fn new_counting(initial: u32, max: u32) -> Self {
        Semaphore {
            inner: NonReentrantMutex::new(SemaphoreInner::Counting {
                current: initial,
                max,
                waiting: WaitQueue::new(),
            }),
        }
    }

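    /// Creates a mutex-style semaphore. When `recursive` is `true`, the owning
    /// task may take the mutex multiple times and must give it back the same
    /// number of times before it is released.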
    pub const fn new_mutex(recursive: bool) -> Self {
        Semaphore {
            inner: NonReentrantMutex::new(SemaphoreInner::Mutex {
                recursive,
                owner: None,
                lock_counter: 0,
                original_priority: Priority::new(0),
                waiting: WaitQueue::new(),
            }),
        }
    }

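    /// Attempts to take the semaphore without blocking. Returns `true` on
    /// success.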
    pub fn try_take(&self) -> bool {
        self.inner.with(|sem| sem.try_take())
    }

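    /// Attempts to take the semaphore from interrupt context without blocking.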
    pub fn try_take_from_isr(&self) -> bool {
        self.inner.with(|sem| sem.try_take_from_isr())
    }

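    /// Takes the semaphore, blocking the current task until it can be taken or
    /// the optional `timeout_us` deadline (in microseconds) passes. Returns
    /// `true` if the semaphore was taken before the deadline.
    ///
    /// A small sketch of the timeout behaviour (task context assumed):
    ///
    /// ```rust,ignore
    /// // Wait at most 1 ms before giving up.
    /// if !sem.take(Some(1_000)) {
    ///     // timed out
    /// }
    /// ```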
    pub fn take(&self, timeout_us: Option<u32>) -> bool {
        if crate::with_deadline(timeout_us, |deadline| {
            self.inner.with(|sem| {
                if sem.try_take() {
                    true
                } else {
                    sem.wait_with_deadline(deadline);
                    false
                }
            })
        }) {
            debug!("Semaphore - take - success");
            true
        } else {
            debug!("Semaphore - take - timed out");
            false
        }
    }

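    /// Returns the current count of a counting semaphore.
    ///
    /// Panics when called on a mutex-style semaphore.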
    pub fn current_count(&self) -> u32 {
        self.inner.with(|sem| sem.current_count())
    }

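    /// Releases the semaphore and notifies the wait queue. Returns `false` if
    /// the count is already at its maximum or if the calling task does not hold
    /// the mutex.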
    pub fn give(&self) -> bool {
        self.inner.with(|sem| {
            if sem.try_give() {
                sem.notify();
                true
            } else {
                false
            }
        })
    }

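    /// Releases the semaphore from interrupt context and notifies the wait
    /// queue.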
    pub fn try_give_from_isr(&self) -> bool {
        self.inner.with(|sem| {
            if sem.try_give_from_isr() {
                sem.notify();
                true
            } else {
                false
            }
        })
    }
}