esp_wifi/compat/common.rs

#![allow(unused)]

use core::{
    cell::RefCell,
    fmt::Write,
    mem::size_of_val,
    ptr::{self, addr_of, addr_of_mut},
};

use allocator_api2::{boxed::Box, vec::Vec};
use esp_wifi_sys::{c_types::c_char, include::malloc};

use super::malloc::free;
use crate::{
    binary::c_types::{c_int, c_void},
    compat::malloc::InternalMemory,
    hal::sync::Locked,
    memory_fence::memory_fence,
    preempt::{current_task, yield_task},
};

pub(crate) const OSI_FUNCS_TIME_BLOCKING: u32 = u32::MAX;

#[derive(Clone, Copy, Debug)]
struct Mutex {
    locking_pid: usize,
    count: u32,
    recursive: bool,
}

pub(crate) struct ConcurrentQueue {
    raw_queue: Locked<RawQueue>,
}

impl ConcurrentQueue {
    pub(crate) fn new(count: usize, item_size: usize) -> Self {
        Self {
            raw_queue: Locked::new(RawQueue::new(count, item_size)),
        }
    }

    pub(crate) fn enqueue(&mut self, item: *mut c_void) -> i32 {
        self.raw_queue.with(|q| unsafe { q.enqueue(item) })
    }

    pub(crate) fn try_dequeue(&mut self, item: *mut c_void) -> bool {
        self.raw_queue.with(|q| unsafe { q.try_dequeue(item) })
    }

    pub(crate) fn remove(&mut self, item: *mut c_void) {
        self.raw_queue.with(|q| unsafe { q.remove(item) })
    }

    pub(crate) fn count(&self) -> usize {
        self.raw_queue.with(|q| q.count())
    }
}
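
// Usage sketch (illustrative, single-task view; not part of the driver API
// surface): items are fixed-size blobs copied in and out through raw pointers.
//
//     let mut q = ConcurrentQueue::new(4, core::mem::size_of::<u32>());
//     let mut item: u32 = 42;
//     q.enqueue((&mut item as *mut u32).cast());
//     let mut out: u32 = 0;
//     assert!(q.try_dequeue((&mut out as *mut u32).cast()));
//     assert_eq!(out, 42);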

pub struct RawQueue {
    item_size: usize,
    capacity: usize,
    current_read: usize,
    current_write: usize,
    /// Number of items currently stored. Tracked explicitly so that a full
    /// queue (`len == capacity`) can be told apart from an empty one when the
    /// wrapping read and write indices coincide.
    len: usize,
    storage: Box<[u8], InternalMemory>,
}

impl RawQueue {
    pub fn new(capacity: usize, item_size: usize) -> Self {
        let storage =
            unsafe { Box::new_zeroed_slice_in(capacity * item_size, InternalMemory).assume_init() };

        Self {
            item_size,
            capacity,
            current_read: 0,
            current_write: 0,
            len: 0,
            storage,
        }
    }

    fn get(&self, index: usize) -> &[u8] {
        let item_start = self.item_size * index;
        &self.storage[item_start..][..self.item_size]
    }

    fn get_mut(&mut self, index: usize) -> &mut [u8] {
        let item_start = self.item_size * index;
        &mut self.storage[item_start..][..self.item_size]
    }

    fn full(&self) -> bool {
        self.len == self.capacity
    }

    fn empty(&self) -> bool {
        self.len == 0
    }

    unsafe fn enqueue(&mut self, item: *mut c_void) -> i32 {
        if !self.full() {
            let item = unsafe { core::slice::from_raw_parts(item as *const u8, self.item_size) };

            let dst = self.get_mut(self.current_write);
            dst.copy_from_slice(item);

            self.current_write = (self.current_write + 1) % self.capacity;
            self.len += 1;
            1
        } else {
            0
        }
    }

    unsafe fn try_dequeue(&mut self, item: *mut c_void) -> bool {
        if !self.empty() {
            let item = unsafe { core::slice::from_raw_parts_mut(item as *mut u8, self.item_size) };

            let src = self.get(self.current_read);
            item.copy_from_slice(src);

            self.current_read = (self.current_read + 1) % self.capacity;
            self.len -= 1;

            true
        } else {
            false
        }
    }

    unsafe fn remove(&mut self, item: *mut c_void) {
        let item_slice = unsafe { core::slice::from_raw_parts(item as *const u8, self.item_size) };
        let count = self.count();

        if count == 0 {
            return;
        }

        let mut tmp_item = Vec::<u8, _>::new_in(InternalMemory);
        tmp_item.reserve_exact(self.item_size);
        tmp_item.resize(self.item_size, 0);

        // Rotate through the queue once, dropping every element that matches
        // `item` and re-enqueueing the rest in their original order.
        for _ in 0..count {
            if !unsafe { self.try_dequeue(tmp_item.as_mut_ptr().cast()) } {
                break;
            }
            if &tmp_item[..] != item_slice {
                unsafe { self.enqueue(tmp_item.as_mut_ptr().cast()) };
            }
        }
    }

    fn count(&self) -> usize {
        self.len
    }
}
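
// Worked example of the ring bookkeeping: with `capacity == 2`, enqueueing two
// items makes `len == 2` while the write index wraps around to equal the read
// index. Tracking `len` explicitly is what lets `full()` report true here
// instead of the queue looking empty again; dequeues then yield the two items
// in FIFO order.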

/// Converts a NUL-terminated C string to a `&str` without checking UTF-8.
///
/// # Safety
///
/// `s` must point to a valid, NUL-terminated string containing valid UTF-8
/// that outlives `'a`.
pub unsafe fn str_from_c<'a>(s: *const c_char) -> &'a str {
    unsafe {
        let c_str = core::ffi::CStr::from_ptr(s.cast());
        core::str::from_utf8_unchecked(c_str.to_bytes())
    }
}
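
// Example (sketch; `c"scan"` is an illustrative value, not something the
// blobs actually pass):
//
//     let name = unsafe { str_from_c(c"scan".as_ptr().cast()) };
//     assert_eq!(name, "scan");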

#[unsafe(no_mangle)]
unsafe extern "C" fn strnlen(chars: *const c_char, maxlen: usize) -> usize {
    let mut len = 0;
    // Stop at the first NUL byte, but never scan past `maxlen` bytes.
    while len < maxlen {
        if unsafe { chars.add(len).read_volatile() } == 0 {
            break;
        }
        len += 1;
    }

    len
}
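
// Sketch of the contract (hypothetical values):
//
//     assert_eq!(unsafe { strnlen(c"abc".as_ptr().cast(), 8) }, 3); // stops at the NUL
//     assert_eq!(unsafe { strnlen(c"abc".as_ptr().cast(), 2) }, 2); // clamped by maxlen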

pub(crate) fn sem_create(max: u32, init: u32) -> *mut c_void {
    // The semaphore is a bare counter; `max` is not enforced here (or in
    // `sem_give`), so this behaves as an unbounded counting semaphore.
    unsafe {
        let ptr = malloc(4) as *mut u32;
        ptr.write_volatile(init);

        trace!("sem created res = {:?}", ptr);
        ptr.cast()
    }
}

pub(crate) fn sem_delete(semphr: *mut c_void) {
    trace!(">>> sem delete");

    unsafe {
        free(semphr.cast());
    }
}

pub(crate) fn sem_take(semphr: *mut c_void, tick: u32) -> i32 {
    // A blocking take cannot succeed while interrupts are disabled, since no
    // other context could run `sem_give`; fall back to a one-tick timeout.
    let tick = if tick == OSI_FUNCS_TIME_BLOCKING && crate::is_interrupts_disabled() {
        warn!("blocking sem_take probably called from an ISR - return early");
        1
    } else {
        tick
    };

    trace!(">>>> semphr_take {:?} block_time_tick {}", semphr, tick);

    let forever = tick == OSI_FUNCS_TIME_BLOCKING;
    let timeout = tick as u64;
    let start = crate::time::systimer_count();

    let sem = semphr as *mut u32;

    'outer: loop {
        let res = critical_section::with(|_| unsafe {
            memory_fence();
            let cnt = *sem;
            if cnt > 0 {
                *sem = cnt - 1;
                1
            } else {
                0
            }
        });

        if res == 1 {
            trace!(">>>> return from semphr_take");
            return 1;
        }

        if !forever && crate::time::elapsed_time_since(start) > timeout {
            break 'outer;
        }

        yield_task();
    }

    trace!(">>>> return from semphr_take with timeout");
    0
}

pub(crate) fn sem_give(semphr: *mut c_void) -> i32 {
    trace!("semphr_give {:?}", semphr);
    let sem = semphr as *mut u32;

    critical_section::with(|_| unsafe {
        let cnt = *sem;
        *sem = cnt + 1;
        1
    })
}
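
// Lifecycle sketch (illustrative values): a semaphore created empty,
// signalled once, then taken with a 100-tick timeout.
//
//     let sem = sem_create(1, 0);
//     sem_give(sem);
//     assert_eq!(sem_take(sem, 100), 1);
//     sem_delete(sem);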

pub(crate) fn thread_sem_get() -> *mut c_void {
    trace!("wifi_thread_semphr_get");
    crate::preempt::current_task_thread_semaphore()
}

pub(crate) fn create_recursive_mutex() -> *mut c_void {
    let mutex = Mutex {
        locking_pid: 0xffff_ffff,
        count: 0,
        recursive: true,
    };

    let ptr = unsafe { malloc(size_of_val(&mutex) as u32) as *mut Mutex };
    unsafe {
        ptr.write(mutex);
    }
    memory_fence();

    trace!("recursive_mutex_create called {:?}", ptr);
    ptr as *mut c_void
}

pub(crate) fn mutex_delete(mutex: *mut c_void) {
    unsafe {
        free(mutex.cast());
    }
}

pub(crate) fn lock_mutex(mutex: *mut c_void) -> i32 {
    trace!("mutex_lock ptr = {:?}", mutex);

    let ptr = mutex as *mut Mutex;
    let current_task = current_task() as usize;

    loop {
        let mutex_locked = critical_section::with(|_| unsafe {
            if (*ptr).count == 0 {
                // Uncontended: take ownership.
                (*ptr).locking_pid = current_task;
                (*ptr).count += 1;
                true
            } else if (*ptr).locking_pid == current_task {
                // Recursive acquisition by the current owner.
                (*ptr).count += 1;
                true
            } else {
                false
            }
        });
        memory_fence();

        if mutex_locked {
            return 1;
        }

        yield_task();
    }
}

pub(crate) fn unlock_mutex(mutex: *mut c_void) -> i32 {
    trace!("mutex_unlock {:?}", mutex);

    let ptr = mutex as *mut Mutex;
    critical_section::with(|_| unsafe {
        memory_fence();
        if (*ptr).count > 0 {
            (*ptr).count -= 1;
            1
        } else {
            0
        }
    })
}
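
// Recursive-locking sketch: the owning task may lock repeatedly and must
// unlock the same number of times before the mutex is released.
//
//     let m = create_recursive_mutex();
//     lock_mutex(m);
//     lock_mutex(m); // re-entrant, count goes to 2
//     unlock_mutex(m);
//     unlock_mutex(m); // count back to 0
//     mutex_delete(m);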

pub(crate) fn create_queue(queue_len: c_int, item_size: c_int) -> *mut ConcurrentQueue {
    trace!("wifi_create_queue len={} size={}", queue_len, item_size);

    let queue = ConcurrentQueue::new(queue_len as usize, item_size as usize);
    let ptr = unsafe { malloc(size_of_val(&queue) as u32) as *mut ConcurrentQueue };
    unsafe {
        ptr.write(queue);
    }

    trace!("created queue @{:?}", ptr);

    ptr
}

pub(crate) fn delete_queue(queue: *mut ConcurrentQueue) {
    trace!("delete_queue {:?}", queue);

    unsafe {
        ptr::drop_in_place(queue);
        crate::compat::malloc::free(queue.cast());
    }
}

pub(crate) fn send_queued(
    queue: *mut ConcurrentQueue,
    item: *mut c_void,
    block_time_tick: u32,
) -> i32 {
    trace!(
        "queue_send queue {:?} item {:x} block_time_tick {}",
        queue, item as usize, block_time_tick
    );

    // The enqueue is non-blocking: `block_time_tick` is ignored and a full
    // queue simply returns 0.
    unsafe { (*queue).enqueue(item) }
}

pub(crate) fn receive_queued(
    queue: *mut ConcurrentQueue,
    item: *mut c_void,
    block_time_tick: u32,
) -> i32 {
    trace!(
        "queue_recv {:?} item {:?} block_time_tick {}",
        queue, item, block_time_tick
    );

    let forever = block_time_tick == OSI_FUNCS_TIME_BLOCKING;
    let timeout = block_time_tick as u64;
    let start = crate::time::systimer_count();

    loop {
        if unsafe { (*queue).try_dequeue(item) } {
            trace!("received");
            return 1;
        }

        if !forever && crate::time::elapsed_time_since(start) > timeout {
            trace!("queue_recv returns with timeout");
            return -1;
        }

        yield_task();
    }
}

pub(crate) fn number_of_messages_in_queue(queue: *const ConcurrentQueue) -> u32 {
    trace!("queue_msg_waiting {:?}", queue);

    unsafe { (*queue).count() as u32 }
}
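
// End-to-end sketch of the queue shims (illustrative values):
//
//     let q = create_queue(4, 4);
//     let mut v: u32 = 0xdead_beef;
//     send_queued(q, (&mut v as *mut u32).cast(), 0);
//     assert_eq!(number_of_messages_in_queue(q), 1);
//     let mut out: u32 = 0;
//     assert_eq!(receive_queued(q, (&mut out as *mut u32).cast(), 100), 1);
//     delete_queue(q);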

#[unsafe(no_mangle)]
pub(crate) unsafe extern "C" fn sleep(
    seconds: crate::binary::c_types::c_uint,
) -> crate::binary::c_types::c_uint {
    trace!("sleep");

    unsafe {
        // `usleep` takes microseconds, so one second is 1_000_000 of them.
        usleep(seconds * 1_000_000);
    }
    0
}

#[unsafe(no_mangle)]
unsafe extern "C" fn usleep(us: u32) -> crate::binary::c_types::c_int {
    trace!("usleep");
    unsafe extern "C" {
        fn esp_rom_delay_us(us: u32);
    }

    unsafe {
        esp_rom_delay_us(us);
    }
    0
}
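
// NOTE: `esp_rom_delay_us` is a busy-waiting ROM routine, so `sleep` and
// `usleep` block the calling task without yielding to the scheduler.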

#[unsafe(no_mangle)]
unsafe extern "C" fn putchar(c: i32) -> crate::binary::c_types::c_int {
    trace!("putchar {}", c as u8 as char);
    c
}