esp_wifi/compat/
common.rs
#![allow(unused)]

use core::{
    cell::RefCell,
    fmt::Write,
    mem::size_of_val,
    ptr::{self, addr_of, addr_of_mut},
};

use esp_wifi_sys::{c_types::c_char, include::malloc};

use super::malloc::free;
use crate::{
    binary::c_types::{c_int, c_void},
    hal::sync::Locked,
    memory_fence::memory_fence,
    preempt::{current_task, yield_task},
};

pub(crate) const OSI_FUNCS_TIME_BLOCKING: u32 = u32::MAX;

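/// State for the mutexes handed out by `create_recursive_mutex`:
/// `locking_pid` identifies the owning task and `count` the recursion depth.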
#[derive(Clone, Copy, Debug)]
struct Mutex {
    locking_pid: usize,
    count: u32,
    recursive: bool,
}

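/// A fixed-capacity queue of fixed-size items that serializes access to the
/// underlying `RawQueue` through `Locked`.
///
/// Illustrative sketch of the internal API (not doctested):
///
/// ```ignore
/// let mut q = ConcurrentQueue::new(4, core::mem::size_of::<u32>());
/// let mut item = 42u32;
/// q.enqueue(core::ptr::addr_of_mut!(item).cast());
/// let mut out = 0u32;
/// assert!(q.try_dequeue(core::ptr::addr_of_mut!(out).cast()));
/// assert_eq!(out, 42);
/// ```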
pub(crate) struct ConcurrentQueue {
    raw_queue: Locked<RawQueue>,
}

impl ConcurrentQueue {
    pub(crate) fn new(count: usize, item_size: usize) -> Self {
        Self {
            raw_queue: Locked::new(RawQueue::new(count, item_size)),
        }
    }

    fn release_storage(&mut self) {
        self.raw_queue.with(|q| unsafe { q.release_storage() })
    }

    pub(crate) fn enqueue(&mut self, item: *mut c_void) -> i32 {
        self.raw_queue.with(|q| unsafe { q.enqueue(item) })
    }

    pub(crate) fn try_dequeue(&mut self, item: *mut c_void) -> bool {
        self.raw_queue.with(|q| unsafe { q.try_dequeue(item) })
    }

    pub(crate) fn remove(&mut self, item: *mut c_void) {
        self.raw_queue.with(|q| unsafe { q.remove(item) })
    }

    pub(crate) fn count(&self) -> usize {
        self.raw_queue.with(|q| unsafe { q.count() })
    }
}

impl Drop for ConcurrentQueue {
    fn drop(&mut self) {
        self.release_storage();
    }
}

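/// A simple ring buffer of fixed-size items, backed by storage obtained from
/// `malloc`. It performs no locking itself; `ConcurrentQueue` wraps it in a
/// `Locked` to serialize access.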
pub struct RawQueue {
    capacity: usize,
    item_size: usize,
    current_read: usize,
    current_write: usize,
    // Number of items currently stored. Tracking the length explicitly avoids
    // the ring-buffer ambiguity where `current_read == current_write` could
    // mean either "empty" or "full".
    len: usize,
    storage: *mut u8,
}

impl RawQueue {
    pub fn new(capacity: usize, item_size: usize) -> Self {
        let storage = unsafe { malloc((capacity * item_size) as u32) as *mut u8 };
        assert!(!storage.is_null());

        Self {
            capacity,
            item_size,
            current_read: 0,
            current_write: 0,
            len: 0,
            storage,
        }
    }

    unsafe fn release_storage(&mut self) {
        unsafe {
            free(self.storage);
        }
        self.storage = core::ptr::null_mut();
    }

    unsafe fn enqueue(&mut self, item: *mut c_void) -> i32 {
        if self.count() < self.capacity {
            unsafe {
                let p = self.storage.byte_add(self.item_size * self.current_write);
                p.copy_from(item as *mut u8, self.item_size);
                self.current_write = (self.current_write + 1) % self.capacity;
            }
            self.len += 1;

            1
        } else {
            0
        }
    }

    unsafe fn try_dequeue(&mut self, item: *mut c_void) -> bool {
        if self.count() > 0 {
            unsafe {
                let p = self.storage.byte_add(self.item_size * self.current_read) as *const c_void;
                item.copy_from(p, self.item_size);
                self.current_read = (self.current_read + 1) % self.capacity;
            }
            self.len -= 1;
            true
        } else {
            false
        }
    }

    unsafe fn remove(&mut self, item: *mut c_void) {
        let item_slice = core::slice::from_raw_parts_mut(item as *mut u8, self.item_size);

        let count = self.count();

        if count == 0 {
            return;
        }

        // Drain the queue once, re-enqueueing every element that does not
        // compare equal (byte-wise) to `item`.
        let tmp_item = crate::compat::malloc::malloc(self.item_size);

        if tmp_item.is_null() {
            panic!("Out of memory");
        }

        for _ in 0..count {
            if self.try_dequeue(tmp_item as *mut c_void) {
                let tmp_slice = core::slice::from_raw_parts_mut(tmp_item, self.item_size);
                if tmp_slice != item_slice {
                    self.enqueue(tmp_item as *mut c_void);
                }
            }
        }

        crate::compat::malloc::free(tmp_item);
    }

    unsafe fn count(&self) -> usize {
        // The explicit length makes it unnecessary to derive the count from
        // the read/write indices (which cannot distinguish full from empty).
        self.len
    }
}

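/// Interprets a NUL-terminated C string as a `&str`.
///
/// # Safety
///
/// `s` must point to a valid, NUL-terminated string that outlives `'a`; the
/// bytes are assumed to be valid UTF-8 (the check is skipped).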
pub unsafe fn str_from_c<'a>(s: *const c_char) -> &'a str {
    let c_str = core::ffi::CStr::from_ptr(s.cast());
    core::str::from_utf8_unchecked(c_str.to_bytes())
}

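/// `strnlen` shim exported for the linked C code: returns the number of bytes
/// before the terminating NUL, reading at most `maxlen` bytes.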
#[no_mangle]
unsafe extern "C" fn strnlen(chars: *const c_char, maxlen: usize) -> usize {
    let mut len = 0;
    loop {
        // Stop at `maxlen` as well as at the terminating NUL, as strnlen is
        // specified to do.
        if len as usize >= maxlen || chars.offset(len).read_volatile() == 0 {
            break;
        }
        len += 1;
    }

    len as usize
}

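/// Creates a counting semaphore for the driver: a single heap-allocated `u32`
/// holding the current count. Note that `max` is not enforced here.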
pub(crate) fn sem_create(max: u32, init: u32) -> *mut c_void {
    unsafe {
        let ptr = malloc(4) as *mut u32;
        ptr.write_volatile(init);

        trace!("sem created res = {:?}", ptr);
        ptr.cast()
    }
}

pub(crate) fn sem_delete(semphr: *mut c_void) {
    trace!(">>> sem delete");

    unsafe {
        free(semphr.cast());
    }
}

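/// Takes the semaphore, polling (and yielding to other tasks) until the count
/// is non-zero or `tick` timer ticks have elapsed; `OSI_FUNCS_TIME_BLOCKING`
/// means "wait forever". Returns 1 on success and 0 on timeout.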
pub(crate) fn sem_take(semphr: *mut c_void, tick: u32) -> i32 {
    trace!(">>>> semphr_take {:?} block_time_tick {}", semphr, tick);

    let forever = tick == OSI_FUNCS_TIME_BLOCKING;
    let timeout = tick as u64;
    let start = crate::time::systimer_count();

    let sem = semphr as *mut u32;

    'outer: loop {
        let res = critical_section::with(|_| unsafe {
            memory_fence();
            let cnt = *sem;
            if cnt > 0 {
                *sem = cnt - 1;
                1
            } else {
                0
            }
        });

        if res == 1 {
            trace!(">>>> return from semphr_take");
            return 1;
        }

        if !forever && crate::time::elapsed_time_since(start) > timeout {
            break 'outer;
        }

        yield_task();
    }

    trace!(">>>> return from semphr_take with timeout");
    0
}

pub(crate) fn sem_give(semphr: *mut c_void) -> i32 {
    trace!("semphr_give {:?}", semphr);
    let sem = semphr as *mut u32;

    critical_section::with(|_| unsafe {
        let cnt = *sem;
        *sem = cnt + 1;
        1
    })
}

pub(crate) fn thread_sem_get() -> *mut c_void {
    trace!("wifi_thread_semphr_get");
    crate::preempt::current_task_thread_semaphore()
}

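/// Allocates a recursive mutex on the heap and returns it as an opaque
/// pointer. It starts out unowned (`count == 0`).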
pub(crate) fn create_recursive_mutex() -> *mut c_void {
    let mutex = Mutex {
        locking_pid: 0xffff_ffff,
        count: 0,
        recursive: true,
    };

    let ptr = unsafe { malloc(size_of_val(&mutex) as u32) as *mut Mutex };
    unsafe {
        ptr.write(mutex);
    }
    memory_fence();

    trace!("recursive_mutex_create called {:?}", ptr);
    ptr as *mut c_void
}

pub(crate) fn mutex_delete(mutex: *mut c_void) {
    unsafe {
        free(mutex.cast());
    }
}

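/// Locks the mutex, yielding to other tasks until it is free or already held
/// by the current task (recursive locking). Always returns 1.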
pub(crate) fn lock_mutex(mutex: *mut c_void) -> i32 {
    trace!("mutex_lock ptr = {:?}", mutex);

    let ptr = mutex as *mut Mutex;
    let current_task = current_task() as usize;

    loop {
        let mutex_locked = critical_section::with(|_| unsafe {
            if (*ptr).count == 0 {
                (*ptr).locking_pid = current_task;
                (*ptr).count += 1;
                true
            } else if (*ptr).locking_pid == current_task {
                (*ptr).count += 1;
                true
            } else {
                false
            }
        });
        memory_fence();

        if mutex_locked {
            return 1;
        }

        yield_task();
    }
}

pub(crate) fn unlock_mutex(mutex: *mut c_void) -> i32 {
    trace!("mutex_unlock {:?}", mutex);

    let ptr = mutex as *mut Mutex;
    critical_section::with(|_| unsafe {
        memory_fence();
        if (*ptr).count > 0 {
            (*ptr).count -= 1;
            1
        } else {
            0
        }
    })
}

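/// Heap-allocates a `ConcurrentQueue` with `queue_len` slots of `item_size`
/// bytes each and returns it as a raw pointer for the C side; `delete_queue`
/// drops it and frees the allocation.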
pub(crate) fn create_queue(queue_len: c_int, item_size: c_int) -> *mut ConcurrentQueue {
    trace!("wifi_create_queue len={} size={}", queue_len, item_size);

    let queue = ConcurrentQueue::new(queue_len as usize, item_size as usize);
    let ptr = unsafe { malloc(size_of_val(&queue) as u32) as *mut ConcurrentQueue };
    unsafe {
        ptr.write(queue);
    }

    trace!("created queue @{:?}", ptr);

    ptr
}

pub(crate) fn delete_queue(queue: *mut ConcurrentQueue) {
    trace!("delete_queue {:?}", queue);

    unsafe {
        ptr::drop_in_place(queue);
        crate::compat::malloc::free(queue.cast());
    }
}

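/// Enqueues `item` without blocking; `block_time_tick` is currently ignored.
/// Returns 1 on success and 0 if the queue is full.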
pub(crate) fn send_queued(
    queue: *mut ConcurrentQueue,
    item: *mut c_void,
    block_time_tick: u32,
) -> i32 {
    trace!(
        "queue_send queue {:?} item {:x} block_time_tick {}",
        queue,
        item as usize,
        block_time_tick
    );

    unsafe { (*queue).enqueue(item) }
}

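/// Dequeues into `item`, yielding to other tasks until an element arrives or
/// `block_time_tick` ticks have elapsed. Returns 1 on success and -1 on
/// timeout.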
pub(crate) fn receive_queued(
    queue: *mut ConcurrentQueue,
    item: *mut c_void,
    block_time_tick: u32,
) -> i32 {
    trace!(
        "queue_recv {:?} item {:?} block_time_tick {}",
        queue,
        item,
        block_time_tick
    );

    let forever = block_time_tick == OSI_FUNCS_TIME_BLOCKING;
    let timeout = block_time_tick as u64;
    let start = crate::time::systimer_count();

    loop {
        if unsafe { (*queue).try_dequeue(item) } {
            trace!("received");
            return 1;
        }

        if !forever && crate::time::elapsed_time_since(start) > timeout {
            trace!("queue_recv returns with timeout");
            return -1;
        }

        yield_task();
    }
}

pub(crate) fn number_of_messages_in_queue(queue: *const ConcurrentQueue) -> u32 {
    trace!("queue_msg_waiting {:?}", queue);

    unsafe { (*queue).count() as u32 }
}

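// Minimal libc-style shims (`sleep`, `usleep`, `putchar`) exported for the
// linked C code. `usleep` busy-waits using the ROM's `esp_rom_delay_us`;
// `putchar` only forwards the character to the trace output.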
#[no_mangle]
pub(crate) unsafe extern "C" fn sleep(
    seconds: crate::binary::c_types::c_uint,
) -> crate::binary::c_types::c_uint {
    trace!("sleep");

    // `usleep` expects microseconds, so convert from seconds.
    usleep(seconds * 1_000_000);
    0
}

#[no_mangle]
unsafe extern "C" fn usleep(us: u32) -> crate::binary::c_types::c_int {
    trace!("usleep");
    extern "C" {
        fn esp_rom_delay_us(us: u32);
    }
    esp_rom_delay_us(us);
    0
}

#[no_mangle]
unsafe extern "C" fn putchar(c: i32) -> crate::binary::c_types::c_int {
    trace!("putchar {}", c as u8 as char);
    c
}