Mirror of https://gitee.com/fantix/kloop.git, synced 2024-11-21 18:01:00 +00:00
Use atomic API

commit f3c43bf145 (parent dd4a38a610)
5 changed files with 40 additions and 45 deletions
src/kloop/includes/atomic.pxd (new file, +27)

@@ -0,0 +1,27 @@
+# Copyright (c) 2022 Fantix King http://fantix.pro
+# kLoop is licensed under Mulan PSL v2.
+# You can use this software according to the terms and conditions of the Mulan PSL v2.
+# You may obtain a copy of Mulan PSL v2 at:
+# http://license.coscl.org.cn/MulanPSL2
+# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
+# EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
+# MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
+# See the Mulan PSL v2 for more details.
+
+
+cdef extern from "<stdatomic.h>" nogil:
+    ctypedef enum MemoryOrder "memory_order":
+        relaxed "memory_order_relaxed"
+        acquire "memory_order_acquire"
+        release "memory_order_release"
+        seq_cst "memory_order_seq_cst"
+
+    ctypedef unsigned uint "atomic_uint"
+
+    unsigned load_explicit "atomic_load_explicit" (
+        uint* object, MemoryOrder order
+    )
+    void store_explicit "atomic_store_explicit" (
+        uint* object, unsigned desired, MemoryOrder order
+    )
+    void thread_fence "atomic_thread_fence" (MemoryOrder order)
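The new atomic.pxd exposes the C11 <stdatomic.h> primitives to Cython through verbatim C names, so a call such as atomic.load_explicit(...) compiles directly to atomic_load_explicit(...). The ring code below leans on the release/acquire pairing these declarations provide. For readers new to the pattern, here is a minimal standalone sketch of release/acquire publication; hypothetical demo code, not from the kloop repository (build with: cc -std=c11 -pthread demo.c):

/* Release/acquire publication: the pattern the ring code below relies on. */
#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

static atomic_uint tail;  /* index the producer publishes */
static unsigned payload;  /* plain data guarded by the release/acquire pair */

static void *producer(void *arg) {
    (void)arg;
    payload = 42;  /* 1. write the data */
    /* 2. publish: everything written before this release store is visible
       to any thread whose acquire load observes the new tail value */
    atomic_store_explicit(&tail, 1, memory_order_release);
    return NULL;
}

int main(void) {
    pthread_t t;
    pthread_create(&t, NULL, producer, NULL);
    while (atomic_load_explicit(&tail, memory_order_acquire) == 0)
        ;  /* spin until the release store becomes visible */
    printf("%u\n", payload);  /* prints 42, never a stale value */
    pthread_join(t, NULL);
    return 0;
}

This is the contract between kloop and the kernel on the mmap'ed io_uring rings: one side stores an index with release ordering, the other loads it with acquire ordering.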
src/kloop/includes/barrier.h (deleted file, -20)

@@ -1,20 +0,0 @@
-/* Copied from liburing: src/include/liburing/barrier.h */
-
-#include <stdatomic.h>
-
-#define IO_URING_WRITE_ONCE(var, val) \
-    atomic_store_explicit((_Atomic __typeof__(var) *)&(var), \
-                          (val), memory_order_relaxed)
-#define IO_URING_READ_ONCE(var) \
-    atomic_load_explicit((_Atomic __typeof__(var) *)&(var), \
-                         memory_order_relaxed)
-
-#define io_uring_smp_store_release(p, v) \
-    atomic_store_explicit((_Atomic __typeof__(*(p)) *)(p), (v), \
-                          memory_order_release)
-#define io_uring_smp_load_acquire(p) \
-    atomic_load_explicit((_Atomic __typeof__(*(p)) *)(p), \
-                         memory_order_acquire)
-
-#define io_uring_smp_mb() \
-    atomic_thread_fence(memory_order_seq_cst)
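Each macro in the deleted header was a thin alias for one stdatomic call, so invoking the call directly from Cython (as the hunks below do) changes spelling, not behavior. A small equivalence sketch, with the macro body copied from the deleted header above; hypothetical demo code, and the pointer cast mirrors liburing's approach (it relies on the GCC/Clang __typeof__ extension and assumes plain and _Atomic unsigned share a representation):

/* The removed macro and the direct call perform the same release store. */
#include <stdatomic.h>

#define io_uring_smp_store_release(p, v) \
    atomic_store_explicit((_Atomic __typeof__(*(p)) *)(p), (v), \
                          memory_order_release)

int main(void) {
    unsigned ktail = 0;
    io_uring_smp_store_release(&ktail, 1u);  /* macro form, before this commit */
    atomic_store_explicit((_Atomic unsigned *)&ktail, 2u,
                          memory_order_release);  /* direct form, after */
    return (int)ktail;  /* exits with status 2 */
}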
src/kloop/includes/barrier.pxd (deleted file, -16)

@@ -1,16 +0,0 @@
-# Copyright (c) 2022 Fantix King http://fantix.pro
-# kLoop is licensed under Mulan PSL v2.
-# You can use this software according to the terms and conditions of the Mulan PSL v2.
-# You may obtain a copy of Mulan PSL v2 at:
-# http://license.coscl.org.cn/MulanPSL2
-# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
-# EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
-# MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
-# See the Mulan PSL v2 for more details.
-
-
-cdef extern from "includes/barrier.h" nogil:
-    unsigned IO_URING_READ_ONCE(unsigned var)
-    void io_uring_smp_store_release(void* p, unsigned v)
-    unsigned int io_uring_smp_load_acquire(void* p)
-    void io_uring_smp_mb()
@@ -16,7 +16,7 @@ from libc cimport errno, string
 from posix cimport mman, unistd, time
 from posix.types cimport mode_t
 
-from .includes cimport libc, linux, barrier
+from .includes cimport atomic, libc, linux
 
 include "./handle.pxd"
 include "./queue.pxd"
@@ -107,7 +107,7 @@ cdef inline unsigned ring_sq_flush(SubmissionQueue* sq) nogil:
         tail += 1
         sq.sqe_head += 1
         to_submit -= 1
-    barrier.io_uring_smp_store_release(sq.ktail, tail)
+    atomic.store_explicit(<atomic.uint*>sq.ktail, tail, atomic.release)
     return tail - sq.khead[0]
 
 
@@ -124,7 +124,9 @@ cdef int ring_select(Ring* ring, long long timeout) nogil except -1:
 
     # Call enter if we have no CQE ready and timeout is not 0, or else we
     # handle the ready CQEs first.
-    ready = barrier.io_uring_smp_load_acquire(cq.ktail) - cq.khead[0]
+    ready = atomic.load_explicit(
+        <atomic.uint*>cq.ktail, atomic.acquire
+    ) - cq.khead[0]
     if not ready and timeout != 0:
         flags |= linux.IORING_ENTER_GETEVENTS
         if timeout > 0:
@@ -138,9 +140,9 @@ cdef int ring_select(Ring* ring, long long timeout) nogil except -1:
         # there is something for the kernel to handle.
         submit = ring_sq_flush(sq)
         if submit:
-            barrier.io_uring_smp_mb()
-            if barrier.IO_URING_READ_ONCE(
-                sq.kflags[0]
+            atomic.thread_fence(atomic.seq_cst)
+            if atomic.load_explicit(
+                <atomic.uint*>sq.kflags, atomic.relaxed
             ) & linux.IORING_SQ_NEED_WAKEUP:
                 arg.ts = 0
                 flags |= linux.IORING_ENTER_SQ_WAKEUP
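The hunk above swaps io_uring_smp_mb() and IO_URING_READ_ONCE() for their literal expansions: a sequentially consistent fence followed by a relaxed load. The fence guarantees the just-published SQ tail is globally visible before the flags are read, so a sleeping SQPOLL kernel thread cannot be missed. A condensed C sketch of the pattern; hypothetical demo code in which sq_ktail and sq_kflags stand in for the mmap'ed ring fields (IORING_SQ_NEED_WAKEUP and IORING_ENTER_SQ_WAKEUP come from the real <linux/io_uring.h>):

/* Fence-then-relaxed-load: decide whether the kernel needs a wakeup. */
#include <stdatomic.h>
#include <linux/io_uring.h>  /* IORING_SQ_NEED_WAKEUP, IORING_ENTER_SQ_WAKEUP */

static _Atomic unsigned sq_ktail, sq_kflags;

unsigned enter_flags_for(unsigned tail) {
    unsigned flags = 0;
    /* publish the new SQ tail to the kernel */
    atomic_store_explicit(&sq_ktail, tail, memory_order_release);
    /* full fence: the store above must be visible before we inspect the
       kernel's flags, or each side could miss the other's update */
    atomic_thread_fence(memory_order_seq_cst);
    if (atomic_load_explicit(&sq_kflags, memory_order_relaxed)
            & IORING_SQ_NEED_WAKEUP)
        flags |= IORING_ENTER_SQ_WAKEUP;  /* kernel SQPOLL thread is asleep */
    return flags;
}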
@@ -163,7 +165,9 @@ cdef int ring_select(Ring* ring, long long timeout) nogil except -1:
         PyErr_SetFromErrno(IOError)
         return -1
 
-    ready = barrier.io_uring_smp_load_acquire(cq.ktail) - cq.khead[0]
+    ready = atomic.load_explicit(
+        <atomic.uint*>cq.ktail, atomic.acquire
+    ) - cq.khead[0]
 
     return ready
 
@@ -179,7 +183,7 @@ cdef inline void ring_cq_pop(CompletionQueue* cq, RingCallback** callback) nogil
     if ret != NULL:
         ret.res = cqe.res
         callback[0] = ret
-    barrier.io_uring_smp_store_release(cq.khead, head + 1)
+    atomic.store_explicit(<atomic.uint*>cq.khead, head + 1, atomic.release)
 
 
 cdef inline linux.io_uring_sqe* ring_sq_submit(
@@ -195,7 +199,7 @@ cdef inline linux.io_uring_sqe* ring_sq_submit(
     cdef:
         unsigned int head, next
         linux.io_uring_sqe* sqe
-    head = barrier.io_uring_smp_load_acquire(sq.khead)
+    head = atomic.load_explicit(<atomic.uint*>sq.khead, atomic.acquire)
     next = sq.sqe_tail + 1
     if next - head <= sq.kring_entries[0]:
         sqe = &sq.sqes[sq.sqe_tail & sq.kring_mask[0]]
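The occupancy check in the last hunk, next - head <= sq.kring_entries[0], stays correct even after the 32-bit ring indices overflow, because unsigned subtraction is modular. A tiny self-contained check with hypothetical demo values:

/* Unsigned wraparound keeps `next - head` a valid distance across overflow. */
#include <assert.h>
#include <limits.h>

int main(void) {
    unsigned entries = 8;           /* ring size */
    unsigned head = UINT_MAX - 1;   /* consumer index about to wrap */
    unsigned next = head + 3;       /* producer index, wrapped around */
    assert(next == 1u);             /* modular arithmetic wrapped it... */
    assert(next - head == 3u);      /* ...but the distance is still exact */
    assert(next - head <= entries); /* so the capacity check holds */
    return 0;
}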