HelenOS sources: xhci/trb_ring.c
This source file includes the following definitions:
- segment_begin
- segment_end
- get_first_segment
- trb_segment_alloc
- trb_segment_free
- xhci_trb_ring_init
- xhci_trb_ring_fini
- trb_ring_resolve_link
- trb_ring_enqueue_phys
- trb_generates_interrupt
- xhci_trb_ring_enqueue_multiple
- xhci_trb_ring_enqueue
- xhci_trb_ring_reset_dequeue_state
- xhci_trb_ring_size
- xhci_event_ring_init
- xhci_event_ring_reset
- xhci_event_ring_fini
- event_ring_dequeue_phys
- xhci_event_ring_dequeue
- xhci_sw_ring_init
- xhci_sw_ring_enqueue
- xhci_sw_ring_dequeue
- xhci_sw_ring_stop
- xhci_sw_ring_restart
- xhci_sw_ring_fini

#include <errno.h>
#include <assert.h>
#include <ddi.h>
#include <as.h>
#include <align.h>
#include <barrier.h>
#include <usb/debug.h>
#include <mem.h>	/* memset, memcpy */
#include <stdlib.h>	/* calloc, free */
#include "hw_struct/trb.h"
#include "trb_ring.h"
#define SEGMENT_FOOTER_SIZE (sizeof(link_t) + sizeof(uintptr_t))
#define SEGMENT_TRB_COUNT ((PAGE_SIZE - SEGMENT_FOOTER_SIZE) / sizeof(xhci_trb_t))
#define SEGMENT_TRB_USEFUL_COUNT (SEGMENT_TRB_COUNT - 1)

struct trb_segment {
	xhci_trb_t trb_storage[SEGMENT_TRB_COUNT];
	link_t segments_link;
	uintptr_t phys;
} __attribute__((aligned(PAGE_SIZE)));

static_assert(sizeof(trb_segment_t) == PAGE_SIZE,
    "TRB ring segment must occupy exactly one page");
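
/* Segment accessors: begin/end of TRB storage and the first segment of a ring. */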
static inline xhci_trb_t *segment_begin(trb_segment_t *segment)
{
	return segment->trb_storage;
}

static inline xhci_trb_t *segment_end(trb_segment_t *segment)
{
	return segment_begin(segment) + SEGMENT_TRB_COUNT;
}

static inline trb_segment_t *get_first_segment(list_t *segments)
{
	return list_get_instance(list_first(segments), trb_segment_t, segments_link);
}
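
/**
 * Allocate and zero one page-sized, DMA-mapped ring segment below 4 GiB,
 * and remember its physical address in the segment footer.
 */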
static errno_t trb_segment_alloc(trb_segment_t **segment)
{
	*segment = AS_AREA_ANY;
	uintptr_t phys;

	const errno_t err = dmamem_map_anonymous(PAGE_SIZE,
	    DMAMEM_4GiB, AS_AREA_READ | AS_AREA_WRITE, 0,
	    &phys, (void **) segment);
	if (err)
		return err;

	memset(*segment, 0, PAGE_SIZE);
	(*segment)->phys = phys;
	usb_log_debug("Allocated new ring segment.");
	return EOK;
}

static void trb_segment_free(trb_segment_t *segment)
{
	dmamem_unmap_anonymous(segment);
}
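
/**
 * Initialize a command/transfer TRB ring: allocate enough segments to hold
 * initial_size useful TRBs, terminate the ring with a Link TRB (with the
 * Toggle Cycle flag set) pointing back to the first segment, and prime the
 * producer cycle state (PCS).
 */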
errno_t xhci_trb_ring_init(xhci_trb_ring_t *ring, size_t initial_size)
{
	errno_t err;
	if (initial_size == 0)
		initial_size = SEGMENT_TRB_USEFUL_COUNT;

	list_initialize(&ring->segments);

	size_t segment_count = (initial_size + SEGMENT_TRB_USEFUL_COUNT - 1) /
	    SEGMENT_TRB_USEFUL_COUNT;

	for (size_t i = 0; i < segment_count; ++i) {
		struct trb_segment *segment;
		if ((err = trb_segment_alloc(&segment)) != EOK)
			return err;

		list_append(&segment->segments_link, &ring->segments);
		ring->segment_count = i + 1;
	}

	trb_segment_t *const segment = get_first_segment(&ring->segments);
	xhci_trb_t *last = segment_end(segment) - 1;
	xhci_trb_link_fill(last, segment->phys);
	TRB_LINK_SET_TC(*last, true);

	ring->enqueue_segment = segment;
	ring->enqueue_trb = segment_begin(segment);
	ring->dequeue = segment->phys;
	ring->pcs = 1;

	fibril_mutex_initialize(&ring->guard);
	return EOK;
}
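
/** Free all segments of a TRB ring. */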
void xhci_trb_ring_fini(xhci_trb_ring_t *ring)
{
	assert(ring);

	list_foreach_safe(ring->segments, cur, next) {
		trb_segment_t *segment =
		    list_get_instance(cur, trb_segment_t, segments_link);
		trb_segment_free(segment);
	}
}
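
/**
 * The enqueue pointer has reached a Link TRB: move it to the beginning of
 * the next segment, wrapping around to the first segment at the ring end.
 */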
static void trb_ring_resolve_link(xhci_trb_ring_t *ring)
{
	link_t *next_segment =
	    list_next(&ring->enqueue_segment->segments_link, &ring->segments);
	if (!next_segment)
		next_segment = list_first(&ring->segments);
	assert(next_segment);

	ring->enqueue_segment =
	    list_get_instance(next_segment, trb_segment_t, segments_link);
	ring->enqueue_trb = segment_begin(ring->enqueue_segment);
}

/** Physical address of the TRB at the current enqueue position. */
static uintptr_t trb_ring_enqueue_phys(xhci_trb_ring_t *ring)
{
	size_t trb_id = ring->enqueue_trb - segment_begin(ring->enqueue_segment);
	return ring->enqueue_segment->phys + trb_id * sizeof(xhci_trb_t);
}
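
/**
 * Decide whether a TRB will report completion through the event ring:
 * command TRBs always do, transfer TRBs only when their IOC flag is set.
 */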
static bool trb_generates_interrupt(xhci_trb_t *trb)
{
	return TRB_TYPE(*trb) >= XHCI_TRB_TYPE_ENABLE_SLOT_CMD ||
	    TRB_IOC(*trb);
}
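
/**
 * Enqueue a TD composed of several TRBs.
 *
 * The enqueue is done in two passes: a dry run first advances the enqueue
 * pointer to check that the whole TD fits before reaching the dequeue
 * pointer, then the TRBs are copied in with the correct cycle bit. If phys
 * is non-NULL, it receives the physical address of the single
 * interrupt-generating TRB; ENOTSUP is returned if there is more than one.
 */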
errno_t xhci_trb_ring_enqueue_multiple(xhci_trb_ring_t *ring, xhci_trb_t *first_trb,
    size_t trbs, uintptr_t *phys)
{
	errno_t err;
	assert(trbs > 0);

	if (trbs > xhci_trb_ring_size(ring))
		return ELIMIT;

	fibril_mutex_lock(&ring->guard);

	xhci_trb_t *const saved_enqueue_trb = ring->enqueue_trb;
	trb_segment_t *const saved_enqueue_segment = ring->enqueue_segment;
	if (phys)
		*phys = (uintptr_t)NULL;

	/*
	 * Dry run: advance the enqueue pointer without writing anything, to
	 * check that the TD fits and to locate the interrupt-generating TRB.
	 */
	xhci_trb_t *trb = first_trb;
	for (size_t i = 0; i < trbs; ++i, ++trb) {
		if (phys && trb_generates_interrupt(trb)) {
			if (*phys) {
				err = ENOTSUP;
				goto err;
			}
			*phys = trb_ring_enqueue_phys(ring);
		}

		ring->enqueue_trb++;
		if (TRB_TYPE(*ring->enqueue_trb) == XHCI_TRB_TYPE_LINK)
			trb_ring_resolve_link(ring);

		if (trb_ring_enqueue_phys(ring) == ring->dequeue) {
			/* The ring is full. */
			err = EAGAIN;
			goto err;
		}
	}

	/* Restore the enqueue state and do the actual write pass. */
	ring->enqueue_segment = saved_enqueue_segment;
	ring->enqueue_trb = saved_enqueue_trb;

	trb = first_trb;
	for (size_t i = 0; i < trbs; ++i, ++trb) {
		TRB_SET_CYCLE(*trb, ring->pcs);
		xhci_trb_copy_to_pio(ring->enqueue_trb, trb);

		usb_log_debug2("TRB ring(%p): Enqueued TRB %p", ring, trb);
		ring->enqueue_trb++;

		if (TRB_TYPE(*ring->enqueue_trb) == XHCI_TRB_TYPE_LINK) {
			TRB_SET_CYCLE(*ring->enqueue_trb, ring->pcs);

			if (TRB_LINK_TC(*ring->enqueue_trb)) {
				ring->pcs = !ring->pcs;
				usb_log_debug("TRB ring(%p): PCS toggled", ring);
			}

			trb_ring_resolve_link(ring);
		}
	}

	fibril_mutex_unlock(&ring->guard);
	return EOK;

err:
	ring->enqueue_segment = saved_enqueue_segment;
	ring->enqueue_trb = saved_enqueue_trb;
	fibril_mutex_unlock(&ring->guard);
	return err;
}

/** Enqueue a single TRB (a TD of length one). */
errno_t xhci_trb_ring_enqueue(xhci_trb_ring_t *ring, xhci_trb_t *td, uintptr_t *phys)
{
	return xhci_trb_ring_enqueue_multiple(ring, td, 1, phys);
}
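
/**
 * Reset the dequeue pointer to the current enqueue position. If addr is
 * non-NULL, it receives a value suitable for the Set TR Dequeue Pointer
 * command: the physical address with the current cycle state in bit 0.
 */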
void xhci_trb_ring_reset_dequeue_state(xhci_trb_ring_t *ring, uintptr_t *addr)
{
	assert(ring);

	ring->dequeue = trb_ring_enqueue_phys(ring);

	if (addr)
		*addr = ring->dequeue | ring->pcs;
}

/** Number of TRBs the ring can hold, excluding the Link TRB slots. */
size_t xhci_trb_ring_size(xhci_trb_ring_t *ring)
{
	return ring->segment_count * SEGMENT_TRB_USEFUL_COUNT;
}
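
/**
 * Initialize an event ring: allocate its segments together with the Event
 * Ring Segment Table (ERST) that describes them to the controller.
 */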
errno_t xhci_event_ring_init(xhci_event_ring_t *ring, size_t initial_size)
{
	errno_t err;
	if (initial_size == 0)
		initial_size = SEGMENT_TRB_COUNT;

	list_initialize(&ring->segments);

	size_t segment_count = (initial_size + SEGMENT_TRB_COUNT - 1) / SEGMENT_TRB_COUNT;
	size_t erst_size = segment_count * sizeof(xhci_erst_entry_t);

	if (dma_buffer_alloc(&ring->erst, erst_size)) {
		xhci_event_ring_fini(ring);
		return ENOMEM;
	}

	xhci_erst_entry_t *erst = ring->erst.virt;
	memset(erst, 0, erst_size);

	for (size_t i = 0; i < segment_count; i++) {
		trb_segment_t *segment;
		if ((err = trb_segment_alloc(&segment)) != EOK) {
			xhci_event_ring_fini(ring);
			return err;
		}

		list_append(&segment->segments_link, &ring->segments);
		ring->segment_count = i + 1;

		xhci_fill_erst_entry(&erst[i], segment->phys, SEGMENT_TRB_COUNT);
	}

	fibril_mutex_initialize(&ring->guard);
	usb_log_debug("Initialized event ring.");
	return EOK;
}

/** Clear the event ring and reset the consumer state. */
void xhci_event_ring_reset(xhci_event_ring_t *ring)
{
	list_foreach(ring->segments, segments_link, trb_segment_t, segment)
		memset(segment->trb_storage, 0, sizeof(segment->trb_storage));

	trb_segment_t *const segment = get_first_segment(&ring->segments);
	ring->dequeue_segment = segment;
	ring->dequeue_trb = segment_begin(segment);
	ring->dequeue_ptr = segment->phys;
	ring->ccs = 1;
}

void xhci_event_ring_fini(xhci_event_ring_t *ring)
{
	list_foreach_safe(ring->segments, cur, next) {
		trb_segment_t *segment = list_get_instance(cur, trb_segment_t, segments_link);
		trb_segment_free(segment);
	}

	dma_buffer_free(&ring->erst);
}

/** Physical address of the TRB at the current dequeue position. */
static uintptr_t event_ring_dequeue_phys(xhci_event_ring_t *ring)
{
	uintptr_t trb_id = ring->dequeue_trb - segment_begin(ring->dequeue_segment);
	return ring->dequeue_segment->phys + trb_id * sizeof(xhci_trb_t);
}
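
/**
 * Fetch the next event from the ring, if there is one. An event TRB is
 * valid when its cycle bit matches the consumer cycle state (CCS); the CCS
 * is toggled whenever the dequeue pointer wraps back to the first segment.
 */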
errno_t xhci_event_ring_dequeue(xhci_event_ring_t *ring, xhci_trb_t *event)
{
	fibril_mutex_lock(&ring->guard);

	ring->dequeue_ptr = event_ring_dequeue_phys(ring);

	if (TRB_CYCLE(*ring->dequeue_trb) != ring->ccs) {
		/* The ring is empty. */
		fibril_mutex_unlock(&ring->guard);
		return ENOENT;
	}

	/* Do not reorder reads of the TRB contents before the cycle check. */
	read_barrier();

	memcpy(event, ring->dequeue_trb, sizeof(xhci_trb_t));

	ring->dequeue_trb++;
	const unsigned index = ring->dequeue_trb - segment_begin(ring->dequeue_segment);

	/* Advance to the next segment; toggle CCS when wrapping to the first one. */
	if (index >= SEGMENT_TRB_COUNT) {
		link_t *next_segment =
		    list_next(&ring->dequeue_segment->segments_link, &ring->segments);
		if (!next_segment) {
			next_segment = list_first(&ring->segments);
			ring->ccs = !ring->ccs;
		}

		ring->dequeue_segment =
		    list_get_instance(next_segment, trb_segment_t, segments_link);
		ring->dequeue_trb = segment_begin(ring->dequeue_segment);
	}

	fibril_mutex_unlock(&ring->guard);
	return EOK;
}
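
/**
 * Initialize a software ring buffer of TRBs, used to hand work over between
 * fibrils. The cycle bit of each slot serves as its full/empty flag.
 */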
void xhci_sw_ring_init(xhci_sw_ring_t *ring, size_t size)
{
	ring->begin = calloc(size, sizeof(xhci_trb_t));
	ring->end = ring->begin + size;

	fibril_mutex_initialize(&ring->guard);
	fibril_condvar_initialize(&ring->enqueued_cv);
	fibril_condvar_initialize(&ring->dequeued_cv);

	xhci_sw_ring_restart(ring);
}
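
/**
 * Blocking enqueue: wait until the slot under the enqueue pointer is free
 * (its cycle bit clear), then store the TRB and mark the slot occupied.
 * Returns EINTR if the ring was stopped while waiting.
 */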
errno_t xhci_sw_ring_enqueue(xhci_sw_ring_t *ring, xhci_trb_t *trb)
{
	assert(ring);
	assert(trb);

	fibril_mutex_lock(&ring->guard);
	while (ring->running && TRB_CYCLE(*ring->enqueue))
		fibril_condvar_wait(&ring->dequeued_cv, &ring->guard);

	*ring->enqueue = *trb;
	TRB_SET_CYCLE(*ring->enqueue, 1);

	if (++ring->enqueue == ring->end)
		ring->enqueue = ring->begin;

	fibril_condvar_signal(&ring->enqueued_cv);
	fibril_mutex_unlock(&ring->guard);

	return ring->running ? EOK : EINTR;
}

/** Blocking dequeue: the dual of xhci_sw_ring_enqueue. */
errno_t xhci_sw_ring_dequeue(xhci_sw_ring_t *ring, xhci_trb_t *trb)
{
	assert(ring);
	assert(trb);

	fibril_mutex_lock(&ring->guard);
	while (ring->running && !TRB_CYCLE(*ring->dequeue))
		fibril_condvar_wait(&ring->enqueued_cv, &ring->guard);

	*trb = *ring->dequeue;
	TRB_SET_CYCLE(*ring->dequeue, 0);

	if (++ring->dequeue == ring->end)
		ring->dequeue = ring->begin;

	fibril_condvar_signal(&ring->dequeued_cv);
	fibril_mutex_unlock(&ring->guard);

	return ring->running ? EOK : EINTR;
}
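
/**
 * Stop the ring: wake all fibrils blocked in enqueue/dequeue so they can
 * bail out with EINTR.
 */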
void xhci_sw_ring_stop(xhci_sw_ring_t *ring)
{
	ring->running = false;
	fibril_condvar_broadcast(&ring->enqueued_cv);
	fibril_condvar_broadcast(&ring->dequeued_cv);
}

/** Empty the ring and mark it running again. */
void xhci_sw_ring_restart(xhci_sw_ring_t *ring)
{
	ring->enqueue = ring->dequeue = ring->begin;
	memset(ring->begin, 0, sizeof(xhci_trb_t) * (ring->end - ring->begin));
	ring->running = true;
}

void xhci_sw_ring_fini(xhci_sw_ring_t *ring)
{
	free(ring->begin);
}