HelenOS sources
This source file includes the following definitions.
- get_cmd_ring
- xhci_init_commands
- xhci_fini_commands
- xhci_cmd_init
- xhci_cmd_fini
- find_command
- cr_set_state
- wait_for_ring_open
- enqueue_command
- xhci_stop_command_ring
- xhci_nuke_command_ring
- xhci_start_command_ring
- abort_command_ring
- report_error
- xhci_handle_command_completion
- no_op_cmd
- enable_slot_cmd
- disable_slot_cmd
- address_device_cmd
- configure_endpoint_cmd
- evaluate_context_cmd
- reset_endpoint_cmd
- stop_endpoint_cmd
- set_tr_dequeue_pointer_cmd
- reset_device_cmd
- get_port_bandwidth_cmd
- try_abort_current_command
- wait_for_cmd_completion
- xhci_cmd_sync
- xhci_cmd_sync_fini
- xhci_cmd_async_fini
#include <errno.h>
#include <str_error.h>
#include <usb/debug.h>
#include "commands.h"
#include "debug.h"
#include "hc.h"
#include "hw_struct/context.h"
#include "hw_struct/trb.h"
#define TRB_SET_TSP(trb, tsp) (trb).control |= host2xhci(32, (((tsp) & 0x1) << 9))
#define TRB_SET_TYPE(trb, type) (trb).control |= host2xhci(32, (type) << 10)
#define TRB_SET_DC(trb, dc) (trb).control |= host2xhci(32, (dc) << 9)
#define TRB_SET_EP(trb, ep) (trb).control |= host2xhci(32, ((ep) & 0x1f) << 16)
#define TRB_SET_STREAM(trb, st) (trb).control |= host2xhci(32, ((st) & 0xFFFF) << 16)
#define TRB_SET_SUSP(trb, susp) (trb).control |= host2xhci(32, ((susp) & 0x1) << 23)
#define TRB_SET_SLOT(trb, slot) (trb).control |= host2xhci(32, (slot) << 24)
#define TRB_SET_DEV_SPEED(trb, speed) (trb).control |= host2xhci(32, ((speed) & 0xF) << 16)
#define TRB_SET_DEQUEUE_PTR(trb, dptr) (trb).parameter |= host2xhci(64, (dptr))
#define TRB_SET_ICTX(trb, phys) (trb).parameter |= host2xhci(64, (phys) & (~0xF))
#define TRB_GET_CODE(trb) XHCI_DWORD_EXTRACT((trb).status, 31, 24)
#define TRB_GET_SLOT(trb) XHCI_DWORD_EXTRACT((trb).control, 31, 24)
#define TRB_GET_PHYS(trb) (XHCI_QWORD_EXTRACT((trb).parameter, 63, 4) << 4)
static xhci_cmd_ring_t *get_cmd_ring(xhci_hc_t *hc)
{
    assert(hc);
    return &hc->cr;
}

errno_t xhci_init_commands(xhci_hc_t *hc)
{
    xhci_cmd_ring_t *cr = get_cmd_ring(hc);
    errno_t err;

    if ((err = xhci_trb_ring_init(&cr->trb_ring, 0)))
        return err;

    fibril_mutex_initialize(&cr->guard);
    fibril_condvar_initialize(&cr->state_cv);
    fibril_condvar_initialize(&cr->stopped_cv);
    list_initialize(&cr->cmd_list);

    return EOK;
}

void xhci_fini_commands(xhci_hc_t *hc)
{
    assert(hc);
    xhci_stop_command_ring(hc);

    xhci_cmd_ring_t *cr = get_cmd_ring(hc);

    fibril_mutex_lock(&cr->guard);
    xhci_trb_ring_fini(&cr->trb_ring);
    fibril_mutex_unlock(&cr->guard);
}

void xhci_cmd_init(xhci_cmd_t *cmd, xhci_cmd_type_t type)
{
    memset(cmd, 0, sizeof(*cmd));

    link_initialize(&cmd->_header.link);
    fibril_mutex_initialize(&cmd->_header.completed_mtx);
    fibril_condvar_initialize(&cmd->_header.completed_cv);

    cmd->_header.cmd = type;
}

void xhci_cmd_fini(xhci_cmd_t *cmd)
{
    list_remove(&cmd->_header.link);

    dma_buffer_free(&cmd->input_ctx);
    dma_buffer_free(&cmd->bandwidth_ctx);

    if (cmd->_header.async) {
        free(cmd);
    }
}

static inline xhci_cmd_t *find_command(xhci_hc_t *hc, uint64_t phys)
{
    xhci_cmd_ring_t *cr = get_cmd_ring(hc);
    assert(fibril_mutex_is_locked(&cr->guard));

    link_t *cmd_link = list_first(&cr->cmd_list);

    while (cmd_link != NULL) {
        xhci_cmd_t *cmd = list_get_instance(cmd_link, xhci_cmd_t,
            _header.link);

        if (cmd->_header.trb_phys == phys)
            break;

        cmd_link = list_next(cmd_link, &cr->cmd_list);
    }

    return cmd_link ?
        list_get_instance(cmd_link, xhci_cmd_t, _header.link) :
        NULL;
}
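/*
 * Command ring states: OPEN means commands may be enqueued; FULL means the
 * TRB ring ran out of space and enqueuers wait for a completion to free a
 * slot; CHANGING means an abort or restart is in progress; CLOSED means the
 * ring has been stopped and new commands are refused with ENAK. Transitions
 * to OPEN or CLOSED wake all fibrils waiting on state_cv.
 */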
static void cr_set_state(xhci_cmd_ring_t *cr, xhci_cr_state_t state)
{
    assert(fibril_mutex_is_locked(&cr->guard));

    cr->state = state;
    if (state == XHCI_CR_STATE_OPEN || state == XHCI_CR_STATE_CLOSED)
        fibril_condvar_broadcast(&cr->state_cv);
}

static errno_t wait_for_ring_open(xhci_cmd_ring_t *cr)
{
    assert(fibril_mutex_is_locked(&cr->guard));

    while (true) {
        switch (cr->state) {
        case XHCI_CR_STATE_CHANGING:
        case XHCI_CR_STATE_FULL:
            fibril_condvar_wait(&cr->state_cv, &cr->guard);
            break;
        case XHCI_CR_STATE_OPEN:
            return EOK;
        case XHCI_CR_STATE_CLOSED:
            return ENAK;
        }
    }
}
static inline errno_t enqueue_command(xhci_hc_t *hc, xhci_cmd_t *cmd)
{
    xhci_cmd_ring_t *cr = get_cmd_ring(hc);
    assert(cmd);

    fibril_mutex_lock(&cr->guard);

    if (wait_for_ring_open(cr)) {
        fibril_mutex_unlock(&cr->guard);
        return ENAK;
    }

    usb_log_debug("Sending command %s",
        xhci_trb_str_type(TRB_TYPE(cmd->_header.trb)));

    list_append(&cmd->_header.link, &cr->cmd_list);

    /* Enqueue the TRB; if the ring is full, wait for it to open again. */
    errno_t err = EOK;
    while (err == EOK) {
        err = xhci_trb_ring_enqueue(&cr->trb_ring,
            &cmd->_header.trb, &cmd->_header.trb_phys);
        if (err != EAGAIN)
            break;

        cr_set_state(cr, XHCI_CR_STATE_FULL);
        err = wait_for_ring_open(cr);
    }

    if (err == EOK)
        hc_ring_doorbell(hc, 0, 0);

    fibril_mutex_unlock(&cr->guard);

    return err;
}
/* Stop the command ring and wait until the controller reports it stopped. */
void xhci_stop_command_ring(xhci_hc_t *hc)
{
    xhci_cmd_ring_t *cr = get_cmd_ring(hc);

    fibril_mutex_lock(&cr->guard);

    cr_set_state(cr, XHCI_CR_STATE_CLOSED);
    XHCI_REG_SET(hc->op_regs, XHCI_OP_CS, 1);

    while (XHCI_REG_RD(hc->op_regs, XHCI_OP_CRR))
        fibril_condvar_wait(&cr->stopped_cv, &cr->guard);

    fibril_mutex_unlock(&cr->guard);
}

/* Close the command ring without waiting for the controller. */
void xhci_nuke_command_ring(xhci_hc_t *hc)
{
    xhci_cmd_ring_t *cr = get_cmd_ring(hc);

    fibril_mutex_lock(&cr->guard);
    cr_set_state(cr, XHCI_CR_STATE_CLOSED);
    XHCI_REG_SET(hc->op_regs, XHCI_OP_CS, 1);
    fibril_mutex_unlock(&cr->guard);
}

/* Reopen the command ring for enqueuing. */
void xhci_start_command_ring(xhci_hc_t *hc)
{
    xhci_cmd_ring_t *cr = get_cmd_ring(hc);

    fibril_mutex_lock(&cr->guard);
    cr_set_state(cr, XHCI_CR_STATE_OPEN);
    fibril_mutex_unlock(&cr->guard);
}
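/*
 * Setting the CA (Command Abort) bit asks the xHC to abort the currently
 * executing command. The controller then posts a completion event with the
 * COMMAND_RING_STOPPED code, which xhci_handle_command_completion() turns
 * into a broadcast on stopped_cv.
 */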
static void abort_command_ring(xhci_hc_t *hc)
{
    XHCI_REG_SET(hc->op_regs, XHCI_OP_CA, 1);
}
static const char *trb_codes[] = {
#define TRBC(t) [XHCI_TRBC_##t] = #t
    TRBC(INVALID),
    TRBC(SUCCESS),
    TRBC(DATA_BUFFER_ERROR),
    TRBC(BABBLE_DETECTED_ERROR),
    TRBC(USB_TRANSACTION_ERROR),
    TRBC(TRB_ERROR),
    TRBC(STALL_ERROR),
    TRBC(RESOURCE_ERROR),
    TRBC(BANDWIDTH_ERROR),
    TRBC(NO_SLOTS_ERROR),
    TRBC(INVALID_STREAM_ERROR),
    TRBC(SLOT_NOT_ENABLED_ERROR),
    TRBC(EP_NOT_ENABLED_ERROR),
    TRBC(SHORT_PACKET),
    TRBC(RING_UNDERRUN),
    TRBC(RING_OVERRUN),
    TRBC(VF_EVENT_RING_FULL),
    TRBC(PARAMETER_ERROR),
    TRBC(BANDWIDTH_OVERRUN_ERROR),
    TRBC(CONTEXT_STATE_ERROR),
    TRBC(NO_PING_RESPONSE_ERROR),
    TRBC(EVENT_RING_FULL_ERROR),
    TRBC(INCOMPATIBLE_DEVICE_ERROR),
    TRBC(MISSED_SERVICE_ERROR),
    TRBC(COMMAND_RING_STOPPED),
    TRBC(COMMAND_ABORTED),
    TRBC(STOPPED),
    TRBC(STOPPED_LENGTH_INVALID),
    TRBC(STOPPED_SHORT_PACKET),
    TRBC(MAX_EXIT_LATENCY_TOO_LARGE_ERROR),
    [30] = "<reserved>",
    TRBC(ISOCH_BUFFER_OVERRUN),
    TRBC(EVENT_LOST_ERROR),
    TRBC(UNDEFINED_ERROR),
    TRBC(INVALID_STREAM_ID_ERROR),
    TRBC(SECONDARY_BANDWIDTH_ERROR),
    TRBC(SPLIT_TRANSACTION_ERROR),
    [XHCI_TRBC_MAX] = NULL
#undef TRBC
};
static void report_error(int code)
{
    if (code < XHCI_TRBC_MAX && trb_codes[code] != NULL)
        usb_log_error("Command resulted in error: %s.", trb_codes[code]);
    else
        usb_log_error("Command resulted in reserved or "
            "vendor specific error.");
}
errno_t xhci_handle_command_completion(xhci_hc_t *hc, xhci_trb_t *trb)
{
    xhci_cmd_ring_t *cr = get_cmd_ring(hc);
    assert(trb);

    fibril_mutex_lock(&cr->guard);

    int code = TRB_GET_CODE(*trb);

    if (code == XHCI_TRBC_COMMAND_RING_STOPPED) {
        usb_log_debug("Command ring stopped.");
        fibril_condvar_broadcast(&cr->stopped_cv);
        fibril_mutex_unlock(&cr->guard);
        return EOK;
    }

    const uint64_t phys = TRB_GET_PHYS(*trb);
    xhci_trb_ring_update_dequeue(&cr->trb_ring, phys);

    if (cr->state == XHCI_CR_STATE_FULL)
        cr_set_state(cr, XHCI_CR_STATE_OPEN);
    xhci_cmd_t *command = find_command(hc, phys);
    if (command == NULL) {
        usb_log_error("No command struct for completion event found.");

        if (code != XHCI_TRBC_SUCCESS)
            report_error(code);

        /* Do not leave the command ring guard locked on this early return. */
        fibril_mutex_unlock(&cr->guard);
        return EOK;
    }
    list_remove(&command->_header.link);

    if (command->_header.cmd == XHCI_CMD_NO_OP && code == XHCI_TRBC_TRB_ERROR)
        code = XHCI_TRBC_SUCCESS;

    command->status = code;
    command->slot_id = TRB_GET_SLOT(*trb);

    usb_log_debug("Completed command %s",
        xhci_trb_str_type(TRB_TYPE(command->_header.trb)));

    if (code != XHCI_TRBC_SUCCESS) {
        report_error(code);
        xhci_dump_trb(&command->_header.trb);
    }

    fibril_mutex_unlock(&cr->guard);

    fibril_mutex_lock(&command->_header.completed_mtx);
    command->_header.completed = true;
    fibril_condvar_broadcast(&command->_header.completed_cv);
    fibril_mutex_unlock(&command->_header.completed_mtx);

    if (command->_header.async) {
        xhci_cmd_fini(command);
    }

    return EOK;
}
static errno_t no_op_cmd(xhci_hc_t *hc, xhci_cmd_t *cmd)
{
    assert(hc);

    xhci_trb_clean(&cmd->_header.trb);

    TRB_SET_TYPE(cmd->_header.trb, XHCI_TRB_TYPE_NO_OP_CMD);

    return enqueue_command(hc, cmd);
}

static errno_t enable_slot_cmd(xhci_hc_t *hc, xhci_cmd_t *cmd)
{
    assert(hc);

    xhci_trb_clean(&cmd->_header.trb);

    TRB_SET_TYPE(cmd->_header.trb, XHCI_TRB_TYPE_ENABLE_SLOT_CMD);
    cmd->_header.trb.control |=
        host2xhci(32, XHCI_REG_RD(hc->xecp, XHCI_EC_SP_SLOT_TYPE) << 16);

    return enqueue_command(hc, cmd);
}

static errno_t disable_slot_cmd(xhci_hc_t *hc, xhci_cmd_t *cmd)
{
    assert(hc);
    assert(cmd);

    xhci_trb_clean(&cmd->_header.trb);

    TRB_SET_TYPE(cmd->_header.trb, XHCI_TRB_TYPE_DISABLE_SLOT_CMD);
    TRB_SET_SLOT(cmd->_header.trb, cmd->slot_id);

    return enqueue_command(hc, cmd);
}

static errno_t address_device_cmd(xhci_hc_t *hc, xhci_cmd_t *cmd)
{
    assert(hc);
    assert(cmd);
    assert(dma_buffer_is_set(&cmd->input_ctx));

    xhci_trb_clean(&cmd->_header.trb);

    const uintptr_t phys = dma_buffer_phys_base(&cmd->input_ctx);
    TRB_SET_ICTX(cmd->_header.trb, phys);

    TRB_SET_TYPE(cmd->_header.trb, XHCI_TRB_TYPE_ADDRESS_DEVICE_CMD);
    TRB_SET_SLOT(cmd->_header.trb, cmd->slot_id);

    return enqueue_command(hc, cmd);
}

static errno_t configure_endpoint_cmd(xhci_hc_t *hc, xhci_cmd_t *cmd)
{
    assert(hc);
    assert(cmd);

    xhci_trb_clean(&cmd->_header.trb);

    if (!cmd->deconfigure) {
        assert(dma_buffer_is_set(&cmd->input_ctx));

        const uintptr_t phys = dma_buffer_phys_base(&cmd->input_ctx);
        TRB_SET_ICTX(cmd->_header.trb, phys);
    }

    TRB_SET_TYPE(cmd->_header.trb, XHCI_TRB_TYPE_CONFIGURE_ENDPOINT_CMD);
    TRB_SET_SLOT(cmd->_header.trb, cmd->slot_id);
    TRB_SET_DC(cmd->_header.trb, cmd->deconfigure);

    return enqueue_command(hc, cmd);
}

static errno_t evaluate_context_cmd(xhci_hc_t *hc, xhci_cmd_t *cmd)
{
    assert(hc);
    assert(cmd);
    assert(dma_buffer_is_set(&cmd->input_ctx));

    xhci_trb_clean(&cmd->_header.trb);

    const uintptr_t phys = dma_buffer_phys_base(&cmd->input_ctx);
    TRB_SET_ICTX(cmd->_header.trb, phys);

    TRB_SET_TYPE(cmd->_header.trb, XHCI_TRB_TYPE_EVALUATE_CONTEXT_CMD);
    TRB_SET_SLOT(cmd->_header.trb, cmd->slot_id);

    return enqueue_command(hc, cmd);
}

static errno_t reset_endpoint_cmd(xhci_hc_t *hc, xhci_cmd_t *cmd)
{
    assert(hc);
    assert(cmd);

    xhci_trb_clean(&cmd->_header.trb);

    TRB_SET_TYPE(cmd->_header.trb, XHCI_TRB_TYPE_RESET_ENDPOINT_CMD);
    TRB_SET_TSP(cmd->_header.trb, cmd->tsp);
    TRB_SET_EP(cmd->_header.trb, cmd->endpoint_id);
    TRB_SET_SLOT(cmd->_header.trb, cmd->slot_id);

    return enqueue_command(hc, cmd);
}

static errno_t stop_endpoint_cmd(xhci_hc_t *hc, xhci_cmd_t *cmd)
{
    assert(hc);
    assert(cmd);

    xhci_trb_clean(&cmd->_header.trb);

    TRB_SET_TYPE(cmd->_header.trb, XHCI_TRB_TYPE_STOP_ENDPOINT_CMD);
    TRB_SET_EP(cmd->_header.trb, cmd->endpoint_id);
    TRB_SET_SUSP(cmd->_header.trb, cmd->susp);
    TRB_SET_SLOT(cmd->_header.trb, cmd->slot_id);

    return enqueue_command(hc, cmd);
}

static errno_t set_tr_dequeue_pointer_cmd(xhci_hc_t *hc, xhci_cmd_t *cmd)
{
    assert(hc);
    assert(cmd);

    xhci_trb_clean(&cmd->_header.trb);

    TRB_SET_TYPE(cmd->_header.trb, XHCI_TRB_TYPE_SET_TR_DEQUEUE_POINTER_CMD);
    TRB_SET_EP(cmd->_header.trb, cmd->endpoint_id);
    TRB_SET_STREAM(cmd->_header.trb, cmd->stream_id);
    TRB_SET_SLOT(cmd->_header.trb, cmd->slot_id);
    TRB_SET_DEQUEUE_PTR(cmd->_header.trb, cmd->dequeue_ptr);

    return enqueue_command(hc, cmd);
}

static errno_t reset_device_cmd(xhci_hc_t *hc, xhci_cmd_t *cmd)
{
    assert(hc);
    assert(cmd);

    xhci_trb_clean(&cmd->_header.trb);

    TRB_SET_TYPE(cmd->_header.trb, XHCI_TRB_TYPE_RESET_DEVICE_CMD);
    TRB_SET_SLOT(cmd->_header.trb, cmd->slot_id);

    return enqueue_command(hc, cmd);
}

static errno_t get_port_bandwidth_cmd(xhci_hc_t *hc, xhci_cmd_t *cmd)
{
    assert(hc);
    assert(cmd);

    xhci_trb_clean(&cmd->_header.trb);

    const uintptr_t phys = dma_buffer_phys_base(&cmd->input_ctx);
    TRB_SET_ICTX(cmd->_header.trb, phys);

    TRB_SET_TYPE(cmd->_header.trb, XHCI_TRB_TYPE_GET_PORT_BANDWIDTH_CMD);
    TRB_SET_SLOT(cmd->_header.trb, cmd->slot_id);
    TRB_SET_DEV_SPEED(cmd->_header.trb, cmd->device_speed);

    return enqueue_command(hc, cmd);
}
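/*
 * Dispatch table mapping xhci_cmd_type_t to the routine that builds and
 * enqueues the corresponding TRB. A NULL entry marks a command type that is
 * not implemented; xhci_cmd_sync() and xhci_cmd_async_fini() refuse such
 * commands with ENOTSUP.
 */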
typedef errno_t (*cmd_handler) (xhci_hc_t *hc, xhci_cmd_t *cmd);

static cmd_handler cmd_handlers[] = {
    [XHCI_CMD_ENABLE_SLOT] = enable_slot_cmd,
    [XHCI_CMD_DISABLE_SLOT] = disable_slot_cmd,
    [XHCI_CMD_ADDRESS_DEVICE] = address_device_cmd,
    [XHCI_CMD_CONFIGURE_ENDPOINT] = configure_endpoint_cmd,
    [XHCI_CMD_EVALUATE_CONTEXT] = evaluate_context_cmd,
    [XHCI_CMD_RESET_ENDPOINT] = reset_endpoint_cmd,
    [XHCI_CMD_STOP_ENDPOINT] = stop_endpoint_cmd,
    [XHCI_CMD_SET_TR_DEQUEUE_POINTER] = set_tr_dequeue_pointer_cmd,
    [XHCI_CMD_RESET_DEVICE] = reset_device_cmd,
    [XHCI_CMD_FORCE_EVENT] = NULL,
    [XHCI_CMD_NEGOTIATE_BANDWIDTH] = NULL,
    [XHCI_CMD_SET_LATENCY_TOLERANCE_VALUE] = NULL,
    [XHCI_CMD_GET_PORT_BANDWIDTH] = get_port_bandwidth_cmd,
    [XHCI_CMD_FORCE_HEADER] = NULL,
    [XHCI_CMD_NO_OP] = no_op_cmd
};
static errno_t try_abort_current_command(xhci_hc_t *hc)
{
    xhci_cmd_ring_t *cr = get_cmd_ring(hc);

    fibril_mutex_lock(&cr->guard);

    if (cr->state == XHCI_CR_STATE_CLOSED) {
        fibril_mutex_unlock(&cr->guard);
        return ENAK;
    }

    if (cr->state == XHCI_CR_STATE_CHANGING) {
        fibril_mutex_unlock(&cr->guard);
        return EOK;
    }

    usb_log_error("Timeout while waiting for command: "
        "aborting current command.");

    cr_set_state(cr, XHCI_CR_STATE_CHANGING);

    abort_command_ring(hc);

    fibril_condvar_wait_timeout(&cr->stopped_cv, &cr->guard,
        XHCI_CR_ABORT_TIMEOUT);

    if (XHCI_REG_RD(hc->op_regs, XHCI_OP_CRR)) {
        usb_log_error("Command didn't abort.");

        cr_set_state(cr, XHCI_CR_STATE_CLOSED);

        fibril_mutex_unlock(&cr->guard);
        return ENAK;
    }

    cr_set_state(cr, XHCI_CR_STATE_OPEN);

    fibril_mutex_unlock(&cr->guard);

    usb_log_error("Command ring stopped. Starting again.");
    hc_ring_doorbell(hc, 0, 0);

    return EOK;
}
static errno_t wait_for_cmd_completion(xhci_hc_t *hc, xhci_cmd_t *cmd)
{
    errno_t rv = EOK;

    if (fibril_get_id() == hc->event_handler) {
        usb_log_error("Deadlock detected in waiting for command.");
        abort();
    }

    fibril_mutex_lock(&cmd->_header.completed_mtx);
    while (!cmd->_header.completed) {
        rv = fibril_condvar_wait_timeout(&cmd->_header.completed_cv,
            &cmd->_header.completed_mtx, XHCI_COMMAND_TIMEOUT);

        if (!cmd->_header.completed && rv == ETIMEOUT) {
            fibril_mutex_unlock(&cmd->_header.completed_mtx);

            rv = try_abort_current_command(hc);
            if (rv)
                return rv;

            fibril_mutex_lock(&cmd->_header.completed_mtx);
        }
    }
    fibril_mutex_unlock(&cmd->_header.completed_mtx);

    return rv;
}
errno_t xhci_cmd_sync(xhci_hc_t *hc, xhci_cmd_t *cmd)
{
    assert(hc);
    assert(cmd);

    errno_t err;

    if (!cmd_handlers[cmd->_header.cmd]) {
        return ENOTSUP;
    }

    if ((err = cmd_handlers[cmd->_header.cmd](hc, cmd))) {
        return err;
    }

    if ((err = wait_for_cmd_completion(hc, cmd))) {
        return err;
    }

    switch (cmd->status) {
    case XHCI_TRBC_SUCCESS:
        return EOK;
    case XHCI_TRBC_USB_TRANSACTION_ERROR:
        return ESTALL;
    case XHCI_TRBC_RESOURCE_ERROR:
    case XHCI_TRBC_BANDWIDTH_ERROR:
    case XHCI_TRBC_NO_SLOTS_ERROR:
        return ELIMIT;
    case XHCI_TRBC_SLOT_NOT_ENABLED_ERROR:
        return ENOENT;
    default:
        return EINVAL;
    }
}
errno_t xhci_cmd_sync_fini(xhci_hc_t *hc, xhci_cmd_t *cmd)
{
    const errno_t err = xhci_cmd_sync(hc, cmd);
    xhci_cmd_fini(cmd);

    return err;
}
errno_t xhci_cmd_async_fini(xhci_hc_t *hc, xhci_cmd_t *stack_cmd)
{
    assert(hc);
    assert(stack_cmd);

    xhci_cmd_t *heap_cmd = (xhci_cmd_t *) malloc(sizeof(xhci_cmd_t));
    if (!heap_cmd) {
        return ENOMEM;
    }

    memcpy(heap_cmd, stack_cmd, sizeof(xhci_cmd_t));
    heap_cmd->_header.async = true;

    errno_t err;

    if (!cmd_handlers[heap_cmd->_header.cmd]) {
        err = ENOTSUP;
        goto err_heap_cmd;
    }

    if ((err = cmd_handlers[heap_cmd->_header.cmd](hc, heap_cmd))) {
        goto err_heap_cmd;
    }

    return EOK;

err_heap_cmd:
    free(heap_cmd);
    return err;
}
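/*
 * Usage sketch (illustrative only, not part of this file): a caller issues a
 * synchronous No Op command by initializing a stack-allocated xhci_cmd_t and
 * handing it to xhci_cmd_sync_fini(), which waits for the completion event
 * and releases the command's resources. The wrapper name below is made up
 * for the example.
 */
static errno_t example_no_op(xhci_hc_t *hc)
{
    xhci_cmd_t cmd;
    xhci_cmd_init(&cmd, XHCI_CMD_NO_OP);

    /* Enqueues the TRB, rings doorbell 0 and blocks until completion. */
    return xhci_cmd_sync_fini(hc, &cmd);
}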