HelenOS sources
This source file includes the following definitions:
- virtio_setup_dma_bufs
- virtio_teardown_dma_bufs
- virtio_virtq_desc_set
- virtio_virtq_desc_get_next
- virtio_create_desc_free_list
- virtio_alloc_desc
- virtio_free_desc
- virtio_virtq_produce_available
- virtio_virtq_consume_used
- virtio_virtq_setup
- virtio_virtq_teardown
- virtio_device_setup_start
- virtio_device_setup_finalize
- virtio_device_setup_fail
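/*
 * Support routines shared by virtio device drivers: DMA buffer
 * management, split-virtqueue setup and manipulation, and the generic
 * device status and feature negotiation sequence.
 */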
#include "virtio-pci.h"
#include <as.h>
#include <align.h>
#include <macros.h>
#include <stdalign.h>
#include <ddf/log.h>
#include <barrier.h>
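/*
 * Map one contiguous anonymous DMA area and carve it into 'buffers'
 * equally sized buffers, recording the virtual address of each buffer
 * in buf[] and its physical address in buf_p[]. When 'write' is true,
 * the area is mapped read-write, otherwise read-only.
 */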
errno_t virtio_setup_dma_bufs(unsigned int buffers, size_t size,
bool write, void *buf[], uintptr_t buf_p[])
{
void *virt = AS_AREA_ANY;
uintptr_t phys;
errno_t rc = dmamem_map_anonymous(buffers * size, 0,
write ? AS_AREA_WRITE | AS_AREA_READ : AS_AREA_READ, 0, &phys,
&virt);
if (rc != EOK)
return rc;
ddf_msg(LVL_NOTE, "DMA buffers: %p-%p", virt, virt + buffers * size);
for (unsigned i = 0; i < buffers; i++) {
buf[i] = virt + i * size;
buf_p[i] = phys + i * size;
}
return EOK;
}
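/*
 * Unmap the single DMA area backing the buffers set up by
 * virtio_setup_dma_bufs(). buf[0] points to the start of the mapping,
 * so unmapping it releases all of the buffers at once.
 */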
void virtio_teardown_dma_bufs(void *buf[])
{
if (buf[0]) {
dmamem_unmap_anonymous(buf[0]);
buf[0] = NULL;
}
}
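/*
 * Fill in descriptor 'descno' of virtqueue 'num' with the buffer
 * address, length, flags and chaining index, converting each field to
 * little-endian byte order as the device expects.
 */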
void virtio_virtq_desc_set(virtio_dev_t *vdev, uint16_t num, uint16_t descno,
uint64_t addr, uint32_t len, uint16_t flags, uint16_t next)
{
virtq_desc_t *d = &vdev->queues[num].desc[descno];
pio_write_le64(&d->addr, addr);
pio_write_le32(&d->len, len);
pio_write_le16(&d->flags, flags);
pio_write_le16(&d->next, next);
}
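/*
 * Return the index of the descriptor chained after 'descno', or
 * (uint16_t) -1U if the descriptor does not have VIRTQ_DESC_F_NEXT set.
 */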
uint16_t virtio_virtq_desc_get_next(virtio_dev_t *vdev, uint16_t num,
uint16_t descno)
{
virtq_desc_t *d = &vdev->queues[num].desc[descno];
if (!(pio_read_le16(&d->flags) & VIRTQ_DESC_F_NEXT))
return (uint16_t) -1U;
return pio_read_le16(&d->next);
}
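/*
 * Chain all 'size' descriptors of virtqueue 'num' into a free list via
 * their next fields and store the index of the first free descriptor in
 * 'head'. The list is consumed by virtio_alloc_desc() and replenished by
 * virtio_free_desc().
 */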
void virtio_create_desc_free_list(virtio_dev_t *vdev, uint16_t num,
uint16_t size, uint16_t *head)
{
for (unsigned i = 0; i < size; i++) {
virtio_virtq_desc_set(vdev, num, i, 0, 0,
VIRTQ_DESC_F_NEXT, (i + 1 == size) ? 0xffffu : i + 1);
}
*head = 0;
}
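/*
 * Pop the first descriptor off the free list of virtqueue 'num' under
 * the queue lock and return its index; (uint16_t) -1U means the list is
 * empty.
 */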
uint16_t virtio_alloc_desc(virtio_dev_t *vdev, uint16_t num, uint16_t *head)
{
virtq_t *q = &vdev->queues[num];
fibril_mutex_lock(&q->lock);
uint16_t descno = *head;
if (descno != (uint16_t) -1U)
*head = virtio_virtq_desc_get_next(vdev, num, descno);
fibril_mutex_unlock(&q->lock);
return descno;
}
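/*
 * Push descriptor 'descno' back onto the free list of virtqueue 'num',
 * making it the new list head.
 */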
void virtio_free_desc(virtio_dev_t *vdev, uint16_t num, uint16_t *head,
uint16_t descno)
{
virtq_t *q = &vdev->queues[num];
fibril_mutex_lock(&q->lock);
virtio_virtq_desc_set(vdev, num, descno, 0, 0, VIRTQ_DESC_F_NEXT,
*head);
*head = descno;
fibril_mutex_unlock(&q->lock);
}
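/*
 * Hand the descriptor chain starting at 'descno' over to the device:
 * place it into the available ring of virtqueue 'num', advance the ring
 * index and notify the device through the queue's notification register.
 * The write barriers ensure the device observes the new ring entry
 * before the updated index, and the updated index before the
 * notification.
 *
 * A driver's transmit path might use these helpers roughly as follows
 * (sketch only; TX_QUEUE, tx_free_head, frame_phys and frame_len are
 * hypothetical names):
 *
 *	uint16_t d = virtio_alloc_desc(vdev, TX_QUEUE, &tx_free_head);
 *	if (d == (uint16_t) -1U)
 *		return EBUSY;
 *	virtio_virtq_desc_set(vdev, TX_QUEUE, d, frame_phys, frame_len, 0, 0);
 *	virtio_virtq_produce_available(vdev, TX_QUEUE, d);
 */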
void virtio_virtq_produce_available(virtio_dev_t *vdev, uint16_t num,
uint16_t descno)
{
virtq_t *q = &vdev->queues[num];
fibril_mutex_lock(&q->lock);
uint16_t idx = pio_read_le16(&q->avail->idx);
pio_write_le16(&q->avail->ring[idx % q->queue_size], descno);
write_barrier();
pio_write_le16(&q->avail->idx, idx + 1);
write_barrier();
pio_write_le16(q->notify, num);
fibril_mutex_unlock(&q->lock);
}
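/*
 * Check the used ring of virtqueue 'num' for a buffer returned by the
 * device. If one is pending, store its head descriptor index in
 * 'descno' and the number of bytes written by the device in 'len',
 * advance the driver's private used index and return true; otherwise
 * return false.
 */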
bool virtio_virtq_consume_used(virtio_dev_t *vdev, uint16_t num,
uint16_t *descno, uint32_t *len)
{
virtq_t *q = &vdev->queues[num];
fibril_mutex_lock(&q->lock);
uint16_t last_idx = q->used_last_idx % q->queue_size;
if (last_idx == (pio_read_le16(&q->used->idx) % q->queue_size)) {
fibril_mutex_unlock(&q->lock);
return false;
}
*descno = (uint16_t) pio_read_le32(&q->used->ring[last_idx].id);
*len = pio_read_le32(&q->used->ring[last_idx].len);
q->used_last_idx++;
fibril_mutex_unlock(&q->lock);
return true;
}
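/*
 * Allocate and configure virtqueue 'num' with 'size' descriptors. The
 * descriptor table, available ring and used ring are placed back to
 * back in a single anonymous DMA area, each aligned as its type
 * requires. The routine selects the queue in the common configuration
 * space, fails with ENOMEM if the device offers fewer descriptors than
 * requested, programs the three ring addresses, records the queue's
 * notification register and finally enables the queue.
 */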
errno_t virtio_virtq_setup(virtio_dev_t *vdev, uint16_t num, uint16_t size)
{
virtq_t *q = &vdev->queues[num];
virtio_pci_common_cfg_t *cfg = vdev->common_cfg;
pio_write_le16(&cfg->queue_select, num);
if (size > pio_read_16(&cfg->queue_size)) {
ddf_msg(LVL_ERROR, "Virtq %u: not enough descriptors", num);
return ENOMEM;
}
pio_write_le16(&cfg->queue_size, size);
ddf_msg(LVL_NOTE, "Virtq %u: %u descriptors", num, (unsigned) size);
size_t avail_offset = 0;
size_t used_offset = 0;
size_t mem_size = sizeof(virtq_desc_t[size]);
mem_size = ALIGN_UP(mem_size, alignof(virtq_avail_t));
avail_offset = mem_size;
mem_size += sizeof(virtq_avail_t) + sizeof(ioport16_t[size]) +
sizeof(ioport16_t);
mem_size = ALIGN_UP(mem_size, alignof(virtq_used_t));
used_offset = mem_size;
mem_size += sizeof(virtq_used_t) + sizeof(virtq_used_elem_t[size]) +
sizeof(ioport16_t);
q->virt = AS_AREA_ANY;
errno_t rc = dmamem_map_anonymous(mem_size, 0,
AS_AREA_READ | AS_AREA_WRITE, 0, &q->phys, &q->virt);
if (rc != EOK) {
q->virt = NULL;
return rc;
}
fibril_mutex_initialize(&q->lock);
q->size = mem_size;
q->queue_size = size;
q->desc = q->virt;
q->avail = q->virt + avail_offset;
q->used = q->virt + used_offset;
q->used_last_idx = 0;
memset(q->virt, 0, q->size);
pio_write_le64(&cfg->queue_desc, q->phys);
pio_write_le64(&cfg->queue_avail, q->phys + avail_offset);
pio_write_le64(&cfg->queue_used, q->phys + used_offset);
ddf_msg(LVL_NOTE, "DMA memory for virtq %d: virt=%p, phys=%p, size=%zu",
num, q->virt, (void *) q->phys, q->size);
q->notify = vdev->notify_base +
pio_read_le16(&cfg->queue_notif_off) * vdev->notify_off_multiplier;
ddf_msg(LVL_NOTE, "notification register: %p", q->notify);
pio_write_le16(&cfg->queue_enable, 1);
ddf_msg(LVL_NOTE, "virtq %d set", num);
return rc;
}
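/*
 * Clear the queue_enable register and unmap the virtqueue's DMA memory
 * if it was allocated.
 */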
void virtio_virtq_teardown(virtio_dev_t *vdev, uint16_t num)
{
virtio_pci_common_cfg_t *cfg = vdev->common_cfg;
pio_write_le16(&cfg->queue_enable, 0);
virtq_t *q = &vdev->queues[num];
if (q->size)
dmamem_unmap_anonymous(q->virt);
}
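/*
 * Perform the generic part of virtio device initialization: reset the
 * device, acknowledge it, announce the driver and negotiate features.
 * 'features' holds the feature bits (0-31) the driver wants; of bits
 * 32-63 only VIRTIO_F_VERSION_1 is required. ENOTSUP is returned if the
 * device does not offer every requested bit, or if it refuses the
 * negotiated set (FEATURES_OK not readable back after writing it).
 *
 * A driver's attach path might combine these helpers roughly as follows
 * (sketch only; MY_FEATURES, RX_QUEUE and the queue size are
 * hypothetical):
 *
 *	errno_t rc = virtio_device_setup_start(vdev, MY_FEATURES);
 *	if (rc != EOK)
 *		goto fail;
 *	rc = virtio_virtq_setup(vdev, RX_QUEUE, 256);
 *	if (rc != EOK)
 *		goto fail;
 *	virtio_device_setup_finalize(vdev);
 *	return EOK;
 * fail:
 *	virtio_device_setup_fail(vdev);
 *	return rc;
 */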
errno_t virtio_device_setup_start(virtio_dev_t *vdev, uint32_t features)
{
virtio_pci_common_cfg_t *cfg = vdev->common_cfg;
uint8_t status = VIRTIO_DEV_STATUS_RESET;
pio_write_8(&cfg->device_status, status);
status |= VIRTIO_DEV_STATUS_ACKNOWLEDGE;
pio_write_8(&cfg->device_status, status);
status |= VIRTIO_DEV_STATUS_DRIVER;
pio_write_8(&cfg->device_status, status);
pio_write_le32(&cfg->device_feature_select, VIRTIO_FEATURES_0_31);
uint32_t device_features = pio_read_le32(&cfg->device_feature);
uint32_t reserved_features = VIRTIO_F_VERSION_1;
pio_write_le32(&cfg->device_feature_select, VIRTIO_FEATURES_32_63);
uint32_t device_reserved_features = pio_read_le32(&cfg->device_feature);
ddf_msg(LVL_NOTE, "offered features %x, reserved features %x",
device_features, device_reserved_features);
if (features != (features & device_features))
return ENOTSUP;
features &= device_features;
if (reserved_features != (reserved_features & device_reserved_features))
return ENOTSUP;
reserved_features &= device_reserved_features;
pio_write_le32(&cfg->driver_feature_select, VIRTIO_FEATURES_0_31);
pio_write_le32(&cfg->driver_feature, features);
pio_write_le32(&cfg->driver_feature_select, VIRTIO_FEATURES_32_63);
pio_write_le32(&cfg->driver_feature, reserved_features);
ddf_msg(LVL_NOTE, "accepted features %x, reserved features %x",
features, reserved_features);
status |= VIRTIO_DEV_STATUS_FEATURES_OK;
pio_write_8(&cfg->device_status, status);
status = pio_read_8(&cfg->device_status);
if (!(status & VIRTIO_DEV_STATUS_FEATURES_OK))
return ENOTSUP;
return EOK;
}
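/*
 * Complete initialization by setting the DRIVER_OK status bit, telling
 * the device that the driver is ready to drive it.
 */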
void virtio_device_setup_finalize(virtio_dev_t *vdev)
{
virtio_pci_common_cfg_t *cfg = vdev->common_cfg;
uint8_t status = pio_read_8(&cfg->device_status);
pio_write_8(&cfg->device_status, status | VIRTIO_DEV_STATUS_DRIVER_OK);
}
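/*
 * Signal an unrecoverable initialization error to the device by setting
 * the FAILED status bit.
 */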
void virtio_device_setup_fail(virtio_dev_t *vdev)
{
virtio_pci_common_cfg_t *cfg = vdev->common_cfg;
uint8_t status = pio_read_8(&cfg->device_status);
pio_write_8(&cfg->device_status, status | VIRTIO_DEV_STATUS_FAILED);
}