Skip to content
Snippets Groups Projects
Commit a48259bc authored by Avi Kivity's avatar Avi Kivity
Browse files

Merge branch 'master' of github.com:cloudius-systems/osv

parents 89954499 2f8df2f1
No related branches found
No related tags found
No related merge requests found
......@@ -92,6 +92,8 @@ drivers += elf.o
drivers += drivers/device.o drivers/device-factory.o
drivers += drivers/driver.o drivers/driver-factory.o
drivers += drivers/virtio.o
drivers += drivers/virtio-vring.o
drivers += drivers/virtio-net.o
drivers += drivers/clock.o drivers/kvmclock.o
drivers += drivers/char/console.o
......
#include "drivers/virtio.hh"
#include "drivers/virtio-net.hh"
#include "debug.hh"
namespace virtio {

// Bind the network driver to the virtio-net transitional PCI device id.
virtio_net::virtio_net()
    : virtio_driver(VIRTIO_NET_DEVICE_ID)
{
}

virtio_net::~virtio_net()
{
}

// Run the generic virtio initialization for this device and, only if it
// succeeds, report the driver as fully operational.
//
// @param d  the PCI device matched by the driver factory
// @return   true when generic virtio init succeeded and DRIVER_OK was set
bool virtio_net::Init(Device *d)
{
    // Bug fix: the base-class result was previously ignored, so DRIVER_OK
    // could be announced for a device that failed generic virtio init.
    if (!virtio_driver::Init(d)) {
        return false;
    }
    add_dev_status(VIRTIO_CONFIG_S_DRIVER_OK);
    return true;
}

}
#ifndef VIRTIO_NET_DRIVER_H
#define VIRTIO_NET_DRIVER_H
#include "drivers/virtio.hh"
namespace virtio {
// Driver for the virtio network device over the legacy virtio-pci
// transport. As of this code it only negotiates features and flips the
// status byte in Init(); no RX/TX queue processing is implemented here.
class virtio_net : public virtio_driver {
public:
// The feature bitmap for virtio net (values are bit NUMBERS, not masks)
enum NetFeatures {
VIRTIO_NET_F_CSUM=0, /* Host handles pkts w/ partial csum */
VIRTIO_NET_F_GUEST_CSUM=1, /* Guest handles pkts w/ partial csum */
VIRTIO_NET_F_MAC=5, /* Host has given MAC address. */
VIRTIO_NET_F_GSO=6, /* Host handles pkts w/ any GSO type */
VIRTIO_NET_F_GUEST_TSO4=7, /* Guest can handle TSOv4 in. */
VIRTIO_NET_F_GUEST_TSO6=8, /* Guest can handle TSOv6 in. */
VIRTIO_NET_F_GUEST_ECN=9, /* Guest can handle TSO[6] w/ ECN in. */
VIRTIO_NET_F_GUEST_UFO=10, /* Guest can handle UFO in. */
VIRTIO_NET_F_HOST_TSO4=11, /* Host can handle TSOv4 in. */
VIRTIO_NET_F_HOST_TSO6=12, /* Host can handle TSOv6 in. */
VIRTIO_NET_F_HOST_ECN=13, /* Host can handle TSO[6] w/ ECN in. */
VIRTIO_NET_F_HOST_UFO=14, /* Host can handle UFO in. */
VIRTIO_NET_F_MRG_RXBUF=15, /* Host can merge receive buffers. */
VIRTIO_NET_F_STATUS=16, /* virtio_net_config.status available */
VIRTIO_NET_F_CTRL_VQ=17, /* Control channel available */
VIRTIO_NET_F_CTRL_RX=18, /* Control channel RX mode support */
VIRTIO_NET_F_CTRL_VLAN=19, /* Control channel VLAN filtering */
VIRTIO_NET_F_CTRL_RX_EXTRA=20, /* Extra RX mode control support */
VIRTIO_NET_F_GUEST_ANNOUNCE=21 /* Guest can announce device on the
network */
};
enum {
// PCI device id of the transitional virtio network device
VIRTIO_NET_DEVICE_ID=0x1000,
};
virtio_net();
virtual ~virtio_net();
virtual bool Init(Device *d);
// Features we accept from the host: checksum offload + host-supplied MAC.
virtual u32 get_driver_features(void) { return ((1 << VIRTIO_NET_F_CSUM) | (1 << VIRTIO_NET_F_MAC)); }
private:
};
}
#endif
#include <string.h>
#include "mempool.hh"
#include "drivers/virtio.hh"
#include "drivers/virtio-vring.hh"
using namespace memory;
namespace virtio {
// Allocate and lay out one virtio ring with 'num' descriptors.
// The ring is a single contiguous chunk: descriptor table, then the
// avail ring, then (aligned up) the used ring.
vring::vring(unsigned int num)
{
    // Size of the whole layout, rounded up to the vring alignment.
    unsigned sz = VIRTIO_ALIGN(vring::get_size(num, VIRTIO_PCI_VRING_ALIGN));
    // NOTE(review): malloc returns a virtual address, while the device is
    // handed what is treated as a physical one -- assumes an identity
    // mapping and sufficient allocation alignment; confirm both.
    _paddr = malloc(sz);
    memset(_paddr, 0, sz);
    // Set up pointers into the chunk.
    _num = num;
    _desc = (vring_desc *)_paddr;
    // Portability fix: arithmetic on void* is a GNU extension; do the byte
    // arithmetic on a char* instead (identical resulting address).
    _avail = (vring_avail *)((char *)_paddr + num * sizeof(vring_desc));
    // The used ring starts at the next VIRTIO_PCI_VRING_ALIGN boundary
    // after avail->_ring[num] plus the trailing used_event u16.
    _used = (vring_used *)(((unsigned long)&_avail->_ring[num] +
            sizeof(u16) + VIRTIO_PCI_VRING_ALIGN-1) & ~(VIRTIO_PCI_VRING_ALIGN-1));
}
vring::~vring()
{
    // desc/avail/used all point into the single allocation at _paddr,
    // so one free releases the entire ring.
    free(_paddr);
}
// Base address of the ring memory that is handed to the virtio device.
void * vring::get_paddr(void)
{
    return _paddr;
}
// Bytes needed for a ring of 'num' descriptors: the descriptor table and
// avail ring (flags, idx, ring[num], used_event) rounded up to 'align',
// followed by the used ring (flags, idx, avail_event, used_elem[num]).
unsigned vring::get_size(unsigned int num, unsigned long align)
{
    unsigned long desc_avail = sizeof(vring_desc) * num + sizeof(u16) * (3 + num);
    unsigned long used = sizeof(u16) * 3 + sizeof(vring_used_elem) * num;
    return ((desc_avail + align - 1) & ~(align - 1)) + used;
}
// Event-index notification test: fire iff event_idx falls in the
// half-open window (old, new_idx], using wrap-safe u16 arithmetic.
// Note: Xen has similar logic for notification hold-off in
// include/xen/interface/io/ring.h with req_event and req_prod
// corresponding to event_idx + 1 and new_idx respectively; req_event
// and req_prod in Xen start at 1 while virtio event indexes start at 0.
int vring::need_event(u16 event_idx, u16 new_idx, u16 old)
{
    u16 since_event = (u16)(new_idx - event_idx - 1);
    u16 since_old = (u16)(new_idx - old);
    return since_event < since_old;
}
}
#ifndef VIRTIO_VRING_H
#define VIRTIO_VRING_H
namespace virtio {
// Buffer descriptors in the ring. Field layout mirrors the 16-byte
// virtio descriptor: u64 addr, u32 len, u16 flags, u16 next.
class vring_desc {
public:
enum {
// This marks a buffer as continuing via the next field.
VRING_DESC_F_NEXT=1,
// This marks a buffer as write-only (otherwise read-only).
VRING_DESC_F_WRITE=2,
// This means the buffer contains a list of buffer descriptors.
VRING_DESC_F_INDIRECT=4
};
u64 get_paddr(void) { return (_paddr); }
u32 get_len(void) { return (_len); }
u16 next_idx(void) { return (_next); }
// flags accessors: each tests one VRING_DESC_F_* bit
bool is_chained(void) { return ((_flags & VRING_DESC_F_NEXT) == VRING_DESC_F_NEXT); };
bool is_write(void) { return ((_flags & VRING_DESC_F_WRITE) == VRING_DESC_F_WRITE); };
bool is_indirect(void) { return ((_flags & VRING_DESC_F_INDIRECT) == VRING_DESC_F_INDIRECT); };
private:
u64 _paddr;
u32 _len;
u16 _flags;
u16 _next;
};
// Guest to host: ring of descriptor-chain head indexes offered to the host.
class vring_avail{
public:
u16 _flags;   // e.g. VRING_AVAIL_F_NO_INTERRUPT
u16 _idx;     // free-running index of the next slot the guest will fill
u16 _ring[];  // flexible array member; the ring has _num entries
};
// One entry of the used ring, written by the host.
class vring_used_elem {
public:
// Index of start of used vring_desc chain. (u32 for padding reasons)
u32 _id;
// Number of descriptors in chain
u32 _len;
};
// Host to guest: ring of consumed descriptor chains.
class vring_used {
public:
u16 _flags;  // e.g. VRING_USED_F_NO_NOTIFY
u16 _idx;    // free-running index of the next slot the host will fill
vring_used_elem _used_elements[];  // flexible array member
};
// Owner of one virtio ring: allocates the shared memory and exposes the
// desc/avail/used views into it.
class vring {
public:
enum {
/* The Host uses this in used->flags to advise the Guest: don't kick me when
 * you add a buffer. It's unreliable, so it's simply an optimization. Guest
 * will still kick if it's out of buffers. */
VRING_USED_F_NO_NOTIFY = 1,
/* The Guest uses this in avail->flags to advise the Host: don't interrupt me
 * when you consume a buffer. It's unreliable, so it's simply an
 * optimization. */
VRING_AVAIL_F_NO_INTERRUPT = 1,
/* We support indirect buffer descriptors */
VIRTIO_RING_F_INDIRECT_DESC = 28,
/* The Guest publishes the used index for which it expects an interrupt
 * at the end of the avail ring. Host should ignore the avail->flags field. */
/* The Host publishes the avail index for which it expects a kick
 * at the end of the used ring. Guest should ignore the used->flags field. */
VIRTIO_RING_F_EVENT_IDX = 29,
};
vring(unsigned int num);
virtual ~vring();
void * get_paddr(void);
static unsigned get_size(unsigned int num, unsigned long align);
// The following is used with USED_EVENT_IDX and AVAIL_EVENT_IDX
// Assuming a given event_idx value from the other side, if
// we have just incremented index from old to new_idx,
// should we trigger an event?
static int need_event(u16 event_idx, u16 new_idx, u16 old);
private:
// Base address of the ring memory handed to the virtio device.
// NOTE(review): obtained from malloc, so it is a virtual address --
// confirm the mapping before treating it as physical.
void *_paddr;
// Total number of descriptors in ring
unsigned int _num;
// Flat list of chained descriptors
vring_desc *_desc;
// Avail ring: chains the guest offers, consumed by the host
vring_avail *_avail;
// Used ring: chains the host has finished with, consumed by the guest
vring_used *_used;
};
}
#endif // VIRTIO_VRING_H
#include <string.h>
#include "drivers/virtio.hh"
#include "drivers/virtio-vring.hh"
#include "debug.hh"
using namespace pci;
bool
Virtio::earlyInitChecks() {
if (!Driver::earlyInitChecks()) return false;
namespace virtio {
virtio_driver::virtio_driver(u16 device_id)
: Driver(VIRTIO_VENDOR_ID, device_id)
{
for (int i=0; i < max_virtqueues_nr; i++) {
_queues[i] = NULL;
}
u8 rev;
if (getRevision() != VIRTIO_PCI_ABI_VERSION) {
debug(fmt("Wrong virtio revision=%x") % rev);
return false;
num_queues = 0;
}
if (_id < VIRTIO_PCI_ID_MIN || _id > VIRTIO_PCI_ID_MAX) {
debug(fmt("Wrong virtio dev id %x") % _id);
// Tear down every virtqueue allocated by probe_virt_queues().
virtio_driver::~virtio_driver()
{
    // delete is a no-op on NULL, so unprobed slots need no special case.
    for (int idx = 0; idx < max_virtqueues_nr; idx++) {
        delete _queues[idx];
    }
}
// Sanity-check that the matched PCI function really is a legacy virtio
// device we can drive: generic PCI checks, ABI revision, and device-id
// range.
//
// @return true when all checks pass
bool virtio_driver::earlyInitChecks()
{
    if (!Driver::earlyInitChecks()) {
        return false;
    }
    // Bug fix: 'rev' was printed uninitialized; read the revision once
    // and use it for both the comparison and the message.
    u8 rev = getRevision();
    if (rev != VIRTIO_PCI_ABI_VERSION) {
        debug(fmt("Wrong virtio revision=%x") % rev);
        return false;
    }
    // Bug fix: a stray unconditional 'return false;' sat here, making the
    // id-range check and the success path below unreachable.
    if (_id < VIRTIO_PCI_ID_MIN || _id > VIRTIO_PCI_ID_MAX) {
        debug(fmt("Wrong virtio dev id %x") % _id);
        return false;
    }
    debug(fmt("%s passed. Subsystem: vid:%x:id:%x") % __FUNCTION__ % (u16)getSubsysVid() % (u16)getSubsysId());
    return true;
}
debug(fmt("%s passed. Subsystem: vid:%x:id:%x") % __FUNCTION__ % (u16)getSubsysVid() % (u16)getSubsysId());
return true;
}
bool virtio_driver::Init(Device* dev)
{
if (!earlyInitChecks()) {
return false;
}
bool
Virtio::Init(Device* dev) {
if (!Driver::Init(dev)) {
return (false);
}
if (!earlyInitChecks()) return false;
debug(fmt("Virtio:Init %x:%x") % _vid % _id);
if (!Driver::Init(dev)) return false;
// Acknowledge device
add_dev_status(VIRTIO_CONFIG_S_ACKNOWLEDGE | VIRTIO_CONFIG_S_DRIVER);
debug(fmt("Virtio:Init %x:%x") % _vid % _id);
// Generic init of virtqueues
probe_virt_queues();
setup_features();
_bars[0]->write(VIRTIO_PCI_STATUS, (u8)(VIRTIO_CONFIG_S_ACKNOWLEDGE |
VIRTIO_CONFIG_S_DRIVER));
#if 0
for (int i=0;i<32;i++)
debug(fmt("%d:%d ") % i % get_device_feature_bit(i), false);
debug(fmt("\n"), false);
#endif
probe_virt_queues();
return true;
}
for (int i=0;i<32;i++)
debug(fmt("%d:%d ") % i % get_device_feature_bit(i), false);
debug(fmt("\n"), false);
// Discover the device's virtqueues: select each queue index in turn,
// read its size, allocate a vring of that size and hand its page frame
// number to the host. Stops at the first queue the device reports as
// size 0. Returns false only if the device exposes more queues than
// max_virtqueues_nr.
bool virtio_driver::probe_virt_queues(void)
{
u16 queuesel = 0;
u16 qsize = 0;
do {
if (queuesel >= max_virtqueues_nr) {
return false;
}
// Read queue size (selecting the queue first, per the legacy layout)
pci_conf_write(VIRTIO_PCI_QUEUE_SEL, queuesel);
qsize = pci_conf_readw(VIRTIO_PCI_QUEUE_NUM);
if (0 == qsize) {
break;
}
// Init a new queue
vring * queue = new vring(qsize);
_queues[queuesel++] = queue;
// Tell host about pfn.
// NOTE(review): get_paddr() returns the malloc'd (virtual) address;
// the device expects a physical frame number -- confirm the mapping.
pci_conf_write(VIRTIO_PCI_QUEUE_PFN, (u32)((u64)queue->get_paddr() >> VIRTIO_PCI_QUEUE_ADDR_SHIFT));
// Debug print
debug(fmt("Queue[%d] -> size %d, paddr %x") % (queuesel-1) % qsize % queue->get_paddr());
} while (true);
return true;
}
_bars[0]->write(VIRTIO_PCI_STATUS, (u8)(VIRTIO_CONFIG_S_DRIVER_OK));
return true;
}
bool virtio_driver::setup_features(void)
{
u32 dev_features = this->get_device_features();
u32 drv_features = this->get_driver_features();
void Virtio::dumpConfig() const {
Driver::dumpConfig();
debug(fmt("Virtio vid:id= %x:%x") % _vid % _id);
}
u32 subset = dev_features & drv_features;
void
Virtio::vring_init(struct vring *vr, unsigned int num, void *p, unsigned long align) {
vr->num = num;
vr->desc = reinterpret_cast<struct vring_desc*>(p);
vr->avail = reinterpret_cast<struct vring_avail*>(p) + num*sizeof(struct vring_desc);
vr->used = reinterpret_cast<struct vring_used*>(((unsigned long)&vr->avail->ring[num] + sizeof(u16) \
+ align-1) & ~(align - 1));
}
// Configure transport features
// TBD
return (subset == 1);
unsigned
Virtio::vring_size(unsigned int num, unsigned long align) {
return ((sizeof(struct vring_desc) * num + sizeof(u16) * (3 + num)
+ align - 1) & ~(align - 1))
+ sizeof(u16) * 3 + sizeof(struct vring_used_elem) * num;
}
}
/* The following is used with USED_EVENT_IDX and AVAIL_EVENT_IDX */
/* Assuming a given event_idx value from the other side, if
 * we have just incremented index from old to new_idx,
 * should we trigger an event? */
int
Virtio::vring_need_event(u16 event_idx, u16 new_idx, u16 old) {
/* Note: Xen has similar logic for notification hold-off
* in include/xen/interface/io/ring.h with req_event and req_prod
* corresponding to event_idx + 1 and new_idx respectively.
* Note also that req_event and req_prod in Xen start at 1,
* event indexes in virtio start at 0. */
return (u16)(new_idx - event_idx - 1) < (u16)(new_idx - old);
}
// Debug helper: dump the generic PCI config, then this device's
// vendor:device identity.
void virtio_driver::dumpConfig() const
{
Driver::dumpConfig();
debug(fmt("Virtio vid:id= %x:%x") % _vid % _id);
}
bool
Virtio::get_device_feature_bit(int bit) {
u32 features = _bars[0]->read(VIRTIO_PCI_HOST_FEATURES);
return bool(features & (1 << bit));
}
void
Virtio::set_guest_feature_bit(int bit, bool on) {
u32 features = _bars[0]->read(VIRTIO_PCI_GUEST_FEATURES);
features = (on)? features | (1 << bit) : features & ~(1 << bit);
_bars[0]->write(VIRTIO_PCI_GUEST_FEATURES, features);
}
// Full 32-bit word of features the host advertises.
u32 virtio_driver::get_device_features(void)
{
    return get_virtio_config(VIRTIO_PCI_HOST_FEATURES);
}
void
Virtio::set_guest_features(u32 features) {
_bars[0]->write(VIRTIO_PCI_GUEST_FEATURES, features);
}
// Test one host-advertised feature bit.
bool virtio_driver::get_device_feature_bit(int bit)
{
    return get_virtio_config_bit(VIRTIO_PCI_HOST_FEATURES, bit);
}
void
Virtio::pci_conf_write(int offset, void* buf, int length) {
u8* ptr = reinterpret_cast<u8*>(buf);
for (int i=0;i<length;i++)
_bars[0]->write(offset+i, ptr[i]);
}
// Publish the complete guest feature word to the device.
void virtio_driver::set_guest_features(u32 features)
{
    set_virtio_config(VIRTIO_PCI_GUEST_FEATURES, features);
}
void
Virtio::pci_conf_read(int offset, void* buf, int length) {
unsigned char* ptr = reinterpret_cast<unsigned char*>(buf);
for (int i=0;i<length;i++)
ptr[i] = _bars[0]->readb(offset+i);
}
// Set or clear one guest feature bit (read-modify-write of the register).
void virtio_driver::set_guest_feature_bit(int bit, bool on)
{
    set_virtio_config_bit(VIRTIO_PCI_GUEST_FEATURES, bit, on);
}
// Current device status (ACKNOWLEDGE/DRIVER/DRIVER_OK... bits).
u32 virtio_driver::get_dev_status(void)
{
    return get_virtio_config(VIRTIO_PCI_STATUS);
}
// Overwrite the whole device status register.
void virtio_driver::set_dev_status(u32 status)
{
    set_virtio_config(VIRTIO_PCI_STATUS, status);
}
// OR the given bits into the device status (read-modify-write).
void virtio_driver::add_dev_status(u32 status)
{
    u32 current = get_dev_status();
    set_dev_status(current | status);
}
void
Virtio::probe_virt_queues() {
u16 queuesel = 0;
u16 qsize;
// Clear the given bits from the device status (read-modify-write).
void virtio_driver::del_dev_status(u32 status)
{
    u32 current = get_dev_status();
    set_dev_status(current & ~status);
}
do {
pci_conf_write(VIRTIO_PCI_QUEUE_SEL, queuesel);
qsize = pci_conf_readw(VIRTIO_PCI_QUEUE_NUM);
debug(fmt("queue %d, size %d") % queuesel % qsize);
// 32-bit read from the virtio config window behind BAR0.
u32 virtio_driver::get_virtio_config(int offset)
{
    return _bars[0]->read(offset);
}
// 32-bit write into the virtio config window behind BAR0.
void virtio_driver::set_virtio_config(int offset, u32 val)
{
    _bars[0]->write(offset, val);
}
bool virtio_driver::get_virtio_config_bit(int offset, int bit)
{
return (get_virtio_config(offset) & (1 << bit));
}
void virtio_driver::set_virtio_config_bit(int offset, int bit, bool on)
{
u32 val = get_virtio_config(offset);
u32 newval = ( val & ~(1 << bit) ) | ((int)(on)<<bit);
set_virtio_config(offset, newval);
}
void virtio_driver::pci_conf_write(int offset, void* buf, int length)
{
u8* ptr = reinterpret_cast<u8*>(buf);
for (int i=0;i<length;i++)
_bars[0]->write(offset+i, ptr[i]);
}
// Byte-wise read of 'length' bytes from BAR0 at 'offset' into 'buf'.
void virtio_driver::pci_conf_read(int offset, void* buf, int length)
{
    unsigned char* dst = reinterpret_cast<unsigned char*>(buf);
    for (int i = 0; i < length; i++) {
        dst[i] = _bars[0]->readb(offset + i);
    }
}
if (!qsize) break;
queuesel++;
} while (1);
}
#ifndef VIRTIO_DRIVER_H
#define VIRTIO_DRIVER_H
#include <list>
#include <string>
#include "arch/x64/processor.hh"
#include "drivers/pci.hh"
#include "drivers/driver.hh"
class Virtio : public Driver {
public:
enum {
VIRTIO_VENDOR_ID = 0x1af4,
VIRTIO_PCI_ID_MIN = 0x1000,
VIRTIO_PCI_ID_MAX = 0x103f,
VIRTIO_ID_NET = 1,
VIRTIO_ID_BLOCK = 2,
VIRTIO_ID_CONSOLE = 3,
VIRTIO_ID_RNG = 4,
VIRTIO_ID_BALLOON = 5,
VIRTIO_ID_RPMSG = 7,
VIRTIO_ID_SCSI = 8,
VIRTIO_ID_9P = 9,
VIRTIO_ID_RPROC_SERIAL = 11,
#include "drivers/virtio-vring.hh"
namespace virtio {
};
enum VIRTIO_CONFIG {
/* Status byte for guest to report progress, and synchronize features. */
......@@ -81,134 +69,88 @@ public:
};
/* The remaining space is defined by each driver as the per-driver
* configuration space */
#define VIRTIO_PCI_CONFIG(dev) ((dev)->msix_enabled ? 24 : 20)
enum VIRTIO_VRING {
/* This marks a buffer as continuing via the next field. */
VRING_DESC_F_NEXT = 1,
/* This marks a buffer as write-only (otherwise read-only). */
VRING_DESC_F_WRITE = 2,
/* This means the buffer contains a list of buffer descriptors. */
VRING_DESC_F_INDIRECT = 4,
/* The Host uses this in used->flags to advise the Guest: don't kick me when
* you add a buffer. It's unreliable, so it's simply an optimization. Guest
* will still kick if it's out of buffers. */
VRING_USED_F_NO_NOTIFY = 1,
/* The Guest uses this in avail->flags to advise the Host: don't interrupt me
* when you consume a buffer. It's unreliable, so it's simply an
* optimization. */
VRING_AVAIL_F_NO_INTERRUPT = 1,
/* We support indirect buffer descriptors */
VIRTIO_RING_F_INDIRECT_DESC = 28,
/* The Guest publishes the used index for which it expects an interrupt
* at the end of the avail ring. Host should ignore the avail->flags field. */
/* The Host publishes the avail index for which it expects a kick
* at the end of the used ring. Guest should ignore the used->flags field. */
VIRTIO_RING_F_EVENT_IDX = 29,
#define VIRTIO_ALIGN(x) ((x + (VIRTIO_PCI_VRING_ALIGN-1)) & ~(VIRTIO_PCI_VRING_ALIGN-1))
const int max_virtqueues_nr = 64;
// Common base for all virtio device drivers over the legacy virtio-pci
// transport: PCI identity checks, feature negotiation, status-byte
// handling, virtqueue discovery and raw BAR0 config accessors.
class virtio_driver : public Driver {
public:
enum {
VIRTIO_VENDOR_ID = 0x1af4,
// Transitional virtio devices use PCI ids 0x1000..0x103f
VIRTIO_PCI_ID_MIN = 0x1000,
VIRTIO_PCI_ID_MAX = 0x103f,
// Virtio device-type ids (subsystem ids)
VIRTIO_ID_NET = 1,
VIRTIO_ID_BLOCK = 2,
VIRTIO_ID_CONSOLE = 3,
VIRTIO_ID_RNG = 4,
VIRTIO_ID_BALLOON = 5,
VIRTIO_ID_RPMSG = 7,
VIRTIO_ID_SCSI = 8,
VIRTIO_ID_9P = 9,
VIRTIO_ID_RPROC_SERIAL = 11,
};
// The remaining space is defined by each driver as the per-driver
// configuration space
#define VIRTIO_PCI_CONFIG(dev) ((dev)->msix_enabled ? 24 : 20)
virtio_driver(u16 device_id);
virtual ~virtio_driver();
virtual bool Init(Device *d);
virtual void dumpConfig() const;
protected:
// Virtqueues discovered by probe_virt_queues(); unprobed slots are NULL
vring *_queues[max_virtqueues_nr];
// NOTE(review): appears never read after construction -- confirm intent
int num_queues;
virtual bool earlyInitChecks(void);
bool probe_virt_queues(void);
bool setup_features(void);
// Actual drivers should implement this (bitmask of accepted features)
virtual u32 get_driver_features(void) { return (0); }
///////////////////
// Device access //
///////////////////
// guest/host features physical access
u32 get_device_features(void);
bool get_device_feature_bit(int bit);
void set_guest_features(u32 features);
void set_guest_feature_bit(int bit, bool on);
// device status
u32 get_dev_status(void);
void set_dev_status(u32 status);
void add_dev_status(u32 status);
void del_dev_status(u32 status);
// access the virtio conf address space set by pci bar 0
u32 get_virtio_config(int offset);
void set_virtio_config(int offset, u32 val);
bool get_virtio_config_bit(int offset, int bit);
void set_virtio_config_bit(int offset, int bit, bool on);
// raw BAR0 accessors of various widths
void pci_conf_read(int offset, void* buf, int length);
void pci_conf_write(int offset, void* buf, int length);
u8 pci_conf_readb(int offset) {return _bars[0]->readb(offset);};
u16 pci_conf_readw(int offset) {return _bars[0]->readw(offset);};
u32 pci_conf_readl(int offset) {return _bars[0]->read(offset);};
void pci_conf_write(int offset, u8 val) {_bars[0]->write(offset, val);};
void pci_conf_write(int offset, u16 val) {_bars[0]->write(offset, val);};
void pci_conf_write(int offset, u32 val) {_bars[0]->write(offset, val);};
private:
};
/* Virtio ring descriptors: 16 bytes. These can chain together via "next". */
struct vring_desc {
/* Address (guest-physical). */
u64 addr;
/* Length. */
u32 len;
/* The flags as indicated above. */
u16 flags;
/* We chain unused descriptors via this, too */
u16 next;
};
struct vring_avail {
u16 flags;
u16 idx;
u16 ring[];
};
/* u32 is used here for ids for padding reasons. */
struct vring_used_elem {
/* Index of start of used descriptor chain. */
u32 id;
/* Total length of the descriptor chain which was used (written to) */
u32 len;
};
struct vring_used {
u16 flags;
u16 idx;
struct vring_used_elem ring[];
};
struct vring {
unsigned int num;
struct vring_desc *desc;
struct vring_avail *avail;
struct vring_used *used;
};
/* The standard layout for the ring is a continuous chunk of memory which looks
* like this. We assume num is a power of 2.
*
* struct vring
* {
* // The actual descriptors (16 bytes each)
* struct vring_desc desc[num];
*
* // A ring of available descriptor heads with free-running index.
* __u16 avail_flags;
* __u16 avail_idx;
* __u16 available[num];
* __u16 used_event_idx;
*
* // Padding to the next align boundary.
* char pad[];
*
* // A ring of used descriptor heads with free-running index.
* __u16 used_flags;
* __u16 used_idx;
* struct vring_used_elem used[num];
* __u16 avail_event_idx;
* };
*/
/* We publish the used event index at the end of the available ring, and vice
* versa. They are at the end for backwards compatibility. */
#define vring_used_event(vr) ((vr)->avail->ring[(vr)->num])
#define vring_avail_event(vr) (*(u16 *)&(vr)->used->ring[(vr)->num])
Virtio(u16 id) : Driver(VIRTIO_VENDOR_ID, id) {};
virtual void dumpConfig() const;
virtual bool Init(Device *d);
protected:
virtual bool earlyInitChecks();
void vring_init(struct vring *vr, unsigned int num, void *p,
unsigned long align);
unsigned vring_size(unsigned int num, unsigned long align);
int vring_need_event(u16 event_idx, u16 new_idx, u16 old);
virtual bool get_device_feature_bit(int bit);
virtual void set_guest_feature_bit(int bit, bool on);
virtual void set_guest_features(u32 features);
void pci_conf_read(int offset, void* buf, int length);
void pci_conf_write(int offset, void* buf, int length);
u8 pci_conf_readb(int offset) {return _bars[0]->readb(offset);};
u16 pci_conf_readw(int offset) {return _bars[0]->readw(offset);};
u32 pci_conf_readl(int offset) {return _bars[0]->read(offset);};
void pci_conf_write(int offset, u8 val) {_bars[0]->write(offset, val);};
void pci_conf_write(int offset, u16 val) {_bars[0]->write(offset, val);};
void pci_conf_write(int offset, u32 val) {_bars[0]->write(offset, val);};
void probe_virt_queues();
private:
};
}
#endif
......@@ -14,7 +14,8 @@
#include <string.h>
//#include <locale>
#include "drivers/virtio.hh"
#include "drivers/virtio-net.hh"
#include "drivers/driver-factory.hh"
#include "sched.hh"
#include "drivers/clock.hh"
......@@ -200,7 +201,7 @@ void main_thread(elf::program& prog)
pci::pci_device_enumeration();
DeviceFactory::Instance()->DumpDevices();
Driver *d = new Virtio(0x1000);
Driver *d = new virtio::virtio_net();
DriverFactory::Instance()->RegisterDriver(d);
DeviceFactory::Instance()->InitializeDrivers();
......
0% Loading or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment