Commit 5bcb95d9 authored by Dor Laor

Move from a request array approach back to allocation.

virtio_blk pre-allocates requests into a cache to avoid re-allocation
(possibly an unneeded optimization with the current allocator).  However,
it doesn't take into account that requests can complete out of order,
and it simply reuses requests in cyclic order.  Noted by Avi; I had a fix
that peeked into the index ring, but that was too complex a solution.
There is no performance degradation with SMP thanks to the good
allocator we have today.
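
The core issue can be illustrated with a minimal, self-contained sketch
(hypothetical names and ring size, not the actual OSv driver code): when
request slots are handed out by a cyclic index, a slot can be picked again
while the device still owns it, because an earlier request is allowed to
complete after its successors.

// Minimal sketch (hypothetical names, not the actual driver code) of why
// cyclic reuse of a pre-allocated request array breaks once completions
// can arrive out of order: the next slot picked purely by index may still
// be owned by the device.
#include <array>
#include <cassert>
#include <cstdint>

struct req {
    bool in_flight = false;
};

constexpr std::uint16_t ring_size = 4;
std::array<req, ring_size> requests;    // pre-allocated, reused cyclically
std::uint16_t req_idx = 0;

req* submit()
{
    req* r = &requests[req_idx++ % ring_size];
    assert(!r->in_flight);   // fires when a slot is reused too early
    r->in_flight = true;
    return r;
}

void complete(req* r)        // completions may come back in any order
{
    r->in_flight = false;
}

int main()
{
    req* slow = submit();    // slot 0: the device holds on to this one
    complete(submit());      // slots 1..3 complete promptly
    complete(submit());
    complete(submit());
    submit();                // wraps back to slot 0 while 'slow' is pending
    complete(slow);
}

Allocating each request with new and freeing it on completion, as the patch
below does, removes this ownership hazard at the cost of a per-request
allocation.
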
parent 1e65eb54
@@ -101,8 +101,6 @@ virtio_blk::virtio_blk(pci::device& pci_dev)
     t->start();
     _msi.easy_register({ { 0, nullptr, t } });
-    _requests = new virtio_blk::virtio_blk_req[get_virt_queue(0)->size()];
     add_dev_status(VIRTIO_CONFIG_S_DRIVER_OK);
     struct virtio_blk_priv* prv;
@@ -120,8 +118,6 @@ virtio_blk::~virtio_blk()
 {
     //TODO: In theory maintain the list of free instances and gc it
     // including the thread objects and their stack
-    if (_requests) delete [] _requests;
 }
 bool virtio_blk::read_config()
@@ -172,6 +168,7 @@ void virtio_blk::response_worker() {
         while((req = static_cast<virtio_blk_req*>(queue->get_buf_elem(&len))) != nullptr) {
             if (req->bio) biodone(req->bio, true);
             req->bio = nullptr;
+            delete req;
             queue->get_buf_finalize();
         }
     }
@@ -222,7 +219,7 @@ int virtio_blk::make_virtio_request(struct bio* bio)
         return ENOTBLK;
     }
-    virtio_blk_req* req = &_requests[_req_idx++%queue->size()];
+    virtio_blk_req* req = new virtio_blk_req;
     req->bio = bio;
     virtio_blk_outhdr* hdr = &req->hdr;
     hdr->type = type;
@@ -256,6 +253,7 @@ int virtio_blk::make_virtio_request(struct bio* bio)
     if (!queue->add_buf(req)) {
         biodone(bio, false);
+        delete req;
         return EBUSY;
     }
@@ -176,10 +176,6 @@ namespace virtio {
         static int _instance;
         int _id;
         bool _ro;
-        // ring-size array of requests that saves per item allocation
-        virtio_blk_req* _requests = nullptr;
-        // index into the above array
-        u16 _req_idx = 0;
         // This mutex protects parallel make_request invocations
         mutex _lock;
@@ -148,8 +148,6 @@ namespace virtio {
             return;
         }
-        _requests = new virtio_net_req[_rx_queue->size()];
         if_initname(_ifn, "eth", _id);
         _ifn->if_mtu = ETHERMTU;
         _ifn->if_softc = static_cast<void*>(this);
@@ -176,10 +174,10 @@ namespace virtio {
     {
         //TODO: In theory maintain the list of free instances and gc it
         // including the thread objects and their stack
+        // Will need to clear the pending requests in the ring too
         ether_ifdetach(_ifn);
         if_free(_ifn);
-        if (_requests) delete [] _requests;
     }
     bool virtio_net::read_config()
@@ -316,7 +314,7 @@ namespace virtio {
     bool virtio_net::tx(struct mbuf *m_head, bool flush)
     {
         struct mbuf *m;
-        virtio_net_req *req = &_requests[_req_idx++%_tx_queue->size()];
+        virtio_net_req *req = new virtio_net_req;
         req->um.reset(m_head);
         _tx_queue->_sg_vec.clear();
@@ -338,17 +336,14 @@ namespace virtio {
                 _tx_queue->get_buf_gc();
             } else {
                 virtio_net_d("%s: no room", __FUNCTION__);
-                req->um.reset();
-                _req_idx--;
+                delete req;
                 return false;
             }
         }
         if (!_tx_queue->add_buf(req)) {
             trace_virtio_net_tx_failed_add_buf(_ifn->if_index);
-            // TODO clear this ugliness
-            req->um.reset();
-            _req_idx--;
+            delete req;
             return false;
         }
@@ -377,7 +372,7 @@ namespace virtio {
             virtio_net_req * req;
             while((req = static_cast<virtio_net_req*>(_tx_queue->get_buf_elem(&len))) != nullptr) {
-                req->um.reset();
+                delete req;
                 _tx_queue->get_buf_finalize();
             }
         });
@@ -260,10 +260,6 @@ namespace virtio {
         int _id;
         struct ifnet* _ifn;
-        // ring-size array of requests that saves per item allocation
-        virtio_net_req* _requests = nullptr;
-        // index into the above array
-        u16 _req_idx = 0;
         // tx gc lock that can be called by the gc thread or the tx xmitter
         mutex _lock;
     };