Commit f99c90b9 authored by Vlad Zolotarov, committed by Pekka Enberg

virtio-net: Move the Tx related code into the txq class.

parent cff3aac2
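
In short: the Tx helpers that used to live on the `net` driver object (`tx_locked`, `tx_offload`, `tx_gc`) move into the per-queue `txq` struct, which now takes a back-pointer to its parent driver for the few members it still needs (`_mergeable_bufs`, `_tso_ecn`, `_ifn`). A condensed outline of the resulting shape, distilled from the hunks below (not compilable on its own; all types are the driver's):

// Outline only -- distilled from this diff, bodies elided.
class net : public virtio_driver {
public:
    // Thin wrapper now: delegates to the Tx queue object.
    int tx_locked(struct mbuf* m_head);   // returns _txq.try_xmit_one_locked(m_head)
private:
    struct txq {
        txq(net* parent, vring* vq) : vqueue(vq), _parent(parent) {};

        int try_xmit_one_locked(mbuf* m_head);        // was net::tx_locked()

        vring* vqueue;
        txq_stats stats = { 0 };
    private:
        int xmit_prep(mbuf* m_head, net_req*& cooky); // new: checksum/offload prep
        void gc();                                    // was net::tx_gc()
        mbuf* offload(mbuf* m, net_hdr* hdr);         // was net::tx_offload()

        net* _parent;  // back-pointer for _mergeable_bufs, _tso_ecn, _ifn
    };
    txq _txq;          // constructed as _txq(this, get_virt_queue(1))
};

Besides the move, the patch drops the unused `flush` parameter, sizes the ring-room check by a fragment count (`vec_sz`) instead of `_sg_vec.size()`, and writes the header length as `sizeof(net_hdr_mrg_rxbuf)` rather than `_hdr_size`.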
@@ -200,7 +200,7 @@ bool net::ack_irq()
 net::net(pci::device& dev)
     : virtio_driver(dev),
       _rxq(get_virt_queue(0), [this] { this->receiver(); }),
-      _txq(get_virt_queue(1))
+      _txq(this, get_virt_queue(1))
 {
     sched::thread* poll_task = &_rxq.poll_task;
@@ -552,55 +552,66 @@ void net::fill_rx_ring()
     vq->kick();
 }

-// TODO: Does it really have to be "locked"?
-int net::tx_locked(struct mbuf* m_head, bool flush)
+inline int net::tx_locked(struct mbuf* m_head)
 {
-    DEBUG_ASSERT(_tx_ring_lock.owned(), "_tx_ring_lock is not locked!");
+    return _txq.try_xmit_one_locked(m_head);
+}

-    struct mbuf* m;
+inline int net::txq::xmit_prep(mbuf* m_head, net_req*& cooky)
+{
     net_req* req = new net_req;
-    vring* vq = _txq.vqueue;
-    auto vq_sg_vec = &vq->_sg_vec;
-    int rc = 0;
-    struct txq_stats* stats = &_txq.stats;
-    u64 tx_bytes = 0;
+    mbuf* m;

     req->um.reset(m_head);

     if (m_head->M_dat.MH.MH_pkthdr.csum_flags != 0) {
-        m = tx_offload(m_head, &req->mhdr.hdr);
+        m = offload(m_head, &req->mhdr.hdr);
         if ((m_head = m) == nullptr) {
             delete req;
             /* The buffer is not well-formed */
-            rc = EINVAL;
-            goto out;
+            return EINVAL;
         }
     }

-    if (_mergeable_bufs) {
+    cooky = req;
+    return 0;
+}
+
+int net::txq::try_xmit_one_locked(mbuf *m_head)
+{
+    mbuf* m;
+    u16 vec_sz = 0;
+    u64 tx_bytes = 0;
+    net_req* req;
+
+    int rc = xmit_prep(m_head, req);
+    if (rc) {
+        goto out;
+    }
+
+    if (_parent->_mergeable_bufs) {
         req->mhdr.num_buffers = 0;
     }

-    vq->init_sg();
-    vq->add_out_sg(static_cast<void*>(&req->mhdr), _hdr_size);
+    vqueue->init_sg();
+    vqueue->add_out_sg(static_cast<void*>(&req->mhdr),
+                       sizeof(net_hdr_mrg_rxbuf));

     for (m = m_head; m != NULL; m = m->m_hdr.mh_next) {
         int frag_len = m->m_hdr.mh_len;

         if (frag_len != 0) {
             net_d("Frag len=%d:", frag_len);
-            vq->add_out_sg(m->m_hdr.mh_data, m->m_hdr.mh_len);
+            vec_sz++;
             tx_bytes += frag_len;
+            vqueue->add_out_sg(m->m_hdr.mh_data, m->m_hdr.mh_len);
         }
     }

-    if (!vq->avail_ring_has_room(vq->_sg_vec.size())) {
-        // can't call it, this is a get buf thing
-        if (vq->used_ring_not_empty()) {
-            trace_virtio_net_tx_no_space_calling_gc(_ifn->if_index);
-            tx_gc();
+    if (!vqueue->avail_ring_has_room(vec_sz)) {
+        if (vqueue->used_ring_not_empty()) {
+            trace_virtio_net_tx_no_space_calling_gc(_parent->_ifn->if_index);
+            gc();
         } else {
             net_d("%s: no room", __FUNCTION__);
             delete req;
@@ -610,44 +621,43 @@ int net::tx_locked(struct mbuf* m_head, bool flush)
         }
     }

-    if (!vq->add_buf(req)) {
-        trace_virtio_net_tx_failed_add_buf(_ifn->if_index);
+    if (!vqueue->add_buf(req)) {
+        trace_virtio_net_tx_failed_add_buf(_parent->_ifn->if_index);
         delete req;
         rc = ENOBUFS;
         goto out;
     }

-    trace_virtio_net_tx_packet(_ifn->if_index, vq_sg_vec->size());
+    trace_virtio_net_tx_packet(_parent->_ifn->if_index, vec_sz);

 out:
     /* Update the statistics */
     switch (rc) {
     case 0: /* success */
-        stats->tx_bytes += tx_bytes;
-        stats->tx_packets++;
+        stats.tx_bytes += tx_bytes;
+        stats.tx_packets++;

         if (req->mhdr.hdr.flags & net_hdr::VIRTIO_NET_HDR_F_NEEDS_CSUM)
-            stats->tx_csum++;
+            stats.tx_csum++;

         if (req->mhdr.hdr.gso_type)
-            stats->tx_tso++;
+            stats.tx_tso++;

         break;
     case ENOBUFS:
-        stats->tx_drops++;
+        stats.tx_drops++;

         break;
     default:
-        stats->tx_err++;
+        stats.tx_err++;
     }

     return rc;
 }

-struct mbuf*
-net::tx_offload(struct mbuf* m, struct net_hdr* hdr)
+mbuf* net::txq::offload(mbuf* m, net_hdr* hdr)
 {
     struct ether_header* eh;
     struct ether_vlan_header* evh;
@@ -715,7 +725,7 @@ net::tx_offload(struct mbuf* m, struct net_hdr* hdr)
         hdr->gso_size = m->M_dat.MH.MH_pkthdr.tso_segsz;

         if (tcp->th_flags & TH_CWR) {
-            if (!_tso_ecn) {
+            if (!_parent->_tso_ecn) {
                 virtio_w("TSO with ECN not supported by host\n");
                 m_freem(m);
                 return nullptr;
@@ -728,20 +738,19 @@ net::tx_offload(struct mbuf* m, struct net_hdr* hdr)
     return m;
 }

-void net::tx_gc()
+void net::txq::gc()
 {
     net_req* req;
     u32 len;
-    vring* vq = _txq.vqueue;
     u16 req_cnt = 0;

     //
     // "finalize" at least every quoter of a ring to let the host work in
     // paralel with us.
     //
-    const u16 fin_thr = static_cast<u16>(vq->size()) / 4;
+    const u16 fin_thr = static_cast<u16>(vqueue->size()) / 4;

-    req = static_cast<net_req*>(vq->get_buf_elem(&len));
+    req = static_cast<net_req*>(vqueue->get_buf_elem(&len));

     while(req != nullptr) {
         delete req;
@@ -749,20 +758,20 @@ void net::tx_gc()
         req_cnt++;

         if (req_cnt >= fin_thr) {
-            vq->get_buf_finalize(true);
+            vqueue->get_buf_finalize(true);
             req_cnt = 0;
         } else {
-            vq->get_buf_finalize(false);
+            vqueue->get_buf_finalize(false);
         }

-        req = static_cast<net_req*>(vq->get_buf_elem(&len));
+        req = static_cast<net_req*>(vqueue->get_buf_elem(&len));
     }

     if (req_cnt) {
-        vq->db_used();
+        vqueue->db_used();
     }

-    vq->get_buf_gc();
+    vqueue->get_buf_gc();
 }

 u32 net::get_driver_features()
...
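
The moved `gc()` keeps the original reclaim strategy: completed Tx requests are freed in a loop, but the used ring is only "finalized" once per quarter of the ring (`fin_thr`), so the host can keep working in parallel instead of waiting for a full drain. A standalone sketch of that batching pattern, with a plain queue standing in for the vring (`get_buf_finalize`/`db_used` are the driver's calls; everything else here is illustrative):

// Illustrative only: the quarter-ring "finalize" batching used by txq::gc(),
// rewritten over a plain std::queue instead of a vring.
#include <cstdio>
#include <queue>

int main()
{
    const unsigned ring_size = 256;
    const unsigned fin_thr = ring_size / 4;   // same threshold as in gc()

    std::queue<int> used;                     // stands in for the used ring
    for (int i = 0; i < 600; i++) {
        used.push(i);                         // pretend 600 packets completed
    }

    unsigned req_cnt = 0;
    while (!used.empty()) {
        used.pop();                           // "delete req" in gc()
        if (++req_cnt >= fin_thr) {
            // get_buf_finalize(true): publish reclaimed entries so the host
            // sees free descriptors before the whole backlog is drained.
            std::printf("finalize %u entries\n", req_cnt);
            req_cnt = 0;
        }
    }
    if (req_cnt) {
        std::printf("final doorbell, %u entries\n", req_cnt);  // db_used()
    }
    return 0;
}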
@@ -223,17 +223,14 @@ public:
     /**
      * Transmit a single mbuf.
      * @param m_head a buffer to transmits
-     * @param flush kick() if TRUE
+     *
      * @note should be called under the _tx_ring_lock.
      *
      * @return 0 in case of success and an appropriate error code
      *         otherwise
      */
-    int tx_locked(struct mbuf* m_head, bool flush = false);
+    int tx_locked(struct mbuf* m_head);

-    struct mbuf* tx_offload(struct mbuf* m, struct net_hdr* hdr);
-    void kick(int queue) {_queues[queue]->kick();}
-    void tx_gc();

     static hw_driver* probe(hw_device* dev);

     /**
@@ -304,9 +301,49 @@ private:
     /* Single Tx queue object */
     struct txq {
-        txq(vring* vq) : vqueue(vq) {};
+        txq(net* parent, vring* vq) :
+            vqueue(vq), _parent(parent) {};
+
+        /**
+         * Try to send a Tx frame.
+         * @param m_head
+         *
+         * @return 0 if the packet has been successfully sent, EINVAL if it
+         *         is not well-formed and ENOBUFS if there was no room on the
+         *         HW ring to send it.
+         */
+        int try_xmit_one_locked(mbuf* m_head);

         vring* vqueue;
-        struct txq_stats stats = { 0 };
+        txq_stats stats = { 0 };
+
+    private:
+        /**
+         * Check the packet and return the net_req (via the "cooky" out
+         * parameter).
+         * @param m_head
+         * @param cooky
+         *
+         * @return 0 if the packet is ok and EINVAL if it's not well-formed.
+         */
+        int xmit_prep(mbuf* m_head, net_req*& cooky);
+
+        /**
+         * Free the descriptors of the completed packets.
+         */
+        void gc();
+
+        /**
+         * Update the packet handle and the net_hdr according to the various
+         * offload features.
+         * @param m   Tx packet handle
+         * @param hdr net_hdr to update
+         *
+         * @return the updated Tx packet handle, or nullptr if the packet was
+         *         not well-formed.
+         */
+        mbuf* offload(mbuf* m, net_hdr* hdr);
+
+        net* _parent;
     };

     /**
...
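
For callers nothing changes except the dropped `flush` argument: `tx_locked()` remains the single entry point and must still be invoked under the Tx ring lock, per the `@note` above. A hypothetical caller sketch (the lock object and the drop/requeue policy are not part of this diff; a std::mutex stands in for the driver's `_tx_ring_lock`):

// Hypothetical caller: only net::tx_locked() and its error codes come from
// this diff; the surrounding locking and policy are illustrative.
#include <cerrno>
#include <mutex>

void transmit_one(net& drv, std::mutex& tx_ring_lock, struct mbuf* m)
{
    std::lock_guard<std::mutex> guard(tx_ring_lock);  // honor the @note above
    switch (drv.tx_locked(m)) {
    case 0:        // sent: txq stats (tx_packets/tx_bytes) were updated
        break;
    case EINVAL:   // packet was not well-formed; the Tx path freed it
        break;
    case ENOBUFS:  // no ring space even after gc(): drop or requeue
        break;
    }
}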