Mirror of https://github.com/LuckfoxTECH/luckfox-pico.git (synced 2026-01-19 09:52:31 +01:00)
project:build.sh: Added fastboot support; custom modifications to U-Boot and the kernel are now applied as patches.
project:cfg:BoardConfig_IPC: Added a fastboot BoardConfig file and firmware post-scripts; split the BoardConfigs for Luckfox Pico Pro and Luckfox Pico Max.
project:app: Added fastboot_client and rk_smart_door for quick-boot applications; updated the rkipc app to work with the latest media library.
media:samples: Added more usage examples.
media:rockit: Fixed bugs; removed support for retrieving data frames from VPSS.
media:isp: Updated the rkaiq library and related tools to support connecting to RKISP_Tuner.
sysdrv:Makefile: Added support for compiling drv_ko for Luckfox Pico Ultra W using Ubuntu; added support for a custom root filesystem.
sysdrv:tools:board: Updated Buildroot optional mirror sources, updated some software versions, and moved the frequently modified U-Boot and kernel device-tree and configuration files into separate storage.
sysdrv:source:mcu: Used the RISC-V MCU SDK with the RT-Thread system, mainly to initialize the camera AE during quick boot.
sysdrv:source:uboot: Added fastboot support; added a high-baud-rate DDR bin for serial firmware upgrades.
sysdrv:source:kernel: Upgraded to version 5.10.160; increased the NPU frequency for RV1106G3; added fastboot support.

Signed-off-by: luckfox-eng29 <eng29@luckfox.com>
--- a/drivers/net/xen-netback/common.h
+++ b/drivers/net/xen-netback/common.h
@@ -48,7 +48,6 @@
 #include <linux/debugfs.h>
 
 typedef unsigned int pending_ring_idx_t;
-#define INVALID_PENDING_RING_IDX (~0U)
 
 struct pending_tx_info {
 	struct xen_netif_tx_request req; /* tx request */
@@ -82,8 +81,6 @@ struct xenvif_rx_meta {
 /* Discriminate from any valid pending_idx value. */
 #define INVALID_PENDING_IDX 0xFFFF
 
-#define MAX_BUFFER_OFFSET XEN_PAGE_SIZE
-
 #define MAX_PENDING_REQS XEN_NETIF_TX_RING_SIZE
 
 /* The maximum number of frags is derived from the size of a grant (same
@@ -367,11 +364,6 @@ void xenvif_free(struct xenvif *vif);
 int xenvif_xenbus_init(void);
 void xenvif_xenbus_fini(void);
 
-int xenvif_schedulable(struct xenvif *vif);
-
-int xenvif_queue_stopped(struct xenvif_queue *queue);
-void xenvif_wake_queue(struct xenvif_queue *queue);
-
 /* (Un)Map communication rings. */
 void xenvif_unmap_frontend_data_rings(struct xenvif_queue *queue);
 int xenvif_map_frontend_data_rings(struct xenvif_queue *queue,
@@ -394,17 +386,13 @@ int xenvif_dealloc_kthread(void *data);
 irqreturn_t xenvif_ctrl_irq_fn(int irq, void *data);
 
 bool xenvif_have_rx_work(struct xenvif_queue *queue, bool test_kthread);
-void xenvif_rx_action(struct xenvif_queue *queue);
-void xenvif_rx_queue_tail(struct xenvif_queue *queue, struct sk_buff *skb);
+bool xenvif_rx_queue_tail(struct xenvif_queue *queue, struct sk_buff *skb);
 
 void xenvif_carrier_on(struct xenvif *vif);
 
 /* Callback from stack when TX packet can be released */
 void xenvif_zerocopy_callback(struct ubuf_info *ubuf, bool zerocopy_success);
 
-/* Unmap a pending page and release it back to the guest */
-void xenvif_idx_unmap(struct xenvif_queue *queue, u16 pending_idx);
-
 static inline pending_ring_idx_t nr_pending_reqs(struct xenvif_queue *queue)
 {
 	return MAX_PENDING_REQS -
--- a/drivers/net/xen-netback/interface.c
+++ b/drivers/net/xen-netback/interface.c
@@ -70,7 +70,7 @@ void xenvif_skb_zerocopy_complete(struct xenvif_queue *queue)
 	wake_up(&queue->dealloc_wq);
 }
 
-int xenvif_schedulable(struct xenvif *vif)
+static int xenvif_schedulable(struct xenvif *vif)
 {
 	return netif_running(vif->dev) &&
 	       test_bit(VIF_STATUS_CONNECTED, &vif->status) &&
@@ -178,20 +178,6 @@ irqreturn_t xenvif_interrupt(int irq, void *dev_id)
 	return IRQ_HANDLED;
 }
 
-int xenvif_queue_stopped(struct xenvif_queue *queue)
-{
-	struct net_device *dev = queue->vif->dev;
-	unsigned int id = queue->id;
-	return netif_tx_queue_stopped(netdev_get_tx_queue(dev, id));
-}
-
-void xenvif_wake_queue(struct xenvif_queue *queue)
-{
-	struct net_device *dev = queue->vif->dev;
-	unsigned int id = queue->id;
-	netif_tx_wake_queue(netdev_get_tx_queue(dev, id));
-}
-
 static u16 xenvif_select_queue(struct net_device *dev, struct sk_buff *skb,
 			       struct net_device *sb_dev)
 {
@@ -269,14 +255,16 @@ xenvif_start_xmit(struct sk_buff *skb, struct net_device *dev)
 	if (vif->hash.alg == XEN_NETIF_CTRL_HASH_ALGORITHM_NONE)
 		skb_clear_hash(skb);
 
-	xenvif_rx_queue_tail(queue, skb);
+	if (!xenvif_rx_queue_tail(queue, skb))
+		goto drop;
+
 	xenvif_kick_thread(queue);
 
 	return NETDEV_TX_OK;
 
 drop:
 	vif->dev->stats.tx_dropped++;
-	dev_kfree_skb(skb);
+	dev_kfree_skb_any(skb);
 	return NETDEV_TX_OK;
 }
 
--- a/drivers/net/xen-netback/netback.c
+++ b/drivers/net/xen-netback/netback.c
@@ -112,6 +112,8 @@ static void make_tx_response(struct xenvif_queue *queue,
 			     s8 st);
 static void push_tx_responses(struct xenvif_queue *queue);
 
+static void xenvif_idx_unmap(struct xenvif_queue *queue, u16 pending_idx);
+
 static inline int tx_work_todo(struct xenvif_queue *queue);
 
 static inline unsigned long idx_to_pfn(struct xenvif_queue *queue,
@@ -330,10 +332,13 @@ static int xenvif_count_requests(struct xenvif_queue *queue,
 
 
 struct xenvif_tx_cb {
-	u16 pending_idx;
+	u16 copy_pending_idx[XEN_NETBK_LEGACY_SLOTS_MAX + 1];
+	u8 copy_count;
 };
 
 #define XENVIF_TX_CB(skb) ((struct xenvif_tx_cb *)(skb)->cb)
+#define copy_pending_idx(skb, i) (XENVIF_TX_CB(skb)->copy_pending_idx[i])
+#define copy_count(skb) (XENVIF_TX_CB(skb)->copy_count)
 
 static inline void xenvif_tx_create_map_op(struct xenvif_queue *queue,
 					   u16 pending_idx,
@@ -368,31 +373,93 @@ static inline struct sk_buff *xenvif_alloc_skb(unsigned int size)
 	return skb;
 }
 
-static struct gnttab_map_grant_ref *xenvif_get_requests(struct xenvif_queue *queue,
-							struct sk_buff *skb,
-							struct xen_netif_tx_request *txp,
-							struct gnttab_map_grant_ref *gop,
-							unsigned int frag_overflow,
-							struct sk_buff *nskb)
+static void xenvif_get_requests(struct xenvif_queue *queue,
+				struct sk_buff *skb,
+				struct xen_netif_tx_request *first,
+				struct xen_netif_tx_request *txfrags,
+				unsigned *copy_ops,
+				unsigned *map_ops,
+				unsigned int frag_overflow,
+				struct sk_buff *nskb,
+				unsigned int extra_count,
+				unsigned int data_len)
 {
 	struct skb_shared_info *shinfo = skb_shinfo(skb);
 	skb_frag_t *frags = shinfo->frags;
-	u16 pending_idx = XENVIF_TX_CB(skb)->pending_idx;
-	int start;
+	u16 pending_idx;
 	pending_ring_idx_t index;
 	unsigned int nr_slots;
+	struct gnttab_copy *cop = queue->tx_copy_ops + *copy_ops;
+	struct gnttab_map_grant_ref *gop = queue->tx_map_ops + *map_ops;
+	struct xen_netif_tx_request *txp = first;
 
-	nr_slots = shinfo->nr_frags;
+	nr_slots = shinfo->nr_frags + 1;
 
-	/* Skip first skb fragment if it is on same page as header fragment. */
-	start = (frag_get_pending_idx(&shinfo->frags[0]) == pending_idx);
+	copy_count(skb) = 0;
 
-	for (shinfo->nr_frags = start; shinfo->nr_frags < nr_slots;
-	     shinfo->nr_frags++, txp++, gop++) {
+	/* Create copy ops for exactly data_len bytes into the skb head. */
+	__skb_put(skb, data_len);
+	while (data_len > 0) {
+		int amount = data_len > txp->size ? txp->size : data_len;
+
+		cop->source.u.ref = txp->gref;
+		cop->source.domid = queue->vif->domid;
+		cop->source.offset = txp->offset;
+
+		cop->dest.domid = DOMID_SELF;
+		cop->dest.offset = (offset_in_page(skb->data +
+						   skb_headlen(skb) -
+						   data_len)) & ~XEN_PAGE_MASK;
+		cop->dest.u.gmfn = virt_to_gfn(skb->data + skb_headlen(skb)
+					       - data_len);
+
+		cop->len = amount;
+		cop->flags = GNTCOPY_source_gref;
+
+		index = pending_index(queue->pending_cons);
+		pending_idx = queue->pending_ring[index];
+		callback_param(queue, pending_idx).ctx = NULL;
+		copy_pending_idx(skb, copy_count(skb)) = pending_idx;
+		copy_count(skb)++;
+
+		cop++;
+		data_len -= amount;
+
+		if (amount == txp->size) {
+			/* The copy op covered the full tx_request */
+
+			memcpy(&queue->pending_tx_info[pending_idx].req,
+			       txp, sizeof(*txp));
+			queue->pending_tx_info[pending_idx].extra_count =
+				(txp == first) ? extra_count : 0;
+
+			if (txp == first)
+				txp = txfrags;
+			else
+				txp++;
+			queue->pending_cons++;
+			nr_slots--;
+		} else {
+			/* The copy op partially covered the tx_request.
+			 * The remainder will be mapped.
+			 */
+			txp->offset += amount;
+			txp->size -= amount;
+		}
+	}
+
+	for (shinfo->nr_frags = 0; shinfo->nr_frags < nr_slots;
+	     shinfo->nr_frags++, gop++) {
 		index = pending_index(queue->pending_cons++);
 		pending_idx = queue->pending_ring[index];
-		xenvif_tx_create_map_op(queue, pending_idx, txp, 0, gop);
+		xenvif_tx_create_map_op(queue, pending_idx, txp,
+					txp == first ? extra_count : 0, gop);
 		frag_set_pending_idx(&frags[shinfo->nr_frags], pending_idx);
+
+		if (txp == first)
+			txp = txfrags;
+		else
+			txp++;
 	}
 
 	if (frag_overflow) {
@@ -413,7 +480,8 @@ static struct gnttab_map_grant_ref *xenvif_get_requests(struct xenvif_queue *que
 		skb_shinfo(skb)->frag_list = nskb;
 	}
 
-	return gop;
+	(*copy_ops) = cop - queue->tx_copy_ops;
+	(*map_ops) = gop - queue->tx_map_ops;
 }
 
 static inline void xenvif_grant_handle_set(struct xenvif_queue *queue,
@@ -449,7 +517,7 @@ static int xenvif_tx_check_gop(struct xenvif_queue *queue,
 			       struct gnttab_copy **gopp_copy)
 {
 	struct gnttab_map_grant_ref *gop_map = *gopp_map;
-	u16 pending_idx = XENVIF_TX_CB(skb)->pending_idx;
+	u16 pending_idx;
 	/* This always points to the shinfo of the skb being checked, which
 	 * could be either the first or the one on the frag_list
 	 */
@@ -460,24 +528,37 @@ static int xenvif_tx_check_gop(struct xenvif_queue *queue,
 	struct skb_shared_info *first_shinfo = NULL;
 	int nr_frags = shinfo->nr_frags;
 	const bool sharedslot = nr_frags &&
-				frag_get_pending_idx(&shinfo->frags[0]) == pending_idx;
-	int i, err;
+				frag_get_pending_idx(&shinfo->frags[0]) ==
+				 copy_pending_idx(skb, copy_count(skb) - 1);
+	int i, err = 0;
 
-	/* Check status of header. */
-	err = (*gopp_copy)->status;
-	if (unlikely(err)) {
-		if (net_ratelimit())
-			netdev_dbg(queue->vif->dev,
-				   "Grant copy of header failed! status: %d pending_idx: %u ref: %u\n",
-				   (*gopp_copy)->status,
-				   pending_idx,
-				   (*gopp_copy)->source.u.ref);
-		/* The first frag might still have this slot mapped */
-		if (!sharedslot)
-			xenvif_idx_release(queue, pending_idx,
-					   XEN_NETIF_RSP_ERROR);
-	}
-	(*gopp_copy)++;
+	for (i = 0; i < copy_count(skb); i++) {
+		int newerr;
+
+		/* Check status of header. */
+		pending_idx = copy_pending_idx(skb, i);
+
+		newerr = (*gopp_copy)->status;
+		if (likely(!newerr)) {
+			/* The first frag might still have this slot mapped */
+			if (i < copy_count(skb) - 1 || !sharedslot)
+				xenvif_idx_release(queue, pending_idx,
+						   XEN_NETIF_RSP_OKAY);
+		} else {
+			err = newerr;
+			if (net_ratelimit())
+				netdev_dbg(queue->vif->dev,
+					   "Grant copy of header failed! status: %d pending_idx: %u ref: %u\n",
+					   (*gopp_copy)->status,
+					   pending_idx,
+					   (*gopp_copy)->source.u.ref);
+			/* The first frag might still have this slot mapped */
+			if (i < copy_count(skb) - 1 || !sharedslot)
+				xenvif_idx_release(queue, pending_idx,
+						   XEN_NETIF_RSP_ERROR);
+		}
+		(*gopp_copy)++;
+	}
 
 check_frags:
 	for (i = 0; i < nr_frags; i++, gop_map++) {
@@ -524,14 +605,6 @@ check_frags:
 		if (err)
 			continue;
 
-		/* First error: if the header haven't shared a slot with the
-		 * first frag, release it as well.
-		 */
-		if (!sharedslot)
-			xenvif_idx_release(queue,
-					   XENVIF_TX_CB(skb)->pending_idx,
-					   XEN_NETIF_RSP_OKAY);
-
 		/* Invalidate preceding fragments of this skb. */
 		for (j = 0; j < i; j++) {
 			pending_idx = frag_get_pending_idx(&shinfo->frags[j]);
@@ -801,7 +874,6 @@ static void xenvif_tx_build_gops(struct xenvif_queue *queue,
 				 unsigned *copy_ops,
 				 unsigned *map_ops)
 {
-	struct gnttab_map_grant_ref *gop = queue->tx_map_ops;
 	struct sk_buff *skb, *nskb;
 	int ret;
 	unsigned int frag_overflow;
@@ -883,8 +955,12 @@ static void xenvif_tx_build_gops(struct xenvif_queue *queue,
 			continue;
 		}
 
+		data_len = (txreq.size > XEN_NETBACK_TX_COPY_LEN) ?
+			XEN_NETBACK_TX_COPY_LEN : txreq.size;
+
 		ret = xenvif_count_requests(queue, &txreq, extra_count,
 					    txfrags, work_to_do);
+
 		if (unlikely(ret < 0))
 			break;
 
@@ -910,9 +986,8 @@ static void xenvif_tx_build_gops(struct xenvif_queue *queue,
 		index = pending_index(queue->pending_cons);
 		pending_idx = queue->pending_ring[index];
 
-		data_len = (txreq.size > XEN_NETBACK_TX_COPY_LEN &&
-			    ret < XEN_NETBK_LEGACY_SLOTS_MAX) ?
-			XEN_NETBACK_TX_COPY_LEN : txreq.size;
+		if (ret >= XEN_NETBK_LEGACY_SLOTS_MAX - 1 && data_len < txreq.size)
+			data_len = txreq.size;
 
 		skb = xenvif_alloc_skb(data_len);
 		if (unlikely(skb == NULL)) {
@@ -923,8 +998,6 @@ static void xenvif_tx_build_gops(struct xenvif_queue *queue,
 		}
 
 		skb_shinfo(skb)->nr_frags = ret;
-		if (data_len < txreq.size)
-			skb_shinfo(skb)->nr_frags++;
 		/* At this point shinfo->nr_frags is in fact the number of
 		 * slots, which can be as large as XEN_NETBK_LEGACY_SLOTS_MAX.
 		 */
@@ -986,54 +1059,19 @@ static void xenvif_tx_build_gops(struct xenvif_queue *queue,
 					     type);
 		}
 
-		XENVIF_TX_CB(skb)->pending_idx = pending_idx;
-
-		__skb_put(skb, data_len);
-		queue->tx_copy_ops[*copy_ops].source.u.ref = txreq.gref;
-		queue->tx_copy_ops[*copy_ops].source.domid = queue->vif->domid;
-		queue->tx_copy_ops[*copy_ops].source.offset = txreq.offset;
-
-		queue->tx_copy_ops[*copy_ops].dest.u.gmfn =
-			virt_to_gfn(skb->data);
-		queue->tx_copy_ops[*copy_ops].dest.domid = DOMID_SELF;
-		queue->tx_copy_ops[*copy_ops].dest.offset =
-			offset_in_page(skb->data) & ~XEN_PAGE_MASK;
-
-		queue->tx_copy_ops[*copy_ops].len = data_len;
-		queue->tx_copy_ops[*copy_ops].flags = GNTCOPY_source_gref;
-
-		(*copy_ops)++;
-
-		if (data_len < txreq.size) {
-			frag_set_pending_idx(&skb_shinfo(skb)->frags[0],
-					     pending_idx);
-			xenvif_tx_create_map_op(queue, pending_idx, &txreq,
-						extra_count, gop);
-			gop++;
-		} else {
-			frag_set_pending_idx(&skb_shinfo(skb)->frags[0],
-					     INVALID_PENDING_IDX);
-			memcpy(&queue->pending_tx_info[pending_idx].req,
-			       &txreq, sizeof(txreq));
-			queue->pending_tx_info[pending_idx].extra_count =
-				extra_count;
-		}
-
-		queue->pending_cons++;
-
-		gop = xenvif_get_requests(queue, skb, txfrags, gop,
-					  frag_overflow, nskb);
+		xenvif_get_requests(queue, skb, &txreq, txfrags, copy_ops,
+				    map_ops, frag_overflow, nskb, extra_count,
+				    data_len);
 
 		__skb_queue_tail(&queue->tx_queue, skb);
 
 		queue->tx.req_cons = idx;
 
-		if (((gop-queue->tx_map_ops) >= ARRAY_SIZE(queue->tx_map_ops)) ||
+		if ((*map_ops >= ARRAY_SIZE(queue->tx_map_ops)) ||
 		    (*copy_ops >= ARRAY_SIZE(queue->tx_copy_ops)))
 			break;
 	}
 
-	(*map_ops) = gop - queue->tx_map_ops;
 	return;
 }
@@ -1112,9 +1150,8 @@ static int xenvif_tx_submit(struct xenvif_queue *queue)
 	while ((skb = __skb_dequeue(&queue->tx_queue)) != NULL) {
 		struct xen_netif_tx_request *txp;
 		u16 pending_idx;
-		unsigned data_len;
 
-		pending_idx = XENVIF_TX_CB(skb)->pending_idx;
+		pending_idx = copy_pending_idx(skb, 0);
 		txp = &queue->pending_tx_info[pending_idx].req;
 
 		/* Check the remap error code. */
@@ -1133,18 +1170,6 @@ static int xenvif_tx_submit(struct xenvif_queue *queue)
 			continue;
 		}
 
-		data_len = skb->len;
-		callback_param(queue, pending_idx).ctx = NULL;
-		if (data_len < txp->size) {
-			/* Append the packet payload as a fragment. */
-			txp->offset += data_len;
-			txp->size -= data_len;
-		} else {
-			/* Schedule a response immediately. */
-			xenvif_idx_release(queue, pending_idx,
-					   XEN_NETIF_RSP_OKAY);
-		}
-
 		if (txp->flags & XEN_NETTXF_csum_blank)
 			skb->ip_summed = CHECKSUM_PARTIAL;
 		else if (txp->flags & XEN_NETTXF_data_validated)
@@ -1330,7 +1355,7 @@ static inline void xenvif_tx_dealloc_action(struct xenvif_queue *queue)
 /* Called after netfront has transmitted */
 int xenvif_tx_action(struct xenvif_queue *queue, int budget)
 {
-	unsigned nr_mops, nr_cops = 0;
+	unsigned nr_mops = 0, nr_cops = 0;
 	int work_done, ret;
 
 	if (unlikely(!tx_work_todo(queue)))
@@ -1417,7 +1442,7 @@ static void push_tx_responses(struct xenvif_queue *queue)
 		notify_remote_via_irq(queue->tx_irq);
 }
 
-void xenvif_idx_unmap(struct xenvif_queue *queue, u16 pending_idx)
+static void xenvif_idx_unmap(struct xenvif_queue *queue, u16 pending_idx)
 {
 	int ret;
 	struct gnttab_unmap_grant_ref tx_unmap_op;
--- a/drivers/net/xen-netback/rx.c
+++ b/drivers/net/xen-netback/rx.c
@@ -82,9 +82,10 @@ static bool xenvif_rx_ring_slots_available(struct xenvif_queue *queue)
 	return false;
 }
 
-void xenvif_rx_queue_tail(struct xenvif_queue *queue, struct sk_buff *skb)
+bool xenvif_rx_queue_tail(struct xenvif_queue *queue, struct sk_buff *skb)
 {
 	unsigned long flags;
+	bool ret = true;
 
 	spin_lock_irqsave(&queue->rx_queue.lock, flags);
 
@@ -92,8 +93,7 @@ void xenvif_rx_queue_tail(struct xenvif_queue *queue, struct sk_buff *skb)
 		struct net_device *dev = queue->vif->dev;
 
 		netif_tx_stop_queue(netdev_get_tx_queue(dev, queue->id));
-		kfree_skb(skb);
-		queue->vif->dev->stats.rx_dropped++;
+		ret = false;
 	} else {
 		if (skb_queue_empty(&queue->rx_queue))
 			xenvif_update_needed_slots(queue, skb);
@@ -104,6 +104,8 @@ void xenvif_rx_queue_tail(struct xenvif_queue *queue, struct sk_buff *skb)
 	}
 
 	spin_unlock_irqrestore(&queue->rx_queue.lock, flags);
+
+	return ret;
 }
 
 static struct sk_buff *xenvif_rx_dequeue(struct xenvif_queue *queue)
@@ -486,7 +488,7 @@ static void xenvif_rx_skb(struct xenvif_queue *queue)
 
 #define RX_BATCH_SIZE 64
 
-void xenvif_rx_action(struct xenvif_queue *queue)
+static void xenvif_rx_action(struct xenvif_queue *queue)
 {
 	struct sk_buff_head completed_skbs;
 	unsigned int work_done = 0;
@@ -495,6 +497,7 @@ void xenvif_rx_action(struct xenvif_queue *queue)
 	queue->rx_copy.completed = &completed_skbs;
 
 	while (xenvif_rx_ring_slots_available(queue) &&
+	       !skb_queue_empty(&queue->rx_queue) &&
 	       work_done < RX_BATCH_SIZE) {
 		xenvif_rx_skb(queue);
 		work_done++;
--- a/drivers/net/xen-netback/xenbus.c
+++ b/drivers/net/xen-netback/xenbus.c
@@ -256,7 +256,6 @@ static void backend_disconnect(struct backend_info *be)
 		unsigned int queue_index;
 
 		xen_unregister_watchers(vif);
-		xenbus_rm(XBT_NIL, be->dev->nodename, "hotplug-status");
 #ifdef CONFIG_DEBUG_FS
 		xenvif_debugfs_delif(vif);
 #endif /* CONFIG_DEBUG_FS */
@@ -984,6 +983,7 @@ static int netback_remove(struct xenbus_device *dev)
 	struct backend_info *be = dev_get_drvdata(&dev->dev);
 
 	unregister_hotplug_status_watch(be);
+	xenbus_rm(XBT_NIL, dev->nodename, "hotplug-status");
 	if (be->vif) {
 		kobject_uevent(&dev->dev.kobj, KOBJ_OFFLINE);
 		backend_disconnect(be);