Skip to content

Commit

Permalink
tgupdate: merge t/mptcp-add-bpf_mptcp_sched_ops base into t/mptcp-add-bpf_mptcp_sched_ops
Browse files Browse the repository at this point in the history
  • Loading branch information
jenkins-tessares committed Dec 19, 2023
2 parents 5fed391 + 3dd6105 commit 5763f7d
Show file tree
Hide file tree
Showing 9 changed files with 66 additions and 98 deletions.
5 changes: 4 additions & 1 deletion drivers/net/ethernet/atheros/atl1e/atl1e_main.c
Original file line number Diff line number Diff line change
Expand Up @@ -866,10 +866,13 @@ static int atl1e_setup_ring_resources(struct atl1e_adapter *adapter)
netdev_err(adapter->netdev, "offset(%d) > ring size(%d) !!\n",
offset, adapter->ring_size);
err = -1;
goto failed;
goto free_buffer;
}

return 0;
free_buffer:
kfree(tx_ring->tx_buffer);
tx_ring->tx_buffer = NULL;
failed:
if (adapter->ring_vir_addr != NULL) {
dma_free_coherent(&pdev->dev, adapter->ring_size,
Expand Down
11 changes: 2 additions & 9 deletions drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c
Original file line number Diff line number Diff line change
Expand Up @@ -59,7 +59,6 @@ struct bnxt_sw_tx_bd *bnxt_xmit_bd(struct bnxt *bp,
for (i = 0; i < num_frags ; i++) {
skb_frag_t *frag = &sinfo->frags[i];
struct bnxt_sw_tx_bd *frag_tx_buf;
struct pci_dev *pdev = bp->pdev;
dma_addr_t frag_mapping;
int frag_len;

Expand All @@ -73,16 +72,10 @@ struct bnxt_sw_tx_bd *bnxt_xmit_bd(struct bnxt *bp,
txbd = &txr->tx_desc_ring[TX_RING(bp, prod)][TX_IDX(prod)];

frag_len = skb_frag_size(frag);
frag_mapping = skb_frag_dma_map(&pdev->dev, frag, 0,
frag_len, DMA_TO_DEVICE);

if (unlikely(dma_mapping_error(&pdev->dev, frag_mapping)))
return NULL;

dma_unmap_addr_set(frag_tx_buf, mapping, frag_mapping);

flags = frag_len << TX_BD_LEN_SHIFT;
txbd->tx_bd_len_flags_type = cpu_to_le32(flags);
frag_mapping = page_pool_get_dma_addr(skb_frag_page(frag)) +
skb_frag_off(frag);
txbd->tx_bd_haddr = cpu_to_le64(frag_mapping);

len = frag_len;
Expand Down
17 changes: 14 additions & 3 deletions drivers/net/ethernet/marvell/octeontx2/nic/otx2_dcbnl.c
Original file line number Diff line number Diff line change
Expand Up @@ -399,9 +399,10 @@ static int otx2_dcbnl_ieee_getpfc(struct net_device *dev, struct ieee_pfc *pfc)
static int otx2_dcbnl_ieee_setpfc(struct net_device *dev, struct ieee_pfc *pfc)
{
struct otx2_nic *pfvf = netdev_priv(dev);
u8 old_pfc_en;
int err;

/* Save PFC configuration to interface */
old_pfc_en = pfvf->pfc_en;
pfvf->pfc_en = pfc->pfc_en;

if (pfvf->hw.tx_queues >= NIX_PF_PFC_PRIO_MAX)
Expand All @@ -411,20 +412,30 @@ static int otx2_dcbnl_ieee_setpfc(struct net_device *dev, struct ieee_pfc *pfc)
* supported by the tx queue configuration
*/
err = otx2_check_pfc_config(pfvf);
if (err)
if (err) {
pfvf->pfc_en = old_pfc_en;
return err;
}

process_pfc:
err = otx2_config_priority_flow_ctrl(pfvf);
if (err)
if (err) {
pfvf->pfc_en = old_pfc_en;
return err;
}

/* Request Per channel Bpids */
if (pfc->pfc_en)
otx2_nix_config_bp(pfvf, true);

err = otx2_pfc_txschq_update(pfvf);
if (err) {
if (pfc->pfc_en)
otx2_nix_config_bp(pfvf, false);

otx2_pfc_txschq_stop(pfvf);
pfvf->pfc_en = old_pfc_en;
otx2_config_priority_flow_ctrl(pfvf);
dev_err(pfvf->dev, "%s failed to update TX schedulers\n", __func__);
return err;
}
Expand Down
82 changes: 6 additions & 76 deletions drivers/net/ethernet/wangxun/libwx/wx_lib.c
Original file line number Diff line number Diff line change
Expand Up @@ -160,60 +160,6 @@ static __le32 wx_test_staterr(union wx_rx_desc *rx_desc,
return rx_desc->wb.upper.status_error & cpu_to_le32(stat_err_bits);
}

static bool wx_can_reuse_rx_page(struct wx_rx_buffer *rx_buffer,
				 int rx_buffer_pgcnt)
{
	struct page *page = rx_buffer->page;
	unsigned int bias = rx_buffer->pagecnt_bias;

	/* Remote-node and pfmemalloc pages must never be recycled. */
	if (!dev_page_is_reusable(page))
		return false;

#if (PAGE_SIZE < 8192)
	/* Only reuse the page while the driver holds the sole reference. */
	if (unlikely((rx_buffer_pgcnt - bias) > 1))
		return false;
#endif

	/* When the fragment budget is nearly drained, bulk-add references
	 * so the driver fully restocks its share of the page refcount.
	 */
	if (unlikely(bias == 1)) {
		page_ref_add(page, USHRT_MAX - 1);
		rx_buffer->pagecnt_bias = USHRT_MAX;
	}

	return true;
}

/**
 * wx_reuse_rx_page - page flip buffer and store it back on the ring
 * @rx_ring: rx descriptor ring to store buffers on
 * @old_buff: donor buffer to have page reused
 *
 * Synchronizes page for reuse by the adapter
 **/
static void wx_reuse_rx_page(struct wx_ring *rx_ring,
			     struct wx_rx_buffer *old_buff)
{
	struct wx_rx_buffer *new_buff;
	u16 nta = rx_ring->next_to_alloc;

	new_buff = &rx_ring->rx_buffer_info[nta];

	/* bump next-to-alloc, wrapping back to 0 at the end of the ring */
	nta++;
	if (nta >= rx_ring->count)
		nta = 0;
	rx_ring->next_to_alloc = nta;

	/* hand the donor page (and its DMA state) to the new slot */
	new_buff->page = old_buff->page;
	new_buff->page_dma = old_buff->page_dma;
	new_buff->page_offset = old_buff->page_offset;
	new_buff->pagecnt_bias = old_buff->pagecnt_bias;
}

static void wx_dma_sync_frag(struct wx_ring *rx_ring,
struct wx_rx_buffer *rx_buffer)
{
Expand Down Expand Up @@ -270,8 +216,6 @@ static struct wx_rx_buffer *wx_get_rx_buffer(struct wx_ring *rx_ring,
size,
DMA_FROM_DEVICE);
skip_sync:
rx_buffer->pagecnt_bias--;

return rx_buffer;
}

Expand All @@ -280,19 +224,9 @@ static void wx_put_rx_buffer(struct wx_ring *rx_ring,
struct sk_buff *skb,
int rx_buffer_pgcnt)
{
if (wx_can_reuse_rx_page(rx_buffer, rx_buffer_pgcnt)) {
/* hand second half of page back to the ring */
wx_reuse_rx_page(rx_ring, rx_buffer);
} else {
if (!IS_ERR(skb) && WX_CB(skb)->dma == rx_buffer->dma)
/* the page has been released from the ring */
WX_CB(skb)->page_released = true;
else
page_pool_put_full_page(rx_ring->page_pool, rx_buffer->page, false);

__page_frag_cache_drain(rx_buffer->page,
rx_buffer->pagecnt_bias);
}
if (!IS_ERR(skb) && WX_CB(skb)->dma == rx_buffer->dma)
/* the page has been released from the ring */
WX_CB(skb)->page_released = true;

/* clear contents of rx_buffer */
rx_buffer->page = NULL;
Expand Down Expand Up @@ -335,11 +269,12 @@ static struct sk_buff *wx_build_skb(struct wx_ring *rx_ring,
if (size <= WX_RXBUFFER_256) {
memcpy(__skb_put(skb, size), page_addr,
ALIGN(size, sizeof(long)));
rx_buffer->pagecnt_bias++;

page_pool_put_full_page(rx_ring->page_pool, rx_buffer->page, true);
return skb;
}

skb_mark_for_recycle(skb);

if (!wx_test_staterr(rx_desc, WX_RXD_STAT_EOP))
WX_CB(skb)->dma = rx_buffer->dma;

Expand Down Expand Up @@ -382,8 +317,6 @@ static bool wx_alloc_mapped_page(struct wx_ring *rx_ring,
bi->page_dma = dma;
bi->page = page;
bi->page_offset = 0;
page_ref_add(page, USHRT_MAX - 1);
bi->pagecnt_bias = USHRT_MAX;

return true;
}
Expand Down Expand Up @@ -723,7 +656,6 @@ static int wx_clean_rx_irq(struct wx_q_vector *q_vector,
/* exit if we failed to retrieve a buffer */
if (!skb) {
rx_ring->rx_stats.alloc_rx_buff_failed++;
rx_buffer->pagecnt_bias++;
break;
}

Expand Down Expand Up @@ -2248,8 +2180,6 @@ static void wx_clean_rx_ring(struct wx_ring *rx_ring)

/* free resources associated with mapping */
page_pool_put_full_page(rx_ring->page_pool, rx_buffer->page, false);
__page_frag_cache_drain(rx_buffer->page,
rx_buffer->pagecnt_bias);

i++;
rx_buffer++;
Expand Down
1 change: 0 additions & 1 deletion drivers/net/ethernet/wangxun/libwx/wx_type.h
Original file line number Diff line number Diff line change
Expand Up @@ -787,7 +787,6 @@ struct wx_rx_buffer {
dma_addr_t page_dma;
struct page *page;
unsigned int page_offset;
u16 pagecnt_bias;
};

struct wx_queue_stats {
Expand Down
6 changes: 4 additions & 2 deletions drivers/net/phy/phy_device.c
Original file line number Diff line number Diff line change
Expand Up @@ -1550,7 +1550,8 @@ int phy_attach_direct(struct net_device *dev, struct phy_device *phydev,
goto error;

phy_resume(phydev);
phy_led_triggers_register(phydev);
if (!phydev->is_on_sfp_module)
phy_led_triggers_register(phydev);

/**
* If the external phy used by current mac interface is managed by
Expand Down Expand Up @@ -1822,7 +1823,8 @@ void phy_detach(struct phy_device *phydev)
}
phydev->phylink = NULL;

phy_led_triggers_unregister(phydev);
if (!phydev->is_on_sfp_module)
phy_led_triggers_unregister(phydev);

if (phydev->mdio.dev.driver)
module_put(phydev->mdio.dev.driver->owner);
Expand Down
2 changes: 1 addition & 1 deletion net/core/stream.c
Original file line number Diff line number Diff line change
Expand Up @@ -79,7 +79,7 @@ int sk_stream_wait_connect(struct sock *sk, long *timeo_p)
remove_wait_queue(sk_sleep(sk), &wait);
sk->sk_write_pending--;
} while (!done);
return 0;
return done < 0 ? done : 0;
}
EXPORT_SYMBOL(sk_stream_wait_connect);

Expand Down
1 change: 1 addition & 0 deletions net/ife/ife.c
Original file line number Diff line number Diff line change
Expand Up @@ -82,6 +82,7 @@ void *ife_decode(struct sk_buff *skb, u16 *metalen)
if (unlikely(!pskb_may_pull(skb, total_pull)))
return NULL;

ifehdr = (struct ifeheadr *)(skb->data + skb->dev->hard_header_len);
skb_set_mac_header(skb, total_pull);
__skb_pull(skb, total_pull);
*metalen = ifehdrln - IFE_METAHDRLEN;
Expand Down
39 changes: 34 additions & 5 deletions net/rose/af_rose.c
Original file line number Diff line number Diff line change
Expand Up @@ -182,21 +182,47 @@ void rose_kill_by_neigh(struct rose_neigh *neigh)
*/
static void rose_kill_by_device(struct net_device *dev)
{
struct sock *s;
struct sock *sk, *array[16];
struct rose_sock *rose;
bool rescan;
int i, cnt;

start:
rescan = false;
cnt = 0;
spin_lock_bh(&rose_list_lock);
sk_for_each(s, &rose_list) {
struct rose_sock *rose = rose_sk(s);
sk_for_each(sk, &rose_list) {
rose = rose_sk(sk);
if (rose->device == dev) {
if (cnt == ARRAY_SIZE(array)) {
rescan = true;
break;
}
sock_hold(sk);
array[cnt++] = sk;
}
}
spin_unlock_bh(&rose_list_lock);

for (i = 0; i < cnt; i++) {
sk = array[cnt];
rose = rose_sk(sk);
lock_sock(sk);
spin_lock_bh(&rose_list_lock);
if (rose->device == dev) {
rose_disconnect(s, ENETUNREACH, ROSE_OUT_OF_ORDER, 0);
rose_disconnect(sk, ENETUNREACH, ROSE_OUT_OF_ORDER, 0);
if (rose->neighbour)
rose->neighbour->use--;
netdev_put(rose->device, &rose->dev_tracker);
rose->device = NULL;
}
spin_unlock_bh(&rose_list_lock);
release_sock(sk);
sock_put(sk);
cond_resched();
}
spin_unlock_bh(&rose_list_lock);
if (rescan)
goto start;
}

/*
Expand Down Expand Up @@ -656,7 +682,10 @@ static int rose_release(struct socket *sock)
break;
}

spin_lock_bh(&rose_list_lock);
netdev_put(rose->device, &rose->dev_tracker);
rose->device = NULL;
spin_unlock_bh(&rose_list_lock);
sock->sk = NULL;
release_sock(sk);
sock_put(sk);
Expand Down

0 comments on commit 5763f7d

Please sign in to comment.