Diffstat (limited to 'drivers/net/ethernet/amd/xgbe/xgbe-drv.c')
-rw-r--r-- drivers/net/ethernet/amd/xgbe/xgbe-drv.c | 106
1 file changed, 66 insertions(+), 40 deletions(-)
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
index 53ce1222b11d..64034ff081a0 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
@@ -877,7 +877,9 @@ static int xgbe_start(struct xgbe_prv_data *pdata)
 
 	DBGPR("-->xgbe_start\n");
 
-	hw_if->init(pdata);
+	ret = hw_if->init(pdata);
+	if (ret)
+		return ret;
 
 	ret = phy_if->phy_start(pdata);
 	if (ret)
@@ -1760,13 +1762,12 @@ static struct sk_buff *xgbe_create_skb(struct xgbe_prv_data *pdata,
 {
 	struct sk_buff *skb;
 	u8 *packet;
-	unsigned int copy_len;
 
 	skb = napi_alloc_skb(napi, rdata->rx.hdr.dma_len);
 	if (!skb)
 		return NULL;
 
-	/* Start with the header buffer which may contain just the header
+	/* Pull in the header buffer which may contain just the header
 	 * or the header plus data
 	 */
 	dma_sync_single_range_for_cpu(pdata->dev, rdata->rx.hdr.dma_base,
@@ -1775,30 +1776,49 @@ static struct sk_buff *xgbe_create_skb(struct xgbe_prv_data *pdata,
 
 	packet = page_address(rdata->rx.hdr.pa.pages) +
 		 rdata->rx.hdr.pa.pages_offset;
-	copy_len = (rdata->rx.hdr_len) ? rdata->rx.hdr_len : len;
-	copy_len = min(rdata->rx.hdr.dma_len, copy_len);
-	skb_copy_to_linear_data(skb, packet, copy_len);
-	skb_put(skb, copy_len);
-
-	len -= copy_len;
-	if (len) {
-		/* Add the remaining data as a frag */
-		dma_sync_single_range_for_cpu(pdata->dev,
-					      rdata->rx.buf.dma_base,
-					      rdata->rx.buf.dma_off,
-					      rdata->rx.buf.dma_len,
-					      DMA_FROM_DEVICE);
-
-		skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
-				rdata->rx.buf.pa.pages,
-				rdata->rx.buf.pa.pages_offset,
-				len, rdata->rx.buf.dma_len);
-		rdata->rx.buf.pa.pages = NULL;
-	}
+	skb_copy_to_linear_data(skb, packet, len);
+	skb_put(skb, len);
 
 	return skb;
 }
 
+static unsigned int xgbe_rx_buf1_len(struct xgbe_ring_data *rdata,
+				     struct xgbe_packet_data *packet)
+{
+	/* Always zero if not the first descriptor */
+	if (!XGMAC_GET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES, FIRST))
+		return 0;
+
+	/* First descriptor with split header, return header length */
+	if (rdata->rx.hdr_len)
+		return rdata->rx.hdr_len;
+
+	/* First descriptor but not the last descriptor and no split header,
+	 * so the full buffer was used
+	 */
+	if (!XGMAC_GET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES, LAST))
+		return rdata->rx.hdr.dma_len;
+
+	/* First descriptor and last descriptor and no split header, so
+	 * calculate how much of the buffer was used
+	 */
+	return min_t(unsigned int, rdata->rx.hdr.dma_len, rdata->rx.len);
+}
+
+static unsigned int xgbe_rx_buf2_len(struct xgbe_ring_data *rdata,
+				     struct xgbe_packet_data *packet,
+				     unsigned int len)
+{
+	/* Always the full buffer if not the last descriptor */
+	if (!XGMAC_GET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES, LAST))
+		return rdata->rx.buf.dma_len;
+
+	/* Last descriptor so calculate how much of the buffer was used
+	 * for the last bit of data
+	 */
+	return rdata->rx.len - len;
+}
+
 static int xgbe_tx_poll(struct xgbe_channel *channel)
 {
 	struct xgbe_prv_data *pdata = channel->pdata;
@@ -1881,8 +1901,8 @@ static int xgbe_rx_poll(struct xgbe_channel *channel, int budget)
 	struct napi_struct *napi;
 	struct sk_buff *skb;
 	struct skb_shared_hwtstamps *hwtstamps;
-	unsigned int incomplete, error, context_next, context;
-	unsigned int len, rdesc_len, max_len;
+	unsigned int last, error, context_next, context;
+	unsigned int len, buf1_len, buf2_len, max_len;
 	unsigned int received = 0;
 	int packet_count = 0;
 
@@ -1892,7 +1912,7 @@ static int xgbe_rx_poll(struct xgbe_channel *channel, int budget)
 	if (!ring)
 		return 0;
 
-	incomplete = 0;
+	last = 0;
 	context_next = 0;
 
 	napi = (pdata->per_channel_irq) ? &channel->napi : &pdata->napi;
@@ -1926,9 +1946,8 @@ read_again:
 		received++;
 		ring->cur++;
 
-		incomplete = XGMAC_GET_BITS(packet->attributes,
-					    RX_PACKET_ATTRIBUTES,
-					    INCOMPLETE);
+		last = XGMAC_GET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES,
+				      LAST);
 		context_next = XGMAC_GET_BITS(packet->attributes,
 					      RX_PACKET_ATTRIBUTES,
 					      CONTEXT_NEXT);
@@ -1937,7 +1956,7 @@ read_again:
 					      CONTEXT);
 
 		/* Earlier error, just drain the remaining data */
-		if ((incomplete || context_next) && error)
+		if ((!last || context_next) && error)
 			goto read_again;
 
 		if (error || packet->errors) {
@@ -1949,16 +1968,22 @@ read_again:
 		}
 
 		if (!context) {
-			/* Length is cumulative, get this descriptor's length */
-			rdesc_len = rdata->rx.len - len;
-			len += rdesc_len;
+			/* Get the data length in the descriptor buffers */
+			buf1_len = xgbe_rx_buf1_len(rdata, packet);
+			len += buf1_len;
+			buf2_len = xgbe_rx_buf2_len(rdata, packet, len);
+			len += buf2_len;
 
-			if (rdesc_len && !skb) {
+			if (!skb) {
 				skb = xgbe_create_skb(pdata, napi, rdata,
-						      rdesc_len);
-				if (!skb)
+						      buf1_len);
+				if (!skb) {
 					error = 1;
-			} else if (rdesc_len) {
+					goto skip_data;
+				}
+			}
+
+			if (buf2_len) {
 				dma_sync_single_range_for_cpu(pdata->dev,
 							      rdata->rx.buf.dma_base,
 							      rdata->rx.buf.dma_off,
@@ -1968,13 +1993,14 @@ read_again:
 				skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
 						rdata->rx.buf.pa.pages,
 						rdata->rx.buf.pa.pages_offset,
-						rdesc_len,
+						buf2_len,
 						rdata->rx.buf.dma_len);
 				rdata->rx.buf.pa.pages = NULL;
 			}
 		}
 
-		if (incomplete || context_next)
+skip_data:
+		if (!last || context_next)
 			goto read_again;
 
 		if (!skb)
@@ -2033,7 +2059,7 @@ next_packet:
 	}
 
 	/* Check if we need to save state before leaving */
-	if (received && (incomplete || context_next)) {
+	if (received && (!last || context_next)) {
 		rdata = XGBE_GET_DESC_DATA(ring, ring->cur);
 		rdata->state_saved = 1;
 		rdata->state.skb = skb;
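
To make the new per-descriptor length accounting concrete, below is a minimal userspace sketch that mirrors the decision logic of the patch's xgbe_rx_buf1_len()/xgbe_rx_buf2_len() helpers. The struct rx_desc type, its field names, and the example buffer sizes and frame length are hypothetical stand-ins, not the driver's real types; only the branching logic follows the hunks above.

/* Sketch only: simplified model of the buf1/buf2 length helpers.
 * Assumptions: 256-byte header buffers, 4 KB data buffers, and a
 * hypothetical 9000-byte jumbo frame spread over three descriptors.
 */
#include <stdbool.h>
#include <stdio.h>

struct rx_desc {
	bool first, last;		/* FIRST/LAST packet attributes */
	unsigned int hdr_len;		/* split-header length, 0 if none */
	unsigned int hdr_dma_len;	/* header buffer size */
	unsigned int buf_dma_len;	/* data buffer size */
	unsigned int rx_len;		/* cumulative frame length (valid on last) */
};

static unsigned int buf1_len(const struct rx_desc *d)
{
	if (!d->first)
		return 0;			/* only the first descriptor fills buffer 1 */
	if (d->hdr_len)
		return d->hdr_len;		/* split header: header bytes only */
	if (!d->last)
		return d->hdr_dma_len;		/* more descriptors follow: buffer 1 is full */
	return d->rx_len < d->hdr_dma_len ? d->rx_len : d->hdr_dma_len;
}

static unsigned int buf2_len(const struct rx_desc *d, unsigned int len_so_far)
{
	if (!d->last)
		return d->buf_dma_len;		/* not last: buffer 2 is full */
	return d->rx_len - len_so_far;		/* last: only the remaining bytes */
}

int main(void)
{
	struct rx_desc descs[] = {
		{ .first = true,  .last = false, .hdr_len = 128,
		  .hdr_dma_len = 256, .buf_dma_len = 4096, .rx_len = 4224 },
		{ .first = false, .last = false, .hdr_len = 0,
		  .hdr_dma_len = 256, .buf_dma_len = 4096, .rx_len = 8320 },
		{ .first = false, .last = true,  .hdr_len = 0,
		  .hdr_dma_len = 256, .buf_dma_len = 4096, .rx_len = 9000 },
	};
	unsigned int len = 0;

	for (unsigned int i = 0; i < 3; i++) {
		unsigned int b1 = buf1_len(&descs[i]);

		len += b1;
		unsigned int b2 = buf2_len(&descs[i], len);

		len += b2;
		printf("desc %u: buf1=%u buf2=%u total=%u\n", i, b1, b2, len);
	}
	return 0;
}

Run against the hypothetical descriptors above, the per-descriptor buf1/buf2 contributions (128+4096, 0+4096, 0+680) sum to the full 9000-byte frame, which is the accumulation the patched xgbe_rx_poll() loop performs instead of the old cumulative rdesc_len arithmetic.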