6872de5a1b02809bed54ecf7bea78ea7d5c027d4
[sitara-epos/sitara-epos-kernel.git] / drivers / net / ethernet / ti / cpsw.c
1 /*
2  * Texas Instruments Ethernet Switch Driver
3  *
4  * Copyright (C) 2010 Texas Instruments
5  *
6  * This program is free software; you can redistribute it and/or
7  * modify it under the terms of the GNU General Public License as
8  * published by the Free Software Foundation version 2.
9  *
10  * This program is distributed "as is" WITHOUT ANY WARRANTY of any
11  * kind, whether express or implied; without even the implied warranty
12  * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
13  * GNU General Public License for more details.
14  */
15 #include <linux/kernel.h>
16 #include <linux/io.h>
17 #include <linux/clk.h>
18 #include <linux/timer.h>
19 #include <linux/module.h>
20 #include <linux/platform_device.h>
21 #include <linux/if_ether.h>
22 #include <linux/etherdevice.h>
23 #include <linux/netdevice.h>
24 #include <linux/phy.h>
25 #include <linux/workqueue.h>
26 #include <linux/delay.h>
27 #include <linux/interrupt.h>
29 #include <linux/cpsw.h>
31 #include "cpsw_ale.h"
32 #include "davinci_cpdma.h"
35 #define CPSW_DEBUG      (NETIF_MSG_HW           | NETIF_MSG_WOL         | \
36                          NETIF_MSG_DRV          | NETIF_MSG_LINK        | \
37                          NETIF_MSG_IFUP         | NETIF_MSG_INTR        | \
38                          NETIF_MSG_PROBE        | NETIF_MSG_TIMER       | \
39                          NETIF_MSG_IFDOWN       | NETIF_MSG_RX_ERR      | \
40                          NETIF_MSG_TX_ERR       | NETIF_MSG_TX_DONE     | \
41                          NETIF_MSG_PKTDATA      | NETIF_MSG_TX_QUEUED   | \
42                          NETIF_MSG_RX_STATUS)
44 #define msg(level, type, format, ...)                           \
45 do {                                                            \
46         if (netif_msg_##type(priv) && net_ratelimit())          \
47                 dev_##level(priv->dev, format, ## __VA_ARGS__); \
48 } while (0)
50 #define CPDMA_RXTHRESH          0x0c0
51 #define CPDMA_RXFREE            0x0e0
52 #define CPDMA_TXHDP_VER1        0x100
53 #define CPDMA_TXHDP_VER2        0x200
54 #define CPDMA_RXHDP_VER1        0x120
55 #define CPDMA_RXHDP_VER2        0x220
56 #define CPDMA_TXCP_VER1         0x140
57 #define CPDMA_TXCP_VER2         0x240
58 #define CPDMA_RXCP_VER1         0x160
59 #define CPDMA_RXCP_VER2         0x260
61 #define CPSW_POLL_WEIGHT        64
62 #define CPSW_MIN_PACKET_SIZE    60
63 #define CPSW_MAX_PACKET_SIZE    (1500 + 14 + 4 + 4)
65 #define CPSW_PHY_SPEED          1000
#define CPSW_IRQ_QUIRK
#ifdef CPSW_IRQ_QUIRK
/*
 * Enable/disable every IRQ line the controller registered (snapshot in
 * priv->irqs_table).  Note: no semicolon after "while (0)" -- the old
 * trailing semicolon made these macros expand to TWO statements, which
 * breaks "if (x) cpsw_enable_irq(priv); else ..." constructs.  The
 * macro argument is also parenthesized now.
 */
#define cpsw_enable_irq(priv)	\
	do {			\
		u32 i;		\
		for (i = 0; i < (priv)->num_irqs; i++) \
			enable_irq((priv)->irqs_table[i]); \
	} while (0)
#define cpsw_disable_irq(priv)	\
	do {			\
		u32 i;		\
		for (i = 0; i < (priv)->num_irqs; i++) \
			disable_irq_nosync((priv)->irqs_table[i]); \
	} while (0)
#else
#define cpsw_enable_irq(priv) do { } while (0)
#define cpsw_disable_irq(priv) do { } while (0)
#endif
86 static int debug_level;
87 module_param(debug_level, int, 0);
88 MODULE_PARM_DESC(debug_level, "cpsw debug level (NETIF_MSG bits)");
90 static int ale_ageout = 10;
91 module_param(ale_ageout, int, 0);
92 MODULE_PARM_DESC(ale_ageout, "cpsw ale ageout interval (seconds)");
94 static int rx_packet_max = CPSW_MAX_PACKET_SIZE;
95 module_param(rx_packet_max, int, 0);
96 MODULE_PARM_DESC(rx_packet_max, "maximum receive packet size (bytes)");
98 struct cpsw_ss_regs {
99         u32     id_ver;
100         u32     soft_reset;
101         u32     control;
102         u32     int_control;
103         u32     rx_thresh_en;
104         u32     rx_en;
105         u32     tx_en;
106         u32     misc_en;
107 };
109 struct cpsw_regs {
110         u32     id_ver;
111         u32     control;
112         u32     soft_reset;
113         u32     stat_port_en;
114         u32     ptype;
115 };
117 struct cpsw_slave_regs {
118         u32     max_blks;
119         u32     blk_cnt;
120         u32     flow_thresh;
121         u32     port_vlan;
122         u32     tx_pri_map;
123 #ifdef CONFIG_ARCH_TI814X
124         u32     ts_ctl;
125         u32     ts_seq_ltype;
126         u32     ts_vlan;
127 #endif
128         u32     sa_lo;
129         u32     sa_hi;
130 };
132 struct cpsw_host_regs {
133         u32     max_blks;
134         u32     blk_cnt;
135         u32     flow_thresh;
136         u32     port_vlan;
137         u32     tx_pri_map;
138         u32     cpdma_tx_pri_map;
139         u32     cpdma_rx_chan_map;
140 };
142 struct cpsw_sliver_regs {
143         u32     id_ver;
144         u32     mac_control;
145         u32     mac_status;
146         u32     soft_reset;
147         u32     rx_maxlen;
148         u32     __reserved_0;
149         u32     rx_pause;
150         u32     tx_pause;
151         u32     __reserved_1;
152         u32     rx_pri_map;
153 };
155 struct cpsw_hw_stats {
156         u32     rxgoodframes;
157         u32     rxbroadcastframes;
158         u32     rxmulticastframes;
159         u32     rxpauseframes;
160         u32     rxcrcerrors;
161         u32     rxaligncodeerrors;
162         u32     rxoversizedframes;
163         u32     rxjabberframes;
164         u32     rxundersizedframes;
165         u32     rxfragments;
166         u32     __pad_0[2];
167         u32     rxoctets;
168         u32     txgoodframes;
169         u32     txbroadcastframes;
170         u32     txmulticastframes;
171         u32     txpauseframes;
172         u32     txdeferredframes;
173         u32     txcollisionframes;
174         u32     txsinglecollframes;
175         u32     txmultcollframes;
176         u32     txexcessivecollisions;
177         u32     txlatecollisions;
178         u32     txunderrun;
179         u32     txcarriersenseerrors;
180         u32     txoctets;
181         u32     octetframes64;
182         u32     octetframes65t127;
183         u32     octetframes128t255;
184         u32     octetframes256t511;
185         u32     octetframes512t1023;
186         u32     octetframes1024tup;
187         u32     netoctets;
188         u32     rxsofoverruns;
189         u32     rxmofoverruns;
190         u32     rxdmaoverruns;
191 };
193 struct cpsw_slave {
194         struct cpsw_slave_regs __iomem  *regs;
195         struct cpsw_sliver_regs __iomem *sliver;
196         int                             slave_num;
197         u32                             mac_control;
198         struct cpsw_slave_data          *data;
199         struct phy_device               *phy;
200 };
202 struct cpsw_priv {
203         spinlock_t                      lock;
204         struct platform_device          *pdev;
205         struct net_device               *ndev;
206         struct resource                 *cpsw_res;
207         struct resource                 *cpsw_ss_res;
208         struct napi_struct              napi;
209 #define napi_to_priv(napi)      container_of(napi, struct cpsw_priv, napi)
210         struct device                   *dev;
211         struct cpsw_platform_data       data;
212         struct cpsw_regs __iomem        *regs;
213         struct cpsw_ss_regs __iomem     *ss_regs;
214         struct cpsw_hw_stats __iomem    *hw_stats;
215         struct cpsw_host_regs __iomem   *host_port_regs;
216         u32                             msg_enable;
217         struct net_device_stats         stats;
218         int                             rx_packet_max;
219         int                             host_port;
220         struct clk                      *clk;
221         u8                              mac_addr[ETH_ALEN];
222         struct cpsw_slave               *slaves;
223 #define for_each_slave(priv, func, arg...)                      \
224         do {                                                    \
225                 int idx;                                        \
226                 for (idx = 0; idx < (priv)->data.slaves; idx++) \
227                         (func)((priv)->slaves + idx, ##arg);    \
228         } while (0)
230         struct cpdma_ctlr               *dma;
231         struct cpdma_chan               *txch, *rxch;
232         struct cpsw_ale                 *ale;
234 #ifdef CPSW_IRQ_QUIRK
235         /* snapshot of IRQ numbers */
236         u32 irqs_table[4];
237         u32 num_irqs;
238 #endif
240 };
242 static void cpsw_intr_enable(struct cpsw_priv *priv)
244         __raw_writel(0xFF, &priv->ss_regs->tx_en);
245         __raw_writel(0xFF, &priv->ss_regs->rx_en);
247         cpdma_ctlr_int_ctrl(priv->dma, true);
248         return;
251 static void cpsw_intr_disable(struct cpsw_priv *priv)
253         __raw_writel(0, &priv->ss_regs->tx_en);
254         __raw_writel(0, &priv->ss_regs->rx_en);
256         cpdma_ctlr_int_ctrl(priv->dma, false);
257         return;
260 void cpsw_tx_handler(void *token, int len, int status)
262         struct sk_buff          *skb = token;
263         struct net_device       *ndev = skb->dev;
264         struct cpsw_priv        *priv = netdev_priv(ndev);
266         if (unlikely(netif_queue_stopped(ndev)))
267                 netif_start_queue(ndev);
268         priv->stats.tx_packets++;
269         priv->stats.tx_bytes += len;
270         dev_kfree_skb_any(skb);
273 void cpsw_rx_handler(void *token, int len, int status)
275         struct sk_buff          *skb = token;
276         struct net_device       *ndev = skb->dev;
277         struct cpsw_priv        *priv = netdev_priv(ndev);
278         int                     ret = 0;
280         if (likely(status >= 0)) {
281                 skb_put(skb, len);
282                 skb->protocol = eth_type_trans(skb, ndev);
283                 netif_receive_skb(skb);
284                 priv->stats.rx_bytes += len;
285                 priv->stats.rx_packets++;
286                 skb = NULL;
287         }
290         if (unlikely(!netif_running(ndev))) {
291                 if (skb)
292                         dev_kfree_skb_any(skb);
293                 return;
294         }
296         if (likely(!skb)) {
297                 skb = netdev_alloc_skb_ip_align(ndev, priv->rx_packet_max);
298                 if (WARN_ON(!skb))
299                         return;
301                 ret = cpdma_chan_submit(priv->rxch, skb, skb->data,
302                                 skb_tailroom(skb), GFP_KERNEL);
303         }
305         WARN_ON(ret < 0);
309 static irqreturn_t cpsw_interrupt(int irq, void *dev_id)
311         struct cpsw_priv *priv = dev_id;
313         if (likely(netif_running(priv->ndev))) {
314                 cpsw_intr_disable(priv);
315                 cpsw_disable_irq(priv);
316                 napi_schedule(&priv->napi);
317         }
320         return IRQ_HANDLED;
323 static int cpsw_poll(struct napi_struct *napi, int budget)
325         struct cpsw_priv        *priv = napi_to_priv(napi);
326         int                     num_tx, num_rx;
329         num_tx = cpdma_chan_process(priv->txch, 128);
330         num_rx = cpdma_chan_process(priv->rxch, budget);
332         if (num_rx || num_tx)
333                 msg(dbg, intr, "poll %d rx, %d tx pkts\n", num_rx, num_tx);
336         if (num_rx < budget) {
337                 napi_complete(napi);
338                 cpdma_ctlr_eoi(priv->dma);
339                 cpsw_intr_enable(priv);
340                 cpsw_enable_irq(priv);
341         }
343         return num_rx;
346 static inline void soft_reset(const char *module, void __iomem *reg)
348         unsigned long timeout = jiffies + HZ;
350         __raw_writel(1, reg);
351         do {
352                 cpu_relax();
353         } while ((__raw_readl(reg) & 1) && time_after(timeout, jiffies));
355         WARN(__raw_readl(reg) & 1, "failed to soft-reset %s\n", module);
358 #define mac_hi(mac)     (((mac)[0] << 0) | ((mac)[1] << 8) |    \
359                          ((mac)[2] << 16) | ((mac)[3] << 24))
360 #define mac_lo(mac)     (((mac)[4] << 0) | ((mac)[5] << 8))
362 static void cpsw_set_slave_mac(struct cpsw_slave *slave,
363                                struct cpsw_priv *priv)
365         __raw_writel(mac_hi(priv->mac_addr), &slave->regs->sa_hi);
366         __raw_writel(mac_lo(priv->mac_addr), &slave->regs->sa_lo);
369 static inline u32 cpsw_get_slave_port(struct cpsw_priv *priv, u32 slave_num)
371         if (priv->host_port == 0)
372                 return slave_num + 1;
373         else
374                 return slave_num;
377 static void _cpsw_adjust_link(struct cpsw_slave *slave,
378                               struct cpsw_priv *priv, bool *link)
380         struct phy_device       *phy = slave->phy;
381         u32                     mac_control = 0;
382         u32                     slave_port;
384         if (!phy)
385                 return;
387         slave_port = cpsw_get_slave_port(priv, slave->slave_num);
389         if (phy->link) {
390                 /* enable forwarding */
391                 cpsw_ale_control_set(priv->ale, slave_port,
392                         ALE_PORT_STATE, ALE_PORT_STATE_FORWARD);
394                 mac_control = priv->data.mac_control;
395                 if (phy->speed == 10)
396                         mac_control |= BIT(18); /* In Band mode */
397                 if (phy->speed == 1000)
398                         mac_control |= BIT(7);  /* GIGABITEN    */
399                 if (phy->duplex)
400                         mac_control |= BIT(0);  /* FULLDUPLEXEN */
401                 if (phy->interface == PHY_INTERFACE_MODE_RGMII) /* RGMII */
402                         mac_control |= (BIT(15)|BIT(16));
403                 *link = true;
404         } else {
405                 cpsw_ale_control_set(priv->ale, slave_port,
406                              ALE_PORT_STATE, ALE_PORT_STATE_DISABLE);
407                 mac_control = 0;
408         }
410         if (mac_control != slave->mac_control) {
411                 phy_print_status(phy);
412                 __raw_writel(mac_control, &slave->sliver->mac_control);
413         }
415         slave->mac_control = mac_control;
418 static void cpsw_adjust_link(struct net_device *ndev)
420         struct cpsw_priv        *priv = netdev_priv(ndev);
421         bool                    link = false;
423         for_each_slave(priv, _cpsw_adjust_link, priv, &link);
425         if (link) {
426                 netif_carrier_on(ndev);
427                 if (netif_running(ndev))
428                         netif_wake_queue(ndev);
429         } else {
430                 netif_carrier_off(ndev);
431                 netif_stop_queue(ndev);
432         }
/*
 * __show_stat - append one "name .... value" line to a stats buffer
 * @buf:    destination (append position)
 * @maxlen: space remaining in @buf
 * @name:   counter name; must be shorter than the 40-char dot leader
 * @val:    counter value; zero counters are suppressed entirely
 *
 * Returns the number of characters written (0 when @val is zero).
 * Fixes vs original: the leader is const (writing through a non-const
 * pointer to a string literal is UB), and the u32 value is printed with
 * %u instead of %d (signed/unsigned format mismatch for large counters).
 */
static inline int __show_stat(char *buf, int maxlen, const char *name, u32 val)
{
	static const char leader[] =
		"........................................";

	if (!val)
		return 0;
	return snprintf(buf, maxlen, "%s %s %10u\n", name,
			leader + strlen(name), val);
}
/* sysfs "hw_stats" attribute (read-only): dump all non-zero hardware
 * MIB counters plus RX and TX CPDMA channel statistics into @buf.
 * NOTE(review): output is bounded by SZ_4K -- assumes the sysfs page
 * is at least 4K (PAGE_SIZE); confirm on the target architecture.
 * Kept byte-identical: heavily macro-driven and order-sensitive. */
446 static ssize_t cpsw_hw_stats_show(struct device *dev,
447                                      struct device_attribute *attr,
448                                      char *buf)
450         struct net_device       *ndev = to_net_dev(dev);
451         struct cpsw_priv        *priv = netdev_priv(ndev);
452         int                     len = 0;
453         struct cpdma_chan_stats dma_stats;
/* show_stat reads one named counter from the memory-mapped hw_stats
 * block; show_dma_stat reads from the local dma_stats snapshot. */
455 #define show_stat(x) do {                                               \
456         len += __show_stat(buf + len, SZ_4K - len, #x,                  \
457                            __raw_readl(&priv->hw_stats->x));            \
458 } while (0)
460 #define show_dma_stat(x) do {                                           \
461         len += __show_stat(buf + len, SZ_4K - len, #x, dma_stats.x);    \
462 } while (0)
464         len += snprintf(buf + len, SZ_4K - len, "CPSW Statistics:\n");
465         show_stat(rxgoodframes);        show_stat(rxbroadcastframes);
466         show_stat(rxmulticastframes);   show_stat(rxpauseframes);
467         show_stat(rxcrcerrors);         show_stat(rxaligncodeerrors);
468         show_stat(rxoversizedframes);   show_stat(rxjabberframes);
469         show_stat(rxundersizedframes);  show_stat(rxfragments);
470         show_stat(rxoctets);            show_stat(txgoodframes);
471         show_stat(txbroadcastframes);   show_stat(txmulticastframes);
472         show_stat(txpauseframes);       show_stat(txdeferredframes);
473         show_stat(txcollisionframes);   show_stat(txsinglecollframes);
474         show_stat(txmultcollframes);    show_stat(txexcessivecollisions);
475         show_stat(txlatecollisions);    show_stat(txunderrun);
476         show_stat(txcarriersenseerrors); show_stat(txoctets);
477         show_stat(octetframes64);       show_stat(octetframes65t127);
478         show_stat(octetframes128t255);  show_stat(octetframes256t511);
479         show_stat(octetframes512t1023); show_stat(octetframes1024tup);
480         show_stat(netoctets);           show_stat(rxsofoverruns);
481         show_stat(rxmofoverruns);       show_stat(rxdmaoverruns);
/* RX channel DMA statistics (snapshot, then print) */
483         cpdma_chan_get_stats(priv->rxch, &dma_stats);
484         len += snprintf(buf + len, SZ_4K - len, "\nRX DMA Statistics:\n");
485         show_dma_stat(head_enqueue);    show_dma_stat(tail_enqueue);
486         show_dma_stat(pad_enqueue);     show_dma_stat(misqueued);
487         show_dma_stat(desc_alloc_fail); show_dma_stat(pad_alloc_fail);
488         show_dma_stat(runt_receive_buff); show_dma_stat(runt_transmit_buff);
489         show_dma_stat(empty_dequeue);   show_dma_stat(busy_dequeue);
490         show_dma_stat(good_dequeue);    show_dma_stat(teardown_dequeue);
/* TX channel DMA statistics */
492         cpdma_chan_get_stats(priv->txch, &dma_stats);
493         len += snprintf(buf + len, SZ_4K - len, "\nTX DMA Statistics:\n");
494         show_dma_stat(head_enqueue);    show_dma_stat(tail_enqueue);
495         show_dma_stat(pad_enqueue);     show_dma_stat(misqueued);
496         show_dma_stat(desc_alloc_fail); show_dma_stat(pad_alloc_fail);
497         show_dma_stat(runt_receive_buff); show_dma_stat(runt_transmit_buff);
498         show_dma_stat(empty_dequeue);   show_dma_stat(busy_dequeue);
499         show_dma_stat(good_dequeue);    show_dma_stat(teardown_dequeue);
501         return len;
/* world-readable sysfs attribute backed by the show routine above */
504 DEVICE_ATTR(hw_stats, S_IRUGO, cpsw_hw_stats_show, NULL);
506 #define PHY_CONFIG_REG  22
507 static void cpsw_set_phy_config(struct cpsw_priv *priv, struct phy_device *phy)
509         struct cpsw_platform_data *pdata = priv->pdev->dev.platform_data;
510         struct mii_bus *miibus;
511         int phy_addr = 0;
512         u16 val = 0;
513         u16 tmp = 0;
515         if (!pdata->gigabit_en)
516                 return;
518         if (!phy)
519                 return;
521         miibus = phy->bus;
523         if (!miibus)
524                 return;
526         phy_addr = phy->addr;
528         /* Following lines enable gigbit advertisement capability even in case
529          * the advertisement is not enabled by default
530          */
531         val = miibus->read(miibus, phy_addr, MII_BMCR);
532         val |= (BMCR_SPEED100 | BMCR_ANENABLE | BMCR_FULLDPLX);
533         miibus->write(miibus, phy_addr, MII_BMCR, val);
534         tmp = miibus->read(miibus, phy_addr, MII_BMCR);
536         /* Enable gigabit support only if the speed is 1000Mbps */
537         if (phy->speed == CPSW_PHY_SPEED) {
538                 tmp = miibus->read(miibus, phy_addr, MII_BMSR);
539                 if (tmp & 0x1) {
540                         val = miibus->read(miibus, phy_addr, MII_CTRL1000);
541                         val |= BIT(9);
542                         miibus->write(miibus, phy_addr, MII_CTRL1000, val);
543                         tmp = miibus->read(miibus, phy_addr, MII_CTRL1000);
544                 }
545         }
547         val = miibus->read(miibus, phy_addr, MII_ADVERTISE);
548         val |= (ADVERTISE_10HALF | ADVERTISE_10FULL | \
549                 ADVERTISE_100HALF | ADVERTISE_100FULL);
550         miibus->write(miibus, phy_addr, MII_ADVERTISE, val);
551         tmp = miibus->read(miibus, phy_addr, MII_ADVERTISE);
553         /* TODO : This check is required. This should be
554          * moved to a board init section as its specific
555          * to a phy.*/
556         if (phy->phy_id == 0x0282F014) {
557                 /* This enables TX_CLK-ing in case of 10/100MBps operation */
558                 val = miibus->read(miibus, phy_addr, PHY_CONFIG_REG);
559                 val |= BIT(5);
560                 miibus->write(miibus, phy_addr, PHY_CONFIG_REG, val);
561                 tmp = miibus->read(miibus, phy_addr, PHY_CONFIG_REG);
562         }
564         return;
567 static void cpsw_slave_open(struct cpsw_slave *slave, struct cpsw_priv *priv)
569         char name[32];
570         u32 slave_port;
572         sprintf(name, "slave-%d", slave->slave_num);
574         soft_reset(name, &slave->sliver->soft_reset);
576         /* setup priority mapping */
577         __raw_writel(0x76543210, &slave->sliver->rx_pri_map);
578         __raw_writel(0x33221100, &slave->regs->tx_pri_map);
580         /* setup max packet size, and mac address */
581         __raw_writel(priv->rx_packet_max, &slave->sliver->rx_maxlen);
582         cpsw_set_slave_mac(slave, priv);
584         slave->mac_control = 0; /* no link yet */
586         slave_port = cpsw_get_slave_port(priv, slave->slave_num);
587         cpsw_ale_add_mcast(priv->ale, priv->ndev->broadcast,
588                            1 << slave_port);
590         slave->phy = phy_connect(priv->ndev, slave->data->phy_id,
591                                  &cpsw_adjust_link, 0, slave->data->phy_if);
592         if (IS_ERR(slave->phy)) {
593                 msg(err, ifup, "phy %s not found on slave %d\n",
594                     slave->data->phy_id, slave->slave_num);
595                 slave->phy = NULL;
596         } else {
597                 printk(KERN_ERR"\nCPSW phy found : id is : 0x%x\n",
598                         slave->phy->phy_id);
599                 cpsw_set_phy_config(priv, slave->phy);
600                 phy_start(slave->phy);
601         }
604 static void cpsw_init_host_port(struct cpsw_priv *priv)
606         /* soft reset the controller and initialize ale */
607         soft_reset("cpsw", &priv->regs->soft_reset);
608         cpsw_ale_start(priv->ale);
610         /* switch to vlan unaware mode */
611         cpsw_ale_control_set(priv->ale, 0, ALE_VLAN_AWARE, 0);
613         /* setup host port priority mapping */
614         __raw_writel(0x76543210, &priv->host_port_regs->cpdma_tx_pri_map);
615         __raw_writel(0, &priv->host_port_regs->cpdma_rx_chan_map);
617         cpsw_ale_control_set(priv->ale, priv->host_port,
618                              ALE_PORT_STATE, ALE_PORT_STATE_FORWARD);
620         cpsw_ale_add_ucast(priv->ale, priv->mac_addr, priv->host_port,
621                           0);
622                            /* ALE_SECURE); */
623         cpsw_ale_add_mcast(priv->ale, priv->ndev->broadcast,
624                            1 << priv->host_port);
627 static int cpsw_ndo_open(struct net_device *ndev)
629         struct cpsw_priv *priv = netdev_priv(ndev);
630         int i, ret;
631         u32 reg;
633         cpsw_intr_disable(priv);
634         netif_carrier_off(ndev);
636         ret = clk_enable(priv->clk);
637         if (ret < 0) {
638                 dev_err(priv->dev, "unable to turn on device clock\n");
639                 return ret;
640         }
642         ret = device_create_file(&ndev->dev, &dev_attr_hw_stats);
643         if (ret < 0) {
644                 dev_err(priv->dev, "unable to add device attr\n");
645                 return ret;
646         }
648         if (priv->data.phy_control)
649                 (*priv->data.phy_control)(true);
651         reg = __raw_readl(&priv->regs->id_ver);
653         msg(info, ifup, "initializing cpsw version %d.%d (%d)\n",
654             (reg >> 8 & 0x7), reg & 0xff, (reg >> 11) & 0x1f);
656         /* initialize host and slave ports */
657         cpsw_init_host_port(priv);
658         for_each_slave(priv, cpsw_slave_open, priv);
660         /* setup tx dma to fixed prio and zero offset */
661         cpdma_control_set(priv->dma, CPDMA_TX_PRIO_FIXED, 1);
662         cpdma_control_set(priv->dma, CPDMA_RX_BUFFER_OFFSET, 0);
664         /* disable priority elevation and enable statistics on all ports */
665         __raw_writel(0, &priv->regs->ptype);
667         /* enable statistics collection only on the host port */
668         /* __raw_writel(BIT(priv->host_port), &priv->regs->stat_port_en); */
669         __raw_writel(0x7, &priv->regs->stat_port_en);
671         if (WARN_ON(!priv->data.rx_descs))
672                 priv->data.rx_descs = 128;
674         for (i = 0; i < priv->data.rx_descs; i++) {
675                 struct sk_buff *skb;
677                 ret = -ENOMEM;
678                 skb = netdev_alloc_skb_ip_align(priv->ndev,
679                                                 priv->rx_packet_max);
680                 if (!skb)
681                         break;
682                 ret = cpdma_chan_submit(priv->rxch, skb, skb->data,
683                                         skb_tailroom(skb), GFP_KERNEL);
684                 if (WARN_ON(ret < 0))
685                         break;
686         }
687         /* continue even if we didn't manage to submit all receive descs */
688         msg(info, ifup, "submitted %d rx descriptors\n", i);
690         cpdma_ctlr_start(priv->dma);
691         cpsw_intr_enable(priv);
692         napi_enable(&priv->napi);
693         cpdma_ctlr_eoi(priv->dma);
695         return 0;
698 static void cpsw_slave_stop(struct cpsw_slave *slave, struct cpsw_priv *priv)
700         if (!slave->phy)
701                 return;
702         phy_stop(slave->phy);
703         phy_disconnect(slave->phy);
704         slave->phy = NULL;
707 static int cpsw_ndo_stop(struct net_device *ndev)
709         struct cpsw_priv *priv = netdev_priv(ndev);
711         msg(info, ifdown, "shutting down cpsw device\n");
712         cpsw_intr_disable(priv);
713         cpdma_ctlr_int_ctrl(priv->dma, false);
714         cpdma_ctlr_stop(priv->dma);
715         netif_stop_queue(priv->ndev);
716         napi_disable(&priv->napi);
717         netif_carrier_off(priv->ndev);
718         cpsw_ale_stop(priv->ale);
719         device_remove_file(&ndev->dev, &dev_attr_hw_stats);
720         for_each_slave(priv, cpsw_slave_stop, priv);
721         if (priv->data.phy_control)
722                 (*priv->data.phy_control)(false);
723         clk_disable(priv->clk);
724         return 0;
727 static netdev_tx_t cpsw_ndo_start_xmit(struct sk_buff *skb,
728                                        struct net_device *ndev)
730         struct cpsw_priv *priv = netdev_priv(ndev);
731         int ret;
733         ndev->trans_start = jiffies;
735         ret = skb_padto(skb, CPSW_MIN_PACKET_SIZE);
736         if (unlikely(ret < 0)) {
737                 msg(err, tx_err, "packet pad failed");
738                 goto fail;
739         }
741         ret = cpdma_chan_submit(priv->txch, skb, skb->data,
742                                 skb->len, GFP_KERNEL);
743         if (unlikely(ret != 0)) {
744                 msg(err, tx_err, "desc submit failed");
745                 goto fail;
746         }
748         return NETDEV_TX_OK;
749 fail:
750         priv->stats.tx_dropped++;
751         netif_stop_queue(ndev);
752         return NETDEV_TX_BUSY;
755 static void cpsw_ndo_change_rx_flags(struct net_device *ndev, int flags)
757         /*
758          * The switch cannot operate in promiscuous mode without substantial
759          * headache.  For promiscuous mode to work, we would need to put the
760          * ALE in bypass mode and route all traffic to the host port.
761          * Subsequently, the host will need to operate as a "bridge", learn,
762          * and flood as needed.  For now, we simply complain here and
763          * do nothing about it :-)
764          */
765         if ((flags & IFF_PROMISC) && (ndev->flags & IFF_PROMISC))
766                 dev_err(&ndev->dev, "promiscuity ignored!\n");
768         /*
769          * The switch cannot filter multicast traffic unless it is configured
770          * in "VLAN Aware" mode.  Unfortunately, VLAN awareness requires a
771          * whole bunch of additional logic that this driver does not implement
772          * at present.
773          */
774         if ((flags & IFF_ALLMULTI) && !(ndev->flags & IFF_ALLMULTI))
775                 dev_err(&ndev->dev, "multicast traffic cannot be filtered!\n");
778 static int cpsw_ndo_set_mac_address(struct net_device *ndev, void *addr)
780         struct cpsw_priv *priv = netdev_priv(ndev);
782         cpsw_ale_del_ucast(priv->ale, priv->mac_addr, priv->host_port);
783         memcpy(priv->mac_addr, ndev->dev_addr, ETH_ALEN);
784         cpsw_ale_add_ucast(priv->ale, priv->mac_addr, priv->host_port,
785                            0);
786                            /* ALE_SECURE); */
787         for_each_slave(priv, cpsw_set_slave_mac, priv);
788         return 0;
791 static void cpsw_ndo_tx_timeout(struct net_device *ndev)
793         struct cpsw_priv *priv = netdev_priv(ndev);
795         msg(err, tx_err, "transmit timeout, restarting dma");
796         priv->stats.tx_errors++;
797         cpsw_intr_disable(priv);
798         cpdma_ctlr_int_ctrl(priv->dma, false);
799         cpdma_chan_stop(priv->txch);
800         cpdma_chan_start(priv->txch);
801         cpdma_ctlr_int_ctrl(priv->dma, true);
802         cpsw_intr_enable(priv);
803         cpdma_ctlr_eoi(priv->dma);
806 static struct net_device_stats *cpsw_ndo_get_stats(struct net_device *ndev)
808         struct cpsw_priv *priv = netdev_priv(ndev);
809         return &priv->stats;
#ifdef CONFIG_NET_POLL_CONTROLLER
/* netpoll hook: emulate an interrupt with all sources masked so
 * netconsole et al. can drain the rings without real IRQs. */
static void cpsw_ndo_poll_controller(struct net_device *ndev)
{
	struct cpsw_priv *priv = netdev_priv(ndev);

	cpsw_intr_disable(priv);
	cpdma_ctlr_int_ctrl(priv->dma, false);

	cpsw_interrupt(ndev->irq, priv);

	cpdma_ctlr_int_ctrl(priv->dma, true);
	cpsw_intr_enable(priv);
	cpdma_ctlr_eoi(priv->dma);
}
#endif
/* Standard netdev entry points for the CPSW interface.  No
 * ndo_set_rx_mode: multicast filtering is unsupported (see
 * cpsw_ndo_change_rx_flags). */
826 static const struct net_device_ops cpsw_netdev_ops = {
827         .ndo_open               = cpsw_ndo_open,
828         .ndo_stop               = cpsw_ndo_stop,
829         .ndo_start_xmit         = cpsw_ndo_start_xmit,
830         .ndo_change_rx_flags    = cpsw_ndo_change_rx_flags,
831         .ndo_set_mac_address    = cpsw_ndo_set_mac_address,
832         .ndo_validate_addr      = eth_validate_addr,
833         .ndo_tx_timeout         = cpsw_ndo_tx_timeout,
834         .ndo_get_stats          = cpsw_ndo_get_stats,
835 #ifdef CONFIG_NET_POLL_CONTROLLER
836         .ndo_poll_controller    = cpsw_ndo_poll_controller,
837 #endif
838 };
840 static void cpsw_get_drvinfo(struct net_device *ndev,
841                              struct ethtool_drvinfo *info)
843         struct cpsw_priv *priv = netdev_priv(ndev);
844         strcpy(info->driver, "TI CPSW Driver v1.0");
845         strcpy(info->version, "1.0");
846         strcpy(info->bus_info, priv->pdev->name);
849 static u32 cpsw_get_msglevel(struct net_device *ndev)
851         struct cpsw_priv *priv = netdev_priv(ndev);
852         return priv->msg_enable;
855 static void cpsw_set_msglevel(struct net_device *ndev, u32 value)
857         struct cpsw_priv *priv = netdev_priv(ndev);
858         priv->msg_enable = value;
861 static const struct ethtool_ops cpsw_ethtool_ops = {
862         .get_drvinfo    = cpsw_get_drvinfo,
863         .get_msglevel   = cpsw_get_msglevel,
864         .set_msglevel   = cpsw_set_msglevel,
865         .get_link       = ethtool_op_get_link,
866 };
868 static void cpsw_slave_init(struct cpsw_slave *slave, struct cpsw_priv *priv)
870         void __iomem            *regs = priv->regs;
871         int                     slave_num = slave->slave_num;
872         struct cpsw_slave_data  *data = priv->data.slave_data + slave_num;
874         slave->data     = data;
875         slave->regs     = regs + data->slave_reg_ofs;
876         slave->sliver   = regs + data->sliver_reg_ofs;
879 static int __devinit cpsw_probe(struct platform_device *pdev)
881         struct cpsw_platform_data       *data = pdev->dev.platform_data;
882         struct net_device               *ndev;
883         struct cpsw_priv                *priv;
884         struct cpdma_params             dma_params;
885         struct cpsw_ale_params          ale_params;
886         void __iomem                    *regs;
887         struct resource                 *res;
888         int ret = 0, i, k = 0;
890         if (!data) {
891                 pr_err("cpsw: platform data missing\n");
892                 return -ENODEV;
893         }
895         ndev = alloc_etherdev(sizeof(struct cpsw_priv));
896         if (!ndev) {
897                 pr_err("cpsw: error allocating net_device\n");
898                 return -ENOMEM;
899         }
901         platform_set_drvdata(pdev, ndev);
902         priv = netdev_priv(ndev);
903         spin_lock_init(&priv->lock);
904         priv->data = *data;
905         priv->pdev = pdev;
906         priv->ndev = ndev;
907         priv->dev  = &ndev->dev;
908         priv->msg_enable = netif_msg_init(debug_level, CPSW_DEBUG);
909         priv->rx_packet_max = max(rx_packet_max, 128);
911         if (is_valid_ether_addr(data->mac_addr)) {
912                 memcpy(priv->mac_addr, data->mac_addr, ETH_ALEN);
913                 printk(KERN_INFO"Detected MACID=%x:%x:%x:%x:%x:%x\n",
914                         priv->mac_addr[0], priv->mac_addr[1],
915                         priv->mac_addr[2], priv->mac_addr[3],
916                         priv->mac_addr[4], priv->mac_addr[5]);
917         } else {
918                 random_ether_addr(priv->mac_addr);
919                 printk(KERN_INFO"Detected MACID=%x:%x:%x:%x:%x:%x\n",
920                         priv->mac_addr[0], priv->mac_addr[1],
921                         priv->mac_addr[2], priv->mac_addr[3],
922                         priv->mac_addr[4], priv->mac_addr[5]);
923         }
925         memcpy(ndev->dev_addr, priv->mac_addr, ETH_ALEN);
927         priv->slaves = kzalloc(sizeof(struct cpsw_slave) * data->slaves,
928                                GFP_KERNEL);
929         if (!priv->slaves) {
930                 dev_err(priv->dev, "failed to allocate slave ports\n");
931                 ret = -EBUSY;
932                 goto clean_ndev_ret;
933         }
934         for (i = 0; i < data->slaves; i++)
935                 priv->slaves[i].slave_num = i;
937         priv->clk = clk_get(&pdev->dev, NULL);
938         if (IS_ERR(priv->clk))
939                 dev_err(priv->dev, "failed to get device clock\n");
940         priv->cpsw_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
941         if (!priv->cpsw_res) {
942                 dev_err(priv->dev, "error getting i/o resource\n");
943                 ret = -ENOENT;
944                 goto clean_clk_ret;
945         }
947         if (!request_mem_region(priv->cpsw_res->start,
948                 resource_size(priv->cpsw_res), ndev->name)) {
949                 dev_err(priv->dev, "failed request i/o region\n");
950                 ret = -ENXIO;
951                 goto clean_clk_ret;
952         }
954         regs = ioremap(priv->cpsw_res->start, resource_size(priv->cpsw_res));
955         if (!regs) {
956                 dev_err(priv->dev, "unable to map i/o region\n");
957                 goto clean_cpsw_iores_ret;
958         }
959         priv->regs = regs;
960         priv->host_port = data->host_port_num;
961         priv->host_port_regs = regs + data->host_port_reg_ofs;
962         priv->hw_stats = regs + data->hw_stats_reg_ofs;
964         priv->cpsw_ss_res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
965         if (!priv->cpsw_ss_res) {
966                 dev_err(priv->dev, "error getting i/o resource\n");
967                 ret = -ENOENT;
968                 goto clean_clk_ret;
969         }
971         if (!request_mem_region(priv->cpsw_ss_res->start,
972                 resource_size(priv->cpsw_ss_res), ndev->name)) {
973                 dev_err(priv->dev, "failed request i/o region\n");
974                 ret = -ENXIO;
975                 goto clean_clk_ret;
976         }
978         regs = ioremap(priv->cpsw_ss_res->start,
979                         resource_size(priv->cpsw_ss_res));
980         if (!regs) {
981                 dev_err(priv->dev, "unable to map i/o region\n");
982                 goto clean_cpsw_ss_iores_ret;
983         }
984         priv->ss_regs = regs;
987         for_each_slave(priv, cpsw_slave_init, priv);
989         memset(&dma_params, 0, sizeof(dma_params));
990         dma_params.dev                  = &pdev->dev;
991         dma_params.dmaregs              = (void __iomem *)(((u32)priv->regs) +
992                                                 data->cpdma_reg_ofs);
993         dma_params.rxthresh             = (void __iomem *)(((u32)priv->regs) +
994                                         data->cpdma_reg_ofs + CPDMA_RXTHRESH);
995         dma_params.rxfree               = (void __iomem *)(((u32)priv->regs) +
996                                         data->cpdma_reg_ofs + CPDMA_RXFREE);
998         if (data->version == CPSW_VERSION_2) {
999                 dma_params.txhdp        = (void __iomem *)(((u32)priv->regs) +
1000                                         data->cpdma_reg_ofs + CPDMA_TXHDP_VER2);
1001                 dma_params.rxhdp        = (void __iomem *)(((u32)priv->regs) +
1002                                         data->cpdma_reg_ofs + CPDMA_RXHDP_VER2);
1003                 dma_params.txcp         = (void __iomem *)(((u32)priv->regs) +
1004                                         data->cpdma_reg_ofs + CPDMA_TXCP_VER2);
1005                 dma_params.rxcp         = (void __iomem *)(((u32)priv->regs) +
1006                                         data->cpdma_reg_ofs + CPDMA_RXCP_VER2);
1007         } else {
1008                 dma_params.txhdp        = (void __iomem *)(((u32)priv->regs) +
1009                                         data->cpdma_reg_ofs + CPDMA_TXHDP_VER1);
1010                 dma_params.rxhdp        = (void __iomem *)(((u32)priv->regs) +
1011                                         data->cpdma_reg_ofs + CPDMA_RXHDP_VER1);
1012                 dma_params.txcp         = (void __iomem *)(((u32)priv->regs) +
1013                                         data->cpdma_reg_ofs + CPDMA_TXCP_VER1);
1014                 dma_params.rxcp         = (void __iomem *)(((u32)priv->regs) +
1015                                         data->cpdma_reg_ofs + CPDMA_RXCP_VER1);
1016         }
1018         dma_params.num_chan             = data->channels;
1019         dma_params.has_soft_reset       = true;
1020         dma_params.min_packet_size      = CPSW_MIN_PACKET_SIZE;
1021         dma_params.desc_mem_size        = data->bd_ram_size;
1022         dma_params.desc_align           = 16;
1023         dma_params.has_ext_regs         = true;
1024         dma_params.desc_mem_phys        = data->no_bd_ram ? 0 :
1025                         (u32 __force)priv->cpsw_res->start + data->bd_ram_ofs;
1026         dma_params.desc_hw_addr         = data->hw_ram_addr ?
1027                                 data->hw_ram_addr : dma_params.desc_mem_phys ;
1029         priv->dma = cpdma_ctlr_create(&dma_params);
1030         if (!priv->dma) {
1031                 dev_err(priv->dev, "error initializing dma\n");
1032                 ret = -ENOMEM;
1033                 goto clean_iomap_ret;
1034         }
1036         priv->txch = cpdma_chan_create(priv->dma, tx_chan_num(0),
1037                                        cpsw_tx_handler);
1038         priv->rxch = cpdma_chan_create(priv->dma, rx_chan_num(0),
1039                                        cpsw_rx_handler);
1041         if (WARN_ON(!priv->txch || !priv->rxch)) {
1042                 dev_err(priv->dev, "error initializing dma channels\n");
1043                 ret = -ENOMEM;
1044                 goto clean_dma_ret;
1045         }
1047         memset(&ale_params, 0, sizeof(ale_params));
1048         ale_params.dev          = &ndev->dev;
1049         ale_params.ale_regs     = (void *)((u32)priv->regs) +
1050                                         ((u32)data->ale_reg_ofs);
1051         ale_params.ale_ageout   = ale_ageout;
1052         ale_params.ale_entries  = data->ale_entries;
1053         ale_params.ale_ports    = data->slaves;
1055         priv->ale = cpsw_ale_create(&ale_params);
1056         if (!priv->ale) {
1057                 dev_err(priv->dev, "error initializing ale engine\n");
1058                 ret = -ENODEV;
1059                 goto clean_dma_ret;
1060         }
1062         ndev->irq = platform_get_irq(pdev, 0);
1063         if (ndev->irq < 0) {
1064                 dev_err(priv->dev, "error getting irq resource\n");
1065                 ret = -ENOENT;
1066                 goto clean_ale_ret;
1067         }
1069         while ((res = platform_get_resource(priv->pdev, IORESOURCE_IRQ, k))) {
1070                 for (i = res->start; i <= res->end; i++) {
1071                         if (request_irq(i, cpsw_interrupt, IRQF_DISABLED,
1072                                         dev_name(&pdev->dev), priv)) {
1073                                 dev_err(priv->dev, "error attaching irq\n");
1074                                 goto clean_ale_ret;
1075                         }
1076                         #ifdef CPSW_IRQ_QUIRK
1077                         priv->irqs_table[k] = i;
1078                         priv->num_irqs = k;
1079                         #endif
1080                 }
1081                 k++;
1082         }
1084         ndev->flags |= IFF_ALLMULTI;    /* see cpsw_ndo_change_rx_flags() */
1086         ndev->netdev_ops = &cpsw_netdev_ops;
1087         SET_ETHTOOL_OPS(ndev, &cpsw_ethtool_ops);
1088         netif_napi_add(ndev, &priv->napi, cpsw_poll, CPSW_POLL_WEIGHT);
1090         /* register the network device */
1091         SET_NETDEV_DEV(ndev, &pdev->dev);
1092         ret = register_netdev(ndev);
1093         if (ret) {
1094                 dev_err(priv->dev, "error registering net device\n");
1095                 ret = -ENODEV;
1096                 goto clean_irq_ret;
1097         }
1099         msg(notice, probe, "initialized device (regs %x, irq %d)\n",
1100             priv->cpsw_res->start, ndev->irq);
1102         return 0;
1104 clean_irq_ret:
1105         free_irq(ndev->irq, priv);
1106 clean_ale_ret:
1107         cpsw_ale_destroy(priv->ale);
1108 clean_dma_ret:
1109         cpdma_chan_destroy(priv->txch);
1110         cpdma_chan_destroy(priv->rxch);
1111         cpdma_ctlr_destroy(priv->dma);
1112 clean_iomap_ret:
1113         iounmap(priv->regs);
1114 clean_cpsw_ss_iores_ret:
1115         release_mem_region(priv->cpsw_ss_res->start,
1116                                 resource_size(priv->cpsw_ss_res));
1117 clean_cpsw_iores_ret:
1118         release_mem_region(priv->cpsw_res->start,
1119                                 resource_size(priv->cpsw_res));
1120 clean_clk_ret:
1121         clk_put(priv->clk);
1122         kfree(priv->slaves);
1123 clean_ndev_ret:
1124         free_netdev(ndev);
1125         return ret;
1128 static int __devexit cpsw_remove(struct platform_device *pdev)
1130         struct net_device *ndev = platform_get_drvdata(pdev);
1131         struct cpsw_priv *priv = netdev_priv(ndev);
1133         msg(notice, probe, "removing device\n");
1134         platform_set_drvdata(pdev, NULL);
1136         free_irq(ndev->irq, priv);
1137         cpsw_ale_destroy(priv->ale);
1138         cpdma_chan_destroy(priv->txch);
1139         cpdma_chan_destroy(priv->rxch);
1140         cpdma_ctlr_destroy(priv->dma);
1141         iounmap(priv->regs);
1142         release_mem_region(priv->cpsw_res->start,
1143                                 resource_size(priv->cpsw_res));
1144         release_mem_region(priv->cpsw_ss_res->start,
1145                                 resource_size(priv->cpsw_ss_res));
1146         clk_put(priv->clk);
1147         kfree(priv->slaves);
1148         free_netdev(ndev);
1150         return 0;
/* PM suspend: quiesce the interface if it is up. */
static int cpsw_suspend(struct device *dev)
{
	struct net_device *ndev =
		platform_get_drvdata(to_platform_device(dev));

	if (netif_running(ndev))
		cpsw_ndo_stop(ndev);
	return 0;
}
/* PM resume: bring the interface back up if it was running. */
static int cpsw_resume(struct device *dev)
{
	struct net_device *ndev =
		platform_get_drvdata(to_platform_device(dev));

	if (netif_running(ndev))
		cpsw_ndo_open(ndev);
	return 0;
}
1173 static const struct dev_pm_ops cpsw_pm_ops = {
1174         .suspend        = cpsw_suspend,
1175         .resume         = cpsw_resume,
1176 };
1178 static struct platform_driver cpsw_driver = {
1179         .driver = {
1180                 .name    = "cpsw",
1181                 .owner   = THIS_MODULE,
1182                 .pm      = &cpsw_pm_ops,
1183         },
1184         .probe = cpsw_probe,
1185         .remove = __devexit_p(cpsw_remove),
1186 };
1188 static int __init cpsw_init(void)
1190         return platform_driver_register(&cpsw_driver);
1192 late_initcall(cpsw_init);
1194 static void __exit cpsw_exit(void)
1196         platform_driver_unregister(&cpsw_driver);
1198 module_exit(cpsw_exit);
1200 MODULE_LICENSE("GPL");
1201 MODULE_DESCRIPTION("TI CPSW Ethernet driver");