Merge branch 'p-ti-linux-3.14.y-common' into p-ti-linux-3.14.y-android
authorVishal Mahaveer <vishalm@ti.com>
Thu, 17 Sep 2015 12:10:55 +0000 (07:10 -0500)
committerVishal Mahaveer <vishalm@ti.com>
Thu, 17 Sep 2015 12:10:55 +0000 (07:10 -0500)
* p-ti-linux-3.14.y-common:
  dmaengine: omap-dma: Support for polled memcpy transfers
  hack: increase tty buffer size and complain if tty buffer full
  dma: omap-dma: don't return an error if pause failed due to completion
  dma: omap-dma: don't return an error if pause a non-existent transfers
  serial: 8250: omap: restore registers on shutdown
  serial: 8250: move rx_running out of the bitfield
  dma: omap-dma: add support for pause of non-cyclic transfers
  dma: add __must_check annotation for dmaengine_pause()
  tty: serial: 8250_omap: do not use RX DMA if pause is not supported

Change-Id: Iecf0f9f2f72b340fad72cd2f9552995caad7371b
Signed-off-by: Vishal Mahaveer <vishalm@ti.com>
drivers/dma/omap-dma.c
drivers/tty/serial/8250/8250.h
drivers/tty/serial/8250/8250_omap.c
drivers/tty/tty_buffer.c
include/linux/dmaengine.h

index ba527e612c66d16cd7e54a58ddc11e344bd276e2..d327bb2e73ef541defc50c1286d9276eac9cec6f 100644 (file)
@@ -50,6 +50,7 @@ struct omap_chan {
        struct dma_slave_config cfg;
        unsigned dma_sig;
        bool cyclic;
+       bool start_no_delay;
        bool paused;
 
        int dma_ch;
@@ -299,7 +300,30 @@ static void omap_dma_start(struct omap_chan *c, struct omap_desc *d)
        omap_dma_chan_write(c, CCR, d->ccr | CCR_ENABLE);
 }
 
-static void omap_dma_stop(struct omap_chan *c)
+static void omap_dma_drain_chan(struct omap_chan *c)
+{
+       int i;
+       uint32_t val;
+
+       /* Wait for sDMA FIFO to drain */
+       for (i = 0; ; i++) {
+               val = omap_dma_chan_read(c, CCR);
+               if (!(val & (CCR_RD_ACTIVE | CCR_WR_ACTIVE)))
+                       break;
+
+               if (i > 100)
+                       break;
+
+               udelay(5);
+       }
+
+       if (val & (CCR_RD_ACTIVE | CCR_WR_ACTIVE))
+               dev_err(c->vc.chan.device->dev,
+                       "DMA drain did not complete on lch %d\n",
+                       c->dma_ch);
+}
+
+static int omap_dma_stop(struct omap_chan *c)
 {
        struct omap_dmadev *od = to_omap_dma_dev(c->vc.chan.device);
        uint32_t val;
@@ -312,7 +336,6 @@ static void omap_dma_stop(struct omap_chan *c)
        val = omap_dma_chan_read(c, CCR);
        if (od->plat->errata & DMA_ERRATA_i541 && val & CCR_TRIGGER_SRC) {
                uint32_t sysconfig;
-               unsigned i;
 
                sysconfig = omap_dma_glbl_read(od, OCP_SYSCONFIG);
                val = sysconfig & ~DMA_SYSCONFIG_MIDLEMODE_MASK;
@@ -323,27 +346,18 @@ static void omap_dma_stop(struct omap_chan *c)
                val &= ~CCR_ENABLE;
                omap_dma_chan_write(c, CCR, val);
 
-               /* Wait for sDMA FIFO to drain */
-               for (i = 0; ; i++) {
-                       val = omap_dma_chan_read(c, CCR);
-                       if (!(val & (CCR_RD_ACTIVE | CCR_WR_ACTIVE)))
-                               break;
-
-                       if (i > 100)
-                               break;
-
-                       udelay(5);
-               }
-
-               if (val & (CCR_RD_ACTIVE | CCR_WR_ACTIVE))
-                       dev_err(c->vc.chan.device->dev,
-                               "DMA drain did not complete on lch %d\n",
-                               c->dma_ch);
+               omap_dma_drain_chan(c);
 
                omap_dma_glbl_write(od, OCP_SYSCONFIG, sysconfig);
        } else {
+
+               if (!(val & CCR_ENABLE))
+                       return 0;
+
                val &= ~CCR_ENABLE;
                omap_dma_chan_write(c, CCR, val);
+
+               omap_dma_drain_chan(c);
        }
 
        mb();
@@ -358,6 +372,7 @@ static void omap_dma_stop(struct omap_chan *c)
 
                omap_dma_chan_write(c, CLNK_CTRL, val);
        }
+       return 0;
 }
 
 static void omap_dma_start_sg(struct omap_chan *c, struct omap_desc *d,
@@ -709,6 +724,12 @@ static enum dma_status omap_dma_tx_status(struct dma_chan *chan,
        if (ret == DMA_COMPLETE || !txstate)
                return ret;
 
+       if (c->start_no_delay) {
+               uint32_t val = omap_dma_chan_read(c, CCR);
+               if (!(val & CCR_ENABLE))
+                       return DMA_COMPLETE;
+       }
+
        spin_lock_irqsave(&c->vc.lock, flags);
        vd = vchan_find_desc(&c->vc, cookie);
        if (vd) {
@@ -728,6 +749,8 @@ static enum dma_status omap_dma_tx_status(struct dma_chan *chan,
        } else {
                txstate->residue = 0;
        }
+       if (ret == DMA_IN_PROGRESS && c->paused)
+               ret = DMA_PAUSED;
        spin_unlock_irqrestore(&c->vc.lock, flags);
 
        return ret;
@@ -744,15 +767,15 @@ static void omap_dma_issue_pending(struct dma_chan *chan)
                 * c->cyclic is used only by audio and in this case the DMA need
                 * to be started without delay.
                 */
-               if (!c->cyclic) {
+               if (c->cyclic || c->start_no_delay) {
+                       omap_dma_start_desc(c);
+               } else {
                        struct omap_dmadev *d = to_omap_dma_dev(chan->device);
                        spin_lock(&d->lock);
                        if (list_empty(&c->node))
                                list_add_tail(&c->node, &d->pending);
                        spin_unlock(&d->lock);
                        tasklet_schedule(&d->task);
-               } else {
-                       omap_dma_start_desc(c);
                }
        }
        spin_unlock_irqrestore(&c->vc.lock, flags);
@@ -923,6 +946,8 @@ static struct dma_async_tx_descriptor *omap_dma_prep_dma_cyclic(
        d->cicr = CICR_DROP_IE;
        if (flags & DMA_PREP_INTERRUPT)
                d->cicr |= CICR_FRAME_IE;
+       else
+               c->start_no_delay = true;
 
        d->csdp = es;
 
@@ -961,8 +986,10 @@ static struct dma_async_tx_descriptor *omap_dma_prep_dma_cyclic(
        return vchan_tx_prep(&c->vc, &d->vd, flags);
 }
 
-static int omap_dma_slave_config(struct omap_chan *c, struct dma_slave_config *cfg)
+static int omap_dma_slave_config(struct dma_chan *chan,
+               struct dma_slave_config *cfg)
 {
+       struct omap_chan *c = to_omap_dma_chan(chan);
        if (cfg->src_addr_width == DMA_SLAVE_BUSWIDTH_8_BYTES ||
            cfg->dst_addr_width == DMA_SLAVE_BUSWIDTH_8_BYTES)
                return -EINVAL;
@@ -972,8 +999,9 @@ static int omap_dma_slave_config(struct omap_chan *c, struct dma_slave_config *c
        return 0;
 }
 
-static int omap_dma_terminate_all(struct omap_chan *c)
+static int omap_dma_terminate_all(struct dma_chan *chan)
 {
+       struct omap_chan *c = to_omap_dma_chan(chan);
        struct omap_dmadev *d = to_omap_dma_dev(c->vc.chan.device);
        unsigned long flags;
        LIST_HEAD(head);
@@ -998,10 +1026,10 @@ static int omap_dma_terminate_all(struct omap_chan *c)
                        omap_dma_stop(c);
        }
 
-       if (c->cyclic) {
-               c->cyclic = false;
-               c->paused = false;
-       }
+       c->cyclic = false;
+       c->paused = false;
+
+       c->start_no_delay = false;
 
        vchan_get_all_descriptors(&c->vc, &head);
        spin_unlock_irqrestore(&c->vc.lock, flags);
@@ -1010,27 +1038,71 @@ static int omap_dma_terminate_all(struct omap_chan *c)
        return 0;
 }
 
-static int omap_dma_pause(struct omap_chan *c)
+static int omap_dma_pause(struct dma_chan *chan)
 {
-       /* Pause/Resume only allowed with cyclic mode */
-       if (!c->cyclic)
-               return -EINVAL;
+       struct omap_chan *c = to_omap_dma_chan(chan);
+       struct omap_dmadev *od = to_omap_dma_dev(chan->device);
+       unsigned long flags;
+       int ret = -EINVAL;
+       bool can_pause;
+
+       spin_lock_irqsave(&od->irq_lock, flags);
 
-       if (!c->paused) {
-               omap_dma_stop(c);
-               c->paused = true;
+       if (!c->desc) {
+               ret = 0;
+               goto out;
        }
 
-       return 0;
+       if (c->cyclic)
+               can_pause = true;
+
+       /*
+        * We do not allow DMA_MEM_TO_DEV transfers to be paused.
+        * From the AM572x TRM, 16.1.4.18 Disabling a Channel During Transfer:
+        * "When a channel is disabled during a transfer, the channel undergoes
+        * an abort, unless it is hardware-source-synchronized …".
+        * A source-synchronised channel is one where the fetching of data is
+        * under control of the device. In other words, a device-to-memory
+        * transfer. So, a destination-synchronised channel (which would be a
+        * memory-to-device transfer) undergoes an abort if the CCR_ENABLE
+        * bit is cleared.
+        * From 16.1.4.20.4.6.2 Abort: "If an abort trigger occurs, the channel
+        * aborts immediately after completion of current read/write
+        * transactions and then the FIFO is cleaned up." The term "cleaned up"
+        * is not defined. TI recommends to check that RD_ACTIVE and WR_ACTIVE
+        * are both clear _before_ disabling the channel, otherwise data loss
+        * will occur.
+        * The problem is that if the channel is active, then device activity
+        * can result in DMA activity starting between reading those as both
+        * clear and the write to DMA_CCR to clear the enable bit hitting the
+        * hardware. If the DMA hardware can't drain the data in its FIFO to the
+        * destination, then data loss "might" occur (say if we write to a UART
+        * and the UART is not accepting any further data).
+        */
+       else if (c->desc->dir == DMA_DEV_TO_MEM)
+               can_pause = true;
+
+       if (can_pause && !c->paused) {
+               ret = omap_dma_stop(c);
+               if (!ret)
+                       c->paused = true;
+       }
+out:
+       spin_unlock_irqrestore(&od->irq_lock, flags);
+
+       return ret;
 }
 
-static int omap_dma_resume(struct omap_chan *c)
+static int omap_dma_resume(struct dma_chan *chan)
 {
-       /* Pause/Resume only allowed with cyclic mode */
-       if (!c->cyclic)
-               return -EINVAL;
+       struct omap_chan *c = to_omap_dma_chan(chan);
+       struct omap_dmadev *od = to_omap_dma_dev(chan->device);
+       unsigned long flags;
+       int ret = -EINVAL;
+
+       spin_lock_irqsave(&od->irq_lock, flags);
 
-       if (c->paused) {
+       if (c->paused && c->desc) {
                mb();
 
                /* Restore channel link register */
@@ -1038,32 +1110,34 @@ static int omap_dma_resume(struct omap_chan *c)
 
                omap_dma_start(c, c->desc);
                c->paused = false;
+               ret = 0;
        }
+       spin_unlock_irqrestore(&od->irq_lock, flags);
 
-       return 0;
+       return ret;
 }
 
 static int omap_dma_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
        unsigned long arg)
 {
-       struct omap_chan *c = to_omap_dma_chan(chan);
        int ret;
 
        switch (cmd) {
        case DMA_SLAVE_CONFIG:
-               ret = omap_dma_slave_config(c, (struct dma_slave_config *)arg);
+               ret = omap_dma_slave_config(chan,
+                               (struct dma_slave_config *)arg);
                break;
 
        case DMA_TERMINATE_ALL:
-               ret = omap_dma_terminate_all(c);
+               ret = omap_dma_terminate_all(chan);
                break;
 
        case DMA_PAUSE:
-               ret = omap_dma_pause(c);
+               ret = omap_dma_pause(chan);
                break;
 
        case DMA_RESUME:
-               ret = omap_dma_resume(c);
+               ret = omap_dma_resume(chan);
                break;
 
        default:
index c9c6c7b4c515cf88b388408d37524e008a2361fd..46d4fff436692c95632e9cbecb1aa6cf21e48a86 100644 (file)
@@ -44,9 +44,9 @@ struct uart_8250_dma {
        size_t                  rx_size;
        size_t                  tx_size;
 
-       unsigned char           tx_running:1;
-       unsigned char           tx_err:1;
-       unsigned char           rx_running:1;
+       unsigned char           tx_running;
+       unsigned char           tx_err;
+       unsigned char           rx_running;
 };
 
 struct old_serial_port {
index 61e47e4399874cba505c1476d1521f5bc14cd0ce..b5f6e3846928cafa21b23ded0e6e954773ea540e 100644 (file)
@@ -106,6 +106,7 @@ struct omap8250_priv {
        struct work_struct qos_work;
        struct uart_8250_dma omap8250_dma;
        spinlock_t rx_dma_lock;
+       bool rx_dma_broken;
 };
 
 static u32 uart_read(struct uart_8250_port *up, u32 reg)
@@ -614,6 +615,11 @@ static void omap_8250_shutdown(struct uart_port *port)
        serial_out(up, UART_OMAP_WER, 0);
        serial8250_do_shutdown(port);
 
+       if (up->dma && priv->delayed_restore) {
+               priv->delayed_restore = 0;
+               omap8250_restore_regs(up);
+       }
+
        pm_runtime_mark_last_busy(port->dev);
        pm_runtime_put_autosuspend(port->dev);
 
@@ -666,6 +672,7 @@ static void __dma_rx_do_complete(struct uart_8250_port *p, bool error)
        struct dma_tx_state     state;
        int                     count;
        unsigned long           flags;
+       int ret;
 
        dma_sync_single_for_cpu(dma->rxchan->device->dev, dma->rx_addr,
                                dma->rx_size, DMA_FROM_DEVICE);
@@ -681,7 +688,10 @@ static void __dma_rx_do_complete(struct uart_8250_port *p, bool error)
 
        count = dma->rx_size - state.residue;
 
-       tty_insert_flip_string(tty_port, dma->rx_buf, count);
+       ret = tty_insert_flip_string(tty_port, dma->rx_buf, count);
+       if (ret != count)
+               pr_err("%s(%d) %d vs %d\n", __func__, __LINE__, ret, count);
+
        p->port.icount.rx += count;
 unlock:
        spin_unlock_irqrestore(&priv->rx_dma_lock, flags);
@@ -702,6 +712,7 @@ static void omap_8250_rx_dma_flush(struct uart_8250_port *p)
        struct omap8250_priv    *priv = p->port.private_data;
        struct uart_8250_dma    *dma = p->dma;
        unsigned long           flags;
+       int ret;
 
        spin_lock_irqsave(&priv->rx_dma_lock, flags);
 
@@ -710,7 +721,9 @@ static void omap_8250_rx_dma_flush(struct uart_8250_port *p)
                return;
        }
 
-       dmaengine_pause(dma->rxchan);
+       ret = dmaengine_pause(dma->rxchan);
+       if (WARN_ON_ONCE(ret))
+               priv->rx_dma_broken = true;
 
        spin_unlock_irqrestore(&priv->rx_dma_lock, flags);
 
@@ -754,6 +767,9 @@ static int omap_8250_rx_dma(struct uart_8250_port *p, unsigned int iir)
                break;
        }
 
+       if (priv->rx_dma_broken)
+               return -EINVAL;
+
        spin_lock_irqsave(&priv->rx_dma_lock, flags);
 
        if (dma->rx_running)
index 143deb62467d57a1f1216d3f418c2bfccb72201c..94622cd67da51bd369cff8eb2323979f4a5fc317 100644 (file)
@@ -25,7 +25,7 @@
  * Byte threshold to limit memory consumption for flip buffers.
  * The actual memory limit is > 2x this amount.
  */
-#define TTYB_DEFAULT_MEM_LIMIT 65536
+#define TTYB_DEFAULT_MEM_LIMIT (65536 * 16)
 
 /*
  * We default to dicing tty buffer allocations to this many characters
index c93efc7fa6677f7acc77c612d9e2abe06340bed1..f4d8a24aadf8b3ac3f7d343c354d3c4852312b90 100644 (file)
@@ -797,7 +797,7 @@ static inline int dmaengine_terminate_all(struct dma_chan *chan)
        return dmaengine_device_control(chan, DMA_TERMINATE_ALL, 0);
 }
 
-static inline int dmaengine_pause(struct dma_chan *chan)
+static inline int __must_check dmaengine_pause(struct dma_chan *chan)
 {
        return dmaengine_device_control(chan, DMA_PAUSE, 0);
 }