hwqueue: keystone: qos: add quality of service driver
Author:     Sandeep Paulraj <s-paulraj@ti.com>
AuthorDate: Tue, 13 Nov 2012 02:25:59 +0000 (21:25 -0500)
Committer:  Murali Karicheri <m-karicheri2@ti.com>
CommitDate: Wed, 3 Dec 2014 16:27:18 +0000 (11:27 -0500)
Signed-off-by: Sandeep Paulraj <s-paulraj@ti.com>
Documentation/devicetree/bindings/hwqueue/keystone-qos.txt [new file with mode: 0644]
drivers/hwqueue/Makefile
drivers/hwqueue/keystone_hwqueue.c
drivers/hwqueue/keystone_hwqueue.h
drivers/hwqueue/keystone_hwqueue_qos.c [new file with mode: 0644]
drivers/hwqueue/keystone_qos.h [new file with mode: 0644]

diff --git a/Documentation/devicetree/bindings/hwqueue/keystone-qos.txt b/Documentation/devicetree/bindings/hwqueue/keystone-qos.txt
new file mode 100644 (file)
index 0000000..61d25b4
--- /dev/null
@@ -0,0 +1,186 @@
+QoS Tree Configuration
+----------------------
+
+The QoS implementation allows for an abstracted tree of scheduler nodes
+represented in device tree form.  For example:
+
+  +-----------+
+  |  inputs   |  . . .
+  +-----------+              +-----------+        +-----------+
+        |       |      |     |  inputs   |        |  inputs   |
+        +-------+------+     +-----------+        +-----------+
+                |                  |                    |
+        +--------------+   +---------------+    +--------------+
+        |     prio 0   |   |    prio 1     |    |    prio 2    |
+        |   unordered  |   |    inputs     |    |    inputs    |
+        +--------------+   +---------------+    +--------------+
+                |                 |                    |
+                +-----------------+--------------------+
+                                  |
+                        +--------------------+
+                        |  strict prio node  |
+                        +--------------------+
+                                  |
+                       output to network transmit
+
+At each node, shaping and dropping parameters may be specified, within the
+constraints outlined in this document.  The following sections detail the
+device tree attributes applicable to this implementation.
+
+QoS Node Attributes
+-------------------
+
+The following attributes are recognized within QoS configuration nodes (an
+illustrative example tree follows this list):
+
+  - "strict-priority" and "weighted-round-robin"
+
+    e.g. strict-priority;
+
+    This attribute specifies the type of scheduling performed at a node.  It
+    is an error to specify both of these attributes on a single node.  If
+    neither attribute is present, the node type defaults to unordered
+    (first come, first served).
+
+  - "weight"
+
+    e.g. weight = <50>;
+
+    This attribute specifies the weight attached to the child node of a
+    weighted-round-robin node.  It is an error to specify this attribute on a
+    node whose parent is not a weighted-round-robin node.
+
+  - "priority"
+
+    e.g. priority = <1>;
+
+    This attribute specifies the priority attached to the child node of a
+    strict-priority node.  It is an error to specify this attribute on a
+    node whose parent is not a strict-priority node.  It is also an error for
+    child nodes of a strict-priority node to have the same priority specified.
+
+  - "byte-units" or "packet-units"
+
+    e.g. byte-units;
+
+    The presence of one of these attributes indicates whether the scheduler
+    accounts for traffic in byte units or in packet units.  If neither
+    attribute is specified for a given node, the accounting mode is inherited
+    from its parent node.  If neither is specified for the root node, the
+    accounting mode defaults to byte units.
+
+  - "output-rate"
+
+    e.g. output-rate = <100000 1000>;
+
+    The first element of this attribute specifies the shaped output rate in
+    bytes/second or packets/second (depending on the accounting mode for the
+    node).  If this attribute is absent, it defaults to infinity (i.e., no
+    shaping).
+
+    The second element of this attribute specifies the maximum accumulated
+    credits in bytes or packets (depending on the accounting mode for the
+    node).  If this attribute is absent, it defaults to infinity (i.e.,
+    accumulate as many credits as possible).
+
+  - "overhead-bytes"
+
+    e.g. overhead-bytes = <24>;
+
+    This attribute specifies a per-packet overhead (in bytes) applied in byte
+    accounting mode, which can be used to account for framing overhead on the
+    wire.  This attribute is inherited from parent nodes if absent.  If it is
+    not defined for the root node, a default value of 24 is used.  The
+    attribute is passed through by inheritance (but ignored) on
+    packet-accounted nodes.
+
+  - "output-queue"
+
+    e.g. output-queue = <645>;
+
+    This specifies the QMSS queue to which output packets are pushed.  This
+    attribute must be defined only for the root node in the QoS tree.  Child
+    nodes in the tree ignore this attribute if it is specified.
+
+  - "input-queues"
+
+    e.g. input-queues = <0 4 8 12 56>;
+
+    This specifies the set of ingress queues that feed into a QoS node.  This
+    attribute must be defined only for leaf nodes in the QoS tree.  Specifying
+    input queues on a non-leaf node is treated as an error, as is the absence
+    of input queues on a leaf node.
+
+  - "stats-class"
+
+    e.g. stats-class = "linux-best-effort";
+
+    The stats-class attribute ties one or more input stage nodes to a set of
+    traffic statistics (forwarded/discarded bytes, etc.).  The system has a
+    limited set of statistics blocks (up to 48), and an attempt to exceed this
+    count is an error.  This attribute is legal only for leaf nodes, and a
+    stats-class attribute on an intermediate node will be treated as an error.
+
+  - "drop-policy"
+
+    The drop-policy attribute specifies a drop policy to apply to a QoS
+    node (tail drop, random early drop, no drop, etc.) when the traffic
+    pattern exceeds specified parameters.  The drop-policy parameters are
+    configured separately within the device tree (see the "Traffic Drop
+    Policy Attributes" section below).  This attribute defaults to "no drop"
+    for applicable input stage nodes.
+
+    If a node in the QoS tree specifies a drop-policy, it is an error for
+    any of its descendant nodes (children, children of children, ...) to be
+    of the weighted-round-robin or strict-priority type.
+
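+A complete configuration is expressed by nesting QoS nodes into a tree.  As
+an illustrative sketch only (the node names, queue numbers and rate values
+below are arbitrary, and the location of the tree within the device tree is
+not shown here), a small strict-priority tree using the attributes above
+might look like:
+
+    qos-tree {
+        strict-priority;
+        byte-units;
+        overhead-bytes = <24>;
+        output-rate = <31250000 25000>;
+        output-queue = <645>;
+
+        high {
+            priority = <0>;
+            input-queues = <8010 8011>;
+            stats-class = "linux-high";
+        };
+
+        best-effort {
+            priority = <1>;
+            input-queues = <8012 8013 8014>;
+            stats-class = "linux-best-effort";
+        };
+    };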
+
+Traffic Drop Policy Attributes
+------------------------------
+
+The following attributes are recognized within traffic drop policy nodes (an
+illustrative example node follows this list):
+
+  - "byte-units" or "packet-units"
+
+    e.g. byte-units;
+
+    The presence of one of these attributes indicates whether the dropper
+    accounts for traffic in byte or packet units.  If neither attribute is
+    specified, the accounting defaults to byte units.  Policies that use
+    random early drop must use byte units.
+
+  - "limit"
+
+    e.g. limit = <10000>;
+
+    Instantaneous queue depth limit (in bytes or packets) at which tail drop
+    takes effect.  This may be specified in combination with random early
+    drop, which operates on the average queue depth instead of the
+    instantaneous depth.  The absence of this attribute, or a zero value,
+    disables tail drop behavior.
+
+  - "random-early-drop"
+
+    e.g. random-early-drop = <1000 50000 2000>;
+
+    The random-early-drop attribute specifies the following three parameters
+    in order:
+
+      - low threshold:  No packets are dropped when the average queue depth
+                        is below this threshold (in bytes).  This parameter
+                        must be specified.
+
+      - high threshold: All packets are dropped when the average queue depth
+                        is above this threshold (in bytes).  This parameter
+                        is optional, and defaults to twice the low threshold.
+
+      - half-life:      Specified in milliseconds.  This is used to calculate
+                        the average queue depth.  This parameter is optional
+                        and defaults to 2000.
+
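+As an illustrative sketch only (the node name and all numeric values below
+are arbitrary, and the location of drop policy nodes within the device tree
+is not shown here), a random early drop policy might be described as:
+
+    red-policy {
+        byte-units;
+        limit = <65536>;
+        random-early-drop = <16384 49152 2000>;
+    };
+
+QoS tree nodes then select such a policy through their drop-policy
+attribute, described in the "QoS Node Attributes" section above.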
+
+Internally Calculated
+---------------------
+ - throttle rate - might want to provide device tree override as well
+ - wrr/sp/be queue counts
+ - egress queue number (except final output)
+ - RNG seeds
+
index b6d12494198098ec2b50cdd64ecabb064c557340..3d7b76165949efc2099b442619d26735db89c884 100644 (file)
@@ -5,4 +5,5 @@
 obj-$(CONFIG_HWQUEUE)          += hwqueue_core.o
 obj-$(CONFIG_HWQUEUE_TEST)     += hwqueue_test.o
 obj-$(CONFIG_HWQUEUE_KEYSTONE) += keystone_hwqueue.o           \
-                                  keystone_hwqueue_acc.o
+                                  keystone_hwqueue_acc.o       \
+                                  keystone_hwqueue_qos.o
index cb7ed1a66376b47565d0043b439623ec70203a53..a5b3bbafacbc0b6cd3ebc758b9cbbb21c139b33e 100644 (file)
@@ -708,6 +708,14 @@ static int khwq_setup_queue_range(struct khwq_device *kdev,
                }
        }
 
+       if (of_get_property(node, "qos-cfg", NULL)) {
+               ret = khwq_init_qos_range(kdev, node, range);
+               if (ret < 0) {
+                       devm_kfree(dev, range);
+                       return ret;
+               }
+       }
+
        /* set threshold to 1, and flush out the queues */
        for_each_qmgr(kdev, qmgr) {
                start = max(qmgr->start_queue, range->queue_base);
index c8903a8eb7dad029dc8668117701b7475150510a..4721a8752354d58f4ad7faec146287a7b00371c5 100644 (file)
@@ -135,20 +135,6 @@ struct khwq_link_ram_block {
        size_t           size;
 };
 
-struct khwq_pdsp_info {
-       const char                              *name;
-       struct khwq_reg_pdsp_regs  __iomem      *regs;
-       union {
-               void __iomem                            *command;
-               struct khwq_reg_acc_command __iomem     *acc_command;
-       };
-       void __iomem                            *intd;
-       u32 __iomem                             *iram;
-       const char                              *firmware;
-       u32                                      id;
-       struct list_head                         list;
-};
-
 struct khwq_acc_info {
        u32                      pdsp_id;
        u32                      start_channel;
@@ -170,6 +156,22 @@ struct khwq_acc_channel {
        atomic_t                 retrigger_count;
 };
 
+struct khwq_pdsp_info {
+       const char                              *name;
+       struct khwq_reg_pdsp_regs  __iomem      *regs;
+       union {
+               void __iomem                            *command;
+               struct khwq_reg_acc_command __iomem     *acc_command;
+               u32 __iomem                             *qos_command;
+       };
+       void __iomem                            *intd;
+       u32 __iomem                             *iram;
+       const char                              *firmware;
+       u32                                      id;
+       struct list_head                         list;
+       struct khwq_qos_info                    *qos_info;
+};
+
 struct khwq_range_ops;
 
 struct khwq_range_info {
@@ -183,6 +185,7 @@ struct khwq_range_info {
        struct khwq_range_ops   *ops;
        struct khwq_acc_info     acc_info;
        struct khwq_acc_channel *acc;
+       struct khwq_qos_info    *qos_info;
 };
 
 struct khwq_range_ops {
@@ -267,6 +270,9 @@ struct khwq_instance {
 #define for_each_qmgr(kdev, qmgr)                              \
        list_for_each_entry(qmgr, &kdev->qmgrs, list)
 
+#define for_each_policy(info, policy)                          \
+       list_for_each_entry(policy, &info->drop_policies, list)
+
 static inline struct khwq_pdsp_info *
 khwq_find_pdsp(struct khwq_device *kdev, unsigned pdsp_id)
 {
@@ -297,4 +303,7 @@ khwq_find_qmgr(struct hwqueue_instance *inst)
 int khwq_init_acc_range(struct khwq_device *kdev, struct device_node *node,
                        struct khwq_range_info *range);
 
+int khwq_init_qos_range(struct khwq_device *kdev, struct device_node *node,
+                       struct khwq_range_info *range);
+
 #endif /* __KEYSTONE_HWQUEUE_H__ */
diff --git a/drivers/hwqueue/keystone_hwqueue_qos.c b/drivers/hwqueue/keystone_hwqueue_qos.c
new file mode 100644 (file)
index 0000000..906bbad
--- /dev/null
@@ -0,0 +1,2774 @@
+/*
+ * Keystone hardware queue QoS driver
+ *
+ * Copyright (C) 2011 Texas Instruments Incorporated - http://www.ti.com
+ * Contact: Prabhu Kuttiyam <pkuttiyam@ti.com>
+ *         Cyril Chemparathy <cyril@ti.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/device.h>
+#include <linux/io.h>
+#include <linux/bitops.h>
+#include <linux/slab.h>
+#include <linux/delay.h>
+#include <linux/spinlock.h>
+#include <linux/hwqueue.h>
+#include <linux/ktree.h>
+#include <linux/platform_device.h>
+#include <linux/dma-mapping.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/of_address.h>
+#include <linux/firmware.h>
+#include <linux/interrupt.h>
+
+#include "hwqueue_internal.h"
+#include "keystone_hwqueue.h"
+#include "keystone_qos.h"
+
+#define        KHWQ_QOS_TIMER_INTERVAL (HZ * 10)
+#define BITMASK(s, n)          (BITS(n) << (s))
+
+static int khwq_qos_write_cmd(struct khwq_qos_info *info, u32 cmd)
+{
+       struct khwq_pdsp_info *pdsp;
+       struct khwq_device *kdev;
+       unsigned long timeo;
+       u32 result;
+
+       pdsp = info->pdsp;
+       kdev = info->kdev;
+
+       dev_dbg(kdev->dev, "=== command <-- %08x\n", cmd);
+       __raw_writel(cmd, &pdsp->qos_command[0]);
+
+       timeo = jiffies + msecs_to_jiffies(QOS_COMMAND_TIMEOUT);
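+       /* wait for the command byte (low 8 bits) to clear, or time out */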
+       do {
+               result = __raw_readl(&pdsp->qos_command[0]);
+               udelay(QOS_COMMAND_DELAY);
+       } while ((result & 0xff) && time_before(jiffies, timeo));
+
+       if (result & 0xff) {
+               dev_err(kdev->dev, "=== result --> lockup [%x]!\n",
+                       result);
+               return -EBUSY;
+       }
+
+       result = __raw_readl(&pdsp->qos_command[1]);
+
+       if (result != QOS_RETCODE_SUCCESS) {
+               dev_err(kdev->dev, "=== result --> %08x ! failed\n", result);
+               return -EIO;
+       } else {
+               dev_dbg(kdev->dev, "=== result --> %08x\n", result);
+               return 0;
+       }
+}
+
+static void khwq_qos_dump_words(struct khwq_device *kdev, u32 ofs, u32 *data,
+                               int words, const char *dir)
+{
+       switch (words) {
+       case 4: dev_dbg(kdev->dev, "==== [%04x] %s %08x %08x %08x %08x\n",
+                       ofs, dir, data[0], data[1], data[2], data[3]);
+               break;
+       case 3: dev_dbg(kdev->dev, "==== [%04x] %s %08x %08x %08x\n",
+                       ofs, dir, data[0], data[1], data[2]);
+               break;
+       case 2: dev_dbg(kdev->dev, "==== [%04x] %s %08x %08x\n",
+                       ofs, dir, data[0], data[1]);
+               break;
+       case 1: dev_dbg(kdev->dev, "==== [%04x] %s %08x\n",
+                       ofs, dir, data[0]);
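+               /* fall through */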
+       case 0:
+               break;
+       default:
+               BUG();
+               break;
+       }
+}
+
+static void khwq_qos_write_words(struct khwq_device *kdev, u32 ofs,
+                                u32 __iomem *out, u32 *in, int words)
+{
+       int i;
+       khwq_qos_dump_words(kdev, ofs, in, words, "<--");
+       for (i = 0; i < words; i++)
+               __raw_writel(in[i], &out[i]);
+}
+
+static void khwq_qos_read_words(struct khwq_device *kdev, u32 ofs, u32 *out,
+                               u32 __iomem *in, int words)
+{
+       int i;
+       for (i = 0; i < words; i++)
+               out[i] = __raw_readl(&in[i]);
+       khwq_qos_dump_words(kdev, ofs, out, words, "-->");
+}
+
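+/*
+ * Stage up to four words at a time into the PDSP shadow area, then issue a
+ * QOS_CMD_PORT_SHADOW command to copy the shadow area into the active
+ * configuration.
+ */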
+static int khwq_qos_write_shadow(struct khwq_qos_info *info, u32 options,
+                                u32 ofs, u32 *data, u32 words)
+{
+       u32 __iomem *out = info->pdsp->command + QOS_SHADOW_OFFSET + ofs;
+       struct khwq_device *kdev = info->kdev;
+       int words_left = words;
+       u32 command;
+       int error;
+
+       assert_spin_locked(&info->lock);
+
+       while (data && words_left) {
+               words = min(words_left, 4);
+               khwq_qos_write_words(kdev, ofs, out, data, words);
+               ofs        += words * sizeof(u32);
+               out        += words;
+               data       += words;
+               words_left -= words;
+       }
+
+       command = QOS_CMD_PORT_SHADOW | QOS_COPY_SHADOW_TO_ACTIVE | options;
+       error = khwq_qos_write_cmd(info, command);
+
+       return error;
+}
+
+static int khwq_qos_read_shadow(struct khwq_qos_info *info, u32 options,
+                               u32 ofs, u32 *data, u32 words)
+{
+       u32 __iomem *in = info->pdsp->command + QOS_SHADOW_OFFSET + ofs;
+       struct khwq_device *kdev = info->kdev;
+       int words_left = words;
+       u32 command;
+       int error;
+
+       assert_spin_locked(&info->lock);
+
+       command = QOS_CMD_PORT_SHADOW | QOS_COPY_ACTIVE_TO_SHADOW | options;
+       error = khwq_qos_write_cmd(info, command);
+       if (error)
+               return error;
+
+       while (data && words_left) {
+               words = min(words_left, 4);
+               khwq_qos_read_words(kdev, ofs, data, in, words);
+               ofs        += words * sizeof(u32);
+               in         += words;
+               data       += words;
+               words_left -= words;
+       }
+
+       return 0;
+}
+
+static int khwq_qos_program_drop_sched(struct khwq_qos_info *info)
+{
+       u32 config[4];
+
+       config[0] = (info->drop_cfg.qos_ticks << 8 |
+                    info->drop_cfg.drop_ticks);
+       config[1] = info->drop_cfg.seed[0];
+       config[2] = info->drop_cfg.seed[1];
+       config[3] = info->drop_cfg.seed[2];
+
+       return khwq_qos_write_shadow(info, QOS_DROP_SCHED_CFG << 24, 0,
+                                    config, 4);
+}
+
+static int khwq_qos_request_stats(struct khwq_qos_info *info, int index)
+{
+       struct khwq_qos_stats *stats = &info->stats;
+       struct khwq_device *kdev = info->kdev;
+       u32 __iomem *ofs;
+       u64 *to;
+       u32 command;
+       int error;
+
+       spin_lock_bh(&info->lock);
+
+       ofs = info->pdsp->command + QOS_STATS_OFFSET;
+
+       command = (QOS_CMD_STATS_REQUEST | (0x8f << 8) |
+                       (index << 16));
+       error = khwq_qos_write_cmd(info, command);
+       if (error) {
+               dev_err(kdev->dev, "failed to request stats for block %d\n",
+                       index);
+               goto out;
+       }
+
+       to = (stats->data + index * 0x20);
+
+       to[0] += __raw_readl(&ofs[0]);
+       to[1] += __raw_readl(&ofs[2]);
+       to[2] += __raw_readl(&ofs[4]);
+       to[3] += __raw_readl(&ofs[5]);
+
+       dev_dbg(kdev->dev, "%d %llx %llx %llx %llx\n",
+                               index, to[0], to[1], to[2], to[3]);
+
+out:
+       spin_unlock_bh(&info->lock);
+       return error;
+}
+
+static int khwq_qos_update_stats(struct khwq_qos_info *info)
+{
+       struct khwq_qos_stats *stats = &info->stats;
+       int i, error;
+
+       for (i = (stats->count - 1); i >= stats->start; i--) {
+               if (!test_bit(i, stats->avail)) {
+                       error = khwq_qos_request_stats(info, i);
+                       if (error)
+                               return error;
+               }
+       }
+
+       return 0;
+}
+
+static void khwq_qos_timer(unsigned long arg)
+{
+       struct khwq_qos_info *info = (struct khwq_qos_info *)arg;
+       struct khwq_device *kdev = info->kdev;
+       int error;
+
+       error = khwq_qos_update_stats(info);
+       if (error) {
+               dev_err(kdev->dev, "error updating stats\n");
+               return;
+       }
+
+       info->timer.expires = jiffies + KHWQ_QOS_TIMER_INTERVAL;
+       add_timer(&info->timer);
+}
+
+struct khwq_qos_attr {
+       struct attribute        attr;
+       int                     offset;
+};
+
+#define __KHWQ_QOS_STATS_ATTR(_name, _offset)                          \
+(struct khwq_qos_attr) {                                       \
+       .attr = { .name = __stringify(_name), .mode = S_IRUGO },        \
+       .offset = _offset,                                              \
+}
+
+#define KHWQ_QOS_STATS_ATTR(_name, _offset)            \
+       &__KHWQ_QOS_STATS_ATTR(_name, _offset).attr
+
+static struct attribute *khwq_qos_stats_attrs[] = {
+       KHWQ_QOS_STATS_ATTR(bytes_forwarded,    0x00),
+       KHWQ_QOS_STATS_ATTR(bytes_discarded,    0x08),
+       KHWQ_QOS_STATS_ATTR(packets_forwarded,  0x10),
+       KHWQ_QOS_STATS_ATTR(packets_discarded,  0x18),
+       NULL
+};
+
+static ssize_t khwq_qos_stats_attr_show(struct kobject *kobj,
+                                       struct attribute *_attr, char *buf)
+{
+       struct khwq_qos_attr *attr;
+       struct khwq_qos_stats_class *class;
+       struct khwq_qos_info *info;
+       struct khwq_qos_stats *stats;
+       int offset, index, error;
+       u64 *val;
+
+       class = container_of(kobj, struct khwq_qos_stats_class, kobj);
+       attr = container_of(_attr, struct khwq_qos_attr, attr);
+       info = class->info;
+       stats = &info->stats;
+       index = class->stats_block_idx;
+       offset = attr->offset;
+
+       error = khwq_qos_request_stats(info, index);
+       if (error)
+               return error;
+
+       val = stats->data + (index * 0x20) + offset;
+
+       return snprintf(buf, PAGE_SIZE, "%lld\n", *val);
+}
+
+static struct kobj_type khwq_qos_stats_ktype = {
+       .sysfs_ops = &(struct sysfs_ops) { .show = khwq_qos_stats_attr_show },
+       .default_attrs = khwq_qos_stats_attrs,
+};
+
+static void khwq_qos_free_drop_policy(struct khwq_device *kdev,
+                                     struct khwq_qos_drop_policy *policy)
+{
+       list_del(&policy->list);
+       kobject_del(&policy->kobj);
+       kobject_put(&policy->kobj);
+       devm_kfree(kdev->dev, policy);
+}
+
+static void khwq_qos_free_drop_policies(struct khwq_device *kdev,
+                                       struct khwq_qos_info *info)
+{
+       struct khwq_qos_drop_policy *policy;
+
+       while (!list_empty(&info->drop_policies)) {
+               policy = list_first_entry(&info->drop_policies,
+                                         struct khwq_qos_drop_policy,
+                                         list);
+               khwq_qos_free_drop_policy(kdev, policy);
+       }
+}
+
+static int khwq_program_drop_policy(struct khwq_qos_info *info,
+                                   struct khwq_qos_drop_policy *policy,
+                                   bool sync)
+{
+       struct khwq_device *kdev = info->kdev;
+       int error;
+       u32 val, time_constant, diff;
+       u32 thresh_recip;
+
+       val = (policy->acct == QOS_BYTE_ACCT) ? BIT(0) : 0;
+
+       error = khwq_qos_set_drop_cfg_unit_flags(kdev, policy->drop_cfg_idx,
+                                                val, false);
+       if (error)
+               return error;
+
+       error = khwq_qos_set_drop_cfg_mode(kdev, policy->drop_cfg_idx,
+                                          policy->mode, false);
+       if (error)
+               return error;
+
+       error = khwq_qos_set_drop_cfg_tail_thresh(kdev, policy->drop_cfg_idx,
+                                                 policy->limit, false);
+       if (error)
+               return error;
+
+       if (policy->mode == QOS_TAILDROP)
+               return 0;
+
+       error = khwq_qos_set_drop_cfg_red_low(kdev, policy->drop_cfg_idx,
+                                             policy->red_low, false);
+       if (error)
+               return error;
+
+       error = khwq_qos_set_drop_cfg_red_high(kdev, policy->drop_cfg_idx,
+                                              policy->red_high, false);
+
+       val = policy->half_life * (info->ticks_per_sec / 1000);
+       time_constant = ilog2((3 * val) + 1);
+
+       error = khwq_qos_set_drop_cfg_time_const(kdev, policy->drop_cfg_idx,
+                                                  time_constant, false);
+
+       diff = ((policy->red_high - policy->red_low) >> time_constant);
+
+       thresh_recip = 1 << 31;
+       thresh_recip /= (diff >> 1);
+
+       error = khwq_qos_set_drop_cfg_thresh_recip(kdev, policy->drop_cfg_idx,
+                                                  thresh_recip, false);
+
+       return error;
+}
+
+#define __KHWQ_QOS_DROP_POLICY_ATTR(_name)                     \
+(struct khwq_qos_attr) {                                       \
+       .attr = { .name = __stringify(_name), .mode = S_IRUGO|S_IWUSR },\
+       .offset = offsetof(struct khwq_qos_drop_policy, _name), \
+}
+
+#define KHWQ_QOS_DROP_POLICY_ATTR(_name)               \
+       &__KHWQ_QOS_DROP_POLICY_ATTR(_name).attr
+
+static struct attribute *khwq_qos_drop_policy_attrs[] = {
+       KHWQ_QOS_DROP_POLICY_ATTR(limit),
+       KHWQ_QOS_DROP_POLICY_ATTR(red_low),
+       KHWQ_QOS_DROP_POLICY_ATTR(red_high),
+       KHWQ_QOS_DROP_POLICY_ATTR(half_life),
+       KHWQ_QOS_DROP_POLICY_ATTR(max_drop_prob),
+       NULL
+};
+
+static struct attribute *khwq_qos_drop_policy_taildrop_attrs[] = {
+       KHWQ_QOS_DROP_POLICY_ATTR(limit),
+       NULL
+};
+
+static ssize_t khwq_qos_drop_policy_attr_show(struct kobject *kobj,
+                                             struct attribute *_attr,
+                                             char *buf)
+{
+       struct khwq_qos_drop_policy *policy;
+       struct khwq_qos_attr *attr;
+       int offset;
+       u32 *val;
+
+       policy = container_of(kobj, struct khwq_qos_drop_policy, kobj);
+       attr = container_of(_attr, struct khwq_qos_attr, attr);
+       offset = attr->offset;
+
+       val = (((void *)policy) + offset);
+
+       return snprintf(buf, PAGE_SIZE, "%u\n", *val);
+}
+
+static ssize_t khwq_qos_drop_policy_attr_store(struct kobject *kobj,
+                                              struct attribute *_attr,
+                                              const char *buf, size_t size)
+{
+       struct khwq_qos_drop_policy *policy;
+       struct khwq_qos_info *info;
+       struct khwq_device *kdev;
+       struct khwq_qos_attr *attr;
+       int offset, error;
+       unsigned int field;
+       u32 *val;
+
+       policy = container_of(kobj, struct khwq_qos_drop_policy, kobj);
+       attr = container_of(_attr, struct khwq_qos_attr, attr);
+       offset = attr->offset;
+       info = policy->info;
+       kdev = info->kdev;
+
+       error = kstrtouint(buf, 0, &field);
+       if (error)
+               return error;
+
+       val = (((void *)policy) + offset);
+       *val = field;
+
+       error = khwq_program_drop_policy(info, policy, false);
+       if (error) {
+               policy->drop_cfg_idx = -1;
+               khwq_qos_free_drop_policy(kdev, policy);
+               return error;
+       }
+
+       error = khwq_qos_sync_drop_cfg(kdev, -1);
+       if (error)
+               dev_err(kdev->dev, "failed to sync drop configs\n");
+
+       return size;
+}
+
+static struct kobj_type khwq_qos_policy_taildrop_ktype = {
+       .sysfs_ops = &(struct sysfs_ops) {
+               .show   = khwq_qos_drop_policy_attr_show,
+               .store  = khwq_qos_drop_policy_attr_store,
+       },
+       .default_attrs = khwq_qos_drop_policy_taildrop_attrs,
+};
+
+static struct kobj_type khwq_qos_policy_ktype = {
+       .sysfs_ops = &(struct sysfs_ops) {
+               .show   = khwq_qos_drop_policy_attr_show,
+               .store  = khwq_qos_drop_policy_attr_store,
+       },
+       .default_attrs = khwq_qos_drop_policy_attrs,
+};
+
+static int khwq_program_drop_policies(struct khwq_qos_info *info)
+{
+       struct khwq_device *kdev = info->kdev;
+       struct khwq_qos_drop_policy *policy;
+       int error = 0;
+
+       for_each_policy(info, policy) {
+               if (!policy->usecount && policy != info->default_drop_policy)
+                       continue;
+
+               policy->drop_cfg_idx = khwq_qos_alloc_drop_cfg(kdev);
+               if (policy->drop_cfg_idx < 0) {
+                       dev_err(kdev->dev, "too many drop policies\n");
+                       error = -EOVERFLOW;
+                       break;
+               }
+
+               error = khwq_program_drop_policy(info, policy, false);
+               if (error) {
+                       policy->drop_cfg_idx = -1;
+                       khwq_qos_free_drop_policy(kdev, policy);
+                       break;
+               }
+
+               dev_dbg(kdev->dev, "added policy %s\n", policy->name);
+       }
+
+       if (!error) {
+               error = khwq_qos_sync_drop_cfg(kdev, -1);
+               if (error)
+                       dev_err(kdev->dev, "failed to sync drop configs\n");
+       }
+
+       return error;
+}
+
+static struct khwq_qos_stats_class *
+khwq_qos_init_stats_class(struct khwq_qos_info *info, const char *name)
+{
+       struct khwq_device *kdev = info->kdev;
+       struct khwq_qos_stats_class *class;
+       struct khwq_qos_stats *stats = &info->stats;
+       int idx, error;
+
+       class = devm_kzalloc(kdev->dev, sizeof(*class), GFP_KERNEL);
+       if (!class)
+               return NULL;
+       class->name = name;
+       class->info = info;
+
+       spin_lock_bh(&info->lock);
+
+       idx = find_last_bit(stats->avail, stats->count);
+       if (idx < stats->count) {
+               clear_bit(idx, stats->avail);
+       } else {
+               spin_unlock_bh(&info->lock);
+               return NULL;
+       }
+
+       spin_unlock_bh(&info->lock);
+
+       class->stats_block_idx = idx;
+
+       list_add_tail(&class->list, &info->stats_classes);
+
+       error = kobject_init_and_add(&class->kobj, &khwq_qos_stats_ktype,
+                               info->kobj_stats, class->name);
+       if (error) {
+               dev_err(kdev->dev, "failed to create sysfs file\n");
+               return NULL;
+       }
+
+       return class;
+}
+
+static void khwq_qos_free_stats_class(struct khwq_qos_info *info,
+                                     struct khwq_qos_stats_class *class)
+{
+       struct khwq_device *kdev = info->kdev;
+       struct khwq_qos_stats *stats = &info->stats;
+
+       spin_lock_bh(&info->lock);
+
+       list_del(&class->list);
+       set_bit(class->stats_block_idx, stats->avail);
+
+       spin_unlock_bh(&info->lock);
+
+       kobject_del(&class->kobj);
+       kobject_put(&class->kobj);
+       devm_kfree(kdev->dev, class);
+}
+
+static void khwq_qos_free_stats_classes(struct khwq_device *kdev,
+                                       struct khwq_qos_info *info)
+{
+       struct khwq_qos_stats_class *class;
+
+       while (!list_empty(&info->stats_classes)) {
+               class = list_first_entry(&info->stats_classes,
+                                        struct khwq_qos_stats_class,
+                                        list);
+               khwq_qos_free_stats_class(info, class);
+       }
+}
+
+static struct khwq_qos_stats_class *
+khwq_qos_find_stats_class(struct khwq_qos_info *info, const char *name)
+{
+       struct khwq_qos_stats_class *class;
+       list_for_each_entry(class, &info->stats_classes, list)
+               if (!strcmp(class->name, name))
+                       return class;
+       return NULL;
+}
+
+static struct khwq_qos_drop_policy *
+khwq_qos_find_drop_policy(struct khwq_qos_info *info, const char *name)
+{
+       struct khwq_qos_drop_policy *policy;
+       list_for_each_entry(policy, &info->drop_policies, list)
+               if (!strcmp(policy->name, name))
+                       return policy;
+       return NULL;
+}
+
+static struct khwq_qos_drop_policy *
+khwq_qos_get_drop_policy(struct khwq_device *kdev, struct khwq_qos_info *info,
+                        struct device_node *node)
+{
+       struct khwq_qos_drop_policy *policy;
+       u32 length, elements, temp[4];
+       const char *name;
+       int error;
+
+       policy = devm_kzalloc(kdev->dev, sizeof(*policy), GFP_KERNEL);
+       if (!policy)
+               return ERR_PTR(-ENOMEM);
+
+       policy->info = info;
+
+       error = of_property_read_string(node, "label", &name);
+       policy->name = (error < 0) ? node->name : name;
+       if (khwq_qos_find_drop_policy(info, policy->name)) {
+               dev_err(kdev->dev, "duplicate drop policy %s\n", policy->name);
+               devm_kfree(kdev->dev, policy);
+               return ERR_PTR(-EINVAL);
+       }
+
+       policy->mode = QOS_TAILDROP;
+
+       policy->acct = QOS_BYTE_ACCT;
+       if (of_find_property(node, "packet-units", NULL))
+               policy->acct = QOS_PACKET_ACCT;
+
+       error = of_property_read_u32(node, "limit", &policy->limit);
+       if (error < 0)
+               policy->limit = 0;
+
+       if (!of_find_property(node, "random-early-drop", &length))
+               goto done;
+
+       policy->mode = QOS_RED;
+
+       if (policy->acct == QOS_PACKET_ACCT) {
+               dev_err(kdev->dev, "red policy must account bytes\n");
+               devm_kfree(kdev->dev, policy);
+               return ERR_PTR(-EINVAL);
+       }
+
+       elements = length / sizeof(u32);
+       if (elements < 1 || elements > 4) {
+               dev_err(kdev->dev, "invalid number of elements in red info\n");
+               devm_kfree(kdev->dev, policy);
+               return ERR_PTR(-EINVAL);
+       }
+
+       error = of_property_read_u32_array(node, "random-early-drop", temp,
+                                          elements);
+       if (error < 0) {
+               dev_err(kdev->dev, "could not obtain red info\n");
+               devm_kfree(kdev->dev, policy);
+               return ERR_PTR(-EINVAL);
+       }
+
+       policy->red_low = temp[0];
+
+       policy->red_high = 2 * policy->red_low;
+       if (elements > 1)
+               policy->red_high = temp[1];
+
+       policy->max_drop_prob = 20;
+       if (elements > 2)
+               policy->max_drop_prob = temp[2];
+       if (policy->max_drop_prob >= 100) {
+               dev_warn(kdev->dev, "invalid max drop prob %d on policy %s, taking defaults\n",
+                        policy->max_drop_prob, policy->name);
+               policy->max_drop_prob = 20;
+       }
+
+       policy->half_life = 2000;
+       if (elements > 3)
+               policy->half_life = temp[3];
+
+done:
+       if (of_find_property(node, "default", NULL)) {
+               if (info->default_drop_policy) {
+                       dev_warn(kdev->dev, "duplicate default policy %s\n",
+                                policy->name);
+               } else {
+                       info->default_drop_policy = policy;
+               }
+       }
+
+       if (policy->mode == QOS_RED)
+               error = kobject_init_and_add(&policy->kobj,
+                                    &khwq_qos_policy_ktype,
+                                    info->kobj_policies, policy->name);
+       else
+               error = kobject_init_and_add(&policy->kobj,
+                                    &khwq_qos_policy_taildrop_ktype,
+                                    info->kobj_policies, policy->name);
+
+       if (error) {
+               dev_err(kdev->dev, "failed to create sysfs "
+                               "entries for policy %s\n", policy->name);
+               devm_kfree(kdev->dev, policy);
+               return ERR_PTR(-EINVAL);
+       }
+
+       return policy;
+}
+
+static int khwq_qos_get_drop_policies(struct khwq_device *kdev,
+                                     struct khwq_qos_info *info,
+                                     struct device_node *node)
+{
+       struct khwq_qos_drop_policy *policy;
+       struct device_node *child;
+       int error = 0;
+
+       for_each_child_of_node(node, child) {
+               policy = khwq_qos_get_drop_policy(kdev, info, child);
+               if (IS_ERR_OR_NULL(policy)) {
+                       error = PTR_ERR(policy);
+                       break;
+               }
+               list_add_tail(&policy->list, &info->drop_policies);
+       }
+
+       if (!error && !info->default_drop_policy) {
+               dev_err(kdev->dev, "must specify a default drop policy!\n");
+               error = -ENOENT;
+       }
+
+       if (!error)
+               return 0;
+
+       khwq_qos_free_drop_policies(kdev, info);
+
+       return error;
+}
+
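+/*
+ * The 'idx' values passed around below are global QoS identifiers built by
+ * khwq_qos_make_id(): they combine the owning PDSP id with an index into
+ * that PDSP's shadow block, and are unpacked with khwq_qos_id_to_pdsp()
+ * and khwq_qos_id_to_idx().
+ */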
+static struct khwq_qos_shadow *
+khwq_find_shadow(struct khwq_device *kdev, enum khwq_qos_shadow_type type,
+                int idx, int offset, bool internal)
+{
+       struct khwq_qos_shadow *shadow;
+       struct khwq_pdsp_info *pdsp;
+       struct khwq_qos_info *info;
+
+       pdsp = khwq_find_pdsp(kdev, khwq_qos_id_to_pdsp(idx));
+       if (!pdsp || !pdsp->qos_info) {
+               dev_err(kdev->dev, "cannot shadow, type %d, pdsp %p, info %p\n",
+                       type, pdsp, pdsp ? pdsp->qos_info : NULL);
+               return NULL;
+       }
+
+       info = pdsp->qos_info;
+       shadow = &info->shadows[type];
+
+       idx = khwq_qos_id_to_idx(idx);
+       if (idx >= shadow->count || offset >= shadow->size) {
+               dev_err(kdev->dev, "bad shadow access, idx %d, count %d, "
+                       "offset %d, size %d\n",
+                       idx, shadow->count, offset, shadow->size);
+               return NULL;
+       }
+
+       if (!internal && test_bit(idx, shadow->avail)) {
+               dev_err(kdev->dev, "idx %d not in use\n", idx);
+               return NULL;
+       }
+
+       return shadow;
+}
+
+int khwq_qos_get(struct khwq_device *kdev, enum khwq_qos_shadow_type type,
+                  const char *name, int idx, int offset, int startbit,
+                  int nbits, u32 *value)
+{
+       struct khwq_qos_shadow *shadow;
+       u32 *element;
+
+       shadow = khwq_find_shadow(kdev, type, idx, offset, false);
+       if (WARN_ON(!shadow))
+               return -EINVAL;
+
+       idx = khwq_qos_id_to_idx(idx);
+       offset += idx * shadow->size;
+       element = shadow->data + offset;
+       *value = (*element >> startbit) & BITS(nbits);
+       dev_dbg(kdev->dev, "=== %s(%d) --> %x\n", name, idx, *value);
+       return 0;
+}
+
+int khwq_qos_set(struct khwq_device *kdev, enum khwq_qos_shadow_type type,
+                  const char *name, int idx, int offset, int startbit,
+                  int nbits, bool sync, u32 value, bool internal)
+{
+       struct khwq_qos_shadow *shadow;
+       u32 *element;
+       u32 outval;
+
+       shadow = khwq_find_shadow(kdev, type, idx, offset, internal);
+       if (WARN_ON(!shadow))
+               return -EINVAL;
+
+       idx = khwq_qos_id_to_idx(idx);
+       offset += idx * shadow->size;
+       element = shadow->data + offset;
+       outval  = *element & ~BITMASK(startbit, nbits);
+       WARN_ON(value & ~BITS(nbits));
+       outval |= (value & BITS(nbits)) << startbit;
+       dev_dbg(kdev->dev, "=== %s(%d) <-- %x [%08x --> %08x]\n", name, idx,
+               value, *element, outval);
+       *element = outval;
+
+       set_bit(idx, shadow->dirty);
+       if (sync)
+               return shadow->sync ? shadow->sync(shadow, idx) : -ENOSYS;
+       return 0;
+}
+
+int khwq_qos_control(struct khwq_device *kdev, enum khwq_qos_shadow_type type,
+                      enum khwq_qos_control_type ctrl, int idx, u32 arg,
+                      bool internal)
+{
+       struct khwq_qos_shadow *shadow;
+
+       shadow = khwq_find_shadow(kdev, type, idx, 0, internal);
+       if (WARN_ON(!shadow))
+               return -EINVAL;
+
+       idx = khwq_qos_id_to_idx(idx);
+       if (!shadow->control)
+               return -ENOSYS;
+       return shadow->control(shadow, ctrl, idx, arg);
+}
+
+int khwq_qos_sync(struct khwq_device *kdev, enum khwq_qos_shadow_type type,
+                   int idx, bool internal)
+{
+       struct khwq_pdsp_info *pdsp;
+       struct khwq_qos_shadow *shadow;
+       struct khwq_qos_info *info;
+       int error = 0;
+
+       if (type < 0 || type >= QOS_MAX_SHADOW) {
+               dev_err(kdev->dev, "bad shadow type %d\n", type);
+               return -EINVAL;
+       }
+
+       if (idx >= 0) {
+               shadow = khwq_find_shadow(kdev, type, idx, 0, internal);
+               if (WARN_ON(!shadow))
+                       return -EINVAL;
+               idx = khwq_qos_id_to_idx(idx);
+               return shadow->sync ? shadow->sync(shadow, idx) : -ENOSYS;
+       }
+
+       /* sync all */
+       for_each_pdsp(kdev, pdsp) {
+               info = pdsp->qos_info;
+               if (!info)
+                       continue;
+               shadow = &info->shadows[type];
+
+               error = shadow->sync ? shadow->sync(shadow, idx) : 0;
+               if (error)
+                       break;
+       }
+
+       return error;
+}
+
+int khwq_qos_alloc(struct khwq_device *kdev, enum khwq_qos_shadow_type type)
+{
+       struct khwq_pdsp_info *pdsp;
+       struct khwq_qos_info *info;
+       struct khwq_qos_shadow *shadow;
+       int idx;
+
+       for_each_pdsp(kdev, pdsp) {
+               info = pdsp->qos_info;
+               if (!info)
+                       continue;
+
+               shadow = &info->shadows[type];
+               spin_lock_bh(&info->lock);
+
+               idx = find_last_bit(shadow->avail, shadow->count);
+               if (idx < shadow->count) {
+                       clear_bit(idx, shadow->avail);
+                       spin_unlock_bh(&info->lock);
+                       return khwq_qos_make_id(pdsp->id, idx);
+               }
+
+               spin_unlock_bh(&info->lock);
+       }
+
+       return -ENOSPC;
+}
+
+int khwq_qos_free(struct khwq_device *kdev, enum khwq_qos_shadow_type type,
+                   int idx)
+{
+       struct khwq_qos_shadow *shadow;
+
+       shadow = khwq_find_shadow(kdev, type, idx, 0, false);
+       if (WARN_ON(!shadow))
+               return -EINVAL;
+
+       idx = khwq_qos_id_to_idx(idx);
+       if (WARN_ON(test_bit(idx, shadow->dirty)        ||
+                   test_bit(idx, shadow->running)      ||
+                   test_bit(idx, shadow->avail)))
+           return -EBUSY;
+
+       set_bit(idx, shadow->avail);
+
+       return 0;
+}
+
+static int khwq_qos_alloc_drop_queue(struct khwq_device *kdev, int _idx)
+{
+       struct khwq_pdsp_info *pdsp;
+       struct khwq_qos_info *info;
+       struct khwq_qos_shadow *shadow;
+       int idx, base, count;
+
+       for_each_pdsp(kdev, pdsp) {
+               info = pdsp->qos_info;
+               if (!info)
+                       continue;
+
+               shadow = &info->shadows[QOS_DROP_QUEUE_CFG];
+               base   = info->drop_sched_queue_base;
+               count  = shadow->count;
+
+               if (_idx < base || _idx >= (base + count))
+                       continue;
+
+               idx = _idx - base;
+
+               if (test_and_clear_bit(idx, shadow->avail))
+                       return khwq_qos_make_id(pdsp->id, idx);
+               else
+                       return -EBUSY;
+       }
+       return -ENODEV;
+}
+
+static int khwq_qos_free_drop_queue(struct khwq_device *kdev, int idx)
+{
+       return khwq_qos_free(kdev, QOS_DROP_QUEUE_CFG, idx);
+}
+
+static int khwq_qos_sched_port_enable(struct khwq_qos_shadow *shadow, int idx,
+                                      bool enable)
+{
+       struct khwq_qos_info *info = shadow->info;
+       struct khwq_device *kdev = info->kdev;
+       int error = 0, start = idx, end = idx + 1;
+       u32 command;
+
+       if (WARN_ON(idx >= shadow->count))
+               return -EINVAL;
+
+       if (idx < 0) {
+               start = 0;
+               end = shadow->count;
+       }
+
+       /* fail if our state is dirty */
+       for (idx = start; idx < end; idx++) {
+               if (WARN_ON(test_bit(idx, shadow->dirty)))
+                       return -EBUSY;
+       }
+
+       for (idx = start; idx < end; idx++) {
+               if (enable && test_bit(idx, shadow->running)) {
+                       dev_warn(kdev->dev, "forced enable on running %s %d\n",
+                                shadow->name, idx);
+               }
+               if (!enable && !test_bit(idx, shadow->running)) {
+                       dev_warn(kdev->dev, "forced disable on halted %s %d\n",
+                                shadow->name, idx);
+               }
+               command = (QOS_CMD_ENABLE_PORT                  |
+                          (enable ? QOS_ENABLE : QOS_DISABLE)  |
+                          (shadow->start + idx) << 16);
+               error = khwq_qos_write_cmd(info, command);
+               if (error) {
+                       dev_err(kdev->dev, "failed to %s %s index %d\n",
+                               enable ? "enable" : "disable", shadow->name, idx);
+                       break;
+               }
+               if (enable)
+                       set_bit(idx, shadow->running);
+               else
+                       clear_bit(idx, shadow->running);
+       }
+
+       return error;
+}
+
+static int khwq_qos_sched_port_get_input(struct khwq_qos_shadow *shadow,
+                                        int idx, u32 queue)
+{
+       struct khwq_qos_info *info = shadow->info;
+       struct khwq_device *kdev = info->kdev;
+
+       if (WARN_ON(idx < 0 || idx >= shadow->count ||
+                   test_bit(idx, shadow->avail)))
+               return -EINVAL;
+       if (queue >= shadow->info->inputs_per_port) {
+               dev_err(kdev->dev, "requested queue %d out of range\n",
+                       queue);
+               return -EINVAL;
+       }
+
+       return (shadow->start + idx) * info->inputs_per_port +
+               queue + info->sched_port_queue_base;
+}
+
+static int __khwq_qos_control_sched_port(struct khwq_qos_shadow *shadow,
+                                        enum khwq_qos_control_type ctrl,
+                                        int idx, u32 arg)
+{
+       struct khwq_qos_info *info = shadow->info;
+       int error = 0;
+
+       spin_lock_bh(&info->lock);
+
+       switch (ctrl) {
+
+       case QOS_CONTROL_ENABLE:
+               error = khwq_qos_sched_port_enable(shadow, idx, !!arg);
+               break;
+
+       case QOS_CONTROL_GET_INPUT:
+               error = khwq_qos_sched_port_get_input(shadow, idx, arg);
+               break;
+
+       default:
+               error = -ENOSYS;
+               break;
+       }
+
+       spin_unlock_bh(&info->lock);
+
+       return error;
+}
+
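+/*
+ * Unified regions are synced by pulling the active config into the shadow
+ * area, overwriting the relevant words and writing the result back in one
+ * go; khwq_qos_sync_shadow_single() below instead pushes one record per
+ * command, with the record index encoded in the command word.
+ */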
+static int khwq_qos_sync_shadow_unified(struct khwq_qos_shadow *shadow, int idx)
+{
+       struct khwq_qos_info *info = shadow->info;
+       int count, error, offset;
+       u32 *from;
+
+       if (WARN_ON(idx >= shadow->count))
+               return -EINVAL;
+
+       spin_lock_bh(&info->lock);
+
+       /* now fill in our data */
+       if (idx < 0) {
+               offset  = shadow->start * shadow->size;
+               from    = shadow->data;
+               count   = (shadow->size * shadow->count) / sizeof(u32);
+       } else {
+               offset  = (shadow->start + idx) * shadow->size;
+               from    = shadow->data + idx * shadow->size;
+               count   = shadow->size / sizeof(u32);
+       }
+
+       /* first get the pdsp to copy active config to shadow area */
+       error = khwq_qos_read_shadow(info, shadow->type << 24, 0, NULL, 0);
+       if (error)
+               goto bail;
+
+       /* now fill in our data and write it back */
+       error = khwq_qos_write_shadow(info, shadow->type << 24, offset,
+                                     from, count);
+       if (error)
+               goto bail;
+
+       if (idx < 0) {
+               count = BITS_TO_LONGS(shadow->count) * sizeof(long);
+               memset(shadow->dirty, 0, count);
+       } else {
+               clear_bit(idx, shadow->dirty);
+       }
+
+       error = 0;
+
+bail:
+       spin_unlock_bh(&info->lock);
+
+       return error;
+}
+
+static int khwq_qos_sync_shadow_single(struct khwq_qos_shadow *shadow, int idx)
+{
+       struct khwq_qos_info *info = shadow->info;
+       struct khwq_pdsp_info *pdsp = info->pdsp;
+       int count, error = 0, start = idx, end = idx + 1;
+       u32 __iomem *to;
+       u32 *from;
+       u32 command;
+
+       if (WARN_ON(idx >= shadow->count))
+               return -EINVAL;
+
+       if (idx < 0) {
+               start = 0;
+               end = shadow->count;
+       }
+
+       spin_lock_bh(&info->lock);
+
+       for (idx = start; idx < end; idx++) {
+               from    = shadow->data + (idx * shadow->size);
+               to      = pdsp->command + QOS_SHADOW_OFFSET;
+               count   = shadow->size / sizeof(u32);
+
+               command = (shadow->start + idx) << 16 | shadow->type << 24;
+               error = khwq_qos_write_shadow(info, command, 0, from, count);
+               if (error)
+                       break;
+
+               clear_bit(idx, shadow->dirty);
+       }
+
+       spin_unlock_bh(&info->lock);
+
+       return error;
+}
+
+static void khwq_qos_free_shadow(struct khwq_qos_info *info,
+                                enum khwq_qos_shadow_type type)
+{
+       struct khwq_qos_shadow *shadow = &info->shadows[type];
+       struct khwq_device *kdev = info->kdev;
+
+       if (shadow->data)
+               devm_kfree(kdev->dev, shadow->data);
+}
+
+static int khwq_qos_init_shadow(struct khwq_qos_info *info,
+                               enum khwq_qos_shadow_type type, const char *name,
+                               struct device_node *node, bool unified)
+{
+       struct khwq_qos_shadow *shadow = &info->shadows[type];
+       struct khwq_device *kdev = info->kdev;
+       int error, size, alloc_size;
+       u32 temp[3];
+
+       shadow->info = info;
+       shadow->name = name;
+
+       error = of_property_read_u32_array(node, name, temp, 3);
+       if (error < 0) {
+               dev_err(kdev->dev, "invalid shadow config for %s\n",
+                       name);
+               return -ENODEV;
+       }
+
+       shadow->start   = temp[0];
+       shadow->count   = temp[1];
+       shadow->size    = temp[2];
+       shadow->type    = type;
+       shadow->sync    = (unified ? khwq_qos_sync_shadow_unified :
+                                    khwq_qos_sync_shadow_single);
+       if (type == QOS_SCHED_PORT_CFG)
+               shadow->control = __khwq_qos_control_sched_port;
+
+       if (shadow->size % 4) {
+               dev_err(kdev->dev, "misaligned shadow size for %s\n",
+                       name);
+               return -ENODEV;
+       }
+
+       size = shadow->size * shadow->count;
+       alloc_size = size + 3 * BITS_TO_LONGS(shadow->count) * sizeof(long);
+
+       shadow->data = devm_kzalloc(kdev->dev, alloc_size, GFP_KERNEL);
+       if (!shadow->data) {
+               dev_err(kdev->dev, "shadow alloc failed for %s\n",
+                       name);
+               return -ENOMEM;
+       }
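+       /*
+        * the dirty, avail and running bitmaps are carved out of the same
+        * allocation, immediately after the shadow data itself
+        */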
+       shadow->dirty   = shadow->data + size;
+       shadow->avail   = shadow->dirty + BITS_TO_LONGS(shadow->count);
+       shadow->running = shadow->avail  + BITS_TO_LONGS(shadow->count);
+
+       /* mark all as available */
+       memset(shadow->avail, 0xff,
+              BITS_TO_LONGS(shadow->count) * sizeof(long));
+
+       return 0;
+}
+
+static int khwq_qos_init_stats(struct khwq_qos_info *info,
+                               struct device_node *node)
+{
+       struct khwq_qos_stats *stats = &info->stats;
+       struct khwq_device *kdev = info->kdev;
+       int error, size, alloc_size;
+       u32 temp[2];
+
+       error = of_property_read_u32_array(node, "statistics-profiles",
+                                          temp, 2);
+       if (error < 0) {
+               dev_err(kdev->dev, "invalid statistics config\n");
+               return -ENODEV;
+       }
+
+       stats->start    = temp[0];
+       stats->count    = temp[1];
+
+       size = stats->count * sizeof(u64) * 4;
+       alloc_size = size + 3 * BITS_TO_LONGS(stats->count) * sizeof(long);
+
+       stats->data = devm_kzalloc(kdev->dev, alloc_size, GFP_KERNEL);
+       if (!stats->data) {
+               dev_err(kdev->dev, "stats alloc failed\n");
+               return -ENOMEM;
+       }
+
+       stats->dirty    = stats->data + size;
+       stats->avail    = stats->dirty + BITS_TO_LONGS(stats->count);
+       stats->running  = stats->avail  + BITS_TO_LONGS(stats->count);
+
+       /* mark all as available */
+       memset(stats->avail, 0xff,
+              BITS_TO_LONGS(stats->count) * sizeof(long));
+
+       return 0;
+}
+
+static void khwq_qos_free_stats(struct khwq_qos_info *info)
+{
+       struct khwq_qos_stats *stats = &info->stats;
+       struct khwq_device *kdev = info->kdev;
+
+       if (stats->data)
+               devm_kfree(kdev->dev, stats->data);
+}
+
+static void __khwq_free_qos_range(struct khwq_device *kdev,
+                                 struct khwq_qos_info *info)
+{
+       khwq_qos_free_shadow(info, QOS_SCHED_PORT_CFG);
+       khwq_qos_free_shadow(info, QOS_DROP_CFG_PROF);
+       khwq_qos_free_shadow(info, QOS_DROP_OUT_PROF);
+       khwq_qos_free_shadow(info, QOS_DROP_QUEUE_CFG);
+       khwq_qos_free_stats(info);
+       khwq_qos_free_drop_policies(kdev, info);
+       khwq_qos_free_stats_classes(kdev, info);
+       ktree_remove_tree(&info->qos_tree);
+       kobject_del(info->kobj);
+       kobject_put(info->kobj);
+       kobject_del(info->kobj_stats);
+       kobject_put(info->kobj_stats);
+       kobject_del(info->kobj_policies);
+       kobject_put(info->kobj_policies);
+       devm_kfree(info->kdev->dev, info);
+}
+
+void khwq_free_qos_range(struct khwq_device *kdev,
+                        struct khwq_range_info *range)
+{
+       if (range->qos_info)
+               __khwq_free_qos_range(kdev, range->qos_info);
+}
+
+static int khwq_qos_prio_check(struct ktree_node *child, void *arg)
+{
+       struct khwq_qos_tree_node *node = arg;
+       struct khwq_qos_tree_node *sibling = to_qnode(child);
+
+       return (sibling->priority == node->priority) ? -EINVAL : 0;
+}
+
+struct khwq_qos_drop_policy *
+khwq_qos_inherited_drop_policy(struct khwq_qos_tree_node *node)
+{
+       for (; node; node = node->parent)
+               if (node->drop_policy)
+                       return node->drop_policy;
+       return NULL;
+}
+
+static int khwq_qos_drop_policy_check(struct ktree_node *node, void *arg)
+{
+       struct khwq_qos_info *info = arg;
+       struct khwq_qos_tree_node *qnode = to_qnode(node);
+
+       if (qnode->type != QOS_NODE_DEFAULT || qnode->drop_policy)
+               return -EINVAL;
+       return ktree_for_each_child(&qnode->node, khwq_qos_drop_policy_check,
+                                   info);
+}
+
+static void khwq_qos_get_tree_node(struct ktree_node *node)
+{
+       /* nothing for now */
+}
+
+static void khwq_qos_put_tree_node(struct ktree_node *node)
+{
+       struct khwq_qos_tree_node *qnode = to_qnode(node);
+       kfree(qnode);
+}
+
+static ssize_t qnode_stats_class_show(struct khwq_qos_tree_node *qnode,
+                                            char *buf)
+{
+       struct khwq_qos_stats_class *class;
+
+       class = qnode->stats_class;
+
+       if (!class)
+               return -ENODEV;
+
+       return snprintf(buf, PAGE_SIZE, "%s\n", class->name);
+}
+
+static ssize_t qnode_drop_policy_show(struct khwq_qos_tree_node *qnode,
+                                            char *buf)
+{
+       struct khwq_qos_drop_policy *policy;
+
+       policy = qnode->drop_policy;
+
+       if (!policy)
+               return -ENODEV;
+
+       return snprintf(buf, PAGE_SIZE, "%s\n", policy->name);
+}
+
+static ssize_t qnode_weight_show(struct khwq_qos_tree_node *qnode,
+                                            char *buf)
+{
+       if (qnode->weight == -1)
+               return -EINVAL;
+
+       return snprintf(buf, PAGE_SIZE, "%d\n", qnode->weight);
+}
+
+static ssize_t qnode_priority_show(struct khwq_qos_tree_node *qnode,
+                                            char *buf)
+{
+       if (qnode->priority == -1)
+               return -EINVAL;
+
+       return snprintf(buf, PAGE_SIZE, "%d\n", qnode->priority);
+}
+
+static ssize_t qnode_output_rate_show(struct khwq_qos_tree_node *qnode,
+                                            char *buf)
+{
+       if (qnode->output_rate == -1)
+               return -EINVAL;
+
+       return snprintf(buf, PAGE_SIZE, "%d\n", qnode->output_rate);
+}
+
+static ssize_t qnode_burst_size_show(struct khwq_qos_tree_node *qnode,
+                                            char *buf)
+{
+       if (qnode->burst_size == -1)
+               return -EINVAL;
+
+       return snprintf(buf, PAGE_SIZE, "%d\n", qnode->burst_size);
+}
+
+static ssize_t qnode_overhead_bytes_show(struct khwq_qos_tree_node *qnode,
+                                            char *buf)
+{
+       return snprintf(buf, PAGE_SIZE, "%d\n", qnode->overhead_bytes);
+}
+
+static ssize_t qnode_output_queue_show(struct khwq_qos_tree_node *qnode,
+                                            char *buf)
+{
+       if (qnode->output_queue == -1)
+               return -EINVAL;
+
+       return snprintf(buf, PAGE_SIZE, "%d\n", qnode->output_queue);
+}
+
+struct khwq_qos_qnode_attribute {
+       struct attribute attr;
+       ssize_t (*show)(struct khwq_qos_tree_node *qnode, char *);
+       ssize_t (*store)(struct khwq_qos_tree_node *qnode,
+                        const char *, size_t);
+};
+
+#define KHWQ_QOS_QNODE_ATTR(_name, _mode, _show, _store) \
+       struct khwq_qos_qnode_attribute attr_qnode_##_name = \
+       __ATTR(_name, _mode, _show, _store)
+
+static KHWQ_QOS_QNODE_ATTR(stats_class, S_IRUGO, qnode_stats_class_show,
+                          NULL);
+static KHWQ_QOS_QNODE_ATTR(drop_policy, S_IRUGO, qnode_drop_policy_show,
+                          NULL);
+static KHWQ_QOS_QNODE_ATTR(priority, S_IRUGO,
+                          qnode_priority_show, NULL);
+static KHWQ_QOS_QNODE_ATTR(output_queue, S_IRUGO, qnode_output_queue_show,
+                          NULL);
+static KHWQ_QOS_QNODE_ATTR(weight, S_IRUGO|S_IWUSR,
+                          qnode_weight_show, NULL);
+static KHWQ_QOS_QNODE_ATTR(output_rate, S_IRUGO|S_IWUSR,
+                          qnode_output_rate_show, NULL);
+static KHWQ_QOS_QNODE_ATTR(burst_size, S_IRUGO|S_IWUSR,
+                          qnode_burst_size_show, NULL);
+static KHWQ_QOS_QNODE_ATTR(overhead_bytes, S_IRUGO|S_IWUSR,
+                          qnode_overhead_bytes_show,
+                          NULL);
+
+static struct attribute *khwq_qos_qnode_sysfs_attrs[] = {
+       &attr_qnode_stats_class.attr,
+       &attr_qnode_drop_policy.attr,
+       &attr_qnode_weight.attr,
+       &attr_qnode_priority.attr,
+       &attr_qnode_output_rate.attr,
+       &attr_qnode_burst_size.attr,
+       &attr_qnode_overhead_bytes.attr,
+       &attr_qnode_output_queue.attr,
+       NULL
+};
+
+static ssize_t khwq_qos_qnode_attr_show(struct kobject *kobj,
+                                struct attribute *attr, char *buf)
+{
+       struct khwq_qos_qnode_attribute *qnode_attr;
+       struct khwq_qos_tree_node *qnode;
+
+       qnode = container_of(kobj, struct khwq_qos_tree_node, kobj);
+       qnode_attr = container_of(attr, struct khwq_qos_qnode_attribute,
+                                  attr);
+
+       if (!qnode_attr->show)
+               return -ENOENT;
+
+       return qnode_attr->show(qnode, buf);
+}
+
+static ssize_t khwq_qos_qnode_attr_store(struct kobject *kobj,
+                                         struct attribute *attr,
+                                         const char *buf, size_t size)
+{
+       struct khwq_qos_qnode_attribute *qnode_attr;
+       struct khwq_qos_tree_node *qnode;
+
+       qnode = container_of(kobj, struct khwq_qos_tree_node, kobj);
+       qnode_attr = container_of(attr, struct khwq_qos_qnode_attribute,
+                                  attr);
+
+       if (!qnode_attr->store)
+               return -ENOENT;
+
+       return qnode_attr->store(qnode, buf, size);
+}
+
+static const struct sysfs_ops khwq_qos_qnode_sysfs_ops = {
+       .show = khwq_qos_qnode_attr_show,
+       .store = khwq_qos_qnode_attr_store,
+};
+
+static struct kobj_type khwq_qos_qnode_ktype = {
+       .sysfs_ops = &khwq_qos_qnode_sysfs_ops,
+       .default_attrs = khwq_qos_qnode_sysfs_attrs,
+};
+
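+/*
+ * Child comparator for ktree_sort_children(): arg is the byte offset of a
+ * u32 member (priority or weight) within struct khwq_qos_tree_node, and
+ * children are sorted in ascending order of that member.
+ */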
+static int khwq_qos_cmp(struct ktree_node *_a, struct ktree_node *_b,
+                       void *arg)
+{
+       void *a = to_qnode(_a);
+       void *b = to_qnode(_b);
+       int offset = (int)arg;
+       return *((u32 *)(a + offset)) - *((u32 *)(b + offset));
+}
+
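+/*
+ * Reject rate/size values that would overflow a 32-bit credit register
+ * once shifted into fixed-point form, i.e. values that do not fit within
+ * 32 - QOS_CREDITS_{BYTE,PACKET}_SHIFT bits.
+ */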
+static int khwq_qos_check_overflow(struct khwq_qos_tree_node *qnode,
+                                  int val)
+{
+       int tmp;
+
+       tmp = (qnode->acct == QOS_BYTE_ACCT) ?
+                       (val & ~BITS(32 - QOS_CREDITS_BYTE_SHIFT)) :
+                       (val & ~BITS(32 - QOS_CREDITS_PACKET_SHIFT));
+
+       if (tmp)
+               return -EINVAL;
+
+       return 0;
+}
+
+static int khwq_qos_tree_parse(struct khwq_qos_info *info,
+                                   struct device_node *node,
+                                   struct khwq_qos_tree_node *parent)
+{
+       struct khwq_qos_tree_node *qnode;
+       struct khwq_device *kdev = info->kdev;
+       int length, i, error = 0, elements;
+       struct device_node *child;
+       bool has_children;
+       const char *name;
+       struct kobject *parent_kobj;
+       u32 temp[2];
+
+       /* first find out if we are a leaf node */
+       child = of_get_next_child(node, NULL);
+       has_children = !!child;
+       of_node_put(child);
+
+       qnode = devm_kzalloc(kdev->dev, sizeof(*qnode), GFP_KERNEL);
+       if (!qnode) {
+               dev_err(kdev->dev, "failed to alloc qos node\n");
+               return -ENOMEM;
+       }
+
+       if (!parent)
+               parent_kobj = info->kobj;
+       else
+               parent_kobj = &parent->kobj;
+
+       qnode->info = info;
+       qnode->parent = parent;
+       qnode->name = node->name;
+
+       error = kobject_init_and_add(&qnode->kobj,
+                                    &khwq_qos_qnode_ktype,
+                                    parent_kobj, qnode->name);
+       if (error) {
+               dev_err(kdev->dev,
+                       "failed to create sysfs entries for qnode %s\n",
+                       qnode->name);
+               goto error_destroy;
+       }
+
+       of_property_read_string(node, "label", &qnode->name);
+       dev_dbg(kdev->dev, "processing node %s, parent %s%s\n",
+               qnode->name, parent ? parent->name : "(none)",
+               has_children ? "" : ", leaf");
+
+       qnode->type = QOS_NODE_DEFAULT;
+       if (of_find_property(node, "strict-priority", NULL))
+               qnode->type = QOS_NODE_PRIO;
+       if (of_find_property(node, "weighted-round-robin", NULL)) {
+               if (qnode->type != QOS_NODE_DEFAULT) {
+                       dev_err(kdev->dev, "multiple node types in %s\n",
+                               qnode->name);
+                       error = -EINVAL;
+                       goto error_free;
+               }
+               qnode->type = QOS_NODE_WRR;
+       }
+       if (!parent && qnode->type == QOS_NODE_DEFAULT) {
+               dev_err(kdev->dev, "root node %s must be wrr/prio\n",
+                       qnode->name);
+               error = -EINVAL;
+               goto error_free;
+       }
+       if (!has_children && qnode->type != QOS_NODE_DEFAULT) {
+               dev_err(kdev->dev, "leaf node %s must not be wrr/prio\n",
+                       qnode->name);
+               error = -EINVAL;
+               goto error_free;
+       }
+
+       dev_dbg(kdev->dev, "node %s: type %d\n", qnode->name, qnode->type);
+
+       qnode->weight = -1;
+       of_property_read_u32(node, "weight", &qnode->weight);
+       if (qnode->weight != -1) {
+               if (qnode->weight > QOS_MAX_WEIGHT) {
+                       dev_err(kdev->dev, "cannot have weight more than 1M\n");
+                       error = -EINVAL;
+                       goto error_free;
+               }
+               if (!parent || parent->type != QOS_NODE_WRR) {
+                       dev_err(kdev->dev, "unexpected weight on node %s\n",
+                               qnode->name);
+                       error = -EINVAL;
+                       goto error_free;
+               }
+       } else if (parent && parent->type == QOS_NODE_WRR) {
+               dev_err(kdev->dev, "expected weight on wrr child node %s\n",
+                       qnode->name);
+               error = -EINVAL;
+               goto error_free;
+       }
+       dev_dbg(kdev->dev, "node %s: weight %d\n", qnode->name, qnode->weight);
+
+       qnode->priority = -1;
+       of_property_read_u32(node, "priority", &qnode->priority);
+       if (qnode->priority != -1) {
+               if (!parent || parent->type != QOS_NODE_PRIO) {
+                       dev_err(kdev->dev, "unexpected priority on node %s\n",
+                               qnode->name);
+                       error = -EINVAL;
+                       goto error_free;
+               }
+               error = ktree_for_each_child(&parent->node,
+                                            khwq_qos_prio_check, qnode);
+               if (error) {
+                       dev_err(kdev->dev, "duplicate priority %d on node %s\n",
+                               qnode->priority, qnode->name);
+                       error = -EINVAL;
+                       goto error_free;
+               }
+       } else if (parent && parent->type == QOS_NODE_PRIO) {
+               dev_err(kdev->dev, "expected prio on strict prio child %s\n",
+                       qnode->name);
+               error = -EINVAL;
+               goto error_free;
+       }
+       dev_dbg(kdev->dev, "node %s: priority %d\n", qnode->name,
+               qnode->priority);
+
+       qnode->acct = QOS_BYTE_ACCT;
+       if (of_find_property(node, "byte-units", NULL))
+               qnode->acct = QOS_BYTE_ACCT;
+       else if (of_find_property(node, "packet-units", NULL))
+               qnode->acct = QOS_PACKET_ACCT;
+       else if (parent)
+               qnode->acct = parent->acct;
+       dev_dbg(kdev->dev, "node %s: accounting %s\n", qnode->name,
+               qnode->acct == QOS_PACKET_ACCT ? "packet" : "bytes");
+
+       qnode->output_queue = -1;
+       error = of_property_read_u32(node, "output-queue", &qnode->output_queue);
+       if (error && !parent) {
+               dev_err(kdev->dev, "root qos node %s needs an output queue\n",
+                       qnode->name);
+               goto error_free;
+       }
+       if (!error && parent) {
+               dev_warn(kdev->dev, "output queue ignored on node %s\n",
+                        qnode->name);
+               qnode->output_queue = -1;
+       }
+       dev_dbg(kdev->dev, "node %s: output queue %d\n", qnode->name,
+               qnode->output_queue);
+
+       qnode->overhead_bytes = parent ? parent->overhead_bytes : 24;
+       of_property_read_u32(node, "overhead-bytes", &qnode->overhead_bytes);
+       dev_dbg(kdev->dev, "node %s: overhead bytes %d\n", qnode->name,
+               qnode->overhead_bytes);
+
+       error = of_property_read_string(node, "drop-policy", &name);
+       if (!error) {
+               qnode->drop_policy = khwq_qos_find_drop_policy(info, name);
+               if (!qnode->drop_policy) {
+                       dev_err(kdev->dev, "invalid drop policy %s\n", name);
+                       error = -EINVAL;
+                       goto error_free;
+               }
+               qnode->drop_policy->usecount++;
+       }
+       if (!has_children && !khwq_qos_inherited_drop_policy(qnode))
+               qnode->drop_policy = info->default_drop_policy;
+
+       dev_dbg(kdev->dev, "node %s: drop policy %s\n", qnode->name,
+               qnode->drop_policy ? qnode->drop_policy->name : "(none)");
+
+       error = of_property_read_string(node, "stats-class", &name);
+       if (!error) {
+               qnode->stats_class = khwq_qos_find_stats_class(info, name);
+               if (!qnode->stats_class)
+                       qnode->stats_class = khwq_qos_init_stats_class(info, name);
+               if (!qnode->stats_class) {
+                       dev_err(kdev->dev, "failed to create stats class %s\n", name);
+                       error = -ENODEV;
+                       goto error_free;
+               }
+               qnode->stats_class->usecount++;
+       }
+       if (has_children && qnode->stats_class) {
+               dev_err(kdev->dev, "unexpected stats class on non-leaf %s",
+                       qnode->name);
+               error = -EINVAL;
+               goto error_free;
+       }
+       dev_dbg(kdev->dev, "node %s: stats class %s\n", qnode->name,
+               qnode->stats_class ? qnode->stats_class->name : "(none)");
+
+       qnode->output_rate = parent ? parent->output_rate : -1;
+       qnode->burst_size = parent ? parent->burst_size : -1;
+       if (of_find_property(node, "output-rate", &length)) {
+               elements = length / sizeof(u32);
+               elements = min(elements, 2);
+               error = of_property_read_u32_array(node, "output-rate", temp,
+                                                  elements);
+               if (error) {
+                       dev_err(kdev->dev, "error reading output-rate on %s\n",
+                               qnode->name);
+                       goto error_free;
+               }
+
+               error = khwq_qos_check_overflow(qnode, (temp[0] /
+                                                       info->ticks_per_sec));
+               if (error) {
+                       dev_err(kdev->dev, "burst rate credit overflow\n");
+                       goto error_free;
+               }
+
+               qnode->output_rate = temp[0];
+
+               if (elements > 1) {
+                       error = khwq_qos_check_overflow(qnode, temp[1]);
+                       if (error) {
+                               dev_err(kdev->dev, "burst size credit overflow\n");
+                               goto error_free;
+                       }
+
+                       qnode->burst_size = temp[1];
+               }
+       }
+       dev_dbg(kdev->dev, "node %s: output rate %d, burst %d\n", qnode->name,
+               qnode->output_rate, qnode->burst_size);
+
+       if (of_find_property(node, "input-queues", &length)) {
+               qnode->num_input_queues = length / sizeof(u32);
+               if (qnode->num_input_queues >= ARRAY_SIZE(qnode->input_queues)) {
+                       dev_err(kdev->dev, "too many input_queues to node %s\n",
+                               qnode->name);
+                       error = -EOVERFLOW;
+                       goto error_free;
+               }
+               error = of_property_read_u32_array(node, "input-queues",
+                                                  qnode->input_queues,
+                                                  qnode->num_input_queues);
+               if (error) {
+                       dev_err(kdev->dev, "error getting input_queues on node %s\n",
+                               qnode->name);
+                       goto error_free;
+               }
+       }
+       if (has_children && qnode->num_input_queues) {
+               dev_err(kdev->dev, "unexpected input-queues on non-leaf %s",
+                       qnode->name);
+               error = -EINVAL;
+               goto error_free;
+       }
+       if (!has_children && !qnode->num_input_queues) {
+               dev_err(kdev->dev, "expected input-queues on leaf %s",
+                       qnode->name);
+               error = -EINVAL;
+               goto error_free;
+       }
+       for (i = 0; i < qnode->num_input_queues; i++) {
+               dev_dbg(kdev->dev, "node %s: input queue %d\n", qnode->name,
+                       qnode->input_queues[i]);
+       }
+
+       if (!parent)
+               ktree_set_root(&info->qos_tree, &qnode->node);
+       else
+               ktree_add_child_last(&parent->node, &qnode->node);
+
+       for_each_child_of_node(node, child) {
+               error = khwq_qos_tree_parse(info, child, qnode);
+               if (error)
+                       goto error_destroy;
+       }
+
+       if (qnode->drop_policy) {
+               error = ktree_for_each_child(&qnode->node,
+                                            khwq_qos_drop_policy_check,
+                                            info);
+               if (error)
+                       goto error_destroy;
+       }
+
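+       /*
+        * Sort the children so that parent input indices are later assigned
+        * in priority order for strict-priority nodes and in weight order
+        * for weighted-round-robin nodes.
+        */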
+       if (qnode->type == QOS_NODE_PRIO) {
+               int o = offsetof(struct khwq_qos_tree_node, priority);
+               ktree_sort_children(&qnode->node, khwq_qos_cmp, (void *)o);
+       } else if (qnode->type == QOS_NODE_WRR) {
+               int o = offsetof(struct khwq_qos_tree_node, weight);
+               ktree_sort_children(&qnode->node, khwq_qos_cmp, (void *)o);
+       }
+
+       return 0;
+
+error_destroy:
+       ktree_remove_node(&qnode->node);
+error_free:
+       if (qnode && qnode->drop_policy)
+               qnode->drop_policy->usecount--;
+       if (qnode && qnode->stats_class)
+               qnode->stats_class->usecount--;
+       devm_kfree(kdev->dev, qnode);
+       return error;
+}
+
+static int khwq_qos_tree_map_nodes(struct ktree_node *node, void *arg)
+{
+       struct khwq_qos_tree_node *qnode = to_qnode(node);
+       struct khwq_qos_tree_node *parent = qnode->parent;
+       struct khwq_qos_info *info = arg;
+       struct khwq_device *kdev = info->kdev;
+
+       qnode->child_port_count =  0;
+       qnode->child_count      =  0;
+       qnode->parent_input     =  0;
+       qnode->child_weight_sum =  0;
+       qnode->child_weight_max =  0;
+       qnode->child_weight_min = -1;
+       qnode->is_drop_input    = false;
+
+       if (qnode->drop_policy)
+               qnode->is_drop_input = true;
+
+       if (parent) {
+               /* where do we plugin into our parent? */
+               qnode->parent_input = parent->child_count;
+
+               /* provide our parent with info */
+               parent->child_count++;
+               parent->child_weight_sum += qnode->weight;
+               if (qnode->weight > parent->child_weight_max)
+                       parent->child_weight_max = qnode->weight;
+               if (qnode->weight < parent->child_weight_min)
+                       parent->child_weight_min = qnode->weight;
+
+               /* inherit if parent is an input to drop sched */
+               if (parent->is_drop_input)
+                       qnode->is_drop_input = true;
+       }
+
+       ktree_for_each_child(&qnode->node, khwq_qos_tree_map_nodes, info);
+
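+       /*
+        * A node needs its own scheduler port if it schedules its children
+        * (strict priority or wrr) or if any of its children own a port.
+        */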
+       qnode->has_sched_port = (qnode->type == QOS_NODE_PRIO   ||
+                                qnode->type == QOS_NODE_WRR    ||
+                                qnode->child_port_count);
+
+       if (qnode->has_sched_port && parent)
+               parent->child_port_count++;
+
+       if (qnode->child_count > info->inputs_per_port) {
+               dev_err(kdev->dev, "too many children (%d) on node %s\n",
+                       qnode->child_count, qnode->name);
+               return -EOVERFLOW;
+       }
+       return 0;
+}
+
+static int khwq_qos_tree_alloc_nodes(struct ktree_node *node, void *arg)
+{
+       struct khwq_qos_info *info = arg;
+       struct khwq_device *kdev = info->kdev;
+       struct khwq_qos_tree_node *qnode = to_qnode(node);
+       struct khwq_qos_tree_node *parent = qnode->parent;
+       int error, i;
+
+       if (qnode->has_sched_port) {
+               error = khwq_qos_alloc_sched_port(kdev);
+               if (error < 0) {
+                       dev_err(kdev->dev, "node %s: failed to alloc sched port [%d]\n",
+                               qnode->name, error);
+                       return error;
+               }
+               qnode->sched_port_idx = error;
+       } else {
+               qnode->sched_port_idx = parent->sched_port_idx;
+       }
+
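+       /*
+        * Every non-root node occupies one input of its parent's scheduler
+        * port; the queue number reported for that input becomes this
+        * node's output queue.
+        */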
+       if (parent) {
+               if (WARN_ON(qnode->output_queue != -1))
+                       return -EINVAL;
+               if (parent->type == QOS_NODE_DEFAULT)
+                       qnode->parent_input = qnode->parent->parent_input;
+               error = khwq_qos_control_sched_port(kdev, QOS_CONTROL_GET_INPUT,
+                                                   parent->sched_port_idx,
+                                                   qnode->parent_input);
+               if (WARN_ON(error < 0))
+                       return error;
+               qnode->output_queue = error;
+       }
+
+       dev_dbg(kdev->dev, "node %s: mapped to output queue %d (port %d)\n",
+               qnode->name, qnode->output_queue,
+               khwq_qos_id_to_idx(qnode->sched_port_idx));
+
+       if (qnode->drop_policy) {
+               error = khwq_qos_alloc_drop_out(kdev);
+               if (error < 0) {
+                       dev_err(kdev->dev, "node %s: failed to alloc drop out [%d]\n",
+                               qnode->name, error);
+                       return error;
+               }
+               qnode->drop_out_idx = error;
+               dev_dbg(kdev->dev, "allocated drop out %d for node %s\n",
+                       khwq_qos_id_to_idx(qnode->drop_out_idx), qnode->name);
+       }
+
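+       /*
+        * Nodes feeding the drop scheduler inherit the parent's drop output
+        * profile unless they allocated their own, and each of their input
+        * queues is claimed as a drop scheduler queue.
+        */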
+       if (qnode->is_drop_input) {
+               if (!qnode->drop_out_idx)
+                       qnode->drop_out_idx = parent->drop_out_idx;
+
+               for (i = 0; i < qnode->num_input_queues; i++) {
+                       error = khwq_qos_alloc_drop_queue(kdev,
+                                                         qnode->input_queues[i]);
+                       if (error < 0) {
+                               dev_err(kdev->dev,
+                                       "failed to alloc input queue %d on node %s\n",
+                                       qnode->input_queues[i], qnode->name);
+                               return error;
+                       }
+                       qnode->drop_queue_idx[i] = error;
+                       dev_dbg(kdev->dev, "allocated drop queue %d for node %s\n",
+                               qnode->input_queues[i], qnode->name);
+               }
+       }
+
+       error = ktree_for_each_child(&qnode->node, khwq_qos_tree_alloc_nodes,
+                                    info);
+
+       return error;
+}
+
+static int khwq_qos_tree_start_port(struct khwq_qos_info *info,
+                                     struct khwq_qos_tree_node *qnode)
+{
+       int error, val, idx = qnode->sched_port_idx, temp;
+       struct khwq_device *kdev = info->kdev;
+       bool sync = false;
+       int inputs, i;
+
+       if (!qnode->has_sched_port)
+               return 0;
+
+       dev_dbg(kdev->dev, "programming sched port index %d for node %s\n",
+               khwq_qos_id_to_idx(idx), qnode->name);
+
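+       /* unit flags: 0xf for byte accounting, 0 for packet accounting */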
+       val = (qnode->acct == QOS_BYTE_ACCT) ? 0xf : 0;
+       error = khwq_qos_set_sched_unit_flags(kdev, idx, val, sync);
+       if (WARN_ON(error))
+               return error;
+
+       error = khwq_qos_set_sched_group_count(kdev, idx, 1, sync);
+       if (WARN_ON(error))
+               return error;
+
+       val = qnode->output_queue;
+       error = khwq_qos_set_sched_out_queue(kdev, idx, val, sync);
+       if (WARN_ON(error))
+               return error;
+
+       val = qnode->overhead_bytes;
+       error = khwq_qos_set_sched_overhead_bytes(kdev, idx, val, sync);
+       if (WARN_ON(error))
+               return error;
+
+       val = 0;
+       error = khwq_qos_set_sched_out_throttle(kdev, idx, val, sync);
+       if (WARN_ON(error))
+               return error;
+
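+       /*
+        * Convert the per-second output rate into per-tick CIR credits in
+        * the scheduler's fixed-point credit format; the burst size below
+        * is simply shifted into the same format.
+        */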
+       temp = qnode->output_rate / info->ticks_per_sec;
+       val = (qnode->acct == QOS_BYTE_ACCT) ?
+               (temp << QOS_CREDITS_BYTE_SHIFT) :
+               (temp << QOS_CREDITS_PACKET_SHIFT);
+       error = khwq_qos_set_sched_cir_credit(kdev, idx, val, sync);
+       if (WARN_ON(error))
+               return error;
+
+       temp = qnode->burst_size;
+       val = (qnode->acct == QOS_BYTE_ACCT) ?
+               (temp << QOS_CREDITS_BYTE_SHIFT) :
+               (temp << QOS_CREDITS_PACKET_SHIFT);
+       error = khwq_qos_set_sched_cir_max(kdev, idx, val, sync);
+       if (WARN_ON(error))
+               return error;
+
+       inputs = (qnode->type == QOS_NODE_DEFAULT) ? 1 : qnode->child_count;
+
+       error = khwq_qos_set_sched_total_q_count(kdev, idx, inputs, sync);
+       if (WARN_ON(error))
+               return error;
+
+       val = (qnode->type == QOS_NODE_PRIO) ? inputs : 0;
+       error = khwq_qos_set_sched_sp_q_count(kdev, idx, val, sync);
+       if (WARN_ON(error))
+               return error;
+
+       val = (qnode->type == QOS_NODE_WRR) ? inputs : 0;
+       error = khwq_qos_set_sched_wrr_q_count(kdev, idx, val, sync);
+       if (WARN_ON(error))
+               return error;
+
+       for (i = 0; i < inputs; i++) {
+               val = 0;
+               error = khwq_qos_set_sched_cong_thresh(kdev, idx, i, val, sync);
+               if (WARN_ON(error))
+                       return error;
+
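+               /*
+                * For wrr nodes, derive each input's round-robin credit from
+                * the normalization factor and the children's weights; other
+                * node types leave the credit at zero.
+                */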
+               val = 0;
+               if (qnode->type == QOS_NODE_WRR) {
+                       if (qnode->acct == QOS_BYTE_ACCT) {
+                               val = (QOS_BYTE_NORMALIZATION_FACTOR /
+                                       qnode->child_weight_sum);
+                               val *= (inputs);
+                               val <<= 6;
+                       } else {
+                               val = (inputs *
+                                      QOS_PACKET_NORMALIZATION_FACTOR);
+                               val <<= 3;
+                               val *= (((qnode->child_weight_max +
+                                        qnode->child_weight_min) / 2) *
+                                       inputs);
+                               val /= qnode->child_weight_sum;
+                       }
+               }
+
+               error = khwq_qos_set_sched_wrr_credit(kdev, idx, i, val, sync);
+               if (WARN_ON(error))
+                       return error;
+       }
+
+       error = khwq_qos_sync_sched_port(kdev, idx);
+       if (error) {
+               dev_err(kdev->dev, "error writing sched config for %s\n",
+                       qnode->name);
+               return error;
+       }
+
+       error = khwq_qos_control_sched_port(kdev, QOS_CONTROL_ENABLE, idx,
+                                           true);
+       if (error) {
+               dev_err(kdev->dev, "error enabling sched port for %s\n",
+                       qnode->name);
+               return error;
+       }
+       return 0;
+}
+
+static int khwq_qos_tree_start_drop_out(struct khwq_qos_info *info,
+                                     struct khwq_qos_tree_node *qnode)
+{
+       struct khwq_qos_drop_policy *policy = qnode->drop_policy;
+       int error, val, idx = qnode->drop_out_idx;
+       struct khwq_device *kdev = info->kdev;
+       bool sync = false;
+
+       if (!policy)
+               return 0;
+
+       dev_dbg(kdev->dev, "programming drop out index %d for node %s\n",
+               khwq_qos_id_to_idx(idx), qnode->name);
+
+       val = qnode->output_queue;
+       error = khwq_qos_set_drop_out_queue_number(kdev, idx, val, sync);
+       if (WARN_ON(error))
+               return error;
+
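+       /*
+        * Convert the policy's maximum drop probability from a percentage
+        * into a 16-bit binary fraction.
+        */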
+       val = (policy->max_drop_prob << 16) / 100;
+       error = khwq_qos_set_drop_out_red_prob(kdev, idx, val, sync);
+       if (WARN_ON(error))
+               return error;
+
+       val = khwq_qos_id_to_idx(policy->drop_cfg_idx);
+       error = khwq_qos_set_drop_out_cfg_prof_idx(kdev, idx, val, sync);
+       if (WARN_ON(error))
+               return error;
+
+       error = khwq_qos_set_drop_out_enable(kdev, idx, 1, sync);
+       if (WARN_ON(error))
+               return error;
+
+       error = khwq_qos_set_drop_out_avg_depth(kdev, idx, 0, sync);
+       if (WARN_ON(error))
+               return error;
+
+       return 0;
+}
+
+static int khwq_qos_tree_start_drop_queue(struct khwq_qos_info *info,
+                                           struct khwq_qos_tree_node *qnode)
+{
+       struct khwq_qos_stats_class *class = qnode->stats_class;
+       struct khwq_device *kdev = info->kdev;
+       int i, idx, error;
+       bool sync = false;
+       u32 val;
+
+       if (!qnode->is_drop_input)
+               return 0;
+
+       for (i = 0; i < qnode->num_input_queues; i++) {
+               idx = qnode->drop_queue_idx[i];
+
+               dev_dbg(kdev->dev, "programming drop queue %d for node %s\n",
+                       khwq_qos_id_to_idx(idx), qnode->name);
+
+               val = khwq_qos_id_to_idx(qnode->drop_out_idx);
+               error = khwq_qos_set_drop_q_out_prof_idx(kdev, idx, val, sync);
+               if (WARN_ON(error))
+                       return error;
+
+               error = khwq_qos_set_drop_q_stat_blk_idx(kdev, idx,
+                                                        class->stats_block_idx,
+                                                        sync);
+               if (WARN_ON(error))
+                       return error;
+
+               error = khwq_qos_set_drop_q_valid(kdev, idx, 1, sync);
+               if (WARN_ON(error))
+                       return error;
+       }
+       return 0;
+}
+
+static int khwq_qos_tree_start_nodes(struct ktree_node *node, void *arg)
+{
+       struct khwq_qos_info *info = arg;
+       struct khwq_qos_tree_node *qnode = to_qnode(node);
+       struct khwq_device *kdev = info->kdev;
+       int error;
+
+       error = khwq_qos_tree_start_port(info, qnode);
+       if (error)
+               return error;
+
+       error = khwq_qos_tree_start_drop_out(info, qnode);
+       if (error)
+               return error;
+
+       error = khwq_qos_tree_start_drop_queue(info, qnode);
+       if (error)
+               return error;
+
+       error = ktree_for_each_child(&qnode->node,
+                                    khwq_qos_tree_start_nodes, info);
+       if (error)
+               dev_err(kdev->dev, "error programming subtree at %s\n",
+                       qnode->name);
+       return error;
+}
+
+static int khwq_qos_tree_init(struct khwq_qos_info *info)
+{
+       struct ktree_node *root;
+       int error;
+
+       root = ktree_get_root(&info->qos_tree);
+       if (WARN_ON(!root))
+               return -ENODEV;
+
+       error = khwq_qos_tree_map_nodes(root, info);
+       if (WARN_ON(error))
+               return error;
+
+       error = khwq_qos_tree_alloc_nodes(root, info);
+       if (error)
+               goto bail;
+
+       error = 0;
+bail:
+       ktree_put_node(root);
+       return error;
+}
+
+static int khwq_qos_tree_start(struct khwq_qos_info *info)
+{
+       struct ktree_node *root;
+       int error;
+
+       root = ktree_get_root(&info->qos_tree);
+       if (WARN_ON(!root))
+               return -ENODEV;
+
+       error = khwq_qos_tree_start_nodes(root, info);
+       if (WARN_ON(error))
+               goto bail;
+
+       error = khwq_qos_sync_drop_queue(info->kdev, -1);
+       if (error) {
+               dev_err(info->kdev->dev, "error syncing drop queues\n");
+               goto bail;
+       }
+
+       error = khwq_qos_sync_drop_out(info->kdev, -1);
+       if (error) {
+               dev_err(info->kdev->dev, "error syncing drop outs\n");
+               goto bail;
+       }
+       error = 0;
+
+bail:
+       ktree_put_node(root);
+       return error;
+}
+
+static int khwq_qos_stop_drop_queues(struct khwq_qos_info *info)
+{
+       struct khwq_qos_shadow *shadow = &info->shadows[QOS_DROP_QUEUE_CFG];
+       struct khwq_device *kdev = info->kdev;
+       struct khwq_pdsp_info *pdsp;
+       int i, error, idx;
+
+       pdsp = info->pdsp;
+
+       for (i = shadow->start; i < (shadow->start + shadow->count); i++) {
+               if (!test_bit(i, shadow->avail)) {
+                       idx = khwq_qos_make_id(pdsp->id, i);
+                       error = khwq_qos_set_drop_q_valid(kdev, idx, 0, false);
+                       if (WARN_ON(error))
+                               return error;
+
+                       error = khwq_qos_set_drop_q_stat_blk_idx(kdev, idx, 0,
+                                                                false);
+                       if (WARN_ON(error))
+                               return error;
+
+                       error = khwq_qos_set_drop_q_out_prof_idx(kdev, idx, 0,
+                                                                false);
+                       if (WARN_ON(error))
+                               return error;
+               }
+       }
+
+       error = khwq_qos_sync_drop_queue(info->kdev, -1);
+       if (error) {
+               dev_err(kdev->dev, "error syncing drop queues\n");
+               return error;
+       }
+
+       return 0;
+}
+
+static int khwq_qos_stop_drop_outs(struct khwq_qos_info *info)
+{
+       struct khwq_qos_shadow *shadow = &info->shadows[QOS_DROP_OUT_PROF];
+       struct khwq_device *kdev = info->kdev;
+       struct khwq_pdsp_info *pdsp;
+       int i, error, idx;
+
+       pdsp = info->pdsp;
+
+       for (i = shadow->start; i < (shadow->start + shadow->count); i++) {
+               if (!test_bit(i, shadow->avail)) {
+                       idx = khwq_qos_make_id(pdsp->id, i);
+                       error = khwq_qos_set_drop_out_enable(kdev, idx, 0,
+                                                            false);
+                       if (WARN_ON(error))
+                               return error;
+
+                       error = khwq_qos_set_drop_out_queue_number(kdev, idx, 0,
+                                                            false);
+                       if (WARN_ON(error))
+                               return error;
+
+                       error = khwq_qos_set_drop_out_red_prob(kdev, idx, 0,
+                                                            false);
+                       if (WARN_ON(error))
+                               return error;
+
+                       error = khwq_qos_set_drop_out_cfg_prof_idx(kdev, idx, 0,
+                                                            false);
+                       if (WARN_ON(error))
+                               return error;
+
+                       error = khwq_qos_set_drop_out_avg_depth(kdev, idx, 0,
+                                                            false);
+                       if (WARN_ON(error))
+                               return error;
+               }
+       }
+
+       error = khwq_qos_sync_drop_out(info->kdev, -1);
+       if (error) {
+               dev_err(kdev->dev, "error syncing drop out\n");
+               return error;
+       }
+
+       return 0;
+}
+
+static int khwq_qos_stop_sched_port_queues(struct khwq_qos_info *info)
+{
+       struct khwq_qos_shadow *shadow = &info->shadows[QOS_SCHED_PORT_CFG];
+       struct khwq_device *kdev = info->kdev;
+       struct khwq_pdsp_info *pdsp;
+       int i, j, error, idx;
+       u32 queues;
+
+       pdsp = info->pdsp;
+
+       for (i = shadow->start; i < (shadow->start + shadow->count); i++) {
+               if (!test_bit(i, shadow->avail)) {
+                       idx = khwq_qos_make_id(pdsp->id, i);
+                       error = khwq_qos_set_sched_unit_flags(kdev, idx, 0xf,
+                                                             false);
+                       if (WARN_ON(error))
+                               return error;
+
+                       error = khwq_qos_get_sched_total_q_count(kdev, idx,
+                                                               &queues);
+                       if (WARN_ON(error))
+                               return error;
+
+                       for (j = 0; j < queues; j++) {
+                               error = khwq_qos_set_sched_cong_thresh(kdev,
+                                                                      idx, j,
+                                                                      1, false);
+                               if (WARN_ON(error))
+                                       return error;
+
+                               error = khwq_qos_set_sched_wrr_credit(kdev, idx,
+                                                                     j, 0,
+                                                                     false);
+                               if (WARN_ON(error))
+                                       return error;
+                       }
+
+                       error = khwq_qos_set_sched_out_queue(kdev, idx, 0,
+                                                            false);
+                       if (WARN_ON(error))
+                               return error;
+
+                       error = khwq_qos_set_sched_overhead_bytes(kdev, idx, 0,
+                                                                 false);
+                       if (WARN_ON(error))
+                               return error;
+
+                       error = khwq_qos_set_sched_out_throttle(kdev, idx, 0,
+                                                               false);
+                       if (WARN_ON(error))
+                               return error;
+
+                       error = khwq_qos_set_sched_cir_credit(kdev, idx, 0,
+                                                             false);
+                       if (WARN_ON(error))
+                               return error;
+
+                       error = khwq_qos_set_sched_cir_max(kdev, idx, 0, false);
+                       if (WARN_ON(error))
+                               return error;
+
+                       error = khwq_qos_set_sched_out_throttle(kdev, idx, 0,
+                                                               false);
+                       if (WARN_ON(error))
+                               return error;
+
+                       error = khwq_qos_set_sched_total_q_count(kdev, idx, 0,
+                                                               false);
+                       if (WARN_ON(error))
+                               return error;
+
+                       error = khwq_qos_set_sched_sp_q_count(kdev, idx, 0,
+                                                               false);
+                       if (WARN_ON(error))
+                               return error;
+
+                       error = khwq_qos_set_sched_wrr_q_count(kdev, idx, 0,
+                                                               false);
+                       if (WARN_ON(error))
+                               return error;
+
+                       error = khwq_qos_sync_sched_port(kdev, idx);
+                       if (error)
+                               return error;
+
+                       error = khwq_qos_control_sched_port(kdev,
+                                                           QOS_CONTROL_ENABLE,
+                                                           idx,
+                                                           false);
+                       if (error) {
+                               dev_err(kdev->dev,
+                                       "error disabling sched port %d\n", i);
+                               return error;
+                       }
+               }
+       }
+
+       return 0;
+}
+
+static int khwq_qos_tree_stop(struct khwq_qos_info *info)
+{
+       int error;
+
+       error = khwq_qos_stop_sched_port_queues(info);
+       if (error)
+               return error;
+
+       error = khwq_qos_stop_drop_queues(info);
+       if (error)
+               return error;
+
+       error = khwq_qos_stop_drop_outs(info);
+       if (error)
+               return error;
+
+       return 0;
+}
+
+static int khwq_qos_init_base(struct khwq_qos_info *info, int base, int num)
+{
+       struct khwq_device *kdev = info->kdev;
+       int end = base + num;
+       int sched_port_queues, drop_sched_queues;
+
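+       /*
+        * Carve two 32-aligned blocks out of the queue range: one for the
+        * drop scheduler input queues, one for the scheduler port input
+        * queues.
+        */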
+       base = ALIGN(base, 32);
+       info->drop_sched_queue_base = base;
+       drop_sched_queues = info->shadows[QOS_DROP_QUEUE_CFG].count;
+       base += drop_sched_queues;
+
+       base = ALIGN(base, 32);
+       info->sched_port_queue_base = base;
+       sched_port_queues = (info->shadows[QOS_SCHED_PORT_CFG].count *
+                            info->inputs_per_port);
+       base += sched_port_queues;
+
+       if (base >= end) {
+               dev_err(kdev->dev, "too few queues (%d), need %d + %d\n",
+                       num, sched_port_queues, drop_sched_queues);
+               return -ENODEV;
+       } else {
+               dev_info(kdev->dev, "qos: sched port @%d, drop sched @%d\n",
+                        info->sched_port_queue_base,
+                        info->drop_sched_queue_base);
+               return 0;
+       }
+}
+
+static int khwq_qos_init_queue(struct khwq_range_info *range,
+                              struct hwqueue_instance *inst)
+{
+       return 0;
+}
+
+static int khwq_qos_open_queue(struct khwq_range_info *range,
+                              struct hwqueue_instance *inst, unsigned flags)
+{
+       struct khwq_qos_info *info;
+       struct khwq_device *kdev;
+       u32 command;
+       int error;
+
+       info = range->qos_info;
+       kdev = info->kdev;
+
+       info->refcount++;
+       if (info->refcount == 1) {
+               error = khwq_qos_tree_start(info);
+               if (error) {
+                       dev_err(kdev->dev, "failed to program qos tree\n");
+                       return error;
+               }
+
+               /* Enable the drop scheduler */
+               command = (QOS_CMD_ENABLE_PORT |
+                          QOS_DROP_SCHED_ENABLE | QOS_ENABLE);
+               error = khwq_qos_write_cmd(info, command);
+               if (error)
+                       dev_err(kdev->dev, "failed to enable drop scheduler\n");
+
+               init_timer(&info->timer);
+               info->timer.data                = (unsigned long)info;
+               info->timer.function            = khwq_qos_timer;
+               info->timer.expires             = jiffies +
+                                                       KHWQ_QOS_TIMER_INTERVAL;
+               add_timer(&info->timer);
+       }
+
+       return 0;
+}
+
+static int khwq_qos_close_queue(struct khwq_range_info *range,
+                               struct hwqueue_instance *inst)
+{
+       struct khwq_qos_info *info;
+       struct khwq_device *kdev;
+       u32 command;
+       int error;
+
+       info = range->qos_info;
+       kdev = info->kdev;
+
+       info->refcount--;
+       if (!info->refcount) {
+               /* Disable the drop scheduler */
+               command = (QOS_CMD_ENABLE_PORT |
+                          QOS_DROP_SCHED_ENABLE | QOS_DISABLE);
+               error = khwq_qos_write_cmd(info, command);
+               if (error)
+                       dev_err(kdev->dev,
+                               "failed to disable drop scheduler\n");
+
+               error = khwq_qos_tree_stop(info);
+               if (error) {
+                       dev_err(kdev->dev, "failed to close qos tree\n");
+                       return error;
+               }
+
+               del_timer_sync(&info->timer);
+       }
+
+       return 0;
+}
+
+static int khwq_qos_free_range(struct khwq_range_info *range)
+{
+       return 0;
+}
+
+static int khwq_qos_set_notify(struct khwq_range_info *range,
+                              struct hwqueue_instance *inst, bool enabled)
+{
+       return 0;
+}
+
+static int khwq_qos_init_range(struct khwq_range_info *range)
+{
+       struct khwq_pdsp_info *pdsp;
+       struct khwq_qos_info *info;
+       struct khwq_device *kdev;
+       u32 command, magic, version;
+       int error, i, idx, timer_config;
+
+       info = range->qos_info;
+       pdsp = info->pdsp;
+       kdev = info->kdev;
+
+       magic = __raw_readl(pdsp->command + QOS_MAGIC_OFFSET);
+       version = __raw_readl(pdsp->command + QOS_VERSION_OFFSET);
+
+       if ((magic >> 16) != QOS_MAGIC_DROPSCHED) {
+               dev_err(kdev->dev, "invalid qos magic word %x\n", magic);
+               return -EINVAL;
+       }
+
+       dev_info(kdev->dev, "qos version 0x%x, magic %s\n", version,
+                ((magic >> 16) == QOS_MAGIC_DROPSCHED) ? "valid" : "invalid");
+
+       for (i = 0 ; i < info->shadows[QOS_SCHED_PORT_CFG].count; i++) {
+               idx = khwq_qos_make_id(pdsp->id, i);
+               __khwq_qos_set_sched_overhead_bytes(kdev, idx,
+                                                   QOS_DEFAULT_OVERHEAD_BYTES,
+                                                   false);
+       }
+
+       for (i = 0 ; i < info->shadows[QOS_DROP_CFG_PROF].count; i++) {
+               idx = khwq_qos_make_id(pdsp->id, i);
+               __khwq_qos_set_drop_cfg_tail_thresh(kdev, idx, -1, false);
+       }
+
+       /* command for drop scheduler base */
+       command = (QOS_CMD_SET_QUEUE_BASE | QOS_QUEUE_BASE_DROP_SCHED |
+                  (info->drop_sched_queue_base << 16));
+       error = khwq_qos_write_cmd(info, command);
+       if (error) {
+               dev_err(kdev->dev, "failed to set drop sched base\n");
+               return error;
+       }
+
+       /* command for qos scheduler base */
+       command = (QOS_CMD_SET_QUEUE_BASE | QOS_QUEUE_BASE_QOS_SCHED |
+                  (info->sched_port_queue_base << 16));
+       error = khwq_qos_write_cmd(info, command);
+       if (error) {
+               dev_err(kdev->dev, "failed to set qos sched base\n");
+               return error;
+       }
+
+       /* calculate the timer config from the pdsp tick */
+       timer_config = (QMSS_CLK_RATE / info->ticks_per_sec);
+       timer_config /= 2;
+       command = (QOS_CMD_SET_TIMER_CONFIG | ((timer_config & 0xffff) << 16));
+       error = khwq_qos_write_cmd(info, command);
+       if (error) {
+               dev_err(kdev->dev, "failed to set timer\n");
+               return error;
+       }
+
+       error = khwq_qos_program_drop_sched(info);
+       if (error) {
+               dev_err(kdev->dev, "failed to initialize drop scheduler\n");
+               return error;
+       }
+
+       error = khwq_program_drop_policies(info);
+       if (error) {
+               dev_err(kdev->dev, "failed to initialize drop policies\n");
+               return error;
+       }
+
+       return 0;
+}
+
+struct khwq_range_ops khwq_qos_range_ops = {
+       .set_notify     = khwq_qos_set_notify,
+       .init_queue     = khwq_qos_init_queue,
+       .open_queue     = khwq_qos_open_queue,
+       .close_queue    = khwq_qos_close_queue,
+       .init_range     = khwq_qos_init_range,
+       .free_range     = khwq_qos_free_range,
+};
+
+int khwq_init_qos_range(struct khwq_device *kdev, struct device_node *node,
+                       struct khwq_range_info *range)
+{
+       struct khwq_pdsp_info *pdsp = NULL;
+       struct khwq_qos_info *info;
+       struct device_node *child;
+       struct device *dev = kdev->dev;
+       u32 temp[7];
+       int error;
+
+       info = devm_kzalloc(kdev->dev, sizeof(*info), GFP_KERNEL);
+       if (!info) {
+               dev_err(kdev->dev, "failed to alloc qos info\n");
+               return -ENOMEM;
+       }
+
+       info->kobj = kobject_create_and_add("qos",
+                                       kobject_get(&dev->kobj));
+       if (!info->kobj) {
+               dev_err(kdev->dev, "could not create sysfs entries for qos\n");
+               devm_kfree(kdev->dev, info);
+               return -ENODEV;
+       }
+
+       info->kobj_stats = kobject_create_and_add("statistics", info->kobj);
+       if (!info->kobj_stats) {
+               dev_err(kdev->dev,
+                       "could not create sysfs entries for qos statistics\n");
+               devm_kfree(kdev->dev, info);
+               return -ENODEV;
+       }
+
+       info->kobj_policies = kobject_create_and_add("drop-policies",
+                                                    info->kobj);
+       if (!info->kobj_policies) {
+               dev_err(kdev->dev,
+                       "could not create sysfs entries for qos drop policies\n");
+               devm_kfree(kdev->dev, info);
+               return -ENODEV;
+       }
+
+       error = of_property_read_u32(node, "pdsp-id", &info->pdsp_id);
+       if (error < 0) {
+               dev_err(kdev->dev, "pdsp id must be specified\n");
+               devm_kfree(kdev->dev, info);
+               return error;
+       }
+
+       pdsp = khwq_find_pdsp(kdev, info->pdsp_id);
+       if (!pdsp) {
+               dev_err(kdev->dev, "pdsp id %d not found for range %s\n",
+                       info->pdsp_id, range->name);
+               devm_kfree(kdev->dev, info);
+               return -ENODEV;
+       }
+
+       if (pdsp->qos_info) {
+               dev_err(kdev->dev, "pdsp id %d is busy\n", info->pdsp_id);
+               devm_kfree(kdev->dev, info);
+               return -EBUSY;
+       }
+
+       info->pdsp = pdsp;
+       info->kdev = kdev;
+       INIT_LIST_HEAD(&info->drop_policies);
+       INIT_LIST_HEAD(&info->stats_classes);
+       spin_lock_init(&info->lock);
+       /* TODO: add refcount handlers */
+       ktree_init(&info->qos_tree, khwq_qos_get_tree_node,
+                  khwq_qos_put_tree_node);
+
+       error = of_property_read_u32_array(node, "qos-cfg", temp, 7);
+       if (error < 0) {
+               dev_err(kdev->dev, "failed to obtain qos scheduler config\n");
+               goto bail;
+       }
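+       /*
+        * "qos-cfg" cells, in order: inputs per port, followed by the drop
+        * config int_num, qos_ticks, drop_ticks and the three seed words.
+        */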
+       info->inputs_per_port     = temp[0];
+       info->drop_cfg.int_num    = temp[1];
+       info->drop_cfg.qos_ticks  = temp[2];
+       info->drop_cfg.drop_ticks = temp[3];
+       info->drop_cfg.seed[0]    = temp[4];
+       info->drop_cfg.seed[1]    = temp[5];
+       info->drop_cfg.seed[2]    = temp[6];
+
+       error = of_property_read_u32(node, "tick-per-sec",
+                                    &info->ticks_per_sec);
+       if (error < 0)
+               info->ticks_per_sec = 10000;
+
+       error = khwq_qos_init_shadow(info, QOS_SCHED_PORT_CFG,
+                                    "sched-port-configs", node, false);
+       if (error)
+               goto bail;
+
+       error = khwq_qos_init_shadow(info, QOS_DROP_CFG_PROF,
+                                    "drop-cfg-profiles", node, true);
+       if (error)
+               goto bail;
+
+       error = khwq_qos_init_shadow(info, QOS_DROP_OUT_PROF,
+                                    "drop-out-profiles", node, true);
+       if (error)
+               goto bail;
+
+       error = khwq_qos_init_shadow(info, QOS_DROP_QUEUE_CFG,
+                                    "drop-queue-configs", node, true);
+       if (error)
+               goto bail;
+
+       error = khwq_qos_init_stats(info, node);
+       if (error)
+               goto bail;
+
+       error = khwq_qos_init_base(info, range->queue_base, range->num_queues);
+       if (error)
+               goto bail;
+
+       pdsp->qos_info  = info;
+       range->qos_info = info;
+
+       child = of_parse_phandle(node, "drop-policies", 0);
+       if (!child)
+               child = of_find_child_by_name(node, "drop-policies");
+       if (!child) {
+               dev_err(kdev->dev, "could not find drop policies\n");
+               goto bail;
+       }
+
+       error = khwq_qos_get_drop_policies(kdev, info, child);
+       if (error)
+               goto bail;
+
+       child = of_parse_phandle(node, "qos-tree", 0);
+       if (!child)
+               child = of_find_child_by_name(node, "qos-tree");
+       if (!child) {
+               dev_err(kdev->dev, "could not find qos tree\n");
+               goto bail;
+       }
+
+       error = khwq_qos_tree_parse(info, child, NULL);
+       if (error)
+               goto bail;
+
+       error = khwq_qos_tree_init(info);
+       if (error)
+               goto bail;
+
+       range->ops = &khwq_qos_range_ops;
+
+       return 0;
+
+bail:
+       __khwq_free_qos_range(kdev, info);
+
+       range->qos_info = NULL;
+       range->ops      = NULL;
+       if (pdsp)
+               pdsp->qos_info  = NULL;
+
+       return error;
+}
diff --git a/drivers/hwqueue/keystone_qos.h b/drivers/hwqueue/keystone_qos.h
new file mode 100644 (file)
index 0000000..43d4d80
--- /dev/null
@@ -0,0 +1,374 @@
+/*
+ * Keystone hardware queue QoS driver
+ *
+ * Copyright (C) 2012 Texas Instruments Incorporated - http://www.ti.com
+ * Contact: Sandeep Paulraj <s-paulraj@ti.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ */
+
+#ifndef __KEYSTONE_QOS_FW_H
+#define __KEYSTONE_QOS_FW_H
+
+#define QOS_MAX_INPUTS                 128
+
+#define QMSS_CLK_RATE                  327680000
+
+#define QOS_RETCODE_SUCCESS            1
+
+#define QOS_COMMAND_TIMEOUT            20      /* msecs */
+#define QOS_COMMAND_DELAY              10      /* usecs */
+
+#define QOS_CMD_GET_QUEUE_BASE         0x80
+#define QOS_CMD_SET_QUEUE_BASE         0x81
+#define QOS_CMD_SET_TIMER_CONFIG       0x82
+#define QOS_CMD_ENABLE_PORT            0x90
+#define QOS_CMD_PORT_SHADOW            0x91
+#define QOS_CMD_STATS_REQUEST          0x92
+
+#define QOS_MAGIC_DROPSCHED            0x8020
+
+#define QOS_QUEUE_BASE_DROP_SCHED      BIT(8)
+#define QOS_QUEUE_BASE_QOS_SCHED       0
+
+#define QOS_ENABLE                     BIT(8)
+#define QOS_DISABLE                    0
+
+#define QOS_COPY_ACTIVE_TO_SHADOW      0
+#define QOS_COPY_SHADOW_TO_ACTIVE      BIT(8)
+
+#define QOS_DROP_SCHED_CFG             4
+#define QOS_DROP_SCHED_ENABLE          BIT(24)
+
+#define QOS_DEFAULT_OVERHEAD_BYTES     24
+
+#define QOS_CREDITS_PACKET_SHIFT       20
+#define QOS_CREDITS_BYTE_SHIFT         11
+
+#define QOS_BYTE_NORMALIZATION_FACTOR  3000
+#define QOS_PACKET_NORMALIZATION_FACTOR        2
+
+#define QOS_MAX_WEIGHT                 SZ_1M
+
+#define to_qnode(_n)   container_of(_n, struct khwq_qos_tree_node, node)
+
+enum khwq_qos_normalize {
+       DIVIDE,
+       MULTIPLY
+};
+
+enum khwq_qos_accounting_type {
+       QOS_PACKET_ACCT,
+       QOS_BYTE_ACCT
+};
+
+enum khwq_qos_drop_mode {
+       QOS_TAILDROP,
+       QOS_RED,
+};
+
+enum khwq_qos_tree_node_type {
+       QOS_NODE_PRIO,
+       QOS_NODE_WRR,
+       QOS_NODE_DEFAULT,
+};
+
+enum khwq_qos_shadow_type {
+       QOS_SCHED_PORT_CFG,
+       QOS_DROP_CFG_PROF,
+       QOS_DROP_QUEUE_CFG,
+       QOS_DROP_OUT_PROF,
+
+       QOS_MAX_SHADOW  /* last */
+};
+
+enum khwq_qos_control_type {
+       QOS_CONTROL_ENABLE,
+       QOS_CONTROL_GET_INPUT,
+};
+
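+/*
+ * Host-side shadow of a firmware configuration region.  The dirty, avail
+ * and running bitmaps track per-record state, and the sync/control hooks
+ * push records to, or query, the QoS firmware.
+ */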
+struct khwq_qos_shadow {
+       enum khwq_qos_shadow_type         type;
+       struct khwq_qos_info             *info;
+       const char                       *name;
+       void                             *data;
+       unsigned long                    *dirty;
+       unsigned long                    *avail;
+       unsigned long                    *running;
+       int                               start, count, size;
+       int             (*sync)(struct khwq_qos_shadow *shadow, int idx);
+       int             (*control)(struct khwq_qos_shadow *shadow,
+                                  enum khwq_qos_control_type ctrl,
+                                  int idx, u32 arg);
+};
+
+struct khwq_qos_stats {
+       enum khwq_qos_shadow_type         type;
+       const char                       *name;
+       void                             *data;
+       unsigned long                    *dirty;
+       unsigned long                    *avail;
+       unsigned long                    *running;
+       int                               start, count, size;
+       int             (*get_stats)(struct khwq_qos_stats *stats, int idx);
+};
+
+struct khwq_qos_drop_policy {
+       const char                      *name;
+       struct khwq_qos_info            *info;
+       u32                              usecount;
+       enum khwq_qos_accounting_type    acct;
+       enum khwq_qos_drop_mode          mode;
+       u32                              limit;
+       u32                              red_low;
+       u32                              red_high;
+       u32                              half_life;
+       u32                              max_drop_prob;
+       int                              drop_cfg_idx;
+       struct list_head                 list;
+       struct kobject                   kobj;
+};
+
+struct khwq_qos_info {
+       spinlock_t                       lock;
+       struct khwq_device              *kdev;
+       struct khwq_pdsp_info           *pdsp;
+       u32                              refcount;
+       struct khwq_qos_shadow           shadows[QOS_MAX_SHADOW];
+       struct khwq_qos_stats            stats;
+       struct ktree                     qos_tree;
+       struct list_head                 drop_policies;
+       struct list_head                 stats_classes;
+       struct khwq_qos_drop_policy     *default_drop_policy;
+
+       int      sched_port_queue_base,
+                drop_sched_queue_base,
+                inputs_per_port,
+                ticks_per_sec,
+                pdsp_id;
+
+       struct {
+               u8               int_num;
+               u8               qos_ticks;
+               u8               drop_ticks;
+               u32              seed[3];
+       } drop_cfg;
+
+       struct timer_list                timer;
+
+       struct kobject                   *kobj;
+       struct kobject                   *kobj_stats;
+       struct kobject                   *kobj_policies;
+};
+
+struct khwq_qos_stats_class {
+       const char                      *name;
+       struct khwq_qos_info            *info;
+       struct list_head                 list;
+       int                              stats_block_idx;
+       bool                             usecount;
+       struct kobject                   kobj;
+};
+
+struct khwq_qos_tree_node {
+       struct khwq_qos_tree_node       *parent;
+       struct khwq_qos_info            *info;
+       struct ktree_node                node;
+       enum khwq_qos_tree_node_type     type;
+       u32                              weight;
+       u32                              priority;
+       enum khwq_qos_accounting_type    acct;
+       const char                      *name;
+       int                              overhead_bytes;
+       int                              output_rate;
+       int                              burst_size;
+       int                              num_input_queues;
+       u32                              input_queues[QOS_MAX_INPUTS];
+       struct khwq_qos_drop_policy     *drop_policy;
+       struct khwq_qos_stats_class     *stats_class;
+
+       int      child_port_count;      /* children that need ports     */
+       int      child_count;           /* number of children           */
+       int      parent_input;          /* input number of parent       */
+       u32      child_weight_min;      /* min of child weights         */
+       u32      child_weight_max;      /* max of child weights         */
+       u32      child_weight_sum;      /* sum of child weights         */
+       bool     is_drop_input;         /* indicates that child's output
+                                          feeds to the drop sched      */
+       bool     has_sched_port;        /* does this port need a sched? */
+       int      output_queue;          /* from DT or calculated        */
+
+       /* allocated resources */
+       int      sched_port_idx;        /* inherited by default nodes   */
+       int      drop_out_idx;          /* inherited by default nodes   */
+       int      drop_queue_idx[QOS_MAX_INPUTS];
+
+       struct kobject                   kobj;
+};
+
+struct khwq_semaphore_regs {
+       u32             sem;
+};
+
+struct khwq_push_stats_regs {
+       u32             stats_info;
+       u32             bytes_forwarded;
+       u32             bytes_discarded;
+       u32             packets_forwarded;
+       u32             packets_discarded;
+};
+
+struct khwq_query_stats_regs {
+       u32             bytes_forwarded_lsw;
+       u32             bytes_forwarded_msw;
+       u32             bytes_discarded_lsw;
+       u32             bytes_discarded_msw;
+       u32             packets_forwarded;
+       u32             packets_discarded;
+};
+
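+/* Offsets of the shadow area, statistics push/query blocks and the
+ * firmware magic/version words */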
+#define QOS_SHADOW_OFFSET      0x40
+#define QOS_PUSH_STATS_OFFSET  0x2e0
+#define QOS_STATS_OFFSET       0x300
+#define QOS_MAGIC_OFFSET       0x1ff8
+#define QOS_VERSION_OFFSET     0x1ffc
+
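+/*
+ * A QoS identifier packs the PDSP number into the upper 16 bits and the
+ * per-PDSP index into the lower 16 bits.  Illustrative values (not taken
+ * from the driver sources):
+ *
+ *   khwq_qos_make_id(1, 5)           -> 0x00010005
+ *   khwq_qos_id_to_pdsp(0x00010005)  -> 1
+ *   khwq_qos_id_to_idx(0x00010005)   -> 5
+ *   khwq_qos_id_to_queue(info, id)   -> info->drop_sched_queue_base +
+ *                                       khwq_qos_id_to_idx(id)
+ */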
+#define khwq_qos_id_to_idx(idx)                ((idx) & 0xffff)
+#define khwq_qos_id_to_pdsp(idx)       ((idx) >> 16)
+#define khwq_qos_make_id(pdsp, idx)    ((pdsp) << 16 | (idx))
+#define khwq_qos_id_to_queue(info, idx)                \
+       ((info)->drop_sched_queue_base + khwq_qos_id_to_idx(idx))
+
+int khwq_qos_alloc(struct khwq_device *kdev, enum khwq_qos_shadow_type type);
+int khwq_qos_free(struct khwq_device *kdev, enum khwq_qos_shadow_type type,
+                   int idx);
+int khwq_qos_control(struct khwq_device *kdev, enum khwq_qos_shadow_type type,
+                      enum khwq_qos_control_type ctrl, int idx, u32 arg,
+                      bool internal);
+int khwq_qos_sync(struct khwq_device *kdev, enum khwq_qos_shadow_type type,
+                   int idx, bool internal);
+int khwq_qos_get(struct khwq_device *kdev, enum khwq_qos_shadow_type type,
+                  const char *name, int idx, int offset, int startbit,
+                  int nbits, u32 *value);
+int khwq_qos_set(struct khwq_device *kdev, enum khwq_qos_shadow_type type,
+                  const char *name, int idx, int offset, int startbit,
+                  int nbits, bool sync, u32 value, bool internal);
+
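+/*
+ * DEFINE_SHADOW(type, field) generates khwq_qos_control_<field>() and
+ * khwq_qos_sync_<field>(), thin wrappers around khwq_qos_control() and
+ * khwq_qos_sync() for one shadow region (internal == false).
+ */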
+#define DEFINE_SHADOW(_type, _field)                                          \
+static inline int khwq_qos_control_##_field(struct khwq_device *kdev,          \
+                                           enum khwq_qos_control_type ctrl,   \
+                                           int idx, u32 arg)                  \
+{                                                                             \
+       return khwq_qos_control(kdev, _type, ctrl, idx, arg, false);           \
+}                                                                             \
+static inline int khwq_qos_sync_##_field(struct khwq_device *kdev,            \
+                                        int idx)                              \
+{                                                                             \
+       return khwq_qos_sync(kdev, _type, idx, false);                 \
+}
+
+DEFINE_SHADOW(QOS_DROP_CFG_PROF,       drop_cfg);
+DEFINE_SHADOW(QOS_DROP_OUT_PROF,       drop_out);
+DEFINE_SHADOW(QOS_SCHED_PORT_CFG,      sched_port);
+DEFINE_SHADOW(QOS_DROP_QUEUE_CFG,      drop_queue);
+
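+/*
+ * DEFINE_ALLOC(type, field) generates khwq_qos_alloc_<field>() and
+ * khwq_qos_free_<field>() wrappers for one shadow region.
+ */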
+#define DEFINE_ALLOC(_type, _field)                                           \
+static inline int khwq_qos_alloc_##_field(struct khwq_device *kdev)           \
+{                                                                             \
+       return khwq_qos_alloc(kdev, _type);                                    \
+}                                                                             \
+static inline int khwq_qos_free_##_field(struct khwq_device *kdev,            \
+                                        int idx)                              \
+{                                                                             \
+       return khwq_qos_free(kdev, _type, idx);                        \
+}
+
+DEFINE_ALLOC(QOS_DROP_CFG_PROF,         drop_cfg);
+DEFINE_ALLOC(QOS_DROP_OUT_PROF,         drop_out);
+DEFINE_ALLOC(QOS_SCHED_PORT_CFG,        sched_port);
+
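+/*
+ * DEFINE_FIELD_U32(type, field, offset, startbit, nbits) generates get/set
+ * accessors for an nbits-wide field starting at bit 'startbit' of the
+ * 32-bit word at byte offset 'offset' in a shadow record; the
+ * double-underscore set variant passes internal == true to khwq_qos_set().
+ */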
+#define DEFINE_FIELD_U32(_type, _field, _offset, _startbit, _nbits)     \
+static inline int khwq_qos_get_##_field(struct khwq_device *kdev,       \
+                                       int idx, u32 *value)             \
+{                                                                       \
+       return khwq_qos_get(kdev, _type, #_field, idx, _offset,  \
+                             _startbit, _nbits, value);                 \
+}                                                                       \
+static inline int khwq_qos_set_##_field(struct khwq_device *kdev,       \
+                                       int idx, u32 value, bool sync)   \
+{                                                                       \
+       return khwq_qos_set(kdev, _type, #_field, idx, _offset,  \
+                             _startbit, _nbits, sync, value, false);    \
+}                                                                       \
+static inline int __khwq_qos_set_##_field(struct khwq_device *kdev,     \
+                                       int idx, u32 value, bool sync)   \
+{                                                                       \
+       return khwq_qos_set(kdev, _type, #_field, idx, _offset,  \
+                             _startbit, _nbits, sync, value, true);     \
+}
+
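+/*
+ * DEFINE_FIELD_U32_ARRAY(type, field, offset, size) is the array form:
+ * element 'elem' is the whole 32-bit word at offset + elem * size.
+ */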
+#define DEFINE_FIELD_U32_ARRAY(_type, _field, _offset, _size)           \
+static inline int khwq_qos_get_##_field(struct khwq_device *kdev,       \
+                                       int idx, int elem, u32 *value)   \
+{                                                                       \
+       int ofs = _offset + elem * _size;                                \
+       return khwq_qos_get(kdev, _type, #_field, idx, ofs, 0, 32,       \
+                             value);                                    \
+}                                                                       \
+static inline int khwq_qos_set_##_field(struct khwq_device *kdev,       \
+                               int idx, int elem, u32 value, bool sync) \
+{                                                                       \
+       int ofs = _offset + elem * _size;                                \
+       return khwq_qos_set(kdev, _type, #_field, idx, ofs, 0, 32,       \
+                             sync, value, false);                       \
+}                                                                       \
+static inline int __khwq_qos_set_##_field(struct khwq_device *kdev,     \
+                               int idx, int elem, u32 value, bool sync) \
+{                                                                       \
+       int ofs = _offset + elem * _size;                                \
+       return khwq_qos_set(kdev, _type, #_field, idx, ofs, 0, 32,       \
+                             sync, value, true);                        \
+}
+
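+/*
+ * The instantiations below describe the firmware record layouts for the
+ * drop configuration profiles, drop output profiles, drop queue
+ * configuration and scheduler port configuration.  Usage of the generated
+ * accessors, sketched for illustration only:
+ *
+ *   u32 out_queue;
+ *
+ *   khwq_qos_get_sched_out_queue(kdev, idx, &out_queue);
+ *   khwq_qos_set_sched_out_queue(kdev, idx, out_queue, true);
+ */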
+DEFINE_FIELD_U32(QOS_DROP_CFG_PROF, drop_cfg_unit_flags,   0x00,  0,  8)
+DEFINE_FIELD_U32(QOS_DROP_CFG_PROF, drop_cfg_mode,         0x00,  8,  8)
+DEFINE_FIELD_U32(QOS_DROP_CFG_PROF, drop_cfg_time_const,   0x00, 16,  8)
+DEFINE_FIELD_U32(QOS_DROP_CFG_PROF, drop_cfg_tail_thresh,  0x04,  0, 32)
+DEFINE_FIELD_U32(QOS_DROP_CFG_PROF, drop_cfg_red_low,      0x08,  0, 32)
+DEFINE_FIELD_U32(QOS_DROP_CFG_PROF, drop_cfg_red_high,     0x0c,  0, 32)
+DEFINE_FIELD_U32(QOS_DROP_CFG_PROF, drop_cfg_thresh_recip, 0x10,  0, 32)
+
+DEFINE_FIELD_U32(QOS_DROP_OUT_PROF, drop_out_queue_number, 0x00,  0, 16)
+DEFINE_FIELD_U32(QOS_DROP_OUT_PROF, drop_out_red_prob,     0x00, 16, 16)
+DEFINE_FIELD_U32(QOS_DROP_OUT_PROF, drop_out_cfg_prof_idx, 0x04,  0,  8)
+DEFINE_FIELD_U32(QOS_DROP_OUT_PROF, drop_out_enable,       0x04,  8,  8)
+DEFINE_FIELD_U32(QOS_DROP_OUT_PROF, drop_out_avg_depth,    0x08,  0, 32)
+
+DEFINE_FIELD_U32(QOS_DROP_QUEUE_CFG, drop_q_out_prof_idx,  0x00,  0,  8)
+DEFINE_FIELD_U32(QOS_DROP_QUEUE_CFG, drop_q_stat_blk_idx,  0x00,  8,  8)
+DEFINE_FIELD_U32(QOS_DROP_QUEUE_CFG, drop_q_stat_irq,      0x00, 16,  8)
+DEFINE_FIELD_U32(QOS_DROP_QUEUE_CFG, drop_q_valid,         0x00, 24,  8)
+
+DEFINE_FIELD_U32(QOS_SCHED_PORT_CFG, sched_unit_flags,     0x00,  0,  8)
+DEFINE_FIELD_U32(QOS_SCHED_PORT_CFG, sched_group_count,    0x00,  8,  8)
+DEFINE_FIELD_U32(QOS_SCHED_PORT_CFG, sched_out_queue,      0x00, 16, 16)
+DEFINE_FIELD_U32(QOS_SCHED_PORT_CFG, sched_overhead_bytes, 0x04,  0,  8)
+DEFINE_FIELD_U32(QOS_SCHED_PORT_CFG, sched_out_throttle,   0x04, 16, 16)
+DEFINE_FIELD_U32(QOS_SCHED_PORT_CFG, sched_cir_credit,     0x08,  0, 32)
+DEFINE_FIELD_U32(QOS_SCHED_PORT_CFG, sched_cir_max,        0x0c,  0, 32)
+DEFINE_FIELD_U32(QOS_SCHED_PORT_CFG, sched_total_q_count,  0x24,  0,  8)
+DEFINE_FIELD_U32(QOS_SCHED_PORT_CFG, sched_sp_q_count,     0x24,  8,  8)
+DEFINE_FIELD_U32(QOS_SCHED_PORT_CFG, sched_wrr_q_count,    0x24, 16,  8)
+
+DEFINE_FIELD_U32_ARRAY(QOS_SCHED_PORT_CFG, sched_wrr_credit,  0x28, 0x8)
+DEFINE_FIELD_U32_ARRAY(QOS_SCHED_PORT_CFG, sched_cong_thresh, 0x2c, 0x8)
+
+#endif /* __KEYSTONE_QOS_FW_H */