summary | shortlog | log | commit | commitdiff | tree
raw | patch | inline | side by side (parent: 99dbca1)
raw | patch | inline | side by side (parent: 99dbca1)
author | Suman Anna <s-anna@ti.com> | |
Fri, 28 Apr 2017 21:46:12 +0000 (16:46 -0500) | ||
committer | Suman Anna <s-anna@ti.com> | |
Sat, 3 Mar 2018 04:45:51 +0000 (22:45 -0600) |
The last trace is a way of preserving the remoteproc traces past
remoteproc recovery. This is achieved by creating a new traceY_last
debugfs entry during a crash for each trace entry, and copying the
trace buffer contents into the corresponding last trace entry. These
copied contents can then be read out using a debugfs entry. The
trace entries themselves are cleaned up after the copy and are
recreated during a recovery reload.
Eg:
cat <debugfs_root>/remoteproc/remoteprocX/traceY_last
should give the traces that were printed out just before the recovery
happened on remoteproc X for trace Y.
Signed-off-by: Subramaniam Chanderashekarapuram <subramaniam.ca@ti.com>
Signed-off-by: Suman Anna <s-anna@ti.com>
remoteproc recovery. This is achieved by creating a new traceY_last
debugfs entry during a crash for each trace entry, and copying the
trace buffer contents into the corresponding last trace entry. These
copied contents can then be read out using a debugfs entry. The
trace entries themselves are cleaned up after the copy and are
recreated during a recovery reload.
Eg:
cat <debugfs_root>/remoteproc/remoteprocX/traceY_last
should give the traces that were printed out just before the recovery
happened on remoteproc X for trace Y.
Signed-off-by: Subramaniam Chanderashekarapuram <subramaniam.ca@ti.com>
Signed-off-by: Suman Anna <s-anna@ti.com>
drivers/remoteproc/remoteproc_core.c | patch | blob | history | |
include/linux/remoteproc.h | patch | blob | history |
index fa145fe01339643017aaa6ea7846951f60f6ae21..6856261b576b234831d39a9bce02cf74c837e422 100644 (file)
#include <linux/crc32.h>
#include <linux/virtio_ids.h>
#include <linux/virtio_ring.h>
+#include <linux/vmalloc.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <asm/byteorder.h>
kfree(rvdev);
}
+/**
+ * rproc_handle_last_trace() - setup a buffer to capture the trace snapshot
+ * before recovery
+ * @rproc: the remote processor
+ * @trace: the trace resource descriptor
+ * @count: the index of the trace under process
+ *
+ * The last trace is allocated and the contents of the trace buffer are
+ * copied during a recovery cleanup. Once the contents get copied, the
+ * trace buffers are cleaned up for re-use.
+ *
+ * It might also happen that the remoteproc binary changes between the
+ * time that it was loaded and the time that it crashed. In this case,
+ * the trace descriptors might have changed too. The last traces are
+ * re-built as required in this case.
+ *
+ * Returns 0 on success, or an appropriate error code otherwise
+ */
+static int rproc_handle_last_trace(struct rproc *rproc,
+				   struct rproc_mem_entry *trace, int count)
+{
+	struct rproc_mem_entry *trace_last, *tmp_trace;
+	struct device *dev;
+	char name[15];
+	int i = 0;
+	bool new_trace = false;
+
+	/* validate arguments before dereferencing either pointer */
+	if (!rproc || !trace)
+		return -EINVAL;
+
+	dev = &rproc->dev;
+
+	/* we need a new trace in this case */
+	if (count > rproc->num_last_traces) {
+		new_trace = true;
+		/*
+		 * make sure snprintf always null terminates, even if truncating
+		 */
+		snprintf(name, sizeof(name), "trace%d_last", (count - 1));
+		trace_last = kzalloc(sizeof(*trace_last), GFP_KERNEL);
+		if (!trace_last) {
+			dev_err(dev, "kzalloc failed for trace%d_last\n",
+				count);
+			return -ENOMEM;
+		}
+	} else {
+		/* try to reuse buffers here */
+		list_for_each_entry_safe(trace_last, tmp_trace,
+					 &rproc->last_traces, node) {
+			if (++i == count)
+				break;
+		}
+
+		/* if we can reuse the trace, copy buffer and exit */
+		if (trace_last->len == trace->len)
+			goto copy_and_exit;
+
+		/* can reuse the trace struct but not the buffer */
+		vfree(trace_last->va);
+		trace_last->va = NULL;
+		trace_last->len = 0;
+	}
+
+	trace_last->len = trace->len;
+	/* len is a byte count (see the memcpy below), so allocate len bytes */
+	trace_last->va = vmalloc(trace_last->len);
+	if (!trace_last->va) {
+		dev_err(dev, "vmalloc failed for trace%d_last\n", count);
+		if (!new_trace) {
+			/*
+			 * the reused entry already has a debugfs file; remove
+			 * it before freeing the entry to avoid leaking the
+			 * dentry and leaving a dangling file
+			 */
+			rproc_remove_trace_file(trace_last->priv);
+			list_del(&trace_last->node);
+			rproc->num_last_traces--;
+		}
+		kfree(trace_last);
+		return -ENOMEM;
+	}
+
+	/* create the debugfs entry */
+	if (new_trace) {
+		trace_last->priv = rproc_create_trace_file(name, rproc,
+							   trace_last);
+		if (!trace_last->priv) {
+			dev_err(dev, "trace%d_last create debugfs failed\n",
+				count);
+			vfree(trace_last->va);
+			kfree(trace_last);
+			return -EINVAL;
+		}
+
+		/* add it to the trace list */
+		list_add_tail(&trace_last->node, &rproc->last_traces);
+		rproc->num_last_traces++;
+	}
+
+copy_and_exit:
+	/* copy the trace to last trace */
+	memcpy(trace_last->va, trace->va, trace->len);
+
+	return 0;
+}
+
/**
* rproc_handle_trace() - handle a shared trace buffer resource
* @rproc: the remote processor
subdev->remove(subdev);
}
+/**
+ * rproc_free_last_trace() - helper function to cleanup a last trace entry
+ * @trace: the last trace element to be cleaned up
+ *
+ * Removes the entry's debugfs file, unlinks it from its list, and frees
+ * both the vmalloc'ed snapshot buffer and the entry itself. Callers are
+ * responsible for decrementing rproc->num_last_traces (see the cleanup
+ * loops in rproc_resource_cleanup() and rproc_del()).
+ */
+static void rproc_free_last_trace(struct rproc_mem_entry *trace)
+{
+	/* remove the debugfs file first, before freeing the buffer it reads */
+	rproc_remove_trace_file(trace->priv);
+	list_del(&trace->node);
+	vfree(trace->va);
+	kfree(trace);
+}
+
/**
* rproc_resource_cleanup() - clean up and free all acquired resources
* @rproc: rproc handle
struct rproc_mem_entry *entry, *tmp;
struct rproc_vdev *rvdev, *rvtmp;
struct device *dev = &rproc->dev;
+ int count = 0, i = rproc->num_traces;
/* clean up debugfs trace entries */
list_for_each_entry_safe(entry, tmp, &rproc->traces, node) {
+ /* handle last trace here */
+ if (rproc->state == RPROC_CRASHED)
+ rproc_handle_last_trace(rproc, entry, ++count);
+
rproc_remove_trace_file(entry->priv);
- rproc->num_traces--;
list_del(&entry->node);
kfree(entry);
}
+ rproc->num_traces = 0;
+
+ /*
+ * clean up debugfs last trace entries. This either deletes all last
+ * trace entries during cleanup or just the remaining entries, if any,
+ * in case of a crash.
+ */
+ list_for_each_entry_safe(entry, tmp, &rproc->last_traces, node) {
+ /* skip the valid traces */
+ if ((i--) && rproc->state == RPROC_CRASHED)
+ continue;
+ rproc_free_last_trace(entry);
+ rproc->num_last_traces--;
+ }
/* clean up iommu mapping entries */
list_for_each_entry_safe(entry, tmp, &rproc->mappings, node) {
INIT_LIST_HEAD(&rproc->carveouts);
INIT_LIST_HEAD(&rproc->mappings);
INIT_LIST_HEAD(&rproc->traces);
+ INIT_LIST_HEAD(&rproc->last_traces);
INIT_LIST_HEAD(&rproc->rvdevs);
INIT_LIST_HEAD(&rproc->subdevs);
*/
int rproc_del(struct rproc *rproc)
{
+ struct rproc_mem_entry *entry, *tmp;
+
if (!rproc)
return -EINVAL;
rproc->state = RPROC_DELETED;
mutex_unlock(&rproc->lock);
+ /* clean up debugfs last trace entries */
+ list_for_each_entry_safe(entry, tmp, &rproc->last_traces, node) {
+ rproc_free_last_trace(entry);
+ rproc->num_last_traces--;
+ }
+
rproc_delete_debug_dir(rproc);
/* the rproc is downref'ed as soon as it's removed from the klist */
index 89c2ba15c38f32a16b84159914a93189654d58e6..f67b71e1bfdc4d02b13aa5054d72c0ec7937662b 100644 (file)
* @dbg_dir: debugfs directory of this rproc device
* @traces: list of trace buffers
* @num_traces: number of trace buffers
+ * @last_traces: list of last trace buffers
+ * @num_last_traces: number of last trace buffers
* @carveouts: list of physically contiguous memory allocations
* @mappings: list of iommu mappings we initiated, needed on shutdown
* @bootaddr: address of first instruction to boot rproc with (optional)
struct dentry *dbg_dir;
struct list_head traces;
int num_traces;
+ struct list_head last_traces;
+ int num_last_traces;
struct list_head carveouts;
struct list_head mappings;
u32 bootaddr;