1 /*
2 * Remote Processor Framework
3 *
4 * Copyright (C) 2011 Texas Instruments, Inc.
5 * Copyright (C) 2011 Google, Inc.
6 *
7 * Ohad Ben-Cohen <ohad@wizery.com>
8 * Brian Swetland <swetland@google.com>
9 * Mark Grosen <mgrosen@ti.com>
10 * Fernando Guzman Lugo <fernando.lugo@ti.com>
11 * Suman Anna <s-anna@ti.com>
12 * Robert Tivy <rtivy@ti.com>
13 * Armando Uribe De Leon <x0095078@ti.com>
14 *
15 * This program is free software; you can redistribute it and/or
16 * modify it under the terms of the GNU General Public License
17 * version 2 as published by the Free Software Foundation.
18 *
19 * This program is distributed in the hope that it will be useful,
20 * but WITHOUT ANY WARRANTY; without even the implied warranty of
21 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
22 * GNU General Public License for more details.
23 */
25 #define pr_fmt(fmt) "%s: " fmt, __func__
27 #include <linux/kernel.h>
28 #include <linux/module.h>
29 #include <linux/device.h>
30 #include <linux/slab.h>
31 #include <linux/mutex.h>
32 #include <linux/dma-mapping.h>
33 #include <linux/firmware.h>
34 #include <linux/string.h>
35 #include <linux/debugfs.h>
36 #include <linux/devcoredump.h>
37 #include <linux/remoteproc.h>
38 #include <linux/iommu.h>
39 #include <linux/idr.h>
40 #include <linux/elf.h>
41 #include <linux/crc32.h>
42 #include <linux/virtio_ids.h>
43 #include <linux/virtio_ring.h>
44 #include <linux/vmalloc.h>
45 #include <linux/of.h>
46 #include <linux/platform_device.h>
47 #include <asm/byteorder.h>
49 #include "remoteproc_internal.h"
/* serializes additions to and removals from the global rproc_list */
static DEFINE_MUTEX(rproc_list_mutex);
/* all registered remote processors, protected by rproc_list_mutex */
static LIST_HEAD(rproc_list);

/* signature of a handler that processes a whole resource table */
typedef int (*rproc_handle_resources_t)(struct rproc *rproc,
				struct resource_table *table, int len);
/* signature of a handler for one resource table entry */
typedef int (*rproc_handle_resource_t)(struct rproc *rproc,
				 void *, int offset, int avail);

/* Unique indices for remoteproc devices */
static DEFINE_IDA(rproc_dev_index);

/* human-readable names for enum rproc_crash_type, indexed by type */
static const char * const rproc_crash_names[] = {
	[RPROC_MMUFAULT] = "mmufault",
	[RPROC_WATCHDOG] = "watchdog",
	[RPROC_FATAL_ERROR] = "fatal error",
};
68 /* translate rproc_crash_type to string */
69 static const char *rproc_crash_to_string(enum rproc_crash_type type)
70 {
71 if (type < ARRAY_SIZE(rproc_crash_names))
72 return rproc_crash_names[type];
73 return "unknown";
74 }
76 /*
77 * This is the IOMMU fault handler we register with the IOMMU API
78 * (when relevant; not all remote processors access memory through
79 * an IOMMU).
80 *
81 * IOMMU core will invoke this handler whenever the remote processor
82 * will try to access an unmapped device address.
83 */
84 static int rproc_iommu_fault(struct iommu_domain *domain, struct device *dev,
85 unsigned long iova, int flags, void *token)
86 {
87 struct rproc *rproc = token;
89 dev_err(dev, "iommu fault: da 0x%lx flags 0x%x\n", iova, flags);
91 rproc_report_crash(rproc, RPROC_MMUFAULT);
93 /*
94 * Let the iommu core know we're not really handling this fault;
95 * we just used it as a recovery trigger.
96 */
97 return -ENOSYS;
98 }
100 static int rproc_enable_iommu(struct rproc *rproc)
101 {
102 struct iommu_domain *domain;
103 struct device *dev = rproc->dev.parent;
104 int ret;
106 if (!rproc->has_iommu) {
107 dev_dbg(dev, "iommu not present\n");
108 return 0;
109 }
111 domain = iommu_domain_alloc(dev->bus);
112 if (!domain) {
113 dev_err(dev, "can't alloc iommu domain\n");
114 return -ENOMEM;
115 }
117 iommu_set_fault_handler(domain, rproc_iommu_fault, rproc);
119 ret = iommu_attach_device(domain, dev);
120 if (ret) {
121 dev_err(dev, "can't attach iommu device: %d\n", ret);
122 goto free_domain;
123 }
125 rproc->domain = domain;
127 return 0;
129 free_domain:
130 iommu_domain_free(domain);
131 return ret;
132 }
134 static void rproc_disable_iommu(struct rproc *rproc)
135 {
136 struct iommu_domain *domain = rproc->domain;
137 struct device *dev = rproc->dev.parent;
139 if (!domain)
140 return;
142 iommu_detach_device(domain, dev);
143 iommu_domain_free(domain);
144 }
146 /**
147 * rproc_da_to_va() - lookup the kernel virtual address for a remoteproc address
148 * @rproc: handle of a remote processor
149 * @da: remoteproc device address to translate
150 * @len: length of the memory region @da is pointing to
151 * @flags: flags to pass onto platform implementations for aiding translations
152 *
153 * Some remote processors will ask us to allocate them physically contiguous
154 * memory regions (which we call "carveouts"), and map them to specific
155 * device addresses (which are hardcoded in the firmware). They may also have
156 * dedicated memory regions internal to the processors, and use them either
157 * exclusively or alongside carveouts.
158 *
159 * They may then ask us to copy objects into specific device addresses (e.g.
160 * code/data sections) or expose us certain symbols in other device address
161 * (e.g. their trace buffer).
162 *
163 * This function is a helper function with which we can go over the allocated
164 * carveouts and translate specific device addresses to kernel virtual addresses
165 * so we can access the referenced memory. This function also allows to perform
166 * translations on the internal remoteproc memory regions through a platform
167 * implementation specific da_to_va ops, if present. The @flags field is passed
168 * onto these ops to aid the translation within the ops implementation. The
169 * @flags field is to be passed as a combination of the RPROC_FLAGS_xxx type
170 * and the pertinent flags value for that type.
171 *
172 * The function returns a valid kernel address on success or NULL on failure.
173 *
174 * Note: phys_to_virt(iommu_iova_to_phys(rproc->domain, da)) will work too,
175 * but only on kernel direct mapped RAM memory. Instead, we're just using
176 * here the output of the DMA API for the carveouts, which should be more
177 * correct.
178 */
void *rproc_da_to_va(struct rproc *rproc, u64 da, int len, u32 flags)
{
	struct rproc_mem_entry *carveout;
	void *ptr = NULL;

	/* platform-specific translation (e.g. internal memories) wins first */
	if (rproc->ops->da_to_va) {
		ptr = rproc->ops->da_to_va(rproc, da, len, flags);
		if (ptr)
			goto out;
	}

	/* otherwise, walk the carveouts looking for one that contains da */
	list_for_each_entry(carveout, &rproc->carveouts, node) {
		/*
		 * NOTE(review): 'da' is u64 but 'offset' is int, so a da
		 * more than 2^31 past carveout->da would truncate here —
		 * presumably device addresses fit in 32 bits on supported
		 * platforms; confirm.
		 */
		int offset = da - carveout->da;

		/* try next carveout if da is too small */
		if (offset < 0)
			continue;

		/* try next carveout if da is too large */
		if (offset + len > carveout->len)
			continue;

		/* the requested window fits; translate relative to the va */
		ptr = carveout->va + offset;

		break;
	}

out:
	return ptr;
}
EXPORT_SYMBOL(rproc_da_to_va);
211 /**
212 * rproc_pa_to_da() - lookup the rproc device address for a physical address
213 * @rproc: handle of a remote processor
214 * @pa: physical address of the buffer to translate
215 * @da: device address to return
216 *
217 * Communication clients of remote processors usually would need a means to
218 * convert a host buffer pointer to an equivalent device virtual address pointer
219 * that the code running on the remote processor can operate on. These buffer
220 * pointers can either be from the physically contiguous memory regions (or
221 * "carveouts") or can be some memory-mapped Device IO memory. This function
222 * provides a means to translate a given physical address to its associated
223 * device address.
224 *
225 * The function looks through both the carveouts and the device memory mappings
226 * since both of them are stored in separate lists.
227 *
228 * Returns 0 on success, or an appropriate error code otherwise. The translated
229 * device address is returned through the appropriate function argument.
230 */
231 int rproc_pa_to_da(struct rproc *rproc, phys_addr_t pa, u64 *da)
232 {
233 int ret = -EINVAL;
234 struct rproc_mem_entry *maps = NULL;
236 if (!rproc || !da)
237 return -EINVAL;
239 if (mutex_lock_interruptible(&rproc->lock))
240 return -EINTR;
242 if (rproc->state == RPROC_RUNNING || rproc->state == RPROC_SUSPENDED) {
243 /* Look in the mappings first */
244 list_for_each_entry(maps, &rproc->mappings, node) {
245 if (pa >= maps->dma && pa < (maps->dma + maps->len)) {
246 *da = maps->da + (pa - maps->dma);
247 ret = 0;
248 goto exit;
249 }
250 }
251 /* If not, check in the carveouts */
252 list_for_each_entry(maps, &rproc->carveouts, node) {
253 if (pa >= maps->dma && pa < (maps->dma + maps->len)) {
254 *da = maps->da + (pa - maps->dma);
255 ret = 0;
256 break;
257 }
258 }
259 }
260 exit:
261 mutex_unlock(&rproc->lock);
262 return ret;
263 }
264 EXPORT_SYMBOL(rproc_pa_to_da);
/*
 * Allocate coherent memory and a notifyid for vring @i of @rvdev, and
 * publish its da/notifyid in the resource table so the remote processor
 * can find it. Returns 0 on success, or an appropriate error code.
 */
int rproc_alloc_vring(struct rproc_vdev *rvdev, int i)
{
	struct rproc *rproc = rvdev->rproc;
	struct device *dev = &rproc->dev;
	struct rproc_vring *rvring = &rvdev->vring[i];
	struct fw_rsc_vdev *rsc;
	dma_addr_t dma;
	void *va;
	int ret, size, notifyid;

	/* actual size of vring (in bytes) */
	size = PAGE_ALIGN(vring_size(rvring->len, rvring->align));

	/*
	 * Allocate non-cacheable memory for the vring. In the future
	 * this call will also configure the IOMMU for us
	 */
	va = dma_alloc_coherent(dev->parent, size, &dma, GFP_KERNEL);
	if (!va) {
		dev_err(dev->parent, "dma_alloc_coherent failed\n");
		return -EINVAL;
	}

	/*
	 * Assign an rproc-wide unique index for this vring
	 * TODO: assign a notifyid for rvdev updates as well
	 * TODO: support predefined notifyids (via resource table)
	 */
	ret = idr_alloc(&rproc->notifyids, rvring, 0, 0, GFP_KERNEL);
	if (ret < 0) {
		dev_err(dev, "idr_alloc failed: %d\n", ret);
		/* undo the coherent allocation made above */
		dma_free_coherent(dev->parent, size, va, dma);
		return ret;
	}
	notifyid = ret;

	/* Potentially bump max_notifyid */
	if (notifyid > rproc->max_notifyid)
		rproc->max_notifyid = notifyid;

	dev_dbg(dev, "vring%d: va %pK dma %pad size 0x%x idr %d\n",
		i, va, &dma, size, notifyid);

	rvring->va = va;
	rvring->dma = dma;
	rvring->notifyid = notifyid;

	/*
	 * Let the rproc know the notifyid and da of this vring.
	 * Not all platforms use dma_alloc_coherent to automatically
	 * set up the iommu. In this case the device address (da) will
	 * hold the physical address and not the device address.
	 */
	rsc = (void *)rproc->table_ptr + rvdev->rsc_offset;
	rsc->vring[i].da = dma;
	rsc->vring[i].notifyid = notifyid;
	return 0;
}
325 static int
326 rproc_parse_vring(struct rproc_vdev *rvdev, struct fw_rsc_vdev *rsc, int i)
327 {
328 struct rproc *rproc = rvdev->rproc;
329 struct device *dev = &rproc->dev;
330 struct fw_rsc_vdev_vring *vring = &rsc->vring[i];
331 struct rproc_vring *rvring = &rvdev->vring[i];
333 dev_dbg(dev, "vdev rsc: vring%d: da 0x%x, qsz %d, align %d\n",
334 i, vring->da, vring->num, vring->align);
336 /* verify queue size and vring alignment are sane */
337 if (!vring->num || !vring->align) {
338 dev_err(dev, "invalid qsz (%d) or alignment (%d)\n",
339 vring->num, vring->align);
340 return -EINVAL;
341 }
343 rvring->len = vring->num;
344 rvring->align = vring->align;
345 rvring->rvdev = rvdev;
347 return 0;
348 }
350 void rproc_free_vring(struct rproc_vring *rvring)
351 {
352 int size = PAGE_ALIGN(vring_size(rvring->len, rvring->align));
353 struct rproc *rproc = rvring->rvdev->rproc;
354 int idx = rvring->rvdev->vring - rvring;
355 struct fw_rsc_vdev *rsc;
357 dma_free_coherent(rproc->dev.parent, size, rvring->va, rvring->dma);
358 idr_remove(&rproc->notifyids, rvring->notifyid);
360 /* reset resource entry info */
361 rsc = (void *)rproc->table_ptr + rvring->rvdev->rsc_offset;
362 rsc->vring[idx].da = 0;
363 rsc->vring[idx].notifyid = -1;
364 }
366 static int rproc_vdev_do_start(struct rproc_subdev *subdev)
367 {
368 struct rproc_vdev *rvdev = container_of(subdev, struct rproc_vdev, subdev);
370 return rproc_add_virtio_dev(rvdev, rvdev->id);
371 }
373 static void rproc_vdev_do_stop(struct rproc_subdev *subdev, bool crashed)
374 {
375 struct rproc_vdev *rvdev = container_of(subdev, struct rproc_vdev, subdev);
377 rproc_remove_virtio_dev(rvdev);
378 }
380 /**
381 * rproc_handle_vdev() - handle a vdev fw resource
382 * @rproc: the remote processor
383 * @rsc: the vring resource descriptor
384 * @avail: size of available data (for sanity checking the image)
385 *
386 * This resource entry requests the host to statically register a virtio
387 * device (vdev), and setup everything needed to support it. It contains
388 * everything needed to make it possible: the virtio device id, virtio
389 * device features, vrings information, virtio config space, etc...
390 *
391 * Before registering the vdev, the vrings are allocated from non-cacheable
392 * physically contiguous memory. Currently we only support two vrings per
393 * remote processor (temporary limitation). We might also want to consider
394 * doing the vring allocation only later when ->find_vqs() is invoked, and
395 * then release them upon ->del_vqs().
396 *
397 * Note: @da is currently not really handled correctly: we dynamically
398 * allocate it using the DMA API, ignoring requested hard coded addresses,
399 * and we don't take care of any required IOMMU programming. This is all
400 * going to be taken care of when the generic iommu-based DMA API will be
401 * merged. Meanwhile, statically-addressed iommu-based firmware images should
402 * use RSC_DEVMEM resource entries to map their required @da to the physical
403 * address of their base CMA region (ouch, hacky!).
404 *
405 * Returns 0 on success, or an appropriate error code otherwise
406 */
static int rproc_handle_vdev(struct rproc *rproc, struct fw_rsc_vdev *rsc,
			     int offset, int avail)
{
	struct device *dev = &rproc->dev;
	struct rproc_vdev *rvdev;
	int i, ret;

	/* make sure resource isn't truncated */
	if (sizeof(*rsc) + rsc->num_of_vrings * sizeof(struct fw_rsc_vdev_vring)
			+ rsc->config_len > avail) {
		dev_err(dev, "vdev rsc is truncated\n");
		return -EINVAL;
	}

	/* make sure reserved bytes are zeroes */
	if (rsc->reserved[0] || rsc->reserved[1]) {
		dev_err(dev, "vdev rsc has non zero reserved bytes\n");
		return -EINVAL;
	}

	dev_dbg(dev, "vdev rsc: id %d, dfeatures 0x%x, cfg len %d, %d vrings\n",
		rsc->id, rsc->dfeatures, rsc->config_len, rsc->num_of_vrings);

	/* we currently support only two vrings per rvdev */
	if (rsc->num_of_vrings > ARRAY_SIZE(rvdev->vring)) {
		dev_err(dev, "too many vrings: %d\n", rsc->num_of_vrings);
		return -EINVAL;
	}

	rvdev = kzalloc(sizeof(*rvdev), GFP_KERNEL);
	if (!rvdev)
		return -ENOMEM;

	/* refcount dropped (and rvdev freed) via rproc_vdev_release() */
	kref_init(&rvdev->refcount);

	rvdev->id = rsc->id;
	rvdev->rproc = rproc;

	/* parse the vrings; validation only, no allocation yet */
	for (i = 0; i < rsc->num_of_vrings; i++) {
		ret = rproc_parse_vring(rvdev, rsc, i);
		if (ret)
			goto free_rvdev;
	}

	/* remember the resource offset; rproc_alloc_vring() needs it below */
	rvdev->rsc_offset = offset;

	/* allocate the vring resources */
	for (i = 0; i < rsc->num_of_vrings; i++) {
		ret = rproc_alloc_vring(rvdev, i);
		if (ret)
			goto unwind_vring_allocations;
	}

	list_add_tail(&rvdev->node, &rproc->rvdevs);

	rvdev->subdev.start = rproc_vdev_do_start;
	rvdev->subdev.stop = rproc_vdev_do_stop;

	rproc_add_subdev(rproc, &rvdev->subdev);

	return 0;

unwind_vring_allocations:
	/* free only the vrings allocated before the failure (i-1 .. 0) */
	for (i--; i >= 0; i--)
		rproc_free_vring(&rvdev->vring[i]);
free_rvdev:
	kfree(rvdev);
	return ret;
}
479 void rproc_vdev_release(struct kref *ref)
480 {
481 struct rproc_vdev *rvdev = container_of(ref, struct rproc_vdev, refcount);
482 struct rproc_vring *rvring;
483 struct rproc *rproc = rvdev->rproc;
484 int id;
486 for (id = 0; id < ARRAY_SIZE(rvdev->vring); id++) {
487 rvring = &rvdev->vring[id];
488 if (!rvring->va)
489 continue;
491 rproc_free_vring(rvring);
492 }
494 rproc_remove_subdev(rproc, &rvdev->subdev);
495 list_del(&rvdev->node);
496 kfree(rvdev);
497 }
499 /**
500 * rproc_handle_last_trace() - setup a buffer to capture the trace snapshot
501 * before recovery
502 * @rproc: the remote processor
503 * @trace: the trace resource descriptor
504 * @count: the index of the trace under process
505 *
506 * The last trace is allocated and the contents of the trace buffer are
507 * copied during a recovery cleanup. Once, the contents get copied, the
508 * trace buffers are cleaned up for re-use.
509 *
510 * It might also happen that the remoteproc binary changes between the
511 * time that it was loaded and the time that it crashed. In this case,
512 * the trace descriptors might have changed too. The last traces are
513 * re-built as required in this case.
514 *
515 * Returns 0 on success, or an appropriate error code otherwise
516 */
static int rproc_handle_last_trace(struct rproc *rproc,
				   struct rproc_mem_entry *trace, int count)
{
	struct rproc_mem_entry *trace_last, *tmp_trace;
	struct device *dev = &rproc->dev;
	char name[15];
	int i = 0;
	bool new_trace = false;

	if (!rproc || !trace)
		return -EINVAL;

	/* we need a new trace in this case */
	if (count > rproc->num_last_traces) {
		new_trace = true;
		/*
		 * make sure snprintf always null terminates, even if truncating
		 */
		snprintf(name, sizeof(name), "trace%d_last", (count - 1));
		trace_last = kzalloc(sizeof(*trace_last), GFP_KERNEL);
		if (!trace_last) {
			dev_err(dev, "kzalloc failed for trace%d_last\n",
				count);
			return -ENOMEM;
		}
	} else {
		/*
		 * try to reuse buffers here: walk to the count'th entry of
		 * the existing last-trace list (count <= num_last_traces)
		 */
		list_for_each_entry_safe(trace_last, tmp_trace,
					 &rproc->last_traces, node) {
			if (++i == count)
				break;
		}

		/* if we can reuse the trace, copy buffer and exit */
		if (trace_last->len == trace->len)
			goto copy_and_exit;

		/* can reuse the trace struct but not the buffer */
		vfree(trace_last->va);
		trace_last->va = NULL;
		trace_last->len = 0;
	}

	trace_last->len = trace->len;
	/*
	 * NOTE(review): allocates sizeof(u32) * len although the memcpy
	 * below copies only len bytes — looks 4x oversized; confirm
	 * whether len is in bytes or words.
	 */
	trace_last->va = vmalloc(sizeof(u32) * trace_last->len);
	if (!trace_last->va) {
		dev_err(dev, "vmalloc failed for trace%d_last\n", count);
		/* a reused entry must be unlinked before being freed */
		if (!new_trace) {
			list_del(&trace_last->node);
			rproc->num_last_traces--;
		}
		kfree(trace_last);
		return -ENOMEM;
	}

	/* create the debugfs entry (only for newly created traces) */
	if (new_trace) {
		trace_last->priv = rproc_create_trace_file(name, rproc,
							   trace_last);
		if (!trace_last->priv) {
			dev_err(dev, "trace%d_last create debugfs failed\n",
				count);
			vfree(trace_last->va);
			kfree(trace_last);
			return -EINVAL;
		}

		/* add it to the trace list */
		list_add_tail(&trace_last->node, &rproc->last_traces);
		rproc->num_last_traces++;
	}

copy_and_exit:
	/* copy the trace to last trace */
	memcpy(trace_last->va, trace->va, trace->len);

	return 0;
}
596 /**
597 * rproc_handle_trace() - handle a shared trace buffer resource
598 * @rproc: the remote processor
599 * @rsc: the trace resource descriptor
600 * @avail: size of available data (for sanity checking the image)
601 *
602 * In case the remote processor dumps trace logs into memory,
603 * export it via debugfs.
604 *
605 * Currently, the 'da' member of @rsc should contain the device address
606 * where the remote processor is dumping the traces. Later we could also
607 * support dynamically allocating this address using the generic
608 * DMA API (but currently there isn't a use case for that).
609 *
610 * Returns 0 on success, or an appropriate error code otherwise
611 */
static int rproc_handle_trace(struct rproc *rproc, struct fw_rsc_trace *rsc,
			      int offset, int avail)
{
	struct rproc_mem_entry *trace;
	struct device *dev = &rproc->dev;
	void *ptr;
	char name[15];

	/* make sure resource isn't truncated */
	if (sizeof(*rsc) > avail) {
		dev_err(dev, "trace rsc is truncated\n");
		return -EINVAL;
	}

	/* make sure reserved bytes are zeroes */
	if (rsc->reserved) {
		dev_err(dev, "trace rsc has non zero reserved bytes\n");
		return -EINVAL;
	}

	/* what's the kernel address of this resource ? */
	ptr = rproc_da_to_va(rproc, rsc->da, rsc->len, RPROC_FLAGS_NONE);
	if (!ptr) {
		dev_err(dev, "erroneous trace resource entry\n");
		return -EINVAL;
	}

	trace = kzalloc(sizeof(*trace), GFP_KERNEL);
	if (!trace)
		return -ENOMEM;

	/* set the trace buffer dma properties */
	trace->len = rsc->len;
	trace->va = ptr;

	/* make sure snprintf always null terminates, even if truncating */
	snprintf(name, sizeof(name), "trace%d", rproc->num_traces);

	/* create the debugfs entry */
	trace->priv = rproc_create_trace_file(name, rproc, trace);
	if (!trace->priv) {
		/* va points into a carveout owned elsewhere; don't free it */
		trace->va = NULL;
		kfree(trace);
		return -EINVAL;
	}

	list_add_tail(&trace->node, &rproc->traces);

	rproc->num_traces++;

	dev_dbg(dev, "%s added: va %pK, da 0x%x, len 0x%x\n",
		name, ptr, rsc->da, rsc->len);

	return 0;
}
667 /**
668 * rproc_handle_devmem() - handle devmem resource entry
669 * @rproc: remote processor handle
670 * @rsc: the devmem resource entry
671 * @avail: size of available data (for sanity checking the image)
672 *
673 * Remote processors commonly need to access certain on-chip peripherals.
674 *
675 * Some of these remote processors access memory via an iommu device,
676 * and might require us to configure their iommu before they can access
677 * the on-chip peripherals they need.
678 *
679 * This resource entry is a request to map such a peripheral device.
680 *
681 * These devmem entries will contain the physical address of the device in
682 * the 'pa' member. If a specific device address is expected, then 'da' will
683 * contain it (currently this is the only use case supported). 'len' will
684 * contain the size of the physical region we need to map.
685 *
686 * Currently we just "trust" those devmem entries to contain valid physical
687 * addresses, but this is going to change: we want the implementations to
688 * tell us ranges of physical addresses the firmware is allowed to request,
689 * and not allow firmwares to request access to physical addresses that
690 * are outside those ranges.
691 */
static int rproc_handle_devmem(struct rproc *rproc, struct fw_rsc_devmem *rsc,
			       int offset, int avail)
{
	struct rproc_mem_entry *mapping;
	struct device *dev = &rproc->dev;
	int ret;

	/* no point in handling this resource without a valid iommu domain */
	if (!rproc->domain)
		return 0;

	/* make sure resource isn't truncated */
	if (sizeof(*rsc) > avail) {
		dev_err(dev, "devmem rsc is truncated\n");
		return -EINVAL;
	}

	/* make sure reserved bytes are zeroes */
	if (rsc->reserved) {
		dev_err(dev, "devmem rsc has non zero reserved bytes\n");
		return -EINVAL;
	}

	mapping = kzalloc(sizeof(*mapping), GFP_KERNEL);
	if (!mapping)
		return -ENOMEM;

	/*
	 * On late attach the mapping is only recorded, not programmed —
	 * presumably the IOMMU was already configured by an external
	 * entity (e.g. bootloader); confirm against the platform driver.
	 */
	if (!rproc->late_attach) {
		ret = iommu_map(rproc->domain, rsc->da, rsc->pa, rsc->len,
				rsc->flags);
		if (ret) {
			dev_err(dev, "failed to map devmem: %d\n", ret);
			goto out;
		}
	}

	/*
	 * We'll need this info later when we'll want to unmap everything
	 * (e.g. on shutdown).
	 *
	 * We can't trust the remote processor not to change the resource
	 * table, so we must maintain this info independently.
	 */
	mapping->dma = rsc->pa;
	mapping->da = rsc->da;
	mapping->len = rsc->len;
	list_add_tail(&mapping->node, &rproc->mappings);

	if (!rproc->late_attach)
		dev_dbg(dev, "mapped devmem pa 0x%x, da 0x%x, len 0x%x\n",
			rsc->pa, rsc->da, rsc->len);
	else
		dev_dbg(dev, "late-attach: processed devmem pa 0x%x, da 0x%x, len 0x%x\n",
			rsc->pa, rsc->da, rsc->len);

	return 0;

out:
	kfree(mapping);
	return ret;
}
753 /**
754 * rproc_handle_carveout() - handle phys contig memory allocation requests
755 * @rproc: rproc handle
756 * @rsc: the resource entry
757 * @avail: size of available data (for image validation)
758 *
759 * This function will handle firmware requests for allocation of physically
760 * contiguous memory regions.
761 *
762 * These request entries should come first in the firmware's resource table,
763 * as other firmware entries might request placing other data objects inside
764 * these memory regions (e.g. data/code segments, trace resource entries, ...).
765 *
766 * Allocating memory this way helps utilizing the reserved physical memory
767 * (e.g. CMA) more efficiently, and also minimizes the number of TLB entries
768 * needed to map it (in case @rproc is using an IOMMU). Reducing the TLB
769 * pressure is important; it may have a substantial impact on performance.
770 */
static int rproc_handle_carveout(struct rproc *rproc,
				 struct fw_rsc_carveout *rsc,
				 int offset, int avail)
{
	struct rproc_mem_entry *carveout, *mapping;
	struct device *dev = &rproc->dev;
	dma_addr_t dma;
	void *va;
	int ret;

	/* make sure resource isn't truncated */
	if (sizeof(*rsc) > avail) {
		dev_err(dev, "carveout rsc is truncated\n");
		return -EINVAL;
	}

	/* make sure reserved bytes are zeroes */
	if (rsc->reserved) {
		dev_err(dev, "carveout rsc has non zero reserved bytes\n");
		return -EINVAL;
	}

	dev_dbg(dev, "carveout rsc: name: %s, da 0x%x, pa 0x%x, len 0x%x, flags 0x%x\n",
		rsc->name, rsc->da, rsc->pa, rsc->len, rsc->flags);

	carveout = kzalloc(sizeof(*carveout), GFP_KERNEL);
	if (!carveout)
		return -ENOMEM;

	/*
	 * NOTE(review): dma_malloc_coherent() is not a mainline API —
	 * presumably a vendor variant of dma_alloc_coherent() that skips
	 * zeroing the region so a firmware image already loaded by an
	 * external entity survives the late attach. Confirm against the
	 * platform's DMA implementation.
	 */
	if (rproc->late_attach) {
		va = dma_malloc_coherent(dev->parent, rsc->len, &dma,
					 GFP_KERNEL);
	} else {
		va = dma_alloc_coherent(dev->parent, rsc->len, &dma,
					GFP_KERNEL);
	}
	if (!va) {
		dev_err(dev->parent,
			"failed to allocate dma memory: len 0x%x\n", rsc->len);
		ret = -ENOMEM;
		goto free_carv;
	}

	dev_dbg(dev, "carveout va %pK, dma %pad, len 0x%x\n",
		va, &dma, rsc->len);

	/*
	 * Ok, this is non-standard.
	 *
	 * Sometimes we can't rely on the generic iommu-based DMA API
	 * to dynamically allocate the device address and then set the IOMMU
	 * tables accordingly, because some remote processors might
	 * _require_ us to use hard coded device addresses that their
	 * firmware was compiled with.
	 *
	 * In this case, we must use the IOMMU API directly and map
	 * the memory to the device address as expected by the remote
	 * processor.
	 *
	 * Obviously such remote processor devices should not be configured
	 * to use the iommu-based DMA API: we expect 'dma' to contain the
	 * physical address in this case.
	 */
	if (rproc->domain) {
		mapping = kzalloc(sizeof(*mapping), GFP_KERNEL);
		if (!mapping) {
			ret = -ENOMEM;
			goto dma_free;
		}

		/* on late attach only record the mapping; don't reprogram */
		if (!rproc->late_attach) {
			ret = iommu_map(rproc->domain, rsc->da, dma, rsc->len,
					rsc->flags);
			if (ret) {
				dev_err(dev, "iommu_map failed: %d\n", ret);
				goto free_mapping;
			}
		}

		/*
		 * We'll need this info later when we'll want to unmap
		 * everything (e.g. on shutdown).
		 *
		 * We can't trust the remote processor not to change the
		 * resource table, so we must maintain this info independently.
		 */
		mapping->da = rsc->da;
		mapping->len = rsc->len;
		list_add_tail(&mapping->node, &rproc->mappings);

		if (!rproc->late_attach)
			dev_dbg(dev, "carveout mapped 0x%x to %pad\n",
				rsc->da, &dma);
		else
			dev_dbg(dev, "late-attach: carveout processed 0x%x to %pad\n",
				rsc->da, &dma);
	}

	/*
	 * Some remote processors might need to know the pa
	 * even though they are behind an IOMMU. E.g., OMAP4's
	 * remote M3 processor needs this so it can control
	 * on-chip hardware accelerators that are not behind
	 * the IOMMU, and therefor must know the pa.
	 *
	 * Generally we don't want to expose physical addresses
	 * if we don't have to (remote processors are generally
	 * _not_ trusted), so we might want to do this only for
	 * remote processor that _must_ have this (e.g. OMAP4's
	 * dual M3 subsystem).
	 *
	 * Non-IOMMU processors might also want to have this info.
	 * In this case, the device address and the physical address
	 * are the same.
	 */
	rsc->pa = dma;

	carveout->va = va;
	carveout->len = rsc->len;
	carveout->dma = dma;
	carveout->da = rsc->da;
	strlcpy(carveout->name, rsc->name, sizeof(carveout->name));

	list_add_tail(&carveout->node, &rproc->carveouts);

	return 0;

free_mapping:
	kfree(mapping);
dma_free:
	dma_free_coherent(dev->parent, rsc->len, va, dma);
free_carv:
	kfree(carveout);
	return ret;
}
907 /**
908 * rproc_handle_vendor_rsc() - provide implementation specific hook
909 * to handle vendor/custom resources
910 * @rproc: the remote processor
911 * @rsc: vendor resource to be handled by remoteproc drivers
912 * @offset: offset of the resource data in resource table
913 * @avail: size of available data
914 *
915 * Remoteproc implementations might want to add resource table entries
916 * that are not generic enough to be handled by the framework. This
917 * provides a hook to handle such custom resources. Note that a single
918 * hook is reused between RSC_PRELOAD_VENDOR and RSC_PRELOAD_VENDOR
919 * resources with the platform driver implementation distinguishing
920 * the two based on the sub-type resource.
921 *
922 * Returns 0 on success, or an appropriate error code otherwise
923 */
924 static int rproc_handle_vendor_rsc(struct rproc *rproc,
925 struct fw_rsc_vendor *rsc,
926 int offset, int avail)
927 {
928 struct device *dev = &rproc->dev;
930 if (!rproc->ops->handle_vendor_rsc) {
931 dev_err(dev, "vendor resource handler not implemented, ignoring resource\n");
932 return 0;
933 }
935 if (sizeof(*rsc) > avail) {
936 dev_err(dev, "vendor resource is truncated\n");
937 return -EINVAL;
938 }
940 return rproc->ops->handle_vendor_rsc(rproc, (void *)rsc);
941 }
943 /*
944 * A lookup table for resource handlers. The indices are defined in
945 * enum fw_resource_type.
946 */
947 static rproc_handle_resource_t rproc_loading_handlers[RSC_LAST] = {
948 [RSC_CARVEOUT] = (rproc_handle_resource_t)rproc_handle_carveout,
949 [RSC_DEVMEM] = (rproc_handle_resource_t)rproc_handle_devmem,
950 [RSC_TRACE] = (rproc_handle_resource_t)rproc_handle_trace,
951 [RSC_PRELOAD_VENDOR] = (rproc_handle_resource_t)rproc_handle_vendor_rsc,
952 [RSC_VDEV] = (rproc_handle_resource_t)rproc_handle_vdev,
953 };
955 static rproc_handle_resource_t rproc_post_loading_handlers[RSC_LAST] = {
956 [RSC_POSTLOAD_VENDOR] =
957 (rproc_handle_resource_t)rproc_handle_vendor_rsc,
958 };
/*
 * handle firmware resource entries by dispatching each entry of the
 * resource table to the matching handler in @handlers; stops at the
 * first handler failure. Returns 0 on success or the handler's error.
 */
static int rproc_handle_resources(struct rproc *rproc,
				  rproc_handle_resource_t handlers[RSC_LAST])
{
	struct device *dev = &rproc->dev;
	rproc_handle_resource_t handler;
	int ret = 0, i;

	/* firmware without a resource table is legal; nothing to do */
	if (!rproc->table_ptr)
		return 0;

	for (i = 0; i < rproc->table_ptr->num; i++) {
		int offset = rproc->table_ptr->offset[i];
		struct fw_rsc_hdr *hdr = (void *)rproc->table_ptr + offset;
		int avail = rproc->table_sz - offset - sizeof(*hdr);
		void *rsc = (void *)hdr + sizeof(*hdr);

		/* make sure table isn't truncated */
		if (avail < 0) {
			dev_err(dev, "rsc table is truncated\n");
			return -EINVAL;
		}

		dev_dbg(dev, "rsc: type %d\n", hdr->type);

		/* unknown types are skipped, for forward compatibility */
		if (hdr->type >= RSC_LAST) {
			dev_warn(dev, "unsupported resource %d\n", hdr->type);
			continue;
		}

		handler = handlers[hdr->type];
		if (!handler)
			continue;

		ret = handler(rproc, rsc, offset + sizeof(*hdr), avail);
		if (ret)
			break;
	}

	return ret;
}
/* invoke prepare() on all registered subdevices, unwinding on failure */
static int rproc_prepare_subdevices(struct rproc *rproc)
{
	struct rproc_subdev *subdev;
	int ret;

	list_for_each_entry(subdev, &rproc->subdevs, node) {
		if (subdev->prepare) {
			ret = subdev->prepare(subdev);
			if (ret)
				goto unroll_preparation;
		}
	}

	return 0;

unroll_preparation:
	/* unprepare, in reverse order, only the subdevs already prepared */
	list_for_each_entry_continue_reverse(subdev, &rproc->subdevs, node) {
		if (subdev->unprepare)
			subdev->unprepare(subdev);
	}

	return ret;
}
/* invoke start() on all registered subdevices, unwinding on failure */
static int rproc_start_subdevices(struct rproc *rproc)
{
	struct rproc_subdev *subdev;
	int ret;

	list_for_each_entry(subdev, &rproc->subdevs, node) {
		if (subdev->start) {
			ret = subdev->start(subdev);
			if (ret)
				goto unroll_registration;
		}
	}

	return 0;

unroll_registration:
	/* stop, in reverse order, only the subdevs already started;
	 * 'true' marks the stop as crash-triggered */
	list_for_each_entry_continue_reverse(subdev, &rproc->subdevs, node) {
		if (subdev->stop)
			subdev->stop(subdev, true);
	}

	return ret;
}
1050 static void rproc_stop_subdevices(struct rproc *rproc, bool crashed)
1051 {
1052 struct rproc_subdev *subdev;
1054 list_for_each_entry_reverse(subdev, &rproc->subdevs, node) {
1055 if (subdev->stop)
1056 subdev->stop(subdev, crashed);
1057 }
1058 }
1060 static void rproc_unprepare_subdevices(struct rproc *rproc)
1061 {
1062 struct rproc_subdev *subdev;
1064 list_for_each_entry_reverse(subdev, &rproc->subdevs, node) {
1065 if (subdev->unprepare)
1066 subdev->unprepare(subdev);
1067 }
1068 }
1070 /**
1071 * rproc_coredump_cleanup() - clean up dump_segments list
1072 * @rproc: the remote processor handle
1073 */
1074 static void rproc_coredump_cleanup(struct rproc *rproc)
1075 {
1076 struct rproc_dump_segment *entry, *tmp;
1078 list_for_each_entry_safe(entry, tmp, &rproc->dump_segments, node) {
1079 list_del(&entry->node);
1080 kfree(entry);
1081 }
1082 }
1084 /**
1085 * rproc_free_last_trace() - helper function to cleanup a last trace entry
1086 * @trace: the last trace element to be cleaned up
1087 */
1088 static void rproc_free_last_trace(struct rproc_mem_entry *trace)
1089 {
1090 rproc_remove_trace_file(trace->priv);
1091 list_del(&trace->node);
1092 vfree(trace->va);
1093 kfree(trace);
1094 }
1096 /**
1097 * rproc_resource_cleanup() - clean up and free all acquired resources
1098 * @rproc: rproc handle
1099 *
1100 * This function will free all resources acquired for @rproc, and it
1101 * is called whenever @rproc either shuts down or fails to boot.
1102 */
1103 static void rproc_resource_cleanup(struct rproc *rproc)
1104 {
1105 struct rproc_mem_entry *entry, *tmp;
1106 struct rproc_vdev *rvdev, *rvtmp;
1107 struct device *dev = &rproc->dev;
1108 int count = 0, i = rproc->num_traces;
1110 /* clean up debugfs trace entries */
1111 list_for_each_entry_safe(entry, tmp, &rproc->traces, node) {
1112 /* handle last trace here */
1113 if (rproc->state == RPROC_CRASHED)
1114 rproc_handle_last_trace(rproc, entry, ++count);
1116 rproc_remove_trace_file(entry->priv);
1117 list_del(&entry->node);
1118 kfree(entry);
1119 }
1120 rproc->num_traces = 0;
1122 /*
1123 * clean up debugfs last trace entries. This either deletes all last
1124 * trace entries during cleanup or just the remaining entries, if any,
1125 * in case of a crash.
1126 */
1127 list_for_each_entry_safe(entry, tmp, &rproc->last_traces, node) {
1128 /* skip the valid traces */
1129 if ((i--) && rproc->state == RPROC_CRASHED)
1130 continue;
1131 rproc_free_last_trace(entry);
1132 rproc->num_last_traces--;
1133 }
1135 /* clean up iommu mapping entries */
1136 list_for_each_entry_safe(entry, tmp, &rproc->mappings, node) {
1137 size_t unmapped;
1139 if (!rproc->late_attach) {
1140 unmapped = iommu_unmap(rproc->domain, entry->da,
1141 entry->len);
1142 if (unmapped != entry->len) {
1143 /* nothing much to do besides complaining */
1144 dev_err(dev, "failed to unmap %u/%zu\n",
1145 entry->len, unmapped);
1146 }
1147 }
1149 list_del(&entry->node);
1150 kfree(entry);
1151 }
1153 /* clean up carveout allocations */
1154 list_for_each_entry_safe(entry, tmp, &rproc->carveouts, node) {
1155 dma_free_coherent(dev->parent, entry->len, entry->va,
1156 entry->dma);
1157 list_del(&entry->node);
1158 kfree(entry);
1159 }
1161 /* clean up remote vdev entries */
1162 list_for_each_entry_safe(rvdev, rvtmp, &rproc->rvdevs, node)
1163 kref_put(&rvdev->refcount, rproc_vdev_release);
1165 rproc_coredump_cleanup(rproc);
1166 }
1168 /*
1169 * take a firmware and boot a remote processor with it.
1170 */
1171 static int rproc_fw_boot(struct rproc *rproc, const struct firmware *fw)
1172 {
1173 struct device *dev = &rproc->dev;
1174 const char *name = rproc->firmware;
1175 struct resource_table *loaded_table;
1176 int ret;
1178 ret = rproc_fw_sanity_check(rproc, fw);
1179 if (ret)
1180 return ret;
1182 if (!rproc->skip_firmware_request)
1183 dev_info(dev, "Booting fw image %s, size %zd\n",
1184 name, fw->size);
1185 else
1186 dev_info(dev, "Booting unspecified pre-loaded fw image\n");
1188 /*
1189 * if enabling an IOMMU isn't relevant for this rproc, this is
1190 * just a nop
1191 */
1192 ret = rproc_enable_iommu(rproc);
1193 if (ret) {
1194 dev_err(dev, "can't enable iommu: %d\n", ret);
1195 return ret;
1196 }
1198 /* Prepare rproc for firmware loading if needed */
1199 if (rproc->ops->prepare) {
1200 ret = rproc->ops->prepare(rproc);
1201 if (ret) {
1202 dev_err(dev, "can't prepare rproc %s: %d\n",
1203 rproc->name, ret);
1204 goto disable_iommu;
1205 }
1206 }
1208 rproc->bootaddr = rproc_get_boot_addr(rproc, fw);
1210 /* Load resource table, core dump segment list etc from the firmware */
1211 ret = rproc_parse_fw(rproc, fw);
1212 if (ret)
1213 goto unprepare_rproc;
1215 /* reset max_notifyid */
1216 rproc->max_notifyid = -1;
1218 /* handle fw resources which are required to boot rproc */
1219 ret = rproc_handle_resources(rproc, rproc_loading_handlers);
1220 if (ret) {
1221 dev_err(dev, "Failed to process resources: %d\n", ret);
1222 goto clean_up_resources;
1223 }
1225 if (!rproc->skip_load && !rproc->late_attach) {
1226 /* load the ELF segments to memory */
1227 ret = rproc_load_segments(rproc, fw);
1228 if (ret) {
1229 dev_err(dev, "Failed to load program segments: %d\n",
1230 ret);
1231 goto clean_up_resources;
1232 }
1233 } else {
1234 dev_dbg(dev, "Skipped program segments load for pre-booted rproc\n");
1235 }
1237 /*
1238 * The starting device has been given the rproc->cached_table as the
1239 * resource table. The address of the vring along with the other
1240 * allocated resources (carveouts etc) is stored in cached_table.
1241 * In order to pass this information to the remote device we must copy
1242 * this information to device memory. We also update the table_ptr so
1243 * that any subsequent changes will be applied to the loaded version.
1244 */
1245 loaded_table = rproc_find_loaded_rsc_table(rproc, fw);
1246 if (loaded_table) {
1247 memcpy(loaded_table, rproc->cached_table, rproc->table_sz);
1248 rproc->table_ptr = loaded_table;
1249 }
1251 /* handle fw resources which require fw segments to be loaded */
1252 ret = rproc_handle_resources(rproc, rproc_post_loading_handlers);
1253 if (ret) {
1254 dev_err(dev, "Failed to process post-loading resources: %d\n",
1255 ret);
1256 goto reset_table_ptr;
1257 }
1259 ret = rproc_prepare_subdevices(rproc);
1260 if (ret) {
1261 dev_err(dev, "failed to prepare subdevices for %s: %d\n",
1262 rproc->name, ret);
1263 goto reset_table_ptr;
1264 }
1266 /* power up the remote processor */
1267 ret = rproc->ops->start(rproc);
1268 if (ret) {
1269 dev_err(dev, "can't start rproc %s: %d\n", rproc->name, ret);
1270 goto unprepare_subdevices;
1271 }
1273 /* Start any subdevices for the remote processor */
1274 ret = rproc_start_subdevices(rproc);
1275 if (ret) {
1276 dev_err(dev, "failed to probe subdevices for %s: %d\n",
1277 rproc->name, ret);
1278 goto stop_rproc;
1279 }
1281 rproc->state = RPROC_RUNNING;
1283 dev_info(dev, "remote processor %s is now up\n", rproc->name);
1285 return 0;
1287 stop_rproc:
1288 rproc->ops->stop(rproc);
1289 unprepare_subdevices:
1290 rproc_unprepare_subdevices(rproc);
1291 reset_table_ptr:
1292 rproc->table_ptr = rproc->cached_table;
1293 clean_up_resources:
1294 rproc_resource_cleanup(rproc);
1295 kfree(rproc->cached_table);
1296 rproc->cached_table = NULL;
1297 rproc->table_ptr = NULL;
1298 rproc->table_sz = 0;
1299 unprepare_rproc:
1300 /* release HW resources if needed */
1301 if (rproc->ops->unprepare)
1302 rproc->ops->unprepare(rproc);
1303 disable_iommu:
1304 rproc_disable_iommu(rproc);
1305 return ret;
1306 }
1308 /*
1309 * take a firmware and boot it up.
1310 *
1311 * Note: this function is called asynchronously upon registration of the
1312 * remote processor (so we must wait until it completes before we try
1313 * to unregister the device. one other option is just to use kref here,
1314 * that might be cleaner).
1315 */
1316 static void rproc_auto_boot_callback(const struct firmware *fw, void *context)
1317 {
1318 struct rproc *rproc = context;
1320 rproc_boot(rproc);
1322 release_firmware(fw);
1323 }
1325 static int rproc_trigger_auto_boot(struct rproc *rproc)
1326 {
1327 int ret;
1329 /*
1330 * We're initiating an asynchronous firmware loading, so we can
1331 * be built-in kernel code, without hanging the boot process.
1332 */
1333 ret = request_firmware_nowait(THIS_MODULE, FW_ACTION_HOTPLUG,
1334 rproc->firmware, &rproc->dev, GFP_KERNEL,
1335 rproc, rproc_auto_boot_callback);
1336 if (ret < 0)
1337 dev_err(&rproc->dev, "request_firmware_nowait err: %d\n", ret);
1339 return ret;
1340 }
1342 /**
1343 * rproc_coredump_add_segment() - add segment of device memory to coredump
1344 * @rproc: handle of a remote processor
1345 * @da: device address
1346 * @size: size of segment
1347 *
1348 * Add device memory to the list of segments to be included in a coredump for
1349 * the remoteproc.
1350 *
1351 * Return: 0 on success, negative errno on error.
1352 */
1353 int rproc_coredump_add_segment(struct rproc *rproc, dma_addr_t da, size_t size)
1354 {
1355 struct rproc_dump_segment *segment;
1357 segment = kzalloc(sizeof(*segment), GFP_KERNEL);
1358 if (!segment)
1359 return -ENOMEM;
1361 segment->da = da;
1362 segment->size = size;
1364 list_add_tail(&segment->node, &rproc->dump_segments);
1366 return 0;
1367 }
1368 EXPORT_SYMBOL(rproc_coredump_add_segment);
1370 /**
1371 * rproc_coredump() - perform coredump
1372 * @rproc: rproc handle
1373 *
1374 * This function will generate an ELF header for the registered segments
1375 * and create a devcoredump device associated with rproc.
1376 */
1377 static void rproc_coredump(struct rproc *rproc)
1378 {
1379 struct rproc_dump_segment *segment;
1380 struct elf32_phdr *phdr;
1381 struct elf32_hdr *ehdr;
1382 size_t data_size;
1383 size_t offset;
1384 void *data;
1385 void *ptr;
1386 int phnum = 0;
1388 if (list_empty(&rproc->dump_segments))
1389 return;
1391 data_size = sizeof(*ehdr);
1392 list_for_each_entry(segment, &rproc->dump_segments, node) {
1393 data_size += sizeof(*phdr) + segment->size;
1395 phnum++;
1396 }
1398 data = vmalloc(data_size);
1399 if (!data)
1400 return;
1402 ehdr = data;
1404 memset(ehdr, 0, sizeof(*ehdr));
1405 memcpy(ehdr->e_ident, ELFMAG, SELFMAG);
1406 ehdr->e_ident[EI_CLASS] = ELFCLASS32;
1407 ehdr->e_ident[EI_DATA] = ELFDATA2LSB;
1408 ehdr->e_ident[EI_VERSION] = EV_CURRENT;
1409 ehdr->e_ident[EI_OSABI] = ELFOSABI_NONE;
1410 ehdr->e_type = ET_CORE;
1411 ehdr->e_machine = EM_NONE;
1412 ehdr->e_version = EV_CURRENT;
1413 ehdr->e_entry = rproc->bootaddr;
1414 ehdr->e_phoff = sizeof(*ehdr);
1415 ehdr->e_ehsize = sizeof(*ehdr);
1416 ehdr->e_phentsize = sizeof(*phdr);
1417 ehdr->e_phnum = phnum;
1419 phdr = data + ehdr->e_phoff;
1420 offset = ehdr->e_phoff + sizeof(*phdr) * ehdr->e_phnum;
1421 list_for_each_entry(segment, &rproc->dump_segments, node) {
1422 memset(phdr, 0, sizeof(*phdr));
1423 phdr->p_type = PT_LOAD;
1424 phdr->p_offset = offset;
1425 phdr->p_vaddr = segment->da;
1426 phdr->p_paddr = segment->da;
1427 phdr->p_filesz = segment->size;
1428 phdr->p_memsz = segment->size;
1429 phdr->p_flags = PF_R | PF_W | PF_X;
1430 phdr->p_align = 0;
1432 ptr = rproc_da_to_va(rproc, segment->da, segment->size,
1433 RPROC_FLAGS_NONE);
1434 if (!ptr) {
1435 dev_err(&rproc->dev,
1436 "invalid coredump segment (%pad, %zu)\n",
1437 &segment->da, segment->size);
1438 memset(data + offset, 0xff, segment->size);
1439 } else {
1440 memcpy(data + offset, ptr, segment->size);
1441 }
1443 offset += phdr->p_filesz;
1444 phdr++;
1445 }
1447 dev_coredumpv(&rproc->dev, data, data_size, GFP_KERNEL);
1448 }
1450 /**
1451 * rproc_trigger_recovery() - recover a remoteproc
1452 * @rproc: the remote processor
1453 *
1454 * The recovery is done by resetting all the virtio devices, that way all the
1455 * rpmsg drivers will be reseted along with the remote processor making the
1456 * remoteproc functional again.
1457 *
1458 * This function can sleep, so it cannot be called from atomic context.
1459 */
1460 int rproc_trigger_recovery(struct rproc *rproc)
1461 {
1462 dev_err(&rproc->dev, "recovering %s\n", rproc->name);
1464 init_completion(&rproc->crash_comp);
1466 /* shut down the remote */
1467 /* TODO: make sure this works with rproc->power > 1 */
1468 rproc_shutdown(rproc);
1470 /* wait until there is no more rproc users */
1471 wait_for_completion(&rproc->crash_comp);
1473 /*
1474 * boot the remote processor up again
1475 */
1476 rproc_boot(rproc);
1478 return 0;
1479 }
1481 /**
1482 * rproc_crash_handler_work() - handle a crash
1483 *
1484 * This function needs to handle everything related to a crash, like cpu
1485 * registers and stack dump, information to help to debug the fatal error, etc.
1486 */
1487 static void rproc_crash_handler_work(struct work_struct *work)
1488 {
1489 struct rproc *rproc = container_of(work, struct rproc, crash_handler);
1490 struct device *dev = &rproc->dev;
1492 dev_dbg(dev, "enter %s\n", __func__);
1494 mutex_lock(&rproc->lock);
1496 if (rproc->state == RPROC_CRASHED || rproc->state == RPROC_OFFLINE) {
1497 /* handle only the first crash detected */
1498 mutex_unlock(&rproc->lock);
1499 return;
1500 }
1502 rproc->state = RPROC_CRASHED;
1503 dev_err(dev, "handling crash #%u in %s\n", ++rproc->crash_cnt,
1504 rproc->name);
1506 mutex_unlock(&rproc->lock);
1508 if (!rproc->recovery_disabled)
1509 rproc_trigger_recovery(rproc);
1510 }
1512 /**
1513 * rproc_get_id() - return the id for the rproc device
1514 * @rproc: handle of a remote processor
1515 *
1516 * Each rproc device is associated with a platform device, which is created
1517 * either from device tree (majority newer platforms) or using legacy style
1518 * platform device creation (fewer legacy platforms). This function retrieves
1519 * an unique id for each remote processor and is useful for clients needing
1520 * to distinguish each of the remoteprocs. This unique id is derived using
1521 * the platform device id for non-DT devices, or an alternate alias id for
1522 * DT devices (since they do not have a valid platform device id). It is
1523 * assumed that the platform devices were created with known ids or were
1524 * given proper alias ids using the stem "rproc".
1525 *
1526 * Return: alias id for DT devices or platform device id for non-DT devices
1527 * associated with the rproc
1528 */
1529 int rproc_get_id(struct rproc *rproc)
1530 {
1531 struct device *dev = rproc->dev.parent;
1532 struct device_node *np = dev->of_node;
1533 struct platform_device *pdev = to_platform_device(dev);
1535 if (np)
1536 return of_alias_get_id(np, "rproc");
1537 else
1538 return pdev->id;
1539 }
1540 EXPORT_SYMBOL(rproc_get_id);
1542 /**
1543 * rproc_boot() - boot a remote processor
1544 * @rproc: handle of a remote processor
1545 *
1546 * Boot a remote processor (i.e. load its firmware, power it on, ...).
1547 *
1548 * If the remote processor is already powered on, this function immediately
1549 * returns (successfully).
1550 *
1551 * Returns 0 on success, and an appropriate error value otherwise.
1552 */
1553 int rproc_boot(struct rproc *rproc)
1554 {
1555 const struct firmware *firmware_p;
1556 struct device *dev;
1557 int ret;
1559 if (!rproc) {
1560 pr_err("invalid rproc handle\n");
1561 return -EINVAL;
1562 }
1564 dev = &rproc->dev;
1566 ret = mutex_lock_interruptible(&rproc->lock);
1567 if (ret) {
1568 dev_err(dev, "can't lock rproc %s: %d\n", rproc->name, ret);
1569 return ret;
1570 }
1572 if (rproc->state == RPROC_DELETED) {
1573 ret = -ENODEV;
1574 dev_err(dev, "can't boot deleted rproc %s\n", rproc->name);
1575 goto unlock_mutex;
1576 }
1578 /* skip the boot process if rproc is already powered up */
1579 if (atomic_inc_return(&rproc->power) > 1) {
1580 ret = 0;
1581 goto unlock_mutex;
1582 }
1584 dev_info(dev, "powering up %s\n", rproc->name);
1586 if (!rproc->skip_firmware_request) {
1587 /* load firmware */
1588 ret = request_firmware(&firmware_p, rproc->firmware, dev);
1589 if (ret < 0) {
1590 dev_err(dev, "request_firmware failed: %d\n", ret);
1591 goto downref_rproc;
1592 }
1593 }
1595 ret = rproc_fw_boot(rproc, firmware_p);
1597 if (!rproc->skip_firmware_request)
1598 release_firmware(firmware_p);
1600 downref_rproc:
1601 if (ret)
1602 atomic_dec(&rproc->power);
1603 unlock_mutex:
1604 mutex_unlock(&rproc->lock);
1605 return ret;
1606 }
1607 EXPORT_SYMBOL(rproc_boot);
1609 /**
1610 * rproc_shutdown() - power off the remote processor
1611 * @rproc: the remote processor
1612 *
1613 * Power off a remote processor (previously booted with rproc_boot()).
1614 *
1615 * In case @rproc is still being used by an additional user(s), then
1616 * this function will just decrement the power refcount and exit,
1617 * without really powering off the device.
1618 *
1619 * Every call to rproc_boot() must (eventually) be accompanied by a call
1620 * to rproc_shutdown(). Calling rproc_shutdown() redundantly is a bug.
1621 *
1622 * Notes:
1623 * - we're not decrementing the rproc's refcount, only the power refcount.
1624 * which means that the @rproc handle stays valid even after rproc_shutdown()
1625 * returns, and users can still use it with a subsequent rproc_boot(), if
1626 * needed.
1627 */
1628 void rproc_shutdown(struct rproc *rproc)
1629 {
1630 struct device *dev = &rproc->dev;
1631 int ret;
1632 bool crashed = false;
1634 ret = mutex_lock_interruptible(&rproc->lock);
1635 if (ret) {
1636 dev_err(dev, "can't lock rproc %s: %d\n", rproc->name, ret);
1637 return;
1638 }
1640 /* if the remote proc is still needed, bail out */
1641 if (!atomic_dec_and_test(&rproc->power))
1642 goto out;
1644 if (rproc->state == RPROC_CRASHED)
1645 crashed = true;
1647 /* remove any subdevices for the remote processor */
1648 rproc_stop_subdevices(rproc, crashed);
1650 /* power off the remote processor */
1651 ret = rproc->ops->stop(rproc);
1652 if (ret) {
1653 atomic_inc(&rproc->power);
1654 dev_err(dev, "can't stop rproc: %d\n", ret);
1655 goto out;
1656 }
1658 rproc_unprepare_subdevices(rproc);
1660 /* generate coredump */
1661 if (rproc->state == RPROC_CRASHED)
1662 rproc_coredump(rproc);
1664 /* the installed resource table may no longer be accessible */
1665 rproc->table_ptr = rproc->cached_table;
1667 /* clean up all acquired resources */
1668 rproc_resource_cleanup(rproc);
1670 /* release HW resources if needed */
1671 if (rproc->ops->unprepare)
1672 rproc->ops->unprepare(rproc);
1674 rproc_disable_iommu(rproc);
1676 /* Free the copy of the resource table */
1677 kfree(rproc->cached_table);
1678 rproc->cached_table = NULL;
1679 rproc->table_ptr = NULL;
1681 /* if in crash state, unlock crash handler */
1682 if (rproc->state == RPROC_CRASHED)
1683 complete_all(&rproc->crash_comp);
1685 rproc->state = RPROC_OFFLINE;
1686 rproc->late_attach = 0;
1688 dev_info(dev, "stopped remote processor %s\n", rproc->name);
1690 out:
1691 mutex_unlock(&rproc->lock);
1692 }
1693 EXPORT_SYMBOL(rproc_shutdown);
1695 /**
1696 * rproc_get_by_phandle() - find a remote processor by phandle
1697 * @phandle: phandle to the rproc
1698 *
1699 * Finds an rproc handle using the remote processor's phandle, and then
1700 * return a handle to the rproc.
1701 *
1702 * This function increments the remote processor's refcount, so always
1703 * use rproc_put() to decrement it back once rproc isn't needed anymore.
1704 *
1705 * Returns the rproc handle on success, and NULL on failure.
1706 */
1707 #ifdef CONFIG_OF
1708 struct rproc *rproc_get_by_phandle(phandle phandle)
1709 {
1710 struct rproc *rproc = NULL, *r;
1711 struct device_node *np;
1713 np = of_find_node_by_phandle(phandle);
1714 if (!np)
1715 return NULL;
1717 mutex_lock(&rproc_list_mutex);
1718 list_for_each_entry(r, &rproc_list, node) {
1719 if (r->dev.parent && r->dev.parent->of_node == np) {
1720 /* prevent underlying implementation from being removed */
1721 if (!try_module_get(r->dev.parent->driver->owner)) {
1722 dev_err(&r->dev, "can't get owner\n");
1723 break;
1724 }
1726 rproc = r;
1727 get_device(&rproc->dev);
1728 break;
1729 }
1730 }
1731 mutex_unlock(&rproc_list_mutex);
1733 of_node_put(np);
1735 return rproc;
1736 }
1737 #else
1738 struct rproc *rproc_get_by_phandle(phandle phandle)
1739 {
1740 return NULL;
1741 }
1742 #endif
1743 EXPORT_SYMBOL(rproc_get_by_phandle);
1745 /**
1746 * rproc_add() - register a remote processor
1747 * @rproc: the remote processor handle to register
1748 *
1749 * Registers @rproc with the remoteproc framework, after it has been
1750 * allocated with rproc_alloc().
1751 *
1752 * This is called by the platform-specific rproc implementation, whenever
1753 * a new remote processor device is probed.
1754 *
1755 * Returns 0 on success and an appropriate error code otherwise.
1756 *
1757 * Note: this function initiates an asynchronous firmware loading
1758 * context, which will look for virtio devices supported by the rproc's
1759 * firmware.
1760 *
1761 * If found, those virtio devices will be created and added, so as a result
1762 * of registering this remote processor, additional virtio drivers might be
1763 * probed.
1764 */
1765 int rproc_add(struct rproc *rproc)
1766 {
1767 struct device *dev = &rproc->dev;
1768 int ret;
1770 ret = device_add(dev);
1771 if (ret < 0)
1772 return ret;
1774 dev_info(dev, "%s is available\n", rproc->name);
1776 /* create debugfs entries */
1777 rproc_create_debug_dir(rproc);
1779 /* if rproc is marked always-on, request it to boot */
1780 if (rproc->auto_boot) {
1781 ret = rproc_trigger_auto_boot(rproc);
1782 if (ret < 0)
1783 return ret;
1784 }
1786 /* expose to rproc_get_by_phandle users */
1787 mutex_lock(&rproc_list_mutex);
1788 list_add(&rproc->node, &rproc_list);
1789 mutex_unlock(&rproc_list_mutex);
1791 return 0;
1792 }
1793 EXPORT_SYMBOL(rproc_add);
1795 /**
1796 * rproc_type_release() - release a remote processor instance
1797 * @dev: the rproc's device
1798 *
1799 * This function should _never_ be called directly.
1800 *
1801 * It will be called by the driver core when no one holds a valid pointer
1802 * to @dev anymore.
1803 */
1804 static void rproc_type_release(struct device *dev)
1805 {
1806 struct rproc *rproc = container_of(dev, struct rproc, dev);
1808 dev_info(&rproc->dev, "releasing %s\n", rproc->name);
1810 idr_destroy(&rproc->notifyids);
1812 if (rproc->index >= 0)
1813 ida_simple_remove(&rproc_dev_index, rproc->index);
1815 kfree(rproc->firmware);
1816 kfree(rproc->ops);
1817 kfree(rproc->name);
1818 kfree(rproc);
1819 }
/*
 * Common device type for all rproc devices; its identity is also what
 * rproc_get_by_child() matches on when walking up the device tree.
 */
static const struct device_type rproc_type = {
	.name = "remoteproc",
	.release = rproc_type_release,
};
1826 /**
1827 * rproc_alloc() - allocate a remote processor handle
1828 * @dev: the underlying device
1829 * @name: name of this remote processor
1830 * @ops: platform-specific handlers (mainly start/stop)
1831 * @firmware: name of firmware file to load, can be NULL
1832 * @len: length of private data needed by the rproc driver (in bytes)
1833 *
1834 * Allocates a new remote processor handle, but does not register
1835 * it yet. if @firmware is NULL, a default name is used.
1836 *
1837 * This function should be used by rproc implementations during initialization
1838 * of the remote processor.
1839 *
1840 * After creating an rproc handle using this function, and when ready,
1841 * implementations should then call rproc_add() to complete
1842 * the registration of the remote processor.
1843 *
1844 * On success the new rproc is returned, and on failure, NULL.
1845 *
1846 * Note: _never_ directly deallocate @rproc, even if it was not registered
1847 * yet. Instead, when you need to unroll rproc_alloc(), use rproc_free().
1848 */
1849 struct rproc *rproc_alloc(struct device *dev, const char *name,
1850 const struct rproc_ops *ops,
1851 const char *firmware, int len)
1852 {
1853 struct rproc *rproc;
1854 char *p, *template = "rproc-%s-fw";
1855 int name_len;
1857 if (!dev || !name || !ops)
1858 return NULL;
1860 if (!firmware) {
1861 /*
1862 * If the caller didn't pass in a firmware name then
1863 * construct a default name.
1864 */
1865 name_len = strlen(name) + strlen(template) - 2 + 1;
1866 p = kmalloc(name_len, GFP_KERNEL);
1867 if (!p)
1868 return NULL;
1869 snprintf(p, name_len, template, name);
1870 } else {
1871 p = kstrdup(firmware, GFP_KERNEL);
1872 if (!p)
1873 return NULL;
1874 }
1876 rproc = kzalloc(sizeof(struct rproc) + len, GFP_KERNEL);
1877 if (!rproc) {
1878 kfree(p);
1879 return NULL;
1880 }
1882 rproc->ops = kmemdup(ops, sizeof(*ops), GFP_KERNEL);
1883 if (!rproc->ops) {
1884 kfree(p);
1885 kfree(rproc);
1886 return NULL;
1887 }
1889 rproc->firmware = p;
1890 rproc->name = kstrdup(name, GFP_KERNEL);
1891 if (!rproc->name) {
1892 kfree(p);
1893 kfree(rproc->ops);
1894 kfree(rproc);
1895 return NULL;
1896 }
1897 rproc->priv = &rproc[1];
1898 rproc->auto_boot = true;
1900 device_initialize(&rproc->dev);
1901 rproc->dev.parent = dev;
1902 rproc->dev.type = &rproc_type;
1903 rproc->dev.class = &rproc_class;
1904 rproc->dev.driver_data = rproc;
1906 /* Assign a unique device index and name */
1907 rproc->index = ida_simple_get(&rproc_dev_index, 0, 0, GFP_KERNEL);
1908 if (rproc->index < 0) {
1909 dev_err(dev, "ida_simple_get failed: %d\n", rproc->index);
1910 put_device(&rproc->dev);
1911 return NULL;
1912 }
1914 dev_set_name(&rproc->dev, "remoteproc%d", rproc->index);
1916 atomic_set(&rproc->power, 0);
1918 /* Default to ELF loader if no load function is specified */
1919 if (!rproc->ops->load) {
1920 rproc->ops->load = rproc_elf_load_segments;
1921 rproc->ops->parse_fw = rproc_elf_load_rsc_table;
1922 rproc->ops->find_loaded_rsc_table = rproc_elf_find_loaded_rsc_table;
1923 rproc->ops->sanity_check = rproc_elf_sanity_check;
1924 rproc->ops->get_boot_addr = rproc_elf_get_boot_addr;
1925 }
1927 mutex_init(&rproc->lock);
1929 idr_init(&rproc->notifyids);
1931 INIT_LIST_HEAD(&rproc->carveouts);
1932 INIT_LIST_HEAD(&rproc->mappings);
1933 INIT_LIST_HEAD(&rproc->traces);
1934 INIT_LIST_HEAD(&rproc->last_traces);
1935 INIT_LIST_HEAD(&rproc->rvdevs);
1936 INIT_LIST_HEAD(&rproc->subdevs);
1937 INIT_LIST_HEAD(&rproc->dump_segments);
1939 INIT_WORK(&rproc->crash_handler, rproc_crash_handler_work);
1940 init_completion(&rproc->crash_comp);
1942 rproc->state = RPROC_OFFLINE;
1944 return rproc;
1945 }
1946 EXPORT_SYMBOL(rproc_alloc);
1948 /**
1949 * rproc_free() - unroll rproc_alloc()
1950 * @rproc: the remote processor handle
1951 *
1952 * This function decrements the rproc dev refcount.
1953 *
1954 * If no one holds any reference to rproc anymore, then its refcount would
1955 * now drop to zero, and it would be freed.
1956 */
1957 void rproc_free(struct rproc *rproc)
1958 {
1959 put_device(&rproc->dev);
1960 }
1961 EXPORT_SYMBOL(rproc_free);
1963 /**
1964 * rproc_put() - release rproc reference
1965 * @rproc: the remote processor handle
1966 *
1967 * This function decrements the rproc dev refcount.
1968 *
1969 * If no one holds any reference to rproc anymore, then its refcount would
1970 * now drop to zero, and it would be freed.
1971 */
1972 void rproc_put(struct rproc *rproc)
1973 {
1974 module_put(rproc->dev.parent->driver->owner);
1975 put_device(&rproc->dev);
1976 }
1977 EXPORT_SYMBOL(rproc_put);
1979 /**
1980 * rproc_del() - unregister a remote processor
1981 * @rproc: rproc handle to unregister
1982 *
1983 * This function should be called when the platform specific rproc
1984 * implementation decides to remove the rproc device. it should
1985 * _only_ be called if a previous invocation of rproc_add()
1986 * has completed successfully.
1987 *
1988 * After rproc_del() returns, @rproc isn't freed yet, because
1989 * of the outstanding reference created by rproc_alloc. To decrement that
1990 * one last refcount, one still needs to call rproc_free().
1991 *
1992 * Returns 0 on success and -EINVAL if @rproc isn't valid.
1993 */
1994 int rproc_del(struct rproc *rproc)
1995 {
1996 struct rproc_mem_entry *entry, *tmp;
1998 if (!rproc)
1999 return -EINVAL;
2001 /* if rproc is marked always-on, rproc_add() booted it */
2002 /* TODO: make sure this works with rproc->power > 1 */
2003 if (rproc->auto_boot)
2004 rproc_shutdown(rproc);
2006 mutex_lock(&rproc->lock);
2007 rproc->state = RPROC_DELETED;
2008 mutex_unlock(&rproc->lock);
2010 /* clean up debugfs last trace entries */
2011 list_for_each_entry_safe(entry, tmp, &rproc->last_traces, node) {
2012 rproc_free_last_trace(entry);
2013 rproc->num_last_traces--;
2014 }
2016 rproc_delete_debug_dir(rproc);
2018 /* the rproc is downref'ed as soon as it's removed from the klist */
2019 mutex_lock(&rproc_list_mutex);
2020 list_del(&rproc->node);
2021 mutex_unlock(&rproc_list_mutex);
2023 device_del(&rproc->dev);
2025 return 0;
2026 }
2027 EXPORT_SYMBOL(rproc_del);
2029 /**
2030 * rproc_add_subdev() - add a subdevice to a remoteproc
2031 * @rproc: rproc handle to add the subdevice to
2032 * @subdev: subdev handle to register
2033 *
2034 * Caller is responsible for populating optional subdevice function pointers.
2035 */
2036 void rproc_add_subdev(struct rproc *rproc, struct rproc_subdev *subdev)
2037 {
2038 list_add_tail(&subdev->node, &rproc->subdevs);
2039 }
2040 EXPORT_SYMBOL(rproc_add_subdev);
2042 /**
2043 * rproc_remove_subdev() - remove a subdevice from a remoteproc
2044 * @rproc: rproc handle to remove the subdevice from
2045 * @subdev: subdev handle, previously registered with rproc_add_subdev()
2046 */
2047 void rproc_remove_subdev(struct rproc *rproc, struct rproc_subdev *subdev)
2048 {
2049 list_del(&subdev->node);
2050 }
2051 EXPORT_SYMBOL(rproc_remove_subdev);
2053 /**
2054 * rproc_get_by_child() - acquire rproc handle of @dev's ancestor
2055 * @dev: child device to find ancestor of
2056 *
2057 * Returns the ancestor rproc instance, or NULL if not found.
2058 */
2059 struct rproc *rproc_get_by_child(struct device *dev)
2060 {
2061 for (dev = dev->parent; dev; dev = dev->parent) {
2062 if (dev->type == &rproc_type)
2063 return dev->driver_data;
2064 }
2066 return NULL;
2067 }
2068 EXPORT_SYMBOL(rproc_get_by_child);
2070 /**
2071 * rproc_report_crash() - rproc crash reporter function
2072 * @rproc: remote processor
2073 * @type: crash type
2074 *
2075 * This function must be called every time a crash is detected by the low-level
2076 * drivers implementing a specific remoteproc. This should not be called from a
2077 * non-remoteproc driver.
2078 *
2079 * This function can be called from atomic/interrupt context.
2080 */
2081 void rproc_report_crash(struct rproc *rproc, enum rproc_crash_type type)
2082 {
2083 if (!rproc) {
2084 pr_err("NULL rproc pointer\n");
2085 return;
2086 }
2088 dev_err(&rproc->dev, "crash detected in %s: type %s\n",
2089 rproc->name, rproc_crash_to_string(type));
2091 /* create a new task to handle the error if not scheduled already */
2092 if (!work_busy(&rproc->crash_handler))
2093 schedule_work(&rproc->crash_handler);
2094 }
2095 EXPORT_SYMBOL(rproc_report_crash);
2097 /**
2098 * rproc_set_firmware() - assign a new firmware
2099 * @rproc: rproc handle to which the new firmware is being assigned
2100 * @fw_name: new firmware name to be assigned
2101 *
2102 * This function allows remoteproc drivers or clients to configure a custom
2103 * firmware name that is different from the default name used during remoteproc
2104 * registration. The function does not trigger a remote processor boot,
2105 * only sets the firmware name used for a subsequent boot. This function
2106 * should also be called only when the remote processor is offline.
2107 *
2108 * This allows either the userspace to configure a different name through
2109 * sysfs or a kernel-level remoteproc or a remoteproc client driver to set
2110 * a specific firmware when it is controlling the boot and shutdown of the
2111 * remote processor.
2112 *
2113 * Returns 0 on success or a negative value upon failure
2114 */
2115 int rproc_set_firmware(struct rproc *rproc, const char *fw_name)
2116 {
2117 struct device *dev;
2118 int ret, len;
2119 char *p;
2121 if (!rproc || !fw_name)
2122 return -EINVAL;
2124 dev = rproc->dev.parent;
2126 ret = mutex_lock_interruptible(&rproc->lock);
2127 if (ret) {
2128 dev_err(dev, "can't lock rproc %s: %d\n", rproc->name, ret);
2129 return -EINVAL;
2130 }
2132 if (rproc->state != RPROC_OFFLINE) {
2133 dev_err(dev, "can't change firmware while running\n");
2134 ret = -EBUSY;
2135 goto out;
2136 }
2138 len = strcspn(fw_name, "\n");
2139 if (!len) {
2140 dev_err(dev, "can't provide empty string for firmware name\n");
2141 ret = -EINVAL;
2142 goto out;
2143 }
2145 p = kstrndup(fw_name, len, GFP_KERNEL);
2146 if (!p) {
2147 ret = -ENOMEM;
2148 goto out;
2149 }
2151 kfree(rproc->firmware);
2152 rproc->firmware = p;
2154 out:
2155 mutex_unlock(&rproc->lock);
2156 return ret;
2157 }
2158 EXPORT_SYMBOL(rproc_set_firmware);
/* module init: register the class sysfs attributes and the debugfs root */
static int __init remoteproc_init(void)
{
	rproc_init_sysfs();
	rproc_init_debugfs();

	/* both helpers' results are intentionally not fatal here */
	return 0;
}
module_init(remoteproc_init);
/* module exit: drop the device-index ida and undo remoteproc_init() */
static void __exit remoteproc_exit(void)
{
	ida_destroy(&rproc_dev_index);

	rproc_exit_debugfs();
	rproc_exit_sysfs();
}
module_exit(remoteproc_exit);
2178 MODULE_LICENSE("GPL v2");
2179 MODULE_DESCRIPTION("Generic Remote Processor Framework");