1 /*
2 * User-space module driver
3 *
4 * Copyright (C) 2013-2015 Texas Instruments Incorporated - http://www.ti.com/
5 *
6 *
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
9 * are met:
10 *
11 * Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 *
14 * Redistributions in binary form must reproduce the above copyright
15 * notice, this list of conditions and the following disclaimer in the
16 * documentation and/or other materials provided with the
17 * distribution.
18 *
19 * Neither the name of Texas Instruments Incorporated nor the names of
20 * its contributors may be used to endorse or promote products derived
21 * from this software without specific prior written permission.
22 *
23 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
24 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
25 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
26 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
27 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
28 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
29 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
30 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
31 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
32 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
33 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
34 *
35 */
36 #include <linux/clk.h>
37 #include <linux/module.h>
38 #include <linux/of_gpio.h>
39 #include <linux/miscdevice.h>
40 #include <linux/platform_device.h>
41 #include <linux/slab.h>
42 #include <linux/virtio.h>
43 #include <linux/uio_driver.h>
44 #include <linux/uaccess.h>
45 #include <linux/spinlock.h>
46 #include <linux/miscdevice.h>
47 #include <linux/pm_runtime.h>
48 #include "uio_module_drv.h"
#define DRIVER_NAME "uio_module_drv"	/* platform driver / misc device name */
#define DRIVER_VERSION "2.0"		/* exposed via the UIO "version" attribute */
/**
 * struct uio_module_drv_info - local information for uio module driver
 * @uio:            UIO device info registered with the UIO core
 * @dev:            device pointer
 * @misc:           misc device exposing the configured "mem" ranges
 * @clk:            optional functional clock, enabled on UIO open (may be NULL)
 * @flags:          bit 0 set while the interrupt line is masked
 * @lock:           protects @flags against the interrupt handler
 * @interrupt_mode: no disable on interrupt (0) or one-shot mode (1)
 * @workqueue:      workqueue for interrupt triggered tasks
 * @map_dir:        "maps" sysfs directory under the misc device
 * @mem:            uio memory regions pointer (parsed from the "mem" DT prop)
 * @num_maps:       number of uio memory regions
 */
struct uio_module_drv_info {
	struct uio_info uio;
	struct device *dev;
	struct miscdevice misc;
	struct clk *clk;
	unsigned long flags;
	spinlock_t lock;
	int interrupt_mode;
	struct work_struct workqueue;
	struct kobject *map_dir;
	struct uio_mem *mem;
	int num_maps;
};
/*
 * struct uio_map - sysfs representation of one user-mappable region
 * @kobj: kobject backing the maps/mapN directory
 * @mem:  the region this directory describes
 */
struct uio_map {
	struct kobject kobj;
	struct uio_mem *mem;
};
87 /**
88 * uio_module_drv_open - uio module driver open routine
89 */
90 static int uio_module_drv_open(struct uio_info *uio, struct inode *inode)
91 {
92 struct uio_module_drv_info *uio_module_drv = uio->priv;
94 if (uio_module_drv->clk)
95 return clk_prepare_enable(uio_module_drv->clk);
96 else
97 return 0;
98 }
100 /**
101 * uio_module_drv_release - uio module driver release routine
102 */
103 static int uio_module_drv_release(struct uio_info *uio, struct inode *inode)
104 {
105 struct uio_module_drv_info *uio_module_drv = uio->priv;
107 if (uio_module_drv->clk)
108 clk_disable_unprepare(uio_module_drv->clk);
109 return 0;
110 }
112 /**
113 * uio_module_drv_handler - uio module driver interrupt handler
114 */
115 static irqreturn_t uio_module_drv_handler(int irq, struct uio_info *uio)
116 {
117 struct uio_module_drv_info *uio_module_drv = uio->priv;
119 if (uio_module_drv->interrupt_mode == 1) {
120 spin_lock(&uio_module_drv->lock);
121 if (!__test_and_set_bit(0, &uio_module_drv->flags))
122 disable_irq_nosync(irq);
123 spin_unlock(&uio_module_drv->lock);
124 }
125 return IRQ_HANDLED;
126 }
128 /**
129 * uio_module_drv_irqcontrol - uio module driver interrupt control
130 */
131 static int uio_module_drv_irqcontrol(struct uio_info *uio, s32 irq_on)
132 {
133 struct uio_module_drv_info *uio_module_drv = uio->priv;
134 unsigned long flags;
136 spin_lock_irqsave(&uio_module_drv->lock, flags);
137 if (irq_on) {
138 if (__test_and_clear_bit(0, &uio_module_drv->flags))
139 enable_irq(uio->irq);
140 } else {
141 if (!__test_and_set_bit(0, &uio_module_drv->flags))
142 disable_irq_nosync(uio->irq);
143 }
144 spin_unlock_irqrestore(&uio_module_drv->lock, flags);
146 return 0;
147 }
/**
 * uio_module_drv_dev_fop_open() : open for the uio module driver
 *
 * Intentionally empty: the misc core stores the miscdevice pointer in
 * file->private_data during open, which read/write/mmap rely on.
 */
static int uio_module_drv_dev_fop_open(struct inode *inode, struct file *file)
{
	/* Need an empty open so that file->private_data gets populated */
	return 0;
}
/**
 * uio_module_drv_dev_fop_mmap() - provided mmap support for
 * device memory. This checks if user request is in valid range before providing
 * mmap access. The valid range can be configured using device tree or platform
 * data.
 *
 * The low bits of vma->vm_pgoff (UIO_MODULE_DRV_MAP_INDEX_MASK) select the
 * configured region; the remaining pgoff bits give the page offset within
 * that region.
 */
static int uio_module_drv_dev_fop_mmap(
	struct file *file,
	struct vm_area_struct *vma
	)
{
	size_t size = vma->vm_end - vma->vm_start;
	struct miscdevice *misc = file->private_data;
	struct uio_module_drv_info *uio_module_drv =
		container_of(misc, struct uio_module_drv_info, misc);

	int index;
	size_t req_offset;

	/* Region index is encoded in the low bits of the mmap offset */
	index = vma->vm_pgoff & UIO_MODULE_DRV_MAP_INDEX_MASK;

	/* Check if index is valid */
	if (index >= uio_module_drv->num_maps) {
		dev_err(uio_module_drv->dev,
			"mmap index %d is outside the allowed range\n",
			index);
		return -EINVAL;
	}

	/* Byte offset of the request within the selected region */
	req_offset = (vma->vm_pgoff - index) << PAGE_SHIFT;

	/* Check if it fits within the page of configured size */
	/* NOTE: This allows exposing the whole page to user space */
	if ((req_offset + size)
	    > ((uio_module_drv->mem[index].size + PAGE_SIZE-1) & (~(PAGE_SIZE-1)))) {
		dev_err(uio_module_drv->dev,
			"mmap index %d: mmap offset(0x%zx) and size (0x%zx) is outside the allowed range\n",
			index, req_offset, size);
		return -EINVAL;
	}

	/* arch-specific protection bits for device memory mappings */
	vma->vm_page_prot = phys_mem_access_prot(
		file, (uio_module_drv->mem[index].addr >> PAGE_SHIFT)
		+ (vma->vm_pgoff - index),
		size, vma->vm_page_prot
		);

	if (remap_pfn_range(vma, vma->vm_start,
			    (uio_module_drv->mem[index].addr >> PAGE_SHIFT)
			    + (vma->vm_pgoff - index),
			    size, vma->vm_page_prot)) {
		return -EAGAIN;
	}
	return 0;
}
213 /**
214 * uio_module_drv_transfer() - provides ability to write to memory areas
215 */
217 static ssize_t uio_module_drv_transfer(struct file *file, char __user *buf,
218 size_t count, loff_t *ppos, int flag)
219 {
220 struct miscdevice *misc = file->private_data;
221 struct uio_module_drv_info *uio_module_drv =
222 container_of(misc, struct uio_module_drv_info, misc);
223 uint32_t phys_addr;
224 void __iomem *virt_addr_p = NULL;
225 int index;
227 phys_addr = *ppos;
229 for (index = 0; index < uio_module_drv->num_maps; index++) {
230 if ( phys_addr >= uio_module_drv->mem[index].addr
231 && phys_addr < (uio_module_drv->mem[index].addr
232 + uio_module_drv->mem[index].size)) {
233 break;
234 }
235 }
236 if (index == uio_module_drv->num_maps) {
237 dev_err(uio_module_drv->dev,
238 "request address 0x%x is not in the allowed ranges\n",
239 phys_addr);
240 return -EINVAL;
241 }
243 virt_addr_p = ioremap_nocache(phys_addr, count);
244 if (!virt_addr_p) {
245 dev_err(uio_module_drv->dev, "Mapping of virtual memory failed\n");
246 return(-ENOMEM);
247 }
248 if (flag == 1) {
249 if (copy_from_user((__force void *) virt_addr_p, buf, count)) {
250 dev_err(uio_module_drv->dev, "copy_from_user failed\n");
251 return -EFAULT;
252 }
253 } else {
254 if (copy_to_user(buf, (__force void *)virt_addr_p, count)) {
255 dev_err(uio_module_drv->dev, "copy_to_user failed\n");
256 return -EFAULT;
257 }
258 }
259 iounmap(virt_addr_p);
260 return count;
261 }
263 /**
264 * uio_module_drv_dev_fop_write() - provides ability to write to memory areas
265 */
267 static ssize_t uio_module_drv_dev_fop_write(struct file *file,
268 const char __user *buf, size_t count,
269 loff_t *ppos)
270 {
271 return
272 uio_module_drv_transfer(
273 file, (char __user *)buf,
274 count, ppos, 1
275 );
276 }
278 /**
279 * uio_module_drv_dev_fop_read() - provides ability to read from memory areas
280 */
282 static ssize_t uio_module_drv_dev_fop_read(struct file *file, char __user *buf,
283 size_t count, loff_t *ppos)
284 {
285 return uio_module_drv_transfer(file, buf, count, ppos, 0);
286 }
/* File operations of the misc device exposing the configured "mem" ranges */
static const struct file_operations uio_module_drv_dev_fops = {
	.owner = THIS_MODULE,
	.open = uio_module_drv_dev_fop_open,
	.mmap = uio_module_drv_dev_fop_mmap,
	.read = uio_module_drv_dev_fop_read,
	.write = uio_module_drv_dev_fop_write,
	.llseek = generic_file_llseek,	/* file position doubles as phys addr */
};
297 /**
298 * uio_module_drv_populate_segments() - scan the configuration for "mem" and
299 * populate in uio_module_drv local structure. This information will be used to
300 * process user mmap requests.
301 */
302 static inline int
303 uio_module_drv_populate_segments(struct device_node *np,
304 struct uio_module_drv_info *uio_module_drv)
305 {
306 int len, i;
307 u32 dt_value;
308 int num_maps = 0;
309 char *map_name;
311 if (of_get_property(np, "mem", &len)) {
312 /*
313 * check if length even multiple of sizeof(u32), i.e.,
314 * the dt bindings need to be of the form <addr length>
315 */
316 len = len / sizeof(u32);
317 if ((len % 2) != 0) {
318 dev_err(uio_module_drv->dev, "invalid address map in dt binding\n");
319 return -EINVAL;
320 }
321 num_maps = len / 2;
323 uio_module_drv->mem = devm_kzalloc(uio_module_drv->dev,
324 sizeof(struct uio_mem) * num_maps,
325 GFP_KERNEL);
326 if (!uio_module_drv->mem) {
327 dev_err(uio_module_drv->dev, "devm_kzalloc mapping failed\n");
328 return -ENOMEM;
329 }
331 /* populate the uio_module_drv structure for policing */
332 for (i = 0; i < num_maps; i++) {
333 uio_module_drv->mem[i].memtype = UIO_MEM_PHYS;
334 if (of_property_read_u32_index(np, "mem", 2 * i,
335 &dt_value)) {
336 dev_err(uio_module_drv->dev,
337 "Error reading dt bindings: addr\n");
338 return -ENODEV;
339 }
340 uio_module_drv->mem[i].addr = dt_value;
341 if (of_property_read_u32_index(np, "mem", (2 * i) + 1,
342 &dt_value)) {
343 dev_err(uio_module_drv->dev,
344 "Error reading dt bindings: size\n");
345 return -ENODEV;
346 }
347 uio_module_drv->mem[i].size = dt_value;
348 map_name = devm_kzalloc(uio_module_drv->dev, 16*sizeof(char), GFP_KERNEL);
349 if (!of_property_read_string_index(np, "mem-names", i,
350 (const char **)&map_name)) {
351 uio_module_drv->mem[i].name = map_name;
352 }
353 }
354 uio_module_drv->num_maps = num_maps;
355 }
356 return 0;
357 }
/* Resolve the kobject embedded in a struct uio_map back to the map */
#define to_map(map) container_of(map, struct uio_map, kobj)

/* sysfs "name": region name; patches a NULL name to "" on first read */
static ssize_t map_name_show(struct uio_mem *mem, char *buf)
{
	if (unlikely(!mem->name))
		mem->name = "";

	return sprintf(buf, "%s\n", mem->name);
}
/* sysfs "addr": physical base address of the region */
static ssize_t map_addr_show(struct uio_mem *mem, char *buf)
{
	return sprintf(buf, "%pa\n", &mem->addr);
}
/* sysfs "size": size of the region in bytes */
static ssize_t map_size_show(struct uio_mem *mem, char *buf)
{
	return sprintf(buf, "%pa\n", &mem->size);
}
/* sysfs "offset": offset of the base address within its page */
static ssize_t map_offset_show(struct uio_mem *mem, char *buf)
{
	return sprintf(buf, "0x%llx\n", (unsigned long long)mem->addr & ~PAGE_MASK);
}
/*
 * struct map_sysfs_entry - show/store hooks for one mapN attribute
 * @attr:  underlying sysfs attribute
 * @show:  formats the attribute value
 * @store: writes the attribute value (unused; all attributes are read-only)
 */
struct map_sysfs_entry {
	struct attribute attr;
	ssize_t (*show)(struct uio_mem *, char *);
	ssize_t (*store)(struct uio_mem *, const char *, size_t);
};

/* Read-only attributes exported for every configured memory region */
static struct map_sysfs_entry name_attribute =
	__ATTR(name, S_IRUGO, map_name_show, NULL);
static struct map_sysfs_entry addr_attribute =
	__ATTR(addr, S_IRUGO, map_addr_show, NULL);
static struct map_sysfs_entry size_attribute =
	__ATTR(size, S_IRUGO, map_size_show, NULL);
static struct map_sysfs_entry offset_attribute =
	__ATTR(offset, S_IRUGO, map_offset_show, NULL);

static struct attribute *attrs[] = {
	&name_attribute.attr,
	&addr_attribute.attr,
	&size_attribute.attr,
	&offset_attribute.attr,
	NULL, /* need to NULL terminate the list of attributes */
};
/* kobject release: frees the uio_map once its last reference is dropped */
static void map_release(struct kobject *kobj)
{
	kfree(to_map(kobj));
}
413 static ssize_t map_type_show(struct kobject *kobj, struct attribute *attr,
414 char *buf)
415 {
416 struct uio_map *map = to_map(kobj);
417 struct uio_mem *mem = map->mem;
418 struct map_sysfs_entry *entry;
420 entry = container_of(attr, struct map_sysfs_entry, attr);
422 if (!entry->show)
423 return -EIO;
425 return entry->show(mem, buf);
426 }
/* Only .show is wired up: all map attributes are read-only */
static const struct sysfs_ops map_sysfs_ops = {
	.show = map_type_show,
};

static struct kobj_type map_attr_type = {
	.release = map_release,
	.sysfs_ops = &map_sysfs_ops,
	.default_attrs = attrs,
};
438 static int misc_dev_add_attributes(struct uio_module_drv_info *uio_module_drv)
439 {
440 int ret;
441 int mi;
442 int map_found = 0;
443 struct uio_mem *mem;
444 struct uio_map *map;
446 for (mi = 0; mi < uio_module_drv->num_maps; mi++) {
447 mem = &uio_module_drv->mem[mi];
448 if (mem->size == 0)
449 break;
450 if (!map_found) {
451 map_found = 1;
452 uio_module_drv->map_dir = kobject_create_and_add("maps",
453 &uio_module_drv->misc.this_device->kobj);
455 if (!uio_module_drv->map_dir) {
456 ret = -1;
457 goto err_map;
458 }
459 }
460 map = kzalloc(sizeof(*map), GFP_KERNEL);
461 if (!map)
462 goto err_map_kobj;
463 kobject_init(&map->kobj, &map_attr_type);
464 map->mem = mem;
465 mem->map = map;
466 ret = kobject_add(&map->kobj, uio_module_drv->map_dir, "map%d", mi);
467 if (ret)
468 goto err_map_kobj;
469 ret = kobject_uevent(&map->kobj, KOBJ_ADD);
470 if (ret)
471 goto err_map;
472 }
473 return 0;
475 err_map:
476 mi--;
477 err_map_kobj:
478 for (; mi >= 0; mi--) {
479 mem = &uio_module_drv->mem[mi];
480 map = mem->map;
481 kobject_put(&map->kobj);
482 }
483 kobject_put(uio_module_drv->map_dir);
484 dev_err(uio_module_drv->dev, "error creating sysfs files (%d)\n", ret);
485 return ret;
487 }
489 static void misc_dev_del_attributes(struct uio_module_drv_info *uio_module_drv)
490 {
491 int mi;
492 struct uio_mem *mem;
494 for (mi = 0; mi < uio_module_drv->num_maps; mi++) {
495 mem = &uio_module_drv->mem[mi];
496 if (mem->size == 0)
497 break;
498 kobject_put(&mem->map->kobj);
499 }
500 kobject_put(uio_module_drv->map_dir);
501 }
503 /**
504 * uio_module_drv_driver_probe() probe routine for the uio module driver
505 */
506 static int uio_module_drv_driver_probe(struct platform_device *pdev)
507 {
508 struct device *dev = &pdev->dev;
509 struct device_node *np = dev->of_node;
510 struct uio_module_drv_info *uio_module_drv;
511 struct miscdevice *misc;
512 struct uio_info *uio;
513 struct resource *r;
514 int error = 0;
515 int i;
516 int irq;
517 char *name, *tmp_name;
519 if (!np) {
520 dev_err(dev, "Non dt case not supported\n");
521 return -EINVAL;
522 }
524 uio_module_drv = kzalloc(sizeof(struct uio_module_drv_info), GFP_KERNEL);
526 if (!uio_module_drv) {
527 error = -ENOMEM;
528 goto fail;
529 }
531 uio_module_drv->dev = dev;
532 spin_lock_init(&uio_module_drv->lock);
533 uio_module_drv->flags = 0; /* interrupt is enabled to begin with */
535 uio = &uio_module_drv->uio;
537 name = strchr(dev_name(dev), '.');
538 if (name) {
539 name = name +1;
540 tmp_name = strchr(name, ':');
541 if (tmp_name)
542 uio->name = tmp_name + 1;
543 else
544 uio->name = name;
545 } else {
546 name = strchr(dev_name(dev), ':');
547 if (name)
548 uio->name = name + 1;
549 else
550 uio->name = dev_name(dev);
551 }
553 uio->version = DRIVER_VERSION;
554 uio->priv = uio_module_drv;
555 uio->handler = uio_module_drv_handler;
556 uio->irqcontrol = uio_module_drv_irqcontrol;
557 uio->open = uio_module_drv_open;
558 uio->release = uio_module_drv_release;
560 irq = platform_get_irq(pdev, 0);
561 if (!(irq < 0))
562 /* pass up control irq to user-space */
563 uio->irq = irq;
565 if (
566 of_property_read_u32(
567 np, "interrupt-mode",
568 &uio_module_drv->interrupt_mode
569 ) < 0
570 )
571 uio_module_drv->interrupt_mode = 0;
573 for (i = 0; i < MAX_UIO_MAPS; ++i) {
574 r = platform_get_resource(pdev, IORESOURCE_MEM, i);
575 if (!r)
576 break;
577 uio->mem[i].memtype = UIO_MEM_PHYS;
578 uio->mem[i].addr = r->start & PAGE_MASK;
579 uio->mem[i].size = PAGE_ALIGN(resource_size(r));
580 uio->mem[i].name = r->name;
581 }
583 error = uio_module_drv_populate_segments(np, uio_module_drv);
584 if (error) {
585 dev_err(dev, "failed populating memory segments\n");
586 goto fail_uio;
587 }
589 if (uio->irq || uio->mem[0].memtype != UIO_MEM_NONE) {
590 error = uio_register_device(dev, uio);
591 if (error) {
592 dev_err(dev, "failed to register uio device\n");
593 goto fail_uio;
594 }
595 }
596 platform_set_drvdata(pdev, uio_module_drv);
598 if (uio_module_drv->num_maps) {
599 misc = &uio_module_drv->misc;
600 misc->minor = MISC_DYNAMIC_MINOR;
601 misc->name = uio->name;
602 misc->fops = &uio_module_drv_dev_fops;
603 misc->parent = dev;
605 if (misc_register(misc)) {
606 dev_err(dev, "could not register misc device\n");
607 goto fail_misc_module;
608 }
609 dev_info(dev, "registered misc device %s\n", misc->name);
610 misc_dev_add_attributes(uio_module_drv);
611 }
613 /* If hw mod is present power up module */
614 pm_runtime_enable(dev);
616 /* tell PRCM to de-assert IDLE request */
617 error = pm_runtime_get_sync(dev);
618 if (error < 0) {
619 pm_runtime_put_noidle(dev);
620 goto fail_pm;
621 }
623 /* Enable clock for the module */
624 uio_module_drv->clk = clk_get(dev, NULL);
625 if (IS_ERR(uio_module_drv->clk))
626 uio_module_drv->clk = NULL;
628 return 0;
629 fail_pm:
630 pm_runtime_disable(dev);
631 misc_deregister(&uio_module_drv->misc);
632 misc_dev_del_attributes(uio_module_drv);
633 fail_misc_module:
634 uio_unregister_device(uio);
635 fail_uio:
636 devm_kfree(dev, uio_module_drv);
637 fail:
638 return error;
639 }
641 /**
642 * uio_module_drv_driver_remove() remove routine for the uio module driver
643 */
645 static int uio_module_drv_driver_remove(struct platform_device *pdev)
646 {
647 struct uio_module_drv_info *uio_module_drv = platform_get_drvdata(pdev);
648 struct uio_info *uio = &uio_module_drv->uio;
650 if (uio_module_drv) {
651 if (uio_module_drv->clk)
652 clk_put(uio_module_drv->clk);
653 /* tell PRCM to initiate IDLE request */
654 pm_runtime_put_sync(uio_module_drv->dev);
655 pm_runtime_disable(uio_module_drv->dev);
656 if (uio_module_drv->num_maps) {
657 misc_deregister(&uio_module_drv->misc);
658 misc_dev_del_attributes(uio_module_drv);
659 }
660 if (uio->irq || uio->mem[0].memtype != UIO_MEM_NONE)
661 uio_unregister_device(&uio_module_drv->uio);
662 }
663 platform_set_drvdata(pdev, NULL);
664 return 0;
665 }
/* Device tree match table */
static const struct of_device_id uio_module_drv_of_match[] = {
	{ .compatible = "ti,uio-module-drv", },
	{},
};
MODULE_DEVICE_TABLE(of, uio_module_drv_of_match);
/* Platform driver glue and module metadata */
static struct platform_driver uio_module_drv_driver = {
	.driver = {
		.name = DRIVER_NAME,
		.of_match_table = uio_module_drv_of_match,
	},
	.probe = uio_module_drv_driver_probe,
	.remove = uio_module_drv_driver_remove,
};

module_platform_driver(uio_module_drv_driver);

MODULE_AUTHOR("Sam Nelson");
MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("User-space driver for a generic module ");
MODULE_ALIAS("platform:" DRIVER_NAME);