/*
 * drivers/gpu/ion/ion.c
 *
 * Copyright (C) 2011 Google, Inc.
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 */

#include <linux/device.h>
#include <linux/file.h>
#include <linux/fs.h>
#include <linux/anon_inodes.h>
#include <linux/ion.h>
#include <linux/list.h>
#include <linux/memblock.h>
#include <linux/miscdevice.h>
#include <linux/export.h>
#include <linux/mm.h>
#include <linux/mm_types.h>
#include <linux/rbtree.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/seq_file.h>
#include <linux/uaccess.h>
#include <linux/debugfs.h>
#include <linux/dma-buf.h>

#include "ion_priv.h"

/**
 * struct ion_device - the metadata of the ion device node
 * @dev: the actual misc device
 * @buffers: an rb tree of all the existing buffers
 * @buffer_lock: lock protecting the tree of buffers
 * @lock: rwsem protecting the tree of heaps and clients
 * @heaps: list of all the heaps in the system
 * @custom_ioctl: arch-specific ioctl hook, may be NULL
 * @clients: an rb tree of all the clients in the system
 * @debug_root: root dentry of this device's debugfs files
 */
struct ion_device {
	struct miscdevice dev;
	struct rb_root buffers;
	struct mutex buffer_lock;
	struct rw_semaphore lock;
	struct plist_head heaps;
	long (*custom_ioctl)(struct ion_client *client, unsigned int cmd,
			     unsigned long arg);
	struct rb_root clients;
	struct dentry *debug_root;
};

/**
 * struct ion_client - a process/hw block local address space
 * @node: node in the tree of all clients
 * @dev: backpointer to ion device
 * @handles: an rb tree of all the handles in this client
 * @lock: lock protecting the tree of handles
 * @name: used for debugging
 * @task: used for debugging
 * @pid: pid of the client, used for debugging
 * @debug_root: this client's dentry in the device's debugfs root
 *
 * A client represents a list of buffers this client may access.
 * The mutex stored here is used to protect both the tree of handles
 * and the handles themselves, and should be held while modifying either.
 */
struct ion_client {
	struct rb_node node;
	struct ion_device *dev;
	struct rb_root handles;
	struct mutex lock;
	const char *name;
	struct task_struct *task;
	pid_t pid;
	struct dentry *debug_root;
};

/**
 * struct ion_handle - a client local reference to a buffer
 * @ref: reference count
 * @client: back pointer to the client the buffer resides in
 * @buffer: pointer to the buffer
 * @node: node in the client's handle rbtree
 * @kmap_cnt: count of times this client has mapped to kernel
 *
 * Modifications to node and kmap_cnt should be protected by the lock in
 * the client.  Other fields are never changed after initialization.
 */
struct ion_handle {
	struct kref ref;
	struct ion_client *client;
	struct ion_buffer *buffer;
	struct rb_node node;
	unsigned int kmap_cnt;
};

bool ion_buffer_fault_user_mappings(struct ion_buffer *buffer)
{
	return ((buffer->flags & ION_FLAG_CACHED) &&
		!(buffer->flags & ION_FLAG_CACHED_NEEDS_SYNC));
}

bool ion_buffer_cached(struct ion_buffer *buffer)
{
	return !!(buffer->flags & ION_FLAG_CACHED);
}

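/*
 * Note on the flag combinations above (a summary derived from the two
 * predicates, not from any separate documentation): ION_FLAG_CACHED alone
 * requests a cached buffer whose user mappings are faulted in page by page
 * so the core can track dirty pages, while ION_FLAG_CACHED |
 * ION_FLAG_CACHED_NEEDS_SYNC requests a cached buffer that is mapped up
 * front and left to the caller to synchronize explicitly.
 */
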
/* this function should only be called while dev->lock is held */
static void ion_buffer_add(struct ion_device *dev,
			   struct ion_buffer *buffer)
{
	struct rb_node **p = &dev->buffers.rb_node;
	struct rb_node *parent = NULL;
	struct ion_buffer *entry;

	while (*p) {
		parent = *p;
		entry = rb_entry(parent, struct ion_buffer, node);

		if (buffer < entry) {
			p = &(*p)->rb_left;
		} else if (buffer > entry) {
			p = &(*p)->rb_right;
		} else {
			pr_err("%s: buffer already found.\n", __func__);
			BUG();
		}
	}

	rb_link_node(&buffer->node, parent, p);
	rb_insert_color(&buffer->node, &dev->buffers);
}

static int ion_buffer_alloc_dirty(struct ion_buffer *buffer);

/* this function should only be called while dev->lock is held */
static struct ion_buffer *ion_buffer_create(struct ion_heap *heap,
					    struct ion_device *dev,
					    unsigned long len,
					    unsigned long align,
					    unsigned long flags)
{
	struct ion_buffer *buffer;
	struct sg_table *table;
	struct scatterlist *sg;
	int i, ret;

	buffer = kzalloc(sizeof(struct ion_buffer), GFP_KERNEL);
	if (!buffer)
		return ERR_PTR(-ENOMEM);

	buffer->heap = heap;
	buffer->flags = flags;
	kref_init(&buffer->ref);

	ret = heap->ops->allocate(heap, buffer, len, align, flags);
	if (ret) {
		kfree(buffer);
		return ERR_PTR(ret);
	}

	buffer->dev = dev;
	buffer->size = len;

	table = heap->ops->map_dma(heap, buffer);
	if (IS_ERR_OR_NULL(table)) {
		heap->ops->free(buffer);
		kfree(buffer);
		return ERR_PTR(PTR_ERR(table));
	}
	buffer->sg_table = table;
	if (ion_buffer_fault_user_mappings(buffer)) {
		for_each_sg(buffer->sg_table->sgl, sg, buffer->sg_table->nents,
			    i) {
			if (sg_dma_len(sg) == PAGE_SIZE)
				continue;
			pr_err("%s: cached mappings that will be faulted in must have pagewise sg_lists\n",
			       __func__);
			ret = -EINVAL;
			goto err;
		}

		ret = ion_buffer_alloc_dirty(buffer);
		if (ret)
			goto err;
	}

	INIT_LIST_HEAD(&buffer->vmas);
	mutex_init(&buffer->lock);
	/*
	 * this will set up dma addresses for the sglist -- it is not
	 * technically correct as per the dma api -- a specific
	 * device isn't really taking ownership here.  However, in practice on
	 * our systems the only dma_address space is physical addresses.
	 * Additionally, we can't afford the overhead of invalidating every
	 * allocation via dma_map_sg.  The implicit contract here is that
	 * memory coming from the heaps is ready for dma, ie if it has a
	 * cached mapping that mapping has been invalidated.
	 */
	for_each_sg(buffer->sg_table->sgl, sg, buffer->sg_table->nents, i)
		sg_dma_address(sg) = sg_phys(sg);
	mutex_lock(&dev->buffer_lock);
	ion_buffer_add(dev, buffer);
	mutex_unlock(&dev->buffer_lock);
	return buffer;

err:
	heap->ops->unmap_dma(heap, buffer);
	heap->ops->free(buffer);
	kfree(buffer);
	return ERR_PTR(ret);
}

static void ion_buffer_destroy(struct kref *kref)
{
	struct ion_buffer *buffer = container_of(kref, struct ion_buffer, ref);
	struct ion_device *dev = buffer->dev;

	if (WARN_ON(buffer->kmap_cnt > 0))
		buffer->heap->ops->unmap_kernel(buffer->heap, buffer);
	buffer->heap->ops->unmap_dma(buffer->heap, buffer);
	buffer->heap->ops->free(buffer);
	mutex_lock(&dev->buffer_lock);
	rb_erase(&buffer->node, &dev->buffers);
	mutex_unlock(&dev->buffer_lock);
	if (buffer->flags & ION_FLAG_CACHED)
		kfree(buffer->dirty);
	kfree(buffer);
}

static void ion_buffer_get(struct ion_buffer *buffer)
{
	kref_get(&buffer->ref);
}

static int ion_buffer_put(struct ion_buffer *buffer)
{
	return kref_put(&buffer->ref, ion_buffer_destroy);
}

static void ion_buffer_add_to_handle(struct ion_buffer *buffer)
{
	mutex_lock(&buffer->lock);
	buffer->handle_count++;
	mutex_unlock(&buffer->lock);
}

static void ion_buffer_remove_from_handle(struct ion_buffer *buffer)
{
	/*
	 * when a buffer is removed from a handle, if it is not in
	 * any other handles, copy the taskcomm and the pid of the
	 * process it's being removed from into the buffer.  At this
	 * point there will be no way to track what processes this buffer is
	 * being used by, as it only exists as a dma_buf file descriptor.
	 * The taskcomm and pid can provide a debug hint as to where this fd
	 * is in the system.
	 */
	mutex_lock(&buffer->lock);
	buffer->handle_count--;
	BUG_ON(buffer->handle_count < 0);
	if (!buffer->handle_count) {
		struct task_struct *task;

		task = current->group_leader;
		get_task_comm(buffer->task_comm, task);
		buffer->pid = task_pid_nr(task);
	}
	mutex_unlock(&buffer->lock);
}

static struct ion_handle *ion_handle_create(struct ion_client *client,
					    struct ion_buffer *buffer)
{
	struct ion_handle *handle;

	handle = kzalloc(sizeof(struct ion_handle), GFP_KERNEL);
	if (!handle)
		return ERR_PTR(-ENOMEM);
	kref_init(&handle->ref);
	RB_CLEAR_NODE(&handle->node);
	handle->client = client;
	ion_buffer_get(buffer);
	ion_buffer_add_to_handle(buffer);
	handle->buffer = buffer;

	return handle;
}

static void ion_handle_kmap_put(struct ion_handle *);

static void ion_handle_destroy(struct kref *kref)
{
	struct ion_handle *handle = container_of(kref, struct ion_handle, ref);
	struct ion_client *client = handle->client;
	struct ion_buffer *buffer = handle->buffer;

	mutex_lock(&buffer->lock);
	while (handle->kmap_cnt)
		ion_handle_kmap_put(handle);
	mutex_unlock(&buffer->lock);

	if (!RB_EMPTY_NODE(&handle->node))
		rb_erase(&handle->node, &client->handles);

	ion_buffer_remove_from_handle(buffer);
	ion_buffer_put(buffer);

	kfree(handle);
}

struct ion_buffer *ion_handle_buffer(struct ion_handle *handle)
{
	return handle->buffer;
}

static void ion_handle_get(struct ion_handle *handle)
{
	kref_get(&handle->ref);
}

static int ion_handle_put(struct ion_handle *handle)
{
	return kref_put(&handle->ref, ion_handle_destroy);
}

static struct ion_handle *ion_handle_lookup(struct ion_client *client,
					    struct ion_buffer *buffer)
{
	struct rb_node *n;

	for (n = rb_first(&client->handles); n; n = rb_next(n)) {
		struct ion_handle *handle = rb_entry(n, struct ion_handle,
						     node);
		if (handle->buffer == buffer)
			return handle;
	}
	return NULL;
}

static bool ion_handle_validate(struct ion_client *client,
				struct ion_handle *handle)
{
	struct rb_node *n = client->handles.rb_node;

	while (n) {
		struct ion_handle *handle_node = rb_entry(n, struct ion_handle,
							  node);
		if (handle < handle_node)
			n = n->rb_left;
		else if (handle > handle_node)
			n = n->rb_right;
		else
			return true;
	}
	return false;
}

static void ion_handle_add(struct ion_client *client,
			   struct ion_handle *handle)
{
	struct rb_node **p = &client->handles.rb_node;
	struct rb_node *parent = NULL;
	struct ion_handle *entry;

	while (*p) {
		parent = *p;
		entry = rb_entry(parent, struct ion_handle, node);

		if (handle < entry)
			p = &(*p)->rb_left;
		else if (handle > entry)
			p = &(*p)->rb_right;
		else
			WARN(1, "%s: handle already found.", __func__);
	}

	rb_link_node(&handle->node, parent, p);
	rb_insert_color(&handle->node, &client->handles);
}

struct ion_handle *ion_alloc(struct ion_client *client, size_t len,
			     size_t align, unsigned int heap_id_mask,
			     unsigned int flags)
{
	struct ion_handle *handle;
	struct ion_device *dev = client->dev;
	struct ion_buffer *buffer = NULL;
	struct ion_heap *heap;

	pr_debug("%s: len %zu align %zu heap_id_mask %u flags %x\n", __func__,
		 len, align, heap_id_mask, flags);
	/*
	 * traverse the list of heaps available in this system in priority
	 * order.  If the heap type is supported by the client, and matches the
	 * request of the caller allocate from it.  Repeat until allocate has
	 * succeeded or all heaps have been tried
	 */
	if (WARN_ON(!len))
		return ERR_PTR(-EINVAL);

	len = PAGE_ALIGN(len);

	down_read(&dev->lock);
	plist_for_each_entry(heap, &dev->heaps, node) {
		/* if the caller didn't specify this heap id */
		if (!((1 << heap->id) & heap_id_mask))
			continue;
		buffer = ion_buffer_create(heap, dev, len, align, flags);
		if (!IS_ERR_OR_NULL(buffer))
			break;
	}
	up_read(&dev->lock);

	if (buffer == NULL)
		return ERR_PTR(-ENODEV);

	if (IS_ERR(buffer))
		return ERR_PTR(PTR_ERR(buffer));

	handle = ion_handle_create(client, buffer);

	/*
	 * ion_buffer_create will create a buffer with a ref_cnt of 1,
	 * and ion_handle_create will take a second reference, drop one here
	 */
	ion_buffer_put(buffer);

	if (!IS_ERR(handle)) {
		mutex_lock(&client->lock);
		ion_handle_add(client, handle);
		mutex_unlock(&client->lock);
	}

	return handle;
}
EXPORT_SYMBOL(ion_alloc);

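/*
 * Illustrative in-kernel usage sketch (not part of the original driver; the
 * heap id and error handling shown are assumptions -- heap ids are assigned
 * by the platform, commonly matching the heap type as used here):
 *
 *	struct ion_handle *handle;
 *
 *	handle = ion_alloc(client, SZ_1M, PAGE_SIZE,
 *			   1 << ION_HEAP_TYPE_SYSTEM, ION_FLAG_CACHED);
 *	if (IS_ERR(handle))
 *		return PTR_ERR(handle);
 *	...
 *	ion_free(client, handle);
 */
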
void ion_free(struct ion_client *client, struct ion_handle *handle)
{
	bool valid_handle;

	BUG_ON(client != handle->client);

	mutex_lock(&client->lock);
	valid_handle = ion_handle_validate(client, handle);

	if (!valid_handle) {
		WARN(1, "%s: invalid handle passed to free.\n", __func__);
		mutex_unlock(&client->lock);
		return;
	}
	ion_handle_put(handle);
	mutex_unlock(&client->lock);
}
EXPORT_SYMBOL(ion_free);

int ion_phys(struct ion_client *client, struct ion_handle *handle,
	     ion_phys_addr_t *addr, size_t *len)
{
	struct ion_buffer *buffer;
	int ret;

	mutex_lock(&client->lock);
	if (!ion_handle_validate(client, handle)) {
		mutex_unlock(&client->lock);
		return -EINVAL;
	}

	buffer = handle->buffer;

	if (!buffer->heap->ops->phys) {
		pr_err("%s: ion_phys is not implemented by this heap.\n",
		       __func__);
		mutex_unlock(&client->lock);
		return -ENODEV;
	}
	mutex_unlock(&client->lock);
	ret = buffer->heap->ops->phys(buffer->heap, buffer, addr, len);
	return ret;
}
EXPORT_SYMBOL(ion_phys);

static void *ion_buffer_kmap_get(struct ion_buffer *buffer)
{
	void *vaddr;

	if (buffer->kmap_cnt) {
		buffer->kmap_cnt++;
		return buffer->vaddr;
	}
	vaddr = buffer->heap->ops->map_kernel(buffer->heap, buffer);
	if (IS_ERR_OR_NULL(vaddr))
		return vaddr;
	buffer->vaddr = vaddr;
	buffer->kmap_cnt++;
	return vaddr;
}

static void *ion_handle_kmap_get(struct ion_handle *handle)
{
	struct ion_buffer *buffer = handle->buffer;
	void *vaddr;

	if (handle->kmap_cnt) {
		handle->kmap_cnt++;
		return buffer->vaddr;
	}
	vaddr = ion_buffer_kmap_get(buffer);
	if (IS_ERR_OR_NULL(vaddr))
		return vaddr;
	handle->kmap_cnt++;
	return vaddr;
}

static void ion_buffer_kmap_put(struct ion_buffer *buffer)
{
	buffer->kmap_cnt--;
	if (!buffer->kmap_cnt) {
		buffer->heap->ops->unmap_kernel(buffer->heap, buffer);
		buffer->vaddr = NULL;
	}
}

static void ion_handle_kmap_put(struct ion_handle *handle)
{
	struct ion_buffer *buffer = handle->buffer;

	handle->kmap_cnt--;
	if (!handle->kmap_cnt)
		ion_buffer_kmap_put(buffer);
}

void *ion_map_kernel(struct ion_client *client, struct ion_handle *handle)
{
	struct ion_buffer *buffer;
	void *vaddr;

	mutex_lock(&client->lock);
	if (!ion_handle_validate(client, handle)) {
		pr_err("%s: invalid handle passed to map_kernel.\n",
		       __func__);
		mutex_unlock(&client->lock);
		return ERR_PTR(-EINVAL);
	}

	buffer = handle->buffer;

	if (!handle->buffer->heap->ops->map_kernel) {
		pr_err("%s: map_kernel is not implemented by this heap.\n",
		       __func__);
		mutex_unlock(&client->lock);
		return ERR_PTR(-ENODEV);
	}

	mutex_lock(&buffer->lock);
	vaddr = ion_handle_kmap_get(handle);
	mutex_unlock(&buffer->lock);
	mutex_unlock(&client->lock);
	return vaddr;
}
EXPORT_SYMBOL(ion_map_kernel);

void ion_unmap_kernel(struct ion_client *client, struct ion_handle *handle)
{
	struct ion_buffer *buffer;

	mutex_lock(&client->lock);
	buffer = handle->buffer;
	mutex_lock(&buffer->lock);
	ion_handle_kmap_put(handle);
	mutex_unlock(&buffer->lock);
	mutex_unlock(&client->lock);
}
EXPORT_SYMBOL(ion_unmap_kernel);

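/*
 * Illustrative sketch of the kernel mapping API above (not from the original
 * source; the memset and the handle come from the ion_alloc() example
 * earlier):
 *
 *	void *vaddr = ion_map_kernel(client, handle);
 *
 *	if (IS_ERR_OR_NULL(vaddr))
 *		return -EINVAL;
 *	memset(vaddr, 0, SZ_1M);
 *	ion_unmap_kernel(client, handle);
 *
 * Mappings are reference counted per handle and per buffer, so nested
 * map/unmap pairs are safe as long as they balance.
 */
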
static int ion_debug_client_show(struct seq_file *s, void *unused)
{
	struct ion_client *client = s->private;
	struct rb_node *n;
	size_t sizes[ION_NUM_HEAP_IDS] = {0};
	const char *names[ION_NUM_HEAP_IDS] = {0};
	int i;

	mutex_lock(&client->lock);
	for (n = rb_first(&client->handles); n; n = rb_next(n)) {
		struct ion_handle *handle = rb_entry(n, struct ion_handle,
						     node);
		unsigned int id = handle->buffer->heap->id;

		if (!names[id])
			names[id] = handle->buffer->heap->name;
		sizes[id] += handle->buffer->size;
	}
	mutex_unlock(&client->lock);

	seq_printf(s, "%16.16s: %16.16s\n", "heap_name", "size_in_bytes");
	for (i = 0; i < ION_NUM_HEAP_IDS; i++) {
		if (!names[i])
			continue;
		seq_printf(s, "%16.16s: %16zu\n", names[i], sizes[i]);
	}
	return 0;
}

static int ion_debug_client_open(struct inode *inode, struct file *file)
{
	return single_open(file, ion_debug_client_show, inode->i_private);
}

static const struct file_operations debug_client_fops = {
	.open = ion_debug_client_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};

struct ion_client *ion_client_create(struct ion_device *dev,
				     const char *name)
{
	struct ion_client *client;
	struct task_struct *task;
	struct rb_node **p;
	struct rb_node *parent = NULL;
	struct ion_client *entry;
	char debug_name[64];
	pid_t pid;

	get_task_struct(current->group_leader);
	task_lock(current->group_leader);
	pid = task_pid_nr(current->group_leader);
	/*
	 * don't bother to store task struct for kernel threads,
	 * they can't be killed anyway
	 */
	if (current->group_leader->flags & PF_KTHREAD) {
		put_task_struct(current->group_leader);
		task = NULL;
	} else {
		task = current->group_leader;
	}
	task_unlock(current->group_leader);

	client = kzalloc(sizeof(struct ion_client), GFP_KERNEL);
	if (!client) {
		if (task)
			put_task_struct(current->group_leader);
		return ERR_PTR(-ENOMEM);
	}

	client->dev = dev;
	client->handles = RB_ROOT;
	mutex_init(&client->lock);
	client->name = name;
	client->task = task;
	client->pid = pid;

	down_write(&dev->lock);
	p = &dev->clients.rb_node;
	while (*p) {
		parent = *p;
		entry = rb_entry(parent, struct ion_client, node);

		if (client < entry)
			p = &(*p)->rb_left;
		else if (client > entry)
			p = &(*p)->rb_right;
	}
	rb_link_node(&client->node, parent, p);
	rb_insert_color(&client->node, &dev->clients);

	snprintf(debug_name, 64, "%u", client->pid);
	client->debug_root = debugfs_create_file(debug_name, 0664,
						 dev->debug_root, client,
						 &debug_client_fops);
	up_write(&dev->lock);

	return client;
}
EXPORT_SYMBOL(ion_client_create);

void ion_client_destroy(struct ion_client *client)
{
	struct ion_device *dev = client->dev;
	struct rb_node *n;

	pr_debug("%s: %d\n", __func__, __LINE__);
	while ((n = rb_first(&client->handles))) {
		struct ion_handle *handle = rb_entry(n, struct ion_handle,
						     node);
		ion_handle_destroy(&handle->ref);
	}
	down_write(&dev->lock);
	if (client->task)
		put_task_struct(client->task);
	rb_erase(&client->node, &dev->clients);
	debugfs_remove_recursive(client->debug_root);
	up_write(&dev->lock);

	kfree(client);
}
EXPORT_SYMBOL(ion_client_destroy);

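/*
 * Illustrative client lifecycle for an in-kernel user of this API (a sketch,
 * not part of the original driver; "my-driver" and the ion_device pointer
 * idev are assumptions):
 *
 *	struct ion_client *client;
 *
 *	client = ion_client_create(idev, "my-driver");
 *	if (IS_ERR(client))
 *		return PTR_ERR(client);
 *	...allocate and use handles...
 *	ion_client_destroy(client);
 *
 * Destroying a client tears down any handles still in its rbtree, dropping
 * their buffer references as ion_handle_destroy() runs for each.
 */
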
struct sg_table *ion_sg_table(struct ion_client *client,
			      struct ion_handle *handle)
{
	struct ion_buffer *buffer;
	struct sg_table *table;

	mutex_lock(&client->lock);
	if (!ion_handle_validate(client, handle)) {
		pr_err("%s: invalid handle passed to map_dma.\n",
		       __func__);
		mutex_unlock(&client->lock);
		return ERR_PTR(-EINVAL);
	}
	buffer = handle->buffer;
	table = buffer->sg_table;
	mutex_unlock(&client->lock);
	return table;
}
EXPORT_SYMBOL(ion_sg_table);

static void ion_buffer_sync_for_device(struct ion_buffer *buffer,
				       struct device *dev,
				       enum dma_data_direction direction);

static struct sg_table *ion_map_dma_buf(struct dma_buf_attachment *attachment,
					enum dma_data_direction direction)
{
	struct dma_buf *dmabuf = attachment->dmabuf;
	struct ion_buffer *buffer = dmabuf->priv;

	ion_buffer_sync_for_device(buffer, attachment->dev, direction);
	return buffer->sg_table;
}

static void ion_unmap_dma_buf(struct dma_buf_attachment *attachment,
			      struct sg_table *table,
			      enum dma_data_direction direction)
{
}

static int ion_buffer_alloc_dirty(struct ion_buffer *buffer)
{
	unsigned long pages = buffer->sg_table->nents;
	unsigned long length = (pages + BITS_PER_LONG - 1) / BITS_PER_LONG;

	buffer->dirty = kzalloc(length * sizeof(unsigned long), GFP_KERNEL);
	if (!buffer->dirty)
		return -ENOMEM;
	return 0;
}

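/*
 * Note (added for clarity): the length computation above is the usual
 * round-up division, equivalent to BITS_TO_LONGS(pages) from
 * <linux/bitops.h>; e.g. 65 pages on a 64-bit machine need two longs of
 * dirty bits.
 */
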
struct ion_vma_list {
	struct list_head list;
	struct vm_area_struct *vma;
};

static void ion_buffer_sync_for_device(struct ion_buffer *buffer,
				       struct device *dev,
				       enum dma_data_direction dir)
{
	struct scatterlist *sg;
	int i;
	struct ion_vma_list *vma_list;

	pr_debug("%s: syncing for device %s\n", __func__,
		 dev ? dev_name(dev) : "null");

	if (!ion_buffer_fault_user_mappings(buffer))
		return;

	mutex_lock(&buffer->lock);
	for_each_sg(buffer->sg_table->sgl, sg, buffer->sg_table->nents, i) {
		if (!test_bit(i, buffer->dirty))
			continue;
		dma_sync_sg_for_device(dev, sg, 1, dir);
		clear_bit(i, buffer->dirty);
	}
	list_for_each_entry(vma_list, &buffer->vmas, list) {
		struct vm_area_struct *vma = vma_list->vma;

		zap_page_range(vma, vma->vm_start, vma->vm_end - vma->vm_start,
			       NULL);
	}
	mutex_unlock(&buffer->lock);
}

int ion_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct ion_buffer *buffer = vma->vm_private_data;
	struct scatterlist *sg;
	int i;

	mutex_lock(&buffer->lock);
	set_bit(vmf->pgoff, buffer->dirty);

	for_each_sg(buffer->sg_table->sgl, sg, buffer->sg_table->nents, i) {
		if (i != vmf->pgoff)
			continue;
		dma_sync_sg_for_cpu(NULL, sg, 1, DMA_BIDIRECTIONAL);
		vm_insert_page(vma, (unsigned long)vmf->virtual_address,
			       sg_page(sg));
		break;
	}
	mutex_unlock(&buffer->lock);
	return VM_FAULT_NOPAGE;
}

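/*
 * Summary of the fault-based sync protocol implemented by the two functions
 * above (an explanatory note restating the code, not new behavior):
 * userspace pages of a faulted buffer are inserted one at a time from
 * ion_vm_fault(), which syncs each page for the cpu and marks it dirty;
 * ion_buffer_sync_for_device() then syncs only the dirty pages back to the
 * device and zaps all user mappings so the next cpu access faults again.
 */
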
static void ion_vm_open(struct vm_area_struct *vma)
{
	struct ion_buffer *buffer = vma->vm_private_data;
	struct ion_vma_list *vma_list;

	vma_list = kmalloc(sizeof(struct ion_vma_list), GFP_KERNEL);
	if (!vma_list)
		return;
	vma_list->vma = vma;
	mutex_lock(&buffer->lock);
	list_add(&vma_list->list, &buffer->vmas);
	mutex_unlock(&buffer->lock);
	pr_debug("%s: adding %p\n", __func__, vma);
}

static void ion_vm_close(struct vm_area_struct *vma)
{
	struct ion_buffer *buffer = vma->vm_private_data;
	struct ion_vma_list *vma_list, *tmp;

	pr_debug("%s\n", __func__);
	mutex_lock(&buffer->lock);
	list_for_each_entry_safe(vma_list, tmp, &buffer->vmas, list) {
		if (vma_list->vma != vma)
			continue;
		list_del(&vma_list->list);
		kfree(vma_list);
		pr_debug("%s: deleting %p\n", __func__, vma);
		break;
	}
	mutex_unlock(&buffer->lock);
}

struct vm_operations_struct ion_vma_ops = {
	.open = ion_vm_open,
	.close = ion_vm_close,
	.fault = ion_vm_fault,
};

static int ion_mmap(struct dma_buf *dmabuf, struct vm_area_struct *vma)
{
	struct ion_buffer *buffer = dmabuf->priv;
	int ret = 0;

	if (!buffer->heap->ops->map_user) {
		pr_err("%s: this heap does not define a method for mapping to userspace\n",
		       __func__);
		return -EINVAL;
	}

	if (ion_buffer_fault_user_mappings(buffer)) {
		vma->vm_private_data = buffer;
		vma->vm_ops = &ion_vma_ops;
		ion_vm_open(vma);
		return 0;
	}

	if (!(buffer->flags & ION_FLAG_CACHED))
		vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);

	mutex_lock(&buffer->lock);
	/* now map it to userspace */
	ret = buffer->heap->ops->map_user(buffer->heap, buffer, vma);
	mutex_unlock(&buffer->lock);

	if (ret)
		pr_err("%s: failure mapping buffer to userspace\n",
		       __func__);

	return ret;
}

static void ion_dma_buf_release(struct dma_buf *dmabuf)
{
	struct ion_buffer *buffer = dmabuf->priv;

	ion_buffer_put(buffer);
}

static void *ion_dma_buf_kmap(struct dma_buf *dmabuf, unsigned long offset)
{
	struct ion_buffer *buffer = dmabuf->priv;

	return buffer->vaddr + offset * PAGE_SIZE;
}

static void ion_dma_buf_kunmap(struct dma_buf *dmabuf, unsigned long offset,
			       void *ptr)
{
}

static int ion_dma_buf_begin_cpu_access(struct dma_buf *dmabuf, size_t start,
					size_t len,
					enum dma_data_direction direction)
{
	struct ion_buffer *buffer = dmabuf->priv;
	void *vaddr;

	if (!buffer->heap->ops->map_kernel) {
		pr_err("%s: map kernel is not implemented by this heap.\n",
		       __func__);
		return -ENODEV;
	}

	mutex_lock(&buffer->lock);
	vaddr = ion_buffer_kmap_get(buffer);
	mutex_unlock(&buffer->lock);
	if (IS_ERR(vaddr))
		return PTR_ERR(vaddr);
	if (!vaddr)
		return -ENOMEM;
	return 0;
}

static void ion_dma_buf_end_cpu_access(struct dma_buf *dmabuf, size_t start,
				       size_t len,
				       enum dma_data_direction direction)
{
	struct ion_buffer *buffer = dmabuf->priv;

	mutex_lock(&buffer->lock);
	ion_buffer_kmap_put(buffer);
	mutex_unlock(&buffer->lock);
}

struct dma_buf_ops dma_buf_ops = {
	.map_dma_buf = ion_map_dma_buf,
	.unmap_dma_buf = ion_unmap_dma_buf,
	.mmap = ion_mmap,
	.release = ion_dma_buf_release,
	.begin_cpu_access = ion_dma_buf_begin_cpu_access,
	.end_cpu_access = ion_dma_buf_end_cpu_access,
	.kmap_atomic = ion_dma_buf_kmap,
	.kunmap_atomic = ion_dma_buf_kunmap,
	.kmap = ion_dma_buf_kmap,
	.kunmap = ion_dma_buf_kunmap,
};

int ion_share_dma_buf(struct ion_client *client, struct ion_handle *handle)
{
	struct ion_buffer *buffer;
	struct dma_buf *dmabuf;
	bool valid_handle;
	int fd;

	mutex_lock(&client->lock);
	valid_handle = ion_handle_validate(client, handle);
	mutex_unlock(&client->lock);
	if (!valid_handle) {
		WARN(1, "%s: invalid handle passed to share.\n", __func__);
		return -EINVAL;
	}

	buffer = handle->buffer;
	ion_buffer_get(buffer);
	dmabuf = dma_buf_export(buffer, &dma_buf_ops, buffer->size, O_RDWR);
	if (IS_ERR(dmabuf)) {
		ion_buffer_put(buffer);
		return PTR_ERR(dmabuf);
	}
	fd = dma_buf_fd(dmabuf, O_CLOEXEC);
	if (fd < 0)
		dma_buf_put(dmabuf);

	return fd;
}
EXPORT_SYMBOL(ion_share_dma_buf);

struct ion_handle *ion_import_dma_buf(struct ion_client *client, int fd)
{
	struct dma_buf *dmabuf;
	struct ion_buffer *buffer;
	struct ion_handle *handle;

	dmabuf = dma_buf_get(fd);
	if (IS_ERR_OR_NULL(dmabuf))
		return ERR_PTR(PTR_ERR(dmabuf));

	/* if this memory came from ion */
	if (dmabuf->ops != &dma_buf_ops) {
		pr_err("%s: can not import dmabuf from another exporter\n",
		       __func__);
		dma_buf_put(dmabuf);
		return ERR_PTR(-EINVAL);
	}
	buffer = dmabuf->priv;

	mutex_lock(&client->lock);
	/* if a handle exists for this buffer just take a reference to it */
	handle = ion_handle_lookup(client, buffer);
	if (!IS_ERR_OR_NULL(handle)) {
		ion_handle_get(handle);
		goto end;
	}
	handle = ion_handle_create(client, buffer);
	if (IS_ERR_OR_NULL(handle))
		goto end;
	ion_handle_add(client, handle);
end:
	mutex_unlock(&client->lock);
	dma_buf_put(dmabuf);
	return handle;
}
EXPORT_SYMBOL(ion_import_dma_buf);

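/*
 * Illustrative sharing flow between two clients (a sketch, not original
 * code; clients "a" and "b" and the handle are assumptions):
 *
 *	int fd = ion_share_dma_buf(client_a, handle_a);
 *	struct ion_handle *handle_b = ion_import_dma_buf(client_b, fd);
 *
 * The fd is an ordinary dma_buf fd, so it can also be passed to another
 * process over a unix socket and imported there, or attached directly by a
 * device driver via the standard dma_buf APIs.
 */
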
static int ion_sync_for_device(struct ion_client *client, int fd)
{
	struct dma_buf *dmabuf;
	struct ion_buffer *buffer;

	dmabuf = dma_buf_get(fd);
	if (IS_ERR_OR_NULL(dmabuf))
		return PTR_ERR(dmabuf);

	/* if this memory came from ion */
	if (dmabuf->ops != &dma_buf_ops) {
		pr_err("%s: can not sync dmabuf from another exporter\n",
		       __func__);
		dma_buf_put(dmabuf);
		return -EINVAL;
	}
	buffer = dmabuf->priv;

	dma_sync_sg_for_device(NULL, buffer->sg_table->sgl,
			       buffer->sg_table->nents, DMA_BIDIRECTIONAL);
	dma_buf_put(dmabuf);
	return 0;
}

static long ion_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
	struct ion_client *client = filp->private_data;

	switch (cmd) {
	case ION_IOC_ALLOC:
	{
		struct ion_allocation_data data;

		if (copy_from_user(&data, (void __user *)arg, sizeof(data)))
			return -EFAULT;
		data.handle = ion_alloc(client, data.len, data.align,
					data.heap_id_mask, data.flags);

		if (IS_ERR(data.handle))
			return PTR_ERR(data.handle);

		if (copy_to_user((void __user *)arg, &data, sizeof(data))) {
			ion_free(client, data.handle);
			return -EFAULT;
		}
		break;
	}
	case ION_IOC_FREE:
	{
		struct ion_handle_data data;
		bool valid;

		if (copy_from_user(&data, (void __user *)arg,
				   sizeof(struct ion_handle_data)))
			return -EFAULT;
		mutex_lock(&client->lock);
		valid = ion_handle_validate(client, data.handle);
		mutex_unlock(&client->lock);
		if (!valid)
			return -EINVAL;
		ion_free(client, data.handle);
		break;
	}
	case ION_IOC_SHARE:
	case ION_IOC_MAP:
	{
		struct ion_fd_data data;

		if (copy_from_user(&data, (void __user *)arg, sizeof(data)))
			return -EFAULT;
		data.fd = ion_share_dma_buf(client, data.handle);
		if (copy_to_user((void __user *)arg, &data, sizeof(data)))
			return -EFAULT;
		if (data.fd < 0)
			return data.fd;
		break;
	}
	case ION_IOC_IMPORT:
	{
		struct ion_fd_data data;
		int ret = 0;

		if (copy_from_user(&data, (void __user *)arg,
				   sizeof(struct ion_fd_data)))
			return -EFAULT;
		data.handle = ion_import_dma_buf(client, data.fd);
		if (IS_ERR(data.handle)) {
			ret = PTR_ERR(data.handle);
			data.handle = NULL;
		}
		if (copy_to_user((void __user *)arg, &data,
				 sizeof(struct ion_fd_data)))
			return -EFAULT;
		if (ret < 0)
			return ret;
		break;
	}
	case ION_IOC_SYNC:
	{
		struct ion_fd_data data;

		if (copy_from_user(&data, (void __user *)arg,
				   sizeof(struct ion_fd_data)))
			return -EFAULT;
		ion_sync_for_device(client, data.fd);
		break;
	}
	case ION_IOC_CUSTOM:
	{
		struct ion_device *dev = client->dev;
		struct ion_custom_data data;

		if (!dev->custom_ioctl)
			return -ENOTTY;
		if (copy_from_user(&data, (void __user *)arg,
				   sizeof(struct ion_custom_data)))
			return -EFAULT;
		return dev->custom_ioctl(client, data.cmd, data.arg);
	}
	default:
		return -ENOTTY;
	}
	return 0;
}

static int ion_release(struct inode *inode, struct file *file)
{
	struct ion_client *client = file->private_data;

	pr_debug("%s: %d\n", __func__, __LINE__);
	ion_client_destroy(client);
	return 0;
}

static int ion_open(struct inode *inode, struct file *file)
{
	struct miscdevice *miscdev = file->private_data;
	struct ion_device *dev = container_of(miscdev, struct ion_device, dev);
	struct ion_client *client;

	pr_debug("%s: %d\n", __func__, __LINE__);
	client = ion_client_create(dev, "user");
	if (IS_ERR_OR_NULL(client))
		return PTR_ERR(client);
	file->private_data = client;

	return 0;
}

static const struct file_operations ion_fops = {
	.owner = THIS_MODULE,
	.open = ion_open,
	.release = ion_release,
	.unlocked_ioctl = ion_ioctl,
};

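/*
 * Illustrative userspace usage of the ioctl interface above (a sketch, not
 * part of the driver; assumes the uapi definitions from linux/ion.h, a heap
 * with id 0, and that error checking is elided for brevity):
 *
 *	int ion_fd = open("/dev/ion", O_RDONLY);
 *	struct ion_allocation_data alloc = {
 *		.len = 1024 * 1024,
 *		.align = 4096,
 *		.heap_id_mask = 1 << 0,
 *		.flags = ION_FLAG_CACHED,
 *	};
 *	struct ion_fd_data share;
 *
 *	ioctl(ion_fd, ION_IOC_ALLOC, &alloc);
 *	share.handle = alloc.handle;
 *	ioctl(ion_fd, ION_IOC_SHARE, &share);
 *	// share.fd is now a dma_buf fd that can be mmap()ed or passed on
 */
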
static size_t ion_debug_heap_total(struct ion_client *client,
				   unsigned int id)
{
	size_t size = 0;
	struct rb_node *n;

	mutex_lock(&client->lock);
	for (n = rb_first(&client->handles); n; n = rb_next(n)) {
		struct ion_handle *handle = rb_entry(n,
						     struct ion_handle,
						     node);
		if (handle->buffer->heap->id == id)
			size += handle->buffer->size;
	}
	mutex_unlock(&client->lock);
	return size;
}

static int ion_debug_heap_show(struct seq_file *s, void *unused)
{
	struct ion_heap *heap = s->private;
	struct ion_device *dev = heap->dev;
	struct rb_node *n;
	size_t total_size = 0;
	size_t total_orphaned_size = 0;

	seq_printf(s, "%16s %16s %16s\n", "client", "pid", "size");
	seq_printf(s, "----------------------------------------------------\n");

	for (n = rb_first(&dev->clients); n; n = rb_next(n)) {
		struct ion_client *client = rb_entry(n, struct ion_client,
						     node);
		size_t size = ion_debug_heap_total(client, heap->id);
		if (!size)
			continue;
		if (client->task) {
			char task_comm[TASK_COMM_LEN];

			get_task_comm(task_comm, client->task);
			seq_printf(s, "%16s %16u %16zu\n", task_comm,
				   client->pid, size);
		} else {
			seq_printf(s, "%16s %16u %16zu\n", client->name,
				   client->pid, size);
		}
	}
	seq_printf(s, "----------------------------------------------------\n");
	seq_printf(s, "orphaned allocations (info is from last known client):\n");
	mutex_lock(&dev->buffer_lock);
	for (n = rb_first(&dev->buffers); n; n = rb_next(n)) {
		struct ion_buffer *buffer = rb_entry(n, struct ion_buffer,
						     node);
		if (buffer->heap->id != heap->id)
			continue;
		total_size += buffer->size;
		if (!buffer->handle_count) {
			seq_printf(s, "%16s %16u %16zu %d %d\n",
				   buffer->task_comm, buffer->pid,
				   buffer->size, buffer->kmap_cnt,
				   atomic_read(&buffer->ref.refcount));
			total_orphaned_size += buffer->size;
		}
	}
	mutex_unlock(&dev->buffer_lock);
	seq_printf(s, "----------------------------------------------------\n");
	seq_printf(s, "%16s %16zu\n", "total orphaned",
		   total_orphaned_size);
	seq_printf(s, "%16s %16zu\n", "total ", total_size);
	seq_printf(s, "----------------------------------------------------\n");

	if (heap->debug_show)
		heap->debug_show(heap, s, unused);

	return 0;
}

static int ion_debug_heap_open(struct inode *inode, struct file *file)
{
	return single_open(file, ion_debug_heap_show, inode->i_private);
}

static const struct file_operations debug_heap_fops = {
	.open = ion_debug_heap_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};

void ion_device_add_heap(struct ion_device *dev, struct ion_heap *heap)
{
	if (!heap->ops->allocate || !heap->ops->free || !heap->ops->map_dma ||
	    !heap->ops->unmap_dma)
		pr_err("%s: can not add heap with invalid ops struct.\n",
		       __func__);

	heap->dev = dev;
	down_write(&dev->lock);
	/*
	 * use negative heap->id to reverse the priority -- when traversing
	 * the list later attempt higher id numbers first.  For example,
	 * heaps with ids 0, 1 and 2 sit at plist priorities 0, -1 and -2,
	 * so ion_alloc() tries the id 2 heap first.
	 */
	plist_node_init(&heap->node, -heap->id);
	plist_add(&heap->node, &dev->heaps);
	debugfs_create_file(heap->name, 0664, dev->debug_root, heap,
			    &debug_heap_fops);
	up_write(&dev->lock);
}

struct ion_device *ion_device_create(long (*custom_ioctl)
				     (struct ion_client *client,
				      unsigned int cmd,
				      unsigned long arg))
{
	struct ion_device *idev;
	int ret;

	idev = kzalloc(sizeof(struct ion_device), GFP_KERNEL);
	if (!idev)
		return ERR_PTR(-ENOMEM);

	idev->dev.minor = MISC_DYNAMIC_MINOR;
	idev->dev.name = "ion";
	idev->dev.fops = &ion_fops;
	idev->dev.parent = NULL;
	ret = misc_register(&idev->dev);
	if (ret) {
		pr_err("ion: failed to register misc device.\n");
		kfree(idev);
		return ERR_PTR(ret);
	}

	idev->debug_root = debugfs_create_dir("ion", NULL);
	if (IS_ERR_OR_NULL(idev->debug_root))
		pr_err("ion: failed to create debug files.\n");

	idev->custom_ioctl = custom_ioctl;
	idev->buffers = RB_ROOT;
	mutex_init(&idev->buffer_lock);
	init_rwsem(&idev->lock);
	plist_head_init(&idev->heaps);
	idev->clients = RB_ROOT;
	return idev;
}

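/*
 * Illustrative device bring-up for a platform driver (a sketch, not part of
 * this file; my_custom_ioctl and my_heap are assumptions, and callers would
 * typically construct heaps from platform data before adding them):
 *
 *	struct ion_device *idev = ion_device_create(my_custom_ioctl);
 *
 *	if (IS_ERR(idev))
 *		return PTR_ERR(idev);
 *	ion_device_add_heap(idev, my_heap);
 */
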
void ion_device_destroy(struct ion_device *dev)
{
	misc_deregister(&dev->dev);
	/* XXX need to free the heaps and clients ? */
	kfree(dev);
}

void __init ion_reserve(struct ion_platform_data *data)
{
	int i;

	for (i = 0; i < data->nr; i++) {
		if (data->heaps[i].size == 0)
			continue;

		if (data->heaps[i].base == 0) {
			phys_addr_t paddr;

			paddr = memblock_alloc_base(data->heaps[i].size,
						    data->heaps[i].align,
						    MEMBLOCK_ALLOC_ANYWHERE);
			if (!paddr) {
				pr_err("%s: error allocating memblock for heap %d\n",
				       __func__, i);
				continue;
			}
			data->heaps[i].base = paddr;
		} else {
			int ret = memblock_reserve(data->heaps[i].base,
						   data->heaps[i].size);
			if (ret)
				pr_err("memblock reserve of %zx@%lx failed\n",
				       data->heaps[i].size,
				       data->heaps[i].base);
		}
		pr_info("%s: %s reserved base %lx size %zu\n", __func__,
			data->heaps[i].name,
			data->heaps[i].base,
			data->heaps[i].size);
	}
}
1354 }