/*
 *
 * drivers/gpu/ion/ion.c
 *
 * Copyright (C) 2011 Google, Inc.
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 */

#include <linux/device.h>
#include <linux/file.h>
#include <linux/fs.h>
#include <linux/anon_inodes.h>
#include <linux/ion.h>
#include <linux/list.h>
#include <linux/memblock.h>
#include <linux/miscdevice.h>
#include <linux/export.h>
#include <linux/mm.h>
#include <linux/mm_types.h>
#include <linux/rbtree.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/seq_file.h>
#include <linux/uaccess.h>
#include <linux/debugfs.h>
#include <linux/dma-buf.h>

#include "ion_priv.h"
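
/*
 * Cache-handling helpers: a buffer allocated with ION_FLAG_CACHED but
 * without ION_FLAG_CACHED_NEEDS_SYNC has its user mappings faulted in
 * page by page so that dirty pages can be tracked and synced before
 * device access; any buffer with ION_FLAG_CACHED set is considered
 * CPU-cached.
 */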
bool ion_buffer_fault_user_mappings(struct ion_buffer *buffer)
{
        return ((buffer->flags & ION_FLAG_CACHED) &&
                !(buffer->flags & ION_FLAG_CACHED_NEEDS_SYNC));
}

bool ion_buffer_cached(struct ion_buffer *buffer)
{
        return !!(buffer->flags & ION_FLAG_CACHED);
}

/* this function should only be called while dev->lock is held */
static void ion_buffer_add(struct ion_device *dev,
                           struct ion_buffer *buffer)
{
        struct rb_node **p = &dev->buffers.rb_node;
        struct rb_node *parent = NULL;
        struct ion_buffer *entry;

        while (*p) {
                parent = *p;
                entry = rb_entry(parent, struct ion_buffer, node);

                if (buffer < entry) {
                        p = &(*p)->rb_left;
                } else if (buffer > entry) {
                        p = &(*p)->rb_right;
                } else {
                        pr_err("%s: buffer already found.", __func__);
                        BUG();
                }
        }

        rb_link_node(&buffer->node, parent, p);
        rb_insert_color(&buffer->node, &dev->buffers);
}

static int ion_buffer_alloc_dirty(struct ion_buffer *buffer);
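
/*
 * Allocate a new ion_buffer: ask the heap to allocate backing memory,
 * build the buffer's sg_table via the heap's map_dma op, and add the
 * buffer to the device rbtree.  Buffers whose user mappings are faulted
 * in page by page additionally get a dirty-page bitmap.
 */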
/* this function should only be called while dev->lock is held */
static struct ion_buffer *ion_buffer_create(struct ion_heap *heap,
                                     struct ion_device *dev,
                                     unsigned long len,
                                     unsigned long align,
                                     unsigned long flags)
{
        struct ion_buffer *buffer;
        struct sg_table *table;
        struct scatterlist *sg;
        int i, ret;

        buffer = kzalloc(sizeof(struct ion_buffer), GFP_KERNEL);
        if (!buffer)
                return ERR_PTR(-ENOMEM);

        buffer->heap = heap;
        buffer->flags = flags;
        kref_init(&buffer->ref);

        ret = heap->ops->allocate(heap, buffer, len, align, flags);
        if (ret) {
                kfree(buffer);
                return ERR_PTR(ret);
        }

        buffer->dev = dev;
        buffer->size = len;

        table = heap->ops->map_dma(heap, buffer);
        if (IS_ERR_OR_NULL(table)) {
                heap->ops->free(buffer);
                kfree(buffer);
                return ERR_PTR(PTR_ERR(table));
        }
        buffer->sg_table = table;
        if (ion_buffer_fault_user_mappings(buffer)) {
                for_each_sg(buffer->sg_table->sgl, sg, buffer->sg_table->nents,
                            i) {
                        if (sg_dma_len(sg) == PAGE_SIZE)
                                continue;
                        pr_err("%s: cached mappings that will be faulted in "
                               "must have pagewise sg_lists\n", __func__);
                        ret = -EINVAL;
                        goto err;
                }

                ret = ion_buffer_alloc_dirty(buffer);
                if (ret)
                        goto err;
        }

        buffer->dev = dev;
        buffer->size = len;
        INIT_LIST_HEAD(&buffer->vmas);
        mutex_init(&buffer->lock);
        /* this will set up dma addresses for the sglist -- it is not
           technically correct as per the dma api -- a specific
           device isn't really taking ownership here.  However, in practice on
           our systems the only dma_address space is physical addresses.
           Additionally, we can't afford the overhead of invalidating every
           allocation via dma_map_sg. The implicit contract here is that
           memory coming from the heaps is ready for dma, ie if it has a
           cached mapping that mapping has been invalidated */
        for_each_sg(buffer->sg_table->sgl, sg, buffer->sg_table->nents, i)
                sg_dma_address(sg) = sg_phys(sg);
        mutex_lock(&dev->buffer_lock);
        ion_buffer_add(dev, buffer);
        mutex_unlock(&dev->buffer_lock);
        return buffer;

err:
        heap->ops->unmap_dma(heap, buffer);
        heap->ops->free(buffer);
        kfree(buffer);
        return ERR_PTR(ret);
}
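
/*
 * kref release callback for an ion_buffer: tear down any leftover kernel
 * mapping, undo the dma mapping, return the memory to the heap and remove
 * the buffer from the device rbtree.
 */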
static void ion_buffer_destroy(struct kref *kref)
{
        struct ion_buffer *buffer = container_of(kref, struct ion_buffer, ref);
        struct ion_device *dev = buffer->dev;

        if (WARN_ON(buffer->kmap_cnt > 0))
                buffer->heap->ops->unmap_kernel(buffer->heap, buffer);
        buffer->heap->ops->unmap_dma(buffer->heap, buffer);
        buffer->heap->ops->free(buffer);
        mutex_lock(&dev->buffer_lock);
        rb_erase(&buffer->node, &dev->buffers);
        mutex_unlock(&dev->buffer_lock);
        if (buffer->flags & ION_FLAG_CACHED)
                kfree(buffer->dirty);
        kfree(buffer);
}

static void ion_buffer_get(struct ion_buffer *buffer)
{
        kref_get(&buffer->ref);
}

static int ion_buffer_put(struct ion_buffer *buffer)
{
        return kref_put(&buffer->ref, ion_buffer_destroy);
}

static void ion_buffer_add_to_handle(struct ion_buffer *buffer)
{
        mutex_lock(&buffer->lock);
        buffer->handle_count++;
        mutex_unlock(&buffer->lock);
}

static void ion_buffer_remove_from_handle(struct ion_buffer *buffer)
{
        /*
         * when a buffer is removed from a handle, if it is not in
         * any other handles, copy the taskcomm and the pid of the
         * process it's being removed from into the buffer.  At this
         * point there will be no way to track what processes this buffer is
         * being used by, it only exists as a dma_buf file descriptor.
         * The taskcomm and pid can provide a debug hint as to where this fd
         * is in the system
         */
        mutex_lock(&buffer->lock);
        buffer->handle_count--;
        BUG_ON(buffer->handle_count < 0);
        if (!buffer->handle_count) {
                struct task_struct *task;

                task = current->group_leader;
                get_task_comm(buffer->task_comm, task);
                buffer->pid = task_pid_nr(task);
        }
        mutex_unlock(&buffer->lock);
}
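
/*
 * An ion_handle is a per-client reference to an ion_buffer.  Creating a
 * handle takes a reference on the buffer and bumps its handle count so
 * that buffers left with no handles can be reported as orphaned in
 * debugfs.
 */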
static struct ion_handle *ion_handle_create(struct ion_client *client,
                                     struct ion_buffer *buffer)
{
        struct ion_handle *handle;

        handle = kzalloc(sizeof(struct ion_handle), GFP_KERNEL);
        if (!handle)
                return ERR_PTR(-ENOMEM);
        kref_init(&handle->ref);
        RB_CLEAR_NODE(&handle->node);
        handle->client = client;
        ion_buffer_get(buffer);
        ion_buffer_add_to_handle(buffer);
        handle->buffer = buffer;

        return handle;
}

static void ion_handle_kmap_put(struct ion_handle *);

static void ion_handle_destroy(struct kref *kref)
{
        struct ion_handle *handle = container_of(kref, struct ion_handle, ref);
        struct ion_client *client = handle->client;
        struct ion_buffer *buffer = handle->buffer;

        mutex_lock(&buffer->lock);
        while (handle->kmap_cnt)
                ion_handle_kmap_put(handle);
        mutex_unlock(&buffer->lock);

        if (!RB_EMPTY_NODE(&handle->node))
                rb_erase(&handle->node, &client->handles);

        ion_buffer_remove_from_handle(buffer);
        ion_buffer_put(buffer);

        kfree(handle);
}

struct ion_buffer *ion_handle_buffer(struct ion_handle *handle)
{
        return handle->buffer;
}

static void ion_handle_get(struct ion_handle *handle)
{
        kref_get(&handle->ref);
}

static int ion_handle_put(struct ion_handle *handle)
{
        return kref_put(&handle->ref, ion_handle_destroy);
}

static struct ion_handle *ion_handle_lookup(struct ion_client *client,
                                            struct ion_buffer *buffer)
{
        struct rb_node *n;

        for (n = rb_first(&client->handles); n; n = rb_next(n)) {
                struct ion_handle *handle = rb_entry(n, struct ion_handle,
                                                     node);
                if (handle->buffer == buffer)
                        return handle;
        }
        return NULL;
}

static bool ion_handle_validate(struct ion_client *client, struct ion_handle *handle)
{
        struct rb_node *n = client->handles.rb_node;

        while (n) {
                struct ion_handle *handle_node = rb_entry(n, struct ion_handle,
                                                          node);
                if (handle < handle_node)
                        n = n->rb_left;
                else if (handle > handle_node)
                        n = n->rb_right;
                else
                        return true;
        }
        return false;
}

static void ion_handle_add(struct ion_client *client, struct ion_handle *handle)
{
        struct rb_node **p = &client->handles.rb_node;
        struct rb_node *parent = NULL;
        struct ion_handle *entry;

        while (*p) {
                parent = *p;
                entry = rb_entry(parent, struct ion_handle, node);

                if (handle < entry)
                        p = &(*p)->rb_left;
                else if (handle > entry)
                        p = &(*p)->rb_right;
                else
                        WARN(1, "%s: buffer already found.", __func__);
        }

        rb_link_node(&handle->node, parent, p);
        rb_insert_color(&handle->node, &client->handles);
}
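
/*
 * Allocate a buffer of @len bytes from the first heap in @heap_id_mask
 * that can satisfy the request, and return a handle to it for @client.
 */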
struct ion_handle *ion_alloc(struct ion_client *client, size_t len,
                             size_t align, unsigned int heap_id_mask,
                             unsigned int flags)
{
        struct ion_handle *handle;
        struct ion_device *dev = client->dev;
        struct ion_buffer *buffer = NULL;
        struct ion_heap *heap;

        pr_debug("%s: len %d align %d heap_id_mask %u flags %x\n", __func__,
                 len, align, heap_id_mask, flags);
        /*
         * traverse the list of heaps available in this system in priority
         * order.  If the heap type is supported by the client, and matches the
         * request of the caller allocate from it.  Repeat until allocate has
         * succeeded or all heaps have been tried
         */
        if (WARN_ON(!len))
                return ERR_PTR(-EINVAL);

        len = PAGE_ALIGN(len);

        down_read(&dev->lock);
        plist_for_each_entry(heap, &dev->heaps, node) {
                /* if the caller didn't specify this heap id */
                if (!((1 << heap->id) & heap_id_mask))
                        continue;
                buffer = ion_buffer_create(heap, dev, len, align, flags);
                if (!IS_ERR_OR_NULL(buffer))
                        break;
        }
        up_read(&dev->lock);

        if (buffer == NULL)
                return ERR_PTR(-ENODEV);

        if (IS_ERR(buffer))
                return ERR_PTR(PTR_ERR(buffer));

        handle = ion_handle_create(client, buffer);

        /*
         * ion_buffer_create will create a buffer with a ref_cnt of 1,
         * and ion_handle_create will take a second reference, drop one here
         */
        ion_buffer_put(buffer);

        if (!IS_ERR(handle)) {
                mutex_lock(&client->lock);
                ion_handle_add(client, handle);
                mutex_unlock(&client->lock);
        }

        return handle;
}
EXPORT_SYMBOL(ion_alloc);

void ion_free(struct ion_client *client, struct ion_handle *handle)
{
        bool valid_handle;

        BUG_ON(client != handle->client);

        mutex_lock(&client->lock);
        valid_handle = ion_handle_validate(client, handle);

        if (!valid_handle) {
                WARN(1, "%s: invalid handle passed to free.\n", __func__);
                mutex_unlock(&client->lock);
                return;
        }
        ion_handle_put(handle);
        mutex_unlock(&client->lock);
}
EXPORT_SYMBOL(ion_free);

int ion_phys(struct ion_client *client, struct ion_handle *handle,
             ion_phys_addr_t *addr, size_t *len)
{
        struct ion_buffer *buffer;
        int ret;

        mutex_lock(&client->lock);
        if (!ion_handle_validate(client, handle)) {
                mutex_unlock(&client->lock);
                return -EINVAL;
        }

        buffer = handle->buffer;

        if (!buffer->heap->ops->phys) {
                pr_err("%s: ion_phys is not implemented by this heap.\n",
                       __func__);
                mutex_unlock(&client->lock);
                return -ENODEV;
        }
        mutex_unlock(&client->lock);
        ret = buffer->heap->ops->phys(buffer->heap, buffer, addr, len);
        return ret;
}
EXPORT_SYMBOL(ion_phys);
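
/*
 * Kernel-mapping helpers: the buffer keeps a single vaddr and a kmap_cnt,
 * and each handle keeps its own kmap_cnt, so map_kernel/unmap_kernel can
 * be nested per handle and the real mapping is only torn down when the
 * last user drops it.
 */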
static void *ion_buffer_kmap_get(struct ion_buffer *buffer)
{
        void *vaddr;

        if (buffer->kmap_cnt) {
                buffer->kmap_cnt++;
                return buffer->vaddr;
        }
        vaddr = buffer->heap->ops->map_kernel(buffer->heap, buffer);
        if (IS_ERR_OR_NULL(vaddr))
                return vaddr;
        buffer->vaddr = vaddr;
        buffer->kmap_cnt++;
        return vaddr;
}

static void *ion_handle_kmap_get(struct ion_handle *handle)
{
        struct ion_buffer *buffer = handle->buffer;
        void *vaddr;

        if (handle->kmap_cnt) {
                handle->kmap_cnt++;
                return buffer->vaddr;
        }
        vaddr = ion_buffer_kmap_get(buffer);
        if (IS_ERR_OR_NULL(vaddr))
                return vaddr;
        handle->kmap_cnt++;
        return vaddr;
}

static void ion_buffer_kmap_put(struct ion_buffer *buffer)
{
        buffer->kmap_cnt--;
        if (!buffer->kmap_cnt) {
                buffer->heap->ops->unmap_kernel(buffer->heap, buffer);
                buffer->vaddr = NULL;
        }
}

static void ion_handle_kmap_put(struct ion_handle *handle)
{
        struct ion_buffer *buffer = handle->buffer;

        handle->kmap_cnt--;
        if (!handle->kmap_cnt)
                ion_buffer_kmap_put(buffer);
}

void *ion_map_kernel(struct ion_client *client, struct ion_handle *handle)
{
        struct ion_buffer *buffer;
        void *vaddr;

        mutex_lock(&client->lock);
        if (!ion_handle_validate(client, handle)) {
                pr_err("%s: invalid handle passed to map_kernel.\n",
                       __func__);
                mutex_unlock(&client->lock);
                return ERR_PTR(-EINVAL);
        }

        buffer = handle->buffer;

        if (!handle->buffer->heap->ops->map_kernel) {
                pr_err("%s: map_kernel is not implemented by this heap.\n",
                       __func__);
                mutex_unlock(&client->lock);
                return ERR_PTR(-ENODEV);
        }

        mutex_lock(&buffer->lock);
        vaddr = ion_handle_kmap_get(handle);
        mutex_unlock(&buffer->lock);
        mutex_unlock(&client->lock);
        return vaddr;
}
EXPORT_SYMBOL(ion_map_kernel);

void ion_unmap_kernel(struct ion_client *client, struct ion_handle *handle)
{
        struct ion_buffer *buffer;

        mutex_lock(&client->lock);
        buffer = handle->buffer;
        mutex_lock(&buffer->lock);
        ion_handle_kmap_put(handle);
        mutex_unlock(&buffer->lock);
        mutex_unlock(&client->lock);
}
EXPORT_SYMBOL(ion_unmap_kernel);

static int ion_debug_client_show(struct seq_file *s, void *unused)
{
        struct ion_client *client = s->private;
        struct rb_node *n;
        size_t sizes[ION_NUM_HEAP_IDS] = {0};
        const char *names[ION_NUM_HEAP_IDS] = {0};
        int i;

        mutex_lock(&client->lock);
        for (n = rb_first(&client->handles); n; n = rb_next(n)) {
                struct ion_handle *handle = rb_entry(n, struct ion_handle,
                                                     node);
                unsigned int id = handle->buffer->heap->id;

                if (!names[id])
                        names[id] = handle->buffer->heap->name;
                sizes[id] += handle->buffer->size;
        }
        mutex_unlock(&client->lock);

        seq_printf(s, "%16.16s: %16.16s\n", "heap_name", "size_in_bytes");
        for (i = 0; i < ION_NUM_HEAP_IDS; i++) {
                if (!names[i])
                        continue;
                seq_printf(s, "%16.16s: %16u\n", names[i], sizes[i]);
        }
        return 0;
}

static int ion_debug_client_open(struct inode *inode, struct file *file)
{
        return single_open(file, ion_debug_client_show, inode->i_private);
}

static const struct file_operations debug_client_fops = {
        .open = ion_debug_client_open,
        .read = seq_read,
        .llseek = seq_lseek,
        .release = single_release,
};
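
/*
 * Create a client, the object through which in-kernel users and /dev/ion
 * file descriptors allocate and share buffers.  The creating task is
 * remembered for debugfs accounting and the client is tracked in the
 * device's client rbtree.
 */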
struct ion_client *ion_client_create(struct ion_device *dev,
                                     const char *name)
{
        struct ion_client *client;
        struct task_struct *task;
        struct rb_node **p;
        struct rb_node *parent = NULL;
        struct ion_client *entry;
        char debug_name[64];
        pid_t pid;

        get_task_struct(current->group_leader);
        task_lock(current->group_leader);
        pid = task_pid_nr(current->group_leader);
        /* don't bother to store task struct for kernel threads,
           they can't be killed anyway */
        if (current->group_leader->flags & PF_KTHREAD) {
                put_task_struct(current->group_leader);
                task = NULL;
        } else {
                task = current->group_leader;
        }
        task_unlock(current->group_leader);

        client = kzalloc(sizeof(struct ion_client), GFP_KERNEL);
        if (!client) {
                if (task)
                        put_task_struct(current->group_leader);
                return ERR_PTR(-ENOMEM);
        }

        client->dev = dev;
        client->handles = RB_ROOT;
        mutex_init(&client->lock);
        client->name = name;
        client->task = task;
        client->pid = pid;

        down_write(&dev->lock);
        p = &dev->clients.rb_node;
        while (*p) {
                parent = *p;
                entry = rb_entry(parent, struct ion_client, node);

                if (client < entry)
                        p = &(*p)->rb_left;
                else if (client > entry)
                        p = &(*p)->rb_right;
        }
        rb_link_node(&client->node, parent, p);
        rb_insert_color(&client->node, &dev->clients);

        snprintf(debug_name, 64, "%u", client->pid);
        client->debug_root = debugfs_create_file(debug_name, 0664,
                                                 dev->debug_root, client,
                                                 &debug_client_fops);
        up_write(&dev->lock);

        return client;
}
EXPORT_SYMBOL(ion_client_create);

void ion_client_destroy(struct ion_client *client)
{
        struct ion_device *dev = client->dev;
        struct rb_node *n;

        pr_debug("%s: %d\n", __func__, __LINE__);
        while ((n = rb_first(&client->handles))) {
                struct ion_handle *handle = rb_entry(n, struct ion_handle,
                                                     node);
                ion_handle_destroy(&handle->ref);
        }
        down_write(&dev->lock);
        if (client->task)
                put_task_struct(client->task);
        rb_erase(&client->node, &dev->clients);
        debugfs_remove_recursive(client->debug_root);
        up_write(&dev->lock);

        kfree(client);
}
EXPORT_SYMBOL(ion_client_destroy);

struct sg_table *ion_sg_table(struct ion_client *client,
                              struct ion_handle *handle)
{
        struct ion_buffer *buffer;
        struct sg_table *table;

        mutex_lock(&client->lock);
        if (!ion_handle_validate(client, handle)) {
                pr_err("%s: invalid handle passed to map_dma.\n",
                       __func__);
                mutex_unlock(&client->lock);
                return ERR_PTR(-EINVAL);
        }
        buffer = handle->buffer;
        table = buffer->sg_table;
        mutex_unlock(&client->lock);
        return table;
}
EXPORT_SYMBOL(ion_sg_table);

static void ion_buffer_sync_for_device(struct ion_buffer *buffer,
                                       struct device *dev,
                                       enum dma_data_direction direction);

static struct sg_table *ion_map_dma_buf(struct dma_buf_attachment *attachment,
                                        enum dma_data_direction direction)
{
        struct dma_buf *dmabuf = attachment->dmabuf;
        struct ion_buffer *buffer = dmabuf->priv;

        ion_buffer_sync_for_device(buffer, attachment->dev, direction);
        return buffer->sg_table;
}

static void ion_unmap_dma_buf(struct dma_buf_attachment *attachment,
                              struct sg_table *table,
                              enum dma_data_direction direction)
{
}

static int ion_buffer_alloc_dirty(struct ion_buffer *buffer)
{
        unsigned long pages = buffer->sg_table->nents;
        unsigned long length = (pages + BITS_PER_LONG - 1)/BITS_PER_LONG;

        buffer->dirty = kzalloc(length * sizeof(unsigned long), GFP_KERNEL);
        if (!buffer->dirty)
                return -ENOMEM;
        return 0;
}

struct ion_vma_list {
        struct list_head list;
        struct vm_area_struct *vma;
};
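
/*
 * For buffers whose user mappings are faulted in page by page, sync the
 * pages the CPU has dirtied before the device touches the memory, then zap
 * the existing user mappings so later CPU accesses fault again and get
 * marked dirty.
 */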
static void ion_buffer_sync_for_device(struct ion_buffer *buffer,
                                       struct device *dev,
                                       enum dma_data_direction dir)
{
        struct scatterlist *sg;
        int i;
        struct ion_vma_list *vma_list;

        pr_debug("%s: syncing for device %s\n", __func__,
                 dev ? dev_name(dev) : "null");

        if (!ion_buffer_fault_user_mappings(buffer))
                return;

        mutex_lock(&buffer->lock);
        for_each_sg(buffer->sg_table->sgl, sg, buffer->sg_table->nents, i) {
                if (!test_bit(i, buffer->dirty))
                        continue;
                dma_sync_sg_for_device(dev, sg, 1, dir);
                clear_bit(i, buffer->dirty);
        }
        list_for_each_entry(vma_list, &buffer->vmas, list) {
                struct vm_area_struct *vma = vma_list->vma;

                zap_page_range(vma, vma->vm_start, vma->vm_end - vma->vm_start,
                               NULL);
        }
        mutex_unlock(&buffer->lock);
}
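
/*
 * Fault handler for faulted-in cached mappings: mark the faulting page
 * dirty, sync it for CPU access and insert it into the faulting vma.
 */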
int ion_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
        struct ion_buffer *buffer = vma->vm_private_data;
        struct scatterlist *sg;
        int i;

        mutex_lock(&buffer->lock);
        set_bit(vmf->pgoff, buffer->dirty);

        for_each_sg(buffer->sg_table->sgl, sg, buffer->sg_table->nents, i) {
                if (i != vmf->pgoff)
                        continue;
                dma_sync_sg_for_cpu(NULL, sg, 1, DMA_BIDIRECTIONAL);
                vm_insert_page(vma, (unsigned long)vmf->virtual_address,
                               sg_page(sg));
                break;
        }
        mutex_unlock(&buffer->lock);
        return VM_FAULT_NOPAGE;
}

static void ion_vm_open(struct vm_area_struct *vma)
{
        struct ion_buffer *buffer = vma->vm_private_data;
        struct ion_vma_list *vma_list;

        vma_list = kmalloc(sizeof(struct ion_vma_list), GFP_KERNEL);
        if (!vma_list)
                return;
        vma_list->vma = vma;
        mutex_lock(&buffer->lock);
        list_add(&vma_list->list, &buffer->vmas);
        mutex_unlock(&buffer->lock);
        pr_debug("%s: adding %p\n", __func__, vma);
}

static void ion_vm_close(struct vm_area_struct *vma)
{
        struct ion_buffer *buffer = vma->vm_private_data;
        struct ion_vma_list *vma_list, *tmp;

        pr_debug("%s\n", __func__);
        mutex_lock(&buffer->lock);
        list_for_each_entry_safe(vma_list, tmp, &buffer->vmas, list) {
                if (vma_list->vma != vma)
                        continue;
                list_del(&vma_list->list);
                kfree(vma_list);
                pr_debug("%s: deleting %p\n", __func__, vma);
                break;
        }
        mutex_unlock(&buffer->lock);
}

struct vm_operations_struct ion_vma_ops = {
        .open = ion_vm_open,
        .close = ion_vm_close,
        .fault = ion_vm_fault,
};
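
/*
 * mmap a buffer to userspace: cached buffers that need fault tracking go
 * through ion_vma_ops, everything else is mapped directly by the heap,
 * writecombined when the buffer is uncached.
 */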
static int ion_mmap(struct dma_buf *dmabuf, struct vm_area_struct *vma)
{
        struct ion_buffer *buffer = dmabuf->priv;
        int ret = 0;

        if (!buffer->heap->ops->map_user) {
                pr_err("%s: this heap does not define a method for mapping "
                       "to userspace\n", __func__);
                return -EINVAL;
        }

        if (ion_buffer_fault_user_mappings(buffer)) {
                vma->vm_private_data = buffer;
                vma->vm_ops = &ion_vma_ops;
                ion_vm_open(vma);
                return 0;
        }

        if (!(buffer->flags & ION_FLAG_CACHED))
                vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);

        mutex_lock(&buffer->lock);
        /* now map it to userspace */
        ret = buffer->heap->ops->map_user(buffer->heap, buffer, vma);
        mutex_unlock(&buffer->lock);

        if (ret)
                pr_err("%s: failure mapping buffer to userspace\n",
                       __func__);

        return ret;
}

static void ion_dma_buf_release(struct dma_buf *dmabuf)
{
        struct ion_buffer *buffer = dmabuf->priv;
        ion_buffer_put(buffer);
}

static void *ion_dma_buf_kmap(struct dma_buf *dmabuf, unsigned long offset)
{
        struct ion_buffer *buffer = dmabuf->priv;
        return buffer->vaddr + offset * PAGE_SIZE;
}

static void ion_dma_buf_kunmap(struct dma_buf *dmabuf, unsigned long offset,
                               void *ptr)
{
        return;
}

static int ion_dma_buf_begin_cpu_access(struct dma_buf *dmabuf, size_t start,
                                        size_t len,
                                        enum dma_data_direction direction)
{
        struct ion_buffer *buffer = dmabuf->priv;
        void *vaddr;

        if (!buffer->heap->ops->map_kernel) {
                pr_err("%s: map kernel is not implemented by this heap.\n",
                       __func__);
                return -ENODEV;
        }

        mutex_lock(&buffer->lock);
        vaddr = ion_buffer_kmap_get(buffer);
        mutex_unlock(&buffer->lock);
        if (IS_ERR(vaddr))
                return PTR_ERR(vaddr);
        if (!vaddr)
                return -ENOMEM;
        return 0;
}

static void ion_dma_buf_end_cpu_access(struct dma_buf *dmabuf, size_t start,
                                       size_t len,
                                       enum dma_data_direction direction)
{
        struct ion_buffer *buffer = dmabuf->priv;

        mutex_lock(&buffer->lock);
        ion_buffer_kmap_put(buffer);
        mutex_unlock(&buffer->lock);
}

struct dma_buf_ops dma_buf_ops = {
        .map_dma_buf = ion_map_dma_buf,
        .unmap_dma_buf = ion_unmap_dma_buf,
        .mmap = ion_mmap,
        .release = ion_dma_buf_release,
        .begin_cpu_access = ion_dma_buf_begin_cpu_access,
        .end_cpu_access = ion_dma_buf_end_cpu_access,
        .kmap_atomic = ion_dma_buf_kmap,
        .kunmap_atomic = ion_dma_buf_kunmap,
        .kmap = ion_dma_buf_kmap,
        .kunmap = ion_dma_buf_kunmap,
};
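
/*
 * Export a handle's buffer as a dma_buf and return a file descriptor for
 * it, so the buffer can be shared with other processes or drivers.
 */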
int ion_share_dma_buf(struct ion_client *client, struct ion_handle *handle)
{
        struct ion_buffer *buffer;
        struct dma_buf *dmabuf;
        bool valid_handle;
        int fd;

        mutex_lock(&client->lock);
        valid_handle = ion_handle_validate(client, handle);
        mutex_unlock(&client->lock);
        if (!valid_handle) {
                WARN(1, "%s: invalid handle passed to share.\n", __func__);
                return -EINVAL;
        }

        buffer = handle->buffer;
        ion_buffer_get(buffer);
        dmabuf = dma_buf_export(buffer, &dma_buf_ops, buffer->size, O_RDWR);
        if (IS_ERR(dmabuf)) {
                ion_buffer_put(buffer);
                return PTR_ERR(dmabuf);
        }
        fd = dma_buf_fd(dmabuf, O_CLOEXEC);
        if (fd < 0)
                dma_buf_put(dmabuf);

        return fd;
}
EXPORT_SYMBOL(ion_share_dma_buf);
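
/*
 * Turn a dma_buf fd that was exported by ion back into an ion_handle for
 * @client, reusing an existing handle for the same buffer when possible.
 */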
struct ion_handle *ion_import_dma_buf(struct ion_client *client, int fd)
{
        struct dma_buf *dmabuf;
        struct ion_buffer *buffer;
        struct ion_handle *handle;

        dmabuf = dma_buf_get(fd);
        if (IS_ERR_OR_NULL(dmabuf))
                return ERR_PTR(PTR_ERR(dmabuf));
        /* if this memory came from ion */

        if (dmabuf->ops != &dma_buf_ops) {
                pr_err("%s: can not import dmabuf from another exporter\n",
                       __func__);
                dma_buf_put(dmabuf);
                return ERR_PTR(-EINVAL);
        }
        buffer = dmabuf->priv;

        mutex_lock(&client->lock);
        /* if a handle exists for this buffer just take a reference to it */
        handle = ion_handle_lookup(client, buffer);
        if (!IS_ERR_OR_NULL(handle)) {
                ion_handle_get(handle);
                goto end;
        }
        handle = ion_handle_create(client, buffer);
        if (IS_ERR_OR_NULL(handle))
                goto end;
        ion_handle_add(client, handle);
end:
        mutex_unlock(&client->lock);
        dma_buf_put(dmabuf);
        return handle;
}
EXPORT_SYMBOL(ion_import_dma_buf);
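
/*
 * ION_IOC_SYNC backend: sync an exported buffer's pages for device access
 * in the direction requested by userspace (to device, from device or
 * bidirectional).
 */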
static int ion_sync_for_device(struct ion_client *client, int fd, enum ion_data_direction dir)
{
        struct dma_buf *dmabuf;
        struct ion_buffer *buffer;

        dmabuf = dma_buf_get(fd);
        if (IS_ERR_OR_NULL(dmabuf))
                return PTR_ERR(dmabuf);

        /* if this memory came from ion */
        if (dmabuf->ops != &dma_buf_ops) {
                pr_err("%s: can not sync dmabuf from another exporter\n",
                       __func__);
                dma_buf_put(dmabuf);
                return -EINVAL;
        }
        buffer = dmabuf->priv;

        if (dir == ION_FROM_DEVICE)
                dma_sync_sg_for_device(NULL, buffer->sg_table->sgl,
                                       buffer->sg_table->nents, DMA_FROM_DEVICE);
        else if (dir == ION_TO_DEVICE)
                dma_sync_sg_for_device(NULL, buffer->sg_table->sgl,
                                       buffer->sg_table->nents, DMA_TO_DEVICE);
        else if (dir == ION_BIDIRECTIONAL)
                dma_sync_sg_for_device(NULL, buffer->sg_table->sgl,
                                       buffer->sg_table->nents, DMA_BIDIRECTIONAL);

        dma_buf_put(dmabuf);
        return 0;
}
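
/*
 * ioctl handler for /dev/ion: allocation, free, share/map, import, cache
 * sync and heap-specific custom commands.
 */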
static long ion_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
        struct ion_client *client = filp->private_data;

        switch (cmd) {
        case ION_IOC_ALLOC:
        {
                struct ion_allocation_data data;

                if (copy_from_user(&data, (void __user *)arg, sizeof(data)))
                        return -EFAULT;
                data.handle = ion_alloc(client, data.len, data.align,
                                             data.heap_id_mask, data.flags);

                if (IS_ERR(data.handle))
                        return PTR_ERR(data.handle);

                if (copy_to_user((void __user *)arg, &data, sizeof(data))) {
                        ion_free(client, data.handle);
                        return -EFAULT;
                }
                break;
        }
        case ION_IOC_FREE:
        {
                struct ion_handle_data data;
                bool valid;

                if (copy_from_user(&data, (void __user *)arg,
                                   sizeof(struct ion_handle_data)))
                        return -EFAULT;
                mutex_lock(&client->lock);
                valid = ion_handle_validate(client, data.handle);
                mutex_unlock(&client->lock);
                if (!valid)
                        return -EINVAL;
                ion_free(client, data.handle);
                break;
        }
        case ION_IOC_SHARE:
        case ION_IOC_MAP:
        {
                struct ion_fd_data data;

                if (copy_from_user(&data, (void __user *)arg, sizeof(data)))
                        return -EFAULT;
                data.fd = ion_share_dma_buf(client, data.handle);
                if (copy_to_user((void __user *)arg, &data, sizeof(data)))
                        return -EFAULT;
                if (data.fd < 0)
                        return data.fd;
                break;
        }
        case ION_IOC_IMPORT:
        {
                struct ion_fd_data data;
                int ret = 0;
                if (copy_from_user(&data, (void __user *)arg,
                                   sizeof(struct ion_fd_data)))
                        return -EFAULT;
                data.handle = ion_import_dma_buf(client, data.fd);
                if (IS_ERR(data.handle)) {
                        ret = PTR_ERR(data.handle);
                        data.handle = NULL;
                }
                if (copy_to_user((void __user *)arg, &data,
                                 sizeof(struct ion_fd_data)))
                        return -EFAULT;
                if (ret < 0)
                        return ret;
                break;
        }
        case ION_IOC_SYNC:
        {
                struct ion_fd_data data;
                if (copy_from_user(&data, (void __user *)arg,
                                   sizeof(struct ion_fd_data)))
                        return -EFAULT;
                ion_sync_for_device(client, data.fd, data.dir);
                break;
        }
        case ION_IOC_CUSTOM:
        {
                struct ion_device *dev = client->dev;
                struct ion_custom_data data;

                if (!dev->custom_ioctl)
                        return -ENOTTY;
                if (copy_from_user(&data, (void __user *)arg,
                                sizeof(struct ion_custom_data)))
                        return -EFAULT;
                return dev->custom_ioctl(client, data.cmd, data.arg);
        }
        default:
                return -ENOTTY;
        }
        return 0;
}

static int ion_release(struct inode *inode, struct file *file)
{
        struct ion_client *client = file->private_data;

        pr_debug("%s: %d\n", __func__, __LINE__);
        ion_client_destroy(client);
        return 0;
}

static int ion_open(struct inode *inode, struct file *file)
{
        struct miscdevice *miscdev = file->private_data;
        struct ion_device *dev = container_of(miscdev, struct ion_device, dev);
        struct ion_client *client;

        pr_debug("%s: %d\n", __func__, __LINE__);
        client = ion_client_create(dev, "user");
        if (IS_ERR_OR_NULL(client))
                return PTR_ERR(client);
        file->private_data = client;

        return 0;
}

static const struct file_operations ion_fops = {
        .owner          = THIS_MODULE,
        .open           = ion_open,
        .release        = ion_release,
        .unlocked_ioctl = ion_ioctl,
};

static size_t ion_debug_heap_total(struct ion_client *client,
                                   unsigned int id)
{
        size_t size = 0;
        struct rb_node *n;

        mutex_lock(&client->lock);
        for (n = rb_first(&client->handles); n; n = rb_next(n)) {
                struct ion_handle *handle = rb_entry(n,
                                                     struct ion_handle,
                                                     node);
                if (handle->buffer->heap->id == id)
                        size += handle->buffer->size;
        }
        mutex_unlock(&client->lock);
        return size;
}

static int ion_debug_heap_show(struct seq_file *s, void *unused)
{
        struct ion_heap *heap = s->private;
        struct ion_device *dev = heap->dev;
        struct rb_node *n;
        size_t total_size = 0;
        size_t total_orphaned_size = 0;

        seq_printf(s, "%16.s %16.s %16.s\n", "client", "pid", "size");
        seq_printf(s, "----------------------------------------------------\n");

        for (n = rb_first(&dev->clients); n; n = rb_next(n)) {
                struct ion_client *client = rb_entry(n, struct ion_client,
                                                     node);
                size_t size = ion_debug_heap_total(client, heap->id);
                if (!size)
                        continue;
                if (client->task) {
                        char task_comm[TASK_COMM_LEN];

                        get_task_comm(task_comm, client->task);
                        seq_printf(s, "%16.s %16u %16u\n", task_comm,
                                   client->pid, size);
                } else {
                        seq_printf(s, "%16.s %16u %16u\n", client->name,
                                   client->pid, size);
                }
        }
        seq_printf(s, "----------------------------------------------------\n");
        seq_printf(s, "orphaned allocations (info is from last known client):"
                   "\n");
        mutex_lock(&dev->buffer_lock);
        for (n = rb_first(&dev->buffers); n; n = rb_next(n)) {
                struct ion_buffer *buffer = rb_entry(n, struct ion_buffer,
                                                     node);
                if (buffer->heap->id != heap->id)
                        continue;
                total_size += buffer->size;
                if (!buffer->handle_count) {
                        seq_printf(s, "%16.s %16u %16u %d %d\n", buffer->task_comm,
                                   buffer->pid, buffer->size, buffer->kmap_cnt,
                                   atomic_read(&buffer->ref.refcount));
                        total_orphaned_size += buffer->size;
                }
        }
        mutex_unlock(&dev->buffer_lock);
        seq_printf(s, "----------------------------------------------------\n");
        seq_printf(s, "%16.s %16u\n", "total orphaned",
                   total_orphaned_size);
        seq_printf(s, "%16.s %16u\n", "total ", total_size);
        seq_printf(s, "----------------------------------------------------\n");

        if (heap->debug_show)
                heap->debug_show(heap, s, unused);

        return 0;
}

static int ion_debug_heap_open(struct inode *inode, struct file *file)
{
        return single_open(file, ion_debug_heap_show, inode->i_private);
}

static const struct file_operations debug_heap_fops = {
        .open = ion_debug_heap_open,
        .read = seq_read,
        .llseek = seq_lseek,
        .release = single_release,
};
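
/*
 * Register a heap with the device: warn if the mandatory heap ops are
 * missing, add the heap to the priority list that ion_alloc traverses,
 * and create a per-heap debugfs file showing usage by client.
 */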
void ion_device_add_heap(struct ion_device *dev, struct ion_heap *heap)
{
        if (!heap->ops->allocate || !heap->ops->free || !heap->ops->map_dma ||
            !heap->ops->unmap_dma)
                pr_err("%s: can not add heap with invalid ops struct.\n",
                       __func__);

        heap->dev = dev;
        down_write(&dev->lock);
        /* use negative heap->id to reverse the priority -- when traversing
           the list later attempt higher id numbers first */
        plist_node_init(&heap->node, -heap->id);
        plist_add(&heap->node, &dev->heaps);
        debugfs_create_file(heap->name, 0664, dev->debug_root, heap,
                            &debug_heap_fops);
        up_write(&dev->lock);
}

struct ion_device *ion_device_create(long (*custom_ioctl)
                                     (struct ion_client *client,
                                      unsigned int cmd,
                                      unsigned long arg))
{
        struct ion_device *idev;
        int ret;

        idev = kzalloc(sizeof(struct ion_device), GFP_KERNEL);
        if (!idev)
                return ERR_PTR(-ENOMEM);

        idev->dev.minor = MISC_DYNAMIC_MINOR;
        idev->dev.name = "ion";
        idev->dev.fops = &ion_fops;
        idev->dev.parent = NULL;
        ret = misc_register(&idev->dev);
        if (ret) {
                pr_err("ion: failed to register misc device.\n");
                return ERR_PTR(ret);
        }

        idev->debug_root = debugfs_create_dir("ion", NULL);
        if (IS_ERR_OR_NULL(idev->debug_root))
                pr_err("ion: failed to create debug files.\n");

        idev->custom_ioctl = custom_ioctl;
        idev->buffers = RB_ROOT;
        mutex_init(&idev->buffer_lock);
        init_rwsem(&idev->lock);
        plist_head_init(&idev->heaps);
        idev->clients = RB_ROOT;
        return idev;
}

void ion_device_destroy(struct ion_device *dev)
{
        misc_deregister(&dev->dev);
        /* XXX need to free the heaps and clients ? */
        kfree(dev);
}
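
/*
 * Reserve the memory regions described in the ion platform data via
 * memblock at early boot, allocating a base address for any heap that
 * does not specify one.
 */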
void __init ion_reserve(struct ion_platform_data *data)
{
        int i;

        for (i = 0; i < data->nr; i++) {
                if (data->heaps[i].size == 0)
                        continue;

                if (data->heaps[i].base == 0) {
                        phys_addr_t paddr;
                        paddr = memblock_alloc_base(data->heaps[i].size,
                                                    data->heaps[i].align,
                                                    MEMBLOCK_ALLOC_ANYWHERE);
                        if (!paddr) {
                                pr_err("%s: error allocating memblock for "
                                       "heap %d\n",
                                        __func__, i);
                                continue;
                        }
                        data->heaps[i].base = paddr;
                } else {
                        int ret = memblock_reserve(data->heaps[i].base,
                                               data->heaps[i].size);
                        if (ret)
                                pr_err("memblock reserve of %x@%lx failed\n",
                                       data->heaps[i].size,
                                       data->heaps[i].base);
                }
                pr_info("%s: %s reserved base %lx size %d\n", __func__,
                        data->heaps[i].name,
                        data->heaps[i].base,
                        data->heaps[i].size);
        }
}