1 /*
2 * drivers/base/sync.c
3 *
4 * Copyright (C) 2012 Google, Inc.
5 *
6 * This software is licensed under the terms of the GNU General Public
7 * License version 2, as published by the Free Software Foundation, and
8 * may be copied, distributed, and modified under those terms.
9 *
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details.
14 *
15 */
17 #include <linux/debugfs.h>
18 #include <linux/export.h>
19 #include <linux/file.h>
20 #include <linux/fs.h>
21 #include <linux/kernel.h>
22 #include <linux/poll.h>
23 #include <linux/sched.h>
24 #include <linux/seq_file.h>
25 #include <linux/slab.h>
26 #include <linux/sync.h>
27 #include <linux/uaccess.h>
29 #include <linux/anon_inodes.h>
31 #define CREATE_TRACE_POINTS
32 #include <trace/events/sync.h>
34 static void sync_fence_signal_pt(struct sync_pt *pt);
35 static int _sync_pt_has_signaled(struct sync_pt *pt);
36 static void sync_fence_free(struct kref *kref);
37 static void sync_dump(void);
39 static LIST_HEAD(sync_timeline_list_head);
40 static DEFINE_SPINLOCK(sync_timeline_list_lock);
42 static LIST_HEAD(sync_fence_list_head);
43 static DEFINE_SPINLOCK(sync_fence_list_lock);
45 struct sync_timeline *sync_timeline_create(const struct sync_timeline_ops *ops,
46 int size, const char *name)
47 {
48 struct sync_timeline *obj;
49 unsigned long flags;
51 if (size < sizeof(struct sync_timeline))
52 return NULL;
54 obj = kzalloc(size, GFP_KERNEL);
55 if (obj == NULL)
56 return NULL;
58 kref_init(&obj->kref);
59 obj->ops = ops;
60 strlcpy(obj->name, name, sizeof(obj->name));
62 INIT_LIST_HEAD(&obj->child_list_head);
63 spin_lock_init(&obj->child_list_lock);
65 INIT_LIST_HEAD(&obj->active_list_head);
66 spin_lock_init(&obj->active_list_lock);
68 spin_lock_irqsave(&sync_timeline_list_lock, flags);
69 list_add_tail(&obj->sync_timeline_list, &sync_timeline_list_head);
70 spin_unlock_irqrestore(&sync_timeline_list_lock, flags);
72 return obj;
73 }
74 EXPORT_SYMBOL(sync_timeline_create);
/*
 * kref release callback for a timeline: runs the driver's release hook,
 * unlinks the timeline from the global list, and frees it.
 */
static void sync_timeline_free(struct kref *kref)
{
	struct sync_timeline *obj =
		container_of(kref, struct sync_timeline, kref);
	unsigned long flags;

	/* let the driver tear down its per-timeline state first */
	if (obj->ops->release_obj)
		obj->ops->release_obj(obj);

	spin_lock_irqsave(&sync_timeline_list_lock, flags);
	list_del(&obj->sync_timeline_list);
	spin_unlock_irqrestore(&sync_timeline_list_lock, flags);

	kfree(obj);
}
92 void sync_timeline_destroy(struct sync_timeline *obj)
93 {
94 obj->destroyed = true;
96 /*
97 * If this is not the last reference, signal any children
98 * that their parent is going away.
99 */
101 if (!kref_put(&obj->kref, sync_timeline_free))
102 sync_timeline_signal(obj);
103 }
104 EXPORT_SYMBOL(sync_timeline_destroy);
/* attach @pt to @obj: record the parent pointer and queue the pt on the
 * timeline's child list */
static void sync_timeline_add_pt(struct sync_timeline *obj, struct sync_pt *pt)
{
	unsigned long flags;

	pt->parent = obj;

	spin_lock_irqsave(&obj->child_list_lock, flags);
	list_add_tail(&pt->child_list, &obj->child_list_head);
	spin_unlock_irqrestore(&obj->child_list_lock, flags);
}
117 static void sync_timeline_remove_pt(struct sync_pt *pt)
118 {
119 struct sync_timeline *obj = pt->parent;
120 unsigned long flags;
122 spin_lock_irqsave(&obj->active_list_lock, flags);
123 if (!list_empty(&pt->active_list))
124 list_del_init(&pt->active_list);
125 spin_unlock_irqrestore(&obj->active_list_lock, flags);
127 spin_lock_irqsave(&obj->child_list_lock, flags);
128 if (!list_empty(&pt->child_list)) {
129 list_del_init(&pt->child_list);
130 }
131 spin_unlock_irqrestore(&obj->child_list_lock, flags);
132 }
/*
 * sync_timeline_signal() - re-evaluates every active pt on @obj and
 * signals the fences of those that have now fired.
 *
 * Signaled pts are moved to a private list under active_list_lock, then
 * their fences are signaled after the lock is released so waiter
 * callbacks do not run with the timeline lock held.
 */
void sync_timeline_signal(struct sync_timeline *obj)
{
	unsigned long flags;
	LIST_HEAD(signaled_pts);
	struct list_head *pos, *n;

	trace_sync_timeline(obj);

	spin_lock_irqsave(&obj->active_list_lock, flags);

	list_for_each_safe(pos, n, &obj->active_list_head) {
		struct sync_pt *pt =
			container_of(pos, struct sync_pt, active_list);

		if (_sync_pt_has_signaled(pt)) {
			list_del_init(pos);
			list_add(&pt->signaled_list, &signaled_pts);
			/* pin the fence so it cannot be freed before we
			 * signal it below, outside the lock */
			kref_get(&pt->fence->kref);
		}
	}

	spin_unlock_irqrestore(&obj->active_list_lock, flags);

	list_for_each_safe(pos, n, &signaled_pts) {
		struct sync_pt *pt =
			container_of(pos, struct sync_pt, signaled_list);

		list_del_init(pos);
		sync_fence_signal_pt(pt);
		kref_put(&pt->fence->kref, sync_fence_free);
	}
}
EXPORT_SYMBOL(sync_timeline_signal);
/*
 * sync_pt_create() - creates a pt on @parent
 * @size: driver struct size; must be >= sizeof(struct sync_pt)
 *
 * Takes a reference on @parent, dropped later by sync_pt_free().
 * Returns NULL on bad size or allocation failure.
 */
struct sync_pt *sync_pt_create(struct sync_timeline *parent, int size)
{
	struct sync_pt *pt;

	if (size < sizeof(struct sync_pt))
		return NULL;

	pt = kzalloc(size, GFP_KERNEL);
	if (pt == NULL)
		return NULL;

	INIT_LIST_HEAD(&pt->active_list);
	kref_get(&parent->kref);
	sync_timeline_add_pt(parent, pt);

	return pt;
}
EXPORT_SYMBOL(sync_pt_create);
/*
 * sync_pt_free() - frees @pt and drops its timeline reference.
 *
 * The driver's free_pt hook runs first, while the pt is still linked to
 * its parent, so the driver sees a fully-connected pt.
 */
void sync_pt_free(struct sync_pt *pt)
{
	if (pt->parent->ops->free_pt)
		pt->parent->ops->free_pt(pt);

	sync_timeline_remove_pt(pt);

	kref_put(&pt->parent->kref, sync_timeline_free);

	kfree(pt);
}
EXPORT_SYMBOL(sync_pt_free);
/* call with pt->parent->active_list_lock held */
static int _sync_pt_has_signaled(struct sync_pt *pt)
{
	int old_status = pt->status;

	/* only poll the driver while the pt is still pending */
	if (!pt->status)
		pt->status = pt->parent->ops->has_signaled(pt);

	/* pts on a destroyed timeline can never fire; error them out */
	if (!pt->status && pt->parent->destroyed)
		pt->status = -ENOENT;

	/* timestamp the pending -> signaled/error transition */
	if (pt->status != old_status)
		pt->timestamp = ktime_get();

	return pt->status;
}
/* driver-specific duplication of @pt; the copy belongs to no fence yet */
static struct sync_pt *sync_pt_dup(struct sync_pt *pt)
{
	return pt->parent->ops->dup(pt);
}
222 /* Adds a sync pt to the active queue. Called when added to a fence */
223 static void sync_pt_activate(struct sync_pt *pt)
224 {
225 struct sync_timeline *obj = pt->parent;
226 unsigned long flags;
227 int err;
229 spin_lock_irqsave(&obj->active_list_lock, flags);
231 err = _sync_pt_has_signaled(pt);
232 if (err != 0)
233 goto out;
235 list_add_tail(&pt->active_list, &obj->active_list_head);
237 out:
238 spin_unlock_irqrestore(&obj->active_list_lock, flags);
239 }
/* file_operations for the fence fd; the handlers are defined below */
static int sync_fence_release(struct inode *inode, struct file *file);
static unsigned int sync_fence_poll(struct file *file, poll_table *wait);
static long sync_fence_ioctl(struct file *file, unsigned int cmd,
			     unsigned long arg);

static const struct file_operations sync_fence_fops = {
	.release = sync_fence_release,
	.poll = sync_fence_poll,
	.unlocked_ioctl = sync_fence_ioctl,
};
253 static struct sync_fence *sync_fence_alloc(const char *name)
254 {
255 struct sync_fence *fence;
256 unsigned long flags;
258 fence = kzalloc(sizeof(struct sync_fence), GFP_KERNEL);
259 if (fence == NULL)
260 return NULL;
262 fence->file = anon_inode_getfile("sync_fence", &sync_fence_fops,
263 fence, 0);
264 if (fence->file == NULL)
265 goto err;
267 kref_init(&fence->kref);
268 strlcpy(fence->name, name, sizeof(fence->name));
270 INIT_LIST_HEAD(&fence->pt_list_head);
271 INIT_LIST_HEAD(&fence->waiter_list_head);
272 spin_lock_init(&fence->waiter_list_lock);
274 init_waitqueue_head(&fence->wq);
276 spin_lock_irqsave(&sync_fence_list_lock, flags);
277 list_add_tail(&fence->sync_fence_list, &sync_fence_list_head);
278 spin_unlock_irqrestore(&sync_fence_list_lock, flags);
280 return fence;
282 err:
283 kfree(fence);
284 return NULL;
285 }
/* TODO: implement a create which takes more than one sync_pt */
/*
 * sync_fence_create() - wraps a single pt in a new fence
 * @name: name for the fence, copied into the fence object
 * @pt: pt to attach; must not already belong to a fence
 *
 * Takes ownership of @pt on success.  Returns NULL if @pt already has a
 * fence or allocation fails.
 */
struct sync_fence *sync_fence_create(const char *name, struct sync_pt *pt)
{
	struct sync_fence *fence;

	if (pt->fence)
		return NULL;

	fence = sync_fence_alloc(name);
	if (fence == NULL)
		return NULL;

	pt->fence = fence;
	list_add(&pt->pt_list, &fence->pt_list_head);
	sync_pt_activate(pt);

	/*
	 * signal the fence in case pt was activated before
	 * sync_pt_activate(pt) was called
	 */
	sync_fence_signal_pt(pt);

	return fence;
}
EXPORT_SYMBOL(sync_fence_create);
313 static int sync_fence_copy_pts(struct sync_fence *dst, struct sync_fence *src)
314 {
315 struct list_head *pos;
317 list_for_each(pos, &src->pt_list_head) {
318 struct sync_pt *orig_pt =
319 container_of(pos, struct sync_pt, pt_list);
320 struct sync_pt *new_pt = sync_pt_dup(orig_pt);
322 if (new_pt == NULL)
323 return -ENOMEM;
325 new_pt->fence = dst;
326 list_add(&new_pt->pt_list, &dst->pt_list_head);
327 }
329 return 0;
330 }
/*
 * Merge the pts of @src into @dst.  Two pts on the same timeline are
 * collapsed into a single pt that signals at the later of the two; pts
 * on timelines not represented in @dst are duplicated in.  Returns 0 or
 * -ENOMEM (partial copies stay on @dst for the caller to clean up).
 */
static int sync_fence_merge_pts(struct sync_fence *dst, struct sync_fence *src)
{
	struct list_head *src_pos, *dst_pos, *n;

	list_for_each(src_pos, &src->pt_list_head) {
		struct sync_pt *src_pt =
			container_of(src_pos, struct sync_pt, pt_list);
		bool collapsed = false;

		list_for_each_safe(dst_pos, n, &dst->pt_list_head) {
			struct sync_pt *dst_pt =
				container_of(dst_pos, struct sync_pt, pt_list);
			/* collapse two sync_pts on the same timeline
			 * to a single sync_pt that will signal at
			 * the later of the two
			 */
			if (dst_pt->parent == src_pt->parent) {
				/* compare() == -1: dst_pt fires earlier,
				 * so replace it with a copy of src_pt */
				if (dst_pt->parent->ops->compare(dst_pt, src_pt) == -1) {
					struct sync_pt *new_pt =
						sync_pt_dup(src_pt);
					if (new_pt == NULL)
						return -ENOMEM;

					new_pt->fence = dst;
					list_replace(&dst_pt->pt_list,
						     &new_pt->pt_list);
					sync_pt_free(dst_pt);
				}
				collapsed = true;
				break;
			}
		}

		/* timeline not in dst yet: carry src_pt over unchanged */
		if (!collapsed) {
			struct sync_pt *new_pt = sync_pt_dup(src_pt);

			if (new_pt == NULL)
				return -ENOMEM;

			new_pt->fence = dst;
			list_add(&new_pt->pt_list, &dst->pt_list_head);
		}
	}

	return 0;
}
379 static void sync_fence_detach_pts(struct sync_fence *fence)
380 {
381 struct list_head *pos, *n;
383 list_for_each_safe(pos, n, &fence->pt_list_head) {
384 struct sync_pt *pt = container_of(pos, struct sync_pt, pt_list);
385 sync_timeline_remove_pt(pt);
386 }
387 }
389 static void sync_fence_free_pts(struct sync_fence *fence)
390 {
391 struct list_head *pos, *n;
393 list_for_each_safe(pos, n, &fence->pt_list_head) {
394 struct sync_pt *pt = container_of(pos, struct sync_pt, pt_list);
395 sync_pt_free(pt);
396 }
397 }
399 struct sync_fence *sync_fence_fdget(int fd)
400 {
401 struct file *file = fget(fd);
403 if (file == NULL)
404 return NULL;
406 if (file->f_op != &sync_fence_fops)
407 goto err;
409 return file->private_data;
411 err:
412 fput(file);
413 return NULL;
414 }
415 EXPORT_SYMBOL(sync_fence_fdget);
/* drop a file reference on @fence; the last fput() triggers
 * sync_fence_release() */
void sync_fence_put(struct sync_fence *fence)
{
	fput(fence->file);
}
EXPORT_SYMBOL(sync_fence_put);
/* publish @fence on @fd; the fd table now owns the file reference */
void sync_fence_install(struct sync_fence *fence, int fd)
{
	fd_install(fd, fence->file);
}
EXPORT_SYMBOL(sync_fence_install);
429 static int sync_fence_get_status(struct sync_fence *fence)
430 {
431 struct list_head *pos;
432 int status = 1;
434 list_for_each(pos, &fence->pt_list_head) {
435 struct sync_pt *pt = container_of(pos, struct sync_pt, pt_list);
436 int pt_status = pt->status;
438 if (pt_status < 0) {
439 status = pt_status;
440 break;
441 } else if (status == 1) {
442 status = pt_status;
443 }
444 }
446 return status;
447 }
449 struct sync_fence *sync_fence_merge(const char *name,
450 struct sync_fence *a, struct sync_fence *b)
451 {
452 struct sync_fence *fence;
453 struct list_head *pos;
454 int err;
456 fence = sync_fence_alloc(name);
457 if (fence == NULL)
458 return NULL;
460 err = sync_fence_copy_pts(fence, a);
461 if (err < 0)
462 goto err;
464 err = sync_fence_merge_pts(fence, b);
465 if (err < 0)
466 goto err;
468 list_for_each(pos, &fence->pt_list_head) {
469 struct sync_pt *pt =
470 container_of(pos, struct sync_pt, pt_list);
471 sync_pt_activate(pt);
472 }
474 /*
475 * signal the fence in case one of it's pts were activated before
476 * they were activated
477 */
478 sync_fence_signal_pt(list_first_entry(&fence->pt_list_head,
479 struct sync_pt,
480 pt_list));
482 return fence;
483 err:
484 sync_fence_free_pts(fence);
485 kfree(fence);
486 return NULL;
487 }
488 EXPORT_SYMBOL(sync_fence_merge);
/*
 * sync_fence_signal_pt() - re-evaluates @pt's fence and, on the fence's
 * pending -> signaled/error transition, runs the async waiter callbacks
 * and wakes sleepers on the fence waitqueue.
 */
static void sync_fence_signal_pt(struct sync_pt *pt)
{
	LIST_HEAD(signaled_waiters);
	struct sync_fence *fence = pt->fence;
	struct list_head *pos;
	struct list_head *n;
	unsigned long flags;
	int status;

	status = sync_fence_get_status(fence);

	spin_lock_irqsave(&fence->waiter_list_lock, flags);
	/*
	 * this should protect against two threads racing on the signaled
	 * false -> true transition
	 */
	if (status && !fence->status) {
		/* winner of the race: detach the waiters under the lock
		 * and latch the final status */
		list_for_each_safe(pos, n, &fence->waiter_list_head)
			list_move(pos, &signaled_waiters);

		fence->status = status;
	} else {
		/* loser (or fence still pending): nothing to run below */
		status = 0;
	}
	spin_unlock_irqrestore(&fence->waiter_list_lock, flags);

	if (status) {
		/* run the callbacks outside waiter_list_lock */
		list_for_each_safe(pos, n, &signaled_waiters) {
			struct sync_fence_waiter *waiter =
				container_of(pos, struct sync_fence_waiter,
					     waiter_list);

			list_del(pos);
			waiter->callback(fence, waiter);
		}
		wake_up(&fence->wq);
	}
}
529 int sync_fence_wait_async(struct sync_fence *fence,
530 struct sync_fence_waiter *waiter)
531 {
532 unsigned long flags;
533 int err = 0;
535 spin_lock_irqsave(&fence->waiter_list_lock, flags);
537 if (fence->status) {
538 err = fence->status;
539 goto out;
540 }
542 list_add_tail(&waiter->waiter_list, &fence->waiter_list_head);
543 out:
544 spin_unlock_irqrestore(&fence->waiter_list_lock, flags);
546 return err;
547 }
548 EXPORT_SYMBOL(sync_fence_wait_async);
550 int sync_fence_cancel_async(struct sync_fence *fence,
551 struct sync_fence_waiter *waiter)
552 {
553 struct list_head *pos;
554 struct list_head *n;
555 unsigned long flags;
556 int ret = -ENOENT;
558 spin_lock_irqsave(&fence->waiter_list_lock, flags);
559 /*
560 * Make sure waiter is still in waiter_list because it is possible for
561 * the waiter to be removed from the list while the callback is still
562 * pending.
563 */
564 list_for_each_safe(pos, n, &fence->waiter_list_head) {
565 struct sync_fence_waiter *list_waiter =
566 container_of(pos, struct sync_fence_waiter,
567 waiter_list);
568 if (list_waiter == waiter) {
569 list_del(pos);
570 ret = 0;
571 break;
572 }
573 }
574 spin_unlock_irqrestore(&fence->waiter_list_lock, flags);
575 return ret;
576 }
577 EXPORT_SYMBOL(sync_fence_cancel_async);
/* condition helper for the waitqueue sleeps in sync_fence_wait() */
static bool sync_fence_check(struct sync_fence *fence)
{
	/*
	 * Make sure that reads to fence->status are ordered with the
	 * wait queue event triggering
	 */
	smp_rmb();
	return fence->status != 0;
}
/*
 * sync_fence_wait() - waits for @fence to signal
 * @timeout: > 0 wait at most @timeout ms; < 0 wait forever;
 *           0 just check the current state without sleeping
 *
 * Returns 0 once signaled, the fence's negative status if it errored,
 * -ETIME if still pending when the wait ends (including timeout == 0
 * polls of a pending fence), or a negative value from the interruptible
 * wait if interrupted.
 */
int sync_fence_wait(struct sync_fence *fence, long timeout)
{
	int err = 0;
	struct sync_pt *pt;

	trace_sync_wait(fence, 1);
	list_for_each_entry(pt, &fence->pt_list_head, pt_list)
		trace_sync_pt(pt);

	if (timeout > 0) {
		/* timeout is in jiffies from here on */
		timeout = msecs_to_jiffies(timeout);
		err = wait_event_interruptible_timeout(fence->wq,
						       sync_fence_check(fence),
						       timeout);
	} else if (timeout < 0) {
		err = wait_event_interruptible(fence->wq,
					       sync_fence_check(fence));
	}
	trace_sync_wait(fence, 0);

	if (err < 0)
		return err;

	if (fence->status < 0) {
		pr_info("fence error %d on [%p]\n", fence->status, fence);
		sync_dump();
		return fence->status;
	}

	if (fence->status == 0) {
		/* only log when an actual timed wait expired */
		if (timeout > 0) {
			pr_info("fence timeout on [%p] after %dms\n", fence,
				jiffies_to_msecs(timeout));
			sync_dump();
		}
		return -ETIME;
	}

	return 0;
}
EXPORT_SYMBOL(sync_fence_wait);
/* kref release for a fence: frees its pts and the fence itself.  The
 * file and global-list membership are torn down in sync_fence_release()
 * before the last kref drops. */
static void sync_fence_free(struct kref *kref)
{
	struct sync_fence *fence = container_of(kref, struct sync_fence, kref);

	sync_fence_free_pts(fence);

	kfree(fence);
}
/* ->release for the fence fd: removes every external access path to the
 * fence, then drops the file's kref. */
static int sync_fence_release(struct inode *inode, struct file *file)
{
	struct sync_fence *fence = file->private_data;
	unsigned long flags;

	/*
	 * We need to remove all ways to access this fence before dropping
	 * our ref.
	 *
	 * start with its membership in the global fence list
	 */
	spin_lock_irqsave(&sync_fence_list_lock, flags);
	list_del(&fence->sync_fence_list);
	spin_unlock_irqrestore(&sync_fence_list_lock, flags);

	/*
	 * remove its pts from their parents so that sync_timeline_signal()
	 * can't reference the fence.
	 */
	sync_fence_detach_pts(fence);

	kref_put(&fence->kref, sync_fence_free);

	return 0;
}
666 static unsigned int sync_fence_poll(struct file *file, poll_table *wait)
667 {
668 struct sync_fence *fence = file->private_data;
670 poll_wait(file, &fence->wq, wait);
672 /*
673 * Make sure that reads to fence->status are ordered with the
674 * wait queue event triggering
675 */
676 smp_rmb();
678 if (fence->status == 1)
679 return POLLIN;
680 else if (fence->status < 0)
681 return POLLERR;
682 else
683 return 0;
684 }
686 static long sync_fence_ioctl_wait(struct sync_fence *fence, unsigned long arg)
687 {
688 __s32 value;
690 if (copy_from_user(&value, (void __user *)arg, sizeof(value)))
691 return -EFAULT;
693 return sync_fence_wait(fence, value);
694 }
/*
 * SYNC_IOC_MERGE: merge @fence with the fence behind data.fd2 into a
 * third fence, installed on a fresh fd that is reported back to
 * userspace via data.fence.
 */
static long sync_fence_ioctl_merge(struct sync_fence *fence, unsigned long arg)
{
	int fd = get_unused_fd();
	int err;
	struct sync_fence *fence2, *fence3;
	struct sync_merge_data data;

	if (fd < 0)
		return fd;

	if (copy_from_user(&data, (void __user *)arg, sizeof(data))) {
		err = -EFAULT;
		goto err_put_fd;
	}

	fence2 = sync_fence_fdget(data.fd2);
	if (fence2 == NULL) {
		err = -ENOENT;
		goto err_put_fd;
	}

	/* the name comes from userspace; force NUL termination */
	data.name[sizeof(data.name) - 1] = '\0';
	fence3 = sync_fence_merge(data.name, fence, fence2);
	if (fence3 == NULL) {
		err = -ENOMEM;
		goto err_put_fence2;
	}

	/* report the fd number before publishing it via fd_install() */
	data.fence = fd;
	if (copy_to_user((void __user *)arg, &data, sizeof(data))) {
		err = -EFAULT;
		goto err_put_fence3;
	}

	sync_fence_install(fence3, fd);
	sync_fence_put(fence2);
	return 0;

err_put_fence3:
	sync_fence_put(fence3);

err_put_fence2:
	sync_fence_put(fence2);

err_put_fd:
	put_unused_fd(fd);
	return err;
}
/*
 * Serialize @pt into @data as a struct sync_pt_info followed by optional
 * driver data.  Returns the total bytes written (info->len), -ENOMEM if
 * @size cannot hold the fixed header, or a negative driver error.
 */
static int sync_fill_pt_info(struct sync_pt *pt, void *data, int size)
{
	struct sync_pt_info *info = data;
	int ret;

	if (size < sizeof(struct sync_pt_info))
		return -ENOMEM;

	info->len = sizeof(struct sync_pt_info);

	/* let the driver append its payload after the fixed header */
	if (pt->parent->ops->fill_driver_data) {
		ret = pt->parent->ops->fill_driver_data(pt, info->driver_data,
							size - sizeof(*info));
		if (ret < 0)
			return ret;

		info->len += ret;
	}

	strlcpy(info->obj_name, pt->parent->name, sizeof(info->obj_name));
	strlcpy(info->driver_name, pt->parent->ops->driver_name,
		sizeof(info->driver_name));
	info->status = pt->status;
	info->timestamp_ns = ktime_to_ns(pt->timestamp);

	return info->len;
}
/*
 * SYNC_IOC_FENCE_INFO: fills a user-supplied sync_fence_info_data buffer
 * with the fence's name/status followed by one sync_pt_info record per
 * pt.  The user passes the buffer size in the leading __u32; the kernel
 * caps its working allocation (and thus the output) at 4096 bytes.
 */
static long sync_fence_ioctl_fence_info(struct sync_fence *fence,
					unsigned long arg)
{
	struct sync_fence_info_data *data;
	struct list_head *pos;
	__u32 size;
	__u32 len = 0;
	int ret;

	if (copy_from_user(&size, (void __user *)arg, sizeof(size)))
		return -EFAULT;

	if (size < sizeof(struct sync_fence_info_data))
		return -EINVAL;

	/* bound the kernel allocation regardless of the requested size */
	if (size > 4096)
		size = 4096;

	data = kzalloc(size, GFP_KERNEL);
	if (data == NULL)
		return -ENOMEM;

	strlcpy(data->name, fence->name, sizeof(data->name));
	data->status = fence->status;
	len = sizeof(struct sync_fence_info_data);

	list_for_each(pos, &fence->pt_list_head) {
		struct sync_pt *pt =
			container_of(pos, struct sync_pt, pt_list);

		ret = sync_fill_pt_info(pt, (u8 *)data + len, size - len);

		if (ret < 0)
			goto out;

		len += ret;
	}

	data->len = len;

	if (copy_to_user((void __user *)arg, data, len))
		ret = -EFAULT;
	else
		ret = 0;

out:
	kfree(data);

	return ret;
}
/* dispatch the SYNC_IOC_* ioctls on a fence fd */
static long sync_fence_ioctl(struct file *file, unsigned int cmd,
			     unsigned long arg)
{
	struct sync_fence *fence = file->private_data;

	switch (cmd) {
	case SYNC_IOC_WAIT:
		return sync_fence_ioctl_wait(fence, arg);

	case SYNC_IOC_MERGE:
		return sync_fence_ioctl_merge(fence, arg);

	case SYNC_IOC_FENCE_INFO:
		return sync_fence_ioctl_fence_info(fence, arg);

	default:
		return -ENOTTY;
	}
}
843 #ifdef CONFIG_DEBUG_FS
/* human-readable name for a pt/fence status value */
static const char *sync_status_str(int status)
{
	if (status > 0)
		return "signaled";
	if (status == 0)
		return "active";
	return "error";
}
/*
 * Print one pt line: status, signal timestamp, and the driver's value
 * strings.
 * @fence: true when printing under a fence (prefixes the timeline name),
 *         false when printing under the timeline itself.
 */
static void sync_print_pt(struct seq_file *s, struct sync_pt *pt, bool fence)
{
	int status = pt->status;

	seq_printf(s, " %s%spt %s",
		   fence ? pt->parent->name : "",
		   fence ? "_" : "",
		   sync_status_str(status));
	/* only signaled/errored pts have a meaningful timestamp */
	if (pt->status) {
		struct timeval tv = ktime_to_timeval(pt->timestamp);
		seq_printf(s, "@%ld.%06ld", tv.tv_sec, tv.tv_usec);
	}

	/* prefer the string-based value hooks; fall back to print_pt */
	if (pt->parent->ops->timeline_value_str &&
	    pt->parent->ops->pt_value_str) {
		char value[64];
		pt->parent->ops->pt_value_str(pt, value, sizeof(value));
		seq_printf(s, ": %s", value);
		if (fence) {
			pt->parent->ops->timeline_value_str(pt->parent, value,
							    sizeof(value));
			seq_printf(s, " / %s", value);
		}
	} else if (pt->parent->ops->print_pt) {
		seq_printf(s, ": ");
		pt->parent->ops->print_pt(s, pt);
	}

	seq_printf(s, "\n");
}
/* print one timeline header plus a line for each of its child pts */
static void sync_print_obj(struct seq_file *s, struct sync_timeline *obj)
{
	struct list_head *pos;
	unsigned long flags;

	seq_printf(s, "%s %s", obj->name, obj->ops->driver_name);

	/* prefer the string-based value hook; fall back to print_obj */
	if (obj->ops->timeline_value_str) {
		char value[64];
		obj->ops->timeline_value_str(obj, value, sizeof(value));
		seq_printf(s, ": %s", value);
	} else if (obj->ops->print_obj) {
		seq_printf(s, ": ");
		obj->ops->print_obj(s, obj);
	}

	seq_printf(s, "\n");

	spin_lock_irqsave(&obj->child_list_lock, flags);
	list_for_each(pos, &obj->child_list_head) {
		struct sync_pt *pt =
			container_of(pos, struct sync_pt, child_list);
		sync_print_pt(s, pt, false);
	}
	spin_unlock_irqrestore(&obj->child_list_lock, flags);
}
/* print one fence header, its pts, and any queued async waiters */
static void sync_print_fence(struct seq_file *s, struct sync_fence *fence)
{
	struct list_head *pos;
	unsigned long flags;

	seq_printf(s, "[%p] %s: %s\n", fence, fence->name,
		   sync_status_str(fence->status));

	list_for_each(pos, &fence->pt_list_head) {
		struct sync_pt *pt =
			container_of(pos, struct sync_pt, pt_list);
		sync_print_pt(s, pt, true);
	}

	spin_lock_irqsave(&fence->waiter_list_lock, flags);
	list_for_each(pos, &fence->waiter_list_head) {
		struct sync_fence_waiter *waiter =
			container_of(pos, struct sync_fence_waiter,
				     waiter_list);

		seq_printf(s, "waiter %pF\n", waiter->callback);
	}
	spin_unlock_irqrestore(&fence->waiter_list_lock, flags);
}
/* render every registered timeline and fence into @s; also used by
 * sync_dump() to print the same report to the kernel log */
static int sync_debugfs_show(struct seq_file *s, void *unused)
{
	unsigned long flags;
	struct list_head *pos;

	seq_printf(s, "objs:\n--------------\n");

	spin_lock_irqsave(&sync_timeline_list_lock, flags);
	list_for_each(pos, &sync_timeline_list_head) {
		struct sync_timeline *obj =
			container_of(pos, struct sync_timeline,
				     sync_timeline_list);

		sync_print_obj(s, obj);
		seq_printf(s, "\n");
	}
	spin_unlock_irqrestore(&sync_timeline_list_lock, flags);

	seq_printf(s, "fences:\n--------------\n");

	spin_lock_irqsave(&sync_fence_list_lock, flags);
	list_for_each(pos, &sync_fence_list_head) {
		struct sync_fence *fence =
			container_of(pos, struct sync_fence, sync_fence_list);

		sync_print_fence(s, fence);
		seq_printf(s, "\n");
	}
	spin_unlock_irqrestore(&sync_fence_list_lock, flags);
	return 0;
}
/* debugfs "sync" node: a single seq_file rendered by sync_debugfs_show() */
static int sync_debugfs_open(struct inode *inode, struct file *file)
{
	return single_open(file, sync_debugfs_show, inode->i_private);
}

static const struct file_operations sync_debugfs_fops = {
	.open = sync_debugfs_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};
/* create the read-only debugfs node; the return value of
 * debugfs_create_file() is ignored — sync works without the node */
static __init int sync_debugfs_init(void)
{
	debugfs_create_file("sync", S_IRUGO, NULL, NULL, &sync_debugfs_fops);
	return 0;
}
late_initcall(sync_debugfs_init);
987 #define DUMP_CHUNK 256
988 static char sync_dump_buf[64 * 1024];
989 void sync_dump(void)
990 {
991 struct seq_file s = {
992 .buf = sync_dump_buf,
993 .size = sizeof(sync_dump_buf) - 1,
994 };
995 int i;
997 sync_debugfs_show(&s, NULL);
999 for (i = 0; i < s.count; i += DUMP_CHUNK) {
1000 if ((s.count - i) > DUMP_CHUNK) {
1001 char c = s.buf[i + DUMP_CHUNK];
1002 s.buf[i + DUMP_CHUNK] = 0;
1003 pr_cont("%s", s.buf + i);
1004 s.buf[i + DUMP_CHUNK] = c;
1005 } else {
1006 s.buf[s.count] = 0;
1007 pr_cont("%s", s.buf + i);
1008 }
1009 }
1010 }
#else
/* CONFIG_DEBUG_FS disabled: sync_dump() becomes a no-op */
static void sync_dump(void)
{
}
#endif