diff options
author:    Ken Wang <Qingqing.Wang@amd.com>    2015-07-09 00:48:25 -0500
committer: Alex Deucher                        2015-08-05 12:47:52 -0500
commit:    5b01908d1c1f94700e68381afe948045d1ef54a1 (patch)
tree:      b7a31339458e65e588c284480ef57046ede55b06
parent:    8097d08ee46ea032c66ce9db0a4db585cd0c796b (diff)
download:  external-libgbm-5b01908d1c1f94700e68381afe948045d1ef54a1.tar.gz (also .tar.xz, .zip)
amdgpu: add base_preferred parameter to amdgpu_vamgr_find_va
The base_preferred parameter is added to amdgpu_vamgr_find_va
so that the UMD can specify a preferred VA address when allocating.
Signed-off-by: Ken Wang <Qingqing.Wang@amd.com>
Reviewed-by: Jammy Zhou <Jammy.Zhou@amd.com>
 amdgpu/amdgpu_bo.c       |  2 +-
 amdgpu/amdgpu_internal.h |  4 ++--
 amdgpu/amdgpu_vamgr.c    | 46 ++++++++++++++++++++++++++++++----------------
 3 files changed, 35 insertions(+), 17 deletions(-)
diff --git a/amdgpu/amdgpu_bo.c b/amdgpu/amdgpu_bo.c index 07df1c56..1ef15162 100644 --- a/amdgpu/amdgpu_bo.c +++ b/amdgpu/amdgpu_bo.c | |||
@@ -62,7 +62,7 @@ static int amdgpu_bo_map(amdgpu_bo_handle bo, uint32_t alignment) | |||
62 | memset(&va, 0, sizeof(va)); | 62 | memset(&va, 0, sizeof(va)); |
63 | 63 | ||
64 | bo->virtual_mc_base_address = amdgpu_vamgr_find_va(dev->vamgr, | 64 | bo->virtual_mc_base_address = amdgpu_vamgr_find_va(dev->vamgr, |
65 | bo->alloc_size, alignment); | 65 | bo->alloc_size, alignment, 0); |
66 | 66 | ||
67 | if (bo->virtual_mc_base_address == AMDGPU_INVALID_VA_ADDRESS) | 67 | if (bo->virtual_mc_base_address == AMDGPU_INVALID_VA_ADDRESS) |
68 | return -ENOSPC; | 68 | return -ENOSPC; |
diff --git a/amdgpu/amdgpu_internal.h b/amdgpu/amdgpu_internal.h index ee1cb61c..8e6fbf40 100644 --- a/amdgpu/amdgpu_internal.h +++ b/amdgpu/amdgpu_internal.h | |||
@@ -123,8 +123,8 @@ struct amdgpu_bo_va_mgr* amdgpu_vamgr_get_global(struct amdgpu_device *dev); | |||
123 | 123 | ||
124 | void amdgpu_vamgr_reference(struct amdgpu_bo_va_mgr **dst, struct amdgpu_bo_va_mgr *src); | 124 | void amdgpu_vamgr_reference(struct amdgpu_bo_va_mgr **dst, struct amdgpu_bo_va_mgr *src); |
125 | 125 | ||
126 | uint64_t amdgpu_vamgr_find_va(struct amdgpu_bo_va_mgr *mgr, | 126 | uint64_t amdgpu_vamgr_find_va(struct amdgpu_bo_va_mgr *mgr, uint64_t size, |
127 | uint64_t size, uint64_t alignment); | 127 | uint64_t alignment, uint64_t base_preferred); |
128 | 128 | ||
129 | void amdgpu_vamgr_free_va(struct amdgpu_bo_va_mgr *mgr, uint64_t va, | 129 | void amdgpu_vamgr_free_va(struct amdgpu_bo_va_mgr *mgr, uint64_t va, |
130 | uint64_t size); | 130 | uint64_t size); |
diff --git a/amdgpu/amdgpu_vamgr.c b/amdgpu/amdgpu_vamgr.c index b15729fa..2328e5d7 100644 --- a/amdgpu/amdgpu_vamgr.c +++ b/amdgpu/amdgpu_vamgr.c | |||
@@ -68,8 +68,8 @@ void amdgpu_vamgr_reference(struct amdgpu_bo_va_mgr **dst, | |||
68 | *dst = src; | 68 | *dst = src; |
69 | } | 69 | } |
70 | 70 | ||
71 | uint64_t amdgpu_vamgr_find_va(struct amdgpu_bo_va_mgr *mgr, | 71 | uint64_t amdgpu_vamgr_find_va(struct amdgpu_bo_va_mgr *mgr, uint64_t size, |
72 | uint64_t size, uint64_t alignment) | 72 | uint64_t alignment, uint64_t base_required) |
73 | { | 73 | { |
74 | struct amdgpu_bo_va_hole *hole, *n; | 74 | struct amdgpu_bo_va_hole *hole, *n; |
75 | uint64_t offset = 0, waste = 0; | 75 | uint64_t offset = 0, waste = 0; |
@@ -77,16 +77,27 @@ uint64_t amdgpu_vamgr_find_va(struct amdgpu_bo_va_mgr *mgr, | |||
77 | alignment = MAX2(alignment, mgr->va_alignment); | 77 | alignment = MAX2(alignment, mgr->va_alignment); |
78 | size = ALIGN(size, mgr->va_alignment); | 78 | size = ALIGN(size, mgr->va_alignment); |
79 | 79 | ||
80 | if (base_required % alignment) | ||
81 | return AMDGPU_INVALID_VA_ADDRESS; | ||
82 | |||
80 | pthread_mutex_lock(&mgr->bo_va_mutex); | 83 | pthread_mutex_lock(&mgr->bo_va_mutex); |
81 | /* TODO: using more appropriate way to track the holes */ | 84 | /* TODO: using more appropriate way to track the holes */ |
82 | /* first look for a hole */ | 85 | /* first look for a hole */ |
83 | LIST_FOR_EACH_ENTRY_SAFE(hole, n, &mgr->va_holes, list) { | 86 | LIST_FOR_EACH_ENTRY_SAFE(hole, n, &vamgr.va_holes, list) { |
84 | offset = hole->offset; | 87 | if (base_required) { |
85 | waste = offset % alignment; | 88 | if(hole->offset > base_required || |
86 | waste = waste ? alignment - waste : 0; | 89 | (hole->offset + hole->size) < (base_required + size)) |
87 | offset += waste; | 90 | continue; |
88 | if (offset >= (hole->offset + hole->size)) { | 91 | waste = base_required - hole->offset; |
89 | continue; | 92 | offset = base_required; |
93 | } else { | ||
94 | offset = hole->offset; | ||
95 | waste = offset % alignment; | ||
96 | waste = waste ? alignment - waste : 0; | ||
97 | offset += waste; | ||
98 | if (offset >= (hole->offset + hole->size)) { | ||
99 | continue; | ||
100 | } | ||
90 | } | 101 | } |
91 | if (!waste && hole->size == size) { | 102 | if (!waste && hole->size == size) { |
92 | offset = hole->offset; | 103 | offset = hole->offset; |
@@ -97,8 +108,7 @@ uint64_t amdgpu_vamgr_find_va(struct amdgpu_bo_va_mgr *mgr, | |||
97 | } | 108 | } |
98 | if ((hole->size - waste) > size) { | 109 | if ((hole->size - waste) > size) { |
99 | if (waste) { | 110 | if (waste) { |
100 | n = calloc(1, | 111 | n = calloc(1, sizeof(struct amdgpu_bo_va_hole)); |
101 | sizeof(struct amdgpu_bo_va_hole)); | ||
102 | n->size = waste; | 112 | n->size = waste; |
103 | n->offset = hole->offset; | 113 | n->offset = hole->offset; |
104 | list_add(&n->list, &hole->list); | 114 | list_add(&n->list, &hole->list); |
@@ -115,9 +125,16 @@ uint64_t amdgpu_vamgr_find_va(struct amdgpu_bo_va_mgr *mgr, | |||
115 | } | 125 | } |
116 | } | 126 | } |
117 | 127 | ||
118 | offset = mgr->va_offset; | 128 | if (base_required) { |
119 | waste = offset % alignment; | 129 | if (base_required < mgr->va_offset) |
120 | waste = waste ? alignment - waste : 0; | 130 | return AMDGPU_INVALID_VA_ADDRESS; |
131 | offset = mgr->va_offset; | ||
132 | waste = base_required - mgr->va_offset; | ||
133 | } else { | ||
134 | offset = mgr->va_offset; | ||
135 | waste = offset % alignment; | ||
136 | waste = waste ? alignment - waste : 0; | ||
137 | } | ||
121 | 138 | ||
122 | if (offset + waste + size > mgr->va_max) { | 139 | if (offset + waste + size > mgr->va_max) { |
123 | pthread_mutex_unlock(&mgr->bo_va_mutex); | 140 | pthread_mutex_unlock(&mgr->bo_va_mutex); |
@@ -130,6 +147,7 @@ uint64_t amdgpu_vamgr_find_va(struct amdgpu_bo_va_mgr *mgr, | |||
130 | n->offset = offset; | 147 | n->offset = offset; |
131 | list_add(&n->list, &mgr->va_holes); | 148 | list_add(&n->list, &mgr->va_holes); |
132 | } | 149 | } |
150 | |||
133 | offset += waste; | 151 | offset += waste; |
134 | mgr->va_offset += size + waste; | 152 | mgr->va_offset += size + waste; |
135 | pthread_mutex_unlock(&mgr->bo_va_mutex); | 153 | pthread_mutex_unlock(&mgr->bo_va_mutex); |