diff options
Diffstat (limited to 'amdgpu/amdgpu.h')
-rw-r--r-- | amdgpu/amdgpu.h | 1276 |
1 files changed, 1276 insertions, 0 deletions
diff --git a/amdgpu/amdgpu.h b/amdgpu/amdgpu.h new file mode 100644 index 00000000..11a86eff --- /dev/null +++ b/amdgpu/amdgpu.h | |||
@@ -0,0 +1,1276 @@ | |||
1 | /* | ||
2 | * Copyright 2014 Advanced Micro Devices, Inc. | ||
3 | * | ||
4 | * Permission is hereby granted, free of charge, to any person obtaining a | ||
5 | * copy of this software and associated documentation files (the "Software"), | ||
6 | * to deal in the Software without restriction, including without limitation | ||
7 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, | ||
8 | * and/or sell copies of the Software, and to permit persons to whom the | ||
9 | * Software is furnished to do so, subject to the following conditions: | ||
10 | * | ||
11 | * The above copyright notice and this permission notice shall be included in | ||
12 | * all copies or substantial portions of the Software. | ||
13 | * | ||
14 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | ||
15 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | ||
16 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL | ||
17 | * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR | ||
18 | * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, | ||
19 | * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR | ||
20 | * OTHER DEALINGS IN THE SOFTWARE. | ||
21 | * | ||
22 | */ | ||
23 | |||
24 | /** | ||
25 | * \file amdgpu.h | ||
26 | * | ||
27 | * Declare public libdrm_amdgpu API | ||
28 | * | ||
29 | * This file define API exposed by libdrm_amdgpu library. | ||
30 | * User wanted to use libdrm_amdgpu functionality must include | ||
31 | * this file. | ||
32 | * | ||
33 | */ | ||
34 | #ifndef _AMDGPU_H_ | ||
35 | #define _AMDGPU_H_ | ||
36 | |||
37 | #include <stdint.h> | ||
38 | #include <stdbool.h> | ||
39 | |||
40 | struct drm_amdgpu_info_hw_ip; | ||
41 | |||
42 | /*--------------------------------------------------------------------------*/ | ||
43 | /* --------------------------- Defines ------------------------------------ */ | ||
44 | /*--------------------------------------------------------------------------*/ | ||
45 | |||
/**
 * Define max. number of Command Buffers (IB) which could be sent to the single
 * hardware IP to accommodate CE/DE requirements
 *
 * \sa amdgpu_cs_ib_info
 */
#define AMDGPU_CS_MAX_IBS_PER_SUBMIT		4

/**
 * Special timeout value: wait indefinitely (no timeout).
 *
 * \sa amdgpu_cs_query_fence, amdgpu_bo_wait_for_idle()
 */
#define AMDGPU_TIMEOUT_INFINITE			0xffffffffffffffffull

/**
 * The special flag for GFX submission to identify that this is a CE IB
 * \sa amdgpu_cs_ib_info
 */
#define AMDGPU_CS_GFX_IB_CE			0x1

/**
 * The special flag to mark that this IB will be re-used
 * by the client and should not be automatically returned back
 * to the free pool by libdrm_amdgpu when the submission is completed.
 *
 * \sa amdgpu_cs_ib_info
 */
#define AMDGPU_CS_REUSE_IB			0x2

/**
 * The special resource flag for IB submission.
 * When VRAM is full, some resources may be moved to GTT to make place
 * for other resources which want to be in VRAM. This flag affects the order
 * in which resources are moved back to VRAM until there is no space there.
 * The resources with the highest priority will be moved first.
 * The value can be between 0 and 15, inclusive.
 */
#define AMDGPU_IB_RESOURCE_PRIORITY(x)		((x) & 0xf)
84 | |||
85 | /*--------------------------------------------------------------------------*/ | ||
86 | /* ----------------------------- Enums ------------------------------------ */ | ||
87 | /*--------------------------------------------------------------------------*/ | ||
88 | |||
/**
 * Enum describing possible handle types used when sharing buffers.
 *
 * \sa amdgpu_bo_import(), amdgpu_bo_export()
 *
 */
enum amdgpu_bo_handle_type {
	/** GEM flink name (needs DRM authentication, used by DRI2) */
	amdgpu_bo_handle_type_gem_flink_name = 0,

	/** KMS handle which is used by all driver ioctls */
	amdgpu_bo_handle_type_kms = 1,

	/** DMA-buf fd handle */
	amdgpu_bo_handle_type_dma_buf_fd = 2
};
105 | |||
/**
 * Enum describing possible context reset states
 *
 * \sa amdgpu_cs_query_reset_state()
 *
 */
enum amdgpu_cs_ctx_reset_state {
	/** No reset was detected */
	amdgpu_cs_reset_no_error = 0,

	/** Reset/TDR was detected and this context caused it */
	amdgpu_cs_reset_guilty = 1,

	/** Reset/TDR was detected but was caused by another context */
	amdgpu_cs_reset_innocent = 2,

	/** Reset/TDR was detected; the cause is unknown */
	amdgpu_cs_reset_unknown = 3
};
125 | |||
/**
 * For performance reasons and to simplify the logic, libdrm_amdgpu will
 * handle IBs of only some pre-defined sizes.
 *
 * \sa amdgpu_cs_alloc_ib()
 */
enum amdgpu_cs_ib_size {
	amdgpu_cs_ib_size_4K = 1,
	amdgpu_cs_ib_size_16K = 2,
	amdgpu_cs_ib_size_32K = 3,
	amdgpu_cs_ib_size_64K = 4,
	amdgpu_cs_ib_size_128K = 5
};

/** Number of entries in an array indexed by enum amdgpu_cs_ib_size.
 * NOTE(review): this is 6 (not 5) because the enum values start at 1,
 * so index 0 is unused — presumably sized for direct indexing; verify
 * against users. */
#define AMDGPU_CS_IB_SIZE_NUM 6
143 | |||
144 | /*--------------------------------------------------------------------------*/ | ||
145 | /* -------------------------- Datatypes ----------------------------------- */ | ||
146 | /*--------------------------------------------------------------------------*/ | ||
147 | |||
/**
 * Define opaque pointer to context associated with fd.
 * This context will be returned as the result of the
 * "initialize" function and should be passed as the first
 * parameter to any API call.
 */
typedef struct amdgpu_device *amdgpu_device_handle;

/**
 * Define GPU Context type as pointer to opaque structure.
 * An example of a GPU Context is the "rendering" context associated
 * with an OpenGL context (glCreateContext).
 */
typedef struct amdgpu_context *amdgpu_context_handle;

/**
 * Define handle for amdgpu resources: buffer, GDS, etc.
 */
typedef struct amdgpu_bo *amdgpu_bo_handle;

/**
 * Define handle to be used when dealing with command
 * buffers (a.k.a. IBs)
 *
 */
typedef struct amdgpu_ib *amdgpu_ib_handle;
174 | |||
175 | |||
176 | /*--------------------------------------------------------------------------*/ | ||
177 | /* -------------------------- Structures ---------------------------------- */ | ||
178 | /*--------------------------------------------------------------------------*/ | ||
179 | |||
/**
 * Structure describing a memory allocation request
 *
 * \sa amdgpu_bo_alloc()
 *
 */
struct amdgpu_bo_alloc_request {
	/** Allocation size, in bytes. It must be aligned correctly. */
	uint64_t alloc_size;

	/**
	 * It may be required to have some specific alignment requirements
	 * for physical back-up storage (e.g. for displayable surface).
	 * If 0 there is no special alignment requirement
	 */
	uint64_t phys_alignment;

	/**
	 * UMD should specify where to allocate memory and how it
	 * will be accessed by the CPU.
	 */
	uint32_t preferred_heap;

	/** Additional flags passed on allocation */
	uint64_t flags;
};
206 | |||
/**
 * Structure describing the result of a memory allocation request
 *
 * \sa amdgpu_bo_alloc()
 */
struct amdgpu_bo_alloc_result {
	/** Assigned virtual MC Base Address */
	uint64_t virtual_mc_base_address;

	/** Handle of allocated memory to be used by the given process only. */
	amdgpu_bo_handle buf_handle;
};
219 | |||
/**
 * Special UMD specific information associated with a buffer.
 *
 * It may be needed to pass some buffer characteristics as part
 * of buffer sharing. Such information is defined by the UMD and is
 * opaque for libdrm_amdgpu as well as for the kernel driver.
 *
 * \sa amdgpu_bo_set_metadata(), amdgpu_bo_query_info(),
 *     amdgpu_bo_import(), amdgpu_bo_export()
 *
 */
struct amdgpu_bo_metadata {
	/** Special flag associated with surface */
	uint64_t flags;

	/**
	 * ASIC-specific tiling information (also used by DCE).
	 * The encoding is defined by the AMDGPU_TILING_* definitions.
	 */
	uint64_t tiling_info;

	/** Size of metadata associated with the buffer, in bytes. */
	uint32_t size_metadata;

	/** UMD specific metadata. Opaque for kernel */
	uint32_t umd_metadata[64];
};
247 | |||
/**
 * Structure describing an allocated buffer. A client may need
 * to query such information as part of the buffer-sharing mechanism.
 *
 * \sa amdgpu_bo_set_metadata(), amdgpu_bo_query_info(),
 *     amdgpu_bo_import(), amdgpu_bo_export()
 */
struct amdgpu_bo_info {
	/** Allocated memory size */
	uint64_t alloc_size;

	/**
	 * It may be required to have some specific alignment requirements
	 * for physical back-up storage.
	 */
	uint64_t phys_alignment;

	/**
	 * Assigned virtual MC Base Address.
	 * \note This information will be returned only if this buffer was
	 * allocated in the same process; otherwise 0 will be returned.
	 */
	uint64_t virtual_mc_base_address;

	/** Heap where to allocate memory. */
	uint32_t preferred_heap;

	/** Additional allocation flags. */
	uint64_t alloc_flags;

	/** Metadata associated with buffer, if any. */
	struct amdgpu_bo_metadata metadata;
};
281 | |||
/**
 * Structure with information about an "imported" buffer
 *
 * \sa amdgpu_bo_import()
 *
 */
struct amdgpu_bo_import_result {
	/** Handle of memory/buffer to use */
	amdgpu_bo_handle buf_handle;

	/** Buffer size */
	uint64_t alloc_size;

	/** Assigned virtual MC Base Address */
	uint64_t virtual_mc_base_address;
};
298 | |||
299 | |||
/**
 *
 * Structure to describe GDS partitioning information.
 * \note OA and GWS resources are associated with a GDS partition
 *
 * \sa amdgpu_gpu_resource_query_gds_info()
 *
 */
struct amdgpu_gds_resource_info {
	uint32_t gds_gfx_partition_size;    /**< GDS size of the gfx partition */
	uint32_t compute_partition_size;    /**< GDS size of the compute partition */
	uint32_t gds_total_size;            /**< Total GDS size */
	uint32_t gws_per_gfx_partition;     /**< GWS resources per gfx partition */
	uint32_t gws_per_compute_partition; /**< GWS resources per compute partition */
	uint32_t oa_per_gfx_partition;      /**< OA resources per gfx partition */
	uint32_t oa_per_compute_partition;  /**< OA resources per compute partition */
};
317 | |||
318 | |||
319 | |||
/**
 * Structure describing the result of a request to allocate GDS
 *
 * \sa amdgpu_gpu_resource_gds_alloc()
 *
 */
struct amdgpu_gds_alloc_info {
	/** Handle assigned to GDS allocation */
	amdgpu_bo_handle resource_handle;

	/** How much was really allocated */
	uint32_t gds_memory_size;

	/** Number of GWS resources allocated */
	uint32_t gws;

	/** Number of OA resources allocated */
	uint32_t oa;
};
339 | |||
/**
 * Structure describing an allocated command buffer (a.k.a. IB)
 *
 * \sa amdgpu_cs_alloc_ib()
 *
 */
struct amdgpu_cs_ib_alloc_result {
	/** IB allocation handle */
	amdgpu_ib_handle handle;

	/** Assigned GPU VM MC Address of command buffer */
	uint64_t mc_address;

	/** Address to be used for CPU access */
	void *cpu;
};
356 | |||
/**
 * Structure describing an IB to submit
 *
 * \sa amdgpu_cs_request, amdgpu_cs_submit()
 *
 */
struct amdgpu_cs_ib_info {
	/** Special flags (e.g. AMDGPU_CS_GFX_IB_CE, AMDGPU_CS_REUSE_IB) */
	uint64_t flags;

	/** Handle of command buffer */
	amdgpu_ib_handle ib_handle;

	/**
	 * Size of Command Buffer to be submitted.
	 *   - The size is in units of dwords (4 bytes).
	 *   - Must be less or equal to the size of allocated IB
	 *   - Could be 0
	 */
	uint32_t size;
};
378 | |||
/**
 * Structure describing a submission request
 *
 * \note We could have several IBs as a packet, e.g. the CE, CE, DE case
 *       for gfx
 *
 * \sa amdgpu_cs_submit()
 */
struct amdgpu_cs_request {
	/** Specify flags with additional information */
	uint64_t flags;

	/** Specify HW IP block type to which to send the IB. */
	unsigned ip_type;

	/** IP instance index if there are several IPs of the same type. */
	unsigned ip_instance;

	/**
	 * Specify ring index of the IP. We could have several rings
	 * in the same IP. E.g. 0 for SDMA0 and 1 for SDMA1.
	 */
	uint32_t ring;

	/**
	 * Specify number of resource handles passed.
	 * Size of the 'resources' array.
	 *
	 */
	uint32_t number_of_resources;

	/** Array of resources used by submission. */
	amdgpu_bo_handle *resources;

	/** Array of resource flags. This is optional and can be NULL. */
	uint8_t *resource_flags;

	/** Number of IBs to submit in the field ibs. */
	uint32_t number_of_ibs;

	/**
	 * IBs to submit. These IBs will be submitted together as a single
	 * entity.
	 */
	struct amdgpu_cs_ib_info *ibs;
};
423 | |||
/**
 * Structure describing a request to check submission state using a fence
 *
 * \sa amdgpu_cs_query_fence_status()
 *
 */
struct amdgpu_cs_query_fence {

	/** In which context the IB was sent to execution */
	amdgpu_context_handle context;

	/** Timeout in nanoseconds. */
	uint64_t timeout_ns;

	/** To which HW IP type the fence belongs */
	unsigned ip_type;

	/** IP instance index if there are several IPs of the same type. */
	unsigned ip_instance;

	/** Ring index of the HW IP */
	uint32_t ring;

	/** Flags */
	uint64_t flags;

	/** Specify the fence for which we need to check the
	 * submission status. */
	uint64_t fence;
};
454 | |||
/**
 * Structure which provides information about GPU VM MC Address space
 * alignment requirements
 *
 * \sa amdgpu_query_buffer_size_alignment()
 */
struct amdgpu_buffer_size_alignments {
	/** Size alignment requirement for allocation in
	 * local memory */
	uint64_t size_local;

	/**
	 * Size alignment requirement for allocation in remote memory
	 */
	uint64_t size_remote;
};
471 | |||
472 | |||
/**
 * Structure which provides information about a heap
 *
 * \sa amdgpu_query_heap_info()
 *
 */
struct amdgpu_heap_info {
	/** Theoretical max. available memory in the given heap */
	uint64_t heap_size;

	/**
	 * Number of bytes allocated in the heap. This includes all processes
	 * and private allocations in the kernel. It changes when new buffers
	 * are allocated, freed, and moved. It cannot be larger than
	 * heap_size.
	 */
	uint64_t heap_usage;

	/**
	 * Theoretical possible max. size of buffer which
	 * could be allocated in the given heap
	 */
	uint64_t max_allocation;
};
497 | |||
498 | |||
499 | |||
/**
 * Describe GPU h/w info needed for correct UMD initialization
 *
 * \sa amdgpu_query_gpu_info()
 */
struct amdgpu_gpu_info {
	/** Asic id */
	uint32_t asic_id;
	/** Chip revision */
	uint32_t chip_rev;
	/** Chip external revision */
	uint32_t chip_external_rev;
	/** Family ID */
	uint32_t family_id;
	/** Special flags */
	uint64_t ids_flags;
	/** max engine clock */
	uint64_t max_engine_clk;
	/** number of shader engines */
	uint32_t num_shader_engines;
	/** number of shader arrays per engine */
	uint32_t num_shader_arrays_per_engine;
	/** Number of available good shader pipes */
	uint32_t avail_quad_shader_pipes;
	/** Max. number of shader pipes (including good and bad pipes) */
	uint32_t max_quad_shader_pipes;
	/** Number of parameter cache entries per shader quad pipe */
	uint32_t cache_entries_per_quad_pipe;
	/** Number of available graphics contexts */
	uint32_t num_hw_gfx_contexts;
	/** Number of render backend pipes */
	uint32_t rb_pipes;
	/** Active render backend pipe number */
	uint32_t active_rb_pipes;
	/** Enabled render backend pipe mask */
	uint32_t enabled_rb_pipes_mask;
	/** Frequency of GPU Counter */
	uint32_t gpu_counter_freq;
	/** CC_RB_BACKEND_DISABLE.BACKEND_DISABLE per SE */
	uint32_t backend_disable[4];
	/** Value of MC_ARB_RAMCFG register */
	uint32_t mc_arb_ramcfg;
	/** Value of GB_ADDR_CONFIG */
	uint32_t gb_addr_cfg;
	/** Values of the GB_TILE_MODE0..31 registers */
	uint32_t gb_tile_mode[32];
	/** Values of GB_MACROTILE_MODE0..15 registers */
	uint32_t gb_macro_tile_mode[16];
	/** Value of PA_SC_RASTER_CONFIG register per SE */
	uint32_t pa_sc_raster_cfg[4];
	/** Value of PA_SC_RASTER_CONFIG_1 register per SE */
	uint32_t pa_sc_raster_cfg1[4];
	/* CU info */
	uint32_t cu_active_number;
	uint32_t cu_ao_mask;
	uint32_t cu_bitmap[4][4];
};
557 | |||
558 | |||
559 | /*--------------------------------------------------------------------------*/ | ||
560 | /*------------------------- Functions --------------------------------------*/ | ||
561 | /*--------------------------------------------------------------------------*/ | ||
562 | |||
563 | /* | ||
564 | * Initialization / Cleanup | ||
565 | * | ||
566 | */ | ||
567 | |||
568 | |||
/**
 * Initialize the library for the given device fd.
 *
 * \param   fd            - \c [in]  File descriptor for AMD GPU device
 *                                   received previously as the result of
 *                                   e.g. drmOpen() call.
 *                                   For legacy fd type, the DRI2/DRI3
 *                                   authentication should be done before
 *                                   calling this function.
 * \param   major_version - \c [out] Major version of library. It is assumed
 *                                   that adding new functionality will cause
 *                                   increase in major version
 * \param   minor_version - \c [out] Minor version of library
 * \param   device_handle - \c [out] Pointer to opaque context which should
 *                                   be passed as the first parameter on each
 *                                   API call
 *
 *
 * \return   0 on success\n
 *          >0 - AMD specific error code\n
 *          <0 - Negative POSIX Error code
 *
 *
 * \sa amdgpu_device_deinitialize()
 */
int amdgpu_device_initialize(int fd,
			     uint32_t *major_version,
			     uint32_t *minor_version,
			     amdgpu_device_handle *device_handle);
596 | |||
597 | |||
598 | |||
/**
 *
 * When access to the library is no longer needed, this special
 * function must be called, giving an opportunity to clean up any
 * resources if needed.
 *
 * \param   device_handle - \c [in]  Context associated with file
 *                                   descriptor for AMD GPU device
 *                                   received previously as the
 *                                   result e.g. of drmOpen() call.
 *
 * \return  0 on success\n
 *         >0 - AMD specific error code\n
 *         <0 - Negative POSIX Error code
 *
 * \sa amdgpu_device_initialize()
 *
 */
int amdgpu_device_deinitialize(amdgpu_device_handle device_handle);
618 | |||
619 | |||
620 | /* | ||
621 | * Memory Management | ||
622 | * | ||
623 | */ | ||
624 | |||
/**
 * Allocate memory to be used by the UMD for GPU related operations
 *
 * \param   dev		 - \c [in] Device handle.
 *				   See #amdgpu_device_initialize()
 * \param   alloc_buffer - \c [in] Pointer to the structure describing an
 *				   allocation request
 * \param   info	 - \c [out] Pointer to structure which returns
 *				    information about allocated memory
 *
 * \return   0 on success\n
 *          >0 - AMD specific error code\n
 *          <0 - Negative POSIX Error code
 *
 * \sa amdgpu_bo_free()
 */
int amdgpu_bo_alloc(amdgpu_device_handle dev,
		    struct amdgpu_bo_alloc_request *alloc_buffer,
		    struct amdgpu_bo_alloc_result *info);
644 | |||
/**
 * Associate opaque data with a buffer to be queried by another UMD
 *
 * \param   buf_handle - \c [in] Buffer handle
 * \param   info       - \c [in] Metadata to associate with the buffer
 *
 * \return   0 on success\n
 *          >0 - AMD specific error code\n
 *          <0 - Negative POSIX Error code
 */
int amdgpu_bo_set_metadata(amdgpu_bo_handle buf_handle,
			   struct amdgpu_bo_metadata *info);
658 | |||
/**
 * Query buffer information, including metadata previously associated with
 * the buffer.
 *
 * \param   buf_handle - \c [in]  Buffer handle
 * \param   info       - \c [out] Structure describing buffer
 *
 * \return   0 on success\n
 *          >0 - AMD specific error code\n
 *          <0 - Negative POSIX Error code
 *
 * \sa amdgpu_bo_set_metadata(), amdgpu_bo_alloc()
 */
int amdgpu_bo_query_info(amdgpu_bo_handle buf_handle,
			 struct amdgpu_bo_info *info);
676 | |||
/**
 * Allow others to get access to the buffer
 *
 * \param   buf_handle    - \c [in]  Buffer handle
 * \param   type          - \c [in]  Type of handle requested
 * \param   shared_handle - \c [out] Special "shared" handle
 *
 * \return   0 on success\n
 *          >0 - AMD specific error code\n
 *          <0 - Negative POSIX Error code
 *
 * \sa amdgpu_bo_import()
 *
 */
int amdgpu_bo_export(amdgpu_bo_handle buf_handle,
		     enum amdgpu_bo_handle_type type,
		     uint32_t *shared_handle);
696 | |||
/**
 * Request access to a "shared" buffer
 *
 * \param   dev           - \c [in]  Device handle.
 *                                   See #amdgpu_device_initialize()
 * \param   type          - \c [in]  Type of handle requested
 * \param   shared_handle - \c [in]  Shared handle received as the result of
 *                                   the "export" operation
 * \param   output        - \c [out] Pointer to structure with information
 *                                   about imported buffer
 *
 * \return   0 on success\n
 *          >0 - AMD specific error code\n
 *          <0 - Negative POSIX Error code
 *
 * \note  Buffer must be "imported" only using a new "fd" (different from
 *        the one used by the "exporter").
 *
 * \sa amdgpu_bo_export()
 *
 */
int amdgpu_bo_import(amdgpu_device_handle dev,
		     enum amdgpu_bo_handle_type type,
		     uint32_t shared_handle,
		     struct amdgpu_bo_import_result *output);
722 | |||
/**
 * Free previously allocated memory
 *
 * \param   buf_handle - \c [in] Buffer handle to free
 *
 * \return   0 on success\n
 *          >0 - AMD specific error code\n
 *          <0 - Negative POSIX Error code
 *
 * \note In the case of memory shared between different applications all
 *	 resources will be "physically" freed only when all such applications
 *	 have terminated.
 * \note It is the UMD's responsibility to "free" the buffer only when there
 *	 is no more GPU access.
 *
 * \sa amdgpu_bo_set_metadata(), amdgpu_bo_alloc()
 *
 */
int amdgpu_bo_free(amdgpu_bo_handle buf_handle);
743 | |||
/**
 * Request CPU access to GPU accessible memory
 *
 * \param   buf_handle - \c [in]  Buffer handle
 * \param   cpu        - \c [out] CPU address to be used for access
 *
 * \return   0 on success\n
 *          >0 - AMD specific error code\n
 *          <0 - Negative POSIX Error code
 *
 * \sa amdgpu_bo_cpu_unmap()
 *
 */
int amdgpu_bo_cpu_map(amdgpu_bo_handle buf_handle, void **cpu);
758 | |||
/**
 * Release CPU access to GPU memory
 *
 * \param   buf_handle - \c [in] Buffer handle
 *
 * \return   0 on success\n
 *          >0 - AMD specific error code\n
 *          <0 - Negative POSIX Error code
 *
 * \sa amdgpu_bo_cpu_map()
 *
 */
int amdgpu_bo_cpu_unmap(amdgpu_bo_handle buf_handle);
772 | |||
773 | |||
/**
 * Wait until a buffer is not used by the device.
 *
 * \param   buf_handle  - \c [in]  Buffer handle.
 * \param   timeout_ns  - \c [in]  Timeout in nanoseconds
 *                                 (AMDGPU_TIMEOUT_INFINITE to wait forever).
 * \param   buffer_busy - \c [out] false if the buffer is idle (all GPU access
 *                                 was completed and no GPU access is
 *                                 scheduled), true if GPU access is in
 *                                 flight or scheduled
 *
 * \return   0 - on success
 *          <0 - AMD specific error code
 */
int amdgpu_bo_wait_for_idle(amdgpu_bo_handle buf_handle,
			    uint64_t timeout_ns,
			    bool *buffer_busy);
790 | |||
791 | |||
792 | /* | ||
793 | * Special GPU Resources | ||
794 | * | ||
795 | */ | ||
796 | |||
797 | |||
798 | |||
/**
 * Query information about GDS
 *
 * \param   dev      - \c [in]  Device handle. See #amdgpu_device_initialize()
 * \param   gds_info - \c [out] Pointer to structure to get GDS information
 *
 * \return   0 on success\n
 *          >0 - AMD specific error code\n
 *          <0 - Negative POSIX Error code
 *
 */
int amdgpu_gpu_resource_query_gds_info(amdgpu_device_handle dev,
				       struct amdgpu_gds_resource_info *
				       gds_info);
813 | |||
814 | |||
/**
 * Allocate GDS partitions
 *
 * \param   dev        - \c [in]  Device handle.
 *                                See #amdgpu_device_initialize()
 * \param   gds_size   - \c [in]  Size of GDS allocation. Must be aligned
 *                                accordingly.
 * \param   alloc_info - \c [out] Pointer to structure to receive information
 *                                about allocation
 *
 * \return   0 on success\n
 *          >0 - AMD specific error code\n
 *          <0 - Negative POSIX Error code
 *
 *
 */
int amdgpu_gpu_resource_gds_alloc(amdgpu_device_handle dev,
				  uint32_t gds_size,
				  struct amdgpu_gds_alloc_info *alloc_info);
833 | |||
834 | |||
835 | |||
836 | |||
/**
 * Release GDS resource. When GDS and associated resources are not needed
 * any more the UMD should free them.
 *
 * \param   handle - \c [in] Handle assigned to GDS allocation
 *
 * \return   0 on success\n
 *          >0 - AMD specific error code\n
 *          <0 - Negative POSIX Error code
 *
 */
int amdgpu_gpu_resource_gds_free(amdgpu_bo_handle handle);
850 | |||
851 | |||
852 | |||
853 | /* | ||
854 | * GPU Execution context | ||
855 | * | ||
856 | */ | ||
857 | |||
/**
 * Create GPU execution Context
 *
 * For the purpose of the GPU Scheduler and GPU Robustness extensions it is
 * necessary to have information to identify rendering/compute contexts.
 * It also may be needed to associate some specific requirements with such
 * contexts. The kernel driver will guarantee that submissions from the same
 * context will always be executed in order (first come, first serve).
 *
 *
 * \param   dev     - \c [in]  Device handle. See #amdgpu_device_initialize()
 * \param   context - \c [out] GPU Context handle
 *
 * \return   0 on success\n
 *          >0 - AMD specific error code\n
 *          <0 - Negative POSIX Error code
 *
 * \sa amdgpu_cs_ctx_free()
 *
 */
int amdgpu_cs_ctx_create(amdgpu_device_handle dev,
			 amdgpu_context_handle *context);
880 | |||
/**
 *
 * Destroy GPU execution context when not needed any more
 *
 * \param   dev     - \c [in] Device handle. See #amdgpu_device_initialize()
 * \param   context - \c [in] GPU Context handle
 *
 * \return   0 on success\n
 *          >0 - AMD specific error code\n
 *          <0 - Negative POSIX Error code
 *
 * \sa amdgpu_cs_ctx_create()
 *
 */
int amdgpu_cs_ctx_free(amdgpu_device_handle dev,
		       amdgpu_context_handle context);
897 | |||
898 | /** | ||
899 | * Query reset state for the specific GPU Context | ||
900 | * | ||
901 | * \param dev - \c [in] Device handle. See #amdgpu_device_initialize() | ||
902 | * \param context - \c [in] GPU Context handle | ||
903 | * \param state - \c [out] Reset state status | ||
904 | * | ||
905 | * \return 0 on success\n | ||
906 | * >0 - AMD specific error code\n | ||
907 | * <0 - Negative POSIX Error code | ||
908 | * | ||
909 | * \sa amdgpu_cs_ctx_create() | ||
910 | * | ||
911 | */ | ||
912 | int amdgpu_cs_query_reset_state(amdgpu_device_handle dev, | ||
913 | amdgpu_context_handle context, | ||
914 | enum amdgpu_cs_ctx_reset_state *state); | ||
915 | |||
916 | |||
917 | /* | ||
918 | * Command Buffers Management | ||
919 | * | ||
920 | */ | ||
921 | |||
922 | |||
923 | /** | ||
924 | * Allocate memory to be filled with PM4 packets and be served as the first | ||
925 | * entry point of execution (a.k.a. Indirect Buffer) | ||
926 | * | ||
927 | * \param dev - \c [in] Device handle. See #amdgpu_device_initialize() | ||
928 | * \param context - \c [in] GPU Context which will use IB | ||
929 | * \param ib_size - \c [in] Size of allocation | ||
930 | * \param output - \c [out] Pointer to structure to get information about | ||
931 | * allocated IB | ||
932 | * | ||
933 | * \return 0 on success\n | ||
934 | * >0 - AMD specific error code\n | ||
935 | * <0 - Negative POSIX Error code | ||
936 | * | ||
937 | * \sa amdgpu_cs_free_ib() | ||
938 | * | ||
939 | */ | ||
940 | int amdgpu_cs_alloc_ib(amdgpu_device_handle dev, | ||
941 | amdgpu_context_handle context, | ||
942 | enum amdgpu_cs_ib_size ib_size, | ||
943 | struct amdgpu_cs_ib_alloc_result *output); | ||
944 | |||
945 | /** | ||
946 | * If UMD has allocates IBs which doesn’t need any more than those IBs must | ||
947 | * be explicitly freed | ||
948 | * | ||
949 | * \param dev - \c [in] Device handle. See #amdgpu_device_initialize() | ||
950 | * \param context - \c [in] GPU Context containing IB | ||
951 | * \param handle - \c [in] IB handle | ||
952 | * | ||
953 | * \return 0 on success\n | ||
954 | * >0 - AMD specific error code\n | ||
955 | * <0 - Negative POSIX Error code | ||
956 | * | ||
957 | * \note Libdrm_amdgpu will guarantee that it will correctly detect when it | ||
958 | * is safe to return IB to free pool | ||
959 | * | ||
960 | * \sa amdgpu_cs_alloc_ib() | ||
961 | * | ||
962 | */ | ||
963 | int amdgpu_cs_free_ib(amdgpu_device_handle dev, | ||
964 | amdgpu_context_handle context, | ||
965 | amdgpu_ib_handle handle); | ||
966 | |||
967 | /** | ||
968 | * Send request to submit command buffers to hardware. | ||
969 | * | ||
970 | * Kernel driver could use GPU Scheduler to make decision when physically | ||
971 | * sent this request to the hardware. Accordingly this request could be put | ||
972 | * in queue and sent for execution later. The only guarantee is that request | ||
973 | * from the same GPU context to the same ip:ip_instance:ring will be executed in | ||
974 | * order. | ||
975 | * | ||
976 | * | ||
977 | * \param dev - \c [in] Device handle. | ||
978 | * See #amdgpu_device_initialize() | ||
979 | * \param context - \c [in] GPU Context | ||
980 | * \param flags - \c [in] Global submission flags | ||
981 | * \param ibs_request - \c [in] Pointer to submission requests. | ||
982 | * We could submit to the several | ||
983 | * engines/rings simulteniously as | ||
984 | * 'atomic' operation | ||
985 | * \param number_of_requests - \c [in] Number of submission requests | ||
986 | * \param fences - \c [out] Pointer to array of data to get | ||
987 | * fences to identify submission | ||
988 | * requests. Timestamps are valid | ||
989 | * in this GPU context and could be used | ||
990 | * to identify/detect completion of | ||
991 | * submission request | ||
992 | * | ||
993 | * \return 0 on success\n | ||
994 | * >0 - AMD specific error code\n | ||
995 | * <0 - Negative POSIX Error code | ||
996 | * | ||
997 | * \note It is assumed that by default IB will be returned to free pool | ||
998 | * automatically by libdrm_amdgpu when submission will completed. | ||
999 | * It is possible for UMD to make decision to re-use the same IB in | ||
1000 | * this case it should be explicitly freed.\n | ||
1001 | * Accordingly, by default, after submission UMD should not touch passed | ||
1002 | * IBs. If UMD needs to re-use IB then the special flag AMDGPU_CS_REUSE_IB | ||
1003 | * must be passed. | ||
1004 | * | ||
1005 | * \note It is required to pass correct resource list with buffer handles | ||
1006 | * which will be accessible by command buffers from submission | ||
1007 | * This will allow kernel driver to correctly implement "paging". | ||
1008 | * Failure to do so will have unpredictable results. | ||
1009 | * | ||
1010 | * \sa amdgpu_command_buffer_alloc(), amdgpu_command_buffer_free(), | ||
1011 | * amdgpu_cs_query_fence_status() | ||
1012 | * | ||
1013 | */ | ||
1014 | int amdgpu_cs_submit(amdgpu_device_handle dev, | ||
1015 | amdgpu_context_handle context, | ||
1016 | uint64_t flags, | ||
1017 | struct amdgpu_cs_request *ibs_request, | ||
1018 | uint32_t number_of_requests, | ||
1019 | uint64_t *fences); | ||
1020 | |||
1021 | /** | ||
1022 | * Query status of Command Buffer Submission | ||
1023 | * | ||
1024 | * \param dev - \c [in] Device handle. See #amdgpu_device_initialize() | ||
1025 | * \param fence - \c [in] Structure describing fence to query | ||
1026 | * \param expired - \c [out] If fence expired or not.\n | ||
1027 | * 0 – if fence is not expired\n | ||
1028 | * !0 - otherwise | ||
1029 | * | ||
1030 | * \return 0 on success\n | ||
1031 | * >0 - AMD specific error code\n | ||
1032 | * <0 - Negative POSIX Error code | ||
1033 | * | ||
1034 | * \note If UMD wants only to check operation status and returned immediately | ||
1035 | * then timeout value as 0 must be passed. In this case success will be | ||
1036 | * returned in the case if submission was completed or timeout error | ||
1037 | * code. | ||
1038 | * | ||
1039 | * \sa amdgpu_cs_submit() | ||
1040 | */ | ||
1041 | int amdgpu_cs_query_fence_status(amdgpu_device_handle dev, | ||
1042 | struct amdgpu_cs_query_fence *fence, | ||
1043 | uint32_t *expired); | ||
1044 | |||
1045 | |||
1046 | /* | ||
1047 | * Query / Info API | ||
1048 | * | ||
1049 | */ | ||
1050 | |||
1051 | |||
1052 | /** | ||
1053 | * Query allocation size alignments | ||
1054 | * | ||
1055 | * UMD should query information about GPU VM MC size alignments requirements | ||
1056 | * to be able correctly choose required allocation size and implement | ||
1057 | * internal optimization if needed. | ||
1058 | * | ||
1059 | * \param dev - \c [in] Device handle. See #amdgpu_device_initialize() | ||
1060 | * \param info - \c [out] Pointer to structure to get size alignment | ||
1061 | * requirements | ||
1062 | * | ||
1063 | * \return 0 on success\n | ||
1064 | * >0 - AMD specific error code\n | ||
1065 | * <0 - Negative POSIX Error code | ||
1066 | * | ||
1067 | */ | ||
1068 | int amdgpu_query_buffer_size_alignment(amdgpu_device_handle dev, | ||
1069 | struct amdgpu_buffer_size_alignments | ||
1070 | *info); | ||
1071 | |||
1072 | |||
1073 | |||
1074 | /** | ||
1075 | * Query firmware versions | ||
1076 | * | ||
1077 | * \param dev - \c [in] Device handle. See #amdgpu_device_initialize() | ||
1078 | * \param fw_type - \c [in] AMDGPU_INFO_FW_* | ||
1079 | * \param ip_instance - \c [in] Index of the IP block of the same type. | ||
1080 | * \param index - \c [in] Index of the engine. (for SDMA and MEC) | ||
1081 | * \param version - \c [out] Pointer to to the "version" return value | ||
1082 | * \param feature - \c [out] Pointer to to the "feature" return value | ||
1083 | * | ||
1084 | * \return 0 on success\n | ||
1085 | * >0 - AMD specific error code\n | ||
1086 | * <0 - Negative POSIX Error code | ||
1087 | * | ||
1088 | */ | ||
1089 | int amdgpu_query_firmware_version(amdgpu_device_handle dev, unsigned fw_type, | ||
1090 | unsigned ip_instance, unsigned index, | ||
1091 | uint32_t *version, uint32_t *feature); | ||
1092 | |||
1093 | |||
1094 | |||
1095 | /** | ||
1096 | * Query the number of HW IP instances of a certain type. | ||
1097 | * | ||
1098 | * \param dev - \c [in] Device handle. See #amdgpu_device_initialize() | ||
1099 | * \param type - \c [in] Hardware IP block type = AMDGPU_HW_IP_* | ||
1100 | * \param count - \c [out] Pointer to structure to get information | ||
1101 | * | ||
1102 | * \return 0 on success\n | ||
1103 | * >0 - AMD specific error code\n | ||
1104 | * <0 - Negative POSIX Error code | ||
1105 | */ | ||
1106 | int amdgpu_query_hw_ip_count(amdgpu_device_handle dev, unsigned type, | ||
1107 | uint32_t *count); | ||
1108 | |||
1109 | |||
1110 | |||
1111 | /** | ||
1112 | * Query engine information | ||
1113 | * | ||
1114 | * This query allows UMD to query information different engines and their | ||
1115 | * capabilities. | ||
1116 | * | ||
1117 | * \param dev - \c [in] Device handle. See #amdgpu_device_initialize() | ||
1118 | * \param type - \c [in] Hardware IP block type = AMDGPU_HW_IP_* | ||
1119 | * \param ip_instance - \c [in] Index of the IP block of the same type. | ||
1120 | * \param info - \c [out] Pointer to structure to get information | ||
1121 | * | ||
1122 | * \return 0 on success\n | ||
1123 | * >0 - AMD specific error code\n | ||
1124 | * <0 - Negative POSIX Error code | ||
1125 | */ | ||
1126 | int amdgpu_query_hw_ip_info(amdgpu_device_handle dev, unsigned type, | ||
1127 | unsigned ip_instance, | ||
1128 | struct drm_amdgpu_info_hw_ip *info); | ||
1129 | |||
1130 | |||
1131 | |||
1132 | |||
1133 | /** | ||
1134 | * Query heap information | ||
1135 | * | ||
1136 | * This query allows UMD to query potentially available memory resources and | ||
1137 | * adjust their logic if necessary. | ||
1138 | * | ||
1139 | * \param dev - \c [in] Device handle. See #amdgpu_device_initialize() | ||
1140 | * \param heap - \c [in] Heap type | ||
1141 | * \param info - \c [in] Pointer to structure to get needed information | ||
1142 | * | ||
1143 | * \return 0 on success\n | ||
1144 | * >0 - AMD specific error code\n | ||
1145 | * <0 - Negative POSIX Error code | ||
1146 | * | ||
1147 | */ | ||
1148 | int amdgpu_query_heap_info(amdgpu_device_handle dev, | ||
1149 | uint32_t heap, | ||
1150 | uint32_t flags, | ||
1151 | struct amdgpu_heap_info *info); | ||
1152 | |||
1153 | |||
1154 | |||
1155 | /** | ||
1156 | * Get the CRTC ID from the mode object ID | ||
1157 | * | ||
1158 | * \param dev - \c [in] Device handle. See #amdgpu_device_initialize() | ||
1159 | * \param id - \c [in] Mode object ID | ||
1160 | * \param result - \c [in] Pointer to the CRTC ID | ||
1161 | * | ||
1162 | * \return 0 on success\n | ||
1163 | * >0 - AMD specific error code\n | ||
1164 | * <0 - Negative POSIX Error code | ||
1165 | * | ||
1166 | */ | ||
1167 | int amdgpu_query_crtc_from_id(amdgpu_device_handle dev, unsigned id, | ||
1168 | int32_t *result); | ||
1169 | |||
1170 | |||
1171 | |||
1172 | /** | ||
1173 | * Query GPU H/w Info | ||
1174 | * | ||
1175 | * Query hardware specific information | ||
1176 | * | ||
1177 | * \param dev - \c [in] Device handle. See #amdgpu_device_initialize() | ||
1178 | * \param heap - \c [in] Heap type | ||
1179 | * \param info - \c [in] Pointer to structure to get needed information | ||
1180 | * | ||
1181 | * \return 0 on success\n | ||
1182 | * >0 - AMD specific error code\n | ||
1183 | * <0 - Negative POSIX Error code | ||
1184 | * | ||
1185 | */ | ||
1186 | int amdgpu_query_gpu_info(amdgpu_device_handle dev, | ||
1187 | struct amdgpu_gpu_info *info); | ||
1188 | |||
1189 | |||
1190 | |||
1191 | /** | ||
1192 | * Query hardware or driver information. | ||
1193 | * | ||
1194 | * The return size is query-specific and depends on the "info_id" parameter. | ||
1195 | * No more than "size" bytes is returned. | ||
1196 | * | ||
1197 | * \param dev - \c [in] Device handle. See #amdgpu_device_initialize() | ||
1198 | * \param info_id - \c [in] AMDGPU_INFO_* | ||
1199 | * \param size - \c [in] Size of the returned value. | ||
1200 | * \param value - \c [out] Pointer to the return value. | ||
1201 | * | ||
1202 | * \return 0 on success\n | ||
1203 | * >0 - AMD specific error code\n | ||
1204 | * <0 - Negative POSIX error code | ||
1205 | * | ||
1206 | */ | ||
1207 | int amdgpu_query_info(amdgpu_device_handle dev, unsigned info_id, | ||
1208 | unsigned size, void *value); | ||
1209 | |||
1210 | |||
1211 | |||
1212 | /** | ||
1213 | * Read a set of consecutive memory-mapped registers. | ||
1214 | * Not all registers are allowed to be read by userspace. | ||
1215 | * | ||
1216 | * \param dev - \c [in] Device handle. See #amdgpu_device_initialize( | ||
1217 | * \param dword_offset - \c [in] Register offset in dwords | ||
1218 | * \param count - \c [in] The number of registers to read starting | ||
1219 | * from the offset | ||
1220 | * \param instance - \c [in] GRBM_GFX_INDEX selector. It may have other | ||
1221 | * uses. Set it to 0xffffffff if unsure. | ||
1222 | * \param flags - \c [in] Flags with additional information. | ||
1223 | * \param values - \c [out] The pointer to return values. | ||
1224 | * | ||
1225 | * \return 0 on success\n | ||
1226 | * >0 - AMD specific error code\n | ||
1227 | * <0 - Negative POSIX error code | ||
1228 | * | ||
1229 | */ | ||
1230 | int amdgpu_read_mm_registers(amdgpu_device_handle dev, unsigned dword_offset, | ||
1231 | unsigned count, uint32_t instance, uint32_t flags, | ||
1232 | uint32_t *values); | ||
1233 | |||
1234 | |||
1235 | |||
1236 | /** | ||
1237 | * Request GPU access to user allocated memory e.g. via "malloc" | ||
1238 | * | ||
1239 | * \param dev - [in] Device handle. See #amdgpu_device_initialize() | ||
1240 | * \param cpu - [in] CPU address of user allocated memory which we | ||
1241 | * want to map to GPU address space (make GPU accessible) | ||
1242 | * (This address must be correctly aligned). | ||
1243 | * \param size - [in] Size of allocation (must be correctly aligned) | ||
1244 | * \param amdgpu_bo_alloc_result - [out] Handle of allocation to be passed as resource | ||
1245 | * on submission and be used in other operations.(e.g. for VA submission) | ||
1246 | * ( Temporally defined amdgpu_bo_alloc_result as parameter for return mc address. ) | ||
1247 | * | ||
1248 | * | ||
1249 | * \return 0 on success | ||
1250 | * >0 - AMD specific error code | ||
1251 | * <0 - Negative POSIX Error code | ||
1252 | * | ||
1253 | * | ||
1254 | * \note | ||
1255 | * This call doesn't guarantee that such memory will be persistently | ||
1256 | * "locked" / make non-pageable. The purpose of this call is to provide | ||
1257 | * opportunity for GPU get access to this resource during submission. | ||
1258 | * | ||
1259 | * The maximum amount of memory which could be mapped in this call depends | ||
1260 | * if overcommit is disabled or not. If overcommit is disabled than the max. | ||
1261 | * amount of memory to be pinned will be limited by left "free" size in total | ||
1262 | * amount of memory which could be locked simultaneously ("GART" size). | ||
1263 | * | ||
1264 | * Supported (theoretical) max. size of mapping is restricted only by | ||
1265 | * "GART" size. | ||
1266 | * | ||
1267 | * It is responsibility of caller to correctly specify access rights | ||
1268 | * on VA assignment. | ||
1269 | */ | ||
1270 | int amdgpu_create_bo_from_user_mem(amdgpu_device_handle dev, | ||
1271 | void *cpu, | ||
1272 | uint64_t size, | ||
1273 | struct amdgpu_bo_alloc_result *info); | ||
1274 | |||
1275 | |||
#endif /* #ifndef _AMDGPU_H_ */