1 /*
2 * Copyright 2012 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs
23 */
25 #ifdef HAVE_CONFIG_H
26 #include <config.h>
27 #endif
29 #include <stdio.h>
30 #include <stdlib.h>
31 #include <stdint.h>
32 #include <string.h>
33 #include <stdbool.h>
34 #include <assert.h>
35 #include <errno.h>
36 #include <sys/mman.h>
37 #include <fcntl.h>
39 #include <xf86drm.h>
40 #include <xf86atomic.h>
41 #include "libdrm_lists.h"
42 #include "nouveau_drm.h"
44 #include "nouveau.h"
45 #include "private.h"
#ifdef DEBUG
/* Debug-message level, configured via the NOUVEAU_LIBDRM_DEBUG env var. */
uint32_t nouveau_debug = 0;

/* Parse the debug level from the environment value (base auto-detected
 * by strtol); negative or absent values leave the level untouched.
 */
static void
debug_init(char *args)
{
	int level;

	if (!args)
		return;

	level = strtol(args, NULL, 0);
	if (level >= 0)
		nouveau_debug = level;
}
#endif
/* this is the old libdrm's version of nouveau_device_wrap(), the symbol
 * is kept here to prevent AIGLX from crashing if the DDX is linked against
 * the new libdrm, but the DRI driver against the old
 */
int
nouveau_device_open_existing(struct nouveau_device **pdev, int close, int fd,
			     drm_context_t ctx)
{
	/* ABI-compatibility stub only: always refuses.  All parameters are
	 * deliberately ignored; new code must use nouveau_device_wrap().
	 */
	return -EACCES;
}
72 int
73 nouveau_device_wrap(int fd, int close, struct nouveau_device **pdev)
74 {
75 struct nouveau_device_priv *nvdev = calloc(1, sizeof(*nvdev));
76 struct nouveau_device *dev = &nvdev->base;
77 uint64_t chipset, vram, gart, bousage;
78 drmVersionPtr ver;
79 int ret;
81 #ifdef DEBUG
82 debug_init(getenv("NOUVEAU_LIBDRM_DEBUG"));
83 #endif
85 if (!nvdev)
86 return -ENOMEM;
87 nvdev->base.fd = fd;
89 ver = drmGetVersion(fd);
90 if (ver) dev->drm_version = (ver->version_major << 24) |
91 (ver->version_minor << 8) |
92 ver->version_patchlevel;
93 drmFreeVersion(ver);
95 if ( dev->drm_version != 0x00000010 &&
96 (dev->drm_version < 0x01000000 ||
97 dev->drm_version >= 0x02000000)) {
98 nouveau_device_del(&dev);
99 return -EINVAL;
100 }
102 ret = nouveau_getparam(dev, NOUVEAU_GETPARAM_CHIPSET_ID, &chipset);
103 if (ret == 0)
104 ret = nouveau_getparam(dev, NOUVEAU_GETPARAM_FB_SIZE, &vram);
105 if (ret == 0)
106 ret = nouveau_getparam(dev, NOUVEAU_GETPARAM_AGP_SIZE, &gart);
107 if (ret) {
108 nouveau_device_del(&dev);
109 return ret;
110 }
112 ret = nouveau_getparam(dev, NOUVEAU_GETPARAM_HAS_BO_USAGE, &bousage);
113 if (ret == 0)
114 nvdev->have_bo_usage = (bousage != 0);
116 nvdev->close = close;
117 DRMINITLISTHEAD(&nvdev->bo_list);
118 nvdev->base.object.oclass = NOUVEAU_DEVICE_CLASS;
119 nvdev->base.lib_version = 0x01000000;
120 nvdev->base.chipset = chipset;
121 nvdev->base.vram_size = vram;
122 nvdev->base.gart_size = gart;
123 nvdev->base.vram_limit = (nvdev->base.vram_size * 80) / 100;
124 nvdev->base.gart_limit = (nvdev->base.gart_size * 80) / 100;
126 *pdev = &nvdev->base;
127 return 0;
128 }
/* Open the nouveau device identified by busid and wrap it.  The fd is
 * owned by the returned device (closed again by nouveau_device_del()).
 */
int
nouveau_device_open(const char *busid, struct nouveau_device **pdev)
{
	int fd, ret;

	fd = drmOpen("nouveau", busid);
	if (fd < 0)
		return -ENODEV;

	ret = nouveau_device_wrap(fd, 1, pdev);
	if (ret)
		drmClose(fd);
	return ret;
}
142 void
143 nouveau_device_del(struct nouveau_device **pdev)
144 {
145 struct nouveau_device_priv *nvdev = nouveau_device(*pdev);
146 if (nvdev) {
147 if (nvdev->close)
148 drmClose(nvdev->base.fd);
149 free(nvdev->client);
150 free(nvdev);
151 *pdev = NULL;
152 }
153 }
155 int
156 nouveau_getparam(struct nouveau_device *dev, uint64_t param, uint64_t *value)
157 {
158 struct drm_nouveau_getparam r = { param, 0 };
159 int fd = dev->fd, ret =
160 drmCommandWriteRead(fd, DRM_NOUVEAU_GETPARAM, &r, sizeof(r));
161 *value = r.value;
162 return ret;
163 }
165 int
166 nouveau_setparam(struct nouveau_device *dev, uint64_t param, uint64_t value)
167 {
168 struct drm_nouveau_setparam r = { param, value };
169 return drmCommandWrite(dev->fd, DRM_NOUVEAU_SETPARAM, &r, sizeof(r));
170 }
172 int
173 nouveau_client_new(struct nouveau_device *dev, struct nouveau_client **pclient)
174 {
175 struct nouveau_device_priv *nvdev = nouveau_device(dev);
176 struct nouveau_client_priv *pcli;
177 int id = 0, i, ret = -ENOMEM;
178 uint32_t *clients;
180 for (i = 0; i < nvdev->nr_client; i++) {
181 id = ffs(nvdev->client[i]) - 1;
182 if (id >= 0)
183 goto out;
184 }
186 clients = realloc(nvdev->client, sizeof(uint32_t) * (i + 1));
187 if (!clients)
188 return ret;
189 nvdev->client = clients;
190 nvdev->client[i] = 0;
191 nvdev->nr_client++;
193 out:
194 pcli = calloc(1, sizeof(*pcli));
195 if (pcli) {
196 nvdev->client[i] |= (1 << id);
197 pcli->base.device = dev;
198 pcli->base.id = (i * 32) + id;
199 ret = 0;
200 }
202 *pclient = &pcli->base;
203 return ret;
204 }
206 void
207 nouveau_client_del(struct nouveau_client **pclient)
208 {
209 struct nouveau_client_priv *pcli = nouveau_client(*pclient);
210 struct nouveau_device_priv *nvdev;
211 if (pcli) {
212 int id = pcli->base.id;
213 nvdev = nouveau_device(pcli->base.device);
214 nvdev->client[id / 32] &= ~(1 << (id % 32));
215 free(pcli->kref);
216 free(pcli);
217 }
218 }
220 int
221 nouveau_object_new(struct nouveau_object *parent, uint64_t handle,
222 uint32_t oclass, void *data, uint32_t length,
223 struct nouveau_object **pobj)
224 {
225 struct nouveau_device *dev;
226 struct nouveau_object *obj;
227 int ret = -EINVAL;
229 if (length == 0)
230 length = sizeof(struct nouveau_object *);
231 obj = malloc(sizeof(*obj) + length);
232 obj->parent = parent;
233 obj->handle = handle;
234 obj->oclass = oclass;
235 obj->length = length;
236 obj->data = obj + 1;
237 if (data)
238 memcpy(obj->data, data, length);
239 *(struct nouveau_object **)obj->data = obj;
241 dev = nouveau_object_find(obj, NOUVEAU_DEVICE_CLASS);
242 switch (parent->oclass) {
243 case NOUVEAU_DEVICE_CLASS:
244 switch (obj->oclass) {
245 case NOUVEAU_FIFO_CHANNEL_CLASS:
246 {
247 if (dev->chipset < 0xc0)
248 ret = abi16_chan_nv04(obj);
249 else
250 if (dev->chipset < 0xe0)
251 ret = abi16_chan_nvc0(obj);
252 else
253 ret = abi16_chan_nve0(obj);
254 }
255 break;
256 default:
257 break;
258 }
259 break;
260 case NOUVEAU_FIFO_CHANNEL_CLASS:
261 switch (obj->oclass) {
262 case NOUVEAU_NOTIFIER_CLASS:
263 ret = abi16_ntfy(obj);
264 break;
265 default:
266 ret = abi16_engobj(obj);
267 break;
268 }
269 default:
270 break;
271 }
273 if (ret) {
274 free(obj);
275 return ret;
276 }
278 *pobj = obj;
279 return 0;
280 }
282 void
283 nouveau_object_del(struct nouveau_object **pobj)
284 {
285 struct nouveau_object *obj = *pobj;
286 struct nouveau_device *dev;
287 if (obj) {
288 dev = nouveau_object_find(obj, NOUVEAU_DEVICE_CLASS);
289 if (obj->oclass == NOUVEAU_FIFO_CHANNEL_CLASS) {
290 struct drm_nouveau_channel_free req;
291 req.channel = obj->handle;
292 drmCommandWrite(dev->fd, DRM_NOUVEAU_CHANNEL_FREE,
293 &req, sizeof(req));
294 } else {
295 struct drm_nouveau_gpuobj_free req;
296 req.channel = obj->parent->handle;
297 req.handle = obj->handle;
298 drmCommandWrite(dev->fd, DRM_NOUVEAU_GPUOBJ_FREE,
299 &req, sizeof(req));
300 }
301 }
302 free(obj);
303 *pobj = NULL;
304 }
306 void *
307 nouveau_object_find(struct nouveau_object *obj, uint32_t pclass)
308 {
309 while (obj && obj->oclass != pclass) {
310 obj = obj->parent;
311 if (pclass == NOUVEAU_PARENT_CLASS)
312 break;
313 }
314 return obj;
315 }
317 static void
318 nouveau_bo_del(struct nouveau_bo *bo)
319 {
320 struct nouveau_bo_priv *nvbo = nouveau_bo(bo);
321 struct drm_gem_close req = { bo->handle };
322 DRMLISTDEL(&nvbo->head);
323 if (bo->map)
324 munmap(bo->map, bo->size);
325 drmIoctl(bo->device->fd, DRM_IOCTL_GEM_CLOSE, &req);
326 free(nvbo);
327 }
329 int
330 nouveau_bo_new(struct nouveau_device *dev, uint32_t flags, uint32_t align,
331 uint64_t size, union nouveau_bo_config *config,
332 struct nouveau_bo **pbo)
333 {
334 struct nouveau_device_priv *nvdev = nouveau_device(dev);
335 struct nouveau_bo_priv *nvbo = calloc(1, sizeof(*nvbo));
336 struct nouveau_bo *bo = &nvbo->base;
337 int ret;
339 if (!nvbo)
340 return -ENOMEM;
341 atomic_set(&nvbo->refcnt, 1);
342 bo->device = dev;
343 bo->flags = flags;
344 bo->size = size;
346 ret = abi16_bo_init(bo, align, config);
347 if (ret) {
348 free(nvbo);
349 return ret;
350 }
352 DRMLISTADD(&nvbo->head, &nvdev->bo_list);
354 *pbo = bo;
355 return 0;
356 }
358 int
359 nouveau_bo_wrap(struct nouveau_device *dev, uint32_t handle,
360 struct nouveau_bo **pbo)
361 {
362 struct nouveau_device_priv *nvdev = nouveau_device(dev);
363 struct drm_nouveau_gem_info req = { .handle = handle };
364 struct nouveau_bo_priv *nvbo;
365 int ret;
367 DRMLISTFOREACHENTRY(nvbo, &nvdev->bo_list, head) {
368 if (nvbo->base.handle == handle) {
369 *pbo = NULL;
370 nouveau_bo_ref(&nvbo->base, pbo);
371 return 0;
372 }
373 }
375 ret = drmCommandWriteRead(dev->fd, DRM_NOUVEAU_GEM_INFO,
376 &req, sizeof(req));
377 if (ret)
378 return ret;
380 nvbo = calloc(1, sizeof(*nvbo));
381 if (nvbo) {
382 atomic_set(&nvbo->refcnt, 1);
383 nvbo->base.device = dev;
384 abi16_bo_info(&nvbo->base, &req);
385 DRMLISTADD(&nvbo->head, &nvdev->bo_list);
386 *pbo = &nvbo->base;
387 return 0;
388 }
390 return -ENOMEM;
391 }
393 int
394 nouveau_bo_name_ref(struct nouveau_device *dev, uint32_t name,
395 struct nouveau_bo **pbo)
396 {
397 struct nouveau_device_priv *nvdev = nouveau_device(dev);
398 struct nouveau_bo_priv *nvbo;
399 struct drm_gem_open req = { .name = name };
400 int ret;
402 DRMLISTFOREACHENTRY(nvbo, &nvdev->bo_list, head) {
403 if (nvbo->name == name) {
404 *pbo = NULL;
405 nouveau_bo_ref(&nvbo->base, pbo);
406 return 0;
407 }
408 }
410 ret = drmIoctl(dev->fd, DRM_IOCTL_GEM_OPEN, &req);
411 if (ret == 0) {
412 ret = nouveau_bo_wrap(dev, req.handle, pbo);
413 nouveau_bo((*pbo))->name = name;
414 }
416 return ret;
417 }
419 int
420 nouveau_bo_name_get(struct nouveau_bo *bo, uint32_t *name)
421 {
422 struct drm_gem_flink req = { .handle = bo->handle };
423 struct nouveau_bo_priv *nvbo = nouveau_bo(bo);
424 if (!nvbo->name) {
425 int ret = drmIoctl(bo->device->fd, DRM_IOCTL_GEM_FLINK, &req);
426 if (ret)
427 return ret;
428 nvbo->name = req.name;
429 }
430 *name = nvbo->name;
431 return 0;
432 }
434 void
435 nouveau_bo_ref(struct nouveau_bo *bo, struct nouveau_bo **pref)
436 {
437 struct nouveau_bo *ref = *pref;
438 if (bo) {
439 atomic_inc(&nouveau_bo(bo)->refcnt);
440 }
441 if (ref) {
442 if (atomic_dec_and_test(&nouveau_bo(ref)->refcnt))
443 nouveau_bo_del(ref);
444 }
445 *pref = bo;
446 }
448 int
449 nouveau_bo_prime_handle_ref(struct nouveau_device *dev, int prime_fd,
450 struct nouveau_bo **bo)
451 {
452 int ret;
453 unsigned int handle;
455 ret = drmPrimeFDToHandle(dev->fd, prime_fd, &handle);
456 if (ret) {
457 nouveau_bo_ref(NULL, bo);
458 return ret;
459 }
461 ret = nouveau_bo_wrap(dev, handle, bo);
462 if (ret) {
463 nouveau_bo_ref(NULL, bo);
464 return ret;
465 }
467 return 0;
468 }
470 int
471 nouveau_bo_set_prime(struct nouveau_bo *bo, int *prime_fd)
472 {
473 struct nouveau_bo_priv *nvbo = nouveau_bo(bo);
474 int ret;
476 ret = drmPrimeHandleToFD(bo->device->fd, nvbo->base.handle, DRM_CLOEXEC, prime_fd);
477 if (ret)
478 return ret;
479 return 0;
480 }
/* Wait for the GPU to finish with the buffer for the requested access
 * type (NOUVEAU_BO_RD/WR/NOBLOCK).  Returns 0 on success or a negative
 * errno from the CPU_PREP ioctl.
 */
int
nouveau_bo_wait(struct nouveau_bo *bo, uint32_t access,
		struct nouveau_client *client)
{
	struct nouveau_bo_priv *nvbo = nouveau_bo(bo);
	struct drm_nouveau_gem_cpu_prep req;
	struct nouveau_pushbuf *push;
	int ret = 0;

	/* no read or write access requested: nothing to wait for */
	if (!(access & NOUVEAU_BO_RDWR))
		return 0;

	/* flush any pushbuf still referencing this bo so the kernel sees
	 * the pending work before we wait on it */
	push = cli_push_get(client, bo);
	if (push && push->channel)
		nouveau_pushbuf_kick(push, push->channel);

	/* fast path: skip the ioctl when the bo was never flinked (no
	 * other process can reach it), no write was tracked locally, and
	 * the CPU only wants to read.
	 * NOTE(review): nvbo->access appears to record writes queued via
	 * pushbufs elsewhere — confirm against the pushbuf code */
	if (!nvbo->name && !(nvbo->access & NOUVEAU_BO_WR) &&
	    !( access & NOUVEAU_BO_WR))
		return 0;

	req.handle = bo->handle;
	req.flags = 0;
	if (access & NOUVEAU_BO_WR)
		req.flags |= NOUVEAU_GEM_CPU_PREP_WRITE;
	if (access & NOUVEAU_BO_NOBLOCK)
		req.flags |= NOUVEAU_GEM_CPU_PREP_NOWAIT;

	ret = drmCommandWrite(bo->device->fd, DRM_NOUVEAU_GEM_CPU_PREP,
			      &req, sizeof(req));
	if (ret == 0)
		nvbo->access = 0;	/* tracked access has been settled */
	return ret;
}
516 int
517 nouveau_bo_map(struct nouveau_bo *bo, uint32_t access,
518 struct nouveau_client *client)
519 {
520 struct nouveau_bo_priv *nvbo = nouveau_bo(bo);
521 if (bo->map == NULL) {
522 bo->map = mmap(0, bo->size, PROT_READ | PROT_WRITE,
523 MAP_SHARED, bo->device->fd, nvbo->map_handle);
524 if (bo->map == MAP_FAILED) {
525 bo->map = NULL;
526 return -errno;
527 }
528 }
529 return nouveau_bo_wait(bo, access, client);
530 }