1 /*
2 * Copyright 2012 Red Hat
3 *
4 * This file is subject to the terms and conditions of the GNU General
5 * Public License version 2. See the file COPYING in the main
6 * directory of this archive for more details.
7 *
8 * Authors: Matthew Garrett
9 * Dave Airlie
10 */
11 #include <linux/module.h>
12 #include <drm/drmP.h>
13 #include <drm/drm_fb_helper.h>
15 #include <linux/fb.h>
17 #include "cirrus_drv.h"
19 static void cirrus_dirty_update(struct cirrus_fbdev *afbdev,
20 int x, int y, int width, int height)
21 {
22 int i;
23 struct drm_gem_object *obj;
24 struct cirrus_bo *bo;
25 int src_offset, dst_offset;
26 int bpp = (afbdev->gfb.base.bits_per_pixel + 7)/8;
27 int ret;
28 bool unmap = false;
29 bool store_for_later = false;
30 int x2, y2;
31 unsigned long flags;
33 obj = afbdev->gfb.obj;
34 bo = gem_to_cirrus_bo(obj);
36 /*
37 * try and reserve the BO, if we fail with busy
38 * then the BO is being moved and we should
39 * store up the damage until later.
40 */
41 ret = cirrus_bo_reserve(bo, true);
42 if (ret) {
43 if (ret != -EBUSY)
44 return;
45 store_for_later = true;
46 }
48 x2 = x + width - 1;
49 y2 = y + height - 1;
50 spin_lock_irqsave(&afbdev->dirty_lock, flags);
52 if (afbdev->y1 < y)
53 y = afbdev->y1;
54 if (afbdev->y2 > y2)
55 y2 = afbdev->y2;
56 if (afbdev->x1 < x)
57 x = afbdev->x1;
58 if (afbdev->x2 > x2)
59 x2 = afbdev->x2;
61 if (store_for_later) {
62 afbdev->x1 = x;
63 afbdev->x2 = x2;
64 afbdev->y1 = y;
65 afbdev->y2 = y2;
66 spin_unlock_irqrestore(&afbdev->dirty_lock, flags);
67 return;
68 }
70 afbdev->x1 = afbdev->y1 = INT_MAX;
71 afbdev->x2 = afbdev->y2 = 0;
72 spin_unlock_irqrestore(&afbdev->dirty_lock, flags);
74 if (!bo->kmap.virtual) {
75 ret = ttm_bo_kmap(&bo->bo, 0, bo->bo.num_pages, &bo->kmap);
76 if (ret) {
77 DRM_ERROR("failed to kmap fb updates\n");
78 cirrus_bo_unreserve(bo);
79 return;
80 }
81 unmap = true;
82 }
83 for (i = y; i < y + height; i++) {
84 /* assume equal stride for now */
85 src_offset = dst_offset = i * afbdev->gfb.base.pitches[0] + (x * bpp);
86 memcpy_toio(bo->kmap.virtual + src_offset, afbdev->sysram + src_offset, width * bpp);
88 }
89 if (unmap)
90 ttm_bo_kunmap(&bo->kmap);
92 cirrus_bo_unreserve(bo);
93 }
95 static void cirrus_fillrect(struct fb_info *info,
96 const struct fb_fillrect *rect)
97 {
98 struct cirrus_fbdev *afbdev = info->par;
99 sys_fillrect(info, rect);
100 cirrus_dirty_update(afbdev, rect->dx, rect->dy, rect->width,
101 rect->height);
102 }
104 static void cirrus_copyarea(struct fb_info *info,
105 const struct fb_copyarea *area)
106 {
107 struct cirrus_fbdev *afbdev = info->par;
108 sys_copyarea(info, area);
109 cirrus_dirty_update(afbdev, area->dx, area->dy, area->width,
110 area->height);
111 }
113 static void cirrus_imageblit(struct fb_info *info,
114 const struct fb_image *image)
115 {
116 struct cirrus_fbdev *afbdev = info->par;
117 sys_imageblit(info, image);
118 cirrus_dirty_update(afbdev, image->dx, image->dy, image->width,
119 image->height);
120 }
/*
 * fbdev ops for the emulated framebuffer.  Drawing is done by the generic
 * sys_* helpers into the vmalloc'd shadow buffer; the cirrus_* wrappers
 * then copy the dirty region into the VRAM BO.  Mode/var checking, panning,
 * blanking and cmap handling are forwarded to the drm_fb_helper core.
 */
static struct fb_ops cirrusfb_ops = {
	.owner = THIS_MODULE,
	.fb_check_var = drm_fb_helper_check_var,
	.fb_set_par = drm_fb_helper_set_par,
	.fb_fillrect = cirrus_fillrect,
	.fb_copyarea = cirrus_copyarea,
	.fb_imageblit = cirrus_imageblit,
	.fb_pan_display = drm_fb_helper_pan_display,
	.fb_blank = drm_fb_helper_blank,
	.fb_setcmap = drm_fb_helper_setcmap,
};
135 static int cirrusfb_create_object(struct cirrus_fbdev *afbdev,
136 struct drm_mode_fb_cmd2 *mode_cmd,
137 struct drm_gem_object **gobj_p)
138 {
139 struct drm_device *dev = afbdev->helper.dev;
140 u32 bpp, depth;
141 u32 size;
142 struct drm_gem_object *gobj;
144 int ret = 0;
145 drm_fb_get_bpp_depth(mode_cmd->pixel_format, &depth, &bpp);
147 if (bpp > 24)
148 return -EINVAL;
149 size = mode_cmd->pitches[0] * mode_cmd->height;
150 ret = cirrus_gem_create(dev, size, true, &gobj);
151 if (ret)
152 return ret;
154 *gobj_p = gobj;
155 return ret;
156 }
158 static int cirrusfb_create(struct cirrus_fbdev *gfbdev,
159 struct drm_fb_helper_surface_size *sizes)
160 {
161 struct drm_device *dev = gfbdev->helper.dev;
162 struct cirrus_device *cdev = gfbdev->helper.dev->dev_private;
163 struct fb_info *info;
164 struct drm_framebuffer *fb;
165 struct drm_mode_fb_cmd2 mode_cmd;
166 struct device *device = &dev->pdev->dev;
167 void *sysram;
168 struct drm_gem_object *gobj = NULL;
169 struct cirrus_bo *bo = NULL;
170 int size, ret;
172 mode_cmd.width = sizes->surface_width;
173 mode_cmd.height = sizes->surface_height;
174 mode_cmd.pitches[0] = mode_cmd.width * ((sizes->surface_bpp + 7) / 8);
175 mode_cmd.pixel_format = drm_mode_legacy_fb_format(sizes->surface_bpp,
176 sizes->surface_depth);
177 size = mode_cmd.pitches[0] * mode_cmd.height;
179 ret = cirrusfb_create_object(gfbdev, &mode_cmd, &gobj);
180 if (ret) {
181 DRM_ERROR("failed to create fbcon backing object %d\n", ret);
182 return ret;
183 }
185 bo = gem_to_cirrus_bo(gobj);
187 sysram = vmalloc(size);
188 if (!sysram)
189 return -ENOMEM;
191 info = framebuffer_alloc(0, device);
192 if (info == NULL)
193 return -ENOMEM;
195 info->par = gfbdev;
197 ret = cirrus_framebuffer_init(cdev->dev, &gfbdev->gfb, &mode_cmd, gobj);
198 if (ret)
199 return ret;
201 gfbdev->sysram = sysram;
202 gfbdev->size = size;
204 fb = &gfbdev->gfb.base;
205 if (!fb) {
206 DRM_INFO("fb is NULL\n");
207 return -EINVAL;
208 }
210 /* setup helper */
211 gfbdev->helper.fb = fb;
212 gfbdev->helper.fbdev = info;
214 strcpy(info->fix.id, "cirrusdrmfb");
217 info->flags = FBINFO_DEFAULT;
218 info->fbops = &cirrusfb_ops;
220 drm_fb_helper_fill_fix(info, fb->pitches[0], fb->depth);
221 drm_fb_helper_fill_var(info, &gfbdev->helper, sizes->fb_width,
222 sizes->fb_height);
224 /* setup aperture base/size for vesafb takeover */
225 info->apertures = alloc_apertures(1);
226 if (!info->apertures) {
227 ret = -ENOMEM;
228 goto out_iounmap;
229 }
230 info->apertures->ranges[0].base = cdev->dev->mode_config.fb_base;
231 info->apertures->ranges[0].size = cdev->mc.vram_size;
233 info->screen_base = sysram;
234 info->screen_size = size;
236 info->fix.mmio_start = 0;
237 info->fix.mmio_len = 0;
239 ret = fb_alloc_cmap(&info->cmap, 256, 0);
240 if (ret) {
241 DRM_ERROR("%s: can't allocate color map\n", info->fix.id);
242 ret = -ENOMEM;
243 goto out_iounmap;
244 }
246 DRM_INFO("fb mappable at 0x%lX\n", info->fix.smem_start);
247 DRM_INFO("vram aper at 0x%lX\n", (unsigned long)info->fix.smem_start);
248 DRM_INFO("size %lu\n", (unsigned long)info->fix.smem_len);
249 DRM_INFO("fb depth is %d\n", fb->depth);
250 DRM_INFO(" pitch is %d\n", fb->pitches[0]);
252 return 0;
253 out_iounmap:
254 return ret;
255 }
257 static int cirrus_fb_find_or_create_single(struct drm_fb_helper *helper,
258 struct drm_fb_helper_surface_size
259 *sizes)
260 {
261 struct cirrus_fbdev *gfbdev = (struct cirrus_fbdev *)helper;
262 int new_fb = 0;
263 int ret;
265 if (!helper->fb) {
266 ret = cirrusfb_create(gfbdev, sizes);
267 if (ret)
268 return ret;
269 new_fb = 1;
270 }
271 return new_fb;
272 }
274 static int cirrus_fbdev_destroy(struct drm_device *dev,
275 struct cirrus_fbdev *gfbdev)
276 {
277 struct fb_info *info;
278 struct cirrus_framebuffer *gfb = &gfbdev->gfb;
280 if (gfbdev->helper.fbdev) {
281 info = gfbdev->helper.fbdev;
283 unregister_framebuffer(info);
284 if (info->cmap.len)
285 fb_dealloc_cmap(&info->cmap);
286 framebuffer_release(info);
287 }
289 if (gfb->obj) {
290 drm_gem_object_unreference_unlocked(gfb->obj);
291 gfb->obj = NULL;
292 }
294 vfree(gfbdev->sysram);
295 drm_fb_helper_fini(&gfbdev->helper);
296 drm_framebuffer_cleanup(&gfb->base);
298 return 0;
299 }
/*
 * drm_fb_helper callbacks: gamma handling is implemented by the CRTC
 * code (cirrus_mode.c); fb_probe creates the fbdev framebuffer during
 * initial configuration.
 */
static struct drm_fb_helper_funcs cirrus_fb_helper_funcs = {
	.gamma_set = cirrus_crtc_fb_gamma_set,
	.gamma_get = cirrus_crtc_fb_gamma_get,
	.fb_probe = cirrus_fb_find_or_create_single,
};
307 int cirrus_fbdev_init(struct cirrus_device *cdev)
308 {
309 struct cirrus_fbdev *gfbdev;
310 int ret;
311 int bpp_sel = 24;
313 /*bpp_sel = 8;*/
314 gfbdev = kzalloc(sizeof(struct cirrus_fbdev), GFP_KERNEL);
315 if (!gfbdev)
316 return -ENOMEM;
318 cdev->mode_info.gfbdev = gfbdev;
319 gfbdev->helper.funcs = &cirrus_fb_helper_funcs;
320 spin_lock_init(&gfbdev->dirty_lock);
322 ret = drm_fb_helper_init(cdev->dev, &gfbdev->helper,
323 cdev->num_crtc, CIRRUSFB_CONN_LIMIT);
324 if (ret) {
325 kfree(gfbdev);
326 return ret;
327 }
328 drm_fb_helper_single_add_all_connectors(&gfbdev->helper);
329 drm_fb_helper_initial_config(&gfbdev->helper, bpp_sel);
331 return 0;
332 }
334 void cirrus_fbdev_fini(struct cirrus_device *cdev)
335 {
336 if (!cdev->mode_info.gfbdev)
337 return;
339 cirrus_fbdev_destroy(cdev->dev, cdev->mode_info.gfbdev);
340 kfree(cdev->mode_info.gfbdev);
341 cdev->mode_info.gfbdev = NULL;
342 }