index d469a99a25536244c968c510fe726b0bdbbf8e8d..cbd16cf6c522b436f07c12f32ea1b572a14f373f 100644 (file)
#include <linux/scatterlist.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
-#include "../../../drivers/staging/omapdrm/omap_dmm_tiler.h"
+#include "../../../drivers/gpu/drm/omapdrm/omap_dmm_tiler.h"
#include <asm/mach/map.h>
#include <asm/page.h>
#include "../../../arch/arm/mach-omap2/soc.h"
bool lump; /* true for a single lump allocation */
u32 n_phys_pages; /* number of physical pages */
u32 *phys_addrs; /* array addrs of pages */
- struct page **phys_pages;/* array page pointers */
u32 n_tiler_pages; /* number of tiler pages */
u32 *tiler_addrs; /* array of addrs of tiler pages */
int fmt; /* tiler buffer format */
first entry onf tiler_addrs */
u32 vsize; /* virtual stride of buffer */
u32 vstride; /* virtual size of buffer */
+ u32 phys_stride; /* Physical stride of the buffer */
+ u32 flags; /* Flags specifying cached or not */
};
static int omap_tiler_heap_allocate(struct ion_heap *heap,
unsigned long size, unsigned long align,
unsigned long flags)
{
- if (size == 0)
+ struct omap_tiler_info *info;
+
+ /* This means the buffer is already allocated and populated, we're getting here because
+ * of dummy handle creation, so simply return*/
+ if (size == 0) {
+ /*
+ * Store the pointer to struct omap_tiler_info * into buffer here.
+ * This will be used later on inside map_dma function to create
+ * the sg list for tiler buffer
+ */
+ info = (struct omap_tiler_info *) flags;
+ if (!info)
+ pr_err("%s: flags argument is not setupg\n", __func__);
+ buffer->priv_virt = info;
+ /* Re-update correct flags inside buffer */
+ buffer->flags = info->flags;
return 0;
+ }
pr_err("%s: This should never be called directly -- use the "
"OMAP_ION_TILER_ALLOC flag to the ION_IOC_CUSTOM "
info->lump = true;
for (i = 0; i < info->n_phys_pages; i++)
info->phys_addrs[i] = addr + i * PAGE_SIZE;
- info->phys_pages[i] = phys_to_page(info->phys_addrs[i]);
return 0;
}
goto err;
}
info->phys_addrs[i] = addr;
- info->phys_pages[i] = phys_to_page(addr);
}
return 0;
u32 n_phys_pages;
u32 n_tiler_pages;
int i = 0, ret;
- uint32_t phys_stride, remainder;
+ uint32_t remainder;
dma_addr_t ssptr;
if (data->fmt == TILFMT_PAGE && data->h != 1) {
}
info = kzalloc(sizeof(struct omap_tiler_info) +
- sizeof(u32) * n_phys_pages +
sizeof(u32) * n_phys_pages +
sizeof(u32) * n_tiler_pages, GFP_KERNEL);
if (!info)
info->n_phys_pages = n_phys_pages;
info->n_tiler_pages = n_tiler_pages;
info->phys_addrs = (u32 *)(info + 1);
- info->phys_pages = (struct page **) (info->phys_addrs + n_phys_pages);
info->tiler_addrs = info->phys_addrs + n_phys_pages;
info->fmt = data->fmt;
+ info->flags = data->flags;
/* Allocate tiler space
FIXME: we only support PAGE_SIZE alignment right now. */
info->tiler_handle = tiler_reserve_2d(data->fmt, data->w,
data->h, PAGE_SIZE);
+ info->tiler_handle->width = data->w;
+ info->tiler_handle->height = data->h;
+
if (IS_ERR_OR_NULL(info->tiler_handle)) {
ret = PTR_ERR(info->tiler_handle);
pr_err("%s: failure to allocate address space from tiler\n",
/* get physical address of tiler buffer */
info->tiler_start = tiler_ssptr(info->tiler_handle);
- /*todo: need to check if this will work when passed to phys to page fn */
- buffer->priv_phys = info->tiler_start;
-
/* fill in tiler pages by using ssptr and stride */
info->vstride = info->tiler_handle->stride;
info->vsize = n_tiler_pages << PAGE_SHIFT;
- phys_stride = (data->fmt == TILFMT_PAGE) ? info->vstride :
+ info->phys_stride = (data->fmt == TILFMT_PAGE) ? info->vstride :
tiler_stride(data->fmt, 0);
ssptr = info->tiler_start;
remainder = info->vstride;
line */
if (!remainder) {
remainder = info->vstride;
- ssptr += phys_stride - info->vstride;
+ ssptr += info->phys_stride - info->vstride;
}
}
if (ret)
goto err_got_tiler;
- ret = tiler_pin(info->tiler_handle, info->phys_pages,
- info->n_phys_pages,0, false);
+ ret = tiler_pin_phys(info->tiler_handle, info->phys_addrs,
+ info->n_phys_pages);
+
if (ret) {
pr_err("%s: failure to pin pages to tiler\n",
__func__);
data->stride = info->vstride;
/* create an ion handle for the allocation */
- handle = ion_alloc(client, 0, 0, 1<<OMAP_ION_HEAP_TILER, 1 << OMAP_ION_HEAP_TILER);
+ handle = ion_alloc(client, -1, 0, 1 << OMAP_ION_HEAP_TILER, (unsigned int) info);
if (IS_ERR_OR_NULL(handle)) {
ret = PTR_ERR(handle);
pr_err("%s: failure to allocate handle to manage "
buffer = ion_handle_buffer(handle);
buffer->size = n_tiler_pages * PAGE_SIZE;
- buffer->priv_virt = info;
data->handle = handle;
data->offset = (size_t)(info->tiler_start & ~PAGE_MASK);
tiler_release(info->tiler_handle);
if ((buffer->heap->id == OMAP_ION_HEAP_TILER) ||
- (buffer->heap->id == OMAP_ION_HEAP_NONSECURE_TILER)) {
+ (buffer->heap->id == OMAP_ION_HEAP_NONSECURE_TILER))
omap_tiler_free_carveout(buffer->heap, info);
- }
kfree(info);
}
*tiler_addrs = info->tiler_addrs;
return 0;
}
+EXPORT_SYMBOL(omap_tiler_pages);
int omap_tiler_vinfo(struct ion_client *client, struct ion_handle *handle,
unsigned int *vstride, unsigned int *vsize)
int i, ret = 0;
pgprot_t vm_page_prot;
- /* Use writecombined mappings unless on OMAP5. If OMAP5, use
+ /* Use writecombined mappings unless on OMAP5 or DRA7. If OMAP5 or DRA7, use
shared device due to h/w issue. */
- if (soc_is_omap54xx())
+ if (soc_is_omap54xx() || soc_is_dra7xx())
vm_page_prot = __pgprot_modify(vma->vm_page_prot, L_PTE_MT_MASK,
L_PTE_MT_DEV_SHARED);
else
return ret;
}
+/*
+ * Allocation hook for __sg_alloc_table(): kmalloc one contiguous array of
+ * @nents scatterlist entries (no page chaining on ARM, see map_dma).
+ * kmalloc_array() is used instead of a raw kmalloc(nents * size) so the
+ * nents * sizeof(struct scatterlist) multiplication cannot overflow.
+ */
+static struct scatterlist *sg_alloc(unsigned int nents, gfp_t gfp_mask)
+{
+	return kmalloc_array(nents, sizeof(struct scatterlist), gfp_mask);
+}
+
+/*
+ * Free hook for __sg_free_table(): releases the scatterlist array that
+ * sg_alloc() allocated.  @nents is required by the callback signature but
+ * unused here -- the whole array was a single allocation, so one kfree()
+ * suffices.
+ */
+static void sg_free(struct scatterlist *sg, unsigned int nents)
+{
+	kfree(sg);
+}
+
+
+
struct sg_table *omap_tiler_heap_map_dma(struct ion_heap *heap,
struct ion_buffer *buffer)
{
- struct sg_table *table;
- int ret;
+ int ret, i;
+ struct sg_table *table = NULL;
+ struct scatterlist *sg;
+ struct omap_tiler_info *info = NULL;
+ static phys_addr_t paddr;
- /*
- * this function is currently a placeholder. this needs to be modified to pass the
- * correct physical pointers
- */
+
+ info = buffer->priv_virt;
+
+ if(!info)
+ return table;
table = kzalloc(sizeof(struct sg_table), GFP_KERNEL);
if (!table)
return ERR_PTR(-ENOMEM);
- ret = sg_alloc_table(table, 1, GFP_KERNEL);
+ /* sg_alloc_table can only allocate multi-page scatter-gather list tables
+ * if the architecture supports scatter-gather lists chaining. ARM doesn't
+ * fit in that category.
+ * Use __sg_alloc_table instead of sg_alloc_table and allocate all entries
+ * in one go. Otherwise trying to allocate beyond SG_MAX_SINGLE_ALLOC
+ * when height > SG_MAX_SINGLE_ALLOC will hit a BUG_ON in __sg_alloc_table.
+ */
+
+ ret = __sg_alloc_table(table, info->tiler_handle->height, -1, GFP_KERNEL, sg_alloc);
if (ret) {
kfree(table);
return ERR_PTR(ret);
}
- sg_set_page(table->sgl, phys_to_page(buffer->priv_phys), buffer->size,
- 0);
+
+ sg = table->sgl;
+ for (i = 0; i < info->tiler_handle->height; i++) {
+ paddr = info->tiler_start+ (i * info->phys_stride);
+ sg_set_page(sg, phys_to_page(paddr), info->vstride, 0);
+ sg = sg_next(sg);
+ }
+
return table;
}
+/*
+ * Tear down the sg list built by omap_tiler_heap_map_dma().
+ * Must mirror map_dma's __sg_alloc_table(..., -1, ..., sg_alloc): same
+ * max_ents (-1, single flat allocation) and the matching sg_free hook,
+ * since the entries were one kmalloc'ed array rather than chained pages.
+ *
+ * NOTE(review): the sg_table struct kzalloc'ed in map_dma is not kfree'd
+ * here -- confirm the ion core releases buffer->sg_table itself, otherwise
+ * this leaks sizeof(struct sg_table) per buffer.
+ */
void omap_tiler_heap_unmap_dma(struct ion_heap *heap,
			       struct ion_buffer *buffer)
{
-	sg_free_table(buffer->sg_table);
+	__sg_free_table(buffer->sg_table, -1, sg_free);
}
void *ion_tiler_heap_map_kernel(struct ion_heap *heap,