1 /*
2 * GPMC support functions
3 *
4 * Copyright (C) 2005-2006 Nokia Corporation
5 *
6 * Author: Juha Yrjola
7 *
8 * Copyright (C) 2009 Texas Instruments
9 * Added OMAP4 support - Santosh Shilimkar <santosh.shilimkar@ti.com>
10 *
11 * This program is free software; you can redistribute it and/or modify
12 * it under the terms of the GNU General Public License version 2 as
13 * published by the Free Software Foundation.
14 */
15 #undef DEBUG
17 #include <linux/platform_device.h>
19 #include <linux/irq.h>
20 #include <linux/kernel.h>
21 #include <linux/init.h>
22 #include <linux/err.h>
23 #include <linux/clk.h>
24 #include <linux/ioport.h>
25 #include <linux/spinlock.h>
26 #include <linux/io.h>
27 #include <linux/module.h>
28 #include <linux/interrupt.h>
29 #include <linux/pm_runtime.h>
31 #include <asm/mach-types.h>
32 #include <plat/gpmc.h>
33 #include <plat/nand.h>
35 #include <plat/sdrc.h>
37 /* GPMC register offsets */
38 #define GPMC_REVISION 0x00
39 #define GPMC_SYSCONFIG 0x10
40 #define GPMC_SYSSTATUS 0x14
41 #define GPMC_IRQSTATUS 0x18
42 #define GPMC_IRQENABLE 0x1c
43 #define GPMC_TIMEOUT_CONTROL 0x40
44 #define GPMC_ERR_ADDRESS 0x44
45 #define GPMC_ERR_TYPE 0x48
46 #define GPMC_CONFIG 0x50
47 #define GPMC_STATUS 0x54
48 #define GPMC_PREFETCH_CONFIG1 0x1e0
49 #define GPMC_PREFETCH_CONFIG2 0x1e4
50 #define GPMC_PREFETCH_CONTROL 0x1ec
51 #define GPMC_PREFETCH_STATUS 0x1f0
52 #define GPMC_ECC_CONFIG 0x1f4
53 #define GPMC_ECC_CONTROL 0x1f8
54 #define GPMC_ECC_SIZE_CONFIG 0x1fc
55 #define GPMC_ECC1_RESULT 0x200
56 #define GPMC_ECC_BCH_RESULT_0 0x240
58 #define GPMC_CS0_OFFSET 0x60
59 #define GPMC_CS_SIZE 0x30
61 #define GPMC_MEM_START 0x00000000
62 #define GPMC_MEM_END 0x3FFFFFFF
63 #define BOOT_ROM_SPACE 0x100000 /* 1MB */
65 #define GPMC_CHUNK_SHIFT 24 /* 16 MB */
66 #define GPMC_SECTION_SHIFT 28 /* 128 MB */
68 #define CS_NUM_SHIFT 24
69 #define ENABLE_PREFETCH (0x1 << 7)
70 #define DMA_MPU_MODE 2
72 /* Structure to save gpmc cs context */
/*
 * Snapshot of one chip-select's CONFIG1..CONFIG7 registers, captured by
 * omap3_gpmc_save_context() and replayed by omap3_gpmc_restore_context().
 */
struct gpmc_cs_config {
	u32 config1;
	u32 config2;
	u32 config3;
	u32 config4;
	u32 config5;
	u32 config6;
	u32 config7;
	int is_valid;	/* non-zero if this CS had CSVALID set when saved */
};
/*
 * Structure to save/restore gpmc context
 * to support core off on OMAP3
 */
struct omap3_gpmc_regs {
	u32 sysconfig;		/* GPMC_SYSCONFIG */
	u32 irqenable;		/* GPMC_IRQENABLE */
	u32 timeout_ctrl;	/* GPMC_TIMEOUT_CONTROL */
	u32 config;		/* GPMC_CONFIG */
	u32 prefetch_config1;
	u32 prefetch_config2;
	u32 prefetch_control;
	struct gpmc_cs_config cs_context[GPMC_CS_NUM];	/* one per chip-select */
};
100 #define DRIVER_NAME "omap-gpmc"
/* Driver-private state; a single instance is allocated in gpmc_probe(). */
struct gpmc {
	struct device *dev;		/* underlying platform device */
	void __iomem *io_base;		/* ioremap()ed register window */
	unsigned long phys_base;	/* physical base of register resource */
	u32 memsize;			/* size of the register resource */
	unsigned int cs_map;		/* bitmap of reserved chip-selects */
	int ecc_used;			/* initialized to -EINVAL at probe;
					 * presumably the CS owning the ECC
					 * engine — TODO confirm vs users */
	spinlock_t mem_lock;		/* protects cs_map and cs_mem[] */
	struct resource mem_root;	/* GPMC address space being parcelled out */
	struct resource cs_mem[GPMC_CS_NUM];	/* per-CS memory windows */
};
114 static struct gpmc *gpmc;
116 static void gpmc_write_reg(int idx, u32 val)
117 {
118 writel(val, gpmc->io_base + idx);
119 }
121 static u32 gpmc_read_reg(int idx)
122 {
123 return readl(gpmc->io_base + idx);
124 }
126 static void gpmc_cs_write_byte(int cs, int idx, u8 val)
127 {
128 void __iomem *reg_addr;
130 reg_addr = gpmc->io_base + GPMC_CS0_OFFSET + (cs * GPMC_CS_SIZE) + idx;
131 writeb(val, reg_addr);
132 }
134 static u8 gpmc_cs_read_byte(int cs, int idx)
135 {
136 void __iomem *reg_addr;
138 reg_addr = gpmc->io_base + GPMC_CS0_OFFSET + (cs * GPMC_CS_SIZE) + idx;
139 return readb(reg_addr);
140 }
142 void gpmc_cs_write_reg(int cs, int idx, u32 val)
143 {
144 void __iomem *reg_addr;
146 if (!gpmc) {
147 pr_err("%s invoked without initializing GPMC\n", __func__);
148 return;
149 }
151 reg_addr = gpmc->io_base + GPMC_CS0_OFFSET + (cs * GPMC_CS_SIZE) + idx;
152 writel(val, reg_addr);
153 }
155 u32 gpmc_cs_read_reg(int cs, int idx)
156 {
157 void __iomem *reg_addr;
159 if (!gpmc) {
160 pr_err("%s invoked without initializing GPMC\n", __func__);
161 return 0;
162 }
164 reg_addr = gpmc->io_base + GPMC_CS0_OFFSET + (cs * GPMC_CS_SIZE) + idx;
165 return readl(reg_addr);
166 }
168 static struct clk *gpmc_l3_clk;
/*
 * Look up the clock feeding the GPMC (the name differs per SoC
 * generation) and keep the module active via runtime PM.
 * BUG()s if the clock cannot be obtained — the GPMC is unusable
 * without it.
 */
static void __devinit gpmc_clk_init(struct device *dev)
{
	char *ck = NULL;

	if (cpu_is_omap24xx())
		ck = "core_l3_ck";
	else if (cpu_is_omap34xx())
		ck = "gpmc_fck";
	else if (cpu_is_omap44xx())
		ck = "gpmc_ck";

	/* Unknown SoC: warn and leave gpmc_l3_clk unset. */
	if (WARN_ON(!ck))
		return;

	gpmc_l3_clk = clk_get(NULL, ck);
	if (IS_ERR(gpmc_l3_clk)) {
		printk(KERN_ERR "Could not get GPMC clock %s\n", ck);
		BUG();
	}

	/* Hold a PM reference for the lifetime of the driver. */
	pm_runtime_enable(dev);
	pm_runtime_get_sync(dev);
}
194 /* TODO: Add support for gpmc_fck to clock framework and use it */
195 unsigned long gpmc_get_fclk_period(void)
196 {
197 unsigned long rate = clk_get_rate(gpmc_l3_clk);
199 if (rate == 0) {
200 printk(KERN_WARNING "gpmc_l3_clk not enabled\n");
201 return 0;
202 }
204 rate /= 1000;
205 rate = 1000000000 / rate; /* In picoseconds */
207 return rate;
208 }
/* Convert @time_ns to GPMC fclk ticks, rounding up. */
unsigned int gpmc_ns_to_ticks(unsigned int time_ns)
{
	/* Work in picoseconds to yield more exact results. */
	unsigned long period_ps = gpmc_get_fclk_period();

	return (time_ns * 1000 + period_ps - 1) / period_ps;
}
/* Convert @time_ps to GPMC fclk ticks, rounding up. */
unsigned int gpmc_ps_to_ticks(unsigned int time_ps)
{
	unsigned long period_ps = gpmc_get_fclk_period();

	return (time_ps + period_ps - 1) / period_ps;
}
/* Convert GPMC fclk @ticks back to nanoseconds (truncating). */
unsigned int gpmc_ticks_to_ns(unsigned int ticks)
{
	return (ticks * gpmc_get_fclk_period()) / 1000;
}
/*
 * Round @time_ns up to a whole number of fclk ticks and return the
 * resulting duration in nanoseconds.
 */
unsigned int gpmc_round_ns_to_ticks(unsigned int time_ns)
{
	unsigned long nticks = gpmc_ns_to_ticks(time_ns);

	return nticks * gpmc_get_fclk_period() / 1000;
}
/*
 * Program one timing bit-field of a chip-select register.
 *
 * @cs:      chip-select number
 * @reg:     GPMC_CS_CONFIGn register offset
 * @st_bit:  lowest bit of the field
 * @end_bit: highest bit of the field, inclusive
 * @time:    desired duration in nanoseconds (0 programs 0 ticks)
 *
 * Returns 0 on success, -1 if the tick count does not fit in the
 * field.  With DEBUG defined, an extra @name argument labels the
 * diagnostic printout (see the GPMC_SET_ONE macro).
 */
#ifdef DEBUG
static int set_gpmc_timing_reg(int cs, int reg, int st_bit, int end_bit,
			       int time, const char *name)
#else
static int set_gpmc_timing_reg(int cs, int reg, int st_bit, int end_bit,
			       int time)
#endif
{
	u32 l;
	int ticks, mask, nr_bits;

	if (time == 0)
		ticks = 0;
	else
		ticks = gpmc_ns_to_ticks(time);
	nr_bits = end_bit - st_bit + 1;
	/* Reject values that overflow the field. */
	if (ticks >= 1 << nr_bits) {
#ifdef DEBUG
		printk(KERN_INFO "GPMC CS%d: %-10s* %3d ns, %3d ticks >= %d\n",
				cs, name, time, ticks, 1 << nr_bits);
#endif
		return -1;
	}

	/* Read-modify-write just this field. */
	mask = (1 << nr_bits) - 1;
	l = gpmc_cs_read_reg(cs, reg);
#ifdef DEBUG
	printk(KERN_INFO
		"GPMC CS%d: %-10s: %3d ticks, %3lu ns (was %3i ticks) %3d ns\n",
	       cs, name, ticks, gpmc_get_fclk_period() * ticks / 1000,
			(l >> st_bit) & mask, time);
#endif
	l &= ~(mask << st_bit);
	l |= ticks << st_bit;
	gpmc_cs_write_reg(cs, reg, l);

	return 0;
}
/*
 * Helper for gpmc_cs_set_timings(): program t-><field> into the given
 * bit range of @reg, returning -1 from the *caller* on overflow.  The
 * DEBUG variant also passes the field name for the diagnostic print.
 */
#ifdef DEBUG
#define GPMC_SET_ONE(reg, st, end, field) \
	if (set_gpmc_timing_reg(cs, (reg), (st), (end),		\
			t->field, #field) < 0)			\
		return -1
#else
#define GPMC_SET_ONE(reg, st, end, field) \
	if (set_gpmc_timing_reg(cs, (reg), (st), (end), t->field) < 0) \
		return -1
#endif
292 int gpmc_cs_calc_divider(int cs, unsigned int sync_clk)
293 {
294 int div;
295 u32 l;
297 l = sync_clk + (gpmc_get_fclk_period() - 1);
298 div = l / gpmc_get_fclk_period();
299 if (div > 4)
300 return -1;
301 if (div <= 0)
302 div = 1;
304 return div;
305 }
/*
 * Program all chip-select timing fields for @cs from @t.
 * Returns 0 on success, -1 if any timing does not fit its field or no
 * fclk divider can realize t->sync_clk.  Note: GPMC_SET_ONE hides an
 * early "return -1" inside each line below.
 */
int gpmc_cs_set_timings(int cs, const struct gpmc_timings *t)
{
	int div;
	u32 l;

	div = gpmc_cs_calc_divider(cs, t->sync_clk);
	if (div < 0)
		return -1;

	GPMC_SET_ONE(GPMC_CS_CONFIG2,  0,  3, cs_on);
	GPMC_SET_ONE(GPMC_CS_CONFIG2,  8, 12, cs_rd_off);
	GPMC_SET_ONE(GPMC_CS_CONFIG2, 16, 20, cs_wr_off);

	GPMC_SET_ONE(GPMC_CS_CONFIG3,  0,  3, adv_on);
	GPMC_SET_ONE(GPMC_CS_CONFIG3,  8, 12, adv_rd_off);
	GPMC_SET_ONE(GPMC_CS_CONFIG3, 16, 20, adv_wr_off);

	GPMC_SET_ONE(GPMC_CS_CONFIG4,  0,  3, oe_on);
	GPMC_SET_ONE(GPMC_CS_CONFIG4,  8, 12, oe_off);
	GPMC_SET_ONE(GPMC_CS_CONFIG4, 16, 19, we_on);
	GPMC_SET_ONE(GPMC_CS_CONFIG4, 24, 28, we_off);

	GPMC_SET_ONE(GPMC_CS_CONFIG5,  0,  4, rd_cycle);
	GPMC_SET_ONE(GPMC_CS_CONFIG5,  8, 12, wr_cycle);
	GPMC_SET_ONE(GPMC_CS_CONFIG5, 16, 20, access);

	GPMC_SET_ONE(GPMC_CS_CONFIG5, 24, 27, page_burst_access);

	/* These two fields exist only on OMAP3-class GPMC. */
	if (cpu_is_omap34xx()) {
		GPMC_SET_ONE(GPMC_CS_CONFIG6, 16, 19, wr_data_mux_bus);
		GPMC_SET_ONE(GPMC_CS_CONFIG6, 24, 28, wr_access);
	}

	/* caller is expected to have initialized CONFIG1 to cover
	 * at least sync vs async
	 */
	l = gpmc_cs_read_reg(cs, GPMC_CS_CONFIG1);
	if (l & (GPMC_CONFIG1_READTYPE_SYNC | GPMC_CONFIG1_WRITETYPE_SYNC)) {
#ifdef DEBUG
		printk(KERN_INFO "GPMC CS%d CLK period is %lu ns (div %d)\n",
				cs, (div * gpmc_get_fclk_period()) / 1000, div);
#endif
		/* GPMCFCLKDIVIDER lives in CONFIG1 bits 1:0 (div - 1). */
		l &= ~0x03;
		l |= (div - 1);
		gpmc_cs_write_reg(cs, GPMC_CS_CONFIG1, l);
	}

	return 0;
}
357 static void gpmc_cs_enable_mem(int cs, u32 base, u32 size)
358 {
359 u32 l;
360 u32 mask;
362 mask = (1 << GPMC_SECTION_SHIFT) - size;
363 l = gpmc_cs_read_reg(cs, GPMC_CS_CONFIG7);
364 l &= ~0x3f;
365 l = (base >> GPMC_CHUNK_SHIFT) & 0x3f;
366 l &= ~(0x0f << 8);
367 l |= ((mask >> GPMC_CHUNK_SHIFT) & 0x0f) << 8;
368 l |= GPMC_CONFIG7_CSVALID;
369 gpmc_cs_write_reg(cs, GPMC_CS_CONFIG7, l);
370 }
372 static void gpmc_cs_disable_mem(int cs)
373 {
374 u32 l;
376 l = gpmc_cs_read_reg(cs, GPMC_CS_CONFIG7);
377 l &= ~GPMC_CONFIG7_CSVALID;
378 gpmc_cs_write_reg(cs, GPMC_CS_CONFIG7, l);
379 }
381 static void gpmc_cs_get_memconf(int cs, u32 *base, u32 *size)
382 {
383 u32 l;
384 u32 mask;
386 l = gpmc_cs_read_reg(cs, GPMC_CS_CONFIG7);
387 *base = (l & 0x3f) << GPMC_CHUNK_SHIFT;
388 mask = (l >> 8) & 0x0f;
389 *size = (1 << GPMC_SECTION_SHIFT) - (mask << GPMC_CHUNK_SHIFT);
391 if (cpu_is_am33xx()) {
392 *base = 0x8000000;
393 *size = 0x10000000;
394 }
395 }
397 static int gpmc_cs_mem_enabled(int cs)
398 {
399 u32 l;
401 l = gpmc_cs_read_reg(cs, GPMC_CS_CONFIG7);
402 return l & GPMC_CONFIG7_CSVALID;
403 }
405 int gpmc_cs_set_reserved(int cs, int reserved)
406 {
407 if (cs > GPMC_CS_NUM)
408 return -ENODEV;
410 gpmc->cs_map &= ~(1 << cs);
411 gpmc->cs_map |= (reserved ? 1 : 0) << cs;
413 return 0;
414 }
416 int gpmc_cs_reserved(int cs)
417 {
418 if (cs > GPMC_CS_NUM)
419 return -ENODEV;
421 return gpmc->cs_map & (1 << cs);
422 }
424 static unsigned long gpmc_mem_align(unsigned long size)
425 {
426 int order;
428 size = (size - 1) >> (GPMC_CHUNK_SHIFT - 1);
429 order = GPMC_CHUNK_SHIFT - 1;
430 do {
431 size >>= 1;
432 order++;
433 } while (size);
434 size = 1 << order;
435 return size;
436 }
438 static int gpmc_cs_insert_mem(int cs, unsigned long base, unsigned long size)
439 {
440 struct resource *res = &gpmc->cs_mem[cs];
441 int r;
443 size = gpmc_mem_align(size);
444 spin_lock(&gpmc->mem_lock);
445 res->start = base;
446 res->end = base + size - 1;
447 r = request_resource(&gpmc->mem_root, res);
448 spin_unlock(&gpmc->mem_lock);
450 return r;
451 }
453 int gpmc_cs_request(int cs, unsigned long size, unsigned long *base)
454 {
455 struct resource *res = &gpmc->cs_mem[cs];
456 int r = -1;
458 if (cs > GPMC_CS_NUM)
459 return -ENODEV;
461 size = gpmc_mem_align(size);
462 if (size > (1 << GPMC_SECTION_SHIFT))
463 return -ENOMEM;
465 spin_lock(&gpmc->mem_lock);
466 if (gpmc_cs_reserved(cs)) {
467 r = -EBUSY;
468 goto out;
469 }
470 if (gpmc_cs_mem_enabled(cs))
471 r = adjust_resource(res, res->start & ~(size - 1), size);
472 if (r < 0)
473 r = allocate_resource(&gpmc->mem_root, res, size, 0, ~0,
474 size, NULL, NULL);
475 if (r < 0)
476 goto out;
478 gpmc_cs_enable_mem(cs, res->start, resource_size(res));
479 *base = res->start;
480 gpmc_cs_set_reserved(cs, 1);
481 out:
482 spin_unlock(&gpmc->mem_lock);
483 return r;
484 }
485 EXPORT_SYMBOL(gpmc_cs_request);
/*
 * Release a chip-select obtained via gpmc_cs_request(): disable its
 * memory window, return the region to the GPMC pool and clear the
 * reservation bit.  BUG()s on a bogus or unreserved @cs.
 */
void gpmc_cs_free(int cs)
{
	spin_lock(&gpmc->mem_lock);
	if (cs >= GPMC_CS_NUM || cs < 0 || !gpmc_cs_reserved(cs)) {
		printk(KERN_ERR "Trying to free non-reserved GPMC CS%d\n", cs);
		BUG();
		/* Only reached when the kernel is built with CONFIG_BUG=n. */
		spin_unlock(&gpmc->mem_lock);
		return;
	}
	gpmc_cs_disable_mem(cs);
	release_resource(&gpmc->cs_mem[cs]);
	gpmc_cs_set_reserved(cs, 0);
	spin_unlock(&gpmc->mem_lock);
}
EXPORT_SYMBOL(gpmc_cs_free);
503 /**
504 * gpmc_read_status - read access request to get the different gpmc status
505 * @cmd: command type
506 * @return status
507 */
508 int gpmc_read_status(int cmd)
509 {
510 int status = -EINVAL;
511 u32 regval = 0;
513 switch (cmd) {
514 case GPMC_GET_IRQ_STATUS:
515 status = gpmc_read_reg(GPMC_IRQSTATUS);
516 break;
518 case GPMC_PREFETCH_FIFO_CNT:
519 regval = gpmc_read_reg(GPMC_PREFETCH_STATUS);
520 status = GPMC_PREFETCH_STATUS_FIFO_CNT(regval);
521 break;
523 case GPMC_PREFETCH_COUNT:
524 regval = gpmc_read_reg(GPMC_PREFETCH_STATUS);
525 status = GPMC_PREFETCH_STATUS_COUNT(regval);
526 break;
528 case GPMC_STATUS_BUFFER:
529 regval = gpmc_read_reg(GPMC_STATUS);
530 /* 1 : buffer is available to write */
531 status = regval & GPMC_STATUS_BUFF_EMPTY;
532 break;
534 default:
535 printk(KERN_ERR "gpmc_read_status: Not supported\n");
536 }
537 return status;
538 }
539 EXPORT_SYMBOL(gpmc_read_status);
/**
 * gpmc_cs_configure - write request to configure gpmc
 * @cs: chip select number
 * @cmd: command type
 * @wval: value to write
 * @return status of the operation
 */
int gpmc_cs_configure(int cs, int cmd, int wval)
{
	int err = 0;
	u32 regval = 0;

	switch (cmd) {
	case GPMC_ENABLE_IRQ:
		gpmc_write_reg(GPMC_IRQENABLE, wval);
		break;

	case GPMC_SET_IRQ_STATUS:
		/* Writing 1s acknowledges the corresponding IRQ bits. */
		gpmc_write_reg(GPMC_IRQSTATUS, wval);
		break;

	case GPMC_CONFIG_WP:
		regval = gpmc_read_reg(GPMC_CONFIG);
		if (wval)
			regval &= ~GPMC_CONFIG_WRITEPROTECT; /* WP is ON */
		else
			regval |= GPMC_CONFIG_WRITEPROTECT;  /* WP is OFF */
		gpmc_write_reg(GPMC_CONFIG, regval);
		break;

	case GPMC_CONFIG_RDY_BSY:
		/* Enable/disable wait-pin monitoring for this CS. */
		regval  = gpmc_cs_read_reg(cs, GPMC_CS_CONFIG1);
		if (wval)
			regval |= WR_RD_PIN_MONITORING;
		else
			regval &= ~WR_RD_PIN_MONITORING;
		gpmc_cs_write_reg(cs, GPMC_CS_CONFIG1, regval);
		break;

	case GPMC_CONFIG_DEV_SIZE:
		regval  = gpmc_cs_read_reg(cs, GPMC_CS_CONFIG1);
		/* NOTE(review): only ORs the new size bits in; a
		 * previously-programmed DEVICESIZE value is never
		 * cleared — confirm the field should not be masked
		 * out first. */
		regval |= GPMC_CONFIG1_DEVICESIZE(wval);
		gpmc_cs_write_reg(cs, GPMC_CS_CONFIG1, regval);
		break;

	case GPMC_CONFIG_DEV_TYPE:
		regval  = gpmc_cs_read_reg(cs, GPMC_CS_CONFIG1);
		/* NOTE(review): same OR-without-clear pattern as
		 * DEV_SIZE above. */
		regval |= GPMC_CONFIG1_DEVICETYPE(wval);
		if (wval == GPMC_DEVICETYPE_NOR)
			regval |= GPMC_CONFIG1_MUXADDDATA;
		gpmc_cs_write_reg(cs, GPMC_CS_CONFIG1, regval);
		break;

	default:
		printk(KERN_ERR "gpmc_configure_cs: Not supported\n");
		err = -EINVAL;
	}

	return err;
}
EXPORT_SYMBOL(gpmc_cs_configure);
603 /**
604 * gpmc_nand_read - nand specific read access request
605 * @cs: chip select number
606 * @cmd: command type
607 */
608 int gpmc_nand_read(int cs, int cmd)
609 {
610 int rval = -EINVAL;
612 switch (cmd) {
613 case GPMC_NAND_DATA:
614 rval = gpmc_cs_read_byte(cs, GPMC_CS_NAND_DATA);
615 break;
617 default:
618 printk(KERN_ERR "gpmc_read_nand_ctrl: Not supported\n");
619 }
620 return rval;
621 }
622 EXPORT_SYMBOL(gpmc_nand_read);
624 /**
625 * gpmc_nand_write - nand specific write request
626 * @cs: chip select number
627 * @cmd: command type
628 * @wval: value to write
629 */
630 int gpmc_nand_write(int cs, int cmd, int wval)
631 {
632 int err = 0;
634 switch (cmd) {
635 case GPMC_NAND_COMMAND:
636 gpmc_cs_write_byte(cs, GPMC_CS_NAND_COMMAND, wval);
637 break;
639 case GPMC_NAND_ADDRESS:
640 gpmc_cs_write_byte(cs, GPMC_CS_NAND_ADDRESS, wval);
641 break;
643 case GPMC_NAND_DATA:
644 gpmc_cs_write_byte(cs, GPMC_CS_NAND_DATA, wval);
646 default:
647 printk(KERN_ERR "gpmc_write_nand_ctrl: Not supported\n");
648 err = -EINVAL;
649 }
650 return err;
651 }
652 EXPORT_SYMBOL(gpmc_nand_write);
656 /**
657 * gpmc_prefetch_enable - configures and starts prefetch transfer
658 * @cs: cs (chip select) number
659 * @fifo_th: fifo threshold to be used for read/ write
660 * @dma_mode: dma mode enable (1) or disable (0)
661 * @u32_count: number of bytes to be transferred
662 * @is_write: prefetch read(0) or write post(1) mode
663 */
664 int gpmc_prefetch_enable(int cs, int fifo_th, int dma_mode,
665 unsigned int u32_count, int is_write)
666 {
668 if (fifo_th > PREFETCH_FIFOTHRESHOLD_MAX) {
669 pr_err("gpmc: fifo threshold is not supported\n");
670 return -1;
671 } else if (!(gpmc_read_reg(GPMC_PREFETCH_CONTROL))) {
672 /* Set the amount of bytes to be prefetched */
673 gpmc_write_reg(GPMC_PREFETCH_CONFIG2, u32_count);
675 /* Set dma/mpu mode, the prefetch read / post write and
676 * enable the engine. Set which cs is has requested for.
677 */
678 gpmc_write_reg(GPMC_PREFETCH_CONFIG1, ((cs << CS_NUM_SHIFT) |
679 PREFETCH_FIFOTHRESHOLD(fifo_th) |
680 ENABLE_PREFETCH |
681 (dma_mode << DMA_MPU_MODE) |
682 (0x1 & is_write)));
684 /* Start the prefetch engine */
685 gpmc_write_reg(GPMC_PREFETCH_CONTROL, 0x1);
686 } else {
687 return -EBUSY;
688 }
690 return 0;
691 }
692 EXPORT_SYMBOL(gpmc_prefetch_enable);
694 /**
695 * gpmc_prefetch_reset - disables and stops the prefetch engine
696 */
697 int gpmc_prefetch_reset(int cs)
698 {
699 u32 config1;
701 /* check if the same module/cs is trying to reset */
702 config1 = gpmc_read_reg(GPMC_PREFETCH_CONFIG1);
703 if (((config1 >> CS_NUM_SHIFT) & 0x7) != cs)
704 return -EINVAL;
706 /* Stop the PFPW engine */
707 gpmc_write_reg(GPMC_PREFETCH_CONTROL, 0x0);
709 /* Reset/disable the PFPW engine */
710 gpmc_write_reg(GPMC_PREFETCH_CONFIG1, 0x0);
712 return 0;
713 }
714 EXPORT_SYMBOL(gpmc_prefetch_reset);
716 static void __devinit gpmc_mem_init(void)
717 {
718 int cs;
719 unsigned long boot_rom_space = 0;
721 /* never allocate the first page, to facilitate bug detection;
722 * even if we didn't boot from ROM.
723 */
724 boot_rom_space = BOOT_ROM_SPACE;
725 /* In apollon the CS0 is mapped as 0x0000 0000 */
726 if (machine_is_omap_apollon())
727 boot_rom_space = 0;
728 gpmc->mem_root.start = GPMC_MEM_START + boot_rom_space;
729 gpmc->mem_root.end = GPMC_MEM_END;
731 /* Reserve all regions that has been set up by bootloader */
732 for (cs = 0; cs < GPMC_CS_NUM; cs++) {
733 u32 base, size;
735 if (!gpmc_cs_mem_enabled(cs))
736 continue;
737 gpmc_cs_get_memconf(cs, &base, &size);
738 if (gpmc_cs_insert_mem(cs, base, size) < 0)
739 BUG();
740 }
741 }
743 struct device *gpmc_dev;
745 static int __devinit gpmc_probe(struct platform_device *pdev)
746 {
747 u32 l;
748 int ret = -EINVAL;
749 struct resource *res = NULL;
750 struct gpmc_devices_info *gpmc_device = pdev->dev.platform_data;
751 void *p;
753 /* XXX: This should go away with HWMOD & runtime PM adaptation */
754 gpmc_clk_init(&pdev->dev);
756 gpmc_dev = &pdev->dev;
758 gpmc = devm_kzalloc(&pdev->dev, sizeof(struct gpmc), GFP_KERNEL);
759 if (!gpmc)
760 return -ENOMEM;
762 gpmc->dev = &pdev->dev;
764 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
765 if (!res) {
766 ret = -ENOENT;
767 dev_err(gpmc->dev, "Failed to get resource: memory\n");
768 goto err_res;
769 }
770 gpmc->phys_base = res->start;
771 gpmc->memsize = resource_size(res);
773 if (request_mem_region(gpmc->phys_base,
774 gpmc->memsize, DRIVER_NAME) == NULL) {
775 ret = -ENOMEM;
776 dev_err(gpmc->dev, "Failed to request memory region\n");
777 goto err_mem;
778 }
780 gpmc->io_base = ioremap(gpmc->phys_base, gpmc->memsize);
781 if (!gpmc->io_base) {
782 ret = -ENOMEM;
783 dev_err(gpmc->dev, "Failed to ioremap memory\n");
784 goto err_remap;
785 }
787 gpmc->ecc_used = -EINVAL;
788 spin_lock_init(&gpmc->mem_lock);
789 platform_set_drvdata(pdev, gpmc);
791 l = gpmc_read_reg(GPMC_REVISION);
792 dev_info(gpmc->dev, "GPMC revision %d.%d\n", (l >> 4) & 0x0f, l & 0x0f);
794 gpmc_mem_init();
796 for (p = gpmc_device->pdata; p; gpmc_device++, p = gpmc_device->pdata)
797 if (gpmc_device->flag & GPMC_DEVICE_NAND)
798 gpmc_nand_init((struct omap_nand_platform_data *) p);
799 return 0;
801 err_remap:
802 release_mem_region(gpmc->phys_base, gpmc->memsize);
803 err_mem:
804 err_res:
805 devm_kfree(&pdev->dev, gpmc);
806 return ret;
807 }
809 static int __devexit gpmc_remove(struct platform_device *pdev)
810 {
811 struct gpmc *gpmc = platform_get_drvdata(pdev);
813 platform_set_drvdata(pdev, NULL);
814 iounmap(gpmc->io_base);
815 release_mem_region(gpmc->phys_base, gpmc->memsize);
816 devm_kfree(&pdev->dev, gpmc);
818 return 0;
819 }
/* Platform-driver glue; bound against the "omap-gpmc" device. */
static struct platform_driver gpmc_driver = {
	.probe		= gpmc_probe,
	.remove		= __devexit_p(gpmc_remove),
	.driver		= {
		.name	= DRIVER_NAME,
		.owner	= THIS_MODULE,
	},
};

module_platform_driver(gpmc_driver);
/* Save GPMC state and drop the runtime-PM reference before core off. */
int gpmc_suspend(void)
{
	omap3_gpmc_save_context();
	pm_runtime_put_sync(gpmc_dev);
	return 0;
}
/* Re-power the GPMC, then replay the saved register context. */
int gpmc_resume(void)
{
	pm_runtime_get_sync(gpmc_dev);
	omap3_gpmc_restore_context();
	return 0;
}
846 #ifdef CONFIG_ARCH_OMAP3
847 static struct omap3_gpmc_regs gpmc_context;
/*
 * Capture the global GPMC registers plus CONFIG1..CONFIG7 of every
 * enabled chip-select into gpmc_context, for restore after core off.
 */
void omap3_gpmc_save_context(void)
{
	int i;

	gpmc_context.sysconfig = gpmc_read_reg(GPMC_SYSCONFIG);
	gpmc_context.irqenable = gpmc_read_reg(GPMC_IRQENABLE);
	gpmc_context.timeout_ctrl = gpmc_read_reg(GPMC_TIMEOUT_CONTROL);
	gpmc_context.config = gpmc_read_reg(GPMC_CONFIG);
	gpmc_context.prefetch_config1 = gpmc_read_reg(GPMC_PREFETCH_CONFIG1);
	gpmc_context.prefetch_config2 = gpmc_read_reg(GPMC_PREFETCH_CONFIG2);
	gpmc_context.prefetch_control = gpmc_read_reg(GPMC_PREFETCH_CONTROL);
	for (i = 0; i < GPMC_CS_NUM; i++) {
		/* Only save chip-selects that are actually enabled. */
		gpmc_context.cs_context[i].is_valid = gpmc_cs_mem_enabled(i);
		if (gpmc_context.cs_context[i].is_valid) {
			gpmc_context.cs_context[i].config1 =
				gpmc_cs_read_reg(i, GPMC_CS_CONFIG1);
			gpmc_context.cs_context[i].config2 =
				gpmc_cs_read_reg(i, GPMC_CS_CONFIG2);
			gpmc_context.cs_context[i].config3 =
				gpmc_cs_read_reg(i, GPMC_CS_CONFIG3);
			gpmc_context.cs_context[i].config4 =
				gpmc_cs_read_reg(i, GPMC_CS_CONFIG4);
			gpmc_context.cs_context[i].config5 =
				gpmc_cs_read_reg(i, GPMC_CS_CONFIG5);
			gpmc_context.cs_context[i].config6 =
				gpmc_cs_read_reg(i, GPMC_CS_CONFIG6);
			gpmc_context.cs_context[i].config7 =
				gpmc_cs_read_reg(i, GPMC_CS_CONFIG7);
		}
	}
}
/*
 * Write the previously-saved gpmc_context back into the hardware.
 * CONFIG7 (window decode + CSVALID) is written last per chip-select so
 * the CS is only re-validated once its other registers are in place.
 */
void omap3_gpmc_restore_context(void)
{
	int i;

	gpmc_write_reg(GPMC_SYSCONFIG, gpmc_context.sysconfig);
	gpmc_write_reg(GPMC_IRQENABLE, gpmc_context.irqenable);
	gpmc_write_reg(GPMC_TIMEOUT_CONTROL, gpmc_context.timeout_ctrl);
	gpmc_write_reg(GPMC_CONFIG, gpmc_context.config);
	gpmc_write_reg(GPMC_PREFETCH_CONFIG1, gpmc_context.prefetch_config1);
	gpmc_write_reg(GPMC_PREFETCH_CONFIG2, gpmc_context.prefetch_config2);
	gpmc_write_reg(GPMC_PREFETCH_CONTROL, gpmc_context.prefetch_control);
	for (i = 0; i < GPMC_CS_NUM; i++) {
		if (gpmc_context.cs_context[i].is_valid) {
			gpmc_cs_write_reg(i, GPMC_CS_CONFIG1,
				gpmc_context.cs_context[i].config1);
			gpmc_cs_write_reg(i, GPMC_CS_CONFIG2,
				gpmc_context.cs_context[i].config2);
			gpmc_cs_write_reg(i, GPMC_CS_CONFIG3,
				gpmc_context.cs_context[i].config3);
			gpmc_cs_write_reg(i, GPMC_CS_CONFIG4,
				gpmc_context.cs_context[i].config4);
			gpmc_cs_write_reg(i, GPMC_CS_CONFIG5,
				gpmc_context.cs_context[i].config5);
			gpmc_cs_write_reg(i, GPMC_CS_CONFIG6,
				gpmc_context.cs_context[i].config6);
			gpmc_cs_write_reg(i, GPMC_CS_CONFIG7,
				gpmc_context.cs_context[i].config7);
		}
	}
}
911 #endif /* CONFIG_ARCH_OMAP3 */
/**
 * gpmc_enable_hwecc - enable hardware ecc functionality
 * @ecc_type: ecc type e.g. Hamming, BCH
 * @cs: chip select number
 * @mode: read/write mode
 * @dev_width: device bus width(1 for x16, 0 for x8)
 * @ecc_size: bytes for which ECC will be generated
 */
int gpmc_enable_hwecc(int ecc_type, int cs, int mode,
			int dev_width, int ecc_size)
{
	unsigned int bch_mod = 0, bch_wrapmode = 0, eccsize1 = 0, eccsize0 = 0;
	unsigned int ecc_conf_val = 0, ecc_size_conf_val = 0;

	/*
	 * Pick engine geometry for the requested direction.  The
	 * eccsize/wrapmode magic numbers are per-algorithm values —
	 * presumably straight from the TRM; verify against the GPMC
	 * chapter before changing.  NOTE(review): an unrecognized
	 * @mode only logs an error and still programs the registers
	 * below with zeroed values — confirm this is intended.
	 */
	switch (mode) {
	case GPMC_ECC_READ:
		if (ecc_type == OMAP_ECC_BCH4_CODE_HW) {
			eccsize1 = 0xD; eccsize0 = 0x48;
			bch_mod = 0;
			bch_wrapmode = 0x09;
		} else if (ecc_type == OMAP_ECC_BCH8_CODE_HW) {
			eccsize1 = 0x2; eccsize0 = 0x1A;
			bch_mod = 1;
			bch_wrapmode = 0x01;
		} else
			eccsize1 = ((ecc_size >> 1) - 1);
		break;
	case GPMC_ECC_READSYN:
		break;
	case GPMC_ECC_WRITE:
		if (ecc_type == OMAP_ECC_BCH4_CODE_HW) {
			eccsize1 = 0x20; eccsize0 = 0x00;
			bch_mod = 0;
			bch_wrapmode = 0x06;
		} else if (ecc_type == OMAP_ECC_BCH8_CODE_HW) {
			eccsize1 = 0x1c; eccsize0 = 0x00;
			bch_mod = 1;
			bch_wrapmode = 0x01;
		} else
			eccsize1 = ((ecc_size >> 1) - 1);
		break;
	default:
		printk(KERN_INFO "Error: Unrecognized Mode[%d]!\n", mode);
		break;
	}

	/* clear ecc and enable bits */
	if ((ecc_type == OMAP_ECC_BCH4_CODE_HW) ||
		(ecc_type == OMAP_ECC_BCH8_CODE_HW)) {
		gpmc_write_reg(GPMC_ECC_CONTROL, 0x00000001);
		ecc_size_conf_val = (eccsize1 << 22) | (eccsize0 << 12);
		ecc_conf_val = ((0x01 << 16) | (bch_mod << 12)
			| (bch_wrapmode << 8) | (dev_width << 7)
			| (0x00 << 4) | (cs << 1) | (0x1));
	} else {
		gpmc_write_reg(GPMC_ECC_CONTROL, 0x00000101);
		ecc_size_conf_val = (eccsize1 << 22) | 0x0000000F;
		ecc_conf_val = (dev_width << 7) | (cs << 1) | (0x1);
	}

	/* Program size/config, then clear results and enable. */
	gpmc_write_reg(GPMC_ECC_SIZE_CONFIG, ecc_size_conf_val);
	gpmc_write_reg(GPMC_ECC_CONFIG, ecc_conf_val);
	gpmc_write_reg(GPMC_ECC_CONTROL, 0x00000101);
	return 0;
}
/**
 * gpmc_calculate_ecc - generate non-inverted ecc bytes
 * @ecc_type: ecc type e.g. Hamming, BCH
 * @cs: chip select number
 * @dat: data pointer over which ecc is computed
 * @ecc_code: ecc code buffer
 *
 * Using non-inverted ECC is considered ugly since writing a blank
 * page (padding) will clear the ECC bytes. This is not a problem as long
 * no one is trying to write data on the seemingly unused page. Reading
 * an erased page will produce an ECC mismatch between generated and read
 * ECC bytes that has to be dealt with separately.
 */
int gpmc_calculate_ecc(int ecc_type, int cs,
		const u_char *dat, u_char *ecc_code)
{
	unsigned int reg;
	unsigned int val1 = 0x0, val2 = 0x0;
	unsigned int val3 = 0x0, val4 = 0x0;
	int i;

	if ((ecc_type == OMAP_ECC_BCH4_CODE_HW) ||
		(ecc_type == OMAP_ECC_BCH8_CODE_HW)) {
		/* NOTE(review): this loop runs exactly once although the
		 * comment below lists four result-register banks —
		 * confirm whether multi-sector reads should iterate
		 * further. */
		for (i = 0; i < 1; i++) {
			/*
			 * Reading HW ECC_BCH_Results
			 * 0x240-0x24C, 0x250-0x25C, 0x260-0x26C, 0x270-0x27C
			 */
			reg = GPMC_ECC_BCH_RESULT_0 + (0x10 * i);
			val1 = gpmc_read_reg(reg);
			val2 = gpmc_read_reg(reg + 4);
			if (ecc_type == OMAP_ECC_BCH8_CODE_HW) {
				/* BCH8 emits 13 bytes: 6 extra from the
				 * upper result words first. */
				val3 = gpmc_read_reg(reg + 8);
				val4 = gpmc_read_reg(reg + 12);

				*ecc_code++ = (val4 & 0xFF);
				*ecc_code++ = ((val3 >> 24) & 0xFF);
				*ecc_code++ = ((val3 >> 16) & 0xFF);
				*ecc_code++ = ((val3 >> 8) & 0xFF);
				*ecc_code++ = (val3 & 0xFF);
				*ecc_code++ = ((val2 >> 24) & 0xFF);
			}
			/* Common tail: 7 bytes from the low result words. */
			*ecc_code++ = ((val2 >> 16) & 0xFF);
			*ecc_code++ = ((val2 >> 8) & 0xFF);
			*ecc_code++ = (val2 & 0xFF);
			*ecc_code++ = ((val1 >> 24) & 0xFF);
			*ecc_code++ = ((val1 >> 16) & 0xFF);
			*ecc_code++ = ((val1 >> 8) & 0xFF);
			*ecc_code++ = (val1 & 0xFF);
		}
	} else {
		/* read ecc result */
		val1 = gpmc_read_reg(GPMC_ECC1_RESULT);
		*ecc_code++ = val1;          /* P128e, ..., P1e */
		*ecc_code++ = val1 >> 16;    /* P128o, ..., P1o */
		/* P2048o, P1024o, P512o, P256o, P2048e, P1024e, P512e, P256e */
		*ecc_code++ = ((val1 >> 8) & 0x0f) | ((val1 >> 20) & 0xf0);
	}
	return 0;
}