author | Sundar Raman <a0393242@ti.com> | |
Thu, 25 Jul 2013 20:05:38 +0000 (15:05 -0500) | ||
committer | Sundar Raman <a0393242@ti.com> | |
Thu, 25 Jul 2013 20:05:38 +0000 (15:05 -0500) |
21 files changed:
index 896d885f94f1d12d99045bc3d2749d17c227b094..7a56b6d4ac25e3128ed7e455a830d5eae06f0fb8 100644 (file)
regulator-min-microvolt = <3000000>;
regulator-max-microvolt = <3000000>;
};
+
+ ocp {
+ gpu: gpu@0x56000000 {
+ gpu-supply = <&avs_gpu>;
+ };
+ };
+
};
&dra7_pmx_core {
index 659b3b766312822feae18f3c702418a4b5172151..7f92247a03fc2b9f0ad120249bf8d2a09c5f2f94 100644 (file)
compatible = "ti,omap-clock";
};
+ dpll_gpu_m2_ck: dpll_gpu_m2_ck {
+ #clock-cells = <0>;
+ compatible = "ti,omap-clock";
+ };
+
+ dpll_core_h14x2_ck: dpll_core_h14x2_ck {
+ #clock-cells = <0>;
+ compatible = "ti,omap-clock";
+ };
+
+ dpll_per_h14x2_ck: dpll_per_h14x2_ck {
+ #clock-cells = <0>;
+ compatible = "ti,omap-clock";
+ };
+
+ gpu_core_gclk_mux: gpu_core_gclk_mux {
+ #clock-cells = <0>;
+ compatible = "ti,omap-clock";
+ };
+
+ gpu_hyd_gclk_mux: gpu_hyd_gclk_mux {
+ #clock-cells = <0>;
+ compatible = "ti,omap-clock";
+ };
+
sdma: dma-controller@4a056000 {
compatible = "ti,omap4430-sdma";
reg = <0x4a056000 0x1000>;
ti,hwmods = "dmm";
};
+ gpu: gpu@0x56000000 {
+ compatible = "ti,omap4-gpu";
+ reg = <0x56000000 0xffff>;
+ interrupts = <0 21 0x4>;
+ ti,hwmods = "gpu";
+ operating-points = <
+ /* kHz uV */
+ 425600 1090000
+ 500000 1210000
+ 532000 1280000
+ >;
+ clocks = <&dpll_core_h14x2_ck>, <&dpll_per_h14x2_ck>,
+ <&dpll_gpu_m2_ck>, <&gpu_core_gclk_mux>,
+ <&gpu_hyd_gclk_mux>;
+ clock-names = "core", "per", "gpu", "gpu_core", "gpu_hyd";
+ };
+
bandgap {
reg = <0x4a0021e0 0xc
0x4a00232c 0xc
index 8edf3b640a9df3e8ffe3020744b3821edecbceb6..e3514f7e7c0fc12bd941f7cd90e239015ed32855 100644 (file)
CONFIG_ARM_ERRATA_411920=y
CONFIG_SMP=y
CONFIG_NR_CPUS=2
+CONFIG_PREEMPT=y
CONFIG_ZBOOT_ROM_TEXT=0x0
CONFIG_ZBOOT_ROM_BSS=0x0
CONFIG_CMDLINE="root=/dev/mmcblk0p2 rootwait console=ttyO2,115200"
CONFIG_REGULATOR_TPS6507X=y
CONFIG_REGULATOR_TPS65217=y
CONFIG_REGULATOR_TPS65910=y
+CONFIG_REGULATOR_TIAVSCLASS0=y
CONFIG_MEDIA_SUPPORT=m
CONFIG_MEDIA_CAMERA_SUPPORT=y
CONFIG_VIDEO_DEV=m
index d60b2059954e3b56ba772cba5638655e9c6312d9..eca7a77df802bc33cd90ffb19a3ad42ba9c1419e 100644 (file)
NULL,
};
-DT_MACHINE_START(DRA7XX_DT, "Generic DRA7XX (Flattened Device Tree)")
+DT_MACHINE_START(DRA7XX_DT, "Jacinto6 evm board")
.reserve = omap_reserve,
.smp = smp_ops(omap4_smp_ops),
.map_io = omap5_map_io,
index 87f9d563caf9535319c4209079178af47ccc0103..e77d58771c7c45a15f104887ba6df2358fabfe15 100644 (file)
DEFINE_STRUCT_CLK(l3_iclk_div, mpu_dpll_hs_clk_div_parents,
apll_pcie_clkvcoldo_ops);
+static const char *gpu_l3_iclk_parents[] = {
+ "l3_iclk_div",
+};
+
+static struct clk gpu_l3_iclk;
+
+static struct clk_hw_omap gpu_l3_iclk_hw = {
+ .hw = {
+ .clk = &gpu_l3_iclk,
+ },
+};
+
+DEFINE_STRUCT_CLK(gpu_l3_iclk, gpu_l3_iclk_parents, apll_pcie_clkvcoldo_ops);
+
static const struct clk_div_table l3init_60m_fclk_rates[] = {
{ .div = 1, .val = 0 },
{ .div = 8, .val = 1 },
CLK(NULL, "hdmi_div_clk", &hdmi_div_clk, CK_7XX),
CLK(NULL, "hdmi_dpll_clk_mux", &hdmi_dpll_clk_mux, CK_7XX),
CLK(NULL, "l3_iclk_div", &l3_iclk_div, CK_7XX),
+ CLK(NULL, "gpu_l3_iclk", &gpu_l3_iclk, CK_7XX),
CLK(NULL, "l3init_60m_fclk", &l3init_60m_fclk, CK_7XX),
CLK(NULL, "l4_root_clk_div", &l4_root_clk_div, CK_7XX),
CLK(NULL, "mlb_clk", &mlb_clk, CK_7XX),
diff --git a/arch/arm/mach-omap2/clockdomains54xx_data.c b/arch/arm/mach-omap2/clockdomains54xx_data.c
index 1a3c69d2e14c8f63b1337150cf646a5204a18ffd..c0678ed61dd55f5b5aac26963117289efb05b188 100644 (file)
.dep_bit = OMAP54XX_DSS_STATDEP_SHIFT,
.wkdep_srcs = dss_wkup_sleep_deps,
.sleepdep_srcs = dss_wkup_sleep_deps,
- .flags = CLKDM_CAN_HWSUP_SWSUP,
+ .flags = CLKDM_CAN_SWSUP,
};
static struct clockdomain dsp_54xx_clkdm = {
diff --git a/arch/arm/mach-omap2/clockdomains7xx_data.c b/arch/arm/mach-omap2/clockdomains7xx_data.c
index a261f6fa51852622e5a5fa36a7244725bbac08dc..6379e5f25a2eda784271a37cebd405999a06d1e2 100644 (file)
.dep_bit = DRA7XX_DSS_STATDEP_SHIFT,
.wkdep_srcs = dss_wkup_sleep_deps,
.sleepdep_srcs = dss_wkup_sleep_deps,
- .flags = CLKDM_CAN_HWSUP_SWSUP,
+ .flags = CLKDM_CAN_SWSUP,
};
static struct clockdomain emif_7xx_clkdm = {
diff --git a/arch/arm/mach-omap2/omap_hwmod_54xx_data.c b/arch/arm/mach-omap2/omap_hwmod_54xx_data.c
index ee7f4a9a77aaa3820ed3037d3575314aed78dfa2..73c152d17a91fabd2d6bdd95abdef72805c1e945 100644 (file)
/* Base offset for all OMAP5 dma requests */
#define OMAP54XX_DMA_REQ_START 1
+/* Backward references (IPs with Bus Master capability) */
+static struct omap_hwmod omap54xx_bb2d_hwmod;
+
/*
* IP blocks
{ .irq = -1 }
};
+static struct omap_hwmod_addr_space omap54xx_bb2d_addrs[] = {
+ {
+ .pa_start = 0x59000000,
+ .pa_end = 0x590007ff,
+ .flags = ADDR_TYPE_RT
+ },
+ { }
+};
+
+/* l3_main_2 -> bb2d */
+static struct omap_hwmod_ocp_if omap54xx_l3_main_2__bb2d = {
+ .master = &omap54xx_l3_main_2_hwmod,
+ .slave = &omap54xx_bb2d_hwmod,
+ .clk = "l3_iclk_div",
+ .addr = omap54xx_bb2d_addrs,
+ .user = OCP_USER_MPU | OCP_USER_SDMA,
+};
+
static struct omap_hwmod omap54xx_bb2d_hwmod = {
.name = "bb2d",
.class = &omap54xx_bb2d_hwmod_class,
.user = OCP_USER_MPU,
};
-/* l3_main_2 -> bb2d */
-static struct omap_hwmod_ocp_if omap54xx_l3_main_2__bb2d = {
- .master = &omap54xx_l3_main_2_hwmod,
- .slave = &omap54xx_bb2d_hwmod,
- .clk = "l3_iclk_div",
- .user = OCP_USER_MPU | OCP_USER_SDMA,
-};
static struct omap_hwmod_addr_space omap54xx_c2c_addrs[] = {
{
index b648382f25bf124083d1e7f6b0fe6fdf1fec0f18..1976fc952c506c39cb29902792f8d55002a7b5ce 100644 (file)
},
};
+/*
+ * 'gpu' class
+ * 2d/3d graphics accelerator
+ */
+
+static struct omap_hwmod_class_sysconfig dra7xx_gpu_sysc = {
+ .rev_offs = 0x0000,
+ .sysc_offs = 0x0010,
+ .sysc_flags = (SYSC_HAS_MIDLEMODE | SYSC_HAS_SIDLEMODE),
+ .idlemodes = (SIDLE_FORCE | SIDLE_NO | SIDLE_SMART |
+ SIDLE_SMART_WKUP | MSTANDBY_FORCE | MSTANDBY_NO |
+ MSTANDBY_SMART | MSTANDBY_SMART_WKUP),
+ .sysc_fields = &omap_hwmod_sysc_type2,
+};
+
+static struct omap_hwmod_class dra7xx_gpu_hwmod_class = {
+ .name = "gpu",
+ .sysc = &dra7xx_gpu_sysc,
+};
+
+/* gpu */
+static struct omap_hwmod_irq_info dra7xx_gpu_irqs[] = {
+ { .irq = 21 + DRA7XX_IRQ_GIC_START },
+ { .irq = -1 }
+};
+
+static struct omap_hwmod dra7xx_gpu_hwmod = {
+ .name = "gpu",
+ .class = &dra7xx_gpu_hwmod_class,
+ .clkdm_name = "gpu_clkdm",
+ .mpu_irqs = dra7xx_gpu_irqs,
+ .main_clk = "gpu_core_gclk_mux",
+ .prcm = {
+ .omap4 = {
+ .clkctrl_offs = DRA7XX_CM_GPU_GPU_CLKCTRL_OFFSET,
+ .context_offs = DRA7XX_RM_GPU_GPU_CONTEXT_OFFSET,
+ .modulemode = MODULEMODE_SWCTRL,
+ },
+ },
+};
+
/*
* 'hdq1w' class
*
{
.pa_start = 0x59000000,
.pa_end = 0x590007ff,
+ .flags = ADDR_TYPE_RT
},
{ }
};
.user = OCP_USER_MPU | OCP_USER_SDMA,
};
+static struct omap_hwmod_addr_space dra7xx_gpu_addrs[] = {
+ {
+ .name = "klio",
+ .pa_start = 0x56000000,
+ .pa_end = 0x56001fff,
+ },
+ {
+ .name = "hydra2",
+ .pa_start = 0x56004000,
+ .pa_end = 0x56004fff,
+ },
+ {
+ .name = "klio_0",
+ .pa_start = 0x56008000,
+ .pa_end = 0x56009fff,
+ },
+ {
+ .name = "klio_1",
+ .pa_start = 0x5600c000,
+ .pa_end = 0x5600dfff,
+ },
+ {
+ .name = "klio_hl",
+ .pa_start = 0x5600fe00,
+ .pa_end = 0x5600ffff,
+ .flags = ADDR_TYPE_RT
+ },
+ { }
+};
+
+/* l3_main_1 -> gpu */
+static struct omap_hwmod_ocp_if dra7xx_l3_main_1__gpu = {
+ .master = &dra7xx_l3_main_1_hwmod,
+ .slave = &dra7xx_gpu_hwmod,
+ .clk = "gpu_l3_iclk",
+ .addr = dra7xx_gpu_addrs,
+ .user = OCP_USER_MPU | OCP_USER_SDMA,
+};
+
static struct omap_hwmod_addr_space dra7xx_hdq1w_addrs[] = {
{
.pa_start = 0x480b2000,
&dra7xx_l4_per1__gpio7,
&dra7xx_l4_per1__gpio8,
&dra7xx_l3_main_1__gpmc,
+ &dra7xx_l3_main_1__gpu,
&dra7xx_l4_per1__hdq1w,
&dra7xx_l4_per1__i2c1,
&dra7xx_l4_per1__i2c2,
index c9a66bf36c9a4506f190186666dee325c1dbf3c3..cd8310af031df77bedc4afedaa31c1c894f9ee34 100644 (file)
/*
+ * arch/arm/plat-omap/include/mach/cpu.h
+ *
* OMAP cpu type detection
*
* Copyright (C) 2004, 2008 Nokia Corporation
#ifndef __ASM_ARCH_OMAP_CPU_H
#define __ASM_ARCH_OMAP_CPU_H
-#ifdef CONFIG_ARCH_OMAP1
-#include <mach/soc.h>
+#include <linux/bitops.h>
+#include <plat/multi.h>
+
+/*
+ * Omap device type i.e. EMU/HS/TST/GP/BAD
+ */
+#define OMAP2_DEVICE_TYPE_TEST 0
+#define OMAP2_DEVICE_TYPE_EMU 1
+#define OMAP2_DEVICE_TYPE_SEC 2
+#define OMAP2_DEVICE_TYPE_GP 3
+#define OMAP2_DEVICE_TYPE_BAD 4
+
+int omap_type(void);
+
+/*
+ * API to retrieve the OMAP sysboot value.
+ * NOTE: depending on the SoC the meaning of the bits
+ * OR bit-combinations might vary.
+ */
+u8 omap_get_sysboot_value(void);
+/*
+ * omap_rev bits:
+ * CPU id bits (0730, 1510, 1710, 2422...) [31:16]
+ * CPU revision (See _REV_ defined in cpu.h) [15:08]
+ * CPU class bits (15xx, 16xx, 24xx, 34xx...) [07:00]
+ */
+unsigned int omap_rev(void);
+
+/*
+ * Get the CPU revision for OMAP devices
+ */
+#define GET_OMAP_REVISION() ((omap_rev() >> 8) & 0xff)
+
+/*
+ * Macros to group OMAP into cpu classes.
+ * These can be used in most places.
+ * cpu_is_omap7xx(): True for OMAP730, OMAP850
+ * cpu_is_omap15xx(): True for OMAP1510, OMAP5910 and OMAP310
+ * cpu_is_omap16xx(): True for OMAP1610, OMAP5912 and OMAP1710
+ * cpu_is_omap24xx(): True for OMAP2420, OMAP2422, OMAP2423, OMAP2430
+ * cpu_is_omap242x(): True for OMAP2420, OMAP2422, OMAP2423
+ * cpu_is_omap243x(): True for OMAP2430
+ * cpu_is_omap343x(): True for OMAP3430
+ * cpu_is_omap443x(): True for OMAP4430
+ * cpu_is_omap446x(): True for OMAP4460
+ * cpu_is_omap447x(): True for OMAP4470
+ * cpu_is_omap543x(): True for OMAP5430, OMAP5432
+ */
+#define GET_OMAP_CLASS (omap_rev() & 0xff)
+
+#define IS_OMAP_CLASS(class, id) \
+static inline int is_omap ##class (void) \
+{ \
+ return (GET_OMAP_CLASS == (id)) ? 1 : 0; \
+}
+
+#define GET_AM_CLASS ((omap_rev() >> 24) & 0xff)
+
+#define IS_AM_CLASS(class, id) \
+static inline int is_am ##class (void) \
+{ \
+ return (GET_AM_CLASS == (id)) ? 1 : 0; \
+}
+
+#define GET_TI_CLASS ((omap_rev() >> 24) & 0xff)
+
+#define IS_TI_CLASS(class, id) \
+static inline int is_ti ##class (void) \
+{ \
+ return (GET_TI_CLASS == (id)) ? 1 : 0; \
+}
+
+#define GET_OMAP_SUBCLASS ((omap_rev() >> 20) & 0x0fff)
+
+#define IS_OMAP_SUBCLASS(subclass, id) \
+static inline int is_omap ##subclass (void) \
+{ \
+ return (GET_OMAP_SUBCLASS == (id)) ? 1 : 0; \
+}
+
+#define IS_TI_SUBCLASS(subclass, id) \
+static inline int is_ti ##subclass (void) \
+{ \
+ return (GET_OMAP_SUBCLASS == (id)) ? 1 : 0; \
+}
+
+#define IS_AM_SUBCLASS(subclass, id) \
+static inline int is_am ##subclass (void) \
+{ \
+ return (GET_OMAP_SUBCLASS == (id)) ? 1 : 0; \
+}
+
+IS_OMAP_CLASS(7xx, 0x07)
+IS_OMAP_CLASS(15xx, 0x15)
+IS_OMAP_CLASS(16xx, 0x16)
+IS_OMAP_CLASS(24xx, 0x24)
+IS_OMAP_CLASS(34xx, 0x34)
+IS_OMAP_CLASS(44xx, 0x44)
+IS_OMAP_CLASS(54xx, 0x54)
+IS_AM_CLASS(33xx, 0x33)
+
+IS_TI_CLASS(81xx, 0x81)
+
+IS_OMAP_SUBCLASS(242x, 0x242)
+IS_OMAP_SUBCLASS(243x, 0x243)
+IS_OMAP_SUBCLASS(343x, 0x343)
+IS_OMAP_SUBCLASS(363x, 0x363)
+IS_OMAP_SUBCLASS(443x, 0x443)
+IS_OMAP_SUBCLASS(446x, 0x446)
+IS_OMAP_SUBCLASS(447x, 0x447)
+IS_OMAP_SUBCLASS(543x, 0x543)
+
+IS_TI_SUBCLASS(816x, 0x816)
+IS_TI_SUBCLASS(814x, 0x814)
+IS_AM_SUBCLASS(335x, 0x335)
+
+#define cpu_is_omap7xx() 0
+#define cpu_is_omap15xx() 0
+#define cpu_is_omap16xx() 0
+#define cpu_is_omap24xx() 0
+#define cpu_is_omap242x() 0
+#define cpu_is_omap243x() 0
+#define cpu_is_omap34xx() 0
+#define cpu_is_omap343x() 0
+#define cpu_is_ti81xx() 0
+#define cpu_is_ti816x() 0
+#define cpu_is_ti814x() 0
+#define cpu_is_am33xx() 0
+#define cpu_is_am335x() 0
+#define cpu_is_omap44xx() 0
+#define cpu_is_omap443x() 0
+#define cpu_is_omap446x() 0
+#define cpu_is_omap447x() 0
+#define cpu_is_omap54xx() 0
+#define cpu_is_omap543x() 0
+
+#if defined(MULTI_OMAP1)
+# if defined(CONFIG_ARCH_OMAP730)
+# undef cpu_is_omap7xx
+# define cpu_is_omap7xx() is_omap7xx()
+# endif
+# if defined(CONFIG_ARCH_OMAP850)
+# undef cpu_is_omap7xx
+# define cpu_is_omap7xx() is_omap7xx()
+# endif
+# if defined(CONFIG_ARCH_OMAP15XX)
+# undef cpu_is_omap15xx
+# define cpu_is_omap15xx() is_omap15xx()
+# endif
+# if defined(CONFIG_ARCH_OMAP16XX)
+# undef cpu_is_omap16xx
+# define cpu_is_omap16xx() is_omap16xx()
+# endif
+#else
+# if defined(CONFIG_ARCH_OMAP730)
+# undef cpu_is_omap7xx
+# define cpu_is_omap7xx() 1
+# endif
+# if defined(CONFIG_ARCH_OMAP850)
+# undef cpu_is_omap7xx
+# define cpu_is_omap7xx() 1
+# endif
+# if defined(CONFIG_ARCH_OMAP15XX)
+# undef cpu_is_omap15xx
+# define cpu_is_omap15xx() 1
+# endif
+# if defined(CONFIG_ARCH_OMAP16XX)
+# undef cpu_is_omap16xx
+# define cpu_is_omap16xx() 1
+# endif
+#endif
+
+#if defined(MULTI_OMAP2)
+# if defined(CONFIG_ARCH_OMAP2)
+# undef cpu_is_omap24xx
+# define cpu_is_omap24xx() is_omap24xx()
+# endif
+# if defined (CONFIG_SOC_OMAP2420)
+# undef cpu_is_omap242x
+# define cpu_is_omap242x() is_omap242x()
+# endif
+# if defined (CONFIG_SOC_OMAP2430)
+# undef cpu_is_omap243x
+# define cpu_is_omap243x() is_omap243x()
+# endif
+# if defined(CONFIG_ARCH_OMAP3)
+# undef cpu_is_omap34xx
+# undef cpu_is_omap343x
+# define cpu_is_omap34xx() is_omap34xx()
+# define cpu_is_omap343x() is_omap343x()
+# endif
+#else
+# if defined(CONFIG_ARCH_OMAP2)
+# undef cpu_is_omap24xx
+# define cpu_is_omap24xx() 1
+# endif
+# if defined(CONFIG_SOC_OMAP2420)
+# undef cpu_is_omap242x
+# define cpu_is_omap242x() 1
+# endif
+# if defined(CONFIG_SOC_OMAP2430)
+# undef cpu_is_omap243x
+# define cpu_is_omap243x() 1
+# endif
+# if defined(CONFIG_ARCH_OMAP3)
+# undef cpu_is_omap34xx
+# define cpu_is_omap34xx() 1
+# endif
+# if defined(CONFIG_SOC_OMAP3430)
+# undef cpu_is_omap343x
+# define cpu_is_omap343x() 1
+# endif
+#endif
+
+/*
+ * Macros to detect individual cpu types.
+ * These are only rarely needed.
+ * cpu_is_omap330(): True for OMAP330
+ * cpu_is_omap730(): True for OMAP730
+ * cpu_is_omap850(): True for OMAP850
+ * cpu_is_omap1510(): True for OMAP1510
+ * cpu_is_omap1610(): True for OMAP1610
+ * cpu_is_omap1611(): True for OMAP1611
+ * cpu_is_omap5912(): True for OMAP5912
+ * cpu_is_omap1621(): True for OMAP1621
+ * cpu_is_omap1710(): True for OMAP1710
+ * cpu_is_omap2420(): True for OMAP2420
+ * cpu_is_omap2422(): True for OMAP2422
+ * cpu_is_omap2423(): True for OMAP2423
+ * cpu_is_omap2430(): True for OMAP2430
+ * cpu_is_omap3430(): True for OMAP3430
+ * cpu_is_omap3505(): True for OMAP3505
+ * cpu_is_omap3517(): True for OMAP3517
+ */
+#define GET_OMAP_TYPE ((omap_rev() >> 16) & 0xffff)
+
+#define IS_OMAP_TYPE(type, id) \
+static inline int is_omap ##type (void) \
+{ \
+ return (GET_OMAP_TYPE == (id)) ? 1 : 0; \
+}
+
+IS_OMAP_TYPE(310, 0x0310)
+IS_OMAP_TYPE(730, 0x0730)
+IS_OMAP_TYPE(850, 0x0850)
+IS_OMAP_TYPE(1510, 0x1510)
+IS_OMAP_TYPE(1610, 0x1610)
+IS_OMAP_TYPE(1611, 0x1611)
+IS_OMAP_TYPE(5912, 0x1611)
+IS_OMAP_TYPE(1621, 0x1621)
+IS_OMAP_TYPE(1710, 0x1710)
+IS_OMAP_TYPE(2420, 0x2420)
+IS_OMAP_TYPE(2422, 0x2422)
+IS_OMAP_TYPE(2423, 0x2423)
+IS_OMAP_TYPE(2430, 0x2430)
+IS_OMAP_TYPE(3430, 0x3430)
+IS_OMAP_TYPE(3505, 0x3517)
+IS_OMAP_TYPE(3517, 0x3517)
+
+#define cpu_is_omap310() 0
+#define cpu_is_omap730() 0
+#define cpu_is_omap850() 0
+#define cpu_is_omap1510() 0
+#define cpu_is_omap1610() 0
+#define cpu_is_omap5912() 0
+#define cpu_is_omap1611() 0
+#define cpu_is_omap1621() 0
+#define cpu_is_omap1710() 0
+#define cpu_is_omap2420() 0
+#define cpu_is_omap2422() 0
+#define cpu_is_omap2423() 0
+#define cpu_is_omap2430() 0
+#define cpu_is_omap3503() 0
+#define cpu_is_omap3515() 0
+#define cpu_is_omap3525() 0
+#define cpu_is_omap3530() 0
+#define cpu_is_omap3505() 0
+#define cpu_is_omap3517() 0
+#define cpu_is_omap3430() 0
+#define cpu_is_omap3630() 0
+#define cpu_is_omap5430() 0
+
+/*
+ * Whether we have MULTI_OMAP1 or not, we still need to distinguish
+ * between 730 vs 850, 330 vs. 1510 and 1611B/5912 vs. 1710.
+ */
+
+#if defined(CONFIG_ARCH_OMAP730)
+# undef cpu_is_omap730
+# define cpu_is_omap730() is_omap730()
+#endif
+
+#if defined(CONFIG_ARCH_OMAP850)
+# undef cpu_is_omap850
+# define cpu_is_omap850() is_omap850()
+#endif
+
+#if defined(CONFIG_ARCH_OMAP15XX)
+# undef cpu_is_omap310
+# undef cpu_is_omap1510
+# define cpu_is_omap310() is_omap310()
+# define cpu_is_omap1510() is_omap1510()
+#endif
+
+#if defined(CONFIG_ARCH_OMAP16XX)
+# undef cpu_is_omap1610
+# undef cpu_is_omap1611
+# undef cpu_is_omap5912
+# undef cpu_is_omap1621
+# undef cpu_is_omap1710
+# define cpu_is_omap1610() is_omap1610()
+# define cpu_is_omap1611() is_omap1611()
+# define cpu_is_omap5912() is_omap5912()
+# define cpu_is_omap1621() is_omap1621()
+# define cpu_is_omap1710() is_omap1710()
+#endif
+
+#if defined(CONFIG_ARCH_OMAP2)
+# undef cpu_is_omap2420
+# undef cpu_is_omap2422
+# undef cpu_is_omap2423
+# undef cpu_is_omap2430
+# define cpu_is_omap2420() is_omap2420()
+# define cpu_is_omap2422() is_omap2422()
+# define cpu_is_omap2423() is_omap2423()
+# define cpu_is_omap2430() is_omap2430()
+#endif
+
+#if defined(CONFIG_ARCH_OMAP3)
+# undef cpu_is_omap3430
+# undef cpu_is_omap3503
+# undef cpu_is_omap3515
+# undef cpu_is_omap3525
+# undef cpu_is_omap3530
+# undef cpu_is_omap3505
+# undef cpu_is_omap3517
+# undef cpu_is_ti81xx
+# undef cpu_is_ti816x
+# undef cpu_is_ti814x
+# undef cpu_is_am33xx
+# undef cpu_is_am335x
+# define cpu_is_omap3430() is_omap3430()
+# define cpu_is_omap3503() (cpu_is_omap3430() && \
+ (!omap3_has_iva()) && \
+ (!omap3_has_sgx()))
+# define cpu_is_omap3515() (cpu_is_omap3430() && \
+ (!omap3_has_iva()) && \
+ (omap3_has_sgx()))
+# define cpu_is_omap3525() (cpu_is_omap3430() && \
+ (!omap3_has_sgx()) && \
+ (omap3_has_iva()))
+# define cpu_is_omap3530() (cpu_is_omap3430())
+# define cpu_is_omap3517() is_omap3517()
+# define cpu_is_omap3505() (cpu_is_omap3517() && \
+ !omap3_has_sgx())
+# undef cpu_is_omap3630
+# define cpu_is_omap3630() is_omap363x()
+# define cpu_is_ti81xx() is_ti81xx()
+# define cpu_is_ti816x() is_ti816x()
+# define cpu_is_ti814x() is_ti814x()
+# define cpu_is_am33xx() is_am33xx()
+# define cpu_is_am335x() is_am335x()
+#endif
+
+# if defined(CONFIG_ARCH_OMAP4)
+# undef cpu_is_omap44xx
+# undef cpu_is_omap443x
+# undef cpu_is_omap446x
+# undef cpu_is_omap447x
+# define cpu_is_omap44xx() is_omap44xx()
+# define cpu_is_omap443x() is_omap443x()
+# define cpu_is_omap446x() is_omap446x()
+# define cpu_is_omap447x() is_omap447x()
+# endif
+
+# if defined(CONFIG_ARCH_OMAP5)
+# undef cpu_is_omap54xx
+# undef cpu_is_omap543x
+# define cpu_is_omap54xx() is_omap54xx()
+# define cpu_is_omap543x() is_omap543x()
#endif
+/* Macros to detect if we have OMAP1 or OMAP2 */
+#define cpu_class_is_omap1() (cpu_is_omap7xx() || cpu_is_omap15xx() || \
+ cpu_is_omap16xx())
+#define cpu_class_is_omap2() (cpu_is_omap24xx() || cpu_is_omap34xx() || \
+ cpu_is_omap44xx() || cpu_is_omap54xx())
+
+/* Various silicon revisions for omap2 */
+#define OMAP242X_CLASS 0x24200024
+#define OMAP2420_REV_ES1_0 OMAP242X_CLASS
+#define OMAP2420_REV_ES2_0 (OMAP242X_CLASS | (0x1 << 8))
+
+#define OMAP243X_CLASS 0x24300024
+#define OMAP2430_REV_ES1_0 OMAP243X_CLASS
+
+#define OMAP343X_CLASS 0x34300034
+#define OMAP3430_REV_ES1_0 OMAP343X_CLASS
+#define OMAP3430_REV_ES2_0 (OMAP343X_CLASS | (0x1 << 8))
+#define OMAP3430_REV_ES2_1 (OMAP343X_CLASS | (0x2 << 8))
+#define OMAP3430_REV_ES3_0 (OMAP343X_CLASS | (0x3 << 8))
+#define OMAP3430_REV_ES3_1 (OMAP343X_CLASS | (0x4 << 8))
+#define OMAP3430_REV_ES3_1_2 (OMAP343X_CLASS | (0x5 << 8))
+
+#define OMAP363X_CLASS 0x36300034
+#define OMAP3630_REV_ES1_0 OMAP363X_CLASS
+#define OMAP3630_REV_ES1_1 (OMAP363X_CLASS | (0x1 << 8))
+#define OMAP3630_REV_ES1_2 (OMAP363X_CLASS | (0x2 << 8))
+
+#define OMAP3517_CLASS 0x35170034
+#define OMAP3517_REV_ES1_0 OMAP3517_CLASS
+#define OMAP3517_REV_ES1_1 (OMAP3517_CLASS | (0x1 << 8))
+
+#define TI816X_CLASS 0x81600034
+#define TI8168_REV_ES1_0 TI816X_CLASS
+#define TI8168_REV_ES1_1 (TI816X_CLASS | (0x1 << 8))
+
+#define TI814X_CLASS 0x81400034
+#define TI8148_REV_ES1_0 TI814X_CLASS
+#define TI8148_REV_ES2_0 (TI814X_CLASS | (0x1 << 8))
+#define TI8148_REV_ES2_1 (TI814X_CLASS | (0x2 << 8))
+
+#define AM335X_CLASS 0x33500034
+#define AM335X_REV_ES1_0 AM335X_CLASS
+
+#define OMAP443X_CLASS 0x44300044
+#define OMAP4430_REV_ES1_0 (OMAP443X_CLASS | (0x10 << 8))
+#define OMAP4430_REV_ES2_0 (OMAP443X_CLASS | (0x20 << 8))
+#define OMAP4430_REV_ES2_1 (OMAP443X_CLASS | (0x21 << 8))
+#define OMAP4430_REV_ES2_2 (OMAP443X_CLASS | (0x22 << 8))
+#define OMAP4430_REV_ES2_3 (OMAP443X_CLASS | (0x23 << 8))
+
+#define OMAP446X_CLASS 0x44600044
+#define OMAP4460_REV_ES1_0 (OMAP446X_CLASS | (0x10 << 8))
+#define OMAP4460_REV_ES1_1 (OMAP446X_CLASS | (0x11 << 8))
+
+#define OMAP447X_CLASS 0x44700044
+#define OMAP4470_REV_ES1_0 (OMAP447X_CLASS | (0x10 << 8))
+
+#define OMAP54XX_CLASS 0x54000054
+#define OMAP5430_REV_ES1_0 (OMAP54XX_CLASS | (0x30 << 16) | (0x10 << 8))
+#define OMAP5430_REV_ES2_0 (OMAP54XX_CLASS | (0x30 << 16) | (0x20 << 8))
+#define OMAP5432_REV_ES1_0 (OMAP54XX_CLASS | (0x32 << 16) | (0x10 << 8))
+#define OMAP5432_REV_ES2_0 (OMAP54XX_CLASS | (0x32 << 16) | (0x20 << 8))
+
+void omap2xxx_check_revision(void);
+void omap3xxx_check_revision(void);
+void omap4xxx_check_revision(void);
+void omap5xxx_check_revision(void);
+void omap3xxx_check_features(void);
+void ti81xx_check_features(void);
+void omap4xxx_check_features(void);
+void omap5xxx_check_features(void);
+
+/*
+ * Runtime detection of OMAP3 features
+ *
+ * OMAP3_HAS_IO_CHAIN_CTRL: Some later members of the OMAP3 chip
+ * family have OS-level control over the I/O chain clock. This is
+ * to avoid a window during which wakeups could potentially be lost
+ * during powerdomain transitions. If this bit is set, it
+ * indicates that the chip does support OS-level control of this
+ * feature.
+ */
+extern u32 omap_features;
+
+#define OMAP3_HAS_L2CACHE BIT(0)
+#define OMAP3_HAS_IVA BIT(1)
+#define OMAP3_HAS_SGX BIT(2)
+#define OMAP3_HAS_NEON BIT(3)
+#define OMAP3_HAS_ISP BIT(4)
+#define OMAP3_HAS_192MHZ_CLK BIT(5)
+#define OMAP3_HAS_IO_WAKEUP BIT(6)
+#define OMAP3_HAS_SDRC BIT(7)
+#define OMAP3_HAS_IO_CHAIN_CTRL BIT(8)
+#define OMAP4_HAS_PERF_SILICON BIT(9)
+#define OMAP5_HAS_OPP_HIGH BIT(12)
+#define OMAP5_HAS_AUTO_RET BIT(13)
+#define OMAP5_HAS_AVS BIT(14)
+#define OMAP_HAS_GC320 BIT(15)
+
+#define OMAP3_HAS_FEATURE(feat,flag) \
+static inline unsigned int omap3_has_ ##feat(void) \
+{ \
+ return omap_features & OMAP3_HAS_ ##flag; \
+} \
+
+OMAP3_HAS_FEATURE(l2cache, L2CACHE)
+OMAP3_HAS_FEATURE(sgx, SGX)
+OMAP3_HAS_FEATURE(iva, IVA)
+OMAP3_HAS_FEATURE(neon, NEON)
+OMAP3_HAS_FEATURE(isp, ISP)
+OMAP3_HAS_FEATURE(192mhz_clk, 192MHZ_CLK)
+OMAP3_HAS_FEATURE(io_wakeup, IO_WAKEUP)
+OMAP3_HAS_FEATURE(sdrc, SDRC)
+OMAP3_HAS_FEATURE(io_chain_ctrl, IO_CHAIN_CTRL)
+
+/*
+ * Runtime detection of OMAP4 features
+ */
+#define OMAP4_HAS_FEATURE(feat, flag) \
+static inline unsigned int omap4_has_ ##feat(void) \
+{ \
+ return omap_features & OMAP4_HAS_ ##flag; \
+} \
+
+OMAP4_HAS_FEATURE(perf_silicon, PERF_SILICON)
+
+/*
+ * Runtime detection of OMAP5 features
+ */
+#define OMAP5_HAS_FEATURE(feat, flag) \
+static inline unsigned int omap5_has_ ##feat(void) \
+{ \
+ return omap_features & OMAP5_HAS_ ##flag; \
+} \
+
+OMAP5_HAS_FEATURE(opp_high, OPP_HIGH)
+OMAP5_HAS_FEATURE(auto_ret, AUTO_RET)
+OMAP5_HAS_FEATURE(avs, AVS)
+
+/*
+ * Runtime detection of common features across OMAP families
+ */
+#define OMAP_HAS_FEATURE(feat, flag) \
+static inline unsigned int omap_has_ ##feat(void) \
+{ \
+ return omap_features & OMAP_HAS_ ##flag; \
+} \
+
+OMAP_HAS_FEATURE(gc320, GC320)
+
#endif
diff --git a/arch/arm/plat-omap/include/plat/multi.h b/arch/arm/plat-omap/include/plat/multi.h
--- /dev/null
@@ -0,0 +1,111 @@
+/*
+ * Support for compiling in multiple OMAP processors
+ *
+ * Copyright (C) 2010 Nokia Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ *
+ */
+
+#ifndef __PLAT_OMAP_MULTI_H
+#define __PLAT_OMAP_MULTI_H
+
+/*
+ * Test if multicore OMAP support is needed
+ */
+#undef MULTI_OMAP1
+#undef MULTI_OMAP2
+#undef OMAP_NAME
+
+#ifdef CONFIG_ARCH_OMAP730
+# ifdef OMAP_NAME
+# undef MULTI_OMAP1
+# define MULTI_OMAP1
+# else
+# define OMAP_NAME omap730
+# endif
+#endif
+#ifdef CONFIG_ARCH_OMAP850
+# ifdef OMAP_NAME
+# undef MULTI_OMAP1
+# define MULTI_OMAP1
+# else
+# define OMAP_NAME omap850
+# endif
+#endif
+#ifdef CONFIG_ARCH_OMAP15XX
+# ifdef OMAP_NAME
+# undef MULTI_OMAP1
+# define MULTI_OMAP1
+# else
+# define OMAP_NAME omap1510
+# endif
+#endif
+#ifdef CONFIG_ARCH_OMAP16XX
+# ifdef OMAP_NAME
+# undef MULTI_OMAP1
+# define MULTI_OMAP1
+# else
+# define OMAP_NAME omap16xx
+# endif
+#endif
+#ifdef CONFIG_ARCH_OMAP2PLUS
+# if (defined(OMAP_NAME) || defined(MULTI_OMAP1))
+# error "OMAP1 and OMAP2PLUS can't be selected at the same time"
+# endif
+#endif
+#ifdef CONFIG_SOC_OMAP2420
+# ifdef OMAP_NAME
+# undef MULTI_OMAP2
+# define MULTI_OMAP2
+# else
+# define OMAP_NAME omap2420
+# endif
+#endif
+#ifdef CONFIG_SOC_OMAP2430
+# ifdef OMAP_NAME
+# undef MULTI_OMAP2
+# define MULTI_OMAP2
+# else
+# define OMAP_NAME omap2430
+# endif
+#endif
+#ifdef CONFIG_ARCH_OMAP3
+# ifdef OMAP_NAME
+# undef MULTI_OMAP2
+# define MULTI_OMAP2
+# else
+# define OMAP_NAME omap3
+# endif
+#endif
+#ifdef CONFIG_ARCH_OMAP4
+# ifdef OMAP_NAME
+# undef MULTI_OMAP2
+# define MULTI_OMAP2
+# else
+# define OMAP_NAME omap4
+# endif
+#endif
+
+#ifdef CONFIG_ARCH_OMAP5
+# ifdef OMAP_NAME
+# undef MULTI_OMAP2
+# define MULTI_OMAP2
+# else
+# define OMAP_NAME omap5
+# endif
+#endif
+
+#endif /* __PLAT_OMAP_MULTI_H */
index fdcbcda307b6abb1b77bba49cbe69262d4183b99..9fd6a50cb2875732df5b2d8e1cef2235b14cf921 100644 (file)
omapdss-$(CONFIG_OMAP5_DSS_HDMI) += hdmi.o \
hdmi_panel.o ti_hdmi_4xxx_ip.o ti_hdmi_5xxx_ip.o
ccflags-$(CONFIG_OMAP2_DSS_DEBUG) += -DDEBUG
+ccflags-y += -Iarch/arm
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/jiffies.h>
+#include <linux/seq_file.h>
#include <video/omapdss.h>
#include "dss_features.h"
#include "dispc-compat.h"
+struct callback_states {
+ /*
+ * Keep track of callbacks at the last 3 levels of pipeline:
+ * info, shadow registers and in DISPC registers.
+ *
+ * Note: We zero the function pointer when moving from one level to
+ * another to avoid checking for dirty and shadow_dirty fields that
+ * are not common between overlay and manager cache structures.
+ */
+ struct omapdss_ovl_cb info, shadow, dispc;
+ bool dispc_displayed;
+ bool shadow_enabled;
+};
+
/*
* We have 4 levels of cache for the dispc settings. First two are in SW and
* the latter two in HW.
bool info_dirty;
struct omap_overlay_info info;
+ /* callback data for the last 3 states */
+ struct callback_states cb;
+
+ /* overlay's channel in DISPC */
+ int dispc_channel;
+
bool shadow_info_dirty;
bool extra_info_dirty;
bool shadow_extra_info_dirty;
bool enabled;
+ enum omap_channel channel;
u32 fifo_low, fifo_high;
/*
void (*framedone_handler)(void *);
void *framedone_handler_data;
+
+ /* callback data for the last 3 states */
+ struct callback_states cb;
};
static struct {
struct mgr_priv_data mgr_priv_data_array[MAX_DSS_MANAGERS];
bool irq_enabled;
+ u32 comp_irq_enabled;
} dss_data;
+/* propagating callback info between states */
+static inline void
+dss_ovl_configure_cb(struct callback_states *st, int i, bool enabled)
+{
+ /* complete info in shadow */
+ dss_ovl_cb(&st->shadow, i, DSS_COMPLETION_ECLIPSED_SHADOW);
+
+ /* propagate info to shadow */
+ st->shadow = st->info;
+ st->shadow_enabled = enabled;
+ /* info traveled to shadow */
+ st->info.fn = NULL;
+}
+
+static inline void
+dss_ovl_program_cb(struct callback_states *st, int i)
+{
+ /* mark previous programming as completed */
+ dss_ovl_cb(&st->dispc, i, st->dispc_displayed ?
+ DSS_COMPLETION_RELEASED : DSS_COMPLETION_TORN);
+
+ /* mark shadow info as programmed, not yet displayed */
+ dss_ovl_cb(&st->shadow, i, DSS_COMPLETION_PROGRAMMED);
+
+ /* if overlay/manager is not enabled, we are done now */
+ if (!st->shadow_enabled) {
+ dss_ovl_cb(&st->shadow, i, DSS_COMPLETION_RELEASED);
+ st->shadow.fn = NULL;
+ }
+
+ /* propagate shadow to dispc */
+ st->dispc = st->shadow;
+ st->shadow.fn = NULL;
+ st->dispc_displayed = false;
+}
+
/* protects dss_data */
static spinlock_t data_lock;
/* lock for blocking functions */
/* This will leave fifo configurations in a nonoptimal state */
op->enabled = false;
dispc_ovl_enable(ovl->id, false);
+ dss_ovl_configure_cb(&op->cb, ovl->id, op->enabled);
return;
}
op->info_dirty = false;
- if (mp->updating)
+ if (mp->updating) {
+ dss_ovl_configure_cb(&op->cb, ovl->id, op->enabled);
op->shadow_info_dirty = true;
+ }
}
static void dss_ovl_write_regs_extra(struct omap_overlay *ovl)
mp = get_mgr_priv(ovl->manager);
op->extra_info_dirty = false;
- if (mp->updating)
+ if (mp->updating) {
+ dss_ovl_configure_cb(&op->cb, ovl->id, op->enabled);
op->shadow_extra_info_dirty = true;
+ }
}
static void dss_mgr_write_regs(struct omap_overlay_manager *mgr)
{
struct mgr_priv_data *mp = get_mgr_priv(mgr);
struct omap_overlay *ovl;
+ struct ovl_priv_data *op;
+ int used_ovls = 0;
DSSDBG("writing mgr %d regs", mgr->id);
list_for_each_entry(ovl, &mgr->overlays, list) {
dss_ovl_write_regs(ovl);
dss_ovl_write_regs_extra(ovl);
+ op = get_ovl_priv(ovl);
+ if (op->channel == mgr->id && op->enabled)
+ used_ovls++;
}
if (mp->info_dirty) {
dispc_mgr_setup(mgr->id, &mp->info);
mp->info_dirty = false;
- if (mp->updating)
+ if (mp->updating) {
+ dss_ovl_configure_cb(&mp->cb, mgr->id, used_ovls);
mp->shadow_info_dirty = true;
+ }
}
}
struct ovl_priv_data *op;
mp = get_mgr_priv(mgr);
+
+ if (mp->shadow_info_dirty)
+ dss_ovl_program_cb(&mp->cb, mgr->id);
+
mp->shadow_info_dirty = false;
mp->shadow_extra_info_dirty = false;
list_for_each_entry(ovl, &mgr->overlays, list) {
op = get_ovl_priv(ovl);
+ if (op->shadow_info_dirty || op->shadow_extra_info_dirty) {
+ dss_ovl_program_cb(&op->cb, ovl->id);
+ op->dispc_channel = op->channel;
+ }
op->shadow_info_dirty = false;
op->shadow_extra_info_dirty = false;
}
}
+static void schedule_completion_irq(void);
+
+static void dss_completion_irq_handler(void *data, u32 mask)
+{
+ struct mgr_priv_data *mp;
+ struct ovl_priv_data *op;
+ struct omap_overlay_manager *mgr;
+ struct omap_overlay *ovl;
+ const int num_ovls = ARRAY_SIZE(dss_data.ovl_priv_data_array);
+ const int num_mgrs = dss_feat_get_num_mgrs();
+ const u32 masks[] = {
+ DISPC_IRQ_FRAMEDONE | DISPC_IRQ_VSYNC,
+ DISPC_IRQ_FRAMEDONETV | DISPC_IRQ_EVSYNC_EVEN |
+ DISPC_IRQ_EVSYNC_ODD,
+ DISPC_IRQ_FRAMEDONE2 | DISPC_IRQ_VSYNC2,
+ 0
+ };
+ int i;
+
+ spin_lock(&data_lock);
+
+ for (i = 0; i < num_mgrs; i++) {
+ mgr = omap_dss_get_overlay_manager(i);
+ mp = get_mgr_priv(mgr);
+ if (mask & masks[i]) {
+ if (mgr && mgr->output->device)
+ mgr->output->device->first_vsync = true;
+ dss_ovl_cb(&mp->cb.dispc, i, DSS_COMPLETION_DISPLAYED);
+ mp->cb.dispc_displayed = true;
+ }
+ }
+
+ /* notify all overlays on that manager */
+ for (i = 0; i < num_ovls; i++) {
+ ovl = omap_dss_get_overlay(i);
+ op = get_ovl_priv(ovl);
+ if (mask & masks[op->channel]) {
+ dss_ovl_cb(&op->cb.dispc, i, DSS_COMPLETION_DISPLAYED);
+ op->cb.dispc_displayed = true;
+ }
+ }
+
+ schedule_completion_irq();
+
+ spin_unlock(&data_lock);
+}
+
+static void schedule_completion_irq(void)
+{
+ struct mgr_priv_data *mp;
+ struct ovl_priv_data *op;
+ const int num_ovls = ARRAY_SIZE(dss_data.ovl_priv_data_array);
+ const int num_mgrs = dss_feat_get_num_mgrs();
+ const u32 masks[] = {
+ DISPC_IRQ_FRAMEDONE | DISPC_IRQ_VSYNC,
+ DISPC_IRQ_FRAMEDONETV | DISPC_IRQ_EVSYNC_EVEN |
+ DISPC_IRQ_EVSYNC_ODD,
+ DISPC_IRQ_FRAMEDONE2 | DISPC_IRQ_VSYNC2,
+ 0
+ };
+ u32 mask = 0;
+ int i;
+
+ for (i = 0; i < num_mgrs; i++) {
+ mp = &dss_data.mgr_priv_data_array[i];
+ if (mp->cb.dispc.fn && (mp->cb.dispc.mask &
+ DSS_COMPLETION_DISPLAYED))
+ mask |= masks[i];
+ }
+
+ /* notify all overlays on that manager */
+ for (i = 0; i < num_ovls; i++) {
+ op = &dss_data.ovl_priv_data_array[i];
+ if (op->cb.dispc.fn && op->enabled &&
+ (op->cb.dispc.mask & DSS_COMPLETION_DISPLAYED))
+ mask |= masks[op->channel];
+ }
+
+ if (mask != dss_data.comp_irq_enabled) {
+ if (dss_data.comp_irq_enabled)
+ omap_dispc_unregister_isr(dss_completion_irq_handler,
+ NULL, dss_data.comp_irq_enabled);
+ if (mask)
+ omap_dispc_register_isr(dss_completion_irq_handler,
+ NULL, mask);
+ dss_data.comp_irq_enabled = mask;
+ }
+}
+
static void dss_mgr_start_update_compat(struct omap_overlay_manager *mgr)
{
struct mgr_priv_data *mp = get_mgr_priv(mgr);
return;
}
+ schedule_completion_irq();
+
dss_mgr_write_regs(mgr);
dss_mgr_write_regs_extra(mgr);
dispc_mgr_enable_sync(mgr->id);
+	/* for manually updated displays invoke dsscomp callbacks manually,
+	 * as logic that relies on the shadow_dirty flag can't correctly
+	 * release the previous composition
+	 */
+ dss_ovl_configure_cb(&mp->cb, mgr->id, true);
+ dss_ovl_program_cb(&mp->cb, mgr->id);
+
+ mgr_clear_shadow_dirty(mgr);
+
spin_unlock_irqrestore(&data_lock, flags);
}
}
}
+ schedule_completion_irq();
dss_write_regs();
dss_set_go_bits();
spin_unlock(&data_lock);
}
+/*
+ * dss_mgr_blank() - disable all overlays on @mgr and flush cached state.
+ * @mgr:         manager to blank
+ * @wait_for_go: wait for the GO bit / programming to take effect
+ *
+ * Disables every overlay attached to @mgr, completes any unprogrammed
+ * user/cache callbacks with ECLIPSED status, dirties the manager so the
+ * blanked state gets written out, and either waits for GO or pretends
+ * programming happened (e.g. when clocks could not be enabled).
+ * Returns 0 on success or a negative error code.
+ */
+int dss_mgr_blank(struct omap_overlay_manager *mgr,
+		bool wait_for_go)
+{
+	struct ovl_priv_data *op;
+	struct mgr_priv_data *mp;
+	unsigned long flags;
+	int r, r_get, i;
+	const int num_mgrs = dss_feat_get_num_mgrs();
+
+	DSSDBG("dss_mgr_blank(%s,wait=%d)\n", mgr->name, wait_for_go);
+
+	r = dispc_runtime_get();
+	r_get = r;
+	/* still clear cache even if failed to get clocks, just don't config */
+
+
+	/* disable overlays in overlay user info structs and in data info */
+	for (i = 0; i < omap_dss_get_num_overlays(); i++) {
+		struct omap_overlay *ovl;
+
+		ovl = omap_dss_get_overlay(i);
+
+		if (ovl->manager != mgr)
+			continue;
+
+		r = ovl->disable(ovl);
+
+		spin_lock_irqsave(&data_lock, flags);
+		op = get_ovl_priv(ovl);
+
+		/* complete unconfigured info */
+		if (op->user_info_dirty)
+			dss_ovl_cb(&op->user_info.cb, i,
+					DSS_COMPLETION_ECLIPSED_SET);
+		dss_ovl_cb(&op->cb.info, i, DSS_COMPLETION_ECLIPSED_CACHE);
+		op->cb.info.fn = NULL;
+
+		op->user_info_dirty = false;
+		op->info_dirty = true;
+		op->enabled = false;
+		spin_unlock_irqrestore(&data_lock, flags);
+	}
+
+	spin_lock_irqsave(&data_lock, flags);
+	/* dirty manager */
+	mp = get_mgr_priv(mgr);
+	if (mp->user_info_dirty)
+		dss_ovl_cb(&mp->user_info.cb, mgr->id,
+				DSS_COMPLETION_ECLIPSED_SET);
+	dss_ovl_cb(&mp->cb.info, mgr->id, DSS_COMPLETION_ECLIPSED_CACHE);
+	mp->cb.info.fn = NULL;
+	mp->user_info.cb.fn = NULL;
+	mp->info_dirty = true;
+	mp->user_info_dirty = false;
+
+	/*
+	 * TRICKY: Enable apply irq even if not waiting for vsync, so that
+	 * DISPC programming takes place in case GO bit was on.
+	 */
+	if (!dss_data.irq_enabled) {
+		u32 mask = 0;
+		for (i = 0; i < num_mgrs; ++i)
+			mask |= dispc_mgr_get_vsync_irq(i);
+
+		for (i = 0; i < num_mgrs; ++i)
+			mask |= dispc_mgr_get_framedone_irq(i);
+
+		r = omap_dispc_register_isr(dss_apply_irq_handler, NULL, mask);
+		dss_data.irq_enabled = true;
+	}
+
+	if (!r_get) {
+		dss_write_regs();
+		dss_set_go_bits();
+	}
+
+	if (r_get || !wait_for_go) {
+		/* pretend that programming has happened */
+		for (i = 0; i < omap_dss_get_num_overlays(); ++i) {
+			op = &dss_data.ovl_priv_data_array[i];
+			if (op->channel != mgr->id)
+				continue;
+			if (op->info_dirty)
+				dss_ovl_configure_cb(&op->cb, i, false);
+			if (op->shadow_info_dirty) {
+				dss_ovl_program_cb(&op->cb, i);
+				op->dispc_channel = op->channel;
+				op->shadow_info_dirty = false;
+			} else {
+				pr_warn("ovl%d-shadow is not dirty\n", i);
+			}
+		}
+
+		/*
+		 * Manager callbacks take the manager id, not the stale
+		 * overlay loop index left over from the loop above (cf.
+		 * dss_ovl_configure_cb(&mp->cb, mgr->id, ...) in the
+		 * manual-update path and the pr_warn below).
+		 */
+		if (mp->info_dirty)
+			dss_ovl_configure_cb(&mp->cb, mgr->id, false);
+		if (mp->shadow_info_dirty) {
+			dss_ovl_program_cb(&mp->cb, mgr->id);
+			mp->shadow_info_dirty = false;
+		} else {
+			pr_warn("mgr%d-shadow is not dirty\n", mgr->id);
+		}
+	}
+
+	spin_unlock_irqrestore(&data_lock, flags);
+
+	if (wait_for_go)
+		mgr->wait_for_go(mgr);
+
+	if (!r_get)
+		dispc_runtime_put();
+
+	return r;
+}
+
+/*
+ * Remove a not-yet-applied completion callback from @mgr's user info.
+ * Succeeds only while the user info is still dirty (not yet applied) and
+ * both the function pointer and data cookie match @cb; otherwise the
+ * callback is already owned by the apply machinery and -EPERM is returned.
+ */
+int omap_dss_manager_unregister_callback(struct omap_overlay_manager *mgr,
+					 struct omapdss_ovl_cb *cb)
+{
+	unsigned long flags;
+	int r = 0;
+	struct mgr_priv_data *mp = get_mgr_priv(mgr);
+	spin_lock_irqsave(&data_lock, flags);
+	if (mp->user_info_dirty &&
+	    mp->user_info.cb.fn == cb->fn &&
+	    mp->user_info.cb.data == cb->data)
+		mp->user_info.cb.fn = NULL;
+	else
+		r = -EPERM;
+	spin_unlock_irqrestore(&data_lock, flags);
+	return r;
+}
+
static void omap_dss_mgr_apply_ovl(struct omap_overlay *ovl)
{
struct ovl_priv_data *op;
if (!op->user_info_dirty)
return;
+ /* complete unconfigured info */
+ dss_ovl_cb(&op->cb.info, ovl->id,
+ DSS_COMPLETION_ECLIPSED_CACHE);
+
+ op->cb.info = op->user_info.cb;
+ op->user_info.cb.fn = NULL;
+
op->user_info_dirty = false;
op->info_dirty = true;
op->info = op->user_info;
if (!mp->user_info_dirty)
return;
+ /* complete unconfigured info */
+ dss_ovl_cb(&mp->cb.info, mgr->id,
+ DSS_COMPLETION_ECLIPSED_CACHE);
+
+ mp->cb.info = mp->user_info.cb;
+ mp->user_info.cb.fn = NULL;
+
mp->user_info_dirty = false;
mp->info_dirty = true;
mp->info = mp->user_info;
/* Configure manager */
omap_dss_mgr_apply_mgr(mgr);
- dss_write_regs();
- dss_set_go_bits();
-
spin_unlock_irqrestore(&data_lock, flags);
return 0;
}
+#ifdef CONFIG_DEBUG_FS
+/*
+ * Dump one callback descriptor to a debugfs seq file: which completion
+ * events are still pending in its mask (Changed/Programmed/Displayed/
+ * Released), the opaque data cookie, and the callback symbol (%pf).
+ */
+static void seq_print_cb(struct seq_file *s, struct omapdss_ovl_cb *cb)
+{
+	if (!cb->fn) {
+		seq_printf(s, "(none)\n");
+		return;
+	}
+
+	seq_printf(s, "mask=%c%c%c%c [%p] %pf\n",
+		   (cb->mask & DSS_COMPLETION_CHANGED) ? 'C' : '-',
+		   (cb->mask & DSS_COMPLETION_PROGRAMMED) ? 'P' : '-',
+		   (cb->mask & DSS_COMPLETION_DISPLAYED) ? 'D' : '-',
+		   (cb->mask & DSS_COMPLETION_RELEASED) ? 'R' : '-',
+		   cb->data,
+		   cb->fn);
+}
+#endif
+
+/*
+ * Debugfs helper: print the callback state of every stage of @mgr's DISPC
+ * pipeline (user_info -> info -> shadow -> dispc) together with the dirty
+ * flags, under data_lock.  Compiles to a no-op body without DEBUG_FS.
+ */
+void seq_print_cbs(struct omap_overlay_manager *mgr, struct seq_file *s)
+{
+#ifdef CONFIG_DEBUG_FS
+	struct mgr_priv_data *mp;
+	unsigned long flags;
+
+	spin_lock_irqsave(&data_lock, flags);
+
+	mp = get_mgr_priv(mgr);
+
+	seq_printf(s, "  DISPC pipeline:\n\n"
+		   "    user_info:%13s ", mp->user_info_dirty ?
+		   "DIRTY" : "clean");
+	seq_print_cb(s, &mp->user_info.cb);
+	seq_printf(s, "    info:%12s ", mp->info_dirty ? "DIRTY" : "clean");
+	seq_print_cb(s, &mp->cb.info);
+	seq_printf(s, "    shadow: %s %s  ", mp->cb.shadow_enabled ? "ACT" :
+		   "off", mp->shadow_info_dirty ?
+		   "DIRTY" : "clean");
+	seq_print_cb(s, &mp->cb.shadow);
+	seq_printf(s, "    dispc:%12s ", mp->cb.dispc_displayed ?
+		   "DISPLAYED" : "");
+	seq_print_cb(s, &mp->cb.dispc);
+	seq_printf(s, "\n");
+
+	spin_unlock_irqrestore(&data_lock, flags);
+#endif
+}
+
static void dss_apply_ovl_enable(struct omap_overlay *ovl, bool enable)
{
struct ovl_priv_data *op;
dispc_ovl_set_channel_out(ovl->id, mgr->id);
+ op->channel = mgr->id;
+
ovl->manager = mgr;
list_add_tail(&ovl->list, &mgr->overlays);
return e;
}
+/*
+ * Apply the enable/disable state of every overlay in @mgr->ovls (first
+ * @mgr->num_ovls entries), then program the registers and set the GO bits.
+ * Installed as mgr->set_ovl.  All overlays must already be attached to
+ * @mgr.  Returns 0 on success, -EINVAL on bad arguments.
+ * Takes apply_lock then data_lock; waits for extra-info updates before
+ * returning.
+ */
+int dss_mgr_set_ovls(struct omap_overlay_manager *mgr)
+{
+	unsigned long flags;
+	int i;
+	if (!mgr || !mgr->ovls) {
+		DSSERR("null pointer\n");
+		return -EINVAL;
+	}
+	if (mgr->num_ovls > dss_feat_get_num_ovls()) {
+		DSSERR("Invalid number of overlays passed\n");
+		return -EINVAL;
+	}
+	mutex_lock(&apply_lock);
+	spin_lock_irqsave(&data_lock, flags);
+
+	for (i = 0; i < mgr->num_ovls; i++) {
+		if (mgr != mgr->ovls[i]->manager) {
+			DSSERR("Invalid mgr for ovl#%d\n", mgr->ovls[i]->id);
+			spin_unlock_irqrestore(&data_lock, flags);
+			mutex_unlock(&apply_lock);
+			return -EINVAL;
+		}
+		/* Enable the overlay */
+		if (mgr->ovls[i]->enabled) {
+			struct ovl_priv_data *op = get_ovl_priv(mgr->ovls[i]);
+			/* recompute FIFO thresholds as if already enabled */
+			op->enabling = true;
+			dss_setup_fifos();
+			op->enabling = false;
+			dss_apply_ovl_enable(mgr->ovls[i], true);
+		} else {
+			dss_apply_ovl_enable(mgr->ovls[i], false);
+		}
+	}
+	dss_write_regs();
+	dss_set_go_bits();
+	spin_unlock_irqrestore(&data_lock, flags);
+	wait_pending_extra_info_updates();
+	mutex_unlock(&apply_lock);
+	return 0;
+}
+
static int dss_ovl_enable(struct omap_overlay *ovl)
{
struct ovl_priv_data *op = get_ovl_priv(ovl);
struct omap_dss_device *dssdev = NULL;
int i, r;
+ if(!pdev)
+ return -ENODEV;
+
mutex_lock(&compat_init_lock);
if (compat_refcnt++ > 0)
mgr->get_manager_info = &dss_mgr_get_info;
mgr->wait_for_go = &dss_mgr_wait_for_go;
mgr->wait_for_vsync = &dss_mgr_wait_for_vsync;
+ mgr->set_ovl = &dss_mgr_set_ovls;
mgr->get_device = &dss_mgr_get_device;
}
/* XXX uninit sysfs files on error */
if (r)
goto err_disp_sysfs;
+
+ BLOCKING_INIT_NOTIFIER_HEAD(&dssdev->state_notifiers);
}
dispc_runtime_get();
index f9c294b3babe7962407ca74a013fdce29e76bd88..075021403234313adcbdd14ddd92ce2af1130402 100644 (file)
return 0;
}
+/*
+ * Wrapper installed over the driver's disable op: notify the display's
+ * state_notifiers chain (before disabling, if not already disabled), then
+ * call the original disable hook saved in disable_orig.
+ */
+static void omap_dss_driver_disable(struct omap_dss_device *dssdev)
+{
+	if (dssdev->state != OMAP_DSS_DISPLAY_DISABLED)
+		blocking_notifier_call_chain(&dssdev->state_notifiers,
+					OMAP_DSS_DISPLAY_DISABLED, dssdev);
+	dssdev->driver->disable_orig(dssdev);
+}
+
+/*
+ * Wrapper installed over the driver's enable op: call the original enable
+ * hook, and on success (display now ACTIVE) notify the state_notifiers
+ * chain.  Returns the original hook's result.
+ */
+static int omap_dss_driver_enable(struct omap_dss_device *dssdev)
+{
+	int r = dssdev->driver->enable_orig(dssdev);
+	if (!r && dssdev->state == OMAP_DSS_DISPLAY_ACTIVE)
+		blocking_notifier_call_chain(&dssdev->state_notifiers,
+					OMAP_DSS_DISPLAY_ACTIVE, dssdev);
+	return r;
+}
+
int omap_dss_register_driver(struct omap_dss_driver *dssdriver)
{
dssdriver->driver.bus = &dss_bus_type;
if (dssdriver->get_timings == NULL)
dssdriver->get_timings = omapdss_default_get_timings;
+ dssdriver->disable_orig = dssdriver->disable;
+ dssdriver->disable = omap_dss_driver_disable;
+ dssdriver->enable_orig = dssdriver->enable;
+ dssdriver->enable = omap_dss_driver_enable;
+
return driver_register(&dssdriver->driver);
}
EXPORT_SYMBOL(omap_dss_register_driver);
index 1ca4a677384fd34ad9e15f8cde1f0d06b56954ab..cd32801f3831ff81c3fa15fb3a8121f2e5d34ce8 100644 (file)
#include <linux/pm_runtime.h>
#include <linux/sizes.h>
+#include <plat/cpu.h>
+#include <mach-omap2/clockdomain.h>
#include <video/omapdss.h>
#include "dss.h"
#define DISPC_MAX_NR_FIFOS 5
+static struct clockdomain *l3_1_clkdm, *l3_2_clkdm;
+
static struct {
struct platform_device *pdev;
void __iomem *base;
dispc_write_reg(DISPC_DIVISOR, l);
}
+ if (cpu_is_omap44xx()) {
+ l3_1_clkdm = clkdm_lookup("l3_1_clkdm");
+ l3_2_clkdm = clkdm_lookup("l3_2_clkdm");
+ } else if (cpu_is_omap54xx() && (omap_rev() <= OMAP5430_REV_ES1_0)) {
+ l3_1_clkdm = clkdm_lookup("l3main1_clkdm");
+ l3_2_clkdm = clkdm_lookup("l3main2_clkdm");
+ }
+
/* FUNCGATED */
if (dss_has_feature(FEAT_FUNCGATED))
REG_FLD_MOD(DISPC_CONFIG, 1, 9, 9);
{
dispc_save_context();
+ /*
+ * OMAP4,5 ERRATUM i740: Mstandby and disconnect protocol issue
+ * Workaround:
+ * Restore L3_1 CD to HW_AUTO, when DSS module idles.
+ */
+ if (cpu_is_omap44xx() || (cpu_is_omap54xx() &&
+ (omap_rev() <= OMAP5430_REV_ES1_0))) {
+ clkdm_allow_idle(l3_1_clkdm);
+ clkdm_allow_idle(l3_2_clkdm);
+ }
+
return 0;
}
static int dispc_runtime_resume(struct device *dev)
{
+ /*
+ * OMAP4,5 ERRATUM i740: Mstandby and disconnect protocol issue
+ * Impacts: all OMAP4 and OMAP5_ES1 devices
+	 * Simplified Description:
+ * issue #1: The handshake between IP modules on L3_1
+ * peripherals with PRCM has a limitation in a certain time
+ * window of L4 clock cycle. Due to the fact that a wrong
+ * variant of stall signal was used in circuit of PRCM, the
+	 * initiator-interconnect protocol is broken when the time
+	 * window is hit where the PRCM requires the interconnect to go
+	 * to idle while the initiator asks to wake up.
+ * Issue #2: DISPC asserts a sub-mstandby signal for a short
+ * period. In this time interval, IP block requests
+ * disconnection of Master port, and results in Mstandby and
+ * wait request to PRCM. In parallel, if mstandby is de-asserted
+ * by DISPC simultaneously, interconnect requests for a
+ * reconnect for one cycle alone resulting in a disconnect
+ * protocol violation and a deadlock of the system.
+ *
+ * Workaround:
+ * L3_1 clock domain must not be programmed in HW_AUTO if
+ * Static dependency with DSS is enabled and DSS clock domain
+ * is ON.
+ */
+ if (cpu_is_omap44xx() || (cpu_is_omap54xx() &&
+ (omap_rev() <= OMAP5430_REV_ES1_0))) {
+ clkdm_deny_idle(l3_1_clkdm);
+ clkdm_deny_idle(l3_2_clkdm);
+ }
+
dispc_restore_context();
return 0;
index 6212bd6877b36055dbaa796b7985a31b3e59588d..c5f1f19549252f6b07c91a55caedf401496b0b43 100644 (file)
void dss_put_device(struct omap_dss_device *dssdev);
void dss_copy_device_pdata(struct omap_dss_device *dst,
const struct omap_dss_device *src);
+int dss_mgr_blank(struct omap_overlay_manager *mgr,
+ bool wait_for_go);
/* output */
void dss_register_output(struct omap_dss_output *out);
/* display */
int dss_suspend_all_devices(void);
int dss_resume_all_devices(void);
+
void dss_disable_all_devices(void);
int display_init_sysfs(struct platform_device *pdev,
#if defined(CONFIG_OMAP4_DSS_HDMI) || defined(CONFIG_OMAP5_DSS_HDMI)
int hdmi_init_platform_driver(void) __init;
void hdmi_uninit_platform_driver(void) __exit;
+
unsigned long hdmi_get_pixel_clock(void);
#else
static inline unsigned long hdmi_get_pixel_clock(void)
index 2551eaa14c42537fb8ed38464b68eb42b0259b85..ccb90c9030a20624c8119d42366302a170f03c62 100644 (file)
dss_feat_get_supported_displays(mgr->id);
mgr->supported_outputs =
dss_feat_get_supported_outputs(mgr->id);
+ mgr->blank = &dss_mgr_blank;
INIT_LIST_HEAD(&mgr->overlays);
continue;
list_for_each_entry(ovl2, &mgr->overlays, list) {
- if (ovl1 == ovl2)
+ if ((ovl1 == ovl2) || !(ovl1->enabled && ovl2->enabled))
continue;
info2 = overlay_infos[ovl2->id];
index 7ca219a63c62ac2436cbc28c2cccd4bcb331b046..6819d0a21ee08ca05e279bc4b973e57566bde9cd 100644 (file)
--- a/include/video/omapdss.h
+++ b/include/video/omapdss.h
#include <linux/kobject.h>
#include <linux/device.h>
#include <linux/interrupt.h>
+#include <linux/fb.h>
+#include <sound/asound.h>
#define DISPC_IRQ_FRAMEDONE (1 << 0)
#define DISPC_IRQ_VSYNC (1 << 1)
enum omap_dss_display_state {
OMAP_DSS_DISPLAY_DISABLED = 0,
OMAP_DSS_DISPLAY_ACTIVE,
+ OMAP_DSS_DISPLAY_SUSPENDED,
};
enum omap_dss_audio_state {
extern const struct omap_video_timings omap_dss_ntsc_timings;
#endif
+enum omapdss_completion_status {
+ DSS_COMPLETION_PROGRAMMED = (1 << 1),
+ DSS_COMPLETION_DISPLAYED = (1 << 2),
+
+ DSS_COMPLETION_CHANGED_SET = (1 << 3),
+ DSS_COMPLETION_CHANGED_CACHE = (1 << 4),
+ DSS_COMPLETION_CHANGED = (3 << 3),
+
+ DSS_COMPLETION_RELEASED = (15 << 5),
+ DSS_COMPLETION_ECLIPSED_SET = (1 << 5),
+ DSS_COMPLETION_ECLIPSED_CACHE = (1 << 6),
+ DSS_COMPLETION_ECLIPSED_SHADOW = (1 << 7),
+ DSS_COMPLETION_TORN = (1 << 8),
+};
+
+struct omapdss_ovl_cb {
+	/* optional callback method; its return value is ANDed into mask
+	 * by dss_ovl_cb(), so returning 0 disarms the callback */
+	u32 (*fn)(void *data, int id, int status);
+	void *data;	/* opaque cookie passed back to fn */
+	u32 mask;	/* DSS_COMPLETION_* events still of interest */
+};
+
+struct omap_dss_cconv_coefs {
+ s16 ry, rcr, rcb;
+ s16 gy, gcr, gcb;
+ s16 by, bcr, bcb;
+ u16 full_range;
+} __aligned(4);
+
+/* Writeback data structures */
+enum omap_writeback_source {
+ OMAP_WB_LCD1 = 0,
+ OMAP_WB_TV = 1,
+ OMAP_WB_LCD2 = 2,
+ OMAP_WB_GFX = 3,
+ OMAP_WB_VID1 = 4,
+ OMAP_WB_VID2 = 5,
+ OMAP_WB_VID3 = 6
+};
+
+enum omap_writeback_capturemode {
+ OMAP_WB_CAPTURE_ALL = 0x0,
+ OMAP_WB_CAPTURE_1 = 0x1,
+ OMAP_WB_CAPTURE_1_OF_2 = 0x2,
+ OMAP_WB_CAPTURE_1_OF_3 = 0x3,
+ OMAP_WB_CAPTURE_1_OF_4 = 0x4,
+ OMAP_WB_CAPTURE_1_OF_5 = 0x5,
+ OMAP_WB_CAPTURE_1_OF_6 = 0x6,
+ OMAP_WB_CAPTURE_1_OF_7 = 0x7
+};
+
+enum omap_writeback_mode {
+ OMAP_WB_CAPTURE_MODE = 0x0,
+ OMAP_WB_MEM2MEM_MODE = 0x1,
+};
+
+struct omap_writeback_info {
+ bool enabled;
+ bool info_dirty;
+ enum omap_writeback_source source;
+ u16 width;
+ u16 height;
+ u16 out_width;
+ u16 out_height;
+ enum omap_color_mode dss_mode;
+ enum omap_writeback_capturemode capturemode;
+ /* capture or mem2mem mode */
+ enum omap_writeback_mode mode;
+ u32 paddr;
+ /* NV12 support*/
+ u32 p_uv_addr;
+ u8 rotation;
+ enum omap_dss_rotation_type rotation_type;
+ bool force_1d;
+};
+
+struct omap_writeback {
+ struct kobject kobj;
+ struct list_head list;
+ bool info_dirty;
+ int width;
+ int height;
+ /* mutex to control access to wb data */
+ struct mutex lock;
+ struct omap_writeback_info info;
+ struct completion wb_completion;
+
+ bool (*check_wb)(struct omap_writeback *wb);
+ int (*set_wb_info)(struct omap_writeback *wb,
+ struct omap_writeback_info *info);
+ void (*get_wb_info)(struct omap_writeback *wb,
+ struct omap_writeback_info *info);
+ int (*register_framedone)(struct omap_writeback *wb);
+ int (*wait_framedone)(struct omap_writeback *wb);
+};
+
struct omap_dss_cpr_coefs {
s16 rr, rg, rb;
s16 gr, gg, gb;
u8 global_alpha;
u8 pre_mult_alpha;
u8 zorder;
+ u16 min_x_decim, max_x_decim, min_y_decim, max_y_decim;
+
+ struct omapdss_ovl_cb cb;
+ struct omap_dss_cconv_coefs cconv;
+ bool force_1d;
+ bool mflag_en;
};
struct omap_overlay {
enum omap_plane id;
enum omap_color_mode supported_modes;
enum omap_overlay_caps caps;
+ bool enabled;
/* dynamic fields */
struct omap_overlay_manager *manager;
bool cpr_enable;
struct omap_dss_cpr_coefs cpr_coefs;
+
+ struct omapdss_ovl_cb cb;
};
struct omap_overlay_manager {
/* dynamic fields */
struct omap_dss_output *output;
+ /* Overlays associated with the manager */
+ struct omap_overlay *ovls[5];
+
+ /* No of overlays for the manager that requires update */
+ u16 num_ovls;
+
/*
* The following functions do not block:
*
int (*apply)(struct omap_overlay_manager *mgr);
int (*wait_for_go)(struct omap_overlay_manager *mgr);
int (*wait_for_vsync)(struct omap_overlay_manager *mgr);
+ int (*blank)(struct omap_overlay_manager *mgr, bool wait_for_vsync);
+ void (*dump_cb)(struct omap_overlay_manager *mgr, struct seq_file *s);
+ int (*set_ovl)(struct omap_overlay_manager *mgr);
+ int (*wb_apply)(struct omap_overlay_manager *mgr,
+ struct omap_writeback *wb);
struct omap_dss_device *(*get_device)(struct omap_overlay_manager *mgr);
};
enum omap_channel channel;
+ bool first_vsync;
+
union {
struct {
u8 data_lines;
struct omap_dss_dsi_videomode_timings dsi_vm_timings;
struct s3d_disp_info s3d_info;
+ u32 width_in_um;
+ u32 height_in_um;
+
} panel;
struct {
enum omap_dss_display_state state;
+ struct blocking_notifier_head state_notifiers;
+
enum omap_dss_audio_state audio_state;
/* platform specific */
int (*read_edid)(struct omap_dss_device *dssdev, u8 *buf, int len);
bool (*detect)(struct omap_dss_device *dssdev);
+
+ /* for wrapping around state changes */
+ void (*disable_orig)(struct omap_dss_device *display);
+ int (*enable_orig)(struct omap_dss_device *display);
+
/*
* For display drivers that support audio. This encompasses
* HDMI and DisplayPort at the moment.
void omapdss_rfbi_set_interface_timings(struct omap_dss_device *dssdev,
struct rfbi_timings *timings);
+int dispc_scaling_decision(enum omap_plane plane, struct omap_overlay_info *oi,
+ enum omap_channel channel,
+ u16 *x_decim, u16 *y_decim, bool *three_tap);
+int omap_dss_manager_unregister_callback(struct omap_overlay_manager *mgr,
+ struct omapdss_ovl_cb *cb);
+
+/* generic callback handling */
+/*
+ * Deliver completion @status for overlay/manager @id to callback @cb.
+ * The callback fires only if its mask includes @status; its return value
+ * is ANDed into the mask (so it can narrow the events it still wants).
+ * A RELEASED status, or an empty mask, disarms the callback entirely.
+ */
+static inline void dss_ovl_cb(struct omapdss_ovl_cb *cb, int id, int status)
+{
+	if (cb->fn && (cb->mask & status))
+		cb->mask &= cb->fn(cb->data, id, status);
+	if (status & DSS_COMPLETION_RELEASED)
+		cb->mask = 0;
+	if (!cb->mask)
+		cb->fn = NULL;
+}
+
int omapdss_compat_init(void);
void omapdss_compat_uninit(void);
index 55e2bf652beff53d6694e3cf20d2d2e4d6f17393..7ba62e99087ef79c16372d382ae99ebdc8cbad41 100644 (file)
#define DISMOD (val)(val<<2)
#define TXSTATE BIT(4)
#define RXSTATE BIT(5)
+#define SRMOD_MASK 3
+#define SRMOD_INACTIVE 0
/*
* DAVINCI_MCASP_LBCTL_REG - Loop Back Control Register Bits
mcasp_set_bits(base + DAVINCI_MCASP_ACLKRCTL_REG, ACLKRE);
mcasp_set_bits(base + DAVINCI_MCASP_RXFMCTL_REG, AFSRE);
- mcasp_set_bits(base + DAVINCI_MCASP_PDIR_REG, ACLKX | AFSX);
+ mcasp_set_bits(base + DAVINCI_MCASP_PDIR_REG,
+ ACLKX | ACLKR);
+ mcasp_set_bits(base + DAVINCI_MCASP_PDIR_REG,
+ AFSX | AFSR);
break;
case SND_SOC_DAIFMT_CBM_CFS:
/* codec is clock master and frame slave */
mcasp_set_bits(base + DAVINCI_MCASP_ACLKXCTL_REG, ACLKXPOL);
mcasp_clr_bits(base + DAVINCI_MCASP_TXFMCTL_REG, FSXPOL);
- mcasp_clr_bits(base + DAVINCI_MCASP_ACLKRCTL_REG, ACLKRPOL);
+ mcasp_set_bits(base + DAVINCI_MCASP_ACLKRCTL_REG, ACLKRPOL);
mcasp_clr_bits(base + DAVINCI_MCASP_RXFMCTL_REG, FSRPOL);
break;
int word_length)
{
u32 fmt;
- u32 rotate = (32 - word_length) / 4;
+ u32 tx_rotate = (word_length / 4) & 0x7;
+ u32 rx_rotate = (32 - word_length) / 4;
u32 mask = (1ULL << word_length) - 1;
/*
* callback, take it into account here. That allows us to for example
* send 32 bits per channel to the codec, while only 16 of them carry
* audio payload.
- * The clock ratio is given for a full period of data (both left and
- * right channels), so it has to be divided by 2.
+ * The clock ratio is given for a full period of data (for I2S format
+ * both left and right channels), so it has to be divided by number of
+ * tdm-slots (for I2S - divided by 2).
*/
if (dev->bclk_lrclk_ratio)
- word_length = dev->bclk_lrclk_ratio / 2;
+ word_length = dev->bclk_lrclk_ratio / dev->tdm_slots;
/* mapping of the XSSZ bit-field as described in the datasheet */
fmt = (word_length >> 1) - 1;
- mcasp_mod_bits(dev->base + DAVINCI_MCASP_RXFMT_REG,
- RXSSZ(fmt), RXSSZ(0x0F));
- mcasp_mod_bits(dev->base + DAVINCI_MCASP_TXFMT_REG,
- TXSSZ(fmt), TXSSZ(0x0F));
- mcasp_mod_bits(dev->base + DAVINCI_MCASP_TXFMT_REG, TXROT(rotate),
- TXROT(7));
- mcasp_mod_bits(dev->base + DAVINCI_MCASP_RXFMT_REG, RXROT(rotate),
- RXROT(7));
+ if (dev->op_mode != DAVINCI_MCASP_DIT_MODE) {
+ mcasp_mod_bits(dev->base + DAVINCI_MCASP_RXFMT_REG,
+ RXSSZ(fmt), RXSSZ(0x0F));
+ mcasp_mod_bits(dev->base + DAVINCI_MCASP_TXFMT_REG,
+ TXSSZ(fmt), TXSSZ(0x0F));
+ mcasp_mod_bits(dev->base + DAVINCI_MCASP_TXFMT_REG,
+ TXROT(tx_rotate), TXROT(7));
+ mcasp_mod_bits(dev->base + DAVINCI_MCASP_RXFMT_REG,
+ RXROT(rx_rotate), RXROT(7));
+ mcasp_set_reg(dev->base + DAVINCI_MCASP_RXMASK_REG,
+ mask);
+ }
+
mcasp_set_reg(dev->base + DAVINCI_MCASP_TXMASK_REG, mask);
- mcasp_set_reg(dev->base + DAVINCI_MCASP_RXMASK_REG, mask);
return 0;
}
-static void davinci_hw_common_param(struct davinci_audio_dev *dev, int stream)
+static int davinci_hw_common_param(struct davinci_audio_dev *dev, int stream,
+ int channels)
{
int i;
u8 tx_ser = 0;
u8 rx_ser = 0;
-
+ u8 ser;
+ u8 slots = dev->tdm_slots;
+ u8 max_active_serializers = (channels + slots - 1) / slots;
/* Default configuration */
mcasp_set_bits(dev->base + DAVINCI_MCASP_PWREMUMGT_REG, MCASP_SOFT);
@@ -682,17 +696,33 @@ static void davinci_hw_common_param(struct davinci_audio_dev *dev, int stream)
for (i = 0; i < dev->num_serializer; i++) {
mcasp_set_bits(dev->base + DAVINCI_MCASP_XRSRCTL_REG(i),
dev->serial_dir[i]);
- if (dev->serial_dir[i] == TX_MODE) {
+ if (dev->serial_dir[i] == TX_MODE &&
+ tx_ser < max_active_serializers) {
mcasp_set_bits(dev->base + DAVINCI_MCASP_PDIR_REG,
AXR(i));
tx_ser++;
- } else if (dev->serial_dir[i] == RX_MODE) {
+ } else if (dev->serial_dir[i] == RX_MODE &&
+ rx_ser < max_active_serializers) {
mcasp_clr_bits(dev->base + DAVINCI_MCASP_PDIR_REG,
AXR(i));
rx_ser++;
+ } else {
+ mcasp_mod_bits(dev->base + DAVINCI_MCASP_XRSRCTL_REG(i),
+ SRMOD_INACTIVE, SRMOD_MASK);
}
}
+ if (stream == SNDRV_PCM_STREAM_PLAYBACK)
+ ser = tx_ser;
+ else
+ ser = rx_ser;
+
+ if (ser < max_active_serializers) {
+ dev_warn(dev->dev, "stream has more channels (%d) than are "
+ "enabled in mcasp (%d)\n", channels, ser * slots);
+ return -EINVAL;
+ }
+
if (dev->txnumevt && stream == SNDRV_PCM_STREAM_PLAYBACK) {
if (dev->txnumevt * tx_ser > 64)
dev->txnumevt = 1;
((dev->rxnumevt * rx_ser) << 8), NUMEVT_MASK);
}
}
+
+ return 0;
}
static void davinci_hw_param(struct davinci_audio_dev *dev, int stream)
/* S/PDIF */
static void davinci_hw_dit_param(struct davinci_audio_dev *dev)
{
- /* Set the PDIR for Serialiser as output */
- mcasp_set_bits(dev->base + DAVINCI_MCASP_PDIR_REG, AFSX);
-
- /* TXMASK for 24 bits */
- mcasp_set_reg(dev->base + DAVINCI_MCASP_TXMASK_REG, 0x00FFFFFF);
-
/* Set the TX format : 24 bit right rotation, 32 bit slot, Pad 0
and LSB first */
mcasp_set_bits(dev->base + DAVINCI_MCASP_TXFMT_REG,
&dev->dma_params[substream->stream];
int word_length;
u8 fifo_level;
+ u8 slots = dev->tdm_slots;
+ u8 active_serializers;
+ int channels;
+ struct snd_interval *pcm_channels = hw_param_interval(params,
+ SNDRV_PCM_HW_PARAM_CHANNELS);
+ channels = pcm_channels->min;
+
+ active_serializers = (channels + slots - 1) / slots;
- davinci_hw_common_param(dev, substream->stream);
+ if (davinci_hw_common_param(dev, substream->stream, channels) == -EINVAL)
+ return -EINVAL;
if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
- fifo_level = dev->txnumevt;
+ fifo_level = dev->txnumevt * active_serializers;
else
- fifo_level = dev->rxnumevt;
+ fifo_level = dev->rxnumevt * active_serializers;
if (dev->op_mode == DAVINCI_MCASP_DIT_MODE)
davinci_hw_dit_param(dev);
.name = "davinci-mcasp.0",
.playback = {
.channels_min = 2,
- .channels_max = 2,
+ .channels_max = 32 * 16,
.rates = DAVINCI_MCASP_RATES,
.formats = DAVINCI_MCASP_PCM_FMTS,
},
.capture = {
.channels_min = 2,
- .channels_max = 2,
+ .channels_max = 32 * 16,
.rates = DAVINCI_MCASP_RATES,
.formats = DAVINCI_MCASP_PCM_FMTS,
},
pdata->op_mode = val;
ret = of_property_read_u32(np, "tdm-slots", &val);
- if (ret >= 0)
+ if (ret >= 0) {
+ if (val < 2 || val > 32) {
+ dev_err(&pdev->dev,
+ "tdm-slots must be in rage [2-32]\n");
+ ret = -EINVAL;
+ goto nodata;
+ }
+
pdata->tdm_slots = val;
+ }
ret = of_property_read_u32(np, "num-serializer", &val);
if (ret >= 0)
index 0edd3b5a37fd86a5c04a54b66b8735d900e33a06..a9ac0c11da71dd784ed26286e46d064f0cca66c9 100644 (file)
u8 num_serializer;
u8 *serial_dir;
u8 version;
- u8 bclk_lrclk_ratio;
+ u16 bclk_lrclk_ratio;
/* McASP FIFO related */
u8 txnumevt;
index 9bdd71b881e20092d3cfa7247d29d11273b4e285..8460edce1c3b6b5cc90dff481e5588c75e2285ad 100644 (file)
src = dma_pos;
dst = prtd->params->dma_addr;
src_bidx = data_type;
- dst_bidx = 0;
+ dst_bidx = 4;
src_cidx = data_type * fifo_level;
dst_cidx = 0;
} else {
edma_set_transfer_params(prtd->asp_link[0], acnt, count, 1, 0,
ASYNC);
else
- edma_set_transfer_params(prtd->asp_link[0], acnt, fifo_level,
- count, fifo_level,
- ABSYNC);
+ edma_set_transfer_params(prtd->asp_link[0], acnt,
+ fifo_level,
+ count, fifo_level,
+ ABSYNC);
}
static void davinci_pcm_dma_irq(unsigned link, u16 ch_status, void *data)