summary | shortlog | log | commit | commitdiff | tree
raw | patch | inline | side by side (parent: e7f415e)
author | Praneeth Bajjuri <praneeth@ti.com> | |
Wed, 20 Mar 2013 23:13:47 +0000 (18:13 -0500) | ||
committer | Praneeth Bajjuri <praneeth@ti.com> | |
Wed, 20 Mar 2013 23:14:47 +0000 (18:14 -0500) |
This reverts commit 6cd07e44352d89c799df8bb0a88a864b15c91c47.
Already fixed in upstream 3.8 kernel by commit
commit ab4d536890853ab6675ede65db40e2c0980cb0ea
Author: Will Deacon <will.deacon@arm.com>
ARM: 7398/1: l2x0: only write to debug registers on PL310
Signed-off-by: Praneeth Bajjuri <praneeth@ti.com>
arch/arm/include/asm/hardware/cache-l2x0.h | patch | blob | history | |
arch/arm/mm/cache-l2x0.c | patch | blob | history |
diff --git a/arch/arm/include/asm/hardware/cache-l2x0.h b/arch/arm/include/asm/hardware/cache-l2x0.h
index 0ca0f5a7c84b33774d6e80ab564103b15ebfdbf0..3b2c40b5bfa22de982fce4e79afc8ced0326dce1 100644 (file)
#define L2X0_STNDBY_MODE_EN (1 << 0)
/* Registers shifts and masks */
-#define L2X0_CACHE_ID_REV_MASK (0x3f)
#define L2X0_CACHE_ID_PART_MASK (0xf << 6)
#define L2X0_CACHE_ID_PART_L210 (1 << 6)
#define L2X0_CACHE_ID_PART_L310 (3 << 6)
#define L2X0_WAY_SIZE_SHIFT 3
-#define REV_PL310_R2P0 4
-
#ifndef __ASSEMBLY__
extern void __init l2x0_init(void __iomem *base, u32 aux_val, u32 aux_mask);
#if defined(CONFIG_CACHE_L2X0) && defined(CONFIG_OF)
index a2f0ff7ee7183261b9405f29e3be290bcf02cd0d..c2f37390308a20b835d653dfbb0f64f28cc8ce40 100644 (file)
--- a/arch/arm/mm/cache-l2x0.c
+++ b/arch/arm/mm/cache-l2x0.c
static DEFINE_RAW_SPINLOCK(l2x0_lock);
static u32 l2x0_way_mask; /* Bitmask of active ways */
static u32 l2x0_size;
-static u32 l2x0_cache_id;
-static unsigned int l2x0_sets;
-static unsigned int l2x0_ways;
static unsigned long sync_reg_offset = L2X0_CACHE_SYNC;
/* Aurora don't have the cache ID register available, so we have to
static bool of_init = false;
-static inline bool is_pl310_rev(int rev)
-{
- return (l2x0_cache_id &
- (L2X0_CACHE_ID_PART_MASK | L2X0_CACHE_ID_REV_MASK)) ==
- (L2X0_CACHE_ID_PART_L310 | rev);
-}
-
static inline void cache_wait_way(void __iomem *reg, unsigned long mask)
{
/* wait for cache operation by line or way to complete */
raw_spin_unlock_irqrestore(&l2x0_lock, flags);
}
-#ifdef CONFIG_PL310_ERRATA_727915
-static void l2x0_for_each_set_way(void __iomem *reg)
-{
- int set;
- int way;
- unsigned long flags;
-
- for (way = 0; way < l2x0_ways; way++) {
- raw_spin_lock_irqsave(&l2x0_lock, flags);
- for (set = 0; set < l2x0_sets; set++)
- writel_relaxed((way << 28) | (set << 5), reg);
- cache_sync();
- raw_spin_unlock_irqrestore(&l2x0_lock, flags);
- }
-}
-#endif
-
static void __l2x0_flush_all(void)
{
debug_writel(0x03);
{
unsigned long flags;
-#ifdef CONFIG_PL310_ERRATA_727915
- if (is_pl310_rev(REV_PL310_R2P0)) {
- l2x0_for_each_set_way(l2x0_base + L2X0_CLEAN_INV_LINE_IDX);
- return;
- }
-#endif
-
/* clean all ways */
raw_spin_lock_irqsave(&l2x0_lock, flags);
__l2x0_flush_all();
{
unsigned long flags;
-#ifdef CONFIG_PL310_ERRATA_727915
- if (is_pl310_rev(REV_PL310_R2P0)) {
- l2x0_for_each_set_way(l2x0_base + L2X0_CLEAN_LINE_IDX);
- return;
- }
-#endif
-
/* clean all ways */
raw_spin_lock_irqsave(&l2x0_lock, flags);
- debug_writel(0x03);
writel_relaxed(l2x0_way_mask, l2x0_base + L2X0_CLEAN_WAY);
cache_wait_way(l2x0_base + L2X0_CLEAN_WAY, l2x0_way_mask);
cache_sync();
- debug_writel(0x00);
raw_spin_unlock_irqrestore(&l2x0_lock, flags);
}
void __init l2x0_init(void __iomem *base, u32 aux_val, u32 aux_mask)
{
u32 aux;
+ u32 cache_id;
u32 way_size = 0;
+ int ways;
int way_size_shift = L2X0_WAY_SIZE_SHIFT;
const char *type;
l2x0_base = base;
if (cache_id_part_number_from_dt)
- l2x0_cache_id = cache_id_part_number_from_dt;
+ cache_id = cache_id_part_number_from_dt;
else
- l2x0_cache_id = readl_relaxed(l2x0_base + L2X0_CACHE_ID);
+ cache_id = readl_relaxed(l2x0_base + L2X0_CACHE_ID)
+ & L2X0_CACHE_ID_PART_MASK;
aux = readl_relaxed(l2x0_base + L2X0_AUX_CTRL);
aux &= aux_mask;
aux |= aux_val;
/* Determine the number of ways */
- switch (l2x0_cache_id & L2X0_CACHE_ID_PART_MASK) {
+ switch (cache_id) {
case L2X0_CACHE_ID_PART_L310:
if (aux & (1 << 16))
- l2x0_ways = 16;
+ ways = 16;
else
- l2x0_ways = 8;
+ ways = 8;
type = "L310";
#ifdef CONFIG_PL310_ERRATA_753970
/* Unmapped register. */
outer_cache.set_debug = pl310_set_debug;
break;
case L2X0_CACHE_ID_PART_L210:
- l2x0_ways = (aux >> 13) & 0xf;
+ ways = (aux >> 13) & 0xf;
type = "L210";
break;
break;
default:
/* Assume unknown chips have 8 ways */
- l2x0_ways = 8;
+ ways = 8;
type = "L2x0 series";
break;
}
- l2x0_way_mask = (1 << l2x0_ways) - 1;
+ l2x0_way_mask = (1 << ways) - 1;
/*
* L2 cache Size = Way size * Number of ways
*/
way_size = (aux & L2X0_AUX_CTRL_WAY_SIZE_MASK) >> 17;
- way_size = SZ_1K << (way_size + way_size_shift);
+ way_size = 1 << (way_size + way_size_shift);
- l2x0_size = l2x0_ways * way_size;
- l2x0_sets = way_size / CACHE_LINE_SIZE;
+ l2x0_size = ways * way_size * SZ_1K;
/*
* Check if l2x0 controller is already enabled.
*/
if (!(readl_relaxed(l2x0_base + L2X0_CTRL) & L2X0_CTRL_EN)) {
/* Make sure that I&D is not locked down when starting */
- l2x0_unlock(l2x0_cache_id);
+ l2x0_unlock(cache_id);
/* l2x0 controller is disabled */
writel_relaxed(aux, l2x0_base + L2X0_AUX_CTRL);
printk(KERN_INFO "%s cache controller enabled\n", type);
printk(KERN_INFO "l2x0: %d ways, CACHE_ID 0x%08x, AUX_CTRL 0x%08x, Cache size: %d B\n",
- l2x0_ways, l2x0_cache_id, aux, l2x0_size);
+ ways, cache_id, aux, l2x0_size);
}
#ifdef CONFIG_OF