/*
 * Meta cache partition manipulation.
 *
 * Copyright 2010 Imagination Technologies Ltd.
 */
| |
| #include <linux/kernel.h> |
| #include <linux/io.h> |
| #include <linux/errno.h> |
| #include <asm/processor.h> |
| #include <asm/cachepart.h> |
| #include <asm/global_lock.h> |
| #include <asm/metag_isa.h> |
| #include <asm/metag_mem.h> |
| |
| #define SYSC_DCPART(n) (SYSC_DCPART0 + SYSC_xCPARTn_STRIDE * (n)) |
| #define SYSC_ICPART(n) (SYSC_ICPART0 + SYSC_xCPARTn_STRIDE * (n)) |
| |
#define CACHE_ASSOCIATIVITY 4 /* 4 way set-associative */
| #define ICACHE 0 |
| #define DCACHE 1 |
| |
| /* The CORE_CONFIG2 register is not available on Meta 1 */ |
| #ifdef CONFIG_METAG_META21 |
| unsigned int get_dcache_size(void) |
| { |
| unsigned int config2 = metag_in32(METAC_CORE_CONFIG2); |
| unsigned int sz = 0x1000 << ((config2 & METAC_CORECFG2_DCSZ_BITS) |
| >> METAC_CORECFG2_DCSZ_S); |
| if (config2 & METAC_CORECFG2_DCSMALL_BIT) |
| sz >>= 6; |
| return sz; |
| } |
| |
| unsigned int get_icache_size(void) |
| { |
| unsigned int config2 = metag_in32(METAC_CORE_CONFIG2); |
| unsigned int sz = 0x1000 << ((config2 & METAC_CORE_C2ICSZ_BITS) |
| >> METAC_CORE_C2ICSZ_S); |
| if (config2 & METAC_CORECFG2_ICSMALL_BIT) |
| sz >>= 6; |
| return sz; |
| } |
| |
| unsigned int get_global_dcache_size(void) |
| { |
| unsigned int cpart = metag_in32(SYSC_DCPART(hard_processor_id())); |
| unsigned int temp = cpart & SYSC_xCPARTG_AND_BITS; |
| return (get_dcache_size() * ((temp >> SYSC_xCPARTG_AND_S) + 1)) >> 4; |
| } |
| |
| unsigned int get_global_icache_size(void) |
| { |
| unsigned int cpart = metag_in32(SYSC_ICPART(hard_processor_id())); |
| unsigned int temp = cpart & SYSC_xCPARTG_AND_BITS; |
| return (get_icache_size() * ((temp >> SYSC_xCPARTG_AND_S) + 1)) >> 4; |
| } |
| |
| static unsigned int get_thread_cache_size(unsigned int cache, int thread_id) |
| { |
| unsigned int cache_size; |
| unsigned int t_cache_part; |
| unsigned int isEnabled; |
| unsigned int offset = 0; |
| isEnabled = (cache == DCACHE ? metag_in32(MMCU_DCACHE_CTRL_ADDR) & 0x1 : |
| metag_in32(MMCU_ICACHE_CTRL_ADDR) & 0x1); |
| if (!isEnabled) |
| return 0; |
| #if PAGE_OFFSET >= LINGLOBAL_BASE |
| /* Checking for global cache */ |
| cache_size = (cache == DCACHE ? get_global_dcache_size() : |
| get_global_icache_size()); |
| offset = 8; |
| #else |
| cache_size = (cache == DCACHE ? get_dcache_size() : |
| get_icache_size()); |
| #endif |
| t_cache_part = (cache == DCACHE ? |
| (metag_in32(SYSC_DCPART(thread_id)) >> offset) & 0xF : |
| (metag_in32(SYSC_ICPART(thread_id)) >> offset) & 0xF); |
| switch (t_cache_part) { |
| case 0xF: |
| return cache_size; |
| case 0x7: |
| return cache_size / 2; |
| case 0x3: |
| return cache_size / 4; |
| case 0x1: |
| return cache_size / 8; |
| case 0: |
| return cache_size / 16; |
| } |
| return -1; |
| } |
| |
| #if PAGE_OFFSET >= LINGLOBAL_BASE |
| /* |
| * The global icache partition should be useable if big enough, but we can't |
| * trust the icache local partition to be valid. |
| */ |
| int cachepart_min_iglobal(unsigned int min_size, unsigned int *old_val) |
| { |
| if (get_global_icache_size() < min_size) { |
| pr_err("cachepart_min_iglobal: not enough icache available\n"); |
| return -ENOMEM; |
| } |
| return 0; |
| } |
| |
/*
 * Nothing to undo here: in the global kernel space configuration,
 * cachepart_min_iglobal() only checks sizes and never writes the
 * partition registers.
 */
void cachepart_restore_iglobal(unsigned int *old_val)
{
	/* cachepart_min_iglobal() hasn't changed anything */
}
| |
| #else |
/*
 * cachepart_min_iglobal() - make at least @min_size bytes of global icache
 * available.
 * @min_size:	minimum number of bytes of global icache required.
 * @old_val:	filled with the original ICPART register value so that
 *		cachepart_restore_iglobal() can undo the change.
 *
 * If the requirement cannot currently be met, halve this thread's local
 * icache partition and donate the freed half to the global partition.
 * Returns 0 on success or -ENOMEM if enough global icache cannot be made
 * available.  Runs under the global lock since the partition registers are
 * shared between hardware threads.
 */
int cachepart_min_iglobal(unsigned int min_size, unsigned int *old_val)
{
	unsigned int lflags;
	unsigned int cpart;
	unsigned long cpart_addr = SYSC_ICPART(hard_processor_id());
	unsigned int ic_size = get_icache_size();
	unsigned int lic_size;
	unsigned int temp;
	int ret = 0;

	__global_lock2(lflags);
	/* Save the current partitioning for cachepart_restore_iglobal(). */
	*old_val = cpart = metag_in32(cpart_addr);

	/* Is the condition possible by halving this thread's local cache? */
	lic_size = cpart & SYSC_xCPARTL_AND_BITS;
	if (lic_size) {
		/*
		 * Bytes available after halving the local partition: the AND
		 * field encodes (field + 1) sixteenths of the cache (see
		 * get_global_icache_size()); ">> 5" rather than ">> 4" gives
		 * half of that.
		 */
		temp = (ic_size * ((lic_size >> SYSC_xCPARTL_AND_S) + 1)) >> 5;
		if (temp < min_size) {
			pr_err("cachepart_min_iglobal: max global icache %u < "
			       "%u\n", temp, min_size);
			ret = -ENOMEM;
			goto out;
		}
		/*
		 * It is invalid to attempt to operate the cache with fewer than
		 * four cache lines allocated to a region (unless it is never
		 * used).
		 */
		if (temp < 4*ICACHE_LINE_BYTES) {
			pr_err("cachepart_min_iglobal: available global icache "
			       "%u < 4 cache lines\n", temp);
			ret = -ENOMEM;
			goto out;
		}
	} else {
		pr_err("cachepart_min_iglobal: no global icache available\n");
		ret = -ENOMEM;
		goto out;
	}

	/* Clear the fields about to be rewritten. */
	cpart &= ~(SYSC_xCPARTL_AND_BITS | SYSC_xCPARTG_AND_BITS |
			SYSC_xCPARTG_OR_BITS);
	/* Halve local size, and write into local and global size */
	/* The extra +1 shift moves the field to its value AND halves it. */
	lic_size >>= (SYSC_xCPARTL_AND_S + 1);
	cpart |= lic_size << SYSC_xCPARTL_AND_S;
	cpart |= lic_size << SYSC_xCPARTG_AND_S;
	/* Adjust global offset = local offset + local size */
	/* NOTE(review): assumes the OR (offset) fields use the same
	 * sixteenths units as the AND (size) fields — confirm against the
	 * SYSC_xCPART register documentation. */
	temp = (cpart & SYSC_xCPARTL_OR_BITS) >> SYSC_xCPARTL_OR_S;
	temp += lic_size + 1;
	cpart |= temp << SYSC_xCPARTG_OR_S;

	/* The icache must be off and flushed while it is repartitioned. */
	/* Disable icache */
	metag_out32(0, MMCU_ICACHE_CTRL_ADDR);
	/* Flush icache */
	metag_out32(1, SYSC_ICACHE_FLUSH);
	/* Re-partition icache */
	metag_out32(cpart, cpart_addr);
	/* Enable icache */
	metag_out32(1, MMCU_ICACHE_CTRL_ADDR);

out:
	__global_unlock2(lflags);
	return ret;
}
| |
/*
 * cachepart_restore_iglobal() - restore icache partitioning saved by
 * cachepart_min_iglobal().
 * @old_val:	register value previously stored by cachepart_min_iglobal().
 *
 * Only touches the hardware when the current ICPART value differs from the
 * saved one.  The icache is disabled and flushed around the register write,
 * mirroring the sequence in cachepart_min_iglobal().
 */
void cachepart_restore_iglobal(unsigned int *old_val)
{
	unsigned int flags;
	unsigned long cpart_addr = SYSC_ICPART(hard_processor_id());

	/* NOTE(review): this comparison runs outside the global lock;
	 * assumes no other thread rewrites this thread's ICPART
	 * concurrently — confirm. */
	if (*old_val != metag_in32(cpart_addr)) {
		__global_lock2(flags);
		/* Disable icache */
		metag_out32(0, MMCU_ICACHE_CTRL_ADDR);
		/* Flush icache */
		metag_out32(1, SYSC_ICACHE_FLUSH);
		/* Re-partition icache */
		metag_out32(*old_val, cpart_addr);
		/* Enable icache */
		metag_out32(1, MMCU_ICACHE_CTRL_ADDR);
		__global_unlock2(flags);
	}
}
| #endif |
| |
| void check_for_cache_aliasing(int thread_id) |
| { |
| unsigned int thread_cache_size; |
| unsigned int cache_type; |
| for (cache_type = ICACHE; cache_type <= DCACHE; cache_type++) { |
| thread_cache_size = |
| get_thread_cache_size(cache_type, thread_id); |
| if (thread_cache_size < 0) |
| pr_emerg("Can't read %s cache size\n", |
| cache_type ? "DCACHE" : "ICACHE"); |
| else if (thread_cache_size == 0) |
| /* Cache is off. No need to check for aliasing */ |
| continue; |
| if (thread_cache_size / CACHE_ASSOCIATIVITY > PAGE_SIZE) { |
| pr_emerg("Potential cache aliasing detected in %s on Thread %d\n", |
| cache_type ? "DCACHE" : "ICACHE", thread_id); |
| pr_warn("Total %s size: %u bytes\n", |
| cache_type ? "DCACHE" : "ICACHE", |
| cache_type ? get_dcache_size() |
| : get_icache_size()); |
| pr_warn("Thread %s size: %d bytes\n", |
| cache_type ? "CACHE" : "ICACHE", |
| thread_cache_size); |
| pr_warn("Page Size: %lu bytes\n", PAGE_SIZE); |
| panic("Potential cache aliasing detected"); |
| } |
| } |
| } |
| |
| #else |
| |
/*
 * The CORE_CONFIG2 register (and hence cache size information) is not
 * available on this core, so no aliasing check can be performed.
 */
void check_for_cache_aliasing(int thread_id)
{
}
| |
| #endif |