/* SPDX-License-Identifier: GPL-2.0 */
// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd.

#ifndef __ASM_CSKY_IO_H
#define __ASM_CSKY_IO_H

#include <abi/pgtable-bits.h>
#include <linux/types.h>
#include <linux/version.h>

extern void __iomem *ioremap(phys_addr_t offset, size_t size);
extern void iounmap(void *addr);
extern int remap_area_pages(unsigned long address, phys_addr_t phys_addr,
			    size_t size, unsigned long flags);

/*
 * I/O memory access primitives. Reads are ordered relative to any
 * following Normal memory access. Writes are ordered relative to any prior
 * Normal memory access.
 *
 * For CACHEV1 (807, 810), a store instruction can fast-retire, so we need
 * an extra mb() after the store to keep it from fast-retiring.
 *
 * For CACHEV2 (860), a store instruction to a page marked
 * PAGE_ATTR_NO_BUFFERABLE won't fast-retire, so no trailing mb() is needed.
 *
 * An illustrative usage sketch follows the accessor macros below.
 */
#define readb(c) ({ u8 __v = readb_relaxed(c); rmb(); __v; })
#define readw(c) ({ u16 __v = readw_relaxed(c); rmb(); __v; })
#define readl(c) ({ u32 __v = readl_relaxed(c); rmb(); __v; })
#ifdef CONFIG_CPU_HAS_CACHEV2
#define writeb(v,c) ({ wmb(); writeb_relaxed((v),(c)); })
#define writew(v,c) ({ wmb(); writew_relaxed((v),(c)); })
#define writel(v,c) ({ wmb(); writel_relaxed((v),(c)); })
#else
#define writeb(v,c) ({ wmb(); writeb_relaxed((v),(c)); mb(); })
#define writew(v,c) ({ wmb(); writew_relaxed((v),(c)); mb(); })
#define writel(v,c) ({ wmb(); writel_relaxed((v),(c)); mb(); })
#endif
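
/*
 * Illustrative sketch only, not part of this header's API: how a driver
 * might use the accessors above. The MMIO base address and register
 * offsets are made-up placeholders.
 *
 *	void __iomem *regs = ioremap(0xffc02000, 0x100);
 *	u32 status;
 *
 *	if (!regs)
 *		return -ENOMEM;
 *
 *	// writel() issues wmb() first, so earlier normal-memory writes
 *	// (e.g. filling a DMA descriptor) are visible before this store;
 *	// on CACHEV1 a trailing mb() keeps the store from fast-retiring.
 *	writel(0x1, regs + 0x04);
 *
 *	// readl() issues rmb() after the load, so the value is observed
 *	// before any following normal-memory access that depends on it.
 *	status = readl(regs + 0x00);
 *
 *	iounmap(regs);
 */
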
#define ioremap_nocache(phy, sz) ioremap(phy, sz)
#define ioremap_wc ioremap_nocache
#define ioremap_wt ioremap_nocache

#include <asm-generic/io.h>

#endif /* __ASM_CSKY_IO_H */