Mirror of https://github.com/AsahiLinux/u-boot, synced 2024-12-24 20:13:39 +00:00
f0debb2136
This board has not been converted to CONFIG_DM by the deadline of v2020.01 and is missing other conversions which depend on this as well. Remove it. As this is the last SH4A board, remove that support as well.

Cc: Marek Vasut <marek.vasut+renesas@gmail.com>
Signed-off-by: Tom Rini <trini@konsulko.com>
262 lines · 6.6 KiB · C
#ifndef __ASM_SH_SYSTEM_H
#define __ASM_SH_SYSTEM_H

/*
 * Copyright (C) 1999, 2000 Niibe Yutaka & Kaz Kojima
 * Copyright (C) 2002 Paul Mundt
 *
 * from linux kernel code.
 */

#include <asm/irqflags.h>
#include <asm/types.h>

/*
 * switch_to() should switch tasks to task nr n, first
 */

#define switch_to(prev, next, last) do { \
	struct task_struct *__last; \
	register unsigned long *__ts1 __asm__ ("r1") = &prev->thread.sp; \
	register unsigned long *__ts2 __asm__ ("r2") = &prev->thread.pc; \
	register unsigned long *__ts4 __asm__ ("r4") = (unsigned long *)prev; \
	register unsigned long *__ts5 __asm__ ("r5") = (unsigned long *)next; \
	register unsigned long *__ts6 __asm__ ("r6") = &next->thread.sp; \
	register unsigned long __ts7 __asm__ ("r7") = next->thread.pc; \
	__asm__ __volatile__ (".balign 4\n\t" \
			      "stc.l gbr, @-r15\n\t" \
			      "sts.l pr, @-r15\n\t" \
			      "mov.l r8, @-r15\n\t" \
			      "mov.l r9, @-r15\n\t" \
			      "mov.l r10, @-r15\n\t" \
			      "mov.l r11, @-r15\n\t" \
			      "mov.l r12, @-r15\n\t" \
			      "mov.l r13, @-r15\n\t" \
			      "mov.l r14, @-r15\n\t" \
			      "mov.l r15, @r1 ! save SP\n\t" \
			      "mov.l @r6, r15 ! change to new stack\n\t" \
			      "mova 1f, %0\n\t" \
			      "mov.l %0, @r2 ! save PC\n\t" \
			      "mov.l 2f, %0\n\t" \
			      "jmp @%0 ! call __switch_to\n\t" \
			      " lds r7, pr ! with return to new PC\n\t" \
			      ".balign 4\n" \
			      "2:\n\t" \
			      ".long __switch_to\n" \
			      "1:\n\t" \
			      "mov.l @r15+, r14\n\t" \
			      "mov.l @r15+, r13\n\t" \
			      "mov.l @r15+, r12\n\t" \
			      "mov.l @r15+, r11\n\t" \
			      "mov.l @r15+, r10\n\t" \
			      "mov.l @r15+, r9\n\t" \
			      "mov.l @r15+, r8\n\t" \
			      "lds.l @r15+, pr\n\t" \
			      "ldc.l @r15+, gbr\n\t" \
			      : "=z" (__last) \
			      : "r" (__ts1), "r" (__ts2), "r" (__ts4), \
				"r" (__ts5), "r" (__ts6), "r" (__ts7) \
			      : "r3", "t"); \
	last = __last; \
} while (0)

/*
 * On SMP systems, when the scheduler does migration-cost autodetection,
 * it needs a way to flush as much of the CPU's caches as possible.
 *
 * TODO: fill this in!
 */
static inline void sched_cacheflush(void)
{
}

static inline unsigned long tas(volatile int *m)
{
	unsigned long retval;

	__asm__ __volatile__ ("tas.b @%1\n\t"
			      "movt %0"
			      : "=r" (retval): "r" (m): "t", "memory");
	return retval;
}
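/*
 * Usage sketch (illustrative only, not part of the original header): tas.b
 * atomically reads the byte, sets its MSB, and puts "was zero" into the T
 * bit, so tas() returns nonzero exactly when the caller won the byte. A
 * busy-wait lock built on it could look like this, with my_lock being a
 * hypothetical lock variable:
 *
 *	static volatile int my_lock;
 *
 *	while (!tas(&my_lock))
 *		;			// spin until the byte was seen as zero
 *	// ... critical section ...
 *	my_lock = 0;			// release
 */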
/*
 * A brief note on ctrl_barrier(), the control register write barrier.
 *
 * Legacy SH cores typically require a sequence of 8 nops after
 * modification of a control register in order for the changes to take
 * effect. On newer cores (like the sh4a and sh5) this is accomplished
 * with icbi.
 *
 * Historically we have only done this type of barrier for the MMUCR, but
 * it's also necessary for the CCR, so we make it generic here instead.
 */
#define mb()	__asm__ __volatile__ ("": : :"memory")
#define rmb()	mb()
#define wmb()	__asm__ __volatile__ ("": : :"memory")
#define ctrl_barrier()	__asm__ __volatile__ ("nop;nop;nop;nop;nop;nop;nop;nop")
#define read_barrier_depends()	do { } while(0)

#ifdef CONFIG_SMP
#define smp_mb()	mb()
#define smp_rmb()	rmb()
#define smp_wmb()	wmb()
#define smp_read_barrier_depends()	read_barrier_depends()
#else
#define smp_mb()	barrier()
#define smp_rmb()	barrier()
#define smp_wmb()	barrier()
#define smp_read_barrier_depends()	do { } while(0)
#endif

#define set_mb(var, value) do { xchg(&var, value); } while (0)
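/*
 * Illustrative use of set_mb() (an assumption about intent, not taken from
 * this file): it publishes a new value through xchg(), so the store is
 * ordered like a store followed by a full barrier. With a hypothetical
 * shared variable "flag":
 *
 *	set_mb(flag, 1);	// roughly "flag = 1; mb();"
 */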
/*
 * Jump to P2 area.
 * When handling TLB or caches, we need to do it from P2 area.
 */
#define jump_to_P2() \
do { \
	unsigned long __dummy; \
	__asm__ __volatile__( \
		"mov.l 1f, %0\n\t" \
		"or %1, %0\n\t" \
		"jmp @%0\n\t" \
		" nop\n\t" \
		".balign 4\n" \
		"1: .long 2f\n" \
		"2:" \
		: "=&r" (__dummy) \
		: "r" (0x20000000)); \
} while (0)

/*
 * Back to P1 area.
 */
#define back_to_P1() \
do { \
	unsigned long __dummy; \
	ctrl_barrier(); \
	__asm__ __volatile__( \
		"mov.l 1f, %0\n\t" \
		"jmp @%0\n\t" \
		" nop\n\t" \
		".balign 4\n" \
		"1: .long 2f\n" \
		"2:" \
		: "=&r" (__dummy)); \
} while (0)
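/*
 * Typical pattern (sketch; flush_my_cache() is a hypothetical helper, not
 * something defined here): cache and TLB manipulation has to run from the
 * uncached P2 alias, so callers bracket the work with these macros:
 *
 *	jump_to_P2();		// continue executing from uncached P2
 *	flush_my_cache();	// touch CCR / cache arrays safely here
 *	back_to_P1();		// return to the normal cached P1 mapping
 */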
static inline unsigned long xchg_u32(volatile u32 *m, unsigned long val)
{
	unsigned long flags, retval;

	local_irq_save(flags);
	retval = *m;
	*m = val;
	local_irq_restore(flags);
	return retval;
}

static inline unsigned long xchg_u8(volatile u8 *m, unsigned long val)
{
	unsigned long flags, retval;

	local_irq_save(flags);
	retval = *m;
	*m = val & 0xff;
	local_irq_restore(flags);
	return retval;
}

extern void __xchg_called_with_bad_pointer(void);

#define __xchg(ptr, x, size) \
({ \
	unsigned long __xchg__res; \
	volatile void *__xchg_ptr = (ptr); \
	switch (size) { \
	case 4: \
		__xchg__res = xchg_u32(__xchg_ptr, x); \
		break; \
	case 1: \
		__xchg__res = xchg_u8(__xchg_ptr, x); \
		break; \
	default: \
		__xchg_called_with_bad_pointer(); \
		__xchg__res = x; \
		break; \
	} \
	\
	__xchg__res; \
})

#define xchg(ptr,x) \
	((__typeof__(*(ptr)))__xchg((ptr),(unsigned long)(x), sizeof(*(ptr))))
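/*
 * Usage sketch (illustrative only): xchg() stores the new value and returns
 * the previous one; on this uniprocessor implementation atomicity comes
 * from disabling interrupts around the access. With a hypothetical flag
 * word "pending":
 *
 *	static volatile u32 pending;
 *	u32 old = xchg(&pending, 0);	// fetch and clear in one step
 */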
static inline unsigned long __cmpxchg_u32(volatile int * m, unsigned long old,
	unsigned long new)
{
	__u32 retval;
	unsigned long flags;

	local_irq_save(flags);
	retval = *m;
	if (retval == old)
		*m = new;
	local_irq_restore(flags);	/* implies memory barrier */
	return retval;
}

/* This function doesn't exist, so you'll get a linker error
 * if something tries to do an invalid cmpxchg(). */
extern void __cmpxchg_called_with_bad_pointer(void);

#define __HAVE_ARCH_CMPXCHG 1

static inline unsigned long __cmpxchg(volatile void * ptr, unsigned long old,
		unsigned long new, int size)
{
	switch (size) {
	case 4:
		return __cmpxchg_u32(ptr, old, new);
	}
	__cmpxchg_called_with_bad_pointer();
	return old;
}

#define cmpxchg(ptr,o,n) \
({ \
	__typeof__(*(ptr)) _o_ = (o); \
	__typeof__(*(ptr)) _n_ = (n); \
	(__typeof__(*(ptr))) __cmpxchg((ptr), (unsigned long)_o_, \
		(unsigned long)_n_, sizeof(*(ptr))); \
})
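/*
 * Usage sketch (illustrative only; only 4-byte objects are supported by
 * __cmpxchg above): cmpxchg() stores the new value only if the location
 * still holds the expected old value, and returns what was actually there.
 * A retry loop over a hypothetical counter could look like:
 *
 *	static volatile int counter;
 *	int old, new;
 *
 *	do {
 *		old = counter;
 *		new = old + 1;
 *	} while (cmpxchg(&counter, old, new) != old);
 */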
extern void *set_exception_table_vec(unsigned int vec, void *handler);

static inline void *set_exception_table_evt(unsigned int evt, void *handler)
{
	return set_exception_table_vec(evt >> 5, handler);
}
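/*
 * Note (sketch, based on the shift above): SH exception event codes
 * (EXPEVT/INTEVT) are spaced 0x20 apart, so "evt >> 5" turns an event code
 * into a vector-table index. For example, event code 0x1E0 maps to entry
 * 0x1E0 >> 5 = 0xf; my_handler below is a hypothetical handler:
 *
 *	set_exception_table_evt(0x1E0, my_handler);
 */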
/* XXX
 * disable hlt during certain critical i/o operations
 */
#define HAVE_DISABLE_HLT
void disable_hlt(void);
void enable_hlt(void);

#define arch_align_stack(x) (x)

static inline void trigger_address_error(void)
{
	set_bl_bit();
	__asm__ __volatile__ (
		"mov.l @%1, %0"
		:
		: "r" (0x10000000), "r" (0x80000001)
	);
}

#endif