/* MN10300 System definitions
 *
 * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public Licence
 * as published by the Free Software Foundation; either version
 * 2 of the Licence, or (at your option) any later version.
 */
#ifndef _ASM_SYSTEM_H
#define _ASM_SYSTEM_H

#include <asm/cpu-regs.h>

#ifdef __KERNEL__
#ifndef __ASSEMBLY__

#include <linux/kernel.h>
#include <linux/irqflags.h>

struct task_struct;
struct thread_struct;

extern asmlinkage
struct task_struct *__switch_to(struct thread_struct *prev,
				struct thread_struct *next,
				struct task_struct *prev_task);

/* context switching is now performed out-of-line in switch_to.S */
#define switch_to(prev, next, last)					\
do {									\
	current->thread.wchan = (u_long) __builtin_return_address(0);	\
	(last) = __switch_to(&(prev)->thread, &(next)->thread, (prev)); \
	mb();								\
	current->thread.wchan = 0;					\
} while (0)

#define arch_align_stack(x) (x)

#define nop() asm volatile ("nop")

#endif /* !__ASSEMBLY__ */

/*
 * Force strict CPU ordering.
 * And yes, this is required on UP too when we're talking
 * to devices.
 *
 * For now, "wmb()" doesn't actually do anything, as all
 * Intel CPUs follow what Intel calls a *Processor Order*,
 * in which all writes are seen in program order even
 * outside the CPU.
 *
 * I expect future Intel CPUs to have a weaker ordering,
 * but I'd also expect them to finally get their act together
 * and add some real memory barriers if so.
 *
 * Some non-Intel clones support out-of-order stores; wmb()
 * ceases to be a no-op for these.
 */

#define mb()	asm volatile ("": : :"memory")
#define rmb()	mb()
#define wmb()	asm volatile ("": : :"memory")

#ifdef CONFIG_SMP
#define smp_mb()	mb()
#define smp_rmb()	rmb()
#define smp_wmb()	wmb()
#else
#define smp_mb()	barrier()
#define smp_rmb()	barrier()
#define smp_wmb()	barrier()
#endif

#define set_mb(var, value)  do { var = value; mb(); } while (0)
#define set_wmb(var, value) do { var = value; wmb(); } while (0)

#define read_barrier_depends()		do {} while (0)
#define smp_read_barrier_depends()	do {} while (0)

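/*
 * Illustrative sketch only (not built): how smp_wmb()/smp_rmb() pair up
 * in a lockless producer/consumer.  The variables and functions below
 * are hypothetical and exist purely for this example.
 */
#if 0
static unsigned long example_payload;
static int example_ready;

static void example_producer(void)
{
	example_payload = 42;	/* write the data first... */
	smp_wmb();		/* ...and order it before the flag update */
	example_ready = 1;
}

static unsigned long example_consumer(void)
{
	while (!example_ready)	/* poll for the flag */
		barrier();	/* force the flag to be re-read */
	smp_rmb();		/* order the flag read before the data read */
	return example_payload;	/* cannot observe a stale payload now */
}
#endif
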
/*****************************************************************************/
/*
 * MN10300 doesn't actually have an exchange instruction
 */
#ifndef __ASSEMBLY__

struct __xchg_dummy { unsigned long a[100]; };
#define __xg(x) ((struct __xchg_dummy *)(x))

/* Emulate an atomic exchange by briefly disabling interrupts; this is
 * atomic with respect to the local CPU only. */
static inline
unsigned long __xchg(volatile unsigned long *m, unsigned long val)
{
	unsigned long retval;
	unsigned long flags;

	local_irq_save(flags);
	retval = *m;
	*m = val;
	local_irq_restore(flags);
	return retval;
}

#define xchg(ptr, v)						\
	((__typeof__(*(ptr))) __xchg((unsigned long *)(ptr),	\
				     (unsigned long)(v)))

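/*
 * Illustrative sketch only (not built): xchg() as a test-and-set
 * primitive.  "example_lock" and the helpers are hypothetical; real
 * code would use the kernel's spinlock API instead.
 */
#if 0
static unsigned long example_lock;

static void example_acquire(void)
{
	/* spin until the value we displaced was 0, i.e. we own the lock */
	while (xchg(&example_lock, 1) != 0)
		barrier();
}

static void example_release(void)
{
	mb();			/* order critical-section accesses first */
	example_lock = 0;
}
#endif
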
/* Compare-and-swap, emulated the same way: the old value is returned
 * whether or not the store was performed. */
static inline unsigned long __cmpxchg(volatile unsigned long *m,
				      unsigned long old, unsigned long new)
{
	unsigned long retval;
	unsigned long flags;

	local_irq_save(flags);
	retval = *m;
	if (retval == old)
		*m = new;
	local_irq_restore(flags);
	return retval;
}

#define cmpxchg(ptr, o, n)					\
	((__typeof__(*(ptr))) __cmpxchg((unsigned long *)(ptr),	\
					(unsigned long)(o),	\
					(unsigned long)(n)))

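/*
 * Illustrative sketch only (not built): the usual cmpxchg() retry loop,
 * here used to build an atomic add.  "example_atomic_add" is a
 * hypothetical helper, not part of this header.
 */
#if 0
static unsigned long example_atomic_add(volatile unsigned long *p,
					unsigned long n)
{
	unsigned long old;

	/* retry if another update slipped in between the read and the
	 * compare-and-swap */
	do {
		old = *p;
	} while (cmpxchg(p, old, old + n) != old);

	return old + n;
}
#endif
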
#endif /* !__ASSEMBLY__ */

#endif /* __KERNEL__ */
#endif /* _ASM_SYSTEM_H */