CRIS changes for 4.1

-----BEGIN PGP SIGNATURE-----
 Version: GnuPG v1.4.12 (GNU/Linux)
 
 iEYEABECAAYFAlU70nMACgkQ31LbvUHyf1cYgwCfSmPhyLFmr0pGM/BxsVY7K1v6
 PaEAn2+7xfZV38E6hwrGMrT42ZvKyL6r
 =LHQU
 -----END PGP SIGNATURE-----

Merge tag 'cris-for-4.1' of git://git.kernel.org/pub/scm/linux/kernel/git/jesper/cris

Pull arch/cris updates from Jesper Nilsson:
 "Some much needed love for the CRIS-port.

  There's a bunch of changes this time, giving the CRISv32 port a bit of
  modern makeover with device-tree, irq domain and gpiolib support, and
  more switchover to generic frameworks.

  Some small fixes and removal of the theoretical SMP support brings up
  the rear"

* tag 'cris-for-4.1' of git://git.kernel.org/pub/scm/linux/kernel/git/jesper/cris:
  cris: fix integer overflow in ELF_ET_DYN_BASE
  CRISv32: use GENERIC_SCHED_CLOCK
  CRISv32: use MMIO clocksource
  CRISv32: use generic clockevents
  CRIS: use generic headers via Kbuild
  CRIS: use generic cmpxchg.h
  CRIS: use generic atomic.h
  CRIS: use generic atomic bitops
  CRISv10: remove redundant macros from system.h
  CRIS: remove SMP code
  CRISv32: don't enable irqs in INIT_THREAD
  CRISv32: handle multiple signals
  CRISv32: prevent bogus restarts on sigreturn
  CRISv32: don't attempt syscall restart on irq exit
  Add binding documentation for CRIS
  CRIS: add Axis 88 board device tree
  CRISv32: add device tree support
  CRISv32: add irq domains support
  CRIS: enable GPIOLIB
Linus Torvalds 2015-04-26 13:31:05 -07:00
commit 7f9f44308c
50 changed files with 326 additions and 1146 deletions

@@ -0,0 +1,9 @@
Axis Communications AB
ARTPEC series SoC Device Tree Bindings
CRISv32 based SoCs are ETRAX FS and ARTPEC-3:
- compatible = "axis,crisv32";
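
For illustration, a root node using this binding might look as follows.
This is a sketch only: pairing a board compatible with the SoC
compatible "axis,crisv32" at the root is common device-tree practice,
not something the binding text above mandates.

Example:

	/ {
		compatible = "axis,dev88", "axis,crisv32";
	};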

@@ -0,0 +1,8 @@
Boards based on the CRIS SoCs:
Required root node properties:
- compatible = should be one or more of the following:
- "axis,dev88" - for Axis devboard 88 with ETRAX FS
Optional:
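
As a usage sketch, the root of a dev88 board DTS (mirroring the
dev88.dts file added elsewhere in this commit) would look like:

	/dts-v1/;
	/include/ "etraxfs.dtsi"

	/ {
		model = "Axis 88 Developer Board";
		compatible = "axis,dev88";
	};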

@@ -0,0 +1,23 @@
* CRISv32 Interrupt Controller
Interrupt controller for the CRISv32 SoCs.
Main node required properties:
- compatible : should be:
"axis,crisv32-intc"
- interrupt-controller : Identifies the node as an interrupt controller
- #interrupt-cells : Specifies the number of cells needed to encode an
interrupt source. The type shall be a <u32> and the value shall be 1.
- reg: physical base address and size of the intc registers map.
Example:
intc: interrupt-controller {
compatible = "axis,crisv32-intc";
reg = <0xb001c000 0x1000>;
interrupt-controller;
#interrupt-cells = <1>;
};
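
A consumer node then encodes each interrupt as a single cell. The
sketch below is derived from the etraxfs.dtsi added in this commit;
that file sets interrupt-parent once at the root, whereas here it is
spelled out on the node for clarity (68 is the UART vector it uses):

	serial@b0026000 {
		compatible = "axis,etraxfs-uart";
		reg = <0xb0026000 0x1000>;
		interrupt-parent = <&intc>;
		interrupts = <68>;
	};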

@@ -46,12 +46,18 @@ config CRIS
select ARCH_WANT_IPC_PARSE_VERSION
select GENERIC_IRQ_SHOW
select GENERIC_IOMAP
select GENERIC_SMP_IDLE_THREAD if ETRAX_ARCH_V32
select GENERIC_CMOS_UPDATE
select MODULES_USE_ELF_RELA
select CLONE_BACKWARDS2
select OLD_SIGSUSPEND
select OLD_SIGACTION
select ARCH_REQUIRE_GPIOLIB
select IRQ_DOMAIN if ETRAX_ARCH_V32
select OF if ETRAX_ARCH_V32
select OF_EARLY_FLATTREE if ETRAX_ARCH_V32
select CLKSRC_MMIO if ETRAX_ARCH_V32
select GENERIC_CLOCKEVENTS if ETRAX_ARCH_V32
select GENERIC_SCHED_CLOCK if ETRAX_ARCH_V32
config HZ
int
@@ -61,6 +67,10 @@ config NR_CPUS
int
default "1"
config BUILTIN_DTB
string "DTB to build into the kernel image"
depends on OF
source "init/Kconfig"
source "kernel/Kconfig.freezer"

@@ -40,6 +40,10 @@ else
MACH :=
endif
ifneq ($(CONFIG_BUILTIN_DTB),"")
core-$(CONFIG_OF) += arch/cris/boot/dts/
endif
LD = $(CROSS_COMPILE)ld -mcrislinux
OBJCOPYFLAGS := -O binary -R .note -R .comment -S

@@ -9,7 +9,6 @@ obj-y := entry.o traps.o irq.o debugport.o \
process.o ptrace.o setup.o signal.o traps.o time.o \
cache.o cacheflush.o
obj-$(CONFIG_SMP) += smp.o
obj-$(CONFIG_ETRAX_KGDB) += kgdb.o kgdb_asm.o
obj-$(CONFIG_ETRAX_FAST_TIMER) += fasttimer.o
obj-$(CONFIG_MODULES) += crisksyms.o

@@ -99,6 +99,8 @@ ret_from_kernel_thread:
.type ret_from_intr,@function
ret_from_intr:
moveq 0, $r9 ; not a syscall
;; Check for resched if preemptive kernel, or if we're going back to
;; user-mode. This test matches the user_regs(regs) macro. Don't simply
;; test CCS since that doesn't necessarily reflect what mode we'll
@@ -145,7 +147,7 @@ system_call:
;; Stack-frame similar to the irq heads, which is reversed in
;; ret_from_sys_call.
sub.d 92, $sp ; Skip EXS and EDA.
sub.d 92, $sp ; Skip EDA.
movem $r13, [$sp]
move.d $sp, $r8
addq 14*4, $r8
@@ -156,8 +158,9 @@ system_call:
move $ccs, $r4
move $srp, $r5
move $erp, $r6
move.d $r9, $r7 ; Store syscall number in EXS
subq 4, $sp
movem $r6, [$r8]
movem $r7, [$r8]
ei ; Enable interrupts while processing syscalls.
move.d $r10, [$sp]
@@ -277,44 +280,15 @@ _syscall_exit_work:
.type _work_pending,@function
_work_pending:
addoq +TI_flags, $r0, $acr
move.d [$acr], $r10
btstq TIF_NEED_RESCHED, $r10 ; Need resched?
bpl _work_notifysig ; No, must be signal/notify.
nop
.size _work_pending, . - _work_pending
.type _work_resched,@function
_work_resched:
move.d $r9, $r1 ; Preserve R9.
jsr schedule
nop
move.d $r1, $r9
di
addoq +TI_flags, $r0, $acr
move.d [$acr], $r1
and.d _TIF_WORK_MASK, $r1 ; Ignore syscall trace counter.
beq _Rexit
nop
btstq TIF_NEED_RESCHED, $r1
bmi _work_resched ; current->work.need_resched.
nop
.size _work_resched, . - _work_resched
.type _work_notifysig,@function
_work_notifysig:
;; Deal with pending signals and notify-resume requests.
addoq +TI_flags, $r0, $acr
move.d [$acr], $r12 ; The thread_info_flags parameter.
move.d $sp, $r11 ; The regs param.
jsr do_notify_resume
move.d $r9, $r10 ; do_notify_resume syscall/irq param.
jsr do_work_pending
move.d $r9, $r10 ; The syscall/irq param.
ba _Rexit
nop
.size _work_notifysig, . - _work_notifysig
.size _work_pending, . - _work_pending
;; We get here as a sidetrack when we've entered a syscall with the
;; trace-bit set. We need to call do_syscall_trace and then continue

@@ -52,11 +52,6 @@ tstart:
GIO_INIT
#ifdef CONFIG_SMP
secondary_cpu_entry: /* Entry point for secondary CPUs */
di
#endif
;; Setup and enable the MMU. Use same configuration for both the data
;; and the instruction MMU.
;;
@@ -164,33 +159,6 @@ secondary_cpu_entry: /* Entry point for secondary CPUs */
nop
nop
#ifdef CONFIG_SMP
;; Read CPU ID
move 0, $srs
nop
nop
nop
move $s12, $r0
cmpq 0, $r0
beq master_cpu
nop
slave_cpu:
; Time to boot-up. Get stack location provided by master CPU.
move.d smp_init_current_idle_thread, $r1
move.d [$r1], $sp
add.d 8192, $sp
move.d ebp_start, $r0 ; Defined in linker-script.
move $r0, $ebp
jsr smp_callin
nop
master_cpu:
/* Set up entry point for secondary CPUs. The boot ROM has set up
* EBP at start of internal memory. The CPU will get there
* later when we issue an IPI to them... */
move.d MEM_INTMEM_START + IPI_INTR_VECT * 4, $r0
move.d secondary_cpu_entry, $r1
move.d $r1, [$r0]
#endif
; Check if starting from DRAM (network->RAM boot or unpacked
; compressed kernel), or directly from flash.
lapcq ., $r0

@@ -10,6 +10,8 @@
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/profile.h>
#include <linux/of.h>
#include <linux/of_irq.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/threads.h>
@@ -56,9 +58,6 @@ struct cris_irq_allocation irq_allocations[NR_REAL_IRQS] =
static unsigned long irq_regs[NR_CPUS] =
{
regi_irq,
#ifdef CONFIG_SMP
regi_irq2,
#endif
};
#if NR_REAL_IRQS > 32
@@ -431,6 +430,19 @@ crisv32_do_multiple(struct pt_regs* regs)
irq_exit();
}
static int crisv32_irq_map(struct irq_domain *h, unsigned int virq,
irq_hw_number_t hw_irq_num)
{
irq_set_chip_and_handler(virq, &crisv32_irq_type, handle_simple_irq);
return 0;
}
static struct irq_domain_ops crisv32_irq_ops = {
.map = crisv32_irq_map,
.xlate = irq_domain_xlate_onecell,
};
/*
* This is called by start_kernel. It fixes the IRQ masks and setup the
* interrupt vector table to point to bad_interrupt pointers.
@@ -441,6 +453,8 @@ init_IRQ(void)
int i;
int j;
reg_intr_vect_rw_mask vect_mask = {0};
struct device_node *np;
struct irq_domain *domain;
/* Clear all interrupts masks. */
for (i = 0; i < NBR_REGS; i++)
@@ -449,10 +463,15 @@ init_IRQ(void)
for (i = 0; i < 256; i++)
etrax_irv->v[i] = weird_irq;
/* Point all IRQ's to bad handlers. */
np = of_find_compatible_node(NULL, NULL, "axis,crisv32-intc");
domain = irq_domain_add_legacy(np, NR_IRQS - FIRST_IRQ,
FIRST_IRQ, FIRST_IRQ,
&crisv32_irq_ops, NULL);
BUG_ON(!domain);
irq_set_default_host(domain);
of_node_put(np);
for (i = FIRST_IRQ, j = 0; j < NR_IRQS; i++, j++) {
irq_set_chip_and_handler(j, &crisv32_irq_type,
handle_simple_irq);
set_exception_vector(i, interrupt[j]);
}

@@ -63,11 +63,6 @@ int show_cpuinfo(struct seq_file *m, void *v)
info = &cpinfo[ARRAY_SIZE(cpinfo) - 1];
#ifdef CONFIG_SMP
if (!cpu_online(cpu))
return 0;
#endif
revision = rdvr();
for (i = 0; i < ARRAY_SIZE(cpinfo); i++) {

@@ -72,6 +72,9 @@ restore_sigcontext(struct pt_regs *regs, struct sigcontext __user *sc)
/* Make sure that the user-mode flag is set. */
regs->ccs |= (1 << (U_CCS_BITNR + CCS_SHIFT));
/* Don't perform syscall restarting */
regs->exs = -1;
/* Restore the old USP. */
err |= __get_user(old_usp, &sc->usp);
wrusp(old_usp);
@@ -425,6 +428,8 @@ do_signal(int canrestart, struct pt_regs *regs)
{
struct ksignal ksig;
canrestart = canrestart && ((int)regs->exs >= 0);
/*
* The common case should go fast, which is why this point is
* reached from kernel-mode. If that's the case, just return

@@ -1,358 +0,0 @@
#include <linux/types.h>
#include <asm/delay.h>
#include <irq.h>
#include <hwregs/intr_vect.h>
#include <hwregs/intr_vect_defs.h>
#include <asm/tlbflush.h>
#include <asm/mmu_context.h>
#include <hwregs/asm/mmu_defs_asm.h>
#include <hwregs/supp_reg.h>
#include <linux/atomic.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/timex.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/cpumask.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#define IPI_SCHEDULE 1
#define IPI_CALL 2
#define IPI_FLUSH_TLB 4
#define IPI_BOOT 8
#define FLUSH_ALL (void*)0xffffffff
/* Vector of locks used for various atomic operations */
spinlock_t cris_atomic_locks[] = {
[0 ... LOCK_COUNT - 1] = __SPIN_LOCK_UNLOCKED(cris_atomic_locks)
};
/* CPU masks */
cpumask_t phys_cpu_present_map = CPU_MASK_NONE;
EXPORT_SYMBOL(phys_cpu_present_map);
/* Variables used during SMP boot */
volatile int cpu_now_booting = 0;
volatile struct thread_info *smp_init_current_idle_thread;
/* Variables used during IPI */
static DEFINE_SPINLOCK(call_lock);
static DEFINE_SPINLOCK(tlbstate_lock);
struct call_data_struct {
void (*func) (void *info);
void *info;
int wait;
};
static struct call_data_struct * call_data;
static struct mm_struct* flush_mm;
static struct vm_area_struct* flush_vma;
static unsigned long flush_addr;
/* Mode registers */
static unsigned long irq_regs[NR_CPUS] = {
regi_irq,
regi_irq2
};
static irqreturn_t crisv32_ipi_interrupt(int irq, void *dev_id);
static int send_ipi(int vector, int wait, cpumask_t cpu_mask);
static struct irqaction irq_ipi = {
.handler = crisv32_ipi_interrupt,
.flags = 0,
.name = "ipi",
};
extern void cris_mmu_init(void);
extern void cris_timer_init(void);
/* SMP initialization */
void __init smp_prepare_cpus(unsigned int max_cpus)
{
int i;
/* From now on we can expect IPIs so set them up */
setup_irq(IPI_INTR_VECT, &irq_ipi);
/* Mark all possible CPUs as present */
for (i = 0; i < max_cpus; i++)
cpumask_set_cpu(i, &phys_cpu_present_map);
}
void smp_prepare_boot_cpu(void)
{
/* PGD pointer has moved after per_cpu initialization so
* update the MMU.
*/
pgd_t **pgd;
pgd = (pgd_t**)&per_cpu(current_pgd, smp_processor_id());
SUPP_BANK_SEL(1);
SUPP_REG_WR(RW_MM_TLB_PGD, pgd);
SUPP_BANK_SEL(2);
SUPP_REG_WR(RW_MM_TLB_PGD, pgd);
set_cpu_online(0, true);
cpumask_set_cpu(0, &phys_cpu_present_map);
set_cpu_possible(0, true);
}
void __init smp_cpus_done(unsigned int max_cpus)
{
}
/* Bring one cpu online.*/
static int __init
smp_boot_one_cpu(int cpuid, struct task_struct *idle)
{
unsigned timeout;
cpumask_t cpu_mask;
cpumask_clear(&cpu_mask);
task_thread_info(idle)->cpu = cpuid;
/* Information to the CPU that is about to boot */
smp_init_current_idle_thread = task_thread_info(idle);
cpu_now_booting = cpuid;
/* Kick it */
set_cpu_online(cpuid, true);
cpumask_set_cpu(cpuid, &cpu_mask);
send_ipi(IPI_BOOT, 0, cpu_mask);
set_cpu_online(cpuid, false);
/* Wait for CPU to come online */
for (timeout = 0; timeout < 10000; timeout++) {
if(cpu_online(cpuid)) {
cpu_now_booting = 0;
smp_init_current_idle_thread = NULL;
return 0; /* CPU online */
}
udelay(100);
barrier();
}
printk(KERN_CRIT "SMP: CPU:%d is stuck.\n", cpuid);
return -1;
}
/* Secondary CPUs start using C here. Here we need to set up CPU
* specific stuff such as the local timer and the MMU. */
void __init smp_callin(void)
{
int cpu = cpu_now_booting;
reg_intr_vect_rw_mask vect_mask = {0};
/* Initialise the idle task for this CPU */
atomic_inc(&init_mm.mm_count);
current->active_mm = &init_mm;
/* Set up MMU */
cris_mmu_init();
__flush_tlb_all();
/* Setup local timer. */
cris_timer_init();
/* Enable IRQ and idle */
REG_WR(intr_vect, irq_regs[cpu], rw_mask, vect_mask);
crisv32_unmask_irq(IPI_INTR_VECT);
crisv32_unmask_irq(TIMER0_INTR_VECT);
preempt_disable();
notify_cpu_starting(cpu);
local_irq_enable();
set_cpu_online(cpu, true);
cpu_startup_entry(CPUHP_ONLINE);
}
/* Stop execution on this CPU.*/
void stop_this_cpu(void* dummy)
{
local_irq_disable();
asm volatile("halt");
}
/* Other calls */
void smp_send_stop(void)
{
smp_call_function(stop_this_cpu, NULL, 0);
}
int setup_profiling_timer(unsigned int multiplier)
{
return -EINVAL;
}
/* cache_decay_ticks is used by the scheduler to decide if a process
* is "hot" on one CPU. A higher value means a higher penalty to move
* a process to another CPU. Our cache is rather small so we report
* 1 tick.
*/
unsigned long cache_decay_ticks = 1;
int __cpu_up(unsigned int cpu, struct task_struct *tidle)
{
smp_boot_one_cpu(cpu, tidle);
return cpu_online(cpu) ? 0 : -ENOSYS;
}
void smp_send_reschedule(int cpu)
{
cpumask_t cpu_mask;
cpumask_clear(&cpu_mask);
cpumask_set_cpu(cpu, &cpu_mask);
send_ipi(IPI_SCHEDULE, 0, cpu_mask);
}
/* TLB flushing
*
* Flush needs to be done on the local CPU and on any other CPU that
* may have the same mapping. The mm->cpu_vm_mask is used to keep track
* of which CPUs that a specific process has been executed on.
*/
void flush_tlb_common(struct mm_struct* mm, struct vm_area_struct* vma, unsigned long addr)
{
unsigned long flags;
cpumask_t cpu_mask;
spin_lock_irqsave(&tlbstate_lock, flags);
cpu_mask = (mm == FLUSH_ALL ? cpu_all_mask : *mm_cpumask(mm));
cpumask_clear_cpu(smp_processor_id(), &cpu_mask);
flush_mm = mm;
flush_vma = vma;
flush_addr = addr;
send_ipi(IPI_FLUSH_TLB, 1, cpu_mask);
spin_unlock_irqrestore(&tlbstate_lock, flags);
}
void flush_tlb_all(void)
{
__flush_tlb_all();
flush_tlb_common(FLUSH_ALL, FLUSH_ALL, 0);
}
void flush_tlb_mm(struct mm_struct *mm)
{
__flush_tlb_mm(mm);
flush_tlb_common(mm, FLUSH_ALL, 0);
/* No more mappings in other CPUs */
cpumask_clear(mm_cpumask(mm));
cpumask_set_cpu(smp_processor_id(), mm_cpumask(mm));
}
void flush_tlb_page(struct vm_area_struct *vma,
unsigned long addr)
{
__flush_tlb_page(vma, addr);
flush_tlb_common(vma->vm_mm, vma, addr);
}
/* Inter processor interrupts
*
* The IPIs are used for:
* * Force a schedule on a CPU
* Flush TLB on other CPUs
* * Call a function on other CPUs
*/
int send_ipi(int vector, int wait, cpumask_t cpu_mask)
{
int i = 0;
reg_intr_vect_rw_ipi ipi = REG_RD(intr_vect, irq_regs[i], rw_ipi);
int ret = 0;
/* Calculate CPUs to send to. */
cpumask_and(&cpu_mask, &cpu_mask, cpu_online_mask);
/* Send the IPI. */
for_each_cpu(i, &cpu_mask)
{
ipi.vector |= vector;
REG_WR(intr_vect, irq_regs[i], rw_ipi, ipi);
}
/* Wait for IPI to finish on other CPUS */
if (wait) {
for_each_cpu(i, &cpu_mask) {
int j;
for (j = 0 ; j < 1000; j++) {
ipi = REG_RD(intr_vect, irq_regs[i], rw_ipi);
if (!ipi.vector)
break;
udelay(100);
}
/* Timeout? */
if (ipi.vector) {
printk("SMP call timeout from %d to %d\n", smp_processor_id(), i);
ret = -ETIMEDOUT;
dump_stack();
}
}
}
return ret;
}
/*
* You must not call this function with disabled interrupts or from a
* hardware interrupt handler or from a bottom half handler.
*/
int smp_call_function(void (*func)(void *info), void *info, int wait)
{
cpumask_t cpu_mask;
struct call_data_struct data;
int ret;
cpumask_setall(&cpu_mask);
cpumask_clear_cpu(smp_processor_id(), &cpu_mask);
WARN_ON(irqs_disabled());
data.func = func;
data.info = info;
data.wait = wait;
spin_lock(&call_lock);
call_data = &data;
ret = send_ipi(IPI_CALL, wait, cpu_mask);
spin_unlock(&call_lock);
return ret;
}
irqreturn_t crisv32_ipi_interrupt(int irq, void *dev_id)
{
void (*func) (void *info) = call_data->func;
void *info = call_data->info;
reg_intr_vect_rw_ipi ipi;
ipi = REG_RD(intr_vect, irq_regs[smp_processor_id()], rw_ipi);
if (ipi.vector & IPI_SCHEDULE) {
scheduler_ipi();
}
if (ipi.vector & IPI_CALL) {
func(info);
}
if (ipi.vector & IPI_FLUSH_TLB) {
if (flush_mm == FLUSH_ALL)
__flush_tlb_all();
else if (flush_vma == FLUSH_ALL)
__flush_tlb_mm(flush_mm);
else
__flush_tlb_page(flush_vma, flush_addr);
}
ipi.vector = 0;
REG_WR(intr_vect, irq_regs[smp_processor_id()], rw_ipi, ipi);
return IRQ_HANDLED;
}

@@ -8,12 +8,14 @@
#include <linux/timex.h>
#include <linux/time.h>
#include <linux/clocksource.h>
#include <linux/clockchips.h>
#include <linux/interrupt.h>
#include <linux/swap.h>
#include <linux/sched.h>
#include <linux/init.h>
#include <linux/threads.h>
#include <linux/cpufreq.h>
#include <linux/sched_clock.h>
#include <linux/mm.h>
#include <asm/types.h>
#include <asm/signal.h>
@@ -36,33 +38,11 @@
/* Number of 763 counts before watchdog bites */
#define ETRAX_WD_CNT ((2*ETRAX_WD_HZ)/HZ + 1)
/* Register the continuous read-only timer available in FS and ARTPEC-3. */
static cycle_t read_cont_rotime(struct clocksource *cs)
{
return (u32)REG_RD(timer, regi_timer0, r_time);
}
static struct clocksource cont_rotime = {
.name = "crisv32_rotime",
.rating = 300,
.read = read_cont_rotime,
.mask = CLOCKSOURCE_MASK(32),
.flags = CLOCK_SOURCE_IS_CONTINUOUS,
};
static int __init etrax_init_cont_rotime(void)
{
clocksource_register_khz(&cont_rotime, 100000);
return 0;
}
arch_initcall(etrax_init_cont_rotime);
#define CRISV32_TIMER_FREQ (100000000lu)
unsigned long timer_regs[NR_CPUS] =
{
regi_timer0,
#ifdef CONFIG_SMP
regi_timer2
#endif
};
extern int set_rtc_mmss(unsigned long nowtime);
@@ -189,81 +169,104 @@ void handle_watchdog_bite(struct pt_regs *regs)
#endif
}
/*
* timer_interrupt() needs to keep up the real-time clock,
* as well as call the "xtime_update()" routine every clocktick.
*/
extern void cris_do_profile(struct pt_regs *regs);
extern void cris_profile_sample(struct pt_regs *regs);
static void __iomem *timer_base;
static inline irqreturn_t timer_interrupt(int irq, void *dev_id)
static void crisv32_clkevt_mode(enum clock_event_mode mode,
struct clock_event_device *dev)
{
struct pt_regs *regs = get_irq_regs();
int cpu = smp_processor_id();
reg_timer_r_masked_intr masked_intr;
reg_timer_rw_ack_intr ack_intr = { 0 };
reg_timer_rw_tmr0_ctrl ctrl = {
.op = regk_timer_hold,
.freq = regk_timer_f100,
};
/* Check if the timer interrupt is for us (a tmr0 int) */
masked_intr = REG_RD(timer, timer_regs[cpu], r_masked_intr);
if (!masked_intr.tmr0)
REG_WR(timer, timer_base, rw_tmr0_ctrl, ctrl);
}
static int crisv32_clkevt_next_event(unsigned long evt,
struct clock_event_device *dev)
{
reg_timer_rw_tmr0_ctrl ctrl = {
.op = regk_timer_ld,
.freq = regk_timer_f100,
};
REG_WR(timer, timer_base, rw_tmr0_div, evt);
REG_WR(timer, timer_base, rw_tmr0_ctrl, ctrl);
ctrl.op = regk_timer_run;
REG_WR(timer, timer_base, rw_tmr0_ctrl, ctrl);
return 0;
}
static irqreturn_t crisv32_timer_interrupt(int irq, void *dev_id)
{
struct clock_event_device *evt = dev_id;
reg_timer_rw_tmr0_ctrl ctrl = {
.op = regk_timer_hold,
.freq = regk_timer_f100,
};
reg_timer_rw_ack_intr ack = { .tmr0 = 1 };
reg_timer_r_masked_intr intr;
intr = REG_RD(timer, timer_base, r_masked_intr);
if (!intr.tmr0)
return IRQ_NONE;
/* Acknowledge the timer irq. */
ack_intr.tmr0 = 1;
REG_WR(timer, timer_regs[cpu], rw_ack_intr, ack_intr);
REG_WR(timer, timer_base, rw_tmr0_ctrl, ctrl);
REG_WR(timer, timer_base, rw_ack_intr, ack);
/* Reset watchdog otherwise it resets us! */
reset_watchdog();
#ifdef CONFIG_SYSTEM_PROFILER
cris_profile_sample(get_irq_regs());
#endif
/* Update statistics. */
update_process_times(user_mode(regs));
evt->event_handler(evt);
cris_do_profile(regs); /* Save profiling information */
/* The master CPU is responsible for the time keeping. */
if (cpu != 0)
return IRQ_HANDLED;
/* Call the real timer interrupt handler */
xtime_update(1);
return IRQ_HANDLED;
}
/* Timer is IRQF_SHARED so drivers can add stuff to the timer irq chain. */
static struct irqaction irq_timer = {
.handler = timer_interrupt,
.flags = IRQF_SHARED,
.name = "timer"
static struct clock_event_device crisv32_clockevent = {
.name = "crisv32-timer",
.rating = 300,
.features = CLOCK_EVT_FEAT_ONESHOT,
.set_mode = crisv32_clkevt_mode,
.set_next_event = crisv32_clkevt_next_event,
};
void __init cris_timer_init(void)
/* Timer is IRQF_SHARED so drivers can add stuff to the timer irq chain. */
static struct irqaction irq_timer = {
.handler = crisv32_timer_interrupt,
.flags = IRQF_TIMER | IRQF_SHARED,
.name = "crisv32-timer",
.dev_id = &crisv32_clockevent,
};
static u64 notrace crisv32_timer_sched_clock(void)
{
return REG_RD(timer, timer_base, r_time);
}
static void __init crisv32_timer_init(void)
{
int cpu = smp_processor_id();
reg_timer_rw_tmr0_ctrl tmr0_ctrl = { 0 };
reg_timer_rw_tmr0_div tmr0_div = TIMER0_DIV;
reg_timer_rw_intr_mask timer_intr_mask;
reg_timer_rw_tmr0_ctrl ctrl = {
.op = regk_timer_hold,
.freq = regk_timer_f100,
};
/* Setup the etrax timers.
* Base frequency is 100MHz, divider 1000000 -> 100 HZ
* We use timer0, so timer1 is free.
* The trig timer is used by the fasttimer API if enabled.
*/
REG_WR(timer, timer_base, rw_tmr0_ctrl, ctrl);
tmr0_ctrl.op = regk_timer_ld;
tmr0_ctrl.freq = regk_timer_f100;
REG_WR(timer, timer_regs[cpu], rw_tmr0_div, tmr0_div);
REG_WR(timer, timer_regs[cpu], rw_tmr0_ctrl, tmr0_ctrl); /* Load */
tmr0_ctrl.op = regk_timer_run;
REG_WR(timer, timer_regs[cpu], rw_tmr0_ctrl, tmr0_ctrl); /* Start */
/* Enable the timer irq. */
timer_intr_mask = REG_RD(timer, timer_regs[cpu], rw_intr_mask);
timer_intr_mask = REG_RD(timer, timer_base, rw_intr_mask);
timer_intr_mask.tmr0 = 1;
REG_WR(timer, timer_regs[cpu], rw_intr_mask, timer_intr_mask);
REG_WR(timer, timer_base, rw_intr_mask, timer_intr_mask);
}
void __init time_init(void)
{
reg_intr_vect_rw_mask intr_mask;
int irq;
int ret;
/* Probe for the RTC and read it if it exists.
* Before the RTC can be probed the loops_per_usec variable needs
@@ -273,17 +276,28 @@ void __init time_init(void)
*/
loops_per_usec = 50;
/* Start CPU local timer. */
cris_timer_init();
irq = TIMER0_INTR_VECT;
timer_base = (void __iomem *) regi_timer0;
/* Enable the timer irq in global config. */
intr_mask = REG_RD_VECT(intr_vect, regi_irq, rw_mask, 1);
intr_mask.timer0 = 1;
REG_WR_VECT(intr_vect, regi_irq, rw_mask, 1, intr_mask);
crisv32_timer_init();
/* Now actually register the timer irq handler that calls
* timer_interrupt(). */
setup_irq(TIMER0_INTR_VECT, &irq_timer);
sched_clock_register(crisv32_timer_sched_clock, 32,
CRISV32_TIMER_FREQ);
clocksource_mmio_init(timer_base + REG_RD_ADDR_timer_r_time,
"crisv32-timer", CRISV32_TIMER_FREQ,
300, 32, clocksource_mmio_readl_up);
crisv32_clockevent.cpumask = cpu_possible_mask;
crisv32_clockevent.irq = irq;
ret = setup_irq(irq, &irq_timer);
if (ret)
pr_warn("failed to setup irq %d\n", irq);
clockevents_config_and_register(&crisv32_clockevent,
CRISV32_TIMER_FREQ,
2, 0xffffffff);
/* Enable watchdog if we should use one. */

@@ -3,5 +3,5 @@
#
lib-y = checksum.o checksumcopy.o string.o usercopy.o memset.o \
csumcpfruser.o spinlock.o delay.o strcmp.o
csumcpfruser.o delay.o strcmp.o

@@ -1,40 +0,0 @@
;; Core of the spinlock implementation
;;
;; Copyright (C) 2004 Axis Communications AB.
;;
;; Author: Mikael Starvik
.global cris_spin_lock
.type cris_spin_lock,@function
.global cris_spin_trylock
.type cris_spin_trylock,@function
.text
cris_spin_lock:
clearf p
1: test.b [$r10]
beq 1b
clearf p
ax
clear.b [$r10]
bcs 1b
clearf p
ret
nop
.size cris_spin_lock, . - cris_spin_lock
cris_spin_trylock:
clearf p
1: move.b [$r10], $r11
ax
clear.b [$r10]
bcs 1b
clearf p
ret
movu.b $r11,$r10
.size cris_spin_trylock, . - cris_spin_trylock

@@ -40,17 +40,6 @@ void __init cris_mmu_init(void)
*/
per_cpu(current_pgd, smp_processor_id()) = init_mm.pgd;
#ifdef CONFIG_SMP
{
pgd_t **pgd;
pgd = (pgd_t**)&per_cpu(current_pgd, smp_processor_id());
SUPP_BANK_SEL(1);
SUPP_REG_WR(RW_MM_TLB_PGD, pgd);
SUPP_BANK_SEL(2);
SUPP_REG_WR(RW_MM_TLB_PGD, pgd);
}
#endif
/* Initialise the TLB. Function found in tlb.c. */
tlb_init();

@@ -115,11 +115,7 @@
move.d $r0, [$r1] ; last_refill_cause = rw_mm_cause
3: ; Probably not in a loop, continue normal processing
#ifdef CONFIG_SMP
move $s7, $acr ; PGD
#else
move.d current_pgd, $acr ; PGD
#endif
; Look up PMD in PGD
lsrq 24, $r0 ; Get PMD index into PGD (bit 24-31)
move.d [$acr], $acr ; PGD for the current process

@@ -0,0 +1,6 @@
BUILTIN_DTB := $(patsubst "%",%,$(CONFIG_BUILTIN_DTB)).dtb.o
ifneq ($(CONFIG_BUILTIN_DTB),"")
obj-$(CONFIG_OF) += $(BUILTIN_DTB)
endif
clean-files := *.dtb.S

@@ -0,0 +1,18 @@
/dts-v1/;
/include/ "etraxfs.dtsi"
/ {
model = "Axis 88 Developer Board";
compatible = "axis,dev88";
aliases {
serial0 = &uart0;
};
soc {
uart0: serial@b0026000 {
status = "okay";
};
};
};

@@ -0,0 +1,38 @@
/ {
#address-cells = <1>;
#size-cells = <1>;
interrupt-parent = <&intc>;
cpus {
#address-cells = <1>;
#size-cells = <0>;
cpu@0 {
device_type = "cpu";
model = "axis,crisv32";
reg = <0>;
};
};
soc {
compatible = "simple-bus";
model = "etraxfs";
#address-cells = <1>;
#size-cells = <1>;
ranges;
intc: interrupt-controller {
compatible = "axis,crisv32-intc";
reg = <0xb001c000 0x1000>;
interrupt-controller;
#interrupt-cells = <1>;
};
serial@b0026000 {
compatible = "axis,etraxfs-uart";
reg = <0xb0026000 0x1000>;
interrupts = <68>;
status = "disabled";
};
};
};

@@ -1,7 +0,0 @@
#ifndef __ASM_CRIS_ARCH_ATOMIC__
#define __ASM_CRIS_ARCH_ATOMIC__
#define cris_atomic_save(addr, flags) local_irq_save(flags);
#define cris_atomic_restore(addr, flags) local_irq_restore(flags);
#endif

@@ -36,12 +36,4 @@ static inline unsigned long _get_base(char * addr)
return 0;
}
#define nop() __asm__ __volatile__ ("nop");
#define xchg(ptr,x) ((__typeof__(*(ptr)))__xchg((unsigned long)(x),(ptr),sizeof(*(ptr))))
#define tas(ptr) (xchg((ptr),1))
struct __xchg_dummy { unsigned long a[100]; };
#define __xg(x) ((struct __xchg_dummy *)(x))
#endif

@@ -1,36 +0,0 @@
#ifndef __ASM_CRIS_ARCH_ATOMIC__
#define __ASM_CRIS_ARCH_ATOMIC__
#include <linux/spinlock_types.h>
extern void cris_spin_unlock(void *l, int val);
extern void cris_spin_lock(void *l);
extern int cris_spin_trylock(void* l);
#ifndef CONFIG_SMP
#define cris_atomic_save(addr, flags) local_irq_save(flags);
#define cris_atomic_restore(addr, flags) local_irq_restore(flags);
#else
extern spinlock_t cris_atomic_locks[];
#define LOCK_COUNT 128
#define HASH_ADDR(a) (((int)a) & 127)
#define cris_atomic_save(addr, flags) \
local_irq_save(flags); \
cris_spin_lock((void *)&cris_atomic_locks[HASH_ADDR(addr)].raw_lock.slock);
#define cris_atomic_restore(addr, flags) \
{ \
spinlock_t *lock = (void*)&cris_atomic_locks[HASH_ADDR(addr)]; \
__asm__ volatile ("move.d %1,%0" \
: "=m" (lock->raw_lock.slock) \
: "r" (1) \
: "memory"); \
local_irq_restore(flags); \
}
#endif
#endif

@@ -25,8 +25,7 @@ struct thread_struct {
*/
#define TASK_SIZE (0xB0000000UL)
/* CCS I=1, enable interrupts. */
#define INIT_THREAD { 0, 0, (1 << I_CCS_BITNR) }
#define INIT_THREAD { }
#define KSTK_EIP(tsk) \
({ \

@@ -1,131 +0,0 @@
#ifndef __ASM_ARCH_SPINLOCK_H
#define __ASM_ARCH_SPINLOCK_H
#include <linux/spinlock_types.h>
#define RW_LOCK_BIAS 0x01000000
extern void cris_spin_unlock(void *l, int val);
extern void cris_spin_lock(void *l);
extern int cris_spin_trylock(void *l);
static inline int arch_spin_is_locked(arch_spinlock_t *x)
{
return *(volatile signed char *)(&(x)->slock) <= 0;
}
static inline void arch_spin_unlock(arch_spinlock_t *lock)
{
__asm__ volatile ("move.d %1,%0" \
: "=m" (lock->slock) \
: "r" (1) \
: "memory");
}
static inline void arch_spin_unlock_wait(arch_spinlock_t *lock)
{
while (arch_spin_is_locked(lock))
cpu_relax();
}
static inline int arch_spin_trylock(arch_spinlock_t *lock)
{
return cris_spin_trylock((void *)&lock->slock);
}
static inline void arch_spin_lock(arch_spinlock_t *lock)
{
cris_spin_lock((void *)&lock->slock);
}
static inline void
arch_spin_lock_flags(arch_spinlock_t *lock, unsigned long flags)
{
arch_spin_lock(lock);
}
/*
* Read-write spinlocks, allowing multiple readers
* but only one writer.
*
* NOTE! it is quite common to have readers in interrupts
* but no interrupt writers. For those circumstances we
* can "mix" irq-safe locks - any writer needs to get a
* irq-safe write-lock, but readers can get non-irqsafe
* read-locks.
*
*/
static inline int arch_read_can_lock(arch_rwlock_t *x)
{
return (int)(x)->lock > 0;
}
static inline int arch_write_can_lock(arch_rwlock_t *x)
{
return (x)->lock == RW_LOCK_BIAS;
}
static inline void arch_read_lock(arch_rwlock_t *rw)
{
arch_spin_lock(&rw->slock);
while (rw->lock == 0);
rw->lock--;
arch_spin_unlock(&rw->slock);
}
static inline void arch_write_lock(arch_rwlock_t *rw)
{
arch_spin_lock(&rw->slock);
while (rw->lock != RW_LOCK_BIAS);
rw->lock = 0;
arch_spin_unlock(&rw->slock);
}
static inline void arch_read_unlock(arch_rwlock_t *rw)
{
arch_spin_lock(&rw->slock);
rw->lock++;
arch_spin_unlock(&rw->slock);
}
static inline void arch_write_unlock(arch_rwlock_t *rw)
{
arch_spin_lock(&rw->slock);
while (rw->lock != RW_LOCK_BIAS);
rw->lock = RW_LOCK_BIAS;
arch_spin_unlock(&rw->slock);
}
static inline int arch_read_trylock(arch_rwlock_t *rw)
{
int ret = 0;
arch_spin_lock(&rw->slock);
if (rw->lock != 0) {
rw->lock--;
ret = 1;
}
arch_spin_unlock(&rw->slock);
return ret;
}
static inline int arch_write_trylock(arch_rwlock_t *rw)
{
int ret = 0;
arch_spin_lock(&rw->slock);
if (rw->lock == RW_LOCK_BIAS) {
rw->lock = 0;
ret = 1;
}
arch_spin_unlock(&rw->slock);
return ret;
}
#define _raw_read_lock_flags(lock, flags) _raw_read_lock(lock)
#define _raw_write_lock_flags(lock, flags) _raw_write_lock(lock)
#define arch_spin_relax(lock) cpu_relax()
#define arch_read_relax(lock) cpu_relax()
#define arch_write_relax(lock) cpu_relax()
#endif /* __ASM_ARCH_SPINLOCK_H */

@@ -1,16 +1,29 @@
generic-y += atomic.h
generic-y += barrier.h
generic-y += clkdev.h
generic-y += cmpxchg.h
generic-y += cputime.h
generic-y += device.h
generic-y += div64.h
generic-y += exec.h
generic-y += emergency-restart.h
generic-y += futex.h
generic-y += hardirq.h
generic-y += irq_regs.h
generic-y += irq_work.h
generic-y += kdebug.h
generic-y += kmap_types.h
generic-y += kvm_para.h
generic-y += linkage.h
generic-y += local.h
generic-y += local64.h
generic-y += mcs_spinlock.h
generic-y += module.h
generic-y += percpu.h
generic-y += preempt.h
generic-y += scatterlist.h
generic-y += sections.h
generic-y += topology.h
generic-y += trace_clock.h
generic-y += vga.h
generic-y += xor.h

@@ -1,149 +0,0 @@
/* $Id: atomic.h,v 1.3 2001/07/25 16:15:19 bjornw Exp $ */
#ifndef __ASM_CRIS_ATOMIC__
#define __ASM_CRIS_ATOMIC__
#include <linux/compiler.h>
#include <linux/types.h>
#include <asm/cmpxchg.h>
#include <arch/atomic.h>
#include <arch/system.h>
#include <asm/barrier.h>
/*
* Atomic operations that C can't guarantee us. Useful for
* resource counting etc..
*/
#define ATOMIC_INIT(i) { (i) }
#define atomic_read(v) ACCESS_ONCE((v)->counter)
#define atomic_set(v,i) (((v)->counter) = (i))
/* These should be written in asm but we do it in C for now. */
#define ATOMIC_OP(op, c_op) \
static inline void atomic_##op(int i, volatile atomic_t *v) \
{ \
unsigned long flags; \
cris_atomic_save(v, flags); \
v->counter c_op i; \
cris_atomic_restore(v, flags); \
} \
#define ATOMIC_OP_RETURN(op, c_op) \
static inline int atomic_##op##_return(int i, volatile atomic_t *v) \
{ \
unsigned long flags; \
int retval; \
cris_atomic_save(v, flags); \
retval = (v->counter c_op i); \
cris_atomic_restore(v, flags); \
return retval; \
}
#define ATOMIC_OPS(op, c_op) ATOMIC_OP(op, c_op) ATOMIC_OP_RETURN(op, c_op)
ATOMIC_OPS(add, +=)
ATOMIC_OPS(sub, -=)
#undef ATOMIC_OPS
#undef ATOMIC_OP_RETURN
#undef ATOMIC_OP
#define atomic_add_negative(a, v) (atomic_add_return((a), (v)) < 0)
static inline int atomic_sub_and_test(int i, volatile atomic_t *v)
{
int retval;
unsigned long flags;
cris_atomic_save(v, flags);
retval = (v->counter -= i) == 0;
cris_atomic_restore(v, flags);
return retval;
}
static inline void atomic_inc(volatile atomic_t *v)
{
unsigned long flags;
cris_atomic_save(v, flags);
(v->counter)++;
cris_atomic_restore(v, flags);
}
static inline void atomic_dec(volatile atomic_t *v)
{
unsigned long flags;
cris_atomic_save(v, flags);
(v->counter)--;
cris_atomic_restore(v, flags);
}
static inline int atomic_inc_return(volatile atomic_t *v)
{
unsigned long flags;
int retval;
cris_atomic_save(v, flags);
retval = ++(v->counter);
cris_atomic_restore(v, flags);
return retval;
}
static inline int atomic_dec_return(volatile atomic_t *v)
{
unsigned long flags;
int retval;
cris_atomic_save(v, flags);
retval = --(v->counter);
cris_atomic_restore(v, flags);
return retval;
}
static inline int atomic_dec_and_test(volatile atomic_t *v)
{
int retval;
unsigned long flags;
cris_atomic_save(v, flags);
retval = --(v->counter) == 0;
cris_atomic_restore(v, flags);
return retval;
}
static inline int atomic_inc_and_test(volatile atomic_t *v)
{
int retval;
unsigned long flags;
cris_atomic_save(v, flags);
retval = ++(v->counter) == 0;
cris_atomic_restore(v, flags);
return retval;
}
static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
{
int ret;
unsigned long flags;
cris_atomic_save(v, flags);
ret = v->counter;
if (likely(ret == old))
v->counter = new;
cris_atomic_restore(v, flags);
return ret;
}
#define atomic_xchg(v, new) (xchg(&((v)->counter), new))
static inline int __atomic_add_unless(atomic_t *v, int a, int u)
{
int ret;
unsigned long flags;
cris_atomic_save(v, flags);
ret = v->counter;
if (ret != u)
v->counter += a;
cris_atomic_restore(v, flags);
return ret;
}
#endif

@@ -19,119 +19,10 @@
#endif
#include <arch/bitops.h>
#include <linux/atomic.h>
#include <linux/compiler.h>
#include <asm/barrier.h>
/*
* set_bit - Atomically set a bit in memory
* @nr: the bit to set
* @addr: the address to start counting from
*
* This function is atomic and may not be reordered. See __set_bit()
* if you do not require the atomic guarantees.
* Note that @nr may be almost arbitrarily large; this function is not
* restricted to acting on a single-word quantity.
*/
#define set_bit(nr, addr) (void)test_and_set_bit(nr, addr)
/*
* clear_bit - Clears a bit in memory
* @nr: Bit to clear
* @addr: Address to start counting from
*
* clear_bit() is atomic and may not be reordered. However, it does
* not contain a memory barrier, so if it is used for locking purposes,
* you should call smp_mb__before_atomic() and/or smp_mb__after_atomic()
* in order to ensure changes are visible on other processors.
*/
#define clear_bit(nr, addr) (void)test_and_clear_bit(nr, addr)
/*
* change_bit - Toggle a bit in memory
* @nr: Bit to change
* @addr: Address to start counting from
*
* change_bit() is atomic and may not be reordered.
* Note that @nr may be almost arbitrarily large; this function is not
* restricted to acting on a single-word quantity.
*/
#define change_bit(nr, addr) (void)test_and_change_bit(nr, addr)
/**
* test_and_set_bit - Set a bit and return its old value
* @nr: Bit to set
* @addr: Address to count from
*
* This operation is atomic and cannot be reordered.
* It also implies a memory barrier.
*/
static inline int test_and_set_bit(int nr, volatile unsigned long *addr)
{
unsigned int mask, retval;
unsigned long flags;
unsigned int *adr = (unsigned int *)addr;
adr += nr >> 5;
mask = 1 << (nr & 0x1f);
cris_atomic_save(addr, flags);
retval = (mask & *adr) != 0;
*adr |= mask;
cris_atomic_restore(addr, flags);
return retval;
}
/**
* test_and_clear_bit - Clear a bit and return its old value
* @nr: Bit to clear
* @addr: Address to count from
*
* This operation is atomic and cannot be reordered.
* It also implies a memory barrier.
*/
static inline int test_and_clear_bit(int nr, volatile unsigned long *addr)
{
unsigned int mask, retval;
unsigned long flags;
unsigned int *adr = (unsigned int *)addr;
adr += nr >> 5;
mask = 1 << (nr & 0x1f);
cris_atomic_save(addr, flags);
retval = (mask & *adr) != 0;
*adr &= ~mask;
cris_atomic_restore(addr, flags);
return retval;
}
/**
* test_and_change_bit - Change a bit and return its old value
* @nr: Bit to change
* @addr: Address to count from
*
* This operation is atomic and cannot be reordered.
* It also implies a memory barrier.
*/
static inline int test_and_change_bit(int nr, volatile unsigned long *addr)
{
unsigned int mask, retval;
unsigned long flags;
unsigned int *adr = (unsigned int *)addr;
adr += nr >> 5;
mask = 1 << (nr & 0x1f);
cris_atomic_save(addr, flags);
retval = (mask & *adr) != 0;
*adr ^= mask;
cris_atomic_restore(addr, flags);
return retval;
}
#include <asm-generic/bitops/atomic.h>
#include <asm-generic/bitops/non-atomic.h>
/*

@@ -1,53 +0,0 @@
#ifndef __ASM_CRIS_CMPXCHG__
#define __ASM_CRIS_CMPXCHG__
#include <linux/irqflags.h>
static inline unsigned long __xchg(unsigned long x, volatile void * ptr, int size)
{
/* since Etrax doesn't have any atomic xchg instructions, we need to disable
irq's (if enabled) and do it with move.d's */
unsigned long flags,temp;
local_irq_save(flags); /* save flags, including irq enable bit and shut off irqs */
switch (size) {
case 1:
*((unsigned char *)&temp) = x;
x = *(unsigned char *)ptr;
*(unsigned char *)ptr = *((unsigned char *)&temp);
break;
case 2:
*((unsigned short *)&temp) = x;
x = *(unsigned short *)ptr;
*(unsigned short *)ptr = *((unsigned short *)&temp);
break;
case 4:
temp = x;
x = *(unsigned long *)ptr;
*(unsigned long *)ptr = temp;
break;
}
local_irq_restore(flags); /* restore irq enable bit */
return x;
}
#define xchg(ptr,x) \
((__typeof__(*(ptr)))__xchg((unsigned long)(x),(ptr),sizeof(*(ptr))))
#define tas(ptr) (xchg((ptr),1))
#include <asm-generic/cmpxchg-local.h>
/*
* cmpxchg_local and cmpxchg64_local are atomic wrt current CPU. Always make
* them available.
*/
#define cmpxchg_local(ptr, o, n) \
((__typeof__(*(ptr)))__cmpxchg_local_generic((ptr), (unsigned long)(o),\
(unsigned long)(n), sizeof(*(ptr))))
#define cmpxchg64_local(ptr, o, n) __cmpxchg64_local_generic((ptr), (o), (n))
#ifndef CONFIG_SMP
#include <asm-generic/cmpxchg.h>
#endif
#endif /* __ASM_CRIS_CMPXCHG__ */

@@ -1,7 +0,0 @@
/*
* Arch specific extensions to struct device
*
* This file is released under the GPLv2
*/
#include <asm-generic/device.h>

@@ -1 +0,0 @@
#include <asm-generic/div64.h>

@@ -71,7 +71,7 @@ typedef unsigned long elf_fpregset_t;
the loader. We need to make sure that it is out of the way of the program
that it will "exec", and that there is sufficient room for the brk. */
#define ELF_ET_DYN_BASE (2 * TASK_SIZE / 3)
#define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
/* This yields a mask that user programs can use to figure out what
instruction set this CPU supports. This could be done in user space,

@@ -1,6 +0,0 @@
#ifndef _ASM_EMERGENCY_RESTART_H
#define _ASM_EMERGENCY_RESTART_H
#include <asm-generic/emergency-restart.h>
#endif /* _ASM_EMERGENCY_RESTART_H */

@@ -1,6 +0,0 @@
#ifndef _ASM_FUTEX_H
#define _ASM_FUTEX_H
#include <asm-generic/futex.h>
#endif

@@ -1,7 +0,0 @@
#ifndef __ASM_HARDIRQ_H
#define __ASM_HARDIRQ_H
#include <asm/irq.h>
#include <asm-generic/hardirq.h>
#endif /* __ASM_HARDIRQ_H */

@@ -1 +0,0 @@
#include <asm-generic/irq_regs.h>

@@ -1 +0,0 @@
#include <asm-generic/kdebug.h>

@@ -1,10 +0,0 @@
#ifndef _ASM_KMAP_TYPES_H
#define _ASM_KMAP_TYPES_H
/* Dummy header just to define km_type. None of this
* is actually used on cris.
*/
#include <asm-generic/kmap_types.h>
#endif

@@ -1 +0,0 @@
#include <asm-generic/local.h>

@@ -1 +0,0 @@
#include <asm-generic/local64.h>

@@ -1,6 +0,0 @@
#ifndef _CRIS_PERCPU_H
#define _CRIS_PERCPU_H
#include <asm-generic/percpu.h>
#endif /* _CRIS_PERCPU_H */

@@ -1,10 +0,0 @@
#ifndef __ASM_SMP_H
#define __ASM_SMP_H
#include <linux/cpumask.h>
extern cpumask_t phys_cpu_present_map;
#define raw_smp_processor_id() (current_thread_info()->cpu)
#endif

@@ -1 +0,0 @@
#include <arch/spinlock.h>

@@ -22,16 +22,9 @@ extern void __flush_tlb_mm(struct mm_struct *mm);
extern void __flush_tlb_page(struct vm_area_struct *vma,
unsigned long addr);
#ifdef CONFIG_SMP
extern void flush_tlb_all(void);
extern void flush_tlb_mm(struct mm_struct *mm);
extern void flush_tlb_page(struct vm_area_struct *vma,
unsigned long addr);
#else
#define flush_tlb_all __flush_tlb_all
#define flush_tlb_mm __flush_tlb_mm
#define flush_tlb_page __flush_tlb_page
#endif
static inline void flush_tlb_range(struct vm_area_struct * vma, unsigned long start, unsigned long end)
{

@@ -1,6 +0,0 @@
#ifndef _ASM_CRIS_TOPOLOGY_H
#define _ASM_CRIS_TOPOLOGY_H
#include <asm-generic/topology.h>
#endif /* _ASM_CRIS_TOPOLOGY_H */

@@ -7,6 +7,7 @@ CPPFLAGS_vmlinux.lds := -DDRAM_VIRTUAL_BASE=0x$(CONFIG_ETRAX_DRAM_VIRTUAL_BASE)
extra-y := vmlinux.lds
obj-y := process.o traps.o irq.o ptrace.o setup.o time.o sys_cris.o
obj-y += devicetree.o
obj-$(CONFIG_MODULES) += crisksyms.o
obj-$(CONFIG_MODULES) += module.o

@@ -0,0 +1,14 @@
#include <linux/init.h>
#include <linux/bootmem.h>
#include <linux/printk.h>
void __init early_init_dt_add_memory_arch(u64 base, u64 size)
{
pr_err("%s(%llx, %llx)\n",
__func__, base, size);
}
void * __init early_init_dt_alloc_memory_arch(u64 size, u64 align)
{
return alloc_bootmem_align(size, align);
}

@@ -42,3 +42,26 @@ void do_notify_resume(int canrestart, struct pt_regs *regs,
tracehook_notify_resume(regs);
}
}
void do_work_pending(int syscall, struct pt_regs *regs,
unsigned int thread_flags)
{
do {
if (likely(thread_flags & _TIF_NEED_RESCHED)) {
schedule();
} else {
if (unlikely(!user_mode(regs)))
return;
local_irq_enable();
if (thread_flags & _TIF_SIGPENDING) {
do_signal(syscall, regs);
syscall = 0;
} else {
clear_thread_flag(TIF_NOTIFY_RESUME);
tracehook_notify_resume(regs);
}
}
local_irq_disable();
thread_flags = current_thread_info()->flags;
} while (thread_flags & _TIF_WORK_MASK);
}

@@ -19,6 +19,9 @@
#include <linux/utsname.h>
#include <linux/pfn.h>
#include <linux/cpu.h>
#include <linux/of.h>
#include <linux/of_fdt.h>
#include <linux/of_platform.h>
#include <asm/setup.h>
#include <arch/system.h>
@@ -64,6 +67,10 @@ void __init setup_arch(char **cmdline_p)
unsigned long start_pfn, max_pfn;
unsigned long memory_start;
#ifdef CONFIG_OF
early_init_dt_scan(__dtb_start);
#endif
/* register an initial console printing routine for printk's */
init_etrax_debug();
@@ -141,6 +148,8 @@ void __init setup_arch(char **cmdline_p)
reserve_bootmem(PFN_PHYS(start_pfn), bootmap_size, BOOTMEM_DEFAULT);
unflatten_and_copy_device_tree();
/* paging_init() sets up the MMU and marks all pages as reserved */
paging_init();
@@ -204,3 +213,9 @@ static int __init topology_init(void)
subsys_initcall(topology_init);
static int __init cris_of_init(void)
{
of_platform_populate(NULL, of_default_bus_match_table, NULL, NULL);
return 0;
}
core_initcall(cris_of_init);

@@ -79,11 +79,13 @@ cris_do_profile(struct pt_regs* regs)
#endif
}
#ifndef CONFIG_GENERIC_SCHED_CLOCK
unsigned long long sched_clock(void)
{
return (unsigned long long)jiffies * (NSEC_PER_SEC / HZ) +
get_ns_in_jiffie();
}
#endif
static int
__init init_udelay(void)