Merge branch 'topic/get-cpu-var' into next
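
Every hunk in this merge is the same mechanical conversion: the legacy __get_cpu_var()/__raw_get_cpu_var()/get_cpu_var() accessors are replaced with this_cpu_ptr() where the address of the local CPU's instance is needed, and with __this_cpu_read/write/inc/dec/or operations where the value itself is accessed. As a rough orientation only -- the variable below is made up and does not appear anywhere in this commit -- the old and new idioms map like this:

#include <linux/percpu.h>

/* Illustrative per-cpu variable; demo_count is not a symbol from this commit. */
DEFINE_PER_CPU(unsigned int, demo_count);

static void demo_update(void)
{
	unsigned int v;
	unsigned int *p;

	/* old: __get_cpu_var(demo_count)++;      */
	__this_cpu_inc(demo_count);

	/* old: v = __get_cpu_var(demo_count);    */
	v = __this_cpu_read(demo_count);

	/* old: __get_cpu_var(demo_count) = v;    */
	__this_cpu_write(demo_count, v);

	/* old: p = &__get_cpu_var(demo_count);   */
	p = this_cpu_ptr(&demo_count);
	(void)p;
}
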
commit dd521d1eb4

@@ -21,7 +21,12 @@ DECLARE_PER_CPU_SHARED_ALIGNED(irq_cpustat_t, irq_stat);
 
 #define __ARCH_IRQ_STAT
 
-#define local_softirq_pending()	__get_cpu_var(irq_stat).__softirq_pending
+#define local_softirq_pending()	__this_cpu_read(irq_stat.__softirq_pending)
+
+#define __ARCH_SET_SOFTIRQ_PENDING
+
+#define set_softirq_pending(x) __this_cpu_write(irq_stat.__softirq_pending, (x))
+#define or_softirq_pending(x) __this_cpu_or(irq_stat.__softirq_pending, (x))
 
 static inline void ack_bad_irq(unsigned int irq)
 {

@@ -107,14 +107,14 @@ extern void __flush_tlb_pending(struct ppc64_tlb_batch *batch);
 
 static inline void arch_enter_lazy_mmu_mode(void)
 {
-	struct ppc64_tlb_batch *batch = &__get_cpu_var(ppc64_tlb_batch);
+	struct ppc64_tlb_batch *batch = this_cpu_ptr(&ppc64_tlb_batch);
 
 	batch->active = 1;
 }
 
 static inline void arch_leave_lazy_mmu_mode(void)
 {
-	struct ppc64_tlb_batch *batch = &__get_cpu_var(ppc64_tlb_batch);
+	struct ppc64_tlb_batch *batch = this_cpu_ptr(&ppc64_tlb_batch);
 
 	if (batch->index)
 		__flush_tlb_pending(batch);

@@ -98,7 +98,7 @@ DECLARE_PER_CPU(struct xics_cppr, xics_cppr);
 
 static inline void xics_push_cppr(unsigned int vec)
 {
-	struct xics_cppr *os_cppr = &__get_cpu_var(xics_cppr);
+	struct xics_cppr *os_cppr = this_cpu_ptr(&xics_cppr);
 
 	if (WARN_ON(os_cppr->index >= MAX_NUM_PRIORITIES - 1))
 		return;
@@ -111,7 +111,7 @@ static inline void xics_push_cppr(unsigned int vec)
 
 static inline unsigned char xics_pop_cppr(void)
 {
-	struct xics_cppr *os_cppr = &__get_cpu_var(xics_cppr);
+	struct xics_cppr *os_cppr = this_cpu_ptr(&xics_cppr);
 
 	if (WARN_ON(os_cppr->index < 1))
 		return LOWEST_PRIORITY;
@@ -121,7 +121,7 @@ static inline unsigned char xics_pop_cppr(void)
 
 static inline void xics_set_base_cppr(unsigned char cppr)
 {
-	struct xics_cppr *os_cppr = &__get_cpu_var(xics_cppr);
+	struct xics_cppr *os_cppr = this_cpu_ptr(&xics_cppr);
 
 	/* we only really want to set the priority when there's
 	 * just one cppr value on the stack
@@ -133,7 +133,7 @@ static inline void xics_set_base_cppr(unsigned char cppr)
 
 static inline unsigned char xics_cppr_top(void)
 {
-	struct xics_cppr *os_cppr = &__get_cpu_var(xics_cppr);
+	struct xics_cppr *os_cppr = this_cpu_ptr(&xics_cppr);
 
 	return os_cppr->stack[os_cppr->index];
 }

@@ -41,7 +41,7 @@ void doorbell_exception(struct pt_regs *regs)
 
 	may_hard_irq_enable();
 
-	__get_cpu_var(irq_stat).doorbell_irqs++;
+	__this_cpu_inc(irq_stat.doorbell_irqs);
 
 	smp_ipi_demux();
 

@@ -63,7 +63,7 @@ int hw_breakpoint_slots(int type)
 int arch_install_hw_breakpoint(struct perf_event *bp)
 {
 	struct arch_hw_breakpoint *info = counter_arch_bp(bp);
-	struct perf_event **slot = &__get_cpu_var(bp_per_reg);
+	struct perf_event **slot = this_cpu_ptr(&bp_per_reg);
 
 	*slot = bp;
 
@@ -88,7 +88,7 @@ int arch_install_hw_breakpoint(struct perf_event *bp)
  */
 void arch_uninstall_hw_breakpoint(struct perf_event *bp)
 {
-	struct perf_event **slot = &__get_cpu_var(bp_per_reg);
+	struct perf_event **slot = this_cpu_ptr(&bp_per_reg);
 
 	if (*slot != bp) {
 		WARN_ONCE(1, "Can't find the breakpoint");
@@ -226,7 +226,7 @@ int __kprobes hw_breakpoint_handler(struct die_args *args)
 	 */
 	rcu_read_lock();
 
-	bp = __get_cpu_var(bp_per_reg);
+	bp = __this_cpu_read(bp_per_reg);
 	if (!bp)
 		goto out;
 	info = counter_arch_bp(bp);

@@ -208,7 +208,7 @@ static unsigned long iommu_range_alloc(struct device *dev,
 	 * We don't need to disable preemption here because any CPU can
 	 * safely use any IOMMU pool.
 	 */
-	pool_nr = __raw_get_cpu_var(iommu_pool_hash) & (tbl->nr_pools - 1);
+	pool_nr = __this_cpu_read(iommu_pool_hash) & (tbl->nr_pools - 1);
 
 	if (largealloc)
 		pool = &(tbl->large_pool);

@@ -114,7 +114,7 @@ static inline notrace void set_soft_enabled(unsigned long enable)
 static inline notrace int decrementer_check_overflow(void)
 {
 	u64 now = get_tb_or_rtc();
-	u64 *next_tb = &__get_cpu_var(decrementers_next_tb);
+	u64 *next_tb = this_cpu_ptr(&decrementers_next_tb);
 
 	return now >= *next_tb;
 }
@@ -499,7 +499,7 @@ void __do_irq(struct pt_regs *regs)
 
 	/* And finally process it */
 	if (unlikely(irq == NO_IRQ))
-		__get_cpu_var(irq_stat).spurious_irqs++;
+		__this_cpu_inc(irq_stat.spurious_irqs);
 	else
 		generic_handle_irq(irq);
 

@@ -155,7 +155,7 @@ static int kgdb_singlestep(struct pt_regs *regs)
 {
 	struct thread_info *thread_info, *exception_thread_info;
 	struct thread_info *backup_current_thread_info =
-		&__get_cpu_var(kgdb_thread_info);
+		this_cpu_ptr(&kgdb_thread_info);
 
 	if (user_mode(regs))
 		return 0;

@@ -119,7 +119,7 @@ static void __kprobes save_previous_kprobe(struct kprobe_ctlblk *kcb)
 
 static void __kprobes restore_previous_kprobe(struct kprobe_ctlblk *kcb)
 {
-	__get_cpu_var(current_kprobe) = kcb->prev_kprobe.kp;
+	__this_cpu_write(current_kprobe, kcb->prev_kprobe.kp);
 	kcb->kprobe_status = kcb->prev_kprobe.status;
 	kcb->kprobe_saved_msr = kcb->prev_kprobe.saved_msr;
 }
@@ -127,7 +127,7 @@ static void __kprobes restore_previous_kprobe(struct kprobe_ctlblk *kcb)
 static void __kprobes set_current_kprobe(struct kprobe *p, struct pt_regs *regs,
 				struct kprobe_ctlblk *kcb)
 {
-	__get_cpu_var(current_kprobe) = p;
+	__this_cpu_write(current_kprobe, p);
 	kcb->kprobe_saved_msr = regs->msr;
 }
 
@@ -192,7 +192,7 @@ static int __kprobes kprobe_handler(struct pt_regs *regs)
 				ret = 1;
 				goto no_kprobe;
 			}
-			p = __get_cpu_var(current_kprobe);
+			p = __this_cpu_read(current_kprobe);
 			if (p->break_handler && p->break_handler(p, regs)) {
 				goto ss_probe;
 			}

@@ -73,8 +73,8 @@ void save_mce_event(struct pt_regs *regs, long handled,
 		    uint64_t nip, uint64_t addr)
 {
 	uint64_t srr1;
-	int index = __get_cpu_var(mce_nest_count)++;
-	struct machine_check_event *mce = &__get_cpu_var(mce_event[index]);
+	int index = __this_cpu_inc_return(mce_nest_count);
+	struct machine_check_event *mce = this_cpu_ptr(&mce_event[index]);
 
 	/*
 	 * Return if we don't have enough space to log mce event.
@@ -143,7 +143,7 @@ void save_mce_event(struct pt_regs *regs, long handled,
  */
 int get_mce_event(struct machine_check_event *mce, bool release)
 {
-	int index = __get_cpu_var(mce_nest_count) - 1;
+	int index = __this_cpu_read(mce_nest_count) - 1;
 	struct machine_check_event *mc_evt;
 	int ret = 0;
 
@@ -153,7 +153,7 @@ int get_mce_event(struct machine_check_event *mce, bool release)
 
 	/* Check if we have MCE info to process. */
 	if (index < MAX_MC_EVT) {
-		mc_evt = &__get_cpu_var(mce_event[index]);
+		mc_evt = this_cpu_ptr(&mce_event[index]);
 		/* Copy the event structure and release the original */
 		if (mce)
 			*mce = *mc_evt;
@@ -163,7 +163,7 @@ int get_mce_event(struct machine_check_event *mce, bool release)
 	}
 	/* Decrement the count to free the slot. */
 	if (release)
-		__get_cpu_var(mce_nest_count)--;
+		__this_cpu_dec(mce_nest_count);
 
 	return ret;
 }
@@ -184,13 +184,13 @@ void machine_check_queue_event(void)
 	if (!get_mce_event(&evt, MCE_EVENT_RELEASE))
 		return;
 
-	index = __get_cpu_var(mce_queue_count)++;
+	index = __this_cpu_inc_return(mce_queue_count);
 	/* If queue is full, just return for now. */
 	if (index >= MAX_MC_EVT) {
-		__get_cpu_var(mce_queue_count)--;
+		__this_cpu_dec(mce_queue_count);
 		return;
 	}
-	__get_cpu_var(mce_event_queue[index]) = evt;
+	memcpy(this_cpu_ptr(&mce_event_queue[index]), &evt, sizeof(evt));
 
 	/* Queue irq work to process this event later. */
 	irq_work_queue(&mce_event_process_work);
@@ -208,11 +208,11 @@ static void machine_check_process_queued_event(struct irq_work *work)
 	 * For now just print it to console.
 	 * TODO: log this error event to FSP or nvram.
 	 */
-	while (__get_cpu_var(mce_queue_count) > 0) {
-		index = __get_cpu_var(mce_queue_count) - 1;
+	while (__this_cpu_read(mce_queue_count) > 0) {
+		index = __this_cpu_read(mce_queue_count) - 1;
 		machine_check_print_event_info(
-				&__get_cpu_var(mce_event_queue[index]));
-		__get_cpu_var(mce_queue_count)--;
+				this_cpu_ptr(&mce_event_queue[index]));
+		__this_cpu_dec(mce_queue_count);
 	}
 }
 

@@ -499,7 +499,7 @@ static inline int set_dawr(struct arch_hw_breakpoint *brk)
 
 void __set_breakpoint(struct arch_hw_breakpoint *brk)
 {
-	__get_cpu_var(current_brk) = *brk;
+	memcpy(this_cpu_ptr(&current_brk), brk, sizeof(*brk));
 
 	if (cpu_has_feature(CPU_FTR_DAWR))
 		set_dawr(brk);
@@ -842,7 +842,7 @@ struct task_struct *__switch_to(struct task_struct *prev,
  * schedule DABR
  */
 #ifndef CONFIG_HAVE_HW_BREAKPOINT
-	if (unlikely(!hw_brk_match(&__get_cpu_var(current_brk), &new->thread.hw_brk)))
+	if (unlikely(!hw_brk_match(this_cpu_ptr(&current_brk), &new->thread.hw_brk)))
 		__set_breakpoint(&new->thread.hw_brk);
 #endif /* CONFIG_HAVE_HW_BREAKPOINT */
 #endif
@@ -856,7 +856,7 @@ struct task_struct *__switch_to(struct task_struct *prev,
 	 * Collect processor utilization data per process
 	 */
 	if (firmware_has_feature(FW_FEATURE_SPLPAR)) {
-		struct cpu_usage *cu = &__get_cpu_var(cpu_usage_array);
+		struct cpu_usage *cu = this_cpu_ptr(&cpu_usage_array);
 		long unsigned start_tb, current_tb;
 		start_tb = old_thread->start_tb;
 		cu->current_tb = current_tb = mfspr(SPRN_PURR);
@@ -866,7 +866,7 @@ struct task_struct *__switch_to(struct task_struct *prev,
 #endif /* CONFIG_PPC64 */
 
 #ifdef CONFIG_PPC_BOOK3S_64
-	batch = &__get_cpu_var(ppc64_tlb_batch);
+	batch = this_cpu_ptr(&ppc64_tlb_batch);
 	if (batch->active) {
 		current_thread_info()->local_flags |= _TLF_LAZY_MMU;
 		if (batch->index)
@@ -889,7 +889,7 @@ struct task_struct *__switch_to(struct task_struct *prev,
 #ifdef CONFIG_PPC_BOOK3S_64
 	if (current_thread_info()->local_flags & _TLF_LAZY_MMU) {
 		current_thread_info()->local_flags &= ~_TLF_LAZY_MMU;
-		batch = &__get_cpu_var(ppc64_tlb_batch);
+		batch = this_cpu_ptr(&ppc64_tlb_batch);
 		batch->active = 1;
 	}
 #endif /* CONFIG_PPC_BOOK3S_64 */

@@ -243,7 +243,7 @@ void smp_muxed_ipi_message_pass(int cpu, int msg)
 
 irqreturn_t smp_ipi_demux(void)
 {
-	struct cpu_messages *info = &__get_cpu_var(ipi_message);
+	struct cpu_messages *info = this_cpu_ptr(&ipi_message);
 	unsigned int all;
 
 	mb();	/* order any irq clear */
@@ -442,9 +442,9 @@ void generic_mach_cpu_die(void)
 	idle_task_exit();
 	cpu = smp_processor_id();
 	printk(KERN_DEBUG "CPU%d offline\n", cpu);
-	__get_cpu_var(cpu_state) = CPU_DEAD;
+	__this_cpu_write(cpu_state, CPU_DEAD);
 	smp_wmb();
-	while (__get_cpu_var(cpu_state) != CPU_UP_PREPARE)
+	while (__this_cpu_read(cpu_state) != CPU_UP_PREPARE)
 		cpu_relax();
 }
 

@@ -394,10 +394,10 @@ void ppc_enable_pmcs(void)
 	ppc_set_pmu_inuse(1);
 
 	/* Only need to enable them once */
-	if (__get_cpu_var(pmcs_enabled))
+	if (__this_cpu_read(pmcs_enabled))
 		return;
 
-	__get_cpu_var(pmcs_enabled) = 1;
+	__this_cpu_write(pmcs_enabled, 1);
 
 	if (ppc_md.enable_pmcs)
 		ppc_md.enable_pmcs();

@@ -458,9 +458,9 @@ static inline void clear_irq_work_pending(void)
 
 DEFINE_PER_CPU(u8, irq_work_pending);
 
-#define set_irq_work_pending_flag()	__get_cpu_var(irq_work_pending) = 1
-#define test_irq_work_pending()		__get_cpu_var(irq_work_pending)
-#define clear_irq_work_pending()	__get_cpu_var(irq_work_pending) = 0
+#define set_irq_work_pending_flag()	__this_cpu_write(irq_work_pending, 1)
+#define test_irq_work_pending()		__this_cpu_read(irq_work_pending)
+#define clear_irq_work_pending()	__this_cpu_write(irq_work_pending, 0)
 
 #endif /* 32 vs 64 bit */
 
@@ -482,8 +482,8 @@ void arch_irq_work_raise(void)
 static void __timer_interrupt(void)
 {
 	struct pt_regs *regs = get_irq_regs();
-	u64 *next_tb = &__get_cpu_var(decrementers_next_tb);
-	struct clock_event_device *evt = &__get_cpu_var(decrementers);
+	u64 *next_tb = this_cpu_ptr(&decrementers_next_tb);
+	struct clock_event_device *evt = this_cpu_ptr(&decrementers);
 	u64 now;
 
 	trace_timer_interrupt_entry(regs);
@@ -498,7 +498,7 @@ static void __timer_interrupt(void)
 		*next_tb = ~(u64)0;
 		if (evt->event_handler)
 			evt->event_handler(evt);
-		__get_cpu_var(irq_stat).timer_irqs_event++;
+		__this_cpu_inc(irq_stat.timer_irqs_event);
 	} else {
 		now = *next_tb - now;
 		if (now <= DECREMENTER_MAX)
@@ -506,13 +506,13 @@ static void __timer_interrupt(void)
 		/* We may have raced with new irq work */
 		if (test_irq_work_pending())
 			set_dec(1);
-		__get_cpu_var(irq_stat).timer_irqs_others++;
+		__this_cpu_inc(irq_stat.timer_irqs_others);
 	}
 
 #ifdef CONFIG_PPC64
 	/* collect purr register values often, for accurate calculations */
 	if (firmware_has_feature(FW_FEATURE_SPLPAR)) {
-		struct cpu_usage *cu = &__get_cpu_var(cpu_usage_array);
+		struct cpu_usage *cu = this_cpu_ptr(&cpu_usage_array);
 		cu->current_tb = mfspr(SPRN_PURR);
 	}
 #endif
@@ -527,7 +527,7 @@ static void __timer_interrupt(void)
 void timer_interrupt(struct pt_regs * regs)
 {
 	struct pt_regs *old_regs;
-	u64 *next_tb = &__get_cpu_var(decrementers_next_tb);
+	u64 *next_tb = this_cpu_ptr(&decrementers_next_tb);
 
 	/* Ensure a positive value is written to the decrementer, or else
 	 * some CPUs will continue to take decrementer exceptions.
@@ -813,7 +813,7 @@ static void __init clocksource_init(void)
 static int decrementer_set_next_event(unsigned long evt,
 				      struct clock_event_device *dev)
 {
-	__get_cpu_var(decrementers_next_tb) = get_tb_or_rtc() + evt;
+	__this_cpu_write(decrementers_next_tb, get_tb_or_rtc() + evt);
 	set_dec(evt);
 
 	/* We may have raced with new irq work */
@@ -833,7 +833,7 @@ static void decrementer_set_mode(enum clock_event_mode mode,
 /* Interrupt handler for the timer broadcast IPI */
 void tick_broadcast_ipi_handler(void)
 {
-	u64 *next_tb = &__get_cpu_var(decrementers_next_tb);
+	u64 *next_tb = this_cpu_ptr(&decrementers_next_tb);
 
 	*next_tb = get_tb_or_rtc();
 	__timer_interrupt();

@@ -295,7 +295,7 @@ long machine_check_early(struct pt_regs *regs)
 {
 	long handled = 0;
 
-	__get_cpu_var(irq_stat).mce_exceptions++;
+	__this_cpu_inc(irq_stat.mce_exceptions);
 
 	if (cur_cpu_spec && cur_cpu_spec->machine_check_early)
 		handled = cur_cpu_spec->machine_check_early(regs);
@@ -304,7 +304,7 @@ long machine_check_early(struct pt_regs *regs)
 
 long hmi_exception_realmode(struct pt_regs *regs)
 {
-	__get_cpu_var(irq_stat).hmi_exceptions++;
+	__this_cpu_inc(irq_stat.hmi_exceptions);
 
 	if (ppc_md.hmi_exception_early)
 		ppc_md.hmi_exception_early(regs);
@@ -700,7 +700,7 @@ void machine_check_exception(struct pt_regs *regs)
 	enum ctx_state prev_state = exception_enter();
 	int recover = 0;
 
-	__get_cpu_var(irq_stat).mce_exceptions++;
+	__this_cpu_inc(irq_stat.mce_exceptions);
 
 	/* See if any machine dependent calls. In theory, we would want
 	 * to call the CPU first, and call the ppc_md. one if the CPU
@@ -1519,7 +1519,7 @@ void vsx_unavailable_tm(struct pt_regs *regs)
 
 void performance_monitor_exception(struct pt_regs *regs)
 {
-	__get_cpu_var(irq_stat).pmu_irqs++;
+	__this_cpu_inc(irq_stat.pmu_irqs);
 
 	perf_irq(regs);
 }

@@ -76,11 +76,11 @@ static inline int local_sid_setup_one(struct id *entry)
 	unsigned long sid;
 	int ret = -1;
 
-	sid = ++(__get_cpu_var(pcpu_last_used_sid));
+	sid = __this_cpu_inc_return(pcpu_last_used_sid);
 	if (sid < NUM_TIDS) {
-		__get_cpu_var(pcpu_sids).entry[sid] = entry;
+		__this_cpu_write(pcpu_sids.entry[sid], entry);
 		entry->val = sid;
-		entry->pentry = &__get_cpu_var(pcpu_sids).entry[sid];
+		entry->pentry = this_cpu_ptr(&pcpu_sids.entry[sid]);
 		ret = sid;
 	}
 
@@ -108,8 +108,8 @@ static inline int local_sid_setup_one(struct id *entry)
 static inline int local_sid_lookup(struct id *entry)
 {
 	if (entry && entry->val != 0 &&
-	    __get_cpu_var(pcpu_sids).entry[entry->val] == entry &&
-	    entry->pentry == &__get_cpu_var(pcpu_sids).entry[entry->val])
+	    __this_cpu_read(pcpu_sids.entry[entry->val]) == entry &&
+	    entry->pentry == this_cpu_ptr(&pcpu_sids.entry[entry->val]))
 		return entry->val;
 	return -1;
 }
@@ -117,8 +117,8 @@ static inline int local_sid_lookup(struct id *entry)
 /* Invalidate all id mappings on local core -- call with preempt disabled */
 static inline void local_sid_destroy_all(void)
 {
-	__get_cpu_var(pcpu_last_used_sid) = 0;
-	memset(&__get_cpu_var(pcpu_sids), 0, sizeof(__get_cpu_var(pcpu_sids)));
+	__this_cpu_write(pcpu_last_used_sid, 0);
+	memset(this_cpu_ptr(&pcpu_sids), 0, sizeof(pcpu_sids));
 }
 
 static void *kvmppc_e500_id_table_alloc(struct kvmppc_vcpu_e500 *vcpu_e500)

@@ -144,9 +144,9 @@ static void kvmppc_core_vcpu_load_e500mc(struct kvm_vcpu *vcpu, int cpu)
 	mtspr(SPRN_GESR, vcpu->arch.shared->esr);
 
 	if (vcpu->arch.oldpir != mfspr(SPRN_PIR) ||
-	    __get_cpu_var(last_vcpu_of_lpid)[get_lpid(vcpu)] != vcpu) {
+	    __this_cpu_read(last_vcpu_of_lpid[get_lpid(vcpu)]) != vcpu) {
 		kvmppc_e500_tlbil_all(vcpu_e500);
-		__get_cpu_var(last_vcpu_of_lpid)[get_lpid(vcpu)] = vcpu;
+		__this_cpu_write(last_vcpu_of_lpid[get_lpid(vcpu)], vcpu);
 	}
 }
 

@@ -629,7 +629,7 @@ static void native_flush_hash_range(unsigned long number, int local)
 	unsigned long want_v;
 	unsigned long flags;
 	real_pte_t pte;
-	struct ppc64_tlb_batch *batch = &__get_cpu_var(ppc64_tlb_batch);
+	struct ppc64_tlb_batch *batch = this_cpu_ptr(&ppc64_tlb_batch);
 	unsigned long psize = batch->psize;
 	int ssize = batch->ssize;
 	int i;

@@ -1322,7 +1322,7 @@ void flush_hash_range(unsigned long number, int local)
 	else {
 		int i;
 		struct ppc64_tlb_batch *batch =
-			&__get_cpu_var(ppc64_tlb_batch);
+			this_cpu_ptr(&ppc64_tlb_batch);
 
 		for (i = 0; i < number; i++)
 			flush_hash_page(batch->vpn[i], batch->pte[i],

@@ -33,13 +33,13 @@ static inline int tlb1_next(void)
 
 	ncams = mfspr(SPRN_TLB1CFG) & TLBnCFG_N_ENTRY;
 
-	index = __get_cpu_var(next_tlbcam_idx);
+	index = this_cpu_read(next_tlbcam_idx);
 
 	/* Just round-robin the entries and wrap when we hit the end */
 	if (unlikely(index == ncams - 1))
-		__get_cpu_var(next_tlbcam_idx) = tlbcam_index;
+		__this_cpu_write(next_tlbcam_idx, tlbcam_index);
 	else
-		__get_cpu_var(next_tlbcam_idx)++;
+		__this_cpu_inc(next_tlbcam_idx);
 
 	return index;
 }

@@ -462,7 +462,7 @@ static void hugepd_free(struct mmu_gather *tlb, void *hugepte)
 {
 	struct hugepd_freelist **batchp;
 
-	batchp = &get_cpu_var(hugepd_freelist_cur);
+	batchp = this_cpu_ptr(&hugepd_freelist_cur);
 
 	if (atomic_read(&tlb->mm->mm_users) < 2 ||
 	    cpumask_equal(mm_cpumask(tlb->mm),

@@ -339,7 +339,7 @@ static void power_pmu_bhrb_reset(void)
 
 static void power_pmu_bhrb_enable(struct perf_event *event)
 {
-	struct cpu_hw_events *cpuhw = &__get_cpu_var(cpu_hw_events);
+	struct cpu_hw_events *cpuhw = this_cpu_ptr(&cpu_hw_events);
 
 	if (!ppmu->bhrb_nr)
 		return;
@@ -354,7 +354,7 @@ static void power_pmu_bhrb_enable(struct perf_event *event)
 
 static void power_pmu_bhrb_disable(struct perf_event *event)
 {
-	struct cpu_hw_events *cpuhw = &__get_cpu_var(cpu_hw_events);
+	struct cpu_hw_events *cpuhw = this_cpu_ptr(&cpu_hw_events);
 
 	if (!ppmu->bhrb_nr)
 		return;
@@ -1144,7 +1144,7 @@ static void power_pmu_disable(struct pmu *pmu)
 	if (!ppmu)
 		return;
 	local_irq_save(flags);
-	cpuhw = &__get_cpu_var(cpu_hw_events);
+	cpuhw = this_cpu_ptr(&cpu_hw_events);
 
 	if (!cpuhw->disabled) {
 		/*
@@ -1211,7 +1211,7 @@ static void power_pmu_enable(struct pmu *pmu)
 		return;
 	local_irq_save(flags);
 
-	cpuhw = &__get_cpu_var(cpu_hw_events);
+	cpuhw = this_cpu_ptr(&cpu_hw_events);
 	if (!cpuhw->disabled)
 		goto out;
 
@@ -1403,7 +1403,7 @@ static int power_pmu_add(struct perf_event *event, int ef_flags)
 	 * Add the event to the list (if there is room)
 	 * and check whether the total set is still feasible.
 	 */
-	cpuhw = &__get_cpu_var(cpu_hw_events);
+	cpuhw = this_cpu_ptr(&cpu_hw_events);
 	n0 = cpuhw->n_events;
 	if (n0 >= ppmu->n_counter)
 		goto out;
@@ -1469,7 +1469,7 @@ static void power_pmu_del(struct perf_event *event, int ef_flags)
 
 	power_pmu_read(event);
 
-	cpuhw = &__get_cpu_var(cpu_hw_events);
+	cpuhw = this_cpu_ptr(&cpu_hw_events);
 	for (i = 0; i < cpuhw->n_events; ++i) {
 		if (event == cpuhw->event[i]) {
 			while (++i < cpuhw->n_events) {
@@ -1575,7 +1575,7 @@ static void power_pmu_stop(struct perf_event *event, int ef_flags)
  */
 static void power_pmu_start_txn(struct pmu *pmu)
 {
-	struct cpu_hw_events *cpuhw = &__get_cpu_var(cpu_hw_events);
+	struct cpu_hw_events *cpuhw = this_cpu_ptr(&cpu_hw_events);
 
 	perf_pmu_disable(pmu);
 	cpuhw->group_flag |= PERF_EVENT_TXN;
@@ -1589,7 +1589,7 @@ static void power_pmu_start_txn(struct pmu *pmu)
  */
 static void power_pmu_cancel_txn(struct pmu *pmu)
 {
-	struct cpu_hw_events *cpuhw = &__get_cpu_var(cpu_hw_events);
+	struct cpu_hw_events *cpuhw = this_cpu_ptr(&cpu_hw_events);
 
 	cpuhw->group_flag &= ~PERF_EVENT_TXN;
 	perf_pmu_enable(pmu);
@@ -1607,7 +1607,7 @@ static int power_pmu_commit_txn(struct pmu *pmu)
 
 	if (!ppmu)
 		return -EAGAIN;
-	cpuhw = &__get_cpu_var(cpu_hw_events);
+	cpuhw = this_cpu_ptr(&cpu_hw_events);
 	n = cpuhw->n_events;
 	if (check_excludes(cpuhw->event, cpuhw->flags, 0, n))
 		return -EAGAIN;
@@ -1964,7 +1964,7 @@ static void record_and_restart(struct perf_event *event, unsigned long val,
 
 		if (event->attr.sample_type & PERF_SAMPLE_BRANCH_STACK) {
 			struct cpu_hw_events *cpuhw;
-			cpuhw = &__get_cpu_var(cpu_hw_events);
+			cpuhw = this_cpu_ptr(&cpu_hw_events);
 			power_pmu_bhrb_read(cpuhw);
 			data.br_stack = &cpuhw->bhrb_stack;
 		}
@@ -2037,7 +2037,7 @@ static bool pmc_overflow(unsigned long val)
 static void perf_event_interrupt(struct pt_regs *regs)
 {
 	int i, j;
-	struct cpu_hw_events *cpuhw = &__get_cpu_var(cpu_hw_events);
+	struct cpu_hw_events *cpuhw = this_cpu_ptr(&cpu_hw_events);
 	struct perf_event *event;
 	unsigned long val[8];
 	int found, active;

@@ -210,7 +210,7 @@ static void fsl_emb_pmu_disable(struct pmu *pmu)
 	unsigned long flags;
 
 	local_irq_save(flags);
-	cpuhw = &__get_cpu_var(cpu_hw_events);
+	cpuhw = this_cpu_ptr(&cpu_hw_events);
 
 	if (!cpuhw->disabled) {
 		cpuhw->disabled = 1;
@@ -249,7 +249,7 @@ static void fsl_emb_pmu_enable(struct pmu *pmu)
 	unsigned long flags;
 
 	local_irq_save(flags);
-	cpuhw = &__get_cpu_var(cpu_hw_events);
+	cpuhw = this_cpu_ptr(&cpu_hw_events);
 	if (!cpuhw->disabled)
 		goto out;
 
@@ -653,7 +653,7 @@ static void record_and_restart(struct perf_event *event, unsigned long val,
 static void perf_event_interrupt(struct pt_regs *regs)
 {
 	int i;
-	struct cpu_hw_events *cpuhw = &__get_cpu_var(cpu_hw_events);
+	struct cpu_hw_events *cpuhw = this_cpu_ptr(&cpu_hw_events);
 	struct perf_event *event;
 	unsigned long val;
 	int found = 0;

@@ -82,7 +82,7 @@ static void iic_unmask(struct irq_data *d)
 
 static void iic_eoi(struct irq_data *d)
 {
-	struct iic *iic = &__get_cpu_var(cpu_iic);
+	struct iic *iic = this_cpu_ptr(&cpu_iic);
 	out_be64(&iic->regs->prio, iic->eoi_stack[--iic->eoi_ptr]);
 	BUG_ON(iic->eoi_ptr < 0);
 }
@@ -148,7 +148,7 @@ static unsigned int iic_get_irq(void)
 	struct iic *iic;
 	unsigned int virq;
 
-	iic = &__get_cpu_var(cpu_iic);
+	iic = this_cpu_ptr(&cpu_iic);
 	*(unsigned long *) &pending =
 		in_be64((u64 __iomem *) &iic->regs->pending_destr);
 	if (!(pending.flags & CBE_IIC_IRQ_VALID))
@@ -163,7 +163,7 @@ static unsigned int iic_get_irq(void)
 
 void iic_setup_cpu(void)
 {
-	out_be64(&__get_cpu_var(cpu_iic).regs->prio, 0xff);
+	out_be64(this_cpu_ptr(&cpu_iic.regs->prio), 0xff);
 }
 
 u8 iic_get_target_id(int cpu)

@@ -48,7 +48,7 @@ void __trace_opal_entry(unsigned long opcode, unsigned long *args)
 
 	local_irq_save(flags);
 
-	depth = &__get_cpu_var(opal_trace_depth);
+	depth = this_cpu_ptr(&opal_trace_depth);
 
 	if (*depth)
 		goto out;
@@ -69,7 +69,7 @@ void __trace_opal_exit(long opcode, unsigned long retval)
 
 	local_irq_save(flags);
 
-	depth = &__get_cpu_var(opal_trace_depth);
+	depth = this_cpu_ptr(&opal_trace_depth);
 
 	if (*depth)
 		goto out;

@@ -711,7 +711,7 @@ void __init ps3_register_ipi_irq(unsigned int cpu, unsigned int virq)
 
 static unsigned int ps3_get_irq(void)
 {
-	struct ps3_private *pd = &__get_cpu_var(ps3_private);
+	struct ps3_private *pd = this_cpu_ptr(&ps3_private);
 	u64 x = (pd->bmp.status & pd->bmp.mask);
 	unsigned int plug;
 

@@ -75,7 +75,7 @@ static atomic_t dtl_count;
  */
 static void consume_dtle(struct dtl_entry *dtle, u64 index)
 {
-	struct dtl_ring *dtlr = &__get_cpu_var(dtl_rings);
+	struct dtl_ring *dtlr = this_cpu_ptr(&dtl_rings);
 	struct dtl_entry *wp = dtlr->write_ptr;
 	struct lppaca *vpa = local_paca->lppaca_ptr;
 

@@ -110,7 +110,7 @@ static void probe_hcall_entry(void *ignored, unsigned long opcode, unsigned long
 	if (opcode > MAX_HCALL_OPCODE)
 		return;
 
-	h = &__get_cpu_var(hcall_stats)[opcode / 4];
+	h = this_cpu_ptr(&hcall_stats[opcode / 4]);
 	h->tb_start = mftb();
 	h->purr_start = mfspr(SPRN_PURR);
 }
@@ -123,7 +123,7 @@ static void probe_hcall_exit(void *ignored, unsigned long opcode, unsigned long
 	if (opcode > MAX_HCALL_OPCODE)
 		return;
 
-	h = &__get_cpu_var(hcall_stats)[opcode / 4];
+	h = this_cpu_ptr(&hcall_stats[opcode / 4]);
 	h->num_calls++;
 	h->tb_total += mftb() - h->tb_start;
 	h->purr_total += mfspr(SPRN_PURR) - h->purr_start;

@@ -199,7 +199,7 @@ static int tce_buildmulti_pSeriesLP(struct iommu_table *tbl, long tcenum,
 
 	local_irq_save(flags);	/* to protect tcep and the page behind it */
 
-	tcep = __get_cpu_var(tce_page);
+	tcep = __this_cpu_read(tce_page);
 
 	/* This is safe to do since interrupts are off when we're called
 	 * from iommu_alloc{,_sg}()
@@ -212,7 +212,7 @@ static int tce_buildmulti_pSeriesLP(struct iommu_table *tbl, long tcenum,
 			return tce_build_pSeriesLP(tbl, tcenum, npages, uaddr,
 					    direction, attrs);
 		}
-		__get_cpu_var(tce_page) = tcep;
+		__this_cpu_write(tce_page, tcep);
 	}
 
 	rpn = __pa(uaddr) >> TCE_SHIFT;
@@ -398,7 +398,7 @@ static int tce_setrange_multi_pSeriesLP(unsigned long start_pfn,
 	long l, limit;
 
 	local_irq_disable();	/* to protect tcep and the page behind it */
-	tcep = __get_cpu_var(tce_page);
+	tcep = __this_cpu_read(tce_page);
 
 	if (!tcep) {
 		tcep = (__be64 *)__get_free_page(GFP_ATOMIC);
@@ -406,7 +406,7 @@ static int tce_setrange_multi_pSeriesLP(unsigned long start_pfn,
 			local_irq_enable();
 			return -ENOMEM;
 		}
-		__get_cpu_var(tce_page) = tcep;
+		__this_cpu_write(tce_page, tcep);
 	}
 
 	proto_tce = TCE_PCI_READ | TCE_PCI_WRITE;

@@ -515,7 +515,7 @@ static void pSeries_lpar_flush_hash_range(unsigned long number, int local)
 	unsigned long vpn;
 	unsigned long i, pix, rc;
 	unsigned long flags = 0;
-	struct ppc64_tlb_batch *batch = &__get_cpu_var(ppc64_tlb_batch);
+	struct ppc64_tlb_batch *batch = this_cpu_ptr(&ppc64_tlb_batch);
 	int lock_tlbie = !mmu_has_feature(MMU_FTR_LOCKLESS_TLBIE);
 	unsigned long param[9];
 	unsigned long hash, index, shift, hidx, slot;
@@ -705,7 +705,7 @@ void __trace_hcall_entry(unsigned long opcode, unsigned long *args)
 
 	local_irq_save(flags);
 
-	depth = &__get_cpu_var(hcall_trace_depth);
+	depth = this_cpu_ptr(&hcall_trace_depth);
 
 	if (*depth)
 		goto out;
@@ -730,7 +730,7 @@ void __trace_hcall_exit(long opcode, unsigned long retval,
 
 	local_irq_save(flags);
 
-	depth = &__get_cpu_var(hcall_trace_depth);
+	depth = this_cpu_ptr(&hcall_trace_depth);
 
 	if (*depth)
 		goto out;

@@ -302,8 +302,8 @@ static struct rtas_error_log *fwnmi_get_errinfo(struct pt_regs *regs)
 	/* If it isn't an extended log we can use the per cpu 64bit buffer */
 	h = (struct rtas_error_log *)&savep[1];
 	if (!rtas_error_extended(h)) {
-		memcpy(&__get_cpu_var(mce_data_buf), h, sizeof(__u64));
-		errhdr = (struct rtas_error_log *)&__get_cpu_var(mce_data_buf);
+		memcpy(this_cpu_ptr(&mce_data_buf), h, sizeof(__u64));
+		errhdr = (struct rtas_error_log *)this_cpu_ptr(&mce_data_buf);
 	} else {
 		int len, error_log_length;
 

@@ -155,7 +155,7 @@ int __init xics_smp_probe(void)
 
 void xics_teardown_cpu(void)
 {
-	struct xics_cppr *os_cppr = &__get_cpu_var(xics_cppr);
+	struct xics_cppr *os_cppr = this_cpu_ptr(&xics_cppr);
 
 	/*
 	 * we have to reset the cppr index to 0 because we're