s390/time: rename tod clock access functions

Fix a name clash with some common code device drivers by adding "tod"
to all TOD clock access function names.
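
For reference, the renames performed by this patch (taken directly from the
arch/s390/include/asm/timex.h hunks below) are:

	set_clock()           -> set_tod_clock()
	store_clock()         -> store_tod_clock()
	get_clock()           -> get_tod_clock()
	get_clock_ext()       -> get_tod_clock_ext()
	get_clock_xt()        -> get_tod_clock_xt()
	get_clock_monotonic() -> get_tod_clock_monotonic()

All callers throughout arch/s390 are converted accordingly.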

Signed-off-by: Heiko Carstens <heiko.carstens@de.ibm.com>
Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>

diff --git a/arch/s390/appldata/appldata_mem.c b/arch/s390/appldata/appldata_mem.c
index 02d9a1c..7ef60b5 100644
--- a/arch/s390/appldata/appldata_mem.c
+++ b/arch/s390/appldata/appldata_mem.c
@@ -108,7 +108,7 @@
 	mem_data->totalswap = P2K(val.totalswap);
 	mem_data->freeswap  = P2K(val.freeswap);
 
-	mem_data->timestamp = get_clock();
+	mem_data->timestamp = get_tod_clock();
 	mem_data->sync_count_2++;
 }
 
diff --git a/arch/s390/appldata/appldata_net_sum.c b/arch/s390/appldata/appldata_net_sum.c
index 1370e35..2d224b9 100644
--- a/arch/s390/appldata/appldata_net_sum.c
+++ b/arch/s390/appldata/appldata_net_sum.c
@@ -111,7 +111,7 @@
 	net_data->tx_dropped = tx_dropped;
 	net_data->collisions = collisions;
 
-	net_data->timestamp = get_clock();
+	net_data->timestamp = get_tod_clock();
 	net_data->sync_count_2++;
 }
 
diff --git a/arch/s390/appldata/appldata_os.c b/arch/s390/appldata/appldata_os.c
index 87521ba..de8e2b3 100644
--- a/arch/s390/appldata/appldata_os.c
+++ b/arch/s390/appldata/appldata_os.c
@@ -156,7 +156,7 @@
 		}
 		ops.size = new_size;
 	}
-	os_data->timestamp = get_clock();
+	os_data->timestamp = get_tod_clock();
 	os_data->sync_count_2++;
 }
 
diff --git a/arch/s390/hypfs/hypfs_vm.c b/arch/s390/hypfs/hypfs_vm.c
index 4f6afaa..f364dcf 100644
--- a/arch/s390/hypfs/hypfs_vm.c
+++ b/arch/s390/hypfs/hypfs_vm.c
@@ -245,7 +245,7 @@
 	d2fc = diag2fc_store(guest_query, &count, sizeof(d2fc->hdr));
 	if (IS_ERR(d2fc))
 		return PTR_ERR(d2fc);
-	get_clock_ext(d2fc->hdr.tod_ext);
+	get_tod_clock_ext(d2fc->hdr.tod_ext);
 	d2fc->hdr.len = count * sizeof(struct diag2fc_data);
 	d2fc->hdr.version = DBFS_D2FC_HDR_VERSION;
 	d2fc->hdr.count = count;
diff --git a/arch/s390/include/asm/timex.h b/arch/s390/include/asm/timex.h
index 4c060bb..8ad8af9 100644
--- a/arch/s390/include/asm/timex.h
+++ b/arch/s390/include/asm/timex.h
@@ -15,7 +15,7 @@
 #define TOD_UNIX_EPOCH 0x7d91048bca000000ULL
 
 /* Inline functions for clock register access. */
-static inline int set_clock(__u64 time)
+static inline int set_tod_clock(__u64 time)
 {
 	int cc;
 
@@ -27,7 +27,7 @@
 	return cc;
 }
 
-static inline int store_clock(__u64 *time)
+static inline int store_tod_clock(__u64 *time)
 {
 	int cc;
 
@@ -71,7 +71,7 @@
 
 typedef unsigned long long cycles_t;
 
-static inline unsigned long long get_clock(void)
+static inline unsigned long long get_tod_clock(void)
 {
 	unsigned long long clk;
 
@@ -83,21 +83,21 @@
 	return clk;
 }
 
-static inline void get_clock_ext(char *clk)
+static inline void get_tod_clock_ext(char *clk)
 {
 	asm volatile("stcke %0" : "=Q" (*clk) : : "cc");
 }
 
-static inline unsigned long long get_clock_xt(void)
+static inline unsigned long long get_tod_clock_xt(void)
 {
 	unsigned char clk[16];
-	get_clock_ext(clk);
+	get_tod_clock_ext(clk);
 	return *((unsigned long long *)&clk[1]);
 }
 
 static inline cycles_t get_cycles(void)
 {
-	return (cycles_t) get_clock() >> 2;
+	return (cycles_t) get_tod_clock() >> 2;
 }
 
 int get_sync_clock(unsigned long long *clock);
@@ -123,9 +123,9 @@
  * function, otherwise the returned value is not guaranteed to
  * be monotonic.
  */
-static inline unsigned long long get_clock_monotonic(void)
+static inline unsigned long long get_tod_clock_monotonic(void)
 {
-	return get_clock_xt() - sched_clock_base_cc;
+	return get_tod_clock_xt() - sched_clock_base_cc;
 }
 
 /**
diff --git a/arch/s390/kernel/debug.c b/arch/s390/kernel/debug.c
index 4e8215e..09a94cd 100644
--- a/arch/s390/kernel/debug.c
+++ b/arch/s390/kernel/debug.c
@@ -867,7 +867,7 @@
 debug_finish_entry(debug_info_t * id, debug_entry_t* active, int level,
 			int exception)
 {
-	active->id.stck = get_clock();
+	active->id.stck = get_tod_clock();
 	active->id.fields.cpuid = smp_processor_id();
 	active->caller = __builtin_return_address(0);
 	active->id.fields.exception = exception;
diff --git a/arch/s390/kernel/early.c b/arch/s390/kernel/early.c
index 1f0eee9..1ee98e5 100644
--- a/arch/s390/kernel/early.c
+++ b/arch/s390/kernel/early.c
@@ -47,10 +47,10 @@
 {
 	u64 time;
 
-	if (store_clock(&time) == 0)
+	if (store_tod_clock(&time) == 0)
 		return;
 	/* TOD clock not running. Set the clock to Unix Epoch. */
-	if (set_clock(TOD_UNIX_EPOCH) != 0 || store_clock(&time) != 0)
+	if (set_tod_clock(TOD_UNIX_EPOCH) != 0 || store_tod_clock(&time) != 0)
 		disabled_wait(0);
 
 	sched_clock_base_cc = TOD_UNIX_EPOCH;
@@ -173,7 +173,7 @@
 	}
 
 	/* re-initialize cputime accounting. */
-	sched_clock_base_cc = get_clock();
+	sched_clock_base_cc = get_tod_clock();
 	S390_lowcore.last_update_clock = sched_clock_base_cc;
 	S390_lowcore.last_update_timer = 0x7fffffffffffffffULL;
 	S390_lowcore.user_timer = 0;
diff --git a/arch/s390/kernel/nmi.c b/arch/s390/kernel/nmi.c
index 7918fbe..504175e 100644
--- a/arch/s390/kernel/nmi.c
+++ b/arch/s390/kernel/nmi.c
@@ -293,7 +293,7 @@
 			 * retry this instruction.
 			 */
 			spin_lock(&ipd_lock);
-			tmp = get_clock();
+			tmp = get_tod_clock();
 			if (((tmp - last_ipd) >> 12) < MAX_IPD_TIME)
 				ipd_count++;
 			else
diff --git a/arch/s390/kernel/smp.c b/arch/s390/kernel/smp.c
index 7433a2f..549c9d1 100644
--- a/arch/s390/kernel/smp.c
+++ b/arch/s390/kernel/smp.c
@@ -365,16 +365,16 @@
 	u64 end;
 	int cpu;
 
-	end = get_clock() + (1000000UL << 12);
+	end = get_tod_clock() + (1000000UL << 12);
 	for_each_cpu(cpu, cpumask) {
 		struct pcpu *pcpu = pcpu_devices + cpu;
 		set_bit(ec_stop_cpu, &pcpu->ec_mask);
 		while (__pcpu_sigp(pcpu->address, SIGP_EMERGENCY_SIGNAL,
 				   0, NULL) == SIGP_CC_BUSY &&
-		       get_clock() < end)
+		       get_tod_clock() < end)
 			cpu_relax();
 	}
-	while (get_clock() < end) {
+	while (get_tod_clock() < end) {
 		for_each_cpu(cpu, cpumask)
 			if (pcpu_stopped(pcpu_devices + cpu))
 				cpumask_clear_cpu(cpu, cpumask);
@@ -694,7 +694,7 @@
  */
 static void __cpuinit smp_start_secondary(void *cpuvoid)
 {
-	S390_lowcore.last_update_clock = get_clock();
+	S390_lowcore.last_update_clock = get_tod_clock();
 	S390_lowcore.restart_stack = (unsigned long) restart_stack;
 	S390_lowcore.restart_fn = (unsigned long) do_restart;
 	S390_lowcore.restart_data = 0;
@@ -947,7 +947,7 @@
 	unsigned int sequence;
 
 	do {
-		now = get_clock();
+		now = get_tod_clock();
 		sequence = ACCESS_ONCE(idle->sequence);
 		idle_time = ACCESS_ONCE(idle->idle_time);
 		idle_enter = ACCESS_ONCE(idle->clock_idle_enter);
diff --git a/arch/s390/kernel/time.c b/arch/s390/kernel/time.c
index 0aa98db..876546b 100644
--- a/arch/s390/kernel/time.c
+++ b/arch/s390/kernel/time.c
@@ -63,7 +63,7 @@
  */
 unsigned long long notrace __kprobes sched_clock(void)
 {
-	return tod_to_ns(get_clock_monotonic());
+	return tod_to_ns(get_tod_clock_monotonic());
 }
 
 /*
@@ -194,7 +194,7 @@
 
 void read_persistent_clock(struct timespec *ts)
 {
-	tod_to_timeval(get_clock() - TOD_UNIX_EPOCH, ts);
+	tod_to_timeval(get_tod_clock() - TOD_UNIX_EPOCH, ts);
 }
 
 void read_boot_clock(struct timespec *ts)
@@ -204,7 +204,7 @@
 
 static cycle_t read_tod_clock(struct clocksource *cs)
 {
-	return get_clock();
+	return get_tod_clock();
 }
 
 static struct clocksource clocksource_tod = {
@@ -342,7 +342,7 @@
 
 	sw_ptr = &get_cpu_var(clock_sync_word);
 	sw0 = atomic_read(sw_ptr);
-	*clock = get_clock();
+	*clock = get_tod_clock();
 	sw1 = atomic_read(sw_ptr);
 	put_cpu_var(clock_sync_word);
 	if (sw0 == sw1 && (sw0 & 0x80000000U))
@@ -486,7 +486,7 @@
 		.p0 = 0, .p1 = 0, ._pad1 = 0, .ea = 0,
 		.es = 0, .sl = 0 };
 	if (etr_setr(&etr_eacr) == 0) {
-		etr_tolec = get_clock();
+		etr_tolec = get_tod_clock();
 		set_bit(CLOCK_SYNC_HAS_ETR, &clock_sync_flags);
 		if (etr_port0_online && etr_port1_online)
 			set_bit(CLOCK_SYNC_ETR, &clock_sync_flags);
@@ -768,8 +768,8 @@
 	__ctl_set_bit(14, 21);
 	__ctl_set_bit(0, 29);
 	clock = ((unsigned long long) (aib->edf2.etv + 1)) << 32;
-	old_clock = get_clock();
-	if (set_clock(clock) == 0) {
+	old_clock = get_tod_clock();
+	if (set_tod_clock(clock) == 0) {
 		__udelay(1);	/* Wait for the clock to start. */
 		__ctl_clear_bit(0, 29);
 		__ctl_clear_bit(14, 21);
@@ -845,7 +845,7 @@
 			 * assume that this can have caused an stepping
 			 * port switch.
 			 */
-			etr_tolec = get_clock();
+			etr_tolec = get_tod_clock();
 		eacr.p0 = etr_port0_online;
 		if (!eacr.p0)
 			eacr.e0 = 0;
@@ -858,7 +858,7 @@
 			 * assume that this can have caused an stepping
 			 * port switch.
 			 */
-			etr_tolec = get_clock();
+			etr_tolec = get_tod_clock();
 		eacr.p1 = etr_port1_online;
 		if (!eacr.p1)
 			eacr.e1 = 0;
@@ -974,7 +974,7 @@
 	etr_eacr = eacr;
 	etr_setr(&etr_eacr);
 	if (dp_changed)
-		etr_tolec = get_clock();
+		etr_tolec = get_tod_clock();
 }
 
 /*
@@ -1012,7 +1012,7 @@
 	/* Store aib to get the current ETR status word. */
 	BUG_ON(etr_stetr(&aib) != 0);
 	etr_port0.esw = etr_port1.esw = aib.esw;	/* Copy status word. */
-	now = get_clock();
+	now = get_tod_clock();
 
 	/*
 	 * Update the port information if the last stepping port change
@@ -1537,10 +1537,10 @@
 	if (stp_info.todoff[0] || stp_info.todoff[1] ||
 	    stp_info.todoff[2] || stp_info.todoff[3] ||
 	    stp_info.tmd != 2) {
-		old_clock = get_clock();
+		old_clock = get_tod_clock();
 		rc = chsc_sstpc(stp_page, STP_OP_SYNC, 0);
 		if (rc == 0) {
-			delta = adjust_time(old_clock, get_clock(), 0);
+			delta = adjust_time(old_clock, get_tod_clock(), 0);
 			fixup_clock_comparator(delta);
 			rc = chsc_sstpi(stp_page, &stp_info,
 					sizeof(struct stp_sstpi));
diff --git a/arch/s390/kernel/vtime.c b/arch/s390/kernel/vtime.c
index e84b8b6..8911a16 100644
--- a/arch/s390/kernel/vtime.c
+++ b/arch/s390/kernel/vtime.c
@@ -191,7 +191,7 @@
 	unsigned int sequence;
 
 	do {
-		now = get_clock();
+		now = get_tod_clock();
 		sequence = ACCESS_ONCE(idle->sequence);
 		idle_enter = ACCESS_ONCE(idle->clock_idle_enter);
 		idle_exit = ACCESS_ONCE(idle->clock_idle_exit);
diff --git a/arch/s390/kvm/interrupt.c b/arch/s390/kvm/interrupt.c
index 82c481d..87418b5 100644
--- a/arch/s390/kvm/interrupt.c
+++ b/arch/s390/kvm/interrupt.c
@@ -362,7 +362,7 @@
 	}
 
 	if ((!rc) && (vcpu->arch.sie_block->ckc <
-		get_clock() + vcpu->arch.sie_block->epoch)) {
+		get_tod_clock() + vcpu->arch.sie_block->epoch)) {
 		if ((!psw_extint_disabled(vcpu)) &&
 			(vcpu->arch.sie_block->gcr[0] & 0x800ul))
 			rc = 1;
@@ -402,7 +402,7 @@
 		goto no_timer;
 	}
 
-	now = get_clock() + vcpu->arch.sie_block->epoch;
+	now = get_tod_clock() + vcpu->arch.sie_block->epoch;
 	if (vcpu->arch.sie_block->ckc < now) {
 		__unset_cpu_idle(vcpu);
 		return 0;
@@ -492,7 +492,7 @@
 	}
 
 	if ((vcpu->arch.sie_block->ckc <
-		get_clock() + vcpu->arch.sie_block->epoch))
+		get_tod_clock() + vcpu->arch.sie_block->epoch))
 		__try_deliver_ckc_interrupt(vcpu);
 
 	if (atomic_read(&fi->active)) {
diff --git a/arch/s390/lib/delay.c b/arch/s390/lib/delay.c
index 42d0cf8..c61b9fa 100644
--- a/arch/s390/lib/delay.c
+++ b/arch/s390/lib/delay.c
@@ -32,7 +32,7 @@
 	unsigned long cr0, cr6, new;
 	u64 clock_saved, end;
 
-	end = get_clock() + (usecs << 12);
+	end = get_tod_clock() + (usecs << 12);
 	clock_saved = local_tick_disable();
 	__ctl_store(cr0, 0, 0);
 	__ctl_store(cr6, 6, 6);
@@ -45,7 +45,7 @@
 		set_clock_comparator(end);
 		vtime_stop_cpu();
 		local_irq_disable();
-	} while (get_clock() < end);
+	} while (get_tod_clock() < end);
 	lockdep_on();
 	__ctl_load(cr0, 0, 0);
 	__ctl_load(cr6, 6, 6);
@@ -56,7 +56,7 @@
 {
 	u64 clock_saved, end;
 
-	end = get_clock() + (usecs << 12);
+	end = get_tod_clock() + (usecs << 12);
 	do {
 		clock_saved = 0;
 		if (end < S390_lowcore.clock_comparator) {
@@ -67,7 +67,7 @@
 		local_irq_disable();
 		if (clock_saved)
 			local_tick_enable(clock_saved);
-	} while (get_clock() < end);
+	} while (get_tod_clock() < end);
 }
 
 /*
@@ -111,8 +111,8 @@
 {
 	u64 end;
 
-	end = get_clock() + (usecs << 12);
-	while (get_clock() < end)
+	end = get_tod_clock() + (usecs << 12);
+	while (get_tod_clock() < end)
 		cpu_relax();
 }
 
@@ -122,10 +122,10 @@
 
 	nsecs <<= 9;
 	do_div(nsecs, 125);
-	end = get_clock() + nsecs;
+	end = get_tod_clock() + nsecs;
 	if (nsecs & ~0xfffUL)
 		__udelay(nsecs >> 12);
-	while (get_clock() < end)
+	while (get_tod_clock() < end)
 		barrier();
 }
 EXPORT_SYMBOL(__ndelay);