blob: 839f93252e1a93ecbb6c137ef0f9035263ee4b22 [file] [log] [blame]
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001/* arch/arm/mach-msm/smd.c
2 *
3 * Copyright (C) 2007 Google, Inc.
Eric Holmbergc7e8daf2011-12-28 11:49:21 -07004 * Copyright (c) 2008-2012, Code Aurora Forum. All rights reserved.
Brian Swetland2eb44eb2008-09-29 16:00:48 -07005 * Author: Brian Swetland <swetland@google.com>
6 *
7 * This software is licensed under the terms of the GNU General Public
8 * License version 2, as published by the Free Software Foundation, and
9 * may be copied, distributed, and modified under those terms.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 */
17
18#include <linux/platform_device.h>
19#include <linux/module.h>
20#include <linux/fs.h>
21#include <linux/cdev.h>
22#include <linux/device.h>
23#include <linux/wait.h>
24#include <linux/interrupt.h>
25#include <linux/irq.h>
26#include <linux/list.h>
27#include <linux/slab.h>
Brian Swetland2eb44eb2008-09-29 16:00:48 -070028#include <linux/delay.h>
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070029#include <linux/io.h>
30#include <linux/termios.h>
31#include <linux/ctype.h>
32#include <linux/remote_spinlock.h>
33#include <linux/uaccess.h>
Eric Holmbergc7e8daf2011-12-28 11:49:21 -070034#include <linux/kfifo.h>
Eric Holmberg59a9f942012-03-19 10:04:22 -060035#include <linux/wakelock.h>
Karthikeyan Ramasubramanian7069c482012-03-22 09:21:20 -060036#include <linux/notifier.h>
Brian Swetland2eb44eb2008-09-29 16:00:48 -070037#include <mach/msm_smd.h>
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070038#include <mach/msm_iomap.h>
Brian Swetland2eb44eb2008-09-29 16:00:48 -070039#include <mach/system.h>
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070040#include <mach/subsystem_notif.h>
Angshuman Sarkaread67bd2011-09-21 20:13:12 +053041#include <mach/socinfo.h>
Ram Somani8b9589f2012-04-03 12:07:18 +053042#include <asm/cacheflush.h>
Brian Swetland2eb44eb2008-09-29 16:00:48 -070043
44#include "smd_private.h"
45#include "proc_comm.h"
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070046#include "modem_notifier.h"
Brian Swetland2eb44eb2008-09-29 16:00:48 -070047
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070048#if defined(CONFIG_ARCH_QSD8X50) || defined(CONFIG_ARCH_MSM8X60) \
Jeff Hugo56b933a2011-09-28 14:42:05 -060049 || defined(CONFIG_ARCH_MSM8960) || defined(CONFIG_ARCH_FSM9XXX) \
Jeff Hugo0c0f5e92011-09-28 13:55:45 -060050 || defined(CONFIG_ARCH_MSM9615) || defined(CONFIG_ARCH_APQ8064)
Brian Swetland37521a32009-07-01 18:30:47 -070051#define CONFIG_QDSP6 1
52#endif
53
Jeff Hugo0c0f5e92011-09-28 13:55:45 -060054#if defined(CONFIG_ARCH_MSM8X60) || defined(CONFIG_ARCH_MSM8960) \
55 || defined(CONFIG_ARCH_APQ8064)
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070056#define CONFIG_DSPS 1
57#endif
58
Jeff Hugo0c0f5e92011-09-28 13:55:45 -060059#if defined(CONFIG_ARCH_MSM8960) \
60 || defined(CONFIG_ARCH_APQ8064)
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070061#define CONFIG_WCNSS 1
Jeff Hugo6a8057c2011-08-16 13:47:12 -060062#define CONFIG_DSPS_SMSM 1
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070063#endif
Brian Swetland2eb44eb2008-09-29 16:00:48 -070064
65#define MODULE_NAME "msm_smd"
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070066#define SMEM_VERSION 0x000B
67#define SMD_VERSION 0x00020000
Eric Holmbergc7e8daf2011-12-28 11:49:21 -070068#define SMSM_SNAPSHOT_CNT 64
Eric Holmbergda31d042012-03-28 14:01:02 -060069#define SMSM_SNAPSHOT_SIZE ((SMSM_NUM_ENTRIES + 1) * 4)
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070070
71uint32_t SMSM_NUM_ENTRIES = 8;
72uint32_t SMSM_NUM_HOSTS = 3;
Brian Swetland2eb44eb2008-09-29 16:00:48 -070073
Eric Holmberge8a39322012-04-03 15:14:02 -060074/* Legacy SMSM interrupt notifications */
75#define LEGACY_MODEM_SMSM_MASK (SMSM_RESET | SMSM_INIT | SMSM_SMDINIT \
76 | SMSM_RUN | SMSM_SYSTEM_DOWNLOAD)
77
Brian Swetland2eb44eb2008-09-29 16:00:48 -070078enum {
79 MSM_SMD_DEBUG = 1U << 0,
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070080 MSM_SMSM_DEBUG = 1U << 1,
81 MSM_SMD_INFO = 1U << 2,
82 MSM_SMSM_INFO = 1U << 3,
Eric Holmberg98c6c642012-02-24 11:29:35 -070083 MSM_SMx_POWER_INFO = 1U << 4,
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070084};
85
86struct smsm_shared_info {
87 uint32_t *state;
88 uint32_t *intr_mask;
89 uint32_t *intr_mux;
90};
91
92static struct smsm_shared_info smsm_info;
Eric Holmberg59a9f942012-03-19 10:04:22 -060093static struct kfifo smsm_snapshot_fifo;
94static struct wake_lock smsm_snapshot_wakelock;
95static int smsm_snapshot_count;
96static DEFINE_SPINLOCK(smsm_snapshot_count_lock);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070097
98struct smsm_size_info_type {
99 uint32_t num_hosts;
100 uint32_t num_entries;
101 uint32_t reserved0;
102 uint32_t reserved1;
103};
104
105struct smsm_state_cb_info {
106 struct list_head cb_list;
107 uint32_t mask;
108 void *data;
109 void (*notify)(void *data, uint32_t old_state, uint32_t new_state);
110};
111
112struct smsm_state_info {
113 struct list_head callbacks;
114 uint32_t last_value;
Eric Holmberge8a39322012-04-03 15:14:02 -0600115 uint32_t intr_mask_set;
116 uint32_t intr_mask_clear;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700117};
118
Angshuman Sarkarbad32df2012-02-01 19:52:52 +0530119struct interrupt_config_item {
120 /* must be initialized */
121 irqreturn_t (*irq_handler)(int req, void *data);
122 /* outgoing interrupt config (set from platform data) */
123 uint32_t out_bit_pos;
124 void __iomem *out_base;
125 uint32_t out_offset;
126};
127
128struct interrupt_config {
129 struct interrupt_config_item smd;
130 struct interrupt_config_item smsm;
131};
132
133static irqreturn_t smd_modem_irq_handler(int irq, void *data);
Eric Holmberg98c6c642012-02-24 11:29:35 -0700134static irqreturn_t smsm_modem_irq_handler(int irq, void *data);
Angshuman Sarkarbad32df2012-02-01 19:52:52 +0530135static irqreturn_t smd_dsp_irq_handler(int irq, void *data);
Eric Holmberg98c6c642012-02-24 11:29:35 -0700136static irqreturn_t smsm_dsp_irq_handler(int irq, void *data);
Angshuman Sarkarbad32df2012-02-01 19:52:52 +0530137static irqreturn_t smd_dsps_irq_handler(int irq, void *data);
Eric Holmberg98c6c642012-02-24 11:29:35 -0700138static irqreturn_t smsm_dsps_irq_handler(int irq, void *data);
Angshuman Sarkarbad32df2012-02-01 19:52:52 +0530139static irqreturn_t smd_wcnss_irq_handler(int irq, void *data);
Eric Holmberg98c6c642012-02-24 11:29:35 -0700140static irqreturn_t smsm_wcnss_irq_handler(int irq, void *data);
Angshuman Sarkarbad32df2012-02-01 19:52:52 +0530141static irqreturn_t smsm_irq_handler(int irq, void *data);
142
143static struct interrupt_config private_intr_config[NUM_SMD_SUBSYSTEMS] = {
144 [SMD_MODEM] = {
145 .smd.irq_handler = smd_modem_irq_handler,
Eric Holmberg98c6c642012-02-24 11:29:35 -0700146 .smsm.irq_handler = smsm_modem_irq_handler,
Angshuman Sarkarbad32df2012-02-01 19:52:52 +0530147 },
148 [SMD_Q6] = {
149 .smd.irq_handler = smd_dsp_irq_handler,
Eric Holmberg98c6c642012-02-24 11:29:35 -0700150 .smsm.irq_handler = smsm_dsp_irq_handler,
Angshuman Sarkarbad32df2012-02-01 19:52:52 +0530151 },
152 [SMD_DSPS] = {
153 .smd.irq_handler = smd_dsps_irq_handler,
Eric Holmberg98c6c642012-02-24 11:29:35 -0700154 .smsm.irq_handler = smsm_dsps_irq_handler,
Angshuman Sarkarbad32df2012-02-01 19:52:52 +0530155 },
156 [SMD_WCNSS] = {
157 .smd.irq_handler = smd_wcnss_irq_handler,
Eric Holmberg98c6c642012-02-24 11:29:35 -0700158 .smsm.irq_handler = smsm_wcnss_irq_handler,
Angshuman Sarkarbad32df2012-02-01 19:52:52 +0530159 },
160};
Eric Holmberg7ad623a2012-03-01 14:41:10 -0700161struct interrupt_stat interrupt_stats[NUM_SMD_SUBSYSTEMS];
Angshuman Sarkarbad32df2012-02-01 19:52:52 +0530162
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700163#define SMSM_STATE_ADDR(entry) (smsm_info.state + entry)
164#define SMSM_INTR_MASK_ADDR(entry, host) (smsm_info.intr_mask + \
165 entry * SMSM_NUM_HOSTS + host)
166#define SMSM_INTR_MUX_ADDR(entry) (smsm_info.intr_mux + entry)
167
168/* Internal definitions which are not exported in some targets */
169enum {
170 SMSM_APPS_DEM_I = 3,
Brian Swetland2eb44eb2008-09-29 16:00:48 -0700171};
172
173static int msm_smd_debug_mask;
Brian Swetland2eb44eb2008-09-29 16:00:48 -0700174module_param_named(debug_mask, msm_smd_debug_mask,
175 int, S_IRUGO | S_IWUSR | S_IWGRP);
176
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700177#if defined(CONFIG_MSM_SMD_DEBUG)
178#define SMD_DBG(x...) do { \
179 if (msm_smd_debug_mask & MSM_SMD_DEBUG) \
180 printk(KERN_DEBUG x); \
181 } while (0)
182
183#define SMSM_DBG(x...) do { \
184 if (msm_smd_debug_mask & MSM_SMSM_DEBUG) \
185 printk(KERN_DEBUG x); \
186 } while (0)
187
188#define SMD_INFO(x...) do { \
189 if (msm_smd_debug_mask & MSM_SMD_INFO) \
190 printk(KERN_INFO x); \
191 } while (0)
192
193#define SMSM_INFO(x...) do { \
194 if (msm_smd_debug_mask & MSM_SMSM_INFO) \
195 printk(KERN_INFO x); \
196 } while (0)
Eric Holmberg98c6c642012-02-24 11:29:35 -0700197#define SMx_POWER_INFO(x...) do { \
198 if (msm_smd_debug_mask & MSM_SMx_POWER_INFO) \
199 printk(KERN_INFO x); \
200 } while (0)
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700201#else
202#define SMD_DBG(x...) do { } while (0)
203#define SMSM_DBG(x...) do { } while (0)
204#define SMD_INFO(x...) do { } while (0)
205#define SMSM_INFO(x...) do { } while (0)
Eric Holmberg98c6c642012-02-24 11:29:35 -0700206#define SMx_POWER_INFO(x...) do { } while (0)
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700207#endif
208
Brian Swetland2eb44eb2008-09-29 16:00:48 -0700209static unsigned last_heap_free = 0xffffffff;
210
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700211static inline void smd_write_intr(unsigned int val,
212 const void __iomem *addr);
213
214#if defined(CONFIG_ARCH_MSM7X30)
215#define MSM_TRIG_A2M_SMD_INT \
Taniya Das298de8c2012-02-16 11:45:31 +0530216 (smd_write_intr(1 << 0, MSM_APCS_GCC_BASE + 0x8))
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700217#define MSM_TRIG_A2Q6_SMD_INT \
Taniya Das298de8c2012-02-16 11:45:31 +0530218 (smd_write_intr(1 << 8, MSM_APCS_GCC_BASE + 0x8))
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700219#define MSM_TRIG_A2M_SMSM_INT \
Taniya Das298de8c2012-02-16 11:45:31 +0530220 (smd_write_intr(1 << 5, MSM_APCS_GCC_BASE + 0x8))
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700221#define MSM_TRIG_A2Q6_SMSM_INT \
Taniya Das298de8c2012-02-16 11:45:31 +0530222 (smd_write_intr(1 << 8, MSM_APCS_GCC_BASE + 0x8))
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700223#define MSM_TRIG_A2DSPS_SMD_INT
Jeff Hugo6a8057c2011-08-16 13:47:12 -0600224#define MSM_TRIG_A2DSPS_SMSM_INT
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700225#define MSM_TRIG_A2WCNSS_SMD_INT
226#define MSM_TRIG_A2WCNSS_SMSM_INT
227#elif defined(CONFIG_ARCH_MSM8X60)
228#define MSM_TRIG_A2M_SMD_INT \
229 (smd_write_intr(1 << 3, MSM_GCC_BASE + 0x8))
230#define MSM_TRIG_A2Q6_SMD_INT \
231 (smd_write_intr(1 << 15, MSM_GCC_BASE + 0x8))
232#define MSM_TRIG_A2M_SMSM_INT \
233 (smd_write_intr(1 << 4, MSM_GCC_BASE + 0x8))
234#define MSM_TRIG_A2Q6_SMSM_INT \
235 (smd_write_intr(1 << 14, MSM_GCC_BASE + 0x8))
236#define MSM_TRIG_A2DSPS_SMD_INT \
237 (smd_write_intr(1, MSM_SIC_NON_SECURE_BASE + 0x4080))
Jeff Hugo6a8057c2011-08-16 13:47:12 -0600238#define MSM_TRIG_A2DSPS_SMSM_INT
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700239#define MSM_TRIG_A2WCNSS_SMD_INT
240#define MSM_TRIG_A2WCNSS_SMSM_INT
Jeff Hugo56b933a2011-09-28 14:42:05 -0600241#elif defined(CONFIG_ARCH_MSM9615)
242#define MSM_TRIG_A2M_SMD_INT \
243 (smd_write_intr(1 << 3, MSM_APCS_GCC_BASE + 0x8))
244#define MSM_TRIG_A2Q6_SMD_INT \
245 (smd_write_intr(1 << 15, MSM_APCS_GCC_BASE + 0x8))
246#define MSM_TRIG_A2M_SMSM_INT \
247 (smd_write_intr(1 << 4, MSM_APCS_GCC_BASE + 0x8))
248#define MSM_TRIG_A2Q6_SMSM_INT \
249 (smd_write_intr(1 << 14, MSM_APCS_GCC_BASE + 0x8))
250#define MSM_TRIG_A2DSPS_SMD_INT
251#define MSM_TRIG_A2DSPS_SMSM_INT
252#define MSM_TRIG_A2WCNSS_SMD_INT
253#define MSM_TRIG_A2WCNSS_SMSM_INT
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700254#elif defined(CONFIG_ARCH_FSM9XXX)
255#define MSM_TRIG_A2Q6_SMD_INT \
256 (smd_write_intr(1 << 10, MSM_GCC_BASE + 0x8))
257#define MSM_TRIG_A2Q6_SMSM_INT \
258 (smd_write_intr(1 << 10, MSM_GCC_BASE + 0x8))
259#define MSM_TRIG_A2M_SMD_INT \
260 (smd_write_intr(1 << 0, MSM_GCC_BASE + 0x8))
261#define MSM_TRIG_A2M_SMSM_INT \
262 (smd_write_intr(1 << 5, MSM_GCC_BASE + 0x8))
263#define MSM_TRIG_A2DSPS_SMD_INT
Jeff Hugo6a8057c2011-08-16 13:47:12 -0600264#define MSM_TRIG_A2DSPS_SMSM_INT
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700265#define MSM_TRIG_A2WCNSS_SMD_INT
266#define MSM_TRIG_A2WCNSS_SMSM_INT
Eric Holmberg73d45462012-02-28 10:41:31 -0700267#elif defined(CONFIG_ARCH_MSM7X01A) || defined(CONFIG_ARCH_MSM7x25)
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700268#define MSM_TRIG_A2M_SMD_INT \
269 (smd_write_intr(1, MSM_CSR_BASE + 0x400 + (0) * 4))
Eric Holmberg73d45462012-02-28 10:41:31 -0700270#define MSM_TRIG_A2Q6_SMD_INT
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700271#define MSM_TRIG_A2M_SMSM_INT \
272 (smd_write_intr(1, MSM_CSR_BASE + 0x400 + (5) * 4))
Eric Holmberg73d45462012-02-28 10:41:31 -0700273#define MSM_TRIG_A2Q6_SMSM_INT
274#define MSM_TRIG_A2DSPS_SMD_INT
275#define MSM_TRIG_A2DSPS_SMSM_INT
276#define MSM_TRIG_A2WCNSS_SMD_INT
277#define MSM_TRIG_A2WCNSS_SMSM_INT
278#elif defined(CONFIG_ARCH_MSM7X27) || defined(CONFIG_ARCH_MSM7X27A)
279#define MSM_TRIG_A2M_SMD_INT \
280 (smd_write_intr(1, MSM_CSR_BASE + 0x400 + (0) * 4))
281#define MSM_TRIG_A2Q6_SMD_INT
282#define MSM_TRIG_A2M_SMSM_INT \
283 (smd_write_intr(1, MSM_CSR_BASE + 0x400 + (5) * 4))
284#define MSM_TRIG_A2Q6_SMSM_INT
285#define MSM_TRIG_A2DSPS_SMD_INT
286#define MSM_TRIG_A2DSPS_SMSM_INT
287#define MSM_TRIG_A2WCNSS_SMD_INT
288#define MSM_TRIG_A2WCNSS_SMSM_INT
289#else /* use platform device / device tree configuration */
290#define MSM_TRIG_A2M_SMD_INT
291#define MSM_TRIG_A2Q6_SMD_INT
292#define MSM_TRIG_A2M_SMSM_INT
293#define MSM_TRIG_A2Q6_SMSM_INT
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700294#define MSM_TRIG_A2DSPS_SMD_INT
Jeff Hugo6a8057c2011-08-16 13:47:12 -0600295#define MSM_TRIG_A2DSPS_SMSM_INT
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700296#define MSM_TRIG_A2WCNSS_SMD_INT
297#define MSM_TRIG_A2WCNSS_SMSM_INT
Brian Swetland37521a32009-07-01 18:30:47 -0700298#endif
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700299
Jeff Hugoee40b152012-02-09 17:39:47 -0700300/*
301 * stub out legacy macros if they are not being used so that the legacy
302 * code compiles even though it is not used
303 *
304 * these definitions should not be used in active code and will cause
305 * an early failure
306 */
307#ifndef INT_A9_M2A_0
308#define INT_A9_M2A_0 -1
309#endif
310#ifndef INT_A9_M2A_5
311#define INT_A9_M2A_5 -1
312#endif
313#ifndef INT_ADSP_A11
314#define INT_ADSP_A11 -1
315#endif
316#ifndef INT_ADSP_A11_SMSM
317#define INT_ADSP_A11_SMSM -1
318#endif
319#ifndef INT_DSPS_A11
320#define INT_DSPS_A11 -1
321#endif
322#ifndef INT_DSPS_A11_SMSM
323#define INT_DSPS_A11_SMSM -1
324#endif
325#ifndef INT_WCNSS_A11
326#define INT_WCNSS_A11 -1
327#endif
328#ifndef INT_WCNSS_A11_SMSM
329#define INT_WCNSS_A11_SMSM -1
330#endif
331
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700332#define SMD_LOOPBACK_CID 100
333
Eric Holmbergf6d7d1a2011-09-23 18:31:04 -0600334#define SMEM_SPINLOCK_SMEM_ALLOC "S:3"
335static remote_spinlock_t remote_spinlock;
336
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700337static LIST_HEAD(smd_ch_list_loopback);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700338static void smd_fake_irq_handler(unsigned long arg);
Eric Holmbergda31d042012-03-28 14:01:02 -0600339static void smsm_cb_snapshot(uint32_t use_wakelock);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700340
341static void notify_smsm_cb_clients_worker(struct work_struct *work);
342static DECLARE_WORK(smsm_cb_work, notify_smsm_cb_clients_worker);
Eric Holmbergc8002902011-09-16 13:55:57 -0600343static DEFINE_MUTEX(smsm_lock);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700344static struct smsm_state_info *smsm_states;
Angshuman Sarkar7ee0dca2011-08-22 21:37:34 +0530345static int spinlocks_initialized;
Karthikeyan Ramasubramanian7069c482012-03-22 09:21:20 -0600346static RAW_NOTIFIER_HEAD(smsm_driver_state_notifier_list);
347static DEFINE_MUTEX(smsm_driver_state_notifier_lock);
348static void smsm_driver_state_notify(uint32_t state, void *data);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700349
/*
 * Write an outgoing-interrupt trigger value to a memory-mapped register.
 *
 * @val:  bit/value to write (interrupt trigger, platform specific)
 * @addr: iomapped register address
 *
 * The wmb() orders all prior shared-memory writes ahead of the register
 * write, so the remote processor observes up-to-date SMEM state when the
 * interrupt it is about to receive fires.
 */
static inline void smd_write_intr(unsigned int val,
				const void __iomem *addr)
{
	wmb();
	__raw_writel(val, addr);
}
356
#ifdef CONFIG_WCNSS
/*
 * Wake the RIVA (WCNSS) subsystem before signalling it.
 *
 * Workaround hack for a RIVA v1 hardware bug: toggle GPIO 40 (via the
 * TLMM register at offset 0x1284) to wake RIVA from power collapse.
 * Only applied on v1 silicon (SOCINFO major version == 1); a no-op
 * otherwise. Marked "not to be sent to customers" by the original
 * author. The magic offset/values come from platform documentation,
 * not anything visible here — do not change without hardware docs.
 */
static inline void wakeup_v1_riva(void)
{
	if (SOCINFO_VERSION_MAJOR(socinfo_get_version()) == 1) {
		__raw_writel(0x0, MSM_TLMM_BASE + 0x1284);
		__raw_writel(0x2, MSM_TLMM_BASE + 0x1284);
	}
	/* end workaround */
}
#else
/* No WCNSS on this target: nothing to wake. */
static inline void wakeup_v1_riva(void) {}
#endif
374
Angshuman Sarkarbad32df2012-02-01 19:52:52 +0530375static inline void notify_modem_smd(void)
376{
377 static const struct interrupt_config_item *intr
378 = &private_intr_config[SMD_MODEM].smd;
Eric Holmberg7ad623a2012-03-01 14:41:10 -0700379 if (intr->out_base) {
380 ++interrupt_stats[SMD_MODEM].smd_out_config_count;
Angshuman Sarkarbad32df2012-02-01 19:52:52 +0530381 smd_write_intr(intr->out_bit_pos,
382 intr->out_base + intr->out_offset);
Eric Holmberg7ad623a2012-03-01 14:41:10 -0700383 } else {
384 ++interrupt_stats[SMD_MODEM].smd_out_hardcode_count;
Angshuman Sarkarbad32df2012-02-01 19:52:52 +0530385 MSM_TRIG_A2M_SMD_INT;
Eric Holmberg7ad623a2012-03-01 14:41:10 -0700386 }
Angshuman Sarkarbad32df2012-02-01 19:52:52 +0530387}
388
389static inline void notify_dsp_smd(void)
390{
391 static const struct interrupt_config_item *intr
392 = &private_intr_config[SMD_Q6].smd;
Eric Holmberg7ad623a2012-03-01 14:41:10 -0700393 if (intr->out_base) {
394 ++interrupt_stats[SMD_Q6].smd_out_config_count;
Angshuman Sarkarbad32df2012-02-01 19:52:52 +0530395 smd_write_intr(intr->out_bit_pos,
396 intr->out_base + intr->out_offset);
Eric Holmberg7ad623a2012-03-01 14:41:10 -0700397 } else {
398 ++interrupt_stats[SMD_Q6].smd_out_hardcode_count;
Angshuman Sarkarbad32df2012-02-01 19:52:52 +0530399 MSM_TRIG_A2Q6_SMD_INT;
Eric Holmberg7ad623a2012-03-01 14:41:10 -0700400 }
Angshuman Sarkarbad32df2012-02-01 19:52:52 +0530401}
402
403static inline void notify_dsps_smd(void)
404{
405 static const struct interrupt_config_item *intr
406 = &private_intr_config[SMD_DSPS].smd;
Eric Holmberg7ad623a2012-03-01 14:41:10 -0700407 if (intr->out_base) {
408 ++interrupt_stats[SMD_DSPS].smd_out_config_count;
Angshuman Sarkarbad32df2012-02-01 19:52:52 +0530409 smd_write_intr(intr->out_bit_pos,
410 intr->out_base + intr->out_offset);
Eric Holmberg7ad623a2012-03-01 14:41:10 -0700411 } else {
412 ++interrupt_stats[SMD_DSPS].smd_out_hardcode_count;
Angshuman Sarkarbad32df2012-02-01 19:52:52 +0530413 MSM_TRIG_A2DSPS_SMD_INT;
Eric Holmberg7ad623a2012-03-01 14:41:10 -0700414 }
Angshuman Sarkarbad32df2012-02-01 19:52:52 +0530415}
416
417static inline void notify_wcnss_smd(void)
418{
419 static const struct interrupt_config_item *intr
420 = &private_intr_config[SMD_WCNSS].smd;
421 wakeup_v1_riva();
422
Eric Holmberg7ad623a2012-03-01 14:41:10 -0700423 if (intr->out_base) {
424 ++interrupt_stats[SMD_WCNSS].smd_out_config_count;
Angshuman Sarkarbad32df2012-02-01 19:52:52 +0530425 smd_write_intr(intr->out_bit_pos,
426 intr->out_base + intr->out_offset);
Eric Holmberg7ad623a2012-03-01 14:41:10 -0700427 } else {
428 ++interrupt_stats[SMD_WCNSS].smd_out_hardcode_count;
Angshuman Sarkarbad32df2012-02-01 19:52:52 +0530429 MSM_TRIG_A2WCNSS_SMD_INT;
Eric Holmberg7ad623a2012-03-01 14:41:10 -0700430 }
Angshuman Sarkarbad32df2012-02-01 19:52:52 +0530431}
432
433static inline void notify_modem_smsm(void)
434{
435 static const struct interrupt_config_item *intr
436 = &private_intr_config[SMD_MODEM].smsm;
Eric Holmberg7ad623a2012-03-01 14:41:10 -0700437 if (intr->out_base) {
438 ++interrupt_stats[SMD_MODEM].smsm_out_config_count;
Angshuman Sarkarbad32df2012-02-01 19:52:52 +0530439 smd_write_intr(intr->out_bit_pos,
440 intr->out_base + intr->out_offset);
Eric Holmberg7ad623a2012-03-01 14:41:10 -0700441 } else {
442 ++interrupt_stats[SMD_MODEM].smsm_out_hardcode_count;
Angshuman Sarkarbad32df2012-02-01 19:52:52 +0530443 MSM_TRIG_A2M_SMSM_INT;
Eric Holmberg7ad623a2012-03-01 14:41:10 -0700444 }
Angshuman Sarkarbad32df2012-02-01 19:52:52 +0530445}
446
447static inline void notify_dsp_smsm(void)
448{
449 static const struct interrupt_config_item *intr
450 = &private_intr_config[SMD_Q6].smsm;
Eric Holmberg7ad623a2012-03-01 14:41:10 -0700451 if (intr->out_base) {
452 ++interrupt_stats[SMD_Q6].smsm_out_config_count;
Angshuman Sarkarbad32df2012-02-01 19:52:52 +0530453 smd_write_intr(intr->out_bit_pos,
454 intr->out_base + intr->out_offset);
Eric Holmberg7ad623a2012-03-01 14:41:10 -0700455 } else {
456 ++interrupt_stats[SMD_Q6].smsm_out_hardcode_count;
Angshuman Sarkarbad32df2012-02-01 19:52:52 +0530457 MSM_TRIG_A2Q6_SMSM_INT;
Eric Holmberg7ad623a2012-03-01 14:41:10 -0700458 }
Angshuman Sarkarbad32df2012-02-01 19:52:52 +0530459}
460
461static inline void notify_dsps_smsm(void)
462{
463 static const struct interrupt_config_item *intr
464 = &private_intr_config[SMD_DSPS].smsm;
Eric Holmberg7ad623a2012-03-01 14:41:10 -0700465 if (intr->out_base) {
466 ++interrupt_stats[SMD_DSPS].smsm_out_config_count;
Angshuman Sarkarbad32df2012-02-01 19:52:52 +0530467 smd_write_intr(intr->out_bit_pos,
468 intr->out_base + intr->out_offset);
Eric Holmberg7ad623a2012-03-01 14:41:10 -0700469 } else {
470 ++interrupt_stats[SMD_DSPS].smsm_out_hardcode_count;
Angshuman Sarkarbad32df2012-02-01 19:52:52 +0530471 MSM_TRIG_A2DSPS_SMSM_INT;
Eric Holmberg7ad623a2012-03-01 14:41:10 -0700472 }
Angshuman Sarkarbad32df2012-02-01 19:52:52 +0530473}
474
475static inline void notify_wcnss_smsm(void)
476{
477 static const struct interrupt_config_item *intr
478 = &private_intr_config[SMD_WCNSS].smsm;
479 wakeup_v1_riva();
480
Eric Holmberg7ad623a2012-03-01 14:41:10 -0700481 if (intr->out_base) {
482 ++interrupt_stats[SMD_WCNSS].smsm_out_config_count;
Angshuman Sarkarbad32df2012-02-01 19:52:52 +0530483 smd_write_intr(intr->out_bit_pos,
484 intr->out_base + intr->out_offset);
Eric Holmberg7ad623a2012-03-01 14:41:10 -0700485 } else {
486 ++interrupt_stats[SMD_WCNSS].smsm_out_hardcode_count;
Angshuman Sarkarbad32df2012-02-01 19:52:52 +0530487 MSM_TRIG_A2WCNSS_SMSM_INT;
Eric Holmberg7ad623a2012-03-01 14:41:10 -0700488 }
Angshuman Sarkarbad32df2012-02-01 19:52:52 +0530489}
490
/*
 * Fan out an SMSM state change to every remote processor whose interrupt
 * mask for @smsm_entry overlaps @notify_mask, then snapshot the new state
 * for local callback clients.
 *
 * @smsm_entry:  SMSM entry index whose state changed
 * @notify_mask: bits that changed (compared against each host's intr mask)
 */
static void notify_other_smsm(uint32_t smsm_entry, uint32_t notify_mask)
{
	/* older protocol don't use smsm_intr_mask,
	   but still communicates with modem */
	/* modem is always notified when no mask table exists (legacy) */
	if (!smsm_info.intr_mask ||
	    (__raw_readl(SMSM_INTR_MASK_ADDR(smsm_entry, SMSM_MODEM))
	     & notify_mask))
		notify_modem_smsm();

	if (smsm_info.intr_mask &&
	    (__raw_readl(SMSM_INTR_MASK_ADDR(smsm_entry, SMSM_Q6))
	     & notify_mask)) {
		uint32_t mux_val;

		/* on 8x50 the Q6 is signalled via an incrementing mux
		 * counter in SMEM rather than (only) an interrupt line */
		if (cpu_is_qsd8x50() && smsm_info.intr_mux) {
			mux_val = __raw_readl(
					SMSM_INTR_MUX_ADDR(SMEM_APPS_Q6_SMSM));
			mux_val++;
			__raw_writel(mux_val,
					SMSM_INTR_MUX_ADDR(SMEM_APPS_Q6_SMSM));
		}
		notify_dsp_smsm();
	}

	if (smsm_info.intr_mask &&
	    (__raw_readl(SMSM_INTR_MASK_ADDR(smsm_entry, SMSM_WCNSS))
	     & notify_mask)) {
		notify_wcnss_smsm();
	}

	if (smsm_info.intr_mask &&
	    (__raw_readl(SMSM_INTR_MASK_ADDR(smsm_entry, SMSM_DSPS))
	     & notify_mask)) {
		notify_dsps_smsm();
	}

	/*
	 * Notify local SMSM callback clients without wakelock since this
	 * code is used by power management during power-down/-up sequencing
	 * on DEM-based targets. Grabbing a wakelock in this case will
	 * abort the power-down sequencing.
	 */
	smsm_cb_snapshot(0);
}
535
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700536void smd_diag(void)
Brian Swetland2eb44eb2008-09-29 16:00:48 -0700537{
538 char *x;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700539 int size;
Brian Swetland2eb44eb2008-09-29 16:00:48 -0700540
541 x = smem_find(ID_DIAG_ERR_MSG, SZ_DIAG_ERR_MSG);
542 if (x != 0) {
543 x[SZ_DIAG_ERR_MSG - 1] = 0;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700544 SMD_INFO("smem: DIAG '%s'\n", x);
545 }
546
547 x = smem_get_entry(SMEM_ERR_CRASH_LOG, &size);
548 if (x != 0) {
549 x[size - 1] = 0;
550 pr_err("smem: CRASH LOG\n'%s'\n", x);
Brian Swetland2eb44eb2008-09-29 16:00:48 -0700551 }
552}
553
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700554
/*
 * Terminal handler for a detected modem crash: dump diagnostics, then
 * spin forever. The deliberate infinite loop parks the apps processor
 * until the modem or a watchdog resets the system — do not "fix" it.
 */
static void handle_modem_crash(void)
{
	pr_err("MODEM/AMSS has CRASHED\n");
	smd_diag();

	/* hard reboot if possible FIXME
	if (msm_reset_hook)
		msm_reset_hook();
	*/

	/* in this case the modem or watchdog should reboot us */
	for (;;)
		;
}
569
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700570int smsm_check_for_modem_crash(void)
Arve Hjønnevåg28379412009-05-20 16:52:36 -0700571{
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700572 /* if the modem's not ready yet, we have to hope for the best */
573 if (!smsm_info.state)
574 return 0;
Arve Hjønnevåg28379412009-05-20 16:52:36 -0700575
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700576 if (__raw_readl(SMSM_STATE_ADDR(SMSM_MODEM_STATE)) & SMSM_RESET) {
Brian Swetland2eb44eb2008-09-29 16:00:48 -0700577 handle_modem_crash();
578 return -1;
Brian Swetland2eb44eb2008-09-29 16:00:48 -0700579 }
Brian Swetland5b0f5a32009-04-26 18:38:49 -0700580 return 0;
Brian Swetland2eb44eb2008-09-29 16:00:48 -0700581}
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700582EXPORT_SYMBOL(smsm_check_for_modem_crash);
Brian Swetland2eb44eb2008-09-29 16:00:48 -0700583
Brian Swetland2eb44eb2008-09-29 16:00:48 -0700584/* the spinlock is used to synchronize between the
Brian Swetland03e00cd2009-07-01 17:58:37 -0700585 * irq handler and code that mutates the channel
586 * list or fiddles with channel state
587 */
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700588static DEFINE_SPINLOCK(smd_lock);
Brian Swetland03e00cd2009-07-01 17:58:37 -0700589DEFINE_SPINLOCK(smem_lock);
Brian Swetland2eb44eb2008-09-29 16:00:48 -0700590
591/* the mutex is used during open() and close()
Brian Swetland03e00cd2009-07-01 17:58:37 -0700592 * operations to avoid races while creating or
593 * destroying smd_channel structures
594 */
Brian Swetland2eb44eb2008-09-29 16:00:48 -0700595static DEFINE_MUTEX(smd_creation_mutex);
596
597static int smd_initialized;
598
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700599struct smd_shared_v1 {
600 struct smd_half_channel ch0;
601 unsigned char data0[SMD_BUF_SIZE];
602 struct smd_half_channel ch1;
603 unsigned char data1[SMD_BUF_SIZE];
604};
605
606struct smd_shared_v2 {
607 struct smd_half_channel ch0;
608 struct smd_half_channel ch1;
609};
610
/*
 * Runtime state for one SMD channel endpoint on the apps processor.
 * The read/write/avail members are filled in per channel flavor
 * (stream vs packet, v1 vs v2 shared layout) at allocation time.
 */
struct smd_channel {
	volatile struct smd_half_channel *send;	/* our TX half in SMEM */
	volatile struct smd_half_channel *recv;	/* remote's TX half in SMEM */
	unsigned char *send_data;		/* TX FIFO data area */
	unsigned char *recv_data;		/* RX FIFO data area */
	unsigned fifo_size;
	unsigned fifo_mask;			/* fifo_size - 1; presumably power-of-2 FIFOs — confirm */
	struct list_head ch_list;		/* link on a per-edge channel list */

	unsigned current_packet;		/* bytes left in packet being read */
	unsigned n;				/* allocation-table index */
	void *priv;				/* client cookie for notify() */
	void (*notify)(void *priv, unsigned flags);

	/* channel-flavor-specific I/O operations */
	int (*read)(smd_channel_t *ch, void *data, int len, int user_buf);
	int (*write)(smd_channel_t *ch, const void *data, int len,
			int user_buf);
	int (*read_avail)(smd_channel_t *ch);
	int (*write_avail)(smd_channel_t *ch);
	int (*read_from_cb)(smd_channel_t *ch, void *data, int len,
			int user_buf);

	void (*update_state)(smd_channel_t *ch);
	unsigned last_state;			/* last observed remote state */
	void (*notify_other_cpu)(void);		/* one of the notify_*_smd hooks */

	char name[20];
	struct platform_device pdev;		/* device registered for this channel */
	unsigned type;				/* edge type (SMD_APPS_* etc.) */

	int pending_pkt_sz;

	char is_pkt_ch;				/* nonzero for packet channels */
};
645
646struct edge_to_pid {
647 uint32_t local_pid;
648 uint32_t remote_pid;
Eric Holmberg17992c12012-02-29 12:54:44 -0700649 char subsys_name[SMD_MAX_CH_NAME_LEN];
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700650};
651
/**
 * Maps edge type to local and remote processor ID's, plus (for edges that
 * include the apps processor) the restartable-subsystem name of the remote
 * end. Entries without a subsys_name initializer leave it zeroed, which
 * makes smd_edge_to_subsystem() return NULL for those edges.
 */
static struct edge_to_pid edge_to_pids[] = {
	[SMD_APPS_MODEM] = {SMD_APPS, SMD_MODEM, "modem"},
	[SMD_APPS_QDSP] = {SMD_APPS, SMD_Q6, "q6"},
	[SMD_MODEM_QDSP] = {SMD_MODEM, SMD_Q6},
	[SMD_APPS_DSPS] = {SMD_APPS, SMD_DSPS, "dsps"},
	[SMD_MODEM_DSPS] = {SMD_MODEM, SMD_DSPS},
	[SMD_QDSP_DSPS] = {SMD_Q6, SMD_DSPS},
	[SMD_APPS_WCNSS] = {SMD_APPS, SMD_WCNSS, "wcnss"},
	[SMD_MODEM_WCNSS] = {SMD_MODEM, SMD_WCNSS},
	[SMD_QDSP_WCNSS] = {SMD_Q6, SMD_WCNSS},
	[SMD_DSPS_WCNSS] = {SMD_DSPS, SMD_WCNSS},
	[SMD_APPS_Q6FW] = {SMD_APPS, SMD_MODEM_Q6_FW},
	[SMD_MODEM_Q6FW] = {SMD_MODEM, SMD_MODEM_Q6_FW},
	[SMD_QDSP_Q6FW] = {SMD_Q6, SMD_MODEM_Q6_FW},
	[SMD_DSPS_Q6FW] = {SMD_DSPS, SMD_MODEM_Q6_FW},
	[SMD_WCNSS_Q6FW] = {SMD_WCNSS, SMD_MODEM_Q6_FW},
};
672
673struct restart_notifier_block {
674 unsigned processor;
675 char *name;
676 struct notifier_block nb;
677};
678
Eric Holmberg2bb6ccd2012-03-13 13:05:14 -0600679static int disable_smsm_reset_handshake;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700680static struct platform_device loopback_tty_pdev = {.name = "LOOPBACK_TTY"};
681
682static LIST_HEAD(smd_ch_closed_list);
683static LIST_HEAD(smd_ch_closing_list);
684static LIST_HEAD(smd_ch_to_close_list);
685static LIST_HEAD(smd_ch_list_modem);
686static LIST_HEAD(smd_ch_list_dsp);
687static LIST_HEAD(smd_ch_list_dsps);
688static LIST_HEAD(smd_ch_list_wcnss);
Brian Swetland2eb44eb2008-09-29 16:00:48 -0700689
690static unsigned char smd_ch_allocated[64];
691static struct work_struct probe_work;
692
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700693static void finalize_channel_close_fn(struct work_struct *work);
694static DECLARE_WORK(finalize_channel_close_work, finalize_channel_close_fn);
695static struct workqueue_struct *channel_close_wq;
696
697static int smd_alloc_channel(struct smd_alloc_elm *alloc_elm);
698
699/* on smp systems, the probe might get called from multiple cores,
700 hence use a lock */
701static DEFINE_MUTEX(smd_probe_lock);
702
/*
 * Workqueue handler: scan the shared-memory channel allocation table
 * and create a local smd_channel for every new, populated entry whose
 * edge involves the APPS processor.  Serialized by smd_probe_lock
 * because the probe can be triggered from multiple cores.
 */
static void smd_channel_probe_worker(struct work_struct *work)
{
	struct smd_alloc_elm *shared;
	unsigned n;
	uint32_t type;

	/* table has 64 slots, matching smd_ch_allocated[] */
	shared = smem_find(ID_CH_ALLOC_TBL, sizeof(*shared) * 64);

	if (!shared) {
		pr_err("%s: allocation table not initialized\n", __func__);
		return;
	}

	mutex_lock(&smd_probe_lock);
	for (n = 0; n < 64; n++) {
		if (smd_ch_allocated[n])
			continue;

		/* channel should be allocated only if APPS
		   processor is involved */
		type = SMD_CHANNEL_TYPE(shared[n].type);
		if ((type != SMD_APPS_MODEM) && (type != SMD_APPS_QDSP) &&
		    (type != SMD_APPS_DSPS) && (type != SMD_APPS_WCNSS))
			continue;
		/* skip table slots that are unused or not yet filled in */
		if (!shared[n].ref_count)
			continue;
		if (!shared[n].name[0])
			continue;

		if (!smd_alloc_channel(&shared[n]))
			smd_ch_allocated[n] = 1;
		else
			SMD_INFO("Probe skipping ch %d, not allocated\n", n);
	}
	mutex_unlock(&smd_probe_lock);
}
739
740/**
741 * Lookup processor ID and determine if it belongs to the proved edge
742 * type.
743 *
744 * @shared2: Pointer to v2 shared channel structure
745 * @type: Edge type
746 * @pid: Processor ID of processor on edge
747 * @local_ch: Channel that belongs to processor @pid
748 * @remote_ch: Other side of edge contained @pid
749 *
750 * Returns 0 for not on edge, 1 for found on edge
751 */
752static int pid_is_on_edge(struct smd_shared_v2 *shared2,
753 uint32_t type, uint32_t pid,
754 struct smd_half_channel **local_ch,
755 struct smd_half_channel **remote_ch
756 )
757{
758 int ret = 0;
759 struct edge_to_pid *edge;
760
761 *local_ch = 0;
762 *remote_ch = 0;
763
764 if (!shared2 || (type >= ARRAY_SIZE(edge_to_pids)))
765 return 0;
766
767 edge = &edge_to_pids[type];
768 if (edge->local_pid != edge->remote_pid) {
769 if (pid == edge->local_pid) {
770 *local_ch = &shared2->ch0;
771 *remote_ch = &shared2->ch1;
772 ret = 1;
773 } else if (pid == edge->remote_pid) {
774 *local_ch = &shared2->ch1;
775 *remote_ch = &shared2->ch0;
776 ret = 1;
777 }
778 }
779
780 return ret;
781}
782
Eric Holmberg17992c12012-02-29 12:54:44 -0700783/*
784 * Returns a pointer to the subsystem name or NULL if no
785 * subsystem name is available.
786 *
787 * @type - Edge definition
788 */
789const char *smd_edge_to_subsystem(uint32_t type)
790{
791 const char *subsys = NULL;
792
793 if (type < ARRAY_SIZE(edge_to_pids)) {
794 subsys = edge_to_pids[type].subsys_name;
795 if (subsys[0] == 0x0)
796 subsys = NULL;
797 }
798 return subsys;
799}
800EXPORT_SYMBOL(smd_edge_to_subsystem);
801
Eric Holmberg7ad623a2012-03-01 14:41:10 -0700802/*
803 * Returns a pointer to the subsystem name given the
804 * remote processor ID.
805 *
806 * @pid Remote processor ID
807 * @returns Pointer to subsystem name or NULL if not found
808 */
809const char *smd_pid_to_subsystem(uint32_t pid)
810{
811 const char *subsys = NULL;
812 int i;
813
814 for (i = 0; i < ARRAY_SIZE(edge_to_pids); ++i) {
815 if (pid == edge_to_pids[i].remote_pid &&
816 edge_to_pids[i].subsys_name[0] != 0x0
817 ) {
818 subsys = edge_to_pids[i].subsys_name;
819 break;
820 }
821 }
822
823 return subsys;
824}
825EXPORT_SYMBOL(smd_pid_to_subsystem);
Eric Holmberg17992c12012-02-29 12:54:44 -0700826
Eric Holmberg2a563c32011-10-05 14:51:43 -0600827static void smd_reset_edge(struct smd_half_channel *ch, unsigned new_state)
828{
829 if (ch->state != SMD_SS_CLOSED) {
830 ch->state = new_state;
831 ch->fDSR = 0;
832 ch->fCTS = 0;
833 ch->fCD = 0;
834 ch->fSTATE = 1;
835 }
836}
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700837
/*
 * Walk the shared allocation table and move every half-channel owned by
 * restarting processor @pid into @new_state (via smd_reset_edge).
 * Caller is expected to hold the locks taken in smd_channel_reset().
 */
static void smd_channel_reset_state(struct smd_alloc_elm *shared,
		unsigned new_state, unsigned pid)
{
	unsigned n;
	struct smd_shared_v2 *shared2;
	uint32_t type;
	struct smd_half_channel *local_ch;
	struct smd_half_channel *remote_ch;

	for (n = 0; n < SMD_CHANNELS; n++) {
		/* skip unused/unnamed table slots */
		if (!shared[n].ref_count)
			continue;
		if (!shared[n].name[0])
			continue;

		type = SMD_CHANNEL_TYPE(shared[n].type);
		shared2 = smem_alloc(SMEM_SMD_BASE_ID + n, sizeof(*shared2));
		if (!shared2)
			continue;

		/* reset the half-channel owned by the restarting pid */
		if (pid_is_on_edge(shared2, type, pid, &local_ch, &remote_ch))
			smd_reset_edge(local_ch, new_state);

		/*
		 * ModemFW is in the same subsystem as ModemSW, but has
		 * separate SMD edges that need to be reset.
		 */
		if (pid == SMSM_MODEM &&
				pid_is_on_edge(shared2, type, SMD_MODEM_Q6_FW,
					&local_ch, &remote_ch))
			smd_reset_edge(local_ch, new_state);
	}
}
871
872
/*
 * Clean up SMD/SMSM shared state after processor @restart_pid has been
 * restarted: release any remote spinlocks it held, clear and
 * re-handshake its SMSM state entry, then walk all of its channels
 * first to CLOSING and then to CLOSED, notifying every processor after
 * each pass.  The two-phase CLOSING->CLOSED walk with notifications in
 * between mirrors a normal close sequence for local clients.
 */
void smd_channel_reset(uint32_t restart_pid)
{
	struct smd_alloc_elm *shared;
	unsigned long flags;

	SMD_DBG("%s: starting reset\n", __func__);
	shared = smem_find(ID_CH_ALLOC_TBL, sizeof(*shared) * 64);
	if (!shared) {
		pr_err("%s: allocation table not initialized\n", __func__);
		return;
	}

	/* release any held spinlocks */
	remote_spin_release(&remote_spinlock, restart_pid);
	remote_spin_release_all(restart_pid);

	/* reset SMSM entry */
	if (smsm_info.state) {
		writel_relaxed(0, SMSM_STATE_ADDR(restart_pid));

		/* restart SMSM init handshake */
		if (restart_pid == SMSM_MODEM) {
			smsm_change_state(SMSM_APPS_STATE,
				SMSM_INIT | SMSM_SMD_LOOPBACK | SMSM_RESET,
				0);
		}

		/* notify SMSM processors */
		smsm_irq_handler(0, 0);
		notify_modem_smsm();
		notify_dsp_smsm();
		notify_dsps_smsm();
		notify_wcnss_smsm();
	}

	/* change all remote states to CLOSING */
	mutex_lock(&smd_probe_lock);
	spin_lock_irqsave(&smd_lock, flags);
	smd_channel_reset_state(shared, SMD_SS_CLOSING, restart_pid);
	spin_unlock_irqrestore(&smd_lock, flags);
	mutex_unlock(&smd_probe_lock);

	/* notify SMD processors; mb() publishes the state writes first */
	mb();
	smd_fake_irq_handler(0);
	notify_modem_smd();
	notify_dsp_smd();
	notify_dsps_smd();
	notify_wcnss_smd();

	/* change all remote states to CLOSED */
	mutex_lock(&smd_probe_lock);
	spin_lock_irqsave(&smd_lock, flags);
	smd_channel_reset_state(shared, SMD_SS_CLOSED, restart_pid);
	spin_unlock_irqrestore(&smd_lock, flags);
	mutex_unlock(&smd_probe_lock);

	/* notify SMD processors */
	mb();
	smd_fake_irq_handler(0);
	notify_modem_smd();
	notify_dsp_smd();
	notify_dsps_smd();
	notify_wcnss_smd();

	SMD_DBG("%s: finished reset\n", __func__);
}
940
Brian Swetland2eb44eb2008-09-29 16:00:48 -0700941/* how many bytes are available for reading */
942static int smd_stream_read_avail(struct smd_channel *ch)
943{
Brian Swetland5b0f5a32009-04-26 18:38:49 -0700944 return (ch->recv->head - ch->recv->tail) & ch->fifo_mask;
Brian Swetland2eb44eb2008-09-29 16:00:48 -0700945}
946
947/* how many bytes we are free to write */
948static int smd_stream_write_avail(struct smd_channel *ch)
949{
Brian Swetland5b0f5a32009-04-26 18:38:49 -0700950 return ch->fifo_mask -
951 ((ch->send->head - ch->send->tail) & ch->fifo_mask);
Brian Swetland2eb44eb2008-09-29 16:00:48 -0700952}
953
954static int smd_packet_read_avail(struct smd_channel *ch)
955{
956 if (ch->current_packet) {
957 int n = smd_stream_read_avail(ch);
958 if (n > ch->current_packet)
959 n = ch->current_packet;
960 return n;
961 } else {
962 return 0;
963 }
964}
965
966static int smd_packet_write_avail(struct smd_channel *ch)
967{
968 int n = smd_stream_write_avail(ch);
969 return n > SMD_HEADER_SIZE ? n - SMD_HEADER_SIZE : 0;
970}
971
972static int ch_is_open(struct smd_channel *ch)
973{
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700974 return (ch->recv->state == SMD_SS_OPENED ||
975 ch->recv->state == SMD_SS_FLUSHING)
976 && (ch->send->state == SMD_SS_OPENED);
Brian Swetland2eb44eb2008-09-29 16:00:48 -0700977}
978
979/* provide a pointer and length to readable data in the fifo */
980static unsigned ch_read_buffer(struct smd_channel *ch, void **ptr)
981{
982 unsigned head = ch->recv->head;
983 unsigned tail = ch->recv->tail;
Brian Swetland5b0f5a32009-04-26 18:38:49 -0700984 *ptr = (void *) (ch->recv_data + tail);
Brian Swetland2eb44eb2008-09-29 16:00:48 -0700985
986 if (tail <= head)
987 return head - tail;
988 else
Brian Swetland5b0f5a32009-04-26 18:38:49 -0700989 return ch->fifo_size - tail;
Brian Swetland2eb44eb2008-09-29 16:00:48 -0700990}
991
/* Remote side raises fBLOCKREADINTR when it does not want to be
 * interrupted after we consume data; checked before notifying it. */
static int read_intr_blocked(struct smd_channel *ch)
{
	return ch->recv->fBLOCKREADINTR;
}
996
Brian Swetland2eb44eb2008-09-29 16:00:48 -0700997/* advance the fifo read pointer after data from ch_read_buffer is consumed */
998static void ch_read_done(struct smd_channel *ch, unsigned count)
999{
1000 BUG_ON(count > smd_stream_read_avail(ch));
Brian Swetland5b0f5a32009-04-26 18:38:49 -07001001 ch->recv->tail = (ch->recv->tail + count) & ch->fifo_mask;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001002 wmb();
Haley Teng7632fba2009-10-12 10:38:10 -07001003 ch->send->fTAIL = 1;
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001004}
1005
/* basic read interface to ch_read_{buffer,done} used
 * by smd_*_read() and update_packet_state()
 * will read-and-discard if the _data pointer is null
 */
/*
 * Copy up to @len bytes out of the receive fifo into @_data — a user
 * buffer when @user_buf is set, a kernel buffer otherwise.  Returns the
 * number of bytes consumed from the fifo.
 * NOTE(review): a partially-failing copy_to_user is only logged; the
 * fifo bytes are still consumed and counted in the return value.
 */
static int ch_read(struct smd_channel *ch, void *_data, int len, int user_buf)
{
	void *ptr;
	unsigned n;
	unsigned char *data = _data;
	int orig_len = len;
	int r = 0;

	while (len > 0) {
		/* n is the contiguous readable run starting at the tail */
		n = ch_read_buffer(ch, &ptr);
		if (n == 0)
			break;

		if (n > len)
			n = len;
		if (_data) {
			if (user_buf) {
				r = copy_to_user(data, ptr, n);
				if (r > 0) {
					pr_err("%s: "
						"copy_to_user could not copy "
						"%i bytes.\n",
						__func__,
						r);
				}
			} else
				memcpy(data, ptr, n);
		}

		data += n;
		len -= n;
		/* release the bytes and flag the remote side */
		ch_read_done(ch, n);
	}

	return orig_len - len;
}
1046
/* Stream-mode counterpart of update_packet_state(): intentionally a
 * no-op, kept so ch->update_state() can be called unconditionally. */
static void update_stream_state(struct smd_channel *ch)
{
	/* streams have no special state requiring updating */
}
1051
/*
 * When between packets, consume SMD packet headers from the stream
 * until one with a non-zero payload length is found (zero-length
 * packets are discarded).  Leaves ch->current_packet holding the
 * payload length of the packet now at the head of the stream.
 */
static void update_packet_state(struct smd_channel *ch)
{
	unsigned hdr[5];
	int r;

	/* can't do anything if we're in the middle of a packet */
	while (ch->current_packet == 0) {
		/* discard 0 length packets if any */

		/* don't bother unless we can get the full header */
		if (smd_stream_read_avail(ch) < SMD_HEADER_SIZE)
			return;

		r = ch_read(ch, hdr, SMD_HEADER_SIZE, 0);
		BUG_ON(r != SMD_HEADER_SIZE);

		/* first header word is the packet payload length */
		ch->current_packet = hdr[0];
	}
}
1071
1072/* provide a pointer and length to next free space in the fifo */
1073static unsigned ch_write_buffer(struct smd_channel *ch, void **ptr)
1074{
1075 unsigned head = ch->send->head;
1076 unsigned tail = ch->send->tail;
Brian Swetland5b0f5a32009-04-26 18:38:49 -07001077 *ptr = (void *) (ch->send_data + head);
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001078
1079 if (head < tail) {
1080 return tail - head - 1;
1081 } else {
1082 if (tail == 0)
Brian Swetland5b0f5a32009-04-26 18:38:49 -07001083 return ch->fifo_size - head - 1;
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001084 else
Brian Swetland5b0f5a32009-04-26 18:38:49 -07001085 return ch->fifo_size - head;
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001086 }
1087}
1088
1089/* advace the fifo write pointer after freespace
1090 * from ch_write_buffer is filled
1091 */
1092static void ch_write_done(struct smd_channel *ch, unsigned count)
1093{
1094 BUG_ON(count > smd_stream_write_avail(ch));
Brian Swetland5b0f5a32009-04-26 18:38:49 -07001095 ch->send->head = (ch->send->head + count) & ch->fifo_mask;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001096 wmb();
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001097 ch->send->fHEAD = 1;
1098}
1099
Brian Swetland5b0f5a32009-04-26 18:38:49 -07001100static void ch_set_state(struct smd_channel *ch, unsigned n)
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001101{
1102 if (n == SMD_SS_OPENED) {
Brian Swetland5b0f5a32009-04-26 18:38:49 -07001103 ch->send->fDSR = 1;
1104 ch->send->fCTS = 1;
1105 ch->send->fCD = 1;
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001106 } else {
Brian Swetland5b0f5a32009-04-26 18:38:49 -07001107 ch->send->fDSR = 0;
1108 ch->send->fCTS = 0;
1109 ch->send->fCD = 0;
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001110 }
Brian Swetland5b0f5a32009-04-26 18:38:49 -07001111 ch->send->state = n;
1112 ch->send->fSTATE = 1;
1113 ch->notify_other_cpu();
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001114}
1115
1116static void do_smd_probe(void)
1117{
1118 struct smem_shared *shared = (void *) MSM_SHARED_RAM_BASE;
1119 if (shared->heap_info.free_offset != last_heap_free) {
1120 last_heap_free = shared->heap_info.free_offset;
1121 schedule_work(&probe_work);
1122 }
1123}
1124
/*
 * React to a remote state transition on @ch (remote moved from @last to
 * @next): advance our own half-channel state machine to match and fire
 * the client notify callback where appropriate.  Called with smd_lock
 * held from the irq handlers.
 */
static void smd_state_change(struct smd_channel *ch,
			     unsigned last, unsigned next)
{
	ch->last_state = next;

	SMD_INFO("SMD: ch %d %d -> %d\n", ch->n, last, next);

	switch (next) {
	case SMD_SS_OPENING:
		/* remote is opening: reset our fifo indices and follow */
		if (ch->send->state == SMD_SS_CLOSING ||
		    ch->send->state == SMD_SS_CLOSED) {
			ch->recv->tail = 0;
			ch->send->head = 0;
			ch->send->fBLOCKREADINTR = 0;
			ch_set_state(ch, SMD_SS_OPENING);
		}
		break;
	case SMD_SS_OPENED:
		/* handshake complete: report SMD_EVENT_OPEN to the client */
		if (ch->send->state == SMD_SS_OPENING) {
			ch_set_state(ch, SMD_SS_OPENED);
			ch->notify(ch->priv, SMD_EVENT_OPEN);
		}
		break;
	case SMD_SS_FLUSHING:
	case SMD_SS_RESET:
		/* we should force them to close? */
		break;
	case SMD_SS_CLOSED:
		/* remote closed while we were open: begin our close and
		 * drop any partially-read packet state */
		if (ch->send->state == SMD_SS_OPENED) {
			ch_set_state(ch, SMD_SS_CLOSING);
			ch->current_packet = 0;
			ch->pending_pkt_sz = 0;
			ch->notify(ch->priv, SMD_EVENT_CLOSE);
		}
		break;
	case SMD_SS_CLOSING:
		/* both sides done: hand the channel to the close worker */
		if (ch->send->state == SMD_SS_CLOSED) {
			list_move(&ch->ch_list,
					&smd_ch_to_close_list);
			queue_work(channel_close_wq,
						&finalize_channel_close_work);
		}
		break;
	}
}
1170
/*
 * Service channels that are mid-close: acknowledge fSTATE and propagate
 * any remote state change.  Data flags are not examined for channels on
 * the closing list.
 */
static void handle_smd_irq_closing_list(void)
{
	unsigned long flags;
	struct smd_channel *ch;
	struct smd_channel *index;
	unsigned tmp;

	spin_lock_irqsave(&smd_lock, flags);
	/* _safe: smd_state_change() may move ch off this list */
	list_for_each_entry_safe(ch, index, &smd_ch_closing_list, ch_list) {
		if (ch->recv->fSTATE)
			ch->recv->fSTATE = 0;
		tmp = ch->recv->state;
		if (tmp != ch->last_state)
			smd_state_change(ch, ch->last_state, tmp);
	}
	spin_unlock_irqrestore(&smd_lock, flags);
}
1188
/*
 * Core SMD interrupt service for one edge: for every open channel on
 * @list, acknowledge the remote-set flags (fHEAD/fTAIL/fSTATE),
 * propagate state changes, and deliver SMD_EVENT_DATA / _STATUS to the
 * client.  @notify is the poke-the-remote-CPU hook for this edge.
 */
static void handle_smd_irq(struct list_head *list, void (*notify)(void))
{
	unsigned long flags;
	struct smd_channel *ch;
	unsigned ch_flags;
	unsigned tmp;
	unsigned char state_change;

	spin_lock_irqsave(&smd_lock, flags);
	list_for_each_entry(ch, list, ch_list) {
		state_change = 0;
		ch_flags = 0;
		if (ch_is_open(ch)) {
			/* ack flags; bit 1 = new data, 2 = space freed,
			 * 4 = state event */
			if (ch->recv->fHEAD) {
				ch->recv->fHEAD = 0;
				ch_flags |= 1;
			}
			if (ch->recv->fTAIL) {
				ch->recv->fTAIL = 0;
				ch_flags |= 2;
			}
			if (ch->recv->fSTATE) {
				ch->recv->fSTATE = 0;
				ch_flags |= 4;
			}
		}
		tmp = ch->recv->state;
		if (tmp != ch->last_state) {
			SMx_POWER_INFO("SMD ch%d '%s' State change %d->%d\n",
					ch->n, ch->name, ch->last_state, tmp);
			smd_state_change(ch, ch->last_state, tmp);
			state_change = 1;
		}
		if (ch_flags & 0x3) {
			ch->update_state(ch);
			SMx_POWER_INFO("SMD ch%d '%s' Data event r%d/w%d\n",
					ch->n, ch->name,
					ch->read_avail(ch),
					ch->fifo_size - ch->write_avail(ch));
			ch->notify(ch->priv, SMD_EVENT_DATA);
		}
		/* suppress STATUS if a state change was already reported */
		if (ch_flags & 0x4 && !state_change) {
			SMx_POWER_INFO("SMD ch%d '%s' State update\n",
					ch->n, ch->name);
			ch->notify(ch->priv, SMD_EVENT_STATUS);
		}
	}
	spin_unlock_irqrestore(&smd_lock, flags);
	do_smd_probe();
}
1239
Brian Swetland37521a32009-07-01 18:30:47 -07001240static irqreturn_t smd_modem_irq_handler(int irq, void *data)
Brian Swetland5b0f5a32009-04-26 18:38:49 -07001241{
Eric Holmberg98c6c642012-02-24 11:29:35 -07001242 SMx_POWER_INFO("SMD Int Modem->Apps\n");
Eric Holmberg7ad623a2012-03-01 14:41:10 -07001243 ++interrupt_stats[SMD_MODEM].smd_in_count;
Brian Swetland37521a32009-07-01 18:30:47 -07001244 handle_smd_irq(&smd_ch_list_modem, notify_modem_smd);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001245 handle_smd_irq_closing_list();
Brian Swetland37521a32009-07-01 18:30:47 -07001246 return IRQ_HANDLED;
1247}
1248
1249static irqreturn_t smd_dsp_irq_handler(int irq, void *data)
1250{
Eric Holmberg98c6c642012-02-24 11:29:35 -07001251 SMx_POWER_INFO("SMD Int LPASS->Apps\n");
Eric Holmberg7ad623a2012-03-01 14:41:10 -07001252 ++interrupt_stats[SMD_Q6].smd_in_count;
Brian Swetland37521a32009-07-01 18:30:47 -07001253 handle_smd_irq(&smd_ch_list_dsp, notify_dsp_smd);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001254 handle_smd_irq_closing_list();
1255 return IRQ_HANDLED;
1256}
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001257
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001258static irqreturn_t smd_dsps_irq_handler(int irq, void *data)
1259{
Eric Holmberg98c6c642012-02-24 11:29:35 -07001260 SMx_POWER_INFO("SMD Int DSPS->Apps\n");
Eric Holmberg7ad623a2012-03-01 14:41:10 -07001261 ++interrupt_stats[SMD_DSPS].smd_in_count;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001262 handle_smd_irq(&smd_ch_list_dsps, notify_dsps_smd);
1263 handle_smd_irq_closing_list();
1264 return IRQ_HANDLED;
1265}
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001266
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001267static irqreturn_t smd_wcnss_irq_handler(int irq, void *data)
1268{
Eric Holmberg98c6c642012-02-24 11:29:35 -07001269 SMx_POWER_INFO("SMD Int WCNSS->Apps\n");
Eric Holmberg7ad623a2012-03-01 14:41:10 -07001270 ++interrupt_stats[SMD_WCNSS].smd_in_count;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001271 handle_smd_irq(&smd_ch_list_wcnss, notify_wcnss_smd);
1272 handle_smd_irq_closing_list();
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001273 return IRQ_HANDLED;
1274}
1275
/*
 * Tasklet body: service every edge as if its SMD interrupt had fired.
 * Used to pick up events that arrived while interrupts were masked
 * (see smd_sleep_exit()).  @arg is unused.
 */
static void smd_fake_irq_handler(unsigned long arg)
{
	handle_smd_irq(&smd_ch_list_modem, notify_modem_smd);
	handle_smd_irq(&smd_ch_list_dsp, notify_dsp_smd);
	handle_smd_irq(&smd_ch_list_dsps, notify_dsps_smd);
	handle_smd_irq(&smd_ch_list_wcnss, notify_wcnss_smd);
	handle_smd_irq_closing_list();
}

static DECLARE_TASKLET(smd_fake_irq_tasklet, smd_fake_irq_handler, 0);
1286
Brian Swetland37521a32009-07-01 18:30:47 -07001287static inline int smd_need_int(struct smd_channel *ch)
1288{
1289 if (ch_is_open(ch)) {
1290 if (ch->recv->fHEAD || ch->recv->fTAIL || ch->recv->fSTATE)
1291 return 1;
1292 if (ch->recv->state != ch->last_state)
1293 return 1;
1294 }
1295 return 0;
1296}
1297
/*
 * Called on wakeup from sleep: if any channel on any edge accumulated
 * flags or a remote state change while interrupts were off, schedule
 * the fake-irq tasklet so the events get serviced.
 */
void smd_sleep_exit(void)
{
	unsigned long flags;
	struct smd_channel *ch;
	int need_int = 0;

	spin_lock_irqsave(&smd_lock, flags);
	list_for_each_entry(ch, &smd_ch_list_modem, ch_list) {
		if (smd_need_int(ch)) {
			need_int = 1;
			break;
		}
	}
	list_for_each_entry(ch, &smd_ch_list_dsp, ch_list) {
		if (smd_need_int(ch)) {
			need_int = 1;
			break;
		}
	}
	list_for_each_entry(ch, &smd_ch_list_dsps, ch_list) {
		if (smd_need_int(ch)) {
			need_int = 1;
			break;
		}
	}
	list_for_each_entry(ch, &smd_ch_list_wcnss, ch_list) {
		if (smd_need_int(ch)) {
			need_int = 1;
			break;
		}
	}
	spin_unlock_irqrestore(&smd_lock, flags);
	do_smd_probe();

	if (need_int) {
		SMD_DBG("smd_sleep_exit need interrupt\n");
		tasklet_schedule(&smd_fake_irq_tasklet);
	}
}
EXPORT_SYMBOL(smd_sleep_exit);
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001338
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001339static int smd_is_packet(struct smd_alloc_elm *alloc_elm)
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001340{
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001341 if (SMD_XFER_TYPE(alloc_elm->type) == 1)
1342 return 0;
1343 else if (SMD_XFER_TYPE(alloc_elm->type) == 2)
Brian Swetland5b0f5a32009-04-26 18:38:49 -07001344 return 1;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001345
1346 /* for cases where xfer type is 0 */
1347 if (!strncmp(alloc_elm->name, "DAL", 3))
Brian Swetland5b0f5a32009-04-26 18:38:49 -07001348 return 0;
1349
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001350 /* for cases where xfer type is 0 */
1351 if (!strncmp(alloc_elm->name, "RPCCALL_QDSP", 12))
1352 return 0;
1353
1354 if (alloc_elm->cid > 4 || alloc_elm->cid == 1)
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001355 return 1;
1356 else
1357 return 0;
1358}
1359
/*
 * Write up to @len bytes into the send fifo from @_data (a user buffer
 * when @user_buf is set).  Returns the number of bytes accepted, which
 * may be short if the fifo fills, or 0 if the channel closed mid-write.
 * NOTE(review): a partially-failing copy_from_user is only logged; the
 * fifo space is still consumed — garbage bytes may be sent.
 */
static int smd_stream_write(smd_channel_t *ch, const void *_data, int len,
		int user_buf)
{
	void *ptr;
	const unsigned char *buf = _data;
	unsigned xfer;
	int orig_len = len;
	int r = 0;

	SMD_DBG("smd_stream_write() %d -> ch%d\n", len, ch->n);
	if (len < 0)
		return -EINVAL;
	else if (len == 0)
		return 0;

	while ((xfer = ch_write_buffer(ch, &ptr)) != 0) {
		/* channel dropped mid-write: report 0 bytes written */
		if (!ch_is_open(ch)) {
			len = orig_len;
			break;
		}
		if (xfer > len)
			xfer = len;
		if (user_buf) {
			r = copy_from_user(ptr, buf, xfer);
			if (r > 0) {
				pr_err("%s: "
					"copy_from_user could not copy %i "
					"bytes.\n",
					__func__,
					r);
			}
		} else
			memcpy(ptr, buf, xfer);
		ch_write_done(ch, xfer);
		len -= xfer;
		buf += xfer;
		if (len == 0)
			break;
	}

	/* only interrupt the remote side if something was written */
	if (orig_len - len)
		ch->notify_other_cpu();

	return orig_len - len;
}
1405
/*
 * Write one complete packet: a 20-byte header (payload length in word
 * 0) followed by @len payload bytes.  Space for header plus payload is
 * checked up front so the two stream writes cannot be short.  Returns
 * @len on success, -EINVAL/-ENOMEM on bad length / no space.
 * NOTE(review): header-write failure returns bare -1 (not an errno
 * constant); kept as-is since callers may test the exact value.
 */
static int smd_packet_write(smd_channel_t *ch, const void *_data, int len,
		int user_buf)
{
	int ret;
	unsigned hdr[5];

	SMD_DBG("smd_packet_write() %d -> ch%d\n", len, ch->n);
	if (len < 0)
		return -EINVAL;
	else if (len == 0)
		return 0;

	if (smd_stream_write_avail(ch) < (len + SMD_HEADER_SIZE))
		return -ENOMEM;

	hdr[0] = len;
	hdr[1] = hdr[2] = hdr[3] = hdr[4] = 0;


	/* header always comes from kernel space */
	ret = smd_stream_write(ch, hdr, sizeof(hdr), 0);
	if (ret < 0 || ret != sizeof(hdr)) {
		SMD_DBG("%s failed to write pkt header: "
			"%d returned\n", __func__, ret);
		return -1;
	}


	ret = smd_stream_write(ch, _data, len, user_buf);
	if (ret < 0 || ret != len) {
		SMD_DBG("%s failed to write pkt data: "
			"%d returned\n", __func__, ret);
		return ret;
	}

	return len;
}
1442
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001443static int smd_stream_read(smd_channel_t *ch, void *data, int len, int user_buf)
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001444{
1445 int r;
1446
1447 if (len < 0)
1448 return -EINVAL;
1449
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001450 r = ch_read(ch, data, len, user_buf);
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001451 if (r > 0)
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001452 if (!read_intr_blocked(ch))
1453 ch->notify_other_cpu();
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001454
1455 return r;
1456}
1457
/*
 * Read up to @len bytes of the current packet's payload.  The packet
 * bookkeeping (current_packet decrement and header re-scan) is done
 * under smd_lock; cf. smd_packet_read_from_cb() which skips the lock.
 */
static int smd_packet_read(smd_channel_t *ch, void *data, int len, int user_buf)
{
	unsigned long flags;
	int r;

	if (len < 0)
		return -EINVAL;

	/* never read past the end of the current packet */
	if (len > ch->current_packet)
		len = ch->current_packet;

	r = ch_read(ch, data, len, user_buf);
	if (r > 0)
		if (!read_intr_blocked(ch))
			ch->notify_other_cpu();

	spin_lock_irqsave(&smd_lock, flags);
	ch->current_packet -= r;
	update_packet_state(ch);
	spin_unlock_irqrestore(&smd_lock, flags);

	return r;
}
1481
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001482static int smd_packet_read_from_cb(smd_channel_t *ch, void *data, int len,
1483 int user_buf)
1484{
1485 int r;
1486
1487 if (len < 0)
1488 return -EINVAL;
1489
1490 if (len > ch->current_packet)
1491 len = ch->current_packet;
1492
1493 r = ch_read(ch, data, len, user_buf);
1494 if (r > 0)
1495 if (!read_intr_blocked(ch))
1496 ch->notify_other_cpu();
1497
1498 ch->current_packet -= r;
1499 update_packet_state(ch);
1500
1501 return r;
1502}
1503
#if (defined(CONFIG_MSM_SMD_PKG4) || defined(CONFIG_MSM_SMD_PKG3))
/*
 * v2 channel layout (PKG3/PKG4 targets): half-channel control structs
 * live in one SMEM item, the data fifo in another.  The fifo item is
 * split in half — first half is our send fifo, second half receive.
 * Returns 0 on success, -EINVAL if the SMEM items are missing/invalid.
 */
static int smd_alloc_v2(struct smd_channel *ch)
{
	struct smd_shared_v2 *shared2;
	void *buffer;
	unsigned buffer_sz;

	shared2 = smem_alloc(SMEM_SMD_BASE_ID + ch->n, sizeof(*shared2));
	if (!shared2) {
		SMD_INFO("smem_alloc failed ch=%d\n", ch->n);
		return -EINVAL;
	}
	buffer = smem_get_entry(SMEM_SMD_FIFO_BASE_ID + ch->n, &buffer_sz);
	if (!buffer) {
		SMD_INFO("smem_get_entry failed\n");
		return -EINVAL;
	}

	/* buffer must be a power-of-two size */
	if (buffer_sz & (buffer_sz - 1)) {
		SMD_INFO("Buffer size: %u not power of two\n", buffer_sz);
		return -EINVAL;
	}
	buffer_sz /= 2;
	ch->send = &shared2->ch0;
	ch->recv = &shared2->ch1;
	ch->send_data = buffer;
	ch->recv_data = buffer + buffer_sz;
	ch->fifo_size = buffer_sz;
	return 0;
}

/* v1 layout is not supported on PKG3/PKG4 targets */
static int smd_alloc_v1(struct smd_channel *ch)
{
	return -EINVAL;
}

#else /* define v1 for older targets */
/* v2 layout is not supported on older targets */
static int smd_alloc_v2(struct smd_channel *ch)
{
	return -EINVAL;
}

/*
 * v1 channel layout (older targets): control structs and fixed-size
 * data buffers live together in a single SMEM item.
 */
static int smd_alloc_v1(struct smd_channel *ch)
{
	struct smd_shared_v1 *shared1;
	shared1 = smem_alloc(ID_SMD_CHANNELS + ch->n, sizeof(*shared1));
	if (!shared1) {
		pr_err("smd_alloc_channel() cid %d does not exist\n", ch->n);
		return -EINVAL;
	}
	ch->send = &shared1->ch0;
	ch->recv = &shared1->ch1;
	ch->send_data = shared1->data0;
	ch->recv_data = shared1->data1;
	ch->fifo_size = SMD_BUF_SIZE;
	return 0;
}

#endif
1564
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001565static int smd_alloc_channel(struct smd_alloc_elm *alloc_elm)
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001566{
1567 struct smd_channel *ch;
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001568
1569 ch = kzalloc(sizeof(struct smd_channel), GFP_KERNEL);
1570 if (ch == 0) {
1571 pr_err("smd_alloc_channel() out of memory\n");
Brian Swetland34f719b2009-10-30 16:22:05 -07001572 return -1;
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001573 }
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001574 ch->n = alloc_elm->cid;
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001575
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001576 if (smd_alloc_v2(ch) && smd_alloc_v1(ch)) {
Brian Swetland5b0f5a32009-04-26 18:38:49 -07001577 kfree(ch);
Brian Swetland34f719b2009-10-30 16:22:05 -07001578 return -1;
Brian Swetland5b0f5a32009-04-26 18:38:49 -07001579 }
1580
1581 ch->fifo_mask = ch->fifo_size - 1;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001582 ch->type = SMD_CHANNEL_TYPE(alloc_elm->type);
Brian Swetland5b0f5a32009-04-26 18:38:49 -07001583
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001584 if (ch->type == SMD_APPS_MODEM)
Brian Swetland5b0f5a32009-04-26 18:38:49 -07001585 ch->notify_other_cpu = notify_modem_smd;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001586 else if (ch->type == SMD_APPS_QDSP)
Brian Swetland5b0f5a32009-04-26 18:38:49 -07001587 ch->notify_other_cpu = notify_dsp_smd;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001588 else if (ch->type == SMD_APPS_DSPS)
1589 ch->notify_other_cpu = notify_dsps_smd;
1590 else
1591 ch->notify_other_cpu = notify_wcnss_smd;
Brian Swetland5b0f5a32009-04-26 18:38:49 -07001592
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001593 if (smd_is_packet(alloc_elm)) {
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001594 ch->read = smd_packet_read;
1595 ch->write = smd_packet_write;
1596 ch->read_avail = smd_packet_read_avail;
1597 ch->write_avail = smd_packet_write_avail;
1598 ch->update_state = update_packet_state;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001599 ch->read_from_cb = smd_packet_read_from_cb;
1600 ch->is_pkt_ch = 1;
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001601 } else {
1602 ch->read = smd_stream_read;
1603 ch->write = smd_stream_write;
1604 ch->read_avail = smd_stream_read_avail;
1605 ch->write_avail = smd_stream_write_avail;
1606 ch->update_state = update_stream_state;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001607 ch->read_from_cb = smd_stream_read;
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001608 }
1609
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001610 memcpy(ch->name, alloc_elm->name, SMD_MAX_CH_NAME_LEN);
1611 ch->name[SMD_MAX_CH_NAME_LEN-1] = 0;
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001612
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001613 ch->pdev.name = ch->name;
1614 ch->pdev.id = ch->type;
1615
1616 SMD_INFO("smd_alloc_channel() '%s' cid=%d\n",
1617 ch->name, ch->n);
1618
1619 mutex_lock(&smd_creation_mutex);
1620 list_add(&ch->ch_list, &smd_ch_closed_list);
1621 mutex_unlock(&smd_creation_mutex);
1622
1623 platform_device_register(&ch->pdev);
1624 if (!strncmp(ch->name, "LOOPBACK", 8) && ch->type == SMD_APPS_MODEM) {
1625 /* create a platform driver to be used by smd_tty driver
1626 * so that it can access the loopback port
1627 */
1628 loopback_tty_pdev.id = ch->type;
1629 platform_device_register(&loopback_tty_pdev);
1630 }
1631 return 0;
1632}
1633
1634static inline void notify_loopback_smd(void)
1635{
1636 unsigned long flags;
1637 struct smd_channel *ch;
1638
1639 spin_lock_irqsave(&smd_lock, flags);
1640 list_for_each_entry(ch, &smd_ch_list_loopback, ch_list) {
1641 ch->notify(ch->priv, SMD_EVENT_DATA);
1642 }
1643 spin_unlock_irqrestore(&smd_lock, flags);
1644}
1645
1646static int smd_alloc_loopback_channel(void)
1647{
1648 static struct smd_half_channel smd_loopback_ctl;
1649 static char smd_loopback_data[SMD_BUF_SIZE];
1650 struct smd_channel *ch;
1651
1652 ch = kzalloc(sizeof(struct smd_channel), GFP_KERNEL);
1653 if (ch == 0) {
1654 pr_err("%s: out of memory\n", __func__);
1655 return -1;
1656 }
1657 ch->n = SMD_LOOPBACK_CID;
1658
1659 ch->send = &smd_loopback_ctl;
1660 ch->recv = &smd_loopback_ctl;
1661 ch->send_data = smd_loopback_data;
1662 ch->recv_data = smd_loopback_data;
1663 ch->fifo_size = SMD_BUF_SIZE;
1664
1665 ch->fifo_mask = ch->fifo_size - 1;
1666 ch->type = SMD_LOOPBACK_TYPE;
1667 ch->notify_other_cpu = notify_loopback_smd;
1668
1669 ch->read = smd_stream_read;
1670 ch->write = smd_stream_write;
1671 ch->read_avail = smd_stream_read_avail;
1672 ch->write_avail = smd_stream_write_avail;
1673 ch->update_state = update_stream_state;
1674 ch->read_from_cb = smd_stream_read;
1675
1676 memset(ch->name, 0, 20);
1677 memcpy(ch->name, "local_loopback", 14);
1678
1679 ch->pdev.name = ch->name;
1680 ch->pdev.id = ch->type;
1681
1682 SMD_INFO("%s: '%s' cid=%d\n", __func__, ch->name, ch->n);
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001683
1684 mutex_lock(&smd_creation_mutex);
1685 list_add(&ch->ch_list, &smd_ch_closed_list);
1686 mutex_unlock(&smd_creation_mutex);
1687
1688 platform_device_register(&ch->pdev);
Brian Swetland34f719b2009-10-30 16:22:05 -07001689 return 0;
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001690}
1691
/* No-op notify callback installed when a client registers no notifier
 * (or while a channel is being torn down), so ch->notify is never NULL. */
static void do_nothing_notify(void *priv, unsigned flags)
{
}
1695
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001696static void finalize_channel_close_fn(struct work_struct *work)
1697{
1698 unsigned long flags;
1699 struct smd_channel *ch;
1700 struct smd_channel *index;
1701
Eric Holmbergbb2b1fa2011-10-12 16:41:37 -06001702 mutex_lock(&smd_creation_mutex);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001703 spin_lock_irqsave(&smd_lock, flags);
1704 list_for_each_entry_safe(ch, index, &smd_ch_to_close_list, ch_list) {
1705 list_del(&ch->ch_list);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001706 list_add(&ch->ch_list, &smd_ch_closed_list);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001707 ch->notify(ch->priv, SMD_EVENT_REOPEN_READY);
1708 ch->notify = do_nothing_notify;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001709 }
1710 spin_unlock_irqrestore(&smd_lock, flags);
Eric Holmbergbb2b1fa2011-10-12 16:41:37 -06001711 mutex_unlock(&smd_creation_mutex);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001712}
1713
1714struct smd_channel *smd_get_channel(const char *name, uint32_t type)
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001715{
1716 struct smd_channel *ch;
1717
1718 mutex_lock(&smd_creation_mutex);
1719 list_for_each_entry(ch, &smd_ch_closed_list, ch_list) {
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001720 if (!strcmp(name, ch->name) &&
1721 (type == ch->type)) {
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001722 list_del(&ch->ch_list);
1723 mutex_unlock(&smd_creation_mutex);
1724 return ch;
1725 }
1726 }
1727 mutex_unlock(&smd_creation_mutex);
1728
1729 return NULL;
1730}
1731
/*
 * smd_named_open_on_edge() - open an SMD channel by name on a given edge
 * @name:   channel name from the SMD allocation table
 * @edge:   SMD_APPS_* edge (or SMD_LOOPBACK_TYPE)
 * @_ch:    out parameter; receives the opened channel handle
 * @priv:   opaque cookie passed back to @notify
 * @notify: event callback (may be NULL; a no-op is substituted)
 *
 * Claims the channel from the closed list, installs the client callback,
 * adds the channel to the per-edge active list and kicks the state
 * machine toward OPENING.  Loopback channels are opened synchronously by
 * writing the shared control block directly.
 *
 * Returns 0 on success, -ENODEV if SMD is not initialized or the channel
 * does not exist, -EAGAIN if the channel is still being closed.
 */
int smd_named_open_on_edge(const char *name, uint32_t edge,
			   smd_channel_t **_ch,
			   void *priv, void (*notify)(void *, unsigned))
{
	struct smd_channel *ch;
	unsigned long flags;

	if (smd_initialized == 0) {
		SMD_INFO("smd_open() before smd_init()\n");
		return -ENODEV;
	}

	SMD_DBG("smd_open('%s', %p, %p)\n", name, priv, notify);

	ch = smd_get_channel(name, edge);
	if (!ch) {
		/* check closing list for port */
		spin_lock_irqsave(&smd_lock, flags);
		list_for_each_entry(ch, &smd_ch_closing_list, ch_list) {
			if (!strncmp(name, ch->name, 20) &&
				(edge == ch->type)) {
				/* channel exists, but is being closed */
				spin_unlock_irqrestore(&smd_lock, flags);
				return -EAGAIN;
			}
		}

		/* check closing workqueue list for port */
		list_for_each_entry(ch, &smd_ch_to_close_list, ch_list) {
			if (!strncmp(name, ch->name, 20) &&
				(edge == ch->type)) {
				/* channel exists, but is being closed */
				spin_unlock_irqrestore(&smd_lock, flags);
				return -EAGAIN;
			}
		}
		spin_unlock_irqrestore(&smd_lock, flags);

		/* one final check to handle closing->closed race condition */
		ch = smd_get_channel(name, edge);
		if (!ch)
			return -ENODEV;
	}

	if (notify == 0)
		notify = do_nothing_notify;

	ch->notify = notify;
	ch->current_packet = 0;
	ch->last_state = SMD_SS_CLOSED;
	ch->priv = priv;

	if (edge == SMD_LOOPBACK_TYPE) {
		/* no remote side: mark our own half fully opened */
		ch->last_state = SMD_SS_OPENED;
		ch->send->state = SMD_SS_OPENED;
		ch->send->fDSR = 1;
		ch->send->fCTS = 1;
		ch->send->fCD = 1;
	}

	*_ch = ch;

	SMD_DBG("smd_open: opening '%s'\n", ch->name);

	/* file the channel on the list serviced by its edge's interrupt */
	spin_lock_irqsave(&smd_lock, flags);
	if (SMD_CHANNEL_TYPE(ch->type) == SMD_APPS_MODEM)
		list_add(&ch->ch_list, &smd_ch_list_modem);
	else if (SMD_CHANNEL_TYPE(ch->type) == SMD_APPS_QDSP)
		list_add(&ch->ch_list, &smd_ch_list_dsp);
	else if (SMD_CHANNEL_TYPE(ch->type) == SMD_APPS_DSPS)
		list_add(&ch->ch_list, &smd_ch_list_dsps);
	else if (SMD_CHANNEL_TYPE(ch->type) == SMD_APPS_WCNSS)
		list_add(&ch->ch_list, &smd_ch_list_wcnss);
	else
		list_add(&ch->ch_list, &smd_ch_list_loopback);

	SMD_DBG("%s: opening ch %d\n", __func__, ch->n);

	if (edge != SMD_LOOPBACK_TYPE)
		smd_state_change(ch, ch->last_state, SMD_SS_OPENING);

	spin_unlock_irqrestore(&smd_lock, flags);

	return 0;
}
EXPORT_SYMBOL(smd_named_open_on_edge);
1818
1819
1820int smd_open(const char *name, smd_channel_t **_ch,
1821 void *priv, void (*notify)(void *, unsigned))
1822{
1823 return smd_named_open_on_edge(name, SMD_APPS_MODEM, _ch, priv,
1824 notify);
1825}
1826EXPORT_SYMBOL(smd_open);
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001827
/*
 * smd_close() - close an SMD channel
 * @ch: channel returned by smd_named_open_on_edge()/smd_open()
 *
 * Drops the channel from its active edge list and signals CLOSED to the
 * remote side (loopback ports clear their modem-control bits directly
 * since both halves share one control block).  If the remote half is
 * still OPENED the channel is parked on smd_ch_closing_list until the
 * remote state change arrives; otherwise it returns to the closed list
 * and may be reopened immediately.
 *
 * Returns 0 on success, -1 if @ch is NULL.
 */
int smd_close(smd_channel_t *ch)
{
	unsigned long flags;

	if (ch == 0)
		return -1;

	SMD_INFO("smd_close(%s)\n", ch->name);

	spin_lock_irqsave(&smd_lock, flags);
	list_del(&ch->ch_list);
	if (ch->n == SMD_LOOPBACK_CID) {
		/* loopback has no peer: drop the control signals directly */
		ch->send->fDSR = 0;
		ch->send->fCTS = 0;
		ch->send->fCD = 0;
		ch->send->state = SMD_SS_CLOSED;
	} else
		ch_set_state(ch, SMD_SS_CLOSED);

	if (ch->recv->state == SMD_SS_OPENED) {
		/* remote still open: defer final close to the state change */
		list_add(&ch->ch_list, &smd_ch_closing_list);
		spin_unlock_irqrestore(&smd_lock, flags);
	} else {
		spin_unlock_irqrestore(&smd_lock, flags);
		ch->notify = do_nothing_notify;
		mutex_lock(&smd_creation_mutex);
		list_add(&ch->ch_list, &smd_ch_closed_list);
		mutex_unlock(&smd_creation_mutex);
	}

	return 0;
}
EXPORT_SYMBOL(smd_close);
1861
/*
 * smd_write_start() - begin a segmented packet write on a packet channel
 * @ch:  packet-mode channel
 * @len: total payload length of the packet about to be written
 *
 * Writes the 20-byte SMD packet header (payload length plus four zeroed
 * words) and records @len in ch->pending_pkt_sz so that subsequent
 * smd_write_segment() calls can account for the remaining bytes.  Only
 * one packet may be in progress per channel.
 *
 * Returns 0 on success; -ENODEV/-EACCES/-EINVAL for bad arguments,
 * -EBUSY if a packet is already in progress, -EAGAIN if there is not
 * enough room for the header, -EPERM if the header write fails.
 */
int smd_write_start(smd_channel_t *ch, int len)
{
	int ret;
	unsigned hdr[5];

	if (!ch) {
		pr_err("%s: Invalid channel specified\n", __func__);
		return -ENODEV;
	}
	if (!ch->is_pkt_ch) {
		pr_err("%s: non-packet channel specified\n", __func__);
		return -EACCES;
	}
	if (len < 1) {
		pr_err("%s: invalid length: %d\n", __func__, len);
		return -EINVAL;
	}

	if (ch->pending_pkt_sz) {
		pr_err("%s: packet of size: %d in progress\n", __func__,
			ch->pending_pkt_sz);
		return -EBUSY;
	}
	ch->pending_pkt_sz = len;

	if (smd_stream_write_avail(ch) < (SMD_HEADER_SIZE)) {
		/* not even the header fits: abort the transaction cleanly */
		ch->pending_pkt_sz = 0;
		SMD_DBG("%s: no space to write packet header\n", __func__);
		return -EAGAIN;
	}

	hdr[0] = len;
	hdr[1] = hdr[2] = hdr[3] = hdr[4] = 0;


	ret = smd_stream_write(ch, hdr, sizeof(hdr), 0);
	if (ret < 0 || ret != sizeof(hdr)) {
		/* header failed or was only partially written: cancel */
		ch->pending_pkt_sz = 0;
		pr_err("%s: packet header failed to write\n", __func__);
		return -EPERM;
	}
	return 0;
}
EXPORT_SYMBOL(smd_write_start);
1906
1907int smd_write_segment(smd_channel_t *ch, void *data, int len, int user_buf)
1908{
1909 int bytes_written;
1910
1911 if (!ch) {
1912 pr_err("%s: Invalid channel specified\n", __func__);
1913 return -ENODEV;
1914 }
1915 if (len < 1) {
1916 pr_err("%s: invalid length: %d\n", __func__, len);
1917 return -EINVAL;
1918 }
1919
1920 if (!ch->pending_pkt_sz) {
1921 pr_err("%s: no transaction in progress\n", __func__);
1922 return -ENOEXEC;
1923 }
1924 if (ch->pending_pkt_sz - len < 0) {
1925 pr_err("%s: segment of size: %d will make packet go over "
1926 "length\n", __func__, len);
1927 return -EINVAL;
1928 }
1929
1930 bytes_written = smd_stream_write(ch, data, len, user_buf);
1931
1932 ch->pending_pkt_sz -= bytes_written;
1933
1934 return bytes_written;
1935}
1936EXPORT_SYMBOL(smd_write_segment);
1937
1938int smd_write_end(smd_channel_t *ch)
1939{
1940
1941 if (!ch) {
1942 pr_err("%s: Invalid channel specified\n", __func__);
1943 return -ENODEV;
1944 }
1945 if (ch->pending_pkt_sz) {
1946 pr_err("%s: current packet not completely written\n", __func__);
1947 return -E2BIG;
1948 }
1949
1950 return 0;
1951}
1952EXPORT_SYMBOL(smd_write_end);
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001953
1954int smd_read(smd_channel_t *ch, void *data, int len)
1955{
Jack Pham1b236d12012-03-19 15:27:18 -07001956 if (!ch) {
1957 pr_err("%s: Invalid channel specified\n", __func__);
1958 return -ENODEV;
1959 }
1960
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001961 return ch->read(ch, data, len, 0);
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001962}
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001963EXPORT_SYMBOL(smd_read);
1964
1965int smd_read_user_buffer(smd_channel_t *ch, void *data, int len)
1966{
Jack Pham1b236d12012-03-19 15:27:18 -07001967 if (!ch) {
1968 pr_err("%s: Invalid channel specified\n", __func__);
1969 return -ENODEV;
1970 }
1971
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001972 return ch->read(ch, data, len, 1);
1973}
1974EXPORT_SYMBOL(smd_read_user_buffer);
1975
1976int smd_read_from_cb(smd_channel_t *ch, void *data, int len)
1977{
Jack Pham1b236d12012-03-19 15:27:18 -07001978 if (!ch) {
1979 pr_err("%s: Invalid channel specified\n", __func__);
1980 return -ENODEV;
1981 }
1982
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001983 return ch->read_from_cb(ch, data, len, 0);
1984}
1985EXPORT_SYMBOL(smd_read_from_cb);
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001986
1987int smd_write(smd_channel_t *ch, const void *data, int len)
1988{
Jack Pham1b236d12012-03-19 15:27:18 -07001989 if (!ch) {
1990 pr_err("%s: Invalid channel specified\n", __func__);
1991 return -ENODEV;
1992 }
1993
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001994 return ch->pending_pkt_sz ? -EBUSY : ch->write(ch, data, len, 0);
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001995}
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001996EXPORT_SYMBOL(smd_write);
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001997
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001998int smd_write_user_buffer(smd_channel_t *ch, const void *data, int len)
Brian Swetland636eb9c2009-12-07 15:28:08 -08001999{
Jack Pham1b236d12012-03-19 15:27:18 -07002000 if (!ch) {
2001 pr_err("%s: Invalid channel specified\n", __func__);
2002 return -ENODEV;
2003 }
2004
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002005 return ch->pending_pkt_sz ? -EBUSY : ch->write(ch, data, len, 1);
Brian Swetland636eb9c2009-12-07 15:28:08 -08002006}
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002007EXPORT_SYMBOL(smd_write_user_buffer);
Brian Swetland636eb9c2009-12-07 15:28:08 -08002008
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002009int smd_read_avail(smd_channel_t *ch)
2010{
Jack Pham1b236d12012-03-19 15:27:18 -07002011 if (!ch) {
2012 pr_err("%s: Invalid channel specified\n", __func__);
2013 return -ENODEV;
2014 }
2015
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002016 return ch->read_avail(ch);
2017}
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002018EXPORT_SYMBOL(smd_read_avail);
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002019
2020int smd_write_avail(smd_channel_t *ch)
2021{
Jack Pham1b236d12012-03-19 15:27:18 -07002022 if (!ch) {
2023 pr_err("%s: Invalid channel specified\n", __func__);
2024 return -ENODEV;
2025 }
2026
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002027 return ch->write_avail(ch);
2028}
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002029EXPORT_SYMBOL(smd_write_avail);
2030
2031void smd_enable_read_intr(smd_channel_t *ch)
2032{
2033 if (ch)
2034 ch->send->fBLOCKREADINTR = 0;
2035}
2036EXPORT_SYMBOL(smd_enable_read_intr);
2037
2038void smd_disable_read_intr(smd_channel_t *ch)
2039{
2040 if (ch)
2041 ch->send->fBLOCKREADINTR = 1;
2042}
2043EXPORT_SYMBOL(smd_disable_read_intr);
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002044
/* Blocking reads are not implemented; always fails with -1.
 * Clients must use the notify callback and poll smd_read_avail(). */
int smd_wait_until_readable(smd_channel_t *ch, int bytes)
{
	return -1;
}
2049
/* Blocking writes are not implemented; always fails with -1.
 * Clients must use the notify callback and poll smd_write_avail(). */
int smd_wait_until_writable(smd_channel_t *ch, int bytes)
{
	return -1;
}
2054
2055int smd_cur_packet_size(smd_channel_t *ch)
2056{
Jack Pham1b236d12012-03-19 15:27:18 -07002057 if (!ch) {
2058 pr_err("%s: Invalid channel specified\n", __func__);
2059 return -ENODEV;
2060 }
2061
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002062 return ch->current_packet;
2063}
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002064EXPORT_SYMBOL(smd_cur_packet_size);
2065
2066int smd_tiocmget(smd_channel_t *ch)
2067{
Jack Pham1b236d12012-03-19 15:27:18 -07002068 if (!ch) {
2069 pr_err("%s: Invalid channel specified\n", __func__);
2070 return -ENODEV;
2071 }
2072
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002073 return (ch->recv->fDSR ? TIOCM_DSR : 0) |
2074 (ch->recv->fCTS ? TIOCM_CTS : 0) |
2075 (ch->recv->fCD ? TIOCM_CD : 0) |
2076 (ch->recv->fRI ? TIOCM_RI : 0) |
2077 (ch->send->fCTS ? TIOCM_RTS : 0) |
2078 (ch->send->fDSR ? TIOCM_DTR : 0);
2079}
2080EXPORT_SYMBOL(smd_tiocmget);
2081
/* this api will be called while holding smd_lock */
/*
 * smd_tiocmset_from_cb() - set/clear outgoing modem-control signals
 * @ch:    channel whose outgoing signals are updated
 * @set:   TIOCM_* bits to assert
 * @clear: TIOCM_* bits to deassert
 *
 * TIOCM_DTR maps onto the shared fDSR flag and TIOCM_RTS onto fCTS.
 * After updating the flags, fSTATE is raised and the remote processor's
 * doorbell is rung.  Caller must hold smd_lock (hence safe from notify
 * callbacks); smd_tiocmset() is the locking wrapper.
 *
 * Returns 0 on success, -ENODEV if @ch is NULL.
 */
int
smd_tiocmset_from_cb(smd_channel_t *ch, unsigned int set, unsigned int clear)
{
	if (!ch) {
		pr_err("%s: Invalid channel specified\n", __func__);
		return -ENODEV;
	}

	if (set & TIOCM_DTR)
		ch->send->fDSR = 1;

	if (set & TIOCM_RTS)
		ch->send->fCTS = 1;

	if (clear & TIOCM_DTR)
		ch->send->fDSR = 0;

	if (clear & TIOCM_RTS)
		ch->send->fCTS = 0;

	ch->send->fSTATE = 1;
	/* flag writes must not be reordered past the remote interrupt */
	barrier();
	ch->notify_other_cpu();

	return 0;
}
EXPORT_SYMBOL(smd_tiocmset_from_cb);
2110
2111int smd_tiocmset(smd_channel_t *ch, unsigned int set, unsigned int clear)
2112{
2113 unsigned long flags;
2114
Jack Pham1b236d12012-03-19 15:27:18 -07002115 if (!ch) {
2116 pr_err("%s: Invalid channel specified\n", __func__);
2117 return -ENODEV;
2118 }
2119
Vamsi Krishnacb12a102011-08-17 15:18:26 -07002120 spin_lock_irqsave(&smd_lock, flags);
2121 smd_tiocmset_from_cb(ch, set, clear);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002122 spin_unlock_irqrestore(&smd_lock, flags);
2123
2124 return 0;
2125}
2126EXPORT_SYMBOL(smd_tiocmset);
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002127
2128
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002129/* -------------------------------------------------------------------------- */
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002130
/*
 * smem_alloc() - return an SMEM item that is already allocated
 * @id:   SMEM item identifier
 * @size: expected size of the item
 *
 * Never allocates: returns NULL when the item is absent or its size
 * mismatches.  Use smem_alloc2() to allocate on demand.
 */
void *smem_alloc(unsigned id, unsigned size)
{
	return smem_find(id, size);
}
EXPORT_SYMBOL(smem_alloc);
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002139
/* smem_alloc2 returns the pointer to smem item. If it is not allocated,
 * it allocates it and then returns the pointer to it.
 *
 * @id:      SMEM item identifier
 * @size_in: requested size; rounded up to an 8-byte multiple
 *
 * Fixed items (id <= SMEM_FIXED_ITEM_LAST) are never allocated here.
 * The whole operation runs under the inter-processor remote spinlock so
 * both CPUs see a consistent heap.  Returns NULL on size mismatch,
 * out-of-range id, uninitialized heap, or heap exhaustion.
 */
void *smem_alloc2(unsigned id, unsigned size_in)
{
	struct smem_shared *shared = (void *) MSM_SHARED_RAM_BASE;
	struct smem_heap_entry *toc = shared->heap_toc;
	unsigned long flags;
	void *ret = NULL;

	if (!shared->heap_info.initialized) {
		pr_err("%s: smem heap info not initialized\n", __func__);
		return NULL;
	}

	if (id >= SMEM_NUM_ITEMS)
		return NULL;

	size_in = ALIGN(size_in, 8);
	remote_spin_lock_irqsave(&remote_spinlock, flags);
	if (toc[id].allocated) {
		SMD_DBG("%s: %u already allocated\n", __func__, id);
		if (size_in != toc[id].size)
			pr_err("%s: wrong size %u (expected %u)\n",
				__func__, toc[id].size, size_in);
		else
			ret = (void *)(MSM_SHARED_RAM_BASE + toc[id].offset);
	} else if (id > SMEM_FIXED_ITEM_LAST) {
		SMD_DBG("%s: allocating %u\n", __func__, id);
		if (shared->heap_info.heap_remaining >= size_in) {
			toc[id].offset = shared->heap_info.free_offset;
			toc[id].size = size_in;
			/* offset/size must be visible to the remote CPU
			 * before the entry is marked allocated */
			wmb();
			toc[id].allocated = 1;

			shared->heap_info.free_offset += size_in;
			shared->heap_info.heap_remaining -= size_in;
			ret = (void *)(MSM_SHARED_RAM_BASE + toc[id].offset);
		} else
			pr_err("%s: not enough memory %u (required %u)\n",
				__func__, shared->heap_info.heap_remaining,
				size_in);
	}
	/* flush heap bookkeeping before releasing the remote lock */
	wmb();
	remote_spin_unlock_irqrestore(&remote_spinlock, flags);
	return ret;
}
EXPORT_SYMBOL(smem_alloc2);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002188
/*
 * smem_get_entry() - look up an allocated SMEM item and its size
 * @id:   SMEM item identifier
 * @size: out parameter; set to the item's size, or 0 if not allocated
 *
 * Returns the item's address in shared RAM, or NULL when @id is out of
 * range or the item has not been allocated.  The remote spinlock is only
 * taken once it has been initialized, so early-boot callers still work
 * (at the cost of racing the remote CPU until then).
 */
void *smem_get_entry(unsigned id, unsigned *size)
{
	struct smem_shared *shared = (void *) MSM_SHARED_RAM_BASE;
	struct smem_heap_entry *toc = shared->heap_toc;
	int use_spinlocks = spinlocks_initialized;
	void *ret = 0;
	unsigned long flags = 0;

	if (id >= SMEM_NUM_ITEMS)
		return ret;

	if (use_spinlocks)
		remote_spin_lock_irqsave(&remote_spinlock, flags);
	/* toc is in device memory and cannot be speculatively accessed */
	if (toc[id].allocated) {
		*size = toc[id].size;
		/* compiler barrier: read the size before forming the ptr */
		barrier();
		ret = (void *) (MSM_SHARED_RAM_BASE + toc[id].offset);
	} else {
		*size = 0;
	}
	if (use_spinlocks)
		remote_spin_unlock_irqrestore(&remote_spinlock, flags);

	return ret;
}
EXPORT_SYMBOL(smem_get_entry);
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002216
/*
 * smem_find() - fetch an allocated SMEM item, validating its size
 * @id:      SMEM item identifier
 * @size_in: expected size (rounded up to an 8-byte multiple to match
 *           the allocator's alignment)
 *
 * Returns the item's address, or NULL when the item is missing or its
 * recorded size differs from the aligned expectation.
 */
void *smem_find(unsigned id, unsigned size_in)
{
	unsigned actual_size;
	void *ptr = smem_get_entry(id, &actual_size);

	if (!ptr)
		return 0;

	size_in = ALIGN(size_in, 8);
	if (size_in != actual_size) {
		pr_err("smem_find(%d, %d): wrong size %d\n",
			id, size_in, actual_size);
		return 0;
	}

	return ptr;
}
EXPORT_SYMBOL(smem_find);
2236
/*
 * smsm_cb_init() - allocate and seed per-entry SMSM callback bookkeeping
 *
 * Creates smsm_states[SMSM_NUM_ENTRIES]; each slot caches the current
 * shared state word (so the first notification can report a delta),
 * starts with empty interrupt masks and an empty callback list.  Must
 * run after the shared state area has been mapped.
 *
 * Returns 0 on success, -ENOMEM if the array cannot be allocated.
 */
static int smsm_cb_init(void)
{
	struct smsm_state_info *state_info;
	int n;
	int ret = 0;

	smsm_states = kmalloc(sizeof(struct smsm_state_info)*SMSM_NUM_ENTRIES,
		      GFP_KERNEL);

	if (!smsm_states) {
		pr_err("%s: SMSM init failed\n", __func__);
		return -ENOMEM;
	}

	/* serialize against callback registration/deregistration */
	mutex_lock(&smsm_lock);
	for (n = 0; n < SMSM_NUM_ENTRIES; n++) {
		state_info = &smsm_states[n];
		state_info->last_value = __raw_readl(SMSM_STATE_ADDR(n));
		state_info->intr_mask_set = 0x0;
		state_info->intr_mask_clear = 0x0;
		INIT_LIST_HEAD(&state_info->callbacks);
	}
	mutex_unlock(&smsm_lock);

	return ret;
}
2263
/*
 * smsm_init() - one-time setup of the Shared Memory State Machine
 *
 * In order: initializes the inter-processor spinlock guarding SMEM,
 * adopts table sizes published by the remote side (when present),
 * allocates the snapshot FIFO and wakelock used by the interrupt path,
 * maps or creates the shared state / interrupt-mask / interrupt-mux
 * SMEM items, seeds the callback bookkeeping, and finally announces
 * SMSM_INIT to driver-state listeners.
 *
 * Returns 0 on success or a negative errno from a failed sub-step.
 */
static int smsm_init(void)
{
	struct smem_shared *shared = (void *) MSM_SHARED_RAM_BASE;
	int i;
	struct smsm_size_info_type *smsm_size_info;

	i = remote_spin_lock_init(&remote_spinlock, SMEM_SPINLOCK_SMEM_ALLOC);
	if (i) {
		pr_err("%s: remote spinlock init failed %d\n", __func__, i);
		return i;
	}
	spinlocks_initialized = 1;

	/* the remote side may publish larger-than-default table sizes */
	smsm_size_info = smem_alloc(SMEM_SMSM_SIZE_INFO,
				sizeof(struct smsm_size_info_type));
	if (smsm_size_info) {
		SMSM_NUM_ENTRIES = smsm_size_info->num_entries;
		SMSM_NUM_HOSTS = smsm_size_info->num_hosts;
	}

	i = kfifo_alloc(&smsm_snapshot_fifo,
			sizeof(uint32_t) * SMSM_NUM_ENTRIES * SMSM_SNAPSHOT_CNT,
			GFP_KERNEL);
	if (i) {
		pr_err("%s: SMSM state fifo alloc failed %d\n", __func__, i);
		return i;
	}
	wake_lock_init(&smsm_snapshot_wakelock, WAKE_LOCK_SUSPEND,
			"smsm_snapshot");

	if (!smsm_info.state) {
		smsm_info.state = smem_alloc2(ID_SHARED_STATE,
					      SMSM_NUM_ENTRIES *
					      sizeof(uint32_t));

		if (smsm_info.state) {
			__raw_writel(0, SMSM_STATE_ADDR(SMSM_APPS_STATE));
			if ((shared->version[VERSION_MODEM] >> 16) >= 0xB)
				__raw_writel(0, \
					SMSM_STATE_ADDR(SMSM_APPS_DEM_I));
		}
	}

	if (!smsm_info.intr_mask) {
		smsm_info.intr_mask = smem_alloc2(SMEM_SMSM_CPU_INTR_MASK,
						  SMSM_NUM_ENTRIES *
						  SMSM_NUM_HOSTS *
						  sizeof(uint32_t));

		if (smsm_info.intr_mask) {
			/* start with all interrupts masked for apps */
			for (i = 0; i < SMSM_NUM_ENTRIES; i++)
				__raw_writel(0x0,
					SMSM_INTR_MASK_ADDR(i, SMSM_APPS));

			/* Configure legacy modem bits */
			__raw_writel(LEGACY_MODEM_SMSM_MASK,
				SMSM_INTR_MASK_ADDR(SMSM_MODEM_STATE,
					SMSM_APPS));
		}
	}

	if (!smsm_info.intr_mux)
		smsm_info.intr_mux = smem_alloc2(SMEM_SMD_SMSM_INTR_MUX,
						SMSM_NUM_INTR_MUX *
						sizeof(uint32_t));

	i = smsm_cb_init();
	if (i)
		return i;

	/* commit all shared-memory writes before announcing init */
	wmb();
	smsm_driver_state_notify(SMSM_INIT, NULL);
	return 0;
}
2338
2339void smsm_reset_modem(unsigned mode)
2340{
2341 if (mode == SMSM_SYSTEM_DOWNLOAD) {
2342 mode = SMSM_RESET | SMSM_SYSTEM_DOWNLOAD;
2343 } else if (mode == SMSM_MODEM_WAIT) {
2344 mode = SMSM_RESET | SMSM_MODEM_WAIT;
2345 } else { /* reset_mode is SMSM_RESET or default */
2346 mode = SMSM_RESET;
2347 }
2348
2349 smsm_change_state(SMSM_APPS_STATE, mode, mode);
2350}
2351EXPORT_SYMBOL(smsm_reset_modem);
2352
/*
 * smsm_reset_modem_cont() - release a modem parked in SMSM_MODEM_WAIT
 *
 * Clears the MODEM_WAIT bit from the apps SMSM state word so a modem
 * held by smsm_reset_modem(SMSM_MODEM_WAIT) continues its reset.
 * No-op until the shared state area exists.
 */
void smsm_reset_modem_cont(void)
{
	unsigned long flags;
	uint32_t state;

	if (!smsm_info.state)
		return;

	spin_lock_irqsave(&smem_lock, flags);
	state = __raw_readl(SMSM_STATE_ADDR(SMSM_APPS_STATE)) \
			& ~SMSM_MODEM_WAIT;
	__raw_writel(state, SMSM_STATE_ADDR(SMSM_APPS_STATE));
	/* make the cleared bit visible to the modem before unlocking */
	wmb();
	spin_unlock_irqrestore(&smem_lock, flags);
}
EXPORT_SYMBOL(smsm_reset_modem_cont);
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002369
/*
 * smsm_cb_snapshot() - capture current SMSM state into the snapshot FIFO
 * @use_wakelock: nonzero to hold a wakelock until the worker drains this
 *                snapshot (used from the interrupt path)
 *
 * Copies every SMSM state word plus the wakelock flag into
 * smsm_snapshot_fifo and schedules smsm_cb_work to run the registered
 * callbacks from process context.
 */
static void smsm_cb_snapshot(uint32_t use_wakelock)
{
	int n;
	uint32_t new_state;
	unsigned long flags;
	int ret;

	/* bail early if a full snapshot (entries + flag) cannot fit */
	ret = kfifo_avail(&smsm_snapshot_fifo);
	if (ret < SMSM_SNAPSHOT_SIZE) {
		pr_err("%s: SMSM snapshot full %d\n", __func__, ret);
		return;
	}

	/*
	 * To avoid a race condition with notify_smsm_cb_clients_worker, the
	 * following sequence must be followed:
	 * 1) increment snapshot count
	 * 2) insert data into FIFO
	 *
	 * Potentially in parallel, the worker:
	 * a) verifies >= 1 snapshots are in FIFO
	 * b) processes snapshot
	 * c) decrements reference count
	 *
	 * This order ensures that 1 will always occur before abc.
	 */
	if (use_wakelock) {
		spin_lock_irqsave(&smsm_snapshot_count_lock, flags);
		if (smsm_snapshot_count == 0) {
			SMx_POWER_INFO("SMSM snapshot wake lock\n");
			wake_lock(&smsm_snapshot_wakelock);
		}
		++smsm_snapshot_count;
		spin_unlock_irqrestore(&smsm_snapshot_count_lock, flags);
	}

	/* queue state entries */
	for (n = 0; n < SMSM_NUM_ENTRIES; n++) {
		new_state = __raw_readl(SMSM_STATE_ADDR(n));

		ret = kfifo_in(&smsm_snapshot_fifo,
				&new_state, sizeof(new_state));
		if (ret != sizeof(new_state)) {
			pr_err("%s: SMSM snapshot failure %d\n", __func__, ret);
			goto restore_snapshot_count;
		}
	}

	/* queue wakelock usage flag */
	ret = kfifo_in(&smsm_snapshot_fifo,
			&use_wakelock, sizeof(use_wakelock));
	if (ret != sizeof(use_wakelock)) {
		pr_err("%s: SMSM snapshot failure %d\n", __func__, ret);
		goto restore_snapshot_count;
	}

	schedule_work(&smsm_cb_work);
	return;

restore_snapshot_count:
	/* partial snapshot was queued: undo step 1's count/wakelock */
	if (use_wakelock) {
		spin_lock_irqsave(&smsm_snapshot_count_lock, flags);
		if (smsm_snapshot_count) {
			--smsm_snapshot_count;
			if (smsm_snapshot_count == 0) {
				SMx_POWER_INFO("SMSM snapshot wake unlock\n");
				wake_unlock(&smsm_snapshot_wakelock);
			}
		} else {
			pr_err("%s: invalid snapshot count\n", __func__);
		}
		spin_unlock_irqrestore(&smsm_snapshot_count_lock, flags);
	}
}
2444
/*
 * Shared SMSM interrupt handler.
 *
 * Reads the remote processor's SMSM state word from shared memory, mirrors
 * any required handshake bits into the apps state word, and captures a
 * state snapshot so callbacks run later in process context.
 *
 * @irq   interrupt number that fired
 * @data  unused cookie from request_irq()
 * @returns IRQ_HANDLED always
 */
static irqreturn_t smsm_irq_handler(int irq, void *data)
{
	unsigned long flags;

	if (irq == INT_ADSP_A11_SMSM) {
		uint32_t mux_val;
		static uint32_t prev_smem_q6_apps_smsm;

		/* 8x50 only: latch the Q6->apps interrupt mux value so a
		 * changed value is remembered across invocations. */
		if (smsm_info.intr_mux && cpu_is_qsd8x50()) {
			mux_val = __raw_readl(
					SMSM_INTR_MUX_ADDR(SMEM_Q6_APPS_SMSM));
			if (mux_val != prev_smem_q6_apps_smsm)
				prev_smem_q6_apps_smsm = mux_val;
		}

		/* queue a snapshot (argument 1 = take a wakelock reference)
		 * and let the worker deliver callbacks outside IRQ context */
		spin_lock_irqsave(&smem_lock, flags);
		smsm_cb_snapshot(1);
		spin_unlock_irqrestore(&smem_lock, flags);
		return IRQ_HANDLED;
	}

	spin_lock_irqsave(&smem_lock, flags);
	if (!smsm_info.state) {
		SMSM_INFO("<SM NO STATE>\n");
	} else {
		unsigned old_apps, apps;
		unsigned modm = __raw_readl(SMSM_STATE_ADDR(SMSM_MODEM_STATE));

		old_apps = apps = __raw_readl(SMSM_STATE_ADDR(SMSM_APPS_STATE));

		SMSM_DBG("<SM %08x %08x>\n", apps, modm);
		if (apps & SMSM_RESET) {
			/* If we get an interrupt and the apps SMSM_RESET
			   bit is already set, the modem is acking the
			   app's reset ack. */
			if (!disable_smsm_reset_handshake)
				apps &= ~SMSM_RESET;
			/* Issue a fake irq to handle any
			 * smd state changes during reset
			 */
			smd_fake_irq_handler(0);

			/* queue modem restart notify chain */
			modem_queue_start_reset_notify();

		} else if (modm & SMSM_RESET) {
			pr_err("\nSMSM: Modem SMSM state changed to SMSM_RESET.");
			if (!disable_smsm_reset_handshake) {
				/* ack the reset; flush caches so shared
				 * memory is coherent before the modem goes
				 * down */
				apps |= SMSM_RESET;
				flush_cache_all();
				outer_flush_all();
			}
			modem_queue_start_reset_notify();

		} else if (modm & SMSM_INIT) {
			if (!(apps & SMSM_INIT)) {
				apps |= SMSM_INIT;
				modem_queue_smsm_init_notify();
			}

			if (modm & SMSM_SMDINIT)
				apps |= SMSM_SMDINIT;
			/* declare RUN only once all init handshake bits
			 * (INIT, SMDINIT, RPCINIT) are present */
			if ((apps & (SMSM_INIT | SMSM_SMDINIT | SMSM_RPCINIT)) ==
				(SMSM_INIT | SMSM_SMDINIT | SMSM_RPCINIT))
				apps |= SMSM_RUN;
		} else if (modm & SMSM_SYSTEM_DOWNLOAD) {
			pr_err("\nSMSM: Modem SMSM state changed to SMSM_SYSTEM_DOWNLOAD.");
			modem_queue_start_reset_notify();
		}

		if (old_apps != apps) {
			/* publish the updated apps state word and interrupt
			 * the other processors about the changed bits */
			SMSM_DBG("<SM %08x NOTIFY>\n", apps);
			__raw_writel(apps, SMSM_STATE_ADDR(SMSM_APPS_STATE));
			do_smd_probe();
			notify_other_smsm(SMSM_APPS_STATE, (old_apps ^ apps));
		}

		smsm_cb_snapshot(1);
	}
	spin_unlock_irqrestore(&smem_lock, flags);
	return IRQ_HANDLED;
}
2527
Eric Holmberg98c6c642012-02-24 11:29:35 -07002528static irqreturn_t smsm_modem_irq_handler(int irq, void *data)
2529{
2530 SMx_POWER_INFO("SMSM Int Modem->Apps\n");
Eric Holmberg7ad623a2012-03-01 14:41:10 -07002531 ++interrupt_stats[SMD_MODEM].smsm_in_count;
Eric Holmberg98c6c642012-02-24 11:29:35 -07002532 return smsm_irq_handler(irq, data);
2533}
2534
2535static irqreturn_t smsm_dsp_irq_handler(int irq, void *data)
2536{
2537 SMx_POWER_INFO("SMSM Int LPASS->Apps\n");
Eric Holmberg7ad623a2012-03-01 14:41:10 -07002538 ++interrupt_stats[SMD_Q6].smsm_in_count;
Eric Holmberg98c6c642012-02-24 11:29:35 -07002539 return smsm_irq_handler(irq, data);
2540}
2541
2542static irqreturn_t smsm_dsps_irq_handler(int irq, void *data)
2543{
2544 SMx_POWER_INFO("SMSM Int DSPS->Apps\n");
Eric Holmberg7ad623a2012-03-01 14:41:10 -07002545 ++interrupt_stats[SMD_DSPS].smsm_in_count;
Eric Holmberg98c6c642012-02-24 11:29:35 -07002546 return smsm_irq_handler(irq, data);
2547}
2548
2549static irqreturn_t smsm_wcnss_irq_handler(int irq, void *data)
2550{
2551 SMx_POWER_INFO("SMSM Int WCNSS->Apps\n");
Eric Holmberg7ad623a2012-03-01 14:41:10 -07002552 ++interrupt_stats[SMD_WCNSS].smsm_in_count;
Eric Holmberg98c6c642012-02-24 11:29:35 -07002553 return smsm_irq_handler(irq, data);
2554}
2555
Eric Holmberge8a39322012-04-03 15:14:02 -06002556/*
2557 * Changes the global interrupt mask. The set and clear masks are re-applied
2558 * every time the global interrupt mask is updated for callback registration
2559 * and de-registration.
2560 *
2561 * The clear mask is applied first, so if a bit is set to 1 in both the clear
2562 * mask and the set mask, the result will be that the interrupt is set.
2563 *
2564 * @smsm_entry SMSM entry to change
2565 * @clear_mask 1 = clear bit, 0 = no-op
2566 * @set_mask 1 = set bit, 0 = no-op
2567 *
2568 * @returns 0 for success, < 0 for error
2569 */
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002570int smsm_change_intr_mask(uint32_t smsm_entry,
2571 uint32_t clear_mask, uint32_t set_mask)
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002572{
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002573 uint32_t old_mask, new_mask;
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002574 unsigned long flags;
Brian Swetland5b0f5a32009-04-26 18:38:49 -07002575
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002576 if (smsm_entry >= SMSM_NUM_ENTRIES) {
2577 pr_err("smsm_change_state: Invalid entry %d\n",
2578 smsm_entry);
2579 return -EINVAL;
2580 }
2581
2582 if (!smsm_info.intr_mask) {
2583 pr_err("smsm_change_intr_mask <SM NO STATE>\n");
Brian Swetland5b0f5a32009-04-26 18:38:49 -07002584 return -EIO;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002585 }
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002586
2587 spin_lock_irqsave(&smem_lock, flags);
Eric Holmberge8a39322012-04-03 15:14:02 -06002588 smsm_states[smsm_entry].intr_mask_clear = clear_mask;
2589 smsm_states[smsm_entry].intr_mask_set = set_mask;
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002590
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002591 old_mask = __raw_readl(SMSM_INTR_MASK_ADDR(smsm_entry, SMSM_APPS));
2592 new_mask = (old_mask & ~clear_mask) | set_mask;
2593 __raw_writel(new_mask, SMSM_INTR_MASK_ADDR(smsm_entry, SMSM_APPS));
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002594
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002595 wmb();
2596 spin_unlock_irqrestore(&smem_lock, flags);
Brian Swetland5b0f5a32009-04-26 18:38:49 -07002597
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002598 return 0;
2599}
2600EXPORT_SYMBOL(smsm_change_intr_mask);
2601
2602int smsm_get_intr_mask(uint32_t smsm_entry, uint32_t *intr_mask)
2603{
2604 if (smsm_entry >= SMSM_NUM_ENTRIES) {
2605 pr_err("smsm_change_state: Invalid entry %d\n",
2606 smsm_entry);
2607 return -EINVAL;
2608 }
2609
2610 if (!smsm_info.intr_mask) {
2611 pr_err("smsm_change_intr_mask <SM NO STATE>\n");
2612 return -EIO;
2613 }
2614
2615 *intr_mask = __raw_readl(SMSM_INTR_MASK_ADDR(smsm_entry, SMSM_APPS));
2616 return 0;
2617}
2618EXPORT_SYMBOL(smsm_get_intr_mask);
2619
2620int smsm_change_state(uint32_t smsm_entry,
2621 uint32_t clear_mask, uint32_t set_mask)
2622{
2623 unsigned long flags;
2624 uint32_t old_state, new_state;
2625
2626 if (smsm_entry >= SMSM_NUM_ENTRIES) {
2627 pr_err("smsm_change_state: Invalid entry %d",
2628 smsm_entry);
2629 return -EINVAL;
2630 }
2631
2632 if (!smsm_info.state) {
2633 pr_err("smsm_change_state <SM NO STATE>\n");
2634 return -EIO;
2635 }
2636 spin_lock_irqsave(&smem_lock, flags);
2637
2638 old_state = __raw_readl(SMSM_STATE_ADDR(smsm_entry));
2639 new_state = (old_state & ~clear_mask) | set_mask;
2640 __raw_writel(new_state, SMSM_STATE_ADDR(smsm_entry));
2641 SMSM_DBG("smsm_change_state %x\n", new_state);
2642 notify_other_smsm(SMSM_APPS_STATE, (old_state ^ new_state));
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002643
2644 spin_unlock_irqrestore(&smem_lock, flags);
2645
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002646 return 0;
2647}
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002648EXPORT_SYMBOL(smsm_change_state);
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002649
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002650uint32_t smsm_get_state(uint32_t smsm_entry)
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002651{
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002652 uint32_t rv = 0;
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002653
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002654 /* needs interface change to return error code */
2655 if (smsm_entry >= SMSM_NUM_ENTRIES) {
2656 pr_err("smsm_change_state: Invalid entry %d",
2657 smsm_entry);
2658 return 0;
2659 }
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002660
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002661 if (!smsm_info.state) {
2662 pr_err("smsm_get_state <SM NO STATE>\n");
2663 } else {
2664 rv = __raw_readl(SMSM_STATE_ADDR(smsm_entry));
2665 }
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002666
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002667 return rv;
2668}
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002669EXPORT_SYMBOL(smsm_get_state);
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002670
/**
 * Performs SMSM callback client notifiction.
 *
 * Drains the snapshot FIFO filled by smsm_cb_snapshot().  Each snapshot is
 * one 32-bit word per SMSM entry followed by a trailing use_wakelock flag
 * (SMSM_SNAPSHOT_SIZE bytes total).  For every entry whose value changed
 * since the last delivered snapshot, all registered callbacks with a
 * matching mask are invoked; the wakelock reference taken at snapshot time
 * is then released.
 */
void notify_smsm_cb_clients_worker(struct work_struct *work)
{
	struct smsm_state_cb_info *cb_info;
	struct smsm_state_info *state_info;
	int n;
	uint32_t new_state;
	uint32_t state_changes;
	uint32_t use_wakelock;
	int ret;
	unsigned long flags;

	if (!smd_initialized)
		return;

	/* only consume complete snapshots */
	while (kfifo_len(&smsm_snapshot_fifo) >= SMSM_SNAPSHOT_SIZE) {
		mutex_lock(&smsm_lock);
		for (n = 0; n < SMSM_NUM_ENTRIES; n++) {
			state_info = &smsm_states[n];

			ret = kfifo_out(&smsm_snapshot_fifo, &new_state,
					sizeof(new_state));
			if (ret != sizeof(new_state)) {
				pr_err("%s: snapshot underflow %d\n",
					__func__, ret);
				mutex_unlock(&smsm_lock);
				return;
			}

			/* deliver callbacks only for bits that changed */
			state_changes = state_info->last_value ^ new_state;
			if (state_changes) {
				SMx_POWER_INFO("SMSM Change %d: %08x->%08x\n",
					       n, state_info->last_value,
					       new_state);
				list_for_each_entry(cb_info,
					&state_info->callbacks, cb_list) {

					if (cb_info->mask & state_changes)
						cb_info->notify(cb_info->data,
							state_info->last_value,
							new_state);
				}
				state_info->last_value = new_state;
			}
		}
		mutex_unlock(&smsm_lock);

		/* read wakelock flag */
		ret = kfifo_out(&smsm_snapshot_fifo, &use_wakelock,
				sizeof(use_wakelock));
		if (ret != sizeof(use_wakelock)) {
			pr_err("%s: snapshot underflow %d\n",
				__func__, ret);
			return;
		}

		/* drop the wakelock reference taken when this snapshot was
		 * queued; the lock itself is released only when the last
		 * outstanding snapshot has been processed */
		if (use_wakelock) {
			spin_lock_irqsave(&smsm_snapshot_count_lock, flags);
			if (smsm_snapshot_count) {
				--smsm_snapshot_count;
				if (smsm_snapshot_count == 0) {
					SMx_POWER_INFO("SMSM snapshot"
						       " wake unlock\n");
					wake_unlock(&smsm_snapshot_wakelock);
				}
			} else {
				pr_err("%s: invalid snapshot count\n",
					__func__);
			}
			spin_unlock_irqrestore(&smsm_snapshot_count_lock,
					flags);
		}
	}
}
2747
Arve Hjønnevågec9d3d12009-06-16 14:48:21 -07002748
/**
 * Registers callback for SMSM state notifications when the specified
 * bits change.
 *
 * @smsm_entry  Processor entry to monitor
 * @mask        Bits to monitor (callback fires when any of them change)
 * @notify      Notification function to register
 * @data        Opaque data passed in to callback
 *
 * @returns Status code
 *  <0 error code
 *  0  inserted new entry
 *  1  updated mask of existing entry
 */
int smsm_state_cb_register(uint32_t smsm_entry, uint32_t mask,
		void (*notify)(void *, uint32_t, uint32_t), void *data)
{
	struct smsm_state_info *state;
	struct smsm_state_cb_info *cb_info;
	struct smsm_state_cb_info *cb_found = 0;
	uint32_t new_mask = 0;
	int ret = 0;

	if (smsm_entry >= SMSM_NUM_ENTRIES)
		return -EINVAL;

	mutex_lock(&smsm_lock);

	if (!smsm_states) {
		/* smsm not yet initialized */
		ret = -ENODEV;
		goto cleanup;
	}

	/* if this (notify, data) pair is already registered, just widen its
	 * mask; also accumulate the union of all registered masks */
	state = &smsm_states[smsm_entry];
	list_for_each_entry(cb_info,
			&state->callbacks, cb_list) {
		if (!ret && (cb_info->notify == notify) &&
				(cb_info->data == data)) {
			cb_info->mask |= mask;
			cb_found = cb_info;
			ret = 1;
		}
		new_mask |= cb_info->mask;
	}

	if (!cb_found) {
		/* GFP_ATOMIC: registration may be called from atomic
		 * context by clients */
		cb_info = kmalloc(sizeof(struct smsm_state_cb_info),
			GFP_ATOMIC);
		if (!cb_info) {
			ret = -ENOMEM;
			goto cleanup;
		}

		cb_info->mask = mask;
		cb_info->notify = notify;
		cb_info->data = data;
		INIT_LIST_HEAD(&cb_info->cb_list);
		list_add_tail(&cb_info->cb_list,
			&state->callbacks);
		new_mask |= mask;
	}

	/* update interrupt notification mask */
	if (smsm_entry == SMSM_MODEM_STATE)
		new_mask |= LEGACY_MODEM_SMSM_MASK;

	if (smsm_info.intr_mask) {
		unsigned long flags;

		spin_lock_irqsave(&smem_lock, flags);
		/* re-apply the global clear/set deltas recorded by
		 * smsm_change_intr_mask() before writing the hardware mask */
		new_mask = (new_mask & ~state->intr_mask_clear)
				| state->intr_mask_set;
		__raw_writel(new_mask,
				SMSM_INTR_MASK_ADDR(smsm_entry, SMSM_APPS));
		wmb();
		spin_unlock_irqrestore(&smem_lock, flags);
	}

cleanup:
	mutex_unlock(&smsm_lock);
	return ret;
}
EXPORT_SYMBOL(smsm_state_cb_register);
2833
2834
/**
 * Deregisters for SMSM state notifications for the specified bits.
 *
 * @smsm_entry  Processor entry to deregister
 * @mask        Bits to deregister (if result is 0, callback is removed)
 * @notify      Notification function to deregister
 * @data        Opaque data passed in to callback
 *
 * @returns Status code
 *  <0 error code
 *  0  not found
 *  1  updated mask
 *  2  removed callback
 */
int smsm_state_cb_deregister(uint32_t smsm_entry, uint32_t mask,
		void (*notify)(void *, uint32_t, uint32_t), void *data)
{
	struct smsm_state_cb_info *cb_info;
	struct smsm_state_cb_info *cb_tmp;
	struct smsm_state_info *state;
	uint32_t new_mask = 0;
	int ret = 0;

	if (smsm_entry >= SMSM_NUM_ENTRIES)
		return -EINVAL;

	mutex_lock(&smsm_lock);

	if (!smsm_states) {
		/* smsm not yet initialized */
		mutex_unlock(&smsm_lock);
		return -ENODEV;
	}

	/* _safe variant: entries may be deleted while walking the list */
	state = &smsm_states[smsm_entry];
	list_for_each_entry_safe(cb_info, cb_tmp,
		&state->callbacks, cb_list) {
		if (!ret && (cb_info->notify == notify) &&
			(cb_info->data == data)) {
			cb_info->mask &= ~mask;
			ret = 1;
			if (!cb_info->mask) {
				/* no mask bits set, remove callback */
				list_del(&cb_info->cb_list);
				kfree(cb_info);
				ret = 2;
				continue;
			}
		}
		/* union of masks still registered after the removal */
		new_mask |= cb_info->mask;
	}

	/* update interrupt notification mask */
	if (smsm_entry == SMSM_MODEM_STATE)
		new_mask |= LEGACY_MODEM_SMSM_MASK;

	if (smsm_info.intr_mask) {
		unsigned long flags;

		spin_lock_irqsave(&smem_lock, flags);
		/* re-apply the global clear/set deltas recorded by
		 * smsm_change_intr_mask() before writing the hardware mask */
		new_mask = (new_mask & ~state->intr_mask_clear)
				| state->intr_mask_set;
		__raw_writel(new_mask,
				SMSM_INTR_MASK_ADDR(smsm_entry, SMSM_APPS));
		wmb();
		spin_unlock_irqrestore(&smem_lock, flags);
	}

	mutex_unlock(&smsm_lock);
	return ret;
}
EXPORT_SYMBOL(smsm_state_cb_deregister);
2907
Karthikeyan Ramasubramanian7069c482012-03-22 09:21:20 -06002908int smsm_driver_state_notifier_register(struct notifier_block *nb)
2909{
2910 int ret;
2911 if (!nb)
2912 return -EINVAL;
2913 mutex_lock(&smsm_driver_state_notifier_lock);
2914 ret = raw_notifier_chain_register(&smsm_driver_state_notifier_list, nb);
2915 mutex_unlock(&smsm_driver_state_notifier_lock);
2916 return ret;
2917}
2918EXPORT_SYMBOL(smsm_driver_state_notifier_register);
2919
2920int smsm_driver_state_notifier_unregister(struct notifier_block *nb)
2921{
2922 int ret;
2923 if (!nb)
2924 return -EINVAL;
2925 mutex_lock(&smsm_driver_state_notifier_lock);
2926 ret = raw_notifier_chain_unregister(&smsm_driver_state_notifier_list,
2927 nb);
2928 mutex_unlock(&smsm_driver_state_notifier_lock);
2929 return ret;
2930}
2931EXPORT_SYMBOL(smsm_driver_state_notifier_unregister);
2932
2933static void smsm_driver_state_notify(uint32_t state, void *data)
2934{
2935 mutex_lock(&smsm_driver_state_notifier_lock);
2936 raw_notifier_call_chain(&smsm_driver_state_notifier_list,
2937 state, data);
2938 mutex_unlock(&smsm_driver_state_notifier_lock);
2939}
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002940
/*
 * Legacy (non-platform-data) interrupt setup: registers SMD and SMSM
 * interrupt handlers for each remote processor compiled in, and enables
 * each as a wakeup source.  On any request_irq() failure, all previously
 * registered IRQs are freed and the error is returned; enable_irq_wake()
 * failures are logged but not fatal.
 *
 * @returns 0 on success, < 0 on request_irq() failure
 */
int smd_core_init(void)
{
	int r;
	unsigned long flags = IRQF_TRIGGER_RISING;
	SMD_INFO("smd_core_init()\n");

	/* modem SMD interrupt */
	r = request_irq(INT_A9_M2A_0, smd_modem_irq_handler,
			flags, "smd_dev", 0);
	if (r < 0)
		return r;
	r = enable_irq_wake(INT_A9_M2A_0);
	if (r < 0)
		pr_err("smd_core_init: "
		       "enable_irq_wake failed for INT_A9_M2A_0\n");

	/* modem SMSM interrupt */
	r = request_irq(INT_A9_M2A_5, smsm_modem_irq_handler,
			flags, "smsm_dev", 0);
	if (r < 0) {
		free_irq(INT_A9_M2A_0, 0);
		return r;
	}
	r = enable_irq_wake(INT_A9_M2A_5);
	if (r < 0)
		pr_err("smd_core_init: "
		       "enable_irq_wake failed for INT_A9_M2A_5\n");

#if defined(CONFIG_QDSP6)
#if (INT_ADSP_A11 == INT_ADSP_A11_SMSM)
	/* SMD and SMSM share one physical IRQ line on these targets */
	flags |= IRQF_SHARED;
#endif
	r = request_irq(INT_ADSP_A11, smd_dsp_irq_handler,
			flags, "smd_dev", smd_dsp_irq_handler);
	if (r < 0) {
		free_irq(INT_A9_M2A_0, 0);
		free_irq(INT_A9_M2A_5, 0);
		return r;
	}

	r = request_irq(INT_ADSP_A11_SMSM, smsm_dsp_irq_handler,
			flags, "smsm_dev", smsm_dsp_irq_handler);
	if (r < 0) {
		free_irq(INT_A9_M2A_0, 0);
		free_irq(INT_A9_M2A_5, 0);
		free_irq(INT_ADSP_A11, smd_dsp_irq_handler);
		return r;
	}

	r = enable_irq_wake(INT_ADSP_A11);
	if (r < 0)
		pr_err("smd_core_init: "
		       "enable_irq_wake failed for INT_ADSP_A11\n");

#if (INT_ADSP_A11 != INT_ADSP_A11_SMSM)
	r = enable_irq_wake(INT_ADSP_A11_SMSM);
	if (r < 0)
		pr_err("smd_core_init: enable_irq_wake "
		       "failed for INT_ADSP_A11_SMSM\n");
#endif
	flags &= ~IRQF_SHARED;
#endif

#if defined(CONFIG_DSPS)
	r = request_irq(INT_DSPS_A11, smd_dsps_irq_handler,
			flags, "smd_dev", smd_dsps_irq_handler);
	if (r < 0) {
		free_irq(INT_A9_M2A_0, 0);
		free_irq(INT_A9_M2A_5, 0);
		free_irq(INT_ADSP_A11, smd_dsp_irq_handler);
		free_irq(INT_ADSP_A11_SMSM, smsm_dsp_irq_handler);
		return r;
	}

	r = enable_irq_wake(INT_DSPS_A11);
	if (r < 0)
		/* NOTE(review): message names INT_ADSP_A11 but this call is
		 * for INT_DSPS_A11 — copy-paste in the log text */
		pr_err("smd_core_init: "
		       "enable_irq_wake failed for INT_ADSP_A11\n");
#endif

#if defined(CONFIG_WCNSS)
	r = request_irq(INT_WCNSS_A11, smd_wcnss_irq_handler,
			flags, "smd_dev", smd_wcnss_irq_handler);
	if (r < 0) {
		free_irq(INT_A9_M2A_0, 0);
		free_irq(INT_A9_M2A_5, 0);
		free_irq(INT_ADSP_A11, smd_dsp_irq_handler);
		free_irq(INT_ADSP_A11_SMSM, smsm_dsp_irq_handler);
		free_irq(INT_DSPS_A11, smd_dsps_irq_handler);
		return r;
	}

	r = enable_irq_wake(INT_WCNSS_A11);
	if (r < 0)
		pr_err("smd_core_init: "
		       "enable_irq_wake failed for INT_WCNSS_A11\n");

	r = request_irq(INT_WCNSS_A11_SMSM, smsm_wcnss_irq_handler,
			flags, "smsm_dev", smsm_wcnss_irq_handler);
	if (r < 0) {
		free_irq(INT_A9_M2A_0, 0);
		free_irq(INT_A9_M2A_5, 0);
		free_irq(INT_ADSP_A11, smd_dsp_irq_handler);
		free_irq(INT_ADSP_A11_SMSM, smsm_dsp_irq_handler);
		free_irq(INT_DSPS_A11, smd_dsps_irq_handler);
		free_irq(INT_WCNSS_A11, smd_wcnss_irq_handler);
		return r;
	}

	r = enable_irq_wake(INT_WCNSS_A11_SMSM);
	if (r < 0)
		pr_err("smd_core_init: "
		       "enable_irq_wake failed for INT_WCNSS_A11_SMSM\n");
#endif

#if defined(CONFIG_DSPS_SMSM)
	r = request_irq(INT_DSPS_A11_SMSM, smsm_dsps_irq_handler,
			flags, "smsm_dev", smsm_dsps_irq_handler);
	if (r < 0) {
		free_irq(INT_A9_M2A_0, 0);
		free_irq(INT_A9_M2A_5, 0);
		free_irq(INT_ADSP_A11, smd_dsp_irq_handler);
		free_irq(INT_ADSP_A11_SMSM, smsm_dsp_irq_handler);
		free_irq(INT_DSPS_A11, smd_dsps_irq_handler);
		free_irq(INT_WCNSS_A11, smd_wcnss_irq_handler);
		free_irq(INT_WCNSS_A11_SMSM, smsm_wcnss_irq_handler);
		return r;
	}

	r = enable_irq_wake(INT_DSPS_A11_SMSM);
	if (r < 0)
		pr_err("smd_core_init: "
		       "enable_irq_wake failed for INT_DSPS_A11_SMSM\n");
#endif
	SMD_INFO("smd_core_init() done\n");

	return 0;
}
3077
Angshuman Sarkarbad32df2012-02-01 19:52:52 +05303078static int intr_init(struct interrupt_config_item *private_irq,
3079 struct smd_irq_config *platform_irq,
3080 struct platform_device *pdev
3081 )
3082{
3083 int irq_id;
3084 int ret;
3085 int ret_wake;
3086
3087 private_irq->out_bit_pos = platform_irq->out_bit_pos;
3088 private_irq->out_offset = platform_irq->out_offset;
3089 private_irq->out_base = platform_irq->out_base;
3090
3091 irq_id = platform_get_irq_byname(
3092 pdev,
3093 platform_irq->irq_name
3094 );
3095 SMD_DBG("smd: %s: register irq: %s id: %d\n", __func__,
3096 platform_irq->irq_name, irq_id);
3097 ret = request_irq(irq_id,
3098 private_irq->irq_handler,
3099 platform_irq->flags,
3100 platform_irq->device_name,
3101 (void *)platform_irq->dev_id
3102 );
3103 if (ret < 0) {
3104 platform_irq->irq_id = ret;
3105 } else {
3106 platform_irq->irq_id = irq_id;
3107 ret_wake = enable_irq_wake(irq_id);
3108 if (ret_wake < 0) {
3109 pr_err("smd: enable_irq_wake failed on %s",
3110 platform_irq->irq_name);
3111 }
3112 }
3113
3114 return ret;
3115}
3116
/*
 * Platform-data driven interrupt setup: for each subsystem described in
 * the platform data, registers its SMD and SMSM interrupts via intr_init()
 * and records the subsystem name for its edge.  If any registration fails,
 * every IRQ registered so far (tracked via irq_id >= 0) is freed and the
 * first error is returned.
 *
 * @pdev  platform device carrying struct smd_platform in platform_data
 * @returns 0 on success, < 0 on first interrupt registration failure
 */
int smd_core_platform_init(struct platform_device *pdev)
{
	int i;
	int ret;
	uint32_t num_ss;
	struct smd_platform *smd_platform_data;
	struct smd_subsystem_config *smd_ss_config_list;
	struct smd_subsystem_config *cfg;
	int err_ret = 0;

	smd_platform_data = pdev->dev.platform_data;
	num_ss = smd_platform_data->num_ss_configs;
	smd_ss_config_list = smd_platform_data->smd_ss_configs;

	/* optional SSR tuning: board may opt out of the SMSM_RESET
	 * handshake with the modem */
	if (smd_platform_data->smd_ssr_config)
		disable_smsm_reset_handshake = smd_platform_data->
			   smd_ssr_config->disable_smsm_reset_handshake;

	for (i = 0; i < num_ss; i++) {
		cfg = &smd_ss_config_list[i];

		ret = intr_init(
			&private_intr_config[cfg->irq_config_id].smd,
			&cfg->smd_int,
			pdev
			);

		if (ret < 0) {
			err_ret = ret;
			pr_err("smd: register irq failed on %s\n",
				cfg->smd_int.irq_name);
			break;
		}

		ret = intr_init(
			&private_intr_config[cfg->irq_config_id].smsm,
			&cfg->smsm_int,
			pdev
			);

		if (ret < 0) {
			err_ret = ret;
			pr_err("smd: register irq failed on %s\n",
				cfg->smsm_int.irq_name);
			break;
		}

		/* NOTE(review): strncpy does not guarantee NUL termination
		 * if subsys_name is exactly SMD_MAX_CH_NAME_LEN long —
		 * confirm callers bound the platform-data string */
		strncpy(edge_to_pids[cfg->edge].subsys_name,
			cfg->subsys_name, SMD_MAX_CH_NAME_LEN);
	}

	/* unwind: free every IRQ that registered successfully */
	if (err_ret < 0) {
		pr_err("smd: deregistering IRQs\n");
		for (i = 0; i < num_ss; ++i) {
			cfg = &smd_ss_config_list[i];

			if (cfg->smd_int.irq_id >= 0)
				free_irq(cfg->smd_int.irq_id,
					(void *)cfg->smd_int.dev_id
					);
			if (cfg->smsm_int.irq_id >= 0)
				free_irq(cfg->smsm_int.irq_id,
					(void *)cfg->smsm_int.dev_id
					);
		}
		return err_ret;
	}

	SMD_INFO("smd_core_platform_init() done\n");
	return 0;

}
3189
Gregory Bean4416e9e2010-07-28 10:22:12 -07003190static int __devinit msm_smd_probe(struct platform_device *pdev)
Brian Swetland2eb44eb2008-09-29 16:00:48 -07003191{
Angshuman Sarkarbad32df2012-02-01 19:52:52 +05303192 int ret;
Daniel Walker0aec66d2010-03-18 12:31:08 -07003193
Angshuman Sarkarbad32df2012-02-01 19:52:52 +05303194 SMD_INFO("smd probe\n");
Brian Swetland2eb44eb2008-09-29 16:00:48 -07003195 INIT_WORK(&probe_work, smd_channel_probe_worker);
3196
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003197 channel_close_wq = create_singlethread_workqueue("smd_channel_close");
3198 if (IS_ERR(channel_close_wq)) {
3199 pr_err("%s: create_singlethread_workqueue ENOMEM\n", __func__);
3200 return -ENOMEM;
3201 }
3202
3203 if (smsm_init()) {
3204 pr_err("smsm_init() failed\n");
3205 return -1;
3206 }
3207
Angshuman Sarkarbad32df2012-02-01 19:52:52 +05303208 if (pdev) {
3209 if (pdev->dev.of_node) {
3210 pr_err("SMD: Device tree not currently supported\n");
3211 return -ENODEV;
3212 } else if (pdev->dev.platform_data) {
3213 ret = smd_core_platform_init(pdev);
3214 if (ret) {
3215 pr_err(
3216 "SMD: smd_core_platform_init() failed\n");
3217 return -ENODEV;
3218 }
3219 } else {
3220 ret = smd_core_init();
3221 if (ret) {
3222 pr_err("smd_core_init() failed\n");
3223 return -ENODEV;
3224 }
3225 }
3226 } else {
3227 pr_err("SMD: PDEV not found\n");
3228 return -ENODEV;
Brian Swetland2eb44eb2008-09-29 16:00:48 -07003229 }
3230
Brian Swetland2eb44eb2008-09-29 16:00:48 -07003231 smd_initialized = 1;
3232
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003233 smd_alloc_loopback_channel();
Eric Holmbergc33d4ab2011-10-24 10:28:25 -06003234 smsm_irq_handler(0, 0);
3235 tasklet_schedule(&smd_fake_irq_tasklet);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003236
Brian Swetland2eb44eb2008-09-29 16:00:48 -07003237 return 0;
3238}
3239
static int restart_notifier_cb(struct notifier_block *this,
				unsigned long code,
				void *data);

/* Subsystem-restart notifier registrations: maps each subsystem name to
 * the SMD processor whose channels must be reset after its shutdown.
 * NOTE(review): "gss" is mapped to SMD_MODEM — presumably intentional
 * (GSS shares the modem edge); confirm against the SSR configuration. */
static struct restart_notifier_block restart_notifiers[] = {
	{SMD_MODEM, "modem", .nb.notifier_call = restart_notifier_cb},
	{SMD_Q6, "lpass", .nb.notifier_call = restart_notifier_cb},
	{SMD_WCNSS, "riva", .nb.notifier_call = restart_notifier_cb},
	{SMD_DSPS, "dsps", .nb.notifier_call = restart_notifier_cb},
	{SMD_MODEM, "gss", .nb.notifier_call = restart_notifier_cb},
};
3251
3252static int restart_notifier_cb(struct notifier_block *this,
3253 unsigned long code,
3254 void *data)
3255{
3256 if (code == SUBSYS_AFTER_SHUTDOWN) {
3257 struct restart_notifier_block *notifier;
3258
3259 notifier = container_of(this,
3260 struct restart_notifier_block, nb);
3261 SMD_INFO("%s: ssrestart for processor %d ('%s')\n",
3262 __func__, notifier->processor,
3263 notifier->name);
3264
3265 smd_channel_reset(notifier->processor);
3266 }
3267
3268 return NOTIFY_DONE;
3269}
3270
3271static __init int modem_restart_late_init(void)
3272{
3273 int i;
3274 void *handle;
3275 struct restart_notifier_block *nb;
3276
3277 for (i = 0; i < ARRAY_SIZE(restart_notifiers); i++) {
3278 nb = &restart_notifiers[i];
3279 handle = subsys_notif_register_notifier(nb->name, &nb->nb);
3280 SMD_DBG("%s: registering notif for '%s', handle=%p\n",
3281 __func__, nb->name, handle);
3282 }
3283 return 0;
3284}
3285late_initcall(modem_restart_late_init);
3286
/* Platform driver binding for the SMD core device (name MODULE_NAME). */
static struct platform_driver msm_smd_driver = {
	.probe = msm_smd_probe,
	.driver = {
		.name = MODULE_NAME,
		.owner = THIS_MODULE,
	},
};
3294
/* Module entry point: registers the SMD platform driver. */
static int __init msm_smd_init(void)
{
	return platform_driver_register(&msm_smd_driver);
}
3299
3300module_init(msm_smd_init);
3301
3302MODULE_DESCRIPTION("MSM Shared Memory Core");
3303MODULE_AUTHOR("Brian Swetland <swetland@google.com>");
3304MODULE_LICENSE("GPL");