blob: d00dd1a7b277b55dc9ab894a2bb037b0b8a6d4e6 [file] [log] [blame]
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001/* arch/arm/mach-msm/smd.c
2 *
3 * Copyright (C) 2007 Google, Inc.
Eric Holmbergc7e8daf2011-12-28 11:49:21 -07004 * Copyright (c) 2008-2012, Code Aurora Forum. All rights reserved.
Brian Swetland2eb44eb2008-09-29 16:00:48 -07005 * Author: Brian Swetland <swetland@google.com>
6 *
7 * This software is licensed under the terms of the GNU General Public
8 * License version 2, as published by the Free Software Foundation, and
9 * may be copied, distributed, and modified under those terms.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 */
17
18#include <linux/platform_device.h>
19#include <linux/module.h>
20#include <linux/fs.h>
21#include <linux/cdev.h>
22#include <linux/device.h>
23#include <linux/wait.h>
24#include <linux/interrupt.h>
25#include <linux/irq.h>
26#include <linux/list.h>
27#include <linux/slab.h>
Brian Swetland2eb44eb2008-09-29 16:00:48 -070028#include <linux/delay.h>
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070029#include <linux/io.h>
30#include <linux/termios.h>
31#include <linux/ctype.h>
32#include <linux/remote_spinlock.h>
33#include <linux/uaccess.h>
Eric Holmbergc7e8daf2011-12-28 11:49:21 -070034#include <linux/kfifo.h>
Eric Holmberg59a9f942012-03-19 10:04:22 -060035#include <linux/wakelock.h>
Karthikeyan Ramasubramanian7069c482012-03-22 09:21:20 -060036#include <linux/notifier.h>
Brian Swetland2eb44eb2008-09-29 16:00:48 -070037#include <mach/msm_smd.h>
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070038#include <mach/msm_iomap.h>
Brian Swetland2eb44eb2008-09-29 16:00:48 -070039#include <mach/system.h>
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070040#include <mach/subsystem_notif.h>
Angshuman Sarkaread67bd2011-09-21 20:13:12 +053041#include <mach/socinfo.h>
Ram Somani8b9589f2012-04-03 12:07:18 +053042#include <asm/cacheflush.h>
Brian Swetland2eb44eb2008-09-29 16:00:48 -070043
44#include "smd_private.h"
45#include "proc_comm.h"
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070046#include "modem_notifier.h"
Brian Swetland2eb44eb2008-09-29 16:00:48 -070047
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070048#if defined(CONFIG_ARCH_QSD8X50) || defined(CONFIG_ARCH_MSM8X60) \
Jeff Hugo56b933a2011-09-28 14:42:05 -060049 || defined(CONFIG_ARCH_MSM8960) || defined(CONFIG_ARCH_FSM9XXX) \
Jeff Hugo0c0f5e92011-09-28 13:55:45 -060050 || defined(CONFIG_ARCH_MSM9615) || defined(CONFIG_ARCH_APQ8064)
Brian Swetland37521a32009-07-01 18:30:47 -070051#define CONFIG_QDSP6 1
52#endif
53
Jeff Hugo0c0f5e92011-09-28 13:55:45 -060054#if defined(CONFIG_ARCH_MSM8X60) || defined(CONFIG_ARCH_MSM8960) \
55 || defined(CONFIG_ARCH_APQ8064)
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070056#define CONFIG_DSPS 1
57#endif
58
Jeff Hugo0c0f5e92011-09-28 13:55:45 -060059#if defined(CONFIG_ARCH_MSM8960) \
60 || defined(CONFIG_ARCH_APQ8064)
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070061#define CONFIG_WCNSS 1
Jeff Hugo6a8057c2011-08-16 13:47:12 -060062#define CONFIG_DSPS_SMSM 1
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070063#endif
Brian Swetland2eb44eb2008-09-29 16:00:48 -070064
65#define MODULE_NAME "msm_smd"
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070066#define SMEM_VERSION 0x000B
67#define SMD_VERSION 0x00020000
Eric Holmbergc7e8daf2011-12-28 11:49:21 -070068#define SMSM_SNAPSHOT_CNT 64
Eric Holmbergda31d042012-03-28 14:01:02 -060069#define SMSM_SNAPSHOT_SIZE ((SMSM_NUM_ENTRIES + 1) * 4)
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070070
71uint32_t SMSM_NUM_ENTRIES = 8;
72uint32_t SMSM_NUM_HOSTS = 3;
Brian Swetland2eb44eb2008-09-29 16:00:48 -070073
74enum {
75 MSM_SMD_DEBUG = 1U << 0,
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070076 MSM_SMSM_DEBUG = 1U << 1,
77 MSM_SMD_INFO = 1U << 2,
78 MSM_SMSM_INFO = 1U << 3,
Eric Holmberg98c6c642012-02-24 11:29:35 -070079 MSM_SMx_POWER_INFO = 1U << 4,
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070080};
81
82struct smsm_shared_info {
83 uint32_t *state;
84 uint32_t *intr_mask;
85 uint32_t *intr_mux;
86};
87
88static struct smsm_shared_info smsm_info;
Eric Holmberg59a9f942012-03-19 10:04:22 -060089static struct kfifo smsm_snapshot_fifo;
90static struct wake_lock smsm_snapshot_wakelock;
91static int smsm_snapshot_count;
92static DEFINE_SPINLOCK(smsm_snapshot_count_lock);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070093
94struct smsm_size_info_type {
95 uint32_t num_hosts;
96 uint32_t num_entries;
97 uint32_t reserved0;
98 uint32_t reserved1;
99};
100
101struct smsm_state_cb_info {
102 struct list_head cb_list;
103 uint32_t mask;
104 void *data;
105 void (*notify)(void *data, uint32_t old_state, uint32_t new_state);
106};
107
108struct smsm_state_info {
109 struct list_head callbacks;
110 uint32_t last_value;
111};
112
Angshuman Sarkarbad32df2012-02-01 19:52:52 +0530113struct interrupt_config_item {
114 /* must be initialized */
115 irqreturn_t (*irq_handler)(int req, void *data);
116 /* outgoing interrupt config (set from platform data) */
117 uint32_t out_bit_pos;
118 void __iomem *out_base;
119 uint32_t out_offset;
120};
121
122struct interrupt_config {
123 struct interrupt_config_item smd;
124 struct interrupt_config_item smsm;
125};
126
127static irqreturn_t smd_modem_irq_handler(int irq, void *data);
Eric Holmberg98c6c642012-02-24 11:29:35 -0700128static irqreturn_t smsm_modem_irq_handler(int irq, void *data);
Angshuman Sarkarbad32df2012-02-01 19:52:52 +0530129static irqreturn_t smd_dsp_irq_handler(int irq, void *data);
Eric Holmberg98c6c642012-02-24 11:29:35 -0700130static irqreturn_t smsm_dsp_irq_handler(int irq, void *data);
Angshuman Sarkarbad32df2012-02-01 19:52:52 +0530131static irqreturn_t smd_dsps_irq_handler(int irq, void *data);
Eric Holmberg98c6c642012-02-24 11:29:35 -0700132static irqreturn_t smsm_dsps_irq_handler(int irq, void *data);
Angshuman Sarkarbad32df2012-02-01 19:52:52 +0530133static irqreturn_t smd_wcnss_irq_handler(int irq, void *data);
Eric Holmberg98c6c642012-02-24 11:29:35 -0700134static irqreturn_t smsm_wcnss_irq_handler(int irq, void *data);
Angshuman Sarkarbad32df2012-02-01 19:52:52 +0530135static irqreturn_t smsm_irq_handler(int irq, void *data);
136
137static struct interrupt_config private_intr_config[NUM_SMD_SUBSYSTEMS] = {
138 [SMD_MODEM] = {
139 .smd.irq_handler = smd_modem_irq_handler,
Eric Holmberg98c6c642012-02-24 11:29:35 -0700140 .smsm.irq_handler = smsm_modem_irq_handler,
Angshuman Sarkarbad32df2012-02-01 19:52:52 +0530141 },
142 [SMD_Q6] = {
143 .smd.irq_handler = smd_dsp_irq_handler,
Eric Holmberg98c6c642012-02-24 11:29:35 -0700144 .smsm.irq_handler = smsm_dsp_irq_handler,
Angshuman Sarkarbad32df2012-02-01 19:52:52 +0530145 },
146 [SMD_DSPS] = {
147 .smd.irq_handler = smd_dsps_irq_handler,
Eric Holmberg98c6c642012-02-24 11:29:35 -0700148 .smsm.irq_handler = smsm_dsps_irq_handler,
Angshuman Sarkarbad32df2012-02-01 19:52:52 +0530149 },
150 [SMD_WCNSS] = {
151 .smd.irq_handler = smd_wcnss_irq_handler,
Eric Holmberg98c6c642012-02-24 11:29:35 -0700152 .smsm.irq_handler = smsm_wcnss_irq_handler,
Angshuman Sarkarbad32df2012-02-01 19:52:52 +0530153 },
154};
Eric Holmberg7ad623a2012-03-01 14:41:10 -0700155struct interrupt_stat interrupt_stats[NUM_SMD_SUBSYSTEMS];
Angshuman Sarkarbad32df2012-02-01 19:52:52 +0530156
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700157#define SMSM_STATE_ADDR(entry) (smsm_info.state + entry)
158#define SMSM_INTR_MASK_ADDR(entry, host) (smsm_info.intr_mask + \
159 entry * SMSM_NUM_HOSTS + host)
160#define SMSM_INTR_MUX_ADDR(entry) (smsm_info.intr_mux + entry)
161
162/* Internal definitions which are not exported in some targets */
163enum {
164 SMSM_APPS_DEM_I = 3,
Brian Swetland2eb44eb2008-09-29 16:00:48 -0700165};
166
167static int msm_smd_debug_mask;
Brian Swetland2eb44eb2008-09-29 16:00:48 -0700168module_param_named(debug_mask, msm_smd_debug_mask,
169 int, S_IRUGO | S_IWUSR | S_IWGRP);
170
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700171#if defined(CONFIG_MSM_SMD_DEBUG)
172#define SMD_DBG(x...) do { \
173 if (msm_smd_debug_mask & MSM_SMD_DEBUG) \
174 printk(KERN_DEBUG x); \
175 } while (0)
176
177#define SMSM_DBG(x...) do { \
178 if (msm_smd_debug_mask & MSM_SMSM_DEBUG) \
179 printk(KERN_DEBUG x); \
180 } while (0)
181
182#define SMD_INFO(x...) do { \
183 if (msm_smd_debug_mask & MSM_SMD_INFO) \
184 printk(KERN_INFO x); \
185 } while (0)
186
187#define SMSM_INFO(x...) do { \
188 if (msm_smd_debug_mask & MSM_SMSM_INFO) \
189 printk(KERN_INFO x); \
190 } while (0)
Eric Holmberg98c6c642012-02-24 11:29:35 -0700191#define SMx_POWER_INFO(x...) do { \
192 if (msm_smd_debug_mask & MSM_SMx_POWER_INFO) \
193 printk(KERN_INFO x); \
194 } while (0)
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700195#else
196#define SMD_DBG(x...) do { } while (0)
197#define SMSM_DBG(x...) do { } while (0)
198#define SMD_INFO(x...) do { } while (0)
199#define SMSM_INFO(x...) do { } while (0)
Eric Holmberg98c6c642012-02-24 11:29:35 -0700200#define SMx_POWER_INFO(x...) do { } while (0)
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700201#endif
202
Brian Swetland2eb44eb2008-09-29 16:00:48 -0700203static unsigned last_heap_free = 0xffffffff;
204
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700205static inline void smd_write_intr(unsigned int val,
206 const void __iomem *addr);
207
208#if defined(CONFIG_ARCH_MSM7X30)
209#define MSM_TRIG_A2M_SMD_INT \
Taniya Das298de8c2012-02-16 11:45:31 +0530210 (smd_write_intr(1 << 0, MSM_APCS_GCC_BASE + 0x8))
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700211#define MSM_TRIG_A2Q6_SMD_INT \
Taniya Das298de8c2012-02-16 11:45:31 +0530212 (smd_write_intr(1 << 8, MSM_APCS_GCC_BASE + 0x8))
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700213#define MSM_TRIG_A2M_SMSM_INT \
Taniya Das298de8c2012-02-16 11:45:31 +0530214 (smd_write_intr(1 << 5, MSM_APCS_GCC_BASE + 0x8))
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700215#define MSM_TRIG_A2Q6_SMSM_INT \
Taniya Das298de8c2012-02-16 11:45:31 +0530216 (smd_write_intr(1 << 8, MSM_APCS_GCC_BASE + 0x8))
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700217#define MSM_TRIG_A2DSPS_SMD_INT
Jeff Hugo6a8057c2011-08-16 13:47:12 -0600218#define MSM_TRIG_A2DSPS_SMSM_INT
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700219#define MSM_TRIG_A2WCNSS_SMD_INT
220#define MSM_TRIG_A2WCNSS_SMSM_INT
221#elif defined(CONFIG_ARCH_MSM8X60)
222#define MSM_TRIG_A2M_SMD_INT \
223 (smd_write_intr(1 << 3, MSM_GCC_BASE + 0x8))
224#define MSM_TRIG_A2Q6_SMD_INT \
225 (smd_write_intr(1 << 15, MSM_GCC_BASE + 0x8))
226#define MSM_TRIG_A2M_SMSM_INT \
227 (smd_write_intr(1 << 4, MSM_GCC_BASE + 0x8))
228#define MSM_TRIG_A2Q6_SMSM_INT \
229 (smd_write_intr(1 << 14, MSM_GCC_BASE + 0x8))
230#define MSM_TRIG_A2DSPS_SMD_INT \
231 (smd_write_intr(1, MSM_SIC_NON_SECURE_BASE + 0x4080))
Jeff Hugo6a8057c2011-08-16 13:47:12 -0600232#define MSM_TRIG_A2DSPS_SMSM_INT
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700233#define MSM_TRIG_A2WCNSS_SMD_INT
234#define MSM_TRIG_A2WCNSS_SMSM_INT
Jeff Hugo56b933a2011-09-28 14:42:05 -0600235#elif defined(CONFIG_ARCH_MSM9615)
236#define MSM_TRIG_A2M_SMD_INT \
237 (smd_write_intr(1 << 3, MSM_APCS_GCC_BASE + 0x8))
238#define MSM_TRIG_A2Q6_SMD_INT \
239 (smd_write_intr(1 << 15, MSM_APCS_GCC_BASE + 0x8))
240#define MSM_TRIG_A2M_SMSM_INT \
241 (smd_write_intr(1 << 4, MSM_APCS_GCC_BASE + 0x8))
242#define MSM_TRIG_A2Q6_SMSM_INT \
243 (smd_write_intr(1 << 14, MSM_APCS_GCC_BASE + 0x8))
244#define MSM_TRIG_A2DSPS_SMD_INT
245#define MSM_TRIG_A2DSPS_SMSM_INT
246#define MSM_TRIG_A2WCNSS_SMD_INT
247#define MSM_TRIG_A2WCNSS_SMSM_INT
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700248#elif defined(CONFIG_ARCH_FSM9XXX)
249#define MSM_TRIG_A2Q6_SMD_INT \
250 (smd_write_intr(1 << 10, MSM_GCC_BASE + 0x8))
251#define MSM_TRIG_A2Q6_SMSM_INT \
252 (smd_write_intr(1 << 10, MSM_GCC_BASE + 0x8))
253#define MSM_TRIG_A2M_SMD_INT \
254 (smd_write_intr(1 << 0, MSM_GCC_BASE + 0x8))
255#define MSM_TRIG_A2M_SMSM_INT \
256 (smd_write_intr(1 << 5, MSM_GCC_BASE + 0x8))
257#define MSM_TRIG_A2DSPS_SMD_INT
Jeff Hugo6a8057c2011-08-16 13:47:12 -0600258#define MSM_TRIG_A2DSPS_SMSM_INT
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700259#define MSM_TRIG_A2WCNSS_SMD_INT
260#define MSM_TRIG_A2WCNSS_SMSM_INT
Eric Holmberg73d45462012-02-28 10:41:31 -0700261#elif defined(CONFIG_ARCH_MSM7X01A) || defined(CONFIG_ARCH_MSM7x25)
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700262#define MSM_TRIG_A2M_SMD_INT \
263 (smd_write_intr(1, MSM_CSR_BASE + 0x400 + (0) * 4))
Eric Holmberg73d45462012-02-28 10:41:31 -0700264#define MSM_TRIG_A2Q6_SMD_INT
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700265#define MSM_TRIG_A2M_SMSM_INT \
266 (smd_write_intr(1, MSM_CSR_BASE + 0x400 + (5) * 4))
Eric Holmberg73d45462012-02-28 10:41:31 -0700267#define MSM_TRIG_A2Q6_SMSM_INT
268#define MSM_TRIG_A2DSPS_SMD_INT
269#define MSM_TRIG_A2DSPS_SMSM_INT
270#define MSM_TRIG_A2WCNSS_SMD_INT
271#define MSM_TRIG_A2WCNSS_SMSM_INT
272#elif defined(CONFIG_ARCH_MSM7X27) || defined(CONFIG_ARCH_MSM7X27A)
273#define MSM_TRIG_A2M_SMD_INT \
274 (smd_write_intr(1, MSM_CSR_BASE + 0x400 + (0) * 4))
275#define MSM_TRIG_A2Q6_SMD_INT
276#define MSM_TRIG_A2M_SMSM_INT \
277 (smd_write_intr(1, MSM_CSR_BASE + 0x400 + (5) * 4))
278#define MSM_TRIG_A2Q6_SMSM_INT
279#define MSM_TRIG_A2DSPS_SMD_INT
280#define MSM_TRIG_A2DSPS_SMSM_INT
281#define MSM_TRIG_A2WCNSS_SMD_INT
282#define MSM_TRIG_A2WCNSS_SMSM_INT
283#else /* use platform device / device tree configuration */
284#define MSM_TRIG_A2M_SMD_INT
285#define MSM_TRIG_A2Q6_SMD_INT
286#define MSM_TRIG_A2M_SMSM_INT
287#define MSM_TRIG_A2Q6_SMSM_INT
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700288#define MSM_TRIG_A2DSPS_SMD_INT
Jeff Hugo6a8057c2011-08-16 13:47:12 -0600289#define MSM_TRIG_A2DSPS_SMSM_INT
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700290#define MSM_TRIG_A2WCNSS_SMD_INT
291#define MSM_TRIG_A2WCNSS_SMSM_INT
Brian Swetland37521a32009-07-01 18:30:47 -0700292#endif
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700293
Jeff Hugoee40b152012-02-09 17:39:47 -0700294/*
295 * stub out legacy macros if they are not being used so that the legacy
296 * code compiles even though it is not used
297 *
298 * these definitions should not be used in active code and will cause
299 * an early failure
300 */
301#ifndef INT_A9_M2A_0
302#define INT_A9_M2A_0 -1
303#endif
304#ifndef INT_A9_M2A_5
305#define INT_A9_M2A_5 -1
306#endif
307#ifndef INT_ADSP_A11
308#define INT_ADSP_A11 -1
309#endif
310#ifndef INT_ADSP_A11_SMSM
311#define INT_ADSP_A11_SMSM -1
312#endif
313#ifndef INT_DSPS_A11
314#define INT_DSPS_A11 -1
315#endif
316#ifndef INT_DSPS_A11_SMSM
317#define INT_DSPS_A11_SMSM -1
318#endif
319#ifndef INT_WCNSS_A11
320#define INT_WCNSS_A11 -1
321#endif
322#ifndef INT_WCNSS_A11_SMSM
323#define INT_WCNSS_A11_SMSM -1
324#endif
325
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700326#define SMD_LOOPBACK_CID 100
327
Eric Holmbergf6d7d1a2011-09-23 18:31:04 -0600328#define SMEM_SPINLOCK_SMEM_ALLOC "S:3"
329static remote_spinlock_t remote_spinlock;
330
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700331static LIST_HEAD(smd_ch_list_loopback);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700332static void smd_fake_irq_handler(unsigned long arg);
Eric Holmbergda31d042012-03-28 14:01:02 -0600333static void smsm_cb_snapshot(uint32_t use_wakelock);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700334
335static void notify_smsm_cb_clients_worker(struct work_struct *work);
336static DECLARE_WORK(smsm_cb_work, notify_smsm_cb_clients_worker);
Eric Holmbergc8002902011-09-16 13:55:57 -0600337static DEFINE_MUTEX(smsm_lock);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700338static struct smsm_state_info *smsm_states;
Angshuman Sarkar7ee0dca2011-08-22 21:37:34 +0530339static int spinlocks_initialized;
Karthikeyan Ramasubramanian7069c482012-03-22 09:21:20 -0600340static RAW_NOTIFIER_HEAD(smsm_driver_state_notifier_list);
341static DEFINE_MUTEX(smsm_driver_state_notifier_lock);
342static void smsm_driver_state_notify(uint32_t state, void *data);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700343
344static inline void smd_write_intr(unsigned int val,
345 const void __iomem *addr)
346{
347 wmb();
348 __raw_writel(val, addr);
349}
350
351#ifdef CONFIG_WCNSS
352static inline void wakeup_v1_riva(void)
353{
354 /*
355 * workaround hack for RIVA v1 hardware bug
356 * trigger GPIO 40 to wake up RIVA from power collaspe
357 * not to be sent to customers
358 */
Jeff Hugoafb8c4a2011-10-27 15:57:27 -0600359 if (SOCINFO_VERSION_MAJOR(socinfo_get_version()) == 1) {
360 __raw_writel(0x0, MSM_TLMM_BASE + 0x1284);
361 __raw_writel(0x2, MSM_TLMM_BASE + 0x1284);
362 }
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700363 /* end workaround */
364}
365#else
366static inline void wakeup_v1_riva(void) {}
367#endif
368
Angshuman Sarkarbad32df2012-02-01 19:52:52 +0530369static inline void notify_modem_smd(void)
370{
371 static const struct interrupt_config_item *intr
372 = &private_intr_config[SMD_MODEM].smd;
Eric Holmberg7ad623a2012-03-01 14:41:10 -0700373 if (intr->out_base) {
374 ++interrupt_stats[SMD_MODEM].smd_out_config_count;
Angshuman Sarkarbad32df2012-02-01 19:52:52 +0530375 smd_write_intr(intr->out_bit_pos,
376 intr->out_base + intr->out_offset);
Eric Holmberg7ad623a2012-03-01 14:41:10 -0700377 } else {
378 ++interrupt_stats[SMD_MODEM].smd_out_hardcode_count;
Angshuman Sarkarbad32df2012-02-01 19:52:52 +0530379 MSM_TRIG_A2M_SMD_INT;
Eric Holmberg7ad623a2012-03-01 14:41:10 -0700380 }
Angshuman Sarkarbad32df2012-02-01 19:52:52 +0530381}
382
383static inline void notify_dsp_smd(void)
384{
385 static const struct interrupt_config_item *intr
386 = &private_intr_config[SMD_Q6].smd;
Eric Holmberg7ad623a2012-03-01 14:41:10 -0700387 if (intr->out_base) {
388 ++interrupt_stats[SMD_Q6].smd_out_config_count;
Angshuman Sarkarbad32df2012-02-01 19:52:52 +0530389 smd_write_intr(intr->out_bit_pos,
390 intr->out_base + intr->out_offset);
Eric Holmberg7ad623a2012-03-01 14:41:10 -0700391 } else {
392 ++interrupt_stats[SMD_Q6].smd_out_hardcode_count;
Angshuman Sarkarbad32df2012-02-01 19:52:52 +0530393 MSM_TRIG_A2Q6_SMD_INT;
Eric Holmberg7ad623a2012-03-01 14:41:10 -0700394 }
Angshuman Sarkarbad32df2012-02-01 19:52:52 +0530395}
396
397static inline void notify_dsps_smd(void)
398{
399 static const struct interrupt_config_item *intr
400 = &private_intr_config[SMD_DSPS].smd;
Eric Holmberg7ad623a2012-03-01 14:41:10 -0700401 if (intr->out_base) {
402 ++interrupt_stats[SMD_DSPS].smd_out_config_count;
Angshuman Sarkarbad32df2012-02-01 19:52:52 +0530403 smd_write_intr(intr->out_bit_pos,
404 intr->out_base + intr->out_offset);
Eric Holmberg7ad623a2012-03-01 14:41:10 -0700405 } else {
406 ++interrupt_stats[SMD_DSPS].smd_out_hardcode_count;
Angshuman Sarkarbad32df2012-02-01 19:52:52 +0530407 MSM_TRIG_A2DSPS_SMD_INT;
Eric Holmberg7ad623a2012-03-01 14:41:10 -0700408 }
Angshuman Sarkarbad32df2012-02-01 19:52:52 +0530409}
410
411static inline void notify_wcnss_smd(void)
412{
413 static const struct interrupt_config_item *intr
414 = &private_intr_config[SMD_WCNSS].smd;
415 wakeup_v1_riva();
416
Eric Holmberg7ad623a2012-03-01 14:41:10 -0700417 if (intr->out_base) {
418 ++interrupt_stats[SMD_WCNSS].smd_out_config_count;
Angshuman Sarkarbad32df2012-02-01 19:52:52 +0530419 smd_write_intr(intr->out_bit_pos,
420 intr->out_base + intr->out_offset);
Eric Holmberg7ad623a2012-03-01 14:41:10 -0700421 } else {
422 ++interrupt_stats[SMD_WCNSS].smd_out_hardcode_count;
Angshuman Sarkarbad32df2012-02-01 19:52:52 +0530423 MSM_TRIG_A2WCNSS_SMD_INT;
Eric Holmberg7ad623a2012-03-01 14:41:10 -0700424 }
Angshuman Sarkarbad32df2012-02-01 19:52:52 +0530425}
426
427static inline void notify_modem_smsm(void)
428{
429 static const struct interrupt_config_item *intr
430 = &private_intr_config[SMD_MODEM].smsm;
Eric Holmberg7ad623a2012-03-01 14:41:10 -0700431 if (intr->out_base) {
432 ++interrupt_stats[SMD_MODEM].smsm_out_config_count;
Angshuman Sarkarbad32df2012-02-01 19:52:52 +0530433 smd_write_intr(intr->out_bit_pos,
434 intr->out_base + intr->out_offset);
Eric Holmberg7ad623a2012-03-01 14:41:10 -0700435 } else {
436 ++interrupt_stats[SMD_MODEM].smsm_out_hardcode_count;
Angshuman Sarkarbad32df2012-02-01 19:52:52 +0530437 MSM_TRIG_A2M_SMSM_INT;
Eric Holmberg7ad623a2012-03-01 14:41:10 -0700438 }
Angshuman Sarkarbad32df2012-02-01 19:52:52 +0530439}
440
441static inline void notify_dsp_smsm(void)
442{
443 static const struct interrupt_config_item *intr
444 = &private_intr_config[SMD_Q6].smsm;
Eric Holmberg7ad623a2012-03-01 14:41:10 -0700445 if (intr->out_base) {
446 ++interrupt_stats[SMD_Q6].smsm_out_config_count;
Angshuman Sarkarbad32df2012-02-01 19:52:52 +0530447 smd_write_intr(intr->out_bit_pos,
448 intr->out_base + intr->out_offset);
Eric Holmberg7ad623a2012-03-01 14:41:10 -0700449 } else {
450 ++interrupt_stats[SMD_Q6].smsm_out_hardcode_count;
Angshuman Sarkarbad32df2012-02-01 19:52:52 +0530451 MSM_TRIG_A2Q6_SMSM_INT;
Eric Holmberg7ad623a2012-03-01 14:41:10 -0700452 }
Angshuman Sarkarbad32df2012-02-01 19:52:52 +0530453}
454
455static inline void notify_dsps_smsm(void)
456{
457 static const struct interrupt_config_item *intr
458 = &private_intr_config[SMD_DSPS].smsm;
Eric Holmberg7ad623a2012-03-01 14:41:10 -0700459 if (intr->out_base) {
460 ++interrupt_stats[SMD_DSPS].smsm_out_config_count;
Angshuman Sarkarbad32df2012-02-01 19:52:52 +0530461 smd_write_intr(intr->out_bit_pos,
462 intr->out_base + intr->out_offset);
Eric Holmberg7ad623a2012-03-01 14:41:10 -0700463 } else {
464 ++interrupt_stats[SMD_DSPS].smsm_out_hardcode_count;
Angshuman Sarkarbad32df2012-02-01 19:52:52 +0530465 MSM_TRIG_A2DSPS_SMSM_INT;
Eric Holmberg7ad623a2012-03-01 14:41:10 -0700466 }
Angshuman Sarkarbad32df2012-02-01 19:52:52 +0530467}
468
469static inline void notify_wcnss_smsm(void)
470{
471 static const struct interrupt_config_item *intr
472 = &private_intr_config[SMD_WCNSS].smsm;
473 wakeup_v1_riva();
474
Eric Holmberg7ad623a2012-03-01 14:41:10 -0700475 if (intr->out_base) {
476 ++interrupt_stats[SMD_WCNSS].smsm_out_config_count;
Angshuman Sarkarbad32df2012-02-01 19:52:52 +0530477 smd_write_intr(intr->out_bit_pos,
478 intr->out_base + intr->out_offset);
Eric Holmberg7ad623a2012-03-01 14:41:10 -0700479 } else {
480 ++interrupt_stats[SMD_WCNSS].smsm_out_hardcode_count;
Angshuman Sarkarbad32df2012-02-01 19:52:52 +0530481 MSM_TRIG_A2WCNSS_SMSM_INT;
Eric Holmberg7ad623a2012-03-01 14:41:10 -0700482 }
Angshuman Sarkarbad32df2012-02-01 19:52:52 +0530483}
484
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700485static void notify_other_smsm(uint32_t smsm_entry, uint32_t notify_mask)
486{
487 /* older protocol don't use smsm_intr_mask,
488 but still communicates with modem */
489 if (!smsm_info.intr_mask ||
490 (__raw_readl(SMSM_INTR_MASK_ADDR(smsm_entry, SMSM_MODEM))
491 & notify_mask))
Angshuman Sarkarbad32df2012-02-01 19:52:52 +0530492 notify_modem_smsm();
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700493
494 if (smsm_info.intr_mask &&
495 (__raw_readl(SMSM_INTR_MASK_ADDR(smsm_entry, SMSM_Q6))
496 & notify_mask)) {
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700497 uint32_t mux_val;
498
Eric Holmberg6282c5d2011-10-27 17:30:57 -0600499 if (cpu_is_qsd8x50() && smsm_info.intr_mux) {
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700500 mux_val = __raw_readl(
501 SMSM_INTR_MUX_ADDR(SMEM_APPS_Q6_SMSM));
502 mux_val++;
503 __raw_writel(mux_val,
504 SMSM_INTR_MUX_ADDR(SMEM_APPS_Q6_SMSM));
505 }
Angshuman Sarkarbad32df2012-02-01 19:52:52 +0530506 notify_dsp_smsm();
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700507 }
508
509 if (smsm_info.intr_mask &&
510 (__raw_readl(SMSM_INTR_MASK_ADDR(smsm_entry, SMSM_WCNSS))
511 & notify_mask)) {
Angshuman Sarkarbad32df2012-02-01 19:52:52 +0530512 notify_wcnss_smsm();
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700513 }
514
Jeff Hugo6a8057c2011-08-16 13:47:12 -0600515 if (smsm_info.intr_mask &&
516 (__raw_readl(SMSM_INTR_MASK_ADDR(smsm_entry, SMSM_DSPS))
517 & notify_mask)) {
Angshuman Sarkarbad32df2012-02-01 19:52:52 +0530518 notify_dsps_smsm();
Jeff Hugo6a8057c2011-08-16 13:47:12 -0600519 }
520
Eric Holmbergda31d042012-03-28 14:01:02 -0600521 /*
522 * Notify local SMSM callback clients without wakelock since this
523 * code is used by power management during power-down/-up sequencing
524 * on DEM-based targets. Grabbing a wakelock in this case will
525 * abort the power-down sequencing.
526 */
527 smsm_cb_snapshot(0);
Brian Swetland2eb44eb2008-09-29 16:00:48 -0700528}
529
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700530void smd_diag(void)
Brian Swetland2eb44eb2008-09-29 16:00:48 -0700531{
532 char *x;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700533 int size;
Brian Swetland2eb44eb2008-09-29 16:00:48 -0700534
535 x = smem_find(ID_DIAG_ERR_MSG, SZ_DIAG_ERR_MSG);
536 if (x != 0) {
537 x[SZ_DIAG_ERR_MSG - 1] = 0;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700538 SMD_INFO("smem: DIAG '%s'\n", x);
539 }
540
541 x = smem_get_entry(SMEM_ERR_CRASH_LOG, &size);
542 if (x != 0) {
543 x[size - 1] = 0;
544 pr_err("smem: CRASH LOG\n'%s'\n", x);
Brian Swetland2eb44eb2008-09-29 16:00:48 -0700545 }
546}
547
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700548
Brian Swetland2eb44eb2008-09-29 16:00:48 -0700549static void handle_modem_crash(void)
550{
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700551 pr_err("MODEM/AMSS has CRASHED\n");
Brian Swetland2eb44eb2008-09-29 16:00:48 -0700552 smd_diag();
553
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700554 /* hard reboot if possible FIXME
555 if (msm_reset_hook)
556 msm_reset_hook();
557 */
Brian Swetland2eb44eb2008-09-29 16:00:48 -0700558
559 /* in this case the modem or watchdog should reboot us */
560 for (;;)
561 ;
562}
563
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700564int smsm_check_for_modem_crash(void)
Arve Hjønnevåg28379412009-05-20 16:52:36 -0700565{
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700566 /* if the modem's not ready yet, we have to hope for the best */
567 if (!smsm_info.state)
568 return 0;
Arve Hjønnevåg28379412009-05-20 16:52:36 -0700569
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700570 if (__raw_readl(SMSM_STATE_ADDR(SMSM_MODEM_STATE)) & SMSM_RESET) {
Brian Swetland2eb44eb2008-09-29 16:00:48 -0700571 handle_modem_crash();
572 return -1;
Brian Swetland2eb44eb2008-09-29 16:00:48 -0700573 }
Brian Swetland5b0f5a32009-04-26 18:38:49 -0700574 return 0;
Brian Swetland2eb44eb2008-09-29 16:00:48 -0700575}
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700576EXPORT_SYMBOL(smsm_check_for_modem_crash);
Brian Swetland2eb44eb2008-09-29 16:00:48 -0700577
Brian Swetland2eb44eb2008-09-29 16:00:48 -0700578/* the spinlock is used to synchronize between the
Brian Swetland03e00cd2009-07-01 17:58:37 -0700579 * irq handler and code that mutates the channel
580 * list or fiddles with channel state
581 */
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700582static DEFINE_SPINLOCK(smd_lock);
Brian Swetland03e00cd2009-07-01 17:58:37 -0700583DEFINE_SPINLOCK(smem_lock);
Brian Swetland2eb44eb2008-09-29 16:00:48 -0700584
585/* the mutex is used during open() and close()
Brian Swetland03e00cd2009-07-01 17:58:37 -0700586 * operations to avoid races while creating or
587 * destroying smd_channel structures
588 */
Brian Swetland2eb44eb2008-09-29 16:00:48 -0700589static DEFINE_MUTEX(smd_creation_mutex);
590
591static int smd_initialized;
592
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700593struct smd_shared_v1 {
594 struct smd_half_channel ch0;
595 unsigned char data0[SMD_BUF_SIZE];
596 struct smd_half_channel ch1;
597 unsigned char data1[SMD_BUF_SIZE];
598};
599
600struct smd_shared_v2 {
601 struct smd_half_channel ch0;
602 struct smd_half_channel ch1;
603};
604
605struct smd_channel {
606 volatile struct smd_half_channel *send;
607 volatile struct smd_half_channel *recv;
608 unsigned char *send_data;
609 unsigned char *recv_data;
610 unsigned fifo_size;
611 unsigned fifo_mask;
612 struct list_head ch_list;
613
614 unsigned current_packet;
615 unsigned n;
616 void *priv;
617 void (*notify)(void *priv, unsigned flags);
618
619 int (*read)(smd_channel_t *ch, void *data, int len, int user_buf);
620 int (*write)(smd_channel_t *ch, const void *data, int len,
621 int user_buf);
622 int (*read_avail)(smd_channel_t *ch);
623 int (*write_avail)(smd_channel_t *ch);
624 int (*read_from_cb)(smd_channel_t *ch, void *data, int len,
625 int user_buf);
626
627 void (*update_state)(smd_channel_t *ch);
628 unsigned last_state;
629 void (*notify_other_cpu)(void);
630
631 char name[20];
632 struct platform_device pdev;
633 unsigned type;
634
635 int pending_pkt_sz;
636
637 char is_pkt_ch;
638};
639
640struct edge_to_pid {
641 uint32_t local_pid;
642 uint32_t remote_pid;
Eric Holmberg17992c12012-02-29 12:54:44 -0700643 char subsys_name[SMD_MAX_CH_NAME_LEN];
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700644};
645
646/**
647 * Maps edge type to local and remote processor ID's.
648 */
649static struct edge_to_pid edge_to_pids[] = {
Eric Holmberg5a55b4a2012-03-01 14:41:54 -0700650 [SMD_APPS_MODEM] = {SMD_APPS, SMD_MODEM, "modem"},
651 [SMD_APPS_QDSP] = {SMD_APPS, SMD_Q6, "q6"},
652 [SMD_MODEM_QDSP] = {SMD_MODEM, SMD_Q6},
653 [SMD_APPS_DSPS] = {SMD_APPS, SMD_DSPS, "dsps"},
654 [SMD_MODEM_DSPS] = {SMD_MODEM, SMD_DSPS},
655 [SMD_QDSP_DSPS] = {SMD_Q6, SMD_DSPS},
656 [SMD_APPS_WCNSS] = {SMD_APPS, SMD_WCNSS, "wcnss"},
657 [SMD_MODEM_WCNSS] = {SMD_MODEM, SMD_WCNSS},
658 [SMD_QDSP_WCNSS] = {SMD_Q6, SMD_WCNSS},
659 [SMD_DSPS_WCNSS] = {SMD_DSPS, SMD_WCNSS},
660 [SMD_APPS_Q6FW] = {SMD_APPS, SMD_MODEM_Q6_FW},
661 [SMD_MODEM_Q6FW] = {SMD_MODEM, SMD_MODEM_Q6_FW},
662 [SMD_QDSP_Q6FW] = {SMD_Q6, SMD_MODEM_Q6_FW},
663 [SMD_DSPS_Q6FW] = {SMD_DSPS, SMD_MODEM_Q6_FW},
664 [SMD_WCNSS_Q6FW] = {SMD_WCNSS, SMD_MODEM_Q6_FW},
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700665};
666
667struct restart_notifier_block {
668 unsigned processor;
669 char *name;
670 struct notifier_block nb;
671};
672
Eric Holmberg2bb6ccd2012-03-13 13:05:14 -0600673static int disable_smsm_reset_handshake;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700674static struct platform_device loopback_tty_pdev = {.name = "LOOPBACK_TTY"};
675
676static LIST_HEAD(smd_ch_closed_list);
677static LIST_HEAD(smd_ch_closing_list);
678static LIST_HEAD(smd_ch_to_close_list);
679static LIST_HEAD(smd_ch_list_modem);
680static LIST_HEAD(smd_ch_list_dsp);
681static LIST_HEAD(smd_ch_list_dsps);
682static LIST_HEAD(smd_ch_list_wcnss);
Brian Swetland2eb44eb2008-09-29 16:00:48 -0700683
684static unsigned char smd_ch_allocated[64];
685static struct work_struct probe_work;
686
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700687static void finalize_channel_close_fn(struct work_struct *work);
688static DECLARE_WORK(finalize_channel_close_work, finalize_channel_close_fn);
689static struct workqueue_struct *channel_close_wq;
690
691static int smd_alloc_channel(struct smd_alloc_elm *alloc_elm);
692
693/* on smp systems, the probe might get called from multiple cores,
694 hence use a lock */
695static DEFINE_MUTEX(smd_probe_lock);
696
697static void smd_channel_probe_worker(struct work_struct *work)
698{
699 struct smd_alloc_elm *shared;
700 unsigned n;
701 uint32_t type;
702
703 shared = smem_find(ID_CH_ALLOC_TBL, sizeof(*shared) * 64);
704
705 if (!shared) {
706 pr_err("%s: allocation table not initialized\n", __func__);
707 return;
708 }
709
710 mutex_lock(&smd_probe_lock);
711 for (n = 0; n < 64; n++) {
712 if (smd_ch_allocated[n])
713 continue;
714
715 /* channel should be allocated only if APPS
716 processor is involved */
717 type = SMD_CHANNEL_TYPE(shared[n].type);
718 if ((type != SMD_APPS_MODEM) && (type != SMD_APPS_QDSP) &&
719 (type != SMD_APPS_DSPS) && (type != SMD_APPS_WCNSS))
720 continue;
721 if (!shared[n].ref_count)
722 continue;
723 if (!shared[n].name[0])
724 continue;
725
726 if (!smd_alloc_channel(&shared[n]))
727 smd_ch_allocated[n] = 1;
728 else
729 SMD_INFO("Probe skipping ch %d, not allocated\n", n);
730 }
731 mutex_unlock(&smd_probe_lock);
732}
733
734/**
735 * Lookup processor ID and determine if it belongs to the proved edge
736 * type.
737 *
738 * @shared2: Pointer to v2 shared channel structure
739 * @type: Edge type
740 * @pid: Processor ID of processor on edge
741 * @local_ch: Channel that belongs to processor @pid
742 * @remote_ch: Other side of edge contained @pid
743 *
744 * Returns 0 for not on edge, 1 for found on edge
745 */
746static int pid_is_on_edge(struct smd_shared_v2 *shared2,
747 uint32_t type, uint32_t pid,
748 struct smd_half_channel **local_ch,
749 struct smd_half_channel **remote_ch
750 )
751{
752 int ret = 0;
753 struct edge_to_pid *edge;
754
755 *local_ch = 0;
756 *remote_ch = 0;
757
758 if (!shared2 || (type >= ARRAY_SIZE(edge_to_pids)))
759 return 0;
760
761 edge = &edge_to_pids[type];
762 if (edge->local_pid != edge->remote_pid) {
763 if (pid == edge->local_pid) {
764 *local_ch = &shared2->ch0;
765 *remote_ch = &shared2->ch1;
766 ret = 1;
767 } else if (pid == edge->remote_pid) {
768 *local_ch = &shared2->ch1;
769 *remote_ch = &shared2->ch0;
770 ret = 1;
771 }
772 }
773
774 return ret;
775}
776
Eric Holmberg17992c12012-02-29 12:54:44 -0700777/*
778 * Returns a pointer to the subsystem name or NULL if no
779 * subsystem name is available.
780 *
781 * @type - Edge definition
782 */
783const char *smd_edge_to_subsystem(uint32_t type)
784{
785 const char *subsys = NULL;
786
787 if (type < ARRAY_SIZE(edge_to_pids)) {
788 subsys = edge_to_pids[type].subsys_name;
789 if (subsys[0] == 0x0)
790 subsys = NULL;
791 }
792 return subsys;
793}
794EXPORT_SYMBOL(smd_edge_to_subsystem);
795
Eric Holmberg7ad623a2012-03-01 14:41:10 -0700796/*
797 * Returns a pointer to the subsystem name given the
798 * remote processor ID.
799 *
800 * @pid Remote processor ID
801 * @returns Pointer to subsystem name or NULL if not found
802 */
803const char *smd_pid_to_subsystem(uint32_t pid)
804{
805 const char *subsys = NULL;
806 int i;
807
808 for (i = 0; i < ARRAY_SIZE(edge_to_pids); ++i) {
809 if (pid == edge_to_pids[i].remote_pid &&
810 edge_to_pids[i].subsys_name[0] != 0x0
811 ) {
812 subsys = edge_to_pids[i].subsys_name;
813 break;
814 }
815 }
816
817 return subsys;
818}
819EXPORT_SYMBOL(smd_pid_to_subsystem);
Eric Holmberg17992c12012-02-29 12:54:44 -0700820
Eric Holmberg2a563c32011-10-05 14:51:43 -0600821static void smd_reset_edge(struct smd_half_channel *ch, unsigned new_state)
822{
823 if (ch->state != SMD_SS_CLOSED) {
824 ch->state = new_state;
825 ch->fDSR = 0;
826 ch->fCTS = 0;
827 ch->fCD = 0;
828 ch->fSTATE = 1;
829 }
830}
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700831
832static void smd_channel_reset_state(struct smd_alloc_elm *shared,
833 unsigned new_state, unsigned pid)
834{
835 unsigned n;
836 struct smd_shared_v2 *shared2;
837 uint32_t type;
838 struct smd_half_channel *local_ch;
839 struct smd_half_channel *remote_ch;
840
841 for (n = 0; n < SMD_CHANNELS; n++) {
842 if (!shared[n].ref_count)
843 continue;
844 if (!shared[n].name[0])
845 continue;
846
847 type = SMD_CHANNEL_TYPE(shared[n].type);
848 shared2 = smem_alloc(SMEM_SMD_BASE_ID + n, sizeof(*shared2));
849 if (!shared2)
850 continue;
851
Eric Holmberg2a563c32011-10-05 14:51:43 -0600852 if (pid_is_on_edge(shared2, type, pid, &local_ch, &remote_ch))
853 smd_reset_edge(local_ch, new_state);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700854
Eric Holmberg2a563c32011-10-05 14:51:43 -0600855 /*
856 * ModemFW is in the same subsystem as ModemSW, but has
857 * separate SMD edges that need to be reset.
858 */
859 if (pid == SMSM_MODEM &&
860 pid_is_on_edge(shared2, type, SMD_MODEM_Q6_FW,
861 &local_ch, &remote_ch))
862 smd_reset_edge(local_ch, new_state);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700863 }
864}
865
866
867void smd_channel_reset(uint32_t restart_pid)
868{
869 struct smd_alloc_elm *shared;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700870 unsigned long flags;
871
872 SMD_DBG("%s: starting reset\n", __func__);
873 shared = smem_find(ID_CH_ALLOC_TBL, sizeof(*shared) * 64);
874 if (!shared) {
875 pr_err("%s: allocation table not initialized\n", __func__);
876 return;
877 }
878
Eric Holmbergf6d7d1a2011-09-23 18:31:04 -0600879 /* release any held spinlocks */
880 remote_spin_release(&remote_spinlock, restart_pid);
881 remote_spin_release_all(restart_pid);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700882
883 /* reset SMSM entry */
884 if (smsm_info.state) {
885 writel_relaxed(0, SMSM_STATE_ADDR(restart_pid));
886
Eric Holmberg351a63c2011-12-02 17:49:43 -0700887 /* restart SMSM init handshake */
888 if (restart_pid == SMSM_MODEM) {
889 smsm_change_state(SMSM_APPS_STATE,
Eric Holmberg6b2f80e2012-01-09 12:22:52 -0700890 SMSM_INIT | SMSM_SMD_LOOPBACK | SMSM_RESET,
891 0);
Eric Holmberg351a63c2011-12-02 17:49:43 -0700892 }
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700893
894 /* notify SMSM processors */
895 smsm_irq_handler(0, 0);
Eric Holmbergb7726442012-03-01 15:31:56 -0700896 notify_modem_smsm();
897 notify_dsp_smsm();
898 notify_dsps_smsm();
899 notify_wcnss_smsm();
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700900 }
901
902 /* change all remote states to CLOSING */
903 mutex_lock(&smd_probe_lock);
904 spin_lock_irqsave(&smd_lock, flags);
905 smd_channel_reset_state(shared, SMD_SS_CLOSING, restart_pid);
906 spin_unlock_irqrestore(&smd_lock, flags);
907 mutex_unlock(&smd_probe_lock);
908
909 /* notify SMD processors */
910 mb();
911 smd_fake_irq_handler(0);
912 notify_modem_smd();
913 notify_dsp_smd();
914 notify_dsps_smd();
915 notify_wcnss_smd();
916
917 /* change all remote states to CLOSED */
918 mutex_lock(&smd_probe_lock);
919 spin_lock_irqsave(&smd_lock, flags);
920 smd_channel_reset_state(shared, SMD_SS_CLOSED, restart_pid);
921 spin_unlock_irqrestore(&smd_lock, flags);
922 mutex_unlock(&smd_probe_lock);
923
924 /* notify SMD processors */
925 mb();
926 smd_fake_irq_handler(0);
927 notify_modem_smd();
928 notify_dsp_smd();
929 notify_dsps_smd();
930 notify_wcnss_smd();
931
932 SMD_DBG("%s: finished reset\n", __func__);
933}
934
Brian Swetland2eb44eb2008-09-29 16:00:48 -0700935/* how many bytes are available for reading */
936static int smd_stream_read_avail(struct smd_channel *ch)
937{
Brian Swetland5b0f5a32009-04-26 18:38:49 -0700938 return (ch->recv->head - ch->recv->tail) & ch->fifo_mask;
Brian Swetland2eb44eb2008-09-29 16:00:48 -0700939}
940
941/* how many bytes we are free to write */
942static int smd_stream_write_avail(struct smd_channel *ch)
943{
Brian Swetland5b0f5a32009-04-26 18:38:49 -0700944 return ch->fifo_mask -
945 ((ch->send->head - ch->send->tail) & ch->fifo_mask);
Brian Swetland2eb44eb2008-09-29 16:00:48 -0700946}
947
948static int smd_packet_read_avail(struct smd_channel *ch)
949{
950 if (ch->current_packet) {
951 int n = smd_stream_read_avail(ch);
952 if (n > ch->current_packet)
953 n = ch->current_packet;
954 return n;
955 } else {
956 return 0;
957 }
958}
959
960static int smd_packet_write_avail(struct smd_channel *ch)
961{
962 int n = smd_stream_write_avail(ch);
963 return n > SMD_HEADER_SIZE ? n - SMD_HEADER_SIZE : 0;
964}
965
966static int ch_is_open(struct smd_channel *ch)
967{
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700968 return (ch->recv->state == SMD_SS_OPENED ||
969 ch->recv->state == SMD_SS_FLUSHING)
970 && (ch->send->state == SMD_SS_OPENED);
Brian Swetland2eb44eb2008-09-29 16:00:48 -0700971}
972
973/* provide a pointer and length to readable data in the fifo */
974static unsigned ch_read_buffer(struct smd_channel *ch, void **ptr)
975{
976 unsigned head = ch->recv->head;
977 unsigned tail = ch->recv->tail;
Brian Swetland5b0f5a32009-04-26 18:38:49 -0700978 *ptr = (void *) (ch->recv_data + tail);
Brian Swetland2eb44eb2008-09-29 16:00:48 -0700979
980 if (tail <= head)
981 return head - tail;
982 else
Brian Swetland5b0f5a32009-04-26 18:38:49 -0700983 return ch->fifo_size - tail;
Brian Swetland2eb44eb2008-09-29 16:00:48 -0700984}
985
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700986static int read_intr_blocked(struct smd_channel *ch)
987{
988 return ch->recv->fBLOCKREADINTR;
989}
990
Brian Swetland2eb44eb2008-09-29 16:00:48 -0700991/* advance the fifo read pointer after data from ch_read_buffer is consumed */
992static void ch_read_done(struct smd_channel *ch, unsigned count)
993{
994 BUG_ON(count > smd_stream_read_avail(ch));
Brian Swetland5b0f5a32009-04-26 18:38:49 -0700995 ch->recv->tail = (ch->recv->tail + count) & ch->fifo_mask;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700996 wmb();
Haley Teng7632fba2009-10-12 10:38:10 -0700997 ch->send->fTAIL = 1;
Brian Swetland2eb44eb2008-09-29 16:00:48 -0700998}
999
1000/* basic read interface to ch_read_{buffer,done} used
Brian Swetland03e00cd2009-07-01 17:58:37 -07001001 * by smd_*_read() and update_packet_state()
1002 * will read-and-discard if the _data pointer is null
1003 */
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001004static int ch_read(struct smd_channel *ch, void *_data, int len, int user_buf)
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001005{
1006 void *ptr;
1007 unsigned n;
1008 unsigned char *data = _data;
1009 int orig_len = len;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001010 int r = 0;
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001011
1012 while (len > 0) {
1013 n = ch_read_buffer(ch, &ptr);
1014 if (n == 0)
1015 break;
1016
1017 if (n > len)
1018 n = len;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001019 if (_data) {
1020 if (user_buf) {
1021 r = copy_to_user(data, ptr, n);
1022 if (r > 0) {
1023 pr_err("%s: "
1024 "copy_to_user could not copy "
1025 "%i bytes.\n",
1026 __func__,
1027 r);
1028 }
1029 } else
1030 memcpy(data, ptr, n);
1031 }
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001032
1033 data += n;
1034 len -= n;
1035 ch_read_done(ch, n);
1036 }
1037
1038 return orig_len - len;
1039}
1040
1041static void update_stream_state(struct smd_channel *ch)
1042{
1043 /* streams have no special state requiring updating */
1044}
1045
1046static void update_packet_state(struct smd_channel *ch)
1047{
1048 unsigned hdr[5];
1049 int r;
1050
1051 /* can't do anything if we're in the middle of a packet */
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001052 while (ch->current_packet == 0) {
1053 /* discard 0 length packets if any */
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001054
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001055 /* don't bother unless we can get the full header */
1056 if (smd_stream_read_avail(ch) < SMD_HEADER_SIZE)
1057 return;
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001058
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001059 r = ch_read(ch, hdr, SMD_HEADER_SIZE, 0);
1060 BUG_ON(r != SMD_HEADER_SIZE);
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001061
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001062 ch->current_packet = hdr[0];
1063 }
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001064}
1065
1066/* provide a pointer and length to next free space in the fifo */
1067static unsigned ch_write_buffer(struct smd_channel *ch, void **ptr)
1068{
1069 unsigned head = ch->send->head;
1070 unsigned tail = ch->send->tail;
Brian Swetland5b0f5a32009-04-26 18:38:49 -07001071 *ptr = (void *) (ch->send_data + head);
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001072
1073 if (head < tail) {
1074 return tail - head - 1;
1075 } else {
1076 if (tail == 0)
Brian Swetland5b0f5a32009-04-26 18:38:49 -07001077 return ch->fifo_size - head - 1;
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001078 else
Brian Swetland5b0f5a32009-04-26 18:38:49 -07001079 return ch->fifo_size - head;
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001080 }
1081}
1082
1083/* advace the fifo write pointer after freespace
1084 * from ch_write_buffer is filled
1085 */
1086static void ch_write_done(struct smd_channel *ch, unsigned count)
1087{
1088 BUG_ON(count > smd_stream_write_avail(ch));
Brian Swetland5b0f5a32009-04-26 18:38:49 -07001089 ch->send->head = (ch->send->head + count) & ch->fifo_mask;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001090 wmb();
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001091 ch->send->fHEAD = 1;
1092}
1093
Brian Swetland5b0f5a32009-04-26 18:38:49 -07001094static void ch_set_state(struct smd_channel *ch, unsigned n)
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001095{
1096 if (n == SMD_SS_OPENED) {
Brian Swetland5b0f5a32009-04-26 18:38:49 -07001097 ch->send->fDSR = 1;
1098 ch->send->fCTS = 1;
1099 ch->send->fCD = 1;
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001100 } else {
Brian Swetland5b0f5a32009-04-26 18:38:49 -07001101 ch->send->fDSR = 0;
1102 ch->send->fCTS = 0;
1103 ch->send->fCD = 0;
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001104 }
Brian Swetland5b0f5a32009-04-26 18:38:49 -07001105 ch->send->state = n;
1106 ch->send->fSTATE = 1;
1107 ch->notify_other_cpu();
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001108}
1109
1110static void do_smd_probe(void)
1111{
1112 struct smem_shared *shared = (void *) MSM_SHARED_RAM_BASE;
1113 if (shared->heap_info.free_offset != last_heap_free) {
1114 last_heap_free = shared->heap_info.free_offset;
1115 schedule_work(&probe_work);
1116 }
1117}
1118
1119static void smd_state_change(struct smd_channel *ch,
1120 unsigned last, unsigned next)
1121{
1122 ch->last_state = next;
1123
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001124 SMD_INFO("SMD: ch %d %d -> %d\n", ch->n, last, next);
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001125
1126 switch (next) {
1127 case SMD_SS_OPENING:
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001128 if (ch->send->state == SMD_SS_CLOSING ||
1129 ch->send->state == SMD_SS_CLOSED) {
1130 ch->recv->tail = 0;
1131 ch->send->head = 0;
1132 ch->send->fBLOCKREADINTR = 0;
1133 ch_set_state(ch, SMD_SS_OPENING);
1134 }
1135 break;
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001136 case SMD_SS_OPENED:
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001137 if (ch->send->state == SMD_SS_OPENING) {
Brian Swetland5b0f5a32009-04-26 18:38:49 -07001138 ch_set_state(ch, SMD_SS_OPENED);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001139 ch->notify(ch->priv, SMD_EVENT_OPEN);
1140 }
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001141 break;
1142 case SMD_SS_FLUSHING:
1143 case SMD_SS_RESET:
1144 /* we should force them to close? */
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001145 break;
1146 case SMD_SS_CLOSED:
1147 if (ch->send->state == SMD_SS_OPENED) {
1148 ch_set_state(ch, SMD_SS_CLOSING);
1149 ch->current_packet = 0;
Eric Holmbergad4fa8d2011-11-11 16:55:13 -07001150 ch->pending_pkt_sz = 0;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001151 ch->notify(ch->priv, SMD_EVENT_CLOSE);
1152 }
1153 break;
1154 case SMD_SS_CLOSING:
1155 if (ch->send->state == SMD_SS_CLOSED) {
1156 list_move(&ch->ch_list,
1157 &smd_ch_to_close_list);
1158 queue_work(channel_close_wq,
1159 &finalize_channel_close_work);
1160 }
1161 break;
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001162 }
1163}
1164
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001165static void handle_smd_irq_closing_list(void)
1166{
1167 unsigned long flags;
1168 struct smd_channel *ch;
1169 struct smd_channel *index;
1170 unsigned tmp;
1171
1172 spin_lock_irqsave(&smd_lock, flags);
1173 list_for_each_entry_safe(ch, index, &smd_ch_closing_list, ch_list) {
1174 if (ch->recv->fSTATE)
1175 ch->recv->fSTATE = 0;
1176 tmp = ch->recv->state;
1177 if (tmp != ch->last_state)
1178 smd_state_change(ch, ch->last_state, tmp);
1179 }
1180 spin_unlock_irqrestore(&smd_lock, flags);
1181}
1182
Brian Swetland5b0f5a32009-04-26 18:38:49 -07001183static void handle_smd_irq(struct list_head *list, void (*notify)(void))
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001184{
1185 unsigned long flags;
1186 struct smd_channel *ch;
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001187 unsigned ch_flags;
1188 unsigned tmp;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001189 unsigned char state_change;
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001190
1191 spin_lock_irqsave(&smd_lock, flags);
Brian Swetland5b0f5a32009-04-26 18:38:49 -07001192 list_for_each_entry(ch, list, ch_list) {
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001193 state_change = 0;
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001194 ch_flags = 0;
1195 if (ch_is_open(ch)) {
1196 if (ch->recv->fHEAD) {
1197 ch->recv->fHEAD = 0;
1198 ch_flags |= 1;
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001199 }
1200 if (ch->recv->fTAIL) {
1201 ch->recv->fTAIL = 0;
1202 ch_flags |= 2;
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001203 }
1204 if (ch->recv->fSTATE) {
1205 ch->recv->fSTATE = 0;
1206 ch_flags |= 4;
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001207 }
1208 }
1209 tmp = ch->recv->state;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001210 if (tmp != ch->last_state) {
Eric Holmberg98c6c642012-02-24 11:29:35 -07001211 SMx_POWER_INFO("SMD ch%d '%s' State change %d->%d\n",
1212 ch->n, ch->name, ch->last_state, tmp);
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001213 smd_state_change(ch, ch->last_state, tmp);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001214 state_change = 1;
1215 }
Eric Holmberg65a7d432012-02-24 11:28:56 -07001216 if (ch_flags & 0x3) {
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001217 ch->update_state(ch);
Eric Holmberg98c6c642012-02-24 11:29:35 -07001218 SMx_POWER_INFO("SMD ch%d '%s' Data event r%d/w%d\n",
1219 ch->n, ch->name,
1220 ch->read_avail(ch),
1221 ch->fifo_size - ch->write_avail(ch));
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001222 ch->notify(ch->priv, SMD_EVENT_DATA);
1223 }
Eric Holmberg98c6c642012-02-24 11:29:35 -07001224 if (ch_flags & 0x4 && !state_change) {
1225 SMx_POWER_INFO("SMD ch%d '%s' State update\n",
1226 ch->n, ch->name);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001227 ch->notify(ch->priv, SMD_EVENT_STATUS);
Eric Holmberg98c6c642012-02-24 11:29:35 -07001228 }
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001229 }
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001230 spin_unlock_irqrestore(&smd_lock, flags);
1231 do_smd_probe();
Brian Swetland5b0f5a32009-04-26 18:38:49 -07001232}
1233
Brian Swetland37521a32009-07-01 18:30:47 -07001234static irqreturn_t smd_modem_irq_handler(int irq, void *data)
Brian Swetland5b0f5a32009-04-26 18:38:49 -07001235{
Eric Holmberg98c6c642012-02-24 11:29:35 -07001236 SMx_POWER_INFO("SMD Int Modem->Apps\n");
Eric Holmberg7ad623a2012-03-01 14:41:10 -07001237 ++interrupt_stats[SMD_MODEM].smd_in_count;
Brian Swetland37521a32009-07-01 18:30:47 -07001238 handle_smd_irq(&smd_ch_list_modem, notify_modem_smd);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001239 handle_smd_irq_closing_list();
Brian Swetland37521a32009-07-01 18:30:47 -07001240 return IRQ_HANDLED;
1241}
1242
1243static irqreturn_t smd_dsp_irq_handler(int irq, void *data)
1244{
Eric Holmberg98c6c642012-02-24 11:29:35 -07001245 SMx_POWER_INFO("SMD Int LPASS->Apps\n");
Eric Holmberg7ad623a2012-03-01 14:41:10 -07001246 ++interrupt_stats[SMD_Q6].smd_in_count;
Brian Swetland37521a32009-07-01 18:30:47 -07001247 handle_smd_irq(&smd_ch_list_dsp, notify_dsp_smd);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001248 handle_smd_irq_closing_list();
1249 return IRQ_HANDLED;
1250}
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001251
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001252static irqreturn_t smd_dsps_irq_handler(int irq, void *data)
1253{
Eric Holmberg98c6c642012-02-24 11:29:35 -07001254 SMx_POWER_INFO("SMD Int DSPS->Apps\n");
Eric Holmberg7ad623a2012-03-01 14:41:10 -07001255 ++interrupt_stats[SMD_DSPS].smd_in_count;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001256 handle_smd_irq(&smd_ch_list_dsps, notify_dsps_smd);
1257 handle_smd_irq_closing_list();
1258 return IRQ_HANDLED;
1259}
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001260
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001261static irqreturn_t smd_wcnss_irq_handler(int irq, void *data)
1262{
Eric Holmberg98c6c642012-02-24 11:29:35 -07001263 SMx_POWER_INFO("SMD Int WCNSS->Apps\n");
Eric Holmberg7ad623a2012-03-01 14:41:10 -07001264 ++interrupt_stats[SMD_WCNSS].smd_in_count;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001265 handle_smd_irq(&smd_ch_list_wcnss, notify_wcnss_smd);
1266 handle_smd_irq_closing_list();
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001267 return IRQ_HANDLED;
1268}
1269
1270static void smd_fake_irq_handler(unsigned long arg)
1271{
Brian Swetland37521a32009-07-01 18:30:47 -07001272 handle_smd_irq(&smd_ch_list_modem, notify_modem_smd);
1273 handle_smd_irq(&smd_ch_list_dsp, notify_dsp_smd);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001274 handle_smd_irq(&smd_ch_list_dsps, notify_dsps_smd);
1275 handle_smd_irq(&smd_ch_list_wcnss, notify_wcnss_smd);
1276 handle_smd_irq_closing_list();
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001277}
1278
1279static DECLARE_TASKLET(smd_fake_irq_tasklet, smd_fake_irq_handler, 0);
1280
Brian Swetland37521a32009-07-01 18:30:47 -07001281static inline int smd_need_int(struct smd_channel *ch)
1282{
1283 if (ch_is_open(ch)) {
1284 if (ch->recv->fHEAD || ch->recv->fTAIL || ch->recv->fSTATE)
1285 return 1;
1286 if (ch->recv->state != ch->last_state)
1287 return 1;
1288 }
1289 return 0;
1290}
1291
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001292void smd_sleep_exit(void)
1293{
1294 unsigned long flags;
1295 struct smd_channel *ch;
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001296 int need_int = 0;
1297
1298 spin_lock_irqsave(&smd_lock, flags);
Brian Swetland37521a32009-07-01 18:30:47 -07001299 list_for_each_entry(ch, &smd_ch_list_modem, ch_list) {
1300 if (smd_need_int(ch)) {
1301 need_int = 1;
1302 break;
1303 }
1304 }
1305 list_for_each_entry(ch, &smd_ch_list_dsp, ch_list) {
1306 if (smd_need_int(ch)) {
1307 need_int = 1;
1308 break;
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001309 }
1310 }
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001311 list_for_each_entry(ch, &smd_ch_list_dsps, ch_list) {
1312 if (smd_need_int(ch)) {
1313 need_int = 1;
1314 break;
1315 }
1316 }
1317 list_for_each_entry(ch, &smd_ch_list_wcnss, ch_list) {
1318 if (smd_need_int(ch)) {
1319 need_int = 1;
1320 break;
1321 }
1322 }
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001323 spin_unlock_irqrestore(&smd_lock, flags);
1324 do_smd_probe();
Brian Swetland37521a32009-07-01 18:30:47 -07001325
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001326 if (need_int) {
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001327 SMD_DBG("smd_sleep_exit need interrupt\n");
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001328 tasklet_schedule(&smd_fake_irq_tasklet);
1329 }
1330}
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001331EXPORT_SYMBOL(smd_sleep_exit);
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001332
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001333static int smd_is_packet(struct smd_alloc_elm *alloc_elm)
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001334{
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001335 if (SMD_XFER_TYPE(alloc_elm->type) == 1)
1336 return 0;
1337 else if (SMD_XFER_TYPE(alloc_elm->type) == 2)
Brian Swetland5b0f5a32009-04-26 18:38:49 -07001338 return 1;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001339
1340 /* for cases where xfer type is 0 */
1341 if (!strncmp(alloc_elm->name, "DAL", 3))
Brian Swetland5b0f5a32009-04-26 18:38:49 -07001342 return 0;
1343
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001344 /* for cases where xfer type is 0 */
1345 if (!strncmp(alloc_elm->name, "RPCCALL_QDSP", 12))
1346 return 0;
1347
1348 if (alloc_elm->cid > 4 || alloc_elm->cid == 1)
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001349 return 1;
1350 else
1351 return 0;
1352}
1353
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001354static int smd_stream_write(smd_channel_t *ch, const void *_data, int len,
1355 int user_buf)
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001356{
1357 void *ptr;
1358 const unsigned char *buf = _data;
1359 unsigned xfer;
1360 int orig_len = len;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001361 int r = 0;
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001362
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001363 SMD_DBG("smd_stream_write() %d -> ch%d\n", len, ch->n);
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001364 if (len < 0)
1365 return -EINVAL;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001366 else if (len == 0)
1367 return 0;
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001368
1369 while ((xfer = ch_write_buffer(ch, &ptr)) != 0) {
Eric Holmberg7a717872012-02-03 11:58:04 -07001370 if (!ch_is_open(ch)) {
1371 len = orig_len;
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001372 break;
Eric Holmberg7a717872012-02-03 11:58:04 -07001373 }
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001374 if (xfer > len)
1375 xfer = len;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001376 if (user_buf) {
1377 r = copy_from_user(ptr, buf, xfer);
1378 if (r > 0) {
1379 pr_err("%s: "
1380 "copy_from_user could not copy %i "
1381 "bytes.\n",
1382 __func__,
1383 r);
1384 }
1385 } else
1386 memcpy(ptr, buf, xfer);
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001387 ch_write_done(ch, xfer);
1388 len -= xfer;
1389 buf += xfer;
1390 if (len == 0)
1391 break;
1392 }
1393
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001394 if (orig_len - len)
1395 ch->notify_other_cpu();
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001396
1397 return orig_len - len;
1398}
1399
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001400static int smd_packet_write(smd_channel_t *ch, const void *_data, int len,
1401 int user_buf)
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001402{
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001403 int ret;
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001404 unsigned hdr[5];
1405
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001406 SMD_DBG("smd_packet_write() %d -> ch%d\n", len, ch->n);
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001407 if (len < 0)
1408 return -EINVAL;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001409 else if (len == 0)
1410 return 0;
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001411
1412 if (smd_stream_write_avail(ch) < (len + SMD_HEADER_SIZE))
1413 return -ENOMEM;
1414
1415 hdr[0] = len;
1416 hdr[1] = hdr[2] = hdr[3] = hdr[4] = 0;
1417
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001418
1419 ret = smd_stream_write(ch, hdr, sizeof(hdr), 0);
1420 if (ret < 0 || ret != sizeof(hdr)) {
1421 SMD_DBG("%s failed to write pkt header: "
1422 "%d returned\n", __func__, ret);
1423 return -1;
1424 }
1425
1426
1427 ret = smd_stream_write(ch, _data, len, user_buf);
1428 if (ret < 0 || ret != len) {
1429 SMD_DBG("%s failed to write pkt data: "
1430 "%d returned\n", __func__, ret);
1431 return ret;
1432 }
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001433
1434 return len;
1435}
1436
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001437static int smd_stream_read(smd_channel_t *ch, void *data, int len, int user_buf)
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001438{
1439 int r;
1440
1441 if (len < 0)
1442 return -EINVAL;
1443
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001444 r = ch_read(ch, data, len, user_buf);
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001445 if (r > 0)
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001446 if (!read_intr_blocked(ch))
1447 ch->notify_other_cpu();
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001448
1449 return r;
1450}
1451
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001452static int smd_packet_read(smd_channel_t *ch, void *data, int len, int user_buf)
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001453{
1454 unsigned long flags;
1455 int r;
1456
1457 if (len < 0)
1458 return -EINVAL;
1459
1460 if (len > ch->current_packet)
1461 len = ch->current_packet;
1462
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001463 r = ch_read(ch, data, len, user_buf);
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001464 if (r > 0)
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001465 if (!read_intr_blocked(ch))
1466 ch->notify_other_cpu();
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001467
1468 spin_lock_irqsave(&smd_lock, flags);
1469 ch->current_packet -= r;
1470 update_packet_state(ch);
1471 spin_unlock_irqrestore(&smd_lock, flags);
1472
1473 return r;
1474}
1475
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001476static int smd_packet_read_from_cb(smd_channel_t *ch, void *data, int len,
1477 int user_buf)
1478{
1479 int r;
1480
1481 if (len < 0)
1482 return -EINVAL;
1483
1484 if (len > ch->current_packet)
1485 len = ch->current_packet;
1486
1487 r = ch_read(ch, data, len, user_buf);
1488 if (r > 0)
1489 if (!read_intr_blocked(ch))
1490 ch->notify_other_cpu();
1491
1492 ch->current_packet -= r;
1493 update_packet_state(ch);
1494
1495 return r;
1496}
1497
Angshuman Sarkarac7d6252011-09-30 18:20:59 +05301498#if (defined(CONFIG_MSM_SMD_PKG4) || defined(CONFIG_MSM_SMD_PKG3))
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001499static int smd_alloc_v2(struct smd_channel *ch)
1500{
1501 struct smd_shared_v2 *shared2;
1502 void *buffer;
1503 unsigned buffer_sz;
1504
1505 shared2 = smem_alloc(SMEM_SMD_BASE_ID + ch->n, sizeof(*shared2));
1506 if (!shared2) {
1507 SMD_INFO("smem_alloc failed ch=%d\n", ch->n);
Angshuman Sarkarac7d6252011-09-30 18:20:59 +05301508 return -EINVAL;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001509 }
1510 buffer = smem_get_entry(SMEM_SMD_FIFO_BASE_ID + ch->n, &buffer_sz);
1511 if (!buffer) {
Angshuman Sarkarac7d6252011-09-30 18:20:59 +05301512 SMD_INFO("smem_get_entry failed\n");
1513 return -EINVAL;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001514 }
1515
1516 /* buffer must be a power-of-two size */
Angshuman Sarkarac7d6252011-09-30 18:20:59 +05301517 if (buffer_sz & (buffer_sz - 1)) {
1518 SMD_INFO("Buffer size: %u not power of two\n", buffer_sz);
1519 return -EINVAL;
1520 }
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001521 buffer_sz /= 2;
1522 ch->send = &shared2->ch0;
1523 ch->recv = &shared2->ch1;
1524 ch->send_data = buffer;
1525 ch->recv_data = buffer + buffer_sz;
1526 ch->fifo_size = buffer_sz;
1527 return 0;
1528}
1529
1530static int smd_alloc_v1(struct smd_channel *ch)
1531{
Angshuman Sarkarac7d6252011-09-30 18:20:59 +05301532 return -EINVAL;
1533}
1534
1535#else /* define v1 for older targets */
1536static int smd_alloc_v2(struct smd_channel *ch)
1537{
1538 return -EINVAL;
1539}
1540
1541static int smd_alloc_v1(struct smd_channel *ch)
1542{
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001543 struct smd_shared_v1 *shared1;
1544 shared1 = smem_alloc(ID_SMD_CHANNELS + ch->n, sizeof(*shared1));
1545 if (!shared1) {
1546 pr_err("smd_alloc_channel() cid %d does not exist\n", ch->n);
Angshuman Sarkarac7d6252011-09-30 18:20:59 +05301547 return -EINVAL;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001548 }
1549 ch->send = &shared1->ch0;
1550 ch->recv = &shared1->ch1;
1551 ch->send_data = shared1->data0;
1552 ch->recv_data = shared1->data1;
1553 ch->fifo_size = SMD_BUF_SIZE;
1554 return 0;
1555}
1556
Angshuman Sarkarac7d6252011-09-30 18:20:59 +05301557#endif
1558
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001559static int smd_alloc_channel(struct smd_alloc_elm *alloc_elm)
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001560{
1561 struct smd_channel *ch;
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001562
1563 ch = kzalloc(sizeof(struct smd_channel), GFP_KERNEL);
1564 if (ch == 0) {
1565 pr_err("smd_alloc_channel() out of memory\n");
Brian Swetland34f719b2009-10-30 16:22:05 -07001566 return -1;
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001567 }
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001568 ch->n = alloc_elm->cid;
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001569
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001570 if (smd_alloc_v2(ch) && smd_alloc_v1(ch)) {
Brian Swetland5b0f5a32009-04-26 18:38:49 -07001571 kfree(ch);
Brian Swetland34f719b2009-10-30 16:22:05 -07001572 return -1;
Brian Swetland5b0f5a32009-04-26 18:38:49 -07001573 }
1574
1575 ch->fifo_mask = ch->fifo_size - 1;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001576 ch->type = SMD_CHANNEL_TYPE(alloc_elm->type);
Brian Swetland5b0f5a32009-04-26 18:38:49 -07001577
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001578 if (ch->type == SMD_APPS_MODEM)
Brian Swetland5b0f5a32009-04-26 18:38:49 -07001579 ch->notify_other_cpu = notify_modem_smd;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001580 else if (ch->type == SMD_APPS_QDSP)
Brian Swetland5b0f5a32009-04-26 18:38:49 -07001581 ch->notify_other_cpu = notify_dsp_smd;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001582 else if (ch->type == SMD_APPS_DSPS)
1583 ch->notify_other_cpu = notify_dsps_smd;
1584 else
1585 ch->notify_other_cpu = notify_wcnss_smd;
Brian Swetland5b0f5a32009-04-26 18:38:49 -07001586
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001587 if (smd_is_packet(alloc_elm)) {
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001588 ch->read = smd_packet_read;
1589 ch->write = smd_packet_write;
1590 ch->read_avail = smd_packet_read_avail;
1591 ch->write_avail = smd_packet_write_avail;
1592 ch->update_state = update_packet_state;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001593 ch->read_from_cb = smd_packet_read_from_cb;
1594 ch->is_pkt_ch = 1;
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001595 } else {
1596 ch->read = smd_stream_read;
1597 ch->write = smd_stream_write;
1598 ch->read_avail = smd_stream_read_avail;
1599 ch->write_avail = smd_stream_write_avail;
1600 ch->update_state = update_stream_state;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001601 ch->read_from_cb = smd_stream_read;
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001602 }
1603
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001604 memcpy(ch->name, alloc_elm->name, SMD_MAX_CH_NAME_LEN);
1605 ch->name[SMD_MAX_CH_NAME_LEN-1] = 0;
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001606
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001607 ch->pdev.name = ch->name;
1608 ch->pdev.id = ch->type;
1609
1610 SMD_INFO("smd_alloc_channel() '%s' cid=%d\n",
1611 ch->name, ch->n);
1612
1613 mutex_lock(&smd_creation_mutex);
1614 list_add(&ch->ch_list, &smd_ch_closed_list);
1615 mutex_unlock(&smd_creation_mutex);
1616
1617 platform_device_register(&ch->pdev);
1618 if (!strncmp(ch->name, "LOOPBACK", 8) && ch->type == SMD_APPS_MODEM) {
1619 /* create a platform driver to be used by smd_tty driver
1620 * so that it can access the loopback port
1621 */
1622 loopback_tty_pdev.id = ch->type;
1623 platform_device_register(&loopback_tty_pdev);
1624 }
1625 return 0;
1626}
1627
1628static inline void notify_loopback_smd(void)
1629{
1630 unsigned long flags;
1631 struct smd_channel *ch;
1632
1633 spin_lock_irqsave(&smd_lock, flags);
1634 list_for_each_entry(ch, &smd_ch_list_loopback, ch_list) {
1635 ch->notify(ch->priv, SMD_EVENT_DATA);
1636 }
1637 spin_unlock_irqrestore(&smd_lock, flags);
1638}
1639
1640static int smd_alloc_loopback_channel(void)
1641{
1642 static struct smd_half_channel smd_loopback_ctl;
1643 static char smd_loopback_data[SMD_BUF_SIZE];
1644 struct smd_channel *ch;
1645
1646 ch = kzalloc(sizeof(struct smd_channel), GFP_KERNEL);
1647 if (ch == 0) {
1648 pr_err("%s: out of memory\n", __func__);
1649 return -1;
1650 }
1651 ch->n = SMD_LOOPBACK_CID;
1652
1653 ch->send = &smd_loopback_ctl;
1654 ch->recv = &smd_loopback_ctl;
1655 ch->send_data = smd_loopback_data;
1656 ch->recv_data = smd_loopback_data;
1657 ch->fifo_size = SMD_BUF_SIZE;
1658
1659 ch->fifo_mask = ch->fifo_size - 1;
1660 ch->type = SMD_LOOPBACK_TYPE;
1661 ch->notify_other_cpu = notify_loopback_smd;
1662
1663 ch->read = smd_stream_read;
1664 ch->write = smd_stream_write;
1665 ch->read_avail = smd_stream_read_avail;
1666 ch->write_avail = smd_stream_write_avail;
1667 ch->update_state = update_stream_state;
1668 ch->read_from_cb = smd_stream_read;
1669
1670 memset(ch->name, 0, 20);
1671 memcpy(ch->name, "local_loopback", 14);
1672
1673 ch->pdev.name = ch->name;
1674 ch->pdev.id = ch->type;
1675
1676 SMD_INFO("%s: '%s' cid=%d\n", __func__, ch->name, ch->n);
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001677
1678 mutex_lock(&smd_creation_mutex);
1679 list_add(&ch->ch_list, &smd_ch_closed_list);
1680 mutex_unlock(&smd_creation_mutex);
1681
1682 platform_device_register(&ch->pdev);
Brian Swetland34f719b2009-10-30 16:22:05 -07001683 return 0;
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001684}
1685
1686static void do_nothing_notify(void *priv, unsigned flags)
1687{
1688}
1689
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001690static void finalize_channel_close_fn(struct work_struct *work)
1691{
1692 unsigned long flags;
1693 struct smd_channel *ch;
1694 struct smd_channel *index;
1695
Eric Holmbergbb2b1fa2011-10-12 16:41:37 -06001696 mutex_lock(&smd_creation_mutex);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001697 spin_lock_irqsave(&smd_lock, flags);
1698 list_for_each_entry_safe(ch, index, &smd_ch_to_close_list, ch_list) {
1699 list_del(&ch->ch_list);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001700 list_add(&ch->ch_list, &smd_ch_closed_list);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001701 ch->notify(ch->priv, SMD_EVENT_REOPEN_READY);
1702 ch->notify = do_nothing_notify;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001703 }
1704 spin_unlock_irqrestore(&smd_lock, flags);
Eric Holmbergbb2b1fa2011-10-12 16:41:37 -06001705 mutex_unlock(&smd_creation_mutex);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001706}
1707
1708struct smd_channel *smd_get_channel(const char *name, uint32_t type)
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001709{
1710 struct smd_channel *ch;
1711
1712 mutex_lock(&smd_creation_mutex);
1713 list_for_each_entry(ch, &smd_ch_closed_list, ch_list) {
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001714 if (!strcmp(name, ch->name) &&
1715 (type == ch->type)) {
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001716 list_del(&ch->ch_list);
1717 mutex_unlock(&smd_creation_mutex);
1718 return ch;
1719 }
1720 }
1721 mutex_unlock(&smd_creation_mutex);
1722
1723 return NULL;
1724}
1725
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001726int smd_named_open_on_edge(const char *name, uint32_t edge,
1727 smd_channel_t **_ch,
1728 void *priv, void (*notify)(void *, unsigned))
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001729{
1730 struct smd_channel *ch;
1731 unsigned long flags;
1732
1733 if (smd_initialized == 0) {
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001734 SMD_INFO("smd_open() before smd_init()\n");
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001735 return -ENODEV;
1736 }
1737
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001738 SMD_DBG("smd_open('%s', %p, %p)\n", name, priv, notify);
1739
1740 ch = smd_get_channel(name, edge);
Eric Holmbergbb2b1fa2011-10-12 16:41:37 -06001741 if (!ch) {
Eric Holmbergbb2b1fa2011-10-12 16:41:37 -06001742 /* check closing list for port */
1743 spin_lock_irqsave(&smd_lock, flags);
1744 list_for_each_entry(ch, &smd_ch_closing_list, ch_list) {
1745 if (!strncmp(name, ch->name, 20) &&
1746 (edge == ch->type)) {
1747 /* channel exists, but is being closed */
1748 spin_unlock_irqrestore(&smd_lock, flags);
1749 return -EAGAIN;
1750 }
1751 }
1752
1753 /* check closing workqueue list for port */
1754 list_for_each_entry(ch, &smd_ch_to_close_list, ch_list) {
1755 if (!strncmp(name, ch->name, 20) &&
1756 (edge == ch->type)) {
1757 /* channel exists, but is being closed */
1758 spin_unlock_irqrestore(&smd_lock, flags);
1759 return -EAGAIN;
1760 }
1761 }
1762 spin_unlock_irqrestore(&smd_lock, flags);
1763
1764 /* one final check to handle closing->closed race condition */
1765 ch = smd_get_channel(name, edge);
1766 if (!ch)
1767 return -ENODEV;
1768 }
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001769
1770 if (notify == 0)
1771 notify = do_nothing_notify;
1772
1773 ch->notify = notify;
1774 ch->current_packet = 0;
1775 ch->last_state = SMD_SS_CLOSED;
1776 ch->priv = priv;
1777
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001778 if (edge == SMD_LOOPBACK_TYPE) {
1779 ch->last_state = SMD_SS_OPENED;
1780 ch->send->state = SMD_SS_OPENED;
1781 ch->send->fDSR = 1;
1782 ch->send->fCTS = 1;
1783 ch->send->fCD = 1;
1784 }
1785
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001786 *_ch = ch;
1787
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001788 SMD_DBG("smd_open: opening '%s'\n", ch->name);
1789
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001790 spin_lock_irqsave(&smd_lock, flags);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001791 if (SMD_CHANNEL_TYPE(ch->type) == SMD_APPS_MODEM)
Brian Swetland37521a32009-07-01 18:30:47 -07001792 list_add(&ch->ch_list, &smd_ch_list_modem);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001793 else if (SMD_CHANNEL_TYPE(ch->type) == SMD_APPS_QDSP)
Brian Swetland37521a32009-07-01 18:30:47 -07001794 list_add(&ch->ch_list, &smd_ch_list_dsp);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001795 else if (SMD_CHANNEL_TYPE(ch->type) == SMD_APPS_DSPS)
1796 list_add(&ch->ch_list, &smd_ch_list_dsps);
1797 else if (SMD_CHANNEL_TYPE(ch->type) == SMD_APPS_WCNSS)
1798 list_add(&ch->ch_list, &smd_ch_list_wcnss);
1799 else
1800 list_add(&ch->ch_list, &smd_ch_list_loopback);
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001801
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001802 SMD_DBG("%s: opening ch %d\n", __func__, ch->n);
1803
1804 if (edge != SMD_LOOPBACK_TYPE)
1805 smd_state_change(ch, ch->last_state, SMD_SS_OPENING);
1806
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001807 spin_unlock_irqrestore(&smd_lock, flags);
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001808
1809 return 0;
1810}
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001811EXPORT_SYMBOL(smd_named_open_on_edge);
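/*
 * Usage sketch (not part of this driver): a minimal client of the API
 * above that opens a channel on the modem edge and defers reads to
 * process context.  The channel name "EXAMPLE", example_ch, example_work
 * and the example_* functions are hypothetical; only the smd_* calls,
 * SMD_EVENT_DATA and SMD_APPS_MODEM come from this file.
 *
 *	static smd_channel_t *example_ch;
 *	static struct work_struct example_work;
 *
 *	static void example_read_work(struct work_struct *work)
 *	{
 *		char buf[64];
 *
 *		while (smd_read_avail(example_ch) > 0)
 *			if (smd_read(example_ch, buf, sizeof(buf)) <= 0)
 *				break;
 *	}
 *
 *	static void example_notify(void *priv, unsigned event)
 *	{
 *		if (event == SMD_EVENT_DATA)
 *			schedule_work(&example_work);
 *	}
 *
 *	static int example_open(void)
 *	{
 *		INIT_WORK(&example_work, example_read_work);
 *		return smd_named_open_on_edge("EXAMPLE", SMD_APPS_MODEM,
 *					      &example_ch, NULL,
 *					      example_notify);
 *	}
 */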
1812
1813
1814int smd_open(const char *name, smd_channel_t **_ch,
1815 void *priv, void (*notify)(void *, unsigned))
1816{
1817 return smd_named_open_on_edge(name, SMD_APPS_MODEM, _ch, priv,
1818 notify);
1819}
1820EXPORT_SYMBOL(smd_open);
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001821
1822int smd_close(smd_channel_t *ch)
1823{
1824 unsigned long flags;
1825
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001826 if (ch == 0)
1827 return -1;
1828
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001829 SMD_INFO("smd_close(%s)\n", ch->name);
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001830
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001831 spin_lock_irqsave(&smd_lock, flags);
1832 list_del(&ch->ch_list);
1833 if (ch->n == SMD_LOOPBACK_CID) {
1834 ch->send->fDSR = 0;
1835 ch->send->fCTS = 0;
1836 ch->send->fCD = 0;
1837 ch->send->state = SMD_SS_CLOSED;
1838 } else
1839 ch_set_state(ch, SMD_SS_CLOSED);
1840
1841 if (ch->recv->state == SMD_SS_OPENED) {
1842 list_add(&ch->ch_list, &smd_ch_closing_list);
1843 spin_unlock_irqrestore(&smd_lock, flags);
1844 } else {
1845 spin_unlock_irqrestore(&smd_lock, flags);
1846 ch->notify = do_nothing_notify;
1847 mutex_lock(&smd_creation_mutex);
1848 list_add(&ch->ch_list, &smd_ch_closed_list);
1849 mutex_unlock(&smd_creation_mutex);
1850 }
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001851
1852 return 0;
1853}
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001854EXPORT_SYMBOL(smd_close);
1855
1856int smd_write_start(smd_channel_t *ch, int len)
1857{
1858 int ret;
1859 unsigned hdr[5];
1860
1861 if (!ch) {
1862 pr_err("%s: Invalid channel specified\n", __func__);
1863 return -ENODEV;
1864 }
1865 if (!ch->is_pkt_ch) {
1866 pr_err("%s: non-packet channel specified\n", __func__);
1867 return -EACCES;
1868 }
1869 if (len < 1) {
1870 pr_err("%s: invalid length: %d\n", __func__, len);
1871 return -EINVAL;
1872 }
1873
1874 if (ch->pending_pkt_sz) {
1875 pr_err("%s: packet of size: %d in progress\n", __func__,
1876 ch->pending_pkt_sz);
1877 return -EBUSY;
1878 }
1879 ch->pending_pkt_sz = len;
1880
1881 if (smd_stream_write_avail(ch) < (SMD_HEADER_SIZE)) {
1882 ch->pending_pkt_sz = 0;
1883 SMD_DBG("%s: no space to write packet header\n", __func__);
1884 return -EAGAIN;
1885 }
1886
1887 hdr[0] = len;
1888 hdr[1] = hdr[2] = hdr[3] = hdr[4] = 0;
1889
1890
1891 ret = smd_stream_write(ch, hdr, sizeof(hdr), 0);
1892 if (ret < 0 || ret != sizeof(hdr)) {
1893 ch->pending_pkt_sz = 0;
1894		pr_err("%s: failed to write packet header\n", __func__);
1895 return -EPERM;
1896 }
1897 return 0;
1898}
1899EXPORT_SYMBOL(smd_write_start);
1900
1901int smd_write_segment(smd_channel_t *ch, void *data, int len, int user_buf)
1902{
1903 int bytes_written;
1904
1905 if (!ch) {
1906 pr_err("%s: Invalid channel specified\n", __func__);
1907 return -ENODEV;
1908 }
1909 if (len < 1) {
1910 pr_err("%s: invalid length: %d\n", __func__, len);
1911 return -EINVAL;
1912 }
1913
1914 if (!ch->pending_pkt_sz) {
1915 pr_err("%s: no transaction in progress\n", __func__);
1916 return -ENOEXEC;
1917 }
1918 if (ch->pending_pkt_sz - len < 0) {
1919 pr_err("%s: segment of size: %d will make packet go over "
1920 "length\n", __func__, len);
1921 return -EINVAL;
1922 }
1923
1924 bytes_written = smd_stream_write(ch, data, len, user_buf);
1925
1926 ch->pending_pkt_sz -= bytes_written;
1927
1928 return bytes_written;
1929}
1930EXPORT_SYMBOL(smd_write_segment);
1931
1932int smd_write_end(smd_channel_t *ch)
1933{
1934
1935 if (!ch) {
1936 pr_err("%s: Invalid channel specified\n", __func__);
1937 return -ENODEV;
1938 }
1939 if (ch->pending_pkt_sz) {
1940 pr_err("%s: current packet not completely written\n", __func__);
1941 return -E2BIG;
1942 }
1943
1944 return 0;
1945}
1946EXPORT_SYMBOL(smd_write_end);
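/*
 * Usage sketch (assumption, not taken from a real client): sending one
 * packet with the smd_write_start()/smd_write_segment()/smd_write_end()
 * sequence above.  example_send_packet and its arguments are
 * hypothetical; a real caller would typically wait for write space
 * instead of spinning when a segment only partially fits.
 *
 *	static int example_send_packet(smd_channel_t *ch, char *buf, int len)
 *	{
 *		int rc, written = 0;
 *
 *		rc = smd_write_start(ch, len);
 *		if (rc < 0)
 *			return rc;
 *
 *		while (written < len) {
 *			rc = smd_write_segment(ch, buf + written,
 *					       len - written, 0);
 *			if (rc < 0)
 *				return rc;
 *			written += rc;
 *		}
 *
 *		return smd_write_end(ch);
 *	}
 */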
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001947
1948int smd_read(smd_channel_t *ch, void *data, int len)
1949{
Jack Pham1b236d12012-03-19 15:27:18 -07001950 if (!ch) {
1951 pr_err("%s: Invalid channel specified\n", __func__);
1952 return -ENODEV;
1953 }
1954
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001955 return ch->read(ch, data, len, 0);
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001956}
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001957EXPORT_SYMBOL(smd_read);
1958
1959int smd_read_user_buffer(smd_channel_t *ch, void *data, int len)
1960{
Jack Pham1b236d12012-03-19 15:27:18 -07001961 if (!ch) {
1962 pr_err("%s: Invalid channel specified\n", __func__);
1963 return -ENODEV;
1964 }
1965
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001966 return ch->read(ch, data, len, 1);
1967}
1968EXPORT_SYMBOL(smd_read_user_buffer);
1969
1970int smd_read_from_cb(smd_channel_t *ch, void *data, int len)
1971{
Jack Pham1b236d12012-03-19 15:27:18 -07001972 if (!ch) {
1973 pr_err("%s: Invalid channel specified\n", __func__);
1974 return -ENODEV;
1975 }
1976
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001977 return ch->read_from_cb(ch, data, len, 0);
1978}
1979EXPORT_SYMBOL(smd_read_from_cb);
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001980
1981int smd_write(smd_channel_t *ch, const void *data, int len)
1982{
Jack Pham1b236d12012-03-19 15:27:18 -07001983 if (!ch) {
1984 pr_err("%s: Invalid channel specified\n", __func__);
1985 return -ENODEV;
1986 }
1987
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001988 return ch->pending_pkt_sz ? -EBUSY : ch->write(ch, data, len, 0);
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001989}
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001990EXPORT_SYMBOL(smd_write);
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001991
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001992int smd_write_user_buffer(smd_channel_t *ch, const void *data, int len)
Brian Swetland636eb9c2009-12-07 15:28:08 -08001993{
Jack Pham1b236d12012-03-19 15:27:18 -07001994 if (!ch) {
1995 pr_err("%s: Invalid channel specified\n", __func__);
1996 return -ENODEV;
1997 }
1998
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001999 return ch->pending_pkt_sz ? -EBUSY : ch->write(ch, data, len, 1);
Brian Swetland636eb9c2009-12-07 15:28:08 -08002000}
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002001EXPORT_SYMBOL(smd_write_user_buffer);
Brian Swetland636eb9c2009-12-07 15:28:08 -08002002
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002003int smd_read_avail(smd_channel_t *ch)
2004{
Jack Pham1b236d12012-03-19 15:27:18 -07002005 if (!ch) {
2006 pr_err("%s: Invalid channel specified\n", __func__);
2007 return -ENODEV;
2008 }
2009
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002010 return ch->read_avail(ch);
2011}
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002012EXPORT_SYMBOL(smd_read_avail);
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002013
2014int smd_write_avail(smd_channel_t *ch)
2015{
Jack Pham1b236d12012-03-19 15:27:18 -07002016 if (!ch) {
2017 pr_err("%s: Invalid channel specified\n", __func__);
2018 return -ENODEV;
2019 }
2020
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002021 return ch->write_avail(ch);
2022}
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002023EXPORT_SYMBOL(smd_write_avail);
2024
2025void smd_enable_read_intr(smd_channel_t *ch)
2026{
2027 if (ch)
2028 ch->send->fBLOCKREADINTR = 0;
2029}
2030EXPORT_SYMBOL(smd_enable_read_intr);
2031
2032void smd_disable_read_intr(smd_channel_t *ch)
2033{
2034 if (ch)
2035 ch->send->fBLOCKREADINTR = 1;
2036}
2037EXPORT_SYMBOL(smd_disable_read_intr);
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002038
2039int smd_wait_until_readable(smd_channel_t *ch, int bytes)
2040{
2041 return -1;
2042}
2043
2044int smd_wait_until_writable(smd_channel_t *ch, int bytes)
2045{
2046 return -1;
2047}
2048
2049int smd_cur_packet_size(smd_channel_t *ch)
2050{
Jack Pham1b236d12012-03-19 15:27:18 -07002051 if (!ch) {
2052 pr_err("%s: Invalid channel specified\n", __func__);
2053 return -ENODEV;
2054 }
2055
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002056 return ch->current_packet;
2057}
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002058EXPORT_SYMBOL(smd_cur_packet_size);
2059
2060int smd_tiocmget(smd_channel_t *ch)
2061{
Jack Pham1b236d12012-03-19 15:27:18 -07002062 if (!ch) {
2063 pr_err("%s: Invalid channel specified\n", __func__);
2064 return -ENODEV;
2065 }
2066
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002067 return (ch->recv->fDSR ? TIOCM_DSR : 0) |
2068 (ch->recv->fCTS ? TIOCM_CTS : 0) |
2069 (ch->recv->fCD ? TIOCM_CD : 0) |
2070 (ch->recv->fRI ? TIOCM_RI : 0) |
2071 (ch->send->fCTS ? TIOCM_RTS : 0) |
2072 (ch->send->fDSR ? TIOCM_DTR : 0);
2073}
2074EXPORT_SYMBOL(smd_tiocmget);
2075
Vamsi Krishnacb12a102011-08-17 15:18:26 -07002076/* this api will be called while holding smd_lock */
2077int
2078smd_tiocmset_from_cb(smd_channel_t *ch, unsigned int set, unsigned int clear)
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002079{
Jack Pham1b236d12012-03-19 15:27:18 -07002080 if (!ch) {
2081 pr_err("%s: Invalid channel specified\n", __func__);
2082 return -ENODEV;
2083 }
2084
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002085 if (set & TIOCM_DTR)
2086 ch->send->fDSR = 1;
2087
2088 if (set & TIOCM_RTS)
2089 ch->send->fCTS = 1;
2090
2091 if (clear & TIOCM_DTR)
2092 ch->send->fDSR = 0;
2093
2094 if (clear & TIOCM_RTS)
2095 ch->send->fCTS = 0;
2096
2097 ch->send->fSTATE = 1;
2098 barrier();
2099 ch->notify_other_cpu();
Vamsi Krishnacb12a102011-08-17 15:18:26 -07002100
2101 return 0;
2102}
2103EXPORT_SYMBOL(smd_tiocmset_from_cb);
2104
2105int smd_tiocmset(smd_channel_t *ch, unsigned int set, unsigned int clear)
2106{
2107 unsigned long flags;
2108
Jack Pham1b236d12012-03-19 15:27:18 -07002109 if (!ch) {
2110 pr_err("%s: Invalid channel specified\n", __func__);
2111 return -ENODEV;
2112 }
2113
Vamsi Krishnacb12a102011-08-17 15:18:26 -07002114 spin_lock_irqsave(&smd_lock, flags);
2115 smd_tiocmset_from_cb(ch, set, clear);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002116 spin_unlock_irqrestore(&smd_lock, flags);
2117
2118 return 0;
2119}
2120EXPORT_SYMBOL(smd_tiocmset);
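/*
 * Usage sketch (assumption): driving the emulated modem-control signals
 * on an already opened channel "ch" (as returned by
 * smd_named_open_on_edge).  Setting TIOCM_DTR/TIOCM_RTS raises fDSR/fCTS
 * on the local half-channel, as implemented in smd_tiocmset_from_cb().
 *
 *	smd_tiocmset(ch, TIOCM_DTR | TIOCM_RTS, 0);
 *	if (smd_tiocmget(ch) & TIOCM_CD)
 *		pr_debug("example: remote end reports carrier\n");
 *	smd_tiocmset(ch, 0, TIOCM_DTR | TIOCM_RTS);
 */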
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002121
2122
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002123/* -------------------------------------------------------------------------- */
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002124
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002125/* smem_alloc returns the pointer to smem item if it is already allocated.
2126 * Otherwise, it returns NULL.
2127 */
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002128void *smem_alloc(unsigned id, unsigned size)
2129{
2130 return smem_find(id, size);
2131}
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002132EXPORT_SYMBOL(smem_alloc);
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002133
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002134/* smem_alloc2 returns the pointer to smem item. If it is not allocated,
2135 * it allocates it and then returns the pointer to it.
2136 */
Angshuman Sarkar4eade0d2011-08-17 14:06:23 +05302137void *smem_alloc2(unsigned id, unsigned size_in)
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002138{
2139 struct smem_shared *shared = (void *) MSM_SHARED_RAM_BASE;
2140 struct smem_heap_entry *toc = shared->heap_toc;
2141 unsigned long flags;
2142 void *ret = NULL;
2143
2144 if (!shared->heap_info.initialized) {
2145 pr_err("%s: smem heap info not initialized\n", __func__);
2146 return NULL;
2147 }
2148
2149 if (id >= SMEM_NUM_ITEMS)
2150 return NULL;
2151
2152 size_in = ALIGN(size_in, 8);
2153 remote_spin_lock_irqsave(&remote_spinlock, flags);
2154 if (toc[id].allocated) {
2155 SMD_DBG("%s: %u already allocated\n", __func__, id);
2156 if (size_in != toc[id].size)
2157 pr_err("%s: wrong size %u (expected %u)\n",
2158 __func__, toc[id].size, size_in);
2159 else
2160 ret = (void *)(MSM_SHARED_RAM_BASE + toc[id].offset);
2161 } else if (id > SMEM_FIXED_ITEM_LAST) {
2162 SMD_DBG("%s: allocating %u\n", __func__, id);
2163 if (shared->heap_info.heap_remaining >= size_in) {
2164 toc[id].offset = shared->heap_info.free_offset;
2165 toc[id].size = size_in;
2166 wmb();
2167 toc[id].allocated = 1;
2168
2169 shared->heap_info.free_offset += size_in;
2170 shared->heap_info.heap_remaining -= size_in;
2171 ret = (void *)(MSM_SHARED_RAM_BASE + toc[id].offset);
2172 } else
2173 pr_err("%s: not enough memory %u (required %u)\n",
2174 __func__, shared->heap_info.heap_remaining,
2175 size_in);
2176 }
2177 wmb();
2178 remote_spin_unlock_irqrestore(&remote_spinlock, flags);
2179 return ret;
2180}
Angshuman Sarkar4eade0d2011-08-17 14:06:23 +05302181EXPORT_SYMBOL(smem_alloc2);
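/*
 * Usage sketch (assumption): looking up or allocating a shared-memory
 * item with smem_alloc2().  The item ID and struct layout below are
 * purely illustrative, not real SMEM definitions; sizes are rounded up
 * to an 8-byte multiple by smem_alloc2() itself.
 *
 *	struct example_shared {
 *		uint32_t version;
 *		uint32_t flags;
 *	};
 *
 *	static struct example_shared *example_get_shared(unsigned item_id)
 *	{
 *		return smem_alloc2(item_id, sizeof(struct example_shared));
 *	}
 */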
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002182
2183void *smem_get_entry(unsigned id, unsigned *size)
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002184{
2185 struct smem_shared *shared = (void *) MSM_SHARED_RAM_BASE;
2186 struct smem_heap_entry *toc = shared->heap_toc;
Angshuman Sarkar7ee0dca2011-08-22 21:37:34 +05302187 int use_spinlocks = spinlocks_initialized;
2188 void *ret = 0;
2189 unsigned long flags = 0;
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002190
2191 if (id >= SMEM_NUM_ITEMS)
Angshuman Sarkar7ee0dca2011-08-22 21:37:34 +05302192 return ret;
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002193
Angshuman Sarkar7ee0dca2011-08-22 21:37:34 +05302194 if (use_spinlocks)
2195 remote_spin_lock_irqsave(&remote_spinlock, flags);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002196 /* toc is in device memory and cannot be speculatively accessed */
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002197 if (toc[id].allocated) {
2198 *size = toc[id].size;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002199 barrier();
Angshuman Sarkar7ee0dca2011-08-22 21:37:34 +05302200 ret = (void *) (MSM_SHARED_RAM_BASE + toc[id].offset);
Brian Swetland5b0f5a32009-04-26 18:38:49 -07002201 } else {
2202 *size = 0;
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002203 }
Angshuman Sarkar7ee0dca2011-08-22 21:37:34 +05302204 if (use_spinlocks)
2205 remote_spin_unlock_irqrestore(&remote_spinlock, flags);
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002206
Angshuman Sarkar7ee0dca2011-08-22 21:37:34 +05302207 return ret;
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002208}
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002209EXPORT_SYMBOL(smem_get_entry);
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002210
2211void *smem_find(unsigned id, unsigned size_in)
2212{
2213 unsigned size;
2214 void *ptr;
2215
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002216 ptr = smem_get_entry(id, &size);
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002217 if (!ptr)
2218 return 0;
2219
2220 size_in = ALIGN(size_in, 8);
2221 if (size_in != size) {
2222 pr_err("smem_find(%d, %d): wrong size %d\n",
2223 id, size_in, size);
2224 return 0;
2225 }
2226
2227 return ptr;
2228}
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002229EXPORT_SYMBOL(smem_find);
2230
2231static int smsm_cb_init(void)
2232{
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002233 struct smsm_state_info *state_info;
2234 int n;
2235 int ret = 0;
2236
2237 smsm_states = kmalloc(sizeof(struct smsm_state_info)*SMSM_NUM_ENTRIES,
2238 GFP_KERNEL);
2239
2240 if (!smsm_states) {
2241 pr_err("%s: SMSM init failed\n", __func__);
2242 return -ENOMEM;
2243 }
2244
Eric Holmbergc8002902011-09-16 13:55:57 -06002245 mutex_lock(&smsm_lock);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002246 for (n = 0; n < SMSM_NUM_ENTRIES; n++) {
2247 state_info = &smsm_states[n];
2248 state_info->last_value = __raw_readl(SMSM_STATE_ADDR(n));
2249 INIT_LIST_HEAD(&state_info->callbacks);
2250 }
Eric Holmbergc8002902011-09-16 13:55:57 -06002251 mutex_unlock(&smsm_lock);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002252
2253 return ret;
2254}
2255
2256static int smsm_init(void)
2257{
2258 struct smem_shared *shared = (void *) MSM_SHARED_RAM_BASE;
2259 int i;
2260 struct smsm_size_info_type *smsm_size_info;
2261
2262 i = remote_spin_lock_init(&remote_spinlock, SMEM_SPINLOCK_SMEM_ALLOC);
2263 if (i) {
2264 pr_err("%s: remote spinlock init failed %d\n", __func__, i);
2265 return i;
2266 }
Angshuman Sarkar7ee0dca2011-08-22 21:37:34 +05302267 spinlocks_initialized = 1;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002268
2269 smsm_size_info = smem_alloc(SMEM_SMSM_SIZE_INFO,
2270 sizeof(struct smsm_size_info_type));
2271 if (smsm_size_info) {
2272 SMSM_NUM_ENTRIES = smsm_size_info->num_entries;
2273 SMSM_NUM_HOSTS = smsm_size_info->num_hosts;
2274 }
2275
Eric Holmbergc7e8daf2011-12-28 11:49:21 -07002276 i = kfifo_alloc(&smsm_snapshot_fifo,
2277 sizeof(uint32_t) * SMSM_NUM_ENTRIES * SMSM_SNAPSHOT_CNT,
2278 GFP_KERNEL);
2279 if (i) {
2280 pr_err("%s: SMSM state fifo alloc failed %d\n", __func__, i);
2281 return i;
2282 }
Eric Holmberg59a9f942012-03-19 10:04:22 -06002283 wake_lock_init(&smsm_snapshot_wakelock, WAKE_LOCK_SUSPEND,
2284 "smsm_snapshot");
Eric Holmbergc7e8daf2011-12-28 11:49:21 -07002285
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002286 if (!smsm_info.state) {
2287 smsm_info.state = smem_alloc2(ID_SHARED_STATE,
2288 SMSM_NUM_ENTRIES *
2289 sizeof(uint32_t));
2290
2291 if (smsm_info.state) {
2292 __raw_writel(0, SMSM_STATE_ADDR(SMSM_APPS_STATE));
2293 if ((shared->version[VERSION_MODEM] >> 16) >= 0xB)
2294 __raw_writel(0, \
2295 SMSM_STATE_ADDR(SMSM_APPS_DEM_I));
2296 }
2297 }
2298
2299 if (!smsm_info.intr_mask) {
2300 smsm_info.intr_mask = smem_alloc2(SMEM_SMSM_CPU_INTR_MASK,
2301 SMSM_NUM_ENTRIES *
2302 SMSM_NUM_HOSTS *
2303 sizeof(uint32_t));
2304
2305 if (smsm_info.intr_mask)
2306 for (i = 0; i < SMSM_NUM_ENTRIES; i++)
2307 __raw_writel(0xffffffff,
2308 SMSM_INTR_MASK_ADDR(i, SMSM_APPS));
2309 }
2310
2311 if (!smsm_info.intr_mux)
2312 smsm_info.intr_mux = smem_alloc2(SMEM_SMD_SMSM_INTR_MUX,
2313 SMSM_NUM_INTR_MUX *
2314 sizeof(uint32_t));
2315
2316 i = smsm_cb_init();
2317 if (i)
2318 return i;
2319
2320 wmb();
Karthikeyan Ramasubramanian7069c482012-03-22 09:21:20 -06002321 smsm_driver_state_notify(SMSM_INIT, NULL);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002322 return 0;
2323}
2324
2325void smsm_reset_modem(unsigned mode)
2326{
2327 if (mode == SMSM_SYSTEM_DOWNLOAD) {
2328 mode = SMSM_RESET | SMSM_SYSTEM_DOWNLOAD;
2329 } else if (mode == SMSM_MODEM_WAIT) {
2330 mode = SMSM_RESET | SMSM_MODEM_WAIT;
2331 } else { /* reset_mode is SMSM_RESET or default */
2332 mode = SMSM_RESET;
2333 }
2334
2335 smsm_change_state(SMSM_APPS_STATE, mode, mode);
2336}
2337EXPORT_SYMBOL(smsm_reset_modem);
2338
2339void smsm_reset_modem_cont(void)
2340{
2341 unsigned long flags;
2342 uint32_t state;
2343
2344 if (!smsm_info.state)
2345 return;
2346
2347 spin_lock_irqsave(&smem_lock, flags);
2348 state = __raw_readl(SMSM_STATE_ADDR(SMSM_APPS_STATE)) \
2349 & ~SMSM_MODEM_WAIT;
2350 __raw_writel(state, SMSM_STATE_ADDR(SMSM_APPS_STATE));
2351 wmb();
2352 spin_unlock_irqrestore(&smem_lock, flags);
2353}
2354EXPORT_SYMBOL(smsm_reset_modem_cont);
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002355
Eric Holmbergda31d042012-03-28 14:01:02 -06002356static void smsm_cb_snapshot(uint32_t use_wakelock)
Eric Holmbergc7e8daf2011-12-28 11:49:21 -07002357{
2358 int n;
2359 uint32_t new_state;
Eric Holmberg59a9f942012-03-19 10:04:22 -06002360 unsigned long flags;
Eric Holmbergc7e8daf2011-12-28 11:49:21 -07002361 int ret;
2362
2363 ret = kfifo_avail(&smsm_snapshot_fifo);
Eric Holmbergda31d042012-03-28 14:01:02 -06002364 if (ret < SMSM_SNAPSHOT_SIZE) {
Eric Holmbergc7e8daf2011-12-28 11:49:21 -07002365 pr_err("%s: SMSM snapshot full %d\n", __func__, ret);
2366 return;
2367 }
2368
Eric Holmberg96b55f62012-04-03 19:10:46 -06002369 /*
2370 * To avoid a race condition with notify_smsm_cb_clients_worker, the
2371 * following sequence must be followed:
2372 * 1) increment snapshot count
2373 * 2) insert data into FIFO
2374 *
2375 * Potentially in parallel, the worker:
2376 * a) verifies >= 1 snapshots are in FIFO
2377 * b) processes snapshot
2378 * c) decrements reference count
2379 *
2380 * This order ensures that 1 will always occur before abc.
2381 */
Eric Holmbergda31d042012-03-28 14:01:02 -06002382 if (use_wakelock) {
2383 spin_lock_irqsave(&smsm_snapshot_count_lock, flags);
2384 if (smsm_snapshot_count == 0) {
2385 SMx_POWER_INFO("SMSM snapshot wake lock\n");
2386 wake_lock(&smsm_snapshot_wakelock);
2387 }
2388 ++smsm_snapshot_count;
2389 spin_unlock_irqrestore(&smsm_snapshot_count_lock, flags);
2390 }
Eric Holmberg96b55f62012-04-03 19:10:46 -06002391
2392 /* queue state entries */
2393 for (n = 0; n < SMSM_NUM_ENTRIES; n++) {
2394 new_state = __raw_readl(SMSM_STATE_ADDR(n));
2395
2396 ret = kfifo_in(&smsm_snapshot_fifo,
2397 &new_state, sizeof(new_state));
2398 if (ret != sizeof(new_state)) {
2399 pr_err("%s: SMSM snapshot failure %d\n", __func__, ret);
2400 goto restore_snapshot_count;
2401 }
2402 }
2403
2404 /* queue wakelock usage flag */
2405 ret = kfifo_in(&smsm_snapshot_fifo,
2406 &use_wakelock, sizeof(use_wakelock));
2407 if (ret != sizeof(use_wakelock)) {
2408 pr_err("%s: SMSM snapshot failure %d\n", __func__, ret);
2409 goto restore_snapshot_count;
2410 }
2411
Eric Holmbergc7e8daf2011-12-28 11:49:21 -07002412 schedule_work(&smsm_cb_work);
Eric Holmberg96b55f62012-04-03 19:10:46 -06002413 return;
2414
2415restore_snapshot_count:
2416 if (use_wakelock) {
2417 spin_lock_irqsave(&smsm_snapshot_count_lock, flags);
2418 if (smsm_snapshot_count) {
2419 --smsm_snapshot_count;
2420 if (smsm_snapshot_count == 0) {
2421 SMx_POWER_INFO("SMSM snapshot wake unlock\n");
2422 wake_unlock(&smsm_snapshot_wakelock);
2423 }
2424 } else {
2425 pr_err("%s: invalid snapshot count\n", __func__);
2426 }
2427 spin_unlock_irqrestore(&smsm_snapshot_count_lock, flags);
2428 }
Eric Holmbergc7e8daf2011-12-28 11:49:21 -07002429}
2430
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002431static irqreturn_t smsm_irq_handler(int irq, void *data)
2432{
2433 unsigned long flags;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002434
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002435 if (irq == INT_ADSP_A11_SMSM) {
Eric Holmberg6282c5d2011-10-27 17:30:57 -06002436 uint32_t mux_val;
2437 static uint32_t prev_smem_q6_apps_smsm;
2438
2439 if (smsm_info.intr_mux && cpu_is_qsd8x50()) {
2440 mux_val = __raw_readl(
2441 SMSM_INTR_MUX_ADDR(SMEM_Q6_APPS_SMSM));
2442 if (mux_val != prev_smem_q6_apps_smsm)
2443 prev_smem_q6_apps_smsm = mux_val;
2444 }
2445
Eric Holmbergc7e8daf2011-12-28 11:49:21 -07002446 spin_lock_irqsave(&smem_lock, flags);
Eric Holmbergda31d042012-03-28 14:01:02 -06002447 smsm_cb_snapshot(1);
Eric Holmbergc7e8daf2011-12-28 11:49:21 -07002448 spin_unlock_irqrestore(&smem_lock, flags);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002449 return IRQ_HANDLED;
2450 }
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002451
2452 spin_lock_irqsave(&smem_lock, flags);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002453 if (!smsm_info.state) {
2454 SMSM_INFO("<SM NO STATE>\n");
2455 } else {
2456 unsigned old_apps, apps;
2457 unsigned modm = __raw_readl(SMSM_STATE_ADDR(SMSM_MODEM_STATE));
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002458
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002459 old_apps = apps = __raw_readl(SMSM_STATE_ADDR(SMSM_APPS_STATE));
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002460
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002461 SMSM_DBG("<SM %08x %08x>\n", apps, modm);
2462 if (apps & SMSM_RESET) {
2463 /* If we get an interrupt and the apps SMSM_RESET
2464 bit is already set, the modem is acking the
2465 app's reset ack. */
Eric Holmberg2bb6ccd2012-03-13 13:05:14 -06002466 if (!disable_smsm_reset_handshake)
Angshuman Sarkaread67bd2011-09-21 20:13:12 +05302467 apps &= ~SMSM_RESET;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002468 /* Issue a fake irq to handle any
2469 * smd state changes during reset
2470 */
2471 smd_fake_irq_handler(0);
Brian Swetland5b0f5a32009-04-26 18:38:49 -07002472
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002473 /* queue modem restart notify chain */
2474 modem_queue_start_reset_notify();
2475
2476 } else if (modm & SMSM_RESET) {
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002477 pr_err("\nSMSM: Modem SMSM state changed to SMSM_RESET.");
Ram Somani8b9589f2012-04-03 12:07:18 +05302478 if (!disable_smsm_reset_handshake) {
2479 apps |= SMSM_RESET;
2480 flush_cache_all();
2481 outer_flush_all();
2482 }
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002483 modem_queue_start_reset_notify();
2484
2485 } else if (modm & SMSM_INIT) {
2486 if (!(apps & SMSM_INIT)) {
2487 apps |= SMSM_INIT;
2488 modem_queue_smsm_init_notify();
2489 }
2490
2491 if (modm & SMSM_SMDINIT)
2492 apps |= SMSM_SMDINIT;
2493 if ((apps & (SMSM_INIT | SMSM_SMDINIT | SMSM_RPCINIT)) ==
2494 (SMSM_INIT | SMSM_SMDINIT | SMSM_RPCINIT))
2495 apps |= SMSM_RUN;
2496 } else if (modm & SMSM_SYSTEM_DOWNLOAD) {
2497 pr_err("\nSMSM: Modem SMSM state changed to SMSM_SYSTEM_DOWNLOAD.");
2498 modem_queue_start_reset_notify();
2499 }
2500
2501 if (old_apps != apps) {
2502 SMSM_DBG("<SM %08x NOTIFY>\n", apps);
2503 __raw_writel(apps, SMSM_STATE_ADDR(SMSM_APPS_STATE));
2504 do_smd_probe();
2505 notify_other_smsm(SMSM_APPS_STATE, (old_apps ^ apps));
2506 }
2507
Eric Holmbergda31d042012-03-28 14:01:02 -06002508 smsm_cb_snapshot(1);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002509 }
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002510 spin_unlock_irqrestore(&smem_lock, flags);
2511 return IRQ_HANDLED;
2512}
2513
Eric Holmberg98c6c642012-02-24 11:29:35 -07002514static irqreturn_t smsm_modem_irq_handler(int irq, void *data)
2515{
2516 SMx_POWER_INFO("SMSM Int Modem->Apps\n");
Eric Holmberg7ad623a2012-03-01 14:41:10 -07002517 ++interrupt_stats[SMD_MODEM].smsm_in_count;
Eric Holmberg98c6c642012-02-24 11:29:35 -07002518 return smsm_irq_handler(irq, data);
2519}
2520
2521static irqreturn_t smsm_dsp_irq_handler(int irq, void *data)
2522{
2523 SMx_POWER_INFO("SMSM Int LPASS->Apps\n");
Eric Holmberg7ad623a2012-03-01 14:41:10 -07002524 ++interrupt_stats[SMD_Q6].smsm_in_count;
Eric Holmberg98c6c642012-02-24 11:29:35 -07002525 return smsm_irq_handler(irq, data);
2526}
2527
2528static irqreturn_t smsm_dsps_irq_handler(int irq, void *data)
2529{
2530 SMx_POWER_INFO("SMSM Int DSPS->Apps\n");
Eric Holmberg7ad623a2012-03-01 14:41:10 -07002531 ++interrupt_stats[SMD_DSPS].smsm_in_count;
Eric Holmberg98c6c642012-02-24 11:29:35 -07002532 return smsm_irq_handler(irq, data);
2533}
2534
2535static irqreturn_t smsm_wcnss_irq_handler(int irq, void *data)
2536{
2537 SMx_POWER_INFO("SMSM Int WCNSS->Apps\n");
Eric Holmberg7ad623a2012-03-01 14:41:10 -07002538 ++interrupt_stats[SMD_WCNSS].smsm_in_count;
Eric Holmberg98c6c642012-02-24 11:29:35 -07002539 return smsm_irq_handler(irq, data);
2540}
2541
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002542int smsm_change_intr_mask(uint32_t smsm_entry,
2543 uint32_t clear_mask, uint32_t set_mask)
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002544{
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002545 uint32_t old_mask, new_mask;
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002546 unsigned long flags;
Brian Swetland5b0f5a32009-04-26 18:38:49 -07002547
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002548 if (smsm_entry >= SMSM_NUM_ENTRIES) {
2549		pr_err("smsm_change_intr_mask: Invalid entry %d\n",
2550 smsm_entry);
2551 return -EINVAL;
2552 }
2553
2554 if (!smsm_info.intr_mask) {
2555 pr_err("smsm_change_intr_mask <SM NO STATE>\n");
Brian Swetland5b0f5a32009-04-26 18:38:49 -07002556 return -EIO;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002557 }
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002558
2559 spin_lock_irqsave(&smem_lock, flags);
2560
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002561 old_mask = __raw_readl(SMSM_INTR_MASK_ADDR(smsm_entry, SMSM_APPS));
2562 new_mask = (old_mask & ~clear_mask) | set_mask;
2563 __raw_writel(new_mask, SMSM_INTR_MASK_ADDR(smsm_entry, SMSM_APPS));
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002564
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002565 wmb();
2566 spin_unlock_irqrestore(&smem_lock, flags);
Brian Swetland5b0f5a32009-04-26 18:38:49 -07002567
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002568 return 0;
2569}
2570EXPORT_SYMBOL(smsm_change_intr_mask);
2571
2572int smsm_get_intr_mask(uint32_t smsm_entry, uint32_t *intr_mask)
2573{
2574 if (smsm_entry >= SMSM_NUM_ENTRIES) {
2575		pr_err("smsm_get_intr_mask: Invalid entry %d\n",
2576 smsm_entry);
2577 return -EINVAL;
2578 }
2579
2580 if (!smsm_info.intr_mask) {
2581		pr_err("smsm_get_intr_mask <SM NO STATE>\n");
2582 return -EIO;
2583 }
2584
2585 *intr_mask = __raw_readl(SMSM_INTR_MASK_ADDR(smsm_entry, SMSM_APPS));
2586 return 0;
2587}
2588EXPORT_SYMBOL(smsm_get_intr_mask);
2589
2590int smsm_change_state(uint32_t smsm_entry,
2591 uint32_t clear_mask, uint32_t set_mask)
2592{
2593 unsigned long flags;
2594 uint32_t old_state, new_state;
2595
2596 if (smsm_entry >= SMSM_NUM_ENTRIES) {
2597 pr_err("smsm_change_state: Invalid entry %d",
2598 smsm_entry);
2599 return -EINVAL;
2600 }
2601
2602 if (!smsm_info.state) {
2603 pr_err("smsm_change_state <SM NO STATE>\n");
2604 return -EIO;
2605 }
2606 spin_lock_irqsave(&smem_lock, flags);
2607
2608 old_state = __raw_readl(SMSM_STATE_ADDR(smsm_entry));
2609 new_state = (old_state & ~clear_mask) | set_mask;
2610 __raw_writel(new_state, SMSM_STATE_ADDR(smsm_entry));
2611 SMSM_DBG("smsm_change_state %x\n", new_state);
2612 notify_other_smsm(SMSM_APPS_STATE, (old_state ^ new_state));
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002613
2614 spin_unlock_irqrestore(&smem_lock, flags);
2615
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002616 return 0;
2617}
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002618EXPORT_SYMBOL(smsm_change_state);
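/*
 * Usage sketch (assumption): setting and later clearing a bit in the
 * apps SMSM entry via smsm_change_state() above.  SMSM_EXAMPLE_BIT
 * stands in for any SMSM_* mask bit valid for SMSM_APPS_STATE; it is
 * not a real definition.
 *
 *	static void example_signal(bool on)
 *	{
 *		if (on)
 *			smsm_change_state(SMSM_APPS_STATE,
 *					  0, SMSM_EXAMPLE_BIT);
 *		else
 *			smsm_change_state(SMSM_APPS_STATE,
 *					  SMSM_EXAMPLE_BIT, 0);
 *	}
 */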
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002619
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002620uint32_t smsm_get_state(uint32_t smsm_entry)
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002621{
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002622 uint32_t rv = 0;
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002623
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002624 /* needs interface change to return error code */
2625 if (smsm_entry >= SMSM_NUM_ENTRIES) {
2626		pr_err("smsm_get_state: Invalid entry %d\n",
2627 smsm_entry);
2628 return 0;
2629 }
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002630
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002631 if (!smsm_info.state) {
2632 pr_err("smsm_get_state <SM NO STATE>\n");
2633 } else {
2634 rv = __raw_readl(SMSM_STATE_ADDR(smsm_entry));
2635 }
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002636
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002637 return rv;
2638}
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002639EXPORT_SYMBOL(smsm_get_state);
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002640
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002641/**
2642 * Performs SMSM callback client notification.
2643 */
2644void notify_smsm_cb_clients_worker(struct work_struct *work)
Arve Hjønnevågec9d3d12009-06-16 14:48:21 -07002645{
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002646 struct smsm_state_cb_info *cb_info;
2647 struct smsm_state_info *state_info;
2648 int n;
2649 uint32_t new_state;
2650 uint32_t state_changes;
Eric Holmbergda31d042012-03-28 14:01:02 -06002651 uint32_t use_wakelock;
Eric Holmbergc7e8daf2011-12-28 11:49:21 -07002652 int ret;
Eric Holmberg59a9f942012-03-19 10:04:22 -06002653 unsigned long flags;
Brian Swetland03e00cd2009-07-01 17:58:37 -07002654
Eric Holmbergc7e8daf2011-12-28 11:49:21 -07002655 if (!smd_initialized)
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002656 return;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002657
Eric Holmbergda31d042012-03-28 14:01:02 -06002658 while (kfifo_len(&smsm_snapshot_fifo) >= SMSM_SNAPSHOT_SIZE) {
Eric Holmbergc7e8daf2011-12-28 11:49:21 -07002659 mutex_lock(&smsm_lock);
2660 for (n = 0; n < SMSM_NUM_ENTRIES; n++) {
2661 state_info = &smsm_states[n];
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002662
Eric Holmbergc7e8daf2011-12-28 11:49:21 -07002663 ret = kfifo_out(&smsm_snapshot_fifo, &new_state,
2664 sizeof(new_state));
2665 if (ret != sizeof(new_state)) {
2666 pr_err("%s: snapshot underflow %d\n",
2667 __func__, ret);
2668 mutex_unlock(&smsm_lock);
2669 return;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002670 }
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002671
Eric Holmbergc7e8daf2011-12-28 11:49:21 -07002672 state_changes = state_info->last_value ^ new_state;
2673 if (state_changes) {
Eric Holmberg98c6c642012-02-24 11:29:35 -07002674 SMx_POWER_INFO("SMSM Change %d: %08x->%08x\n",
2675 n, state_info->last_value,
2676 new_state);
Eric Holmbergc7e8daf2011-12-28 11:49:21 -07002677 list_for_each_entry(cb_info,
2678 &state_info->callbacks, cb_list) {
2679
2680 if (cb_info->mask & state_changes)
2681 cb_info->notify(cb_info->data,
2682 state_info->last_value,
2683 new_state);
2684 }
2685 state_info->last_value = new_state;
2686 }
2687 }
2688 mutex_unlock(&smsm_lock);
Eric Holmberg59a9f942012-03-19 10:04:22 -06002689
Eric Holmbergda31d042012-03-28 14:01:02 -06002690 /* read wakelock flag */
2691 ret = kfifo_out(&smsm_snapshot_fifo, &use_wakelock,
2692 sizeof(use_wakelock));
2693 if (ret != sizeof(use_wakelock)) {
2694 pr_err("%s: snapshot underflow %d\n",
2695 __func__, ret);
2696 return;
Eric Holmberg59a9f942012-03-19 10:04:22 -06002697 }
Eric Holmbergda31d042012-03-28 14:01:02 -06002698
2699 if (use_wakelock) {
2700 spin_lock_irqsave(&smsm_snapshot_count_lock, flags);
2701 if (smsm_snapshot_count) {
2702 --smsm_snapshot_count;
2703 if (smsm_snapshot_count == 0) {
2704 SMx_POWER_INFO("SMSM snapshot"
2705 " wake unlock\n");
2706 wake_unlock(&smsm_snapshot_wakelock);
2707 }
2708 } else {
2709 pr_err("%s: invalid snapshot count\n",
2710 __func__);
2711 }
2712 spin_unlock_irqrestore(&smsm_snapshot_count_lock,
2713 flags);
2714 }
Eric Holmbergc7e8daf2011-12-28 11:49:21 -07002715 }
Arve Hjønnevågec9d3d12009-06-16 14:48:21 -07002716}
2717
Arve Hjønnevågec9d3d12009-06-16 14:48:21 -07002718
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002719/**
2720 * Registers callback for SMSM state notifications when the specified
2721 * bits change.
2722 *
2723 * @smsm_entry Processor entry to register for
2724 * @mask Bits whose changes trigger the callback
2725 * @notify Notification function to register
2726 * @data Opaque data passed in to callback
2727 *
2728 * @returns Status code
2729 * <0 error code
2730 * 0 inserted new entry
2731 * 1 updated mask of existing entry
2732 */
2733int smsm_state_cb_register(uint32_t smsm_entry, uint32_t mask,
2734 void (*notify)(void *, uint32_t, uint32_t), void *data)
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002735{
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002736 struct smsm_state_cb_info *cb_info;
2737 struct smsm_state_cb_info *cb_found = 0;
2738 int ret = 0;
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002739
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002740 if (smsm_entry >= SMSM_NUM_ENTRIES)
2741 return -EINVAL;
2742
Eric Holmbergc8002902011-09-16 13:55:57 -06002743 mutex_lock(&smsm_lock);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002744
2745 if (!smsm_states) {
2746 /* smsm not yet initialized */
2747 ret = -ENODEV;
2748 goto cleanup;
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002749 }
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002750
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002751 list_for_each_entry(cb_info,
2752 &smsm_states[smsm_entry].callbacks, cb_list) {
2753 if ((cb_info->notify == notify) &&
2754 (cb_info->data == data)) {
2755 cb_info->mask |= mask;
2756 cb_found = cb_info;
2757 ret = 1;
Brian Swetland5b0f5a32009-04-26 18:38:49 -07002758 break;
2759 }
2760 }
2761
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002762 if (!cb_found) {
2763 cb_info = kmalloc(sizeof(struct smsm_state_cb_info),
2764 GFP_ATOMIC);
2765 if (!cb_info) {
2766 ret = -ENOMEM;
2767 goto cleanup;
2768 }
2769
2770 cb_info->mask = mask;
2771 cb_info->notify = notify;
2772 cb_info->data = data;
2773 INIT_LIST_HEAD(&cb_info->cb_list);
2774 list_add_tail(&cb_info->cb_list,
2775 &smsm_states[smsm_entry].callbacks);
2776 }
2777
2778cleanup:
Eric Holmbergc8002902011-09-16 13:55:57 -06002779 mutex_unlock(&smsm_lock);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002780 return ret;
2781}
2782EXPORT_SYMBOL(smsm_state_cb_register);
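/*
 * Usage sketch (assumption): watching the modem SMSM entry for SMSM_RESET
 * transitions via the callback API documented above.  example_modem_cb
 * and example_watch_modem are hypothetical names; the callback receives
 * (data, old_state, new_state) as dispatched by
 * notify_smsm_cb_clients_worker().
 *
 *	static void example_modem_cb(void *data, uint32_t old_state,
 *				     uint32_t new_state)
 *	{
 *		if (!(old_state & SMSM_RESET) && (new_state & SMSM_RESET))
 *			pr_info("example: modem entered SMSM_RESET\n");
 *	}
 *
 *	static int example_watch_modem(void)
 *	{
 *		return smsm_state_cb_register(SMSM_MODEM_STATE, SMSM_RESET,
 *					      example_modem_cb, NULL);
 *	}
 */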
2783
2784
2785/**
2786 * Deregisters for SMSM state notifications for the specified bits.
2787 *
2788 * @smsm_entry Processor entry to deregister
2789 * @mask Bits to deregister (if result is 0, callback is removed)
2790 * @notify Notification function to deregister
2791 * @data Opaque data passed in to callback
2792 *
2793 * @returns Status code
2794 * <0 error code
2795 * 0 not found
2796 * 1 updated mask
2797 * 2 removed callback
2798 */
2799int smsm_state_cb_deregister(uint32_t smsm_entry, uint32_t mask,
2800 void (*notify)(void *, uint32_t, uint32_t), void *data)
2801{
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002802 struct smsm_state_cb_info *cb_info;
2803 int ret = 0;
2804
2805 if (smsm_entry >= SMSM_NUM_ENTRIES)
2806 return -EINVAL;
2807
Eric Holmbergc8002902011-09-16 13:55:57 -06002808 mutex_lock(&smsm_lock);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002809
2810 if (!smsm_states) {
2811 /* smsm not yet initialized */
Eric Holmbergc8002902011-09-16 13:55:57 -06002812 mutex_unlock(&smsm_lock);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002813 return -ENODEV;
2814 }
2815
2816 list_for_each_entry(cb_info,
2817 &smsm_states[smsm_entry].callbacks, cb_list) {
2818 if ((cb_info->notify == notify) &&
2819 (cb_info->data == data)) {
2820 cb_info->mask &= ~mask;
2821 ret = 1;
2822 if (!cb_info->mask) {
2823 /* no mask bits set, remove callback */
2824 list_del(&cb_info->cb_list);
2825 kfree(cb_info);
2826 ret = 2;
2827 }
2828 break;
2829 }
2830 }
2831
Eric Holmbergc8002902011-09-16 13:55:57 -06002832 mutex_unlock(&smsm_lock);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002833 return ret;
2834}
2835EXPORT_SYMBOL(smsm_state_cb_deregister);
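/*
 * Usage sketch (assumption): undoing the registration from the previous
 * example.  The entry, notify function and data must match those passed
 * to smsm_state_cb_register() for the callback to be found.
 *
 *	smsm_state_cb_deregister(SMSM_MODEM_STATE, SMSM_RESET,
 *				 example_modem_cb, NULL);
 */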
2836
Karthikeyan Ramasubramanian7069c482012-03-22 09:21:20 -06002837int smsm_driver_state_notifier_register(struct notifier_block *nb)
2838{
2839 int ret;
2840 if (!nb)
2841 return -EINVAL;
2842 mutex_lock(&smsm_driver_state_notifier_lock);
2843 ret = raw_notifier_chain_register(&smsm_driver_state_notifier_list, nb);
2844 mutex_unlock(&smsm_driver_state_notifier_lock);
2845 return ret;
2846}
2847EXPORT_SYMBOL(smsm_driver_state_notifier_register);
2848
2849int smsm_driver_state_notifier_unregister(struct notifier_block *nb)
2850{
2851 int ret;
2852 if (!nb)
2853 return -EINVAL;
2854 mutex_lock(&smsm_driver_state_notifier_lock);
2855 ret = raw_notifier_chain_unregister(&smsm_driver_state_notifier_list,
2856 nb);
2857 mutex_unlock(&smsm_driver_state_notifier_lock);
2858 return ret;
2859}
2860EXPORT_SYMBOL(smsm_driver_state_notifier_unregister);
2861
2862static void smsm_driver_state_notify(uint32_t state, void *data)
2863{
2864 mutex_lock(&smsm_driver_state_notifier_lock);
2865 raw_notifier_call_chain(&smsm_driver_state_notifier_list,
2866 state, data);
2867 mutex_unlock(&smsm_driver_state_notifier_lock);
2868}
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002869
2870int smd_core_init(void)
2871{
2872 int r;
2873 unsigned long flags = IRQF_TRIGGER_RISING;
2874 SMD_INFO("smd_core_init()\n");
Brian Swetland5b0f5a32009-04-26 18:38:49 -07002875
Brian Swetland37521a32009-07-01 18:30:47 -07002876 r = request_irq(INT_A9_M2A_0, smd_modem_irq_handler,
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002877 flags, "smd_dev", 0);
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002878 if (r < 0)
2879 return r;
2880 r = enable_irq_wake(INT_A9_M2A_0);
2881 if (r < 0)
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002882 pr_err("smd_core_init: "
2883 "enable_irq_wake failed for INT_A9_M2A_0\n");
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002884
Eric Holmberg98c6c642012-02-24 11:29:35 -07002885 r = request_irq(INT_A9_M2A_5, smsm_modem_irq_handler,
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002886 flags, "smsm_dev", 0);
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002887 if (r < 0) {
2888 free_irq(INT_A9_M2A_0, 0);
2889 return r;
2890 }
2891 r = enable_irq_wake(INT_A9_M2A_5);
2892 if (r < 0)
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002893 pr_err("smd_core_init: "
2894 "enable_irq_wake failed for INT_A9_M2A_5\n");
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002895
Brian Swetland37521a32009-07-01 18:30:47 -07002896#if defined(CONFIG_QDSP6)
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002897#if (INT_ADSP_A11 == INT_ADSP_A11_SMSM)
2898 flags |= IRQF_SHARED;
2899#endif
Brian Swetland37521a32009-07-01 18:30:47 -07002900 r = request_irq(INT_ADSP_A11, smd_dsp_irq_handler,
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002901 flags, "smd_dev", smd_dsp_irq_handler);
Brian Swetland37521a32009-07-01 18:30:47 -07002902 if (r < 0) {
2903 free_irq(INT_A9_M2A_0, 0);
2904 free_irq(INT_A9_M2A_5, 0);
2905 return r;
2906 }
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002907
Eric Holmberg98c6c642012-02-24 11:29:35 -07002908 r = request_irq(INT_ADSP_A11_SMSM, smsm_dsp_irq_handler,
2909 flags, "smsm_dev", smsm_dsp_irq_handler);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002910 if (r < 0) {
2911 free_irq(INT_A9_M2A_0, 0);
2912 free_irq(INT_A9_M2A_5, 0);
2913 free_irq(INT_ADSP_A11, smd_dsp_irq_handler);
2914 return r;
2915 }
2916
2917 r = enable_irq_wake(INT_ADSP_A11);
2918 if (r < 0)
2919 pr_err("smd_core_init: "
2920 "enable_irq_wake failed for INT_ADSP_A11\n");
2921
2922#if (INT_ADSP_A11 != INT_ADSP_A11_SMSM)
2923 r = enable_irq_wake(INT_ADSP_A11_SMSM);
2924 if (r < 0)
2925 pr_err("smd_core_init: enable_irq_wake "
2926 "failed for INT_ADSP_A11_SMSM\n");
2927#endif
2928 flags &= ~IRQF_SHARED;
Brian Swetland37521a32009-07-01 18:30:47 -07002929#endif
2930
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002931#if defined(CONFIG_DSPS)
2932 r = request_irq(INT_DSPS_A11, smd_dsps_irq_handler,
2933 flags, "smd_dev", smd_dsps_irq_handler);
2934 if (r < 0) {
2935 free_irq(INT_A9_M2A_0, 0);
2936 free_irq(INT_A9_M2A_5, 0);
2937 free_irq(INT_ADSP_A11, smd_dsp_irq_handler);
Eric Holmberg98c6c642012-02-24 11:29:35 -07002938 free_irq(INT_ADSP_A11_SMSM, smsm_dsp_irq_handler);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002939 return r;
2940 }
Brian Swetland5b0f5a32009-04-26 18:38:49 -07002941
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002942 r = enable_irq_wake(INT_DSPS_A11);
2943 if (r < 0)
2944 pr_err("smd_core_init: "
2945 "enable_irq_wake failed for INT_ADSP_A11\n");
Arve Hjønnevågec9d3d12009-06-16 14:48:21 -07002946#endif
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002947
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002948#if defined(CONFIG_WCNSS)
2949 r = request_irq(INT_WCNSS_A11, smd_wcnss_irq_handler,
2950 flags, "smd_dev", smd_wcnss_irq_handler);
2951 if (r < 0) {
2952 free_irq(INT_A9_M2A_0, 0);
2953 free_irq(INT_A9_M2A_5, 0);
2954 free_irq(INT_ADSP_A11, smd_dsp_irq_handler);
Eric Holmberg98c6c642012-02-24 11:29:35 -07002955 free_irq(INT_ADSP_A11_SMSM, smsm_dsp_irq_handler);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002956 free_irq(INT_DSPS_A11, smd_dsps_irq_handler);
2957 return r;
2958 }
2959
2960 r = enable_irq_wake(INT_WCNSS_A11);
2961 if (r < 0)
2962 pr_err("smd_core_init: "
2963 "enable_irq_wake failed for INT_WCNSS_A11\n");
2964
Eric Holmberg98c6c642012-02-24 11:29:35 -07002965 r = request_irq(INT_WCNSS_A11_SMSM, smsm_wcnss_irq_handler,
2966 flags, "smsm_dev", smsm_wcnss_irq_handler);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002967 if (r < 0) {
2968 free_irq(INT_A9_M2A_0, 0);
2969 free_irq(INT_A9_M2A_5, 0);
2970 free_irq(INT_ADSP_A11, smd_dsp_irq_handler);
Eric Holmberg98c6c642012-02-24 11:29:35 -07002971 free_irq(INT_ADSP_A11_SMSM, smsm_dsp_irq_handler);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002972 free_irq(INT_DSPS_A11, smd_dsps_irq_handler);
2973 free_irq(INT_WCNSS_A11, smd_wcnss_irq_handler);
2974 return r;
2975 }
2976
2977 r = enable_irq_wake(INT_WCNSS_A11_SMSM);
2978 if (r < 0)
2979 pr_err("smd_core_init: "
2980 "enable_irq_wake failed for INT_WCNSS_A11_SMSM\n");
2981#endif
2982
Jeff Hugo6a8057c2011-08-16 13:47:12 -06002983#if defined(CONFIG_DSPS_SMSM)
Eric Holmberg98c6c642012-02-24 11:29:35 -07002984 r = request_irq(INT_DSPS_A11_SMSM, smsm_dsps_irq_handler,
2985 flags, "smsm_dev", smsm_dsps_irq_handler);
Jeff Hugo6a8057c2011-08-16 13:47:12 -06002986 if (r < 0) {
2987 free_irq(INT_A9_M2A_0, 0);
2988 free_irq(INT_A9_M2A_5, 0);
2989 free_irq(INT_ADSP_A11, smd_dsp_irq_handler);
Eric Holmberg98c6c642012-02-24 11:29:35 -07002990 free_irq(INT_ADSP_A11_SMSM, smsm_dsp_irq_handler);
Jeff Hugo6a8057c2011-08-16 13:47:12 -06002991 free_irq(INT_DSPS_A11, smd_dsps_irq_handler);
2992 free_irq(INT_WCNSS_A11, smd_wcnss_irq_handler);
Eric Holmberg98c6c642012-02-24 11:29:35 -07002993 free_irq(INT_WCNSS_A11_SMSM, smsm_wcnss_irq_handler);
Jeff Hugo6a8057c2011-08-16 13:47:12 -06002994 return r;
2995 }
2996
2997 r = enable_irq_wake(INT_DSPS_A11_SMSM);
2998 if (r < 0)
2999 pr_err("smd_core_init: "
3000 "enable_irq_wake failed for INT_DSPS_A11_SMSM\n");
3001#endif
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003002 SMD_INFO("smd_core_init() done\n");
3003
Brian Swetland2eb44eb2008-09-29 16:00:48 -07003004 return 0;
3005}
3006
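/*
 * intr_init() - request a single SMD or SMSM interrupt from platform data
 * @private_irq:  driver-private interrupt state to populate
 * @platform_irq: interrupt description supplied by the board file
 * @pdev:         platform device used to look the IRQ up by name
 *
 * On success the IRQ number is stored in platform_irq->irq_id and the
 * interrupt is enabled as a wakeup source; on failure the negative error
 * from request_irq() is stored there instead.  Returns the request_irq()
 * result.
 */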
Angshuman Sarkarbad32df2012-02-01 19:52:52 +05303007static int intr_init(struct interrupt_config_item *private_irq,
3008 struct smd_irq_config *platform_irq,
3009 struct platform_device *pdev
3010 )
3011{
3012 int irq_id;
3013 int ret;
3014 int ret_wake;
3015
3016 private_irq->out_bit_pos = platform_irq->out_bit_pos;
3017 private_irq->out_offset = platform_irq->out_offset;
3018 private_irq->out_base = platform_irq->out_base;
3019
3020 irq_id = platform_get_irq_byname(
3021 pdev,
3022 platform_irq->irq_name
3023 );
3024 SMD_DBG("smd: %s: register irq: %s id: %d\n", __func__,
3025 platform_irq->irq_name, irq_id);
3026 ret = request_irq(irq_id,
3027 private_irq->irq_handler,
3028 platform_irq->flags,
3029 platform_irq->device_name,
3030 (void *)platform_irq->dev_id
3031 );
3032 if (ret < 0) {
3033 platform_irq->irq_id = ret;
3034 } else {
3035 platform_irq->irq_id = irq_id;
3036 ret_wake = enable_irq_wake(irq_id);
3037 if (ret_wake < 0) {
3038			pr_err("smd: enable_irq_wake failed on %s\n",
3039 platform_irq->irq_name);
3040 }
3041 }
3042
3043 return ret;
3044}
3045
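/*
 * smd_core_platform_init() - interrupt setup driven by SMD platform data
 * @pdev: platform device whose platform_data is a struct smd_platform
 *
 * Requests the SMD and SMSM interrupts for every configured subsystem
 * and records each subsystem's name against its edge.  If any request
 * fails, all interrupts registered so far are freed and the error is
 * returned.
 */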
3046int smd_core_platform_init(struct platform_device *pdev)
3047{
3048 int i;
3049 int ret;
3050 uint32_t num_ss;
3051 struct smd_platform *smd_platform_data;
3052 struct smd_subsystem_config *smd_ss_config_list;
3053 struct smd_subsystem_config *cfg;
3054 int err_ret = 0;
3055
3056 smd_platform_data = pdev->dev.platform_data;
3057 num_ss = smd_platform_data->num_ss_configs;
3058 smd_ss_config_list = smd_platform_data->smd_ss_configs;
3059
Eric Holmberg2bb6ccd2012-03-13 13:05:14 -06003060 if (smd_platform_data->smd_ssr_config)
3061 disable_smsm_reset_handshake = smd_platform_data->
3062 smd_ssr_config->disable_smsm_reset_handshake;
3063
Angshuman Sarkarbad32df2012-02-01 19:52:52 +05303064 for (i = 0; i < num_ss; i++) {
3065 cfg = &smd_ss_config_list[i];
3066
3067 ret = intr_init(
3068 &private_intr_config[cfg->irq_config_id].smd,
3069 &cfg->smd_int,
3070 pdev
3071 );
3072
3073 if (ret < 0) {
3074 err_ret = ret;
3075 pr_err("smd: register irq failed on %s\n",
3076 cfg->smd_int.irq_name);
3077 break;
3078 }
3079
3080 ret = intr_init(
3081 &private_intr_config[cfg->irq_config_id].smsm,
3082 &cfg->smsm_int,
3083 pdev
3084 );
3085
3086 if (ret < 0) {
3087 err_ret = ret;
3088 pr_err("smd: register irq failed on %s\n",
3089 cfg->smsm_int.irq_name);
3090 break;
3091 }
Eric Holmberg17992c12012-02-29 12:54:44 -07003092
3093 strncpy(edge_to_pids[cfg->edge].subsys_name,
3094 cfg->subsys_name, SMD_MAX_CH_NAME_LEN);
Angshuman Sarkarbad32df2012-02-01 19:52:52 +05303095 }
3096
3097 if (err_ret < 0) {
3098 pr_err("smd: deregistering IRQs\n");
3099 for (i = 0; i < num_ss; ++i) {
3100 cfg = &smd_ss_config_list[i];
3101
3102 if (cfg->smd_int.irq_id >= 0)
3103 free_irq(cfg->smd_int.irq_id,
3104 (void *)cfg->smd_int.dev_id
3105 );
3106 if (cfg->smsm_int.irq_id >= 0)
3107 free_irq(cfg->smsm_int.irq_id,
3108 (void *)cfg->smsm_int.dev_id
3109 );
3110 }
3111 return err_ret;
3112 }
3113
3114 SMD_INFO("smd_core_platform_init() done\n");
3115 return 0;
3116
3117}
3118
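/*
 * msm_smd_probe() - probe the SMD core platform device
 *
 * Creates the channel-close workqueue, initializes SMSM, and sets up
 * interrupts either from platform data (smd_core_platform_init) or from
 * the legacy static tables (smd_core_init); device tree probing is not
 * yet supported.  Finally allocates the loopback channel, runs the SMSM
 * handler once, and schedules the fake-interrupt tasklet.
 */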
Gregory Bean4416e9e2010-07-28 10:22:12 -07003119static int __devinit msm_smd_probe(struct platform_device *pdev)
Brian Swetland2eb44eb2008-09-29 16:00:48 -07003120{
Angshuman Sarkarbad32df2012-02-01 19:52:52 +05303121 int ret;
Daniel Walker0aec66d2010-03-18 12:31:08 -07003122
Angshuman Sarkarbad32df2012-02-01 19:52:52 +05303123 SMD_INFO("smd probe\n");
Brian Swetland2eb44eb2008-09-29 16:00:48 -07003124 INIT_WORK(&probe_work, smd_channel_probe_worker);
3125
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003126 channel_close_wq = create_singlethread_workqueue("smd_channel_close");
3127	if (!channel_close_wq) {
3128 pr_err("%s: create_singlethread_workqueue ENOMEM\n", __func__);
3129 return -ENOMEM;
3130 }
3131
3132 if (smsm_init()) {
3133 pr_err("smsm_init() failed\n");
3134		return -ENODEV;
3135 }
3136
Angshuman Sarkarbad32df2012-02-01 19:52:52 +05303137 if (pdev) {
3138 if (pdev->dev.of_node) {
3139 pr_err("SMD: Device tree not currently supported\n");
3140 return -ENODEV;
3141 } else if (pdev->dev.platform_data) {
3142 ret = smd_core_platform_init(pdev);
3143 if (ret) {
3144 pr_err(
3145 "SMD: smd_core_platform_init() failed\n");
3146 return -ENODEV;
3147 }
3148 } else {
3149 ret = smd_core_init();
3150 if (ret) {
3151 pr_err("smd_core_init() failed\n");
3152 return -ENODEV;
3153 }
3154 }
3155 } else {
3156 pr_err("SMD: PDEV not found\n");
3157 return -ENODEV;
Brian Swetland2eb44eb2008-09-29 16:00:48 -07003158 }
3159
Brian Swetland2eb44eb2008-09-29 16:00:48 -07003160 smd_initialized = 1;
3161
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003162 smd_alloc_loopback_channel();
Eric Holmbergc33d4ab2011-10-24 10:28:25 -06003163 smsm_irq_handler(0, 0);
3164 tasklet_schedule(&smd_fake_irq_tasklet);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003165
Brian Swetland2eb44eb2008-09-29 16:00:48 -07003166 return 0;
3167}
3168
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003169static int restart_notifier_cb(struct notifier_block *this,
3170 unsigned long code,
3171 void *data);
3172
3173static struct restart_notifier_block restart_notifiers[] = {
Eric Holmbergca7ead22011-12-01 17:21:15 -07003174 {SMD_MODEM, "modem", .nb.notifier_call = restart_notifier_cb},
3175 {SMD_Q6, "lpass", .nb.notifier_call = restart_notifier_cb},
3176 {SMD_WCNSS, "riva", .nb.notifier_call = restart_notifier_cb},
3177 {SMD_DSPS, "dsps", .nb.notifier_call = restart_notifier_cb},
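	/* restarts of the "gss" subsystem reset the SMD_MODEM channels */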
Eric Holmberg8b0e74f2012-02-08 09:56:17 -07003178 {SMD_MODEM, "gss", .nb.notifier_call = restart_notifier_cb},
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003179};
3180
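/*
 * restart_notifier_cb() - subsystem restart notification handler
 *
 * Once a remote subsystem has finished shutting down
 * (SUBSYS_AFTER_SHUTDOWN), reset all SMD channels associated with that
 * processor via smd_channel_reset().
 */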
3181static int restart_notifier_cb(struct notifier_block *this,
3182 unsigned long code,
3183 void *data)
3184{
3185 if (code == SUBSYS_AFTER_SHUTDOWN) {
3186 struct restart_notifier_block *notifier;
3187
3188 notifier = container_of(this,
3189 struct restart_notifier_block, nb);
3190 SMD_INFO("%s: ssrestart for processor %d ('%s')\n",
3191 __func__, notifier->processor,
3192 notifier->name);
3193
3194 smd_channel_reset(notifier->processor);
3195 }
3196
3197 return NOTIFY_DONE;
3198}
3199
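/*
 * modem_restart_late_init() - register for subsystem restart notifications
 *
 * Registers restart_notifier_cb with the subsystem notifier framework
 * for every entry in restart_notifiers so SMD channel state is cleaned
 * up when a remote processor restarts.
 */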
3200static __init int modem_restart_late_init(void)
3201{
3202 int i;
3203 void *handle;
3204 struct restart_notifier_block *nb;
3205
3206 for (i = 0; i < ARRAY_SIZE(restart_notifiers); i++) {
3207 nb = &restart_notifiers[i];
3208 handle = subsys_notif_register_notifier(nb->name, &nb->nb);
3209 SMD_DBG("%s: registering notif for '%s', handle=%p\n",
3210 __func__, nb->name, handle);
3211 }
3212 return 0;
3213}
3214late_initcall(modem_restart_late_init);
3215
Brian Swetland2eb44eb2008-09-29 16:00:48 -07003216static struct platform_driver msm_smd_driver = {
3217 .probe = msm_smd_probe,
3218 .driver = {
3219 .name = MODULE_NAME,
3220 .owner = THIS_MODULE,
3221 },
3222};
3223
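/* Probing happens once the board registers the matching platform device. */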
3224static int __init msm_smd_init(void)
3225{
3226 return platform_driver_register(&msm_smd_driver);
3227}
3228
3229module_init(msm_smd_init);
3230
3231MODULE_DESCRIPTION("MSM Shared Memory Core");
3232MODULE_AUTHOR("Brian Swetland <swetland@google.com>");
3233MODULE_LICENSE("GPL");