blob: 948dbbb1efe48def5fd883f9a2a7a62a873684c7 [file] [log] [blame]
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001/* arch/arm/mach-msm/smd.c
2 *
3 * Copyright (C) 2007 Google, Inc.
Eric Holmbergc7e8daf2011-12-28 11:49:21 -07004 * Copyright (c) 2008-2012, Code Aurora Forum. All rights reserved.
Brian Swetland2eb44eb2008-09-29 16:00:48 -07005 * Author: Brian Swetland <swetland@google.com>
6 *
7 * This software is licensed under the terms of the GNU General Public
8 * License version 2, as published by the Free Software Foundation, and
9 * may be copied, distributed, and modified under those terms.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 */
17
18#include <linux/platform_device.h>
19#include <linux/module.h>
20#include <linux/fs.h>
21#include <linux/cdev.h>
22#include <linux/device.h>
23#include <linux/wait.h>
24#include <linux/interrupt.h>
25#include <linux/irq.h>
26#include <linux/list.h>
27#include <linux/slab.h>
Brian Swetland2eb44eb2008-09-29 16:00:48 -070028#include <linux/delay.h>
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070029#include <linux/io.h>
30#include <linux/termios.h>
31#include <linux/ctype.h>
32#include <linux/remote_spinlock.h>
33#include <linux/uaccess.h>
Eric Holmbergc7e8daf2011-12-28 11:49:21 -070034#include <linux/kfifo.h>
Eric Holmberg59a9f942012-03-19 10:04:22 -060035#include <linux/wakelock.h>
Karthikeyan Ramasubramanian7069c482012-03-22 09:21:20 -060036#include <linux/notifier.h>
Jeff Hugobdc734d2012-03-26 16:05:39 -060037#include <linux/sort.h>
Brian Swetland2eb44eb2008-09-29 16:00:48 -070038#include <mach/msm_smd.h>
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070039#include <mach/msm_iomap.h>
Brian Swetland2eb44eb2008-09-29 16:00:48 -070040#include <mach/system.h>
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070041#include <mach/subsystem_notif.h>
Angshuman Sarkaread67bd2011-09-21 20:13:12 +053042#include <mach/socinfo.h>
Steve Mucklef132c6c2012-06-06 18:30:57 -070043#include <mach/proc_comm.h>
Ram Somani8b9589f2012-04-03 12:07:18 +053044#include <asm/cacheflush.h>
Brian Swetland2eb44eb2008-09-29 16:00:48 -070045
46#include "smd_private.h"
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070047#include "modem_notifier.h"
Brian Swetland2eb44eb2008-09-29 16:00:48 -070048
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070049#if defined(CONFIG_ARCH_QSD8X50) || defined(CONFIG_ARCH_MSM8X60) \
Jeff Hugo56b933a2011-09-28 14:42:05 -060050 || defined(CONFIG_ARCH_MSM8960) || defined(CONFIG_ARCH_FSM9XXX) \
Jeff Hugo0c0f5e92011-09-28 13:55:45 -060051 || defined(CONFIG_ARCH_MSM9615) || defined(CONFIG_ARCH_APQ8064)
Brian Swetland37521a32009-07-01 18:30:47 -070052#define CONFIG_QDSP6 1
53#endif
54
Jeff Hugo0c0f5e92011-09-28 13:55:45 -060055#if defined(CONFIG_ARCH_MSM8X60) || defined(CONFIG_ARCH_MSM8960) \
56 || defined(CONFIG_ARCH_APQ8064)
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070057#define CONFIG_DSPS 1
58#endif
59
Jeff Hugo0c0f5e92011-09-28 13:55:45 -060060#if defined(CONFIG_ARCH_MSM8960) \
61 || defined(CONFIG_ARCH_APQ8064)
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070062#define CONFIG_WCNSS 1
Jeff Hugo6a8057c2011-08-16 13:47:12 -060063#define CONFIG_DSPS_SMSM 1
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070064#endif
Brian Swetland2eb44eb2008-09-29 16:00:48 -070065
66#define MODULE_NAME "msm_smd"
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070067#define SMEM_VERSION 0x000B
68#define SMD_VERSION 0x00020000
Eric Holmbergc7e8daf2011-12-28 11:49:21 -070069#define SMSM_SNAPSHOT_CNT 64
Eric Holmbergda31d042012-03-28 14:01:02 -060070#define SMSM_SNAPSHOT_SIZE ((SMSM_NUM_ENTRIES + 1) * 4)
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070071
72uint32_t SMSM_NUM_ENTRIES = 8;
73uint32_t SMSM_NUM_HOSTS = 3;
Brian Swetland2eb44eb2008-09-29 16:00:48 -070074
Eric Holmberge8a39322012-04-03 15:14:02 -060075/* Legacy SMSM interrupt notifications */
76#define LEGACY_MODEM_SMSM_MASK (SMSM_RESET | SMSM_INIT | SMSM_SMDINIT \
77 | SMSM_RUN | SMSM_SYSTEM_DOWNLOAD)
Brian Swetland2eb44eb2008-09-29 16:00:48 -070078
79enum {
80 MSM_SMD_DEBUG = 1U << 0,
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070081 MSM_SMSM_DEBUG = 1U << 1,
82 MSM_SMD_INFO = 1U << 2,
83 MSM_SMSM_INFO = 1U << 3,
Eric Holmberg98c6c642012-02-24 11:29:35 -070084 MSM_SMx_POWER_INFO = 1U << 4,
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070085};
86
87struct smsm_shared_info {
88 uint32_t *state;
89 uint32_t *intr_mask;
90 uint32_t *intr_mux;
91};
92
93static struct smsm_shared_info smsm_info;
Eric Holmberg59a9f942012-03-19 10:04:22 -060094static struct kfifo smsm_snapshot_fifo;
95static struct wake_lock smsm_snapshot_wakelock;
96static int smsm_snapshot_count;
97static DEFINE_SPINLOCK(smsm_snapshot_count_lock);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070098
99struct smsm_size_info_type {
100 uint32_t num_hosts;
101 uint32_t num_entries;
102 uint32_t reserved0;
103 uint32_t reserved1;
104};
105
106struct smsm_state_cb_info {
107 struct list_head cb_list;
108 uint32_t mask;
109 void *data;
110 void (*notify)(void *data, uint32_t old_state, uint32_t new_state);
111};
112
113struct smsm_state_info {
114 struct list_head callbacks;
115 uint32_t last_value;
Eric Holmberge8a39322012-04-03 15:14:02 -0600116 uint32_t intr_mask_set;
117 uint32_t intr_mask_clear;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700118};
119
Angshuman Sarkarbad32df2012-02-01 19:52:52 +0530120struct interrupt_config_item {
121 /* must be initialized */
122 irqreturn_t (*irq_handler)(int req, void *data);
123 /* outgoing interrupt config (set from platform data) */
124 uint32_t out_bit_pos;
125 void __iomem *out_base;
126 uint32_t out_offset;
127};
128
129struct interrupt_config {
130 struct interrupt_config_item smd;
131 struct interrupt_config_item smsm;
132};
133
134static irqreturn_t smd_modem_irq_handler(int irq, void *data);
Eric Holmberg98c6c642012-02-24 11:29:35 -0700135static irqreturn_t smsm_modem_irq_handler(int irq, void *data);
Angshuman Sarkarbad32df2012-02-01 19:52:52 +0530136static irqreturn_t smd_dsp_irq_handler(int irq, void *data);
Eric Holmberg98c6c642012-02-24 11:29:35 -0700137static irqreturn_t smsm_dsp_irq_handler(int irq, void *data);
Angshuman Sarkarbad32df2012-02-01 19:52:52 +0530138static irqreturn_t smd_dsps_irq_handler(int irq, void *data);
Eric Holmberg98c6c642012-02-24 11:29:35 -0700139static irqreturn_t smsm_dsps_irq_handler(int irq, void *data);
Angshuman Sarkarbad32df2012-02-01 19:52:52 +0530140static irqreturn_t smd_wcnss_irq_handler(int irq, void *data);
Eric Holmberg98c6c642012-02-24 11:29:35 -0700141static irqreturn_t smsm_wcnss_irq_handler(int irq, void *data);
Jeff Hugo918b2dc2012-03-21 13:42:09 -0600142static irqreturn_t smd_rpm_irq_handler(int irq, void *data);
Angshuman Sarkarbad32df2012-02-01 19:52:52 +0530143static irqreturn_t smsm_irq_handler(int irq, void *data);
144
145static struct interrupt_config private_intr_config[NUM_SMD_SUBSYSTEMS] = {
146 [SMD_MODEM] = {
147 .smd.irq_handler = smd_modem_irq_handler,
Eric Holmberg98c6c642012-02-24 11:29:35 -0700148 .smsm.irq_handler = smsm_modem_irq_handler,
Angshuman Sarkarbad32df2012-02-01 19:52:52 +0530149 },
150 [SMD_Q6] = {
151 .smd.irq_handler = smd_dsp_irq_handler,
Eric Holmberg98c6c642012-02-24 11:29:35 -0700152 .smsm.irq_handler = smsm_dsp_irq_handler,
Angshuman Sarkarbad32df2012-02-01 19:52:52 +0530153 },
154 [SMD_DSPS] = {
155 .smd.irq_handler = smd_dsps_irq_handler,
Eric Holmberg98c6c642012-02-24 11:29:35 -0700156 .smsm.irq_handler = smsm_dsps_irq_handler,
Angshuman Sarkarbad32df2012-02-01 19:52:52 +0530157 },
158 [SMD_WCNSS] = {
159 .smd.irq_handler = smd_wcnss_irq_handler,
Eric Holmberg98c6c642012-02-24 11:29:35 -0700160 .smsm.irq_handler = smsm_wcnss_irq_handler,
Angshuman Sarkarbad32df2012-02-01 19:52:52 +0530161 },
Jeff Hugo918b2dc2012-03-21 13:42:09 -0600162 [SMD_RPM] = {
163 .smd.irq_handler = smd_rpm_irq_handler,
164 .smsm.irq_handler = NULL, /* does not support smsm */
165 },
Angshuman Sarkarbad32df2012-02-01 19:52:52 +0530166};
Jeff Hugobdc734d2012-03-26 16:05:39 -0600167
168struct smem_area {
169 void *phys_addr;
170 unsigned size;
171 void __iomem *virt_addr;
172};
173static uint32_t num_smem_areas;
174static struct smem_area *smem_areas;
175static void *smem_range_check(void *base, unsigned offset);
176
Eric Holmberg7ad623a2012-03-01 14:41:10 -0700177struct interrupt_stat interrupt_stats[NUM_SMD_SUBSYSTEMS];
Angshuman Sarkarbad32df2012-02-01 19:52:52 +0530178
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700179#define SMSM_STATE_ADDR(entry) (smsm_info.state + entry)
180#define SMSM_INTR_MASK_ADDR(entry, host) (smsm_info.intr_mask + \
181 entry * SMSM_NUM_HOSTS + host)
182#define SMSM_INTR_MUX_ADDR(entry) (smsm_info.intr_mux + entry)
183
184/* Internal definitions which are not exported in some targets */
185enum {
186 SMSM_APPS_DEM_I = 3,
Brian Swetland2eb44eb2008-09-29 16:00:48 -0700187};
188
189static int msm_smd_debug_mask;
Brian Swetland2eb44eb2008-09-29 16:00:48 -0700190module_param_named(debug_mask, msm_smd_debug_mask,
191 int, S_IRUGO | S_IWUSR | S_IWGRP);
192
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700193#if defined(CONFIG_MSM_SMD_DEBUG)
194#define SMD_DBG(x...) do { \
195 if (msm_smd_debug_mask & MSM_SMD_DEBUG) \
196 printk(KERN_DEBUG x); \
197 } while (0)
198
199#define SMSM_DBG(x...) do { \
200 if (msm_smd_debug_mask & MSM_SMSM_DEBUG) \
201 printk(KERN_DEBUG x); \
202 } while (0)
203
204#define SMD_INFO(x...) do { \
205 if (msm_smd_debug_mask & MSM_SMD_INFO) \
206 printk(KERN_INFO x); \
207 } while (0)
208
209#define SMSM_INFO(x...) do { \
210 if (msm_smd_debug_mask & MSM_SMSM_INFO) \
211 printk(KERN_INFO x); \
212 } while (0)
Eric Holmberg98c6c642012-02-24 11:29:35 -0700213#define SMx_POWER_INFO(x...) do { \
214 if (msm_smd_debug_mask & MSM_SMx_POWER_INFO) \
215 printk(KERN_INFO x); \
216 } while (0)
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700217#else
218#define SMD_DBG(x...) do { } while (0)
219#define SMSM_DBG(x...) do { } while (0)
220#define SMD_INFO(x...) do { } while (0)
221#define SMSM_INFO(x...) do { } while (0)
Eric Holmberg98c6c642012-02-24 11:29:35 -0700222#define SMx_POWER_INFO(x...) do { } while (0)
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700223#endif
224
Brian Swetland2eb44eb2008-09-29 16:00:48 -0700225static unsigned last_heap_free = 0xffffffff;
226
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700227static inline void smd_write_intr(unsigned int val,
228 const void __iomem *addr);
229
230#if defined(CONFIG_ARCH_MSM7X30)
231#define MSM_TRIG_A2M_SMD_INT \
Taniya Das298de8c2012-02-16 11:45:31 +0530232 (smd_write_intr(1 << 0, MSM_APCS_GCC_BASE + 0x8))
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700233#define MSM_TRIG_A2Q6_SMD_INT \
Taniya Das298de8c2012-02-16 11:45:31 +0530234 (smd_write_intr(1 << 8, MSM_APCS_GCC_BASE + 0x8))
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700235#define MSM_TRIG_A2M_SMSM_INT \
Taniya Das298de8c2012-02-16 11:45:31 +0530236 (smd_write_intr(1 << 5, MSM_APCS_GCC_BASE + 0x8))
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700237#define MSM_TRIG_A2Q6_SMSM_INT \
Taniya Das298de8c2012-02-16 11:45:31 +0530238 (smd_write_intr(1 << 8, MSM_APCS_GCC_BASE + 0x8))
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700239#define MSM_TRIG_A2DSPS_SMD_INT
Jeff Hugo6a8057c2011-08-16 13:47:12 -0600240#define MSM_TRIG_A2DSPS_SMSM_INT
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700241#define MSM_TRIG_A2WCNSS_SMD_INT
242#define MSM_TRIG_A2WCNSS_SMSM_INT
243#elif defined(CONFIG_ARCH_MSM8X60)
244#define MSM_TRIG_A2M_SMD_INT \
245 (smd_write_intr(1 << 3, MSM_GCC_BASE + 0x8))
246#define MSM_TRIG_A2Q6_SMD_INT \
247 (smd_write_intr(1 << 15, MSM_GCC_BASE + 0x8))
248#define MSM_TRIG_A2M_SMSM_INT \
249 (smd_write_intr(1 << 4, MSM_GCC_BASE + 0x8))
250#define MSM_TRIG_A2Q6_SMSM_INT \
251 (smd_write_intr(1 << 14, MSM_GCC_BASE + 0x8))
252#define MSM_TRIG_A2DSPS_SMD_INT \
253 (smd_write_intr(1, MSM_SIC_NON_SECURE_BASE + 0x4080))
Jeff Hugo6a8057c2011-08-16 13:47:12 -0600254#define MSM_TRIG_A2DSPS_SMSM_INT
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700255#define MSM_TRIG_A2WCNSS_SMD_INT
256#define MSM_TRIG_A2WCNSS_SMSM_INT
Jeff Hugo56b933a2011-09-28 14:42:05 -0600257#elif defined(CONFIG_ARCH_MSM9615)
258#define MSM_TRIG_A2M_SMD_INT \
259 (smd_write_intr(1 << 3, MSM_APCS_GCC_BASE + 0x8))
260#define MSM_TRIG_A2Q6_SMD_INT \
261 (smd_write_intr(1 << 15, MSM_APCS_GCC_BASE + 0x8))
262#define MSM_TRIG_A2M_SMSM_INT \
263 (smd_write_intr(1 << 4, MSM_APCS_GCC_BASE + 0x8))
264#define MSM_TRIG_A2Q6_SMSM_INT \
265 (smd_write_intr(1 << 14, MSM_APCS_GCC_BASE + 0x8))
266#define MSM_TRIG_A2DSPS_SMD_INT
267#define MSM_TRIG_A2DSPS_SMSM_INT
268#define MSM_TRIG_A2WCNSS_SMD_INT
269#define MSM_TRIG_A2WCNSS_SMSM_INT
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700270#elif defined(CONFIG_ARCH_FSM9XXX)
271#define MSM_TRIG_A2Q6_SMD_INT \
272 (smd_write_intr(1 << 10, MSM_GCC_BASE + 0x8))
273#define MSM_TRIG_A2Q6_SMSM_INT \
274 (smd_write_intr(1 << 10, MSM_GCC_BASE + 0x8))
275#define MSM_TRIG_A2M_SMD_INT \
276 (smd_write_intr(1 << 0, MSM_GCC_BASE + 0x8))
277#define MSM_TRIG_A2M_SMSM_INT \
278 (smd_write_intr(1 << 5, MSM_GCC_BASE + 0x8))
279#define MSM_TRIG_A2DSPS_SMD_INT
Jeff Hugo6a8057c2011-08-16 13:47:12 -0600280#define MSM_TRIG_A2DSPS_SMSM_INT
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700281#define MSM_TRIG_A2WCNSS_SMD_INT
282#define MSM_TRIG_A2WCNSS_SMSM_INT
Eric Holmberg73d45462012-02-28 10:41:31 -0700283#elif defined(CONFIG_ARCH_MSM7X01A) || defined(CONFIG_ARCH_MSM7x25)
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700284#define MSM_TRIG_A2M_SMD_INT \
285 (smd_write_intr(1, MSM_CSR_BASE + 0x400 + (0) * 4))
Eric Holmberg73d45462012-02-28 10:41:31 -0700286#define MSM_TRIG_A2Q6_SMD_INT
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700287#define MSM_TRIG_A2M_SMSM_INT \
288 (smd_write_intr(1, MSM_CSR_BASE + 0x400 + (5) * 4))
Eric Holmberg73d45462012-02-28 10:41:31 -0700289#define MSM_TRIG_A2Q6_SMSM_INT
290#define MSM_TRIG_A2DSPS_SMD_INT
291#define MSM_TRIG_A2DSPS_SMSM_INT
292#define MSM_TRIG_A2WCNSS_SMD_INT
293#define MSM_TRIG_A2WCNSS_SMSM_INT
294#elif defined(CONFIG_ARCH_MSM7X27) || defined(CONFIG_ARCH_MSM7X27A)
295#define MSM_TRIG_A2M_SMD_INT \
296 (smd_write_intr(1, MSM_CSR_BASE + 0x400 + (0) * 4))
297#define MSM_TRIG_A2Q6_SMD_INT
298#define MSM_TRIG_A2M_SMSM_INT \
299 (smd_write_intr(1, MSM_CSR_BASE + 0x400 + (5) * 4))
300#define MSM_TRIG_A2Q6_SMSM_INT
301#define MSM_TRIG_A2DSPS_SMD_INT
302#define MSM_TRIG_A2DSPS_SMSM_INT
303#define MSM_TRIG_A2WCNSS_SMD_INT
304#define MSM_TRIG_A2WCNSS_SMSM_INT
305#else /* use platform device / device tree configuration */
306#define MSM_TRIG_A2M_SMD_INT
307#define MSM_TRIG_A2Q6_SMD_INT
308#define MSM_TRIG_A2M_SMSM_INT
309#define MSM_TRIG_A2Q6_SMSM_INT
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700310#define MSM_TRIG_A2DSPS_SMD_INT
Jeff Hugo6a8057c2011-08-16 13:47:12 -0600311#define MSM_TRIG_A2DSPS_SMSM_INT
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700312#define MSM_TRIG_A2WCNSS_SMD_INT
313#define MSM_TRIG_A2WCNSS_SMSM_INT
Brian Swetland37521a32009-07-01 18:30:47 -0700314#endif
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700315
Jeff Hugoee40b152012-02-09 17:39:47 -0700316/*
317 * stub out legacy macros if they are not being used so that the legacy
318 * code compiles even though it is not used
319 *
320 * these definitions should not be used in active code and will cause
321 * an early failure
322 */
323#ifndef INT_A9_M2A_0
324#define INT_A9_M2A_0 -1
325#endif
326#ifndef INT_A9_M2A_5
327#define INT_A9_M2A_5 -1
328#endif
329#ifndef INT_ADSP_A11
330#define INT_ADSP_A11 -1
331#endif
332#ifndef INT_ADSP_A11_SMSM
333#define INT_ADSP_A11_SMSM -1
334#endif
335#ifndef INT_DSPS_A11
336#define INT_DSPS_A11 -1
337#endif
338#ifndef INT_DSPS_A11_SMSM
339#define INT_DSPS_A11_SMSM -1
340#endif
341#ifndef INT_WCNSS_A11
342#define INT_WCNSS_A11 -1
343#endif
344#ifndef INT_WCNSS_A11_SMSM
345#define INT_WCNSS_A11_SMSM -1
346#endif
347
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700348#define SMD_LOOPBACK_CID 100
349
Eric Holmbergf6d7d1a2011-09-23 18:31:04 -0600350#define SMEM_SPINLOCK_SMEM_ALLOC "S:3"
351static remote_spinlock_t remote_spinlock;
352
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700353static LIST_HEAD(smd_ch_list_loopback);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700354static void smd_fake_irq_handler(unsigned long arg);
Eric Holmbergda31d042012-03-28 14:01:02 -0600355static void smsm_cb_snapshot(uint32_t use_wakelock);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700356
Karthikeyan Ramasubramanian3249a222012-04-18 17:16:49 -0600357static struct workqueue_struct *smsm_cb_wq;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700358static void notify_smsm_cb_clients_worker(struct work_struct *work);
359static DECLARE_WORK(smsm_cb_work, notify_smsm_cb_clients_worker);
Eric Holmbergc8002902011-09-16 13:55:57 -0600360static DEFINE_MUTEX(smsm_lock);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700361static struct smsm_state_info *smsm_states;
Angshuman Sarkar7ee0dca2011-08-22 21:37:34 +0530362static int spinlocks_initialized;
Karthikeyan Ramasubramanian7069c482012-03-22 09:21:20 -0600363static RAW_NOTIFIER_HEAD(smsm_driver_state_notifier_list);
364static DEFINE_MUTEX(smsm_driver_state_notifier_lock);
365static void smsm_driver_state_notify(uint32_t state, void *data);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700366
367static inline void smd_write_intr(unsigned int val,
368 const void __iomem *addr)
369{
370 wmb();
371 __raw_writel(val, addr);
Brian Swetland2eb44eb2008-09-29 16:00:48 -0700372}
373
#ifdef CONFIG_WCNSS
/*
 * Wake the RIVA (WCNSS) subsystem before signalling it.
 *
 * Workaround hack for a RIVA v1 hardware bug: toggle GPIO 40 (via the
 * TLMM register at offset 0x1284) to wake RIVA from power collapse.
 * Applied only on SoC major version 1; not to be sent to customers.
 */
static inline void wakeup_v1_riva(void)
{
	if (SOCINFO_VERSION_MAJOR(socinfo_get_version()) == 1) {
		__raw_writel(0x0, MSM_TLMM_BASE + 0x1284);
		__raw_writel(0x2, MSM_TLMM_BASE + 0x1284);
	}
	/* end workaround */
}
#else
/* No WCNSS support configured: nothing to wake. */
static inline void wakeup_v1_riva(void) {}
#endif
391
Brian Swetland5b0f5a32009-04-26 18:38:49 -0700392static inline void notify_modem_smd(void)
Brian Swetland2eb44eb2008-09-29 16:00:48 -0700393{
Angshuman Sarkarbad32df2012-02-01 19:52:52 +0530394 static const struct interrupt_config_item *intr
395 = &private_intr_config[SMD_MODEM].smd;
Eric Holmberg7ad623a2012-03-01 14:41:10 -0700396 if (intr->out_base) {
397 ++interrupt_stats[SMD_MODEM].smd_out_config_count;
Angshuman Sarkarbad32df2012-02-01 19:52:52 +0530398 smd_write_intr(intr->out_bit_pos,
399 intr->out_base + intr->out_offset);
Eric Holmberg7ad623a2012-03-01 14:41:10 -0700400 } else {
401 ++interrupt_stats[SMD_MODEM].smd_out_hardcode_count;
Angshuman Sarkarbad32df2012-02-01 19:52:52 +0530402 MSM_TRIG_A2M_SMD_INT;
Eric Holmberg7ad623a2012-03-01 14:41:10 -0700403 }
Brian Swetland2eb44eb2008-09-29 16:00:48 -0700404}
405
Brian Swetland5b0f5a32009-04-26 18:38:49 -0700406static inline void notify_dsp_smd(void)
407{
Angshuman Sarkarbad32df2012-02-01 19:52:52 +0530408 static const struct interrupt_config_item *intr
409 = &private_intr_config[SMD_Q6].smd;
Eric Holmberg7ad623a2012-03-01 14:41:10 -0700410 if (intr->out_base) {
411 ++interrupt_stats[SMD_Q6].smd_out_config_count;
Angshuman Sarkarbad32df2012-02-01 19:52:52 +0530412 smd_write_intr(intr->out_bit_pos,
413 intr->out_base + intr->out_offset);
Eric Holmberg7ad623a2012-03-01 14:41:10 -0700414 } else {
415 ++interrupt_stats[SMD_Q6].smd_out_hardcode_count;
Angshuman Sarkarbad32df2012-02-01 19:52:52 +0530416 MSM_TRIG_A2Q6_SMD_INT;
Eric Holmberg7ad623a2012-03-01 14:41:10 -0700417 }
Brian Swetland5b0f5a32009-04-26 18:38:49 -0700418}
419
Angshuman Sarkarbad32df2012-02-01 19:52:52 +0530420static inline void notify_dsps_smd(void)
421{
422 static const struct interrupt_config_item *intr
423 = &private_intr_config[SMD_DSPS].smd;
Eric Holmberg7ad623a2012-03-01 14:41:10 -0700424 if (intr->out_base) {
425 ++interrupt_stats[SMD_DSPS].smd_out_config_count;
Angshuman Sarkarbad32df2012-02-01 19:52:52 +0530426 smd_write_intr(intr->out_bit_pos,
427 intr->out_base + intr->out_offset);
Eric Holmberg7ad623a2012-03-01 14:41:10 -0700428 } else {
429 ++interrupt_stats[SMD_DSPS].smd_out_hardcode_count;
Angshuman Sarkarbad32df2012-02-01 19:52:52 +0530430 MSM_TRIG_A2DSPS_SMD_INT;
Eric Holmberg7ad623a2012-03-01 14:41:10 -0700431 }
Angshuman Sarkarbad32df2012-02-01 19:52:52 +0530432}
433
434static inline void notify_wcnss_smd(void)
435{
436 static const struct interrupt_config_item *intr
437 = &private_intr_config[SMD_WCNSS].smd;
438 wakeup_v1_riva();
439
Eric Holmberg7ad623a2012-03-01 14:41:10 -0700440 if (intr->out_base) {
441 ++interrupt_stats[SMD_WCNSS].smd_out_config_count;
Angshuman Sarkarbad32df2012-02-01 19:52:52 +0530442 smd_write_intr(intr->out_bit_pos,
443 intr->out_base + intr->out_offset);
Eric Holmberg7ad623a2012-03-01 14:41:10 -0700444 } else {
445 ++interrupt_stats[SMD_WCNSS].smd_out_hardcode_count;
Angshuman Sarkarbad32df2012-02-01 19:52:52 +0530446 MSM_TRIG_A2WCNSS_SMD_INT;
Eric Holmberg7ad623a2012-03-01 14:41:10 -0700447 }
Angshuman Sarkarbad32df2012-02-01 19:52:52 +0530448}
449
/*
 * Raise the outgoing SMD interrupt toward the RPM processor.
 *
 * Unlike the other notify_*_smd() helpers there is deliberately no
 * legacy hard-coded fallback: the RPM edge only exists on targets
 * that configure the trigger register through platform data, so if
 * out_base is unset this is silently a no-op.
 */
static inline void notify_rpm_smd(void)
{
	static const struct interrupt_config_item *intr
		= &private_intr_config[SMD_RPM].smd;

	if (intr->out_base) {
		++interrupt_stats[SMD_RPM].smd_out_config_count;
		smd_write_intr(intr->out_bit_pos,
				intr->out_base + intr->out_offset);
	}
}
461
Angshuman Sarkarbad32df2012-02-01 19:52:52 +0530462static inline void notify_modem_smsm(void)
463{
464 static const struct interrupt_config_item *intr
465 = &private_intr_config[SMD_MODEM].smsm;
Eric Holmberg7ad623a2012-03-01 14:41:10 -0700466 if (intr->out_base) {
467 ++interrupt_stats[SMD_MODEM].smsm_out_config_count;
Angshuman Sarkarbad32df2012-02-01 19:52:52 +0530468 smd_write_intr(intr->out_bit_pos,
469 intr->out_base + intr->out_offset);
Eric Holmberg7ad623a2012-03-01 14:41:10 -0700470 } else {
471 ++interrupt_stats[SMD_MODEM].smsm_out_hardcode_count;
Angshuman Sarkarbad32df2012-02-01 19:52:52 +0530472 MSM_TRIG_A2M_SMSM_INT;
Eric Holmberg7ad623a2012-03-01 14:41:10 -0700473 }
Angshuman Sarkarbad32df2012-02-01 19:52:52 +0530474}
475
476static inline void notify_dsp_smsm(void)
477{
478 static const struct interrupt_config_item *intr
479 = &private_intr_config[SMD_Q6].smsm;
Eric Holmberg7ad623a2012-03-01 14:41:10 -0700480 if (intr->out_base) {
481 ++interrupt_stats[SMD_Q6].smsm_out_config_count;
Angshuman Sarkarbad32df2012-02-01 19:52:52 +0530482 smd_write_intr(intr->out_bit_pos,
483 intr->out_base + intr->out_offset);
Eric Holmberg7ad623a2012-03-01 14:41:10 -0700484 } else {
485 ++interrupt_stats[SMD_Q6].smsm_out_hardcode_count;
Angshuman Sarkarbad32df2012-02-01 19:52:52 +0530486 MSM_TRIG_A2Q6_SMSM_INT;
Eric Holmberg7ad623a2012-03-01 14:41:10 -0700487 }
Angshuman Sarkarbad32df2012-02-01 19:52:52 +0530488}
489
490static inline void notify_dsps_smsm(void)
491{
492 static const struct interrupt_config_item *intr
493 = &private_intr_config[SMD_DSPS].smsm;
Eric Holmberg7ad623a2012-03-01 14:41:10 -0700494 if (intr->out_base) {
495 ++interrupt_stats[SMD_DSPS].smsm_out_config_count;
Angshuman Sarkarbad32df2012-02-01 19:52:52 +0530496 smd_write_intr(intr->out_bit_pos,
497 intr->out_base + intr->out_offset);
Eric Holmberg7ad623a2012-03-01 14:41:10 -0700498 } else {
499 ++interrupt_stats[SMD_DSPS].smsm_out_hardcode_count;
Angshuman Sarkarbad32df2012-02-01 19:52:52 +0530500 MSM_TRIG_A2DSPS_SMSM_INT;
Eric Holmberg7ad623a2012-03-01 14:41:10 -0700501 }
Angshuman Sarkarbad32df2012-02-01 19:52:52 +0530502}
503
504static inline void notify_wcnss_smsm(void)
505{
506 static const struct interrupt_config_item *intr
507 = &private_intr_config[SMD_WCNSS].smsm;
508 wakeup_v1_riva();
509
Eric Holmberg7ad623a2012-03-01 14:41:10 -0700510 if (intr->out_base) {
511 ++interrupt_stats[SMD_WCNSS].smsm_out_config_count;
Angshuman Sarkarbad32df2012-02-01 19:52:52 +0530512 smd_write_intr(intr->out_bit_pos,
513 intr->out_base + intr->out_offset);
Eric Holmberg7ad623a2012-03-01 14:41:10 -0700514 } else {
515 ++interrupt_stats[SMD_WCNSS].smsm_out_hardcode_count;
Angshuman Sarkarbad32df2012-02-01 19:52:52 +0530516 MSM_TRIG_A2WCNSS_SMSM_INT;
Eric Holmberg7ad623a2012-03-01 14:41:10 -0700517 }
Angshuman Sarkarbad32df2012-02-01 19:52:52 +0530518}
519
/*
 * Fan out an SMSM state change to every host whose interrupt mask
 * shows interest in the changed bits.
 *
 * @smsm_entry:  SMSM state entry that changed
 * @notify_mask: bits of that entry which changed
 *
 * Each remote host is interrupted only if (its intr_mask for this
 * entry & notify_mask) is non-zero.  The modem is special-cased:
 * older protocol versions have no smsm_intr_mask at all but still
 * communicate with the modem, so it is also notified whenever the
 * mask table is absent.
 */
static void notify_other_smsm(uint32_t smsm_entry, uint32_t notify_mask)
{
	/* older protocol don't use smsm_intr_mask,
	   but still communicates with modem */
	if (!smsm_info.intr_mask ||
	    (__raw_readl(SMSM_INTR_MASK_ADDR(smsm_entry, SMSM_MODEM))
	     & notify_mask))
		notify_modem_smsm();

	if (smsm_info.intr_mask &&
	    (__raw_readl(SMSM_INTR_MASK_ADDR(smsm_entry, SMSM_Q6))
	     & notify_mask)) {
		uint32_t mux_val;

		/*
		 * On QSD8x50 the Q6 is signalled through a shared-memory
		 * interrupt mux counter: increment it before raising the
		 * interrupt so the Q6 can tell how many events occurred.
		 */
		if (cpu_is_qsd8x50() && smsm_info.intr_mux) {
			mux_val = __raw_readl(
					SMSM_INTR_MUX_ADDR(SMEM_APPS_Q6_SMSM));
			mux_val++;
			__raw_writel(mux_val,
					SMSM_INTR_MUX_ADDR(SMEM_APPS_Q6_SMSM));
		}
		notify_dsp_smsm();
	}

	if (smsm_info.intr_mask &&
	    (__raw_readl(SMSM_INTR_MASK_ADDR(smsm_entry, SMSM_WCNSS))
	     & notify_mask)) {
		notify_wcnss_smsm();
	}

	if (smsm_info.intr_mask &&
	    (__raw_readl(SMSM_INTR_MASK_ADDR(smsm_entry, SMSM_DSPS))
	     & notify_mask)) {
		notify_dsps_smsm();
	}

	/*
	 * Notify local SMSM callback clients without wakelock since this
	 * code is used by power management during power-down/-up sequencing
	 * on DEM-based targets.  Grabbing a wakelock in this case will
	 * abort the power-down sequencing.
	 */
	if (smsm_info.intr_mask &&
	    (__raw_readl(SMSM_INTR_MASK_ADDR(smsm_entry, SMSM_APPS))
	     & notify_mask)) {
		smsm_cb_snapshot(0);
	}
}
568
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700569void smd_diag(void)
Brian Swetland2eb44eb2008-09-29 16:00:48 -0700570{
571 char *x;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700572 int size;
Brian Swetland2eb44eb2008-09-29 16:00:48 -0700573
574 x = smem_find(ID_DIAG_ERR_MSG, SZ_DIAG_ERR_MSG);
575 if (x != 0) {
576 x[SZ_DIAG_ERR_MSG - 1] = 0;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700577 SMD_INFO("smem: DIAG '%s'\n", x);
578 }
579
580 x = smem_get_entry(SMEM_ERR_CRASH_LOG, &size);
581 if (x != 0) {
582 x[size - 1] = 0;
583 pr_err("smem: CRASH LOG\n'%s'\n", x);
Brian Swetland2eb44eb2008-09-29 16:00:48 -0700584 }
585}
586
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700587
/*
 * Terminal handler for a detected modem crash: dump the shared-memory
 * diagnostic/crash logs, then spin forever.  The spin is deliberate —
 * the modem or the watchdog is expected to reset the whole system.
 */
static void handle_modem_crash(void)
{
	pr_err("MODEM/AMSS has CRASHED\n");
	smd_diag();

	/* hard reboot if possible FIXME
	if (msm_reset_hook)
		msm_reset_hook();
	*/

	/* in this case the modem or watchdog should reboot us */
	for (;;)
		;
}
602
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700603int smsm_check_for_modem_crash(void)
Arve Hjønnevåg28379412009-05-20 16:52:36 -0700604{
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700605 /* if the modem's not ready yet, we have to hope for the best */
606 if (!smsm_info.state)
607 return 0;
Arve Hjønnevåg28379412009-05-20 16:52:36 -0700608
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700609 if (__raw_readl(SMSM_STATE_ADDR(SMSM_MODEM_STATE)) & SMSM_RESET) {
Brian Swetland2eb44eb2008-09-29 16:00:48 -0700610 handle_modem_crash();
611 return -1;
Brian Swetland2eb44eb2008-09-29 16:00:48 -0700612 }
Brian Swetland5b0f5a32009-04-26 18:38:49 -0700613 return 0;
Brian Swetland2eb44eb2008-09-29 16:00:48 -0700614}
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700615EXPORT_SYMBOL(smsm_check_for_modem_crash);
Brian Swetland2eb44eb2008-09-29 16:00:48 -0700616
Brian Swetland2eb44eb2008-09-29 16:00:48 -0700617/* the spinlock is used to synchronize between the
Brian Swetland03e00cd2009-07-01 17:58:37 -0700618 * irq handler and code that mutates the channel
619 * list or fiddles with channel state
620 */
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700621static DEFINE_SPINLOCK(smd_lock);
Brian Swetland03e00cd2009-07-01 17:58:37 -0700622DEFINE_SPINLOCK(smem_lock);
Brian Swetland2eb44eb2008-09-29 16:00:48 -0700623
624/* the mutex is used during open() and close()
Brian Swetland03e00cd2009-07-01 17:58:37 -0700625 * operations to avoid races while creating or
626 * destroying smd_channel structures
627 */
Brian Swetland2eb44eb2008-09-29 16:00:48 -0700628static DEFINE_MUTEX(smd_creation_mutex);
629
630static int smd_initialized;
631
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700632struct smd_shared_v1 {
633 struct smd_half_channel ch0;
634 unsigned char data0[SMD_BUF_SIZE];
635 struct smd_half_channel ch1;
636 unsigned char data1[SMD_BUF_SIZE];
637};
638
639struct smd_shared_v2 {
640 struct smd_half_channel ch0;
641 struct smd_half_channel ch1;
642};
643
Jeff Hugo918b2dc2012-03-21 13:42:09 -0600644struct smd_shared_v2_word_access {
645 struct smd_half_channel_word_access ch0;
646 struct smd_half_channel_word_access ch1;
647};
648
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700649struct smd_channel {
Jeff Hugo918b2dc2012-03-21 13:42:09 -0600650 volatile void *send; /* some variant of smd_half_channel */
651 volatile void *recv; /* some variant of smd_half_channel */
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700652 unsigned char *send_data;
653 unsigned char *recv_data;
654 unsigned fifo_size;
655 unsigned fifo_mask;
656 struct list_head ch_list;
657
658 unsigned current_packet;
659 unsigned n;
660 void *priv;
661 void (*notify)(void *priv, unsigned flags);
662
663 int (*read)(smd_channel_t *ch, void *data, int len, int user_buf);
664 int (*write)(smd_channel_t *ch, const void *data, int len,
665 int user_buf);
666 int (*read_avail)(smd_channel_t *ch);
667 int (*write_avail)(smd_channel_t *ch);
668 int (*read_from_cb)(smd_channel_t *ch, void *data, int len,
669 int user_buf);
670
671 void (*update_state)(smd_channel_t *ch);
672 unsigned last_state;
673 void (*notify_other_cpu)(void);
674
675 char name[20];
676 struct platform_device pdev;
677 unsigned type;
678
679 int pending_pkt_sz;
680
681 char is_pkt_ch;
Jeff Hugo918b2dc2012-03-21 13:42:09 -0600682
683 /*
684 * private internal functions to access *send and *recv.
685 * never to be exported outside of smd
686 */
687 struct smd_half_channel_access *half_ch;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700688};
689
690struct edge_to_pid {
691 uint32_t local_pid;
692 uint32_t remote_pid;
Eric Holmberg17992c12012-02-29 12:54:44 -0700693 char subsys_name[SMD_MAX_CH_NAME_LEN];
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700694};
695
/**
 * Maps edge type to local and remote processor ID's.
 *
 * NOTE(review): only edges whose local end is SMD_APPS carry a
 * subsys_name; presumably the name is only needed for restart
 * notification of apps-visible subsystems -- confirm before relying
 * on smd_edge_to_subsystem() for the unnamed edges (it returns NULL
 * for them).
 */
static struct edge_to_pid edge_to_pids[] = {
	[SMD_APPS_MODEM] = {SMD_APPS, SMD_MODEM, "modem"},
	[SMD_APPS_QDSP] = {SMD_APPS, SMD_Q6, "q6"},
	[SMD_MODEM_QDSP] = {SMD_MODEM, SMD_Q6},
	[SMD_APPS_DSPS] = {SMD_APPS, SMD_DSPS, "dsps"},
	[SMD_MODEM_DSPS] = {SMD_MODEM, SMD_DSPS},
	[SMD_QDSP_DSPS] = {SMD_Q6, SMD_DSPS},
	[SMD_APPS_WCNSS] = {SMD_APPS, SMD_WCNSS, "wcnss"},
	[SMD_MODEM_WCNSS] = {SMD_MODEM, SMD_WCNSS},
	[SMD_QDSP_WCNSS] = {SMD_Q6, SMD_WCNSS},
	[SMD_DSPS_WCNSS] = {SMD_DSPS, SMD_WCNSS},
	[SMD_APPS_Q6FW] = {SMD_APPS, SMD_MODEM_Q6_FW},
	[SMD_MODEM_Q6FW] = {SMD_MODEM, SMD_MODEM_Q6_FW},
	[SMD_QDSP_Q6FW] = {SMD_Q6, SMD_MODEM_Q6_FW},
	[SMD_DSPS_Q6FW] = {SMD_DSPS, SMD_MODEM_Q6_FW},
	[SMD_WCNSS_Q6FW] = {SMD_WCNSS, SMD_MODEM_Q6_FW},
	[SMD_APPS_RPM] = {SMD_APPS, SMD_RPM},
	[SMD_MODEM_RPM] = {SMD_MODEM, SMD_RPM},
	[SMD_QDSP_RPM] = {SMD_Q6, SMD_RPM},
	[SMD_WCNSS_RPM] = {SMD_WCNSS, SMD_RPM},
};
720
/**
 * struct restart_notifier_block - per-subsystem restart notifier state
 * @processor: SMD processor ID the notifier is registered for
 * @name:      human-readable subsystem name
 * @nb:        embedded notifier block handed to the notifier chain
 */
struct restart_notifier_block {
	unsigned processor;
	char *name;
	struct notifier_block nb;
};
726
/* when non-zero, the SMSM_RESET handshake on modem restart is skipped */
static int disable_smsm_reset_handshake;
static struct platform_device loopback_tty_pdev = {.name = "LOOPBACK_TTY"};

/* channel bookkeeping lists; closing/to-close feed the close worker */
static LIST_HEAD(smd_ch_closed_list);
static LIST_HEAD(smd_ch_closing_list);
static LIST_HEAD(smd_ch_to_close_list);
/* open channels, one list per remote processor edge */
static LIST_HEAD(smd_ch_list_modem);
static LIST_HEAD(smd_ch_list_dsp);
static LIST_HEAD(smd_ch_list_dsps);
static LIST_HEAD(smd_ch_list_wcnss);
static LIST_HEAD(smd_ch_list_rpm);

/* one "already probed" flag per slot of the 64-entry SMEM alloc table */
static unsigned char smd_ch_allocated[64];
static struct work_struct probe_work;

static void finalize_channel_close_fn(struct work_struct *work);
static DECLARE_WORK(finalize_channel_close_work, finalize_channel_close_fn);
static struct workqueue_struct *channel_close_wq;

static int smd_alloc_channel(struct smd_alloc_elm *alloc_elm);

/* on smp systems, the probe might get called from multiple cores,
   hence use a lock */
static DEFINE_MUTEX(smd_probe_lock);
751
752static void smd_channel_probe_worker(struct work_struct *work)
753{
754 struct smd_alloc_elm *shared;
755 unsigned n;
756 uint32_t type;
757
758 shared = smem_find(ID_CH_ALLOC_TBL, sizeof(*shared) * 64);
759
760 if (!shared) {
761 pr_err("%s: allocation table not initialized\n", __func__);
762 return;
763 }
764
765 mutex_lock(&smd_probe_lock);
766 for (n = 0; n < 64; n++) {
767 if (smd_ch_allocated[n])
768 continue;
769
770 /* channel should be allocated only if APPS
771 processor is involved */
772 type = SMD_CHANNEL_TYPE(shared[n].type);
Jeff Hugo918b2dc2012-03-21 13:42:09 -0600773 if (type >= ARRAY_SIZE(edge_to_pids) ||
774 edge_to_pids[type].local_pid != SMD_APPS)
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700775 continue;
776 if (!shared[n].ref_count)
777 continue;
778 if (!shared[n].name[0])
779 continue;
780
781 if (!smd_alloc_channel(&shared[n]))
782 smd_ch_allocated[n] = 1;
783 else
784 SMD_INFO("Probe skipping ch %d, not allocated\n", n);
785 }
786 mutex_unlock(&smd_probe_lock);
787}
788
789/**
790 * Lookup processor ID and determine if it belongs to the proved edge
791 * type.
792 *
793 * @shared2: Pointer to v2 shared channel structure
794 * @type: Edge type
795 * @pid: Processor ID of processor on edge
796 * @local_ch: Channel that belongs to processor @pid
797 * @remote_ch: Other side of edge contained @pid
798 *
799 * Returns 0 for not on edge, 1 for found on edge
800 */
801static int pid_is_on_edge(struct smd_shared_v2 *shared2,
802 uint32_t type, uint32_t pid,
803 struct smd_half_channel **local_ch,
804 struct smd_half_channel **remote_ch
805 )
806{
807 int ret = 0;
808 struct edge_to_pid *edge;
809
810 *local_ch = 0;
811 *remote_ch = 0;
812
813 if (!shared2 || (type >= ARRAY_SIZE(edge_to_pids)))
814 return 0;
815
816 edge = &edge_to_pids[type];
817 if (edge->local_pid != edge->remote_pid) {
818 if (pid == edge->local_pid) {
819 *local_ch = &shared2->ch0;
820 *remote_ch = &shared2->ch1;
821 ret = 1;
822 } else if (pid == edge->remote_pid) {
823 *local_ch = &shared2->ch1;
824 *remote_ch = &shared2->ch0;
825 ret = 1;
826 }
827 }
828
829 return ret;
830}
831
Eric Holmberg17992c12012-02-29 12:54:44 -0700832/*
833 * Returns a pointer to the subsystem name or NULL if no
834 * subsystem name is available.
835 *
836 * @type - Edge definition
837 */
838const char *smd_edge_to_subsystem(uint32_t type)
839{
840 const char *subsys = NULL;
841
842 if (type < ARRAY_SIZE(edge_to_pids)) {
843 subsys = edge_to_pids[type].subsys_name;
844 if (subsys[0] == 0x0)
845 subsys = NULL;
846 }
847 return subsys;
848}
849EXPORT_SYMBOL(smd_edge_to_subsystem);
850
Eric Holmberg7ad623a2012-03-01 14:41:10 -0700851/*
852 * Returns a pointer to the subsystem name given the
853 * remote processor ID.
854 *
855 * @pid Remote processor ID
856 * @returns Pointer to subsystem name or NULL if not found
857 */
858const char *smd_pid_to_subsystem(uint32_t pid)
859{
860 const char *subsys = NULL;
861 int i;
862
863 for (i = 0; i < ARRAY_SIZE(edge_to_pids); ++i) {
864 if (pid == edge_to_pids[i].remote_pid &&
865 edge_to_pids[i].subsys_name[0] != 0x0
866 ) {
867 subsys = edge_to_pids[i].subsys_name;
868 break;
869 }
870 }
871
872 return subsys;
873}
874EXPORT_SYMBOL(smd_pid_to_subsystem);
Eric Holmberg17992c12012-02-29 12:54:44 -0700875
/*
 * Force one half-channel into @new_state after its owner restarted.
 * Handshake flags (DSR/CTS/CD) are dropped and fSTATE is raised so the
 * surviving side observes a state transition.  Half-channels already in
 * SMD_SS_CLOSED are left untouched.
 */
static void smd_reset_edge(struct smd_half_channel *ch, unsigned new_state)
{
	if (ch->state != SMD_SS_CLOSED) {
		ch->state = new_state;
		ch->fDSR = 0;
		ch->fCTS = 0;
		ch->fCD = 0;
		ch->fSTATE = 1;
	}
}
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700886
/*
 * Walk the channel allocation table and push every half-channel owned
 * by restarted processor @pid to @new_state via smd_reset_edge().
 * Caller is expected to hold the locks serializing channel state
 * (see smd_channel_reset()).
 */
static void smd_channel_reset_state(struct smd_alloc_elm *shared,
		unsigned new_state, unsigned pid)
{
	unsigned n;
	struct smd_shared_v2 *shared2;
	uint32_t type;
	struct smd_half_channel *local_ch;
	struct smd_half_channel *remote_ch;

	for (n = 0; n < SMD_CHANNELS; n++) {
		/* only entries that were actually allocated and named */
		if (!shared[n].ref_count)
			continue;
		if (!shared[n].name[0])
			continue;

		type = SMD_CHANNEL_TYPE(shared[n].type);
		shared2 = smem_alloc(SMEM_SMD_BASE_ID + n, sizeof(*shared2));
		if (!shared2)
			continue;

		if (pid_is_on_edge(shared2, type, pid, &local_ch, &remote_ch))
			smd_reset_edge(local_ch, new_state);

		/*
		 * ModemFW is in the same subsystem as ModemSW, but has
		 * separate SMD edges that need to be reset.
		 */
		if (pid == SMSM_MODEM &&
				pid_is_on_edge(shared2, type, SMD_MODEM_Q6_FW,
					&local_ch, &remote_ch))
			smd_reset_edge(local_ch, new_state);
	}
}
920
921
/*
 * Recover SMD/SMSM shared state after processor @restart_pid has been
 * restarted.  The sequence matters: release remote spinlocks the dead
 * processor may have held, clear its SMSM state word and re-kick the
 * SMSM handshake, then march all of its half-channels through CLOSING
 * and finally CLOSED, notifying the remaining processors after each
 * phase.  smd_probe_lock and smd_lock are taken around each state
 * sweep so probing and the irq path see a consistent view.
 */
void smd_channel_reset(uint32_t restart_pid)
{
	struct smd_alloc_elm *shared;
	unsigned long flags;

	SMD_DBG("%s: starting reset\n", __func__);
	shared = smem_find(ID_CH_ALLOC_TBL, sizeof(*shared) * 64);
	if (!shared) {
		pr_err("%s: allocation table not initialized\n", __func__);
		return;
	}

	/* release any held spinlocks */
	remote_spin_release(&remote_spinlock, restart_pid);
	remote_spin_release_all(restart_pid);

	/* reset SMSM entry */
	if (smsm_info.state) {
		writel_relaxed(0, SMSM_STATE_ADDR(restart_pid));

		/* restart SMSM init handshake */
		if (restart_pid == SMSM_MODEM) {
			smsm_change_state(SMSM_APPS_STATE,
				SMSM_INIT | SMSM_SMD_LOOPBACK | SMSM_RESET,
				0);
		}

		/* notify SMSM processors */
		smsm_irq_handler(0, 0);
		notify_modem_smsm();
		notify_dsp_smsm();
		notify_dsps_smsm();
		notify_wcnss_smsm();
	}

	/* change all remote states to CLOSING */
	mutex_lock(&smd_probe_lock);
	spin_lock_irqsave(&smd_lock, flags);
	smd_channel_reset_state(shared, SMD_SS_CLOSING, restart_pid);
	spin_unlock_irqrestore(&smd_lock, flags);
	mutex_unlock(&smd_probe_lock);

	/* notify SMD processors */
	mb();
	smd_fake_irq_handler(0);
	notify_modem_smd();
	notify_dsp_smd();
	notify_dsps_smd();
	notify_wcnss_smd();

	/* change all remote states to CLOSED */
	mutex_lock(&smd_probe_lock);
	spin_lock_irqsave(&smd_lock, flags);
	smd_channel_reset_state(shared, SMD_SS_CLOSED, restart_pid);
	spin_unlock_irqrestore(&smd_lock, flags);
	mutex_unlock(&smd_probe_lock);

	/* notify SMD processors */
	mb();
	smd_fake_irq_handler(0);
	notify_modem_smd();
	notify_dsp_smd();
	notify_dsps_smd();
	notify_wcnss_smd();

	SMD_DBG("%s: finished reset\n", __func__);
}
989
Brian Swetland2eb44eb2008-09-29 16:00:48 -0700990/* how many bytes are available for reading */
991static int smd_stream_read_avail(struct smd_channel *ch)
992{
Jeff Hugo918b2dc2012-03-21 13:42:09 -0600993 return (ch->half_ch->get_head(ch->recv) -
994 ch->half_ch->get_tail(ch->recv)) & ch->fifo_mask;
Brian Swetland2eb44eb2008-09-29 16:00:48 -0700995}
996
997/* how many bytes we are free to write */
998static int smd_stream_write_avail(struct smd_channel *ch)
999{
Jeff Hugo918b2dc2012-03-21 13:42:09 -06001000 return ch->fifo_mask - ((ch->half_ch->get_head(ch->send) -
1001 ch->half_ch->get_tail(ch->send)) & ch->fifo_mask);
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001002}
1003
1004static int smd_packet_read_avail(struct smd_channel *ch)
1005{
1006 if (ch->current_packet) {
1007 int n = smd_stream_read_avail(ch);
1008 if (n > ch->current_packet)
1009 n = ch->current_packet;
1010 return n;
1011 } else {
1012 return 0;
1013 }
1014}
1015
1016static int smd_packet_write_avail(struct smd_channel *ch)
1017{
1018 int n = smd_stream_write_avail(ch);
1019 return n > SMD_HEADER_SIZE ? n - SMD_HEADER_SIZE : 0;
1020}
1021
1022static int ch_is_open(struct smd_channel *ch)
1023{
Jeff Hugo918b2dc2012-03-21 13:42:09 -06001024 return (ch->half_ch->get_state(ch->recv) == SMD_SS_OPENED ||
1025 ch->half_ch->get_state(ch->recv) == SMD_SS_FLUSHING)
1026 && (ch->half_ch->get_state(ch->send) == SMD_SS_OPENED);
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001027}
1028
1029/* provide a pointer and length to readable data in the fifo */
1030static unsigned ch_read_buffer(struct smd_channel *ch, void **ptr)
1031{
Jeff Hugo918b2dc2012-03-21 13:42:09 -06001032 unsigned head = ch->half_ch->get_head(ch->recv);
1033 unsigned tail = ch->half_ch->get_tail(ch->recv);
Brian Swetland5b0f5a32009-04-26 18:38:49 -07001034 *ptr = (void *) (ch->recv_data + tail);
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001035
1036 if (tail <= head)
1037 return head - tail;
1038 else
Brian Swetland5b0f5a32009-04-26 18:38:49 -07001039 return ch->fifo_size - tail;
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001040}
1041
/* non-zero when the remote side asked us not to interrupt it on reads */
static int read_intr_blocked(struct smd_channel *ch)
{
	return ch->half_ch->get_fBLOCKREADINTR(ch->recv);
}
1046
/* advance the fifo read pointer after data from ch_read_buffer is consumed */
static void ch_read_done(struct smd_channel *ch, unsigned count)
{
	BUG_ON(count > smd_stream_read_avail(ch));
	ch->half_ch->set_tail(ch->recv,
		(ch->half_ch->get_tail(ch->recv) + count) & ch->fifo_mask);
	/* publish the new tail before flagging it to the remote side */
	wmb();
	ch->half_ch->set_fTAIL(ch->send, 1);
}
1056
/* basic read interface to ch_read_{buffer,done} used
 * by smd_*_read() and update_packet_state()
 * will read-and-discard if the _data pointer is null
 *
 * Returns the number of bytes consumed from the fifo.
 * NOTE(review): on a partial copy_to_user() failure the bytes are
 * still consumed and counted in the return value; only an error is
 * logged -- confirm callers tolerate this.
 */
static int ch_read(struct smd_channel *ch, void *_data, int len, int user_buf)
{
	void *ptr;
	unsigned n;
	unsigned char *data = _data;
	int orig_len = len;
	int r = 0;

	while (len > 0) {
		/* n is the contiguous readable run; 0 means fifo empty */
		n = ch_read_buffer(ch, &ptr);
		if (n == 0)
			break;

		if (n > len)
			n = len;
		if (_data) {
			if (user_buf) {
				r = copy_to_user(data, ptr, n);
				if (r > 0) {
					pr_err("%s: "
						"copy_to_user could not copy "
						"%i bytes.\n",
						__func__,
						r);
				}
			} else
				memcpy(data, ptr, n);
		}

		data += n;
		len -= n;
		ch_read_done(ch, n);
	}

	return orig_len - len;
}
1097
/* per-transfer-type hook invoked from the irq path; see ch->update_state */
static void update_stream_state(struct smd_channel *ch)
{
	/* streams have no special state requiring updating */
}
1102
/*
 * If no packet is in progress, pull packet headers off the stream
 * until one announces a non-zero payload (zero-length packets are
 * discarded) or until less than a full header is buffered.  Leaves
 * ch->current_packet set to the remaining payload byte count.
 */
static void update_packet_state(struct smd_channel *ch)
{
	unsigned hdr[5];
	int r;

	/* can't do anything if we're in the middle of a packet */
	while (ch->current_packet == 0) {
		/* discard 0 length packets if any */

		/* don't bother unless we can get the full header */
		if (smd_stream_read_avail(ch) < SMD_HEADER_SIZE)
			return;

		r = ch_read(ch, hdr, SMD_HEADER_SIZE, 0);
		BUG_ON(r != SMD_HEADER_SIZE);

		/* first header word is the payload length */
		ch->current_packet = hdr[0];
	}
}
1122
1123/* provide a pointer and length to next free space in the fifo */
1124static unsigned ch_write_buffer(struct smd_channel *ch, void **ptr)
1125{
Jeff Hugo918b2dc2012-03-21 13:42:09 -06001126 unsigned head = ch->half_ch->get_head(ch->send);
1127 unsigned tail = ch->half_ch->get_tail(ch->send);
Brian Swetland5b0f5a32009-04-26 18:38:49 -07001128 *ptr = (void *) (ch->send_data + head);
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001129
1130 if (head < tail) {
1131 return tail - head - 1;
1132 } else {
1133 if (tail == 0)
Brian Swetland5b0f5a32009-04-26 18:38:49 -07001134 return ch->fifo_size - head - 1;
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001135 else
Brian Swetland5b0f5a32009-04-26 18:38:49 -07001136 return ch->fifo_size - head;
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001137 }
1138}
1139
/* advance the fifo write pointer after freespace
 * from ch_write_buffer is filled
 */
static void ch_write_done(struct smd_channel *ch, unsigned count)
{
	BUG_ON(count > smd_stream_write_avail(ch));
	ch->half_ch->set_head(ch->send,
		(ch->half_ch->get_head(ch->send) + count) & ch->fifo_mask);
	/* publish the new head before flagging it to the remote side */
	wmb();
	ch->half_ch->set_fHEAD(ch->send, 1);
}
1151
/*
 * Move the local half-channel to state @n and signal the remote
 * processor.  DSR/CTS/CD handshake flags are asserted for OPENED and
 * dropped for every other state; the state word is written before
 * fSTATE is raised, and the remote CPU is interrupted last.
 */
static void ch_set_state(struct smd_channel *ch, unsigned n)
{
	if (n == SMD_SS_OPENED) {
		ch->half_ch->set_fDSR(ch->send, 1);
		ch->half_ch->set_fCTS(ch->send, 1);
		ch->half_ch->set_fCD(ch->send, 1);
	} else {
		ch->half_ch->set_fDSR(ch->send, 0);
		ch->half_ch->set_fCTS(ch->send, 0);
		ch->half_ch->set_fCD(ch->send, 0);
	}
	ch->half_ch->set_state(ch->send, n);
	ch->half_ch->set_fSTATE(ch->send, 1);
	ch->notify_other_cpu();
}
1167
1168static void do_smd_probe(void)
1169{
1170 struct smem_shared *shared = (void *) MSM_SHARED_RAM_BASE;
1171 if (shared->heap_info.free_offset != last_heap_free) {
1172 last_heap_free = shared->heap_info.free_offset;
1173 schedule_work(&probe_work);
1174 }
1175}
1176
/*
 * React to the remote half-channel moving from @last to @next.
 * Advances the local half-channel state machine to follow and fires
 * the channel's notify callback for open/close events.  Called with
 * smd_lock held from the irq paths.
 */
static void smd_state_change(struct smd_channel *ch,
			unsigned last, unsigned next)
{
	ch->last_state = next;

	SMD_INFO("SMD: ch %d %d -> %d\n", ch->n, last, next);

	switch (next) {
	case SMD_SS_OPENING:
		if (ch->half_ch->get_state(ch->send) == SMD_SS_CLOSING ||
		    ch->half_ch->get_state(ch->send) == SMD_SS_CLOSED) {
			/* fresh session: rewind our fifo indices */
			ch->half_ch->set_tail(ch->recv, 0);
			ch->half_ch->set_head(ch->send, 0);
			ch->half_ch->set_fBLOCKREADINTR(ch->send, 0);
			ch_set_state(ch, SMD_SS_OPENING);
		}
		break;
	case SMD_SS_OPENED:
		if (ch->half_ch->get_state(ch->send) == SMD_SS_OPENING) {
			ch_set_state(ch, SMD_SS_OPENED);
			ch->notify(ch->priv, SMD_EVENT_OPEN);
		}
		break;
	case SMD_SS_FLUSHING:
	case SMD_SS_RESET:
		/* we should force them to close? */
		break;
	case SMD_SS_CLOSED:
		if (ch->half_ch->get_state(ch->send) == SMD_SS_OPENED) {
			ch_set_state(ch, SMD_SS_CLOSING);
			/* drop any partially received packet */
			ch->current_packet = 0;
			ch->pending_pkt_sz = 0;
			ch->notify(ch->priv, SMD_EVENT_CLOSE);
		}
		break;
	case SMD_SS_CLOSING:
		if (ch->half_ch->get_state(ch->send) == SMD_SS_CLOSED) {
			/* both sides closed: hand off to the close worker */
			list_move(&ch->ch_list,
					&smd_ch_to_close_list);
			queue_work(channel_close_wq,
				&finalize_channel_close_work);
		}
		break;
	}
}
1222
/*
 * Service channels on the closing list: acknowledge the remote fSTATE
 * flag and run the state machine on any remote state change.  Data
 * flags (fHEAD/fTAIL) are deliberately not examined here.
 */
static void handle_smd_irq_closing_list(void)
{
	unsigned long flags;
	struct smd_channel *ch;
	struct smd_channel *index;
	unsigned tmp;

	spin_lock_irqsave(&smd_lock, flags);
	/* _safe: smd_state_change() may move ch off this list */
	list_for_each_entry_safe(ch, index, &smd_ch_closing_list, ch_list) {
		if (ch->half_ch->get_fSTATE(ch->recv))
			ch->half_ch->set_fSTATE(ch->recv, 0);
		tmp = ch->half_ch->get_state(ch->recv);
		if (tmp != ch->last_state)
			smd_state_change(ch, ch->last_state, tmp);
	}
	spin_unlock_irqrestore(&smd_lock, flags);
}
1240
/*
 * Core interrupt service for one edge's channel list: collect and
 * clear the remote-raised event flags, then dispatch state-change,
 * data, and status notifications.  ch_flags bits: 1=fHEAD, 2=fTAIL,
 * 4=fSTATE.  A status event is suppressed when a full state change
 * was already delivered for the same interrupt.
 */
static void handle_smd_irq(struct list_head *list, void (*notify)(void))
{
	unsigned long flags;
	struct smd_channel *ch;
	unsigned ch_flags;
	unsigned tmp;
	unsigned char state_change;

	spin_lock_irqsave(&smd_lock, flags);
	list_for_each_entry(ch, list, ch_list) {
		state_change = 0;
		ch_flags = 0;
		if (ch_is_open(ch)) {
			if (ch->half_ch->get_fHEAD(ch->recv)) {
				ch->half_ch->set_fHEAD(ch->recv, 0);
				ch_flags |= 1;
			}
			if (ch->half_ch->get_fTAIL(ch->recv)) {
				ch->half_ch->set_fTAIL(ch->recv, 0);
				ch_flags |= 2;
			}
			if (ch->half_ch->get_fSTATE(ch->recv)) {
				ch->half_ch->set_fSTATE(ch->recv, 0);
				ch_flags |= 4;
			}
		}
		tmp = ch->half_ch->get_state(ch->recv);
		if (tmp != ch->last_state) {
			SMx_POWER_INFO("SMD ch%d '%s' State change %d->%d\n",
					ch->n, ch->name, ch->last_state, tmp);
			smd_state_change(ch, ch->last_state, tmp);
			state_change = 1;
		}
		if (ch_flags & 0x3) {
			ch->update_state(ch);
			SMx_POWER_INFO("SMD ch%d '%s' Data event r%d/w%d\n",
					ch->n, ch->name,
					ch->read_avail(ch),
					ch->fifo_size - ch->write_avail(ch));
			ch->notify(ch->priv, SMD_EVENT_DATA);
		}
		if (ch_flags & 0x4 && !state_change) {
			SMx_POWER_INFO("SMD ch%d '%s' State update\n",
					ch->n, ch->name);
			ch->notify(ch->priv, SMD_EVENT_STATUS);
		}
	}
	spin_unlock_irqrestore(&smd_lock, flags);
	do_smd_probe();
}
1291
/* interrupt raised by the modem when it updates an apps<->modem channel */
static irqreturn_t smd_modem_irq_handler(int irq, void *data)
{
	SMx_POWER_INFO("SMD Int Modem->Apps\n");
	++interrupt_stats[SMD_MODEM].smd_in_count;
	handle_smd_irq(&smd_ch_list_modem, notify_modem_smd);
	handle_smd_irq_closing_list();
	return IRQ_HANDLED;
}
1300
/* interrupt raised by the LPASS/Q6 when it updates an apps<->q6 channel */
static irqreturn_t smd_dsp_irq_handler(int irq, void *data)
{
	SMx_POWER_INFO("SMD Int LPASS->Apps\n");
	++interrupt_stats[SMD_Q6].smd_in_count;
	handle_smd_irq(&smd_ch_list_dsp, notify_dsp_smd);
	handle_smd_irq_closing_list();
	return IRQ_HANDLED;
}
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001309
/* interrupt raised by the DSPS when it updates an apps<->dsps channel */
static irqreturn_t smd_dsps_irq_handler(int irq, void *data)
{
	SMx_POWER_INFO("SMD Int DSPS->Apps\n");
	++interrupt_stats[SMD_DSPS].smd_in_count;
	handle_smd_irq(&smd_ch_list_dsps, notify_dsps_smd);
	handle_smd_irq_closing_list();
	return IRQ_HANDLED;
}
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001318
/* interrupt raised by WCNSS when it updates an apps<->wcnss channel */
static irqreturn_t smd_wcnss_irq_handler(int irq, void *data)
{
	SMx_POWER_INFO("SMD Int WCNSS->Apps\n");
	++interrupt_stats[SMD_WCNSS].smd_in_count;
	handle_smd_irq(&smd_ch_list_wcnss, notify_wcnss_smd);
	handle_smd_irq_closing_list();
	return IRQ_HANDLED;
}
1327
/* interrupt raised by the RPM when it updates an apps<->rpm channel */
static irqreturn_t smd_rpm_irq_handler(int irq, void *data)
{
	SMx_POWER_INFO("SMD Int RPM->Apps\n");
	++interrupt_stats[SMD_RPM].smd_in_count;
	handle_smd_irq(&smd_ch_list_rpm, notify_rpm_smd);
	handle_smd_irq_closing_list();
	return IRQ_HANDLED;
}
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001336
/*
 * Service every edge as if its interrupt had fired.  Used where events
 * may have been missed: sleep exit (via the tasklet below) and channel
 * reset (smd_channel_reset() calls this directly).
 */
static void smd_fake_irq_handler(unsigned long arg)
{
	handle_smd_irq(&smd_ch_list_modem, notify_modem_smd);
	handle_smd_irq(&smd_ch_list_dsp, notify_dsp_smd);
	handle_smd_irq(&smd_ch_list_dsps, notify_dsps_smd);
	handle_smd_irq(&smd_ch_list_wcnss, notify_wcnss_smd);
	handle_smd_irq(&smd_ch_list_rpm, notify_rpm_smd);
	handle_smd_irq_closing_list();
}
1346
/* tasklet scheduled by smd_sleep_exit() to replay missed SMD events */
static DECLARE_TASKLET(smd_fake_irq_tasklet, smd_fake_irq_handler, 0);
1348
Brian Swetland37521a32009-07-01 18:30:47 -07001349static inline int smd_need_int(struct smd_channel *ch)
1350{
1351 if (ch_is_open(ch)) {
Jeff Hugo918b2dc2012-03-21 13:42:09 -06001352 if (ch->half_ch->get_fHEAD(ch->recv) ||
1353 ch->half_ch->get_fTAIL(ch->recv) ||
1354 ch->half_ch->get_fSTATE(ch->recv))
Brian Swetland37521a32009-07-01 18:30:47 -07001355 return 1;
Jeff Hugo918b2dc2012-03-21 13:42:09 -06001356 if (ch->half_ch->get_state(ch->recv) != ch->last_state)
Brian Swetland37521a32009-07-01 18:30:47 -07001357 return 1;
1358 }
1359 return 0;
1360}
1361
/*
 * Called on resume: if any open channel accumulated events while the
 * apps processor slept, schedule the fake-irq tasklet to service them.
 *
 * NOTE(review): smd_ch_list_rpm is not scanned here even though
 * smd_fake_irq_handler() services it -- confirm that is intentional
 * (e.g. RPM edges are not affected by apps sleep).
 */
void smd_sleep_exit(void)
{
	unsigned long flags;
	struct smd_channel *ch;
	int need_int = 0;

	spin_lock_irqsave(&smd_lock, flags);
	list_for_each_entry(ch, &smd_ch_list_modem, ch_list) {
		if (smd_need_int(ch)) {
			need_int = 1;
			break;
		}
	}
	list_for_each_entry(ch, &smd_ch_list_dsp, ch_list) {
		if (smd_need_int(ch)) {
			need_int = 1;
			break;
		}
	}
	list_for_each_entry(ch, &smd_ch_list_dsps, ch_list) {
		if (smd_need_int(ch)) {
			need_int = 1;
			break;
		}
	}
	list_for_each_entry(ch, &smd_ch_list_wcnss, ch_list) {
		if (smd_need_int(ch)) {
			need_int = 1;
			break;
		}
	}
	spin_unlock_irqrestore(&smd_lock, flags);
	do_smd_probe();

	if (need_int) {
		SMD_DBG("smd_sleep_exit need interrupt\n");
		tasklet_schedule(&smd_fake_irq_tasklet);
	}
}
EXPORT_SYMBOL(smd_sleep_exit);
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001402
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001403static int smd_is_packet(struct smd_alloc_elm *alloc_elm)
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001404{
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001405 if (SMD_XFER_TYPE(alloc_elm->type) == 1)
1406 return 0;
1407 else if (SMD_XFER_TYPE(alloc_elm->type) == 2)
Brian Swetland5b0f5a32009-04-26 18:38:49 -07001408 return 1;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001409
1410 /* for cases where xfer type is 0 */
1411 if (!strncmp(alloc_elm->name, "DAL", 3))
Brian Swetland5b0f5a32009-04-26 18:38:49 -07001412 return 0;
1413
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001414 /* for cases where xfer type is 0 */
1415 if (!strncmp(alloc_elm->name, "RPCCALL_QDSP", 12))
1416 return 0;
1417
1418 if (alloc_elm->cid > 4 || alloc_elm->cid == 1)
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001419 return 1;
1420 else
1421 return 0;
1422}
1423
/*
 * Copy up to @len bytes from @_data (kernel or user space, per
 * @user_buf) into the send fifo and signal the remote side if anything
 * was queued.  Returns the number of bytes written; if the channel is
 * found closed mid-transfer, len is rewound so 0 is reported.
 * NOTE(review): a partial copy_from_user() failure is only logged;
 * the uncopied region is still committed to the fifo.
 */
static int smd_stream_write(smd_channel_t *ch, const void *_data, int len,
					int user_buf)
{
	void *ptr;
	const unsigned char *buf = _data;
	unsigned xfer;
	int orig_len = len;
	int r = 0;

	SMD_DBG("smd_stream_write() %d -> ch%d\n", len, ch->n);
	if (len < 0)
		return -EINVAL;
	else if (len == 0)
		return 0;

	while ((xfer = ch_write_buffer(ch, &ptr)) != 0) {
		if (!ch_is_open(ch)) {
			len = orig_len;
			break;
		}
		if (xfer > len)
			xfer = len;
		if (user_buf) {
			r = copy_from_user(ptr, buf, xfer);
			if (r > 0) {
				pr_err("%s: "
					"copy_from_user could not copy %i "
					"bytes.\n",
					__func__,
					r);
			}
		} else
			memcpy(ptr, buf, xfer);
		ch_write_done(ch, xfer);
		len -= xfer;
		buf += xfer;
		if (len == 0)
			break;
	}

	/* only interrupt the remote CPU if something was actually queued */
	if (orig_len - len)
		ch->notify_other_cpu();

	return orig_len - len;
}
1469
/*
 * Write one @len-byte packet: a 5-word header (word 0 = payload
 * length) followed by the payload.  Fails with -ENOMEM unless the
 * whole packet fits in the fifo up front, so the two stream writes
 * below should not be short in practice.
 * NOTE(review): if the payload write does come up short (e.g. channel
 * closes mid-write) the header has already been committed, leaving
 * the stream desynchronized -- confirm callers treat that as fatal.
 */
static int smd_packet_write(smd_channel_t *ch, const void *_data, int len,
					int user_buf)
{
	int ret;
	unsigned hdr[5];

	SMD_DBG("smd_packet_write() %d -> ch%d\n", len, ch->n);
	if (len < 0)
		return -EINVAL;
	else if (len == 0)
		return 0;

	if (smd_stream_write_avail(ch) < (len + SMD_HEADER_SIZE))
		return -ENOMEM;

	hdr[0] = len;
	hdr[1] = hdr[2] = hdr[3] = hdr[4] = 0;


	ret = smd_stream_write(ch, hdr, sizeof(hdr), 0);
	if (ret < 0 || ret != sizeof(hdr)) {
		SMD_DBG("%s failed to write pkt header: "
			"%d returned\n", __func__, ret);
		return -1;
	}


	ret = smd_stream_write(ch, _data, len, user_buf);
	if (ret < 0 || ret != len) {
		SMD_DBG("%s failed to write pkt data: "
			"%d returned\n", __func__, ret);
		return ret;
	}

	return len;
}
1506
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001507static int smd_stream_read(smd_channel_t *ch, void *data, int len, int user_buf)
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001508{
1509 int r;
1510
1511 if (len < 0)
1512 return -EINVAL;
1513
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001514 r = ch_read(ch, data, len, user_buf);
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001515 if (r > 0)
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001516 if (!read_intr_blocked(ch))
1517 ch->notify_other_cpu();
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001518
1519 return r;
1520}
1521
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001522static int smd_packet_read(smd_channel_t *ch, void *data, int len, int user_buf)
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001523{
1524 unsigned long flags;
1525 int r;
1526
1527 if (len < 0)
1528 return -EINVAL;
1529
1530 if (len > ch->current_packet)
1531 len = ch->current_packet;
1532
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001533 r = ch_read(ch, data, len, user_buf);
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001534 if (r > 0)
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001535 if (!read_intr_blocked(ch))
1536 ch->notify_other_cpu();
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001537
1538 spin_lock_irqsave(&smd_lock, flags);
1539 ch->current_packet -= r;
1540 update_packet_state(ch);
1541 spin_unlock_irqrestore(&smd_lock, flags);
1542
1543 return r;
1544}
1545
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001546static int smd_packet_read_from_cb(smd_channel_t *ch, void *data, int len,
1547 int user_buf)
1548{
1549 int r;
1550
1551 if (len < 0)
1552 return -EINVAL;
1553
1554 if (len > ch->current_packet)
1555 len = ch->current_packet;
1556
1557 r = ch_read(ch, data, len, user_buf);
1558 if (r > 0)
1559 if (!read_intr_blocked(ch))
1560 ch->notify_other_cpu();
1561
1562 ch->current_packet -= r;
1563 update_packet_state(ch);
1564
1565 return r;
1566}
1567
Angshuman Sarkarac7d6252011-09-30 18:20:59 +05301568#if (defined(CONFIG_MSM_SMD_PKG4) || defined(CONFIG_MSM_SMD_PKG3))
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001569static int smd_alloc_v2(struct smd_channel *ch)
1570{
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001571 void *buffer;
1572 unsigned buffer_sz;
1573
Jeff Hugo918b2dc2012-03-21 13:42:09 -06001574 if (is_word_access_ch(ch->type)) {
1575 struct smd_shared_v2_word_access *shared2;
1576 shared2 = smem_alloc(SMEM_SMD_BASE_ID + ch->n,
1577 sizeof(*shared2));
1578 if (!shared2) {
1579 SMD_INFO("smem_alloc failed ch=%d\n", ch->n);
1580 return -EINVAL;
1581 }
1582 ch->send = &shared2->ch0;
1583 ch->recv = &shared2->ch1;
1584 } else {
1585 struct smd_shared_v2 *shared2;
1586 shared2 = smem_alloc(SMEM_SMD_BASE_ID + ch->n,
1587 sizeof(*shared2));
1588 if (!shared2) {
1589 SMD_INFO("smem_alloc failed ch=%d\n", ch->n);
1590 return -EINVAL;
1591 }
1592 ch->send = &shared2->ch0;
1593 ch->recv = &shared2->ch1;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001594 }
Jeff Hugo918b2dc2012-03-21 13:42:09 -06001595 ch->half_ch = get_half_ch_funcs(ch->type);
1596
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001597 buffer = smem_get_entry(SMEM_SMD_FIFO_BASE_ID + ch->n, &buffer_sz);
1598 if (!buffer) {
Angshuman Sarkarac7d6252011-09-30 18:20:59 +05301599 SMD_INFO("smem_get_entry failed\n");
1600 return -EINVAL;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001601 }
1602
1603 /* buffer must be a power-of-two size */
Angshuman Sarkarac7d6252011-09-30 18:20:59 +05301604 if (buffer_sz & (buffer_sz - 1)) {
1605 SMD_INFO("Buffer size: %u not power of two\n", buffer_sz);
1606 return -EINVAL;
1607 }
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001608 buffer_sz /= 2;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001609 ch->send_data = buffer;
1610 ch->recv_data = buffer + buffer_sz;
1611 ch->fifo_size = buffer_sz;
Jeff Hugo918b2dc2012-03-21 13:42:09 -06001612
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001613 return 0;
1614}
1615
1616static int smd_alloc_v1(struct smd_channel *ch)
1617{
Angshuman Sarkarac7d6252011-09-30 18:20:59 +05301618 return -EINVAL;
1619}
1620
1621#else /* define v1 for older targets */
1622static int smd_alloc_v2(struct smd_channel *ch)
1623{
1624 return -EINVAL;
1625}
1626
1627static int smd_alloc_v1(struct smd_channel *ch)
1628{
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001629 struct smd_shared_v1 *shared1;
1630 shared1 = smem_alloc(ID_SMD_CHANNELS + ch->n, sizeof(*shared1));
1631 if (!shared1) {
1632 pr_err("smd_alloc_channel() cid %d does not exist\n", ch->n);
Angshuman Sarkarac7d6252011-09-30 18:20:59 +05301633 return -EINVAL;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001634 }
1635 ch->send = &shared1->ch0;
1636 ch->recv = &shared1->ch1;
1637 ch->send_data = shared1->data0;
1638 ch->recv_data = shared1->data1;
1639 ch->fifo_size = SMD_BUF_SIZE;
Jeff Hugo918b2dc2012-03-21 13:42:09 -06001640 ch->half_ch = get_half_ch_funcs(ch->type);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001641 return 0;
1642}
1643
Angshuman Sarkarac7d6252011-09-30 18:20:59 +05301644#endif
1645
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001646static int smd_alloc_channel(struct smd_alloc_elm *alloc_elm)
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001647{
1648 struct smd_channel *ch;
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001649
1650 ch = kzalloc(sizeof(struct smd_channel), GFP_KERNEL);
1651 if (ch == 0) {
1652 pr_err("smd_alloc_channel() out of memory\n");
Brian Swetland34f719b2009-10-30 16:22:05 -07001653 return -1;
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001654 }
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001655 ch->n = alloc_elm->cid;
Jeff Hugo918b2dc2012-03-21 13:42:09 -06001656 ch->type = SMD_CHANNEL_TYPE(alloc_elm->type);
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001657
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001658 if (smd_alloc_v2(ch) && smd_alloc_v1(ch)) {
Brian Swetland5b0f5a32009-04-26 18:38:49 -07001659 kfree(ch);
Brian Swetland34f719b2009-10-30 16:22:05 -07001660 return -1;
Brian Swetland5b0f5a32009-04-26 18:38:49 -07001661 }
1662
1663 ch->fifo_mask = ch->fifo_size - 1;
Brian Swetland5b0f5a32009-04-26 18:38:49 -07001664
Jeff Hugo918b2dc2012-03-21 13:42:09 -06001665 /* probe_worker guarentees ch->type will be a valid type */
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001666 if (ch->type == SMD_APPS_MODEM)
Brian Swetland5b0f5a32009-04-26 18:38:49 -07001667 ch->notify_other_cpu = notify_modem_smd;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001668 else if (ch->type == SMD_APPS_QDSP)
Brian Swetland5b0f5a32009-04-26 18:38:49 -07001669 ch->notify_other_cpu = notify_dsp_smd;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001670 else if (ch->type == SMD_APPS_DSPS)
1671 ch->notify_other_cpu = notify_dsps_smd;
Jeff Hugo918b2dc2012-03-21 13:42:09 -06001672 else if (ch->type == SMD_APPS_WCNSS)
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001673 ch->notify_other_cpu = notify_wcnss_smd;
Jeff Hugo918b2dc2012-03-21 13:42:09 -06001674 else if (ch->type == SMD_APPS_RPM)
1675 ch->notify_other_cpu = notify_rpm_smd;
Brian Swetland5b0f5a32009-04-26 18:38:49 -07001676
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001677 if (smd_is_packet(alloc_elm)) {
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001678 ch->read = smd_packet_read;
1679 ch->write = smd_packet_write;
1680 ch->read_avail = smd_packet_read_avail;
1681 ch->write_avail = smd_packet_write_avail;
1682 ch->update_state = update_packet_state;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001683 ch->read_from_cb = smd_packet_read_from_cb;
1684 ch->is_pkt_ch = 1;
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001685 } else {
1686 ch->read = smd_stream_read;
1687 ch->write = smd_stream_write;
1688 ch->read_avail = smd_stream_read_avail;
1689 ch->write_avail = smd_stream_write_avail;
1690 ch->update_state = update_stream_state;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001691 ch->read_from_cb = smd_stream_read;
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001692 }
1693
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001694 memcpy(ch->name, alloc_elm->name, SMD_MAX_CH_NAME_LEN);
1695 ch->name[SMD_MAX_CH_NAME_LEN-1] = 0;
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001696
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001697 ch->pdev.name = ch->name;
1698 ch->pdev.id = ch->type;
1699
1700 SMD_INFO("smd_alloc_channel() '%s' cid=%d\n",
1701 ch->name, ch->n);
1702
1703 mutex_lock(&smd_creation_mutex);
1704 list_add(&ch->ch_list, &smd_ch_closed_list);
1705 mutex_unlock(&smd_creation_mutex);
1706
1707 platform_device_register(&ch->pdev);
1708 if (!strncmp(ch->name, "LOOPBACK", 8) && ch->type == SMD_APPS_MODEM) {
1709 /* create a platform driver to be used by smd_tty driver
1710 * so that it can access the loopback port
1711 */
1712 loopback_tty_pdev.id = ch->type;
1713 platform_device_register(&loopback_tty_pdev);
1714 }
1715 return 0;
1716}
1717
1718static inline void notify_loopback_smd(void)
1719{
1720 unsigned long flags;
1721 struct smd_channel *ch;
1722
1723 spin_lock_irqsave(&smd_lock, flags);
1724 list_for_each_entry(ch, &smd_ch_list_loopback, ch_list) {
1725 ch->notify(ch->priv, SMD_EVENT_DATA);
1726 }
1727 spin_unlock_irqrestore(&smd_lock, flags);
1728}
1729
1730static int smd_alloc_loopback_channel(void)
1731{
1732 static struct smd_half_channel smd_loopback_ctl;
1733 static char smd_loopback_data[SMD_BUF_SIZE];
1734 struct smd_channel *ch;
1735
1736 ch = kzalloc(sizeof(struct smd_channel), GFP_KERNEL);
1737 if (ch == 0) {
1738 pr_err("%s: out of memory\n", __func__);
1739 return -1;
1740 }
1741 ch->n = SMD_LOOPBACK_CID;
1742
1743 ch->send = &smd_loopback_ctl;
1744 ch->recv = &smd_loopback_ctl;
1745 ch->send_data = smd_loopback_data;
1746 ch->recv_data = smd_loopback_data;
1747 ch->fifo_size = SMD_BUF_SIZE;
1748
1749 ch->fifo_mask = ch->fifo_size - 1;
1750 ch->type = SMD_LOOPBACK_TYPE;
1751 ch->notify_other_cpu = notify_loopback_smd;
1752
1753 ch->read = smd_stream_read;
1754 ch->write = smd_stream_write;
1755 ch->read_avail = smd_stream_read_avail;
1756 ch->write_avail = smd_stream_write_avail;
1757 ch->update_state = update_stream_state;
1758 ch->read_from_cb = smd_stream_read;
1759
1760 memset(ch->name, 0, 20);
1761 memcpy(ch->name, "local_loopback", 14);
1762
1763 ch->pdev.name = ch->name;
1764 ch->pdev.id = ch->type;
1765
1766 SMD_INFO("%s: '%s' cid=%d\n", __func__, ch->name, ch->n);
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001767
1768 mutex_lock(&smd_creation_mutex);
1769 list_add(&ch->ch_list, &smd_ch_closed_list);
1770 mutex_unlock(&smd_creation_mutex);
1771
1772 platform_device_register(&ch->pdev);
Brian Swetland34f719b2009-10-30 16:22:05 -07001773 return 0;
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001774}
1775
/* No-op notify callback installed when a channel has no listener. */
static void do_nothing_notify(void *priv, unsigned flags)
{
}
1779
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001780static void finalize_channel_close_fn(struct work_struct *work)
1781{
1782 unsigned long flags;
1783 struct smd_channel *ch;
1784 struct smd_channel *index;
1785
Eric Holmbergbb2b1fa2011-10-12 16:41:37 -06001786 mutex_lock(&smd_creation_mutex);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001787 spin_lock_irqsave(&smd_lock, flags);
1788 list_for_each_entry_safe(ch, index, &smd_ch_to_close_list, ch_list) {
1789 list_del(&ch->ch_list);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001790 list_add(&ch->ch_list, &smd_ch_closed_list);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001791 ch->notify(ch->priv, SMD_EVENT_REOPEN_READY);
1792 ch->notify = do_nothing_notify;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001793 }
1794 spin_unlock_irqrestore(&smd_lock, flags);
Eric Holmbergbb2b1fa2011-10-12 16:41:37 -06001795 mutex_unlock(&smd_creation_mutex);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001796}
1797
1798struct smd_channel *smd_get_channel(const char *name, uint32_t type)
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001799{
1800 struct smd_channel *ch;
1801
1802 mutex_lock(&smd_creation_mutex);
1803 list_for_each_entry(ch, &smd_ch_closed_list, ch_list) {
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001804 if (!strcmp(name, ch->name) &&
1805 (type == ch->type)) {
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001806 list_del(&ch->ch_list);
1807 mutex_unlock(&smd_creation_mutex);
1808 return ch;
1809 }
1810 }
1811 mutex_unlock(&smd_creation_mutex);
1812
1813 return NULL;
1814}
1815
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001816int smd_named_open_on_edge(const char *name, uint32_t edge,
1817 smd_channel_t **_ch,
1818 void *priv, void (*notify)(void *, unsigned))
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001819{
1820 struct smd_channel *ch;
1821 unsigned long flags;
1822
1823 if (smd_initialized == 0) {
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001824 SMD_INFO("smd_open() before smd_init()\n");
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001825 return -ENODEV;
1826 }
1827
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001828 SMD_DBG("smd_open('%s', %p, %p)\n", name, priv, notify);
1829
1830 ch = smd_get_channel(name, edge);
Eric Holmbergbb2b1fa2011-10-12 16:41:37 -06001831 if (!ch) {
Eric Holmbergbb2b1fa2011-10-12 16:41:37 -06001832 /* check closing list for port */
1833 spin_lock_irqsave(&smd_lock, flags);
1834 list_for_each_entry(ch, &smd_ch_closing_list, ch_list) {
1835 if (!strncmp(name, ch->name, 20) &&
1836 (edge == ch->type)) {
1837 /* channel exists, but is being closed */
1838 spin_unlock_irqrestore(&smd_lock, flags);
1839 return -EAGAIN;
1840 }
1841 }
1842
1843 /* check closing workqueue list for port */
1844 list_for_each_entry(ch, &smd_ch_to_close_list, ch_list) {
1845 if (!strncmp(name, ch->name, 20) &&
1846 (edge == ch->type)) {
1847 /* channel exists, but is being closed */
1848 spin_unlock_irqrestore(&smd_lock, flags);
1849 return -EAGAIN;
1850 }
1851 }
1852 spin_unlock_irqrestore(&smd_lock, flags);
1853
1854 /* one final check to handle closing->closed race condition */
1855 ch = smd_get_channel(name, edge);
1856 if (!ch)
1857 return -ENODEV;
1858 }
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001859
1860 if (notify == 0)
1861 notify = do_nothing_notify;
1862
1863 ch->notify = notify;
1864 ch->current_packet = 0;
1865 ch->last_state = SMD_SS_CLOSED;
1866 ch->priv = priv;
1867
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001868 if (edge == SMD_LOOPBACK_TYPE) {
1869 ch->last_state = SMD_SS_OPENED;
Jeff Hugo918b2dc2012-03-21 13:42:09 -06001870 ch->half_ch->set_state(ch->send, SMD_SS_OPENED);
1871 ch->half_ch->set_fDSR(ch->send, 1);
1872 ch->half_ch->set_fCTS(ch->send, 1);
1873 ch->half_ch->set_fCD(ch->send, 1);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001874 }
1875
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001876 *_ch = ch;
1877
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001878 SMD_DBG("smd_open: opening '%s'\n", ch->name);
1879
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001880 spin_lock_irqsave(&smd_lock, flags);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001881 if (SMD_CHANNEL_TYPE(ch->type) == SMD_APPS_MODEM)
Brian Swetland37521a32009-07-01 18:30:47 -07001882 list_add(&ch->ch_list, &smd_ch_list_modem);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001883 else if (SMD_CHANNEL_TYPE(ch->type) == SMD_APPS_QDSP)
Brian Swetland37521a32009-07-01 18:30:47 -07001884 list_add(&ch->ch_list, &smd_ch_list_dsp);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001885 else if (SMD_CHANNEL_TYPE(ch->type) == SMD_APPS_DSPS)
1886 list_add(&ch->ch_list, &smd_ch_list_dsps);
1887 else if (SMD_CHANNEL_TYPE(ch->type) == SMD_APPS_WCNSS)
1888 list_add(&ch->ch_list, &smd_ch_list_wcnss);
Jeff Hugo918b2dc2012-03-21 13:42:09 -06001889 else if (SMD_CHANNEL_TYPE(ch->type) == SMD_APPS_RPM)
1890 list_add(&ch->ch_list, &smd_ch_list_rpm);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001891 else
1892 list_add(&ch->ch_list, &smd_ch_list_loopback);
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001893
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001894 SMD_DBG("%s: opening ch %d\n", __func__, ch->n);
1895
1896 if (edge != SMD_LOOPBACK_TYPE)
1897 smd_state_change(ch, ch->last_state, SMD_SS_OPENING);
1898
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001899 spin_unlock_irqrestore(&smd_lock, flags);
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001900
1901 return 0;
1902}
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001903EXPORT_SYMBOL(smd_named_open_on_edge);
1904
1905
/* Legacy open: equivalent to smd_named_open_on_edge() on the modem edge. */
int smd_open(const char *name, smd_channel_t **_ch,
	     void *priv, void (*notify)(void *, unsigned))
{
	return smd_named_open_on_edge(name, SMD_APPS_MODEM, _ch, priv,
				      notify);
}
EXPORT_SYMBOL(smd_open);
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001913
1914int smd_close(smd_channel_t *ch)
1915{
1916 unsigned long flags;
1917
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001918 if (ch == 0)
1919 return -1;
1920
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001921 SMD_INFO("smd_close(%s)\n", ch->name);
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001922
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001923 spin_lock_irqsave(&smd_lock, flags);
1924 list_del(&ch->ch_list);
1925 if (ch->n == SMD_LOOPBACK_CID) {
Jeff Hugo918b2dc2012-03-21 13:42:09 -06001926 ch->half_ch->set_fDSR(ch->send, 0);
1927 ch->half_ch->set_fCTS(ch->send, 0);
1928 ch->half_ch->set_fCD(ch->send, 0);
1929 ch->half_ch->set_state(ch->send, SMD_SS_CLOSED);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001930 } else
1931 ch_set_state(ch, SMD_SS_CLOSED);
1932
Jeff Hugo918b2dc2012-03-21 13:42:09 -06001933 if (ch->half_ch->get_state(ch->recv) == SMD_SS_OPENED) {
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001934 list_add(&ch->ch_list, &smd_ch_closing_list);
1935 spin_unlock_irqrestore(&smd_lock, flags);
1936 } else {
1937 spin_unlock_irqrestore(&smd_lock, flags);
1938 ch->notify = do_nothing_notify;
1939 mutex_lock(&smd_creation_mutex);
1940 list_add(&ch->ch_list, &smd_ch_closed_list);
1941 mutex_unlock(&smd_creation_mutex);
1942 }
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001943
1944 return 0;
1945}
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001946EXPORT_SYMBOL(smd_close);
1947
1948int smd_write_start(smd_channel_t *ch, int len)
1949{
1950 int ret;
1951 unsigned hdr[5];
1952
1953 if (!ch) {
1954 pr_err("%s: Invalid channel specified\n", __func__);
1955 return -ENODEV;
1956 }
1957 if (!ch->is_pkt_ch) {
1958 pr_err("%s: non-packet channel specified\n", __func__);
1959 return -EACCES;
1960 }
1961 if (len < 1) {
1962 pr_err("%s: invalid length: %d\n", __func__, len);
1963 return -EINVAL;
1964 }
1965
1966 if (ch->pending_pkt_sz) {
1967 pr_err("%s: packet of size: %d in progress\n", __func__,
1968 ch->pending_pkt_sz);
1969 return -EBUSY;
1970 }
1971 ch->pending_pkt_sz = len;
1972
1973 if (smd_stream_write_avail(ch) < (SMD_HEADER_SIZE)) {
1974 ch->pending_pkt_sz = 0;
1975 SMD_DBG("%s: no space to write packet header\n", __func__);
1976 return -EAGAIN;
1977 }
1978
1979 hdr[0] = len;
1980 hdr[1] = hdr[2] = hdr[3] = hdr[4] = 0;
1981
1982
1983 ret = smd_stream_write(ch, hdr, sizeof(hdr), 0);
1984 if (ret < 0 || ret != sizeof(hdr)) {
1985 ch->pending_pkt_sz = 0;
1986 pr_err("%s: packet header failed to write\n", __func__);
1987 return -EPERM;
1988 }
1989 return 0;
1990}
1991EXPORT_SYMBOL(smd_write_start);
1992
1993int smd_write_segment(smd_channel_t *ch, void *data, int len, int user_buf)
1994{
1995 int bytes_written;
1996
1997 if (!ch) {
1998 pr_err("%s: Invalid channel specified\n", __func__);
1999 return -ENODEV;
2000 }
2001 if (len < 1) {
2002 pr_err("%s: invalid length: %d\n", __func__, len);
2003 return -EINVAL;
2004 }
2005
2006 if (!ch->pending_pkt_sz) {
2007 pr_err("%s: no transaction in progress\n", __func__);
2008 return -ENOEXEC;
2009 }
2010 if (ch->pending_pkt_sz - len < 0) {
2011 pr_err("%s: segment of size: %d will make packet go over "
2012 "length\n", __func__, len);
2013 return -EINVAL;
2014 }
2015
2016 bytes_written = smd_stream_write(ch, data, len, user_buf);
2017
2018 ch->pending_pkt_sz -= bytes_written;
2019
2020 return bytes_written;
2021}
2022EXPORT_SYMBOL(smd_write_segment);
2023
2024int smd_write_end(smd_channel_t *ch)
2025{
2026
2027 if (!ch) {
2028 pr_err("%s: Invalid channel specified\n", __func__);
2029 return -ENODEV;
2030 }
2031 if (ch->pending_pkt_sz) {
2032 pr_err("%s: current packet not completely written\n", __func__);
2033 return -E2BIG;
2034 }
2035
2036 return 0;
2037}
2038EXPORT_SYMBOL(smd_write_end);
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002039
2040int smd_read(smd_channel_t *ch, void *data, int len)
2041{
Jack Pham1b236d12012-03-19 15:27:18 -07002042 if (!ch) {
2043 pr_err("%s: Invalid channel specified\n", __func__);
2044 return -ENODEV;
2045 }
2046
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002047 return ch->read(ch, data, len, 0);
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002048}
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002049EXPORT_SYMBOL(smd_read);
2050
2051int smd_read_user_buffer(smd_channel_t *ch, void *data, int len)
2052{
Jack Pham1b236d12012-03-19 15:27:18 -07002053 if (!ch) {
2054 pr_err("%s: Invalid channel specified\n", __func__);
2055 return -ENODEV;
2056 }
2057
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002058 return ch->read(ch, data, len, 1);
2059}
2060EXPORT_SYMBOL(smd_read_user_buffer);
2061
2062int smd_read_from_cb(smd_channel_t *ch, void *data, int len)
2063{
Jack Pham1b236d12012-03-19 15:27:18 -07002064 if (!ch) {
2065 pr_err("%s: Invalid channel specified\n", __func__);
2066 return -ENODEV;
2067 }
2068
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002069 return ch->read_from_cb(ch, data, len, 0);
2070}
2071EXPORT_SYMBOL(smd_read_from_cb);
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002072
2073int smd_write(smd_channel_t *ch, const void *data, int len)
2074{
Jack Pham1b236d12012-03-19 15:27:18 -07002075 if (!ch) {
2076 pr_err("%s: Invalid channel specified\n", __func__);
2077 return -ENODEV;
2078 }
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002079
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002080 return ch->pending_pkt_sz ? -EBUSY : ch->write(ch, data, len, 0);
Brian Swetland636eb9c2009-12-07 15:28:08 -08002081}
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002082EXPORT_SYMBOL(smd_write);
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002083
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002084int smd_write_user_buffer(smd_channel_t *ch, const void *data, int len)
Brian Swetland636eb9c2009-12-07 15:28:08 -08002085{
Jack Pham1b236d12012-03-19 15:27:18 -07002086 if (!ch) {
2087 pr_err("%s: Invalid channel specified\n", __func__);
2088 return -ENODEV;
2089 }
2090
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002091 return ch->pending_pkt_sz ? -EBUSY : ch->write(ch, data, len, 1);
Brian Swetland636eb9c2009-12-07 15:28:08 -08002092}
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002093EXPORT_SYMBOL(smd_write_user_buffer);
Brian Swetland636eb9c2009-12-07 15:28:08 -08002094
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002095int smd_read_avail(smd_channel_t *ch)
2096{
Jack Pham1b236d12012-03-19 15:27:18 -07002097 if (!ch) {
2098 pr_err("%s: Invalid channel specified\n", __func__);
2099 return -ENODEV;
2100 }
2101
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002102 return ch->read_avail(ch);
2103}
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002104EXPORT_SYMBOL(smd_read_avail);
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002105
2106int smd_write_avail(smd_channel_t *ch)
2107{
Jack Pham1b236d12012-03-19 15:27:18 -07002108 if (!ch) {
2109 pr_err("%s: Invalid channel specified\n", __func__);
2110 return -ENODEV;
2111 }
2112
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002113 return ch->write_avail(ch);
2114}
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002115EXPORT_SYMBOL(smd_write_avail);
2116
2117void smd_enable_read_intr(smd_channel_t *ch)
2118{
2119 if (ch)
Jeff Hugo918b2dc2012-03-21 13:42:09 -06002120 ch->half_ch->set_fBLOCKREADINTR(ch->send, 0);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002121}
2122EXPORT_SYMBOL(smd_enable_read_intr);
2123
2124void smd_disable_read_intr(smd_channel_t *ch)
2125{
2126 if (ch)
Jeff Hugo918b2dc2012-03-21 13:42:09 -06002127 ch->half_ch->set_fBLOCKREADINTR(ch->send, 1);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002128}
2129EXPORT_SYMBOL(smd_disable_read_intr);
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002130
/* Blocking wait is not implemented; always fails with -1. */
int smd_wait_until_readable(smd_channel_t *ch, int bytes)
{
	return -1;
}
2135
/* Blocking wait is not implemented; always fails with -1. */
int smd_wait_until_writable(smd_channel_t *ch, int bytes)
{
	return -1;
}
2140
2141int smd_cur_packet_size(smd_channel_t *ch)
2142{
Jack Pham1b236d12012-03-19 15:27:18 -07002143 if (!ch) {
2144 pr_err("%s: Invalid channel specified\n", __func__);
2145 return -ENODEV;
2146 }
2147
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002148 return ch->current_packet;
2149}
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002150EXPORT_SYMBOL(smd_cur_packet_size);
2151
2152int smd_tiocmget(smd_channel_t *ch)
2153{
Jack Pham1b236d12012-03-19 15:27:18 -07002154 if (!ch) {
2155 pr_err("%s: Invalid channel specified\n", __func__);
2156 return -ENODEV;
2157 }
2158
Jeff Hugo918b2dc2012-03-21 13:42:09 -06002159 return (ch->half_ch->get_fDSR(ch->recv) ? TIOCM_DSR : 0) |
2160 (ch->half_ch->get_fCTS(ch->recv) ? TIOCM_CTS : 0) |
2161 (ch->half_ch->get_fCD(ch->recv) ? TIOCM_CD : 0) |
2162 (ch->half_ch->get_fRI(ch->recv) ? TIOCM_RI : 0) |
2163 (ch->half_ch->get_fCTS(ch->send) ? TIOCM_RTS : 0) |
2164 (ch->half_ch->get_fDSR(ch->send) ? TIOCM_DTR : 0);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002165}
2166EXPORT_SYMBOL(smd_tiocmget);
2167
Vamsi Krishnacb12a102011-08-17 15:18:26 -07002168/* this api will be called while holding smd_lock */
2169int
2170smd_tiocmset_from_cb(smd_channel_t *ch, unsigned int set, unsigned int clear)
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002171{
Jack Pham1b236d12012-03-19 15:27:18 -07002172 if (!ch) {
2173 pr_err("%s: Invalid channel specified\n", __func__);
2174 return -ENODEV;
2175 }
2176
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002177 if (set & TIOCM_DTR)
Jeff Hugo918b2dc2012-03-21 13:42:09 -06002178 ch->half_ch->set_fDSR(ch->send, 1);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002179
2180 if (set & TIOCM_RTS)
Jeff Hugo918b2dc2012-03-21 13:42:09 -06002181 ch->half_ch->set_fCTS(ch->send, 1);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002182
2183 if (clear & TIOCM_DTR)
Jeff Hugo918b2dc2012-03-21 13:42:09 -06002184 ch->half_ch->set_fDSR(ch->send, 0);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002185
2186 if (clear & TIOCM_RTS)
Jeff Hugo918b2dc2012-03-21 13:42:09 -06002187 ch->half_ch->set_fCTS(ch->send, 0);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002188
Jeff Hugo918b2dc2012-03-21 13:42:09 -06002189 ch->half_ch->set_fSTATE(ch->send, 1);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002190 barrier();
2191 ch->notify_other_cpu();
Vamsi Krishnacb12a102011-08-17 15:18:26 -07002192
2193 return 0;
2194}
2195EXPORT_SYMBOL(smd_tiocmset_from_cb);
2196
2197int smd_tiocmset(smd_channel_t *ch, unsigned int set, unsigned int clear)
2198{
2199 unsigned long flags;
2200
Jack Pham1b236d12012-03-19 15:27:18 -07002201 if (!ch) {
2202 pr_err("%s: Invalid channel specified\n", __func__);
2203 return -ENODEV;
2204 }
2205
Vamsi Krishnacb12a102011-08-17 15:18:26 -07002206 spin_lock_irqsave(&smd_lock, flags);
2207 smd_tiocmset_from_cb(ch, set, clear);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002208 spin_unlock_irqrestore(&smd_lock, flags);
2209
2210 return 0;
2211}
2212EXPORT_SYMBOL(smd_tiocmset);
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002213
Jeff Hugod71d6ac2012-04-11 16:48:07 -06002214int smd_is_pkt_avail(smd_channel_t *ch)
2215{
2216 if (!ch || !ch->is_pkt_ch)
2217 return -EINVAL;
2218
2219 if (ch->current_packet)
2220 return 1;
2221
2222 update_packet_state(ch);
2223
2224 return ch->current_packet ? 1 : 0;
2225}
2226EXPORT_SYMBOL(smd_is_pkt_avail);
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002227
2228
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002229/* -------------------------------------------------------------------------- */
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002230
Jeff Hugobdc734d2012-03-26 16:05:39 -06002231/*
2232 * Shared Memory Range Check
2233 *
2234 * Takes a physical address and an offset and checks if the resulting physical
2235 * address would fit into one of the aux smem regions. If so, returns the
2236 * corresponding virtual address. Otherwise returns NULL. Expects the array
2237 * of smem regions to be in ascending physical address order.
2238 *
2239 * @base: physical base address to check
2240 * @offset: offset from the base to get the final address
2241 */
2242static void *smem_range_check(void *base, unsigned offset)
2243{
2244 int i;
2245 void *phys_addr;
2246 unsigned size;
2247
2248 for (i = 0; i < num_smem_areas; ++i) {
2249 phys_addr = smem_areas[i].phys_addr;
2250 size = smem_areas[i].size;
2251 if (base < phys_addr)
2252 return NULL;
2253 if (base > phys_addr + size)
2254 continue;
2255 if (base >= phys_addr && base + offset < phys_addr + size)
2256 return smem_areas[i].virt_addr + offset;
2257 }
2258
2259 return NULL;
2260}
2261
/* smem_alloc returns the pointer to smem item if it is already allocated.
 * Otherwise, it returns NULL.  (Despite the name, it never allocates;
 * use smem_alloc2() for allocate-on-demand behavior.)
 */
void *smem_alloc(unsigned id, unsigned size)
{
	return smem_find(id, size);
}
EXPORT_SYMBOL(smem_alloc);
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002270
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002271/* smem_alloc2 returns the pointer to smem item. If it is not allocated,
2272 * it allocates it and then returns the pointer to it.
2273 */
Angshuman Sarkar4eade0d2011-08-17 14:06:23 +05302274void *smem_alloc2(unsigned id, unsigned size_in)
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002275{
2276 struct smem_shared *shared = (void *) MSM_SHARED_RAM_BASE;
2277 struct smem_heap_entry *toc = shared->heap_toc;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002278 unsigned long flags;
2279 void *ret = NULL;
2280
2281 if (!shared->heap_info.initialized) {
2282 pr_err("%s: smem heap info not initialized\n", __func__);
2283 return NULL;
2284 }
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002285
2286 if (id >= SMEM_NUM_ITEMS)
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002287 return NULL;
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002288
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002289 size_in = ALIGN(size_in, 8);
2290 remote_spin_lock_irqsave(&remote_spinlock, flags);
2291 if (toc[id].allocated) {
2292 SMD_DBG("%s: %u already allocated\n", __func__, id);
2293 if (size_in != toc[id].size)
2294 pr_err("%s: wrong size %u (expected %u)\n",
2295 __func__, toc[id].size, size_in);
2296 else
2297 ret = (void *)(MSM_SHARED_RAM_BASE + toc[id].offset);
2298 } else if (id > SMEM_FIXED_ITEM_LAST) {
2299 SMD_DBG("%s: allocating %u\n", __func__, id);
2300 if (shared->heap_info.heap_remaining >= size_in) {
2301 toc[id].offset = shared->heap_info.free_offset;
2302 toc[id].size = size_in;
2303 wmb();
2304 toc[id].allocated = 1;
2305
2306 shared->heap_info.free_offset += size_in;
2307 shared->heap_info.heap_remaining -= size_in;
2308 ret = (void *)(MSM_SHARED_RAM_BASE + toc[id].offset);
2309 } else
2310 pr_err("%s: not enough memory %u (required %u)\n",
2311 __func__, shared->heap_info.heap_remaining,
2312 size_in);
2313 }
2314 wmb();
2315 remote_spin_unlock_irqrestore(&remote_spinlock, flags);
2316 return ret;
2317}
Angshuman Sarkar4eade0d2011-08-17 14:06:23 +05302318EXPORT_SYMBOL(smem_alloc2);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002319
/*
 * Look up smem item @id.  On success returns its address (translated
 * through smem_range_check() for items living in an aux region) and
 * stores the item size in *size; returns 0 and *size = 0 otherwise.
 * The remote spinlock is skipped during early boot, before it has been
 * initialized.
 */
void *smem_get_entry(unsigned id, unsigned *size)
{
	struct smem_shared *shared = (void *) MSM_SHARED_RAM_BASE;
	struct smem_heap_entry *toc = shared->heap_toc;
	int use_spinlocks = spinlocks_initialized;
	void *ret = 0;
	unsigned long flags = 0;

	if (id >= SMEM_NUM_ITEMS)
		return ret;

	if (use_spinlocks)
		remote_spin_lock_irqsave(&remote_spinlock, flags);
	/* toc is in device memory and cannot be speculatively accessed */
	if (toc[id].allocated) {
		*size = toc[id].size;
		barrier();
		/* a base address in the reserved field marks an aux-region
		 * item that needs its own phys->virt translation */
		if (!(toc[id].reserved & BASE_ADDR_MASK))
			ret = (void *) (MSM_SHARED_RAM_BASE + toc[id].offset);
		else
			ret = smem_range_check(
				(void *)(toc[id].reserved & BASE_ADDR_MASK),
				toc[id].offset);
	} else {
		*size = 0;
	}
	if (use_spinlocks)
		remote_spin_unlock_irqrestore(&remote_spinlock, flags);

	return ret;
}
EXPORT_SYMBOL(smem_get_entry);
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002352
/*
 * Find an SMEM item by ID and verify it has the expected size.
 *
 * @id       SMEM item index
 * @size_in  expected size; rounded up to an 8-byte multiple before
 *           comparison against the item's actual size
 *
 * @returns pointer to the item, or 0 if missing or the size differs.
 */
void *smem_find(unsigned id, unsigned size_in)
{
	unsigned actual_size;
	void *entry;

	entry = smem_get_entry(id, &actual_size);
	if (!entry)
		return 0;

	size_in = ALIGN(size_in, 8);
	if (size_in == actual_size)
		return entry;

	pr_err("smem_find(%d, %d): wrong size %d\n",
	       id, size_in, actual_size);
	return 0;
}
EXPORT_SYMBOL(smem_find);
2372
2373static int smsm_cb_init(void)
2374{
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002375 struct smsm_state_info *state_info;
2376 int n;
2377 int ret = 0;
2378
2379 smsm_states = kmalloc(sizeof(struct smsm_state_info)*SMSM_NUM_ENTRIES,
2380 GFP_KERNEL);
2381
2382 if (!smsm_states) {
2383 pr_err("%s: SMSM init failed\n", __func__);
2384 return -ENOMEM;
2385 }
2386
Karthikeyan Ramasubramanian3249a222012-04-18 17:16:49 -06002387 smsm_cb_wq = create_singlethread_workqueue("smsm_cb_wq");
2388 if (!smsm_cb_wq) {
2389 pr_err("%s: smsm_cb_wq creation failed\n", __func__);
2390 kfree(smsm_states);
2391 return -EFAULT;
2392 }
2393
Eric Holmbergc8002902011-09-16 13:55:57 -06002394 mutex_lock(&smsm_lock);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002395 for (n = 0; n < SMSM_NUM_ENTRIES; n++) {
2396 state_info = &smsm_states[n];
2397 state_info->last_value = __raw_readl(SMSM_STATE_ADDR(n));
Eric Holmberge8a39322012-04-03 15:14:02 -06002398 state_info->intr_mask_set = 0x0;
2399 state_info->intr_mask_clear = 0x0;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002400 INIT_LIST_HEAD(&state_info->callbacks);
2401 }
Eric Holmbergc8002902011-09-16 13:55:57 -06002402 mutex_unlock(&smsm_lock);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002403
2404 return ret;
2405}
2406
/*
 * One-time SMSM initialization: remote spinlock, snapshot FIFO and
 * wakelock, shared-memory state words, interrupt masks, and the callback
 * framework.  Ends by broadcasting SMSM_INIT on the driver-state chain.
 *
 * @returns 0 on success, < 0 (errno) on failure.
 */
static int smsm_init(void)
{
	struct smem_shared *shared = (void *) MSM_SHARED_RAM_BASE;
	int i;
	struct smsm_size_info_type *smsm_size_info;

	i = remote_spin_lock_init(&remote_spinlock, SMEM_SPINLOCK_SMEM_ALLOC);
	if (i) {
		pr_err("%s: remote spinlock init failed %d\n", __func__, i);
		return i;
	}
	spinlocks_initialized = 1;

	/* remote side may publish larger entry/host counts than the
	 * compile-time defaults; adopt them if present */
	smsm_size_info = smem_alloc(SMEM_SMSM_SIZE_INFO,
				sizeof(struct smsm_size_info_type));
	if (smsm_size_info) {
		SMSM_NUM_ENTRIES = smsm_size_info->num_entries;
		SMSM_NUM_HOSTS = smsm_size_info->num_hosts;
	}

	/* FIFO of state snapshots drained by notify_smsm_cb_clients_worker */
	i = kfifo_alloc(&smsm_snapshot_fifo,
			sizeof(uint32_t) * SMSM_NUM_ENTRIES * SMSM_SNAPSHOT_CNT,
			GFP_KERNEL);
	if (i) {
		pr_err("%s: SMSM state fifo alloc failed %d\n", __func__, i);
		return i;
	}
	wake_lock_init(&smsm_snapshot_wakelock, WAKE_LOCK_SUSPEND,
			"smsm_snapshot");

	if (!smsm_info.state) {
		smsm_info.state = smem_alloc2(ID_SHARED_STATE,
					      SMSM_NUM_ENTRIES *
					      sizeof(uint32_t));

		if (smsm_info.state) {
			__raw_writel(0, SMSM_STATE_ADDR(SMSM_APPS_STATE));
			/* DEM state word only exists for modem protocol
			 * versions >= 0xB */
			if ((shared->version[VERSION_MODEM] >> 16) >= 0xB)
				__raw_writel(0, \
					SMSM_STATE_ADDR(SMSM_APPS_DEM_I));
		}
	}

	if (!smsm_info.intr_mask) {
		smsm_info.intr_mask = smem_alloc2(SMEM_SMSM_CPU_INTR_MASK,
						  SMSM_NUM_ENTRIES *
						  SMSM_NUM_HOSTS *
						  sizeof(uint32_t));

		if (smsm_info.intr_mask) {
			/* start fully masked for apps; callback
			 * registration opens individual bits later */
			for (i = 0; i < SMSM_NUM_ENTRIES; i++)
				__raw_writel(0x0,
					SMSM_INTR_MASK_ADDR(i, SMSM_APPS));

			/* Configure legacy modem bits */
			__raw_writel(LEGACY_MODEM_SMSM_MASK,
				SMSM_INTR_MASK_ADDR(SMSM_MODEM_STATE,
					SMSM_APPS));
		}
	}

	if (!smsm_info.intr_mux)
		smsm_info.intr_mux = smem_alloc2(SMEM_SMD_SMSM_INTR_MUX,
						 SMSM_NUM_INTR_MUX *
						 sizeof(uint32_t));

	i = smsm_cb_init();
	if (i)
		return i;

	wmb();	/* ensure shared-memory setup is visible before notifying */
	smsm_driver_state_notify(SMSM_INIT, NULL);
	return 0;
}
2481
2482void smsm_reset_modem(unsigned mode)
2483{
2484 if (mode == SMSM_SYSTEM_DOWNLOAD) {
2485 mode = SMSM_RESET | SMSM_SYSTEM_DOWNLOAD;
2486 } else if (mode == SMSM_MODEM_WAIT) {
2487 mode = SMSM_RESET | SMSM_MODEM_WAIT;
2488 } else { /* reset_mode is SMSM_RESET or default */
2489 mode = SMSM_RESET;
2490 }
2491
2492 smsm_change_state(SMSM_APPS_STATE, mode, mode);
2493}
2494EXPORT_SYMBOL(smsm_reset_modem);
2495
/*
 * Continue a modem reset that was started with SMSM_MODEM_WAIT: clear the
 * wait bit in the apps SMSM state word so the modem can proceed.
 * No-op if the shared state area has not been allocated yet.
 */
void smsm_reset_modem_cont(void)
{
	unsigned long flags;
	uint32_t state;

	if (!smsm_info.state)
		return;

	spin_lock_irqsave(&smem_lock, flags);
	state = __raw_readl(SMSM_STATE_ADDR(SMSM_APPS_STATE)) \
						& ~SMSM_MODEM_WAIT;
	__raw_writel(state, SMSM_STATE_ADDR(SMSM_APPS_STATE));
	wmb();	/* make the cleared bit visible to the remote processor */
	spin_unlock_irqrestore(&smem_lock, flags);
}
EXPORT_SYMBOL(smsm_reset_modem_cont);
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002512
/*
 * Capture a snapshot of every SMSM state word into the snapshot FIFO and
 * queue the callback worker to deliver notifications.
 *
 * @use_wakelock  non-zero = take a wakelock reference that the worker
 *                releases after consuming this snapshot, preventing
 *                suspend while notifications are pending
 *
 * Called with interrupts disabled (from smsm_irq_handler under smem_lock).
 */
static void smsm_cb_snapshot(uint32_t use_wakelock)
{
	int n;
	uint32_t new_state;
	unsigned long flags;
	int ret;

	ret = kfifo_avail(&smsm_snapshot_fifo);
	if (ret < SMSM_SNAPSHOT_SIZE) {
		pr_err("%s: SMSM snapshot full %d\n", __func__, ret);
		return;
	}

	/*
	 * To avoid a race condition with notify_smsm_cb_clients_worker, the
	 * following sequence must be followed:
	 * 1) increment snapshot count
	 * 2) insert data into FIFO
	 *
	 * Potentially in parallel, the worker:
	 * a) verifies >= 1 snapshots are in FIFO
	 * b) processes snapshot
	 * c) decrements reference count
	 *
	 * This order ensures that 1 will always occur before abc.
	 */
	if (use_wakelock) {
		spin_lock_irqsave(&smsm_snapshot_count_lock, flags);
		if (smsm_snapshot_count == 0) {
			/* first outstanding snapshot: grab the wakelock */
			SMx_POWER_INFO("SMSM snapshot wake lock\n");
			wake_lock(&smsm_snapshot_wakelock);
		}
		++smsm_snapshot_count;
		spin_unlock_irqrestore(&smsm_snapshot_count_lock, flags);
	}

	/* queue state entries */
	for (n = 0; n < SMSM_NUM_ENTRIES; n++) {
		new_state = __raw_readl(SMSM_STATE_ADDR(n));

		ret = kfifo_in(&smsm_snapshot_fifo,
				&new_state, sizeof(new_state));
		if (ret != sizeof(new_state)) {
			pr_err("%s: SMSM snapshot failure %d\n", __func__, ret);
			goto restore_snapshot_count;
		}
	}

	/* queue wakelock usage flag */
	ret = kfifo_in(&smsm_snapshot_fifo,
			&use_wakelock, sizeof(use_wakelock));
	if (ret != sizeof(use_wakelock)) {
		pr_err("%s: SMSM snapshot failure %d\n", __func__, ret);
		goto restore_snapshot_count;
	}

	queue_work(smsm_cb_wq, &smsm_cb_work);
	return;

restore_snapshot_count:
	/* back out the count/wakelock taken above; any partially queued
	 * FIFO data is left for the worker's size check to discard */
	if (use_wakelock) {
		spin_lock_irqsave(&smsm_snapshot_count_lock, flags);
		if (smsm_snapshot_count) {
			--smsm_snapshot_count;
			if (smsm_snapshot_count == 0) {
				SMx_POWER_INFO("SMSM snapshot wake unlock\n");
				wake_unlock(&smsm_snapshot_wakelock);
			}
		} else {
			pr_err("%s: invalid snapshot count\n", __func__);
		}
		spin_unlock_irqrestore(&smsm_snapshot_count_lock, flags);
	}
}
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002587
/*
 * Core SMSM interrupt handler, shared by all per-processor wrappers.
 *
 * For the ADSP SMSM interrupt it only snapshots state for the callback
 * workers; for all other sources it additionally runs the modem
 * handshake state machine (reset ack, init, system download) and
 * propagates any apps-state changes back to the other processors.
 */
static irqreturn_t smsm_irq_handler(int irq, void *data)
{
	unsigned long flags;

	if (irq == INT_ADSP_A11_SMSM) {
		uint32_t mux_val;
		static uint32_t prev_smem_q6_apps_smsm;

		/* track the Q6 intr mux word on 8x50 targets; value is
		 * only cached, snapshot below does the actual notify */
		if (smsm_info.intr_mux && cpu_is_qsd8x50()) {
			mux_val = __raw_readl(
					SMSM_INTR_MUX_ADDR(SMEM_Q6_APPS_SMSM));
			if (mux_val != prev_smem_q6_apps_smsm)
				prev_smem_q6_apps_smsm = mux_val;
		}

		spin_lock_irqsave(&smem_lock, flags);
		smsm_cb_snapshot(1);
		spin_unlock_irqrestore(&smem_lock, flags);
		return IRQ_HANDLED;
	}

	spin_lock_irqsave(&smem_lock, flags);
	if (!smsm_info.state) {
		SMSM_INFO("<SM NO STATE>\n");
	} else {
		unsigned old_apps, apps;
		unsigned modm = __raw_readl(SMSM_STATE_ADDR(SMSM_MODEM_STATE));

		old_apps = apps = __raw_readl(SMSM_STATE_ADDR(SMSM_APPS_STATE));

		SMSM_DBG("<SM %08x %08x>\n", apps, modm);
		if (apps & SMSM_RESET) {
			/* If we get an interrupt and the apps SMSM_RESET
			   bit is already set, the modem is acking the
			   app's reset ack. */
			if (!disable_smsm_reset_handshake)
				apps &= ~SMSM_RESET;
			/* Issue a fake irq to handle any
			 * smd state changes during reset
			 */
			smd_fake_irq_handler(0);

			/* queue modem restart notify chain */
			modem_queue_start_reset_notify();

		} else if (modm & SMSM_RESET) {
			pr_err("\nSMSM: Modem SMSM state changed to SMSM_RESET.");
			if (!disable_smsm_reset_handshake) {
				/* ack the reset; flush caches so RAM is
				 * consistent for any post-mortem dump */
				apps |= SMSM_RESET;
				flush_cache_all();
				outer_flush_all();
			}
			modem_queue_start_reset_notify();

		} else if (modm & SMSM_INIT) {
			if (!(apps & SMSM_INIT)) {
				apps |= SMSM_INIT;
				modem_queue_smsm_init_notify();
			}

			if (modm & SMSM_SMDINIT)
				apps |= SMSM_SMDINIT;
			/* declare RUN once all three init stages are done */
			if ((apps & (SMSM_INIT | SMSM_SMDINIT | SMSM_RPCINIT)) ==
				(SMSM_INIT | SMSM_SMDINIT | SMSM_RPCINIT))
				apps |= SMSM_RUN;
		} else if (modm & SMSM_SYSTEM_DOWNLOAD) {
			pr_err("\nSMSM: Modem SMSM state changed to SMSM_SYSTEM_DOWNLOAD.");
			modem_queue_start_reset_notify();
		}

		if (old_apps != apps) {
			SMSM_DBG("<SM %08x NOTIFY>\n", apps);
			__raw_writel(apps, SMSM_STATE_ADDR(SMSM_APPS_STATE));
			do_smd_probe();
			notify_other_smsm(SMSM_APPS_STATE, (old_apps ^ apps));
		}

		smsm_cb_snapshot(1);
	}
	spin_unlock_irqrestore(&smem_lock, flags);
	return IRQ_HANDLED;
}
2670
Eric Holmberg98c6c642012-02-24 11:29:35 -07002671static irqreturn_t smsm_modem_irq_handler(int irq, void *data)
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002672{
Eric Holmberg98c6c642012-02-24 11:29:35 -07002673 SMx_POWER_INFO("SMSM Int Modem->Apps\n");
Eric Holmberg7ad623a2012-03-01 14:41:10 -07002674 ++interrupt_stats[SMD_MODEM].smsm_in_count;
Eric Holmberg98c6c642012-02-24 11:29:35 -07002675 return smsm_irq_handler(irq, data);
2676}
Brian Swetland5b0f5a32009-04-26 18:38:49 -07002677
Eric Holmberg98c6c642012-02-24 11:29:35 -07002678static irqreturn_t smsm_dsp_irq_handler(int irq, void *data)
2679{
2680 SMx_POWER_INFO("SMSM Int LPASS->Apps\n");
Eric Holmberg7ad623a2012-03-01 14:41:10 -07002681 ++interrupt_stats[SMD_Q6].smsm_in_count;
Eric Holmberg98c6c642012-02-24 11:29:35 -07002682 return smsm_irq_handler(irq, data);
2683}
2684
2685static irqreturn_t smsm_dsps_irq_handler(int irq, void *data)
2686{
2687 SMx_POWER_INFO("SMSM Int DSPS->Apps\n");
Eric Holmberg7ad623a2012-03-01 14:41:10 -07002688 ++interrupt_stats[SMD_DSPS].smsm_in_count;
Eric Holmberg98c6c642012-02-24 11:29:35 -07002689 return smsm_irq_handler(irq, data);
2690}
2691
2692static irqreturn_t smsm_wcnss_irq_handler(int irq, void *data)
2693{
2694 SMx_POWER_INFO("SMSM Int WCNSS->Apps\n");
Eric Holmberg7ad623a2012-03-01 14:41:10 -07002695 ++interrupt_stats[SMD_WCNSS].smsm_in_count;
Eric Holmberg98c6c642012-02-24 11:29:35 -07002696 return smsm_irq_handler(irq, data);
2697}
2698
Eric Holmberge8a39322012-04-03 15:14:02 -06002699/*
2700 * Changes the global interrupt mask. The set and clear masks are re-applied
2701 * every time the global interrupt mask is updated for callback registration
2702 * and de-registration.
2703 *
2704 * The clear mask is applied first, so if a bit is set to 1 in both the clear
2705 * mask and the set mask, the result will be that the interrupt is set.
2706 *
2707 * @smsm_entry SMSM entry to change
2708 * @clear_mask 1 = clear bit, 0 = no-op
2709 * @set_mask 1 = set bit, 0 = no-op
2710 *
2711 * @returns 0 for success, < 0 for error
2712 */
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002713int smsm_change_intr_mask(uint32_t smsm_entry,
2714 uint32_t clear_mask, uint32_t set_mask)
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002715{
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002716 uint32_t old_mask, new_mask;
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002717 unsigned long flags;
Brian Swetland5b0f5a32009-04-26 18:38:49 -07002718
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002719 if (smsm_entry >= SMSM_NUM_ENTRIES) {
2720 pr_err("smsm_change_state: Invalid entry %d\n",
2721 smsm_entry);
2722 return -EINVAL;
2723 }
2724
2725 if (!smsm_info.intr_mask) {
2726 pr_err("smsm_change_intr_mask <SM NO STATE>\n");
Brian Swetland5b0f5a32009-04-26 18:38:49 -07002727 return -EIO;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002728 }
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002729
2730 spin_lock_irqsave(&smem_lock, flags);
Eric Holmberge8a39322012-04-03 15:14:02 -06002731 smsm_states[smsm_entry].intr_mask_clear = clear_mask;
2732 smsm_states[smsm_entry].intr_mask_set = set_mask;
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002733
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002734 old_mask = __raw_readl(SMSM_INTR_MASK_ADDR(smsm_entry, SMSM_APPS));
2735 new_mask = (old_mask & ~clear_mask) | set_mask;
2736 __raw_writel(new_mask, SMSM_INTR_MASK_ADDR(smsm_entry, SMSM_APPS));
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002737
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002738 wmb();
2739 spin_unlock_irqrestore(&smem_lock, flags);
Brian Swetland5b0f5a32009-04-26 18:38:49 -07002740
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002741 return 0;
2742}
2743EXPORT_SYMBOL(smsm_change_intr_mask);
2744
2745int smsm_get_intr_mask(uint32_t smsm_entry, uint32_t *intr_mask)
2746{
2747 if (smsm_entry >= SMSM_NUM_ENTRIES) {
2748 pr_err("smsm_change_state: Invalid entry %d\n",
2749 smsm_entry);
2750 return -EINVAL;
2751 }
2752
2753 if (!smsm_info.intr_mask) {
2754 pr_err("smsm_change_intr_mask <SM NO STATE>\n");
2755 return -EIO;
2756 }
2757
2758 *intr_mask = __raw_readl(SMSM_INTR_MASK_ADDR(smsm_entry, SMSM_APPS));
2759 return 0;
2760}
2761EXPORT_SYMBOL(smsm_get_intr_mask);
2762
/*
 * Atomically clear then set bits in an SMSM state word and notify the
 * other processors of any resulting change.
 *
 * @smsm_entry  SMSM entry to change
 * @clear_mask  bits to clear (applied first)
 * @set_mask    bits to set
 *
 * @returns 0 for success, -EINVAL for a bad entry, -EIO if the shared
 *          state area is not yet allocated.
 */
int smsm_change_state(uint32_t smsm_entry,
		      uint32_t clear_mask, uint32_t set_mask)
{
	unsigned long flags;
	uint32_t old_state, new_state;

	if (smsm_entry >= SMSM_NUM_ENTRIES) {
		pr_err("smsm_change_state: Invalid entry %d",
		       smsm_entry);
		return -EINVAL;
	}

	if (!smsm_info.state) {
		pr_err("smsm_change_state <SM NO STATE>\n");
		return -EIO;
	}
	spin_lock_irqsave(&smem_lock, flags);

	old_state = __raw_readl(SMSM_STATE_ADDR(smsm_entry));
	new_state = (old_state & ~clear_mask) | set_mask;
	__raw_writel(new_state, SMSM_STATE_ADDR(smsm_entry));
	SMSM_DBG("smsm_change_state %x\n", new_state);
	/* interrupt the other processors about the changed bits */
	notify_other_smsm(SMSM_APPS_STATE, (old_state ^ new_state));

	spin_unlock_irqrestore(&smem_lock, flags);

	return 0;
}
EXPORT_SYMBOL(smsm_change_state);
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002792
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002793uint32_t smsm_get_state(uint32_t smsm_entry)
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002794{
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002795 uint32_t rv = 0;
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002796
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002797 /* needs interface change to return error code */
2798 if (smsm_entry >= SMSM_NUM_ENTRIES) {
2799 pr_err("smsm_change_state: Invalid entry %d",
2800 smsm_entry);
2801 return 0;
2802 }
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002803
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002804 if (!smsm_info.state) {
2805 pr_err("smsm_get_state <SM NO STATE>\n");
2806 } else {
2807 rv = __raw_readl(SMSM_STATE_ADDR(smsm_entry));
2808 }
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002809
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002810 return rv;
2811}
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002812EXPORT_SYMBOL(smsm_get_state);
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002813
/**
 * Performs SMSM callback client notification.
 *
 * Runs on smsm_cb_wq.  Drains complete snapshots (one uint32_t per SMSM
 * entry, followed by a wakelock-usage flag) from smsm_snapshot_fifo,
 * invokes registered callbacks for entries whose bits changed, and drops
 * the wakelock reference taken by smsm_cb_snapshot() for that snapshot.
 */
void notify_smsm_cb_clients_worker(struct work_struct *work)
{
	struct smsm_state_cb_info *cb_info;
	struct smsm_state_info *state_info;
	int n;
	uint32_t new_state;
	uint32_t state_changes;
	uint32_t use_wakelock;
	int ret;
	unsigned long flags;

	if (!smd_initialized)
		return;

	/* only consume whole snapshots; a partial one stays queued */
	while (kfifo_len(&smsm_snapshot_fifo) >= SMSM_SNAPSHOT_SIZE) {
		mutex_lock(&smsm_lock);
		for (n = 0; n < SMSM_NUM_ENTRIES; n++) {
			state_info = &smsm_states[n];

			ret = kfifo_out(&smsm_snapshot_fifo, &new_state,
					sizeof(new_state));
			if (ret != sizeof(new_state)) {
				pr_err("%s: snapshot underflow %d\n",
					__func__, ret);
				mutex_unlock(&smsm_lock);
				return;
			}

			/* notify only callbacks whose mask overlaps the
			 * bits that actually changed */
			state_changes = state_info->last_value ^ new_state;
			if (state_changes) {
				SMx_POWER_INFO("SMSM Change %d: %08x->%08x\n",
						n, state_info->last_value,
						new_state);
				list_for_each_entry(cb_info,
					&state_info->callbacks, cb_list) {

					if (cb_info->mask & state_changes)
						cb_info->notify(cb_info->data,
							state_info->last_value,
							new_state);
				}
				state_info->last_value = new_state;
			}
		}

		/* read wakelock flag */
		ret = kfifo_out(&smsm_snapshot_fifo, &use_wakelock,
				sizeof(use_wakelock));
		if (ret != sizeof(use_wakelock)) {
			pr_err("%s: snapshot underflow %d\n",
				__func__, ret);
			mutex_unlock(&smsm_lock);
			return;
		}
		mutex_unlock(&smsm_lock);

		/* release the reference smsm_cb_snapshot() took; drop the
		 * wakelock once no snapshots remain outstanding */
		if (use_wakelock) {
			spin_lock_irqsave(&smsm_snapshot_count_lock, flags);
			if (smsm_snapshot_count) {
				--smsm_snapshot_count;
				if (smsm_snapshot_count == 0) {
					SMx_POWER_INFO("SMSM snapshot"
						   " wake unlock\n");
					wake_unlock(&smsm_snapshot_wakelock);
				}
			} else {
				pr_err("%s: invalid snapshot count\n",
					__func__);
			}
			spin_unlock_irqrestore(&smsm_snapshot_count_lock,
					flags);
		}
	}
}
2891
Arve Hjønnevågec9d3d12009-06-16 14:48:21 -07002892
/**
 * Registers callback for SMSM state notifications when the specified
 * bits change.
 *
 * @smsm_entry Processor entry to register against
 * @mask       Bits whose changes trigger the callback
 * @notify     Notification function to register
 * @data       Opaque data passed in to callback
 *
 * @returns Status code
 *          <0 error code
 *          0  inserted new entry
 *          1  updated mask of existing entry
 */
int smsm_state_cb_register(uint32_t smsm_entry, uint32_t mask,
		void (*notify)(void *, uint32_t, uint32_t), void *data)
{
	struct smsm_state_info *state;
	struct smsm_state_cb_info *cb_info;
	struct smsm_state_cb_info *cb_found = 0;
	uint32_t new_mask = 0;
	int ret = 0;

	if (smsm_entry >= SMSM_NUM_ENTRIES)
		return -EINVAL;

	mutex_lock(&smsm_lock);

	if (!smsm_states) {
		/* smsm not yet initialized */
		ret = -ENODEV;
		goto cleanup;
	}

	/* merge into an existing (notify, data) registration if present,
	 * accumulating the union of all masks for this entry */
	state = &smsm_states[smsm_entry];
	list_for_each_entry(cb_info,
			&state->callbacks, cb_list) {
		if (!ret && (cb_info->notify == notify) &&
				(cb_info->data == data)) {
			cb_info->mask |= mask;
			cb_found = cb_info;
			ret = 1;
		}
		new_mask |= cb_info->mask;
	}

	if (!cb_found) {
		/* GFP_ATOMIC: callers may not be able to sleep here */
		cb_info = kmalloc(sizeof(struct smsm_state_cb_info),
			GFP_ATOMIC);
		if (!cb_info) {
			ret = -ENOMEM;
			goto cleanup;
		}

		cb_info->mask = mask;
		cb_info->notify = notify;
		cb_info->data = data;
		INIT_LIST_HEAD(&cb_info->cb_list);
		list_add_tail(&cb_info->cb_list,
			&state->callbacks);
		new_mask |= mask;
	}

	/* update interrupt notification mask */
	if (smsm_entry == SMSM_MODEM_STATE)
		new_mask |= LEGACY_MODEM_SMSM_MASK;

	if (smsm_info.intr_mask) {
		unsigned long flags;

		spin_lock_irqsave(&smem_lock, flags);
		/* re-apply the persistent set/clear from
		 * smsm_change_intr_mask() on top of the callback union */
		new_mask = (new_mask & ~state->intr_mask_clear)
				| state->intr_mask_set;
		__raw_writel(new_mask,
				SMSM_INTR_MASK_ADDR(smsm_entry, SMSM_APPS));
		wmb();
		spin_unlock_irqrestore(&smem_lock, flags);
	}

cleanup:
	mutex_unlock(&smsm_lock);
	return ret;
}
EXPORT_SYMBOL(smsm_state_cb_register);
2977
2978
/**
 * Deregisters for SMSM state notifications for the specified bits.
 *
 * @smsm_entry Processor entry to deregister
 * @mask       Bits to deregister (if result is 0, callback is removed)
 * @notify     Notification function to deregister
 * @data       Opaque data passed in to callback
 *
 * @returns Status code
 *          <0 error code
 *          0  not found
 *          1  updated mask
 *          2  removed callback
 */
int smsm_state_cb_deregister(uint32_t smsm_entry, uint32_t mask,
		void (*notify)(void *, uint32_t, uint32_t), void *data)
{
	struct smsm_state_cb_info *cb_info;
	struct smsm_state_cb_info *cb_tmp;
	struct smsm_state_info *state;
	uint32_t new_mask = 0;
	int ret = 0;

	if (smsm_entry >= SMSM_NUM_ENTRIES)
		return -EINVAL;

	mutex_lock(&smsm_lock);

	if (!smsm_states) {
		/* smsm not yet initialized */
		mutex_unlock(&smsm_lock);
		return -ENODEV;
	}

	/* _safe iteration: matching entries may be deleted in the loop */
	state = &smsm_states[smsm_entry];
	list_for_each_entry_safe(cb_info, cb_tmp,
		&state->callbacks, cb_list) {
		if (!ret && (cb_info->notify == notify) &&
			(cb_info->data == data)) {
			cb_info->mask &= ~mask;
			ret = 1;
			if (!cb_info->mask) {
				/* no mask bits set, remove callback */
				list_del(&cb_info->cb_list);
				kfree(cb_info);
				ret = 2;
				continue;
			}
		}
		/* union of the masks still registered for this entry */
		new_mask |= cb_info->mask;
	}

	/* update interrupt notification mask */
	if (smsm_entry == SMSM_MODEM_STATE)
		new_mask |= LEGACY_MODEM_SMSM_MASK;

	if (smsm_info.intr_mask) {
		unsigned long flags;

		spin_lock_irqsave(&smem_lock, flags);
		/* re-apply the persistent set/clear from
		 * smsm_change_intr_mask() on top of the callback union */
		new_mask = (new_mask & ~state->intr_mask_clear)
				| state->intr_mask_set;
		__raw_writel(new_mask,
				SMSM_INTR_MASK_ADDR(smsm_entry, SMSM_APPS));
		wmb();
		spin_unlock_irqrestore(&smem_lock, flags);
	}

	mutex_unlock(&smsm_lock);
	return ret;
}
EXPORT_SYMBOL(smsm_state_cb_deregister);
3051
Karthikeyan Ramasubramanian7069c482012-03-22 09:21:20 -06003052int smsm_driver_state_notifier_register(struct notifier_block *nb)
3053{
3054 int ret;
3055 if (!nb)
3056 return -EINVAL;
3057 mutex_lock(&smsm_driver_state_notifier_lock);
3058 ret = raw_notifier_chain_register(&smsm_driver_state_notifier_list, nb);
3059 mutex_unlock(&smsm_driver_state_notifier_lock);
3060 return ret;
3061}
3062EXPORT_SYMBOL(smsm_driver_state_notifier_register);
3063
3064int smsm_driver_state_notifier_unregister(struct notifier_block *nb)
3065{
3066 int ret;
3067 if (!nb)
3068 return -EINVAL;
3069 mutex_lock(&smsm_driver_state_notifier_lock);
3070 ret = raw_notifier_chain_unregister(&smsm_driver_state_notifier_list,
3071 nb);
3072 mutex_unlock(&smsm_driver_state_notifier_lock);
3073 return ret;
3074}
3075EXPORT_SYMBOL(smsm_driver_state_notifier_unregister);
3076
/*
 * Broadcast a driver lifecycle event (e.g. SMSM_INIT) on the driver-state
 * notifier chain.  Holds the chain mutex so calls are serialized against
 * notifier (un)registration; must therefore be called from sleepable
 * context.
 */
static void smsm_driver_state_notify(uint32_t state, void *data)
{
	mutex_lock(&smsm_driver_state_notifier_lock);
	raw_notifier_call_chain(&smsm_driver_state_notifier_list,
				state, data);
	mutex_unlock(&smsm_driver_state_notifier_lock);
}
Brian Swetland5b0f5a32009-04-26 18:38:49 -07003084
3085int smd_core_init(void)
3086{
3087 int r;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003088 unsigned long flags = IRQF_TRIGGER_RISING;
3089 SMD_INFO("smd_core_init()\n");
Brian Swetland5b0f5a32009-04-26 18:38:49 -07003090
Brian Swetland37521a32009-07-01 18:30:47 -07003091 r = request_irq(INT_A9_M2A_0, smd_modem_irq_handler,
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003092 flags, "smd_dev", 0);
Brian Swetland2eb44eb2008-09-29 16:00:48 -07003093 if (r < 0)
3094 return r;
3095 r = enable_irq_wake(INT_A9_M2A_0);
3096 if (r < 0)
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003097 pr_err("smd_core_init: "
3098 "enable_irq_wake failed for INT_A9_M2A_0\n");
Brian Swetland2eb44eb2008-09-29 16:00:48 -07003099
Eric Holmberg98c6c642012-02-24 11:29:35 -07003100 r = request_irq(INT_A9_M2A_5, smsm_modem_irq_handler,
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003101 flags, "smsm_dev", 0);
Brian Swetland2eb44eb2008-09-29 16:00:48 -07003102 if (r < 0) {
3103 free_irq(INT_A9_M2A_0, 0);
3104 return r;
3105 }
3106 r = enable_irq_wake(INT_A9_M2A_5);
3107 if (r < 0)
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003108 pr_err("smd_core_init: "
3109 "enable_irq_wake failed for INT_A9_M2A_5\n");
Brian Swetland2eb44eb2008-09-29 16:00:48 -07003110
Brian Swetland37521a32009-07-01 18:30:47 -07003111#if defined(CONFIG_QDSP6)
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003112#if (INT_ADSP_A11 == INT_ADSP_A11_SMSM)
3113 flags |= IRQF_SHARED;
3114#endif
Brian Swetland37521a32009-07-01 18:30:47 -07003115 r = request_irq(INT_ADSP_A11, smd_dsp_irq_handler,
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003116 flags, "smd_dev", smd_dsp_irq_handler);
Brian Swetland37521a32009-07-01 18:30:47 -07003117 if (r < 0) {
3118 free_irq(INT_A9_M2A_0, 0);
3119 free_irq(INT_A9_M2A_5, 0);
3120 return r;
3121 }
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003122
Eric Holmberg98c6c642012-02-24 11:29:35 -07003123 r = request_irq(INT_ADSP_A11_SMSM, smsm_dsp_irq_handler,
3124 flags, "smsm_dev", smsm_dsp_irq_handler);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003125 if (r < 0) {
3126 free_irq(INT_A9_M2A_0, 0);
3127 free_irq(INT_A9_M2A_5, 0);
3128 free_irq(INT_ADSP_A11, smd_dsp_irq_handler);
3129 return r;
3130 }
3131
3132 r = enable_irq_wake(INT_ADSP_A11);
3133 if (r < 0)
3134 pr_err("smd_core_init: "
3135 "enable_irq_wake failed for INT_ADSP_A11\n");
3136
3137#if (INT_ADSP_A11 != INT_ADSP_A11_SMSM)
3138 r = enable_irq_wake(INT_ADSP_A11_SMSM);
3139 if (r < 0)
3140 pr_err("smd_core_init: enable_irq_wake "
3141 "failed for INT_ADSP_A11_SMSM\n");
3142#endif
3143 flags &= ~IRQF_SHARED;
Brian Swetland37521a32009-07-01 18:30:47 -07003144#endif
3145
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003146#if defined(CONFIG_DSPS)
3147 r = request_irq(INT_DSPS_A11, smd_dsps_irq_handler,
3148 flags, "smd_dev", smd_dsps_irq_handler);
3149 if (r < 0) {
3150 free_irq(INT_A9_M2A_0, 0);
3151 free_irq(INT_A9_M2A_5, 0);
3152 free_irq(INT_ADSP_A11, smd_dsp_irq_handler);
Eric Holmberg98c6c642012-02-24 11:29:35 -07003153 free_irq(INT_ADSP_A11_SMSM, smsm_dsp_irq_handler);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003154 return r;
3155 }
Brian Swetland5b0f5a32009-04-26 18:38:49 -07003156
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003157 r = enable_irq_wake(INT_DSPS_A11);
3158 if (r < 0)
3159 pr_err("smd_core_init: "
3160 "enable_irq_wake failed for INT_ADSP_A11\n");
Arve Hjønnevågec9d3d12009-06-16 14:48:21 -07003161#endif
Brian Swetland2eb44eb2008-09-29 16:00:48 -07003162
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003163#if defined(CONFIG_WCNSS)
3164 r = request_irq(INT_WCNSS_A11, smd_wcnss_irq_handler,
3165 flags, "smd_dev", smd_wcnss_irq_handler);
3166 if (r < 0) {
3167 free_irq(INT_A9_M2A_0, 0);
3168 free_irq(INT_A9_M2A_5, 0);
3169 free_irq(INT_ADSP_A11, smd_dsp_irq_handler);
Eric Holmberg98c6c642012-02-24 11:29:35 -07003170 free_irq(INT_ADSP_A11_SMSM, smsm_dsp_irq_handler);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003171 free_irq(INT_DSPS_A11, smd_dsps_irq_handler);
3172 return r;
3173 }
3174
3175 r = enable_irq_wake(INT_WCNSS_A11);
3176 if (r < 0)
3177 pr_err("smd_core_init: "
3178 "enable_irq_wake failed for INT_WCNSS_A11\n");
3179
Eric Holmberg98c6c642012-02-24 11:29:35 -07003180 r = request_irq(INT_WCNSS_A11_SMSM, smsm_wcnss_irq_handler,
3181 flags, "smsm_dev", smsm_wcnss_irq_handler);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003182 if (r < 0) {
3183 free_irq(INT_A9_M2A_0, 0);
3184 free_irq(INT_A9_M2A_5, 0);
3185 free_irq(INT_ADSP_A11, smd_dsp_irq_handler);
Eric Holmberg98c6c642012-02-24 11:29:35 -07003186 free_irq(INT_ADSP_A11_SMSM, smsm_dsp_irq_handler);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003187 free_irq(INT_DSPS_A11, smd_dsps_irq_handler);
3188 free_irq(INT_WCNSS_A11, smd_wcnss_irq_handler);
3189 return r;
3190 }
3191
3192 r = enable_irq_wake(INT_WCNSS_A11_SMSM);
3193 if (r < 0)
3194 pr_err("smd_core_init: "
3195 "enable_irq_wake failed for INT_WCNSS_A11_SMSM\n");
3196#endif
3197
Jeff Hugo6a8057c2011-08-16 13:47:12 -06003198#if defined(CONFIG_DSPS_SMSM)
Eric Holmberg98c6c642012-02-24 11:29:35 -07003199 r = request_irq(INT_DSPS_A11_SMSM, smsm_dsps_irq_handler,
3200 flags, "smsm_dev", smsm_dsps_irq_handler);
Jeff Hugo6a8057c2011-08-16 13:47:12 -06003201 if (r < 0) {
3202 free_irq(INT_A9_M2A_0, 0);
3203 free_irq(INT_A9_M2A_5, 0);
3204 free_irq(INT_ADSP_A11, smd_dsp_irq_handler);
Eric Holmberg98c6c642012-02-24 11:29:35 -07003205 free_irq(INT_ADSP_A11_SMSM, smsm_dsp_irq_handler);
Jeff Hugo6a8057c2011-08-16 13:47:12 -06003206 free_irq(INT_DSPS_A11, smd_dsps_irq_handler);
3207 free_irq(INT_WCNSS_A11, smd_wcnss_irq_handler);
Eric Holmberg98c6c642012-02-24 11:29:35 -07003208 free_irq(INT_WCNSS_A11_SMSM, smsm_wcnss_irq_handler);
Jeff Hugo6a8057c2011-08-16 13:47:12 -06003209 return r;
3210 }
3211
3212 r = enable_irq_wake(INT_DSPS_A11_SMSM);
3213 if (r < 0)
3214 pr_err("smd_core_init: "
3215 "enable_irq_wake failed for INT_DSPS_A11_SMSM\n");
3216#endif
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003217 SMD_INFO("smd_core_init() done\n");
3218
Brian Swetland2eb44eb2008-09-29 16:00:48 -07003219 return 0;
3220}
3221
Angshuman Sarkarbad32df2012-02-01 19:52:52 +05303222static int intr_init(struct interrupt_config_item *private_irq,
3223 struct smd_irq_config *platform_irq,
3224 struct platform_device *pdev
3225 )
3226{
3227 int irq_id;
3228 int ret;
3229 int ret_wake;
3230
3231 private_irq->out_bit_pos = platform_irq->out_bit_pos;
3232 private_irq->out_offset = platform_irq->out_offset;
3233 private_irq->out_base = platform_irq->out_base;
3234
3235 irq_id = platform_get_irq_byname(
3236 pdev,
3237 platform_irq->irq_name
3238 );
3239 SMD_DBG("smd: %s: register irq: %s id: %d\n", __func__,
3240 platform_irq->irq_name, irq_id);
3241 ret = request_irq(irq_id,
3242 private_irq->irq_handler,
3243 platform_irq->flags,
3244 platform_irq->device_name,
3245 (void *)platform_irq->dev_id
3246 );
3247 if (ret < 0) {
3248 platform_irq->irq_id = ret;
3249 } else {
3250 platform_irq->irq_id = irq_id;
3251 ret_wake = enable_irq_wake(irq_id);
3252 if (ret_wake < 0) {
3253 pr_err("smd: enable_irq_wake failed on %s",
3254 platform_irq->irq_name);
3255 }
3256 }
3257
3258 return ret;
3259}
3260
Jeff Hugobdc734d2012-03-26 16:05:39 -06003261int sort_cmp_func(const void *a, const void *b)
3262{
3263 struct smem_area *left = (struct smem_area *)(a);
3264 struct smem_area *right = (struct smem_area *)(b);
3265
3266 return left->phys_addr - right->phys_addr;
3267}
3268
/*
 * smd_core_platform_init() - initialize SMD from board-file platform data.
 *
 * Three phases, each unwound in reverse on failure:
 *   1. optionally latch the SSR reset-handshake override,
 *   2. copy, ioremap and sort the auxiliary SMEM regions (if any),
 *   3. register the per-edge SMD and SMSM interrupts and record each
 *      edge's subsystem name.
 *
 * Returns 0 on success, negative errno on failure.  The error labels are
 * ordered so that intr_failed falls through into smem_failed: an IRQ
 * registration failure also unmaps and frees the SMEM areas.
 */
int smd_core_platform_init(struct platform_device *pdev)
{
	int i;
	int ret;
	uint32_t num_ss;
	struct smd_platform *smd_platform_data;
	struct smd_subsystem_config *smd_ss_config_list;
	struct smd_subsystem_config *cfg;
	int err_ret = 0;
	struct smd_smem_regions *smd_smem_areas;
	int smem_idx = 0;

	smd_platform_data = pdev->dev.platform_data;
	num_ss = smd_platform_data->num_ss_configs;
	smd_ss_config_list = smd_platform_data->smd_ss_configs;

	/* Optional SSR config: lets the board disable the SMSM reset
	 * handshake during subsystem restart. */
	if (smd_platform_data->smd_ssr_config)
		disable_smsm_reset_handshake = smd_platform_data->
			   smd_ssr_config->disable_smsm_reset_handshake;

	/* Phase 2: map any auxiliary SMEM regions described by the board
	 * file into a module-global table, sorted by physical address so
	 * later lookups can assume ordering. */
	smd_smem_areas = smd_platform_data->smd_smem_areas;
	if (smd_smem_areas) {
		num_smem_areas = smd_platform_data->num_smem_areas;
		smem_areas = kmalloc(sizeof(struct smem_area) * num_smem_areas,
						GFP_KERNEL);
		if (!smem_areas) {
			pr_err("%s: smem_areas kmalloc failed\n", __func__);
			err_ret = -ENOMEM;
			goto smem_areas_alloc_fail;
		}

		for (smem_idx = 0; smem_idx < num_smem_areas; ++smem_idx) {
			smem_areas[smem_idx].phys_addr =
					smd_smem_areas[smem_idx].phys_addr;
			smem_areas[smem_idx].size =
					smd_smem_areas[smem_idx].size;
			smem_areas[smem_idx].virt_addr = ioremap_nocache(
				(unsigned long)(smem_areas[smem_idx].phys_addr),
				smem_areas[smem_idx].size);
			if (!smem_areas[smem_idx].virt_addr) {
				pr_err("%s: ioremap_nocache() of addr:%p"
					" size: %x\n", __func__,
					smem_areas[smem_idx].phys_addr,
					smem_areas[smem_idx].size);
				err_ret = -ENOMEM;
				/* NOTE(review): the pre-goto increment makes
				 * the unmap loop below start at this failed
				 * entry, whose virt_addr is NULL — presumably
				 * iounmap(NULL) is a harmless no-op here;
				 * verify for this platform. */
				++smem_idx;
				goto smem_failed;
			}
		}
		sort(smem_areas, num_smem_areas,
				sizeof(struct smem_area),
				sort_cmp_func, NULL);
	}

	/* Phase 3: register SMD (and, where present, SMSM) interrupts for
	 * every configured edge. */
	for (i = 0; i < num_ss; i++) {
		cfg = &smd_ss_config_list[i];

		ret = intr_init(
			&private_intr_config[cfg->irq_config_id].smd,
			&cfg->smd_int,
			pdev
			);

		if (ret < 0) {
			err_ret = ret;
			pr_err("smd: register irq failed on %s\n",
				cfg->smd_int.irq_name);
			goto intr_failed;
		}

		/* only init smsm structs if this edge supports smsm */
		if (cfg->smsm_int.irq_id)
			ret = intr_init(
				&private_intr_config[cfg->irq_config_id].smsm,
				&cfg->smsm_int,
				pdev
				);

		/* If the edge has no SMSM IRQ, ret still holds the (non-
		 * negative) result of the smd intr_init above, so this check
		 * only fires for a real SMSM registration failure. */
		if (ret < 0) {
			err_ret = ret;
			pr_err("smd: register irq failed on %s\n",
				cfg->smsm_int.irq_name);
			goto intr_failed;
		}

		if (cfg->subsys_name)
			strlcpy(edge_to_pids[cfg->edge].subsys_name,
				cfg->subsys_name, SMD_MAX_CH_NAME_LEN);
	}


	SMD_INFO("smd_core_platform_init() done\n");
	return 0;

intr_failed:
	pr_err("smd: deregistering IRQs\n");
	/* NOTE(review): this walks ALL num_ss configs, including ones whose
	 * intr_init was never attempted — it assumes the board file
	 * initializes irq_id negative for untouched entries; confirm. */
	for (i = 0; i < num_ss; ++i) {
		cfg = &smd_ss_config_list[i];

		if (cfg->smd_int.irq_id >= 0)
			free_irq(cfg->smd_int.irq_id,
				(void *)cfg->smd_int.dev_id
				);
		if (cfg->smsm_int.irq_id >= 0)
			free_irq(cfg->smsm_int.irq_id,
				(void *)cfg->smsm_int.dev_id
				);
	}
smem_failed:
	/* Unmap in reverse order; smem_idx is 0 (loop body skipped) when no
	 * SMEM areas were configured, and kfree(NULL) is a no-op. */
	for (smem_idx = smem_idx - 1; smem_idx >= 0; --smem_idx)
		iounmap(smem_areas[smem_idx].virt_addr);
	kfree(smem_areas);
smem_areas_alloc_fail:
	return err_ret;
}
3384
Gregory Bean4416e9e2010-07-28 10:22:12 -07003385static int __devinit msm_smd_probe(struct platform_device *pdev)
Brian Swetland2eb44eb2008-09-29 16:00:48 -07003386{
Angshuman Sarkarbad32df2012-02-01 19:52:52 +05303387 int ret;
Daniel Walker0aec66d2010-03-18 12:31:08 -07003388
Angshuman Sarkarbad32df2012-02-01 19:52:52 +05303389 SMD_INFO("smd probe\n");
Brian Swetland2eb44eb2008-09-29 16:00:48 -07003390 INIT_WORK(&probe_work, smd_channel_probe_worker);
3391
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003392 channel_close_wq = create_singlethread_workqueue("smd_channel_close");
3393 if (IS_ERR(channel_close_wq)) {
3394 pr_err("%s: create_singlethread_workqueue ENOMEM\n", __func__);
3395 return -ENOMEM;
3396 }
3397
3398 if (smsm_init()) {
3399 pr_err("smsm_init() failed\n");
Brian Swetland2eb44eb2008-09-29 16:00:48 -07003400 return -1;
3401 }
3402
Angshuman Sarkarbad32df2012-02-01 19:52:52 +05303403 if (pdev) {
3404 if (pdev->dev.of_node) {
3405 pr_err("SMD: Device tree not currently supported\n");
3406 return -ENODEV;
3407 } else if (pdev->dev.platform_data) {
3408 ret = smd_core_platform_init(pdev);
3409 if (ret) {
3410 pr_err(
3411 "SMD: smd_core_platform_init() failed\n");
3412 return -ENODEV;
3413 }
3414 } else {
3415 ret = smd_core_init();
3416 if (ret) {
3417 pr_err("smd_core_init() failed\n");
3418 return -ENODEV;
3419 }
3420 }
3421 } else {
3422 pr_err("SMD: PDEV not found\n");
3423 return -ENODEV;
Brian Swetland2eb44eb2008-09-29 16:00:48 -07003424 }
Iliyan Malchev1207bab2009-11-15 18:16:43 -08003425
Brian Swetland2eb44eb2008-09-29 16:00:48 -07003426 smd_initialized = 1;
3427
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003428 smd_alloc_loopback_channel();
Eric Holmbergc33d4ab2011-10-24 10:28:25 -06003429 smsm_irq_handler(0, 0);
3430 tasklet_schedule(&smd_fake_irq_tasklet);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003431
Brian Swetland2eb44eb2008-09-29 16:00:48 -07003432 return 0;
3433}
3434
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003435static int restart_notifier_cb(struct notifier_block *this,
3436 unsigned long code,
3437 void *data);
3438
/*
 * Subsystem-restart registrations: one entry per restartable subsystem.
 * .processor names the SMD edge whose channels restart_notifier_cb()
 * resets when the named subsystem finishes shutting down.
 */
static struct restart_notifier_block restart_notifiers[] = {
	{SMD_MODEM, "modem", .nb.notifier_call = restart_notifier_cb},
	{SMD_Q6, "lpass", .nb.notifier_call = restart_notifier_cb},
	{SMD_WCNSS, "riva", .nb.notifier_call = restart_notifier_cb},
	{SMD_DSPS, "dsps", .nb.notifier_call = restart_notifier_cb},
	/* NOTE(review): "gss" maps to SMD_MODEM — presumably gss shares the
	 * modem edge; confirm this duplication is intentional. */
	{SMD_MODEM, "gss", .nb.notifier_call = restart_notifier_cb},
};
3446
3447static int restart_notifier_cb(struct notifier_block *this,
3448 unsigned long code,
3449 void *data)
3450{
3451 if (code == SUBSYS_AFTER_SHUTDOWN) {
3452 struct restart_notifier_block *notifier;
3453
3454 notifier = container_of(this,
3455 struct restart_notifier_block, nb);
3456 SMD_INFO("%s: ssrestart for processor %d ('%s')\n",
3457 __func__, notifier->processor,
3458 notifier->name);
3459
3460 smd_channel_reset(notifier->processor);
3461 }
3462
3463 return NOTIFY_DONE;
3464}
3465
3466static __init int modem_restart_late_init(void)
3467{
3468 int i;
3469 void *handle;
3470 struct restart_notifier_block *nb;
3471
3472 for (i = 0; i < ARRAY_SIZE(restart_notifiers); i++) {
3473 nb = &restart_notifiers[i];
3474 handle = subsys_notif_register_notifier(nb->name, &nb->nb);
3475 SMD_DBG("%s: registering notif for '%s', handle=%p\n",
3476 __func__, nb->name, handle);
3477 }
3478 return 0;
3479}
3480late_initcall(modem_restart_late_init);
3481
/* Platform driver registered by msm_smd_init(); bound to the board's SMD
 * device by MODULE_NAME, with msm_smd_probe() as the probe entry point. */
static struct platform_driver msm_smd_driver = {
	.probe = msm_smd_probe,
	.driver = {
		.name = MODULE_NAME,
		.owner = THIS_MODULE,
	},
};
3489
Mahesh Sivasubramaniand9041b02012-05-09 12:57:23 -06003490int __init msm_smd_init(void)
Brian Swetland2eb44eb2008-09-29 16:00:48 -07003491{
Mahesh Sivasubramaniand9041b02012-05-09 12:57:23 -06003492 static bool registered;
3493
3494 if (registered)
3495 return 0;
3496
3497 registered = true;
Brian Swetland2eb44eb2008-09-29 16:00:48 -07003498 return platform_driver_register(&msm_smd_driver);
3499}
3500
3501module_init(msm_smd_init);
3502
3503MODULE_DESCRIPTION("MSM Shared Memory Core");
3504MODULE_AUTHOR("Brian Swetland <swetland@google.com>");
3505MODULE_LICENSE("GPL");