/* arch/arm/mach-msm/smd.c
 *
 * Copyright (C) 2007 Google, Inc.
 * Copyright (c) 2008-2012, Code Aurora Forum. All rights reserved.
 * Author: Brian Swetland <swetland@google.com>
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 */

#include <linux/platform_device.h>
#include <linux/module.h>
#include <linux/fs.h>
#include <linux/cdev.h>
#include <linux/device.h>
#include <linux/wait.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/io.h>
#include <linux/termios.h>
#include <linux/ctype.h>
#include <linux/remote_spinlock.h>
#include <linux/uaccess.h>
#include <linux/kfifo.h>
#include <linux/wakelock.h>
#include <linux/notifier.h>
#include <mach/msm_smd.h>
#include <mach/msm_iomap.h>
#include <mach/system.h>
#include <mach/subsystem_notif.h>
#include <mach/socinfo.h>
#include <asm/cacheflush.h>

#include "smd_private.h"
#include "proc_comm.h"
#include "modem_notifier.h"

#if defined(CONFIG_ARCH_QSD8X50) || defined(CONFIG_ARCH_MSM8X60) \
	|| defined(CONFIG_ARCH_MSM8960) || defined(CONFIG_ARCH_FSM9XXX) \
	|| defined(CONFIG_ARCH_MSM9615) || defined(CONFIG_ARCH_APQ8064)
#define CONFIG_QDSP6 1
#endif

#if defined(CONFIG_ARCH_MSM8X60) || defined(CONFIG_ARCH_MSM8960) \
	|| defined(CONFIG_ARCH_APQ8064)
#define CONFIG_DSPS 1
#endif

#if defined(CONFIG_ARCH_MSM8960) \
	|| defined(CONFIG_ARCH_APQ8064)
#define CONFIG_WCNSS 1
#define CONFIG_DSPS_SMSM 1
#endif

#define MODULE_NAME "msm_smd"
#define SMEM_VERSION 0x000B
#define SMD_VERSION 0x00020000
#define SMSM_SNAPSHOT_CNT 64
#define SMSM_SNAPSHOT_SIZE ((SMSM_NUM_ENTRIES + 1) * 4)

uint32_t SMSM_NUM_ENTRIES = 8;
uint32_t SMSM_NUM_HOSTS = 3;

/* Legacy SMSM interrupt notifications */
#define LEGACY_MODEM_SMSM_MASK (SMSM_RESET | SMSM_INIT | SMSM_SMDINIT \
			| SMSM_RUN | SMSM_SYSTEM_DOWNLOAD)

enum {
	MSM_SMD_DEBUG = 1U << 0,
	MSM_SMSM_DEBUG = 1U << 1,
	MSM_SMD_INFO = 1U << 2,
	MSM_SMSM_INFO = 1U << 3,
	MSM_SMx_POWER_INFO = 1U << 4,
};

struct smsm_shared_info {
	uint32_t *state;
	uint32_t *intr_mask;
	uint32_t *intr_mux;
};

static struct smsm_shared_info smsm_info;
static struct kfifo smsm_snapshot_fifo;
static struct wake_lock smsm_snapshot_wakelock;
static int smsm_snapshot_count;
static DEFINE_SPINLOCK(smsm_snapshot_count_lock);

struct smsm_size_info_type {
	uint32_t num_hosts;
	uint32_t num_entries;
	uint32_t reserved0;
	uint32_t reserved1;
};

struct smsm_state_cb_info {
	struct list_head cb_list;
	uint32_t mask;
	void *data;
	void (*notify)(void *data, uint32_t old_state, uint32_t new_state);
};

struct smsm_state_info {
	struct list_head callbacks;
	uint32_t last_value;
	uint32_t intr_mask_set;
	uint32_t intr_mask_clear;
};

struct interrupt_config_item {
	/* must be initialized */
	irqreturn_t (*irq_handler)(int req, void *data);
	/* outgoing interrupt config (set from platform data) */
	uint32_t out_bit_pos;
	void __iomem *out_base;
	uint32_t out_offset;
};

struct interrupt_config {
	struct interrupt_config_item smd;
	struct interrupt_config_item smsm;
};

static irqreturn_t smd_modem_irq_handler(int irq, void *data);
static irqreturn_t smsm_modem_irq_handler(int irq, void *data);
static irqreturn_t smd_dsp_irq_handler(int irq, void *data);
static irqreturn_t smsm_dsp_irq_handler(int irq, void *data);
static irqreturn_t smd_dsps_irq_handler(int irq, void *data);
static irqreturn_t smsm_dsps_irq_handler(int irq, void *data);
static irqreturn_t smd_wcnss_irq_handler(int irq, void *data);
static irqreturn_t smsm_wcnss_irq_handler(int irq, void *data);
static irqreturn_t smd_rpm_irq_handler(int irq, void *data);
static irqreturn_t smsm_irq_handler(int irq, void *data);

static struct interrupt_config private_intr_config[NUM_SMD_SUBSYSTEMS] = {
	[SMD_MODEM] = {
		.smd.irq_handler = smd_modem_irq_handler,
		.smsm.irq_handler = smsm_modem_irq_handler,
	},
	[SMD_Q6] = {
		.smd.irq_handler = smd_dsp_irq_handler,
		.smsm.irq_handler = smsm_dsp_irq_handler,
	},
	[SMD_DSPS] = {
		.smd.irq_handler = smd_dsps_irq_handler,
		.smsm.irq_handler = smsm_dsps_irq_handler,
	},
	[SMD_WCNSS] = {
		.smd.irq_handler = smd_wcnss_irq_handler,
		.smsm.irq_handler = smsm_wcnss_irq_handler,
	},
	[SMD_RPM] = {
		.smd.irq_handler = smd_rpm_irq_handler,
		.smsm.irq_handler = NULL, /* does not support smsm */
	},
};
struct interrupt_stat interrupt_stats[NUM_SMD_SUBSYSTEMS];

#define SMSM_STATE_ADDR(entry)           (smsm_info.state + entry)
#define SMSM_INTR_MASK_ADDR(entry, host) (smsm_info.intr_mask + \
					  entry * SMSM_NUM_HOSTS + host)
#define SMSM_INTR_MUX_ADDR(entry)        (smsm_info.intr_mux + entry)

/* Internal definitions which are not exported in some targets */
enum {
	SMSM_APPS_DEM_I = 3,
};

static int msm_smd_debug_mask;
module_param_named(debug_mask, msm_smd_debug_mask,
		   int, S_IRUGO | S_IWUSR | S_IWGRP);

#if defined(CONFIG_MSM_SMD_DEBUG)
#define SMD_DBG(x...) do {				\
		if (msm_smd_debug_mask & MSM_SMD_DEBUG)	\
			printk(KERN_DEBUG x);		\
	} while (0)

#define SMSM_DBG(x...) do {					\
		if (msm_smd_debug_mask & MSM_SMSM_DEBUG)	\
			printk(KERN_DEBUG x);			\
	} while (0)

#define SMD_INFO(x...) do {				\
		if (msm_smd_debug_mask & MSM_SMD_INFO)	\
			printk(KERN_INFO x);		\
	} while (0)

#define SMSM_INFO(x...) do {				\
		if (msm_smd_debug_mask & MSM_SMSM_INFO)	\
			printk(KERN_INFO x);		\
	} while (0)
#define SMx_POWER_INFO(x...) do {				\
		if (msm_smd_debug_mask & MSM_SMx_POWER_INFO)	\
			printk(KERN_INFO x);			\
	} while (0)
#else
#define SMD_DBG(x...) do { } while (0)
#define SMSM_DBG(x...) do { } while (0)
#define SMD_INFO(x...) do { } while (0)
#define SMSM_INFO(x...) do { } while (0)
#define SMx_POWER_INFO(x...) do { } while (0)
#endif

static unsigned last_heap_free = 0xffffffff;

static inline void smd_write_intr(unsigned int val,
				const void __iomem *addr);

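/*
 * Per-target outgoing interrupt triggers. On legacy targets the apps
 * processor raises the remote processor's SMD/SMSM interrupt by writing
 * directly to the register selected below; newer targets leave these
 * macros empty and use platform device / device tree configuration
 * instead (see the interrupt_config tables above).
 */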
#if defined(CONFIG_ARCH_MSM7X30)
#define MSM_TRIG_A2M_SMD_INT     \
			(smd_write_intr(1 << 0, MSM_APCS_GCC_BASE + 0x8))
#define MSM_TRIG_A2Q6_SMD_INT    \
			(smd_write_intr(1 << 8, MSM_APCS_GCC_BASE + 0x8))
#define MSM_TRIG_A2M_SMSM_INT    \
			(smd_write_intr(1 << 5, MSM_APCS_GCC_BASE + 0x8))
#define MSM_TRIG_A2Q6_SMSM_INT   \
			(smd_write_intr(1 << 8, MSM_APCS_GCC_BASE + 0x8))
#define MSM_TRIG_A2DSPS_SMD_INT
#define MSM_TRIG_A2DSPS_SMSM_INT
#define MSM_TRIG_A2WCNSS_SMD_INT
#define MSM_TRIG_A2WCNSS_SMSM_INT
#elif defined(CONFIG_ARCH_MSM8X60)
#define MSM_TRIG_A2M_SMD_INT     \
			(smd_write_intr(1 << 3, MSM_GCC_BASE + 0x8))
#define MSM_TRIG_A2Q6_SMD_INT    \
			(smd_write_intr(1 << 15, MSM_GCC_BASE + 0x8))
#define MSM_TRIG_A2M_SMSM_INT    \
			(smd_write_intr(1 << 4, MSM_GCC_BASE + 0x8))
#define MSM_TRIG_A2Q6_SMSM_INT   \
			(smd_write_intr(1 << 14, MSM_GCC_BASE + 0x8))
#define MSM_TRIG_A2DSPS_SMD_INT  \
			(smd_write_intr(1, MSM_SIC_NON_SECURE_BASE + 0x4080))
#define MSM_TRIG_A2DSPS_SMSM_INT
#define MSM_TRIG_A2WCNSS_SMD_INT
#define MSM_TRIG_A2WCNSS_SMSM_INT
#elif defined(CONFIG_ARCH_MSM9615)
#define MSM_TRIG_A2M_SMD_INT     \
			(smd_write_intr(1 << 3, MSM_APCS_GCC_BASE + 0x8))
#define MSM_TRIG_A2Q6_SMD_INT    \
			(smd_write_intr(1 << 15, MSM_APCS_GCC_BASE + 0x8))
#define MSM_TRIG_A2M_SMSM_INT    \
			(smd_write_intr(1 << 4, MSM_APCS_GCC_BASE + 0x8))
#define MSM_TRIG_A2Q6_SMSM_INT   \
			(smd_write_intr(1 << 14, MSM_APCS_GCC_BASE + 0x8))
#define MSM_TRIG_A2DSPS_SMD_INT
#define MSM_TRIG_A2DSPS_SMSM_INT
#define MSM_TRIG_A2WCNSS_SMD_INT
#define MSM_TRIG_A2WCNSS_SMSM_INT
#elif defined(CONFIG_ARCH_FSM9XXX)
#define MSM_TRIG_A2Q6_SMD_INT    \
			(smd_write_intr(1 << 10, MSM_GCC_BASE + 0x8))
#define MSM_TRIG_A2Q6_SMSM_INT   \
			(smd_write_intr(1 << 10, MSM_GCC_BASE + 0x8))
#define MSM_TRIG_A2M_SMD_INT     \
			(smd_write_intr(1 << 0, MSM_GCC_BASE + 0x8))
#define MSM_TRIG_A2M_SMSM_INT    \
			(smd_write_intr(1 << 5, MSM_GCC_BASE + 0x8))
#define MSM_TRIG_A2DSPS_SMD_INT
#define MSM_TRIG_A2DSPS_SMSM_INT
#define MSM_TRIG_A2WCNSS_SMD_INT
#define MSM_TRIG_A2WCNSS_SMSM_INT
#elif defined(CONFIG_ARCH_MSM7X01A) || defined(CONFIG_ARCH_MSM7x25)
#define MSM_TRIG_A2M_SMD_INT     \
			(smd_write_intr(1, MSM_CSR_BASE + 0x400 + (0) * 4))
#define MSM_TRIG_A2Q6_SMD_INT
#define MSM_TRIG_A2M_SMSM_INT    \
			(smd_write_intr(1, MSM_CSR_BASE + 0x400 + (5) * 4))
#define MSM_TRIG_A2Q6_SMSM_INT
#define MSM_TRIG_A2DSPS_SMD_INT
#define MSM_TRIG_A2DSPS_SMSM_INT
#define MSM_TRIG_A2WCNSS_SMD_INT
#define MSM_TRIG_A2WCNSS_SMSM_INT
#elif defined(CONFIG_ARCH_MSM7X27) || defined(CONFIG_ARCH_MSM7X27A)
#define MSM_TRIG_A2M_SMD_INT     \
			(smd_write_intr(1, MSM_CSR_BASE + 0x400 + (0) * 4))
#define MSM_TRIG_A2Q6_SMD_INT
#define MSM_TRIG_A2M_SMSM_INT    \
			(smd_write_intr(1, MSM_CSR_BASE + 0x400 + (5) * 4))
#define MSM_TRIG_A2Q6_SMSM_INT
#define MSM_TRIG_A2DSPS_SMD_INT
#define MSM_TRIG_A2DSPS_SMSM_INT
#define MSM_TRIG_A2WCNSS_SMD_INT
#define MSM_TRIG_A2WCNSS_SMSM_INT
#else /* use platform device / device tree configuration */
#define MSM_TRIG_A2M_SMD_INT
#define MSM_TRIG_A2Q6_SMD_INT
#define MSM_TRIG_A2M_SMSM_INT
#define MSM_TRIG_A2Q6_SMSM_INT
#define MSM_TRIG_A2DSPS_SMD_INT
#define MSM_TRIG_A2DSPS_SMSM_INT
#define MSM_TRIG_A2WCNSS_SMD_INT
#define MSM_TRIG_A2WCNSS_SMSM_INT
#endif

/*
 * stub out legacy macros if they are not being used so that the legacy
 * code compiles even though it is not used
 *
 * these definitions should not be used in active code and will cause
 * an early failure
 */
#ifndef INT_A9_M2A_0
#define INT_A9_M2A_0 -1
#endif
#ifndef INT_A9_M2A_5
#define INT_A9_M2A_5 -1
#endif
#ifndef INT_ADSP_A11
#define INT_ADSP_A11 -1
#endif
#ifndef INT_ADSP_A11_SMSM
#define INT_ADSP_A11_SMSM -1
#endif
#ifndef INT_DSPS_A11
#define INT_DSPS_A11 -1
#endif
#ifndef INT_DSPS_A11_SMSM
#define INT_DSPS_A11_SMSM -1
#endif
#ifndef INT_WCNSS_A11
#define INT_WCNSS_A11 -1
#endif
#ifndef INT_WCNSS_A11_SMSM
#define INT_WCNSS_A11_SMSM -1
#endif

#define SMD_LOOPBACK_CID 100

#define SMEM_SPINLOCK_SMEM_ALLOC "S:3"
static remote_spinlock_t remote_spinlock;

static LIST_HEAD(smd_ch_list_loopback);
static void smd_fake_irq_handler(unsigned long arg);
static void smsm_cb_snapshot(uint32_t use_wakelock);

static void notify_smsm_cb_clients_worker(struct work_struct *work);
static DECLARE_WORK(smsm_cb_work, notify_smsm_cb_clients_worker);
static DEFINE_MUTEX(smsm_lock);
static struct smsm_state_info *smsm_states;
static int spinlocks_initialized;
static RAW_NOTIFIER_HEAD(smsm_driver_state_notifier_list);
static DEFINE_MUTEX(smsm_driver_state_notifier_lock);
static void smsm_driver_state_notify(uint32_t state, void *data);

static inline void smd_write_intr(unsigned int val,
				const void __iomem *addr)
{
	wmb();
	__raw_writel(val, addr);
}

#ifdef CONFIG_WCNSS
static inline void wakeup_v1_riva(void)
{
	/*
	 * workaround hack for RIVA v1 hardware bug
	 * trigger GPIO 40 to wake up RIVA from power collapse
	 * not to be sent to customers
	 */
	if (SOCINFO_VERSION_MAJOR(socinfo_get_version()) == 1) {
		__raw_writel(0x0, MSM_TLMM_BASE + 0x1284);
		__raw_writel(0x2, MSM_TLMM_BASE + 0x1284);
	}
	/* end workaround */
}
#else
static inline void wakeup_v1_riva(void) {}
#endif

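/*
 * The notify_*_smd()/notify_*_smsm() helpers below raise the outgoing
 * interrupt to the named remote processor: if an interrupt register was
 * provided through platform data (out_base is set), write the configured
 * bit, otherwise fall back to the legacy hardcoded MSM_TRIG_* macro.
 * A per-subsystem counter is updated either way for debug statistics.
 */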
static inline void notify_modem_smd(void)
{
	static const struct interrupt_config_item *intr
		= &private_intr_config[SMD_MODEM].smd;
	if (intr->out_base) {
		++interrupt_stats[SMD_MODEM].smd_out_config_count;
		smd_write_intr(intr->out_bit_pos,
				intr->out_base + intr->out_offset);
	} else {
		++interrupt_stats[SMD_MODEM].smd_out_hardcode_count;
		MSM_TRIG_A2M_SMD_INT;
	}
}

static inline void notify_dsp_smd(void)
{
	static const struct interrupt_config_item *intr
		= &private_intr_config[SMD_Q6].smd;
	if (intr->out_base) {
		++interrupt_stats[SMD_Q6].smd_out_config_count;
		smd_write_intr(intr->out_bit_pos,
				intr->out_base + intr->out_offset);
	} else {
		++interrupt_stats[SMD_Q6].smd_out_hardcode_count;
		MSM_TRIG_A2Q6_SMD_INT;
	}
}

static inline void notify_dsps_smd(void)
{
	static const struct interrupt_config_item *intr
		= &private_intr_config[SMD_DSPS].smd;
	if (intr->out_base) {
		++interrupt_stats[SMD_DSPS].smd_out_config_count;
		smd_write_intr(intr->out_bit_pos,
				intr->out_base + intr->out_offset);
	} else {
		++interrupt_stats[SMD_DSPS].smd_out_hardcode_count;
		MSM_TRIG_A2DSPS_SMD_INT;
	}
}

static inline void notify_wcnss_smd(void)
{
	static const struct interrupt_config_item *intr
		= &private_intr_config[SMD_WCNSS].smd;
	wakeup_v1_riva();

	if (intr->out_base) {
		++interrupt_stats[SMD_WCNSS].smd_out_config_count;
		smd_write_intr(intr->out_bit_pos,
				intr->out_base + intr->out_offset);
	} else {
		++interrupt_stats[SMD_WCNSS].smd_out_hardcode_count;
		MSM_TRIG_A2WCNSS_SMD_INT;
	}
}

static inline void notify_rpm_smd(void)
{
	static const struct interrupt_config_item *intr
		= &private_intr_config[SMD_RPM].smd;

	if (intr->out_base) {
		++interrupt_stats[SMD_RPM].smd_out_config_count;
		smd_write_intr(intr->out_bit_pos,
				intr->out_base + intr->out_offset);
	}
}

static inline void notify_modem_smsm(void)
{
	static const struct interrupt_config_item *intr
		= &private_intr_config[SMD_MODEM].smsm;
	if (intr->out_base) {
		++interrupt_stats[SMD_MODEM].smsm_out_config_count;
		smd_write_intr(intr->out_bit_pos,
				intr->out_base + intr->out_offset);
	} else {
		++interrupt_stats[SMD_MODEM].smsm_out_hardcode_count;
		MSM_TRIG_A2M_SMSM_INT;
	}
}

static inline void notify_dsp_smsm(void)
{
	static const struct interrupt_config_item *intr
		= &private_intr_config[SMD_Q6].smsm;
	if (intr->out_base) {
		++interrupt_stats[SMD_Q6].smsm_out_config_count;
		smd_write_intr(intr->out_bit_pos,
				intr->out_base + intr->out_offset);
	} else {
		++interrupt_stats[SMD_Q6].smsm_out_hardcode_count;
		MSM_TRIG_A2Q6_SMSM_INT;
	}
}

static inline void notify_dsps_smsm(void)
{
	static const struct interrupt_config_item *intr
		= &private_intr_config[SMD_DSPS].smsm;
	if (intr->out_base) {
		++interrupt_stats[SMD_DSPS].smsm_out_config_count;
		smd_write_intr(intr->out_bit_pos,
				intr->out_base + intr->out_offset);
	} else {
		++interrupt_stats[SMD_DSPS].smsm_out_hardcode_count;
		MSM_TRIG_A2DSPS_SMSM_INT;
	}
}

static inline void notify_wcnss_smsm(void)
{
	static const struct interrupt_config_item *intr
		= &private_intr_config[SMD_WCNSS].smsm;
	wakeup_v1_riva();

	if (intr->out_base) {
		++interrupt_stats[SMD_WCNSS].smsm_out_config_count;
		smd_write_intr(intr->out_bit_pos,
				intr->out_base + intr->out_offset);
	} else {
		++interrupt_stats[SMD_WCNSS].smsm_out_hardcode_count;
		MSM_TRIG_A2WCNSS_SMSM_INT;
	}
}

static void notify_other_smsm(uint32_t smsm_entry, uint32_t notify_mask)
{
	/* older protocols don't use smsm_intr_mask,
	 * but still communicate with the modem */
	if (!smsm_info.intr_mask ||
	    (__raw_readl(SMSM_INTR_MASK_ADDR(smsm_entry, SMSM_MODEM))
	     & notify_mask))
		notify_modem_smsm();

	if (smsm_info.intr_mask &&
	    (__raw_readl(SMSM_INTR_MASK_ADDR(smsm_entry, SMSM_Q6))
	     & notify_mask)) {
		uint32_t mux_val;

		if (cpu_is_qsd8x50() && smsm_info.intr_mux) {
			mux_val = __raw_readl(
					SMSM_INTR_MUX_ADDR(SMEM_APPS_Q6_SMSM));
			mux_val++;
			__raw_writel(mux_val,
					SMSM_INTR_MUX_ADDR(SMEM_APPS_Q6_SMSM));
		}
		notify_dsp_smsm();
	}

	if (smsm_info.intr_mask &&
	    (__raw_readl(SMSM_INTR_MASK_ADDR(smsm_entry, SMSM_WCNSS))
	     & notify_mask)) {
		notify_wcnss_smsm();
	}

	if (smsm_info.intr_mask &&
	    (__raw_readl(SMSM_INTR_MASK_ADDR(smsm_entry, SMSM_DSPS))
	     & notify_mask)) {
		notify_dsps_smsm();
	}

	/*
	 * Notify local SMSM callback clients without wakelock since this
	 * code is used by power management during power-down/-up sequencing
	 * on DEM-based targets.  Grabbing a wakelock in this case will
	 * abort the power-down sequencing.
	 */
	smsm_cb_snapshot(0);
}

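/*
 * smd_diag() dumps the diagnostic error message and crash log that the
 * remote processors leave in shared memory; it is called when handling
 * a modem crash.
 */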
void smd_diag(void)
{
	char *x;
	int size;

	x = smem_find(ID_DIAG_ERR_MSG, SZ_DIAG_ERR_MSG);
	if (x != 0) {
		x[SZ_DIAG_ERR_MSG - 1] = 0;
		SMD_INFO("smem: DIAG '%s'\n", x);
	}

	x = smem_get_entry(SMEM_ERR_CRASH_LOG, &size);
	if (x != 0) {
		x[size - 1] = 0;
		pr_err("smem: CRASH LOG\n'%s'\n", x);
	}
}

static void handle_modem_crash(void)
{
	pr_err("MODEM/AMSS has CRASHED\n");
	smd_diag();

	/* hard reboot if possible FIXME
	if (msm_reset_hook)
		msm_reset_hook();
	*/

	/* in this case the modem or watchdog should reboot us */
	for (;;)
		;
}

int smsm_check_for_modem_crash(void)
{
	/* if the modem's not ready yet, we have to hope for the best */
	if (!smsm_info.state)
		return 0;

	if (__raw_readl(SMSM_STATE_ADDR(SMSM_MODEM_STATE)) & SMSM_RESET) {
		handle_modem_crash();
		return -1;
	}
	return 0;
}
EXPORT_SYMBOL(smsm_check_for_modem_crash);

/* the spinlock is used to synchronize between the
 * irq handler and code that mutates the channel
 * list or fiddles with channel state
 */
static DEFINE_SPINLOCK(smd_lock);
DEFINE_SPINLOCK(smem_lock);

/* the mutex is used during open() and close()
 * operations to avoid races while creating or
 * destroying smd_channel structures
 */
static DEFINE_MUTEX(smd_creation_mutex);

static int smd_initialized;

struct smd_shared_v1 {
	struct smd_half_channel ch0;
	unsigned char data0[SMD_BUF_SIZE];
	struct smd_half_channel ch1;
	unsigned char data1[SMD_BUF_SIZE];
};

struct smd_shared_v2 {
	struct smd_half_channel ch0;
	struct smd_half_channel ch1;
};

struct smd_shared_v2_word_access {
	struct smd_half_channel_word_access ch0;
	struct smd_half_channel_word_access ch1;
};

struct smd_channel {
	volatile void *send; /* some variant of smd_half_channel */
	volatile void *recv; /* some variant of smd_half_channel */
	unsigned char *send_data;
	unsigned char *recv_data;
	unsigned fifo_size;
	unsigned fifo_mask;
	struct list_head ch_list;

	unsigned current_packet;
	unsigned n;
	void *priv;
	void (*notify)(void *priv, unsigned flags);

	int (*read)(smd_channel_t *ch, void *data, int len, int user_buf);
	int (*write)(smd_channel_t *ch, const void *data, int len,
			int user_buf);
	int (*read_avail)(smd_channel_t *ch);
	int (*write_avail)(smd_channel_t *ch);
	int (*read_from_cb)(smd_channel_t *ch, void *data, int len,
			int user_buf);

	void (*update_state)(smd_channel_t *ch);
	unsigned last_state;
	void (*notify_other_cpu)(void);

	char name[20];
	struct platform_device pdev;
	unsigned type;

	int pending_pkt_sz;

	char is_pkt_ch;

	/*
	 * private internal functions to access *send and *recv.
	 * never to be exported outside of smd
	 */
	struct smd_half_channel_access *half_ch;
};

struct edge_to_pid {
	uint32_t local_pid;
	uint32_t remote_pid;
	char subsys_name[SMD_MAX_CH_NAME_LEN];
};

/**
 * Maps edge type to local and remote processor ID's.
 */
static struct edge_to_pid edge_to_pids[] = {
	[SMD_APPS_MODEM] = {SMD_APPS, SMD_MODEM, "modem"},
	[SMD_APPS_QDSP] = {SMD_APPS, SMD_Q6, "q6"},
	[SMD_MODEM_QDSP] = {SMD_MODEM, SMD_Q6},
	[SMD_APPS_DSPS] = {SMD_APPS, SMD_DSPS, "dsps"},
	[SMD_MODEM_DSPS] = {SMD_MODEM, SMD_DSPS},
	[SMD_QDSP_DSPS] = {SMD_Q6, SMD_DSPS},
	[SMD_APPS_WCNSS] = {SMD_APPS, SMD_WCNSS, "wcnss"},
	[SMD_MODEM_WCNSS] = {SMD_MODEM, SMD_WCNSS},
	[SMD_QDSP_WCNSS] = {SMD_Q6, SMD_WCNSS},
	[SMD_DSPS_WCNSS] = {SMD_DSPS, SMD_WCNSS},
	[SMD_APPS_Q6FW] = {SMD_APPS, SMD_MODEM_Q6_FW},
	[SMD_MODEM_Q6FW] = {SMD_MODEM, SMD_MODEM_Q6_FW},
	[SMD_QDSP_Q6FW] = {SMD_Q6, SMD_MODEM_Q6_FW},
	[SMD_DSPS_Q6FW] = {SMD_DSPS, SMD_MODEM_Q6_FW},
	[SMD_WCNSS_Q6FW] = {SMD_WCNSS, SMD_MODEM_Q6_FW},
	[SMD_APPS_RPM] = {SMD_APPS, SMD_RPM},
	[SMD_MODEM_RPM] = {SMD_MODEM, SMD_RPM},
	[SMD_QDSP_RPM] = {SMD_Q6, SMD_RPM},
	[SMD_WCNSS_RPM] = {SMD_WCNSS, SMD_RPM},
};

struct restart_notifier_block {
	unsigned processor;
	char *name;
	struct notifier_block nb;
};

static int disable_smsm_reset_handshake;
static struct platform_device loopback_tty_pdev = {.name = "LOOPBACK_TTY"};

static LIST_HEAD(smd_ch_closed_list);
static LIST_HEAD(smd_ch_closing_list);
static LIST_HEAD(smd_ch_to_close_list);
static LIST_HEAD(smd_ch_list_modem);
static LIST_HEAD(smd_ch_list_dsp);
static LIST_HEAD(smd_ch_list_dsps);
static LIST_HEAD(smd_ch_list_wcnss);
static LIST_HEAD(smd_ch_list_rpm);

static unsigned char smd_ch_allocated[64];
static struct work_struct probe_work;

static void finalize_channel_close_fn(struct work_struct *work);
static DECLARE_WORK(finalize_channel_close_work, finalize_channel_close_fn);
static struct workqueue_struct *channel_close_wq;

static int smd_alloc_channel(struct smd_alloc_elm *alloc_elm);

/* on smp systems, the probe might get called from multiple cores,
   hence use a lock */
static DEFINE_MUTEX(smd_probe_lock);

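/*
 * Walk the shared-memory channel allocation table and register any
 * channel on an apps edge that has not been allocated locally yet;
 * runs from probe_work whenever new shared memory is handed out.
 */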
static void smd_channel_probe_worker(struct work_struct *work)
{
	struct smd_alloc_elm *shared;
	unsigned n;
	uint32_t type;

	shared = smem_find(ID_CH_ALLOC_TBL, sizeof(*shared) * 64);

	if (!shared) {
		pr_err("%s: allocation table not initialized\n", __func__);
		return;
	}

	mutex_lock(&smd_probe_lock);
	for (n = 0; n < 64; n++) {
		if (smd_ch_allocated[n])
			continue;

		/* channel should be allocated only if APPS
		   processor is involved */
		type = SMD_CHANNEL_TYPE(shared[n].type);
		if (type >= ARRAY_SIZE(edge_to_pids) ||
				edge_to_pids[type].local_pid != SMD_APPS)
			continue;
		if (!shared[n].ref_count)
			continue;
		if (!shared[n].name[0])
			continue;

		if (!smd_alloc_channel(&shared[n]))
			smd_ch_allocated[n] = 1;
		else
			SMD_INFO("Probe skipping ch %d, not allocated\n", n);
	}
	mutex_unlock(&smd_probe_lock);
}

/**
 * Look up the processor ID and determine if it belongs to the provided
 * edge type.
 *
 * @shared2: Pointer to v2 shared channel structure
 * @type: Edge type
 * @pid: Processor ID of processor on edge
 * @local_ch: Channel that belongs to processor @pid
 * @remote_ch: Other side of the edge containing @pid
 *
 * Returns 0 for not on edge, 1 for found on edge
 */
static int pid_is_on_edge(struct smd_shared_v2 *shared2,
		uint32_t type, uint32_t pid,
		struct smd_half_channel **local_ch,
		struct smd_half_channel **remote_ch
		)
{
	int ret = 0;
	struct edge_to_pid *edge;

	*local_ch = 0;
	*remote_ch = 0;

	if (!shared2 || (type >= ARRAY_SIZE(edge_to_pids)))
		return 0;

	edge = &edge_to_pids[type];
	if (edge->local_pid != edge->remote_pid) {
		if (pid == edge->local_pid) {
			*local_ch = &shared2->ch0;
			*remote_ch = &shared2->ch1;
			ret = 1;
		} else if (pid == edge->remote_pid) {
			*local_ch = &shared2->ch1;
			*remote_ch = &shared2->ch0;
			ret = 1;
		}
	}

	return ret;
}

/*
 * Returns a pointer to the subsystem name or NULL if no
 * subsystem name is available.
 *
 * @type - Edge definition
 */
const char *smd_edge_to_subsystem(uint32_t type)
{
	const char *subsys = NULL;

	if (type < ARRAY_SIZE(edge_to_pids)) {
		subsys = edge_to_pids[type].subsys_name;
		if (subsys[0] == 0x0)
			subsys = NULL;
	}
	return subsys;
}
EXPORT_SYMBOL(smd_edge_to_subsystem);

/*
 * Returns a pointer to the subsystem name given the
 * remote processor ID.
 *
 * @pid     Remote processor ID
 * @returns Pointer to subsystem name or NULL if not found
 */
const char *smd_pid_to_subsystem(uint32_t pid)
{
	const char *subsys = NULL;
	int i;

	for (i = 0; i < ARRAY_SIZE(edge_to_pids); ++i) {
		if (pid == edge_to_pids[i].remote_pid &&
			edge_to_pids[i].subsys_name[0] != 0x0
			) {
			subsys = edge_to_pids[i].subsys_name;
			break;
		}
	}

	return subsys;
}
EXPORT_SYMBOL(smd_pid_to_subsystem);

static void smd_reset_edge(struct smd_half_channel *ch, unsigned new_state)
{
	if (ch->state != SMD_SS_CLOSED) {
		ch->state = new_state;
		ch->fDSR = 0;
		ch->fCTS = 0;
		ch->fCD = 0;
		ch->fSTATE = 1;
	}
}

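/*
 * Force every allocated channel that shares an edge with the restarting
 * processor into @new_state so that local clients observe a clean
 * close/reopen cycle after a subsystem restart.
 */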
static void smd_channel_reset_state(struct smd_alloc_elm *shared,
		unsigned new_state, unsigned pid)
{
	unsigned n;
	struct smd_shared_v2 *shared2;
	uint32_t type;
	struct smd_half_channel *local_ch;
	struct smd_half_channel *remote_ch;

	for (n = 0; n < SMD_CHANNELS; n++) {
		if (!shared[n].ref_count)
			continue;
		if (!shared[n].name[0])
			continue;

		type = SMD_CHANNEL_TYPE(shared[n].type);
		shared2 = smem_alloc(SMEM_SMD_BASE_ID + n, sizeof(*shared2));
		if (!shared2)
			continue;

		if (pid_is_on_edge(shared2, type, pid, &local_ch, &remote_ch))
			smd_reset_edge(local_ch, new_state);

		/*
		 * ModemFW is in the same subsystem as ModemSW, but has
		 * separate SMD edges that need to be reset.
		 */
		if (pid == SMSM_MODEM &&
				pid_is_on_edge(shared2, type, SMD_MODEM_Q6_FW,
				&local_ch, &remote_ch))
			smd_reset_edge(local_ch, new_state);
	}
}

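/*
 * smd_channel_reset() is called from the subsystem-restart path: it
 * releases any remote spinlocks held by the dead processor, restarts the
 * SMSM handshake, and walks all shared channels twice (CLOSING, then
 * CLOSED) while kicking the SMD/SMSM interrupts so both sides converge
 * on a closed state.
 */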
void smd_channel_reset(uint32_t restart_pid)
{
	struct smd_alloc_elm *shared;
	unsigned long flags;

	SMD_DBG("%s: starting reset\n", __func__);
	shared = smem_find(ID_CH_ALLOC_TBL, sizeof(*shared) * 64);
	if (!shared) {
		pr_err("%s: allocation table not initialized\n", __func__);
		return;
	}

	/* release any held spinlocks */
	remote_spin_release(&remote_spinlock, restart_pid);
	remote_spin_release_all(restart_pid);

	/* reset SMSM entry */
	if (smsm_info.state) {
		writel_relaxed(0, SMSM_STATE_ADDR(restart_pid));

		/* restart SMSM init handshake */
		if (restart_pid == SMSM_MODEM) {
			smsm_change_state(SMSM_APPS_STATE,
				SMSM_INIT | SMSM_SMD_LOOPBACK | SMSM_RESET,
				0);
		}

		/* notify SMSM processors */
		smsm_irq_handler(0, 0);
		notify_modem_smsm();
		notify_dsp_smsm();
		notify_dsps_smsm();
		notify_wcnss_smsm();
	}

	/* change all remote states to CLOSING */
	mutex_lock(&smd_probe_lock);
	spin_lock_irqsave(&smd_lock, flags);
	smd_channel_reset_state(shared, SMD_SS_CLOSING, restart_pid);
	spin_unlock_irqrestore(&smd_lock, flags);
	mutex_unlock(&smd_probe_lock);

	/* notify SMD processors */
	mb();
	smd_fake_irq_handler(0);
	notify_modem_smd();
	notify_dsp_smd();
	notify_dsps_smd();
	notify_wcnss_smd();

	/* change all remote states to CLOSED */
	mutex_lock(&smd_probe_lock);
	spin_lock_irqsave(&smd_lock, flags);
	smd_channel_reset_state(shared, SMD_SS_CLOSED, restart_pid);
	spin_unlock_irqrestore(&smd_lock, flags);
	mutex_unlock(&smd_probe_lock);

	/* notify SMD processors */
	mb();
	smd_fake_irq_handler(0);
	notify_modem_smd();
	notify_dsp_smd();
	notify_dsps_smd();
	notify_wcnss_smd();

	SMD_DBG("%s: finished reset\n", __func__);
}

/* how many bytes are available for reading */
static int smd_stream_read_avail(struct smd_channel *ch)
{
	return (ch->half_ch->get_head(ch->recv) -
			ch->half_ch->get_tail(ch->recv)) & ch->fifo_mask;
}

/* how many bytes we are free to write */
static int smd_stream_write_avail(struct smd_channel *ch)
{
	return ch->fifo_mask - ((ch->half_ch->get_head(ch->send) -
			ch->half_ch->get_tail(ch->send)) & ch->fifo_mask);
}

static int smd_packet_read_avail(struct smd_channel *ch)
{
	if (ch->current_packet) {
		int n = smd_stream_read_avail(ch);
		if (n > ch->current_packet)
			n = ch->current_packet;
		return n;
	} else {
		return 0;
	}
}

static int smd_packet_write_avail(struct smd_channel *ch)
{
	int n = smd_stream_write_avail(ch);
	return n > SMD_HEADER_SIZE ? n - SMD_HEADER_SIZE : 0;
}

static int ch_is_open(struct smd_channel *ch)
{
	return (ch->half_ch->get_state(ch->recv) == SMD_SS_OPENED ||
		ch->half_ch->get_state(ch->recv) == SMD_SS_FLUSHING)
		&& (ch->half_ch->get_state(ch->send) == SMD_SS_OPENED);
}

/* provide a pointer and length to readable data in the fifo */
static unsigned ch_read_buffer(struct smd_channel *ch, void **ptr)
{
	unsigned head = ch->half_ch->get_head(ch->recv);
	unsigned tail = ch->half_ch->get_tail(ch->recv);
	*ptr = (void *) (ch->recv_data + tail);

	if (tail <= head)
		return head - tail;
	else
		return ch->fifo_size - tail;
}

static int read_intr_blocked(struct smd_channel *ch)
{
	return ch->half_ch->get_fBLOCKREADINTR(ch->recv);
}

/* advance the fifo read pointer after data from ch_read_buffer is consumed */
static void ch_read_done(struct smd_channel *ch, unsigned count)
{
	BUG_ON(count > smd_stream_read_avail(ch));
	ch->half_ch->set_tail(ch->recv,
		(ch->half_ch->get_tail(ch->recv) + count) & ch->fifo_mask);
	wmb();
	ch->half_ch->set_fTAIL(ch->send, 1);
}

/* basic read interface to ch_read_{buffer,done} used
 * by smd_*_read() and update_packet_state()
 * will read-and-discard if the _data pointer is null
 */
static int ch_read(struct smd_channel *ch, void *_data, int len, int user_buf)
{
	void *ptr;
	unsigned n;
	unsigned char *data = _data;
	int orig_len = len;
	int r = 0;

	while (len > 0) {
		n = ch_read_buffer(ch, &ptr);
		if (n == 0)
			break;

		if (n > len)
			n = len;
		if (_data) {
			if (user_buf) {
				r = copy_to_user(data, ptr, n);
				if (r > 0) {
					pr_err("%s: "
						"copy_to_user could not copy "
						"%i bytes.\n",
						__func__,
						r);
				}
			} else
				memcpy(data, ptr, n);
		}

		data += n;
		len -= n;
		ch_read_done(ch, n);
	}

	return orig_len - len;
}

static void update_stream_state(struct smd_channel *ch)
{
	/* streams have no special state requiring updating */
}

static void update_packet_state(struct smd_channel *ch)
{
	unsigned hdr[5];
	int r;

	/* can't do anything if we're in the middle of a packet */
	while (ch->current_packet == 0) {
		/* discard 0 length packets if any */

		/* don't bother unless we can get the full header */
		if (smd_stream_read_avail(ch) < SMD_HEADER_SIZE)
			return;

		r = ch_read(ch, hdr, SMD_HEADER_SIZE, 0);
		BUG_ON(r != SMD_HEADER_SIZE);

		ch->current_packet = hdr[0];
	}
}

/* provide a pointer and length to next free space in the fifo */
static unsigned ch_write_buffer(struct smd_channel *ch, void **ptr)
{
	unsigned head = ch->half_ch->get_head(ch->send);
	unsigned tail = ch->half_ch->get_tail(ch->send);
	*ptr = (void *) (ch->send_data + head);

	if (head < tail) {
		return tail - head - 1;
	} else {
		if (tail == 0)
			return ch->fifo_size - head - 1;
		else
			return ch->fifo_size - head;
	}
}

/* advance the fifo write pointer after freespace
 * from ch_write_buffer is filled
 */
static void ch_write_done(struct smd_channel *ch, unsigned count)
{
	BUG_ON(count > smd_stream_write_avail(ch));
	ch->half_ch->set_head(ch->send,
		(ch->half_ch->get_head(ch->send) + count) & ch->fifo_mask);
	wmb();
	ch->half_ch->set_fHEAD(ch->send, 1);
}

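/*
 * Update our half-channel state and the modem-control style flags
 * (DSR/CTS/CD track "opened"), then interrupt the remote side so it
 * notices the state change.
 */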
static void ch_set_state(struct smd_channel *ch, unsigned n)
{
	if (n == SMD_SS_OPENED) {
		ch->half_ch->set_fDSR(ch->send, 1);
		ch->half_ch->set_fCTS(ch->send, 1);
		ch->half_ch->set_fCD(ch->send, 1);
	} else {
		ch->half_ch->set_fDSR(ch->send, 0);
		ch->half_ch->set_fCTS(ch->send, 0);
		ch->half_ch->set_fCD(ch->send, 0);
	}
	ch->half_ch->set_state(ch->send, n);
	ch->half_ch->set_fSTATE(ch->send, 1);
	ch->notify_other_cpu();
}

static void do_smd_probe(void)
{
	struct smem_shared *shared = (void *) MSM_SHARED_RAM_BASE;
	if (shared->heap_info.free_offset != last_heap_free) {
		last_heap_free = shared->heap_info.free_offset;
		schedule_work(&probe_work);
	}
}

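/*
 * React to a remote-side state transition: drive our half of the
 * handshake (OPENING -> OPENED, CLOSED -> CLOSING) and deliver
 * SMD_EVENT_OPEN/SMD_EVENT_CLOSE notifications to the channel owner.
 */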
static void smd_state_change(struct smd_channel *ch,
		unsigned last, unsigned next)
{
	ch->last_state = next;

	SMD_INFO("SMD: ch %d %d -> %d\n", ch->n, last, next);

	switch (next) {
	case SMD_SS_OPENING:
		if (ch->half_ch->get_state(ch->send) == SMD_SS_CLOSING ||
		    ch->half_ch->get_state(ch->send) == SMD_SS_CLOSED) {
			ch->half_ch->set_tail(ch->recv, 0);
			ch->half_ch->set_head(ch->send, 0);
			ch->half_ch->set_fBLOCKREADINTR(ch->send, 0);
			ch_set_state(ch, SMD_SS_OPENING);
		}
		break;
	case SMD_SS_OPENED:
		if (ch->half_ch->get_state(ch->send) == SMD_SS_OPENING) {
			ch_set_state(ch, SMD_SS_OPENED);
			ch->notify(ch->priv, SMD_EVENT_OPEN);
		}
		break;
	case SMD_SS_FLUSHING:
	case SMD_SS_RESET:
		/* we should force them to close? */
		break;
	case SMD_SS_CLOSED:
		if (ch->half_ch->get_state(ch->send) == SMD_SS_OPENED) {
			ch_set_state(ch, SMD_SS_CLOSING);
			ch->current_packet = 0;
			ch->pending_pkt_sz = 0;
			ch->notify(ch->priv, SMD_EVENT_CLOSE);
		}
		break;
	case SMD_SS_CLOSING:
		if (ch->half_ch->get_state(ch->send) == SMD_SS_CLOSED) {
			list_move(&ch->ch_list,
					&smd_ch_to_close_list);
			queue_work(channel_close_wq,
						&finalize_channel_close_work);
		}
		break;
	}
}

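/*
 * Channels waiting for the remote side to finish closing are parked on
 * smd_ch_closing_list; poll their remote state here so the final
 * CLOSING -> CLOSED transition is not missed.
 */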
static void handle_smd_irq_closing_list(void)
{
	unsigned long flags;
	struct smd_channel *ch;
	struct smd_channel *index;
	unsigned tmp;

	spin_lock_irqsave(&smd_lock, flags);
	list_for_each_entry_safe(ch, index, &smd_ch_closing_list, ch_list) {
		if (ch->half_ch->get_fSTATE(ch->recv))
			ch->half_ch->set_fSTATE(ch->recv, 0);
		tmp = ch->half_ch->get_state(ch->recv);
		if (tmp != ch->last_state)
			smd_state_change(ch, ch->last_state, tmp);
	}
	spin_unlock_irqrestore(&smd_lock, flags);
}

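/*
 * Core receive-side interrupt work for one edge: acknowledge the
 * fHEAD/fTAIL/fSTATE flags raised by the remote processor, run the
 * state machine, and deliver SMD_EVENT_DATA/SMD_EVENT_STATUS callbacks
 * to every open channel on @list.
 */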
static void handle_smd_irq(struct list_head *list, void (*notify)(void))
{
	unsigned long flags;
	struct smd_channel *ch;
	unsigned ch_flags;
	unsigned tmp;
	unsigned char state_change;

	spin_lock_irqsave(&smd_lock, flags);
	list_for_each_entry(ch, list, ch_list) {
		state_change = 0;
		ch_flags = 0;
		if (ch_is_open(ch)) {
			if (ch->half_ch->get_fHEAD(ch->recv)) {
				ch->half_ch->set_fHEAD(ch->recv, 0);
				ch_flags |= 1;
			}
			if (ch->half_ch->get_fTAIL(ch->recv)) {
				ch->half_ch->set_fTAIL(ch->recv, 0);
				ch_flags |= 2;
			}
			if (ch->half_ch->get_fSTATE(ch->recv)) {
				ch->half_ch->set_fSTATE(ch->recv, 0);
				ch_flags |= 4;
			}
		}
		tmp = ch->half_ch->get_state(ch->recv);
		if (tmp != ch->last_state) {
			SMx_POWER_INFO("SMD ch%d '%s' State change %d->%d\n",
					ch->n, ch->name, ch->last_state, tmp);
			smd_state_change(ch, ch->last_state, tmp);
			state_change = 1;
		}
		if (ch_flags & 0x3) {
			ch->update_state(ch);
			SMx_POWER_INFO("SMD ch%d '%s' Data event r%d/w%d\n",
					ch->n, ch->name,
					ch->read_avail(ch),
					ch->fifo_size - ch->write_avail(ch));
			ch->notify(ch->priv, SMD_EVENT_DATA);
		}
		if (ch_flags & 0x4 && !state_change) {
			SMx_POWER_INFO("SMD ch%d '%s' State update\n",
					ch->n, ch->name);
			ch->notify(ch->priv, SMD_EVENT_STATUS);
		}
	}
	spin_unlock_irqrestore(&smd_lock, flags);
	do_smd_probe();
}

static irqreturn_t smd_modem_irq_handler(int irq, void *data)
{
	SMx_POWER_INFO("SMD Int Modem->Apps\n");
	++interrupt_stats[SMD_MODEM].smd_in_count;
	handle_smd_irq(&smd_ch_list_modem, notify_modem_smd);
	handle_smd_irq_closing_list();
	return IRQ_HANDLED;
}

static irqreturn_t smd_dsp_irq_handler(int irq, void *data)
{
	SMx_POWER_INFO("SMD Int LPASS->Apps\n");
	++interrupt_stats[SMD_Q6].smd_in_count;
	handle_smd_irq(&smd_ch_list_dsp, notify_dsp_smd);
	handle_smd_irq_closing_list();
	return IRQ_HANDLED;
}

static irqreturn_t smd_dsps_irq_handler(int irq, void *data)
{
	SMx_POWER_INFO("SMD Int DSPS->Apps\n");
	++interrupt_stats[SMD_DSPS].smd_in_count;
	handle_smd_irq(&smd_ch_list_dsps, notify_dsps_smd);
	handle_smd_irq_closing_list();
	return IRQ_HANDLED;
}

static irqreturn_t smd_wcnss_irq_handler(int irq, void *data)
{
	SMx_POWER_INFO("SMD Int WCNSS->Apps\n");
	++interrupt_stats[SMD_WCNSS].smd_in_count;
	handle_smd_irq(&smd_ch_list_wcnss, notify_wcnss_smd);
	handle_smd_irq_closing_list();
	return IRQ_HANDLED;
}

static irqreturn_t smd_rpm_irq_handler(int irq, void *data)
{
	SMx_POWER_INFO("SMD Int RPM->Apps\n");
	++interrupt_stats[SMD_RPM].smd_in_count;
	handle_smd_irq(&smd_ch_list_rpm, notify_rpm_smd);
	handle_smd_irq_closing_list();
	return IRQ_HANDLED;
}

static void smd_fake_irq_handler(unsigned long arg)
{
	handle_smd_irq(&smd_ch_list_modem, notify_modem_smd);
	handle_smd_irq(&smd_ch_list_dsp, notify_dsp_smd);
	handle_smd_irq(&smd_ch_list_dsps, notify_dsps_smd);
	handle_smd_irq(&smd_ch_list_wcnss, notify_wcnss_smd);
	handle_smd_irq(&smd_ch_list_rpm, notify_rpm_smd);
	handle_smd_irq_closing_list();
}

static DECLARE_TASKLET(smd_fake_irq_tasklet, smd_fake_irq_handler, 0);

static inline int smd_need_int(struct smd_channel *ch)
{
	if (ch_is_open(ch)) {
		if (ch->half_ch->get_fHEAD(ch->recv) ||
			ch->half_ch->get_fTAIL(ch->recv) ||
			ch->half_ch->get_fSTATE(ch->recv))
			return 1;
		if (ch->half_ch->get_state(ch->recv) != ch->last_state)
			return 1;
	}
	return 0;
}

Brian Swetland2eb44eb2008-09-29 16:00:48 -07001346void smd_sleep_exit(void)
1347{
1348 unsigned long flags;
1349 struct smd_channel *ch;
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001350 int need_int = 0;
1351
1352 spin_lock_irqsave(&smd_lock, flags);
Brian Swetland37521a32009-07-01 18:30:47 -07001353 list_for_each_entry(ch, &smd_ch_list_modem, ch_list) {
1354 if (smd_need_int(ch)) {
1355 need_int = 1;
1356 break;
1357 }
1358 }
1359 list_for_each_entry(ch, &smd_ch_list_dsp, ch_list) {
1360 if (smd_need_int(ch)) {
1361 need_int = 1;
1362 break;
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001363 }
1364 }
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001365 list_for_each_entry(ch, &smd_ch_list_dsps, ch_list) {
1366 if (smd_need_int(ch)) {
1367 need_int = 1;
1368 break;
1369 }
1370 }
1371 list_for_each_entry(ch, &smd_ch_list_wcnss, ch_list) {
1372 if (smd_need_int(ch)) {
1373 need_int = 1;
1374 break;
1375 }
1376 }
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001377 spin_unlock_irqrestore(&smd_lock, flags);
1378 do_smd_probe();
Brian Swetland37521a32009-07-01 18:30:47 -07001379
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001380 if (need_int) {
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001381 SMD_DBG("smd_sleep_exit need interrupt\n");
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001382 tasklet_schedule(&smd_fake_irq_tasklet);
1383 }
1384}
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001385EXPORT_SYMBOL(smd_sleep_exit);
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001386
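/*
 * Decide whether the channel described by an allocation-table entry uses the
 * packet protocol (return 1) or the raw stream protocol (return 0).  A
 * transfer type of 1 means stream and 2 means packet; legacy entries with
 * type 0 fall back to the name- and cid-based heuristics below.
 */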
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001387static int smd_is_packet(struct smd_alloc_elm *alloc_elm)
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001388{
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001389 if (SMD_XFER_TYPE(alloc_elm->type) == 1)
1390 return 0;
1391 else if (SMD_XFER_TYPE(alloc_elm->type) == 2)
Brian Swetland5b0f5a32009-04-26 18:38:49 -07001392 return 1;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001393
1394 /* for cases where xfer type is 0 */
1395 if (!strncmp(alloc_elm->name, "DAL", 3))
Brian Swetland5b0f5a32009-04-26 18:38:49 -07001396 return 0;
1397
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001398 /* for cases where xfer type is 0 */
1399 if (!strncmp(alloc_elm->name, "RPCCALL_QDSP", 12))
1400 return 0;
1401
1402 if (alloc_elm->cid > 4 || alloc_elm->cid == 1)
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001403 return 1;
1404 else
1405 return 0;
1406}
1407
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001408static int smd_stream_write(smd_channel_t *ch, const void *_data, int len,
1409 int user_buf)
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001410{
1411 void *ptr;
1412 const unsigned char *buf = _data;
1413 unsigned xfer;
1414 int orig_len = len;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001415 int r = 0;
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001416
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001417 SMD_DBG("smd_stream_write() %d -> ch%d\n", len, ch->n);
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001418 if (len < 0)
1419 return -EINVAL;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001420 else if (len == 0)
1421 return 0;
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001422
1423 while ((xfer = ch_write_buffer(ch, &ptr)) != 0) {
Eric Holmberg7a717872012-02-03 11:58:04 -07001424 if (!ch_is_open(ch)) {
1425 len = orig_len;
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001426 break;
Eric Holmberg7a717872012-02-03 11:58:04 -07001427 }
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001428 if (xfer > len)
1429 xfer = len;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001430 if (user_buf) {
1431 r = copy_from_user(ptr, buf, xfer);
1432 if (r > 0) {
1433 pr_err("%s: "
1434 "copy_from_user could not copy %i "
1435 "bytes.\n",
1436 __func__,
1437 r);
1438 }
1439 } else
1440 memcpy(ptr, buf, xfer);
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001441 ch_write_done(ch, xfer);
1442 len -= xfer;
1443 buf += xfer;
1444 if (len == 0)
1445 break;
1446 }
1447
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001448 if (orig_len - len)
1449 ch->notify_other_cpu();
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001450
1451 return orig_len - len;
1452}
1453
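/*
 * Packet-mode writes prepend a five-word header to the payload: word 0
 * carries the payload length and the remaining words are written as zero
 * here (SMD_HEADER_SIZE bytes in total).  smd_write_start() and
 * smd_write_segment() below emit the same header for segmented writes.
 */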
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001454static int smd_packet_write(smd_channel_t *ch, const void *_data, int len,
1455 int user_buf)
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001456{
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001457 int ret;
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001458 unsigned hdr[5];
1459
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001460 SMD_DBG("smd_packet_write() %d -> ch%d\n", len, ch->n);
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001461 if (len < 0)
1462 return -EINVAL;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001463 else if (len == 0)
1464 return 0;
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001465
1466 if (smd_stream_write_avail(ch) < (len + SMD_HEADER_SIZE))
1467 return -ENOMEM;
1468
1469 hdr[0] = len;
1470 hdr[1] = hdr[2] = hdr[3] = hdr[4] = 0;
1471
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001472
1473 ret = smd_stream_write(ch, hdr, sizeof(hdr), 0);
1474 if (ret < 0 || ret != sizeof(hdr)) {
1475 SMD_DBG("%s failed to write pkt header: "
1476 "%d returned\n", __func__, ret);
1477 return -1;
1478 }
1479
1480
1481 ret = smd_stream_write(ch, _data, len, user_buf);
1482 if (ret < 0 || ret != len) {
1483 SMD_DBG("%s failed to write pkt data: "
1484 "%d returned\n", __func__, ret);
1485 return ret;
1486 }
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001487
1488 return len;
1489}
1490
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001491static int smd_stream_read(smd_channel_t *ch, void *data, int len, int user_buf)
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001492{
1493 int r;
1494
1495 if (len < 0)
1496 return -EINVAL;
1497
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001498 r = ch_read(ch, data, len, user_buf);
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001499 if (r > 0)
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001500 if (!read_intr_blocked(ch))
1501 ch->notify_other_cpu();
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001502
1503 return r;
1504}
1505
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001506static int smd_packet_read(smd_channel_t *ch, void *data, int len, int user_buf)
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001507{
1508 unsigned long flags;
1509 int r;
1510
1511 if (len < 0)
1512 return -EINVAL;
1513
1514 if (len > ch->current_packet)
1515 len = ch->current_packet;
1516
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001517 r = ch_read(ch, data, len, user_buf);
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001518 if (r > 0)
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001519 if (!read_intr_blocked(ch))
1520 ch->notify_other_cpu();
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001521
1522 spin_lock_irqsave(&smd_lock, flags);
1523 ch->current_packet -= r;
1524 update_packet_state(ch);
1525 spin_unlock_irqrestore(&smd_lock, flags);
1526
1527 return r;
1528}
1529
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001530static int smd_packet_read_from_cb(smd_channel_t *ch, void *data, int len,
1531 int user_buf)
1532{
1533 int r;
1534
1535 if (len < 0)
1536 return -EINVAL;
1537
1538 if (len > ch->current_packet)
1539 len = ch->current_packet;
1540
1541 r = ch_read(ch, data, len, user_buf);
1542 if (r > 0)
1543 if (!read_intr_blocked(ch))
1544 ch->notify_other_cpu();
1545
1546 ch->current_packet -= r;
1547 update_packet_state(ch);
1548
1549 return r;
1550}
1551
Angshuman Sarkarac7d6252011-09-30 18:20:59 +05301552#if (defined(CONFIG_MSM_SMD_PKG4) || defined(CONFIG_MSM_SMD_PKG3))
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001553static int smd_alloc_v2(struct smd_channel *ch)
1554{
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001555 void *buffer;
1556 unsigned buffer_sz;
1557
Jeff Hugo918b2dc2012-03-21 13:42:09 -06001558 if (is_word_access_ch(ch->type)) {
1559 struct smd_shared_v2_word_access *shared2;
1560 shared2 = smem_alloc(SMEM_SMD_BASE_ID + ch->n,
1561 sizeof(*shared2));
1562 if (!shared2) {
1563 SMD_INFO("smem_alloc failed ch=%d\n", ch->n);
1564 return -EINVAL;
1565 }
1566 ch->send = &shared2->ch0;
1567 ch->recv = &shared2->ch1;
1568 } else {
1569 struct smd_shared_v2 *shared2;
1570 shared2 = smem_alloc(SMEM_SMD_BASE_ID + ch->n,
1571 sizeof(*shared2));
1572 if (!shared2) {
1573 SMD_INFO("smem_alloc failed ch=%d\n", ch->n);
1574 return -EINVAL;
1575 }
1576 ch->send = &shared2->ch0;
1577 ch->recv = &shared2->ch1;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001578 }
Jeff Hugo918b2dc2012-03-21 13:42:09 -06001579 ch->half_ch = get_half_ch_funcs(ch->type);
1580
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001581 buffer = smem_get_entry(SMEM_SMD_FIFO_BASE_ID + ch->n, &buffer_sz);
1582 if (!buffer) {
Angshuman Sarkarac7d6252011-09-30 18:20:59 +05301583 SMD_INFO("smem_get_entry failed\n");
1584 return -EINVAL;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001585 }
1586
1587 /* buffer must be a power-of-two size */
Angshuman Sarkarac7d6252011-09-30 18:20:59 +05301588 if (buffer_sz & (buffer_sz - 1)) {
1589 SMD_INFO("Buffer size: %u not power of two\n", buffer_sz);
1590 return -EINVAL;
1591 }
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001592 buffer_sz /= 2;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001593 ch->send_data = buffer;
1594 ch->recv_data = buffer + buffer_sz;
1595 ch->fifo_size = buffer_sz;
Jeff Hugo918b2dc2012-03-21 13:42:09 -06001596
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001597 return 0;
1598}
1599
1600static int smd_alloc_v1(struct smd_channel *ch)
1601{
Angshuman Sarkarac7d6252011-09-30 18:20:59 +05301602 return -EINVAL;
1603}
1604
1605#else /* define v1 for older targets */
1606static int smd_alloc_v2(struct smd_channel *ch)
1607{
1608 return -EINVAL;
1609}
1610
1611static int smd_alloc_v1(struct smd_channel *ch)
1612{
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001613 struct smd_shared_v1 *shared1;
1614 shared1 = smem_alloc(ID_SMD_CHANNELS + ch->n, sizeof(*shared1));
1615 if (!shared1) {
1616 pr_err("smd_alloc_channel() cid %d does not exist\n", ch->n);
Angshuman Sarkarac7d6252011-09-30 18:20:59 +05301617 return -EINVAL;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001618 }
1619 ch->send = &shared1->ch0;
1620 ch->recv = &shared1->ch1;
1621 ch->send_data = shared1->data0;
1622 ch->recv_data = shared1->data1;
1623 ch->fifo_size = SMD_BUF_SIZE;
Jeff Hugo918b2dc2012-03-21 13:42:09 -06001624 ch->half_ch = get_half_ch_funcs(ch->type);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001625 return 0;
1626}
1627
Angshuman Sarkarac7d6252011-09-30 18:20:59 +05301628#endif
1629
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001630static int smd_alloc_channel(struct smd_alloc_elm *alloc_elm)
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001631{
1632 struct smd_channel *ch;
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001633
1634 ch = kzalloc(sizeof(struct smd_channel), GFP_KERNEL);
1635 if (ch == 0) {
1636 pr_err("smd_alloc_channel() out of memory\n");
Brian Swetland34f719b2009-10-30 16:22:05 -07001637 return -1;
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001638 }
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001639 ch->n = alloc_elm->cid;
Jeff Hugo918b2dc2012-03-21 13:42:09 -06001640 ch->type = SMD_CHANNEL_TYPE(alloc_elm->type);
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001641
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001642 if (smd_alloc_v2(ch) && smd_alloc_v1(ch)) {
Brian Swetland5b0f5a32009-04-26 18:38:49 -07001643 kfree(ch);
Brian Swetland34f719b2009-10-30 16:22:05 -07001644 return -1;
Brian Swetland5b0f5a32009-04-26 18:38:49 -07001645 }
1646
1647 ch->fifo_mask = ch->fifo_size - 1;
Brian Swetland5b0f5a32009-04-26 18:38:49 -07001648
Jeff Hugo918b2dc2012-03-21 13:42:09 -06001649 /* probe_worker guarantees ch->type will be a valid type */
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001650 if (ch->type == SMD_APPS_MODEM)
Brian Swetland5b0f5a32009-04-26 18:38:49 -07001651 ch->notify_other_cpu = notify_modem_smd;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001652 else if (ch->type == SMD_APPS_QDSP)
Brian Swetland5b0f5a32009-04-26 18:38:49 -07001653 ch->notify_other_cpu = notify_dsp_smd;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001654 else if (ch->type == SMD_APPS_DSPS)
1655 ch->notify_other_cpu = notify_dsps_smd;
Jeff Hugo918b2dc2012-03-21 13:42:09 -06001656 else if (ch->type == SMD_APPS_WCNSS)
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001657 ch->notify_other_cpu = notify_wcnss_smd;
Jeff Hugo918b2dc2012-03-21 13:42:09 -06001658 else if (ch->type == SMD_APPS_RPM)
1659 ch->notify_other_cpu = notify_rpm_smd;
Brian Swetland5b0f5a32009-04-26 18:38:49 -07001660
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001661 if (smd_is_packet(alloc_elm)) {
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001662 ch->read = smd_packet_read;
1663 ch->write = smd_packet_write;
1664 ch->read_avail = smd_packet_read_avail;
1665 ch->write_avail = smd_packet_write_avail;
1666 ch->update_state = update_packet_state;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001667 ch->read_from_cb = smd_packet_read_from_cb;
1668 ch->is_pkt_ch = 1;
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001669 } else {
1670 ch->read = smd_stream_read;
1671 ch->write = smd_stream_write;
1672 ch->read_avail = smd_stream_read_avail;
1673 ch->write_avail = smd_stream_write_avail;
1674 ch->update_state = update_stream_state;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001675 ch->read_from_cb = smd_stream_read;
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001676 }
1677
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001678 memcpy(ch->name, alloc_elm->name, SMD_MAX_CH_NAME_LEN);
1679 ch->name[SMD_MAX_CH_NAME_LEN-1] = 0;
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001680
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001681 ch->pdev.name = ch->name;
1682 ch->pdev.id = ch->type;
1683
1684 SMD_INFO("smd_alloc_channel() '%s' cid=%d\n",
1685 ch->name, ch->n);
1686
1687 mutex_lock(&smd_creation_mutex);
1688 list_add(&ch->ch_list, &smd_ch_closed_list);
1689 mutex_unlock(&smd_creation_mutex);
1690
1691 platform_device_register(&ch->pdev);
1692 if (!strncmp(ch->name, "LOOPBACK", 8) && ch->type == SMD_APPS_MODEM) {
1693 /* create a platform device to be used by the smd_tty driver
1694 * so that it can access the loopback port
1695 */
1696 loopback_tty_pdev.id = ch->type;
1697 platform_device_register(&loopback_tty_pdev);
1698 }
1699 return 0;
1700}
1701
1702static inline void notify_loopback_smd(void)
1703{
1704 unsigned long flags;
1705 struct smd_channel *ch;
1706
1707 spin_lock_irqsave(&smd_lock, flags);
1708 list_for_each_entry(ch, &smd_ch_list_loopback, ch_list) {
1709 ch->notify(ch->priv, SMD_EVENT_DATA);
1710 }
1711 spin_unlock_irqrestore(&smd_lock, flags);
1712}
1713
1714static int smd_alloc_loopback_channel(void)
1715{
1716 static struct smd_half_channel smd_loopback_ctl;
1717 static char smd_loopback_data[SMD_BUF_SIZE];
1718 struct smd_channel *ch;
1719
1720 ch = kzalloc(sizeof(struct smd_channel), GFP_KERNEL);
1721 if (ch == 0) {
1722 pr_err("%s: out of memory\n", __func__);
1723 return -1;
1724 }
1725 ch->n = SMD_LOOPBACK_CID;
1726
1727 ch->send = &smd_loopback_ctl;
1728 ch->recv = &smd_loopback_ctl;
1729 ch->send_data = smd_loopback_data;
1730 ch->recv_data = smd_loopback_data;
1731 ch->fifo_size = SMD_BUF_SIZE;
1732
1733 ch->fifo_mask = ch->fifo_size - 1;
1734 ch->type = SMD_LOOPBACK_TYPE;
1735 ch->notify_other_cpu = notify_loopback_smd;
1736
1737 ch->read = smd_stream_read;
1738 ch->write = smd_stream_write;
1739 ch->read_avail = smd_stream_read_avail;
1740 ch->write_avail = smd_stream_write_avail;
1741 ch->update_state = update_stream_state;
1742 ch->read_from_cb = smd_stream_read;
1743
1744 memset(ch->name, 0, 20);
1745 memcpy(ch->name, "local_loopback", 14);
1746
1747 ch->pdev.name = ch->name;
1748 ch->pdev.id = ch->type;
1749
1750 SMD_INFO("%s: '%s' cid=%d\n", __func__, ch->name, ch->n);
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001751
1752 mutex_lock(&smd_creation_mutex);
1753 list_add(&ch->ch_list, &smd_ch_closed_list);
1754 mutex_unlock(&smd_creation_mutex);
1755
1756 platform_device_register(&ch->pdev);
Brian Swetland34f719b2009-10-30 16:22:05 -07001757 return 0;
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001758}
1759
1760static void do_nothing_notify(void *priv, unsigned flags)
1761{
1762}
1763
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001764static void finalize_channel_close_fn(struct work_struct *work)
1765{
1766 unsigned long flags;
1767 struct smd_channel *ch;
1768 struct smd_channel *index;
1769
Eric Holmbergbb2b1fa2011-10-12 16:41:37 -06001770 mutex_lock(&smd_creation_mutex);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001771 spin_lock_irqsave(&smd_lock, flags);
1772 list_for_each_entry_safe(ch, index, &smd_ch_to_close_list, ch_list) {
1773 list_del(&ch->ch_list);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001774 list_add(&ch->ch_list, &smd_ch_closed_list);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001775 ch->notify(ch->priv, SMD_EVENT_REOPEN_READY);
1776 ch->notify = do_nothing_notify;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001777 }
1778 spin_unlock_irqrestore(&smd_lock, flags);
Eric Holmbergbb2b1fa2011-10-12 16:41:37 -06001779 mutex_unlock(&smd_creation_mutex);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001780}
1781
1782struct smd_channel *smd_get_channel(const char *name, uint32_t type)
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001783{
1784 struct smd_channel *ch;
1785
1786 mutex_lock(&smd_creation_mutex);
1787 list_for_each_entry(ch, &smd_ch_closed_list, ch_list) {
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001788 if (!strcmp(name, ch->name) &&
1789 (type == ch->type)) {
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001790 list_del(&ch->ch_list);
1791 mutex_unlock(&smd_creation_mutex);
1792 return ch;
1793 }
1794 }
1795 mutex_unlock(&smd_creation_mutex);
1796
1797 return NULL;
1798}
1799
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001800int smd_named_open_on_edge(const char *name, uint32_t edge,
1801 smd_channel_t **_ch,
1802 void *priv, void (*notify)(void *, unsigned))
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001803{
1804 struct smd_channel *ch;
1805 unsigned long flags;
1806
1807 if (smd_initialized == 0) {
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001808 SMD_INFO("smd_open() before smd_init()\n");
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001809 return -ENODEV;
1810 }
1811
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001812 SMD_DBG("smd_open('%s', %p, %p)\n", name, priv, notify);
1813
1814 ch = smd_get_channel(name, edge);
Eric Holmbergbb2b1fa2011-10-12 16:41:37 -06001815 if (!ch) {
Eric Holmbergbb2b1fa2011-10-12 16:41:37 -06001816 /* check closing list for port */
1817 spin_lock_irqsave(&smd_lock, flags);
1818 list_for_each_entry(ch, &smd_ch_closing_list, ch_list) {
1819 if (!strncmp(name, ch->name, 20) &&
1820 (edge == ch->type)) {
1821 /* channel exists, but is being closed */
1822 spin_unlock_irqrestore(&smd_lock, flags);
1823 return -EAGAIN;
1824 }
1825 }
1826
1827 /* check closing workqueue list for port */
1828 list_for_each_entry(ch, &smd_ch_to_close_list, ch_list) {
1829 if (!strncmp(name, ch->name, 20) &&
1830 (edge == ch->type)) {
1831 /* channel exists, but is being closed */
1832 spin_unlock_irqrestore(&smd_lock, flags);
1833 return -EAGAIN;
1834 }
1835 }
1836 spin_unlock_irqrestore(&smd_lock, flags);
1837
1838 /* one final check to handle closing->closed race condition */
1839 ch = smd_get_channel(name, edge);
1840 if (!ch)
1841 return -ENODEV;
1842 }
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001843
1844 if (notify == 0)
1845 notify = do_nothing_notify;
1846
1847 ch->notify = notify;
1848 ch->current_packet = 0;
1849 ch->last_state = SMD_SS_CLOSED;
1850 ch->priv = priv;
1851
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001852 if (edge == SMD_LOOPBACK_TYPE) {
1853 ch->last_state = SMD_SS_OPENED;
Jeff Hugo918b2dc2012-03-21 13:42:09 -06001854 ch->half_ch->set_state(ch->send, SMD_SS_OPENED);
1855 ch->half_ch->set_fDSR(ch->send, 1);
1856 ch->half_ch->set_fCTS(ch->send, 1);
1857 ch->half_ch->set_fCD(ch->send, 1);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001858 }
1859
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001860 *_ch = ch;
1861
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001862 SMD_DBG("smd_open: opening '%s'\n", ch->name);
1863
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001864 spin_lock_irqsave(&smd_lock, flags);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001865 if (SMD_CHANNEL_TYPE(ch->type) == SMD_APPS_MODEM)
Brian Swetland37521a32009-07-01 18:30:47 -07001866 list_add(&ch->ch_list, &smd_ch_list_modem);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001867 else if (SMD_CHANNEL_TYPE(ch->type) == SMD_APPS_QDSP)
Brian Swetland37521a32009-07-01 18:30:47 -07001868 list_add(&ch->ch_list, &smd_ch_list_dsp);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001869 else if (SMD_CHANNEL_TYPE(ch->type) == SMD_APPS_DSPS)
1870 list_add(&ch->ch_list, &smd_ch_list_dsps);
1871 else if (SMD_CHANNEL_TYPE(ch->type) == SMD_APPS_WCNSS)
1872 list_add(&ch->ch_list, &smd_ch_list_wcnss);
Jeff Hugo918b2dc2012-03-21 13:42:09 -06001873 else if (SMD_CHANNEL_TYPE(ch->type) == SMD_APPS_RPM)
1874 list_add(&ch->ch_list, &smd_ch_list_rpm);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001875 else
1876 list_add(&ch->ch_list, &smd_ch_list_loopback);
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001877
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001878 SMD_DBG("%s: opening ch %d\n", __func__, ch->n);
1879
1880 if (edge != SMD_LOOPBACK_TYPE)
1881 smd_state_change(ch, ch->last_state, SMD_SS_OPENING);
1882
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001883 spin_unlock_irqrestore(&smd_lock, flags);
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001884
1885 return 0;
1886}
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001887EXPORT_SYMBOL(smd_named_open_on_edge);
1888
1889
1890int smd_open(const char *name, smd_channel_t **_ch,
1891 void *priv, void (*notify)(void *, unsigned))
1892{
1893 return smd_named_open_on_edge(name, SMD_APPS_MODEM, _ch, priv,
1894 notify);
1895}
1896EXPORT_SYMBOL(smd_open);
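/*
 * Illustrative sketch only (not part of the driver): a minimal client of
 * smd_named_open_on_edge()/smd_open().  The channel name "DATA_TEST", the
 * work item and its handler are hypothetical, and the work item is assumed
 * to have been set up with INIT_WORK() during probe; reads are deferred to
 * process context instead of being done in the notify callback.
 *
 *	static smd_channel_t *test_ch;
 *	static struct work_struct test_read_work;
 *
 *	static void test_notify(void *priv, unsigned event)
 *	{
 *		if (event == SMD_EVENT_DATA)
 *			schedule_work(&test_read_work);
 *	}
 *
 *	static int test_open(void)
 *	{
 *		return smd_named_open_on_edge("DATA_TEST", SMD_APPS_MODEM,
 *					      &test_ch, NULL, test_notify);
 *	}
 */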
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001897
1898int smd_close(smd_channel_t *ch)
1899{
1900 unsigned long flags;
1901
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001902 if (ch == 0)
1903 return -1;
1904
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001905 SMD_INFO("smd_close(%s)\n", ch->name);
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001906
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001907 spin_lock_irqsave(&smd_lock, flags);
1908 list_del(&ch->ch_list);
1909 if (ch->n == SMD_LOOPBACK_CID) {
Jeff Hugo918b2dc2012-03-21 13:42:09 -06001910 ch->half_ch->set_fDSR(ch->send, 0);
1911 ch->half_ch->set_fCTS(ch->send, 0);
1912 ch->half_ch->set_fCD(ch->send, 0);
1913 ch->half_ch->set_state(ch->send, SMD_SS_CLOSED);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001914 } else
1915 ch_set_state(ch, SMD_SS_CLOSED);
1916
Jeff Hugo918b2dc2012-03-21 13:42:09 -06001917 if (ch->half_ch->get_state(ch->recv) == SMD_SS_OPENED) {
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001918 list_add(&ch->ch_list, &smd_ch_closing_list);
1919 spin_unlock_irqrestore(&smd_lock, flags);
1920 } else {
1921 spin_unlock_irqrestore(&smd_lock, flags);
1922 ch->notify = do_nothing_notify;
1923 mutex_lock(&smd_creation_mutex);
1924 list_add(&ch->ch_list, &smd_ch_closed_list);
1925 mutex_unlock(&smd_creation_mutex);
1926 }
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001927
1928 return 0;
1929}
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001930EXPORT_SYMBOL(smd_close);
1931
1932int smd_write_start(smd_channel_t *ch, int len)
1933{
1934 int ret;
1935 unsigned hdr[5];
1936
1937 if (!ch) {
1938 pr_err("%s: Invalid channel specified\n", __func__);
1939 return -ENODEV;
1940 }
1941 if (!ch->is_pkt_ch) {
1942 pr_err("%s: non-packet channel specified\n", __func__);
1943 return -EACCES;
1944 }
1945 if (len < 1) {
1946 pr_err("%s: invalid length: %d\n", __func__, len);
1947 return -EINVAL;
1948 }
1949
1950 if (ch->pending_pkt_sz) {
1951 pr_err("%s: packet of size: %d in progress\n", __func__,
1952 ch->pending_pkt_sz);
1953 return -EBUSY;
1954 }
1955 ch->pending_pkt_sz = len;
1956
1957 if (smd_stream_write_avail(ch) < (SMD_HEADER_SIZE)) {
1958 ch->pending_pkt_sz = 0;
1959 SMD_DBG("%s: no space to write packet header\n", __func__);
1960 return -EAGAIN;
1961 }
1962
1963 hdr[0] = len;
1964 hdr[1] = hdr[2] = hdr[3] = hdr[4] = 0;
1965
1966
1967 ret = smd_stream_write(ch, hdr, sizeof(hdr), 0);
1968 if (ret < 0 || ret != sizeof(hdr)) {
1969 ch->pending_pkt_sz = 0;
1970 pr_err("%s: packet header failed to write\n", __func__);
1971 return -EPERM;
1972 }
1973 return 0;
1974}
1975EXPORT_SYMBOL(smd_write_start);
1976
1977int smd_write_segment(smd_channel_t *ch, void *data, int len, int user_buf)
1978{
1979 int bytes_written;
1980
1981 if (!ch) {
1982 pr_err("%s: Invalid channel specified\n", __func__);
1983 return -ENODEV;
1984 }
1985 if (len < 1) {
1986 pr_err("%s: invalid length: %d\n", __func__, len);
1987 return -EINVAL;
1988 }
1989
1990 if (!ch->pending_pkt_sz) {
1991 pr_err("%s: no transaction in progress\n", __func__);
1992 return -ENOEXEC;
1993 }
1994 if (ch->pending_pkt_sz - len < 0) {
1995 pr_err("%s: segment of size: %d will make packet go over "
1996 "length\n", __func__, len);
1997 return -EINVAL;
1998 }
1999
2000 bytes_written = smd_stream_write(ch, data, len, user_buf);
2001
2002 ch->pending_pkt_sz -= bytes_written;
2003
2004 return bytes_written;
2005}
2006EXPORT_SYMBOL(smd_write_segment);
2007
2008int smd_write_end(smd_channel_t *ch)
2009{
2010
2011 if (!ch) {
2012 pr_err("%s: Invalid channel specified\n", __func__);
2013 return -ENODEV;
2014 }
2015 if (ch->pending_pkt_sz) {
2016 pr_err("%s: current packet not completely written\n", __func__);
2017 return -E2BIG;
2018 }
2019
2020 return 0;
2021}
2022EXPORT_SYMBOL(smd_write_end);
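/*
 * Illustrative sketch only (not part of the driver) of the segmented-write
 * sequence above: reserve the packet with smd_write_start(), push the
 * payload in pieces with smd_write_segment(), then finish with
 * smd_write_end().  The two-buffer helper is hypothetical and, for brevity,
 * does not retry the short writes that smd_write_segment() may return.
 *
 *	static int send_two_parts(smd_channel_t *ch, void *hdr, int hdr_len,
 *				  void *body, int body_len)
 *	{
 *		int rc;
 *
 *		rc = smd_write_start(ch, hdr_len + body_len);
 *		if (rc < 0)
 *			return rc;
 *		rc = smd_write_segment(ch, hdr, hdr_len, 0);
 *		if (rc < 0)
 *			return rc;
 *		rc = smd_write_segment(ch, body, body_len, 0);
 *		if (rc < 0)
 *			return rc;
 *		return smd_write_end(ch);
 *	}
 */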
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002023
2024int smd_read(smd_channel_t *ch, void *data, int len)
2025{
Jack Pham1b236d12012-03-19 15:27:18 -07002026 if (!ch) {
2027 pr_err("%s: Invalid channel specified\n", __func__);
2028 return -ENODEV;
2029 }
2030
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002031 return ch->read(ch, data, len, 0);
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002032}
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002033EXPORT_SYMBOL(smd_read);
2034
2035int smd_read_user_buffer(smd_channel_t *ch, void *data, int len)
2036{
Jack Pham1b236d12012-03-19 15:27:18 -07002037 if (!ch) {
2038 pr_err("%s: Invalid channel specified\n", __func__);
2039 return -ENODEV;
2040 }
2041
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002042 return ch->read(ch, data, len, 1);
2043}
2044EXPORT_SYMBOL(smd_read_user_buffer);
2045
2046int smd_read_from_cb(smd_channel_t *ch, void *data, int len)
2047{
Jack Pham1b236d12012-03-19 15:27:18 -07002048 if (!ch) {
2049 pr_err("%s: Invalid channel specified\n", __func__);
2050 return -ENODEV;
2051 }
2052
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002053 return ch->read_from_cb(ch, data, len, 0);
2054}
2055EXPORT_SYMBOL(smd_read_from_cb);
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002056
2057int smd_write(smd_channel_t *ch, const void *data, int len)
2058{
Jack Pham1b236d12012-03-19 15:27:18 -07002059 if (!ch) {
2060 pr_err("%s: Invalid channel specified\n", __func__);
2061 return -ENODEV;
2062 }
2063
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002064 return ch->pending_pkt_sz ? -EBUSY : ch->write(ch, data, len, 0);
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002065}
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002066EXPORT_SYMBOL(smd_write);
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002067
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002068int smd_write_user_buffer(smd_channel_t *ch, const void *data, int len)
Brian Swetland636eb9c2009-12-07 15:28:08 -08002069{
Jack Pham1b236d12012-03-19 15:27:18 -07002070 if (!ch) {
2071 pr_err("%s: Invalid channel specified\n", __func__);
2072 return -ENODEV;
2073 }
2074
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002075 return ch->pending_pkt_sz ? -EBUSY : ch->write(ch, data, len, 1);
Brian Swetland636eb9c2009-12-07 15:28:08 -08002076}
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002077EXPORT_SYMBOL(smd_write_user_buffer);
Brian Swetland636eb9c2009-12-07 15:28:08 -08002078
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002079int smd_read_avail(smd_channel_t *ch)
2080{
Jack Pham1b236d12012-03-19 15:27:18 -07002081 if (!ch) {
2082 pr_err("%s: Invalid channel specified\n", __func__);
2083 return -ENODEV;
2084 }
2085
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002086 return ch->read_avail(ch);
2087}
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002088EXPORT_SYMBOL(smd_read_avail);
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002089
2090int smd_write_avail(smd_channel_t *ch)
2091{
Jack Pham1b236d12012-03-19 15:27:18 -07002092 if (!ch) {
2093 pr_err("%s: Invalid channel specified\n", __func__);
2094 return -ENODEV;
2095 }
2096
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002097 return ch->write_avail(ch);
2098}
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002099EXPORT_SYMBOL(smd_write_avail);
2100
2101void smd_enable_read_intr(smd_channel_t *ch)
2102{
2103 if (ch)
Jeff Hugo918b2dc2012-03-21 13:42:09 -06002104 ch->half_ch->set_fBLOCKREADINTR(ch->send, 0);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002105}
2106EXPORT_SYMBOL(smd_enable_read_intr);
2107
2108void smd_disable_read_intr(smd_channel_t *ch)
2109{
2110 if (ch)
Jeff Hugo918b2dc2012-03-21 13:42:09 -06002111 ch->half_ch->set_fBLOCKREADINTR(ch->send, 1);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002112}
2113EXPORT_SYMBOL(smd_disable_read_intr);
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002114
2115int smd_wait_until_readable(smd_channel_t *ch, int bytes)
2116{
2117 return -1;
2118}
2119
2120int smd_wait_until_writable(smd_channel_t *ch, int bytes)
2121{
2122 return -1;
2123}
2124
2125int smd_cur_packet_size(smd_channel_t *ch)
2126{
Jack Pham1b236d12012-03-19 15:27:18 -07002127 if (!ch) {
2128 pr_err("%s: Invalid channel specified\n", __func__);
2129 return -ENODEV;
2130 }
2131
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002132 return ch->current_packet;
2133}
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002134EXPORT_SYMBOL(smd_cur_packet_size);
2135
2136int smd_tiocmget(smd_channel_t *ch)
2137{
Jack Pham1b236d12012-03-19 15:27:18 -07002138 if (!ch) {
2139 pr_err("%s: Invalid channel specified\n", __func__);
2140 return -ENODEV;
2141 }
2142
Jeff Hugo918b2dc2012-03-21 13:42:09 -06002143 return (ch->half_ch->get_fDSR(ch->recv) ? TIOCM_DSR : 0) |
2144 (ch->half_ch->get_fCTS(ch->recv) ? TIOCM_CTS : 0) |
2145 (ch->half_ch->get_fCD(ch->recv) ? TIOCM_CD : 0) |
2146 (ch->half_ch->get_fRI(ch->recv) ? TIOCM_RI : 0) |
2147 (ch->half_ch->get_fCTS(ch->send) ? TIOCM_RTS : 0) |
2148 (ch->half_ch->get_fDSR(ch->send) ? TIOCM_DTR : 0);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002149}
2150EXPORT_SYMBOL(smd_tiocmget);
2151
Vamsi Krishnacb12a102011-08-17 15:18:26 -07002152/* this api will be called while holding smd_lock */
2153int
2154smd_tiocmset_from_cb(smd_channel_t *ch, unsigned int set, unsigned int clear)
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002155{
Jack Pham1b236d12012-03-19 15:27:18 -07002156 if (!ch) {
2157 pr_err("%s: Invalid channel specified\n", __func__);
2158 return -ENODEV;
2159 }
2160
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002161 if (set & TIOCM_DTR)
Jeff Hugo918b2dc2012-03-21 13:42:09 -06002162 ch->half_ch->set_fDSR(ch->send, 1);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002163
2164 if (set & TIOCM_RTS)
Jeff Hugo918b2dc2012-03-21 13:42:09 -06002165 ch->half_ch->set_fCTS(ch->send, 1);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002166
2167 if (clear & TIOCM_DTR)
Jeff Hugo918b2dc2012-03-21 13:42:09 -06002168 ch->half_ch->set_fDSR(ch->send, 0);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002169
2170 if (clear & TIOCM_RTS)
Jeff Hugo918b2dc2012-03-21 13:42:09 -06002171 ch->half_ch->set_fCTS(ch->send, 0);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002172
Jeff Hugo918b2dc2012-03-21 13:42:09 -06002173 ch->half_ch->set_fSTATE(ch->send, 1);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002174 barrier();
2175 ch->notify_other_cpu();
Vamsi Krishnacb12a102011-08-17 15:18:26 -07002176
2177 return 0;
2178}
2179EXPORT_SYMBOL(smd_tiocmset_from_cb);
2180
2181int smd_tiocmset(smd_channel_t *ch, unsigned int set, unsigned int clear)
2182{
2183 unsigned long flags;
2184
Jack Pham1b236d12012-03-19 15:27:18 -07002185 if (!ch) {
2186 pr_err("%s: Invalid channel specified\n", __func__);
2187 return -ENODEV;
2188 }
2189
Vamsi Krishnacb12a102011-08-17 15:18:26 -07002190 spin_lock_irqsave(&smd_lock, flags);
2191 smd_tiocmset_from_cb(ch, set, clear);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002192 spin_unlock_irqrestore(&smd_lock, flags);
2193
2194 return 0;
2195}
2196EXPORT_SYMBOL(smd_tiocmset);
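/*
 * Illustrative only: smd_tiocmset() maps TIOCM_DTR/TIOCM_RTS onto the
 * shared fDSR/fCTS flags of the local send half-channel, and smd_tiocmget()
 * reports the remote side's signals as TIOCM_* bits.  Typical usage on an
 * open channel:
 *
 *	smd_tiocmset(ch, TIOCM_DTR | TIOCM_RTS, 0);	(assert both lines)
 *	smd_tiocmset(ch, 0, TIOCM_DTR | TIOCM_RTS);	(drop both lines)
 */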
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002197
2198
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002199/* -------------------------------------------------------------------------- */
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002200
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002201/* smem_alloc returns a pointer to the smem item if it is already allocated.
2202 * Otherwise, it returns NULL.
2203 */
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002204void *smem_alloc(unsigned id, unsigned size)
2205{
2206 return smem_find(id, size);
2207}
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002208EXPORT_SYMBOL(smem_alloc);
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002209
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002210/* smem_alloc2 returns a pointer to the smem item. If the item is not yet
2211 * allocated, it allocates the item and then returns the pointer to it.
2212 */
Angshuman Sarkar4eade0d2011-08-17 14:06:23 +05302213void *smem_alloc2(unsigned id, unsigned size_in)
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002214{
2215 struct smem_shared *shared = (void *) MSM_SHARED_RAM_BASE;
2216 struct smem_heap_entry *toc = shared->heap_toc;
2217 unsigned long flags;
2218 void *ret = NULL;
2219
2220 if (!shared->heap_info.initialized) {
2221 pr_err("%s: smem heap info not initialized\n", __func__);
2222 return NULL;
2223 }
2224
2225 if (id >= SMEM_NUM_ITEMS)
2226 return NULL;
2227
2228 size_in = ALIGN(size_in, 8);
2229 remote_spin_lock_irqsave(&remote_spinlock, flags);
2230 if (toc[id].allocated) {
2231 SMD_DBG("%s: %u already allocated\n", __func__, id);
2232 if (size_in != toc[id].size)
2233 pr_err("%s: wrong size %u (expected %u)\n",
2234 __func__, toc[id].size, size_in);
2235 else
2236 ret = (void *)(MSM_SHARED_RAM_BASE + toc[id].offset);
2237 } else if (id > SMEM_FIXED_ITEM_LAST) {
2238 SMD_DBG("%s: allocating %u\n", __func__, id);
2239 if (shared->heap_info.heap_remaining >= size_in) {
2240 toc[id].offset = shared->heap_info.free_offset;
2241 toc[id].size = size_in;
2242 wmb();
2243 toc[id].allocated = 1;
2244
2245 shared->heap_info.free_offset += size_in;
2246 shared->heap_info.heap_remaining -= size_in;
2247 ret = (void *)(MSM_SHARED_RAM_BASE + toc[id].offset);
2248 } else
2249 pr_err("%s: not enough memory %u (required %u)\n",
2250 __func__, shared->heap_info.heap_remaining,
2251 size_in);
2252 }
2253 wmb();
2254 remote_spin_unlock_irqrestore(&remote_spinlock, flags);
2255 return ret;
2256}
Angshuman Sarkar4eade0d2011-08-17 14:06:23 +05302257EXPORT_SYMBOL(smem_alloc2);
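/*
 * Illustrative only: how the two allocators above differ.  smem_alloc()
 * merely looks up an item that some processor has already allocated, while
 * smem_alloc2() creates the entry under the remote spinlock if it is still
 * free.  SMEM_ID_VENDOR0 is used purely as an example item id and the
 * 64-byte size is arbitrary (sizes are rounded up to a multiple of 8).
 *
 *	void *p = smem_alloc(SMEM_ID_VENDOR0, 64);
 *	if (!p)
 *		p = smem_alloc2(SMEM_ID_VENDOR0, 64);
 */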
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002258
2259void *smem_get_entry(unsigned id, unsigned *size)
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002260{
2261 struct smem_shared *shared = (void *) MSM_SHARED_RAM_BASE;
2262 struct smem_heap_entry *toc = shared->heap_toc;
Angshuman Sarkar7ee0dca2011-08-22 21:37:34 +05302263 int use_spinlocks = spinlocks_initialized;
2264 void *ret = 0;
2265 unsigned long flags = 0;
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002266
2267 if (id >= SMEM_NUM_ITEMS)
Angshuman Sarkar7ee0dca2011-08-22 21:37:34 +05302268 return ret;
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002269
Angshuman Sarkar7ee0dca2011-08-22 21:37:34 +05302270 if (use_spinlocks)
2271 remote_spin_lock_irqsave(&remote_spinlock, flags);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002272 /* toc is in device memory and cannot be speculatively accessed */
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002273 if (toc[id].allocated) {
2274 *size = toc[id].size;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002275 barrier();
Angshuman Sarkar7ee0dca2011-08-22 21:37:34 +05302276 ret = (void *) (MSM_SHARED_RAM_BASE + toc[id].offset);
Brian Swetland5b0f5a32009-04-26 18:38:49 -07002277 } else {
2278 *size = 0;
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002279 }
Angshuman Sarkar7ee0dca2011-08-22 21:37:34 +05302280 if (use_spinlocks)
2281 remote_spin_unlock_irqrestore(&remote_spinlock, flags);
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002282
Angshuman Sarkar7ee0dca2011-08-22 21:37:34 +05302283 return ret;
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002284}
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002285EXPORT_SYMBOL(smem_get_entry);
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002286
2287void *smem_find(unsigned id, unsigned size_in)
2288{
2289 unsigned size;
2290 void *ptr;
2291
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002292 ptr = smem_get_entry(id, &size);
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002293 if (!ptr)
2294 return 0;
2295
2296 size_in = ALIGN(size_in, 8);
2297 if (size_in != size) {
2298 pr_err("smem_find(%d, %d): wrong size %d\n",
2299 id, size_in, size);
2300 return 0;
2301 }
2302
2303 return ptr;
2304}
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002305EXPORT_SYMBOL(smem_find);
2306
2307static int smsm_cb_init(void)
2308{
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002309 struct smsm_state_info *state_info;
2310 int n;
2311 int ret = 0;
2312
2313 smsm_states = kmalloc(sizeof(struct smsm_state_info)*SMSM_NUM_ENTRIES,
2314 GFP_KERNEL);
2315
2316 if (!smsm_states) {
2317 pr_err("%s: SMSM init failed\n", __func__);
2318 return -ENOMEM;
2319 }
2320
Eric Holmbergc8002902011-09-16 13:55:57 -06002321 mutex_lock(&smsm_lock);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002322 for (n = 0; n < SMSM_NUM_ENTRIES; n++) {
2323 state_info = &smsm_states[n];
2324 state_info->last_value = __raw_readl(SMSM_STATE_ADDR(n));
Eric Holmberge8a39322012-04-03 15:14:02 -06002325 state_info->intr_mask_set = 0x0;
2326 state_info->intr_mask_clear = 0x0;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002327 INIT_LIST_HEAD(&state_info->callbacks);
2328 }
Eric Holmbergc8002902011-09-16 13:55:57 -06002329 mutex_unlock(&smsm_lock);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002330
2331 return ret;
2332}
2333
2334static int smsm_init(void)
2335{
2336 struct smem_shared *shared = (void *) MSM_SHARED_RAM_BASE;
2337 int i;
2338 struct smsm_size_info_type *smsm_size_info;
2339
2340 i = remote_spin_lock_init(&remote_spinlock, SMEM_SPINLOCK_SMEM_ALLOC);
2341 if (i) {
2342 pr_err("%s: remote spinlock init failed %d\n", __func__, i);
2343 return i;
2344 }
Angshuman Sarkar7ee0dca2011-08-22 21:37:34 +05302345 spinlocks_initialized = 1;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002346
2347 smsm_size_info = smem_alloc(SMEM_SMSM_SIZE_INFO,
2348 sizeof(struct smsm_size_info_type));
2349 if (smsm_size_info) {
2350 SMSM_NUM_ENTRIES = smsm_size_info->num_entries;
2351 SMSM_NUM_HOSTS = smsm_size_info->num_hosts;
2352 }
2353
Eric Holmbergc7e8daf2011-12-28 11:49:21 -07002354 i = kfifo_alloc(&smsm_snapshot_fifo,
2355 sizeof(uint32_t) * SMSM_NUM_ENTRIES * SMSM_SNAPSHOT_CNT,
2356 GFP_KERNEL);
2357 if (i) {
2358 pr_err("%s: SMSM state fifo alloc failed %d\n", __func__, i);
2359 return i;
2360 }
Eric Holmberg59a9f942012-03-19 10:04:22 -06002361 wake_lock_init(&smsm_snapshot_wakelock, WAKE_LOCK_SUSPEND,
2362 "smsm_snapshot");
Eric Holmbergc7e8daf2011-12-28 11:49:21 -07002363
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002364 if (!smsm_info.state) {
2365 smsm_info.state = smem_alloc2(ID_SHARED_STATE,
2366 SMSM_NUM_ENTRIES *
2367 sizeof(uint32_t));
2368
2369 if (smsm_info.state) {
2370 __raw_writel(0, SMSM_STATE_ADDR(SMSM_APPS_STATE));
2371 if ((shared->version[VERSION_MODEM] >> 16) >= 0xB)
2372 __raw_writel(0, \
2373 SMSM_STATE_ADDR(SMSM_APPS_DEM_I));
2374 }
2375 }
2376
2377 if (!smsm_info.intr_mask) {
2378 smsm_info.intr_mask = smem_alloc2(SMEM_SMSM_CPU_INTR_MASK,
2379 SMSM_NUM_ENTRIES *
2380 SMSM_NUM_HOSTS *
2381 sizeof(uint32_t));
2382
Eric Holmberge8a39322012-04-03 15:14:02 -06002383 if (smsm_info.intr_mask) {
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002384 for (i = 0; i < SMSM_NUM_ENTRIES; i++)
Eric Holmberge8a39322012-04-03 15:14:02 -06002385 __raw_writel(0x0,
2386 SMSM_INTR_MASK_ADDR(i, SMSM_APPS));
2387
2388 /* Configure legacy modem bits */
2389 __raw_writel(LEGACY_MODEM_SMSM_MASK,
2390 SMSM_INTR_MASK_ADDR(SMSM_MODEM_STATE,
2391 SMSM_APPS));
2392 }
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002393 }
2394
2395 if (!smsm_info.intr_mux)
2396 smsm_info.intr_mux = smem_alloc2(SMEM_SMD_SMSM_INTR_MUX,
2397 SMSM_NUM_INTR_MUX *
2398 sizeof(uint32_t));
2399
2400 i = smsm_cb_init();
2401 if (i)
2402 return i;
2403
2404 wmb();
Karthikeyan Ramasubramanian7069c482012-03-22 09:21:20 -06002405 smsm_driver_state_notify(SMSM_INIT, NULL);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002406 return 0;
2407}
2408
2409void smsm_reset_modem(unsigned mode)
2410{
2411 if (mode == SMSM_SYSTEM_DOWNLOAD) {
2412 mode = SMSM_RESET | SMSM_SYSTEM_DOWNLOAD;
2413 } else if (mode == SMSM_MODEM_WAIT) {
2414 mode = SMSM_RESET | SMSM_MODEM_WAIT;
2415 } else { /* reset_mode is SMSM_RESET or default */
2416 mode = SMSM_RESET;
2417 }
2418
2419 smsm_change_state(SMSM_APPS_STATE, mode, mode);
2420}
2421EXPORT_SYMBOL(smsm_reset_modem);
2422
2423void smsm_reset_modem_cont(void)
2424{
2425 unsigned long flags;
2426 uint32_t state;
2427
2428 if (!smsm_info.state)
2429 return;
2430
2431 spin_lock_irqsave(&smem_lock, flags);
2432 state = __raw_readl(SMSM_STATE_ADDR(SMSM_APPS_STATE)) \
2433 & ~SMSM_MODEM_WAIT;
2434 __raw_writel(state, SMSM_STATE_ADDR(SMSM_APPS_STATE));
2435 wmb();
2436 spin_unlock_irqrestore(&smem_lock, flags);
2437}
2438EXPORT_SYMBOL(smsm_reset_modem_cont);
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002439
Eric Holmbergda31d042012-03-28 14:01:02 -06002440static void smsm_cb_snapshot(uint32_t use_wakelock)
Eric Holmbergc7e8daf2011-12-28 11:49:21 -07002441{
2442 int n;
2443 uint32_t new_state;
Eric Holmberg59a9f942012-03-19 10:04:22 -06002444 unsigned long flags;
Eric Holmbergc7e8daf2011-12-28 11:49:21 -07002445 int ret;
2446
2447 ret = kfifo_avail(&smsm_snapshot_fifo);
Eric Holmbergda31d042012-03-28 14:01:02 -06002448 if (ret < SMSM_SNAPSHOT_SIZE) {
Eric Holmbergc7e8daf2011-12-28 11:49:21 -07002449 pr_err("%s: SMSM snapshot full %d\n", __func__, ret);
2450 return;
2451 }
2452
Eric Holmberg96b55f62012-04-03 19:10:46 -06002453 /*
2454 * To avoid a race condition with notify_smsm_cb_clients_worker, the
2455 * following sequence must be followed:
2456 * 1) increment snapshot count
2457 * 2) insert data into FIFO
2458 *
2459 * Potentially in parallel, the worker:
2460 * a) verifies >= 1 snapshots are in FIFO
2461 * b) processes snapshot
2462 * c) decrements reference count
2463 *
2464 * This order ensures that 1 will always occur before abc.
2465 */
Eric Holmbergda31d042012-03-28 14:01:02 -06002466 if (use_wakelock) {
2467 spin_lock_irqsave(&smsm_snapshot_count_lock, flags);
2468 if (smsm_snapshot_count == 0) {
2469 SMx_POWER_INFO("SMSM snapshot wake lock\n");
2470 wake_lock(&smsm_snapshot_wakelock);
2471 }
2472 ++smsm_snapshot_count;
2473 spin_unlock_irqrestore(&smsm_snapshot_count_lock, flags);
2474 }
Eric Holmberg96b55f62012-04-03 19:10:46 -06002475
2476 /* queue state entries */
2477 for (n = 0; n < SMSM_NUM_ENTRIES; n++) {
2478 new_state = __raw_readl(SMSM_STATE_ADDR(n));
2479
2480 ret = kfifo_in(&smsm_snapshot_fifo,
2481 &new_state, sizeof(new_state));
2482 if (ret != sizeof(new_state)) {
2483 pr_err("%s: SMSM snapshot failure %d\n", __func__, ret);
2484 goto restore_snapshot_count;
2485 }
2486 }
2487
2488 /* queue wakelock usage flag */
2489 ret = kfifo_in(&smsm_snapshot_fifo,
2490 &use_wakelock, sizeof(use_wakelock));
2491 if (ret != sizeof(use_wakelock)) {
2492 pr_err("%s: SMSM snapshot failure %d\n", __func__, ret);
2493 goto restore_snapshot_count;
2494 }
2495
Eric Holmbergc7e8daf2011-12-28 11:49:21 -07002496 schedule_work(&smsm_cb_work);
Eric Holmberg96b55f62012-04-03 19:10:46 -06002497 return;
2498
2499restore_snapshot_count:
2500 if (use_wakelock) {
2501 spin_lock_irqsave(&smsm_snapshot_count_lock, flags);
2502 if (smsm_snapshot_count) {
2503 --smsm_snapshot_count;
2504 if (smsm_snapshot_count == 0) {
2505 SMx_POWER_INFO("SMSM snapshot wake unlock\n");
2506 wake_unlock(&smsm_snapshot_wakelock);
2507 }
2508 } else {
2509 pr_err("%s: invalid snapshot count\n", __func__);
2510 }
2511 spin_unlock_irqrestore(&smsm_snapshot_count_lock, flags);
2512 }
Eric Holmbergc7e8daf2011-12-28 11:49:21 -07002513}
2514
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002515static irqreturn_t smsm_irq_handler(int irq, void *data)
2516{
2517 unsigned long flags;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002518
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002519 if (irq == INT_ADSP_A11_SMSM) {
Eric Holmberg6282c5d2011-10-27 17:30:57 -06002520 uint32_t mux_val;
2521 static uint32_t prev_smem_q6_apps_smsm;
2522
2523 if (smsm_info.intr_mux && cpu_is_qsd8x50()) {
2524 mux_val = __raw_readl(
2525 SMSM_INTR_MUX_ADDR(SMEM_Q6_APPS_SMSM));
2526 if (mux_val != prev_smem_q6_apps_smsm)
2527 prev_smem_q6_apps_smsm = mux_val;
2528 }
2529
Eric Holmbergc7e8daf2011-12-28 11:49:21 -07002530 spin_lock_irqsave(&smem_lock, flags);
Eric Holmbergda31d042012-03-28 14:01:02 -06002531 smsm_cb_snapshot(1);
Eric Holmbergc7e8daf2011-12-28 11:49:21 -07002532 spin_unlock_irqrestore(&smem_lock, flags);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002533 return IRQ_HANDLED;
2534 }
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002535
2536 spin_lock_irqsave(&smem_lock, flags);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002537 if (!smsm_info.state) {
2538 SMSM_INFO("<SM NO STATE>\n");
2539 } else {
2540 unsigned old_apps, apps;
2541 unsigned modm = __raw_readl(SMSM_STATE_ADDR(SMSM_MODEM_STATE));
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002542
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002543 old_apps = apps = __raw_readl(SMSM_STATE_ADDR(SMSM_APPS_STATE));
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002544
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002545 SMSM_DBG("<SM %08x %08x>\n", apps, modm);
2546 if (apps & SMSM_RESET) {
2547 /* If we get an interrupt and the apps SMSM_RESET
2548 bit is already set, the modem is acking the
2549 app's reset ack. */
Eric Holmberg2bb6ccd2012-03-13 13:05:14 -06002550 if (!disable_smsm_reset_handshake)
Angshuman Sarkaread67bd2011-09-21 20:13:12 +05302551 apps &= ~SMSM_RESET;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002552 /* Issue a fake irq to handle any
2553 * smd state changes during reset
2554 */
2555 smd_fake_irq_handler(0);
Brian Swetland5b0f5a32009-04-26 18:38:49 -07002556
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002557 /* queue modem restart notify chain */
2558 modem_queue_start_reset_notify();
2559
2560 } else if (modm & SMSM_RESET) {
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002561 pr_err("\nSMSM: Modem SMSM state changed to SMSM_RESET.");
Ram Somani8b9589f2012-04-03 12:07:18 +05302562 if (!disable_smsm_reset_handshake) {
2563 apps |= SMSM_RESET;
2564 flush_cache_all();
2565 outer_flush_all();
2566 }
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002567 modem_queue_start_reset_notify();
2568
2569 } else if (modm & SMSM_INIT) {
2570 if (!(apps & SMSM_INIT)) {
2571 apps |= SMSM_INIT;
2572 modem_queue_smsm_init_notify();
2573 }
2574
2575 if (modm & SMSM_SMDINIT)
2576 apps |= SMSM_SMDINIT;
2577 if ((apps & (SMSM_INIT | SMSM_SMDINIT | SMSM_RPCINIT)) ==
2578 (SMSM_INIT | SMSM_SMDINIT | SMSM_RPCINIT))
2579 apps |= SMSM_RUN;
2580 } else if (modm & SMSM_SYSTEM_DOWNLOAD) {
2581 pr_err("\nSMSM: Modem SMSM state changed to SMSM_SYSTEM_DOWNLOAD.");
2582 modem_queue_start_reset_notify();
2583 }
2584
2585 if (old_apps != apps) {
2586 SMSM_DBG("<SM %08x NOTIFY>\n", apps);
2587 __raw_writel(apps, SMSM_STATE_ADDR(SMSM_APPS_STATE));
2588 do_smd_probe();
2589 notify_other_smsm(SMSM_APPS_STATE, (old_apps ^ apps));
2590 }
2591
Eric Holmbergda31d042012-03-28 14:01:02 -06002592 smsm_cb_snapshot(1);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002593 }
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002594 spin_unlock_irqrestore(&smem_lock, flags);
2595 return IRQ_HANDLED;
2596}
2597
Eric Holmberg98c6c642012-02-24 11:29:35 -07002598static irqreturn_t smsm_modem_irq_handler(int irq, void *data)
2599{
2600 SMx_POWER_INFO("SMSM Int Modem->Apps\n");
Eric Holmberg7ad623a2012-03-01 14:41:10 -07002601 ++interrupt_stats[SMD_MODEM].smsm_in_count;
Eric Holmberg98c6c642012-02-24 11:29:35 -07002602 return smsm_irq_handler(irq, data);
2603}
2604
2605static irqreturn_t smsm_dsp_irq_handler(int irq, void *data)
2606{
2607 SMx_POWER_INFO("SMSM Int LPASS->Apps\n");
Eric Holmberg7ad623a2012-03-01 14:41:10 -07002608 ++interrupt_stats[SMD_Q6].smsm_in_count;
Eric Holmberg98c6c642012-02-24 11:29:35 -07002609 return smsm_irq_handler(irq, data);
2610}
2611
2612static irqreturn_t smsm_dsps_irq_handler(int irq, void *data)
2613{
2614 SMx_POWER_INFO("SMSM Int DSPS->Apps\n");
Eric Holmberg7ad623a2012-03-01 14:41:10 -07002615 ++interrupt_stats[SMD_DSPS].smsm_in_count;
Eric Holmberg98c6c642012-02-24 11:29:35 -07002616 return smsm_irq_handler(irq, data);
2617}
2618
2619static irqreturn_t smsm_wcnss_irq_handler(int irq, void *data)
2620{
2621 SMx_POWER_INFO("SMSM Int WCNSS->Apps\n");
Eric Holmberg7ad623a2012-03-01 14:41:10 -07002622 ++interrupt_stats[SMD_WCNSS].smsm_in_count;
Eric Holmberg98c6c642012-02-24 11:29:35 -07002623 return smsm_irq_handler(irq, data);
2624}
2625
Eric Holmberge8a39322012-04-03 15:14:02 -06002626/*
2627 * Changes the global interrupt mask. The set and clear masks are re-applied
2628 * every time the global interrupt mask is updated for callback registration
2629 * and de-registration.
2630 *
2631 * The clear mask is applied first, so if a bit is set to 1 in both the clear
2632 * mask and the set mask, the result will be that the interrupt is set.
2633 *
2634 * @smsm_entry SMSM entry to change
2635 * @clear_mask 1 = clear bit, 0 = no-op
2636 * @set_mask 1 = set bit, 0 = no-op
2637 *
2638 * @returns 0 for success, < 0 for error
2639 */
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002640int smsm_change_intr_mask(uint32_t smsm_entry,
2641 uint32_t clear_mask, uint32_t set_mask)
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002642{
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002643 uint32_t old_mask, new_mask;
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002644 unsigned long flags;
Brian Swetland5b0f5a32009-04-26 18:38:49 -07002645
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002646 if (smsm_entry >= SMSM_NUM_ENTRIES) {
2647 pr_err("smsm_change_state: Invalid entry %d\n",
2648 smsm_entry);
2649 return -EINVAL;
2650 }
2651
2652 if (!smsm_info.intr_mask) {
2653 pr_err("smsm_change_intr_mask <SM NO STATE>\n");
Brian Swetland5b0f5a32009-04-26 18:38:49 -07002654 return -EIO;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002655 }
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002656
2657 spin_lock_irqsave(&smem_lock, flags);
Eric Holmberge8a39322012-04-03 15:14:02 -06002658 smsm_states[smsm_entry].intr_mask_clear = clear_mask;
2659 smsm_states[smsm_entry].intr_mask_set = set_mask;
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002660
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002661 old_mask = __raw_readl(SMSM_INTR_MASK_ADDR(smsm_entry, SMSM_APPS));
2662 new_mask = (old_mask & ~clear_mask) | set_mask;
2663 __raw_writel(new_mask, SMSM_INTR_MASK_ADDR(smsm_entry, SMSM_APPS));
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002664
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002665 wmb();
2666 spin_unlock_irqrestore(&smem_lock, flags);
Brian Swetland5b0f5a32009-04-26 18:38:49 -07002667
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002668 return 0;
2669}
2670EXPORT_SYMBOL(smsm_change_intr_mask);
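/*
 * Illustrative only, assuming the usual reading that the (entry, SMSM_APPS)
 * mask selects which bit changes in that entry interrupt the apps: because
 * the clear mask is applied before the set mask, a caller that only cares
 * about SMSM_RESET and SMSM_INIT transitions in the modem entry could do:
 *
 *	smsm_change_intr_mask(SMSM_MODEM_STATE, 0xffffffff,
 *			      SMSM_RESET | SMSM_INIT);
 */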
2671
2672int smsm_get_intr_mask(uint32_t smsm_entry, uint32_t *intr_mask)
2673{
2674 if (smsm_entry >= SMSM_NUM_ENTRIES) {
2675 pr_err("smsm_change_state: Invalid entry %d\n",
2676 smsm_entry);
2677 return -EINVAL;
2678 }
2679
2680 if (!smsm_info.intr_mask) {
2681 pr_err("smsm_change_intr_mask <SM NO STATE>\n");
2682 return -EIO;
2683 }
2684
2685 *intr_mask = __raw_readl(SMSM_INTR_MASK_ADDR(smsm_entry, SMSM_APPS));
2686 return 0;
2687}
2688EXPORT_SYMBOL(smsm_get_intr_mask);
2689
2690int smsm_change_state(uint32_t smsm_entry,
2691 uint32_t clear_mask, uint32_t set_mask)
2692{
2693 unsigned long flags;
2694 uint32_t old_state, new_state;
2695
2696 if (smsm_entry >= SMSM_NUM_ENTRIES) {
2697 pr_err("smsm_change_state: Invalid entry %d",
2698 smsm_entry);
2699 return -EINVAL;
2700 }
2701
2702 if (!smsm_info.state) {
2703 pr_err("smsm_change_state <SM NO STATE>\n");
2704 return -EIO;
2705 }
2706 spin_lock_irqsave(&smem_lock, flags);
2707
2708 old_state = __raw_readl(SMSM_STATE_ADDR(smsm_entry));
2709 new_state = (old_state & ~clear_mask) | set_mask;
2710 __raw_writel(new_state, SMSM_STATE_ADDR(smsm_entry));
2711 SMSM_DBG("smsm_change_state %x\n", new_state);
2712 notify_other_smsm(SMSM_APPS_STATE, (old_state ^ new_state));
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002713
2714 spin_unlock_irqrestore(&smem_lock, flags);
2715
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002716 return 0;
2717}
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002718EXPORT_SYMBOL(smsm_change_state);
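/*
 * Illustrative only: smsm_change_state() read-modify-writes the local entry
 * under smem_lock and then notifies the other processors of the changed
 * bits.  For example, advertising the SMD-init handshake bit and later
 * clearing it again might look like:
 *
 *	smsm_change_state(SMSM_APPS_STATE, 0, SMSM_SMDINIT);
 *	...
 *	smsm_change_state(SMSM_APPS_STATE, SMSM_SMDINIT, 0);
 */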
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002719
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002720uint32_t smsm_get_state(uint32_t smsm_entry)
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002721{
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002722 uint32_t rv = 0;
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002723
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002724 /* needs interface change to return error code */
2725 if (smsm_entry >= SMSM_NUM_ENTRIES) {
2726 pr_err("smsm_change_state: Invalid entry %d",
2727 smsm_entry);
2728 return 0;
2729 }
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002730
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002731 if (!smsm_info.state) {
2732 pr_err("smsm_get_state <SM NO STATE>\n");
2733 } else {
2734 rv = __raw_readl(SMSM_STATE_ADDR(smsm_entry));
2735 }
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002736
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002737 return rv;
2738}
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002739EXPORT_SYMBOL(smsm_get_state);
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002740
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002741/**
2742 * Performs SMSM callback client notification.
2743 */
2744void notify_smsm_cb_clients_worker(struct work_struct *work)
Arve Hjønnevågec9d3d12009-06-16 14:48:21 -07002745{
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002746 struct smsm_state_cb_info *cb_info;
2747 struct smsm_state_info *state_info;
2748 int n;
2749 uint32_t new_state;
2750 uint32_t state_changes;
Eric Holmbergda31d042012-03-28 14:01:02 -06002751 uint32_t use_wakelock;
Eric Holmbergc7e8daf2011-12-28 11:49:21 -07002752 int ret;
Eric Holmberg59a9f942012-03-19 10:04:22 -06002753 unsigned long flags;
Brian Swetland03e00cd2009-07-01 17:58:37 -07002754
Eric Holmbergc7e8daf2011-12-28 11:49:21 -07002755 if (!smd_initialized)
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002756 return;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002757
Eric Holmbergda31d042012-03-28 14:01:02 -06002758 while (kfifo_len(&smsm_snapshot_fifo) >= SMSM_SNAPSHOT_SIZE) {
Eric Holmbergc7e8daf2011-12-28 11:49:21 -07002759 mutex_lock(&smsm_lock);
2760 for (n = 0; n < SMSM_NUM_ENTRIES; n++) {
2761 state_info = &smsm_states[n];
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002762
Eric Holmbergc7e8daf2011-12-28 11:49:21 -07002763 ret = kfifo_out(&smsm_snapshot_fifo, &new_state,
2764 sizeof(new_state));
2765 if (ret != sizeof(new_state)) {
2766 pr_err("%s: snapshot underflow %d\n",
2767 __func__, ret);
2768 mutex_unlock(&smsm_lock);
2769 return;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002770 }
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002771
Eric Holmbergc7e8daf2011-12-28 11:49:21 -07002772 state_changes = state_info->last_value ^ new_state;
2773 if (state_changes) {
Eric Holmberg98c6c642012-02-24 11:29:35 -07002774 SMx_POWER_INFO("SMSM Change %d: %08x->%08x\n",
2775 n, state_info->last_value,
2776 new_state);
Eric Holmbergc7e8daf2011-12-28 11:49:21 -07002777 list_for_each_entry(cb_info,
2778 &state_info->callbacks, cb_list) {
2779
2780 if (cb_info->mask & state_changes)
2781 cb_info->notify(cb_info->data,
2782 state_info->last_value,
2783 new_state);
2784 }
2785 state_info->last_value = new_state;
2786 }
2787 }
2788 mutex_unlock(&smsm_lock);
Eric Holmberg59a9f942012-03-19 10:04:22 -06002789
Eric Holmbergda31d042012-03-28 14:01:02 -06002790 /* read wakelock flag */
2791 ret = kfifo_out(&smsm_snapshot_fifo, &use_wakelock,
2792 sizeof(use_wakelock));
2793 if (ret != sizeof(use_wakelock)) {
2794 pr_err("%s: snapshot underflow %d\n",
2795 __func__, ret);
2796 return;
Eric Holmberg59a9f942012-03-19 10:04:22 -06002797 }
Eric Holmbergda31d042012-03-28 14:01:02 -06002798
2799 if (use_wakelock) {
2800 spin_lock_irqsave(&smsm_snapshot_count_lock, flags);
2801 if (smsm_snapshot_count) {
2802 --smsm_snapshot_count;
2803 if (smsm_snapshot_count == 0) {
 2804                                        SMx_POWER_INFO("SMSM snapshot wake unlock\n");
2806 wake_unlock(&smsm_snapshot_wakelock);
2807 }
2808 } else {
2809 pr_err("%s: invalid snapshot count\n",
2810 __func__);
2811 }
2812 spin_unlock_irqrestore(&smsm_snapshot_count_lock,
2813 flags);
2814 }
Eric Holmbergc7e8daf2011-12-28 11:49:21 -07002815 }
Arve Hjønnevågec9d3d12009-06-16 14:48:21 -07002816}
2817
Arve Hjønnevågec9d3d12009-06-16 14:48:21 -07002818
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002819/**
2820 * Registers callback for SMSM state notifications when the specified
2821 * bits change.
2822 *
 2823 * @smsm_entry Processor entry to register the callback for
 2824 * @mask Bits to monitor (ORed into any existing entry's mask)
 2825 * @notify Notification function to register
2826 * @data Opaque data passed in to callback
2827 *
2828 * @returns Status code
2829 * <0 error code
2830 * 0 inserted new entry
2831 * 1 updated mask of existing entry
2832 */
2833int smsm_state_cb_register(uint32_t smsm_entry, uint32_t mask,
2834 void (*notify)(void *, uint32_t, uint32_t), void *data)
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002835{
Eric Holmberge8a39322012-04-03 15:14:02 -06002836 struct smsm_state_info *state;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002837 struct smsm_state_cb_info *cb_info;
2838 struct smsm_state_cb_info *cb_found = 0;
Eric Holmberge8a39322012-04-03 15:14:02 -06002839 uint32_t new_mask = 0;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002840 int ret = 0;
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002841
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002842 if (smsm_entry >= SMSM_NUM_ENTRIES)
2843 return -EINVAL;
2844
Eric Holmbergc8002902011-09-16 13:55:57 -06002845 mutex_lock(&smsm_lock);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002846
2847 if (!smsm_states) {
2848 /* smsm not yet initialized */
2849 ret = -ENODEV;
2850 goto cleanup;
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002851 }
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002852
Eric Holmberge8a39322012-04-03 15:14:02 -06002853 state = &smsm_states[smsm_entry];
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002854 list_for_each_entry(cb_info,
Eric Holmberge8a39322012-04-03 15:14:02 -06002855 &state->callbacks, cb_list) {
2856 if (!ret && (cb_info->notify == notify) &&
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002857 (cb_info->data == data)) {
2858 cb_info->mask |= mask;
2859 cb_found = cb_info;
2860 ret = 1;
Brian Swetland5b0f5a32009-04-26 18:38:49 -07002861 }
Eric Holmberge8a39322012-04-03 15:14:02 -06002862 new_mask |= cb_info->mask;
Brian Swetland5b0f5a32009-04-26 18:38:49 -07002863 }
2864
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002865 if (!cb_found) {
2866 cb_info = kmalloc(sizeof(struct smsm_state_cb_info),
2867 GFP_ATOMIC);
2868 if (!cb_info) {
2869 ret = -ENOMEM;
2870 goto cleanup;
2871 }
2872
2873 cb_info->mask = mask;
2874 cb_info->notify = notify;
2875 cb_info->data = data;
2876 INIT_LIST_HEAD(&cb_info->cb_list);
2877 list_add_tail(&cb_info->cb_list,
Eric Holmberge8a39322012-04-03 15:14:02 -06002878 &state->callbacks);
2879 new_mask |= mask;
2880 }
2881
2882 /* update interrupt notification mask */
2883 if (smsm_entry == SMSM_MODEM_STATE)
2884 new_mask |= LEGACY_MODEM_SMSM_MASK;
2885
2886 if (smsm_info.intr_mask) {
2887 unsigned long flags;
2888
2889 spin_lock_irqsave(&smem_lock, flags);
2890 new_mask = (new_mask & ~state->intr_mask_clear)
2891 | state->intr_mask_set;
2892 __raw_writel(new_mask,
2893 SMSM_INTR_MASK_ADDR(smsm_entry, SMSM_APPS));
2894 wmb();
2895 spin_unlock_irqrestore(&smem_lock, flags);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002896 }
2897
2898cleanup:
Eric Holmbergc8002902011-09-16 13:55:57 -06002899 mutex_unlock(&smsm_lock);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002900 return ret;
2901}
2902EXPORT_SYMBOL(smsm_state_cb_register);
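/*
 * Illustrative usage sketch (hypothetical client code, not part of this
 * driver): watch selected bits of the modem state word. The callback gets
 * the opaque data pointer plus the old and new state values and runs from
 * notify_smsm_cb_clients_worker() above.
 *
 *	static void my_modem_state_cb(void *data, uint32_t old_state,
 *				      uint32_t new_state)
 *	{
 *		if ((old_state ^ new_state) & SMSM_RESET)
 *			pr_info("modem SMSM_RESET bit changed\n");
 *	}
 *
 *	int ret = smsm_state_cb_register(SMSM_MODEM_STATE, SMSM_RESET,
 *					 my_modem_state_cb, NULL);
 */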
2903
2904
2905/**
 2906 * Deregisters a callback for SMSM state notifications of the specified bits.
2907 *
2908 * @smsm_entry Processor entry to deregister
2909 * @mask Bits to deregister (if result is 0, callback is removed)
2910 * @notify Notification function to deregister
2911 * @data Opaque data passed in to callback
2912 *
2913 * @returns Status code
2914 * <0 error code
2915 * 0 not found
2916 * 1 updated mask
2917 * 2 removed callback
2918 */
2919int smsm_state_cb_deregister(uint32_t smsm_entry, uint32_t mask,
2920 void (*notify)(void *, uint32_t, uint32_t), void *data)
2921{
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002922 struct smsm_state_cb_info *cb_info;
Eric Holmberge8a39322012-04-03 15:14:02 -06002923 struct smsm_state_cb_info *cb_tmp;
2924 struct smsm_state_info *state;
2925 uint32_t new_mask = 0;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002926 int ret = 0;
2927
2928 if (smsm_entry >= SMSM_NUM_ENTRIES)
2929 return -EINVAL;
2930
Eric Holmbergc8002902011-09-16 13:55:57 -06002931 mutex_lock(&smsm_lock);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002932
2933 if (!smsm_states) {
2934 /* smsm not yet initialized */
Eric Holmbergc8002902011-09-16 13:55:57 -06002935 mutex_unlock(&smsm_lock);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002936 return -ENODEV;
2937 }
2938
Eric Holmberge8a39322012-04-03 15:14:02 -06002939 state = &smsm_states[smsm_entry];
2940 list_for_each_entry_safe(cb_info, cb_tmp,
2941 &state->callbacks, cb_list) {
2942 if (!ret && (cb_info->notify == notify) &&
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002943 (cb_info->data == data)) {
2944 cb_info->mask &= ~mask;
2945 ret = 1;
2946 if (!cb_info->mask) {
2947 /* no mask bits set, remove callback */
2948 list_del(&cb_info->cb_list);
2949 kfree(cb_info);
2950 ret = 2;
Eric Holmberge8a39322012-04-03 15:14:02 -06002951 continue;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002952 }
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002953 }
Eric Holmberge8a39322012-04-03 15:14:02 -06002954 new_mask |= cb_info->mask;
2955 }
2956
2957 /* update interrupt notification mask */
2958 if (smsm_entry == SMSM_MODEM_STATE)
2959 new_mask |= LEGACY_MODEM_SMSM_MASK;
2960
2961 if (smsm_info.intr_mask) {
2962 unsigned long flags;
2963
2964 spin_lock_irqsave(&smem_lock, flags);
2965 new_mask = (new_mask & ~state->intr_mask_clear)
2966 | state->intr_mask_set;
2967 __raw_writel(new_mask,
2968 SMSM_INTR_MASK_ADDR(smsm_entry, SMSM_APPS));
2969 wmb();
2970 spin_unlock_irqrestore(&smem_lock, flags);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002971 }
2972
Eric Holmbergc8002902011-09-16 13:55:57 -06002973 mutex_unlock(&smsm_lock);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002974 return ret;
2975}
2976EXPORT_SYMBOL(smsm_state_cb_deregister);
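/*
 * Illustrative counterpart to the registration sketch above: passing the
 * same mask/notify/data triple clears those mask bits, and once the mask
 * reaches zero the callback entry itself is removed (return value 2).
 *
 *	smsm_state_cb_deregister(SMSM_MODEM_STATE, SMSM_RESET,
 *				 my_modem_state_cb, NULL);
 */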
2977
Karthikeyan Ramasubramanian7069c482012-03-22 09:21:20 -06002978int smsm_driver_state_notifier_register(struct notifier_block *nb)
2979{
2980 int ret;
2981 if (!nb)
2982 return -EINVAL;
2983 mutex_lock(&smsm_driver_state_notifier_lock);
2984 ret = raw_notifier_chain_register(&smsm_driver_state_notifier_list, nb);
2985 mutex_unlock(&smsm_driver_state_notifier_lock);
2986 return ret;
2987}
2988EXPORT_SYMBOL(smsm_driver_state_notifier_register);
2989
2990int smsm_driver_state_notifier_unregister(struct notifier_block *nb)
2991{
2992 int ret;
2993 if (!nb)
2994 return -EINVAL;
2995 mutex_lock(&smsm_driver_state_notifier_lock);
2996 ret = raw_notifier_chain_unregister(&smsm_driver_state_notifier_list,
2997 nb);
2998 mutex_unlock(&smsm_driver_state_notifier_lock);
2999 return ret;
3000}
3001EXPORT_SYMBOL(smsm_driver_state_notifier_unregister);
3002
3003static void smsm_driver_state_notify(uint32_t state, void *data)
3004{
3005 mutex_lock(&smsm_driver_state_notifier_lock);
3006 raw_notifier_call_chain(&smsm_driver_state_notifier_list,
3007 state, data);
3008 mutex_unlock(&smsm_driver_state_notifier_lock);
3009}
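/*
 * Illustrative sketch of a hypothetical client of the driver-state
 * notifier chain above, using the standard notifier_block convention:
 *
 *	static int my_smsm_driver_state_cb(struct notifier_block *nb,
 *					   unsigned long state, void *data)
 *	{
 *		pr_info("smsm driver state event %lu\n", state);
 *		return NOTIFY_OK;
 *	}
 *
 *	static struct notifier_block my_smsm_driver_nb = {
 *		.notifier_call = my_smsm_driver_state_cb,
 *	};
 *
 *	smsm_driver_state_notifier_register(&my_smsm_driver_nb);
 */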
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003010
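/*
 * Legacy interrupt setup: used by msm_smd_probe() only when the platform
 * device provides neither a device tree node nor platform_data, wiring
 * the statically defined A9_M2A, ADSP, DSPS and WCNSS interrupt lines
 * directly. Targets with platform data use smd_core_platform_init().
 */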
3011int smd_core_init(void)
3012{
3013 int r;
3014 unsigned long flags = IRQF_TRIGGER_RISING;
3015 SMD_INFO("smd_core_init()\n");
Brian Swetland5b0f5a32009-04-26 18:38:49 -07003016
Brian Swetland37521a32009-07-01 18:30:47 -07003017 r = request_irq(INT_A9_M2A_0, smd_modem_irq_handler,
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003018 flags, "smd_dev", 0);
Brian Swetland2eb44eb2008-09-29 16:00:48 -07003019 if (r < 0)
3020 return r;
3021 r = enable_irq_wake(INT_A9_M2A_0);
3022 if (r < 0)
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003023 pr_err("smd_core_init: "
3024 "enable_irq_wake failed for INT_A9_M2A_0\n");
Brian Swetland2eb44eb2008-09-29 16:00:48 -07003025
Eric Holmberg98c6c642012-02-24 11:29:35 -07003026 r = request_irq(INT_A9_M2A_5, smsm_modem_irq_handler,
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003027 flags, "smsm_dev", 0);
Brian Swetland2eb44eb2008-09-29 16:00:48 -07003028 if (r < 0) {
3029 free_irq(INT_A9_M2A_0, 0);
3030 return r;
3031 }
3032 r = enable_irq_wake(INT_A9_M2A_5);
3033 if (r < 0)
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003034 pr_err("smd_core_init: "
3035 "enable_irq_wake failed for INT_A9_M2A_5\n");
Brian Swetland2eb44eb2008-09-29 16:00:48 -07003036
Brian Swetland37521a32009-07-01 18:30:47 -07003037#if defined(CONFIG_QDSP6)
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003038#if (INT_ADSP_A11 == INT_ADSP_A11_SMSM)
3039 flags |= IRQF_SHARED;
3040#endif
Brian Swetland37521a32009-07-01 18:30:47 -07003041 r = request_irq(INT_ADSP_A11, smd_dsp_irq_handler,
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003042 flags, "smd_dev", smd_dsp_irq_handler);
Brian Swetland37521a32009-07-01 18:30:47 -07003043 if (r < 0) {
3044 free_irq(INT_A9_M2A_0, 0);
3045 free_irq(INT_A9_M2A_5, 0);
3046 return r;
3047 }
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003048
Eric Holmberg98c6c642012-02-24 11:29:35 -07003049 r = request_irq(INT_ADSP_A11_SMSM, smsm_dsp_irq_handler,
3050 flags, "smsm_dev", smsm_dsp_irq_handler);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003051 if (r < 0) {
3052 free_irq(INT_A9_M2A_0, 0);
3053 free_irq(INT_A9_M2A_5, 0);
3054 free_irq(INT_ADSP_A11, smd_dsp_irq_handler);
3055 return r;
3056 }
3057
3058 r = enable_irq_wake(INT_ADSP_A11);
3059 if (r < 0)
3060 pr_err("smd_core_init: "
3061 "enable_irq_wake failed for INT_ADSP_A11\n");
3062
3063#if (INT_ADSP_A11 != INT_ADSP_A11_SMSM)
3064 r = enable_irq_wake(INT_ADSP_A11_SMSM);
3065 if (r < 0)
3066 pr_err("smd_core_init: enable_irq_wake "
3067 "failed for INT_ADSP_A11_SMSM\n");
3068#endif
3069 flags &= ~IRQF_SHARED;
Brian Swetland37521a32009-07-01 18:30:47 -07003070#endif
3071
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003072#if defined(CONFIG_DSPS)
3073 r = request_irq(INT_DSPS_A11, smd_dsps_irq_handler,
3074 flags, "smd_dev", smd_dsps_irq_handler);
3075 if (r < 0) {
3076 free_irq(INT_A9_M2A_0, 0);
3077 free_irq(INT_A9_M2A_5, 0);
3078 free_irq(INT_ADSP_A11, smd_dsp_irq_handler);
Eric Holmberg98c6c642012-02-24 11:29:35 -07003079 free_irq(INT_ADSP_A11_SMSM, smsm_dsp_irq_handler);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003080 return r;
3081 }
Brian Swetland5b0f5a32009-04-26 18:38:49 -07003082
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003083 r = enable_irq_wake(INT_DSPS_A11);
3084 if (r < 0)
3085 pr_err("smd_core_init: "
 3086                        "enable_irq_wake failed for INT_DSPS_A11\n");
Arve Hjønnevågec9d3d12009-06-16 14:48:21 -07003087#endif
Brian Swetland2eb44eb2008-09-29 16:00:48 -07003088
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003089#if defined(CONFIG_WCNSS)
3090 r = request_irq(INT_WCNSS_A11, smd_wcnss_irq_handler,
3091 flags, "smd_dev", smd_wcnss_irq_handler);
3092 if (r < 0) {
3093 free_irq(INT_A9_M2A_0, 0);
3094 free_irq(INT_A9_M2A_5, 0);
3095 free_irq(INT_ADSP_A11, smd_dsp_irq_handler);
Eric Holmberg98c6c642012-02-24 11:29:35 -07003096 free_irq(INT_ADSP_A11_SMSM, smsm_dsp_irq_handler);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003097 free_irq(INT_DSPS_A11, smd_dsps_irq_handler);
3098 return r;
3099 }
3100
3101 r = enable_irq_wake(INT_WCNSS_A11);
3102 if (r < 0)
3103 pr_err("smd_core_init: "
3104 "enable_irq_wake failed for INT_WCNSS_A11\n");
3105
Eric Holmberg98c6c642012-02-24 11:29:35 -07003106 r = request_irq(INT_WCNSS_A11_SMSM, smsm_wcnss_irq_handler,
3107 flags, "smsm_dev", smsm_wcnss_irq_handler);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003108 if (r < 0) {
3109 free_irq(INT_A9_M2A_0, 0);
3110 free_irq(INT_A9_M2A_5, 0);
3111 free_irq(INT_ADSP_A11, smd_dsp_irq_handler);
Eric Holmberg98c6c642012-02-24 11:29:35 -07003112 free_irq(INT_ADSP_A11_SMSM, smsm_dsp_irq_handler);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003113 free_irq(INT_DSPS_A11, smd_dsps_irq_handler);
3114 free_irq(INT_WCNSS_A11, smd_wcnss_irq_handler);
3115 return r;
3116 }
3117
3118 r = enable_irq_wake(INT_WCNSS_A11_SMSM);
3119 if (r < 0)
3120 pr_err("smd_core_init: "
3121 "enable_irq_wake failed for INT_WCNSS_A11_SMSM\n");
3122#endif
3123
Jeff Hugo6a8057c2011-08-16 13:47:12 -06003124#if defined(CONFIG_DSPS_SMSM)
Eric Holmberg98c6c642012-02-24 11:29:35 -07003125 r = request_irq(INT_DSPS_A11_SMSM, smsm_dsps_irq_handler,
3126 flags, "smsm_dev", smsm_dsps_irq_handler);
Jeff Hugo6a8057c2011-08-16 13:47:12 -06003127 if (r < 0) {
3128 free_irq(INT_A9_M2A_0, 0);
3129 free_irq(INT_A9_M2A_5, 0);
3130 free_irq(INT_ADSP_A11, smd_dsp_irq_handler);
Eric Holmberg98c6c642012-02-24 11:29:35 -07003131 free_irq(INT_ADSP_A11_SMSM, smsm_dsp_irq_handler);
Jeff Hugo6a8057c2011-08-16 13:47:12 -06003132 free_irq(INT_DSPS_A11, smd_dsps_irq_handler);
3133 free_irq(INT_WCNSS_A11, smd_wcnss_irq_handler);
Eric Holmberg98c6c642012-02-24 11:29:35 -07003134 free_irq(INT_WCNSS_A11_SMSM, smsm_wcnss_irq_handler);
Jeff Hugo6a8057c2011-08-16 13:47:12 -06003135 return r;
3136 }
3137
3138 r = enable_irq_wake(INT_DSPS_A11_SMSM);
3139 if (r < 0)
3140 pr_err("smd_core_init: "
3141 "enable_irq_wake failed for INT_DSPS_A11_SMSM\n");
3142#endif
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003143 SMD_INFO("smd_core_init() done\n");
3144
Brian Swetland2eb44eb2008-09-29 16:00:48 -07003145 return 0;
3146}
3147
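/*
 * intr_init() - wire up a single SMD or SMSM interrupt from platform data.
 * Copies the outgoing-interrupt parameters into the private config,
 * requests the named incoming IRQ and marks it wake-capable. The IRQ id
 * (or the request_irq() error) is stored in platform_irq->irq_id so the
 * caller can free it later.
 */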
Angshuman Sarkarbad32df2012-02-01 19:52:52 +05303148static int intr_init(struct interrupt_config_item *private_irq,
3149 struct smd_irq_config *platform_irq,
3150 struct platform_device *pdev
3151 )
3152{
3153 int irq_id;
3154 int ret;
3155 int ret_wake;
3156
3157 private_irq->out_bit_pos = platform_irq->out_bit_pos;
3158 private_irq->out_offset = platform_irq->out_offset;
3159 private_irq->out_base = platform_irq->out_base;
3160
3161 irq_id = platform_get_irq_byname(
3162 pdev,
3163 platform_irq->irq_name
3164 );
3165 SMD_DBG("smd: %s: register irq: %s id: %d\n", __func__,
3166 platform_irq->irq_name, irq_id);
3167 ret = request_irq(irq_id,
3168 private_irq->irq_handler,
3169 platform_irq->flags,
3170 platform_irq->device_name,
3171 (void *)platform_irq->dev_id
3172 );
3173 if (ret < 0) {
3174 platform_irq->irq_id = ret;
3175 } else {
3176 platform_irq->irq_id = irq_id;
3177 ret_wake = enable_irq_wake(irq_id);
3178 if (ret_wake < 0) {
3179 pr_err("smd: enable_irq_wake failed on %s",
3180 platform_irq->irq_name);
3181 }
3182 }
3183
3184 return ret;
3185}
3186
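/*
 * Platform-data driven interrupt setup: registers the SMD (and, for edges
 * that support it, SMSM) interrupts of every configured subsystem, records
 * each edge's subsystem name, and frees all successfully requested IRQs if
 * any registration fails.
 */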
3187int smd_core_platform_init(struct platform_device *pdev)
3188{
3189 int i;
3190 int ret;
3191 uint32_t num_ss;
3192 struct smd_platform *smd_platform_data;
3193 struct smd_subsystem_config *smd_ss_config_list;
3194 struct smd_subsystem_config *cfg;
3195 int err_ret = 0;
3196
3197 smd_platform_data = pdev->dev.platform_data;
3198 num_ss = smd_platform_data->num_ss_configs;
3199 smd_ss_config_list = smd_platform_data->smd_ss_configs;
3200
Eric Holmberg2bb6ccd2012-03-13 13:05:14 -06003201 if (smd_platform_data->smd_ssr_config)
3202 disable_smsm_reset_handshake = smd_platform_data->
3203 smd_ssr_config->disable_smsm_reset_handshake;
3204
Angshuman Sarkarbad32df2012-02-01 19:52:52 +05303205 for (i = 0; i < num_ss; i++) {
3206 cfg = &smd_ss_config_list[i];
3207
3208 ret = intr_init(
3209 &private_intr_config[cfg->irq_config_id].smd,
3210 &cfg->smd_int,
3211 pdev
3212 );
3213
3214 if (ret < 0) {
3215 err_ret = ret;
3216 pr_err("smd: register irq failed on %s\n",
3217 cfg->smd_int.irq_name);
3218 break;
3219 }
3220
Jeff Hugo918b2dc2012-03-21 13:42:09 -06003221 /* only init smsm structs if this edge supports smsm */
3222 if (cfg->smsm_int.irq_id)
3223 ret = intr_init(
3224 &private_intr_config[cfg->irq_config_id].smsm,
3225 &cfg->smsm_int,
3226 pdev
3227 );
Angshuman Sarkarbad32df2012-02-01 19:52:52 +05303228
3229 if (ret < 0) {
3230 err_ret = ret;
3231 pr_err("smd: register irq failed on %s\n",
3232 cfg->smsm_int.irq_name);
3233 break;
3234 }
Eric Holmberg17992c12012-02-29 12:54:44 -07003235
Jeff Hugo918b2dc2012-03-21 13:42:09 -06003236 if (cfg->subsys_name)
3237 strlcpy(edge_to_pids[cfg->edge].subsys_name,
Eric Holmberg17992c12012-02-29 12:54:44 -07003238 cfg->subsys_name, SMD_MAX_CH_NAME_LEN);
Angshuman Sarkarbad32df2012-02-01 19:52:52 +05303239 }
3240
3241 if (err_ret < 0) {
3242 pr_err("smd: deregistering IRQs\n");
3243 for (i = 0; i < num_ss; ++i) {
3244 cfg = &smd_ss_config_list[i];
3245
3246 if (cfg->smd_int.irq_id >= 0)
3247 free_irq(cfg->smd_int.irq_id,
3248 (void *)cfg->smd_int.dev_id
3249 );
3250 if (cfg->smsm_int.irq_id >= 0)
3251 free_irq(cfg->smsm_int.irq_id,
3252 (void *)cfg->smsm_int.dev_id
3253 );
3254 }
3255 return err_ret;
3256 }
3257
3258 SMD_INFO("smd_core_platform_init() done\n");
3259 return 0;
3260
3261}
3262
Gregory Bean4416e9e2010-07-28 10:22:12 -07003263static int __devinit msm_smd_probe(struct platform_device *pdev)
Brian Swetland2eb44eb2008-09-29 16:00:48 -07003264{
Angshuman Sarkarbad32df2012-02-01 19:52:52 +05303265 int ret;
Daniel Walker0aec66d2010-03-18 12:31:08 -07003266
Angshuman Sarkarbad32df2012-02-01 19:52:52 +05303267 SMD_INFO("smd probe\n");
Brian Swetland2eb44eb2008-09-29 16:00:48 -07003268 INIT_WORK(&probe_work, smd_channel_probe_worker);
3269
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003270 channel_close_wq = create_singlethread_workqueue("smd_channel_close");
 3271        if (!channel_close_wq) {
3272 pr_err("%s: create_singlethread_workqueue ENOMEM\n", __func__);
3273 return -ENOMEM;
3274 }
3275
3276 if (smsm_init()) {
3277 pr_err("smsm_init() failed\n");
3278 return -1;
3279 }
3280
Angshuman Sarkarbad32df2012-02-01 19:52:52 +05303281 if (pdev) {
3282 if (pdev->dev.of_node) {
3283 pr_err("SMD: Device tree not currently supported\n");
3284 return -ENODEV;
3285 } else if (pdev->dev.platform_data) {
3286 ret = smd_core_platform_init(pdev);
3287 if (ret) {
3288 pr_err(
3289 "SMD: smd_core_platform_init() failed\n");
3290 return -ENODEV;
3291 }
3292 } else {
3293 ret = smd_core_init();
3294 if (ret) {
3295 pr_err("smd_core_init() failed\n");
3296 return -ENODEV;
3297 }
3298 }
3299 } else {
3300 pr_err("SMD: PDEV not found\n");
3301 return -ENODEV;
Brian Swetland2eb44eb2008-09-29 16:00:48 -07003302 }
3303
Brian Swetland2eb44eb2008-09-29 16:00:48 -07003304 smd_initialized = 1;
3305
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003306 smd_alloc_loopback_channel();
Eric Holmbergc33d4ab2011-10-24 10:28:25 -06003307 smsm_irq_handler(0, 0);
3308 tasklet_schedule(&smd_fake_irq_tasklet);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003309
Brian Swetland2eb44eb2008-09-29 16:00:48 -07003310 return 0;
3311}
3312
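/*
 * Subsystem-restart integration: each entry below subscribes to restart
 * notifications from one remote subsystem; on SUBSYS_AFTER_SHUTDOWN the
 * callback resets all SMD channels owned by that processor.
 */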
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003313static int restart_notifier_cb(struct notifier_block *this,
3314 unsigned long code,
3315 void *data);
3316
3317static struct restart_notifier_block restart_notifiers[] = {
Eric Holmbergca7ead22011-12-01 17:21:15 -07003318 {SMD_MODEM, "modem", .nb.notifier_call = restart_notifier_cb},
3319 {SMD_Q6, "lpass", .nb.notifier_call = restart_notifier_cb},
3320 {SMD_WCNSS, "riva", .nb.notifier_call = restart_notifier_cb},
3321 {SMD_DSPS, "dsps", .nb.notifier_call = restart_notifier_cb},
Eric Holmberg8b0e74f2012-02-08 09:56:17 -07003322 {SMD_MODEM, "gss", .nb.notifier_call = restart_notifier_cb},
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003323};
3324
3325static int restart_notifier_cb(struct notifier_block *this,
3326 unsigned long code,
3327 void *data)
3328{
3329 if (code == SUBSYS_AFTER_SHUTDOWN) {
3330 struct restart_notifier_block *notifier;
3331
3332 notifier = container_of(this,
3333 struct restart_notifier_block, nb);
3334 SMD_INFO("%s: ssrestart for processor %d ('%s')\n",
3335 __func__, notifier->processor,
3336 notifier->name);
3337
3338 smd_channel_reset(notifier->processor);
3339 }
3340
3341 return NOTIFY_DONE;
3342}
3343
3344static __init int modem_restart_late_init(void)
3345{
3346 int i;
3347 void *handle;
3348 struct restart_notifier_block *nb;
3349
3350 for (i = 0; i < ARRAY_SIZE(restart_notifiers); i++) {
3351 nb = &restart_notifiers[i];
3352 handle = subsys_notif_register_notifier(nb->name, &nb->nb);
3353 SMD_DBG("%s: registering notif for '%s', handle=%p\n",
3354 __func__, nb->name, handle);
3355 }
3356 return 0;
3357}
3358late_initcall(modem_restart_late_init);
3359
Brian Swetland2eb44eb2008-09-29 16:00:48 -07003360static struct platform_driver msm_smd_driver = {
3361 .probe = msm_smd_probe,
3362 .driver = {
3363 .name = MODULE_NAME,
3364 .owner = THIS_MODULE,
3365 },
3366};
3367
3368static int __init msm_smd_init(void)
3369{
3370 return platform_driver_register(&msm_smd_driver);
3371}
3372
3373module_init(msm_smd_init);
3374
3375MODULE_DESCRIPTION("MSM Shared Memory Core");
3376MODULE_AUTHOR("Brian Swetland <swetland@google.com>");
3377MODULE_LICENSE("GPL");