blob: 6b42325d20c656fda19872767cf1f2dda1114d3c [file] [log] [blame]
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001/* arch/arm/mach-msm/smd.c
2 *
3 * Copyright (C) 2007 Google, Inc.
Eric Holmbergc7e8daf2011-12-28 11:49:21 -07004 * Copyright (c) 2008-2012, Code Aurora Forum. All rights reserved.
Brian Swetland2eb44eb2008-09-29 16:00:48 -07005 * Author: Brian Swetland <swetland@google.com>
6 *
7 * This software is licensed under the terms of the GNU General Public
8 * License version 2, as published by the Free Software Foundation, and
9 * may be copied, distributed, and modified under those terms.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 */
17
18#include <linux/platform_device.h>
19#include <linux/module.h>
20#include <linux/fs.h>
21#include <linux/cdev.h>
22#include <linux/device.h>
23#include <linux/wait.h>
24#include <linux/interrupt.h>
25#include <linux/irq.h>
26#include <linux/list.h>
27#include <linux/slab.h>
Brian Swetland2eb44eb2008-09-29 16:00:48 -070028#include <linux/delay.h>
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070029#include <linux/io.h>
30#include <linux/termios.h>
31#include <linux/ctype.h>
32#include <linux/remote_spinlock.h>
33#include <linux/uaccess.h>
Eric Holmbergc7e8daf2011-12-28 11:49:21 -070034#include <linux/kfifo.h>
Eric Holmberg59a9f942012-03-19 10:04:22 -060035#include <linux/wakelock.h>
Karthikeyan Ramasubramanian7069c482012-03-22 09:21:20 -060036#include <linux/notifier.h>
Jeff Hugobdc734d2012-03-26 16:05:39 -060037#include <linux/sort.h>
Brian Swetland2eb44eb2008-09-29 16:00:48 -070038#include <mach/msm_smd.h>
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070039#include <mach/msm_iomap.h>
Brian Swetland2eb44eb2008-09-29 16:00:48 -070040#include <mach/system.h>
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070041#include <mach/subsystem_notif.h>
Angshuman Sarkaread67bd2011-09-21 20:13:12 +053042#include <mach/socinfo.h>
Ram Somani8b9589f2012-04-03 12:07:18 +053043#include <asm/cacheflush.h>
Brian Swetland2eb44eb2008-09-29 16:00:48 -070044
45#include "smd_private.h"
46#include "proc_comm.h"
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070047#include "modem_notifier.h"
Brian Swetland2eb44eb2008-09-29 16:00:48 -070048
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070049#if defined(CONFIG_ARCH_QSD8X50) || defined(CONFIG_ARCH_MSM8X60) \
Jeff Hugo56b933a2011-09-28 14:42:05 -060050 || defined(CONFIG_ARCH_MSM8960) || defined(CONFIG_ARCH_FSM9XXX) \
Jeff Hugo0c0f5e92011-09-28 13:55:45 -060051 || defined(CONFIG_ARCH_MSM9615) || defined(CONFIG_ARCH_APQ8064)
Brian Swetland37521a32009-07-01 18:30:47 -070052#define CONFIG_QDSP6 1
53#endif
54
Jeff Hugo0c0f5e92011-09-28 13:55:45 -060055#if defined(CONFIG_ARCH_MSM8X60) || defined(CONFIG_ARCH_MSM8960) \
56 || defined(CONFIG_ARCH_APQ8064)
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070057#define CONFIG_DSPS 1
58#endif
59
Jeff Hugo0c0f5e92011-09-28 13:55:45 -060060#if defined(CONFIG_ARCH_MSM8960) \
61 || defined(CONFIG_ARCH_APQ8064)
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070062#define CONFIG_WCNSS 1
Jeff Hugo6a8057c2011-08-16 13:47:12 -060063#define CONFIG_DSPS_SMSM 1
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070064#endif
Brian Swetland2eb44eb2008-09-29 16:00:48 -070065
66#define MODULE_NAME "msm_smd"
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070067#define SMEM_VERSION 0x000B
68#define SMD_VERSION 0x00020000
Eric Holmbergc7e8daf2011-12-28 11:49:21 -070069#define SMSM_SNAPSHOT_CNT 64
Eric Holmbergda31d042012-03-28 14:01:02 -060070#define SMSM_SNAPSHOT_SIZE ((SMSM_NUM_ENTRIES + 1) * 4)
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070071
72uint32_t SMSM_NUM_ENTRIES = 8;
73uint32_t SMSM_NUM_HOSTS = 3;
Brian Swetland2eb44eb2008-09-29 16:00:48 -070074
Eric Holmberge8a39322012-04-03 15:14:02 -060075/* Legacy SMSM interrupt notifications */
76#define LEGACY_MODEM_SMSM_MASK (SMSM_RESET | SMSM_INIT | SMSM_SMDINIT \
77 | SMSM_RUN | SMSM_SYSTEM_DOWNLOAD)
78
Brian Swetland2eb44eb2008-09-29 16:00:48 -070079enum {
80 MSM_SMD_DEBUG = 1U << 0,
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070081 MSM_SMSM_DEBUG = 1U << 1,
82 MSM_SMD_INFO = 1U << 2,
83 MSM_SMSM_INFO = 1U << 3,
Eric Holmberg98c6c642012-02-24 11:29:35 -070084 MSM_SMx_POWER_INFO = 1U << 4,
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070085};
86
87struct smsm_shared_info {
88 uint32_t *state;
89 uint32_t *intr_mask;
90 uint32_t *intr_mux;
91};
92
93static struct smsm_shared_info smsm_info;
Eric Holmberg59a9f942012-03-19 10:04:22 -060094static struct kfifo smsm_snapshot_fifo;
95static struct wake_lock smsm_snapshot_wakelock;
96static int smsm_snapshot_count;
97static DEFINE_SPINLOCK(smsm_snapshot_count_lock);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070098
99struct smsm_size_info_type {
100 uint32_t num_hosts;
101 uint32_t num_entries;
102 uint32_t reserved0;
103 uint32_t reserved1;
104};
105
106struct smsm_state_cb_info {
107 struct list_head cb_list;
108 uint32_t mask;
109 void *data;
110 void (*notify)(void *data, uint32_t old_state, uint32_t new_state);
111};
112
113struct smsm_state_info {
114 struct list_head callbacks;
115 uint32_t last_value;
Eric Holmberge8a39322012-04-03 15:14:02 -0600116 uint32_t intr_mask_set;
117 uint32_t intr_mask_clear;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700118};
119
/*
 * Outgoing/incoming interrupt configuration for one protocol (SMD or SMSM)
 * on one remote subsystem.
 */
struct interrupt_config_item {
	/* must be initialized */
	irqreturn_t (*irq_handler)(int req, void *data);
	/* outgoing interrupt config (set from platform data) */
	uint32_t out_bit_pos;		/* bit written to raise the interrupt */
	void __iomem *out_base;		/* mapped base of the trigger register */
	uint32_t out_offset;		/* register offset from out_base */
};

/* Pair of SMD and SMSM interrupt configurations for one remote subsystem. */
struct interrupt_config {
	struct interrupt_config_item smd;
	struct interrupt_config_item smsm;
};
133
134static irqreturn_t smd_modem_irq_handler(int irq, void *data);
Eric Holmberg98c6c642012-02-24 11:29:35 -0700135static irqreturn_t smsm_modem_irq_handler(int irq, void *data);
Angshuman Sarkarbad32df2012-02-01 19:52:52 +0530136static irqreturn_t smd_dsp_irq_handler(int irq, void *data);
Eric Holmberg98c6c642012-02-24 11:29:35 -0700137static irqreturn_t smsm_dsp_irq_handler(int irq, void *data);
Angshuman Sarkarbad32df2012-02-01 19:52:52 +0530138static irqreturn_t smd_dsps_irq_handler(int irq, void *data);
Eric Holmberg98c6c642012-02-24 11:29:35 -0700139static irqreturn_t smsm_dsps_irq_handler(int irq, void *data);
Angshuman Sarkarbad32df2012-02-01 19:52:52 +0530140static irqreturn_t smd_wcnss_irq_handler(int irq, void *data);
Eric Holmberg98c6c642012-02-24 11:29:35 -0700141static irqreturn_t smsm_wcnss_irq_handler(int irq, void *data);
Jeff Hugo918b2dc2012-03-21 13:42:09 -0600142static irqreturn_t smd_rpm_irq_handler(int irq, void *data);
Angshuman Sarkarbad32df2012-02-01 19:52:52 +0530143static irqreturn_t smsm_irq_handler(int irq, void *data);
144
/*
 * Incoming interrupt handlers per remote subsystem.  The outgoing fields
 * (out_base/out_bit_pos/out_offset) are populated from platform data; when
 * out_base stays NULL the notify_* helpers fall back to the hard-coded
 * legacy trigger macros.
 */
static struct interrupt_config private_intr_config[NUM_SMD_SUBSYSTEMS] = {
	[SMD_MODEM] = {
		.smd.irq_handler = smd_modem_irq_handler,
		.smsm.irq_handler = smsm_modem_irq_handler,
	},
	[SMD_Q6] = {
		.smd.irq_handler = smd_dsp_irq_handler,
		.smsm.irq_handler = smsm_dsp_irq_handler,
	},
	[SMD_DSPS] = {
		.smd.irq_handler = smd_dsps_irq_handler,
		.smsm.irq_handler = smsm_dsps_irq_handler,
	},
	[SMD_WCNSS] = {
		.smd.irq_handler = smd_wcnss_irq_handler,
		.smsm.irq_handler = smsm_wcnss_irq_handler,
	},
	[SMD_RPM] = {
		.smd.irq_handler = smd_rpm_irq_handler,
		.smsm.irq_handler = NULL, /* does not support smsm */
	},
};
Jeff Hugobdc734d2012-03-26 16:05:39 -0600167
168struct smem_area {
169 void *phys_addr;
170 unsigned size;
171 void __iomem *virt_addr;
172};
173static uint32_t num_smem_areas;
174static struct smem_area *smem_areas;
175static void *smem_range_check(void *base, unsigned offset);
176
Eric Holmberg7ad623a2012-03-01 14:41:10 -0700177struct interrupt_stat interrupt_stats[NUM_SMD_SUBSYSTEMS];
Angshuman Sarkarbad32df2012-02-01 19:52:52 +0530178
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700179#define SMSM_STATE_ADDR(entry) (smsm_info.state + entry)
180#define SMSM_INTR_MASK_ADDR(entry, host) (smsm_info.intr_mask + \
181 entry * SMSM_NUM_HOSTS + host)
182#define SMSM_INTR_MUX_ADDR(entry) (smsm_info.intr_mux + entry)
183
184/* Internal definitions which are not exported in some targets */
185enum {
186 SMSM_APPS_DEM_I = 3,
Brian Swetland2eb44eb2008-09-29 16:00:48 -0700187};
188
189static int msm_smd_debug_mask;
Brian Swetland2eb44eb2008-09-29 16:00:48 -0700190module_param_named(debug_mask, msm_smd_debug_mask,
191 int, S_IRUGO | S_IWUSR | S_IWGRP);
192
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700193#if defined(CONFIG_MSM_SMD_DEBUG)
194#define SMD_DBG(x...) do { \
195 if (msm_smd_debug_mask & MSM_SMD_DEBUG) \
196 printk(KERN_DEBUG x); \
197 } while (0)
198
199#define SMSM_DBG(x...) do { \
200 if (msm_smd_debug_mask & MSM_SMSM_DEBUG) \
201 printk(KERN_DEBUG x); \
202 } while (0)
203
204#define SMD_INFO(x...) do { \
205 if (msm_smd_debug_mask & MSM_SMD_INFO) \
206 printk(KERN_INFO x); \
207 } while (0)
208
209#define SMSM_INFO(x...) do { \
210 if (msm_smd_debug_mask & MSM_SMSM_INFO) \
211 printk(KERN_INFO x); \
212 } while (0)
Eric Holmberg98c6c642012-02-24 11:29:35 -0700213#define SMx_POWER_INFO(x...) do { \
214 if (msm_smd_debug_mask & MSM_SMx_POWER_INFO) \
215 printk(KERN_INFO x); \
216 } while (0)
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700217#else
218#define SMD_DBG(x...) do { } while (0)
219#define SMSM_DBG(x...) do { } while (0)
220#define SMD_INFO(x...) do { } while (0)
221#define SMSM_INFO(x...) do { } while (0)
Eric Holmberg98c6c642012-02-24 11:29:35 -0700222#define SMx_POWER_INFO(x...) do { } while (0)
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700223#endif
224
Brian Swetland2eb44eb2008-09-29 16:00:48 -0700225static unsigned last_heap_free = 0xffffffff;
226
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700227static inline void smd_write_intr(unsigned int val,
228 const void __iomem *addr);
229
230#if defined(CONFIG_ARCH_MSM7X30)
231#define MSM_TRIG_A2M_SMD_INT \
Taniya Das298de8c2012-02-16 11:45:31 +0530232 (smd_write_intr(1 << 0, MSM_APCS_GCC_BASE + 0x8))
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700233#define MSM_TRIG_A2Q6_SMD_INT \
Taniya Das298de8c2012-02-16 11:45:31 +0530234 (smd_write_intr(1 << 8, MSM_APCS_GCC_BASE + 0x8))
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700235#define MSM_TRIG_A2M_SMSM_INT \
Taniya Das298de8c2012-02-16 11:45:31 +0530236 (smd_write_intr(1 << 5, MSM_APCS_GCC_BASE + 0x8))
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700237#define MSM_TRIG_A2Q6_SMSM_INT \
Taniya Das298de8c2012-02-16 11:45:31 +0530238 (smd_write_intr(1 << 8, MSM_APCS_GCC_BASE + 0x8))
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700239#define MSM_TRIG_A2DSPS_SMD_INT
Jeff Hugo6a8057c2011-08-16 13:47:12 -0600240#define MSM_TRIG_A2DSPS_SMSM_INT
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700241#define MSM_TRIG_A2WCNSS_SMD_INT
242#define MSM_TRIG_A2WCNSS_SMSM_INT
243#elif defined(CONFIG_ARCH_MSM8X60)
244#define MSM_TRIG_A2M_SMD_INT \
245 (smd_write_intr(1 << 3, MSM_GCC_BASE + 0x8))
246#define MSM_TRIG_A2Q6_SMD_INT \
247 (smd_write_intr(1 << 15, MSM_GCC_BASE + 0x8))
248#define MSM_TRIG_A2M_SMSM_INT \
249 (smd_write_intr(1 << 4, MSM_GCC_BASE + 0x8))
250#define MSM_TRIG_A2Q6_SMSM_INT \
251 (smd_write_intr(1 << 14, MSM_GCC_BASE + 0x8))
252#define MSM_TRIG_A2DSPS_SMD_INT \
253 (smd_write_intr(1, MSM_SIC_NON_SECURE_BASE + 0x4080))
Jeff Hugo6a8057c2011-08-16 13:47:12 -0600254#define MSM_TRIG_A2DSPS_SMSM_INT
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700255#define MSM_TRIG_A2WCNSS_SMD_INT
256#define MSM_TRIG_A2WCNSS_SMSM_INT
Jeff Hugo56b933a2011-09-28 14:42:05 -0600257#elif defined(CONFIG_ARCH_MSM9615)
258#define MSM_TRIG_A2M_SMD_INT \
259 (smd_write_intr(1 << 3, MSM_APCS_GCC_BASE + 0x8))
260#define MSM_TRIG_A2Q6_SMD_INT \
261 (smd_write_intr(1 << 15, MSM_APCS_GCC_BASE + 0x8))
262#define MSM_TRIG_A2M_SMSM_INT \
263 (smd_write_intr(1 << 4, MSM_APCS_GCC_BASE + 0x8))
264#define MSM_TRIG_A2Q6_SMSM_INT \
265 (smd_write_intr(1 << 14, MSM_APCS_GCC_BASE + 0x8))
266#define MSM_TRIG_A2DSPS_SMD_INT
267#define MSM_TRIG_A2DSPS_SMSM_INT
268#define MSM_TRIG_A2WCNSS_SMD_INT
269#define MSM_TRIG_A2WCNSS_SMSM_INT
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700270#elif defined(CONFIG_ARCH_FSM9XXX)
271#define MSM_TRIG_A2Q6_SMD_INT \
272 (smd_write_intr(1 << 10, MSM_GCC_BASE + 0x8))
273#define MSM_TRIG_A2Q6_SMSM_INT \
274 (smd_write_intr(1 << 10, MSM_GCC_BASE + 0x8))
275#define MSM_TRIG_A2M_SMD_INT \
276 (smd_write_intr(1 << 0, MSM_GCC_BASE + 0x8))
277#define MSM_TRIG_A2M_SMSM_INT \
278 (smd_write_intr(1 << 5, MSM_GCC_BASE + 0x8))
279#define MSM_TRIG_A2DSPS_SMD_INT
Jeff Hugo6a8057c2011-08-16 13:47:12 -0600280#define MSM_TRIG_A2DSPS_SMSM_INT
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700281#define MSM_TRIG_A2WCNSS_SMD_INT
282#define MSM_TRIG_A2WCNSS_SMSM_INT
Eric Holmberg73d45462012-02-28 10:41:31 -0700283#elif defined(CONFIG_ARCH_MSM7X01A) || defined(CONFIG_ARCH_MSM7x25)
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700284#define MSM_TRIG_A2M_SMD_INT \
285 (smd_write_intr(1, MSM_CSR_BASE + 0x400 + (0) * 4))
Eric Holmberg73d45462012-02-28 10:41:31 -0700286#define MSM_TRIG_A2Q6_SMD_INT
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700287#define MSM_TRIG_A2M_SMSM_INT \
288 (smd_write_intr(1, MSM_CSR_BASE + 0x400 + (5) * 4))
Eric Holmberg73d45462012-02-28 10:41:31 -0700289#define MSM_TRIG_A2Q6_SMSM_INT
290#define MSM_TRIG_A2DSPS_SMD_INT
291#define MSM_TRIG_A2DSPS_SMSM_INT
292#define MSM_TRIG_A2WCNSS_SMD_INT
293#define MSM_TRIG_A2WCNSS_SMSM_INT
294#elif defined(CONFIG_ARCH_MSM7X27) || defined(CONFIG_ARCH_MSM7X27A)
295#define MSM_TRIG_A2M_SMD_INT \
296 (smd_write_intr(1, MSM_CSR_BASE + 0x400 + (0) * 4))
297#define MSM_TRIG_A2Q6_SMD_INT
298#define MSM_TRIG_A2M_SMSM_INT \
299 (smd_write_intr(1, MSM_CSR_BASE + 0x400 + (5) * 4))
300#define MSM_TRIG_A2Q6_SMSM_INT
301#define MSM_TRIG_A2DSPS_SMD_INT
302#define MSM_TRIG_A2DSPS_SMSM_INT
303#define MSM_TRIG_A2WCNSS_SMD_INT
304#define MSM_TRIG_A2WCNSS_SMSM_INT
305#else /* use platform device / device tree configuration */
306#define MSM_TRIG_A2M_SMD_INT
307#define MSM_TRIG_A2Q6_SMD_INT
308#define MSM_TRIG_A2M_SMSM_INT
309#define MSM_TRIG_A2Q6_SMSM_INT
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700310#define MSM_TRIG_A2DSPS_SMD_INT
Jeff Hugo6a8057c2011-08-16 13:47:12 -0600311#define MSM_TRIG_A2DSPS_SMSM_INT
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700312#define MSM_TRIG_A2WCNSS_SMD_INT
313#define MSM_TRIG_A2WCNSS_SMSM_INT
Brian Swetland37521a32009-07-01 18:30:47 -0700314#endif
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700315
Jeff Hugoee40b152012-02-09 17:39:47 -0700316/*
317 * stub out legacy macros if they are not being used so that the legacy
318 * code compiles even though it is not used
319 *
320 * these definitions should not be used in active code and will cause
321 * an early failure
322 */
323#ifndef INT_A9_M2A_0
324#define INT_A9_M2A_0 -1
325#endif
326#ifndef INT_A9_M2A_5
327#define INT_A9_M2A_5 -1
328#endif
329#ifndef INT_ADSP_A11
330#define INT_ADSP_A11 -1
331#endif
332#ifndef INT_ADSP_A11_SMSM
333#define INT_ADSP_A11_SMSM -1
334#endif
335#ifndef INT_DSPS_A11
336#define INT_DSPS_A11 -1
337#endif
338#ifndef INT_DSPS_A11_SMSM
339#define INT_DSPS_A11_SMSM -1
340#endif
341#ifndef INT_WCNSS_A11
342#define INT_WCNSS_A11 -1
343#endif
344#ifndef INT_WCNSS_A11_SMSM
345#define INT_WCNSS_A11_SMSM -1
346#endif
347
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700348#define SMD_LOOPBACK_CID 100
349
Eric Holmbergf6d7d1a2011-09-23 18:31:04 -0600350#define SMEM_SPINLOCK_SMEM_ALLOC "S:3"
351static remote_spinlock_t remote_spinlock;
352
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700353static LIST_HEAD(smd_ch_list_loopback);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700354static void smd_fake_irq_handler(unsigned long arg);
Eric Holmbergda31d042012-03-28 14:01:02 -0600355static void smsm_cb_snapshot(uint32_t use_wakelock);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700356
Karthikeyan Ramasubramanian3249a222012-04-18 17:16:49 -0600357static struct workqueue_struct *smsm_cb_wq;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700358static void notify_smsm_cb_clients_worker(struct work_struct *work);
359static DECLARE_WORK(smsm_cb_work, notify_smsm_cb_clients_worker);
Eric Holmbergc8002902011-09-16 13:55:57 -0600360static DEFINE_MUTEX(smsm_lock);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700361static struct smsm_state_info *smsm_states;
Angshuman Sarkar7ee0dca2011-08-22 21:37:34 +0530362static int spinlocks_initialized;
Karthikeyan Ramasubramanian7069c482012-03-22 09:21:20 -0600363static RAW_NOTIFIER_HEAD(smsm_driver_state_notifier_list);
364static DEFINE_MUTEX(smsm_driver_state_notifier_lock);
365static void smsm_driver_state_notify(uint32_t state, void *data);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700366
/*
 * Write @val to the memory-mapped interrupt-trigger register @addr.
 *
 * The wmb() orders all previously posted shared-memory writes before the
 * register write, so the remote processor sees consistent channel state
 * when it takes the interrupt.
 */
static inline void smd_write_intr(unsigned int val,
			const void __iomem *addr)
{
	wmb();
	__raw_writel(val, addr);
}
373
#ifdef CONFIG_WCNSS
/*
 * Wake RIVA (WCNSS) v1 silicon from power collapse by toggling GPIO 40
 * through the TLMM registers before an interrupt is sent to it.
 * Applies only when the SoC major version is 1.
 */
static inline void wakeup_v1_riva(void)
{
	/*
	 * workaround hack for RIVA v1 hardware bug
	 * trigger GPIO 40 to wake up RIVA from power collapse
	 * not to be sent to customers
	 */
	if (SOCINFO_VERSION_MAJOR(socinfo_get_version()) == 1) {
		__raw_writel(0x0, MSM_TLMM_BASE + 0x1284);
		__raw_writel(0x2, MSM_TLMM_BASE + 0x1284);
	}
	/* end workaround */
}
#else
/* Targets without WCNSS need no RIVA wakeup. */
static inline void wakeup_v1_riva(void) {}
#endif
391
Angshuman Sarkarbad32df2012-02-01 19:52:52 +0530392static inline void notify_modem_smd(void)
393{
394 static const struct interrupt_config_item *intr
395 = &private_intr_config[SMD_MODEM].smd;
Eric Holmberg7ad623a2012-03-01 14:41:10 -0700396 if (intr->out_base) {
397 ++interrupt_stats[SMD_MODEM].smd_out_config_count;
Angshuman Sarkarbad32df2012-02-01 19:52:52 +0530398 smd_write_intr(intr->out_bit_pos,
399 intr->out_base + intr->out_offset);
Eric Holmberg7ad623a2012-03-01 14:41:10 -0700400 } else {
401 ++interrupt_stats[SMD_MODEM].smd_out_hardcode_count;
Angshuman Sarkarbad32df2012-02-01 19:52:52 +0530402 MSM_TRIG_A2M_SMD_INT;
Eric Holmberg7ad623a2012-03-01 14:41:10 -0700403 }
Angshuman Sarkarbad32df2012-02-01 19:52:52 +0530404}
405
406static inline void notify_dsp_smd(void)
407{
408 static const struct interrupt_config_item *intr
409 = &private_intr_config[SMD_Q6].smd;
Eric Holmberg7ad623a2012-03-01 14:41:10 -0700410 if (intr->out_base) {
411 ++interrupt_stats[SMD_Q6].smd_out_config_count;
Angshuman Sarkarbad32df2012-02-01 19:52:52 +0530412 smd_write_intr(intr->out_bit_pos,
413 intr->out_base + intr->out_offset);
Eric Holmberg7ad623a2012-03-01 14:41:10 -0700414 } else {
415 ++interrupt_stats[SMD_Q6].smd_out_hardcode_count;
Angshuman Sarkarbad32df2012-02-01 19:52:52 +0530416 MSM_TRIG_A2Q6_SMD_INT;
Eric Holmberg7ad623a2012-03-01 14:41:10 -0700417 }
Angshuman Sarkarbad32df2012-02-01 19:52:52 +0530418}
419
420static inline void notify_dsps_smd(void)
421{
422 static const struct interrupt_config_item *intr
423 = &private_intr_config[SMD_DSPS].smd;
Eric Holmberg7ad623a2012-03-01 14:41:10 -0700424 if (intr->out_base) {
425 ++interrupt_stats[SMD_DSPS].smd_out_config_count;
Angshuman Sarkarbad32df2012-02-01 19:52:52 +0530426 smd_write_intr(intr->out_bit_pos,
427 intr->out_base + intr->out_offset);
Eric Holmberg7ad623a2012-03-01 14:41:10 -0700428 } else {
429 ++interrupt_stats[SMD_DSPS].smd_out_hardcode_count;
Angshuman Sarkarbad32df2012-02-01 19:52:52 +0530430 MSM_TRIG_A2DSPS_SMD_INT;
Eric Holmberg7ad623a2012-03-01 14:41:10 -0700431 }
Angshuman Sarkarbad32df2012-02-01 19:52:52 +0530432}
433
434static inline void notify_wcnss_smd(void)
435{
436 static const struct interrupt_config_item *intr
437 = &private_intr_config[SMD_WCNSS].smd;
438 wakeup_v1_riva();
439
Eric Holmberg7ad623a2012-03-01 14:41:10 -0700440 if (intr->out_base) {
441 ++interrupt_stats[SMD_WCNSS].smd_out_config_count;
Angshuman Sarkarbad32df2012-02-01 19:52:52 +0530442 smd_write_intr(intr->out_bit_pos,
443 intr->out_base + intr->out_offset);
Eric Holmberg7ad623a2012-03-01 14:41:10 -0700444 } else {
445 ++interrupt_stats[SMD_WCNSS].smd_out_hardcode_count;
Angshuman Sarkarbad32df2012-02-01 19:52:52 +0530446 MSM_TRIG_A2WCNSS_SMD_INT;
Eric Holmberg7ad623a2012-03-01 14:41:10 -0700447 }
Angshuman Sarkarbad32df2012-02-01 19:52:52 +0530448}
449
Jeff Hugo918b2dc2012-03-21 13:42:09 -0600450static inline void notify_rpm_smd(void)
451{
452 static const struct interrupt_config_item *intr
453 = &private_intr_config[SMD_RPM].smd;
454
455 if (intr->out_base) {
456 ++interrupt_stats[SMD_RPM].smd_out_config_count;
457 smd_write_intr(intr->out_bit_pos,
458 intr->out_base + intr->out_offset);
459 }
460}
461
Angshuman Sarkarbad32df2012-02-01 19:52:52 +0530462static inline void notify_modem_smsm(void)
463{
464 static const struct interrupt_config_item *intr
465 = &private_intr_config[SMD_MODEM].smsm;
Eric Holmberg7ad623a2012-03-01 14:41:10 -0700466 if (intr->out_base) {
467 ++interrupt_stats[SMD_MODEM].smsm_out_config_count;
Angshuman Sarkarbad32df2012-02-01 19:52:52 +0530468 smd_write_intr(intr->out_bit_pos,
469 intr->out_base + intr->out_offset);
Eric Holmberg7ad623a2012-03-01 14:41:10 -0700470 } else {
471 ++interrupt_stats[SMD_MODEM].smsm_out_hardcode_count;
Angshuman Sarkarbad32df2012-02-01 19:52:52 +0530472 MSM_TRIG_A2M_SMSM_INT;
Eric Holmberg7ad623a2012-03-01 14:41:10 -0700473 }
Angshuman Sarkarbad32df2012-02-01 19:52:52 +0530474}
475
476static inline void notify_dsp_smsm(void)
477{
478 static const struct interrupt_config_item *intr
479 = &private_intr_config[SMD_Q6].smsm;
Eric Holmberg7ad623a2012-03-01 14:41:10 -0700480 if (intr->out_base) {
481 ++interrupt_stats[SMD_Q6].smsm_out_config_count;
Angshuman Sarkarbad32df2012-02-01 19:52:52 +0530482 smd_write_intr(intr->out_bit_pos,
483 intr->out_base + intr->out_offset);
Eric Holmberg7ad623a2012-03-01 14:41:10 -0700484 } else {
485 ++interrupt_stats[SMD_Q6].smsm_out_hardcode_count;
Angshuman Sarkarbad32df2012-02-01 19:52:52 +0530486 MSM_TRIG_A2Q6_SMSM_INT;
Eric Holmberg7ad623a2012-03-01 14:41:10 -0700487 }
Angshuman Sarkarbad32df2012-02-01 19:52:52 +0530488}
489
490static inline void notify_dsps_smsm(void)
491{
492 static const struct interrupt_config_item *intr
493 = &private_intr_config[SMD_DSPS].smsm;
Eric Holmberg7ad623a2012-03-01 14:41:10 -0700494 if (intr->out_base) {
495 ++interrupt_stats[SMD_DSPS].smsm_out_config_count;
Angshuman Sarkarbad32df2012-02-01 19:52:52 +0530496 smd_write_intr(intr->out_bit_pos,
497 intr->out_base + intr->out_offset);
Eric Holmberg7ad623a2012-03-01 14:41:10 -0700498 } else {
499 ++interrupt_stats[SMD_DSPS].smsm_out_hardcode_count;
Angshuman Sarkarbad32df2012-02-01 19:52:52 +0530500 MSM_TRIG_A2DSPS_SMSM_INT;
Eric Holmberg7ad623a2012-03-01 14:41:10 -0700501 }
Angshuman Sarkarbad32df2012-02-01 19:52:52 +0530502}
503
504static inline void notify_wcnss_smsm(void)
505{
506 static const struct interrupt_config_item *intr
507 = &private_intr_config[SMD_WCNSS].smsm;
508 wakeup_v1_riva();
509
Eric Holmberg7ad623a2012-03-01 14:41:10 -0700510 if (intr->out_base) {
511 ++interrupt_stats[SMD_WCNSS].smsm_out_config_count;
Angshuman Sarkarbad32df2012-02-01 19:52:52 +0530512 smd_write_intr(intr->out_bit_pos,
513 intr->out_base + intr->out_offset);
Eric Holmberg7ad623a2012-03-01 14:41:10 -0700514 } else {
515 ++interrupt_stats[SMD_WCNSS].smsm_out_hardcode_count;
Angshuman Sarkarbad32df2012-02-01 19:52:52 +0530516 MSM_TRIG_A2WCNSS_SMSM_INT;
Eric Holmberg7ad623a2012-03-01 14:41:10 -0700517 }
Angshuman Sarkarbad32df2012-02-01 19:52:52 +0530518}
519
/*
 * Interrupt every remote processor whose SMSM interrupt mask subscribes
 * to the changed bits of @smsm_entry, then snapshot state for local SMSM
 * callback clients.
 *
 * @smsm_entry:  index of the SMSM state entry that changed
 * @notify_mask: bits of that entry which changed
 */
static void notify_other_smsm(uint32_t smsm_entry, uint32_t notify_mask)
{
	/* older protocol don't use smsm_intr_mask,
	   but still communicates with modem */
	if (!smsm_info.intr_mask ||
	    (__raw_readl(SMSM_INTR_MASK_ADDR(smsm_entry, SMSM_MODEM))
	     & notify_mask))
		notify_modem_smsm();

	if (smsm_info.intr_mask &&
	    (__raw_readl(SMSM_INTR_MASK_ADDR(smsm_entry, SMSM_Q6))
	     & notify_mask)) {
		uint32_t mux_val;

		/* QSD8x50 additionally increments the apps->Q6 interrupt
		 * mux word before raising the interrupt */
		if (cpu_is_qsd8x50() && smsm_info.intr_mux) {
			mux_val = __raw_readl(
					SMSM_INTR_MUX_ADDR(SMEM_APPS_Q6_SMSM));
			mux_val++;
			__raw_writel(mux_val,
					SMSM_INTR_MUX_ADDR(SMEM_APPS_Q6_SMSM));
		}
		notify_dsp_smsm();
	}

	if (smsm_info.intr_mask &&
	    (__raw_readl(SMSM_INTR_MASK_ADDR(smsm_entry, SMSM_WCNSS))
	     & notify_mask)) {
		notify_wcnss_smsm();
	}

	if (smsm_info.intr_mask &&
	    (__raw_readl(SMSM_INTR_MASK_ADDR(smsm_entry, SMSM_DSPS))
	     & notify_mask)) {
		notify_dsps_smsm();
	}

	/*
	 * Notify local SMSM callback clients without wakelock since this
	 * code is used by power management during power-down/-up sequencing
	 * on DEM-based targets. Grabbing a wakelock in this case will
	 * abort the power-down sequencing.
	 */
	smsm_cb_snapshot(0);
}
564
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700565void smd_diag(void)
Brian Swetland2eb44eb2008-09-29 16:00:48 -0700566{
567 char *x;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700568 int size;
Brian Swetland2eb44eb2008-09-29 16:00:48 -0700569
570 x = smem_find(ID_DIAG_ERR_MSG, SZ_DIAG_ERR_MSG);
571 if (x != 0) {
572 x[SZ_DIAG_ERR_MSG - 1] = 0;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700573 SMD_INFO("smem: DIAG '%s'\n", x);
574 }
575
576 x = smem_get_entry(SMEM_ERR_CRASH_LOG, &size);
577 if (x != 0) {
578 x[size - 1] = 0;
579 pr_err("smem: CRASH LOG\n'%s'\n", x);
Brian Swetland2eb44eb2008-09-29 16:00:48 -0700580 }
581}
582
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700583
/*
 * Handle a detected modem crash: log it, dump the shared-memory
 * diagnostics, then spin forever waiting for the modem or the watchdog
 * to reset the system.  This function never returns.
 */
static void handle_modem_crash(void)
{
	pr_err("MODEM/AMSS has CRASHED\n");
	smd_diag();

	/* hard reboot if possible FIXME
	if (msm_reset_hook)
		msm_reset_hook();
	*/

	/* in this case the modem or watchdog should reboot us */
	for (;;)
		;
}
598
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700599int smsm_check_for_modem_crash(void)
Arve Hjønnevåg28379412009-05-20 16:52:36 -0700600{
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700601 /* if the modem's not ready yet, we have to hope for the best */
602 if (!smsm_info.state)
603 return 0;
Arve Hjønnevåg28379412009-05-20 16:52:36 -0700604
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700605 if (__raw_readl(SMSM_STATE_ADDR(SMSM_MODEM_STATE)) & SMSM_RESET) {
Brian Swetland2eb44eb2008-09-29 16:00:48 -0700606 handle_modem_crash();
607 return -1;
Brian Swetland2eb44eb2008-09-29 16:00:48 -0700608 }
Brian Swetland5b0f5a32009-04-26 18:38:49 -0700609 return 0;
Brian Swetland2eb44eb2008-09-29 16:00:48 -0700610}
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700611EXPORT_SYMBOL(smsm_check_for_modem_crash);
Brian Swetland2eb44eb2008-09-29 16:00:48 -0700612
Brian Swetland2eb44eb2008-09-29 16:00:48 -0700613/* the spinlock is used to synchronize between the
Brian Swetland03e00cd2009-07-01 17:58:37 -0700614 * irq handler and code that mutates the channel
615 * list or fiddles with channel state
616 */
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700617static DEFINE_SPINLOCK(smd_lock);
Brian Swetland03e00cd2009-07-01 17:58:37 -0700618DEFINE_SPINLOCK(smem_lock);
Brian Swetland2eb44eb2008-09-29 16:00:48 -0700619
620/* the mutex is used during open() and close()
Brian Swetland03e00cd2009-07-01 17:58:37 -0700621 * operations to avoid races while creating or
622 * destroying smd_channel structures
623 */
Brian Swetland2eb44eb2008-09-29 16:00:48 -0700624static DEFINE_MUTEX(smd_creation_mutex);
625
626static int smd_initialized;
627
/* Version-1 shared layout: half-channel control words with in-line
 * per-direction data buffers. */
struct smd_shared_v1 {
	struct smd_half_channel ch0;
	unsigned char data0[SMD_BUF_SIZE];
	struct smd_half_channel ch1;
	unsigned char data1[SMD_BUF_SIZE];
};

/* Version-2 shared layout: control words only, no in-line data buffers. */
struct smd_shared_v2 {
	struct smd_half_channel ch0;
	struct smd_half_channel ch1;
};

/* Version-2 layout for processors requiring word-sized accesses. */
struct smd_shared_v2_word_access {
	struct smd_half_channel_word_access ch0;
	struct smd_half_channel_word_access ch1;
};
644
/*
 * Per-channel state for one local SMD endpoint.  The read/write/*_avail
 * function pointers select the concrete (stream vs. packet) implementation,
 * and half_ch abstracts the access width of the shared half-channel words.
 */
struct smd_channel {
	volatile void *send; /* some variant of smd_half_channel */
	volatile void *recv; /* some variant of smd_half_channel */
	unsigned char *send_data;	/* outgoing FIFO data buffer */
	unsigned char *recv_data;	/* incoming FIFO data buffer */
	unsigned fifo_size;
	unsigned fifo_mask;		/* presumably fifo_size - 1 for index
					 * wrapping -- TODO confirm at init */
	struct list_head ch_list;	/* membership in a per-edge channel list */

	unsigned current_packet;	/* packet-mode read bookkeeping */
	unsigned n;			/* channel index -- NOTE(review): looks
					 * like the allocation-table slot; verify */
	void *priv;			/* client cookie handed back to notify() */
	void (*notify)(void *priv, unsigned flags);	/* client event callback */

	/* stream- or packet-mode I/O operations */
	int (*read)(smd_channel_t *ch, void *data, int len, int user_buf);
	int (*write)(smd_channel_t *ch, const void *data, int len,
			int user_buf);
	int (*read_avail)(smd_channel_t *ch);
	int (*write_avail)(smd_channel_t *ch);
	int (*read_from_cb)(smd_channel_t *ch, void *data, int len,
			int user_buf);

	void (*update_state)(smd_channel_t *ch);
	unsigned last_state;		/* last observed remote channel state */
	void (*notify_other_cpu)(void);	/* raises the outgoing interrupt for
					 * this channel's edge */

	char name[20];			/* channel name */
	struct platform_device pdev;	/* device registered for this channel */
	unsigned type;			/* edge type of this channel */

	int pending_pkt_sz;		/* packet-mode read bookkeeping */

	char is_pkt_ch;			/* nonzero for packet-mode channels */

	/*
	 * private internal functions to access *send and *recv.
	 * never to be exported outside of smd
	 */
	struct smd_half_channel_access *half_ch;
};
685
/* Processor-ID pair (plus optional subsystem name) for one SMD edge. */
struct edge_to_pid {
	uint32_t local_pid;	/* processor ID of the local endpoint */
	uint32_t remote_pid;	/* processor ID of the remote endpoint */
	char subsys_name[SMD_MAX_CH_NAME_LEN];	/* empty when unset */
};

/**
 * Maps edge type to local and remote processor ID's.
 *
 * subsys_name is populated only for a subset of the apps-local edges;
 * entries without a name leave it zeroed.
 */
static struct edge_to_pid edge_to_pids[] = {
	[SMD_APPS_MODEM] = {SMD_APPS, SMD_MODEM, "modem"},
	[SMD_APPS_QDSP] = {SMD_APPS, SMD_Q6, "q6"},
	[SMD_MODEM_QDSP] = {SMD_MODEM, SMD_Q6},
	[SMD_APPS_DSPS] = {SMD_APPS, SMD_DSPS, "dsps"},
	[SMD_MODEM_DSPS] = {SMD_MODEM, SMD_DSPS},
	[SMD_QDSP_DSPS] = {SMD_Q6, SMD_DSPS},
	[SMD_APPS_WCNSS] = {SMD_APPS, SMD_WCNSS, "wcnss"},
	[SMD_MODEM_WCNSS] = {SMD_MODEM, SMD_WCNSS},
	[SMD_QDSP_WCNSS] = {SMD_Q6, SMD_WCNSS},
	[SMD_DSPS_WCNSS] = {SMD_DSPS, SMD_WCNSS},
	[SMD_APPS_Q6FW] = {SMD_APPS, SMD_MODEM_Q6_FW},
	[SMD_MODEM_Q6FW] = {SMD_MODEM, SMD_MODEM_Q6_FW},
	[SMD_QDSP_Q6FW] = {SMD_Q6, SMD_MODEM_Q6_FW},
	[SMD_DSPS_Q6FW] = {SMD_DSPS, SMD_MODEM_Q6_FW},
	[SMD_WCNSS_Q6FW] = {SMD_WCNSS, SMD_MODEM_Q6_FW},
	[SMD_APPS_RPM] = {SMD_APPS, SMD_RPM},
	[SMD_MODEM_RPM] = {SMD_MODEM, SMD_RPM},
	[SMD_QDSP_RPM] = {SMD_Q6, SMD_RPM},
	[SMD_WCNSS_RPM] = {SMD_WCNSS, SMD_RPM},
};
716
/*
 * Associates a subsystem-restart notifier callback with the processor
 * whose SMD/SMSM state must be cleaned up when that subsystem restarts.
 */
struct restart_notifier_block {
	unsigned processor;		/* processor ID being watched */
	char *name;			/* human-readable subsystem name */
	struct notifier_block nb;	/* notifier callback registration */
};
722
/* nonzero skips the SMSM reset handshake during modem restart */
static int disable_smsm_reset_handshake;
static struct platform_device loopback_tty_pdev = {.name = "LOOPBACK_TTY"};

/* channel lifecycle lists (iterated under smd_lock in the IRQ paths) */
static LIST_HEAD(smd_ch_closed_list);	/* channels not currently open */
static LIST_HEAD(smd_ch_closing_list);	/* waiting for remote side to close */
static LIST_HEAD(smd_ch_to_close_list);	/* queued for the close workqueue */
/* per-edge lists of open channels, one per remote processor */
static LIST_HEAD(smd_ch_list_modem);
static LIST_HEAD(smd_ch_list_dsp);
static LIST_HEAD(smd_ch_list_dsps);
static LIST_HEAD(smd_ch_list_wcnss);
static LIST_HEAD(smd_ch_list_rpm);

/* one flag per slot of the 64-entry shared channel allocation table */
static unsigned char smd_ch_allocated[64];
static struct work_struct probe_work;

static void finalize_channel_close_fn(struct work_struct *work);
static DECLARE_WORK(finalize_channel_close_work, finalize_channel_close_fn);
static struct workqueue_struct *channel_close_wq;

static int smd_alloc_channel(struct smd_alloc_elm *alloc_elm);

/* on smp systems, the probe might get called from multiple cores,
   hence use a lock */
static DEFINE_MUTEX(smd_probe_lock);
747
748static void smd_channel_probe_worker(struct work_struct *work)
749{
750 struct smd_alloc_elm *shared;
751 unsigned n;
752 uint32_t type;
753
754 shared = smem_find(ID_CH_ALLOC_TBL, sizeof(*shared) * 64);
755
756 if (!shared) {
757 pr_err("%s: allocation table not initialized\n", __func__);
758 return;
759 }
760
761 mutex_lock(&smd_probe_lock);
762 for (n = 0; n < 64; n++) {
763 if (smd_ch_allocated[n])
764 continue;
765
766 /* channel should be allocated only if APPS
767 processor is involved */
768 type = SMD_CHANNEL_TYPE(shared[n].type);
Jeff Hugo918b2dc2012-03-21 13:42:09 -0600769 if (type >= ARRAY_SIZE(edge_to_pids) ||
770 edge_to_pids[type].local_pid != SMD_APPS)
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700771 continue;
772 if (!shared[n].ref_count)
773 continue;
774 if (!shared[n].name[0])
775 continue;
776
777 if (!smd_alloc_channel(&shared[n]))
778 smd_ch_allocated[n] = 1;
779 else
780 SMD_INFO("Probe skipping ch %d, not allocated\n", n);
781 }
782 mutex_unlock(&smd_probe_lock);
783}
784
785/**
786 * Lookup processor ID and determine if it belongs to the proved edge
787 * type.
788 *
789 * @shared2: Pointer to v2 shared channel structure
790 * @type: Edge type
791 * @pid: Processor ID of processor on edge
792 * @local_ch: Channel that belongs to processor @pid
793 * @remote_ch: Other side of edge contained @pid
794 *
795 * Returns 0 for not on edge, 1 for found on edge
796 */
797static int pid_is_on_edge(struct smd_shared_v2 *shared2,
798 uint32_t type, uint32_t pid,
799 struct smd_half_channel **local_ch,
800 struct smd_half_channel **remote_ch
801 )
802{
803 int ret = 0;
804 struct edge_to_pid *edge;
805
806 *local_ch = 0;
807 *remote_ch = 0;
808
809 if (!shared2 || (type >= ARRAY_SIZE(edge_to_pids)))
810 return 0;
811
812 edge = &edge_to_pids[type];
813 if (edge->local_pid != edge->remote_pid) {
814 if (pid == edge->local_pid) {
815 *local_ch = &shared2->ch0;
816 *remote_ch = &shared2->ch1;
817 ret = 1;
818 } else if (pid == edge->remote_pid) {
819 *local_ch = &shared2->ch1;
820 *remote_ch = &shared2->ch0;
821 ret = 1;
822 }
823 }
824
825 return ret;
826}
827
Eric Holmberg17992c12012-02-29 12:54:44 -0700828/*
829 * Returns a pointer to the subsystem name or NULL if no
830 * subsystem name is available.
831 *
832 * @type - Edge definition
833 */
834const char *smd_edge_to_subsystem(uint32_t type)
835{
836 const char *subsys = NULL;
837
838 if (type < ARRAY_SIZE(edge_to_pids)) {
839 subsys = edge_to_pids[type].subsys_name;
840 if (subsys[0] == 0x0)
841 subsys = NULL;
842 }
843 return subsys;
844}
845EXPORT_SYMBOL(smd_edge_to_subsystem);
846
Eric Holmberg7ad623a2012-03-01 14:41:10 -0700847/*
848 * Returns a pointer to the subsystem name given the
849 * remote processor ID.
850 *
851 * @pid Remote processor ID
852 * @returns Pointer to subsystem name or NULL if not found
853 */
854const char *smd_pid_to_subsystem(uint32_t pid)
855{
856 const char *subsys = NULL;
857 int i;
858
859 for (i = 0; i < ARRAY_SIZE(edge_to_pids); ++i) {
860 if (pid == edge_to_pids[i].remote_pid &&
861 edge_to_pids[i].subsys_name[0] != 0x0
862 ) {
863 subsys = edge_to_pids[i].subsys_name;
864 break;
865 }
866 }
867
868 return subsys;
869}
870EXPORT_SYMBOL(smd_pid_to_subsystem);
Eric Holmberg17992c12012-02-29 12:54:44 -0700871
Eric Holmberg2a563c32011-10-05 14:51:43 -0600872static void smd_reset_edge(struct smd_half_channel *ch, unsigned new_state)
873{
874 if (ch->state != SMD_SS_CLOSED) {
875 ch->state = new_state;
876 ch->fDSR = 0;
877 ch->fCTS = 0;
878 ch->fCD = 0;
879 ch->fSTATE = 1;
880 }
881}
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700882
/*
 * Walk every allocated channel in the shared table and force the
 * half-channels owned by processor @pid into @new_state (used during
 * subsystem restart to simulate the remote side closing cleanly).
 */
static void smd_channel_reset_state(struct smd_alloc_elm *shared,
		unsigned new_state, unsigned pid)
{
	unsigned n;
	struct smd_shared_v2 *shared2;
	uint32_t type;
	struct smd_half_channel *local_ch;
	struct smd_half_channel *remote_ch;

	for (n = 0; n < SMD_CHANNELS; n++) {
		/* skip table slots that were never fully allocated */
		if (!shared[n].ref_count)
			continue;
		if (!shared[n].name[0])
			continue;

		type = SMD_CHANNEL_TYPE(shared[n].type);
		shared2 = smem_alloc(SMEM_SMD_BASE_ID + n, sizeof(*shared2));
		if (!shared2)
			continue;

		/* reset the half-channel owned by the restarting pid */
		if (pid_is_on_edge(shared2, type, pid, &local_ch, &remote_ch))
			smd_reset_edge(local_ch, new_state);

		/*
		 * ModemFW is in the same subsystem as ModemSW, but has
		 * separate SMD edges that need to be reset.
		 */
		if (pid == SMSM_MODEM &&
				pid_is_on_edge(shared2, type, SMD_MODEM_Q6_FW,
					&local_ch, &remote_ch))
			smd_reset_edge(local_ch, new_state);
	}
}
916
917
/*
 * Clean up all SMD/SMSM state belonging to processor @restart_pid after
 * a subsystem restart: release remote spinlocks it may have held, clear
 * its SMSM state word (re-running the init handshake for the modem),
 * then drive every affected channel to CLOSING and finally CLOSED so
 * local clients receive normal close events.
 */
void smd_channel_reset(uint32_t restart_pid)
{
	struct smd_alloc_elm *shared;
	unsigned long flags;

	SMD_DBG("%s: starting reset\n", __func__);
	shared = smem_find(ID_CH_ALLOC_TBL, sizeof(*shared) * 64);
	if (!shared) {
		pr_err("%s: allocation table not initialized\n", __func__);
		return;
	}

	/* release any held spinlocks */
	remote_spin_release(&remote_spinlock, restart_pid);
	remote_spin_release_all(restart_pid);

	/* reset SMSM entry */
	if (smsm_info.state) {
		writel_relaxed(0, SMSM_STATE_ADDR(restart_pid));

		/* restart SMSM init handshake */
		if (restart_pid == SMSM_MODEM) {
			smsm_change_state(SMSM_APPS_STATE,
				SMSM_INIT | SMSM_SMD_LOOPBACK | SMSM_RESET,
				0);
		}

		/* notify SMSM processors */
		smsm_irq_handler(0, 0);
		notify_modem_smsm();
		notify_dsp_smsm();
		notify_dsps_smsm();
		notify_wcnss_smsm();
	}

	/* change all remote states to CLOSING */
	mutex_lock(&smd_probe_lock);
	spin_lock_irqsave(&smd_lock, flags);
	smd_channel_reset_state(shared, SMD_SS_CLOSING, restart_pid);
	spin_unlock_irqrestore(&smd_lock, flags);
	mutex_unlock(&smd_probe_lock);

	/* notify SMD processors (mb ensures state writes land first) */
	mb();
	smd_fake_irq_handler(0);
	notify_modem_smd();
	notify_dsp_smd();
	notify_dsps_smd();
	notify_wcnss_smd();

	/* change all remote states to CLOSED */
	mutex_lock(&smd_probe_lock);
	spin_lock_irqsave(&smd_lock, flags);
	smd_channel_reset_state(shared, SMD_SS_CLOSED, restart_pid);
	spin_unlock_irqrestore(&smd_lock, flags);
	mutex_unlock(&smd_probe_lock);

	/* notify SMD processors */
	mb();
	smd_fake_irq_handler(0);
	notify_modem_smd();
	notify_dsp_smd();
	notify_dsps_smd();
	notify_wcnss_smd();

	SMD_DBG("%s: finished reset\n", __func__);
}
985
Brian Swetland2eb44eb2008-09-29 16:00:48 -0700986/* how many bytes are available for reading */
987static int smd_stream_read_avail(struct smd_channel *ch)
988{
Jeff Hugo918b2dc2012-03-21 13:42:09 -0600989 return (ch->half_ch->get_head(ch->recv) -
990 ch->half_ch->get_tail(ch->recv)) & ch->fifo_mask;
Brian Swetland2eb44eb2008-09-29 16:00:48 -0700991}
992
993/* how many bytes we are free to write */
994static int smd_stream_write_avail(struct smd_channel *ch)
995{
Jeff Hugo918b2dc2012-03-21 13:42:09 -0600996 return ch->fifo_mask - ((ch->half_ch->get_head(ch->send) -
997 ch->half_ch->get_tail(ch->send)) & ch->fifo_mask);
Brian Swetland2eb44eb2008-09-29 16:00:48 -0700998}
999
1000static int smd_packet_read_avail(struct smd_channel *ch)
1001{
1002 if (ch->current_packet) {
1003 int n = smd_stream_read_avail(ch);
1004 if (n > ch->current_packet)
1005 n = ch->current_packet;
1006 return n;
1007 } else {
1008 return 0;
1009 }
1010}
1011
1012static int smd_packet_write_avail(struct smd_channel *ch)
1013{
1014 int n = smd_stream_write_avail(ch);
1015 return n > SMD_HEADER_SIZE ? n - SMD_HEADER_SIZE : 0;
1016}
1017
1018static int ch_is_open(struct smd_channel *ch)
1019{
Jeff Hugo918b2dc2012-03-21 13:42:09 -06001020 return (ch->half_ch->get_state(ch->recv) == SMD_SS_OPENED ||
1021 ch->half_ch->get_state(ch->recv) == SMD_SS_FLUSHING)
1022 && (ch->half_ch->get_state(ch->send) == SMD_SS_OPENED);
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001023}
1024
1025/* provide a pointer and length to readable data in the fifo */
1026static unsigned ch_read_buffer(struct smd_channel *ch, void **ptr)
1027{
Jeff Hugo918b2dc2012-03-21 13:42:09 -06001028 unsigned head = ch->half_ch->get_head(ch->recv);
1029 unsigned tail = ch->half_ch->get_tail(ch->recv);
Brian Swetland5b0f5a32009-04-26 18:38:49 -07001030 *ptr = (void *) (ch->recv_data + tail);
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001031
1032 if (tail <= head)
1033 return head - tail;
1034 else
Brian Swetland5b0f5a32009-04-26 18:38:49 -07001035 return ch->fifo_size - tail;
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001036}
1037
/*
 * Nonzero when the remote side has set the fBLOCKREADINTR flow-control
 * flag, asking us not to interrupt it after reads.
 */
static int read_intr_blocked(struct smd_channel *ch)
{
	return ch->half_ch->get_fBLOCKREADINTR(ch->recv);
}
1042
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001043/* advance the fifo read pointer after data from ch_read_buffer is consumed */
1044static void ch_read_done(struct smd_channel *ch, unsigned count)
1045{
1046 BUG_ON(count > smd_stream_read_avail(ch));
Jeff Hugo918b2dc2012-03-21 13:42:09 -06001047 ch->half_ch->set_tail(ch->recv,
1048 (ch->half_ch->get_tail(ch->recv) + count) & ch->fifo_mask);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001049 wmb();
Jeff Hugo918b2dc2012-03-21 13:42:09 -06001050 ch->half_ch->set_fTAIL(ch->send, 1);
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001051}
1052
/* basic read interface to ch_read_{buffer,done} used
 * by smd_*_read() and update_packet_state()
 * will read-and-discard if the _data pointer is null
 */
static int ch_read(struct smd_channel *ch, void *_data, int len, int user_buf)
{
	unsigned char *dst = _data;
	int remaining = len;
	void *src;
	unsigned chunk;
	int rc = 0;

	while (remaining > 0) {
		chunk = ch_read_buffer(ch, &src);
		if (chunk == 0)
			break;
		if (chunk > remaining)
			chunk = remaining;

		/* a NULL destination means discard the bytes */
		if (_data) {
			if (user_buf) {
				rc = copy_to_user(dst, src, chunk);
				if (rc > 0)
					pr_err("%s: copy_to_user could not copy %i bytes.\n",
						__func__, rc);
			} else {
				memcpy(dst, src, chunk);
			}
		}

		dst += chunk;
		remaining -= chunk;
		/* release the fifo space even if the user copy fell short */
		ch_read_done(ch, chunk);
	}

	return len - remaining;
}
1093
/* per-mode update_state hook for stream channels */
static void update_stream_state(struct smd_channel *ch)
{
	/* streams have no special state requiring updating */
}
1098
/*
 * If no packet is in progress, pull the next packet header out of the
 * stream and latch its payload length into ch->current_packet.  Loops
 * so zero-length packets are consumed immediately.
 */
static void update_packet_state(struct smd_channel *ch)
{
	unsigned hdr[5];
	int r;

	/* can't do anything if we're in the middle of a packet */
	while (ch->current_packet == 0) {
		/* discard 0 length packets if any */

		/* don't bother unless we can get the full header */
		if (smd_stream_read_avail(ch) < SMD_HEADER_SIZE)
			return;

		/* avail was checked above, so a short read is impossible */
		r = ch_read(ch, hdr, SMD_HEADER_SIZE, 0);
		BUG_ON(r != SMD_HEADER_SIZE);

		/* first header word is the payload length */
		ch->current_packet = hdr[0];
	}
}
1118
1119/* provide a pointer and length to next free space in the fifo */
1120static unsigned ch_write_buffer(struct smd_channel *ch, void **ptr)
1121{
Jeff Hugo918b2dc2012-03-21 13:42:09 -06001122 unsigned head = ch->half_ch->get_head(ch->send);
1123 unsigned tail = ch->half_ch->get_tail(ch->send);
Brian Swetland5b0f5a32009-04-26 18:38:49 -07001124 *ptr = (void *) (ch->send_data + head);
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001125
1126 if (head < tail) {
1127 return tail - head - 1;
1128 } else {
1129 if (tail == 0)
Brian Swetland5b0f5a32009-04-26 18:38:49 -07001130 return ch->fifo_size - head - 1;
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001131 else
Brian Swetland5b0f5a32009-04-26 18:38:49 -07001132 return ch->fifo_size - head;
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001133 }
1134}
1135
1136/* advace the fifo write pointer after freespace
1137 * from ch_write_buffer is filled
1138 */
1139static void ch_write_done(struct smd_channel *ch, unsigned count)
1140{
1141 BUG_ON(count > smd_stream_write_avail(ch));
Jeff Hugo918b2dc2012-03-21 13:42:09 -06001142 ch->half_ch->set_head(ch->send,
1143 (ch->half_ch->get_head(ch->send) + count) & ch->fifo_mask);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001144 wmb();
Jeff Hugo918b2dc2012-03-21 13:42:09 -06001145 ch->half_ch->set_fHEAD(ch->send, 1);
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001146}
1147
Brian Swetland5b0f5a32009-04-26 18:38:49 -07001148static void ch_set_state(struct smd_channel *ch, unsigned n)
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001149{
1150 if (n == SMD_SS_OPENED) {
Jeff Hugo918b2dc2012-03-21 13:42:09 -06001151 ch->half_ch->set_fDSR(ch->send, 1);
1152 ch->half_ch->set_fCTS(ch->send, 1);
1153 ch->half_ch->set_fCD(ch->send, 1);
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001154 } else {
Jeff Hugo918b2dc2012-03-21 13:42:09 -06001155 ch->half_ch->set_fDSR(ch->send, 0);
1156 ch->half_ch->set_fCTS(ch->send, 0);
1157 ch->half_ch->set_fCD(ch->send, 0);
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001158 }
Jeff Hugo918b2dc2012-03-21 13:42:09 -06001159 ch->half_ch->set_state(ch->send, n);
1160 ch->half_ch->set_fSTATE(ch->send, 1);
Brian Swetland5b0f5a32009-04-26 18:38:49 -07001161 ch->notify_other_cpu();
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001162}
1163
/*
 * Schedule the channel probe worker if the shared-memory heap has grown
 * since the last check -- a new allocation may be a new SMD channel.
 */
static void do_smd_probe(void)
{
	struct smem_shared *shared = (void *) MSM_SHARED_RAM_BASE;
	if (shared->heap_info.free_offset != last_heap_free) {
		last_heap_free = shared->heap_info.free_offset;
		schedule_work(&probe_work);
	}
}
1172
/*
 * React to the remote half-channel moving from state @last to @next by
 * advancing our own send-side state machine and notifying the client.
 * Called with smd_lock held from the IRQ paths.
 */
static void smd_state_change(struct smd_channel *ch,
			     unsigned last, unsigned next)
{
	ch->last_state = next;

	SMD_INFO("SMD: ch %d %d -> %d\n", ch->n, last, next);

	switch (next) {
	case SMD_SS_OPENING:
		/* remote is (re)opening: reset our fifo indices and follow */
		if (ch->half_ch->get_state(ch->send) == SMD_SS_CLOSING ||
		    ch->half_ch->get_state(ch->send) == SMD_SS_CLOSED) {
			ch->half_ch->set_tail(ch->recv, 0);
			ch->half_ch->set_head(ch->send, 0);
			ch->half_ch->set_fBLOCKREADINTR(ch->send, 0);
			ch_set_state(ch, SMD_SS_OPENING);
		}
		break;
	case SMD_SS_OPENED:
		/* handshake complete: report the open to the client */
		if (ch->half_ch->get_state(ch->send) == SMD_SS_OPENING) {
			ch_set_state(ch, SMD_SS_OPENED);
			ch->notify(ch->priv, SMD_EVENT_OPEN);
		}
		break;
	case SMD_SS_FLUSHING:
	case SMD_SS_RESET:
		/* we should force them to close? */
		break;
	case SMD_SS_CLOSED:
		/* remote closed first: drop packet state, tell the client */
		if (ch->half_ch->get_state(ch->send) == SMD_SS_OPENED) {
			ch_set_state(ch, SMD_SS_CLOSING);
			ch->current_packet = 0;
			ch->pending_pkt_sz = 0;
			ch->notify(ch->priv, SMD_EVENT_CLOSE);
		}
		break;
	case SMD_SS_CLOSING:
		/* both sides closed: hand off to the close workqueue */
		if (ch->half_ch->get_state(ch->send) == SMD_SS_CLOSED) {
			list_move(&ch->ch_list,
					&smd_ch_to_close_list);
			queue_work(channel_close_wq,
						&finalize_channel_close_work);
		}
		break;
	}
}
1218
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001219static void handle_smd_irq_closing_list(void)
1220{
1221 unsigned long flags;
1222 struct smd_channel *ch;
1223 struct smd_channel *index;
1224 unsigned tmp;
1225
1226 spin_lock_irqsave(&smd_lock, flags);
1227 list_for_each_entry_safe(ch, index, &smd_ch_closing_list, ch_list) {
Jeff Hugo918b2dc2012-03-21 13:42:09 -06001228 if (ch->half_ch->get_fSTATE(ch->recv))
1229 ch->half_ch->set_fSTATE(ch->recv, 0);
1230 tmp = ch->half_ch->get_state(ch->recv);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001231 if (tmp != ch->last_state)
1232 smd_state_change(ch, ch->last_state, tmp);
1233 }
1234 spin_unlock_irqrestore(&smd_lock, flags);
1235}
1236
/*
 * Service every open channel on one edge's @list: acknowledge the
 * remote fHEAD/fTAIL/fSTATE signals, run the state machine on remote
 * state changes, and deliver DATA/STATUS events to the clients.
 * @notify is that edge's "interrupt the remote processor" hook.
 */
static void handle_smd_irq(struct list_head *list, void (*notify)(void))
{
	unsigned long flags;
	struct smd_channel *ch;
	unsigned ch_flags;	/* bit0=fHEAD, bit1=fTAIL, bit2=fSTATE */
	unsigned tmp;
	unsigned char state_change;

	spin_lock_irqsave(&smd_lock, flags);
	list_for_each_entry(ch, list, ch_list) {
		state_change = 0;
		ch_flags = 0;
		/* collect and clear the remote-raised signal flags */
		if (ch_is_open(ch)) {
			if (ch->half_ch->get_fHEAD(ch->recv)) {
				ch->half_ch->set_fHEAD(ch->recv, 0);
				ch_flags |= 1;
			}
			if (ch->half_ch->get_fTAIL(ch->recv)) {
				ch->half_ch->set_fTAIL(ch->recv, 0);
				ch_flags |= 2;
			}
			if (ch->half_ch->get_fSTATE(ch->recv)) {
				ch->half_ch->set_fSTATE(ch->recv, 0);
				ch_flags |= 4;
			}
		}
		tmp = ch->half_ch->get_state(ch->recv);
		if (tmp != ch->last_state) {
			SMx_POWER_INFO("SMD ch%d '%s' State change %d->%d\n",
					ch->n, ch->name, ch->last_state, tmp);
			smd_state_change(ch, ch->last_state, tmp);
			state_change = 1;
		}
		/* head or tail moved: data to read or space to write */
		if (ch_flags & 0x3) {
			ch->update_state(ch);
			SMx_POWER_INFO("SMD ch%d '%s' Data event r%d/w%d\n",
					ch->n, ch->name,
					ch->read_avail(ch),
					ch->fifo_size - ch->write_avail(ch));
			ch->notify(ch->priv, SMD_EVENT_DATA);
		}
		/* fSTATE without a state change is a signal-line update */
		if (ch_flags & 0x4 && !state_change) {
			SMx_POWER_INFO("SMD ch%d '%s' State update\n",
					ch->n, ch->name);
			ch->notify(ch->priv, SMD_EVENT_STATUS);
		}
	}
	spin_unlock_irqrestore(&smd_lock, flags);
	do_smd_probe();
}
1287
/* interrupt raised by the modem to signal SMD activity toward apps */
static irqreturn_t smd_modem_irq_handler(int irq, void *data)
{
	SMx_POWER_INFO("SMD Int Modem->Apps\n");
	++interrupt_stats[SMD_MODEM].smd_in_count;
	handle_smd_irq(&smd_ch_list_modem, notify_modem_smd);
	handle_smd_irq_closing_list();
	return IRQ_HANDLED;
}
1296
/* interrupt raised by the LPASS/Q6 to signal SMD activity toward apps */
static irqreturn_t smd_dsp_irq_handler(int irq, void *data)
{
	SMx_POWER_INFO("SMD Int LPASS->Apps\n");
	++interrupt_stats[SMD_Q6].smd_in_count;
	handle_smd_irq(&smd_ch_list_dsp, notify_dsp_smd);
	handle_smd_irq_closing_list();
	return IRQ_HANDLED;
}
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001305
/* interrupt raised by the DSPS to signal SMD activity toward apps */
static irqreturn_t smd_dsps_irq_handler(int irq, void *data)
{
	SMx_POWER_INFO("SMD Int DSPS->Apps\n");
	++interrupt_stats[SMD_DSPS].smd_in_count;
	handle_smd_irq(&smd_ch_list_dsps, notify_dsps_smd);
	handle_smd_irq_closing_list();
	return IRQ_HANDLED;
}
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001314
/* interrupt raised by the WCNSS to signal SMD activity toward apps */
static irqreturn_t smd_wcnss_irq_handler(int irq, void *data)
{
	SMx_POWER_INFO("SMD Int WCNSS->Apps\n");
	++interrupt_stats[SMD_WCNSS].smd_in_count;
	handle_smd_irq(&smd_ch_list_wcnss, notify_wcnss_smd);
	handle_smd_irq_closing_list();
	return IRQ_HANDLED;
}
1323
/* interrupt raised by the RPM to signal SMD activity toward apps */
static irqreturn_t smd_rpm_irq_handler(int irq, void *data)
{
	SMx_POWER_INFO("SMD Int RPM->Apps\n");
	++interrupt_stats[SMD_RPM].smd_in_count;
	handle_smd_irq(&smd_ch_list_rpm, notify_rpm_smd);
	handle_smd_irq_closing_list();
	return IRQ_HANDLED;
}
1332
/*
 * Tasklet body: service every edge's channel list as if the
 * corresponding interrupt had fired (used after sleep exit and during
 * subsystem restart).
 */
static void smd_fake_irq_handler(unsigned long arg)
{
	handle_smd_irq(&smd_ch_list_modem, notify_modem_smd);
	handle_smd_irq(&smd_ch_list_dsp, notify_dsp_smd);
	handle_smd_irq(&smd_ch_list_dsps, notify_dsps_smd);
	handle_smd_irq(&smd_ch_list_wcnss, notify_wcnss_smd);
	handle_smd_irq(&smd_ch_list_rpm, notify_rpm_smd);
	handle_smd_irq_closing_list();
}
1342
/* tasklet that runs smd_fake_irq_handler() outside hard-IRQ context */
static DECLARE_TASKLET(smd_fake_irq_tasklet, smd_fake_irq_handler, 0);
1344
Brian Swetland37521a32009-07-01 18:30:47 -07001345static inline int smd_need_int(struct smd_channel *ch)
1346{
1347 if (ch_is_open(ch)) {
Jeff Hugo918b2dc2012-03-21 13:42:09 -06001348 if (ch->half_ch->get_fHEAD(ch->recv) ||
1349 ch->half_ch->get_fTAIL(ch->recv) ||
1350 ch->half_ch->get_fSTATE(ch->recv))
Brian Swetland37521a32009-07-01 18:30:47 -07001351 return 1;
Jeff Hugo918b2dc2012-03-21 13:42:09 -06001352 if (ch->half_ch->get_state(ch->recv) != ch->last_state)
Brian Swetland37521a32009-07-01 18:30:47 -07001353 return 1;
1354 }
1355 return 0;
1356}
1357
/*
 * Called on resume from sleep: if any open channel has pending remote
 * signals or a pending state change (interrupts may have been missed
 * while asleep), schedule the fake-interrupt tasklet to service them.
 * NOTE(review): smd_ch_list_rpm is not scanned here even though
 * smd_fake_irq_handler() services it -- confirm this is intentional.
 */
void smd_sleep_exit(void)
{
	unsigned long flags;
	struct smd_channel *ch;
	int need_int = 0;

	spin_lock_irqsave(&smd_lock, flags);
	list_for_each_entry(ch, &smd_ch_list_modem, ch_list) {
		if (smd_need_int(ch)) {
			need_int = 1;
			break;
		}
	}
	list_for_each_entry(ch, &smd_ch_list_dsp, ch_list) {
		if (smd_need_int(ch)) {
			need_int = 1;
			break;
		}
	}
	list_for_each_entry(ch, &smd_ch_list_dsps, ch_list) {
		if (smd_need_int(ch)) {
			need_int = 1;
			break;
		}
	}
	list_for_each_entry(ch, &smd_ch_list_wcnss, ch_list) {
		if (smd_need_int(ch)) {
			need_int = 1;
			break;
		}
	}
	spin_unlock_irqrestore(&smd_lock, flags);
	do_smd_probe();

	if (need_int) {
		SMD_DBG("smd_sleep_exit need interrupt\n");
		tasklet_schedule(&smd_fake_irq_tasklet);
	}
}
EXPORT_SYMBOL(smd_sleep_exit);
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001398
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001399static int smd_is_packet(struct smd_alloc_elm *alloc_elm)
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001400{
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001401 if (SMD_XFER_TYPE(alloc_elm->type) == 1)
1402 return 0;
1403 else if (SMD_XFER_TYPE(alloc_elm->type) == 2)
Brian Swetland5b0f5a32009-04-26 18:38:49 -07001404 return 1;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001405
1406 /* for cases where xfer type is 0 */
1407 if (!strncmp(alloc_elm->name, "DAL", 3))
Brian Swetland5b0f5a32009-04-26 18:38:49 -07001408 return 0;
1409
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001410 /* for cases where xfer type is 0 */
1411 if (!strncmp(alloc_elm->name, "RPCCALL_QDSP", 12))
1412 return 0;
1413
1414 if (alloc_elm->cid > 4 || alloc_elm->cid == 1)
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001415 return 1;
1416 else
1417 return 0;
1418}
1419
/*
 * Stream-mode write: copy as much of @_data into the send fifo as fits
 * and kick the remote side if anything was written.  @user_buf selects
 * copy_from_user() over memcpy().  Returns the number of bytes written
 * (0 if the fifo was full), or -EINVAL for a negative @len.
 */
static int smd_stream_write(smd_channel_t *ch, const void *_data, int len,
				int user_buf)
{
	void *ptr;
	const unsigned char *buf = _data;
	unsigned xfer;
	int orig_len = len;
	int r = 0;

	SMD_DBG("smd_stream_write() %d -> ch%d\n", len, ch->n);
	if (len < 0)
		return -EINVAL;
	else if (len == 0)
		return 0;

	while ((xfer = ch_write_buffer(ch, &ptr)) != 0) {
		/* channel dropped mid-write: report zero bytes written */
		if (!ch_is_open(ch)) {
			len = orig_len;
			break;
		}
		if (xfer > len)
			xfer = len;
		if (user_buf) {
			r = copy_from_user(ptr, buf, xfer);
			if (r > 0) {
				pr_err("%s: "
					"copy_from_user could not copy %i "
					"bytes.\n",
					__func__,
					r);
			}
		} else
			memcpy(ptr, buf, xfer);
		ch_write_done(ch, xfer);
		len -= xfer;
		buf += xfer;
		if (len == 0)
			break;
	}

	/* only interrupt the remote processor if we actually wrote data */
	if (orig_len - len)
		ch->notify_other_cpu();

	return orig_len - len;
}
1465
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001466static int smd_packet_write(smd_channel_t *ch, const void *_data, int len,
1467 int user_buf)
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001468{
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001469 int ret;
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001470 unsigned hdr[5];
1471
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001472 SMD_DBG("smd_packet_write() %d -> ch%d\n", len, ch->n);
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001473 if (len < 0)
1474 return -EINVAL;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001475 else if (len == 0)
1476 return 0;
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001477
1478 if (smd_stream_write_avail(ch) < (len + SMD_HEADER_SIZE))
1479 return -ENOMEM;
1480
1481 hdr[0] = len;
1482 hdr[1] = hdr[2] = hdr[3] = hdr[4] = 0;
1483
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001484
1485 ret = smd_stream_write(ch, hdr, sizeof(hdr), 0);
1486 if (ret < 0 || ret != sizeof(hdr)) {
1487 SMD_DBG("%s failed to write pkt header: "
1488 "%d returned\n", __func__, ret);
1489 return -1;
1490 }
1491
1492
1493 ret = smd_stream_write(ch, _data, len, user_buf);
1494 if (ret < 0 || ret != len) {
1495 SMD_DBG("%s failed to write pkt data: "
1496 "%d returned\n", __func__, ret);
1497 return ret;
1498 }
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001499
1500 return len;
1501}
1502
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001503static int smd_stream_read(smd_channel_t *ch, void *data, int len, int user_buf)
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001504{
1505 int r;
1506
1507 if (len < 0)
1508 return -EINVAL;
1509
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001510 r = ch_read(ch, data, len, user_buf);
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001511 if (r > 0)
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001512 if (!read_intr_blocked(ch))
1513 ch->notify_other_cpu();
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001514
1515 return r;
1516}
1517
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001518static int smd_packet_read(smd_channel_t *ch, void *data, int len, int user_buf)
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001519{
1520 unsigned long flags;
1521 int r;
1522
1523 if (len < 0)
1524 return -EINVAL;
1525
1526 if (len > ch->current_packet)
1527 len = ch->current_packet;
1528
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001529 r = ch_read(ch, data, len, user_buf);
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001530 if (r > 0)
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001531 if (!read_intr_blocked(ch))
1532 ch->notify_other_cpu();
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001533
1534 spin_lock_irqsave(&smd_lock, flags);
1535 ch->current_packet -= r;
1536 update_packet_state(ch);
1537 spin_unlock_irqrestore(&smd_lock, flags);
1538
1539 return r;
1540}
1541
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001542static int smd_packet_read_from_cb(smd_channel_t *ch, void *data, int len,
1543 int user_buf)
1544{
1545 int r;
1546
1547 if (len < 0)
1548 return -EINVAL;
1549
1550 if (len > ch->current_packet)
1551 len = ch->current_packet;
1552
1553 r = ch_read(ch, data, len, user_buf);
1554 if (r > 0)
1555 if (!read_intr_blocked(ch))
1556 ch->notify_other_cpu();
1557
1558 ch->current_packet -= r;
1559 update_packet_state(ch);
1560
1561 return r;
1562}
1563
#if (defined(CONFIG_MSM_SMD_PKG4) || defined(CONFIG_MSM_SMD_PKG3))
/*
 * Bind a protocol-v2 channel to its shared-memory resources: the control
 * structure pair (send/recv half-channels) and the split TX/RX FIFO.
 * Returns 0 on success, -EINVAL if the SMEM items do not exist or the
 * FIFO region is malformed.
 */
static int smd_alloc_v2(struct smd_channel *ch)
{
	void *buffer;
	unsigned buffer_sz;

	/* Word-access channels use a differently laid out control block;
	 * both variants expose ch0/ch1 halves the same way.
	 */
	if (is_word_access_ch(ch->type)) {
		struct smd_shared_v2_word_access *shared2;
		shared2 = smem_alloc(SMEM_SMD_BASE_ID + ch->n,
						sizeof(*shared2));
		if (!shared2) {
			SMD_INFO("smem_alloc failed ch=%d\n", ch->n);
			return -EINVAL;
		}
		ch->send = &shared2->ch0;
		ch->recv = &shared2->ch1;
	} else {
		struct smd_shared_v2 *shared2;
		shared2 = smem_alloc(SMEM_SMD_BASE_ID + ch->n,
							sizeof(*shared2));
		if (!shared2) {
			SMD_INFO("smem_alloc failed ch=%d\n", ch->n);
			return -EINVAL;
		}
		ch->send = &shared2->ch0;
		ch->recv = &shared2->ch1;
	}
	/* Accessor table matching the channel's access width. */
	ch->half_ch = get_half_ch_funcs(ch->type);

	buffer = smem_get_entry(SMEM_SMD_FIFO_BASE_ID + ch->n, &buffer_sz);
	if (!buffer) {
		SMD_INFO("smem_get_entry failed\n");
		return -EINVAL;
	}

	/* buffer must be a power-of-two size */
	if (buffer_sz & (buffer_sz - 1)) {
		SMD_INFO("Buffer size: %u not power of two\n", buffer_sz);
		return -EINVAL;
	}
	/* The SMEM item holds both directions: first half TX, second
	 * half RX.
	 */
	buffer_sz /= 2;
	ch->send_data = buffer;
	ch->recv_data = buffer + buffer_sz;
	ch->fifo_size = buffer_sz;

	return 0;
}

/* Protocol v1 is not supported on PKG3/PKG4 targets. */
static int smd_alloc_v1(struct smd_channel *ch)
{
	return -EINVAL;
}

#else /* define v1 for older targets */
/* Protocol v2 is not supported on older targets. */
static int smd_alloc_v2(struct smd_channel *ch)
{
	return -EINVAL;
}

/*
 * Bind a protocol-v1 channel: control halves and fixed-size data FIFOs
 * live in a single SMEM item.  Returns 0 on success, -EINVAL otherwise.
 */
static int smd_alloc_v1(struct smd_channel *ch)
{
	struct smd_shared_v1 *shared1;
	shared1 = smem_alloc(ID_SMD_CHANNELS + ch->n, sizeof(*shared1));
	if (!shared1) {
		pr_err("smd_alloc_channel() cid %d does not exist\n", ch->n);
		return -EINVAL;
	}
	ch->send = &shared1->ch0;
	ch->recv = &shared1->ch1;
	ch->send_data = shared1->data0;
	ch->recv_data = shared1->data1;
	ch->fifo_size = SMD_BUF_SIZE;
	ch->half_ch = get_half_ch_funcs(ch->type);
	return 0;
}

#endif
1641
/*
 * Create a channel object for one entry of the SMEM allocation table,
 * bind it to shared memory (v2 first, then v1 fallback), wire up the
 * per-edge notify and packet/stream operations, add it to the closed
 * list, and register a platform device so clients can probe against it.
 * Returns 0 on success, -1 on allocation/binding failure.
 */
static int smd_alloc_channel(struct smd_alloc_elm *alloc_elm)
{
	struct smd_channel *ch;

	ch = kzalloc(sizeof(struct smd_channel), GFP_KERNEL);
	if (ch == 0) {
		pr_err("smd_alloc_channel() out of memory\n");
		return -1;
	}
	ch->n = alloc_elm->cid;
	ch->type = SMD_CHANNEL_TYPE(alloc_elm->type);

	/* Try v2 shared memory layout first, fall back to v1. */
	if (smd_alloc_v2(ch) && smd_alloc_v1(ch)) {
		kfree(ch);
		return -1;
	}

	/* fifo_size is a power of two (enforced in smd_alloc_v2), so the
	 * mask is used for cheap FIFO index wrapping.
	 */
	ch->fifo_mask = ch->fifo_size - 1;

	/* probe_worker guarantees ch->type will be a valid type */
	if (ch->type == SMD_APPS_MODEM)
		ch->notify_other_cpu = notify_modem_smd;
	else if (ch->type == SMD_APPS_QDSP)
		ch->notify_other_cpu = notify_dsp_smd;
	else if (ch->type == SMD_APPS_DSPS)
		ch->notify_other_cpu = notify_dsps_smd;
	else if (ch->type == SMD_APPS_WCNSS)
		ch->notify_other_cpu = notify_wcnss_smd;
	else if (ch->type == SMD_APPS_RPM)
		ch->notify_other_cpu = notify_rpm_smd;

	/* Select packet-mode or raw stream-mode operations. */
	if (smd_is_packet(alloc_elm)) {
		ch->read = smd_packet_read;
		ch->write = smd_packet_write;
		ch->read_avail = smd_packet_read_avail;
		ch->write_avail = smd_packet_write_avail;
		ch->update_state = update_packet_state;
		ch->read_from_cb = smd_packet_read_from_cb;
		ch->is_pkt_ch = 1;
	} else {
		ch->read = smd_stream_read;
		ch->write = smd_stream_write;
		ch->read_avail = smd_stream_read_avail;
		ch->write_avail = smd_stream_write_avail;
		ch->update_state = update_stream_state;
		ch->read_from_cb = smd_stream_read;
	}

	/* Copy the name and force NUL termination. */
	memcpy(ch->name, alloc_elm->name, SMD_MAX_CH_NAME_LEN);
	ch->name[SMD_MAX_CH_NAME_LEN-1] = 0;

	ch->pdev.name = ch->name;
	ch->pdev.id = ch->type;

	SMD_INFO("smd_alloc_channel() '%s' cid=%d\n",
		 ch->name, ch->n);

	mutex_lock(&smd_creation_mutex);
	list_add(&ch->ch_list, &smd_ch_closed_list);
	mutex_unlock(&smd_creation_mutex);

	platform_device_register(&ch->pdev);
	if (!strncmp(ch->name, "LOOPBACK", 8) && ch->type == SMD_APPS_MODEM) {
		/* create a platform driver to be used by smd_tty driver
		 * so that it can access the loopback port
		 */
		loopback_tty_pdev.id = ch->type;
		platform_device_register(&loopback_tty_pdev);
	}
	return 0;
}
1713
1714static inline void notify_loopback_smd(void)
1715{
1716 unsigned long flags;
1717 struct smd_channel *ch;
1718
1719 spin_lock_irqsave(&smd_lock, flags);
1720 list_for_each_entry(ch, &smd_ch_list_loopback, ch_list) {
1721 ch->notify(ch->priv, SMD_EVENT_DATA);
1722 }
1723 spin_unlock_irqrestore(&smd_lock, flags);
1724}
1725
1726static int smd_alloc_loopback_channel(void)
1727{
1728 static struct smd_half_channel smd_loopback_ctl;
1729 static char smd_loopback_data[SMD_BUF_SIZE];
1730 struct smd_channel *ch;
1731
1732 ch = kzalloc(sizeof(struct smd_channel), GFP_KERNEL);
1733 if (ch == 0) {
1734 pr_err("%s: out of memory\n", __func__);
1735 return -1;
1736 }
1737 ch->n = SMD_LOOPBACK_CID;
1738
1739 ch->send = &smd_loopback_ctl;
1740 ch->recv = &smd_loopback_ctl;
1741 ch->send_data = smd_loopback_data;
1742 ch->recv_data = smd_loopback_data;
1743 ch->fifo_size = SMD_BUF_SIZE;
1744
1745 ch->fifo_mask = ch->fifo_size - 1;
1746 ch->type = SMD_LOOPBACK_TYPE;
1747 ch->notify_other_cpu = notify_loopback_smd;
1748
1749 ch->read = smd_stream_read;
1750 ch->write = smd_stream_write;
1751 ch->read_avail = smd_stream_read_avail;
1752 ch->write_avail = smd_stream_write_avail;
1753 ch->update_state = update_stream_state;
1754 ch->read_from_cb = smd_stream_read;
1755
1756 memset(ch->name, 0, 20);
1757 memcpy(ch->name, "local_loopback", 14);
1758
1759 ch->pdev.name = ch->name;
1760 ch->pdev.id = ch->type;
1761
1762 SMD_INFO("%s: '%s' cid=%d\n", __func__, ch->name, ch->n);
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001763
1764 mutex_lock(&smd_creation_mutex);
1765 list_add(&ch->ch_list, &smd_ch_closed_list);
1766 mutex_unlock(&smd_creation_mutex);
1767
1768 platform_device_register(&ch->pdev);
Brian Swetland34f719b2009-10-30 16:22:05 -07001769 return 0;
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001770}
1771
/* Default notify callback: intentionally discards all channel events.
 * Installed when a client passes a NULL notify, and during close to
 * detach the old client's callback.
 */
static void do_nothing_notify(void *priv, unsigned flags)
{
}
1775
/*
 * Workqueue handler that completes channel teardown: moves every channel
 * on the to-close list onto the closed list, tells the client the port
 * may be reopened, then detaches the client callback.
 *
 * Lock order: smd_creation_mutex (protects the closed list) is taken
 * before smd_lock (protects the to-close list); do not reorder.
 */
static void finalize_channel_close_fn(struct work_struct *work)
{
	unsigned long flags;
	struct smd_channel *ch;
	struct smd_channel *index;

	mutex_lock(&smd_creation_mutex);
	spin_lock_irqsave(&smd_lock, flags);
	list_for_each_entry_safe(ch, index, &smd_ch_to_close_list, ch_list) {
		list_del(&ch->ch_list);
		list_add(&ch->ch_list, &smd_ch_closed_list);
		/* Tell the client before detaching its callback. */
		ch->notify(ch->priv, SMD_EVENT_REOPEN_READY);
		ch->notify = do_nothing_notify;
	}
	spin_unlock_irqrestore(&smd_lock, flags);
	mutex_unlock(&smd_creation_mutex);
}
1793
1794struct smd_channel *smd_get_channel(const char *name, uint32_t type)
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001795{
1796 struct smd_channel *ch;
1797
1798 mutex_lock(&smd_creation_mutex);
1799 list_for_each_entry(ch, &smd_ch_closed_list, ch_list) {
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001800 if (!strcmp(name, ch->name) &&
1801 (type == ch->type)) {
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001802 list_del(&ch->ch_list);
1803 mutex_unlock(&smd_creation_mutex);
1804 return ch;
1805 }
1806 }
1807 mutex_unlock(&smd_creation_mutex);
1808
1809 return NULL;
1810}
1811
/*
 * Open the channel named @name on the given @edge and install the
 * client's notify callback.
 *
 * Returns 0 on success, -ENODEV if SMD is uninitialized or no such
 * channel exists, -EAGAIN if the channel exists but is still being
 * closed (the caller should retry after SMD_EVENT_REOPEN_READY).
 */
int smd_named_open_on_edge(const char *name, uint32_t edge,
			   smd_channel_t **_ch,
			   void *priv, void (*notify)(void *, unsigned))
{
	struct smd_channel *ch;
	unsigned long flags;

	if (smd_initialized == 0) {
		SMD_INFO("smd_open() before smd_init()\n");
		return -ENODEV;
	}

	SMD_DBG("smd_open('%s', %p, %p)\n", name, priv, notify);

	ch = smd_get_channel(name, edge);
	if (!ch) {
		/* check closing list for port */
		spin_lock_irqsave(&smd_lock, flags);
		list_for_each_entry(ch, &smd_ch_closing_list, ch_list) {
			if (!strncmp(name, ch->name, 20) &&
				(edge == ch->type)) {
				/* channel exists, but is being closed */
				spin_unlock_irqrestore(&smd_lock, flags);
				return -EAGAIN;
			}
		}

		/* check closing workqueue list for port */
		list_for_each_entry(ch, &smd_ch_to_close_list, ch_list) {
			if (!strncmp(name, ch->name, 20) &&
				(edge == ch->type)) {
				/* channel exists, but is being closed */
				spin_unlock_irqrestore(&smd_lock, flags);
				return -EAGAIN;
			}
		}
		spin_unlock_irqrestore(&smd_lock, flags);

		/* one final check to handle closing->closed race condition */
		ch = smd_get_channel(name, edge);
		if (!ch)
			return -ENODEV;
	}

	if (notify == 0)
		notify = do_nothing_notify;

	ch->notify = notify;
	ch->current_packet = 0;
	ch->last_state = SMD_SS_CLOSED;
	ch->priv = priv;

	/* Loopback has no remote side to do the handshake: go straight
	 * to OPENED and raise the modem-status flags ourselves.
	 */
	if (edge == SMD_LOOPBACK_TYPE) {
		ch->last_state = SMD_SS_OPENED;
		ch->half_ch->set_state(ch->send, SMD_SS_OPENED);
		ch->half_ch->set_fDSR(ch->send, 1);
		ch->half_ch->set_fCTS(ch->send, 1);
		ch->half_ch->set_fCD(ch->send, 1);
	}

	*_ch = ch;

	SMD_DBG("smd_open: opening '%s'\n", ch->name);

	/* File the channel on its per-edge active list under smd_lock. */
	spin_lock_irqsave(&smd_lock, flags);
	if (SMD_CHANNEL_TYPE(ch->type) == SMD_APPS_MODEM)
		list_add(&ch->ch_list, &smd_ch_list_modem);
	else if (SMD_CHANNEL_TYPE(ch->type) == SMD_APPS_QDSP)
		list_add(&ch->ch_list, &smd_ch_list_dsp);
	else if (SMD_CHANNEL_TYPE(ch->type) == SMD_APPS_DSPS)
		list_add(&ch->ch_list, &smd_ch_list_dsps);
	else if (SMD_CHANNEL_TYPE(ch->type) == SMD_APPS_WCNSS)
		list_add(&ch->ch_list, &smd_ch_list_wcnss);
	else if (SMD_CHANNEL_TYPE(ch->type) == SMD_APPS_RPM)
		list_add(&ch->ch_list, &smd_ch_list_rpm);
	else
		list_add(&ch->ch_list, &smd_ch_list_loopback);

	SMD_DBG("%s: opening ch %d\n", __func__, ch->n);

	if (edge != SMD_LOOPBACK_TYPE)
		smd_state_change(ch, ch->last_state, SMD_SS_OPENING);

	spin_unlock_irqrestore(&smd_lock, flags);

	return 0;
}
EXPORT_SYMBOL(smd_named_open_on_edge);
1900
1901
1902int smd_open(const char *name, smd_channel_t **_ch,
1903 void *priv, void (*notify)(void *, unsigned))
1904{
1905 return smd_named_open_on_edge(name, SMD_APPS_MODEM, _ch, priv,
1906 notify);
1907}
1908EXPORT_SYMBOL(smd_open);
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001909
/*
 * Close a channel.  Signals CLOSED to the remote side; if the remote
 * half is still OPENED the channel is parked on the closing list until
 * the remote side acknowledges (completed later via the close work),
 * otherwise it is returned to the closed list immediately.
 *
 * Returns 0, or -1 for a NULL channel.
 */
int smd_close(smd_channel_t *ch)
{
	unsigned long flags;

	if (ch == 0)
		return -1;

	SMD_INFO("smd_close(%s)\n", ch->name);

	spin_lock_irqsave(&smd_lock, flags);
	list_del(&ch->ch_list);
	if (ch->n == SMD_LOOPBACK_CID) {
		/* Loopback: drop the status flags we raised at open. */
		ch->half_ch->set_fDSR(ch->send, 0);
		ch->half_ch->set_fCTS(ch->send, 0);
		ch->half_ch->set_fCD(ch->send, 0);
		ch->half_ch->set_state(ch->send, SMD_SS_CLOSED);
	} else
		ch_set_state(ch, SMD_SS_CLOSED);

	if (ch->half_ch->get_state(ch->recv) == SMD_SS_OPENED) {
		/* Remote still open: defer final teardown. */
		list_add(&ch->ch_list, &smd_ch_closing_list);
		spin_unlock_irqrestore(&smd_lock, flags);
	} else {
		/* smd_lock must be dropped before smd_creation_mutex. */
		spin_unlock_irqrestore(&smd_lock, flags);
		ch->notify = do_nothing_notify;
		mutex_lock(&smd_creation_mutex);
		list_add(&ch->ch_list, &smd_ch_closed_list);
		mutex_unlock(&smd_creation_mutex);
	}

	return 0;
}
EXPORT_SYMBOL(smd_close);
1943
/*
 * Begin a segmented packet write of @len payload bytes on a packet
 * channel: writes the 5-word packet header and records the pending
 * payload size, which smd_write_segment() then consumes.
 *
 * Returns 0 on success; -ENODEV (NULL channel), -EACCES (stream
 * channel), -EINVAL (len < 1), -EBUSY (a packet is already in
 * progress), -EAGAIN (no FIFO room for the header), -EPERM (header
 * write failed).
 */
int smd_write_start(smd_channel_t *ch, int len)
{
	int ret;
	unsigned hdr[5];

	if (!ch) {
		pr_err("%s: Invalid channel specified\n", __func__);
		return -ENODEV;
	}
	if (!ch->is_pkt_ch) {
		pr_err("%s: non-packet channel specified\n", __func__);
		return -EACCES;
	}
	if (len < 1) {
		pr_err("%s: invalid length: %d\n", __func__, len);
		return -EINVAL;
	}

	if (ch->pending_pkt_sz) {
		pr_err("%s: packet of size: %d in progress\n", __func__,
			ch->pending_pkt_sz);
		return -EBUSY;
	}
	ch->pending_pkt_sz = len;

	if (smd_stream_write_avail(ch) < (SMD_HEADER_SIZE)) {
		/* Roll back: the transaction never started. */
		ch->pending_pkt_sz = 0;
		SMD_DBG("%s: no space to write packet header\n", __func__);
		return -EAGAIN;
	}

	/* Packet header: word 0 is the payload length, rest reserved. */
	hdr[0] = len;
	hdr[1] = hdr[2] = hdr[3] = hdr[4] = 0;


	ret = smd_stream_write(ch, hdr, sizeof(hdr), 0);
	if (ret < 0 || ret != sizeof(hdr)) {
		ch->pending_pkt_sz = 0;
		pr_err("%s: packet header failed to write\n", __func__);
		return -EPERM;
	}
	return 0;
}
EXPORT_SYMBOL(smd_write_start);
1988
1989int smd_write_segment(smd_channel_t *ch, void *data, int len, int user_buf)
1990{
1991 int bytes_written;
1992
1993 if (!ch) {
1994 pr_err("%s: Invalid channel specified\n", __func__);
1995 return -ENODEV;
1996 }
1997 if (len < 1) {
1998 pr_err("%s: invalid length: %d\n", __func__, len);
1999 return -EINVAL;
2000 }
2001
2002 if (!ch->pending_pkt_sz) {
2003 pr_err("%s: no transaction in progress\n", __func__);
2004 return -ENOEXEC;
2005 }
2006 if (ch->pending_pkt_sz - len < 0) {
2007 pr_err("%s: segment of size: %d will make packet go over "
2008 "length\n", __func__, len);
2009 return -EINVAL;
2010 }
2011
2012 bytes_written = smd_stream_write(ch, data, len, user_buf);
2013
2014 ch->pending_pkt_sz -= bytes_written;
2015
2016 return bytes_written;
2017}
2018EXPORT_SYMBOL(smd_write_segment);
2019
2020int smd_write_end(smd_channel_t *ch)
2021{
2022
2023 if (!ch) {
2024 pr_err("%s: Invalid channel specified\n", __func__);
2025 return -ENODEV;
2026 }
2027 if (ch->pending_pkt_sz) {
2028 pr_err("%s: current packet not completely written\n", __func__);
2029 return -E2BIG;
2030 }
2031
2032 return 0;
2033}
2034EXPORT_SYMBOL(smd_write_end);
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002035
2036int smd_read(smd_channel_t *ch, void *data, int len)
2037{
Jack Pham1b236d12012-03-19 15:27:18 -07002038 if (!ch) {
2039 pr_err("%s: Invalid channel specified\n", __func__);
2040 return -ENODEV;
2041 }
2042
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002043 return ch->read(ch, data, len, 0);
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002044}
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002045EXPORT_SYMBOL(smd_read);
2046
2047int smd_read_user_buffer(smd_channel_t *ch, void *data, int len)
2048{
Jack Pham1b236d12012-03-19 15:27:18 -07002049 if (!ch) {
2050 pr_err("%s: Invalid channel specified\n", __func__);
2051 return -ENODEV;
2052 }
2053
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002054 return ch->read(ch, data, len, 1);
2055}
2056EXPORT_SYMBOL(smd_read_user_buffer);
2057
2058int smd_read_from_cb(smd_channel_t *ch, void *data, int len)
2059{
Jack Pham1b236d12012-03-19 15:27:18 -07002060 if (!ch) {
2061 pr_err("%s: Invalid channel specified\n", __func__);
2062 return -ENODEV;
2063 }
2064
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002065 return ch->read_from_cb(ch, data, len, 0);
2066}
2067EXPORT_SYMBOL(smd_read_from_cb);
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002068
2069int smd_write(smd_channel_t *ch, const void *data, int len)
2070{
Jack Pham1b236d12012-03-19 15:27:18 -07002071 if (!ch) {
2072 pr_err("%s: Invalid channel specified\n", __func__);
2073 return -ENODEV;
2074 }
2075
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002076 return ch->pending_pkt_sz ? -EBUSY : ch->write(ch, data, len, 0);
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002077}
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002078EXPORT_SYMBOL(smd_write);
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002079
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002080int smd_write_user_buffer(smd_channel_t *ch, const void *data, int len)
Brian Swetland636eb9c2009-12-07 15:28:08 -08002081{
Jack Pham1b236d12012-03-19 15:27:18 -07002082 if (!ch) {
2083 pr_err("%s: Invalid channel specified\n", __func__);
2084 return -ENODEV;
2085 }
2086
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002087 return ch->pending_pkt_sz ? -EBUSY : ch->write(ch, data, len, 1);
Brian Swetland636eb9c2009-12-07 15:28:08 -08002088}
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002089EXPORT_SYMBOL(smd_write_user_buffer);
Brian Swetland636eb9c2009-12-07 15:28:08 -08002090
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002091int smd_read_avail(smd_channel_t *ch)
2092{
Jack Pham1b236d12012-03-19 15:27:18 -07002093 if (!ch) {
2094 pr_err("%s: Invalid channel specified\n", __func__);
2095 return -ENODEV;
2096 }
2097
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002098 return ch->read_avail(ch);
2099}
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002100EXPORT_SYMBOL(smd_read_avail);
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002101
2102int smd_write_avail(smd_channel_t *ch)
2103{
Jack Pham1b236d12012-03-19 15:27:18 -07002104 if (!ch) {
2105 pr_err("%s: Invalid channel specified\n", __func__);
2106 return -ENODEV;
2107 }
2108
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002109 return ch->write_avail(ch);
2110}
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002111EXPORT_SYMBOL(smd_write_avail);
2112
2113void smd_enable_read_intr(smd_channel_t *ch)
2114{
2115 if (ch)
Jeff Hugo918b2dc2012-03-21 13:42:09 -06002116 ch->half_ch->set_fBLOCKREADINTR(ch->send, 0);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002117}
2118EXPORT_SYMBOL(smd_enable_read_intr);
2119
2120void smd_disable_read_intr(smd_channel_t *ch)
2121{
2122 if (ch)
Jeff Hugo918b2dc2012-03-21 13:42:09 -06002123 ch->half_ch->set_fBLOCKREADINTR(ch->send, 1);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002124}
2125EXPORT_SYMBOL(smd_disable_read_intr);
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002126
/* Not implemented: blocking waits are unsupported; always returns -1. */
int smd_wait_until_readable(smd_channel_t *ch, int bytes)
{
	return -1;
}
2131
/* Not implemented: blocking waits are unsupported; always returns -1. */
int smd_wait_until_writable(smd_channel_t *ch, int bytes)
{
	return -1;
}
2136
2137int smd_cur_packet_size(smd_channel_t *ch)
2138{
Jack Pham1b236d12012-03-19 15:27:18 -07002139 if (!ch) {
2140 pr_err("%s: Invalid channel specified\n", __func__);
2141 return -ENODEV;
2142 }
2143
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002144 return ch->current_packet;
2145}
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002146EXPORT_SYMBOL(smd_cur_packet_size);
2147
2148int smd_tiocmget(smd_channel_t *ch)
2149{
Jack Pham1b236d12012-03-19 15:27:18 -07002150 if (!ch) {
2151 pr_err("%s: Invalid channel specified\n", __func__);
2152 return -ENODEV;
2153 }
2154
Jeff Hugo918b2dc2012-03-21 13:42:09 -06002155 return (ch->half_ch->get_fDSR(ch->recv) ? TIOCM_DSR : 0) |
2156 (ch->half_ch->get_fCTS(ch->recv) ? TIOCM_CTS : 0) |
2157 (ch->half_ch->get_fCD(ch->recv) ? TIOCM_CD : 0) |
2158 (ch->half_ch->get_fRI(ch->recv) ? TIOCM_RI : 0) |
2159 (ch->half_ch->get_fCTS(ch->send) ? TIOCM_RTS : 0) |
2160 (ch->half_ch->get_fDSR(ch->send) ? TIOCM_DTR : 0);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002161}
2162EXPORT_SYMBOL(smd_tiocmget);
2163
/* this api will be called while holding smd_lock */
/*
 * Set/clear DTR and RTS on our send half, raise fSTATE to flag the
 * change, and interrupt the remote processor.  Note DTR maps to fDSR
 * and RTS maps to fCTS on the sending half.  Returns 0 on success,
 * -ENODEV for a NULL channel.
 */
int
smd_tiocmset_from_cb(smd_channel_t *ch, unsigned int set, unsigned int clear)
{
	if (!ch) {
		pr_err("%s: Invalid channel specified\n", __func__);
		return -ENODEV;
	}

	if (set & TIOCM_DTR)
		ch->half_ch->set_fDSR(ch->send, 1);

	if (set & TIOCM_RTS)
		ch->half_ch->set_fCTS(ch->send, 1);

	if (clear & TIOCM_DTR)
		ch->half_ch->set_fDSR(ch->send, 0);

	if (clear & TIOCM_RTS)
		ch->half_ch->set_fCTS(ch->send, 0);

	ch->half_ch->set_fSTATE(ch->send, 1);
	/* Ensure the flag updates are visible before notifying. */
	barrier();
	ch->notify_other_cpu();

	return 0;
}
EXPORT_SYMBOL(smd_tiocmset_from_cb);
2192
2193int smd_tiocmset(smd_channel_t *ch, unsigned int set, unsigned int clear)
2194{
2195 unsigned long flags;
2196
Jack Pham1b236d12012-03-19 15:27:18 -07002197 if (!ch) {
2198 pr_err("%s: Invalid channel specified\n", __func__);
2199 return -ENODEV;
2200 }
2201
Vamsi Krishnacb12a102011-08-17 15:18:26 -07002202 spin_lock_irqsave(&smd_lock, flags);
2203 smd_tiocmset_from_cb(ch, set, clear);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002204 spin_unlock_irqrestore(&smd_lock, flags);
2205
2206 return 0;
2207}
2208EXPORT_SYMBOL(smd_tiocmset);
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002209
2210
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002211/* -------------------------------------------------------------------------- */
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002212
/*
 * Shared Memory Range Check
 *
 * Takes a physical address and an offset and checks if the resulting physical
 * address would fit into one of the aux smem regions. If so, returns the
 * corresponding virtual address. Otherwise returns NULL. Expects the array
 * of smem regions to be in ascending physical address order.
 *
 * @base: physical base address to check
 * @offset: offset from the base to get the final address
 */
static void *smem_range_check(void *base, unsigned offset)
{
	int i;
	void *phys_addr;
	unsigned size;

	for (i = 0; i < num_smem_areas; ++i) {
		phys_addr = smem_areas[i].phys_addr;
		size = smem_areas[i].size;
		/* Areas are sorted ascending: once base is below this
		 * area's start it cannot be in any later area.
		 */
		if (base < phys_addr)
			return NULL;
		if (base > phys_addr + size)
			continue;
		/* NOTE(review): the containment test requires the final
		 * address to be strictly below the area end; confirm the
		 * end bound (exclusive vs inclusive) matches how
		 * smem_areas[].size is populated.
		 */
		if (base >= phys_addr && base + offset < phys_addr + size)
			return smem_areas[i].virt_addr + offset;
	}

	return NULL;
}
2243
/* smem_alloc returns the pointer to smem item if it is already allocated.
 * Otherwise, it returns NULL.  Lookup-only: despite the name it never
 * allocates; smem_alloc2() performs allocation.
 */
void *smem_alloc(unsigned id, unsigned size)
{
	return smem_find(id, size);
}
EXPORT_SYMBOL(smem_alloc);
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002252
/* smem_alloc2 returns the pointer to smem item. If it is not allocated,
 * it allocates it and then returns the pointer to it.
 *
 * The heap TOC is shared with remote processors, so all updates happen
 * under the remote spinlock, and the entry is published (allocated = 1)
 * only after its offset/size are written (wmb() ordering).
 */
void *smem_alloc2(unsigned id, unsigned size_in)
{
	struct smem_shared *shared = (void *) MSM_SHARED_RAM_BASE;
	struct smem_heap_entry *toc = shared->heap_toc;
	unsigned long flags;
	void *ret = NULL;

	if (!shared->heap_info.initialized) {
		pr_err("%s: smem heap info not initialized\n", __func__);
		return NULL;
	}

	if (id >= SMEM_NUM_ITEMS)
		return NULL;

	/* All items are 8-byte aligned/padded. */
	size_in = ALIGN(size_in, 8);
	remote_spin_lock_irqsave(&remote_spinlock, flags);
	if (toc[id].allocated) {
		SMD_DBG("%s: %u already allocated\n", __func__, id);
		/* Existing entry must match the requested size exactly. */
		if (size_in != toc[id].size)
			pr_err("%s: wrong size %u (expected %u)\n",
				__func__, toc[id].size, size_in);
		else
			ret = (void *)(MSM_SHARED_RAM_BASE + toc[id].offset);
	} else if (id > SMEM_FIXED_ITEM_LAST) {
		/* Only dynamic (non-fixed) items may be allocated here. */
		SMD_DBG("%s: allocating %u\n", __func__, id);
		if (shared->heap_info.heap_remaining >= size_in) {
			toc[id].offset = shared->heap_info.free_offset;
			toc[id].size = size_in;
			/* Publish offset/size before marking allocated. */
			wmb();
			toc[id].allocated = 1;

			shared->heap_info.free_offset += size_in;
			shared->heap_info.heap_remaining -= size_in;
			ret = (void *)(MSM_SHARED_RAM_BASE + toc[id].offset);
		} else
			pr_err("%s: not enough memory %u (required %u)\n",
				__func__, shared->heap_info.heap_remaining,
				size_in);
	}
	/* Flush heap bookkeeping before releasing the remote lock. */
	wmb();
	remote_spin_unlock_irqrestore(&remote_spinlock, flags);
	return ret;
}
EXPORT_SYMBOL(smem_alloc2);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002301
/*
 * Look up SMEM item @id.  On success returns its virtual address and
 * stores its size in *@size; returns NULL (and *size = 0) if the item
 * is not allocated or the id is out of range.
 *
 * The remote spinlock is taken only once it has been initialized, so
 * this is callable during early boot before smsm_init().
 */
void *smem_get_entry(unsigned id, unsigned *size)
{
	struct smem_shared *shared = (void *) MSM_SHARED_RAM_BASE;
	struct smem_heap_entry *toc = shared->heap_toc;
	int use_spinlocks = spinlocks_initialized;
	void *ret = 0;
	unsigned long flags = 0;

	if (id >= SMEM_NUM_ITEMS)
		return ret;

	if (use_spinlocks)
		remote_spin_lock_irqsave(&remote_spinlock, flags);
	/* toc is in device memory and cannot be speculatively accessed */
	if (toc[id].allocated) {
		*size = toc[id].size;
		barrier();
		/* A base address in the reserved field means the item
		 * lives in an aux region; validate and translate it.
		 */
		if (!(toc[id].reserved & BASE_ADDR_MASK))
			ret = (void *) (MSM_SHARED_RAM_BASE + toc[id].offset);
		else
			ret = smem_range_check(
				(void *)(toc[id].reserved & BASE_ADDR_MASK),
				toc[id].offset);
	} else {
		*size = 0;
	}
	if (use_spinlocks)
		remote_spin_unlock_irqrestore(&remote_spinlock, flags);

	return ret;
}
EXPORT_SYMBOL(smem_get_entry);
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002334
/*
 * Look up SMEM item @id and verify that its recorded size matches
 * @size_in (after 8-byte alignment).  Returns the item's address, or
 * NULL/0 if the item is absent or the size differs.
 */
void *smem_find(unsigned id, unsigned size_in)
{
	unsigned size;
	void *ptr;

	ptr = smem_get_entry(id, &size);
	if (!ptr)
		return 0;

	size_in = ALIGN(size_in, 8);
	if (size_in != size) {
		/* %u throughout: id and both sizes are unsigned (the old
		 * %d specifiers mismatched the argument types).
		 */
		pr_err("smem_find(%u, %u): wrong size %u\n",
		       id, size_in, size);
		return 0;
	}

	return ptr;
}
EXPORT_SYMBOL(smem_find);
2354
2355static int smsm_cb_init(void)
2356{
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002357 struct smsm_state_info *state_info;
2358 int n;
2359 int ret = 0;
2360
2361 smsm_states = kmalloc(sizeof(struct smsm_state_info)*SMSM_NUM_ENTRIES,
2362 GFP_KERNEL);
2363
2364 if (!smsm_states) {
2365 pr_err("%s: SMSM init failed\n", __func__);
2366 return -ENOMEM;
2367 }
2368
Karthikeyan Ramasubramanian3249a222012-04-18 17:16:49 -06002369 smsm_cb_wq = create_singlethread_workqueue("smsm_cb_wq");
2370 if (!smsm_cb_wq) {
2371 pr_err("%s: smsm_cb_wq creation failed\n", __func__);
2372 kfree(smsm_states);
2373 return -EFAULT;
2374 }
2375
Eric Holmbergc8002902011-09-16 13:55:57 -06002376 mutex_lock(&smsm_lock);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002377 for (n = 0; n < SMSM_NUM_ENTRIES; n++) {
2378 state_info = &smsm_states[n];
2379 state_info->last_value = __raw_readl(SMSM_STATE_ADDR(n));
Eric Holmberge8a39322012-04-03 15:14:02 -06002380 state_info->intr_mask_set = 0x0;
2381 state_info->intr_mask_clear = 0x0;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002382 INIT_LIST_HEAD(&state_info->callbacks);
2383 }
Eric Holmbergc8002902011-09-16 13:55:57 -06002384 mutex_unlock(&smsm_lock);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002385
2386 return ret;
2387}
2388
/*
 * One-time SMSM initialization: sets up the remote spinlock, sizes the
 * SMSM tables from shared memory (when the size-info item exists),
 * allocates the snapshot FIFO and wakelock, maps the shared state /
 * interrupt-mask / interrupt-mux regions, and finally initializes the
 * callback infrastructure.
 *
 * Returns 0 on success or a negative error code from any failed step.
 * NOTE(review): the error paths do not undo earlier steps (fifo, wakelock),
 * presumably because a failed init is fatal for the driver — confirm.
 */
static int smsm_init(void)
{
	struct smem_shared *shared = (void *) MSM_SHARED_RAM_BASE;
	int i;
	struct smsm_size_info_type *smsm_size_info;

	i = remote_spin_lock_init(&remote_spinlock, SMEM_SPINLOCK_SMEM_ALLOC);
	if (i) {
		pr_err("%s: remote spinlock init failed %d\n", __func__, i);
		return i;
	}
	/* from here on smem_get_entry() may take the remote spinlock */
	spinlocks_initialized = 1;

	/* Newer images publish the table dimensions in shared memory;
	 * fall back to the compile-time defaults when absent. */
	smsm_size_info = smem_alloc(SMEM_SMSM_SIZE_INFO,
				sizeof(struct smsm_size_info_type));
	if (smsm_size_info) {
		SMSM_NUM_ENTRIES = smsm_size_info->num_entries;
		SMSM_NUM_HOSTS = smsm_size_info->num_hosts;
	}

	/* FIFO holds up to SMSM_SNAPSHOT_CNT full state snapshots */
	i = kfifo_alloc(&smsm_snapshot_fifo,
			sizeof(uint32_t) * SMSM_NUM_ENTRIES * SMSM_SNAPSHOT_CNT,
			GFP_KERNEL);
	if (i) {
		pr_err("%s: SMSM state fifo alloc failed %d\n", __func__, i);
		return i;
	}
	wake_lock_init(&smsm_snapshot_wakelock, WAKE_LOCK_SUSPEND,
			"smsm_snapshot");

	if (!smsm_info.state) {
		smsm_info.state = smem_alloc2(ID_SHARED_STATE,
					      SMSM_NUM_ENTRIES *
					      sizeof(uint32_t));

		if (smsm_info.state) {
			__raw_writel(0, SMSM_STATE_ADDR(SMSM_APPS_STATE));
			/* DEM entry only exists on modem images >= 0xB */
			if ((shared->version[VERSION_MODEM] >> 16) >= 0xB)
				__raw_writel(0, \
					SMSM_STATE_ADDR(SMSM_APPS_DEM_I));
		}
	}

	if (!smsm_info.intr_mask) {
		smsm_info.intr_mask = smem_alloc2(SMEM_SMSM_CPU_INTR_MASK,
						  SMSM_NUM_ENTRIES *
						  SMSM_NUM_HOSTS *
						  sizeof(uint32_t));

		if (smsm_info.intr_mask) {
			/* start with all interrupts masked for apps */
			for (i = 0; i < SMSM_NUM_ENTRIES; i++)
				__raw_writel(0x0,
					SMSM_INTR_MASK_ADDR(i, SMSM_APPS));

			/* Configure legacy modem bits */
			__raw_writel(LEGACY_MODEM_SMSM_MASK,
				SMSM_INTR_MASK_ADDR(SMSM_MODEM_STATE,
					SMSM_APPS));
		}
	}

	if (!smsm_info.intr_mux)
		smsm_info.intr_mux = smem_alloc2(SMEM_SMD_SMSM_INTR_MUX,
						 SMSM_NUM_INTR_MUX *
						 sizeof(uint32_t));

	i = smsm_cb_init();
	if (i)
		return i;

	/* ensure all shared-memory writes land before announcing init */
	wmb();
	smsm_driver_state_notify(SMSM_INIT, NULL);
	return 0;
}
2463
2464void smsm_reset_modem(unsigned mode)
2465{
2466 if (mode == SMSM_SYSTEM_DOWNLOAD) {
2467 mode = SMSM_RESET | SMSM_SYSTEM_DOWNLOAD;
2468 } else if (mode == SMSM_MODEM_WAIT) {
2469 mode = SMSM_RESET | SMSM_MODEM_WAIT;
2470 } else { /* reset_mode is SMSM_RESET or default */
2471 mode = SMSM_RESET;
2472 }
2473
2474 smsm_change_state(SMSM_APPS_STATE, mode, mode);
2475}
2476EXPORT_SYMBOL(smsm_reset_modem);
2477
2478void smsm_reset_modem_cont(void)
2479{
2480 unsigned long flags;
2481 uint32_t state;
2482
2483 if (!smsm_info.state)
2484 return;
2485
2486 spin_lock_irqsave(&smem_lock, flags);
2487 state = __raw_readl(SMSM_STATE_ADDR(SMSM_APPS_STATE)) \
2488 & ~SMSM_MODEM_WAIT;
2489 __raw_writel(state, SMSM_STATE_ADDR(SMSM_APPS_STATE));
2490 wmb();
2491 spin_unlock_irqrestore(&smem_lock, flags);
2492}
2493EXPORT_SYMBOL(smsm_reset_modem_cont);
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002494
/*
 * Captures a snapshot of all SMSM state entries into the snapshot FIFO
 * and queues the callback worker to deliver notifications.
 *
 * @use_wakelock: non-zero to hold a wakelock until the worker has
 *                consumed this snapshot (reference counted).
 *
 * Caller context: called with smem_lock held from IRQ context.
 */
static void smsm_cb_snapshot(uint32_t use_wakelock)
{
	int n;
	uint32_t new_state;
	unsigned long flags;
	int ret;

	/* need room for one full snapshot (all entries + wakelock flag) */
	ret = kfifo_avail(&smsm_snapshot_fifo);
	if (ret < SMSM_SNAPSHOT_SIZE) {
		pr_err("%s: SMSM snapshot full %d\n", __func__, ret);
		return;
	}

	/*
	 * To avoid a race condition with notify_smsm_cb_clients_worker, the
	 * following sequence must be followed:
	 * 1) increment snapshot count
	 * 2) insert data into FIFO
	 *
	 * Potentially in parallel, the worker:
	 * a) verifies >= 1 snapshots are in FIFO
	 * b) processes snapshot
	 * c) decrements reference count
	 *
	 * This order ensures that 1 will always occur before abc.
	 */
	if (use_wakelock) {
		spin_lock_irqsave(&smsm_snapshot_count_lock, flags);
		if (smsm_snapshot_count == 0) {
			/* first outstanding snapshot grabs the wakelock */
			SMx_POWER_INFO("SMSM snapshot wake lock\n");
			wake_lock(&smsm_snapshot_wakelock);
		}
		++smsm_snapshot_count;
		spin_unlock_irqrestore(&smsm_snapshot_count_lock, flags);
	}

	/* queue state entries */
	for (n = 0; n < SMSM_NUM_ENTRIES; n++) {
		new_state = __raw_readl(SMSM_STATE_ADDR(n));

		ret = kfifo_in(&smsm_snapshot_fifo,
				&new_state, sizeof(new_state));
		if (ret != sizeof(new_state)) {
			pr_err("%s: SMSM snapshot failure %d\n", __func__, ret);
			goto restore_snapshot_count;
		}
	}

	/* queue wakelock usage flag */
	ret = kfifo_in(&smsm_snapshot_fifo,
			&use_wakelock, sizeof(use_wakelock));
	if (ret != sizeof(use_wakelock)) {
		pr_err("%s: SMSM snapshot failure %d\n", __func__, ret);
		goto restore_snapshot_count;
	}

	queue_work(smsm_cb_wq, &smsm_cb_work);
	return;

restore_snapshot_count:
	/* partial snapshot was written; drop our wakelock reference so the
	 * count stays in sync with complete snapshots in the FIFO */
	if (use_wakelock) {
		spin_lock_irqsave(&smsm_snapshot_count_lock, flags);
		if (smsm_snapshot_count) {
			--smsm_snapshot_count;
			if (smsm_snapshot_count == 0) {
				SMx_POWER_INFO("SMSM snapshot wake unlock\n");
				wake_unlock(&smsm_snapshot_wakelock);
			}
		} else {
			pr_err("%s: invalid snapshot count\n", __func__);
		}
		spin_unlock_irqrestore(&smsm_snapshot_count_lock, flags);
	}
}
2569
/*
 * Common SMSM interrupt handler: snapshots remote state changes for the
 * callback worker and, for modem interrupts, runs the apps-side SMSM
 * state machine (reset handshake, init/run transitions).
 */
static irqreturn_t smsm_irq_handler(int irq, void *data)
{
	unsigned long flags;

	if (irq == INT_ADSP_A11_SMSM) {
		uint32_t mux_val;
		static uint32_t prev_smem_q6_apps_smsm;

		/* 8x50 multiplexes Q6->apps events through an intr mux word;
		 * track its latest value */
		if (smsm_info.intr_mux && cpu_is_qsd8x50()) {
			mux_val = __raw_readl(
					SMSM_INTR_MUX_ADDR(SMEM_Q6_APPS_SMSM));
			if (mux_val != prev_smem_q6_apps_smsm)
				prev_smem_q6_apps_smsm = mux_val;
		}

		/* DSP interrupts only need a snapshot, no state machine */
		spin_lock_irqsave(&smem_lock, flags);
		smsm_cb_snapshot(1);
		spin_unlock_irqrestore(&smem_lock, flags);
		return IRQ_HANDLED;
	}

	spin_lock_irqsave(&smem_lock, flags);
	if (!smsm_info.state) {
		SMSM_INFO("<SM NO STATE>\n");
	} else {
		unsigned old_apps, apps;
		unsigned modm = __raw_readl(SMSM_STATE_ADDR(SMSM_MODEM_STATE));

		old_apps = apps = __raw_readl(SMSM_STATE_ADDR(SMSM_APPS_STATE));

		SMSM_DBG("<SM %08x %08x>\n", apps, modm);
		if (apps & SMSM_RESET) {
			/* If we get an interrupt and the apps SMSM_RESET
			   bit is already set, the modem is acking the
			   app's reset ack. */
			if (!disable_smsm_reset_handshake)
				apps &= ~SMSM_RESET;
			/* Issue a fake irq to handle any
			 * smd state changes during reset
			 */
			smd_fake_irq_handler(0);

			/* queue modem restart notify chain */
			modem_queue_start_reset_notify();

		} else if (modm & SMSM_RESET) {
			pr_err("\nSMSM: Modem SMSM state changed to SMSM_RESET.");
			if (!disable_smsm_reset_handshake) {
				/* ack the reset; flush caches so RAM dumps
				 * taken by the modem are coherent */
				apps |= SMSM_RESET;
				flush_cache_all();
				outer_flush_all();
			}
			modem_queue_start_reset_notify();

		} else if (modm & SMSM_INIT) {
			if (!(apps & SMSM_INIT)) {
				apps |= SMSM_INIT;
				modem_queue_smsm_init_notify();
			}

			if (modm & SMSM_SMDINIT)
				apps |= SMSM_SMDINIT;
			/* advertise RUN once all init stages are complete */
			if ((apps & (SMSM_INIT | SMSM_SMDINIT | SMSM_RPCINIT)) ==
				(SMSM_INIT | SMSM_SMDINIT | SMSM_RPCINIT))
				apps |= SMSM_RUN;
		} else if (modm & SMSM_SYSTEM_DOWNLOAD) {
			pr_err("\nSMSM: Modem SMSM state changed to SMSM_SYSTEM_DOWNLOAD.");
			modem_queue_start_reset_notify();
		}

		if (old_apps != apps) {
			/* publish the new apps state and kick the remote end */
			SMSM_DBG("<SM %08x NOTIFY>\n", apps);
			__raw_writel(apps, SMSM_STATE_ADDR(SMSM_APPS_STATE));
			do_smd_probe();
			notify_other_smsm(SMSM_APPS_STATE, (old_apps ^ apps));
		}

		smsm_cb_snapshot(1);
	}
	spin_unlock_irqrestore(&smem_lock, flags);
	return IRQ_HANDLED;
}
2652
Eric Holmberg98c6c642012-02-24 11:29:35 -07002653static irqreturn_t smsm_modem_irq_handler(int irq, void *data)
2654{
2655 SMx_POWER_INFO("SMSM Int Modem->Apps\n");
Eric Holmberg7ad623a2012-03-01 14:41:10 -07002656 ++interrupt_stats[SMD_MODEM].smsm_in_count;
Eric Holmberg98c6c642012-02-24 11:29:35 -07002657 return smsm_irq_handler(irq, data);
2658}
2659
2660static irqreturn_t smsm_dsp_irq_handler(int irq, void *data)
2661{
2662 SMx_POWER_INFO("SMSM Int LPASS->Apps\n");
Eric Holmberg7ad623a2012-03-01 14:41:10 -07002663 ++interrupt_stats[SMD_Q6].smsm_in_count;
Eric Holmberg98c6c642012-02-24 11:29:35 -07002664 return smsm_irq_handler(irq, data);
2665}
2666
2667static irqreturn_t smsm_dsps_irq_handler(int irq, void *data)
2668{
2669 SMx_POWER_INFO("SMSM Int DSPS->Apps\n");
Eric Holmberg7ad623a2012-03-01 14:41:10 -07002670 ++interrupt_stats[SMD_DSPS].smsm_in_count;
Eric Holmberg98c6c642012-02-24 11:29:35 -07002671 return smsm_irq_handler(irq, data);
2672}
2673
2674static irqreturn_t smsm_wcnss_irq_handler(int irq, void *data)
2675{
2676 SMx_POWER_INFO("SMSM Int WCNSS->Apps\n");
Eric Holmberg7ad623a2012-03-01 14:41:10 -07002677 ++interrupt_stats[SMD_WCNSS].smsm_in_count;
Eric Holmberg98c6c642012-02-24 11:29:35 -07002678 return smsm_irq_handler(irq, data);
2679}
2680
Eric Holmberge8a39322012-04-03 15:14:02 -06002681/*
2682 * Changes the global interrupt mask. The set and clear masks are re-applied
2683 * every time the global interrupt mask is updated for callback registration
2684 * and de-registration.
2685 *
2686 * The clear mask is applied first, so if a bit is set to 1 in both the clear
2687 * mask and the set mask, the result will be that the interrupt is set.
2688 *
2689 * @smsm_entry SMSM entry to change
2690 * @clear_mask 1 = clear bit, 0 = no-op
2691 * @set_mask 1 = set bit, 0 = no-op
2692 *
2693 * @returns 0 for success, < 0 for error
2694 */
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002695int smsm_change_intr_mask(uint32_t smsm_entry,
2696 uint32_t clear_mask, uint32_t set_mask)
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002697{
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002698 uint32_t old_mask, new_mask;
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002699 unsigned long flags;
Brian Swetland5b0f5a32009-04-26 18:38:49 -07002700
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002701 if (smsm_entry >= SMSM_NUM_ENTRIES) {
2702 pr_err("smsm_change_state: Invalid entry %d\n",
2703 smsm_entry);
2704 return -EINVAL;
2705 }
2706
2707 if (!smsm_info.intr_mask) {
2708 pr_err("smsm_change_intr_mask <SM NO STATE>\n");
Brian Swetland5b0f5a32009-04-26 18:38:49 -07002709 return -EIO;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002710 }
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002711
2712 spin_lock_irqsave(&smem_lock, flags);
Eric Holmberge8a39322012-04-03 15:14:02 -06002713 smsm_states[smsm_entry].intr_mask_clear = clear_mask;
2714 smsm_states[smsm_entry].intr_mask_set = set_mask;
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002715
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002716 old_mask = __raw_readl(SMSM_INTR_MASK_ADDR(smsm_entry, SMSM_APPS));
2717 new_mask = (old_mask & ~clear_mask) | set_mask;
2718 __raw_writel(new_mask, SMSM_INTR_MASK_ADDR(smsm_entry, SMSM_APPS));
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002719
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002720 wmb();
2721 spin_unlock_irqrestore(&smem_lock, flags);
Brian Swetland5b0f5a32009-04-26 18:38:49 -07002722
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002723 return 0;
2724}
2725EXPORT_SYMBOL(smsm_change_intr_mask);
2726
2727int smsm_get_intr_mask(uint32_t smsm_entry, uint32_t *intr_mask)
2728{
2729 if (smsm_entry >= SMSM_NUM_ENTRIES) {
2730 pr_err("smsm_change_state: Invalid entry %d\n",
2731 smsm_entry);
2732 return -EINVAL;
2733 }
2734
2735 if (!smsm_info.intr_mask) {
2736 pr_err("smsm_change_intr_mask <SM NO STATE>\n");
2737 return -EIO;
2738 }
2739
2740 *intr_mask = __raw_readl(SMSM_INTR_MASK_ADDR(smsm_entry, SMSM_APPS));
2741 return 0;
2742}
2743EXPORT_SYMBOL(smsm_get_intr_mask);
2744
2745int smsm_change_state(uint32_t smsm_entry,
2746 uint32_t clear_mask, uint32_t set_mask)
2747{
2748 unsigned long flags;
2749 uint32_t old_state, new_state;
2750
2751 if (smsm_entry >= SMSM_NUM_ENTRIES) {
2752 pr_err("smsm_change_state: Invalid entry %d",
2753 smsm_entry);
2754 return -EINVAL;
2755 }
2756
2757 if (!smsm_info.state) {
2758 pr_err("smsm_change_state <SM NO STATE>\n");
2759 return -EIO;
2760 }
2761 spin_lock_irqsave(&smem_lock, flags);
2762
2763 old_state = __raw_readl(SMSM_STATE_ADDR(smsm_entry));
2764 new_state = (old_state & ~clear_mask) | set_mask;
2765 __raw_writel(new_state, SMSM_STATE_ADDR(smsm_entry));
2766 SMSM_DBG("smsm_change_state %x\n", new_state);
2767 notify_other_smsm(SMSM_APPS_STATE, (old_state ^ new_state));
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002768
2769 spin_unlock_irqrestore(&smem_lock, flags);
2770
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002771 return 0;
2772}
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002773EXPORT_SYMBOL(smsm_change_state);
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002774
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002775uint32_t smsm_get_state(uint32_t smsm_entry)
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002776{
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002777 uint32_t rv = 0;
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002778
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002779 /* needs interface change to return error code */
2780 if (smsm_entry >= SMSM_NUM_ENTRIES) {
2781 pr_err("smsm_change_state: Invalid entry %d",
2782 smsm_entry);
2783 return 0;
2784 }
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002785
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002786 if (!smsm_info.state) {
2787 pr_err("smsm_get_state <SM NO STATE>\n");
2788 } else {
2789 rv = __raw_readl(SMSM_STATE_ADDR(smsm_entry));
2790 }
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002791
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002792 return rv;
2793}
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002794EXPORT_SYMBOL(smsm_get_state);
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002795
/**
 * Performs SMSM callback client notification.
 *
 * Drains complete snapshots from the snapshot FIFO, invokes every
 * registered callback whose mask intersects the changed bits, and drops
 * the wakelock reference taken by smsm_cb_snapshot() when the snapshot
 * was queued with a wakelock.  Runs on smsm_cb_wq.
 */
void notify_smsm_cb_clients_worker(struct work_struct *work)
{
	struct smsm_state_cb_info *cb_info;
	struct smsm_state_info *state_info;
	int n;
	uint32_t new_state;
	uint32_t state_changes;
	uint32_t use_wakelock;
	int ret;
	unsigned long flags;

	if (!smd_initialized)
		return;

	/* each iteration consumes exactly one complete snapshot */
	while (kfifo_len(&smsm_snapshot_fifo) >= SMSM_SNAPSHOT_SIZE) {
		mutex_lock(&smsm_lock);
		for (n = 0; n < SMSM_NUM_ENTRIES; n++) {
			state_info = &smsm_states[n];

			ret = kfifo_out(&smsm_snapshot_fifo, &new_state,
					sizeof(new_state));
			if (ret != sizeof(new_state)) {
				pr_err("%s: snapshot underflow %d\n",
					__func__, ret);
				mutex_unlock(&smsm_lock);
				return;
			}

			/* notify only callbacks interested in changed bits */
			state_changes = state_info->last_value ^ new_state;
			if (state_changes) {
				SMx_POWER_INFO("SMSM Change %d: %08x->%08x\n",
						n, state_info->last_value,
						new_state);
				list_for_each_entry(cb_info,
					&state_info->callbacks, cb_list) {

					if (cb_info->mask & state_changes)
						cb_info->notify(cb_info->data,
							state_info->last_value,
							new_state);
				}
				state_info->last_value = new_state;
			}
		}

		/* read wakelock flag */
		ret = kfifo_out(&smsm_snapshot_fifo, &use_wakelock,
				sizeof(use_wakelock));
		if (ret != sizeof(use_wakelock)) {
			pr_err("%s: snapshot underflow %d\n",
				__func__, ret);
			mutex_unlock(&smsm_lock);
			return;
		}
		mutex_unlock(&smsm_lock);

		/* release the reference taken when this snapshot was queued */
		if (use_wakelock) {
			spin_lock_irqsave(&smsm_snapshot_count_lock, flags);
			if (smsm_snapshot_count) {
				--smsm_snapshot_count;
				if (smsm_snapshot_count == 0) {
					SMx_POWER_INFO("SMSM snapshot"
						" wake unlock\n");
					wake_unlock(&smsm_snapshot_wakelock);
				}
			} else {
				pr_err("%s: invalid snapshot count\n",
					__func__);
			}
			spin_unlock_irqrestore(&smsm_snapshot_count_lock,
					flags);
		}
	}
}
2873
Arve Hjønnevågec9d3d12009-06-16 14:48:21 -07002874
/**
 * Registers a callback for SMSM state notifications when the specified
 * bits change.
 *
 * (Original comment was a copy-paste of the deregister doc; corrected.)
 *
 * @smsm_entry Processor entry to monitor
 * @mask Bits to monitor (ORed into the mask of an existing entry for the
 *       same notify/data pair)
 * @notify Notification function to register
 * @data Opaque data passed in to callback
 *
 * @returns Status code
 *          <0 error code
 *          0  inserted new entry
 *          1  updated mask of existing entry
 */
int smsm_state_cb_register(uint32_t smsm_entry, uint32_t mask,
		void (*notify)(void *, uint32_t, uint32_t), void *data)
{
	struct smsm_state_info *state;
	struct smsm_state_cb_info *cb_info;
	struct smsm_state_cb_info *cb_found = 0;
	uint32_t new_mask = 0;
	int ret = 0;

	if (smsm_entry >= SMSM_NUM_ENTRIES)
		return -EINVAL;

	mutex_lock(&smsm_lock);

	if (!smsm_states) {
		/* smsm not yet initialized */
		ret = -ENODEV;
		goto cleanup;
	}

	/* update an existing registration if notify/data match; also
	 * accumulate the union of all callback masks for this entry */
	state = &smsm_states[smsm_entry];
	list_for_each_entry(cb_info,
			&state->callbacks, cb_list) {
		if (!ret && (cb_info->notify == notify) &&
				(cb_info->data == data)) {
			cb_info->mask |= mask;
			cb_found = cb_info;
			ret = 1;
		}
		new_mask |= cb_info->mask;
	}

	if (!cb_found) {
		/* GFP_ATOMIC: presumably callable from atomic context —
		 * TODO confirm against callers */
		cb_info = kmalloc(sizeof(struct smsm_state_cb_info),
			GFP_ATOMIC);
		if (!cb_info) {
			ret = -ENOMEM;
			goto cleanup;
		}

		cb_info->mask = mask;
		cb_info->notify = notify;
		cb_info->data = data;
		INIT_LIST_HEAD(&cb_info->cb_list);
		list_add_tail(&cb_info->cb_list,
			&state->callbacks);
		new_mask |= mask;
	}

	/* update interrupt notification mask */
	if (smsm_entry == SMSM_MODEM_STATE)
		new_mask |= LEGACY_MODEM_SMSM_MASK;

	if (smsm_info.intr_mask) {
		unsigned long flags;

		spin_lock_irqsave(&smem_lock, flags);
		/* apply the globally-requested clear/set bits on top of the
		 * callback-derived mask (see smsm_change_intr_mask) */
		new_mask = (new_mask & ~state->intr_mask_clear)
				| state->intr_mask_set;
		__raw_writel(new_mask,
				SMSM_INTR_MASK_ADDR(smsm_entry, SMSM_APPS));
		wmb();
		spin_unlock_irqrestore(&smem_lock, flags);
	}

cleanup:
	mutex_unlock(&smsm_lock);
	return ret;
}
EXPORT_SYMBOL(smsm_state_cb_register);
2959
2960
/**
 * Deregisters for SMSM state notifications for the specified bits.
 *
 * @smsm_entry Processor entry to deregister
 * @mask Bits to deregister (if result is 0, callback is removed)
 * @notify Notification function to deregister
 * @data Opaque data passed in to callback
 *
 * @returns Status code
 *          <0 error code
 *          0  not found
 *          1  updated mask
 *          2  removed callback
 */
int smsm_state_cb_deregister(uint32_t smsm_entry, uint32_t mask,
		void (*notify)(void *, uint32_t, uint32_t), void *data)
{
	struct smsm_state_cb_info *cb_info;
	struct smsm_state_cb_info *cb_tmp;
	struct smsm_state_info *state;
	uint32_t new_mask = 0;
	int ret = 0;

	if (smsm_entry >= SMSM_NUM_ENTRIES)
		return -EINVAL;

	mutex_lock(&smsm_lock);

	if (!smsm_states) {
		/* smsm not yet initialized */
		mutex_unlock(&smsm_lock);
		return -ENODEV;
	}

	/* clear the requested bits from the matching registration and
	 * rebuild the union of the remaining callback masks; _safe
	 * iteration because a fully-cleared entry is deleted in place */
	state = &smsm_states[smsm_entry];
	list_for_each_entry_safe(cb_info, cb_tmp,
		&state->callbacks, cb_list) {
		if (!ret && (cb_info->notify == notify) &&
			(cb_info->data == data)) {
			cb_info->mask &= ~mask;
			ret = 1;
			if (!cb_info->mask) {
				/* no mask bits set, remove callback */
				list_del(&cb_info->cb_list);
				kfree(cb_info);
				ret = 2;
				continue;
			}
		}
		new_mask |= cb_info->mask;
	}

	/* update interrupt notification mask */
	if (smsm_entry == SMSM_MODEM_STATE)
		new_mask |= LEGACY_MODEM_SMSM_MASK;

	if (smsm_info.intr_mask) {
		unsigned long flags;

		spin_lock_irqsave(&smem_lock, flags);
		/* re-apply globally requested clear/set bits (see
		 * smsm_change_intr_mask) */
		new_mask = (new_mask & ~state->intr_mask_clear)
				| state->intr_mask_set;
		__raw_writel(new_mask,
				SMSM_INTR_MASK_ADDR(smsm_entry, SMSM_APPS));
		wmb();
		spin_unlock_irqrestore(&smem_lock, flags);
	}

	mutex_unlock(&smsm_lock);
	return ret;
}
EXPORT_SYMBOL(smsm_state_cb_deregister);
3033
Karthikeyan Ramasubramanian7069c482012-03-22 09:21:20 -06003034int smsm_driver_state_notifier_register(struct notifier_block *nb)
3035{
3036 int ret;
3037 if (!nb)
3038 return -EINVAL;
3039 mutex_lock(&smsm_driver_state_notifier_lock);
3040 ret = raw_notifier_chain_register(&smsm_driver_state_notifier_list, nb);
3041 mutex_unlock(&smsm_driver_state_notifier_lock);
3042 return ret;
3043}
3044EXPORT_SYMBOL(smsm_driver_state_notifier_register);
3045
3046int smsm_driver_state_notifier_unregister(struct notifier_block *nb)
3047{
3048 int ret;
3049 if (!nb)
3050 return -EINVAL;
3051 mutex_lock(&smsm_driver_state_notifier_lock);
3052 ret = raw_notifier_chain_unregister(&smsm_driver_state_notifier_list,
3053 nb);
3054 mutex_unlock(&smsm_driver_state_notifier_lock);
3055 return ret;
3056}
3057EXPORT_SYMBOL(smsm_driver_state_notifier_unregister);
3058
3059static void smsm_driver_state_notify(uint32_t state, void *data)
3060{
3061 mutex_lock(&smsm_driver_state_notifier_lock);
3062 raw_notifier_call_chain(&smsm_driver_state_notifier_list,
3063 state, data);
3064 mutex_unlock(&smsm_driver_state_notifier_lock);
3065}
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003066
3067int smd_core_init(void)
3068{
3069 int r;
3070 unsigned long flags = IRQF_TRIGGER_RISING;
3071 SMD_INFO("smd_core_init()\n");
Brian Swetland5b0f5a32009-04-26 18:38:49 -07003072
Brian Swetland37521a32009-07-01 18:30:47 -07003073 r = request_irq(INT_A9_M2A_0, smd_modem_irq_handler,
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003074 flags, "smd_dev", 0);
Brian Swetland2eb44eb2008-09-29 16:00:48 -07003075 if (r < 0)
3076 return r;
3077 r = enable_irq_wake(INT_A9_M2A_0);
3078 if (r < 0)
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003079 pr_err("smd_core_init: "
3080 "enable_irq_wake failed for INT_A9_M2A_0\n");
Brian Swetland2eb44eb2008-09-29 16:00:48 -07003081
Eric Holmberg98c6c642012-02-24 11:29:35 -07003082 r = request_irq(INT_A9_M2A_5, smsm_modem_irq_handler,
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003083 flags, "smsm_dev", 0);
Brian Swetland2eb44eb2008-09-29 16:00:48 -07003084 if (r < 0) {
3085 free_irq(INT_A9_M2A_0, 0);
3086 return r;
3087 }
3088 r = enable_irq_wake(INT_A9_M2A_5);
3089 if (r < 0)
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003090 pr_err("smd_core_init: "
3091 "enable_irq_wake failed for INT_A9_M2A_5\n");
Brian Swetland2eb44eb2008-09-29 16:00:48 -07003092
Brian Swetland37521a32009-07-01 18:30:47 -07003093#if defined(CONFIG_QDSP6)
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003094#if (INT_ADSP_A11 == INT_ADSP_A11_SMSM)
3095 flags |= IRQF_SHARED;
3096#endif
Brian Swetland37521a32009-07-01 18:30:47 -07003097 r = request_irq(INT_ADSP_A11, smd_dsp_irq_handler,
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003098 flags, "smd_dev", smd_dsp_irq_handler);
Brian Swetland37521a32009-07-01 18:30:47 -07003099 if (r < 0) {
3100 free_irq(INT_A9_M2A_0, 0);
3101 free_irq(INT_A9_M2A_5, 0);
3102 return r;
3103 }
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003104
Eric Holmberg98c6c642012-02-24 11:29:35 -07003105 r = request_irq(INT_ADSP_A11_SMSM, smsm_dsp_irq_handler,
3106 flags, "smsm_dev", smsm_dsp_irq_handler);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003107 if (r < 0) {
3108 free_irq(INT_A9_M2A_0, 0);
3109 free_irq(INT_A9_M2A_5, 0);
3110 free_irq(INT_ADSP_A11, smd_dsp_irq_handler);
3111 return r;
3112 }
3113
3114 r = enable_irq_wake(INT_ADSP_A11);
3115 if (r < 0)
3116 pr_err("smd_core_init: "
3117 "enable_irq_wake failed for INT_ADSP_A11\n");
3118
3119#if (INT_ADSP_A11 != INT_ADSP_A11_SMSM)
3120 r = enable_irq_wake(INT_ADSP_A11_SMSM);
3121 if (r < 0)
3122 pr_err("smd_core_init: enable_irq_wake "
3123 "failed for INT_ADSP_A11_SMSM\n");
3124#endif
3125 flags &= ~IRQF_SHARED;
Brian Swetland37521a32009-07-01 18:30:47 -07003126#endif
3127
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003128#if defined(CONFIG_DSPS)
3129 r = request_irq(INT_DSPS_A11, smd_dsps_irq_handler,
3130 flags, "smd_dev", smd_dsps_irq_handler);
3131 if (r < 0) {
3132 free_irq(INT_A9_M2A_0, 0);
3133 free_irq(INT_A9_M2A_5, 0);
3134 free_irq(INT_ADSP_A11, smd_dsp_irq_handler);
Eric Holmberg98c6c642012-02-24 11:29:35 -07003135 free_irq(INT_ADSP_A11_SMSM, smsm_dsp_irq_handler);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003136 return r;
3137 }
Brian Swetland5b0f5a32009-04-26 18:38:49 -07003138
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003139 r = enable_irq_wake(INT_DSPS_A11);
3140 if (r < 0)
3141 pr_err("smd_core_init: "
3142 "enable_irq_wake failed for INT_ADSP_A11\n");
Arve Hjønnevågec9d3d12009-06-16 14:48:21 -07003143#endif
Brian Swetland2eb44eb2008-09-29 16:00:48 -07003144
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003145#if defined(CONFIG_WCNSS)
3146 r = request_irq(INT_WCNSS_A11, smd_wcnss_irq_handler,
3147 flags, "smd_dev", smd_wcnss_irq_handler);
3148 if (r < 0) {
3149 free_irq(INT_A9_M2A_0, 0);
3150 free_irq(INT_A9_M2A_5, 0);
3151 free_irq(INT_ADSP_A11, smd_dsp_irq_handler);
Eric Holmberg98c6c642012-02-24 11:29:35 -07003152 free_irq(INT_ADSP_A11_SMSM, smsm_dsp_irq_handler);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003153 free_irq(INT_DSPS_A11, smd_dsps_irq_handler);
3154 return r;
3155 }
3156
3157 r = enable_irq_wake(INT_WCNSS_A11);
3158 if (r < 0)
3159 pr_err("smd_core_init: "
3160 "enable_irq_wake failed for INT_WCNSS_A11\n");
3161
Eric Holmberg98c6c642012-02-24 11:29:35 -07003162 r = request_irq(INT_WCNSS_A11_SMSM, smsm_wcnss_irq_handler,
3163 flags, "smsm_dev", smsm_wcnss_irq_handler);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003164 if (r < 0) {
3165 free_irq(INT_A9_M2A_0, 0);
3166 free_irq(INT_A9_M2A_5, 0);
3167 free_irq(INT_ADSP_A11, smd_dsp_irq_handler);
Eric Holmberg98c6c642012-02-24 11:29:35 -07003168 free_irq(INT_ADSP_A11_SMSM, smsm_dsp_irq_handler);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003169 free_irq(INT_DSPS_A11, smd_dsps_irq_handler);
3170 free_irq(INT_WCNSS_A11, smd_wcnss_irq_handler);
3171 return r;
3172 }
3173
3174 r = enable_irq_wake(INT_WCNSS_A11_SMSM);
3175 if (r < 0)
3176 pr_err("smd_core_init: "
3177 "enable_irq_wake failed for INT_WCNSS_A11_SMSM\n");
3178#endif
3179
Jeff Hugo6a8057c2011-08-16 13:47:12 -06003180#if defined(CONFIG_DSPS_SMSM)
Eric Holmberg98c6c642012-02-24 11:29:35 -07003181 r = request_irq(INT_DSPS_A11_SMSM, smsm_dsps_irq_handler,
3182 flags, "smsm_dev", smsm_dsps_irq_handler);
Jeff Hugo6a8057c2011-08-16 13:47:12 -06003183 if (r < 0) {
3184 free_irq(INT_A9_M2A_0, 0);
3185 free_irq(INT_A9_M2A_5, 0);
3186 free_irq(INT_ADSP_A11, smd_dsp_irq_handler);
Eric Holmberg98c6c642012-02-24 11:29:35 -07003187 free_irq(INT_ADSP_A11_SMSM, smsm_dsp_irq_handler);
Jeff Hugo6a8057c2011-08-16 13:47:12 -06003188 free_irq(INT_DSPS_A11, smd_dsps_irq_handler);
3189 free_irq(INT_WCNSS_A11, smd_wcnss_irq_handler);
Eric Holmberg98c6c642012-02-24 11:29:35 -07003190 free_irq(INT_WCNSS_A11_SMSM, smsm_wcnss_irq_handler);
Jeff Hugo6a8057c2011-08-16 13:47:12 -06003191 return r;
3192 }
3193
3194 r = enable_irq_wake(INT_DSPS_A11_SMSM);
3195 if (r < 0)
3196 pr_err("smd_core_init: "
3197 "enable_irq_wake failed for INT_DSPS_A11_SMSM\n");
3198#endif
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003199 SMD_INFO("smd_core_init() done\n");
3200
Brian Swetland2eb44eb2008-09-29 16:00:48 -07003201 return 0;
3202}
3203
Angshuman Sarkarbad32df2012-02-01 19:52:52 +05303204static int intr_init(struct interrupt_config_item *private_irq,
3205 struct smd_irq_config *platform_irq,
3206 struct platform_device *pdev
3207 )
3208{
3209 int irq_id;
3210 int ret;
3211 int ret_wake;
3212
3213 private_irq->out_bit_pos = platform_irq->out_bit_pos;
3214 private_irq->out_offset = platform_irq->out_offset;
3215 private_irq->out_base = platform_irq->out_base;
3216
3217 irq_id = platform_get_irq_byname(
3218 pdev,
3219 platform_irq->irq_name
3220 );
3221 SMD_DBG("smd: %s: register irq: %s id: %d\n", __func__,
3222 platform_irq->irq_name, irq_id);
3223 ret = request_irq(irq_id,
3224 private_irq->irq_handler,
3225 platform_irq->flags,
3226 platform_irq->device_name,
3227 (void *)platform_irq->dev_id
3228 );
3229 if (ret < 0) {
3230 platform_irq->irq_id = ret;
3231 } else {
3232 platform_irq->irq_id = irq_id;
3233 ret_wake = enable_irq_wake(irq_id);
3234 if (ret_wake < 0) {
3235 pr_err("smd: enable_irq_wake failed on %s",
3236 platform_irq->irq_name);
3237 }
3238 }
3239
3240 return ret;
3241}
3242
Jeff Hugobdc734d2012-03-26 16:05:39 -06003243int sort_cmp_func(const void *a, const void *b)
3244{
3245 struct smem_area *left = (struct smem_area *)(a);
3246 struct smem_area *right = (struct smem_area *)(b);
3247
3248 return left->phys_addr - right->phys_addr;
3249}
3250
Angshuman Sarkarbad32df2012-02-01 19:52:52 +05303251int smd_core_platform_init(struct platform_device *pdev)
3252{
3253 int i;
3254 int ret;
3255 uint32_t num_ss;
3256 struct smd_platform *smd_platform_data;
3257 struct smd_subsystem_config *smd_ss_config_list;
3258 struct smd_subsystem_config *cfg;
3259 int err_ret = 0;
Jeff Hugobdc734d2012-03-26 16:05:39 -06003260 struct smd_smem_regions *smd_smem_areas;
3261 int smem_idx = 0;
Angshuman Sarkarbad32df2012-02-01 19:52:52 +05303262
3263 smd_platform_data = pdev->dev.platform_data;
3264 num_ss = smd_platform_data->num_ss_configs;
3265 smd_ss_config_list = smd_platform_data->smd_ss_configs;
3266
Eric Holmberg2bb6ccd2012-03-13 13:05:14 -06003267 if (smd_platform_data->smd_ssr_config)
3268 disable_smsm_reset_handshake = smd_platform_data->
3269 smd_ssr_config->disable_smsm_reset_handshake;
3270
Jeff Hugobdc734d2012-03-26 16:05:39 -06003271 smd_smem_areas = smd_platform_data->smd_smem_areas;
3272 if (smd_smem_areas) {
3273 num_smem_areas = smd_platform_data->num_smem_areas;
3274 smem_areas = kmalloc(sizeof(struct smem_area) * num_smem_areas,
3275 GFP_KERNEL);
3276 if (!smem_areas) {
3277 pr_err("%s: smem_areas kmalloc failed\n", __func__);
3278 err_ret = -ENOMEM;
3279 goto smem_areas_alloc_fail;
3280 }
3281
3282 for (smem_idx = 0; smem_idx < num_smem_areas; ++smem_idx) {
3283 smem_areas[smem_idx].phys_addr =
3284 smd_smem_areas[smem_idx].phys_addr;
3285 smem_areas[smem_idx].size =
3286 smd_smem_areas[smem_idx].size;
3287 smem_areas[smem_idx].virt_addr = ioremap_nocache(
3288 (unsigned long)(smem_areas[smem_idx].phys_addr),
3289 smem_areas[smem_idx].size);
3290 if (!smem_areas[smem_idx].virt_addr) {
3291 pr_err("%s: ioremap_nocache() of addr:%p"
3292 " size: %x\n", __func__,
3293 smem_areas[smem_idx].phys_addr,
3294 smem_areas[smem_idx].size);
3295 err_ret = -ENOMEM;
3296 ++smem_idx;
3297 goto smem_failed;
3298 }
3299 }
3300 sort(smem_areas, num_smem_areas,
3301 sizeof(struct smem_area),
3302 sort_cmp_func, NULL);
3303 }
3304
Angshuman Sarkarbad32df2012-02-01 19:52:52 +05303305 for (i = 0; i < num_ss; i++) {
3306 cfg = &smd_ss_config_list[i];
3307
3308 ret = intr_init(
3309 &private_intr_config[cfg->irq_config_id].smd,
3310 &cfg->smd_int,
3311 pdev
3312 );
3313
3314 if (ret < 0) {
3315 err_ret = ret;
3316 pr_err("smd: register irq failed on %s\n",
3317 cfg->smd_int.irq_name);
Jeff Hugobdc734d2012-03-26 16:05:39 -06003318 goto intr_failed;
Angshuman Sarkarbad32df2012-02-01 19:52:52 +05303319 }
3320
Jeff Hugo918b2dc2012-03-21 13:42:09 -06003321 /* only init smsm structs if this edge supports smsm */
3322 if (cfg->smsm_int.irq_id)
3323 ret = intr_init(
3324 &private_intr_config[cfg->irq_config_id].smsm,
3325 &cfg->smsm_int,
3326 pdev
3327 );
Angshuman Sarkarbad32df2012-02-01 19:52:52 +05303328
3329 if (ret < 0) {
3330 err_ret = ret;
3331 pr_err("smd: register irq failed on %s\n",
3332 cfg->smsm_int.irq_name);
Jeff Hugobdc734d2012-03-26 16:05:39 -06003333 goto intr_failed;
Angshuman Sarkarbad32df2012-02-01 19:52:52 +05303334 }
Eric Holmberg17992c12012-02-29 12:54:44 -07003335
Jeff Hugo918b2dc2012-03-21 13:42:09 -06003336 if (cfg->subsys_name)
3337 strlcpy(edge_to_pids[cfg->edge].subsys_name,
Eric Holmberg17992c12012-02-29 12:54:44 -07003338 cfg->subsys_name, SMD_MAX_CH_NAME_LEN);
Angshuman Sarkarbad32df2012-02-01 19:52:52 +05303339 }
3340
Angshuman Sarkarbad32df2012-02-01 19:52:52 +05303341
3342 SMD_INFO("smd_core_platform_init() done\n");
3343 return 0;
3344
Jeff Hugobdc734d2012-03-26 16:05:39 -06003345intr_failed:
3346 pr_err("smd: deregistering IRQs\n");
3347 for (i = 0; i < num_ss; ++i) {
3348 cfg = &smd_ss_config_list[i];
3349
3350 if (cfg->smd_int.irq_id >= 0)
3351 free_irq(cfg->smd_int.irq_id,
3352 (void *)cfg->smd_int.dev_id
3353 );
3354 if (cfg->smsm_int.irq_id >= 0)
3355 free_irq(cfg->smsm_int.irq_id,
3356 (void *)cfg->smsm_int.dev_id
3357 );
3358 }
3359smem_failed:
3360 for (smem_idx = smem_idx - 1; smem_idx >= 0; --smem_idx)
3361 iounmap(smem_areas[smem_idx].virt_addr);
3362 kfree(smem_areas);
3363smem_areas_alloc_fail:
3364 return err_ret;
Angshuman Sarkarbad32df2012-02-01 19:52:52 +05303365}
3366
Gregory Bean4416e9e2010-07-28 10:22:12 -07003367static int __devinit msm_smd_probe(struct platform_device *pdev)
Brian Swetland2eb44eb2008-09-29 16:00:48 -07003368{
Angshuman Sarkarbad32df2012-02-01 19:52:52 +05303369 int ret;
Daniel Walker0aec66d2010-03-18 12:31:08 -07003370
Angshuman Sarkarbad32df2012-02-01 19:52:52 +05303371 SMD_INFO("smd probe\n");
Brian Swetland2eb44eb2008-09-29 16:00:48 -07003372 INIT_WORK(&probe_work, smd_channel_probe_worker);
3373
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003374 channel_close_wq = create_singlethread_workqueue("smd_channel_close");
3375 if (IS_ERR(channel_close_wq)) {
3376 pr_err("%s: create_singlethread_workqueue ENOMEM\n", __func__);
3377 return -ENOMEM;
3378 }
3379
3380 if (smsm_init()) {
3381 pr_err("smsm_init() failed\n");
3382 return -1;
3383 }
3384
Angshuman Sarkarbad32df2012-02-01 19:52:52 +05303385 if (pdev) {
3386 if (pdev->dev.of_node) {
3387 pr_err("SMD: Device tree not currently supported\n");
3388 return -ENODEV;
3389 } else if (pdev->dev.platform_data) {
3390 ret = smd_core_platform_init(pdev);
3391 if (ret) {
3392 pr_err(
3393 "SMD: smd_core_platform_init() failed\n");
3394 return -ENODEV;
3395 }
3396 } else {
3397 ret = smd_core_init();
3398 if (ret) {
3399 pr_err("smd_core_init() failed\n");
3400 return -ENODEV;
3401 }
3402 }
3403 } else {
3404 pr_err("SMD: PDEV not found\n");
3405 return -ENODEV;
Brian Swetland2eb44eb2008-09-29 16:00:48 -07003406 }
3407
Brian Swetland2eb44eb2008-09-29 16:00:48 -07003408 smd_initialized = 1;
3409
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003410 smd_alloc_loopback_channel();
Eric Holmbergc33d4ab2011-10-24 10:28:25 -06003411 smsm_irq_handler(0, 0);
3412 tasklet_schedule(&smd_fake_irq_tasklet);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003413
Brian Swetland2eb44eb2008-09-29 16:00:48 -07003414 return 0;
3415}
3416
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003417static int restart_notifier_cb(struct notifier_block *this,
3418 unsigned long code,
3419 void *data);
3420
3421static struct restart_notifier_block restart_notifiers[] = {
Eric Holmbergca7ead22011-12-01 17:21:15 -07003422 {SMD_MODEM, "modem", .nb.notifier_call = restart_notifier_cb},
3423 {SMD_Q6, "lpass", .nb.notifier_call = restart_notifier_cb},
3424 {SMD_WCNSS, "riva", .nb.notifier_call = restart_notifier_cb},
3425 {SMD_DSPS, "dsps", .nb.notifier_call = restart_notifier_cb},
Eric Holmberg8b0e74f2012-02-08 09:56:17 -07003426 {SMD_MODEM, "gss", .nb.notifier_call = restart_notifier_cb},
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003427};
3428
3429static int restart_notifier_cb(struct notifier_block *this,
3430 unsigned long code,
3431 void *data)
3432{
3433 if (code == SUBSYS_AFTER_SHUTDOWN) {
3434 struct restart_notifier_block *notifier;
3435
3436 notifier = container_of(this,
3437 struct restart_notifier_block, nb);
3438 SMD_INFO("%s: ssrestart for processor %d ('%s')\n",
3439 __func__, notifier->processor,
3440 notifier->name);
3441
3442 smd_channel_reset(notifier->processor);
3443 }
3444
3445 return NOTIFY_DONE;
3446}
3447
3448static __init int modem_restart_late_init(void)
3449{
3450 int i;
3451 void *handle;
3452 struct restart_notifier_block *nb;
3453
3454 for (i = 0; i < ARRAY_SIZE(restart_notifiers); i++) {
3455 nb = &restart_notifiers[i];
3456 handle = subsys_notif_register_notifier(nb->name, &nb->nb);
3457 SMD_DBG("%s: registering notif for '%s', handle=%p\n",
3458 __func__, nb->name, handle);
3459 }
3460 return 0;
3461}
3462late_initcall(modem_restart_late_init);
3463
/*
 * Platform driver glue: binds msm_smd_probe() to the platform device
 * named MODULE_NAME that the board/devtree code registers.
 */
static struct platform_driver msm_smd_driver = {
	.probe = msm_smd_probe,
	.driver = {
		.name = MODULE_NAME,
		.owner = THIS_MODULE,
	},
};
3471
/* Module entry point: register the SMD platform driver; probing happens
 * when the matching platform device appears. */
static int __init msm_smd_init(void)
{
	return platform_driver_register(&msm_smd_driver);
}
3476
3477module_init(msm_smd_init);
3478
3479MODULE_DESCRIPTION("MSM Shared Memory Core");
3480MODULE_AUTHOR("Brian Swetland <swetland@google.com>");
3481MODULE_LICENSE("GPL");