blob: e82e44b125743ec4ddbfbe2586431c8b68cf1e70 [file] [log] [blame]
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001/* arch/arm/mach-msm/smd.c
2 *
3 * Copyright (C) 2007 Google, Inc.
Eric Holmbergc7e8daf2011-12-28 11:49:21 -07004 * Copyright (c) 2008-2012, Code Aurora Forum. All rights reserved.
Brian Swetland2eb44eb2008-09-29 16:00:48 -07005 * Author: Brian Swetland <swetland@google.com>
6 *
7 * This software is licensed under the terms of the GNU General Public
8 * License version 2, as published by the Free Software Foundation, and
9 * may be copied, distributed, and modified under those terms.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 */
17
18#include <linux/platform_device.h>
19#include <linux/module.h>
20#include <linux/fs.h>
21#include <linux/cdev.h>
22#include <linux/device.h>
23#include <linux/wait.h>
24#include <linux/interrupt.h>
25#include <linux/irq.h>
26#include <linux/list.h>
27#include <linux/slab.h>
Brian Swetland2eb44eb2008-09-29 16:00:48 -070028#include <linux/delay.h>
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070029#include <linux/io.h>
30#include <linux/termios.h>
31#include <linux/ctype.h>
32#include <linux/remote_spinlock.h>
33#include <linux/uaccess.h>
Eric Holmbergc7e8daf2011-12-28 11:49:21 -070034#include <linux/kfifo.h>
Eric Holmberg59a9f942012-03-19 10:04:22 -060035#include <linux/wakelock.h>
Karthikeyan Ramasubramanian7069c482012-03-22 09:21:20 -060036#include <linux/notifier.h>
Jeff Hugobdc734d2012-03-26 16:05:39 -060037#include <linux/sort.h>
Brian Swetland2eb44eb2008-09-29 16:00:48 -070038#include <mach/msm_smd.h>
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070039#include <mach/msm_iomap.h>
Brian Swetland2eb44eb2008-09-29 16:00:48 -070040#include <mach/system.h>
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070041#include <mach/subsystem_notif.h>
Angshuman Sarkaread67bd2011-09-21 20:13:12 +053042#include <mach/socinfo.h>
Steve Mucklef132c6c2012-06-06 18:30:57 -070043#include <mach/proc_comm.h>
Ram Somani8b9589f2012-04-03 12:07:18 +053044#include <asm/cacheflush.h>
Brian Swetland2eb44eb2008-09-29 16:00:48 -070045
46#include "smd_private.h"
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070047#include "modem_notifier.h"
Brian Swetland2eb44eb2008-09-29 16:00:48 -070048
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070049#if defined(CONFIG_ARCH_QSD8X50) || defined(CONFIG_ARCH_MSM8X60) \
Jeff Hugo56b933a2011-09-28 14:42:05 -060050 || defined(CONFIG_ARCH_MSM8960) || defined(CONFIG_ARCH_FSM9XXX) \
Jeff Hugo0c0f5e92011-09-28 13:55:45 -060051 || defined(CONFIG_ARCH_MSM9615) || defined(CONFIG_ARCH_APQ8064)
Brian Swetland37521a32009-07-01 18:30:47 -070052#define CONFIG_QDSP6 1
53#endif
54
Jeff Hugo0c0f5e92011-09-28 13:55:45 -060055#if defined(CONFIG_ARCH_MSM8X60) || defined(CONFIG_ARCH_MSM8960) \
56 || defined(CONFIG_ARCH_APQ8064)
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070057#define CONFIG_DSPS 1
58#endif
59
Jeff Hugo0c0f5e92011-09-28 13:55:45 -060060#if defined(CONFIG_ARCH_MSM8960) \
61 || defined(CONFIG_ARCH_APQ8064)
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070062#define CONFIG_WCNSS 1
Jeff Hugo6a8057c2011-08-16 13:47:12 -060063#define CONFIG_DSPS_SMSM 1
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070064#endif
Brian Swetland2eb44eb2008-09-29 16:00:48 -070065
66#define MODULE_NAME "msm_smd"
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070067#define SMEM_VERSION 0x000B
68#define SMD_VERSION 0x00020000
Eric Holmbergc7e8daf2011-12-28 11:49:21 -070069#define SMSM_SNAPSHOT_CNT 64
Eric Holmbergda31d042012-03-28 14:01:02 -060070#define SMSM_SNAPSHOT_SIZE ((SMSM_NUM_ENTRIES + 1) * 4)
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070071
72uint32_t SMSM_NUM_ENTRIES = 8;
73uint32_t SMSM_NUM_HOSTS = 3;
Brian Swetland2eb44eb2008-09-29 16:00:48 -070074
Eric Holmberge8a39322012-04-03 15:14:02 -060075/* Legacy SMSM interrupt notifications */
76#define LEGACY_MODEM_SMSM_MASK (SMSM_RESET | SMSM_INIT | SMSM_SMDINIT \
77 | SMSM_RUN | SMSM_SYSTEM_DOWNLOAD)
Brian Swetland2eb44eb2008-09-29 16:00:48 -070078
79enum {
80 MSM_SMD_DEBUG = 1U << 0,
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070081 MSM_SMSM_DEBUG = 1U << 1,
82 MSM_SMD_INFO = 1U << 2,
83 MSM_SMSM_INFO = 1U << 3,
Eric Holmberg98c6c642012-02-24 11:29:35 -070084 MSM_SMx_POWER_INFO = 1U << 4,
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070085};
86
87struct smsm_shared_info {
88 uint32_t *state;
89 uint32_t *intr_mask;
90 uint32_t *intr_mux;
91};
92
93static struct smsm_shared_info smsm_info;
Eric Holmberg59a9f942012-03-19 10:04:22 -060094static struct kfifo smsm_snapshot_fifo;
95static struct wake_lock smsm_snapshot_wakelock;
96static int smsm_snapshot_count;
97static DEFINE_SPINLOCK(smsm_snapshot_count_lock);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070098
99struct smsm_size_info_type {
100 uint32_t num_hosts;
101 uint32_t num_entries;
102 uint32_t reserved0;
103 uint32_t reserved1;
104};
105
106struct smsm_state_cb_info {
107 struct list_head cb_list;
108 uint32_t mask;
109 void *data;
110 void (*notify)(void *data, uint32_t old_state, uint32_t new_state);
111};
112
113struct smsm_state_info {
114 struct list_head callbacks;
115 uint32_t last_value;
Eric Holmberge8a39322012-04-03 15:14:02 -0600116 uint32_t intr_mask_set;
117 uint32_t intr_mask_clear;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700118};
119
Angshuman Sarkarbad32df2012-02-01 19:52:52 +0530120struct interrupt_config_item {
121 /* must be initialized */
122 irqreturn_t (*irq_handler)(int req, void *data);
123 /* outgoing interrupt config (set from platform data) */
124 uint32_t out_bit_pos;
125 void __iomem *out_base;
126 uint32_t out_offset;
Eric Holmbergdeace152012-07-25 12:17:11 -0600127 int irq_id;
Angshuman Sarkarbad32df2012-02-01 19:52:52 +0530128};
129
130struct interrupt_config {
131 struct interrupt_config_item smd;
132 struct interrupt_config_item smsm;
133};
134
135static irqreturn_t smd_modem_irq_handler(int irq, void *data);
Eric Holmberg98c6c642012-02-24 11:29:35 -0700136static irqreturn_t smsm_modem_irq_handler(int irq, void *data);
Angshuman Sarkarbad32df2012-02-01 19:52:52 +0530137static irqreturn_t smd_dsp_irq_handler(int irq, void *data);
Eric Holmberg98c6c642012-02-24 11:29:35 -0700138static irqreturn_t smsm_dsp_irq_handler(int irq, void *data);
Angshuman Sarkarbad32df2012-02-01 19:52:52 +0530139static irqreturn_t smd_dsps_irq_handler(int irq, void *data);
Eric Holmberg98c6c642012-02-24 11:29:35 -0700140static irqreturn_t smsm_dsps_irq_handler(int irq, void *data);
Angshuman Sarkarbad32df2012-02-01 19:52:52 +0530141static irqreturn_t smd_wcnss_irq_handler(int irq, void *data);
Eric Holmberg98c6c642012-02-24 11:29:35 -0700142static irqreturn_t smsm_wcnss_irq_handler(int irq, void *data);
Jeff Hugo918b2dc2012-03-21 13:42:09 -0600143static irqreturn_t smd_rpm_irq_handler(int irq, void *data);
Angshuman Sarkarbad32df2012-02-01 19:52:52 +0530144static irqreturn_t smsm_irq_handler(int irq, void *data);
145
146static struct interrupt_config private_intr_config[NUM_SMD_SUBSYSTEMS] = {
147 [SMD_MODEM] = {
148 .smd.irq_handler = smd_modem_irq_handler,
Eric Holmberg98c6c642012-02-24 11:29:35 -0700149 .smsm.irq_handler = smsm_modem_irq_handler,
Angshuman Sarkarbad32df2012-02-01 19:52:52 +0530150 },
151 [SMD_Q6] = {
152 .smd.irq_handler = smd_dsp_irq_handler,
Eric Holmberg98c6c642012-02-24 11:29:35 -0700153 .smsm.irq_handler = smsm_dsp_irq_handler,
Angshuman Sarkarbad32df2012-02-01 19:52:52 +0530154 },
155 [SMD_DSPS] = {
156 .smd.irq_handler = smd_dsps_irq_handler,
Eric Holmberg98c6c642012-02-24 11:29:35 -0700157 .smsm.irq_handler = smsm_dsps_irq_handler,
Angshuman Sarkarbad32df2012-02-01 19:52:52 +0530158 },
159 [SMD_WCNSS] = {
160 .smd.irq_handler = smd_wcnss_irq_handler,
Eric Holmberg98c6c642012-02-24 11:29:35 -0700161 .smsm.irq_handler = smsm_wcnss_irq_handler,
Angshuman Sarkarbad32df2012-02-01 19:52:52 +0530162 },
Jeff Hugo918b2dc2012-03-21 13:42:09 -0600163 [SMD_RPM] = {
164 .smd.irq_handler = smd_rpm_irq_handler,
165 .smsm.irq_handler = NULL, /* does not support smsm */
166 },
Angshuman Sarkarbad32df2012-02-01 19:52:52 +0530167};
Jeff Hugobdc734d2012-03-26 16:05:39 -0600168
169struct smem_area {
170 void *phys_addr;
171 unsigned size;
172 void __iomem *virt_addr;
173};
174static uint32_t num_smem_areas;
175static struct smem_area *smem_areas;
176static void *smem_range_check(void *base, unsigned offset);
177
Eric Holmberg7ad623a2012-03-01 14:41:10 -0700178struct interrupt_stat interrupt_stats[NUM_SMD_SUBSYSTEMS];
Angshuman Sarkarbad32df2012-02-01 19:52:52 +0530179
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700180#define SMSM_STATE_ADDR(entry) (smsm_info.state + entry)
181#define SMSM_INTR_MASK_ADDR(entry, host) (smsm_info.intr_mask + \
182 entry * SMSM_NUM_HOSTS + host)
183#define SMSM_INTR_MUX_ADDR(entry) (smsm_info.intr_mux + entry)
184
185/* Internal definitions which are not exported in some targets */
186enum {
187 SMSM_APPS_DEM_I = 3,
Brian Swetland2eb44eb2008-09-29 16:00:48 -0700188};
189
190static int msm_smd_debug_mask;
Brian Swetland2eb44eb2008-09-29 16:00:48 -0700191module_param_named(debug_mask, msm_smd_debug_mask,
192 int, S_IRUGO | S_IWUSR | S_IWGRP);
193
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700194#if defined(CONFIG_MSM_SMD_DEBUG)
195#define SMD_DBG(x...) do { \
196 if (msm_smd_debug_mask & MSM_SMD_DEBUG) \
197 printk(KERN_DEBUG x); \
198 } while (0)
199
200#define SMSM_DBG(x...) do { \
201 if (msm_smd_debug_mask & MSM_SMSM_DEBUG) \
202 printk(KERN_DEBUG x); \
203 } while (0)
204
205#define SMD_INFO(x...) do { \
206 if (msm_smd_debug_mask & MSM_SMD_INFO) \
207 printk(KERN_INFO x); \
208 } while (0)
209
210#define SMSM_INFO(x...) do { \
211 if (msm_smd_debug_mask & MSM_SMSM_INFO) \
212 printk(KERN_INFO x); \
213 } while (0)
Eric Holmberg98c6c642012-02-24 11:29:35 -0700214#define SMx_POWER_INFO(x...) do { \
215 if (msm_smd_debug_mask & MSM_SMx_POWER_INFO) \
216 printk(KERN_INFO x); \
217 } while (0)
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700218#else
219#define SMD_DBG(x...) do { } while (0)
220#define SMSM_DBG(x...) do { } while (0)
221#define SMD_INFO(x...) do { } while (0)
222#define SMSM_INFO(x...) do { } while (0)
Eric Holmberg98c6c642012-02-24 11:29:35 -0700223#define SMx_POWER_INFO(x...) do { } while (0)
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700224#endif
225
Brian Swetland2eb44eb2008-09-29 16:00:48 -0700226static unsigned last_heap_free = 0xffffffff;
227
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700228static inline void smd_write_intr(unsigned int val,
229 const void __iomem *addr);
230
231#if defined(CONFIG_ARCH_MSM7X30)
232#define MSM_TRIG_A2M_SMD_INT \
Taniya Das298de8c2012-02-16 11:45:31 +0530233 (smd_write_intr(1 << 0, MSM_APCS_GCC_BASE + 0x8))
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700234#define MSM_TRIG_A2Q6_SMD_INT \
Taniya Das298de8c2012-02-16 11:45:31 +0530235 (smd_write_intr(1 << 8, MSM_APCS_GCC_BASE + 0x8))
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700236#define MSM_TRIG_A2M_SMSM_INT \
Taniya Das298de8c2012-02-16 11:45:31 +0530237 (smd_write_intr(1 << 5, MSM_APCS_GCC_BASE + 0x8))
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700238#define MSM_TRIG_A2Q6_SMSM_INT \
Taniya Das298de8c2012-02-16 11:45:31 +0530239 (smd_write_intr(1 << 8, MSM_APCS_GCC_BASE + 0x8))
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700240#define MSM_TRIG_A2DSPS_SMD_INT
Jeff Hugo6a8057c2011-08-16 13:47:12 -0600241#define MSM_TRIG_A2DSPS_SMSM_INT
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700242#define MSM_TRIG_A2WCNSS_SMD_INT
243#define MSM_TRIG_A2WCNSS_SMSM_INT
244#elif defined(CONFIG_ARCH_MSM8X60)
245#define MSM_TRIG_A2M_SMD_INT \
246 (smd_write_intr(1 << 3, MSM_GCC_BASE + 0x8))
247#define MSM_TRIG_A2Q6_SMD_INT \
248 (smd_write_intr(1 << 15, MSM_GCC_BASE + 0x8))
249#define MSM_TRIG_A2M_SMSM_INT \
250 (smd_write_intr(1 << 4, MSM_GCC_BASE + 0x8))
251#define MSM_TRIG_A2Q6_SMSM_INT \
252 (smd_write_intr(1 << 14, MSM_GCC_BASE + 0x8))
253#define MSM_TRIG_A2DSPS_SMD_INT \
254 (smd_write_intr(1, MSM_SIC_NON_SECURE_BASE + 0x4080))
Jeff Hugo6a8057c2011-08-16 13:47:12 -0600255#define MSM_TRIG_A2DSPS_SMSM_INT
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700256#define MSM_TRIG_A2WCNSS_SMD_INT
257#define MSM_TRIG_A2WCNSS_SMSM_INT
Jeff Hugo56b933a2011-09-28 14:42:05 -0600258#elif defined(CONFIG_ARCH_MSM9615)
259#define MSM_TRIG_A2M_SMD_INT \
260 (smd_write_intr(1 << 3, MSM_APCS_GCC_BASE + 0x8))
261#define MSM_TRIG_A2Q6_SMD_INT \
262 (smd_write_intr(1 << 15, MSM_APCS_GCC_BASE + 0x8))
263#define MSM_TRIG_A2M_SMSM_INT \
264 (smd_write_intr(1 << 4, MSM_APCS_GCC_BASE + 0x8))
265#define MSM_TRIG_A2Q6_SMSM_INT \
266 (smd_write_intr(1 << 14, MSM_APCS_GCC_BASE + 0x8))
267#define MSM_TRIG_A2DSPS_SMD_INT
268#define MSM_TRIG_A2DSPS_SMSM_INT
269#define MSM_TRIG_A2WCNSS_SMD_INT
270#define MSM_TRIG_A2WCNSS_SMSM_INT
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700271#elif defined(CONFIG_ARCH_FSM9XXX)
272#define MSM_TRIG_A2Q6_SMD_INT \
273 (smd_write_intr(1 << 10, MSM_GCC_BASE + 0x8))
274#define MSM_TRIG_A2Q6_SMSM_INT \
275 (smd_write_intr(1 << 10, MSM_GCC_BASE + 0x8))
276#define MSM_TRIG_A2M_SMD_INT \
277 (smd_write_intr(1 << 0, MSM_GCC_BASE + 0x8))
278#define MSM_TRIG_A2M_SMSM_INT \
279 (smd_write_intr(1 << 5, MSM_GCC_BASE + 0x8))
280#define MSM_TRIG_A2DSPS_SMD_INT
Jeff Hugo6a8057c2011-08-16 13:47:12 -0600281#define MSM_TRIG_A2DSPS_SMSM_INT
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700282#define MSM_TRIG_A2WCNSS_SMD_INT
283#define MSM_TRIG_A2WCNSS_SMSM_INT
Eric Holmberg73d45462012-02-28 10:41:31 -0700284#elif defined(CONFIG_ARCH_MSM7X01A) || defined(CONFIG_ARCH_MSM7x25)
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700285#define MSM_TRIG_A2M_SMD_INT \
286 (smd_write_intr(1, MSM_CSR_BASE + 0x400 + (0) * 4))
Eric Holmberg73d45462012-02-28 10:41:31 -0700287#define MSM_TRIG_A2Q6_SMD_INT
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700288#define MSM_TRIG_A2M_SMSM_INT \
289 (smd_write_intr(1, MSM_CSR_BASE + 0x400 + (5) * 4))
Eric Holmberg73d45462012-02-28 10:41:31 -0700290#define MSM_TRIG_A2Q6_SMSM_INT
291#define MSM_TRIG_A2DSPS_SMD_INT
292#define MSM_TRIG_A2DSPS_SMSM_INT
293#define MSM_TRIG_A2WCNSS_SMD_INT
294#define MSM_TRIG_A2WCNSS_SMSM_INT
295#elif defined(CONFIG_ARCH_MSM7X27) || defined(CONFIG_ARCH_MSM7X27A)
296#define MSM_TRIG_A2M_SMD_INT \
297 (smd_write_intr(1, MSM_CSR_BASE + 0x400 + (0) * 4))
298#define MSM_TRIG_A2Q6_SMD_INT
299#define MSM_TRIG_A2M_SMSM_INT \
300 (smd_write_intr(1, MSM_CSR_BASE + 0x400 + (5) * 4))
301#define MSM_TRIG_A2Q6_SMSM_INT
302#define MSM_TRIG_A2DSPS_SMD_INT
303#define MSM_TRIG_A2DSPS_SMSM_INT
304#define MSM_TRIG_A2WCNSS_SMD_INT
305#define MSM_TRIG_A2WCNSS_SMSM_INT
306#else /* use platform device / device tree configuration */
307#define MSM_TRIG_A2M_SMD_INT
308#define MSM_TRIG_A2Q6_SMD_INT
309#define MSM_TRIG_A2M_SMSM_INT
310#define MSM_TRIG_A2Q6_SMSM_INT
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700311#define MSM_TRIG_A2DSPS_SMD_INT
Jeff Hugo6a8057c2011-08-16 13:47:12 -0600312#define MSM_TRIG_A2DSPS_SMSM_INT
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700313#define MSM_TRIG_A2WCNSS_SMD_INT
314#define MSM_TRIG_A2WCNSS_SMSM_INT
Brian Swetland37521a32009-07-01 18:30:47 -0700315#endif
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700316
Jeff Hugoee40b152012-02-09 17:39:47 -0700317/*
318 * stub out legacy macros if they are not being used so that the legacy
319 * code compiles even though it is not used
320 *
321 * these definitions should not be used in active code and will cause
322 * an early failure
323 */
324#ifndef INT_A9_M2A_0
325#define INT_A9_M2A_0 -1
326#endif
327#ifndef INT_A9_M2A_5
328#define INT_A9_M2A_5 -1
329#endif
330#ifndef INT_ADSP_A11
331#define INT_ADSP_A11 -1
332#endif
333#ifndef INT_ADSP_A11_SMSM
334#define INT_ADSP_A11_SMSM -1
335#endif
336#ifndef INT_DSPS_A11
337#define INT_DSPS_A11 -1
338#endif
339#ifndef INT_DSPS_A11_SMSM
340#define INT_DSPS_A11_SMSM -1
341#endif
342#ifndef INT_WCNSS_A11
343#define INT_WCNSS_A11 -1
344#endif
345#ifndef INT_WCNSS_A11_SMSM
346#define INT_WCNSS_A11_SMSM -1
347#endif
348
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700349#define SMD_LOOPBACK_CID 100
350
Eric Holmbergf6d7d1a2011-09-23 18:31:04 -0600351#define SMEM_SPINLOCK_SMEM_ALLOC "S:3"
352static remote_spinlock_t remote_spinlock;
353
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700354static LIST_HEAD(smd_ch_list_loopback);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700355static void smd_fake_irq_handler(unsigned long arg);
Eric Holmbergda31d042012-03-28 14:01:02 -0600356static void smsm_cb_snapshot(uint32_t use_wakelock);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700357
Karthikeyan Ramasubramanian3249a222012-04-18 17:16:49 -0600358static struct workqueue_struct *smsm_cb_wq;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700359static void notify_smsm_cb_clients_worker(struct work_struct *work);
360static DECLARE_WORK(smsm_cb_work, notify_smsm_cb_clients_worker);
Eric Holmbergc8002902011-09-16 13:55:57 -0600361static DEFINE_MUTEX(smsm_lock);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700362static struct smsm_state_info *smsm_states;
Angshuman Sarkar7ee0dca2011-08-22 21:37:34 +0530363static int spinlocks_initialized;
Karthikeyan Ramasubramanian7069c482012-03-22 09:21:20 -0600364static RAW_NOTIFIER_HEAD(smsm_driver_state_notifier_list);
365static DEFINE_MUTEX(smsm_driver_state_notifier_lock);
366static void smsm_driver_state_notify(uint32_t state, void *data);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700367
368static inline void smd_write_intr(unsigned int val,
369 const void __iomem *addr)
370{
371 wmb();
372 __raw_writel(val, addr);
Brian Swetland2eb44eb2008-09-29 16:00:48 -0700373}
374
#ifdef CONFIG_WCNSS
/*
 * Wake the RIVA (WCNSS) processor before signalling it.
 *
 * Workaround hack for RIVA v1 hardware bug: trigger GPIO 40 to wake up
 * RIVA from power collapse; not to be sent to customers.
 */
static inline void wakeup_v1_riva(void)
{
	/* only major version 1 silicon needs the GPIO toggle */
	if (SOCINFO_VERSION_MAJOR(socinfo_get_version()) == 1) {
		__raw_writel(0x0, MSM_TLMM_BASE + 0x1284);
		__raw_writel(0x2, MSM_TLMM_BASE + 0x1284);
	}
	/* end workaround */
}
#else
/* no WCNSS on this target -- nothing to wake */
static inline void wakeup_v1_riva(void) {}
#endif
392
Brian Swetland5b0f5a32009-04-26 18:38:49 -0700393static inline void notify_modem_smd(void)
Brian Swetland2eb44eb2008-09-29 16:00:48 -0700394{
Angshuman Sarkarbad32df2012-02-01 19:52:52 +0530395 static const struct interrupt_config_item *intr
396 = &private_intr_config[SMD_MODEM].smd;
Eric Holmberg7ad623a2012-03-01 14:41:10 -0700397 if (intr->out_base) {
398 ++interrupt_stats[SMD_MODEM].smd_out_config_count;
Angshuman Sarkarbad32df2012-02-01 19:52:52 +0530399 smd_write_intr(intr->out_bit_pos,
400 intr->out_base + intr->out_offset);
Eric Holmberg7ad623a2012-03-01 14:41:10 -0700401 } else {
402 ++interrupt_stats[SMD_MODEM].smd_out_hardcode_count;
Angshuman Sarkarbad32df2012-02-01 19:52:52 +0530403 MSM_TRIG_A2M_SMD_INT;
Eric Holmberg7ad623a2012-03-01 14:41:10 -0700404 }
Brian Swetland2eb44eb2008-09-29 16:00:48 -0700405}
406
Brian Swetland5b0f5a32009-04-26 18:38:49 -0700407static inline void notify_dsp_smd(void)
408{
Angshuman Sarkarbad32df2012-02-01 19:52:52 +0530409 static const struct interrupt_config_item *intr
410 = &private_intr_config[SMD_Q6].smd;
Eric Holmberg7ad623a2012-03-01 14:41:10 -0700411 if (intr->out_base) {
412 ++interrupt_stats[SMD_Q6].smd_out_config_count;
Angshuman Sarkarbad32df2012-02-01 19:52:52 +0530413 smd_write_intr(intr->out_bit_pos,
414 intr->out_base + intr->out_offset);
Eric Holmberg7ad623a2012-03-01 14:41:10 -0700415 } else {
416 ++interrupt_stats[SMD_Q6].smd_out_hardcode_count;
Angshuman Sarkarbad32df2012-02-01 19:52:52 +0530417 MSM_TRIG_A2Q6_SMD_INT;
Eric Holmberg7ad623a2012-03-01 14:41:10 -0700418 }
Brian Swetland5b0f5a32009-04-26 18:38:49 -0700419}
420
Angshuman Sarkarbad32df2012-02-01 19:52:52 +0530421static inline void notify_dsps_smd(void)
422{
423 static const struct interrupt_config_item *intr
424 = &private_intr_config[SMD_DSPS].smd;
Eric Holmberg7ad623a2012-03-01 14:41:10 -0700425 if (intr->out_base) {
426 ++interrupt_stats[SMD_DSPS].smd_out_config_count;
Angshuman Sarkarbad32df2012-02-01 19:52:52 +0530427 smd_write_intr(intr->out_bit_pos,
428 intr->out_base + intr->out_offset);
Eric Holmberg7ad623a2012-03-01 14:41:10 -0700429 } else {
430 ++interrupt_stats[SMD_DSPS].smd_out_hardcode_count;
Angshuman Sarkarbad32df2012-02-01 19:52:52 +0530431 MSM_TRIG_A2DSPS_SMD_INT;
Eric Holmberg7ad623a2012-03-01 14:41:10 -0700432 }
Angshuman Sarkarbad32df2012-02-01 19:52:52 +0530433}
434
435static inline void notify_wcnss_smd(void)
436{
437 static const struct interrupt_config_item *intr
438 = &private_intr_config[SMD_WCNSS].smd;
439 wakeup_v1_riva();
440
Eric Holmberg7ad623a2012-03-01 14:41:10 -0700441 if (intr->out_base) {
442 ++interrupt_stats[SMD_WCNSS].smd_out_config_count;
Angshuman Sarkarbad32df2012-02-01 19:52:52 +0530443 smd_write_intr(intr->out_bit_pos,
444 intr->out_base + intr->out_offset);
Eric Holmberg7ad623a2012-03-01 14:41:10 -0700445 } else {
446 ++interrupt_stats[SMD_WCNSS].smd_out_hardcode_count;
Angshuman Sarkarbad32df2012-02-01 19:52:52 +0530447 MSM_TRIG_A2WCNSS_SMD_INT;
Eric Holmberg7ad623a2012-03-01 14:41:10 -0700448 }
Angshuman Sarkarbad32df2012-02-01 19:52:52 +0530449}
450
Jeff Hugo918b2dc2012-03-21 13:42:09 -0600451static inline void notify_rpm_smd(void)
452{
453 static const struct interrupt_config_item *intr
454 = &private_intr_config[SMD_RPM].smd;
455
456 if (intr->out_base) {
457 ++interrupt_stats[SMD_RPM].smd_out_config_count;
458 smd_write_intr(intr->out_bit_pos,
459 intr->out_base + intr->out_offset);
460 }
461}
462
Angshuman Sarkarbad32df2012-02-01 19:52:52 +0530463static inline void notify_modem_smsm(void)
464{
465 static const struct interrupt_config_item *intr
466 = &private_intr_config[SMD_MODEM].smsm;
Eric Holmberg7ad623a2012-03-01 14:41:10 -0700467 if (intr->out_base) {
468 ++interrupt_stats[SMD_MODEM].smsm_out_config_count;
Angshuman Sarkarbad32df2012-02-01 19:52:52 +0530469 smd_write_intr(intr->out_bit_pos,
470 intr->out_base + intr->out_offset);
Eric Holmberg7ad623a2012-03-01 14:41:10 -0700471 } else {
472 ++interrupt_stats[SMD_MODEM].smsm_out_hardcode_count;
Angshuman Sarkarbad32df2012-02-01 19:52:52 +0530473 MSM_TRIG_A2M_SMSM_INT;
Eric Holmberg7ad623a2012-03-01 14:41:10 -0700474 }
Angshuman Sarkarbad32df2012-02-01 19:52:52 +0530475}
476
477static inline void notify_dsp_smsm(void)
478{
479 static const struct interrupt_config_item *intr
480 = &private_intr_config[SMD_Q6].smsm;
Eric Holmberg7ad623a2012-03-01 14:41:10 -0700481 if (intr->out_base) {
482 ++interrupt_stats[SMD_Q6].smsm_out_config_count;
Angshuman Sarkarbad32df2012-02-01 19:52:52 +0530483 smd_write_intr(intr->out_bit_pos,
484 intr->out_base + intr->out_offset);
Eric Holmberg7ad623a2012-03-01 14:41:10 -0700485 } else {
486 ++interrupt_stats[SMD_Q6].smsm_out_hardcode_count;
Angshuman Sarkarbad32df2012-02-01 19:52:52 +0530487 MSM_TRIG_A2Q6_SMSM_INT;
Eric Holmberg7ad623a2012-03-01 14:41:10 -0700488 }
Angshuman Sarkarbad32df2012-02-01 19:52:52 +0530489}
490
491static inline void notify_dsps_smsm(void)
492{
493 static const struct interrupt_config_item *intr
494 = &private_intr_config[SMD_DSPS].smsm;
Eric Holmberg7ad623a2012-03-01 14:41:10 -0700495 if (intr->out_base) {
496 ++interrupt_stats[SMD_DSPS].smsm_out_config_count;
Angshuman Sarkarbad32df2012-02-01 19:52:52 +0530497 smd_write_intr(intr->out_bit_pos,
498 intr->out_base + intr->out_offset);
Eric Holmberg7ad623a2012-03-01 14:41:10 -0700499 } else {
500 ++interrupt_stats[SMD_DSPS].smsm_out_hardcode_count;
Angshuman Sarkarbad32df2012-02-01 19:52:52 +0530501 MSM_TRIG_A2DSPS_SMSM_INT;
Eric Holmberg7ad623a2012-03-01 14:41:10 -0700502 }
Angshuman Sarkarbad32df2012-02-01 19:52:52 +0530503}
504
505static inline void notify_wcnss_smsm(void)
506{
507 static const struct interrupt_config_item *intr
508 = &private_intr_config[SMD_WCNSS].smsm;
509 wakeup_v1_riva();
510
Eric Holmberg7ad623a2012-03-01 14:41:10 -0700511 if (intr->out_base) {
512 ++interrupt_stats[SMD_WCNSS].smsm_out_config_count;
Angshuman Sarkarbad32df2012-02-01 19:52:52 +0530513 smd_write_intr(intr->out_bit_pos,
514 intr->out_base + intr->out_offset);
Eric Holmberg7ad623a2012-03-01 14:41:10 -0700515 } else {
516 ++interrupt_stats[SMD_WCNSS].smsm_out_hardcode_count;
Angshuman Sarkarbad32df2012-02-01 19:52:52 +0530517 MSM_TRIG_A2WCNSS_SMSM_INT;
Eric Holmberg7ad623a2012-03-01 14:41:10 -0700518 }
Angshuman Sarkarbad32df2012-02-01 19:52:52 +0530519}
520
/*
 * Interrupt every remote processor whose SMSM interrupt mask enables any
 * bit in @notify_mask for state entry @smsm_entry, then snapshot state
 * for local (apps) callback clients if their mask matches too.
 */
static void notify_other_smsm(uint32_t smsm_entry, uint32_t notify_mask)
{
	/* older protocol don't use smsm_intr_mask,
	   but still communicates with modem */
	if (!smsm_info.intr_mask ||
	    (__raw_readl(SMSM_INTR_MASK_ADDR(smsm_entry, SMSM_MODEM))
				& notify_mask))
		notify_modem_smsm();

	if (smsm_info.intr_mask &&
	    (__raw_readl(SMSM_INTR_MASK_ADDR(smsm_entry, SMSM_Q6))
				& notify_mask)) {
		uint32_t mux_val;

		/* 8x50 additionally bumps the shared apps->Q6 intr_mux
		 * counter (read-increment-write) before signalling Q6 */
		if (cpu_is_qsd8x50() && smsm_info.intr_mux) {
			mux_val = __raw_readl(
					SMSM_INTR_MUX_ADDR(SMEM_APPS_Q6_SMSM));
			mux_val++;
			__raw_writel(mux_val,
					SMSM_INTR_MUX_ADDR(SMEM_APPS_Q6_SMSM));
		}
		notify_dsp_smsm();
	}

	if (smsm_info.intr_mask &&
	    (__raw_readl(SMSM_INTR_MASK_ADDR(smsm_entry, SMSM_WCNSS))
				& notify_mask)) {
		notify_wcnss_smsm();
	}

	if (smsm_info.intr_mask &&
	    (__raw_readl(SMSM_INTR_MASK_ADDR(smsm_entry, SMSM_DSPS))
				& notify_mask)) {
		notify_dsps_smsm();
	}

	/*
	 * Notify local SMSM callback clients without wakelock since this
	 * code is used by power management during power-down/-up sequencing
	 * on DEM-based targets. Grabbing a wakelock in this case will
	 * abort the power-down sequencing.
	 */
	if (smsm_info.intr_mask &&
	    (__raw_readl(SMSM_INTR_MASK_ADDR(smsm_entry, SMSM_APPS))
				& notify_mask)) {
		smsm_cb_snapshot(0);
	}
}
569
/*
 * Dump the remote processor's diagnostic message and crash log from
 * shared memory, best effort: each region may be absent (NULL).  The
 * buffers are forcibly NUL-terminated before printing.
 */
void smd_diag(void)
{
	char *x;
	int size;

	x = smem_find(ID_DIAG_ERR_MSG, SZ_DIAG_ERR_MSG);
	if (x != 0) {
		x[SZ_DIAG_ERR_MSG - 1] = 0;
		SMD_INFO("smem: DIAG '%s'\n", x);
	}

	x = smem_get_entry(SMEM_ERR_CRASH_LOG, &size);
	if (x != 0) {
		x[size - 1] = 0;
		pr_err("smem: CRASH LOG\n'%s'\n", x);
	}
}
587
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700588
/*
 * Handle a fatal modem error: dump the shared-memory diagnostic and
 * crash logs, then spin forever -- the modem or the watchdog is
 * expected to reboot the system.  Never returns.
 */
static void handle_modem_crash(void)
{
	pr_err("MODEM/AMSS has CRASHED\n");
	smd_diag();

	/* hard reboot if possible FIXME
	if (msm_reset_hook)
		msm_reset_hook();
	*/

	/* in this case the modem or watchdog should reboot us */
	for (;;)
		;
}
603
/*
 * Check the modem's SMSM state word for the SMSM_RESET flag.
 *
 * Returns 0 when no reset is flagged (or SMSM is not yet initialized).
 * On a detected crash it calls handle_modem_crash(), which does not
 * return, so the -1 return is effectively unreachable.
 */
int smsm_check_for_modem_crash(void)
{
	/* if the modem's not ready yet, we have to hope for the best */
	if (!smsm_info.state)
		return 0;

	if (__raw_readl(SMSM_STATE_ADDR(SMSM_MODEM_STATE)) & SMSM_RESET) {
		handle_modem_crash();
		return -1;
	}
	return 0;
}
EXPORT_SYMBOL(smsm_check_for_modem_crash);
Brian Swetland2eb44eb2008-09-29 16:00:48 -0700617
Brian Swetland2eb44eb2008-09-29 16:00:48 -0700618/* the spinlock is used to synchronize between the
Brian Swetland03e00cd2009-07-01 17:58:37 -0700619 * irq handler and code that mutates the channel
620 * list or fiddles with channel state
621 */
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700622static DEFINE_SPINLOCK(smd_lock);
Brian Swetland03e00cd2009-07-01 17:58:37 -0700623DEFINE_SPINLOCK(smem_lock);
Brian Swetland2eb44eb2008-09-29 16:00:48 -0700624
625/* the mutex is used during open() and close()
Brian Swetland03e00cd2009-07-01 17:58:37 -0700626 * operations to avoid races while creating or
627 * destroying smd_channel structures
628 */
Brian Swetland2eb44eb2008-09-29 16:00:48 -0700629static DEFINE_MUTEX(smd_creation_mutex);
630
631static int smd_initialized;
632
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700633struct smd_shared_v1 {
634 struct smd_half_channel ch0;
635 unsigned char data0[SMD_BUF_SIZE];
636 struct smd_half_channel ch1;
637 unsigned char data1[SMD_BUF_SIZE];
638};
639
640struct smd_shared_v2 {
641 struct smd_half_channel ch0;
642 struct smd_half_channel ch1;
643};
644
Jeff Hugo918b2dc2012-03-21 13:42:09 -0600645struct smd_shared_v2_word_access {
646 struct smd_half_channel_word_access ch0;
647 struct smd_half_channel_word_access ch1;
648};
649
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700650struct smd_channel {
Jeff Hugo918b2dc2012-03-21 13:42:09 -0600651 volatile void *send; /* some variant of smd_half_channel */
652 volatile void *recv; /* some variant of smd_half_channel */
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700653 unsigned char *send_data;
654 unsigned char *recv_data;
655 unsigned fifo_size;
656 unsigned fifo_mask;
657 struct list_head ch_list;
658
659 unsigned current_packet;
660 unsigned n;
661 void *priv;
662 void (*notify)(void *priv, unsigned flags);
663
664 int (*read)(smd_channel_t *ch, void *data, int len, int user_buf);
665 int (*write)(smd_channel_t *ch, const void *data, int len,
666 int user_buf);
667 int (*read_avail)(smd_channel_t *ch);
668 int (*write_avail)(smd_channel_t *ch);
669 int (*read_from_cb)(smd_channel_t *ch, void *data, int len,
670 int user_buf);
671
672 void (*update_state)(smd_channel_t *ch);
673 unsigned last_state;
674 void (*notify_other_cpu)(void);
675
676 char name[20];
677 struct platform_device pdev;
678 unsigned type;
679
680 int pending_pkt_sz;
681
682 char is_pkt_ch;
Jeff Hugo918b2dc2012-03-21 13:42:09 -0600683
684 /*
685 * private internal functions to access *send and *recv.
686 * never to be exported outside of smd
687 */
688 struct smd_half_channel_access *half_ch;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700689};
690
691struct edge_to_pid {
692 uint32_t local_pid;
693 uint32_t remote_pid;
Eric Holmberg17992c12012-02-29 12:54:44 -0700694 char subsys_name[SMD_MAX_CH_NAME_LEN];
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700695};
696
/**
 * Maps edge type to local and remote processor ID's.
 *
 * Entries without a subsys_name initializer leave the name zeroed;
 * smd_edge_to_subsystem()/smd_pid_to_subsystem() treat those edges as
 * having no restartable subsystem.
 */
static struct edge_to_pid edge_to_pids[] = {
	[SMD_APPS_MODEM] = {SMD_APPS, SMD_MODEM, "modem"},
	[SMD_APPS_QDSP] = {SMD_APPS, SMD_Q6, "q6"},
	[SMD_MODEM_QDSP] = {SMD_MODEM, SMD_Q6},
	[SMD_APPS_DSPS] = {SMD_APPS, SMD_DSPS, "dsps"},
	[SMD_MODEM_DSPS] = {SMD_MODEM, SMD_DSPS},
	[SMD_QDSP_DSPS] = {SMD_Q6, SMD_DSPS},
	[SMD_APPS_WCNSS] = {SMD_APPS, SMD_WCNSS, "wcnss"},
	[SMD_MODEM_WCNSS] = {SMD_MODEM, SMD_WCNSS},
	[SMD_QDSP_WCNSS] = {SMD_Q6, SMD_WCNSS},
	[SMD_DSPS_WCNSS] = {SMD_DSPS, SMD_WCNSS},
	[SMD_APPS_Q6FW] = {SMD_APPS, SMD_MODEM_Q6_FW},
	[SMD_MODEM_Q6FW] = {SMD_MODEM, SMD_MODEM_Q6_FW},
	[SMD_QDSP_Q6FW] = {SMD_Q6, SMD_MODEM_Q6_FW},
	[SMD_DSPS_Q6FW] = {SMD_DSPS, SMD_MODEM_Q6_FW},
	[SMD_WCNSS_Q6FW] = {SMD_WCNSS, SMD_MODEM_Q6_FW},
	[SMD_APPS_RPM] = {SMD_APPS, SMD_RPM},
	[SMD_MODEM_RPM] = {SMD_MODEM, SMD_RPM},
	[SMD_QDSP_RPM] = {SMD_Q6, SMD_RPM},
	[SMD_WCNSS_RPM] = {SMD_WCNSS, SMD_RPM},
};
721
/* Binds a subsystem-restart notifier_block to the processor it watches. */
struct restart_notifier_block {
	unsigned processor;		/* SMSM/SMD processor id */
	char *name;			/* subsystem name for registration */
	struct notifier_block nb;	/* embedded notifier callback */
};
727
/* nonzero skips the SMSM reset handshake; set elsewhere — TODO confirm */
static int disable_smsm_reset_handshake;
static struct platform_device loopback_tty_pdev = {.name = "LOOPBACK_TTY"};

/* channel close-state lists; iterated/modified under smd_lock below */
static LIST_HEAD(smd_ch_closed_list);
static LIST_HEAD(smd_ch_closing_list);
static LIST_HEAD(smd_ch_to_close_list);
/* active channels, one list per remote-processor edge */
static LIST_HEAD(smd_ch_list_modem);
static LIST_HEAD(smd_ch_list_dsp);
static LIST_HEAD(smd_ch_list_dsps);
static LIST_HEAD(smd_ch_list_wcnss);
static LIST_HEAD(smd_ch_list_rpm);

/* marks which slots of the 64-entry SMEM allocation table were claimed */
static unsigned char smd_ch_allocated[64];
static struct work_struct probe_work;

static void finalize_channel_close_fn(struct work_struct *work);
static DECLARE_WORK(finalize_channel_close_work, finalize_channel_close_fn);
static struct workqueue_struct *channel_close_wq;

static int smd_alloc_channel(struct smd_alloc_elm *alloc_elm);

/* on smp systems, the probe might get called from multiple cores,
   hence use a lock */
static DEFINE_MUTEX(smd_probe_lock);
752
753static void smd_channel_probe_worker(struct work_struct *work)
754{
755 struct smd_alloc_elm *shared;
756 unsigned n;
757 uint32_t type;
758
759 shared = smem_find(ID_CH_ALLOC_TBL, sizeof(*shared) * 64);
760
761 if (!shared) {
762 pr_err("%s: allocation table not initialized\n", __func__);
763 return;
764 }
765
766 mutex_lock(&smd_probe_lock);
767 for (n = 0; n < 64; n++) {
768 if (smd_ch_allocated[n])
769 continue;
770
771 /* channel should be allocated only if APPS
772 processor is involved */
773 type = SMD_CHANNEL_TYPE(shared[n].type);
Jeff Hugo918b2dc2012-03-21 13:42:09 -0600774 if (type >= ARRAY_SIZE(edge_to_pids) ||
775 edge_to_pids[type].local_pid != SMD_APPS)
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700776 continue;
777 if (!shared[n].ref_count)
778 continue;
779 if (!shared[n].name[0])
780 continue;
781
782 if (!smd_alloc_channel(&shared[n]))
783 smd_ch_allocated[n] = 1;
784 else
785 SMD_INFO("Probe skipping ch %d, not allocated\n", n);
786 }
787 mutex_unlock(&smd_probe_lock);
788}
789
790/**
791 * Lookup processor ID and determine if it belongs to the proved edge
792 * type.
793 *
794 * @shared2: Pointer to v2 shared channel structure
795 * @type: Edge type
796 * @pid: Processor ID of processor on edge
797 * @local_ch: Channel that belongs to processor @pid
798 * @remote_ch: Other side of edge contained @pid
799 *
800 * Returns 0 for not on edge, 1 for found on edge
801 */
802static int pid_is_on_edge(struct smd_shared_v2 *shared2,
803 uint32_t type, uint32_t pid,
804 struct smd_half_channel **local_ch,
805 struct smd_half_channel **remote_ch
806 )
807{
808 int ret = 0;
809 struct edge_to_pid *edge;
810
811 *local_ch = 0;
812 *remote_ch = 0;
813
814 if (!shared2 || (type >= ARRAY_SIZE(edge_to_pids)))
815 return 0;
816
817 edge = &edge_to_pids[type];
818 if (edge->local_pid != edge->remote_pid) {
819 if (pid == edge->local_pid) {
820 *local_ch = &shared2->ch0;
821 *remote_ch = &shared2->ch1;
822 ret = 1;
823 } else if (pid == edge->remote_pid) {
824 *local_ch = &shared2->ch1;
825 *remote_ch = &shared2->ch0;
826 ret = 1;
827 }
828 }
829
830 return ret;
831}
832
Eric Holmberg17992c12012-02-29 12:54:44 -0700833/*
834 * Returns a pointer to the subsystem name or NULL if no
835 * subsystem name is available.
836 *
837 * @type - Edge definition
838 */
839const char *smd_edge_to_subsystem(uint32_t type)
840{
841 const char *subsys = NULL;
842
843 if (type < ARRAY_SIZE(edge_to_pids)) {
844 subsys = edge_to_pids[type].subsys_name;
845 if (subsys[0] == 0x0)
846 subsys = NULL;
847 }
848 return subsys;
849}
850EXPORT_SYMBOL(smd_edge_to_subsystem);
851
Eric Holmberg7ad623a2012-03-01 14:41:10 -0700852/*
853 * Returns a pointer to the subsystem name given the
854 * remote processor ID.
855 *
856 * @pid Remote processor ID
857 * @returns Pointer to subsystem name or NULL if not found
858 */
859const char *smd_pid_to_subsystem(uint32_t pid)
860{
861 const char *subsys = NULL;
862 int i;
863
864 for (i = 0; i < ARRAY_SIZE(edge_to_pids); ++i) {
865 if (pid == edge_to_pids[i].remote_pid &&
866 edge_to_pids[i].subsys_name[0] != 0x0
867 ) {
868 subsys = edge_to_pids[i].subsys_name;
869 break;
870 }
871 }
872
873 return subsys;
874}
875EXPORT_SYMBOL(smd_pid_to_subsystem);
Eric Holmberg17992c12012-02-29 12:54:44 -0700876
/* Force one shared half-channel into @new_state during subsystem restart:
 * drop the DSR/CTS/CD handshake flags and raise fSTATE so the surviving
 * side sees the transition.  Channels already CLOSED are left alone. */
static void smd_reset_edge(struct smd_half_channel *ch, unsigned new_state)
{
	if (ch->state != SMD_SS_CLOSED) {
		ch->state = new_state;
		ch->fDSR = 0;
		ch->fCTS = 0;
		ch->fCD = 0;
		/* fSTATE set last, after the state word is updated */
		ch->fSTATE = 1;
	}
}
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700887
/* Walk the SMEM channel allocation table and reset, to @new_state, the
 * half-channel owned by restarting processor @pid on every edge it sits
 * on.  Caller holds smd_probe_lock and smd_lock (see smd_channel_reset). */
static void smd_channel_reset_state(struct smd_alloc_elm *shared,
		unsigned new_state, unsigned pid)
{
	unsigned n;
	struct smd_shared_v2 *shared2;
	uint32_t type;
	struct smd_half_channel *local_ch;
	struct smd_half_channel *remote_ch;

	for (n = 0; n < SMD_CHANNELS; n++) {
		/* skip unused or unnamed allocation-table slots */
		if (!shared[n].ref_count)
			continue;
		if (!shared[n].name[0])
			continue;

		type = SMD_CHANNEL_TYPE(shared[n].type);
		shared2 = smem_alloc(SMEM_SMD_BASE_ID + n, sizeof(*shared2));
		if (!shared2)
			continue;

		if (pid_is_on_edge(shared2, type, pid, &local_ch, &remote_ch))
			smd_reset_edge(local_ch, new_state);

		/*
		 * ModemFW is in the same subsystem as ModemSW, but has
		 * separate SMD edges that need to be reset.
		 */
		if (pid == SMSM_MODEM &&
				pid_is_on_edge(shared2, type, SMD_MODEM_Q6_FW,
					&local_ch, &remote_ch))
			smd_reset_edge(local_ch, new_state);
	}
}
921
922
/* Recover SMD/SMSM shared state after processor @restart_pid restarts:
 * release remote spinlocks it may have held, clear its SMSM state word,
 * then drive all of its half-channels through CLOSING and CLOSED while
 * interrupting every peer so local clients observe the teardown. */
void smd_channel_reset(uint32_t restart_pid)
{
	struct smd_alloc_elm *shared;
	unsigned long flags;

	SMD_DBG("%s: starting reset\n", __func__);
	shared = smem_find(ID_CH_ALLOC_TBL, sizeof(*shared) * 64);
	if (!shared) {
		pr_err("%s: allocation table not initialized\n", __func__);
		return;
	}

	/* release any held spinlocks */
	remote_spin_release(&remote_spinlock, restart_pid);
	remote_spin_release_all(restart_pid);

	/* reset SMSM entry */
	if (smsm_info.state) {
		writel_relaxed(0, SMSM_STATE_ADDR(restart_pid));

		/* restart SMSM init handshake */
		if (restart_pid == SMSM_MODEM) {
			smsm_change_state(SMSM_APPS_STATE,
				SMSM_INIT | SMSM_SMD_LOOPBACK | SMSM_RESET,
				0);
		}

		/* notify SMSM processors */
		smsm_irq_handler(0, 0);
		notify_modem_smsm();
		notify_dsp_smsm();
		notify_dsps_smsm();
		notify_wcnss_smsm();
	}

	/* change all remote states to CLOSING */
	mutex_lock(&smd_probe_lock);
	spin_lock_irqsave(&smd_lock, flags);
	smd_channel_reset_state(shared, SMD_SS_CLOSING, restart_pid);
	spin_unlock_irqrestore(&smd_lock, flags);
	mutex_unlock(&smd_probe_lock);

	/* notify SMD processors; mb() publishes the state writes first */
	mb();
	smd_fake_irq_handler(0);
	notify_modem_smd();
	notify_dsp_smd();
	notify_dsps_smd();
	notify_wcnss_smd();

	/* change all remote states to CLOSED */
	mutex_lock(&smd_probe_lock);
	spin_lock_irqsave(&smd_lock, flags);
	smd_channel_reset_state(shared, SMD_SS_CLOSED, restart_pid);
	spin_unlock_irqrestore(&smd_lock, flags);
	mutex_unlock(&smd_probe_lock);

	/* notify SMD processors */
	mb();
	smd_fake_irq_handler(0);
	notify_modem_smd();
	notify_dsp_smd();
	notify_dsps_smd();
	notify_wcnss_smd();

	SMD_DBG("%s: finished reset\n", __func__);
}
990
Brian Swetland2eb44eb2008-09-29 16:00:48 -0700991/* how many bytes are available for reading */
992static int smd_stream_read_avail(struct smd_channel *ch)
993{
Jeff Hugo918b2dc2012-03-21 13:42:09 -0600994 return (ch->half_ch->get_head(ch->recv) -
995 ch->half_ch->get_tail(ch->recv)) & ch->fifo_mask;
Brian Swetland2eb44eb2008-09-29 16:00:48 -0700996}
997
998/* how many bytes we are free to write */
999static int smd_stream_write_avail(struct smd_channel *ch)
1000{
Jeff Hugo918b2dc2012-03-21 13:42:09 -06001001 return ch->fifo_mask - ((ch->half_ch->get_head(ch->send) -
1002 ch->half_ch->get_tail(ch->send)) & ch->fifo_mask);
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001003}
1004
1005static int smd_packet_read_avail(struct smd_channel *ch)
1006{
1007 if (ch->current_packet) {
1008 int n = smd_stream_read_avail(ch);
1009 if (n > ch->current_packet)
1010 n = ch->current_packet;
1011 return n;
1012 } else {
1013 return 0;
1014 }
1015}
1016
1017static int smd_packet_write_avail(struct smd_channel *ch)
1018{
1019 int n = smd_stream_write_avail(ch);
1020 return n > SMD_HEADER_SIZE ? n - SMD_HEADER_SIZE : 0;
1021}
1022
1023static int ch_is_open(struct smd_channel *ch)
1024{
Jeff Hugo918b2dc2012-03-21 13:42:09 -06001025 return (ch->half_ch->get_state(ch->recv) == SMD_SS_OPENED ||
1026 ch->half_ch->get_state(ch->recv) == SMD_SS_FLUSHING)
1027 && (ch->half_ch->get_state(ch->send) == SMD_SS_OPENED);
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001028}
1029
1030/* provide a pointer and length to readable data in the fifo */
1031static unsigned ch_read_buffer(struct smd_channel *ch, void **ptr)
1032{
Jeff Hugo918b2dc2012-03-21 13:42:09 -06001033 unsigned head = ch->half_ch->get_head(ch->recv);
1034 unsigned tail = ch->half_ch->get_tail(ch->recv);
Brian Swetland5b0f5a32009-04-26 18:38:49 -07001035 *ptr = (void *) (ch->recv_data + tail);
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001036
1037 if (tail <= head)
1038 return head - tail;
1039 else
Brian Swetland5b0f5a32009-04-26 18:38:49 -07001040 return ch->fifo_size - tail;
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001041}
1042
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001043static int read_intr_blocked(struct smd_channel *ch)
1044{
Jeff Hugo918b2dc2012-03-21 13:42:09 -06001045 return ch->half_ch->get_fBLOCKREADINTR(ch->recv);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001046}
1047
/* advance the fifo read pointer after data from ch_read_buffer is consumed */
static void ch_read_done(struct smd_channel *ch, unsigned count)
{
	BUG_ON(count > smd_stream_read_avail(ch));
	ch->half_ch->set_tail(ch->recv,
		(ch->half_ch->get_tail(ch->recv) + count) & ch->fifo_mask);
	/* publish the new tail before raising the consumed-data flag */
	wmb();
	/* fTAIL on our send half tells the remote we freed fifo space */
	ch->half_ch->set_fTAIL(ch->send, 1);
}
1057
/* basic read interface to ch_read_{buffer,done} used
 * by smd_*_read() and update_packet_state()
 * will read-and-discard if the _data pointer is null
 *
 * Returns the number of bytes consumed from the fifo.
 *
 * NOTE(review): a short copy_to_user() is only logged; the loop still
 * advances past the uncopied bytes and reports them as read — confirm
 * whether callers tolerate silently dropped user data.
 */
static int ch_read(struct smd_channel *ch, void *_data, int len, int user_buf)
{
	void *ptr;
	unsigned n;
	unsigned char *data = _data;
	int orig_len = len;
	int r = 0;

	while (len > 0) {
		/* grab the next contiguous readable span; 0 == fifo empty */
		n = ch_read_buffer(ch, &ptr);
		if (n == 0)
			break;

		if (n > len)
			n = len;
		if (_data) {
			if (user_buf) {
				r = copy_to_user(data, ptr, n);
				if (r > 0) {
					pr_err("%s: "
						"copy_to_user could not copy "
						"%i bytes.\n",
						__func__,
						r);
				}
			} else
				memcpy(data, ptr, n);
		}

		data += n;
		len -= n;
		/* release the consumed span back to the fifo */
		ch_read_done(ch, n);
	}

	return orig_len - len;
}
1098
/* no-op counterpart of update_packet_state() for stream channels */
static void update_stream_state(struct smd_channel *ch)
{
	/* streams have no special state requiring updating */
}
1103
/* Between packets, pull the next 5-word packet header off the stream and
 * latch its length (word 0) into ch->current_packet.  Looping consumes
 * any zero-length packets in place. */
static void update_packet_state(struct smd_channel *ch)
{
	unsigned hdr[5];
	int r;

	/* can't do anything if we're in the middle of a packet */
	while (ch->current_packet == 0) {
		/* discard 0 length packets if any */

		/* don't bother unless we can get the full header */
		if (smd_stream_read_avail(ch) < SMD_HEADER_SIZE)
			return;

		r = ch_read(ch, hdr, SMD_HEADER_SIZE, 0);
		/* avail was checked above, so a short read is a logic bug */
		BUG_ON(r != SMD_HEADER_SIZE);

		ch->current_packet = hdr[0];
	}
}
1123
1124/* provide a pointer and length to next free space in the fifo */
1125static unsigned ch_write_buffer(struct smd_channel *ch, void **ptr)
1126{
Jeff Hugo918b2dc2012-03-21 13:42:09 -06001127 unsigned head = ch->half_ch->get_head(ch->send);
1128 unsigned tail = ch->half_ch->get_tail(ch->send);
Brian Swetland5b0f5a32009-04-26 18:38:49 -07001129 *ptr = (void *) (ch->send_data + head);
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001130
1131 if (head < tail) {
1132 return tail - head - 1;
1133 } else {
1134 if (tail == 0)
Brian Swetland5b0f5a32009-04-26 18:38:49 -07001135 return ch->fifo_size - head - 1;
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001136 else
Brian Swetland5b0f5a32009-04-26 18:38:49 -07001137 return ch->fifo_size - head;
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001138 }
1139}
1140
/* advance the fifo write pointer after freespace
 * from ch_write_buffer is filled
 */
static void ch_write_done(struct smd_channel *ch, unsigned count)
{
	BUG_ON(count > smd_stream_write_avail(ch));
	ch->half_ch->set_head(ch->send,
		(ch->half_ch->get_head(ch->send) + count) & ch->fifo_mask);
	/* make the payload visible before publishing the new head */
	wmb();
	/* fHEAD tells the remote side new data is available */
	ch->half_ch->set_fHEAD(ch->send, 1);
}
1152
/* Move our send half-channel to state @n; the modem-style handshake lines
 * (DSR/CTS/CD) are asserted only for OPENED.  fSTATE is raised after the
 * state word is written, then the remote processor is interrupted. */
static void ch_set_state(struct smd_channel *ch, unsigned n)
{
	if (n == SMD_SS_OPENED) {
		ch->half_ch->set_fDSR(ch->send, 1);
		ch->half_ch->set_fCTS(ch->send, 1);
		ch->half_ch->set_fCD(ch->send, 1);
	} else {
		ch->half_ch->set_fDSR(ch->send, 0);
		ch->half_ch->set_fCTS(ch->send, 0);
		ch->half_ch->set_fCD(ch->send, 0);
	}
	ch->half_ch->set_state(ch->send, n);
	ch->half_ch->set_fSTATE(ch->send, 1);
	ch->notify_other_cpu();
}
1168
/* Schedule the channel probe worker when the SMEM heap free offset has
 * moved, i.e. a remote processor allocated new shared structures that may
 * include channel fifos. */
static void do_smd_probe(void)
{
	struct smem_shared *shared = (void *) MSM_SHARED_RAM_BASE;
	if (shared->heap_info.free_offset != last_heap_free) {
		last_heap_free = shared->heap_info.free_offset;
		schedule_work(&probe_work);
	}
}
1177
/* React to the remote half-channel moving from state @last to @next by
 * advancing our own send-side state machine and notifying the client.
 * Called with smd_lock held (from handle_smd_irq paths). */
static void smd_state_change(struct smd_channel *ch,
		unsigned last, unsigned next)
{
	ch->last_state = next;

	SMD_INFO("SMD: ch %d %d -> %d\n", ch->n, last, next);

	switch (next) {
	case SMD_SS_OPENING:
		/* remote is (re)opening: rewind our fifo indices and
		 * follow it into OPENING */
		if (ch->half_ch->get_state(ch->send) == SMD_SS_CLOSING ||
		    ch->half_ch->get_state(ch->send) == SMD_SS_CLOSED) {
			ch->half_ch->set_tail(ch->recv, 0);
			ch->half_ch->set_head(ch->send, 0);
			ch->half_ch->set_fBLOCKREADINTR(ch->send, 0);
			ch_set_state(ch, SMD_SS_OPENING);
		}
		break;
	case SMD_SS_OPENED:
		/* handshake complete: report SMD_EVENT_OPEN exactly once */
		if (ch->half_ch->get_state(ch->send) == SMD_SS_OPENING) {
			ch_set_state(ch, SMD_SS_OPENED);
			ch->notify(ch->priv, SMD_EVENT_OPEN);
		}
		break;
	case SMD_SS_FLUSHING:
	case SMD_SS_RESET:
		/* we should force them to close? */
		break;
	case SMD_SS_CLOSED:
		/* remote closed first: drop any in-flight packet state and
		 * tell the client before we close our side */
		if (ch->half_ch->get_state(ch->send) == SMD_SS_OPENED) {
			ch_set_state(ch, SMD_SS_CLOSING);
			ch->current_packet = 0;
			ch->pending_pkt_sz = 0;
			ch->notify(ch->priv, SMD_EVENT_CLOSE);
		}
		break;
	case SMD_SS_CLOSING:
		/* both sides closing: hand off to the close workqueue for
		 * final teardown outside irq context */
		if (ch->half_ch->get_state(ch->send) == SMD_SS_CLOSED) {
			list_move(&ch->ch_list,
					&smd_ch_to_close_list);
			queue_work(channel_close_wq,
						&finalize_channel_close_work);
		}
		break;
	}
}
1223
/* Service state-change interrupts for channels parked on the closing
 * list; only fSTATE/state transitions matter here, not data flags. */
static void handle_smd_irq_closing_list(void)
{
	unsigned long flags;
	struct smd_channel *ch;
	struct smd_channel *index;
	unsigned tmp;

	spin_lock_irqsave(&smd_lock, flags);
	/* _safe: smd_state_change() may move entries off this list */
	list_for_each_entry_safe(ch, index, &smd_ch_closing_list, ch_list) {
		if (ch->half_ch->get_fSTATE(ch->recv))
			ch->half_ch->set_fSTATE(ch->recv, 0);
		tmp = ch->half_ch->get_state(ch->recv);
		if (tmp != ch->last_state)
			smd_state_change(ch, ch->last_state, tmp);
	}
	spin_unlock_irqrestore(&smd_lock, flags);
}
1241
/* Core interrupt service for one edge: ack the remote's event flags for
 * every open channel on @list, fan out data/state callbacks, then rescan
 * SMEM for newly allocated channels.  @notify kicks the remote CPU. */
static void handle_smd_irq(struct list_head *list, void (*notify)(void))
{
	unsigned long flags;
	struct smd_channel *ch;
	unsigned ch_flags;	/* bit0=fHEAD, bit1=fTAIL, bit2=fSTATE */
	unsigned tmp;
	unsigned char state_change;

	spin_lock_irqsave(&smd_lock, flags);
	list_for_each_entry(ch, list, ch_list) {
		state_change = 0;
		ch_flags = 0;
		if (ch_is_open(ch)) {
			/* read-and-clear each event flag the remote set */
			if (ch->half_ch->get_fHEAD(ch->recv)) {
				ch->half_ch->set_fHEAD(ch->recv, 0);
				ch_flags |= 1;
			}
			if (ch->half_ch->get_fTAIL(ch->recv)) {
				ch->half_ch->set_fTAIL(ch->recv, 0);
				ch_flags |= 2;
			}
			if (ch->half_ch->get_fSTATE(ch->recv)) {
				ch->half_ch->set_fSTATE(ch->recv, 0);
				ch_flags |= 4;
			}
		}
		tmp = ch->half_ch->get_state(ch->recv);
		if (tmp != ch->last_state) {
			SMx_POWER_INFO("SMD ch%d '%s' State change %d->%d\n",
					ch->n, ch->name, ch->last_state, tmp);
			smd_state_change(ch, ch->last_state, tmp);
			state_change = 1;
		}
		/* new data or freed space -> data event */
		if (ch_flags & 0x3) {
			ch->update_state(ch);
			SMx_POWER_INFO("SMD ch%d '%s' Data event r%d/w%d\n",
					ch->n, ch->name,
					ch->read_avail(ch),
					ch->fifo_size - ch->write_avail(ch));
			ch->notify(ch->priv, SMD_EVENT_DATA);
		}
		/* fSTATE without an actual state change -> status-only event */
		if (ch_flags & 0x4 && !state_change) {
			SMx_POWER_INFO("SMD ch%d '%s' State update\n",
					ch->n, ch->name);
			ch->notify(ch->priv, SMD_EVENT_STATUS);
		}
	}
	spin_unlock_irqrestore(&smd_lock, flags);
	do_smd_probe();
}
1292
Brian Swetland37521a32009-07-01 18:30:47 -07001293static irqreturn_t smd_modem_irq_handler(int irq, void *data)
Brian Swetland5b0f5a32009-04-26 18:38:49 -07001294{
Eric Holmberg98c6c642012-02-24 11:29:35 -07001295 SMx_POWER_INFO("SMD Int Modem->Apps\n");
Eric Holmberg7ad623a2012-03-01 14:41:10 -07001296 ++interrupt_stats[SMD_MODEM].smd_in_count;
Brian Swetland37521a32009-07-01 18:30:47 -07001297 handle_smd_irq(&smd_ch_list_modem, notify_modem_smd);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001298 handle_smd_irq_closing_list();
Brian Swetland37521a32009-07-01 18:30:47 -07001299 return IRQ_HANDLED;
1300}
1301
1302static irqreturn_t smd_dsp_irq_handler(int irq, void *data)
1303{
Eric Holmberg98c6c642012-02-24 11:29:35 -07001304 SMx_POWER_INFO("SMD Int LPASS->Apps\n");
Eric Holmberg7ad623a2012-03-01 14:41:10 -07001305 ++interrupt_stats[SMD_Q6].smd_in_count;
Brian Swetland37521a32009-07-01 18:30:47 -07001306 handle_smd_irq(&smd_ch_list_dsp, notify_dsp_smd);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001307 handle_smd_irq_closing_list();
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001308 return IRQ_HANDLED;
1309}
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001310
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001311static irqreturn_t smd_dsps_irq_handler(int irq, void *data)
1312{
Eric Holmberg98c6c642012-02-24 11:29:35 -07001313 SMx_POWER_INFO("SMD Int DSPS->Apps\n");
Eric Holmberg7ad623a2012-03-01 14:41:10 -07001314 ++interrupt_stats[SMD_DSPS].smd_in_count;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001315 handle_smd_irq(&smd_ch_list_dsps, notify_dsps_smd);
1316 handle_smd_irq_closing_list();
1317 return IRQ_HANDLED;
1318}
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001319
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001320static irqreturn_t smd_wcnss_irq_handler(int irq, void *data)
1321{
Eric Holmberg98c6c642012-02-24 11:29:35 -07001322 SMx_POWER_INFO("SMD Int WCNSS->Apps\n");
Eric Holmberg7ad623a2012-03-01 14:41:10 -07001323 ++interrupt_stats[SMD_WCNSS].smd_in_count;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001324 handle_smd_irq(&smd_ch_list_wcnss, notify_wcnss_smd);
1325 handle_smd_irq_closing_list();
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001326 return IRQ_HANDLED;
1327}
1328
Jeff Hugo918b2dc2012-03-21 13:42:09 -06001329static irqreturn_t smd_rpm_irq_handler(int irq, void *data)
1330{
1331 SMx_POWER_INFO("SMD Int RPM->Apps\n");
1332 ++interrupt_stats[SMD_RPM].smd_in_count;
1333 handle_smd_irq(&smd_ch_list_rpm, notify_rpm_smd);
1334 handle_smd_irq_closing_list();
1335 return IRQ_HANDLED;
1336}
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001337
/* Poll every edge for pending work, as if each edge had interrupted;
 * runs in tasklet context (arg unused) when a real interrupt may have
 * been missed, e.g. after sleep exit or channel reset. */
static void smd_fake_irq_handler(unsigned long arg)
{
	handle_smd_irq(&smd_ch_list_modem, notify_modem_smd);
	handle_smd_irq(&smd_ch_list_dsp, notify_dsp_smd);
	handle_smd_irq(&smd_ch_list_dsps, notify_dsps_smd);
	handle_smd_irq(&smd_ch_list_wcnss, notify_wcnss_smd);
	handle_smd_irq(&smd_ch_list_rpm, notify_rpm_smd);
	handle_smd_irq_closing_list();
}

static DECLARE_TASKLET(smd_fake_irq_tasklet, smd_fake_irq_handler, 0);
1349
Brian Swetland37521a32009-07-01 18:30:47 -07001350static inline int smd_need_int(struct smd_channel *ch)
1351{
1352 if (ch_is_open(ch)) {
Jeff Hugo918b2dc2012-03-21 13:42:09 -06001353 if (ch->half_ch->get_fHEAD(ch->recv) ||
1354 ch->half_ch->get_fTAIL(ch->recv) ||
1355 ch->half_ch->get_fSTATE(ch->recv))
Brian Swetland37521a32009-07-01 18:30:47 -07001356 return 1;
Jeff Hugo918b2dc2012-03-21 13:42:09 -06001357 if (ch->half_ch->get_state(ch->recv) != ch->last_state)
Brian Swetland37521a32009-07-01 18:30:47 -07001358 return 1;
1359 }
1360 return 0;
1361}
1362
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001363void smd_sleep_exit(void)
1364{
1365 unsigned long flags;
1366 struct smd_channel *ch;
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001367 int need_int = 0;
1368
1369 spin_lock_irqsave(&smd_lock, flags);
Brian Swetland37521a32009-07-01 18:30:47 -07001370 list_for_each_entry(ch, &smd_ch_list_modem, ch_list) {
1371 if (smd_need_int(ch)) {
1372 need_int = 1;
1373 break;
1374 }
1375 }
1376 list_for_each_entry(ch, &smd_ch_list_dsp, ch_list) {
1377 if (smd_need_int(ch)) {
1378 need_int = 1;
1379 break;
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001380 }
1381 }
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001382 list_for_each_entry(ch, &smd_ch_list_dsps, ch_list) {
1383 if (smd_need_int(ch)) {
1384 need_int = 1;
1385 break;
1386 }
1387 }
1388 list_for_each_entry(ch, &smd_ch_list_wcnss, ch_list) {
1389 if (smd_need_int(ch)) {
1390 need_int = 1;
1391 break;
1392 }
1393 }
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001394 spin_unlock_irqrestore(&smd_lock, flags);
1395 do_smd_probe();
Brian Swetland37521a32009-07-01 18:30:47 -07001396
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001397 if (need_int) {
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001398 SMD_DBG("smd_sleep_exit need interrupt\n");
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001399 tasklet_schedule(&smd_fake_irq_tasklet);
1400 }
1401}
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001402EXPORT_SYMBOL(smd_sleep_exit);
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001403
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001404static int smd_is_packet(struct smd_alloc_elm *alloc_elm)
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001405{
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001406 if (SMD_XFER_TYPE(alloc_elm->type) == 1)
1407 return 0;
1408 else if (SMD_XFER_TYPE(alloc_elm->type) == 2)
Brian Swetland5b0f5a32009-04-26 18:38:49 -07001409 return 1;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001410
1411 /* for cases where xfer type is 0 */
1412 if (!strncmp(alloc_elm->name, "DAL", 3))
Brian Swetland5b0f5a32009-04-26 18:38:49 -07001413 return 0;
1414
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001415 /* for cases where xfer type is 0 */
1416 if (!strncmp(alloc_elm->name, "RPCCALL_QDSP", 12))
1417 return 0;
1418
1419 if (alloc_elm->cid > 4 || alloc_elm->cid == 1)
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001420 return 1;
1421 else
1422 return 0;
1423}
1424
/* Write up to @len bytes from @_data (user space when @user_buf) into the
 * stream fifo, interrupting the remote side once if anything was written.
 * Returns the number of bytes accepted, which may be short if the fifo
 * fills or the channel closes mid-write.
 *
 * NOTE(review): a short copy_from_user() is only logged; the span is still
 * committed via ch_write_done(), so uninitialized fifo bytes may be sent —
 * confirm whether callers guarantee readable buffers.
 */
static int smd_stream_write(smd_channel_t *ch, const void *_data, int len,
		int user_buf)
{
	void *ptr;
	const unsigned char *buf = _data;
	unsigned xfer;
	int orig_len = len;
	int r = 0;

	SMD_DBG("smd_stream_write() %d -> ch%d\n", len, ch->n);
	if (len < 0)
		return -EINVAL;
	else if (len == 0)
		return 0;

	while ((xfer = ch_write_buffer(ch, &ptr)) != 0) {
		/* channel dropped mid-write: report 0 bytes written */
		if (!ch_is_open(ch)) {
			len = orig_len;
			break;
		}
		if (xfer > len)
			xfer = len;
		if (user_buf) {
			r = copy_from_user(ptr, buf, xfer);
			if (r > 0) {
				pr_err("%s: "
					"copy_from_user could not copy %i "
					"bytes.\n",
					__func__,
					r);
			}
		} else
			memcpy(ptr, buf, xfer);
		ch_write_done(ch, xfer);
		len -= xfer;
		buf += xfer;
		if (len == 0)
			break;
	}

	/* single remote kick covering everything committed above */
	if (orig_len - len)
		ch->notify_other_cpu();

	return orig_len - len;
}
1470
/* Write one @len-byte packet: a 5-word header (word 0 = length) followed
 * by the payload.  All-or-nothing: fails with -ENOMEM unless header plus
 * payload fit in the fifo right now.
 *
 * NOTE(review): the header-write failure path returns a bare -1 (reads as
 * -EPERM to errno-aware callers) — confirm whether -EFAULT/-EIO was meant.
 */
static int smd_packet_write(smd_channel_t *ch, const void *_data, int len,
		int user_buf)
{
	int ret;
	unsigned hdr[5];

	SMD_DBG("smd_packet_write() %d -> ch%d\n", len, ch->n);
	if (len < 0)
		return -EINVAL;
	else if (len == 0)
		return 0;

	/* reserve space for header + payload atomically */
	if (smd_stream_write_avail(ch) < (len + SMD_HEADER_SIZE))
		return -ENOMEM;

	hdr[0] = len;
	hdr[1] = hdr[2] = hdr[3] = hdr[4] = 0;


	ret = smd_stream_write(ch, hdr, sizeof(hdr), 0);
	if (ret < 0 || ret != sizeof(hdr)) {
		SMD_DBG("%s failed to write pkt header: "
			"%d returned\n", __func__, ret);
		return -1;
	}


	ret = smd_stream_write(ch, _data, len, user_buf);
	if (ret < 0 || ret != len) {
		SMD_DBG("%s failed to write pkt data: "
			"%d returned\n", __func__, ret);
		return ret;
	}

	return len;
}
1507
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001508static int smd_stream_read(smd_channel_t *ch, void *data, int len, int user_buf)
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001509{
1510 int r;
1511
1512 if (len < 0)
1513 return -EINVAL;
1514
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001515 r = ch_read(ch, data, len, user_buf);
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001516 if (r > 0)
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001517 if (!read_intr_blocked(ch))
1518 ch->notify_other_cpu();
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001519
1520 return r;
1521}
1522
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001523static int smd_packet_read(smd_channel_t *ch, void *data, int len, int user_buf)
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001524{
1525 unsigned long flags;
1526 int r;
1527
1528 if (len < 0)
1529 return -EINVAL;
1530
1531 if (len > ch->current_packet)
1532 len = ch->current_packet;
1533
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001534 r = ch_read(ch, data, len, user_buf);
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001535 if (r > 0)
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001536 if (!read_intr_blocked(ch))
1537 ch->notify_other_cpu();
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001538
1539 spin_lock_irqsave(&smd_lock, flags);
1540 ch->current_packet -= r;
1541 update_packet_state(ch);
1542 spin_unlock_irqrestore(&smd_lock, flags);
1543
1544 return r;
1545}
1546
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001547static int smd_packet_read_from_cb(smd_channel_t *ch, void *data, int len,
1548 int user_buf)
1549{
1550 int r;
1551
1552 if (len < 0)
1553 return -EINVAL;
1554
1555 if (len > ch->current_packet)
1556 len = ch->current_packet;
1557
1558 r = ch_read(ch, data, len, user_buf);
1559 if (r > 0)
1560 if (!read_intr_blocked(ch))
1561 ch->notify_other_cpu();
1562
1563 ch->current_packet -= r;
1564 update_packet_state(ch);
1565
1566 return r;
1567}
1568
Angshuman Sarkarac7d6252011-09-30 18:20:59 +05301569#if (defined(CONFIG_MSM_SMD_PKG4) || defined(CONFIG_MSM_SMD_PKG3))
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001570static int smd_alloc_v2(struct smd_channel *ch)
1571{
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001572 void *buffer;
1573 unsigned buffer_sz;
1574
Jeff Hugo918b2dc2012-03-21 13:42:09 -06001575 if (is_word_access_ch(ch->type)) {
1576 struct smd_shared_v2_word_access *shared2;
1577 shared2 = smem_alloc(SMEM_SMD_BASE_ID + ch->n,
1578 sizeof(*shared2));
1579 if (!shared2) {
1580 SMD_INFO("smem_alloc failed ch=%d\n", ch->n);
1581 return -EINVAL;
1582 }
1583 ch->send = &shared2->ch0;
1584 ch->recv = &shared2->ch1;
1585 } else {
1586 struct smd_shared_v2 *shared2;
1587 shared2 = smem_alloc(SMEM_SMD_BASE_ID + ch->n,
1588 sizeof(*shared2));
1589 if (!shared2) {
1590 SMD_INFO("smem_alloc failed ch=%d\n", ch->n);
1591 return -EINVAL;
1592 }
1593 ch->send = &shared2->ch0;
1594 ch->recv = &shared2->ch1;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001595 }
Jeff Hugo918b2dc2012-03-21 13:42:09 -06001596 ch->half_ch = get_half_ch_funcs(ch->type);
1597
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001598 buffer = smem_get_entry(SMEM_SMD_FIFO_BASE_ID + ch->n, &buffer_sz);
1599 if (!buffer) {
Angshuman Sarkarac7d6252011-09-30 18:20:59 +05301600 SMD_INFO("smem_get_entry failed\n");
1601 return -EINVAL;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001602 }
1603
1604 /* buffer must be a power-of-two size */
Angshuman Sarkarac7d6252011-09-30 18:20:59 +05301605 if (buffer_sz & (buffer_sz - 1)) {
1606 SMD_INFO("Buffer size: %u not power of two\n", buffer_sz);
1607 return -EINVAL;
1608 }
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001609 buffer_sz /= 2;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001610 ch->send_data = buffer;
1611 ch->recv_data = buffer + buffer_sz;
1612 ch->fifo_size = buffer_sz;
Jeff Hugo918b2dc2012-03-21 13:42:09 -06001613
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001614 return 0;
1615}
1616
/* v1 shared-memory channels are not supported on PKG3/PKG4 targets;
 * always fail so smd_alloc_channel()'s v2-then-v1 fallback gives up.
 */
static int smd_alloc_v1(struct smd_channel *ch)
{
	return -EINVAL;
}
1621
1622#else /* define v1 for older targets */
/* v2 shared-memory channels are not available on older (pre-PKG3/PKG4)
 * targets; always fail so smd_alloc_channel() falls back to v1.
 */
static int smd_alloc_v2(struct smd_channel *ch)
{
	return -EINVAL;
}
1627
1628static int smd_alloc_v1(struct smd_channel *ch)
1629{
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001630 struct smd_shared_v1 *shared1;
1631 shared1 = smem_alloc(ID_SMD_CHANNELS + ch->n, sizeof(*shared1));
1632 if (!shared1) {
1633 pr_err("smd_alloc_channel() cid %d does not exist\n", ch->n);
Angshuman Sarkarac7d6252011-09-30 18:20:59 +05301634 return -EINVAL;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001635 }
1636 ch->send = &shared1->ch0;
1637 ch->recv = &shared1->ch1;
1638 ch->send_data = shared1->data0;
1639 ch->recv_data = shared1->data1;
1640 ch->fifo_size = SMD_BUF_SIZE;
Jeff Hugo918b2dc2012-03-21 13:42:09 -06001641 ch->half_ch = get_half_ch_funcs(ch->type);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001642 return 0;
1643}
1644
Angshuman Sarkarac7d6252011-09-30 18:20:59 +05301645#endif
1646
/*
 * Create the local bookkeeping for one channel described by an SMEM
 * allocation-table entry: bind it to shared memory (v2 preferred, v1
 * fallback), select the edge-specific remote-notify function and the
 * stream/packet operation set, add it to the closed list, and register
 * a platform device so clients can probe against it.
 *
 * Returns 0 on success, -1 on allocation failure (both kzalloc and
 * shared-memory binding failures).
 */
static int smd_alloc_channel(struct smd_alloc_elm *alloc_elm)
{
	struct smd_channel *ch;

	ch = kzalloc(sizeof(struct smd_channel), GFP_KERNEL);
	if (ch == 0) {
		pr_err("smd_alloc_channel() out of memory\n");
		return -1;
	}
	ch->n = alloc_elm->cid;
	ch->type = SMD_CHANNEL_TYPE(alloc_elm->type);

	/* try v2 layout first; only if that fails try the legacy v1 */
	if (smd_alloc_v2(ch) && smd_alloc_v1(ch)) {
		kfree(ch);
		return -1;
	}

	/* fifo_size is a power of two, so size-1 is a valid index mask */
	ch->fifo_mask = ch->fifo_size - 1;

	/* probe_worker guarentees ch->type will be a valid type */
	if (ch->type == SMD_APPS_MODEM)
		ch->notify_other_cpu = notify_modem_smd;
	else if (ch->type == SMD_APPS_QDSP)
		ch->notify_other_cpu = notify_dsp_smd;
	else if (ch->type == SMD_APPS_DSPS)
		ch->notify_other_cpu = notify_dsps_smd;
	else if (ch->type == SMD_APPS_WCNSS)
		ch->notify_other_cpu = notify_wcnss_smd;
	else if (ch->type == SMD_APPS_RPM)
		ch->notify_other_cpu = notify_rpm_smd;

	/* install either the packet or the stream operation set */
	if (smd_is_packet(alloc_elm)) {
		ch->read = smd_packet_read;
		ch->write = smd_packet_write;
		ch->read_avail = smd_packet_read_avail;
		ch->write_avail = smd_packet_write_avail;
		ch->update_state = update_packet_state;
		ch->read_from_cb = smd_packet_read_from_cb;
		ch->is_pkt_ch = 1;
	} else {
		ch->read = smd_stream_read;
		ch->write = smd_stream_write;
		ch->read_avail = smd_stream_read_avail;
		ch->write_avail = smd_stream_write_avail;
		ch->update_state = update_stream_state;
		ch->read_from_cb = smd_stream_read;
	}

	/* copy and force-terminate the name from the alloc table */
	memcpy(ch->name, alloc_elm->name, SMD_MAX_CH_NAME_LEN);
	ch->name[SMD_MAX_CH_NAME_LEN-1] = 0;

	ch->pdev.name = ch->name;
	ch->pdev.id = ch->type;

	SMD_INFO("smd_alloc_channel() '%s' cid=%d\n",
		ch->name, ch->n);

	/* channel starts life on the closed list until a client opens it */
	mutex_lock(&smd_creation_mutex);
	list_add(&ch->ch_list, &smd_ch_closed_list);
	mutex_unlock(&smd_creation_mutex);

	platform_device_register(&ch->pdev);
	if (!strncmp(ch->name, "LOOPBACK", 8) && ch->type == SMD_APPS_MODEM) {
		/* create a platform driver to be used by smd_tty driver
		 * so that it can access the loopback port
		 */
		loopback_tty_pdev.id = ch->type;
		platform_device_register(&loopback_tty_pdev);
	}
	return 0;
}
1718
1719static inline void notify_loopback_smd(void)
1720{
1721 unsigned long flags;
1722 struct smd_channel *ch;
1723
1724 spin_lock_irqsave(&smd_lock, flags);
1725 list_for_each_entry(ch, &smd_ch_list_loopback, ch_list) {
1726 ch->notify(ch->priv, SMD_EVENT_DATA);
1727 }
1728 spin_unlock_irqrestore(&smd_lock, flags);
1729}
1730
/*
 * Create the local loopback channel: a channel whose send and receive
 * sides share one static control block and one static data buffer, so
 * writes are immediately readable back on the same processor.
 *
 * Returns 0 on success, -1 on allocation failure.
 */
static int smd_alloc_loopback_channel(void)
{
	/* single static control block and FIFO shared by both halves */
	static struct smd_half_channel smd_loopback_ctl;
	static char smd_loopback_data[SMD_BUF_SIZE];
	struct smd_channel *ch;

	ch = kzalloc(sizeof(struct smd_channel), GFP_KERNEL);
	if (ch == 0) {
		pr_err("%s: out of memory\n", __func__);
		return -1;
	}
	ch->n = SMD_LOOPBACK_CID;

	/* send == recv: data written is read back locally */
	ch->send = &smd_loopback_ctl;
	ch->recv = &smd_loopback_ctl;
	ch->send_data = smd_loopback_data;
	ch->recv_data = smd_loopback_data;
	ch->fifo_size = SMD_BUF_SIZE;

	ch->fifo_mask = ch->fifo_size - 1;
	ch->type = SMD_LOOPBACK_TYPE;
	ch->notify_other_cpu = notify_loopback_smd;

	/* loopback is always stream mode */
	ch->read = smd_stream_read;
	ch->write = smd_stream_write;
	ch->read_avail = smd_stream_read_avail;
	ch->write_avail = smd_stream_write_avail;
	ch->update_state = update_stream_state;
	ch->read_from_cb = smd_stream_read;

	/* NOTE(review): 20 is presumably sizeof(ch->name)
	 * (SMD_MAX_CH_NAME_LEN) and 14 is strlen("local_loopback") —
	 * confirm against the struct definition
	 */
	memset(ch->name, 0, 20);
	memcpy(ch->name, "local_loopback", 14);

	ch->pdev.name = ch->name;
	ch->pdev.id = ch->type;

	SMD_INFO("%s: '%s' cid=%d\n", __func__, ch->name, ch->n);

	mutex_lock(&smd_creation_mutex);
	list_add(&ch->ch_list, &smd_ch_closed_list);
	mutex_unlock(&smd_creation_mutex);

	platform_device_register(&ch->pdev);
	return 0;
}
1776
/* No-op notification callback, installed when a client opens a channel
 * with a NULL notify function and while a channel is being closed.
 */
static void do_nothing_notify(void *priv, unsigned flags)
{
}
1780
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001781static void finalize_channel_close_fn(struct work_struct *work)
1782{
1783 unsigned long flags;
1784 struct smd_channel *ch;
1785 struct smd_channel *index;
1786
Eric Holmbergbb2b1fa2011-10-12 16:41:37 -06001787 mutex_lock(&smd_creation_mutex);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001788 spin_lock_irqsave(&smd_lock, flags);
1789 list_for_each_entry_safe(ch, index, &smd_ch_to_close_list, ch_list) {
1790 list_del(&ch->ch_list);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001791 list_add(&ch->ch_list, &smd_ch_closed_list);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001792 ch->notify(ch->priv, SMD_EVENT_REOPEN_READY);
1793 ch->notify = do_nothing_notify;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001794 }
1795 spin_unlock_irqrestore(&smd_lock, flags);
Eric Holmbergbb2b1fa2011-10-12 16:41:37 -06001796 mutex_unlock(&smd_creation_mutex);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001797}
1798
1799struct smd_channel *smd_get_channel(const char *name, uint32_t type)
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001800{
1801 struct smd_channel *ch;
1802
1803 mutex_lock(&smd_creation_mutex);
1804 list_for_each_entry(ch, &smd_ch_closed_list, ch_list) {
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001805 if (!strcmp(name, ch->name) &&
1806 (type == ch->type)) {
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001807 list_del(&ch->ch_list);
1808 mutex_unlock(&smd_creation_mutex);
1809 return ch;
1810 }
1811 }
1812 mutex_unlock(&smd_creation_mutex);
1813
1814 return NULL;
1815}
1816
/*
 * Open the channel named @name on the processor edge @edge.
 *
 * @name:   channel name from the SMEM allocation table
 * @edge:   edge/type identifier (SMD_APPS_MODEM, ..., SMD_LOOPBACK_TYPE)
 * @_ch:    out-parameter receiving the opened channel handle
 * @priv:   opaque pointer passed back to @notify
 * @notify: event callback; NULL means "no callback"
 *
 * Returns 0 on success, -ENODEV if SMD is not initialized or the channel
 * does not exist, -EAGAIN if the channel exists but is still closing.
 */
int smd_named_open_on_edge(const char *name, uint32_t edge,
			   smd_channel_t **_ch,
			   void *priv, void (*notify)(void *, unsigned))
{
	struct smd_channel *ch;
	unsigned long flags;

	if (smd_initialized == 0) {
		SMD_INFO("smd_open() before smd_init()\n");
		return -ENODEV;
	}

	SMD_DBG("smd_open('%s', %p, %p)\n", name, priv, notify);

	ch = smd_get_channel(name, edge);
	if (!ch) {
		/* check closing list for port */
		spin_lock_irqsave(&smd_lock, flags);
		list_for_each_entry(ch, &smd_ch_closing_list, ch_list) {
			if (!strncmp(name, ch->name, 20) &&
				(edge == ch->type)) {
				/* channel exists, but is being closed */
				spin_unlock_irqrestore(&smd_lock, flags);
				return -EAGAIN;
			}
		}

		/* check closing workqueue list for port */
		list_for_each_entry(ch, &smd_ch_to_close_list, ch_list) {
			if (!strncmp(name, ch->name, 20) &&
				(edge == ch->type)) {
				/* channel exists, but is being closed */
				spin_unlock_irqrestore(&smd_lock, flags);
				return -EAGAIN;
			}
		}
		spin_unlock_irqrestore(&smd_lock, flags);

		/* one final check to handle closing->closed race condition */
		ch = smd_get_channel(name, edge);
		if (!ch)
			return -ENODEV;
	}

	if (notify == 0)
		notify = do_nothing_notify;

	ch->notify = notify;
	ch->current_packet = 0;
	ch->last_state = SMD_SS_CLOSED;
	ch->priv = priv;

	/* loopback has no remote side, so mark it open immediately and
	 * raise the modem-control signals ourselves
	 */
	if (edge == SMD_LOOPBACK_TYPE) {
		ch->last_state = SMD_SS_OPENED;
		ch->half_ch->set_state(ch->send, SMD_SS_OPENED);
		ch->half_ch->set_fDSR(ch->send, 1);
		ch->half_ch->set_fCTS(ch->send, 1);
		ch->half_ch->set_fCD(ch->send, 1);
	}

	*_ch = ch;

	SMD_DBG("smd_open: opening '%s'\n", ch->name);

	/* file the channel on the per-edge active list */
	spin_lock_irqsave(&smd_lock, flags);
	if (SMD_CHANNEL_TYPE(ch->type) == SMD_APPS_MODEM)
		list_add(&ch->ch_list, &smd_ch_list_modem);
	else if (SMD_CHANNEL_TYPE(ch->type) == SMD_APPS_QDSP)
		list_add(&ch->ch_list, &smd_ch_list_dsp);
	else if (SMD_CHANNEL_TYPE(ch->type) == SMD_APPS_DSPS)
		list_add(&ch->ch_list, &smd_ch_list_dsps);
	else if (SMD_CHANNEL_TYPE(ch->type) == SMD_APPS_WCNSS)
		list_add(&ch->ch_list, &smd_ch_list_wcnss);
	else if (SMD_CHANNEL_TYPE(ch->type) == SMD_APPS_RPM)
		list_add(&ch->ch_list, &smd_ch_list_rpm);
	else
		list_add(&ch->ch_list, &smd_ch_list_loopback);

	SMD_DBG("%s: opening ch %d\n", __func__, ch->n);

	if (edge != SMD_LOOPBACK_TYPE)
		smd_state_change(ch, ch->last_state, SMD_SS_OPENING);

	spin_unlock_irqrestore(&smd_lock, flags);

	return 0;
}
EXPORT_SYMBOL(smd_named_open_on_edge);
1905
1906
/* Open a channel on the modem edge; thin wrapper around
 * smd_named_open_on_edge() with edge fixed to SMD_APPS_MODEM.
 */
int smd_open(const char *name, smd_channel_t **_ch,
	     void *priv, void (*notify)(void *, unsigned))
{
	return smd_named_open_on_edge(name, SMD_APPS_MODEM, _ch, priv,
				      notify);
}
EXPORT_SYMBOL(smd_open);
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001914
/*
 * Close a channel.  The local half is marked closed immediately; if the
 * remote half is still open the channel goes onto the closing list to
 * be finalized later (finalize_channel_close_fn), otherwise it is
 * returned to the closed list and may be reopened at once.
 *
 * Returns 0 on success, -1 for a NULL channel.
 */
int smd_close(smd_channel_t *ch)
{
	unsigned long flags;

	if (ch == 0)
		return -1;

	SMD_INFO("smd_close(%s)\n", ch->name);

	spin_lock_irqsave(&smd_lock, flags);
	list_del(&ch->ch_list);
	if (ch->n == SMD_LOOPBACK_CID) {
		/* loopback has no remote side; drop signals directly */
		ch->half_ch->set_fDSR(ch->send, 0);
		ch->half_ch->set_fCTS(ch->send, 0);
		ch->half_ch->set_fCD(ch->send, 0);
		ch->half_ch->set_state(ch->send, SMD_SS_CLOSED);
	} else
		ch_set_state(ch, SMD_SS_CLOSED);

	if (ch->half_ch->get_state(ch->recv) == SMD_SS_OPENED) {
		/* remote side still open: defer final cleanup */
		list_add(&ch->ch_list, &smd_ch_closing_list);
		spin_unlock_irqrestore(&smd_lock, flags);
	} else {
		/* smd_lock must be dropped before taking the creation
		 * mutex (a mutex cannot be taken under a spinlock)
		 */
		spin_unlock_irqrestore(&smd_lock, flags);
		ch->notify = do_nothing_notify;
		mutex_lock(&smd_creation_mutex);
		list_add(&ch->ch_list, &smd_ch_closed_list);
		mutex_unlock(&smd_creation_mutex);
	}

	return 0;
}
EXPORT_SYMBOL(smd_close);
1948
/*
 * Begin a segmented packet write of @len bytes: writes only the packet
 * header now; the payload follows via smd_write_segment() calls and the
 * transaction completes with smd_write_end().
 *
 * Returns 0 on success; -ENODEV for a NULL channel, -EACCES for a
 * stream channel, -EINVAL for len < 1, -EBUSY if another packet is in
 * progress, -EAGAIN if the FIFO cannot hold the header yet, -EPERM if
 * the header write fails.
 */
int smd_write_start(smd_channel_t *ch, int len)
{
	int ret;
	unsigned hdr[5];	/* SMD packet header: word 0 = length */

	if (!ch) {
		pr_err("%s: Invalid channel specified\n", __func__);
		return -ENODEV;
	}
	if (!ch->is_pkt_ch) {
		pr_err("%s: non-packet channel specified\n", __func__);
		return -EACCES;
	}
	if (len < 1) {
		pr_err("%s: invalid length: %d\n", __func__, len);
		return -EINVAL;
	}

	if (ch->pending_pkt_sz) {
		pr_err("%s: packet of size: %d in progress\n", __func__,
			ch->pending_pkt_sz);
		return -EBUSY;
	}
	ch->pending_pkt_sz = len;

	/* only the header must fit now; the payload streams in later,
	 * so pending_pkt_sz is reset on every failure path below
	 */
	if (smd_stream_write_avail(ch) < (SMD_HEADER_SIZE)) {
		ch->pending_pkt_sz = 0;
		SMD_DBG("%s: no space to write packet header\n", __func__);
		return -EAGAIN;
	}

	hdr[0] = len;
	hdr[1] = hdr[2] = hdr[3] = hdr[4] = 0;


	ret = smd_stream_write(ch, hdr, sizeof(hdr), 0);
	if (ret < 0 || ret != sizeof(hdr)) {
		ch->pending_pkt_sz = 0;
		pr_err("%s: packet header failed to write\n", __func__);
		return -EPERM;
	}
	return 0;
}
EXPORT_SYMBOL(smd_write_start);
1993
1994int smd_write_segment(smd_channel_t *ch, void *data, int len, int user_buf)
1995{
1996 int bytes_written;
1997
1998 if (!ch) {
1999 pr_err("%s: Invalid channel specified\n", __func__);
2000 return -ENODEV;
2001 }
2002 if (len < 1) {
2003 pr_err("%s: invalid length: %d\n", __func__, len);
2004 return -EINVAL;
2005 }
2006
2007 if (!ch->pending_pkt_sz) {
2008 pr_err("%s: no transaction in progress\n", __func__);
2009 return -ENOEXEC;
2010 }
2011 if (ch->pending_pkt_sz - len < 0) {
2012 pr_err("%s: segment of size: %d will make packet go over "
2013 "length\n", __func__, len);
2014 return -EINVAL;
2015 }
2016
2017 bytes_written = smd_stream_write(ch, data, len, user_buf);
2018
2019 ch->pending_pkt_sz -= bytes_written;
2020
2021 return bytes_written;
2022}
2023EXPORT_SYMBOL(smd_write_segment);
2024
/*
 * Finish a segmented packet write.  Succeeds only when every byte
 * declared to smd_write_start() has been written via
 * smd_write_segment().
 *
 * Returns 0 on success, -ENODEV for a NULL channel, -E2BIG if payload
 * bytes are still outstanding.
 */
int smd_write_end(smd_channel_t *ch)
{

	if (!ch) {
		pr_err("%s: Invalid channel specified\n", __func__);
		return -ENODEV;
	}
	if (ch->pending_pkt_sz) {
		pr_err("%s: current packet not completely written\n", __func__);
		return -E2BIG;
	}

	return 0;
}
EXPORT_SYMBOL(smd_write_end);
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002040
2041int smd_read(smd_channel_t *ch, void *data, int len)
2042{
Jack Pham1b236d12012-03-19 15:27:18 -07002043 if (!ch) {
2044 pr_err("%s: Invalid channel specified\n", __func__);
2045 return -ENODEV;
2046 }
2047
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002048 return ch->read(ch, data, len, 0);
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002049}
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002050EXPORT_SYMBOL(smd_read);
2051
2052int smd_read_user_buffer(smd_channel_t *ch, void *data, int len)
2053{
Jack Pham1b236d12012-03-19 15:27:18 -07002054 if (!ch) {
2055 pr_err("%s: Invalid channel specified\n", __func__);
2056 return -ENODEV;
2057 }
2058
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002059 return ch->read(ch, data, len, 1);
2060}
2061EXPORT_SYMBOL(smd_read_user_buffer);
2062
2063int smd_read_from_cb(smd_channel_t *ch, void *data, int len)
2064{
Jack Pham1b236d12012-03-19 15:27:18 -07002065 if (!ch) {
2066 pr_err("%s: Invalid channel specified\n", __func__);
2067 return -ENODEV;
2068 }
2069
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002070 return ch->read_from_cb(ch, data, len, 0);
2071}
2072EXPORT_SYMBOL(smd_read_from_cb);
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002073
2074int smd_write(smd_channel_t *ch, const void *data, int len)
2075{
Jack Pham1b236d12012-03-19 15:27:18 -07002076 if (!ch) {
2077 pr_err("%s: Invalid channel specified\n", __func__);
2078 return -ENODEV;
2079 }
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002080
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002081 return ch->pending_pkt_sz ? -EBUSY : ch->write(ch, data, len, 0);
Brian Swetland636eb9c2009-12-07 15:28:08 -08002082}
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002083EXPORT_SYMBOL(smd_write);
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002084
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002085int smd_write_user_buffer(smd_channel_t *ch, const void *data, int len)
Brian Swetland636eb9c2009-12-07 15:28:08 -08002086{
Jack Pham1b236d12012-03-19 15:27:18 -07002087 if (!ch) {
2088 pr_err("%s: Invalid channel specified\n", __func__);
2089 return -ENODEV;
2090 }
2091
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002092 return ch->pending_pkt_sz ? -EBUSY : ch->write(ch, data, len, 1);
Brian Swetland636eb9c2009-12-07 15:28:08 -08002093}
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002094EXPORT_SYMBOL(smd_write_user_buffer);
Brian Swetland636eb9c2009-12-07 15:28:08 -08002095
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002096int smd_read_avail(smd_channel_t *ch)
2097{
Jack Pham1b236d12012-03-19 15:27:18 -07002098 if (!ch) {
2099 pr_err("%s: Invalid channel specified\n", __func__);
2100 return -ENODEV;
2101 }
2102
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002103 return ch->read_avail(ch);
2104}
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002105EXPORT_SYMBOL(smd_read_avail);
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002106
2107int smd_write_avail(smd_channel_t *ch)
2108{
Jack Pham1b236d12012-03-19 15:27:18 -07002109 if (!ch) {
2110 pr_err("%s: Invalid channel specified\n", __func__);
2111 return -ENODEV;
2112 }
2113
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002114 return ch->write_avail(ch);
2115}
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002116EXPORT_SYMBOL(smd_write_avail);
2117
2118void smd_enable_read_intr(smd_channel_t *ch)
2119{
2120 if (ch)
Jeff Hugo918b2dc2012-03-21 13:42:09 -06002121 ch->half_ch->set_fBLOCKREADINTR(ch->send, 0);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002122}
2123EXPORT_SYMBOL(smd_enable_read_intr);
2124
2125void smd_disable_read_intr(smd_channel_t *ch)
2126{
2127 if (ch)
Jeff Hugo918b2dc2012-03-21 13:42:09 -06002128 ch->half_ch->set_fBLOCKREADINTR(ch->send, 1);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002129}
2130EXPORT_SYMBOL(smd_disable_read_intr);
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002131
Eric Holmbergdeace152012-07-25 12:17:11 -06002132/**
2133 * Enable/disable receive interrupts for the remote processor used by a
2134 * particular channel.
2135 * @ch: open channel handle to use for the edge
2136 * @mask: 1 = mask interrupts; 0 = unmask interrupts
2137 * @returns: 0 for success; < 0 for failure
2138 *
2139 * Note that this enables/disables all interrupts from the remote subsystem for
2140 * all channels. As such, it should be used with care and only for specific
2141 * use cases such as power-collapse sequencing.
2142 */
2143int smd_mask_receive_interrupt(smd_channel_t *ch, bool mask)
2144{
2145 struct irq_chip *irq_chip;
2146 struct irq_data *irq_data;
2147 struct interrupt_config_item *int_cfg;
2148
2149 if (!ch)
2150 return -EINVAL;
2151
2152 if (ch->type >= ARRAY_SIZE(edge_to_pids))
2153 return -ENODEV;
2154
2155 int_cfg = &private_intr_config[edge_to_pids[ch->type].remote_pid].smd;
2156
2157 if (int_cfg->irq_id < 0)
2158 return -ENODEV;
2159
2160 irq_chip = irq_get_chip(int_cfg->irq_id);
2161 if (!irq_chip)
2162 return -ENODEV;
2163
2164 irq_data = irq_get_irq_data(int_cfg->irq_id);
2165 if (!irq_data)
2166 return -ENODEV;
2167
2168 if (mask) {
2169 SMx_POWER_INFO("SMD Masking interrupts from %s\n",
2170 edge_to_pids[ch->type].subsys_name);
2171 irq_chip->irq_mask(irq_data);
2172 } else {
2173 SMx_POWER_INFO("SMD Unmasking interrupts from %s\n",
2174 edge_to_pids[ch->type].subsys_name);
2175 irq_chip->irq_unmask(irq_data);
2176 }
2177
2178 return 0;
2179}
2180EXPORT_SYMBOL(smd_mask_receive_interrupt);
2181
/* Blocking waits are not implemented; always fails with -1. */
int smd_wait_until_readable(smd_channel_t *ch, int bytes)
{
	return -1;
}
2186
/* Blocking waits are not implemented; always fails with -1. */
int smd_wait_until_writable(smd_channel_t *ch, int bytes)
{
	return -1;
}
2191
2192int smd_cur_packet_size(smd_channel_t *ch)
2193{
Jack Pham1b236d12012-03-19 15:27:18 -07002194 if (!ch) {
2195 pr_err("%s: Invalid channel specified\n", __func__);
2196 return -ENODEV;
2197 }
2198
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002199 return ch->current_packet;
2200}
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002201EXPORT_SYMBOL(smd_cur_packet_size);
2202
2203int smd_tiocmget(smd_channel_t *ch)
2204{
Jack Pham1b236d12012-03-19 15:27:18 -07002205 if (!ch) {
2206 pr_err("%s: Invalid channel specified\n", __func__);
2207 return -ENODEV;
2208 }
2209
Jeff Hugo918b2dc2012-03-21 13:42:09 -06002210 return (ch->half_ch->get_fDSR(ch->recv) ? TIOCM_DSR : 0) |
2211 (ch->half_ch->get_fCTS(ch->recv) ? TIOCM_CTS : 0) |
2212 (ch->half_ch->get_fCD(ch->recv) ? TIOCM_CD : 0) |
2213 (ch->half_ch->get_fRI(ch->recv) ? TIOCM_RI : 0) |
2214 (ch->half_ch->get_fCTS(ch->send) ? TIOCM_RTS : 0) |
2215 (ch->half_ch->get_fDSR(ch->send) ? TIOCM_DTR : 0);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002216}
2217EXPORT_SYMBOL(smd_tiocmget);
2218
/* this api will be called while holding smd_lock */
/*
 * Update our DTR/RTS signals on the send half and interrupt the remote
 * processor so it observes the change.  Because @clear is applied after
 * @set, a bit present in both ends up cleared.
 *
 * Returns 0 on success, -ENODEV for a NULL channel.
 */
int
smd_tiocmset_from_cb(smd_channel_t *ch, unsigned int set, unsigned int clear)
{
	if (!ch) {
		pr_err("%s: Invalid channel specified\n", __func__);
		return -ENODEV;
	}

	if (set & TIOCM_DTR)
		ch->half_ch->set_fDSR(ch->send, 1);

	if (set & TIOCM_RTS)
		ch->half_ch->set_fCTS(ch->send, 1);

	if (clear & TIOCM_DTR)
		ch->half_ch->set_fDSR(ch->send, 0);

	if (clear & TIOCM_RTS)
		ch->half_ch->set_fCTS(ch->send, 0);

	/* fSTATE tells the remote side a signal changed; the barrier
	 * ensures all flag updates are visible before the interrupt
	 */
	ch->half_ch->set_fSTATE(ch->send, 1);
	barrier();
	ch->notify_other_cpu();

	return 0;
}
EXPORT_SYMBOL(smd_tiocmset_from_cb);
2247
2248int smd_tiocmset(smd_channel_t *ch, unsigned int set, unsigned int clear)
2249{
2250 unsigned long flags;
2251
Jack Pham1b236d12012-03-19 15:27:18 -07002252 if (!ch) {
2253 pr_err("%s: Invalid channel specified\n", __func__);
2254 return -ENODEV;
2255 }
2256
Vamsi Krishnacb12a102011-08-17 15:18:26 -07002257 spin_lock_irqsave(&smd_lock, flags);
2258 smd_tiocmset_from_cb(ch, set, clear);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002259 spin_unlock_irqrestore(&smd_lock, flags);
2260
2261 return 0;
2262}
2263EXPORT_SYMBOL(smd_tiocmset);
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002264
Jeff Hugod71d6ac2012-04-11 16:48:07 -06002265int smd_is_pkt_avail(smd_channel_t *ch)
2266{
Jeff Hugoa8549f12012-08-13 20:36:18 -06002267 unsigned long flags;
2268
Jeff Hugod71d6ac2012-04-11 16:48:07 -06002269 if (!ch || !ch->is_pkt_ch)
2270 return -EINVAL;
2271
2272 if (ch->current_packet)
2273 return 1;
2274
Jeff Hugoa8549f12012-08-13 20:36:18 -06002275 spin_lock_irqsave(&smd_lock, flags);
Jeff Hugod71d6ac2012-04-11 16:48:07 -06002276 update_packet_state(ch);
Jeff Hugoa8549f12012-08-13 20:36:18 -06002277 spin_unlock_irqrestore(&smd_lock, flags);
Jeff Hugod71d6ac2012-04-11 16:48:07 -06002278
2279 return ch->current_packet ? 1 : 0;
2280}
2281EXPORT_SYMBOL(smd_is_pkt_avail);
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002282
2283
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002284/* -------------------------------------------------------------------------- */
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002285
/*
 * Shared Memory Range Check
 *
 * Takes a physical address and an offset and checks if the resulting physical
 * address would fit into one of the aux smem regions. If so, returns the
 * corresponding virtual address. Otherwise returns NULL. Expects the array
 * of smem regions to be in ascending physical address order.
 *
 * @base: physical base address to check
 * @offset: offset from the base to get the final address
 */
static void *smem_range_check(void *base, unsigned offset)
{
	int i;
	void *phys_addr;
	unsigned size;

	for (i = 0; i < num_smem_areas; ++i) {
		phys_addr = smem_areas[i].phys_addr;
		size = smem_areas[i].size;
		/* areas are sorted ascending, so once base falls below the
		 * current area's start it cannot match any later area
		 */
		if (base < phys_addr)
			return NULL;
		if (base > phys_addr + size)
			continue;
		/* NOTE: void* arithmetic here relies on the GCC extension
		 * treating sizeof(void) as 1
		 */
		if (base >= phys_addr && base + offset < phys_addr + size)
			return smem_areas[i].virt_addr + offset;
	}

	return NULL;
}
2316
/* smem_alloc returns the pointer to smem item if it is already allocated.
 * Otherwise, it returns NULL.  (Despite the name, it never allocates;
 * see smem_alloc2() for allocate-on-demand.)
 */
void *smem_alloc(unsigned id, unsigned size)
{
	return smem_find(id, size);
}
EXPORT_SYMBOL(smem_alloc);
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002325
/* smem_alloc2 returns the pointer to smem item.  If it is not allocated,
 * it allocates it and then returns the pointer to it.
 *
 * The heap table of contents is shared with remote processors, so all
 * access is serialized with the remote spinlock and write ordering is
 * enforced with wmb() so the remote side never sees a half-initialized
 * entry.  Returns NULL on bad id, size mismatch, uninitialized heap, or
 * exhausted heap.
 */
void *smem_alloc2(unsigned id, unsigned size_in)
{
	struct smem_shared *shared = (void *) MSM_SHARED_RAM_BASE;
	struct smem_heap_entry *toc = shared->heap_toc;
	unsigned long flags;
	void *ret = NULL;

	if (!shared->heap_info.initialized) {
		pr_err("%s: smem heap info not initialized\n", __func__);
		return NULL;
	}

	if (id >= SMEM_NUM_ITEMS)
		return NULL;

	size_in = ALIGN(size_in, 8);
	remote_spin_lock_irqsave(&remote_spinlock, flags);
	if (toc[id].allocated) {
		/* already present: only hand it out if the size matches */
		SMD_DBG("%s: %u already allocated\n", __func__, id);
		if (size_in != toc[id].size)
			pr_err("%s: wrong size %u (expected %u)\n",
				__func__, toc[id].size, size_in);
		else
			ret = (void *)(MSM_SHARED_RAM_BASE + toc[id].offset);
	} else if (id > SMEM_FIXED_ITEM_LAST) {
		/* only dynamic items may be allocated on demand */
		SMD_DBG("%s: allocating %u\n", __func__, id);
		if (shared->heap_info.heap_remaining >= size_in) {
			toc[id].offset = shared->heap_info.free_offset;
			toc[id].size = size_in;
			/* offset/size must be visible before allocated=1 */
			wmb();
			toc[id].allocated = 1;

			shared->heap_info.free_offset += size_in;
			shared->heap_info.heap_remaining -= size_in;
			ret = (void *)(MSM_SHARED_RAM_BASE + toc[id].offset);
		} else
			pr_err("%s: not enough memory %u (required %u)\n",
				__func__, shared->heap_info.heap_remaining,
				size_in);
	}
	wmb();
	remote_spin_unlock_irqrestore(&remote_spinlock, flags);
	return ret;
}
EXPORT_SYMBOL(smem_alloc2);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002374
/* smem_get_entry - return the address and size of an allocated smem item.
 *
 * @id:   SMEM item identifier (must be < SMEM_NUM_ITEMS)
 * @size: out parameter; set to the item's size, or 0 if not allocated
 *
 * Returns the item's virtual address, or 0 when @id is out of range, the
 * item is unallocated, or the item's auxiliary base address fails the
 * smem_range_check() translation.  The remote spinlock is taken only once
 * it has been initialized (early-boot callers may run before smsm_init()).
 */
void *smem_get_entry(unsigned id, unsigned *size)
{
	struct smem_shared *shared = (void *) MSM_SHARED_RAM_BASE;
	struct smem_heap_entry *toc = shared->heap_toc;
	/* snapshot the flag so the lock and unlock decisions always agree */
	int use_spinlocks = spinlocks_initialized;
	void *ret = 0;
	unsigned long flags = 0;

	if (id >= SMEM_NUM_ITEMS)
		return ret;

	if (use_spinlocks)
		remote_spin_lock_irqsave(&remote_spinlock, flags);
	/* toc is in device memory and cannot be speculatively accessed */
	if (toc[id].allocated) {
		*size = toc[id].size;
		/* compiler barrier: keep the size read before the address
		 * computation below
		 */
		barrier();
		if (!(toc[id].reserved & BASE_ADDR_MASK))
			ret = (void *) (MSM_SHARED_RAM_BASE + toc[id].offset);
		else
			/* item lives in an auxiliary smem area; translate its
			 * physical base into a mapped virtual address
			 */
			ret = smem_range_check(
				(void *)(toc[id].reserved & BASE_ADDR_MASK),
				toc[id].offset);
	} else {
		*size = 0;
	}
	if (use_spinlocks)
		remote_spin_unlock_irqrestore(&remote_spinlock, flags);

	return ret;
}
EXPORT_SYMBOL(smem_get_entry);
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002407
2408void *smem_find(unsigned id, unsigned size_in)
2409{
2410 unsigned size;
2411 void *ptr;
2412
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002413 ptr = smem_get_entry(id, &size);
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002414 if (!ptr)
2415 return 0;
2416
2417 size_in = ALIGN(size_in, 8);
2418 if (size_in != size) {
2419 pr_err("smem_find(%d, %d): wrong size %d\n",
2420 id, size_in, size);
2421 return 0;
2422 }
2423
2424 return ptr;
2425}
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002426EXPORT_SYMBOL(smem_find);
2427
2428static int smsm_cb_init(void)
2429{
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002430 struct smsm_state_info *state_info;
2431 int n;
2432 int ret = 0;
2433
2434 smsm_states = kmalloc(sizeof(struct smsm_state_info)*SMSM_NUM_ENTRIES,
2435 GFP_KERNEL);
2436
2437 if (!smsm_states) {
2438 pr_err("%s: SMSM init failed\n", __func__);
2439 return -ENOMEM;
2440 }
2441
Karthikeyan Ramasubramanian3249a222012-04-18 17:16:49 -06002442 smsm_cb_wq = create_singlethread_workqueue("smsm_cb_wq");
2443 if (!smsm_cb_wq) {
2444 pr_err("%s: smsm_cb_wq creation failed\n", __func__);
2445 kfree(smsm_states);
2446 return -EFAULT;
2447 }
2448
Eric Holmbergc8002902011-09-16 13:55:57 -06002449 mutex_lock(&smsm_lock);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002450 for (n = 0; n < SMSM_NUM_ENTRIES; n++) {
2451 state_info = &smsm_states[n];
2452 state_info->last_value = __raw_readl(SMSM_STATE_ADDR(n));
Eric Holmberge8a39322012-04-03 15:14:02 -06002453 state_info->intr_mask_set = 0x0;
2454 state_info->intr_mask_clear = 0x0;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002455 INIT_LIST_HEAD(&state_info->callbacks);
2456 }
Eric Holmbergc8002902011-09-16 13:55:57 -06002457 mutex_unlock(&smsm_lock);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002458
2459 return ret;
2460}
2461
2462static int smsm_init(void)
2463{
2464 struct smem_shared *shared = (void *) MSM_SHARED_RAM_BASE;
2465 int i;
2466 struct smsm_size_info_type *smsm_size_info;
2467
2468 i = remote_spin_lock_init(&remote_spinlock, SMEM_SPINLOCK_SMEM_ALLOC);
2469 if (i) {
2470 pr_err("%s: remote spinlock init failed %d\n", __func__, i);
2471 return i;
2472 }
Angshuman Sarkar7ee0dca2011-08-22 21:37:34 +05302473 spinlocks_initialized = 1;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002474
2475 smsm_size_info = smem_alloc(SMEM_SMSM_SIZE_INFO,
2476 sizeof(struct smsm_size_info_type));
2477 if (smsm_size_info) {
2478 SMSM_NUM_ENTRIES = smsm_size_info->num_entries;
2479 SMSM_NUM_HOSTS = smsm_size_info->num_hosts;
2480 }
2481
Eric Holmbergc7e8daf2011-12-28 11:49:21 -07002482 i = kfifo_alloc(&smsm_snapshot_fifo,
2483 sizeof(uint32_t) * SMSM_NUM_ENTRIES * SMSM_SNAPSHOT_CNT,
2484 GFP_KERNEL);
2485 if (i) {
2486 pr_err("%s: SMSM state fifo alloc failed %d\n", __func__, i);
2487 return i;
2488 }
Eric Holmberg59a9f942012-03-19 10:04:22 -06002489 wake_lock_init(&smsm_snapshot_wakelock, WAKE_LOCK_SUSPEND,
2490 "smsm_snapshot");
Eric Holmbergc7e8daf2011-12-28 11:49:21 -07002491
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002492 if (!smsm_info.state) {
2493 smsm_info.state = smem_alloc2(ID_SHARED_STATE,
2494 SMSM_NUM_ENTRIES *
2495 sizeof(uint32_t));
2496
2497 if (smsm_info.state) {
2498 __raw_writel(0, SMSM_STATE_ADDR(SMSM_APPS_STATE));
2499 if ((shared->version[VERSION_MODEM] >> 16) >= 0xB)
2500 __raw_writel(0, \
2501 SMSM_STATE_ADDR(SMSM_APPS_DEM_I));
2502 }
2503 }
2504
2505 if (!smsm_info.intr_mask) {
2506 smsm_info.intr_mask = smem_alloc2(SMEM_SMSM_CPU_INTR_MASK,
2507 SMSM_NUM_ENTRIES *
2508 SMSM_NUM_HOSTS *
2509 sizeof(uint32_t));
2510
Eric Holmberge8a39322012-04-03 15:14:02 -06002511 if (smsm_info.intr_mask) {
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002512 for (i = 0; i < SMSM_NUM_ENTRIES; i++)
Eric Holmberge8a39322012-04-03 15:14:02 -06002513 __raw_writel(0x0,
2514 SMSM_INTR_MASK_ADDR(i, SMSM_APPS));
2515
2516 /* Configure legacy modem bits */
2517 __raw_writel(LEGACY_MODEM_SMSM_MASK,
2518 SMSM_INTR_MASK_ADDR(SMSM_MODEM_STATE,
2519 SMSM_APPS));
2520 }
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002521 }
2522
2523 if (!smsm_info.intr_mux)
2524 smsm_info.intr_mux = smem_alloc2(SMEM_SMD_SMSM_INTR_MUX,
2525 SMSM_NUM_INTR_MUX *
2526 sizeof(uint32_t));
2527
2528 i = smsm_cb_init();
2529 if (i)
2530 return i;
2531
2532 wmb();
Karthikeyan Ramasubramanian7069c482012-03-22 09:21:20 -06002533 smsm_driver_state_notify(SMSM_INIT, NULL);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002534 return 0;
2535}
2536
2537void smsm_reset_modem(unsigned mode)
2538{
2539 if (mode == SMSM_SYSTEM_DOWNLOAD) {
2540 mode = SMSM_RESET | SMSM_SYSTEM_DOWNLOAD;
2541 } else if (mode == SMSM_MODEM_WAIT) {
2542 mode = SMSM_RESET | SMSM_MODEM_WAIT;
2543 } else { /* reset_mode is SMSM_RESET or default */
2544 mode = SMSM_RESET;
2545 }
2546
2547 smsm_change_state(SMSM_APPS_STATE, mode, mode);
2548}
2549EXPORT_SYMBOL(smsm_reset_modem);
2550
2551void smsm_reset_modem_cont(void)
2552{
2553 unsigned long flags;
2554 uint32_t state;
2555
2556 if (!smsm_info.state)
2557 return;
2558
2559 spin_lock_irqsave(&smem_lock, flags);
2560 state = __raw_readl(SMSM_STATE_ADDR(SMSM_APPS_STATE)) \
2561 & ~SMSM_MODEM_WAIT;
2562 __raw_writel(state, SMSM_STATE_ADDR(SMSM_APPS_STATE));
2563 wmb();
2564 spin_unlock_irqrestore(&smem_lock, flags);
2565}
2566EXPORT_SYMBOL(smsm_reset_modem_cont);
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002567
/*
 * Capture a snapshot of all SMSM state words into the snapshot FIFO and
 * queue the callback worker to deliver change notifications.
 *
 * @use_wakelock: non-zero to hold the snapshot wakelock until the worker
 *                has consumed this snapshot (keeps the system awake while
 *                notifications are pending)
 *
 * NOTE(review): callers in this file invoke this under &smem_lock with
 * IRQs disabled — confirm before adding any sleeping call here.
 */
static void smsm_cb_snapshot(uint32_t use_wakelock)
{
	int n;
	uint32_t new_state;
	unsigned long flags;
	int ret;

	/* bail out early if a complete snapshot will not fit in the FIFO */
	ret = kfifo_avail(&smsm_snapshot_fifo);
	if (ret < SMSM_SNAPSHOT_SIZE) {
		pr_err("%s: SMSM snapshot full %d\n", __func__, ret);
		return;
	}

	/*
	 * To avoid a race condition with notify_smsm_cb_clients_worker, the
	 * following sequence must be followed:
	 * 1) increment snapshot count
	 * 2) insert data into FIFO
	 *
	 * Potentially in parallel, the worker:
	 * a) verifies >= 1 snapshots are in FIFO
	 * b) processes snapshot
	 * c) decrements reference count
	 *
	 * This order ensures that 1 will always occur before abc.
	 */
	if (use_wakelock) {
		spin_lock_irqsave(&smsm_snapshot_count_lock, flags);
		if (smsm_snapshot_count == 0) {
			/* first outstanding snapshot takes the wakelock */
			SMx_POWER_INFO("SMSM snapshot wake lock\n");
			wake_lock(&smsm_snapshot_wakelock);
		}
		++smsm_snapshot_count;
		spin_unlock_irqrestore(&smsm_snapshot_count_lock, flags);
	}

	/* queue state entries */
	for (n = 0; n < SMSM_NUM_ENTRIES; n++) {
		new_state = __raw_readl(SMSM_STATE_ADDR(n));

		ret = kfifo_in(&smsm_snapshot_fifo,
				&new_state, sizeof(new_state));
		if (ret != sizeof(new_state)) {
			pr_err("%s: SMSM snapshot failure %d\n", __func__, ret);
			goto restore_snapshot_count;
		}
	}

	/* queue wakelock usage flag */
	ret = kfifo_in(&smsm_snapshot_fifo,
			&use_wakelock, sizeof(use_wakelock));
	if (ret != sizeof(use_wakelock)) {
		pr_err("%s: SMSM snapshot failure %d\n", __func__, ret);
		goto restore_snapshot_count;
	}

	queue_work(smsm_cb_wq, &smsm_cb_work);
	return;

restore_snapshot_count:
	/* undo step 1 above: drop this snapshot's reference and release the
	 * wakelock if it was the last outstanding one
	 */
	if (use_wakelock) {
		spin_lock_irqsave(&smsm_snapshot_count_lock, flags);
		if (smsm_snapshot_count) {
			--smsm_snapshot_count;
			if (smsm_snapshot_count == 0) {
				SMx_POWER_INFO("SMSM snapshot wake unlock\n");
				wake_unlock(&smsm_snapshot_wakelock);
			}
		} else {
			pr_err("%s: invalid snapshot count\n", __func__);
		}
		spin_unlock_irqrestore(&smsm_snapshot_count_lock, flags);
	}
}
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002642
/*
 * Common SMSM interrupt handler: processes remote state changes and runs
 * the apps-side handshake state machine (reset/init/run transitions with
 * the modem), then snapshots all SMSM state for callback delivery.
 *
 * @irq:  interrupt number; INT_ADSP_A11_SMSM takes a short path that only
 *        snapshots state (no modem handshake)
 * @data: unused
 *
 * @returns IRQ_HANDLED always
 */
static irqreturn_t smsm_irq_handler(int irq, void *data)
{
	unsigned long flags;

	if (irq == INT_ADSP_A11_SMSM) {
		uint32_t mux_val;
		static uint32_t prev_smem_q6_apps_smsm;

		/* track the Q6->apps mux word on 8x50 targets */
		if (smsm_info.intr_mux && cpu_is_qsd8x50()) {
			mux_val = __raw_readl(
					SMSM_INTR_MUX_ADDR(SMEM_Q6_APPS_SMSM));
			if (mux_val != prev_smem_q6_apps_smsm)
				prev_smem_q6_apps_smsm = mux_val;
		}

		spin_lock_irqsave(&smem_lock, flags);
		smsm_cb_snapshot(1);
		spin_unlock_irqrestore(&smem_lock, flags);
		return IRQ_HANDLED;
	}

	spin_lock_irqsave(&smem_lock, flags);
	if (!smsm_info.state) {
		SMSM_INFO("<SM NO STATE>\n");
	} else {
		unsigned old_apps, apps;
		unsigned modm = __raw_readl(SMSM_STATE_ADDR(SMSM_MODEM_STATE));

		old_apps = apps = __raw_readl(SMSM_STATE_ADDR(SMSM_APPS_STATE));

		SMSM_DBG("<SM %08x %08x>\n", apps, modm);
		if (apps & SMSM_RESET) {
			/* If we get an interrupt and the apps SMSM_RESET
			   bit is already set, the modem is acking the
			   app's reset ack. */
			if (!disable_smsm_reset_handshake)
				apps &= ~SMSM_RESET;
			/* Issue a fake irq to handle any
			 * smd state changes during reset
			 */
			smd_fake_irq_handler(0);

			/* queue modem restart notify chain */
			modem_queue_start_reset_notify();

		} else if (modm & SMSM_RESET) {
			pr_err("\nSMSM: Modem SMSM state changed to SMSM_RESET.");
			if (!disable_smsm_reset_handshake) {
				/* ack the reset; flush caches so RAM dumps
				 * taken after the reset are coherent
				 */
				apps |= SMSM_RESET;
				flush_cache_all();
				outer_flush_all();
			}
			modem_queue_start_reset_notify();

		} else if (modm & SMSM_INIT) {
			if (!(apps & SMSM_INIT)) {
				apps |= SMSM_INIT;
				modem_queue_smsm_init_notify();
			}

			/* mirror the modem's SMD init, and declare RUN once
			 * all three init stages are reached
			 */
			if (modm & SMSM_SMDINIT)
				apps |= SMSM_SMDINIT;
			if ((apps & (SMSM_INIT | SMSM_SMDINIT | SMSM_RPCINIT)) ==
				(SMSM_INIT | SMSM_SMDINIT | SMSM_RPCINIT))
				apps |= SMSM_RUN;
		} else if (modm & SMSM_SYSTEM_DOWNLOAD) {
			pr_err("\nSMSM: Modem SMSM state changed to SMSM_SYSTEM_DOWNLOAD.");
			modem_queue_start_reset_notify();
		}

		if (old_apps != apps) {
			/* publish the new apps state and interrupt the
			 * remote processors about the changed bits
			 */
			SMSM_DBG("<SM %08x NOTIFY>\n", apps);
			__raw_writel(apps, SMSM_STATE_ADDR(SMSM_APPS_STATE));
			do_smd_probe();
			notify_other_smsm(SMSM_APPS_STATE, (old_apps ^ apps));
		}

		smsm_cb_snapshot(1);
	}
	spin_unlock_irqrestore(&smem_lock, flags);
	return IRQ_HANDLED;
}
2725
Eric Holmberg98c6c642012-02-24 11:29:35 -07002726static irqreturn_t smsm_modem_irq_handler(int irq, void *data)
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002727{
Eric Holmberg98c6c642012-02-24 11:29:35 -07002728 SMx_POWER_INFO("SMSM Int Modem->Apps\n");
Eric Holmberg7ad623a2012-03-01 14:41:10 -07002729 ++interrupt_stats[SMD_MODEM].smsm_in_count;
Eric Holmberg98c6c642012-02-24 11:29:35 -07002730 return smsm_irq_handler(irq, data);
2731}
Brian Swetland5b0f5a32009-04-26 18:38:49 -07002732
Eric Holmberg98c6c642012-02-24 11:29:35 -07002733static irqreturn_t smsm_dsp_irq_handler(int irq, void *data)
2734{
2735 SMx_POWER_INFO("SMSM Int LPASS->Apps\n");
Eric Holmberg7ad623a2012-03-01 14:41:10 -07002736 ++interrupt_stats[SMD_Q6].smsm_in_count;
Eric Holmberg98c6c642012-02-24 11:29:35 -07002737 return smsm_irq_handler(irq, data);
2738}
2739
2740static irqreturn_t smsm_dsps_irq_handler(int irq, void *data)
2741{
2742 SMx_POWER_INFO("SMSM Int DSPS->Apps\n");
Eric Holmberg7ad623a2012-03-01 14:41:10 -07002743 ++interrupt_stats[SMD_DSPS].smsm_in_count;
Eric Holmberg98c6c642012-02-24 11:29:35 -07002744 return smsm_irq_handler(irq, data);
2745}
2746
2747static irqreturn_t smsm_wcnss_irq_handler(int irq, void *data)
2748{
2749 SMx_POWER_INFO("SMSM Int WCNSS->Apps\n");
Eric Holmberg7ad623a2012-03-01 14:41:10 -07002750 ++interrupt_stats[SMD_WCNSS].smsm_in_count;
Eric Holmberg98c6c642012-02-24 11:29:35 -07002751 return smsm_irq_handler(irq, data);
2752}
2753
Eric Holmberge8a39322012-04-03 15:14:02 -06002754/*
2755 * Changes the global interrupt mask. The set and clear masks are re-applied
2756 * every time the global interrupt mask is updated for callback registration
2757 * and de-registration.
2758 *
2759 * The clear mask is applied first, so if a bit is set to 1 in both the clear
2760 * mask and the set mask, the result will be that the interrupt is set.
2761 *
2762 * @smsm_entry SMSM entry to change
2763 * @clear_mask 1 = clear bit, 0 = no-op
2764 * @set_mask 1 = set bit, 0 = no-op
2765 *
2766 * @returns 0 for success, < 0 for error
2767 */
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002768int smsm_change_intr_mask(uint32_t smsm_entry,
2769 uint32_t clear_mask, uint32_t set_mask)
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002770{
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002771 uint32_t old_mask, new_mask;
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002772 unsigned long flags;
Brian Swetland5b0f5a32009-04-26 18:38:49 -07002773
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002774 if (smsm_entry >= SMSM_NUM_ENTRIES) {
2775 pr_err("smsm_change_state: Invalid entry %d\n",
2776 smsm_entry);
2777 return -EINVAL;
2778 }
2779
2780 if (!smsm_info.intr_mask) {
2781 pr_err("smsm_change_intr_mask <SM NO STATE>\n");
Brian Swetland5b0f5a32009-04-26 18:38:49 -07002782 return -EIO;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002783 }
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002784
2785 spin_lock_irqsave(&smem_lock, flags);
Eric Holmberge8a39322012-04-03 15:14:02 -06002786 smsm_states[smsm_entry].intr_mask_clear = clear_mask;
2787 smsm_states[smsm_entry].intr_mask_set = set_mask;
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002788
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002789 old_mask = __raw_readl(SMSM_INTR_MASK_ADDR(smsm_entry, SMSM_APPS));
2790 new_mask = (old_mask & ~clear_mask) | set_mask;
2791 __raw_writel(new_mask, SMSM_INTR_MASK_ADDR(smsm_entry, SMSM_APPS));
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002792
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002793 wmb();
2794 spin_unlock_irqrestore(&smem_lock, flags);
Brian Swetland5b0f5a32009-04-26 18:38:49 -07002795
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002796 return 0;
2797}
2798EXPORT_SYMBOL(smsm_change_intr_mask);
2799
2800int smsm_get_intr_mask(uint32_t smsm_entry, uint32_t *intr_mask)
2801{
2802 if (smsm_entry >= SMSM_NUM_ENTRIES) {
2803 pr_err("smsm_change_state: Invalid entry %d\n",
2804 smsm_entry);
2805 return -EINVAL;
2806 }
2807
2808 if (!smsm_info.intr_mask) {
2809 pr_err("smsm_change_intr_mask <SM NO STATE>\n");
2810 return -EIO;
2811 }
2812
2813 *intr_mask = __raw_readl(SMSM_INTR_MASK_ADDR(smsm_entry, SMSM_APPS));
2814 return 0;
2815}
2816EXPORT_SYMBOL(smsm_get_intr_mask);
2817
2818int smsm_change_state(uint32_t smsm_entry,
2819 uint32_t clear_mask, uint32_t set_mask)
2820{
2821 unsigned long flags;
2822 uint32_t old_state, new_state;
2823
2824 if (smsm_entry >= SMSM_NUM_ENTRIES) {
2825 pr_err("smsm_change_state: Invalid entry %d",
2826 smsm_entry);
2827 return -EINVAL;
2828 }
2829
2830 if (!smsm_info.state) {
2831 pr_err("smsm_change_state <SM NO STATE>\n");
2832 return -EIO;
2833 }
2834 spin_lock_irqsave(&smem_lock, flags);
2835
2836 old_state = __raw_readl(SMSM_STATE_ADDR(smsm_entry));
2837 new_state = (old_state & ~clear_mask) | set_mask;
2838 __raw_writel(new_state, SMSM_STATE_ADDR(smsm_entry));
2839 SMSM_DBG("smsm_change_state %x\n", new_state);
2840 notify_other_smsm(SMSM_APPS_STATE, (old_state ^ new_state));
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002841
2842 spin_unlock_irqrestore(&smem_lock, flags);
2843
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002844 return 0;
2845}
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002846EXPORT_SYMBOL(smsm_change_state);
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002847
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002848uint32_t smsm_get_state(uint32_t smsm_entry)
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002849{
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002850 uint32_t rv = 0;
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002851
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002852 /* needs interface change to return error code */
2853 if (smsm_entry >= SMSM_NUM_ENTRIES) {
2854 pr_err("smsm_change_state: Invalid entry %d",
2855 smsm_entry);
2856 return 0;
2857 }
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002858
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002859 if (!smsm_info.state) {
2860 pr_err("smsm_get_state <SM NO STATE>\n");
2861 } else {
2862 rv = __raw_readl(SMSM_STATE_ADDR(smsm_entry));
2863 }
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002864
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002865 return rv;
2866}
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002867EXPORT_SYMBOL(smsm_get_state);
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002868
/**
 * Performs SMSM callback client notification.
 *
 * Drains complete snapshots from the snapshot FIFO; for each snapshot,
 * compares every entry against its cached last value and invokes the
 * registered callbacks whose masks overlap the changed bits, then consumes
 * the trailing wakelock flag and, if it was set, drops one reference on
 * the snapshot wakelock (releasing it at zero).
 *
 * Runs on smsm_cb_wq; producer side is smsm_cb_snapshot().
 */
void notify_smsm_cb_clients_worker(struct work_struct *work)
{
	struct smsm_state_cb_info *cb_info;
	struct smsm_state_info *state_info;
	int n;
	uint32_t new_state;
	uint32_t state_changes;
	uint32_t use_wakelock;
	int ret;
	unsigned long flags;

	if (!smd_initialized)
		return;

	/* only consume complete snapshots (all entries + wakelock flag) */
	while (kfifo_len(&smsm_snapshot_fifo) >= SMSM_SNAPSHOT_SIZE) {
		mutex_lock(&smsm_lock);
		for (n = 0; n < SMSM_NUM_ENTRIES; n++) {
			state_info = &smsm_states[n];

			ret = kfifo_out(&smsm_snapshot_fifo, &new_state,
					sizeof(new_state));
			if (ret != sizeof(new_state)) {
				pr_err("%s: snapshot underflow %d\n",
					__func__, ret);
				mutex_unlock(&smsm_lock);
				return;
			}

			state_changes = state_info->last_value ^ new_state;
			if (state_changes) {
				SMx_POWER_INFO("SMSM Change %d: %08x->%08x\n",
						n, state_info->last_value,
						new_state);
				list_for_each_entry(cb_info,
					&state_info->callbacks, cb_list) {

					/* only notify clients watching one
					 * of the changed bits
					 */
					if (cb_info->mask & state_changes)
						cb_info->notify(cb_info->data,
							state_info->last_value,
							new_state);
				}
				state_info->last_value = new_state;
			}
		}

		/* read wakelock flag */
		ret = kfifo_out(&smsm_snapshot_fifo, &use_wakelock,
				sizeof(use_wakelock));
		if (ret != sizeof(use_wakelock)) {
			pr_err("%s: snapshot underflow %d\n",
				__func__, ret);
			mutex_unlock(&smsm_lock);
			return;
		}
		mutex_unlock(&smsm_lock);

		if (use_wakelock) {
			/* drop the reference taken by smsm_cb_snapshot();
			 * release the wakelock when none remain
			 */
			spin_lock_irqsave(&smsm_snapshot_count_lock, flags);
			if (smsm_snapshot_count) {
				--smsm_snapshot_count;
				if (smsm_snapshot_count == 0) {
					SMx_POWER_INFO("SMSM snapshot"
						       " wake unlock\n");
					wake_unlock(&smsm_snapshot_wakelock);
				}
			} else {
				pr_err("%s: invalid snapshot count\n",
					__func__);
			}
			spin_unlock_irqrestore(&smsm_snapshot_count_lock,
					flags);
		}
	}
}
2946
Arve Hjønnevågec9d3d12009-06-16 14:48:21 -07002947
/**
 * Registers callback for SMSM state notifications when the specified
 * bits change.
 *
 * @smsm_entry Processor entry to monitor
 * @mask Bits to monitor (ORed into an existing entry for the same
 *       notify/data pair)
 * @notify Notification function to register
 * @data Opaque data passed in to callback
 *
 * @returns Status code
 *  <0 error code
 *  0 inserted new entry
 *  1 updated mask of existing entry
 */
int smsm_state_cb_register(uint32_t smsm_entry, uint32_t mask,
		void (*notify)(void *, uint32_t, uint32_t), void *data)
{
	struct smsm_state_info *state;
	struct smsm_state_cb_info *cb_info;
	struct smsm_state_cb_info *cb_found = 0;
	uint32_t new_mask = 0;
	int ret = 0;

	if (smsm_entry >= SMSM_NUM_ENTRIES)
		return -EINVAL;

	mutex_lock(&smsm_lock);

	if (!smsm_states) {
		/* smsm not yet initialized */
		ret = -ENODEV;
		goto cleanup;
	}

	/* merge into an existing registration for the same notify/data pair
	 * while aggregating all callbacks' masks for this entry
	 */
	state = &smsm_states[smsm_entry];
	list_for_each_entry(cb_info,
			&state->callbacks, cb_list) {
		if (!ret && (cb_info->notify == notify) &&
				(cb_info->data == data)) {
			cb_info->mask |= mask;
			cb_found = cb_info;
			ret = 1;
		}
		new_mask |= cb_info->mask;
	}

	if (!cb_found) {
		/* GFP_ATOMIC: allocated under the smsm mutex */
		cb_info = kmalloc(sizeof(struct smsm_state_cb_info),
			GFP_ATOMIC);
		if (!cb_info) {
			ret = -ENOMEM;
			goto cleanup;
		}

		cb_info->mask = mask;
		cb_info->notify = notify;
		cb_info->data = data;
		INIT_LIST_HEAD(&cb_info->cb_list);
		list_add_tail(&cb_info->cb_list,
			&state->callbacks);
		new_mask |= mask;
	}

	/* update interrupt notification mask */
	if (smsm_entry == SMSM_MODEM_STATE)
		new_mask |= LEGACY_MODEM_SMSM_MASK;

	if (smsm_info.intr_mask) {
		unsigned long flags;

		spin_lock_irqsave(&smem_lock, flags);
		/* re-apply the sticky clear/set masks configured via
		 * smsm_change_intr_mask() on top of the aggregate
		 */
		new_mask = (new_mask & ~state->intr_mask_clear)
				| state->intr_mask_set;
		__raw_writel(new_mask,
				SMSM_INTR_MASK_ADDR(smsm_entry, SMSM_APPS));
		wmb();
		spin_unlock_irqrestore(&smem_lock, flags);
	}

cleanup:
	mutex_unlock(&smsm_lock);
	return ret;
}
EXPORT_SYMBOL(smsm_state_cb_register);
3032
3033
/**
 * Deregisters for SMSM state notifications for the specified bits.
 *
 * @smsm_entry Processor entry to deregister
 * @mask Bits to deregister (if result is 0, callback is removed)
 * @notify Notification function to deregister
 * @data Opaque data passed in to callback
 *
 * @returns Status code
 *  <0 error code
 *  0 not found
 *  1 updated mask
 *  2 removed callback
 */
int smsm_state_cb_deregister(uint32_t smsm_entry, uint32_t mask,
		void (*notify)(void *, uint32_t, uint32_t), void *data)
{
	struct smsm_state_cb_info *cb_info;
	struct smsm_state_cb_info *cb_tmp;
	struct smsm_state_info *state;
	uint32_t new_mask = 0;
	int ret = 0;

	if (smsm_entry >= SMSM_NUM_ENTRIES)
		return -EINVAL;

	mutex_lock(&smsm_lock);

	if (!smsm_states) {
		/* smsm not yet initialized */
		mutex_unlock(&smsm_lock);
		return -ENODEV;
	}

	/* strip @mask from the matching registration (removing it entirely
	 * when no bits remain) while re-aggregating the surviving masks;
	 * _safe iteration because entries may be deleted in the loop
	 */
	state = &smsm_states[smsm_entry];
	list_for_each_entry_safe(cb_info, cb_tmp,
		&state->callbacks, cb_list) {
		if (!ret && (cb_info->notify == notify) &&
			(cb_info->data == data)) {
			cb_info->mask &= ~mask;
			ret = 1;
			if (!cb_info->mask) {
				/* no mask bits set, remove callback */
				list_del(&cb_info->cb_list);
				kfree(cb_info);
				ret = 2;
				continue;
			}
		}
		new_mask |= cb_info->mask;
	}

	/* update interrupt notification mask */
	if (smsm_entry == SMSM_MODEM_STATE)
		new_mask |= LEGACY_MODEM_SMSM_MASK;

	if (smsm_info.intr_mask) {
		unsigned long flags;

		spin_lock_irqsave(&smem_lock, flags);
		/* re-apply the sticky clear/set masks configured via
		 * smsm_change_intr_mask() on top of the aggregate
		 */
		new_mask = (new_mask & ~state->intr_mask_clear)
				| state->intr_mask_set;
		__raw_writel(new_mask,
				SMSM_INTR_MASK_ADDR(smsm_entry, SMSM_APPS));
		wmb();
		spin_unlock_irqrestore(&smem_lock, flags);
	}

	mutex_unlock(&smsm_lock);
	return ret;
}
EXPORT_SYMBOL(smsm_state_cb_deregister);
3106
Karthikeyan Ramasubramanian7069c482012-03-22 09:21:20 -06003107int smsm_driver_state_notifier_register(struct notifier_block *nb)
3108{
3109 int ret;
3110 if (!nb)
3111 return -EINVAL;
3112 mutex_lock(&smsm_driver_state_notifier_lock);
3113 ret = raw_notifier_chain_register(&smsm_driver_state_notifier_list, nb);
3114 mutex_unlock(&smsm_driver_state_notifier_lock);
3115 return ret;
3116}
3117EXPORT_SYMBOL(smsm_driver_state_notifier_register);
3118
3119int smsm_driver_state_notifier_unregister(struct notifier_block *nb)
3120{
3121 int ret;
3122 if (!nb)
3123 return -EINVAL;
3124 mutex_lock(&smsm_driver_state_notifier_lock);
3125 ret = raw_notifier_chain_unregister(&smsm_driver_state_notifier_list,
3126 nb);
3127 mutex_unlock(&smsm_driver_state_notifier_lock);
3128 return ret;
3129}
3130EXPORT_SYMBOL(smsm_driver_state_notifier_unregister);
3131
/* Invoke the SMSM driver-state notifier chain with @state and @data,
 * serialized under the chain's mutex (raw notifier chains provide no
 * locking of their own).
 */
static void smsm_driver_state_notify(uint32_t state, void *data)
{
	mutex_lock(&smsm_driver_state_notifier_lock);
	raw_notifier_call_chain(&smsm_driver_state_notifier_list,
				state, data);
	mutex_unlock(&smsm_driver_state_notifier_lock);
}
Brian Swetland5b0f5a32009-04-26 18:38:49 -07003139
3140int smd_core_init(void)
3141{
3142 int r;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003143 unsigned long flags = IRQF_TRIGGER_RISING;
3144 SMD_INFO("smd_core_init()\n");
Brian Swetland5b0f5a32009-04-26 18:38:49 -07003145
Brian Swetland37521a32009-07-01 18:30:47 -07003146 r = request_irq(INT_A9_M2A_0, smd_modem_irq_handler,
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003147 flags, "smd_dev", 0);
Brian Swetland2eb44eb2008-09-29 16:00:48 -07003148 if (r < 0)
3149 return r;
3150 r = enable_irq_wake(INT_A9_M2A_0);
3151 if (r < 0)
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003152 pr_err("smd_core_init: "
3153 "enable_irq_wake failed for INT_A9_M2A_0\n");
Brian Swetland2eb44eb2008-09-29 16:00:48 -07003154
Eric Holmberg98c6c642012-02-24 11:29:35 -07003155 r = request_irq(INT_A9_M2A_5, smsm_modem_irq_handler,
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003156 flags, "smsm_dev", 0);
Brian Swetland2eb44eb2008-09-29 16:00:48 -07003157 if (r < 0) {
3158 free_irq(INT_A9_M2A_0, 0);
3159 return r;
3160 }
3161 r = enable_irq_wake(INT_A9_M2A_5);
3162 if (r < 0)
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003163 pr_err("smd_core_init: "
3164 "enable_irq_wake failed for INT_A9_M2A_5\n");
Brian Swetland2eb44eb2008-09-29 16:00:48 -07003165
Brian Swetland37521a32009-07-01 18:30:47 -07003166#if defined(CONFIG_QDSP6)
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003167#if (INT_ADSP_A11 == INT_ADSP_A11_SMSM)
3168 flags |= IRQF_SHARED;
3169#endif
Brian Swetland37521a32009-07-01 18:30:47 -07003170 r = request_irq(INT_ADSP_A11, smd_dsp_irq_handler,
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003171 flags, "smd_dev", smd_dsp_irq_handler);
Brian Swetland37521a32009-07-01 18:30:47 -07003172 if (r < 0) {
3173 free_irq(INT_A9_M2A_0, 0);
3174 free_irq(INT_A9_M2A_5, 0);
3175 return r;
3176 }
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003177
Eric Holmberg98c6c642012-02-24 11:29:35 -07003178 r = request_irq(INT_ADSP_A11_SMSM, smsm_dsp_irq_handler,
3179 flags, "smsm_dev", smsm_dsp_irq_handler);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003180 if (r < 0) {
3181 free_irq(INT_A9_M2A_0, 0);
3182 free_irq(INT_A9_M2A_5, 0);
3183 free_irq(INT_ADSP_A11, smd_dsp_irq_handler);
3184 return r;
3185 }
3186
3187 r = enable_irq_wake(INT_ADSP_A11);
3188 if (r < 0)
3189 pr_err("smd_core_init: "
3190 "enable_irq_wake failed for INT_ADSP_A11\n");
3191
3192#if (INT_ADSP_A11 != INT_ADSP_A11_SMSM)
3193 r = enable_irq_wake(INT_ADSP_A11_SMSM);
3194 if (r < 0)
3195 pr_err("smd_core_init: enable_irq_wake "
3196 "failed for INT_ADSP_A11_SMSM\n");
3197#endif
3198 flags &= ~IRQF_SHARED;
Brian Swetland37521a32009-07-01 18:30:47 -07003199#endif
3200
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003201#if defined(CONFIG_DSPS)
3202 r = request_irq(INT_DSPS_A11, smd_dsps_irq_handler,
3203 flags, "smd_dev", smd_dsps_irq_handler);
3204 if (r < 0) {
3205 free_irq(INT_A9_M2A_0, 0);
3206 free_irq(INT_A9_M2A_5, 0);
3207 free_irq(INT_ADSP_A11, smd_dsp_irq_handler);
Eric Holmberg98c6c642012-02-24 11:29:35 -07003208 free_irq(INT_ADSP_A11_SMSM, smsm_dsp_irq_handler);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003209 return r;
3210 }
Brian Swetland5b0f5a32009-04-26 18:38:49 -07003211
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003212 r = enable_irq_wake(INT_DSPS_A11);
3213 if (r < 0)
3214 pr_err("smd_core_init: "
3215 "enable_irq_wake failed for INT_ADSP_A11\n");
Arve Hjønnevågec9d3d12009-06-16 14:48:21 -07003216#endif
Brian Swetland2eb44eb2008-09-29 16:00:48 -07003217
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003218#if defined(CONFIG_WCNSS)
3219 r = request_irq(INT_WCNSS_A11, smd_wcnss_irq_handler,
3220 flags, "smd_dev", smd_wcnss_irq_handler);
3221 if (r < 0) {
3222 free_irq(INT_A9_M2A_0, 0);
3223 free_irq(INT_A9_M2A_5, 0);
3224 free_irq(INT_ADSP_A11, smd_dsp_irq_handler);
Eric Holmberg98c6c642012-02-24 11:29:35 -07003225 free_irq(INT_ADSP_A11_SMSM, smsm_dsp_irq_handler);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003226 free_irq(INT_DSPS_A11, smd_dsps_irq_handler);
3227 return r;
3228 }
3229
3230 r = enable_irq_wake(INT_WCNSS_A11);
3231 if (r < 0)
3232 pr_err("smd_core_init: "
3233 "enable_irq_wake failed for INT_WCNSS_A11\n");
3234
Eric Holmberg98c6c642012-02-24 11:29:35 -07003235 r = request_irq(INT_WCNSS_A11_SMSM, smsm_wcnss_irq_handler,
3236 flags, "smsm_dev", smsm_wcnss_irq_handler);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003237 if (r < 0) {
3238 free_irq(INT_A9_M2A_0, 0);
3239 free_irq(INT_A9_M2A_5, 0);
3240 free_irq(INT_ADSP_A11, smd_dsp_irq_handler);
Eric Holmberg98c6c642012-02-24 11:29:35 -07003241 free_irq(INT_ADSP_A11_SMSM, smsm_dsp_irq_handler);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003242 free_irq(INT_DSPS_A11, smd_dsps_irq_handler);
3243 free_irq(INT_WCNSS_A11, smd_wcnss_irq_handler);
3244 return r;
3245 }
3246
3247 r = enable_irq_wake(INT_WCNSS_A11_SMSM);
3248 if (r < 0)
3249 pr_err("smd_core_init: "
3250 "enable_irq_wake failed for INT_WCNSS_A11_SMSM\n");
3251#endif
3252
Jeff Hugo6a8057c2011-08-16 13:47:12 -06003253#if defined(CONFIG_DSPS_SMSM)
Eric Holmberg98c6c642012-02-24 11:29:35 -07003254 r = request_irq(INT_DSPS_A11_SMSM, smsm_dsps_irq_handler,
3255 flags, "smsm_dev", smsm_dsps_irq_handler);
Jeff Hugo6a8057c2011-08-16 13:47:12 -06003256 if (r < 0) {
3257 free_irq(INT_A9_M2A_0, 0);
3258 free_irq(INT_A9_M2A_5, 0);
3259 free_irq(INT_ADSP_A11, smd_dsp_irq_handler);
Eric Holmberg98c6c642012-02-24 11:29:35 -07003260 free_irq(INT_ADSP_A11_SMSM, smsm_dsp_irq_handler);
Jeff Hugo6a8057c2011-08-16 13:47:12 -06003261 free_irq(INT_DSPS_A11, smd_dsps_irq_handler);
3262 free_irq(INT_WCNSS_A11, smd_wcnss_irq_handler);
Eric Holmberg98c6c642012-02-24 11:29:35 -07003263 free_irq(INT_WCNSS_A11_SMSM, smsm_wcnss_irq_handler);
Jeff Hugo6a8057c2011-08-16 13:47:12 -06003264 return r;
3265 }
3266
3267 r = enable_irq_wake(INT_DSPS_A11_SMSM);
3268 if (r < 0)
3269 pr_err("smd_core_init: "
3270 "enable_irq_wake failed for INT_DSPS_A11_SMSM\n");
3271#endif
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003272 SMD_INFO("smd_core_init() done\n");
3273
Brian Swetland2eb44eb2008-09-29 16:00:48 -07003274 return 0;
3275}
3276
Angshuman Sarkarbad32df2012-02-01 19:52:52 +05303277static int intr_init(struct interrupt_config_item *private_irq,
3278 struct smd_irq_config *platform_irq,
3279 struct platform_device *pdev
3280 )
3281{
3282 int irq_id;
3283 int ret;
3284 int ret_wake;
3285
3286 private_irq->out_bit_pos = platform_irq->out_bit_pos;
3287 private_irq->out_offset = platform_irq->out_offset;
3288 private_irq->out_base = platform_irq->out_base;
3289
3290 irq_id = platform_get_irq_byname(
3291 pdev,
3292 platform_irq->irq_name
3293 );
3294 SMD_DBG("smd: %s: register irq: %s id: %d\n", __func__,
3295 platform_irq->irq_name, irq_id);
3296 ret = request_irq(irq_id,
3297 private_irq->irq_handler,
3298 platform_irq->flags,
3299 platform_irq->device_name,
3300 (void *)platform_irq->dev_id
3301 );
3302 if (ret < 0) {
3303 platform_irq->irq_id = ret;
Eric Holmbergdeace152012-07-25 12:17:11 -06003304 private_irq->irq_id = ret;
Angshuman Sarkarbad32df2012-02-01 19:52:52 +05303305 } else {
3306 platform_irq->irq_id = irq_id;
Eric Holmbergdeace152012-07-25 12:17:11 -06003307 private_irq->irq_id = irq_id;
Angshuman Sarkarbad32df2012-02-01 19:52:52 +05303308 ret_wake = enable_irq_wake(irq_id);
3309 if (ret_wake < 0) {
3310 pr_err("smd: enable_irq_wake failed on %s",
3311 platform_irq->irq_name);
3312 }
3313 }
3314
3315 return ret;
3316}
3317
Jeff Hugobdc734d2012-03-26 16:05:39 -06003318int sort_cmp_func(const void *a, const void *b)
3319{
3320 struct smem_area *left = (struct smem_area *)(a);
3321 struct smem_area *right = (struct smem_area *)(b);
3322
3323 return left->phys_addr - right->phys_addr;
3324}
3325
/*
 * smd_core_platform_init() - platform-data driven SMD init: map the shared
 * memory regions described by the board file and register SMD/SMSM IRQs
 * for every configured subsystem edge.
 *
 * On any failure the goto chain unwinds in reverse: intr_failed frees all
 * registered IRQs, then falls through to smem_failed, which unmaps the
 * regions mapped so far and frees the table.
 *
 * Return: 0 on success, negative errno otherwise.
 */
int smd_core_platform_init(struct platform_device *pdev)
{
	int i;
	int ret;
	uint32_t num_ss;
	struct smd_platform *smd_platform_data;
	struct smd_subsystem_config *smd_ss_config_list;
	struct smd_subsystem_config *cfg;
	int err_ret = 0;
	struct smd_smem_regions *smd_smem_areas;
	int smem_idx = 0;

	smd_platform_data = pdev->dev.platform_data;
	num_ss = smd_platform_data->num_ss_configs;
	smd_ss_config_list = smd_platform_data->smd_ss_configs;

	/* Optional SSR tuning: board may disable the SMSM reset handshake. */
	if (smd_platform_data->smd_ssr_config)
		disable_smsm_reset_handshake = smd_platform_data->
			   smd_ssr_config->disable_smsm_reset_handshake;

	/* Map each board-described SMEM region into the kernel and keep a
	 * sorted (by physical address) table for later lookups. */
	smd_smem_areas = smd_platform_data->smd_smem_areas;
	if (smd_smem_areas) {
		num_smem_areas = smd_platform_data->num_smem_areas;
		smem_areas = kmalloc(sizeof(struct smem_area) * num_smem_areas,
						GFP_KERNEL);
		if (!smem_areas) {
			pr_err("%s: smem_areas kmalloc failed\n", __func__);
			err_ret = -ENOMEM;
			goto smem_areas_alloc_fail;
		}

		for (smem_idx = 0; smem_idx < num_smem_areas; ++smem_idx) {
			smem_areas[smem_idx].phys_addr =
					smd_smem_areas[smem_idx].phys_addr;
			smem_areas[smem_idx].size =
					smd_smem_areas[smem_idx].size;
			smem_areas[smem_idx].virt_addr = ioremap_nocache(
				(unsigned long)(smem_areas[smem_idx].phys_addr),
				smem_areas[smem_idx].size);
			if (!smem_areas[smem_idx].virt_addr) {
				pr_err("%s: ioremap_nocache() of addr:%p"
					" size: %x\n", __func__,
					smem_areas[smem_idx].phys_addr,
					smem_areas[smem_idx].size);
				err_ret = -ENOMEM;
				/* bump so the cleanup loop (which starts at
				 * smem_idx - 1) unmaps every mapped entry */
				++smem_idx;
				goto smem_failed;
			}
		}
		sort(smem_areas, num_smem_areas,
				sizeof(struct smem_area),
				sort_cmp_func, NULL);
	}

	/* Register the SMD (and, where supported, SMSM) interrupt for each
	 * configured edge. */
	for (i = 0; i < num_ss; i++) {
		cfg = &smd_ss_config_list[i];

		ret = intr_init(
			&private_intr_config[cfg->irq_config_id].smd,
			&cfg->smd_int,
			pdev
			);

		if (ret < 0) {
			err_ret = ret;
			pr_err("smd: register irq failed on %s\n",
				cfg->smd_int.irq_name);
			goto intr_failed;
		}

		/* only init smsm structs if this edge supports smsm */
		if (cfg->smsm_int.irq_id)
			ret = intr_init(
				&private_intr_config[cfg->irq_config_id].smsm,
				&cfg->smsm_int,
				pdev
				);

		/* NOTE(review): if the edge has no smsm irq, this re-checks
		 * the smd result from above (already known >= 0) -- benign,
		 * but worth confirming it is intentional. */
		if (ret < 0) {
			err_ret = ret;
			pr_err("smd: register irq failed on %s\n",
				cfg->smsm_int.irq_name);
			goto intr_failed;
		}

		if (cfg->subsys_name)
			strlcpy(edge_to_pids[cfg->edge].subsys_name,
				cfg->subsys_name, SMD_MAX_CH_NAME_LEN);
	}


	SMD_INFO("smd_core_platform_init() done\n");
	return 0;

intr_failed:
	/* Walks ALL edges, not just those initialized so far; relies on
	 * irq_id being negative (or the >= 0 guard failing) for entries
	 * whose intr_init() failed or never ran -- TODO confirm the
	 * platform data leaves untouched irq_id fields non-registerable. */
	pr_err("smd: deregistering IRQs\n");
	for (i = 0; i < num_ss; ++i) {
		cfg = &smd_ss_config_list[i];

		if (cfg->smd_int.irq_id >= 0)
			free_irq(cfg->smd_int.irq_id,
				(void *)cfg->smd_int.dev_id
				);
		if (cfg->smsm_int.irq_id >= 0)
			free_irq(cfg->smsm_int.irq_id,
				(void *)cfg->smsm_int.dev_id
				);
	}
smem_failed:
	/* Unmap in reverse; smem_idx is one past the last mapped entry.
	 * kfree() of a never-allocated (NULL) smem_areas is a no-op. */
	for (smem_idx = smem_idx - 1; smem_idx >= 0; --smem_idx)
		iounmap(smem_areas[smem_idx].virt_addr);
	kfree(smem_areas);
smem_areas_alloc_fail:
	return err_ret;
}
3441
Gregory Bean4416e9e2010-07-28 10:22:12 -07003442static int __devinit msm_smd_probe(struct platform_device *pdev)
Brian Swetland2eb44eb2008-09-29 16:00:48 -07003443{
Angshuman Sarkarbad32df2012-02-01 19:52:52 +05303444 int ret;
Daniel Walker0aec66d2010-03-18 12:31:08 -07003445
Angshuman Sarkarbad32df2012-02-01 19:52:52 +05303446 SMD_INFO("smd probe\n");
Brian Swetland2eb44eb2008-09-29 16:00:48 -07003447 INIT_WORK(&probe_work, smd_channel_probe_worker);
3448
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003449 channel_close_wq = create_singlethread_workqueue("smd_channel_close");
3450 if (IS_ERR(channel_close_wq)) {
3451 pr_err("%s: create_singlethread_workqueue ENOMEM\n", __func__);
3452 return -ENOMEM;
3453 }
3454
3455 if (smsm_init()) {
3456 pr_err("smsm_init() failed\n");
Brian Swetland2eb44eb2008-09-29 16:00:48 -07003457 return -1;
3458 }
3459
Angshuman Sarkarbad32df2012-02-01 19:52:52 +05303460 if (pdev) {
3461 if (pdev->dev.of_node) {
3462 pr_err("SMD: Device tree not currently supported\n");
3463 return -ENODEV;
3464 } else if (pdev->dev.platform_data) {
3465 ret = smd_core_platform_init(pdev);
3466 if (ret) {
3467 pr_err(
3468 "SMD: smd_core_platform_init() failed\n");
3469 return -ENODEV;
3470 }
3471 } else {
3472 ret = smd_core_init();
3473 if (ret) {
3474 pr_err("smd_core_init() failed\n");
3475 return -ENODEV;
3476 }
3477 }
3478 } else {
3479 pr_err("SMD: PDEV not found\n");
3480 return -ENODEV;
Brian Swetland2eb44eb2008-09-29 16:00:48 -07003481 }
Iliyan Malchev1207bab2009-11-15 18:16:43 -08003482
Brian Swetland2eb44eb2008-09-29 16:00:48 -07003483 smd_initialized = 1;
3484
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003485 smd_alloc_loopback_channel();
Eric Holmbergc33d4ab2011-10-24 10:28:25 -06003486 smsm_irq_handler(0, 0);
3487 tasklet_schedule(&smd_fake_irq_tasklet);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003488
Brian Swetland2eb44eb2008-09-29 16:00:48 -07003489 return 0;
3490}
3491
/* Forward declaration so the notifier table below can reference the
 * callback before its definition. */
static int restart_notifier_cb(struct notifier_block *this,
				  unsigned long code,
				  void *data);

/* Subsystem-restart hookup: each entry maps a subsystem-restart name to
 * the SMD processor whose channels must be reset after that subsystem
 * shuts down.  Registered in modem_restart_late_init(). */
static struct restart_notifier_block restart_notifiers[] = {
	{SMD_MODEM, "modem", .nb.notifier_call = restart_notifier_cb},
	{SMD_Q6, "lpass", .nb.notifier_call = restart_notifier_cb},
	{SMD_WCNSS, "riva", .nb.notifier_call = restart_notifier_cb},
	{SMD_DSPS, "dsps", .nb.notifier_call = restart_notifier_cb},
	/* NOTE(review): "gss" also maps to SMD_MODEM -- presumably the GSS
	 * shares the modem edge on these targets; confirm. */
	{SMD_MODEM, "gss", .nb.notifier_call = restart_notifier_cb},
};
3503
3504static int restart_notifier_cb(struct notifier_block *this,
3505 unsigned long code,
3506 void *data)
3507{
3508 if (code == SUBSYS_AFTER_SHUTDOWN) {
3509 struct restart_notifier_block *notifier;
3510
3511 notifier = container_of(this,
3512 struct restart_notifier_block, nb);
3513 SMD_INFO("%s: ssrestart for processor %d ('%s')\n",
3514 __func__, notifier->processor,
3515 notifier->name);
3516
3517 smd_channel_reset(notifier->processor);
3518 }
3519
3520 return NOTIFY_DONE;
3521}
3522
3523static __init int modem_restart_late_init(void)
3524{
3525 int i;
3526 void *handle;
3527 struct restart_notifier_block *nb;
3528
3529 for (i = 0; i < ARRAY_SIZE(restart_notifiers); i++) {
3530 nb = &restart_notifiers[i];
3531 handle = subsys_notif_register_notifier(nb->name, &nb->nb);
3532 SMD_DBG("%s: registering notif for '%s', handle=%p\n",
3533 __func__, nb->name, handle);
3534 }
3535 return 0;
3536}
3537late_initcall(modem_restart_late_init);
3538
/* Platform driver glue: binds msm_smd_probe() to the platform device
 * named MODULE_NAME registered by the board/arch code. */
static struct platform_driver msm_smd_driver = {
	.probe = msm_smd_probe,
	.driver = {
		.name = MODULE_NAME,
		.owner = THIS_MODULE,
	},
};
3546
Mahesh Sivasubramaniand9041b02012-05-09 12:57:23 -06003547int __init msm_smd_init(void)
Brian Swetland2eb44eb2008-09-29 16:00:48 -07003548{
Mahesh Sivasubramaniand9041b02012-05-09 12:57:23 -06003549 static bool registered;
3550
3551 if (registered)
3552 return 0;
3553
3554 registered = true;
Brian Swetland2eb44eb2008-09-29 16:00:48 -07003555 return platform_driver_register(&msm_smd_driver);
3556}
3557
3558module_init(msm_smd_init);
3559
3560MODULE_DESCRIPTION("MSM Shared Memory Core");
3561MODULE_AUTHOR("Brian Swetland <swetland@google.com>");
3562MODULE_LICENSE("GPL");