blob: 80d0a4ef48047abda1b912a86d1ab970f644393c [file] [log] [blame]
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001/* arch/arm/mach-msm/smd.c
2 *
3 * Copyright (C) 2007 Google, Inc.
Eric Holmbergc7e8daf2011-12-28 11:49:21 -07004 * Copyright (c) 2008-2012, Code Aurora Forum. All rights reserved.
Brian Swetland2eb44eb2008-09-29 16:00:48 -07005 * Author: Brian Swetland <swetland@google.com>
6 *
7 * This software is licensed under the terms of the GNU General Public
8 * License version 2, as published by the Free Software Foundation, and
9 * may be copied, distributed, and modified under those terms.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 */
17
18#include <linux/platform_device.h>
19#include <linux/module.h>
20#include <linux/fs.h>
21#include <linux/cdev.h>
22#include <linux/device.h>
23#include <linux/wait.h>
24#include <linux/interrupt.h>
25#include <linux/irq.h>
26#include <linux/list.h>
27#include <linux/slab.h>
Brian Swetland2eb44eb2008-09-29 16:00:48 -070028#include <linux/delay.h>
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070029#include <linux/io.h>
30#include <linux/termios.h>
31#include <linux/ctype.h>
32#include <linux/remote_spinlock.h>
33#include <linux/uaccess.h>
Eric Holmbergc7e8daf2011-12-28 11:49:21 -070034#include <linux/kfifo.h>
Eric Holmberg59a9f942012-03-19 10:04:22 -060035#include <linux/wakelock.h>
Karthikeyan Ramasubramanian7069c482012-03-22 09:21:20 -060036#include <linux/notifier.h>
Jeff Hugobdc734d2012-03-26 16:05:39 -060037#include <linux/sort.h>
Brian Swetland2eb44eb2008-09-29 16:00:48 -070038#include <mach/msm_smd.h>
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070039#include <mach/msm_iomap.h>
Brian Swetland2eb44eb2008-09-29 16:00:48 -070040#include <mach/system.h>
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070041#include <mach/subsystem_notif.h>
Angshuman Sarkaread67bd2011-09-21 20:13:12 +053042#include <mach/socinfo.h>
Steve Mucklef132c6c2012-06-06 18:30:57 -070043#include <mach/proc_comm.h>
Ram Somani8b9589f2012-04-03 12:07:18 +053044#include <asm/cacheflush.h>
Brian Swetland2eb44eb2008-09-29 16:00:48 -070045
46#include "smd_private.h"
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070047#include "modem_notifier.h"
Brian Swetland2eb44eb2008-09-29 16:00:48 -070048
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070049#if defined(CONFIG_ARCH_QSD8X50) || defined(CONFIG_ARCH_MSM8X60) \
Jeff Hugo56b933a2011-09-28 14:42:05 -060050 || defined(CONFIG_ARCH_MSM8960) || defined(CONFIG_ARCH_FSM9XXX) \
Jeff Hugo0c0f5e92011-09-28 13:55:45 -060051 || defined(CONFIG_ARCH_MSM9615) || defined(CONFIG_ARCH_APQ8064)
Brian Swetland37521a32009-07-01 18:30:47 -070052#define CONFIG_QDSP6 1
53#endif
54
Jeff Hugo0c0f5e92011-09-28 13:55:45 -060055#if defined(CONFIG_ARCH_MSM8X60) || defined(CONFIG_ARCH_MSM8960) \
56 || defined(CONFIG_ARCH_APQ8064)
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070057#define CONFIG_DSPS 1
58#endif
59
Jeff Hugo0c0f5e92011-09-28 13:55:45 -060060#if defined(CONFIG_ARCH_MSM8960) \
61 || defined(CONFIG_ARCH_APQ8064)
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070062#define CONFIG_WCNSS 1
Jeff Hugo6a8057c2011-08-16 13:47:12 -060063#define CONFIG_DSPS_SMSM 1
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070064#endif
Brian Swetland2eb44eb2008-09-29 16:00:48 -070065
66#define MODULE_NAME "msm_smd"
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070067#define SMEM_VERSION 0x000B
68#define SMD_VERSION 0x00020000
Eric Holmbergc7e8daf2011-12-28 11:49:21 -070069#define SMSM_SNAPSHOT_CNT 64
Eric Holmbergda31d042012-03-28 14:01:02 -060070#define SMSM_SNAPSHOT_SIZE ((SMSM_NUM_ENTRIES + 1) * 4)
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070071
72uint32_t SMSM_NUM_ENTRIES = 8;
73uint32_t SMSM_NUM_HOSTS = 3;
Brian Swetland2eb44eb2008-09-29 16:00:48 -070074
Eric Holmberge8a39322012-04-03 15:14:02 -060075/* Legacy SMSM interrupt notifications */
76#define LEGACY_MODEM_SMSM_MASK (SMSM_RESET | SMSM_INIT | SMSM_SMDINIT \
77 | SMSM_RUN | SMSM_SYSTEM_DOWNLOAD)
Brian Swetland2eb44eb2008-09-29 16:00:48 -070078
79enum {
80 MSM_SMD_DEBUG = 1U << 0,
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070081 MSM_SMSM_DEBUG = 1U << 1,
82 MSM_SMD_INFO = 1U << 2,
83 MSM_SMSM_INFO = 1U << 3,
Eric Holmberg98c6c642012-02-24 11:29:35 -070084 MSM_SMx_POWER_INFO = 1U << 4,
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070085};
86
87struct smsm_shared_info {
88 uint32_t *state;
89 uint32_t *intr_mask;
90 uint32_t *intr_mux;
91};
92
93static struct smsm_shared_info smsm_info;
Eric Holmberg59a9f942012-03-19 10:04:22 -060094static struct kfifo smsm_snapshot_fifo;
95static struct wake_lock smsm_snapshot_wakelock;
96static int smsm_snapshot_count;
97static DEFINE_SPINLOCK(smsm_snapshot_count_lock);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070098
/* Shared-memory layout of the SMSM size-information entry; mirrors the
 * structure published by the remote processors (do not reorder fields). */
struct smsm_size_info_type {
	uint32_t num_hosts;	/* number of hosts participating in SMSM */
	uint32_t num_entries;	/* number of SMSM state entries */
	uint32_t reserved0;
	uint32_t reserved1;
};
105
/* Per-client SMSM state-change callback registration. */
struct smsm_state_cb_info {
	struct list_head cb_list;	/* node on a state entry's callback list */
	uint32_t mask;			/* state bits this client is notified for */
	void *data;			/* opaque cookie passed back to notify() */
	/* called with the previous and new state word for the entry */
	void (*notify)(void *data, uint32_t old_state, uint32_t new_state);
};
112
/* Local bookkeeping for one SMSM state entry. */
struct smsm_state_info {
	struct list_head callbacks;	/* list of smsm_state_cb_info clients */
	uint32_t last_value;		/* last state word seen (for edge detection) */
	uint32_t intr_mask_set;		/* bits to OR into the interrupt mask */
	uint32_t intr_mask_clear;	/* bits to clear from the interrupt mask */
};
119
/* Inbound/outbound interrupt wiring for one remote subsystem. */
struct interrupt_config_item {
	/* must be initialized */
	irqreturn_t (*irq_handler)(int req, void *data);
	/* outgoing interrupt config (set from platform data) */
	uint32_t out_bit_pos;		/* bit written to raise the interrupt */
	void __iomem *out_base;		/* NULL => fall back to hardcoded macro */
	uint32_t out_offset;		/* register offset from out_base */
	int irq_id;			/* incoming IRQ number */
};

/* A subsystem has independent SMD and SMSM interrupt configurations. */
struct interrupt_config {
	struct interrupt_config_item smd;
	struct interrupt_config_item smsm;
};
134
135static irqreturn_t smd_modem_irq_handler(int irq, void *data);
Eric Holmberg98c6c642012-02-24 11:29:35 -0700136static irqreturn_t smsm_modem_irq_handler(int irq, void *data);
Angshuman Sarkarbad32df2012-02-01 19:52:52 +0530137static irqreturn_t smd_dsp_irq_handler(int irq, void *data);
Eric Holmberg98c6c642012-02-24 11:29:35 -0700138static irqreturn_t smsm_dsp_irq_handler(int irq, void *data);
Angshuman Sarkarbad32df2012-02-01 19:52:52 +0530139static irqreturn_t smd_dsps_irq_handler(int irq, void *data);
Eric Holmberg98c6c642012-02-24 11:29:35 -0700140static irqreturn_t smsm_dsps_irq_handler(int irq, void *data);
Angshuman Sarkarbad32df2012-02-01 19:52:52 +0530141static irqreturn_t smd_wcnss_irq_handler(int irq, void *data);
Eric Holmberg98c6c642012-02-24 11:29:35 -0700142static irqreturn_t smsm_wcnss_irq_handler(int irq, void *data);
Jeff Hugo918b2dc2012-03-21 13:42:09 -0600143static irqreturn_t smd_rpm_irq_handler(int irq, void *data);
Angshuman Sarkarbad32df2012-02-01 19:52:52 +0530144static irqreturn_t smsm_irq_handler(int irq, void *data);
145
/* Static per-subsystem interrupt handler table; the outgoing fields
 * (out_base/out_bit_pos/out_offset) are filled in from platform data. */
static struct interrupt_config private_intr_config[NUM_SMD_SUBSYSTEMS] = {
	[SMD_MODEM] = {
		.smd.irq_handler = smd_modem_irq_handler,
		.smsm.irq_handler = smsm_modem_irq_handler,
	},
	[SMD_Q6] = {
		.smd.irq_handler = smd_dsp_irq_handler,
		.smsm.irq_handler = smsm_dsp_irq_handler,
	},
	[SMD_DSPS] = {
		.smd.irq_handler = smd_dsps_irq_handler,
		.smsm.irq_handler = smsm_dsps_irq_handler,
	},
	[SMD_WCNSS] = {
		.smd.irq_handler = smd_wcnss_irq_handler,
		.smsm.irq_handler = smsm_wcnss_irq_handler,
	},
	[SMD_RPM] = {
		.smd.irq_handler = smd_rpm_irq_handler,
		.smsm.irq_handler = NULL, /* does not support smsm */
	},
};
Jeff Hugobdc734d2012-03-26 16:05:39 -0600168
/* One auxiliary shared-memory region (beyond the primary SMEM bank). */
struct smem_area {
	void *phys_addr;		/* physical base of the region */
	unsigned size;			/* region size in bytes */
	void __iomem *virt_addr;	/* ioremapped virtual base */
};
static uint32_t num_smem_areas;		/* count of entries in smem_areas */
static struct smem_area *smem_areas;	/* table of auxiliary regions */
/* returns the virtual address for base+offset, or NULL if out of range */
static void *smem_range_check(void *base, unsigned offset);
177
Eric Holmberg7ad623a2012-03-01 14:41:10 -0700178struct interrupt_stat interrupt_stats[NUM_SMD_SUBSYSTEMS];
Angshuman Sarkarbad32df2012-02-01 19:52:52 +0530179
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700180#define SMSM_STATE_ADDR(entry) (smsm_info.state + entry)
181#define SMSM_INTR_MASK_ADDR(entry, host) (smsm_info.intr_mask + \
182 entry * SMSM_NUM_HOSTS + host)
183#define SMSM_INTR_MUX_ADDR(entry) (smsm_info.intr_mux + entry)
184
185/* Internal definitions which are not exported in some targets */
186enum {
187 SMSM_APPS_DEM_I = 3,
Brian Swetland2eb44eb2008-09-29 16:00:48 -0700188};
189
190static int msm_smd_debug_mask;
Brian Swetland2eb44eb2008-09-29 16:00:48 -0700191module_param_named(debug_mask, msm_smd_debug_mask,
192 int, S_IRUGO | S_IWUSR | S_IWGRP);
193
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700194#if defined(CONFIG_MSM_SMD_DEBUG)
195#define SMD_DBG(x...) do { \
196 if (msm_smd_debug_mask & MSM_SMD_DEBUG) \
197 printk(KERN_DEBUG x); \
198 } while (0)
199
200#define SMSM_DBG(x...) do { \
201 if (msm_smd_debug_mask & MSM_SMSM_DEBUG) \
202 printk(KERN_DEBUG x); \
203 } while (0)
204
205#define SMD_INFO(x...) do { \
206 if (msm_smd_debug_mask & MSM_SMD_INFO) \
207 printk(KERN_INFO x); \
208 } while (0)
209
210#define SMSM_INFO(x...) do { \
211 if (msm_smd_debug_mask & MSM_SMSM_INFO) \
212 printk(KERN_INFO x); \
213 } while (0)
Eric Holmberg98c6c642012-02-24 11:29:35 -0700214#define SMx_POWER_INFO(x...) do { \
215 if (msm_smd_debug_mask & MSM_SMx_POWER_INFO) \
216 printk(KERN_INFO x); \
217 } while (0)
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700218#else
219#define SMD_DBG(x...) do { } while (0)
220#define SMSM_DBG(x...) do { } while (0)
221#define SMD_INFO(x...) do { } while (0)
222#define SMSM_INFO(x...) do { } while (0)
Eric Holmberg98c6c642012-02-24 11:29:35 -0700223#define SMx_POWER_INFO(x...) do { } while (0)
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700224#endif
225
Brian Swetland2eb44eb2008-09-29 16:00:48 -0700226static unsigned last_heap_free = 0xffffffff;
227
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700228static inline void smd_write_intr(unsigned int val,
229 const void __iomem *addr);
230
231#if defined(CONFIG_ARCH_MSM7X30)
232#define MSM_TRIG_A2M_SMD_INT \
Taniya Das298de8c2012-02-16 11:45:31 +0530233 (smd_write_intr(1 << 0, MSM_APCS_GCC_BASE + 0x8))
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700234#define MSM_TRIG_A2Q6_SMD_INT \
Taniya Das298de8c2012-02-16 11:45:31 +0530235 (smd_write_intr(1 << 8, MSM_APCS_GCC_BASE + 0x8))
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700236#define MSM_TRIG_A2M_SMSM_INT \
Taniya Das298de8c2012-02-16 11:45:31 +0530237 (smd_write_intr(1 << 5, MSM_APCS_GCC_BASE + 0x8))
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700238#define MSM_TRIG_A2Q6_SMSM_INT \
Taniya Das298de8c2012-02-16 11:45:31 +0530239 (smd_write_intr(1 << 8, MSM_APCS_GCC_BASE + 0x8))
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700240#define MSM_TRIG_A2DSPS_SMD_INT
Jeff Hugo6a8057c2011-08-16 13:47:12 -0600241#define MSM_TRIG_A2DSPS_SMSM_INT
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700242#define MSM_TRIG_A2WCNSS_SMD_INT
243#define MSM_TRIG_A2WCNSS_SMSM_INT
244#elif defined(CONFIG_ARCH_MSM8X60)
245#define MSM_TRIG_A2M_SMD_INT \
246 (smd_write_intr(1 << 3, MSM_GCC_BASE + 0x8))
247#define MSM_TRIG_A2Q6_SMD_INT \
248 (smd_write_intr(1 << 15, MSM_GCC_BASE + 0x8))
249#define MSM_TRIG_A2M_SMSM_INT \
250 (smd_write_intr(1 << 4, MSM_GCC_BASE + 0x8))
251#define MSM_TRIG_A2Q6_SMSM_INT \
252 (smd_write_intr(1 << 14, MSM_GCC_BASE + 0x8))
253#define MSM_TRIG_A2DSPS_SMD_INT \
254 (smd_write_intr(1, MSM_SIC_NON_SECURE_BASE + 0x4080))
Jeff Hugo6a8057c2011-08-16 13:47:12 -0600255#define MSM_TRIG_A2DSPS_SMSM_INT
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700256#define MSM_TRIG_A2WCNSS_SMD_INT
257#define MSM_TRIG_A2WCNSS_SMSM_INT
Jeff Hugo56b933a2011-09-28 14:42:05 -0600258#elif defined(CONFIG_ARCH_MSM9615)
259#define MSM_TRIG_A2M_SMD_INT \
260 (smd_write_intr(1 << 3, MSM_APCS_GCC_BASE + 0x8))
261#define MSM_TRIG_A2Q6_SMD_INT \
262 (smd_write_intr(1 << 15, MSM_APCS_GCC_BASE + 0x8))
263#define MSM_TRIG_A2M_SMSM_INT \
264 (smd_write_intr(1 << 4, MSM_APCS_GCC_BASE + 0x8))
265#define MSM_TRIG_A2Q6_SMSM_INT \
266 (smd_write_intr(1 << 14, MSM_APCS_GCC_BASE + 0x8))
267#define MSM_TRIG_A2DSPS_SMD_INT
268#define MSM_TRIG_A2DSPS_SMSM_INT
269#define MSM_TRIG_A2WCNSS_SMD_INT
270#define MSM_TRIG_A2WCNSS_SMSM_INT
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700271#elif defined(CONFIG_ARCH_FSM9XXX)
272#define MSM_TRIG_A2Q6_SMD_INT \
273 (smd_write_intr(1 << 10, MSM_GCC_BASE + 0x8))
274#define MSM_TRIG_A2Q6_SMSM_INT \
275 (smd_write_intr(1 << 10, MSM_GCC_BASE + 0x8))
276#define MSM_TRIG_A2M_SMD_INT \
277 (smd_write_intr(1 << 0, MSM_GCC_BASE + 0x8))
278#define MSM_TRIG_A2M_SMSM_INT \
279 (smd_write_intr(1 << 5, MSM_GCC_BASE + 0x8))
280#define MSM_TRIG_A2DSPS_SMD_INT
Jeff Hugo6a8057c2011-08-16 13:47:12 -0600281#define MSM_TRIG_A2DSPS_SMSM_INT
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700282#define MSM_TRIG_A2WCNSS_SMD_INT
283#define MSM_TRIG_A2WCNSS_SMSM_INT
Eric Holmberg73d45462012-02-28 10:41:31 -0700284#elif defined(CONFIG_ARCH_MSM7X01A) || defined(CONFIG_ARCH_MSM7x25)
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700285#define MSM_TRIG_A2M_SMD_INT \
286 (smd_write_intr(1, MSM_CSR_BASE + 0x400 + (0) * 4))
Eric Holmberg73d45462012-02-28 10:41:31 -0700287#define MSM_TRIG_A2Q6_SMD_INT
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700288#define MSM_TRIG_A2M_SMSM_INT \
289 (smd_write_intr(1, MSM_CSR_BASE + 0x400 + (5) * 4))
Eric Holmberg73d45462012-02-28 10:41:31 -0700290#define MSM_TRIG_A2Q6_SMSM_INT
291#define MSM_TRIG_A2DSPS_SMD_INT
292#define MSM_TRIG_A2DSPS_SMSM_INT
293#define MSM_TRIG_A2WCNSS_SMD_INT
294#define MSM_TRIG_A2WCNSS_SMSM_INT
295#elif defined(CONFIG_ARCH_MSM7X27) || defined(CONFIG_ARCH_MSM7X27A)
296#define MSM_TRIG_A2M_SMD_INT \
297 (smd_write_intr(1, MSM_CSR_BASE + 0x400 + (0) * 4))
298#define MSM_TRIG_A2Q6_SMD_INT
299#define MSM_TRIG_A2M_SMSM_INT \
300 (smd_write_intr(1, MSM_CSR_BASE + 0x400 + (5) * 4))
301#define MSM_TRIG_A2Q6_SMSM_INT
302#define MSM_TRIG_A2DSPS_SMD_INT
303#define MSM_TRIG_A2DSPS_SMSM_INT
304#define MSM_TRIG_A2WCNSS_SMD_INT
305#define MSM_TRIG_A2WCNSS_SMSM_INT
306#else /* use platform device / device tree configuration */
307#define MSM_TRIG_A2M_SMD_INT
308#define MSM_TRIG_A2Q6_SMD_INT
309#define MSM_TRIG_A2M_SMSM_INT
310#define MSM_TRIG_A2Q6_SMSM_INT
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700311#define MSM_TRIG_A2DSPS_SMD_INT
Jeff Hugo6a8057c2011-08-16 13:47:12 -0600312#define MSM_TRIG_A2DSPS_SMSM_INT
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700313#define MSM_TRIG_A2WCNSS_SMD_INT
314#define MSM_TRIG_A2WCNSS_SMSM_INT
Brian Swetland37521a32009-07-01 18:30:47 -0700315#endif
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700316
Jeff Hugoee40b152012-02-09 17:39:47 -0700317/*
318 * stub out legacy macros if they are not being used so that the legacy
319 * code compiles even though it is not used
320 *
321 * these definitions should not be used in active code and will cause
322 * an early failure
323 */
324#ifndef INT_A9_M2A_0
325#define INT_A9_M2A_0 -1
326#endif
327#ifndef INT_A9_M2A_5
328#define INT_A9_M2A_5 -1
329#endif
330#ifndef INT_ADSP_A11
331#define INT_ADSP_A11 -1
332#endif
333#ifndef INT_ADSP_A11_SMSM
334#define INT_ADSP_A11_SMSM -1
335#endif
336#ifndef INT_DSPS_A11
337#define INT_DSPS_A11 -1
338#endif
339#ifndef INT_DSPS_A11_SMSM
340#define INT_DSPS_A11_SMSM -1
341#endif
342#ifndef INT_WCNSS_A11
343#define INT_WCNSS_A11 -1
344#endif
345#ifndef INT_WCNSS_A11_SMSM
346#define INT_WCNSS_A11_SMSM -1
347#endif
348
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700349#define SMD_LOOPBACK_CID 100
350
Eric Holmbergf6d7d1a2011-09-23 18:31:04 -0600351#define SMEM_SPINLOCK_SMEM_ALLOC "S:3"
352static remote_spinlock_t remote_spinlock;
353
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700354static LIST_HEAD(smd_ch_list_loopback);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700355static void smd_fake_irq_handler(unsigned long arg);
Eric Holmbergda31d042012-03-28 14:01:02 -0600356static void smsm_cb_snapshot(uint32_t use_wakelock);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700357
Karthikeyan Ramasubramanian3249a222012-04-18 17:16:49 -0600358static struct workqueue_struct *smsm_cb_wq;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700359static void notify_smsm_cb_clients_worker(struct work_struct *work);
360static DECLARE_WORK(smsm_cb_work, notify_smsm_cb_clients_worker);
Eric Holmbergc8002902011-09-16 13:55:57 -0600361static DEFINE_MUTEX(smsm_lock);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700362static struct smsm_state_info *smsm_states;
Angshuman Sarkar7ee0dca2011-08-22 21:37:34 +0530363static int spinlocks_initialized;
Karthikeyan Ramasubramanian7069c482012-03-22 09:21:20 -0600364static RAW_NOTIFIER_HEAD(smsm_driver_state_notifier_list);
365static DEFINE_MUTEX(smsm_driver_state_notifier_lock);
366static void smsm_driver_state_notify(uint32_t state, void *data);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700367
/*
 * Write an interrupt-trigger value to a memory-mapped register.
 *
 * The wmb() ensures all prior shared-memory updates are visible to the
 * remote processor before the interrupt register write that signals it.
 */
static inline void smd_write_intr(unsigned int val,
				const void __iomem *addr)
{
	wmb();
	__raw_writel(val, addr);
}
374
#ifdef CONFIG_WCNSS
static inline void wakeup_v1_riva(void)
{
	/*
	 * workaround hack for RIVA v1 hardware bug:
	 * trigger GPIO 40 to wake up RIVA from power collapse;
	 * not to be sent to customers
	 */
	if (SOCINFO_VERSION_MAJOR(socinfo_get_version()) == 1) {
		/* pulse the GPIO low then high via the TLMM register */
		__raw_writel(0x0, MSM_TLMM_BASE + 0x1284);
		__raw_writel(0x2, MSM_TLMM_BASE + 0x1284);
	}
	/* end workaround */
}
#else
/* no-op on targets without WCNSS */
static inline void wakeup_v1_riva(void) {}
#endif
392
Brian Swetland5b0f5a32009-04-26 18:38:49 -0700393static inline void notify_modem_smd(void)
Brian Swetland2eb44eb2008-09-29 16:00:48 -0700394{
Angshuman Sarkarbad32df2012-02-01 19:52:52 +0530395 static const struct interrupt_config_item *intr
396 = &private_intr_config[SMD_MODEM].smd;
Eric Holmberg7ad623a2012-03-01 14:41:10 -0700397 if (intr->out_base) {
398 ++interrupt_stats[SMD_MODEM].smd_out_config_count;
Angshuman Sarkarbad32df2012-02-01 19:52:52 +0530399 smd_write_intr(intr->out_bit_pos,
400 intr->out_base + intr->out_offset);
Eric Holmberg7ad623a2012-03-01 14:41:10 -0700401 } else {
402 ++interrupt_stats[SMD_MODEM].smd_out_hardcode_count;
Angshuman Sarkarbad32df2012-02-01 19:52:52 +0530403 MSM_TRIG_A2M_SMD_INT;
Eric Holmberg7ad623a2012-03-01 14:41:10 -0700404 }
Brian Swetland2eb44eb2008-09-29 16:00:48 -0700405}
406
Brian Swetland5b0f5a32009-04-26 18:38:49 -0700407static inline void notify_dsp_smd(void)
408{
Angshuman Sarkarbad32df2012-02-01 19:52:52 +0530409 static const struct interrupt_config_item *intr
410 = &private_intr_config[SMD_Q6].smd;
Eric Holmberg7ad623a2012-03-01 14:41:10 -0700411 if (intr->out_base) {
412 ++interrupt_stats[SMD_Q6].smd_out_config_count;
Angshuman Sarkarbad32df2012-02-01 19:52:52 +0530413 smd_write_intr(intr->out_bit_pos,
414 intr->out_base + intr->out_offset);
Eric Holmberg7ad623a2012-03-01 14:41:10 -0700415 } else {
416 ++interrupt_stats[SMD_Q6].smd_out_hardcode_count;
Angshuman Sarkarbad32df2012-02-01 19:52:52 +0530417 MSM_TRIG_A2Q6_SMD_INT;
Eric Holmberg7ad623a2012-03-01 14:41:10 -0700418 }
Brian Swetland5b0f5a32009-04-26 18:38:49 -0700419}
420
Angshuman Sarkarbad32df2012-02-01 19:52:52 +0530421static inline void notify_dsps_smd(void)
422{
423 static const struct interrupt_config_item *intr
424 = &private_intr_config[SMD_DSPS].smd;
Eric Holmberg7ad623a2012-03-01 14:41:10 -0700425 if (intr->out_base) {
426 ++interrupt_stats[SMD_DSPS].smd_out_config_count;
Angshuman Sarkarbad32df2012-02-01 19:52:52 +0530427 smd_write_intr(intr->out_bit_pos,
428 intr->out_base + intr->out_offset);
Eric Holmberg7ad623a2012-03-01 14:41:10 -0700429 } else {
430 ++interrupt_stats[SMD_DSPS].smd_out_hardcode_count;
Angshuman Sarkarbad32df2012-02-01 19:52:52 +0530431 MSM_TRIG_A2DSPS_SMD_INT;
Eric Holmberg7ad623a2012-03-01 14:41:10 -0700432 }
Angshuman Sarkarbad32df2012-02-01 19:52:52 +0530433}
434
435static inline void notify_wcnss_smd(void)
436{
437 static const struct interrupt_config_item *intr
438 = &private_intr_config[SMD_WCNSS].smd;
439 wakeup_v1_riva();
440
Eric Holmberg7ad623a2012-03-01 14:41:10 -0700441 if (intr->out_base) {
442 ++interrupt_stats[SMD_WCNSS].smd_out_config_count;
Angshuman Sarkarbad32df2012-02-01 19:52:52 +0530443 smd_write_intr(intr->out_bit_pos,
444 intr->out_base + intr->out_offset);
Eric Holmberg7ad623a2012-03-01 14:41:10 -0700445 } else {
446 ++interrupt_stats[SMD_WCNSS].smd_out_hardcode_count;
Angshuman Sarkarbad32df2012-02-01 19:52:52 +0530447 MSM_TRIG_A2WCNSS_SMD_INT;
Eric Holmberg7ad623a2012-03-01 14:41:10 -0700448 }
Angshuman Sarkarbad32df2012-02-01 19:52:52 +0530449}
450
Jeff Hugo918b2dc2012-03-21 13:42:09 -0600451static inline void notify_rpm_smd(void)
452{
453 static const struct interrupt_config_item *intr
454 = &private_intr_config[SMD_RPM].smd;
455
456 if (intr->out_base) {
457 ++interrupt_stats[SMD_RPM].smd_out_config_count;
458 smd_write_intr(intr->out_bit_pos,
459 intr->out_base + intr->out_offset);
460 }
461}
462
Angshuman Sarkarbad32df2012-02-01 19:52:52 +0530463static inline void notify_modem_smsm(void)
464{
465 static const struct interrupt_config_item *intr
466 = &private_intr_config[SMD_MODEM].smsm;
Eric Holmberg7ad623a2012-03-01 14:41:10 -0700467 if (intr->out_base) {
468 ++interrupt_stats[SMD_MODEM].smsm_out_config_count;
Angshuman Sarkarbad32df2012-02-01 19:52:52 +0530469 smd_write_intr(intr->out_bit_pos,
470 intr->out_base + intr->out_offset);
Eric Holmberg7ad623a2012-03-01 14:41:10 -0700471 } else {
472 ++interrupt_stats[SMD_MODEM].smsm_out_hardcode_count;
Angshuman Sarkarbad32df2012-02-01 19:52:52 +0530473 MSM_TRIG_A2M_SMSM_INT;
Eric Holmberg7ad623a2012-03-01 14:41:10 -0700474 }
Angshuman Sarkarbad32df2012-02-01 19:52:52 +0530475}
476
477static inline void notify_dsp_smsm(void)
478{
479 static const struct interrupt_config_item *intr
480 = &private_intr_config[SMD_Q6].smsm;
Eric Holmberg7ad623a2012-03-01 14:41:10 -0700481 if (intr->out_base) {
482 ++interrupt_stats[SMD_Q6].smsm_out_config_count;
Angshuman Sarkarbad32df2012-02-01 19:52:52 +0530483 smd_write_intr(intr->out_bit_pos,
484 intr->out_base + intr->out_offset);
Eric Holmberg7ad623a2012-03-01 14:41:10 -0700485 } else {
486 ++interrupt_stats[SMD_Q6].smsm_out_hardcode_count;
Angshuman Sarkarbad32df2012-02-01 19:52:52 +0530487 MSM_TRIG_A2Q6_SMSM_INT;
Eric Holmberg7ad623a2012-03-01 14:41:10 -0700488 }
Angshuman Sarkarbad32df2012-02-01 19:52:52 +0530489}
490
491static inline void notify_dsps_smsm(void)
492{
493 static const struct interrupt_config_item *intr
494 = &private_intr_config[SMD_DSPS].smsm;
Eric Holmberg7ad623a2012-03-01 14:41:10 -0700495 if (intr->out_base) {
496 ++interrupt_stats[SMD_DSPS].smsm_out_config_count;
Angshuman Sarkarbad32df2012-02-01 19:52:52 +0530497 smd_write_intr(intr->out_bit_pos,
498 intr->out_base + intr->out_offset);
Eric Holmberg7ad623a2012-03-01 14:41:10 -0700499 } else {
500 ++interrupt_stats[SMD_DSPS].smsm_out_hardcode_count;
Angshuman Sarkarbad32df2012-02-01 19:52:52 +0530501 MSM_TRIG_A2DSPS_SMSM_INT;
Eric Holmberg7ad623a2012-03-01 14:41:10 -0700502 }
Angshuman Sarkarbad32df2012-02-01 19:52:52 +0530503}
504
505static inline void notify_wcnss_smsm(void)
506{
507 static const struct interrupt_config_item *intr
508 = &private_intr_config[SMD_WCNSS].smsm;
509 wakeup_v1_riva();
510
Eric Holmberg7ad623a2012-03-01 14:41:10 -0700511 if (intr->out_base) {
512 ++interrupt_stats[SMD_WCNSS].smsm_out_config_count;
Angshuman Sarkarbad32df2012-02-01 19:52:52 +0530513 smd_write_intr(intr->out_bit_pos,
514 intr->out_base + intr->out_offset);
Eric Holmberg7ad623a2012-03-01 14:41:10 -0700515 } else {
516 ++interrupt_stats[SMD_WCNSS].smsm_out_hardcode_count;
Angshuman Sarkarbad32df2012-02-01 19:52:52 +0530517 MSM_TRIG_A2WCNSS_SMSM_INT;
Eric Holmberg7ad623a2012-03-01 14:41:10 -0700518 }
Angshuman Sarkarbad32df2012-02-01 19:52:52 +0530519}
520
/*
 * Notify every processor whose interrupt mask for this SMSM entry
 * overlaps notify_mask, then snapshot state for local callback clients.
 */
static void notify_other_smsm(uint32_t smsm_entry, uint32_t notify_mask)
{
	/* older protocol don't use smsm_intr_mask,
	   but still communicates with modem */
	if (!smsm_info.intr_mask ||
	    (__raw_readl(SMSM_INTR_MASK_ADDR(smsm_entry, SMSM_MODEM))
	     & notify_mask))
		notify_modem_smsm();

	if (smsm_info.intr_mask &&
	    (__raw_readl(SMSM_INTR_MASK_ADDR(smsm_entry, SMSM_Q6))
	     & notify_mask)) {
		uint32_t mux_val;

		/* 8x50 signals the Q6 via an incrementing mux counter */
		if (cpu_is_qsd8x50() && smsm_info.intr_mux) {
			mux_val = __raw_readl(
					SMSM_INTR_MUX_ADDR(SMEM_APPS_Q6_SMSM));
			mux_val++;
			__raw_writel(mux_val,
					SMSM_INTR_MUX_ADDR(SMEM_APPS_Q6_SMSM));
		}
		notify_dsp_smsm();
	}

	if (smsm_info.intr_mask &&
	    (__raw_readl(SMSM_INTR_MASK_ADDR(smsm_entry, SMSM_WCNSS))
	     & notify_mask)) {
		notify_wcnss_smsm();
	}

	if (smsm_info.intr_mask &&
	    (__raw_readl(SMSM_INTR_MASK_ADDR(smsm_entry, SMSM_DSPS))
	     & notify_mask)) {
		notify_dsps_smsm();
	}

	/*
	 * Notify local SMSM callback clients without wakelock since this
	 * code is used by power management during power-down/-up sequencing
	 * on DEM-based targets. Grabbing a wakelock in this case will
	 * abort the power-down sequencing.
	 */
	if (smsm_info.intr_mask &&
	    (__raw_readl(SMSM_INTR_MASK_ADDR(smsm_entry, SMSM_APPS))
	     & notify_mask)) {
		smsm_cb_snapshot(0);
	}
}
569
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700570void smd_diag(void)
Brian Swetland2eb44eb2008-09-29 16:00:48 -0700571{
572 char *x;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700573 int size;
Brian Swetland2eb44eb2008-09-29 16:00:48 -0700574
575 x = smem_find(ID_DIAG_ERR_MSG, SZ_DIAG_ERR_MSG);
576 if (x != 0) {
577 x[SZ_DIAG_ERR_MSG - 1] = 0;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700578 SMD_INFO("smem: DIAG '%s'\n", x);
579 }
580
581 x = smem_get_entry(SMEM_ERR_CRASH_LOG, &size);
582 if (x != 0) {
583 x[size - 1] = 0;
584 pr_err("smem: CRASH LOG\n'%s'\n", x);
Brian Swetland2eb44eb2008-09-29 16:00:48 -0700585 }
586}
587
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700588
/*
 * Log diagnostics after a modem crash, then spin forever waiting for
 * the modem or the watchdog to reset the system.
 */
static void handle_modem_crash(void)
{
	pr_err("MODEM/AMSS has CRASHED\n");
	smd_diag();

	/* hard reboot if possible FIXME
	if (msm_reset_hook)
		msm_reset_hook();
	*/

	/* in this case the modem or watchdog should reboot us */
	for (;;)
		;
}
603
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700604int smsm_check_for_modem_crash(void)
Arve Hjønnevåg28379412009-05-20 16:52:36 -0700605{
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700606 /* if the modem's not ready yet, we have to hope for the best */
607 if (!smsm_info.state)
608 return 0;
Arve Hjønnevåg28379412009-05-20 16:52:36 -0700609
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700610 if (__raw_readl(SMSM_STATE_ADDR(SMSM_MODEM_STATE)) & SMSM_RESET) {
Brian Swetland2eb44eb2008-09-29 16:00:48 -0700611 handle_modem_crash();
612 return -1;
Brian Swetland2eb44eb2008-09-29 16:00:48 -0700613 }
Brian Swetland5b0f5a32009-04-26 18:38:49 -0700614 return 0;
Brian Swetland2eb44eb2008-09-29 16:00:48 -0700615}
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700616EXPORT_SYMBOL(smsm_check_for_modem_crash);
Brian Swetland2eb44eb2008-09-29 16:00:48 -0700617
Brian Swetland2eb44eb2008-09-29 16:00:48 -0700618/* the spinlock is used to synchronize between the
Brian Swetland03e00cd2009-07-01 17:58:37 -0700619 * irq handler and code that mutates the channel
620 * list or fiddles with channel state
621 */
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700622static DEFINE_SPINLOCK(smd_lock);
Brian Swetland03e00cd2009-07-01 17:58:37 -0700623DEFINE_SPINLOCK(smem_lock);
Brian Swetland2eb44eb2008-09-29 16:00:48 -0700624
625/* the mutex is used during open() and close()
Brian Swetland03e00cd2009-07-01 17:58:37 -0700626 * operations to avoid races while creating or
627 * destroying smd_channel structures
628 */
Brian Swetland2eb44eb2008-09-29 16:00:48 -0700629static DEFINE_MUTEX(smd_creation_mutex);
630
631static int smd_initialized;
632
/* v1 shared layout: half-channels with fixed in-line FIFO buffers */
struct smd_shared_v1 {
	struct smd_half_channel ch0;
	unsigned char data0[SMD_BUF_SIZE];
	struct smd_half_channel ch1;
	unsigned char data1[SMD_BUF_SIZE];
};

/* v2 shared layout: half-channels only; FIFOs are allocated separately */
struct smd_shared_v2 {
	struct smd_half_channel ch0;
	struct smd_half_channel ch1;
};

/* v2 layout variant for remotes requiring word-sized accesses */
struct smd_shared_v2_word_access {
	struct smd_half_channel_word_access ch0;
	struct smd_half_channel_word_access ch1;
};
649
/* Local representation of one SMD channel endpoint. */
struct smd_channel {
	volatile void *send; /* some variant of smd_half_channel */
	volatile void *recv; /* some variant of smd_half_channel */
	unsigned char *send_data;	/* outgoing FIFO buffer */
	unsigned char *recv_data;	/* incoming FIFO buffer */
	unsigned fifo_size;
	unsigned fifo_mask;		/* fifo_size - 1, for index wrapping */
	struct list_head ch_list;	/* node on the per-edge channel list */

	unsigned current_packet;	/* bytes left in the packet being read */
	unsigned n;			/* channel id from the allocation table */
	void *priv;			/* client cookie for notify() */
	void (*notify)(void *priv, unsigned flags);

	/* stream- vs packet-mode I/O operations, bound at open time */
	int (*read)(smd_channel_t *ch, void *data, int len, int user_buf);
	int (*write)(smd_channel_t *ch, const void *data, int len,
			int user_buf);
	int (*read_avail)(smd_channel_t *ch);
	int (*write_avail)(smd_channel_t *ch);
	int (*read_from_cb)(smd_channel_t *ch, void *data, int len,
			int user_buf);

	void (*update_state)(smd_channel_t *ch);
	unsigned last_state;		/* last remote state processed */
	void (*notify_other_cpu)(void);	/* raises the edge's SMD interrupt */

	char name[20];			/* channel name from allocation table */
	struct platform_device pdev;	/* registered for driver binding */
	unsigned type;			/* edge identifier (SMD_APPS_* etc.) */

	int pending_pkt_sz;		/* packet header bytes not yet consumed */

	char is_pkt_ch;			/* nonzero for packet-mode channels */

	/*
	 * private internal functions to access *send and *recv.
	 * never to be exported outside of smd
	 */
	struct smd_half_channel_access *half_ch;
};
690
/* Processor-ID pair (and subsystem-restart name) for one SMD edge. */
struct edge_to_pid {
	uint32_t local_pid;	/* processor ID of the local endpoint */
	uint32_t remote_pid;	/* processor ID of the remote endpoint */
	/* subsystem name for restart notifications; empty if unused */
	char subsys_name[SMD_MAX_CH_NAME_LEN];
};
696
/**
 * Maps edge type to local and remote processor ID's.
 *
 * Only apps-local edges carry a subsys_name; the rest leave it
 * zero-initialized (no restart notification hookup).
 */
static struct edge_to_pid edge_to_pids[] = {
	[SMD_APPS_MODEM] = {SMD_APPS, SMD_MODEM, "modem"},
	[SMD_APPS_QDSP] = {SMD_APPS, SMD_Q6, "q6"},
	[SMD_MODEM_QDSP] = {SMD_MODEM, SMD_Q6},
	[SMD_APPS_DSPS] = {SMD_APPS, SMD_DSPS, "dsps"},
	[SMD_MODEM_DSPS] = {SMD_MODEM, SMD_DSPS},
	[SMD_QDSP_DSPS] = {SMD_Q6, SMD_DSPS},
	[SMD_APPS_WCNSS] = {SMD_APPS, SMD_WCNSS, "wcnss"},
	[SMD_MODEM_WCNSS] = {SMD_MODEM, SMD_WCNSS},
	[SMD_QDSP_WCNSS] = {SMD_Q6, SMD_WCNSS},
	[SMD_DSPS_WCNSS] = {SMD_DSPS, SMD_WCNSS},
	[SMD_APPS_Q6FW] = {SMD_APPS, SMD_MODEM_Q6_FW},
	[SMD_MODEM_Q6FW] = {SMD_MODEM, SMD_MODEM_Q6_FW},
	[SMD_QDSP_Q6FW] = {SMD_Q6, SMD_MODEM_Q6_FW},
	[SMD_DSPS_Q6FW] = {SMD_DSPS, SMD_MODEM_Q6_FW},
	[SMD_WCNSS_Q6FW] = {SMD_WCNSS, SMD_MODEM_Q6_FW},
	[SMD_APPS_RPM] = {SMD_APPS, SMD_RPM},
	[SMD_MODEM_RPM] = {SMD_MODEM, SMD_RPM},
	[SMD_QDSP_RPM] = {SMD_Q6, SMD_RPM},
	[SMD_WCNSS_RPM] = {SMD_WCNSS, SMD_RPM},
};
721
/*
 * Associates a subsystem-restart notifier with the processor it
 * watches (used with the subsystem_notif machinery included above).
 */
struct restart_notifier_block {
	unsigned processor;		/* processor ID being monitored */
	char *name;			/* subsystem name for the notifier */
	struct notifier_block nb;
};
727
/* nonzero suppresses the SMSM reset handshake; set elsewhere in the
 * driver — usage not visible in this chunk, TODO confirm */
static int disable_smsm_reset_handshake;
static struct platform_device loopback_tty_pdev = {.name = "LOOPBACK_TTY"};

/* channel lifecycle lists plus one list per remote-processor edge */
static LIST_HEAD(smd_ch_closed_list);
static LIST_HEAD(smd_ch_closing_list);
static LIST_HEAD(smd_ch_to_close_list);
static LIST_HEAD(smd_ch_list_modem);
static LIST_HEAD(smd_ch_list_dsp);
static LIST_HEAD(smd_ch_list_dsps);
static LIST_HEAD(smd_ch_list_wcnss);
static LIST_HEAD(smd_ch_list_rpm);

/* one slot per entry in the shared channel allocation table */
static unsigned char smd_ch_allocated[64];
static struct work_struct probe_work;

static void finalize_channel_close_fn(struct work_struct *work);
static DECLARE_WORK(finalize_channel_close_work, finalize_channel_close_fn);
static struct workqueue_struct *channel_close_wq;

static int smd_alloc_channel(struct smd_alloc_elm *alloc_elm);

/* on smp systems, the probe might get called from multiple cores,
   hence use a lock */
static DEFINE_MUTEX(smd_probe_lock);
752
753static void smd_channel_probe_worker(struct work_struct *work)
754{
755 struct smd_alloc_elm *shared;
756 unsigned n;
757 uint32_t type;
758
759 shared = smem_find(ID_CH_ALLOC_TBL, sizeof(*shared) * 64);
760
761 if (!shared) {
762 pr_err("%s: allocation table not initialized\n", __func__);
763 return;
764 }
765
766 mutex_lock(&smd_probe_lock);
767 for (n = 0; n < 64; n++) {
768 if (smd_ch_allocated[n])
769 continue;
770
771 /* channel should be allocated only if APPS
772 processor is involved */
773 type = SMD_CHANNEL_TYPE(shared[n].type);
Jeff Hugo918b2dc2012-03-21 13:42:09 -0600774 if (type >= ARRAY_SIZE(edge_to_pids) ||
775 edge_to_pids[type].local_pid != SMD_APPS)
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700776 continue;
777 if (!shared[n].ref_count)
778 continue;
779 if (!shared[n].name[0])
780 continue;
781
782 if (!smd_alloc_channel(&shared[n]))
783 smd_ch_allocated[n] = 1;
784 else
785 SMD_INFO("Probe skipping ch %d, not allocated\n", n);
786 }
787 mutex_unlock(&smd_probe_lock);
788}
789
790/**
791 * Lookup processor ID and determine if it belongs to the proved edge
792 * type.
793 *
794 * @shared2: Pointer to v2 shared channel structure
795 * @type: Edge type
796 * @pid: Processor ID of processor on edge
797 * @local_ch: Channel that belongs to processor @pid
798 * @remote_ch: Other side of edge contained @pid
Jeff Hugo70a7e562012-09-07 11:24:32 -0600799 * @is_word_access_ch: Bool, is this a word aligned access channel
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700800 *
801 * Returns 0 for not on edge, 1 for found on edge
802 */
Jeff Hugo70a7e562012-09-07 11:24:32 -0600803static int pid_is_on_edge(void *shared2,
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700804 uint32_t type, uint32_t pid,
Jeff Hugo70a7e562012-09-07 11:24:32 -0600805 void **local_ch,
806 void **remote_ch,
807 int is_word_access_ch
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700808 )
809{
810 int ret = 0;
811 struct edge_to_pid *edge;
Jeff Hugo70a7e562012-09-07 11:24:32 -0600812 void *ch0;
813 void *ch1;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700814
815 *local_ch = 0;
816 *remote_ch = 0;
817
818 if (!shared2 || (type >= ARRAY_SIZE(edge_to_pids)))
819 return 0;
820
Jeff Hugo70a7e562012-09-07 11:24:32 -0600821 if (is_word_access_ch) {
822 ch0 = &((struct smd_shared_v2_word_access *)(shared2))->ch0;
823 ch1 = &((struct smd_shared_v2_word_access *)(shared2))->ch1;
824 } else {
825 ch0 = &((struct smd_shared_v2 *)(shared2))->ch0;
826 ch1 = &((struct smd_shared_v2 *)(shared2))->ch1;
827 }
828
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700829 edge = &edge_to_pids[type];
830 if (edge->local_pid != edge->remote_pid) {
831 if (pid == edge->local_pid) {
Jeff Hugo70a7e562012-09-07 11:24:32 -0600832 *local_ch = ch0;
833 *remote_ch = ch1;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700834 ret = 1;
835 } else if (pid == edge->remote_pid) {
Jeff Hugo70a7e562012-09-07 11:24:32 -0600836 *local_ch = ch1;
837 *remote_ch = ch0;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700838 ret = 1;
839 }
840 }
841
842 return ret;
843}
844
Eric Holmberg17992c12012-02-29 12:54:44 -0700845/*
846 * Returns a pointer to the subsystem name or NULL if no
847 * subsystem name is available.
848 *
849 * @type - Edge definition
850 */
851const char *smd_edge_to_subsystem(uint32_t type)
852{
853 const char *subsys = NULL;
854
855 if (type < ARRAY_SIZE(edge_to_pids)) {
856 subsys = edge_to_pids[type].subsys_name;
857 if (subsys[0] == 0x0)
858 subsys = NULL;
859 }
860 return subsys;
861}
862EXPORT_SYMBOL(smd_edge_to_subsystem);
863
Eric Holmberg7ad623a2012-03-01 14:41:10 -0700864/*
865 * Returns a pointer to the subsystem name given the
866 * remote processor ID.
867 *
868 * @pid Remote processor ID
869 * @returns Pointer to subsystem name or NULL if not found
870 */
871const char *smd_pid_to_subsystem(uint32_t pid)
872{
873 const char *subsys = NULL;
874 int i;
875
876 for (i = 0; i < ARRAY_SIZE(edge_to_pids); ++i) {
877 if (pid == edge_to_pids[i].remote_pid &&
878 edge_to_pids[i].subsys_name[0] != 0x0
879 ) {
880 subsys = edge_to_pids[i].subsys_name;
881 break;
882 }
883 }
884
885 return subsys;
886}
887EXPORT_SYMBOL(smd_pid_to_subsystem);
Eric Holmberg17992c12012-02-29 12:54:44 -0700888
/*
 * Force one half-channel into @new_state during a subsystem restart.
 * Clears the DSR/CTS/CD handshake flags and raises fSTATE so the
 * other side notices; a half-channel already in SMD_SS_CLOSED is
 * left untouched.  The two branches are identical except for the
 * half-channel layout (word-access vs. byte-access).
 */
static void smd_reset_edge(void *void_ch, unsigned new_state,
		int is_word_access_ch)
{
	if (is_word_access_ch) {
		struct smd_half_channel_word_access *ch =
			(struct smd_half_channel_word_access *)(void_ch);
		if (ch->state != SMD_SS_CLOSED) {
			ch->state = new_state;
			ch->fDSR = 0;
			ch->fCTS = 0;
			ch->fCD = 0;
			ch->fSTATE = 1;
		}
	} else {
		struct smd_half_channel *ch =
			(struct smd_half_channel *)(void_ch);
		if (ch->state != SMD_SS_CLOSED) {
			ch->state = new_state;
			ch->fDSR = 0;
			ch->fCTS = 0;
			ch->fCD = 0;
			ch->fSTATE = 1;
		}
	}
}
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700914
/*
 * Walk the channel allocation table and force the half-channels owned
 * by processor @pid into @new_state on every edge it sits on.  Called
 * during subsystem restart; assumes the caller holds the appropriate
 * locks (see smd_channel_reset()).
 */
static void smd_channel_reset_state(struct smd_alloc_elm *shared,
		unsigned new_state, unsigned pid)
{
	unsigned n;
	void *shared2;
	uint32_t type;
	void *local_ch;
	void *remote_ch;
	int is_word_access;

	for (n = 0; n < SMD_CHANNELS; n++) {
		/* skip table slots that were never allocated/named */
		if (!shared[n].ref_count)
			continue;
		if (!shared[n].name[0])
			continue;

		type = SMD_CHANNEL_TYPE(shared[n].type);
		is_word_access = is_word_access_ch(type);
		/* the shared structure size depends on the access type */
		if (is_word_access)
			shared2 = smem_alloc(SMEM_SMD_BASE_ID + n,
				sizeof(struct smd_shared_v2_word_access));
		else
			shared2 = smem_alloc(SMEM_SMD_BASE_ID + n,
				sizeof(struct smd_shared_v2));
		if (!shared2)
			continue;

		if (pid_is_on_edge(shared2, type, pid, &local_ch, &remote_ch,
							is_word_access))
			smd_reset_edge(local_ch, new_state, is_word_access);

		/*
		 * ModemFW is in the same subsystem as ModemSW, but has
		 * separate SMD edges that need to be reset.
		 */
		if (pid == SMSM_MODEM &&
				pid_is_on_edge(shared2, type, SMD_MODEM_Q6_FW,
					&local_ch, &remote_ch, is_word_access))
			smd_reset_edge(local_ch, new_state, is_word_access);
	}
}
956
957
/*
 * Reset all SMD/SMSM state associated with a restarting processor.
 *
 * Sequence: release remote spinlocks the dead processor may hold,
 * clear its SMSM state word and re-notify everyone, then drive every
 * remote half-channel it owns through CLOSING and finally CLOSED,
 * kicking the local state machine (smd_fake_irq_handler) and the
 * outbound interrupts after each phase.  The mb() calls order the
 * shared-memory writes before the cross-processor notifications.
 *
 * @restart_pid: processor ID of the subsystem being restarted
 */
void smd_channel_reset(uint32_t restart_pid)
{
	struct smd_alloc_elm *shared;
	unsigned long flags;

	SMD_DBG("%s: starting reset\n", __func__);

	/* release any held spinlocks */
	remote_spin_release(&remote_spinlock, restart_pid);
	remote_spin_release_all(restart_pid);

	shared = smem_find(ID_CH_ALLOC_TBL, sizeof(*shared) * 64);
	if (!shared) {
		pr_err("%s: allocation table not initialized\n", __func__);
		return;
	}

	/* reset SMSM entry */
	if (smsm_info.state) {
		writel_relaxed(0, SMSM_STATE_ADDR(restart_pid));

		/* restart SMSM init handshake */
		if (restart_pid == SMSM_MODEM) {
			smsm_change_state(SMSM_APPS_STATE,
				SMSM_INIT | SMSM_SMD_LOOPBACK | SMSM_RESET,
				0);
		}

		/* notify SMSM processors */
		smsm_irq_handler(0, 0);
		notify_modem_smsm();
		notify_dsp_smsm();
		notify_dsps_smsm();
		notify_wcnss_smsm();
	}

	/* change all remote states to CLOSING */
	mutex_lock(&smd_probe_lock);
	spin_lock_irqsave(&smd_lock, flags);
	smd_channel_reset_state(shared, SMD_SS_CLOSING, restart_pid);
	spin_unlock_irqrestore(&smd_lock, flags);
	mutex_unlock(&smd_probe_lock);

	/* notify SMD processors */
	mb();
	smd_fake_irq_handler(0);
	notify_modem_smd();
	notify_dsp_smd();
	notify_dsps_smd();
	notify_wcnss_smd();

	/* change all remote states to CLOSED */
	mutex_lock(&smd_probe_lock);
	spin_lock_irqsave(&smd_lock, flags);
	smd_channel_reset_state(shared, SMD_SS_CLOSED, restart_pid);
	spin_unlock_irqrestore(&smd_lock, flags);
	mutex_unlock(&smd_probe_lock);

	/* notify SMD processors */
	mb();
	smd_fake_irq_handler(0);
	notify_modem_smd();
	notify_dsp_smd();
	notify_dsps_smd();
	notify_wcnss_smd();

	SMD_DBG("%s: finished reset\n", __func__);
}
1026
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001027/* how many bytes are available for reading */
1028static int smd_stream_read_avail(struct smd_channel *ch)
1029{
Jeff Hugo918b2dc2012-03-21 13:42:09 -06001030 return (ch->half_ch->get_head(ch->recv) -
1031 ch->half_ch->get_tail(ch->recv)) & ch->fifo_mask;
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001032}
1033
1034/* how many bytes we are free to write */
1035static int smd_stream_write_avail(struct smd_channel *ch)
1036{
Jeff Hugo918b2dc2012-03-21 13:42:09 -06001037 return ch->fifo_mask - ((ch->half_ch->get_head(ch->send) -
1038 ch->half_ch->get_tail(ch->send)) & ch->fifo_mask);
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001039}
1040
1041static int smd_packet_read_avail(struct smd_channel *ch)
1042{
1043 if (ch->current_packet) {
1044 int n = smd_stream_read_avail(ch);
1045 if (n > ch->current_packet)
1046 n = ch->current_packet;
1047 return n;
1048 } else {
1049 return 0;
1050 }
1051}
1052
1053static int smd_packet_write_avail(struct smd_channel *ch)
1054{
1055 int n = smd_stream_write_avail(ch);
1056 return n > SMD_HEADER_SIZE ? n - SMD_HEADER_SIZE : 0;
1057}
1058
1059static int ch_is_open(struct smd_channel *ch)
1060{
Jeff Hugo918b2dc2012-03-21 13:42:09 -06001061 return (ch->half_ch->get_state(ch->recv) == SMD_SS_OPENED ||
1062 ch->half_ch->get_state(ch->recv) == SMD_SS_FLUSHING)
1063 && (ch->half_ch->get_state(ch->send) == SMD_SS_OPENED);
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001064}
1065
1066/* provide a pointer and length to readable data in the fifo */
1067static unsigned ch_read_buffer(struct smd_channel *ch, void **ptr)
1068{
Jeff Hugo918b2dc2012-03-21 13:42:09 -06001069 unsigned head = ch->half_ch->get_head(ch->recv);
1070 unsigned tail = ch->half_ch->get_tail(ch->recv);
Brian Swetland5b0f5a32009-04-26 18:38:49 -07001071 *ptr = (void *) (ch->recv_data + tail);
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001072
1073 if (tail <= head)
1074 return head - tail;
1075 else
Brian Swetland5b0f5a32009-04-26 18:38:49 -07001076 return ch->fifo_size - tail;
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001077}
1078
/* nonzero when the remote side has set fBLOCKREADINTR to suppress
 * read-event interrupts from us */
static int read_intr_blocked(struct smd_channel *ch)
{
	return ch->half_ch->get_fBLOCKREADINTR(ch->recv);
}
1083
/* advance the fifo read pointer after data from ch_read_buffer is consumed */
static void ch_read_done(struct smd_channel *ch, unsigned count)
{
	BUG_ON(count > smd_stream_read_avail(ch));
	ch->half_ch->set_tail(ch->recv,
		(ch->half_ch->get_tail(ch->recv) + count) & ch->fifo_mask);
	/* tail update must be visible before the fTAIL event is raised */
	wmb();
	ch->half_ch->set_fTAIL(ch->send, 1);
}
1093
/* basic read interface to ch_read_{buffer,done} used
 * by smd_*_read() and update_packet_state()
 * will read-and-discard if the _data pointer is null
 */
static int ch_read(struct smd_channel *ch, void *_data, int len, int user_buf)
{
	void *ptr;
	unsigned n;
	unsigned char *data = _data;
	int orig_len = len;
	int r = 0;

	while (len > 0) {
		/* get the next contiguous readable region */
		n = ch_read_buffer(ch, &ptr);
		if (n == 0)
			break;

		if (n > len)
			n = len;
		if (_data) {
			if (user_buf) {
				/* NOTE(review): a partial copy_to_user is
				 * only logged; the bytes are still consumed
				 * from the fifo and counted as read */
				r = copy_to_user(data, ptr, n);
				if (r > 0) {
					pr_err("%s: "
						"copy_to_user could not copy "
						"%i bytes.\n",
						__func__,
						r);
				}
			} else
				memcpy(data, ptr, n);
		}

		data += n;
		len -= n;
		ch_read_done(ch, n);
	}

	/* number of bytes consumed from the fifo */
	return orig_len - len;
}
1134
/* update_state hook for stream channels: intentionally a no-op */
static void update_stream_state(struct smd_channel *ch)
{
	/* streams have no special state requiring updating */
}
1139
/*
 * update_state hook for packet channels: when between packets, pull
 * the next SMD_HEADER_SIZE header off the stream and latch the new
 * packet length into ch->current_packet.  Zero-length packets are
 * consumed by looping until a nonzero length (or no full header) is
 * seen.
 */
static void update_packet_state(struct smd_channel *ch)
{
	unsigned hdr[5];
	int r;

	/* can't do anything if we're in the middle of a packet */
	while (ch->current_packet == 0) {
		/* discard 0 length packets if any */

		/* don't bother unless we can get the full header */
		if (smd_stream_read_avail(ch) < SMD_HEADER_SIZE)
			return;

		r = ch_read(ch, hdr, SMD_HEADER_SIZE, 0);
		BUG_ON(r != SMD_HEADER_SIZE);

		/* first header word is the payload length */
		ch->current_packet = hdr[0];
	}
}
1159
1160/* provide a pointer and length to next free space in the fifo */
1161static unsigned ch_write_buffer(struct smd_channel *ch, void **ptr)
1162{
Jeff Hugo918b2dc2012-03-21 13:42:09 -06001163 unsigned head = ch->half_ch->get_head(ch->send);
1164 unsigned tail = ch->half_ch->get_tail(ch->send);
Brian Swetland5b0f5a32009-04-26 18:38:49 -07001165 *ptr = (void *) (ch->send_data + head);
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001166
1167 if (head < tail) {
1168 return tail - head - 1;
1169 } else {
1170 if (tail == 0)
Brian Swetland5b0f5a32009-04-26 18:38:49 -07001171 return ch->fifo_size - head - 1;
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001172 else
Brian Swetland5b0f5a32009-04-26 18:38:49 -07001173 return ch->fifo_size - head;
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001174 }
1175}
1176
/* advance the fifo write pointer after freespace
 * from ch_write_buffer is filled
 */
static void ch_write_done(struct smd_channel *ch, unsigned count)
{
	BUG_ON(count > smd_stream_write_avail(ch));
	ch->half_ch->set_head(ch->send,
		(ch->half_ch->get_head(ch->send) + count) & ch->fifo_mask);
	/* head update must be visible before the fHEAD event is raised */
	wmb();
	ch->half_ch->set_fHEAD(ch->send, 1);
}
1188
Brian Swetland5b0f5a32009-04-26 18:38:49 -07001189static void ch_set_state(struct smd_channel *ch, unsigned n)
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001190{
1191 if (n == SMD_SS_OPENED) {
Jeff Hugo918b2dc2012-03-21 13:42:09 -06001192 ch->half_ch->set_fDSR(ch->send, 1);
1193 ch->half_ch->set_fCTS(ch->send, 1);
1194 ch->half_ch->set_fCD(ch->send, 1);
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001195 } else {
Jeff Hugo918b2dc2012-03-21 13:42:09 -06001196 ch->half_ch->set_fDSR(ch->send, 0);
1197 ch->half_ch->set_fCTS(ch->send, 0);
1198 ch->half_ch->set_fCD(ch->send, 0);
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001199 }
Jeff Hugo918b2dc2012-03-21 13:42:09 -06001200 ch->half_ch->set_state(ch->send, n);
1201 ch->half_ch->set_fSTATE(ch->send, 1);
Brian Swetland5b0f5a32009-04-26 18:38:49 -07001202 ch->notify_other_cpu();
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001203}
1204
/*
 * If the shared heap grew since the last check, new channels may have
 * been allocated remotely — schedule the probe worker to rescan the
 * allocation table.
 */
static void do_smd_probe(void)
{
	struct smem_shared *shared = (void *) MSM_SHARED_RAM_BASE;
	if (shared->heap_info.free_offset != last_heap_free) {
		last_heap_free = shared->heap_info.free_offset;
		schedule_work(&probe_work);
	}
}
1213
/*
 * React to a remote half-channel state transition (@last -> @next) by
 * driving our own half-channel and notifying the client.  Called with
 * smd_lock held from the IRQ paths.
 */
static void smd_state_change(struct smd_channel *ch,
		unsigned last, unsigned next)
{
	ch->last_state = next;

	SMD_INFO("SMD: ch %d %d -> %d\n", ch->n, last, next);

	switch (next) {
	case SMD_SS_OPENING:
		/* remote wants to open: reset our fifo indices and follow */
		if (ch->half_ch->get_state(ch->send) == SMD_SS_CLOSING ||
		    ch->half_ch->get_state(ch->send) == SMD_SS_CLOSED) {
			ch->half_ch->set_tail(ch->recv, 0);
			ch->half_ch->set_head(ch->send, 0);
			ch->half_ch->set_fBLOCKREADINTR(ch->send, 0);
			ch_set_state(ch, SMD_SS_OPENING);
		}
		break;
	case SMD_SS_OPENED:
		/* handshake complete: report SMD_EVENT_OPEN to the client */
		if (ch->half_ch->get_state(ch->send) == SMD_SS_OPENING) {
			ch_set_state(ch, SMD_SS_OPENED);
			ch->notify(ch->priv, SMD_EVENT_OPEN);
		}
		break;
	case SMD_SS_FLUSHING:
	case SMD_SS_RESET:
		/* we should force them to close? */
		break;
	case SMD_SS_CLOSED:
		/* remote closed under us: drop packet state, tell client */
		if (ch->half_ch->get_state(ch->send) == SMD_SS_OPENED) {
			ch_set_state(ch, SMD_SS_CLOSING);
			ch->current_packet = 0;
			ch->pending_pkt_sz = 0;
			ch->notify(ch->priv, SMD_EVENT_CLOSE);
		}
		break;
	case SMD_SS_CLOSING:
		/* both sides closed: finish teardown on the workqueue */
		if (ch->half_ch->get_state(ch->send) == SMD_SS_CLOSED) {
			list_move(&ch->ch_list,
					&smd_ch_to_close_list);
			queue_work(channel_close_wq,
				&finalize_channel_close_work);
		}
		break;
	}
}
1259
/*
 * Service channels parked on smd_ch_closing_list: acknowledge any
 * pending fSTATE event and run the state machine if the remote state
 * moved.  Safe iteration is required because smd_state_change() may
 * move a channel onto smd_ch_to_close_list.
 */
static void handle_smd_irq_closing_list(void)
{
	unsigned long flags;
	struct smd_channel *ch;
	struct smd_channel *index;
	unsigned tmp;

	spin_lock_irqsave(&smd_lock, flags);
	list_for_each_entry_safe(ch, index, &smd_ch_closing_list, ch_list) {
		if (ch->half_ch->get_fSTATE(ch->recv))
			ch->half_ch->set_fSTATE(ch->recv, 0);
		tmp = ch->half_ch->get_state(ch->recv);
		if (tmp != ch->last_state)
			smd_state_change(ch, ch->last_state, tmp);
	}
	spin_unlock_irqrestore(&smd_lock, flags);
}
1277
/*
 * Core interrupt service for one edge: for every channel on @list,
 * collect and acknowledge the remote event flags (fHEAD -> bit 0,
 * fTAIL -> bit 1, fSTATE -> bit 2), run the state machine on a state
 * change, and deliver SMD_EVENT_DATA / SMD_EVENT_STATUS to clients.
 * @notify is the outbound-interrupt hook for this edge (passed to
 * clients indirectly through the channel ops; not called here).
 */
static void handle_smd_irq(struct list_head *list, void (*notify)(void))
{
	unsigned long flags;
	struct smd_channel *ch;
	unsigned ch_flags;
	unsigned tmp;
	unsigned char state_change;

	spin_lock_irqsave(&smd_lock, flags);
	list_for_each_entry(ch, list, ch_list) {
		state_change = 0;
		ch_flags = 0;
		if (ch_is_open(ch)) {
			/* ack each event flag and record it locally */
			if (ch->half_ch->get_fHEAD(ch->recv)) {
				ch->half_ch->set_fHEAD(ch->recv, 0);
				ch_flags |= 1;
			}
			if (ch->half_ch->get_fTAIL(ch->recv)) {
				ch->half_ch->set_fTAIL(ch->recv, 0);
				ch_flags |= 2;
			}
			if (ch->half_ch->get_fSTATE(ch->recv)) {
				ch->half_ch->set_fSTATE(ch->recv, 0);
				ch_flags |= 4;
			}
		}
		tmp = ch->half_ch->get_state(ch->recv);
		if (tmp != ch->last_state) {
			SMx_POWER_INFO("SMD ch%d '%s' State change %d->%d\n",
					ch->n, ch->name, ch->last_state, tmp);
			smd_state_change(ch, ch->last_state, tmp);
			state_change = 1;
		}
		/* data moved in either direction: notify the client */
		if (ch_flags & 0x3) {
			ch->update_state(ch);
			SMx_POWER_INFO("SMD ch%d '%s' Data event r%d/w%d\n",
					ch->n, ch->name,
					ch->read_avail(ch),
					ch->fifo_size - ch->write_avail(ch));
			ch->notify(ch->priv, SMD_EVENT_DATA);
		}
		/* flag-only status change (state itself already handled) */
		if (ch_flags & 0x4 && !state_change) {
			SMx_POWER_INFO("SMD ch%d '%s' State update\n",
					ch->n, ch->name);
			ch->notify(ch->priv, SMD_EVENT_STATUS);
		}
	}
	spin_unlock_irqrestore(&smd_lock, flags);
	do_smd_probe();
}
1328
Brian Swetland37521a32009-07-01 18:30:47 -07001329static irqreturn_t smd_modem_irq_handler(int irq, void *data)
Brian Swetland5b0f5a32009-04-26 18:38:49 -07001330{
Eric Holmberg98c6c642012-02-24 11:29:35 -07001331 SMx_POWER_INFO("SMD Int Modem->Apps\n");
Eric Holmberg7ad623a2012-03-01 14:41:10 -07001332 ++interrupt_stats[SMD_MODEM].smd_in_count;
Brian Swetland37521a32009-07-01 18:30:47 -07001333 handle_smd_irq(&smd_ch_list_modem, notify_modem_smd);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001334 handle_smd_irq_closing_list();
Brian Swetland37521a32009-07-01 18:30:47 -07001335 return IRQ_HANDLED;
1336}
1337
1338static irqreturn_t smd_dsp_irq_handler(int irq, void *data)
1339{
Eric Holmberg98c6c642012-02-24 11:29:35 -07001340 SMx_POWER_INFO("SMD Int LPASS->Apps\n");
Eric Holmberg7ad623a2012-03-01 14:41:10 -07001341 ++interrupt_stats[SMD_Q6].smd_in_count;
Brian Swetland37521a32009-07-01 18:30:47 -07001342 handle_smd_irq(&smd_ch_list_dsp, notify_dsp_smd);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001343 handle_smd_irq_closing_list();
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001344 return IRQ_HANDLED;
1345}
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001346
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001347static irqreturn_t smd_dsps_irq_handler(int irq, void *data)
1348{
Eric Holmberg98c6c642012-02-24 11:29:35 -07001349 SMx_POWER_INFO("SMD Int DSPS->Apps\n");
Eric Holmberg7ad623a2012-03-01 14:41:10 -07001350 ++interrupt_stats[SMD_DSPS].smd_in_count;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001351 handle_smd_irq(&smd_ch_list_dsps, notify_dsps_smd);
1352 handle_smd_irq_closing_list();
1353 return IRQ_HANDLED;
1354}
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001355
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001356static irqreturn_t smd_wcnss_irq_handler(int irq, void *data)
1357{
Eric Holmberg98c6c642012-02-24 11:29:35 -07001358 SMx_POWER_INFO("SMD Int WCNSS->Apps\n");
Eric Holmberg7ad623a2012-03-01 14:41:10 -07001359 ++interrupt_stats[SMD_WCNSS].smd_in_count;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001360 handle_smd_irq(&smd_ch_list_wcnss, notify_wcnss_smd);
1361 handle_smd_irq_closing_list();
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001362 return IRQ_HANDLED;
1363}
1364
Jeff Hugo918b2dc2012-03-21 13:42:09 -06001365static irqreturn_t smd_rpm_irq_handler(int irq, void *data)
1366{
1367 SMx_POWER_INFO("SMD Int RPM->Apps\n");
1368 ++interrupt_stats[SMD_RPM].smd_in_count;
1369 handle_smd_irq(&smd_ch_list_rpm, notify_rpm_smd);
1370 handle_smd_irq_closing_list();
1371 return IRQ_HANDLED;
1372}
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001373
/*
 * Tasklet body that services every edge as if its interrupt had
 * fired; used to re-run the state machines without a real IRQ
 * (e.g. from smd_sleep_exit() and smd_channel_reset()).
 */
static void smd_fake_irq_handler(unsigned long arg)
{
	handle_smd_irq(&smd_ch_list_modem, notify_modem_smd);
	handle_smd_irq(&smd_ch_list_dsp, notify_dsp_smd);
	handle_smd_irq(&smd_ch_list_dsps, notify_dsps_smd);
	handle_smd_irq(&smd_ch_list_wcnss, notify_wcnss_smd);
	handle_smd_irq(&smd_ch_list_rpm, notify_rpm_smd);
	handle_smd_irq_closing_list();
}

static DECLARE_TASKLET(smd_fake_irq_tasklet, smd_fake_irq_handler, 0);
1385
Brian Swetland37521a32009-07-01 18:30:47 -07001386static inline int smd_need_int(struct smd_channel *ch)
1387{
1388 if (ch_is_open(ch)) {
Jeff Hugo918b2dc2012-03-21 13:42:09 -06001389 if (ch->half_ch->get_fHEAD(ch->recv) ||
1390 ch->half_ch->get_fTAIL(ch->recv) ||
1391 ch->half_ch->get_fSTATE(ch->recv))
Brian Swetland37521a32009-07-01 18:30:47 -07001392 return 1;
Jeff Hugo918b2dc2012-03-21 13:42:09 -06001393 if (ch->half_ch->get_state(ch->recv) != ch->last_state)
Brian Swetland37521a32009-07-01 18:30:47 -07001394 return 1;
1395 }
1396 return 0;
1397}
1398
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001399void smd_sleep_exit(void)
1400{
1401 unsigned long flags;
1402 struct smd_channel *ch;
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001403 int need_int = 0;
1404
1405 spin_lock_irqsave(&smd_lock, flags);
Brian Swetland37521a32009-07-01 18:30:47 -07001406 list_for_each_entry(ch, &smd_ch_list_modem, ch_list) {
1407 if (smd_need_int(ch)) {
1408 need_int = 1;
1409 break;
1410 }
1411 }
1412 list_for_each_entry(ch, &smd_ch_list_dsp, ch_list) {
1413 if (smd_need_int(ch)) {
1414 need_int = 1;
1415 break;
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001416 }
1417 }
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001418 list_for_each_entry(ch, &smd_ch_list_dsps, ch_list) {
1419 if (smd_need_int(ch)) {
1420 need_int = 1;
1421 break;
1422 }
1423 }
1424 list_for_each_entry(ch, &smd_ch_list_wcnss, ch_list) {
1425 if (smd_need_int(ch)) {
1426 need_int = 1;
1427 break;
1428 }
1429 }
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001430 spin_unlock_irqrestore(&smd_lock, flags);
1431 do_smd_probe();
Brian Swetland37521a32009-07-01 18:30:47 -07001432
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001433 if (need_int) {
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001434 SMD_DBG("smd_sleep_exit need interrupt\n");
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001435 tasklet_schedule(&smd_fake_irq_tasklet);
1436 }
1437}
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001438EXPORT_SYMBOL(smd_sleep_exit);
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001439
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001440static int smd_is_packet(struct smd_alloc_elm *alloc_elm)
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001441{
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001442 if (SMD_XFER_TYPE(alloc_elm->type) == 1)
1443 return 0;
1444 else if (SMD_XFER_TYPE(alloc_elm->type) == 2)
Brian Swetland5b0f5a32009-04-26 18:38:49 -07001445 return 1;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001446
1447 /* for cases where xfer type is 0 */
1448 if (!strncmp(alloc_elm->name, "DAL", 3))
Brian Swetland5b0f5a32009-04-26 18:38:49 -07001449 return 0;
1450
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001451 /* for cases where xfer type is 0 */
1452 if (!strncmp(alloc_elm->name, "RPCCALL_QDSP", 12))
1453 return 0;
1454
1455 if (alloc_elm->cid > 4 || alloc_elm->cid == 1)
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001456 return 1;
1457 else
1458 return 0;
1459}
1460
/*
 * Write up to @len bytes from @_data into the stream channel, copying
 * from user space when @user_buf is set.  Returns the number of bytes
 * actually queued (may be short if the fifo fills), 0 for a zero-length
 * request, or -EINVAL for a negative length.  If the channel closes
 * mid-write the whole transfer is reported as 0 bytes written.
 */
static int smd_stream_write(smd_channel_t *ch, const void *_data, int len,
		int user_buf)
{
	void *ptr;
	const unsigned char *buf = _data;
	unsigned xfer;
	int orig_len = len;
	int r = 0;

	SMD_DBG("smd_stream_write() %d -> ch%d\n", len, ch->n);
	if (len < 0)
		return -EINVAL;
	else if (len == 0)
		return 0;

	while ((xfer = ch_write_buffer(ch, &ptr)) != 0) {
		if (!ch_is_open(ch)) {
			/* channel vanished: report nothing written */
			len = orig_len;
			break;
		}
		if (xfer > len)
			xfer = len;
		if (user_buf) {
			/* NOTE(review): a partial copy_from_user is only
			 * logged; the region is still committed below */
			r = copy_from_user(ptr, buf, xfer);
			if (r > 0) {
				pr_err("%s: "
					"copy_from_user could not copy %i "
					"bytes.\n",
					__func__,
					r);
			}
		} else
			memcpy(ptr, buf, xfer);
		ch_write_done(ch, xfer);
		len -= xfer;
		buf += xfer;
		if (len == 0)
			break;
	}

	/* only interrupt the remote side if something was queued */
	if (orig_len - len)
		ch->notify_other_cpu();

	return orig_len - len;
}
1506
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001507static int smd_packet_write(smd_channel_t *ch, const void *_data, int len,
1508 int user_buf)
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001509{
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001510 int ret;
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001511 unsigned hdr[5];
1512
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001513 SMD_DBG("smd_packet_write() %d -> ch%d\n", len, ch->n);
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001514 if (len < 0)
1515 return -EINVAL;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001516 else if (len == 0)
1517 return 0;
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001518
1519 if (smd_stream_write_avail(ch) < (len + SMD_HEADER_SIZE))
1520 return -ENOMEM;
1521
1522 hdr[0] = len;
1523 hdr[1] = hdr[2] = hdr[3] = hdr[4] = 0;
1524
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001525
1526 ret = smd_stream_write(ch, hdr, sizeof(hdr), 0);
1527 if (ret < 0 || ret != sizeof(hdr)) {
1528 SMD_DBG("%s failed to write pkt header: "
1529 "%d returned\n", __func__, ret);
1530 return -1;
1531 }
1532
1533
1534 ret = smd_stream_write(ch, _data, len, user_buf);
1535 if (ret < 0 || ret != len) {
1536 SMD_DBG("%s failed to write pkt data: "
1537 "%d returned\n", __func__, ret);
1538 return ret;
1539 }
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001540
1541 return len;
1542}
1543
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001544static int smd_stream_read(smd_channel_t *ch, void *data, int len, int user_buf)
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001545{
1546 int r;
1547
1548 if (len < 0)
1549 return -EINVAL;
1550
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001551 r = ch_read(ch, data, len, user_buf);
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001552 if (r > 0)
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001553 if (!read_intr_blocked(ch))
1554 ch->notify_other_cpu();
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001555
1556 return r;
1557}
1558
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001559static int smd_packet_read(smd_channel_t *ch, void *data, int len, int user_buf)
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001560{
1561 unsigned long flags;
1562 int r;
1563
1564 if (len < 0)
1565 return -EINVAL;
1566
1567 if (len > ch->current_packet)
1568 len = ch->current_packet;
1569
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001570 r = ch_read(ch, data, len, user_buf);
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001571 if (r > 0)
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001572 if (!read_intr_blocked(ch))
1573 ch->notify_other_cpu();
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001574
1575 spin_lock_irqsave(&smd_lock, flags);
1576 ch->current_packet -= r;
1577 update_packet_state(ch);
1578 spin_unlock_irqrestore(&smd_lock, flags);
1579
1580 return r;
1581}
1582
/*
 * Variant of smd_packet_read() for use from the channel notify callback.
 * It is identical except that current_packet/update_packet_state are
 * updated without taking smd_lock — presumably because the callback is
 * already invoked with smd_lock held (as smd_tiocmset_from_cb's comment
 * suggests for the callback path); NOTE(review): confirm against callers.
 */
static int smd_packet_read_from_cb(smd_channel_t *ch, void *data, int len,
		int user_buf)
{
	int r;

	if (len < 0)
		return -EINVAL;

	/* clamp to the current packet boundary */
	if (len > ch->current_packet)
		len = ch->current_packet;

	r = ch_read(ch, data, len, user_buf);
	if (r > 0)
		if (!read_intr_blocked(ch))
			ch->notify_other_cpu();

	/* no smd_lock here — see header comment */
	ch->current_packet -= r;
	update_packet_state(ch);

	return r;
}
1604
Angshuman Sarkarac7d6252011-09-30 18:20:59 +05301605#if (defined(CONFIG_MSM_SMD_PKG4) || defined(CONFIG_MSM_SMD_PKG3))
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001606static int smd_alloc_v2(struct smd_channel *ch)
1607{
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001608 void *buffer;
1609 unsigned buffer_sz;
1610
Jeff Hugo918b2dc2012-03-21 13:42:09 -06001611 if (is_word_access_ch(ch->type)) {
1612 struct smd_shared_v2_word_access *shared2;
1613 shared2 = smem_alloc(SMEM_SMD_BASE_ID + ch->n,
1614 sizeof(*shared2));
1615 if (!shared2) {
1616 SMD_INFO("smem_alloc failed ch=%d\n", ch->n);
1617 return -EINVAL;
1618 }
1619 ch->send = &shared2->ch0;
1620 ch->recv = &shared2->ch1;
1621 } else {
1622 struct smd_shared_v2 *shared2;
1623 shared2 = smem_alloc(SMEM_SMD_BASE_ID + ch->n,
1624 sizeof(*shared2));
1625 if (!shared2) {
1626 SMD_INFO("smem_alloc failed ch=%d\n", ch->n);
1627 return -EINVAL;
1628 }
1629 ch->send = &shared2->ch0;
1630 ch->recv = &shared2->ch1;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001631 }
Jeff Hugo918b2dc2012-03-21 13:42:09 -06001632 ch->half_ch = get_half_ch_funcs(ch->type);
1633
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001634 buffer = smem_get_entry(SMEM_SMD_FIFO_BASE_ID + ch->n, &buffer_sz);
1635 if (!buffer) {
Angshuman Sarkarac7d6252011-09-30 18:20:59 +05301636 SMD_INFO("smem_get_entry failed\n");
1637 return -EINVAL;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001638 }
1639
1640 /* buffer must be a power-of-two size */
Angshuman Sarkarac7d6252011-09-30 18:20:59 +05301641 if (buffer_sz & (buffer_sz - 1)) {
1642 SMD_INFO("Buffer size: %u not power of two\n", buffer_sz);
1643 return -EINVAL;
1644 }
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001645 buffer_sz /= 2;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001646 ch->send_data = buffer;
1647 ch->recv_data = buffer + buffer_sz;
1648 ch->fifo_size = buffer_sz;
Jeff Hugo918b2dc2012-03-21 13:42:09 -06001649
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001650 return 0;
1651}
1652
1653static int smd_alloc_v1(struct smd_channel *ch)
1654{
Angshuman Sarkarac7d6252011-09-30 18:20:59 +05301655 return -EINVAL;
1656}
1657
1658#else /* define v1 for older targets */
1659static int smd_alloc_v2(struct smd_channel *ch)
1660{
1661 return -EINVAL;
1662}
1663
1664static int smd_alloc_v1(struct smd_channel *ch)
1665{
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001666 struct smd_shared_v1 *shared1;
1667 shared1 = smem_alloc(ID_SMD_CHANNELS + ch->n, sizeof(*shared1));
1668 if (!shared1) {
1669 pr_err("smd_alloc_channel() cid %d does not exist\n", ch->n);
Angshuman Sarkarac7d6252011-09-30 18:20:59 +05301670 return -EINVAL;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001671 }
1672 ch->send = &shared1->ch0;
1673 ch->recv = &shared1->ch1;
1674 ch->send_data = shared1->data0;
1675 ch->recv_data = shared1->data1;
1676 ch->fifo_size = SMD_BUF_SIZE;
Jeff Hugo918b2dc2012-03-21 13:42:09 -06001677 ch->half_ch = get_half_ch_funcs(ch->type);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001678 return 0;
1679}
1680
Angshuman Sarkarac7d6252011-09-30 18:20:59 +05301681#endif
1682
/*
 * Create the local bookkeeping for the channel described by @alloc_elm
 * (an entry of the SMEM channel-allocation table): map its shared
 * memory, select stream vs packet operations, add it to the closed list,
 * and register a platform device so clients can bind to it by name.
 *
 * Returns 0 on success, -1 on allocation/mapping failure.
 */
static int smd_alloc_channel(struct smd_alloc_elm *alloc_elm)
{
	struct smd_channel *ch;

	ch = kzalloc(sizeof(struct smd_channel), GFP_KERNEL);
	if (ch == 0) {
		pr_err("smd_alloc_channel() out of memory\n");
		return -1;
	}
	ch->n = alloc_elm->cid;
	ch->type = SMD_CHANNEL_TYPE(alloc_elm->type);

	/* try the v2 shared layout first, fall back to v1 */
	if (smd_alloc_v2(ch) && smd_alloc_v1(ch)) {
		kfree(ch);
		return -1;
	}

	/* fifo_size is a power of two, so size-1 is a valid wrap mask */
	ch->fifo_mask = ch->fifo_size - 1;

	/* probe_worker guarentees ch->type will be a valid type */
	if (ch->type == SMD_APPS_MODEM)
		ch->notify_other_cpu = notify_modem_smd;
	else if (ch->type == SMD_APPS_QDSP)
		ch->notify_other_cpu = notify_dsp_smd;
	else if (ch->type == SMD_APPS_DSPS)
		ch->notify_other_cpu = notify_dsps_smd;
	else if (ch->type == SMD_APPS_WCNSS)
		ch->notify_other_cpu = notify_wcnss_smd;
	else if (ch->type == SMD_APPS_RPM)
		ch->notify_other_cpu = notify_rpm_smd;

	/* wire up the ops table for packet vs stream transfer type */
	if (smd_is_packet(alloc_elm)) {
		ch->read = smd_packet_read;
		ch->write = smd_packet_write;
		ch->read_avail = smd_packet_read_avail;
		ch->write_avail = smd_packet_write_avail;
		ch->update_state = update_packet_state;
		ch->read_from_cb = smd_packet_read_from_cb;
		ch->is_pkt_ch = 1;
	} else {
		ch->read = smd_stream_read;
		ch->write = smd_stream_write;
		ch->read_avail = smd_stream_read_avail;
		ch->write_avail = smd_stream_write_avail;
		ch->update_state = update_stream_state;
		ch->read_from_cb = smd_stream_read;
	}

	/* copy and force-terminate the channel name */
	memcpy(ch->name, alloc_elm->name, SMD_MAX_CH_NAME_LEN);
	ch->name[SMD_MAX_CH_NAME_LEN-1] = 0;

	ch->pdev.name = ch->name;
	ch->pdev.id = ch->type;

	SMD_INFO("smd_alloc_channel() '%s' cid=%d\n",
		ch->name, ch->n);

	mutex_lock(&smd_creation_mutex);
	list_add(&ch->ch_list, &smd_ch_closed_list);
	mutex_unlock(&smd_creation_mutex);

	platform_device_register(&ch->pdev);
	if (!strncmp(ch->name, "LOOPBACK", 8) && ch->type == SMD_APPS_MODEM) {
		/* create a platform driver to be used by smd_tty driver
		 * so that it can access the loopback port
		 */
		loopback_tty_pdev.id = ch->type;
		platform_device_register(&loopback_tty_pdev);
	}
	return 0;
}
1754
1755static inline void notify_loopback_smd(void)
1756{
1757 unsigned long flags;
1758 struct smd_channel *ch;
1759
1760 spin_lock_irqsave(&smd_lock, flags);
1761 list_for_each_entry(ch, &smd_ch_list_loopback, ch_list) {
1762 ch->notify(ch->priv, SMD_EVENT_DATA);
1763 }
1764 spin_unlock_irqrestore(&smd_lock, flags);
1765}
1766
1767static int smd_alloc_loopback_channel(void)
1768{
1769 static struct smd_half_channel smd_loopback_ctl;
1770 static char smd_loopback_data[SMD_BUF_SIZE];
1771 struct smd_channel *ch;
1772
1773 ch = kzalloc(sizeof(struct smd_channel), GFP_KERNEL);
1774 if (ch == 0) {
1775 pr_err("%s: out of memory\n", __func__);
1776 return -1;
1777 }
1778 ch->n = SMD_LOOPBACK_CID;
1779
1780 ch->send = &smd_loopback_ctl;
1781 ch->recv = &smd_loopback_ctl;
1782 ch->send_data = smd_loopback_data;
1783 ch->recv_data = smd_loopback_data;
1784 ch->fifo_size = SMD_BUF_SIZE;
1785
1786 ch->fifo_mask = ch->fifo_size - 1;
1787 ch->type = SMD_LOOPBACK_TYPE;
1788 ch->notify_other_cpu = notify_loopback_smd;
1789
1790 ch->read = smd_stream_read;
1791 ch->write = smd_stream_write;
1792 ch->read_avail = smd_stream_read_avail;
1793 ch->write_avail = smd_stream_write_avail;
1794 ch->update_state = update_stream_state;
1795 ch->read_from_cb = smd_stream_read;
1796
1797 memset(ch->name, 0, 20);
1798 memcpy(ch->name, "local_loopback", 14);
1799
1800 ch->pdev.name = ch->name;
1801 ch->pdev.id = ch->type;
1802
1803 SMD_INFO("%s: '%s' cid=%d\n", __func__, ch->name, ch->n);
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001804
1805 mutex_lock(&smd_creation_mutex);
1806 list_add(&ch->ch_list, &smd_ch_closed_list);
1807 mutex_unlock(&smd_creation_mutex);
1808
1809 platform_device_register(&ch->pdev);
Brian Swetland34f719b2009-10-30 16:22:05 -07001810 return 0;
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001811}
1812
/*
 * No-op notify callback.  Installed when a client opens a channel with a
 * NULL notify function and while a channel is being closed, so event
 * delivery never needs a NULL check.
 */
static void do_nothing_notify(void *priv, unsigned flags)
{
}
1816
/*
 * Workqueue handler that finishes closing channels: every channel parked
 * on smd_ch_to_close_list is moved to smd_ch_closed_list, its client is
 * told the port may be reopened, and its notify hook is muted.
 *
 * smd_creation_mutex guards smd_ch_closed_list; smd_lock guards
 * smd_ch_to_close_list — both are held across the whole move.
 */
static void finalize_channel_close_fn(struct work_struct *work)
{
	unsigned long flags;
	struct smd_channel *ch;
	struct smd_channel *index;

	mutex_lock(&smd_creation_mutex);
	spin_lock_irqsave(&smd_lock, flags);
	/* _safe iteration: entries are unlinked inside the loop */
	list_for_each_entry_safe(ch, index, &smd_ch_to_close_list, ch_list) {
		list_del(&ch->ch_list);
		list_add(&ch->ch_list, &smd_ch_closed_list);
		/* tell the client it may reopen, then silence the channel */
		ch->notify(ch->priv, SMD_EVENT_REOPEN_READY);
		ch->notify = do_nothing_notify;
	}
	spin_unlock_irqrestore(&smd_lock, flags);
	mutex_unlock(&smd_creation_mutex);
}
1834
1835struct smd_channel *smd_get_channel(const char *name, uint32_t type)
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001836{
1837 struct smd_channel *ch;
1838
1839 mutex_lock(&smd_creation_mutex);
1840 list_for_each_entry(ch, &smd_ch_closed_list, ch_list) {
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001841 if (!strcmp(name, ch->name) &&
1842 (type == ch->type)) {
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001843 list_del(&ch->ch_list);
1844 mutex_unlock(&smd_creation_mutex);
1845 return ch;
1846 }
1847 }
1848 mutex_unlock(&smd_creation_mutex);
1849
1850 return NULL;
1851}
1852
/*
 * Open the SMD channel called @name on processor edge @edge and hand the
 * handle back through @_ch.  @notify (with @priv) receives channel
 * events; NULL selects the no-op callback.
 *
 * Returns 0 on success, -ENODEV if SMD is uninitialized or the channel
 * does not exist, -EAGAIN if the channel exists but is still being
 * closed (caller should retry later).
 */
int smd_named_open_on_edge(const char *name, uint32_t edge,
			smd_channel_t **_ch,
			void *priv, void (*notify)(void *, unsigned))
{
	struct smd_channel *ch;
	unsigned long flags;

	if (smd_initialized == 0) {
		SMD_INFO("smd_open() before smd_init()\n");
		return -ENODEV;
	}

	SMD_DBG("smd_open('%s', %p, %p)\n", name, priv, notify);

	ch = smd_get_channel(name, edge);
	if (!ch) {
		/* check closing list for port */
		spin_lock_irqsave(&smd_lock, flags);
		list_for_each_entry(ch, &smd_ch_closing_list, ch_list) {
			if (!strncmp(name, ch->name, 20) &&
				(edge == ch->type)) {
				/* channel exists, but is being closed */
				spin_unlock_irqrestore(&smd_lock, flags);
				return -EAGAIN;
			}
		}

		/* check closing workqueue list for port */
		list_for_each_entry(ch, &smd_ch_to_close_list, ch_list) {
			if (!strncmp(name, ch->name, 20) &&
				(edge == ch->type)) {
				/* channel exists, but is being closed */
				spin_unlock_irqrestore(&smd_lock, flags);
				return -EAGAIN;
			}
		}
		spin_unlock_irqrestore(&smd_lock, flags);

		/* one final check to handle closing->closed race condition */
		ch = smd_get_channel(name, edge);
		if (!ch)
			return -ENODEV;
	}

	if (notify == 0)
		notify = do_nothing_notify;

	ch->notify = notify;
	ch->current_packet = 0;
	ch->last_state = SMD_SS_CLOSED;
	ch->priv = priv;

	/* loopback has no remote side: mark it open and assert the
	 * handshake signals locally
	 */
	if (edge == SMD_LOOPBACK_TYPE) {
		ch->last_state = SMD_SS_OPENED;
		ch->half_ch->set_state(ch->send, SMD_SS_OPENED);
		ch->half_ch->set_fDSR(ch->send, 1);
		ch->half_ch->set_fCTS(ch->send, 1);
		ch->half_ch->set_fCD(ch->send, 1);
	}

	*_ch = ch;

	SMD_DBG("smd_open: opening '%s'\n", ch->name);

	/* file the channel on its edge's active list */
	spin_lock_irqsave(&smd_lock, flags);
	if (SMD_CHANNEL_TYPE(ch->type) == SMD_APPS_MODEM)
		list_add(&ch->ch_list, &smd_ch_list_modem);
	else if (SMD_CHANNEL_TYPE(ch->type) == SMD_APPS_QDSP)
		list_add(&ch->ch_list, &smd_ch_list_dsp);
	else if (SMD_CHANNEL_TYPE(ch->type) == SMD_APPS_DSPS)
		list_add(&ch->ch_list, &smd_ch_list_dsps);
	else if (SMD_CHANNEL_TYPE(ch->type) == SMD_APPS_WCNSS)
		list_add(&ch->ch_list, &smd_ch_list_wcnss);
	else if (SMD_CHANNEL_TYPE(ch->type) == SMD_APPS_RPM)
		list_add(&ch->ch_list, &smd_ch_list_rpm);
	else
		list_add(&ch->ch_list, &smd_ch_list_loopback);

	SMD_DBG("%s: opening ch %d\n", __func__, ch->n);

	/* kick off the local open handshake for real (non-loopback) edges */
	if (edge != SMD_LOOPBACK_TYPE)
		smd_state_change(ch, ch->last_state, SMD_SS_OPENING);

	spin_unlock_irqrestore(&smd_lock, flags);

	return 0;
}
EXPORT_SYMBOL(smd_named_open_on_edge);
1941
1942
1943int smd_open(const char *name, smd_channel_t **_ch,
1944 void *priv, void (*notify)(void *, unsigned))
1945{
1946 return smd_named_open_on_edge(name, SMD_APPS_MODEM, _ch, priv,
1947 notify);
1948}
1949EXPORT_SYMBOL(smd_open);
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001950
/*
 * Close @ch.  The local half is marked CLOSED immediately; if the remote
 * half is still OPENED the channel is parked on smd_ch_closing_list
 * until the remote side acknowledges, otherwise it goes straight back to
 * smd_ch_closed_list and may be reopened.
 *
 * Returns 0, or -1 for a NULL channel.
 */
int smd_close(smd_channel_t *ch)
{
	unsigned long flags;

	if (ch == 0)
		return -1;

	SMD_INFO("smd_close(%s)\n", ch->name);

	spin_lock_irqsave(&smd_lock, flags);
	/* remove from whichever active edge list it is on */
	list_del(&ch->ch_list);
	if (ch->n == SMD_LOOPBACK_CID) {
		/* loopback shares send/recv state: drop signals by hand */
		ch->half_ch->set_fDSR(ch->send, 0);
		ch->half_ch->set_fCTS(ch->send, 0);
		ch->half_ch->set_fCD(ch->send, 0);
		ch->half_ch->set_state(ch->send, SMD_SS_CLOSED);
	} else
		ch_set_state(ch, SMD_SS_CLOSED);

	if (ch->half_ch->get_state(ch->recv) == SMD_SS_OPENED) {
		/* remote still open: wait on the closing list */
		list_add(&ch->ch_list, &smd_ch_closing_list);
		spin_unlock_irqrestore(&smd_lock, flags);
	} else {
		/* fully closed: mute events and return to closed list.
		 * smd_lock is dropped before taking smd_creation_mutex
		 * (a mutex cannot be taken under a spinlock). */
		spin_unlock_irqrestore(&smd_lock, flags);
		ch->notify = do_nothing_notify;
		mutex_lock(&smd_creation_mutex);
		list_add(&ch->ch_list, &smd_ch_closed_list);
		mutex_unlock(&smd_creation_mutex);
	}

	return 0;
}
EXPORT_SYMBOL(smd_close);
1984
1985int smd_write_start(smd_channel_t *ch, int len)
1986{
1987 int ret;
1988 unsigned hdr[5];
1989
1990 if (!ch) {
1991 pr_err("%s: Invalid channel specified\n", __func__);
1992 return -ENODEV;
1993 }
1994 if (!ch->is_pkt_ch) {
1995 pr_err("%s: non-packet channel specified\n", __func__);
1996 return -EACCES;
1997 }
1998 if (len < 1) {
1999 pr_err("%s: invalid length: %d\n", __func__, len);
2000 return -EINVAL;
2001 }
2002
2003 if (ch->pending_pkt_sz) {
2004 pr_err("%s: packet of size: %d in progress\n", __func__,
2005 ch->pending_pkt_sz);
2006 return -EBUSY;
2007 }
2008 ch->pending_pkt_sz = len;
2009
2010 if (smd_stream_write_avail(ch) < (SMD_HEADER_SIZE)) {
2011 ch->pending_pkt_sz = 0;
2012 SMD_DBG("%s: no space to write packet header\n", __func__);
2013 return -EAGAIN;
2014 }
2015
2016 hdr[0] = len;
2017 hdr[1] = hdr[2] = hdr[3] = hdr[4] = 0;
2018
2019
2020 ret = smd_stream_write(ch, hdr, sizeof(hdr), 0);
2021 if (ret < 0 || ret != sizeof(hdr)) {
2022 ch->pending_pkt_sz = 0;
2023 pr_err("%s: packet header failed to write\n", __func__);
2024 return -EPERM;
2025 }
2026 return 0;
2027}
2028EXPORT_SYMBOL(smd_write_start);
2029
2030int smd_write_segment(smd_channel_t *ch, void *data, int len, int user_buf)
2031{
2032 int bytes_written;
2033
2034 if (!ch) {
2035 pr_err("%s: Invalid channel specified\n", __func__);
2036 return -ENODEV;
2037 }
2038 if (len < 1) {
2039 pr_err("%s: invalid length: %d\n", __func__, len);
2040 return -EINVAL;
2041 }
2042
2043 if (!ch->pending_pkt_sz) {
2044 pr_err("%s: no transaction in progress\n", __func__);
2045 return -ENOEXEC;
2046 }
2047 if (ch->pending_pkt_sz - len < 0) {
2048 pr_err("%s: segment of size: %d will make packet go over "
2049 "length\n", __func__, len);
2050 return -EINVAL;
2051 }
2052
2053 bytes_written = smd_stream_write(ch, data, len, user_buf);
2054
2055 ch->pending_pkt_sz -= bytes_written;
2056
2057 return bytes_written;
2058}
2059EXPORT_SYMBOL(smd_write_segment);
2060
2061int smd_write_end(smd_channel_t *ch)
2062{
2063
2064 if (!ch) {
2065 pr_err("%s: Invalid channel specified\n", __func__);
2066 return -ENODEV;
2067 }
2068 if (ch->pending_pkt_sz) {
2069 pr_err("%s: current packet not completely written\n", __func__);
2070 return -E2BIG;
2071 }
2072
2073 return 0;
2074}
2075EXPORT_SYMBOL(smd_write_end);
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002076
2077int smd_read(smd_channel_t *ch, void *data, int len)
2078{
Jack Pham1b236d12012-03-19 15:27:18 -07002079 if (!ch) {
2080 pr_err("%s: Invalid channel specified\n", __func__);
2081 return -ENODEV;
2082 }
2083
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002084 return ch->read(ch, data, len, 0);
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002085}
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002086EXPORT_SYMBOL(smd_read);
2087
2088int smd_read_user_buffer(smd_channel_t *ch, void *data, int len)
2089{
Jack Pham1b236d12012-03-19 15:27:18 -07002090 if (!ch) {
2091 pr_err("%s: Invalid channel specified\n", __func__);
2092 return -ENODEV;
2093 }
2094
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002095 return ch->read(ch, data, len, 1);
2096}
2097EXPORT_SYMBOL(smd_read_user_buffer);
2098
2099int smd_read_from_cb(smd_channel_t *ch, void *data, int len)
2100{
Jack Pham1b236d12012-03-19 15:27:18 -07002101 if (!ch) {
2102 pr_err("%s: Invalid channel specified\n", __func__);
2103 return -ENODEV;
2104 }
2105
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002106 return ch->read_from_cb(ch, data, len, 0);
2107}
2108EXPORT_SYMBOL(smd_read_from_cb);
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002109
2110int smd_write(smd_channel_t *ch, const void *data, int len)
2111{
Jack Pham1b236d12012-03-19 15:27:18 -07002112 if (!ch) {
2113 pr_err("%s: Invalid channel specified\n", __func__);
2114 return -ENODEV;
2115 }
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002116
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002117 return ch->pending_pkt_sz ? -EBUSY : ch->write(ch, data, len, 0);
Brian Swetland636eb9c2009-12-07 15:28:08 -08002118}
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002119EXPORT_SYMBOL(smd_write);
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002120
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002121int smd_write_user_buffer(smd_channel_t *ch, const void *data, int len)
Brian Swetland636eb9c2009-12-07 15:28:08 -08002122{
Jack Pham1b236d12012-03-19 15:27:18 -07002123 if (!ch) {
2124 pr_err("%s: Invalid channel specified\n", __func__);
2125 return -ENODEV;
2126 }
2127
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002128 return ch->pending_pkt_sz ? -EBUSY : ch->write(ch, data, len, 1);
Brian Swetland636eb9c2009-12-07 15:28:08 -08002129}
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002130EXPORT_SYMBOL(smd_write_user_buffer);
Brian Swetland636eb9c2009-12-07 15:28:08 -08002131
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002132int smd_read_avail(smd_channel_t *ch)
2133{
Jack Pham1b236d12012-03-19 15:27:18 -07002134 if (!ch) {
2135 pr_err("%s: Invalid channel specified\n", __func__);
2136 return -ENODEV;
2137 }
2138
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002139 return ch->read_avail(ch);
2140}
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002141EXPORT_SYMBOL(smd_read_avail);
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002142
2143int smd_write_avail(smd_channel_t *ch)
2144{
Jack Pham1b236d12012-03-19 15:27:18 -07002145 if (!ch) {
2146 pr_err("%s: Invalid channel specified\n", __func__);
2147 return -ENODEV;
2148 }
2149
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002150 return ch->write_avail(ch);
2151}
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002152EXPORT_SYMBOL(smd_write_avail);
2153
2154void smd_enable_read_intr(smd_channel_t *ch)
2155{
2156 if (ch)
Jeff Hugo918b2dc2012-03-21 13:42:09 -06002157 ch->half_ch->set_fBLOCKREADINTR(ch->send, 0);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002158}
2159EXPORT_SYMBOL(smd_enable_read_intr);
2160
2161void smd_disable_read_intr(smd_channel_t *ch)
2162{
2163 if (ch)
Jeff Hugo918b2dc2012-03-21 13:42:09 -06002164 ch->half_ch->set_fBLOCKREADINTR(ch->send, 1);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002165}
2166EXPORT_SYMBOL(smd_disable_read_intr);
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002167
Eric Holmbergdeace152012-07-25 12:17:11 -06002168/**
2169 * Enable/disable receive interrupts for the remote processor used by a
2170 * particular channel.
2171 * @ch: open channel handle to use for the edge
2172 * @mask: 1 = mask interrupts; 0 = unmask interrupts
2173 * @returns: 0 for success; < 0 for failure
2174 *
2175 * Note that this enables/disables all interrupts from the remote subsystem for
2176 * all channels. As such, it should be used with care and only for specific
2177 * use cases such as power-collapse sequencing.
2178 */
2179int smd_mask_receive_interrupt(smd_channel_t *ch, bool mask)
2180{
2181 struct irq_chip *irq_chip;
2182 struct irq_data *irq_data;
2183 struct interrupt_config_item *int_cfg;
2184
2185 if (!ch)
2186 return -EINVAL;
2187
2188 if (ch->type >= ARRAY_SIZE(edge_to_pids))
2189 return -ENODEV;
2190
2191 int_cfg = &private_intr_config[edge_to_pids[ch->type].remote_pid].smd;
2192
2193 if (int_cfg->irq_id < 0)
2194 return -ENODEV;
2195
2196 irq_chip = irq_get_chip(int_cfg->irq_id);
2197 if (!irq_chip)
2198 return -ENODEV;
2199
2200 irq_data = irq_get_irq_data(int_cfg->irq_id);
2201 if (!irq_data)
2202 return -ENODEV;
2203
2204 if (mask) {
2205 SMx_POWER_INFO("SMD Masking interrupts from %s\n",
2206 edge_to_pids[ch->type].subsys_name);
2207 irq_chip->irq_mask(irq_data);
2208 } else {
2209 SMx_POWER_INFO("SMD Unmasking interrupts from %s\n",
2210 edge_to_pids[ch->type].subsys_name);
2211 irq_chip->irq_unmask(irq_data);
2212 }
2213
2214 return 0;
2215}
2216EXPORT_SYMBOL(smd_mask_receive_interrupt);
2217
/* Blocking wait is not implemented; unconditionally returns -1. */
int smd_wait_until_readable(smd_channel_t *ch, int bytes)
{
	return -1;
}
2222
/* Blocking wait is not implemented; unconditionally returns -1. */
int smd_wait_until_writable(smd_channel_t *ch, int bytes)
{
	return -1;
}
2227
2228int smd_cur_packet_size(smd_channel_t *ch)
2229{
Jack Pham1b236d12012-03-19 15:27:18 -07002230 if (!ch) {
2231 pr_err("%s: Invalid channel specified\n", __func__);
2232 return -ENODEV;
2233 }
2234
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002235 return ch->current_packet;
2236}
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002237EXPORT_SYMBOL(smd_cur_packet_size);
2238
2239int smd_tiocmget(smd_channel_t *ch)
2240{
Jack Pham1b236d12012-03-19 15:27:18 -07002241 if (!ch) {
2242 pr_err("%s: Invalid channel specified\n", __func__);
2243 return -ENODEV;
2244 }
2245
Jeff Hugo918b2dc2012-03-21 13:42:09 -06002246 return (ch->half_ch->get_fDSR(ch->recv) ? TIOCM_DSR : 0) |
2247 (ch->half_ch->get_fCTS(ch->recv) ? TIOCM_CTS : 0) |
2248 (ch->half_ch->get_fCD(ch->recv) ? TIOCM_CD : 0) |
2249 (ch->half_ch->get_fRI(ch->recv) ? TIOCM_RI : 0) |
2250 (ch->half_ch->get_fCTS(ch->send) ? TIOCM_RTS : 0) |
2251 (ch->half_ch->get_fDSR(ch->send) ? TIOCM_DTR : 0);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002252}
2253EXPORT_SYMBOL(smd_tiocmget);
2254
/* this api will be called while holding smd_lock */
/*
 * Set/clear DTR and RTS on the local (send) half of @ch and raise the
 * state-change flag so the remote side re-reads the signals.  Note the
 * outgoing TIOCM_DTR maps to fDSR and TIOCM_RTS maps to fCTS in the
 * shared structure.  Returns 0, or -ENODEV for a NULL channel.
 */
int
smd_tiocmset_from_cb(smd_channel_t *ch, unsigned int set, unsigned int clear)
{
	if (!ch) {
		pr_err("%s: Invalid channel specified\n", __func__);
		return -ENODEV;
	}

	if (set & TIOCM_DTR)
		ch->half_ch->set_fDSR(ch->send, 1);

	if (set & TIOCM_RTS)
		ch->half_ch->set_fCTS(ch->send, 1);

	if (clear & TIOCM_DTR)
		ch->half_ch->set_fDSR(ch->send, 0);

	if (clear & TIOCM_RTS)
		ch->half_ch->set_fCTS(ch->send, 0);

	ch->half_ch->set_fSTATE(ch->send, 1);
	/* compiler barrier: keep the flag updates ordered before the
	 * notification to the remote cpu */
	barrier();
	ch->notify_other_cpu();

	return 0;
}
EXPORT_SYMBOL(smd_tiocmset_from_cb);
2283
2284int smd_tiocmset(smd_channel_t *ch, unsigned int set, unsigned int clear)
2285{
2286 unsigned long flags;
2287
Jack Pham1b236d12012-03-19 15:27:18 -07002288 if (!ch) {
2289 pr_err("%s: Invalid channel specified\n", __func__);
2290 return -ENODEV;
2291 }
2292
Vamsi Krishnacb12a102011-08-17 15:18:26 -07002293 spin_lock_irqsave(&smd_lock, flags);
2294 smd_tiocmset_from_cb(ch, set, clear);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002295 spin_unlock_irqrestore(&smd_lock, flags);
2296
2297 return 0;
2298}
2299EXPORT_SYMBOL(smd_tiocmset);
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002300
Jeff Hugod71d6ac2012-04-11 16:48:07 -06002301int smd_is_pkt_avail(smd_channel_t *ch)
2302{
Jeff Hugoa8549f12012-08-13 20:36:18 -06002303 unsigned long flags;
2304
Jeff Hugod71d6ac2012-04-11 16:48:07 -06002305 if (!ch || !ch->is_pkt_ch)
2306 return -EINVAL;
2307
2308 if (ch->current_packet)
2309 return 1;
2310
Jeff Hugoa8549f12012-08-13 20:36:18 -06002311 spin_lock_irqsave(&smd_lock, flags);
Jeff Hugod71d6ac2012-04-11 16:48:07 -06002312 update_packet_state(ch);
Jeff Hugoa8549f12012-08-13 20:36:18 -06002313 spin_unlock_irqrestore(&smd_lock, flags);
Jeff Hugod71d6ac2012-04-11 16:48:07 -06002314
2315 return ch->current_packet ? 1 : 0;
2316}
2317EXPORT_SYMBOL(smd_is_pkt_avail);
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002318
2319
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002320/* -------------------------------------------------------------------------- */
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002321
Jeff Hugobdc734d2012-03-26 16:05:39 -06002322/*
2323 * Shared Memory Range Check
2324 *
2325 * Takes a physical address and an offset and checks if the resulting physical
2326 * address would fit into one of the aux smem regions. If so, returns the
2327 * corresponding virtual address. Otherwise returns NULL. Expects the array
2328 * of smem regions to be in ascending physical address order.
2329 *
2330 * @base: physical base address to check
2331 * @offset: offset from the base to get the final address
2332 */
2333static void *smem_range_check(void *base, unsigned offset)
2334{
2335 int i;
2336 void *phys_addr;
2337 unsigned size;
2338
2339 for (i = 0; i < num_smem_areas; ++i) {
2340 phys_addr = smem_areas[i].phys_addr;
2341 size = smem_areas[i].size;
2342 if (base < phys_addr)
2343 return NULL;
2344 if (base > phys_addr + size)
2345 continue;
2346 if (base >= phys_addr && base + offset < phys_addr + size)
2347 return smem_areas[i].virt_addr + offset;
2348 }
2349
2350 return NULL;
2351}
2352
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002353/* smem_alloc returns the pointer to smem item if it is already allocated.
2354 * Otherwise, it returns NULL.
2355 */
void *smem_alloc(unsigned id, unsigned size)
{
	/* Lookup-only: despite the name this never allocates; it simply
	 * delegates to smem_find() (see smem_alloc2 for real allocation).
	 */
	return smem_find(id, size);
}
EXPORT_SYMBOL(smem_alloc);
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002361
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002362/* smem_alloc2 returns the pointer to smem item. If it is not allocated,
2363 * it allocates it and then returns the pointer to it.
2364 */
void *smem_alloc2(unsigned id, unsigned size_in)
{
	struct smem_shared *shared = (void *) MSM_SHARED_RAM_BASE;
	struct smem_heap_entry *toc = shared->heap_toc;
	unsigned long flags;
	void *ret = NULL;

	if (!shared->heap_info.initialized) {
		pr_err("%s: smem heap info not initialized\n", __func__);
		return NULL;
	}

	if (id >= SMEM_NUM_ITEMS)
		return NULL;

	/* Heap entries are always 8-byte aligned/sized. */
	size_in = ALIGN(size_in, 8);
	/* remote spinlock: the TOC and heap_info are shared with other
	 * processors, so a local spinlock is not sufficient.
	 */
	remote_spin_lock_irqsave(&remote_spinlock, flags);
	if (toc[id].allocated) {
		SMD_DBG("%s: %u already allocated\n", __func__, id);
		if (size_in != toc[id].size)
			pr_err("%s: wrong size %u (expected %u)\n",
				__func__, toc[id].size, size_in);
		else
			ret = (void *)(MSM_SHARED_RAM_BASE + toc[id].offset);
	} else if (id > SMEM_FIXED_ITEM_LAST) {
		/* Only dynamic (non-fixed) items may be allocated here. */
		SMD_DBG("%s: allocating %u\n", __func__, id);
		if (shared->heap_info.heap_remaining >= size_in) {
			toc[id].offset = shared->heap_info.free_offset;
			toc[id].size = size_in;
			/* Ensure offset/size are visible to remote
			 * processors before the entry is marked allocated.
			 */
			wmb();
			toc[id].allocated = 1;

			shared->heap_info.free_offset += size_in;
			shared->heap_info.heap_remaining -= size_in;
			ret = (void *)(MSM_SHARED_RAM_BASE + toc[id].offset);
		} else
			pr_err("%s: not enough memory %u (required %u)\n",
				__func__, shared->heap_info.heap_remaining,
				size_in);
	}
	/* Flush heap_info updates before dropping the remote lock. */
	wmb();
	remote_spin_unlock_irqrestore(&remote_spinlock, flags);
	return ret;
}
EXPORT_SYMBOL(smem_alloc2);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002410
/* Look up SMEM item @id; on success returns its virtual address and stores
 * its size in *@size. Returns NULL (and *size = 0) if not allocated.
 * Takes the remote spinlock only once smsm_init() has set
 * spinlocks_initialized, so early-boot callers still work.
 */
void *smem_get_entry(unsigned id, unsigned *size)
{
	struct smem_shared *shared = (void *) MSM_SHARED_RAM_BASE;
	struct smem_heap_entry *toc = shared->heap_toc;
	int use_spinlocks = spinlocks_initialized;
	void *ret = 0;
	unsigned long flags = 0;

	if (id >= SMEM_NUM_ITEMS)
		return ret;

	if (use_spinlocks)
		remote_spin_lock_irqsave(&remote_spinlock, flags);
	/* toc is in device memory and cannot be speculatively accessed */
	if (toc[id].allocated) {
		*size = toc[id].size;
		barrier();
		/* Entries with a base address in 'reserved' live in an aux
		 * smem region and must pass the range check.
		 */
		if (!(toc[id].reserved & BASE_ADDR_MASK))
			ret = (void *) (MSM_SHARED_RAM_BASE + toc[id].offset);
		else
			ret = smem_range_check(
				(void *)(toc[id].reserved & BASE_ADDR_MASK),
				toc[id].offset);
	} else {
		*size = 0;
	}
	if (use_spinlocks)
		remote_spin_unlock_irqrestore(&remote_spinlock, flags);

	return ret;
}
EXPORT_SYMBOL(smem_get_entry);
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002443
/**
 * smem_find - look up an existing SMEM item and validate its size
 * @id:      SMEM item identifier
 * @size_in: expected item size in bytes (rounded up to 8 here, matching
 *           the rounding smem_alloc2 applies at allocation time)
 *
 * Returns the item's virtual address, or NULL (0) if the item is not
 * allocated or its recorded size differs from @size_in.
 */
void *smem_find(unsigned id, unsigned size_in)
{
	unsigned size;
	void *ptr;

	ptr = smem_get_entry(id, &size);
	if (!ptr)
		return 0;

	size_in = ALIGN(size_in, 8);
	if (size_in != size) {
		/* id, size_in and size are unsigned: use %u, not %d */
		pr_err("smem_find(%u, %u): wrong size %u\n",
		       id, size_in, size);
		return 0;
	}

	return ptr;
}
EXPORT_SYMBOL(smem_find);
2463
/* Allocate the per-entry SMSM callback state and the worker queue used to
 * deliver state-change notifications. Returns 0 on success, -ENOMEM if the
 * state array cannot be allocated, -EFAULT if the workqueue cannot be
 * created (the state array is freed on that path).
 */
static int smsm_cb_init(void)
{
	struct smsm_state_info *state_info;
	int n;
	int ret = 0;

	smsm_states = kmalloc(sizeof(struct smsm_state_info)*SMSM_NUM_ENTRIES,
		   GFP_KERNEL);

	if (!smsm_states) {
		pr_err("%s: SMSM init failed\n", __func__);
		return -ENOMEM;
	}

	/* Single-threaded workqueue: callbacks for one snapshot must not
	 * run concurrently with those for the next.
	 */
	smsm_cb_wq = create_singlethread_workqueue("smsm_cb_wq");
	if (!smsm_cb_wq) {
		pr_err("%s: smsm_cb_wq creation failed\n", __func__);
		kfree(smsm_states);
		return -EFAULT;
	}

	mutex_lock(&smsm_lock);
	for (n = 0; n < SMSM_NUM_ENTRIES; n++) {
		state_info = &smsm_states[n];
		/* Seed last_value from hardware so the first notification
		 * reflects a real change, not the initial readout.
		 */
		state_info->last_value = __raw_readl(SMSM_STATE_ADDR(n));
		state_info->intr_mask_set = 0x0;
		state_info->intr_mask_clear = 0x0;
		INIT_LIST_HEAD(&state_info->callbacks);
	}
	mutex_unlock(&smsm_lock);

	return ret;
}
2497
/* One-time SMSM bring-up: initialize the shared remote spinlock, size the
 * SMSM tables from shared memory (if the peer published sizes), allocate
 * the snapshot FIFO/wakelock, map the shared state/intr-mask/intr-mux
 * arrays, and finally initialize the callback layer. Returns 0 on success
 * or a negative errno from the failing step.
 */
static int smsm_init(void)
{
	struct smem_shared *shared = (void *) MSM_SHARED_RAM_BASE;
	int i;
	struct smsm_size_info_type *smsm_size_info;

	i = remote_spin_lock_init(&remote_spinlock, SMEM_SPINLOCK_SMEM_ALLOC);
	if (i) {
		pr_err("%s: remote spinlock init failed %d\n", __func__, i);
		return i;
	}
	/* From here on smem_get_entry() may take the remote spinlock. */
	spinlocks_initialized = 1;

	/* Peer may publish actual table dimensions; fall back to the
	 * compile-time defaults if the item does not exist.
	 */
	smsm_size_info = smem_alloc(SMEM_SMSM_SIZE_INFO,
				sizeof(struct smsm_size_info_type));
	if (smsm_size_info) {
		SMSM_NUM_ENTRIES = smsm_size_info->num_entries;
		SMSM_NUM_HOSTS = smsm_size_info->num_hosts;
	}

	i = kfifo_alloc(&smsm_snapshot_fifo,
			sizeof(uint32_t) * SMSM_NUM_ENTRIES * SMSM_SNAPSHOT_CNT,
			GFP_KERNEL);
	if (i) {
		pr_err("%s: SMSM state fifo alloc failed %d\n", __func__, i);
		return i;
	}
	wake_lock_init(&smsm_snapshot_wakelock, WAKE_LOCK_SUSPEND,
			"smsm_snapshot");

	if (!smsm_info.state) {
		smsm_info.state = smem_alloc2(ID_SHARED_STATE,
					      SMSM_NUM_ENTRIES *
					      sizeof(uint32_t));

		if (smsm_info.state) {
			__raw_writel(0, SMSM_STATE_ADDR(SMSM_APPS_STATE));
			/* Modem protocol >= 0xB also expects the APPS DEM
			 * entry to start cleared.
			 */
			if ((shared->version[VERSION_MODEM] >> 16) >= 0xB)
				__raw_writel(0, \
					SMSM_STATE_ADDR(SMSM_APPS_DEM_I));
		}
	}

	if (!smsm_info.intr_mask) {
		smsm_info.intr_mask = smem_alloc2(SMEM_SMSM_CPU_INTR_MASK,
						  SMSM_NUM_ENTRIES *
						  SMSM_NUM_HOSTS *
						  sizeof(uint32_t));

		if (smsm_info.intr_mask) {
			/* Start with all interrupts masked for APPS... */
			for (i = 0; i < SMSM_NUM_ENTRIES; i++)
				__raw_writel(0x0,
					SMSM_INTR_MASK_ADDR(i, SMSM_APPS));

			/* Configure legacy modem bits */
			__raw_writel(LEGACY_MODEM_SMSM_MASK,
				SMSM_INTR_MASK_ADDR(SMSM_MODEM_STATE,
					SMSM_APPS));
		}
	}

	if (!smsm_info.intr_mux)
		smsm_info.intr_mux = smem_alloc2(SMEM_SMD_SMSM_INTR_MUX,
						 SMSM_NUM_INTR_MUX *
						 sizeof(uint32_t));

	i = smsm_cb_init();
	if (i)
		return i;

	/* Make all shared-memory writes visible before announcing init. */
	wmb();
	smsm_driver_state_notify(SMSM_INIT, NULL);
	return 0;
}
2572
2573void smsm_reset_modem(unsigned mode)
2574{
2575 if (mode == SMSM_SYSTEM_DOWNLOAD) {
2576 mode = SMSM_RESET | SMSM_SYSTEM_DOWNLOAD;
2577 } else if (mode == SMSM_MODEM_WAIT) {
2578 mode = SMSM_RESET | SMSM_MODEM_WAIT;
2579 } else { /* reset_mode is SMSM_RESET or default */
2580 mode = SMSM_RESET;
2581 }
2582
2583 smsm_change_state(SMSM_APPS_STATE, mode, mode);
2584}
2585EXPORT_SYMBOL(smsm_reset_modem);
2586
2587void smsm_reset_modem_cont(void)
2588{
2589 unsigned long flags;
2590 uint32_t state;
2591
2592 if (!smsm_info.state)
2593 return;
2594
2595 spin_lock_irqsave(&smem_lock, flags);
2596 state = __raw_readl(SMSM_STATE_ADDR(SMSM_APPS_STATE)) \
2597 & ~SMSM_MODEM_WAIT;
2598 __raw_writel(state, SMSM_STATE_ADDR(SMSM_APPS_STATE));
2599 wmb();
2600 spin_unlock_irqrestore(&smem_lock, flags);
2601}
2602EXPORT_SYMBOL(smsm_reset_modem_cont);
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002603
/* Capture the current value of every SMSM state entry into the snapshot
 * FIFO (plus a trailing wakelock-usage flag) and queue the callback worker.
 * @use_wakelock: non-zero to hold smsm_snapshot_wakelock until the worker
 * has consumed this snapshot. Called from interrupt context with smem_lock
 * held by the callers in this file.
 */
static void smsm_cb_snapshot(uint32_t use_wakelock)
{
	int n;
	uint32_t new_state;
	unsigned long flags;
	int ret;

	/* Reject the snapshot outright if the FIFO can't hold a full one. */
	ret = kfifo_avail(&smsm_snapshot_fifo);
	if (ret < SMSM_SNAPSHOT_SIZE) {
		pr_err("%s: SMSM snapshot full %d\n", __func__, ret);
		return;
	}

	/*
	 * To avoid a race condition with notify_smsm_cb_clients_worker, the
	 * following sequence must be followed:
	 *   1) increment snapshot count
	 *   2) insert data into FIFO
	 *
	 * Potentially in parallel, the worker:
	 *   a) verifies >= 1 snapshots are in FIFO
	 *   b) processes snapshot
	 *   c) decrements reference count
	 *
	 * This order ensures that 1 will always occur before abc.
	 */
	if (use_wakelock) {
		spin_lock_irqsave(&smsm_snapshot_count_lock, flags);
		if (smsm_snapshot_count == 0) {
			SMx_POWER_INFO("SMSM snapshot wake lock\n");
			wake_lock(&smsm_snapshot_wakelock);
		}
		++smsm_snapshot_count;
		spin_unlock_irqrestore(&smsm_snapshot_count_lock, flags);
	}

	/* queue state entries */
	for (n = 0; n < SMSM_NUM_ENTRIES; n++) {
		new_state = __raw_readl(SMSM_STATE_ADDR(n));

		ret = kfifo_in(&smsm_snapshot_fifo,
				&new_state, sizeof(new_state));
		if (ret != sizeof(new_state)) {
			pr_err("%s: SMSM snapshot failure %d\n", __func__, ret);
			goto restore_snapshot_count;
		}
	}

	/* queue wakelock usage flag */
	ret = kfifo_in(&smsm_snapshot_fifo,
			&use_wakelock, sizeof(use_wakelock));
	if (ret != sizeof(use_wakelock)) {
		pr_err("%s: SMSM snapshot failure %d\n", __func__, ret);
		goto restore_snapshot_count;
	}

	queue_work(smsm_cb_wq, &smsm_cb_work);
	return;

restore_snapshot_count:
	/* FIFO insert failed: undo the count/wakelock taken above. */
	if (use_wakelock) {
		spin_lock_irqsave(&smsm_snapshot_count_lock, flags);
		if (smsm_snapshot_count) {
			--smsm_snapshot_count;
			if (smsm_snapshot_count == 0) {
				SMx_POWER_INFO("SMSM snapshot wake unlock\n");
				wake_unlock(&smsm_snapshot_wakelock);
			}
		} else {
			pr_err("%s: invalid snapshot count\n", __func__);
		}
		spin_unlock_irqrestore(&smsm_snapshot_count_lock, flags);
	}
}
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002678
/* Core SMSM interrupt handler, shared by the per-processor wrappers below.
 * For the ADSP interrupt it only snapshots state; for the modem path it
 * additionally runs the reset/init handshake state machine on the apps
 * SMSM entry and notifies the other side of any apps-state change.
 */
static irqreturn_t smsm_irq_handler(int irq, void *data)
{
	unsigned long flags;

	if (irq == INT_ADSP_A11_SMSM) {
		uint32_t mux_val;
		static uint32_t prev_smem_q6_apps_smsm;

		/* Track the Q6->APPS intr mux value on 8x50 (read kept for
		 * its change detection; value is only cached here).
		 */
		if (smsm_info.intr_mux && cpu_is_qsd8x50()) {
			mux_val = __raw_readl(
					SMSM_INTR_MUX_ADDR(SMEM_Q6_APPS_SMSM));
			if (mux_val != prev_smem_q6_apps_smsm)
				prev_smem_q6_apps_smsm = mux_val;
		}

		spin_lock_irqsave(&smem_lock, flags);
		smsm_cb_snapshot(1);
		spin_unlock_irqrestore(&smem_lock, flags);
		return IRQ_HANDLED;
	}

	spin_lock_irqsave(&smem_lock, flags);
	if (!smsm_info.state) {
		SMSM_INFO("<SM NO STATE>\n");
	} else {
		unsigned old_apps, apps;
		unsigned modm = __raw_readl(SMSM_STATE_ADDR(SMSM_MODEM_STATE));

		old_apps = apps = __raw_readl(SMSM_STATE_ADDR(SMSM_APPS_STATE));

		SMSM_DBG("<SM %08x %08x>\n", apps, modm);
		if (apps & SMSM_RESET) {
			/* If we get an interrupt and the apps SMSM_RESET
			   bit is already set, the modem is acking the
			   app's reset ack. */
			if (!disable_smsm_reset_handshake)
				apps &= ~SMSM_RESET;
			/* Issue a fake irq to handle any
			 * smd state changes during reset
			 */
			smd_fake_irq_handler(0);

			/* queue modem restart notify chain */
			modem_queue_start_reset_notify();

		} else if (modm & SMSM_RESET) {
			pr_err("\nSMSM: Modem SMSM state changed to SMSM_RESET.");
			/* Ack the modem reset; flush caches so memory is
			 * consistent for any post-mortem capture.
			 */
			if (!disable_smsm_reset_handshake) {
				apps |= SMSM_RESET;
				flush_cache_all();
				outer_flush_all();
			}
			modem_queue_start_reset_notify();

		} else if (modm & SMSM_INIT) {
			if (!(apps & SMSM_INIT)) {
				apps |= SMSM_INIT;
				modem_queue_smsm_init_notify();
			}

			/* Once INIT, SMDINIT and RPCINIT are all present,
			 * the apps side advertises RUN.
			 */
			if (modm & SMSM_SMDINIT)
				apps |= SMSM_SMDINIT;
			if ((apps & (SMSM_INIT | SMSM_SMDINIT | SMSM_RPCINIT)) ==
				(SMSM_INIT | SMSM_SMDINIT | SMSM_RPCINIT))
				apps |= SMSM_RUN;
		} else if (modm & SMSM_SYSTEM_DOWNLOAD) {
			pr_err("\nSMSM: Modem SMSM state changed to SMSM_SYSTEM_DOWNLOAD.");
			modem_queue_start_reset_notify();
		}

		if (old_apps != apps) {
			SMSM_DBG("<SM %08x NOTIFY>\n", apps);
			__raw_writel(apps, SMSM_STATE_ADDR(SMSM_APPS_STATE));
			do_smd_probe();
			notify_other_smsm(SMSM_APPS_STATE, (old_apps ^ apps));
		}

		smsm_cb_snapshot(1);
	}
	spin_unlock_irqrestore(&smem_lock, flags);
	return IRQ_HANDLED;
}
2761
Eric Holmberg98c6c642012-02-24 11:29:35 -07002762static irqreturn_t smsm_modem_irq_handler(int irq, void *data)
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002763{
Eric Holmberg98c6c642012-02-24 11:29:35 -07002764 SMx_POWER_INFO("SMSM Int Modem->Apps\n");
Eric Holmberg7ad623a2012-03-01 14:41:10 -07002765 ++interrupt_stats[SMD_MODEM].smsm_in_count;
Eric Holmberg98c6c642012-02-24 11:29:35 -07002766 return smsm_irq_handler(irq, data);
2767}
Brian Swetland5b0f5a32009-04-26 18:38:49 -07002768
Eric Holmberg98c6c642012-02-24 11:29:35 -07002769static irqreturn_t smsm_dsp_irq_handler(int irq, void *data)
2770{
2771 SMx_POWER_INFO("SMSM Int LPASS->Apps\n");
Eric Holmberg7ad623a2012-03-01 14:41:10 -07002772 ++interrupt_stats[SMD_Q6].smsm_in_count;
Eric Holmberg98c6c642012-02-24 11:29:35 -07002773 return smsm_irq_handler(irq, data);
2774}
2775
2776static irqreturn_t smsm_dsps_irq_handler(int irq, void *data)
2777{
2778 SMx_POWER_INFO("SMSM Int DSPS->Apps\n");
Eric Holmberg7ad623a2012-03-01 14:41:10 -07002779 ++interrupt_stats[SMD_DSPS].smsm_in_count;
Eric Holmberg98c6c642012-02-24 11:29:35 -07002780 return smsm_irq_handler(irq, data);
2781}
2782
2783static irqreturn_t smsm_wcnss_irq_handler(int irq, void *data)
2784{
2785 SMx_POWER_INFO("SMSM Int WCNSS->Apps\n");
Eric Holmberg7ad623a2012-03-01 14:41:10 -07002786 ++interrupt_stats[SMD_WCNSS].smsm_in_count;
Eric Holmberg98c6c642012-02-24 11:29:35 -07002787 return smsm_irq_handler(irq, data);
2788}
2789
Eric Holmberge8a39322012-04-03 15:14:02 -06002790/*
2791 * Changes the global interrupt mask. The set and clear masks are re-applied
2792 * every time the global interrupt mask is updated for callback registration
2793 * and de-registration.
2794 *
2795 * The clear mask is applied first, so if a bit is set to 1 in both the clear
2796 * mask and the set mask, the result will be that the interrupt is set.
2797 *
2798 * @smsm_entry SMSM entry to change
2799 * @clear_mask 1 = clear bit, 0 = no-op
2800 * @set_mask 1 = set bit, 0 = no-op
2801 *
2802 * @returns 0 for success, < 0 for error
2803 */
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002804int smsm_change_intr_mask(uint32_t smsm_entry,
2805 uint32_t clear_mask, uint32_t set_mask)
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002806{
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002807 uint32_t old_mask, new_mask;
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002808 unsigned long flags;
Brian Swetland5b0f5a32009-04-26 18:38:49 -07002809
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002810 if (smsm_entry >= SMSM_NUM_ENTRIES) {
2811 pr_err("smsm_change_state: Invalid entry %d\n",
2812 smsm_entry);
2813 return -EINVAL;
2814 }
2815
2816 if (!smsm_info.intr_mask) {
2817 pr_err("smsm_change_intr_mask <SM NO STATE>\n");
Brian Swetland5b0f5a32009-04-26 18:38:49 -07002818 return -EIO;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002819 }
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002820
2821 spin_lock_irqsave(&smem_lock, flags);
Eric Holmberge8a39322012-04-03 15:14:02 -06002822 smsm_states[smsm_entry].intr_mask_clear = clear_mask;
2823 smsm_states[smsm_entry].intr_mask_set = set_mask;
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002824
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002825 old_mask = __raw_readl(SMSM_INTR_MASK_ADDR(smsm_entry, SMSM_APPS));
2826 new_mask = (old_mask & ~clear_mask) | set_mask;
2827 __raw_writel(new_mask, SMSM_INTR_MASK_ADDR(smsm_entry, SMSM_APPS));
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002828
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002829 wmb();
2830 spin_unlock_irqrestore(&smem_lock, flags);
Brian Swetland5b0f5a32009-04-26 18:38:49 -07002831
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002832 return 0;
2833}
2834EXPORT_SYMBOL(smsm_change_intr_mask);
2835
2836int smsm_get_intr_mask(uint32_t smsm_entry, uint32_t *intr_mask)
2837{
2838 if (smsm_entry >= SMSM_NUM_ENTRIES) {
2839 pr_err("smsm_change_state: Invalid entry %d\n",
2840 smsm_entry);
2841 return -EINVAL;
2842 }
2843
2844 if (!smsm_info.intr_mask) {
2845 pr_err("smsm_change_intr_mask <SM NO STATE>\n");
2846 return -EIO;
2847 }
2848
2849 *intr_mask = __raw_readl(SMSM_INTR_MASK_ADDR(smsm_entry, SMSM_APPS));
2850 return 0;
2851}
2852EXPORT_SYMBOL(smsm_get_intr_mask);
2853
2854int smsm_change_state(uint32_t smsm_entry,
2855 uint32_t clear_mask, uint32_t set_mask)
2856{
2857 unsigned long flags;
2858 uint32_t old_state, new_state;
2859
2860 if (smsm_entry >= SMSM_NUM_ENTRIES) {
2861 pr_err("smsm_change_state: Invalid entry %d",
2862 smsm_entry);
2863 return -EINVAL;
2864 }
2865
2866 if (!smsm_info.state) {
2867 pr_err("smsm_change_state <SM NO STATE>\n");
2868 return -EIO;
2869 }
2870 spin_lock_irqsave(&smem_lock, flags);
2871
2872 old_state = __raw_readl(SMSM_STATE_ADDR(smsm_entry));
2873 new_state = (old_state & ~clear_mask) | set_mask;
2874 __raw_writel(new_state, SMSM_STATE_ADDR(smsm_entry));
2875 SMSM_DBG("smsm_change_state %x\n", new_state);
2876 notify_other_smsm(SMSM_APPS_STATE, (old_state ^ new_state));
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002877
2878 spin_unlock_irqrestore(&smem_lock, flags);
2879
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002880 return 0;
2881}
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002882EXPORT_SYMBOL(smsm_change_state);
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002883
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002884uint32_t smsm_get_state(uint32_t smsm_entry)
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002885{
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002886 uint32_t rv = 0;
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002887
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002888 /* needs interface change to return error code */
2889 if (smsm_entry >= SMSM_NUM_ENTRIES) {
2890 pr_err("smsm_change_state: Invalid entry %d",
2891 smsm_entry);
2892 return 0;
2893 }
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002894
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002895 if (!smsm_info.state) {
2896 pr_err("smsm_get_state <SM NO STATE>\n");
2897 } else {
2898 rv = __raw_readl(SMSM_STATE_ADDR(smsm_entry));
2899 }
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002900
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002901 return rv;
2902}
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002903EXPORT_SYMBOL(smsm_get_state);
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002904
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002905/**
2906 * Performs SMSM callback client notifiction.
2907 */
/**
 * Performs SMSM callback client notification.
 *
 * Drains complete snapshots from smsm_snapshot_fifo (written by
 * smsm_cb_snapshot): for each snapshot, compares every entry's value
 * against the cached last_value, invokes matching registered callbacks
 * with the old/new values, then consumes the trailing wakelock flag and
 * releases the snapshot's wakelock reference if one was taken.
 */
void notify_smsm_cb_clients_worker(struct work_struct *work)
{
	struct smsm_state_cb_info *cb_info;
	struct smsm_state_info *state_info;
	int n;
	uint32_t new_state;
	uint32_t state_changes;
	uint32_t use_wakelock;
	int ret;
	unsigned long flags;

	if (!smd_initialized)
		return;

	/* Process only whole snapshots; a partial one means corruption. */
	while (kfifo_len(&smsm_snapshot_fifo) >= SMSM_SNAPSHOT_SIZE) {
		/* smsm_lock protects smsm_states and the callback lists. */
		mutex_lock(&smsm_lock);
		for (n = 0; n < SMSM_NUM_ENTRIES; n++) {
			state_info = &smsm_states[n];

			ret = kfifo_out(&smsm_snapshot_fifo, &new_state,
					sizeof(new_state));
			if (ret != sizeof(new_state)) {
				pr_err("%s: snapshot underflow %d\n",
					__func__, ret);
				mutex_unlock(&smsm_lock);
				return;
			}

			state_changes = state_info->last_value ^ new_state;
			if (state_changes) {
				SMx_POWER_INFO("SMSM Change %d: %08x->%08x\n",
						n, state_info->last_value,
						new_state);
				list_for_each_entry(cb_info,
					&state_info->callbacks, cb_list) {

					/* Deliver only to callbacks watching
					 * at least one changed bit.
					 */
					if (cb_info->mask & state_changes)
						cb_info->notify(cb_info->data,
							state_info->last_value,
							new_state);
				}
				state_info->last_value = new_state;
			}
		}

		/* read wakelock flag */
		ret = kfifo_out(&smsm_snapshot_fifo, &use_wakelock,
				sizeof(use_wakelock));
		if (ret != sizeof(use_wakelock)) {
			pr_err("%s: snapshot underflow %d\n",
				__func__, ret);
			mutex_unlock(&smsm_lock);
			return;
		}
		mutex_unlock(&smsm_lock);

		/* Drop this snapshot's wakelock reference (taken in
		 * smsm_cb_snapshot); unlock only on the last reference.
		 */
		if (use_wakelock) {
			spin_lock_irqsave(&smsm_snapshot_count_lock, flags);
			if (smsm_snapshot_count) {
				--smsm_snapshot_count;
				if (smsm_snapshot_count == 0) {
					SMx_POWER_INFO("SMSM snapshot"
						       " wake unlock\n");
					wake_unlock(&smsm_snapshot_wakelock);
				}
			} else {
				pr_err("%s: invalid snapshot count\n",
					__func__);
			}
			spin_unlock_irqrestore(&smsm_snapshot_count_lock,
					flags);
		}
	}
}
2982
Arve Hjønnevågec9d3d12009-06-16 14:48:21 -07002983
/**
 * Registers callback for SMSM state notifications when the specified
 * bits change.
 *
 * (Parameter text below previously copy-pasted "deregister" wording from
 * smsm_state_cb_deregister; corrected for registration.)
 *
 * @smsm_entry  Processor entry to monitor
 * @mask        Bits to monitor (ORed into an existing entry for the same
 *              notify/data pair)
 * @notify      Notification function to register
 * @data        Opaque data passed in to callback
 *
 * @returns Status code
 *  <0 error code
 *  0  inserted new entry
 *  1  updated mask of existing entry
 */
int smsm_state_cb_register(uint32_t smsm_entry, uint32_t mask,
		void (*notify)(void *, uint32_t, uint32_t), void *data)
{
	struct smsm_state_info *state;
	struct smsm_state_cb_info *cb_info;
	struct smsm_state_cb_info *cb_found = 0;
	uint32_t new_mask = 0;
	int ret = 0;

	if (smsm_entry >= SMSM_NUM_ENTRIES)
		return -EINVAL;

	mutex_lock(&smsm_lock);

	if (!smsm_states) {
		/* smsm not yet initialized */
		ret = -ENODEV;
		goto cleanup;
	}

	state = &smsm_states[smsm_entry];
	/* Merge into an existing registration for the same notify/data
	 * pair if present, while accumulating the aggregate mask of all
	 * callbacks on this entry.
	 */
	list_for_each_entry(cb_info,
			&state->callbacks, cb_list) {
		if (!ret && (cb_info->notify == notify) &&
				(cb_info->data == data)) {
			cb_info->mask |= mask;
			cb_found = cb_info;
			ret = 1;
		}
		new_mask |= cb_info->mask;
	}

	if (!cb_found) {
		/* GFP_ATOMIC: keep the critical section non-sleeping. */
		cb_info = kmalloc(sizeof(struct smsm_state_cb_info),
			GFP_ATOMIC);
		if (!cb_info) {
			ret = -ENOMEM;
			goto cleanup;
		}

		cb_info->mask = mask;
		cb_info->notify = notify;
		cb_info->data = data;
		INIT_LIST_HEAD(&cb_info->cb_list);
		list_add_tail(&cb_info->cb_list,
			&state->callbacks);
		new_mask |= mask;
	}

	/* update interrupt notification mask */
	if (smsm_entry == SMSM_MODEM_STATE)
		new_mask |= LEGACY_MODEM_SMSM_MASK;

	if (smsm_info.intr_mask) {
		unsigned long flags;

		spin_lock_irqsave(&smem_lock, flags);
		/* Apply the global clear/set overrides recorded by
		 * smsm_change_intr_mask() on top of the aggregate mask.
		 */
		new_mask = (new_mask & ~state->intr_mask_clear)
				| state->intr_mask_set;
		__raw_writel(new_mask,
				SMSM_INTR_MASK_ADDR(smsm_entry, SMSM_APPS));
		wmb();
		spin_unlock_irqrestore(&smem_lock, flags);
	}

cleanup:
	mutex_unlock(&smsm_lock);
	return ret;
}
EXPORT_SYMBOL(smsm_state_cb_register);
3068
3069
3070/**
3071 * Deregisters for SMSM state notifications for the specified bits.
3072 *
3073 * @smsm_entry Processor entry to deregister
3074 * @mask Bits to deregister (if result is 0, callback is removed)
3075 * @notify Notification function to deregister
3076 * @data Opaque data passed in to callback
3077 *
3078 * @returns Status code
3079 * <0 error code
3080 * 0 not found
3081 * 1 updated mask
3082 * 2 removed callback
3083 */
int smsm_state_cb_deregister(uint32_t smsm_entry, uint32_t mask,
		void (*notify)(void *, uint32_t, uint32_t), void *data)
{
	struct smsm_state_cb_info *cb_info;
	struct smsm_state_cb_info *cb_tmp;
	struct smsm_state_info *state;
	uint32_t new_mask = 0;
	int ret = 0;

	if (smsm_entry >= SMSM_NUM_ENTRIES)
		return -EINVAL;

	mutex_lock(&smsm_lock);

	if (!smsm_states) {
		/* smsm not yet initialized */
		mutex_unlock(&smsm_lock);
		return -ENODEV;
	}

	state = &smsm_states[smsm_entry];
	/* _safe iteration: entries may be deleted while walking. */
	list_for_each_entry_safe(cb_info, cb_tmp,
		&state->callbacks, cb_list) {
		if (!ret && (cb_info->notify == notify) &&
			(cb_info->data == data)) {
			cb_info->mask &= ~mask;
			ret = 1;
			if (!cb_info->mask) {
				/* no mask bits set, remove callback */
				list_del(&cb_info->cb_list);
				kfree(cb_info);
				ret = 2;
				/* freed entry must not contribute to
				 * new_mask below */
				continue;
			}
		}
		new_mask |= cb_info->mask;
	}

	/* update interrupt notification mask */
	if (smsm_entry == SMSM_MODEM_STATE)
		new_mask |= LEGACY_MODEM_SMSM_MASK;

	if (smsm_info.intr_mask) {
		unsigned long flags;

		spin_lock_irqsave(&smem_lock, flags);
		/* Re-apply the global clear/set overrides recorded by
		 * smsm_change_intr_mask() on top of the aggregate mask.
		 */
		new_mask = (new_mask & ~state->intr_mask_clear)
				| state->intr_mask_set;
		__raw_writel(new_mask,
				SMSM_INTR_MASK_ADDR(smsm_entry, SMSM_APPS));
		wmb();
		spin_unlock_irqrestore(&smem_lock, flags);
	}

	mutex_unlock(&smsm_lock);
	return ret;
}
EXPORT_SYMBOL(smsm_state_cb_deregister);
3142
Karthikeyan Ramasubramanian7069c482012-03-22 09:21:20 -06003143int smsm_driver_state_notifier_register(struct notifier_block *nb)
3144{
3145 int ret;
3146 if (!nb)
3147 return -EINVAL;
3148 mutex_lock(&smsm_driver_state_notifier_lock);
3149 ret = raw_notifier_chain_register(&smsm_driver_state_notifier_list, nb);
3150 mutex_unlock(&smsm_driver_state_notifier_lock);
3151 return ret;
3152}
3153EXPORT_SYMBOL(smsm_driver_state_notifier_register);
3154
3155int smsm_driver_state_notifier_unregister(struct notifier_block *nb)
3156{
3157 int ret;
3158 if (!nb)
3159 return -EINVAL;
3160 mutex_lock(&smsm_driver_state_notifier_lock);
3161 ret = raw_notifier_chain_unregister(&smsm_driver_state_notifier_list,
3162 nb);
3163 mutex_unlock(&smsm_driver_state_notifier_lock);
3164 return ret;
3165}
3166EXPORT_SYMBOL(smsm_driver_state_notifier_unregister);
3167
/*
 * Broadcast an SMSM driver-state change to every registered listener.
 * The notifier lock serializes broadcasts against (un)registration, so
 * listener callbacks run with smsm_driver_state_notifier_lock held and
 * must not re-enter the register/unregister functions above.
 */
static void smsm_driver_state_notify(uint32_t state, void *data)
{
	mutex_lock(&smsm_driver_state_notifier_lock);
	raw_notifier_call_chain(&smsm_driver_state_notifier_list,
			state, data);
	mutex_unlock(&smsm_driver_state_notifier_lock);
}
Brian Swetland5b0f5a32009-04-26 18:38:49 -07003175
3176int smd_core_init(void)
3177{
3178 int r;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003179 unsigned long flags = IRQF_TRIGGER_RISING;
3180 SMD_INFO("smd_core_init()\n");
Brian Swetland5b0f5a32009-04-26 18:38:49 -07003181
Brian Swetland37521a32009-07-01 18:30:47 -07003182 r = request_irq(INT_A9_M2A_0, smd_modem_irq_handler,
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003183 flags, "smd_dev", 0);
Brian Swetland2eb44eb2008-09-29 16:00:48 -07003184 if (r < 0)
3185 return r;
3186 r = enable_irq_wake(INT_A9_M2A_0);
3187 if (r < 0)
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003188 pr_err("smd_core_init: "
3189 "enable_irq_wake failed for INT_A9_M2A_0\n");
Brian Swetland2eb44eb2008-09-29 16:00:48 -07003190
Eric Holmberg98c6c642012-02-24 11:29:35 -07003191 r = request_irq(INT_A9_M2A_5, smsm_modem_irq_handler,
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003192 flags, "smsm_dev", 0);
Brian Swetland2eb44eb2008-09-29 16:00:48 -07003193 if (r < 0) {
3194 free_irq(INT_A9_M2A_0, 0);
3195 return r;
3196 }
3197 r = enable_irq_wake(INT_A9_M2A_5);
3198 if (r < 0)
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003199 pr_err("smd_core_init: "
3200 "enable_irq_wake failed for INT_A9_M2A_5\n");
Brian Swetland2eb44eb2008-09-29 16:00:48 -07003201
Brian Swetland37521a32009-07-01 18:30:47 -07003202#if defined(CONFIG_QDSP6)
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003203#if (INT_ADSP_A11 == INT_ADSP_A11_SMSM)
3204 flags |= IRQF_SHARED;
3205#endif
Brian Swetland37521a32009-07-01 18:30:47 -07003206 r = request_irq(INT_ADSP_A11, smd_dsp_irq_handler,
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003207 flags, "smd_dev", smd_dsp_irq_handler);
Brian Swetland37521a32009-07-01 18:30:47 -07003208 if (r < 0) {
3209 free_irq(INT_A9_M2A_0, 0);
3210 free_irq(INT_A9_M2A_5, 0);
3211 return r;
3212 }
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003213
Eric Holmberg98c6c642012-02-24 11:29:35 -07003214 r = request_irq(INT_ADSP_A11_SMSM, smsm_dsp_irq_handler,
3215 flags, "smsm_dev", smsm_dsp_irq_handler);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003216 if (r < 0) {
3217 free_irq(INT_A9_M2A_0, 0);
3218 free_irq(INT_A9_M2A_5, 0);
3219 free_irq(INT_ADSP_A11, smd_dsp_irq_handler);
3220 return r;
3221 }
3222
3223 r = enable_irq_wake(INT_ADSP_A11);
3224 if (r < 0)
3225 pr_err("smd_core_init: "
3226 "enable_irq_wake failed for INT_ADSP_A11\n");
3227
3228#if (INT_ADSP_A11 != INT_ADSP_A11_SMSM)
3229 r = enable_irq_wake(INT_ADSP_A11_SMSM);
3230 if (r < 0)
3231 pr_err("smd_core_init: enable_irq_wake "
3232 "failed for INT_ADSP_A11_SMSM\n");
3233#endif
3234 flags &= ~IRQF_SHARED;
Brian Swetland37521a32009-07-01 18:30:47 -07003235#endif
3236
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003237#if defined(CONFIG_DSPS)
3238 r = request_irq(INT_DSPS_A11, smd_dsps_irq_handler,
3239 flags, "smd_dev", smd_dsps_irq_handler);
3240 if (r < 0) {
3241 free_irq(INT_A9_M2A_0, 0);
3242 free_irq(INT_A9_M2A_5, 0);
3243 free_irq(INT_ADSP_A11, smd_dsp_irq_handler);
Eric Holmberg98c6c642012-02-24 11:29:35 -07003244 free_irq(INT_ADSP_A11_SMSM, smsm_dsp_irq_handler);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003245 return r;
3246 }
Brian Swetland5b0f5a32009-04-26 18:38:49 -07003247
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003248 r = enable_irq_wake(INT_DSPS_A11);
3249 if (r < 0)
3250 pr_err("smd_core_init: "
3251 "enable_irq_wake failed for INT_ADSP_A11\n");
Arve Hjønnevågec9d3d12009-06-16 14:48:21 -07003252#endif
Brian Swetland2eb44eb2008-09-29 16:00:48 -07003253
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003254#if defined(CONFIG_WCNSS)
3255 r = request_irq(INT_WCNSS_A11, smd_wcnss_irq_handler,
3256 flags, "smd_dev", smd_wcnss_irq_handler);
3257 if (r < 0) {
3258 free_irq(INT_A9_M2A_0, 0);
3259 free_irq(INT_A9_M2A_5, 0);
3260 free_irq(INT_ADSP_A11, smd_dsp_irq_handler);
Eric Holmberg98c6c642012-02-24 11:29:35 -07003261 free_irq(INT_ADSP_A11_SMSM, smsm_dsp_irq_handler);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003262 free_irq(INT_DSPS_A11, smd_dsps_irq_handler);
3263 return r;
3264 }
3265
3266 r = enable_irq_wake(INT_WCNSS_A11);
3267 if (r < 0)
3268 pr_err("smd_core_init: "
3269 "enable_irq_wake failed for INT_WCNSS_A11\n");
3270
Eric Holmberg98c6c642012-02-24 11:29:35 -07003271 r = request_irq(INT_WCNSS_A11_SMSM, smsm_wcnss_irq_handler,
3272 flags, "smsm_dev", smsm_wcnss_irq_handler);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003273 if (r < 0) {
3274 free_irq(INT_A9_M2A_0, 0);
3275 free_irq(INT_A9_M2A_5, 0);
3276 free_irq(INT_ADSP_A11, smd_dsp_irq_handler);
Eric Holmberg98c6c642012-02-24 11:29:35 -07003277 free_irq(INT_ADSP_A11_SMSM, smsm_dsp_irq_handler);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003278 free_irq(INT_DSPS_A11, smd_dsps_irq_handler);
3279 free_irq(INT_WCNSS_A11, smd_wcnss_irq_handler);
3280 return r;
3281 }
3282
3283 r = enable_irq_wake(INT_WCNSS_A11_SMSM);
3284 if (r < 0)
3285 pr_err("smd_core_init: "
3286 "enable_irq_wake failed for INT_WCNSS_A11_SMSM\n");
3287#endif
3288
Jeff Hugo6a8057c2011-08-16 13:47:12 -06003289#if defined(CONFIG_DSPS_SMSM)
Eric Holmberg98c6c642012-02-24 11:29:35 -07003290 r = request_irq(INT_DSPS_A11_SMSM, smsm_dsps_irq_handler,
3291 flags, "smsm_dev", smsm_dsps_irq_handler);
Jeff Hugo6a8057c2011-08-16 13:47:12 -06003292 if (r < 0) {
3293 free_irq(INT_A9_M2A_0, 0);
3294 free_irq(INT_A9_M2A_5, 0);
3295 free_irq(INT_ADSP_A11, smd_dsp_irq_handler);
Eric Holmberg98c6c642012-02-24 11:29:35 -07003296 free_irq(INT_ADSP_A11_SMSM, smsm_dsp_irq_handler);
Jeff Hugo6a8057c2011-08-16 13:47:12 -06003297 free_irq(INT_DSPS_A11, smd_dsps_irq_handler);
3298 free_irq(INT_WCNSS_A11, smd_wcnss_irq_handler);
Eric Holmberg98c6c642012-02-24 11:29:35 -07003299 free_irq(INT_WCNSS_A11_SMSM, smsm_wcnss_irq_handler);
Jeff Hugo6a8057c2011-08-16 13:47:12 -06003300 return r;
3301 }
3302
3303 r = enable_irq_wake(INT_DSPS_A11_SMSM);
3304 if (r < 0)
3305 pr_err("smd_core_init: "
3306 "enable_irq_wake failed for INT_DSPS_A11_SMSM\n");
3307#endif
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003308 SMD_INFO("smd_core_init() done\n");
3309
Brian Swetland2eb44eb2008-09-29 16:00:48 -07003310 return 0;
3311}
3312
Angshuman Sarkarbad32df2012-02-01 19:52:52 +05303313static int intr_init(struct interrupt_config_item *private_irq,
3314 struct smd_irq_config *platform_irq,
3315 struct platform_device *pdev
3316 )
3317{
3318 int irq_id;
3319 int ret;
3320 int ret_wake;
3321
3322 private_irq->out_bit_pos = platform_irq->out_bit_pos;
3323 private_irq->out_offset = platform_irq->out_offset;
3324 private_irq->out_base = platform_irq->out_base;
3325
3326 irq_id = platform_get_irq_byname(
3327 pdev,
3328 platform_irq->irq_name
3329 );
3330 SMD_DBG("smd: %s: register irq: %s id: %d\n", __func__,
3331 platform_irq->irq_name, irq_id);
3332 ret = request_irq(irq_id,
3333 private_irq->irq_handler,
3334 platform_irq->flags,
3335 platform_irq->device_name,
3336 (void *)platform_irq->dev_id
3337 );
3338 if (ret < 0) {
3339 platform_irq->irq_id = ret;
Eric Holmbergdeace152012-07-25 12:17:11 -06003340 private_irq->irq_id = ret;
Angshuman Sarkarbad32df2012-02-01 19:52:52 +05303341 } else {
3342 platform_irq->irq_id = irq_id;
Eric Holmbergdeace152012-07-25 12:17:11 -06003343 private_irq->irq_id = irq_id;
Angshuman Sarkarbad32df2012-02-01 19:52:52 +05303344 ret_wake = enable_irq_wake(irq_id);
3345 if (ret_wake < 0) {
3346 pr_err("smd: enable_irq_wake failed on %s",
3347 platform_irq->irq_name);
3348 }
3349 }
3350
3351 return ret;
3352}
3353
Jeff Hugobdc734d2012-03-26 16:05:39 -06003354int sort_cmp_func(const void *a, const void *b)
3355{
3356 struct smem_area *left = (struct smem_area *)(a);
3357 struct smem_area *right = (struct smem_area *)(b);
3358
3359 return left->phys_addr - right->phys_addr;
3360}
3361
/*
 * smd_core_platform_init() - platform-data-driven SMD/SMSM bring-up.
 *
 * Maps and sorts the shared-memory regions described in platform data
 * (if any), then registers the SMD and (optionally) SMSM interrupts for
 * every subsystem config and records each edge's subsystem name.
 *
 * Return: 0 on success, negative errno on failure (all IRQs requested
 * so far are freed and all mapped regions unmapped).
 */
int smd_core_platform_init(struct platform_device *pdev)
{
	int i;
	int ret;
	uint32_t num_ss;
	struct smd_platform *smd_platform_data;
	struct smd_subsystem_config *smd_ss_config_list;
	struct smd_subsystem_config *cfg;
	int err_ret = 0;
	struct smd_smem_regions *smd_smem_areas;
	int smem_idx = 0;

	smd_platform_data = pdev->dev.platform_data;
	num_ss = smd_platform_data->num_ss_configs;
	smd_ss_config_list = smd_platform_data->smd_ss_configs;

	if (smd_platform_data->smd_ssr_config)
		disable_smsm_reset_handshake = smd_platform_data->
			   smd_ssr_config->disable_smsm_reset_handshake;

	/* optional: map every shared-memory region described by the board */
	smd_smem_areas = smd_platform_data->smd_smem_areas;
	if (smd_smem_areas) {
		num_smem_areas = smd_platform_data->num_smem_areas;
		smem_areas = kmalloc(sizeof(struct smem_area) * num_smem_areas,
				GFP_KERNEL);
		if (!smem_areas) {
			pr_err("%s: smem_areas kmalloc failed\n", __func__);
			err_ret = -ENOMEM;
			goto smem_areas_alloc_fail;
		}

		for (smem_idx = 0; smem_idx < num_smem_areas; ++smem_idx) {
			smem_areas[smem_idx].phys_addr =
					smd_smem_areas[smem_idx].phys_addr;
			smem_areas[smem_idx].size =
					smd_smem_areas[smem_idx].size;
			smem_areas[smem_idx].virt_addr = ioremap_nocache(
				(unsigned long)(smem_areas[smem_idx].phys_addr),
				smem_areas[smem_idx].size);
			if (!smem_areas[smem_idx].virt_addr) {
				pr_err("%s: ioremap_nocache() of addr:%p"
					" size: %x\n", __func__,
					smem_areas[smem_idx].phys_addr,
					smem_areas[smem_idx].size);
				err_ret = -ENOMEM;
				/* ++smem_idx so the smem_failed unwind
				 * (which starts at smem_idx - 1) unmaps
				 * everything mapped so far */
				++smem_idx;
				goto smem_failed;
			}
		}
		/* sorted by phys_addr so lookups can assume ordering */
		sort(smem_areas, num_smem_areas,
				sizeof(struct smem_area),
				sort_cmp_func, NULL);
	}

	for (i = 0; i < num_ss; i++) {
		cfg = &smd_ss_config_list[i];

		ret = intr_init(
			&private_intr_config[cfg->irq_config_id].smd,
			&cfg->smd_int,
			pdev
			);

		if (ret < 0) {
			err_ret = ret;
			pr_err("smd: register irq failed on %s\n",
				cfg->smd_int.irq_name);
			goto intr_failed;
		}

		/* only init smsm structs if this edge supports smsm */
		if (cfg->smsm_int.irq_id)
			ret = intr_init(
				&private_intr_config[cfg->irq_config_id].smsm,
				&cfg->smsm_int,
				pdev
				);

		/* NOTE(review): when smsm_int.irq_id == 0 this re-checks
		 * the smd intr_init result, which is known >= 0 here, so
		 * the branch is dead but fragile — confirm intent. */
		if (ret < 0) {
			err_ret = ret;
			pr_err("smd: register irq failed on %s\n",
				cfg->smsm_int.irq_name);
			goto intr_failed;
		}

		if (cfg->subsys_name)
			strlcpy(edge_to_pids[cfg->edge].subsys_name,
				cfg->subsys_name, SMD_MAX_CH_NAME_LEN);
	}


	SMD_INFO("smd_core_platform_init() done\n");
	return 0;

intr_failed:
	pr_err("smd: deregistering IRQs\n");
	/* NOTE(review): this sweeps ALL num_ss entries, including ones not
	 * yet processed — presumably platform data initializes unused
	 * irq_id fields to a negative value (or 0 means "no smsm"); the
	 * irq_id >= 0 guard is what keeps this safe. Verify with the
	 * board files. */
	for (i = 0; i < num_ss; ++i) {
		cfg = &smd_ss_config_list[i];

		if (cfg->smd_int.irq_id >= 0)
			free_irq(cfg->smd_int.irq_id,
				(void *)cfg->smd_int.dev_id
				);
		if (cfg->smsm_int.irq_id >= 0)
			free_irq(cfg->smsm_int.irq_id,
				(void *)cfg->smsm_int.dev_id
				);
	}
smem_failed:
	/* unmap in reverse; smem_idx is 0 here if no regions were mapped,
	 * so the loop body is skipped and kfree(NULL) is a no-op */
	for (smem_idx = smem_idx - 1; smem_idx >= 0; --smem_idx)
		iounmap(smem_areas[smem_idx].virt_addr);
	kfree(smem_areas);
smem_areas_alloc_fail:
	return err_ret;
}
3477
Gregory Bean4416e9e2010-07-28 10:22:12 -07003478static int __devinit msm_smd_probe(struct platform_device *pdev)
Brian Swetland2eb44eb2008-09-29 16:00:48 -07003479{
Angshuman Sarkarbad32df2012-02-01 19:52:52 +05303480 int ret;
Daniel Walker0aec66d2010-03-18 12:31:08 -07003481
Angshuman Sarkarbad32df2012-02-01 19:52:52 +05303482 SMD_INFO("smd probe\n");
Brian Swetland2eb44eb2008-09-29 16:00:48 -07003483 INIT_WORK(&probe_work, smd_channel_probe_worker);
3484
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003485 channel_close_wq = create_singlethread_workqueue("smd_channel_close");
3486 if (IS_ERR(channel_close_wq)) {
3487 pr_err("%s: create_singlethread_workqueue ENOMEM\n", __func__);
3488 return -ENOMEM;
3489 }
3490
3491 if (smsm_init()) {
3492 pr_err("smsm_init() failed\n");
Brian Swetland2eb44eb2008-09-29 16:00:48 -07003493 return -1;
3494 }
3495
Angshuman Sarkarbad32df2012-02-01 19:52:52 +05303496 if (pdev) {
3497 if (pdev->dev.of_node) {
3498 pr_err("SMD: Device tree not currently supported\n");
3499 return -ENODEV;
3500 } else if (pdev->dev.platform_data) {
3501 ret = smd_core_platform_init(pdev);
3502 if (ret) {
3503 pr_err(
3504 "SMD: smd_core_platform_init() failed\n");
3505 return -ENODEV;
3506 }
3507 } else {
3508 ret = smd_core_init();
3509 if (ret) {
3510 pr_err("smd_core_init() failed\n");
3511 return -ENODEV;
3512 }
3513 }
3514 } else {
3515 pr_err("SMD: PDEV not found\n");
3516 return -ENODEV;
Brian Swetland2eb44eb2008-09-29 16:00:48 -07003517 }
Iliyan Malchev1207bab2009-11-15 18:16:43 -08003518
Brian Swetland2eb44eb2008-09-29 16:00:48 -07003519 smd_initialized = 1;
3520
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003521 smd_alloc_loopback_channel();
Eric Holmbergc33d4ab2011-10-24 10:28:25 -06003522 smsm_irq_handler(0, 0);
3523 tasklet_schedule(&smd_fake_irq_tasklet);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003524
Brian Swetland2eb44eb2008-09-29 16:00:48 -07003525 return 0;
3526}
3527
static int restart_notifier_cb(struct notifier_block *this,
				unsigned long code,
				void *data);

/*
 * One entry per restartable remote subsystem; each is registered with
 * the subsystem-restart notifier framework at late_initcall time.
 * Note the "gss" entry deliberately maps to SMD_MODEM — presumably GSS
 * shares the modem's SMD channels on that target; confirm against the
 * board configuration.
 */
static struct restart_notifier_block restart_notifiers[] = {
	{SMD_MODEM, "modem", .nb.notifier_call = restart_notifier_cb},
	{SMD_Q6, "lpass", .nb.notifier_call = restart_notifier_cb},
	{SMD_WCNSS, "riva", .nb.notifier_call = restart_notifier_cb},
	{SMD_DSPS, "dsps", .nb.notifier_call = restart_notifier_cb},
	{SMD_MODEM, "gss", .nb.notifier_call = restart_notifier_cb},
};
3539
3540static int restart_notifier_cb(struct notifier_block *this,
3541 unsigned long code,
3542 void *data)
3543{
3544 if (code == SUBSYS_AFTER_SHUTDOWN) {
3545 struct restart_notifier_block *notifier;
3546
3547 notifier = container_of(this,
3548 struct restart_notifier_block, nb);
3549 SMD_INFO("%s: ssrestart for processor %d ('%s')\n",
3550 __func__, notifier->processor,
3551 notifier->name);
3552
3553 smd_channel_reset(notifier->processor);
3554 }
3555
3556 return NOTIFY_DONE;
3557}
3558
3559static __init int modem_restart_late_init(void)
3560{
3561 int i;
3562 void *handle;
3563 struct restart_notifier_block *nb;
3564
3565 for (i = 0; i < ARRAY_SIZE(restart_notifiers); i++) {
3566 nb = &restart_notifiers[i];
3567 handle = subsys_notif_register_notifier(nb->name, &nb->nb);
3568 SMD_DBG("%s: registering notif for '%s', handle=%p\n",
3569 __func__, nb->name, handle);
3570 }
3571 return 0;
3572}
3573late_initcall(modem_restart_late_init);
3574
/*
 * Platform driver glue: bound to the platform device whose name matches
 * MODULE_NAME (defined earlier in this file); probe does all bring-up.
 */
static struct platform_driver msm_smd_driver = {
	.probe = msm_smd_probe,
	.driver = {
		.name = MODULE_NAME,
		.owner = THIS_MODULE,
	},
};
3582
Mahesh Sivasubramaniand9041b02012-05-09 12:57:23 -06003583int __init msm_smd_init(void)
Brian Swetland2eb44eb2008-09-29 16:00:48 -07003584{
Mahesh Sivasubramaniand9041b02012-05-09 12:57:23 -06003585 static bool registered;
3586
3587 if (registered)
3588 return 0;
3589
3590 registered = true;
Brian Swetland2eb44eb2008-09-29 16:00:48 -07003591 return platform_driver_register(&msm_smd_driver);
3592}
3593
3594module_init(msm_smd_init);
3595
/* Module metadata */
MODULE_DESCRIPTION("MSM Shared Memory Core");
MODULE_AUTHOR("Brian Swetland <swetland@google.com>");
MODULE_LICENSE("GPL");