blob: 796b5f9cc84a6ba2c40eee498c201b599cb4b0cf [file] [log] [blame]
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001/* arch/arm/mach-msm/smd.c
2 *
3 * Copyright (C) 2007 Google, Inc.
Duy Truonge833aca2013-02-12 13:35:08 -08004 * Copyright (c) 2008-2012, The Linux Foundation. All rights reserved.
Brian Swetland2eb44eb2008-09-29 16:00:48 -07005 * Author: Brian Swetland <swetland@google.com>
6 *
7 * This software is licensed under the terms of the GNU General Public
8 * License version 2, as published by the Free Software Foundation, and
9 * may be copied, distributed, and modified under those terms.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 */
17
18#include <linux/platform_device.h>
19#include <linux/module.h>
20#include <linux/fs.h>
21#include <linux/cdev.h>
22#include <linux/device.h>
23#include <linux/wait.h>
24#include <linux/interrupt.h>
25#include <linux/irq.h>
26#include <linux/list.h>
27#include <linux/slab.h>
Brian Swetland2eb44eb2008-09-29 16:00:48 -070028#include <linux/delay.h>
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070029#include <linux/io.h>
30#include <linux/termios.h>
31#include <linux/ctype.h>
32#include <linux/remote_spinlock.h>
33#include <linux/uaccess.h>
Eric Holmbergc7e8daf2011-12-28 11:49:21 -070034#include <linux/kfifo.h>
Eric Holmberg59a9f942012-03-19 10:04:22 -060035#include <linux/wakelock.h>
Karthikeyan Ramasubramanian7069c482012-03-22 09:21:20 -060036#include <linux/notifier.h>
Jeff Hugobdc734d2012-03-26 16:05:39 -060037#include <linux/sort.h>
Eric Holmberg1255fe12012-10-04 13:37:28 -060038#include <linux/suspend.h>
Brian Swetland2eb44eb2008-09-29 16:00:48 -070039#include <mach/msm_smd.h>
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070040#include <mach/msm_iomap.h>
Brian Swetland2eb44eb2008-09-29 16:00:48 -070041#include <mach/system.h>
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070042#include <mach/subsystem_notif.h>
Angshuman Sarkaread67bd2011-09-21 20:13:12 +053043#include <mach/socinfo.h>
Steve Mucklef132c6c2012-06-06 18:30:57 -070044#include <mach/proc_comm.h>
Ram Somani8b9589f2012-04-03 12:07:18 +053045#include <asm/cacheflush.h>
Brian Swetland2eb44eb2008-09-29 16:00:48 -070046
47#include "smd_private.h"
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070048#include "modem_notifier.h"
Brian Swetland2eb44eb2008-09-29 16:00:48 -070049
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070050#if defined(CONFIG_ARCH_QSD8X50) || defined(CONFIG_ARCH_MSM8X60) \
Jeff Hugo56b933a2011-09-28 14:42:05 -060051 || defined(CONFIG_ARCH_MSM8960) || defined(CONFIG_ARCH_FSM9XXX) \
Jeff Hugo0c0f5e92011-09-28 13:55:45 -060052 || defined(CONFIG_ARCH_MSM9615) || defined(CONFIG_ARCH_APQ8064)
Brian Swetland37521a32009-07-01 18:30:47 -070053#define CONFIG_QDSP6 1
54#endif
55
Jeff Hugo0c0f5e92011-09-28 13:55:45 -060056#if defined(CONFIG_ARCH_MSM8X60) || defined(CONFIG_ARCH_MSM8960) \
57 || defined(CONFIG_ARCH_APQ8064)
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070058#define CONFIG_DSPS 1
59#endif
60
Jeff Hugo0c0f5e92011-09-28 13:55:45 -060061#if defined(CONFIG_ARCH_MSM8960) \
62 || defined(CONFIG_ARCH_APQ8064)
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070063#define CONFIG_WCNSS 1
Jeff Hugo6a8057c2011-08-16 13:47:12 -060064#define CONFIG_DSPS_SMSM 1
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070065#endif
Brian Swetland2eb44eb2008-09-29 16:00:48 -070066
67#define MODULE_NAME "msm_smd"
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070068#define SMEM_VERSION 0x000B
69#define SMD_VERSION 0x00020000
Eric Holmbergc7e8daf2011-12-28 11:49:21 -070070#define SMSM_SNAPSHOT_CNT 64
Eric Holmbergda31d042012-03-28 14:01:02 -060071#define SMSM_SNAPSHOT_SIZE ((SMSM_NUM_ENTRIES + 1) * 4)
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070072
73uint32_t SMSM_NUM_ENTRIES = 8;
74uint32_t SMSM_NUM_HOSTS = 3;
Brian Swetland2eb44eb2008-09-29 16:00:48 -070075
Eric Holmberge8a39322012-04-03 15:14:02 -060076/* Legacy SMSM interrupt notifications */
77#define LEGACY_MODEM_SMSM_MASK (SMSM_RESET | SMSM_INIT | SMSM_SMDINIT \
78 | SMSM_RUN | SMSM_SYSTEM_DOWNLOAD)
Brian Swetland2eb44eb2008-09-29 16:00:48 -070079
80enum {
81 MSM_SMD_DEBUG = 1U << 0,
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070082 MSM_SMSM_DEBUG = 1U << 1,
83 MSM_SMD_INFO = 1U << 2,
84 MSM_SMSM_INFO = 1U << 3,
Eric Holmberg98c6c642012-02-24 11:29:35 -070085 MSM_SMx_POWER_INFO = 1U << 4,
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070086};
87
88struct smsm_shared_info {
89 uint32_t *state;
90 uint32_t *intr_mask;
91 uint32_t *intr_mux;
92};
93
94static struct smsm_shared_info smsm_info;
Eric Holmberg59a9f942012-03-19 10:04:22 -060095static struct kfifo smsm_snapshot_fifo;
96static struct wake_lock smsm_snapshot_wakelock;
97static int smsm_snapshot_count;
98static DEFINE_SPINLOCK(smsm_snapshot_count_lock);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070099
100struct smsm_size_info_type {
101 uint32_t num_hosts;
102 uint32_t num_entries;
103 uint32_t reserved0;
104 uint32_t reserved1;
105};
106
107struct smsm_state_cb_info {
108 struct list_head cb_list;
109 uint32_t mask;
110 void *data;
111 void (*notify)(void *data, uint32_t old_state, uint32_t new_state);
112};
113
114struct smsm_state_info {
115 struct list_head callbacks;
116 uint32_t last_value;
Eric Holmberge8a39322012-04-03 15:14:02 -0600117 uint32_t intr_mask_set;
118 uint32_t intr_mask_clear;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700119};
120
Angshuman Sarkarbad32df2012-02-01 19:52:52 +0530121struct interrupt_config_item {
122 /* must be initialized */
123 irqreturn_t (*irq_handler)(int req, void *data);
124 /* outgoing interrupt config (set from platform data) */
125 uint32_t out_bit_pos;
126 void __iomem *out_base;
127 uint32_t out_offset;
Eric Holmbergdeace152012-07-25 12:17:11 -0600128 int irq_id;
Angshuman Sarkarbad32df2012-02-01 19:52:52 +0530129};
130
131struct interrupt_config {
132 struct interrupt_config_item smd;
133 struct interrupt_config_item smsm;
134};
135
136static irqreturn_t smd_modem_irq_handler(int irq, void *data);
Eric Holmberg98c6c642012-02-24 11:29:35 -0700137static irqreturn_t smsm_modem_irq_handler(int irq, void *data);
Angshuman Sarkarbad32df2012-02-01 19:52:52 +0530138static irqreturn_t smd_dsp_irq_handler(int irq, void *data);
Eric Holmberg98c6c642012-02-24 11:29:35 -0700139static irqreturn_t smsm_dsp_irq_handler(int irq, void *data);
Angshuman Sarkarbad32df2012-02-01 19:52:52 +0530140static irqreturn_t smd_dsps_irq_handler(int irq, void *data);
Eric Holmberg98c6c642012-02-24 11:29:35 -0700141static irqreturn_t smsm_dsps_irq_handler(int irq, void *data);
Angshuman Sarkarbad32df2012-02-01 19:52:52 +0530142static irqreturn_t smd_wcnss_irq_handler(int irq, void *data);
Eric Holmberg98c6c642012-02-24 11:29:35 -0700143static irqreturn_t smsm_wcnss_irq_handler(int irq, void *data);
Jeff Hugo918b2dc2012-03-21 13:42:09 -0600144static irqreturn_t smd_rpm_irq_handler(int irq, void *data);
Angshuman Sarkarbad32df2012-02-01 19:52:52 +0530145static irqreturn_t smsm_irq_handler(int irq, void *data);
146
147static struct interrupt_config private_intr_config[NUM_SMD_SUBSYSTEMS] = {
148 [SMD_MODEM] = {
149 .smd.irq_handler = smd_modem_irq_handler,
Eric Holmberg98c6c642012-02-24 11:29:35 -0700150 .smsm.irq_handler = smsm_modem_irq_handler,
Angshuman Sarkarbad32df2012-02-01 19:52:52 +0530151 },
152 [SMD_Q6] = {
153 .smd.irq_handler = smd_dsp_irq_handler,
Eric Holmberg98c6c642012-02-24 11:29:35 -0700154 .smsm.irq_handler = smsm_dsp_irq_handler,
Angshuman Sarkarbad32df2012-02-01 19:52:52 +0530155 },
156 [SMD_DSPS] = {
157 .smd.irq_handler = smd_dsps_irq_handler,
Eric Holmberg98c6c642012-02-24 11:29:35 -0700158 .smsm.irq_handler = smsm_dsps_irq_handler,
Angshuman Sarkarbad32df2012-02-01 19:52:52 +0530159 },
160 [SMD_WCNSS] = {
161 .smd.irq_handler = smd_wcnss_irq_handler,
Eric Holmberg98c6c642012-02-24 11:29:35 -0700162 .smsm.irq_handler = smsm_wcnss_irq_handler,
Angshuman Sarkarbad32df2012-02-01 19:52:52 +0530163 },
Jeff Hugo918b2dc2012-03-21 13:42:09 -0600164 [SMD_RPM] = {
165 .smd.irq_handler = smd_rpm_irq_handler,
166 .smsm.irq_handler = NULL, /* does not support smsm */
167 },
Angshuman Sarkarbad32df2012-02-01 19:52:52 +0530168};
Jeff Hugobdc734d2012-03-26 16:05:39 -0600169
170struct smem_area {
171 void *phys_addr;
172 unsigned size;
173 void __iomem *virt_addr;
174};
175static uint32_t num_smem_areas;
176static struct smem_area *smem_areas;
177static void *smem_range_check(void *base, unsigned offset);
178
Eric Holmberg7ad623a2012-03-01 14:41:10 -0700179struct interrupt_stat interrupt_stats[NUM_SMD_SUBSYSTEMS];
Angshuman Sarkarbad32df2012-02-01 19:52:52 +0530180
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700181#define SMSM_STATE_ADDR(entry) (smsm_info.state + entry)
182#define SMSM_INTR_MASK_ADDR(entry, host) (smsm_info.intr_mask + \
183 entry * SMSM_NUM_HOSTS + host)
184#define SMSM_INTR_MUX_ADDR(entry) (smsm_info.intr_mux + entry)
185
186/* Internal definitions which are not exported in some targets */
187enum {
188 SMSM_APPS_DEM_I = 3,
Brian Swetland2eb44eb2008-09-29 16:00:48 -0700189};
190
191static int msm_smd_debug_mask;
Brian Swetland2eb44eb2008-09-29 16:00:48 -0700192module_param_named(debug_mask, msm_smd_debug_mask,
193 int, S_IRUGO | S_IWUSR | S_IWGRP);
194
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700195#if defined(CONFIG_MSM_SMD_DEBUG)
196#define SMD_DBG(x...) do { \
197 if (msm_smd_debug_mask & MSM_SMD_DEBUG) \
198 printk(KERN_DEBUG x); \
199 } while (0)
200
201#define SMSM_DBG(x...) do { \
202 if (msm_smd_debug_mask & MSM_SMSM_DEBUG) \
203 printk(KERN_DEBUG x); \
204 } while (0)
205
206#define SMD_INFO(x...) do { \
207 if (msm_smd_debug_mask & MSM_SMD_INFO) \
208 printk(KERN_INFO x); \
209 } while (0)
210
211#define SMSM_INFO(x...) do { \
212 if (msm_smd_debug_mask & MSM_SMSM_INFO) \
213 printk(KERN_INFO x); \
214 } while (0)
Eric Holmberg98c6c642012-02-24 11:29:35 -0700215#define SMx_POWER_INFO(x...) do { \
216 if (msm_smd_debug_mask & MSM_SMx_POWER_INFO) \
217 printk(KERN_INFO x); \
218 } while (0)
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700219#else
220#define SMD_DBG(x...) do { } while (0)
221#define SMSM_DBG(x...) do { } while (0)
222#define SMD_INFO(x...) do { } while (0)
223#define SMSM_INFO(x...) do { } while (0)
Eric Holmberg98c6c642012-02-24 11:29:35 -0700224#define SMx_POWER_INFO(x...) do { } while (0)
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700225#endif
226
Brian Swetland2eb44eb2008-09-29 16:00:48 -0700227static unsigned last_heap_free = 0xffffffff;
228
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700229static inline void smd_write_intr(unsigned int val,
230 const void __iomem *addr);
231
232#if defined(CONFIG_ARCH_MSM7X30)
233#define MSM_TRIG_A2M_SMD_INT \
Taniya Das298de8c2012-02-16 11:45:31 +0530234 (smd_write_intr(1 << 0, MSM_APCS_GCC_BASE + 0x8))
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700235#define MSM_TRIG_A2Q6_SMD_INT \
Taniya Das298de8c2012-02-16 11:45:31 +0530236 (smd_write_intr(1 << 8, MSM_APCS_GCC_BASE + 0x8))
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700237#define MSM_TRIG_A2M_SMSM_INT \
Taniya Das298de8c2012-02-16 11:45:31 +0530238 (smd_write_intr(1 << 5, MSM_APCS_GCC_BASE + 0x8))
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700239#define MSM_TRIG_A2Q6_SMSM_INT \
Taniya Das298de8c2012-02-16 11:45:31 +0530240 (smd_write_intr(1 << 8, MSM_APCS_GCC_BASE + 0x8))
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700241#define MSM_TRIG_A2DSPS_SMD_INT
Jeff Hugo6a8057c2011-08-16 13:47:12 -0600242#define MSM_TRIG_A2DSPS_SMSM_INT
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700243#define MSM_TRIG_A2WCNSS_SMD_INT
244#define MSM_TRIG_A2WCNSS_SMSM_INT
245#elif defined(CONFIG_ARCH_MSM8X60)
246#define MSM_TRIG_A2M_SMD_INT \
247 (smd_write_intr(1 << 3, MSM_GCC_BASE + 0x8))
248#define MSM_TRIG_A2Q6_SMD_INT \
249 (smd_write_intr(1 << 15, MSM_GCC_BASE + 0x8))
250#define MSM_TRIG_A2M_SMSM_INT \
251 (smd_write_intr(1 << 4, MSM_GCC_BASE + 0x8))
252#define MSM_TRIG_A2Q6_SMSM_INT \
253 (smd_write_intr(1 << 14, MSM_GCC_BASE + 0x8))
254#define MSM_TRIG_A2DSPS_SMD_INT \
255 (smd_write_intr(1, MSM_SIC_NON_SECURE_BASE + 0x4080))
Jeff Hugo6a8057c2011-08-16 13:47:12 -0600256#define MSM_TRIG_A2DSPS_SMSM_INT
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700257#define MSM_TRIG_A2WCNSS_SMD_INT
258#define MSM_TRIG_A2WCNSS_SMSM_INT
Jeff Hugo56b933a2011-09-28 14:42:05 -0600259#elif defined(CONFIG_ARCH_MSM9615)
260#define MSM_TRIG_A2M_SMD_INT \
261 (smd_write_intr(1 << 3, MSM_APCS_GCC_BASE + 0x8))
262#define MSM_TRIG_A2Q6_SMD_INT \
263 (smd_write_intr(1 << 15, MSM_APCS_GCC_BASE + 0x8))
264#define MSM_TRIG_A2M_SMSM_INT \
265 (smd_write_intr(1 << 4, MSM_APCS_GCC_BASE + 0x8))
266#define MSM_TRIG_A2Q6_SMSM_INT \
267 (smd_write_intr(1 << 14, MSM_APCS_GCC_BASE + 0x8))
268#define MSM_TRIG_A2DSPS_SMD_INT
269#define MSM_TRIG_A2DSPS_SMSM_INT
270#define MSM_TRIG_A2WCNSS_SMD_INT
271#define MSM_TRIG_A2WCNSS_SMSM_INT
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700272#elif defined(CONFIG_ARCH_FSM9XXX)
273#define MSM_TRIG_A2Q6_SMD_INT \
274 (smd_write_intr(1 << 10, MSM_GCC_BASE + 0x8))
275#define MSM_TRIG_A2Q6_SMSM_INT \
276 (smd_write_intr(1 << 10, MSM_GCC_BASE + 0x8))
277#define MSM_TRIG_A2M_SMD_INT \
278 (smd_write_intr(1 << 0, MSM_GCC_BASE + 0x8))
279#define MSM_TRIG_A2M_SMSM_INT \
280 (smd_write_intr(1 << 5, MSM_GCC_BASE + 0x8))
281#define MSM_TRIG_A2DSPS_SMD_INT
Jeff Hugo6a8057c2011-08-16 13:47:12 -0600282#define MSM_TRIG_A2DSPS_SMSM_INT
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700283#define MSM_TRIG_A2WCNSS_SMD_INT
284#define MSM_TRIG_A2WCNSS_SMSM_INT
Eric Holmberg73d45462012-02-28 10:41:31 -0700285#elif defined(CONFIG_ARCH_MSM7X01A) || defined(CONFIG_ARCH_MSM7x25)
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700286#define MSM_TRIG_A2M_SMD_INT \
287 (smd_write_intr(1, MSM_CSR_BASE + 0x400 + (0) * 4))
Eric Holmberg73d45462012-02-28 10:41:31 -0700288#define MSM_TRIG_A2Q6_SMD_INT
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700289#define MSM_TRIG_A2M_SMSM_INT \
290 (smd_write_intr(1, MSM_CSR_BASE + 0x400 + (5) * 4))
Eric Holmberg73d45462012-02-28 10:41:31 -0700291#define MSM_TRIG_A2Q6_SMSM_INT
292#define MSM_TRIG_A2DSPS_SMD_INT
293#define MSM_TRIG_A2DSPS_SMSM_INT
294#define MSM_TRIG_A2WCNSS_SMD_INT
295#define MSM_TRIG_A2WCNSS_SMSM_INT
296#elif defined(CONFIG_ARCH_MSM7X27) || defined(CONFIG_ARCH_MSM7X27A)
297#define MSM_TRIG_A2M_SMD_INT \
298 (smd_write_intr(1, MSM_CSR_BASE + 0x400 + (0) * 4))
299#define MSM_TRIG_A2Q6_SMD_INT
300#define MSM_TRIG_A2M_SMSM_INT \
301 (smd_write_intr(1, MSM_CSR_BASE + 0x400 + (5) * 4))
302#define MSM_TRIG_A2Q6_SMSM_INT
303#define MSM_TRIG_A2DSPS_SMD_INT
304#define MSM_TRIG_A2DSPS_SMSM_INT
305#define MSM_TRIG_A2WCNSS_SMD_INT
306#define MSM_TRIG_A2WCNSS_SMSM_INT
307#else /* use platform device / device tree configuration */
308#define MSM_TRIG_A2M_SMD_INT
309#define MSM_TRIG_A2Q6_SMD_INT
310#define MSM_TRIG_A2M_SMSM_INT
311#define MSM_TRIG_A2Q6_SMSM_INT
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700312#define MSM_TRIG_A2DSPS_SMD_INT
Jeff Hugo6a8057c2011-08-16 13:47:12 -0600313#define MSM_TRIG_A2DSPS_SMSM_INT
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700314#define MSM_TRIG_A2WCNSS_SMD_INT
315#define MSM_TRIG_A2WCNSS_SMSM_INT
Brian Swetland37521a32009-07-01 18:30:47 -0700316#endif
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700317
Jeff Hugoee40b152012-02-09 17:39:47 -0700318/*
319 * stub out legacy macros if they are not being used so that the legacy
320 * code compiles even though it is not used
321 *
322 * these definitions should not be used in active code and will cause
323 * an early failure
324 */
325#ifndef INT_A9_M2A_0
326#define INT_A9_M2A_0 -1
327#endif
328#ifndef INT_A9_M2A_5
329#define INT_A9_M2A_5 -1
330#endif
331#ifndef INT_ADSP_A11
332#define INT_ADSP_A11 -1
333#endif
334#ifndef INT_ADSP_A11_SMSM
335#define INT_ADSP_A11_SMSM -1
336#endif
337#ifndef INT_DSPS_A11
338#define INT_DSPS_A11 -1
339#endif
340#ifndef INT_DSPS_A11_SMSM
341#define INT_DSPS_A11_SMSM -1
342#endif
343#ifndef INT_WCNSS_A11
344#define INT_WCNSS_A11 -1
345#endif
346#ifndef INT_WCNSS_A11_SMSM
347#define INT_WCNSS_A11_SMSM -1
348#endif
349
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700350#define SMD_LOOPBACK_CID 100
351
Eric Holmbergf6d7d1a2011-09-23 18:31:04 -0600352#define SMEM_SPINLOCK_SMEM_ALLOC "S:3"
353static remote_spinlock_t remote_spinlock;
354
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700355static LIST_HEAD(smd_ch_list_loopback);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700356static void smd_fake_irq_handler(unsigned long arg);
Eric Holmbergda31d042012-03-28 14:01:02 -0600357static void smsm_cb_snapshot(uint32_t use_wakelock);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700358
Karthikeyan Ramasubramanian3249a222012-04-18 17:16:49 -0600359static struct workqueue_struct *smsm_cb_wq;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700360static void notify_smsm_cb_clients_worker(struct work_struct *work);
361static DECLARE_WORK(smsm_cb_work, notify_smsm_cb_clients_worker);
Eric Holmbergc8002902011-09-16 13:55:57 -0600362static DEFINE_MUTEX(smsm_lock);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700363static struct smsm_state_info *smsm_states;
Angshuman Sarkar7ee0dca2011-08-22 21:37:34 +0530364static int spinlocks_initialized;
Karthikeyan Ramasubramanianed92ac22012-08-22 18:08:14 -0600365
366/**
367 * Variables to indicate smd module initialization.
368 * Dependents to register for smd module init notifier.
369 */
370static int smd_module_inited;
371static RAW_NOTIFIER_HEAD(smd_module_init_notifier_list);
372static DEFINE_MUTEX(smd_module_init_notifier_lock);
373static void smd_module_init_notify(uint32_t state, void *data);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700374
375static inline void smd_write_intr(unsigned int val,
376 const void __iomem *addr)
377{
378 wmb();
379 __raw_writel(val, addr);
Brian Swetland2eb44eb2008-09-29 16:00:48 -0700380}
381
Brian Swetland5b0f5a32009-04-26 18:38:49 -0700382static inline void notify_modem_smd(void)
Brian Swetland2eb44eb2008-09-29 16:00:48 -0700383{
Angshuman Sarkarbad32df2012-02-01 19:52:52 +0530384 static const struct interrupt_config_item *intr
385 = &private_intr_config[SMD_MODEM].smd;
Eric Holmberg7ad623a2012-03-01 14:41:10 -0700386 if (intr->out_base) {
387 ++interrupt_stats[SMD_MODEM].smd_out_config_count;
Angshuman Sarkarbad32df2012-02-01 19:52:52 +0530388 smd_write_intr(intr->out_bit_pos,
389 intr->out_base + intr->out_offset);
Eric Holmberg7ad623a2012-03-01 14:41:10 -0700390 } else {
391 ++interrupt_stats[SMD_MODEM].smd_out_hardcode_count;
Angshuman Sarkarbad32df2012-02-01 19:52:52 +0530392 MSM_TRIG_A2M_SMD_INT;
Eric Holmberg7ad623a2012-03-01 14:41:10 -0700393 }
Brian Swetland2eb44eb2008-09-29 16:00:48 -0700394}
395
Brian Swetland5b0f5a32009-04-26 18:38:49 -0700396static inline void notify_dsp_smd(void)
397{
Angshuman Sarkarbad32df2012-02-01 19:52:52 +0530398 static const struct interrupt_config_item *intr
399 = &private_intr_config[SMD_Q6].smd;
Eric Holmberg7ad623a2012-03-01 14:41:10 -0700400 if (intr->out_base) {
401 ++interrupt_stats[SMD_Q6].smd_out_config_count;
Angshuman Sarkarbad32df2012-02-01 19:52:52 +0530402 smd_write_intr(intr->out_bit_pos,
403 intr->out_base + intr->out_offset);
Eric Holmberg7ad623a2012-03-01 14:41:10 -0700404 } else {
405 ++interrupt_stats[SMD_Q6].smd_out_hardcode_count;
Angshuman Sarkarbad32df2012-02-01 19:52:52 +0530406 MSM_TRIG_A2Q6_SMD_INT;
Eric Holmberg7ad623a2012-03-01 14:41:10 -0700407 }
Brian Swetland5b0f5a32009-04-26 18:38:49 -0700408}
409
Angshuman Sarkarbad32df2012-02-01 19:52:52 +0530410static inline void notify_dsps_smd(void)
411{
412 static const struct interrupt_config_item *intr
413 = &private_intr_config[SMD_DSPS].smd;
Eric Holmberg7ad623a2012-03-01 14:41:10 -0700414 if (intr->out_base) {
415 ++interrupt_stats[SMD_DSPS].smd_out_config_count;
Angshuman Sarkarbad32df2012-02-01 19:52:52 +0530416 smd_write_intr(intr->out_bit_pos,
417 intr->out_base + intr->out_offset);
Eric Holmberg7ad623a2012-03-01 14:41:10 -0700418 } else {
419 ++interrupt_stats[SMD_DSPS].smd_out_hardcode_count;
Angshuman Sarkarbad32df2012-02-01 19:52:52 +0530420 MSM_TRIG_A2DSPS_SMD_INT;
Eric Holmberg7ad623a2012-03-01 14:41:10 -0700421 }
Angshuman Sarkarbad32df2012-02-01 19:52:52 +0530422}
423
424static inline void notify_wcnss_smd(void)
425{
426 static const struct interrupt_config_item *intr
427 = &private_intr_config[SMD_WCNSS].smd;
Angshuman Sarkarbad32df2012-02-01 19:52:52 +0530428
Eric Holmberg7ad623a2012-03-01 14:41:10 -0700429 if (intr->out_base) {
430 ++interrupt_stats[SMD_WCNSS].smd_out_config_count;
Angshuman Sarkarbad32df2012-02-01 19:52:52 +0530431 smd_write_intr(intr->out_bit_pos,
432 intr->out_base + intr->out_offset);
Eric Holmberg7ad623a2012-03-01 14:41:10 -0700433 } else {
434 ++interrupt_stats[SMD_WCNSS].smd_out_hardcode_count;
Angshuman Sarkarbad32df2012-02-01 19:52:52 +0530435 MSM_TRIG_A2WCNSS_SMD_INT;
Eric Holmberg7ad623a2012-03-01 14:41:10 -0700436 }
Angshuman Sarkarbad32df2012-02-01 19:52:52 +0530437}
438
Jeff Hugo918b2dc2012-03-21 13:42:09 -0600439static inline void notify_rpm_smd(void)
440{
441 static const struct interrupt_config_item *intr
442 = &private_intr_config[SMD_RPM].smd;
443
444 if (intr->out_base) {
445 ++interrupt_stats[SMD_RPM].smd_out_config_count;
446 smd_write_intr(intr->out_bit_pos,
447 intr->out_base + intr->out_offset);
448 }
449}
450
Angshuman Sarkarbad32df2012-02-01 19:52:52 +0530451static inline void notify_modem_smsm(void)
452{
453 static const struct interrupt_config_item *intr
454 = &private_intr_config[SMD_MODEM].smsm;
Eric Holmberg7ad623a2012-03-01 14:41:10 -0700455 if (intr->out_base) {
456 ++interrupt_stats[SMD_MODEM].smsm_out_config_count;
Angshuman Sarkarbad32df2012-02-01 19:52:52 +0530457 smd_write_intr(intr->out_bit_pos,
458 intr->out_base + intr->out_offset);
Eric Holmberg7ad623a2012-03-01 14:41:10 -0700459 } else {
460 ++interrupt_stats[SMD_MODEM].smsm_out_hardcode_count;
Angshuman Sarkarbad32df2012-02-01 19:52:52 +0530461 MSM_TRIG_A2M_SMSM_INT;
Eric Holmberg7ad623a2012-03-01 14:41:10 -0700462 }
Angshuman Sarkarbad32df2012-02-01 19:52:52 +0530463}
464
465static inline void notify_dsp_smsm(void)
466{
467 static const struct interrupt_config_item *intr
468 = &private_intr_config[SMD_Q6].smsm;
Eric Holmberg7ad623a2012-03-01 14:41:10 -0700469 if (intr->out_base) {
470 ++interrupt_stats[SMD_Q6].smsm_out_config_count;
Angshuman Sarkarbad32df2012-02-01 19:52:52 +0530471 smd_write_intr(intr->out_bit_pos,
472 intr->out_base + intr->out_offset);
Eric Holmberg7ad623a2012-03-01 14:41:10 -0700473 } else {
474 ++interrupt_stats[SMD_Q6].smsm_out_hardcode_count;
Angshuman Sarkarbad32df2012-02-01 19:52:52 +0530475 MSM_TRIG_A2Q6_SMSM_INT;
Eric Holmberg7ad623a2012-03-01 14:41:10 -0700476 }
Angshuman Sarkarbad32df2012-02-01 19:52:52 +0530477}
478
479static inline void notify_dsps_smsm(void)
480{
481 static const struct interrupt_config_item *intr
482 = &private_intr_config[SMD_DSPS].smsm;
Eric Holmberg7ad623a2012-03-01 14:41:10 -0700483 if (intr->out_base) {
484 ++interrupt_stats[SMD_DSPS].smsm_out_config_count;
Angshuman Sarkarbad32df2012-02-01 19:52:52 +0530485 smd_write_intr(intr->out_bit_pos,
486 intr->out_base + intr->out_offset);
Eric Holmberg7ad623a2012-03-01 14:41:10 -0700487 } else {
488 ++interrupt_stats[SMD_DSPS].smsm_out_hardcode_count;
Angshuman Sarkarbad32df2012-02-01 19:52:52 +0530489 MSM_TRIG_A2DSPS_SMSM_INT;
Eric Holmberg7ad623a2012-03-01 14:41:10 -0700490 }
Angshuman Sarkarbad32df2012-02-01 19:52:52 +0530491}
492
493static inline void notify_wcnss_smsm(void)
494{
495 static const struct interrupt_config_item *intr
496 = &private_intr_config[SMD_WCNSS].smsm;
Angshuman Sarkarbad32df2012-02-01 19:52:52 +0530497
Eric Holmberg7ad623a2012-03-01 14:41:10 -0700498 if (intr->out_base) {
499 ++interrupt_stats[SMD_WCNSS].smsm_out_config_count;
Angshuman Sarkarbad32df2012-02-01 19:52:52 +0530500 smd_write_intr(intr->out_bit_pos,
501 intr->out_base + intr->out_offset);
Eric Holmberg7ad623a2012-03-01 14:41:10 -0700502 } else {
503 ++interrupt_stats[SMD_WCNSS].smsm_out_hardcode_count;
Angshuman Sarkarbad32df2012-02-01 19:52:52 +0530504 MSM_TRIG_A2WCNSS_SMSM_INT;
Eric Holmberg7ad623a2012-03-01 14:41:10 -0700505 }
Angshuman Sarkarbad32df2012-02-01 19:52:52 +0530506}
507
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700508static void notify_other_smsm(uint32_t smsm_entry, uint32_t notify_mask)
509{
510 /* older protocol don't use smsm_intr_mask,
511 but still communicates with modem */
512 if (!smsm_info.intr_mask ||
513 (__raw_readl(SMSM_INTR_MASK_ADDR(smsm_entry, SMSM_MODEM))
514 & notify_mask))
Angshuman Sarkarbad32df2012-02-01 19:52:52 +0530515 notify_modem_smsm();
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700516
517 if (smsm_info.intr_mask &&
518 (__raw_readl(SMSM_INTR_MASK_ADDR(smsm_entry, SMSM_Q6))
519 & notify_mask)) {
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700520 uint32_t mux_val;
521
Eric Holmberg6282c5d2011-10-27 17:30:57 -0600522 if (cpu_is_qsd8x50() && smsm_info.intr_mux) {
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700523 mux_val = __raw_readl(
524 SMSM_INTR_MUX_ADDR(SMEM_APPS_Q6_SMSM));
525 mux_val++;
526 __raw_writel(mux_val,
527 SMSM_INTR_MUX_ADDR(SMEM_APPS_Q6_SMSM));
528 }
Angshuman Sarkarbad32df2012-02-01 19:52:52 +0530529 notify_dsp_smsm();
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700530 }
531
532 if (smsm_info.intr_mask &&
533 (__raw_readl(SMSM_INTR_MASK_ADDR(smsm_entry, SMSM_WCNSS))
534 & notify_mask)) {
Angshuman Sarkarbad32df2012-02-01 19:52:52 +0530535 notify_wcnss_smsm();
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700536 }
537
Jeff Hugo6a8057c2011-08-16 13:47:12 -0600538 if (smsm_info.intr_mask &&
539 (__raw_readl(SMSM_INTR_MASK_ADDR(smsm_entry, SMSM_DSPS))
540 & notify_mask)) {
Angshuman Sarkarbad32df2012-02-01 19:52:52 +0530541 notify_dsps_smsm();
Jeff Hugo6a8057c2011-08-16 13:47:12 -0600542 }
543
Eric Holmbergda31d042012-03-28 14:01:02 -0600544 /*
545 * Notify local SMSM callback clients without wakelock since this
546 * code is used by power management during power-down/-up sequencing
547 * on DEM-based targets. Grabbing a wakelock in this case will
548 * abort the power-down sequencing.
549 */
Eric Holmberg51676a12012-07-10 18:45:23 -0600550 if (smsm_info.intr_mask &&
551 (__raw_readl(SMSM_INTR_MASK_ADDR(smsm_entry, SMSM_APPS))
552 & notify_mask)) {
553 smsm_cb_snapshot(0);
554 }
Brian Swetland2eb44eb2008-09-29 16:00:48 -0700555}
556
Eric Holmberg1255fe12012-10-04 13:37:28 -0600557static int smsm_pm_notifier(struct notifier_block *nb,
558 unsigned long event, void *unused)
559{
560 switch (event) {
561 case PM_SUSPEND_PREPARE:
562 smsm_change_state(SMSM_APPS_STATE, SMSM_PROC_AWAKE, 0);
563 break;
564
565 case PM_POST_SUSPEND:
566 smsm_change_state(SMSM_APPS_STATE, 0, SMSM_PROC_AWAKE);
567 break;
568 }
569 return NOTIFY_DONE;
570}
571
572static struct notifier_block smsm_pm_nb = {
573 .notifier_call = smsm_pm_notifier,
574 .priority = 0,
575};
576
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700577void smd_diag(void)
Brian Swetland2eb44eb2008-09-29 16:00:48 -0700578{
579 char *x;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700580 int size;
Brian Swetland2eb44eb2008-09-29 16:00:48 -0700581
582 x = smem_find(ID_DIAG_ERR_MSG, SZ_DIAG_ERR_MSG);
583 if (x != 0) {
584 x[SZ_DIAG_ERR_MSG - 1] = 0;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700585 SMD_INFO("smem: DIAG '%s'\n", x);
586 }
587
588 x = smem_get_entry(SMEM_ERR_CRASH_LOG, &size);
589 if (x != 0) {
590 x[size - 1] = 0;
591 pr_err("smem: CRASH LOG\n'%s'\n", x);
Brian Swetland2eb44eb2008-09-29 16:00:48 -0700592 }
593}
594
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700595
/*
 * Log diagnostics after a modem crash and then spin forever; recovery
 * is expected to come from the modem itself or the watchdog.
 */
static void handle_modem_crash(void)
{
	pr_err("MODEM/AMSS has CRASHED\n");
	smd_diag();

	/* FIXME: trigger a hard reboot here once a reset hook is available */

	/* in this case the modem or watchdog should reboot us */
	while (1)
		;
}
610
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700611int smsm_check_for_modem_crash(void)
Arve Hjønnevåg28379412009-05-20 16:52:36 -0700612{
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700613 /* if the modem's not ready yet, we have to hope for the best */
614 if (!smsm_info.state)
615 return 0;
Arve Hjønnevåg28379412009-05-20 16:52:36 -0700616
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700617 if (__raw_readl(SMSM_STATE_ADDR(SMSM_MODEM_STATE)) & SMSM_RESET) {
Brian Swetland2eb44eb2008-09-29 16:00:48 -0700618 handle_modem_crash();
619 return -1;
Brian Swetland2eb44eb2008-09-29 16:00:48 -0700620 }
Brian Swetland5b0f5a32009-04-26 18:38:49 -0700621 return 0;
Brian Swetland2eb44eb2008-09-29 16:00:48 -0700622}
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700623EXPORT_SYMBOL(smsm_check_for_modem_crash);
Brian Swetland2eb44eb2008-09-29 16:00:48 -0700624
Brian Swetland2eb44eb2008-09-29 16:00:48 -0700625/* the spinlock is used to synchronize between the
Brian Swetland03e00cd2009-07-01 17:58:37 -0700626 * irq handler and code that mutates the channel
627 * list or fiddles with channel state
628 */
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700629static DEFINE_SPINLOCK(smd_lock);
Brian Swetland03e00cd2009-07-01 17:58:37 -0700630DEFINE_SPINLOCK(smem_lock);
Brian Swetland2eb44eb2008-09-29 16:00:48 -0700631
632/* the mutex is used during open() and close()
Brian Swetland03e00cd2009-07-01 17:58:37 -0700633 * operations to avoid races while creating or
634 * destroying smd_channel structures
635 */
Brian Swetland2eb44eb2008-09-29 16:00:48 -0700636static DEFINE_MUTEX(smd_creation_mutex);
637
638static int smd_initialized;
639
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700640struct smd_shared_v1 {
641 struct smd_half_channel ch0;
642 unsigned char data0[SMD_BUF_SIZE];
643 struct smd_half_channel ch1;
644 unsigned char data1[SMD_BUF_SIZE];
645};
646
647struct smd_shared_v2 {
648 struct smd_half_channel ch0;
649 struct smd_half_channel ch1;
650};
651
Jeff Hugo918b2dc2012-03-21 13:42:09 -0600652struct smd_shared_v2_word_access {
653 struct smd_half_channel_word_access ch0;
654 struct smd_half_channel_word_access ch1;
655};
656
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700657struct smd_channel {
Jeff Hugo918b2dc2012-03-21 13:42:09 -0600658 volatile void *send; /* some variant of smd_half_channel */
659 volatile void *recv; /* some variant of smd_half_channel */
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700660 unsigned char *send_data;
661 unsigned char *recv_data;
662 unsigned fifo_size;
663 unsigned fifo_mask;
664 struct list_head ch_list;
665
666 unsigned current_packet;
667 unsigned n;
668 void *priv;
669 void (*notify)(void *priv, unsigned flags);
670
671 int (*read)(smd_channel_t *ch, void *data, int len, int user_buf);
672 int (*write)(smd_channel_t *ch, const void *data, int len,
673 int user_buf);
674 int (*read_avail)(smd_channel_t *ch);
675 int (*write_avail)(smd_channel_t *ch);
676 int (*read_from_cb)(smd_channel_t *ch, void *data, int len,
677 int user_buf);
678
679 void (*update_state)(smd_channel_t *ch);
680 unsigned last_state;
681 void (*notify_other_cpu)(void);
682
683 char name[20];
684 struct platform_device pdev;
685 unsigned type;
686
687 int pending_pkt_sz;
688
689 char is_pkt_ch;
Jeff Hugo918b2dc2012-03-21 13:42:09 -0600690
691 /*
692 * private internal functions to access *send and *recv.
693 * never to be exported outside of smd
694 */
695 struct smd_half_channel_access *half_ch;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700696};
697
698struct edge_to_pid {
699 uint32_t local_pid;
700 uint32_t remote_pid;
Eric Holmberg17992c12012-02-29 12:54:44 -0700701 char subsys_name[SMD_MAX_CH_NAME_LEN];
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700702};
703
/**
 * Maps edge type to local and remote processor ID's.
 *
 * NOTE(review): entries without a subsys_name initializer get "" and are
 * reported as having no subsystem by smd_edge_to_subsystem() /
 * smd_pid_to_subsystem() — presumably intentional for non-APPS and
 * RPM/Q6FW edges; confirm before adding names.
 */
static struct edge_to_pid edge_to_pids[] = {
	[SMD_APPS_MODEM] = {SMD_APPS, SMD_MODEM, "modem"},
	[SMD_APPS_QDSP] = {SMD_APPS, SMD_Q6, "q6"},
	[SMD_MODEM_QDSP] = {SMD_MODEM, SMD_Q6},
	[SMD_APPS_DSPS] = {SMD_APPS, SMD_DSPS, "dsps"},
	[SMD_MODEM_DSPS] = {SMD_MODEM, SMD_DSPS},
	[SMD_QDSP_DSPS] = {SMD_Q6, SMD_DSPS},
	[SMD_APPS_WCNSS] = {SMD_APPS, SMD_WCNSS, "wcnss"},
	[SMD_MODEM_WCNSS] = {SMD_MODEM, SMD_WCNSS},
	[SMD_QDSP_WCNSS] = {SMD_Q6, SMD_WCNSS},
	[SMD_DSPS_WCNSS] = {SMD_DSPS, SMD_WCNSS},
	[SMD_APPS_Q6FW] = {SMD_APPS, SMD_MODEM_Q6_FW},
	[SMD_MODEM_Q6FW] = {SMD_MODEM, SMD_MODEM_Q6_FW},
	[SMD_QDSP_Q6FW] = {SMD_Q6, SMD_MODEM_Q6_FW},
	[SMD_DSPS_Q6FW] = {SMD_DSPS, SMD_MODEM_Q6_FW},
	[SMD_WCNSS_Q6FW] = {SMD_WCNSS, SMD_MODEM_Q6_FW},
	[SMD_APPS_RPM] = {SMD_APPS, SMD_RPM},
	[SMD_MODEM_RPM] = {SMD_MODEM, SMD_RPM},
	[SMD_QDSP_RPM] = {SMD_Q6, SMD_RPM},
	[SMD_WCNSS_RPM] = {SMD_WCNSS, SMD_RPM},
};
728
/*
 * Binds a subsystem-restart notifier to the processor whose SMD/SMSM
 * state must be cleaned up when that subsystem restarts.
 */
struct restart_notifier_block {
	unsigned processor;	/* processor ID this notifier cleans up after */
	char *name;		/* subsystem name the notifier is registered on */
	struct notifier_block nb;
};
734
static int disable_smsm_reset_handshake;
static struct platform_device loopback_tty_pdev = {.name = "LOOPBACK_TTY"};

/* channels in teardown: fully closed, waiting for the remote side to
 * close, and queued for final cleanup on the close workqueue */
static LIST_HEAD(smd_ch_closed_list);
static LIST_HEAD(smd_ch_closing_list);
static LIST_HEAD(smd_ch_to_close_list);
/* open channels, grouped by the remote processor on the edge */
static LIST_HEAD(smd_ch_list_modem);
static LIST_HEAD(smd_ch_list_dsp);
static LIST_HEAD(smd_ch_list_dsps);
static LIST_HEAD(smd_ch_list_wcnss);
static LIST_HEAD(smd_ch_list_rpm);

/* one flag per slot of the SMEM channel allocation table */
static unsigned char smd_ch_allocated[64];
static struct work_struct probe_work;

static void finalize_channel_close_fn(struct work_struct *work);
static DECLARE_WORK(finalize_channel_close_work, finalize_channel_close_fn);
static struct workqueue_struct *channel_close_wq;

static int smd_alloc_channel(struct smd_alloc_elm *alloc_elm);

/* on smp systems, the probe might get called from multiple cores,
   hence use a lock */
static DEFINE_MUTEX(smd_probe_lock);
759
760static void smd_channel_probe_worker(struct work_struct *work)
761{
762 struct smd_alloc_elm *shared;
763 unsigned n;
764 uint32_t type;
765
766 shared = smem_find(ID_CH_ALLOC_TBL, sizeof(*shared) * 64);
767
768 if (!shared) {
769 pr_err("%s: allocation table not initialized\n", __func__);
770 return;
771 }
772
773 mutex_lock(&smd_probe_lock);
774 for (n = 0; n < 64; n++) {
775 if (smd_ch_allocated[n])
776 continue;
777
778 /* channel should be allocated only if APPS
779 processor is involved */
780 type = SMD_CHANNEL_TYPE(shared[n].type);
Jeff Hugo918b2dc2012-03-21 13:42:09 -0600781 if (type >= ARRAY_SIZE(edge_to_pids) ||
782 edge_to_pids[type].local_pid != SMD_APPS)
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700783 continue;
784 if (!shared[n].ref_count)
785 continue;
786 if (!shared[n].name[0])
787 continue;
788
789 if (!smd_alloc_channel(&shared[n]))
790 smd_ch_allocated[n] = 1;
791 else
792 SMD_INFO("Probe skipping ch %d, not allocated\n", n);
793 }
794 mutex_unlock(&smd_probe_lock);
795}
796
797/**
798 * Lookup processor ID and determine if it belongs to the proved edge
799 * type.
800 *
801 * @shared2: Pointer to v2 shared channel structure
802 * @type: Edge type
803 * @pid: Processor ID of processor on edge
804 * @local_ch: Channel that belongs to processor @pid
805 * @remote_ch: Other side of edge contained @pid
Jeff Hugo70a7e562012-09-07 11:24:32 -0600806 * @is_word_access_ch: Bool, is this a word aligned access channel
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700807 *
808 * Returns 0 for not on edge, 1 for found on edge
809 */
Jeff Hugo70a7e562012-09-07 11:24:32 -0600810static int pid_is_on_edge(void *shared2,
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700811 uint32_t type, uint32_t pid,
Jeff Hugo70a7e562012-09-07 11:24:32 -0600812 void **local_ch,
813 void **remote_ch,
814 int is_word_access_ch
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700815 )
816{
817 int ret = 0;
818 struct edge_to_pid *edge;
Jeff Hugo70a7e562012-09-07 11:24:32 -0600819 void *ch0;
820 void *ch1;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700821
822 *local_ch = 0;
823 *remote_ch = 0;
824
825 if (!shared2 || (type >= ARRAY_SIZE(edge_to_pids)))
826 return 0;
827
Jeff Hugo70a7e562012-09-07 11:24:32 -0600828 if (is_word_access_ch) {
829 ch0 = &((struct smd_shared_v2_word_access *)(shared2))->ch0;
830 ch1 = &((struct smd_shared_v2_word_access *)(shared2))->ch1;
831 } else {
832 ch0 = &((struct smd_shared_v2 *)(shared2))->ch0;
833 ch1 = &((struct smd_shared_v2 *)(shared2))->ch1;
834 }
835
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700836 edge = &edge_to_pids[type];
837 if (edge->local_pid != edge->remote_pid) {
838 if (pid == edge->local_pid) {
Jeff Hugo70a7e562012-09-07 11:24:32 -0600839 *local_ch = ch0;
840 *remote_ch = ch1;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700841 ret = 1;
842 } else if (pid == edge->remote_pid) {
Jeff Hugo70a7e562012-09-07 11:24:32 -0600843 *local_ch = ch1;
844 *remote_ch = ch0;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700845 ret = 1;
846 }
847 }
848
849 return ret;
850}
851
Eric Holmberg17992c12012-02-29 12:54:44 -0700852/*
853 * Returns a pointer to the subsystem name or NULL if no
854 * subsystem name is available.
855 *
856 * @type - Edge definition
857 */
858const char *smd_edge_to_subsystem(uint32_t type)
859{
860 const char *subsys = NULL;
861
862 if (type < ARRAY_SIZE(edge_to_pids)) {
863 subsys = edge_to_pids[type].subsys_name;
864 if (subsys[0] == 0x0)
865 subsys = NULL;
866 }
867 return subsys;
868}
869EXPORT_SYMBOL(smd_edge_to_subsystem);
870
Eric Holmberg7ad623a2012-03-01 14:41:10 -0700871/*
872 * Returns a pointer to the subsystem name given the
873 * remote processor ID.
874 *
875 * @pid Remote processor ID
876 * @returns Pointer to subsystem name or NULL if not found
877 */
878const char *smd_pid_to_subsystem(uint32_t pid)
879{
880 const char *subsys = NULL;
881 int i;
882
883 for (i = 0; i < ARRAY_SIZE(edge_to_pids); ++i) {
884 if (pid == edge_to_pids[i].remote_pid &&
885 edge_to_pids[i].subsys_name[0] != 0x0
886 ) {
887 subsys = edge_to_pids[i].subsys_name;
888 break;
889 }
890 }
891
892 return subsys;
893}
894EXPORT_SYMBOL(smd_pid_to_subsystem);
Eric Holmberg17992c12012-02-29 12:54:44 -0700895
/*
 * Force one half-channel into @new_state during subsystem-restart
 * cleanup, clearing the flow-control flags and raising fSTATE so the
 * state change is noticed. Channels already CLOSED are left untouched.
 * Writes go straight into shared memory; caller is responsible for
 * ordering/notification.
 */
static void smd_reset_edge(void *void_ch, unsigned new_state,
				int is_word_access_ch)
{
	if (is_word_access_ch) {
		struct smd_half_channel_word_access *ch =
			(struct smd_half_channel_word_access *)(void_ch);
		if (ch->state != SMD_SS_CLOSED) {
			ch->state = new_state;
			ch->fDSR = 0;
			ch->fCTS = 0;
			ch->fCD = 0;
			ch->fSTATE = 1;
		}
	} else {
		/* same reset, but through the byte-access layout */
		struct smd_half_channel *ch =
			(struct smd_half_channel *)(void_ch);
		if (ch->state != SMD_SS_CLOSED) {
			ch->state = new_state;
			ch->fDSR = 0;
			ch->fCTS = 0;
			ch->fCD = 0;
			ch->fSTATE = 1;
		}
	}
}
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700921
/*
 * Walk the allocation table and reset, to @new_state, the half-channel
 * of every allocated channel that belongs to restarting processor @pid.
 * ModemFW edges are reset as well when the modem restarts, since ModemFW
 * lives in the same subsystem but has its own SMD edges.
 */
static void smd_channel_reset_state(struct smd_alloc_elm *shared,
		unsigned new_state, unsigned pid)
{
	unsigned n;
	void *shared2;
	uint32_t type;
	void *local_ch;
	void *remote_ch;
	int is_word_access;

	for (n = 0; n < SMD_CHANNELS; n++) {
		if (!shared[n].ref_count)
			continue;
		if (!shared[n].name[0])
			continue;

		type = SMD_CHANNEL_TYPE(shared[n].type);
		is_word_access = is_word_access_ch(type);
		/* the shared structure size depends on the access width */
		if (is_word_access)
			shared2 = smem_alloc(SMEM_SMD_BASE_ID + n,
				sizeof(struct smd_shared_v2_word_access));
		else
			shared2 = smem_alloc(SMEM_SMD_BASE_ID + n,
				sizeof(struct smd_shared_v2));
		if (!shared2)
			continue;

		if (pid_is_on_edge(shared2, type, pid, &local_ch, &remote_ch,
							is_word_access))
			smd_reset_edge(local_ch, new_state, is_word_access);

		/*
		 * ModemFW is in the same subsystem as ModemSW, but has
		 * separate SMD edges that need to be reset.
		 */
		if (pid == SMSM_MODEM &&
				pid_is_on_edge(shared2, type, SMD_MODEM_Q6_FW,
				&local_ch, &remote_ch, is_word_access))
			smd_reset_edge(local_ch, new_state, is_word_access);
	}
}
963
964
/*
 * Clean up all SMD/SMSM shared state owned by a restarting processor.
 *
 * Sequence (order matters):
 *  1. release remote spinlocks the dead processor may still hold;
 *  2. zero its SMSM state and redo the init handshake (modem only);
 *  3. drive its channel states to CLOSING, then to CLOSED, notifying
 *     the remaining processors after each pass so local clients see a
 *     normal close sequence.
 *
 * @restart_pid: processor ID of the restarting subsystem
 */
void smd_channel_reset(uint32_t restart_pid)
{
	struct smd_alloc_elm *shared;
	unsigned long flags;

	SMD_DBG("%s: starting reset\n", __func__);

	/* release any held spinlocks */
	remote_spin_release(&remote_spinlock, restart_pid);
	remote_spin_release_all(restart_pid);

	shared = smem_find(ID_CH_ALLOC_TBL, sizeof(*shared) * 64);
	if (!shared) {
		pr_err("%s: allocation table not initialized\n", __func__);
		return;
	}

	/* reset SMSM entry */
	if (smsm_info.state) {
		writel_relaxed(0, SMSM_STATE_ADDR(restart_pid));

		/* restart SMSM init handshake */
		if (restart_pid == SMSM_MODEM) {
			smsm_change_state(SMSM_APPS_STATE,
				SMSM_INIT | SMSM_SMD_LOOPBACK | SMSM_RESET,
				0);
		}

		/* notify SMSM processors */
		smsm_irq_handler(0, 0);
		notify_modem_smsm();
		notify_dsp_smsm();
		notify_dsps_smsm();
		notify_wcnss_smsm();
	}

	/* change all remote states to CLOSING */
	mutex_lock(&smd_probe_lock);
	spin_lock_irqsave(&smd_lock, flags);
	smd_channel_reset_state(shared, SMD_SS_CLOSING, restart_pid);
	spin_unlock_irqrestore(&smd_lock, flags);
	mutex_unlock(&smd_probe_lock);

	/* notify SMD processors */
	mb();
	smd_fake_irq_handler(0);
	notify_modem_smd();
	notify_dsp_smd();
	notify_dsps_smd();
	notify_wcnss_smd();

	/* change all remote states to CLOSED */
	mutex_lock(&smd_probe_lock);
	spin_lock_irqsave(&smd_lock, flags);
	smd_channel_reset_state(shared, SMD_SS_CLOSED, restart_pid);
	spin_unlock_irqrestore(&smd_lock, flags);
	mutex_unlock(&smd_probe_lock);

	/* notify SMD processors */
	mb();
	smd_fake_irq_handler(0);
	notify_modem_smd();
	notify_dsp_smd();
	notify_dsps_smd();
	notify_wcnss_smd();

	SMD_DBG("%s: finished reset\n", __func__);
}
1033
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001034/* how many bytes are available for reading */
1035static int smd_stream_read_avail(struct smd_channel *ch)
1036{
Jeff Hugo918b2dc2012-03-21 13:42:09 -06001037 return (ch->half_ch->get_head(ch->recv) -
1038 ch->half_ch->get_tail(ch->recv)) & ch->fifo_mask;
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001039}
1040
1041/* how many bytes we are free to write */
1042static int smd_stream_write_avail(struct smd_channel *ch)
1043{
Jeff Hugo918b2dc2012-03-21 13:42:09 -06001044 return ch->fifo_mask - ((ch->half_ch->get_head(ch->send) -
1045 ch->half_ch->get_tail(ch->send)) & ch->fifo_mask);
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001046}
1047
1048static int smd_packet_read_avail(struct smd_channel *ch)
1049{
1050 if (ch->current_packet) {
1051 int n = smd_stream_read_avail(ch);
1052 if (n > ch->current_packet)
1053 n = ch->current_packet;
1054 return n;
1055 } else {
1056 return 0;
1057 }
1058}
1059
1060static int smd_packet_write_avail(struct smd_channel *ch)
1061{
1062 int n = smd_stream_write_avail(ch);
1063 return n > SMD_HEADER_SIZE ? n - SMD_HEADER_SIZE : 0;
1064}
1065
1066static int ch_is_open(struct smd_channel *ch)
1067{
Jeff Hugo918b2dc2012-03-21 13:42:09 -06001068 return (ch->half_ch->get_state(ch->recv) == SMD_SS_OPENED ||
1069 ch->half_ch->get_state(ch->recv) == SMD_SS_FLUSHING)
1070 && (ch->half_ch->get_state(ch->send) == SMD_SS_OPENED);
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001071}
1072
1073/* provide a pointer and length to readable data in the fifo */
1074static unsigned ch_read_buffer(struct smd_channel *ch, void **ptr)
1075{
Jeff Hugo918b2dc2012-03-21 13:42:09 -06001076 unsigned head = ch->half_ch->get_head(ch->recv);
1077 unsigned tail = ch->half_ch->get_tail(ch->recv);
Brian Swetland5b0f5a32009-04-26 18:38:49 -07001078 *ptr = (void *) (ch->recv_data + tail);
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001079
1080 if (tail <= head)
1081 return head - tail;
1082 else
Brian Swetland5b0f5a32009-04-26 18:38:49 -07001083 return ch->fifo_size - tail;
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001084}
1085
/* nonzero when the remote side has asked us to suppress read interrupts */
static int read_intr_blocked(struct smd_channel *ch)
{
	return ch->half_ch->get_fBLOCKREADINTR(ch->recv);
}
1090
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001091/* advance the fifo read pointer after data from ch_read_buffer is consumed */
1092static void ch_read_done(struct smd_channel *ch, unsigned count)
1093{
1094 BUG_ON(count > smd_stream_read_avail(ch));
Jeff Hugo918b2dc2012-03-21 13:42:09 -06001095 ch->half_ch->set_tail(ch->recv,
1096 (ch->half_ch->get_tail(ch->recv) + count) & ch->fifo_mask);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001097 wmb();
Jeff Hugo918b2dc2012-03-21 13:42:09 -06001098 ch->half_ch->set_fTAIL(ch->send, 1);
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001099}
1100
/* basic read interface to ch_read_{buffer,done} used
 * by smd_*_read() and update_packet_state()
 * will read-and-discard if the _data pointer is null
 *
 * @user_buf: nonzero when @_data is a userspace pointer (copy_to_user)
 * Returns the number of bytes consumed from the fifo (may be < len).
 */
static int ch_read(struct smd_channel *ch, void *_data, int len, int user_buf)
{
	void *ptr;
	unsigned n;
	unsigned char *data = _data;
	int orig_len = len;
	int r = 0;

	while (len > 0) {
		/* grab the next contiguous run of readable bytes */
		n = ch_read_buffer(ch, &ptr);
		if (n == 0)
			break;

		if (n > len)
			n = len;
		if (_data) {
			if (user_buf) {
				/* NOTE(review): on a partial copy_to_user the
				 * bytes are still consumed from the fifo and
				 * counted in the return value; caller cannot
				 * detect the short copy — confirm intended */
				r = copy_to_user(data, ptr, n);
				if (r > 0) {
					pr_err("%s: "
						"copy_to_user could not copy "
						"%i bytes.\n",
						__func__,
						r);
				}
			} else
				memcpy(data, ptr, n);
		}

		data += n;
		len -= n;
		ch_read_done(ch, n);
	}

	return orig_len - len;
}
1141
/* update_state callback for stream channels: intentionally a no-op */
static void update_stream_state(struct smd_channel *ch)
{
	/* streams have no special state requiring updating */
}
1146
/*
 * update_state callback for packet channels: when between packets,
 * consume the next SMD packet header (hdr[0] = payload length) and
 * start the new packet. Loops so that zero-length packets are skipped.
 */
static void update_packet_state(struct smd_channel *ch)
{
	unsigned hdr[5];
	int r;

	/* can't do anything if we're in the middle of a packet */
	while (ch->current_packet == 0) {
		/* discard 0 length packets if any */

		/* don't bother unless we can get the full header */
		if (smd_stream_read_avail(ch) < SMD_HEADER_SIZE)
			return;

		/* the avail check above guarantees the full header is read */
		r = ch_read(ch, hdr, SMD_HEADER_SIZE, 0);
		BUG_ON(r != SMD_HEADER_SIZE);

		ch->current_packet = hdr[0];
	}
}
1166
1167/* provide a pointer and length to next free space in the fifo */
1168static unsigned ch_write_buffer(struct smd_channel *ch, void **ptr)
1169{
Jeff Hugo918b2dc2012-03-21 13:42:09 -06001170 unsigned head = ch->half_ch->get_head(ch->send);
1171 unsigned tail = ch->half_ch->get_tail(ch->send);
Brian Swetland5b0f5a32009-04-26 18:38:49 -07001172 *ptr = (void *) (ch->send_data + head);
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001173
1174 if (head < tail) {
1175 return tail - head - 1;
1176 } else {
1177 if (tail == 0)
Brian Swetland5b0f5a32009-04-26 18:38:49 -07001178 return ch->fifo_size - head - 1;
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001179 else
Brian Swetland5b0f5a32009-04-26 18:38:49 -07001180 return ch->fifo_size - head;
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001181 }
1182}
1183
/* advace the fifo write pointer after freespace
 * from ch_write_buffer is filled
 */
static void ch_write_done(struct smd_channel *ch, unsigned count)
{
	BUG_ON(count > smd_stream_write_avail(ch));
	ch->half_ch->set_head(ch->send,
		(ch->half_ch->get_head(ch->send) + count) & ch->fifo_mask);
	/* publish the new head before raising fHEAD for the remote side */
	wmb();
	ch->half_ch->set_fHEAD(ch->send, 1);
}
1195
/*
 * Move our half-channel to state @n: set the modem-style status lines
 * (asserted only when OPENED), then write the state, raise fSTATE, and
 * interrupt the remote processor so it notices the change.
 */
static void ch_set_state(struct smd_channel *ch, unsigned n)
{
	if (n == SMD_SS_OPENED) {
		ch->half_ch->set_fDSR(ch->send, 1);
		ch->half_ch->set_fCTS(ch->send, 1);
		ch->half_ch->set_fCD(ch->send, 1);
	} else {
		ch->half_ch->set_fDSR(ch->send, 0);
		ch->half_ch->set_fCTS(ch->send, 0);
		ch->half_ch->set_fCD(ch->send, 0);
	}
	ch->half_ch->set_state(ch->send, n);
	ch->half_ch->set_fSTATE(ch->send, 1);
	ch->notify_other_cpu();
}
1211
/*
 * Kick the probe worker whenever the SMEM heap has grown since the last
 * check — new allocations may mean newly created channels.
 */
static void do_smd_probe(void)
{
	struct smem_shared *shared = (void *) MSM_SHARED_RAM_BASE;
	if (shared->heap_info.free_offset != last_heap_free) {
		last_heap_free = shared->heap_info.free_offset;
		schedule_work(&probe_work);
	}
}
1220
/*
 * React to a remote half-channel state transition by advancing our own
 * half-channel and notifying the client. Called with smd_lock held.
 *
 * @last: previous remote state (for logging)
 * @next: newly observed remote state; recorded in ch->last_state
 */
static void smd_state_change(struct smd_channel *ch,
			     unsigned last, unsigned next)
{
	ch->last_state = next;

	SMD_INFO("SMD: ch %d %d -> %d\n", ch->n, last, next);

	switch (next) {
	case SMD_SS_OPENING:
		/* remote is (re)opening: reset our fifo indices and follow */
		if (ch->half_ch->get_state(ch->send) == SMD_SS_CLOSING ||
		    ch->half_ch->get_state(ch->send) == SMD_SS_CLOSED) {
			ch->half_ch->set_tail(ch->recv, 0);
			ch->half_ch->set_head(ch->send, 0);
			ch->half_ch->set_fBLOCKREADINTR(ch->send, 0);
			ch_set_state(ch, SMD_SS_OPENING);
		}
		break;
	case SMD_SS_OPENED:
		/* handshake complete: report SMD_EVENT_OPEN to the client */
		if (ch->half_ch->get_state(ch->send) == SMD_SS_OPENING) {
			ch_set_state(ch, SMD_SS_OPENED);
			ch->notify(ch->priv, SMD_EVENT_OPEN);
		}
		break;
	case SMD_SS_FLUSHING:
	case SMD_SS_RESET:
		/* we should force them to close? */
		break;
	case SMD_SS_CLOSED:
		/* remote closed first: drop pending packet state and tell
		 * the client before we finish our own close */
		if (ch->half_ch->get_state(ch->send) == SMD_SS_OPENED) {
			ch_set_state(ch, SMD_SS_CLOSING);
			ch->current_packet = 0;
			ch->pending_pkt_sz = 0;
			ch->notify(ch->priv, SMD_EVENT_CLOSE);
		}
		break;
	case SMD_SS_CLOSING:
		/* both sides closing: hand off to the close workqueue */
		if (ch->half_ch->get_state(ch->send) == SMD_SS_CLOSED) {
			list_move(&ch->ch_list,
					&smd_ch_to_close_list);
			queue_work(channel_close_wq,
						&finalize_channel_close_work);
		}
		break;
	}
}
1266
/*
 * Service channels parked on smd_ch_closing_list: acknowledge fSTATE
 * and run the state machine for any remote state change, so a close
 * handshake can complete even though the channel left its edge list.
 */
static void handle_smd_irq_closing_list(void)
{
	unsigned long flags;
	struct smd_channel *ch;
	struct smd_channel *index;
	unsigned tmp;

	spin_lock_irqsave(&smd_lock, flags);
	/* _safe: smd_state_change() may move entries off this list */
	list_for_each_entry_safe(ch, index, &smd_ch_closing_list, ch_list) {
		if (ch->half_ch->get_fSTATE(ch->recv))
			ch->half_ch->set_fSTATE(ch->recv, 0);
		tmp = ch->half_ch->get_state(ch->recv);
		if (tmp != ch->last_state)
			smd_state_change(ch, ch->last_state, tmp);
	}
	spin_unlock_irqrestore(&smd_lock, flags);
}
1284
/*
 * Core interrupt service for one edge: for every open channel on @list,
 * latch-and-clear the remote event flags, run the state machine on a
 * state change, and deliver DATA/STATUS events to the client.
 *
 * ch_flags bits: 1 = fHEAD (new data), 2 = fTAIL (space freed),
 * 4 = fSTATE (state changed).
 *
 * @notify: doorbell function for the remote processor on this edge
 */
static void handle_smd_irq(struct list_head *list, void (*notify)(void))
{
	unsigned long flags;
	struct smd_channel *ch;
	unsigned ch_flags;
	unsigned tmp;
	unsigned char state_change;

	spin_lock_irqsave(&smd_lock, flags);
	list_for_each_entry(ch, list, ch_list) {
		state_change = 0;
		ch_flags = 0;
		if (ch_is_open(ch)) {
			if (ch->half_ch->get_fHEAD(ch->recv)) {
				ch->half_ch->set_fHEAD(ch->recv, 0);
				ch_flags |= 1;
			}
			if (ch->half_ch->get_fTAIL(ch->recv)) {
				ch->half_ch->set_fTAIL(ch->recv, 0);
				ch_flags |= 2;
			}
			if (ch->half_ch->get_fSTATE(ch->recv)) {
				ch->half_ch->set_fSTATE(ch->recv, 0);
				ch_flags |= 4;
			}
		}
		tmp = ch->half_ch->get_state(ch->recv);
		if (tmp != ch->last_state) {
			SMx_POWER_INFO("SMD ch%d '%s' State change %d->%d\n",
					ch->n, ch->name, ch->last_state, tmp);
			smd_state_change(ch, ch->last_state, tmp);
			state_change = 1;
		}
		if (ch_flags & 0x3) {
			/* data moved in either direction */
			ch->update_state(ch);
			SMx_POWER_INFO("SMD ch%d '%s' Data event r%d/w%d\n",
					ch->n, ch->name,
					ch->read_avail(ch),
					ch->fifo_size - ch->write_avail(ch));
			ch->notify(ch->priv, SMD_EVENT_DATA);
		}
		/* suppress STATUS if the state machine already notified */
		if (ch_flags & 0x4 && !state_change) {
			SMx_POWER_INFO("SMD ch%d '%s' State update\n",
					ch->n, ch->name);
			ch->notify(ch->priv, SMD_EVENT_STATUS);
		}
	}
	spin_unlock_irqrestore(&smd_lock, flags);
	do_smd_probe();
}
1335
Brian Swetland37521a32009-07-01 18:30:47 -07001336static irqreturn_t smd_modem_irq_handler(int irq, void *data)
Brian Swetland5b0f5a32009-04-26 18:38:49 -07001337{
Eric Holmberg98c6c642012-02-24 11:29:35 -07001338 SMx_POWER_INFO("SMD Int Modem->Apps\n");
Eric Holmberg7ad623a2012-03-01 14:41:10 -07001339 ++interrupt_stats[SMD_MODEM].smd_in_count;
Brian Swetland37521a32009-07-01 18:30:47 -07001340 handle_smd_irq(&smd_ch_list_modem, notify_modem_smd);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001341 handle_smd_irq_closing_list();
Brian Swetland37521a32009-07-01 18:30:47 -07001342 return IRQ_HANDLED;
1343}
1344
1345static irqreturn_t smd_dsp_irq_handler(int irq, void *data)
1346{
Eric Holmberg98c6c642012-02-24 11:29:35 -07001347 SMx_POWER_INFO("SMD Int LPASS->Apps\n");
Eric Holmberg7ad623a2012-03-01 14:41:10 -07001348 ++interrupt_stats[SMD_Q6].smd_in_count;
Brian Swetland37521a32009-07-01 18:30:47 -07001349 handle_smd_irq(&smd_ch_list_dsp, notify_dsp_smd);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001350 handle_smd_irq_closing_list();
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001351 return IRQ_HANDLED;
1352}
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001353
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001354static irqreturn_t smd_dsps_irq_handler(int irq, void *data)
1355{
Eric Holmberg98c6c642012-02-24 11:29:35 -07001356 SMx_POWER_INFO("SMD Int DSPS->Apps\n");
Eric Holmberg7ad623a2012-03-01 14:41:10 -07001357 ++interrupt_stats[SMD_DSPS].smd_in_count;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001358 handle_smd_irq(&smd_ch_list_dsps, notify_dsps_smd);
1359 handle_smd_irq_closing_list();
1360 return IRQ_HANDLED;
1361}
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001362
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001363static irqreturn_t smd_wcnss_irq_handler(int irq, void *data)
1364{
Eric Holmberg98c6c642012-02-24 11:29:35 -07001365 SMx_POWER_INFO("SMD Int WCNSS->Apps\n");
Eric Holmberg7ad623a2012-03-01 14:41:10 -07001366 ++interrupt_stats[SMD_WCNSS].smd_in_count;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001367 handle_smd_irq(&smd_ch_list_wcnss, notify_wcnss_smd);
1368 handle_smd_irq_closing_list();
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001369 return IRQ_HANDLED;
1370}
1371
Jeff Hugo918b2dc2012-03-21 13:42:09 -06001372static irqreturn_t smd_rpm_irq_handler(int irq, void *data)
1373{
1374 SMx_POWER_INFO("SMD Int RPM->Apps\n");
1375 ++interrupt_stats[SMD_RPM].smd_in_count;
1376 handle_smd_irq(&smd_ch_list_rpm, notify_rpm_smd);
1377 handle_smd_irq_closing_list();
1378 return IRQ_HANDLED;
1379}
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001380
/*
 * Poll every edge as if its interrupt had fired; used after sleep/reset
 * when a real doorbell may have been missed. Runs as a tasklet.
 */
static void smd_fake_irq_handler(unsigned long arg)
{
	handle_smd_irq(&smd_ch_list_modem, notify_modem_smd);
	handle_smd_irq(&smd_ch_list_dsp, notify_dsp_smd);
	handle_smd_irq(&smd_ch_list_dsps, notify_dsps_smd);
	handle_smd_irq(&smd_ch_list_wcnss, notify_wcnss_smd);
	handle_smd_irq(&smd_ch_list_rpm, notify_rpm_smd);
	handle_smd_irq_closing_list();
}
1390
1391static DECLARE_TASKLET(smd_fake_irq_tasklet, smd_fake_irq_handler, 0);
1392
Brian Swetland37521a32009-07-01 18:30:47 -07001393static inline int smd_need_int(struct smd_channel *ch)
1394{
1395 if (ch_is_open(ch)) {
Jeff Hugo918b2dc2012-03-21 13:42:09 -06001396 if (ch->half_ch->get_fHEAD(ch->recv) ||
1397 ch->half_ch->get_fTAIL(ch->recv) ||
1398 ch->half_ch->get_fSTATE(ch->recv))
Brian Swetland37521a32009-07-01 18:30:47 -07001399 return 1;
Jeff Hugo918b2dc2012-03-21 13:42:09 -06001400 if (ch->half_ch->get_state(ch->recv) != ch->last_state)
Brian Swetland37521a32009-07-01 18:30:47 -07001401 return 1;
1402 }
1403 return 0;
1404}
1405
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001406void smd_sleep_exit(void)
1407{
1408 unsigned long flags;
1409 struct smd_channel *ch;
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001410 int need_int = 0;
1411
1412 spin_lock_irqsave(&smd_lock, flags);
Brian Swetland37521a32009-07-01 18:30:47 -07001413 list_for_each_entry(ch, &smd_ch_list_modem, ch_list) {
1414 if (smd_need_int(ch)) {
1415 need_int = 1;
1416 break;
1417 }
1418 }
1419 list_for_each_entry(ch, &smd_ch_list_dsp, ch_list) {
1420 if (smd_need_int(ch)) {
1421 need_int = 1;
1422 break;
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001423 }
1424 }
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001425 list_for_each_entry(ch, &smd_ch_list_dsps, ch_list) {
1426 if (smd_need_int(ch)) {
1427 need_int = 1;
1428 break;
1429 }
1430 }
1431 list_for_each_entry(ch, &smd_ch_list_wcnss, ch_list) {
1432 if (smd_need_int(ch)) {
1433 need_int = 1;
1434 break;
1435 }
1436 }
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001437 spin_unlock_irqrestore(&smd_lock, flags);
1438 do_smd_probe();
Brian Swetland37521a32009-07-01 18:30:47 -07001439
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001440 if (need_int) {
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001441 SMD_DBG("smd_sleep_exit need interrupt\n");
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001442 tasklet_schedule(&smd_fake_irq_tasklet);
1443 }
1444}
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001445EXPORT_SYMBOL(smd_sleep_exit);
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001446
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001447static int smd_is_packet(struct smd_alloc_elm *alloc_elm)
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001448{
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001449 if (SMD_XFER_TYPE(alloc_elm->type) == 1)
1450 return 0;
1451 else if (SMD_XFER_TYPE(alloc_elm->type) == 2)
Brian Swetland5b0f5a32009-04-26 18:38:49 -07001452 return 1;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001453
1454 /* for cases where xfer type is 0 */
1455 if (!strncmp(alloc_elm->name, "DAL", 3))
Brian Swetland5b0f5a32009-04-26 18:38:49 -07001456 return 0;
1457
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001458 /* for cases where xfer type is 0 */
1459 if (!strncmp(alloc_elm->name, "RPCCALL_QDSP", 12))
1460 return 0;
1461
1462 if (alloc_elm->cid > 4 || alloc_elm->cid == 1)
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001463 return 1;
1464 else
1465 return 0;
1466}
1467
/*
 * Write up to @len bytes into the stream channel's TX fifo.
 *
 * @user_buf: nonzero when @_data is a userspace pointer (copy_from_user)
 * Returns the number of bytes actually queued (0..len); only rings the
 * remote doorbell when something was written. If the channel closes
 * mid-write, the partial write is abandoned and 0 is returned.
 */
static int smd_stream_write(smd_channel_t *ch, const void *_data, int len,
				int user_buf)
{
	void *ptr;
	const unsigned char *buf = _data;
	unsigned xfer;
	int orig_len = len;
	int r = 0;

	SMD_DBG("smd_stream_write() %d -> ch%d\n", len, ch->n);
	if (len < 0)
		return -EINVAL;
	else if (len == 0)
		return 0;

	while ((xfer = ch_write_buffer(ch, &ptr)) != 0) {
		if (!ch_is_open(ch)) {
			/* channel died mid-write: report nothing written */
			len = orig_len;
			break;
		}
		if (xfer > len)
			xfer = len;
		if (user_buf) {
			/* NOTE(review): a partial copy_from_user is logged
			 * but the full xfer is still committed below —
			 * confirm intended */
			r = copy_from_user(ptr, buf, xfer);
			if (r > 0) {
				pr_err("%s: "
					"copy_from_user could not copy %i "
					"bytes.\n",
					__func__,
					r);
			}
		} else
			memcpy(ptr, buf, xfer);
		ch_write_done(ch, xfer);
		len -= xfer;
		buf += xfer;
		if (len == 0)
			break;
	}

	if (orig_len - len)
		ch->notify_other_cpu();

	return orig_len - len;
}
1513
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001514static int smd_packet_write(smd_channel_t *ch, const void *_data, int len,
1515 int user_buf)
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001516{
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001517 int ret;
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001518 unsigned hdr[5];
1519
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001520 SMD_DBG("smd_packet_write() %d -> ch%d\n", len, ch->n);
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001521 if (len < 0)
1522 return -EINVAL;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001523 else if (len == 0)
1524 return 0;
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001525
1526 if (smd_stream_write_avail(ch) < (len + SMD_HEADER_SIZE))
1527 return -ENOMEM;
1528
1529 hdr[0] = len;
1530 hdr[1] = hdr[2] = hdr[3] = hdr[4] = 0;
1531
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001532
1533 ret = smd_stream_write(ch, hdr, sizeof(hdr), 0);
1534 if (ret < 0 || ret != sizeof(hdr)) {
1535 SMD_DBG("%s failed to write pkt header: "
1536 "%d returned\n", __func__, ret);
1537 return -1;
1538 }
1539
1540
1541 ret = smd_stream_write(ch, _data, len, user_buf);
1542 if (ret < 0 || ret != len) {
1543 SMD_DBG("%s failed to write pkt data: "
1544 "%d returned\n", __func__, ret);
1545 return ret;
1546 }
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001547
1548 return len;
1549}
1550
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001551static int smd_stream_read(smd_channel_t *ch, void *data, int len, int user_buf)
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001552{
1553 int r;
1554
1555 if (len < 0)
1556 return -EINVAL;
1557
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001558 r = ch_read(ch, data, len, user_buf);
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001559 if (r > 0)
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001560 if (!read_intr_blocked(ch))
1561 ch->notify_other_cpu();
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001562
1563 return r;
1564}
1565
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001566static int smd_packet_read(smd_channel_t *ch, void *data, int len, int user_buf)
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001567{
1568 unsigned long flags;
1569 int r;
1570
1571 if (len < 0)
1572 return -EINVAL;
1573
1574 if (len > ch->current_packet)
1575 len = ch->current_packet;
1576
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001577 r = ch_read(ch, data, len, user_buf);
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001578 if (r > 0)
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001579 if (!read_intr_blocked(ch))
1580 ch->notify_other_cpu();
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001581
1582 spin_lock_irqsave(&smd_lock, flags);
1583 ch->current_packet -= r;
1584 update_packet_state(ch);
1585 spin_unlock_irqrestore(&smd_lock, flags);
1586
1587 return r;
1588}
1589
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001590static int smd_packet_read_from_cb(smd_channel_t *ch, void *data, int len,
1591 int user_buf)
1592{
1593 int r;
1594
1595 if (len < 0)
1596 return -EINVAL;
1597
1598 if (len > ch->current_packet)
1599 len = ch->current_packet;
1600
1601 r = ch_read(ch, data, len, user_buf);
1602 if (r > 0)
1603 if (!read_intr_blocked(ch))
1604 ch->notify_other_cpu();
1605
1606 ch->current_packet -= r;
1607 update_packet_state(ch);
1608
1609 return r;
1610}
1611
#if (defined(CONFIG_MSM_SMD_PKG4) || defined(CONFIG_MSM_SMD_PKG3))
/*
 * Bind a channel to its version-2 shared-memory control structures and
 * split the shared FIFO region in half: first half = TX, second = RX.
 * Returns 0 on success, -EINVAL if the SMEM items are absent or the
 * FIFO size is not a power of two.
 */
static int smd_alloc_v2(struct smd_channel *ch)
{
	void *buffer;
	unsigned buffer_sz;

	if (is_word_access_ch(ch->type)) {
		/* word-access edges use a distinct shared layout */
		struct smd_shared_v2_word_access *shared2;
		shared2 = smem_alloc(SMEM_SMD_BASE_ID + ch->n,
							sizeof(*shared2));
		if (!shared2) {
			SMD_INFO("smem_alloc failed ch=%d\n", ch->n);
			return -EINVAL;
		}
		ch->send = &shared2->ch0;
		ch->recv = &shared2->ch1;
	} else {
		struct smd_shared_v2 *shared2;
		shared2 = smem_alloc(SMEM_SMD_BASE_ID + ch->n,
							sizeof(*shared2));
		if (!shared2) {
			SMD_INFO("smem_alloc failed ch=%d\n", ch->n);
			return -EINVAL;
		}
		ch->send = &shared2->ch0;
		ch->recv = &shared2->ch1;
	}
	/* select accessor functions appropriate for this edge's layout */
	ch->half_ch = get_half_ch_funcs(ch->type);

	buffer = smem_get_entry(SMEM_SMD_FIFO_BASE_ID + ch->n, &buffer_sz);
	if (!buffer) {
		SMD_INFO("smem_get_entry failed\n");
		return -EINVAL;
	}

	/* buffer must be a power-of-two size (fifo_mask relies on this) */
	if (buffer_sz & (buffer_sz - 1)) {
		SMD_INFO("Buffer size: %u not power of two\n", buffer_sz);
		return -EINVAL;
	}
	buffer_sz /= 2;
	ch->send_data = buffer;
	ch->recv_data = buffer + buffer_sz;
	ch->fifo_size = buffer_sz;

	return 0;
}

/* v1 channels are not supported on PKG3/PKG4 targets */
static int smd_alloc_v1(struct smd_channel *ch)
{
	return -EINVAL;
}

#else /* define v1 for older targets */
/* v2 channels are not supported on older targets */
static int smd_alloc_v2(struct smd_channel *ch)
{
	return -EINVAL;
}

/*
 * Bind a channel to the fixed-size version-1 shared structure, which
 * carries both half-channels and both data FIFOs inline.
 */
static int smd_alloc_v1(struct smd_channel *ch)
{
	struct smd_shared_v1 *shared1;
	shared1 = smem_alloc(ID_SMD_CHANNELS + ch->n, sizeof(*shared1));
	if (!shared1) {
		pr_err("smd_alloc_channel() cid %d does not exist\n", ch->n);
		return -EINVAL;
	}
	ch->send = &shared1->ch0;
	ch->recv = &shared1->ch1;
	ch->send_data = shared1->data0;
	ch->recv_data = shared1->data1;
	ch->fifo_size = SMD_BUF_SIZE;
	ch->half_ch = get_half_ch_funcs(ch->type);
	return 0;
}

#endif
1689
/*
 * Create a struct smd_channel for an allocation-table entry: bind shared
 * memory (v2 first, then v1), select the remote-notify and stream/packet
 * operations, register it on the closed list, and create its platform
 * device so clients can probe against the channel name.
 * Returns 0 on success, -1 on allocation failure.
 */
static int smd_alloc_channel(struct smd_alloc_elm *alloc_elm)
{
	struct smd_channel *ch;

	ch = kzalloc(sizeof(struct smd_channel), GFP_KERNEL);
	if (ch == 0) {
		pr_err("smd_alloc_channel() out of memory\n");
		return -1;
	}
	ch->n = alloc_elm->cid;
	ch->type = SMD_CHANNEL_TYPE(alloc_elm->type);

	/* try the v2 shared layout first, fall back to v1 */
	if (smd_alloc_v2(ch) && smd_alloc_v1(ch)) {
		kfree(ch);
		return -1;
	}

	/* fifo_size is a power of two, so this mask wraps FIFO indices */
	ch->fifo_mask = ch->fifo_size - 1;

	/* probe_worker guarentees ch->type will be a valid type */
	if (ch->type == SMD_APPS_MODEM)
		ch->notify_other_cpu = notify_modem_smd;
	else if (ch->type == SMD_APPS_QDSP)
		ch->notify_other_cpu = notify_dsp_smd;
	else if (ch->type == SMD_APPS_DSPS)
		ch->notify_other_cpu = notify_dsps_smd;
	else if (ch->type == SMD_APPS_WCNSS)
		ch->notify_other_cpu = notify_wcnss_smd;
	else if (ch->type == SMD_APPS_RPM)
		ch->notify_other_cpu = notify_rpm_smd;

	/* bind either the packet or the stream operation set */
	if (smd_is_packet(alloc_elm)) {
		ch->read = smd_packet_read;
		ch->write = smd_packet_write;
		ch->read_avail = smd_packet_read_avail;
		ch->write_avail = smd_packet_write_avail;
		ch->update_state = update_packet_state;
		ch->read_from_cb = smd_packet_read_from_cb;
		ch->is_pkt_ch = 1;
	} else {
		ch->read = smd_stream_read;
		ch->write = smd_stream_write;
		ch->read_avail = smd_stream_read_avail;
		ch->write_avail = smd_stream_write_avail;
		ch->update_state = update_stream_state;
		ch->read_from_cb = smd_stream_read;
	}

	/* copy the name and force NUL termination */
	memcpy(ch->name, alloc_elm->name, SMD_MAX_CH_NAME_LEN);
	ch->name[SMD_MAX_CH_NAME_LEN-1] = 0;

	ch->pdev.name = ch->name;
	ch->pdev.id = ch->type;

	SMD_INFO("smd_alloc_channel() '%s' cid=%d\n",
		ch->name, ch->n);

	mutex_lock(&smd_creation_mutex);
	list_add(&ch->ch_list, &smd_ch_closed_list);
	mutex_unlock(&smd_creation_mutex);

	platform_device_register(&ch->pdev);
	if (!strncmp(ch->name, "LOOPBACK", 8) && ch->type == SMD_APPS_MODEM) {
		/* create a platform driver to be used by smd_tty driver
		 * so that it can access the loopback port
		 */
		loopback_tty_pdev.id = ch->type;
		platform_device_register(&loopback_tty_pdev);
	}
	return 0;
}
1761
1762static inline void notify_loopback_smd(void)
1763{
1764 unsigned long flags;
1765 struct smd_channel *ch;
1766
1767 spin_lock_irqsave(&smd_lock, flags);
1768 list_for_each_entry(ch, &smd_ch_list_loopback, ch_list) {
1769 ch->notify(ch->priv, SMD_EVENT_DATA);
1770 }
1771 spin_unlock_irqrestore(&smd_lock, flags);
1772}
1773
1774static int smd_alloc_loopback_channel(void)
1775{
1776 static struct smd_half_channel smd_loopback_ctl;
1777 static char smd_loopback_data[SMD_BUF_SIZE];
1778 struct smd_channel *ch;
1779
1780 ch = kzalloc(sizeof(struct smd_channel), GFP_KERNEL);
1781 if (ch == 0) {
1782 pr_err("%s: out of memory\n", __func__);
1783 return -1;
1784 }
1785 ch->n = SMD_LOOPBACK_CID;
1786
1787 ch->send = &smd_loopback_ctl;
1788 ch->recv = &smd_loopback_ctl;
1789 ch->send_data = smd_loopback_data;
1790 ch->recv_data = smd_loopback_data;
1791 ch->fifo_size = SMD_BUF_SIZE;
1792
1793 ch->fifo_mask = ch->fifo_size - 1;
1794 ch->type = SMD_LOOPBACK_TYPE;
1795 ch->notify_other_cpu = notify_loopback_smd;
1796
1797 ch->read = smd_stream_read;
1798 ch->write = smd_stream_write;
1799 ch->read_avail = smd_stream_read_avail;
1800 ch->write_avail = smd_stream_write_avail;
1801 ch->update_state = update_stream_state;
1802 ch->read_from_cb = smd_stream_read;
1803
1804 memset(ch->name, 0, 20);
1805 memcpy(ch->name, "local_loopback", 14);
1806
1807 ch->pdev.name = ch->name;
1808 ch->pdev.id = ch->type;
1809
1810 SMD_INFO("%s: '%s' cid=%d\n", __func__, ch->name, ch->n);
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001811
1812 mutex_lock(&smd_creation_mutex);
1813 list_add(&ch->ch_list, &smd_ch_closed_list);
1814 mutex_unlock(&smd_creation_mutex);
1815
1816 platform_device_register(&ch->pdev);
Brian Swetland34f719b2009-10-30 16:22:05 -07001817 return 0;
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001818}
1819
/* Default notify callback installed when a client passes NULL, and after
 * close: silently ignores all channel events. */
static void do_nothing_notify(void *priv, unsigned flags)
{
}
1823
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001824static void finalize_channel_close_fn(struct work_struct *work)
1825{
1826 unsigned long flags;
1827 struct smd_channel *ch;
1828 struct smd_channel *index;
1829
Eric Holmbergbb2b1fa2011-10-12 16:41:37 -06001830 mutex_lock(&smd_creation_mutex);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001831 spin_lock_irqsave(&smd_lock, flags);
1832 list_for_each_entry_safe(ch, index, &smd_ch_to_close_list, ch_list) {
1833 list_del(&ch->ch_list);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001834 list_add(&ch->ch_list, &smd_ch_closed_list);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001835 ch->notify(ch->priv, SMD_EVENT_REOPEN_READY);
1836 ch->notify = do_nothing_notify;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001837 }
1838 spin_unlock_irqrestore(&smd_lock, flags);
Eric Holmbergbb2b1fa2011-10-12 16:41:37 -06001839 mutex_unlock(&smd_creation_mutex);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001840}
1841
1842struct smd_channel *smd_get_channel(const char *name, uint32_t type)
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001843{
1844 struct smd_channel *ch;
1845
1846 mutex_lock(&smd_creation_mutex);
1847 list_for_each_entry(ch, &smd_ch_closed_list, ch_list) {
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001848 if (!strcmp(name, ch->name) &&
1849 (type == ch->type)) {
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001850 list_del(&ch->ch_list);
1851 mutex_unlock(&smd_creation_mutex);
1852 return ch;
1853 }
1854 }
1855 mutex_unlock(&smd_creation_mutex);
1856
1857 return NULL;
1858}
1859
/*
 * Open the channel named @name on the processor edge @edge, installing
 * @notify as the event callback with @priv as its context.
 * Returns 0 and stores the handle in *@_ch on success; -ENODEV if SMD is
 * not initialized or the channel does not exist; -EAGAIN if the channel
 * exists but its previous close has not yet completed.
 */
int smd_named_open_on_edge(const char *name, uint32_t edge,
			   smd_channel_t **_ch,
			   void *priv, void (*notify)(void *, unsigned))
{
	struct smd_channel *ch;
	unsigned long flags;

	if (smd_initialized == 0) {
		SMD_INFO("smd_open() before smd_init()\n");
		return -ENODEV;
	}

	SMD_DBG("smd_open('%s', %p, %p)\n", name, priv, notify);

	ch = smd_get_channel(name, edge);
	if (!ch) {
		/* check closing list for port */
		spin_lock_irqsave(&smd_lock, flags);
		list_for_each_entry(ch, &smd_ch_closing_list, ch_list) {
			if (!strncmp(name, ch->name, 20) &&
				(edge == ch->type)) {
				/* channel exists, but is being closed */
				spin_unlock_irqrestore(&smd_lock, flags);
				return -EAGAIN;
			}
		}

		/* check closing workqueue list for port */
		list_for_each_entry(ch, &smd_ch_to_close_list, ch_list) {
			if (!strncmp(name, ch->name, 20) &&
				(edge == ch->type)) {
				/* channel exists, but is being closed */
				spin_unlock_irqrestore(&smd_lock, flags);
				return -EAGAIN;
			}
		}
		spin_unlock_irqrestore(&smd_lock, flags);

		/* one final check to handle closing->closed race condition */
		ch = smd_get_channel(name, edge);
		if (!ch)
			return -ENODEV;
	}

	if (notify == 0)
		notify = do_nothing_notify;

	ch->notify = notify;
	ch->current_packet = 0;
	ch->last_state = SMD_SS_CLOSED;
	ch->priv = priv;

	/* loopback has no remote side: mark it open and assert the
	 * handshake flags locally */
	if (edge == SMD_LOOPBACK_TYPE) {
		ch->last_state = SMD_SS_OPENED;
		ch->half_ch->set_state(ch->send, SMD_SS_OPENED);
		ch->half_ch->set_fDSR(ch->send, 1);
		ch->half_ch->set_fCTS(ch->send, 1);
		ch->half_ch->set_fCD(ch->send, 1);
	}

	*_ch = ch;

	SMD_DBG("smd_open: opening '%s'\n", ch->name);

	/* file the channel on the per-edge active list */
	spin_lock_irqsave(&smd_lock, flags);
	if (SMD_CHANNEL_TYPE(ch->type) == SMD_APPS_MODEM)
		list_add(&ch->ch_list, &smd_ch_list_modem);
	else if (SMD_CHANNEL_TYPE(ch->type) == SMD_APPS_QDSP)
		list_add(&ch->ch_list, &smd_ch_list_dsp);
	else if (SMD_CHANNEL_TYPE(ch->type) == SMD_APPS_DSPS)
		list_add(&ch->ch_list, &smd_ch_list_dsps);
	else if (SMD_CHANNEL_TYPE(ch->type) == SMD_APPS_WCNSS)
		list_add(&ch->ch_list, &smd_ch_list_wcnss);
	else if (SMD_CHANNEL_TYPE(ch->type) == SMD_APPS_RPM)
		list_add(&ch->ch_list, &smd_ch_list_rpm);
	else
		list_add(&ch->ch_list, &smd_ch_list_loopback);

	SMD_DBG("%s: opening ch %d\n", __func__, ch->n);

	if (edge != SMD_LOOPBACK_TYPE)
		smd_state_change(ch, ch->last_state, SMD_SS_OPENING);

	spin_unlock_irqrestore(&smd_lock, flags);

	return 0;
}
EXPORT_SYMBOL(smd_named_open_on_edge);
1948
1949
1950int smd_open(const char *name, smd_channel_t **_ch,
1951 void *priv, void (*notify)(void *, unsigned))
1952{
1953 return smd_named_open_on_edge(name, SMD_APPS_MODEM, _ch, priv,
1954 notify);
1955}
1956EXPORT_SYMBOL(smd_open);
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001957
/*
 * Close an open channel. The local half is marked closed immediately;
 * if the remote half is still open the channel parks on the closing list
 * until the remote acknowledges, otherwise it returns straight to the
 * closed list. Returns 0 on success, -1 for a NULL handle.
 */
int smd_close(smd_channel_t *ch)
{
	unsigned long flags;

	if (ch == 0)
		return -1;

	SMD_INFO("smd_close(%s)\n", ch->name);

	spin_lock_irqsave(&smd_lock, flags);
	list_del(&ch->ch_list);
	if (ch->n == SMD_LOOPBACK_CID) {
		/* loopback: drop handshake flags directly, no remote side */
		ch->half_ch->set_fDSR(ch->send, 0);
		ch->half_ch->set_fCTS(ch->send, 0);
		ch->half_ch->set_fCD(ch->send, 0);
		ch->half_ch->set_state(ch->send, SMD_SS_CLOSED);
	} else
		ch_set_state(ch, SMD_SS_CLOSED);

	if (ch->half_ch->get_state(ch->recv) == SMD_SS_OPENED) {
		/* remote still open: wait on the closing list */
		list_add(&ch->ch_list, &smd_ch_closing_list);
		spin_unlock_irqrestore(&smd_lock, flags);
	} else {
		spin_unlock_irqrestore(&smd_lock, flags);
		ch->notify = do_nothing_notify;
		mutex_lock(&smd_creation_mutex);
		list_add(&ch->ch_list, &smd_ch_closed_list);
		mutex_unlock(&smd_creation_mutex);
	}

	return 0;
}
EXPORT_SYMBOL(smd_close);
1991
1992int smd_write_start(smd_channel_t *ch, int len)
1993{
1994 int ret;
1995 unsigned hdr[5];
1996
1997 if (!ch) {
1998 pr_err("%s: Invalid channel specified\n", __func__);
1999 return -ENODEV;
2000 }
2001 if (!ch->is_pkt_ch) {
2002 pr_err("%s: non-packet channel specified\n", __func__);
2003 return -EACCES;
2004 }
2005 if (len < 1) {
2006 pr_err("%s: invalid length: %d\n", __func__, len);
2007 return -EINVAL;
2008 }
2009
2010 if (ch->pending_pkt_sz) {
2011 pr_err("%s: packet of size: %d in progress\n", __func__,
2012 ch->pending_pkt_sz);
2013 return -EBUSY;
2014 }
2015 ch->pending_pkt_sz = len;
2016
2017 if (smd_stream_write_avail(ch) < (SMD_HEADER_SIZE)) {
2018 ch->pending_pkt_sz = 0;
2019 SMD_DBG("%s: no space to write packet header\n", __func__);
2020 return -EAGAIN;
2021 }
2022
2023 hdr[0] = len;
2024 hdr[1] = hdr[2] = hdr[3] = hdr[4] = 0;
2025
2026
2027 ret = smd_stream_write(ch, hdr, sizeof(hdr), 0);
2028 if (ret < 0 || ret != sizeof(hdr)) {
2029 ch->pending_pkt_sz = 0;
2030 pr_err("%s: packet header failed to write\n", __func__);
2031 return -EPERM;
2032 }
2033 return 0;
2034}
2035EXPORT_SYMBOL(smd_write_start);
2036
2037int smd_write_segment(smd_channel_t *ch, void *data, int len, int user_buf)
2038{
2039 int bytes_written;
2040
2041 if (!ch) {
2042 pr_err("%s: Invalid channel specified\n", __func__);
2043 return -ENODEV;
2044 }
2045 if (len < 1) {
2046 pr_err("%s: invalid length: %d\n", __func__, len);
2047 return -EINVAL;
2048 }
2049
2050 if (!ch->pending_pkt_sz) {
2051 pr_err("%s: no transaction in progress\n", __func__);
2052 return -ENOEXEC;
2053 }
2054 if (ch->pending_pkt_sz - len < 0) {
2055 pr_err("%s: segment of size: %d will make packet go over "
2056 "length\n", __func__, len);
2057 return -EINVAL;
2058 }
2059
2060 bytes_written = smd_stream_write(ch, data, len, user_buf);
2061
2062 ch->pending_pkt_sz -= bytes_written;
2063
2064 return bytes_written;
2065}
2066EXPORT_SYMBOL(smd_write_segment);
2067
2068int smd_write_end(smd_channel_t *ch)
2069{
2070
2071 if (!ch) {
2072 pr_err("%s: Invalid channel specified\n", __func__);
2073 return -ENODEV;
2074 }
2075 if (ch->pending_pkt_sz) {
2076 pr_err("%s: current packet not completely written\n", __func__);
2077 return -E2BIG;
2078 }
2079
2080 return 0;
2081}
2082EXPORT_SYMBOL(smd_write_end);
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002083
2084int smd_read(smd_channel_t *ch, void *data, int len)
2085{
Jack Pham1b236d12012-03-19 15:27:18 -07002086 if (!ch) {
2087 pr_err("%s: Invalid channel specified\n", __func__);
2088 return -ENODEV;
2089 }
2090
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002091 return ch->read(ch, data, len, 0);
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002092}
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002093EXPORT_SYMBOL(smd_read);
2094
2095int smd_read_user_buffer(smd_channel_t *ch, void *data, int len)
2096{
Jack Pham1b236d12012-03-19 15:27:18 -07002097 if (!ch) {
2098 pr_err("%s: Invalid channel specified\n", __func__);
2099 return -ENODEV;
2100 }
2101
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002102 return ch->read(ch, data, len, 1);
2103}
2104EXPORT_SYMBOL(smd_read_user_buffer);
2105
2106int smd_read_from_cb(smd_channel_t *ch, void *data, int len)
2107{
Jack Pham1b236d12012-03-19 15:27:18 -07002108 if (!ch) {
2109 pr_err("%s: Invalid channel specified\n", __func__);
2110 return -ENODEV;
2111 }
2112
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002113 return ch->read_from_cb(ch, data, len, 0);
2114}
2115EXPORT_SYMBOL(smd_read_from_cb);
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002116
2117int smd_write(smd_channel_t *ch, const void *data, int len)
2118{
Jack Pham1b236d12012-03-19 15:27:18 -07002119 if (!ch) {
2120 pr_err("%s: Invalid channel specified\n", __func__);
2121 return -ENODEV;
2122 }
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002123
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002124 return ch->pending_pkt_sz ? -EBUSY : ch->write(ch, data, len, 0);
Brian Swetland636eb9c2009-12-07 15:28:08 -08002125}
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002126EXPORT_SYMBOL(smd_write);
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002127
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002128int smd_write_user_buffer(smd_channel_t *ch, const void *data, int len)
Brian Swetland636eb9c2009-12-07 15:28:08 -08002129{
Jack Pham1b236d12012-03-19 15:27:18 -07002130 if (!ch) {
2131 pr_err("%s: Invalid channel specified\n", __func__);
2132 return -ENODEV;
2133 }
2134
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002135 return ch->pending_pkt_sz ? -EBUSY : ch->write(ch, data, len, 1);
Brian Swetland636eb9c2009-12-07 15:28:08 -08002136}
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002137EXPORT_SYMBOL(smd_write_user_buffer);
Brian Swetland636eb9c2009-12-07 15:28:08 -08002138
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002139int smd_read_avail(smd_channel_t *ch)
2140{
Jack Pham1b236d12012-03-19 15:27:18 -07002141 if (!ch) {
2142 pr_err("%s: Invalid channel specified\n", __func__);
2143 return -ENODEV;
2144 }
2145
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002146 return ch->read_avail(ch);
2147}
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002148EXPORT_SYMBOL(smd_read_avail);
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002149
2150int smd_write_avail(smd_channel_t *ch)
2151{
Jack Pham1b236d12012-03-19 15:27:18 -07002152 if (!ch) {
2153 pr_err("%s: Invalid channel specified\n", __func__);
2154 return -ENODEV;
2155 }
2156
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002157 return ch->write_avail(ch);
2158}
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002159EXPORT_SYMBOL(smd_write_avail);
2160
2161void smd_enable_read_intr(smd_channel_t *ch)
2162{
2163 if (ch)
Jeff Hugo918b2dc2012-03-21 13:42:09 -06002164 ch->half_ch->set_fBLOCKREADINTR(ch->send, 0);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002165}
2166EXPORT_SYMBOL(smd_enable_read_intr);
2167
2168void smd_disable_read_intr(smd_channel_t *ch)
2169{
2170 if (ch)
Jeff Hugo918b2dc2012-03-21 13:42:09 -06002171 ch->half_ch->set_fBLOCKREADINTR(ch->send, 1);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002172}
2173EXPORT_SYMBOL(smd_disable_read_intr);
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002174
/**
 * Enable/disable receive interrupts for the remote processor used by a
 * particular channel.
 * @ch: open channel handle to use for the edge
 * @mask: 1 = mask interrupts; 0 = unmask interrupts
 * @returns: 0 for success; < 0 for failure
 *
 * Note that this enables/disables all interrupts from the remote subsystem for
 * all channels. As such, it should be used with care and only for specific
 * use cases such as power-collapse sequencing.
 */
int smd_mask_receive_interrupt(smd_channel_t *ch, bool mask)
{
	struct irq_chip *irq_chip;
	struct irq_data *irq_data;
	struct interrupt_config_item *int_cfg;

	if (!ch)
		return -EINVAL;

	/* edge type indexes the per-processor interrupt table */
	if (ch->type >= ARRAY_SIZE(edge_to_pids))
		return -ENODEV;

	int_cfg = &private_intr_config[edge_to_pids[ch->type].remote_pid].smd;

	/* irq_id < 0 means no SMD interrupt is registered for this edge */
	if (int_cfg->irq_id < 0)
		return -ENODEV;

	irq_chip = irq_get_chip(int_cfg->irq_id);
	if (!irq_chip)
		return -ENODEV;

	irq_data = irq_get_irq_data(int_cfg->irq_id);
	if (!irq_data)
		return -ENODEV;

	/* NOTE(review): irq_chip->irq_mask/irq_unmask are called without a
	 * NULL check; presumably every chip used on these targets provides
	 * them — confirm. */
	if (mask) {
		SMx_POWER_INFO("SMD Masking interrupts from %s\n",
				edge_to_pids[ch->type].subsys_name);
		irq_chip->irq_mask(irq_data);
	} else {
		SMx_POWER_INFO("SMD Unmasking interrupts from %s\n",
				edge_to_pids[ch->type].subsys_name);
		irq_chip->irq_unmask(irq_data);
	}

	return 0;
}
EXPORT_SYMBOL(smd_mask_receive_interrupt);
2224
/* Blocking waits are not implemented; always returns -1. */
int smd_wait_until_readable(smd_channel_t *ch, int bytes)
{
	return -1;
}
2229
/* Blocking waits are not implemented; always returns -1. */
int smd_wait_until_writable(smd_channel_t *ch, int bytes)
{
	return -1;
}
2234
2235int smd_cur_packet_size(smd_channel_t *ch)
2236{
Jack Pham1b236d12012-03-19 15:27:18 -07002237 if (!ch) {
2238 pr_err("%s: Invalid channel specified\n", __func__);
2239 return -ENODEV;
2240 }
2241
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002242 return ch->current_packet;
2243}
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002244EXPORT_SYMBOL(smd_cur_packet_size);
2245
2246int smd_tiocmget(smd_channel_t *ch)
2247{
Jack Pham1b236d12012-03-19 15:27:18 -07002248 if (!ch) {
2249 pr_err("%s: Invalid channel specified\n", __func__);
2250 return -ENODEV;
2251 }
2252
Jeff Hugo918b2dc2012-03-21 13:42:09 -06002253 return (ch->half_ch->get_fDSR(ch->recv) ? TIOCM_DSR : 0) |
2254 (ch->half_ch->get_fCTS(ch->recv) ? TIOCM_CTS : 0) |
2255 (ch->half_ch->get_fCD(ch->recv) ? TIOCM_CD : 0) |
2256 (ch->half_ch->get_fRI(ch->recv) ? TIOCM_RI : 0) |
2257 (ch->half_ch->get_fCTS(ch->send) ? TIOCM_RTS : 0) |
2258 (ch->half_ch->get_fDSR(ch->send) ? TIOCM_DTR : 0);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002259}
2260EXPORT_SYMBOL(smd_tiocmget);
2261
/* this api will be called while holding smd_lock */
/*
 * Apply TIOCM_* set/clear masks to our half-channel's handshake flags.
 * Sets are applied before clears, so a bit present in both masks ends up
 * cleared. Raises fSTATE and interrupts the remote processor so it
 * re-reads the flags. Returns 0 or -ENODEV for a NULL handle.
 */
int
smd_tiocmset_from_cb(smd_channel_t *ch, unsigned int set, unsigned int clear)
{
	if (!ch) {
		pr_err("%s: Invalid channel specified\n", __func__);
		return -ENODEV;
	}

	if (set & TIOCM_DTR)
		ch->half_ch->set_fDSR(ch->send, 1);

	if (set & TIOCM_RTS)
		ch->half_ch->set_fCTS(ch->send, 1);

	if (clear & TIOCM_DTR)
		ch->half_ch->set_fDSR(ch->send, 0);

	if (clear & TIOCM_RTS)
		ch->half_ch->set_fCTS(ch->send, 0);

	ch->half_ch->set_fSTATE(ch->send, 1);
	/* make sure flag updates are visible before the remote is poked */
	barrier();
	ch->notify_other_cpu();

	return 0;
}
EXPORT_SYMBOL(smd_tiocmset_from_cb);
2290
2291int smd_tiocmset(smd_channel_t *ch, unsigned int set, unsigned int clear)
2292{
2293 unsigned long flags;
2294
Jack Pham1b236d12012-03-19 15:27:18 -07002295 if (!ch) {
2296 pr_err("%s: Invalid channel specified\n", __func__);
2297 return -ENODEV;
2298 }
2299
Vamsi Krishnacb12a102011-08-17 15:18:26 -07002300 spin_lock_irqsave(&smd_lock, flags);
2301 smd_tiocmset_from_cb(ch, set, clear);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002302 spin_unlock_irqrestore(&smd_lock, flags);
2303
2304 return 0;
2305}
2306EXPORT_SYMBOL(smd_tiocmset);
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002307
Jeff Hugod71d6ac2012-04-11 16:48:07 -06002308int smd_is_pkt_avail(smd_channel_t *ch)
2309{
Jeff Hugoa8549f12012-08-13 20:36:18 -06002310 unsigned long flags;
2311
Jeff Hugod71d6ac2012-04-11 16:48:07 -06002312 if (!ch || !ch->is_pkt_ch)
2313 return -EINVAL;
2314
2315 if (ch->current_packet)
2316 return 1;
2317
Jeff Hugoa8549f12012-08-13 20:36:18 -06002318 spin_lock_irqsave(&smd_lock, flags);
Jeff Hugod71d6ac2012-04-11 16:48:07 -06002319 update_packet_state(ch);
Jeff Hugoa8549f12012-08-13 20:36:18 -06002320 spin_unlock_irqrestore(&smd_lock, flags);
Jeff Hugod71d6ac2012-04-11 16:48:07 -06002321
2322 return ch->current_packet ? 1 : 0;
2323}
2324EXPORT_SYMBOL(smd_is_pkt_avail);
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002325
2326
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002327/* -------------------------------------------------------------------------- */
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002328
/*
 * Shared Memory Range Check
 *
 * Takes a physical address and an offset and checks if the resulting physical
 * address would fit into one of the aux smem regions. If so, returns the
 * corresponding virtual address. Otherwise returns NULL. Expects the array
 * of smem regions to be in ascending physical address order.
 *
 * @base: physical base address to check
 * @offset: offset from the base to get the final address
 */
static void *smem_range_check(void *base, unsigned offset)
{
	int i;
	void *phys_addr;
	unsigned size;

	/* NOTE: arithmetic on void* relies on the GCC extension that
	 * treats sizeof(void) as 1 (standard practice in kernel code). */
	for (i = 0; i < num_smem_areas; ++i) {
		phys_addr = smem_areas[i].phys_addr;
		size = smem_areas[i].size;
		/* areas are sorted ascending: once base precedes an area,
		 * no later area can contain it either */
		if (base < phys_addr)
			return NULL;
		if (base > phys_addr + size)
			continue;
		/* base == phys_addr + size falls through here but fails the
		 * strict upper-bound check below, so it is rejected */
		if (base >= phys_addr && base + offset < phys_addr + size)
			return smem_areas[i].virt_addr + offset;
	}

	return NULL;
}
2359
/* smem_alloc returns the pointer to the smem item if it is already
 * allocated; otherwise it returns NULL.  Despite the name it never
 * allocates — it is a lookup wrapper around smem_find().
 */
void *smem_alloc(unsigned id, unsigned size)
{
	void *item = smem_find(id, size);

	return item;
}
EXPORT_SYMBOL(smem_alloc);
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002368
/* smem_alloc2 returns the pointer to smem item.  If it is not allocated,
 * it allocates it and then returns the pointer to it.
 *
 * The heap table-of-contents (toc) lives in memory shared with remote
 * processors, so all allocation bookkeeping is done under the remote
 * spinlock and ordered with explicit write barriers.
 *
 * Returns NULL if the heap is uninitialized, the id is out of range,
 * an existing entry's size does not match, or the heap is exhausted.
 */
void *smem_alloc2(unsigned id, unsigned size_in)
{
	struct smem_shared *shared = (void *) MSM_SHARED_RAM_BASE;
	struct smem_heap_entry *toc = shared->heap_toc;
	unsigned long flags;
	void *ret = NULL;

	if (!shared->heap_info.initialized) {
		pr_err("%s: smem heap info not initialized\n", __func__);
		return NULL;
	}

	if (id >= SMEM_NUM_ITEMS)
		return NULL;

	/* heap entries are kept 8-byte aligned */
	size_in = ALIGN(size_in, 8);
	remote_spin_lock_irqsave(&remote_spinlock, flags);
	if (toc[id].allocated) {
		SMD_DBG("%s: %u already allocated\n", __func__, id);
		/* an existing item is only usable if the caller's
		 * (aligned) size matches exactly */
		if (size_in != toc[id].size)
			pr_err("%s: wrong size %u (expected %u)\n",
				__func__, toc[id].size, size_in);
		else
			ret = (void *)(MSM_SHARED_RAM_BASE + toc[id].offset);
	} else if (id > SMEM_FIXED_ITEM_LAST) {
		/* only dynamic (non-fixed) items may be allocated here */
		SMD_DBG("%s: allocating %u\n", __func__, id);
		if (shared->heap_info.heap_remaining >= size_in) {
			toc[id].offset = shared->heap_info.free_offset;
			toc[id].size = size_in;
			/* ensure offset/size are visible to remote procs
			 * before the entry is marked allocated */
			wmb();
			toc[id].allocated = 1;

			shared->heap_info.free_offset += size_in;
			shared->heap_info.heap_remaining -= size_in;
			ret = (void *)(MSM_SHARED_RAM_BASE + toc[id].offset);
		} else
			pr_err("%s: not enough memory %u (required %u)\n",
				__func__, shared->heap_info.heap_remaining,
				size_in);
	}
	/* flush heap bookkeeping before dropping the remote lock */
	wmb();
	remote_spin_unlock_irqrestore(&remote_spinlock, flags);
	return ret;
}
EXPORT_SYMBOL(smem_alloc2);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002417
/* Look up an allocated smem item by id.
 *
 * On success returns the item's virtual address and stores its size in
 * *size; returns NULL (with *size set to 0) if the item is not allocated
 * or the id is out of range.
 *
 * The remote spinlock is taken only once spinlocks_initialized is set,
 * allowing early-boot callers to proceed before lock init.
 */
void *smem_get_entry(unsigned id, unsigned *size)
{
	struct smem_shared *shared = (void *) MSM_SHARED_RAM_BASE;
	struct smem_heap_entry *toc = shared->heap_toc;
	/* snapshot the flag so lock and unlock decisions always agree */
	int use_spinlocks = spinlocks_initialized;
	void *ret = 0;
	unsigned long flags = 0;

	if (id >= SMEM_NUM_ITEMS)
		return ret;

	if (use_spinlocks)
		remote_spin_lock_irqsave(&remote_spinlock, flags);
	/* toc is in device memory and cannot be speculatively accessed */
	if (toc[id].allocated) {
		*size = toc[id].size;
		barrier();
		/* a non-zero base in 'reserved' means the item lives in an
		 * aux smem region and must pass the range check */
		if (!(toc[id].reserved & BASE_ADDR_MASK))
			ret = (void *) (MSM_SHARED_RAM_BASE + toc[id].offset);
		else
			ret = smem_range_check(
				(void *)(toc[id].reserved & BASE_ADDR_MASK),
				toc[id].offset);
	} else {
		*size = 0;
	}
	if (use_spinlocks)
		remote_spin_unlock_irqrestore(&remote_spinlock, flags);

	return ret;
}
EXPORT_SYMBOL(smem_get_entry);
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002450
/*
 * Find an allocated smem item and verify its size.
 *
 * The requested size is rounded up to 8 bytes (matching smem_alloc2's
 * alignment) and must equal the stored size exactly; a mismatch is
 * logged and treated as not found.
 *
 * Returns the item's virtual address, or 0 on lookup/size failure.
 */
void *smem_find(unsigned id, unsigned size_in)
{
	unsigned actual_size;
	void *item;

	item = smem_get_entry(id, &actual_size);
	if (!item)
		return 0;

	size_in = ALIGN(size_in, 8);
	if (size_in != actual_size) {
		pr_err("smem_find(%d, %d): wrong size %d\n",
			id, size_in, actual_size);
		return 0;
	}

	return item;
}
EXPORT_SYMBOL(smem_find);
2470
2471static int smsm_cb_init(void)
2472{
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002473 struct smsm_state_info *state_info;
2474 int n;
2475 int ret = 0;
2476
2477 smsm_states = kmalloc(sizeof(struct smsm_state_info)*SMSM_NUM_ENTRIES,
2478 GFP_KERNEL);
2479
2480 if (!smsm_states) {
2481 pr_err("%s: SMSM init failed\n", __func__);
2482 return -ENOMEM;
2483 }
2484
Karthikeyan Ramasubramanian3249a222012-04-18 17:16:49 -06002485 smsm_cb_wq = create_singlethread_workqueue("smsm_cb_wq");
2486 if (!smsm_cb_wq) {
2487 pr_err("%s: smsm_cb_wq creation failed\n", __func__);
2488 kfree(smsm_states);
2489 return -EFAULT;
2490 }
2491
Eric Holmbergc8002902011-09-16 13:55:57 -06002492 mutex_lock(&smsm_lock);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002493 for (n = 0; n < SMSM_NUM_ENTRIES; n++) {
2494 state_info = &smsm_states[n];
2495 state_info->last_value = __raw_readl(SMSM_STATE_ADDR(n));
Eric Holmberge8a39322012-04-03 15:14:02 -06002496 state_info->intr_mask_set = 0x0;
2497 state_info->intr_mask_clear = 0x0;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002498 INIT_LIST_HEAD(&state_info->callbacks);
2499 }
Eric Holmbergc8002902011-09-16 13:55:57 -06002500 mutex_unlock(&smsm_lock);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002501
2502 return ret;
2503}
2504
/* One-time SMSM subsystem initialization.
 *
 * Discovers table dimensions from shared memory, allocates the snapshot
 * FIFO and wakelock, maps (or allocates) the shared state, interrupt-mask
 * and interrupt-mux arrays, initializes the callback machinery, and
 * registers the PM notifier.  Returns 0 on success or a negative errno.
 */
static int smsm_init(void)
{
	struct smem_shared *shared = (void *) MSM_SHARED_RAM_BASE;
	int i;
	struct smsm_size_info_type *smsm_size_info;

	/* if the remote side published table sizes, override the
	 * compile-time SMSM_NUM_ENTRIES/SMSM_NUM_HOSTS defaults */
	smsm_size_info = smem_alloc(SMEM_SMSM_SIZE_INFO,
				sizeof(struct smsm_size_info_type));
	if (smsm_size_info) {
		SMSM_NUM_ENTRIES = smsm_size_info->num_entries;
		SMSM_NUM_HOSTS = smsm_size_info->num_hosts;
	}

	/* FIFO holds up to SMSM_SNAPSHOT_CNT full state snapshots */
	i = kfifo_alloc(&smsm_snapshot_fifo,
			sizeof(uint32_t) * SMSM_NUM_ENTRIES * SMSM_SNAPSHOT_CNT,
			GFP_KERNEL);
	if (i) {
		pr_err("%s: SMSM state fifo alloc failed %d\n", __func__, i);
		return i;
	}
	wake_lock_init(&smsm_snapshot_wakelock, WAKE_LOCK_SUSPEND,
			"smsm_snapshot");

	if (!smsm_info.state) {
		smsm_info.state = smem_alloc2(ID_SHARED_STATE,
					      SMSM_NUM_ENTRIES *
					      sizeof(uint32_t));

		if (smsm_info.state) {
			__raw_writel(0, SMSM_STATE_ADDR(SMSM_APPS_STATE));
			/* newer modem images (>= 0xB) also expect the
			 * apps DEM entry to be cleared */
			if ((shared->version[VERSION_MODEM] >> 16) >= 0xB)
				__raw_writel(0, \
					SMSM_STATE_ADDR(SMSM_APPS_DEM_I));
		}
	}

	if (!smsm_info.intr_mask) {
		smsm_info.intr_mask = smem_alloc2(SMEM_SMSM_CPU_INTR_MASK,
						  SMSM_NUM_ENTRIES *
						  SMSM_NUM_HOSTS *
						  sizeof(uint32_t));

		if (smsm_info.intr_mask) {
			/* start with all interrupts masked for apps ... */
			for (i = 0; i < SMSM_NUM_ENTRIES; i++)
				__raw_writel(0x0,
					SMSM_INTR_MASK_ADDR(i, SMSM_APPS));

			/* Configure legacy modem bits */
			__raw_writel(LEGACY_MODEM_SMSM_MASK,
				SMSM_INTR_MASK_ADDR(SMSM_MODEM_STATE,
					SMSM_APPS));
		}
	}

	if (!smsm_info.intr_mux)
		smsm_info.intr_mux = smem_alloc2(SMEM_SMD_SMSM_INTR_MUX,
						 SMSM_NUM_INTR_MUX *
						 sizeof(uint32_t));

	i = smsm_cb_init();
	if (i)
		return i;

	/* make all shared-memory writes above visible before use */
	wmb();

	/* prime the PM notifier state as if resuming, then register it;
	 * registration failure is logged but not fatal */
	smsm_pm_notifier(&smsm_pm_nb, PM_POST_SUSPEND, NULL);
	i = register_pm_notifier(&smsm_pm_nb);
	if (i)
		pr_err("%s: power state notif error %d\n", __func__, i);

	return 0;
}
2577
2578void smsm_reset_modem(unsigned mode)
2579{
2580 if (mode == SMSM_SYSTEM_DOWNLOAD) {
2581 mode = SMSM_RESET | SMSM_SYSTEM_DOWNLOAD;
2582 } else if (mode == SMSM_MODEM_WAIT) {
2583 mode = SMSM_RESET | SMSM_MODEM_WAIT;
2584 } else { /* reset_mode is SMSM_RESET or default */
2585 mode = SMSM_RESET;
2586 }
2587
2588 smsm_change_state(SMSM_APPS_STATE, mode, mode);
2589}
2590EXPORT_SYMBOL(smsm_reset_modem);
2591
2592void smsm_reset_modem_cont(void)
2593{
2594 unsigned long flags;
2595 uint32_t state;
2596
2597 if (!smsm_info.state)
2598 return;
2599
2600 spin_lock_irqsave(&smem_lock, flags);
2601 state = __raw_readl(SMSM_STATE_ADDR(SMSM_APPS_STATE)) \
2602 & ~SMSM_MODEM_WAIT;
2603 __raw_writel(state, SMSM_STATE_ADDR(SMSM_APPS_STATE));
2604 wmb();
2605 spin_unlock_irqrestore(&smem_lock, flags);
2606}
2607EXPORT_SYMBOL(smsm_reset_modem_cont);
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002608
/* Capture a snapshot of all SMSM state entries into the snapshot FIFO and
 * queue the callback worker to deliver client notifications.
 *
 * @use_wakelock: non-zero to hold a wakelock until the worker has
 *                processed this snapshot (it is also queued in the FIFO
 *                so the worker knows whether to release).
 *
 * Called with interrupts disabled (from IRQ context); on any FIFO failure
 * the wakelock reference taken here is rolled back.
 */
static void smsm_cb_snapshot(uint32_t use_wakelock)
{
	int n;
	uint32_t new_state;
	unsigned long flags;
	int ret;

	/* fail fast if a whole snapshot (entries + wakelock flag)
	 * would not fit */
	ret = kfifo_avail(&smsm_snapshot_fifo);
	if (ret < SMSM_SNAPSHOT_SIZE) {
		pr_err("%s: SMSM snapshot full %d\n", __func__, ret);
		return;
	}

	/*
	 * To avoid a race condition with notify_smsm_cb_clients_worker, the
	 * following sequence must be followed:
	 * 1) increment snapshot count
	 * 2) insert data into FIFO
	 *
	 * Potentially in parallel, the worker:
	 * a) verifies >= 1 snapshots are in FIFO
	 * b) processes snapshot
	 * c) decrements reference count
	 *
	 * This order ensures that 1 will always occur before abc.
	 */
	if (use_wakelock) {
		spin_lock_irqsave(&smsm_snapshot_count_lock, flags);
		if (smsm_snapshot_count == 0) {
			SMx_POWER_INFO("SMSM snapshot wake lock\n");
			wake_lock(&smsm_snapshot_wakelock);
		}
		++smsm_snapshot_count;
		spin_unlock_irqrestore(&smsm_snapshot_count_lock, flags);
	}

	/* queue state entries */
	for (n = 0; n < SMSM_NUM_ENTRIES; n++) {
		new_state = __raw_readl(SMSM_STATE_ADDR(n));

		ret = kfifo_in(&smsm_snapshot_fifo,
				&new_state, sizeof(new_state));
		if (ret != sizeof(new_state)) {
			pr_err("%s: SMSM snapshot failure %d\n", __func__, ret);
			goto restore_snapshot_count;
		}
	}

	/* queue wakelock usage flag */
	ret = kfifo_in(&smsm_snapshot_fifo,
			&use_wakelock, sizeof(use_wakelock));
	if (ret != sizeof(use_wakelock)) {
		pr_err("%s: SMSM snapshot failure %d\n", __func__, ret);
		goto restore_snapshot_count;
	}

	queue_work(smsm_cb_wq, &smsm_cb_work);
	return;

restore_snapshot_count:
	/* undo the reference taken above; a partial snapshot stays in the
	 * FIFO but the worker only consumes complete ones */
	if (use_wakelock) {
		spin_lock_irqsave(&smsm_snapshot_count_lock, flags);
		if (smsm_snapshot_count) {
			--smsm_snapshot_count;
			if (smsm_snapshot_count == 0) {
				SMx_POWER_INFO("SMSM snapshot wake unlock\n");
				wake_unlock(&smsm_snapshot_wakelock);
			}
		} else {
			pr_err("%s: invalid snapshot count\n", __func__);
		}
		spin_unlock_irqrestore(&smsm_snapshot_count_lock, flags);
	}
}
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002683
/* Core SMSM interrupt handler shared by all per-edge IRQ wrappers.
 *
 * For the ADSP edge it only snapshots state for the callback worker.
 * For other edges it additionally runs the apps-side state machine:
 * reset handshake with the modem, init/SMD-init progression, and
 * system-download handling, publishing any apps-state change back to
 * the other processors.
 */
static irqreturn_t smsm_irq_handler(int irq, void *data)
{
	unsigned long flags;

	if (irq == INT_ADSP_A11_SMSM) {
		uint32_t mux_val;
		static uint32_t prev_smem_q6_apps_smsm;

		/* 8x50 only: track the Q6->apps intr mux word
		 * (value is latched but not otherwise acted upon here) */
		if (smsm_info.intr_mux && cpu_is_qsd8x50()) {
			mux_val = __raw_readl(
					SMSM_INTR_MUX_ADDR(SMEM_Q6_APPS_SMSM));
			if (mux_val != prev_smem_q6_apps_smsm)
				prev_smem_q6_apps_smsm = mux_val;
		}

		spin_lock_irqsave(&smem_lock, flags);
		smsm_cb_snapshot(1);
		spin_unlock_irqrestore(&smem_lock, flags);
		return IRQ_HANDLED;
	}

	spin_lock_irqsave(&smem_lock, flags);
	if (!smsm_info.state) {
		SMSM_INFO("<SM NO STATE>\n");
	} else {
		unsigned old_apps, apps;
		unsigned modm = __raw_readl(SMSM_STATE_ADDR(SMSM_MODEM_STATE));

		old_apps = apps = __raw_readl(SMSM_STATE_ADDR(SMSM_APPS_STATE));

		SMSM_DBG("<SM %08x %08x>\n", apps, modm);
		if (apps & SMSM_RESET) {
			/* If we get an interrupt and the apps SMSM_RESET
			   bit is already set, the modem is acking the
			   app's reset ack. */
			if (!disable_smsm_reset_handshake)
				apps &= ~SMSM_RESET;
			/* Issue a fake irq to handle any
			 * smd state changes during reset
			 */
			smd_fake_irq_handler(0);

			/* queue modem restart notify chain */
			modem_queue_start_reset_notify();

		} else if (modm & SMSM_RESET) {
			pr_err("\nSMSM: Modem SMSM state changed to SMSM_RESET.");
			if (!disable_smsm_reset_handshake) {
				/* ack the reset and flush caches so RAM is
				 * consistent for post-mortem/restart */
				apps |= SMSM_RESET;
				flush_cache_all();
				outer_flush_all();
			}
			modem_queue_start_reset_notify();

		} else if (modm & SMSM_INIT) {
			if (!(apps & SMSM_INIT)) {
				apps |= SMSM_INIT;
				modem_queue_smsm_init_notify();
			}

			/* advance to RUN once INIT, SMDINIT and RPCINIT
			 * are all present on the apps side */
			if (modm & SMSM_SMDINIT)
				apps |= SMSM_SMDINIT;
			if ((apps & (SMSM_INIT | SMSM_SMDINIT | SMSM_RPCINIT)) ==
				(SMSM_INIT | SMSM_SMDINIT | SMSM_RPCINIT))
				apps |= SMSM_RUN;
		} else if (modm & SMSM_SYSTEM_DOWNLOAD) {
			pr_err("\nSMSM: Modem SMSM state changed to SMSM_SYSTEM_DOWNLOAD.");
			modem_queue_start_reset_notify();
		}

		if (old_apps != apps) {
			SMSM_DBG("<SM %08x NOTIFY>\n", apps);
			__raw_writel(apps, SMSM_STATE_ADDR(SMSM_APPS_STATE));
			do_smd_probe();
			notify_other_smsm(SMSM_APPS_STATE, (old_apps ^ apps));
		}

		smsm_cb_snapshot(1);
	}
	spin_unlock_irqrestore(&smem_lock, flags);
	return IRQ_HANDLED;
}
2766
Eric Holmberg98c6c642012-02-24 11:29:35 -07002767static irqreturn_t smsm_modem_irq_handler(int irq, void *data)
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002768{
Eric Holmberg98c6c642012-02-24 11:29:35 -07002769 SMx_POWER_INFO("SMSM Int Modem->Apps\n");
Eric Holmberg7ad623a2012-03-01 14:41:10 -07002770 ++interrupt_stats[SMD_MODEM].smsm_in_count;
Eric Holmberg98c6c642012-02-24 11:29:35 -07002771 return smsm_irq_handler(irq, data);
2772}
Brian Swetland5b0f5a32009-04-26 18:38:49 -07002773
Eric Holmberg98c6c642012-02-24 11:29:35 -07002774static irqreturn_t smsm_dsp_irq_handler(int irq, void *data)
2775{
2776 SMx_POWER_INFO("SMSM Int LPASS->Apps\n");
Eric Holmberg7ad623a2012-03-01 14:41:10 -07002777 ++interrupt_stats[SMD_Q6].smsm_in_count;
Eric Holmberg98c6c642012-02-24 11:29:35 -07002778 return smsm_irq_handler(irq, data);
2779}
2780
2781static irqreturn_t smsm_dsps_irq_handler(int irq, void *data)
2782{
2783 SMx_POWER_INFO("SMSM Int DSPS->Apps\n");
Eric Holmberg7ad623a2012-03-01 14:41:10 -07002784 ++interrupt_stats[SMD_DSPS].smsm_in_count;
Eric Holmberg98c6c642012-02-24 11:29:35 -07002785 return smsm_irq_handler(irq, data);
2786}
2787
2788static irqreturn_t smsm_wcnss_irq_handler(int irq, void *data)
2789{
2790 SMx_POWER_INFO("SMSM Int WCNSS->Apps\n");
Eric Holmberg7ad623a2012-03-01 14:41:10 -07002791 ++interrupt_stats[SMD_WCNSS].smsm_in_count;
Eric Holmberg98c6c642012-02-24 11:29:35 -07002792 return smsm_irq_handler(irq, data);
2793}
2794
Eric Holmberge8a39322012-04-03 15:14:02 -06002795/*
2796 * Changes the global interrupt mask. The set and clear masks are re-applied
2797 * every time the global interrupt mask is updated for callback registration
2798 * and de-registration.
2799 *
2800 * The clear mask is applied first, so if a bit is set to 1 in both the clear
2801 * mask and the set mask, the result will be that the interrupt is set.
2802 *
2803 * @smsm_entry SMSM entry to change
2804 * @clear_mask 1 = clear bit, 0 = no-op
2805 * @set_mask 1 = set bit, 0 = no-op
2806 *
2807 * @returns 0 for success, < 0 for error
2808 */
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002809int smsm_change_intr_mask(uint32_t smsm_entry,
2810 uint32_t clear_mask, uint32_t set_mask)
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002811{
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002812 uint32_t old_mask, new_mask;
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002813 unsigned long flags;
Brian Swetland5b0f5a32009-04-26 18:38:49 -07002814
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002815 if (smsm_entry >= SMSM_NUM_ENTRIES) {
2816 pr_err("smsm_change_state: Invalid entry %d\n",
2817 smsm_entry);
2818 return -EINVAL;
2819 }
2820
2821 if (!smsm_info.intr_mask) {
2822 pr_err("smsm_change_intr_mask <SM NO STATE>\n");
Brian Swetland5b0f5a32009-04-26 18:38:49 -07002823 return -EIO;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002824 }
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002825
2826 spin_lock_irqsave(&smem_lock, flags);
Eric Holmberge8a39322012-04-03 15:14:02 -06002827 smsm_states[smsm_entry].intr_mask_clear = clear_mask;
2828 smsm_states[smsm_entry].intr_mask_set = set_mask;
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002829
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002830 old_mask = __raw_readl(SMSM_INTR_MASK_ADDR(smsm_entry, SMSM_APPS));
2831 new_mask = (old_mask & ~clear_mask) | set_mask;
2832 __raw_writel(new_mask, SMSM_INTR_MASK_ADDR(smsm_entry, SMSM_APPS));
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002833
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002834 wmb();
2835 spin_unlock_irqrestore(&smem_lock, flags);
Brian Swetland5b0f5a32009-04-26 18:38:49 -07002836
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002837 return 0;
2838}
2839EXPORT_SYMBOL(smsm_change_intr_mask);
2840
2841int smsm_get_intr_mask(uint32_t smsm_entry, uint32_t *intr_mask)
2842{
2843 if (smsm_entry >= SMSM_NUM_ENTRIES) {
2844 pr_err("smsm_change_state: Invalid entry %d\n",
2845 smsm_entry);
2846 return -EINVAL;
2847 }
2848
2849 if (!smsm_info.intr_mask) {
2850 pr_err("smsm_change_intr_mask <SM NO STATE>\n");
2851 return -EIO;
2852 }
2853
2854 *intr_mask = __raw_readl(SMSM_INTR_MASK_ADDR(smsm_entry, SMSM_APPS));
2855 return 0;
2856}
2857EXPORT_SYMBOL(smsm_get_intr_mask);
2858
2859int smsm_change_state(uint32_t smsm_entry,
2860 uint32_t clear_mask, uint32_t set_mask)
2861{
2862 unsigned long flags;
2863 uint32_t old_state, new_state;
2864
2865 if (smsm_entry >= SMSM_NUM_ENTRIES) {
2866 pr_err("smsm_change_state: Invalid entry %d",
2867 smsm_entry);
2868 return -EINVAL;
2869 }
2870
2871 if (!smsm_info.state) {
2872 pr_err("smsm_change_state <SM NO STATE>\n");
2873 return -EIO;
2874 }
2875 spin_lock_irqsave(&smem_lock, flags);
2876
2877 old_state = __raw_readl(SMSM_STATE_ADDR(smsm_entry));
2878 new_state = (old_state & ~clear_mask) | set_mask;
2879 __raw_writel(new_state, SMSM_STATE_ADDR(smsm_entry));
2880 SMSM_DBG("smsm_change_state %x\n", new_state);
2881 notify_other_smsm(SMSM_APPS_STATE, (old_state ^ new_state));
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002882
2883 spin_unlock_irqrestore(&smem_lock, flags);
2884
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002885 return 0;
2886}
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002887EXPORT_SYMBOL(smsm_change_state);
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002888
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002889uint32_t smsm_get_state(uint32_t smsm_entry)
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002890{
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002891 uint32_t rv = 0;
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002892
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002893 /* needs interface change to return error code */
2894 if (smsm_entry >= SMSM_NUM_ENTRIES) {
2895 pr_err("smsm_change_state: Invalid entry %d",
2896 smsm_entry);
2897 return 0;
2898 }
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002899
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002900 if (!smsm_info.state) {
2901 pr_err("smsm_get_state <SM NO STATE>\n");
2902 } else {
2903 rv = __raw_readl(SMSM_STATE_ADDR(smsm_entry));
2904 }
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002905
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002906 return rv;
2907}
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002908EXPORT_SYMBOL(smsm_get_state);
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002909
/**
 * Performs SMSM callback client notification.
 *
 * Runs on smsm_cb_wq.  Drains complete snapshots (SMSM_NUM_ENTRIES state
 * words followed by a wakelock-usage flag) from the snapshot FIFO, invokes
 * every registered callback whose mask overlaps the changed bits, and
 * releases the snapshot wakelock reference taken by smsm_cb_snapshot().
 * See the ordering contract documented in smsm_cb_snapshot().
 */
void notify_smsm_cb_clients_worker(struct work_struct *work)
{
	struct smsm_state_cb_info *cb_info;
	struct smsm_state_info *state_info;
	int n;
	uint32_t new_state;
	uint32_t state_changes;
	uint32_t use_wakelock;
	int ret;
	unsigned long flags;

	if (!smd_initialized)
		return;

	/* only consume complete snapshots; a partial write never reaches
	 * this size threshold */
	while (kfifo_len(&smsm_snapshot_fifo) >= SMSM_SNAPSHOT_SIZE) {
		mutex_lock(&smsm_lock);
		for (n = 0; n < SMSM_NUM_ENTRIES; n++) {
			state_info = &smsm_states[n];

			ret = kfifo_out(&smsm_snapshot_fifo, &new_state,
					sizeof(new_state));
			if (ret != sizeof(new_state)) {
				pr_err("%s: snapshot underflow %d\n",
					__func__, ret);
				mutex_unlock(&smsm_lock);
				return;
			}

			state_changes = state_info->last_value ^ new_state;
			if (state_changes) {
				SMx_POWER_INFO("SMSM Change %d: %08x->%08x\n",
						n, state_info->last_value,
						new_state);
				/* callbacks run under smsm_lock with the
				 * old and new state values */
				list_for_each_entry(cb_info,
					&state_info->callbacks, cb_list) {

					if (cb_info->mask & state_changes)
						cb_info->notify(cb_info->data,
							state_info->last_value,
							new_state);
				}
				state_info->last_value = new_state;
			}
		}

		/* read wakelock flag */
		ret = kfifo_out(&smsm_snapshot_fifo, &use_wakelock,
				sizeof(use_wakelock));
		if (ret != sizeof(use_wakelock)) {
			pr_err("%s: snapshot underflow %d\n",
				__func__, ret);
			mutex_unlock(&smsm_lock);
			return;
		}
		mutex_unlock(&smsm_lock);

		/* drop the reference taken when this snapshot was queued;
		 * unlock only when the last outstanding snapshot is done */
		if (use_wakelock) {
			spin_lock_irqsave(&smsm_snapshot_count_lock, flags);
			if (smsm_snapshot_count) {
				--smsm_snapshot_count;
				if (smsm_snapshot_count == 0) {
					SMx_POWER_INFO("SMSM snapshot"
						" wake unlock\n");
					wake_unlock(&smsm_snapshot_wakelock);
				}
			} else {
				pr_err("%s: invalid snapshot count\n",
					__func__);
			}
			spin_unlock_irqrestore(&smsm_snapshot_count_lock,
					flags);
		}
	}
}
2987
Arve Hjønnevågec9d3d12009-06-16 14:48:21 -07002988
/**
 * Registers callback for SMSM state notifications when the specified
 * bits change.
 *
 * If a callback with the same (notify, data) pair already exists, its
 * mask is widened instead of adding a duplicate entry.  The entry's
 * hardware interrupt mask is recomputed from the union of all callback
 * masks (plus the legacy modem bits), honoring any masks previously
 * stored via smsm_change_intr_mask().
 *
 * @smsm_entry  Processor entry to register for
 * @mask        Bits to register
 * @notify      Notification function
 * @data        Opaque data passed in to callback
 *
 * @returns Status code
 *  <0 error code
 *  0  inserted new entry
 *  1  updated mask of existing entry
 */
int smsm_state_cb_register(uint32_t smsm_entry, uint32_t mask,
		void (*notify)(void *, uint32_t, uint32_t), void *data)
{
	struct smsm_state_info *state;
	struct smsm_state_cb_info *cb_info;
	struct smsm_state_cb_info *cb_found = 0;
	uint32_t new_mask = 0;
	int ret = 0;

	if (smsm_entry >= SMSM_NUM_ENTRIES)
		return -EINVAL;

	mutex_lock(&smsm_lock);

	if (!smsm_states) {
		/* smsm not yet initialized */
		ret = -ENODEV;
		goto cleanup;
	}

	state = &smsm_states[smsm_entry];
	/* widen an existing (notify, data) registration if present, and
	 * accumulate the union of all callback masks as we scan */
	list_for_each_entry(cb_info,
			&state->callbacks, cb_list) {
		if (!ret && (cb_info->notify == notify) &&
				(cb_info->data == data)) {
			cb_info->mask |= mask;
			cb_found = cb_info;
			ret = 1;
		}
		new_mask |= cb_info->mask;
	}

	if (!cb_found) {
		/* GFP_ATOMIC: allocated under the smsm mutex — NOTE(review):
		 * GFP_KERNEL would appear safe here; confirm before changing */
		cb_info = kmalloc(sizeof(struct smsm_state_cb_info),
			GFP_ATOMIC);
		if (!cb_info) {
			ret = -ENOMEM;
			goto cleanup;
		}

		cb_info->mask = mask;
		cb_info->notify = notify;
		cb_info->data = data;
		INIT_LIST_HEAD(&cb_info->cb_list);
		list_add_tail(&cb_info->cb_list,
			&state->callbacks);
		new_mask |= mask;
	}

	/* update interrupt notification mask */
	if (smsm_entry == SMSM_MODEM_STATE)
		new_mask |= LEGACY_MODEM_SMSM_MASK;

	if (smsm_info.intr_mask) {
		unsigned long flags;

		spin_lock_irqsave(&smem_lock, flags);
		new_mask = (new_mask & ~state->intr_mask_clear)
				| state->intr_mask_set;
		__raw_writel(new_mask,
				SMSM_INTR_MASK_ADDR(smsm_entry, SMSM_APPS));
		wmb();
		spin_unlock_irqrestore(&smem_lock, flags);
	}

cleanup:
	mutex_unlock(&smsm_lock);
	return ret;
}
EXPORT_SYMBOL(smsm_state_cb_register);
3073
3074
/**
 * Deregisters for SMSM state notifications for the specified bits.
 *
 * Clears @mask from the matching (notify, data) callback; if no mask
 * bits remain, the callback entry is removed and freed.  The entry's
 * hardware interrupt mask is then recomputed from the remaining
 * callbacks (plus the legacy modem bits), honoring masks stored via
 * smsm_change_intr_mask().
 *
 * @smsm_entry  Processor entry to deregister
 * @mask        Bits to deregister (if result is 0, callback is removed)
 * @notify      Notification function to deregister
 * @data        Opaque data passed in to callback
 *
 * @returns Status code
 *  <0 error code
 *  0  not found
 *  1  updated mask
 *  2  removed callback
 */
int smsm_state_cb_deregister(uint32_t smsm_entry, uint32_t mask,
		void (*notify)(void *, uint32_t, uint32_t), void *data)
{
	struct smsm_state_cb_info *cb_info;
	struct smsm_state_cb_info *cb_tmp;
	struct smsm_state_info *state;
	uint32_t new_mask = 0;
	int ret = 0;

	if (smsm_entry >= SMSM_NUM_ENTRIES)
		return -EINVAL;

	mutex_lock(&smsm_lock);

	if (!smsm_states) {
		/* smsm not yet initialized */
		mutex_unlock(&smsm_lock);
		return -ENODEV;
	}

	state = &smsm_states[smsm_entry];
	/* _safe iteration: entries may be deleted while scanning */
	list_for_each_entry_safe(cb_info, cb_tmp,
		&state->callbacks, cb_list) {
		if (!ret && (cb_info->notify == notify) &&
			(cb_info->data == data)) {
			cb_info->mask &= ~mask;
			ret = 1;
			if (!cb_info->mask) {
				/* no mask bits set, remove callback */
				list_del(&cb_info->cb_list);
				kfree(cb_info);
				ret = 2;
				/* freed entry must not contribute below */
				continue;
			}
		}
		new_mask |= cb_info->mask;
	}

	/* update interrupt notification mask */
	if (smsm_entry == SMSM_MODEM_STATE)
		new_mask |= LEGACY_MODEM_SMSM_MASK;

	if (smsm_info.intr_mask) {
		unsigned long flags;

		spin_lock_irqsave(&smem_lock, flags);
		new_mask = (new_mask & ~state->intr_mask_clear)
				| state->intr_mask_set;
		__raw_writel(new_mask,
				SMSM_INTR_MASK_ADDR(smsm_entry, SMSM_APPS));
		wmb();
		spin_unlock_irqrestore(&smem_lock, flags);
	}

	mutex_unlock(&smsm_lock);
	return ret;
}
EXPORT_SYMBOL(smsm_state_cb_deregister);
3147
Karthikeyan Ramasubramanianed92ac22012-08-22 18:08:14 -06003148int smd_module_init_notifier_register(struct notifier_block *nb)
Karthikeyan Ramasubramanian7069c482012-03-22 09:21:20 -06003149{
3150 int ret;
3151 if (!nb)
3152 return -EINVAL;
Karthikeyan Ramasubramanianed92ac22012-08-22 18:08:14 -06003153 mutex_lock(&smd_module_init_notifier_lock);
3154 ret = raw_notifier_chain_register(&smd_module_init_notifier_list, nb);
3155 if (smd_module_inited)
3156 nb->notifier_call(nb, 0, NULL);
3157 mutex_unlock(&smd_module_init_notifier_lock);
Karthikeyan Ramasubramanian7069c482012-03-22 09:21:20 -06003158 return ret;
3159}
Karthikeyan Ramasubramanianed92ac22012-08-22 18:08:14 -06003160EXPORT_SYMBOL(smd_module_init_notifier_register);
Karthikeyan Ramasubramanian7069c482012-03-22 09:21:20 -06003161
Karthikeyan Ramasubramanianed92ac22012-08-22 18:08:14 -06003162int smd_module_init_notifier_unregister(struct notifier_block *nb)
Karthikeyan Ramasubramanian7069c482012-03-22 09:21:20 -06003163{
3164 int ret;
3165 if (!nb)
3166 return -EINVAL;
Karthikeyan Ramasubramanianed92ac22012-08-22 18:08:14 -06003167 mutex_lock(&smd_module_init_notifier_lock);
3168 ret = raw_notifier_chain_unregister(&smd_module_init_notifier_list,
Karthikeyan Ramasubramanian7069c482012-03-22 09:21:20 -06003169 nb);
Karthikeyan Ramasubramanianed92ac22012-08-22 18:08:14 -06003170 mutex_unlock(&smd_module_init_notifier_lock);
Karthikeyan Ramasubramanian7069c482012-03-22 09:21:20 -06003171 return ret;
3172}
Karthikeyan Ramasubramanianed92ac22012-08-22 18:08:14 -06003173EXPORT_SYMBOL(smd_module_init_notifier_unregister);
Karthikeyan Ramasubramanian7069c482012-03-22 09:21:20 -06003174
/*
 * Broadcast that SMD module init is complete.
 *
 * smd_module_inited is set to 1 *before* the chain runs and under the
 * same lock used by smd_module_init_notifier_register(), so a concurrent
 * registrant either sees the flag (and gets an immediate replay) or is
 * already on the chain for this broadcast — never neither.
 */
static void smd_module_init_notify(uint32_t state, void *data)
{
	mutex_lock(&smd_module_init_notifier_lock);
	smd_module_inited = 1;
	raw_notifier_call_chain(&smd_module_init_notifier_list,
					state, data);
	mutex_unlock(&smd_module_init_notifier_lock);
}
Brian Swetland5b0f5a32009-04-26 18:38:49 -07003183
3184int smd_core_init(void)
3185{
3186 int r;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003187 unsigned long flags = IRQF_TRIGGER_RISING;
3188 SMD_INFO("smd_core_init()\n");
Brian Swetland5b0f5a32009-04-26 18:38:49 -07003189
Brian Swetland37521a32009-07-01 18:30:47 -07003190 r = request_irq(INT_A9_M2A_0, smd_modem_irq_handler,
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003191 flags, "smd_dev", 0);
Brian Swetland2eb44eb2008-09-29 16:00:48 -07003192 if (r < 0)
3193 return r;
Arun Kumar Neelakantam240be2e2012-10-16 22:17:55 +05303194 interrupt_stats[SMD_MODEM].smd_interrupt_id = INT_A9_M2A_0;
Brian Swetland2eb44eb2008-09-29 16:00:48 -07003195 r = enable_irq_wake(INT_A9_M2A_0);
3196 if (r < 0)
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003197 pr_err("smd_core_init: "
3198 "enable_irq_wake failed for INT_A9_M2A_0\n");
Brian Swetland2eb44eb2008-09-29 16:00:48 -07003199
Eric Holmberg98c6c642012-02-24 11:29:35 -07003200 r = request_irq(INT_A9_M2A_5, smsm_modem_irq_handler,
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003201 flags, "smsm_dev", 0);
Brian Swetland2eb44eb2008-09-29 16:00:48 -07003202 if (r < 0) {
3203 free_irq(INT_A9_M2A_0, 0);
3204 return r;
3205 }
Arun Kumar Neelakantam240be2e2012-10-16 22:17:55 +05303206 interrupt_stats[SMD_MODEM].smsm_interrupt_id = INT_A9_M2A_5;
Brian Swetland2eb44eb2008-09-29 16:00:48 -07003207 r = enable_irq_wake(INT_A9_M2A_5);
3208 if (r < 0)
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003209 pr_err("smd_core_init: "
3210 "enable_irq_wake failed for INT_A9_M2A_5\n");
Brian Swetland2eb44eb2008-09-29 16:00:48 -07003211
Brian Swetland37521a32009-07-01 18:30:47 -07003212#if defined(CONFIG_QDSP6)
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003213#if (INT_ADSP_A11 == INT_ADSP_A11_SMSM)
3214 flags |= IRQF_SHARED;
3215#endif
Brian Swetland37521a32009-07-01 18:30:47 -07003216 r = request_irq(INT_ADSP_A11, smd_dsp_irq_handler,
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003217 flags, "smd_dev", smd_dsp_irq_handler);
Brian Swetland37521a32009-07-01 18:30:47 -07003218 if (r < 0) {
3219 free_irq(INT_A9_M2A_0, 0);
3220 free_irq(INT_A9_M2A_5, 0);
3221 return r;
3222 }
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003223
Arun Kumar Neelakantam240be2e2012-10-16 22:17:55 +05303224 interrupt_stats[SMD_Q6].smd_interrupt_id = INT_ADSP_A11;
Eric Holmberg98c6c642012-02-24 11:29:35 -07003225 r = request_irq(INT_ADSP_A11_SMSM, smsm_dsp_irq_handler,
3226 flags, "smsm_dev", smsm_dsp_irq_handler);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003227 if (r < 0) {
3228 free_irq(INT_A9_M2A_0, 0);
3229 free_irq(INT_A9_M2A_5, 0);
3230 free_irq(INT_ADSP_A11, smd_dsp_irq_handler);
3231 return r;
3232 }
3233
Arun Kumar Neelakantam240be2e2012-10-16 22:17:55 +05303234 interrupt_stats[SMD_Q6].smsm_interrupt_id = INT_ADSP_A11_SMSM;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003235 r = enable_irq_wake(INT_ADSP_A11);
3236 if (r < 0)
3237 pr_err("smd_core_init: "
3238 "enable_irq_wake failed for INT_ADSP_A11\n");
3239
3240#if (INT_ADSP_A11 != INT_ADSP_A11_SMSM)
3241 r = enable_irq_wake(INT_ADSP_A11_SMSM);
3242 if (r < 0)
3243 pr_err("smd_core_init: enable_irq_wake "
3244 "failed for INT_ADSP_A11_SMSM\n");
3245#endif
3246 flags &= ~IRQF_SHARED;
Brian Swetland37521a32009-07-01 18:30:47 -07003247#endif
3248
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003249#if defined(CONFIG_DSPS)
3250 r = request_irq(INT_DSPS_A11, smd_dsps_irq_handler,
3251 flags, "smd_dev", smd_dsps_irq_handler);
3252 if (r < 0) {
3253 free_irq(INT_A9_M2A_0, 0);
3254 free_irq(INT_A9_M2A_5, 0);
3255 free_irq(INT_ADSP_A11, smd_dsp_irq_handler);
Eric Holmberg98c6c642012-02-24 11:29:35 -07003256 free_irq(INT_ADSP_A11_SMSM, smsm_dsp_irq_handler);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003257 return r;
3258 }
Brian Swetland5b0f5a32009-04-26 18:38:49 -07003259
Arun Kumar Neelakantam240be2e2012-10-16 22:17:55 +05303260 interrupt_stats[SMD_DSPS].smd_interrupt_id = INT_DSPS_A11;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003261 r = enable_irq_wake(INT_DSPS_A11);
3262 if (r < 0)
3263 pr_err("smd_core_init: "
3264 "enable_irq_wake failed for INT_ADSP_A11\n");
Arve Hjønnevågec9d3d12009-06-16 14:48:21 -07003265#endif
Brian Swetland2eb44eb2008-09-29 16:00:48 -07003266
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003267#if defined(CONFIG_WCNSS)
3268 r = request_irq(INT_WCNSS_A11, smd_wcnss_irq_handler,
3269 flags, "smd_dev", smd_wcnss_irq_handler);
3270 if (r < 0) {
3271 free_irq(INT_A9_M2A_0, 0);
3272 free_irq(INT_A9_M2A_5, 0);
3273 free_irq(INT_ADSP_A11, smd_dsp_irq_handler);
Eric Holmberg98c6c642012-02-24 11:29:35 -07003274 free_irq(INT_ADSP_A11_SMSM, smsm_dsp_irq_handler);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003275 free_irq(INT_DSPS_A11, smd_dsps_irq_handler);
3276 return r;
3277 }
3278
Arun Kumar Neelakantam240be2e2012-10-16 22:17:55 +05303279 interrupt_stats[SMD_WCNSS].smd_interrupt_id = INT_WCNSS_A11;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003280 r = enable_irq_wake(INT_WCNSS_A11);
3281 if (r < 0)
3282 pr_err("smd_core_init: "
3283 "enable_irq_wake failed for INT_WCNSS_A11\n");
3284
Eric Holmberg98c6c642012-02-24 11:29:35 -07003285 r = request_irq(INT_WCNSS_A11_SMSM, smsm_wcnss_irq_handler,
3286 flags, "smsm_dev", smsm_wcnss_irq_handler);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003287 if (r < 0) {
3288 free_irq(INT_A9_M2A_0, 0);
3289 free_irq(INT_A9_M2A_5, 0);
3290 free_irq(INT_ADSP_A11, smd_dsp_irq_handler);
Eric Holmberg98c6c642012-02-24 11:29:35 -07003291 free_irq(INT_ADSP_A11_SMSM, smsm_dsp_irq_handler);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003292 free_irq(INT_DSPS_A11, smd_dsps_irq_handler);
3293 free_irq(INT_WCNSS_A11, smd_wcnss_irq_handler);
3294 return r;
3295 }
3296
Arun Kumar Neelakantam240be2e2012-10-16 22:17:55 +05303297 interrupt_stats[SMD_WCNSS].smsm_interrupt_id = INT_WCNSS_A11_SMSM;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003298 r = enable_irq_wake(INT_WCNSS_A11_SMSM);
3299 if (r < 0)
3300 pr_err("smd_core_init: "
3301 "enable_irq_wake failed for INT_WCNSS_A11_SMSM\n");
3302#endif
3303
Jeff Hugo6a8057c2011-08-16 13:47:12 -06003304#if defined(CONFIG_DSPS_SMSM)
Eric Holmberg98c6c642012-02-24 11:29:35 -07003305 r = request_irq(INT_DSPS_A11_SMSM, smsm_dsps_irq_handler,
3306 flags, "smsm_dev", smsm_dsps_irq_handler);
Jeff Hugo6a8057c2011-08-16 13:47:12 -06003307 if (r < 0) {
3308 free_irq(INT_A9_M2A_0, 0);
3309 free_irq(INT_A9_M2A_5, 0);
3310 free_irq(INT_ADSP_A11, smd_dsp_irq_handler);
Eric Holmberg98c6c642012-02-24 11:29:35 -07003311 free_irq(INT_ADSP_A11_SMSM, smsm_dsp_irq_handler);
Jeff Hugo6a8057c2011-08-16 13:47:12 -06003312 free_irq(INT_DSPS_A11, smd_dsps_irq_handler);
3313 free_irq(INT_WCNSS_A11, smd_wcnss_irq_handler);
Eric Holmberg98c6c642012-02-24 11:29:35 -07003314 free_irq(INT_WCNSS_A11_SMSM, smsm_wcnss_irq_handler);
Jeff Hugo6a8057c2011-08-16 13:47:12 -06003315 return r;
3316 }
3317
Arun Kumar Neelakantam240be2e2012-10-16 22:17:55 +05303318 interrupt_stats[SMD_DSPS].smsm_interrupt_id = INT_DSPS_A11_SMSM;
Jeff Hugo6a8057c2011-08-16 13:47:12 -06003319 r = enable_irq_wake(INT_DSPS_A11_SMSM);
3320 if (r < 0)
3321 pr_err("smd_core_init: "
3322 "enable_irq_wake failed for INT_DSPS_A11_SMSM\n");
3323#endif
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003324 SMD_INFO("smd_core_init() done\n");
3325
Brian Swetland2eb44eb2008-09-29 16:00:48 -07003326 return 0;
3327}
3328
Angshuman Sarkarbad32df2012-02-01 19:52:52 +05303329static int intr_init(struct interrupt_config_item *private_irq,
3330 struct smd_irq_config *platform_irq,
3331 struct platform_device *pdev
3332 )
3333{
3334 int irq_id;
3335 int ret;
3336 int ret_wake;
3337
3338 private_irq->out_bit_pos = platform_irq->out_bit_pos;
3339 private_irq->out_offset = platform_irq->out_offset;
3340 private_irq->out_base = platform_irq->out_base;
3341
3342 irq_id = platform_get_irq_byname(
3343 pdev,
3344 platform_irq->irq_name
3345 );
3346 SMD_DBG("smd: %s: register irq: %s id: %d\n", __func__,
3347 platform_irq->irq_name, irq_id);
3348 ret = request_irq(irq_id,
3349 private_irq->irq_handler,
3350 platform_irq->flags,
3351 platform_irq->device_name,
3352 (void *)platform_irq->dev_id
3353 );
3354 if (ret < 0) {
3355 platform_irq->irq_id = ret;
Eric Holmbergdeace152012-07-25 12:17:11 -06003356 private_irq->irq_id = ret;
Angshuman Sarkarbad32df2012-02-01 19:52:52 +05303357 } else {
3358 platform_irq->irq_id = irq_id;
Eric Holmbergdeace152012-07-25 12:17:11 -06003359 private_irq->irq_id = irq_id;
Angshuman Sarkarbad32df2012-02-01 19:52:52 +05303360 ret_wake = enable_irq_wake(irq_id);
3361 if (ret_wake < 0) {
3362 pr_err("smd: enable_irq_wake failed on %s",
3363 platform_irq->irq_name);
3364 }
3365 }
3366
3367 return ret;
3368}
3369
Jeff Hugobdc734d2012-03-26 16:05:39 -06003370int sort_cmp_func(const void *a, const void *b)
3371{
3372 struct smem_area *left = (struct smem_area *)(a);
3373 struct smem_area *right = (struct smem_area *)(b);
3374
3375 return left->phys_addr - right->phys_addr;
3376}
3377
/*
 * Platform-data driven SMD init: map the shared-memory regions described
 * by platform data (sorted by physical address for later lookup), then
 * request the SMD/SMSM interrupt pair for every subsystem edge.
 *
 * Error handling is goto-unwind: intr_failed frees whichever IRQs were
 * registered, then falls through smem_failed to unmap the regions mapped
 * so far (smem_idx is the count of successful ioremaps).
 *
 * Returns 0 on success or a negative errno.
 */
int smd_core_platform_init(struct platform_device *pdev)
{
	int i;
	int ret;
	uint32_t num_ss;
	struct smd_platform *smd_platform_data;
	struct smd_subsystem_config *smd_ss_config_list;
	struct smd_subsystem_config *cfg;
	int err_ret = 0;
	struct smd_smem_regions *smd_smem_areas;
	int smem_idx = 0;

	smd_platform_data = pdev->dev.platform_data;
	num_ss = smd_platform_data->num_ss_configs;
	smd_ss_config_list = smd_platform_data->smd_ss_configs;

	if (smd_platform_data->smd_ssr_config)
		disable_smsm_reset_handshake = smd_platform_data->
			   smd_ssr_config->disable_smsm_reset_handshake;

	smd_smem_areas = smd_platform_data->smd_smem_areas;
	if (smd_smem_areas) {
		num_smem_areas = smd_platform_data->num_smem_areas;
		smem_areas = kmalloc(sizeof(struct smem_area) * num_smem_areas,
						GFP_KERNEL);
		if (!smem_areas) {
			pr_err("%s: smem_areas kmalloc failed\n", __func__);
			err_ret = -ENOMEM;
			goto smem_areas_alloc_fail;
		}

		for (smem_idx = 0; smem_idx < num_smem_areas; ++smem_idx) {
			smem_areas[smem_idx].phys_addr =
					smd_smem_areas[smem_idx].phys_addr;
			smem_areas[smem_idx].size =
					smd_smem_areas[smem_idx].size;
			smem_areas[smem_idx].virt_addr = ioremap_nocache(
				(unsigned long)(smem_areas[smem_idx].phys_addr),
				smem_areas[smem_idx].size);
			if (!smem_areas[smem_idx].virt_addr) {
				pr_err("%s: ioremap_nocache() of addr:%p"
					" size: %x\n", __func__,
					smem_areas[smem_idx].phys_addr,
					smem_areas[smem_idx].size);
				err_ret = -ENOMEM;
				/* include the failed slot so the unwind
				 * below (which pre-decrements) skips it */
				++smem_idx;
				goto smem_failed;
			}
		}
		/* keep regions address-ordered for later lookups */
		sort(smem_areas, num_smem_areas,
				sizeof(struct smem_area),
				sort_cmp_func, NULL);
	}

	for (i = 0; i < num_ss; i++) {
		cfg = &smd_ss_config_list[i];

		ret = intr_init(
			&private_intr_config[cfg->irq_config_id].smd,
			&cfg->smd_int,
			pdev
			);

		if (ret < 0) {
			err_ret = ret;
			pr_err("smd: register irq failed on %s\n",
				cfg->smd_int.irq_name);
			goto intr_failed;
		}

		interrupt_stats[cfg->irq_config_id].smd_interrupt_id
						 = cfg->smd_int.irq_id;
		/* only init smsm structs if this edge supports smsm */
		if (cfg->smsm_int.irq_id)
			ret = intr_init(
				&private_intr_config[cfg->irq_config_id].smsm,
				&cfg->smsm_int,
				pdev
				);

		/* if smsm was skipped, ret still holds the (>= 0) smd
		 * result, so this check passes harmlessly */
		if (ret < 0) {
			err_ret = ret;
			pr_err("smd: register irq failed on %s\n",
				cfg->smsm_int.irq_name);
			goto intr_failed;
		}

		if (cfg->smsm_int.irq_id)
			interrupt_stats[cfg->irq_config_id].smsm_interrupt_id
						 = cfg->smsm_int.irq_id;
		if (cfg->subsys_name)
			strlcpy(edge_to_pids[cfg->edge].subsys_name,
				cfg->subsys_name, SMD_MAX_CH_NAME_LEN);
	}


	SMD_INFO("smd_core_platform_init() done\n");
	return 0;

intr_failed:
	pr_err("smd: deregistering IRQs\n");
	/* NOTE(review): this loop walks all num_ss entries, including ones
	 * intr_init() never touched; it relies on their platform irq_id
	 * fields not holding a stale valid-looking (>= 0) value — confirm
	 * against the board files that define this platform data. */
	for (i = 0; i < num_ss; ++i) {
		cfg = &smd_ss_config_list[i];

		if (cfg->smd_int.irq_id >= 0)
			free_irq(cfg->smd_int.irq_id,
				(void *)cfg->smd_int.dev_id
				);
		if (cfg->smsm_int.irq_id >= 0)
			free_irq(cfg->smsm_int.irq_id,
				(void *)cfg->smsm_int.dev_id
				);
	}
smem_failed:
	/* unmap in reverse; smem_idx == number of successful ioremaps */
	for (smem_idx = smem_idx - 1; smem_idx >= 0; --smem_idx)
		iounmap(smem_areas[smem_idx].virt_addr);
	kfree(smem_areas);
smem_areas_alloc_fail:
	return err_ret;
}
3498
Gregory Bean4416e9e2010-07-28 10:22:12 -07003499static int __devinit msm_smd_probe(struct platform_device *pdev)
Brian Swetland2eb44eb2008-09-29 16:00:48 -07003500{
Angshuman Sarkarbad32df2012-02-01 19:52:52 +05303501 int ret;
Daniel Walker0aec66d2010-03-18 12:31:08 -07003502
Angshuman Sarkarbad32df2012-02-01 19:52:52 +05303503 SMD_INFO("smd probe\n");
Brian Swetland2eb44eb2008-09-29 16:00:48 -07003504 INIT_WORK(&probe_work, smd_channel_probe_worker);
3505
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003506 channel_close_wq = create_singlethread_workqueue("smd_channel_close");
3507 if (IS_ERR(channel_close_wq)) {
3508 pr_err("%s: create_singlethread_workqueue ENOMEM\n", __func__);
3509 return -ENOMEM;
3510 }
3511
3512 if (smsm_init()) {
3513 pr_err("smsm_init() failed\n");
Brian Swetland2eb44eb2008-09-29 16:00:48 -07003514 return -1;
3515 }
3516
Angshuman Sarkarbad32df2012-02-01 19:52:52 +05303517 if (pdev) {
3518 if (pdev->dev.of_node) {
3519 pr_err("SMD: Device tree not currently supported\n");
3520 return -ENODEV;
3521 } else if (pdev->dev.platform_data) {
3522 ret = smd_core_platform_init(pdev);
3523 if (ret) {
3524 pr_err(
3525 "SMD: smd_core_platform_init() failed\n");
3526 return -ENODEV;
3527 }
3528 } else {
3529 ret = smd_core_init();
3530 if (ret) {
3531 pr_err("smd_core_init() failed\n");
3532 return -ENODEV;
3533 }
3534 }
3535 } else {
3536 pr_err("SMD: PDEV not found\n");
3537 return -ENODEV;
Brian Swetland2eb44eb2008-09-29 16:00:48 -07003538 }
Iliyan Malchev1207bab2009-11-15 18:16:43 -08003539
Brian Swetland2eb44eb2008-09-29 16:00:48 -07003540 smd_initialized = 1;
3541
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003542 smd_alloc_loopback_channel();
Eric Holmbergc33d4ab2011-10-24 10:28:25 -06003543 smsm_irq_handler(0, 0);
3544 tasklet_schedule(&smd_fake_irq_tasklet);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003545
Brian Swetland2eb44eb2008-09-29 16:00:48 -07003546 return 0;
3547}
3548
static int restart_notifier_cb(struct notifier_block *this,
				unsigned long code,
				void *data);

/*
 * Subsystem-restart hookup: one entry per restartable processor, each
 * routed to restart_notifier_cb.  Note "gss" deliberately maps to
 * SMD_MODEM as well — both edges share the modem channel-reset path.
 */
static struct restart_notifier_block restart_notifiers[] = {
	{SMD_MODEM, "modem", .nb.notifier_call = restart_notifier_cb},
	{SMD_Q6, "lpass", .nb.notifier_call = restart_notifier_cb},
	{SMD_WCNSS, "wcnss", .nb.notifier_call = restart_notifier_cb},
	{SMD_DSPS, "dsps", .nb.notifier_call = restart_notifier_cb},
	{SMD_MODEM, "gss", .nb.notifier_call = restart_notifier_cb},
};
3560
3561static int restart_notifier_cb(struct notifier_block *this,
3562 unsigned long code,
3563 void *data)
3564{
3565 if (code == SUBSYS_AFTER_SHUTDOWN) {
3566 struct restart_notifier_block *notifier;
3567
3568 notifier = container_of(this,
3569 struct restart_notifier_block, nb);
3570 SMD_INFO("%s: ssrestart for processor %d ('%s')\n",
3571 __func__, notifier->processor,
3572 notifier->name);
3573
3574 smd_channel_reset(notifier->processor);
3575 }
3576
3577 return NOTIFY_DONE;
3578}
3579
3580static __init int modem_restart_late_init(void)
3581{
3582 int i;
3583 void *handle;
3584 struct restart_notifier_block *nb;
3585
3586 for (i = 0; i < ARRAY_SIZE(restart_notifiers); i++) {
3587 nb = &restart_notifiers[i];
3588 handle = subsys_notif_register_notifier(nb->name, &nb->nb);
3589 SMD_DBG("%s: registering notif for '%s', handle=%p\n",
3590 __func__, nb->name, handle);
3591 }
3592 return 0;
3593}
3594late_initcall(modem_restart_late_init);
3595
/* Platform driver for the SMD core; bound by name from board/devtree
 * registration.  Probe does all the real bring-up (see msm_smd_probe). */
static struct platform_driver msm_smd_driver = {
	.probe = msm_smd_probe,
	.driver = {
		.name = MODULE_NAME,
		.owner = THIS_MODULE,
	},
};
3603
Mahesh Sivasubramaniand9041b02012-05-09 12:57:23 -06003604int __init msm_smd_init(void)
Brian Swetland2eb44eb2008-09-29 16:00:48 -07003605{
Mahesh Sivasubramaniand9041b02012-05-09 12:57:23 -06003606 static bool registered;
Karthikeyan Ramasubramanianed92ac22012-08-22 18:08:14 -06003607 int rc;
Mahesh Sivasubramaniand9041b02012-05-09 12:57:23 -06003608
3609 if (registered)
3610 return 0;
3611
3612 registered = true;
Karthikeyan Ramasubramanianed92ac22012-08-22 18:08:14 -06003613 rc = remote_spin_lock_init(&remote_spinlock, SMEM_SPINLOCK_SMEM_ALLOC);
3614 if (rc) {
3615 pr_err("%s: remote spinlock init failed %d\n", __func__, rc);
3616 return rc;
3617 }
3618 spinlocks_initialized = 1;
3619
3620 rc = platform_driver_register(&msm_smd_driver);
3621 if (rc) {
3622 pr_err("%s: msm_smd_driver register failed %d\n",
3623 __func__, rc);
3624 return rc;
3625 }
3626
3627 smd_module_init_notify(0, NULL);
3628
3629 return 0;
Brian Swetland2eb44eb2008-09-29 16:00:48 -07003630}
3631
/* Module entry point and metadata. */
module_init(msm_smd_init);

MODULE_DESCRIPTION("MSM Shared Memory Core");
MODULE_AUTHOR("Brian Swetland <swetland@google.com>");
MODULE_LICENSE("GPL");