/*
 * xHCI host controller driver
 *
 * Copyright (C) 2008 Intel Corp.
 *
 * Author: Sarah Sharp
 * Some code borrowed from the Linux EHCI driver.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
 * or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software Foundation,
 * Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */

#include <linux/usb.h>
#include <linux/pci.h>
#include <linux/dmapool.h>

#include "xhci.h"

/*
 * Allocates a generic ring segment from the ring pool, sets the dma address,
 * initializes the segment to zero, and sets the private next pointer to NULL.
 *
 * Section 4.11.1.1:
 * "All components of all Command and Transfer TRBs shall be initialized to '0'"
 */
static struct xhci_segment *xhci_segment_alloc(struct xhci_hcd *xhci, gfp_t flags)
{
	struct xhci_segment *seg;
	dma_addr_t dma;

	seg = kzalloc(sizeof *seg, flags);
	if (!seg)
		return NULL;
	xhci_dbg(xhci, "Allocating priv segment structure at %p\n", seg);

	seg->trbs = dma_pool_alloc(xhci->segment_pool, flags, &dma);
	if (!seg->trbs) {
		kfree(seg);
		return NULL;
	}
	xhci_dbg(xhci, "// Allocating segment at %p (virtual) 0x%llx (DMA)\n",
			seg->trbs, (unsigned long long) dma);

	memset(seg->trbs, 0, SEGMENT_SIZE);
	seg->dma = dma;
	seg->next = NULL;

	return seg;
}

static void xhci_segment_free(struct xhci_hcd *xhci, struct xhci_segment *seg)
{
	if (!seg)
		return;
	if (seg->trbs) {
		xhci_dbg(xhci, "Freeing DMA segment at %p"
				" (virtual) 0x%llx (DMA)\n",
				seg->trbs, (unsigned long long) seg->dma);
		dma_pool_free(xhci->segment_pool, seg->trbs, seg->dma);
		seg->trbs = NULL;
	}
	xhci_dbg(xhci, "Freeing priv segment structure at %p\n", seg);
	kfree(seg);
}

/*
 * Make the prev segment point to the next segment.
 *
 * Change the last TRB in the prev segment to be a Link TRB which points to the
 * DMA address of the next segment.  The caller needs to set any Link TRB
 * related flags, such as End TRB, Toggle Cycle, and no snoop; see the example
 * noted after this function.
 */
static void xhci_link_segments(struct xhci_hcd *xhci, struct xhci_segment *prev,
		struct xhci_segment *next, bool link_trbs)
{
	u32 val;

	if (!prev || !next)
		return;
	prev->next = next;
	if (link_trbs) {
		prev->trbs[TRBS_PER_SEGMENT-1].link.segment_ptr[0] = next->dma;

		/* Set the last TRB in the segment to have a TRB type ID of Link TRB */
		val = prev->trbs[TRBS_PER_SEGMENT-1].link.control;
		val &= ~TRB_TYPE_BITMASK;
		val |= TRB_TYPE(TRB_LINK);
		prev->trbs[TRBS_PER_SEGMENT-1].link.control = val;
	}
	xhci_dbg(xhci, "Linking segment 0x%llx to segment 0x%llx (DMA)\n",
			(unsigned long long) prev->dma,
			(unsigned long long) next->dma);
}
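
/*
 * For example, xhci_ring_alloc() below sets the Toggle Cycle bit
 * (LINK_TOGGLE) on the last link TRB once the segments have been
 * chained into a ring.
 */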

/* XXX: Do we need the hcd structure in all these functions? */
static void xhci_ring_free(struct xhci_hcd *xhci, struct xhci_ring *ring)
{
	struct xhci_segment *seg;
	struct xhci_segment *first_seg;

	if (!ring || !ring->first_seg)
		return;
	first_seg = ring->first_seg;
	seg = first_seg->next;
	xhci_dbg(xhci, "Freeing ring at %p\n", ring);
	while (seg != first_seg) {
		struct xhci_segment *next = seg->next;
		xhci_segment_free(xhci, seg);
		seg = next;
	}
	xhci_segment_free(xhci, first_seg);
	ring->first_seg = NULL;
	kfree(ring);
}

/*
 * Create a new ring with zero or more segments.
 *
 * Link each segment together into a ring.
 * Set the end flag and the cycle toggle bit on the last segment.
 * See section 4.9.1 and figures 15 and 16.
 */
static struct xhci_ring *xhci_ring_alloc(struct xhci_hcd *xhci,
		unsigned int num_segs, bool link_trbs, gfp_t flags)
{
	struct xhci_ring *ring;
	struct xhci_segment *prev;

	ring = kzalloc(sizeof *(ring), flags);
	xhci_dbg(xhci, "Allocating ring at %p\n", ring);
	if (!ring)
		return NULL;

	if (num_segs == 0)
		return ring;

	ring->first_seg = xhci_segment_alloc(xhci, flags);
	if (!ring->first_seg)
		goto fail;
	num_segs--;

	prev = ring->first_seg;
	while (num_segs > 0) {
		struct xhci_segment *next;

		next = xhci_segment_alloc(xhci, flags);
		if (!next)
			goto fail;
		xhci_link_segments(xhci, prev, next, link_trbs);

		prev = next;
		num_segs--;
	}
	xhci_link_segments(xhci, prev, ring->first_seg, link_trbs);

	if (link_trbs) {
		/* See section 4.9.2.1 and 6.4.4.1 */
		prev->trbs[TRBS_PER_SEGMENT-1].link.control |= (LINK_TOGGLE);
		xhci_dbg(xhci, "Wrote link toggle flag to"
				" segment %p (virtual), 0x%llx (DMA)\n",
				prev, (unsigned long long) prev->dma);
	}
	/* The ring is empty, so the enqueue pointer == dequeue pointer */
	ring->enqueue = ring->first_seg->trbs;
	ring->dequeue = ring->enqueue;
	/* The ring is initialized to 0.  The producer must write 1 to the cycle
	 * bit to hand over ownership of the TRB, so PCS = 1.  The consumer must
	 * compare CCS to the cycle bit to check ownership, so CCS = 1.
	 * (An ownership-check sketch follows this function.)
	 */
	ring->cycle_state = 1;

	return ring;

fail:
	xhci_ring_free(xhci, ring);
	return NULL;
}

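/*
 * Illustrative sketch (not part of the driver logic): a consumer whose
 * consumer cycle state (CCS) is tracked in ring->cycle_state would test
 * ownership of a TRB roughly like this, assuming the generic TRB layout
 * and the TRB_CYCLE bit from xhci.h:
 *
 *	if ((trb->generic.field[3] & TRB_CYCLE) == ring->cycle_state)
 *		;	// the TRB has been handed to the consumer
 */
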
void xhci_mem_cleanup(struct xhci_hcd *xhci)
{
	struct pci_dev	*pdev = to_pci_dev(xhci_to_hcd(xhci)->self.controller);
	int size;

	/* XXX: Free all the segments in the various rings */

	/* Free the Event Ring Segment Table and the actual Event Ring */
	xhci_writel(xhci, 0, &xhci->ir_set->erst_size);
	xhci_writel(xhci, 0, &xhci->ir_set->erst_base[1]);
	xhci_writel(xhci, 0, &xhci->ir_set->erst_base[0]);
	xhci_writel(xhci, 0, &xhci->ir_set->erst_dequeue[1]);
	xhci_writel(xhci, 0, &xhci->ir_set->erst_dequeue[0]);
	size = sizeof(struct xhci_erst_entry)*(xhci->erst.num_entries);
	if (xhci->erst.entries)
		pci_free_consistent(pdev, size,
				xhci->erst.entries, xhci->erst.erst_dma_addr);
	xhci->erst.entries = NULL;
	xhci_dbg(xhci, "Freed ERST\n");
	if (xhci->event_ring)
		xhci_ring_free(xhci, xhci->event_ring);
	xhci->event_ring = NULL;
	xhci_dbg(xhci, "Freed event ring\n");

	xhci_writel(xhci, 0, &xhci->op_regs->cmd_ring[1]);
	xhci_writel(xhci, 0, &xhci->op_regs->cmd_ring[0]);
	if (xhci->cmd_ring)
		xhci_ring_free(xhci, xhci->cmd_ring);
	xhci->cmd_ring = NULL;
	xhci_dbg(xhci, "Freed command ring\n");
	if (xhci->segment_pool)
		dma_pool_destroy(xhci->segment_pool);
	xhci->segment_pool = NULL;
	xhci_dbg(xhci, "Freed segment pool\n");
	xhci->page_size = 0;
	xhci->page_shift = 0;
}

int xhci_mem_init(struct xhci_hcd *xhci, gfp_t flags)
{
	dma_addr_t	dma;
	struct device	*dev = xhci_to_hcd(xhci)->self.controller;
	unsigned int	val, val2;
	struct xhci_segment	*seg;
	u32 page_size;
	int i;

	page_size = xhci_readl(xhci, &xhci->op_regs->page_size);
	xhci_dbg(xhci, "Supported page size register = 0x%x\n", page_size);
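	/*
	 * The PAGESIZE register is a bitmap: bit n set means the HC
	 * supports a page size of 2^(n+12) bytes (xHCI section 5.4.3).
	 * Scan for the lowest supported size.
	 */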
	for (i = 0; i < 16; i++) {
		if ((0x1 & page_size) != 0)
			break;
		page_size = page_size >> 1;
	}
	if (i < 16)
		xhci_dbg(xhci, "Supported page size of %iK\n", (1 << (i+12)) / 1024);
	else
		xhci_warn(xhci, "WARN: no supported page size\n");
	/* Use 4K pages, since that's common and the minimum the HC supports */
	xhci->page_shift = 12;
	xhci->page_size = 1 << xhci->page_shift;
	xhci_dbg(xhci, "HCD page size set to %iK\n", xhci->page_size / 1024);

	/*
	 * Program the Number of Device Slots Enabled field in the CONFIG
	 * register with the max value of slots the HC can handle.
	 */
	val = HCS_MAX_SLOTS(xhci_readl(xhci, &xhci->cap_regs->hcs_params1));
	xhci_dbg(xhci, "// xHC can handle at most %d device slots.\n",
			(unsigned int) val);
	val2 = xhci_readl(xhci, &xhci->op_regs->config_reg);
	val |= (val2 & ~HCS_SLOTS_MASK);
	xhci_dbg(xhci, "// Setting Max device slots reg = 0x%x.\n",
			(unsigned int) val);
	xhci_writel(xhci, val, &xhci->op_regs->config_reg);

	/*
	 * Initialize the ring segment pool.  The ring must be a contiguous
	 * structure composed of TRBs.  The TRBs must be 16 byte aligned;
	 * however, the command ring segments need 64-byte alignment, so
	 * we pick the greater alignment need.
	 */
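	/*
	 * Note: the last dma_pool_create() argument is the boundary that
	 * allocations must not cross; passing xhci->page_size keeps a
	 * segment from straddling a page.
	 */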
	xhci->segment_pool = dma_pool_create("xHCI ring segments", dev,
			SEGMENT_SIZE, 64, xhci->page_size);
	if (!xhci->segment_pool)
		goto fail;

	/* Set up the command ring to have one segment for now. */
	xhci->cmd_ring = xhci_ring_alloc(xhci, 1, true, flags);
	if (!xhci->cmd_ring)
		goto fail;
	xhci_dbg(xhci, "Allocated command ring at %p\n", xhci->cmd_ring);
	xhci_dbg(xhci, "First segment DMA is 0x%llx\n",
			(unsigned long long) xhci->cmd_ring->first_seg->dma);

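	/*
	 * The low bits of the Command Ring Control register hold flags
	 * (including the Ring Cycle State in bit 0) rather than address
	 * bits, which is why the ring's DMA address is masked in below
	 * and why the segment pool above enforces 64-byte alignment.
	 */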
	/* Set the address in the Command Ring Control register */
	val = xhci_readl(xhci, &xhci->op_regs->cmd_ring[0]);
	val = (val & ~CMD_RING_ADDR_MASK) |
		(xhci->cmd_ring->first_seg->dma & CMD_RING_ADDR_MASK) |
		xhci->cmd_ring->cycle_state;
	xhci_dbg(xhci, "// Setting command ring address high bits to 0x0\n");
	xhci_writel(xhci, (u32) 0, &xhci->op_regs->cmd_ring[1]);
	xhci_dbg(xhci, "// Setting command ring address low bits to 0x%x\n", val);
	xhci_writel(xhci, val, &xhci->op_regs->cmd_ring[0]);
	xhci_dbg_cmd_ptrs(xhci);

	val = xhci_readl(xhci, &xhci->cap_regs->db_off);
	val &= DBOFF_MASK;
	xhci_dbg(xhci, "// Doorbell array is located at offset 0x%x"
			" from cap regs base addr\n", val);
	xhci->dba = (void *) xhci->cap_regs + val;
	xhci_dbg_regs(xhci);
	xhci_print_run_regs(xhci);
	/* Set ir_set to interrupt register set 0 */
	xhci->ir_set = (void *) xhci->run_regs->ir_set;

	/*
	 * Event ring setup: Allocate a normal ring, but also set up
	 * the event ring segment table (ERST).  Section 4.9.3.
	 */
	xhci_dbg(xhci, "// Allocating event ring\n");
	xhci->event_ring = xhci_ring_alloc(xhci, ERST_NUM_SEGS, false, flags);
	if (!xhci->event_ring)
		goto fail;

	xhci->erst.entries = pci_alloc_consistent(to_pci_dev(dev),
			sizeof(struct xhci_erst_entry)*ERST_NUM_SEGS, &dma);
	if (!xhci->erst.entries)
		goto fail;
	xhci_dbg(xhci, "// Allocated event ring segment table at 0x%llx\n",
			(unsigned long long) dma);

	memset(xhci->erst.entries, 0, sizeof(struct xhci_erst_entry)*ERST_NUM_SEGS);
	xhci->erst.num_entries = ERST_NUM_SEGS;
	xhci->erst.erst_dma_addr = dma;
	xhci_dbg(xhci, "Set ERST to 0; private num segs = %i, virt addr = %p, dma addr = 0x%llx\n",
			xhci->erst.num_entries,
			xhci->erst.entries,
			(unsigned long long) xhci->erst.erst_dma_addr);

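	/*
	 * Each ERST entry describes one event ring segment: a 64-bit
	 * segment base address (split across two u32s in this driver's
	 * register layout) and a segment size counted in TRBs (section 6.5).
	 */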
	/* set ring base address and size for each segment table entry */
	for (val = 0, seg = xhci->event_ring->first_seg; val < ERST_NUM_SEGS; val++) {
		struct xhci_erst_entry *entry = &xhci->erst.entries[val];
		entry->seg_addr[1] = 0;
		entry->seg_addr[0] = seg->dma;
		entry->seg_size = TRBS_PER_SEGMENT;
		entry->rsvd = 0;
		seg = seg->next;
	}

	/* set ERST count with the number of entries in the segment table */
	val = xhci_readl(xhci, &xhci->ir_set->erst_size);
	val &= ERST_SIZE_MASK;
	val |= ERST_NUM_SEGS;
	xhci_dbg(xhci, "// Write ERST size = %i to ir_set 0 (some bits preserved)\n",
			val);
	xhci_writel(xhci, val, &xhci->ir_set->erst_size);

	xhci_dbg(xhci, "// Set ERST entries to point to event ring.\n");
	/* set the segment table base address */
	xhci_dbg(xhci, "// Set ERST base address for ir_set 0 = 0x%llx\n",
			(unsigned long long) xhci->erst.erst_dma_addr);
	xhci_writel(xhci, 0, &xhci->ir_set->erst_base[1]);
	val = xhci_readl(xhci, &xhci->ir_set->erst_base[0]);
	val &= ERST_PTR_MASK;
	val |= (xhci->erst.erst_dma_addr & ~ERST_PTR_MASK);
	xhci_writel(xhci, val, &xhci->ir_set->erst_base[0]);

	/* Set the event ring dequeue address */
	xhci_dbg(xhci, "// Set ERST dequeue address for ir_set 0 = 0x%x%08x\n",
			xhci->erst.entries[0].seg_addr[1],
			xhci->erst.entries[0].seg_addr[0]);
	val = xhci_readl(xhci, &xhci->run_regs->ir_set[0].erst_dequeue[0]);
	val &= ERST_PTR_MASK;
	val |= (xhci->erst.entries[0].seg_addr[0] & ~ERST_PTR_MASK);
	xhci_writel(xhci, val, &xhci->run_regs->ir_set[0].erst_dequeue[0]);
	xhci_writel(xhci, xhci->erst.entries[0].seg_addr[1],
			&xhci->run_regs->ir_set[0].erst_dequeue[1]);
	xhci_dbg(xhci, "Wrote ERST address to ir_set 0.\n");
	xhci_print_ir_set(xhci, xhci->ir_set, 0);

	/*
	 * XXX: Might need to set the Interrupter Moderation Register to
	 * something other than the default (~1ms minimum between interrupts).
	 * See section 5.5.1.2.
	 */
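	/*
	 * Illustrative only: assuming ir_set->irq_control maps the IMOD
	 * register, a shorter moderation interval (in 250 ns units,
	 * bits 15:0) could be programmed with something like
	 *
	 *	xhci_writel(xhci, imod_interval & 0xffff,
	 *			&xhci->ir_set->irq_control);
	 *
	 * taking care to preserve the register's upper bits.
	 */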

	return 0;
fail:
	xhci_warn(xhci, "Couldn't initialize memory\n");
	xhci_mem_cleanup(xhci);
	return -ENOMEM;
}