aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorNishanth Menon2015-10-28 11:42:34 -0500
committerTero Kristo2015-10-30 04:50:36 -0500
commit96671d9f698b048c9998ccdc9500b908143f8134 (patch)
tree99c34b95db5fbfc600db692928712eea78223463 /drivers/firmware/ti_sci.c
parentc72325aa6a7023d92f0a4685ca1dbd38a28b7ef8 (diff)
downloadti-linux-kernel-96671d9f698b048c9998ccdc9500b908143f8134.tar.gz
ti-linux-kernel-96671d9f698b048c9998ccdc9500b908143f8134.tar.xz
ti-linux-kernel-96671d9f698b048c9998ccdc9500b908143f8134.zip
firmware: Add support for TI System Control Interface (TI-SCI) protocol
Texas Instruments' System Control Interface (TI-SCI) Message Protocol is used in Texas Instruments' System on Chip (SoC), such as those in the keystone family K2G SoC, to communicate between various compute processors and a central system controller entity. The SCI message protocol provides support for management of various hardware entities within the SoC. Add a support driver to allow communication with the system controller entity within the SoC using the mailbox client. We introduce the basic registration and query capability for the driver protocol as part of this change. Subsequent patches add in functionality specific to the TI-SCI features. Signed-off-by: Nishanth Menon <nm@ti.com>
Diffstat (limited to 'drivers/firmware/ti_sci.c')
-rw-r--r--drivers/firmware/ti_sci.c790
1 files changed, 790 insertions, 0 deletions
diff --git a/drivers/firmware/ti_sci.c b/drivers/firmware/ti_sci.c
new file mode 100644
index 000000000000..f874d9c8c611
--- /dev/null
+++ b/drivers/firmware/ti_sci.c
@@ -0,0 +1,790 @@
1/*
2 * Texas Instruments System Control Interface Protocol Driver
3 *
4 * Copyright (C) 2015 Texas Instruments Incorporated - http://www.ti.com/
5 * Nishanth Menon
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License version 2 as
9 * published by the Free Software Foundation.
10 *
11 * This program is distributed "as is" WITHOUT ANY WARRANTY of any
12 * kind, whether express or implied; without even the implied warranty
13 * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 */
16
17#define pr_fmt(fmt) "%s: " fmt, __func__
18
19#include <linux/bitmap.h>
20#include <linux/debugfs.h>
21#include <linux/export.h>
22#include <linux/io.h>
23#include <linux/kernel.h>
24#include <linux/mailbox_client.h>
25#include <linux/module.h>
26#include <linux/of_device.h>
27#include <linux/semaphore.h>
28#include <linux/slab.h>
29#include <linux/ti-msgmgr.h>
30#include <linux/ti_sci_protocol.h>
31
32#include "ti_sci.h"
33
/* List of all TI SCI devices active in system */
static LIST_HEAD(ti_sci_list);
/* Protection for the entire list (and the per-instance user counts) */
static DEFINE_MUTEX(ti_sci_list_mutex);
38
/**
 * struct ti_sci_xfer - Structure representing a message flow
 * @tx_message:	Transmit message
 * @rx_len:	Receive message length
 * @xfer_buf:	Preallocated buffer to store receive message
 *		Since we work with request-ACK protocol, we can
 *		reuse the same buffer for the rx path as we
 *		use for the tx path.
 * @done:	completion event signalled from the rx callback when the
 *		response for this transfer arrives
 */
struct ti_sci_xfer {
	struct ti_msgmgr_message tx_message;
	u8 rx_len;
	u8 *xfer_buf;
	struct completion done;
};
55
/**
 * struct ti_sci_xfers_info - Structure to manage transfer information
 * @sem_xfer_count:	Counting Semaphore for managing max simultaneous
 *			Messages.
 * @xfer_block:		Preallocated Message array
 * @xfer_alloc_table:	Bitmap table for allocated messages.
 *			Index of this bitmap table is also used for message
 *			sequence identifier.
 * @xfer_lock:		Protection for message allocation
 */
struct ti_sci_xfers_info {
	struct semaphore sem_xfer_count;
	struct ti_sci_xfer *xfer_block;
	unsigned long *xfer_alloc_table;
	/* protect transfer allocation (bitmap updates) */
	spinlock_t xfer_lock;
};
73
/**
 * struct ti_sci_desc - Description of SoC integration
 * @host_id:		Host identifier representing the compute entity
 * @max_rx_timeout_ms:	Timeout for communication with SoC (in Milliseconds)
 * @max_msgs:		Maximum number of messages that can be pending
 *			simultaneously in the system
 * @max_msg_size:	Maximum size of data per message that can be handled.
 */
struct ti_sci_desc {
	u8 host_id;
	int max_rx_timeout_ms;
	int max_msgs;
	int max_msg_size;
};
88
/**
 * struct ti_sci_info - Structure representing a TI SCI instance
 * @dev:	Device pointer
 * @desc:	SoC description for this instance
 * @d:		Debugfs file entry
 * @debug_region: Memory region where the debug message are available
 * @debug_buffer: Buffer allocated to copy debug messages.
 * @debug_region_size: Debug region size
 * @handle:	Instance of TI SCI handle to send to clients.
 * @cl:		Mailbox Client
 * @chan_tx:	Transmit mailbox channel
 * @chan_rx:	Receive mailbox channel
 * @minfo:	Message info
 * @node:	list head
 * @users:	Number of users of this instance
 */
struct ti_sci_info {
	struct device *dev;
	const struct ti_sci_desc *desc;
	struct dentry *d;
	void __iomem *debug_region;
	char *debug_buffer;
	size_t debug_region_size;
	struct ti_sci_handle handle;
	struct mbox_client cl;
	struct mbox_chan *chan_tx;
	struct mbox_chan *chan_rx;
	struct ti_sci_xfers_info minfo;
	struct list_head node;
	/* protected by ti_sci_list_mutex */
	int users;
};
121
/* container_of() converters from embedded members back to the instance */
#define cl_to_ti_sci_info(cl)	container_of(cl, struct ti_sci_info, cl)
#define handle_to_ti_sci_info(handle)	container_of(handle, struct ti_sci_info,\
						     handle)
125#ifdef CONFIG_DEBUG_FS
126
/**
 * ti_sci_debug_show() - Helper to dump the debug log
 * @s:	sequence file pointer
 * @unused: unused.
 *
 * Return: 0
 */
static int ti_sci_debug_show(struct seq_file *s, void *unused)
{
	struct ti_sci_info *info = s->private;

	/* Snapshot the device-side log region into the CPU-side buffer */
	memcpy_fromio(info->debug_buffer, info->debug_region,
		      info->debug_region_size);
	/*
	 * XXX:
	 * 1. Can we trust firmware to leave NULL terminated last byte??
	 * 2. What do we do when log rolls over - how do we detect that and
	 *    provide messages in the right order??
	 * TOBEFIXED: rewrite code as per final debug strategy.
	 */
	seq_puts(s, info->debug_buffer);
	return 0;
}
150
/**
 * ti_sci_debug_open() - debug file open
 * @inode:	inode pointer
 * @file:	file pointer
 *
 * Return: result of single_open
 */
static int ti_sci_debug_open(struct inode *inode, struct file *file)
{
	/* inode->i_private carries the ti_sci_info set at creation time */
	return single_open(file, ti_sci_debug_show, inode->i_private);
}
162
/* log file operations (read-only seq_file based dump) */
static const struct file_operations ti_sci_debug_fops = {
	.open = ti_sci_debug_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};
170
171/**
172 * ti_sci_debugfs_create() - Create log debug file
173 * @pdev: platform device pointer
174 * @info: Pointer to SCI entity information
175 *
176 * Return: 0 if all went fine, else corresponding error.
177 */
178static int ti_sci_debugfs_create(struct platform_device *pdev,
179 struct ti_sci_info *info)
180{
181 struct device *dev = &pdev->dev;
182 struct resource *res;
183 char debug_name[50] = "ti_sci_debug@";
184
185 /* Debug region is optional */
186 res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
187 "debug_messages");
188 info->debug_region = devm_ioremap_resource(dev, res);
189 if (IS_ERR(info->debug_region))
190 return 0;
191 info->debug_region_size = res->end - res->start;
192
193 info->debug_buffer = devm_kcalloc(dev, info->debug_region_size + 1,
194 sizeof(char), GFP_KERNEL);
195 if (!info->debug_buffer)
196 return -ENOMEM;
197 /* Setup NULL termination */
198 info->debug_buffer[info->debug_region_size] = 0;
199
200 info->d = debugfs_create_file(strncat(debug_name, dev_name(dev),
201 sizeof(debug_name)),
202 S_IRUGO, NULL, info, &ti_sci_debug_fops);
203 if (IS_ERR(info->d))
204 return PTR_ERR(info->d);
205
206 dev_dbg(dev, "Debug region => %p, size = %zu bytes, resource: %pr\n",
207 info->debug_region, info->debug_region_size, res);
208 return 0;
209}
210
/**
 * ti_sci_debugfs_destroy() - clean up log debug file
 * @pdev:	platform device pointer
 * @info:	Pointer to SCI entity information
 */
static void ti_sci_debugfs_destroy(struct platform_device *pdev,
				   struct ti_sci_info *info)
{
	/* Nothing to tear down if the optional debug region never mapped */
	if (IS_ERR(info->debug_region))
		return;

	debugfs_remove(info->d);
}
224#else /* CONFIG_DEBUG_FS */
/* No-op stubs when debugfs support is not built in */
static inline int ti_sci_debugfs_create(struct platform_device *dev,
					struct ti_sci_info *info)
{
	return 0;
}

static inline void ti_sci_debugfs_destroy(struct platform_device *dev,
					  struct ti_sci_info *info)
{
}
235#endif /* CONFIG_DEBUG_FS */
236
/**
 * ti_sci_dump_header_dbg() - Helper to dump a message header.
 * @dev:	Device pointer corresponding to the SCI entity
 * @hdr:	pointer to header.
 */
static inline void ti_sci_dump_header_dbg(struct device *dev,
					  struct ti_sci_msg_hdr *hdr)
{
	dev_dbg(dev, "MSGHDR:type=0x%04x host=0x%02x seq=0x%02x flags=0x%08x\n",
		hdr->type, hdr->host, hdr->seq, hdr->flags);
}
248
/**
 * ti_sci_rx_callback() - mailbox client callback for receive messages
 * @cl:	client pointer
 * @m:	mailbox message
 *
 * Processes one received message to appropriate transfer information and
 * signals completion of the transfer.
 *
 * NOTE: This function will be invoked in IRQ context, hence should be
 * as optimal as possible.
 */
static void ti_sci_rx_callback(struct mbox_client *cl, void *m)
{
	struct ti_sci_info *info = cl_to_ti_sci_info(cl);
	struct device *dev = info->dev;
	struct ti_sci_xfers_info *minfo = &info->minfo;
	struct ti_msgmgr_message *mbox_msg = m;
	struct ti_sci_msg_hdr *hdr = (struct ti_sci_msg_hdr *)mbox_msg->buf;
	struct ti_sci_xfer *xfer;
	u8 xfer_id;

	/* The sequence number echoed by firmware indexes the xfer table */
	xfer_id = hdr->seq;

	/*
	 * Are we even expecting this?
	 * NOTE: barriers were implicit in locks used for modifying the bitmap
	 */
	if (!test_bit(xfer_id, minfo->xfer_alloc_table)) {
		dev_err(dev, "Message for %d is not expected!\n", xfer_id);
		return;
	}

	xfer = &minfo->xfer_block[xfer_id];

	/* Is the message of valid length? */
	if (mbox_msg->len > info->desc->max_msg_size) {
		dev_err(dev, "Unable to handle %d xfer(max %d)\n",
			mbox_msg->len, info->desc->max_msg_size);
		ti_sci_dump_header_dbg(dev, hdr);
		return;
	}
	if (mbox_msg->len < xfer->rx_len) {
		dev_err(dev, "Recv xfer %d < expected %d length\n",
			mbox_msg->len, xfer->rx_len);
		ti_sci_dump_header_dbg(dev, hdr);
		return;
	}

	ti_sci_dump_header_dbg(dev, hdr);
	/* Take a copy to the rx buffer.. */
	memcpy(xfer->xfer_buf, mbox_msg->buf, xfer->rx_len);
	/* Wake up the waiter in ti_sci_do_xfer() */
	complete(&xfer->done);
}
302
303/**
304 * ti_sci_get_one_xfer() - Allocate one message
305 * @info: Pointer to SCI entity information
306 * @msg_type: Message type
307 * @msg_flags: Flag to set for the message
308 * @tx_message_size: transmit message size
309 * @rx_message_size: receive message size
310 *
311 * Helper function which is used by various command functions that are
312 * exposed to clients of this driver for allocating a message traffic event.
313 *
314 * This function can sleep depending on pending requests already in the system
315 * for the SCI entity. Further, this also holds a spinlock to maintain integrity
316 * of internal data structures.
317 *
318 * Return: 0 if all went fine, else corresponding error.
319 */
320static struct ti_sci_xfer *ti_sci_get_one_xfer(struct ti_sci_info *info,
321 u16 msg_type, u32 msg_flags,
322 size_t tx_message_size,
323 size_t rx_message_size)
324{
325 struct ti_sci_xfers_info *minfo = &info->minfo;
326 struct ti_sci_xfer *xfer;
327 struct ti_sci_msg_hdr *hdr;
328 unsigned long flags;
329 unsigned long bit_pos;
330 u8 xfer_id;
331 int ret;
332 int timeout;
333
334 /* Ensure we have sane transfer sizes */
335 if (rx_message_size > info->desc->max_msg_size ||
336 tx_message_size > info->desc->max_msg_size ||
337 rx_message_size < sizeof(*hdr) || rx_message_size < sizeof(*hdr))
338 return ERR_PTR(-ERANGE);
339
340 /*
341 * Ensure we have only controlled number of pending messages.
342 * Ideally, we might just have to wait a single message, be
343 * conservative and wait 5 times that..
344 */
345 timeout = msecs_to_jiffies(info->desc->max_rx_timeout_ms) * 5;
346 ret = down_timeout(&minfo->sem_xfer_count, timeout);
347 if (ret < 0)
348 return ERR_PTR(ret);
349
350 /* Keep the locked section as small as possible */
351 spin_lock_irqsave(&minfo->xfer_lock, flags);
352 bit_pos = find_first_zero_bit(minfo->xfer_alloc_table,
353 info->desc->max_msgs);
354 set_bit(bit_pos, minfo->xfer_alloc_table);
355 spin_unlock_irqrestore(&minfo->xfer_lock, flags);
356
357 /*
358 * We already ensured in probe that we can have max messages that can
359 * fit in hdr.seq - NOTE: this improves access latencies
360 * to predictable O(1) access, BUT, it opens us to risk if
361 * remote misbehaves with corrupted message sequence responses.
362 * If that happens, we are going to be messed up anyways..
363 */
364 xfer_id = (u8)bit_pos;
365
366 xfer = &minfo->xfer_block[xfer_id];
367
368 hdr = (struct ti_sci_msg_hdr *)xfer->tx_message.buf;
369 xfer->tx_message.len = tx_message_size;
370 xfer->rx_len = (u8)rx_message_size;
371
372 reinit_completion(&xfer->done);
373
374 hdr->seq = xfer_id;
375 hdr->type = msg_type;
376 hdr->host = info->desc->host_id;
377 hdr->flags = msg_flags;
378
379 return xfer;
380}
381
/**
 * ti_sci_put_one_xfer() - Release a message
 * @minfo:	transfer info pointer
 * @xfer:	message that was reserved by ti_sci_get_one_xfer
 *
 * This holds a spinlock to maintain integrity of internal data structures.
 */
static void ti_sci_put_one_xfer(struct ti_sci_xfers_info *minfo,
				struct ti_sci_xfer *xfer)
{
	unsigned long flags;
	struct ti_sci_msg_hdr *hdr;
	u8 xfer_id;

	/* The sequence id written at allocation time locates the bitmap bit */
	hdr = (struct ti_sci_msg_hdr *)xfer->tx_message.buf;
	xfer_id = hdr->seq;

	/*
	 * Keep the locked section as small as possible
	 * NOTE: we might escape with smp_mb and no lock here..
	 * but just be conservative and symmetric.
	 */
	spin_lock_irqsave(&minfo->xfer_lock, flags);
	clear_bit(xfer_id, minfo->xfer_alloc_table);
	spin_unlock_irqrestore(&minfo->xfer_lock, flags);

	/* Increment the count for the next user to get through */
	up(&minfo->sem_xfer_count);
}
411
/**
 * ti_sci_do_xfer() - Do one transfer
 * @info:	Pointer to SCI entity information
 * @xfer:	Transfer to initiate and wait for response
 *
 * Return: -ETIMEDOUT in case of no response, if transmit error,
 *	   return corresponding error, else if all goes well,
 *	   return 0.
 */
static inline int ti_sci_do_xfer(struct ti_sci_info *info,
				 struct ti_sci_xfer *xfer)
{
	int ret;
	int timeout;
	struct device *dev = info->dev;

	ret = mbox_send_message(info->chan_tx, &xfer->tx_message);
	if (ret < 0)
		return ret;

	/*
	 * NOTE: we don't need the mailbox ticker to manage the transfer
	 * queueing since the protocol layer queues things by itself. So,
	 * kick it once we are done with current transmit. This forces
	 * the mailbox framework to submit next message allowing for
	 * transmission of next message to occur in parallel to processing
	 * in TISCI entity and subsequent response of the current message.
	 */
	mbox_client_txdone(info->chan_tx, 0);
	/* mbox_send_message returns a non-negative token; normalize to 0 */
	ret = 0;

	/* And we wait for the response. */
	timeout = msecs_to_jiffies(info->desc->max_rx_timeout_ms);
	if (!wait_for_completion_timeout(&xfer->done, timeout)) {
		dev_err(dev, "Mbox timedout in resp(caller: %pF)\n",
			(void *)_RET_IP_);
		ret = -ETIMEDOUT;
	}

	return ret;
}
453
454/**
455 * ti_sci_cmd_get_revision() - command to get the revision of the SCI entity
456 * @info: Pointer to SCI entity information
457 *
458 * Updates the SCI information in the internal data structure.
459 *
460 * Return: 0 if all went fine, else return appropriate error.
461 */
462static int ti_sci_cmd_get_revision(struct ti_sci_info *info)
463{
464 struct device *dev = info->dev;
465 struct ti_sci_handle *handle = &info->handle;
466 struct ti_sci_version_info *ver = &handle->version;
467 struct ti_sci_msg_resp_version *rev_info;
468 struct ti_sci_xfer *xfer;
469 int ret;
470
471 /* No need to setup flags since it is expected to respond */
472 xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_VERSION,
473 0x0, sizeof(struct ti_sci_msg_hdr),
474 sizeof(*rev_info));
475 if (IS_ERR(xfer)) {
476 ret = PTR_ERR(xfer);
477 dev_err(dev, "Message alloc failed(%d)\n", ret);
478 return ret;
479 }
480
481 rev_info = (struct ti_sci_msg_resp_version *)xfer->xfer_buf;
482
483 ret = ti_sci_do_xfer(info, xfer);
484 if (ret) {
485 dev_err(dev, "Mbox send fail %d\n", ret);
486 goto fail;
487 }
488
489 ver->abi_major = rev_info->abi_major;
490 ver->abi_minor = rev_info->abi_minor;
491 ver->firmware_revision = rev_info->firmware_revision;
492 strncpy(ver->firmware_description, rev_info->firmware_description,
493 sizeof(ver->firmware_description));
494
495fail:
496 ti_sci_put_one_xfer(&info->minfo, xfer);
497 return ret;
498}
499
500/**
501 * ti_sci_get_handle() - Get the TI SCI handle for a device
502 * @dev: Pointer to device for which we want SCI handle
503 *
504 * NOTE: The function does not track individual clients of the framework
505 * and is expected to be maintained by caller of TI SCI protocol library.
506 * ti_sci_put_handle must be balanced with successful ti_sci_get_handle
507 * Return: pointer to handle if successful, else:
508 * -EPROBE_DEFER if the instance is not ready
509 * -ENODEV if the required node handler is missing
510 * -EINVAL if invalid conditions are encountered.
511 */
512const struct ti_sci_handle *ti_sci_get_handle(struct device *dev)
513{
514 struct device_node *np;
515 struct device_node *ti_sci_np;
516 struct list_head *p;
517 struct ti_sci_handle *handle = NULL;
518 struct ti_sci_info *info;
519
520 if (!dev) {
521 pr_err("I need a device pointer\n");
522 return ERR_PTR(-EINVAL);
523 }
524 np = dev->of_node;
525 if (!np) {
526 dev_err(dev, "No OF information\n");
527 return ERR_PTR(-EINVAL);
528 }
529
530 ti_sci_np = of_parse_phandle(np, "ti,sci", 0);
531 if (!ti_sci_np) {
532 dev_err(dev, "Needs a 'ti,sci' phandle\n");
533 return ERR_PTR(-ENODEV);
534 }
535
536 mutex_lock(&ti_sci_list_mutex);
537 list_for_each(p, &ti_sci_list) {
538 info = list_entry(p, struct ti_sci_info, node);
539 if (ti_sci_np == info->dev->of_node) {
540 handle = &info->handle;
541 info->users++;
542 break;
543 }
544 }
545 mutex_unlock(&ti_sci_list_mutex);
546 of_node_put(ti_sci_np);
547
548 if (!handle)
549 return ERR_PTR(-EPROBE_DEFER);
550
551 return handle;
552}
553EXPORT_SYMBOL_GPL(ti_sci_get_handle);
554
555/**
556 * ti_sci_put_handle() - Release the handle acquired by ti_sci_get_handle
557 * @handle: Handle acquired by ti_sci_get_handle
558 *
559 * NOTE: The function does not track individual clients of the framework
560 * and is expected to be maintained by caller of TI SCI protocol library.
561 * ti_sci_put_handle must be balanced with successful ti_sci_get_handle
562 *
563 * Return: 0 is successfully released
564 * if an error pointer was passed, it returns the error value back,
565 * if null was passed, it returns -EINVAL;
566 */
567int ti_sci_put_handle(const struct ti_sci_handle *handle)
568{
569 struct ti_sci_info *info;
570
571 if (IS_ERR(handle))
572 return PTR_ERR(handle);
573 if (!handle)
574 return -EINVAL;
575
576 info = handle_to_ti_sci_info(handle);
577 mutex_lock(&ti_sci_list_mutex);
578 if (!WARN_ON(!info->users))
579 info->users--;
580 mutex_unlock(&ti_sci_list_mutex);
581
582 return 0;
583}
584EXPORT_SYMBOL_GPL(ti_sci_put_handle);
585
/**
 * devm_ti_sci_release() - devres release callback for a managed handle
 * @dev:	device the resource was registered against
 * @res:	devres data - pointer to the stored handle pointer
 *
 * Balances the ti_sci_get_handle() done by devm_ti_sci_get_handle().
 */
static void devm_ti_sci_release(struct device *dev, void *res)
{
	const struct ti_sci_handle **ptr = res;
	const struct ti_sci_handle *handle = *ptr;
	int ret;

	ret = ti_sci_put_handle(handle);
	if (ret)
		dev_err(dev, "failed to put handle %d\n", ret);
}
596
597/**
598 * devm_ti_sci_get_handle() - Managed get handle
599 * @dev: device for which we want SCI handle for.
600 *
601 * NOTE: This releases the handle once the device resources are
602 * no longer needed. MUST NOT BE released with ti_sci_put_handle.
603 * The function does not track individual clients of the framework
604 * and is expected to be maintained by caller of TI SCI protocol library.
605 *
606 * Return: 0 if all went fine, else corresponding error.
607 */
608const struct ti_sci_handle *devm_ti_sci_get_handle(struct device *dev)
609{
610 const struct ti_sci_handle **ptr;
611 const struct ti_sci_handle *handle;
612
613 ptr = devres_alloc(devm_ti_sci_release, sizeof(*ptr), GFP_KERNEL);
614 if (!ptr)
615 return ERR_PTR(-ENOMEM);
616 handle = ti_sci_get_handle(dev);
617
618 if (!IS_ERR(handle)) {
619 *ptr = handle;
620 devres_add(dev, ptr);
621 } else {
622 devres_free(ptr);
623 }
624
625 return handle;
626}
627EXPORT_SYMBOL_GPL(devm_ti_sci_get_handle);
628
/* Description for K2G */
static const struct ti_sci_desc ti_sci_pmmc_k2g_desc = {
	.host_id = 2,		/* compute entity id used by this host */
	.max_rx_timeout_ms = 200,
	.max_msgs = 128,	/* must fit in hdr.seq (u8) - checked in probe */
	.max_msg_size = 64,
};
636
/* Device-tree match table - .data carries the per-SoC integration desc */
static const struct of_device_id ti_sci_of_match[] = {
	{.compatible = "ti,k2g-sci", .data = &ti_sci_pmmc_k2g_desc},
	{ /* Sentinel */ },
};
MODULE_DEVICE_TABLE(of, ti_sci_of_match);
642
/**
 * ti_sci_probe() - Basic probe for the TI SCI entity
 * @pdev:	platform device pointer
 *
 * Allocates the per-instance state, pre-allocates the transfer pool,
 * acquires the rx/tx mailbox channels, queries the firmware revision and
 * publishes the instance on the global list for clients to find.
 *
 * Return: 0 if all went fine, else corresponding error.
 */
static int ti_sci_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	const struct of_device_id *of_id;
	const struct ti_sci_desc *desc;
	struct ti_sci_xfer *xfer;
	struct ti_sci_info *info = NULL;
	struct ti_sci_xfers_info *minfo;
	struct mbox_client *cl;
	int ret = -EINVAL;
	int i;

	of_id = of_match_device(ti_sci_of_match, dev);
	if (!of_id) {
		dev_err(dev, "OF data missing\n");
		return -EINVAL;
	}
	desc = of_id->data;

	info = devm_kzalloc(dev, sizeof(*info), GFP_KERNEL);
	if (!info)
		return -ENOMEM;

	info->dev = dev;
	info->desc = desc;
	INIT_LIST_HEAD(&info->node);
	minfo = &info->minfo;

	/*
	 * Pre-allocate messages
	 * NEVER allocate more than what we can indicate in hdr.seq
	 * if we have data description bug, force a fix..
	 */
	if (WARN_ON(desc->max_msgs >=
		    1 << 8 * sizeof(((struct ti_sci_msg_hdr *)0)->seq)))
		return -EINVAL;

	minfo->xfer_block = devm_kcalloc(dev,
					 desc->max_msgs,
					 sizeof(*minfo->xfer_block),
					 GFP_KERNEL);
	if (!minfo->xfer_block)
		return -ENOMEM;

	minfo->xfer_alloc_table = devm_kzalloc(dev,
					       BITS_TO_LONGS(desc->max_msgs)
					       * sizeof(unsigned long),
					       GFP_KERNEL);
	if (!minfo->xfer_alloc_table)
		return -ENOMEM;
	bitmap_zero(minfo->xfer_alloc_table, desc->max_msgs);

	/* Pre-initialize the buffer pointer to pre-allocated buffers */
	for (i = 0, xfer = minfo->xfer_block; i < desc->max_msgs; i++, xfer++) {
		xfer->xfer_buf = devm_kcalloc(dev, 1, desc->max_msg_size,
					      GFP_KERNEL);
		if (!xfer->xfer_buf)
			return -ENOMEM;

		/* rx shares the tx buffer - request-ACK protocol */
		xfer->tx_message.buf = xfer->xfer_buf;
		init_completion(&xfer->done);
	}

	/* Debug file is optional - a failure here is not fatal */
	ret = ti_sci_debugfs_create(pdev, info);
	if (ret)
		dev_warn(dev, "Failed to create debug file\n");

	platform_set_drvdata(pdev, info);

	cl = &info->cl;
	cl->dev = dev;
	cl->tx_block = false;
	cl->rx_callback = ti_sci_rx_callback;
	/* We kick txdone ourselves in ti_sci_do_xfer() */
	cl->knows_txdone = true;

	spin_lock_init(&minfo->xfer_lock);
	sema_init(&minfo->sem_xfer_count, desc->max_msgs);

	info->chan_rx = mbox_request_channel_byname(cl, "rx");
	if (IS_ERR(info->chan_rx)) {
		ret = PTR_ERR(info->chan_rx);
		goto out;
	}

	info->chan_tx = mbox_request_channel_byname(cl, "tx");
	if (IS_ERR(info->chan_tx)) {
		ret = PTR_ERR(info->chan_tx);
		goto out;
	}
	/* First transaction doubles as a communication sanity check */
	ret = ti_sci_cmd_get_revision(info);
	if (ret) {
		dev_err(dev, "Unable to communicate with TISCI(%d)\n", ret);
		goto out;
	}

	dev_info(dev, "ABI: %d.%d (firmware rev 0x%04x '%s')\n",
		 info->handle.version.abi_major, info->handle.version.abi_minor,
		 info->handle.version.firmware_revision,
		 info->handle.version.firmware_description);

	/* Only now make the instance discoverable by ti_sci_get_handle() */
	mutex_lock(&ti_sci_list_mutex);
	list_add_tail(&info->node, &ti_sci_list);
	mutex_unlock(&ti_sci_list_mutex);

	return 0;
out:
	/*
	 * chan_tx/chan_rx are NULL (from kzalloc) if never requested;
	 * mbox_free_channel() is only called on successfully acquired ones.
	 */
	if (!IS_ERR(info->chan_tx))
		mbox_free_channel(info->chan_tx);
	if (!IS_ERR(info->chan_rx))
		mbox_free_channel(info->chan_rx);
	debugfs_remove(info->d);
	return ret;
}
756
757static int ti_sci_remove(struct platform_device *pdev)
758{
759 struct ti_sci_info *info;
760 int ret = 0;
761
762 info = platform_get_drvdata(pdev);
763
764 mutex_lock(&ti_sci_list_mutex);
765 if (info->users)
766 ret = -EBUSY;
767 else
768 list_del(&info->node);
769 mutex_unlock(&ti_sci_list_mutex);
770
771 if (!ret)
772 ti_sci_debugfs_destroy(pdev, info);
773
774 return ret;
775}
776
/* Platform driver glue - matched via the "ti,sci" compatible table */
static struct platform_driver ti_sci_driver = {
	.probe = ti_sci_probe,
	.remove = ti_sci_remove,
	.driver = {
		   .name = "ti-sci",
		   .of_match_table = of_match_ptr(ti_sci_of_match),
	},
};
module_platform_driver(ti_sci_driver);
786
787MODULE_LICENSE("GPL v2");
788MODULE_DESCRIPTION("TI System Control Interface(SCI) driver");
789MODULE_AUTHOR("Nishanth Menon");
790MODULE_ALIAS("platform:ti-sci");