diff --git a/lib/rpmsg/rpmsg_core.c b/lib/rpmsg/rpmsg_core.c
index eff7e1ec4fbef4a70b6e7c65174de99b406e32b2..728645d381d9cb2754eaecc330fcae0cc46a0b52 100644
--- a/lib/rpmsg/rpmsg_core.c
+++ b/lib/rpmsg/rpmsg_core.c
* Copyright (c) 2014, Mentor Graphics Corporation
* All rights reserved.
* Copyright (c) 2015 Xilinx, Inc. All rights reserved.
- * Copyright (c) 2016 NXP, Inc. All rights reserved.
+ * Copyright (c) 2016 Freescale Semiconductor, Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
*
**************************************************************************/
+#include <string.h>
#include "openamp/rpmsg.h"
+#include "metal/utilities.h"
+#include "metal/io.h"
+#include "metal/cache.h"
+#include "metal/alloc.h"
+#include "metal/cpu.h"
/* Internal functions */
static void rpmsg_rx_callback(struct virtqueue *vq);
}
}
- status = rpmsg_rdev_notify(rdev);
+ if (rdev->role == RPMSG_MASTER) {
+ virt_dev->func->set_status(virt_dev,
+ VIRTIO_CONFIG_STATUS_DRIVER_OK);
+ status = rpmsg_rdev_notify(rdev);
+ }
+ if (status == RPMSG_SUCCESS)
+ rdev->state = RPMSG_DEV_STATE_ACTIVE;
return status;
}
unsigned long dst)
{
struct rpmsg_channel *rp_chnl;
- struct llist *node;
- rp_chnl = env_allocate_memory(sizeof(struct rpmsg_channel));
+ rp_chnl = metal_allocate_memory(sizeof(struct rpmsg_channel));
if (rp_chnl) {
- env_memset(rp_chnl, 0x00, sizeof(struct rpmsg_channel));
- env_strncpy(rp_chnl->name, name, sizeof(rp_chnl->name));
+ memset(rp_chnl, 0x00, sizeof(struct rpmsg_channel));
+ strncpy(rp_chnl->name, name, sizeof(rp_chnl->name));
rp_chnl->src = src;
rp_chnl->dst = dst;
rp_chnl->rdev = rdev;
/* Place channel on channels list */
- node = env_allocate_memory(sizeof(struct llist));
- if (!node) {
- env_free_memory(rp_chnl);
- return RPMSG_NULL;
- }
- node->data = rp_chnl;
- env_lock_mutex(rdev->lock);
- add_to_list(&rdev->rp_channels, node);
- env_unlock_mutex(rdev->lock);
+ metal_mutex_acquire(&rdev->lock);
+ metal_list_add_tail(&rdev->rp_channels, &rp_chnl->node);
+ metal_mutex_release(&rdev->lock);
}
return rp_chnl;
*/
void _rpmsg_delete_channel(struct rpmsg_channel *rp_chnl)
{
- struct llist *node;
if (rp_chnl) {
- env_lock_mutex(rp_chnl->rdev->lock);
- node =
- rpmsg_rdev_get_chnl_node_from_id(rp_chnl->rdev,
- rp_chnl->name);
- if (node) {
- remove_from_list(&rp_chnl->rdev->rp_channels, node);
- env_unlock_mutex(rp_chnl->rdev->lock);
- /* free node and rp_chnl */
- env_free_memory(node);
- env_free_memory(rp_chnl);
- } else {
- env_unlock_mutex(rp_chnl->rdev->lock);
- }
+ metal_mutex_acquire(&rp_chnl->rdev->lock);
+ metal_list_del(&rp_chnl->node);
+ metal_mutex_release(&rp_chnl->rdev->lock);
+ metal_free_memory(rp_chnl);
}
}
{
struct rpmsg_endpoint *rp_ept;
- struct llist *node;
int status = RPMSG_SUCCESS;
- rp_ept = env_allocate_memory(sizeof(struct rpmsg_endpoint));
+ rp_ept = metal_allocate_memory(sizeof(struct rpmsg_endpoint));
if (!rp_ept) {
return RPMSG_NULL;
}
- env_memset(rp_ept, 0x00, sizeof(struct rpmsg_endpoint));
-
- node = env_allocate_memory(sizeof(struct llist));
- if (!node) {
- env_free_memory(rp_ept);
- return RPMSG_NULL;
- }
+ memset(rp_ept, 0x00, sizeof(struct rpmsg_endpoint));
- env_lock_mutex(rdev->lock);
+ metal_mutex_acquire(&rdev->lock);
if (addr != RPMSG_ADDR_ANY) {
/*
/* Do cleanup in case of error and return */
if (RPMSG_SUCCESS != status) {
- env_free_memory(node);
- env_free_memory(rp_ept);
- env_unlock_mutex(rdev->lock);
+ metal_free_memory(rp_ept);
+ metal_mutex_release(&rdev->lock);
return RPMSG_NULL;
}
rp_ept->cb = cb;
rp_ept->priv = priv;
- node->data = rp_ept;
- add_to_list(&rdev->rp_endpoints, node);
+ metal_list_add_tail(&rdev->rp_endpoints, &rp_ept->node);
- env_unlock_mutex(rdev->lock);
+ metal_mutex_release(&rdev->lock);
return rp_ept;
}
void _destroy_endpoint(struct remote_device *rdev,
struct rpmsg_endpoint *rp_ept)
{
- struct llist *node;
- env_lock_mutex(rdev->lock);
- node = rpmsg_rdev_get_endpoint_from_addr(rdev, rp_ept->addr);
- if (node) {
- rpmsg_release_address(rdev->bitmap, RPMSG_ADDR_BMP_SIZE,
- rp_ept->addr);
- remove_from_list(&rdev->rp_endpoints, node);
- env_unlock_mutex(rdev->lock);
- /* free node and rp_ept */
- env_free_memory(node);
- env_free_memory(rp_ept);
- } else {
- env_unlock_mutex(rdev->lock);
- }
+ metal_mutex_acquire(&rdev->lock);
+ rpmsg_release_address(rdev->bitmap, RPMSG_ADDR_BMP_SIZE,
+ rp_ept->addr);
+ metal_list_del(&rp_ept->node);
+ metal_mutex_release(&rdev->lock);
+ /* free node and rp_ept */
+ metal_free_memory(rp_ept);
}
/**
* @param flags - Channel creation/deletion flags
*
*/
-void rpmsg_send_ns_message(struct remote_device *rdev,
+int rpmsg_send_ns_message(struct remote_device *rdev,
struct rpmsg_channel *rp_chnl, unsigned long flags)
{
unsigned short idx;
unsigned long len;
- env_lock_mutex(rdev->lock);
+ metal_mutex_acquire(&rdev->lock);
/* Get Tx buffer. */
rp_hdr = (struct rpmsg_hdr *)rpmsg_get_tx_buffer(rdev, &len, &idx);
if (!rp_hdr) {
- env_unlock_mutex(rdev->lock);
- return;
+ metal_mutex_release(&rdev->lock);
+ return -RPMSG_ERR_NO_BUFF;
}
/* Fill out name service data. */
rp_hdr->dst = RPMSG_NS_EPT_ADDR;
rp_hdr->len = sizeof(struct rpmsg_ns_msg);
ns_msg = (struct rpmsg_ns_msg *) RPMSG_LOCATE_DATA(rp_hdr);
- env_strncpy(ns_msg->name, rp_chnl->name, sizeof(rp_chnl->name));
+ strncpy(ns_msg->name, rp_chnl->name, sizeof(rp_chnl->name));
ns_msg->flags = flags;
ns_msg->addr = rp_chnl->src;
/* Notify the other side that it has data to process. */
virtqueue_kick(rdev->tvq);
- env_unlock_mutex(rdev->lock);
+ metal_mutex_release(&rdev->lock);
+ return RPMSG_SUCCESS;
}
/**
int rpmsg_enqueue_buffer(struct remote_device *rdev, void *buffer,
unsigned long len, unsigned short idx)
{
- struct llist node;
int status;
+ struct metal_sg sg;
+ struct metal_io_region *io;
- /* Initialize buffer node */
- node.data = buffer;
- node.attr = len;
- node.next = RPMSG_NULL;
- node.prev = RPMSG_NULL;
-
+ io = rdev->proc->sh_buff.io;
+ if (io) {
+ if (! (io->mem_flags & METAL_UNCACHED))
+ metal_cache_flush(buffer, (unsigned int)len);
+ }
if (rdev->role == RPMSG_REMOTE) {
- status = virtqueue_add_buffer(rdev->tvq, &node, 0, 1, buffer);
+ /* Initialize buffer node */
+ sg.virt = buffer;
+ sg.len = len;
+ sg.io = io;
+ status = virtqueue_add_buffer(rdev->tvq, &sg, 0, 1, buffer);
} else {
+ (void)sg;
status = virtqueue_add_consumed_buffer(rdev->tvq, idx, len);
}
void rpmsg_return_buffer(struct remote_device *rdev, void *buffer,
unsigned long len, unsigned short idx)
{
- struct llist node;
-
- /* Initialize buffer node */
- node.data = buffer;
- node.attr = len;
- node.next = RPMSG_NULL;
- node.prev = RPMSG_NULL;
+ struct metal_sg sg;
if (rdev->role == RPMSG_REMOTE) {
- virtqueue_add_buffer(rdev->rvq, &node, 0, 1, buffer);
+ /* Initialize buffer node */
+ sg.virt = buffer;
+ sg.len = len;
+ sg.io = rdev->proc->sh_buff.io;
+ virtqueue_add_buffer(rdev->rvq, &sg, 0, 1, buffer);
} else {
+ (void)sg;
virtqueue_add_consumed_buffer(rdev->rvq, idx, len);
}
}
void *data;
if (rdev->role == RPMSG_REMOTE) {
- data = virtqueue_get_buffer(rdev->tvq, (uint32_t *) len);
+ data = virtqueue_get_buffer(rdev->tvq, (uint32_t *) len, idx);
if (data == RPMSG_NULL) {
data = sh_mem_get_buffer(rdev->mem_pool);
*len = RPMSG_BUFFER_SIZE;
virtqueue_get_available_buffer(rdev->tvq, idx,
(uint32_t *) len);
}
- return ((void *)env_map_vatopa(data));
+ return data;
}
/**
void *data;
if (rdev->role == RPMSG_REMOTE) {
- data = virtqueue_get_buffer(rdev->rvq, (uint32_t *) len);
+ data = virtqueue_get_buffer(rdev->rvq, (uint32_t *) len, idx);
} else {
data =
virtqueue_get_available_buffer(rdev->rvq, idx,
(uint32_t *) len);
}
- return ((void *)env_map_vatopa(data));
+ if (data) {
+ struct metal_io_region *io;
+ io = rdev->proc->sh_buff.io;
+ if (io) {
+ if (! (io->mem_flags & METAL_UNCACHED))
+ metal_cache_invalidate(data,
+ (unsigned int)(*len));
+ }
+ }
+
+ return data;
}
/**
struct remote_device *rdev;
struct virtio_device *vdev;
struct rpmsg_channel *rp_chnl;
- struct llist *chnl_hd;
+ struct metal_list *node;
vdev = (struct virtio_device *)vq->vq_dev;
rdev = (struct remote_device *)vdev;
- chnl_hd = rdev->rp_channels;
/* Check if the remote device is master. */
if (rdev->role == RPMSG_MASTER) {
* b. It will update the channel state to active so that further communication
* can take place.
*/
- while (chnl_hd != RPMSG_NULL) {
- rp_chnl = (struct rpmsg_channel *)chnl_hd->data;
+ metal_list_for_each(&rdev->rp_channels, node) {
+ rp_chnl = metal_container_of(node,
+ struct rpmsg_channel, node);
if (rp_chnl->state == RPMSG_CHNL_STATE_IDLE) {
if (rdev->support_ns) {
- rp_chnl->state = RPMSG_CHNL_STATE_NS;
+ if (rpmsg_send_ns_message(rdev, rp_chnl,
+ RPMSG_NS_CREATE) ==
+ RPMSG_SUCCESS)
+ rp_chnl->state =
+ RPMSG_CHNL_STATE_NS;
} else {
rp_chnl->state =
RPMSG_CHNL_STATE_ACTIVE;
}
- if (rp_chnl->state == RPMSG_CHNL_STATE_NS) {
- rpmsg_send_ns_message(rdev, rp_chnl,
- RPMSG_NS_CREATE);
- }
}
- chnl_hd = chnl_hd->next;
}
}
}
struct rpmsg_channel *rp_chnl;
struct rpmsg_endpoint *rp_ept;
struct rpmsg_hdr *rp_hdr;
- struct llist *node;
+ struct rpmsg_hdr_reserved *reserved;
+ struct metal_list *node;
unsigned long len;
unsigned short idx;
- struct llist *chnl_hd;
vdev = (struct virtio_device *)vq->vq_dev;
rdev = (struct remote_device *)vdev;
- chnl_hd = rdev->rp_channels;
- if ((chnl_hd != RPMSG_NULL) && (rdev->role == RPMSG_MASTER)) {
- rp_chnl = (struct rpmsg_channel *)chnl_hd->data;
- if (rp_chnl->state == RPMSG_CHNL_STATE_IDLE) {
- if (rdev->support_ns) {
- rp_chnl->state = RPMSG_CHNL_STATE_NS;
- rpmsg_send_ns_message(rdev, rp_chnl,
- RPMSG_NS_CREATE);
- } else {
- rp_chnl->state = RPMSG_CHNL_STATE_ACTIVE;
+ if (rdev->role == RPMSG_MASTER) {
+ metal_list_for_each(&rdev->rp_channels, node) {
+ rp_chnl = metal_container_of(node,
+ struct rpmsg_channel, node);
+ if (rp_chnl->state == RPMSG_CHNL_STATE_IDLE) {
+ if (rdev->support_ns) {
+ if (rpmsg_send_ns_message(rdev, rp_chnl,
+ RPMSG_NS_CREATE) ==
+ RPMSG_SUCCESS)
+ rp_chnl->state =
+ RPMSG_CHNL_STATE_NS;
+ } else {
+ rp_chnl->state = RPMSG_CHNL_STATE_ACTIVE;
+ }
+ return;
}
- return;
}
}
- env_lock_mutex(rdev->lock);
+ metal_mutex_acquire(&rdev->lock);
/* Process the received data from remote node */
rp_hdr = (struct rpmsg_hdr *)rpmsg_get_rx_buffer(rdev, &len, &idx);
- env_unlock_mutex(rdev->lock);
+ metal_mutex_release(&rdev->lock);
while (rp_hdr) {
/* Get the channel node from the remote device channels list. */
- env_lock_mutex(rdev->lock);
- node = rpmsg_rdev_get_endpoint_from_addr(rdev, rp_hdr->dst);
- env_unlock_mutex(rdev->lock);
+ metal_mutex_acquire(&rdev->lock);
+ rp_ept = rpmsg_rdev_get_endpoint_from_addr(rdev, rp_hdr->dst);
+ metal_mutex_release(&rdev->lock);
- if (!node)
+ if (!rp_ept)
/* Fatal error no endpoint for the given dst addr. */
return;
- rp_ept = (struct rpmsg_endpoint *)node->data;
-
rp_chnl = rp_ept->rp_chnl;
if ((rp_chnl) && (rp_chnl->state == RPMSG_CHNL_STATE_NS)) {
rp_ept->priv, rp_hdr->src);
}
- env_lock_mutex(rdev->lock);
+ metal_mutex_acquire(&rdev->lock);
- /* Return used buffers. */
- rpmsg_return_buffer(rdev, rp_hdr, len, idx);
+ /* Check whether callback wants to hold buffer */
+ if (rp_hdr->reserved & RPMSG_BUF_HELD)
+ {
+ /* 'rp_hdr->reserved' field is now used as storage for
+ * 'idx' to release buffer later */
+ reserved = (struct rpmsg_hdr_reserved*)&rp_hdr->reserved;
+ reserved->idx = (uint16_t)idx;
+ } else {
+ /* Return used buffers. */
+ rpmsg_return_buffer(rdev, rp_hdr, len, idx);
+ }
rp_hdr =
(struct rpmsg_hdr *)rpmsg_get_rx_buffer(rdev, &len, &idx);
- env_unlock_mutex(rdev->lock);
+ metal_mutex_release(&rdev->lock);
}
}
struct remote_device *rdev;
struct rpmsg_channel *rp_chnl;
struct rpmsg_ns_msg *ns_msg;
- struct llist *node;
(void)server_chnl;
(void)src;
@@ -665,11 +668,10 @@ void rpmsg_ns_callback(struct rpmsg_channel *server_chnl, void *data, int len,
ns_msg->name[len - 1] = '\0';
if (ns_msg->flags & RPMSG_NS_DESTROY) {
- env_lock_mutex(rdev->lock);
- node = rpmsg_rdev_get_chnl_node_from_id(rdev, ns_msg->name);
- env_unlock_mutex(rdev->lock);
- if (node) {
- rp_chnl = (struct rpmsg_channel *)node->data;
+ metal_mutex_acquire(&rdev->lock);
+ rp_chnl = rpmsg_rdev_get_chnl_from_id(rdev, ns_msg->name);
+ metal_mutex_release(&rdev->lock);
+ if (rp_chnl) {
if (rdev->channel_destroyed) {
rdev->channel_destroyed(rp_chnl);
}