Diffstat (limited to 'drivers/staging/vc04_services/bcm2835-camera/mmal-vchiq.c')
-rw-r--r--  drivers/staging/vc04_services/bcm2835-camera/mmal-vchiq.c  32
1 file changed, 14 insertions, 18 deletions
diff --git a/drivers/staging/vc04_services/bcm2835-camera/mmal-vchiq.c b/drivers/staging/vc04_services/bcm2835-camera/mmal-vchiq.c
index 51e5b04ff0f5..daa2b9656552 100644
--- a/drivers/staging/vc04_services/bcm2835-camera/mmal-vchiq.c
+++ b/drivers/staging/vc04_services/bcm2835-camera/mmal-vchiq.c
@@ -162,7 +162,8 @@ struct vchiq_mmal_instance {
 	void *bulk_scratch;
 
 	struct idr context_map;
-	spinlock_t context_map_lock;
+	/* protect accesses to context_map */
+	struct mutex context_map_lock;
 
 	/* component to use next */
 	int component_idx;
@@ -185,10 +186,10 @@ get_msg_context(struct vchiq_mmal_instance *instance)
 	 * that when we service the VCHI reply, we can look up what
 	 * message is being replied to.
 	 */
-	spin_lock(&instance->context_map_lock);
+	mutex_lock(&instance->context_map_lock);
 	handle = idr_alloc(&instance->context_map, msg_context,
 			   0, 0, GFP_KERNEL);
-	spin_unlock(&instance->context_map_lock);
+	mutex_unlock(&instance->context_map_lock);
 
 	if (handle < 0) {
 		kfree(msg_context);
@@ -212,9 +213,9 @@ release_msg_context(struct mmal_msg_context *msg_context)
 {
 	struct vchiq_mmal_instance *instance = msg_context->instance;
 
-	spin_lock(&instance->context_map_lock);
+	mutex_lock(&instance->context_map_lock);
 	idr_remove(&instance->context_map, msg_context->handle);
-	spin_unlock(&instance->context_map_lock);
+	mutex_unlock(&instance->context_map_lock);
 	kfree(msg_context);
 }
 
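
Note on the locking change in the preceding hunks: idr_alloc() is called with GFP_KERNEL, an allocation that may sleep, and sleeping is not allowed while a spinlock is held, so guarding the idr with a mutex is the straightforward fix (the commit message is not part of this excerpt, so the motivation is inferred). A minimal sketch of the resulting pattern, using hypothetical example_map_* names rather than the driver's own:

#include <linux/gfp.h>
#include <linux/idr.h>
#include <linux/mutex.h>

struct example_map {
	struct mutex lock;	/* protects accesses to ids */
	struct idr ids;
};

static int example_map_store(struct example_map *map, void *obj)
{
	int handle;

	mutex_lock(&map->lock);
	/* GFP_KERNEL may sleep, hence a mutex rather than a spinlock */
	handle = idr_alloc(&map->ids, obj, 0, 0, GFP_KERNEL);
	mutex_unlock(&map->lock);

	return handle;	/* negative errno on failure */
}

static void example_map_remove(struct example_map *map, int handle)
{
	mutex_lock(&map->lock);
	idr_remove(&map->ids, handle);
	mutex_unlock(&map->lock);
}

An alternative would be to keep the spinlock and wrap the allocation in idr_preload(GFP_KERNEL)/idr_preload_end() with GFP_NOWAIT inside the lock; the mutex is simpler given that the GFP_KERNEL argument already assumes callers may sleep.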
@@ -240,6 +241,8 @@ static void buffer_work_cb(struct work_struct *work)
 	struct mmal_msg_context *msg_context =
 		container_of(work, struct mmal_msg_context, u.bulk.work);
 
+	atomic_dec(&msg_context->u.bulk.port->buffers_with_vpu);
+
 	msg_context->u.bulk.port->buffer_cb(msg_context->u.bulk.instance,
 					    msg_context->u.bulk.port,
 					    msg_context->u.bulk.status,
@@ -288,8 +291,6 @@ static int bulk_receive(struct vchiq_mmal_instance *instance,
 
 	/* store length */
 	msg_context->u.bulk.buffer_used = rd_len;
-	msg_context->u.bulk.mmal_flags =
-		msg->u.buffer_from_host.buffer_header.flags;
 	msg_context->u.bulk.dts = msg->u.buffer_from_host.buffer_header.dts;
 	msg_context->u.bulk.pts = msg->u.buffer_from_host.buffer_header.pts;
 
@@ -380,6 +381,8 @@ buffer_from_host(struct vchiq_mmal_instance *instance,
 
 	/* initialise work structure ready to schedule callback */
 	INIT_WORK(&msg_context->u.bulk.work, buffer_work_cb);
 
+	atomic_inc(&port->buffers_with_vpu);
+
 	/* prep the buffer from host message */
 	memset(&m, 0xbc, sizeof(m)); /* just to make debug clearer */
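
Note: this hunk pairs with the earlier buffer_work_cb() hunk: the counter is incremented here when a buffer is handed to the VPU and decremented in the completion work just before buffer_cb() hands the buffer back to the client. The buffers_with_vpu field itself (presumably an atomic_t in struct vchiq_mmal_port) is declared elsewhere, not in this diff. A minimal sketch of the same ownership-counting idea, with hypothetical example_* names:

#include <linux/atomic.h>

struct example_port {
	atomic_t buffers_with_peer;	/* buffers currently held by the remote side */
};

/* A buffer has been handed to the remote processor; it now owns it. */
static void example_buffer_submitted(struct example_port *port)
{
	atomic_inc(&port->buffers_with_peer);
}

/* The remote processor has returned the buffer; drop the count before
 * completing it back to the client.
 */
static void example_buffer_returned(struct example_port *port)
{
	atomic_dec(&port->buffers_with_peer);
}

A counter like this lets a disable or teardown path know whether the remote side still holds buffers, for example by waiting for it to reach zero; whether and how this driver uses it that way is outside this excerpt.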
@@ -448,6 +451,9 @@ static void buffer_to_host_cb(struct vchiq_mmal_instance *instance,
 		return;
 	}
 
+	msg_context->u.bulk.mmal_flags =
+		msg->u.buffer_from_host.buffer_header.flags;
+
 	if (msg->h.status != MMAL_MSG_STATUS_SUCCESS) {
 		/* message reception had an error */
 		pr_warn("error %d in reply\n", msg->h.status);
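
Note: the assignment removed from bulk_receive() above reappears here in buffer_to_host_cb(), which runs for every buffer reply. The likely effect (inferred, not stated in this excerpt) is that the header flags are recorded even for replies whose handling never reaches the bulk-receive path, such as empty buffers that only carry a flag like MMAL_BUFFER_HEADER_FLAG_EOS. A small hypothetical consumer-side check showing why that matters; the flag definition comes from the driver's MMAL message-format header, whose include path is assumed here:

#include <linux/types.h>

#include "mmal-msg-format.h"	/* MMAL_BUFFER_HEADER_FLAG_EOS (assumed location) */

/* An empty buffer can still mark end-of-stream if its flags survive. */
static bool example_is_eos(unsigned long bytes_used, u32 mmal_flags)
{
	return bytes_used == 0 && (mmal_flags & MMAL_BUFFER_HEADER_FLAG_EOS);
}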
@@ -1324,16 +1330,6 @@ static int port_enable(struct vchiq_mmal_instance *instance,
 	if (port->enabled)
 		return 0;
 
-	/* ensure there are enough buffers queued to cover the buffer headers */
-	if (port->buffer_cb) {
-		hdr_count = 0;
-		list_for_each(buf_head, &port->buffers) {
-			hdr_count++;
-		}
-		if (hdr_count < port->current_buffer.num)
-			return -ENOSPC;
-	}
-
 	ret = port_action_port(instance, port,
 			       MMAL_MSG_PORT_ACTION_TYPE_ENABLE);
 	if (ret)
@@ -1854,7 +1850,7 @@ int vchiq_mmal_init(struct vchiq_mmal_instance **out_instance)
 
 	instance->bulk_scratch = vmalloc(PAGE_SIZE);
 
-	spin_lock_init(&instance->context_map_lock);
+	mutex_init(&instance->context_map_lock);
 	idr_init_base(&instance->context_map, 1);
 
 	params.callback_param = instance;
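
For reference on the call left untouched above: idr_init_base(&instance->context_map, 1) makes idr_alloc() hand out handles starting at 1 even when the requested range starts at 0, so a handle of 0 never refers to a real message context. A hypothetical stand-alone illustration (example_first_handle is not a driver function):

#include <linux/gfp.h>
#include <linux/idr.h>

static int example_first_handle(void *obj)
{
	struct idr ids;
	int handle;

	idr_init_base(&ids, 1);
	/* range starts at 0, but the base keeps 0 unused: on success this returns 1 */
	handle = idr_alloc(&ids, obj, 0, 0, GFP_KERNEL);
	idr_destroy(&ids);

	return handle;
}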