2014-10-01 20:54:14 -06:00
|
|
|
/*
|
|
|
|
* Greybus connections
|
|
|
|
*
|
|
|
|
* Copyright 2014 Google Inc.
|
2014-12-12 11:08:42 -07:00
|
|
|
* Copyright 2014 Linaro Ltd.
|
2014-10-01 20:54:14 -06:00
|
|
|
*
|
|
|
|
* Released under the GPLv2 only.
|
|
|
|
*/
|
|
|
|
|
2015-07-23 02:50:02 -06:00
|
|
|
#include <linux/workqueue.h>
|
|
|
|
|
2014-10-01 20:54:14 -06:00
|
|
|
#include "greybus.h"
|
|
|
|
|
2014-10-03 13:14:22 -06:00
|
|
|
/* Protects the connection lists hanging off each host device (hd->connections). */
static DEFINE_SPINLOCK(gb_connections_lock);
|
|
|
|
|
2015-06-09 16:42:58 -06:00
|
|
|
/* This is only used at initialization time; no locking is required. */
|
|
|
|
static struct gb_connection *
|
2015-07-01 00:43:56 -06:00
|
|
|
gb_connection_intf_find(struct gb_interface *intf, u16 cport_id)
|
2015-06-09 16:42:58 -06:00
|
|
|
{
|
2015-11-03 10:03:23 -07:00
|
|
|
struct gb_host_device *hd = intf->hd;
|
2015-06-09 16:42:58 -06:00
|
|
|
struct gb_connection *connection;
|
|
|
|
|
|
|
|
list_for_each_entry(connection, &hd->connections, hd_links)
|
2015-07-01 00:43:56 -06:00
|
|
|
if (connection->bundle->intf == intf &&
|
|
|
|
connection->intf_cport_id == cport_id)
|
2015-06-09 16:42:58 -06:00
|
|
|
return connection;
|
|
|
|
return NULL;
|
|
|
|
}
|
|
|
|
|
2015-06-08 11:05:11 -06:00
|
|
|
static struct gb_connection *
|
2015-11-03 10:03:23 -07:00
|
|
|
gb_connection_hd_find(struct gb_host_device *hd, u16 cport_id)
|
2014-10-06 05:53:08 -06:00
|
|
|
{
|
2015-06-09 16:42:58 -06:00
|
|
|
struct gb_connection *connection;
|
2015-03-02 01:55:26 -07:00
|
|
|
unsigned long flags;
|
2014-10-06 05:53:08 -06:00
|
|
|
|
2015-03-02 01:55:26 -07:00
|
|
|
spin_lock_irqsave(&gb_connections_lock, flags);
|
2014-11-17 07:08:44 -07:00
|
|
|
list_for_each_entry(connection, &hd->connections, hd_links)
|
|
|
|
if (connection->hd_cport_id == cport_id)
|
2014-10-06 11:26:02 -06:00
|
|
|
goto found;
|
|
|
|
connection = NULL;
|
2015-06-09 16:42:58 -06:00
|
|
|
found:
|
2015-03-02 01:55:26 -07:00
|
|
|
spin_unlock_irqrestore(&gb_connections_lock, flags);
|
2014-10-06 05:53:08 -06:00
|
|
|
|
|
|
|
return connection;
|
|
|
|
}
|
|
|
|
|
2014-11-20 15:09:18 -07:00
|
|
|
/*
|
|
|
|
* Callback from the host driver to let us know that data has been
|
2014-12-12 15:10:17 -07:00
|
|
|
* received on the bundle.
|
2014-11-20 15:09:18 -07:00
|
|
|
*/
|
2015-11-03 10:03:23 -07:00
|
|
|
void greybus_data_rcvd(struct gb_host_device *hd, u16 cport_id,
|
2014-11-17 17:08:37 -07:00
|
|
|
u8 *data, size_t length)
|
|
|
|
{
|
|
|
|
struct gb_connection *connection;
|
|
|
|
|
2015-05-20 05:18:00 -06:00
|
|
|
connection = gb_connection_hd_find(hd, cport_id);
|
2014-11-17 17:08:37 -07:00
|
|
|
if (!connection) {
|
|
|
|
dev_err(hd->parent,
|
|
|
|
"nonexistent connection (%zu bytes dropped)\n", length);
|
|
|
|
return;
|
|
|
|
}
|
2014-11-18 12:26:50 -07:00
|
|
|
gb_connection_recv(connection, data, length);
|
2014-11-17 17:08:37 -07:00
|
|
|
}
|
2014-11-20 15:09:18 -07:00
|
|
|
EXPORT_SYMBOL_GPL(greybus_data_rcvd);
|
2014-11-17 17:08:37 -07:00
|
|
|
|
2015-10-16 17:56:38 -06:00
|
|
|
/* Serializes final teardown; taken by kref_put_mutex() in gb_connection_destroy(). */
static DEFINE_MUTEX(connection_mutex);
|
2014-10-24 03:34:46 -06:00
|
|
|
|
2015-10-16 17:56:38 -06:00
|
|
|
/*
 * Last-reference release for a connection: destroy its workqueue and
 * free the structure.
 *
 * Invoked via kref_put_mutex() with connection_mutex held; it is this
 * function's job to drop that mutex before returning.
 */
static void gb_connection_kref_release(struct kref *kref)
{
	struct gb_connection *connection;

	connection = container_of(kref, struct gb_connection, kref);
	destroy_workqueue(connection->wq);
	kfree(connection);
	mutex_unlock(&connection_mutex);
}
|
|
|
|
|
2015-07-24 04:02:19 -06:00
|
|
|
int svc_update_connection(struct gb_interface *intf,
|
|
|
|
struct gb_connection *connection)
|
|
|
|
{
|
|
|
|
struct gb_bundle *bundle;
|
|
|
|
|
|
|
|
bundle = gb_bundle_create(intf, GB_SVC_BUNDLE_ID, GREYBUS_CLASS_SVC);
|
|
|
|
if (!bundle)
|
|
|
|
return -EINVAL;
|
|
|
|
|
|
|
|
connection->bundle = bundle;
|
|
|
|
|
|
|
|
spin_lock_irq(&gb_connections_lock);
|
|
|
|
list_add(&connection->bundle_links, &bundle->connections);
|
|
|
|
spin_unlock_irq(&gb_connections_lock);
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2014-10-01 20:54:14 -06:00
|
|
|
/*
 * Set up a Greybus connection, representing the bidirectional link
 * between a CPort on a (local) Greybus host device and a CPort on
 * another Greybus module.
 *
 * A connection also maintains the state of operations sent over the
 * connection.
 *
 * Returns a pointer to the new connection if successful, or a null
 * pointer otherwise.
 */
struct gb_connection *
gb_connection_create_range(struct gb_host_device *hd,
			   struct gb_bundle *bundle, struct device *parent,
			   u16 cport_id, u8 protocol_id, u32 ida_start,
			   u32 ida_end)
{
	struct gb_connection *connection;
	struct ida *id_map = &hd->cport_id_map;
	int hd_cport_id;
	int retval;
	u8 major = 0;	/* protocol version requested by default */
	u8 minor = 1;

	/*
	 * If a manifest tries to reuse a cport, reject it.  We
	 * initialize connections serially so we don't need to worry
	 * about holding the connection lock.
	 */
	if (bundle && gb_connection_intf_find(bundle->intf, cport_id)) {
		dev_err(&bundle->dev, "cport 0x%04hx already connected\n",
				cport_id);
		return NULL;
	}

	/* Claim a free host-side cport id from [ida_start, ida_end). */
	hd_cport_id = ida_simple_get(id_map, ida_start, ida_end, GFP_KERNEL);
	if (hd_cport_id < 0)
		return NULL;

	connection = kzalloc(sizeof(*connection), GFP_KERNEL);
	if (!connection)
		goto err_remove_ida;

	connection->hd_cport_id = hd_cport_id;
	connection->intf_cport_id = cport_id;
	connection->hd = hd;

	connection->protocol_id = protocol_id;
	connection->major = major;
	connection->minor = minor;

	connection->bundle = bundle;
	connection->state = GB_CONNECTION_STATE_DISABLED;

	atomic_set(&connection->op_cycle, 0);
	spin_lock_init(&connection->lock);
	INIT_LIST_HEAD(&connection->operations);

	/* Single-threaded, unbound workqueue per connection. */
	connection->wq = alloc_workqueue("%s:%d", WQ_UNBOUND, 1,
					 dev_name(parent), cport_id);
	if (!connection->wq)
		goto err_free_connection;

	kref_init(&connection->kref);

	spin_lock_irq(&gb_connections_lock);
	list_add(&connection->hd_links, &hd->connections);

	/* SVC connections have no bundle at creation time. */
	if (bundle)
		list_add(&connection->bundle_links, &bundle->connections);
	else
		INIT_LIST_HEAD(&connection->bundle_links);

	spin_unlock_irq(&gb_connections_lock);

	/*
	 * After this point the connection is globally visible, so failure
	 * is unwound through gb_connection_destroy() rather than the
	 * labels below.
	 */
	retval = gb_connection_bind_protocol(connection);
	if (retval) {
		dev_err(parent, "%d: failed to bind protocol: %d\n",
			cport_id, retval);
		gb_connection_destroy(connection);
		return NULL;
	}

	return connection;

err_free_connection:
	kfree(connection);
err_remove_ida:
	ida_simple_remove(id_map, hd_cport_id);

	return NULL;
}
|
|
|
|
|
2015-09-17 05:17:26 -06:00
|
|
|
static int gb_connection_hd_cport_enable(struct gb_connection *connection)
|
|
|
|
{
|
2015-11-03 10:03:23 -07:00
|
|
|
struct gb_host_device *hd = connection->hd;
|
2015-09-17 05:17:26 -06:00
|
|
|
int ret;
|
|
|
|
|
|
|
|
if (!hd->driver->cport_enable)
|
|
|
|
return 0;
|
|
|
|
|
|
|
|
ret = hd->driver->cport_enable(hd, connection->hd_cport_id);
|
|
|
|
if (ret) {
|
2015-11-03 04:11:29 -07:00
|
|
|
dev_err(hd->parent,
|
2015-10-16 17:56:23 -06:00
|
|
|
"failed to enable host cport: %d\n", ret);
|
2015-09-17 05:17:26 -06:00
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
static void gb_connection_hd_cport_disable(struct gb_connection *connection)
|
|
|
|
{
|
2015-11-03 10:03:23 -07:00
|
|
|
struct gb_host_device *hd = connection->hd;
|
2015-09-17 05:17:26 -06:00
|
|
|
|
|
|
|
if (!hd->driver->cport_disable)
|
|
|
|
return;
|
|
|
|
|
|
|
|
hd->driver->cport_disable(hd, connection->hd_cport_id);
|
|
|
|
}
|
|
|
|
|
2015-07-21 06:14:15 -06:00
|
|
|
struct gb_connection *gb_connection_create(struct gb_bundle *bundle,
|
|
|
|
u16 cport_id, u8 protocol_id)
|
|
|
|
{
|
2015-07-21 06:14:16 -06:00
|
|
|
return gb_connection_create_range(bundle->intf->hd, bundle,
|
|
|
|
&bundle->dev, cport_id, protocol_id,
|
2015-11-21 02:51:58 -07:00
|
|
|
0, bundle->intf->hd->num_cports);
|
2015-07-21 06:14:15 -06:00
|
|
|
}
|
|
|
|
|
2015-07-14 07:43:31 -06:00
|
|
|
/*
 * Cancel all active operations on a connection.
 *
 * Should only be called during connection tear down.
 */
static void gb_connection_cancel_operations(struct gb_connection *connection,
						int errno)
{
	struct gb_operation *operation;

	spin_lock_irq(&connection->lock);
	while (!list_empty(&connection->operations)) {
		operation = list_last_entry(&connection->operations,
						struct gb_operation, links);
		/* Take a reference so the operation survives the unlock. */
		gb_operation_get(operation);
		/* Drop the lock while cancelling; cancellation may block. */
		spin_unlock_irq(&connection->lock);

		if (gb_operation_is_incoming(operation))
			gb_operation_cancel_incoming(operation, errno);
		else
			gb_operation_cancel(operation, errno);

		gb_operation_put(operation);

		/* Re-take the lock and re-check the list from scratch. */
		spin_lock_irq(&connection->lock);
	}
	spin_unlock_irq(&connection->lock);
}
|
|
|
|
|
2015-09-17 05:17:21 -06:00
|
|
|
/*
|
|
|
|
* Request the SVC to create a connection from AP's cport to interface's
|
|
|
|
* cport.
|
|
|
|
*/
|
|
|
|
static int
|
|
|
|
gb_connection_svc_connection_create(struct gb_connection *connection)
|
|
|
|
{
|
2015-11-03 10:03:23 -07:00
|
|
|
struct gb_host_device *hd = connection->hd;
|
2015-09-17 05:17:21 -06:00
|
|
|
struct gb_protocol *protocol = connection->protocol;
|
2015-10-07 13:40:24 -06:00
|
|
|
struct gb_interface *intf;
|
2015-09-17 05:17:21 -06:00
|
|
|
int ret;
|
|
|
|
|
|
|
|
if (protocol->flags & GB_PROTOCOL_SKIP_SVC_CONNECTION)
|
|
|
|
return 0;
|
|
|
|
|
2015-10-07 13:40:24 -06:00
|
|
|
intf = connection->bundle->intf;
|
2015-09-17 05:17:21 -06:00
|
|
|
ret = gb_svc_connection_create(hd->svc,
|
|
|
|
hd->endo->ap_intf_id,
|
|
|
|
connection->hd_cport_id,
|
2015-10-07 13:40:24 -06:00
|
|
|
intf->interface_id,
|
|
|
|
connection->intf_cport_id,
|
|
|
|
intf->boot_over_unipro);
|
2015-09-17 05:17:21 -06:00
|
|
|
if (ret) {
|
2015-10-16 17:56:23 -06:00
|
|
|
dev_err(&connection->bundle->dev,
|
|
|
|
"failed to create svc connection: %d\n", ret);
|
2015-09-17 05:17:21 -06:00
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2015-09-07 04:31:22 -06:00
|
|
|
static void
|
|
|
|
gb_connection_svc_connection_destroy(struct gb_connection *connection)
|
|
|
|
{
|
2015-09-07 04:31:24 -06:00
|
|
|
if (connection->protocol->flags & GB_PROTOCOL_SKIP_SVC_CONNECTION)
|
2015-09-07 04:31:22 -06:00
|
|
|
return;
|
|
|
|
|
|
|
|
gb_svc_connection_destroy(connection->hd->svc,
|
|
|
|
connection->hd->endo->ap_intf_id,
|
|
|
|
connection->hd_cport_id,
|
|
|
|
connection->bundle->intf->interface_id,
|
|
|
|
connection->intf_cport_id);
|
|
|
|
}
|
|
|
|
|
2015-09-17 05:17:24 -06:00
|
|
|
/* Inform Interface about active CPorts */
|
|
|
|
static int gb_connection_control_connected(struct gb_connection *connection)
|
|
|
|
{
|
|
|
|
struct gb_protocol *protocol = connection->protocol;
|
|
|
|
struct gb_control *control;
|
|
|
|
u16 cport_id = connection->intf_cport_id;
|
|
|
|
int ret;
|
|
|
|
|
|
|
|
if (protocol->flags & GB_PROTOCOL_SKIP_CONTROL_CONNECTED)
|
|
|
|
return 0;
|
|
|
|
|
|
|
|
control = connection->bundle->intf->control;
|
|
|
|
|
|
|
|
ret = gb_control_connected_operation(control, cport_id);
|
|
|
|
if (ret) {
|
2015-10-16 17:56:23 -06:00
|
|
|
dev_err(&connection->bundle->dev,
|
|
|
|
"failed to connect cport: %d\n", ret);
|
2015-09-17 05:17:24 -06:00
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2015-09-17 05:17:23 -06:00
|
|
|
/* Inform Interface about inactive CPorts */
|
|
|
|
static void
|
|
|
|
gb_connection_control_disconnected(struct gb_connection *connection)
|
2015-08-10 20:05:56 -06:00
|
|
|
{
|
2015-09-17 05:17:23 -06:00
|
|
|
struct gb_protocol *protocol = connection->protocol;
|
2015-08-10 20:05:56 -06:00
|
|
|
struct gb_control *control;
|
2015-09-17 05:17:23 -06:00
|
|
|
u16 cport_id = connection->intf_cport_id;
|
2015-08-10 20:05:56 -06:00
|
|
|
int ret;
|
|
|
|
|
2015-09-17 05:17:23 -06:00
|
|
|
if (protocol->flags & GB_PROTOCOL_SKIP_CONTROL_DISCONNECTED)
|
2015-08-10 20:05:56 -06:00
|
|
|
return;
|
|
|
|
|
|
|
|
control = connection->bundle->intf->control;
|
|
|
|
|
|
|
|
ret = gb_control_disconnected_operation(control, cport_id);
|
2015-09-17 05:17:23 -06:00
|
|
|
if (ret) {
|
2015-10-16 17:56:23 -06:00
|
|
|
dev_warn(&connection->bundle->dev,
|
|
|
|
"failed to disconnect cport: %d\n", ret);
|
2015-09-17 05:17:23 -06:00
|
|
|
}
|
2015-08-10 20:05:56 -06:00
|
|
|
}
|
|
|
|
|
2015-09-17 05:17:25 -06:00
|
|
|
/*
|
|
|
|
* Request protocol version supported by the module. We don't need to do
|
|
|
|
* this for SVC as that is initiated by the SVC.
|
|
|
|
*/
|
|
|
|
static int gb_connection_protocol_get_version(struct gb_connection *connection)
|
|
|
|
{
|
|
|
|
struct gb_protocol *protocol = connection->protocol;
|
|
|
|
int ret;
|
|
|
|
|
|
|
|
if (protocol->flags & GB_PROTOCOL_SKIP_VERSION)
|
|
|
|
return 0;
|
|
|
|
|
|
|
|
ret = gb_protocol_get_version(connection);
|
|
|
|
if (ret) {
|
2015-10-16 17:56:23 -06:00
|
|
|
dev_err(&connection->bundle->dev,
|
|
|
|
"failed to get protocol version: %d\n", ret);
|
2015-09-17 05:17:25 -06:00
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2015-08-31 05:51:09 -06:00
|
|
|
/*
 * Bring a connection up: enable the host cport, have the SVC route the
 * link, tell the remote interface the cport is in use, then let the
 * bound protocol initialize itself.  On failure, the steps already
 * taken are undone in reverse order via the error labels below.
 */
static int gb_connection_init(struct gb_connection *connection)
{
	struct gb_protocol *protocol = connection->protocol;
	int ret;

	ret = gb_connection_hd_cport_enable(connection);
	if (ret)
		return ret;

	ret = gb_connection_svc_connection_create(connection);
	if (ret)
		goto err_hd_cport_disable;

	ret = gb_connection_control_connected(connection);
	if (ret)
		goto err_svc_destroy;

	/* Need to enable the connection to initialize it */
	spin_lock_irq(&connection->lock);
	connection->state = GB_CONNECTION_STATE_ENABLED;
	spin_unlock_irq(&connection->lock);

	ret = gb_connection_protocol_get_version(connection);
	if (ret)
		goto err_disconnect;

	ret = protocol->connection_init(connection);
	if (ret)
		goto err_disconnect;

	return 0;

err_disconnect:
	/* Mark the connection unusable before tearing the link down. */
	spin_lock_irq(&connection->lock);
	connection->state = GB_CONNECTION_STATE_ERROR;
	spin_unlock_irq(&connection->lock);

	gb_connection_control_disconnected(connection);
err_svc_destroy:
	gb_connection_svc_connection_destroy(connection);
err_hd_cport_disable:
	gb_connection_hd_cport_disable(connection);

	return ret;
}
|
2014-10-20 22:01:04 -06:00
|
|
|
|
2015-08-31 05:51:13 -06:00
|
|
|
/*
 * Shut down a connection that was previously brought up: cancel all
 * in-flight operations, then undo protocol, control, SVC and host
 * cport setup in reverse bring-up order.
 *
 * Connections that never bound a protocol, or that are not currently
 * in the ENABLED state, are left untouched.
 */
static void gb_connection_exit(struct gb_connection *connection)
{
	if (!connection->protocol)
		return;

	spin_lock_irq(&connection->lock);
	if (connection->state != GB_CONNECTION_STATE_ENABLED) {
		spin_unlock_irq(&connection->lock);
		return;
	}
	/* DESTROYING marks the connection as going away to other paths. */
	connection->state = GB_CONNECTION_STATE_DESTROYING;
	spin_unlock_irq(&connection->lock);

	gb_connection_cancel_operations(connection, -ESHUTDOWN);

	connection->protocol->connection_exit(connection);
	gb_connection_control_disconnected(connection);
	gb_connection_svc_connection_destroy(connection);
	gb_connection_hd_cport_disable(connection);
}
|
2015-07-08 23:26:30 -06:00
|
|
|
|
2015-08-31 05:51:13 -06:00
|
|
|
/*
 * Tear down a previously set up connection.
 */
void gb_connection_destroy(struct gb_connection *connection)
{
	struct ida *id_map;

	if (WARN_ON(!connection))
		return;

	gb_connection_exit(connection);

	/* Unlink from both the host-device and bundle lists. */
	spin_lock_irq(&gb_connections_lock);
	list_del(&connection->bundle_links);
	list_del(&connection->hd_links);
	spin_unlock_irq(&gb_connections_lock);

	if (connection->protocol)
		gb_protocol_put(connection->protocol);
	connection->protocol = NULL;

	/* Return the host cport id for reuse by future connections. */
	id_map = &connection->hd->cport_id_map;
	ida_simple_remove(id_map, connection->hd_cport_id);
	connection->hd_cport_id = CPORT_ID_BAD;

	/* Dropping the last reference frees the connection (see
	 * gb_connection_kref_release(), which runs under and releases
	 * connection_mutex). */
	kref_put_mutex(&connection->kref, gb_connection_kref_release,
		       &connection_mutex);
}
|
|
|
|
|
2015-10-15 09:10:42 -06:00
|
|
|
void gb_connection_latency_tag_enable(struct gb_connection *connection)
|
|
|
|
{
|
2015-11-03 10:03:23 -07:00
|
|
|
struct gb_host_device *hd = connection->hd;
|
2015-10-15 09:10:42 -06:00
|
|
|
int ret;
|
|
|
|
|
|
|
|
if (!hd->driver->latency_tag_enable)
|
|
|
|
return;
|
|
|
|
|
|
|
|
ret = hd->driver->latency_tag_enable(hd, connection->hd_cport_id);
|
|
|
|
if (ret) {
|
2015-10-16 17:56:23 -06:00
|
|
|
dev_err(&connection->bundle->dev,
|
2015-10-15 09:10:42 -06:00
|
|
|
"failed to enable latency tag: %d\n", ret);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
EXPORT_SYMBOL_GPL(gb_connection_latency_tag_enable);
|
|
|
|
|
|
|
|
void gb_connection_latency_tag_disable(struct gb_connection *connection)
|
|
|
|
{
|
2015-11-03 10:03:23 -07:00
|
|
|
struct gb_host_device *hd = connection->hd;
|
2015-10-15 09:10:42 -06:00
|
|
|
int ret;
|
|
|
|
|
|
|
|
if (!hd->driver->latency_tag_disable)
|
|
|
|
return;
|
|
|
|
|
|
|
|
ret = hd->driver->latency_tag_disable(hd, connection->hd_cport_id);
|
|
|
|
if (ret) {
|
2015-10-16 17:56:23 -06:00
|
|
|
dev_err(&connection->bundle->dev,
|
2015-10-15 09:10:42 -06:00
|
|
|
"failed to disable latency tag: %d\n", ret);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
EXPORT_SYMBOL_GPL(gb_connection_latency_tag_disable);
|
|
|
|
|
2015-08-31 05:51:14 -06:00
|
|
|
int gb_connection_bind_protocol(struct gb_connection *connection)
|
2015-08-31 05:51:09 -06:00
|
|
|
{
|
|
|
|
struct gb_protocol *protocol;
|
2015-08-31 05:51:14 -06:00
|
|
|
int ret;
|
2015-08-31 05:51:09 -06:00
|
|
|
|
|
|
|
/* If we already have a protocol bound here, just return */
|
|
|
|
if (connection->protocol)
|
2015-08-31 05:51:14 -06:00
|
|
|
return 0;
|
2015-08-31 05:51:09 -06:00
|
|
|
|
|
|
|
protocol = gb_protocol_get(connection->protocol_id,
|
|
|
|
connection->major,
|
|
|
|
connection->minor);
|
2015-10-13 11:10:28 -06:00
|
|
|
if (!protocol) {
|
2015-11-03 04:11:27 -07:00
|
|
|
dev_warn(connection->hd->parent,
|
2015-10-13 11:10:28 -06:00
|
|
|
"protocol 0x%02hhx version %hhu.%hhu not found\n",
|
|
|
|
connection->protocol_id,
|
|
|
|
connection->major, connection->minor);
|
2015-08-31 05:51:14 -06:00
|
|
|
return 0;
|
2015-10-13 11:10:28 -06:00
|
|
|
}
|
2015-08-31 05:51:09 -06:00
|
|
|
connection->protocol = protocol;
|
|
|
|
|
|
|
|
/*
|
|
|
|
* If we have a valid device_id for the interface block, then we have an
|
|
|
|
* active device, so bring up the connection at the same time.
|
|
|
|
*/
|
|
|
|
if ((!connection->bundle &&
|
2015-09-07 04:31:24 -06:00
|
|
|
protocol->flags & GB_PROTOCOL_NO_BUNDLE) ||
|
2015-08-31 05:51:14 -06:00
|
|
|
connection->bundle->intf->device_id != GB_DEVICE_ID_BAD) {
|
|
|
|
ret = gb_connection_init(connection);
|
|
|
|
if (ret) {
|
|
|
|
gb_protocol_put(protocol);
|
|
|
|
connection->protocol = NULL;
|
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
return 0;
|
2015-08-31 05:51:09 -06:00
|
|
|
}
|