 	}
 	spin_lock_irq(&gb_bundles_lock);
-	list_add_tail(&bundle->links, &gb_ib->interfaces);
+	list_add_tail(&bundle->links, &gb_ib->bundles);
 	spin_unlock_irq(&gb_bundles_lock);
 	return bundle;
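
The hunks in this patch only rename the list head from interfaces to bundles; the locking and list handling are unchanged. For reference, the pattern touched above is plain add-under-spinlock insertion. A minimal sketch of that pattern follows, using the field names from the patch but with simplified stand-in structures and a hypothetical helper, gb_bundle_attach(), that is not part of the driver:

#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/types.h>

/* Simplified stand-ins for the structures touched by this patch. */
struct gb_interface_block {
	struct list_head bundles;	/* head: all bundles on this block */
};

struct gb_bundle {
	struct list_head links;		/* node on gb_interface_block.bundles */
	u8 id;
};

static DEFINE_SPINLOCK(gb_bundles_lock);

/* Hypothetical helper: append a new bundle to its interface block. */
static void gb_bundle_attach(struct gb_interface_block *gb_ib,
			     struct gb_bundle *bundle)
{
	spin_lock_irq(&gb_bundles_lock);
	list_add_tail(&bundle->links, &gb_ib->bundles);
	spin_unlock_irq(&gb_bundles_lock);
}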

 		return;
 	spin_lock_irq(&gb_bundles_lock);
-	list_for_each_entry_safe(bundle, temp, &gb_ib->interfaces, links) {
+	list_for_each_entry_safe(bundle, temp, &gb_ib->bundles, links) {
 		list_del(&bundle->links);
 		gb_bundle_connections_exit(bundle);
 		device_del(&bundle->dev);
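
The teardown hunk above relies on list_for_each_entry_safe() rather than list_for_each_entry() because the loop body unlinks the current entry with list_del(): the safe variant keeps a second cursor (temp) pointing at the next node before the current one is removed. A self-contained sketch of that idiom with generic names follows; where the driver calls gb_bundle_connections_exit() and device_del(), the sketch simply frees the entry:

#include <linux/list.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

struct item {
	struct list_head node;
};

static LIST_HEAD(item_list);
static DEFINE_SPINLOCK(item_lock);

/* Remove and free every item: the "safe" iterator caches the next
 * node in @tmp before @it is unlinked, so list_del() on the current
 * entry does not break the walk. */
static void items_destroy(void)
{
	struct item *it, *tmp;

	spin_lock_irq(&item_lock);
	list_for_each_entry_safe(it, tmp, &item_list, node) {
		list_del(&it->node);
		kfree(it);
	}
	spin_unlock_irq(&item_lock);
}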

 	struct gb_bundle *bundle;
 	spin_lock_irq(&gb_bundles_lock);
-	list_for_each_entry(bundle, &gb_ib->interfaces, links)
+	list_for_each_entry(bundle, &gb_ib->bundles, links)
 		if (bundle->id == bundle_id) {
 			spin_unlock_irq(&gb_bundles_lock);
 			return bundle;
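
The lookup hunk above follows the same rename. Its idiom is a search under the spinlock where the matching path drops the lock before returning. A sketch with generic names; the not-found tail (unlock and return NULL) is outside the quoted context, so its exact shape here is an assumption:

#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/types.h>

struct item {
	struct list_head node;
	u8 id;
};

static LIST_HEAD(item_list);
static DEFINE_SPINLOCK(item_lock);

/* Find an item by id; every exit path drops the lock exactly once. */
static struct item *item_find(u8 id)
{
	struct item *it;

	spin_lock_irq(&item_lock);
	list_for_each_entry(it, &item_list, node) {
		if (it->id == id) {
			spin_unlock_irq(&item_lock);
			return it;
		}
	}
	spin_unlock_irq(&item_lock);

	return NULL;
}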

 	gb_ib->hd = hd; /* XXX refcount? */
 	gb_ib->module_id = module_id;
-	INIT_LIST_HEAD(&gb_ib->interfaces);
+	INIT_LIST_HEAD(&gb_ib->bundles);
 	gb_ib->dev.parent = hd->parent;
 	gb_ib->dev.bus = &greybus_bus_type;

 struct gb_interface_block {
 	struct device dev;
-	struct list_head interfaces;
+	struct list_head bundles;
 	struct list_head links; /* greybus_host_device->modules */
 	u8 module_id; /* Physical location within the Endo */
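
After the rename, the header reads the way the code behaves: each gb_interface_block owns a list head named bundles, and each gb_bundle threads itself onto that list through its links member, with the head initialized by INIT_LIST_HEAD() in the creation hunk before any bundle is added. A compact sketch of that initialization with a reduced stand-in structure and a hypothetical allocator name, gb_ib_alloc():

#include <linux/list.h>
#include <linux/slab.h>
#include <linux/types.h>

/* Reduced stand-in: only the list plumbing from this patch. */
struct gb_interface_block {
	struct list_head bundles;	/* head: bundles on this block */
	u8 module_id;
};

static struct gb_interface_block *gb_ib_alloc(u8 module_id)
{
	struct gb_interface_block *gb_ib;

	gb_ib = kzalloc(sizeof(*gb_ib), GFP_KERNEL);
	if (!gb_ib)
		return NULL;

	gb_ib->module_id = module_id;
	INIT_LIST_HEAD(&gb_ib->bundles);	/* empty list before any add */

	return gb_ib;
}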