path: root/drivers/firewire/ohci.c
Diffstat (limited to 'drivers/firewire/ohci.c')
-rw-r--r--	drivers/firewire/ohci.c	981
1 file changed, 522 insertions(+), 459 deletions(-)
diff --git a/drivers/firewire/ohci.c b/drivers/firewire/ohci.c
index 7bc71f4be64a..edaedd156a6d 100644
--- a/drivers/firewire/ohci.c
+++ b/drivers/firewire/ohci.c
@@ -40,8 +40,16 @@
#include "core.h"
#include "ohci.h"
+#include "packet-header-definitions.h"
+#include "phy-packet-definitions.h"
+
+#include <trace/events/firewire.h>
+
+static u32 cond_le32_to_cpu(__le32 value, bool has_be_header_quirk);
+
+#define CREATE_TRACE_POINTS
+#include <trace/events/firewire_ohci.h>
-#define ohci_info(ohci, f, args...) dev_info(ohci->card.device, f, ##args)
#define ohci_notice(ohci, f, args...) dev_notice(ohci->card.device, f, ##args)
#define ohci_err(ohci, f, args...) dev_err(ohci->card.device, f, ##args)
@@ -68,7 +76,7 @@ struct descriptor {
__le32 branch_address;
__le16 res_count;
__le16 transfer_status;
-} __attribute__((aligned(16)));
+} __aligned(16);
#define CONTROL_SET(regs) (regs)
#define CONTROL_CLEAR(regs) ((regs) + 4)
@@ -153,13 +161,6 @@ struct context {
struct tasklet_struct tasklet;
};
-#define IT_HEADER_SY(v) ((v) << 0)
-#define IT_HEADER_TCODE(v) ((v) << 4)
-#define IT_HEADER_CHANNEL(v) ((v) << 8)
-#define IT_HEADER_TAG(v) ((v) << 14)
-#define IT_HEADER_SPEED(v) ((v) << 16)
-#define IT_HEADER_DATA_LENGTH(v) ((v) << 16)
-
struct iso_context {
struct fw_iso_context base;
struct context context;
@@ -173,7 +174,7 @@ struct iso_context {
u8 tags;
};
-#define CONFIG_ROM_SIZE 1024
+#define CONFIG_ROM_SIZE (CSR_CONFIG_ROM_END - CSR_CONFIG_ROM)
struct fw_ohci {
struct fw_card card;
@@ -255,7 +256,6 @@ static inline struct fw_ohci *fw_ohci(struct fw_card *card)
#define OHCI1394_REGISTER_SIZE 0x800
#define OHCI1394_PCI_HCI_Control 0x40
#define SELF_ID_BUF_SIZE 0x800
-#define OHCI_TCODE_PHY_PACKET 0x0e
#define OHCI_VERSION_1_1 0x010010
static char ohci_driver_name[] = KBUILD_MODNAME;
@@ -393,15 +393,13 @@ MODULE_PARM_DESC(quirks, "Chip quirks (default = 0"
#define OHCI_PARAM_DEBUG_AT_AR 1
#define OHCI_PARAM_DEBUG_SELFIDS 2
#define OHCI_PARAM_DEBUG_IRQS 4
-#define OHCI_PARAM_DEBUG_BUSRESETS 8 /* only effective before chip init */
static int param_debug;
module_param_named(debug, param_debug, int, 0644);
-MODULE_PARM_DESC(debug, "Verbose logging (default = 0"
+MODULE_PARM_DESC(debug, "Verbose logging, deprecated in v6.11 kernel or later. (default = 0"
", AT/AR events = " __stringify(OHCI_PARAM_DEBUG_AT_AR)
", self-IDs = " __stringify(OHCI_PARAM_DEBUG_SELFIDS)
", IRQs = " __stringify(OHCI_PARAM_DEBUG_IRQS)
- ", busReset events = " __stringify(OHCI_PARAM_DEBUG_BUSRESETS)
", or a combination, or all = -1)");
static bool param_remote_dma;
@@ -410,12 +408,7 @@ MODULE_PARM_DESC(remote_dma, "Enable unfiltered remote DMA (default = N)");
static void log_irqs(struct fw_ohci *ohci, u32 evt)
{
- if (likely(!(param_debug &
- (OHCI_PARAM_DEBUG_IRQS | OHCI_PARAM_DEBUG_BUSRESETS))))
- return;
-
- if (!(param_debug & OHCI_PARAM_DEBUG_IRQS) &&
- !(evt & OHCI1394_busReset))
+ if (likely(!(param_debug & OHCI_PARAM_DEBUG_IRQS)))
return;
ohci_notice(ohci, "IRQ %08x%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s\n", evt,
@@ -443,23 +436,25 @@ static void log_irqs(struct fw_ohci *ohci, u32 evt)
? " ?" : "");
}
-static const char *speed[] = {
- [0] = "S100", [1] = "S200", [2] = "S400", [3] = "beta",
-};
-static const char *power[] = {
- [0] = "+0W", [1] = "+15W", [2] = "+30W", [3] = "+45W",
- [4] = "-3W", [5] = " ?W", [6] = "-3..-6W", [7] = "-3..-10W",
-};
-static const char port[] = { '.', '-', 'p', 'c', };
-
-static char _p(u32 *s, int shift)
-{
- return port[*s >> shift & 3];
-}
-
static void log_selfids(struct fw_ohci *ohci, int generation, int self_id_count)
{
- u32 *s;
+ static const char *const speed[] = {
+ [0] = "S100", [1] = "S200", [2] = "S400", [3] = "beta",
+ };
+ static const char *const power[] = {
+ [0] = "+0W", [1] = "+15W", [2] = "+30W", [3] = "+45W",
+ [4] = "-3W", [5] = " ?W", [6] = "-3..-6W", [7] = "-3..-10W",
+ };
+ static const char port[] = {
+ [PHY_PACKET_SELF_ID_PORT_STATUS_NONE] = '.',
+ [PHY_PACKET_SELF_ID_PORT_STATUS_NCONN] = '-',
+ [PHY_PACKET_SELF_ID_PORT_STATUS_PARENT] = 'p',
+ [PHY_PACKET_SELF_ID_PORT_STATUS_CHILD] = 'c',
+ };
+ struct self_id_sequence_enumerator enumerator = {
+ .cursor = ohci->self_id_buffer,
+ .quadlet_count = self_id_count,
+ };
if (likely(!(param_debug & OHCI_PARAM_DEBUG_SELFIDS)))
return;
@@ -467,20 +462,46 @@ static void log_selfids(struct fw_ohci *ohci, int generation, int self_id_count)
ohci_notice(ohci, "%d selfIDs, generation %d, local node ID %04x\n",
self_id_count, generation, ohci->node_id);
- for (s = ohci->self_id_buffer; self_id_count--; ++s)
- if ((*s & 1 << 23) == 0)
- ohci_notice(ohci,
- "selfID 0: %08x, phy %d [%c%c%c] %s gc=%d %s %s%s%s\n",
- *s, *s >> 24 & 63, _p(s, 6), _p(s, 4), _p(s, 2),
- speed[*s >> 14 & 3], *s >> 16 & 63,
- power[*s >> 8 & 7], *s >> 22 & 1 ? "L" : "",
- *s >> 11 & 1 ? "c" : "", *s & 2 ? "i" : "");
- else
+ while (enumerator.quadlet_count > 0) {
+ unsigned int quadlet_count;
+ unsigned int port_index;
+ const u32 *s;
+ int i;
+
+ s = self_id_sequence_enumerator_next(&enumerator, &quadlet_count);
+ if (IS_ERR(s))
+ break;
+
+ ohci_notice(ohci,
+ "selfID 0: %08x, phy %d [%c%c%c] %s gc=%d %s %s%s%s\n",
+ *s,
+ phy_packet_self_id_get_phy_id(*s),
+ port[self_id_sequence_get_port_status(s, quadlet_count, 0)],
+ port[self_id_sequence_get_port_status(s, quadlet_count, 1)],
+ port[self_id_sequence_get_port_status(s, quadlet_count, 2)],
+ speed[*s >> 14 & 3], *s >> 16 & 63,
+ power[*s >> 8 & 7], *s >> 22 & 1 ? "L" : "",
+ *s >> 11 & 1 ? "c" : "", *s & 2 ? "i" : "");
+
+ port_index = 3;
+ for (i = 1; i < quadlet_count; ++i) {
ohci_notice(ohci,
"selfID n: %08x, phy %d [%c%c%c%c%c%c%c%c]\n",
- *s, *s >> 24 & 63,
- _p(s, 16), _p(s, 14), _p(s, 12), _p(s, 10),
- _p(s, 8), _p(s, 6), _p(s, 4), _p(s, 2));
+ s[i],
+ phy_packet_self_id_get_phy_id(s[i]),
+ port[self_id_sequence_get_port_status(s, quadlet_count, port_index)],
+ port[self_id_sequence_get_port_status(s, quadlet_count, port_index + 1)],
+ port[self_id_sequence_get_port_status(s, quadlet_count, port_index + 2)],
+ port[self_id_sequence_get_port_status(s, quadlet_count, port_index + 3)],
+ port[self_id_sequence_get_port_status(s, quadlet_count, port_index + 4)],
+ port[self_id_sequence_get_port_status(s, quadlet_count, port_index + 5)],
+ port[self_id_sequence_get_port_status(s, quadlet_count, port_index + 6)],
+ port[self_id_sequence_get_port_status(s, quadlet_count, port_index + 7)]
+ );
+
+ port_index += 8;
+ }
+ }
}
static const char *evts[] = {
@@ -502,28 +523,36 @@ static const char *evts[] = {
[0x1e] = "ack_type_error", [0x1f] = "-reserved-",
[0x20] = "pending/cancelled",
};
-static const char *tcodes[] = {
- [0x0] = "QW req", [0x1] = "BW req",
- [0x2] = "W resp", [0x3] = "-reserved-",
- [0x4] = "QR req", [0x5] = "BR req",
- [0x6] = "QR resp", [0x7] = "BR resp",
- [0x8] = "cycle start", [0x9] = "Lk req",
- [0xa] = "async stream packet", [0xb] = "Lk resp",
- [0xc] = "-reserved-", [0xd] = "-reserved-",
- [0xe] = "link internal", [0xf] = "-reserved-",
-};
static void log_ar_at_event(struct fw_ohci *ohci,
char dir, int speed, u32 *header, int evt)
{
- int tcode = header[0] >> 4 & 0xf;
+ static const char *const tcodes[] = {
+ [TCODE_WRITE_QUADLET_REQUEST] = "QW req",
+ [TCODE_WRITE_BLOCK_REQUEST] = "BW req",
+ [TCODE_WRITE_RESPONSE] = "W resp",
+ [0x3] = "-reserved-",
+ [TCODE_READ_QUADLET_REQUEST] = "QR req",
+ [TCODE_READ_BLOCK_REQUEST] = "BR req",
+ [TCODE_READ_QUADLET_RESPONSE] = "QR resp",
+ [TCODE_READ_BLOCK_RESPONSE] = "BR resp",
+ [TCODE_CYCLE_START] = "cycle start",
+ [TCODE_LOCK_REQUEST] = "Lk req",
+ [TCODE_STREAM_DATA] = "async stream packet",
+ [TCODE_LOCK_RESPONSE] = "Lk resp",
+ [0xc] = "-reserved-",
+ [0xd] = "-reserved-",
+ [TCODE_LINK_INTERNAL] = "link internal",
+ [0xf] = "-reserved-",
+ };
+ int tcode = async_header_get_tcode(header);
char specific[12];
if (likely(!(param_debug & OHCI_PARAM_DEBUG_AT_AR)))
return;
if (unlikely(evt >= ARRAY_SIZE(evts)))
- evt = 0x1f;
+ evt = 0x1f;
if (evt == OHCI1394_evt_bus_reset) {
ohci_notice(ohci, "A%c evt_bus_reset, generation %d\n",
@@ -532,40 +561,51 @@ static void log_ar_at_event(struct fw_ohci *ohci,
}
switch (tcode) {
- case 0x0: case 0x6: case 0x8:
+ case TCODE_WRITE_QUADLET_REQUEST:
+ case TCODE_READ_QUADLET_RESPONSE:
+ case TCODE_CYCLE_START:
snprintf(specific, sizeof(specific), " = %08x",
be32_to_cpu((__force __be32)header[3]));
break;
- case 0x1: case 0x5: case 0x7: case 0x9: case 0xb:
+ case TCODE_WRITE_BLOCK_REQUEST:
+ case TCODE_READ_BLOCK_REQUEST:
+ case TCODE_READ_BLOCK_RESPONSE:
+ case TCODE_LOCK_REQUEST:
+ case TCODE_LOCK_RESPONSE:
snprintf(specific, sizeof(specific), " %x,%x",
- header[3] >> 16, header[3] & 0xffff);
+ async_header_get_data_length(header),
+ async_header_get_extended_tcode(header));
break;
default:
specific[0] = '\0';
}
switch (tcode) {
- case 0xa:
+ case TCODE_STREAM_DATA:
ohci_notice(ohci, "A%c %s, %s\n",
dir, evts[evt], tcodes[tcode]);
break;
- case 0xe:
+ case TCODE_LINK_INTERNAL:
ohci_notice(ohci, "A%c %s, PHY %08x %08x\n",
dir, evts[evt], header[1], header[2]);
break;
- case 0x0: case 0x1: case 0x4: case 0x5: case 0x9:
+ case TCODE_WRITE_QUADLET_REQUEST:
+ case TCODE_WRITE_BLOCK_REQUEST:
+ case TCODE_READ_QUADLET_REQUEST:
+ case TCODE_READ_BLOCK_REQUEST:
+ case TCODE_LOCK_REQUEST:
ohci_notice(ohci,
- "A%c spd %x tl %02x, %04x -> %04x, %s, %s, %04x%08x%s\n",
- dir, speed, header[0] >> 10 & 0x3f,
- header[1] >> 16, header[0] >> 16, evts[evt],
- tcodes[tcode], header[1] & 0xffff, header[2], specific);
+ "A%c spd %x tl %02x, %04x -> %04x, %s, %s, %012llx%s\n",
+ dir, speed, async_header_get_tlabel(header),
+ async_header_get_source(header), async_header_get_destination(header),
+ evts[evt], tcodes[tcode], async_header_get_offset(header), specific);
break;
default:
ohci_notice(ohci,
"A%c spd %x tl %02x, %04x -> %04x, %s, %s%s\n",
- dir, speed, header[0] >> 10 & 0x3f,
- header[1] >> 16, header[0] >> 16, evts[evt],
- tcodes[tcode], specific);
+ dir, speed, async_header_get_tlabel(header),
+ async_header_get_source(header), async_header_get_destination(header),
+ evts[evt], tcodes[tcode], specific);
}
}
@@ -672,26 +712,20 @@ static int read_paged_phy_reg(struct fw_ohci *ohci, int page, int addr)
static int ohci_read_phy_reg(struct fw_card *card, int addr)
{
struct fw_ohci *ohci = fw_ohci(card);
- int ret;
- mutex_lock(&ohci->phy_reg_mutex);
- ret = read_phy_reg(ohci, addr);
- mutex_unlock(&ohci->phy_reg_mutex);
+ guard(mutex)(&ohci->phy_reg_mutex);
- return ret;
+ return read_phy_reg(ohci, addr);
}
static int ohci_update_phy_reg(struct fw_card *card, int addr,
int clear_bits, int set_bits)
{
struct fw_ohci *ohci = fw_ohci(card);
- int ret;
- mutex_lock(&ohci->phy_reg_mutex);
- ret = update_phy_reg(ohci, addr, clear_bits, set_bits);
- mutex_unlock(&ohci->phy_reg_mutex);
+ guard(mutex)(&ohci->phy_reg_mutex);
- return ret;
+ return update_phy_reg(ohci, addr, clear_bits, set_bits);
}
static inline dma_addr_t ar_buffer_bus(struct ar_context *ctx, unsigned int i)
@@ -836,10 +870,25 @@ static void ar_sync_buffers_for_cpu(struct ar_context *ctx,
}
#if defined(CONFIG_PPC_PMAC) && defined(CONFIG_PPC32)
-#define cond_le32_to_cpu(v) \
- (ohci->quirks & QUIRK_BE_HEADERS ? (__force __u32)(v) : le32_to_cpu(v))
+static u32 cond_le32_to_cpu(__le32 value, bool has_be_header_quirk)
+{
+ return has_be_header_quirk ? (__force __u32)value : le32_to_cpu(value);
+}
+
+static bool has_be_header_quirk(const struct fw_ohci *ohci)
+{
+ return !!(ohci->quirks & QUIRK_BE_HEADERS);
+}
#else
-#define cond_le32_to_cpu(v) le32_to_cpu(v)
+static u32 cond_le32_to_cpu(__le32 value, bool has_be_header_quirk __maybe_unused)
+{
+ return le32_to_cpu(value);
+}
+
+static bool has_be_header_quirk(const struct fw_ohci *ohci)
+{
+ return false;
+}
#endif
static __le32 *handle_ar_packet(struct ar_context *ctx, __le32 *buffer)
@@ -849,11 +898,11 @@ static __le32 *handle_ar_packet(struct ar_context *ctx, __le32 *buffer)
u32 status, length, tcode;
int evt;
- p.header[0] = cond_le32_to_cpu(buffer[0]);
- p.header[1] = cond_le32_to_cpu(buffer[1]);
- p.header[2] = cond_le32_to_cpu(buffer[2]);
+ p.header[0] = cond_le32_to_cpu(buffer[0], has_be_header_quirk(ohci));
+ p.header[1] = cond_le32_to_cpu(buffer[1], has_be_header_quirk(ohci));
+ p.header[2] = cond_le32_to_cpu(buffer[2], has_be_header_quirk(ohci));
- tcode = (p.header[0] >> 4) & 0x0f;
+ tcode = async_header_get_tcode(p.header);
switch (tcode) {
case TCODE_WRITE_QUADLET_REQUEST:
case TCODE_READ_QUADLET_RESPONSE:
@@ -863,7 +912,7 @@ static __le32 *handle_ar_packet(struct ar_context *ctx, __le32 *buffer)
break;
case TCODE_READ_BLOCK_REQUEST :
- p.header[3] = cond_le32_to_cpu(buffer[3]);
+ p.header[3] = cond_le32_to_cpu(buffer[3], has_be_header_quirk(ohci));
p.header_length = 16;
p.payload_length = 0;
break;
@@ -872,9 +921,9 @@ static __le32 *handle_ar_packet(struct ar_context *ctx, __le32 *buffer)
case TCODE_READ_BLOCK_RESPONSE:
case TCODE_LOCK_REQUEST:
case TCODE_LOCK_RESPONSE:
- p.header[3] = cond_le32_to_cpu(buffer[3]);
+ p.header[3] = cond_le32_to_cpu(buffer[3], has_be_header_quirk(ohci));
p.header_length = 16;
- p.payload_length = p.header[3] >> 16;
+ p.payload_length = async_header_get_data_length(p.header);
if (p.payload_length > MAX_ASYNC_PAYLOAD) {
ar_context_abort(ctx, "invalid packet length");
return NULL;
@@ -883,7 +932,7 @@ static __le32 *handle_ar_packet(struct ar_context *ctx, __le32 *buffer)
case TCODE_WRITE_RESPONSE:
case TCODE_READ_QUADLET_REQUEST:
- case OHCI_TCODE_PHY_PACKET:
+ case TCODE_LINK_INTERNAL:
p.header_length = 12;
p.payload_length = 0;
break;
@@ -897,7 +946,7 @@ static __le32 *handle_ar_packet(struct ar_context *ctx, __le32 *buffer)
/* FIXME: What to do about evt_* errors? */
length = (p.header_length + p.payload_length + 3) / 4;
- status = cond_le32_to_cpu(buffer[length]);
+ status = cond_le32_to_cpu(buffer[length], has_be_header_quirk(ohci));
evt = (status >> 16) & 0x1f;
p.ack = evt - 16;
@@ -911,8 +960,7 @@ static __le32 *handle_ar_packet(struct ar_context *ctx, __le32 *buffer)
* Several controllers, notably from NEC and VIA, forget to
* write ack_complete status at PHY packet reception.
*/
- if (evt == OHCI1394_evt_no_status &&
- (p.header[0] & 0xff) == (OHCI1394_phy_tcode << 4))
+ if (evt == OHCI1394_evt_no_status && tcode == TCODE_LINK_INTERNAL)
p.ack = ACK_COMPLETE;
/*
@@ -1093,9 +1141,8 @@ static struct descriptor *find_branch_descriptor(struct descriptor *d, int z)
return d + z - 1;
}
-static void context_tasklet(unsigned long data)
+static void context_retire_descriptors(struct context *ctx)
{
- struct context *ctx = (struct context *) data;
struct descriptor *d, *last;
u32 address;
int z;
@@ -1124,18 +1171,31 @@ static void context_tasklet(unsigned long data)
break;
if (old_desc != desc) {
- /* If we've advanced to the next buffer, move the
- * previous buffer to the free list. */
- unsigned long flags;
+ // If we've advanced to the next buffer, move the previous buffer to the
+ // free list.
old_desc->used = 0;
- spin_lock_irqsave(&ctx->ohci->lock, flags);
+ guard(spinlock_irqsave)(&ctx->ohci->lock);
list_move_tail(&old_desc->list, &ctx->buffer_list);
- spin_unlock_irqrestore(&ctx->ohci->lock, flags);
}
ctx->last = last;
}
}
+static void context_tasklet(unsigned long data)
+{
+ struct context *ctx = (struct context *) data;
+
+ context_retire_descriptors(ctx);
+}
+
+static void ohci_isoc_context_work(struct work_struct *work)
+{
+ struct fw_iso_context *base = container_of(work, struct fw_iso_context, work);
+ struct iso_context *isoc_ctx = container_of(base, struct iso_context, base);
+
+ context_retire_descriptors(&isoc_ctx->context);
+}
+
/*
* Allocate a new buffer and add it to the list of free buffers for this
* context. Must be called with ohci->lock held.
@@ -1324,7 +1384,7 @@ struct driver_data {
};
/*
- * This function apppends a packet to the DMA queue for transmission.
+ * This function appends a packet to the DMA queue for transmission.
* Must always be called with the ochi->lock held to ensure proper
* generation handling and locking around packet queue manipulation.
*/
@@ -1347,13 +1407,7 @@ static int at_context_queue_packet(struct context *ctx,
d[0].control = cpu_to_le16(DESCRIPTOR_KEY_IMMEDIATE);
d[0].res_count = cpu_to_le16(packet->timestamp);
- /*
- * The DMA format for asynchronous link packets is different
- * from the IEEE1394 layout, so shift the fields around
- * accordingly.
- */
-
- tcode = (packet->header[0] >> 4) & 0x0f;
+ tcode = async_header_get_tcode(packet->header);
header = (__le32 *) &d[1];
switch (tcode) {
case TCODE_WRITE_QUADLET_REQUEST:
@@ -1365,23 +1419,33 @@ static int at_context_queue_packet(struct context *ctx,
case TCODE_READ_BLOCK_RESPONSE:
case TCODE_LOCK_REQUEST:
case TCODE_LOCK_RESPONSE:
- header[0] = cpu_to_le32((packet->header[0] & 0xffff) |
- (packet->speed << 16));
- header[1] = cpu_to_le32((packet->header[1] & 0xffff) |
- (packet->header[0] & 0xffff0000));
- header[2] = cpu_to_le32(packet->header[2]);
+ ohci1394_at_data_set_src_bus_id(header, false);
+ ohci1394_at_data_set_speed(header, packet->speed);
+ ohci1394_at_data_set_tlabel(header, async_header_get_tlabel(packet->header));
+ ohci1394_at_data_set_retry(header, async_header_get_retry(packet->header));
+ ohci1394_at_data_set_tcode(header, tcode);
+
+ ohci1394_at_data_set_destination_id(header,
+ async_header_get_destination(packet->header));
- if (TCODE_IS_BLOCK_PACKET(tcode))
+ if (ctx == &ctx->ohci->at_response_ctx) {
+ ohci1394_at_data_set_rcode(header, async_header_get_rcode(packet->header));
+ } else {
+ ohci1394_at_data_set_destination_offset(header,
+ async_header_get_offset(packet->header));
+ }
+
+ if (tcode_is_block_packet(tcode))
header[3] = cpu_to_le32(packet->header[3]);
else
header[3] = (__force __le32) packet->header[3];
d[0].req_count = cpu_to_le16(packet->header_length);
break;
-
case TCODE_LINK_INTERNAL:
- header[0] = cpu_to_le32((OHCI1394_phy_tcode << 4) |
- (packet->speed << 16));
+ ohci1394_at_data_set_speed(header, packet->speed);
+ ohci1394_at_data_set_tcode(header, TCODE_LINK_INTERNAL);
+
header[1] = cpu_to_le32(packet->header[1]);
header[2] = cpu_to_le32(packet->header[2]);
d[0].req_count = cpu_to_le16(12);
@@ -1391,9 +1455,14 @@ static int at_context_queue_packet(struct context *ctx,
break;
case TCODE_STREAM_DATA:
- header[0] = cpu_to_le32((packet->header[0] & 0xffff) |
- (packet->speed << 16));
- header[1] = cpu_to_le32(packet->header[0] & 0xffff0000);
+ ohci1394_it_data_set_speed(header, packet->speed);
+ ohci1394_it_data_set_tag(header, isoc_header_get_tag(packet->header[0]));
+ ohci1394_it_data_set_channel(header, isoc_header_get_channel(packet->header[0]));
+ ohci1394_it_data_set_tcode(header, TCODE_STREAM_DATA);
+ ohci1394_it_data_set_sync(header, isoc_header_get_sy(packet->header[0]));
+
+ ohci1394_it_data_set_data_length(header, isoc_header_get_data_length(packet->header[0]));
+
d[0].req_count = cpu_to_le16(8);
break;
@@ -1550,11 +1619,7 @@ static int handle_at_packet(struct context *context,
return 1;
}
-#define HEADER_GET_DESTINATION(q) (((q) >> 16) & 0xffff)
-#define HEADER_GET_TCODE(q) (((q) >> 4) & 0x0f)
-#define HEADER_GET_OFFSET_HIGH(q) (((q) >> 0) & 0xffff)
-#define HEADER_GET_DATA_LENGTH(q) (((q) >> 16) & 0xffff)
-#define HEADER_GET_EXTENDED_TCODE(q) (((q) >> 0) & 0xffff)
+static u32 get_cycle_time(struct fw_ohci *ohci);
static void handle_local_rom(struct fw_ohci *ohci,
struct fw_packet *packet, u32 csr)
@@ -1562,9 +1627,9 @@ static void handle_local_rom(struct fw_ohci *ohci,
struct fw_packet response;
int tcode, length, i;
- tcode = HEADER_GET_TCODE(packet->header[0]);
- if (TCODE_IS_BLOCK_PACKET(tcode))
- length = HEADER_GET_DATA_LENGTH(packet->header[3]);
+ tcode = async_header_get_tcode(packet->header);
+ if (tcode_is_block_packet(tcode))
+ length = async_header_get_data_length(packet->header);
else
length = 4;
@@ -1572,7 +1637,7 @@ static void handle_local_rom(struct fw_ohci *ohci,
if (i + length > CONFIG_ROM_SIZE) {
fw_fill_response(&response, packet->header,
RCODE_ADDRESS_ERROR, NULL, 0);
- } else if (!TCODE_IS_READ_REQUEST(tcode)) {
+ } else if (!tcode_is_read_request(tcode)) {
fw_fill_response(&response, packet->header,
RCODE_TYPE_ERROR, NULL, 0);
} else {
@@ -1580,6 +1645,8 @@ static void handle_local_rom(struct fw_ohci *ohci,
(void *) ohci->config_rom + i, length);
}
+ // Timestamping on behalf of the hardware.
+ response.timestamp = cycle_time_to_ohci_tstamp(get_cycle_time(ohci));
fw_core_handle_response(&ohci->card, &response);
}
@@ -1591,10 +1658,10 @@ static void handle_local_lock(struct fw_ohci *ohci,
__be32 *payload, lock_old;
u32 lock_arg, lock_data;
- tcode = HEADER_GET_TCODE(packet->header[0]);
- length = HEADER_GET_DATA_LENGTH(packet->header[3]);
+ tcode = async_header_get_tcode(packet->header);
+ length = async_header_get_data_length(packet->header);
payload = packet->payload;
- ext_tcode = HEADER_GET_EXTENDED_TCODE(packet->header[3]);
+ ext_tcode = async_header_get_extended_tcode(packet->header);
if (tcode == TCODE_LOCK_REQUEST &&
ext_tcode == EXTCODE_COMPARE_SWAP && length == 8) {
@@ -1628,6 +1695,8 @@ static void handle_local_lock(struct fw_ohci *ohci,
fw_fill_response(&response, packet->header, RCODE_BUSY, NULL, 0);
out:
+ // Timestamping on behalf of the hardware.
+ response.timestamp = cycle_time_to_ohci_tstamp(get_cycle_time(ohci));
fw_core_handle_response(&ohci->card, &response);
}
@@ -1640,10 +1709,7 @@ static void handle_local_request(struct context *ctx, struct fw_packet *packet)
packet->callback(packet, &ctx->ohci->card, packet->ack);
}
- offset =
- ((unsigned long long)
- HEADER_GET_OFFSET_HIGH(packet->header[1]) << 32) |
- packet->header[2];
+ offset = async_header_get_offset(packet->header);
csr = offset - CSR_REGISTER_BASE;
/* Handle config rom reads. */
@@ -1670,8 +1736,6 @@ static void handle_local_request(struct context *ctx, struct fw_packet *packet)
}
}
-static u32 get_cycle_time(struct fw_ohci *ohci);
-
static void at_context_transmit(struct context *ctx, struct fw_packet *packet)
{
unsigned long flags;
@@ -1679,7 +1743,7 @@ static void at_context_transmit(struct context *ctx, struct fw_packet *packet)
spin_lock_irqsave(&ctx->ohci->lock, flags);
- if (HEADER_GET_DESTINATION(packet->header[0]) == ctx->ohci->node_id &&
+ if (async_header_get_destination(packet->header) == ctx->ohci->node_id &&
ctx->ohci->generation == packet->generation) {
spin_unlock_irqrestore(&ctx->ohci->lock, flags);
@@ -1818,66 +1882,87 @@ static u32 update_bus_time(struct fw_ohci *ohci)
return ohci->bus_time | cycle_time_seconds;
}
-static int get_status_for_port(struct fw_ohci *ohci, int port_index)
+static int get_status_for_port(struct fw_ohci *ohci, int port_index,
+ enum phy_packet_self_id_port_status *status)
{
int reg;
- mutex_lock(&ohci->phy_reg_mutex);
- reg = write_phy_reg(ohci, 7, port_index);
- if (reg >= 0)
+ scoped_guard(mutex, &ohci->phy_reg_mutex) {
+ reg = write_phy_reg(ohci, 7, port_index);
+ if (reg < 0)
+ return reg;
+
reg = read_phy_reg(ohci, 8);
- mutex_unlock(&ohci->phy_reg_mutex);
- if (reg < 0)
- return reg;
+ if (reg < 0)
+ return reg;
+ }
switch (reg & 0x0f) {
case 0x06:
- return 2; /* is child node (connected to parent node) */
+ // is child node (connected to parent node)
+ *status = PHY_PACKET_SELF_ID_PORT_STATUS_PARENT;
+ break;
case 0x0e:
- return 3; /* is parent node (connected to child node) */
+ // is parent node (connected to child node)
+ *status = PHY_PACKET_SELF_ID_PORT_STATUS_CHILD;
+ break;
+ default:
+ // not connected
+ *status = PHY_PACKET_SELF_ID_PORT_STATUS_NCONN;
+ break;
}
- return 1; /* not connected */
+
+ return 0;
}
static int get_self_id_pos(struct fw_ohci *ohci, u32 self_id,
int self_id_count)
{
+ unsigned int left_phy_id = phy_packet_self_id_get_phy_id(self_id);
int i;
- u32 entry;
for (i = 0; i < self_id_count; i++) {
- entry = ohci->self_id_buffer[i];
- if ((self_id & 0xff000000) == (entry & 0xff000000))
+ u32 entry = ohci->self_id_buffer[i];
+ unsigned int right_phy_id = phy_packet_self_id_get_phy_id(entry);
+
+ if (left_phy_id == right_phy_id)
return -1;
- if ((self_id & 0xff000000) < (entry & 0xff000000))
+ if (left_phy_id < right_phy_id)
return i;
}
return i;
}
-static int initiated_reset(struct fw_ohci *ohci)
+static int detect_initiated_reset(struct fw_ohci *ohci, bool *is_initiated_reset)
{
int reg;
- int ret = 0;
- mutex_lock(&ohci->phy_reg_mutex);
- reg = write_phy_reg(ohci, 7, 0xe0); /* Select page 7 */
- if (reg >= 0) {
- reg = read_phy_reg(ohci, 8);
- reg |= 0x40;
- reg = write_phy_reg(ohci, 8, reg); /* set PMODE bit */
- if (reg >= 0) {
- reg = read_phy_reg(ohci, 12); /* read register 12 */
- if (reg >= 0) {
- if ((reg & 0x08) == 0x08) {
- /* bit 3 indicates "initiated reset" */
- ret = 0x2;
- }
- }
- }
- }
- mutex_unlock(&ohci->phy_reg_mutex);
- return ret;
+ guard(mutex)(&ohci->phy_reg_mutex);
+
+ // Select page 7
+ reg = write_phy_reg(ohci, 7, 0xe0);
+ if (reg < 0)
+ return reg;
+
+ reg = read_phy_reg(ohci, 8);
+ if (reg < 0)
+ return reg;
+
+ // set PMODE bit
+ reg |= 0x40;
+ reg = write_phy_reg(ohci, 8, reg);
+ if (reg < 0)
+ return reg;
+
+ // read register 12
+ reg = read_phy_reg(ohci, 12);
+ if (reg < 0)
+ return reg;
+
+ // bit 3 indicates "initiated reset"
+ *is_initiated_reset = !!((reg & 0x08) == 0x08);
+
+ return 0;
}
/*
@@ -1887,9 +1972,15 @@ static int initiated_reset(struct fw_ohci *ohci)
*/
static int find_and_insert_self_id(struct fw_ohci *ohci, int self_id_count)
{
- int reg, i, pos, status;
- /* link active 1, speed 3, bridge 0, contender 1, more packets 0 */
- u32 self_id = 0x8040c800;
+ int reg, i, pos, err;
+ bool is_initiated_reset;
+ u32 self_id = 0;
+
+ // link active 1, speed 3, bridge 0, contender 1, more packets 0.
+ phy_packet_set_packet_identifier(&self_id, PHY_PACKET_PACKET_IDENTIFIER_SELF_ID);
+ phy_packet_self_id_zero_set_link_active(&self_id, true);
+ phy_packet_self_id_zero_set_scode(&self_id, SCODE_800);
+ phy_packet_self_id_zero_set_contender(&self_id, true);
reg = reg_read(ohci, OHCI1394_NodeID);
if (!(reg & OHCI1394_NodeID_idValid)) {
@@ -1897,26 +1988,32 @@ static int find_and_insert_self_id(struct fw_ohci *ohci, int self_id_count)
"node ID not valid, new bus reset in progress\n");
return -EBUSY;
}
- self_id |= ((reg & 0x3f) << 24); /* phy ID */
+ phy_packet_self_id_set_phy_id(&self_id, reg & 0x3f);
reg = ohci_read_phy_reg(&ohci->card, 4);
if (reg < 0)
return reg;
- self_id |= ((reg & 0x07) << 8); /* power class */
+ phy_packet_self_id_zero_set_power_class(&self_id, reg & 0x07);
reg = ohci_read_phy_reg(&ohci->card, 1);
if (reg < 0)
return reg;
- self_id |= ((reg & 0x3f) << 16); /* gap count */
+ phy_packet_self_id_zero_set_gap_count(&self_id, reg & 0x3f);
for (i = 0; i < 3; i++) {
- status = get_status_for_port(ohci, i);
- if (status < 0)
- return status;
- self_id |= ((status & 0x3) << (6 - (i * 2)));
+ enum phy_packet_self_id_port_status status;
+
+ err = get_status_for_port(ohci, i, &status);
+ if (err < 0)
+ return err;
+
+ self_id_sequence_set_port_status(&self_id, 1, i, status);
}
- self_id |= initiated_reset(ohci);
+ err = detect_initiated_reset(ohci, &is_initiated_reset);
+ if (err < 0)
+ return err;
+ phy_packet_self_id_zero_set_initiated_reset(&self_id, is_initiated_reset);
pos = get_self_id_pos(ohci, self_id, self_id_count);
if (pos >= 0) {
@@ -1934,7 +2031,7 @@ static void bus_reset_work(struct work_struct *work)
struct fw_ohci *ohci =
container_of(work, struct fw_ohci, bus_reset_work);
int self_id_count, generation, new_generation, i, j;
- u32 reg;
+ u32 reg, quadlet;
void *free_rom = NULL;
dma_addr_t free_rom_bus = 0;
bool is_new_root;
@@ -1959,7 +2056,7 @@ static void bus_reset_work(struct work_struct *work)
ohci->is_root = is_new_root;
reg = reg_read(ohci, OHCI1394_SelfIDCount);
- if (reg & OHCI1394_SelfIDCount_selfIDError) {
+ if (ohci1394_self_id_count_is_error(reg)) {
ohci_notice(ohci, "self ID receive error\n");
return;
}
@@ -1969,19 +2066,20 @@ static void bus_reset_work(struct work_struct *work)
* the inverted quadlets and a header quadlet, we shift one
* bit extra to get the actual number of self IDs.
*/
- self_id_count = (reg >> 3) & 0xff;
+ self_id_count = ohci1394_self_id_count_get_size(reg) >> 1;
if (self_id_count > 252) {
ohci_notice(ohci, "bad selfIDSize (%08x)\n", reg);
return;
}
- generation = (cond_le32_to_cpu(ohci->self_id[0]) >> 16) & 0xff;
+ quadlet = cond_le32_to_cpu(ohci->self_id[0], has_be_header_quirk(ohci));
+ generation = ohci1394_self_id_receive_q0_get_generation(quadlet);
rmb();
for (i = 1, j = 0; j < self_id_count; i += 2, j++) {
- u32 id = cond_le32_to_cpu(ohci->self_id[i]);
- u32 id2 = cond_le32_to_cpu(ohci->self_id[i + 1]);
+ u32 id = cond_le32_to_cpu(ohci->self_id[i], has_be_header_quirk(ohci));
+ u32 id2 = cond_le32_to_cpu(ohci->self_id[i + 1], has_be_header_quirk(ohci));
if (id != ~id2) {
/*
@@ -2033,20 +2131,19 @@ static void bus_reset_work(struct work_struct *work)
* of self IDs.
*/
- new_generation = (reg_read(ohci, OHCI1394_SelfIDCount) >> 16) & 0xff;
+ reg = reg_read(ohci, OHCI1394_SelfIDCount);
+ new_generation = ohci1394_self_id_count_get_generation(reg);
if (new_generation != generation) {
ohci_notice(ohci, "new bus reset, discarding self ids\n");
return;
}
- /* FIXME: Document how the locking works. */
- spin_lock_irq(&ohci->lock);
-
- ohci->generation = -1; /* prevent AT packet queueing */
- context_stop(&ohci->at_request_ctx);
- context_stop(&ohci->at_response_ctx);
-
- spin_unlock_irq(&ohci->lock);
+ // FIXME: Document how the locking works.
+ scoped_guard(spinlock_irq, &ohci->lock) {
+ ohci->generation = -1; // prevent AT packet queueing
+ context_stop(&ohci->at_request_ctx);
+ context_stop(&ohci->at_response_ctx);
+ }
/*
* Per OHCI 1.2 draft, clause 7.2.3.3, hardware may leave unsent
@@ -2056,52 +2153,42 @@ static void bus_reset_work(struct work_struct *work)
at_context_flush(&ohci->at_request_ctx);
at_context_flush(&ohci->at_response_ctx);
- spin_lock_irq(&ohci->lock);
-
- ohci->generation = generation;
- reg_write(ohci, OHCI1394_IntEventClear, OHCI1394_busReset);
-
- if (ohci->quirks & QUIRK_RESET_PACKET)
- ohci->request_generation = generation;
-
- /*
- * This next bit is unrelated to the AT context stuff but we
- * have to do it under the spinlock also. If a new config rom
- * was set up before this reset, the old one is now no longer
- * in use and we can free it. Update the config rom pointers
- * to point to the current config rom and clear the
- * next_config_rom pointer so a new update can take place.
- */
-
- if (ohci->next_config_rom != NULL) {
- if (ohci->next_config_rom != ohci->config_rom) {
- free_rom = ohci->config_rom;
- free_rom_bus = ohci->config_rom_bus;
+ scoped_guard(spinlock_irq, &ohci->lock) {
+ ohci->generation = generation;
+ reg_write(ohci, OHCI1394_IntEventClear, OHCI1394_busReset);
+ reg_write(ohci, OHCI1394_IntMaskSet, OHCI1394_busReset);
+
+ if (ohci->quirks & QUIRK_RESET_PACKET)
+ ohci->request_generation = generation;
+
+ // This next bit is unrelated to the AT context stuff but we have to do it under the
+ // spinlock also. If a new config rom was set up before this reset, the old one is
+ // now no longer in use and we can free it. Update the config rom pointers to point
+ // to the current config rom and clear the next_config_rom pointer so a new update
+ // can take place.
+ if (ohci->next_config_rom != NULL) {
+ if (ohci->next_config_rom != ohci->config_rom) {
+ free_rom = ohci->config_rom;
+ free_rom_bus = ohci->config_rom_bus;
+ }
+ ohci->config_rom = ohci->next_config_rom;
+ ohci->config_rom_bus = ohci->next_config_rom_bus;
+ ohci->next_config_rom = NULL;
+
+ // Restore config_rom image and manually update config_rom registers.
+ // Writing the header quadlet will indicate that the config rom is ready,
+ // so we do that last.
+ reg_write(ohci, OHCI1394_BusOptions, be32_to_cpu(ohci->config_rom[2]));
+ ohci->config_rom[0] = ohci->next_header;
+ reg_write(ohci, OHCI1394_ConfigROMhdr, be32_to_cpu(ohci->next_header));
}
- ohci->config_rom = ohci->next_config_rom;
- ohci->config_rom_bus = ohci->next_config_rom_bus;
- ohci->next_config_rom = NULL;
- /*
- * Restore config_rom image and manually update
- * config_rom registers. Writing the header quadlet
- * will indicate that the config rom is ready, so we
- * do that last.
- */
- reg_write(ohci, OHCI1394_BusOptions,
- be32_to_cpu(ohci->config_rom[2]));
- ohci->config_rom[0] = ohci->next_header;
- reg_write(ohci, OHCI1394_ConfigROMhdr,
- be32_to_cpu(ohci->next_header));
- }
-
- if (param_remote_dma) {
- reg_write(ohci, OHCI1394_PhyReqFilterHiSet, ~0);
- reg_write(ohci, OHCI1394_PhyReqFilterLoSet, ~0);
+ if (param_remote_dma) {
+ reg_write(ohci, OHCI1394_PhyReqFilterHiSet, ~0);
+ reg_write(ohci, OHCI1394_PhyReqFilterLoSet, ~0);
+ }
}
- spin_unlock_irq(&ohci->lock);
-
if (free_rom)
dmam_free_coherent(ohci->card.device, CONFIG_ROM_SIZE, free_rom, free_rom_bus);
@@ -2124,16 +2211,32 @@ static irqreturn_t irq_handler(int irq, void *data)
if (!event || !~event)
return IRQ_NONE;
+ if (unlikely(param_debug > 0)) {
+ dev_notice_ratelimited(ohci->card.device,
+ "The debug parameter is superseded by tracepoints events, and deprecated.");
+ }
+
/*
- * busReset and postedWriteErr must not be cleared yet
+ * busReset and postedWriteErr events must not be cleared yet
* (OHCI 1.1 clauses 7.2.3.2 and 13.2.8.1)
*/
reg_write(ohci, OHCI1394_IntEventClear,
event & ~(OHCI1394_busReset | OHCI1394_postedWriteErr));
+ trace_irqs(ohci->card.index, event);
log_irqs(ohci, event);
+ // The flag is masked again at bus_reset_work() scheduled by selfID event.
+ if (event & OHCI1394_busReset)
+ reg_write(ohci, OHCI1394_IntMaskClear, OHCI1394_busReset);
+
+ if (event & OHCI1394_selfIDComplete) {
+ if (trace_self_id_complete_enabled()) {
+ u32 reg = reg_read(ohci, OHCI1394_SelfIDCount);
- if (event & OHCI1394_selfIDComplete)
+ trace_self_id_complete(ohci->card.index, reg, ohci->self_id,
+ has_be_header_quirk(ohci));
+ }
queue_work(selfid_workqueue, &ohci->bus_reset_work);
+ }
if (event & OHCI1394_RQPkt)
tasklet_schedule(&ohci->ar_request_ctx.tasklet);
@@ -2153,8 +2256,7 @@ static irqreturn_t irq_handler(int irq, void *data)
while (iso_event) {
i = ffs(iso_event) - 1;
- tasklet_schedule(
- &ohci->ir_context_list[i].context.tasklet);
+ fw_iso_context_schedule_flush_completions(&ohci->ir_context_list[i].base);
iso_event &= ~(1 << i);
}
}
@@ -2165,8 +2267,7 @@ static irqreturn_t irq_handler(int irq, void *data)
while (iso_event) {
i = ffs(iso_event) - 1;
- tasklet_schedule(
- &ohci->it_context_list[i].context.tasklet);
+ fw_iso_context_schedule_flush_completions(&ohci->it_context_list[i].base);
iso_event &= ~(1 << i);
}
}
@@ -2179,13 +2280,11 @@ static irqreturn_t irq_handler(int irq, void *data)
reg_read(ohci, OHCI1394_PostedWriteAddressLo);
reg_write(ohci, OHCI1394_IntEventClear,
OHCI1394_postedWriteErr);
- if (printk_ratelimit())
- ohci_err(ohci, "PCI posted write error\n");
+ dev_err_ratelimited(ohci->card.device, "PCI posted write error\n");
}
if (unlikely(event & OHCI1394_cycleTooLong)) {
- if (printk_ratelimit())
- ohci_notice(ohci, "isochronous cycle too long\n");
+ dev_notice_ratelimited(ohci->card.device, "isochronous cycle too long\n");
reg_write(ohci, OHCI1394_LinkControlSet,
OHCI1394_LinkControl_cycleMaster);
}
@@ -2197,17 +2296,15 @@ static irqreturn_t irq_handler(int irq, void *data)
* stop active cycleMatch iso contexts now and restart
* them at least two cycles later. (FIXME?)
*/
- if (printk_ratelimit())
- ohci_notice(ohci, "isochronous cycle inconsistent\n");
+ dev_notice_ratelimited(ohci->card.device, "isochronous cycle inconsistent\n");
}
if (unlikely(event & OHCI1394_unrecoverableError))
handle_dead_contexts(ohci);
if (event & OHCI1394_cycle64Seconds) {
- spin_lock(&ohci->lock);
+ guard(spinlock)(&ohci->lock);
update_bus_time(ohci);
- spin_unlock(&ohci->lock);
} else
flush_writes(ohci);
@@ -2468,9 +2565,8 @@ static int ohci_enable(struct fw_card *card,
OHCI1394_cycleInconsistent |
OHCI1394_unrecoverableError |
OHCI1394_cycleTooLong |
- OHCI1394_masterIntEnable;
- if (param_debug & OHCI_PARAM_DEBUG_BUSRESETS)
- irqs |= OHCI1394_busReset;
+ OHCI1394_masterIntEnable |
+ OHCI1394_busReset;
reg_write(ohci, OHCI1394_IntMaskSet, irqs);
reg_write(ohci, OHCI1394_HCControlSet,
@@ -2518,7 +2614,7 @@ static int ohci_set_config_rom(struct fw_card *card,
* ConfigRomHeader and BusOptions doesn't honor the
* noByteSwapData bit, so with a be32 config rom, the
* controller will load be32 values in to these registers
- * during the atomic update, even on litte endian
+ * during the atomic update, even on little endian
* architectures. The workaround we use is to put a 0 in the
* header quadlet; 0 is endian agnostic and means that the
* config rom isn't ready yet. In the bus reset tasklet we
@@ -2533,33 +2629,26 @@ static int ohci_set_config_rom(struct fw_card *card,
if (next_config_rom == NULL)
return -ENOMEM;
- spin_lock_irq(&ohci->lock);
-
- /*
- * If there is not an already pending config_rom update,
- * push our new allocation into the ohci->next_config_rom
- * and then mark the local variable as null so that we
- * won't deallocate the new buffer.
- *
- * OTOH, if there is a pending config_rom update, just
- * use that buffer with the new config_rom data, and
- * let this routine free the unused DMA allocation.
- */
-
- if (ohci->next_config_rom == NULL) {
- ohci->next_config_rom = next_config_rom;
- ohci->next_config_rom_bus = next_config_rom_bus;
- next_config_rom = NULL;
- }
-
- copy_config_rom(ohci->next_config_rom, config_rom, length);
+ scoped_guard(spinlock_irq, &ohci->lock) {
+ // If there is not an already pending config_rom update, push our new allocation
+ // into the ohci->next_config_rom and then mark the local variable as null so that
+ // we won't deallocate the new buffer.
+ //
+ // OTOH, if there is a pending config_rom update, just use that buffer with the new
+ // config_rom data, and let this routine free the unused DMA allocation.
+ if (ohci->next_config_rom == NULL) {
+ ohci->next_config_rom = next_config_rom;
+ ohci->next_config_rom_bus = next_config_rom_bus;
+ next_config_rom = NULL;
+ }
- ohci->next_header = config_rom[0];
- ohci->next_config_rom[0] = 0;
+ copy_config_rom(ohci->next_config_rom, config_rom, length);
- reg_write(ohci, OHCI1394_ConfigROMmap, ohci->next_config_rom_bus);
+ ohci->next_header = config_rom[0];
+ ohci->next_config_rom[0] = 0;
- spin_unlock_irq(&ohci->lock);
+ reg_write(ohci, OHCI1394_ConfigROMmap, ohci->next_config_rom_bus);
+ }
/* If we didn't use the DMA allocation, delete it. */
if (next_config_rom != NULL) {
@@ -2629,7 +2718,6 @@ static int ohci_enable_phys_dma(struct fw_card *card,
int node_id, int generation)
{
struct fw_ohci *ohci = fw_ohci(card);
- unsigned long flags;
int n, ret = 0;
if (param_remote_dma)
@@ -2640,12 +2728,10 @@ static int ohci_enable_phys_dma(struct fw_card *card,
* interrupt bit. Clear physReqResourceAllBuses on bus reset.
*/
- spin_lock_irqsave(&ohci->lock, flags);
+ guard(spinlock_irqsave)(&ohci->lock);
- if (ohci->generation != generation) {
- ret = -ESTALE;
- goto out;
- }
+ if (ohci->generation != generation)
+ return -ESTALE;
/*
* Note, if the node ID contains a non-local bus ID, physical DMA is
@@ -2659,8 +2745,6 @@ static int ohci_enable_phys_dma(struct fw_card *card,
reg_write(ohci, OHCI1394_PhyReqFilterHiSet, 1 << (n - 32));
flush_writes(ohci);
- out:
- spin_unlock_irqrestore(&ohci->lock, flags);
return ret;
}
@@ -2668,7 +2752,6 @@ static int ohci_enable_phys_dma(struct fw_card *card,
static u32 ohci_read_csr(struct fw_card *card, int csr_offset)
{
struct fw_ohci *ohci = fw_ohci(card);
- unsigned long flags;
u32 value;
switch (csr_offset) {
@@ -2692,16 +2775,14 @@ static u32 ohci_read_csr(struct fw_card *card, int csr_offset)
return get_cycle_time(ohci);
case CSR_BUS_TIME:
- /*
- * We might be called just after the cycle timer has wrapped
- * around but just before the cycle64Seconds handler, so we
- * better check here, too, if the bus time needs to be updated.
- */
- spin_lock_irqsave(&ohci->lock, flags);
- value = update_bus_time(ohci);
- spin_unlock_irqrestore(&ohci->lock, flags);
- return value;
+ {
+ // We might be called just after the cycle timer has wrapped around but just before
+ // the cycle64Seconds handler, so we better check here, too, if the bus time needs
+ // to be updated.
+ guard(spinlock_irqsave)(&ohci->lock);
+ return update_bus_time(ohci);
+ }
case CSR_BUSY_TIMEOUT:
value = reg_read(ohci, OHCI1394_ATRetries);
return (value >> 4) & 0x0ffff00f;
@@ -2719,7 +2800,6 @@ static u32 ohci_read_csr(struct fw_card *card, int csr_offset)
static void ohci_write_csr(struct fw_card *card, int csr_offset, u32 value)
{
struct fw_ohci *ohci = fw_ohci(card);
- unsigned long flags;
switch (csr_offset) {
case CSR_STATE_CLEAR:
@@ -2755,12 +2835,11 @@ static void ohci_write_csr(struct fw_card *card, int csr_offset, u32 value)
break;
case CSR_BUS_TIME:
- spin_lock_irqsave(&ohci->lock, flags);
- ohci->bus_time = (update_bus_time(ohci) & 0x40) |
- (value & ~0x7f);
- spin_unlock_irqrestore(&ohci->lock, flags);
+ {
+ guard(spinlock_irqsave)(&ohci->lock);
+ ohci->bus_time = (update_bus_time(ohci) & 0x40) | (value & ~0x7f);
break;
-
+ }
case CSR_BUSY_TIMEOUT:
value = (value & 0xf) | ((value & 0xf) << 4) |
((value & 0xf) << 8) | ((value & 0x0ffff000) << 4);
@@ -2779,8 +2858,13 @@ static void ohci_write_csr(struct fw_card *card, int csr_offset, u32 value)
}
}
-static void flush_iso_completions(struct iso_context *ctx)
+static void flush_iso_completions(struct iso_context *ctx, enum fw_iso_context_completions_cause cause)
{
+ trace_isoc_inbound_single_completions(&ctx->base, ctx->last_timestamp, cause, ctx->header,
+ ctx->header_length);
+ trace_isoc_outbound_completions(&ctx->base, ctx->last_timestamp, cause, ctx->header,
+ ctx->header_length);
+
ctx->base.callback.sc(&ctx->base, ctx->last_timestamp,
ctx->header_length, ctx->header,
ctx->base.callback_data);
@@ -2794,7 +2878,7 @@ static void copy_iso_headers(struct iso_context *ctx, const u32 *dma_hdr)
if (ctx->header_length + ctx->base.header_size > PAGE_SIZE) {
if (ctx->base.drop_overflow_headers)
return;
- flush_iso_completions(ctx);
+ flush_iso_completions(ctx, FW_ISO_CONTEXT_COMPLETIONS_CAUSE_HEADER_OVERFLOW);
}
ctx_hdr = ctx->header + ctx->header_length;
@@ -2843,7 +2927,7 @@ static int handle_ir_packet_per_buffer(struct context *context,
copy_iso_headers(ctx, (u32 *) (last + 1));
if (last->control & cpu_to_le16(DESCRIPTOR_IRQ_ALWAYS))
- flush_iso_completions(ctx);
+ flush_iso_completions(ctx, FW_ISO_CONTEXT_COMPLETIONS_CAUSE_INTERRUPT);
return 1;
}
@@ -2878,6 +2962,9 @@ static int handle_ir_buffer_fill(struct context *context,
completed, DMA_FROM_DEVICE);
if (last->control & cpu_to_le16(DESCRIPTOR_IRQ_ALWAYS)) {
+ trace_isoc_inbound_multiple_completions(&ctx->base, completed,
+ FW_ISO_CONTEXT_COMPLETIONS_CAUSE_INTERRUPT);
+
ctx->base.callback.mc(&ctx->base,
buffer_dma + completed,
ctx->base.callback_data);
@@ -2894,6 +2981,9 @@ static void flush_ir_buffer_fill(struct iso_context *ctx)
ctx->mc_buffer_bus & ~PAGE_MASK,
ctx->mc_completed, DMA_FROM_DEVICE);
+ trace_isoc_inbound_multiple_completions(&ctx->base, ctx->mc_completed,
+ FW_ISO_CONTEXT_COMPLETIONS_CAUSE_FLUSH);
+
ctx->base.callback.mc(&ctx->base,
ctx->mc_buffer_bus + ctx->mc_completed,
ctx->base.callback_data);
@@ -2958,7 +3048,7 @@ static int handle_it_packet(struct context *context,
if (ctx->header_length + 4 > PAGE_SIZE) {
if (ctx->base.drop_overflow_headers)
return 1;
- flush_iso_completions(ctx);
+ flush_iso_completions(ctx, FW_ISO_CONTEXT_COMPLETIONS_CAUSE_HEADER_OVERFLOW);
}
ctx_hdr = ctx->header + ctx->header_length;
@@ -2969,7 +3059,7 @@ static int handle_it_packet(struct context *context,
ctx->header_length += 4;
if (last->control & cpu_to_le16(DESCRIPTOR_IRQ_ALWAYS))
- flush_iso_completions(ctx);
+ flush_iso_completions(ctx, FW_ISO_CONTEXT_COMPLETIONS_CAUSE_INTERRUPT);
return 1;
}
@@ -2995,55 +3085,53 @@ static struct fw_iso_context *ohci_allocate_iso_context(struct fw_card *card,
u32 *mask, regs;
int index, ret = -EBUSY;
- spin_lock_irq(&ohci->lock);
+ scoped_guard(spinlock_irq, &ohci->lock) {
+ switch (type) {
+ case FW_ISO_CONTEXT_TRANSMIT:
+ mask = &ohci->it_context_mask;
+ callback = handle_it_packet;
+ index = ffs(*mask) - 1;
+ if (index >= 0) {
+ *mask &= ~(1 << index);
+ regs = OHCI1394_IsoXmitContextBase(index);
+ ctx = &ohci->it_context_list[index];
+ }
+ break;
- switch (type) {
- case FW_ISO_CONTEXT_TRANSMIT:
- mask = &ohci->it_context_mask;
- callback = handle_it_packet;
- index = ffs(*mask) - 1;
- if (index >= 0) {
- *mask &= ~(1 << index);
- regs = OHCI1394_IsoXmitContextBase(index);
- ctx = &ohci->it_context_list[index];
- }
- break;
+ case FW_ISO_CONTEXT_RECEIVE:
+ channels = &ohci->ir_context_channels;
+ mask = &ohci->ir_context_mask;
+ callback = handle_ir_packet_per_buffer;
+ index = *channels & 1ULL << channel ? ffs(*mask) - 1 : -1;
+ if (index >= 0) {
+ *channels &= ~(1ULL << channel);
+ *mask &= ~(1 << index);
+ regs = OHCI1394_IsoRcvContextBase(index);
+ ctx = &ohci->ir_context_list[index];
+ }
+ break;
- case FW_ISO_CONTEXT_RECEIVE:
- channels = &ohci->ir_context_channels;
- mask = &ohci->ir_context_mask;
- callback = handle_ir_packet_per_buffer;
- index = *channels & 1ULL << channel ? ffs(*mask) - 1 : -1;
- if (index >= 0) {
- *channels &= ~(1ULL << channel);
- *mask &= ~(1 << index);
- regs = OHCI1394_IsoRcvContextBase(index);
- ctx = &ohci->ir_context_list[index];
- }
- break;
+ case FW_ISO_CONTEXT_RECEIVE_MULTICHANNEL:
+ mask = &ohci->ir_context_mask;
+ callback = handle_ir_buffer_fill;
+ index = !ohci->mc_allocated ? ffs(*mask) - 1 : -1;
+ if (index >= 0) {
+ ohci->mc_allocated = true;
+ *mask &= ~(1 << index);
+ regs = OHCI1394_IsoRcvContextBase(index);
+ ctx = &ohci->ir_context_list[index];
+ }
+ break;
- case FW_ISO_CONTEXT_RECEIVE_MULTICHANNEL:
- mask = &ohci->ir_context_mask;
- callback = handle_ir_buffer_fill;
- index = !ohci->mc_allocated ? ffs(*mask) - 1 : -1;
- if (index >= 0) {
- ohci->mc_allocated = true;
- *mask &= ~(1 << index);
- regs = OHCI1394_IsoRcvContextBase(index);
- ctx = &ohci->ir_context_list[index];
+ default:
+ index = -1;
+ ret = -ENOSYS;
}
- break;
- default:
- index = -1;
- ret = -ENOSYS;
+ if (index < 0)
+ return ERR_PTR(ret);
}
- spin_unlock_irq(&ohci->lock);
-
- if (index < 0)
- return ERR_PTR(ret);
-
memset(ctx, 0, sizeof(*ctx));
ctx->header_length = 0;
ctx->header = (void *) __get_free_page(GFP_KERNEL);
@@ -3054,6 +3142,7 @@ static struct fw_iso_context *ohci_allocate_iso_context(struct fw_card *card,
ret = context_init(&ctx->context, ohci, regs, callback);
if (ret < 0)
goto out_with_header;
+ fw_iso_context_init_work(&ctx->base, ohci_isoc_context_work);
if (type == FW_ISO_CONTEXT_RECEIVE_MULTICHANNEL) {
set_multichannel_mask(ohci, 0);
@@ -3065,20 +3154,18 @@ static struct fw_iso_context *ohci_allocate_iso_context(struct fw_card *card,
out_with_header:
free_page((unsigned long)ctx->header);
out:
- spin_lock_irq(&ohci->lock);
-
- switch (type) {
- case FW_ISO_CONTEXT_RECEIVE:
- *channels |= 1ULL << channel;
- break;
+ scoped_guard(spinlock_irq, &ohci->lock) {
+ switch (type) {
+ case FW_ISO_CONTEXT_RECEIVE:
+ *channels |= 1ULL << channel;
+ break;
- case FW_ISO_CONTEXT_RECEIVE_MULTICHANNEL:
- ohci->mc_allocated = false;
- break;
+ case FW_ISO_CONTEXT_RECEIVE_MULTICHANNEL:
+ ohci->mc_allocated = false;
+ break;
+ }
+ *mask |= 1 << index;
}
- *mask |= 1 << index;
-
- spin_unlock_irq(&ohci->lock);
return ERR_PTR(ret);
}
@@ -3153,7 +3240,6 @@ static int ohci_stop_iso(struct fw_iso_context *base)
}
flush_writes(ohci);
context_stop(&ctx->context);
- tasklet_kill(&ctx->context.tasklet);
return 0;
}
@@ -3162,14 +3248,13 @@ static void ohci_free_iso_context(struct fw_iso_context *base)
{
struct fw_ohci *ohci = fw_ohci(base->card);
struct iso_context *ctx = container_of(base, struct iso_context, base);
- unsigned long flags;
int index;
ohci_stop_iso(base);
context_release(&ctx->context);
free_page((unsigned long)ctx->header);
- spin_lock_irqsave(&ohci->lock, flags);
+ guard(spinlock_irqsave)(&ohci->lock);
switch (base->type) {
case FW_ISO_CONTEXT_TRANSMIT:
@@ -3191,42 +3276,32 @@ static void ohci_free_iso_context(struct fw_iso_context *base)
ohci->mc_allocated = false;
break;
}
-
- spin_unlock_irqrestore(&ohci->lock, flags);
}
static int ohci_set_iso_channels(struct fw_iso_context *base, u64 *channels)
{
struct fw_ohci *ohci = fw_ohci(base->card);
- unsigned long flags;
- int ret;
switch (base->type) {
case FW_ISO_CONTEXT_RECEIVE_MULTICHANNEL:
+ {
+ guard(spinlock_irqsave)(&ohci->lock);
- spin_lock_irqsave(&ohci->lock, flags);
-
- /* Don't allow multichannel to grab other contexts' channels. */
+ // Don't allow multichannel to grab other contexts' channels.
if (~ohci->ir_context_channels & ~ohci->mc_channels & *channels) {
*channels = ohci->ir_context_channels;
- ret = -EBUSY;
+ return -EBUSY;
} else {
set_multichannel_mask(ohci, *channels);
- ret = 0;
+ return 0;
}
-
- spin_unlock_irqrestore(&ohci->lock, flags);
-
- break;
+ }
default:
- ret = -EINVAL;
+ return -EINVAL;
}
-
- return ret;
}
-#ifdef CONFIG_PM
-static void ohci_resume_iso_dma(struct fw_ohci *ohci)
+static void __maybe_unused ohci_resume_iso_dma(struct fw_ohci *ohci)
{
int i;
struct iso_context *ctx;
@@ -3243,7 +3318,6 @@ static void ohci_resume_iso_dma(struct fw_ohci *ohci)
ohci_start_iso(&ctx->base, 0, ctx->sync, ctx->tags);
}
}
-#endif
static int queue_iso_transmit(struct iso_context *ctx,
struct fw_iso_packet *packet,
@@ -3297,14 +3371,14 @@ static int queue_iso_transmit(struct iso_context *ctx,
d[0].branch_address = cpu_to_le32(d_bus | z);
header = (__le32 *) &d[1];
- header[0] = cpu_to_le32(IT_HEADER_SY(p->sy) |
- IT_HEADER_TAG(p->tag) |
- IT_HEADER_TCODE(TCODE_STREAM_DATA) |
- IT_HEADER_CHANNEL(ctx->base.channel) |
- IT_HEADER_SPEED(ctx->base.speed));
- header[1] =
- cpu_to_le32(IT_HEADER_DATA_LENGTH(p->header_length +
- p->payload_length));
+
+ ohci1394_it_data_set_speed(header, ctx->base.speed);
+ ohci1394_it_data_set_tag(header, p->tag);
+ ohci1394_it_data_set_channel(header, ctx->base.channel);
+ ohci1394_it_data_set_tcode(header, TCODE_STREAM_DATA);
+ ohci1394_it_data_set_sync(header, p->sy);
+
+ ohci1394_it_data_set_data_length(header, p->header_length + p->payload_length);
}
if (p->header_length > 0) {
@@ -3492,24 +3566,19 @@ static int ohci_queue_iso(struct fw_iso_context *base,
unsigned long payload)
{
struct iso_context *ctx = container_of(base, struct iso_context, base);
- unsigned long flags;
- int ret = -ENOSYS;
- spin_lock_irqsave(&ctx->context.ohci->lock, flags);
+ guard(spinlock_irqsave)(&ctx->context.ohci->lock);
+
switch (base->type) {
case FW_ISO_CONTEXT_TRANSMIT:
- ret = queue_iso_transmit(ctx, packet, buffer, payload);
- break;
+ return queue_iso_transmit(ctx, packet, buffer, payload);
case FW_ISO_CONTEXT_RECEIVE:
- ret = queue_iso_packet_per_buffer(ctx, packet, buffer, payload);
- break;
+ return queue_iso_packet_per_buffer(ctx, packet, buffer, payload);
case FW_ISO_CONTEXT_RECEIVE_MULTICHANNEL:
- ret = queue_iso_buffer_fill(ctx, packet, buffer, payload);
- break;
+ return queue_iso_buffer_fill(ctx, packet, buffer, payload);
+ default:
+ return -ENOSYS;
}
- spin_unlock_irqrestore(&ctx->context.ohci->lock, flags);
-
- return ret;
}
static void ohci_flush_queue_iso(struct fw_iso_context *base)
@@ -3525,16 +3594,14 @@ static int ohci_flush_iso_completions(struct fw_iso_context *base)
struct iso_context *ctx = container_of(base, struct iso_context, base);
int ret = 0;
- tasklet_disable_in_atomic(&ctx->context.tasklet);
-
if (!test_and_set_bit_lock(0, &ctx->flushing_completions)) {
- context_tasklet((unsigned long)&ctx->context);
+ ohci_isoc_context_work(&base->work);
switch (base->type) {
case FW_ISO_CONTEXT_TRANSMIT:
case FW_ISO_CONTEXT_RECEIVE:
if (ctx->header_length != 0)
- flush_iso_completions(ctx);
+ flush_iso_completions(ctx, FW_ISO_CONTEXT_COMPLETIONS_CAUSE_FLUSH);
break;
case FW_ISO_CONTEXT_RECEIVE_MULTICHANNEL:
if (ctx->mc_completed != 0)
@@ -3548,8 +3615,6 @@ static int ohci_flush_iso_completions(struct fw_iso_context *base)
smp_mb__after_atomic();
}
- tasklet_enable(&ctx->context.tasklet);
-
return ret;
}
@@ -3623,7 +3688,7 @@ static int pci_probe(struct pci_dev *dev,
struct fw_ohci *ohci;
u32 bus_options, max_receive, link_speed, version;
u64 guid;
- int i, err;
+ int i, flags, irq, err;
size_t size;
if (dev->vendor == PCI_VENDOR_ID_PINNACLE_SYSTEMS) {
@@ -3659,12 +3724,11 @@ static int pci_probe(struct pci_dev *dev,
return -ENXIO;
}
- err = pcim_iomap_regions(dev, 1 << 0, ohci_driver_name);
- if (err) {
+ ohci->registers = pcim_iomap_region(dev, 0, ohci_driver_name);
+ if (IS_ERR(ohci->registers)) {
ohci_err(ohci, "request and map MMIO resource unavailable\n");
return -ENXIO;
}
- ohci->registers = pcim_iomap_table(dev)[0];
for (i = 0; i < ARRAY_SIZE(ohci_quirks); i++)
if ((ohci_quirks[i].vendor == dev->vendor) &&
@@ -3748,18 +3812,29 @@ static int pci_probe(struct pci_dev *dev,
guid = ((u64) reg_read(ohci, OHCI1394_GUIDHi) << 32) |
reg_read(ohci, OHCI1394_GUIDLo);
+ flags = PCI_IRQ_INTX;
if (!(ohci->quirks & QUIRK_NO_MSI))
- pci_enable_msi(dev);
- err = devm_request_irq(&dev->dev, dev->irq, irq_handler,
- pci_dev_msi_enabled(dev) ? 0 : IRQF_SHARED, ohci_driver_name, ohci);
+ flags |= PCI_IRQ_MSI;
+ err = pci_alloc_irq_vectors(dev, 1, 1, flags);
+ if (err < 0)
+ return err;
+ irq = pci_irq_vector(dev, 0);
+ if (irq < 0) {
+ err = irq;
+ goto fail_msi;
+ }
+
+ err = request_threaded_irq(irq, irq_handler, NULL,
+ pci_dev_msi_enabled(dev) ? 0 : IRQF_SHARED, ohci_driver_name,
+ ohci);
if (err < 0) {
- ohci_err(ohci, "failed to allocate interrupt %d\n", dev->irq);
+ ohci_err(ohci, "failed to allocate interrupt %d\n", irq);
goto fail_msi;
}
- err = fw_card_add(&ohci->card, max_receive, link_speed, guid);
+ err = fw_card_add(&ohci->card, max_receive, link_speed, guid, ohci->n_it + ohci->n_ir);
if (err)
- goto fail_msi;
+ goto fail_irq;
version = reg_read(ohci, OHCI1394_Version) & 0x00ff00ff;
ohci_notice(ohci,
@@ -3772,9 +3847,10 @@ static int pci_probe(struct pci_dev *dev,
return 0;
+ fail_irq:
+ free_irq(irq, ohci);
fail_msi:
- devm_free_irq(&dev->dev, dev->irq, ohci);
- pci_disable_msi(dev);
+ pci_free_irq_vectors(dev);
return err;
}
@@ -3782,6 +3858,7 @@ static int pci_probe(struct pci_dev *dev,
static void pci_remove(struct pci_dev *dev)
{
struct fw_ohci *ohci = pci_get_drvdata(dev);
+ int irq;
/*
* If the removal is happening from the suspend state, LPS won't be
@@ -3801,45 +3878,33 @@ static void pci_remove(struct pci_dev *dev)
software_reset(ohci);
- devm_free_irq(&dev->dev, dev->irq, ohci);
- pci_disable_msi(dev);
+ irq = pci_irq_vector(dev, 0);
+ if (irq >= 0)
+ free_irq(irq, ohci);
+ pci_free_irq_vectors(dev);
dev_notice(&dev->dev, "removing fw-ohci device\n");
}
-#ifdef CONFIG_PM
-static int pci_suspend(struct pci_dev *dev, pm_message_t state)
+static int __maybe_unused pci_suspend(struct device *dev)
{
- struct fw_ohci *ohci = pci_get_drvdata(dev);
- int err;
+ struct pci_dev *pdev = to_pci_dev(dev);
+ struct fw_ohci *ohci = pci_get_drvdata(pdev);
software_reset(ohci);
- err = pci_save_state(dev);
- if (err) {
- ohci_err(ohci, "pci_save_state failed\n");
- return err;
- }
- err = pci_set_power_state(dev, pci_choose_state(dev, state));
- if (err)
- ohci_err(ohci, "pci_set_power_state failed with %d\n", err);
- pmac_ohci_off(dev);
+ pmac_ohci_off(pdev);
return 0;
}
-static int pci_resume(struct pci_dev *dev)
+
+static int __maybe_unused pci_resume(struct device *dev)
{
- struct fw_ohci *ohci = pci_get_drvdata(dev);
+ struct pci_dev *pdev = to_pci_dev(dev);
+ struct fw_ohci *ohci = pci_get_drvdata(pdev);
int err;
- pmac_ohci_on(dev);
- pci_set_power_state(dev, PCI_D0);
- pci_restore_state(dev);
- err = pci_enable_device(dev);
- if (err) {
- ohci_err(ohci, "pci_enable_device failed\n");
- return err;
- }
+ pmac_ohci_on(pdev);
/* Some systems don't setup GUID register on resume from ram */
if (!reg_read(ohci, OHCI1394_GUIDLo) &&
@@ -3856,7 +3921,6 @@ static int pci_resume(struct pci_dev *dev)
return 0;
}
-#endif
static const struct pci_device_id pci_table[] = {
{ PCI_DEVICE_CLASS(PCI_CLASS_SERIAL_FIREWIRE_OHCI, ~0) },
@@ -3865,15 +3929,14 @@ static const struct pci_device_id pci_table[] = {
MODULE_DEVICE_TABLE(pci, pci_table);
+static SIMPLE_DEV_PM_OPS(pci_pm_ops, pci_suspend, pci_resume);
+
static struct pci_driver fw_ohci_pci_driver = {
.name = ohci_driver_name,
.id_table = pci_table,
.probe = pci_probe,
.remove = pci_remove,
-#ifdef CONFIG_PM
- .resume = pci_resume,
- .suspend = pci_suspend,
-#endif
+ .driver.pm = &pci_pm_ops,
};
static int __init fw_ohci_init(void)