Diffstat (limited to 'drivers/firewire')
24 files changed, 5202 insertions, 2160 deletions
diff --git a/drivers/firewire/.kunitconfig b/drivers/firewire/.kunitconfig
new file mode 100644
index 000000000000..21b7e9eef63d
--- /dev/null
+++ b/drivers/firewire/.kunitconfig
@@ -0,0 +1,8 @@
+CONFIG_KUNIT=y
+CONFIG_PCI=y
+CONFIG_FIREWIRE=y
+CONFIG_FIREWIRE_KUNIT_UAPI_TEST=y
+CONFIG_FIREWIRE_KUNIT_DEVICE_ATTRIBUTE_TEST=y
+CONFIG_FIREWIRE_KUNIT_PACKET_SERDES_TEST=y
+CONFIG_FIREWIRE_KUNIT_SELF_ID_SEQUENCE_HELPER_TEST=y
+CONFIG_FIREWIRE_KUNIT_OHCI_SERDES_TEST=y
diff --git a/drivers/firewire/Kconfig b/drivers/firewire/Kconfig
index ec00a6f70da8..a5f5e250223a 100644
--- a/drivers/firewire/Kconfig
+++ b/drivers/firewire/Kconfig
@@ -11,16 +11,79 @@ config FIREWIRE
 	  This is the new-generation IEEE 1394 (FireWire) driver stack
 	  a.k.a. Juju, a new implementation designed for robustness and
 	  simplicity.
-	  See http://ieee1394.wiki.kernel.org/index.php/Juju_Migration
+	  See http://ieee1394.docs.kernel.org/en/latest/migration.html
 	  for information about migration from the older Linux 1394 stack
 	  to the new driver stack.

 	  To compile this driver as a module, say M here: the module will be
 	  called firewire-core.

+config FIREWIRE_KUNIT_UAPI_TEST
+	tristate "KUnit tests for layout of structure in UAPI" if !KUNIT_ALL_TESTS
+	depends on FIREWIRE && KUNIT
+	default KUNIT_ALL_TESTS
+	help
+	  This builds the KUnit tests to check whether structures exposed
+	  to user space have the expected layout.
+
+	  KUnit tests run during boot and output the results to the debug
+	  log in TAP format (https://testanything.org/). Only useful for
+	  kernel devs running the KUnit test harness, and not for inclusion
+	  into a production build.
+
+	  For more information on KUnit and unit tests in general, refer
+	  to the KUnit documentation in Documentation/dev-tools/kunit/.
+
+config FIREWIRE_KUNIT_DEVICE_ATTRIBUTE_TEST
+	tristate "KUnit tests for device attributes" if !KUNIT_ALL_TESTS
+	depends on FIREWIRE && KUNIT
+	default KUNIT_ALL_TESTS
+	help
+	  This builds the KUnit tests for the device attributes of node
+	  and unit.
+
+	  KUnit tests run during boot and output the results to the debug
+	  log in TAP format (https://testanything.org/). Only useful for
+	  kernel devs running the KUnit test harness, and not for inclusion
+	  into a production build.
+
+	  For more information on KUnit and unit tests in general, refer
+	  to the KUnit documentation in Documentation/dev-tools/kunit/.
+
+config FIREWIRE_KUNIT_PACKET_SERDES_TEST
+	tristate "KUnit tests for packet serialization/deserialization" if !KUNIT_ALL_TESTS
+	depends on FIREWIRE && KUNIT
+	default KUNIT_ALL_TESTS
+	help
+	  This builds the KUnit tests for packet serialization and
+	  deserialization.
+
+	  KUnit tests run during boot and output the results to the debug
+	  log in TAP format (https://testanything.org/). Only useful for
+	  kernel devs running the KUnit test harness, and not for inclusion
+	  into a production build.
+
+	  For more information on KUnit and unit tests in general, refer
+	  to the KUnit documentation in Documentation/dev-tools/kunit/.
+
+config FIREWIRE_KUNIT_SELF_ID_SEQUENCE_HELPER_TEST
+	tristate "KUnit tests for helpers of self ID sequence" if !KUNIT_ALL_TESTS
+	depends on FIREWIRE && KUNIT
+	default KUNIT_ALL_TESTS
+	help
+	  This builds the KUnit tests for the helpers of self ID sequences.
+
+	  KUnit tests run during boot and output the results to the debug
+	  log in TAP format (https://testanything.org/). Only useful for
+	  kernel devs running the KUnit test harness, and not for inclusion
+	  into a production build.
+
+	  For more information on KUnit and unit tests in general, refer
+	  to the KUnit documentation in Documentation/dev-tools/kunit/.
+
 config FIREWIRE_OHCI
 	tristate "OHCI-1394 controllers"
-	depends on PCI && FIREWIRE && MMU
+	depends on PCI && FIREWIRE
 	help
 	  Enable this driver if you have a FireWire controller based
 	  on the OHCI specification.  For all practical purposes, this
@@ -29,6 +92,22 @@ config FIREWIRE_OHCI
 	  To compile this driver as a module, say M here: The module will be
 	  called firewire-ohci.

+config FIREWIRE_KUNIT_OHCI_SERDES_TEST
+	tristate "KUnit tests for serialization/deserialization of data in buffers/registers" if !KUNIT_ALL_TESTS
+	depends on FIREWIRE && KUNIT
+	default KUNIT_ALL_TESTS
+	help
+	  This builds the KUnit tests to check serialization and deserialization
+	  of data in buffers and registers defined in the 1394 OHCI specification.
+
+	  KUnit tests run during boot and output the results to the debug
+	  log in TAP format (https://testanything.org/). Only useful for
+	  kernel devs running the KUnit test harness, and not for inclusion
+	  into a production build.
+
+	  For more information on KUnit and unit tests in general, refer
+	  to the KUnit documentation in Documentation/dev-tools/kunit/.
+
 config FIREWIRE_SBP2
 	tristate "Storage devices (SBP-2 protocol)"
 	depends on FIREWIRE && SCSI
diff --git a/drivers/firewire/Makefile b/drivers/firewire/Makefile
index e58c8c794778..1ff550e93a8c 100644
--- a/drivers/firewire/Makefile
+++ b/drivers/firewire/Makefile
@@ -3,7 +3,7 @@
 # Makefile for the Linux IEEE 1394 implementation
 #

-firewire-core-y += core-card.o core-cdev.o core-device.o \
+firewire-core-y += core-trace.o core-card.o core-cdev.o core-device.o \
 		   core-iso.o core-topology.o core-transaction.o
 firewire-ohci-y += ohci.o
 firewire-sbp2-y += sbp2.o
@@ -15,3 +15,8 @@ obj-$(CONFIG_FIREWIRE_SBP2) += firewire-sbp2.o
 obj-$(CONFIG_FIREWIRE_NET) += firewire-net.o
 obj-$(CONFIG_FIREWIRE_NOSY) += nosy.o
 obj-$(CONFIG_PROVIDE_OHCI1394_DMA_INIT) += init_ohci1394_dma.o
+
+obj-$(CONFIG_FIREWIRE_KUNIT_UAPI_TEST) += uapi-test.o
+obj-$(CONFIG_FIREWIRE_KUNIT_PACKET_SERDES_TEST) += packet-serdes-test.o
+obj-$(CONFIG_FIREWIRE_KUNIT_SELF_ID_SEQUENCE_HELPER_TEST) += self-id-sequence-helper-test.o
+obj-$(CONFIG_FIREWIRE_KUNIT_OHCI_SERDES_TEST) += ohci-serdes-test.o
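The Kconfig entries above all build test modules that follow the same KUnit pattern. As a rough sketch of what such a module looks like (the suite name and the checked offset here are hypothetical, not the actual contents of uapi-test.c):

// SPDX-License-Identifier: GPL-2.0-or-later
// Hypothetical sketch of the KUnit boilerplate shared by the test modules
// listed above; names and the assertion are made up for illustration.
#include <kunit/test.h>
#include <linux/firewire-cdev.h>

static void structure_layout(struct kunit *test)
{
	// UAPI layout checks are typically offsetof()/sizeof() assertions.
	KUNIT_EXPECT_EQ(test, offsetof(struct fw_cdev_event_common, closure), 0);
}

static struct kunit_case example_test_cases[] = {
	KUNIT_CASE(structure_layout),
	{}
};

static struct kunit_suite example_test_suite = {
	.name = "firewire-example",
	.test_cases = example_test_cases,
};
kunit_test_suite(example_test_suite);

MODULE_DESCRIPTION("Hypothetical example of a FireWire KUnit suite");
MODULE_LICENSE("GPL");

Each suite built this way runs at boot (or module load) and reports per-case results in TAP format, which is what the help texts above refer to.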
diff --git a/drivers/firewire/core-card.c b/drivers/firewire/core-card.c
index 54be88167c60..0462d7b9e547 100644
--- a/drivers/firewire/core-card.c
+++ b/drivers/firewire/core-card.c
@@ -23,6 +23,7 @@
 #include <asm/byteorder.h>

 #include "core.h"
+#include <trace/events/firewire.h>

 #define define_fw_printk_level(func, kern_level)		\
 void func(const struct fw_card *card, const char *fmt, ...)	\
@@ -85,8 +86,6 @@ static size_t config_rom_length = 1 + 4 + 1 + 1;
  */
 #define DEFAULT_SPLIT_TIMEOUT	(2 * 8000)

-#define CANON_OUI		0x000085
-
 static void generate_config_rom(struct fw_card *card, __be32 *config_rom)
 {
 	struct fw_descriptor *desc;
@@ -167,7 +166,6 @@ static size_t required_space(struct fw_descriptor *desc)
 int fw_core_add_descriptor(struct fw_descriptor *desc)
 {
 	size_t i;
-	int ret;

 	/*
 	 * Check descriptor is valid; the length of all blocks in the
@@ -181,29 +179,25 @@ int fw_core_add_descriptor(struct fw_descriptor *desc)
 	if (i != desc->length)
 		return -EINVAL;

-	mutex_lock(&card_mutex);
+	guard(mutex)(&card_mutex);

-	if (config_rom_length + required_space(desc) > 256) {
-		ret = -EBUSY;
-	} else {
-		list_add_tail(&desc->link, &descriptor_list);
-		config_rom_length += required_space(desc);
-		descriptor_count++;
-		if (desc->immediate > 0)
-			descriptor_count++;
-		update_config_roms();
-		ret = 0;
-	}
+	if (config_rom_length + required_space(desc) > 256)
+		return -EBUSY;

-	mutex_unlock(&card_mutex);
+	list_add_tail(&desc->link, &descriptor_list);
+	config_rom_length += required_space(desc);
+	descriptor_count++;
+	if (desc->immediate > 0)
+		descriptor_count++;
+	update_config_roms();

-	return ret;
+	return 0;
 }
 EXPORT_SYMBOL(fw_core_add_descriptor);

 void fw_core_remove_descriptor(struct fw_descriptor *desc)
 {
-	mutex_lock(&card_mutex);
+	guard(mutex)(&card_mutex);

 	list_del(&desc->link);
 	config_rom_length -= required_space(desc);
@@ -211,8 +205,6 @@ void fw_core_remove_descriptor(struct fw_descriptor *desc)
 	if (desc->immediate > 0)
 		descriptor_count--;
 	update_config_roms();
-
-	mutex_unlock(&card_mutex);
 }
 EXPORT_SYMBOL(fw_core_remove_descriptor);
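The two descriptor functions above convert explicit mutex_lock()/mutex_unlock() pairs to the scope-based cleanup helpers from <linux/cleanup.h>, which is what makes the early "return -EBUSY" safe. A minimal sketch of those helpers, using a hypothetical lock and counter (not from this patch):

#include <linux/cleanup.h>
#include <linux/mutex.h>

static DEFINE_MUTEX(example_mutex);	// hypothetical lock
static int example_count;		// hypothetical shared state

static int example_increment(void)
{
	// guard() releases example_mutex automatically on every return.
	guard(mutex)(&example_mutex);

	if (example_count >= 256)
		return -EBUSY;		// no explicit unlock needed

	return ++example_count;
}

static void example_reset(void)
{
	// scoped_guard() limits the critical section to the braces.
	scoped_guard(mutex, &example_mutex)
		example_count = 0;
}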
@@ -221,30 +213,35 @@ static int reset_bus(struct fw_card *card, bool short_reset)
 	int reg = short_reset ? 5 : 1;
 	int bit = short_reset ? PHY_BUS_SHORT_RESET : PHY_BUS_RESET;

+	trace_bus_reset_initiate(card->index, card->generation, short_reset);
+
 	return card->driver->update_phy_reg(card, reg, 0, bit);
 }

 void fw_schedule_bus_reset(struct fw_card *card, bool delayed, bool short_reset)
 {
+	trace_bus_reset_schedule(card->index, card->generation, short_reset);
+
 	/* We don't try hard to sort out requests of long vs. short resets. */
 	card->br_short = short_reset;

 	/* Use an arbitrary short delay to combine multiple reset requests. */
 	fw_card_get(card);
-	if (!queue_delayed_work(fw_workqueue, &card->br_work,
-				delayed ? DIV_ROUND_UP(HZ, 100) : 0))
+	if (!queue_delayed_work(fw_workqueue, &card->br_work, delayed ? msecs_to_jiffies(10) : 0))
 		fw_card_put(card);
 }
 EXPORT_SYMBOL(fw_schedule_bus_reset);

 static void br_work(struct work_struct *work)
 {
-	struct fw_card *card = container_of(work, struct fw_card, br_work.work);
+	struct fw_card *card = from_work(card, work, br_work.work);

 	/* Delay for 2s after last reset per IEEE 1394 clause 8.2.1. */
 	if (card->reset_jiffies != 0 &&
-	    time_before64(get_jiffies_64(), card->reset_jiffies + 2 * HZ)) {
-		if (!queue_delayed_work(fw_workqueue, &card->br_work, 2 * HZ))
+	    time_is_after_jiffies64(card->reset_jiffies + secs_to_jiffies(2))) {
+		trace_bus_reset_postpone(card->index, card->generation, card->br_short);
+
+		if (!queue_delayed_work(fw_workqueue, &card->br_work, secs_to_jiffies(2)))
 			fw_card_put(card);
 		return;
 	}
@@ -273,10 +270,6 @@ static void allocate_broadcast_channel(struct fw_card *card, int generation)
 				fw_device_set_broadcast_channel);
 }

-static const char gap_count_table[] = {
-	63, 5, 7, 8, 10, 13, 16, 18, 21, 24, 26, 29, 32, 35, 37, 40
-};
-
 void fw_schedule_bm_work(struct fw_card *card, unsigned long delay)
 {
 	fw_card_get(card);
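br_work() above also swaps an open-coded container_of() for from_work(), a wrapper provided by <linux/workqueue.h>. A hypothetical miniature of the same pattern:

#include <linux/workqueue.h>

struct example_card {			// hypothetical stand-in for struct fw_card
	struct delayed_work br_work;
};

static void example_br_work(struct work_struct *work)
{
	// from_work() is container_of() specialized for work items: it
	// recovers the example_card that embeds br_work.work.
	struct example_card *card = from_work(card, work, br_work.work);

	(void)card;	/* ... reset the bus ... */
}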
@@ -284,231 +277,282 @@ void fw_schedule_bm_work(struct fw_card *card, unsigned long delay)
 	fw_card_put(card);
 }

-static void bm_work(struct work_struct *work)
-{
-	struct fw_card *card = container_of(work, struct fw_card, bm_work.work);
-	struct fw_device *root_device, *irm_device;
-	struct fw_node *root_node;
-	int root_id, new_root_id, irm_id, bm_id, local_id;
-	int gap_count, generation, grace, rcode;
-	bool do_reset = false;
-	bool root_device_is_running;
-	bool root_device_is_cmc;
-	bool irm_is_1394_1995_only;
-	bool keep_this_irm;
-	__be32 transaction_data[2];
-
-	spin_lock_irq(&card->lock);
+enum bm_contention_outcome {
+	// The bus management contention window has not expired yet.
+	BM_CONTENTION_OUTCOME_WITHIN_WINDOW = 0,
+	// The IRM node has link off.
+	BM_CONTENTION_OUTCOME_IRM_HAS_LINK_OFF,
+	// The IRM node complies with IEEE 1394:1995 only.
+	BM_CONTENTION_OUTCOME_IRM_COMPLIES_1394_1995_ONLY,
+	// Another bus reset; BM work has been rescheduled.
+	BM_CONTENTION_OUTCOME_AT_NEW_GENERATION,
+	// We have been unable to send the lock request to the IRM node due to some local problem.
+	BM_CONTENTION_OUTCOME_LOCAL_PROBLEM_AT_TRANSACTION,
+	// The lock request failed; maybe the IRM isn't really IRM capable after all.
+	BM_CONTENTION_OUTCOME_IRM_IS_NOT_CAPABLE_FOR_IRM,
+	// Somebody else is BM.
+	BM_CONTENTION_OUTCOME_IRM_HOLDS_ANOTHER_NODE_AS_BM,
+	// The local node succeeded in contending for bus manager.
+	BM_CONTENTION_OUTCOME_IRM_HOLDS_LOCAL_NODE_AS_BM,
+};

-	if (card->local_node == NULL) {
-		spin_unlock_irq(&card->lock);
-		goto out_put_card;
+static enum bm_contention_outcome contend_for_bm(struct fw_card *card)
+__must_hold(&card->lock)
+{
+	int generation = card->generation;
+	int local_id = card->local_node->node_id;
+	__be32 data[2] = {
+		cpu_to_be32(BUS_MANAGER_ID_NOT_REGISTERED),
+		cpu_to_be32(local_id),
+	};
+	bool grace = time_is_before_jiffies64(card->reset_jiffies + msecs_to_jiffies(125));
+	struct fw_node *irm_node;
+	struct fw_device *irm_device;
+	int irm_node_id, irm_device_quirks = 0;
+	int rcode;
+
+	lockdep_assert_held(&card->lock);
+
+	if (!grace) {
+		if (!is_next_generation(generation, card->bm_generation) || card->bm_abdicate)
+			return BM_CONTENTION_OUTCOME_WITHIN_WINDOW;
 	}

-	generation = card->generation;
-
-	root_node = card->root_node;
-	fw_node_get(root_node);
-	root_device = root_node->data;
-	root_device_is_running = root_device &&
-			atomic_read(&root_device->state) == FW_DEVICE_RUNNING;
-	root_device_is_cmc = root_device && root_device->cmc;
+	irm_node = card->irm_node;
+	if (!irm_node->link_on) {
+		fw_notice(card, "IRM has link off, making local node (%02x) root\n", local_id);
+		return BM_CONTENTION_OUTCOME_IRM_HAS_LINK_OFF;
+	}

-	irm_device = card->irm_node->data;
-	irm_is_1394_1995_only = irm_device && irm_device->config_rom &&
-			(irm_device->config_rom[2] & 0x000000f0) == 0;
+	// NOTE: It is likely that the quirk detection for the IRM device has not been done yet.
+	irm_device = fw_node_get_device(irm_node);
+	if (irm_device)
+		irm_device_quirks = READ_ONCE(irm_device->quirks);
+	if ((irm_device_quirks & FW_DEVICE_QUIRK_IRM_IS_1394_1995_ONLY) &&
+	    !(irm_device_quirks & FW_DEVICE_QUIRK_IRM_IGNORES_BUS_MANAGER)) {
+		fw_notice(card, "IRM is not 1394a compliant, making local node (%02x) root\n",
+			  local_id);
+		return BM_CONTENTION_OUTCOME_IRM_COMPLIES_1394_1995_ONLY;
+	}

-	/* Canon MV5i works unreliably if it is not root node. */
-	keep_this_irm = irm_device && irm_device->config_rom &&
-			irm_device->config_rom[3] >> 8 == CANON_OUI;
+	irm_node_id = irm_node->node_id;

-	root_id  = root_node->node_id;
-	irm_id   = card->irm_node->node_id;
-	local_id = card->local_node->node_id;
+	spin_unlock_irq(&card->lock);

-	grace = time_after64(get_jiffies_64(),
-			     card->reset_jiffies + DIV_ROUND_UP(HZ, 8));
+	rcode = fw_run_transaction(card, TCODE_LOCK_COMPARE_SWAP, irm_node_id, generation,
+				   SCODE_100, CSR_REGISTER_BASE + CSR_BUS_MANAGER_ID, data,
+				   sizeof(data));

-	if ((is_next_generation(generation, card->bm_generation) &&
-	     !card->bm_abdicate) ||
-	    (card->bm_generation != generation && grace)) {
-		/*
-		 * This first step is to figure out who is IRM and
-		 * then try to become bus manager.  If the IRM is not
-		 * well defined (e.g. does not have an active link
-		 * layer or does not responds to our lock request, we
-		 * will have to do a little vigilante bus management.
-		 * In that case, we do a goto into the gap count logic
-		 * so that when we do the reset, we still optimize the
-		 * gap count.  That could well save a reset in the
-		 * next generation.
-		 */
+	spin_lock_irq(&card->lock);

-		if (!card->irm_node->link_on) {
-			new_root_id = local_id;
-			fw_notice(card, "%s, making local node (%02x) root\n",
-				  "IRM has link off", new_root_id);
-			goto pick_me;
+	switch (rcode) {
+	case RCODE_GENERATION:
+		return BM_CONTENTION_OUTCOME_AT_NEW_GENERATION;
+	case RCODE_SEND_ERROR:
+		return BM_CONTENTION_OUTCOME_LOCAL_PROBLEM_AT_TRANSACTION;
+	case RCODE_COMPLETE:
+	{
+		int bm_id = be32_to_cpu(data[0]);
+
+		// Used by the cdev layer for "struct fw_cdev_event_bus_reset".
+		if (bm_id != BUS_MANAGER_ID_NOT_REGISTERED)
+			card->bm_node_id = 0xffc0 | bm_id;
+		else
+			card->bm_node_id = local_id;
+
+		if (bm_id != BUS_MANAGER_ID_NOT_REGISTERED)
+			return BM_CONTENTION_OUTCOME_IRM_HOLDS_ANOTHER_NODE_AS_BM;
+		else
+			return BM_CONTENTION_OUTCOME_IRM_HOLDS_LOCAL_NODE_AS_BM;
+	}
+	default:
+		if (!(irm_device_quirks & FW_DEVICE_QUIRK_IRM_IGNORES_BUS_MANAGER)) {
+			fw_notice(card, "BM lock failed (%s), making local node (%02x) root\n",
+				  fw_rcode_string(rcode), local_id);
+			return BM_CONTENTION_OUTCOME_IRM_COMPLIES_1394_1995_ONLY;
+		} else {
+			return BM_CONTENTION_OUTCOME_IRM_IS_NOT_CAPABLE_FOR_IRM;
 		}
+	}
+}

-		if (irm_is_1394_1995_only && !keep_this_irm) {
-			new_root_id = local_id;
-			fw_notice(card, "%s, making local node (%02x) root\n",
-				  "IRM is not 1394a compliant", new_root_id);
-			goto pick_me;
-		}
+DEFINE_FREE(node_unref, struct fw_node *, if (_T) fw_node_put(_T))
+DEFINE_FREE(card_unref, struct fw_card *, if (_T) fw_card_put(_T))

-		transaction_data[0] = cpu_to_be32(0x3f);
-		transaction_data[1] = cpu_to_be32(local_id);
+static void bm_work(struct work_struct *work)
+{
+	static const char gap_count_table[] = {
+		63, 5, 7, 8, 10, 13, 16, 18, 21, 24, 26, 29, 32, 35, 37, 40
+	};
+	struct fw_card *card __free(card_unref) = from_work(card, work, bm_work.work);
+	struct fw_node *root_node __free(node_unref) = NULL;
+	int root_id, new_root_id, irm_id, local_id;
+	int expected_gap_count, generation;
+	bool stand_for_root = false;
+
+	spin_lock_irq(&card->lock);
+
+	if (card->local_node == NULL) {
 		spin_unlock_irq(&card->lock);
+		return;
+	}

-		rcode = fw_run_transaction(card, TCODE_LOCK_COMPARE_SWAP,
-				irm_id, generation, SCODE_100,
-				CSR_REGISTER_BASE + CSR_BUS_MANAGER_ID,
-				transaction_data, 8);
+	generation = card->generation;

-		if (rcode == RCODE_GENERATION)
-			/* Another bus reset, BM work has been rescheduled. */
-			goto out;
+	root_node = fw_node_get(card->root_node);

-		bm_id = be32_to_cpu(transaction_data[0]);
+	root_id = root_node->node_id;
+	irm_id = card->irm_node->node_id;
+	local_id = card->local_node->node_id;

-		spin_lock_irq(&card->lock);
-		if (rcode == RCODE_COMPLETE && generation == card->generation)
-			card->bm_node_id =
-			    bm_id == 0x3f ? local_id : 0xffc0 | bm_id;
-		spin_unlock_irq(&card->lock);
+	if (card->bm_generation != generation) {
+		enum bm_contention_outcome result = contend_for_bm(card);

-		if (rcode == RCODE_COMPLETE && bm_id != 0x3f) {
-			/* Somebody else is BM.  Only act as IRM. */
-			if (local_id == irm_id)
+		switch (result) {
+		case BM_CONTENTION_OUTCOME_WITHIN_WINDOW:
+			spin_unlock_irq(&card->lock);
+			fw_schedule_bm_work(card, msecs_to_jiffies(125));
+			return;
+		case BM_CONTENTION_OUTCOME_IRM_HAS_LINK_OFF:
+			stand_for_root = true;
+			break;
+		case BM_CONTENTION_OUTCOME_IRM_COMPLIES_1394_1995_ONLY:
+			stand_for_root = true;
+			break;
+		case BM_CONTENTION_OUTCOME_AT_NEW_GENERATION:
+			// BM work has been rescheduled.
+			spin_unlock_irq(&card->lock);
+			return;
+		case BM_CONTENTION_OUTCOME_LOCAL_PROBLEM_AT_TRANSACTION:
+			// Let's try again later and hope that the local problem has gone away by
+			// then.
+			spin_unlock_irq(&card->lock);
+			fw_schedule_bm_work(card, msecs_to_jiffies(125));
+			return;
+		case BM_CONTENTION_OUTCOME_IRM_IS_NOT_CAPABLE_FOR_IRM:
+			// Let's do a bus reset and pick the local node as root, and thus, IRM.
+			stand_for_root = true;
+			break;
+		case BM_CONTENTION_OUTCOME_IRM_HOLDS_ANOTHER_NODE_AS_BM:
+			if (local_id == irm_id) {
+				// Only acts as IRM.
+				spin_unlock_irq(&card->lock);
 				allocate_broadcast_channel(card, generation);
-
-			goto out;
-		}
-
-		if (rcode == RCODE_SEND_ERROR) {
-			/*
-			 * We have been unable to send the lock request due to
-			 * some local problem.  Let's try again later and hope
-			 * that the problem has gone away by then.
-			 */
-			fw_schedule_bm_work(card, DIV_ROUND_UP(HZ, 8));
-			goto out;
+				spin_lock_irq(&card->lock);
+			}
+			fallthrough;
+		case BM_CONTENTION_OUTCOME_IRM_HOLDS_LOCAL_NODE_AS_BM:
+		default:
+			card->bm_generation = generation;
+			break;
 		}
+	}

-		spin_lock_irq(&card->lock);
-
-		if (rcode != RCODE_COMPLETE && !keep_this_irm) {
-			/*
-			 * The lock request failed, maybe the IRM
-			 * isn't really IRM capable after all. Let's
-			 * do a bus reset and pick the local node as
-			 * root, and thus, IRM.
-			 */
-			new_root_id = local_id;
-			fw_notice(card, "BM lock failed (%s), making local node (%02x) root\n",
-				  fw_rcode_string(rcode), new_root_id);
-			goto pick_me;
-		}
-	} else if (card->bm_generation != generation) {
-		/*
-		 * We weren't BM in the last generation, and the last
-		 * bus reset is less than 125ms ago.  Reschedule this job.
-		 */
-		spin_unlock_irq(&card->lock);
-		fw_schedule_bm_work(card, DIV_ROUND_UP(HZ, 8));
-		goto out;
-	}
-
-	/*
-	 * We're bus manager for this generation, so next step is to
-	 * make sure we have an active cycle master and do gap count
-	 * optimization.
-	 */
-	card->bm_generation = generation;
-
-	if (root_device == NULL) {
-		/*
-		 * Either link_on is false, or we failed to read the
-		 * config rom.  In either case, pick another root.
-		 */
-		new_root_id = local_id;
-	} else if (!root_device_is_running) {
-		/*
-		 * If we haven't probed this device yet, bail out now
-		 * and let's try again once that's done.
-		 */
-		spin_unlock_irq(&card->lock);
-		goto out;
-	} else if (root_device_is_cmc) {
-		/*
-		 * We will send out a force root packet for this
-		 * node as part of the gap count optimization.
-		 */
-		new_root_id = root_id;
-	} else {
-		/*
-		 * Current root has an active link layer and we
-		 * successfully read the config rom, but it's not
-		 * cycle master capable.
-		 */
-		new_root_id = local_id;
+	// We're bus manager for this generation, so next step is to make sure we have an active
+	// cycle master and do gap count optimization.
+	if (!stand_for_root) {
+		if (card->gap_count == GAP_COUNT_MISMATCHED) {
+			// If self IDs have inconsistent gap counts, do a
+			// bus reset ASAP. The config rom read might never
+			// complete, so don't wait for it. However, still
+			// send a PHY configuration packet prior to the
+			// bus reset. The PHY configuration packet might
+			// fail, but 1394-2008 8.4.5.2 explicitly permits
+			// it in this case, so it should be safe to try.
+			stand_for_root = true;
+
+			// We must always send a bus reset if the gap count
+			// is inconsistent, so bypass the 5-reset limit.
+			card->bm_retries = 0;
+		} else {
+			// Now investigate the root node.
+			struct fw_device *root_device = fw_node_get_device(root_node);
+
+			if (root_device == NULL) {
+				// Either link_on is false, or we failed to read the
+				// config rom. In either case, pick another root.
+				stand_for_root = true;
+			} else {
+				bool root_device_is_running =
+					atomic_read(&root_device->state) == FW_DEVICE_RUNNING;
+
+				if (!root_device_is_running) {
+					// If we haven't probed this device yet, bail out now
+					// and let's try again once that's done.
+					spin_unlock_irq(&card->lock);
+					return;
+				} else if (!root_device->cmc) {
+					// Current root has an active link layer and we
+					// successfully read the config rom, but it's not
+					// cycle master capable.
+					stand_for_root = true;
+				}
+			}
+		}
 	}

- pick_me:
+	if (stand_for_root) {
+		new_root_id = local_id;
+	} else {
+		// We will send out a force root packet for this node as part of the gap count
+		// optimization on behalf of the node.
+		new_root_id = root_id;
+	}
+
 	/*
 	 * Pick a gap count from 1394a table E-1.  The table doesn't cover
 	 * the typically much larger 1394b beta repeater delays though.
 	 */
 	if (!card->beta_repeaters_present &&
 	    root_node->max_hops < ARRAY_SIZE(gap_count_table))
-		gap_count = gap_count_table[root_node->max_hops];
+		expected_gap_count = gap_count_table[root_node->max_hops];
 	else
-		gap_count = 63;
-
-	/*
-	 * Finally, figure out if we should do a reset or not.  If we have
-	 * done less than 5 resets with the same physical topology and we
-	 * have either a new root or a new gap count setting, let's do it.
-	 */
+		expected_gap_count = 63;

-	if (card->bm_retries++ < 5 &&
-	    (card->gap_count != gap_count || new_root_id != root_id))
-		do_reset = true;
+	// Finally, figure out if we should do a reset or not. If we have done less than 5 resets
+	// with the same physical topology and we have either a new root or a new gap count
+	// setting, let's do it.
+	if (card->bm_retries++ < 5 && (card->gap_count != expected_gap_count || new_root_id != root_id)) {
+		int card_gap_count = card->gap_count;

-	spin_unlock_irq(&card->lock);
+		spin_unlock_irq(&card->lock);

-	if (do_reset) {
 		fw_notice(card, "phy config: new root=%x, gap_count=%d\n",
-			  new_root_id, gap_count);
-		fw_send_phy_config(card, new_root_id, generation, gap_count);
-		reset_bus(card, true);
-		/* Will allocate broadcast channel after the reset. */
-		goto out;
-	}
-
-	if (root_device_is_cmc) {
+			  new_root_id, expected_gap_count);
+		fw_send_phy_config(card, new_root_id, generation, expected_gap_count);
 		/*
-		 * Make sure that the cycle master sends cycle start packets.
+		 * Where possible, use a short bus reset to minimize
+		 * disruption to isochronous transfers. But in the event
+		 * of a gap count inconsistency, use a long bus reset.
+		 *
+		 * As noted in 1394a 8.4.6.2, nodes on a mixed 1394/1394a bus
+		 * may set different gap counts after a bus reset. On a mixed
+		 * 1394/1394a bus, a short bus reset can get doubled. Some
+		 * nodes may treat the double reset as one bus reset and others
+		 * may treat it as two, causing a gap count inconsistency
+		 * again. Using a long bus reset prevents this.
 		 */
-		transaction_data[0] = cpu_to_be32(CSR_STATE_BIT_CMSTR);
-		rcode = fw_run_transaction(card, TCODE_WRITE_QUADLET_REQUEST,
-				root_id, generation, SCODE_100,
-				CSR_REGISTER_BASE + CSR_STATE_SET,
-				transaction_data, 4);
-		if (rcode == RCODE_GENERATION)
-			goto out;
-	}
+		reset_bus(card, card_gap_count != 0);
+		/* Will allocate broadcast channel after the reset. */
+	} else {
+		struct fw_device *root_device = fw_node_get_device(root_node);

-	if (local_id == irm_id)
-		allocate_broadcast_channel(card, generation);
+		spin_unlock_irq(&card->lock);

- out:
-	fw_node_put(root_node);
- out_put_card:
-	fw_card_put(card);
+		if (root_device && root_device->cmc) {
+			// Make sure that the cycle master sends cycle start packets.
+			__be32 data = cpu_to_be32(CSR_STATE_BIT_CMSTR);
+			int rcode = fw_run_transaction(card, TCODE_WRITE_QUADLET_REQUEST,
+						       root_id, generation, SCODE_100,
+						       CSR_REGISTER_BASE + CSR_STATE_SET,
+						       &data, sizeof(data));
+			if (rcode == RCODE_GENERATION)
+				return;
+		}
+
+		if (local_id == irm_id)
+			allocate_broadcast_channel(card, generation);
+	}
 }
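The rewritten bm_work() above leans on DEFINE_FREE()/__free() from <linux/cleanup.h> so that the card and root-node references are dropped on every exit path, replacing the old out:/out_put_card: goto chain. The shape of that idiom, sketched with a hypothetical refcounted type:

#include <linux/cleanup.h>
#include <linux/kref.h>

// Hypothetical refcounted object standing in for struct fw_node.
struct example_obj {
	struct kref kref;
};

void example_obj_put(struct example_obj *obj);	// hypothetical, like fw_node_put()

DEFINE_FREE(example_unref, struct example_obj *, if (_T) example_obj_put(_T))

static void example_user(struct example_obj *(*get)(void))
{
	// The reference is dropped automatically on every path out of
	// this scope; no goto-based put() chain is needed.
	struct example_obj *obj __free(example_unref) = get();

	if (!obj)
		return;
	/* ... use obj ... */
}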

 void fw_card_initialize(struct fw_card *card,
@@ -520,20 +564,26 @@ void fw_card_initialize(struct fw_card *card,
 	card->index = atomic_inc_return(&index);
 	card->driver = driver;
 	card->device = device;
-	card->current_tlabel = 0;
-	card->tlabel_mask = 0;
-	card->split_timeout_hi = DEFAULT_SPLIT_TIMEOUT / 8000;
-	card->split_timeout_lo = (DEFAULT_SPLIT_TIMEOUT % 8000) << 19;
-	card->split_timeout_cycles = DEFAULT_SPLIT_TIMEOUT;
-	card->split_timeout_jiffies =
-		DIV_ROUND_UP(DEFAULT_SPLIT_TIMEOUT * HZ, 8000);
+
+	card->transactions.current_tlabel = 0;
+	card->transactions.tlabel_mask = 0;
+	INIT_LIST_HEAD(&card->transactions.list);
+	spin_lock_init(&card->transactions.lock);
+
+	spin_lock_init(&card->topology_map.lock);
+
+	card->split_timeout.hi = DEFAULT_SPLIT_TIMEOUT / 8000;
+	card->split_timeout.lo = (DEFAULT_SPLIT_TIMEOUT % 8000) << 19;
+	card->split_timeout.cycles = DEFAULT_SPLIT_TIMEOUT;
+	card->split_timeout.jiffies = isoc_cycles_to_jiffies(DEFAULT_SPLIT_TIMEOUT);
+	spin_lock_init(&card->split_timeout.lock);
+
 	card->color = 0;
 	card->broadcast_channel = BROADCAST_CHANNEL_INITIAL;

 	kref_init(&card->kref);
 	init_completion(&card->done);
-	INIT_LIST_HEAD(&card->transaction_list);
-	INIT_LIST_HEAD(&card->phy_receiver_list);
+
 	spin_lock_init(&card->lock);

 	card->local_node = NULL;
@@ -543,25 +593,69 @@ void fw_card_initialize(struct fw_card *card,
 }
 EXPORT_SYMBOL(fw_card_initialize);

-int fw_card_add(struct fw_card *card,
-		u32 max_receive, u32 link_speed, u64 guid)
+DEFINE_FREE(workqueue_destroy, struct workqueue_struct *, if (_T) destroy_workqueue(_T))
+
+int fw_card_add(struct fw_card *card, u32 max_receive, u32 link_speed, u64 guid,
+		unsigned int supported_isoc_contexts)
 {
+	struct workqueue_struct *isoc_wq __free(workqueue_destroy) = NULL;
+	struct workqueue_struct *async_wq __free(workqueue_destroy) = NULL;
 	int ret;

+	// This workqueue should be:
+	//  * != WQ_BH			Sleepable.
+	//  * == WQ_UNBOUND		Any core can process data for isoc context. The
+	//				implementation of unit protocol could consume the core
+	//				longer somehow.
+	//  * != WQ_MEM_RECLAIM		Not used for any backend of block device.
+	//  * == WQ_FREEZABLE		Isochronous communication is at regular interval in real
+	//				time, thus should be drained if possible at freeze phase.
+	//  * == WQ_HIGHPRI		High priority to process semi-realtime timestamped data.
+	//  * == WQ_SYSFS		Parameters are available via sysfs.
+	//  * max_active == n_it + n_ir	A hardIRQ could notify events for multiple isochronous
+	//				contexts if they are scheduled to the same cycle.
+	isoc_wq = alloc_workqueue("firewire-isoc-card%u",
+				  WQ_UNBOUND | WQ_FREEZABLE | WQ_HIGHPRI | WQ_SYSFS,
+				  supported_isoc_contexts, card->index);
+	if (!isoc_wq)
+		return -ENOMEM;
+
+	// This workqueue should be:
+	//  * != WQ_BH			Sleepable.
+	//  * == WQ_UNBOUND		Any core can process data for asynchronous context.
+	//  * == WQ_MEM_RECLAIM		Used for any backend of block device.
+	//  * == WQ_FREEZABLE		The target device would not be available when frozen.
+	//  * == WQ_HIGHPRI		High priority to process semi-realtime timestamped data.
+	//  * == WQ_SYSFS		Parameters are available via sysfs.
+	//  * max_active == 4		A hardIRQ could notify events for a pair of request and
+	//				response AR/AT contexts.
+	async_wq = alloc_workqueue("firewire-async-card%u",
+				   WQ_UNBOUND | WQ_MEM_RECLAIM | WQ_FREEZABLE | WQ_HIGHPRI | WQ_SYSFS,
+				   4, card->index);
+	if (!async_wq)
+		return -ENOMEM;
+
+	card->isoc_wq = isoc_wq;
+	card->async_wq = async_wq;
 	card->max_receive = max_receive;
 	card->link_speed = link_speed;
 	card->guid = guid;

-	mutex_lock(&card_mutex);
+	scoped_guard(mutex, &card_mutex) {
+		generate_config_rom(card, tmp_config_rom);
+		ret = card->driver->enable(card, tmp_config_rom, config_rom_length);
+		if (ret < 0) {
+			card->isoc_wq = NULL;
+			card->async_wq = NULL;
+			return ret;
+		}
+		retain_and_null_ptr(isoc_wq);
+		retain_and_null_ptr(async_wq);

-	generate_config_rom(card, tmp_config_rom);
-	ret = card->driver->enable(card, tmp_config_rom, config_rom_length);
-	if (ret == 0)
 		list_add_tail(&card->link, &card_list);
+	}

-	mutex_unlock(&card_mutex);
-
-	return ret;
+	return 0;
 }
 EXPORT_SYMBOL(fw_card_add);
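fw_card_add() above combines __free() with retain_and_null_ptr(): the workqueues are destroyed automatically on the error paths, and the cleanup is suppressed once enable() succeeds and ownership moves to the card. A stripped-down sketch of the same hand-off, with a hypothetical buffer instead of workqueues:

#include <linux/cleanup.h>
#include <linux/slab.h>

DEFINE_FREE(example_free, void *, if (_T) kfree(_T))

static void *example_global;	// hypothetical long-lived owner

static int example_setup(bool enable)
{
	// Freed automatically on every early return below.
	void *buf __free(example_free) = kzalloc(64, GFP_KERNEL);

	if (!buf)
		return -ENOMEM;
	if (!enable)
		return -EINVAL;		// buf is kfree()d here

	// Success: hand ownership elsewhere and suppress the cleanup.
	example_global = buf;
	retain_and_null_ptr(buf);	// cleanup now sees NULL and does nothing
	return 0;
}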
@@ -616,6 +710,15 @@ static struct fw_iso_context *dummy_allocate_iso_context(struct fw_card *card,
 	return ERR_PTR(-ENODEV);
 }

+static u32 dummy_read_csr(struct fw_card *card, int csr_offset)
+{
+	return 0;
+}
+
+static void dummy_write_csr(struct fw_card *card, int csr_offset, u32 value)
+{
+}
+
 static int dummy_start_iso(struct fw_iso_context *ctx,
 			   s32 cycle, u32 sync, u32 tags)
 {
@@ -649,6 +752,8 @@ static const struct fw_card_driver dummy_driver_template = {
 	.send_response		= dummy_send_response,
 	.cancel_packet		= dummy_cancel_packet,
 	.enable_phys_dma	= dummy_enable_phys_dma,
+	.read_csr		= dummy_read_csr,
+	.write_csr		= dummy_write_csr,
 	.allocate_iso_context	= dummy_allocate_iso_context,
 	.start_iso		= dummy_start_iso,
 	.set_iso_channels	= dummy_set_iso_channels,
@@ -669,25 +774,64 @@ void fw_core_remove_card(struct fw_card *card)
 {
 	struct fw_card_driver dummy_driver = dummy_driver_template;

+	might_sleep();
+
 	card->driver->update_phy_reg(card, 4,
 				     PHY_LINK_ACTIVE | PHY_CONTENDER, 0);
 	fw_schedule_bus_reset(card, false, true);

-	mutex_lock(&card_mutex);
-	list_del_init(&card->link);
-	mutex_unlock(&card_mutex);
+	scoped_guard(mutex, &card_mutex)
+		list_del_init(&card->link);

 	/* Switch off most of the card driver interface. */
 	dummy_driver.free_iso_context	= card->driver->free_iso_context;
 	dummy_driver.stop_iso		= card->driver->stop_iso;
+	dummy_driver.disable		= card->driver->disable;
 	card->driver = &dummy_driver;

-	fw_destroy_nodes(card);
+	drain_workqueue(card->isoc_wq);
+	drain_workqueue(card->async_wq);
+	card->driver->disable(card);
+	fw_cancel_pending_transactions(card);
+
+	scoped_guard(spinlock_irqsave, &card->lock)
+		fw_destroy_nodes(card);

 	/* Wait for all users, especially device workqueue jobs, to finish. */
 	fw_card_put(card);
 	wait_for_completion(&card->done);

-	WARN_ON(!list_empty(&card->transaction_list));
+	destroy_workqueue(card->isoc_wq);
+	destroy_workqueue(card->async_wq);
+
+	WARN_ON(!list_empty(&card->transactions.list));
 }
 EXPORT_SYMBOL(fw_core_remove_card);
+
+/**
+ * fw_card_read_cycle_time - read the Isochronous Cycle Timer register of 1394 OHCI in the MMIO
+ *			     region of the controller card
+ * @card: The instance of card for the 1394 OHCI controller.
+ * @cycle_time: Where to store the value of the cycle time for the read operation.
+ *
+ * Read the value of the Isochronous Cycle Timer register of 1394 OHCI in the MMIO region for the
+ * given controller card. This function accesses the region without any lock primitives or IRQ
+ * mask. When returning successfully, the content of the @cycle_time argument is aligned to host
+ * endianness, formatted per the CYCLE_TIME CSR register of the IEEE 1394 std.
+ *
+ * Context: Any context.
+ * Return:
+ * * 0 - Read successfully.
+ * * -ENODEV - The controller is unavailable due to being removed or unbound.
+ */
+int fw_card_read_cycle_time(struct fw_card *card, u32 *cycle_time)
+{
+	if (card->driver->read_csr == dummy_read_csr)
+		return -ENODEV;
+
+	// It's possible to switch to the dummy driver between the above and the below. This is the
+	// best effort to return -ENODEV.
+	*cycle_time = card->driver->read_csr(card, CSR_CYCLE_TIME);
+	return 0;
+}
+EXPORT_SYMBOL_GPL(fw_card_read_cycle_time);
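A hypothetical caller of the fw_card_read_cycle_time() helper added above might look like this; the field decoding follows the CYCLE_TIME layout referenced in the kernel-doc (7-bit seconds, 13-bit cycle count, 12-bit cycle offset):

#include <linux/firewire.h>

static int example_log_cycle_time(struct fw_card *card)
{
	u32 cycle_time;
	int err = fw_card_read_cycle_time(card, &cycle_time);

	if (err < 0)
		return err;	// -ENODEV: the controller went away

	pr_info("cycle time: %us %ucy %uoff\n",
		(cycle_time >> 25) & 0x7f, (cycle_time >> 12) & 0x1fff,
		cycle_time & 0xfff);
	return 0;
}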
diff --git a/drivers/firewire/core-cdev.c b/drivers/firewire/core-cdev.c
index fb6c651214f3..49dc1612c691 100644
--- a/drivers/firewire/core-cdev.c
+++ b/drivers/firewire/core-cdev.c
@@ -10,10 +10,10 @@
 #include <linux/delay.h>
 #include <linux/device.h>
 #include <linux/dma-mapping.h>
+#include <linux/err.h>
 #include <linux/errno.h>
 #include <linux/firewire.h>
 #include <linux/firewire-cdev.h>
-#include <linux/idr.h>
 #include <linux/irqflags.h>
 #include <linux/jiffies.h>
 #include <linux/kernel.h>
@@ -34,14 +34,21 @@

 #include "core.h"

+#include <trace/events/firewire.h>
+
+#include "packet-header-definitions.h"
+
 /*
  * ABI version history is documented in linux/firewire-cdev.h.
  */
-#define FW_CDEV_KERNEL_VERSION			5
+#define FW_CDEV_KERNEL_VERSION			6
 #define FW_CDEV_VERSION_EVENT_REQUEST2		4
 #define FW_CDEV_VERSION_ALLOCATE_REGION_END	4
 #define FW_CDEV_VERSION_AUTO_FLUSH_ISO_OVERFLOW	5
+#define FW_CDEV_VERSION_EVENT_ASYNC_TSTAMP	6
+
+static DEFINE_SPINLOCK(phy_receiver_list_lock);
+static LIST_HEAD(phy_receiver_list);

 struct client {
 	u32 version;
@@ -49,7 +56,7 @@ struct client {
 	spinlock_t lock;
 	bool in_shutdown;
-	struct idr resource_idr;
+	struct xarray resource_xa;
 	struct list_head event_list;
 	wait_queue_head_t wait;
 	wait_queue_head_t tx_flush_wait;
@@ -110,6 +117,7 @@ struct inbound_transaction_resource {
 	struct client_resource resource;
 	struct fw_card *card;
 	struct fw_request *request;
+	bool is_fcp;
 	void *data;
 	size_t length;
 };
@@ -133,8 +141,41 @@ struct iso_resource {
 	struct iso_resource_event *e_alloc, *e_dealloc;
 };

+static struct address_handler_resource *to_address_handler_resource(struct client_resource *resource)
+{
+	return container_of(resource, struct address_handler_resource, resource);
+}
+
+static struct inbound_transaction_resource *to_inbound_transaction_resource(struct client_resource *resource)
+{
+	return container_of(resource, struct inbound_transaction_resource, resource);
+}
+
+static struct descriptor_resource *to_descriptor_resource(struct client_resource *resource)
+{
+	return container_of(resource, struct descriptor_resource, resource);
+}
+
+static struct iso_resource *to_iso_resource(struct client_resource *resource)
+{
+	return container_of(resource, struct iso_resource, resource);
+}
+
 static void release_iso_resource(struct client *, struct client_resource *);

+static int is_iso_resource(const struct client_resource *resource)
+{
+	return resource->release == release_iso_resource;
+}
+
+static void release_transaction(struct client *client,
+				struct client_resource *resource);
+
+static int is_outbound_transaction_resource(const struct client_resource *resource)
+{
+	return resource->release == release_transaction;
+}
+
 static void schedule_iso_resource(struct iso_resource *r, unsigned long delay)
 {
 	client_get(r->client);
@@ -142,13 +183,6 @@ static void schedule_iso_resource(struct iso_resource *r, unsigned long delay)
 	client_put(r->client);
 }

-static void schedule_if_iso_resource(struct client_resource *resource)
-{
-	if (resource->release == release_iso_resource)
-		schedule_iso_resource(container_of(resource,
-					struct iso_resource, resource), 0);
-}
-
 /*
  * dequeue_event() just kfree()'s the event, so the event has to be
  * the first field in a struct XYZ_event.
@@ -167,7 +201,10 @@ struct outbound_transaction_event {
 	struct event event;
 	struct client *client;
 	struct outbound_transaction_resource r;
-	struct fw_cdev_event_response response;
+	union {
+		struct fw_cdev_event_response without_tstamp;
+		struct fw_cdev_event_response2 with_tstamp;
+	} rsp;
 };

 struct inbound_transaction_event {
@@ -175,6 +212,7 @@ struct inbound_transaction_event {
 	union {
 		struct fw_cdev_event_request request;
 		struct fw_cdev_event_request2 request2;
+		struct fw_cdev_event_request3 with_tstamp;
 	} req;
 };

@@ -197,12 +235,18 @@ struct outbound_phy_packet_event {
 	struct event event;
 	struct client *client;
 	struct fw_packet p;
-	struct fw_cdev_event_phy_packet phy_packet;
+	union {
+		struct fw_cdev_event_phy_packet without_tstamp;
+		struct fw_cdev_event_phy_packet2 with_tstamp;
+	} phy_packet;
 };

 struct inbound_phy_packet_event {
 	struct event event;
-	struct fw_cdev_event_phy_packet phy_packet;
+	union {
+		struct fw_cdev_event_phy_packet without_tstamp;
+		struct fw_cdev_event_phy_packet2 with_tstamp;
+	} phy_packet;
 };

 #ifdef CONFIG_COMPAT
@@ -255,7 +299,7 @@ static int fw_device_op_open(struct inode *inode, struct file *file)
 	client->device = device;

 	spin_lock_init(&client->lock);
-	idr_init(&client->resource_idr);
+	xa_init_flags(&client->resource_xa, XA_FLAGS_ALLOC1 | XA_FLAGS_LOCK_BH);
 	INIT_LIST_HEAD(&client->event_list);
 	init_waitqueue_head(&client->wait);
 	init_waitqueue_head(&client->tx_flush_wait);
@@ -271,19 +315,17 @@ static void queue_event(struct client *client, struct event *event,
 			void *data0, size_t size0, void *data1, size_t size1)
 {
-	unsigned long flags;
-
 	event->v[0].data = data0;
 	event->v[0].size = size0;
 	event->v[1].data = data1;
 	event->v[1].size = size1;

-	spin_lock_irqsave(&client->lock, flags);
-	if (client->in_shutdown)
-		kfree(event);
-	else
-		list_add_tail(&event->link, &client->event_list);
-	spin_unlock_irqrestore(&client->lock, flags);
+	scoped_guard(spinlock_irqsave, &client->lock) {
+		if (client->in_shutdown)
+			kfree(event);
+		else
+			list_add_tail(&event->link, &client->event_list);
+	}

 	wake_up_interruptible(&client->wait);
 }
@@ -305,10 +347,10 @@ static int dequeue_event(struct client *client,
 	    fw_device_is_shutdown(client->device))
 		return -ENODEV;

-	spin_lock_irq(&client->lock);
-	event = list_first_entry(&client->event_list, struct event, link);
-	list_del(&event->link);
-	spin_unlock_irq(&client->lock);
+	scoped_guard(spinlock_irq, &client->lock) {
+		event = list_first_entry(&client->event_list, struct event, link);
+		list_del(&event->link);
+	}

 	total = 0;
 	for (i = 0; i < ARRAY_SIZE(event->v) && total < count; i++) {
@@ -340,7 +382,7 @@ static void fill_bus_reset_event(struct fw_cdev_event_bus_reset *event,
 {
 	struct fw_card *card = client->device->card;

-	spin_lock_irq(&card->lock);
+	guard(spinlock_irq)(&card->lock);

 	event->closure	     = client->bus_reset_closure;
 	event->type          = FW_CDEV_EVENT_BUS_RESET;
@@ -350,8 +392,6 @@ static void fill_bus_reset_event(struct fw_cdev_event_bus_reset *event,
 	event->bm_node_id    = card->bm_node_id;
 	event->irm_node_id   = card->irm_node->node_id;
 	event->root_node_id  = card->root_node->node_id;
-
-	spin_unlock_irq(&card->lock);
 }

 static void for_each_client(struct fw_device *device,
@@ -359,22 +399,17 @@ static void for_each_client(struct fw_device *device,
 {
 	struct client *c;

-	mutex_lock(&device->client_list_mutex);
+	guard(mutex)(&device->client_list_mutex);
+
 	list_for_each_entry(c, &device->client_list, link)
 		callback(c);
-	mutex_unlock(&device->client_list_mutex);
-}
-
-static int schedule_reallocations(int id, void *p, void *data)
-{
-	schedule_if_iso_resource(p);
-
-	return 0;
 }

 static void queue_bus_reset_event(struct client *client)
 {
 	struct bus_reset_event *e;
+	struct client_resource *resource;
+	unsigned long index;

 	e = kzalloc(sizeof(*e), GFP_KERNEL);
 	if (e == NULL)
@@ -385,9 +420,12 @@ static void queue_bus_reset_event(struct client *client)

 	queue_event(client, &e->event, &e->reset, sizeof(e->reset), NULL, 0);

-	spin_lock_irq(&client->lock);
-	idr_for_each(&client->resource_idr, schedule_reallocations, client);
-	spin_unlock_irq(&client->lock);
+	guard(spinlock_irq)(&client->lock);
+
+	xa_for_each(&client->resource_xa, index, resource) {
+		if (is_iso_resource(resource))
+			schedule_iso_resource(to_iso_resource(resource), 0);
+	}
 }

 void fw_device_cdev_update(struct fw_device *device)
@@ -438,23 +476,20 @@ static int ioctl_get_info(struct client *client, union ioctl_arg *arg)
 	a->version = FW_CDEV_KERNEL_VERSION;
 	a->card    = client->device->card->index;

-	down_read(&fw_device_rwsem);
+	scoped_guard(rwsem_read, &fw_device_rwsem) {
+		if (a->rom != 0) {
+			size_t want = a->rom_length;
+			size_t have = client->device->config_rom_length * 4;

-	if (a->rom != 0) {
-		size_t want = a->rom_length;
-		size_t have = client->device->config_rom_length * 4;
-
-		ret = copy_to_user(u64_to_uptr(a->rom),
-				   client->device->config_rom, min(want, have));
+			ret = copy_to_user(u64_to_uptr(a->rom), client->device->config_rom,
+					   min(want, have));
+			if (ret != 0)
+				return -EFAULT;
+		}
+		a->rom_length = client->device->config_rom_length * 4;
 	}
-	a->rom_length = client->device->config_rom_length * 4;
-	up_read(&fw_device_rwsem);
-
-	if (ret != 0)
-		return -EFAULT;
-
-	mutex_lock(&client->device->client_list_mutex);
+
+	guard(mutex)(&client->device->client_list_mutex);

 	client->bus_reset_closure = a->bus_reset_closure;
 	if (a->bus_reset != 0) {
@@ -465,37 +500,36 @@ static int ioctl_get_info(struct client *client, union ioctl_arg *arg)
 	if (ret == 0 && list_empty(&client->link))
 		list_add_tail(&client->link, &client->device->client_list);

-	mutex_unlock(&client->device->client_list_mutex);
-
 	return ret ? -EFAULT : 0;
 }

-static int add_client_resource(struct client *client,
-			       struct client_resource *resource, gfp_t gfp_mask)
+static int add_client_resource(struct client *client, struct client_resource *resource,
+			       gfp_t gfp_mask)
 {
-	bool preload = gfpflags_allow_blocking(gfp_mask);
-	unsigned long flags;
 	int ret;

-	if (preload)
-		idr_preload(gfp_mask);
-	spin_lock_irqsave(&client->lock, flags);
-
-	if (client->in_shutdown)
-		ret = -ECANCELED;
-	else
-		ret = idr_alloc(&client->resource_idr, resource, 0, 0,
-				GFP_NOWAIT);
-	if (ret >= 0) {
-		resource->handle = ret;
-		client_get(client);
-		schedule_if_iso_resource(resource);
+	scoped_guard(spinlock_irqsave, &client->lock) {
+		u32 index;
+
+		if (client->in_shutdown) {
+			ret = -ECANCELED;
+		} else {
+			if (gfpflags_allow_blocking(gfp_mask)) {
+				ret = xa_alloc(&client->resource_xa, &index, resource, xa_limit_32b,
+					       GFP_NOWAIT);
+			} else {
+				ret = xa_alloc_bh(&client->resource_xa, &index, resource,
+						  xa_limit_32b, GFP_NOWAIT);
+			}
+		}
+		if (ret >= 0) {
+			resource->handle = index;
+			client_get(client);
+			if (is_iso_resource(resource))
+				schedule_iso_resource(to_iso_resource(resource), 0);
+		}
 	}

-	spin_unlock_irqrestore(&client->lock, flags);
-	if (preload)
-		idr_preload_end();
-
 	return ret < 0 ? ret : 0;
 }

@@ -503,19 +537,19 @@ static int release_client_resource(struct client *client, u32 handle,
 				   client_resource_release_fn_t release,
 				   struct client_resource **return_resource)
 {
+	unsigned long index = handle;
 	struct client_resource *resource;

-	spin_lock_irq(&client->lock);
-	if (client->in_shutdown)
-		resource = NULL;
-	else
-		resource = idr_find(&client->resource_idr, handle);
-	if (resource && resource->release == release)
-		idr_remove(&client->resource_idr, handle);
-	spin_unlock_irq(&client->lock);
+	scoped_guard(spinlock_irq, &client->lock) {
+		if (client->in_shutdown)
+			return -EINVAL;

-	if (!(resource && resource->release == release))
-		return -EINVAL;
+		resource = xa_load(&client->resource_xa, index);
+		if (!resource || resource->release != release)
+			return -EINVAL;
+
+		xa_erase(&client->resource_xa, handle);
+	}

 	if (return_resource)
 		*return_resource = resource;
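The idr-to-xarray conversion above (xa_init_flags(), xa_alloc(), xa_load(), xa_erase(), xa_for_each()) uses the stock XArray allocation API; a self-contained sketch under hypothetical names:

#include <linux/xarray.h>

static DEFINE_XARRAY_ALLOC(example_xa);	// ID-allocating XArray, indices from 0

static int example_add(void *resource, u32 *handle)
{
	// Finds a free index within xa_limit_32b and stores the entry there.
	return xa_alloc(&example_xa, handle, resource, xa_limit_32b, GFP_KERNEL);
}

static void *example_del(u32 handle)
{
	// Returns the old entry, or NULL if the handle was not in use.
	return xa_erase(&example_xa, handle);
}

Unlike the old idr API, no preload step is needed; the XA_FLAGS_ALLOC1 flag used in fw_device_op_open() simply starts allocation at index 1 instead of 0.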
the + // data, and the read is short, preserve an extra copy of the data + // to stay compatible with a pre-2.6.27 bug. Since the bug is harmless + // for short reads and some apps depended on it, this is both safe + // and prudent for compatibility. + if (rsp->length <= sizeof(*rsp) - offsetof(typeof(*rsp), data)) + queue_event(client, &e->event, rsp, sizeof(*rsp), rsp->data, rsp->length); + else + queue_event(client, &e->event, rsp, sizeof(*rsp) + rsp->length, NULL, 0); - rsp->type = FW_CDEV_EVENT_RESPONSE; - rsp->rcode = rcode; + break; + } + case FW_CDEV_EVENT_RESPONSE2: + { + struct fw_cdev_event_response2 *rsp = &e->rsp.with_tstamp; - /* - * In the case that sizeof(*rsp) doesn't align with the position of the - * data, and the read is short, preserve an extra copy of the data - * to stay compatible with a pre-2.6.27 bug. Since the bug is harmless - * for short reads and some apps depended on it, this is both safe - * and prudent for compatibility. - */ - if (rsp->length <= sizeof(*rsp) - offsetof(typeof(*rsp), data)) - queue_event(client, &e->event, rsp, sizeof(*rsp), - rsp->data, rsp->length); - else - queue_event(client, &e->event, rsp, sizeof(*rsp) + rsp->length, - NULL, 0); + if (length < rsp->length) + rsp->length = length; + if (rcode == RCODE_COMPLETE) + memcpy(rsp->data, payload, rsp->length); + + rsp->rcode = rcode; + rsp->request_tstamp = request_tstamp; + rsp->response_tstamp = response_tstamp; - /* Drop the idr's reference */ + queue_event(client, &e->event, rsp, sizeof(*rsp) + rsp->length, NULL, 0); + + break; + } + default: + WARN_ON(1); + break; + } + + // Drop the xarray's reference. client_put(client); } @@ -577,6 +634,7 @@ static int init_request(struct client *client, int destination_id, int speed) { struct outbound_transaction_event *e; + void *payload; int ret; if (request->tcode != TCODE_STREAM_DATA && @@ -590,14 +648,25 @@ static int init_request(struct client *client, e = kmalloc(sizeof(*e) + request->length, GFP_KERNEL); if (e == NULL) return -ENOMEM; - e->client = client; - e->response.length = request->length; - e->response.closure = request->closure; - if (request->data && - copy_from_user(e->response.data, - u64_to_uptr(request->data), request->length)) { + if (client->version < FW_CDEV_VERSION_EVENT_ASYNC_TSTAMP) { + struct fw_cdev_event_response *rsp = &e->rsp.without_tstamp; + + rsp->type = FW_CDEV_EVENT_RESPONSE; + rsp->length = request->length; + rsp->closure = request->closure; + payload = rsp->data; + } else { + struct fw_cdev_event_response2 *rsp = &e->rsp.with_tstamp; + + rsp->type = FW_CDEV_EVENT_RESPONSE2; + rsp->length = request->length; + rsp->closure = request->closure; + payload = rsp->data; + } + + if (request->data && copy_from_user(payload, u64_to_uptr(request->data), request->length)) { ret = -EFAULT; goto failed; } @@ -607,10 +676,9 @@ static int init_request(struct client *client, if (ret < 0) goto failed; - fw_send_request(client->device->card, &e->r.transaction, - request->tcode, destination_id, request->generation, - speed, request->offset, e->response.data, - request->length, complete_transaction, e); + fw_send_request_with_tstamp(client->device->card, &e->r.transaction, request->tcode, + destination_id, request->generation, speed, request->offset, + payload, request->length, complete_transaction, e); return 0; failed: @@ -642,19 +710,13 @@ static int ioctl_send_request(struct client *client, union ioctl_arg *arg) client->device->max_speed); } -static inline bool is_fcp_request(struct fw_request *request) -{ - return request == 
NULL; -} - static void release_request(struct client *client, struct client_resource *resource) { - struct inbound_transaction_resource *r = container_of(resource, - struct inbound_transaction_resource, resource); + struct inbound_transaction_resource *r = to_inbound_transaction_resource(resource); - if (is_fcp_request(r->request)) - kfree(r->data); + if (r->is_fcp) + fw_request_put(r->request); else fw_send_response(r->card, r->request, RCODE_CONFLICT_ERROR); @@ -668,15 +730,20 @@ static void handle_request(struct fw_card *card, struct fw_request *request, void *payload, size_t length, void *callback_data) { struct address_handler_resource *handler = callback_data; + bool is_fcp = is_in_fcp_region(offset, length); struct inbound_transaction_resource *r; struct inbound_transaction_event *e; size_t event_size0; - void *fcp_frame = NULL; int ret; /* card may be different from handler->client->device->card */ fw_card_get(card); + // Extend the lifetime of data for request so that its payload is safely accessible in + // the process context for the client. + if (is_fcp) + fw_request_get(request); + r = kmalloc(sizeof(*r), GFP_ATOMIC); e = kmalloc(sizeof(*e), GFP_ATOMIC); if (r == NULL || e == NULL) @@ -684,21 +751,10 @@ static void handle_request(struct fw_card *card, struct fw_request *request, r->card = card; r->request = request; + r->is_fcp = is_fcp; r->data = payload; r->length = length; - if (is_fcp_request(request)) { - /* - * FIXME: Let core-transaction.c manage a - * single reference-counted copy? - */ - fcp_frame = kmemdup(payload, length, GFP_ATOMIC); - if (fcp_frame == NULL) - goto failed; - - r->data = fcp_frame; - } - r->resource.release = release_request; ret = add_client_resource(handler->client, &r->resource, GFP_ATOMIC); if (ret < 0) @@ -717,7 +773,7 @@ static void handle_request(struct fw_card *card, struct fw_request *request, req->handle = r->resource.handle; req->closure = handler->closure; event_size0 = sizeof(*req); - } else { + } else if (handler->client->version < FW_CDEV_VERSION_EVENT_ASYNC_TSTAMP) { struct fw_cdev_event_request2 *req = &e->req.request2; req->type = FW_CDEV_EVENT_REQUEST2; @@ -731,6 +787,21 @@ static void handle_request(struct fw_card *card, struct fw_request *request, req->handle = r->resource.handle; req->closure = handler->closure; event_size0 = sizeof(*req); + } else { + struct fw_cdev_event_request3 *req = &e->req.with_tstamp; + + req->type = FW_CDEV_EVENT_REQUEST3; + req->tcode = tcode; + req->offset = offset; + req->source_node_id = source; + req->destination_node_id = destination; + req->card = card->index; + req->generation = generation; + req->length = length; + req->handle = r->resource.handle; + req->closure = handler->closure; + req->tstamp = fw_request_get_timestamp(request); + event_size0 = sizeof(*req); } queue_event(handler->client, &e->event, @@ -740,10 +811,11 @@ static void handle_request(struct fw_card *card, struct fw_request *request, failed: kfree(r); kfree(e); - kfree(fcp_frame); - if (!is_fcp_request(request)) + if (!is_fcp) fw_send_response(card, request, RCODE_CONFLICT_ERROR); + else + fw_request_put(request); fw_card_put(card); } @@ -751,8 +823,7 @@ static void handle_request(struct fw_card *card, struct fw_request *request, static void release_address_handler(struct client *client, struct client_resource *resource) { - struct address_handler_resource *r = - container_of(resource, struct address_handler_resource, resource); + struct address_handler_resource *r = to_address_handler_resource(resource); 
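handle_request() above extends the lifetime of an FCP request with fw_request_get()/fw_request_put() (added in core-transaction.c, not shown in this diff) instead of duplicating the payload with kmemdup(). The underlying pattern is ordinary reference counting, roughly like this hypothetical miniature:

#include <linux/kref.h>
#include <linux/slab.h>

// Hypothetical miniature of a refcounted request; the real struct
// fw_request and its get/put helpers live in core-transaction.c.
struct example_request {
	struct kref kref;
};

static void example_request_release(struct kref *kref)
{
	kfree(container_of(kref, struct example_request, kref));
}

static void example_request_get(struct example_request *r)
{
	kref_get(&r->kref);		// taken in hardIRQ context
}

static void example_request_put(struct example_request *r)
{
	kref_put(&r->kref, example_request_release);	// dropped later in process context
}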
@@ -751,8 +823,7 @@ static void handle_request(struct fw_card *card, struct fw_request *request,
 static void release_address_handler(struct client *client,
 				    struct client_resource *resource)
 {
-	struct address_handler_resource *r =
-		container_of(resource, struct address_handler_resource, resource);
+	struct address_handler_resource *r = to_address_handler_resource(resource);

 	fw_core_remove_address_handler(&r->handler);
 	kfree(r);
@@ -816,19 +887,20 @@ static int ioctl_send_response(struct client *client, union ioctl_arg *arg)
 				    release_request, &resource) < 0)
 		return -EINVAL;

-	r = container_of(resource, struct inbound_transaction_resource,
-			 resource);
-	if (is_fcp_request(r->request))
+	r = to_inbound_transaction_resource(resource);
+	if (r->is_fcp) {
+		fw_request_put(r->request);
 		goto out;
+	}

 	if (a->length != fw_get_response_length(r->request)) {
 		ret = -EINVAL;
-		kfree(r->request);
+		fw_request_put(r->request);
 		goto out;
 	}
 	if (copy_from_user(r->data, u64_to_uptr(a->data), a->length)) {
 		ret = -EFAULT;
-		kfree(r->request);
+		fw_request_put(r->request);
 		goto out;
 	}
 	fw_send_response(r->card, r->request, a->rcode);
@@ -849,8 +921,7 @@ static int ioctl_initiate_bus_reset(struct client *client, union ioctl_arg *arg)
 static void release_descriptor(struct client *client,
 			       struct client_resource *resource)
 {
-	struct descriptor_resource *r =
-		container_of(resource, struct descriptor_resource, resource);
+	struct descriptor_resource *r = to_descriptor_resource(resource);

 	fw_core_remove_descriptor(&r->descriptor);
 	kfree(r);
@@ -869,11 +940,12 @@ static int ioctl_add_descriptor(struct client *client, union ioctl_arg *arg)
 	if (a->length > 256)
 		return -EINVAL;

-	r = kmalloc(sizeof(*r) + a->length * 4, GFP_KERNEL);
+	r = kmalloc(struct_size(r, data, a->length), GFP_KERNEL);
 	if (r == NULL)
 		return -ENOMEM;

-	if (copy_from_user(r->data, u64_to_uptr(a->data), a->length * 4)) {
+	if (copy_from_user(r->data, u64_to_uptr(a->data),
+			   flex_array_size(r, data, a->length))) {
 		ret = -EFAULT;
 		goto failed;
 	}
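ioctl_add_descriptor() above replaces open-coded size arithmetic with the overflow-checked helpers from <linux/overflow.h>; in short, with a hypothetical flex-array struct:

#include <linux/overflow.h>
#include <linux/slab.h>

// Hypothetical struct with a trailing flexible array, like
// struct descriptor_resource's data[].
struct example_desc {
	size_t count;
	u32 data[];
};

static struct example_desc *example_alloc(size_t n)
{
	// struct_size() computes sizeof(*d) + n * sizeof(d->data[0]),
	// saturating instead of wrapping on overflow; flex_array_size()
	// gives just the n * sizeof(d->data[0]) part for the copy.
	struct example_desc *d = kmalloc(struct_size(d, data, n), GFP_KERNEL);

	if (d)
		d->count = n;
	return d;
}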
fw_iso_context_create(client->device->card, a->type, - a->channel, a->speed, a->header_size, cb, client); + if (a->type == FW_ISO_CONTEXT_RECEIVE_MULTICHANNEL) + context = fw_iso_mc_context_create(client->device->card, cb.mc, + client); + else + context = fw_iso_context_create(client->device->card, a->type, + a->channel, a->speed, + a->header_size, cb.sc, client); if (IS_ERR(context)) return PTR_ERR(context); if (client->version < FW_CDEV_VERSION_AUTO_FLUSH_ISO_OVERFLOW) context->drop_overflow_headers = true; - /* We only support one context at this time. */ - spin_lock_irq(&client->lock); + // We only support one context at this time. + guard(spinlock_irq)(&client->lock); + if (client->iso_context != NULL) { - spin_unlock_irq(&client->lock); fw_iso_context_destroy(context); return -EBUSY; @@ -1009,7 +1100,6 @@ static int ioctl_create_iso_context(struct client *client, union ioctl_arg *arg) client->device->card, iso_dma_direction(context)); if (ret < 0) { - spin_unlock_irq(&client->lock); fw_iso_context_destroy(context); return ret; @@ -1018,7 +1108,6 @@ static int ioctl_create_iso_context(struct client *client, union ioctl_arg *arg) } client->iso_closure = a->closure; client->iso_context = context; - spin_unlock_irq(&client->lock); a->handle = 0; @@ -1052,10 +1141,7 @@ static int ioctl_queue_iso(struct client *client, union ioctl_arg *arg) unsigned long payload, buffer_end, transmit_header_bytes = 0; u32 control; int count; - struct { - struct fw_iso_packet packet; - u8 header[256]; - } u; + DEFINE_RAW_FLEX(struct fw_iso_packet, u, header, 64); if (ctx == NULL || a->handle != 0) return -EINVAL; @@ -1087,29 +1173,29 @@ static int ioctl_queue_iso(struct client *client, union ioctl_arg *arg) while (p < end) { if (get_user(control, &p->control)) return -EFAULT; - u.packet.payload_length = GET_PAYLOAD_LENGTH(control); - u.packet.interrupt = GET_INTERRUPT(control); - u.packet.skip = GET_SKIP(control); - u.packet.tag = GET_TAG(control); - u.packet.sy = GET_SY(control); - u.packet.header_length = GET_HEADER_LENGTH(control); + u->payload_length = GET_PAYLOAD_LENGTH(control); + u->interrupt = GET_INTERRUPT(control); + u->skip = GET_SKIP(control); + u->tag = GET_TAG(control); + u->sy = GET_SY(control); + u->header_length = GET_HEADER_LENGTH(control); switch (ctx->type) { case FW_ISO_CONTEXT_TRANSMIT: - if (u.packet.header_length & 3) + if (u->header_length & 3) return -EINVAL; - transmit_header_bytes = u.packet.header_length; + transmit_header_bytes = u->header_length; break; case FW_ISO_CONTEXT_RECEIVE: - if (u.packet.header_length == 0 || - u.packet.header_length % ctx->header_size != 0) + if (u->header_length == 0 || + u->header_length % ctx->header_size != 0) return -EINVAL; break; case FW_ISO_CONTEXT_RECEIVE_MULTICHANNEL: - if (u.packet.payload_length == 0 || - u.packet.payload_length & 3) + if (u->payload_length == 0 || + u->payload_length & 3) return -EINVAL; break; } @@ -1119,20 +1205,19 @@ static int ioctl_queue_iso(struct client *client, union ioctl_arg *arg) if (next > end) return -EINVAL; if (copy_from_user - (u.packet.header, p->header, transmit_header_bytes)) + (u->header, p->header, transmit_header_bytes)) return -EFAULT; - if (u.packet.skip && ctx->type == FW_ISO_CONTEXT_TRANSMIT && - u.packet.header_length + u.packet.payload_length > 0) + if (u->skip && ctx->type == FW_ISO_CONTEXT_TRANSMIT && + u->header_length + u->payload_length > 0) return -EINVAL; - if (payload + u.packet.payload_length > buffer_end) + if (payload + u->payload_length > buffer_end) return -EINVAL; - if 
(fw_iso_context_queue(ctx, &u.packet, - &client->buffer, payload)) + if (fw_iso_context_queue(ctx, u, &client->buffer, payload)) break; p = next; - payload += u.packet.payload_length; + payload += u->payload_length; count++; } fw_iso_context_queue_flush(ctx); @@ -1191,28 +1276,28 @@ static int ioctl_get_cycle_timer2(struct client *client, union ioctl_arg *arg) struct fw_cdev_get_cycle_timer2 *a = &arg->get_cycle_timer2; struct fw_card *card = client->device->card; struct timespec64 ts = {0, 0}; - u32 cycle_time; - int ret = 0; + u32 cycle_time = 0; + int ret; - local_irq_disable(); + guard(irq)(); - cycle_time = card->driver->read_csr(card, CSR_CYCLE_TIME); + ret = fw_card_read_cycle_time(card, &cycle_time); + if (ret < 0) + return ret; switch (a->clk_id) { case CLOCK_REALTIME: ktime_get_real_ts64(&ts); break; case CLOCK_MONOTONIC: ktime_get_ts64(&ts); break; case CLOCK_MONOTONIC_RAW: ktime_get_raw_ts64(&ts); break; default: - ret = -EINVAL; + return -EINVAL; } - local_irq_enable(); - a->tv_sec = ts.tv_sec; a->tv_nsec = ts.tv_nsec; a->cycle_timer = cycle_time; - return ret; + return 0; } static int ioctl_get_cycle_timer(struct client *client, union ioctl_arg *arg) @@ -1232,31 +1317,30 @@ static int ioctl_get_cycle_timer(struct client *client, union ioctl_arg *arg) static void iso_resource_work(struct work_struct *work) { struct iso_resource_event *e; - struct iso_resource *r = - container_of(work, struct iso_resource, work.work); + struct iso_resource *r = from_work(r, work, work.work); struct client *client = r->client; + unsigned long index = r->resource.handle; int generation, channel, bandwidth, todo; bool skip, free, success; - spin_lock_irq(&client->lock); - generation = client->device->generation; - todo = r->todo; - /* Allow 1000ms grace period for other reallocations. */ - if (todo == ISO_RES_ALLOC && - time_before64(get_jiffies_64(), - client->device->card->reset_jiffies + HZ)) { - schedule_iso_resource(r, DIV_ROUND_UP(HZ, 3)); - skip = true; - } else { - /* We could be called twice within the same generation. */ - skip = todo == ISO_RES_REALLOC && - r->generation == generation; + scoped_guard(spinlock_irq, &client->lock) { + generation = client->device->generation; + todo = r->todo; + // Allow 1000ms grace period for other reallocations. + if (todo == ISO_RES_ALLOC && + time_is_after_jiffies64(client->device->card->reset_jiffies + secs_to_jiffies(1))) { + schedule_iso_resource(r, msecs_to_jiffies(333)); + skip = true; + } else { + // We could be called twice within the same generation. + skip = todo == ISO_RES_REALLOC && + r->generation == generation; + } + free = todo == ISO_RES_DEALLOC || + todo == ISO_RES_ALLOC_ONCE || + todo == ISO_RES_DEALLOC_ONCE; + r->generation = generation; } - free = todo == ISO_RES_DEALLOC || - todo == ISO_RES_ALLOC_ONCE || - todo == ISO_RES_DEALLOC_ONCE; - r->generation = generation; - spin_unlock_irq(&client->lock); if (skip) goto out; @@ -1270,7 +1354,7 @@ static void iso_resource_work(struct work_struct *work) todo == ISO_RES_ALLOC_ONCE); /* * Is this generation outdated already? As long as this resource sticks - * in the idr, it will be scheduled again for a newer generation or at + * in the xarray, it will be scheduled again for a newer generation or at * shutdown. 
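iso_resource_work() above also adopts from_work(), a typed wrapper around container_of() for work callbacks, alongside the readable jiffies conversion helpers. A small sketch of the pattern, assuming a hypothetical retry_job structure:

#include <linux/jiffies.h>
#include <linux/workqueue.h>

struct retry_job {
	struct delayed_work dwork;
	unsigned int attempts;
};

static void retry_job_fn(struct work_struct *work)
{
	/* from_work() recovers the retry_job embedding this
	 * work_struct via the member path dwork.work. */
	struct retry_job *job = from_work(job, work, dwork.work);

	/* Re-arm about a third of a second later, mirroring the
	 * grace-period delay used by the iso resource code above. */
	if (++job->attempts < 10)
		schedule_delayed_work(&job->dwork, msecs_to_jiffies(333));
}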
*/ if (channel == -EAGAIN && @@ -1279,24 +1363,20 @@ static void iso_resource_work(struct work_struct *work) success = channel >= 0 || bandwidth > 0; - spin_lock_irq(&client->lock); - /* - * Transit from allocation to reallocation, except if the client - * requested deallocation in the meantime. - */ - if (r->todo == ISO_RES_ALLOC) - r->todo = ISO_RES_REALLOC; - /* - * Allocation or reallocation failure? Pull this resource out of the - * idr and prepare for deletion, unless the client is shutting down. - */ - if (r->todo == ISO_RES_REALLOC && !success && - !client->in_shutdown && - idr_remove(&client->resource_idr, r->resource.handle)) { - client_put(client); - free = true; + scoped_guard(spinlock_irq, &client->lock) { + // Transit from allocation to reallocation, except if the client + // requested deallocation in the meantime. + if (r->todo == ISO_RES_ALLOC) + r->todo = ISO_RES_REALLOC; + // Allocation or reallocation failure? Pull this resource out of the + // xarray and prepare for deletion, unless the client is shutting down. + if (r->todo == ISO_RES_REALLOC && !success && + !client->in_shutdown && + xa_erase(&client->resource_xa, index)) { + client_put(client); + free = true; + } } - spin_unlock_irq(&client->lock); if (todo == ISO_RES_ALLOC && channel >= 0) r->channels = 1ULL << channel; @@ -1331,13 +1411,12 @@ static void iso_resource_work(struct work_struct *work) static void release_iso_resource(struct client *client, struct client_resource *resource) { - struct iso_resource *r = - container_of(resource, struct iso_resource, resource); + struct iso_resource *r = to_iso_resource(resource); + + guard(spinlock_irq)(&client->lock); - spin_lock_irq(&client->lock); r->todo = ISO_RES_DEALLOC; schedule_iso_resource(r, 0); - spin_unlock_irq(&client->lock); } static int init_iso_resource(struct client *client, @@ -1480,25 +1559,65 @@ static void outbound_phy_packet_callback(struct fw_packet *packet, { struct outbound_phy_packet_event *e = container_of(packet, struct outbound_phy_packet_event, p); + struct client *e_client = e->client; + u32 rcode; + + trace_async_phy_outbound_complete((uintptr_t)packet, card->index, status, packet->generation, + packet->timestamp); switch (status) { - /* expected: */ - case ACK_COMPLETE: e->phy_packet.rcode = RCODE_COMPLETE; break; - /* should never happen with PHY packets: */ - case ACK_PENDING: e->phy_packet.rcode = RCODE_COMPLETE; break; + // expected: + case ACK_COMPLETE: + rcode = RCODE_COMPLETE; + break; + // should never happen with PHY packets: + case ACK_PENDING: + rcode = RCODE_COMPLETE; + break; case ACK_BUSY_X: case ACK_BUSY_A: - case ACK_BUSY_B: e->phy_packet.rcode = RCODE_BUSY; break; - case ACK_DATA_ERROR: e->phy_packet.rcode = RCODE_DATA_ERROR; break; - case ACK_TYPE_ERROR: e->phy_packet.rcode = RCODE_TYPE_ERROR; break; - /* stale generation; cancelled; on certain controllers: no ack */ - default: e->phy_packet.rcode = status; break; + case ACK_BUSY_B: + rcode = RCODE_BUSY; + break; + case ACK_DATA_ERROR: + rcode = RCODE_DATA_ERROR; + break; + case ACK_TYPE_ERROR: + rcode = RCODE_TYPE_ERROR; + break; + // stale generation; cancelled; on certain controllers: no ack + default: + rcode = status; + break; } - e->phy_packet.data[0] = packet->timestamp; - queue_event(e->client, &e->event, &e->phy_packet, - sizeof(e->phy_packet) + e->phy_packet.length, NULL, 0); - client_put(e->client); + switch (e->phy_packet.without_tstamp.type) { + case FW_CDEV_EVENT_PHY_PACKET_SENT: + { + struct fw_cdev_event_phy_packet *pp = &e->phy_packet.without_tstamp; + + 
pp->rcode = rcode; + pp->data[0] = packet->timestamp; + queue_event(e->client, &e->event, &e->phy_packet, sizeof(*pp) + pp->length, + NULL, 0); + break; + } + case FW_CDEV_EVENT_PHY_PACKET_SENT2: + { + struct fw_cdev_event_phy_packet2 *pp = &e->phy_packet.with_tstamp; + + pp->rcode = rcode; + pp->tstamp = packet->timestamp; + queue_event(e->client, &e->event, &e->phy_packet, sizeof(*pp) + pp->length, + NULL, 0); + break; + } + default: + WARN_ON(1); + break; + } + + client_put(e_client); } static int ioctl_send_phy_packet(struct client *client, union ioctl_arg *arg) @@ -1511,7 +1630,7 @@ static int ioctl_send_phy_packet(struct client *client, union ioctl_arg *arg) if (!client->device->is_local) return -ENOSYS; - e = kzalloc(sizeof(*e) + 4, GFP_KERNEL); + e = kzalloc(sizeof(*e) + sizeof(a->data), GFP_KERNEL); if (e == NULL) return -ENOMEM; @@ -1519,15 +1638,32 @@ static int ioctl_send_phy_packet(struct client *client, union ioctl_arg *arg) e->client = client; e->p.speed = SCODE_100; e->p.generation = a->generation; - e->p.header[0] = TCODE_LINK_INTERNAL << 4; + async_header_set_tcode(e->p.header, TCODE_LINK_INTERNAL); e->p.header[1] = a->data[0]; e->p.header[2] = a->data[1]; e->p.header_length = 12; e->p.callback = outbound_phy_packet_callback; - e->phy_packet.closure = a->closure; - e->phy_packet.type = FW_CDEV_EVENT_PHY_PACKET_SENT; - if (is_ping_packet(a->data)) - e->phy_packet.length = 4; + + if (client->version < FW_CDEV_VERSION_EVENT_ASYNC_TSTAMP) { + struct fw_cdev_event_phy_packet *pp = &e->phy_packet.without_tstamp; + + pp->closure = a->closure; + pp->type = FW_CDEV_EVENT_PHY_PACKET_SENT; + if (is_ping_packet(a->data)) + pp->length = 4; + } else { + struct fw_cdev_event_phy_packet2 *pp = &e->phy_packet.with_tstamp; + + pp->closure = a->closure; + pp->type = FW_CDEV_EVENT_PHY_PACKET_SENT2; + // Keep the data field so that application can match the response event to the + // request. + pp->length = sizeof(a->data); + memcpy(pp->data, a->data, sizeof(a->data)); + } + + trace_async_phy_outbound_initiate((uintptr_t)&e->p, card->index, e->p.generation, + e->p.header[1], e->p.header[2]); card->driver->send_request(card, &e->p); @@ -1537,46 +1673,63 @@ static int ioctl_send_phy_packet(struct client *client, union ioctl_arg *arg) static int ioctl_receive_phy_packets(struct client *client, union ioctl_arg *arg) { struct fw_cdev_receive_phy_packets *a = &arg->receive_phy_packets; - struct fw_card *card = client->device->card; /* Access policy: Allow this ioctl only on local nodes' device files. */ if (!client->device->is_local) return -ENOSYS; - spin_lock_irq(&card->lock); + // NOTE: This can be without irq when we can guarantee that __fw_send_request() for local + // destination never runs in any type of IRQ context. + scoped_guard(spinlock_irq, &phy_receiver_list_lock) + list_move_tail(&client->phy_receiver_link, &phy_receiver_list); - list_move_tail(&client->phy_receiver_link, &card->phy_receiver_list); client->phy_receiver_closure = a->closure; - spin_unlock_irq(&card->lock); - return 0; } void fw_cdev_handle_phy_packet(struct fw_card *card, struct fw_packet *p) { struct client *client; - struct inbound_phy_packet_event *e; - unsigned long flags; - spin_lock_irqsave(&card->lock, flags); + // NOTE: This can be without irqsave when we can guarantee that __fw_send_request() for local + // destination never runs in any type of IRQ context. 
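ioctl_send_phy_packet() above fills one of two event layouts depending on the negotiated cdev version, with both layouts overlaid in a single union inside the driver-private event (the real UAPI structures are fw_cdev_event_phy_packet and fw_cdev_event_phy_packet2). A minimal sketch of that versioning pattern, with hypothetical event_v1/event_v2 layouts:

#include <linux/string.h>
#include <linux/types.h>

struct event_v1 { u32 type; u32 length; u32 data[2]; };
struct event_v2 { u32 type; u32 length; u32 tstamp; u32 data[2]; };

union versioned_event {
	struct event_v1 v1;
	struct event_v2 v2;
};

/* Fills the layout matching the client's version and returns the
 * number of bytes the client should be handed. */
static size_t fill_event(union versioned_event *e, u32 client_version,
			 u32 tstamp, const u32 payload[2])
{
	if (client_version < 2) {
		e->v1.type = 1;
		e->v1.length = sizeof(e->v1.data);
		memcpy(e->v1.data, payload, sizeof(e->v1.data));
		return sizeof(e->v1);
	}

	e->v2.type = 2;
	e->v2.length = sizeof(e->v2.data);
	e->v2.tstamp = tstamp;
	memcpy(e->v2.data, payload, sizeof(e->v2.data));
	return sizeof(e->v2);
}

Because both layouts share one allocation, a single queue_event() call works for either client generation, as in the hunks above.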
+ guard(spinlock_irqsave)(&phy_receiver_list_lock); + + list_for_each_entry(client, &phy_receiver_list, phy_receiver_link) { + struct inbound_phy_packet_event *e; + + if (client->device->card != card) + continue; - list_for_each_entry(client, &card->phy_receiver_list, phy_receiver_link) { e = kmalloc(sizeof(*e) + 8, GFP_ATOMIC); if (e == NULL) break; - e->phy_packet.closure = client->phy_receiver_closure; - e->phy_packet.type = FW_CDEV_EVENT_PHY_PACKET_RECEIVED; - e->phy_packet.rcode = RCODE_COMPLETE; - e->phy_packet.length = 8; - e->phy_packet.data[0] = p->header[1]; - e->phy_packet.data[1] = p->header[2]; - queue_event(client, &e->event, - &e->phy_packet, sizeof(e->phy_packet) + 8, NULL, 0); + if (client->version < FW_CDEV_VERSION_EVENT_ASYNC_TSTAMP) { + struct fw_cdev_event_phy_packet *pp = &e->phy_packet.without_tstamp; + + pp->closure = client->phy_receiver_closure; + pp->type = FW_CDEV_EVENT_PHY_PACKET_RECEIVED; + pp->rcode = RCODE_COMPLETE; + pp->length = 8; + pp->data[0] = p->header[1]; + pp->data[1] = p->header[2]; + queue_event(client, &e->event, &e->phy_packet, sizeof(*pp) + 8, NULL, 0); + } else { + struct fw_cdev_event_phy_packet2 *pp = &e->phy_packet.with_tstamp; + + pp->closure = client->phy_receiver_closure; + pp->type = FW_CDEV_EVENT_PHY_PACKET_RECEIVED2; + pp->rcode = RCODE_COMPLETE; + pp->length = 8; + pp->tstamp = p->timestamp; + pp->data[0] = p->header[1]; + pp->data[1] = p->header[2]; + queue_event(client, &e->event, &e->phy_packet, sizeof(*pp) + 8, NULL, 0); + } } - - spin_unlock_irqrestore(&card->lock, flags); } static int (* const ioctl_handlers[])(struct client *, union ioctl_arg *) = { @@ -1673,16 +1826,15 @@ static int fw_device_op_mmap(struct file *file, struct vm_area_struct *vma) if (ret < 0) return ret; - spin_lock_irq(&client->lock); - if (client->iso_context) { - ret = fw_iso_buffer_map_dma(&client->buffer, - client->device->card, - iso_dma_direction(client->iso_context)); - client->buffer_is_mapped = (ret == 0); + scoped_guard(spinlock_irq, &client->lock) { + if (client->iso_context) { + ret = fw_iso_buffer_map_dma(&client->buffer, client->device->card, + iso_dma_direction(client->iso_context)); + if (ret < 0) + goto fail; + client->buffer_is_mapped = true; + } } - spin_unlock_irq(&client->lock); - if (ret < 0) - goto fail; ret = vm_map_pages_zero(vma, client->buffer.pages, client->buffer.page_count); @@ -1695,48 +1847,35 @@ static int fw_device_op_mmap(struct file *file, struct vm_area_struct *vma) return ret; } -static int is_outbound_transaction_resource(int id, void *p, void *data) -{ - struct client_resource *resource = p; - - return resource->release == release_transaction; -} - -static int has_outbound_transactions(struct client *client) +static bool has_outbound_transactions(struct client *client) { - int ret; - - spin_lock_irq(&client->lock); - ret = idr_for_each(&client->resource_idr, - is_outbound_transaction_resource, NULL); - spin_unlock_irq(&client->lock); - - return ret; -} + struct client_resource *resource; + unsigned long index; -static int shutdown_resource(int id, void *p, void *data) -{ - struct client_resource *resource = p; - struct client *client = data; + guard(spinlock_irq)(&client->lock); - resource->release(client, resource); - client_put(client); + xa_for_each(&client->resource_xa, index, resource) { + if (is_outbound_transaction_resource(resource)) + return true; + } - return 0; + return false; } static int fw_device_op_release(struct inode *inode, struct file *file) { struct client *client
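has_outbound_transactions() above shows the shape of the idr_for_each() to xa_for_each() conversion: the callback-plus-opaque-pointer style becomes a plain loop over present entries. A self-contained sketch, assuming a hypothetical item type:

#include <linux/types.h>
#include <linux/xarray.h>

struct item {
	bool pending;
};

/* xa_for_each() visits every present entry in index order, yielding
 * both the index and the entry pointer; no callback is needed. */
static bool any_pending(struct xarray *xa)
{
	struct item *it;
	unsigned long index;

	xa_for_each(xa, index, it) {
		if (it->pending)
			return true;
	}

	return false;
}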
= file->private_data; struct event *event, *next_event; + struct client_resource *resource; + unsigned long index; - spin_lock_irq(&client->device->card->lock); - list_del(&client->phy_receiver_link); - spin_unlock_irq(&client->device->card->lock); + // NOTE: This can be without irq when we can guarantee that __fw_send_request() for local + // destination never runs in any type of IRQ context. + scoped_guard(spinlock_irq, &phy_receiver_list_lock) + list_del(&client->phy_receiver_link); - mutex_lock(&client->device->client_list_mutex); - list_del(&client->link); - mutex_unlock(&client->device->client_list_mutex); + scoped_guard(mutex, &client->device->client_list_mutex) + list_del(&client->link); if (client->iso_context) fw_iso_context_destroy(client->iso_context); @@ -1744,15 +1883,17 @@ static int fw_device_op_release(struct inode *inode, struct file *file) if (client->buffer.pages) fw_iso_buffer_destroy(&client->buffer, client->device->card); - /* Freeze client->resource_idr and client->event_list */ - spin_lock_irq(&client->lock); - client->in_shutdown = true; - spin_unlock_irq(&client->lock); + // Freeze client->resource_xa and client->event_list. + scoped_guard(spinlock_irq, &client->lock) + client->in_shutdown = true; wait_event(client->tx_flush_wait, !has_outbound_transactions(client)); - idr_for_each(&client->resource_idr, shutdown_resource, client); - idr_destroy(&client->resource_idr); + xa_for_each(&client->resource_xa, index, resource) { + resource->release(client, resource); + client_put(client); + } + xa_destroy(&client->resource_xa); list_for_each_entry_safe(event, next_event, &client->event_list, link) kfree(event); @@ -1779,7 +1920,6 @@ static __poll_t fw_device_op_poll(struct file *file, poll_table * pt) const struct file_operations fw_device_ops = { .owner = THIS_MODULE, - .llseek = no_llseek, .open = fw_device_op_open, .read = fw_device_op_read, .unlocked_ioctl = fw_device_op_ioctl, diff --git a/drivers/firewire/core-device.c b/drivers/firewire/core-device.c index 68216988391f..9b0080397154 100644 --- a/drivers/firewire/core-device.c +++ b/drivers/firewire/core-device.c @@ -12,7 +12,6 @@ #include <linux/errno.h> #include <linux/firewire.h> #include <linux/firewire-constants.h> -#include <linux/idr.h> #include <linux/jiffies.h> #include <linux/kobject.h> #include <linux/list.h> @@ -31,6 +30,8 @@ #include "core.h" +#define ROOT_DIR_OFFSET 5 + void fw_csr_iterator_init(struct fw_csr_iterator *ci, const u32 *p) { ci->p = p + 1; @@ -47,6 +48,22 @@ int fw_csr_iterator_next(struct fw_csr_iterator *ci, int *key, int *value) } EXPORT_SYMBOL(fw_csr_iterator_next); +static const u32 *search_directory(const u32 *directory, int search_key) +{ + struct fw_csr_iterator ci; + int key, value; + + search_key |= CSR_DIRECTORY; + + fw_csr_iterator_init(&ci, directory); + while (fw_csr_iterator_next(&ci, &key, &value)) { + if (key == search_key) + return ci.p - 1 + value; + } + + return NULL; +} + static const u32 *search_leaf(const u32 *directory, int search_key) { struct fw_csr_iterator ci; @@ -100,10 +117,9 @@ static int textual_leaf_to_string(const u32 *block, char *buf, size_t size) * @buf: where to put the string * @size: size of @buf, in bytes * - * The string is taken from a minimal ASCII text descriptor leaf after - * the immediate entry with @key. The string is zero-terminated. - * An overlong string is silently truncated such that it and the - * zero byte fit into @size. + * The string is taken from a minimal ASCII text descriptor leaf just after the entry with the + * @key. 
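The new search_directory() above is built on the exported fw_csr_iterator API, which walks a configuration ROM directory as (key, value) pairs. A minimal sketch of an immediate-entry lookup using only that iterator:

#include <linux/errno.h>
#include <linux/firewire.h>

/* Returns the 24-bit immediate value stored under "key" in the given
 * configuration ROM directory, or -ENOENT if the key is absent. */
static int directory_get_immediate(const u32 *directory, int key)
{
	struct fw_csr_iterator ci;
	int k, v;

	fw_csr_iterator_init(&ci, directory);
	while (fw_csr_iterator_next(&ci, &k, &v)) {
		if (k == key)
			return v;
	}

	return -ENOENT;
}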
The string is zero-terminated. An overlong string is silently truncated such that it + * and the zero byte fit into @size. * * Returns strlen(buf) or a negative error code. */ @@ -133,10 +149,27 @@ static void get_ids(const u32 *directory, int *id) } } -static void get_modalias_ids(struct fw_unit *unit, int *id) +static void get_modalias_ids(const struct fw_unit *unit, int *id) { - get_ids(&fw_parent_device(unit)->config_rom[5], id); - get_ids(unit->directory, id); + const u32 *root_directory = &fw_parent_device(unit)->config_rom[ROOT_DIR_OFFSET]; + const u32 *directories[] = {NULL, NULL, NULL}; + const u32 *vendor_directory; + int i; + + directories[0] = root_directory; + + // Legacy layout of configuration ROM described in Annex 1 of 'Configuration ROM for AV/C + // Devices 1.0 (December 12, 2000, 1394 Trading Association, TA Document 1999027)'. + vendor_directory = search_directory(root_directory, CSR_VENDOR); + if (!vendor_directory) { + directories[1] = unit->directory; + } else { + directories[1] = vendor_directory; + directories[2] = unit->directory; + } + + for (i = 0; i < ARRAY_SIZE(directories) && !!directories[i]; ++i) + get_ids(directories[i], id); } static bool match_ids(const struct ieee1394_device_id *id_table, int *id) @@ -156,10 +189,10 @@ static bool match_ids(const struct ieee1394_device_id *id_table, int *id) } static const struct ieee1394_device_id *unit_match(struct device *dev, - struct device_driver *drv) + const struct device_driver *drv) { const struct ieee1394_device_id *id_table = - container_of(drv, struct fw_driver, driver)->id_table; + container_of_const(drv, struct fw_driver, driver)->id_table; int id[] = {0, 0, 0, 0}; get_modalias_ids(fw_unit(dev), id); @@ -171,9 +204,9 @@ static const struct ieee1394_device_id *unit_match(struct device *dev, return NULL; } -static bool is_fw_unit(struct device *dev); +static bool is_fw_unit(const struct device *dev); -static int fw_unit_match(struct device *dev, struct device_driver *drv) +static int fw_unit_match(struct device *dev, const struct device_driver *drv) { /* We only allow binding to fw_units. 
*/ return is_fw_unit(dev) && unit_match(dev, drv) != NULL; @@ -187,17 +220,15 @@ static int fw_unit_probe(struct device *dev) return driver->probe(fw_unit(dev), unit_match(dev, dev->driver)); } -static int fw_unit_remove(struct device *dev) +static void fw_unit_remove(struct device *dev) { struct fw_driver *driver = container_of(dev->driver, struct fw_driver, driver); driver->remove(fw_unit(dev)); - - return 0; } -static int get_modalias(struct fw_unit *unit, char *buffer, size_t buffer_size) +static int get_modalias(const struct fw_unit *unit, char *buffer, size_t buffer_size) { int id[] = {0, 0, 0, 0}; @@ -208,9 +239,9 @@ static int get_modalias(struct fw_unit *unit, char *buffer, size_t buffer_size) id[0], id[1], id[2], id[3]); } -static int fw_unit_uevent(struct device *dev, struct kobj_uevent_env *env) +static int fw_unit_uevent(const struct device *dev, struct kobj_uevent_env *env) { - struct fw_unit *unit = fw_unit(dev); + const struct fw_unit *unit = fw_unit(dev); char modalias[64]; get_modalias(unit, modalias, sizeof(modalias)); @@ -221,7 +252,7 @@ static int fw_unit_uevent(struct device *dev, struct kobj_uevent_env *env) return 0; } -struct bus_type fw_bus_type = { +const struct bus_type fw_bus_type = { .name = "firewire", .match = fw_unit_match, .probe = fw_unit_probe, @@ -253,27 +284,43 @@ static ssize_t show_immediate(struct device *dev, struct config_rom_attribute *attr = container_of(dattr, struct config_rom_attribute, attr); struct fw_csr_iterator ci; - const u32 *dir; - int key, value, ret = -ENOENT; + const u32 *directories[] = {NULL, NULL}; + int i, value = -1; - down_read(&fw_device_rwsem); + guard(rwsem_read)(&fw_device_rwsem); - if (is_fw_unit(dev)) - dir = fw_unit(dev)->directory; - else - dir = fw_device(dev)->config_rom + 5; + if (is_fw_unit(dev)) { + directories[0] = fw_unit(dev)->directory; + } else { + const u32 *root_directory = fw_device(dev)->config_rom + ROOT_DIR_OFFSET; + const u32 *vendor_directory = search_directory(root_directory, CSR_VENDOR); - fw_csr_iterator_init(&ci, dir); - while (fw_csr_iterator_next(&ci, &key, &value)) - if (attr->key == key) { - ret = snprintf(buf, buf ? PAGE_SIZE : 0, - "0x%06x\n", value); - break; + if (!vendor_directory) { + directories[0] = root_directory; + } else { + // Legacy layout of configuration ROM described in Annex 1 of + // 'Configuration ROM for AV/C Devices 1.0 (December 12, 2000, 1394 Trading + // Association, TA Document 1999027)'. + directories[0] = vendor_directory; + directories[1] = root_directory; } + } - up_read(&fw_device_rwsem); + for (i = 0; i < ARRAY_SIZE(directories) && !!directories[i]; ++i) { + int key, val; - return ret; + fw_csr_iterator_init(&ci, directories[i]); + while (fw_csr_iterator_next(&ci, &key, &val)) { + if (attr->key == key) + value = val; + } + } + + if (value < 0) + return -ENOENT; + + // Note that this function is also called by init_fw_attribute_group() with NULL pointer. + return buf ? 
sysfs_emit(buf, "0x%06x\n", value) : 0; } #define IMMEDIATE_ATTR(name, key) \ @@ -284,18 +331,31 @@ static ssize_t show_text_leaf(struct device *dev, { struct config_rom_attribute *attr = container_of(dattr, struct config_rom_attribute, attr); - const u32 *dir; + const u32 *directories[] = {NULL, NULL}; size_t bufsize; char dummy_buf[2]; - int ret; + int i, ret = -ENOENT; - down_read(&fw_device_rwsem); + guard(rwsem_read)(&fw_device_rwsem); - if (is_fw_unit(dev)) - dir = fw_unit(dev)->directory; - else - dir = fw_device(dev)->config_rom + 5; + if (is_fw_unit(dev)) { + directories[0] = fw_unit(dev)->directory; + } else { + const u32 *root_directory = fw_device(dev)->config_rom + ROOT_DIR_OFFSET; + const u32 *vendor_directory = search_directory(root_directory, CSR_VENDOR); + if (!vendor_directory) { + directories[0] = root_directory; + } else { + // Legacy layout of configuration ROM described in Annex 1 of + // 'Configuration ROM for AV/C Devices 1.0 (December 12, 2000, 1394 + // Trading Association, TA Document 1999027)'. + directories[0] = root_directory; + directories[1] = vendor_directory; + } + } + + // Note that this function is also called by init_fw_attribute_group() with NULL pointer. if (buf) { bufsize = PAGE_SIZE - 1; } else { @@ -303,17 +363,30 @@ static ssize_t show_text_leaf(struct device *dev, bufsize = 1; } - ret = fw_csr_string(dir, attr->key, buf, bufsize); - - if (ret >= 0) { - /* Strip trailing whitespace and add newline. */ - while (ret > 0 && isspace(buf[ret - 1])) - ret--; - strcpy(buf + ret, "\n"); - ret++; + for (i = 0; i < ARRAY_SIZE(directories) && !!directories[i]; ++i) { + int result = fw_csr_string(directories[i], attr->key, buf, bufsize); + // Detected. + if (result >= 0) { + ret = result; + } else if (i == 0 && attr->key == CSR_VENDOR) { + // Sony DVMC-DA1 has configuration ROM such that the descriptor leaf entry + // in the root directory follows to the directory entry for vendor ID + // instead of the immediate value for vendor ID. + result = fw_csr_string(directories[i], CSR_DIRECTORY | attr->key, buf, + bufsize); + if (result >= 0) + ret = result; + } } - up_read(&fw_device_rwsem); + if (ret < 0) + return ret; + + // Strip trailing whitespace and add newline. 
+ while (ret > 0 && isspace(buf[ret - 1])) + ret--; + strcpy(buf + ret, "\n"); + ret++; return ret; } @@ -374,8 +447,7 @@ static ssize_t rom_index_show(struct device *dev, struct fw_device *device = fw_device(dev->parent); struct fw_unit *unit = fw_unit(dev); - return snprintf(buf, PAGE_SIZE, "%d\n", - (int)(unit->directory - device->config_rom)); + return sysfs_emit(buf, "%td\n", unit->directory - device->config_rom); } static struct device_attribute fw_unit_attributes[] = { @@ -390,10 +462,10 @@ static ssize_t config_rom_show(struct device *dev, struct fw_device *device = fw_device(dev); size_t length; - down_read(&fw_device_rwsem); + guard(rwsem_read)(&fw_device_rwsem); + length = device->config_rom_length * 4; memcpy(buf, device->config_rom, length); - up_read(&fw_device_rwsem); return length; } @@ -402,14 +474,10 @@ static ssize_t guid_show(struct device *dev, struct device_attribute *attr, char *buf) { struct fw_device *device = fw_device(dev); - int ret; - down_read(&fw_device_rwsem); - ret = snprintf(buf, PAGE_SIZE, "0x%08x%08x\n", - device->config_rom[3], device->config_rom[4]); - up_read(&fw_device_rwsem); + guard(rwsem_read)(&fw_device_rwsem); - return ret; + return sysfs_emit(buf, "0x%08x%08x\n", device->config_rom[3], device->config_rom[4]); } static ssize_t is_local_show(struct device *dev, @@ -417,7 +485,7 @@ static ssize_t is_local_show(struct device *dev, { struct fw_device *device = fw_device(dev); - return sprintf(buf, "%u\n", device->is_local); + return sysfs_emit(buf, "%u\n", device->is_local); } static int units_sprintf(char *buf, const u32 *directory) @@ -449,8 +517,9 @@ static ssize_t units_show(struct device *dev, struct fw_csr_iterator ci; int key, value, i = 0; - down_read(&fw_device_rwsem); - fw_csr_iterator_init(&ci, &device->config_rom[5]); + guard(rwsem_read)(&fw_device_rwsem); + + fw_csr_iterator_init(&ci, &device->config_rom[ROOT_DIR_OFFSET]); while (fw_csr_iterator_next(&ci, &key, &value)) { if (key != (CSR_UNIT | CSR_DIRECTORY)) continue; @@ -458,7 +527,6 @@ static ssize_t units_show(struct device *dev, if (i >= PAGE_SIZE - (8 + 1 + 8 + 1)) break; } - up_read(&fw_device_rwsem); if (i) buf[i - 1] = '\n'; @@ -474,8 +542,83 @@ static struct device_attribute fw_device_attributes[] = { __ATTR_NULL, }; -static int read_rom(struct fw_device *device, - int generation, int index, u32 *data) +#define CANON_OUI 0x000085 + +static int detect_quirks_by_bus_information_block(const u32 *bus_information_block) +{ + int quirks = 0; + + if ((bus_information_block[2] & 0x000000f0) == 0) + quirks |= FW_DEVICE_QUIRK_IRM_IS_1394_1995_ONLY; + + if ((bus_information_block[3] >> 8) == CANON_OUI) + quirks |= FW_DEVICE_QUIRK_IRM_IGNORES_BUS_MANAGER; + + return quirks; +} + +struct entry_match { + unsigned int index; + u32 value; +}; + +static const struct entry_match motu_audio_express_matches[] = { + { 1, 0x030001f2 }, + { 3, 0xd1000002 }, + { 4, 0x8d000005 }, + { 6, 0x120001f2 }, + { 7, 0x13000033 }, + { 8, 0x17104800 }, +}; + +static const struct entry_match tascam_fw_series_matches[] = { + { 1, 0x0300022e }, + { 3, 0x8d000006 }, + { 4, 0xd1000001 }, + { 6, 0x1200022e }, + { 8, 0xd4000004 }, +}; + +static int detect_quirks_by_root_directory(const u32 *root_directory, unsigned int length) +{ + static const struct { + enum fw_device_quirk quirk; + const struct entry_match *matches; + unsigned int match_count; + } *entry, entries[] = { + { + .quirk = FW_DEVICE_QUIRK_ACK_PACKET_WITH_INVALID_PENDING_CODE, + .matches = motu_audio_express_matches, + .match_count = 
ARRAY_SIZE(motu_audio_express_matches), + }, + { + .quirk = FW_DEVICE_QUIRK_UNSTABLE_AT_S400, + .matches = tascam_fw_series_matches, + .match_count = ARRAY_SIZE(tascam_fw_series_matches), + }, + }; + int quirks = 0; + int i; + + for (i = 0; i < ARRAY_SIZE(entries); ++i) { + int j; + + entry = entries + i; + for (j = 0; j < entry->match_count; ++j) { + unsigned int index = entry->matches[j].index; + unsigned int value = entry->matches[j].value; + + if ((length < index) || (root_directory[index] != value)) + break; + } + if (j == entry->match_count) + quirks |= entry->quirk; + } + + return quirks; +} + +static int read_rom(struct fw_device *device, int generation, int speed, int index, u32 *data) { u64 offset = (CSR_REGISTER_BASE | CSR_CONFIG_ROM) + index * 4; int i, rcode; @@ -486,7 +629,7 @@ static int read_rom(struct fw_device *device, for (i = 10; i < 100; i += 10) { rcode = fw_run_transaction(device->card, TCODE_READ_QUADLET_REQUEST, device->node_id, - generation, device->max_speed, offset, data, 4); + generation, speed, offset, data, 4); if (rcode != RCODE_BUSY) break; msleep(i); @@ -496,7 +639,8 @@ static int read_rom(struct fw_device *device, return rcode; } -#define MAX_CONFIG_ROM_SIZE 256 +// By quadlet unit. +#define MAX_CONFIG_ROM_SIZE ((CSR_CONFIG_ROM_END - CSR_CONFIG_ROM) / sizeof(u32)) /* * Read the bus info block, perform a speed probe, and read all of the rest of @@ -509,10 +653,11 @@ static int read_rom(struct fw_device *device, static int read_config_rom(struct fw_device *device, int generation) { struct fw_card *card = device->card; - const u32 *old_rom, *new_rom; - u32 *rom, *stack; + const u32 *new_rom, *old_rom __free(kfree) = NULL; + u32 *stack, *rom __free(kfree) = NULL; u32 sp, key; - int i, end, length, ret; + int i, end, length, ret, speed; + int quirks; rom = kmalloc(sizeof(*rom) * MAX_CONFIG_ROM_SIZE + sizeof(*stack) * MAX_CONFIG_ROM_SIZE, GFP_KERNEL); @@ -522,13 +667,13 @@ static int read_config_rom(struct fw_device *device, int generation) stack = &rom[MAX_CONFIG_ROM_SIZE]; memset(rom, 0, sizeof(*rom) * MAX_CONFIG_ROM_SIZE); - device->max_speed = SCODE_100; + speed = SCODE_100; /* First read the bus info block. */ for (i = 0; i < 5; i++) { - ret = read_rom(device, generation, i, &rom[i]); + ret = read_rom(device, generation, speed, i, &rom[i]); if (ret != RCODE_COMPLETE) - goto out; + return ret; /* * As per IEEE1212 7.2, during initialization, devices can * reply with a 0 for the first quadlet of the config @@ -537,39 +682,14 @@ static int read_config_rom(struct fw_device *device, int generation) * harddisk). In that case we just fail, and the * retry mechanism will try again later. */ - if (i == 0 && rom[i] == 0) { - ret = RCODE_BUSY; - goto out; - } + if (i == 0 && rom[i] == 0) + return RCODE_BUSY; } - device->max_speed = device->node->max_speed; - - /* - * Determine the speed of - * - devices with link speed less than PHY speed, - * - devices with 1394b PHY (unless only connected to 1394a PHYs), - * - all devices if there are 1394b repeaters. - * Note, we cannot use the bus info block's link_spd as starting point - * because some buggy firmwares set it lower than necessary and because - * 1394-1995 nodes do not have the field. 
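read_config_rom() above moves its temporary buffers to scope-based cleanup: the __free(kfree) annotation from <linux/cleanup.h> frees the allocation automatically on every exit from the scope, which is what lets the old goto-based unwinding collapse into plain returns. A standalone sketch with a hypothetical parse_blob():

#include <linux/cleanup.h>
#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/types.h>

static int parse_blob(const u8 *src, size_t len)
{
	/* Freed automatically on every return path below. */
	u8 *buf __free(kfree) = kmalloc(len, GFP_KERNEL);

	if (!buf)
		return -ENOMEM;

	memcpy(buf, src, len);

	if (len == 0 || buf[0] != 0xa5)
		return -EINVAL;	/* buf is kfree()d here ... */

	return 0;		/* ... and here as well */
}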
- */ - if ((rom[2] & 0x7) < device->max_speed || - device->max_speed == SCODE_BETA || - card->beta_repeaters_present) { - u32 dummy; - - /* for S1600 and S3200 */ - if (device->max_speed == SCODE_BETA) - device->max_speed = card->link_speed; + quirks = detect_quirks_by_bus_information_block(rom); - while (device->max_speed > SCODE_100) { - if (read_rom(device, generation, 0, &dummy) == - RCODE_COMPLETE) - break; - device->max_speed--; - } - } + // Just prevent from torn writing/reading. + WRITE_ONCE(device->quirks, quirks); /* * Now parse the config rom. The config rom is a recursive @@ -590,15 +710,13 @@ static int read_config_rom(struct fw_device *device, int generation) */ key = stack[--sp]; i = key & 0xffffff; - if (WARN_ON(i >= MAX_CONFIG_ROM_SIZE)) { - ret = -ENXIO; - goto out; - } + if (WARN_ON(i >= MAX_CONFIG_ROM_SIZE)) + return -ENXIO; /* Read header quadlet for the block to get the length. */ - ret = read_rom(device, generation, i, &rom[i]); + ret = read_rom(device, generation, speed, i, &rom[i]); if (ret != RCODE_COMPLETE) - goto out; + return ret; end = i + (rom[i] >> 16) + 1; if (end > MAX_CONFIG_ROM_SIZE) { /* @@ -620,9 +738,9 @@ static int read_config_rom(struct fw_device *device, int generation) * it references another block, and push it in that case. */ for (; i < end; i++) { - ret = read_rom(device, generation, i, &rom[i]); + ret = read_rom(device, generation, speed, i, &rom[i]); if (ret != RCODE_COMPLETE) - goto out; + return ret; if ((key >> 30) != 3 || (rom[i] >> 30) < 2) continue; @@ -647,27 +765,54 @@ static int read_config_rom(struct fw_device *device, int generation) length = i; } + quirks |= detect_quirks_by_root_directory(rom + ROOT_DIR_OFFSET, length - ROOT_DIR_OFFSET); + + // Just prevent from torn writing/reading. + WRITE_ONCE(device->quirks, quirks); + + if (unlikely(quirks & FW_DEVICE_QUIRK_UNSTABLE_AT_S400)) + speed = SCODE_200; + else + speed = device->node->max_speed; + + // Determine the speed of + // - devices with link speed less than PHY speed, + // - devices with 1394b PHY (unless only connected to 1394a PHYs), + // - all devices if there are 1394b repeaters. + // Note, we cannot use the bus info block's link_spd as starting point because some buggy + // firmwares set it lower than necessary and because 1394-1995 nodes do not have the field. + if ((rom[2] & 0x7) < speed || speed == SCODE_BETA || card->beta_repeaters_present) { + u32 dummy; + + // for S1600 and S3200. 
+ if (speed == SCODE_BETA) + speed = card->link_speed; + + while (speed > SCODE_100) { + if (read_rom(device, generation, speed, 0, &dummy) == + RCODE_COMPLETE) + break; + --speed; + } + } + + device->max_speed = speed; + old_rom = device->config_rom; new_rom = kmemdup(rom, length * 4, GFP_KERNEL); - if (new_rom == NULL) { - ret = -ENOMEM; - goto out; - } + if (new_rom == NULL) + return -ENOMEM; - down_write(&fw_device_rwsem); - device->config_rom = new_rom; - device->config_rom_length = length; - up_write(&fw_device_rwsem); + scoped_guard(rwsem_write, &fw_device_rwsem) { + device->config_rom = new_rom; + device->config_rom_length = length; + } - kfree(old_rom); - ret = RCODE_COMPLETE; device->max_rec = rom[2] >> 12 & 0xf; device->cmc = rom[2] >> 30 & 1; device->irmc = rom[2] >> 31 & 1; - out: - kfree(rom); - return ret; + return RCODE_COMPLETE; } static void fw_unit_release(struct device *dev) @@ -683,7 +828,7 @@ static struct device_type fw_unit_type = { .release = fw_unit_release, }; -static bool is_fw_unit(struct device *dev) +static bool is_fw_unit(const struct device *dev) { return dev->type == &fw_unit_type; } @@ -695,7 +840,7 @@ static void create_units(struct fw_device *device) int key, value, i; i = 0; - fw_csr_iterator_init(&ci, &device->config_rom[5]); + fw_csr_iterator_init(&ci, &device->config_rom[ROOT_DIR_OFFSET]); while (fw_csr_iterator_next(&ci, &key, &value)) { if (key != (CSR_UNIT | CSR_DIRECTORY)) continue; @@ -721,14 +866,11 @@ static void create_units(struct fw_device *device) fw_unit_attributes, &unit->attribute_group); - if (device_register(&unit->device) < 0) - goto skip_unit; - fw_device_get(device); - continue; - - skip_unit: - kfree(unit); + if (device_register(&unit->device) < 0) { + put_device(&unit->device); + continue; + } } } @@ -741,24 +883,21 @@ static int shutdown_unit(struct device *device, void *data) /* * fw_device_rwsem acts as dual purpose mutex: - * - serializes accesses to fw_device_idr, * - serializes accesses to fw_device.config_rom/.config_rom_length and * fw_unit.directory, unless those accesses happen at safe occasions */ DECLARE_RWSEM(fw_device_rwsem); -DEFINE_IDR(fw_device_idr); +DEFINE_XARRAY_ALLOC(fw_device_xa); int fw_cdev_major; struct fw_device *fw_device_get_by_devt(dev_t devt) { struct fw_device *device; - down_read(&fw_device_rwsem); - device = idr_find(&fw_device_idr, MINOR(devt)); + device = xa_load(&fw_device_xa, MINOR(devt)); if (device) fw_device_get(device); - up_read(&fw_device_rwsem); return device; } @@ -784,18 +923,15 @@ static void fw_schedule_device_work(struct fw_device *device, */ #define MAX_RETRIES 10 -#define RETRY_DELAY (3 * HZ) -#define INITIAL_DELAY (HZ / 2) -#define SHUTDOWN_DELAY (2 * HZ) +#define RETRY_DELAY secs_to_jiffies(3) +#define INITIAL_DELAY msecs_to_jiffies(500) +#define SHUTDOWN_DELAY secs_to_jiffies(2) static void fw_device_shutdown(struct work_struct *work) { - struct fw_device *device = - container_of(work, struct fw_device, work.work); - int minor = MINOR(device->device.devt); + struct fw_device *device = from_work(device, work, work.work); - if (time_before64(get_jiffies_64(), - device->card->reset_jiffies + SHUTDOWN_DELAY) + if (time_is_after_jiffies64(device->card->reset_jiffies + SHUTDOWN_DELAY) && !list_empty(&device->card->link)) { fw_schedule_device_work(device, SHUTDOWN_DELAY); return; @@ -810,9 +946,7 @@ static void fw_device_shutdown(struct work_struct *work) device_for_each_child(&device->device, NULL, shutdown_unit); device_unregister(&device->device); - down_write(&fw_device_rwsem); 
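The create_units() fix above follows a core driver-model rule: once device_initialize() (or device_register(), which includes it) has run, the struct device is reference counted, so a failed registration must be unwound with put_device(), which ends in the release() callback, never with a bare kfree(). A minimal sketch of the rule, splitting registration into its two halves; all names here are hypothetical:

#include <linux/device.h>

static int register_child(struct device *parent, struct device *child,
			  void (*release)(struct device *))
{
	int err;

	device_initialize(child);	/* refcount is live from here on */
	child->parent = parent;
	child->release = release;

	err = device_add(child);
	if (err) {
		/* Drops the initial reference; release() frees the
		 * containing object. kfree(child) here would be a bug. */
		put_device(child);
		return err;
	}

	return 0;
}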
- idr_remove(&fw_device_idr, minor); - up_write(&fw_device_rwsem); + xa_erase(&fw_device_xa, MINOR(device->device.devt)); fw_device_put(device); } @@ -821,16 +955,14 @@ static void fw_device_release(struct device *dev) { struct fw_device *device = fw_device(dev); struct fw_card *card = device->card; - unsigned long flags; /* * Take the card lock so we don't set this to NULL while a * FW_NODE_UPDATED callback is being handled or while the * bus manager work looks at this node. */ - spin_lock_irqsave(&card->lock, flags); - device->node->data = NULL; - spin_unlock_irqrestore(&card->lock, flags); + scoped_guard(spinlock_irqsave, &card->lock) + fw_node_set_device(device->node, NULL); fw_node_put(device->node); kfree(device->config_rom); @@ -842,7 +974,7 @@ static struct device_type fw_device_type = { .release = fw_device_release, }; -static bool is_fw_device(struct device *dev) +static bool is_fw_device(const struct device *dev) { return dev->type == &fw_device_type; } @@ -863,66 +995,12 @@ static int update_unit(struct device *dev, void *data) static void fw_device_update(struct work_struct *work) { - struct fw_device *device = - container_of(work, struct fw_device, work.work); + struct fw_device *device = from_work(device, work, work.work); fw_device_cdev_update(device); device_for_each_child(&device->device, NULL, update_unit); } -/* - * If a device was pending for deletion because its node went away but its - * bus info block and root directory header matches that of a newly discovered - * device, revive the existing fw_device. - * The newly allocated fw_device becomes obsolete instead. - */ -static int lookup_existing_device(struct device *dev, void *data) -{ - struct fw_device *old = fw_device(dev); - struct fw_device *new = data; - struct fw_card *card = new->card; - int match = 0; - - if (!is_fw_device(dev)) - return 0; - - down_read(&fw_device_rwsem); /* serialize config_rom access */ - spin_lock_irq(&card->lock); /* serialize node access */ - - if (memcmp(old->config_rom, new->config_rom, 6 * 4) == 0 && - atomic_cmpxchg(&old->state, - FW_DEVICE_GONE, - FW_DEVICE_RUNNING) == FW_DEVICE_GONE) { - struct fw_node *current_node = new->node; - struct fw_node *obsolete_node = old->node; - - new->node = obsolete_node; - new->node->data = new; - old->node = current_node; - old->node->data = old; - - old->max_speed = new->max_speed; - old->node_id = current_node->node_id; - smp_wmb(); /* update node_id before generation */ - old->generation = card->generation; - old->config_rom_retries = 0; - fw_notice(card, "rediscovered device %s\n", dev_name(dev)); - - old->workfn = fw_device_update; - fw_schedule_device_work(old, 0); - - if (current_node == card->root_node) - fw_schedule_bm_work(card, 0); - - match = 1; - } - - spin_unlock_irq(&card->lock); - up_read(&fw_device_rwsem); - - return match; -} - enum { BC_UNKNOWN = 0, BC_UNIMPLEMENTED, BC_IMPLEMENTED, }; static void set_broadcast_channel(struct fw_device *device, int generation) @@ -983,16 +1061,28 @@ int fw_device_set_broadcast_channel(struct device *dev, void *gen) return 0; } +static int compare_configuration_rom(struct device *dev, const void *data) +{ + const struct fw_device *old = fw_device(dev); + const u32 *config_rom = data; + + if (!is_fw_device(dev)) + return 0; + + // Compare the bus information block and root_length/root_crc. 
+ return !memcmp(old->config_rom, config_rom, 6 * 4); +} + static void fw_device_init(struct work_struct *work) { - struct fw_device *device = - container_of(work, struct fw_device, work.work); + struct fw_device *device = from_work(device, work, work.work); struct fw_card *card = device->card; - struct device *revived_dev; - int minor, ret; + struct device *found; + u32 minor; + int ret; /* - * All failure paths here set node->data to NULL, so that we + * All failure paths here call fw_node_set_device(node, NULL), so that we * don't try to do device_for_each_child() on a kfree()'d * device. */ @@ -1015,24 +1105,62 @@ static void fw_device_init(struct work_struct *work) return; } - revived_dev = device_find_child(card->device, - device, lookup_existing_device); - if (revived_dev) { - put_device(revived_dev); - fw_device_release(&device->device); + // If a device was pending for deletion because its node went away but its bus info block + // and root directory header matches that of a newly discovered device, revive the + // existing fw_device. The newly allocated fw_device becomes obsolete instead. + // + // serialize config_rom access. + scoped_guard(rwsem_read, &fw_device_rwsem) { + found = device_find_child(card->device, device->config_rom, + compare_configuration_rom); + } + if (found) { + struct fw_device *reused = fw_device(found); + + if (atomic_cmpxchg(&reused->state, + FW_DEVICE_GONE, + FW_DEVICE_RUNNING) == FW_DEVICE_GONE) { + // serialize node access + scoped_guard(spinlock_irq, &card->lock) { + struct fw_node *current_node = device->node; + struct fw_node *obsolete_node = reused->node; + + device->node = obsolete_node; + fw_node_set_device(device->node, device); + reused->node = current_node; + fw_node_set_device(reused->node, reused); + + reused->max_speed = device->max_speed; + reused->node_id = current_node->node_id; + smp_wmb(); /* update node_id before generation */ + reused->generation = card->generation; + reused->config_rom_retries = 0; + fw_notice(card, "rediscovered device %s\n", + dev_name(found)); + + reused->workfn = fw_device_update; + fw_schedule_device_work(reused, 0); + + if (current_node == card->root_node) + fw_schedule_bm_work(card, 0); + } - return; + put_device(found); + fw_device_release(&device->device); + + return; + } + + put_device(found); } device_initialize(&device->device); fw_device_get(device); - down_write(&fw_device_rwsem); - minor = idr_alloc(&fw_device_idr, device, 0, 1 << MINORBITS, - GFP_KERNEL); - up_write(&fw_device_rwsem); - if (minor < 0) + // The index of allocated entry is used for minor identifier of device node. 
+ ret = xa_alloc(&fw_device_xa, &minor, device, XA_LIMIT(0, MINORMASK), GFP_KERNEL); + if (ret < 0) goto error; device->device.bus = &fw_bus_type; @@ -1070,10 +1198,10 @@ static void fw_device_init(struct work_struct *work) device->workfn = fw_device_shutdown; fw_schedule_device_work(device, SHUTDOWN_DELAY); } else { - fw_notice(card, "created device %s: GUID %08x%08x, S%d00\n", + fw_notice(card, "created device %s: GUID %08x%08x, S%d00, quirks %08x\n", dev_name(&device->device), device->config_rom[3], device->config_rom[4], - 1 << device->max_speed); + 1 << device->max_speed, device->quirks); device->config_rom_retries = 0; set_broadcast_channel(device, device->generation); @@ -1093,11 +1221,9 @@ static void fw_device_init(struct work_struct *work) return; error_with_cdev: - down_write(&fw_device_rwsem); - idr_remove(&fw_device_idr, minor); - up_write(&fw_device_rwsem); + xa_erase(&fw_device_xa, minor); error: - fw_device_put(device); /* fw_device_idr's reference */ + fw_device_put(device); // fw_device_xa's reference. put_device(&device->device); /* our reference */ } @@ -1110,7 +1236,7 @@ static int reread_config_rom(struct fw_device *device, int generation, int i, rcode; for (i = 0; i < 6; i++) { - rcode = read_rom(device, generation, i, &q); + rcode = read_rom(device, generation, device->max_speed, i, &q); if (rcode != RCODE_COMPLETE) return rcode; @@ -1130,8 +1256,7 @@ static int reread_config_rom(struct fw_device *device, int generation, static void fw_device_refresh(struct work_struct *work) { - struct fw_device *device = - container_of(work, struct fw_device, work.work); + struct fw_device *device = from_work(device, work, work.work); struct fw_card *card = device->card; int ret, node_id = device->node_id; bool changed; @@ -1197,8 +1322,7 @@ static void fw_device_refresh(struct work_struct *work) static void fw_device_workfn(struct work_struct *work) { - struct fw_device *device = container_of(to_delayed_work(work), - struct fw_device, work); + struct fw_device *device = from_work(device, to_delayed_work(work), work); device->workfn(work); } @@ -1243,7 +1367,7 @@ void fw_node_event(struct fw_card *card, struct fw_node *node, int event) * FW_NODE_UPDATED callbacks can update the node_id * and generation for the device. */ - node->data = device; + fw_node_set_device(node, device); /* * Many devices are slow to respond after bus resets, @@ -1258,7 +1382,7 @@ void fw_node_event(struct fw_card *card, struct fw_node *node, int event) case FW_NODE_INITIATED_RESET: case FW_NODE_LINK_ON: - device = node->data; + device = fw_node_get_device(node); if (device == NULL) goto create; @@ -1275,7 +1399,7 @@ void fw_node_event(struct fw_card *card, struct fw_node *node, int event) break; case FW_NODE_UPDATED: - device = node->data; + device = fw_node_get_device(node); if (device == NULL) break; @@ -1290,7 +1414,7 @@ void fw_node_event(struct fw_card *card, struct fw_node *node, int event) case FW_NODE_DESTROYED: case FW_NODE_LINK_OFF: - if (!node->data) + if (!fw_node_get_device(node)) break; /* @@ -1305,7 +1429,7 @@ void fw_node_event(struct fw_card *card, struct fw_node *node, int event) * the device in shutdown state to have that code fail * to create the device. 
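The xa_alloc()/xa_erase() pairing above replaces idr_alloc()/idr_remove() for handing out character-device minors. A small sketch of the same allocation, assuming a hypothetical my_minor_xa table:

#include <linux/kdev_t.h>
#include <linux/xarray.h>

static DEFINE_XARRAY_ALLOC(my_minor_xa);

static int allocate_minor(void *entry)
{
	u32 minor;
	int ret;

	/* Stores "entry" at the lowest free index in [0, MINORMASK]
	 * and writes that index back through "minor". */
	ret = xa_alloc(&my_minor_xa, &minor, entry, XA_LIMIT(0, MINORMASK), GFP_KERNEL);
	if (ret < 0)
		return ret;

	return minor;
}

Unlike the old idr, the xarray takes its own internal lock, which is why the fw_device_rwsem comment above no longer lists serializing the device table among its duties.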
*/ - device = node->data; + device = fw_node_get_device(node); if (atomic_xchg(&device->state, FW_DEVICE_GONE) == FW_DEVICE_RUNNING) { device->workfn = fw_device_shutdown; @@ -1315,3 +1439,7 @@ void fw_node_event(struct fw_card *card, struct fw_node *node, int event) break; } } + +#ifdef CONFIG_FIREWIRE_KUNIT_DEVICE_ATTRIBUTE_TEST +#include "device-attribute-test.c" +#endif diff --git a/drivers/firewire/core-iso.c b/drivers/firewire/core-iso.c index af70e74f9a7e..a67493862c85 100644 --- a/drivers/firewire/core-iso.c +++ b/drivers/firewire/core-iso.c @@ -22,6 +22,8 @@ #include "core.h" +#include <trace/events/firewire.h> + /* * Isochronous DMA context management */ @@ -148,12 +150,20 @@ struct fw_iso_context *fw_iso_context_create(struct fw_card *card, ctx->callback.sc = callback; ctx->callback_data = callback_data; + trace_isoc_outbound_allocate(ctx, channel, speed); + trace_isoc_inbound_single_allocate(ctx, channel, header_size); + trace_isoc_inbound_multiple_allocate(ctx); + return ctx; } EXPORT_SYMBOL(fw_iso_context_create); void fw_iso_context_destroy(struct fw_iso_context *ctx) { + trace_isoc_outbound_destroy(ctx); + trace_isoc_inbound_single_destroy(ctx); + trace_isoc_inbound_multiple_destroy(ctx); + ctx->card->driver->free_iso_context(ctx); } EXPORT_SYMBOL(fw_iso_context_destroy); @@ -161,12 +171,18 @@ EXPORT_SYMBOL(fw_iso_context_destroy); int fw_iso_context_start(struct fw_iso_context *ctx, int cycle, int sync, int tags) { + trace_isoc_outbound_start(ctx, cycle); + trace_isoc_inbound_single_start(ctx, cycle, sync, tags); + trace_isoc_inbound_multiple_start(ctx, cycle, sync, tags); + return ctx->card->driver->start_iso(ctx, cycle, sync, tags); } EXPORT_SYMBOL(fw_iso_context_start); int fw_iso_context_set_channels(struct fw_iso_context *ctx, u64 *channels) { + trace_isoc_inbound_multiple_channels(ctx, *channels); + return ctx->card->driver->set_iso_channels(ctx, channels); } @@ -175,25 +191,81 @@ int fw_iso_context_queue(struct fw_iso_context *ctx, struct fw_iso_buffer *buffer, unsigned long payload) { + trace_isoc_outbound_queue(ctx, payload, packet); + trace_isoc_inbound_single_queue(ctx, payload, packet); + trace_isoc_inbound_multiple_queue(ctx, payload, packet); + return ctx->card->driver->queue_iso(ctx, packet, buffer, payload); } EXPORT_SYMBOL(fw_iso_context_queue); void fw_iso_context_queue_flush(struct fw_iso_context *ctx) { + trace_isoc_outbound_flush(ctx); + trace_isoc_inbound_single_flush(ctx); + trace_isoc_inbound_multiple_flush(ctx); + ctx->card->driver->flush_queue_iso(ctx); } EXPORT_SYMBOL(fw_iso_context_queue_flush); +/** + * fw_iso_context_flush_completions() - process isochronous context in current process context. + * @ctx: the isochronous context + * + * Process the isochronous context in the current process context. The registered callback function + * is called when a queued packet buffer with the interrupt flag is completed, either after + * transmission in the IT context or after being filled in the IR context. Additionally, the + * callback function is also called for the packet buffer completed at last. Furthermore, the + * callback function is called as well when the header buffer in the context becomes full. If it is + * required to process the context asynchronously, fw_iso_context_schedule_flush_completions() is + * available instead. + * + * Context: Process context. May sleep due to disable_work_sync(). 
+ */ int fw_iso_context_flush_completions(struct fw_iso_context *ctx) { - return ctx->card->driver->flush_iso_completions(ctx); + int err; + + trace_isoc_outbound_flush_completions(ctx); + trace_isoc_inbound_single_flush_completions(ctx); + trace_isoc_inbound_multiple_flush_completions(ctx); + + might_sleep(); + + // Avoid dead lock due to programming mistake. + if (WARN_ON_ONCE(current_work() == &ctx->work)) + return 0; + + disable_work_sync(&ctx->work); + + err = ctx->card->driver->flush_iso_completions(ctx); + + enable_work(&ctx->work); + + return err; } EXPORT_SYMBOL(fw_iso_context_flush_completions); int fw_iso_context_stop(struct fw_iso_context *ctx) { - return ctx->card->driver->stop_iso(ctx); + int err; + + trace_isoc_outbound_stop(ctx); + trace_isoc_inbound_single_stop(ctx); + trace_isoc_inbound_multiple_stop(ctx); + + might_sleep(); + + // Avoid dead lock due to programming mistake. + if (WARN_ON_ONCE(current_work() == &ctx->work)) + return 0; + + err = ctx->card->driver->stop_iso(ctx); + + cancel_work_sync(&ctx->work); + + return err; } EXPORT_SYMBOL(fw_iso_context_stop); @@ -343,9 +415,8 @@ void fw_iso_resource_manage(struct fw_card *card, int generation, u32 channels_lo = channels_mask >> 32; /* channels 63...32 */ int irm_id, ret, c = -EINVAL; - spin_lock_irq(&card->lock); - irm_id = card->irm_node->node_id; - spin_unlock_irq(&card->lock); + scoped_guard(spinlock_irq, &card->lock) + irm_id = card->irm_node->node_id; if (channels_hi) c = manage_channel(card, irm_id, generation, channels_hi, diff --git a/drivers/firewire/core-topology.c b/drivers/firewire/core-topology.c index b63d55f5ebd3..ed3ae8cdb0cd 100644 --- a/drivers/firewire/core-topology.c +++ b/drivers/firewire/core-topology.c @@ -20,82 +20,8 @@ #include <asm/byteorder.h> #include "core.h" - -#define SELF_ID_PHY_ID(q) (((q) >> 24) & 0x3f) -#define SELF_ID_EXTENDED(q) (((q) >> 23) & 0x01) -#define SELF_ID_LINK_ON(q) (((q) >> 22) & 0x01) -#define SELF_ID_GAP_COUNT(q) (((q) >> 16) & 0x3f) -#define SELF_ID_PHY_SPEED(q) (((q) >> 14) & 0x03) -#define SELF_ID_CONTENDER(q) (((q) >> 11) & 0x01) -#define SELF_ID_PHY_INITIATOR(q) (((q) >> 1) & 0x01) -#define SELF_ID_MORE_PACKETS(q) (((q) >> 0) & 0x01) - -#define SELF_ID_EXT_SEQUENCE(q) (((q) >> 20) & 0x07) - -#define SELFID_PORT_CHILD 0x3 -#define SELFID_PORT_PARENT 0x2 -#define SELFID_PORT_NCONN 0x1 -#define SELFID_PORT_NONE 0x0 - -static u32 *count_ports(u32 *sid, int *total_port_count, int *child_port_count) -{ - u32 q; - int port_type, shift, seq; - - *total_port_count = 0; - *child_port_count = 0; - - shift = 6; - q = *sid; - seq = 0; - - while (1) { - port_type = (q >> shift) & 0x03; - switch (port_type) { - case SELFID_PORT_CHILD: - (*child_port_count)++; - fallthrough; - case SELFID_PORT_PARENT: - case SELFID_PORT_NCONN: - (*total_port_count)++; - fallthrough; - case SELFID_PORT_NONE: - break; - } - - shift -= 2; - if (shift == 0) { - if (!SELF_ID_MORE_PACKETS(q)) - return sid + 1; - - shift = 16; - sid++; - q = *sid; - - /* - * Check that the extra packets actually are - * extended self ID packets and that the - * sequence numbers in the extended self ID - * packets increase as expected. 
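fw_iso_context_flush_completions() above uses a temporary-disable bracket so the context's work item and the synchronous flush can never run concurrently. A hypothetical sketch of just that exclusion pattern, where process_completions stands in for the driver callback:

#include <linux/bug.h>
#include <linux/workqueue.h>

static int flush_now(struct work_struct *work, int (*process_completions)(void))
{
	int err;

	/* Deadlock guard: do nothing if invoked from the work item
	 * itself, since disable_work_sync() would wait on us. */
	if (WARN_ON_ONCE(current_work() == work))
		return 0;

	disable_work_sync(work);	/* waits out any running instance */
	err = process_completions();	/* safe: the work cannot run now */
	enable_work(work);		/* permits queueing again */

	return err;
}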
- */ - - if (!SELF_ID_EXTENDED(q) || - seq != SELF_ID_EXT_SEQUENCE(q)) - return NULL; - - seq++; - } - } -} - -static int get_port_type(u32 *sid, int port_index) -{ - int index, shift; - - index = (port_index + 5) / 8; - shift = 16 - ((port_index + 5) & 7) * 2; - return (sid[index] >> shift) & 0x03; -} +#include "phy-packet-definitions.h" +#include <trace/events/firewire.h> static struct fw_node *fw_node_create(u32 sid, int port_count, int color) { @@ -106,13 +32,14 @@ static struct fw_node *fw_node_create(u32 sid, int port_count, int color) return NULL; node->color = color; - node->node_id = LOCAL_BUS | SELF_ID_PHY_ID(sid); - node->link_on = SELF_ID_LINK_ON(sid); - node->phy_speed = SELF_ID_PHY_SPEED(sid); - node->initiated_reset = SELF_ID_PHY_INITIATOR(sid); + node->node_id = LOCAL_BUS | phy_packet_self_id_get_phy_id(sid); + node->link_on = phy_packet_self_id_zero_get_link_active(sid); + // NOTE: Only two bits, thus only for SCODE_100, SCODE_200, SCODE_400, and SCODE_BETA. + node->phy_speed = phy_packet_self_id_zero_get_scode(sid); + node->initiated_reset = phy_packet_self_id_zero_get_initiated_reset(sid); node->port_count = port_count; - refcount_set(&node->ref_count, 1); + kref_init(&node->kref); INIT_LIST_HEAD(&node->link); return node; @@ -129,7 +56,7 @@ static struct fw_node *fw_node_create(u32 sid, int port_count, int color) * two cases: either the path goes through this node, in which case * the hop count is the sum of the two biggest child depths plus 2. * Or it could be the case that the max hop path is entirely - * containted in a child tree, in which case the max hop count is just + * contained in a child tree, in which case the max hop count is just * the max hop count of this child. */ static void update_hop_count(struct fw_node *node) @@ -168,13 +95,16 @@ static inline struct fw_node *fw_node(struct list_head *l) * internally consistent. On success this function returns the * fw_node corresponding to the local card otherwise NULL. 
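The removed count_ports()/get_port_type() above encode the self-ID port layout directly: packet #0 carries three two-bit port fields in bits 7..2, and each extended self-ID packet carries eight more. The replacement helpers live in phy-packet-definitions.h, but the arithmetic is unchanged; a standalone sketch of it:

#include <linux/types.h>

/* Port status codes from the self-ID packet format. */
enum self_id_port_status {
	PORT_NONE   = 0x0,
	PORT_NCONN  = 0x1,
	PORT_PARENT = 0x2,
	PORT_CHILD  = 0x3,
};

static enum self_id_port_status
self_id_port_status(const u32 *sequence, unsigned int port_index)
{
	/* Ports 0..2 occupy slots 5..7 of packet #0; later ports fall
	 * into extended packets at eight slots per quadlet. */
	unsigned int slot = port_index + 5;
	unsigned int quadlet = slot / 8;
	unsigned int shift = 16 - (slot % 8) * 2;

	return (sequence[quadlet] >> shift) & 0x3;
}

For port 0 this yields quadlet 0 and shift 6, matching the starting shift in the removed count_ports().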
*/ -static struct fw_node *build_tree(struct fw_card *card, - u32 *sid, int self_id_count) +static struct fw_node *build_tree(struct fw_card *card, const u32 *sid, int self_id_count, + unsigned int generation) { + struct self_id_sequence_enumerator enumerator = { + .cursor = sid, + .quadlet_count = self_id_count, + }; struct fw_node *node, *child, *local_node, *irm_node; - struct list_head stack, *h; - u32 *next_sid, *end, q; - int i, port_count, child_port_count, phy_id, parent_count, stack_depth; + struct list_head stack; + int phy_id, stack_depth; int gap_count; bool beta_repeaters_present; @@ -182,24 +112,56 @@ static struct fw_node *build_tree(struct fw_card *card, node = NULL; INIT_LIST_HEAD(&stack); stack_depth = 0; - end = sid + self_id_count; phy_id = 0; irm_node = NULL; - gap_count = SELF_ID_GAP_COUNT(*sid); + gap_count = phy_packet_self_id_zero_get_gap_count(*sid); beta_repeaters_present = false; - while (sid < end) { - next_sid = count_ports(sid, &port_count, &child_port_count); + while (enumerator.quadlet_count > 0) { + unsigned int child_port_count = 0; + unsigned int total_port_count = 0; + unsigned int parent_count = 0; + unsigned int quadlet_count; + const u32 *self_id_sequence; + unsigned int port_capacity; + enum phy_packet_self_id_port_status port_status; + unsigned int port_index; + struct list_head *h; + int i; + + self_id_sequence = self_id_sequence_enumerator_next(&enumerator, &quadlet_count); + if (IS_ERR(self_id_sequence)) { + if (PTR_ERR(self_id_sequence) != -ENODATA) { + fw_err(card, "inconsistent extended self IDs: %ld\n", + PTR_ERR(self_id_sequence)); + return NULL; + } + break; + } - if (next_sid == NULL) { - fw_err(card, "inconsistent extended self IDs\n"); - return NULL; + port_capacity = self_id_sequence_get_port_capacity(quadlet_count); + trace_self_id_sequence(card->index, self_id_sequence, quadlet_count, generation); + + for (port_index = 0; port_index < port_capacity; ++port_index) { + port_status = self_id_sequence_get_port_status(self_id_sequence, quadlet_count, + port_index); + switch (port_status) { + case PHY_PACKET_SELF_ID_PORT_STATUS_CHILD: + ++child_port_count; + fallthrough; + case PHY_PACKET_SELF_ID_PORT_STATUS_PARENT: + case PHY_PACKET_SELF_ID_PORT_STATUS_NCONN: + ++total_port_count; + fallthrough; + case PHY_PACKET_SELF_ID_PORT_STATUS_NONE: + default: + break; + } } - q = *sid; - if (phy_id != SELF_ID_PHY_ID(q)) { + if (phy_id != phy_packet_self_id_get_phy_id(self_id_sequence[0])) { fw_err(card, "PHY ID mismatch in self ID: %d != %d\n", - phy_id, SELF_ID_PHY_ID(q)); + phy_id, phy_packet_self_id_get_phy_id(self_id_sequence[0])); return NULL; } @@ -220,7 +182,7 @@ static struct fw_node *build_tree(struct fw_card *card, */ child = fw_node(h); - node = fw_node_create(q, port_count, card->color); + node = fw_node_create(self_id_sequence[0], total_port_count, card->color); if (node == NULL) { fw_err(card, "out of memory while building topology\n"); return NULL; @@ -229,48 +191,40 @@ static struct fw_node *build_tree(struct fw_card *card, if (phy_id == (card->node_id & 0x3f)) local_node = node; - if (SELF_ID_CONTENDER(q)) + if (phy_packet_self_id_zero_get_contender(self_id_sequence[0])) irm_node = node; - parent_count = 0; - - for (i = 0; i < port_count; i++) { - switch (get_port_type(sid, i)) { - case SELFID_PORT_PARENT: - /* - * Who's your daddy? We dont know the - * parent node at this time, so we - * temporarily abuse node->color for - * remembering the entry in the - * node->ports array where the parent - * node should be. 
Later, when we
-				 * handle the parent node, we fix up
-				 * the reference.
-				 */
-				parent_count++;
-				node->color = i;
+		for (port_index = 0; port_index < total_port_count; ++port_index) {
+			port_status = self_id_sequence_get_port_status(self_id_sequence, quadlet_count,
+								       port_index);
+			switch (port_status) {
+			case PHY_PACKET_SELF_ID_PORT_STATUS_PARENT:
+				// Who's your daddy? We don't know the parent node at this time, so
+				// we temporarily abuse node->color for remembering the entry in
+				// the node->ports array where the parent node should be. Later,
+				// when we handle the parent node, we fix up the reference.
+				++parent_count;
+				node->color = port_index;
 				break;
 
-			case SELFID_PORT_CHILD:
-				node->ports[i] = child;
-				/*
-				 * Fix up parent reference for this
-				 * child node.
-				 */
+			case PHY_PACKET_SELF_ID_PORT_STATUS_CHILD:
+				node->ports[port_index] = child;
+				// Fix up parent reference for this child node.
 				child->ports[child->color] = node;
 				child->color = card->color;
 				child = fw_node(child->link.next);
 				break;
+			case PHY_PACKET_SELF_ID_PORT_STATUS_NCONN:
+			case PHY_PACKET_SELF_ID_PORT_STATUS_NONE:
+			default:
+				break;
 			}
 		}
 
-		/*
-		 * Check that the node reports exactly one parent
-		 * port, except for the root, which of course should
-		 * have no parents.
-		 */
-		if ((next_sid == end && parent_count != 0) ||
-		    (next_sid < end && parent_count != 1)) {
+		// Check that the node reports exactly one parent port, except for the root, which
+		// of course should have no parents.
+		if ((enumerator.quadlet_count == 0 && parent_count != 0) ||
+		    (enumerator.quadlet_count > 0 && parent_count != 1)) {
 			fw_err(card, "parent port inconsistency for node %d: "
 			       "parent_count=%d\n", phy_id, parent_count);
 			return NULL;
@@ -281,20 +235,16 @@ static struct fw_node *build_tree(struct fw_card *card,
 		list_add_tail(&node->link, &stack);
 		stack_depth += 1 - child_port_count;
 
-		if (node->phy_speed == SCODE_BETA &&
-		    parent_count + child_port_count > 1)
+		if (node->phy_speed == SCODE_BETA && parent_count + child_port_count > 1)
 			beta_repeaters_present = true;
 
-		/*
-		 * If PHYs report different gap counts, set an invalid count
-		 * which will force a gap count reconfiguration and a reset.
-		 */
-		if (SELF_ID_GAP_COUNT(q) != gap_count)
-			gap_count = 0;
+		// If PHYs report different gap counts, set an invalid count which will force a gap
+		// count reconfiguration and a reset.
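// Illustrative note (not part of this change): GAP_COUNT_MISMATCHED is the
// driver-reserved value 0, which a consistent bus never reports, so later
// bus-management work can detect it and broadcast a PHY configuration packet,
// for example:
//
//	fw_send_phy_config(card, FW_PHY_CONFIG_NO_NODE_ID, generation,
//			   new_gap_count);
//
// where new_gap_count would be recomputed from the topology's maximum hop count.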
+ if (phy_packet_self_id_zero_get_gap_count(self_id_sequence[0]) != gap_count) + gap_count = GAP_COUNT_MISMATCHED; update_hop_count(node); - sid = next_sid; phy_id++; } @@ -376,15 +326,14 @@ static void report_found_node(struct fw_card *card, } void fw_destroy_nodes(struct fw_card *card) +__must_hold(&card->lock) { - unsigned long flags; + lockdep_assert_held(&card->lock); - spin_lock_irqsave(&card->lock, flags); card->color++; if (card->local_node != NULL) for_each_fw_node(card, card->local_node, report_lost_node); card->local_node = NULL; - spin_unlock_irqrestore(&card->lock, flags); } static void move_tree(struct fw_node *node0, struct fw_node *node1, int port) @@ -488,70 +437,71 @@ static void update_tree(struct fw_card *card, struct fw_node *root) } } -static void update_topology_map(struct fw_card *card, - u32 *self_ids, int self_id_count) +static void update_topology_map(__be32 *buffer, size_t buffer_size, int root_node_id, + const u32 *self_ids, int self_id_count) { - int node_count = (card->root_node->node_id & 0x3f) + 1; - __be32 *map = card->topology_map; + __be32 *map = buffer; + u32 next_generation = be32_to_cpu(buffer[1]) + 1; + int node_count = (root_node_id & 0x3f) + 1; + + memset(map, 0, buffer_size); *map++ = cpu_to_be32((self_id_count + 2) << 16); - *map++ = cpu_to_be32(be32_to_cpu(card->topology_map[1]) + 1); + *map++ = cpu_to_be32(next_generation); *map++ = cpu_to_be32((node_count << 16) | self_id_count); while (self_id_count--) *map++ = cpu_to_be32p(self_ids++); - fw_compute_block_crc(card->topology_map); + fw_compute_block_crc(buffer); } void fw_core_handle_bus_reset(struct fw_card *card, int node_id, int generation, int self_id_count, u32 *self_ids, bool bm_abdicate) { struct fw_node *local_node; - unsigned long flags; - - /* - * If the selfID buffer is not the immediate successor of the - * previously processed one, we cannot reliably compare the - * old and new topologies. - */ - if (!is_next_generation(generation, card->generation) && - card->local_node != NULL) { - fw_destroy_nodes(card); - card->bm_retries = 0; - } - spin_lock_irqsave(&card->lock, flags); - - card->broadcast_channel_allocated = card->broadcast_channel_auto_allocated; - card->node_id = node_id; - /* - * Update node_id before generation to prevent anybody from using - * a stale node_id together with a current generation. - */ - smp_wmb(); - card->generation = generation; - card->reset_jiffies = get_jiffies_64(); - card->bm_node_id = 0xffff; - card->bm_abdicate = bm_abdicate; - fw_schedule_bm_work(card, 0); - - local_node = build_tree(card, self_ids, self_id_count); + trace_bus_reset_handle(card->index, generation, node_id, bm_abdicate, self_ids, self_id_count); - update_topology_map(card, self_ids, self_id_count); + scoped_guard(spinlock, &card->lock) { + // If the selfID buffer is not the immediate successor of the + // previously processed one, we cannot reliably compare the + // old and new topologies. + if (!is_next_generation(generation, card->generation) && card->local_node != NULL) { + fw_destroy_nodes(card); + card->bm_retries = 0; + } + card->broadcast_channel_allocated = card->broadcast_channel_auto_allocated; + card->node_id = node_id; + // Update node_id before generation to prevent anybody from using + // a stale node_id together with a current generation. 
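// Illustrative sketch (not part of this change) of the reader side that pairs
// with the smp_wmb() below; reading in the opposite order guarantees that a
// current generation is never combined with a stale node_id:
//
//	generation = READ_ONCE(card->generation);
//	smp_rmb();	/* pairs with smp_wmb() in fw_core_handle_bus_reset() */
//	node_id = READ_ONCE(card->node_id);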
+ smp_wmb(); + card->generation = generation; + card->reset_jiffies = get_jiffies_64(); + card->bm_node_id = 0xffff; + card->bm_abdicate = bm_abdicate; + + local_node = build_tree(card, self_ids, self_id_count, generation); + + card->color++; + + if (local_node == NULL) { + fw_err(card, "topology build failed\n"); + // FIXME: We need to issue a bus reset in this case. + } else if (card->local_node == NULL) { + card->local_node = local_node; + for_each_fw_node(card, local_node, report_found_node); + } else { + update_tree(card, local_node); + } + } - card->color++; + fw_schedule_bm_work(card, 0); - if (local_node == NULL) { - fw_err(card, "topology build failed\n"); - /* FIXME: We need to issue a bus reset in this case. */ - } else if (card->local_node == NULL) { - card->local_node = local_node; - for_each_fw_node(card, local_node, report_found_node); - } else { - update_tree(card, local_node); + // Just used by transaction layer. + scoped_guard(spinlock, &card->topology_map.lock) { + update_topology_map(card->topology_map.buffer, sizeof(card->topology_map.buffer), + card->root_node->node_id, self_ids, self_id_count); } - - spin_unlock_irqrestore(&card->lock, flags); } EXPORT_SYMBOL(fw_core_handle_bus_reset); diff --git a/drivers/firewire/core-trace.c b/drivers/firewire/core-trace.c new file mode 100644 index 000000000000..b70947fc7b8d --- /dev/null +++ b/drivers/firewire/core-trace.c @@ -0,0 +1,16 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +// Copyright (c) 2024 Takashi Sakamoto + +#include <linux/types.h> +#include <linux/err.h> +#include "packet-header-definitions.h" +#include "phy-packet-definitions.h" + +#define CREATE_TRACE_POINTS +#include <trace/events/firewire.h> + +#ifdef TRACEPOINTS_ENABLED +EXPORT_TRACEPOINT_SYMBOL_GPL(isoc_inbound_single_completions); +EXPORT_TRACEPOINT_SYMBOL_GPL(isoc_inbound_multiple_completions); +EXPORT_TRACEPOINT_SYMBOL_GPL(isoc_outbound_completions); +#endif diff --git a/drivers/firewire/core-transaction.c b/drivers/firewire/core-transaction.c index ac487c96bb71..7fea11a5e359 100644 --- a/drivers/firewire/core-transaction.c +++ b/drivers/firewire/core-transaction.c @@ -13,7 +13,6 @@ #include <linux/firewire-constants.h> #include <linux/fs.h> #include <linux/init.h> -#include <linux/idr.h> #include <linux/jiffies.h> #include <linux/kernel.h> #include <linux/list.h> @@ -29,74 +28,93 @@ #include <asm/byteorder.h> #include "core.h" +#include "packet-header-definitions.h" +#include "phy-packet-definitions.h" +#include <trace/events/firewire.h> -#define HEADER_PRI(pri) ((pri) << 0) -#define HEADER_TCODE(tcode) ((tcode) << 4) -#define HEADER_RETRY(retry) ((retry) << 8) -#define HEADER_TLABEL(tlabel) ((tlabel) << 10) -#define HEADER_DESTINATION(destination) ((destination) << 16) -#define HEADER_SOURCE(source) ((source) << 16) -#define HEADER_RCODE(rcode) ((rcode) << 12) -#define HEADER_OFFSET_HIGH(offset_high) ((offset_high) << 0) -#define HEADER_DATA_LENGTH(length) ((length) << 16) -#define HEADER_EXTENDED_TCODE(tcode) ((tcode) << 0) - -#define HEADER_GET_TCODE(q) (((q) >> 4) & 0x0f) -#define HEADER_GET_TLABEL(q) (((q) >> 10) & 0x3f) -#define HEADER_GET_RCODE(q) (((q) >> 12) & 0x0f) -#define HEADER_GET_DESTINATION(q) (((q) >> 16) & 0xffff) -#define HEADER_GET_SOURCE(q) (((q) >> 16) & 0xffff) -#define HEADER_GET_OFFSET_HIGH(q) (((q) >> 0) & 0xffff) -#define HEADER_GET_DATA_LENGTH(q) (((q) >> 16) & 0xffff) -#define HEADER_GET_EXTENDED_TCODE(q) (((q) >> 0) & 0xffff) - -#define HEADER_DESTINATION_IS_BROADCAST(q) \ - (((q) & HEADER_DESTINATION(0x3f)) == 
HEADER_DESTINATION(0x3f)) - -#define PHY_PACKET_CONFIG 0x0 -#define PHY_PACKET_LINK_ON 0x1 -#define PHY_PACKET_SELF_ID 0x2 - -#define PHY_CONFIG_GAP_COUNT(gap_count) (((gap_count) << 16) | (1 << 22)) -#define PHY_CONFIG_ROOT_ID(node_id) ((((node_id) & 0x3f) << 24) | (1 << 23)) -#define PHY_IDENTIFIER(id) ((id) << 30) +#define HEADER_DESTINATION_IS_BROADCAST(header) \ + ((async_header_get_destination(header) & 0x3f) == 0x3f) /* returns 0 if the split timeout handler is already running */ static int try_cancel_split_timeout(struct fw_transaction *t) { if (t->is_split_transaction) - return del_timer(&t->split_timeout_timer); + return timer_delete(&t->split_timeout_timer); else return 1; } -static int close_transaction(struct fw_transaction *transaction, - struct fw_card *card, int rcode) +// card->transactions.lock must be acquired in advance. +static void remove_transaction_entry(struct fw_card *card, struct fw_transaction *entry) { - struct fw_transaction *t; - unsigned long flags; - - spin_lock_irqsave(&card->lock, flags); - list_for_each_entry(t, &card->transaction_list, link) { - if (t == transaction) { - if (!try_cancel_split_timeout(t)) { - spin_unlock_irqrestore(&card->lock, flags); - goto timed_out; - } - list_del_init(&t->link); - card->tlabel_mask &= ~(1ULL << t->tlabel); - break; + list_del_init(&entry->link); + card->transactions.tlabel_mask &= ~(1ULL << entry->tlabel); +} + +// Must be called without holding card->transactions.lock. +void fw_cancel_pending_transactions(struct fw_card *card) +{ + struct fw_transaction *t, *tmp; + LIST_HEAD(pending_list); + + // NOTE: This can be without irqsave when we can guarantee that __fw_send_request() for + // local destination never runs in any type of IRQ context. + scoped_guard(spinlock_irqsave, &card->transactions.lock) { + list_for_each_entry_safe(t, tmp, &card->transactions.list, link) { + if (try_cancel_split_timeout(t)) + list_move(&t->link, &pending_list); } } - spin_unlock_irqrestore(&card->lock, flags); - if (&t->link != &card->transaction_list) { - t->callback(card, rcode, NULL, 0, t->callback_data); - return 0; + list_for_each_entry_safe(t, tmp, &pending_list, link) { + list_del(&t->link); + + if (!t->with_tstamp) { + t->callback.without_tstamp(card, RCODE_CANCELLED, NULL, 0, + t->callback_data); + } else { + t->callback.with_tstamp(card, RCODE_CANCELLED, t->packet.timestamp, 0, + NULL, 0, t->callback_data); + } + } +} + +// card->transactions.lock must be acquired in advance. +#define find_and_pop_transaction_entry(card, condition) \ +({ \ + struct fw_transaction *iter, *t = NULL; \ + list_for_each_entry(iter, &card->transactions.list, link) { \ + if (condition) { \ + t = iter; \ + break; \ + } \ + } \ + if (t && try_cancel_split_timeout(t)) \ + remove_transaction_entry(card, t); \ + t; \ +}) + +static int close_transaction(struct fw_transaction *transaction, struct fw_card *card, int rcode, + u32 response_tstamp) +{ + struct fw_transaction *t; + + // NOTE: This can be without irqsave when we can guarantee that __fw_send_request() for + // local destination never runs in any type of IRQ context. 
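// The macro above is a GNU statement expression: `condition` is evaluated with
// `iter` bound to each pending transaction in turn. The two call sites in this
// file pass, respectively:
//
//	t = find_and_pop_transaction_entry(card, iter == transaction);
//	t = find_and_pop_transaction_entry(card,
//			iter->node_id == source && iter->tlabel == tlabel);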
+ scoped_guard(spinlock_irqsave, &card->transactions.lock) { + t = find_and_pop_transaction_entry(card, iter == transaction); + if (!t) + return -ENOENT; } - timed_out: - return -ENOENT; + if (!t->with_tstamp) { + t->callback.without_tstamp(card, rcode, NULL, 0, t->callback_data); + } else { + t->callback.with_tstamp(card, rcode, t->packet.timestamp, response_tstamp, NULL, 0, + t->callback_data); + } + + return 0; } /* @@ -106,6 +124,8 @@ static int close_transaction(struct fw_transaction *transaction, int fw_cancel_transaction(struct fw_card *card, struct fw_transaction *transaction) { + u32 tstamp; + /* * Cancel the packet transmission if it's still queued. That * will call the packet transmission callback which cancels @@ -120,77 +140,99 @@ int fw_cancel_transaction(struct fw_card *card, * if the transaction is still pending and remove it in that case. */ - return close_transaction(transaction, card, RCODE_CANCELLED); + if (transaction->packet.ack == 0) { + // The timestamp is reused since it was just read now. + tstamp = transaction->packet.timestamp; + } else { + u32 curr_cycle_time = 0; + + (void)fw_card_read_cycle_time(card, &curr_cycle_time); + tstamp = cycle_time_to_ohci_tstamp(curr_cycle_time); + } + + return close_transaction(transaction, card, RCODE_CANCELLED, tstamp); } EXPORT_SYMBOL(fw_cancel_transaction); static void split_transaction_timeout_callback(struct timer_list *timer) { - struct fw_transaction *t = from_timer(t, timer, split_timeout_timer); + struct fw_transaction *t = timer_container_of(t, timer, split_timeout_timer); struct fw_card *card = t->card; - unsigned long flags; - spin_lock_irqsave(&card->lock, flags); - if (list_empty(&t->link)) { - spin_unlock_irqrestore(&card->lock, flags); - return; + scoped_guard(spinlock_irqsave, &card->transactions.lock) { + if (list_empty(&t->link)) + return; + remove_transaction_entry(card, t); } - list_del(&t->link); - card->tlabel_mask &= ~(1ULL << t->tlabel); - spin_unlock_irqrestore(&card->lock, flags); - t->callback(card, RCODE_CANCELLED, NULL, 0, t->callback_data); + if (!t->with_tstamp) { + t->callback.without_tstamp(card, RCODE_CANCELLED, NULL, 0, t->callback_data); + } else { + t->callback.with_tstamp(card, RCODE_CANCELLED, t->packet.timestamp, + t->split_timeout_cycle, NULL, 0, t->callback_data); + } } static void start_split_transaction_timeout(struct fw_transaction *t, struct fw_card *card) { - unsigned long flags; - - spin_lock_irqsave(&card->lock, flags); + unsigned long delta; - if (list_empty(&t->link) || WARN_ON(t->is_split_transaction)) { - spin_unlock_irqrestore(&card->lock, flags); + if (list_empty(&t->link) || WARN_ON(t->is_split_transaction)) return; - } t->is_split_transaction = true; - mod_timer(&t->split_timeout_timer, - jiffies + card->split_timeout_jiffies); - spin_unlock_irqrestore(&card->lock, flags); + // NOTE: This can be without irqsave when we can guarantee that __fw_send_request() for + // local destination never runs in any type of IRQ context. 
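// For reference: scoped_guard() from <linux/cleanup.h> emits the unlock
// automatically at end of scope, so the statement below is roughly equivalent
// to the open-coded pattern it replaces:
//
//	spin_lock_irqsave(&card->split_timeout.lock, flags);
//	delta = card->split_timeout.jiffies;
//	spin_unlock_irqrestore(&card->split_timeout.lock, flags);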
+ scoped_guard(spinlock_irqsave, &card->split_timeout.lock) + delta = card->split_timeout.jiffies; + mod_timer(&t->split_timeout_timer, jiffies + delta); } +static u32 compute_split_timeout_timestamp(struct fw_card *card, u32 request_timestamp); + static void transmit_complete_callback(struct fw_packet *packet, struct fw_card *card, int status) { struct fw_transaction *t = container_of(packet, struct fw_transaction, packet); + trace_async_request_outbound_complete((uintptr_t)t, card->index, packet->generation, + packet->speed, status, packet->timestamp); + switch (status) { case ACK_COMPLETE: - close_transaction(t, card, RCODE_COMPLETE); + close_transaction(t, card, RCODE_COMPLETE, packet->timestamp); break; case ACK_PENDING: + { + // NOTE: This can be without irqsave when we can guarantee that __fw_send_request() for + // local destination never runs in any type of IRQ context. + scoped_guard(spinlock_irqsave, &card->split_timeout.lock) { + t->split_timeout_cycle = + compute_split_timeout_timestamp(card, packet->timestamp) & 0xffff; + } start_split_transaction_timeout(t, card); break; + } case ACK_BUSY_X: case ACK_BUSY_A: case ACK_BUSY_B: - close_transaction(t, card, RCODE_BUSY); + close_transaction(t, card, RCODE_BUSY, packet->timestamp); break; case ACK_DATA_ERROR: - close_transaction(t, card, RCODE_DATA_ERROR); + close_transaction(t, card, RCODE_DATA_ERROR, packet->timestamp); break; case ACK_TYPE_ERROR: - close_transaction(t, card, RCODE_TYPE_ERROR); + close_transaction(t, card, RCODE_TYPE_ERROR, packet->timestamp); break; default: /* * In this case the ack is really a juju specific * rcode, so just forward that to the callback. */ - close_transaction(t, card, status); + close_transaction(t, card, status, packet->timestamp); break; } } @@ -202,10 +244,11 @@ static void fw_fill_request(struct fw_packet *packet, int tcode, int tlabel, int ext_tcode; if (tcode == TCODE_STREAM_DATA) { - packet->header[0] = - HEADER_DATA_LENGTH(length) | - destination_id | - HEADER_TCODE(TCODE_STREAM_DATA); + // The value of destination_id argument should include tag, channel, and sy fields + // as isochronous packet header has. 
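// Illustrative example: per the IEEE 1394 isochronous packet header layout,
// the low 16 bits of destination_id carry tag (bits 15-14), channel
// (bits 13-8), and sy (bits 3-0), so a caller streaming on channel 5 with
// tag 1 and sy 0 would pass:
//
//	destination_id = (1 << 14) | (5 << 8) | 0;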
+ packet->header[0] = destination_id; + isoc_header_set_data_length(packet->header, length); + isoc_header_set_tcode(packet->header, TCODE_STREAM_DATA); packet->header_length = 4; packet->payload = payload; packet->payload_length = length; @@ -219,28 +262,24 @@ static void fw_fill_request(struct fw_packet *packet, int tcode, int tlabel, } else ext_tcode = 0; - packet->header[0] = - HEADER_RETRY(RETRY_X) | - HEADER_TLABEL(tlabel) | - HEADER_TCODE(tcode) | - HEADER_DESTINATION(destination_id); - packet->header[1] = - HEADER_OFFSET_HIGH(offset >> 32) | HEADER_SOURCE(source_id); - packet->header[2] = - offset; + async_header_set_retry(packet->header, RETRY_X); + async_header_set_tlabel(packet->header, tlabel); + async_header_set_tcode(packet->header, tcode); + async_header_set_destination(packet->header, destination_id); + async_header_set_source(packet->header, source_id); + async_header_set_offset(packet->header, offset); switch (tcode) { case TCODE_WRITE_QUADLET_REQUEST: - packet->header[3] = *(u32 *)payload; + async_header_set_quadlet_data(packet->header, *(u32 *)payload); packet->header_length = 16; packet->payload_length = 0; break; case TCODE_LOCK_REQUEST: case TCODE_WRITE_BLOCK_REQUEST: - packet->header[3] = - HEADER_DATA_LENGTH(length) | - HEADER_EXTENDED_TCODE(ext_tcode); + async_header_set_data_length(packet->header, length); + async_header_set_extended_tcode(packet->header, ext_tcode); packet->header_length = 16; packet->payload = payload; packet->payload_length = length; @@ -252,9 +291,8 @@ static void fw_fill_request(struct fw_packet *packet, int tcode, int tlabel, break; case TCODE_READ_BLOCK_REQUEST: - packet->header[3] = - HEADER_DATA_LENGTH(length) | - HEADER_EXTENDED_TCODE(ext_tcode); + async_header_set_data_length(packet->header, length); + async_header_set_extended_tcode(packet->header, ext_tcode); packet->header_length = 16; packet->payload_length = 0; break; @@ -270,24 +308,28 @@ static void fw_fill_request(struct fw_packet *packet, int tcode, int tlabel, } static int allocate_tlabel(struct fw_card *card) +__must_hold(&card->transactions.lock) { int tlabel; - tlabel = card->current_tlabel; - while (card->tlabel_mask & (1ULL << tlabel)) { + lockdep_assert_held(&card->transactions.lock); + + tlabel = card->transactions.current_tlabel; + while (card->transactions.tlabel_mask & (1ULL << tlabel)) { tlabel = (tlabel + 1) & 0x3f; - if (tlabel == card->current_tlabel) + if (tlabel == card->transactions.current_tlabel) return -EBUSY; } - card->current_tlabel = (tlabel + 1) & 0x3f; - card->tlabel_mask |= 1ULL << tlabel; + card->transactions.current_tlabel = (tlabel + 1) & 0x3f; + card->transactions.tlabel_mask |= 1ULL << tlabel; return tlabel; } /** - * fw_send_request() - submit a request packet for transmission + * __fw_send_request() - submit a request packet for transmission to generate callback for response + * subaction with or without time stamp. * @card: interface to send the request at * @t: transaction instance to which the request belongs * @tcode: transaction code @@ -297,7 +339,9 @@ static int allocate_tlabel(struct fw_card *card) * @offset: 48bit wide offset into destination's address space * @payload: data payload for the request subaction * @length: length of the payload, in bytes - * @callback: function to be called when the transaction is completed + * @callback: union of two functions whether to receive time stamp or not for response + * subaction. + * @with_tstamp: Whether to receive time stamp or not for response subaction. 
* @callback_data: data to be passed to the transaction completion callback * * Submit a request packet into the asynchronous request transmission queue. @@ -334,12 +378,11 @@ static int allocate_tlabel(struct fw_card *card) * transaction completion and hence execution of @callback may happen even * before fw_send_request() returns. */ -void fw_send_request(struct fw_card *card, struct fw_transaction *t, int tcode, - int destination_id, int generation, int speed, - unsigned long long offset, void *payload, size_t length, - fw_transaction_callback_t callback, void *callback_data) +void __fw_send_request(struct fw_card *card, struct fw_transaction *t, int tcode, + int destination_id, int generation, int speed, unsigned long long offset, + void *payload, size_t length, union fw_transaction_callback callback, + bool with_tstamp, void *callback_data) { - unsigned long flags; int tlabel; /* @@ -347,12 +390,24 @@ void fw_send_request(struct fw_card *card, struct fw_transaction *t, int tcode, * the list while holding the card spinlock. */ - spin_lock_irqsave(&card->lock, flags); - - tlabel = allocate_tlabel(card); + // NOTE: This can be without irqsave when we can guarantee that __fw_send_request() for + // local destination never runs in any type of IRQ context. + scoped_guard(spinlock_irqsave, &card->transactions.lock) + tlabel = allocate_tlabel(card); if (tlabel < 0) { - spin_unlock_irqrestore(&card->lock, flags); - callback(card, RCODE_SEND_ERROR, NULL, 0, callback_data); + if (!with_tstamp) { + callback.without_tstamp(card, RCODE_SEND_ERROR, NULL, 0, callback_data); + } else { + // Timestamping on behalf of hardware. + u32 curr_cycle_time = 0; + u32 tstamp; + + (void)fw_card_read_cycle_time(card, &curr_cycle_time); + tstamp = cycle_time_to_ohci_tstamp(curr_cycle_time); + + callback.with_tstamp(card, RCODE_SEND_ERROR, tstamp, tstamp, NULL, 0, + callback_data); + } return; } @@ -360,23 +415,33 @@ void fw_send_request(struct fw_card *card, struct fw_transaction *t, int tcode, t->tlabel = tlabel; t->card = card; t->is_split_transaction = false; - timer_setup(&t->split_timeout_timer, - split_transaction_timeout_callback, 0); + timer_setup(&t->split_timeout_timer, split_transaction_timeout_callback, 0); t->callback = callback; + t->with_tstamp = with_tstamp; t->callback_data = callback_data; - - fw_fill_request(&t->packet, tcode, t->tlabel, - destination_id, card->node_id, generation, - speed, offset, payload, length); t->packet.callback = transmit_complete_callback; - list_add_tail(&t->link, &card->transaction_list); + // NOTE: This can be without irqsave when we can guarantee that __fw_send_request() for + // local destination never runs in any type of IRQ context. + scoped_guard(spinlock_irqsave, &card->lock) { + // The node_id field of fw_card can be updated when handling SelfIDComplete. + fw_fill_request(&t->packet, tcode, t->tlabel, destination_id, card->node_id, + generation, speed, offset, payload, length); + } + + // NOTE: This can be without irqsave when we can guarantee that __fw_send_request() for + // local destination never runs in any type of IRQ context. + scoped_guard(spinlock_irqsave, &card->transactions.lock) + list_add_tail(&t->link, &card->transactions.list); - spin_unlock_irqrestore(&card->lock, flags); + // Safe with no lock, since the index field of fw_card is immutable once assigned. + trace_async_request_outbound_initiate((uintptr_t)t, card->index, generation, speed, + t->packet.header, payload, + tcode_is_read_request(tcode) ? 
0 : length / 4); card->driver->send_request(card, &t->packet); } -EXPORT_SYMBOL(fw_send_request); +EXPORT_SYMBOL_GPL(__fw_send_request); struct transaction_callback_data { struct completion done; @@ -424,7 +489,7 @@ int fw_run_transaction(struct fw_card *card, int tcode, int destination_id, fw_send_request(card, &t, tcode, destination_id, generation, speed, offset, payload, length, transaction_callback, &d); wait_for_completion(&d.done); - destroy_timer_on_stack(&t.split_timeout_timer); + timer_destroy_on_stack(&t.split_timeout_timer); return d.rcode; } @@ -436,12 +501,13 @@ static DECLARE_COMPLETION(phy_config_done); static void transmit_phy_packet_callback(struct fw_packet *packet, struct fw_card *card, int status) { + trace_async_phy_outbound_complete((uintptr_t)packet, card->index, packet->generation, status, + packet->timestamp); complete(&phy_config_done); } static struct fw_packet phy_config_packet = { .header_length = 12, - .header[0] = TCODE_LINK_INTERNAL << 4, .payload_length = 0, .speed = SCODE_100, .callback = transmit_phy_packet_callback, @@ -450,11 +516,15 @@ static struct fw_packet phy_config_packet = { void fw_send_phy_config(struct fw_card *card, int node_id, int generation, int gap_count) { - long timeout = DIV_ROUND_UP(HZ, 10); - u32 data = PHY_IDENTIFIER(PHY_PACKET_CONFIG); + long timeout = msecs_to_jiffies(100); + u32 data = 0; - if (node_id != FW_PHY_CONFIG_NO_NODE_ID) - data |= PHY_CONFIG_ROOT_ID(node_id); + phy_packet_set_packet_identifier(&data, PHY_PACKET_PACKET_IDENTIFIER_PHY_CONFIG); + + if (node_id != FW_PHY_CONFIG_NO_NODE_ID) { + phy_packet_phy_config_set_root_id(&data, node_id); + phy_packet_phy_config_set_force_root_node(&data, true); + } if (gap_count == FW_PHY_CONFIG_CURRENT_GAP_COUNT) { gap_count = card->driver->read_phy_reg(card, 1); @@ -465,19 +535,23 @@ void fw_send_phy_config(struct fw_card *card, if (gap_count == 63) return; } - data |= PHY_CONFIG_GAP_COUNT(gap_count); + phy_packet_phy_config_set_gap_count(&data, gap_count); + phy_packet_phy_config_set_gap_count_optimization(&data, true); - mutex_lock(&phy_config_mutex); + guard(mutex)(&phy_config_mutex); + async_header_set_tcode(phy_config_packet.header, TCODE_LINK_INTERNAL); phy_config_packet.header[1] = data; phy_config_packet.header[2] = ~data; phy_config_packet.generation = generation; reinit_completion(&phy_config_done); + trace_async_phy_outbound_initiate((uintptr_t)&phy_config_packet, card->index, + phy_config_packet.generation, phy_config_packet.header[1], + phy_config_packet.header[2]); + card->driver->send_request(card, &phy_config_packet); wait_for_completion_timeout(&phy_config_done, timeout); - - mutex_unlock(&phy_config_mutex); } static struct fw_address_handler *lookup_overlapping_address_handler( @@ -534,10 +608,21 @@ const struct fw_address_region fw_unit_space_region = { .start = 0xfffff0000900ULL, .end = 0x1000000000000ULL, }; #endif /* 0 */ -static bool is_in_fcp_region(u64 offset, size_t length) +static void complete_address_handler(struct kref *kref) +{ + struct fw_address_handler *handler = container_of(kref, struct fw_address_handler, kref); + + complete(&handler->done); +} + +static void get_address_handler(struct fw_address_handler *handler) { - return offset >= (CSR_REGISTER_BASE | CSR_FCP_COMMAND) && - offset + length <= (CSR_REGISTER_BASE | CSR_FCP_END); + kref_get(&handler->kref); +} + +static int put_address_handler(struct fw_address_handler *handler) +{ + return kref_put(&handler->kref, complete_address_handler); } /** @@ -547,9 +632,10 @@ static bool 
is_in_fcp_region(u64 offset, size_t length)
  *
  * region->start, ->end, and handler->length have to be quadlet-aligned.
  *
- * When a request is received that falls within the specified address range,
- * the specified callback is invoked. The parameters passed to the callback
- * give the details of the particular request.
+ * When a request is received that falls within the specified address range, the specified callback
+ * is invoked. The parameters passed to the callback give the details of the particular request.
+ * The callback is invoked in the workqueue context in most cases. However, if the request is
+ * initiated by the local node, the callback is invoked in the initiator's context.
  *
  * To be called in process context.
  * Return value: 0 on success, non-zero otherwise.
@@ -572,7 +658,7 @@ int fw_core_add_address_handler(struct fw_address_handler *handler,
 	    handler->length == 0)
 		return -EINVAL;
 
-	spin_lock(&address_handler_list_lock);
+	guard(spinlock)(&address_handler_list_lock);
 
 	handler->offset = region->start;
 	while (handler->offset + handler->length <= region->end) {
@@ -585,14 +671,14 @@ int fw_core_add_address_handler(struct fw_address_handler *handler,
 		if (other != NULL) {
 			handler->offset += other->length;
 		} else {
+			init_completion(&handler->done);
+			kref_init(&handler->kref);
 			list_add_tail_rcu(&handler->link, &address_handler_list);
 			ret = 0;
 			break;
 		}
 	}
 
-	spin_unlock(&address_handler_list_lock);
-
 	return ret;
 }
 EXPORT_SYMBOL(fw_core_add_address_handler);
@@ -608,35 +694,63 @@ EXPORT_SYMBOL(fw_core_add_address_handler);
  */
 void fw_core_remove_address_handler(struct fw_address_handler *handler)
 {
-	spin_lock(&address_handler_list_lock);
-	list_del_rcu(&handler->link);
-	spin_unlock(&address_handler_list_lock);
+	scoped_guard(spinlock, &address_handler_list_lock)
+		list_del_rcu(&handler->link);
+
 	synchronize_rcu();
+
+	if (!put_address_handler(handler))
+		wait_for_completion(&handler->done);
 }
 EXPORT_SYMBOL(fw_core_remove_address_handler);
 
 struct fw_request {
+	struct kref kref;
 	struct fw_packet response;
-	u32 request_header[4];
+	u32 request_header[ASYNC_HEADER_QUADLET_COUNT];
 	int ack;
+	u32 timestamp;
 	u32 length;
 	u32 data[];
 };
 
+void fw_request_get(struct fw_request *request)
+{
+	kref_get(&request->kref);
+}
+
+static void release_request(struct kref *kref)
+{
+	struct fw_request *request = container_of(kref, struct fw_request, kref);
+
+	kfree(request);
+}
+
+void fw_request_put(struct fw_request *request)
+{
+	kref_put(&request->kref, release_request);
+}
+
 static void free_response_callback(struct fw_packet *packet,
 				   struct fw_card *card, int status)
 {
-	struct fw_request *request;
+	struct fw_request *request = container_of(packet, struct fw_request, response);
 
-	request = container_of(packet, struct fw_request, response);
-	kfree(request);
+	trace_async_response_outbound_complete((uintptr_t)request, card->index, packet->generation,
+					       packet->speed, status, packet->timestamp);
+
+	// Decrease the reference count since the packet is no longer in flight.
+	fw_request_put(request);
+
+	// Decrease the reference count to release the object.
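// Reference-count lifecycle, for illustration: allocate_request() starts the
// kref at 1, and fw_send_response() takes a second reference while the
// response packet is in flight, so the two puts here drop the count to zero
// and free the request.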
+ fw_request_put(request); } int fw_get_response_length(struct fw_request *r) { int tcode, ext_tcode, data_length; - tcode = HEADER_GET_TCODE(r->request_header[0]); + tcode = async_header_get_tcode(r->request_header); switch (tcode) { case TCODE_WRITE_QUADLET_REQUEST: @@ -647,12 +761,12 @@ int fw_get_response_length(struct fw_request *r) return 4; case TCODE_READ_BLOCK_REQUEST: - data_length = HEADER_GET_DATA_LENGTH(r->request_header[3]); + data_length = async_header_get_data_length(r->request_header); return data_length; case TCODE_LOCK_REQUEST: - ext_tcode = HEADER_GET_EXTENDED_TCODE(r->request_header[3]); - data_length = HEADER_GET_DATA_LENGTH(r->request_header[3]); + ext_tcode = async_header_get_extended_tcode(r->request_header); + data_length = async_header_get_data_length(r->request_header); switch (ext_tcode) { case EXTCODE_FETCH_ADD: case EXTCODE_LITTLE_ADD: @@ -672,46 +786,42 @@ void fw_fill_response(struct fw_packet *response, u32 *request_header, { int tcode, tlabel, extended_tcode, source, destination; - tcode = HEADER_GET_TCODE(request_header[0]); - tlabel = HEADER_GET_TLABEL(request_header[0]); - source = HEADER_GET_DESTINATION(request_header[0]); - destination = HEADER_GET_SOURCE(request_header[1]); - extended_tcode = HEADER_GET_EXTENDED_TCODE(request_header[3]); - - response->header[0] = - HEADER_RETRY(RETRY_1) | - HEADER_TLABEL(tlabel) | - HEADER_DESTINATION(destination); - response->header[1] = - HEADER_SOURCE(source) | - HEADER_RCODE(rcode); - response->header[2] = 0; + tcode = async_header_get_tcode(request_header); + tlabel = async_header_get_tlabel(request_header); + source = async_header_get_destination(request_header); // Exchange. + destination = async_header_get_source(request_header); // Exchange. + extended_tcode = async_header_get_extended_tcode(request_header); + + async_header_set_retry(response->header, RETRY_1); + async_header_set_tlabel(response->header, tlabel); + async_header_set_destination(response->header, destination); + async_header_set_source(response->header, source); + async_header_set_rcode(response->header, rcode); + response->header[2] = 0; // The field is reserved. 
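// Worked example (illustrative): a TCODE_READ_QUADLET_REQUEST with tlabel 9
// sent from node 0xffc1 to node 0xffc0 is answered with a header carrying
// tcode TCODE_READ_QUADLET_RESPONSE, tlabel 9, destination 0xffc1, source
// 0xffc0, the rcode, and the quadlet data in header[3].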
switch (tcode) { case TCODE_WRITE_QUADLET_REQUEST: case TCODE_WRITE_BLOCK_REQUEST: - response->header[0] |= HEADER_TCODE(TCODE_WRITE_RESPONSE); + async_header_set_tcode(response->header, TCODE_WRITE_RESPONSE); response->header_length = 12; response->payload_length = 0; break; case TCODE_READ_QUADLET_REQUEST: - response->header[0] |= - HEADER_TCODE(TCODE_READ_QUADLET_RESPONSE); + async_header_set_tcode(response->header, TCODE_READ_QUADLET_RESPONSE); if (payload != NULL) - response->header[3] = *(u32 *)payload; + async_header_set_quadlet_data(response->header, *(u32 *)payload); else - response->header[3] = 0; + async_header_set_quadlet_data(response->header, 0); response->header_length = 16; response->payload_length = 0; break; case TCODE_READ_BLOCK_REQUEST: case TCODE_LOCK_REQUEST: - response->header[0] |= HEADER_TCODE(tcode + 2); - response->header[3] = - HEADER_DATA_LENGTH(length) | - HEADER_EXTENDED_TCODE(extended_tcode); + async_header_set_tcode(response->header, tcode + 2); + async_header_set_data_length(response->header, length); + async_header_set_extended_tcode(response->header, extended_tcode); response->header_length = 16; response->payload = payload; response->payload_length = length; @@ -727,11 +837,14 @@ EXPORT_SYMBOL(fw_fill_response); static u32 compute_split_timeout_timestamp(struct fw_card *card, u32 request_timestamp) +__must_hold(&card->split_timeout.lock) { unsigned int cycles; u32 timestamp; - cycles = card->split_timeout_cycles; + lockdep_assert_held(&card->split_timeout.lock); + + cycles = card->split_timeout.cycles; cycles += request_timestamp & 0x1fff; timestamp = request_timestamp & ~0x1fff; @@ -748,7 +861,7 @@ static struct fw_request *allocate_request(struct fw_card *card, u32 *data, length; int request_tcode; - request_tcode = HEADER_GET_TCODE(p->header[0]); + request_tcode = async_header_get_tcode(p->header); switch (request_tcode) { case TCODE_WRITE_QUADLET_REQUEST: data = &p->header[3]; @@ -758,7 +871,7 @@ static struct fw_request *allocate_request(struct fw_card *card, case TCODE_WRITE_BLOCK_REQUEST: case TCODE_LOCK_REQUEST: data = p->payload; - length = HEADER_GET_DATA_LENGTH(p->header[3]); + length = async_header_get_data_length(p->header); break; case TCODE_READ_QUADLET_REQUEST: @@ -768,7 +881,7 @@ static struct fw_request *allocate_request(struct fw_card *card, case TCODE_READ_BLOCK_REQUEST: data = NULL; - length = HEADER_GET_DATA_LENGTH(p->header[3]); + length = async_header_get_data_length(p->header); break; default: @@ -780,14 +893,19 @@ static struct fw_request *allocate_request(struct fw_card *card, request = kmalloc(sizeof(*request) + length, GFP_ATOMIC); if (request == NULL) return NULL; + kref_init(&request->kref); + + // NOTE: This can be without irqsave when we can guarantee that __fw_send_request() for + // local destination never runs in any type of IRQ context. 
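// Worked example (illustrative): with split_timeout.cycles = 800 and a request
// timestamp of second-count 3, cycle-count 7500, the sum 800 + 7500 = 8300
// exceeds the 8000 cycles of one second, so the deadline computed here wraps
// to second-count 4, cycle-count 300.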
+ scoped_guard(spinlock_irqsave, &card->split_timeout.lock) + request->response.timestamp = compute_split_timeout_timestamp(card, p->timestamp); request->response.speed = p->speed; - request->response.timestamp = - compute_split_timeout_timestamp(card, p->timestamp); request->response.generation = p->generation; request->response.ack = 0; request->response.callback = free_response_callback; request->ack = p->ack; + request->timestamp = p->timestamp; request->length = length; if (data) memcpy(request->data, data, length); @@ -797,26 +915,42 @@ static struct fw_request *allocate_request(struct fw_card *card, return request; } +/** + * fw_send_response: - send response packet for asynchronous transaction. + * @card: interface to send the response at. + * @request: firewire request data for the transaction. + * @rcode: response code to send. + * + * Submit a response packet into the asynchronous response transmission queue. The @request + * is going to be released when the transmission successfully finishes later. + */ void fw_send_response(struct fw_card *card, struct fw_request *request, int rcode) { - if (WARN_ONCE(!request, "invalid for FCP address handlers")) - return; + u32 *data = NULL; + unsigned int data_length = 0; /* unified transaction or broadcast transaction: don't respond */ if (request->ack != ACK_PENDING || - HEADER_DESTINATION_IS_BROADCAST(request->request_header[0])) { - kfree(request); + HEADER_DESTINATION_IS_BROADCAST(request->request_header)) { + fw_request_put(request); return; } - if (rcode == RCODE_COMPLETE) - fw_fill_response(&request->response, request->request_header, - rcode, request->data, - fw_get_response_length(request)); - else - fw_fill_response(&request->response, request->request_header, - rcode, NULL, 0); + if (rcode == RCODE_COMPLETE) { + data = request->data; + data_length = fw_get_response_length(request); + } + + fw_fill_response(&request->response, request->request_header, rcode, data, data_length); + + // Increase the reference count so that the object is kept during in-flight. + fw_request_get(request); + + trace_async_response_outbound_initiate((uintptr_t)request, card->index, + request->response.generation, request->response.speed, + request->response.header, data, + data ? data_length / 4 : 0); card->driver->send_response(card, &request->response); } @@ -832,6 +966,22 @@ int fw_get_request_speed(struct fw_request *request) } EXPORT_SYMBOL(fw_get_request_speed); +/** + * fw_request_get_timestamp: Get timestamp of the request. + * @request: The opaque pointer to request structure. + * + * Get timestamp when 1394 OHCI controller receives the asynchronous request subaction. The + * timestamp consists of the low order 3 bits of second field and the full 13 bits of count + * field of isochronous cycle time register. + * + * Returns: timestamp of the request. 
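 *
 * For example (illustrative): a CYCLE_TIME value of 0x12345678 yields the
 * timestamp 0x2345, i.e. bits 27-12 of the register value.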
+ */ +u32 fw_request_get_timestamp(const struct fw_request *request) +{ + return request->timestamp; +} +EXPORT_SYMBOL_GPL(fw_request_get_timestamp); + static void handle_exclusive_region_request(struct fw_card *card, struct fw_packet *p, struct fw_request *request, @@ -840,34 +990,41 @@ static void handle_exclusive_region_request(struct fw_card *card, struct fw_address_handler *handler; int tcode, destination, source; - destination = HEADER_GET_DESTINATION(p->header[0]); - source = HEADER_GET_SOURCE(p->header[1]); - tcode = HEADER_GET_TCODE(p->header[0]); + destination = async_header_get_destination(p->header); + source = async_header_get_source(p->header); + tcode = async_header_get_tcode(p->header); if (tcode == TCODE_LOCK_REQUEST) - tcode = 0x10 + HEADER_GET_EXTENDED_TCODE(p->header[3]); - - rcu_read_lock(); - handler = lookup_enclosing_address_handler(&address_handler_list, - offset, request->length); - if (handler) - handler->address_callback(card, request, - tcode, destination, source, - p->generation, offset, - request->data, request->length, - handler->callback_data); - rcu_read_unlock(); - - if (!handler) + tcode = 0x10 + async_header_get_extended_tcode(p->header); + + scoped_guard(rcu) { + handler = lookup_enclosing_address_handler(&address_handler_list, offset, + request->length); + if (handler) + get_address_handler(handler); + } + + if (!handler) { fw_send_response(card, request, RCODE_ADDRESS_ERROR); + return; + } + + // Outside the RCU read-side critical section. Without spinlock. With reference count. + handler->address_callback(card, request, tcode, destination, source, p->generation, offset, + request->data, request->length, handler->callback_data); + put_address_handler(handler); } +// To use kmalloc allocator efficiently, this should be power of two. 
+#define BUFFER_ON_KERNEL_STACK_SIZE 4 + static void handle_fcp_region_request(struct fw_card *card, struct fw_packet *p, struct fw_request *request, unsigned long long offset) { - struct fw_address_handler *handler; - int tcode, destination, source; + struct fw_address_handler *buffer_on_kernel_stack[BUFFER_ON_KERNEL_STACK_SIZE]; + struct fw_address_handler *handler, **handlers; + int tcode, destination, source, i, count, buffer_size; if ((offset != (CSR_REGISTER_BASE | CSR_FCP_COMMAND) && offset != (CSR_REGISTER_BASE | CSR_FCP_RESPONSE)) || @@ -877,9 +1034,9 @@ static void handle_fcp_region_request(struct fw_card *card, return; } - tcode = HEADER_GET_TCODE(p->header[0]); - destination = HEADER_GET_DESTINATION(p->header[0]); - source = HEADER_GET_SOURCE(p->header[1]); + tcode = async_header_get_tcode(p->header); + destination = async_header_get_destination(p->header); + source = async_header_get_source(p->header); if (tcode != TCODE_WRITE_QUADLET_REQUEST && tcode != TCODE_WRITE_BLOCK_REQUEST) { @@ -888,17 +1045,54 @@ static void handle_fcp_region_request(struct fw_card *card, return; } - rcu_read_lock(); - list_for_each_entry_rcu(handler, &address_handler_list, link) { - if (is_enclosing_handler(handler, offset, request->length)) - handler->address_callback(card, NULL, tcode, - destination, source, - p->generation, offset, - request->data, - request->length, - handler->callback_data); + count = 0; + handlers = buffer_on_kernel_stack; + buffer_size = ARRAY_SIZE(buffer_on_kernel_stack); + scoped_guard(rcu) { + list_for_each_entry_rcu(handler, &address_handler_list, link) { + if (is_enclosing_handler(handler, offset, request->length)) { + if (count >= buffer_size) { + int next_size = buffer_size * 2; + struct fw_address_handler **buffer_on_kernel_heap; + + if (handlers == buffer_on_kernel_stack) + buffer_on_kernel_heap = NULL; + else + buffer_on_kernel_heap = handlers; + + buffer_on_kernel_heap = + krealloc_array(buffer_on_kernel_heap, next_size, + sizeof(*buffer_on_kernel_heap), GFP_ATOMIC); + // FCP is used for purposes unrelated to significant system + // resources (e.g. storage or networking), so allocation + // failures are not considered so critical. 
+				if (!buffer_on_kernel_heap)
+					break;
+
+				if (handlers == buffer_on_kernel_stack) {
+					memcpy(buffer_on_kernel_heap, buffer_on_kernel_stack,
+					       sizeof(buffer_on_kernel_stack));
+				}
+
+				handlers = buffer_on_kernel_heap;
+				buffer_size = next_size;
+			}
+			get_address_handler(handler);
+			handlers[count++] = handler;
+		}
+	}
+
+	for (i = 0; i < count; ++i) {
+		handler = handlers[i];
+		handler->address_callback(card, request, tcode, destination, source,
+					  p->generation, offset, request->data,
+					  request->length, handler->callback_data);
+		put_address_handler(handler);
 	}
-	rcu_read_unlock();
+
+	if (handlers != buffer_on_kernel_stack)
+		kfree(handlers);
 
 	fw_send_response(card, request, RCODE_COMPLETE);
 }
@@ -907,11 +1101,15 @@ void fw_core_handle_request(struct fw_card *card, struct fw_packet *p)
 {
 	struct fw_request *request;
 	unsigned long long offset;
+	unsigned int tcode;
 
 	if (p->ack != ACK_PENDING && p->ack != ACK_COMPLETE)
 		return;
 
-	if (TCODE_IS_LINK_INTERNAL(HEADER_GET_TCODE(p->header[0]))) {
+	tcode = async_header_get_tcode(p->header);
+	if (tcode_is_link_internal(tcode)) {
+		trace_async_phy_inbound((uintptr_t)p, card->index, p->generation, p->ack, p->timestamp,
+					p->header[1], p->header[2]);
 		fw_cdev_handle_phy_packet(card, p);
 		return;
 	}
@@ -922,8 +1120,11 @@ void fw_core_handle_request(struct fw_card *card, struct fw_packet *p)
 		return;
 	}
 
-	offset = ((u64)HEADER_GET_OFFSET_HIGH(p->header[1]) << 32) |
-		p->header[2];
+	trace_async_request_inbound((uintptr_t)request, card->index, p->generation, p->speed,
+				    p->ack, p->timestamp, p->header, request->data,
+				    tcode_is_read_request(tcode) ? 0 : request->length / 4);
+
+	offset = async_header_get_offset(p->header);
 
 	if (!is_in_fcp_region(offset, request->length))
 		handle_exclusive_region_request(card, p, request, offset);
@@ -935,42 +1136,20 @@ EXPORT_SYMBOL(fw_core_handle_request);
 
 void fw_core_handle_response(struct fw_card *card, struct fw_packet *p)
 {
-	struct fw_transaction *t;
-	unsigned long flags;
+	struct fw_transaction *t = NULL;
 	u32 *data;
 	size_t data_length;
 	int tcode, tlabel, source, rcode;
 
-	tcode = HEADER_GET_TCODE(p->header[0]);
-	tlabel = HEADER_GET_TLABEL(p->header[0]);
-	source = HEADER_GET_SOURCE(p->header[1]);
-	rcode = HEADER_GET_RCODE(p->header[1]);
-
-	spin_lock_irqsave(&card->lock, flags);
-	list_for_each_entry(t, &card->transaction_list, link) {
-		if (t->node_id == source && t->tlabel == tlabel) {
-			if (!try_cancel_split_timeout(t)) {
-				spin_unlock_irqrestore(&card->lock, flags);
-				goto timed_out;
-			}
-			list_del_init(&t->link);
-			card->tlabel_mask &= ~(1ULL << t->tlabel);
-			break;
-		}
-	}
-	spin_unlock_irqrestore(&card->lock, flags);
-
-	if (&t->link == &card->transaction_list) {
- timed_out:
-		fw_notice(card, "unsolicited response (source %x, tlabel %x)\n",
-			  source, tlabel);
-		return;
-	}
+	tcode = async_header_get_tcode(p->header);
+	tlabel = async_header_get_tlabel(p->header);
+	source = async_header_get_source(p->header);
+	rcode = async_header_get_rcode(p->header);
 
-	/*
-	 * FIXME: sanity check packet, is length correct, does tcodes
-	 * and addresses match.
-	 */
+	// FIXME: sanity check packet: is the length correct, and do the tcode and
+	// addresses match the transaction request queried later?
+	//
+	// For the tracepoint events, we decode the header here despite that concern.
switch (tcode) { case TCODE_READ_QUADLET_RESPONSE: @@ -986,7 +1165,7 @@ void fw_core_handle_response(struct fw_card *card, struct fw_packet *p) case TCODE_READ_BLOCK_RESPONSE: case TCODE_LOCK_RESPONSE: data = p->payload; - data_length = HEADER_GET_DATA_LENGTH(p->header[3]); + data_length = async_header_get_data_length(p->header); break; default: @@ -996,13 +1175,34 @@ void fw_core_handle_response(struct fw_card *card, struct fw_packet *p) break; } + // NOTE: This can be without irqsave when we can guarantee that __fw_send_request() for + // local destination never runs in any type of IRQ context. + scoped_guard(spinlock_irqsave, &card->transactions.lock) { + t = find_and_pop_transaction_entry(card, + iter->node_id == source && iter->tlabel == tlabel); + } + + trace_async_response_inbound((uintptr_t)t, card->index, p->generation, p->speed, p->ack, + p->timestamp, p->header, data, data_length / 4); + + if (!t) { + fw_notice(card, "unsolicited response (source %x, tlabel %x)\n", + source, tlabel); + return; + } + /* * The response handler may be executed while the request handler * is still pending. Cancel the request handler. */ card->driver->cancel_packet(card, &t->packet); - t->callback(card, rcode, data, data_length, t->callback_data); + if (!t->with_tstamp) { + t->callback.without_tstamp(card, rcode, data, data_length, t->callback_data); + } else { + t->callback.with_tstamp(card, rcode, t->packet.timestamp, p->timestamp, data, + data_length, t->callback_data); + } } EXPORT_SYMBOL(fw_core_handle_response); @@ -1043,7 +1243,7 @@ static void handle_topology_map(struct fw_card *card, struct fw_request *request { int start; - if (!TCODE_IS_READ_REQUEST(tcode)) { + if (!tcode_is_read_request(tcode)) { fw_send_response(card, request, RCODE_TYPE_ERROR); return; } @@ -1054,7 +1254,11 @@ static void handle_topology_map(struct fw_card *card, struct fw_request *request } start = (offset - topology_map_region.start) / 4; - memcpy(payload, &card->topology_map[start], length); + + // NOTE: This can be without irqsave when we can guarantee that fw_send_request() for local + // destination never runs in any type of IRQ context. 
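// For reference, the TOPOLOGY_MAP layout written by update_topology_map():
// quadlet 0 holds the length and CRC, quadlet 1 the map generation, quadlet 2
// node_count << 16 | self_id_count, followed by the raw self ID quadlets.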
+ scoped_guard(spinlock_irqsave, &card->topology_map.lock) + memcpy(payload, &card->topology_map.buffer[start], length); fw_send_response(card, request, RCODE_COMPLETE); } @@ -1069,16 +1273,17 @@ static const struct fw_address_region registers_region = .end = CSR_REGISTER_BASE | CSR_CONFIG_ROM, }; static void update_split_timeout(struct fw_card *card) +__must_hold(&card->split_timeout.lock) { unsigned int cycles; - cycles = card->split_timeout_hi * 8000 + (card->split_timeout_lo >> 19); + cycles = card->split_timeout.hi * 8000 + (card->split_timeout.lo >> 19); /* minimum per IEEE 1394, maximum which doesn't overflow OHCI */ cycles = clamp(cycles, 800u, 3u * 8000u); - card->split_timeout_cycles = cycles; - card->split_timeout_jiffies = DIV_ROUND_UP(cycles * HZ, 8000); + card->split_timeout.cycles = cycles; + card->split_timeout.jiffies = isoc_cycles_to_jiffies(cycles); } static void handle_registers(struct fw_card *card, struct fw_request *request, @@ -1089,7 +1294,6 @@ static void handle_registers(struct fw_card *card, struct fw_request *request, int reg = offset & ~CSR_REGISTER_BASE; __be32 *data = payload; int rcode = RCODE_COMPLETE; - unsigned long flags; switch (reg) { case CSR_PRIORITY_BUDGET: @@ -1129,12 +1333,15 @@ static void handle_registers(struct fw_card *card, struct fw_request *request, case CSR_SPLIT_TIMEOUT_HI: if (tcode == TCODE_READ_QUADLET_REQUEST) { - *data = cpu_to_be32(card->split_timeout_hi); + *data = cpu_to_be32(card->split_timeout.hi); } else if (tcode == TCODE_WRITE_QUADLET_REQUEST) { - spin_lock_irqsave(&card->lock, flags); - card->split_timeout_hi = be32_to_cpu(*data) & 7; - update_split_timeout(card); - spin_unlock_irqrestore(&card->lock, flags); + // NOTE: This can be without irqsave when we can guarantee that + // __fw_send_request() for local destination never runs in any type of IRQ + // context. + scoped_guard(spinlock_irqsave, &card->split_timeout.lock) { + card->split_timeout.hi = be32_to_cpu(*data) & 7; + update_split_timeout(card); + } } else { rcode = RCODE_TYPE_ERROR; } @@ -1142,13 +1349,15 @@ static void handle_registers(struct fw_card *card, struct fw_request *request, case CSR_SPLIT_TIMEOUT_LO: if (tcode == TCODE_READ_QUADLET_REQUEST) { - *data = cpu_to_be32(card->split_timeout_lo); + *data = cpu_to_be32(card->split_timeout.lo); } else if (tcode == TCODE_WRITE_QUADLET_REQUEST) { - spin_lock_irqsave(&card->lock, flags); - card->split_timeout_lo = - be32_to_cpu(*data) & 0xfff80000; - update_split_timeout(card); - spin_unlock_irqrestore(&card->lock, flags); + // NOTE: This can be without irqsave when we can guarantee that + // __fw_send_request() for local destination never runs in any type of IRQ + // context. 
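// Worked example (illustrative): writing 0 to SPLIT_TIMEOUT_HI and 0x00080000
// to SPLIT_TIMEOUT_LO requests 0 * 8000 + (0x00080000 >> 19) = 1 cycle, which
// update_split_timeout() clamps to the IEEE 1394 minimum of 800 cycles
// (100 ms).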
+ scoped_guard(spinlock_irqsave, &card->split_timeout.lock) { + card->split_timeout.lo = be32_to_cpu(*data) & 0xfff80000; + update_split_timeout(card); + } } else { rcode = RCODE_TYPE_ERROR; } @@ -1259,7 +1468,8 @@ static int __init fw_core_init(void) { int ret; - fw_workqueue = alloc_workqueue("firewire", WQ_MEM_RECLAIM, 0); + fw_workqueue = alloc_workqueue("firewire", WQ_MEM_RECLAIM | WQ_UNBOUND, + 0); if (!fw_workqueue) return -ENOMEM; @@ -1290,7 +1500,7 @@ static void __exit fw_core_cleanup(void) unregister_chrdev(fw_cdev_major, "firewire"); bus_unregister(&fw_bus_type); destroy_workqueue(fw_workqueue); - idr_destroy(&fw_device_idr); + xa_destroy(&fw_device_xa); } module_init(fw_core_init); diff --git a/drivers/firewire/core.h b/drivers/firewire/core.h index 71d5f16f311c..41fb39d9a4e6 100644 --- a/drivers/firewire/core.h +++ b/drivers/firewire/core.h @@ -7,7 +7,7 @@ #include <linux/dma-mapping.h> #include <linux/fs.h> #include <linux/list.h> -#include <linux/idr.h> +#include <linux/xarray.h> #include <linux/mm_types.h> #include <linux/rwsem.h> #include <linux/slab.h> @@ -27,6 +27,11 @@ struct fw_packet; /* -card */ +// This is the arbitrary value we use to indicate a mismatched gap count. +#define GAP_COUNT_MISMATCHED 0 + +#define isoc_cycles_to_jiffies(cycles) usecs_to_jiffies((u32)div_u64((u64)cycles * USEC_PER_SEC, 8000)) + extern __printf(2, 3) void fw_err(const struct fw_card *card, const char *fmt, ...); extern __printf(2, 3) @@ -60,6 +65,9 @@ struct fw_card_driver { int (*enable)(struct fw_card *card, const __be32 *config_rom, size_t length); + // After returning the call, any function is no longer triggered to handle hardware event. + void (*disable)(struct fw_card *card); + int (*read_phy_reg)(struct fw_card *card, int address); int (*update_phy_reg)(struct fw_card *card, int address, int clear_bits, int set_bits); @@ -80,7 +88,7 @@ struct fw_card_driver { /* * Allow the specified node ID to do direct DMA out and in of * host memory. The card will disable this for all node when - * a bus reset happens, so driver need to reenable this after + * a bus reset happens, so driver need to re-enable this after * bus reset. Returns 0 on success, -ENODEV if the card * doesn't support this, -ESTALE if the generation doesn't * match. @@ -115,8 +123,8 @@ struct fw_card_driver { void fw_card_initialize(struct fw_card *card, const struct fw_card_driver *driver, struct device *device); -int fw_card_add(struct fw_card *card, - u32 max_receive, u32 link_speed, u64 guid); +int fw_card_add(struct fw_card *card, u32 max_receive, u32 link_speed, u64 guid, + unsigned int supported_isoc_contexts); void fw_core_remove_card(struct fw_card *card); int fw_compute_block_crc(__be32 *block); void fw_schedule_bm_work(struct fw_card *card, unsigned long delay); @@ -133,7 +141,7 @@ void fw_cdev_handle_phy_packet(struct fw_card *card, struct fw_packet *p); /* -device */ extern struct rw_semaphore fw_device_rwsem; -extern struct idr fw_device_idr; +extern struct xarray fw_device_xa; extern int fw_cdev_major; static inline struct fw_device *fw_device_get(struct fw_device *device) @@ -159,9 +167,17 @@ int fw_iso_buffer_alloc(struct fw_iso_buffer *buffer, int page_count); int fw_iso_buffer_map_dma(struct fw_iso_buffer *buffer, struct fw_card *card, enum dma_data_direction direction); +static inline void fw_iso_context_init_work(struct fw_iso_context *ctx, work_func_t func) +{ + INIT_WORK(&ctx->work, func); +} + /* -topology */ +// The initial value of BUS_MANAGER_ID register, to express nothing registered. 
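// Illustrative note: 0x3f is the all-ones 6-bit PHY ID, which no real node can
// own. A candidate bus manager registers itself with a lock (compare-and-swap)
// request against BUS_MANAGER_ID that succeeds only while the register still
// holds this value.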
+#define BUS_MANAGER_ID_NOT_REGISTERED 0x3f + enum { FW_NODE_CREATED, FW_NODE_UPDATED, @@ -183,28 +199,45 @@ struct fw_node { * local node to this node. */ u8 max_depth:4; /* Maximum depth to any leaf node */ u8 max_hops:4; /* Max hops in this sub tree */ - refcount_t ref_count; + + struct kref kref; /* For serializing node topology into a list. */ struct list_head link; - /* Upper layer specific data. */ - void *data; + // The device when already associated, else NULL. + struct fw_device *device; - struct fw_node *ports[]; + struct fw_node *ports[] __counted_by(port_count); }; static inline struct fw_node *fw_node_get(struct fw_node *node) { - refcount_inc(&node->ref_count); + kref_get(&node->kref); return node; } +static void release_node(struct kref *kref) +{ + struct fw_node *node = container_of(kref, struct fw_node, kref); + + kfree(node); +} + static inline void fw_node_put(struct fw_node *node) { - if (refcount_dec_and_test(&node->ref_count)) - kfree(node); + kref_put(&node->kref, release_node); +} + +static inline struct fw_device *fw_node_get_device(struct fw_node *node) +{ + return node->device; +} + +static inline void fw_node_set_device(struct fw_node *node, struct fw_device *device) +{ + node->device = device; } void fw_core_handle_bus_reset(struct fw_card *card, int node_id, @@ -225,13 +258,20 @@ static inline bool is_next_generation(int new_generation, int old_generation) #define TCODE_LINK_INTERNAL 0xe -#define TCODE_IS_READ_REQUEST(tcode) (((tcode) & ~1) == 4) -#define TCODE_IS_BLOCK_PACKET(tcode) (((tcode) & 1) != 0) -#define TCODE_IS_LINK_INTERNAL(tcode) ((tcode) == TCODE_LINK_INTERNAL) -#define TCODE_IS_REQUEST(tcode) (((tcode) & 2) == 0) -#define TCODE_IS_RESPONSE(tcode) (((tcode) & 2) != 0) -#define TCODE_HAS_REQUEST_DATA(tcode) (((tcode) & 12) != 4) -#define TCODE_HAS_RESPONSE_DATA(tcode) (((tcode) & 12) != 0) +static inline bool tcode_is_read_request(unsigned int tcode) +{ + return (tcode & ~1u) == 4u; +} + +static inline bool tcode_is_block_packet(unsigned int tcode) +{ + return (tcode & 1u) != 0u; +} + +static inline bool tcode_is_link_internal(unsigned int tcode) +{ + return (tcode == TCODE_LINK_INTERNAL); +} #define LOCAL_BUS 0xffc0 @@ -244,6 +284,18 @@ int fw_get_response_length(struct fw_request *request); void fw_fill_response(struct fw_packet *response, u32 *request_header, int rcode, void *payload, size_t length); +void fw_request_get(struct fw_request *request); +void fw_request_put(struct fw_request *request); + +void fw_cancel_pending_transactions(struct fw_card *card); + +// Convert the value of IEEE 1394 CYCLE_TIME register to the format of timeStamp field in +// descriptors of 1394 OHCI. 
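// For reference, the CYCLE_TIME layout: secondCount in bits 31-25, cycleCount
// in bits 24-12, cycleOffset in bits 11-0. The OHCI timeStamp keeps the 16
// bits at 27-12, dropping cycleOffset.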
+static inline u32 cycle_time_to_ohci_tstamp(u32 tstamp) +{ + return (tstamp & 0x0ffff000) >> 12; +} + #define FW_PHY_CONFIG_NO_NODE_ID -1 #define FW_PHY_CONFIG_CURRENT_GAP_COUNT -1 void fw_send_phy_config(struct fw_card *card, @@ -254,4 +306,10 @@ static inline bool is_ping_packet(u32 *data) return (data[0] & 0xc0ffffff) == 0 && ~data[0] == data[1]; } +static inline bool is_in_fcp_region(u64 offset, size_t length) +{ + return offset >= (CSR_REGISTER_BASE | CSR_FCP_COMMAND) && + offset + length <= (CSR_REGISTER_BASE | CSR_FCP_END); +} + #endif /* _FIREWIRE_CORE_H */ diff --git a/drivers/firewire/device-attribute-test.c b/drivers/firewire/device-attribute-test.c new file mode 100644 index 000000000000..97478a96d1c9 --- /dev/null +++ b/drivers/firewire/device-attribute-test.c @@ -0,0 +1,253 @@ +// SPDX-License-Identifier: GPL-2.0-only +// +// device-attribute-test.c - An application of Kunit to test implementation for device attributes. +// +// Copyright (c) 2023 Takashi Sakamoto +// +// This file can not be built independently since it is intentionally included in core-device.c. + +#include <kunit/test.h> + +// Configuration ROM for AV/C Devices 1.0 (Dec. 12, 2000, 1394 Trading Association) +// Annex C:Configuration ROM example(informative) +// C.1 Simple AV/C device +// +// Copied from the documentation. +static const u32 simple_avc_config_rom[] = { + 0x0404eabf, + 0x31333934, + 0xe0646102, + 0xffffffff, + 0xffffffff, + 0x00063287, // root directory. + 0x03ffffff, + 0x8100000a, + 0x17ffffff, + 0x8100000e, + 0x0c0083c0, + 0xd1000001, + 0x0004442d, // unit 0 directory. + 0x1200a02d, + 0x13010001, + 0x17ffffff, + 0x81000007, + 0x0005c915, // leaf for textual descriptor. + 0x00000000, + 0x00000000, + 0x56656e64, + 0x6f72204e, + 0x616d6500, + 0x00057f16, // leaf for textual descriptor. + 0x00000000, + 0x00000000, + 0x4d6f6465, + 0x6c204e61, + 0x6d650000, +}; + +// Ibid. +// Annex A:Consideration for configuration ROM reader design (informative) +// A.1 Vendor directory +// +// Written by hand. +static const u32 legacy_avc_config_rom[] = { + 0x04199fe7, + 0x31333934, + 0xe0644000, + 0x00112233, + 0x44556677, + 0x0005dace, // root directory. + 0x03012345, + 0x0c0083c0, + 0x8d000009, + 0xd1000002, + 0xc3000004, + 0x0002e107, // unit 0 directory. + 0x12abcdef, + 0x13543210, + 0x0002cb73, // vendor directory. + 0x17fedcba, + 0x81000004, + 0x00026dc1, // leaf for EUI-64. + 0x00112233, + 0x44556677, + 0x00050e84, // leaf for textual descriptor. + 0x00000000, + 0x00000000, + 0x41424344, + 0x45464748, + 0x494a0000, +}; + +static void device_attr_simple_avc(struct kunit *test) +{ + static const struct fw_device node = { + .device = { + .type = &fw_device_type, + }, + .config_rom = simple_avc_config_rom, + .config_rom_length = sizeof(simple_avc_config_rom), + }; + static const struct fw_unit unit0 = { + .device = { + .type = &fw_unit_type, + .parent = (struct device *)&node.device, + }, + .directory = &simple_avc_config_rom[12], + }; + struct device *node_dev = (struct device *)&node.device; + struct device *unit0_dev = (struct device *)&unit0.device; + static const int unit0_expected_ids[] = {0x00ffffff, 0x00ffffff, 0x0000a02d, 0x00010001}; + char *buf = kunit_kzalloc(test, PAGE_SIZE, GFP_KERNEL); + KUNIT_ASSERT_NOT_ERR_OR_NULL(test, buf); + int ids[4] = {0, 0, 0, 0}; + + // Ensure associations for node and unit devices. 
+ + KUNIT_ASSERT_TRUE(test, is_fw_device(node_dev)); + KUNIT_ASSERT_FALSE(test, is_fw_unit(node_dev)); + KUNIT_ASSERT_PTR_EQ(test, fw_device(node_dev), &node); + + KUNIT_ASSERT_FALSE(test, is_fw_device(unit0_dev)); + KUNIT_ASSERT_TRUE(test, is_fw_unit(unit0_dev)); + KUNIT_ASSERT_PTR_EQ(test, fw_parent_device((&unit0)), &node); + KUNIT_ASSERT_PTR_EQ(test, fw_unit(unit0_dev), &unit0); + + // For entries in root directory. + + // Vendor immediate entry is found. + KUNIT_EXPECT_GT(test, show_immediate(node_dev, &config_rom_attributes[0].attr, buf), 0); + KUNIT_EXPECT_STREQ(test, buf, "0xffffff\n"); + + // Model immediate entry is found. + KUNIT_EXPECT_GT(test, show_immediate(node_dev, &config_rom_attributes[4].attr, buf), 0); + KUNIT_EXPECT_STREQ(test, buf, "0xffffff\n"); + + // Descriptor leaf entry for vendor is found. + KUNIT_EXPECT_GT(test, show_text_leaf(node_dev, &config_rom_attributes[5].attr, buf), 0); + KUNIT_EXPECT_STREQ(test, buf, "Vendor Name\n"); + + // Descriptor leaf entry for model is found. + KUNIT_EXPECT_GT(test, show_text_leaf(node_dev, &config_rom_attributes[6].attr, buf), 0); + KUNIT_EXPECT_STREQ(test, buf, "Model Name\n"); + + // For entries in unit 0 directory. + + // Vendor immediate entry is not found. + KUNIT_EXPECT_LT(test, show_immediate(unit0_dev, &config_rom_attributes[0].attr, buf), 0); + + // Model immediate entry is found. + KUNIT_EXPECT_GT(test, show_immediate(unit0_dev, &config_rom_attributes[4].attr, buf), 0); + KUNIT_EXPECT_STREQ(test, buf, "0xffffff\n"); + + // Descriptor leaf entry for vendor is not found. + KUNIT_EXPECT_LT(test, show_text_leaf(unit0_dev, &config_rom_attributes[5].attr, buf), 0); + + // Descriptor leaf entry for model is found. + KUNIT_EXPECT_GT(test, show_text_leaf(unit0_dev, &config_rom_attributes[6].attr, buf), 0); + KUNIT_EXPECT_STREQ(test, buf, "Model Name\n"); + + // Specifier_ID immediate entry is found. + KUNIT_EXPECT_GT(test, show_immediate(unit0_dev, &config_rom_attributes[2].attr, buf), 0); + KUNIT_EXPECT_STREQ(test, buf, "0x00a02d\n"); + + // Version immediate entry is found. + KUNIT_EXPECT_GT(test, show_immediate(unit0_dev, &config_rom_attributes[3].attr, buf), 0); + KUNIT_EXPECT_STREQ(test, buf, "0x010001\n"); + + kunit_kfree(test, buf); + + get_modalias_ids(&unit0, ids); + KUNIT_EXPECT_MEMEQ(test, ids, unit0_expected_ids, sizeof(ids)); +} + +static void device_attr_legacy_avc(struct kunit *test) +{ + static const struct fw_device node = { + .device = { + .type = &fw_device_type, + }, + .config_rom = legacy_avc_config_rom, + .config_rom_length = sizeof(legacy_avc_config_rom), + }; + static const struct fw_unit unit0 = { + .device = { + .type = &fw_unit_type, + .parent = (struct device *)&node.device, + }, + .directory = &legacy_avc_config_rom[11], + }; + struct device *node_dev = (struct device *)&node.device; + struct device *unit0_dev = (struct device *)&unit0.device; + static const int unit0_expected_ids[] = {0x00012345, 0x00fedcba, 0x00abcdef, 0x00543210}; + char *buf = kunit_kzalloc(test, PAGE_SIZE, GFP_KERNEL); + KUNIT_ASSERT_NOT_ERR_OR_NULL(test, buf); + int ids[4] = {0, 0, 0, 0}; + + // Ensure associations for node and unit devices. 
+ + KUNIT_ASSERT_TRUE(test, is_fw_device(node_dev)); + KUNIT_ASSERT_FALSE(test, is_fw_unit(node_dev)); + KUNIT_ASSERT_PTR_EQ(test, fw_device((node_dev)), &node); + + KUNIT_ASSERT_FALSE(test, is_fw_device(unit0_dev)); + KUNIT_ASSERT_TRUE(test, is_fw_unit(unit0_dev)); + KUNIT_ASSERT_PTR_EQ(test, fw_parent_device((&unit0)), &node); + KUNIT_ASSERT_PTR_EQ(test, fw_unit(unit0_dev), &unit0); + + // For entries in root directory. + + // Vendor immediate entry is found. + KUNIT_EXPECT_GT(test, show_immediate(node_dev, &config_rom_attributes[0].attr, buf), 0); + KUNIT_EXPECT_STREQ(test, buf, "0x012345\n"); + + // Model immediate entry is found. + KUNIT_EXPECT_GT(test, show_immediate(node_dev, &config_rom_attributes[4].attr, buf), 0); + KUNIT_EXPECT_STREQ(test, buf, "0xfedcba\n"); + + // Descriptor leaf entry for vendor is not found. + KUNIT_EXPECT_LT(test, show_text_leaf(node_dev, &config_rom_attributes[5].attr, buf), 0); + + // Descriptor leaf entry for model is found. + KUNIT_EXPECT_GT(test, show_text_leaf(node_dev, &config_rom_attributes[6].attr, buf), 0); + KUNIT_EXPECT_STREQ(test, buf, "ABCDEFGHIJ\n"); + + // For entries in unit 0 directory. + + // Vendor immediate entry is not found. + KUNIT_EXPECT_LT(test, show_immediate(unit0_dev, &config_rom_attributes[0].attr, buf), 0); + + // Model immediate entry is not found. + KUNIT_EXPECT_LT(test, show_immediate(unit0_dev, &config_rom_attributes[4].attr, buf), 0); + + // Descriptor leaf entry for vendor is not found. + KUNIT_EXPECT_LT(test, show_text_leaf(unit0_dev, &config_rom_attributes[5].attr, buf), 0); + + // Descriptor leaf entry for model is not found. + KUNIT_EXPECT_LT(test, show_text_leaf(unit0_dev, &config_rom_attributes[6].attr, buf), 0); + + // Specifier_ID immediate entry is found. + KUNIT_EXPECT_GT(test, show_immediate(unit0_dev, &config_rom_attributes[2].attr, buf), 0); + KUNIT_EXPECT_STREQ(test, buf, "0xabcdef\n"); + + // Version immediate entry is found. + KUNIT_EXPECT_GT(test, show_immediate(unit0_dev, &config_rom_attributes[3].attr, buf), 0); + KUNIT_EXPECT_STREQ(test, buf, "0x543210\n"); + + kunit_kfree(test, buf); + + get_modalias_ids(&unit0, ids); + KUNIT_EXPECT_MEMEQ(test, ids, unit0_expected_ids, sizeof(ids)); +} + +static struct kunit_case device_attr_test_cases[] = { + KUNIT_CASE(device_attr_simple_avc), + KUNIT_CASE(device_attr_legacy_avc), + {} +}; + +static struct kunit_suite device_attr_test_suite = { + .name = "firewire-device-attribute", + .test_cases = device_attr_test_cases, +}; +kunit_test_suite(device_attr_test_suite); diff --git a/drivers/firewire/init_ohci1394_dma.c b/drivers/firewire/init_ohci1394_dma.c index 659256927b42..121f0c2f6401 100644 --- a/drivers/firewire/init_ohci1394_dma.c +++ b/drivers/firewire/init_ohci1394_dma.c @@ -167,6 +167,7 @@ static inline void __init init_ohci1394_initialize(struct ohci *ohci) /** * init_ohci1394_wait_for_busresets - wait until bus resets are completed + * @ohci: Pointer to the OHCI-1394 controller structure * * OHCI1394 initialization itself and any device going on- or offline * and any cable issue cause a IEEE1394 bus reset. The OHCI1394 spec @@ -189,6 +190,8 @@ static inline void __init init_ohci1394_wait_for_busresets(struct ohci *ohci) /** * init_ohci1394_enable_physical_dma - Enable physical DMA for remote debugging + * @ohci: Pointer to the OHCI-1394 controller structure + * * This enables remote DMA access over IEEE1394 from every host for the low * 4GB of address space. DMA accesses above 4GB are not available currently. 
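 * (The OHCI physical request unit only decodes offsets below its upper bound; accesses above
 * it are routed to the asynchronous request-receive DMA context instead.)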
*/ @@ -201,6 +204,8 @@ static inline void __init init_ohci1394_enable_physical_dma(struct ohci *ohci) /** * init_ohci1394_reset_and_init_dma - init controller and enable DMA + * @ohci: Pointer to the OHCI-1394 controller structure + * * This initializes the given controller and enables physical DMA engine in it. */ static inline void __init init_ohci1394_reset_and_init_dma(struct ohci *ohci) @@ -230,6 +235,10 @@ static inline void __init init_ohci1394_reset_and_init_dma(struct ohci *ohci) /** * init_ohci1394_controller - Map the registers of the controller and init DMA + * @num: PCI bus number + * @slot: PCI device number + * @func: PCI function number + * * This maps the registers of the specified controller and initializes it */ static inline void __init init_ohci1394_controller(int num, int slot, int func) @@ -251,7 +260,7 @@ static inline void __init init_ohci1394_controller(int num, int slot, int func) } /** - * debug_init_ohci1394_dma - scan for OHCI1394 controllers and init DMA on them + * init_ohci1394_dma_on_all_controllers - scan for OHCI1394 controllers and init DMA on them * Scans the whole PCI space for OHCI1394 controllers and inits DMA on them */ void __init init_ohci1394_dma_on_all_controllers(void) @@ -283,7 +292,8 @@ void __init init_ohci1394_dma_on_all_controllers(void) } /** - * setup_init_ohci1394_early - enables early OHCI1394 DMA initialization + * setup_ohci1394_dma - enables early OHCI1394 DMA initialization + * @opt: Kernel command line parameter string */ static int __init setup_ohci1394_dma(char *opt) { diff --git a/drivers/firewire/net.c b/drivers/firewire/net.c index 715e491dfbc3..6d6446713539 100644 --- a/drivers/firewire/net.c +++ b/drivers/firewire/net.c @@ -28,7 +28,7 @@ #include <linux/slab.h> #include <linux/spinlock.h> -#include <asm/unaligned.h> +#include <linux/unaligned.h> #include <net/arp.h> #include <net/firewire.h> @@ -202,15 +202,6 @@ struct fwnet_packet_task { }; /* - * Get fifo address embedded in hwaddr - */ -static __u64 fwnet_hwaddr_fifo(union fwnet_hwaddr *ha) -{ - return (u64)get_unaligned_be16(&ha->uc.fifo_hi) << 32 - | get_unaligned_be32(&ha->uc.fifo_lo); -} - -/* * saddr == NULL means use device source address. * daddr == NULL means leave destination address (eg unresolved arp). */ @@ -488,9 +479,7 @@ static int fwnet_finish_incoming_packet(struct net_device *net, struct sk_buff *skb, u16 source_node_id, bool is_broadcast, u16 ether_type) { - struct fwnet_device *dev; - int status; - __be64 guid; + int status, len; switch (ether_type) { case ETH_P_ARP: @@ -503,7 +492,6 @@ static int fwnet_finish_incoming_packet(struct net_device *net, goto err; } - dev = netdev_priv(net); /* Write metadata, and then pass to the receive level */ skb->dev = net; skb->ip_summed = CHECKSUM_NONE; @@ -512,7 +500,6 @@ static int fwnet_finish_incoming_packet(struct net_device *net, * Parse the encapsulation header. This actually does the job of * converting to an ethernet-like pseudo frame header. */ - guid = cpu_to_be64(dev->card->guid); if (dev_hard_header(skb, net, ether_type, is_broadcast ? 
net->broadcast : net->dev_addr, NULL, skb->len) >= 0) {
@@ -546,13 +533,15 @@ } skb->protocol = protocol; } + + len = skb->len; status = netif_rx(skb); if (status == NET_RX_DROP) { net->stats.rx_errors++; net->stats.rx_dropped++; } else { net->stats.rx_packets++; - net->stats.rx_bytes += skb->len; + net->stats.rx_bytes += len; } return 0;
@@ -719,21 +708,22 @@ static void fwnet_receive_packet(struct fw_card *card, struct fw_request *r, int rcode; if (destination == IEEE1394_ALL_NODES) { - kfree(r); - - return; - } - - if (offset != dev->handler.offset) + // Although a response to the broadcast packet is not strictly required, fw_send_response() + // should still be called to maintain the reference count of the request object. In that + // case, the call simply releases the object by dropping its reference count. + rcode = RCODE_COMPLETE; + } else if (offset != dev->handler.offset) { rcode = RCODE_ADDRESS_ERROR; - else if (tcode != TCODE_WRITE_BLOCK_REQUEST) + } else if (tcode != TCODE_WRITE_BLOCK_REQUEST) { rcode = RCODE_TYPE_ERROR; - else if (fwnet_incoming_packet(dev, payload, length, - source, generation, false) != 0) { + } else if (fwnet_incoming_packet(dev, payload, length, + source, generation, false) != 0) { dev_err(&dev->netdev->dev, "incoming packet failure\n"); rcode = RCODE_CONFLICT_ERROR; - } else + } else { rcode = RCODE_COMPLETE; + } fw_send_response(card, r, rcode); }
@@ -1017,7 +1007,7 @@ static int fwnet_send_packet(struct fwnet_packet_task *ptask) spin_lock_irqsave(&dev->lock, flags); - /* If the AT tasklet already ran, we may be last user. */ + /* If the AT work item already ran, we may be last user. */ free = (ptask->outstanding_pkts == 0 && !ptask->enqueued); if (!free) ptask->enqueued = true;
@@ -1036,7 +1026,7 @@ spin_lock_irqsave(&dev->lock, flags); - /* If the AT tasklet already ran, we may be last user. */ + /* If the AT work item already ran, we may be last user.
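+ * (The path that finds outstanding_pkts == 0 after the other has run performs the release:
+ * the submitter frees ptask while enqueued is still false, the completion handler frees it
+ * once the submitter has set the flag.)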
*/ free = (ptask->outstanding_pkts == 0 && !ptask->enqueued); if (!free) ptask->enqueued = true; @@ -1310,7 +1300,7 @@ static netdev_tx_t fwnet_tx(struct sk_buff *skb, struct net_device *net) max_payload = peer->max_payload; datagram_label_ptr = &peer->datagram_label; - ptask->fifo_addr = fwnet_hwaddr_fifo(ha); + ptask->fifo_addr = get_unaligned_be48(ha->uc.fifo); ptask->generation = generation; ptask->dest_node = dest_node; ptask->speed = peer->speed; @@ -1447,8 +1437,8 @@ static int fwnet_probe(struct fw_unit *unit, struct net_device *net; bool allocated_netdev = false; struct fwnet_device *dev; + union fwnet_hwaddr ha; int ret; - union fwnet_hwaddr *ha; mutex_lock(&fwnet_device_mutex); @@ -1495,12 +1485,11 @@ static int fwnet_probe(struct fw_unit *unit, net->max_mtu = 4096U; /* Set our hardware address while we're at it */ - ha = (union fwnet_hwaddr *)net->dev_addr; - put_unaligned_be64(card->guid, &ha->uc.uniq_id); - ha->uc.max_rec = dev->card->max_receive; - ha->uc.sspd = dev->card->link_speed; - put_unaligned_be16(dev->local_fifo >> 32, &ha->uc.fifo_hi); - put_unaligned_be32(dev->local_fifo & 0xffffffff, &ha->uc.fifo_lo); + ha.uc.uniq_id = cpu_to_be64(card->guid); + ha.uc.max_rec = dev->card->max_receive; + ha.uc.sspd = dev->card->link_speed; + put_unaligned_be48(dev->local_fifo, ha.uc.fifo); + dev_addr_set(net, ha.u); memset(net->broadcast, -1, net->addr_len); diff --git a/drivers/firewire/nosy.c b/drivers/firewire/nosy.c index b0d671db178a..ea31ac7ac1ca 100644 --- a/drivers/firewire/nosy.c +++ b/drivers/firewire/nosy.c @@ -148,10 +148,12 @@ packet_buffer_get(struct client *client, char __user *data, size_t user_length) if (atomic_read(&buffer->size) == 0) return -ENODEV; - /* FIXME: Check length <= user_length. */ + length = buffer->head->length; + + if (length > user_length) + return 0; end = buffer->data + buffer->capacity; - length = buffer->head->length; if (&buffer->head->data[length] < end) { if (copy_to_user(data, buffer->head->data, length)) diff --git a/drivers/firewire/ohci-serdes-test.c b/drivers/firewire/ohci-serdes-test.c new file mode 100644 index 000000000000..258f668619ef --- /dev/null +++ b/drivers/firewire/ohci-serdes-test.c @@ -0,0 +1,122 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +// +// ohci-serdes-test.c - An application of Kunit to check serialization/deserialization of data in +// buffers and registers defined in 1394 OHCI specification. 
+// +// Copyright (c) 2024 Takashi Sakamoto + +#include <kunit/test.h> + +#include "ohci.h" + + +static void test_self_id_count_register_deserialization(struct kunit *test) +{ + const u32 expected = 0x803d0594; + + bool is_error = ohci1394_self_id_count_is_error(expected); + u8 generation = ohci1394_self_id_count_get_generation(expected); + u32 size = ohci1394_self_id_count_get_size(expected); + + KUNIT_EXPECT_TRUE(test, is_error); + KUNIT_EXPECT_EQ(test, 0x3d, generation); + KUNIT_EXPECT_EQ(test, 0x165, size); +} + +static void test_self_id_receive_buffer_deserialization(struct kunit *test) +{ + const u32 buffer[] = { + 0x0006f38b, + 0x807fcc56, + 0x7f8033a9, + 0x8145cc5e, + 0x7eba33a1, + }; + + u8 generation = ohci1394_self_id_receive_q0_get_generation(buffer[0]); + u16 timestamp = ohci1394_self_id_receive_q0_get_timestamp(buffer[0]); + + KUNIT_EXPECT_EQ(test, 0x6, generation); + KUNIT_EXPECT_EQ(test, 0xf38b, timestamp); +} + +static void test_at_data_serdes(struct kunit *test) +{ + static const __le32 expected[] = { + cpu_to_le32(0x00020e80), + cpu_to_le32(0xffc2ffff), + cpu_to_le32(0xe0000000), + }; + __le32 quadlets[] = {0, 0, 0}; + bool has_src_bus_id = ohci1394_at_data_get_src_bus_id(expected); + unsigned int speed = ohci1394_at_data_get_speed(expected); + unsigned int tlabel = ohci1394_at_data_get_tlabel(expected); + unsigned int retry = ohci1394_at_data_get_retry(expected); + unsigned int tcode = ohci1394_at_data_get_tcode(expected); + unsigned int destination_id = ohci1394_at_data_get_destination_id(expected); + u64 destination_offset = ohci1394_at_data_get_destination_offset(expected); + + KUNIT_EXPECT_FALSE(test, has_src_bus_id); + KUNIT_EXPECT_EQ(test, 0x02, speed); + KUNIT_EXPECT_EQ(test, 0x03, tlabel); + KUNIT_EXPECT_EQ(test, 0x02, retry); + KUNIT_EXPECT_EQ(test, 0x08, tcode); + + ohci1394_at_data_set_src_bus_id(quadlets, has_src_bus_id); + ohci1394_at_data_set_speed(quadlets, speed); + ohci1394_at_data_set_tlabel(quadlets, tlabel); + ohci1394_at_data_set_retry(quadlets, retry); + ohci1394_at_data_set_tcode(quadlets, tcode); + ohci1394_at_data_set_destination_id(quadlets, destination_id); + ohci1394_at_data_set_destination_offset(quadlets, destination_offset); + + KUNIT_EXPECT_MEMEQ(test, quadlets, expected, sizeof(expected)); +} + +static void test_it_data_serdes(struct kunit *test) +{ + static const __le32 expected[] = { + cpu_to_le32(0x000349a7), + cpu_to_le32(0x02300000), + }; + __le32 quadlets[] = {0, 0}; + unsigned int scode = ohci1394_it_data_get_speed(expected); + unsigned int tag = ohci1394_it_data_get_tag(expected); + unsigned int channel = ohci1394_it_data_get_channel(expected); + unsigned int tcode = ohci1394_it_data_get_tcode(expected); + unsigned int sync = ohci1394_it_data_get_sync(expected); + unsigned int data_length = ohci1394_it_data_get_data_length(expected); + + KUNIT_EXPECT_EQ(test, 0x03, scode); + KUNIT_EXPECT_EQ(test, 0x01, tag); + KUNIT_EXPECT_EQ(test, 0x09, channel); + KUNIT_EXPECT_EQ(test, 0x0a, tcode); + KUNIT_EXPECT_EQ(test, 0x7, sync); + KUNIT_EXPECT_EQ(test, 0x0230, data_length); + + ohci1394_it_data_set_speed(quadlets, scode); + ohci1394_it_data_set_tag(quadlets, tag); + ohci1394_it_data_set_channel(quadlets, channel); + ohci1394_it_data_set_tcode(quadlets, tcode); + ohci1394_it_data_set_sync(quadlets, sync); + ohci1394_it_data_set_data_length(quadlets, data_length); + + KUNIT_EXPECT_MEMEQ(test, quadlets, expected, sizeof(expected)); +} + +static struct kunit_case ohci_serdes_test_cases[] = { + 
KUNIT_CASE(test_self_id_count_register_deserialization), + KUNIT_CASE(test_self_id_receive_buffer_deserialization), + KUNIT_CASE(test_at_data_serdes), + KUNIT_CASE(test_it_data_serdes), + {} +}; + +static struct kunit_suite ohci_serdes_test_suite = { + .name = "firewire-ohci-serdes", + .test_cases = ohci_serdes_test_cases, +}; +kunit_test_suite(ohci_serdes_test_suite); + +MODULE_DESCRIPTION("FireWire buffers and registers serialization/deserialization unit test suite"); +MODULE_LICENSE("GPL"); diff --git a/drivers/firewire/ohci.c b/drivers/firewire/ohci.c index 17c9d825188b..e3e78dc42530 100644 --- a/drivers/firewire/ohci.c +++ b/drivers/firewire/ohci.c @@ -40,8 +40,16 @@ #include "core.h" #include "ohci.h" +#include "packet-header-definitions.h" +#include "phy-packet-definitions.h" + +#include <trace/events/firewire.h> + +static u32 cond_le32_to_cpu(__le32 value, bool has_be_header_quirk); + +#define CREATE_TRACE_POINTS +#include <trace/events/firewire_ohci.h> -#define ohci_info(ohci, f, args...) dev_info(ohci->card.device, f, ##args) #define ohci_notice(ohci, f, args...) dev_notice(ohci->card.device, f, ##args) #define ohci_err(ohci, f, args...) dev_err(ohci->card.device, f, ##args) @@ -68,7 +76,7 @@ struct descriptor { __le32 branch_address; __le16 res_count; __le16 transfer_status; -} __attribute__((aligned(16))); +} __aligned(16); #define CONTROL_SET(regs) (regs) #define CONTROL_CLEAR(regs) ((regs) + 4) @@ -93,7 +101,7 @@ struct ar_context { void *pointer; unsigned int last_buffer_index; u32 regs; - struct tasklet_struct tasklet; + struct work_struct work; }; struct context; @@ -120,7 +128,6 @@ struct context { int total_allocation; u32 current_bus; bool running; - bool flushing; /* * List of page-sized buffers for storing DMA descriptors. @@ -149,16 +156,13 @@ struct context { int prev_z; descriptor_callback_t callback; - - struct tasklet_struct tasklet; }; -#define IT_HEADER_SY(v) ((v) << 0) -#define IT_HEADER_TCODE(v) ((v) << 4) -#define IT_HEADER_CHANNEL(v) ((v) << 8) -#define IT_HEADER_TAG(v) ((v) << 14) -#define IT_HEADER_SPEED(v) ((v) << 16) -#define IT_HEADER_DATA_LENGTH(v) ((v) << 16) +struct at_context { + struct context context; + struct work_struct work; + bool flushing; +}; struct iso_context { struct fw_iso_context base; @@ -173,7 +177,7 @@ struct iso_context { u8 tags; }; -#define CONFIG_ROM_SIZE 1024 +#define CONFIG_ROM_SIZE (CSR_CONFIG_ROM_END - CSR_CONFIG_ROM) struct fw_ohci { struct fw_card card; @@ -203,8 +207,8 @@ struct fw_ohci { struct ar_context ar_request_ctx; struct ar_context ar_response_ctx; - struct context at_request_ctx; - struct context at_response_ctx; + struct at_context at_request_ctx; + struct at_context at_response_ctx; u32 it_context_support; u32 it_context_mask; /* unoccupied IT contexts */ @@ -224,13 +228,10 @@ struct fw_ohci { __le32 *self_id; dma_addr_t self_id_bus; - struct work_struct bus_reset_work; u32 self_id_buffer[512]; }; -static struct workqueue_struct *selfid_workqueue; - static inline struct fw_ohci *fw_ohci(struct fw_card *card) { return container_of(card, struct fw_ohci, card); @@ -255,7 +256,6 @@ static inline struct fw_ohci *fw_ohci(struct fw_card *card) #define OHCI1394_REGISTER_SIZE 0x800 #define OHCI1394_PCI_HCI_Control 0x40 #define SELF_ID_BUF_SIZE 0x800 -#define OHCI_TCODE_PHY_PACKET 0x0e #define OHCI_VERSION_1_1 0x010010 static char ohci_driver_name[] = KBUILD_MODNAME; @@ -279,6 +279,51 @@ static char ohci_driver_name[] = KBUILD_MODNAME; #define QUIRK_TI_SLLZ059 0x20 #define QUIRK_IR_WAKE 0x40 +// On PCI Express Root 
Complex of any type of AMD Ryzen machine, a VIA VT6306/6307/6308 behind an Asmedia
+ASM1083/1085 has the inconvenience that read accesses to the 'Isochronous Cycle Timer' register
+(at offset 0xf0 in PCI I/O space) often cause an unexpected system reboot. The mechanism is not
+clear, since read accesses to the other registers (e.g. the 'Node ID' register) are safe enough;
+it is probably due to detection of some type of PCIe error.
+#define QUIRK_REBOOT_BY_CYCLE_TIMER_READ 0x80000000 + +#if IS_ENABLED(CONFIG_X86) + +static bool has_reboot_by_cycle_timer_read_quirk(const struct fw_ohci *ohci) +{ + return !!(ohci->quirks & QUIRK_REBOOT_BY_CYCLE_TIMER_READ); +} + +#define PCI_DEVICE_ID_ASMEDIA_ASM108X 0x1080 + +static bool detect_vt630x_with_asm1083_on_amd_ryzen_machine(const struct pci_dev *pdev) +{ + const struct pci_dev *pcie_to_pci_bridge; + + // Detect any type of AMD Ryzen machine. + if (!static_cpu_has(X86_FEATURE_ZEN)) + return false; + + // Detect VIA VT6306/6307/6308. + if (pdev->vendor != PCI_VENDOR_ID_VIA) + return false; + if (pdev->device != PCI_DEVICE_ID_VIA_VT630X) + return false; + + // Detect Asmedia ASM1083/1085. + pcie_to_pci_bridge = pdev->bus->self; + if (pcie_to_pci_bridge->vendor != PCI_VENDOR_ID_ASMEDIA) + return false; + if (pcie_to_pci_bridge->device != PCI_DEVICE_ID_ASMEDIA_ASM108X) + return false; + + return true; +} + +#else +#define has_reboot_by_cycle_timer_read_quirk(ohci) false +#define detect_vt630x_with_asm1083_on_amd_ryzen_machine(pdev) false +#endif + /* In case of multiple matches in ohci_quirks[], only the first one is used. */ static const struct { unsigned short vendor, device, revision, flags;
@@ -345,185 +390,10 @@ MODULE_PARM_DESC(quirks, "Chip quirks (default = 0" ", IR wake unreliable = " __stringify(QUIRK_IR_WAKE) ")"); -#define OHCI_PARAM_DEBUG_AT_AR 1 -#define OHCI_PARAM_DEBUG_SELFIDS 2 -#define OHCI_PARAM_DEBUG_IRQS 4 -#define OHCI_PARAM_DEBUG_BUSRESETS 8 /* only effective before chip init */ - -static int param_debug; -module_param_named(debug, param_debug, int, 0644); -MODULE_PARM_DESC(debug, "Verbose logging (default = 0" - ", AT/AR events = " __stringify(OHCI_PARAM_DEBUG_AT_AR) - ", self-IDs = " __stringify(OHCI_PARAM_DEBUG_SELFIDS) - ", IRQs = " __stringify(OHCI_PARAM_DEBUG_IRQS) - ", busReset events = " __stringify(OHCI_PARAM_DEBUG_BUSRESETS) - ", or a combination, or all = -1)"); - static bool param_remote_dma; module_param_named(remote_dma, param_remote_dma, bool, 0444); MODULE_PARM_DESC(remote_dma, "Enable unfiltered remote DMA (default = N)"); -static void log_irqs(struct fw_ohci *ohci, u32 evt) -{ - if (likely(!(param_debug & - (OHCI_PARAM_DEBUG_IRQS | OHCI_PARAM_DEBUG_BUSRESETS)))) - return; - - if (!(param_debug & OHCI_PARAM_DEBUG_IRQS) && - !(evt & OHCI1394_busReset)) - return; - - ohci_notice(ohci, "IRQ %08x%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s\n", evt, - evt & OHCI1394_selfIDComplete ? " selfID" : "", - evt & OHCI1394_RQPkt ? " AR_req" : "", - evt & OHCI1394_RSPkt ? " AR_resp" : "", - evt & OHCI1394_reqTxComplete ? " AT_req" : "", - evt & OHCI1394_respTxComplete ? " AT_resp" : "", - evt & OHCI1394_isochRx ? " IR" : "", - evt & OHCI1394_isochTx ? " IT" : "", - evt & OHCI1394_postedWriteErr ? " postedWriteErr" : "", - evt & OHCI1394_cycleTooLong ? " cycleTooLong" : "", - evt & OHCI1394_cycle64Seconds ? " cycle64Seconds" : "", - evt & OHCI1394_cycleInconsistent ? " cycleInconsistent" : "", - evt & OHCI1394_regAccessFail ? " regAccessFail" : "", - evt & OHCI1394_unrecoverableError ?
" unrecoverableError" : "", - evt & OHCI1394_busReset ? " busReset" : "", - evt & ~(OHCI1394_selfIDComplete | OHCI1394_RQPkt | - OHCI1394_RSPkt | OHCI1394_reqTxComplete | - OHCI1394_respTxComplete | OHCI1394_isochRx | - OHCI1394_isochTx | OHCI1394_postedWriteErr | - OHCI1394_cycleTooLong | OHCI1394_cycle64Seconds | - OHCI1394_cycleInconsistent | - OHCI1394_regAccessFail | OHCI1394_busReset) - ? " ?" : ""); -} - -static const char *speed[] = { - [0] = "S100", [1] = "S200", [2] = "S400", [3] = "beta", -}; -static const char *power[] = { - [0] = "+0W", [1] = "+15W", [2] = "+30W", [3] = "+45W", - [4] = "-3W", [5] = " ?W", [6] = "-3..-6W", [7] = "-3..-10W", -}; -static const char port[] = { '.', '-', 'p', 'c', }; - -static char _p(u32 *s, int shift) -{ - return port[*s >> shift & 3]; -} - -static void log_selfids(struct fw_ohci *ohci, int generation, int self_id_count) -{ - u32 *s; - - if (likely(!(param_debug & OHCI_PARAM_DEBUG_SELFIDS))) - return; - - ohci_notice(ohci, "%d selfIDs, generation %d, local node ID %04x\n", - self_id_count, generation, ohci->node_id); - - for (s = ohci->self_id_buffer; self_id_count--; ++s) - if ((*s & 1 << 23) == 0) - ohci_notice(ohci, - "selfID 0: %08x, phy %d [%c%c%c] %s gc=%d %s %s%s%s\n", - *s, *s >> 24 & 63, _p(s, 6), _p(s, 4), _p(s, 2), - speed[*s >> 14 & 3], *s >> 16 & 63, - power[*s >> 8 & 7], *s >> 22 & 1 ? "L" : "", - *s >> 11 & 1 ? "c" : "", *s & 2 ? "i" : ""); - else - ohci_notice(ohci, - "selfID n: %08x, phy %d [%c%c%c%c%c%c%c%c]\n", - *s, *s >> 24 & 63, - _p(s, 16), _p(s, 14), _p(s, 12), _p(s, 10), - _p(s, 8), _p(s, 6), _p(s, 4), _p(s, 2)); -} - -static const char *evts[] = { - [0x00] = "evt_no_status", [0x01] = "-reserved-", - [0x02] = "evt_long_packet", [0x03] = "evt_missing_ack", - [0x04] = "evt_underrun", [0x05] = "evt_overrun", - [0x06] = "evt_descriptor_read", [0x07] = "evt_data_read", - [0x08] = "evt_data_write", [0x09] = "evt_bus_reset", - [0x0a] = "evt_timeout", [0x0b] = "evt_tcode_err", - [0x0c] = "-reserved-", [0x0d] = "-reserved-", - [0x0e] = "evt_unknown", [0x0f] = "evt_flushed", - [0x10] = "-reserved-", [0x11] = "ack_complete", - [0x12] = "ack_pending ", [0x13] = "-reserved-", - [0x14] = "ack_busy_X", [0x15] = "ack_busy_A", - [0x16] = "ack_busy_B", [0x17] = "-reserved-", - [0x18] = "-reserved-", [0x19] = "-reserved-", - [0x1a] = "-reserved-", [0x1b] = "ack_tardy", - [0x1c] = "-reserved-", [0x1d] = "ack_data_error", - [0x1e] = "ack_type_error", [0x1f] = "-reserved-", - [0x20] = "pending/cancelled", -}; -static const char *tcodes[] = { - [0x0] = "QW req", [0x1] = "BW req", - [0x2] = "W resp", [0x3] = "-reserved-", - [0x4] = "QR req", [0x5] = "BR req", - [0x6] = "QR resp", [0x7] = "BR resp", - [0x8] = "cycle start", [0x9] = "Lk req", - [0xa] = "async stream packet", [0xb] = "Lk resp", - [0xc] = "-reserved-", [0xd] = "-reserved-", - [0xe] = "link internal", [0xf] = "-reserved-", -}; - -static void log_ar_at_event(struct fw_ohci *ohci, - char dir, int speed, u32 *header, int evt) -{ - int tcode = header[0] >> 4 & 0xf; - char specific[12]; - - if (likely(!(param_debug & OHCI_PARAM_DEBUG_AT_AR))) - return; - - if (unlikely(evt >= ARRAY_SIZE(evts))) - evt = 0x1f; - - if (evt == OHCI1394_evt_bus_reset) { - ohci_notice(ohci, "A%c evt_bus_reset, generation %d\n", - dir, (header[2] >> 16) & 0xff); - return; - } - - switch (tcode) { - case 0x0: case 0x6: case 0x8: - snprintf(specific, sizeof(specific), " = %08x", - be32_to_cpu((__force __be32)header[3])); - break; - case 0x1: case 0x5: case 0x7: case 0x9: case 0xb: - snprintf(specific, 
sizeof(specific), " %x,%x", - header[3] >> 16, header[3] & 0xffff); - break; - default: - specific[0] = '\0'; - } - - switch (tcode) { - case 0xa: - ohci_notice(ohci, "A%c %s, %s\n", - dir, evts[evt], tcodes[tcode]); - break; - case 0xe: - ohci_notice(ohci, "A%c %s, PHY %08x %08x\n", - dir, evts[evt], header[1], header[2]); - break; - case 0x0: case 0x1: case 0x4: case 0x5: case 0x9: - ohci_notice(ohci, - "A%c spd %x tl %02x, %04x -> %04x, %s, %s, %04x%08x%s\n", - dir, speed, header[0] >> 10 & 0x3f, - header[1] >> 16, header[0] >> 16, evts[evt], - tcodes[tcode], header[1] & 0xffff, header[2], specific); - break; - default: - ohci_notice(ohci, - "A%c spd %x tl %02x, %04x -> %04x, %s, %s%s\n", - dir, speed, header[0] >> 10 & 0x3f, - header[1] >> 16, header[0] >> 16, evts[evt], - tcodes[tcode], specific); - } -} - static inline void reg_write(const struct fw_ohci *ohci, int offset, u32 data) { writel(data, ohci->registers + offset); @@ -627,26 +497,20 @@ static int read_paged_phy_reg(struct fw_ohci *ohci, int page, int addr) static int ohci_read_phy_reg(struct fw_card *card, int addr) { struct fw_ohci *ohci = fw_ohci(card); - int ret; - mutex_lock(&ohci->phy_reg_mutex); - ret = read_phy_reg(ohci, addr); - mutex_unlock(&ohci->phy_reg_mutex); + guard(mutex)(&ohci->phy_reg_mutex); - return ret; + return read_phy_reg(ohci, addr); } static int ohci_update_phy_reg(struct fw_card *card, int addr, int clear_bits, int set_bits) { struct fw_ohci *ohci = fw_ohci(card); - int ret; - mutex_lock(&ohci->phy_reg_mutex); - ret = update_phy_reg(ohci, addr, clear_bits, set_bits); - mutex_unlock(&ohci->phy_reg_mutex); + guard(mutex)(&ohci->phy_reg_mutex); - return ret; + return update_phy_reg(ohci, addr, clear_bits, set_bits); } static inline dma_addr_t ar_buffer_bus(struct ar_context *ctx, unsigned int i) @@ -677,6 +541,9 @@ static void ar_context_release(struct ar_context *ctx) struct device *dev = ctx->ohci->card.device; unsigned int i; + if (!ctx->buffer) + return; + vunmap(ctx->buffer); for (i = 0; i < AR_BUFFERS; i++) { @@ -788,10 +655,25 @@ static void ar_sync_buffers_for_cpu(struct ar_context *ctx, } #if defined(CONFIG_PPC_PMAC) && defined(CONFIG_PPC32) -#define cond_le32_to_cpu(v) \ - (ohci->quirks & QUIRK_BE_HEADERS ? (__force __u32)(v) : le32_to_cpu(v)) +static u32 cond_le32_to_cpu(__le32 value, bool has_be_header_quirk) +{ + return has_be_header_quirk ? 
(__force __u32)value : le32_to_cpu(value); +} + +static bool has_be_header_quirk(const struct fw_ohci *ohci) +{ + return !!(ohci->quirks & QUIRK_BE_HEADERS); +} #else -#define cond_le32_to_cpu(v) le32_to_cpu(v) +static u32 cond_le32_to_cpu(__le32 value, bool has_be_header_quirk __maybe_unused) +{ + return le32_to_cpu(value); +} + +static bool has_be_header_quirk(const struct fw_ohci *ohci) +{ + return false; +} #endif static __le32 *handle_ar_packet(struct ar_context *ctx, __le32 *buffer) @@ -801,11 +683,11 @@ static __le32 *handle_ar_packet(struct ar_context *ctx, __le32 *buffer) u32 status, length, tcode; int evt; - p.header[0] = cond_le32_to_cpu(buffer[0]); - p.header[1] = cond_le32_to_cpu(buffer[1]); - p.header[2] = cond_le32_to_cpu(buffer[2]); + p.header[0] = cond_le32_to_cpu(buffer[0], has_be_header_quirk(ohci)); + p.header[1] = cond_le32_to_cpu(buffer[1], has_be_header_quirk(ohci)); + p.header[2] = cond_le32_to_cpu(buffer[2], has_be_header_quirk(ohci)); - tcode = (p.header[0] >> 4) & 0x0f; + tcode = async_header_get_tcode(p.header); switch (tcode) { case TCODE_WRITE_QUADLET_REQUEST: case TCODE_READ_QUADLET_RESPONSE: @@ -815,7 +697,7 @@ static __le32 *handle_ar_packet(struct ar_context *ctx, __le32 *buffer) break; case TCODE_READ_BLOCK_REQUEST : - p.header[3] = cond_le32_to_cpu(buffer[3]); + p.header[3] = cond_le32_to_cpu(buffer[3], has_be_header_quirk(ohci)); p.header_length = 16; p.payload_length = 0; break; @@ -824,9 +706,9 @@ static __le32 *handle_ar_packet(struct ar_context *ctx, __le32 *buffer) case TCODE_READ_BLOCK_RESPONSE: case TCODE_LOCK_REQUEST: case TCODE_LOCK_RESPONSE: - p.header[3] = cond_le32_to_cpu(buffer[3]); + p.header[3] = cond_le32_to_cpu(buffer[3], has_be_header_quirk(ohci)); p.header_length = 16; - p.payload_length = p.header[3] >> 16; + p.payload_length = async_header_get_data_length(p.header); if (p.payload_length > MAX_ASYNC_PAYLOAD) { ar_context_abort(ctx, "invalid packet length"); return NULL; @@ -835,7 +717,7 @@ static __le32 *handle_ar_packet(struct ar_context *ctx, __le32 *buffer) case TCODE_WRITE_RESPONSE: case TCODE_READ_QUADLET_REQUEST: - case OHCI_TCODE_PHY_PACKET: + case TCODE_LINK_INTERNAL: p.header_length = 12; p.payload_length = 0; break; @@ -849,7 +731,7 @@ static __le32 *handle_ar_packet(struct ar_context *ctx, __le32 *buffer) /* FIXME: What to do about evt_* errors? */ length = (p.header_length + p.payload_length + 3) / 4; - status = cond_le32_to_cpu(buffer[length]); + status = cond_le32_to_cpu(buffer[length], has_be_header_quirk(ohci)); evt = (status >> 16) & 0x1f; p.ack = evt - 16; @@ -857,14 +739,11 @@ static __le32 *handle_ar_packet(struct ar_context *ctx, __le32 *buffer) p.timestamp = status & 0xffff; p.generation = ohci->request_generation; - log_ar_at_event(ohci, 'R', p.speed, p.header, evt); - /* * Several controllers, notably from NEC and VIA, forget to * write ack_complete status at PHY packet reception. */ - if (evt == OHCI1394_evt_no_status && - (p.header[0] & 0xff) == (OHCI1394_phy_tcode << 4)) + if (evt == OHCI1394_evt_no_status && tcode == TCODE_LINK_INTERNAL) p.ack = ACK_COMPLETE; /* @@ -878,7 +757,7 @@ static __le32 *handle_ar_packet(struct ar_context *ctx, __le32 *buffer) * * Alas some chips sometimes emit bus reset packets with a * wrong generation. We set the correct generation for these - * at a slightly incorrect time (in bus_reset_work). + * at a slightly incorrect time (in handle_selfid_complete_event). 
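+ * (With QUIRK_RESET_PACKET, request_generation is not taken from the bus-reset packet below;
+ * it is only refreshed when the self-ID-complete event is handled, so packets received in
+ * between keep the previous generation.)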
*/ if (evt == OHCI1394_evt_bus_reset) { if (!(ohci->quirks & QUIRK_RESET_PACKET)) @@ -920,9 +799,9 @@ static void ar_recycle_buffers(struct ar_context *ctx, unsigned int end_buffer) } } -static void ar_context_tasklet(unsigned long data) +static void ohci_ar_context_work(struct work_struct *work) { - struct ar_context *ctx = (struct ar_context *)data; + struct ar_context *ctx = from_work(ctx, work, work); unsigned int end_buffer_index, end_buffer_offset; void *p, *end; @@ -930,23 +809,19 @@ static void ar_context_tasklet(unsigned long data) if (!p) return; - end_buffer_index = ar_search_last_active_buffer(ctx, - &end_buffer_offset); + end_buffer_index = ar_search_last_active_buffer(ctx, &end_buffer_offset); ar_sync_buffers_for_cpu(ctx, end_buffer_index, end_buffer_offset); end = ctx->buffer + end_buffer_index * PAGE_SIZE + end_buffer_offset; if (end_buffer_index < ar_first_buffer_index(ctx)) { - /* - * The filled part of the overall buffer wraps around; handle - * all packets up to the buffer end here. If the last packet - * wraps around, its tail will be visible after the buffer end - * because the buffer start pages are mapped there again. - */ + // The filled part of the overall buffer wraps around; handle all packets up to the + // buffer end here. If the last packet wraps around, its tail will be visible after + // the buffer end because the buffer start pages are mapped there again. void *buffer_end = ctx->buffer + AR_BUFFERS * PAGE_SIZE; p = handle_ar_packets(ctx, p, buffer_end); if (p < buffer_end) goto error; - /* adjust p to point back into the actual buffer */ + // adjust p to point back into the actual buffer p -= AR_BUFFERS * PAGE_SIZE; } @@ -961,7 +836,6 @@ static void ar_context_tasklet(unsigned long data) ar_recycle_buffers(ctx, end_buffer_index); return; - error: ctx->pointer = NULL; } @@ -977,7 +851,7 @@ static int ar_context_init(struct ar_context *ctx, struct fw_ohci *ohci, ctx->regs = regs; ctx->ohci = ohci; - tasklet_init(&ctx->tasklet, ar_context_tasklet, (unsigned long)ctx); + INIT_WORK(&ctx->work, ohci_ar_context_work); for (i = 0; i < AR_BUFFERS; i++) { ctx->pages[i] = dma_alloc_pages(dev, PAGE_SIZE, &dma_addr, @@ -1045,9 +919,8 @@ static struct descriptor *find_branch_descriptor(struct descriptor *d, int z) return d + z - 1; } -static void context_tasklet(unsigned long data) +static void context_retire_descriptors(struct context *ctx) { - struct context *ctx = (struct context *) data; struct descriptor *d, *last; u32 address; int z; @@ -1076,18 +949,31 @@ static void context_tasklet(unsigned long data) break; if (old_desc != desc) { - /* If we've advanced to the next buffer, move the - * previous buffer to the free list. */ - unsigned long flags; + // If we've advanced to the next buffer, move the previous buffer to the + // free list. 
+ // free list.
old_desc->used = 0; - spin_lock_irqsave(&ctx->ohci->lock, flags); + guard(spinlock_irqsave)(&ctx->ohci->lock); list_move_tail(&old_desc->list, &ctx->buffer_list); - spin_unlock_irqrestore(&ctx->ohci->lock, flags); } ctx->last = last; } } +static void ohci_at_context_work(struct work_struct *work) +{ + struct at_context *ctx = from_work(ctx, work, work); + + context_retire_descriptors(&ctx->context); +} + +static void ohci_isoc_context_work(struct work_struct *work) +{ + struct fw_iso_context *base = from_work(base, work, work); + struct iso_context *isoc_ctx = container_of(base, struct iso_context, base); + + context_retire_descriptors(&isoc_ctx->context); +} + /* * Allocate a new buffer and add it to the list of free buffers for this * context. Must be called with ohci->lock held. @@ -1105,8 +991,7 @@ static int context_add_buffer(struct context *ctx) if (ctx->total_allocation >= 16*1024*1024) return -ENOMEM; - desc = dma_alloc_coherent(ctx->ohci->card.device, PAGE_SIZE, - &bus_addr, GFP_ATOMIC); + desc = dmam_alloc_coherent(ctx->ohci->card.device, PAGE_SIZE, &bus_addr, GFP_ATOMIC); if (!desc) return -ENOMEM; @@ -1141,7 +1026,6 @@ static int context_init(struct context *ctx, struct fw_ohci *ohci, ctx->buffer_tail = list_entry(ctx->buffer_list.next, struct descriptor_buffer, list); - tasklet_init(&ctx->tasklet, context_tasklet, (unsigned long)ctx); ctx->callback = callback; /* @@ -1165,10 +1049,10 @@ static void context_release(struct context *ctx) struct fw_card *card = &ctx->ohci->card; struct descriptor_buffer *desc, *tmp; - list_for_each_entry_safe(desc, tmp, &ctx->buffer_list, list) - dma_free_coherent(card->device, PAGE_SIZE, desc, - desc->buffer_bus - - ((void *)&desc->buffer - (void *)desc)); + list_for_each_entry_safe(desc, tmp, &ctx->buffer_list, list) { + dmam_free_coherent(card->device, PAGE_SIZE, desc, + desc->buffer_bus - ((void *)&desc->buffer - (void *)desc)); + } } /* Must be called with ohci->lock held */ @@ -1277,21 +1161,21 @@ struct driver_data { }; /* - * This function apppends a packet to the DMA queue for transmission. + * This function appends a packet to the DMA queue for transmission. * Must always be called with the ochi->lock held to ensure proper * generation handling and locking around packet queue manipulation. */ -static int at_context_queue_packet(struct context *ctx, - struct fw_packet *packet) +static int at_context_queue_packet(struct at_context *ctx, struct fw_packet *packet) { - struct fw_ohci *ohci = ctx->ohci; + struct context *context = &ctx->context; + struct fw_ohci *ohci = context->ohci; dma_addr_t d_bus, payload_bus; struct driver_data *driver_data; struct descriptor *d, *last; __le32 *header; int z, tcode; - d = context_get_descriptors(ctx, 4, &d_bus); + d = context_get_descriptors(context, 4, &d_bus); if (d == NULL) { packet->ack = RCODE_SEND_ERROR; return -1; @@ -1300,13 +1184,7 @@ static int at_context_queue_packet(struct context *ctx, d[0].control = cpu_to_le16(DESCRIPTOR_KEY_IMMEDIATE); d[0].res_count = cpu_to_le16(packet->timestamp); - /* - * The DMA format for asynchronous link packets is different - * from the IEEE1394 layout, so shift the fields around - * accordingly. 
- */ - - tcode = (packet->header[0] >> 4) & 0x0f; + tcode = async_header_get_tcode(packet->header); header = (__le32 *) &d[1]; switch (tcode) { case TCODE_WRITE_QUADLET_REQUEST: @@ -1318,23 +1196,33 @@ static int at_context_queue_packet(struct context *ctx, case TCODE_READ_BLOCK_RESPONSE: case TCODE_LOCK_REQUEST: case TCODE_LOCK_RESPONSE: - header[0] = cpu_to_le32((packet->header[0] & 0xffff) | - (packet->speed << 16)); - header[1] = cpu_to_le32((packet->header[1] & 0xffff) | - (packet->header[0] & 0xffff0000)); - header[2] = cpu_to_le32(packet->header[2]); + ohci1394_at_data_set_src_bus_id(header, false); + ohci1394_at_data_set_speed(header, packet->speed); + ohci1394_at_data_set_tlabel(header, async_header_get_tlabel(packet->header)); + ohci1394_at_data_set_retry(header, async_header_get_retry(packet->header)); + ohci1394_at_data_set_tcode(header, tcode); + + ohci1394_at_data_set_destination_id(header, + async_header_get_destination(packet->header)); - if (TCODE_IS_BLOCK_PACKET(tcode)) + if (ctx == &ohci->at_response_ctx) { + ohci1394_at_data_set_rcode(header, async_header_get_rcode(packet->header)); + } else { + ohci1394_at_data_set_destination_offset(header, + async_header_get_offset(packet->header)); + } + + if (tcode_is_block_packet(tcode)) header[3] = cpu_to_le32(packet->header[3]); else header[3] = (__force __le32) packet->header[3]; d[0].req_count = cpu_to_le16(packet->header_length); break; - case TCODE_LINK_INTERNAL: - header[0] = cpu_to_le32((OHCI1394_phy_tcode << 4) | - (packet->speed << 16)); + ohci1394_at_data_set_speed(header, packet->speed); + ohci1394_at_data_set_tcode(header, TCODE_LINK_INTERNAL); + header[1] = cpu_to_le32(packet->header[1]); header[2] = cpu_to_le32(packet->header[2]); d[0].req_count = cpu_to_le16(12); @@ -1344,9 +1232,14 @@ static int at_context_queue_packet(struct context *ctx, break; case TCODE_STREAM_DATA: - header[0] = cpu_to_le32((packet->header[0] & 0xffff) | - (packet->speed << 16)); - header[1] = cpu_to_le32(packet->header[0] & 0xffff0000); + ohci1394_it_data_set_speed(header, packet->speed); + ohci1394_it_data_set_tag(header, isoc_header_get_tag(packet->header[0])); + ohci1394_it_data_set_channel(header, isoc_header_get_channel(packet->header[0])); + ohci1394_it_data_set_tcode(header, TCODE_STREAM_DATA); + ohci1394_it_data_set_sync(header, isoc_header_get_sy(packet->header[0])); + + ohci1394_it_data_set_data_length(header, isoc_header_get_data_length(packet->header[0])); + d[0].req_count = cpu_to_le16(8); break; @@ -1401,37 +1294,50 @@ static int at_context_queue_packet(struct context *ctx, return -1; } - context_append(ctx, d, z, 4 - z); + context_append(context, d, z, 4 - z); - if (ctx->running) - reg_write(ohci, CONTROL_SET(ctx->regs), CONTEXT_WAKE); + if (context->running) + reg_write(ohci, CONTROL_SET(context->regs), CONTEXT_WAKE); else - context_run(ctx, 0); + context_run(context, 0); return 0; } -static void at_context_flush(struct context *ctx) +static void at_context_flush(struct at_context *ctx) { - tasklet_disable(&ctx->tasklet); + // Avoid dead lock due to programming mistake. 
+ if (WARN_ON_ONCE(current_work() == &ctx->work)) + return; + + disable_work_sync(&ctx->work); + + WRITE_ONCE(ctx->flushing, true); + ohci_at_context_work(&ctx->work); + WRITE_ONCE(ctx->flushing, false); - ctx->flushing = true; - context_tasklet((unsigned long)ctx); - ctx->flushing = false; + enable_work(&ctx->work); +} + +static int find_fw_device(struct device *dev, const void *data) +{ + struct fw_device *device = fw_device(dev); + const u32 *params = data; - tasklet_enable(&ctx->tasklet); + return (device->generation == params[0]) && (device->node_id == params[1]); } static int handle_at_packet(struct context *context, struct descriptor *d, struct descriptor *last) { + struct at_context *ctx = container_of(context, struct at_context, context); + struct fw_ohci *ohci = ctx->context.ohci; struct driver_data *driver_data; struct fw_packet *packet; - struct fw_ohci *ohci = context->ohci; int evt; - if (last->transfer_status == 0 && !context->flushing) + if (last->transfer_status == 0 && !READ_ONCE(ctx->flushing)) /* This descriptor isn't done yet, stop iteration. */ return 0; @@ -1448,8 +1354,6 @@ static int handle_at_packet(struct context *context, evt = le16_to_cpu(last->transfer_status) & 0x1f; packet->timestamp = le16_to_cpu(last->res_count); - log_ar_at_event(ohci, 'T', packet->speed, packet->header, evt); - switch (evt) { case OHCI1394_evt_timeout: /* Async response transmit timed out. */ @@ -1465,7 +1369,7 @@ static int handle_at_packet(struct context *context, break; case OHCI1394_evt_missing_ack: - if (context->flushing) + if (READ_ONCE(ctx->flushing)) packet->ack = RCODE_GENERATION; else { /* @@ -1487,13 +1391,34 @@ static int handle_at_packet(struct context *context, break; case OHCI1394_evt_no_status: - if (context->flushing) { + if (READ_ONCE(ctx->flushing)) { packet->ack = RCODE_GENERATION; break; } fallthrough; default: + if (unlikely(evt == 0x10)) { + u32 params[2] = { + packet->generation, + async_header_get_destination(packet->header), + }; + struct device *dev; + + fw_card_get(&ohci->card); + dev = device_find_child(ohci->card.device, (const void *)params, find_fw_device); + fw_card_put(&ohci->card); + if (dev) { + struct fw_device *device = fw_device(dev); + int quirks = READ_ONCE(device->quirks); + + put_device(dev); + if (quirks & FW_DEVICE_QUIRK_ACK_PACKET_WITH_INVALID_PENDING_CODE) { + packet->ack = ACK_PENDING; + break; + } + } + } packet->ack = RCODE_SEND_ERROR; break; } @@ -1503,11 +1428,7 @@ static int handle_at_packet(struct context *context, return 1; } -#define HEADER_GET_DESTINATION(q) (((q) >> 16) & 0xffff) -#define HEADER_GET_TCODE(q) (((q) >> 4) & 0x0f) -#define HEADER_GET_OFFSET_HIGH(q) (((q) >> 0) & 0xffff) -#define HEADER_GET_DATA_LENGTH(q) (((q) >> 16) & 0xffff) -#define HEADER_GET_EXTENDED_TCODE(q) (((q) >> 0) & 0xffff) +static u32 get_cycle_time(struct fw_ohci *ohci); static void handle_local_rom(struct fw_ohci *ohci, struct fw_packet *packet, u32 csr) @@ -1515,9 +1436,9 @@ static void handle_local_rom(struct fw_ohci *ohci, struct fw_packet response; int tcode, length, i; - tcode = HEADER_GET_TCODE(packet->header[0]); - if (TCODE_IS_BLOCK_PACKET(tcode)) - length = HEADER_GET_DATA_LENGTH(packet->header[3]); + tcode = async_header_get_tcode(packet->header); + if (tcode_is_block_packet(tcode)) + length = async_header_get_data_length(packet->header); else length = 4; @@ -1525,7 +1446,7 @@ static void handle_local_rom(struct fw_ohci *ohci, if (i + length > CONFIG_ROM_SIZE) { fw_fill_response(&response, packet->header, RCODE_ADDRESS_ERROR, NULL, 0); - } 
else if (!TCODE_IS_READ_REQUEST(tcode)) { + } else if (!tcode_is_read_request(tcode)) { fw_fill_response(&response, packet->header, RCODE_TYPE_ERROR, NULL, 0); } else { @@ -1533,6 +1454,8 @@ static void handle_local_rom(struct fw_ohci *ohci, (void *) ohci->config_rom + i, length); } + // Timestamping on behalf of the hardware. + response.timestamp = cycle_time_to_ohci_tstamp(get_cycle_time(ohci)); fw_core_handle_response(&ohci->card, &response); } @@ -1544,10 +1467,10 @@ static void handle_local_lock(struct fw_ohci *ohci, __be32 *payload, lock_old; u32 lock_arg, lock_data; - tcode = HEADER_GET_TCODE(packet->header[0]); - length = HEADER_GET_DATA_LENGTH(packet->header[3]); + tcode = async_header_get_tcode(packet->header); + length = async_header_get_data_length(packet->header); payload = packet->payload; - ext_tcode = HEADER_GET_EXTENDED_TCODE(packet->header[3]); + ext_tcode = async_header_get_extended_tcode(packet->header); if (tcode == TCODE_LOCK_REQUEST && ext_tcode == EXTCODE_COMPARE_SWAP && length == 8) { @@ -1581,73 +1504,100 @@ static void handle_local_lock(struct fw_ohci *ohci, fw_fill_response(&response, packet->header, RCODE_BUSY, NULL, 0); out: + // Timestamping on behalf of the hardware. + response.timestamp = cycle_time_to_ohci_tstamp(get_cycle_time(ohci)); fw_core_handle_response(&ohci->card, &response); } -static void handle_local_request(struct context *ctx, struct fw_packet *packet) +static void handle_local_request(struct at_context *ctx, struct fw_packet *packet) { + struct fw_ohci *ohci = ctx->context.ohci; u64 offset, csr; - if (ctx == &ctx->ohci->at_request_ctx) { + if (ctx == &ohci->at_request_ctx) { packet->ack = ACK_PENDING; - packet->callback(packet, &ctx->ohci->card, packet->ack); + packet->callback(packet, &ohci->card, packet->ack); } - offset = - ((unsigned long long) - HEADER_GET_OFFSET_HIGH(packet->header[1]) << 32) | - packet->header[2]; + offset = async_header_get_offset(packet->header); csr = offset - CSR_REGISTER_BASE; /* Handle config rom reads. */ if (csr >= CSR_CONFIG_ROM && csr < CSR_CONFIG_ROM_END) - handle_local_rom(ctx->ohci, packet, csr); + handle_local_rom(ohci, packet, csr); else switch (csr) { case CSR_BUS_MANAGER_ID: case CSR_BANDWIDTH_AVAILABLE: case CSR_CHANNELS_AVAILABLE_HI: case CSR_CHANNELS_AVAILABLE_LO: - handle_local_lock(ctx->ohci, packet, csr); + handle_local_lock(ohci, packet, csr); break; default: - if (ctx == &ctx->ohci->at_request_ctx) - fw_core_handle_request(&ctx->ohci->card, packet); + if (ctx == &ohci->at_request_ctx) + fw_core_handle_request(&ohci->card, packet); else - fw_core_handle_response(&ctx->ohci->card, packet); + fw_core_handle_response(&ohci->card, packet); break; } - if (ctx == &ctx->ohci->at_response_ctx) { + if (ctx == &ohci->at_response_ctx) { packet->ack = ACK_COMPLETE; - packet->callback(packet, &ctx->ohci->card, packet->ack); + packet->callback(packet, &ohci->card, packet->ack); } } -static void at_context_transmit(struct context *ctx, struct fw_packet *packet) +static void at_context_transmit(struct at_context *ctx, struct fw_packet *packet) { + struct fw_ohci *ohci = ctx->context.ohci; unsigned long flags; int ret; - spin_lock_irqsave(&ctx->ohci->lock, flags); + spin_lock_irqsave(&ohci->lock, flags); + + if (async_header_get_destination(packet->header) == ohci->node_id && + ohci->generation == packet->generation) { + spin_unlock_irqrestore(&ohci->lock, flags); + + // Timestamping on behalf of the hardware. 
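+ // (A request to the local node is looped back in software and never passes through the AT
+ // DMA unit, so no hardware-generated timeStamp exists; derive one from the cycle timer.)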
+ packet->timestamp = cycle_time_to_ohci_tstamp(get_cycle_time(ohci)); - if (HEADER_GET_DESTINATION(packet->header[0]) == ctx->ohci->node_id && - ctx->ohci->generation == packet->generation) { - spin_unlock_irqrestore(&ctx->ohci->lock, flags); handle_local_request(ctx, packet); return; } ret = at_context_queue_packet(ctx, packet); - spin_unlock_irqrestore(&ctx->ohci->lock, flags); + spin_unlock_irqrestore(&ohci->lock, flags); - if (ret < 0) - packet->callback(packet, &ctx->ohci->card, packet->ack); + if (ret < 0) { + // Timestamping on behalf of the hardware. + packet->timestamp = cycle_time_to_ohci_tstamp(get_cycle_time(ohci)); + packet->callback(packet, &ohci->card, packet->ack); + } } static void detect_dead_context(struct fw_ohci *ohci, const char *name, unsigned int regs) { + static const char *const evts[] = { + [0x00] = "evt_no_status", [0x01] = "-reserved-", + [0x02] = "evt_long_packet", [0x03] = "evt_missing_ack", + [0x04] = "evt_underrun", [0x05] = "evt_overrun", + [0x06] = "evt_descriptor_read", [0x07] = "evt_data_read", + [0x08] = "evt_data_write", [0x09] = "evt_bus_reset", + [0x0a] = "evt_timeout", [0x0b] = "evt_tcode_err", + [0x0c] = "-reserved-", [0x0d] = "-reserved-", + [0x0e] = "evt_unknown", [0x0f] = "evt_flushed", + [0x10] = "-reserved-", [0x11] = "ack_complete", + [0x12] = "ack_pending ", [0x13] = "-reserved-", + [0x14] = "ack_busy_X", [0x15] = "ack_busy_A", + [0x16] = "ack_busy_B", [0x17] = "-reserved-", + [0x18] = "-reserved-", [0x19] = "-reserved-", + [0x1a] = "-reserved-", [0x1b] = "ack_tardy", + [0x1c] = "-reserved-", [0x1d] = "ack_data_error", + [0x1e] = "ack_type_error", [0x1f] = "-reserved-", + [0x20] = "pending/cancelled", + }; u32 ctl; ctl = reg_read(ohci, CONTROL_SET(regs)); @@ -1713,6 +1663,9 @@ static u32 get_cycle_time(struct fw_ohci *ohci) s32 diff01, diff12; int i; + if (has_reboot_by_cycle_timer_read_quirk(ohci)) + return 0; + c2 = reg_read(ohci, OHCI1394_IsochronousCycleTimer); if (ohci->quirks & QUIRK_CYCLE_TIMER) { @@ -1759,66 +1712,87 @@ static u32 update_bus_time(struct fw_ohci *ohci) return ohci->bus_time | cycle_time_seconds; } -static int get_status_for_port(struct fw_ohci *ohci, int port_index) +static int get_status_for_port(struct fw_ohci *ohci, int port_index, + enum phy_packet_self_id_port_status *status) { int reg; - mutex_lock(&ohci->phy_reg_mutex); - reg = write_phy_reg(ohci, 7, port_index); - if (reg >= 0) + scoped_guard(mutex, &ohci->phy_reg_mutex) { + reg = write_phy_reg(ohci, 7, port_index); + if (reg < 0) + return reg; + reg = read_phy_reg(ohci, 8); - mutex_unlock(&ohci->phy_reg_mutex); - if (reg < 0) - return reg; + if (reg < 0) + return reg; + } switch (reg & 0x0f) { case 0x06: - return 2; /* is child node (connected to parent node) */ + // is child node (connected to parent node) + *status = PHY_PACKET_SELF_ID_PORT_STATUS_PARENT; + break; case 0x0e: - return 3; /* is parent node (connected to child node) */ + // is parent node (connected to child node) + *status = PHY_PACKET_SELF_ID_PORT_STATUS_CHILD; + break; + default: + // not connected + *status = PHY_PACKET_SELF_ID_PORT_STATUS_NCONN; + break; } - return 1; /* not connected */ + + return 0; } static int get_self_id_pos(struct fw_ohci *ohci, u32 self_id, int self_id_count) { + unsigned int left_phy_id = phy_packet_self_id_get_phy_id(self_id); int i; - u32 entry; for (i = 0; i < self_id_count; i++) { - entry = ohci->self_id_buffer[i]; - if ((self_id & 0xff000000) == (entry & 0xff000000)) + u32 entry = ohci->self_id_buffer[i]; + unsigned int right_phy_id = 
phy_packet_self_id_get_phy_id(entry); + + if (left_phy_id == right_phy_id) return -1; - if ((self_id & 0xff000000) < (entry & 0xff000000)) + if (left_phy_id < right_phy_id) return i; } return i; } -static int initiated_reset(struct fw_ohci *ohci) +static int detect_initiated_reset(struct fw_ohci *ohci, bool *is_initiated_reset) { int reg; - int ret = 0; - mutex_lock(&ohci->phy_reg_mutex); - reg = write_phy_reg(ohci, 7, 0xe0); /* Select page 7 */ - if (reg >= 0) { - reg = read_phy_reg(ohci, 8); - reg |= 0x40; - reg = write_phy_reg(ohci, 8, reg); /* set PMODE bit */ - if (reg >= 0) { - reg = read_phy_reg(ohci, 12); /* read register 12 */ - if (reg >= 0) { - if ((reg & 0x08) == 0x08) { - /* bit 3 indicates "initiated reset" */ - ret = 0x2; - } - } - } - } - mutex_unlock(&ohci->phy_reg_mutex); - return ret; + guard(mutex)(&ohci->phy_reg_mutex); + + // Select page 7 + reg = write_phy_reg(ohci, 7, 0xe0); + if (reg < 0) + return reg; + + reg = read_phy_reg(ohci, 8); + if (reg < 0) + return reg; + + // set PMODE bit + reg |= 0x40; + reg = write_phy_reg(ohci, 8, reg); + if (reg < 0) + return reg; + + // read register 12 + reg = read_phy_reg(ohci, 12); + if (reg < 0) + return reg; + + // bit 3 indicates "initiated reset" + *is_initiated_reset = !!((reg & 0x08) == 0x08); + + return 0; } /* @@ -1828,9 +1802,15 @@ static int initiated_reset(struct fw_ohci *ohci) */ static int find_and_insert_self_id(struct fw_ohci *ohci, int self_id_count) { - int reg, i, pos, status; - /* link active 1, speed 3, bridge 0, contender 1, more packets 0 */ - u32 self_id = 0x8040c800; + int reg, i, pos, err; + bool is_initiated_reset; + u32 self_id = 0; + + // link active 1, speed 3, bridge 0, contender 1, more packets 0. + phy_packet_set_packet_identifier(&self_id, PHY_PACKET_PACKET_IDENTIFIER_SELF_ID); + phy_packet_self_id_zero_set_link_active(&self_id, true); + phy_packet_self_id_zero_set_scode(&self_id, SCODE_800); + phy_packet_self_id_zero_set_contender(&self_id, true); reg = reg_read(ohci, OHCI1394_NodeID); if (!(reg & OHCI1394_NodeID_idValid)) { @@ -1838,26 +1818,32 @@ static int find_and_insert_self_id(struct fw_ohci *ohci, int self_id_count) "node ID not valid, new bus reset in progress\n"); return -EBUSY; } - self_id |= ((reg & 0x3f) << 24); /* phy ID */ + phy_packet_self_id_set_phy_id(&self_id, reg & 0x3f); reg = ohci_read_phy_reg(&ohci->card, 4); if (reg < 0) return reg; - self_id |= ((reg & 0x07) << 8); /* power class */ + phy_packet_self_id_zero_set_power_class(&self_id, reg & 0x07); reg = ohci_read_phy_reg(&ohci->card, 1); if (reg < 0) return reg; - self_id |= ((reg & 0x3f) << 16); /* gap count */ + phy_packet_self_id_zero_set_gap_count(&self_id, reg & 0x3f); for (i = 0; i < 3; i++) { - status = get_status_for_port(ohci, i); - if (status < 0) - return status; - self_id |= ((status & 0x3) << (6 - (i * 2))); + enum phy_packet_self_id_port_status status; + + err = get_status_for_port(ohci, i, &status); + if (err < 0) + return err; + + self_id_sequence_set_port_status(&self_id, 1, i, status); } - self_id |= initiated_reset(ohci); + err = detect_initiated_reset(ohci, &is_initiated_reset); + if (err < 0) + return err; + phy_packet_self_id_zero_set_initiated_reset(&self_id, is_initiated_reset); pos = get_self_id_pos(ohci, self_id, self_id_count); if (pos >= 0) { @@ -1870,12 +1856,11 @@ static int find_and_insert_self_id(struct fw_ohci *ohci, int self_id_count) return self_id_count; } -static void bus_reset_work(struct work_struct *work) +static irqreturn_t handle_selfid_complete_event(int irq, void *data) { - 
struct fw_ohci *ohci = - container_of(work, struct fw_ohci, bus_reset_work); + struct fw_ohci *ohci = data; int self_id_count, generation, new_generation, i, j; - u32 reg; + u32 reg, quadlet; void *free_rom = NULL; dma_addr_t free_rom_bus = 0; bool is_new_root; @@ -1884,11 +1869,11 @@ static void bus_reset_work(struct work_struct *work) if (!(reg & OHCI1394_NodeID_idValid)) { ohci_notice(ohci, "node ID not valid, new bus reset in progress\n"); - return; + goto end; } if ((reg & OHCI1394_NodeID_nodeNumber) == 63) { ohci_notice(ohci, "malconfigured bus\n"); - return; + goto end; } ohci->node_id = reg & (OHCI1394_NodeID_busNumber | OHCI1394_NodeID_nodeNumber); @@ -1900,29 +1885,33 @@ static void bus_reset_work(struct work_struct *work) ohci->is_root = is_new_root; reg = reg_read(ohci, OHCI1394_SelfIDCount); - if (reg & OHCI1394_SelfIDCount_selfIDError) { + if (ohci1394_self_id_count_is_error(reg)) { ohci_notice(ohci, "self ID receive error\n"); - return; + goto end; } + + trace_self_id_complete(ohci->card.index, reg, ohci->self_id, has_be_header_quirk(ohci)); + /* * The count in the SelfIDCount register is the number of * bytes in the self ID receive buffer. Since we also receive * the inverted quadlets and a header quadlet, we shift one * bit extra to get the actual number of self IDs. */ - self_id_count = (reg >> 3) & 0xff; + self_id_count = ohci1394_self_id_count_get_size(reg) >> 1; if (self_id_count > 252) { ohci_notice(ohci, "bad selfIDSize (%08x)\n", reg); - return; + goto end; } - generation = (cond_le32_to_cpu(ohci->self_id[0]) >> 16) & 0xff; + quadlet = cond_le32_to_cpu(ohci->self_id[0], has_be_header_quirk(ohci)); + generation = ohci1394_self_id_receive_q0_get_generation(quadlet); rmb(); for (i = 1, j = 0; j < self_id_count; i += 2, j++) { - u32 id = cond_le32_to_cpu(ohci->self_id[i]); - u32 id2 = cond_le32_to_cpu(ohci->self_id[i + 1]); + u32 id = cond_le32_to_cpu(ohci->self_id[i], has_be_header_quirk(ohci)); + u32 id2 = cond_le32_to_cpu(ohci->self_id[i + 1], has_be_header_quirk(ohci)); if (id != ~id2) { /* @@ -1940,7 +1929,7 @@ static void bus_reset_work(struct work_struct *work) ohci_notice(ohci, "bad self ID %d/%d (%08x != ~%08x)\n", j, self_id_count, id, id2); - return; + goto end; } ohci->self_id_buffer[j] = id; } @@ -1950,13 +1939,13 @@ static void bus_reset_work(struct work_struct *work) if (self_id_count < 0) { ohci_notice(ohci, "could not construct local self ID\n"); - return; + goto end; } } if (self_id_count == 0) { ohci_notice(ohci, "no self IDs\n"); - return; + goto end; } rmb(); @@ -1974,20 +1963,19 @@ static void bus_reset_work(struct work_struct *work) * of self IDs. */ - new_generation = (reg_read(ohci, OHCI1394_SelfIDCount) >> 16) & 0xff; + reg = reg_read(ohci, OHCI1394_SelfIDCount); + new_generation = ohci1394_self_id_count_get_generation(reg); if (new_generation != generation) { ohci_notice(ohci, "new bus reset, discarding self ids\n"); - return; + goto end; } - /* FIXME: Document how the locking works. */ - spin_lock_irq(&ohci->lock); - - ohci->generation = -1; /* prevent AT packet queueing */ - context_stop(&ohci->at_request_ctx); - context_stop(&ohci->at_response_ctx); - - spin_unlock_irq(&ohci->lock); + // FIXME: Document how the locking works. 
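+ // (Roughly: setting generation to -1 under ohci->lock makes the generation
+ // check in at_context_queue_packet() reject further submissions while the
+ // AT contexts are stopped and flushed.)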
+ scoped_guard(spinlock_irq, &ohci->lock) { + ohci->generation = -1; // prevent AT packet queueing + context_stop(&ohci->at_request_ctx.context); + context_stop(&ohci->at_response_ctx.context); + } /* * Per OHCI 1.2 draft, clause 7.2.3.3, hardware may leave unsent @@ -1997,62 +1985,51 @@ static void bus_reset_work(struct work_struct *work) at_context_flush(&ohci->at_request_ctx); at_context_flush(&ohci->at_response_ctx); - spin_lock_irq(&ohci->lock); - - ohci->generation = generation; - reg_write(ohci, OHCI1394_IntEventClear, OHCI1394_busReset); - - if (ohci->quirks & QUIRK_RESET_PACKET) - ohci->request_generation = generation; - - /* - * This next bit is unrelated to the AT context stuff but we - * have to do it under the spinlock also. If a new config rom - * was set up before this reset, the old one is now no longer - * in use and we can free it. Update the config rom pointers - * to point to the current config rom and clear the - * next_config_rom pointer so a new update can take place. - */ - - if (ohci->next_config_rom != NULL) { - if (ohci->next_config_rom != ohci->config_rom) { - free_rom = ohci->config_rom; - free_rom_bus = ohci->config_rom_bus; + scoped_guard(spinlock_irq, &ohci->lock) { + ohci->generation = generation; + reg_write(ohci, OHCI1394_IntEventClear, OHCI1394_busReset); + reg_write(ohci, OHCI1394_IntMaskSet, OHCI1394_busReset); + + if (ohci->quirks & QUIRK_RESET_PACKET) + ohci->request_generation = generation; + + // This next bit is unrelated to the AT context stuff but we have to do it under the + // spinlock also. If a new config rom was set up before this reset, the old one is + // now no longer in use and we can free it. Update the config rom pointers to point + // to the current config rom and clear the next_config_rom pointer so a new update + // can take place. + if (ohci->next_config_rom != NULL) { + if (ohci->next_config_rom != ohci->config_rom) { + free_rom = ohci->config_rom; + free_rom_bus = ohci->config_rom_bus; + } + ohci->config_rom = ohci->next_config_rom; + ohci->config_rom_bus = ohci->next_config_rom_bus; + ohci->next_config_rom = NULL; + + // Restore config_rom image and manually update config_rom registers. + // Writing the header quadlet will indicate that the config rom is ready, + // so we do that last. + reg_write(ohci, OHCI1394_BusOptions, be32_to_cpu(ohci->config_rom[2])); + ohci->config_rom[0] = ohci->next_header; + reg_write(ohci, OHCI1394_ConfigROMhdr, be32_to_cpu(ohci->next_header)); } - ohci->config_rom = ohci->next_config_rom; - ohci->config_rom_bus = ohci->next_config_rom_bus; - ohci->next_config_rom = NULL; - - /* - * Restore config_rom image and manually update - * config_rom registers. Writing the header quadlet - * will indicate that the config rom is ready, so we - * do that last. 
- */ - reg_write(ohci, OHCI1394_BusOptions, - be32_to_cpu(ohci->config_rom[2])); - ohci->config_rom[0] = ohci->next_header; - reg_write(ohci, OHCI1394_ConfigROMhdr, - be32_to_cpu(ohci->next_header)); - } - if (param_remote_dma) { - reg_write(ohci, OHCI1394_PhyReqFilterHiSet, ~0); - reg_write(ohci, OHCI1394_PhyReqFilterLoSet, ~0); + if (param_remote_dma) { + reg_write(ohci, OHCI1394_PhyReqFilterHiSet, ~0); + reg_write(ohci, OHCI1394_PhyReqFilterLoSet, ~0); + } } - spin_unlock_irq(&ohci->lock); - if (free_rom) - dma_free_coherent(ohci->card.device, CONFIG_ROM_SIZE, - free_rom, free_rom_bus); - - log_selfids(ohci, generation, self_id_count); + dmam_free_coherent(ohci->card.device, CONFIG_ROM_SIZE, free_rom, free_rom_bus); fw_core_handle_bus_reset(&ohci->card, ohci->node_id, generation, self_id_count, ohci->self_id_buffer, ohci->csr_state_setclear_abdicate); ohci->csr_state_setclear_abdicate = false; +end: + return IRQ_HANDLED; } static irqreturn_t irq_handler(int irq, void *data) @@ -2067,27 +2044,28 @@ static irqreturn_t irq_handler(int irq, void *data) return IRQ_NONE; /* - * busReset and postedWriteErr must not be cleared yet + * busReset and postedWriteErr events must not be cleared yet * (OHCI 1.1 clauses 7.2.3.2 and 13.2.8.1) */ reg_write(ohci, OHCI1394_IntEventClear, event & ~(OHCI1394_busReset | OHCI1394_postedWriteErr)); - log_irqs(ohci, event); + trace_irqs(ohci->card.index, event); - if (event & OHCI1394_selfIDComplete) - queue_work(selfid_workqueue, &ohci->bus_reset_work); + // The flag is masked again at handle_selfid_complete_event() scheduled by selfID event. + if (event & OHCI1394_busReset) + reg_write(ohci, OHCI1394_IntMaskClear, OHCI1394_busReset); if (event & OHCI1394_RQPkt) - tasklet_schedule(&ohci->ar_request_ctx.tasklet); + queue_work(ohci->card.async_wq, &ohci->ar_request_ctx.work); if (event & OHCI1394_RSPkt) - tasklet_schedule(&ohci->ar_response_ctx.tasklet); + queue_work(ohci->card.async_wq, &ohci->ar_response_ctx.work); if (event & OHCI1394_reqTxComplete) - tasklet_schedule(&ohci->at_request_ctx.tasklet); + queue_work(ohci->card.async_wq, &ohci->at_request_ctx.work); if (event & OHCI1394_respTxComplete) - tasklet_schedule(&ohci->at_response_ctx.tasklet); + queue_work(ohci->card.async_wq, &ohci->at_response_ctx.work); if (event & OHCI1394_isochRx) { iso_event = reg_read(ohci, OHCI1394_IsoRecvIntEventClear); @@ -2095,8 +2073,7 @@ static irqreturn_t irq_handler(int irq, void *data) while (iso_event) { i = ffs(iso_event) - 1; - tasklet_schedule( - &ohci->ir_context_list[i].context.tasklet); + fw_iso_context_schedule_flush_completions(&ohci->ir_context_list[i].base); iso_event &= ~(1 << i); } } @@ -2107,8 +2084,7 @@ static irqreturn_t irq_handler(int irq, void *data) while (iso_event) { i = ffs(iso_event) - 1; - tasklet_schedule( - &ohci->it_context_list[i].context.tasklet); + fw_iso_context_schedule_flush_completions(&ohci->it_context_list[i].base); iso_event &= ~(1 << i); } } @@ -2121,13 +2097,11 @@ static irqreturn_t irq_handler(int irq, void *data) reg_read(ohci, OHCI1394_PostedWriteAddressLo); reg_write(ohci, OHCI1394_IntEventClear, OHCI1394_postedWriteErr); - if (printk_ratelimit()) - ohci_err(ohci, "PCI posted write error\n"); + dev_err_ratelimited(ohci->card.device, "PCI posted write error\n"); } if (unlikely(event & OHCI1394_cycleTooLong)) { - if (printk_ratelimit()) - ohci_notice(ohci, "isochronous cycle too long\n"); + dev_notice_ratelimited(ohci->card.device, "isochronous cycle too long\n"); reg_write(ohci, OHCI1394_LinkControlSet, 
OHCI1394_LinkControl_cycleMaster); } @@ -2139,21 +2113,22 @@ static irqreturn_t irq_handler(int irq, void *data) * stop active cycleMatch iso contexts now and restart * them at least two cycles later. (FIXME?) */ - if (printk_ratelimit()) - ohci_notice(ohci, "isochronous cycle inconsistent\n"); + dev_notice_ratelimited(ohci->card.device, "isochronous cycle inconsistent\n"); } if (unlikely(event & OHCI1394_unrecoverableError)) handle_dead_contexts(ohci); if (event & OHCI1394_cycle64Seconds) { - spin_lock(&ohci->lock); + guard(spinlock)(&ohci->lock); update_bus_time(ohci); - spin_unlock(&ohci->lock); } else flush_writes(ohci); - return IRQ_HANDLED; + if (event & OHCI1394_selfIDComplete) + return IRQ_WAKE_THREAD; + else + return IRQ_HANDLED; } static int software_reset(struct fw_ohci *ohci) @@ -2373,14 +2348,12 @@ static int ohci_enable(struct fw_card *card, * They shouldn't do that in this initial case where the link * isn't enabled. This means we have to use the same * workaround here, setting the bus header to 0 and then write - * the right values in the bus reset tasklet. + * the right values in the bus reset work item. */ if (config_rom) { - ohci->next_config_rom = - dma_alloc_coherent(ohci->card.device, CONFIG_ROM_SIZE, - &ohci->next_config_rom_bus, - GFP_KERNEL); + ohci->next_config_rom = dmam_alloc_coherent(ohci->card.device, CONFIG_ROM_SIZE, + &ohci->next_config_rom_bus, GFP_KERNEL); if (ohci->next_config_rom == NULL) return -ENOMEM; @@ -2412,9 +2385,8 @@ static int ohci_enable(struct fw_card *card, OHCI1394_cycleInconsistent | OHCI1394_unrecoverableError | OHCI1394_cycleTooLong | - OHCI1394_masterIntEnable; - if (param_debug & OHCI_PARAM_DEBUG_BUSRESETS) - irqs |= OHCI1394_busReset; + OHCI1394_masterIntEnable | + OHCI1394_busReset; reg_write(ohci, OHCI1394_IntMaskSet, irqs); reg_write(ohci, OHCI1394_HCControlSet, @@ -2436,6 +2408,41 @@ static int ohci_enable(struct fw_card *card, return 0; } +static void ohci_disable(struct fw_card *card) +{ + struct pci_dev *pdev = to_pci_dev(card->device); + struct fw_ohci *ohci = pci_get_drvdata(pdev); + int i, irq = pci_irq_vector(pdev, 0); + + // If the removal is happening from the suspend state, LPS won't be enabled and host + // registers (eg., IntMaskClear) won't be accessible. + if (!(reg_read(ohci, OHCI1394_HCControlSet) & OHCI1394_HCControl_LPS)) + return; + + reg_write(ohci, OHCI1394_IntMaskClear, ~0); + flush_writes(ohci); + + if (irq >= 0) + synchronize_irq(irq); + + flush_work(&ohci->ar_request_ctx.work); + flush_work(&ohci->ar_response_ctx.work); + flush_work(&ohci->at_request_ctx.work); + flush_work(&ohci->at_response_ctx.work); + + for (i = 0; i < ohci->n_ir; ++i) { + if (!(ohci->ir_context_mask & BIT(i))) + flush_work(&ohci->ir_context_list[i].base.work); + } + for (i = 0; i < ohci->n_it; ++i) { + if (!(ohci->it_context_mask & BIT(i))) + flush_work(&ohci->it_context_list[i].base.work); + } + + at_context_flush(&ohci->at_request_ctx); + at_context_flush(&ohci->at_response_ctx); +} + static int ohci_set_config_rom(struct fw_card *card, const __be32 *config_rom, size_t length) { @@ -2462,59 +2469,52 @@ static int ohci_set_config_rom(struct fw_card *card, * ConfigRomHeader and BusOptions doesn't honor the * noByteSwapData bit, so with a be32 config rom, the * controller will load be32 values in to these registers - * during the atomic update, even on litte endian + * during the atomic update, even on little endian * architectures. 
The workaround we use is to put a 0 in the * header quadlet; 0 is endian agnostic and means that the - * config rom isn't ready yet. In the bus reset tasklet we + * config rom isn't ready yet. In the bus reset work item we * then set up the real values for the two registers. * * We use ohci->lock to avoid racing with the code that sets - * ohci->next_config_rom to NULL (see bus_reset_work). + * ohci->next_config_rom to NULL (see handle_selfid_complete_event). */ - next_config_rom = - dma_alloc_coherent(ohci->card.device, CONFIG_ROM_SIZE, - &next_config_rom_bus, GFP_KERNEL); + next_config_rom = dmam_alloc_coherent(ohci->card.device, CONFIG_ROM_SIZE, + &next_config_rom_bus, GFP_KERNEL); if (next_config_rom == NULL) return -ENOMEM; - spin_lock_irq(&ohci->lock); - - /* - * If there is not an already pending config_rom update, - * push our new allocation into the ohci->next_config_rom - * and then mark the local variable as null so that we - * won't deallocate the new buffer. - * - * OTOH, if there is a pending config_rom update, just - * use that buffer with the new config_rom data, and - * let this routine free the unused DMA allocation. - */ - - if (ohci->next_config_rom == NULL) { - ohci->next_config_rom = next_config_rom; - ohci->next_config_rom_bus = next_config_rom_bus; - next_config_rom = NULL; - } - - copy_config_rom(ohci->next_config_rom, config_rom, length); + scoped_guard(spinlock_irq, &ohci->lock) { + // If there is not an already pending config_rom update, push our new allocation + // into the ohci->next_config_rom and then mark the local variable as null so that + // we won't deallocate the new buffer. + // + // OTOH, if there is a pending config_rom update, just use that buffer with the new + // config_rom data, and let this routine free the unused DMA allocation. + if (ohci->next_config_rom == NULL) { + ohci->next_config_rom = next_config_rom; + ohci->next_config_rom_bus = next_config_rom_bus; + next_config_rom = NULL; + } - ohci->next_header = config_rom[0]; - ohci->next_config_rom[0] = 0; + copy_config_rom(ohci->next_config_rom, config_rom, length); - reg_write(ohci, OHCI1394_ConfigROMmap, ohci->next_config_rom_bus); + ohci->next_header = config_rom[0]; + ohci->next_config_rom[0] = 0; - spin_unlock_irq(&ohci->lock); + reg_write(ohci, OHCI1394_ConfigROMmap, ohci->next_config_rom_bus); + } /* If we didn't use the DMA allocation, delete it. */ - if (next_config_rom != NULL) - dma_free_coherent(ohci->card.device, CONFIG_ROM_SIZE, - next_config_rom, next_config_rom_bus); + if (next_config_rom != NULL) { + dmam_free_coherent(ohci->card.device, CONFIG_ROM_SIZE, next_config_rom, + next_config_rom_bus); + } /* * Now initiate a bus reset to have the changes take * effect. We clean up the old config rom memory and DMA - * mappings in the bus reset tasklet, since the OHCI + * mappings in the bus reset work item, since the OHCI * controller could need to access it before the bus reset * takes effect. */ @@ -2541,11 +2541,14 @@ static void ohci_send_response(struct fw_card *card, struct fw_packet *packet) static int ohci_cancel_packet(struct fw_card *card, struct fw_packet *packet) { struct fw_ohci *ohci = fw_ohci(card); - struct context *ctx = &ohci->at_request_ctx; + struct at_context *ctx = &ohci->at_request_ctx; struct driver_data *driver_data = packet->driver_data; int ret = -ENOENT; - tasklet_disable_in_atomic(&ctx->tasklet); + // Avoid dead lock due to programming mistake. 
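+ // (disable_work_sync() below waits for a running work item to finish, so
+ // calling this function from the AT context's own work item would block
+ // forever; warn once and bail out instead.)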
+ if (WARN_ON_ONCE(current_work() == &ctx->work)) + return 0; + disable_work_sync(&ctx->work); if (packet->ack != 0) goto out; @@ -2554,13 +2557,16 @@ static int ohci_cancel_packet(struct fw_card *card, struct fw_packet *packet) dma_unmap_single(ohci->card.device, packet->payload_bus, packet->payload_length, DMA_TO_DEVICE); - log_ar_at_event(ohci, 'T', packet->speed, packet->header, 0x20); driver_data->packet = NULL; packet->ack = RCODE_CANCELLED; + + // Timestamping on behalf of the hardware. + packet->timestamp = cycle_time_to_ohci_tstamp(get_cycle_time(ohci)); + packet->callback(packet, &ohci->card, packet->ack); ret = 0; out: - tasklet_enable(&ctx->tasklet); + enable_work(&ctx->work); return ret; } @@ -2569,7 +2575,6 @@ static int ohci_enable_phys_dma(struct fw_card *card, int node_id, int generation) { struct fw_ohci *ohci = fw_ohci(card); - unsigned long flags; int n, ret = 0; if (param_remote_dma) @@ -2580,12 +2585,10 @@ static int ohci_enable_phys_dma(struct fw_card *card, * interrupt bit. Clear physReqResourceAllBuses on bus reset. */ - spin_lock_irqsave(&ohci->lock, flags); + guard(spinlock_irqsave)(&ohci->lock); - if (ohci->generation != generation) { - ret = -ESTALE; - goto out; - } + if (ohci->generation != generation) + return -ESTALE; /* * Note, if the node ID contains a non-local bus ID, physical DMA is @@ -2599,8 +2602,6 @@ static int ohci_enable_phys_dma(struct fw_card *card, reg_write(ohci, OHCI1394_PhyReqFilterHiSet, 1 << (n - 32)); flush_writes(ohci); - out: - spin_unlock_irqrestore(&ohci->lock, flags); return ret; } @@ -2608,7 +2609,6 @@ static int ohci_enable_phys_dma(struct fw_card *card, static u32 ohci_read_csr(struct fw_card *card, int csr_offset) { struct fw_ohci *ohci = fw_ohci(card); - unsigned long flags; u32 value; switch (csr_offset) { @@ -2632,16 +2632,14 @@ static u32 ohci_read_csr(struct fw_card *card, int csr_offset) return get_cycle_time(ohci); case CSR_BUS_TIME: - /* - * We might be called just after the cycle timer has wrapped - * around but just before the cycle64Seconds handler, so we - * better check here, too, if the bus time needs to be updated. - */ - spin_lock_irqsave(&ohci->lock, flags); - value = update_bus_time(ohci); - spin_unlock_irqrestore(&ohci->lock, flags); - return value; + { + // We might be called just after the cycle timer has wrapped around but just before + // the cycle64Seconds handler, so we better check here, too, if the bus time needs + // to be updated. 
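+ // (The lock also serializes this opportunistic update against the
+ // cycle64Seconds interrupt path, which calls update_bus_time() as well.)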
+ guard(spinlock_irqsave)(&ohci->lock); + return update_bus_time(ohci); + } case CSR_BUSY_TIMEOUT: value = reg_read(ohci, OHCI1394_ATRetries); return (value >> 4) & 0x0ffff00f; @@ -2659,7 +2657,6 @@ static u32 ohci_read_csr(struct fw_card *card, int csr_offset) static void ohci_write_csr(struct fw_card *card, int csr_offset, u32 value) { struct fw_ohci *ohci = fw_ohci(card); - unsigned long flags; switch (csr_offset) { case CSR_STATE_CLEAR: @@ -2695,12 +2692,11 @@ static void ohci_write_csr(struct fw_card *card, int csr_offset, u32 value) break; case CSR_BUS_TIME: - spin_lock_irqsave(&ohci->lock, flags); - ohci->bus_time = (update_bus_time(ohci) & 0x40) | - (value & ~0x7f); - spin_unlock_irqrestore(&ohci->lock, flags); + { + guard(spinlock_irqsave)(&ohci->lock); + ohci->bus_time = (update_bus_time(ohci) & 0x40) | (value & ~0x7f); break; - + } case CSR_BUSY_TIMEOUT: value = (value & 0xf) | ((value & 0xf) << 4) | ((value & 0xf) << 8) | ((value & 0x0ffff000) << 4); @@ -2719,8 +2715,13 @@ static void ohci_write_csr(struct fw_card *card, int csr_offset, u32 value) } } -static void flush_iso_completions(struct iso_context *ctx) +static void flush_iso_completions(struct iso_context *ctx, enum fw_iso_context_completions_cause cause) { + trace_isoc_inbound_single_completions(&ctx->base, ctx->last_timestamp, cause, ctx->header, + ctx->header_length); + trace_isoc_outbound_completions(&ctx->base, ctx->last_timestamp, cause, ctx->header, + ctx->header_length); + ctx->base.callback.sc(&ctx->base, ctx->last_timestamp, ctx->header_length, ctx->header, ctx->base.callback_data); @@ -2734,7 +2735,7 @@ static void copy_iso_headers(struct iso_context *ctx, const u32 *dma_hdr) if (ctx->header_length + ctx->base.header_size > PAGE_SIZE) { if (ctx->base.drop_overflow_headers) return; - flush_iso_completions(ctx); + flush_iso_completions(ctx, FW_ISO_CONTEXT_COMPLETIONS_CAUSE_HEADER_OVERFLOW); } ctx_hdr = ctx->header + ctx->header_length; @@ -2783,7 +2784,7 @@ static int handle_ir_packet_per_buffer(struct context *context, copy_iso_headers(ctx, (u32 *) (last + 1)); if (last->control & cpu_to_le16(DESCRIPTOR_IRQ_ALWAYS)) - flush_iso_completions(ctx); + flush_iso_completions(ctx, FW_ISO_CONTEXT_COMPLETIONS_CAUSE_INTERRUPT); return 1; } @@ -2818,6 +2819,9 @@ static int handle_ir_buffer_fill(struct context *context, completed, DMA_FROM_DEVICE); if (last->control & cpu_to_le16(DESCRIPTOR_IRQ_ALWAYS)) { + trace_isoc_inbound_multiple_completions(&ctx->base, completed, + FW_ISO_CONTEXT_COMPLETIONS_CAUSE_INTERRUPT); + ctx->base.callback.mc(&ctx->base, buffer_dma + completed, ctx->base.callback_data); @@ -2834,6 +2838,9 @@ static void flush_ir_buffer_fill(struct iso_context *ctx) ctx->mc_buffer_bus & ~PAGE_MASK, ctx->mc_completed, DMA_FROM_DEVICE); + trace_isoc_inbound_multiple_completions(&ctx->base, ctx->mc_completed, + FW_ISO_CONTEXT_COMPLETIONS_CAUSE_FLUSH); + ctx->base.callback.mc(&ctx->base, ctx->mc_buffer_bus + ctx->mc_completed, ctx->base.callback_data); @@ -2898,7 +2905,7 @@ static int handle_it_packet(struct context *context, if (ctx->header_length + 4 > PAGE_SIZE) { if (ctx->base.drop_overflow_headers) return 1; - flush_iso_completions(ctx); + flush_iso_completions(ctx, FW_ISO_CONTEXT_COMPLETIONS_CAUSE_HEADER_OVERFLOW); } ctx_hdr = ctx->header + ctx->header_length; @@ -2909,7 +2916,7 @@ static int handle_it_packet(struct context *context, ctx->header_length += 4; if (last->control & cpu_to_le16(DESCRIPTOR_IRQ_ALWAYS)) - flush_iso_completions(ctx); + flush_iso_completions(ctx, 
FW_ISO_CONTEXT_COMPLETIONS_CAUSE_INTERRUPT); return 1; } @@ -2935,55 +2942,53 @@ static struct fw_iso_context *ohci_allocate_iso_context(struct fw_card *card, u32 *mask, regs; int index, ret = -EBUSY; - spin_lock_irq(&ohci->lock); + scoped_guard(spinlock_irq, &ohci->lock) { + switch (type) { + case FW_ISO_CONTEXT_TRANSMIT: + mask = &ohci->it_context_mask; + callback = handle_it_packet; + index = ffs(*mask) - 1; + if (index >= 0) { + *mask &= ~(1 << index); + regs = OHCI1394_IsoXmitContextBase(index); + ctx = &ohci->it_context_list[index]; + } + break; - switch (type) { - case FW_ISO_CONTEXT_TRANSMIT: - mask = &ohci->it_context_mask; - callback = handle_it_packet; - index = ffs(*mask) - 1; - if (index >= 0) { - *mask &= ~(1 << index); - regs = OHCI1394_IsoXmitContextBase(index); - ctx = &ohci->it_context_list[index]; - } - break; + case FW_ISO_CONTEXT_RECEIVE: + channels = &ohci->ir_context_channels; + mask = &ohci->ir_context_mask; + callback = handle_ir_packet_per_buffer; + index = *channels & 1ULL << channel ? ffs(*mask) - 1 : -1; + if (index >= 0) { + *channels &= ~(1ULL << channel); + *mask &= ~(1 << index); + regs = OHCI1394_IsoRcvContextBase(index); + ctx = &ohci->ir_context_list[index]; + } + break; - case FW_ISO_CONTEXT_RECEIVE: - channels = &ohci->ir_context_channels; - mask = &ohci->ir_context_mask; - callback = handle_ir_packet_per_buffer; - index = *channels & 1ULL << channel ? ffs(*mask) - 1 : -1; - if (index >= 0) { - *channels &= ~(1ULL << channel); - *mask &= ~(1 << index); - regs = OHCI1394_IsoRcvContextBase(index); - ctx = &ohci->ir_context_list[index]; - } - break; + case FW_ISO_CONTEXT_RECEIVE_MULTICHANNEL: + mask = &ohci->ir_context_mask; + callback = handle_ir_buffer_fill; + index = !ohci->mc_allocated ? ffs(*mask) - 1 : -1; + if (index >= 0) { + ohci->mc_allocated = true; + *mask &= ~(1 << index); + regs = OHCI1394_IsoRcvContextBase(index); + ctx = &ohci->ir_context_list[index]; + } + break; - case FW_ISO_CONTEXT_RECEIVE_MULTICHANNEL: - mask = &ohci->ir_context_mask; - callback = handle_ir_buffer_fill; - index = !ohci->mc_allocated ? 
ffs(*mask) - 1 : -1; - if (index >= 0) { - ohci->mc_allocated = true; - *mask &= ~(1 << index); - regs = OHCI1394_IsoRcvContextBase(index); - ctx = &ohci->ir_context_list[index]; + default: + index = -1; + ret = -ENOSYS; } - break; - default: - index = -1; - ret = -ENOSYS; + if (index < 0) + return ERR_PTR(ret); } - spin_unlock_irq(&ohci->lock); - - if (index < 0) - return ERR_PTR(ret); - memset(ctx, 0, sizeof(*ctx)); ctx->header_length = 0; ctx->header = (void *) __get_free_page(GFP_KERNEL); @@ -2994,6 +2999,7 @@ static struct fw_iso_context *ohci_allocate_iso_context(struct fw_card *card, ret = context_init(&ctx->context, ohci, regs, callback); if (ret < 0) goto out_with_header; + fw_iso_context_init_work(&ctx->base, ohci_isoc_context_work); if (type == FW_ISO_CONTEXT_RECEIVE_MULTICHANNEL) { set_multichannel_mask(ohci, 0); @@ -3005,20 +3011,18 @@ static struct fw_iso_context *ohci_allocate_iso_context(struct fw_card *card, out_with_header: free_page((unsigned long)ctx->header); out: - spin_lock_irq(&ohci->lock); - - switch (type) { - case FW_ISO_CONTEXT_RECEIVE: - *channels |= 1ULL << channel; - break; + scoped_guard(spinlock_irq, &ohci->lock) { + switch (type) { + case FW_ISO_CONTEXT_RECEIVE: + *channels |= 1ULL << channel; + break; - case FW_ISO_CONTEXT_RECEIVE_MULTICHANNEL: - ohci->mc_allocated = false; - break; + case FW_ISO_CONTEXT_RECEIVE_MULTICHANNEL: + ohci->mc_allocated = false; + break; + } + *mask |= 1 << index; } - *mask |= 1 << index; - - spin_unlock_irq(&ohci->lock); return ERR_PTR(ret); } @@ -3093,7 +3097,6 @@ static int ohci_stop_iso(struct fw_iso_context *base) } flush_writes(ohci); context_stop(&ctx->context); - tasklet_kill(&ctx->context.tasklet); return 0; } @@ -3102,14 +3105,13 @@ static void ohci_free_iso_context(struct fw_iso_context *base) { struct fw_ohci *ohci = fw_ohci(base->card); struct iso_context *ctx = container_of(base, struct iso_context, base); - unsigned long flags; int index; ohci_stop_iso(base); context_release(&ctx->context); free_page((unsigned long)ctx->header); - spin_lock_irqsave(&ohci->lock, flags); + guard(spinlock_irqsave)(&ohci->lock); switch (base->type) { case FW_ISO_CONTEXT_TRANSMIT: @@ -3131,42 +3133,32 @@ static void ohci_free_iso_context(struct fw_iso_context *base) ohci->mc_allocated = false; break; } - - spin_unlock_irqrestore(&ohci->lock, flags); } static int ohci_set_iso_channels(struct fw_iso_context *base, u64 *channels) { struct fw_ohci *ohci = fw_ohci(base->card); - unsigned long flags; - int ret; switch (base->type) { case FW_ISO_CONTEXT_RECEIVE_MULTICHANNEL: + { + guard(spinlock_irqsave)(&ohci->lock); - spin_lock_irqsave(&ohci->lock, flags); - - /* Don't allow multichannel to grab other contexts' channels. */ + // Don't allow multichannel to grab other contexts' channels. 
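+ // (ir_context_channels holds the channels still unclaimed by single-channel
+ // contexts; a requested channel that is neither unclaimed nor already in the
+ // multichannel mask belongs to another context, hence -EBUSY.)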
if (~ohci->ir_context_channels & ~ohci->mc_channels & *channels) { *channels = ohci->ir_context_channels; - ret = -EBUSY; + return -EBUSY; } else { set_multichannel_mask(ohci, *channels); - ret = 0; + return 0; } - - spin_unlock_irqrestore(&ohci->lock, flags); - - break; + } default: - ret = -EINVAL; + return -EINVAL; } - - return ret; } -#ifdef CONFIG_PM -static void ohci_resume_iso_dma(struct fw_ohci *ohci) +static void __maybe_unused ohci_resume_iso_dma(struct fw_ohci *ohci) { int i; struct iso_context *ctx; @@ -3183,7 +3175,6 @@ static void ohci_resume_iso_dma(struct fw_ohci *ohci) ohci_start_iso(&ctx->base, 0, ctx->sync, ctx->tags); } } -#endif static int queue_iso_transmit(struct iso_context *ctx, struct fw_iso_packet *packet, @@ -3237,14 +3228,14 @@ static int queue_iso_transmit(struct iso_context *ctx, d[0].branch_address = cpu_to_le32(d_bus | z); header = (__le32 *) &d[1]; - header[0] = cpu_to_le32(IT_HEADER_SY(p->sy) | - IT_HEADER_TAG(p->tag) | - IT_HEADER_TCODE(TCODE_STREAM_DATA) | - IT_HEADER_CHANNEL(ctx->base.channel) | - IT_HEADER_SPEED(ctx->base.speed)); - header[1] = - cpu_to_le32(IT_HEADER_DATA_LENGTH(p->header_length + - p->payload_length)); + + ohci1394_it_data_set_speed(header, ctx->base.speed); + ohci1394_it_data_set_tag(header, p->tag); + ohci1394_it_data_set_channel(header, ctx->base.channel); + ohci1394_it_data_set_tcode(header, TCODE_STREAM_DATA); + ohci1394_it_data_set_sync(header, p->sy); + + ohci1394_it_data_set_data_length(header, p->header_length + p->payload_length); } if (p->header_length > 0) { @@ -3432,24 +3423,19 @@ static int ohci_queue_iso(struct fw_iso_context *base, unsigned long payload) { struct iso_context *ctx = container_of(base, struct iso_context, base); - unsigned long flags; - int ret = -ENOSYS; - spin_lock_irqsave(&ctx->context.ohci->lock, flags); + guard(spinlock_irqsave)(&ctx->context.ohci->lock); + switch (base->type) { case FW_ISO_CONTEXT_TRANSMIT: - ret = queue_iso_transmit(ctx, packet, buffer, payload); - break; + return queue_iso_transmit(ctx, packet, buffer, payload); case FW_ISO_CONTEXT_RECEIVE: - ret = queue_iso_packet_per_buffer(ctx, packet, buffer, payload); - break; + return queue_iso_packet_per_buffer(ctx, packet, buffer, payload); case FW_ISO_CONTEXT_RECEIVE_MULTICHANNEL: - ret = queue_iso_buffer_fill(ctx, packet, buffer, payload); - break; + return queue_iso_buffer_fill(ctx, packet, buffer, payload); + default: + return -ENOSYS; } - spin_unlock_irqrestore(&ctx->context.ohci->lock, flags); - - return ret; } static void ohci_flush_queue_iso(struct fw_iso_context *base) @@ -3465,16 +3451,14 @@ static int ohci_flush_iso_completions(struct fw_iso_context *base) struct iso_context *ctx = container_of(base, struct iso_context, base); int ret = 0; - tasklet_disable_in_atomic(&ctx->context.tasklet); - if (!test_and_set_bit_lock(0, &ctx->flushing_completions)) { - context_tasklet((unsigned long)&ctx->context); + ohci_isoc_context_work(&base->work); switch (base->type) { case FW_ISO_CONTEXT_TRANSMIT: case FW_ISO_CONTEXT_RECEIVE: if (ctx->header_length != 0) - flush_iso_completions(ctx); + flush_iso_completions(ctx, FW_ISO_CONTEXT_COMPLETIONS_CAUSE_FLUSH); break; case FW_ISO_CONTEXT_RECEIVE_MULTICHANNEL: if (ctx->mc_completed != 0) @@ -3488,13 +3472,12 @@ static int ohci_flush_iso_completions(struct fw_iso_context *base) smp_mb__after_atomic(); } - tasklet_enable(&ctx->context.tasklet); - return ret; } static const struct fw_card_driver ohci_driver = { .enable = ohci_enable, + .disable = ohci_disable, .read_phy_reg = ohci_read_phy_reg, 
.update_phy_reg = ohci_update_phy_reg, .set_config_rom = ohci_set_config_rom, @@ -3544,63 +3527,62 @@ static inline void pmac_ohci_on(struct pci_dev *dev) {} static inline void pmac_ohci_off(struct pci_dev *dev) {} #endif /* CONFIG_PPC_PMAC */ +static void release_ohci(struct device *dev, void *data) +{ + struct pci_dev *pdev = to_pci_dev(dev); + struct fw_ohci *ohci = pci_get_drvdata(pdev); + + pmac_ohci_off(pdev); + + ar_context_release(&ohci->ar_response_ctx); + ar_context_release(&ohci->ar_request_ctx); + + dev_notice(dev, "removed fw-ohci device\n"); +} + static int pci_probe(struct pci_dev *dev, const struct pci_device_id *ent) { struct fw_ohci *ohci; u32 bus_options, max_receive, link_speed, version; u64 guid; - int i, err; - size_t size; + int i, flags, irq, err; if (dev->vendor == PCI_VENDOR_ID_PINNACLE_SYSTEMS) { dev_err(&dev->dev, "Pinnacle MovieBoard is not yet supported\n"); return -ENOSYS; } - ohci = kzalloc(sizeof(*ohci), GFP_KERNEL); - if (ohci == NULL) { - err = -ENOMEM; - goto fail; - } - + ohci = devres_alloc(release_ohci, sizeof(*ohci), GFP_KERNEL); + if (ohci == NULL) + return -ENOMEM; fw_card_initialize(&ohci->card, &ohci_driver, &dev->dev); - + pci_set_drvdata(dev, ohci); pmac_ohci_on(dev); + devres_add(&dev->dev, ohci); - err = pci_enable_device(dev); + err = pcim_enable_device(dev); if (err) { dev_err(&dev->dev, "failed to enable OHCI hardware\n"); - goto fail_free; + return err; } pci_set_master(dev); pci_write_config_dword(dev, OHCI1394_PCI_HCI_Control, 0); - pci_set_drvdata(dev, ohci); spin_lock_init(&ohci->lock); mutex_init(&ohci->phy_reg_mutex); - INIT_WORK(&ohci->bus_reset_work, bus_reset_work); - if (!(pci_resource_flags(dev, 0) & IORESOURCE_MEM) || pci_resource_len(dev, 0) < OHCI1394_REGISTER_SIZE) { ohci_err(ohci, "invalid MMIO resource\n"); - err = -ENXIO; - goto fail_disable; - } - - err = pci_request_region(dev, 0, ohci_driver_name); - if (err) { - ohci_err(ohci, "MMIO resource unavailable\n"); - goto fail_disable; + return -ENXIO; } - ohci->registers = pci_iomap(dev, 0, OHCI1394_REGISTER_SIZE); - if (ohci->registers == NULL) { - ohci_err(ohci, "failed to remap registers\n"); - err = -ENXIO; - goto fail_iomem; + ohci->registers = pcim_iomap_region(dev, 0, ohci_driver_name); + if (IS_ERR(ohci->registers)) { + ohci_err(ohci, "request and map MMIO resource unavailable\n"); + return -ENXIO; } for (i = 0; i < ARRAY_SIZE(ohci_quirks); i++) @@ -3615,6 +3597,9 @@ static int pci_probe(struct pci_dev *dev, if (param_quirks) ohci->quirks = param_quirks; + if (detect_vt630x_with_asm1083_on_amd_ryzen_machine(dev)) + ohci->quirks |= QUIRK_REBOOT_BY_CYCLE_TIMER_READ; + /* * Because dma_alloc_coherent() allocates at least one page, * we save space by using a common buffer for the AR request/ @@ -3622,34 +3607,32 @@ static int pci_probe(struct pci_dev *dev, */ BUILD_BUG_ON(AR_BUFFERS * sizeof(struct descriptor) > PAGE_SIZE/4); BUILD_BUG_ON(SELF_ID_BUF_SIZE > PAGE_SIZE/2); - ohci->misc_buffer = dma_alloc_coherent(ohci->card.device, - PAGE_SIZE, - &ohci->misc_buffer_bus, - GFP_KERNEL); - if (!ohci->misc_buffer) { - err = -ENOMEM; - goto fail_iounmap; - } + ohci->misc_buffer = dmam_alloc_coherent(&dev->dev, PAGE_SIZE, &ohci->misc_buffer_bus, + GFP_KERNEL); + if (!ohci->misc_buffer) + return -ENOMEM; err = ar_context_init(&ohci->ar_request_ctx, ohci, 0, OHCI1394_AsReqRcvContextControlSet); if (err < 0) - goto fail_misc_buf; + return err; err = ar_context_init(&ohci->ar_response_ctx, ohci, PAGE_SIZE/4, OHCI1394_AsRspRcvContextControlSet); if (err < 0) - goto fail_arreq_ctx; 
+ return err; - err = context_init(&ohci->at_request_ctx, ohci, + err = context_init(&ohci->at_request_ctx.context, ohci, OHCI1394_AsReqTrContextControlSet, handle_at_packet); if (err < 0) - goto fail_arrsp_ctx; + return err; + INIT_WORK(&ohci->at_request_ctx.work, ohci_at_context_work); - err = context_init(&ohci->at_response_ctx, ohci, + err = context_init(&ohci->at_response_ctx.context, ohci, OHCI1394_AsRspTrContextControlSet, handle_at_packet); if (err < 0) - goto fail_atreq_ctx; + return err; + INIT_WORK(&ohci->at_response_ctx.work, ohci_at_context_work); reg_write(ohci, OHCI1394_IsoRecvIntMaskSet, ~0); ohci->ir_context_channels = ~0ULL; @@ -3657,8 +3640,9 @@ static int pci_probe(struct pci_dev *dev, reg_write(ohci, OHCI1394_IsoRecvIntMaskClear, ~0); ohci->ir_context_mask = ohci->ir_context_support; ohci->n_ir = hweight32(ohci->ir_context_mask); - size = sizeof(struct iso_context) * ohci->n_ir; - ohci->ir_context_list = kzalloc(size, GFP_KERNEL); + ohci->ir_context_list = devm_kcalloc(&dev->dev, ohci->n_ir, sizeof(struct iso_context), GFP_KERNEL); + if (!ohci->ir_context_list) + return -ENOMEM; reg_write(ohci, OHCI1394_IsoXmitIntMaskSet, ~0); ohci->it_context_support = reg_read(ohci, OHCI1394_IsoXmitIntMaskSet); @@ -3670,13 +3654,9 @@ static int pci_probe(struct pci_dev *dev, reg_write(ohci, OHCI1394_IsoXmitIntMaskClear, ~0); ohci->it_context_mask = ohci->it_context_support; ohci->n_it = hweight32(ohci->it_context_mask); - size = sizeof(struct iso_context) * ohci->n_it; - ohci->it_context_list = kzalloc(size, GFP_KERNEL); - - if (ohci->it_context_list == NULL || ohci->ir_context_list == NULL) { - err = -ENOMEM; - goto fail_contexts; - } + ohci->it_context_list = devm_kcalloc(&dev->dev, ohci->n_it, sizeof(struct iso_context), GFP_KERNEL); + if (!ohci->it_context_list) + return -ENOMEM; ohci->self_id = ohci->misc_buffer + PAGE_SIZE/2; ohci->self_id_bus = ohci->misc_buffer_bus + PAGE_SIZE/2; @@ -3687,17 +3667,29 @@ static int pci_probe(struct pci_dev *dev, guid = ((u64) reg_read(ohci, OHCI1394_GUIDHi) << 32) | reg_read(ohci, OHCI1394_GUIDLo); + flags = PCI_IRQ_INTX; if (!(ohci->quirks & QUIRK_NO_MSI)) - pci_enable_msi(dev); - if (request_irq(dev->irq, irq_handler, - pci_dev_msi_enabled(dev) ? 0 : IRQF_SHARED, - ohci_driver_name, ohci)) { - ohci_err(ohci, "failed to allocate interrupt %d\n", dev->irq); - err = -EIO; + flags |= PCI_IRQ_MSI; + err = pci_alloc_irq_vectors(dev, 1, 1, flags); + if (err < 0) + return err; + irq = pci_irq_vector(dev, 0); + if (irq < 0) { + err = irq; + goto fail_msi; + } + + // IRQF_ONESHOT is not applied so that any events are handled in the hardIRQ handler during + // invoking the threaded IRQ handler for SelfIDComplete event. + err = request_threaded_irq(irq, irq_handler, handle_selfid_complete_event, + pci_dev_msi_enabled(dev) ? 
0 : IRQF_SHARED, ohci_driver_name, + ohci); + if (err < 0) { + ohci_err(ohci, "failed to allocate interrupt %d\n", irq); goto fail_msi; } - err = fw_card_add(&ohci->card, max_receive, link_speed, guid); + err = fw_card_add(&ohci->card, max_receive, link_speed, guid, ohci->n_it + ohci->n_ir); if (err) goto fail_irq; @@ -3713,115 +3705,49 @@ static int pci_probe(struct pci_dev *dev, return 0; fail_irq: - free_irq(dev->irq, ohci); + free_irq(irq, ohci); fail_msi: - pci_disable_msi(dev); - fail_contexts: - kfree(ohci->ir_context_list); - kfree(ohci->it_context_list); - context_release(&ohci->at_response_ctx); - fail_atreq_ctx: - context_release(&ohci->at_request_ctx); - fail_arrsp_ctx: - ar_context_release(&ohci->ar_response_ctx); - fail_arreq_ctx: - ar_context_release(&ohci->ar_request_ctx); - fail_misc_buf: - dma_free_coherent(ohci->card.device, PAGE_SIZE, - ohci->misc_buffer, ohci->misc_buffer_bus); - fail_iounmap: - pci_iounmap(dev, ohci->registers); - fail_iomem: - pci_release_region(dev, 0); - fail_disable: - pci_disable_device(dev); - fail_free: - kfree(ohci); - pmac_ohci_off(dev); - fail: + pci_free_irq_vectors(dev); + return err; } static void pci_remove(struct pci_dev *dev) { struct fw_ohci *ohci = pci_get_drvdata(dev); + int irq; - /* - * If the removal is happening from the suspend state, LPS won't be - * enabled and host registers (eg., IntMaskClear) won't be accessible. - */ - if (reg_read(ohci, OHCI1394_HCControlSet) & OHCI1394_HCControl_LPS) { - reg_write(ohci, OHCI1394_IntMaskClear, ~0); - flush_writes(ohci); - } - cancel_work_sync(&ohci->bus_reset_work); fw_core_remove_card(&ohci->card); - /* - * FIXME: Fail all pending packets here, now that the upper - * layers can't queue any more. - */ - software_reset(ohci); - free_irq(dev->irq, ohci); - - if (ohci->next_config_rom && ohci->next_config_rom != ohci->config_rom) - dma_free_coherent(ohci->card.device, CONFIG_ROM_SIZE, - ohci->next_config_rom, ohci->next_config_rom_bus); - if (ohci->config_rom) - dma_free_coherent(ohci->card.device, CONFIG_ROM_SIZE, - ohci->config_rom, ohci->config_rom_bus); - ar_context_release(&ohci->ar_request_ctx); - ar_context_release(&ohci->ar_response_ctx); - dma_free_coherent(ohci->card.device, PAGE_SIZE, - ohci->misc_buffer, ohci->misc_buffer_bus); - context_release(&ohci->at_request_ctx); - context_release(&ohci->at_response_ctx); - kfree(ohci->it_context_list); - kfree(ohci->ir_context_list); - pci_disable_msi(dev); - pci_iounmap(dev, ohci->registers); - pci_release_region(dev, 0); - pci_disable_device(dev); - kfree(ohci); - pmac_ohci_off(dev); - dev_notice(&dev->dev, "removed fw-ohci device\n"); + irq = pci_irq_vector(dev, 0); + if (irq >= 0) + free_irq(irq, ohci); + pci_free_irq_vectors(dev); + + dev_notice(&dev->dev, "removing fw-ohci device\n"); } -#ifdef CONFIG_PM -static int pci_suspend(struct pci_dev *dev, pm_message_t state) +static int __maybe_unused pci_suspend(struct device *dev) { - struct fw_ohci *ohci = pci_get_drvdata(dev); - int err; + struct pci_dev *pdev = to_pci_dev(dev); + struct fw_ohci *ohci = pci_get_drvdata(pdev); software_reset(ohci); - err = pci_save_state(dev); - if (err) { - ohci_err(ohci, "pci_save_state failed\n"); - return err; - } - err = pci_set_power_state(dev, pci_choose_state(dev, state)); - if (err) - ohci_err(ohci, "pci_set_power_state failed with %d\n", err); - pmac_ohci_off(dev); + pmac_ohci_off(pdev); return 0; } -static int pci_resume(struct pci_dev *dev) + +static int __maybe_unused pci_resume(struct device *dev) { - struct fw_ohci *ohci = 
pci_get_drvdata(dev); + struct pci_dev *pdev = to_pci_dev(dev); + struct fw_ohci *ohci = pci_get_drvdata(pdev); int err; - pmac_ohci_on(dev); - pci_set_power_state(dev, PCI_D0); - pci_restore_state(dev); - err = pci_enable_device(dev); - if (err) { - ohci_err(ohci, "pci_enable_device failed\n"); - return err; - } + pmac_ohci_on(pdev); /* Some systems don't setup GUID register on resume from ram */ if (!reg_read(ohci, OHCI1394_GUIDLo) && @@ -3838,7 +3764,6 @@ static int pci_resume(struct pci_dev *dev) return 0; } -#endif static const struct pci_device_id pci_table[] = { { PCI_DEVICE_CLASS(PCI_CLASS_SERIAL_FIREWIRE_OHCI, ~0) }, @@ -3847,30 +3772,24 @@ static const struct pci_device_id pci_table[] = { MODULE_DEVICE_TABLE(pci, pci_table); +static SIMPLE_DEV_PM_OPS(pci_pm_ops, pci_suspend, pci_resume); + static struct pci_driver fw_ohci_pci_driver = { .name = ohci_driver_name, .id_table = pci_table, .probe = pci_probe, .remove = pci_remove, -#ifdef CONFIG_PM - .resume = pci_resume, - .suspend = pci_suspend, -#endif + .driver.pm = &pci_pm_ops, }; static int __init fw_ohci_init(void) { - selfid_workqueue = alloc_workqueue(KBUILD_MODNAME, WQ_MEM_RECLAIM, 0); - if (!selfid_workqueue) - return -ENOMEM; - return pci_register_driver(&fw_ohci_pci_driver); } static void __exit fw_ohci_cleanup(void) { pci_unregister_driver(&fw_ohci_pci_driver); - destroy_workqueue(selfid_workqueue); } module_init(fw_ohci_init); diff --git a/drivers/firewire/ohci.h b/drivers/firewire/ohci.h index c4d005a9901a..218666cfe14a 100644 --- a/drivers/firewire/ohci.h +++ b/drivers/firewire/ohci.h @@ -31,7 +31,6 @@ #define OHCI1394_HCControl_softReset 0x00010000 #define OHCI1394_SelfIDBuffer 0x064 #define OHCI1394_SelfIDCount 0x068 -#define OHCI1394_SelfIDCount_selfIDError 0x80000000 #define OHCI1394_IRMultiChanMaskHiSet 0x070 #define OHCI1394_IRMultiChanMaskHiClear 0x074 #define OHCI1394_IRMultiChanMaskLoSet 0x078 @@ -154,6 +153,246 @@ #define OHCI1394_evt_unknown 0xe #define OHCI1394_evt_flushed 0xf -#define OHCI1394_phy_tcode 0xe + +// Asynchronous Transmit DMA. +// +// The content of first two quadlets of data for AT DMA is different from the header for IEEE 1394 +// asynchronous packet. 
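+//
+// For quick reference, the masks below lay out quadlet 0 as: bit 23 srcBusID,
+// bits 18-16 spd, bits 15-10 tLabel, bits 9-8 rt, and bits 7-4 tCode; the
+// remaining bits are not used by these helpers.
+//
+// A minimal usage sketch (illustrative only, not part of this change) for
+// serializing the first quadlets of an AT read request with the helpers below,
+// where tlabel and destination_id stand for caller-supplied values:
+//
+// __le32 d[3] = {};
+//
+// ohci1394_at_data_set_speed(d, SCODE_400);
+// ohci1394_at_data_set_tlabel(d, tlabel);
+// ohci1394_at_data_set_retry(d, 0x1);
+// ohci1394_at_data_set_tcode(d, TCODE_READ_QUADLET_REQUEST);
+// ohci1394_at_data_set_destination_id(d, destination_id);
+// ohci1394_at_data_set_destination_offset(d, CSR_REGISTER_BASE + CSR_CONFIG_ROM);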
+ +#define OHCI1394_AT_DATA_Q0_srcBusID_MASK 0x00800000 +#define OHCI1394_AT_DATA_Q0_srcBusID_SHIFT 23 +#define OHCI1394_AT_DATA_Q0_spd_MASK 0x00070000 +#define OHCI1394_AT_DATA_Q0_spd_SHIFT 16 +#define OHCI1394_AT_DATA_Q0_tLabel_MASK 0x0000fc00 +#define OHCI1394_AT_DATA_Q0_tLabel_SHIFT 10 +#define OHCI1394_AT_DATA_Q0_rt_MASK 0x00000300 +#define OHCI1394_AT_DATA_Q0_rt_SHIFT 8 +#define OHCI1394_AT_DATA_Q0_tCode_MASK 0x000000f0 +#define OHCI1394_AT_DATA_Q0_tCode_SHIFT 4 +#define OHCI1394_AT_DATA_Q1_destinationId_MASK 0xffff0000 +#define OHCI1394_AT_DATA_Q1_destinationId_SHIFT 16 +#define OHCI1394_AT_DATA_Q1_destinationOffsetHigh_MASK 0x0000ffff +#define OHCI1394_AT_DATA_Q1_destinationOffsetHigh_SHIFT 0 +#define OHCI1394_AT_DATA_Q1_rCode_MASK 0x0000f000 +#define OHCI1394_AT_DATA_Q1_rCode_SHIFT 12 + +static inline bool ohci1394_at_data_get_src_bus_id(const __le32 *data) +{ + return !!((data[0] & OHCI1394_AT_DATA_Q0_srcBusID_MASK) >> OHCI1394_AT_DATA_Q0_srcBusID_SHIFT); +} + +static inline void ohci1394_at_data_set_src_bus_id(__le32 *data, bool src_bus_id) +{ + data[0] &= cpu_to_le32(~OHCI1394_AT_DATA_Q0_srcBusID_MASK); + data[0] |= cpu_to_le32((src_bus_id << OHCI1394_AT_DATA_Q0_srcBusID_SHIFT) & OHCI1394_AT_DATA_Q0_srcBusID_MASK); +} + +static inline unsigned int ohci1394_at_data_get_speed(const __le32 *data) +{ + return (le32_to_cpu(data[0]) & OHCI1394_AT_DATA_Q0_spd_MASK) >> OHCI1394_AT_DATA_Q0_spd_SHIFT; +} + +static inline void ohci1394_at_data_set_speed(__le32 *data, unsigned int scode) +{ + data[0] &= cpu_to_le32(~OHCI1394_AT_DATA_Q0_spd_MASK); + data[0] |= cpu_to_le32((scode << OHCI1394_AT_DATA_Q0_spd_SHIFT) & OHCI1394_AT_DATA_Q0_spd_MASK); +} + +static inline unsigned int ohci1394_at_data_get_tlabel(const __le32 *data) +{ + return (le32_to_cpu(data[0]) & OHCI1394_AT_DATA_Q0_tLabel_MASK) >> OHCI1394_AT_DATA_Q0_tLabel_SHIFT; +} + +static inline void ohci1394_at_data_set_tlabel(__le32 *data, unsigned int tlabel) +{ + data[0] &= cpu_to_le32(~OHCI1394_AT_DATA_Q0_tLabel_MASK); + data[0] |= cpu_to_le32((tlabel << OHCI1394_AT_DATA_Q0_tLabel_SHIFT) & OHCI1394_AT_DATA_Q0_tLabel_MASK); +} + +static inline unsigned int ohci1394_at_data_get_retry(const __le32 *data) +{ + return (le32_to_cpu(data[0]) & OHCI1394_AT_DATA_Q0_rt_MASK) >> OHCI1394_AT_DATA_Q0_rt_SHIFT; +} + +static inline void ohci1394_at_data_set_retry(__le32 *data, unsigned int retry) +{ + data[0] &= cpu_to_le32(~OHCI1394_AT_DATA_Q0_rt_MASK); + data[0] |= cpu_to_le32((retry << OHCI1394_AT_DATA_Q0_rt_SHIFT) & OHCI1394_AT_DATA_Q0_rt_MASK); +} + +static inline unsigned int ohci1394_at_data_get_tcode(const __le32 *data) +{ + return (le32_to_cpu(data[0]) & OHCI1394_AT_DATA_Q0_tCode_MASK) >> OHCI1394_AT_DATA_Q0_tCode_SHIFT; +} + +static inline void ohci1394_at_data_set_tcode(__le32 *data, unsigned int tcode) +{ + data[0] &= cpu_to_le32(~OHCI1394_AT_DATA_Q0_tCode_MASK); + data[0] |= cpu_to_le32((tcode << OHCI1394_AT_DATA_Q0_tCode_SHIFT) & OHCI1394_AT_DATA_Q0_tCode_MASK); +} + +static inline unsigned int ohci1394_at_data_get_destination_id(const __le32 *data) +{ + return (le32_to_cpu(data[1]) & OHCI1394_AT_DATA_Q1_destinationId_MASK) >> OHCI1394_AT_DATA_Q1_destinationId_SHIFT; +} + +static inline void ohci1394_at_data_set_destination_id(__le32 *data, unsigned int destination_id) +{ + data[1] &= cpu_to_le32(~OHCI1394_AT_DATA_Q1_destinationId_MASK); + data[1] |= cpu_to_le32((destination_id << OHCI1394_AT_DATA_Q1_destinationId_SHIFT) & OHCI1394_AT_DATA_Q1_destinationId_MASK); +} + +static inline u64 ohci1394_at_data_get_destination_offset(const 
__le32 *data) +{ + u64 hi = (u64)((le32_to_cpu(data[1]) & OHCI1394_AT_DATA_Q1_destinationOffsetHigh_MASK) >> OHCI1394_AT_DATA_Q1_destinationOffsetHigh_SHIFT); + u64 lo = (u64)le32_to_cpu(data[2]); + return (hi << 32) | lo; +} + +static inline void ohci1394_at_data_set_destination_offset(__le32 *data, u64 offset) +{ + u32 hi = (u32)(offset >> 32); + u32 lo = (u32)(offset & 0x00000000ffffffff); + data[1] &= cpu_to_le32(~OHCI1394_AT_DATA_Q1_destinationOffsetHigh_MASK); + data[1] |= cpu_to_le32((hi << OHCI1394_AT_DATA_Q1_destinationOffsetHigh_SHIFT) & OHCI1394_AT_DATA_Q1_destinationOffsetHigh_MASK); + data[2] = cpu_to_le32(lo); +} + +static inline unsigned int ohci1394_at_data_get_rcode(const __le32 *data) +{ + return (le32_to_cpu(data[1]) & OHCI1394_AT_DATA_Q1_rCode_MASK) >> OHCI1394_AT_DATA_Q1_rCode_SHIFT; +} + +static inline void ohci1394_at_data_set_rcode(__le32 *data, unsigned int rcode) +{ + data[1] &= cpu_to_le32(~OHCI1394_AT_DATA_Q1_rCode_MASK); + data[1] |= cpu_to_le32((rcode << OHCI1394_AT_DATA_Q1_rCode_SHIFT) & OHCI1394_AT_DATA_Q1_rCode_MASK); +} + +// Isochronous Transmit DMA. +// +// The content of first two quadlets of data for IT DMA is different from the header for IEEE 1394 +// isochronous packet. + +#define OHCI1394_IT_DATA_Q0_spd_MASK 0x00070000 +#define OHCI1394_IT_DATA_Q0_spd_SHIFT 16 +#define OHCI1394_IT_DATA_Q0_tag_MASK 0x0000c000 +#define OHCI1394_IT_DATA_Q0_tag_SHIFT 14 +#define OHCI1394_IT_DATA_Q0_chanNum_MASK 0x00003f00 +#define OHCI1394_IT_DATA_Q0_chanNum_SHIFT 8 +#define OHCI1394_IT_DATA_Q0_tcode_MASK 0x000000f0 +#define OHCI1394_IT_DATA_Q0_tcode_SHIFT 4 +#define OHCI1394_IT_DATA_Q0_sy_MASK 0x0000000f +#define OHCI1394_IT_DATA_Q0_sy_SHIFT 0 +#define OHCI1394_IT_DATA_Q1_dataLength_MASK 0xffff0000 +#define OHCI1394_IT_DATA_Q1_dataLength_SHIFT 16 + +static inline unsigned int ohci1394_it_data_get_speed(const __le32 *data) +{ + return (le32_to_cpu(data[0]) & OHCI1394_IT_DATA_Q0_spd_MASK) >> OHCI1394_IT_DATA_Q0_spd_SHIFT; +} + +static inline void ohci1394_it_data_set_speed(__le32 *data, unsigned int scode) +{ + data[0] &= cpu_to_le32(~OHCI1394_IT_DATA_Q0_spd_MASK); + data[0] |= cpu_to_le32((scode << OHCI1394_IT_DATA_Q0_spd_SHIFT) & OHCI1394_IT_DATA_Q0_spd_MASK); +} + +static inline unsigned int ohci1394_it_data_get_tag(const __le32 *data) +{ + return (le32_to_cpu(data[0]) & OHCI1394_IT_DATA_Q0_tag_MASK) >> OHCI1394_IT_DATA_Q0_tag_SHIFT; +} + +static inline void ohci1394_it_data_set_tag(__le32 *data, unsigned int tag) +{ + data[0] &= cpu_to_le32(~OHCI1394_IT_DATA_Q0_tag_MASK); + data[0] |= cpu_to_le32((tag << OHCI1394_IT_DATA_Q0_tag_SHIFT) & OHCI1394_IT_DATA_Q0_tag_MASK); +} + +static inline unsigned int ohci1394_it_data_get_channel(const __le32 *data) +{ + return (le32_to_cpu(data[0]) & OHCI1394_IT_DATA_Q0_chanNum_MASK) >> OHCI1394_IT_DATA_Q0_chanNum_SHIFT; +} + +static inline void ohci1394_it_data_set_channel(__le32 *data, unsigned int channel) +{ + data[0] &= cpu_to_le32(~OHCI1394_IT_DATA_Q0_chanNum_MASK); + data[0] |= cpu_to_le32((channel << OHCI1394_IT_DATA_Q0_chanNum_SHIFT) & OHCI1394_IT_DATA_Q0_chanNum_MASK); +} + +static inline unsigned int ohci1394_it_data_get_tcode(const __le32 *data) +{ + return (le32_to_cpu(data[0]) & OHCI1394_IT_DATA_Q0_tcode_MASK) >> OHCI1394_IT_DATA_Q0_tcode_SHIFT; +} + +static inline void ohci1394_it_data_set_tcode(__le32 *data, unsigned int tcode) +{ + data[0] &= cpu_to_le32(~OHCI1394_IT_DATA_Q0_tcode_MASK); + data[0] |= cpu_to_le32((tcode << OHCI1394_IT_DATA_Q0_tcode_SHIFT) & OHCI1394_IT_DATA_Q0_tcode_MASK); +} + +static inline unsigned 
int ohci1394_it_data_get_sync(const __le32 *data) +{ + return (le32_to_cpu(data[0]) & OHCI1394_IT_DATA_Q0_sy_MASK) >> OHCI1394_IT_DATA_Q0_sy_SHIFT; +} + +static inline void ohci1394_it_data_set_sync(__le32 *data, unsigned int sync) +{ + data[0] &= cpu_to_le32(~OHCI1394_IT_DATA_Q0_sy_MASK); + data[0] |= cpu_to_le32((sync << OHCI1394_IT_DATA_Q0_sy_SHIFT) & OHCI1394_IT_DATA_Q0_sy_MASK); +} + +static inline unsigned int ohci1394_it_data_get_data_length(const __le32 *data) +{ + return (le32_to_cpu(data[1]) & OHCI1394_IT_DATA_Q1_dataLength_MASK) >> OHCI1394_IT_DATA_Q1_dataLength_SHIFT; +} + +static inline void ohci1394_it_data_set_data_length(__le32 *data, unsigned int data_length) +{ + data[1] &= cpu_to_le32(~OHCI1394_IT_DATA_Q1_dataLength_MASK); + data[1] |= cpu_to_le32((data_length << OHCI1394_IT_DATA_Q1_dataLength_SHIFT) & OHCI1394_IT_DATA_Q1_dataLength_MASK); +} + +// Self-ID DMA. + +#define OHCI1394_SelfIDCount_selfIDError_MASK 0x80000000 +#define OHCI1394_SelfIDCount_selfIDError_SHIFT 31 +#define OHCI1394_SelfIDCount_selfIDGeneration_MASK 0x00ff0000 +#define OHCI1394_SelfIDCount_selfIDGeneration_SHIFT 16 +#define OHCI1394_SelfIDCount_selfIDSize_MASK 0x000007fc +#define OHCI1394_SelfIDCount_selfIDSize_SHIFT 2 + +static inline bool ohci1394_self_id_count_is_error(u32 value) +{ + return !!((value & OHCI1394_SelfIDCount_selfIDError_MASK) >> OHCI1394_SelfIDCount_selfIDError_SHIFT); +} + +static inline u8 ohci1394_self_id_count_get_generation(u32 value) +{ + return (value & OHCI1394_SelfIDCount_selfIDGeneration_MASK) >> OHCI1394_SelfIDCount_selfIDGeneration_SHIFT; +} + +// In 1394 OHCI specification, the maximum size of self ID stream is 504 quadlets +// (= 63 devices * 4 self ID packets * 2 quadlets). The selfIDSize field accommodates it and its +// additional first quadlet, since the field is 9 bits (0x1ff = 511). +static inline u32 ohci1394_self_id_count_get_size(u32 value) +{ + return (value & OHCI1394_SelfIDCount_selfIDSize_MASK) >> OHCI1394_SelfIDCount_selfIDSize_SHIFT; +} + +#define OHCI1394_SELF_ID_RECEIVE_Q0_GENERATION_MASK 0x00ff0000 +#define OHCI1394_SELF_ID_RECEIVE_Q0_GENERATION_SHIFT 16 +#define OHCI1394_SELF_ID_RECEIVE_Q0_TIMESTAMP_MASK 0x0000ffff +#define OHCI1394_SELF_ID_RECEIVE_Q0_TIMESTAMP_SHIFT 0 + +static inline u8 ohci1394_self_id_receive_q0_get_generation(u32 quadlet0) +{ + return (quadlet0 & OHCI1394_SELF_ID_RECEIVE_Q0_GENERATION_MASK) >> OHCI1394_SELF_ID_RECEIVE_Q0_GENERATION_SHIFT; +} + +static inline u16 ohci1394_self_id_receive_q0_get_timestamp(u32 quadlet0) +{ + return (quadlet0 & OHCI1394_SELF_ID_RECEIVE_Q0_TIMESTAMP_MASK) >> OHCI1394_SELF_ID_RECEIVE_Q0_TIMESTAMP_SHIFT; +} #endif /* _FIREWIRE_OHCI_H */ diff --git a/drivers/firewire/packet-header-definitions.h b/drivers/firewire/packet-header-definitions.h new file mode 100644 index 000000000000..87a5a31845c3 --- /dev/null +++ b/drivers/firewire/packet-header-definitions.h @@ -0,0 +1,236 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +// +// packet-header-definitions.h - The definitions of header fields for IEEE 1394 packet. 
+//
+// Copyright (c) 2024 Takashi Sakamoto

+#ifndef _FIREWIRE_PACKET_HEADER_DEFINITIONS_H
+#define _FIREWIRE_PACKET_HEADER_DEFINITIONS_H
+
+#include <linux/types.h>
+
+#define ASYNC_HEADER_QUADLET_COUNT 4
+
+#define ASYNC_HEADER_Q0_DESTINATION_SHIFT 16
+#define ASYNC_HEADER_Q0_DESTINATION_MASK 0xffff0000
+#define ASYNC_HEADER_Q0_TLABEL_SHIFT 10
+#define ASYNC_HEADER_Q0_TLABEL_MASK 0x0000fc00
+#define ASYNC_HEADER_Q0_RETRY_SHIFT 8
+#define ASYNC_HEADER_Q0_RETRY_MASK 0x00000300
+#define ASYNC_HEADER_Q0_TCODE_SHIFT 4
+#define ASYNC_HEADER_Q0_TCODE_MASK 0x000000f0
+#define ASYNC_HEADER_Q0_PRIORITY_SHIFT 0
+#define ASYNC_HEADER_Q0_PRIORITY_MASK 0x0000000f
+#define ASYNC_HEADER_Q1_SOURCE_SHIFT 16
+#define ASYNC_HEADER_Q1_SOURCE_MASK 0xffff0000
+#define ASYNC_HEADER_Q1_RCODE_SHIFT 12
+#define ASYNC_HEADER_Q1_RCODE_MASK 0x0000f000
+#define ASYNC_HEADER_Q1_OFFSET_HIGH_SHIFT 0
+#define ASYNC_HEADER_Q1_OFFSET_HIGH_MASK 0x0000ffff
+#define ASYNC_HEADER_Q3_DATA_LENGTH_SHIFT 16
+#define ASYNC_HEADER_Q3_DATA_LENGTH_MASK 0xffff0000
+#define ASYNC_HEADER_Q3_EXTENDED_TCODE_SHIFT 0
+#define ASYNC_HEADER_Q3_EXTENDED_TCODE_MASK 0x0000ffff
+
+static inline unsigned int async_header_get_destination(const u32 header[ASYNC_HEADER_QUADLET_COUNT])
+{
+ return (header[0] & ASYNC_HEADER_Q0_DESTINATION_MASK) >> ASYNC_HEADER_Q0_DESTINATION_SHIFT;
+}
+
+static inline unsigned int async_header_get_tlabel(const u32 header[ASYNC_HEADER_QUADLET_COUNT])
+{
+ return (header[0] & ASYNC_HEADER_Q0_TLABEL_MASK) >> ASYNC_HEADER_Q0_TLABEL_SHIFT;
+}
+
+static inline unsigned int async_header_get_retry(const u32 header[ASYNC_HEADER_QUADLET_COUNT])
+{
+ return (header[0] & ASYNC_HEADER_Q0_RETRY_MASK) >> ASYNC_HEADER_Q0_RETRY_SHIFT;
+}
+
+static inline unsigned int async_header_get_tcode(const u32 header[ASYNC_HEADER_QUADLET_COUNT])
+{
+ return (header[0] & ASYNC_HEADER_Q0_TCODE_MASK) >> ASYNC_HEADER_Q0_TCODE_SHIFT;
+}
+
+static inline unsigned int async_header_get_priority(const u32 header[ASYNC_HEADER_QUADLET_COUNT])
+{
+ return (header[0] & ASYNC_HEADER_Q0_PRIORITY_MASK) >> ASYNC_HEADER_Q0_PRIORITY_SHIFT;
+}
+
+static inline unsigned int async_header_get_source(const u32 header[ASYNC_HEADER_QUADLET_COUNT])
+{
+ return (header[1] & ASYNC_HEADER_Q1_SOURCE_MASK) >> ASYNC_HEADER_Q1_SOURCE_SHIFT;
+}
+
+static inline unsigned int async_header_get_rcode(const u32 header[ASYNC_HEADER_QUADLET_COUNT])
+{
+ return (header[1] & ASYNC_HEADER_Q1_RCODE_MASK) >> ASYNC_HEADER_Q1_RCODE_SHIFT;
+}
+
+static inline u64 async_header_get_offset(const u32 header[ASYNC_HEADER_QUADLET_COUNT])
+{
+ u32 hi = (header[1] & ASYNC_HEADER_Q1_OFFSET_HIGH_MASK) >> ASYNC_HEADER_Q1_OFFSET_HIGH_SHIFT;
+ return (((u64)hi) << 32) | ((u64)header[2]);
+}
+
+static inline u32 async_header_get_quadlet_data(const u32 header[ASYNC_HEADER_QUADLET_COUNT])
+{
+ return header[3];
+}
+
+static inline unsigned int async_header_get_data_length(const u32 header[ASYNC_HEADER_QUADLET_COUNT])
+{
+ return (header[3] & ASYNC_HEADER_Q3_DATA_LENGTH_MASK) >> ASYNC_HEADER_Q3_DATA_LENGTH_SHIFT;
+}
+
+static inline unsigned int async_header_get_extended_tcode(const u32 header[ASYNC_HEADER_QUADLET_COUNT])
+{
+ return (header[3] & ASYNC_HEADER_Q3_EXTENDED_TCODE_MASK) >> ASYNC_HEADER_Q3_EXTENDED_TCODE_SHIFT;
+}
+
+static inline void async_header_set_destination(u32 header[ASYNC_HEADER_QUADLET_COUNT],
+ unsigned int destination)
+{
+ header[0] &= ~ASYNC_HEADER_Q0_DESTINATION_MASK;
+ header[0] |=
+
+static inline void async_header_set_tlabel(u32 header[ASYNC_HEADER_QUADLET_COUNT],
+					   unsigned int tlabel)
+{
+	header[0] &= ~ASYNC_HEADER_Q0_TLABEL_MASK;
+	header[0] |= (((u32)tlabel) << ASYNC_HEADER_Q0_TLABEL_SHIFT) & ASYNC_HEADER_Q0_TLABEL_MASK;
+}
+
+static inline void async_header_set_retry(u32 header[ASYNC_HEADER_QUADLET_COUNT],
+					  unsigned int retry)
+{
+	header[0] &= ~ASYNC_HEADER_Q0_RETRY_MASK;
+	header[0] |= (((u32)retry) << ASYNC_HEADER_Q0_RETRY_SHIFT) & ASYNC_HEADER_Q0_RETRY_MASK;
+}
+
+static inline void async_header_set_tcode(u32 header[ASYNC_HEADER_QUADLET_COUNT],
+					  unsigned int tcode)
+{
+	header[0] &= ~ASYNC_HEADER_Q0_TCODE_MASK;
+	header[0] |= (((u32)tcode) << ASYNC_HEADER_Q0_TCODE_SHIFT) & ASYNC_HEADER_Q0_TCODE_MASK;
+}
+
+static inline void async_header_set_priority(u32 header[ASYNC_HEADER_QUADLET_COUNT],
+					     unsigned int priority)
+{
+	header[0] &= ~ASYNC_HEADER_Q0_PRIORITY_MASK;
+	header[0] |= (((u32)priority) << ASYNC_HEADER_Q0_PRIORITY_SHIFT) & ASYNC_HEADER_Q0_PRIORITY_MASK;
+}
+
+static inline void async_header_set_source(u32 header[ASYNC_HEADER_QUADLET_COUNT],
+					   unsigned int source)
+{
+	header[1] &= ~ASYNC_HEADER_Q1_SOURCE_MASK;
+	header[1] |= (((u32)source) << ASYNC_HEADER_Q1_SOURCE_SHIFT) & ASYNC_HEADER_Q1_SOURCE_MASK;
+}
+
+static inline void async_header_set_rcode(u32 header[ASYNC_HEADER_QUADLET_COUNT],
+					  unsigned int rcode)
+{
+	header[1] &= ~ASYNC_HEADER_Q1_RCODE_MASK;
+	header[1] |= (((u32)rcode) << ASYNC_HEADER_Q1_RCODE_SHIFT) & ASYNC_HEADER_Q1_RCODE_MASK;
+}
+
+static inline void async_header_set_offset(u32 header[ASYNC_HEADER_QUADLET_COUNT], u64 offset)
+{
+	u32 hi = (u32)(offset >> 32);
+	header[1] &= ~ASYNC_HEADER_Q1_OFFSET_HIGH_MASK;
+	header[1] |= (hi << ASYNC_HEADER_Q1_OFFSET_HIGH_SHIFT) & ASYNC_HEADER_Q1_OFFSET_HIGH_MASK;
+	header[2] = (u32)(offset & 0x00000000ffffffff);
+}
+
+static inline void async_header_set_quadlet_data(u32 header[ASYNC_HEADER_QUADLET_COUNT], u32 quadlet_data)
+{
+	header[3] = quadlet_data;
+}
+
+static inline void async_header_set_data_length(u32 header[ASYNC_HEADER_QUADLET_COUNT],
+						unsigned int data_length)
+{
+	header[3] &= ~ASYNC_HEADER_Q3_DATA_LENGTH_MASK;
+	header[3] |= (((u32)data_length) << ASYNC_HEADER_Q3_DATA_LENGTH_SHIFT) & ASYNC_HEADER_Q3_DATA_LENGTH_MASK;
+}
+
+static inline void async_header_set_extended_tcode(u32 header[ASYNC_HEADER_QUADLET_COUNT],
+						   unsigned int extended_tcode)
+{
+	header[3] &= ~ASYNC_HEADER_Q3_EXTENDED_TCODE_MASK;
+	header[3] |= (((u32)extended_tcode) << ASYNC_HEADER_Q3_EXTENDED_TCODE_SHIFT) & ASYNC_HEADER_Q3_EXTENDED_TCODE_MASK;
+}
+
+#define ISOC_HEADER_DATA_LENGTH_SHIFT	16
+#define ISOC_HEADER_DATA_LENGTH_MASK	0xffff0000
+#define ISOC_HEADER_TAG_SHIFT		14
+#define ISOC_HEADER_TAG_MASK		0x0000c000
+#define ISOC_HEADER_CHANNEL_SHIFT	8
+#define ISOC_HEADER_CHANNEL_MASK	0x00003f00
+#define ISOC_HEADER_TCODE_SHIFT		4
+#define ISOC_HEADER_TCODE_MASK		0x000000f0
+#define ISOC_HEADER_SY_SHIFT		0
+#define ISOC_HEADER_SY_MASK		0x0000000f
+
+static inline unsigned int isoc_header_get_data_length(u32 header)
+{
+	return (header & ISOC_HEADER_DATA_LENGTH_MASK) >> ISOC_HEADER_DATA_LENGTH_SHIFT;
+}
+
+static inline unsigned int isoc_header_get_tag(u32 header)
+{
+	return (header & ISOC_HEADER_TAG_MASK) >> ISOC_HEADER_TAG_SHIFT;
+}
+
+static inline unsigned int isoc_header_get_channel(u32 header)
+{
+	return (header & ISOC_HEADER_CHANNEL_MASK) >> ISOC_HEADER_CHANNEL_SHIFT;
+}
+
+static inline unsigned int isoc_header_get_tcode(u32 header)
+{
+	return (header & ISOC_HEADER_TCODE_MASK) >> ISOC_HEADER_TCODE_SHIFT;
+}
+
+static inline unsigned int isoc_header_get_sy(u32 header)
+{
+	return (header & ISOC_HEADER_SY_MASK) >> ISOC_HEADER_SY_SHIFT;
+}
+
+static inline void isoc_header_set_data_length(u32 *header, unsigned int data_length)
+{
+	*header &= ~ISOC_HEADER_DATA_LENGTH_MASK;
+	*header |= (((u32)data_length) << ISOC_HEADER_DATA_LENGTH_SHIFT) & ISOC_HEADER_DATA_LENGTH_MASK;
+}
+
+static inline void isoc_header_set_tag(u32 *header, unsigned int tag)
+{
+	*header &= ~ISOC_HEADER_TAG_MASK;
+	*header |= (((u32)tag) << ISOC_HEADER_TAG_SHIFT) & ISOC_HEADER_TAG_MASK;
+}
+
+static inline void isoc_header_set_channel(u32 *header, unsigned int channel)
+{
+	*header &= ~ISOC_HEADER_CHANNEL_MASK;
+	*header |= (((u32)channel) << ISOC_HEADER_CHANNEL_SHIFT) & ISOC_HEADER_CHANNEL_MASK;
+}
+
+static inline void isoc_header_set_tcode(u32 *header, unsigned int tcode)
+{
+	*header &= ~ISOC_HEADER_TCODE_MASK;
+	*header |= (((u32)tcode) << ISOC_HEADER_TCODE_SHIFT) & ISOC_HEADER_TCODE_MASK;
+}
+
+static inline void isoc_header_set_sy(u32 *header, unsigned int sy)
+{
+	*header &= ~ISOC_HEADER_SY_MASK;
+	*header |= (((u32)sy) << ISOC_HEADER_SY_SHIFT) & ISOC_HEADER_SY_MASK;
+}
+
+#endif // _FIREWIRE_PACKET_HEADER_DEFINITIONS_H
diff --git a/drivers/firewire/packet-serdes-test.c b/drivers/firewire/packet-serdes-test.c
new file mode 100644
index 000000000000..62ba433756ae
--- /dev/null
+++ b/drivers/firewire/packet-serdes-test.c
@@ -0,0 +1,917 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+//
+// packet-serdes-test.c - An application of KUnit to check serialization/deserialization of packets
+// defined by IEEE 1394.
+//
+// Copyright (c) 2024 Takashi Sakamoto
+
+#include <kunit/test.h>
+
+#include <linux/firewire-constants.h>
+
+#include "packet-header-definitions.h"
+#include "phy-packet-definitions.h"
+
+static void serialize_async_header_common(u32 header[ASYNC_HEADER_QUADLET_COUNT],
+					  unsigned int dst_id, unsigned int tlabel,
+					  unsigned int retry, unsigned int tcode,
+					  unsigned int priority, unsigned int src_id)
+{
+	async_header_set_destination(header, dst_id);
+	async_header_set_tlabel(header, tlabel);
+	async_header_set_retry(header, retry);
+	async_header_set_tcode(header, tcode);
+	async_header_set_priority(header, priority);
+	async_header_set_source(header, src_id);
+}
+
+static void serialize_async_header_request(u32 header[ASYNC_HEADER_QUADLET_COUNT],
+					   unsigned int dst_id, unsigned int tlabel,
+					   unsigned int retry, unsigned int tcode,
+					   unsigned int priority, unsigned int src_id, u64 offset)
+{
+	serialize_async_header_common(header, dst_id, tlabel, retry, tcode, priority, src_id);
+	async_header_set_offset(header, offset);
+}
+
+static void serialize_async_header_quadlet_request(u32 header[ASYNC_HEADER_QUADLET_COUNT],
+						   unsigned int dst_id, unsigned int tlabel,
+						   unsigned int retry, unsigned int tcode,
+						   unsigned int priority, unsigned int src_id,
+						   u64 offset)
+{
+	serialize_async_header_request(header, dst_id, tlabel, retry, tcode, priority, src_id,
+				       offset);
+}
+
+static void serialize_async_header_block_request(u32 header[ASYNC_HEADER_QUADLET_COUNT],
+						 unsigned int dst_id, unsigned int tlabel,
+						 unsigned int retry, unsigned int tcode,
+						 unsigned int priority, unsigned int src_id,
+						 u64 offset, unsigned int data_length,
+						 unsigned int extended_tcode)
+{
+	serialize_async_header_request(header, dst_id, tlabel, retry, tcode, priority, src_id,
+				       offset);
+	async_header_set_data_length(header, 
data_length); + async_header_set_extended_tcode(header, extended_tcode); +} + +static void serialize_async_header_response(u32 header[ASYNC_HEADER_QUADLET_COUNT], + unsigned int dst_id, unsigned int tlabel, + unsigned int retry, unsigned int tcode, + unsigned int priority, unsigned int src_id, + unsigned int rcode) +{ + serialize_async_header_common(header, dst_id, tlabel, retry, tcode, priority, src_id); + async_header_set_rcode(header, rcode); +} + +static void serialize_async_header_quadlet_response(u32 header[ASYNC_HEADER_QUADLET_COUNT], + unsigned int dst_id, unsigned int tlabel, + unsigned int retry, unsigned int tcode, + unsigned int priority, unsigned int src_id, + unsigned int rcode) +{ + serialize_async_header_response(header, dst_id, tlabel, retry, tcode, priority, src_id, + rcode); +} + +static void serialize_async_header_block_response(u32 header[ASYNC_HEADER_QUADLET_COUNT], + unsigned int dst_id, unsigned int tlabel, + unsigned int retry, unsigned int tcode, + unsigned int priority, unsigned int src_id, + unsigned int rcode, unsigned int data_length, + unsigned int extended_tcode) +{ + serialize_async_header_response(header, dst_id, tlabel, retry, tcode, priority, src_id, + rcode); + async_header_set_data_length(header, data_length); + async_header_set_extended_tcode(header, extended_tcode); +} + +static void deserialize_async_header_common(const u32 header[ASYNC_HEADER_QUADLET_COUNT], + unsigned int *dst_id, unsigned int *tlabel, + unsigned int *retry, unsigned int *tcode, + unsigned int *priority, unsigned int *src_id) +{ + *dst_id = async_header_get_destination(header); + *tlabel = async_header_get_tlabel(header); + *retry = async_header_get_retry(header); + *tcode = async_header_get_tcode(header); + *priority = async_header_get_priority(header); + *src_id = async_header_get_source(header); +} + +static void deserialize_async_header_request(const u32 header[ASYNC_HEADER_QUADLET_COUNT], + unsigned int *dst_id, unsigned int *tlabel, + unsigned int *retry, unsigned int *tcode, + unsigned int *priority, unsigned int *src_id, + u64 *offset) +{ + deserialize_async_header_common(header, dst_id, tlabel, retry, tcode, priority, src_id); + *offset = async_header_get_offset(header); +} + +static void deserialize_async_header_quadlet_request(const u32 header[ASYNC_HEADER_QUADLET_COUNT], + unsigned int *dst_id, unsigned int *tlabel, + unsigned int *retry, unsigned int *tcode, + unsigned int *priority, unsigned int *src_id, + u64 *offset) +{ + deserialize_async_header_request(header, dst_id, tlabel, retry, tcode, priority, src_id, + offset); +} + +static void deserialize_async_header_block_request(const u32 header[ASYNC_HEADER_QUADLET_COUNT], + unsigned int *dst_id, unsigned int *tlabel, + unsigned int *retry, unsigned int *tcode, + unsigned int *priority, unsigned int *src_id, + u64 *offset, + unsigned int *data_length, + unsigned int *extended_tcode) +{ + deserialize_async_header_request(header, dst_id, tlabel, retry, tcode, priority, src_id, + offset); + *data_length = async_header_get_data_length(header); + *extended_tcode = async_header_get_extended_tcode(header); +} + +static void deserialize_async_header_response(const u32 header[ASYNC_HEADER_QUADLET_COUNT], + unsigned int *dst_id, unsigned int *tlabel, + unsigned int *retry, unsigned int *tcode, + unsigned int *priority, unsigned int *src_id, + unsigned int *rcode) +{ + deserialize_async_header_common(header, dst_id, tlabel, retry, tcode, priority, src_id); + *rcode = async_header_get_rcode(header); +} + +static void 
deserialize_async_header_quadlet_response(const u32 header[ASYNC_HEADER_QUADLET_COUNT], + unsigned int *dst_id, unsigned int *tlabel, + unsigned int *retry, unsigned int *tcode, + unsigned int *priority, unsigned int *src_id, + unsigned int *rcode) +{ + deserialize_async_header_response(header, dst_id, tlabel, retry, tcode, priority, src_id, rcode); +} + +static void deserialize_async_header_block_response(const u32 header[ASYNC_HEADER_QUADLET_COUNT], + unsigned int *dst_id, unsigned int *tlabel, + unsigned int *retry, unsigned int *tcode, + unsigned int *priority, unsigned int *src_id, + unsigned int *rcode, unsigned int *data_length, + unsigned int *extended_tcode) +{ + deserialize_async_header_response(header, dst_id, tlabel, retry, tcode, priority, src_id, rcode); + *data_length = async_header_get_data_length(header); + *extended_tcode = async_header_get_extended_tcode(header); +} + +static void serialize_isoc_header(u32 *header, unsigned int data_length, unsigned int tag, + unsigned int channel, unsigned int tcode, unsigned int sy) +{ + isoc_header_set_data_length(header, data_length); + isoc_header_set_tag(header, tag); + isoc_header_set_channel(header, channel); + isoc_header_set_tcode(header, tcode); + isoc_header_set_sy(header, sy); +} + +static void deserialize_isoc_header(u32 header, unsigned int *data_length, unsigned int *tag, + unsigned int *channel, unsigned int *tcode, unsigned int *sy) +{ + *data_length = isoc_header_get_data_length(header); + *tag = isoc_header_get_tag(header); + *channel = isoc_header_get_channel(header); + *tcode = isoc_header_get_tcode(header); + *sy = isoc_header_get_sy(header); +} + +static void serialize_phy_packet_self_id_zero(u32 *quadlet, unsigned int packet_identifier, + unsigned int phy_id, bool extended, + bool link_is_active, unsigned int gap_count, + unsigned int scode, bool is_contender, + unsigned int power_class, bool is_initiated_reset, + bool has_more_packets) +{ + phy_packet_set_packet_identifier(quadlet, packet_identifier); + phy_packet_self_id_set_phy_id(quadlet, phy_id); + phy_packet_self_id_set_extended(quadlet, extended); + phy_packet_self_id_zero_set_link_active(quadlet, link_is_active); + phy_packet_self_id_zero_set_gap_count(quadlet, gap_count); + phy_packet_self_id_zero_set_scode(quadlet, scode); + phy_packet_self_id_zero_set_contender(quadlet, is_contender); + phy_packet_self_id_zero_set_power_class(quadlet, power_class); + phy_packet_self_id_zero_set_initiated_reset(quadlet, is_initiated_reset); + phy_packet_self_id_set_more_packets(quadlet, has_more_packets); +} + +static void deserialize_phy_packet_self_id_zero(u32 quadlet, unsigned int *packet_identifier, + unsigned int *phy_id, bool *extended, + bool *link_is_active, unsigned int *gap_count, + unsigned int *scode, bool *is_contender, + unsigned int *power_class, + bool *is_initiated_reset, bool *has_more_packets) +{ + *packet_identifier = phy_packet_get_packet_identifier(quadlet); + *phy_id = phy_packet_self_id_get_phy_id(quadlet); + *extended = phy_packet_self_id_get_extended(quadlet); + *link_is_active = phy_packet_self_id_zero_get_link_active(quadlet); + *gap_count = phy_packet_self_id_zero_get_gap_count(quadlet); + *scode = phy_packet_self_id_zero_get_scode(quadlet); + *is_contender = phy_packet_self_id_zero_get_contender(quadlet); + *power_class = phy_packet_self_id_zero_get_power_class(quadlet); + *is_initiated_reset = phy_packet_self_id_zero_get_initiated_reset(quadlet); + *has_more_packets = phy_packet_self_id_get_more_packets(quadlet); +} + +static void 
serialize_phy_packet_self_id_extended(u32 *quadlet, unsigned int packet_identifier, + unsigned int phy_id, bool extended, + unsigned int sequence, bool has_more_packets) +{ + phy_packet_set_packet_identifier(quadlet, packet_identifier); + phy_packet_self_id_set_phy_id(quadlet, phy_id); + phy_packet_self_id_set_extended(quadlet, extended); + phy_packet_self_id_extended_set_sequence(quadlet, sequence); + phy_packet_self_id_set_more_packets(quadlet, has_more_packets); +} + +static void deserialize_phy_packet_self_id_extended(u32 quadlet, unsigned int *packet_identifier, + unsigned int *phy_id, bool *extended, + unsigned int *sequence, bool *has_more_packets) +{ + *packet_identifier = phy_packet_get_packet_identifier(quadlet); + *phy_id = phy_packet_self_id_get_phy_id(quadlet); + *extended = phy_packet_self_id_get_extended(quadlet); + *sequence = phy_packet_self_id_extended_get_sequence(quadlet); + *has_more_packets = phy_packet_self_id_get_more_packets(quadlet); +} + +static void serialize_phy_packet_phy_config(u32 *quadlet, unsigned int packet_identifier, + unsigned int root_id, bool has_force_root_node, + bool has_gap_count_optimization, unsigned int gap_count) +{ + phy_packet_set_packet_identifier(quadlet, packet_identifier); + phy_packet_phy_config_set_root_id(quadlet, root_id); + phy_packet_phy_config_set_force_root_node(quadlet, has_force_root_node); + phy_packet_phy_config_set_gap_count_optimization(quadlet, has_gap_count_optimization); + phy_packet_phy_config_set_gap_count(quadlet, gap_count); +} + +static void deserialize_phy_packet_phy_config(u32 quadlet, unsigned int *packet_identifier, + unsigned int *root_id, bool *has_force_root_node, + bool *has_gap_count_optimization, + unsigned int *gap_count) +{ + *packet_identifier = phy_packet_get_packet_identifier(quadlet); + *root_id = phy_packet_phy_config_get_root_id(quadlet); + *has_force_root_node = phy_packet_phy_config_get_force_root_node(quadlet); + *has_gap_count_optimization = phy_packet_phy_config_get_gap_count_optimization(quadlet); + *gap_count = phy_packet_phy_config_get_gap_count(quadlet); +} + +static void test_async_header_write_quadlet_request(struct kunit *test) +{ + static const u32 expected[ASYNC_HEADER_QUADLET_COUNT] = { + 0xffc05100, + 0xffc1ffff, + 0xf0000234, + 0x1f0000c0, + }; + u32 header[ASYNC_HEADER_QUADLET_COUNT] = {0, 0, 0, 0}; + + unsigned int dst_id; + unsigned int tlabel; + unsigned int retry; + unsigned int tcode; + unsigned int priority; + unsigned int src_id; + u64 offset; + u32 quadlet_data; + + deserialize_async_header_quadlet_request(expected, &dst_id, &tlabel, &retry, &tcode, + &priority, &src_id, &offset); + quadlet_data = async_header_get_quadlet_data(expected); + + KUNIT_EXPECT_EQ(test, 0xffc0, dst_id); + KUNIT_EXPECT_EQ(test, 0x14, tlabel); + KUNIT_EXPECT_EQ(test, 0x01, retry); + KUNIT_EXPECT_EQ(test, TCODE_WRITE_QUADLET_REQUEST, tcode); + KUNIT_EXPECT_EQ(test, 0x00, priority); + KUNIT_EXPECT_EQ(test, 0xffc1, src_id); + KUNIT_EXPECT_EQ(test, 0xfffff0000234, offset); + KUNIT_EXPECT_EQ(test, 0x1f0000c0, quadlet_data); + + serialize_async_header_quadlet_request(header, dst_id, tlabel, retry, tcode, priority, + src_id, offset); + async_header_set_quadlet_data(header, quadlet_data); + + KUNIT_EXPECT_MEMEQ(test, header, expected, sizeof(expected)); +} + +static void test_async_header_write_block_request(struct kunit *test) +{ + static const u32 expected[ASYNC_HEADER_QUADLET_COUNT] = { + 0xffc06510, + 0xffc1ecc0, + 0x00000000, + 0x00180000, + }; + u32 header[ASYNC_HEADER_QUADLET_COUNT] = {0, 0, 0, 0}; 
+ + unsigned int dst_id; + unsigned int tlabel; + unsigned int retry; + unsigned int tcode; + unsigned int priority; + unsigned int src_id; + u64 offset; + unsigned int data_length; + unsigned int extended_tcode; + + deserialize_async_header_block_request(expected, &dst_id, &tlabel, &retry, &tcode, + &priority, &src_id, &offset, &data_length, + &extended_tcode); + + KUNIT_EXPECT_EQ(test, 0xffc0, dst_id); + KUNIT_EXPECT_EQ(test, 0x19, tlabel); + KUNIT_EXPECT_EQ(test, 0x01, retry); + KUNIT_EXPECT_EQ(test, TCODE_WRITE_BLOCK_REQUEST, tcode); + KUNIT_EXPECT_EQ(test, 0x00, priority); + KUNIT_EXPECT_EQ(test, 0xffc1, src_id); + KUNIT_EXPECT_EQ(test, 0xecc000000000, offset); + KUNIT_EXPECT_EQ(test, 0x0018, data_length); + KUNIT_EXPECT_EQ(test, 0x0000, extended_tcode); + + serialize_async_header_block_request(header, dst_id, tlabel, retry, tcode, priority, src_id, + offset, data_length, extended_tcode); + + KUNIT_EXPECT_MEMEQ(test, header, expected, sizeof(expected)); +} + +static void test_async_header_write_response(struct kunit *test) +{ + static const u32 expected[ASYNC_HEADER_QUADLET_COUNT] = { + 0xffc15120, + 0xffc00000, + 0x00000000, + 0x00000000, + }; + u32 header[ASYNC_HEADER_QUADLET_COUNT] = {0, 0, 0, 0}; + + unsigned int dst_id; + unsigned int tlabel; + unsigned int retry; + unsigned int tcode; + unsigned int priority; + unsigned int src_id; + unsigned int rcode; + + deserialize_async_header_quadlet_response(expected, &dst_id, &tlabel, &retry, &tcode, + &priority, &src_id, &rcode); + + KUNIT_EXPECT_EQ(test, 0xffc1, dst_id); + KUNIT_EXPECT_EQ(test, 0x14, tlabel); + KUNIT_EXPECT_EQ(test, 0x01, retry); + KUNIT_EXPECT_EQ(test, TCODE_WRITE_RESPONSE, tcode); + KUNIT_EXPECT_EQ(test, 0x00, priority); + KUNIT_EXPECT_EQ(test, 0xffc0, src_id); + KUNIT_EXPECT_EQ(test, RCODE_COMPLETE, rcode); + + serialize_async_header_quadlet_response(header, dst_id, tlabel, retry, tcode, priority, + src_id, rcode); + + KUNIT_EXPECT_MEMEQ(test, header, expected, sizeof(expected) - sizeof(expected[0])); +} + +static void test_async_header_read_quadlet_request(struct kunit *test) +{ + static const u32 expected[ASYNC_HEADER_QUADLET_COUNT] = { + 0xffc0f140, + 0xffc1ffff, + 0xf0000984, + 0x00000000, + }; + u32 header[ASYNC_HEADER_QUADLET_COUNT] = {0, 0, 0, 0}; + + unsigned int dst_id; + unsigned int tlabel; + unsigned int retry; + unsigned int tcode; + unsigned int priority; + unsigned int src_id; + u64 offset; + + deserialize_async_header_quadlet_request(expected, &dst_id, &tlabel, &retry, &tcode, + &priority, &src_id, &offset); + + KUNIT_EXPECT_EQ(test, 0xffc0, dst_id); + KUNIT_EXPECT_EQ(test, 0x3c, tlabel); + KUNIT_EXPECT_EQ(test, 0x01, retry); + KUNIT_EXPECT_EQ(test, TCODE_READ_QUADLET_REQUEST, tcode); + KUNIT_EXPECT_EQ(test, 0x00, priority); + KUNIT_EXPECT_EQ(test, 0xffc1, src_id); + KUNIT_EXPECT_EQ(test, 0xfffff0000984, offset); + + serialize_async_header_quadlet_request(header, dst_id, tlabel, retry, tcode, priority, + src_id, offset); + + KUNIT_EXPECT_MEMEQ(test, header, expected, sizeof(expected)); +} + +static void test_async_header_read_quadlet_response(struct kunit *test) +{ + static const u32 expected[ASYNC_HEADER_QUADLET_COUNT] = { + 0xffc1f160, + 0xffc00000, + 0x00000000, + 0x00000180, + }; + u32 header[ASYNC_HEADER_QUADLET_COUNT] = {0, 0, 0, 0}; + + unsigned int dst_id; + unsigned int tlabel; + unsigned int retry; + unsigned int tcode; + unsigned int priority; + unsigned int src_id; + unsigned int rcode; + u32 quadlet_data; + + deserialize_async_header_quadlet_response(expected, &dst_id, &tlabel, &retry, 
&tcode, + &priority, &src_id, &rcode); + quadlet_data = async_header_get_quadlet_data(expected); + + KUNIT_EXPECT_EQ(test, 0xffc1, dst_id); + KUNIT_EXPECT_EQ(test, 0x3c, tlabel); + KUNIT_EXPECT_EQ(test, 0x01, retry); + KUNIT_EXPECT_EQ(test, TCODE_READ_QUADLET_RESPONSE, tcode); + KUNIT_EXPECT_EQ(test, 0x00, priority); + KUNIT_EXPECT_EQ(test, 0xffc0, src_id); + KUNIT_EXPECT_EQ(test, RCODE_COMPLETE, rcode); + KUNIT_EXPECT_EQ(test, 0x00000180, quadlet_data); + + serialize_async_header_quadlet_response(header, dst_id, tlabel, retry, tcode, priority, + src_id, rcode); + async_header_set_quadlet_data(header, quadlet_data); + + KUNIT_EXPECT_MEMEQ(test, header, expected, sizeof(expected)); +} + +static void test_async_header_read_block_request(struct kunit *test) +{ + static const u32 expected[ASYNC_HEADER_QUADLET_COUNT] = { + 0xffc0e150, + 0xffc1ffff, + 0xf0000400, + 0x00200000, + }; + u32 header[ASYNC_HEADER_QUADLET_COUNT] = {0, 0, 0, 0}; + + unsigned int dst_id; + unsigned int tlabel; + unsigned int retry; + unsigned int tcode; + unsigned int priority; + unsigned int src_id; + u64 offset; + unsigned int data_length; + unsigned int extended_tcode; + + deserialize_async_header_block_request(expected, &dst_id, &tlabel, &retry, &tcode, + &priority, &src_id, &offset, &data_length, + &extended_tcode); + + KUNIT_EXPECT_EQ(test, 0xffc0, dst_id); + KUNIT_EXPECT_EQ(test, 0x38, tlabel); + KUNIT_EXPECT_EQ(test, 0x01, retry); + KUNIT_EXPECT_EQ(test, TCODE_READ_BLOCK_REQUEST, tcode); + KUNIT_EXPECT_EQ(test, 0x00, priority); + KUNIT_EXPECT_EQ(test, 0xffc1, src_id); + KUNIT_EXPECT_EQ(test, 0xfffff0000400, offset); + KUNIT_EXPECT_EQ(test, 0x0020, data_length); + KUNIT_EXPECT_EQ(test, 0x0000, extended_tcode); + + serialize_async_header_block_request(header, dst_id, tlabel, retry, tcode, priority, src_id, + offset, data_length, extended_tcode); + + KUNIT_EXPECT_MEMEQ(test, header, expected, sizeof(expected)); +} + +static void test_async_header_read_block_response(struct kunit *test) +{ + static const u32 expected[ASYNC_HEADER_QUADLET_COUNT] = { + 0xffc1e170, + 0xffc00000, + 0x00000000, + 0x00200000, + }; + u32 header[ASYNC_HEADER_QUADLET_COUNT] = {0, 0, 0, 0}; + + unsigned int dst_id; + unsigned int tlabel; + unsigned int retry; + unsigned int tcode; + unsigned int priority; + unsigned int src_id; + unsigned int rcode; + unsigned int data_length; + unsigned int extended_tcode; + + deserialize_async_header_block_response(expected, &dst_id, &tlabel, &retry, &tcode, + &priority, &src_id, &rcode, &data_length, + &extended_tcode); + + KUNIT_EXPECT_EQ(test, 0xffc1, dst_id); + KUNIT_EXPECT_EQ(test, 0x38, tlabel); + KUNIT_EXPECT_EQ(test, 0x01, retry); + KUNIT_EXPECT_EQ(test, TCODE_READ_BLOCK_RESPONSE, tcode); + KUNIT_EXPECT_EQ(test, 0x00, priority); + KUNIT_EXPECT_EQ(test, 0xffc0, src_id); + KUNIT_EXPECT_EQ(test, RCODE_COMPLETE, rcode); + KUNIT_EXPECT_EQ(test, 0x0020, data_length); + KUNIT_EXPECT_EQ(test, 0x0000, extended_tcode); + + serialize_async_header_block_response(header, dst_id, tlabel, retry, tcode, priority, + src_id, rcode, data_length, extended_tcode); + + KUNIT_EXPECT_MEMEQ(test, header, expected, sizeof(expected)); +} + +static void test_async_header_lock_request(struct kunit *test) +{ + static const u32 expected[ASYNC_HEADER_QUADLET_COUNT] = { + 0xffc02d90, + 0xffc1ffff, + 0xf0000984, + 0x00080002, + }; + u32 header[ASYNC_HEADER_QUADLET_COUNT] = {0, 0, 0, 0}; + + unsigned int dst_id; + unsigned int tlabel; + unsigned int retry; + unsigned int tcode; + unsigned int priority; + unsigned int src_id; + u64 
offset; + unsigned int data_length; + unsigned int extended_tcode; + + deserialize_async_header_block_request(expected, &dst_id, &tlabel, &retry, &tcode, + &priority, &src_id, &offset, &data_length, + &extended_tcode); + + KUNIT_EXPECT_EQ(test, 0xffc0, dst_id); + KUNIT_EXPECT_EQ(test, 0x0b, tlabel); + KUNIT_EXPECT_EQ(test, 0x01, retry); + KUNIT_EXPECT_EQ(test, TCODE_LOCK_REQUEST, tcode); + KUNIT_EXPECT_EQ(test, 0x00, priority); + KUNIT_EXPECT_EQ(test, 0xffc1, src_id); + KUNIT_EXPECT_EQ(test, 0xfffff0000984, offset); + KUNIT_EXPECT_EQ(test, 0x0008, data_length); + KUNIT_EXPECT_EQ(test, EXTCODE_COMPARE_SWAP, extended_tcode); + + serialize_async_header_block_request(header, dst_id, tlabel, retry, tcode, priority, src_id, + offset, data_length, extended_tcode); + + KUNIT_EXPECT_MEMEQ(test, header, expected, sizeof(expected)); +} + +static void test_async_header_lock_response(struct kunit *test) +{ + static const u32 expected[ASYNC_HEADER_QUADLET_COUNT] = { + 0xffc12db0, + 0xffc00000, + 0x00000000, + 0x00040002, + }; + u32 header[ASYNC_HEADER_QUADLET_COUNT] = {0, 0, 0, 0}; + + unsigned int dst_id; + unsigned int tlabel; + unsigned int retry; + unsigned int tcode; + unsigned int priority; + unsigned int src_id; + unsigned int rcode; + unsigned int data_length; + unsigned int extended_tcode; + + deserialize_async_header_block_response(expected, &dst_id, &tlabel, &retry, &tcode, + &priority, &src_id, &rcode, &data_length, + &extended_tcode); + + KUNIT_EXPECT_EQ(test, 0xffc1, dst_id); + KUNIT_EXPECT_EQ(test, 0x0b, tlabel); + KUNIT_EXPECT_EQ(test, 0x01, retry); + KUNIT_EXPECT_EQ(test, TCODE_LOCK_RESPONSE, tcode); + KUNIT_EXPECT_EQ(test, 0x00, priority); + KUNIT_EXPECT_EQ(test, 0xffc0, src_id); + KUNIT_EXPECT_EQ(test, RCODE_COMPLETE, rcode); + KUNIT_EXPECT_EQ(test, 0x0004, data_length); + KUNIT_EXPECT_EQ(test, EXTCODE_COMPARE_SWAP, extended_tcode); + + serialize_async_header_block_response(header, dst_id, tlabel, retry, tcode, priority, + src_id, rcode, data_length, extended_tcode); + + KUNIT_EXPECT_MEMEQ(test, header, expected, sizeof(expected)); +} + +static void test_isoc_header(struct kunit *test) +{ + const u32 expected = 0x00d08dec; + u32 header = 0; + + unsigned int data_length; + unsigned int tag; + unsigned int channel; + unsigned int tcode; + unsigned int sy; + + deserialize_isoc_header(expected, &data_length, &tag, &channel, &tcode, &sy); + + KUNIT_EXPECT_EQ(test, 0xd0, data_length); + KUNIT_EXPECT_EQ(test, 0x02, tag); + KUNIT_EXPECT_EQ(test, 0x0d, channel); + KUNIT_EXPECT_EQ(test, 0x0e, tcode); + KUNIT_EXPECT_EQ(test, 0x0c, sy); + + serialize_isoc_header(&header, data_length, tag, channel, tcode, sy); + + KUNIT_EXPECT_EQ(test, header, expected); +} + +static void test_phy_packet_self_id_zero_case0(struct kunit *test) +{ + // TSB41AB1/2 with 1 port. 
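+	// A worked decode of the expected quadlet below: 0x80458c80 = identifier 0b10 (self ID),
+	// phy_id 0, extended 0, L 1, gap_count 0x05, sp 0b10 (S400), c 1, pwr 0b100,
+	// p0 0b10 (parent), initiated_reset 0, more 0 -- matching the expectations that follow.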
+ const u32 expected[] = {0x80458c80}; + u32 quadlets[] = {0}; + + unsigned int packet_identifier; + unsigned int phy_id; + bool extended; + bool link_is_active; + unsigned int gap_count; + unsigned int scode; + bool is_contender; + unsigned int power_class; + enum phy_packet_self_id_port_status port_status[3]; + bool is_initiated_reset; + bool has_more_packets; + unsigned int port_index; + + deserialize_phy_packet_self_id_zero(expected[0], &packet_identifier, &phy_id, &extended, + &link_is_active, &gap_count, &scode, &is_contender, + &power_class, &is_initiated_reset, &has_more_packets); + + KUNIT_EXPECT_EQ(test, PHY_PACKET_PACKET_IDENTIFIER_SELF_ID, packet_identifier); + KUNIT_EXPECT_EQ(test, 0, phy_id); + KUNIT_EXPECT_FALSE(test, extended); + KUNIT_EXPECT_TRUE(test, link_is_active); + KUNIT_EXPECT_EQ(test, 0x05, gap_count); + KUNIT_EXPECT_EQ(test, SCODE_400, scode); + KUNIT_EXPECT_TRUE(test, is_contender); + KUNIT_EXPECT_EQ(test, 0x4, power_class); + KUNIT_EXPECT_FALSE(test, is_initiated_reset); + KUNIT_EXPECT_FALSE(test, has_more_packets); + + serialize_phy_packet_self_id_zero(quadlets, packet_identifier, phy_id, extended, + link_is_active, gap_count, scode, is_contender, + power_class, is_initiated_reset, has_more_packets); + + for (port_index = 0; port_index < ARRAY_SIZE(port_status); ++port_index) { + port_status[port_index] = + self_id_sequence_get_port_status(expected, ARRAY_SIZE(expected), port_index); + } + + KUNIT_EXPECT_EQ(test, PHY_PACKET_SELF_ID_PORT_STATUS_PARENT, port_status[0]); + KUNIT_EXPECT_EQ(test, PHY_PACKET_SELF_ID_PORT_STATUS_NONE, port_status[1]); + KUNIT_EXPECT_EQ(test, PHY_PACKET_SELF_ID_PORT_STATUS_NONE, port_status[2]); + + for (port_index = 0; port_index < ARRAY_SIZE(port_status); ++port_index) { + self_id_sequence_set_port_status(quadlets, ARRAY_SIZE(quadlets), port_index, + port_status[port_index]); + } + + KUNIT_EXPECT_MEMEQ(test, quadlets, expected, sizeof(expected)); +} + +static void test_phy_packet_self_id_zero_case1(struct kunit *test) +{ + // XIO2213 and TSB81BA3E with 3 ports. 
+ const u32 expected[] = {0x817fcc5e}; + u32 quadlets[] = {0}; + + unsigned int packet_identifier; + unsigned int phy_id; + bool extended; + bool link_is_active; + unsigned int gap_count; + unsigned int scode; + bool is_contender; + unsigned int power_class; + enum phy_packet_self_id_port_status port_status[3]; + bool is_initiated_reset; + bool has_more_packets; + unsigned int port_index; + + deserialize_phy_packet_self_id_zero(expected[0], &packet_identifier, &phy_id, &extended, + &link_is_active, &gap_count, &scode, &is_contender, + &power_class, &is_initiated_reset, &has_more_packets); + + KUNIT_EXPECT_EQ(test, PHY_PACKET_PACKET_IDENTIFIER_SELF_ID, packet_identifier); + KUNIT_EXPECT_EQ(test, 1, phy_id); + KUNIT_EXPECT_FALSE(test, extended); + KUNIT_EXPECT_TRUE(test, link_is_active); + KUNIT_EXPECT_EQ(test, 0x3f, gap_count); + KUNIT_EXPECT_EQ(test, SCODE_800, scode); + KUNIT_EXPECT_TRUE(test, is_contender); + KUNIT_EXPECT_EQ(test, 0x4, power_class); + KUNIT_EXPECT_TRUE(test, is_initiated_reset); + KUNIT_EXPECT_FALSE(test, has_more_packets); + + serialize_phy_packet_self_id_zero(quadlets, packet_identifier, phy_id, extended, + link_is_active, gap_count, scode, is_contender, + power_class, is_initiated_reset, has_more_packets); + + for (port_index = 0; port_index < ARRAY_SIZE(port_status); ++port_index) { + port_status[port_index] = + self_id_sequence_get_port_status(expected, ARRAY_SIZE(expected), port_index); + } + + KUNIT_EXPECT_EQ(test, PHY_PACKET_SELF_ID_PORT_STATUS_NCONN, port_status[0]); + KUNIT_EXPECT_EQ(test, PHY_PACKET_SELF_ID_PORT_STATUS_NCONN, port_status[1]); + KUNIT_EXPECT_EQ(test, PHY_PACKET_SELF_ID_PORT_STATUS_CHILD, port_status[2]); + + for (port_index = 0; port_index < ARRAY_SIZE(port_status); ++port_index) { + self_id_sequence_set_port_status(quadlets, ARRAY_SIZE(quadlets), port_index, + port_status[port_index]); + } + + KUNIT_EXPECT_MEMEQ(test, quadlets, expected, sizeof(expected)); +} + +static void test_phy_packet_self_id_zero_and_one(struct kunit *test) +{ + // TSB41LV06A with 6 ports. 
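+	// The expected data below is a two-quadlet sequence. Quadlet 0 (0x803f8459): phy_id 0,
+	// L 0, gap_count 0x3f, sp 0b10 (S400), c 0, pwr 0b100, p0/p1 nconn, p2 parent, more 1.
+	// Quadlet 1 (0x80815000): extended self ID with sequence 0, carrying ports 3 and up.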
+	const u32 expected[] = {
+		0x803f8459,
+		0x80815000,
+	};
+	u32 quadlets[] = {0, 0};
+
+	unsigned int packet_identifier;
+	unsigned int phy_id;
+	bool extended;
+	bool link_is_active;
+	unsigned int gap_count;
+	unsigned int scode;
+	bool is_contender;
+	unsigned int power_class;
+	enum phy_packet_self_id_port_status port_status[11];
+	bool is_initiated_reset;
+	bool has_more_packets;
+
+	unsigned int sequence;
+	unsigned int port_index;
+
+	deserialize_phy_packet_self_id_zero(expected[0], &packet_identifier, &phy_id, &extended,
+					    &link_is_active, &gap_count, &scode, &is_contender,
+					    &power_class, &is_initiated_reset, &has_more_packets);
+
+	KUNIT_EXPECT_EQ(test, PHY_PACKET_PACKET_IDENTIFIER_SELF_ID, packet_identifier);
+	KUNIT_EXPECT_EQ(test, 0, phy_id);
+	KUNIT_EXPECT_FALSE(test, extended);
+	KUNIT_EXPECT_FALSE(test, link_is_active);
+	KUNIT_EXPECT_EQ(test, 0x3f, gap_count);
+	KUNIT_EXPECT_EQ(test, SCODE_400, scode);
+	KUNIT_EXPECT_FALSE(test, is_contender);
+	KUNIT_EXPECT_EQ(test, 0x4, power_class);
+	KUNIT_EXPECT_FALSE(test, is_initiated_reset);
+	KUNIT_EXPECT_TRUE(test, has_more_packets);
+
+	serialize_phy_packet_self_id_zero(quadlets, packet_identifier, phy_id, extended,
+					  link_is_active, gap_count, scode, is_contender,
+					  power_class, is_initiated_reset, has_more_packets);
+
+	deserialize_phy_packet_self_id_extended(expected[1], &packet_identifier, &phy_id, &extended,
+						&sequence, &has_more_packets);
+
+	KUNIT_EXPECT_EQ(test, PHY_PACKET_PACKET_IDENTIFIER_SELF_ID, packet_identifier);
+	KUNIT_EXPECT_EQ(test, 0, phy_id);
+	KUNIT_EXPECT_TRUE(test, extended);
+	KUNIT_EXPECT_EQ(test, 0, sequence);
+	KUNIT_EXPECT_FALSE(test, has_more_packets);
+
+	serialize_phy_packet_self_id_extended(&quadlets[1], packet_identifier, phy_id, extended,
+					      sequence, has_more_packets);
+
+	for (port_index = 0; port_index < ARRAY_SIZE(port_status); ++port_index) {
+		port_status[port_index] =
+			self_id_sequence_get_port_status(expected, ARRAY_SIZE(expected), port_index);
+	}
+
+	KUNIT_EXPECT_EQ(test, PHY_PACKET_SELF_ID_PORT_STATUS_NCONN, port_status[0]);
+	KUNIT_EXPECT_EQ(test, PHY_PACKET_SELF_ID_PORT_STATUS_NCONN, port_status[1]);
+	KUNIT_EXPECT_EQ(test, PHY_PACKET_SELF_ID_PORT_STATUS_PARENT, port_status[2]);
+	KUNIT_EXPECT_EQ(test, PHY_PACKET_SELF_ID_PORT_STATUS_NCONN, port_status[3]);
+	KUNIT_EXPECT_EQ(test, PHY_PACKET_SELF_ID_PORT_STATUS_NCONN, port_status[4]);
+	KUNIT_EXPECT_EQ(test, PHY_PACKET_SELF_ID_PORT_STATUS_NCONN, port_status[5]);
+	KUNIT_EXPECT_EQ(test, PHY_PACKET_SELF_ID_PORT_STATUS_NONE, port_status[6]);
+	KUNIT_EXPECT_EQ(test, PHY_PACKET_SELF_ID_PORT_STATUS_NONE, port_status[7]);
+	KUNIT_EXPECT_EQ(test, PHY_PACKET_SELF_ID_PORT_STATUS_NONE, port_status[8]);
+	KUNIT_EXPECT_EQ(test, PHY_PACKET_SELF_ID_PORT_STATUS_NONE, port_status[9]);
+	KUNIT_EXPECT_EQ(test, PHY_PACKET_SELF_ID_PORT_STATUS_NONE, port_status[10]);
+
+	for (port_index = 0; port_index < ARRAY_SIZE(port_status); ++port_index) {
+		self_id_sequence_set_port_status(quadlets, ARRAY_SIZE(quadlets), port_index,
+						 port_status[port_index]);
+	}
+
+	KUNIT_EXPECT_MEMEQ(test, quadlets, expected, sizeof(expected));
+}
+
+static void test_phy_packet_phy_config_force_root_node(struct kunit *test)
+{
+	const u32 expected = 0x02800000;
+	u32 quadlet = 0;
+
+	unsigned int packet_identifier;
+	unsigned int root_id;
+	bool has_force_root_node;
+	bool has_gap_count_optimization;
+	unsigned int gap_count;
+
+	deserialize_phy_packet_phy_config(expected, &packet_identifier, &root_id,
+					  &has_force_root_node, &has_gap_count_optimization,
+					  &gap_count);
+
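+	// 0x02800000 decodes as: identifier 0b00 (phy config), root_id 0x02, R 1 (force root
+	// node), T 0 (no gap count optimization), gap_count 0 -- see the expectations below.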
+	KUNIT_EXPECT_EQ(test, PHY_PACKET_PACKET_IDENTIFIER_PHY_CONFIG, packet_identifier);
+	KUNIT_EXPECT_EQ(test, 0x02, root_id);
+	KUNIT_EXPECT_TRUE(test, has_force_root_node);
+	KUNIT_EXPECT_FALSE(test, has_gap_count_optimization);
+	KUNIT_EXPECT_EQ(test, 0, gap_count);
+
+	serialize_phy_packet_phy_config(&quadlet, packet_identifier, root_id, has_force_root_node,
+					has_gap_count_optimization, gap_count);
+
+	KUNIT_EXPECT_EQ(test, quadlet, expected);
+}
+
+static void test_phy_packet_phy_config_gap_count_optimization(struct kunit *test)
+{
+	const u32 expected = 0x034f0000;
+	u32 quadlet = 0;
+
+	unsigned int packet_identifier;
+	unsigned int root_id;
+	bool has_force_root_node;
+	bool has_gap_count_optimization;
+	unsigned int gap_count;
+
+	deserialize_phy_packet_phy_config(expected, &packet_identifier, &root_id,
+					  &has_force_root_node, &has_gap_count_optimization,
+					  &gap_count);
+
+	KUNIT_EXPECT_EQ(test, PHY_PACKET_PACKET_IDENTIFIER_PHY_CONFIG, packet_identifier);
+	KUNIT_EXPECT_EQ(test, 0x03, root_id);
+	KUNIT_EXPECT_FALSE(test, has_force_root_node);
+	KUNIT_EXPECT_TRUE(test, has_gap_count_optimization);
+	KUNIT_EXPECT_EQ(test, 0x0f, gap_count);
+
+	serialize_phy_packet_phy_config(&quadlet, packet_identifier, root_id, has_force_root_node,
+					has_gap_count_optimization, gap_count);
+
+	KUNIT_EXPECT_EQ(test, quadlet, expected);
+}
+
+static struct kunit_case packet_serdes_test_cases[] = {
+	KUNIT_CASE(test_async_header_write_quadlet_request),
+	KUNIT_CASE(test_async_header_write_block_request),
+	KUNIT_CASE(test_async_header_write_response),
+	KUNIT_CASE(test_async_header_read_quadlet_request),
+	KUNIT_CASE(test_async_header_read_quadlet_response),
+	KUNIT_CASE(test_async_header_read_block_request),
+	KUNIT_CASE(test_async_header_read_block_response),
+	KUNIT_CASE(test_async_header_lock_request),
+	KUNIT_CASE(test_async_header_lock_response),
+	KUNIT_CASE(test_isoc_header),
+	KUNIT_CASE(test_phy_packet_self_id_zero_case0),
+	KUNIT_CASE(test_phy_packet_self_id_zero_case1),
+	KUNIT_CASE(test_phy_packet_self_id_zero_and_one),
+	KUNIT_CASE(test_phy_packet_phy_config_force_root_node),
+	KUNIT_CASE(test_phy_packet_phy_config_gap_count_optimization),
+	{}
+};
+
+static struct kunit_suite packet_serdes_test_suite = {
+	.name = "firewire-packet-serdes",
+	.test_cases = packet_serdes_test_cases,
+};
+kunit_test_suite(packet_serdes_test_suite);
+
+MODULE_DESCRIPTION("FireWire packet serialization/deserialization unit test suite");
+MODULE_LICENSE("GPL");
diff --git a/drivers/firewire/phy-packet-definitions.h b/drivers/firewire/phy-packet-definitions.h
new file mode 100644
index 000000000000..03c7c606759f
--- /dev/null
+++ b/drivers/firewire/phy-packet-definitions.h
@@ -0,0 +1,302 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+//
+// phy-packet-definitions.h - The definitions of phy packets for IEEE 1394.
+// +// Copyright (c) 2024 Takashi Sakamoto + +#ifndef _FIREWIRE_PHY_PACKET_DEFINITIONS_H +#define _FIREWIRE_PHY_PACKET_DEFINITIONS_H + +#define PACKET_IDENTIFIER_MASK 0xc0000000 +#define PACKET_IDENTIFIER_SHIFT 30 + +static inline unsigned int phy_packet_get_packet_identifier(u32 quadlet) +{ + return (quadlet & PACKET_IDENTIFIER_MASK) >> PACKET_IDENTIFIER_SHIFT; +} + +static inline void phy_packet_set_packet_identifier(u32 *quadlet, unsigned int packet_identifier) +{ + *quadlet &= ~PACKET_IDENTIFIER_MASK; + *quadlet |= (packet_identifier << PACKET_IDENTIFIER_SHIFT) & PACKET_IDENTIFIER_MASK; +} + +#define PHY_PACKET_PACKET_IDENTIFIER_PHY_CONFIG 0 + +#define PHY_CONFIG_ROOT_ID_MASK 0x3f000000 +#define PHY_CONFIG_ROOT_ID_SHIFT 24 +#define PHY_CONFIG_FORCE_ROOT_NODE_MASK 0x00800000 +#define PHY_CONFIG_FORCE_ROOT_NODE_SHIFT 23 +#define PHY_CONFIG_GAP_COUNT_OPTIMIZATION_MASK 0x00400000 +#define PHY_CONFIG_GAP_COUNT_OPTIMIZATION_SHIFT 22 +#define PHY_CONFIG_GAP_COUNT_MASK 0x003f0000 +#define PHY_CONFIG_GAP_COUNT_SHIFT 16 + +static inline unsigned int phy_packet_phy_config_get_root_id(u32 quadlet) +{ + return (quadlet & PHY_CONFIG_ROOT_ID_MASK) >> PHY_CONFIG_ROOT_ID_SHIFT; +} + +static inline void phy_packet_phy_config_set_root_id(u32 *quadlet, unsigned int root_id) +{ + *quadlet &= ~PHY_CONFIG_ROOT_ID_MASK; + *quadlet |= (root_id << PHY_CONFIG_ROOT_ID_SHIFT) & PHY_CONFIG_ROOT_ID_MASK; +} + +static inline bool phy_packet_phy_config_get_force_root_node(u32 quadlet) +{ + return (quadlet & PHY_CONFIG_FORCE_ROOT_NODE_MASK) >> PHY_CONFIG_FORCE_ROOT_NODE_SHIFT; +} + +static inline void phy_packet_phy_config_set_force_root_node(u32 *quadlet, bool has_force_root_node) +{ + *quadlet &= ~PHY_CONFIG_FORCE_ROOT_NODE_MASK; + *quadlet |= (has_force_root_node << PHY_CONFIG_FORCE_ROOT_NODE_SHIFT) & PHY_CONFIG_FORCE_ROOT_NODE_MASK; +} + +static inline bool phy_packet_phy_config_get_gap_count_optimization(u32 quadlet) +{ + return (quadlet & PHY_CONFIG_GAP_COUNT_OPTIMIZATION_MASK) >> PHY_CONFIG_GAP_COUNT_OPTIMIZATION_SHIFT; +} + +static inline void phy_packet_phy_config_set_gap_count_optimization(u32 *quadlet, bool has_gap_count_optimization) +{ + *quadlet &= ~PHY_CONFIG_GAP_COUNT_OPTIMIZATION_MASK; + *quadlet |= (has_gap_count_optimization << PHY_CONFIG_GAP_COUNT_OPTIMIZATION_SHIFT) & PHY_CONFIG_GAP_COUNT_OPTIMIZATION_MASK; +} + +static inline unsigned int phy_packet_phy_config_get_gap_count(u32 quadlet) +{ + return (quadlet & PHY_CONFIG_GAP_COUNT_MASK) >> PHY_CONFIG_GAP_COUNT_SHIFT; +} + +static inline void phy_packet_phy_config_set_gap_count(u32 *quadlet, unsigned int gap_count) +{ + *quadlet &= ~PHY_CONFIG_GAP_COUNT_MASK; + *quadlet |= (gap_count << PHY_CONFIG_GAP_COUNT_SHIFT) & PHY_CONFIG_GAP_COUNT_MASK; +} + +#define PHY_PACKET_PACKET_IDENTIFIER_SELF_ID 2 + +#define SELF_ID_PHY_ID_MASK 0x3f000000 +#define SELF_ID_PHY_ID_SHIFT 24 +#define SELF_ID_EXTENDED_MASK 0x00800000 +#define SELF_ID_EXTENDED_SHIFT 23 +#define SELF_ID_MORE_PACKETS_MASK 0x00000001 +#define SELF_ID_MORE_PACKETS_SHIFT 0 + +#define SELF_ID_ZERO_LINK_ACTIVE_MASK 0x00400000 +#define SELF_ID_ZERO_LINK_ACTIVE_SHIFT 22 +#define SELF_ID_ZERO_GAP_COUNT_MASK 0x003f0000 +#define SELF_ID_ZERO_GAP_COUNT_SHIFT 16 +#define SELF_ID_ZERO_SCODE_MASK 0x0000c000 +#define SELF_ID_ZERO_SCODE_SHIFT 14 +#define SELF_ID_ZERO_CONTENDER_MASK 0x00000800 +#define SELF_ID_ZERO_CONTENDER_SHIFT 11 +#define SELF_ID_ZERO_POWER_CLASS_MASK 0x00000700 +#define SELF_ID_ZERO_POWER_CLASS_SHIFT 8 +#define SELF_ID_ZERO_INITIATED_RESET_MASK 0x00000002 +#define 
SELF_ID_ZERO_INITIATED_RESET_SHIFT 1 + +#define SELF_ID_EXTENDED_SEQUENCE_MASK 0x00700000 +#define SELF_ID_EXTENDED_SEQUENCE_SHIFT 20 + +#define SELF_ID_PORT_STATUS_MASK 0x3 + +#define SELF_ID_SEQUENCE_MAXIMUM_QUADLET_COUNT 4 + +static inline unsigned int phy_packet_self_id_get_phy_id(u32 quadlet) +{ + return (quadlet & SELF_ID_PHY_ID_MASK) >> SELF_ID_PHY_ID_SHIFT; +} + +static inline void phy_packet_self_id_set_phy_id(u32 *quadlet, unsigned int phy_id) +{ + *quadlet &= ~SELF_ID_PHY_ID_MASK; + *quadlet |= (phy_id << SELF_ID_PHY_ID_SHIFT) & SELF_ID_PHY_ID_MASK; +} + +static inline bool phy_packet_self_id_get_extended(u32 quadlet) +{ + return (quadlet & SELF_ID_EXTENDED_MASK) >> SELF_ID_EXTENDED_SHIFT; +} + +static inline void phy_packet_self_id_set_extended(u32 *quadlet, bool extended) +{ + *quadlet &= ~SELF_ID_EXTENDED_MASK; + *quadlet |= (extended << SELF_ID_EXTENDED_SHIFT) & SELF_ID_EXTENDED_MASK; +} + +static inline bool phy_packet_self_id_zero_get_link_active(u32 quadlet) +{ + return (quadlet & SELF_ID_ZERO_LINK_ACTIVE_MASK) >> SELF_ID_ZERO_LINK_ACTIVE_SHIFT; +} + +static inline void phy_packet_self_id_zero_set_link_active(u32 *quadlet, bool is_active) +{ + *quadlet &= ~SELF_ID_ZERO_LINK_ACTIVE_MASK; + *quadlet |= (is_active << SELF_ID_ZERO_LINK_ACTIVE_SHIFT) & SELF_ID_ZERO_LINK_ACTIVE_MASK; +} + +static inline unsigned int phy_packet_self_id_zero_get_gap_count(u32 quadlet) +{ + return (quadlet & SELF_ID_ZERO_GAP_COUNT_MASK) >> SELF_ID_ZERO_GAP_COUNT_SHIFT; +} + +static inline void phy_packet_self_id_zero_set_gap_count(u32 *quadlet, unsigned int gap_count) +{ + *quadlet &= ~SELF_ID_ZERO_GAP_COUNT_MASK; + *quadlet |= (gap_count << SELF_ID_ZERO_GAP_COUNT_SHIFT) & SELF_ID_ZERO_GAP_COUNT_MASK; +} + +static inline unsigned int phy_packet_self_id_zero_get_scode(u32 quadlet) +{ + return (quadlet & SELF_ID_ZERO_SCODE_MASK) >> SELF_ID_ZERO_SCODE_SHIFT; +} + +static inline void phy_packet_self_id_zero_set_scode(u32 *quadlet, unsigned int speed) +{ + *quadlet &= ~SELF_ID_ZERO_SCODE_MASK; + *quadlet |= (speed << SELF_ID_ZERO_SCODE_SHIFT) & SELF_ID_ZERO_SCODE_MASK; +} + +static inline bool phy_packet_self_id_zero_get_contender(u32 quadlet) +{ + return (quadlet & SELF_ID_ZERO_CONTENDER_MASK) >> SELF_ID_ZERO_CONTENDER_SHIFT; +} + +static inline void phy_packet_self_id_zero_set_contender(u32 *quadlet, bool is_contender) +{ + *quadlet &= ~SELF_ID_ZERO_CONTENDER_MASK; + *quadlet |= (is_contender << SELF_ID_ZERO_CONTENDER_SHIFT) & SELF_ID_ZERO_CONTENDER_MASK; +} + +static inline unsigned int phy_packet_self_id_zero_get_power_class(u32 quadlet) +{ + return (quadlet & SELF_ID_ZERO_POWER_CLASS_MASK) >> SELF_ID_ZERO_POWER_CLASS_SHIFT; +} + +static inline void phy_packet_self_id_zero_set_power_class(u32 *quadlet, unsigned int power_class) +{ + *quadlet &= ~SELF_ID_ZERO_POWER_CLASS_MASK; + *quadlet |= (power_class << SELF_ID_ZERO_POWER_CLASS_SHIFT) & SELF_ID_ZERO_POWER_CLASS_MASK; +} + +static inline bool phy_packet_self_id_zero_get_initiated_reset(u32 quadlet) +{ + return (quadlet & SELF_ID_ZERO_INITIATED_RESET_MASK) >> SELF_ID_ZERO_INITIATED_RESET_SHIFT; +} + +static inline void phy_packet_self_id_zero_set_initiated_reset(u32 *quadlet, bool is_initiated_reset) +{ + *quadlet &= ~SELF_ID_ZERO_INITIATED_RESET_MASK; + *quadlet |= (is_initiated_reset << SELF_ID_ZERO_INITIATED_RESET_SHIFT) & SELF_ID_ZERO_INITIATED_RESET_MASK; +} + +static inline bool phy_packet_self_id_get_more_packets(u32 quadlet) +{ + return (quadlet & SELF_ID_MORE_PACKETS_MASK) >> SELF_ID_MORE_PACKETS_SHIFT; +} + +static inline void 
phy_packet_self_id_set_more_packets(u32 *quadlet, bool is_more_packets)
+{
+	*quadlet &= ~SELF_ID_MORE_PACKETS_MASK;
+	*quadlet |= (is_more_packets << SELF_ID_MORE_PACKETS_SHIFT) & SELF_ID_MORE_PACKETS_MASK;
+}
+
+static inline unsigned int phy_packet_self_id_extended_get_sequence(u32 quadlet)
+{
+	return (quadlet & SELF_ID_EXTENDED_SEQUENCE_MASK) >> SELF_ID_EXTENDED_SEQUENCE_SHIFT;
+}
+
+static inline void phy_packet_self_id_extended_set_sequence(u32 *quadlet, unsigned int sequence)
+{
+	*quadlet &= ~SELF_ID_EXTENDED_SEQUENCE_MASK;
+	*quadlet |= (sequence << SELF_ID_EXTENDED_SEQUENCE_SHIFT) & SELF_ID_EXTENDED_SEQUENCE_MASK;
+}
+
+struct self_id_sequence_enumerator {
+	const u32 *cursor;
+	unsigned int quadlet_count;
+};
+
+static inline const u32 *self_id_sequence_enumerator_next(
+		struct self_id_sequence_enumerator *enumerator, unsigned int *quadlet_count)
+{
+	const u32 *self_id_sequence, *cursor;
+	u32 quadlet;
+	unsigned int count;
+	unsigned int sequence;
+
+	if (enumerator->cursor == NULL || enumerator->quadlet_count == 0)
+		return ERR_PTR(-ENODATA);
+	cursor = enumerator->cursor;
+	count = 1;
+
+	quadlet = *cursor;
+	sequence = 0;
+	while (phy_packet_self_id_get_more_packets(quadlet)) {
+		if (count >= enumerator->quadlet_count ||
+		    count >= SELF_ID_SEQUENCE_MAXIMUM_QUADLET_COUNT)
+			return ERR_PTR(-EPROTO);
+		++cursor;
+		++count;
+		quadlet = *cursor;
+
+		if (!phy_packet_self_id_get_extended(quadlet) ||
+		    sequence != phy_packet_self_id_extended_get_sequence(quadlet))
+			return ERR_PTR(-EPROTO);
+		++sequence;
+	}
+
+	*quadlet_count = count;
+	self_id_sequence = enumerator->cursor;
+
+	enumerator->cursor += count;
+	enumerator->quadlet_count -= count;
+
+	return self_id_sequence;
+}
+
+enum phy_packet_self_id_port_status {
+	PHY_PACKET_SELF_ID_PORT_STATUS_NONE = 0,
+	PHY_PACKET_SELF_ID_PORT_STATUS_NCONN = 1,
+	PHY_PACKET_SELF_ID_PORT_STATUS_PARENT = 2,
+	PHY_PACKET_SELF_ID_PORT_STATUS_CHILD = 3,
+};
+
+static inline unsigned int self_id_sequence_get_port_capacity(unsigned int quadlet_count)
+{
+	return quadlet_count * 8 - 5;
+}
+
+static inline enum phy_packet_self_id_port_status self_id_sequence_get_port_status(
+		const u32 *self_id_sequence, unsigned int quadlet_count, unsigned int port_index)
+{
+	unsigned int index, shift;
+
+	index = (port_index + 5) / 8;
+	shift = 16 - ((port_index + 5) % 8) * 2;
+
+	if (index < quadlet_count && index < SELF_ID_SEQUENCE_MAXIMUM_QUADLET_COUNT)
+		return (self_id_sequence[index] >> shift) & SELF_ID_PORT_STATUS_MASK;
+
+	return PHY_PACKET_SELF_ID_PORT_STATUS_NONE;
+}
+
+static inline void self_id_sequence_set_port_status(u32 *self_id_sequence, unsigned int quadlet_count,
+						    unsigned int port_index,
+						    enum phy_packet_self_id_port_status status)
+{
+	unsigned int index, shift;
+
+	index = (port_index + 5) / 8;
+	shift = 16 - ((port_index + 5) % 8) * 2;
+
+	if (index < quadlet_count) {
+		self_id_sequence[index] &= ~(SELF_ID_PORT_STATUS_MASK << shift);
+		self_id_sequence[index] |= status << shift;
+	}
+}
+
+#endif // _FIREWIRE_PHY_PACKET_DEFINITIONS_H
diff --git a/drivers/firewire/sbp2.c b/drivers/firewire/sbp2.c
index 4d5054211550..1a19828114cf 100644
--- a/drivers/firewire/sbp2.c
+++ b/drivers/firewire/sbp2.c
@@ -81,7 +81,8 @@ MODULE_PARM_DESC(exclusive_login, "Exclusive login to sbp2 device "
  *
  * - power condition
  *   Set the power condition field in the START STOP UNIT commands sent by
- *   sd_mod on suspend, resume, and shutdown (if manage_start_stop is on).
+ * sd_mod on suspend, resume, and shutdown (if manage_system_start_stop or + * manage_runtime_start_stop is on). * Some disks need this to spin down or to resume properly. * * - override internal blacklist @@ -408,7 +409,7 @@ static void sbp2_status_write(struct fw_card *card, struct fw_request *request, void *payload, size_t length, void *callback_data) { struct sbp2_logical_unit *lu = callback_data; - struct sbp2_orb *orb; + struct sbp2_orb *orb = NULL, *iter; struct sbp2_status status; unsigned long flags; @@ -433,17 +434,18 @@ static void sbp2_status_write(struct fw_card *card, struct fw_request *request, /* Lookup the orb corresponding to this status write. */ spin_lock_irqsave(&lu->tgt->lock, flags); - list_for_each_entry(orb, &lu->orb_list, link) { + list_for_each_entry(iter, &lu->orb_list, link) { if (STATUS_GET_ORB_HIGH(status) == 0 && - STATUS_GET_ORB_LOW(status) == orb->request_bus) { - orb->rcode = RCODE_COMPLETE; - list_del(&orb->link); + STATUS_GET_ORB_LOW(status) == iter->request_bus) { + iter->rcode = RCODE_COMPLETE; + list_del(&iter->link); + orb = iter; break; } } spin_unlock_irqrestore(&lu->tgt->lock, flags); - if (&orb->link != &lu->orb_list) { + if (orb) { orb->callback(orb, &status); kref_put(&orb->kref, free_orb); /* orb callback reference */ } else { @@ -1116,7 +1118,7 @@ static void sbp2_init_workarounds(struct sbp2_target *tgt, u32 model, tgt->workarounds = w; } -static struct scsi_host_template scsi_driver_template; +static const struct scsi_host_template scsi_driver_template; static void sbp2_remove(struct fw_unit *unit); static int sbp2_probe(struct fw_unit *unit, const struct ieee1394_device_id *id) @@ -1375,7 +1377,7 @@ static void complete_command_orb(struct sbp2_orb *base_orb, sbp2_unmap_scatterlist(device->card->device, orb); orb->cmd->result = result; - orb->cmd->scsi_done(orb->cmd); + scsi_done(orb->cmd); } static int sbp2_map_scatterlist(struct sbp2_command_orb *orb, @@ -1488,7 +1490,7 @@ static int sbp2_scsi_queuecommand(struct Scsi_Host *shost, return retval; } -static int sbp2_scsi_slave_alloc(struct scsi_device *sdev) +static int sbp2_scsi_sdev_init(struct scsi_device *sdev) { struct sbp2_logical_unit *lu = sdev->hostdata; @@ -1498,26 +1500,24 @@ static int sbp2_scsi_slave_alloc(struct scsi_device *sdev) sdev->allow_restart = 1; - /* - * SBP-2 does not require any alignment, but we set it anyway - * for compatibility with earlier versions of this driver. 
-	 */
-	blk_queue_update_dma_alignment(sdev->request_queue, 4 - 1);
-
 	if (lu->tgt->workarounds & SBP2_WORKAROUND_INQUIRY_36)
 		sdev->inquiry_len = 36;
 
 	return 0;
 }
 
-static int sbp2_scsi_slave_configure(struct scsi_device *sdev)
+static int sbp2_scsi_sdev_configure(struct scsi_device *sdev,
+				    struct queue_limits *lim)
 {
 	struct sbp2_logical_unit *lu = sdev->hostdata;
 
 	sdev->use_10_for_rw = 1;
 
-	if (sbp2_param_exclusive_login)
-		sdev->manage_start_stop = 1;
+	if (sbp2_param_exclusive_login) {
+		sdev->manage_system_start_stop = 1;
+		sdev->manage_runtime_start_stop = 1;
+		sdev->manage_shutdown = 1;
+	}
 
 	if (sdev->type == TYPE_ROM)
 		sdev->use_10_for_ms = 1;
@@ -1533,7 +1533,7 @@ static int sbp2_scsi_slave_configure(struct scsi_device *sdev)
 		sdev->start_stop_pwr_cond = 1;
 
 	if (lu->tgt->workarounds & SBP2_WORKAROUND_128K_MAX_TRANS)
-		blk_queue_max_hw_sectors(sdev->request_queue, 128 * 1024 / 512);
+		lim->max_hw_sectors = 128 * 1024 / 512;
 
 	return 0;
 }
@@ -1578,24 +1578,26 @@ static ssize_t sbp2_sysfs_ieee1394_id_show(struct device *dev,
 
 static DEVICE_ATTR(ieee1394_id, S_IRUGO, sbp2_sysfs_ieee1394_id_show, NULL);
 
-static struct device_attribute *sbp2_scsi_sysfs_attrs[] = {
-	&dev_attr_ieee1394_id,
+static struct attribute *sbp2_scsi_sysfs_attrs[] = {
+	&dev_attr_ieee1394_id.attr,
 	NULL
 };
 
-static struct scsi_host_template scsi_driver_template = {
+ATTRIBUTE_GROUPS(sbp2_scsi_sysfs);
+
+static const struct scsi_host_template scsi_driver_template = {
 	.module			= THIS_MODULE,
 	.name			= "SBP-2 IEEE-1394",
 	.proc_name		= "sbp2",
 	.queuecommand		= sbp2_scsi_queuecommand,
-	.slave_alloc		= sbp2_scsi_slave_alloc,
-	.slave_configure	= sbp2_scsi_slave_configure,
+	.sdev_init		= sbp2_scsi_sdev_init,
+	.sdev_configure		= sbp2_scsi_sdev_configure,
 	.eh_abort_handler	= sbp2_scsi_abort,
 	.this_id		= -1,
 	.sg_tablesize		= SG_ALL,
 	.max_segment_size	= SBP2_MAX_SEG_SIZE,
 	.can_queue		= 1,
-	.sdev_attrs		= sbp2_scsi_sysfs_attrs,
+	.sdev_groups		= sbp2_scsi_sysfs_groups,
 };
 
MODULE_AUTHOR("Kristian Hoegsberg <krh@bitplanet.net>");
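The sbp2_scsi_sdev_configure() hunk above follows the queue_limits convention: instead of calling blk_queue_* helpers on the request queue, the callback adjusts the limits struct it is handed, and the 128K workaround caps a request at 128 * 1024 / 512 = 256 sectors, i.e. 128 KiB. A minimal sketch of the callback shape (the function name is illustrative, not additional driver code):

	// Illustrative only; mirrors the .sdev_configure hook shown in the hunk above.
	static int example_sdev_configure(struct scsi_device *sdev, struct queue_limits *lim)
	{
		lim->max_hw_sectors = 128 * 1024 / 512;	// 256 sectors of 512 bytes = 128 KiB
		return 0;
	}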
diff --git a/drivers/firewire/self-id-sequence-helper-test.c b/drivers/firewire/self-id-sequence-helper-test.c
new file mode 100644
index 000000000000..eed7a2294e64
--- /dev/null
+++ b/drivers/firewire/self-id-sequence-helper-test.c
@@ -0,0 +1,152 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+//
+// self-id-sequence-helper-test.c - An application of KUnit to test helpers of self ID sequence.
+//
+// Copyright (c) 2024 Takashi Sakamoto
+
+#include <kunit/test.h>
+
+#include "phy-packet-definitions.h"
+
+static void test_self_id_sequence_enumerator_valid(struct kunit *test)
+{
+	static const u32 valid_sequences[] = {
+		0x00000000,
+		0x00000001, 0x00800000,
+		0x00000001, 0x00800001, 0x00900000,
+		0x00000000,
+	};
+	struct self_id_sequence_enumerator enumerator;
+	const u32 *entry;
+	unsigned int quadlet_count;
+
+	enumerator.cursor = valid_sequences;
+	enumerator.quadlet_count = ARRAY_SIZE(valid_sequences);
+
+	entry = self_id_sequence_enumerator_next(&enumerator, &quadlet_count);
+	KUNIT_EXPECT_PTR_EQ(test, entry, &valid_sequences[0]);
+	KUNIT_EXPECT_EQ(test, quadlet_count, 1);
+	KUNIT_EXPECT_EQ(test, enumerator.quadlet_count, 6);
+
+	entry = self_id_sequence_enumerator_next(&enumerator, &quadlet_count);
+	KUNIT_EXPECT_PTR_EQ(test, entry, &valid_sequences[1]);
+	KUNIT_EXPECT_EQ(test, quadlet_count, 2);
+	KUNIT_EXPECT_EQ(test, enumerator.quadlet_count, 4);
+
+	entry = self_id_sequence_enumerator_next(&enumerator, &quadlet_count);
+	KUNIT_EXPECT_PTR_EQ(test, entry, &valid_sequences[3]);
+	KUNIT_EXPECT_EQ(test, quadlet_count, 3);
+	KUNIT_EXPECT_EQ(test, enumerator.quadlet_count, 1);
+
+	entry = self_id_sequence_enumerator_next(&enumerator, &quadlet_count);
+	KUNIT_EXPECT_PTR_EQ(test, entry, &valid_sequences[6]);
+	KUNIT_EXPECT_EQ(test, quadlet_count, 1);
+	KUNIT_EXPECT_EQ(test, enumerator.quadlet_count, 0);
+
+	entry = self_id_sequence_enumerator_next(&enumerator, &quadlet_count);
+	KUNIT_EXPECT_EQ(test, PTR_ERR(entry), -ENODATA);
+}
+
+static void test_self_id_sequence_enumerator_invalid(struct kunit *test)
+{
+	static const u32 invalid_sequences[] = {
+		0x00000001,
+	};
+	struct self_id_sequence_enumerator enumerator;
+	const u32 *entry;
+	unsigned int count;
+
+	enumerator.cursor = invalid_sequences;
+	enumerator.quadlet_count = ARRAY_SIZE(invalid_sequences);
+
+	entry = self_id_sequence_enumerator_next(&enumerator, &count);
+	KUNIT_EXPECT_EQ(test, PTR_ERR(entry), -EPROTO);
+}
+
+static void test_self_id_sequence_get_port_status(struct kunit *test)
+{
+	static const u32 expected[] = {
+		0x000000e5,
+		0x00839e79,
+		0x0091e79d,
+		0x00a279e4,
+	};
+	u32 quadlets[] = {
+		0x00000001,
+		0x00800001,
+		0x00900001,
+		0x00a00000,
+	};
+	enum phy_packet_self_id_port_status port_status[28];
+	unsigned int port_capacity;
+	unsigned int port_index;
+
+	KUNIT_ASSERT_EQ(test, ARRAY_SIZE(expected), ARRAY_SIZE(quadlets));
+
+	// With an extra port.
+	port_capacity = self_id_sequence_get_port_capacity(ARRAY_SIZE(expected)) + 1;
+	KUNIT_ASSERT_EQ(test, port_capacity, ARRAY_SIZE(port_status));
+
+	for (port_index = 0; port_index < port_capacity; ++port_index) {
+		port_status[port_index] =
+			self_id_sequence_get_port_status(expected, ARRAY_SIZE(expected), port_index);
+		self_id_sequence_set_port_status(quadlets, ARRAY_SIZE(quadlets), port_index,
+						 port_status[port_index]);
+	}
+
+	// Self ID zero.
+	KUNIT_EXPECT_EQ(test, PHY_PACKET_SELF_ID_PORT_STATUS_CHILD, port_status[0]);
+	KUNIT_EXPECT_EQ(test, PHY_PACKET_SELF_ID_PORT_STATUS_PARENT, port_status[1]);
+	KUNIT_EXPECT_EQ(test, PHY_PACKET_SELF_ID_PORT_STATUS_NCONN, port_status[2]);
+
+	// Self ID one.
+	KUNIT_EXPECT_EQ(test, PHY_PACKET_SELF_ID_PORT_STATUS_CHILD, port_status[3]);
+	KUNIT_EXPECT_EQ(test, PHY_PACKET_SELF_ID_PORT_STATUS_PARENT, port_status[4]);
+	KUNIT_EXPECT_EQ(test, PHY_PACKET_SELF_ID_PORT_STATUS_NCONN, port_status[5]);
+	KUNIT_EXPECT_EQ(test, PHY_PACKET_SELF_ID_PORT_STATUS_CHILD, port_status[6]);
+	KUNIT_EXPECT_EQ(test, PHY_PACKET_SELF_ID_PORT_STATUS_PARENT, port_status[7]);
+	KUNIT_EXPECT_EQ(test, PHY_PACKET_SELF_ID_PORT_STATUS_NCONN, port_status[8]);
+	KUNIT_EXPECT_EQ(test, PHY_PACKET_SELF_ID_PORT_STATUS_CHILD, port_status[9]);
+	KUNIT_EXPECT_EQ(test, PHY_PACKET_SELF_ID_PORT_STATUS_PARENT, port_status[10]);
+
+	// Self ID two.
+	KUNIT_EXPECT_EQ(test, PHY_PACKET_SELF_ID_PORT_STATUS_NCONN, port_status[11]);
+	KUNIT_EXPECT_EQ(test, PHY_PACKET_SELF_ID_PORT_STATUS_CHILD, port_status[12]);
+	KUNIT_EXPECT_EQ(test, PHY_PACKET_SELF_ID_PORT_STATUS_PARENT, port_status[13]);
+	KUNIT_EXPECT_EQ(test, PHY_PACKET_SELF_ID_PORT_STATUS_NCONN, port_status[14]);
+	KUNIT_EXPECT_EQ(test, PHY_PACKET_SELF_ID_PORT_STATUS_CHILD, port_status[15]);
+	KUNIT_EXPECT_EQ(test, PHY_PACKET_SELF_ID_PORT_STATUS_PARENT, port_status[16]);
+	KUNIT_EXPECT_EQ(test, PHY_PACKET_SELF_ID_PORT_STATUS_NCONN, port_status[17]);
+	KUNIT_EXPECT_EQ(test, PHY_PACKET_SELF_ID_PORT_STATUS_CHILD, port_status[18]);
+
+	// Self ID three.
+	KUNIT_EXPECT_EQ(test, PHY_PACKET_SELF_ID_PORT_STATUS_PARENT, port_status[19]);
+	KUNIT_EXPECT_EQ(test, PHY_PACKET_SELF_ID_PORT_STATUS_NCONN, port_status[20]);
+	KUNIT_EXPECT_EQ(test, PHY_PACKET_SELF_ID_PORT_STATUS_CHILD, port_status[21]);
+	KUNIT_EXPECT_EQ(test, PHY_PACKET_SELF_ID_PORT_STATUS_PARENT, port_status[22]);
+	KUNIT_EXPECT_EQ(test, PHY_PACKET_SELF_ID_PORT_STATUS_NCONN, port_status[23]);
+	KUNIT_EXPECT_EQ(test, PHY_PACKET_SELF_ID_PORT_STATUS_CHILD, port_status[24]);
+	KUNIT_EXPECT_EQ(test, PHY_PACKET_SELF_ID_PORT_STATUS_PARENT, port_status[25]);
+	KUNIT_EXPECT_EQ(test, PHY_PACKET_SELF_ID_PORT_STATUS_NCONN, port_status[26]);
+
+	// Out of order.
+	KUNIT_EXPECT_EQ(test, PHY_PACKET_SELF_ID_PORT_STATUS_NONE, port_status[27]);
+
+	KUNIT_EXPECT_MEMEQ(test, quadlets, expected, sizeof(expected));
+}
+
+static struct kunit_case self_id_sequence_helper_test_cases[] = {
+	KUNIT_CASE(test_self_id_sequence_enumerator_valid),
+	KUNIT_CASE(test_self_id_sequence_enumerator_invalid),
+	KUNIT_CASE(test_self_id_sequence_get_port_status),
+	{}
+};
+
+static struct kunit_suite self_id_sequence_helper_test_suite = {
+	.name = "self-id-sequence-helper",
+	.test_cases = self_id_sequence_helper_test_cases,
+};
+kunit_test_suite(self_id_sequence_helper_test_suite);
+
+MODULE_DESCRIPTION("Unit test suite for helpers of self ID sequence");
+MODULE_LICENSE("GPL");
diff --git a/drivers/firewire/uapi-test.c b/drivers/firewire/uapi-test.c
new file mode 100644
index 000000000000..bc3f10a2e516
--- /dev/null
+++ b/drivers/firewire/uapi-test.c
@@ -0,0 +1,90 @@
+// SPDX-License-Identifier: GPL-2.0-only
+//
+// uapi-test.c - An application of KUnit to check the layout of structures exposed to user space
+// for the FireWire subsystem.
+//
+// Copyright (c) 2023 Takashi Sakamoto
+
+#include <kunit/test.h>
+#include <linux/firewire-cdev.h>
+
+// Known issue introduced in the v2.6.27 kernel.
+static void structure_layout_event_response(struct kunit *test)
+{
+#if defined(CONFIG_X86_32)
+	// 4-byte alignment for aggregate types including 8-byte storage types.
+	KUNIT_EXPECT_EQ(test, 20, sizeof(struct fw_cdev_event_response));
+#else
+	// 8-byte alignment for aggregate types including 8-byte storage types.
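+	// (i386 aligns 64-bit members on 4 bytes, hence the smaller size in the
+	// CONFIG_X86_32 branch above; most other ABIs align them on 8 bytes.)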
+	KUNIT_EXPECT_EQ(test, 24, sizeof(struct fw_cdev_event_response));
+#endif
+
+	KUNIT_EXPECT_EQ(test, 0, offsetof(struct fw_cdev_event_response, closure));
+	KUNIT_EXPECT_EQ(test, 8, offsetof(struct fw_cdev_event_response, type));
+	KUNIT_EXPECT_EQ(test, 12, offsetof(struct fw_cdev_event_response, rcode));
+	KUNIT_EXPECT_EQ(test, 16, offsetof(struct fw_cdev_event_response, length));
+	KUNIT_EXPECT_EQ(test, 20, offsetof(struct fw_cdev_event_response, data));
+}
+
+// Added at v6.5.
+static void structure_layout_event_request3(struct kunit *test)
+{
+	KUNIT_EXPECT_EQ(test, 56, sizeof(struct fw_cdev_event_request3));
+
+	KUNIT_EXPECT_EQ(test, 0, offsetof(struct fw_cdev_event_request3, closure));
+	KUNIT_EXPECT_EQ(test, 8, offsetof(struct fw_cdev_event_request3, type));
+	KUNIT_EXPECT_EQ(test, 12, offsetof(struct fw_cdev_event_request3, tcode));
+	KUNIT_EXPECT_EQ(test, 16, offsetof(struct fw_cdev_event_request3, offset));
+	KUNIT_EXPECT_EQ(test, 24, offsetof(struct fw_cdev_event_request3, source_node_id));
+	KUNIT_EXPECT_EQ(test, 28, offsetof(struct fw_cdev_event_request3, destination_node_id));
+	KUNIT_EXPECT_EQ(test, 32, offsetof(struct fw_cdev_event_request3, card));
+	KUNIT_EXPECT_EQ(test, 36, offsetof(struct fw_cdev_event_request3, generation));
+	KUNIT_EXPECT_EQ(test, 40, offsetof(struct fw_cdev_event_request3, handle));
+	KUNIT_EXPECT_EQ(test, 44, offsetof(struct fw_cdev_event_request3, length));
+	KUNIT_EXPECT_EQ(test, 48, offsetof(struct fw_cdev_event_request3, tstamp));
+	KUNIT_EXPECT_EQ(test, 56, offsetof(struct fw_cdev_event_request3, data));
+}
+
+// Added at v6.5.
+static void structure_layout_event_response2(struct kunit *test)
+{
+	KUNIT_EXPECT_EQ(test, 32, sizeof(struct fw_cdev_event_response2));
+
+	KUNIT_EXPECT_EQ(test, 0, offsetof(struct fw_cdev_event_response2, closure));
+	KUNIT_EXPECT_EQ(test, 8, offsetof(struct fw_cdev_event_response2, type));
+	KUNIT_EXPECT_EQ(test, 12, offsetof(struct fw_cdev_event_response2, rcode));
+	KUNIT_EXPECT_EQ(test, 16, offsetof(struct fw_cdev_event_response2, length));
+	KUNIT_EXPECT_EQ(test, 20, offsetof(struct fw_cdev_event_response2, request_tstamp));
+	KUNIT_EXPECT_EQ(test, 24, offsetof(struct fw_cdev_event_response2, response_tstamp));
+	KUNIT_EXPECT_EQ(test, 32, offsetof(struct fw_cdev_event_response2, data));
+}
+
+// Added at v6.5.
+static void structure_layout_event_phy_packet2(struct kunit *test)
+{
+	KUNIT_EXPECT_EQ(test, 24, sizeof(struct fw_cdev_event_phy_packet2));
+
+	KUNIT_EXPECT_EQ(test, 0, offsetof(struct fw_cdev_event_phy_packet2, closure));
+	KUNIT_EXPECT_EQ(test, 8, offsetof(struct fw_cdev_event_phy_packet2, type));
+	KUNIT_EXPECT_EQ(test, 12, offsetof(struct fw_cdev_event_phy_packet2, rcode));
+	KUNIT_EXPECT_EQ(test, 16, offsetof(struct fw_cdev_event_phy_packet2, length));
+	KUNIT_EXPECT_EQ(test, 20, offsetof(struct fw_cdev_event_phy_packet2, tstamp));
+	KUNIT_EXPECT_EQ(test, 24, offsetof(struct fw_cdev_event_phy_packet2, data));
+}
+
+static struct kunit_case structure_layout_test_cases[] = {
+	KUNIT_CASE(structure_layout_event_response),
+	KUNIT_CASE(structure_layout_event_request3),
+	KUNIT_CASE(structure_layout_event_response2),
+	KUNIT_CASE(structure_layout_event_phy_packet2),
+	{}
+};
+
+static struct kunit_suite structure_layout_test_suite = {
+	.name = "firewire-uapi-structure-layout",
+	.test_cases = structure_layout_test_cases,
+};
+kunit_test_suite(structure_layout_test_suite);
+
+MODULE_DESCRIPTION("FireWire UAPI unit test suite");
+MODULE_LICENSE("GPL");
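Taken together, the new headers and test suites above center on one consumer-facing primitive, struct self_id_sequence_enumerator. A minimal sketch of how a caller would walk a self-ID buffer with it (the buffer and count variables are placeholders, not code from this series; IS_ERR()/PTR_ERR() come from <linux/err.h>):

	// "self_ids" and "quadlet_count" are placeholder names for a captured self-ID stream.
	struct self_id_sequence_enumerator enumerator = {
		.cursor = self_ids,
		.quadlet_count = quadlet_count,
	};

	while (enumerator.quadlet_count > 0) {
		unsigned int count;
		const u32 *seq = self_id_sequence_enumerator_next(&enumerator, &count);

		if (IS_ERR(seq))
			break;	// -EPROTO indicates a malformed sequence.
		// One node's sequence: quadlet 0 plus (count - 1) extended quadlets, whose
		// port fields can then be read with self_id_sequence_get_port_status().
	}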
