firewire: core: use spin lock specific to topology map

At present, read transactions to the topology map register are not protected
by any lock primitive. This is a potential problem: a concurrent update can
leave the reader with mixed contents of the topology map.

This commit adds a spin lock specific to the topology map and uses it to
protect both the update and the read paths.

Link: https://lore.kernel.org/r/20250915234747.915922-4-o-takashi@sakamocchi.jp
Signed-off-by: Takashi Sakamoto <o-takashi@sakamocchi.jp>
Author: Takashi Sakamoto
Date:   2025-09-16 08:47:44 +09:00
Commit: 7d138cb269 (parent: 07c446e35b)

3 changed files with 24 additions and 10 deletions

--- a/drivers/firewire/core-topology.c
+++ b/drivers/firewire/core-topology.c

@@ -435,20 +435,22 @@ static void update_tree(struct fw_card *card, struct fw_node *root)
 	}
 }
 
-static void update_topology_map(struct fw_card *card,
-				u32 *self_ids, int self_id_count)
+static void update_topology_map(__be32 *buffer, size_t buffer_size, int root_node_id,
+				const u32 *self_ids, int self_id_count)
 {
-	int node_count = (card->root_node->node_id & 0x3f) + 1;
-	__be32 *map = card->topology_map;
+	__be32 *map = buffer;
+	int node_count = (root_node_id & 0x3f) + 1;
+
+	memset(map, 0, buffer_size);
 
 	*map++ = cpu_to_be32((self_id_count + 2) << 16);
-	*map++ = cpu_to_be32(be32_to_cpu(card->topology_map[1]) + 1);
+	*map++ = cpu_to_be32(be32_to_cpu(buffer[1]) + 1);
 	*map++ = cpu_to_be32((node_count << 16) | self_id_count);
 
 	while (self_id_count--)
 		*map++ = cpu_to_be32p(self_ids++);
 
-	fw_compute_block_crc(card->topology_map);
+	fw_compute_block_crc(buffer);
 }
 
 void fw_core_handle_bus_reset(struct fw_card *card, int node_id, int generation,
@@ -479,8 +481,6 @@ void fw_core_handle_bus_reset(struct fw_card *card, int node_id, int generation,
 
 		local_node = build_tree(card, self_ids, self_id_count, generation);
 
-		update_topology_map(card, self_ids, self_id_count);
-
 		card->color++;
 
 		if (local_node == NULL) {
@@ -493,5 +493,11 @@ void fw_core_handle_bus_reset(struct fw_card *card, int node_id, int generation,
 			update_tree(card, local_node);
 		}
 	}
+
+	// Just used by transaction layer.
+	scoped_guard(spinlock, &card->topology_map.lock) {
+		update_topology_map(card->topology_map.buffer, sizeof(card->topology_map.buffer),
+				    card->root_node->node_id, self_ids, self_id_count);
+	}
 }
 EXPORT_SYMBOL(fw_core_handle_bus_reset);
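
The writer side takes the new lock with scoped_guard() from <linux/cleanup.h>, which
holds the lock for the attached block and releases it on every exit path. For readers
unfamiliar with the guard macros, a roughly equivalent open-coded form of the added
block would be:

	// Roughly equivalent to the scoped_guard(spinlock, ...) block above.
	spin_lock(&card->topology_map.lock);
	update_topology_map(card->topology_map.buffer, sizeof(card->topology_map.buffer),
			    card->root_node->node_id, self_ids, self_id_count);
	spin_unlock(&card->topology_map.lock);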

--- a/drivers/firewire/core-transaction.c
+++ b/drivers/firewire/core-transaction.c

@@ -1196,7 +1196,11 @@ static void handle_topology_map(struct fw_card *card, struct fw_request *request
 	}
 
 	start = (offset - topology_map_region.start) / 4;
-	memcpy(payload, &card->topology_map[start], length);
+
+	// NOTE: This can be without irqsave when we can guarantee that fw_send_request() for local
+	// destination never runs in any type of IRQ context.
+	scoped_guard(spinlock_irqsave, &card->topology_map.lock)
+		memcpy(payload, &card->topology_map.buffer[start], length);
 
 	fw_send_response(card, request, RCODE_COMPLETE);
 }
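
The read side uses the spinlock_irqsave flavour of the guard, which saves and restores
the interrupt state around the critical section; per the NOTE above, the plain variant
would only be safe once fw_send_request() to the local node is guaranteed never to run
in any IRQ context. Open-coded, the guarded statement corresponds to:

	unsigned long flags;

	// Same effect as the scoped_guard(spinlock_irqsave, ...) statement above.
	spin_lock_irqsave(&card->topology_map.lock, flags);
	memcpy(payload, &card->topology_map.buffer[start], length);
	spin_unlock_irqrestore(&card->topology_map.lock, flags);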

--- a/include/linux/firewire.h
+++ b/include/linux/firewire.h

@@ -129,7 +129,11 @@ struct fw_card {
 	bool broadcast_channel_allocated;
 	u32 broadcast_channel;
-	__be32 topology_map[(CSR_TOPOLOGY_MAP_END - CSR_TOPOLOGY_MAP) / 4];
+
+	struct {
+		__be32 buffer[(CSR_TOPOLOGY_MAP_END - CSR_TOPOLOGY_MAP) / 4];
+		spinlock_t lock;
+	} topology_map;
 
 	__be32 maint_utility_register;
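
Not shown in this excerpt is the initialization of the new spinlock_t, which has to
happen once before either path can take the lock. A minimal sketch of the conventional
pattern, assuming it sits with the rest of the per-card setup in fw_card_initialize()
(placement not confirmed by the hunks above):

	// Hypothetical placement: initialize the lock before the CSR read handler
	// or the bus-reset path can contend on it.
	spin_lock_init(&card->topology_map.lock);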