Diffstat (limited to 'sound/firewire/amdtp-stream.c')
-rw-r--r--	sound/firewire/amdtp-stream.c	72
1 file changed, 51 insertions(+), 21 deletions(-)
diff --git a/sound/firewire/amdtp-stream.c b/sound/firewire/amdtp-stream.c
index c9f153f85ae6..7fc51f829ecc 100644
--- a/sound/firewire/amdtp-stream.c
+++ b/sound/firewire/amdtp-stream.c
@@ -77,6 +77,8 @@
 // overrun. Actual device can skip more, then this module stops the packet streaming.
 #define IR_JUMBO_PAYLOAD_MAX_SKIP_CYCLES	5
 
+static void pcm_period_work(struct work_struct *work);
+
 /**
  * amdtp_stream_init - initialize an AMDTP stream structure
  * @s: the AMDTP stream to initialize
@@ -105,6 +107,7 @@ int amdtp_stream_init(struct amdtp_stream *s, struct fw_unit *unit,
 	s->flags = flags;
 	s->context = ERR_PTR(-1);
 	mutex_init(&s->mutex);
+	INIT_WORK(&s->period_work, pcm_period_work);
 	s->packet_index = 0;
 
 	init_waitqueue_head(&s->ready_wait);
@@ -169,6 +172,9 @@ static int apply_constraint_to_size(struct snd_pcm_hw_params *params,
 		step = max(step, amdtp_syt_intervals[i]);
 	}
 
+	if (step == 0)
+		return -EINVAL;
+
 	t.min = roundup(s->min, step);
 	t.max = rounddown(s->max, step);
 	t.integer = 1;
@@ -347,6 +353,7 @@ EXPORT_SYMBOL(amdtp_stream_get_max_payload);
  */
 void amdtp_stream_pcm_prepare(struct amdtp_stream *s)
 {
+	cancel_work_sync(&s->period_work);
 	s->pcm_buffer_pointer = 0;
 	s->pcm_period_pointer = 0;
 }
@@ -611,19 +618,37 @@ static void update_pcm_pointers(struct amdtp_stream *s,
 		// The program in user process should periodically check the status of intermediate
 		// buffer associated to PCM substream to process PCM frames in the buffer, instead
 		// of receiving notification of period elapsed by poll wait.
-		if (!pcm->runtime->no_period_wakeup) {
-			if (in_softirq()) {
-				// In software IRQ context for 1394 OHCI.
-				snd_pcm_period_elapsed(pcm);
-			} else {
-				// In process context of ALSA PCM application under acquired lock of
-				// PCM substream.
-				snd_pcm_period_elapsed_under_stream_lock(pcm);
-			}
-		}
+		//
+		// Use another work item for period elapsed event to prevent the following AB/BA
+		// deadlock:
+		//
+		//          thread 1                            thread 2
+		//  =================================  =================================
+		//  A.work item (process)              pcm ioctl (process)
+		//       v                                  v
+		//  process_rx_packets()               B.PCM stream lock
+		//  process_tx_packets()                    v
+		//       v                             callbacks in snd_pcm_ops
+		//  update_pcm_pointers()                   v
+		//    snd_pcm_elapsed()                fw_iso_context_flush_completions()
+		//      snd_pcm_stream_lock_irqsave()    disable_work_sync()
+		//       v                                  v
+		//  wait until release of B            wait until A exits
+		if (!pcm->runtime->no_period_wakeup)
+			queue_work(system_highpri_wq, &s->period_work);
 	}
 }
 
+static void pcm_period_work(struct work_struct *work)
+{
+	struct amdtp_stream *s = container_of(work, struct amdtp_stream,
+					      period_work);
+	struct snd_pcm_substream *pcm = READ_ONCE(s->pcm);
+
+	if (pcm)
+		snd_pcm_period_elapsed(pcm);
+}
+
 static int queue_packet(struct amdtp_stream *s, struct fw_iso_packet *params,
 			bool sched_irq)
 {
@@ -1049,8 +1074,15 @@ static void generate_rx_packet_descs(struct amdtp_stream *s, struct pkt_desc *de
 
 static inline void cancel_stream(struct amdtp_stream *s)
 {
+	struct work_struct *work = current_work();
+
 	s->packet_index = -1;
-	if (in_softirq())
+
+	// Detect work items for any isochronous context. The work item for pcm_period_work()
+	// should be avoided since the call of snd_pcm_period_elapsed() can reach via
+	// snd_pcm_ops.pointer() under acquiring PCM stream(group) lock and causes dead lock at
+	// snd_pcm_stop_xrun().
+	if (work && work != &s->period_work)
 		amdtp_stream_pcm_abort(s);
 	WRITE_ONCE(s->pcm_buffer_pointer, SNDRV_PCM_POS_XRUN);
 }
@@ -1180,13 +1212,10 @@ static void process_rx_packets(struct fw_iso_context *context, u32 tstamp, size_
 		(void)fw_card_read_cycle_time(fw_parent_device(s->unit)->card, &curr_cycle_time);
 
 	for (i = 0; i < packets; ++i) {
-		struct {
-			struct fw_iso_packet params;
-			__be32 header[CIP_HEADER_QUADLETS];
-		} template = { {0}, {0} };
+		DEFINE_RAW_FLEX(struct fw_iso_packet, template, header, CIP_HEADER_QUADLETS);
 		bool sched_irq = false;
 
-		build_it_pkt_header(s, desc->cycle, &template.params, pkt_header_length,
+		build_it_pkt_header(s, desc->cycle, template, pkt_header_length,
 				    desc->data_blocks, desc->data_block_counter, desc->syt, i,
 				    curr_cycle_time);
 
@@ -1198,7 +1227,7 @@ static void process_rx_packets(struct fw_iso_context *context, u32 tstamp, size_
 			}
 		}
 
-		if (queue_out_packet(s, &template.params, sched_irq) < 0) {
+		if (queue_out_packet(s, template, sched_irq) < 0) {
 			cancel_stream(s);
 			return;
 		}
@@ -1852,11 +1881,11 @@ unsigned long amdtp_domain_stream_pcm_pointer(struct amdtp_domain *d,
 {
 	struct amdtp_stream *irq_target = d->irq_target;
 
-	// Process isochronous packets queued till recent isochronous cycle to handle PCM frames.
 	if (irq_target && amdtp_stream_running(irq_target)) {
-		// In software IRQ context, the call causes dead-lock to disable the tasklet
-		// synchronously.
-		if (!in_softirq())
+		// The work item to call snd_pcm_period_elapsed() can reach here by the call of
+		// snd_pcm_ops.pointer(), however less packets would be available then. Therefore
+		// the following call is just for user process contexts.
+		if (current_work() != &s->period_work)
 			fw_iso_context_flush_completions(irq_target->context);
 	}
 
@@ -1912,6 +1941,7 @@ static void amdtp_stream_stop(struct amdtp_stream *s)
 		return;
 	}
 
+	cancel_work_sync(&s->period_work);
 	fw_iso_context_stop(s->context);
 	fw_iso_context_destroy(s->context);
 	s->context = ERR_PTR(-1);
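
For readers unfamiliar with the workqueue pattern the patch adopts, the sketch below is a minimal, hypothetical kernel module illustrating the same idea outside of amdtp-stream.c: a completion source (a timer stands in for the 1394 OHCI isochronous context) only queues a work item instead of delivering the notification directly, the work item does the notification in process context, and the work item is cancelled synchronously before the producer is torn down, mirroring the cancel_work_sync() calls added to amdtp_stream_pcm_prepare() and amdtp_stream_stop(). All demo_* names, the timer stand-in, and the pr_info() notification are assumptions for illustration, not code from the patch; it also assumes a recent kernel with timer_shutdown_sync().

// SPDX-License-Identifier: GPL-2.0
// Hypothetical demo of deferring a notification from completion context to a
// work item, as the amdtp-stream patch does for snd_pcm_period_elapsed().
#include <linux/module.h>
#include <linux/workqueue.h>
#include <linux/timer.h>
#include <linux/jiffies.h>
#include <linux/atomic.h>

struct demo_stream {
	struct timer_list tick;		// stands in for the isochronous completion source
	struct work_struct period_work;	// counterpart of amdtp_stream.period_work
	atomic_t periods;
};

static struct demo_stream demo;

static void demo_period_work(struct work_struct *work)
{
	struct demo_stream *s = container_of(work, struct demo_stream, period_work);

	// Runs in process context, so it may take locks that the completion
	// (timer/softirq) context must never wait on.
	atomic_inc(&s->periods);
	pr_info("demo: period %d elapsed\n", atomic_read(&s->periods));
}

static void demo_tick(struct timer_list *t)
{
	struct demo_stream *s = from_timer(s, t, tick);

	// Completion context: only queue the work. Not calling the consumer's
	// notification here is what avoids the AB/BA deadlock described in the
	// patch comment.
	queue_work(system_highpri_wq, &s->period_work);
	mod_timer(&s->tick, jiffies + msecs_to_jiffies(100));
}

static int __init demo_init(void)
{
	INIT_WORK(&demo.period_work, demo_period_work);
	timer_setup(&demo.tick, demo_tick, 0);
	mod_timer(&demo.tick, jiffies + msecs_to_jiffies(100));
	return 0;
}

static void __exit demo_exit(void)
{
	timer_shutdown_sync(&demo.tick);
	// Mirror of amdtp_stream_stop(): make sure no period work is pending
	// or running before the stream state goes away.
	cancel_work_sync(&demo.period_work);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");

Queuing to system_highpri_wq, as the patch does, keeps the notification latency low while still moving it into process context where the PCM stream lock can be taken safely.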