Diffstat (limited to 'sound/firewire/amdtp-stream.c')
-rw-r--r--  sound/firewire/amdtp-stream.c  407
1 file changed, 326 insertions(+), 81 deletions(-)
diff --git a/sound/firewire/amdtp-stream.c b/sound/firewire/amdtp-stream.c
index e50e28f77e74..37d38efb4c87 100644
--- a/sound/firewire/amdtp-stream.c
+++ b/sound/firewire/amdtp-stream.c
@@ -9,6 +9,7 @@
#include <linux/device.h>
#include <linux/err.h>
#include <linux/firewire.h>
+#include <linux/firewire-constants.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <sound/pcm.h>
@@ -52,10 +53,6 @@
#define CIP_FMT_AM 0x10
#define AMDTP_FDF_NO_DATA 0xff
-/* TODO: make these configurable */
-#define INTERRUPT_INTERVAL 16
-#define QUEUE_LENGTH 48
-
// For iso header, tstamp and 2 CIP header.
#define IR_CTX_HEADER_SIZE_CIP 16
// For iso header and tstamp.
@@ -180,6 +177,8 @@ int amdtp_stream_add_pcm_hw_constraints(struct amdtp_stream *s,
struct snd_pcm_runtime *runtime)
{
struct snd_pcm_hardware *hw = &runtime->hw;
+ unsigned int ctx_header_size;
+ unsigned int maximum_usec_per_period;
int err;
hw->info = SNDRV_PCM_INFO_BATCH |
@@ -200,19 +199,36 @@ int amdtp_stream_add_pcm_hw_constraints(struct amdtp_stream *s,
hw->period_bytes_max = hw->period_bytes_min * 2048;
hw->buffer_bytes_max = hw->period_bytes_max * hw->periods_min;
- /*
- * Currently firewire-lib processes 16 packets in one software
- * interrupt callback. This equals to 2msec but actually the
- * interval of the interrupts has a jitter.
- * Additionally, even if adding a constraint to fit period size to
- * 2msec, actual calculated frames per period doesn't equal to 2msec,
- * depending on sampling rate.
- * Anyway, the interval to call snd_pcm_period_elapsed() cannot 2msec.
- * Here let us use 5msec for safe period interrupt.
- */
+ // Linux driver for 1394 OHCI controller voluntarily flushes isoc
+ // context when the total size of accumulated context headers reaches
+ // PAGE_SIZE. This kicks the tasklet for the isoc context and brings
+ // the callback in the middle of scheduled interrupts.
+ // Although AMDTP streams in the same domain use the same number of
+ // events per IRQ, use the largest size of context header between IT/IR
+ // contexts. Here, the context header size of the IR context is used
+ // for both contexts.
+ if (!(s->flags & CIP_NO_HEADER))
+ ctx_header_size = IR_CTX_HEADER_SIZE_CIP;
+ else
+ ctx_header_size = IR_CTX_HEADER_SIZE_NO_CIP;
+ maximum_usec_per_period = USEC_PER_SEC * PAGE_SIZE /
+ CYCLES_PER_SECOND / ctx_header_size;
+
+ // In IEC 61883-6, one isoc packet can transfer events up to the value
+ // of syt interval. This comes from the interval of isoc cycle. As 1394
+ // OHCI controller can generate hardware IRQ per isoc packet, the
+ // interval is 125 usec.
+ // However, there are two modes of transmission in IEC 61883-6;
+ // blocking and non-blocking. In blocking mode, the sequence of isoc
+ // packets includes 'empty' or 'NODATA' packets which include no event.
+ // In non-blocking mode, the number of events per packet is variable up
+ // to the syt interval.
+ // Due to the above protocol design, the minimum PCM frames per
+ // interrupt should be double the value of syt interval, thus it is
+ // 250 usec.
err = snd_pcm_hw_constraint_minmax(runtime,
SNDRV_PCM_HW_PARAM_PERIOD_TIME,
- 5000, UINT_MAX);
+ 250, maximum_usec_per_period);
if (err < 0)
goto end;
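For concreteness, a minimal userspace sketch of the two bounds computed above; PAGE_SIZE = 4096 and IR_CTX_HEADER_SIZE_NO_CIP = 8 (iso header plus tstamp) are assumptions not shown in this hunk, while CYCLES_PER_SECOND is the 8000 isoc cycles per second defined by IEEE 1394:

/* Sketch: period-time bounds derived above. PAGE_SIZE = 4096 and
 * IR_CTX_HEADER_SIZE_NO_CIP = 8 are assumptions, not taken from this hunk. */
#include <stdio.h>

#define USEC_PER_SEC			1000000UL
#define PAGE_SIZE			4096UL
#define CYCLES_PER_SECOND		8000UL
#define IR_CTX_HEADER_SIZE_CIP		16UL
#define IR_CTX_HEADER_SIZE_NO_CIP	8UL

int main(void)
{
	/* Upper bound: the period at which accumulated context headers
	 * reach PAGE_SIZE and the OHCI driver flushes the context. */
	printf("max period (CIP):    %lu usec\n", USEC_PER_SEC * PAGE_SIZE /
	       CYCLES_PER_SECOND / IR_CTX_HEADER_SIZE_CIP);	/* 32000 */
	printf("max period (no CIP): %lu usec\n", USEC_PER_SEC * PAGE_SIZE /
	       CYCLES_PER_SECOND / IR_CTX_HEADER_SIZE_NO_CIP);	/* 64000 */
	/* Lower bound: two syt intervals, i.e. two 125 usec isoc cycles. */
	printf("min period:          %u usec\n", 2 * 125);
	return 0;
}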
@@ -436,11 +452,12 @@ static void pcm_period_tasklet(unsigned long data)
snd_pcm_period_elapsed(pcm);
}
-static int queue_packet(struct amdtp_stream *s, struct fw_iso_packet *params)
+static int queue_packet(struct amdtp_stream *s, struct fw_iso_packet *params,
+ bool sched_irq)
{
int err;
- params->interrupt = IS_ALIGNED(s->packet_index + 1, INTERRUPT_INTERVAL);
+ params->interrupt = sched_irq;
params->tag = s->tag;
params->sy = 0;
@@ -451,18 +468,18 @@ static int queue_packet(struct amdtp_stream *s, struct fw_iso_packet *params)
goto end;
}
- if (++s->packet_index >= QUEUE_LENGTH)
+ if (++s->packet_index >= s->queue_size)
s->packet_index = 0;
end:
return err;
}
static inline int queue_out_packet(struct amdtp_stream *s,
- struct fw_iso_packet *params)
+ struct fw_iso_packet *params, bool sched_irq)
{
params->skip =
!!(params->header_length == 0 && params->payload_length == 0);
- return queue_packet(s, params);
+ return queue_packet(s, params, sched_irq);
}
static inline int queue_in_packet(struct amdtp_stream *s,
@@ -472,7 +489,7 @@ static inline int queue_in_packet(struct amdtp_stream *s,
params->header_length = s->ctx_data.tx.ctx_header_size;
params->payload_length = s->ctx_data.tx.max_ctx_payload_length;
params->skip = false;
- return queue_packet(s, params);
+ return queue_packet(s, params, false);
}
static void generate_cip_header(struct amdtp_stream *s, __be32 cip_header[2],
@@ -669,13 +686,14 @@ static inline u32 increment_cycle_count(u32 cycle, unsigned int addend)
}
// Align to actual cycle count for the packet which is going to be scheduled.
-// This module queued the same number of isochronous cycle as QUEUE_LENGTH to
-// skip isochronous cycle, therefore it's OK to just increment the cycle by
-// QUEUE_LENGTH for scheduled cycle.
-static inline u32 compute_it_cycle(const __be32 ctx_header_tstamp)
+// This module queues the same number of isochronous cycles as the size of the
+// queue to skip isochronous cycles, therefore it's OK to just increment the
+// cycle by the size of the queue for the scheduled cycle.
+static inline u32 compute_it_cycle(const __be32 ctx_header_tstamp,
+ unsigned int queue_size)
{
u32 cycle = compute_cycle_count(ctx_header_tstamp);
- return increment_cycle_count(cycle, QUEUE_LENGTH);
+ return increment_cycle_count(cycle, queue_size);
}
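As background for compute_it_cycle(), a standalone sketch of the cycle arithmetic it builds on; compute_cycle_count() and increment_cycle_count() are reconstructed here for illustration from their use in this file, assuming a tstamp with a 3-bit sec field and a 13-bit cycle field that wraps every 8 seconds:

/* Sketch of the cycle arithmetic behind compute_it_cycle(). The helpers are
 * reconstructed for illustration; the real definitions live elsewhere in
 * amdtp-stream.c. */
#include <stdio.h>

#define CYCLES_PER_SECOND	8000
#define CYCLES_PER_BLOCK	(8 * CYCLES_PER_SECOND)	/* 3-bit sec field */

/* tstamp layout assumed here: bits 15..13 = sec, bits 12..0 = cycle. */
static unsigned int compute_cycle_count(unsigned int tstamp)
{
	return ((tstamp >> 13) & 0x07) * CYCLES_PER_SECOND + (tstamp & 0x1fff);
}

static unsigned int increment_cycle_count(unsigned int cycle,
					  unsigned int addend)
{
	cycle += addend;
	if (cycle >= CYCLES_PER_BLOCK)
		cycle -= CYCLES_PER_BLOCK;
	return cycle;
}

int main(void)
{
	/* A packet stamped near the end of the 8-second window: adding
	 * queue_size cycles wraps around cleanly. */
	unsigned int tstamp = (7 << 13) | 7990;	/* sec = 7, cycle = 7990 */
	unsigned int queue_size = 240;		/* e.g. 30 msec of packets */
	unsigned int cycle = compute_cycle_count(tstamp);

	printf("current cycle:   %u\n", cycle);			/* 63990 */
	printf("scheduled cycle: %u\n",
	       increment_cycle_count(cycle, queue_size));	/* 230 */
	return 0;
}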
static int generate_device_pkt_descs(struct amdtp_stream *s,
@@ -689,7 +707,7 @@ static int generate_device_pkt_descs(struct amdtp_stream *s,
for (i = 0; i < packets; ++i) {
struct pkt_desc *desc = descs + i;
- unsigned int index = (s->packet_index + i) % QUEUE_LENGTH;
+ unsigned int index = (s->packet_index + i) % s->queue_size;
unsigned int cycle;
unsigned int payload_length;
unsigned int data_blocks;
@@ -730,9 +748,9 @@ static void generate_ideal_pkt_descs(struct amdtp_stream *s,
for (i = 0; i < packets; ++i) {
struct pkt_desc *desc = descs + i;
- unsigned int index = (s->packet_index + i) % QUEUE_LENGTH;
+ unsigned int index = (s->packet_index + i) % s->queue_size;
- desc->cycle = compute_it_cycle(*ctx_header);
+ desc->cycle = compute_it_cycle(*ctx_header, s->queue_size);
desc->syt = calculate_syt(s, desc->cycle);
desc->data_blocks = calculate_data_blocks(s, desc->syt);
@@ -773,22 +791,40 @@ static void process_ctx_payloads(struct amdtp_stream *s,
update_pcm_pointers(s, pcm, pcm_frames);
}
+static void amdtp_stream_master_callback(struct fw_iso_context *context,
+ u32 tstamp, size_t header_length,
+ void *header, void *private_data);
+
+static void amdtp_stream_master_first_callback(struct fw_iso_context *context,
+ u32 tstamp, size_t header_length,
+ void *header, void *private_data);
+
static void out_stream_callback(struct fw_iso_context *context, u32 tstamp,
size_t header_length, void *header,
void *private_data)
{
struct amdtp_stream *s = private_data;
const __be32 *ctx_header = header;
- unsigned int packets = header_length / sizeof(*ctx_header);
+ unsigned int events_per_period = s->ctx_data.rx.events_per_period;
+ unsigned int event_count = s->ctx_data.rx.event_count;
+ unsigned int packets;
+ bool is_irq_target;
int i;
if (s->packet_index < 0)
return;
+ // Calculate the number of packets in buffer and check XRUN.
+ packets = header_length / sizeof(*ctx_header);
+
generate_ideal_pkt_descs(s, s->pkt_descs, ctx_header, packets);
process_ctx_payloads(s, s->pkt_descs, packets);
+ is_irq_target =
+ !!(context->callback.sc == amdtp_stream_master_callback ||
+ context->callback.sc == amdtp_stream_master_first_callback);
+
for (i = 0; i < packets; ++i) {
const struct pkt_desc *desc = s->pkt_descs + i;
unsigned int syt;
@@ -796,6 +832,7 @@ static void out_stream_callback(struct fw_iso_context *context, u32 tstamp,
struct fw_iso_packet params;
__be32 header[IT_PKT_HEADER_SIZE_CIP / sizeof(__be32)];
} template = { {0}, {0} };
+ bool sched_irq = false;
if (s->ctx_data.rx.syt_override < 0)
syt = desc->syt;
@@ -806,13 +843,21 @@ static void out_stream_callback(struct fw_iso_context *context, u32 tstamp,
desc->data_blocks, desc->data_block_counter,
syt, i);
- if (queue_out_packet(s, &template.params) < 0) {
+ if (is_irq_target) {
+ event_count += desc->data_blocks;
+ if (event_count >= events_per_period) {
+ event_count -= events_per_period;
+ sched_irq = true;
+ }
+ }
+
+ if (queue_out_packet(s, &template.params, sched_irq) < 0) {
cancel_stream(s);
return;
}
}
- fw_iso_context_queue_flush(s->context);
+ s->ctx_data.rx.event_count = event_count;
}
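The bookkeeping above is the heart of the change: the IRQ-target context counts queued data blocks and requests a hardware IRQ each time a full PCM period of events has been queued. A minimal simulation of that policy with illustrative numbers (a constant six data blocks per packet and 480 events per period; real non-blocking streams vary the per-packet count):

/* Sketch: when does sched_irq fire? Simulates the accumulation in
 * out_stream_callback() with illustrative constants. */
#include <stdbool.h>
#include <stdio.h>

int main(void)
{
	unsigned int events_per_period = 480;	/* e.g. 10 msec at 48.0 kHz */
	unsigned int event_count = 0;

	for (unsigned int i = 0; i < 200; ++i) {
		unsigned int data_blocks = 6;	/* constant for simplicity */
		bool sched_irq = false;

		event_count += data_blocks;
		if (event_count >= events_per_period) {
			event_count -= events_per_period;
			sched_irq = true;
		}
		if (sched_irq)
			printf("IRQ requested at packet %u\n", i);
	}
	return 0;
}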
@@ -820,15 +865,15 @@ static void in_stream_callback(struct fw_iso_context *context, u32 tstamp,
void *private_data)
{
struct amdtp_stream *s = private_data;
- unsigned int packets;
__be32 *ctx_header = header;
+ unsigned int packets;
int i;
int err;
if (s->packet_index < 0)
return;
- // The number of packets in buffer.
+ // Calculate the number of packets in buffer and check XRUN.
packets = header_length / s->ctx_data.tx.ctx_header_size;
err = generate_device_pkt_descs(s, s->pkt_descs, ctx_header, packets);
@@ -849,11 +894,40 @@ static void in_stream_callback(struct fw_iso_context *context, u32 tstamp,
return;
}
}
+}
+
+static void amdtp_stream_master_callback(struct fw_iso_context *context,
+ u32 tstamp, size_t header_length,
+ void *header, void *private_data)
+{
+ struct amdtp_domain *d = private_data;
+ struct amdtp_stream *irq_target = d->irq_target;
+ struct amdtp_stream *s;
+
+ out_stream_callback(context, tstamp, header_length, header, irq_target);
+ if (amdtp_streaming_error(irq_target))
+ goto error;
+
+ list_for_each_entry(s, &d->streams, list) {
+ if (s != irq_target && amdtp_stream_running(s)) {
+ fw_iso_context_flush_completions(s->context);
+ if (amdtp_streaming_error(s))
+ goto error;
+ }
+ }
- fw_iso_context_queue_flush(s->context);
+ return;
+error:
+ if (amdtp_stream_running(irq_target))
+ cancel_stream(irq_target);
+
+ list_for_each_entry(s, &d->streams, list) {
+ if (amdtp_stream_running(s))
+ cancel_stream(s);
+ }
}
-/* this is executed one time */
+// This is executed one time.
static void amdtp_stream_first_callback(struct fw_iso_context *context,
u32 tstamp, size_t header_length,
void *header, void *private_data)
@@ -874,7 +948,7 @@ static void amdtp_stream_first_callback(struct fw_iso_context *context,
context->callback.sc = in_stream_callback;
} else {
- cycle = compute_it_cycle(*ctx_header);
+ cycle = compute_it_cycle(*ctx_header, s->queue_size);
context->callback.sc = out_stream_callback;
}
@@ -884,17 +958,42 @@ static void amdtp_stream_first_callback(struct fw_iso_context *context,
context->callback.sc(context, tstamp, header_length, header, s);
}
+static void amdtp_stream_master_first_callback(struct fw_iso_context *context,
+ u32 tstamp, size_t header_length,
+ void *header, void *private_data)
+{
+ struct amdtp_domain *d = private_data;
+ struct amdtp_stream *s = d->irq_target;
+ const __be32 *ctx_header = header;
+
+ s->callbacked = true;
+ wake_up(&s->callback_wait);
+
+ s->start_cycle = compute_it_cycle(*ctx_header, s->queue_size);
+
+ context->callback.sc = amdtp_stream_master_callback;
+
+ context->callback.sc(context, tstamp, header_length, header, d);
+}
+
/**
* amdtp_stream_start - start transferring packets
* @s: the AMDTP stream to start
* @channel: the isochronous channel on the bus
* @speed: firewire speed code
+ * @d: the AMDTP domain to which the AMDTP stream belongs
+ * @is_irq_target: whether isoc context for the AMDTP stream is used to generate
+ * hardware IRQ.
+ * @start_cycle: the isochronous cycle to start the context. Start immediately
+ * if a negative value is given.
*
* The stream cannot be started until it has been configured with
* amdtp_stream_set_parameters() and it must be started before any PCM or MIDI
* device can be started.
*/
-static int amdtp_stream_start(struct amdtp_stream *s, int channel, int speed)
+static int amdtp_stream_start(struct amdtp_stream *s, int channel, int speed,
+ struct amdtp_domain *d, bool is_irq_target,
+ int start_cycle)
{
static const struct {
unsigned int data_block;
@@ -908,10 +1007,15 @@ static int amdtp_stream_start(struct amdtp_stream *s, int channel, int speed)
[CIP_SFC_88200] = { 0, 67 },
[CIP_SFC_176400] = { 0, 67 },
};
+ unsigned int events_per_buffer = d->events_per_buffer;
+ unsigned int events_per_period = d->events_per_period;
+ unsigned int idle_irq_interval;
unsigned int ctx_header_size;
unsigned int max_ctx_payload_size;
enum dma_data_direction dir;
int type, tag, err;
+ fw_iso_callback_t ctx_cb;
+ void *ctx_data;
mutex_lock(&s->mutex);
@@ -922,6 +1026,12 @@ static int amdtp_stream_start(struct amdtp_stream *s, int channel, int speed)
}
if (s->direction == AMDTP_IN_STREAM) {
+ // NOTE: IT context should be used for constant IRQ.
+ if (is_irq_target) {
+ err = -EINVAL;
+ goto err_unlock;
+ }
+
s->data_block_counter = UINT_MAX;
} else {
entry = &initial_state[s->sfc];
@@ -953,14 +1063,37 @@ static int amdtp_stream_start(struct amdtp_stream *s, int channel, int speed)
max_ctx_payload_size -= IT_PKT_HEADER_SIZE_CIP;
}
- err = iso_packets_buffer_init(&s->buffer, s->unit, QUEUE_LENGTH,
+ // This is the case that AMDTP streams in the domain run just for a MIDI
+ // substream. Use the number of events equivalent to 10 msec as the
+ // interval of hardware IRQ.
+ if (events_per_period == 0)
+ events_per_period = amdtp_rate_table[s->sfc] / 100;
+ if (events_per_buffer == 0)
+ events_per_buffer = events_per_period * 3;
+
+ idle_irq_interval = DIV_ROUND_UP(CYCLES_PER_SECOND * events_per_period,
+ amdtp_rate_table[s->sfc]);
+ s->queue_size = DIV_ROUND_UP(CYCLES_PER_SECOND * events_per_buffer,
+ amdtp_rate_table[s->sfc]);
+
+ err = iso_packets_buffer_init(&s->buffer, s->unit, s->queue_size,
max_ctx_payload_size, dir);
if (err < 0)
goto err_unlock;
+ if (is_irq_target) {
+ s->ctx_data.rx.events_per_period = events_per_period;
+ s->ctx_data.rx.event_count = 0;
+ ctx_cb = amdtp_stream_master_first_callback;
+ ctx_data = d;
+ } else {
+ ctx_cb = amdtp_stream_first_callback;
+ ctx_data = s;
+ }
+
s->context = fw_iso_context_create(fw_parent_device(s->unit)->card,
type, channel, speed, ctx_header_size,
- amdtp_stream_first_callback, s);
+ ctx_cb, ctx_data);
if (IS_ERR(s->context)) {
err = PTR_ERR(s->context);
if (err == -EBUSY)
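To make the sizing above concrete, a sketch of idle_irq_interval and queue_size for a MIDI-only domain at 48.0 kHz, assuming amdtp_rate_table[CIP_SFC_48000] is 48000 (the table itself is not shown in this diff):

/* Sketch: queue sizing in amdtp_stream_start() for a MIDI-only domain at
 * 48.0 kHz. The rate value is an assumption about amdtp_rate_table[]. */
#include <stdio.h>

#define CYCLES_PER_SECOND	8000
#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

int main(void)
{
	unsigned int rate = 48000;			/* amdtp_rate_table[sfc] */
	unsigned int events_per_period = rate / 100;	/* 480: 10 msec default */
	unsigned int events_per_buffer = events_per_period * 3;

	/* 8000 * 480 / 48000 = 80 cycles (10 msec) between idle IRQs. */
	printf("idle_irq_interval: %u cycles\n",
	       DIV_ROUND_UP(CYCLES_PER_SECOND * events_per_period, rate));
	/* 8000 * 1440 / 48000 = 240 packets (30 msec) in the queue. */
	printf("queue_size:        %u packets\n",
	       DIV_ROUND_UP(CYCLES_PER_SECOND * events_per_buffer, rate));
	return 0;
}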
@@ -981,7 +1114,7 @@ static int amdtp_stream_start(struct amdtp_stream *s, int channel, int speed)
else
s->tag = TAG_CIP;
- s->pkt_descs = kcalloc(INTERRUPT_INTERVAL, sizeof(*s->pkt_descs),
+ s->pkt_descs = kcalloc(s->queue_size, sizeof(*s->pkt_descs),
GFP_KERNEL);
if (!s->pkt_descs) {
err = -ENOMEM;
@@ -991,12 +1124,21 @@ static int amdtp_stream_start(struct amdtp_stream *s, int channel, int speed)
s->packet_index = 0;
do {
struct fw_iso_packet params;
+
if (s->direction == AMDTP_IN_STREAM) {
err = queue_in_packet(s, &params);
} else {
+ bool sched_irq = false;
+
params.header_length = 0;
params.payload_length = 0;
- err = queue_out_packet(s, &params);
+
+ if (is_irq_target) {
+ sched_irq = !((s->packet_index + 1) %
+ idle_irq_interval);
+ }
+
+ err = queue_out_packet(s, &params, sched_irq);
}
if (err < 0)
goto err_pkt_descs;
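During this initial fill the queue holds only skip packets, so the event counter cannot drive IRQ scheduling yet; every idle_irq_interval-th slot is flagged instead. A tiny check of which slots those are, continuing the 48.0 kHz example above:

/* Sketch: IRQ flags during the initial fill of skip packets. With the
 * values from the previous example, slots 79, 159 and 239 carry an IRQ. */
#include <stdio.h>

int main(void)
{
	unsigned int idle_irq_interval = 80;
	unsigned int queue_size = 240;

	for (unsigned int idx = 0; idx < queue_size; ++idx) {
		if (!((idx + 1) % idle_irq_interval))
			printf("IRQ at queue slot %u\n", idx);
	}
	return 0;
}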
@@ -1008,7 +1150,7 @@ static int amdtp_stream_start(struct amdtp_stream *s, int channel, int speed)
tag |= FW_ISO_CONTEXT_MATCH_TAG0;
s->callbacked = false;
- err = fw_iso_context_start(s->context, -1, 0, tag);
+ err = fw_iso_context_start(s->context, start_cycle, 0, tag);
if (err < 0)
goto err_pkt_descs;
@@ -1029,54 +1171,69 @@ err_unlock:
}
/**
- * amdtp_stream_pcm_pointer - get the PCM buffer position
+ * amdtp_domain_stream_pcm_pointer - get the PCM buffer position
+ * @d: the AMDTP domain.
* @s: the AMDTP stream that transports the PCM data
*
* Returns the current buffer position, in frames.
*/
-unsigned long amdtp_stream_pcm_pointer(struct amdtp_stream *s)
+unsigned long amdtp_domain_stream_pcm_pointer(struct amdtp_domain *d,
+ struct amdtp_stream *s)
{
- /*
- * This function is called in software IRQ context of period_tasklet or
- * process context.
- *
- * When the software IRQ context was scheduled by software IRQ context
- * of IR/IT contexts, queued packets were already handled. Therefore,
- * no need to flush the queue in buffer anymore.
- *
- * When the process context reach here, some packets will be already
- * queued in the buffer. These packets should be handled immediately
- * to keep better granularity of PCM pointer.
- *
- * Later, the process context will sometimes schedules software IRQ
- * context of the period_tasklet. Then, no need to flush the queue by
- * the same reason as described for IR/IT contexts.
- */
- if (!in_interrupt() && amdtp_stream_running(s))
- fw_iso_context_flush_completions(s->context);
+ struct amdtp_stream *irq_target = d->irq_target;
+
+ if (irq_target && amdtp_stream_running(irq_target)) {
+ // This function is called in software IRQ context of
+ // period_tasklet or process context.
+ //
+ // When the software IRQ context was scheduled by software IRQ
+ // context of IT contexts, queued packets were already handled.
+ // Therefore, no need to flush the queue in buffer anymore.
+ //
+ // When the process context reaches here, some packets will already
+ // be queued in the buffer. These packets should be handled
+ // immediately to keep better granularity of PCM pointer.
+ //
+ // Later, the process context will sometimes schedule the software
+ // IRQ context of the period_tasklet. Then, no need to flush the
+ // queue for the same reason as described above.
+ if (!in_interrupt()) {
+ // Queued packets should be processed without any kernel
+ // preemption to keep latency low against the bus cycle.
+ preempt_disable();
+ fw_iso_context_flush_completions(irq_target->context);
+ preempt_enable();
+ }
+ }
return READ_ONCE(s->pcm_buffer_pointer);
}
-EXPORT_SYMBOL(amdtp_stream_pcm_pointer);
+EXPORT_SYMBOL_GPL(amdtp_domain_stream_pcm_pointer);
/**
- * amdtp_stream_pcm_ack - acknowledge queued PCM frames
+ * amdtp_domain_stream_pcm_ack - acknowledge queued PCM frames
+ * @d: the AMDTP domain.
* @s: the AMDTP stream that transfers the PCM frames
*
* Returns zero always.
*/
-int amdtp_stream_pcm_ack(struct amdtp_stream *s)
+int amdtp_domain_stream_pcm_ack(struct amdtp_domain *d, struct amdtp_stream *s)
{
- /*
- * Process isochronous packets for recent isochronous cycle to handle
- * queued PCM frames.
- */
- if (amdtp_stream_running(s))
- fw_iso_context_flush_completions(s->context);
+ struct amdtp_stream *irq_target = d->irq_target;
+
+ // Process isochronous packets for recent isochronous cycle to handle
+ // queued PCM frames.
+ if (irq_target && amdtp_stream_running(irq_target)) {
+ // Queued packets should be processed without any kernel
+ // preemption to keep latency low against the bus cycle.
+ preempt_disable();
+ fw_iso_context_flush_completions(irq_target->context);
+ preempt_enable();
+ }
return 0;
}
-EXPORT_SYMBOL(amdtp_stream_pcm_ack);
+EXPORT_SYMBOL_GPL(amdtp_domain_stream_pcm_ack);
/**
* amdtp_stream_update - update the stream after a bus reset
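As a usage note for amdtp_domain_stream_pcm_pointer() and amdtp_domain_stream_pcm_ack() above: a unit driver is expected to delegate its PCM .pointer and .ack callbacks to the domain. A hypothetical sketch — struct snd_mydev, its fields, and the callback names are illustrative, not from this diff:

/* Hypothetical PCM callbacks in a unit driver built on these helpers. */
static snd_pcm_uframes_t pcm_capture_pointer(struct snd_pcm_substream *substream)
{
	struct snd_mydev *mydev = substream->private_data;

	return amdtp_domain_stream_pcm_pointer(&mydev->domain,
					       &mydev->tx_stream);
}

static int pcm_capture_ack(struct snd_pcm_substream *substream)
{
	struct snd_mydev *mydev = substream->private_data;

	return amdtp_domain_stream_pcm_ack(&mydev->domain, &mydev->tx_stream);
}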
@@ -1143,6 +1300,8 @@ int amdtp_domain_init(struct amdtp_domain *d)
{
INIT_LIST_HEAD(&d->streams);
+ d->events_per_period = 0;
+
return 0;
}
EXPORT_SYMBOL_GPL(amdtp_domain_init);
@@ -1184,26 +1343,105 @@ int amdtp_domain_add_stream(struct amdtp_domain *d, struct amdtp_stream *s,
}
EXPORT_SYMBOL_GPL(amdtp_domain_add_stream);
+static int get_current_cycle_time(struct fw_card *fw_card, int *cur_cycle)
+{
+ int generation;
+ int rcode;
+ __be32 reg;
+ u32 data;
+
+ // This is a request to the local 1394 OHCI controller and is expected
+ // to complete without any event waiting.
+ generation = fw_card->generation;
+ smp_rmb(); // node_id vs. generation.
+ rcode = fw_run_transaction(fw_card, TCODE_READ_QUADLET_REQUEST,
+ fw_card->node_id, generation, SCODE_100,
+ CSR_REGISTER_BASE + CSR_CYCLE_TIME,
+ &reg, sizeof(reg));
+ if (rcode != RCODE_COMPLETE)
+ return -EIO;
+
+ data = be32_to_cpu(reg);
+ *cur_cycle = data >> 12;
+
+ return 0;
+}
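The quadlet read above packs three fields; per IEEE 1394, bits 31..25 hold second_count, bits 24..12 cycle_count (0..7999), and bits 11..0 cycle_offset (0..3071), so the shift by 12 keeps seconds plus cycle count. A standalone decode sketch with an arbitrary example value:

/* Sketch: decoding a CSR_CYCLE_TIME quadlet as get_current_cycle_time()
 * does. The example value is arbitrary. */
#include <stdio.h>

int main(void)
{
	unsigned int data = (45u << 25) | (7990u << 12) | 1234u;
	unsigned int cur_cycle = data >> 12;	/* what *cur_cycle receives */

	printf("sec: %u, cycle: %u, offset: %u\n",
	       data >> 25, (data >> 12) & 0x1fff, data & 0xfff);
	printf("cur_cycle ((sec << 13) | cycle): %u\n", cur_cycle);
	return 0;
}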
+
/**
* amdtp_domain_start - start sending packets for isoc context in the domain.
* @d: the AMDTP domain.
+ * @ir_delay_cycle: the cycle delay to start all IR contexts.
*/
-int amdtp_domain_start(struct amdtp_domain *d)
+int amdtp_domain_start(struct amdtp_domain *d, unsigned int ir_delay_cycle)
{
struct amdtp_stream *s;
- int err = 0;
+ int cycle;
+ int err;
+ // Select an IT context as IRQ target.
list_for_each_entry(s, &d->streams, list) {
- err = amdtp_stream_start(s, s->channel, s->speed);
- if (err < 0)
+ if (s->direction == AMDTP_OUT_STREAM)
break;
}
+ if (&s->list == &d->streams)
+ return -ENXIO;
+ d->irq_target = s;
- if (err < 0) {
- list_for_each_entry(s, &d->streams, list)
- amdtp_stream_stop(s);
+ if (ir_delay_cycle > 0) {
+ struct fw_card *fw_card = fw_parent_device(s->unit)->card;
+
+ err = get_current_cycle_time(fw_card, &cycle);
+ if (err < 0)
+ return err;
+
+ // No need to care about overflow in the cycle field because the field
+ // has enough width.
+ cycle += ir_delay_cycle;
+
+ // Round up to sec field.
+ if ((cycle & 0x00001fff) >= CYCLES_PER_SECOND) {
+ unsigned int sec;
+
+ // The sec field can overflow.
+ sec = (cycle & 0xffffe000) >> 13;
+ cycle = (++sec << 13) |
+ ((cycle & 0x00001fff) % CYCLES_PER_SECOND);
+ }
+
+ // In OHCI 1394 specification, the lower 2 bits are available for the
+ // sec field.
+ cycle &= 0x00007fff;
+ } else {
+ cycle = -1;
+ }
+
+ list_for_each_entry(s, &d->streams, list) {
+ int cycle_match;
+
+ if (s->direction == AMDTP_IN_STREAM) {
+ cycle_match = cycle;
+ } else {
+ // IT context starts immediately.
+ cycle_match = -1;
+ }
+
+ if (s != d->irq_target) {
+ err = amdtp_stream_start(s, s->channel, s->speed, d,
+ false, cycle_match);
+ if (err < 0)
+ goto error;
+ }
}
+ s = d->irq_target;
+ err = amdtp_stream_start(s, s->channel, s->speed, d, true, -1);
+ if (err < 0)
+ goto error;
+
+ return 0;
+error:
+ list_for_each_entry(s, &d->streams, list)
+ amdtp_stream_stop(s);
return err;
}
EXPORT_SYMBOL_GPL(amdtp_domain_start);
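To trace the cycle-match rounding above with concrete numbers, a small standalone sketch; the cur_cycle layout ((sec << 13) | cycle) follows get_current_cycle_time(), and the ir_delay_cycle value is illustrative:

/* Sketch: the rounding in amdtp_domain_start(). Note that the remainder
 * must use modulo; a plain division would yield cycle 1 here instead of
 * the correct 100. */
#include <stdio.h>

#define CYCLES_PER_SECOND 8000

int main(void)
{
	int cycle = (45 << 13) | 7900;		/* sec = 45, cycle = 7900 */
	unsigned int ir_delay_cycle = 200;	/* illustrative delay */

	cycle += ir_delay_cycle;	/* low 13 bits now hold 8100 > 7999 */

	if ((cycle & 0x00001fff) >= CYCLES_PER_SECOND) {
		unsigned int sec = (cycle & 0xffffe000) >> 13;

		cycle = (++sec << 13) |
			((cycle & 0x00001fff) % CYCLES_PER_SECOND);
	}

	/* Only 2 bits of sec survive in the 15-bit cycle-match value. */
	cycle &= 0x00007fff;

	/* sec 46 truncates to 2 (its low 2 bits); cycle becomes 100. */
	printf("match: sec %u, cycle %u\n",
	       (cycle >> 13) & 0x3, cycle & 0x1fff);
	return 0;
}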
@@ -1216,10 +1454,17 @@ void amdtp_domain_stop(struct amdtp_domain *d)
{
struct amdtp_stream *s, *next;
+ if (d->irq_target)
+ amdtp_stream_stop(d->irq_target);
+
list_for_each_entry_safe(s, next, &d->streams, list) {
list_del(&s->list);
- amdtp_stream_stop(s);
+ if (s != d->irq_target)
+ amdtp_stream_stop(s);
}
+
+ d->events_per_period = 0;
+ d->irq_target = NULL;
}
EXPORT_SYMBOL_GPL(amdtp_domain_stop);
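Finally, a hypothetical unit-driver sequence tying the domain API together; struct my_dev, its fields, the error handling, and the 240-cycle delay are illustrative assumptions, not from this diff:

/* Hypothetical driver-side sequence for the domain API, assuming one IR
 * and one IT stream already configured with channel, speed and stream
 * parameters. */
static int my_dev_start_streams(struct my_dev *dev)
{
	int err;

	err = amdtp_domain_add_stream(&dev->domain, &dev->tx_stream,
				      dev->tx_channel, dev->tx_speed);
	if (err < 0)
		return err;

	err = amdtp_domain_add_stream(&dev->domain, &dev->rx_stream,
				      dev->rx_channel, dev->rx_speed);
	if (err < 0)
		return err;

	/* Start IR contexts 240 cycles (30 msec) after the current cycle;
	 * the IT context selected as IRQ target starts immediately. */
	err = amdtp_domain_start(&dev->domain, 240);
	if (err < 0)
		amdtp_domain_stop(&dev->domain);

	return err;
}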