1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3 * Audio and Music Data Transmission Protocol (IEC 61883-6) streams
4 * with Common Isochronous Packet (IEC 61883-1) headers
5 *
6 * Copyright (c) Clemens Ladisch <clemens@ladisch.de>
7 */
9 #include <linux/device.h>
10 #include <linux/err.h>
11 #include <linux/firewire.h>
12 #include <linux/firewire-constants.h>
13 #include <linux/module.h>
14 #include <linux/slab.h>
15 #include <sound/pcm.h>
16 #include <sound/pcm_params.h>
17 #include "amdtp-stream.h"
19 #define TICKS_PER_CYCLE 3072
20 #define CYCLES_PER_SECOND 8000
21 #define TICKS_PER_SECOND (TICKS_PER_CYCLE * CYCLES_PER_SECOND)
23 #define OHCI_SECOND_MODULUS 8
25 /* Always support Linux tracing subsystem. */
26 #define CREATE_TRACE_POINTS
27 #include "amdtp-stream-trace.h"
29 #define TRANSFER_DELAY_TICKS 0x2e00 /* 479.17 microseconds */
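// Worked example: 0x2e00 = 11776 ticks; with TICKS_PER_SECOND = 3072 * 8000 =
// 24576000, this is 11776 / 24576000 s ~= 479.17 usec, i.e. slightly less than
// four isochronous cycles of 125 usec each.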
31 /* isochronous header parameters */
32 #define ISO_DATA_LENGTH_SHIFT 16
33 #define TAG_NO_CIP_HEADER 0
34 #define TAG_CIP 1
36 // Common Isochronous Packet (CIP) header parameters. Use two quadlets CIP header when supported.
37 #define CIP_HEADER_QUADLETS 2
38 #define CIP_EOH_SHIFT 31
39 #define CIP_EOH (1u << CIP_EOH_SHIFT)
40 #define CIP_EOH_MASK 0x80000000
41 #define CIP_SID_SHIFT 24
42 #define CIP_SID_MASK 0x3f000000
43 #define CIP_DBS_MASK 0x00ff0000
44 #define CIP_DBS_SHIFT 16
45 #define CIP_SPH_MASK 0x00000400
46 #define CIP_SPH_SHIFT 10
47 #define CIP_DBC_MASK 0x000000ff
48 #define CIP_FMT_SHIFT 24
49 #define CIP_FMT_MASK 0x3f000000
50 #define CIP_FDF_MASK 0x00ff0000
51 #define CIP_FDF_SHIFT 16
52 #define CIP_FDF_NO_DATA 0xff
53 #define CIP_SYT_MASK 0x0000ffff
54 #define CIP_SYT_NO_INFO 0xffff
55 #define CIP_SYT_CYCLE_MODULUS 16
56 #define CIP_NO_DATA ((CIP_FDF_NO_DATA << CIP_FDF_SHIFT) | CIP_SYT_NO_INFO)
58 #define CIP_HEADER_SIZE (sizeof(__be32) * CIP_HEADER_QUADLETS)
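//
// An illustrative sketch of how the masks and shifts above decode a two-quadlet
// CIP header once the quadlets are converted to host endianness. The helper
// name below is hypothetical and not part of this module:
//
//	static void example_decode_cip(u32 q0, u32 q1)
//	{
//		unsigned int sid = (q0 & CIP_SID_MASK) >> CIP_SID_SHIFT;
//		unsigned int dbs = (q0 & CIP_DBS_MASK) >> CIP_DBS_SHIFT;
//		unsigned int dbc = q0 & CIP_DBC_MASK;
//		unsigned int fmt = (q1 & CIP_FMT_MASK) >> CIP_FMT_SHIFT;
//		unsigned int fdf = (q1 & CIP_FDF_MASK) >> CIP_FDF_SHIFT;
//		unsigned int syt = q1 & CIP_SYT_MASK;
//	}
//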
60 /* Audio and Music transfer protocol specific parameters */
61 #define CIP_FMT_AM 0x10
62 #define AMDTP_FDF_NO_DATA 0xff
64 // For iso header and tstamp.
65 #define IR_CTX_HEADER_DEFAULT_QUADLETS 2
67 #define IR_CTX_HEADER_SIZE_NO_CIP (sizeof(__be32) * IR_CTX_HEADER_DEFAULT_QUADLETS)
68 // Add two quadlets CIP header.
69 #define IR_CTX_HEADER_SIZE_CIP (IR_CTX_HEADER_SIZE_NO_CIP + CIP_HEADER_SIZE)
70 #define HEADER_TSTAMP_MASK 0x0000ffff
72 #define IT_PKT_HEADER_SIZE_CIP CIP_HEADER_SIZE
73 #define IT_PKT_HEADER_SIZE_NO_CIP 0 // Nothing.
75 // The initial firmware of OXFW970 can postpone packet transmission while finishing an
76 // asynchronous transaction. This module accepts up to 5 skipped cycles to avoid buffer
77 // overrun. If the actual device skips more than that, this module stops the packet streaming.
78 #define IR_JUMBO_PAYLOAD_MAX_SKIP_CYCLES 5
81 * amdtp_stream_init - initialize an AMDTP stream structure
82 * @s: the AMDTP stream to initialize
83 * @unit: the target of the stream
84 * @dir: the direction of stream
85 * @flags: the details of the streaming protocol, consisting of cip_flags enumeration constants
86 * @fmt: the value of fmt field in CIP header
87 * @process_ctx_payloads: callback handler to process payloads of isoc context
88 * @protocol_size: the size of the protocol-specific data to allocate
90 int amdtp_stream_init(struct amdtp_stream *s, struct fw_unit *unit,
91 enum amdtp_stream_direction dir, unsigned int flags,
93 amdtp_stream_process_ctx_payloads_t process_ctx_payloads,
94 unsigned int protocol_size)
96 if (process_ctx_payloads == NULL)
97 return -EINVAL;
99 s->protocol = kzalloc(protocol_size, GFP_KERNEL);
106 s->context = ERR_PTR(-1);
107 mutex_init(&s->mutex);
110 init_waitqueue_head(&s->ready_wait);
113 s->process_ctx_payloads = process_ctx_payloads;
117 EXPORT_SYMBOL(amdtp_stream_init);
120 * amdtp_stream_destroy - free stream resources
121 * @s: the AMDTP stream to destroy
123 void amdtp_stream_destroy(struct amdtp_stream *s)
125 /* Not initialized. */
126 if (s->protocol == NULL)
127 return;
129 WARN_ON(amdtp_stream_running(s));
131 mutex_destroy(&s->mutex);
133 EXPORT_SYMBOL(amdtp_stream_destroy);
135 const unsigned int amdtp_syt_intervals[CIP_SFC_COUNT] = {
136 [CIP_SFC_32000] = 8,
137 [CIP_SFC_44100] = 8,
138 [CIP_SFC_48000] = 8,
139 [CIP_SFC_88200] = 16,
140 [CIP_SFC_96000] = 16,
141 [CIP_SFC_176400] = 32,
142 [CIP_SFC_192000] = 32,
144 EXPORT_SYMBOL(amdtp_syt_intervals);
146 const unsigned int amdtp_rate_table[CIP_SFC_COUNT] = {
147 [CIP_SFC_32000] = 32000,
148 [CIP_SFC_44100] = 44100,
149 [CIP_SFC_48000] = 48000,
150 [CIP_SFC_88200] = 88200,
151 [CIP_SFC_96000] = 96000,
152 [CIP_SFC_176400] = 176400,
153 [CIP_SFC_192000] = 192000,
155 EXPORT_SYMBOL(amdtp_rate_table);
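// For example, at CIP_SFC_48000 the nominal number of events per isochronous
// cycle is 48000 / CYCLES_PER_SECOND = 6, while the corresponding entry of
// amdtp_syt_intervals gives a SYT_INTERVAL of 8 events as the granularity of
// presentation timestamps.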
157 static int apply_constraint_to_size(struct snd_pcm_hw_params *params,
158 struct snd_pcm_hw_rule *rule)
160 struct snd_interval *s = hw_param_interval(params, rule->var);
161 const struct snd_interval *r =
162 hw_param_interval_c(params, SNDRV_PCM_HW_PARAM_RATE);
163 struct snd_interval t = {0};
164 unsigned int step = 0;
165 int i;
167 for (i = 0; i < CIP_SFC_COUNT; ++i) {
168 if (snd_interval_test(r, amdtp_rate_table[i]))
169 step = max(step, amdtp_syt_intervals[i]);
172 t.min = roundup(s->min, step);
173 t.max = rounddown(s->max, step);
176 return snd_interval_refine(s, &t);
180 * amdtp_stream_add_pcm_hw_constraints - add hw constraints for PCM substream
181 * @s: the AMDTP stream, which must be initialized.
182 * @runtime: the PCM substream runtime
184 int amdtp_stream_add_pcm_hw_constraints(struct amdtp_stream *s,
185 struct snd_pcm_runtime *runtime)
187 struct snd_pcm_hardware *hw = &runtime->hw;
188 unsigned int ctx_header_size;
189 unsigned int maximum_usec_per_period;
192 hw->info = SNDRV_PCM_INFO_BLOCK_TRANSFER |
193 SNDRV_PCM_INFO_INTERLEAVED |
194 SNDRV_PCM_INFO_JOINT_DUPLEX |
195 SNDRV_PCM_INFO_MMAP |
196 SNDRV_PCM_INFO_MMAP_VALID |
197 SNDRV_PCM_INFO_NO_PERIOD_WAKEUP;
199 hw->periods_min = 2;
200 hw->periods_max = UINT_MAX;
202 /* bytes for a frame */
203 hw->period_bytes_min = 4 * hw->channels_max;
205 /* Just to prevent allocating too many pages. */
206 hw->period_bytes_max = hw->period_bytes_min * 2048;
207 hw->buffer_bytes_max = hw->period_bytes_max * hw->periods_min;
209 // Linux driver for 1394 OHCI controller voluntarily flushes isoc
210 // context when total size of accumulated context header reaches
211 // PAGE_SIZE. This kicks work for the isoc context and brings
212 // callback in the middle of scheduled interrupts.
213 // Although AMDTP streams in the same domain use the same events per
214 // IRQ, use the largest size of context header between IT/IR contexts.
215 // Here, the size of the context header in IR context is used for both
216 // contexts.
217 if (!(s->flags & CIP_NO_HEADER))
218 ctx_header_size = IR_CTX_HEADER_SIZE_CIP;
219 else
220 ctx_header_size = IR_CTX_HEADER_SIZE_NO_CIP;
221 maximum_usec_per_period = USEC_PER_SEC * PAGE_SIZE /
222 CYCLES_PER_SECOND / ctx_header_size;
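// As a worked example, assuming a 4 KiB PAGE_SIZE with the 16 byte CIP-aware
// IR context header: the flush happens every 4096 / 16 = 256 packets (256
// cycles = 32 msec), so maximum_usec_per_period evaluates to
// 1000000 * 4096 / 8000 / 16 = 32000 usec.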
224 // In IEC 61883-6, one isoc packet can transfer events up to the value
225 // of syt interval. This comes from the interval of isoc cycle. As 1394
226 // OHCI controller can generate hardware IRQ per isoc packet, the
227 // interval is 125 usec.
228 // However, there are two ways of transmission in IEC 61883-6; blocking
229 // and non-blocking modes. In blocking mode, the sequence of isoc packet
230 // includes 'empty' or 'NODATA' packets which include no event. In
231 // non-blocking mode, the number of events per packet is variable up to
232 // the syt interval.
233 // Due to the above protocol design, the minimum PCM frames per
234 // interrupt should be double the value of syt interval; thus it is
235 // 250 usec.
236 err = snd_pcm_hw_constraint_minmax(runtime,
237 SNDRV_PCM_HW_PARAM_PERIOD_TIME,
238 250, maximum_usec_per_period);
242 /* Non-Blocking stream has no more constraints */
243 if (!(s->flags & CIP_BLOCKING))
244 return 0;
246 /*
247 * One AMDTP packet can include some frames. In blocking mode, the
248 * number equals SYT_INTERVAL. So the number is 8, 16 or 32,
249 * depending on its sampling rate. For accurate period interrupts, it's
250 * preferable to align period/buffer sizes to the current SYT_INTERVAL.
251 */
252 err = snd_pcm_hw_rule_add(runtime, 0, SNDRV_PCM_HW_PARAM_PERIOD_SIZE,
253 apply_constraint_to_size, NULL,
254 SNDRV_PCM_HW_PARAM_PERIOD_SIZE,
255 SNDRV_PCM_HW_PARAM_RATE, -1);
258 err = snd_pcm_hw_rule_add(runtime, 0, SNDRV_PCM_HW_PARAM_BUFFER_SIZE,
259 apply_constraint_to_size, NULL,
260 SNDRV_PCM_HW_PARAM_BUFFER_SIZE,
261 SNDRV_PCM_HW_PARAM_RATE, -1);
267 EXPORT_SYMBOL(amdtp_stream_add_pcm_hw_constraints);
270 * amdtp_stream_set_parameters - set stream parameters
271 * @s: the AMDTP stream to configure
272 * @rate: the sample rate
273 * @data_block_quadlets: the size of a data block in quadlet unit
275 * The parameters must be set before the stream is started, and must not be
276 * changed while the stream is running.
278 int amdtp_stream_set_parameters(struct amdtp_stream *s, unsigned int rate,
279 unsigned int data_block_quadlets)
283 for (sfc = 0; sfc < ARRAY_SIZE(amdtp_rate_table); ++sfc) {
284 if (amdtp_rate_table[sfc] == rate)
285 break;
287 if (sfc == ARRAY_SIZE(amdtp_rate_table))
288 return -EINVAL;
290 s->sfc = sfc;
291 s->data_block_quadlets = data_block_quadlets;
292 s->syt_interval = amdtp_syt_intervals[sfc];
294 // default buffering in the device.
295 s->transfer_delay = TRANSFER_DELAY_TICKS - TICKS_PER_CYCLE;
297 // additional buffering needed to adjust for no-data packets.
298 if (s->flags & CIP_BLOCKING)
299 s->transfer_delay += TICKS_PER_SECOND * s->syt_interval / rate;
303 EXPORT_SYMBOL(amdtp_stream_set_parameters);
305 // The CIP header is processed in the context header, apart from the context payload.
306 static int amdtp_stream_get_max_ctx_payload_size(struct amdtp_stream *s)
308 unsigned int multiplier;
310 if (s->flags & CIP_JUMBO_PAYLOAD)
311 multiplier = IR_JUMBO_PAYLOAD_MAX_SKIP_CYCLES;
312 else
313 multiplier = 1;
315 return s->syt_interval * s->data_block_quadlets * sizeof(__be32) * multiplier;
319 * amdtp_stream_get_max_payload - get the stream's packet size
320 * @s: the AMDTP stream
322 * This function must not be called before the stream has been configured
323 * with amdtp_stream_set_parameters().
325 unsigned int amdtp_stream_get_max_payload(struct amdtp_stream *s)
327 unsigned int cip_header_size;
329 if (!(s->flags & CIP_NO_HEADER))
330 cip_header_size = CIP_HEADER_SIZE;
331 else
332 cip_header_size = 0;
334 return cip_header_size + amdtp_stream_get_max_ctx_payload_size(s);
336 EXPORT_SYMBOL(amdtp_stream_get_max_payload);
339 * amdtp_stream_pcm_prepare - prepare PCM device for running
340 * @s: the AMDTP stream
342 * This function should be called from the PCM device's .prepare callback.
344 void amdtp_stream_pcm_prepare(struct amdtp_stream *s)
346 s->pcm_buffer_pointer = 0;
347 s->pcm_period_pointer = 0;
349 EXPORT_SYMBOL(amdtp_stream_pcm_prepare);
351 static void pool_blocking_data_blocks(struct amdtp_stream *s, struct seq_desc *descs,
352 unsigned int size, unsigned int pos, unsigned int count)
354 const unsigned int syt_interval = s->syt_interval;
357 for (i = 0; i < count; ++i) {
358 struct seq_desc *desc = descs + pos;
360 if (desc->syt_offset != CIP_SYT_NO_INFO)
361 desc->data_blocks = syt_interval;
362 else
363 desc->data_blocks = 0;
365 pos = (pos + 1) % size;
369 static void pool_ideal_nonblocking_data_blocks(struct amdtp_stream *s, struct seq_desc *descs,
370 unsigned int size, unsigned int pos,
373 const enum cip_sfc sfc = s->sfc;
374 unsigned int state = s->ctx_data.rx.data_block_state;
377 for (i = 0; i < count; ++i) {
378 struct seq_desc *desc = descs + pos;
380 if (!cip_sfc_is_base_44100(sfc)) {
381 // Sample_rate / 8000 is an integer, and precomputed.
382 desc->data_blocks = state;
384 unsigned int phase = state;
386 /*
387 * This calculates the number of data blocks per packet so that
388 * 1) the overall rate is correct and exactly synchronized to
389 *    the bus clock, and
390 * 2) packets with a rounded-up number of blocks occur as early
391 *    as possible in the sequence (to prevent underruns of the
392 *    device's buffer).
393 */
394 if (sfc == CIP_SFC_44100)
395 /* 6 6 5 6 5 6 5 ... */
396 desc->data_blocks = 5 + ((phase & 1) ^ (phase == 0 || phase >= 40));
397 else
398 /* 12 11 11 11 11 ... or 23 22 22 22 22 ... */
399 desc->data_blocks = 11 * (sfc >> 1) + (phase == 0);
400 if (++phase >= (80 >> (sfc >> 1)))
401 phase = 0;
402 state = phase;
405 pos = (pos + 1) % size;
408 s->ctx_data.rx.data_block_state = state;
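// A quick check of the 44.1 kHz pattern above: over one period of 80 packets,
// 41 packets carry 6 data blocks and 39 carry 5, so 41 * 6 + 39 * 5 = 441
// events are transferred, exactly 80 * 44100 / 8000.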
411 static unsigned int calculate_syt_offset(unsigned int *last_syt_offset,
412 unsigned int *syt_offset_state, enum cip_sfc sfc)
414 unsigned int syt_offset;
416 if (*last_syt_offset < TICKS_PER_CYCLE) {
417 if (!cip_sfc_is_base_44100(sfc))
418 syt_offset = *last_syt_offset + *syt_offset_state;
419 else {
420 /*
421 * The time, in ticks, of the n'th SYT_INTERVAL sample is:
422 * n * SYT_INTERVAL * 24576000 / sample_rate
423 * Modulo TICKS_PER_CYCLE, the difference between successive
424 * elements is about 1386.23. Rounding the results of this
425 * formula to the SYT precision results in a sequence of
426 * differences that begins with:
427 * 1386 1386 1387 1386 1386 1386 1387 1386 1386 1386 1387 ...
428 * This code generates _exactly_ the same sequence.
429 */
430 unsigned int phase = *syt_offset_state;
431 unsigned int index = phase % 13;
433 syt_offset = *last_syt_offset;
434 syt_offset += 1386 + ((index && !(index & 3)) ||
435 phase == 146);
436 if (++phase >= 147)
437 phase = 0;
438 *syt_offset_state = phase;
441 syt_offset = *last_syt_offset - TICKS_PER_CYCLE;
442 *last_syt_offset = syt_offset;
444 if (syt_offset >= TICKS_PER_CYCLE)
445 syt_offset = CIP_SYT_NO_INFO;
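// A quick check of the 1386 increment above: at the 44100 Hz family, one
// SYT_INTERVAL of 8 samples lasts 8 * 24576000 / 44100 ~= 4458.23 ticks, and
// 4458.23 modulo TICKS_PER_CYCLE (3072) ~= 1386.23, which matches the sequence
// of differences documented in the comment.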
450 static void pool_ideal_syt_offsets(struct amdtp_stream *s, struct seq_desc *descs,
451 unsigned int size, unsigned int pos, unsigned int count)
453 const enum cip_sfc sfc = s->sfc;
454 unsigned int last = s->ctx_data.rx.last_syt_offset;
455 unsigned int state = s->ctx_data.rx.syt_offset_state;
458 for (i = 0; i < count; ++i) {
459 struct seq_desc *desc = descs + pos;
461 desc->syt_offset = calculate_syt_offset(&last, &state, sfc);
463 pos = (pos + 1) % size;
466 s->ctx_data.rx.last_syt_offset = last;
467 s->ctx_data.rx.syt_offset_state = state;
470 static unsigned int compute_syt_offset(unsigned int syt, unsigned int cycle,
471 unsigned int transfer_delay)
473 unsigned int cycle_lo = (cycle % CYCLES_PER_SECOND) & 0x0f;
474 unsigned int syt_cycle_lo = (syt & 0xf000) >> 12;
475 unsigned int syt_offset;
478 if (syt_cycle_lo < cycle_lo)
479 syt_cycle_lo += CIP_SYT_CYCLE_MODULUS;
480 syt_cycle_lo -= cycle_lo;
482 // Subtract transfer delay so that the synchronization offset is not so large
483 // at transmission.
484 syt_offset = syt_cycle_lo * TICKS_PER_CYCLE + (syt & 0x0fff);
485 if (syt_offset < transfer_delay)
486 syt_offset += CIP_SYT_CYCLE_MODULUS * TICKS_PER_CYCLE;
488 return syt_offset - transfer_delay;
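// Worked example: a packet received in cycle 5 with syt = 0x91e8 gives
// syt_cycle_lo = 9 and cycle_lo = 5, so presentation is 4 cycles ahead;
// syt_offset = 4 * 3072 + 0x1e8 = 12776 ticks, and subtracting
// TRANSFER_DELAY_TICKS (11776) leaves 1000 ticks.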
491 // Both the producer and the consumer of the queue run on the same clock of the IEEE 1394 bus.
492 // Additionally, the sequence of tx packets is severely checked against any discontinuity
493 // before filling entries in the queue. The calculation is safe even if it looks fragile by
494 // underflow.
495 static unsigned int calculate_cached_cycle_count(struct amdtp_stream *s, unsigned int head)
497 const unsigned int cache_size = s->ctx_data.tx.cache.size;
498 unsigned int cycles = s->ctx_data.tx.cache.pos;
500 if (cycles < head)
501 cycles += cache_size;
502 cycles -= head;
504 return cycles;
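// For example, with cache_size = 10, cache.pos = 3 and head = 7, the count of
// cached entries is 3 + 10 - 7 = 6; adding cache_size before the subtraction
// handles the wrap-around of the ring buffer position.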
507 static void cache_seq(struct amdtp_stream *s, const struct pkt_desc *src, unsigned int desc_count)
509 const unsigned int transfer_delay = s->transfer_delay;
510 const unsigned int cache_size = s->ctx_data.tx.cache.size;
511 struct seq_desc *cache = s->ctx_data.tx.cache.descs;
512 unsigned int cache_pos = s->ctx_data.tx.cache.pos;
513 bool aware_syt = !(s->flags & CIP_UNAWARE_SYT);
516 for (i = 0; i < desc_count; ++i) {
517 struct seq_desc *dst = cache + cache_pos;
519 if (aware_syt && src->syt != CIP_SYT_NO_INFO)
520 dst->syt_offset = compute_syt_offset(src->syt, src->cycle, transfer_delay);
521 else
522 dst->syt_offset = CIP_SYT_NO_INFO;
523 dst->data_blocks = src->data_blocks;
525 cache_pos = (cache_pos + 1) % cache_size;
526 src = amdtp_stream_next_packet_desc(s, src);
529 s->ctx_data.tx.cache.pos = cache_pos;
532 static void pool_ideal_seq_descs(struct amdtp_stream *s, struct seq_desc *descs, unsigned int size,
533 unsigned int pos, unsigned int count)
535 pool_ideal_syt_offsets(s, descs, size, pos, count);
537 if (s->flags & CIP_BLOCKING)
538 pool_blocking_data_blocks(s, descs, size, pos, count);
539 else
540 pool_ideal_nonblocking_data_blocks(s, descs, size, pos, count);
543 static void pool_replayed_seq(struct amdtp_stream *s, struct seq_desc *descs, unsigned int size,
544 unsigned int pos, unsigned int count)
546 struct amdtp_stream *target = s->ctx_data.rx.replay_target;
547 const struct seq_desc *cache = target->ctx_data.tx.cache.descs;
548 const unsigned int cache_size = target->ctx_data.tx.cache.size;
549 unsigned int cache_pos = s->ctx_data.rx.cache_pos;
552 for (i = 0; i < count; ++i) {
553 descs[pos] = cache[cache_pos];
554 cache_pos = (cache_pos + 1) % cache_size;
555 pos = (pos + 1) % size;
558 s->ctx_data.rx.cache_pos = cache_pos;
561 static void pool_seq_descs(struct amdtp_stream *s, struct seq_desc *descs, unsigned int size,
562 unsigned int pos, unsigned int count)
564 struct amdtp_domain *d = s->domain;
565 void (*pool_seq_descs)(struct amdtp_stream *s, struct seq_desc *descs, unsigned int size,
566 unsigned int pos, unsigned int count);
568 if (!d->replay.enable || !s->ctx_data.rx.replay_target) {
569 pool_seq_descs = pool_ideal_seq_descs;
571 if (!d->replay.on_the_fly) {
572 pool_seq_descs = pool_replayed_seq;
574 struct amdtp_stream *tx = s->ctx_data.rx.replay_target;
575 const unsigned int cache_size = tx->ctx_data.tx.cache.size;
576 const unsigned int cache_pos = s->ctx_data.rx.cache_pos;
577 unsigned int cached_cycles = calculate_cached_cycle_count(tx, cache_pos);
579 if (cached_cycles > count && cached_cycles > cache_size / 2)
580 pool_seq_descs = pool_replayed_seq;
582 pool_seq_descs = pool_ideal_seq_descs;
586 pool_seq_descs(s, descs, size, pos, count);
589 static void update_pcm_pointers(struct amdtp_stream *s,
590 struct snd_pcm_substream *pcm,
591 unsigned int frames)
593 unsigned int ptr;
595 ptr = s->pcm_buffer_pointer + frames;
596 if (ptr >= pcm->runtime->buffer_size)
597 ptr -= pcm->runtime->buffer_size;
598 WRITE_ONCE(s->pcm_buffer_pointer, ptr);
600 s->pcm_period_pointer += frames;
601 if (s->pcm_period_pointer >= pcm->runtime->period_size) {
602 s->pcm_period_pointer -= pcm->runtime->period_size;
604 // The program in the user process should periodically check the status of the
605 // intermediate buffer associated with the PCM substream to process PCM frames in
606 // the buffer, instead of receiving notification of period elapsed by poll wait.
607 if (!pcm->runtime->no_period_wakeup) {
609 // In software IRQ context for 1394 OHCI.
610 snd_pcm_period_elapsed(pcm);
612 // In process context of ALSA PCM application under acquired lock of
613 // PCM substream.
614 snd_pcm_period_elapsed_under_stream_lock(pcm);
620 static int queue_packet(struct amdtp_stream *s, struct fw_iso_packet *params,
625 params->interrupt = sched_irq;
626 params->tag = s->tag;
629 err = fw_iso_context_queue(s->context, params, &s->buffer.iso_buffer,
630 s->buffer.packets[s->packet_index].offset);
631 if (err < 0) {
632 dev_err(&s->unit->device, "queueing error: %d\n", err);
633 goto end;
636 if (++s->packet_index >= s->queue_size)
637 s->packet_index = 0;
638 end:
639 return err;
642 static inline int queue_out_packet(struct amdtp_stream *s,
643 struct fw_iso_packet *params, bool sched_irq)
645 params->skip =
646 !!(params->header_length == 0 && params->payload_length == 0);
647 return queue_packet(s, params, sched_irq);
650 static inline int queue_in_packet(struct amdtp_stream *s,
651 struct fw_iso_packet *params)
653 // Queue one packet for IR context.
654 params->header_length = s->ctx_data.tx.ctx_header_size;
655 params->payload_length = s->ctx_data.tx.max_ctx_payload_length;
656 params->skip = false;
657 return queue_packet(s, params, false);
660 static void generate_cip_header(struct amdtp_stream *s, __be32 cip_header[2],
661 unsigned int data_block_counter, unsigned int syt)
663 cip_header[0] = cpu_to_be32(READ_ONCE(s->source_node_id_field) |
664 (s->data_block_quadlets << CIP_DBS_SHIFT) |
665 ((s->sph << CIP_SPH_SHIFT) & CIP_SPH_MASK) |
666 data_block_counter);
667 cip_header[1] = cpu_to_be32(CIP_EOH |
668 ((s->fmt << CIP_FMT_SHIFT) & CIP_FMT_MASK) |
669 ((s->ctx_data.rx.fdf << CIP_FDF_SHIFT) & CIP_FDF_MASK) |
670 (syt & CIP_SYT_MASK));
673 static void build_it_pkt_header(struct amdtp_stream *s, unsigned int cycle,
674 struct fw_iso_packet *params, unsigned int header_length,
675 unsigned int data_blocks,
676 unsigned int data_block_counter,
677 unsigned int syt, unsigned int index)
679 unsigned int payload_length;
682 payload_length = data_blocks * sizeof(__be32) * s->data_block_quadlets;
683 params->payload_length = payload_length;
685 if (header_length > 0) {
686 cip_header = (__be32 *)params->header;
687 generate_cip_header(s, cip_header, data_block_counter, syt);
688 params->header_length = header_length;
693 trace_amdtp_packet(s, cycle, cip_header, payload_length + header_length, data_blocks,
694 data_block_counter, s->packet_index, index);
697 static int check_cip_header(struct amdtp_stream *s, const __be32 *buf,
698 unsigned int payload_length,
699 unsigned int *data_blocks,
700 unsigned int *data_block_counter, unsigned int *syt)
709 cip_header[0] = be32_to_cpu(buf[0]);
710 cip_header[1] = be32_to_cpu(buf[1]);
712 /*
713 * This module supports 'Two-quadlet CIP header with SYT field'.
714 * For convenience, also check whether the FMT field is AM824 or not.
715 */
716 if ((((cip_header[0] & CIP_EOH_MASK) == CIP_EOH) ||
717 ((cip_header[1] & CIP_EOH_MASK) != CIP_EOH)) &&
718 (!(s->flags & CIP_HEADER_WITHOUT_EOH))) {
719 dev_info_ratelimited(&s->unit->device,
720 "Invalid CIP header for AMDTP: %08X:%08X\n",
721 cip_header[0], cip_header[1]);
722 return -EAGAIN;
725 /* Check valid protocol or not. */
726 sph = (cip_header[0] & CIP_SPH_MASK) >> CIP_SPH_SHIFT;
727 fmt = (cip_header[1] & CIP_FMT_MASK) >> CIP_FMT_SHIFT;
728 if (sph != s->sph || fmt != s->fmt) {
729 dev_info_ratelimited(&s->unit->device,
730 "Detect unexpected protocol: %08x %08x\n",
731 cip_header[0], cip_header[1]);
732 return -EAGAIN;
735 /* Calculate data blocks */
736 fdf = (cip_header[1] & CIP_FDF_MASK) >> CIP_FDF_SHIFT;
737 if (payload_length == 0 || (fmt == CIP_FMT_AM && fdf == AMDTP_FDF_NO_DATA)) {
738 *data_blocks = 0;
739 } else {
740 unsigned int data_block_quadlets =
741 (cip_header[0] & CIP_DBS_MASK) >> CIP_DBS_SHIFT;
742 /* avoid division by zero */
743 if (data_block_quadlets == 0) {
744 dev_err(&s->unit->device,
745 "Detect invalid value in dbs field: %08X\n",
749 if (s->flags & CIP_WRONG_DBS)
750 data_block_quadlets = s->data_block_quadlets;
752 *data_blocks = payload_length / sizeof(__be32) / data_block_quadlets;
755 /* Check data block counter continuity */
756 dbc = cip_header[0] & CIP_DBC_MASK;
757 if (*data_blocks == 0 && (s->flags & CIP_EMPTY_HAS_WRONG_DBC) &&
758 *data_block_counter != UINT_MAX)
759 dbc = *data_block_counter;
761 if ((dbc == 0x00 && (s->flags & CIP_SKIP_DBC_ZERO_CHECK)) ||
762 *data_block_counter == UINT_MAX) {
763 lost = false;
764 } else if (!(s->flags & CIP_DBC_IS_END_EVENT)) {
765 lost = dbc != *data_block_counter;
767 unsigned int dbc_interval;
769 if (*data_blocks > 0 && s->ctx_data.tx.dbc_interval > 0)
770 dbc_interval = s->ctx_data.tx.dbc_interval;
771 else
772 dbc_interval = *data_blocks;
774 lost = dbc != ((*data_block_counter + dbc_interval) & 0xff);
778 dev_err(&s->unit->device,
779 "Detect discontinuity of CIP: %02X %02X\n",
780 *data_block_counter, dbc);
781 return -EIO;
784 *data_block_counter = dbc;
786 if (!(s->flags & CIP_UNAWARE_SYT))
787 *syt = cip_header[1] & CIP_SYT_MASK;
792 static int parse_ir_ctx_header(struct amdtp_stream *s, unsigned int cycle,
793 const __be32 *ctx_header,
794 unsigned int *data_blocks,
795 unsigned int *data_block_counter,
796 unsigned int *syt, unsigned int packet_index, unsigned int index)
798 unsigned int payload_length;
799 const __be32 *cip_header;
800 unsigned int cip_header_size;
802 payload_length = be32_to_cpu(ctx_header[0]) >> ISO_DATA_LENGTH_SHIFT;
804 if (!(s->flags & CIP_NO_HEADER))
805 cip_header_size = CIP_HEADER_SIZE;
806 else
807 cip_header_size = 0;
809 if (payload_length > cip_header_size + s->ctx_data.tx.max_ctx_payload_length) {
810 dev_err(&s->unit->device,
811 "Detect jumbo payload: %04x %04x\n",
812 payload_length, cip_header_size + s->ctx_data.tx.max_ctx_payload_length);
813 return -EIO;
816 if (cip_header_size > 0) {
817 if (payload_length >= cip_header_size) {
820 cip_header = ctx_header + IR_CTX_HEADER_DEFAULT_QUADLETS;
821 err = check_cip_header(s, cip_header, payload_length - cip_header_size,
822 data_blocks, data_block_counter, syt);
826 // Handle the cycle so that an empty packet arrives.
833 *data_blocks = payload_length / sizeof(__be32) / s->data_block_quadlets;
836 if (*data_block_counter == UINT_MAX)
837 *data_block_counter = 0;
840 trace_amdtp_packet(s, cycle, cip_header, payload_length, *data_blocks,
841 *data_block_counter, packet_index, index);
846 // In the CYCLE_TIMER register of IEEE 1394, 7 bits are used to represent the second. On
847 // the other hand, in DMA descriptors of 1394 OHCI, 3 bits are used to represent
848 // it. Thus, via the Linux firewire subsystem, we can get only those 3 bits for the second.
849 static inline u32 compute_ohci_cycle_count(__be32 ctx_header_tstamp)
851 u32 tstamp = be32_to_cpu(ctx_header_tstamp) & HEADER_TSTAMP_MASK;
852 return (((tstamp >> 13) & 0x07) * 8000) + (tstamp & 0x1fff);
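// Worked example: tstamp = 0x7345 yields (0x7345 >> 13) & 0x07 = 3 for the
// second field and 0x7345 & 0x1fff = 4933 for the cycle field, hence a cycle
// count of 3 * 8000 + 4933 = 28933 within the 8 second modulus.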
855 static inline u32 increment_ohci_cycle_count(u32 cycle, unsigned int addend)
857 cycle += addend;
858 if (cycle >= OHCI_SECOND_MODULUS * CYCLES_PER_SECOND)
859 cycle -= OHCI_SECOND_MODULUS * CYCLES_PER_SECOND;
861 return cycle;
863 static int compare_ohci_cycle_count(u32 lval, u32 rval)
865 if (lval == rval)
866 return 0;
867 else if (lval < rval && rval - lval < OHCI_SECOND_MODULUS * CYCLES_PER_SECOND / 2)
868 return -1;
869 else if (lval > rval && lval - rval > OHCI_SECOND_MODULUS * CYCLES_PER_SECOND / 2)
870 return -1;
871 else
872 return 1;
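// For example, within the modulus of 64000 cycles, compare_ohci_cycle_count(2, 63998)
// returns 1 and compare_ohci_cycle_count(63998, 2) returns -1: cycle 2 of the
// following 8 second period is treated as later than cycle 63998, since the
// plain distance between them exceeds half of the modulus.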
873 // Align to the actual cycle count for the packet which is going to be scheduled.
874 // This module queues the same number of isochronous cycles as the size of the queue
875 // to skip isochronous cycles, therefore it's OK to just increment the cycle by
876 // the size of the queue for the scheduled cycle.
877 static inline u32 compute_ohci_it_cycle(const __be32 ctx_header_tstamp,
878 unsigned int queue_size)
880 u32 cycle = compute_ohci_cycle_count(ctx_header_tstamp);
881 return increment_ohci_cycle_count(cycle, queue_size);
884 static int generate_tx_packet_descs(struct amdtp_stream *s, struct pkt_desc *desc,
885 const __be32 *ctx_header, unsigned int packet_count,
886 unsigned int *desc_count)
888 unsigned int next_cycle = s->next_cycle;
889 unsigned int dbc = s->data_block_counter;
890 unsigned int packet_index = s->packet_index;
891 unsigned int queue_size = s->queue_size;
896 for (i = 0; i < packet_count; ++i) {
899 unsigned int data_blocks;
902 cycle = compute_ohci_cycle_count(ctx_header[1]);
903 lost = (next_cycle != cycle);
905 if (s->flags & CIP_NO_HEADER) {
906 // Fireface skips transmission just for an isoc cycle corresponding
907 // to empty packet.
908 unsigned int prev_cycle = next_cycle;
910 next_cycle = increment_ohci_cycle_count(next_cycle, 1);
911 lost = (next_cycle != cycle);
913 // Prepare a description for the skipped cycle for
914 // sequence replay.
915 desc->cycle = prev_cycle;
916 desc->syt = 0;
917 desc->data_blocks = 0;
918 desc->data_block_counter = dbc;
919 desc->ctx_payload = NULL;
920 desc = amdtp_stream_next_packet_desc(s, desc);
923 } else if (s->flags & CIP_JUMBO_PAYLOAD) {
924 // OXFW970 skips transmission for several isoc cycles during
925 // asynchronous transaction. The sequence replay is impossible due
926 // to this behaviour.
927 unsigned int safe_cycle = increment_ohci_cycle_count(next_cycle,
928 IR_JUMBO_PAYLOAD_MAX_SKIP_CYCLES);
929 lost = (compare_ohci_cycle_count(safe_cycle, cycle) > 0);
932 dev_err(&s->unit->device, "Detect discontinuity of cycle: %d %d\n",
933 next_cycle, cycle);
934 return -EIO;
938 err = parse_ir_ctx_header(s, cycle, ctx_header, &data_blocks, &dbc, &syt,
945 desc->data_blocks = data_blocks;
946 desc->data_block_counter = dbc;
947 desc->ctx_payload = s->buffer.packets[packet_index].buffer;
949 if (!(s->flags & CIP_DBC_IS_END_EVENT))
950 dbc = (dbc + desc->data_blocks) & 0xff;
952 next_cycle = increment_ohci_cycle_count(next_cycle, 1);
953 desc = amdtp_stream_next_packet_desc(s, desc);
955 ctx_header += s->ctx_data.tx.ctx_header_size / sizeof(*ctx_header);
956 packet_index = (packet_index + 1) % queue_size;
959 s->next_cycle = next_cycle;
960 s->data_block_counter = dbc;
965 static unsigned int compute_syt(unsigned int syt_offset, unsigned int cycle,
966 unsigned int transfer_delay)
970 syt_offset += transfer_delay;
971 syt = ((cycle + syt_offset / TICKS_PER_CYCLE) << 12) |
972 (syt_offset % TICKS_PER_CYCLE);
973 return syt & CIP_SYT_MASK;
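// This is the inverse of compute_syt_offset(). For example, syt_offset = 1000
// with the default transfer delay of 11776 ticks gives 12776 ticks = 4 cycles
// plus 488 ticks, so a packet in cycle 5 carries syt = ((5 + 4) << 12) | 488 =
// 0x91e8 after masking.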
976 static void generate_rx_packet_descs(struct amdtp_stream *s, struct pkt_desc *desc,
977 const __be32 *ctx_header, unsigned int packet_count)
979 struct seq_desc *seq_descs = s->ctx_data.rx.seq.descs;
980 unsigned int seq_size = s->ctx_data.rx.seq.size;
981 unsigned int seq_pos = s->ctx_data.rx.seq.pos;
982 unsigned int dbc = s->data_block_counter;
983 bool aware_syt = !(s->flags & CIP_UNAWARE_SYT);
986 pool_seq_descs(s, seq_descs, seq_size, seq_pos, packet_count);
988 for (i = 0; i < packet_count; ++i) {
989 unsigned int index = (s->packet_index + i) % s->queue_size;
990 const struct seq_desc *seq = seq_descs + seq_pos;
992 desc->cycle = compute_ohci_it_cycle(*ctx_header, s->queue_size);
994 if (aware_syt && seq->syt_offset != CIP_SYT_NO_INFO)
995 desc->syt = compute_syt(seq->syt_offset, desc->cycle, s->transfer_delay);
996 else
997 desc->syt = CIP_SYT_NO_INFO;
999 desc->data_blocks = seq->data_blocks;
1001 if (s->flags & CIP_DBC_IS_END_EVENT)
1002 dbc = (dbc + desc->data_blocks) & 0xff;
1004 desc->data_block_counter = dbc;
1006 if (!(s->flags & CIP_DBC_IS_END_EVENT))
1007 dbc = (dbc + desc->data_blocks) & 0xff;
1009 desc->ctx_payload = s->buffer.packets[index].buffer;
1011 seq_pos = (seq_pos + 1) % seq_size;
1012 desc = amdtp_stream_next_packet_desc(s, desc);
1014 ++ctx_header;
1017 s->data_block_counter = dbc;
1018 s->ctx_data.rx.seq.pos = seq_pos;
1021 static inline void cancel_stream(struct amdtp_stream *s)
1023 s->packet_index = -1;
1025 amdtp_stream_pcm_abort(s);
1026 WRITE_ONCE(s->pcm_buffer_pointer, SNDRV_PCM_POS_XRUN);
1029 static void process_ctx_payloads(struct amdtp_stream *s,
1030 const struct pkt_desc *descs,
1033 struct snd_pcm_substream *pcm;
1034 unsigned int pcm_frames;
1036 pcm = READ_ONCE(s->pcm);
1037 pcm_frames = s->process_ctx_payloads(s, descs, count, pcm);
1038 if (pcm)
1039 update_pcm_pointers(s, pcm, pcm_frames);
1042 static void process_rx_packets(struct fw_iso_context *context, u32 tstamp, size_t header_length,
1043 void *header, void *private_data)
1045 struct amdtp_stream *s = private_data;
1046 const struct amdtp_domain *d = s->domain;
1047 const __be32 *ctx_header = header;
1048 const unsigned int events_per_period = d->events_per_period;
1049 unsigned int event_count = s->ctx_data.rx.event_count;
1050 struct pkt_desc *desc = s->packet_descs_cursor;
1051 unsigned int pkt_header_length;
1052 unsigned int packets;
1056 if (s->packet_index < 0)
1057 return;
1059 // Calculate the number of packets in buffer and check XRUN.
1060 packets = header_length / sizeof(*ctx_header);
1062 generate_rx_packet_descs(s, desc, ctx_header, packets);
1064 process_ctx_payloads(s, desc, packets);
1066 if (!(s->flags & CIP_NO_HEADER))
1067 pkt_header_length = IT_PKT_HEADER_SIZE_CIP;
1068 else
1069 pkt_header_length = 0;
1071 if (s == d->irq_target) {
1072 // In NO_PERIOD_WAKEUP mode, the packets for all IT/IR contexts are processed by
1073 // the tasks of the user process operating the ALSA PCM character device via ioctl(2)
1074 // with some requests, instead of the scheduled hardware IRQ of an IT context.
1075 struct snd_pcm_substream *pcm = READ_ONCE(s->pcm);
1076 need_hw_irq = !pcm || !pcm->runtime->no_period_wakeup;
1078 need_hw_irq = false;
1081 for (i = 0; i < packets; ++i) {
1082 struct {
1083 struct fw_iso_packet params;
1084 __be32 header[CIP_HEADER_QUADLETS];
1085 } template = { {0}, {0} };
1086 bool sched_irq = false;
1088 build_it_pkt_header(s, desc->cycle, &template.params, pkt_header_length,
1089 desc->data_blocks, desc->data_block_counter,
1090 desc->syt, i);
1092 if (s == s->domain->irq_target) {
1093 event_count += desc->data_blocks;
1094 if (event_count >= events_per_period) {
1095 event_count -= events_per_period;
1096 sched_irq = need_hw_irq;
1100 if (queue_out_packet(s, &template.params, sched_irq) < 0) {
1101 cancel_stream(s);
1102 return;
1105 desc = amdtp_stream_next_packet_desc(s, desc);
1108 s->ctx_data.rx.event_count = event_count;
1109 s->packet_descs_cursor = desc;
1112 static void skip_rx_packets(struct fw_iso_context *context, u32 tstamp, size_t header_length,
1113 void *header, void *private_data)
1115 struct amdtp_stream *s = private_data;
1116 struct amdtp_domain *d = s->domain;
1117 const __be32 *ctx_header = header;
1118 unsigned int packets;
1122 if (s->packet_index < 0)
1123 return;
1125 packets = header_length / sizeof(*ctx_header);
1127 cycle = compute_ohci_it_cycle(ctx_header[packets - 1], s->queue_size);
1128 s->next_cycle = increment_ohci_cycle_count(cycle, 1);
1130 for (i = 0; i < packets; ++i) {
1131 struct fw_iso_packet params = {
1133 .payload_length = 0,
1135 bool sched_irq = (s == d->irq_target && i == packets - 1);
1137 if (queue_out_packet(s, &params, sched_irq) < 0) {
1138 cancel_stream(s);
1139 return;
1144 static void irq_target_callback(struct fw_iso_context *context, u32 tstamp, size_t header_length,
1145 void *header, void *private_data);
1147 static void process_rx_packets_intermediately(struct fw_iso_context *context, u32 tstamp,
1148 size_t header_length, void *header, void *private_data)
1150 struct amdtp_stream *s = private_data;
1151 struct amdtp_domain *d = s->domain;
1152 __be32 *ctx_header = header;
1153 const unsigned int queue_size = s->queue_size;
1154 unsigned int packets;
1155 unsigned int offset;
1157 if (s->packet_index < 0)
1158 return;
1160 packets = header_length / sizeof(*ctx_header);
1162 offset = 0;
1163 while (offset < packets) {
1164 unsigned int cycle = compute_ohci_it_cycle(ctx_header[offset], queue_size);
1166 if (compare_ohci_cycle_count(cycle, d->processing_cycle.rx_start) >= 0)
1167 break;
1169 ++offset;
1172 if (offset > 0) {
1173 unsigned int length = sizeof(*ctx_header) * offset;
1175 skip_rx_packets(context, tstamp, length, ctx_header, private_data);
1176 if (amdtp_streaming_error(s))
1179 ctx_header += offset;
1180 header_length -= length;
1183 if (offset < packets) {
1184 s->ready_processing = true;
1185 wake_up(&s->ready_wait);
1187 if (d->replay.enable)
1188 s->ctx_data.rx.cache_pos = 0;
1190 process_rx_packets(context, tstamp, header_length, ctx_header, private_data);
1191 if (amdtp_streaming_error(s))
1194 if (s == d->irq_target)
1195 s->context->callback.sc = irq_target_callback;
1197 s->context->callback.sc = process_rx_packets;
1201 static void process_tx_packets(struct fw_iso_context *context, u32 tstamp, size_t header_length,
1202 void *header, void *private_data)
1204 struct amdtp_stream *s = private_data;
1205 __be32 *ctx_header = header;
1206 struct pkt_desc *desc = s->packet_descs_cursor;
1207 unsigned int packet_count;
1208 unsigned int desc_count;
1212 if (s->packet_index < 0)
1213 return;
1215 // Calculate the number of packets in buffer and check XRUN.
1216 packet_count = header_length / s->ctx_data.tx.ctx_header_size;
1219 err = generate_tx_packet_descs(s, desc, ctx_header, packet_count, &desc_count);
1220 if (err < 0) {
1221 if (err != -EAGAIN) {
1222 cancel_stream(s);
1223 return;
1225 } else {
1226 struct amdtp_domain *d = s->domain;
1228 process_ctx_payloads(s, desc, desc_count);
1230 if (d->replay.enable)
1231 cache_seq(s, desc, desc_count);
1233 for (i = 0; i < desc_count; ++i)
1234 desc = amdtp_stream_next_packet_desc(s, desc);
1235 s->packet_descs_cursor = desc;
1238 for (i = 0; i < packet_count; ++i) {
1239 struct fw_iso_packet params = {0};
1241 if (queue_in_packet(s, &params) < 0) {
1242 cancel_stream(s);
1243 return;
1248 static void drop_tx_packets(struct fw_iso_context *context, u32 tstamp, size_t header_length,
1249 void *header, void *private_data)
1251 struct amdtp_stream *s = private_data;
1252 const __be32 *ctx_header = header;
1253 unsigned int packets;
1257 if (s->packet_index < 0)
1258 return;
1260 packets = header_length / s->ctx_data.tx.ctx_header_size;
1262 ctx_header += (packets - 1) * s->ctx_data.tx.ctx_header_size / sizeof(*ctx_header);
1263 cycle = compute_ohci_cycle_count(ctx_header[1]);
1264 s->next_cycle = increment_ohci_cycle_count(cycle, 1);
1266 for (i = 0; i < packets; ++i) {
1267 struct fw_iso_packet params = {0};
1269 if (queue_in_packet(s, &params) < 0) {
1270 cancel_stream(s);
1271 return;
1276 static void process_tx_packets_intermediately(struct fw_iso_context *context, u32 tstamp,
1277 size_t header_length, void *header, void *private_data)
1279 struct amdtp_stream *s = private_data;
1280 struct amdtp_domain *d = s->domain;
1282 unsigned int packets;
1283 unsigned int offset;
1285 if (s->packet_index < 0)
1286 return;
1288 packets = header_length / s->ctx_data.tx.ctx_header_size;
1290 offset = 0;
1291 ctx_header = header;
1292 while (offset < packets) {
1293 unsigned int cycle = compute_ohci_cycle_count(ctx_header[1]);
1295 if (compare_ohci_cycle_count(cycle, d->processing_cycle.tx_start) >= 0)
1296 break;
1298 ctx_header += s->ctx_data.tx.ctx_header_size / sizeof(__be32);
1299 ++offset;
1302 ctx_header = header;
1304 if (offset > 0) {
1305 size_t length = s->ctx_data.tx.ctx_header_size * offset;
1307 drop_tx_packets(context, tstamp, length, ctx_header, s);
1308 if (amdtp_streaming_error(s))
1311 ctx_header += length / sizeof(*ctx_header);
1312 header_length -= length;
1315 if (offset < packets) {
1316 s->ready_processing = true;
1317 wake_up(&s->ready_wait);
1319 process_tx_packets(context, tstamp, header_length, ctx_header, s);
1320 if (amdtp_streaming_error(s))
1323 context->callback.sc = process_tx_packets;
1327 static void drop_tx_packets_initially(struct fw_iso_context *context, u32 tstamp,
1328 size_t header_length, void *header, void *private_data)
1330 struct amdtp_stream *s = private_data;
1331 struct amdtp_domain *d = s->domain;
1334 unsigned int events;
1337 if (s->packet_index < 0)
1338 return;
1340 count = header_length / s->ctx_data.tx.ctx_header_size;
1342 // Attempt to detect any event in the batch of packets.
1343 events = 0;
1344 ctx_header = header;
1345 for (i = 0; i < count; ++i) {
1346 unsigned int payload_quads =
1347 (be32_to_cpu(*ctx_header) >> ISO_DATA_LENGTH_SHIFT) / sizeof(__be32);
1348 unsigned int data_blocks;
1350 if (s->flags & CIP_NO_HEADER) {
1351 data_blocks = payload_quads / s->data_block_quadlets;
1353 __be32 *cip_headers = ctx_header + IR_CTX_HEADER_DEFAULT_QUADLETS;
1355 if (payload_quads < CIP_HEADER_QUADLETS) {
1358 payload_quads -= CIP_HEADER_QUADLETS;
1360 if (s->flags & CIP_UNAWARE_SYT) {
1361 data_blocks = payload_quads / s->data_block_quadlets;
1363 u32 cip1 = be32_to_cpu(cip_headers[1]);
1365 // A NODATA packet can include any data blocks, but they are
1366 // not available as events.
1367 if ((cip1 & CIP_NO_DATA) == CIP_NO_DATA)
1368 continue;
1370 data_blocks = payload_quads / s->data_block_quadlets;
1375 events += data_blocks;
1377 ctx_header += s->ctx_data.tx.ctx_header_size / sizeof(__be32);
1380 drop_tx_packets(context, tstamp, header_length, header, s);
1382 if (events > 0)
1383 s->ctx_data.tx.event_starts = true;
1385 // Decide the cycle count to begin processing content of packet in IR contexts.
1387 unsigned int stream_count = 0;
1388 unsigned int event_starts_count = 0;
1389 unsigned int cycle = UINT_MAX;
1391 list_for_each_entry(s, &d->streams, list) {
1392 if (s->direction == AMDTP_IN_STREAM) {
1393 ++stream_count;
1394 if (s->ctx_data.tx.event_starts)
1395 ++event_starts_count;
1399 if (stream_count == event_starts_count) {
1400 unsigned int next_cycle;
1402 list_for_each_entry(s, &d->streams, list) {
1403 if (s->direction != AMDTP_IN_STREAM)
1404 continue;
1406 next_cycle = increment_ohci_cycle_count(s->next_cycle,
1407 d->processing_cycle.tx_init_skip);
1408 if (cycle == UINT_MAX ||
1409 compare_ohci_cycle_count(next_cycle, cycle) > 0)
1410 cycle = next_cycle;
1412 s->context->callback.sc = process_tx_packets_intermediately;
1415 d->processing_cycle.tx_start = cycle;
1420 static void process_ctxs_in_domain(struct amdtp_domain *d)
1422 struct amdtp_stream *s;
1424 list_for_each_entry(s, &d->streams, list) {
1425 if (s != d->irq_target && amdtp_stream_running(s))
1426 fw_iso_context_flush_completions(s->context);
1428 if (amdtp_streaming_error(s))
1429 goto error;
1432 return;
1433 error:
1434 if (amdtp_stream_running(d->irq_target))
1435 cancel_stream(d->irq_target);
1437 list_for_each_entry(s, &d->streams, list) {
1438 if (amdtp_stream_running(s))
1439 cancel_stream(s);
1443 static void irq_target_callback(struct fw_iso_context *context, u32 tstamp, size_t header_length,
1444 void *header, void *private_data)
1446 struct amdtp_stream *s = private_data;
1447 struct amdtp_domain *d = s->domain;
1449 process_rx_packets(context, tstamp, header_length, header, private_data);
1450 process_ctxs_in_domain(d);
1453 static void irq_target_callback_intermediately(struct fw_iso_context *context, u32 tstamp,
1454 size_t header_length, void *header, void *private_data)
1456 struct amdtp_stream *s = private_data;
1457 struct amdtp_domain *d = s->domain;
1459 process_rx_packets_intermediately(context, tstamp, header_length, header, private_data);
1460 process_ctxs_in_domain(d);
1463 static void irq_target_callback_skip(struct fw_iso_context *context, u32 tstamp,
1464 size_t header_length, void *header, void *private_data)
1466 struct amdtp_stream *s = private_data;
1467 struct amdtp_domain *d = s->domain;
1468 bool ready_to_start;
1470 skip_rx_packets(context, tstamp, header_length, header, private_data);
1471 process_ctxs_in_domain(d);
1473 if (d->replay.enable && !d->replay.on_the_fly) {
1474 unsigned int rx_count = 0;
1475 unsigned int rx_ready_count = 0;
1476 struct amdtp_stream *rx;
1478 list_for_each_entry(rx, &d->streams, list) {
1479 struct amdtp_stream *tx;
1480 unsigned int cached_cycles;
1482 if (rx->direction != AMDTP_OUT_STREAM)
1483 continue;
1484 ++rx_count;
1486 tx = rx->ctx_data.rx.replay_target;
1487 cached_cycles = calculate_cached_cycle_count(tx, 0);
1488 if (cached_cycles > tx->ctx_data.tx.cache.size / 2)
1489 ++rx_ready_count;
1492 ready_to_start = (rx_count == rx_ready_count);
1494 ready_to_start = true;
1497 // Decide the cycle count to begin processing content of packet in IT contexts. All of IT
1498 // contexts are expected to start and get callback when reaching here.
1499 if (ready_to_start) {
1500 unsigned int cycle = s->next_cycle;
1501 list_for_each_entry(s, &d->streams, list) {
1502 if (s->direction != AMDTP_OUT_STREAM)
1503 continue;
1505 if (compare_ohci_cycle_count(s->next_cycle, cycle) > 0)
1506 cycle = s->next_cycle;
1508 if (s == d->irq_target)
1509 s->context->callback.sc = irq_target_callback_intermediately;
1511 s->context->callback.sc = process_rx_packets_intermediately;
1514 d->processing_cycle.rx_start = cycle;
1518 // This is executed one time. For an in-stream, the first packet has arrived. For an
1519 // out-stream, it is prepared to transmit the first packet.
1520 static void amdtp_stream_first_callback(struct fw_iso_context *context,
1521 u32 tstamp, size_t header_length,
1522 void *header, void *private_data)
1524 struct amdtp_stream *s = private_data;
1525 struct amdtp_domain *d = s->domain;
1527 if (s->direction == AMDTP_IN_STREAM) {
1528 context->callback.sc = drop_tx_packets_initially;
1530 if (s == d->irq_target)
1531 context->callback.sc = irq_target_callback_skip;
1533 context->callback.sc = skip_rx_packets;
1536 context->callback.sc(context, tstamp, header_length, header, s);
1540 * amdtp_stream_start - start transferring packets
1541 * @s: the AMDTP stream to start
1542 * @channel: the isochronous channel on the bus
1543 * @speed: firewire speed code
1544 * @queue_size: The number of packets in the queue.
1545 * @idle_irq_interval: the interval at which to queue packets with IRQ during the initial state.
1547 * The stream cannot be started until it has been configured with
1548 * amdtp_stream_set_parameters() and it must be started before any PCM or MIDI
1549 * device can be started.
1551 static int amdtp_stream_start(struct amdtp_stream *s, int channel, int speed,
1552 unsigned int queue_size, unsigned int idle_irq_interval)
1554 bool is_irq_target = (s == s->domain->irq_target);
1555 unsigned int ctx_header_size;
1556 unsigned int max_ctx_payload_size;
1557 enum dma_data_direction dir;
1558 struct pkt_desc *descs;
1559 int i, type, tag, err;
1561 mutex_lock(&s->mutex);
1563 if (WARN_ON(amdtp_stream_running(s) ||
1564 (s->data_block_quadlets < 1))) {
1569 if (s->direction == AMDTP_IN_STREAM) {
1570 // NOTE: IT context should be used for constant IRQ.
1571 if (is_irq_target) {
1576 s->data_block_counter = UINT_MAX;
1577 } else {
1578 s->data_block_counter = 0;
1581 // initialize packet buffer.
1582 if (s->direction == AMDTP_IN_STREAM) {
1583 dir = DMA_FROM_DEVICE;
1584 type = FW_ISO_CONTEXT_RECEIVE;
1585 if (!(s->flags & CIP_NO_HEADER))
1586 ctx_header_size = IR_CTX_HEADER_SIZE_CIP;
1588 ctx_header_size = IR_CTX_HEADER_SIZE_NO_CIP;
1590 dir = DMA_TO_DEVICE;
1591 type = FW_ISO_CONTEXT_TRANSMIT;
1592 ctx_header_size = 0; // No effect for IT context.
1594 max_ctx_payload_size = amdtp_stream_get_max_ctx_payload_size(s);
1596 err = iso_packets_buffer_init(&s->buffer, s->unit, queue_size, max_ctx_payload_size, dir);
1599 s->queue_size = queue_size;
1601 s->context = fw_iso_context_create(fw_parent_device(s->unit)->card,
1602 type, channel, speed, ctx_header_size,
1603 amdtp_stream_first_callback, s);
1604 if (IS_ERR(s->context)) {
1605 err = PTR_ERR(s->context);
1607 dev_err(&s->unit->device,
1608 "no free stream on this controller\n");
1612 amdtp_stream_update(s);
1614 if (s->direction == AMDTP_IN_STREAM) {
1615 s->ctx_data.tx.max_ctx_payload_length = max_ctx_payload_size;
1616 s->ctx_data.tx.ctx_header_size = ctx_header_size;
1617 s->ctx_data.tx.event_starts = false;
1619 if (s->domain->replay.enable) {
1620 // Since struct fw_iso_context.drop_overflow_headers is false, it's
1621 // possible to cache unexpectedly many descriptors.
1622 s->ctx_data.tx.cache.size = max_t(unsigned int, s->syt_interval * 2,
1623 queue_size * 3 / 2);
1624 s->ctx_data.tx.cache.pos = 0;
1625 s->ctx_data.tx.cache.descs = kcalloc(s->ctx_data.tx.cache.size,
1626 sizeof(*s->ctx_data.tx.cache.descs), GFP_KERNEL);
1627 if (!s->ctx_data.tx.cache.descs) {
1633 static const struct {
1634 unsigned int data_block;
1635 unsigned int syt_offset;
1636 } *entry, initial_state[] = {
1637 [CIP_SFC_32000] = { 4, 3072 },
1638 [CIP_SFC_48000] = { 6, 1024 },
1639 [CIP_SFC_96000] = { 12, 1024 },
1640 [CIP_SFC_192000] = { 24, 1024 },
1641 [CIP_SFC_44100] = { 0, 67 },
1642 [CIP_SFC_88200] = { 0, 67 },
1643 [CIP_SFC_176400] = { 0, 67 },
1646 s->ctx_data.rx.seq.descs = kcalloc(queue_size, sizeof(*s->ctx_data.rx.seq.descs), GFP_KERNEL);
1647 if (!s->ctx_data.rx.seq.descs) {
1651 s->ctx_data.rx.seq.size = queue_size;
1652 s->ctx_data.rx.seq.pos = 0;
1654 entry = &initial_state[s->sfc];
1655 s->ctx_data.rx.data_block_state = entry->data_block;
1656 s->ctx_data.rx.syt_offset_state = entry->syt_offset;
1657 s->ctx_data.rx.last_syt_offset = TICKS_PER_CYCLE;
1659 s->ctx_data.rx.event_count = 0;
1662 if (s->flags & CIP_NO_HEADER)
1663 s->tag = TAG_NO_CIP_HEADER;
1664 else
1665 s->tag = TAG_CIP;
1667 descs = kcalloc(s->queue_size, sizeof(*descs), GFP_KERNEL);
1672 s->packet_descs = descs;
1674 INIT_LIST_HEAD(&s->packet_descs_list);
1675 for (i = 0; i < s->queue_size; ++i) {
1676 INIT_LIST_HEAD(&descs->link);
1677 list_add_tail(&descs->link, &s->packet_descs_list);
1678 ++descs;
1680 s->packet_descs_cursor = list_first_entry(&s->packet_descs_list, struct pkt_desc, link);
1682 s->packet_index = 0;
1683 do {
1684 struct fw_iso_packet params;
1686 if (s->direction == AMDTP_IN_STREAM) {
1687 err = queue_in_packet(s, &params);
1689 bool sched_irq = false;
1691 params.header_length = 0;
1692 params.payload_length = 0;
1694 if (is_irq_target) {
1695 sched_irq = !((s->packet_index + 1) %
1696 idle_irq_interval);
1699 err = queue_out_packet(s, &params, sched_irq);
1703 } while (s->packet_index > 0);
1705 /* NOTE: TAG1 matches CIP. This just affects in-stream. */
1706 tag = FW_ISO_CONTEXT_MATCH_TAG1;
1707 if ((s->flags & CIP_EMPTY_WITH_TAG0) || (s->flags & CIP_NO_HEADER))
1708 tag |= FW_ISO_CONTEXT_MATCH_TAG0;
1710 s->ready_processing = false;
1711 err = fw_iso_context_start(s->context, -1, 0, tag);
1715 mutex_unlock(&s->mutex);
1719 kfree(s->packet_descs);
1720 s->packet_descs = NULL;
1722 if (s->direction == AMDTP_OUT_STREAM) {
1723 kfree(s->ctx_data.rx.seq.descs);
1725 if (s->domain->replay.enable)
1726 kfree(s->ctx_data.tx.cache.descs);
1728 fw_iso_context_destroy(s->context);
1729 s->context = ERR_PTR(-1);
1731 iso_packets_buffer_destroy(&s->buffer, s->unit);
1733 mutex_unlock(&s->mutex);
1739 * amdtp_domain_stream_pcm_pointer - get the PCM buffer position
1740 * @d: the AMDTP domain.
1741 * @s: the AMDTP stream that transports the PCM data
1743 * Returns the current buffer position, in frames.
1745 unsigned long amdtp_domain_stream_pcm_pointer(struct amdtp_domain *d,
1746 struct amdtp_stream *s)
1748 struct amdtp_stream *irq_target = d->irq_target;
1750 // Process isochronous packets queued till recent isochronous cycle to handle PCM frames.
1751 if (irq_target && amdtp_stream_running(irq_target)) {
1752 // In software IRQ context, the call causes dead-lock to disable the tasklet
1753 // synchronously.
1754 if (!in_softirq())
1755 fw_iso_context_flush_completions(irq_target->context);
1758 return READ_ONCE(s->pcm_buffer_pointer);
1760 EXPORT_SYMBOL_GPL(amdtp_domain_stream_pcm_pointer);
1763 * amdtp_domain_stream_pcm_ack - acknowledge queued PCM frames
1764 * @d: the AMDTP domain.
1765 * @s: the AMDTP stream that transfers the PCM frames
1767 * Returns zero always.
1769 int amdtp_domain_stream_pcm_ack(struct amdtp_domain *d, struct amdtp_stream *s)
1771 struct amdtp_stream *irq_target = d->irq_target;
1773 // Process isochronous packets for recent isochronous cycle to handle
1774 // queued PCM frames.
1775 if (irq_target && amdtp_stream_running(irq_target))
1776 fw_iso_context_flush_completions(irq_target->context);
1780 EXPORT_SYMBOL_GPL(amdtp_domain_stream_pcm_ack);
1783 * amdtp_stream_update - update the stream after a bus reset
1784 * @s: the AMDTP stream
1786 void amdtp_stream_update(struct amdtp_stream *s)
1789 WRITE_ONCE(s->source_node_id_field,
1790 (fw_parent_device(s->unit)->card->node_id << CIP_SID_SHIFT) & CIP_SID_MASK);
1792 EXPORT_SYMBOL(amdtp_stream_update);
1795 * amdtp_stream_stop - stop sending packets
1796 * @s: the AMDTP stream to stop
1798 * All PCM and MIDI devices of the stream must be stopped before the stream
1799 * itself can be stopped.
1801 static void amdtp_stream_stop(struct amdtp_stream *s)
1803 mutex_lock(&s->mutex);
1805 if (!amdtp_stream_running(s)) {
1806 mutex_unlock(&s->mutex);
1810 fw_iso_context_stop(s->context);
1811 fw_iso_context_destroy(s->context);
1812 s->context = ERR_PTR(-1);
1813 iso_packets_buffer_destroy(&s->buffer, s->unit);
1814 kfree(s->packet_descs);
1815 s->packet_descs = NULL;
1817 if (s->direction == AMDTP_OUT_STREAM) {
1818 kfree(s->ctx_data.rx.seq.descs);
1820 if (s->domain->replay.enable)
1821 kfree(s->ctx_data.tx.cache.descs);
1824 mutex_unlock(&s->mutex);
1828 * amdtp_stream_pcm_abort - abort the running PCM device
1829 * @s: the AMDTP stream about to be stopped
1831 * If the isochronous stream needs to be stopped asynchronously, call this
1832 * function first to stop the PCM device.
1834 void amdtp_stream_pcm_abort(struct amdtp_stream *s)
1836 struct snd_pcm_substream *pcm;
1838 pcm = READ_ONCE(s->pcm);
1839 if (pcm)
1840 snd_pcm_stop_xrun(pcm);
1842 EXPORT_SYMBOL(amdtp_stream_pcm_abort);
1845 * amdtp_domain_init - initialize an AMDTP domain structure
1846 * @d: the AMDTP domain to initialize.
1848 int amdtp_domain_init(struct amdtp_domain *d)
1850 INIT_LIST_HEAD(&d->streams);
1852 d->events_per_period = 0;
1854 return 0;
1856 EXPORT_SYMBOL_GPL(amdtp_domain_init);
1859 * amdtp_domain_destroy - destroy an AMDTP domain structure
1860 * @d: the AMDTP domain to destroy.
1862 void amdtp_domain_destroy(struct amdtp_domain *d)
1864 // At present nothing to do.
1867 EXPORT_SYMBOL_GPL(amdtp_domain_destroy);
1870 * amdtp_domain_add_stream - register isoc context into the domain.
1871 * @d: the AMDTP domain.
1872 * @s: the AMDTP stream.
1873 * @channel: the isochronous channel on the bus.
1874 * @speed: firewire speed code.
1876 int amdtp_domain_add_stream(struct amdtp_domain *d, struct amdtp_stream *s,
1877 int channel, int speed)
1879 struct amdtp_stream *tmp;
1881 list_for_each_entry(tmp, &d->streams, list) {
1882 if (s == tmp)
1883 return -EBUSY;
1886 list_add(&s->list, &d->streams);
1888 s->channel = channel;
1889 s->speed = speed;
1890 s->domain = d;
1892 return 0;
1894 EXPORT_SYMBOL_GPL(amdtp_domain_add_stream);
1896 // Make the reference from rx stream to tx stream for sequence replay. When the number of tx streams
1897 // is less than the number of rx streams, the first tx stream is selected.
1898 static int make_association(struct amdtp_domain *d)
1900 unsigned int dst_index = 0;
1901 struct amdtp_stream *rx;
1903 // Make association to replay target.
1904 list_for_each_entry(rx, &d->streams, list) {
1905 if (rx->direction == AMDTP_OUT_STREAM) {
1906 unsigned int src_index = 0;
1907 struct amdtp_stream *tx = NULL;
1908 struct amdtp_stream *s;
1910 list_for_each_entry(s, &d->streams, list) {
1911 if (s->direction == AMDTP_IN_STREAM) {
1912 if (dst_index == src_index) {
1921 // Select the first entry.
1922 list_for_each_entry(s, &d->streams, list) {
1923 if (s->direction == AMDTP_IN_STREAM) {
1928 // No target is available to replay sequence.
1933 rx->ctx_data.rx.replay_target = tx;
1943 * amdtp_domain_start - start sending packets for isoc context in the domain.
1944 * @d: the AMDTP domain.
1945 * @tx_init_skip_cycles: the number of cycles to skip processing packets at the initial stage of IR
1946 * contexts.
1947 * @replay_seq: whether to replay the sequence of packet in IR context for the sequence of packet in
1948 * IT context.
1949 * @replay_on_the_fly: transfer rx packets according to nominal frequency, then begin to replay
1950 * according to arrival of events in tx packets.
1952 int amdtp_domain_start(struct amdtp_domain *d, unsigned int tx_init_skip_cycles, bool replay_seq,
1953 bool replay_on_the_fly)
1955 unsigned int events_per_buffer = d->events_per_buffer;
1956 unsigned int events_per_period = d->events_per_period;
1957 unsigned int queue_size;
1958 struct amdtp_stream *s;
1963 err = make_association(d);
1967 d->replay.enable = replay_seq;
1968 d->replay.on_the_fly = replay_on_the_fly;
1970 // Select an IT context as IRQ target.
1971 list_for_each_entry(s, &d->streams, list) {
1972 if (s->direction == AMDTP_OUT_STREAM) {
1981 d->processing_cycle.tx_init_skip = tx_init_skip_cycles;
1983 // This is the case in which AMDTP streams in the domain run just for a MIDI
1984 // substream. Use the number of events equivalent to 10 msec as the
1985 // interval of hardware IRQ.
1986 if (events_per_period == 0)
1987 events_per_period = amdtp_rate_table[d->irq_target->sfc] / 100;
1988 if (events_per_buffer == 0)
1989 events_per_buffer = events_per_period * 3;
1991 queue_size = DIV_ROUND_UP(CYCLES_PER_SECOND * events_per_buffer,
1992 amdtp_rate_table[d->irq_target->sfc]);
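// For example, at 48.0 kHz with the defaults above (no PCM substream),
// events_per_period = 48000 / 100 = 480 events (10 msec) and events_per_buffer =
// 1440 events, so queue_size = DIV_ROUND_UP(8000 * 1440, 48000) = 240 packets,
// i.e. 30 msec of isochronous cycles.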
1994 list_for_each_entry(s, &d->streams, list) {
1995 unsigned int idle_irq_interval = 0;
1997 if (s->direction == AMDTP_OUT_STREAM && s == d->irq_target) {
1998 idle_irq_interval = DIV_ROUND_UP(CYCLES_PER_SECOND * events_per_period,
1999 amdtp_rate_table[d->irq_target->sfc]);
2002 // Starts immediately but actually DMA context starts several hundred cycles later.
2003 err = amdtp_stream_start(s, s->channel, s->speed, queue_size, idle_irq_interval);
2010 list_for_each_entry(s, &d->streams, list)
2011 amdtp_stream_stop(s);
2014 EXPORT_SYMBOL_GPL(amdtp_domain_start);
2017 * amdtp_domain_stop - stop sending packets for isoc context in the same domain.
2018 * @d: the AMDTP domain to which the isoc contexts belong.
2020 void amdtp_domain_stop(struct amdtp_domain *d)
2022 struct amdtp_stream *s, *next;
2024 if (d->irq_target)
2025 amdtp_stream_stop(d->irq_target);
2027 list_for_each_entry_safe(s, next, &d->streams, list) {
2028 list_del(&s->list);
2030 if (s != d->irq_target)
2031 amdtp_stream_stop(s);
2034 d->events_per_period = 0;
2035 d->irq_target = NULL;
2037 EXPORT_SYMBOL_GPL(amdtp_domain_stop);