+
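+/* Process completed descriptor blocks: starting from the last handled
+ * block, follow each branch_address, hand every block to the context's
+ * callback and advance the tail pointers, until the callback reports
+ * an unfinished block or we reach a NULL branch address (the end of
+ * the appended program). */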
+static void context_tasklet(unsigned long data)
+{
+ struct context *ctx = (struct context *) data;
+ struct fw_ohci *ohci = ctx->ohci;
+ struct descriptor *d, *last;
+ u32 address;
+ int z;
+
+ dma_sync_single_for_cpu(ohci->card.device, ctx->buffer_bus,
+ ctx->buffer_size, DMA_TO_DEVICE);
+
+ d = ctx->tail_descriptor;
+ last = ctx->tail_descriptor_last;
+
+ while (last->branch_address != 0) {
+ address = le32_to_cpu(last->branch_address);
+ z = address & 0xf;
+ d = ctx->buffer + (address - ctx->buffer_bus) / sizeof *d;
+ last = (z == 2) ? d : d + z - 1;
+
+ if (!ctx->callback(ctx, d, last))
+ break;
+
+ ctx->tail_descriptor = d;
+ ctx->tail_descriptor_last = last;
+ }
+}
+
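+/* Allocate the descriptor buffer for a context, map it for DMA and set
+ * up the tasklet, callback and head/tail pointers. */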
+static int
+context_init(struct context *ctx, struct fw_ohci *ohci,
+ size_t buffer_size, u32 regs,
+ descriptor_callback_t callback)
+{
+ ctx->ohci = ohci;
+ ctx->regs = regs;
+ ctx->buffer_size = buffer_size;
+ ctx->buffer = kmalloc(buffer_size, GFP_KERNEL);
+ if (ctx->buffer == NULL)
+ return -ENOMEM;
+
+ tasklet_init(&ctx->tasklet, context_tasklet, (unsigned long)ctx);
+ ctx->callback = callback;
+
+ ctx->buffer_bus =
+ dma_map_single(ohci->card.device, ctx->buffer,
+ buffer_size, DMA_TO_DEVICE);
+ if (dma_mapping_error(ctx->buffer_bus)) {
+ kfree(ctx->buffer);
+ return -ENOMEM;
+ }
+
+ ctx->head_descriptor = ctx->buffer;
+ ctx->prev_descriptor = ctx->buffer;
+ ctx->tail_descriptor = ctx->buffer;
+ ctx->tail_descriptor_last = ctx->buffer;
+
+ /* We put a dummy descriptor in the buffer that has a NULL
+ * branch address and looks like it's been sent. That way we
+ * have a descriptor to append DMA programs to. Also, the
+ * ring buffer invariant is that it always has at least one
+ * element so that head == tail means buffer full. */
+
+ memset(ctx->head_descriptor, 0, sizeof *ctx->head_descriptor);
+ ctx->head_descriptor->control = cpu_to_le16(descriptor_output_last);
+ ctx->head_descriptor->transfer_status = cpu_to_le16(0x8011);
+ ctx->head_descriptor++;
+
+ return 0;
+}
+
+static void
+context_release(struct context *ctx)
+{
+ struct fw_card *card = &ctx->ohci->card;
+
+ dma_unmap_single(card->device, ctx->buffer_bus,
+ ctx->buffer_size, DMA_TO_DEVICE);
+ kfree(ctx->buffer);
+}
+
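+/* Reserve z contiguous descriptors in the ring buffer.  The block must
+ * fit either between head and tail, between head and the end of the
+ * buffer, or (wrapping around) between the start of the buffer and
+ * tail; return NULL if no such space is available.  On success the
+ * descriptors are zeroed and their bus address is returned in d_bus. */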
+static struct descriptor *
+context_get_descriptors(struct context *ctx, int z, dma_addr_t *d_bus)
+{
+ struct descriptor *d, *tail, *end;
+
+ d = ctx->head_descriptor;
+ tail = ctx->tail_descriptor;
+ end = ctx->buffer + ctx->buffer_size / sizeof(struct descriptor);
+
+ if (d + z <= tail) {
+ goto has_space;
+ } else if (d > tail && d + z <= end) {
+ goto has_space;
+ } else if (d > tail && ctx->buffer + z <= tail) {
+ d = ctx->buffer;
+ goto has_space;
+ }
+
+ return NULL;
+
+ has_space:
+ memset(d, 0, z * sizeof *d);
+ *d_bus = ctx->buffer_bus + (d - ctx->buffer) * sizeof *d;
+
+ return d;
+}
+
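+/* Start the context: point its command pointer at the block the
+ * current tail branches to, clear the context control register and set
+ * the RUN bit together with any extra bits (e.g. a cycle match) passed
+ * in by the caller. */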
+static void context_run(struct context *ctx, u32 cycle_match)
+{
+ struct fw_ohci *ohci = ctx->ohci;
+
+ reg_write(ohci, command_ptr(ctx->regs),
+ le32_to_cpu(ctx->tail_descriptor_last->branch_address));
+ reg_write(ohci, control_clear(ctx->regs), ~0);
+ reg_write(ohci, control_set(ctx->regs), CONTEXT_RUN | cycle_match);
+ flush_writes(ohci);
+}
+
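+/* Link a block of z descriptors (plus extra scratch descriptors) into
+ * the ring: advance the head pointer, patch the previous block's
+ * branch_address to point at the new block, sync the buffer for the
+ * device and set the WAKE bit so the controller picks it up. */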
+static void context_append(struct context *ctx,
+ struct descriptor *d, int z, int extra)
+{
+ dma_addr_t d_bus;
+
+ d_bus = ctx->buffer_bus + (d - ctx->buffer) * sizeof *d;
+
+ ctx->head_descriptor = d + z + extra;
+ ctx->prev_descriptor->branch_address = cpu_to_le32(d_bus | z);
+ ctx->prev_descriptor = z == 2 ? d : d + z - 1;
+
+ dma_sync_single_for_device(ctx->ohci->card.device, ctx->buffer_bus,
+ ctx->buffer_size, DMA_TO_DEVICE);
+
+ reg_write(ctx->ohci, control_set(ctx->regs), CONTEXT_WAKE);
+ flush_writes(ctx->ohci);
+}
+
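+/* Clear the RUN bit and warn if the context still reports itself as
+ * active afterwards. */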
+static void context_stop(struct context *ctx)
+{
+ u32 reg;
+
+ reg_write(ctx->ohci, control_clear(ctx->regs), CONTEXT_RUN);
+
+ reg = reg_read(ctx->ohci, control_set(ctx->regs));
+ if (reg & CONTEXT_ACTIVE)
+ fw_notify("Tried to stop context, but it is still active "
+ "(0x%08x).\n", reg);
+}