-
Notifications
You must be signed in to change notification settings - Fork 4
/
dma.c
68 lines (52 loc) · 1.82 KB
/
dma.c
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
#include "gb.h"
#define GB_DMA_LENGTH_BYTES (GB_GPU_MAX_SPRITES * 4)
/* Put the OAM DMA controller back into its idle power-on state:
 * no transfer in flight, source address and copy offset cleared. */
void gb_dma_reset(struct gb *gb) {
     struct gb_dma *dma = &gb->dma;

     dma->position = 0;
     dma->source = 0;
     dma->running = false;
}
/* Advance the OAM DMA state machine by the number of cycles elapsed since
 * the last resync, copying the corresponding bytes into OAM, then schedule
 * the next synchronization point. */
void gb_dma_sync(struct gb *gb) {
     struct gb_dma *dma = &gb->dma;
     int32_t elapsed = gb_sync_resync(gb, GB_SYNC_DMA);

     if (!dma->running) {
          /* Idle: no work until a transfer is started */
          gb_sync_next(gb, GB_SYNC_DMA, GB_SYNC_NEVER);
          return;
     }

     /* One byte is copied every 4 cycles (2 in double-speed mode). The CPU
      * always increments the counter in multiples of that period, so the
      * division below is exact. */
     unsigned cycles_per_byte = 4 >> gb->double_speed;
     unsigned budget = elapsed / cycles_per_byte;

     for (; budget > 0 && dma->position < GB_DMA_LENGTH_BYTES; budget--) {
          gb->gpu.oam[dma->position] =
               gb_memory_readb(gb, dma->source + dma->position);
          dma->position++;
     }

     if (dma->position >= GB_DMA_LENGTH_BYTES) {
          /* Transfer finished */
          dma->running = false;
          gb_sync_next(gb, GB_SYNC_DMA, GB_SYNC_NEVER);
     } else {
          /* Come back when the next byte is due */
          gb_sync_next(gb, GB_SYNC_DMA, cycles_per_byte);
     }
}
/* Begin an OAM DMA transfer from `source << 8`. The transfer is refused
 * (left not running) for memory regions the DMA engine cannot reach. */
void gb_dma_start(struct gb *gb, uint8_t source) {
     struct gb_dma *dma = &gb->dma;

     /* Bring the state machine up to date in case a transfer was already
      * in progress */
     gb_dma_sync(gb);

     dma->source = (uint16_t)source << 8;
     dma->position = 0;

     /* The GBC can copy directly from the cartridge, DMG only from RAM;
      * neither can copy from 0xe000 and above. */
     bool reachable = (gb->gbc || dma->source >= 0x8000U)
          && dma->source < 0xe000U;
     dma->running = reachable;

     gb_dma_sync(gb);
}