# uskel garbage collector -- two-space copying (Cheney-style) collector
# for a continuation-passing runtime.
# Target: x86-64 Linux, GNU as, AT&T syntax.
# Convention note: infotable and object offsets in this file are written
# as octal literals (e.g. -020 = -16, 010 = 8).
# Include guard: `_gc_s_file` is defined as a label just below, so a
# second inclusion of this file skips everything up to the matching
# .endif at the bottom.
.ifndef _gc_s_file

_gc_s_file:
.section .bss

# GC state cells.  `cell` is a macro defined elsewhere in the project --
# presumably it reserves one 8-byte word (TODO confirm).

_write_region_start:
	# begin of the active memory area
	cell 0
_write_region_end:
	# end of the active memory area (%rsp kinda starts here and goes down
	# towards the start)
	cell 0
_gc_trigger:
	# point in memory where the gc will trigger (we don't necessarily wait
	# for the write region to fill up!)
	cell 0

_gc_last_size:
	# how much data we evacuated last time
	cell 0
_gc_min_alloc:
	# minimum possible allocation
	cell 0 # tunable constant
_gc_grow_ratio:
	# 256th's of the minimal amount of memory increment compared to the
	# last time. New minimal amount is compared as:
	# (ratio * last size) >> 8
	cell 0 # tunable constant
_gc_shrink_ratio:
	# 256th's of the ratio of post-gc still-free to-space that should be
	# considered for discarding
	cell 0

_gc_region_start:
	# in GC, this region is being evacuated and will eventually disappear
	cell 0
_gc_region_end:
	# end of the disappear region
	cell 0

_gc_backup_thunk:
	# backup of %rbp (the current thunk) so that we can use the register
	# for other nonsense during collection.
	# NOTE(review): the original comment claimed %rsi is saved here, but
	# _uskel_gc stores %rbp into this cell -- comment fixed to match code.
	cell 0
_gc_backup_cont:
	# backup of %rsi (the continuation) for the same reason
	cell 0
.section .text

# needs_alloc amount -- allocation guard emitted before a thunk bump-
# allocates `amount` bytes downward through %rsp.
# Computes the free space left in the write region
# (%rsp - _write_region_start) and tail-jumps into the collector when it
# is smaller than \amount.  Clobbers %rax and flags.
# NOTE(review): _gc_trigger is computed by the collector but this guard
# compares against _write_region_start -- confirm whether the early
# trigger point was meant to be used here instead.
.macro needs_alloc amount
	mov %rsp, %rax
	sub _write_region_start, %rax
	cmp \amount, %rax
	jb _uskel_gc
.endm
# _uskel_alloc -- mmap(2) a fresh write region.
# In:   %rsi = continuation address, jumped to on success
# Out:  _write_region_start/_write_region_end describe the new region,
#       %rsp = _write_region_end (allocation bumps %rsp downwards)
# Clobbers: %rax, %rdx, %rdi, %rsi, %r8-%r10, %r14, %r15, flags
# The requested size is max(min_alloc,
#                           min_alloc + (last_size * grow_ratio) >> 8,
#                           bytes still live in the from-space).
_uskel_alloc:
	mov %rsi, %r15 # %rsi is the return address; back it up

	# calculate the desired size to %r14
	mov _gc_min_alloc, %r14
	#add _gc_region_end, %r14
	#sub _gc_region_start, %r14

	# check if the desired size isn't greater because of the last gc use
	mov _gc_last_size, %rax
	mulq _gc_grow_ratio # %rdx:%rax = last_size * ratio; clobbers %rdx
	shr $8, %rax # ratio is in 256ths
	add _gc_min_alloc, %rax
	cmp %r14, %rax
	cmova %rax, %r14 # %r14 = max(%r14, %rax), unsigned

	# check if we don't need even more space because we need to evacuate stuff
	mov _gc_region_end, %rax
	sub %rsp, %rax # trick -- if we counted from gc region start, allocated memory could never shrink
	cmp %r14, %rax
	cmova %rax, %r14

	and $0xfffffffffffffff8, %r14 # align down to 8 bytes

alloc_goes_mmap:
	mov $9, %rax # mmap
	mov $0, %rdi # addr = NULL
	mov %r14, %rsi # len = %r14
	mov $0b11, %rdx # prot = PROT_READ 0b1 | PROT_WRITE 0b10
	mov $0x22, %r10 # flags = MAP_PRIVATE 0x2 | MAP_ANONYMOUS 0x20
	mov $-1, %r8 # fd = -1
	mov $0, %r9 # off = 0
	syscall

	# Bail out if the kernel refused the mapping.  Previously the -errno
	# return would have been installed as the region base and loaded into
	# %rsp, and the next heap write would fault far from the cause.
	cmp $-4095, %rax # Linux: %rax in [-4095, -1] signals -errno
	jae alloc_failed # unsigned compare catches exactly that range

	# store the results
	mov %rax, _write_region_start
	add %r14, %rax
	mov %rax, _write_region_end
	mov %rax, %rsp # initialize writing into the new region

	jmp *%r15

alloc_failed:
	# allocation failed -- nothing sane to resume into, exit hard
	mov $60, %rax # exit
	mov $12, %rdi # status = 12 (ENOMEM)
	syscall
# _uskel_gc_init -- one-time collector bring-up.
# In: %rsi = continuation to resume once the first region exists.
# Sets the tunable constants, allocates the initial write region via
# _uskel_alloc, then seeds _gc_trigger with the region start (i.e. the
# first collection only triggers when the region is completely full).
_uskel_gc_init:
	mov %rsi, %r13 # keep caller's continuation; %rsi is reused below
	movq $0x100000, _gc_min_alloc # must be higher than 2x the biggest thunk possible
	movq $0x180, _gc_grow_ratio # grow to 1.5x of last size (0x180/256)
	movq $0x40, _gc_shrink_ratio # keep 1/4 of free space (0x40/256)
	mov $0, %rsp # fake original rsp for first alloc run
	mov $_uskel_gc_init_cont, %rsi
	jmp _uskel_alloc
_uskel_gc_init_cont:
	mov _write_region_start, %rax
	mov %rax, _gc_trigger
	jmp *%r13 # resume the caller
# _uskel_gc -- entry point of a collection cycle.
# In: %rbp = current thunk, %rsi = current continuation (these two are
# the GC roots).  The active write region becomes the from-space
# (_gc_region_start/_gc_region_end) and _uskel_alloc is asked for a new
# to-space; evacuation continues in _uskel_gc_evacuate.
_uskel_gc:
	# save what we did before ending up here
	mov %rbp, _gc_backup_thunk
	mov %rsi, _gc_backup_cont

	# first we need a new memory area
	mov _write_region_start, %rbx
	mov _write_region_end, %rcx
	mov %rbx, _gc_region_start # old region becomes the from-space
	mov %rcx, _gc_region_end
	mov $_uskel_gc_evacuate, %rsi
	jmp _uskel_alloc
# _uskel_gc_evacuate -- evacuate the two roots (thunk, continuation)
# into the freshly mapped to-space, then fall through to the scavenge
# loop.  Reached as the continuation of _uskel_alloc.
_uskel_gc_evacuate:

	# point the writer to the new memory area
	mov _write_region_end, %rsp
	mov %rsp, %r8 # %r8 is the "last thing that was scavenged"

	# start by evacuating the thunk and cont
	mov _gc_backup_thunk, %rbp
	mov $_uskel_gc_evacuate_cont_thunk, %rsi
	jmp _gc_evacuate
_uskel_gc_evacuate_cont_thunk:
	mov %rbp, _gc_backup_thunk # store the (possibly relocated) thunk

	mov _gc_backup_cont, %rbp
	mov $_uskel_gc_evacuate_cont_cont, %rsi
	jmp _gc_evacuate
_uskel_gc_evacuate_cont_cont:
	mov %rbp, _gc_backup_cont # store the (possibly relocated) cont
# Scavenge loop over the to-space (Cheney-style, but %rsp grows DOWN, so
# newly evacuated objects appear at lower addresses).
# Invariants: %r8 = lowest address already scavenged by earlier rounds;
# %rbp = scan iterator (advanced by each object's scavenge routine);
# %r9 = address where the current round started.
# The loop terminates when a round begins with nothing new below %r8.
_uskel_gc_scavenge:
	# start at what we wrote last
	mov %rsp, %rbp # rbp is the iterator (conveniently)
	mov %rsp, %r9 # %r9 stores where we started with this evacuate round

	# if the thing is already scavenged, we didn't write anything, mark done.
	cmp %rbp, %r8
	jbe _uskel_gc_scavenge_end

_uskel_gc_scavenge1:
	# if all ok, scavenge one thing (moving %rbp) and recheck
	mov (%rbp), %rax # %rax = infotable pointer of the object at %rbp
	jmp *-020(%rax) # scavenge position in infotable (octal -020 = -16)
_gc_scavenge_ret:
	# scavenge routines return here with %rbp advanced past the object
	cmp %rbp, %r8
	ja _uskel_gc_scavenge1

	# everything above r9 is now scavenged, continue with next round
	mov %r9, %r8 # we started at r9, so that is now "done"
	jmp _uskel_gc_scavenge
# _uskel_gc_scavenge_end -- wrap up the cycle: unmap the from-space,
# recompute the trigger point and bookkeeping, then restart the thunk.
_uskel_gc_scavenge_end:
	# deallocate the old memory region
	mov $11, %rax # munmap
	mov _gc_region_end, %rsi
	mov _gc_region_start, %rdi # addr = gc start
	sub %rdi, %rsi # len = gc end - gc start
	syscall

	# recalculate the gc trigger point:
	# trigger = start + ((%rsp - start) * shrink_ratio) >> 8,
	# i.e. an address that fraction of the way into the free space
	mov %rsp, %rax
	sub _write_region_start, %rax # %rax = free bytes after this gc
	mulq _gc_shrink_ratio # clobbers %rdx with the high half
	shr $8, %rax # ratio is in 256ths
	add _write_region_start, %rax
	mov %rax, _gc_trigger

	# save how much data we actually had at this point
	mov _write_region_end, %rax
	sub %rsp, %rax # live bytes = region end - write cursor
	mov %rax, _gc_last_size

	# restore what we were doing
	mov _gc_backup_thunk, %rbp
	mov _gc_backup_cont, %rsi
	enter_rbp # for simplicity just restart the thunk
	# (enter_rbp is a macro defined elsewhere -- presumably it jumps
	# into the thunk held in %rbp; TODO confirm)
# _gc_evacuate -- evacuate one object if it does not already live in the
# new write region.
# In:  %rbp = object pointer, %rsi = continuation address
# Out: %rbp = (possibly relocated) object pointer; jumps to *%rsi.
# Clobbers %rax, %r10; the object's own evacuate routine may clobber more.
_gc_evacuate:
	# check if we are really out of the target region
	cmp _write_region_start, %rbp
	jb _gc_evacuate_go
	cmp _write_region_end, %rbp
	jae _gc_evacuate_go
_gc_evacuate_skip:
	# if not, let's just jump to cont and leave %rbp as result
	jmp *%rsi
_gc_evacuate_go:
	# if we should evacuate, jump to the evac routine
	mov %rbp, %r10 # remember the old (pre-copy) address
	mov (%rbp), %rax # %rax = infotable pointer
	jmp *-030(%rax) # evacuate position in infotable (octal -030 = -24)
_gc_evacuate_ret:
	# evacuate routines return here with %rbp = relocated copy.
	# install the indirection: overwrite the old object's header with an
	# IND forwarding node pointing at the new copy
	movq $IND_code, 000(%r10) # word 0: IND infotable
	mov %rbp, 010(%r10) # word 1 (octal 010 = offset 8): forwarding ptr
	jmp *%rsi

.endif #_gc_s_file