reimplement day-09 with min heaps

re-run benchmarks
This commit is contained in:
onyx-and-iris 2024-12-19 12:54:40 +00:00
parent c3fa65e4a8
commit 1b0f02b430
12 changed files with 302 additions and 257 deletions

View File

@ -2,14 +2,14 @@ goos: linux
goarch: amd64
pkg: github.com/onyx-and-iris/aoc2024/day-09
cpu: Intel(R) Core(TM) i7-8700K CPU @ 3.70GHz
BenchmarkSolve-12 1 22886096949 ns/op
BenchmarkSolve-12 1 22884217914 ns/op
BenchmarkSolve-12 1 22924853520 ns/op
BenchmarkSolve-12 1 23014082753 ns/op
BenchmarkSolve-12 1 22788595898 ns/op
BenchmarkSolve-12 1 22781925171 ns/op
BenchmarkSolve-12 1 23094890275 ns/op
BenchmarkSolve-12 1 22694434858 ns/op
BenchmarkSolve-12 1 23002190907 ns/op
BenchmarkSolve-12 1 22923138789 ns/op
ok github.com/onyx-and-iris/aoc2024/day-09 229.003s
BenchmarkSolve-12 1000000000 0.3120 ns/op
BenchmarkSolve-12 1000000000 0.3049 ns/op
BenchmarkSolve-12 1000000000 0.3084 ns/op
BenchmarkSolve-12 1000000000 0.3049 ns/op
BenchmarkSolve-12 1000000000 0.3038 ns/op
BenchmarkSolve-12 1000000000 0.3087 ns/op
BenchmarkSolve-12 1000000000 0.3066 ns/op
BenchmarkSolve-12 1000000000 0.3046 ns/op
BenchmarkSolve-12 1000000000 0.3022 ns/op
BenchmarkSolve-12 1000000000 0.3051 ns/op
ok github.com/onyx-and-iris/aoc2024/day-09 52.978s

View File

@ -2,5 +2,5 @@ goos: linux
goarch: amd64
pkg: github.com/onyx-and-iris/aoc2024/day-09/internal/one
cpu: Intel(R) Core(TM) i7-8700K CPU @ 3.70GHz
BenchmarkSolve-12 1000000000 0.1535 ns/op
ok github.com/onyx-and-iris/aoc2024/day-09/internal/one 1.893s
BenchmarkSolve-12 1000000000 0.2909 ns/op
ok github.com/onyx-and-iris/aoc2024/day-09/internal/one 5.021s

View File

@ -1,8 +0,0 @@
package one

// empty marks an unoccupied cell in the expanded disk layout.
const empty = -1

// block is one entry of the dense disk map: a file occupying `used`
// cells followed by `free` unoccupied cells.
type block struct {
used int // number of cells the file occupies
free int // number of empty cells that follow it
}

View File

@ -1,59 +1,24 @@
package one
import (
"fmt"
"slices"
"strings"
)
import "slices"
type disk struct {
data []int
totalUsed int
data []int
}
func newDisk(blocks []block) disk {
var totalUsed int
data := []int{}
for id, block := range blocks {
for range block.used {
data = append(data, id)
}
for range block.free {
data = append(data, empty)
}
totalUsed += block.used
}
return disk{
data: data,
totalUsed: totalUsed,
func newDisk(raw []int) *disk {
return &disk{
data: raw,
}
}
// len reports the number of file-occupied cells on the disk
// (accumulated from block.used while the disk was built).
func (d *disk) len() int {
return d.totalUsed
}
func (d *disk) sort() {
func (d *disk) reorganise() {
for i := len(d.data) - 1; i >= 0; i-- {
if d.data[i] != empty {
indx := slices.Index(d.data, empty)
if indx == d.len() {
break
}
d.data[i], d.data[indx] = d.data[indx], d.data[i]
indx := slices.Index(d.data, empty)
if indx >= i {
break
}
}
}
func (d *disk) debug() string {
var sb strings.Builder
for _, n := range d.data {
if n == empty {
sb.WriteRune('.')
} else {
sb.WriteString(fmt.Sprintf("%d", n))
}
d.data[i], d.data[indx] = d.data[indx], d.data[i]
}
return sb.String()
}

View File

@ -1,27 +1,70 @@
package one
import (
"bufio"
"bytes"
log "github.com/sirupsen/logrus"
"io"
"slices"
"strconv"
)
const empty = -1
func Solve(buf []byte) (int, error) {
r := bytes.NewReader(buf)
blocks, err := parseLines(r)
expandedRaw, err := parseLines(r)
if err != nil {
return 0, err
}
disk := newDisk(blocks)
disk.sort()
log.Debug(disk.debug())
disk := newDisk(expandedRaw)
disk.reorganise()
var i, checksum int
for range disk.len() {
checksum += i * disk.data[i]
i++
var checksum int
for i, n := range disk.data {
if n == empty {
break
}
checksum += i * n
}
return checksum, nil
}
// parseLines reads the (single-line) dense disk map and expands it into
// one int per disk cell: occupied cells hold their file id, free cells
// hold the `empty` sentinel. Digits alternate file-size / free-size.
func parseLines(r io.Reader) ([]int, error) {
	var line string
	scanner := bufio.NewScanner(r)
	for scanner.Scan() {
		line = scanner.Text() // the map is a single line; keep the last read
	}
	if err := scanner.Err(); err != nil {
		return nil, err
	}
	expanded := []int{}
	for i, id := 0, 0; i < len(line); i, id = i+2, id+1 {
		// file segment: id repeated digit-at-i times
		expanded = append(expanded, slices.Repeat([]int{id}, mustConv(string(line[i])))...)
		// free segment: empty repeated digit-at-i+1 times (absent after a trailing file digit)
		var free int
		if i+1 < len(line) {
			free = mustConv(string(line[i+1]))
		}
		expanded = append(expanded, slices.Repeat([]int{empty}, free)...)
	}
	return expanded, nil
}
// mustConv converts a digit string to an int; the puzzle input is
// trusted, so a parse failure panics rather than returning an error.
func mustConv(s string) int {
	n, err := strconv.Atoi(s)
	if err == nil {
		return n
	}
	panic(err)
}

View File

@ -1,38 +0,0 @@
package one
import (
"bufio"
"io"
"strconv"
)
// parseLines turns each dense disk-map line into blocks: the digit at
// every even index is a file size, the digit after it (when present)
// the following free-space size.
func parseLines(r io.Reader) ([]block, error) {
	var blocks []block
	scanner := bufio.NewScanner(r)
	for scanner.Scan() {
		line := scanner.Text()
		for i := 0; i < len(line); i += 2 {
			free := 0
			if i+1 < len(line) {
				free = mustConv(string(line[i+1]))
			}
			blocks = append(blocks, block{used: mustConv(string(line[i])), free: free})
		}
	}
	if err := scanner.Err(); err != nil {
		return []block{}, err
	}
	return blocks, nil
}
// mustConv converts a digit string to an int. The puzzle input is
// trusted, so a parse failure is a programmer error and panics.
func mustConv(s string) int {
n, err := strconv.Atoi(s)
if err != nil {
panic(err)
}
return n
}

View File

@ -2,5 +2,5 @@ goos: linux
goarch: amd64
pkg: github.com/onyx-and-iris/aoc2024/day-09/internal/two
cpu: Intel(R) Core(TM) i7-8700K CPU @ 3.70GHz
BenchmarkSolve-12 1 22722704246 ns/op
ok github.com/onyx-and-iris/aoc2024/day-09/internal/two 22.726s
BenchmarkSolve-12 1000000000 0.01642 ns/op
ok github.com/onyx-and-iris/aoc2024/day-09/internal/two 0.106s

View File

@ -1,21 +1,33 @@
package two
const empty = -1
import "fmt"
// kind discriminates the two flavours of block stored on the disk.
type kind int

const (
// kindOfFile marks a block occupied by file data.
kindOfFile kind = iota
// kindOfEmpty marks a run of free cells.
kindOfEmpty
)
type block struct {
used int
free []int
offset int
kind kind
id int
start int
length int
value int
}
func newBlock(used, start, end int) block {
return block{
used: used,
free: []int{start, start + end},
offset: 0,
func newBlock(kind kind, id, start, length, value int) *block {
return &block{kind, id, start, length, value}
}
func (b *block) String() string {
var kindStr string
switch b.kind {
case kindOfFile:
kindStr = "file"
case kindOfEmpty:
kindStr = "empty"
}
}
func (b *block) available() int {
return b.free[1] - b.free[0]
return fmt.Sprintf("kind: %s id: %d start: %d length: %d", kindStr, b.id, b.start, b.length)
}

View File

@ -1,98 +1,142 @@
package two
import (
"fmt"
"strings"
"cmp"
"container/heap"
"errors"
"math"
"slices"
log "github.com/sirupsen/logrus"
)
const numEmptyBlocks int = 9
type disk struct {
blocks []block
data []int
data []int
fileblocks []*block
emptyblocks [numEmptyBlocks]minHeap
}
func newDisk(blocks []block) disk {
offset := 0
data := []int{}
for id, block := range blocks {
blocks[id].offset = offset
for range block.used {
data = append(data, id)
}
for range block.available() {
data = append(data, empty)
}
offset += block.used + block.available()
func newDisk(raw []int) *disk {
fileblocks := make([]*block, 0)
emptyblockHeaps := make([]minHeap, numEmptyBlocks)
for i := range numEmptyBlocks {
heap.Init(&emptyblockHeaps[i])
}
return disk{
blocks: blocks,
data: data,
}
}
func (d *disk) sort() {
for i := len(d.blocks) - 1; i >= 0; i-- {
log.Debugf("searching for space for block %d of size %d\n", i, d.blocks[i].used)
for j := 0; j < len(d.data); j++ {
if j >= d.blocks[i].offset+d.blocks[i].used {
break
}
if d.data[j] != empty {
continue
}
var sz int
if d.data[j] == empty {
sz = d.sizeOfEmptyBlock(j)
}
if d.blocks[i].used <= sz {
d.writeBlockToDisk(j, d.blocks[i])
// don't attempt to write block again
break
}
// skip to end of empty block
j += sz
}
}
}
func (d *disk) sizeOfEmptyBlock(j int) int {
// this works!
var sz int
for k := j; k < len(d.data) && d.data[k] == empty; k++ {
sz++
for i, id := 0, 0; i < len(raw); id++ {
sz = 0
for j := i; j < len(raw) && raw[j] == id; j++ {
sz++
}
fileblocks = append(fileblocks, newBlock(kindOfFile, id, i, sz, id))
i += sz
sz = 0
for j := i; j < len(raw) && raw[j] == empty; j++ {
sz++
}
if sz > 0 {
heap.Push(&emptyblockHeaps[sz-1], newBlock(kindOfEmpty, id, i, sz, empty))
}
i += sz
}
log.Debugf("found empty space of size %d\n", sz)
return sz
log.Debugf("\n%v\n%v", fileblocks, emptyblockHeaps)
return &disk{data: raw, fileblocks: fileblocks, emptyblocks: [numEmptyBlocks]minHeap(emptyblockHeaps)}
}
func (d *disk) writeBlockToDisk(start int, b block) {
for i := b.offset; i < b.offset+b.used; i, start = i+1, start+1 {
d.data[start] = d.data[i]
log.Debugf("writing %d to disk at pos %d\n", d.data[i], start)
}
func (d *disk) defragment() {
for i := len(d.fileblocks) - 1; i >= 0; i-- {
log.Debugf("searching for space for fileblock %d: %v", i, d.fileblocks[i])
for i := b.offset; i < len(d.data) && i < b.offset+b.used; i++ {
d.data[i] = empty
log.Debugf("truncating space on disk at pos %d", i)
}
emptyblock, err := d.getNextEmptyBlock(d.fileblocks[i])
if err != nil {
log.Debug(err)
continue
}
log.Debug(d.debug())
oldStart := d.fileblocks[i].start
oldLength := d.fileblocks[i].length
// we've found an appropriate empty block, now swap the data
d.fileblocks[i].start, emptyblock.start = emptyblock.start, emptyblock.start+d.fileblocks[i].length
// push the resized empty block to a new heap
emptyblock.length -= d.fileblocks[i].length
if emptyblock.length > 0 {
log.Debugf("emptyblock resized %d", emptyblock.length)
heap.Push(&d.emptyblocks[emptyblock.length-1], emptyblock)
}
// now create a new empty block and push it to the appropriate heap
heap.Push(
&d.emptyblocks[oldLength-1],
newBlock(kindOfEmpty, math.MaxInt, oldStart, oldLength, empty),
)
}
}
func (d *disk) debug() string {
var sb strings.Builder
for _, n := range d.data {
if n == empty {
sb.WriteRune('.')
} else {
sb.WriteString(fmt.Sprintf("%d", n))
// getNextEmptyBlock pops and returns the lowest-id empty block that is
// at least as long as currentFile and lies to the left of it. One
// candidate is popped from every size heap that could fit the file, the
// best is kept, and the rest are pushed back onto their heaps. An error
// is returned when no suitable empty block exists.
func (d *disk) getNextEmptyBlock(currentFile *block) (*block, error) {
// collect all minblocks the same size as the current file or greater
minBlocks := []*block{}
for j := currentFile.length; j <= numEmptyBlocks; j++ {
// heaps are indexed by (size - 1); skip sizes with no free blocks
if d.emptyblocks[j-1].Len() == 0 {
continue
}
currentblock := heap.Pop(&d.emptyblocks[j-1]).(*block)
minBlocks = append(minBlocks, currentblock)
}
if len(minBlocks) == 0 {
return nil, errors.New("no empty blocks found")
}
// sort the blocks by id
// NOTE(review): assumes lower id == further left on the disk — confirm
// this holds for resized/re-pushed empty blocks as well.
slices.SortFunc(minBlocks, func(a, b *block) int {
return cmp.Compare(a.id, b.id)
})
// push back the ones we won't be using
// (the loop variable shadows the `block` type inside this loop)
for _, block := range minBlocks[1:] {
heap.Push(&d.emptyblocks[block.length-1], block)
}
// if the lowest id minblock is positioned after the current file, push it back and return an error
if minBlocks[0].start >= currentFile.start {
heap.Push(&d.emptyblocks[minBlocks[0].length-1], minBlocks[0])
return nil, errors.New("no empty blocks found")
}
log.Debugf("found empty space %v", minBlocks[0])
return minBlocks[0], nil
}
func (d *disk) write() {
allBlocks := make([]*block, 0)
allBlocks = append(allBlocks, d.fileblocks...)
for i := 0; i < numEmptyBlocks; i++ {
for range d.emptyblocks[i].Len() {
allBlocks = append(allBlocks, heap.Pop(&d.emptyblocks[i]).(*block))
}
}
slices.SortFunc(allBlocks, func(a, b *block) int {
return cmp.Compare(a.start, b.start)
})
for _, block := range allBlocks {
for i := block.start; i < block.start+block.length; i++ {
d.data[i] = block.value
}
}
return sb.String()
}

View File

@ -0,0 +1,27 @@
package two
// minHeap is a heap.Interface implementation ordering empty blocks by
// ascending id, so the left-most candidate is popped first.
type minHeap []*block

// Len reports the number of blocks currently on the heap.
func (h minHeap) Len() int {
	return len(h)
}

// Less orders entries by block id.
func (h minHeap) Less(i, j int) bool {
	return h[i].id < h[j].id
}

// Swap exchanges two heap entries.
func (h minHeap) Swap(i, j int) {
	h[i], h[j] = h[j], h[i]
}

// Push appends x to the backing slice; called by container/heap, which
// then restores the heap invariant.
// (interface{} modernized to its alias `any`; the file already relies on
// Go 1.22+ features such as range-over-int.)
func (h *minHeap) Push(x any) {
	*h = append(*h, x.(*block))
}

// Pop removes and returns the last element of the backing slice —
// container/heap has already swapped the minimum into that position.
func (h *minHeap) Pop() any {
	old := *h
	n := len(old)
	x := old[n-1]
	*h = old[:n-1]
	return x
}

View File

@ -1,31 +1,72 @@
package two
import (
"bufio"
"bytes"
log "github.com/sirupsen/logrus"
"io"
"slices"
"strconv"
)
const empty = -1
func Solve(buf []byte) (int, error) {
r := bytes.NewReader(buf)
blocks, err := parseLines(r)
expandedRaw, err := parseLines(r)
if err != nil {
return 0, err
}
disk := newDisk(blocks)
log.Debug(disk.debug())
disk.sort()
log.Debug(disk.debug())
disk := newDisk(expandedRaw)
disk.defragment()
disk.write()
var checksum int
for i := 0; i < len(disk.data); i++ {
if disk.data[i] == empty {
var sum int
for i, n := range disk.data {
if n == empty {
continue
}
checksum += i * disk.data[i]
sum += i * n
}
return checksum, nil
return sum, nil
}
// parseLines expands the dense disk-map line into per-cell values: the
// file id for occupied cells, the `empty` sentinel for free cells.
func parseLines(r io.Reader) ([]int, error) {
	scanner := bufio.NewScanner(r)
	var line string
	for scanner.Scan() {
		line = scanner.Text() // single-line input; keep the last read
	}
	if err := scanner.Err(); err != nil {
		return nil, err
	}
	// pairs of {cell value, run length}: even digits are file sizes,
	// odd digits free-space sizes
	segments := [][]int{}
	for i, id := 0, 0; i < len(line); i, id = i+2, id+1 {
		segments = append(segments, []int{id, mustConv(string(line[i]))})
		free := 0
		if i+1 < len(line) {
			free = mustConv(string(line[i+1]))
		}
		segments = append(segments, []int{empty, free})
	}
	out := []int{}
	for _, seg := range segments {
		out = append(out, slices.Repeat([]int{seg[0]}, seg[1])...)
	}
	return out, nil
}
// mustConv parses a numeric string, panicking on malformed input
// (acceptable here because the puzzle input is well-formed).
func mustConv(s string) int {
	var (
		n   int
		err error
	)
	if n, err = strconv.Atoi(s); err != nil {
		panic(err)
	}
	return n
}

View File

@ -1,41 +0,0 @@
package two
import (
"bufio"
"io"
"strconv"
)
// parseLines converts the dense disk-map digits into blocks. `start` is
// advanced by each file's size *before* the block is recorded, matching
// the offset bookkeeping newBlock expects.
func parseLines(r io.Reader) ([]block, error) {
	var (
		blocks []block
		start  int
	)
	scanner := bufio.NewScanner(r)
	for scanner.Scan() {
		line := scanner.Text()
		for i := 0; i < len(line); i += 2 {
			used := mustConv(string(line[i]))
			start += used
			end := 0
			if i+1 < len(line) {
				end = mustConv(string(line[i+1]))
			}
			blocks = append(blocks, newBlock(used, start, end))
		}
	}
	if err := scanner.Err(); err != nil {
		return []block{}, err
	}
	return blocks, nil
}
// mustConv converts a digit string to an int, panicking on parse
// failure — the puzzle input is assumed well-formed.
func mustConv(s string) int {
n, err := strconv.Atoi(s)
if err != nil {
panic(err)
}
return n
}