Pyro

joined 2 years ago
[–] Pyro@programming.dev 1 points 4 weeks ago (1 children)

Yeah, I've got the DSU algorithm ingrained because of the number of times I had to practice it for coding rounds. I didn't strictly need path compression or union by rank here either, but might as well.
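
For anyone unfamiliar, the core of it is tiny. A minimal sketch with both optimizations (same shape as the DSU class in my solution further down; purely illustrative):

# Disjoint Set Union: near-constant-time merging and lookup of groups
class MiniDSU:
    def __init__(self, size: int):
        self.parent = list(range(size))
        self.rank = [1] * size

    def find(self, x: int) -> int:
        # path compression: point every node on the path directly at the root
        if self.parent[x] != x:
            self.parent[x] = self.find(self.parent[x])
        return self.parent[x]

    def union(self, x: int, y: int) -> bool:
        # union by rank: attach the shallower tree under the deeper one
        rx, ry = self.find(x), self.find(y)
        if rx == ry:
            return False
        if self.rank[rx] < self.rank[ry]:
            rx, ry = ry, rx
        self.parent[ry] = rx
        if self.rank[rx] == self.rank[ry]:
            self.rank[rx] += 1
        return True

# usage: three elements, two of them merged
dsu = MiniDSU(3)
dsu.union(0, 1)
assert dsu.find(0) == dsu.find(1) != dsu.find(2)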

[–] Pyro@programming.dev 2 points 1 month ago (2 children)

Python

I spent way too long optimizing this solution from ~20s to <1s

# Build grid as list of integers (bitmask for each row)
def build_grid(data: str) -> list[int]:
    grid = []
    for line in data.splitlines():
        row = 0
        for j, cell in enumerate(line):
            if cell == '#':
                row |= (1 << j)
        grid.append(row)
    return grid

# Compute next step of the grid while counting new active cells
# Highly optimized using bitwise operations
def compute_next_step(grid: list[int], m, n):
    next_grid = []
    new_active_cells = 0
    mask = (1 << n) - 1     # mask to keep only n bits

    for i in range(m):
        row_above = grid[i-1] if i > 0 else 0
        row_below = grid[i+1] if i < m - 1 else 0
        
        # Neighbors are diagonal: (i-1, j-1), (i-1, j+1), (i+1, j-1), (i+1, j+1)
        n_above = (row_above << 1) ^ (row_above >> 1)   # above neighbors are odd parity
        n_below = (row_below << 1) ^ (row_below >> 1)   # below neighbors are odd parity
        # total active neighbors are odd parity
        neighbors_xor = n_above ^ n_below
        
        # the rules say a cell becomes active if:
        # - it is currently active, and has an odd number of active neighbors
        # - it is currently inactive, and has an even number of active neighbors
        # this is equivalent to having an even number of (active neighbors + itself)
        # For single bit, equivalent to: ~(self ^ neighbors_xor) & 1
        #   self ^ neighbors_xor = 0 if both are even or both are odd (desired case)
        #   so we negate it to get 1 in desired case
        #   and we apply mask to keep only n bits
        # This is all done in parallel for the entire row using bitwise operations
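        # e.g. (illustrative values) with n = 6, grid[i] = 0b010110 and
        #   neighbors_xor = 0b001100: grid[i] ^ neighbors_xor = 0b011010,
        #   so next_row = ~0b011010 & 0b111111 = 0b100101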
        next_row = ~(grid[i] ^ neighbors_xor) & mask
        
        next_grid.append(next_row)
        new_active_cells += next_row.bit_count()

    return next_grid, new_active_cells

# Simulate for given rounds and return total active cells
def part1(data: str, rounds = 10):
    grid = build_grid(data)
    m, n = len(grid), data.index('\n')

    total_active = 0
    for _ in range(rounds):
        next_grid, new_active_cells = compute_next_step(grid, m, n)
        total_active += new_active_cells
        grid = next_grid
    return total_active

assert (t := part1(""".#.##.
##..#.
..##.#
.#.##.
.###..
###.##""")) == 200, f"Expected 200 but got {t}"

# part 2 is just part 1 with more rounds
from functools import partial
part2 = partial(part1, rounds=2025)

# Match 8x8 pattern with center of 34x34 grid
def match_pattern(next_grid, pattern_grid):
    for i in range(8):
        # the 8x8 pattern is centered in the 34x34 grid, offset by (34 - 8) // 2 = 13,
        #   so skip the first 13 rows and shift right by 13 columns to align
        if ((next_grid[i+13] >> 13) & 0xFF) != pattern_grid[i]:
            return False
    return True

def part3(pattern: str):
    pattern_grid = build_grid(pattern)
    grid_size = 34

    rounds = 1000000000
    grid = [0] * grid_size  # we start with an empty 34x34 grid

    seen = set()                    # grids we have seen
    cumu_matches_for_round = [0]    # cumulative matches for each round in the cycle
    cumu_matches = 0                # current cumulative matches

    # Simulate until we find a cycle or reach the rounds limit
    round = 1
    while round <= rounds:
        next_grid, next_actives = compute_next_step(grid, grid_size, grid_size)

        # get current state and check for cycles
        state = tuple(next_grid)
        if state in seen:
            break
        else:
            seen.add(state)
        
        # check for pattern match and update cumulative matches
        if match_pattern(next_grid, pattern_grid):
            cumu_matches += next_actives        
        # store cumulative matches from the start to this round
        cumu_matches_for_round.append(cumu_matches)

        # update variables for next round
        grid = next_grid
        round += 1
    
    # the length of the cycle is round - 1 since we break AFTER detecting a cycle (cycle + 1)
    cycle_len = round - 1
    # the number of full cycles we can fit in rounds
    full_cycles = rounds // cycle_len
    # the total matches from full cycles
    matches = full_cycles * cumu_matches
    # the remaining rounds after full cycles
    matches += cumu_matches_for_round[rounds % cycle_len]
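    # e.g. (illustrative numbers) if cycle_len = 4, cumu_matches = 10 and rounds = 10:
    #   matches = 2 * 10 + cumu_matches_for_round[10 % 4]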

    return matches

assert (t := part3("""#......#
..#..#..
.##..##.
...##...
...##...
.##..##.
..#..#..
#......#""")) == 278388552, f"Expected 278388552 but got {t}"
[–] Pyro@programming.dev 2 points 1 month ago (2 children)

Python

A much simpler problem compared to earlier ones

# generator to yield dial values in the required order
# yields (value: str, is_reverse: bool)
def yield_dial_vals(coll: list):
    n = len(coll)
    for i in range(0, n, 2):
        yield coll[i], False
    right_start = n - 2 if n % 2 == 1 else n - 1
    for i in range(right_start, -1, -2):
        yield coll[i], True

def part1(data: str):
    nums = data.splitlines()
    n = len(nums)
    
    # total number of values on the dial
    #   + 1 for the '1' value
    total_vals = n + 1
    # effective rotation after full cycles
    effective_rot = 2025 % total_vals
    # if full cycles, return '1'
    if effective_rot == 0:
        return 1
    # adjust for 0-indexing the remaining dial values
    effective_rot -= 1
    
    # skip numbers on the dial until we reach the final value
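    # e.g. for the 5-value sample below, the values are visited as 72,47,67,61,58
    #   with '1' at position 0, so total_vals = 6, 2025 % 6 = 3, effective_rot = 2,
    #   and skipping 2 values lands on 67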
    val_gen = yield_dial_vals(nums)
    for _ in range(effective_rot):
        next(val_gen)
    
    # return the final value
    return int(next(val_gen)[0])

assert (t := part1("""72
58
47
61
67""")) == 67, f"Expected: 67, Actual: {t}"

def part2(data: str, rotations = 20252025):
    ranges = data.splitlines()
    
    # count values on the dial other than '1'
    n = 0
    for val, _ in yield_dial_vals(ranges):
        a, b = map(int, val.split("-"))
        n += b - a + 1
    
    # total number of values on the dial
    #   + 1 for the '1' value
    total_vals = n + 1
    # effective rotation after full cycles
    effective_rot = rotations % total_vals
    # if full cycles, return '1'
    if effective_rot == 0:
        return 1
    # adjust for 0-indexing the remaining dial values
    effective_rot -= 1
    
    # iterate through ranges until we reach the one the dial lands on
    for val, is_reverse in yield_dial_vals(ranges):
        # get range bound and size
        a, b = map(int, val.split("-"))
        r = b - a + 1
        # check if the dial lands within this range
        if effective_rot < r:
            # it does!
            # return the appropriate value in the range based on direction
            if is_reverse:
                return b - effective_rot
            else:
                return a + effective_rot
        # consume the rotations for skipping this range
        effective_rot -= r
    
    assert False, "Should have found the target range"


assert part2("""10-15
12-13
20-21
19-23
30-37""") == 30

# part 3 is just part 2 with a larger number of rotations
from functools import partial
part3 = partial(part2, rotations = 202520252025)
[–] Pyro@programming.dev 2 points 1 month ago (2 children)

Python

# get all valid neighbors of a cell in a grid
DELTAS = [(-1, 0), (1, 0), (0, -1), (0, 1)]
def getNeighbors(i, j, m, n):
    for di, dj in DELTAS:
        ni, nj = i + di, j + dj
        if 0 <= ni < m and 0 <= nj < n:
            yield ni, nj

# constant for marking permanently burnt cells
PERMA_BURNT = -1

# DFS to burn the grid from a list of starting points
# If perma_burn is True, cells that are burnt will be modified to PERMA_BURNT
# returns the set of all burnt cells
def dfs_burn(grid: list[list[int]], start: list[tuple[int, int]], perma_burn=False) -> set[tuple[int, int]]:
    m, n = len(grid), len(grid[0])

    ignited = start   # the start list is used directly as the DFS stack (and mutated)
    burnt = set()

    while ignited:
        i, j = ignited.pop()
        if (i, j) in burnt:
            continue
        burnt.add((i, j))

        for ni, nj in getNeighbors(i, j, m, n):
            if (ni, nj) in burnt or grid[ni][nj] == PERMA_BURNT:
                continue
            if grid[ni][nj] <= grid[i][j]:
                ignited.append((ni, nj))
        
        # mark as permanently burnt (if requested)
        if perma_burn: grid[i][j] = PERMA_BURNT
    return burnt

# Part 1: DFS burn from single source (0, 0)
def part1(data: str) -> int:
    grid = [[int(c) for c in row] for row in data.splitlines()]
    return len(dfs_burn(grid, [(0, 0)]))

assert part1("""989601
857782
746543
766789""") == 16

# Part 2: DFS burn from two sources (0,0) and (m-1,n-1)
def part2(data: str) -> int:
    grid = [[int(c) for c in row] for row in data.splitlines()]
    m, n = len(grid), len(grid[0])
    return len(dfs_burn(grid, [(0, 0), (m - 1, n - 1)]))

assert part2("""9589233445
9679121695
8469121876
8352919876
7342914327
7234193437
6789193538
6781219648
5691219769
5443329859""") == 58

from copy import deepcopy

# Part 3: Greedy selection of three best burn points
def part3(data: str) -> int:
    grid = [[int(c) for c in row] for row in data.splitlines()]
    m, n = len(grid), len(grid[0])
    # keep original grid for final burn count
    og_grid = deepcopy(grid)

    # generate the list of burn candidates, tallest cells first
    # note that the cell with the max value is not necessarily the single best burn point,
    #   but trying tall cells first tends to burn large regions early,
    #   which lets us skip clearly suboptimal candidates
    candidates = [(grid[i][j], i, j) for i in range(m) for j in range(n)]
    candidates.sort(reverse=True)

    # best three burn points (separately chosen)
    best_three = []

    for _ in range(3):
        # set of all cells burnt in this iteration
        burnt = set()

        # track the best burn location in this iteration
        max_burnt = 0
        max_burn_loc = None

        for _, i, j in candidates:
            # skip candidates that were already burnt by an earlier candidate in this iteration:
            #   any cell reachable from this candidate was also reachable from that earlier start,
            #   so its burn set is a subset and it cannot do better
            if (i, j) in burnt:
                continue
            # burn (impermanently) from this candidate
            burns = dfs_burn(grid, [(i, j)])
            if len(burns) > max_burnt:
                max_burnt = len(burns)
                max_burn_loc = (i, j)
            # record newly burnt cells to skip in future candidates
            burnt |= burns
        
        assert max_burn_loc is not None, "Should have found a max burn location"
        # record and permanently burn from the best location found
        dfs_burn(grid, [max_burn_loc], perma_burn=True)
        best_three.append(max_burn_loc)
    
    # return total burnt cells from all three best burn points simultaneously
    return len(dfs_burn(og_grid, best_three))

assert (t := part3("""5411
3362
5235
3112""")) == 14, f"Expected: 14, Actual: {t}"

assert (t := part3("""41951111131882511179
32112222211518122215
31223333322115122219
31234444432147511128
91223333322176121892
61112222211166431583
14661111166111111746
11111119142122222177
41222118881233333219
71222127839122222196
56111126279711111517""")) == 136, f"Expected: 136, Actual: {t}"
[–] Pyro@programming.dev 2 points 1 month ago

Python

Since an optimized phase 2 is enough to solve both p2 and p3, I did not optimize any further.

import math

# Brute-force phase one
#   Simulate all phase one rounds until no more moves can be made or max rounds reached
def phase_one(ducks: list[int], max_rounds: int) -> int:
    n = len(ducks)
    round = 1

    while round <= max_rounds:
        moved = False
        for i in range(n - 1):
            if ducks[i] > ducks[i + 1]:
                ducks[i] -= 1
                ducks[i + 1] += 1
                moved = True
        if not moved:
            break
        round += 1
    return round - 1

# Brute-force phase two
#   Simulate all phase two rounds until no more moves can be made or max rounds reached
def phase_two(ducks: list[int], max_rounds: int) -> int:
    n = len(ducks)
    round = 1

    while round <= max_rounds:
        moved = False
        for i in range(n - 1):
            if ducks[i] < ducks[i + 1]:
                ducks[i] += 1
                ducks[i + 1] -= 1
                moved = True
        if not moved:
            break
        round += 1
    return round - 1

# Optimized phase two for limitless rounds
#   Every round moves one duck from each stack above the average toward those below it,
#   so the number of rounds is simply the total surplus above the average
#   (equivalently, the total deficit below it)
def phase_two_limitless(ducks: list[int]) -> int:
    avg = 0
    for i, d in enumerate(ducks):
        avg += (d - avg) / (i + 1)
    avg = round(avg)

    rounds = sum(x - avg for x in ducks if x > avg)
    return rounds
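
# An exact integer alternative to the running average above (a sketch, assuming the
#   duck total divides evenly across the stacks), which sidesteps float rounding:
def phase_two_limitless_exact(ducks: list[int]) -> int:
    avg = sum(ducks) // len(ducks)
    return sum(x - avg for x in ducks if x > avg)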

# Balance ducks through both phases, respecting max rounds
def balance_ducks(ducks: list[int], max_rounds: int) -> int:
    rounds1 = phase_one(ducks, max_rounds)
    # if the number of rounds is unbounded, use the optimized phase two
    if max_rounds < math.inf:
        rounds2 = phase_two(ducks, max_rounds - rounds1)
    else:
        rounds2 = phase_two_limitless(ducks)
    return rounds1 + rounds2

# Calculate checksum of the ducks
def calculate_checksum(ducks: list[int]) -> int:
    n = len(ducks)
    checksum = sum((i + 1) * ducks[i] for i in range(n))
    return checksum

# Brute-force both phases
def part1(data: str):
    ducks = [int(d) for d in data.splitlines()]
    balance_ducks(ducks, 10)
    return calculate_checksum(ducks)

# <asserts snipped>

# Brute-force phase one, use optimized phase two
def part2(data: str):
    ducks = [int(d) for d in data.splitlines()]
    rounds = balance_ducks(ducks, math.inf)  # type: ignore
    return rounds

# <asserts snipped>

# You can use the same function for p2 and p3
part3 = part2
[–] Pyro@programming.dev 2 points 1 month ago (1 children)

Python

Took me quite a while to figure out. Did part 3 using DFS initially, then optimized to use memoization.

# queue for BFS
from collections import deque

# possible moves for the dragon (knight-like moves)
DRAGON_MOVES = [(2, 1), (2, -1), (-2, 1), (-2, -1), (1, 2), (1, -2), (-1, 2), (-1, -2)]

# yields all possible moves for dragon at (i, j)
def get_next_moves(i, j, m, n):
    for dx, dy in DRAGON_MOVES:
        n_i, n_j = i + dx, j + dy
        if 0 <= n_i < m and 0 <= n_j < n:
            yield n_i, n_j

# BFS for given number of moves
def part1(data: str, moves: int = 4):
    board = [list(row) for row in data.splitlines()]
    m, n = len(board), len(board[0])

    # initialize queue to dragon position
    queue = deque()
    for i in range(m):
        for j in range(n):
            if board[i][j] == 'D':
                queue.append((i, j))

    eaten = 0
    visited = set()
    # perform BFS for the given number of moves
    # we loop moves+1 times because the first iteration only processes the starting position (0 moves made)
    for _ in range(moves+1):
        nq = len(queue)
        for _ in range(nq):
            i, j = queue.popleft()
            if (i, j) in visited:
                continue
            visited.add((i, j))

            # eat sheep if present
            if board[i][j] == 'S':
                eaten += 1

            # add unvisited next moves to queue
            for n_i, n_j in get_next_moves(i, j, m, n):
                if (n_i, n_j) in visited:
                    continue
                queue.append((n_i, n_j))

    return eaten

# <assert snipped>

def part2(data: str, rounds: int = 20):
    board = [list(row) for row in data.splitlines()]
    m, n = len(board), len(board[0])

    # initialize list to initial dragon location
    dragon_locs = []
    for i in range(m):
        for j in range(n):
            if board[i][j] == 'D':
                dragon_locs.append((i, j))

    eaten = 0
    # simulate for given rounds
    for _ in range(rounds):
        # move dragon
        # calculate all possible new positions for the dragon
        dragon_moved_to = set()
        for i, j in dragon_locs:
            for n_i, n_j in get_next_moves(i, j, m, n):
                dragon_moved_to.add((n_i, n_j))
        # update dragon locations and eat any sheep present
        dragon_locs = list(dragon_moved_to)
        for i, j in dragon_locs:
            if board[i][j] == 'S':
                eaten += 1
                board[i][j] = '.'  # sheep is eaten

        # move sheep
        # process rows from bottom to top so a sheep that moves down is not moved again this round
        for i in range(m-1, -1, -1):
            for j in range(n):
                if 'S' not in board[i][j]:
                    # no sheep here
                    continue
                # move sheep out of current position
                board[i][j] = board[i][j].replace('S', '')
                if i == m-1:
                    # sheep escapes
                    continue
                if board[i+1][j] == '#':
                    # sheep moves into hiding
                    board[i+1][j] = 'S#'
                    continue
                if (i+1, j) in dragon_moved_to:
                    # sheep runs into dragon
                    eaten += 1
                    continue
                # sheep moves down
                board[i+1][j] = 'S'
            
    return eaten

# <assert snipped>

# DP with memoization
def part3(data: str):
    board_lines = data.splitlines()
    m, n = len(board_lines), len(board_lines[0])
    
    hideouts = set()            # positions of hideouts
    initial_sheep = [None] * n  # initial positions of sheep in each column
    dragon_loc = None           # initial position of dragon
    
    # iterate through board to find hideouts, sheep, and dragon
    for i in range(m):
        for j in range(n):
            char = board_lines[i][j]
            if char == '#':
                hideouts.add((i, j))
            elif char == 'D':
                dragon_loc = (i, j)
            elif char == 'S':
                initial_sheep[j] = i
    
    # convert to tuple for immutability and hashability
    initial_sheep = tuple(initial_sheep)

    # optional: calculate lost positions for each column
    # lost_positions[j] is the topmost row of the run of hideouts at the bottom of column j (default m, i.e. off the board);
    #   once a sheep reaches it, it can stay hidden the rest of the way down and escape (game over)
    # used to quickly prune unwinnable states
    lost_positions = [m] * n
    for j in range(n):
        for i in range(m-1, -1, -1):
            if (i, j) not in hideouts:
                break
            lost_positions[j] = i

    # memoization of state to number of paths
    # state is a tuple of (player, dragon_loc, sheep_positions)
    memo = {}

    # do the sheep's turn (each possible single-sheep move is a separate branch)
    def solve_sheep(dragon_loc: tuple[int, int], sheep_positions: tuple[int|None, ...]) -> int:
        # check for memoized result
        state = ('S', dragon_loc, sheep_positions)
        if state in memo:
            return memo[state]
        
        total_paths = 0
        # used to check if any sheep can move
        # this helps determine if the dragon gets another turn
        can_move = False
        
        for j, i in enumerate(sheep_positions):
            if i is None:
                # no sheep in this column
                continue
            
            next_i = i + 1
            
            # Check collision with dragon
            if (next_i, j) == dragon_loc and (next_i, j) not in hideouts:
                # smart sheep avoids dragon
                continue
            
            # Check escape (entering safe zone)
            if next_i == lost_positions[j]:
                # this is a valid move, but it's game over (the sheep will escape), so we don't explore further
                can_move = True
                continue
            
            # this sheep will move down
            can_move = True
            # create new state with sheep moved down
            new_sheep = list(sheep_positions)
            new_sheep[j] = next_i

            # continue solving for dragon's turn
            total_paths += solve_dragon(dragon_loc, tuple(new_sheep))
        
        # if no sheep could move, dragon gets another turn
        if not can_move:
            total_paths += solve_dragon(dragon_loc, sheep_positions)
        
        # memoize result before returning
        memo[state] = total_paths
        return total_paths

    # do dragon's turn
    def solve_dragon(dragon_loc, sheep_positions):
        # check for memoized result
        state = ('D', dragon_loc, sheep_positions)
        if state in memo:
            return memo[state]
        
        total_paths = 0
        i, j = dragon_loc
        
        # try all possible dragon moves
        for n_i, n_j in get_next_moves(i, j, m, n):
            # get where the sheep is in the column the dragon is moving to
            sheep_loc_in_col = sheep_positions[n_j]
            
            # if sheep is in the same row as the dragon and not in hideout, eat it
            if sheep_loc_in_col == n_i and (n_i, n_j) not in hideouts:
                sheep_count = sum(1 for x in sheep_positions if x is not None)
                if sheep_count == 1:
                    # all sheep eaten, end of path
                    total_paths += 1
                else:
                    # create new state with sheep eaten
                    new_sheep = list(sheep_positions)
                    new_sheep[n_j] = None
                    # continue solving for sheep's turn
                    total_paths += solve_sheep((n_i, n_j), tuple(new_sheep))
            else:
                # Empty, hideout, or sheep hidden in hideout
                # continue solving for sheep's turn with dragon moved
                total_paths += solve_sheep((n_i, n_j), sheep_positions)

        # memoize result before returning
        memo[state] = total_paths
        return total_paths

    # start solving with sheep's turn
    return solve_sheep(dragon_loc, initial_sheep)

# <asserts snipped>
[–] Pyro@programming.dev 2 points 1 month ago (3 children)

Python

# returns parents of child if found, else None
def get_parents(dnas: list[list[str]], child: int):
    candidates = len(dnas)
    seq_len = len(dnas[0])

    for p1 in range(candidates):
        if p1 == child:
            continue
        for p2 in range(p1 + 1, candidates):
            if p2 == child:
                continue

            for idx in range(seq_len):
                if dnas[child][idx] not in [dnas[p1][idx], dnas[p2][idx]]:
                    # mismatch found
                    break
            else:
                # no-break => all matched
                return p1, p2
    return None

# yields all children with their parents from a collection of dnas
def yield_children(dnas: list[list[str]]):
    for child in range(len(dnas)):
        parents = get_parents(dnas, child)
        if parents is not None:
            yield child, parents


def part1(data: str):
    # parse input data into list of DNA sequences
    dnas = [list(dna.split(":")[1]) for dna in data.splitlines()]
    seq_len = len(dnas[0])

    child, parents = next(yield_children(dnas))
    similarities = [0, 0]
    for idx in range(seq_len):
        for pi in range(2):
            if dnas[child][idx] == dnas[parents[pi]][idx]:
                similarities[pi] += 1

    return similarities[0] * similarities[1]


assert (
    part1("""1:CAAGCGCTAAGTTCGCTGGATGTGTGCCCGCG
2:CTTGAATTGGGCCGTTTACCTGGTTTAACCAT
3:CTAGCGCTGAGCTGGCTGCCTGGTTGACCGCG""")
    == 414
)


def part2(data: str):
    # parse input data into list of DNA sequences
    dnas = [list(dna.split(":")[1]) for dna in data.splitlines()]
    seq_len = len(dnas[0])

    children_gen = yield_children(dnas)

    all_similarity = 0
    for child, parents in children_gen:
        similarities = [0, 0]
        for idx in range(seq_len):
            for pi in range(2):
                if dnas[child][idx] == dnas[parents[pi]][idx]:
                    similarities[pi] += 1
        all_similarity += similarities[0] * similarities[1]
    return all_similarity


assert (
    part2("""1:GCAGGCGAGTATGATACCCGGCTAGCCACCCC
2:TCTCGCGAGGATATTACTGGGCCAGACCCCCC
3:GGTGGAACATTCGAAAGTTGCATAGGGTGGTG
4:GCTCGCGAGTATATTACCGAACCAGCCCCTCA
5:GCAGCTTAGTATGACCGCCAAATCGCGACTCA
6:AGTGGAACCTTGGATAGTCTCATATAGCGGCA
7:GGCGTAATAATCGGATGCTGCAGAGGCTGCTG""")
    == 1245
)


# Disjoint Set Union (Union-Find) data structure
#   with path compression and union by rank
class DSU:
    def __init__(self, size):
        self.parent = list(range(size))
        self.rank = [1] * size

    def find(self, x):
        # path compression
        if self.parent[x] != x:
            self.parent[x] = self.find(self.parent[x])
        return self.parent[x]

    def union(self, x, y):
        rootX = self.find(x)
        rootY = self.find(y)

        if rootX == rootY:
            return False

        # union by rank
        #   attach smaller rank tree under root of higher rank tree
        if self.rank[rootX] < self.rank[rootY]:
            self.parent[rootX] = rootY
        elif self.rank[rootX] > self.rank[rootY]:
            self.parent[rootY] = rootX
        else:
            # ranks are same, so make one as root and increment its rank by one
            self.parent[rootY] = rootX
            self.rank[rootX] += 1

        return True


def part3(data: str):
    # parse input data into list of DNA sequences
    dnas = [list(dna.split(":")[1]) for dna in data.splitlines()]
    candidates = len(dnas)

    dsu = DSU(candidates)
    children_gen = yield_children(dnas)

    # union children with their parents
    for child, (p1, p2) in children_gen:
        dsu.union(child, p1)
        dsu.union(child, p2)
    
    # record [size, scale_sum] for each group
    groups = {}
    for scale_idx in range(candidates):
        # find the group of the current candidate
        group = dsu.find(scale_idx)
        # update group's size and scale sum
        entry = groups.setdefault(group, [0, 0])
        entry[0] += 1
        entry[1] += scale_idx + 1
    
    # pick the largest group (ties broken by scale sum) and return its scale sum
    return max(groups.values())[1]


assert (
    part3("""1:GCAGGCGAGTATGATACCCGGCTAGCCACCCC
2:TCTCGCGAGGATATTACTGGGCCAGACCCCCC
3:GGTGGAACATTCGAAAGTTGCATAGGGTGGTG
4:GCTCGCGAGTATATTACCGAACCAGCCCCTCA
5:GCAGCTTAGTATGACCGCCAAATCGCGACTCA
6:AGTGGAACCTTGGATAGTCTCATATAGCGGCA
7:GGCGTAATAATCGGATGCTGCAGAGGCTGCTG""")
) == 12

assert (
    part3("""1:GCAGGCGAGTATGATACCCGGCTAGCCACCCC
2:TCTCGCGAGGATATTACTGGGCCAGACCCCCC
3:GGTGGAACATTCGAAAGTTGCATAGGGTGGTG
4:GCTCGCGAGTATATTACCGAACCAGCCCCTCA
5:GCAGCTTAGTATGACCGCCAAATCGCGACTCA
6:AGTGGAACCTTGGATAGTCTCATATAGCGGCA
7:GGCGTAATAATCGGATGCTGCAGAGGCTGCTG
8:GGCGTAAAGTATGGATGCTGGCTAGGCACCCG""")
) == 36
[–] Pyro@programming.dev 2 points 1 month ago* (last edited 1 month ago)

Python

# pairwise helps picking consecutive values easier
#   [A,B,C,D] -> [AB, BC, CD]
from itertools import pairwise

# returns names and dict[str, set[str]] of allowed transitions
def parse_data(data: str):
    transition_map = {}
    lines = data.splitlines()

    for rule_str in lines[2:]:
        before, after = rule_str.split(" > ")
        transition_map[before] = set(after.split(','))

    names = lines[0].split(',')
    return names, transition_map

# checks if name is valid according to transition_map
def is_name_valid(transition_map: dict[str, set[str]], name: str) -> bool:
    for a, b in pairwise(name):
        if a not in transition_map or b not in transition_map[a]:
            return False
    return True
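
# e.g. (illustrative): is_name_valid({'A': {'B'}, 'B': {'C'}}, 'ABC') is True,
#   while 'ACB' fails on the ('A', 'C') pair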


def part1(data: str):
    names, transition_map = parse_data(data)
    for name in names:
        if is_name_valid(transition_map, name):
            return name

# <assert snipped>

def part2(data: str):
    ans = 0
    names, transition_map = parse_data(data)
    for i, name in enumerate(names):
        if is_name_valid(transition_map, name):
            ans += i + 1
    return ans

# <assert snipped>

def part3(data: str):
    prefixes, transition_map = parse_data(data)

    # drop any prefix that starts with another prefix, to avoid double counting:
    #   every name built from the longer prefix is already counted under the shorter one
    #   (e.g. if both 'AB' and 'ABC' were listed, only 'AB' is kept)
    superset_prefixes = []
    for i, p1 in enumerate(prefixes):
        for j, p2 in enumerate(prefixes):
            if i != j and p1.startswith(p2):
                break
        else:
            # no break -> no other prefix is a prefix of p1, so keep it
            superset_prefixes.append(p1)

    variants = 0
    for prefix in superset_prefixes:
        if not is_name_valid(transition_map, prefix):
            continue
        
        # DFS to count all valid names of length 7 to 11 built by extending this prefix
        stack = [(prefix[-1], len(prefix))]
        while stack:
            prev, name_len = stack.pop()
            if name_len >= 7:
                variants += 1
            if name_len == 11:
                continue

            for next_letter in transition_map.get(prev, []):
                stack.append((next_letter, name_len + 1))
        
    return variants

# <assert snipped>
[–] Pyro@programming.dev 8 points 1 month ago (4 children)

Will we be using the same programming dot dev private leaderboard as last time?

[–] Pyro@programming.dev 2 points 1 month ago

Yeah, it may be possible for precision errors to accumulate in my solution but for EC the inputs are few enough that it doesn't really matter. Maybe I got a little lucky with my input too.

[–] Pyro@programming.dev 2 points 1 month ago* (last edited 1 month ago)

Thank you! I love seeing well-documented solutions for problems that I'm struggling with, so I try to add comments for non-trivial parts of any code I post. :)

[–] Pyro@programming.dev 2 points 1 month ago (2 children)

Python

# pairwise helps picking consecutive values easier
#   [A,B,C,D] -> [AB, BC, CD]
# combinations will give me combinations of elements in a sequence
#   [A,B,C,D] -> [AB, AC, AD, BC, BD, CD]
from itertools import pairwise, combinations


# a line will pass through the center if its two nails are on opposite ends,
#   i.e., total nails / 2 positions apart
def part1(data: str, nail_count: int = 32):
    opposite_dist = nail_count // 2
    nodes = [int(n) for n in data.split(",")]

    center_passes = 0
    for a, b in pairwise(nodes):
        if abs(a - b) == opposite_dist:
            center_passes += 1
    return center_passes


assert part1("1,5,2,6,8,4,1,7,3", 8) == 4


# BRUTE FORCE: take every pair of threads and check whether they intersect
def part2(data: str, nail_count: int = 32):
    def intersects(s1, s2):
        # expand the points
        (x1, x2), (y1, y2) = s1, s2
        # make sure x1 is the smaller nail and x2 is the bigger one
        if x1 > x2:
            x1, x2 = x2, x1
        # the segments cross if one endpoint of s2 lies between x1 and x2
        #   and the other lies outside that range
        if x1 < y1 < x2 and (y2 > x2 or y2 < x1):
            return True
        if x1 < y2 < x2 and (y1 > x2 or y1 < x1):
            return True
        return False

    nodes = [int(n) for n in data.split(",")]
    knots = 0
    for s1, s2 in combinations(pairwise(nodes), 2):
        if intersects(s1, s2):
            knots += 1

    return knots


assert part2("1,5,2,6,8,4,1,7,3,5,7,8,2", 8) == 21

from collections import defaultdict


# Much better than brute force: the cut count is maintained incrementally
def part3(data: str, nail_count: int = 256):
    connections = defaultdict(list)
    nodes = [int(n) for n in data.split(",")]

    # record all connections, both ways
    for a, b in pairwise(nodes):
        connections[a].append(b)
        connections[b].append(a)

    max_cuts = 0
    for node_a in range(1, nail_count + 1):
        cuts = 0
        for node_b in range(node_a + 2, nail_count + 1):
            # add new cuts for the node we just added between a and b
            for other_node in connections[node_b - 1]:
                if other_node > node_b or other_node < node_a:
                    cuts += 1
            # also add any cuts for threads that go between node a and b
            cuts += connections[node_a].count(node_b)

            # remove cuts that were going from BETWEEN node_a and node_b-1 (prev node_b) to node_b
            for other_node in connections[node_b]:
                if node_a < other_node < node_b:
                    cuts -= 1
            # also remove any cuts for threads that go between node a and b-1
            cuts -= connections[node_a].count(node_b - 1)

            max_cuts = max(max_cuts, cuts)

    return max_cuts


assert part3("1,5,2,6,8,4,1,7,3,6", 8) == 7