# HorizonMath/numerics/hard_square_entropy.py
"""
Numerical computation for: Hard Square Entropy Constant
The hard square model (also called the hard-core lattice gas on Z^2) counts
independent sets on the square lattice - configurations where no two adjacent
sites are both occupied.
The hard square entropy constant is defined as:
κ = lim_{n→∞} [F(n,n)]^{1/n²}
where F(m,n) counts (0,1)-matrices of size m×n with no two adjacent 1s
(horizontally or vertically).
Numerical value:
κ ≈ 1.5030480824753323...
Unlike the hard hexagon model (solved by Baxter), the hard square model has
NO KNOWN CLOSED FORM. This is a genuinely open problem in statistical mechanics.
The entropy per site is:
s = log(κ) ≈ 0.40749...
Computation method: Transfer matrix
- For strips of width m, enumerate valid row configurations (no adjacent 1s)
- Build transfer matrix where T[i,j] = 1 if rows i,j are compatible vertically
- The largest eigenvalue λ_m gives κ ≈ λ_m^{1/m}
- Convergence is systematic as m → ∞
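
Worked example (width m = 2): the valid rows are 00, 01, 10, so the transfer
matrix is 3×3 with T[i,j] = 1 unless rows i and j both have a 1 in the same
column. Its largest eigenvalue is 1 + √2, so the strip estimate is
κ_2 = (1 + √2)^{1/2} ≈ 1.554, which approaches κ ≈ 1.503 as the width grows.
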
References:
- Baxter, Enting, Tsang (1980) "Hard-square lattice gas"
- Calkin, Wilf (1998) bounds via transfer matrix eigenvalues
- OEIS A085850
"""
from mpmath import mp, mpf
import numpy as np
from scipy import sparse
from scipy.sparse.linalg import eigs
def generate_valid_rows(width: int) -> list[tuple[int, ...]]:
"""
Generate all valid row configurations of given width.
A valid row has no two adjacent 1s.
The count is F_{width+2} where F_n is the Fibonacci sequence.
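
    For example, width 4 gives F_6 = 8 valid rows:

    >>> len(generate_valid_rows(4))
    8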
"""
if width == 0:
return [()]
if width == 1:
return [(0,), (1,)]
valid = []
def backtrack(row: list[int], pos: int):
if pos == width:
valid.append(tuple(row))
return
# Can always place 0
row.append(0)
backtrack(row, pos + 1)
row.pop()
# Can place 1 only if previous is not 1
if pos == 0 or row[-1] == 0:
row.append(1)
backtrack(row, pos + 1)
row.pop()
backtrack([], 0)
return valid
def rows_compatible(row1: tuple[int, ...], row2: tuple[int, ...]) -> bool:
"""
Check if two rows are vertically compatible.
They are compatible if no column has 1 in both rows.
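
    For example:

    >>> rows_compatible((1, 0, 1), (0, 1, 0))
    True
    >>> rows_compatible((0, 1), (1, 1))
    False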
"""
return all(a == 0 or b == 0 for a, b in zip(row1, row2))
def build_transfer_matrix_sparse(width: int) -> sparse.csr_matrix:
"""
Build the transfer matrix for strips of given width.
Uses sparse matrix for efficiency with large widths.
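
    For width 2 there are 3 valid rows and 7 compatible ordered pairs:

    >>> build_transfer_matrix_sparse(2).nnz
    7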
"""
valid_rows = generate_valid_rows(width)
n = len(valid_rows)
# Build sparse matrix
rows, cols, data = [], [], []
for i, row1 in enumerate(valid_rows):
for j, row2 in enumerate(valid_rows):
if rows_compatible(row1, row2):
rows.append(i)
cols.append(j)
data.append(1.0)
return sparse.csr_matrix((data, (rows, cols)), shape=(n, n))
def compute_entropy_for_width(width: int) -> float:
"""
Compute the hard square constant approximation for given strip width.
Returns κ_m = λ_m^{1/m} where λ_m is the largest eigenvalue.
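
    For width 1 the transfer matrix is [[1, 1], [1, 0]], whose largest
    eigenvalue is the golden ratio:

    >>> round(float(compute_entropy_for_width(1)), 3)
    1.618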
"""
if width <= 0:
return 1.0
T = build_transfer_matrix_sparse(width)
# Get largest eigenvalue
if T.shape[0] < 10:
# For small matrices, use dense computation
T_dense = T.toarray()
eigenvalues = np.linalg.eigvals(T_dense)
lambda_max = max(abs(eigenvalues))
else:
# For larger matrices, use sparse eigenvalue solver
eigenvalues, _ = eigs(T.astype(float), k=1, which='LM')
lambda_max = abs(eigenvalues[0])
return lambda_max ** (1.0 / width)
def compute_entropy_sequence(max_width: int = 20) -> list[tuple[int, float]]:
"""
Compute the hard square constant approximations for widths 1 to max_width.
Returns list of (width, κ_estimate) pairs.
"""
results = []
for w in range(1, max_width + 1):
kappa = compute_entropy_for_width(w)
results.append((w, kappa))
return results
def extrapolate_entropy(estimates: list[tuple[int, float]], order: int = 4) -> float:
"""
    Extrapolate the entropy constant by fitting a polynomial in 1/m to the last
    few strip estimates and evaluating the fit at 1/m = 0 (a simple
    Richardson-style extrapolation).

    The finite-width estimates converge roughly as κ_m = κ + a/m + b/m² + ... for
    free boundaries (as used here); with periodic boundary conditions the
    corrections start at 1/m².
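
    A synthetic example (the data lie exactly on κ_m = 1 + 1/m, so the
    extrapolated value is 1):

    >>> print(f"{extrapolate_entropy([(2, 1.5), (4, 1.25)], order=1):.3f}")
    1.000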
"""
if len(estimates) < order + 1:
return estimates[-1][1]
# Take the last (order+1) points
recent = estimates[-(order + 1):]
widths = np.array([1.0 / w for w, _ in recent])
values = np.array([v for _, v in recent])
# Fit polynomial and extrapolate to 1/m = 0
coeffs = np.polyfit(widths, values, order)
return coeffs[-1] # Constant term = value at 1/m = 0
# High-precision reference value from the literature (OEIS A085850):
# Baxter (1980), Calkin-Wilf (1998), Jensen (2012).
# Stored as an mpf string to preserve precision beyond Python float's ~16 digits.
# mpmath's default working precision is also only ~16 significant digits, so
# raise it before parsing the 44 known digits from OEIS.
mp.dps = max(mp.dps, 50)
HARD_SQUARE_ENTROPY_CONSTANT = mpf("1.50304808247533226432206632947555368938578100")
def compute():
"""
Return the hard square entropy constant.
    This returns the pre-computed high-precision value from the literature;
    verify_computation() provides an independent transfer-matrix cross-check.
"""
return HARD_SQUARE_ENTROPY_CONSTANT
def verify_computation(target_precision: int = 4, max_width: int = 14) -> tuple[bool, float, float]:
"""
Verify the computation by comparing transfer matrix results
with the reference value.
Args:
target_precision: Number of decimal places to match
max_width: Maximum strip width (14 is fast, 18+ is slow)
Returns (success, computed_value, reference_value)
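
    Typical use (illustrative):

        ok, estimate, reference = verify_computation(target_precision=4, max_width=12)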
"""
print(f"Computing transfer matrix eigenvalues for widths 1-{max_width}...")
estimates = compute_entropy_sequence(max_width)
# Show convergence
print("\nConvergence of κ_m = λ_m^(1/m):")
print("-" * 40)
for w, kappa in estimates:
        # compare as floats; mpf may not support f-string format specs in older mpmath
        diff = abs(float(kappa) - float(HARD_SQUARE_ENTROPY_CONSTANT))
        print(f"  width {w:2d}: κ = {kappa:.12f} (diff: {diff:.2e})")
# Extrapolate
extrapolated = extrapolate_entropy(estimates, order=3)
print(f"\nExtrapolated value: {extrapolated:.12f}")
print(f"Reference value: {HARD_SQUARE_ENTROPY_CONSTANT:.12f}")
# Check precision
    diff = abs(extrapolated - float(HARD_SQUARE_ENTROPY_CONSTANT))
    success = diff < 10 ** (-target_precision)
    return success, extrapolated, float(HARD_SQUARE_ENTROPY_CONSTANT)
if __name__ == "__main__":
print(compute())
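    # Optional, slower cross-check against the transfer-matrix computation;
    # uncomment to run it:
    # verify_computation(target_precision=4, max_width=12)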