1. Calibration-based qubit selection
Load IBM Cloud credentials and select the backend 'ibm_torino'.
For each qubit q_i, read the relaxation time T_1^(i), the coherence time T_2^(i), and the √X gate error ϵ_√X^(i).
Minimize the weighted cost:
C(S) = ∑_(q_i ∈ S) [ α ϵ_√X^(i) − β T_1^(i) − γ T_2^(i) ],
with positive constants α, β, γ.
Then choose the best 100 qubits S* to form the initial layout.
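The main script below ranks qubits lexicographically by √X error, then T1, then T2, rather than scoring C(S) explicitly; the following is a minimal sketch of the weighted-cost version, assuming the calibration CSV column names used in that script, with α, β, γ as illustrative placeholder weights.
# Sketch: rank qubits by the weighted cost C(S) directly (illustrative, not the main script's method).
# Assumes the calibration CSV columns used in the main script below;
# alpha, beta, gamma are placeholder weights, not tuned values.
import pandas as pd

def best_qubits_weighted(cal_csv: str, n: int = 100,
                         alpha: float = 1.0, beta: float = 1e-3, gamma: float = 1e-3) -> list[int]:
    df = pd.read_csv(cal_csv)
    df.columns = df.columns.str.strip()
    # Lower cost = smaller sqrt(X) error and longer T1/T2 (all weights positive).
    cost = (alpha * df["√x (sx) error"]
            - beta * df["T1 (us)"]
            - gamma * df["T2 (us)"])
    return df.assign(cost=cost).nsmallest(n, "cost")["Qubit"].astype(int).tolist()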
2. Lattice indexing
Logical sites q_(r, c) (r, c ∈ 0…9) map to flat register indices by index(r, c) = 10r + c,
giving a row‑major 10 × 10 grid.
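A quick sanity check of the mapping; the index helper here is the same formula as in the main script (named idx there).
# Sketch: row-major mapping check, e.g. index(3, 7) = 37 and index(9, 0) = 90.
index = lambda r, c: 10 * r + c
assert index(3, 7) == 37 and index(9, 0) == 90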
3. Twistor shear phase layers
For each of the three layers ϕ ∈ {ϕ_0, ϕ_0 + Δϕ, ϕ_0 − Δϕ} apply:
U_twist(q_(r, c)) = R_Z(+2ϕ)  if r = c,
                    R_Z(−2ϕ)  if r + c = 9,
                    1         otherwise,
where R_Z(θ) = e^(−iθZ/2).
4. Mirror boundary phases
Impose static plate phases:
U_mirror(q_(0, c)) = R_Z(+ψ),
U_mirror(q_(9, c)) = R_Z(−ψ),
with ψ = π/6.
The total single‑qubit unitary is U = U_mirror U_twist (a combined code sketch for steps 3–5 appears after step 5).
5. Vacuum‑mode CNOT ladder
In every column c apply the chain:
CX(q_(0, c) -> q_(1, c)), CX(q_(1, c) -> q_(2, c)), …, CX(q_(8, c) -> q_(9, c)),
coupling the two mirror rows through nine internal links.
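A minimal, self-contained sketch of steps 3–5 (three twistor/mirror phase layers followed by the per-column CX ladders), using the index(r, c) = 10r + c mapping from step 2; it mirrors the loops in the main script below and is illustrative rather than a drop-in replacement.
# Sketch: twistor/mirror phase layers (steps 3-4) plus the column CX ladder (step 5).
from math import pi
from qiskit import QuantumCircuit

ROWS = COLS = 10
idx = lambda r, c: COLS * r + c                  # row-major index(r, c) = 10r + c
phi0, dphi, psi = pi / 8, pi / 16, pi / 6        # same parameters as the main script

qc = QuantumCircuit(ROWS * COLS)
for phi in (phi0, phi0 + dphi, phi0 - dphi):     # three twistor layers
    for r in range(ROWS):
        for c in range(COLS):
            q = idx(r, c)
            if r == c:                           # main diagonal: R_Z(+2*phi)
                qc.rz(+2 * phi, q)
            elif r + c == COLS - 1:              # anti-diagonal: R_Z(-2*phi)
                qc.rz(-2 * phi, q)
            if r == 0:                           # top mirror plate: R_Z(+psi)
                qc.rz(+psi, q)
            elif r == ROWS - 1:                  # bottom mirror plate: R_Z(-psi)
                qc.rz(-psi, q)
for c in range(COLS):                            # nine-link CX chain per column
    for r in range(ROWS - 1):
        qc.cx(idx(r, c), idx(r + 1, c))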
6. Measurement
Measure each qubit in the computational basis, recording bit‑strings b ∈ {0, 1}^100 over N = 32768 shots.
7. Pair‑creation observable
For each column c define top and bottom indices t_c = (0, c), b_c = (9, c).
The rate:
ρ_c = #{shots with b_(t_c) = b_(b_c) = 1}/N
quantifies |11> coincidences, the Casimir energy analogue.
Collect the set ρ = (ρ_0, …, ρ_9).
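A minimal sketch of this observable, assuming a Qiskit-style counts dictionary (little-endian bitstrings, hence the reversal before indexing); the function name and structure here are illustrative, not the main script's exact code.
# Sketch: boundary pair-creation rates rho_c from a counts dictionary (illustrative).
ROWS = COLS = 10
idx = lambda r, c: COLS * r + c                      # row-major index(r, c)

def pair_rates(counts: dict[str, int]) -> list[float]:
    """rho_c = fraction of shots with both mirror qubits of column c equal to 1."""
    shots = sum(counts.values())
    rates = [0] * COLS
    for bitstring, freq in counts.items():
        bits = bitstring[::-1]                       # Qiskit bitstrings are little-endian
        for c in range(COLS):
            if bits[idx(0, c)] == "1" and bits[idx(ROWS - 1, c)] == "1":
                rates[c] += freq
    return [r / shots for r in rates]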
8. JSON output
Store a JSON record with keys:
{"phi_base": φ_0, "delta_phi": Δφ, "mirror_phase_psi": ψ, "raw_counts": …, "boundary_pair_rates": {c: ρ_c}}.
Code:
# Main Circuit
# Imports
import json, logging, pandas as pd
from math import pi
from qiskit import QuantumCircuit, QuantumRegister, ClassicalRegister, transpile
from qiskit_ibm_runtime import QiskitRuntimeService, SamplerV2
logging.basicConfig(level=logging.INFO, format="%(levelname)s: %(message)s")
log = logging.getLogger("Dyn‑Twistor‑Casimir")
# IBMQ
TOKEN = "IBMQ_API_KEY"
INSTANCE = "IBMQ_CRN"
service = QiskitRuntimeService(
channel="ibm_cloud",
token=TOKEN,
instance=INSTANCE,
)
backend = service.backend("ibm_torino")
# Calibration‑based qubit ranking (100)
def best_qubits(cal_csv: str, n: int = 100) -> list[int]:
"""Rank by √X error, then T1, then T2; return best n qubits."""
    df = pd.read_csv(cal_csv)
df.columns = df.columns.str.strip()
ranked = df.sort_values(
["√x (sx) error", "T1 (us)", "T2 (us)"],
ascending=[True, False, False],
)
winners = ranked["Qubit"].head(n).astype(int).tolist()
    log.info("Initial‑layout qubits: %s", winners)
return winners
CAL_CSV = "/Users/steventippeconnic/Downloads/ibm_torino_calibrations_2025-05-14T16_23_55Z.csv"
layout = best_qubits(CAL_CSV, 100)
# Lattice helpers
ROWS = COLS = 10
def idx(r: int, c: int) -> int:
"""(row, col) → flat index in row‑major order."""
return COLS * r + c
# Registers
q_reg = QuantumRegister(ROWS * COLS, "q")
c_reg = ClassicalRegister(ROWS * COLS, "c")
qc = QuantumCircuit(q_reg, c_reg, name="Dyn‑Twistor‑Casimir‑10×10")
# Parameter set‑up
phi0 = pi / 8 # Base twistor phase
dphi = pi / 16 # Modulation amplitude
psi = pi / 6 # Mirror phase
phase_layers = [phi0, phi0 + dphi, phi0 - dphi] # three‑slice sinusoid
# Twistor + mirror phases (three layers)
for layer, φ in enumerate(phase_layers, 1):
for r in range(ROWS):
for c in range(COLS):
q = q_reg[idx(r, c)]
# Dual‑diagonal twistor shear
if r == c:
qc.rz(+2 * φ, q)
elif r + c == COLS - 1:
qc.rz(-2 * φ, q)
# Mirror boundaries (top & bottom rows) use constant ψ
if r == 0:
qc.rz(+psi, q)
elif r == ROWS - 1:
qc.rz(-psi, q)
qc.barrier(label=f"phase‑layer {layer}")
# Column‑wise CX ladder (9 links)
for c in range(COLS):
for r in range(ROWS - 1): # 0..8 -> link neighbor rows
        qc.cx(q_reg[idx(r, c)], q_reg[idx(r + 1, c)])
qc.barrier(label="vacuum‑ladder")
# Measurement
for q in range(ROWS * COLS):
qc.measure(q_reg[q], c_reg[q])
# Transpile
qc_t = transpile(
qc,
backend=backend,
initial_layout=layout,
optimization_level=3,
)
log.info("Transpiled depth : %d", qc_t.depth())
# Transpilation maps CX onto the backend's native two-qubit gate (e.g. CZ on Heron devices)
two_q = sum(v for k, v in qc_t.count_ops().items() if k in ("cx", "cz", "ecr"))
log.info("Transpiled 2Q gates : %d", two_q)
# Execute
SHOTS = 32768
sampler = SamplerV2(mode=backend)
job = sampler.run([qc_t], shots=SHOTS)
result = job.result()
creg_name = qc_t.cregs[0].name
counts = getattr(result[0].data, creg_name).get_counts()
# Boundary pair‑creation rates ρ_c (10 columns)
pair_counts = {c: 0 for c in range(COLS)}
def both_one(bits, i, j):
return bits[i] == "1" and bits[j] == "1"
for bitstring, freq in counts.items():
bits = bitstring[::-1] # flip to row‑major order
for col in range(COLS):
top, bottom = idx(0, col), idx(ROWS - 1, col)
if both_one(bits, top, bottom):
pair_counts[col] += freq
pair_rates = {k: v / SHOTS for k, v in pair_counts.items()}
# Json
out = {
"experiment_name" : "Dynamic‑Twistor Casimir (100‑qubit 10x10)",
"phi_base" : float(phi0),
"delta_phi" : float(dphi),
"mirror_phase_psi" : float(psi),
"raw_counts" : counts,
"boundary_pair_rates" : pair_rates,
}
JSON_PATH = "/Users/steventippeconnic/Documents/Dyn_Twistor_Casimir_10x10_0.json"
with open(JSON_PATH, "w") as fp:
json.dump(out, fp, indent=4)
log.info("Results saved → %s", JSON_PATH)
# Console summary
avg_rate = sum(pair_rates.values()) / COLS
log.info("Average ρ_c : %.5f", avg_rate)
for col, ρ in pair_rates.items():
    log.info(" column %2d : %.5f", col, ρ)
# End
# Code for all visuals from experiment JSON
# Imports
import json, numpy as np, matplotlib.pyplot as plt
from pathlib import Path
from scipy.stats import linregress
FILE = Path('/Users/steventippeconnic/Documents/QC/Dyn_Twistor_Casimir_10x10_0.json')
data = json.loads(FILE.read_text())
counts = data['raw_counts']
shots = sum(counts.values())
COLS = ROWS = 10
idx = lambda r,c: 10*r + c # row‑major flat index
# Expand counts to arrays
tops, bots, pairs, lattice = [], [], [], []
for b,f in counts.items():
bits = b[::-1] # flip to row‑major
t = [int(bits[idx(0,c)]) for c in range(COLS)]
btm = [int(bits[idx(ROWS-1,c)]) for c in range(COLS)]
pr = [u&v for u,v in zip(t,btm)]
lat = [int(bit) for bit in bits]
    tops.append(np.repeat([t], f, 0))
    bots.append(np.repeat([btm], f, 0))
pairs.append(np.repeat([pr], f, 0))
lattice.append(np.repeat([lat], f, 0))
tops = np.vstack(tops)
bots = np.vstack(bots)
pairs = np.vstack(pairs)
lattice= np.vstack(lattice)
ρ = pairs.mean(axis=0) # Boundary pair‑rates
P_top = tops.mean(axis=0)
P_bot = bots.mean(axis=0)
κ = np.divide(ρ, P_top*P_bot, out=np.zeros_like(ρ), where=(P_top*P_bot)>0)
P_qubit = lattice.mean(axis=0).reshape(ROWS,COLS)
corr = np.corrcoef(pairs.T)
ΔP = P_top - P_bot # Top–bottom asymmetry
multiplicity = pairs.sum(axis=1) # number of columns with a pair in each shot
# Joint mirror state distribution P(00,01,10,11) per column
joint = np.zeros((COLS, 4))
for b, f in counts.items():
bits = b[::-1]
top = [int(bits[idx(0,c)]) for c in range(COLS)]
bot = [int(bits[idx(ROWS-1,c)]) for c in range(COLS)]
for c, (u, v) in enumerate(zip(top, bot)):
joint[c, (u << 1) | v] += f
joint /= shots
labels = ['00', '01', '10', '11']
P01 = joint[:,1]; P10 = joint[:,2]
# Boundary pair‑creation rate ρ_c (10 columns)
plt.figure(figsize=(7,3))
plt.title('Boundary pair‑creation rate ρ_c (10 columns)')
plt.bar(range(COLS), ρ)
plt.xlabel('column c'); plt.ylabel('ρ_c')
plt.ylim(0, ρ.max()*1.15); plt.tight_layout()
plt.show()
# Per‑qubit 1 probability (10×10 lattice)
plt.figure(figsize=(4.8,4.8))
plt.title('Per‑qubit 1 probability (10×10 lattice)')
plt.imshow(P_qubit, cmap='viridis', vmin=0, vmax=P_qubit.max())
plt.colorbar(label='P(1)'); plt.xticks(range(COLS)); plt.yticks(range(ROWS))
plt.xlabel('Columns')
plt.ylabel('Rows')
plt.tight_layout(); plt.show()
# Two‑plate coherence κ_c = ρ_c / (P_top·P_bottom)
plt.figure(figsize=(7,3))
plt.title('Two‑plate coherence κ_c = ρ_c / (P_top·P_bottom)')
plt.bar(range(COLS), κ); plt.axhline(1, ls='--', lw=0.8, c='k')
plt.xlabel('column c'); plt.ylabel('κ_c'); plt.ylim(0, κ.max()*1.2)
plt.tight_layout(); plt.show()
# Column‑to‑column pair correlation
plt.figure(figsize=(4,4))
plt.title('Column‑to‑column pair correlation')
plt.imshow(corr, cmap='coolwarm', vmin=-1, vmax=1)
plt.colorbar(label='Correlation'); plt.xticks(range(COLS)); plt.yticks(range(COLS))
plt.xlabel('Columns')
plt.ylabel('Columns')
plt.tight_layout(); plt.show()
# Joint mirror‑state stacked bars (00,01,10,11) per column
plt.figure(figsize=(7,3))
plt.title('Mirror‑pair state distribution per column')
bottom = np.zeros(COLS)
for i, lab in enumerate(labels):
plt.bar(range(COLS), joint[:, i], bottom=bottom, label=lab)
bottom += joint[:, i]
plt.xlabel('column c'); plt.ylabel('probability'); plt.legend(ncol=4, fontsize='small')
plt.tight_layout(); plt.show()
# Multiplicity histogram (# columns with a pair per shot)
plt.figure(figsize=(6,3))
plt.title('Multiplicity of simultaneous mirror pairs')
plt.hist(multiplicity, bins=np.arange(12)-0.5, rwidth=0.9, log=True)
plt.xlabel('number of columns with a pair'); plt.ylabel('frequency (log scale)')
plt.tight_layout(); plt.show()
# Scatter: asymmetry ΔP = P_top − P_bot vs pair rate ρ_c
slope, intercept, r, *_ = linregress(ΔP, ρ)
plt.figure(figsize=(4.5,4))
plt.title(f'ρ_c vs ΔP (r = {r:+.2f})')
plt.scatter(ΔP, ρ, s=70)
x = np.linspace(ΔP.min(), ΔP.max(), 100)
plt.plot(x, intercept + slope*x, c='k', lw=1)
plt.axvline(0, ls='--', lw=0.8, c='grey')
plt.xlabel('ΔP = P_top − P_bottom'); plt.ylabel('ρ_c')
plt.tight_layout(); plt.show()
# Polar plot of pair‑creation landscape ρ_c (directional view)
θ = np.linspace(0, 2*np.pi, COLS, endpoint=False)
plt.figure(figsize=(5,5))
ax = plt.subplot(111, projection='polar')
ax.set_title('Polar view of ρ_c (dynamic twistor corridors)')
ax.bar(θ, ρ, width=2*np.pi/COLS, alpha=0.85)
ax.set_yticklabels([]); ax.set_theta_zero_location('N'); ax.set_theta_direction(-1)
plt.tight_layout(); plt.show()
# Bottom‑dominance index δ_c = P01 − P10
δ = P01 - P10
plt.figure(figsize=(7,3))
plt.title('Bottom‑dominance index δ_c = P(01) − P(10)')
plt.bar(range(COLS), δ, color='teal')
plt.axhline(0, ls='--', lw=0.8, c='k')
plt.xlabel('column c'); plt.ylabel('δ_c')
plt.tight_layout(); plt.show()
# Distance‑resolved coincidence gain
dist_gain = []
for d in range(1, COLS): # distance 1..9
both = ((pairs[:, :-d] & pairs[:, d:]).mean())
exp = ρ[:-d] * ρ[d:] # uncorrelated expectation
ratio = both / exp.mean() if exp.mean() else 0
dist_gain.append(ratio)
plt.figure(figsize=(6,3))
plt.title('Pair‑event gain vs column distance')
plt.bar(range(1, COLS), dist_gain)
plt.xlabel('column separation Δ'); plt.ylabel('gain = P(both)/E[both]')
plt.tight_layout(); plt.show()
# Lorenz curve of column contribution to total pairs
sorted_ρ = np.sort(ρ)[::-1]
cum_frac_pairs = np.cumsum(sorted_ρ) / sorted_ρ.sum()
cum_frac_cols = np.arange(1, COLS+1) / COLS
plt.figure(figsize=(4,4))
plt.title('Lorenz curve for boundary pair creation')
plt.plot(cum_frac_cols, cum_frac_pairs, marker='o')
plt.plot([0,1], [0,1], ls='--', c='k', lw=0.8) # equality line
plt.xlabel('fraction of columns'); plt.ylabel('fraction of total pairs')
plt.tight_layout(); plt.show()
# Survival CDF of multiplicity (log‑log)
vals, hist = np.unique(multiplicity, return_counts=True)
survival = np.cumsum(hist[::-1])[::-1] / shots # survival P(M ≥ m)
plt.figure(figsize=(5,3))
plt.title('Survival CDF of simultaneous pair multiplicity')
plt.loglog(vals, survival, marker='o')
plt.xlabel('multiplicity m (columns with a pair)'); plt.ylabel('P(M ≥ m)')
plt.grid(True, which='both', ls=':', lw=0.6)
plt.tight_layout(); plt.show()
# End