This repository serves as a backup for my Maxwell-TD code.

#!/usr/bin/env python3
import re
import argparse
from pathlib import Path

import pandas as pd

# Matches one "Debug tet_id=... face_id=... | face0=(...) ... tet3=(...)" line.
LINE_RE = re.compile(
    r"""
    ^\s*Debug\s+tet_id=(?P<tet_id>\d+)\s+face_id=(?P<face_id>\d+)\s*\|\s*
    face0=\((?P<face0>[^)]*)\)\s+
    face1=\((?P<face1>[^)]*)\)\s+
    face2=\((?P<face2>[^)]*)\)\s*\|\s*
    tet0=\((?P<tet0>[^)]*)\)\s+
    tet1=\((?P<tet1>[^)]*)\)\s+
    tet2=\((?P<tet2>[^)]*)\)\s+
    tet3=\((?P<tet3>[^)]*)\)\s*$
    """,
    re.VERBOSE,
)


def parse_triplet(s: str):
    # s is a comma-separated triple like "9.249584e-03,-5.663737e-19,9.812500e-02"
    parts = [p.strip() for p in s.split(",")]
    if len(parts) != 3:
        raise ValueError(f"Expected 3 components, got {len(parts)} in '{s}'")
    return tuple(float(p) for p in parts)


def parse_log(path: Path):
    # Parse every matching debug line into one wide-format row.
    rows = []
    with path.open("r", encoding="utf-8", errors="ignore") as f:
        for ln, line in enumerate(f, 1):
            m = LINE_RE.match(line)
            if not m:
                continue  # skip non-matching lines
            try:
                tet_id = int(m.group("tet_id"))
                face_id = int(m.group("face_id"))
                f0 = parse_triplet(m.group("face0"))
                f1 = parse_triplet(m.group("face1"))
                f2 = parse_triplet(m.group("face2"))
                t0 = parse_triplet(m.group("tet0"))
                t1 = parse_triplet(m.group("tet1"))
                t2 = parse_triplet(m.group("tet2"))
                t3 = parse_triplet(m.group("tet3"))
            except Exception as e:
                raise ValueError(f"Parse error on line {ln}: {e}") from e
            rows.append({
                "tet_id": tet_id, "face_id": face_id,
                "face0_x": f0[0], "face0_y": f0[1], "face0_z": f0[2],
                "face1_x": f1[0], "face1_y": f1[1], "face1_z": f1[2],
                "face2_x": f2[0], "face2_y": f2[1], "face2_z": f2[2],
                "tet0_x": t0[0], "tet0_y": t0[1], "tet0_z": t0[2],
                "tet1_x": t1[0], "tet1_y": t1[1], "tet1_z": t1[2],
                "tet2_x": t2[0], "tet2_y": t2[1], "tet2_z": t2[2],
                "tet3_x": t3[0], "tet3_y": t3[1], "tet3_z": t3[2],
            })
    return pd.DataFrame(rows)


def to_long(df):
    # Explode wide columns into long form: one row per node (face0..2, tet0..3).
    long_rows = []
    for _, r in df.iterrows():
        for kind in ["face0", "face1", "face2", "tet0", "tet1", "tet2", "tet3"]:
            long_rows.append({
                "tet_id": r["tet_id"],
                "face_id": r["face_id"],
                "node_kind": kind,
                "x": r[f"{kind}_x"],
                "y": r[f"{kind}_y"],
                "z": r[f"{kind}_z"],
            })
    return pd.DataFrame(long_rows)


def main():
    ap = argparse.ArgumentParser(description="Parse CUDA printf debug lines into CSV")
    ap.add_argument("--log", required=True, help="Path to text file containing the 'Debug tet_id=...' lines")
    ap.add_argument("--out-prefix", default="parsed_debug", help="Output file prefix (no extension)")
    args = ap.parse_args()

    log_path = Path(args.log)
    df = parse_log(log_path)
    if df.empty:
        print("No matching debug lines found.")
        return

    out_wide = Path(f"{args.out_prefix}_wide.csv")
    out_long = Path(f"{args.out_prefix}_long.csv")
    df.to_csv(out_wide, index=False)
    to_long(df).to_csv(out_long, index=False)
    print(f"Parsed {len(df)} tet/face entries")
    print(f"Saved wide CSV to: {out_wide}")
    print(f"Saved long CSV to: {out_long}")


if __name__ == "__main__":
    main()
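
For reference, here is a hypothetical input line and invocation. The coordinate values and the file names parse_debug.py / solver_output.txt are made up for illustration; only the line layout matched by LINE_RE is taken from the script above.

    Debug tet_id=42 face_id=1 | face0=(9.249584e-03,-5.663737e-19,9.812500e-02) face1=(0.0,0.0,0.0) face2=(0.1,0.0,0.0) | tet0=(0.0,0.0,0.0) tet1=(0.1,0.0,0.0) tet2=(0.0,0.1,0.0) tet3=(0.0,0.0,0.1)

    python3 parse_debug.py --log solver_output.txt --out-prefix parsed_debug

With the default prefix this writes parsed_debug_wide.csv (one row per tet/face entry) and parsed_debug_long.csv (one row per node).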