from concurrent.futures import ProcessPoolExecutor, ThreadPoolExecutor, as_completed
from pathlib import Path
from tqdm import tqdm

import os
import numpy as np
import pandas as pd
import shutil
import sys
import matplotlib.pyplot as plt
import csv
import fnmatch
import random
import copy
import concurrent.futures
import urllib.request
import glob
import re

def extract_selected_log_values(filepath):
    """Pull selected scalar settings out of the solver log."""
    targets = {
        "Final Time(sec)": None,
        "dt_sample": None,
        "tsPerSampling": None,
    }

    with open(filepath, 'r') as f:
        lines = f.readlines()

    in_block = False
    for line in lines:
        if "==========================================" in line and "PERFORMING INFORMATION" in line:
            in_block = True
            continue
        if in_block and "==========================================" in line:
            break  # End of block

        for key in targets:
            if key in line:
                match = re.search(r'=\s*([\d.eE+-]+)', line)
                if match:
                    targets[key] = float(match.group(1))

    missing = [k for k, v in targets.items() if v is None]
    if missing:
        raise ValueError(f"Could not find in {filepath}: {missing}")

    return {
        "FinalTime": targets["Final Time(sec)"],
        "dt_sample": targets["dt_sample"],
        "tsPerSampling": int(targets["tsPerSampling"]),
    }

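# Illustrative log lines the parser above expects inside the PERFORMING
# INFORMATION block (values hypothetical; the regex grabs the number after '='):
#   Final Time(sec) = 5.000e-09
#   dt_sample = 2.000e-12
#   tsPerSampling = 100
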
# Read the solver log
log_file_path = "computeDGTD.log"  # Replace with your log file path
result = extract_selected_log_values(log_file_path)

FinalTime = result["FinalTime"]
dt_sample = result["dt_sample"]
tsPerSampling = result["tsPerSampling"]

print("\n-------------------------------------------------\n")

print("FinalTime =", FinalTime)
print("dt_sample =", dt_sample)
print("tsPerSampling =", tsPerSampling)

# === Constants and Paths ===
c0 = 3e8  # speed of light (m/s)
current_dir = os.getcwd()

# ====================== USER DEFINED ========================== #
FileName = "patch_model"
sc_curFolder = current_dir + "/CURRENT_SC"
inc_curFolder = current_dir + "/CURRENT_INC"
sc_curFileName = sc_curFolder + "/Currents_" + FileName + "_"
inc_curFileName = inc_curFolder + "/Currents_" + FileName + "_"

probe_Folder = current_dir + "/PROBES"
probe_FileName = probe_Folder + "/Currents_" + FileName + "_"

total_curFolder = current_dir + "/CURRENT_Total"
total_curFileName = total_curFolder + "/Currents_" + FileName + "_"

# Time interval (sampling interval dt)
dt = dt_sample
# Solver time steps between successive output samples
steps = tsPerSampling
# ====================== USER DEFINED ========================== #

# Sampling frequency f_s = 1 / dt
f_s = 1.0 / dt
# Nyquist frequency
f_nyq = f_s / 2.0

# Count the probe .csv files (one file per time sample)
nfiles = len(fnmatch.filter(os.listdir(probe_Folder), '*.csv'))
print('Number of time domain files:', nfiles)

# Number of FFT samples (zero-padded beyond the record length)
NFFT = nfiles * 5

print("\n-------------------------------------------------\n")

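# Zero-padding note: NFFT = 5 * nfiles only interpolates the spectrum onto a
# finer grid; the FFT bin spacing is f_s / NFFT = 1 / (NFFT * dt), and no new
# information is added. Printed here as a quick reference.
print(f"FFT bin spacing: {(f_s / NFFT) * 1e-9:.6f} GHz")
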
# ================================================= #
# ===== Parse PORT line from computeDGTD.log ====== #
# ================================================= #

PORT_RE = re.compile(
    r"PORT:\s*ro=\[(?P<ro>[^]]+)\],\s*r1=\[(?P<r1>[^]]+)\],\s*Z=(?P<Z>[+\-0-9.eE]+),"
    r"\s*\|E\|=(?P<Emag>[+\-0-9.eE]+),\s*Flag=(?P<Flag>\d+),\s*Tdist=(?P<Tdist>\d+),"
    r"\s*f=(?P<f>[+\-0-9.eE]+),\s*t0=(?P<t0>[+\-0-9.eE]+),\s*tau=(?P<tau>[+\-0-9.eE]+)"
)

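# Illustrative PORT line this pattern matches (one line in the log; values are
# hypothetical, with f in MHz):
#   PORT: ro=[-9.4999e-03,0.0e+00,-3.5e-03], r1=[-9.4999e-03,0.0e+00,-2.0e-03], Z=5.0e+01, |E|=1.0e+00, Flag=1, Tdist=1, f=2.350e+03, t0=2.0e-10, tau=5.0e-11
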
def _parse_bracket_vec(s: str):
    # "-9.4999e-03,0.0e+00,-3.5e-03" -> np.array([.., .., ..])
    parts = [float(p) for p in s.split(",")]
    if len(parts) != 3:
        raise ValueError(f"Expected 3 components in vector, got: {s}")
    return np.array(parts, dtype=float)

def parse_port_from_log(log_path: str):
    with open(log_path, "r") as f:
        for line in f:
            if "PORT:" in line:
                m = PORT_RE.search(line)
                if not m:
                    continue
                ro = _parse_bracket_vec(m.group("ro"))
                r1 = _parse_bracket_vec(m.group("r1"))
                Z = float(m.group("Z"))
                Emag = float(m.group("Emag"))
                Flag = int(m.group("Flag"))    # often a polarization/mode flag
                Tdist = int(m.group("Tdist"))  # TimeDistributionFlag (0/1/2)
                fMHz = float(m.group("f"))     # MHz, e.g. 2.350e+03 = 2.35 GHz
                t0 = float(m.group("t0"))
                tau = float(m.group("tau"))

                # Derived quantities
                khat = r1 - ro
                nrm = np.linalg.norm(khat)
                if nrm > 0:
                    khat = khat / nrm
                else:
                    # Fallback direction if r1 == ro
                    khat = np.array([0.0, 0.0, 1.0], dtype=float)

                return {
                    "ro": ro, "r1": r1, "Z": Z, "Emag": Emag,
                    "Flag": Flag, "Tdist": Tdist, "fMHz": fMHz,
                    "t0": t0, "tau": tau, "khat": khat,
                }
    raise RuntimeError("No parsable PORT line found in log.")

# ---- use it ----
port = parse_port_from_log(log_file_path)
t_idx = np.arange(nfiles, dtype=int)  # 0 .. nfiles-1
t_sec = t_idx * dt_sample             # seconds

print("Parsed PORT:")
print("  ro   =", port["ro"])
print("  r1   =", port["r1"])
print("  Z    =", port["Z"])
print("  |E|  =", port["Emag"])
print("  Flag =", port["Flag"])
print("  Tdist(TimeDistributionFlag) =", port["Tdist"])
print("  f(MHz) =", port["fMHz"])
print("  t0   =", port["t0"])
print("  tau  =", port["tau"])
print("  khat =", port["khat"])

print("\n-------------------------------------------------\n")

####################################
###### Calculate S-parameter #######
####################################

### !!!!!!!!!!!!!! ###
### Incident Field ###
### !!!!!!!!!!!!!! ###

print("\n============= Collect Incident Fields at Ports ======================\n")


def read_incident_csv(path="port_quadrature_export.csv") -> pd.DataFrame:
    df = pd.read_csv(path)
    df.to_csv("incident_field_clean.csv", index=False)  # keep a cleaned copy alongside
    return df

incident = read_incident_csv("port_quadrature_export.csv")
print(f"Incident field loaded: {incident.shape}")
print(incident)

Pi = np.pi
MEGA = 1e6
Vo = c0

def time_modulation_inc(flag, t, t0, khat, r, r0, freq_m, Emag, Hmag, tau):
    """Time modulation of the incident port field at points r.

    flag (TimeDistributionFlag): 0 = CW sine, 1 = Gaussian-modulated cosine,
    2 = Neumann pulse (derivative-of-Gaussian envelope). freq_m is in MHz.
    Returns (IncE, IncH), each of shape (T, P).
    """
    omega = 2.0 * Pi * freq_m * MEGA
    r = np.asarray(r, float)
    r0 = np.asarray(r0, float)
    khat = np.asarray(khat, float)
    nrm = np.linalg.norm(khat)
    if nrm > 0:
        khat = khat / nrm
    t = np.asarray(t, float)

    proj = (r - r0) @ khat  # (P,)
    if flag == 0:
        kr = proj * (omega / Vo)
        phase = kr[None, :] - omega * t[:, None]
        IncE = Emag * np.sin(phase)
        IncH = Hmag * np.sin(phase)
    elif flag == 1:
        Exponent = t[:, None] - t0 - proj[None, :] / Vo
        CosMod = np.cos(omega * (t[:, None] - t0))
        env = np.exp(-(Exponent**2) / (tau * tau))
        IncE = Emag * CosMod * env
        IncH = Hmag * CosMod * env
    elif flag == 2:
        Exponent = t[:, None] - t0 - proj[None, :] / Vo
        Neuman = (2.0 * Exponent) / (tau * tau)
        env = np.exp(-(Exponent**2) / (tau * tau))
        IncE = Emag * Neuman * env
        IncH = Hmag * Neuman * env
    else:
        raise ValueError("Unknown TimeDistributionFlag")
    return IncE, IncH

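# Quick sanity check (a sketch, not part of the pipeline): for the flag-1
# Gaussian-modulated cosine evaluated at the port origin itself (proj = 0),
# the envelope peak should land near sample t0/dt_sample when t0 falls inside
# the time record.
_chk_E, _chk_H = time_modulation_inc(1, t_sec, port["t0"], port["khat"],
                                     port["ro"][None, :], port["ro"],
                                     port["fMHz"], 1.0, 1.0, port["tau"])
print("Envelope peak sample:", int(np.argmax(np.abs(_chk_E[:, 0]))),
      "| expected ~", int(round(port["t0"] / dt_sample)))
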
# --- extract Et/Ht from the incident CSV and apply the time modulation ---
idf = incident.copy()
idf.columns = [c.strip().lower() for c in idf.columns]

rQ = idf[["x", "y", "z"]].to_numpy(float)            # (Q, 3) quadrature points
EtQ = idf[["et_x", "et_y", "et_z"]].to_numpy(float)  # (Q, 3) modal E template
HtQ = idf[["ht_x", "ht_y", "ht_z"]].to_numpy(float)  # (Q, 3) modal H template

# For E_updateinc_PORT equivalence: Hmag = Emag (no Z scaling)
Emag = port["Emag"]
Hmag = port["Emag"]

IncE, IncH = time_modulation_inc(
    port["Tdist"], t_sec, port["t0"],
    port["khat"], rQ, port["ro"],
    port["fMHz"], Emag, Hmag, port["tau"],
)  # each (T, Q)

E_inc = IncE[:, :, None] * EtQ[None, :, :]  # (T, Q, 3)
H_inc = IncH[:, :, None] * HtQ[None, :, :]  # (T, Q, 3)

print("E_inc shape:", E_inc.shape, "H_inc shape:", H_inc.shape)

T, Q, C = E_inc.shape
assert C == 3, f"Expected 3 components, got {C}"

# Sampling/FFT setup (no window)
dt = dt_sample
f_s = 1.0 / dt
f_nyq = f_s / 2.0
N = NFFT  # set earlier as nfiles * 5

# One-sided frequency axis
freqs = np.fft.rfftfreq(N, d=dt)  # Hz
freqs_ghz = freqs * 1e-9

# FFT of incident field (NO WINDOW)
# Shape: (F, Q, 3), complex
E_f = np.fft.rfft(E_inc, n=N, axis=0) / N
H_f = np.fft.rfft(H_inc, n=N, axis=0) / N

# Mean spectral energy across quadrature points & components
mean_spec_energy = np.mean(np.abs(E_f)**2, axis=(1, 2))  # (F,)

# Threshold at 10% of peak
peakE = float(mean_spec_energy.max()) if mean_spec_energy.size else 0.0
thresh = 0.10 * (peakE if peakE > 0 else 1.0)

mask = mean_spec_energy >= thresh

# Turn the boolean mask into contiguous frequency bands
def mask_to_bands(mask_bool, xvals):
    idx = np.flatnonzero(mask_bool)
    if idx.size == 0:
        return []
    bands = []
    start = prev = idx[0]
    for k in idx[1:]:
        if k == prev + 1:
            prev = k
        else:
            bands.append((start, prev))
            start = prev = k
    bands.append((start, prev))
    # Return with indices and frequency endpoints
    return [(i0, i1, xvals[i0], xvals[i1]) for (i0, i1) in bands]

bands = mask_to_bands(mask, freqs)

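# Illustration (hypothetical mask): hot bins {2, 3, 4} and {8} collapse into
# two inclusive bands:
#   mask_to_bands(np.array([0, 0, 1, 1, 1, 0, 0, 0, 1], dtype=bool), xvals)
#   -> [(2, 4, xvals[2], xvals[4]), (8, 8, xvals[8], xvals[8])]
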
print("=== Bands where mean |E_inc(f)|^2 ≥ 10% of peak ===")
|
|
if not bands:
|
|
print("None found. (Check dt/NFFT/time record.)")
|
|
else:
|
|
for (i0, i1, f0, f1) in bands:
|
|
print(f"Idx {i0}–{i1} | {f0*1e-9:.6f}–{f1*1e-9:.6f} GHz "
|
|
f"(points: {i1 - i0})")
|
|
|
|
# If you want a single sweep range (union):
|
|
if bands:
|
|
idx_min, fmin_hz = bands[0][0], bands[0][2]
|
|
idx_max, fmax_hz = bands[-1][1], bands[-1][3]
|
|
print(f"\nOverall sweep range: idx {idx_min}–{idx_max} | "
|
|
f"{fmin_hz*1e-9:.6f}–{fmax_hz*1e-9:.6f} GHz "
|
|
f"(points: {idx_max - idx_min})")
|
|
else:
|
|
idx_min = idx_max = 0
|
|
fmin_hz = fmax_hz = 0.0
|
|
|
|
# Plot normalized mean spectrum energy, highlight all ≥10% bands
|
|
norm_energy = mean_spec_energy / (peakE if peakE > 0 else 1.0)
|
|
|
|
plt.figure(figsize=(10, 5))
plt.plot(freqs_ghz, norm_energy, label="Mean |E_inc(f)|² (normalized)")
for (i0, i1, f0, f1) in bands:
    plt.axvspan(f0*1e-9, f1*1e-9, alpha=0.25,
                label="≥10% band" if i0 == bands[0][0] else None)
plt.axhline(0.10, linestyle="--", linewidth=1)

# ---- configurable x-axis limit (in GHz) ----
xmax_ghz = 10.0  # plot upper limit; adjust as needed

# Clamp to the available frequency range (Nyquist)
nyq_ghz = freqs_ghz[-1]
xmax_ghz_plot = min(xmax_ghz, nyq_ghz)
if xmax_ghz > nyq_ghz:
    print(f"[note] Requested x-limit {xmax_ghz:.3f} GHz exceeds Nyquist "
          f"{nyq_ghz:.3f} GHz; clamped to {xmax_ghz_plot:.3f} GHz.")

plt.xlim(0.0, xmax_ghz_plot)
plt.xticks(np.arange(0.0, xmax_ghz_plot + 0.25, 0.5))  # ticks track the clamped limit
plt.xlabel("Frequency (GHz)")
plt.ylabel("Normalized mean |E_inc(f)|²")
plt.title(f"Incident mean spectrum energy (NFFT={N}, no window)")
plt.grid(True)
plt.legend(loc="best")
plt.tight_layout()
plt.savefig("incident_mean_spectrum.png", dpi=150)
plt.close()

# Save the spectrum to Excel
df_spec = pd.DataFrame({
    "Frequency (GHz)": freqs_ghz,
    "Mean |E_inc(f)|^2": mean_spec_energy,
    "Normalized Mean |E_inc(f)|^2": norm_energy,
})
df_spec.to_excel("incident_mean_spectrum.xlsx", index=False)

# Also save sweep-band info as a small CSV for downstream scripts
if bands:
    df_bands = pd.DataFrame(
        [{"idx_min": i0, "idx_max": i1, "fmin_GHz": f0*1e-9, "fmax_GHz": f1*1e-9,
          "num_points": i1 - i0 + 1}
         for (i0, i1, f0, f1) in bands]
    )
    df_bands.to_csv("incident_sweep_bands.csv", index=False)

### !!!!!!!!!!!!!!!!!!!! ###
### Read the Total Field ###
### !!!!!!!!!!!!!!!!!!!! ###

print("\n============= Collect Total Fields at Ports ======================\n")


### Read probe locations ###

def read_probe_coords(path="patch_model.probe") -> pd.DataFrame:
    # Accepts comma or whitespace separators.
    df = pd.read_csv(path, sep=None, engine="python")
    # Normalize the header
    df.columns = [c.strip().upper() for c in df.columns]
    assert set(["X", "Y", "Z"]).issubset(df.columns), "Probe file must have X,Y,Z columns"
    df.to_csv("probe_locations_clean.csv", index=False)
    return df[["X", "Y", "Z"]]

probes = read_probe_coords("patch_model.probe")
print(f"Probe coordinates loaded: {probes.shape}")

# Columns expected in each per-timestep probe CSV
_TF_COLS = ["Ex", "Ey", "Ez", "Hx", "Hy", "Hz"]

def _extract_tidx(fname: str) -> int:
    # Time index is the trailing integer before ".csv", e.g. "..._00040.csv" -> 40
    m = re.search(r"_([0-9]+)\.csv$", fname)
    if not m:
        raise ValueError(f"Cannot parse time index from filename: {fname}")
    return int(m.group(1))

def _read_one_probe_csv(path: str) -> pd.DataFrame:
    df = pd.read_csv(path, sep=None, engine="python", header=0)
    df.columns = [c.strip() for c in df.columns]
    keep = [c for c in _TF_COLS if c in df.columns]
    if len(keep) < 6:
        raise ValueError(f"Missing expected field columns in {path}")
    df = df[keep].copy()
    df["probe_idx"] = np.arange(len(df), dtype=int)  # row order = probe order
    df["t_idx"] = _extract_tidx(Path(path).name)
    return df

def read_total_field_series(probe_folder: str, file_name: str, workers: int = 12):
    pat1 = str(Path(probe_folder) / f"Probes_{file_name}_*.csv")
    pat2 = str(Path(probe_folder) / f"Currents_{file_name}_*.csv")
    files = sorted(glob.glob(pat1)) or sorted(glob.glob(pat2))
    if not files:
        raise FileNotFoundError(f"No probe CSVs found in {probe_folder}")

    results = []
    with ThreadPoolExecutor(max_workers=workers) as ex:
        pending = {ex.submit(_read_one_probe_csv, f): f for f in files}
        for fut in tqdm(as_completed(pending), total=len(pending),
                        desc="Reading probe CSVs", unit="file"):
            results.append(fut.result())

    total_df = pd.concat(results, ignore_index=True)
    total_df.sort_values(["t_idx", "probe_idx"], inplace=True, kind="mergesort")
    total_df.reset_index(drop=True, inplace=True)

    # Dense (time, probe, component) arrays
    t_vals = np.sort(total_df["t_idx"].unique())
    p_vals = np.sort(total_df["probe_idx"].unique())
    T, P = len(t_vals), len(p_vals)

    t_map = {t: i for i, t in enumerate(t_vals)}
    p_map = {p: i for i, p in enumerate(p_vals)}

    E = np.zeros((T, P, 3), dtype=float)
    H = np.zeros((T, P, 3), dtype=float)

    for _, r in total_df.iterrows():
        ti = t_map[r["t_idx"]]
        pi = p_map[r["probe_idx"]]
        E[ti, pi, :] = [r["Ex"], r["Ey"], r["Ez"]]
        H[ti, pi, :] = [r["Hx"], r["Hy"], r["Hz"]]
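    # Vectorized alternative to the iterrows() fill above (a sketch; assumes
    # every (t_idx, probe_idx) pair appears exactly once, as the dense grid
    # implies):
    #   ti = total_df["t_idx"].map(t_map).to_numpy()
    #   pi = total_df["probe_idx"].map(p_map).to_numpy()
    #   E[ti, pi, :] = total_df[["Ex", "Ey", "Ez"]].to_numpy(float)
    #   H[ti, pi, :] = total_df[["Hx", "Hy", "Hz"]].to_numpy(float)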
    return total_df, E, H

total_df, E_arr, H_arr = read_total_field_series(probe_Folder, FileName, workers=12)

print("total_df shape:", total_df.shape)
print("E_arr shape:", E_arr.shape, "H_arr shape:", H_arr.shape)

# Total fields in the frequency domain (same transform and scaling as the incident)
E_f_tot = np.fft.rfft(E_arr, n=N, axis=0) / N  # (F, Q, 3) complex
H_f_tot = np.fft.rfft(H_arr, n=N, axis=0) / N  # (F, Q, 3) complex

###################################
#### Calculate the S-parameters ###
###################################

print("\n============= Calculate S Parameters ======================\n")

# Reflected phasor (E_f_tot and E_f were computed above)
E_f_ref = E_f_tot - E_f  # (F, Q, 3)

# E-only overlay
num = np.sum(np.sum(E_f_ref * np.conjugate(E_f), axis=2), axis=1)     # (F,)
den = 0.25 * np.sum(np.sum(E_f * np.conjugate(E_f), axis=2), axis=1)  # (F,)
S11 = num / den
S11_mag = np.abs(S11)
S11_dB = 20.0 * np.log10(np.clip(S11_mag, 1e-12, None))

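# In symbols, per frequency bin f:
#   S11(f) = sum_q E_ref(f,q) · conj(E_inc(f,q)) / (0.25 · sum_q |E_inc(f,q)|^2)
# i.e. the reflected phasor projected onto the incident port mode. The 0.25 is
# presumably the solver's modal power normalization; kept as-is (assumption).
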
# Concatenate band indices (inclusive ranges)
idx_concat = np.concatenate([np.arange(i0, i1 + 1) for (i0, i1, _, _) in bands])
idx_min, fmin_hz = bands[0][0], bands[0][2]
idx_max, fmax_hz = bands[-1][1], bands[-1][3]
print(f"S11 sweep over bands union: {fmin_hz*1e-9:.6f}–{fmax_hz*1e-9:.6f} GHz "
      f"(total pts: {len(idx_concat)})")

# --- Plot ONLY the bands (one subplot per band) ---
nB = len(bands)
fig, axs = plt.subplots(nB, 1, figsize=(10, 4 + 1.6 * nB), sharey=True)
if nB == 1:
    axs = [axs]
for ax, (i0, i1, f0, f1) in zip(axs, bands):
    ax.plot(freqs_ghz[i0:i1+1], S11_dB[i0:i1+1],
            label=f"{f0*1e-9:.3f}–{f1*1e-9:.3f} GHz")
    ax.set_xlim(f0*1e-9, f1*1e-9)
    ax.set_xlabel("Frequency (GHz)")
    ax.set_ylabel("|S11| (dB)")
    ax.grid(True)
    ax.legend(loc="best")
fig.suptitle("S11 vs Frequency (band-only, no window)")
plt.tight_layout()
plt.savefig("S11_spectrum_bands_only.png", dpi=150)
plt.close(fig)

# --- Save concatenated band-only data ---
df_s11_bands_all = pd.DataFrame({
    "f_Hz": freqs[idx_concat],
    "f_GHz": freqs_ghz[idx_concat],
    "S11_real": np.real(S11[idx_concat]),
    "S11_imag": np.imag(S11[idx_concat]),
    "S11_mag": S11_mag[idx_concat],
    "S11_dB": S11_dB[idx_concat],
})
df_s11_bands_all.to_csv("S11_spectrum_bands_concat.csv", index=False)
df_s11_bands_all.to_excel("S11_spectrum_bands_concat.xlsx", index=False)

# --- Also save each band separately ---
for bidx, (i0, i1, f0, f1) in enumerate(bands, start=1):
    df_b = pd.DataFrame({
        "f_Hz": freqs[i0:i1+1],
        "f_GHz": freqs_ghz[i0:i1+1],
        "S11_real": np.real(S11[i0:i1+1]),
        "S11_imag": np.imag(S11[i0:i1+1]),
        "S11_mag": S11_mag[i0:i1+1],
        "S11_dB": S11_dB[i0:i1+1],
    })
    df_b.to_csv(f"S11_band_{bidx}.csv", index=False)

##################################
### Obtain the Gain of Antenna ###
##################################

################################
####### Process .tri File ######
################################

def parse_mesh(filename):
    with open(filename, 'r') as f:
        lines = f.readlines()

    scale = float(lines[0].strip())
    num_nodes = int(lines[1].strip())

    # Read nodes
    nodes = np.array([list(map(float, line.strip().split()))
                      for line in lines[2:2 + num_nodes]])

    num_triangles = int(lines[3 + num_nodes].strip())

    # Read all num_triangles rows (indices kept as stored in the file; 1-based)
    triangles = np.array([list(map(int, line.strip().split()))
                          for line in lines[4 + num_nodes:4 + num_nodes + num_triangles]])

    print(num_triangles)
    print(triangles[0])
    print(triangles[-1])

    return scale, nodes, triangles

tri_filename = FileName + "_out.tri"
scale, nodes, triangles = parse_mesh(tri_filename)

Num_Nodes = len(nodes)
Num_tri = len(triangles)
print('Number of Triangles =', Num_tri)
print('Number of Nodes =', Num_Nodes)

def count_triangles_from_timedomain_curfile(filename):
    with open(filename, 'r') as f:
        lines = f.readlines()

    # Drop empty separator lines; what remains is 9 values per triangle
    # (3 nodes x 3 vector components)
    values = [line.strip() for line in lines if line.strip() != '']

    total_values = len(values)
    if total_values % 9 != 0:
        print(f"Warning: File has {total_values} values, which is not divisible by 9!")

    num_triangles = total_values // 9
    return num_triangles

sample_file = total_curFileName + "00000_BC.curJ"
num_tri = count_triangles_from_timedomain_curfile(sample_file)
print('Number of Triangles, .tri file =', Num_tri)
print('Number of Triangles, time domain current file =', num_tri)

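# Optional consistency guard (a small added check): the two counts should agree
# before the per-triangle readers below rely on Num_tri.
if Num_tri != num_tri:
    print(f"[warn] Triangle count mismatch: .tri={Num_tri}, .curJ={num_tri}")
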
# ----------------------------------------------- #
# Radiated Field
# ----------------------------------------------- #
# Read curJ and curM in the time domain (total currents)

# Read curJ and curM files using multi-threading
def read_cur_files(i, currentFilename, ext, Num_tri):
    # File layout: per triangle, 3 node blocks of (x, y, z) values, each block
    # followed by one separator line (hence the extra counter increment).
    index = "%05d" % int(steps * i)
    with open(currentFilename + index + "_BC." + ext, 'r') as pfile:
        lines = pfile.readlines()

    cur = np.zeros([Num_tri, 3, 3])

    counter = 0
    for t in range(Num_tri):
        for n in range(3):
            cur[t, n, 0] = float(lines[counter])
            counter += 1
            cur[t, n, 1] = float(lines[counter])
            counter += 1
            cur[t, n, 2] = float(lines[counter])
            counter += 1
            counter += 1  # skip separator line
    return cur

# curJ = n x H
# (time, triangle, node, xyz component)
curJ_sc = np.zeros([nfiles, Num_tri, 3, 3])

# curM = E x n
# (time, triangle, node, xyz component)
curM_sc = np.zeros([nfiles, Num_tri, 3, 3])

print("Reading curJ ...")
with concurrent.futures.ThreadPoolExecutor(max_workers=16) as executor:
    tickets = {executor.submit(read_cur_files, i, total_curFileName, "curJ", Num_tri): i
               for i in range(nfiles)}
    for future in tqdm(concurrent.futures.as_completed(tickets), total=nfiles, desc="curJ files"):
        index = tickets[future]
        try:
            curJ_sc[index, :, :, :] = future.result()
        except Exception as exc:
            print(f"{index} generated an exception: {exc}")

print("Reading curM ...")
with concurrent.futures.ThreadPoolExecutor(max_workers=16) as executor:
    tickets = {executor.submit(read_cur_files, i, total_curFileName, "curM", Num_tri): i
               for i in range(nfiles)}
    for future in tqdm(concurrent.futures.as_completed(tickets), total=nfiles, desc="curM files"):
        index = tickets[future]
        try:
            curM_sc[index, :, :, :] = future.result()
        except Exception as exc:
            print(f"{index} generated an exception: {exc}")

def process_Radiation(F_curJ, F_curM, FREQ, FREQ_STR, outfile, DEST, SURFACE_TRI_MESH, THETA_sc, PHI_sc):
    # Create the frequency folder
    freq_dir = f"{current_dir}/freq/FREQ{FREQ_STR}"
    os.makedirs(freq_dir, exist_ok=True)
    print("Working Folder =", freq_dir)

    # Change directory into the frequency folder
    os.chdir(freq_dir)

    # Write the frequency-domain electric and magnetic surface currents
    with open(outfile + ".curJ", 'w') as fid:
        for t in tqdm(range(Num_tri)):
            for n in range(3):
                for c in range(3):
                    fid.write(f"{np.real(F_curJ[t, n, c])} {np.imag(F_curJ[t, n, c])}\n")

    with open(outfile + ".curM", 'w') as fid:
        for t in tqdm(range(Num_tri)):
            for n in range(3):
                for c in range(3):
                    fid.write(f"{np.real(F_curM[t, n, c])} {np.imag(F_curM[t, n, c])}\n")

    # Create the .region file
    REGIONFILE = outfile
    with open(REGIONFILE + ".region", "w") as filep:
        filep.write("1\n")
        filep.write("0 FEBI\n")
        filep.write(f"0 FEBI {outfile}\n")
        filep.write("Coupling\n")

    # Copy mesh (and the converter binary) to the expected filenames
    shutil.copy(f"{current_dir}/{SURFACE_TRI_MESH}_out.tri", f"{freq_dir}/{outfile}.tri")
    shutil.copy(f"{current_dir}/n2f_main", f"{freq_dir}/n2f_main")

    # Run the near-to-far-field converter and translator
    os.system(f"./n2f_main {REGIONFILE} {int(FREQ)} 0 0 0 180 360 1 0")
    os.system(f"emsurftranslator -s {outfile}")

    print("\n\n==== Process Farfield Result ====\n")

    # Squeeze double spaces in the .cs file so it parses as single-space-delimited
    os.system(f"sed -i 's/  / /g' {REGIONFILE}.cs")

    # Read and split the far-field data
    data = pd.read_csv(f"./{REGIONFILE}.cs", delimiter=" ", skiprows=1, header=None)
    data1 = data.iloc[:361*10].copy()
    data1.drop(columns=[0], inplace=True)
    data2 = data.iloc[361*10:].copy()
    data2.drop(columns=[data2.columns[-1]], inplace=True)

    # Concatenate the full pattern data
    result = np.concatenate((data1.to_numpy(), data2.to_numpy()))
    df = pd.DataFrame(result, columns=['Theta', 'Phi', 'RAD_V', 'RAD_H'])

    # Extract the radiation values at the requested angles
    theta_df = df[df['Theta'] == THETA_sc]
    theta_phi_df = theta_df[theta_df['Phi'] == PHI_sc]
    print(theta_phi_df)
    RAD_V = theta_phi_df['RAD_V'].values[0]  # single value
    RAD_H = theta_phi_df['RAD_H'].values[0]  # single value

    # Change back to the base directory
    os.chdir("..")
    os.chdir("..")

    return RAD_V, RAD_H

# ---------------- Radiation over the contiguous band [idx_min..idx_max] ----------------
print("----- Compute Radiation (band) -----")

if 'S11' not in globals():
    raise RuntimeError("S11 not defined. Compute S11 on the same frequency axis before running gain.")

# Per-bin incident spectral magnitude used to normalize the currents.
# NOTE: assumption -- the original normalizer (|FFT| of the dominant incident
# component, defined only in the disabled block at the end of this file) is not
# available here, so the RMS incident spectrum over the port quadrature is
# substituted.
fft_inc_magnitude = np.sqrt(mean_spec_energy)

# DEST is accepted by process_Radiation but not used by it; defined here only
# for signature compatibility with the disabled RCS block.
DEST = current_dir

# FFT of the time-domain currents, computed once; only the per-bin
# normalization varies inside the loop below.
curJ_FD = np.fft.fft(curJ_sc, n=NFFT, axis=0) / NFFT
curM_FD = np.fft.fft(curM_sc, n=NFFT, axis=0) / NFFT

num_points = idx_max - idx_min + 1
rad_freq_GHz = []
RAD_V_all = []
RAD_H_all = []
RG_V_lin_all = []  # realized gain (linear)
RG_H_lin_all = []
RG_V_dBi_all = []  # realized gain (dBi)
RG_H_dBi_all = []

for freq_ind in tqdm(range(num_points)):
    freq_index = idx_min + freq_ind

    SURFACE_TRI_MESH = FileName
    outfile = "out"
    # freqs_ghz is in GHz -> convert to MHz for the external tool
    FREQ = int(freqs_ghz[freq_index] * 1000)  # MHz
    FREQ_STR = str(FREQ)

    # Observation direction (deg)
    THETA_sc = 180
    PHI_sc = 0

    print(f"---------------- Processing {FREQ_STR} MHz ---------------------")

    # Same per-bin normalization as in the RCS sweep (see disabled block below)
    norm = 1.0 / max(fft_inc_magnitude[freq_index], 1e-30)

    F_curJ_single = curJ_FD[freq_index, :, :, :] * norm  # (Num_tri, 3 nodes, 3 comps)
    F_curM_single = curM_FD[freq_index, :, :, :] * norm

    # Run the radiation pipeline (writes .curJ/.curM, runs n2f_main, parses .cs)
    RAD_V, RAD_H = process_Radiation(
        F_curJ_single, F_curM_single,
        FREQ, FREQ_STR, outfile, DEST, SURFACE_TRI_MESH,
        THETA_sc, PHI_sc,
    )
    rad_freq_GHz.append(FREQ / 1000.0)  # back to GHz for plotting
    RAD_V_all.append(RAD_V)
    RAD_H_all.append(RAD_H)

    # ---- Realized gain using S11 at the same bin ----
    gamma2 = float(np.abs(S11[freq_index])**2)  # |S11|^2
    mult = max(1.0 - gamma2, 0.0)               # avoid tiny negatives due to FP
    RG_V_lin = max(RAD_V, 0.0) * mult           # realized gain (linear)
    RG_H_lin = max(RAD_H, 0.0) * mult
    RG_V_dBi = 10.0 * np.log10(max(RG_V_lin, 1e-12))  # dBi
    RG_H_dBi = 10.0 * np.log10(max(RG_H_lin, 1e-12))

    RG_V_lin_all.append(RG_V_lin)
    RG_H_lin_all.append(RG_H_lin)
    RG_V_dBi_all.append(RG_V_dBi)
    RG_H_dBi_all.append(RG_H_dBi)

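# Realized gain in symbols: RG(f) = G_tool(f) · (1 - |S11(f)|²), i.e. the
# tool's radiated gain de-rated by the port mismatch loss at the same bin.
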
# ---------------- Plots ----------------
# Raw radiation values from the tool
plt.figure(figsize=(8, 5))
plt.plot(rad_freq_GHz, RAD_V_all, marker='o', label='RAD_V (tool)')
plt.plot(rad_freq_GHz, RAD_H_all, marker='x', label='RAD_H (tool)')
plt.legend()
plt.grid(True)
plt.xlabel("Frequency (GHz)")
plt.ylabel("Radiation (tool units)")
plt.title("Radiation vs Frequency (band)")
plt.tight_layout()
plt.savefig("Radiation_vs_Frequency_new.png", dpi=300)
plt.close()

# Realized gain (from S11)
plt.figure(figsize=(8, 5))
plt.plot(rad_freq_GHz, RG_V_dBi_all, marker='o', label='Realized Gain V (dBi)')
plt.plot(rad_freq_GHz, RG_H_dBi_all, marker='x', label='Realized Gain H (dBi)')
plt.legend()
plt.grid(True)
plt.xlabel("Frequency (GHz)")
plt.ylabel("Gain (dBi)")
plt.title("Realized Gain vs Frequency (band)")
plt.tight_layout()
plt.savefig("Realized_Gain_vs_Frequency_new.png", dpi=300)
plt.close()

# ---------------- Save to Excel (same layout as the RCS output) ----------------
df_rad = pd.DataFrame({
    "Frequency (GHz)": rad_freq_GHz,
    "RAD_V_tool": RAD_V_all,
    "RAD_H_tool": RAD_H_all,
    "S11_mag": np.abs(S11[idx_min:idx_max+1]),
    "S11_dB": 20*np.log10(np.clip(np.abs(S11[idx_min:idx_max+1]), 1e-12, None)),
    "RealizedGain_V_lin": RG_V_lin_all,
    "RealizedGain_H_lin": RG_H_lin_all,
    "RealizedGain_V_dBi": RG_V_dBi_all,
    "RealizedGain_H_dBi": RG_H_dBi_all,
})
df_rad.to_excel("Radiation_RealizedGain_vs_Frequency_new.xlsx", index=False)
print("Saved to Radiation_RealizedGain_vs_Frequency_new.xlsx")

# ---------------------------------------------------------------------------
# Disabled legacy block (kept for reference): analytical incident-field
# processing and the RCS sweep. Enable by removing the triple quotes.
# ---------------------------------------------------------------------------
'''
#####################################
####### Process Incident Field ######
#####################################

# Folder containing the incident-field output
folder = "CURRENT_INC"
prefix = "Einc_field_" + FileName + "_"
extension = ".dat"

# Get the sorted list of files
file_list = sorted(glob.glob(os.path.join(folder, f"{prefix}*{extension}")))

# Prepare time and field vectors
time_steps = []
Ex_vals = []
Ey_vals = []
Ez_vals = []

for idx, filepath in enumerate(file_list):
    with open(filepath, 'r') as f:
        line = f.readline().strip()
        if line:
            Ex, Ey, Ez = map(float, line.split())
            Ex_vals.append(Ex)
            Ey_vals.append(Ey)
            Ez_vals.append(Ez)
            time_steps.append(idx * dt)  # or parse from filename if non-uniform

# Convert to numpy arrays
time = np.array(time_steps)
Ex_vals = np.array(Ex_vals)
Ey_vals = np.array(Ey_vals)
Ez_vals = np.array(Ez_vals)

# Plot
plt.figure(figsize=(10, 6))
plt.plot(time, Ex_vals, label='Ex')
plt.plot(time, Ey_vals, label='Ey')
plt.plot(time, Ez_vals, label='Ez')
plt.xlabel("Time (s)")
plt.ylabel("E-field (V/m)")
plt.title("Incident Electric Field at Selected Node Over Time")
plt.legend()
plt.grid(True)
plt.tight_layout()
plt.savefig("Incident_Field.png")

# Compute max absolute values
max_Ex = np.max(np.abs(Ex_vals))
max_Ey = np.max(np.abs(Ey_vals))
max_Ez = np.max(np.abs(Ez_vals))

# Find the dominant component
if max_Ex >= max_Ey and max_Ex >= max_Ez:
    signal = Ex_vals
    dominant = "Ex"
elif max_Ey >= max_Ex and max_Ey >= max_Ez:
    signal = Ey_vals
    dominant = "Ey"
else:
    signal = Ez_vals
    dominant = "Ez"

print(f"Dominant component: {dominant}")
print(f"Max magnitude: {np.max(np.abs(signal))}")

# Frequency resolution
df = f_s / NFFT

# Time axis
time_axis = np.arange(nfiles) * dt

# Frequency axis (GHz)
freq_axis = np.arange(NFFT) * df / 1e9

incident_fft = np.fft.fft(signal, n=NFFT) / NFFT

# --- Step 2: Find indices where magnitude > 10% of peak ---
fft_inc_magnitude = np.abs(incident_fft)
threshold = 0.1 * np.max(fft_inc_magnitude)
indices_above_threshold = np.where(fft_inc_magnitude[0:NFFT//2] > threshold)[0]

# Extract the frequency range where magnitude > 10% of peak
freq_selected = freq_axis[indices_above_threshold]
fft_selected = fft_inc_magnitude[indices_above_threshold]

# Find min and max frequency for the box range
fmin, fmax = np.min(freq_selected), np.max(freq_selected)

# Find the indices of fmin and fmax
idx_min = np.searchsorted(freq_axis, fmin)
idx_max = np.searchsorted(freq_axis, fmax)

# Confirm the values at those indices
actual_fmin = freq_axis[idx_min]
actual_fmax = freq_axis[idx_max]

num_points = idx_max - idx_min

print(f"Index range: {idx_min} to {idx_max}")
print(f"Frequency range: {actual_fmin:.3f} GHz to {actual_fmax:.3f} GHz")
print(f"Number of frequency points in range: {num_points}")

# --- Step 3: Plot with a translucent box ---

# Create subplots: time domain (top), frequency domain (bottom)
fig, axs = plt.subplots(2, 1, figsize=(10, 8))

# Plot the time-domain signal
axs[0].plot(time_axis, signal)
axs[0].set_title("Time-Domain Signal (Gauss Pulse)")
axs[0].set_xlabel("Time (s)")
axs[0].set_ylabel("Amplitude")
axs[0].grid(True)

# Plot the frequency-domain magnitude
axs[1].plot(freq_axis, fft_inc_magnitude)
axs[1].axvspan(fmin, fmax, color='red', alpha=0.3, label=">10% of Peak")
axs[1].set_title("Magnitude Spectrum of Incident Electric Field")
axs[1].set_xlabel("Frequency (GHz)")
axs[1].set_ylabel("Magnitude")
axs[1].grid(True)
axs[1].set_xlim([0, f_nyq / 1e9])  # adjust based on your center frequency
plt.legend()
plt.tight_layout()
plt.savefig("Incident.png")

# Create DataFrame
df = pd.DataFrame({
    'Frequency (GHz)': freq_axis,
    'fft_inc Magnitude': fft_inc_magnitude,
})

# Save to Excel
excel_path = "./cur_inc_freqdomain_analytical.xlsx"
df.to_excel(excel_path, index=False)

# ----------------------------------------------- #
# Scattered Field
# ----------------------------------------------- #
# Read curJ and curM in the time domain (scattered currents).
# (read_cur_files is defined in the live section above.)

# curJ = n x H
# (time, triangle, node, xyz component)
curJ_sc = np.zeros([nfiles, Num_tri, 3, 3])

# curM = E x n
# (time, triangle, node, xyz component)
curM_sc = np.zeros([nfiles, Num_tri, 3, 3])

print("Reading curJ sc ...")
with concurrent.futures.ThreadPoolExecutor(max_workers=16) as executor:
    tickets = {executor.submit(read_cur_files, i, sc_curFileName, "curJ", Num_tri): i for i in range(nfiles)}
    for future in tqdm(concurrent.futures.as_completed(tickets), total=nfiles, desc="curJ files"):
        index = tickets[future]
        try:
            curJ_sc[index, :, :, :] = future.result()
        except Exception as exc:
            print(f"{index} generated an exception: {exc}")

print("Reading curM sc ...")
with concurrent.futures.ThreadPoolExecutor(max_workers=16) as executor:
    tickets = {executor.submit(read_cur_files, i, sc_curFileName, "curM", Num_tri): i for i in range(nfiles)}
    for future in tqdm(concurrent.futures.as_completed(tickets), total=nfiles, desc="curM files"):
        index = tickets[future]
        try:
            curM_sc[index, :, :, :] = future.result()
        except Exception as exc:
            print(f"{index} generated an exception: {exc}")


def process_RCS(F_curJ, F_curM, FREQ, FREQ_STR, outfile, DEST, SURFACE_TRI_MESH, THETA_sc, PHI_sc):
    # Create the frequency folder
    freq_dir = f"{current_dir}/freq/FREQ{FREQ_STR}"
    os.makedirs(freq_dir, exist_ok=True)
    print("Working Folder =", freq_dir)

    # Change directory into the frequency folder
    os.chdir(freq_dir)

    # Write the frequency-domain electric and magnetic surface currents
    with open(outfile + ".curJ", 'w') as fid:
        for t in tqdm(range(Num_tri)):
            for n in range(3):
                for c in range(3):
                    fid.write(f"{np.real(F_curJ[t, n, c])} {np.imag(F_curJ[t, n, c])}\n")

    with open(outfile + ".curM", 'w') as fid:
        for t in tqdm(range(Num_tri)):
            for n in range(3):
                for c in range(3):
                    fid.write(f"{np.real(F_curM[t, n, c])} {np.imag(F_curM[t, n, c])}\n")

    # Create the .region file
    REGIONFILE = outfile
    with open(REGIONFILE + ".region", "w") as filep:
        filep.write("1\n")
        filep.write("0 FEBI\n")
        filep.write(f"0 FEBI {outfile}\n")
        filep.write("Coupling\n")

    # Copy mesh (and the converter binary) to the expected filenames
    shutil.copy(f"{current_dir}/{SURFACE_TRI_MESH}_out.tri", f"{freq_dir}/{outfile}.tri")
    shutil.copy(f"{current_dir}/n2f_main", f"{freq_dir}/n2f_main")

    # Run the near-to-far-field converter and translator
    os.system(f"./n2f_main {REGIONFILE} {int(FREQ)} 0 0 0 180 360 0 1")
    os.system(f"emsurftranslator -s {outfile}")

    print("\n\n==== Process Farfield Result ====\n")

    # Squeeze double spaces in the .cs file so it parses as single-space-delimited
    os.system(f"sed -i 's/  / /g' {REGIONFILE}.cs")

    # Read and split the far-field RCS data
    data = pd.read_csv(f"./{REGIONFILE}.cs", delimiter=" ", skiprows=1, header=None)
    data1 = data.iloc[:361*10].copy()
    data1.drop(columns=[0], inplace=True)
    data2 = data.iloc[361*10:].copy()
    data2.drop(columns=[data2.columns[-1]], inplace=True)

    # Concatenate the full pattern data
    result = np.concatenate((data1.to_numpy(), data2.to_numpy()))
    df = pd.DataFrame(result, columns=['Theta', 'Phi', 'RCS_V', 'RCS_H'])

    # Extract the RCS at the requested angles
    theta_df = df[df['Theta'] == THETA_sc]
    theta_phi_df = theta_df[theta_df['Phi'] == PHI_sc]
    print(theta_phi_df)
    RCS_V = theta_phi_df['RCS_V'].values[0]  # single value
    RCS_H = theta_phi_df['RCS_H'].values[0]  # single value

    # Change back to the base directory
    os.chdir("..")
    os.chdir("..")

    return RCS_V, RCS_H


freq_all = []
RCS_V_all = []
RCS_H_all = []
print("----- Compute RCS -----")

for freq_ind in tqdm(range(num_points)):
    freq_index = idx_min + freq_ind
    SURFACE_TRI_MESH = FileName
    outfile = "out"
    FREQ = int(freq_axis[freq_index] * 1000)
    FREQ_STR = str(FREQ)
    THETA_sc = 180
    PHI_sc = 0

    print("---------------- Processing ", FREQ_STR, " MHz ---------------------")

    norm = 1.0 / fft_inc_magnitude[freq_index]
    curJ_FD = np.fft.fft(curJ_sc, n=NFFT, axis=0) / NFFT * norm
    curM_FD = np.fft.fft(curM_sc, n=NFFT, axis=0) / NFFT * norm

    F_curJ_single = curJ_FD[freq_index, :, :, :].copy()
    F_curM_single = curM_FD[freq_index, :, :, :].copy()

    RCS_V, RCS_H = process_RCS(F_curJ_single, F_curM_single, FREQ, FREQ_STR, outfile, DEST, SURFACE_TRI_MESH, THETA_sc, PHI_sc)

    freq_all.append(FREQ / 1000)
    RCS_V_all.append(RCS_V)
    RCS_H_all.append(RCS_H)

# Plotting
plt.figure(figsize=(8, 5))
plt.plot(freq_all, RCS_V_all, marker='o', label='RCS_V')
plt.plot(freq_all, RCS_H_all, marker='x', label='RCS_H')
plt.legend()
plt.grid(True)
plt.xlabel("Frequency (GHz)")
plt.ylabel("RCS (dBsm)")
plt.title("RCS vs Frequency")
plt.tight_layout()
plt.savefig("RCS_vs_Frequency_new.png", dpi=300)

# Save to Excel
df_rcs = pd.DataFrame({
    "Frequency (GHz)": freq_all,
    "RCS_V (dBsm)": RCS_V_all,
    "RCS_H (dBsm)": RCS_H_all
})
df_rcs.to_excel("RCS_vs_Frequency_new.xlsx", index=False)
print("Saved to RCS_vs_Frequency_new.xlsx")

# ----------------------------------------------- #
'''