OpenMC Guide
Data Processing in OpenMC
Getting Started with Results
After running an OpenMC simulation, you'll have statepoint files containing all your tally results. The key to effective analysis is extracting this data in a useful format and understanding what the numbers mean physically.
OpenMC provides straightforward methods to access your results, whether you want quick answers or detailed data for further analysis. Let's start with the basics and work up to more sophisticated processing.
Basic Result Extraction
import openmc
import numpy as np
import matplotlib.pyplot as plt
# Open the statepoint produced by the run
sp = openmc.StatePoint('statepoint.100.h5')

# k-effective comes back as a value with its uncertainty attached
k_result = sp.keff
print(f"k-effective: {k_result.nominal_value:.5f} ± {k_result.std_dev:.5f}")

# Look up one tally by the name it was given when the tallies were defined
flux_tally = sp.get_tally(name='fuel_flux')

# Flatten the mean / std-dev arrays so simple numpy reductions apply
mean_values = flux_tally.mean.flatten()
std_dev_values = flux_tally.std_dev.flatten()
print(f"Average flux: {np.mean(mean_values):.3e}")
print(f"Maximum flux: {np.max(mean_values):.3e}")
print(f"Relative uncertainty: {np.mean(std_dev_values/mean_values)*100:.2f}%")

Working with Different Tally Types
Different tally types require different processing approaches. Here are the most common scenarios you'll encounter and how to handle them effectively.
Cell-Based Tallies
# Cell tallies are the simplest - one value per cell per score
sp = openmc.StatePoint('statepoint.100.h5')
cell_tally = sp.get_tally(name='fuel_cells')

# The pandas DataFrame has one row per (cell, score) combination
df = cell_tally.get_pandas_dataframe()

# Pull out the rows for each score of interest
score_col = df['score']
flux_data = df[score_col == 'flux']
fission_data = df[score_col == 'fission']
absorption_data = df[score_col == 'absorption']

# Summary reaction-rate numbers
total_fission_rate = fission_data['mean'].sum()
average_flux = flux_data['mean'].mean()
print(f"Total fission rate: {total_fission_rate:.3e} reactions/s")
print(f"Average flux: {average_flux:.3e} n/cm²/s")

# Reaction rate ratios (useful for physics validation)
eta = fission_data['mean'].sum() / absorption_data['mean'].sum()
print(f"Reproduction factor (η): {eta:.3f}")

Mesh Tallies
# Mesh tallies give spatial distributions
mesh_tally = sp.get_tally(name='power_mesh')
mesh_filter = mesh_tally.find_filter(openmc.MeshFilter)
mesh = mesh_filter.mesh

# Reshape the flat tally results onto the mesh dimensions.
# NOTE(review): assumes exactly one value per mesh element for the
# 'fission' score — confirm against how the tally was defined.
power_data = mesh_tally.get_values(scores=['fission'])
power_3d = power_data.reshape(mesh.dimension)

# Locate the hottest mesh element
max_power = power_3d.max()
max_location = np.unravel_index(np.argmax(power_3d), power_3d.shape)
print(f"Peak power: {max_power:.3e} at mesh indices {max_location}")

# Peaking factor = peak / average over the non-zero (fuelled) elements
avg_power = np.mean(power_3d[power_3d > 0]) # Exclude zero regions
peaking_factor = max_power / avg_power
print(f"Power peaking factor: {peaking_factor:.2f}")

# Axial midplane slice for a quick 2D picture
midplane = power_3d[:, :, power_3d.shape[2]//2]
plt.figure(figsize=(8, 6))
plt.imshow(midplane, origin='lower', cmap='hot')
plt.colorbar(label='Power Density')
plt.title('Power Distribution (Midplane)')
plt.show()

Energy Spectrum Analysis
# Energy tallies show neutron spectra
spectrum_tally = sp.get_tally(name='spectrum')
energy_filter = spectrum_tally.find_filter(openmc.EnergyFilter)

# EnergyFilter.bins is an (N, 2) array of [lo, hi] bin pairs in recent
# OpenMC releases (older releases exposed a flat array of edges).
# Normalize to a 1-D array of N+1 ascending bin edges in eV either way,
# so searchsorted and the midpoint math below work correctly.
energy_bins = np.asarray(energy_filter.bins)
if energy_bins.ndim == 2:
    energy_bins = np.append(energy_bins[:, 0], energy_bins[-1, 1])

# Get flux spectrum (one mean value per energy bin)
df = spectrum_tally.get_pandas_dataframe()
flux_spectrum = df[df['score'] == 'flux']['mean'].values

# Fractions of the flux in the conventional three energy groups.
# searchsorted maps each cutoff energy to its bin-edge index.
total_flux = np.sum(flux_spectrum)
thermal_idx = np.searchsorted(energy_bins, 0.625) # 0.625 eV cutoff
fast_idx = np.searchsorted(energy_bins, 100e3) # 100 keV cutoff
thermal_fraction = np.sum(flux_spectrum[:thermal_idx]) / total_flux
epithermal_fraction = np.sum(flux_spectrum[thermal_idx:fast_idx]) / total_flux
fast_fraction = np.sum(flux_spectrum[fast_idx:]) / total_flux
print(f"Thermal flux fraction: {thermal_fraction:.3f}")
print(f"Epithermal flux fraction: {epithermal_fraction:.3f}")
print(f"Fast flux fraction: {fast_fraction:.3f}")

# Plot spectrum. Divide each bin's flux by its lethargy width
# ln(E_hi/E_lo) so the y-axis really is flux per unit lethargy (the raw
# per-bin flux would mislabel the axis and distort the shape on a log
# energy scale). A first bin starting at E=0 has infinite lethargy
# width; it comes out as 0 and is dropped by the log-log plot.
with np.errstate(divide='ignore'):
    lethargy_widths = np.log(energy_bins[1:] / energy_bins[:-1])
flux_per_lethargy = flux_spectrum / lethargy_widths
energy_mid = np.sqrt(energy_bins[:-1] * energy_bins[1:])
plt.figure(figsize=(10, 6))
plt.loglog(energy_mid, flux_per_lethargy, 'b-', linewidth=2)
plt.xlabel('Energy (eV)')
plt.ylabel('Flux per unit lethargy')
plt.title('Neutron Energy Spectrum')
plt.grid(True, which='both', alpha=0.3)
plt.show()

Statistical Analysis and Uncertainty
Monte Carlo results include statistical uncertainty that you must understand and report properly. OpenMC provides the tools to assess convergence and calculate meaningful confidence intervals.
Checking Convergence
# Load statepoint to check convergence
sp = openmc.StatePoint('statepoint.100.h5')

# Per-generation k-effective history.
# NOTE(review): this attribute name varies between OpenMC releases
# (some expose it as k_generation) — confirm against the installed API.
keff_generations = sp.keff_generation
active_batches = len(keff_generations)

# Judge convergence from the spread of the most recent generations only
recent_keff = keff_generations[-20:] # Last 20 batches
keff_mean = np.mean(recent_keff)
keff_std = np.std(recent_keff)
relative_std = keff_std / keff_mean
print(f"K-effective: {keff_mean:.5f}")
print(f"Recent standard deviation: {keff_std:.5f}")
print(f"Relative standard deviation: {relative_std*100:.3f}%")

if relative_std < 0.001: # Less than 0.1%
    print("K-effective appears converged")
else:
    print("Consider running more batches")

# Plot k-effective vs generation
plt.figure(figsize=(10, 6))
plt.plot(range(1, active_batches + 1), keff_generations, 'b-', alpha=0.7)
plt.axhline(keff_mean, color='r', linestyle='--', label=f'Mean: {keff_mean:.5f}')
plt.xlabel('Generation')
plt.ylabel('k-effective')
plt.title('K-effective Convergence')
plt.legend()
plt.grid(True, alpha=0.3)
plt.show()

Tally Uncertainty Analysis
# Analyze tally uncertainties
tally = sp.get_tally(name='fuel_flux')
df = tally.get_pandas_dataframe()

# Per-bin relative uncertainty. Bins that never scored have mean == 0;
# dividing by them emits inf/NaN and poisons the mean/max summary below,
# so compute the ratio only where the mean is non-zero (zero-score bins
# are reported as 0 relative error and should be investigated separately).
flux_data = df[df['score'] == 'flux']
mean_flux = flux_data['mean'].values
std_flux = flux_data['std. dev.'].values
nonzero = mean_flux != 0
rel_error = np.zeros_like(std_flux, dtype=float)
rel_error[nonzero] = std_flux[nonzero] / mean_flux[nonzero]

# Flag bins whose statistics are too poor to trust
acceptable_threshold = 0.05 # 5%
high_uncertainty = rel_error > acceptable_threshold
print(f"Average relative uncertainty: {np.mean(rel_error)*100:.2f}%")
print(f"Maximum relative uncertainty: {np.max(rel_error)*100:.2f}%")
print(f"Tallies with >5% uncertainty: {np.sum(high_uncertainty)}")

# 95% confidence interval assuming normally distributed tally means
# (reasonable for many active batches by the central limit theorem)
confidence_level = 1.96 # 95% confidence for normal distribution
lower_bound = mean_flux - confidence_level * std_flux
upper_bound = mean_flux + confidence_level * std_flux
print(f"\nSample flux result with 95% confidence:")
idx = 0 # First tally bin
print(f"Flux: {mean_flux[idx]:.3e} [{lower_bound[idx]:.3e}, {upper_bound[idx]:.3e}]")

Automation and Batch Processing
For parameter studies or design optimization, you'll need to process multiple simulations automatically. Here's how to set up efficient batch processing workflows.
Simple Batch Analysis
import os
import pandas as pd
def analyze_statepoint(filepath, tally_name='fuel_flux'):
    """Extract key results from a single statepoint file.

    Returns a dict with k-effective and flux summary statistics on
    success, or a dict with status 'failed' and the error message if
    anything goes wrong while reading or processing the file.
    """
    try:
        sp = openmc.StatePoint(filepath)

        # Eigenvalue and its Monte Carlo uncertainty
        keff = sp.keff.nominal_value
        keff_std = sp.keff.std_dev

        # Flux tally summary statistics
        tally = sp.get_tally(name=tally_name)
        frame = tally.get_pandas_dataframe()
        flux_rows = frame[frame['score'] == 'flux']
        total_flux = flux_rows['mean'].sum()
        max_flux = flux_rows['mean'].max()
        avg_uncertainty = (flux_rows['std. dev.'] / flux_rows['mean']).mean()

        return {
            'filepath': filepath,
            'keff': keff,
            'keff_std': keff_std,
            'total_flux': total_flux,
            'max_flux': max_flux,
            'avg_uncertainty': avg_uncertainty,
            'status': 'success',
        }
    except Exception as exc:
        # The batch driver wants one row per case and must never crash,
        # so failures are reported in-band instead of propagating.
        return {
            'filepath': filepath,
            'status': 'failed',
            'error': str(exc),
        }
# Process multiple cases
case_dirs = ['case_1', 'case_2', 'case_3', 'case_4']
results = []
for case_dir in case_dirs:
    statepoint_path = f"{case_dir}/statepoint.100.h5"
    # Guard clause: skip cases whose run never produced a statepoint
    if not os.path.exists(statepoint_path):
        print(f"Skipping {case_dir}: file not found")
        continue
    result = analyze_statepoint(statepoint_path)
    result['case'] = case_dir
    results.append(result)
    if result['status'] == 'success':
        print(f"Processed {case_dir}: k-eff = {result['keff']:.5f} ± {result['keff_std']:.5f}")
    else:
        print(f"{case_dir} failed: {result['error']}")

# Tabulate and persist only the cases that processed cleanly
df_results = pd.DataFrame(results)
successful_cases = df_results[df_results['status'] == 'success']
successful_cases.to_csv('batch_analysis_results.csv', index=False)
print(f"\nAnalyzed {len(successful_cases)} cases successfully")
print(f"Results saved to batch_analysis_results.csv")

Parameter Study Analysis
import os
import numpy as np
# Analyze results as a function of a parameter
# Assumes you have a parameter study with different enrichments
enrichments = [0.03, 0.035, 0.04, 0.045, 0.05]
keff_values = []
keff_errors = []
for enrichment in enrichments:
    statepoint_path = f"enrichment_{enrichment:.3f}/statepoint.100.h5"
    if os.path.exists(statepoint_path):
        sp = openmc.StatePoint(statepoint_path)
        keff_values.append(sp.keff.nominal_value)
        keff_errors.append(sp.keff.std_dev)
    else:
        # Keep the lists aligned with the parameter list; NaN marks a
        # missing case and is filtered out below.
        keff_values.append(np.nan)
        keff_errors.append(np.nan)

# Switch to numpy arrays and drop any failed/missing cases
valid_mask = ~np.isnan(np.array(keff_values))
enrichments = np.array(enrichments)[valid_mask]
keff_values = np.array(keff_values)[valid_mask]
keff_errors = np.array(keff_errors)[valid_mask]

# Plot results
plt.figure(figsize=(10, 6))
plt.errorbar(enrichments * 100, keff_values, yerr=keff_errors,
             marker='o', linewidth=2, capsize=5)
plt.axhline(1.0, color='r', linestyle='--', alpha=0.7, label='Critical')
plt.xlabel('Enrichment (%)')
plt.ylabel('k-effective')
plt.title('Criticality vs. Fuel Enrichment')
plt.grid(True, alpha=0.3)
plt.legend()
plt.show()

# Report the case whose k-effective lies nearest unity
critical_idx = np.argmin(np.abs(keff_values - 1.0))
critical_enrichment = enrichments[critical_idx]
print(f"Closest to critical: {critical_enrichment*100:.1f}% enrichment")
print(f"k-effective: {keff_values[critical_idx]:.5f}")

Practical Tips and Best Practices
Here are essential tips for effective data processing with OpenMC results:
Data Management
- Organize your files: Use consistent naming conventions for statepoint files and organize by case or parameter
- Check file sizes: Large statepoint files may indicate excessive tallies or long simulations
- Backup important results: Statepoint files contain all your simulation work
- Document your analysis: Keep scripts and notebooks that explain what each analysis does
Common Pitfalls
# Always check for convergence before trusting results
sp = openmc.StatePoint('statepoint.100.h5')
# 1. Check if simulation ran to completion
if sp.n_batches < 100:
print(f"Warning: Only {sp.n_batches} batches completed")
# 2. Verify sufficient inactive batches
if sp.n_inactive < 10:
print("Warning: Few inactive batches may affect convergence")
# 3. Check tally uncertainty
tally = sp.get_tally(name='important_tally')
df = tally.get_pandas_dataframe()
rel_errors = df['std. dev.'] / df['mean']
high_error_count = (rel_errors > 0.1).sum()
if high_error_count > 0:
print(f"Warning: {high_error_count} tally bins have >10% uncertainty")
# 4. Validate physics reasonableness
if sp.keff.nominal_value < 0.5 or sp.keff.nominal_value > 2.0:
print("Warning: k-effective seems unrealistic")
# 5. Check for divide-by-zero in calculations
mean_values = df['mean'].values
zero_tallies = (mean_values == 0).sum()
if zero_tallies > 0:
    print(f"Warning: {zero_tallies} tally bins have zero scores")

Remember that Monte Carlo results are statistical estimates. Always report uncertainties, check convergence, and validate that your results make physical sense before drawing conclusions.