Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Projects folder + small patch to plot_VDF by Markus + Diffusion scripts #225

Merged
merged 3 commits into from
Jun 11, 2024
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
25 changes: 25 additions & 0 deletions projects/DiffusionScripts/DmumuFit.sh
Original file line number Diff line number Diff line change
@@ -0,0 +1,25 @@
#!/bin/bash -l
# Slurm batch TEMPLATE for one Dmumu fit job.
# Tokens of the form $name$ ($jobName$, $folder$, $start$, $end$, $interval$,
# $twindow$) are textual placeholders substituted by writeDmumuSh.py before
# submission -- they are NOT shell variables; do not "fix" them.
#SBATCH --job-name=$jobName$
#SBATCH --time=00:10:00
#SBATCH --account=project_2000203
#SBATCH --partition=medium
#SBATCH --nodes=1 ## number of nodes

# python-data provides the Python stack (numpy etc.) on the cluster.
module load python-data

# set the number of threads based on --cpus-per-task
export OMP_NUM_THREADS=$SLURM_CPUS_PER_TASK
# Bind OpenMP threads to hardware threads
export OMP_PLACES=cores

#echo Calculating a total of $totalFrames frames divided amongst $jobcount jobs.
#echo Using a remainder of $remainder.
#echo Current job id is $SLURM_ARRAY_TASK_ID and calculated frames are
#echo from $start to $end

# Suppress interactive prompts/plots in the analysis tools.
export PTNONINTERACTIVE=1

# Fit Dmumu for the given run folder over frames [$start$, $end$] with the
# given interval and time window (see solverIvascenko.py).
python3 /users/dubartma/analysis/subgrid/TimeDependence/solverIvascenko.py dmumu $folder$ $start$ $end$ $interval$ $twindow$

echo Job $SLURM_ARRAY_TASK_ID complete.
~
Binary file added projects/DiffusionScripts/ExamplePlotDmumu.png
Loading
Sorry, something went wrong. Reload?
Sorry, we cannot display this file.
Sorry, this file is invalid so it cannot be displayed.
Binary file added projects/DiffusionScripts/ExampleTimeDep.png
Loading
Sorry, something went wrong. Reload?
Sorry, we cannot display this file.
Sorry, this file is invalid so it cannot be displayed.
93 changes: 93 additions & 0 deletions projects/DiffusionScripts/LossCone.cfg
Original file line number Diff line number Diff line change
@@ -0,0 +1,93 @@
# Template cfg file for local diffusion runs; read by writeConfig.py, which replaces the $variable$ placeholders.
project = LossCone
ParticlePopulations = proton
dynamic_timestep = 1

[proton_properties]
mass = 1
mass_units = PROTON
charge = 1

[io]
diagnostic_write_interval = 1
system_write_t_interval = $dtOutput$
system_write_file_name = bulk
system_write_distribution_stride = 0
system_write_distribution_xline_stride = 101
system_write_distribution_yline_stride = 101
system_write_distribution_zline_stride = 1

[gridbuilder]
x_length = $cells$
y_length = $cells$
z_length = 1
x_min = 0.0
x_max = $length$
y_min = 0.0
y_max = $length$
z_min = 0
z_max = $res$
t_max = $tMax$
#dt = $dtS$
dt_ceil = $dtS$

[PAD]
enable = 0
coefficient = 1.0
CFL = 1.0
vbins = 200
mubins = 30

[proton_vspace]
vx_min = -4020000
vx_max = +4020000 # in Blocks so * 4
vy_min = -4020000
vy_max = +4020000
vz_min = -4020000
vz_max = +4020000
vx_length = 67
vy_length = 67
vz_length = 67

[boundaries]
periodic_x = yes
periodic_y = yes
periodic_z = yes

[variables]
output = populations_vg_rho
output = populations_vg_v
output = populations_vg_ptensor
output = fg_b
output = fg_e
diagnostic = populations_vg_blocks
output = vg_b_vol
output = vg_f_saved
output = populations_1dmuspace

[loadBalance]
algorithm = RCB
tolerance = 1.05

[proton_sparse]
minValue = 1.0e-14

[LossCone]
BX0 = $BX0$
BY0 = 0.0
BZ0 = 0.0
magXPertAbsAmp = 0.0
magYPertAbsAmp = 0.0
magZPertAbsAmp = 0.0

[proton_LossCone]
rho = $rho$
TemperatureX = $TemperatureX$
TemperatureY = $TemperatureY$
TemperatureZ = $TemperatureZ$
densityPertRelAmp = 0.0
velocityPertAbsAmp = 10000.0
maxwCutoff = 1.0e-14
nSpaceSamples = 2
nVelocitySamples = 2

30 changes: 30 additions & 0 deletions projects/DiffusionScripts/LossCone.sh
Original file line number Diff line number Diff line change
@@ -0,0 +1,30 @@
#!/bin/bash
# Slurm batch TEMPLATE for one local LossCone run.
# Tokens of the form $name$ ($jobName$, $configfile$, $variables$) are textual
# placeholders substituted by writeConfig.py before submission -- they are NOT
# shell variables; do not expand or quote them away.
#SBATCH --job-name=$jobName$
#SBATCH --time=15:00:00
#SBATCH --account=project_2000203
#SBATCH --partition=medium
#SBATCH --nodes=8 ## Number of nodes
#SBATCH --ntasks-per-node=16 ## number of tasks
#SBATCH --cpus-per-task=16 ## CPU cores per task
#SBATCH --hint=multithread

# set the number of threads based on --cpus-per-task
export OMP_NUM_THREADS=$SLURM_CPUS_PER_TASK
# Bind OpenMP threads to hardware threads
export OMP_PLACES=cores

module load boost papi jemalloc
module load python-data

# Group-writable output so collaborators in the project can read/clean runs.
umask 007
# Abort if the run directory is missing; the original unchecked cd would let
# srun execute in the submit directory and scatter bulk files there.
cd ./bulk/ || exit 1

executable="/users/dubartma/vlasiator/vlasiator"
configfile=$configfile$

# Quoted so the job still works if either path ever contains spaces.
srun "$executable" --run_config "$configfile"

#echo "setting a dependent job for task $SLURM_JOB_ID"
#sbatch --dependency=afterany:$SLURM_JOB_ID $variables$


40 changes: 40 additions & 0 deletions projects/DiffusionScripts/ReadMe.txt
Original file line number Diff line number Diff line change
@@ -0,0 +1,40 @@
How to run local runs to calculate Diffusion coefficients and plots
ATTENTION: All paths are hardcoded and need to be changed

RUN IN ORDER
-- writeConfig.py : Creates config (.cfg) files and .sh files to run local runs based on Temperature Anisotropy and Parallel Beta. Time is based on cyclotron gyration.
- Will create the folders to individually run each run. the 'indx' variable makes sure to not run all runs at the same time (currently coded to run a batch of 100 runs out of 400).
- Will change all variables written as '$variable$' in LossCone.cfg and LossCone.sh.
- Will queue runs by itself.
- DO NOT RUN IN interactive node. Run as 'python writeConfig.py indx'.

When batch of runs are completed
-- writeVariablesSh.py : Creates the .sh files to get the variables (1dmuspace, Taniso, Beta, etc.) needed to compute Dmumu and run them.
- Computes the box averaged variables between the specified interval
- Requires indx similar to writeConfig.py
- Creates the path to store the data
- Based on 'variables.sh' which itself uses 'getVariables.py'
- DO NOT RUN IN interactive node. Run as 'python writeVariablesSh.py indx bulkStart bulkEnd interval'.

When variables batch are completed
-- writeDmumuSh.py : Creates the .sh files to compute Dmumu and run them.
- Based on 'DmumuFit.sh' which itself runs 'solverIvascenko.py'
	- Requires the same input as the previous script. In addition, requires 'twindow'.
	- DO NOT RUN IN interactive node. Run as 'python writeDmumuSh.py indx bulkStart bulkEnd interval twindow'.

When Dmumu batch are completed
-- plotTimeDep.py : plots the (betaPara,Taniso,nu0,t) map.
- Has 'rerun' keyword to avoid having to load all the data again (False/True)
- Creates the dat file needed for SubGrid model as well.
- Run as 'python plotTimeDep.py run bulkStart bulkEnd interval rerun'
- example of output in 'ExampleTimeDep.png'

--------------------------------------------------------------------------------------------------------------------------
Other scripts
-- getVariables.py : Compute the box averaged variables between time intervals provided by writeVariablesSh.py

-- solverIvascenko.py : Compute Dmumu(nu0) values based on Ivascenko et al. (2016) and Dubart et al. (2022). Used by writeDmumuSh.py
- saves Dmumu, nu0Analytic (see Dubart et al. (2022) Annex), nu0Fit, and the mean between the 2.

-- plot_Dmumu_Ivascenko : Plots fmu, VDFs, Dmumu and the fits calculated by solverIvascenko.py for 1 run.
- example of output in 'ExamplePlotDmumu.png'
74 changes: 74 additions & 0 deletions projects/DiffusionScripts/getVariables.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,74 @@
import pytools as pt
import numpy as np
import sys,os

# Compute box-averaged plasma variables (parallel beta, temperature
# anisotropy, parallel/perpendicular temperature, density) over sliding
# time windows of a local run, and save one .npy file per variable.
#
# Usage: python getVariables.py run folder bulkStart bulkEnd interval

run       = sys.argv[1]
folder    = sys.argv[2]
bulkStart = int(sys.argv[3])
bulkEnd   = int(sys.argv[4])
interval  = int(sys.argv[5])

# One entry per window start frame; window j averages frames [j, j+interval].
timetot = range(bulkStart, bulkEnd - interval + 1, 1)

path_bulk = "/scratch/project_2000203/dubartma/" + run + '/' + folder + '/bulk/'
path_save = '/scratch/project_2000203/dubartma/data/' + run + '/' + folder + '/variables/'

# makedirs creates intermediate directories and is idempotent; replaces the
# original pair of bare try/except-pass os.mkdir calls, which also hid real
# errors such as permission problems.
os.makedirs(path_save, exist_ok=True)

# NaN-filled so windows that failed to load stay identifiable downstream
# (np.empty left arbitrary garbage in slots that were never written).
betaPara = np.full(len(timetot), np.nan)
Taniso   = np.full(len(timetot), np.nan)
Tpara    = np.full(len(timetot), np.nan)
Tperp    = np.full(len(timetot), np.nan)
n        = np.full(len(timetot), np.nan)

for j in timetot:
    # BUG FIX: the arrays have len(timetot) entries but were indexed with the
    # frame number j; whenever bulkStart > 0 that overflowed, and the
    # IndexError was silently swallowed by the except below, losing the data.
    # Index by window offset instead (identical behavior for bulkStart == 0).
    idx = j - bulkStart

    try:
        # Frames belonging to this averaging window (inclusive of both ends).
        bulks = np.arange(j, j + interval + 1, 1)
        print(bulks)

        BetaParaTmp = np.empty(len(bulks))
        TanisoTmp   = np.empty(len(bulks))
        TparaTmp    = np.empty(len(bulks))
        TperpTmp    = np.empty(len(bulks))
        nTmp        = np.empty(len(bulks))

        for i in range(0, len(bulks)):
            bulkname = 'bulk.' + str(bulks[i]).rjust(7, '0') + '.vlsv'
            f = pt.vlsvfile.VlsvReader(path_bulk + bulkname)

            # Box average of each variable for this single frame.
            BetaParaTmp[i] = np.mean(f.read_variable('vg_beta_parallel'))
            TanisoTmp[i]   = np.mean(f.read_variable('vg_t_anisotropy'))
            TparaTmp[i]    = np.mean(f.read_variable('vg_t_parallel'))
            TperpTmp[i]    = np.mean(f.read_variable('vg_t_perpendicular'))
            nTmp[i]        = np.mean(f.read_variable('vg_rho'))

        # Time average over the window.
        betaPara[idx] = np.mean(BetaParaTmp)
        Taniso[idx]   = np.mean(TanisoTmp)
        Tpara[idx]    = np.mean(TparaTmp)
        Tperp[idx]    = np.mean(TperpTmp)
        n[idx]        = np.mean(nTmp)

    except Exception as e:
        # Missing/corrupt frame: report it and leave NaN for this window.
        print(e)
        continue

np.save(path_save+'BetaPara_'+run+'_'+folder+'_'+str(interval)+'.npy',betaPara)
np.save(path_save+'Taniso_'  +run+'_'+folder+'_'+str(interval)+'.npy',Taniso)
np.save(path_save+'Tpara_'   +run+'_'+folder+'_'+str(interval)+'.npy',Tpara)
np.save(path_save+'Tperp_'   +run+'_'+folder+'_'+str(interval)+'.npy',Tperp)
np.save(path_save+'n_'       +run+'_'+folder+'_'+str(interval)+'.npy',n)

print(path_save+'BetaPara_'+run+'_'+folder+'_'+str(interval)+'.npy')
print(path_save+'Taniso_'  +run+'_'+folder+'_'+str(interval)+'.npy')
print(path_save+'Tpara_'   +run+'_'+folder+'_'+str(interval)+'.npy')
print(path_save+'Tperp_'   +run+'_'+folder+'_'+str(interval)+'.npy')
print(path_save+'n_'       +run+'_'+folder+'_'+str(interval)+'.npy')
120 changes: 120 additions & 0 deletions projects/DiffusionScripts/plotTimeDep.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,120 @@
import numpy as np
import sys,os
import matplotlib.pyplot as plt
from matplotlib.ticker import ScalarFormatter, FormatStrFormatter
import matplotlib
import matplotlib.colors as colors
import scipy
import scipy.interpolate
import math
from scipy.interpolate import RegularGridInterpolator
import traceback

# Plot per-timestep (betaPara, Taniso) maps of the scattering rate nu0 and
# dump the flattened (t, betaPara, Taniso, nu0) table used by the subgrid
# model.
#
# Usage: python plotTimeDep.py run bulkStart bulkEnd interval rerun
#   rerun == 'True' -> re-collect nu0 from the per-run .npy files,
#   anything else   -> reuse the cached nu03D.npy.

# Arguments
run       = sys.argv[1]
bulkStart = int(sys.argv[2])
bulkEnd   = int(sys.argv[3])
interval  = int(sys.argv[4])

rerun = sys.argv[5]

# Paths
pathFig  = '/users/dubartma/analysis/fig/3D/'
pathLoad = '/users/dubartma/analysis/data/'

timetot = range(bulkStart, bulkEnd - 2*interval + 1, 1)

# log10-spaced sampling of the (betaPara, Taniso) run grid (20 x 20 runs).
betaPara = np.linspace(np.log10(0.01), np.log10(100), 20)
Taniso   = np.linspace(np.log10(1.0),  np.log10(10.0), 20)

# Flattened output columns for the .dat table.
nu0Array    = np.empty(len(betaPara)*len(Taniso)*len(timetot))
betaArray   = np.empty(len(betaPara)*len(Taniso)*len(timetot))
TanisoArray = np.empty(len(betaPara)*len(Taniso)*len(timetot))
timeArray   = np.empty(len(betaPara)*len(Taniso)*len(timetot))

if rerun == 'True':

    print('Rerun')

    # NOTE(review): filled as nu03D[j,i,:] with j over betaPara and i over
    # Taniso, i.e. effective axes are (beta, Taniso, time) despite the
    # declaration order; this only works because the grid is square (20x20).
    nu03D = np.zeros([len(Taniso), len(betaPara), len(timetot)])

    for i in range(0, len(Taniso)):
        for j in range(0, len(betaPara)):

            newname  = "300_T"+str(round(10**Taniso[i],3))+"_Beta"+str(round(10**betaPara[j],3))
            pathData = '/scratch/project_2000203/dubartma/data/'+run+'/'+newname+'/'

            try:

                nu03D[j,i,:] = np.load(pathData+'Dmumu/nu0Mean_'+run+'_'+newname+'_'+str(interval)+'.npy')
                #nu03D[j,i,:] = np.load(pathData+'Dmumu/nu0Analytic_'+run+'_'+newname+'_'+str(interval)+'.npy')

            except Exception as e:
                # Missing/failed run: leave zeros for this (beta, Taniso) cell.
                print(e)
                continue

    np.save(pathLoad+'nu03D.npy', nu03D)

else:
    print('Loading data')
    nu03D = np.load(pathLoad+'nu03D.npy')

# Marginal-instability thresholds evaluated on the run grid (kept for
# reference; the plotted curves below are computed on a finer axis).
TanisoAIC = 1 + 0.43/(10**betaPara + 0.0004)**(0.42)
TanisoMM  = 1 + 0.77/(10**betaPara + 0.016)**(0.76)

# Threshold curves are time-independent: hoisted out of the frame loop.
BetaPara_curve = np.arange(0.01, 100.01, 0.01)
Taniso_AIC = 1 + 0.43/(BetaPara_curve + 0.0004)**(0.42) # PCI marginal threshold condition, Hellinger et al. 2006, Yoon et al. 2012
Taniso_MM  = 1 + 0.77/(BetaPara_curve + 0.016)**(0.76)  # MM marginal threshold condition, Hellinger et al. 2006, Yoon et al. 2012

# ----------- nu03D --------------------------------

# Lowest-beta column is excluded from the maps (idempotent; was redone
# inside the frame loop).
nu03D[:,0,:] = 0.0

nu0min = 5e-3 # colour-scale floor

for t in timetot:

    # Offset into the time axis; t itself is a frame number. Identical to t
    # when bulkStart == 0 (the original assumed that implicitly).
    ti = t - bulkStart

    # BUG FIX: the original did 'nu0plot = nu03D', a mere alias, so clamping
    # the plot array below also clamped nu03D -- and the clamped values were
    # then written into the .dat table. Clamp a copy so the saved table keeps
    # the true, unclamped nu0.
    nu0plot = nu03D.copy()

    for i in range(0, len(Taniso)):
        for j in range(0, len(betaPara)):

            # Floor tiny values so LogNorm renders them at the colormap edge.
            if nu03D[j,i,ti] < nu0min: nu0plot[j,i,ti] = nu0min

            flat = ti*len(betaPara)*len(Taniso) + i*len(betaPara) + j
            nu0Array   [flat] = nu03D[j,i,ti]
            TanisoArray[flat] = 10**Taniso[i]
            betaArray  [flat] = 10**betaPara[j]
            timeArray  [flat] = (t+1)*0.1 # frame number -> gyroperiods

    plt.pcolormesh(10**betaPara,10**Taniso,nu0plot[:,:,ti].T,shading='nearest',norm=colors.LogNorm(vmin=nu0min,vmax=0.5),cmap='plasma')
    #plt.pcolormesh(10**betaPara,10**Taniso,nu03D[:,:,ti].T,shading='nearest',vmin=5.0e-2,vmax=0.5,cmap='plasma')
    #plt.scatter(betaArray[ti*len(betaPara)*len(Taniso):(ti+1)*len(betaPara)*len(Taniso)],TanisoArray[ti*len(betaPara)*len(Taniso):(ti+1)*len(betaPara)*len(Taniso)],c=nu0Array[ti*len(betaPara)*len(Taniso):(ti+1)*len(betaPara)*len(Taniso)],vmin=0.05,vmax=0.5,cmap='plasma')

    # Overplot the instability-threshold curves.
    plt.loglog(BetaPara_curve,Taniso_AIC,linewidth=2,color='white',label='PCI')
    plt.loglog(BetaPara_curve,Taniso_MM ,linewidth=2,color='white',linestyle='--',label='Mirror')

    plt.xlim(BetaPara_curve[0],BetaPara_curve[-1])
    plt.ylim(1.0,10.0)

    plt.xlabel('$\\beta_{\\parallel,0}$' ,fontsize=15)
    plt.ylabel('${T_\\bot / T_\\parallel}_0$',fontsize=15)

    ax = plt.gca()
    ax.xaxis.set_major_formatter(FormatStrFormatter('%.2f'))
    ax.yaxis.set_major_formatter(FormatStrFormatter('%.0f'))
    ax.yaxis.set_minor_formatter(FormatStrFormatter('%.0f'))

    plt.legend(loc='lower left')

    plt.colorbar()
    plt.text(200,11,'$\\nu_0$',fontsize=20,weight='bold')

    plt.suptitle('t = '+str(round((t+1.5)*0.1,1))+' $f_\\mathrm{cp}^{-1}$',fontsize=15)
    plt.savefig(pathFig+'Dmumu3D_'+str(t).rjust(7,'0')+'.png',dpi=300)
    print (pathFig+'Dmumu3D_'+str(t).rjust(7,'0')+'.png')
    plt.close()

# Flattened table consumed by the subgrid model.
header = 't*fcp betaPara Taniso nu0'
data = np.column_stack([timeArray,betaArray,TanisoArray,nu0Array])
np.savetxt(pathLoad+'nu03Ddt.dat',data,header=header)
print(pathLoad+'nu03Ddt.dat')

Loading