From 5f067df81814c567ffad1923646b832d04330128 Mon Sep 17 00:00:00 2001 From: Chang-Goo Kim Date: Wed, 18 Sep 2024 10:53:57 -0400 Subject: [PATCH 1/5] Updated pre-commit hook and ruff versions --- .github/workflows/linting.yml | 2 +- .pre-commit-config.yaml | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/workflows/linting.yml b/.github/workflows/linting.yml index 73d081f..54da43a 100644 --- a/.github/workflows/linting.yml +++ b/.github/workflows/linting.yml @@ -6,5 +6,5 @@ jobs: lint: runs-on: ubuntu-latest steps: - - uses: actions/checkout@v3 + - uses: actions/checkout@v4 - uses: chartboost/ruff-action@v1 diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index dbe66fb..09b11e9 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -6,7 +6,7 @@ repos: - id: end-of-file-fixer - id: trailing-whitespace - repo: https://github.com/astral-sh/ruff-pre-commit - rev: v0.1.3 + rev: v0.6.5 hooks: - id: ruff types_or: [python, pyi, jupyter] From 20f01cc4f6196b36847e4050bfa9a726bc051dfc Mon Sep 17 00:00:00 2001 From: Chang-Goo Kim Date: Wed, 18 Sep 2024 10:56:46 -0400 Subject: [PATCH 2/5] ruff linting and formatting --- docs/extract_data_subset.ipynb | 222 +++++++++++++++------------ docs/read_data_2-chem-CO_lines.ipynb | 39 +++-- docs/read_data_3-MHD_PI.ipynb | 25 +-- miscellaneous/copy_data.ipynb | 188 +++++++++++++++++------ 4 files changed, 301 insertions(+), 173 deletions(-) diff --git a/docs/extract_data_subset.ipynb b/docs/extract_data_subset.ipynb index 9e418c6..632fe09 100644 --- a/docs/extract_data_subset.ipynb +++ b/docs/extract_data_subset.ipynb @@ -15,10 +15,7 @@ "outputs": [], "source": [ "import matplotlib.pyplot as plt\n", - "from matplotlib.colors import LogNorm\n", - "from matplotlib import gridspec\n", - "import numpy as np\n", - "import yt #https://yt-project.org/" + "import numpy as np" ] }, { @@ -38,7 +35,8 @@ "# add path to astro_tigress module\n", "# this can also be done using PYTHONPATH environment variable\n", "import sys\n", - "sys.path.insert(0,'../')\n", + "\n", + "sys.path.insert(0, \"../\")\n", "\n", "import astro_tigress" ] @@ -52,7 +50,7 @@ "# Need to set the master directory where the data is stored\n", "dir_master = \"../data/\"\n", "# name of the simulation model\n", - "model_id = \"R8_2pc\" " + "model_id = \"R8_2pc\"" ] }, { @@ -70,10 +68,14 @@ } ], "source": [ - "model = astro_tigress.Model(model_id,dir_master) #reading the model information\n", + "model = astro_tigress.Model(model_id, dir_master) # reading the model information\n", "print(\"Snapshots and contained data sets:\")\n", "for ivtk in model.ivtks:\n", - " print(\"ivtk={:d}, t={:.1f} Myr, datasets={}\".format(ivtk, ivtk*model.dt_Myr, model.data_sets[ivtk]))" + " print(\n", + " \"ivtk={:d}, t={:.1f} Myr, datasets={}\".format(\n", + " ivtk, ivtk * model.dt_Myr, model.data_sets[ivtk]\n", + " )\n", + " )" ] }, { @@ -116,7 +118,7 @@ "metadata": {}, "outputs": [], "source": [ - "#load the chemistry data set for the snapshot ivtk=300\n", + "# load the chemistry data set for the snapshot ivtk=300\n", "model.load(290, dataset=\"chem\")" ] }, @@ -164,14 +166,18 @@ "source": [ "# hydro quantities\n", "\n", - "Temp_chem = model.chem.grid[('gas','temperature_chem')].in_units(\"K\").value.T # this is temperature from chemistry post-processing\n", - "Temp = model.chem.grid[('gas','temperature')].in_units(\"K\").value.T # this is temperature from hydro simulations\n", - "nH = model.chem.grid[('gas','nH')].in_units(\"cm**-3\").value.T\n", - "vx = 
model.chem.grid[('gas','velocity_x')].in_units(\"km/s\").value.T\n", - "vy = model.chem.grid[('gas','velocity_y')].in_units(\"km/s\").value.T\n", - "vz = model.chem.grid[('gas','velocity_z')].in_units(\"km/s\").value.T\n", + "Temp_chem = (\n", + " model.chem.grid[(\"gas\", \"temperature_chem\")].in_units(\"K\").value.T\n", + ") # this is temperature from chemistry post-processing\n", + "Temp = (\n", + " model.chem.grid[(\"gas\", \"temperature\")].in_units(\"K\").value.T\n", + ") # this is temperature from hydro simulations\n", + "nH = model.chem.grid[(\"gas\", \"nH\")].in_units(\"cm**-3\").value.T\n", + "vx = model.chem.grid[(\"gas\", \"velocity_x\")].in_units(\"km/s\").value.T\n", + "vy = model.chem.grid[(\"gas\", \"velocity_y\")].in_units(\"km/s\").value.T\n", + "vz = model.chem.grid[(\"gas\", \"velocity_z\")].in_units(\"km/s\").value.T\n", "xH2 = model.chem.grid[\"H2\"].value.T\n", - "nH2 = xH2*nH" + "nH2 = xH2 * nH" ] }, { @@ -181,9 +187,9 @@ "outputs": [], "source": [ "# coordinates\n", - "xcc = model.chem.grid[(\"gas\",\"x\")][:,0,0].to('pc').value\n", - "ycc = model.chem.grid[(\"gas\",\"y\")][0,:,0].to('pc').value\n", - "zcc = model.chem.grid[(\"gas\",\"z\")][0,0,:].to('pc').value" + "xcc = model.chem.grid[(\"gas\", \"x\")][:, 0, 0].to(\"pc\").value\n", + "ycc = model.chem.grid[(\"gas\", \"y\")][0, :, 0].to(\"pc\").value\n", + "zcc = model.chem.grid[(\"gas\", \"z\")][0, 0, :].to(\"pc\").value" ] }, { @@ -192,7 +198,7 @@ "metadata": {}, "outputs": [], "source": [ - "plt.rcParams['figure.dpi']=200" + "plt.rcParams[\"figure.dpi\"] = 200" ] }, { @@ -202,7 +208,7 @@ "outputs": [], "source": [ "# |z| < 300pc region\n", - "zidx = np.abs(zcc)<300" + "zidx = np.abs(zcc) < 300" ] }, { @@ -230,23 +236,27 @@ } ], "source": [ - "fig,axes = plt.subplots(1,2,figsize=(12,5))\n", - "for ax, T in zip(axes,[Temp_chem,Temp]):\n", + "fig, axes = plt.subplots(1, 2, figsize=(12, 5))\n", + "for ax, T in zip(axes, [Temp_chem, Temp]):\n", " plt.sca(ax)\n", - " pok = T*nH\n", - " h_xH2,b1,b2=np.histogram2d((nH[zidx,:,:].flatten()),\n", - " (T[zidx,:,:].flatten()),\n", - " bins=[np.logspace(-1,4,50),np.logspace(1,5,50)],\n", - " weights=xH2[zidx,:,:].flatten())\n", - " h_vol,b1,b2=np.histogram2d((nH[zidx,:,:].flatten()),\n", - " (T[zidx,:,:].flatten()),\n", - " bins=[np.logspace(-1,4,50),np.logspace(1,5,50)],)\n", - " plt.pcolormesh(b1,b2,(h_xH2/h_vol).T,vmin=0,vmax=0.5)\n", - " plt.colorbar(label=r'$x_{{\\rm H}_2}$')\n", - " plt.xscale('log')\n", - " plt.yscale('log')\n", - " plt.xlabel(r'$n_H\\,[{\\rm cm^{-3}}]$')\n", - " plt.ylabel(r'$T\\,[{\\rm K}]$')\n", + " pok = T * nH\n", + " h_xH2, b1, b2 = np.histogram2d(\n", + " (nH[zidx, :, :].flatten()),\n", + " (T[zidx, :, :].flatten()),\n", + " bins=[np.logspace(-1, 4, 50), np.logspace(1, 5, 50)],\n", + " weights=xH2[zidx, :, :].flatten(),\n", + " )\n", + " h_vol, b1, b2 = np.histogram2d(\n", + " (nH[zidx, :, :].flatten()),\n", + " (T[zidx, :, :].flatten()),\n", + " bins=[np.logspace(-1, 4, 50), np.logspace(1, 5, 50)],\n", + " )\n", + " plt.pcolormesh(b1, b2, (h_xH2 / h_vol).T, vmin=0, vmax=0.5)\n", + " plt.colorbar(label=r\"$x_{{\\rm H}_2}$\")\n", + " plt.xscale(\"log\")\n", + " plt.yscale(\"log\")\n", + " plt.xlabel(r\"$n_H\\,[{\\rm cm^{-3}}]$\")\n", + " plt.ylabel(r\"$T\\,[{\\rm K}]$\")\n", "plt.tight_layout()" ] }, @@ -286,10 +296,10 @@ ], "source": [ "# mean H2 fraction as a function of density\n", - "plt.step(b1[:-1],(h_xH2.sum(axis=1)/h_vol.sum(axis=1)))\n", - "plt.xscale('log')\n", - "plt.ylabel(r'$x_{{\\rm H}_2}$')\n", - "plt.xlabel(r'$n_H\\,[{\\rm 
cm^{-3}}]$')" + "plt.step(b1[:-1], (h_xH2.sum(axis=1) / h_vol.sum(axis=1)))\n", + "plt.xscale(\"log\")\n", + "plt.ylabel(r\"$x_{{\\rm H}_2}$\")\n", + "plt.xlabel(r\"$n_H\\,[{\\rm cm^{-3}}]$\")" ] }, { @@ -315,7 +325,7 @@ "metadata": {}, "outputs": [], "source": [ - "dx = (xcc[1]-xcc[0])*ac.pc.cgs.value" + "dx = (xcc[1] - xcc[0]) * ac.pc.cgs.value" ] }, { @@ -324,9 +334,9 @@ "metadata": {}, "outputs": [], "source": [ - "NH2_y=nH2.sum(axis=1)*dx\n", - "NH2_x=nH2.sum(axis=2)*dx\n", - "NH2_z=nH2.sum(axis=0)*dx" + "NH2_y = nH2.sum(axis=1) * dx\n", + "NH2_x = nH2.sum(axis=2) * dx\n", + "NH2_z = nH2.sum(axis=0) * dx" ] }, { @@ -346,11 +356,11 @@ } ], "source": [ - "fig,axes = plt.subplots(1,3,figsize=(12,5))\n", - "for ax, NH2 in zip(axes,[NH2_x,NH2_y,NH2_z]):\n", + "fig, axes = plt.subplots(1, 3, figsize=(12, 5))\n", + "for ax, NH2 in zip(axes, [NH2_x, NH2_y, NH2_z]):\n", " plt.sca(ax)\n", - " plt.pcolormesh(xcc,zcc,NH2,vmax=2.e21)\n", - " ax.set_aspect('equal')" + " plt.pcolormesh(xcc, zcc, NH2, vmax=2.0e21)\n", + " ax.set_aspect(\"equal\")" ] }, { @@ -367,17 +377,17 @@ "metadata": {}, "outputs": [], "source": [ - "Nz,Ny,Nx=nH2.shape\n", - "k0,j0,i0=np.unravel_index(nH2.argmax(),nH2.shape)\n", + "Nz, Ny, Nx = nH2.shape\n", + "k0, j0, i0 = np.unravel_index(nH2.argmax(), nH2.shape)\n", "di = 25\n", "dj = 25\n", "dk = 25\n", - "il = max(i0-di,0)\n", - "jl = max(j0-dj,0)\n", - "kl = max(k0-dk,0)\n", - "iu = min(i0+di,Nx)\n", - "ju = min(j0+dj,Ny)\n", - "ku = min(k0+dk,Nz)" + "il = max(i0 - di, 0)\n", + "jl = max(j0 - dj, 0)\n", + "kl = max(k0 - dk, 0)\n", + "iu = min(i0 + di, Nx)\n", + "ju = min(j0 + dj, Ny)\n", + "ku = min(k0 + dk, Nz)" ] }, { @@ -403,12 +413,14 @@ "metadata": {}, "outputs": [], "source": [ - "for label,data in zip(['nH','nH2','vx','vy','vz','Temp'],[nH,nH2,vx,vy,vz,Temp]):\n", - " subcube[label]=data[kl:ku,jl:ju,il:iu]\n", + "for label, data in zip(\n", + " [\"nH\", \"nH2\", \"vx\", \"vy\", \"vz\", \"Temp\"], [nH, nH2, vx, vy, vz, Temp]\n", + "):\n", + " subcube[label] = data[kl:ku, jl:ju, il:iu]\n", "\n", - "subcube['x']=xcc[il:iu]\n", - "subcube['y']=ycc[jl:ju]\n", - "subcube['z']=zcc[kl:ku]" + "subcube[\"x\"] = xcc[il:iu]\n", + "subcube[\"y\"] = ycc[jl:ju]\n", + "subcube[\"z\"] = zcc[kl:ku]" ] }, { @@ -418,11 +430,13 @@ "outputs": [], "source": [ "# caculate velocity of the density peak\n", - "vpeak=[vx[k0,j0,i0],vy[k0,j0,i0],vz[k0,j0,i0]]\n", + "vpeak = [vx[k0, j0, i0], vy[k0, j0, i0], vz[k0, j0, i0]]\n", "# or caculate mass weighted velocity within the subregion\n", - "vpeak2 = [(subcube['nH2']*subcube['vx']).sum()/subcube['nH2'].sum(),\n", - " (subcube['nH2']*subcube['vy']).sum()/subcube['nH2'].sum(),\n", - " (subcube['nH2']*subcube['vz']).sum()/subcube['nH2'].sum()]\n" + "vpeak2 = [\n", + " (subcube[\"nH2\"] * subcube[\"vx\"]).sum() / subcube[\"nH2\"].sum(),\n", + " (subcube[\"nH2\"] * subcube[\"vy\"]).sum() / subcube[\"nH2\"].sum(),\n", + " (subcube[\"nH2\"] * subcube[\"vz\"]).sum() / subcube[\"nH2\"].sum(),\n", + "]" ] }, { @@ -439,7 +453,7 @@ } ], "source": [ - "print(vpeak,vpeak2)" + "print(vpeak, vpeak2)" ] }, { @@ -449,8 +463,8 @@ "outputs": [], "source": [ "# define perturbed velocity with respect to the desnity peak\n", - "for v,v0 in zip(['vx','vy','vz'],vpeak):\n", - " subcube['d{}'.format(v)] = subcube[v]-v0" + "for v, v0 in zip([\"vx\", \"vy\", \"vz\"], vpeak):\n", + " subcube[\"d{}\".format(v)] = subcube[v] - v0" ] }, { @@ -467,13 +481,15 @@ "metadata": {}, "outputs": [], "source": [ - "def add_header_for_glue(hdu,hdr,axis='xyz'):\n", - " for i,ax in 
enumerate(axis):\n", - " hdu.header['CDELT{}'.format(i+1)]=hdr['d{}'.format(ax)]\n", - " hdu.header['CTYPE{}'.format(i+1)]=ax\n", - " hdu.header['CUNIT{}'.format(i+1)]=hdr.comments['d{}'.format(ax)]\n", - " hdu.header['CRVAL{}'.format(i+1)]=hdr['{}min'.format(ax)]\n", - " hdu.header['CRPIX{}'.format(i+1)]=hdr['{}max'.format(ax)]+hdr['{}min'.format(ax)]\n", + "def add_header_for_glue(hdu, hdr, axis=\"xyz\"):\n", + " for i, ax in enumerate(axis):\n", + " hdu.header[\"CDELT{}\".format(i + 1)] = hdr[\"d{}\".format(ax)]\n", + " hdu.header[\"CTYPE{}\".format(i + 1)] = ax\n", + " hdu.header[\"CUNIT{}\".format(i + 1)] = hdr.comments[\"d{}\".format(ax)]\n", + " hdu.header[\"CRVAL{}\".format(i + 1)] = hdr[\"{}min\".format(ax)]\n", + " hdu.header[\"CRPIX{}\".format(i + 1)] = (\n", + " hdr[\"{}max\".format(ax)] + hdr[\"{}min\".format(ax)]\n", + " )\n", " return" ] }, @@ -494,17 +510,17 @@ "source": [ "hdul = fits.HDUList()\n", "hdr = fits.Header()\n", - "tMyr=model.chem.ytds.current_time.to('Myr').value\n", - "hdr['time']=float(tMyr)\n", - "hdr['xmin']=(subcube['x'][0],'pc')\n", - "hdr['xmax']=(subcube['x'][1],'pc')\n", - "hdr['ymin']=(subcube['y'][0],'pc')\n", - "hdr['ymax']=(subcube['y'][1],'pc')\n", - "hdr['zmin']=(subcube['z'][0],'pc')\n", - "hdr['zmax']=(subcube['z'][1],'pc')\n", - "hdr['dx']=(dx,'pc')\n", - "hdr['dy']=(dx,'pc')\n", - "hdr['dz']=(dx,'pc')\n", + "tMyr = model.chem.ytds.current_time.to(\"Myr\").value\n", + "hdr[\"time\"] = float(tMyr)\n", + "hdr[\"xmin\"] = (subcube[\"x\"][0], \"pc\")\n", + "hdr[\"xmax\"] = (subcube[\"x\"][1], \"pc\")\n", + "hdr[\"ymin\"] = (subcube[\"y\"][0], \"pc\")\n", + "hdr[\"ymax\"] = (subcube[\"y\"][1], \"pc\")\n", + "hdr[\"zmin\"] = (subcube[\"z\"][0], \"pc\")\n", + "hdr[\"zmax\"] = (subcube[\"z\"][1], \"pc\")\n", + "hdr[\"dx\"] = (dx, \"pc\")\n", + "hdr[\"dy\"] = (dx, \"pc\")\n", + "hdr[\"dz\"] = (dx, \"pc\")\n", "hdu = fits.PrimaryHDU(header=hdr)" ] }, @@ -515,8 +531,8 @@ "outputs": [], "source": [ "hdul.append(hdu)\n", - "for label in ['nH','nH2','vx','vy','vz','dvx','dvy','dvz','Temp']:\n", - " hdul.append(fits.ImageHDU(name=label,data=subcube[label])) " + "for label in [\"nH\", \"nH2\", \"vx\", \"vy\", \"vz\", \"dvx\", \"dvy\", \"dvz\", \"Temp\"]:\n", + " hdul.append(fits.ImageHDU(name=label, data=subcube[label]))" ] }, { @@ -525,9 +541,9 @@ "metadata": {}, "outputs": [], "source": [ - "hdr=hdu.header\n", + "hdr = hdu.header\n", "for hdu in hdul:\n", - " add_header_for_glue(hdu,hdr,axis='xyz')" + " add_header_for_glue(hdu, hdr, axis=\"xyz\")" ] }, { @@ -536,7 +552,7 @@ "metadata": {}, "outputs": [], "source": [ - "fitsname='subcube.fits'" + "fitsname = \"subcube.fits\"" ] }, { @@ -545,7 +561,7 @@ "metadata": {}, "outputs": [], "source": [ - "hdul.writeto(fitsname,overwrite=True)" + "hdul.writeto(fitsname, overwrite=True)" ] }, { @@ -562,8 +578,8 @@ "metadata": {}, "outputs": [], "source": [ - "x,y,z = np.meshgrid(subcube['x'],subcube['y'],subcube['z'],indexing='ij')\n", - "u,v,w = subcube['dvx'],subcube['dvy'],subcube['dvz']" + "x, y, z = np.meshgrid(subcube[\"x\"], subcube[\"y\"], subcube[\"z\"], indexing=\"ij\")\n", + "u, v, w = subcube[\"dvx\"], subcube[\"dvy\"], subcube[\"dvz\"]" ] }, { @@ -572,7 +588,7 @@ "metadata": {}, "outputs": [], "source": [ - "idx = subcube['nH2']>1" + "idx = subcube[\"nH2\"] > 1" ] }, { @@ -602,7 +618,7 @@ } ], "source": [ - "ax = plt.figure().add_subplot(projection='3d')\n", + "ax = plt.figure().add_subplot(projection=\"3d\")\n", "ax.quiver(x[idx], y[idx], z[idx], u[idx], v[idx], w[idx], length=0.1, normalize=True)" ] }, 
@@ -630,9 +646,13 @@ "outputs": [], "source": [ "dset = xr.Dataset()\n", - "for label in ['nH','nH2','dvx','dvy','dvz','vx','vy','vz','Temp']:\n", - " da = xr.DataArray(subcube[label],coords=[subcube['z'],subcube['y'],subcube['x']],dims=['z','y','x'])\n", - " dset[label]=da" + "for label in [\"nH\", \"nH2\", \"dvx\", \"dvy\", \"dvz\", \"vx\", \"vy\", \"vz\", \"Temp\"]:\n", + " da = xr.DataArray(\n", + " subcube[label],\n", + " coords=[subcube[\"z\"], subcube[\"y\"], subcube[\"x\"]],\n", + " dims=[\"z\", \"y\", \"x\"],\n", + " )\n", + " dset[label] = da" ] }, { @@ -641,7 +661,7 @@ "metadata": {}, "outputs": [], "source": [ - "dset.to_netcdf('subcube.nc')" + "dset.to_netcdf(\"subcube.nc\")" ] }, { diff --git a/docs/read_data_2-chem-CO_lines.ipynb b/docs/read_data_2-chem-CO_lines.ipynb index ad16015..5dab257 100644 --- a/docs/read_data_2-chem-CO_lines.ipynb +++ b/docs/read_data_2-chem-CO_lines.ipynb @@ -39,8 +39,7 @@ "import matplotlib.pyplot as plt\n", "from matplotlib.colors import LogNorm\n", "from matplotlib import gridspec\n", - "import numpy as np\n", - "import yt #https://yt-project.org/" + "import numpy as np" ] }, { @@ -60,7 +59,7 @@ "# Need to set the master directory where the data is stored\n", "dir_master = \"../data/\"\n", "# name of the simulation model\n", - "model_id = \"R8_2pc\" " + "model_id = \"R8_2pc\"" ] }, { @@ -89,13 +88,19 @@ "# add path to astro_tigress module\n", "# this can also be done using PYTHONPATH environment variable\n", "import sys\n", - "sys.path.insert(0,'../')\n", + "\n", + "sys.path.insert(0, \"../\")\n", "\n", "import astro_tigress\n", - "model = astro_tigress.Model(model_id,dir_master) #reading the model information\n", + "\n", + "model = astro_tigress.Model(model_id, dir_master) # reading the model information\n", "print(\"Snapshots and contained data sets:\")\n", "for ivtk in model.ivtks:\n", - " print(\"ivtk={:d}, t={:.1f} Myr, datasets={}\".format(ivtk, ivtk*model.dt_Myr, model.data_sets[ivtk]))" + " print(\n", + " \"ivtk={:d}, t={:.1f} Myr, datasets={}\".format(\n", + " ivtk, ivtk * model.dt_Myr, model.data_sets[ivtk]\n", + " )\n", + " )" ] }, { @@ -138,7 +143,7 @@ "metadata": {}, "outputs": [], "source": [ - "#load the chemistry data set for the snapshot ivtk=300\n", + "# load the chemistry data set for the snapshot ivtk=300\n", "model.load(290, dataset=\"chem\")" ] }, @@ -156,7 +161,7 @@ "metadata": {}, "outputs": [], "source": [ - "#model.chem.ytds.field_info" + "# model.chem.ytds.field_info" ] }, { @@ -256,7 +261,7 @@ "source": [ "fig = plt.figure()\n", "ax = fig.add_subplot(111)\n", - "indx = nH2_np.reshape(-1) > 1.\n", + "indx = nH2_np.reshape(-1) > 1.0\n", "ax.hist(np.log10(nH2_np.reshape(-1)[indx]), bins=50)\n", "ax.set_yscale(\"log\")\n", "ax.set_xlabel(r\"$\\log_{10}[n_{H_2} / cm^3]$\");" @@ -300,11 +305,11 @@ "source": [ "fig = plt.figure()\n", "ax = fig.add_subplot(111)\n", - "keys_NH2 = {\"origin\":\"lower\", \"cmap\":\"pink_r\", \"norm\": LogNorm(vmin=1e19, vmax=2e22)}\n", + "keys_NH2 = {\"origin\": \"lower\", \"cmap\": \"pink_r\", \"norm\": LogNorm(vmin=1e19, vmax=2e22)}\n", "left_edge_kpc = model.chem.grid.left_edge.in_units(\"kpc\").value\n", "right_edge_kpc = model.chem.grid.right_edge.in_units(\"kpc\").value\n", "extent = [left_edge_kpc[0], right_edge_kpc[0], left_edge_kpc[1], right_edge_kpc[1]]\n", - "cax = ax.imshow(np.swapaxes(NH2, 0, 1), extent=extent, **keys_NH2)\n", + "cax = ax.imshow(np.swapaxes(NH2, 0, 1), extent=extent, **keys_NH2)\n", "cbar = fig.colorbar(cax)\n", "ax.set_xlabel(\"x (kpc)\")\n", "ax.set_ylabel(\"y 
(kpc)\")\n", @@ -329,9 +334,9 @@ }, "outputs": [], "source": [ - "#load the CO line dataset for the snapshot ivtk=290\n", - "model.load(290, dataset=\"CO_lines\", iline=1) #CO(J=1-0)\n", - "model.load(290, dataset=\"CO_lines\", iline=2) #CO(J=2-1)" + "# load the CO line dataset for the snapshot ivtk=290\n", + "model.load(290, dataset=\"CO_lines\", iline=1) # CO(J=1-0)\n", + "model.load(290, dataset=\"CO_lines\", iline=2) # CO(J=2-1)" ] }, { @@ -352,7 +357,7 @@ }, "outputs": [], "source": [ - "#setting the range of x and y axes\n", + "# setting the range of x and y axes\n", "model.load(290, dataset=\"MHD\")\n", "left_edge_kpc = model.MHD.grid.left_edge.in_units(\"kpc\").value\n", "right_edge_kpc = model.MHD.grid.right_edge.in_units(\"kpc\").value\n", @@ -381,7 +386,7 @@ "ax1 = fig.add_subplot(gs[0, 0], aspect=\"equal\")\n", "ax2 = fig.add_subplot(gs[0, 1], aspect=\"equal\")\n", "ax_cbar = fig.add_subplot(gs[0, 2])\n", - "keys_WCO = {\"origin\":\"lower\", \"cmap\":\"afmhot_r\", \"norm\": LogNorm(vmin=0.1, vmax=500)}\n", + "keys_WCO = {\"origin\": \"lower\", \"cmap\": \"afmhot_r\", \"norm\": LogNorm(vmin=0.1, vmax=500)}\n", "cax = ax1.imshow(np.swapaxes(model.CO_lines[1].WCO, 0, 1), extent=extent, **keys_WCO)\n", "ax2.imshow(np.swapaxes(model.CO_lines[2].WCO, 0, 1), extent=extent, **keys_WCO)\n", "ax1.set_title(\"CO (J=1-0)\")\n", @@ -410,7 +415,7 @@ "metadata": {}, "outputs": [], "source": [ - "#model.CO_lines[1].img.write_fits(fn_fits=\"CO1-0.fits\")" + "# model.CO_lines[1].img.write_fits(fn_fits=\"CO1-0.fits\")" ] }, { diff --git a/docs/read_data_3-MHD_PI.ipynb b/docs/read_data_3-MHD_PI.ipynb index 6892632..a010aec 100644 --- a/docs/read_data_3-MHD_PI.ipynb +++ b/docs/read_data_3-MHD_PI.ipynb @@ -36,10 +36,7 @@ "metadata": {}, "outputs": [], "source": [ - "import matplotlib.pyplot as plt\n", - "import numpy as np\n", - "from scipy.interpolate import interp1d\n", - "import yt #https://yt-project.org/" + "import yt # https://yt-project.org/" ] }, { @@ -59,7 +56,7 @@ "# Need to set the master directory where the data is stored\n", "dir_master = \"../data/\"\n", "# name of the simulation model\n", - "model_id = \"R8_4pc\" " + "model_id = \"R8_4pc\"" ] }, { @@ -92,13 +89,19 @@ "# add path to astro_tigress module\n", "# this can also be done using PYTHONPATH environment variable\n", "import sys\n", - "sys.path.insert(0,'../')\n", + "\n", + "sys.path.insert(0, \"../\")\n", "\n", "import astro_tigress\n", - "model = astro_tigress.Model(model_id,dir_master) #reading the model information\n", + "\n", + "model = astro_tigress.Model(model_id, dir_master) # reading the model information\n", "print(\"Snapshots and contained data sets:\")\n", "for ivtk in model.ivtks:\n", - " print(\"ivtk={:d}, t={:.1f} Myr, datasets={}\".format(ivtk, ivtk*model.dt_Myr, model.data_sets[ivtk]))" + " print(\n", + " \"ivtk={:d}, t={:.1f} Myr, datasets={}\".format(\n", + " ivtk, ivtk * model.dt_Myr, model.data_sets[ivtk]\n", + " )\n", + " )" ] }, { @@ -123,7 +126,7 @@ "metadata": {}, "outputs": [], "source": [ - "#load the UV data set for the snapshot ivtk=300\n", + "# load the UV data set for the snapshot ivtk=300\n", "model.load(300, dataset=\"MHD_PI\")" ] }, @@ -246,7 +249,7 @@ } ], "source": [ - "yt.SlicePlot(model.MHD_PI.ytds, \"z\", fields=[\"nHI\",\"ne\"])" + "yt.SlicePlot(model.MHD_PI.ytds, \"z\", fields=[\"nHI\", \"ne\"])" ] }, { @@ -269,7 +272,7 @@ } ], "source": [ - "yt.SlicePlot(model.MHD_PI.ytds, \"x\", fields=[\"nHI\",\"ne\"])" + "yt.SlicePlot(model.MHD_PI.ytds, \"x\", fields=[\"nHI\", \"ne\"])" ] }, { diff 
--git a/miscellaneous/copy_data.ipynb b/miscellaneous/copy_data.ipynb index b5466ff..8f50d6c 100644 --- a/miscellaneous/copy_data.ipynb +++ b/miscellaneous/copy_data.ipynb @@ -14,10 +14,10 @@ "outputs": [], "source": [ "import sys\n", - "sys.path.insert(0, \"../astro_tigress/\") #add path for import\n", + "\n", + "sys.path.insert(0, \"../astro_tigress/\") # add path for import\n", "import numpy as np\n", "from tigress_utils import copy_file\n", - "import read_vtk\n", "import delete_fields_vtk" ] }, @@ -27,42 +27,42 @@ "metadata": {}, "outputs": [], "source": [ - "#directories\n", - "#dir_new = \"/tigress/munan/public_html/astro-tigress/\"\n", + "# directories\n", + "# dir_new = \"/tigress/munan/public_html/astro-tigress/\"\n", "dir_new = \"/projects/EOSTRIKE/TIGRESS_data_release/\"\n", "dir_MHD_old = \"/projects/EOSTRIKE/TIGRESS_XCO/\"\n", "dir_chem_old = \"/tigress/munan/chemistry/XCO_surfd/\"\n", "dir_CO_old = \"/tigress/munan/chemistry/scripts/radmc3d/output/XCO_surfd/\"\n", "dir_rad_old = \"/tigress/changgoo/radps_postproc/\"\n", - "#R8_2pc_rst\n", + "# R8_2pc_rst\n", "ivtks_R8_2pc = np.arange(290, 400, 10)\n", "id_R8_2pc_old = \"R8_2pc_rst\"\n", "chemZ_R8_old = [\"Z1S\", \"Z0p5S\", \"Z2S\"]\n", - "chemZ_R8_new = [\"Z1\", \"Z05\", \"Z2\"]\n", - "CO_lines = [1, 2] #CO (1-0), CO (2-1)\n", - "#R8_4pc_newacc\n", + "chemZ_R8_new = [\"Z1\", \"Z05\", \"Z2\"]\n", + "CO_lines = [1, 2] # CO (1-0), CO (2-1)\n", + "# R8_4pc_newacc\n", "ivtks_R8_4pc = np.arange(200, 410, 10)\n", "id_R8_4pc_old = \"R8_4pc_newacc\"\n", - "#R2_2pc_L256_B2\n", + "# R2_2pc_L256_B2\n", "ivtks_R2_2pc = np.arange(200, 420, 20)\n", "id_R2_2pc_old = \"R2_2pc_L256_B2\"\n", "chemZ_R2_old = [\"Z1E0p1S\", \"Z0p5E0p1S\", \"Z2E0p1S\"]\n", - "chemZ_R2_new = [\"Z1\", \"Z05\", \"Z2\"]\n", - "#R4_2pc_L512_B10\n", + "chemZ_R2_new = [\"Z1\", \"Z05\", \"Z2\"]\n", + "# R4_2pc_L512_B10\n", "ivtks_R4_2pc = np.arange(100, 320, 20)\n", "id_R4_2pc_old = \"R4_2pc_L512_B10\"\n", "chemZ_R4_old = [\"Z1E0p1S\", \"Z0p5E0p1S\", \"Z2E0p1S\"]\n", - "chemZ_R4_new = [\"Z1\", \"Z05\", \"Z2\"]\n", - "#R2_2pc_L512_B2_FUVcorr\n", + "chemZ_R4_new = [\"Z1\", \"Z05\", \"Z2\"]\n", + "# R2_2pc_L512_B2_FUVcorr\n", "ivtks_R2B2_2pc = np.arange(200, 300, 20)\n", "id_R2B2_2pc_old = \"R2_2pc_L512_B2_FUVcorr\"\n", "chemZ_R2B2_old = [\"Z1E0p1S\", \"Z0p5E0p1S\", \"Z2E0p1S\"]\n", - "chemZ_R2B2_new = [\"Z1\", \"Z05\", \"Z2\"]\n", - "#R2_1pc_L256_B2\n", + "chemZ_R2B2_new = [\"Z1\", \"Z05\", \"Z2\"]\n", + "# R2_1pc_L256_B2\n", "ivtks_R2N2_2pc = np.arange(255, 275, 5)\n", "id_R2N2_2pc_old = \"R2_1pc_L256_B2\"\n", "chemZ_R2N2_old = [\"Z1E0p1S\", \"Z0p5E0p1S\", \"Z2E0p1S\"]\n", - "chemZ_R2N2_new = [\"Z1\", \"Z05\", \"Z2\"]" + "chemZ_R2N2_new = [\"Z1\", \"Z05\", \"Z2\"]" ] }, { @@ -87,11 +87,14 @@ " copy_file(fn_hst_old, fn_hst_new)\n", " for ivtk in ivtks:\n", " dir_MHD_new = \"{:s}{:s}/{:04d}/MHD/\".format(dir_new, id_new, ivtk)\n", - " fn_vtk_old = \"{:s}{:s}/vtk/{:s}.{:04d}.vtk\".format(dir_MHD_old, id_old, id_old, ivtk)\n", + " fn_vtk_old = \"{:s}{:s}/vtk/{:s}.{:04d}.vtk\".format(\n", + " dir_MHD_old, id_old, id_old, ivtk\n", + " )\n", " fn_vtk_new = \"{:s}{:s}.{:04d}.vtk\".format(dir_MHD_new, id_new, ivtk)\n", " copy_file(fn_vtk_old, fn_vtk_new)\n", " return\n", "\n", + "\n", "def copy_chemistry(ivtks, id_old, id_new, dir_chem_old, dir_new, chemZ_old, chemZ_new):\n", " \"\"\"Copy Athena++ post-processing chemistry HDF5 output to data storage.\n", " input:\n", @@ -108,17 +111,22 @@ " for ivtk in ivtks:\n", " dir_chem_new = \"{:s}{:s}/{:04d}/chem/\".format(dir_new, id_new, 
ivtk)\n", " fn_old = \"{:s}{:s}/{:s}/{:04d}/{:s}.out1.00001.athdf\".format(\n", - " dir_chem_old, id_old, Z_old, ivtk, id_old)\n", + " dir_chem_old, id_old, Z_old, ivtk, id_old\n", + " )\n", " fn_new = \"{:s}{:s}/{:s}-{:s}.{:04d}.athdf\".format(\n", - " dir_chem_new, Z_new, id_new, Z_new, ivtk)\n", + " dir_chem_new, Z_new, id_new, Z_new, ivtk\n", + " )\n", " copy_file(fn_old, fn_new)\n", " fn_xdmf_old = \"{:s}{:s}/{:s}/{:04d}/{:s}.out1.00001.athdf.xdmf\".format(\n", - " dir_chem_old, id_old, Z_old, ivtk, id_old)\n", + " dir_chem_old, id_old, Z_old, ivtk, id_old\n", + " )\n", " fn_xdmf_new = \"{:s}{:s}/{:s}-{:s}.{:04d}.athdf.xdmf\".format(\n", - " dir_chem_new, Z_new, id_new, Z_new, ivtk)\n", + " dir_chem_new, Z_new, id_new, Z_new, ivtk\n", + " )\n", " copy_file(fn_xdmf_old, fn_xdmf_new)\n", " return\n", "\n", + "\n", "def copy_CO(ivtks, id_old, id_new, dir_CO_old, dir_new, chemZ_old, chemZ_new, lines):\n", " \"\"\"Copy RADMC-3D post-processing CO lines PPV output to data storage.\n", " input:\n", @@ -138,14 +146,17 @@ " dir_CO_new = \"{:s}{:s}/{:04d}/CO_lines/\".format(dir_new, id_new, ivtk)\n", " for iline in lines:\n", " fn_old = \"{:s}{:s}/{:s}/il{:d}/{:04d}/image.bout\".format(\n", - " dir_CO_old, id_old, Z_old, iline, ivtk)\n", + " dir_CO_old, id_old, Z_old, iline, ivtk\n", + " )\n", " fn_new = \"{:s}{:s}/il{:d}/{:s}-{:s}.il{:d}.{:04d}.bout\".format(\n", - " dir_CO_new, Z_new, iline, id_new, Z_new, iline, ivtk)\n", + " dir_CO_new, Z_new, iline, id_new, Z_new, iline, ivtk\n", + " )\n", " copy_file(fn_old, fn_new)\n", " return\n", "\n", + "\n", "def copy_rad(ivtks, id_old, id_new, dir_rad_old, dir_new):\n", - " \"\"\"Copy Athena 4.2 MHD TIGRESS simulation output \n", + " \"\"\"Copy Athena 4.2 MHD TIGRESS simulation output\n", " with radiaiton post-processing to data storage.\n", " Delete radiation fields.\n", " input:\n", @@ -154,13 +165,17 @@ " id_new: string, new output id, e.g. 
R8_2pc\n", " dir_rad_old: string, old folder for data storage\n", " dir_new: string, new folder for data storage\"\"\"\n", - " fields_del = ['rad_energy_density0','rad_energy_density1']\n", + " fields_del = [\"rad_energy_density0\", \"rad_energy_density1\"]\n", " for ivtk in ivtks:\n", - " ivtk_old = int((ivtk-ivtks[0])/10 + 1)\n", + " ivtk_old = int((ivtk - ivtks[0]) / 10 + 1)\n", " dir_rad_new = \"{:s}{:s}/{:04d}/MHD_PI/\".format(dir_new, id_new, ivtk)\n", - " fn_vtk_old = \"{:s}{:s}/vtk/{:s}.{:04d}.vtk\".format(dir_rad_old, id_old, id_old, ivtk_old)\n", + " fn_vtk_old = \"{:s}{:s}/vtk/{:s}.{:04d}.vtk\".format(\n", + " dir_rad_old, id_old, id_old, ivtk_old\n", + " )\n", " fn_vtk_new = \"{:s}{:s}.{:04d}.vtk\".format(dir_rad_new, id_new, ivtk)\n", - " delete_fields_vtk.delete_fields_vtk(fn_vtk_old, fn_vtk_new, fields_del, verbose=False)\n", + " delete_fields_vtk.delete_fields_vtk(\n", + " fn_vtk_old, fn_vtk_new, fields_del, verbose=False\n", + " )\n", " return" ] }, @@ -170,10 +185,27 @@ "metadata": {}, "outputs": [], "source": [ - "#R8_2pc_rst\n", + "# R8_2pc_rst\n", "copy_MHD(ivtks_R8_2pc, id_R8_2pc_old, \"R8_2pc\", dir_MHD_old, dir_new)\n", - "copy_chemistry(ivtks_R8_2pc, id_R8_2pc_old, \"R8_2pc\", dir_chem_old, dir_new, chemZ_R8_old, chemZ_R8_new)\n", - "copy_CO(ivtks_R8_2pc, id_R8_2pc_old, \"R8_2pc\", dir_CO_old, dir_new, chemZ_R8_old, chemZ_R8_new, CO_lines)\n", + "copy_chemistry(\n", + " ivtks_R8_2pc,\n", + " id_R8_2pc_old,\n", + " \"R8_2pc\",\n", + " dir_chem_old,\n", + " dir_new,\n", + " chemZ_R8_old,\n", + " chemZ_R8_new,\n", + ")\n", + "copy_CO(\n", + " ivtks_R8_2pc,\n", + " id_R8_2pc_old,\n", + " \"R8_2pc\",\n", + " dir_CO_old,\n", + " dir_new,\n", + " chemZ_R8_old,\n", + " chemZ_R8_new,\n", + " CO_lines,\n", + ")\n", "copy_rad(ivtks_R8_2pc, id_R8_2pc_old, \"R8_2pc\", dir_rad_old, dir_new)" ] }, @@ -185,7 +217,7 @@ }, "outputs": [], "source": [ - "#R8_4pc_newacc\n", + "# R8_4pc_newacc\n", "copy_MHD(ivtks_R8_4pc, id_R8_4pc_old, \"R8_4pc\", dir_MHD_old, dir_new)\n", "copy_rad(ivtks_R8_4pc, id_R8_4pc_old, \"R8_4pc\", dir_rad_old, dir_new)" ] @@ -196,10 +228,27 @@ "metadata": {}, "outputs": [], "source": [ - "#R2_2pc_L256_B2\n", + "# R2_2pc_L256_B2\n", "copy_MHD(ivtks_R2_2pc, id_R2_2pc_old, \"R2_2pc\", dir_MHD_old, dir_new)\n", - "copy_chemistry(ivtks_R2_2pc, id_R2_2pc_old, \"R2_2pc\", dir_chem_old, dir_new, chemZ_R2_old, chemZ_R2_new)\n", - "copy_CO(ivtks_R2_2pc, id_R2_2pc_old, \"R2_2pc\", dir_CO_old, dir_new, chemZ_R2_old, chemZ_R2_new, CO_lines)" + "copy_chemistry(\n", + " ivtks_R2_2pc,\n", + " id_R2_2pc_old,\n", + " \"R2_2pc\",\n", + " dir_chem_old,\n", + " dir_new,\n", + " chemZ_R2_old,\n", + " chemZ_R2_new,\n", + ")\n", + "copy_CO(\n", + " ivtks_R2_2pc,\n", + " id_R2_2pc_old,\n", + " \"R2_2pc\",\n", + " dir_CO_old,\n", + " dir_new,\n", + " chemZ_R2_old,\n", + " chemZ_R2_new,\n", + " CO_lines,\n", + ")" ] }, { @@ -208,10 +257,27 @@ "metadata": {}, "outputs": [], "source": [ - "#R4_2pc_L512_B10\n", + "# R4_2pc_L512_B10\n", "copy_MHD(ivtks_R4_2pc, id_R4_2pc_old, \"R4_2pc\", dir_MHD_old, dir_new)\n", - "copy_chemistry(ivtks_R4_2pc, id_R4_2pc_old, \"R4_2pc\", dir_chem_old, dir_new, chemZ_R4_old, chemZ_R4_new)\n", - "copy_CO(ivtks_R4_2pc, id_R4_2pc_old, \"R4_2pc\", dir_CO_old, dir_new, chemZ_R4_old, chemZ_R4_new, CO_lines)" + "copy_chemistry(\n", + " ivtks_R4_2pc,\n", + " id_R4_2pc_old,\n", + " \"R4_2pc\",\n", + " dir_chem_old,\n", + " dir_new,\n", + " chemZ_R4_old,\n", + " chemZ_R4_new,\n", + ")\n", + "copy_CO(\n", + " ivtks_R4_2pc,\n", + " id_R4_2pc_old,\n", + " \"R4_2pc\",\n", + 
" dir_CO_old,\n", + " dir_new,\n", + " chemZ_R4_old,\n", + " chemZ_R4_new,\n", + " CO_lines,\n", + ")" ] }, { @@ -220,10 +286,27 @@ "metadata": {}, "outputs": [], "source": [ - "#R2_2pc_L512_B2_FUVcorr\n", + "# R2_2pc_L512_B2_FUVcorr\n", "copy_MHD(ivtks_R2B2_2pc, id_R2B2_2pc_old, \"R2B2_2pc\", dir_MHD_old, dir_new)\n", - "copy_chemistry(ivtks_R2B2_2pc, id_R2B2_2pc_old, \"R2B2_2pc\", dir_chem_old, dir_new, chemZ_R2B2_old, chemZ_R2B2_new)\n", - "copy_CO(ivtks_R2B2_2pc, id_R2B2_2pc_old, \"R2B2_2pc\", dir_CO_old, dir_new, chemZ_R2B2_old, chemZ_R2B2_new, CO_lines)" + "copy_chemistry(\n", + " ivtks_R2B2_2pc,\n", + " id_R2B2_2pc_old,\n", + " \"R2B2_2pc\",\n", + " dir_chem_old,\n", + " dir_new,\n", + " chemZ_R2B2_old,\n", + " chemZ_R2B2_new,\n", + ")\n", + "copy_CO(\n", + " ivtks_R2B2_2pc,\n", + " id_R2B2_2pc_old,\n", + " \"R2B2_2pc\",\n", + " dir_CO_old,\n", + " dir_new,\n", + " chemZ_R2B2_old,\n", + " chemZ_R2B2_new,\n", + " CO_lines,\n", + ")" ] }, { @@ -232,10 +315,27 @@ "metadata": {}, "outputs": [], "source": [ - "#R2_1pc_L256_B2\n", + "# R2_1pc_L256_B2\n", "copy_MHD(ivtks_R2N2_2pc, id_R2N2_2pc_old, \"R2_1pc\", dir_MHD_old, dir_new)\n", - "copy_chemistry(ivtks_R2N2_2pc, id_R2N2_2pc_old, \"R2_1pc\", dir_chem_old, dir_new, chemZ_R2N2_old, chemZ_R2N2_new)\n", - "copy_CO(ivtks_R2N2_2pc, id_R2N2_2pc_old, \"R2_1pc\", dir_CO_old, dir_new, chemZ_R2N2_old, chemZ_R2N2_new, CO_lines)" + "copy_chemistry(\n", + " ivtks_R2N2_2pc,\n", + " id_R2N2_2pc_old,\n", + " \"R2_1pc\",\n", + " dir_chem_old,\n", + " dir_new,\n", + " chemZ_R2N2_old,\n", + " chemZ_R2N2_new,\n", + ")\n", + "copy_CO(\n", + " ivtks_R2N2_2pc,\n", + " id_R2N2_2pc_old,\n", + " \"R2_1pc\",\n", + " dir_CO_old,\n", + " dir_new,\n", + " chemZ_R2N2_old,\n", + " chemZ_R2N2_new,\n", + " CO_lines,\n", + ")" ] }, { From c000df5ebc4c293a279a45ad327a12fd6bafc944 Mon Sep 17 00:00:00 2001 From: Chang-Goo Kim Date: Wed, 18 Sep 2024 10:58:05 -0400 Subject: [PATCH 3/5] linting and formatting --- astro_tigress/athena_read.py | 847 ++++++++++++++++++----------- astro_tigress/map_circular_beam.py | 35 +- astro_tigress/tigress_read.py | 6 +- astro_tigress/tigress_utils.py | 9 +- astro_tigress/ytathena.py | 2 +- 5 files changed, 544 insertions(+), 355 deletions(-) diff --git a/astro_tigress/athena_read.py b/astro_tigress/athena_read.py index 8df309e..a183e56 100644 --- a/astro_tigress/athena_read.py +++ b/astro_tigress/athena_read.py @@ -17,6 +17,7 @@ # ======================================================================================== + def check_nan(data): """Check input NumPy array for the presence of any NaN entries""" if np.isnan(data).any(): @@ -26,12 +27,15 @@ def check_nan(data): # ======================================================================================== + def error_dat(filename, **kwargs): """Wrapper to np.loadtxt() for applying optional checks used in regression tests""" - data = np.loadtxt(filename, - dtype=np.float64, - ndmin=2, # prevent NumPy from squeezing singleton dimensions - **kwargs) + data = np.loadtxt( + filename, + dtype=np.float64, + ndmin=2, # prevent NumPy from squeezing singleton dimensions + **kwargs, + ) if check_nan_flag: check_nan(data) return data @@ -39,6 +43,7 @@ def error_dat(filename, **kwargs): # ======================================================================================== + def hst(filename, raw=False): """Read .hst files and return dict of 1D arrays. 
@@ -48,14 +53,14 @@ def hst(filename, raw=False): """ # Read data - with open(filename, 'r') as data_file: + with open(filename, "r") as data_file: # Find header header_found = False multiple_headers = False header_location = None line = data_file.readline() while len(line) > 0: - if line.startswith('# Athena'): + if line.startswith("# Athena"): if header_found: multiple_headers = True else: @@ -63,16 +68,18 @@ def hst(filename, raw=False): header_location = data_file.tell() line = data_file.readline() if multiple_headers: - warnings.warn('Multiple headers found; using most recent data', AthenaWarning) + warnings.warn( + "Multiple headers found; using most recent data", AthenaWarning + ) if header_location is None: - raise AthenaError('No header found') + raise AthenaError("No header found") # Parse header data_file.seek(header_location) header = data_file.readline() - data_names = re.findall(r'\[\d+\]=(\S+)', header) + data_names = re.findall(r"\[\d+\]=(\S+)", header) if len(data_names) == 0: - raise AthenaError('Header could not be parsed') + raise AthenaError("Header could not be parsed") # skip comments start_data = data_file.tell() line = data_file.readline() @@ -95,15 +102,17 @@ def hst(filename, raw=False): for key, val in list(data.items()): data[key] = np.array(val) if not raw: - if data_names[0] != 'time': - raise AthenaError('Cannot remove spurious data because time column could not' - + ' be identified') + if data_names[0] != "time": + raise AthenaError( + "Cannot remove spurious data because time column could not" + + " be identified" + ) branches_removed = False while not branches_removed: branches_removed = True - for n in range(1, len(data['time'])): - if data['time'][n] <= data['time'][n-1]: - branch_index = np.where(data['time'][:n] >= data['time'][n])[0][0] + for n in range(1, len(data["time"])): + if data["time"][n] <= data["time"][n - 1]: + branch_index = np.where(data["time"][:n] >= data["time"][n])[0][0] for key, val in list(data.items()): data[key] = np.concatenate((val[:branch_index], val[n:])) branches_removed = False @@ -116,6 +125,7 @@ def hst(filename, raw=False): # ======================================================================================== + def tab(filename, raw=False, dimensions=None): """Read .tab files and return dict or array. 
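For reference, tab() parses the .tab header (see the following hunk) for the time, cycle, and variable names, then reshapes the columns to the dimensionality of the slice; with raw=False the result is a dict that also carries "time", "cycle", and "variables" entries. A minimal usage sketch; "slice.tab" is a hypothetical file name:

    from astro_tigress import athena_read

    tab_data = athena_read.tab("slice.tab")  # hypothetical path
    print(tab_data["time"], tab_data["cycle"], tab_data["variables"])
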
@@ -126,42 +136,44 @@ def tab(filename, raw=False, dimensions=None): # Check for valid number of dimensions if raw and not (dimensions == 1 or dimensions == 2 or dimensions == 3): - raise AthenaError('Improper number of dimensions') + raise AthenaError("Improper number of dimensions") if not raw and dimensions is not None: - warnings.warn('Ignoring specified number of dimensions', AthenaWarning) + warnings.warn("Ignoring specified number of dimensions", AthenaWarning) # Parse header if not raw: data_dict = {} - with open(filename, 'r') as data_file: + with open(filename, "r") as data_file: line = data_file.readline() - attributes = re.search(r'time=(\S+)\s+cycle=(\S+)\s+variables=(\S+)', line) + attributes = re.search(r"time=(\S+)\s+cycle=(\S+)\s+variables=(\S+)", line) line = data_file.readline() headings = line.split()[1:] - data_dict['time'] = float(attributes.group(1)) - data_dict['cycle'] = int(attributes.group(2)) - data_dict['variables'] = attributes.group(3) - if headings[0] == 'i' and headings[2] == 'j' and headings[4] == 'k': + data_dict["time"] = float(attributes.group(1)) + data_dict["cycle"] = int(attributes.group(2)) + data_dict["variables"] = attributes.group(3) + if headings[0] == "i" and headings[2] == "j" and headings[4] == "k": headings = headings[1:2] + headings[3:4] + headings[5:] dimensions = 3 - elif ((headings[0] == 'i' and headings[2] == 'j') - or (headings[0] == 'i' and headings[2] == 'k') - or (headings[0] == 'j' and headings[2] == 'k')): + elif ( + (headings[0] == "i" and headings[2] == "j") + or (headings[0] == "i" and headings[2] == "k") + or (headings[0] == "j" and headings[2] == "k") + ): headings = headings[1:2] + headings[3:] dimensions = 2 - elif headings[0] == 'i' or headings[0] == 'j' or headings[0] == 'k': + elif headings[0] == "i" or headings[0] == "j" or headings[0] == "k": headings = headings[1:] dimensions = 1 else: - raise AthenaError('Could not parse header') + raise AthenaError("Could not parse header") # Go through lines data_array = [] - with open(filename, 'r') as data_file: + with open(filename, "r") as data_file: first_line = True for line in data_file: # Skip comments - if line.split()[0][0] == '#': + if line.split()[0][0] == "#": continue # Extract cell indices @@ -192,13 +204,18 @@ def tab(filename, raw=False, dimensions=None): # Reshape array based on number of dimensions if dimensions == 1: - array_shape = (i_max-i_min+1, num_entries) + array_shape = (i_max - i_min + 1, num_entries) array_transpose = (1, 0) if dimensions == 2: - array_shape = (j_max-j_min+1, i_max-i_min+1, num_entries) + array_shape = (j_max - j_min + 1, i_max - i_min + 1, num_entries) array_transpose = (2, 0, 1) if dimensions == 3: - array_shape = (k_max-k_min+1, j_max-j_min+1, i_max-i_min+1, num_entries) + array_shape = ( + k_max - k_min + 1, + j_max - j_min + 1, + i_max - i_min + 1, + num_entries, + ) array_transpose = (3, 0, 1, 2) data_array = np.transpose(np.reshape(data_array, array_shape), array_transpose) @@ -217,19 +234,20 @@ def tab(filename, raw=False, dimensions=None): # ======================================================================================== + def vtk(filename): """Read .vtk files and return dict of arrays of data.""" # Read raw data - with open(filename, 'rb') as data_file: + with open(filename, "rb") as data_file: raw_data = data_file.read() - raw_data_ascii = raw_data.decode('ascii', 'replace') + raw_data_ascii = raw_data.decode("ascii", "replace") # Skip header current_index = 0 current_char = raw_data_ascii[current_index] - while 
current_char == '#': - while current_char != '\n': + while current_char == "#": + while current_char != "\n": current_index += 1 current_char = raw_data_ascii[current_index] current_index += 1 @@ -238,90 +256,96 @@ def vtk(filename): # Function for skipping though the file def skip_string(expected_string): expected_string_len = len(expected_string) - if (raw_data_ascii[current_index:current_index+expected_string_len] - != expected_string): - raise AthenaError('File not formatted as expected') - return current_index+expected_string_len + if ( + raw_data_ascii[current_index : current_index + expected_string_len] + != expected_string + ): + raise AthenaError("File not formatted as expected") + return current_index + expected_string_len # Read metadata - current_index = skip_string('BINARY\nDATASET RECTILINEAR_GRID\nDIMENSIONS ') + current_index = skip_string("BINARY\nDATASET RECTILINEAR_GRID\nDIMENSIONS ") end_of_line_index = current_index + 1 - while raw_data_ascii[end_of_line_index] != '\n': + while raw_data_ascii[end_of_line_index] != "\n": end_of_line_index += 1 data_to_map = raw_data_ascii[current_index:end_of_line_index] - face_dimensions = list(map(int, data_to_map.split(' '))) + face_dimensions = list(map(int, data_to_map.split(" "))) current_index = end_of_line_index + 1 # Function for reading interface locations def read_faces(letter, num_faces): - identifier_string = '{0}_COORDINATES {1} float\n'.format(letter, num_faces) + identifier_string = "{0}_COORDINATES {1} float\n".format(letter, num_faces) begin_index = skip_string(identifier_string) - format_string = '>' + 'f'*num_faces - end_index = begin_index + 4*num_faces + format_string = ">" + "f" * num_faces + end_index = begin_index + 4 * num_faces vals = np.array(struct.unpack(format_string, raw_data[begin_index:end_index])) - return vals, end_index+1 + return vals, end_index + 1 # Read interface locations - x_faces, current_index = read_faces('X', face_dimensions[0]) - y_faces, current_index = read_faces('Y', face_dimensions[1]) - z_faces, current_index = read_faces('Z', face_dimensions[2]) + x_faces, current_index = read_faces("X", face_dimensions[0]) + y_faces, current_index = read_faces("Y", face_dimensions[1]) + z_faces, current_index = read_faces("Z", face_dimensions[2]) # Prepare to read quantities defined on grid - cell_dimensions = np.array([max(dim-1, 1) for dim in face_dimensions]) + cell_dimensions = np.array([max(dim - 1, 1) for dim in face_dimensions]) num_cells = cell_dimensions.prod() - current_index = skip_string('CELL_DATA {0}\n'.format(num_cells)) - if raw_data_ascii[current_index:current_index+1] == '\n': - current_index = skip_string('\n') # extra newline inserted by join script + current_index = skip_string("CELL_DATA {0}\n".format(num_cells)) + if raw_data_ascii[current_index : current_index + 1] == "\n": + current_index = skip_string("\n") # extra newline inserted by join script data = {} # Function for reading scalar data def read_cell_scalars(): - begin_index = skip_string('SCALARS ') + begin_index = skip_string("SCALARS ") end_of_word_index = begin_index + 1 - while raw_data_ascii[end_of_word_index] != ' ': + while raw_data_ascii[end_of_word_index] != " ": end_of_word_index += 1 array_name = raw_data_ascii[begin_index:end_of_word_index] - string_to_skip = 'SCALARS {0} float\nLOOKUP_TABLE default\n'.format(array_name) + string_to_skip = "SCALARS {0} float\nLOOKUP_TABLE default\n".format(array_name) begin_index = skip_string(string_to_skip) - format_string = '>' + 'f'*num_cells - end_index = begin_index + 
4*num_cells + format_string = ">" + "f" * num_cells + end_index = begin_index + 4 * num_cells data[array_name] = struct.unpack(format_string, raw_data[begin_index:end_index]) dimensions = tuple(cell_dimensions[::-1]) data[array_name] = np.array(data[array_name]).reshape(dimensions) - return end_index+1 + return end_index + 1 # Function for reading vector data def read_cell_vectors(): - begin_index = skip_string('VECTORS ') + begin_index = skip_string("VECTORS ") end_of_word_index = begin_index + 1 - while raw_data_ascii[end_of_word_index] != '\n': + while raw_data_ascii[end_of_word_index] != "\n": end_of_word_index += 1 array_name = raw_data_ascii[begin_index:end_of_word_index] - string_to_skip = 'VECTORS {0}\n'.format(array_name) + string_to_skip = "VECTORS {0}\n".format(array_name) array_name = array_name[:-6] # remove ' float' begin_index = skip_string(string_to_skip) - format_string = '>' + 'f'*num_cells*3 - end_index = begin_index + 4*num_cells*3 + format_string = ">" + "f" * num_cells * 3 + end_index = begin_index + 4 * num_cells * 3 data[array_name] = struct.unpack(format_string, raw_data[begin_index:end_index]) dimensions = tuple(np.append(cell_dimensions[::-1], 3)) data[array_name] = np.array(data[array_name]).reshape(dimensions) - return end_index+1 + return end_index + 1 # Read quantities defined on grid while current_index < len(raw_data): - expected_string = 'SCALARS' + expected_string = "SCALARS" expected_string_len = len(expected_string) - if (raw_data_ascii[current_index:current_index+expected_string_len] - == expected_string): + if ( + raw_data_ascii[current_index : current_index + expected_string_len] + == expected_string + ): current_index = read_cell_scalars() continue - expected_string = 'VECTORS' + expected_string = "VECTORS" expected_string_len = len(expected_string) - if (raw_data_ascii[current_index:current_index+expected_string_len] - == expected_string): + if ( + raw_data_ascii[current_index : current_index + expected_string_len] + == expected_string + ): current_index = read_cell_vectors() continue - raise AthenaError('File not formatted as expected') + raise AthenaError("File not formatted as expected") if check_nan_flag: check_nan(x_faces) @@ -335,11 +359,33 @@ def read_cell_vectors(): # ======================================================================================== -def athdf(filename, raw=False, data=None, quantities=None, dtype=None, level=None, - return_levels=False, subsample=False, fast_restrict=False, x1_min=None, - x1_max=None, x2_min=None, x2_max=None, x3_min=None, x3_max=None, vol_func=None, - vol_params=None, face_func_1=None, face_func_2=None, face_func_3=None, - center_func_1=None, center_func_2=None, center_func_3=None, num_ghost=0): + +def athdf( + filename, + raw=False, + data=None, + quantities=None, + dtype=None, + level=None, + return_levels=False, + subsample=False, + fast_restrict=False, + x1_min=None, + x1_max=None, + x2_min=None, + x2_max=None, + x3_min=None, + x3_max=None, + vol_func=None, + vol_params=None, + face_func_1=None, + face_func_2=None, + face_func_3=None, + center_func_1=None, + center_func_2=None, + center_func_3=None, + num_ghost=0, +): """Read .athdf files and populate dict of arrays of data. 
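For reference, athdf() reads Athena++ HDF5 output onto a single uniform grid, restricting or prolongating mesh blocks to the requested refinement level; the returned dict holds the coordinate arrays (x1f, x1v, and so on) plus one 3D array per requested quantity. A minimal usage sketch; the file name and the "rho" quantity are hypothetical examples:

    from astro_tigress import athena_read

    ds = athena_read.athdf("model.out1.00001.athdf", quantities=["rho"])
    print(ds["x1v"].shape)  # cell-center coordinates along x1
    print(ds["rho"].shape)  # (nx3, nx2, nx1) at the requested level
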
@@ -353,30 +399,32 @@ def athdf(filename, raw=False, data=None, quantities=None, dtype=None, level=Non # Handle request for raw data if raw: # Open file - with h5py.File(filename, 'r') as f: + with h5py.File(filename, "r") as f: # Store file-level attributes data = {} for key in f.attrs: data[str(key)] = f.attrs[key] # Store location metadata - data['Levels'] = f['Levels'][:] - data['LogicalLocations'] = f['LogicalLocations'][:] + data["Levels"] = f["Levels"][:] + data["LogicalLocations"] = f["LogicalLocations"][:] # Store coordinate data - data['x1f'] = f['x1f'][:] - data['x2f'] = f['x2f'][:] - data['x3f'] = f['x3f'][:] - data['x1v'] = f['x1v'][:] - data['x2v'] = f['x2v'][:] - data['x3v'] = f['x3v'][:] + data["x1f"] = f["x1f"][:] + data["x2f"] = f["x2f"][:] + data["x3f"] = f["x3f"][:] + data["x1v"] = f["x1v"][:] + data["x2v"] = f["x2v"][:] + data["x3v"] = f["x3v"][:] # Get metadata describing file layout - dataset_names = np.array([x.decode('ascii', 'replace') - for x in f.attrs['DatasetNames'][:]]) - dataset_sizes = f.attrs['NumVariables'][:] - variable_names = np.array([x.decode('ascii', 'replace') - for x in f.attrs['VariableNames'][:]]) + dataset_names = np.array( + [x.decode("ascii", "replace") for x in f.attrs["DatasetNames"][:]] + ) + dataset_sizes = f.attrs["NumVariables"][:] + variable_names = np.array( + [x.decode("ascii", "replace") for x in f.attrs["VariableNames"][:]] + ) # Store cell data for dataset_index, dataset_name in enumerate(dataset_names): @@ -401,39 +449,50 @@ def athdf(filename, raw=False, data=None, quantities=None, dtype=None, level=Non new_data = False # Open file - with h5py.File(filename, 'r') as f: + with h5py.File(filename, "r") as f: # Extract size information - max_level = f.attrs['MaxLevel'] + max_level = f.attrs["MaxLevel"] if level is None: level = max_level - block_size = f.attrs['MeshBlockSize'] - root_grid_size = f.attrs['RootGridSize'] - levels = f['Levels'][:] - logical_locations = f['LogicalLocations'][:] + block_size = f.attrs["MeshBlockSize"] + root_grid_size = f.attrs["RootGridSize"] + levels = f["Levels"][:] + logical_locations = f["LogicalLocations"][:] if dtype is None: - dtype = f[f.attrs['DatasetNames'][0]].dtype.newbyteorder('=') - if num_ghost == 0 and np.array(f['x1v']).min() < f.attrs['RootGridX1'][0]: - raise AthenaError('Ghost zones detected but "num_ghost" keyword set to zero.') + dtype = f[f.attrs["DatasetNames"][0]].dtype.newbyteorder("=") + if num_ghost == 0 and np.array(f["x1v"]).min() < f.attrs["RootGridX1"][0]: + raise AthenaError( + 'Ghost zones detected but "num_ghost" keyword set to zero.' 
+ ) if num_ghost > 0 and not np.all(levels == max_level): - raise AthenaError('Cannot use ghost zones with different refinement levels') + raise AthenaError("Cannot use ghost zones with different refinement levels") nx_vals = [] for d in range(3): if block_size[d] == 1 and root_grid_size[d] > 1: # sum or slice - other_locations = [location - for location in zip(levels, - logical_locations[:, (d+1) % 3], - logical_locations[:, (d+2) % 3])] + other_locations = [ + location + for location in zip( + levels, + logical_locations[:, (d + 1) % 3], + logical_locations[:, (d + 2) % 3], + ) + ] if len(set(other_locations)) == len(other_locations): # effective slice nx_vals.append(1) else: # nontrivial sum num_blocks_this_dim = 0 - for level_this_dim, loc_this_dim in zip(levels, - logical_locations[:, d]): + for level_this_dim, loc_this_dim in zip( + levels, logical_locations[:, d] + ): if level_this_dim <= level: - possible_max = (loc_this_dim+1) * 2**(level-level_this_dim) + possible_max = (loc_this_dim + 1) * 2 ** ( + level - level_this_dim + ) num_blocks_this_dim = max(num_blocks_this_dim, possible_max) else: - possible_max = (loc_this_dim+1) / 2**(level_this_dim-level) + possible_max = (loc_this_dim + 1) / 2 ** ( + level_this_dim - level + ) num_blocks_this_dim = max(num_blocks_this_dim, possible_max) nx_vals.append(num_blocks_this_dim) elif block_size[d] == 1: # singleton dimension @@ -452,34 +511,55 @@ def athdf(filename, raw=False, data=None, quantities=None, dtype=None, level=Non num_extended_dims += 1 # Set volume function for preset coordinates if needed - coord = f.attrs['Coordinates'].decode('ascii', 'replace') - if level < max_level and not subsample and not fast_restrict and vol_func is None: - x1_rat = f.attrs['RootGridX1'][2] - x2_rat = f.attrs['RootGridX2'][2] - x3_rat = f.attrs['RootGridX3'][2] - if (coord == 'cartesian' or coord == 'minkowski' or coord == 'tilted' - or coord == 'sinusoidal'): - if ((nx1 == 1 or x1_rat == 1.0) and (nx2 == 1 or x2_rat == 1.0) - and (nx3 == 1 or x3_rat == 1.0)): + coord = f.attrs["Coordinates"].decode("ascii", "replace") + if ( + level < max_level + and not subsample + and not fast_restrict + and vol_func is None + ): + x1_rat = f.attrs["RootGridX1"][2] + x2_rat = f.attrs["RootGridX2"][2] + x3_rat = f.attrs["RootGridX3"][2] + if ( + coord == "cartesian" + or coord == "minkowski" + or coord == "tilted" + or coord == "sinusoidal" + ): + if ( + (nx1 == 1 or x1_rat == 1.0) + and (nx2 == 1 or x2_rat == 1.0) + and (nx3 == 1 or x3_rat == 1.0) + ): fast_restrict = True else: + def vol_func(xm, xp, ym, yp, zm, zp): - return (xp-xm) * (yp-ym) * (zp-zm) - elif coord == 'cylindrical': - if (nx1 == 1 and (nx2 == 1 or x2_rat == 1.0) - and (nx3 == 1 or x3_rat == 1.0)): + return (xp - xm) * (yp - ym) * (zp - zm) + elif coord == "cylindrical": + if ( + nx1 == 1 + and (nx2 == 1 or x2_rat == 1.0) + and (nx3 == 1 or x3_rat == 1.0) + ): fast_restrict = True else: + def vol_func(rm, rp, phim, phip, zm, zp): - return (rp**2-rm**2) * (phip-phim) * (zp-zm) - elif coord == 'spherical_polar' or coord == 'schwarzschild': + return (rp**2 - rm**2) * (phip - phim) * (zp - zm) + elif coord == "spherical_polar" or coord == "schwarzschild": if nx1 == 1 and nx2 == 1 and (nx3 == 1 or x3_rat == 1.0): fast_restrict = True else: + def vol_func(rm, rp, thetam, thetap, phim, phip): - return ((rp**3-rm**3) * abs(np.cos(thetam)-np.cos(thetap)) - * (phip-phim)) - elif coord == 'kerr-schild': + return ( + (rp**3 - rm**3) + * abs(np.cos(thetam) - np.cos(thetap)) + * (phip - phim) + ) + elif coord 
== "kerr-schild": if nx1 == 1 and nx2 == 1 and (nx3 == 1 or x3_rat == 1.0): fast_restrict = True else: @@ -488,86 +568,124 @@ def vol_func(rm, rp, thetam, thetap, phim, phip): def vol_func(rm, rp, thetam, thetap, phim, phip): cosm = np.cos(thetam) cosp = np.cos(thetap) - return (((rp**3-rm**3) * abs(cosm-cosp) - + a**2 * (rp-rm) * abs(cosm**3-cosp**3)) * (phip-phim)) + return ( + (rp**3 - rm**3) * abs(cosm - cosp) + + a**2 * (rp - rm) * abs(cosm**3 - cosp**3) + ) * (phip - phim) else: - raise AthenaError('Coordinates not recognized') + raise AthenaError("Coordinates not recognized") # Set cell center functions for preset coordinates if center_func_1 is None: - if (coord == 'cartesian' or coord == 'minkowski' or coord == 'tilted' - or coord == 'sinusoidal' or coord == 'kerr-schild'): + if ( + coord == "cartesian" + or coord == "minkowski" + or coord == "tilted" + or coord == "sinusoidal" + or coord == "kerr-schild" + ): + def center_func_1(xm, xp): - return 0.5 * (xm+xp) - elif coord == 'cylindrical': + return 0.5 * (xm + xp) + elif coord == "cylindrical": + def center_func_1(xm, xp): - return 2.0/3.0 * (xp**3-xm**3) / (xp**2-xm**2) - elif coord == 'spherical_polar': + return 2.0 / 3.0 * (xp**3 - xm**3) / (xp**2 - xm**2) + elif coord == "spherical_polar": + def center_func_1(xm, xp): - return 3.0/4.0 * (xp**4-xm**4) / (xp**3-xm**3) - elif coord == 'schwarzschild': + return 3.0 / 4.0 * (xp**4 - xm**4) / (xp**3 - xm**3) + elif coord == "schwarzschild": + def center_func_1(xm, xp): - return (0.5*(xm**3+xp**3)) ** 1.0/3.0 + return (0.5 * (xm**3 + xp**3)) ** 1.0 / 3.0 else: - raise AthenaError('Coordinates not recognized') + raise AthenaError("Coordinates not recognized") if center_func_2 is None: - if (coord == 'cartesian' or coord == 'cylindrical' or coord == 'minkowski' - or coord == 'tilted' or coord == 'sinusoidal' - or coord == 'kerr-schild'): + if ( + coord == "cartesian" + or coord == "cylindrical" + or coord == "minkowski" + or coord == "tilted" + or coord == "sinusoidal" + or coord == "kerr-schild" + ): + def center_func_2(xm, xp): - return 0.5 * (xm+xp) - elif coord == 'spherical_polar': + return 0.5 * (xm + xp) + elif coord == "spherical_polar": + def center_func_2(xm, xp): sm = np.sin(xm) cm = np.cos(xm) sp = np.sin(xp) cp = np.cos(xp) - return (sp-xp*cp - sm+xm*cm) / (cm - cp) - elif coord == 'schwarzschild': + return (sp - xp * cp - sm + xm * cm) / (cm - cp) + elif coord == "schwarzschild": + def center_func_2(xm, xp): return np.arccos(0.5 * (np.cos(xm) + np.cos(xp))) else: - raise AthenaError('Coordinates not recognized') + raise AthenaError("Coordinates not recognized") if center_func_3 is None: - if (coord == 'cartesian' or coord == 'cylindrical' or coord == 'tilted' - or coord == 'spherical_polar' or coord == 'minkowski' - or coord == 'sinusoidal' or coord == 'schwarzschild' - or coord == 'kerr-schild'): + if ( + coord == "cartesian" + or coord == "cylindrical" + or coord == "tilted" + or coord == "spherical_polar" + or coord == "minkowski" + or coord == "sinusoidal" + or coord == "schwarzschild" + or coord == "kerr-schild" + ): def center_func_3(xm, xp): - return 0.5 * (xm+xp) + return 0.5 * (xm + xp) else: - raise AthenaError('Coordinates not recognized') + raise AthenaError("Coordinates not recognized") # Check output level compared to max level in file if level < max_level and not subsample and not fast_restrict: - warnings.warn('Exact restriction being used: performance severely affected;' - + ' see documentation', AthenaWarning) + warnings.warn( + "Exact restriction 
being used: performance severely affected;" + + " see documentation", + AthenaWarning, + ) sys.stderr.flush() if level > max_level: - warnings.warn('Requested refinement level higher than maximum level in file:' - + ' all cells will be prolongated', AthenaWarning) + warnings.warn( + "Requested refinement level higher than maximum level in file:" + + " all cells will be prolongated", + AthenaWarning, + ) sys.stderr.flush() # Check that subsampling and/or fast restriction will work if needed if level < max_level and (subsample or fast_restrict): if num_ghost > 0: - raise AthenaError('Subsampling and fast restriction incompatible with' - + ' ghost zones') - max_restrict_factor = 2**(max_level-level) + raise AthenaError( + "Subsampling and fast restriction incompatible with" + + " ghost zones" + ) + max_restrict_factor = 2 ** (max_level - level) for current_block_size in block_size: - if (current_block_size != 1 - and current_block_size % max_restrict_factor != 0): - raise AthenaError('Block boundaries at finest level must be cell' - + ' boundaries at desired level for subsampling or' - + ' fast restriction to work') + if ( + current_block_size != 1 + and current_block_size % max_restrict_factor != 0 + ): + raise AthenaError( + "Block boundaries at finest level must be cell" + + " boundaries at desired level for subsampling or" + + " fast restriction to work" + ) # Create list of all quantities if none given - var_quantities = np.array([x.decode('ascii', 'replace') - for x in f.attrs['VariableNames'][:]]) - coord_quantities = ('x1f', 'x2f', 'x3f', 'x1v', 'x2v', 'x3v') + var_quantities = np.array( + [x.decode("ascii", "replace") for x in f.attrs["VariableNames"][:]] + ) + coord_quantities = ("x1f", "x2f", "x3f", "x1v", "x2v", "x3v") attr_quantities = [key for key in f.attrs] - other_quantities = ('Levels',) + other_quantities = ("Levels",) if not new_data: quantities = list(data.values()) elif quantities is None: @@ -577,24 +695,33 @@ def center_func_3(xm, xp): if q not in var_quantities and q not in coord_quantities: possibilities = '", "'.join(var_quantities) possibilities = '"' + possibilities + '"' - error_string = ('Quantity not recognized: file does not include "{0}"' - + ' but does include {1}') + error_string = ( + 'Quantity not recognized: file does not include "{0}"' + + " but does include {1}" + ) raise AthenaError(error_string.format(q, possibilities)) - quantities = [str(q) for q in quantities if q not in coord_quantities - and q not in attr_quantities and q not in other_quantities] + quantities = [ + str(q) + for q in quantities + if q not in coord_quantities + and q not in attr_quantities + and q not in other_quantities + ] # Store file attribute metadata for key in attr_quantities: data[str(key)] = f.attrs[key] # Get metadata describing file layout - num_blocks = f.attrs['NumMeshBlocks'] - dataset_names = np.array([x.decode('ascii', 'replace') - for x in f.attrs['DatasetNames'][:]]) - dataset_sizes = f.attrs['NumVariables'][:] + num_blocks = f.attrs["NumMeshBlocks"] + dataset_names = np.array( + [x.decode("ascii", "replace") for x in f.attrs["DatasetNames"][:]] + ) + dataset_sizes = f.attrs["NumVariables"][:] dataset_sizes_cumulative = np.cumsum(dataset_sizes) - variable_names = np.array([x.decode('ascii', 'replace') - for x in f.attrs['VariableNames'][:]]) + variable_names = np.array( + [x.decode("ascii", "replace") for x in f.attrs["VariableNames"][:]] + ) quantity_datasets = [] quantity_indices = [] for q in quantities: @@ -603,66 +730,81 @@ def center_func_3(xm, xp): if 
dataset_num == 0: dataset_index = var_num else: - dataset_index = var_num - dataset_sizes_cumulative[dataset_num-1] + dataset_index = var_num - dataset_sizes_cumulative[dataset_num - 1] quantity_datasets.append(dataset_names[dataset_num]) quantity_indices.append(dataset_index) # Locate fine block for coordinates in case of slice fine_block = np.where(levels == max_level)[0][0] - x1m = f['x1f'][fine_block, 0] - x1p = f['x1f'][fine_block, 1] - x2m = f['x2f'][fine_block, 0] - x2p = f['x2f'][fine_block, 1] - x3m = f['x3f'][fine_block, 0] - x3p = f['x3f'][fine_block, 1] + x1m = f["x1f"][fine_block, 0] + x1p = f["x1f"][fine_block, 1] + x2m = f["x2f"][fine_block, 0] + x2p = f["x2f"][fine_block, 1] + x3m = f["x3f"][fine_block, 0] + x3p = f["x3f"][fine_block, 1] # Populate coordinate arrays face_funcs = (face_func_1, face_func_2, face_func_3) center_funcs = (center_func_1, center_func_2, center_func_3) - for d, nx, face_func, center_func in zip(list(range(1, 4)), nx_vals, face_funcs, - center_funcs): - xf = 'x' + repr(d) + 'f' - xv = 'x' + repr(d) + 'v' + for d, nx, face_func, center_func in zip( + list(range(1, 4)), nx_vals, face_funcs, center_funcs + ): + xf = "x" + repr(d) + "f" + xv = "x" + repr(d) + "v" if nx == 1: - xm = (x1m, x2m, x3m)[d-1] - xp = (x1p, x2p, x3p)[d-1] + xm = (x1m, x2m, x3m)[d - 1] + xp = (x1p, x2p, x3p)[d - 1] data[xf] = np.array([xm, xp], dtype=dtype) else: - xmin = f.attrs['RootGridX' + repr(d)][0] - xmax = f.attrs['RootGridX' + repr(d)][1] - xrat_root = f.attrs['RootGridX' + repr(d)][2] + xmin = f.attrs["RootGridX" + repr(d)][0] + xmax = f.attrs["RootGridX" + repr(d)][1] + xrat_root = f.attrs["RootGridX" + repr(d)][2] if xrat_root == -1.0 and face_func is None: - raise AthenaError('Must specify user-defined face_func_{0}'.format(d)) + raise AthenaError( + "Must specify user-defined face_func_{0}".format(d) + ) elif face_func is not None: if num_ghost > 0: - raise AthenaError('Ghost zones incompatible with user-defined' - + ' coordinate spacing') - data[xf] = face_func(xmin, xmax, xrat_root, nx+1) + raise AthenaError( + "Ghost zones incompatible with user-defined" + + " coordinate spacing" + ) + data[xf] = face_func(xmin, xmax, xrat_root, nx + 1) elif xrat_root == 1.0: if np.all(levels == level): data[xf] = np.empty(nx + 1, dtype=dtype) - for n_block in range(int((nx - 2*num_ghost) - / (block_size[d-1] - 2*num_ghost))): - sample_block = np.where(logical_locations[:, d-1] - == n_block)[0][0] - index_low = n_block * (block_size[d-1] - 2*num_ghost) - index_high = index_low + block_size[d-1] + 1 + for n_block in range( + int( + (nx - 2 * num_ghost) + / (block_size[d - 1] - 2 * num_ghost) + ) + ): + sample_block = np.where( + logical_locations[:, d - 1] == n_block + )[0][0] + index_low = n_block * (block_size[d - 1] - 2 * num_ghost) + index_high = index_low + block_size[d - 1] + 1 data[xf][index_low:index_high] = f[xf][sample_block, :] else: if num_ghost > 0: - raise AthenaError('Cannot use ghost zones with different' - + ' refinement levels') + raise AthenaError( + "Cannot use ghost zones with different" + + " refinement levels" + ) data[xf] = np.linspace(xmin, xmax, nx + 1, dtype=dtype) else: if num_ghost > 0: - raise AthenaError('Ghost zones incompatible with non-uniform' - + ' coordinate spacing') + raise AthenaError( + "Ghost zones incompatible with non-uniform" + + " coordinate spacing" + ) xrat = xrat_root ** (1.0 / 2**level) - data[xf] = (xmin + (1.0-xrat**np.arange(nx+1, dtype=dtype)) - / (1.0-xrat**nx) * (xmax-xmin)) + data[xf] = xmin + (1.0 - xrat ** np.arange(nx + 
1, dtype=dtype)) / ( + 1.0 - xrat**nx + ) * (xmax - xmin) data[xv] = np.empty(nx, dtype=dtype) for i in range(nx): - data[xv][i] = center_func(data[xf][i], data[xf][i+1]) + data[xv][i] = center_func(data[xf][i], data[xf][i + 1]) # Account for selection x1_select = False @@ -672,61 +814,73 @@ def center_func_3(xm, xp): i_max = nx1 j_max = nx2 k_max = nx3 - error_string = '{0} must be {1} than {2} in order to intersect data range' - if x1_min is not None and x1_min >= data['x1f'][1]: - if x1_min >= data['x1f'][-1]: - raise AthenaError(error_string.format('x1_min', 'less', data['x1f'][-1])) + error_string = "{0} must be {1} than {2} in order to intersect data range" + if x1_min is not None and x1_min >= data["x1f"][1]: + if x1_min >= data["x1f"][-1]: + raise AthenaError( + error_string.format("x1_min", "less", data["x1f"][-1]) + ) x1_select = True - i_min = np.where(data['x1f'] <= x1_min)[0][-1] - if x1_max is not None and x1_max <= data['x1f'][-2]: - if x1_max <= data['x1f'][0]: - raise AthenaError(error_string.format('x1_max', 'greater', - data['x1f'][0])) + i_min = np.where(data["x1f"] <= x1_min)[0][-1] + if x1_max is not None and x1_max <= data["x1f"][-2]: + if x1_max <= data["x1f"][0]: + raise AthenaError( + error_string.format("x1_max", "greater", data["x1f"][0]) + ) x1_select = True - i_max = np.where(data['x1f'] >= x1_max)[0][0] - if x2_min is not None and x2_min >= data['x2f'][1]: - if x2_min >= data['x2f'][-1]: - raise AthenaError(error_string.format('x2_min', 'less', data['x2f'][-1])) + i_max = np.where(data["x1f"] >= x1_max)[0][0] + if x2_min is not None and x2_min >= data["x2f"][1]: + if x2_min >= data["x2f"][-1]: + raise AthenaError( + error_string.format("x2_min", "less", data["x2f"][-1]) + ) x2_select = True - j_min = np.where(data['x2f'] <= x2_min)[0][-1] - if x2_max is not None and x2_max <= data['x2f'][-2]: - if x2_max <= data['x2f'][0]: - raise AthenaError(error_string.format('x2_max', 'greater', - data['x2f'][0])) + j_min = np.where(data["x2f"] <= x2_min)[0][-1] + if x2_max is not None and x2_max <= data["x2f"][-2]: + if x2_max <= data["x2f"][0]: + raise AthenaError( + error_string.format("x2_max", "greater", data["x2f"][0]) + ) x2_select = True - j_max = np.where(data['x2f'] >= x2_max)[0][0] - if x3_min is not None and x3_min >= data['x3f'][1]: - if x3_min >= data['x3f'][-1]: - raise AthenaError(error_string.format('x3_min', 'less', data['x3f'][-1])) + j_max = np.where(data["x2f"] >= x2_max)[0][0] + if x3_min is not None and x3_min >= data["x3f"][1]: + if x3_min >= data["x3f"][-1]: + raise AthenaError( + error_string.format("x3_min", "less", data["x3f"][-1]) + ) x3_select = True - k_min = np.where(data['x3f'] <= x3_min)[0][-1] - if x3_max is not None and x3_max <= data['x3f'][-2]: - if x3_max <= data['x3f'][0]: - raise AthenaError(error_string.format('x3_max', 'greater', - data['x3f'][0])) + k_min = np.where(data["x3f"] <= x3_min)[0][-1] + if x3_max is not None and x3_max <= data["x3f"][-2]: + if x3_max <= data["x3f"][0]: + raise AthenaError( + error_string.format("x3_max", "greater", data["x3f"][0]) + ) x3_select = True - k_max = np.where(data['x3f'] >= x3_max)[0][0] + k_max = np.where(data["x3f"] >= x3_max)[0][0] if (x1_select or x2_select or x3_select) and num_ghost > 0: - raise AthenaError('Cannot take subsets with ghost zones') + raise AthenaError("Cannot take subsets with ghost zones") # Adjust coordinates if selection made if x1_select: - data['x1f'] = data['x1f'][i_min:i_max+1] - data['x1v'] = data['x1v'][i_min:i_max] + data["x1f"] = data["x1f"][i_min : i_max + 
1] + data["x1v"] = data["x1v"][i_min:i_max] if x2_select: - data['x2f'] = data['x2f'][j_min:j_max+1] - data['x2v'] = data['x2v'][j_min:j_max] + data["x2f"] = data["x2f"][j_min : j_max + 1] + data["x2v"] = data["x2v"][j_min:j_max] if x3_select: - data['x3f'] = data['x3f'][k_min:k_max+1] - data['x3v'] = data['x3v'][k_min:k_max] + data["x3f"] = data["x3f"][k_min : k_max + 1] + data["x3v"] = data["x3v"][k_min:k_max] # Prepare arrays for data and bookkeeping if new_data: for q in quantities: - data[q] = np.zeros((k_max-k_min, j_max-j_min, i_max-i_min), dtype=dtype) + data[q] = np.zeros( + (k_max - k_min, j_max - j_min, i_max - i_min), dtype=dtype + ) if return_levels: - data['Levels'] = np.empty((k_max-k_min, j_max-j_min, i_max-i_min), - dtype=np.int32) + data["Levels"] = np.empty( + (k_max - k_min, j_max - j_min, i_max - i_min), dtype=np.int32 + ) else: for q in quantities: data[q].fill(0.0) @@ -745,12 +899,21 @@ def center_func_3(xm, xp): s = 2 ** (level - block_level) # Calculate destination indices, without selection - il_d = (block_location[0] * (block_size[0] - 2*num_ghost) * s - if nx1 > 1 else 0) - jl_d = (block_location[1] * (block_size[1] - 2*num_ghost) * s - if nx2 > 1 else 0) - kl_d = (block_location[2] * (block_size[2] - 2*num_ghost) * s - if nx3 > 1 else 0) + il_d = ( + block_location[0] * (block_size[0] - 2 * num_ghost) * s + if nx1 > 1 + else 0 + ) + jl_d = ( + block_location[1] * (block_size[1] - 2 * num_ghost) * s + if nx2 > 1 + else 0 + ) + kl_d = ( + block_location[2] * (block_size[2] - 2 * num_ghost) * s + if nx3 > 1 + else 0 + ) iu_d = il_d + block_size[0] * s if nx1 > 1 else 1 ju_d = jl_d + block_size[1] * s if nx2 > 1 else 1 ku_d = kl_d + block_size[2] * s if nx3 > 1 else 1 @@ -774,8 +937,9 @@ def center_func_3(xm, xp): ku_d = min(ku_d, k_max) - k_min # Assign values - for q, dataset, index in zip(quantities, quantity_datasets, - quantity_indices): + for q, dataset, index in zip( + quantities, quantity_datasets, quantity_indices + ): block_data = f[dataset][index, block_num, :] if s > 1: if nx1 > 1: @@ -784,9 +948,9 @@ def center_func_3(xm, xp): block_data = np.repeat(block_data, s, axis=1) if nx3 > 1: block_data = np.repeat(block_data, s, axis=0) - data[q][kl_d:ku_d, jl_d:ju_d, il_d:iu_d] = block_data[kl_s:ku_s, - jl_s:ju_s, - il_s:iu_s] + data[q][kl_d:ku_d, jl_d:ju_d, il_d:iu_d] = block_data[ + kl_s:ku_s, jl_s:ju_s, il_s:iu_s + ] # Restrict fine data else: @@ -833,17 +997,21 @@ def center_func_3(xm, xp): # Apply subsampling if subsample: # Calculate fine-level offsets (nearest cell at or below center) - o1 = s/2 - 1 if nx1 > 1 else 0 - o2 = s/2 - 1 if nx2 > 1 else 0 - o3 = s/2 - 1 if nx3 > 1 else 0 + o1 = s / 2 - 1 if nx1 > 1 else 0 + o2 = s / 2 - 1 if nx2 > 1 else 0 + o3 = s / 2 - 1 if nx3 > 1 else 0 # Assign values - for q, dataset, index in zip(quantities, quantity_datasets, - quantity_indices): - data[q][kl_d:ku_d, - jl_d:ju_d, - il_d:iu_d] = f[dataset][index, block_num, kl_s+o3:ku_s:s, - jl_s+o2:ju_s:s, il_s+o1:iu_s:s] + for q, dataset, index in zip( + quantities, quantity_datasets, quantity_indices + ): + data[q][kl_d:ku_d, jl_d:ju_d, il_d:iu_d] = f[dataset][ + index, + block_num, + kl_s + o3 : ku_s : s, + jl_s + o2 : ju_s : s, + il_s + o1 : iu_s : s, + ] # Apply fast (uniform Cartesian) restriction elif fast_restrict: @@ -853,18 +1021,22 @@ def center_func_3(xm, xp): ko_vals = list(range(s)) if nx3 > 1 else (0,) # Assign values - for q, dataset, index in zip(quantities, quantity_datasets, - quantity_indices): + for q, dataset, index in zip( + quantities, 
quantity_datasets, quantity_indices + ): for ko in ko_vals: for jo in jo_vals: for io in io_vals: - data[q][kl_d:ku_d, - jl_d:ju_d, - il_d:iu_d] += f[dataset][index, block_num, - kl_s+ko:ku_s:s, - jl_s+jo:ju_s:s, - il_s+io:iu_s:s] - data[q][kl_d:ku_d, jl_d:ju_d, il_d:iu_d] /= s ** num_extended_dims + data[q][kl_d:ku_d, jl_d:ju_d, il_d:iu_d] += f[ + dataset + ][ + index, + block_num, + kl_s + ko : ku_s : s, + jl_s + jo : ju_s : s, + il_s + io : iu_s : s, + ] + data[q][kl_d:ku_d, jl_d:ju_d, il_d:iu_d] /= s**num_extended_dims # Apply exact (volume-weighted) restriction else: @@ -885,24 +1057,24 @@ def center_func_3(xm, xp): # Accumulate values for k_s, k_d in zip(k_s_vals, k_d_vals): if nx3 > 1: - x3m = f['x3f'][block_num, k_s] - x3p = f['x3f'][block_num, k_s+1] + x3m = f["x3f"][block_num, k_s] + x3p = f["x3f"][block_num, k_s + 1] for j_s, j_d in zip(j_s_vals, j_d_vals): if nx2 > 1: - x2m = f['x2f'][block_num, j_s] - x2p = f['x2f'][block_num, j_s+1] + x2m = f["x2f"][block_num, j_s] + x2p = f["x2f"][block_num, j_s + 1] for i_s, i_d in zip(i_s_vals, i_d_vals): if nx1 > 1: - x1m = f['x1f'][block_num, i_s] - x1p = f['x1f'][block_num, i_s+1] + x1m = f["x1f"][block_num, i_s] + x1p = f["x1f"][block_num, i_s + 1] vol = vol_func(x1m, x1p, x2m, x2p, x3m, x3p) - for q, dataset, index in zip(quantities, - quantity_datasets, - quantity_indices): - data[q][k_d, j_d, i_d] += vol * f[dataset][index, - block_num, - k_s, j_s, - i_s] + for q, dataset, index in zip( + quantities, quantity_datasets, quantity_indices + ): + data[q][k_d, j_d, i_d] += ( + vol + * f[dataset][index, block_num, k_s, j_s, i_s] + ) loc1 = (nx1 > 1) * block_location[0] / s loc2 = (nx2 > 1) * block_location[1] / s loc3 = (nx3 > 1) * block_location[2] / s @@ -910,7 +1082,7 @@ def center_func_3(xm, xp): # Set level information for cells in this block if return_levels: - data['Levels'][kl_d:ku_d, jl_d:ju_d, il_d:iu_d] = block_level + data["Levels"][kl_d:ku_d, jl_d:ju_d, il_d:iu_d] = block_level # Remove volume factors from restricted data if level < max_level and not subsample and not fast_restrict: @@ -932,16 +1104,16 @@ def center_func_3(xm, xp): ku = min(ku, k_max) - k_min for k in range(kl, ku): if nx3 > 1: - x3m = data['x3f'][k] - x3p = data['x3f'][k+1] + x3m = data["x3f"][k] + x3p = data["x3f"][k + 1] for j in range(jl, ju): if nx2 > 1: - x2m = data['x2f'][j] - x2p = data['x2f'][j+1] + x2m = data["x2f"][j] + x2p = data["x2f"][j + 1] for i in range(il, iu): if nx1 > 1: - x1m = data['x1f'][i] - x1p = data['x1f'][i+1] + x1m = data["x1f"][i] + x1p = data["x1f"][i + 1] vol = vol_func(x1m, x1p, x2m, x2p, x3m, x3p) for q in quantities: data[q][k, j, i] /= vol @@ -957,6 +1129,7 @@ def center_func_3(xm, xp): # ======================================================================================== + def restrict_like(vals, levels, vols=None): """Average cell values according to given mesh refinement scheme.""" @@ -964,45 +1137,54 @@ def restrict_like(vals, levels, vols=None): nx3, nx2, nx1 = vals.shape max_level = np.max(levels) if nx3 > 1 and nx3 % 2**max_level != 0: - raise AthenaError('x3-dimension wrong size to be restricted') + raise AthenaError("x3-dimension wrong size to be restricted") if nx2 > 1 and nx2 % 2**max_level != 0: - raise AthenaError('x2-dimension wrong size to be restricted') + raise AthenaError("x2-dimension wrong size to be restricted") if nx1 % 2**max_level != 0: - raise AthenaError('x1-dimension wrong size to be restricted') + raise AthenaError("x1-dimension wrong size to be restricted") # Construct volume weighting if 
vols is None: vols = np.ones_like(vals) else: if vols.shape != vals.shape: - raise AthenaError('Array of volumes must match cell values in size') + raise AthenaError("Array of volumes must match cell values in size") # Restrict data vals_restricted = np.copy(vals) for level in range(max_level): level_difference = max_level - level - stride = 2 ** level_difference + stride = 2**level_difference if nx3 > 1: - vals_level = np.reshape(vals * vols, (nx3/stride, stride, nx2/stride, stride, - nx1/stride, stride)) - vols_level = np.reshape(vols, (nx3/stride, stride, nx2/stride, stride, - nx1/stride, stride)) + vals_level = np.reshape( + vals * vols, + (nx3 / stride, stride, nx2 / stride, stride, nx1 / stride, stride), + ) + vols_level = np.reshape( + vols, (nx3 / stride, stride, nx2 / stride, stride, nx1 / stride, stride) + ) vals_sum = np.sum(np.sum(np.sum(vals_level, axis=5), axis=3), axis=1) vols_sum = np.sum(np.sum(np.sum(vols_level, axis=5), axis=3), axis=1) - vals_level = np.repeat(np.repeat(np.repeat(vals_sum / vols_sum, stride, - axis=0), - stride, axis=1), - stride, axis=2) + vals_level = np.repeat( + np.repeat( + np.repeat(vals_sum / vols_sum, stride, axis=0), stride, axis=1 + ), + stride, + axis=2, + ) elif nx2 > 1: - vals_level = np.reshape(vals * vols, (nx2/stride, stride, nx1/stride, stride)) - vols_level = np.reshape(vols, (nx2/stride, stride, nx1/stride, stride)) + vals_level = np.reshape( + vals * vols, (nx2 / stride, stride, nx1 / stride, stride) + ) + vols_level = np.reshape(vols, (nx2 / stride, stride, nx1 / stride, stride)) vals_sum = np.sum(np.sum(vals_level, axis=3), axis=1) vols_sum = np.sum(np.sum(vols_level, axis=3), axis=1) - vals_level = np.repeat(np.repeat(vals_sum / vols_sum, stride, axis=0), - stride, axis=1) + vals_level = np.repeat( + np.repeat(vals_sum / vols_sum, stride, axis=0), stride, axis=1 + ) else: - vals_level = np.reshape(vals * vols, (nx1/stride, stride)) - vols_level = np.reshape(vols, (nx1/stride, stride)) + vals_level = np.reshape(vals * vols, (nx1 / stride, stride)) + vols_level = np.reshape(vols, (nx1 / stride, stride)) vals_sum = np.sum(vals_level, axis=1) vols_sum = np.sum(vols_level, axis=1) vals_level = np.repeat(vals_sum / vols_sum, stride, axis=0) @@ -1012,21 +1194,23 @@ def restrict_like(vals, levels, vols=None): # ======================================================================================== + def athinput(filename): """Read athinput file and returns a dictionary of dictionaries.""" # Read data - with open(filename, 'r') as athinput: + with open(filename, "r") as athinput: # remove comments, extra whitespace, and empty lines - lines = [_f for _f in - [i.split('#')[0].strip() for i in athinput.readlines()] if _f] + lines = [ + _f for _f in [i.split("#")[0].strip() for i in athinput.readlines()] if _f + ] data = {} # split into blocks, first element will be empty - blocks = ('\n'.join(lines)).split('<')[1:] + blocks = ("\n".join(lines)).split("<")[1:] # Function for interpreting strings numerically def typecast(x): - if '_' in x: + if "_" in x: return x try: return int(x) @@ -1044,14 +1228,14 @@ def typecast(x): # Function for parsing assignment based on first '=' def parse_line(line): - out = [i.strip() for i in line.split('=')] - out[1] = '='.join(out[1:]) + out = [i.strip() for i in line.split("=")] + out[1] = "=".join(out[1:]) out[1] = typecast(out[1]) return out[:2] # Assign values into dictionaries for block in blocks: - info = list([_f for _f in block.split('\n') if _f]) + info = list([_f for _f in block.split("\n") if _f]) 
key = info.pop(0)[:-1] # last character is '>' data[key] = dict(list(map(parse_line, info))) return data @@ -1059,11 +1243,14 @@ def parse_line(line): # ======================================================================================== + class AthenaError(RuntimeError): """General exception class for Athena++ read functions.""" + pass class AthenaWarning(RuntimeWarning): """General warning class for Athena++ read functions.""" + pass diff --git a/astro_tigress/map_circular_beam.py b/astro_tigress/map_circular_beam.py index 186d133..a90d72f 100644 --- a/astro_tigress/map_circular_beam.py +++ b/astro_tigress/map_circular_beam.py @@ -2,57 +2,60 @@ import math from scipy.integrate import quad + def get_gauss_stamp(n): nx = n ny = n - nxc = nx/2 - nyc = ny/2 + nxc = nx / 2 + nyc = ny / 2 ix = np.zeros((nx, ny)) iy = np.zeros((nx, ny)) for i in range(nx): for j in range(ny): - ix[i, j] = j-nyc - iy[i, j] = i-nxc + ix[i, j] = j - nyc + iy[i, j] = i - nxc + def gauss(x): sigmax = 0.5 - ret = 1./(np.sqrt(2*math.pi)*sigmax) * np.exp(-0.5*(x/sigmax)**2) + ret = 1.0 / (np.sqrt(2 * math.pi) * sigmax) * np.exp(-0.5 * (x / sigmax) ** 2) return ret + stamp = np.zeros((nx, ny)) for i in range(nx): for j in range(ny): x1 = ix[i, j] - 0.5 - x2 = x1 + 1. + x2 = x1 + 1.0 y1 = iy[i, j] - 0.5 - y2 = y1 + 1. + y2 = y1 + 1.0 inte = quad(gauss, x1, x2)[0] * quad(gauss, y1, y2)[0] stamp[i, j] = inte return stamp -#average data using a stamp + +# average data using a stamp def stamp_avg(data, stamp): nxd, nyd = data.shape nxs, nys = stamp.shape ret = np.zeros(data.shape) - nxc = nxs/2 - nyc = nys/2 + nxc = nxs / 2 + nyc = nys / 2 ix = np.zeros((nxs, nys)) iy = np.zeros((nxs, nys)) for i in range(nxs): for j in range(nys): - ix[i, j] = i-nxc - iy[i, j] = j-nyc + ix[i, j] = i - nxc + iy[i, j] = j - nyc for i in range(nxd): for j in range(nyd): for istamp in range(nxs): for jstamp in range(nys): iret = i + ix[istamp, jstamp] jret = j + iy[istamp, jstamp] - if (iret >= 0) and (iret < nxd - ) and (jret >= 0) and (jret < nyd): - ret[iret, jret] += data[i, j]*stamp[istamp, jstamp] + if (iret >= 0) and (iret < nxd) and (jret >= 0) and (jret < nyd): + ret[iret, jret] += data[i, j] * stamp[istamp, jstamp] return ret + def map_circular_beam(data, nstamp=9): stamp = get_gauss_stamp(nstamp) return stamp_avg(data, stamp) - diff --git a/astro_tigress/tigress_read.py b/astro_tigress/tigress_read.py index f963937..c573375 100644 --- a/astro_tigress/tigress_read.py +++ b/astro_tigress/tigress_read.py @@ -168,9 +168,7 @@ def _download_file(self, f): target = osp.join(self.dir_master, f) if osp.isfile(target): - print( - "{} ({:.5f}GB) already exists".format(f, osp.getsize(target) / 2**30) - ) + print("{} ({:.5f}GB) already exists".format(f, osp.getsize(target) / 2**30)) while True: answer = input("overwrite? [y/n]:") if answer.lower() in ["y", "n"]: @@ -184,7 +182,7 @@ def _download_file(self, f): req = urllib.request.Request(source) try: - response = urllib.request.urlopen(req) + urllib.request.urlopen(req) except URLError as e: if hasattr(e, "reason"): print("We failed to reach a server.") diff --git a/astro_tigress/tigress_utils.py b/astro_tigress/tigress_utils.py index d3da4c0..c985b49 100644 --- a/astro_tigress/tigress_utils.py +++ b/astro_tigress/tigress_utils.py @@ -1,6 +1,7 @@ import os import shutil + def copy_file(fn_old, fn_new): """Copy file from old to new location. Make new directory if needed. Raise warning if old file does not exist. 
@@ -8,20 +9,20 @@ def copy_file(fn_old, fn_new): fn_old: string, old filename fn_new: string, new filename""" if fn_old == fn_new: - raise ValueError('New filename cannot be the same as the original one.') + raise ValueError("New filename cannot be the same as the original one.") if os.path.isfile(fn_old): if os.path.isfile(fn_new): - print("New file already exists: "+fn_new) + print("New file already exists: " + fn_new) while True: answer = input("Overwrite? (y/n):") if answer == "y": break elif answer == "n": - return + return dir_new = os.path.dirname(fn_new) if not os.path.exists(dir_new): os.makedirs(dir_new) shutil.copyfile(fn_old, fn_new) else: - raise RuntimeError("file does not exist: "+fn_old) + raise RuntimeError("file does not exist: " + fn_old) return diff --git a/astro_tigress/ytathena.py b/astro_tigress/ytathena.py index f44f076..75d488a 100644 --- a/astro_tigress/ytathena.py +++ b/astro_tigress/ytathena.py @@ -39,7 +39,7 @@ def __init__(self, fname=os.path.join(dirpath, "coolftn.txt")): self.nT = len(self.T1) def get_Tidx(self, T): - if type(T) == np.ndarray: + if type(T) is np.ndarray: Tidx = np.log10(T / self.Tmin) / self.dT Tidx[np.where(T < self.Tmin)] = 0 Tidx[np.where(T >= self.Tmax)] = self.nT - 2 From d6219ba3a2fb2f2344b9838de191cac171a7ca11 Mon Sep 17 00:00:00 2001 From: Chang-Goo Kim Date: Wed, 18 Sep 2024 11:00:51 -0400 Subject: [PATCH 4/5] Reformatting (ignoring E741) --- .ruff.toml | 2 + docs/example_2-synthetic-dustpol.ipynb | 100 +++++++++++++------------ 2 files changed, 53 insertions(+), 49 deletions(-) create mode 100644 .ruff.toml diff --git a/.ruff.toml b/.ruff.toml new file mode 100644 index 0000000..7c71fe6 --- /dev/null +++ b/.ruff.toml @@ -0,0 +1,2 @@ +[lint] +ignore = ["E741"] diff --git a/docs/example_2-synthetic-dustpol.ipynb b/docs/example_2-synthetic-dustpol.ipynb index 65d4040..5ff5144 100644 --- a/docs/example_2-synthetic-dustpol.ipynb +++ b/docs/example_2-synthetic-dustpol.ipynb @@ -35,14 +35,16 @@ "# add path to astro_tigress module\n", "# this can also be done using PYTHONPATH environment variable\n", "import sys\n", - "sys.path.insert(0,'../')\n", + "\n", + "sys.path.insert(0, \"../\")\n", "\n", "import astro_tigress\n", + "\n", "# Need to set the master directory where the data is stored\n", "dir_master = \"../data/\"\n", "# name of the simulation model\n", - "model_id = \"R8_4pc\" \n", - "model = astro_tigress.Model(model_id,dir_master) #reading the model information" + "model_id = \"R8_4pc\"\n", + "model = astro_tigress.Model(model_id, dir_master) # reading the model information" ] }, { @@ -51,7 +53,7 @@ "metadata": {}, "outputs": [], "source": [ - "#load the MHD data set for the snapshot ivtk=300\n", + "# load the MHD data set for the snapshot ivtk=300\n", "model.load(300, dataset=\"MHD\")" ] }, @@ -69,11 +71,11 @@ "metadata": {}, "outputs": [], "source": [ - "Temp = model.MHD.grid[('gas','temperature')].in_units(\"K\").value.T\n", - "nH = model.MHD.grid[('gas','nH')].in_units(\"cm**-3\").value.T\n", - "Bx = model.MHD.grid[('gas','magnetic_field_x')].in_units(\"uG\").value.T\n", - "By = model.MHD.grid[('gas','magnetic_field_y')].in_units(\"uG\").value.T\n", - "Bz = model.MHD.grid[('gas','magnetic_field_z')].in_units(\"uG\").value.T" + "Temp = model.MHD.grid[(\"gas\", \"temperature\")].in_units(\"K\").value.T\n", + "nH = model.MHD.grid[(\"gas\", \"nH\")].in_units(\"cm**-3\").value.T\n", + "Bx = model.MHD.grid[(\"gas\", \"magnetic_field_x\")].in_units(\"uG\").value.T\n", + "By = model.MHD.grid[(\"gas\", 
\"magnetic_field_y\")].in_units(\"uG\").value.T\n", + "Bz = model.MHD.grid[(\"gas\", \"magnetic_field_z\")].in_units(\"uG\").value.T" ] }, { @@ -90,9 +92,9 @@ "metadata": {}, "outputs": [], "source": [ - "xcc = model.MHD.grid[(\"gas\",\"x\")][:,0,0].to('pc').value\n", - "ycc = model.MHD.grid[(\"gas\",\"y\")][0,:,0].to('pc').value\n", - "zcc = model.MHD.grid[(\"gas\",\"z\")][0,0,:].to('pc').value" + "xcc = model.MHD.grid[(\"gas\", \"x\")][:, 0, 0].to(\"pc\").value\n", + "ycc = model.MHD.grid[(\"gas\", \"y\")][0, :, 0].to(\"pc\").value\n", + "zcc = model.MHD.grid[(\"gas\", \"z\")][0, 0, :].to(\"pc\").value" ] }, { @@ -109,8 +111,8 @@ "metadata": {}, "outputs": [], "source": [ - "dx_pc = model.MHD.grid[(\"gas\",\"dx\")][0,0,0].to('pc').value\n", - "dx_cm = model.MHD.grid[(\"gas\",\"dx\")][0,0,0].to('cm').value" + "dx_pc = model.MHD.grid[(\"gas\", \"dx\")][0, 0, 0].to(\"pc\").value\n", + "dx_cm = model.MHD.grid[(\"gas\", \"dx\")][0, 0, 0].to(\"cm\").value" ] }, { @@ -128,7 +130,7 @@ "metadata": {}, "outputs": [], "source": [ - "I,Q,U = calc_IQU(nH,Bx,By,Bz,dx_pc,los='y')" + "I, Q, U = calc_IQU(nH, Bx, By, Bz, dx_pc, los=\"y\")" ] }, { @@ -156,24 +158,24 @@ } ], "source": [ - "fig, axes = plt.subplots(1,2, figsize=(12,5))\n", + "fig, axes = plt.subplots(1, 2, figsize=(12, 5))\n", "# integration along the y-axis (edge on view)\n", "plt.sca(axes[0])\n", - "NH = nH.sum(axis=1)*dx_cm\n", + "NH = nH.sum(axis=1) * dx_cm\n", "\n", - "plt.pcolormesh(xcc,zcc,NH,vmax=1.e22,shading='nearest')\n", - "plt.gca().set_aspect('equal')\n", - "plt.xlabel('x[pc]')\n", - "plt.ylabel('z[pc]')\n", - "cbar = plt.colorbar(label=r'$N_H\\,[{\\rm cm^{-2}}]$')\n", + "plt.pcolormesh(xcc, zcc, NH, vmax=1.0e22, shading=\"nearest\")\n", + "plt.gca().set_aspect(\"equal\")\n", + "plt.xlabel(\"x[pc]\")\n", + "plt.ylabel(\"z[pc]\")\n", + "cbar = plt.colorbar(label=r\"$N_H\\,[{\\rm cm^{-2}}]$\")\n", "\n", "plt.sca(axes[1])\n", "\n", - "plt.pcolormesh(xcc,zcc,I,vmax=5,shading='nearest')\n", - "plt.gca().set_aspect('equal')\n", - "plt.xlabel('x[pc]')\n", - "plt.ylabel('z[pc]')\n", - "cbar = plt.colorbar(label=r'$I$')" + "plt.pcolormesh(xcc, zcc, I, vmax=5, shading=\"nearest\")\n", + "plt.gca().set_aspect(\"equal\")\n", + "plt.xlabel(\"x[pc]\")\n", + "plt.ylabel(\"z[pc]\")\n", + "cbar = plt.colorbar(label=r\"$I$\")" ] }, { @@ -190,7 +192,7 @@ "metadata": {}, "outputs": [], "source": [ - "P = np.sqrt(Q**2+U**2)" + "P = np.sqrt(Q**2 + U**2)" ] }, { @@ -210,34 +212,34 @@ } ], "source": [ - "fig, axes = plt.subplots(2,2, figsize=(13,10))\n", + "fig, axes = plt.subplots(2, 2, figsize=(13, 10))\n", "axes = axes.flatten()\n", "# integration along the y-axis (edge on view)\n", "plt.sca(axes[0])\n", - "plt.pcolormesh(xcc,zcc,I,vmax=5,shading='nearest')\n", - "plt.gca().set_aspect('equal')\n", - "plt.xlabel('x[pc]')\n", - "plt.ylabel('z[pc]')\n", - "cbar = plt.colorbar(label=r'$I$')\n", + "plt.pcolormesh(xcc, zcc, I, vmax=5, shading=\"nearest\")\n", + "plt.gca().set_aspect(\"equal\")\n", + "plt.xlabel(\"x[pc]\")\n", + "plt.ylabel(\"z[pc]\")\n", + "cbar = plt.colorbar(label=r\"$I$\")\n", "plt.sca(axes[1])\n", - "plt.pcolormesh(xcc,zcc,P/I,vmax=0.2,shading='nearest')\n", - "plt.gca().set_aspect('equal')\n", - "plt.xlabel('x[pc]')\n", - "plt.ylabel('z[pc]')\n", - "cbar = plt.colorbar(label=r'$P/I$')\n", + "plt.pcolormesh(xcc, zcc, P / I, vmax=0.2, shading=\"nearest\")\n", + "plt.gca().set_aspect(\"equal\")\n", + "plt.xlabel(\"x[pc]\")\n", + "plt.ylabel(\"z[pc]\")\n", + "cbar = plt.colorbar(label=r\"$P/I$\")\n", "\n", "plt.sca(axes[2])\n", - 
"plt.pcolormesh(xcc,zcc,Q,vmin=-0.1,vmax=0.1,shading='nearest',cmap=plt.cm.RdBu)\n", - "plt.gca().set_aspect('equal')\n", - "plt.xlabel('x[pc]')\n", - "plt.ylabel('z[pc]')\n", - "cbar = plt.colorbar(label=r'$Q$')\n", + "plt.pcolormesh(xcc, zcc, Q, vmin=-0.1, vmax=0.1, shading=\"nearest\", cmap=plt.cm.RdBu)\n", + "plt.gca().set_aspect(\"equal\")\n", + "plt.xlabel(\"x[pc]\")\n", + "plt.ylabel(\"z[pc]\")\n", + "cbar = plt.colorbar(label=r\"$Q$\")\n", "plt.sca(axes[3])\n", - "plt.pcolormesh(xcc,zcc,U,vmin=-0.1,vmax=0.1,shading='nearest',cmap=plt.cm.RdBu)\n", - "plt.gca().set_aspect('equal')\n", - "plt.xlabel('x[pc]')\n", - "plt.ylabel('z[pc]')\n", - "cbar = plt.colorbar(label=r'$U$')" + "plt.pcolormesh(xcc, zcc, U, vmin=-0.1, vmax=0.1, shading=\"nearest\", cmap=plt.cm.RdBu)\n", + "plt.gca().set_aspect(\"equal\")\n", + "plt.xlabel(\"x[pc]\")\n", + "plt.ylabel(\"z[pc]\")\n", + "cbar = plt.colorbar(label=r\"$U$\")" ] }, { From 44271faedc914752fd261d44ae7fc7fc84ab3412 Mon Sep 17 00:00:00 2001 From: Chang-Goo Kim Date: Wed, 18 Sep 2024 11:02:12 -0400 Subject: [PATCH 5/5] Synchronize the doc with script --- docs/install.md | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) diff --git a/docs/install.md b/docs/install.md index 76f88d3..7ce3f3e 100644 --- a/docs/install.md +++ b/docs/install.md @@ -3,8 +3,7 @@ ```sh git clone git@github.com:PrincetonUniversity/astro-tigress.git cd astro-tigress -conda create -n astro_tigress -conda activate astro_tigress -conda install pip -pip install -r requirements.txt +conda create --name astro-tigress python=3.8 pip +conda activate astro-tigress +pip install . ```