diff --git a/deployment.html b/deployment.html
index 3767755..ac6c598 100644
--- a/deployment.html
+++ b/deployment.html
@@ -2171,8 +2171,8 @@
Deployment Planning
Deployments since Jan 1, 2023
@@ -2456,6 +2456,18 @@
Deployments since Jan 1, 2023
4902660 |
AI2632-23CA005 |
+
+Argo Canada |
+IOS |
+ARVOR |
+03 Oct, 2023 |
+73.0400 |
+-149.9590 |
+LOUIS S. ST. LAURENT |
+300534062474720 |
+4902611 |
+AI2600-22CA009 |
+
@@ -2463,16 +2475,16 @@ Planned Deployments
-
-
+
+
-
-
+
+
-
+
Argo Canada |
-IOS |
-ARVOR |
-03 Oct, 2023 |
-73.04 |
--149.959 |
-LOUIS S. ST. LAURENT |
-300534062474720 |
-4902611 |
-AI2600-22CA009 |
-
-
-Argo Canada |
BIO |
ARVOR |
10 Oct, 2023 |
42.50 |
--50.000 |
+-50.00 |
DISCOVERY |
300534060123910 |
4902689 |
AI3500-20CA001 |
-
+
Argo Canada |
BIO |
ARVOR |
15 Oct, 2023 |
41.50 |
--49.000 |
+-49.00 |
DISCOVERY |
300534062473430 |
4902608 |
AI2600-22CA006 |
-
+
Argo Dalhousie |
Dal |
PROVOR |
14 Nov, 2023 |
56.75 |
--52.460 |
+-52.46 |
MERIAN |
300125062902880 |
4902684 |
P53865-23CA001 |
-
+
Argo Dalhousie |
Dal |
PROVOR |
21 Nov, 2023 |
56.33 |
--52.890 |
+-52.89 |
MERIAN |
300125062426150 |
4902685 |
P53865-23CA002 |
-
+
Argo Canada |
BIO |
ARVOR |
22 Nov, 2023 |
5.40 |
--18.800 |
+-18.80 |
DISCOVERY |
300534062475420 |
4902603 |
AI2600-22CA001 |
-
+
Argo Canada |
BIO |
ARVOR |
26 Nov, 2023 |
3.40 |
--16.200 |
+-16.20 |
DISCOVERY |
300534062475420 |
4902605 |
AI2600-22CA003 |
-
+
Argo Canada |
BIO |
ARVOR |
01 Dec, 2023 |
-22.00 |
--1.200 |
+-1.20 |
DISCOVERY |
300534062473410 |
4902617 |
AI3500-22CA001 |
-
+
Argo Canada |
BIO |
ARVOR |
01 Dec, 2023 |
-19.00 |
--3.000 |
+-3.00 |
DISCOVERY |
300534062477740 |
4902607 |
AI2600-22CA005 |
-
+
Argo Canada |
IOS |
ARVOR |
01 Dec, 2023 |
26.20 |
--163.300 |
+-163.30 |
HMCS VANCOUVER |
300534061174520 |
4902558 |
AI3500-23CA003 |
-
+
Argo Canada |
IOS |
ARVOR |
01 Dec, 2023 |
26.00 |
--169.200 |
+-169.20 |
HMCS VANCOUVER |
300534060126630 |
4902548 |
AI2600-20CA033 |
-
+
Argo Canada |
BIO |
ARVOR |
07 Dec, 2023 |
-24.60 |
-0.500 |
+0.50 |
DISCOVERY |
300534062476310 |
4902618 |
AI3500-22CA002 |
-
+
Argo Canada |
BIO |
ARVOR |
07 Dec, 2023 |
-27.60 |
-3.700 |
+3.70 |
DISCOVERY |
300534062476460 |
4902619 |
AI3500-22CA002 |
-
+
Argo ONC |
ONC |
Deep ARVOR |
15 Jan, 2024 |
-57.00 |
--61.000 |
+-61.00 |
NA |
300534063500400 |
4902638 |
AD2700-23CA006 |
-
+
Argo ONC |
ONC |
Deep ARVOR |
15 Jan, 2024 |
-60.00 |
--59.000 |
+-59.00 |
NA |
300534063601830 |
4902637 |
@@ -2764,7 +2764,7 @@ Inventory
Argo Canada |
IOS |
-11 |
+12 |
0 |
7 |
diff --git a/deployment/canada_deployments.csv b/deployment/canada_deployments.csv
index b4a23f1..60c9f25 100644
--- a/deployment/canada_deployments.csv
+++ b/deployment/canada_deployments.csv
@@ -11,7 +11,7 @@ Argo Canada,Blair Greenan,BIO,OPERATIONAL,ARVOR,2023-06-03T20:09:00,59.501,-55.0
Argo Canada,Blair Greenan,IOS,OPERATIONAL,ARVOR,2023-06-04T03:08:00,51.3783,-130.842,JOHN P. TULLY,300534060116080,4902614,AI2632-22CA012
Argo Canada,Blair Greenan,BIO,OPERATIONAL,ARVOR,2023-06-10T15:51:00,41.4059,-60.6433,CAPT. JACQUES CARTIER,300534062470430,4902606,AI2600-22CA004
Argo Canada,Blair Greenan,IOS,OPERATIONAL,ARVOR,2023-09-19T00:35:00,72.881,-135.997,LOUIS S. ST. LAURENT,300534062477390,4902610,AI2600-22CA008
-Argo Canada,Blair Greenan,IOS,CONFIRMED,ARVOR,2023-10-03T21:00:00,73.040,-149.959,LOUIS S. ST. LAURENT,300534062474720,4902611,AI2600-22CA009
+Argo Canada,Blair Greenan,IOS,OPERATIONAL,ARVOR,2023-10-03T21:00:00,73.040,-149.959,LOUIS S. ST. LAURENT,300534062474720,4902611,AI2600-22CA009
Argo Canada,Blair Greenan,IOS,CONFIRMED,ARVOR,2023-12-01T00:00:00,26.0,-169.2,HMCS VANCOUVER,300534060126630,4902548,AI2600-20CA033
Argo Canada,Blair Greenan,IOS,CONFIRMED,ARVOR,2023-12-01T00:00:00,26.2,-163.3,HMCS VANCOUVER,300534061174520,4902558,AI3500-23CA003
Argo ONC,Kohen Bauer,ONC,OPERATIONAL,Deep ARVOR,2023-07-10T16:27:00,52.1926,-139.0757,SIR WILFRID LAURIER,300534063607820,4902635,AD2700-23CA003
diff --git a/posts/posts.json b/posts/posts.json
index 4313a46..2c1d25f 100644
--- a/posts/posts.json
+++ b/posts/posts.json
@@ -13,7 +13,7 @@
"categories": [],
"contents": "\nA 2021 paper by Johnson and Bif (2021) demonstrated that diel cycles of oxygen can be observed using the Argo network as a collective set of sensors. More recently, Stoer and Fennel (2022) used similar methodology on particle backscatter to estimate Net Primary Production (NPP) from diel cycles in carbon biomass. Each of these recent papers apply the method Gille (2012) demonstrated for diel cycles of temperature to biogeochemical variables. The calculation of the diel cycle depends on multiple factors, one of them being that floats included in the analysis do not surface at a fixed or few times of day for every profile. Instead, floats must demonstrate good temporal coverage of a 24hr period with near even occurrences.\nFigure 2 from Johnson and Bif (2021). Their caption reads: a, Mean oxygen anomaly in the upper 20 m from each profile with acceptable cycle timing (N = 14,294) versus local hour of the day. Data from all days of year from 2010 to 2020 are included. b, Mean oxygen anomaly in each hourly interval and the least-squares fit of equation (2) to the data shown in a with GOP = 2.2 \\(\\pm\\) 0.3 (1 standard error) mmol m\\(^{−3}\\) d\\(^{−1}\\) O2. c, GOP determined at 2-month intervals in the upper 20 m versus day of year. d, Depth profile of GOP rates for all days of the year. Error bars are one standard error.This is not necessarily typical behaviour for Argo floats. For the analysis presented in Johnson and Bif (2021), of the 50,736 profiles available in the Northern Hemisphere, only 14,294 profiles satisfied the surface time requirements. In this post we will detail Argo Canada’s effort over the last 2-3 years to shift it’s Argo floats to satisfy this surfacing schedule and contribute to future research using this methodology.\nThe post will be a mix of text and python code, showing both the changes over time and the python code needed to demonstrate the change. We will use pandas for data handling, and matplotlib and seaborn for plotting. Lets load our packages and the Argo global index:\n\nimport pandas as pd\n\nimport matplotlib.pyplot as plt\nimport seaborn as sns\nsns.set(style=\"ticks\", palette=\"colorblind\")\n\n# load the global index\nglobal_index = pd.read_csv(\n \"ftp://ftp.ifremer.fr/ifremer/argo/ar_index_global_prof.txt.gz\", \n compression=\"gzip\", header=8\n)\n# subset to only the MEDS DAC, profiles with valid dates\nmeds = global_index.loc[global_index.file.str.contains('meds')]\nmeds = meds.loc[meds.date.notna()]\n\n# convert date to pandas timestamps, take only profiles from the last 3 years\nmeds[\"date\"] = meds.date.astype(str)\\\n .apply(lambda x: x.replace(\".0\", \"\"))\\\n .apply(pd.Timestamp, tz=\"utc\")\nmeds = meds.loc[meds.date > pd.Timestamp(\"01-2020\", tz=\"utc\")]\n\nThe few surviving MetOcean NOVA floats are included here, but it should be noted that these were not specifically reprogrammed for varied surfacing times. Recently deployed deep ARVOR floats are also included, however their cycle period is defined as an integer number of days rather than in hours, and so those will not produce good diel coverage. 
Now, we will calculate the local time using the location data and the python packages timezonefinder and pytz, and visualize local surfacing times over the last 3 years.\n\nimport timezonefinder\ntf = timezonefinder.TimezoneFinder()\nimport pytz\n\nprofiler_type = {\n \"865\":\"NOVA \",\n \"844\":\"ARVOR_SBE \",\n \"878\":\"ARVOR_RBR \",\n \"838\":\"ARVOR_DEEP\",\n \"836\":\"PROVOR \",\n}\n\n# exclude invalid locations\nmeds = meds.loc[(meds.latitude.notna()) & (meds.longitude.notna())]\n\n# get timezone, local time, and hour at surface for each profile\nmeds[\"timezone\"] = [\n pytz.timezone(tf.certain_timezone_at(lat=lat, lng=lon))\\\n for lat, lon in zip(meds.latitude, meds.longitude)\n]\nmeds[\"local_time\"] = [utc_time.tz_convert(tz) for utc_time, tz in zip(meds.date, meds.timezone)]\nmeds[\"surface_hour\"] = [local_time.hour + 0.5 for local_time in meds.local_time]\n\n# add a column for WMO number as well as platform name\nmeds[\"WMO\"] = [int(s.split(\"/\")[1]) for s in meds.file]\nmeds[\"platform\"] = [profiler_type[f\"{p}\"] for p in meds.profiler_type]\n\nfig, ax = plt.subplots(dpi=300, constrained_layout=True)\nsns.lineplot(data=meds, x=\"local_time\", y=\"surface_hour\", hue=\"platform\", \n units=\"WMO\", estimator=None, alpha=0.25, ax=ax\n)\nsns.move_legend(ax, \"upper left\", bbox_to_anchor=(1, 1))\nplt.setp(ax.get_xticklabels(), ha=\"right\", rotation=45)\n\n\n\n\nThe above plot is very busy, but clearly shows a shift in regime in mid-2021, when we began reprogramming all new deployments as well as existing floats via remote commands. For NKE ARVOR floats, which constitute most of the Canadian fleet, we set the cycle period to 245 hours, or 10 days + 5 hours. This creates diel coverage relatively quickly, without sampling at times near each other on subsequent profiles. NKE PROVOR floats were slightly trickier to reprogram, as instead of a cycle period they have a surface time they aim to achieve. This parameter therefore must be reprogrammed every cycle. This is achieved by running a daily GitHub Action powered by python, which you can find on the ArgoCanada GitHub page.\nTo better understand the timing, let’s look closely at an ARVOR float that was deployed in late 2020 and reprogrammed, and a PROVOR float that gets a new command each cycle.\n\n# ARVOR deployed in 2020, PROVOR in 2022\narvor = 4902523\nprovor = 4902623\nsubset = meds.loc[meds.WMO.isin((arvor, provor))]\n# make day of mission our time variable so we can plot them on the same axis\nsubset[\"mission_day\"] = [\n subset.date.loc[i] - subset.date.loc[subset.WMO == subset.WMO.loc[i]].min()\\\n for i in subset.index\n]\n# fractional days\nsubset[\"mission_day\"] = subset[\"mission_day\"].apply(lambda x: x/pd.to_timedelta(1, 'D'))\n\nfig, ax = plt.subplots(dpi=300, constrained_layout=True)\nsns.lineplot(data=subset, x=\"mission_day\", y=\"surface_hour\", hue=\"platform\", \n style=\"platform\", dashes=False, markers=True, ax=ax\n)\nax.set_xlim((0,300))\n\n\n\n\nThe ARVOR float was deployed in 2020, and was reprogrammed remotely in the second half of 2021. The PROVOR float was deployed before the surface time reprogramming protocol was live, but has begun taking commands as of May 2023. The lines slope in opposite directions because the ARVORs operate on a 10 days + 5 hours (245hr) cycle, while the PROVORs are being programmed for a 10 days - 5 hours (235hr) cycle. 
The latter is a minor difference, but was a suggestion from the manufacturer as it may produce an extra profile or two if/when a float dies of exhausted battery life.\nFinally, to get a better idea of how we have performed fleet-wide, let’s look at the distributions of surfacing times since 2020 by year.\n\n# create column for profile year\nmeds[\"year\"] = [d.year for d in meds.local_time]\nmeds = meds.loc[meds.year > 2019] # 2 floats have local times in 2019\n# create a FacetGrid that will plot by year, 2020, 2021, 2022, 2023\ng = sns.displot(\n meds, x=\"surface_hour\", col=\"year\", hue=\"platform\", \n kind=\"hist\", bins=list(range(24)), multiple=\"stack\", \n col_wrap=2, facet_kws=dict(despine=False, sharey=False)\n)\ng.fig.set_dpi(300)\ng.fig.set_constrained_layout(True)\n\n\n\n\nThe above figure shows the progression of the Canadian Argo fleet over the years. Blue bars correspond to ARVOR floats with Seabird CTDs, orange bars to ARVOR floats with RBR CTDs and green bars to PROVOR floats. In the 2020 panel, there are two peaks, one representing local times in the Eastern Pacific, and the other in the Western Atlantic. In 2021 these peaks persist, but for roughly half the year we have good coverage. In 2022, some peaks remain as the final floats still operating on 240 hour cycles are reprogrammed. In the final panel, we see that the fleet operates well to cover the entire day. There are slight biases toward 0600-0700 and 1400-1500 local times in the PROVOR floats (green bars) as the live reprogramming was not active until May this year, but those profiles are now being well distributed. Overall, almost all recent profiles recorded by Argo Canada floats should now meet the statistical criteria to be able to construct diel cycles as in Gille (2012), Johnson and Bif (2021), and Stoer and Fennel (2022).\nReferences\nJohnson, K.S., Bif, M.B. Constraint on net primary productivity of the global ocean by Argo oxygen measurements. Nat. Geosci. 14, 769-774 (2021). https://doi.org/10.1038/s41561-021-00807-z\nStoer, A.C. and Fennel, K. (2023), Estimating ocean net primary productivity from daily cycles of carbon biomass measured by profiling floats. Limnol. Oceanogr. Lett, 8: 368-375. https://doi.org/10.1002/lol2.10295\nGille, S. T. (2012), Diurnal variability of upper ocean temperatures from microwave satellite measurements and Argo profiles, J. Geophys. Res., 117, C11027, doi:10.1029/2012JC007883.\n\n\n\n",
"preview": {},
- "last_modified": "2023-10-17T13:38:50+00:00",
+ "last_modified": "2023-10-17T15:31:44+00:00",
"input_file": {}
},
{
@@ -30,7 +30,7 @@
"categories": [],
"contents": "\nIn this post we will work through performing the response time correction on oxygen observations following Bittig et al. (2014) on Argo data. The focus is more on accessing the proper variables within Argo than describing the actual correction. We will use argopandas package to manage our data fetching from Argo, and use a function from bgcArgoDMQC to do the response time correction. Other basic data manipulation and visualization will use the pandas, numpy, and scipy packages, and matplotlib and seaborn for plotting.\n# conda install -c conda-forge argopandas bgcArgoDMQC\n\nimport numpy as np\nimport pandas as pd\nfrom scipy.interpolate import interp1d\n\nimport matplotlib.pyplot as plt\nimport seaborn as sns\nsns.set(style='ticks', palette='colorblind')\n\nimport argopandas as argo\nfrom bgcArgoDMQC import correct_response_time\nWe will use float 7900589, an APEX float in the North Atlantic which has the intermediate parameter MTIME, defined as the relative time in fractional days since the date of the profile JULD.\nflt = argo.float(7900589)\n# grab core and bgc files for just the most recent cycle\ncore = flt.prof\nbgc = flt.bio_prof\ncore = core[core.file == core.file.iloc[-1]]\nbgc = bgc[bgc.file == bgc.file.iloc[-1]]\nDownloading 'https://data-argo.ifremer.fr/ar_index_global_prof.txt.gz'\nDownloading 'https://data-argo.ifremer.fr/argo_bio-profile_index.txt.gz'\ncore\n\n\n\n\nfile\n\n\ndate\n\n\nlatitude\n\n\nlongitude\n\n\nocean\n\n\nprofiler_type\n\n\ninstitution\n\n\ndate_update\n\n\n1853971\n\n\ncoriolis/7900589/profiles/R7900589_043.nc\n\n\n2021-11-08 12:13:44+00:00\n\n\n55.682\n\n\n-46.691\n\n\nA\n\n\n846\n\n\nIF\n\n\n2021-11-08 15:34:25+00:00\n\n\nbgc\n\n\n\n\nfile\n\n\ndate\n\n\nlatitude\n\n\nlongitude\n\n\nocean\n\n\nprofiler_type\n\n\ninstitution\n\n\nparameters\n\n\nparameter_data_mode\n\n\ndate_update\n\n\n179672\n\n\ncoriolis/7900589/profiles/BR7900589_043.nc\n\n\n2021-11-08 12:13:44+00:00\n\n\n55.682\n\n\n-46.691\n\n\nA\n\n\n846\n\n\nIF\n\n\nMTIME PRES TEMP_DOXY C1PHASE_DOXY C2PHASE_DOXY…\n\n\nRRRRRARRRARR\n\n\n2021-11-08 16:02:45+00:00\n\n\ncore_df = core.levels[['PRES', 'TEMP', 'PSAL']]\nbgc_df = bgc.levels[['PRES', 'MTIME', 'DOXY']]\nDownloading 'https://data-argo.ifremer.fr/dac/coriolis/7900589/profiles/R7900589_043.nc'\nReading 1 file\nDownloading 'https://data-argo.ifremer.fr/dac/coriolis/7900589/profiles/BR7900589_043.nc'\nReading 1 file\ncore_df\n\n\n\n\n\n\n\n\nPRES\n\n\nTEMP\n\n\nPSAL\n\n\nfile\n\n\nN_PROF\n\n\nN_LEVELS\n\n\n\n\n\n\n\n\ncoriolis/7900589/profiles/R7900589_043.nc\n\n\n0\n\n\n0\n\n\n0.43\n\n\n6.7983\n\n\n34.502201\n\n\n1\n\n\n2.30\n\n\n6.7997\n\n\n34.501499\n\n\n2\n\n\n4.42\n\n\n6.8032\n\n\n34.501801\n\n\n3\n\n\n6.01\n\n\n6.8057\n\n\n34.501900\n\n\n4\n\n\n8.07\n\n\n6.8026\n\n\n34.502102\n\n\n…\n\n\n…\n\n\n…\n\n\n…\n\n\n…\n\n\n3\n\n\n470\n\n\nNaN\n\n\nNaN\n\n\nNaN\n\n\n471\n\n\nNaN\n\n\nNaN\n\n\nNaN\n\n\n472\n\n\nNaN\n\n\nNaN\n\n\nNaN\n\n\n473\n\n\nNaN\n\n\nNaN\n\n\nNaN\n\n\n474\n\n\nNaN\n\n\nNaN\n\n\nNaN\n\n\n1900 rows × 3 
columns\n\n\nbgc_df\n\n\n\n\n\n\n\n\nPRES\n\n\nMTIME\n\n\nDOXY\n\n\nfile\n\n\nN_PROF\n\n\nN_LEVELS\n\n\n\n\n\n\n\n\ncoriolis/7900589/profiles/BR7900589_043.nc\n\n\n0\n\n\n0\n\n\n0.43\n\n\n-0.000613\n\n\nNaN\n\n\n1\n\n\n2.30\n\n\n-0.001296\n\n\nNaN\n\n\n2\n\n\n4.42\n\n\nNaN\n\n\nNaN\n\n\n3\n\n\n6.01\n\n\nNaN\n\n\nNaN\n\n\n4\n\n\n8.07\n\n\nNaN\n\n\nNaN\n\n\n…\n\n\n…\n\n\n…\n\n\n…\n\n\n…\n\n\n3\n\n\n470\n\n\nNaN\n\n\nNaN\n\n\nNaN\n\n\n471\n\n\nNaN\n\n\nNaN\n\n\nNaN\n\n\n472\n\n\nNaN\n\n\nNaN\n\n\nNaN\n\n\n473\n\n\nNaN\n\n\nNaN\n\n\nNaN\n\n\n474\n\n\nNaN\n\n\nNaN\n\n\nNaN\n\n\n1900 rows × 3 columns\n\n\nYou will notice looking at the printout of bgc_df that there are a lot of NaN values. The valid MTIME and DOXY values are in the N_PROF dimension 2. There are a variety of reasons why there might be N_PROF > 1 dimensions in an Argo profile. Where that is not the subject I won’t go into why, and frankly I only know the valid data is in N_PROF = 2 by inspecting the dataframe. The valid core data is in N_PROF = 0. If we simply tried to line these separate dataframes up into one, we would fail miserably since our time and oxygen data would not be aligned with our physical data. So instead, we will use the common pressure axis to interpolate onto a common axis.\n# create a dataframe to store interpolated data in\ndf = pd.DataFrame()\n\n# define a pressure axis to interpolate and a depth resolution\ndP = 2.5\ninterp_pressure = np.arange(0, core_df['PRES'].max(), dP)\ndf['PRES'] = interp_pressure\n\n# interpolate\nfor key, source in zip(['MTIME', 'TEMP', 'DOXY'], [bgc_df, core_df, bgc_df]):\n ix = source[key].notna() # remove nan values that will mess with interp\n f = interp1d(source['PRES'][ix], source[key][ix], bounds_error=False)\n df[key] = f(interp_pressure)\ndf\n\n\n\n\nPRES\n\n\nMTIME\n\n\nTEMP\n\n\nDOXY\n\n\n0\n\n\n0.0\n\n\nNaN\n\n\nNaN\n\n\nNaN\n\n\n1\n\n\n2.5\n\n\n-0.001345\n\n\n6.800030\n\n\n265.266078\n\n\n2\n\n\n5.0\n\n\n-0.001957\n\n\n6.804258\n\n\n265.227454\n\n\n3\n\n\n7.5\n\n\n-0.002542\n\n\n6.802751\n\n\n265.246096\n\n\n4\n\n\n10.0\n\n\n-0.003235\n\n\n6.804123\n\n\n264.956293\n\n\n…\n\n\n…\n\n\n…\n\n\n…\n\n\n…\n\n\n374\n\n\n935.0\n\n\n-0.139717\n\n\n3.358495\n\n\n263.701094\n\n\n375\n\n\n937.5\n\n\n-0.140046\n\n\n3.354090\n\n\n263.718486\n\n\n376\n\n\n940.0\n\n\n-0.140375\n\n\n3.351910\n\n\n263.735879\n\n\n377\n\n\n942.5\n\n\n-0.140704\n\n\n3.351850\n\n\n263.753272\n\n\n378\n\n\n945.0\n\n\n-0.141049\n\n\n3.351151\n\n\nNaN\n\n\n379 rows × 4 columns\n\n\nNow we are almost ready to perform the time response correction, except that we don’t know what the time response of this optode is. Without a reference data set like in Bittig et al. (2014) or consecutive up- and down-casts as in Gordon et al. (2020), knowing the response time is not possible. 
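For intuition, the correction can be thought of as inverting a first-order lag: the optode reading m relaxes toward the true concentration c with time constant tau, dm/dt = (c - m)/tau, so the true signal can be recovered as c = m + tau*dm/dt. A minimal sketch of that generic inversion (illustrative only, not the exact bgcArgoDMQC implementation, which follows Bittig et al. (2014)):\n\nimport numpy as np\n\ndef invert_first_order_lag(time_s, measured, tau_s):\n    # sensor model: d(measured)/dt = (true - measured) / tau,\n    # so recover the true signal as measured + tau * d(measured)/dt\n    return measured + tau_s * np.gradient(measured, time_s)\n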
For the purposes of demonstration we will choose a boundary layer thickness (an equivalent parameter, but independent of temperature unlike response time) of 120 micrometers (equivalent to a response time of 67.2 seconds at 20 degrees C).\nIl = 120\ndf['DOXY_ADJUSTED'] = correct_response_time(df['MTIME'], df['DOXY'], df['TEMP'], Il)\ndf['DOXY_DELTA'] = df.DOXY - df.DOXY_ADJUSTED # change in oxygen\nFinally, we’ll plot the profiles to see the end result of the correction.\n# melt the dataframe so that we can use hue keyword when plotting\ndf_melt = df.melt(id_vars=['PRES', 'MTIME', 'TEMP', 'DOXY_DELTA'], var_name='DOXY_STATUS', value_name='DOXY')\n\nfig, axes = plt.subplots(1, 2, sharey=True)\nsns.lineplot(x='DOXY', y='PRES', hue='DOXY_STATUS', data=df_melt, sort=False, ax=axes[0])\nsns.lineplot(x='DOXY_DELTA', y='PRES', data=df, sort=False, ax=axes[1])\naxes[0].legend(loc=3)\naxes[0].set_ylim((250, 0))\noxygen and delta oxygen profiles\nSome observations based on the above:\nIt is important to recall that this is an ascending profile.\nThe first thing your eye was likely drawn to is the large change at 70 m depth. I would wager that this single point is probably too dramatic, but also could be real as the gradient is strong there and oxygen would be favouring the higher side. This point makes me uncomfortable without reference data, but I can’t say for sure that it is wrong.\nFrom 250-100m, oxygen is relatively linear. In this section of the profile, we see a slightly lower DOXY_ADJUSTED than the original DOXY. Since oxygen is decreasing as the float ascends, the float remembers the higher concentration from the deeper depth, and therefore slightly overestimates the true oxygen concentration.\nAt points where there are “notches” in the original profile, those “notches” are amplified in the corrected one.\nThinking more generally about the wider Argo program, there are a few key questions:\nHow would you include this adjusted data in the B-file? Would it go in the DOXY_ADJUSTED field, which currently is used for gain adjustment (Johnson et al. 2015), or would it merit a different field?\nAssuming there is no reliable way to determine boundary layer thickness (time constant), should Argo correct using a generic one since the adjusted data will be “more correct” than the original, even if it is not perfect?\nGiven a lack of reference data, how would you flag the above adjusted profile? Are there any points you don’t believe that should be flagged as bad?\n\n\n\n",
"preview": {},
- "last_modified": "2023-10-17T13:38:50+00:00",
+ "last_modified": "2023-10-17T15:31:44+00:00",
"input_file": {}
},
{
@@ -47,7 +47,7 @@
"categories": [],
"contents": "\nHi Argo community! Today I present to you the argoFloats package. Not only does this package make Argo data more accessible by helping to identify, download, cache, and analyze Argo data, it does so in a way that makes it easy for someone with little coding experience.\nBefore getting ahead of ourselves, let’s download and load the package:\n\n\n\n\n\n\nTo eliminate the gap between Argo data and the end user, we created an easy 5 step process, shown below.\nWork flow for argoFloats package with descriptions on the left and relevent functions on the rightgetIndex()\ngetIndex(), the foundational tool of the package, queries an Argo server for an index of data files. Various servers can be used, with the default being to try Infremer and USGODAE in turn. The possible indices are listed below.\nFile Name\nNickname\nContents\nar_greylist.txt\n-\nSuspicious/malfunctioning floats\nar_index_global_meta.txt.gz\n-\nMetadata files\nar_index_global_prof.txt.gz\n\"core\"\nArgo data\nar_index_global_tech.txt.gz\n-\nTechnical files\nar_index_global_traj.txt.gz\n-\nTrajectory files\nargo_bio-profile_index.txt.gz\n\"bgc\" or \"bgcargo\"\nBiogeochemical data (without S or T)\nargo_bio-traj_index.txt.gz\n-\nBiogeochemical trajectory files\nargo_synthetic-profile_index.txt.gz\n\"synthetic\"\nSynthetic data, successor to \"merge\"\nUsing getIndex() saves the user from having to understand the file structures on the various servers (which are similar, but not quite the same). So, for example, a user need not study an FTP listing such as\n\n\n\nbut instead would simply write\n\n\n\nEasy, right? Now what about the next barrier? How do we cache an index to speed up our download? As of today (2021-10-19), there are 2527015 Argo profiles files. That means 2527015 files that look like this (ftp.ifremer.fr):\n\n\n\nContinuously downloading this index can be tiresome and time consuming. To avoid this problem, we created the age argument in getIndex(). This lets the user indicate how old a downloaded file must be (in days), for it to be considered out-of-date. For example\n\n\n\nindicates that getIndex() should return a previously-downloaded index, provided that it was downloaded less than 10 days ago. Since the previous index would have been downloaded with getIndex(), we call this process “caching”.\nsubset()\nThe indices (obtained with getIndex() and possibly subset using subset,argoFloats-method) store information about individual argo data files, including: the name of the data file, the date of observation, the observation location (latitude, longitude, and ocean), the profiler type, the institution that owns the float, and the most recent time of data update.\nOnce the indices are downloaded, a burning question still remains. How in the world do we handle over two million profiles? The answer to this is to use subset(), which provides the ability to subset by ID, time, geography, variable, institution, ocean, dataMode, cycle, direction, profile, dataStateIndicator, section, etc. This makes it easy to sift through the data based on parameters that the user is likely to be interested in. For more details and examples of each type of subset look at the help docs for subset within the argoFloats package.\nLets consider Argo floats near Nova Scotia. 
If a user wanted to only look at the core Argo profiling files within a 250 km radius of Halifax, Nova Scotia, they would do the following:\n\n\n\ngetProfiles() and readProfiles()\nTo take a deeper dive (excuse the pun) into the data stored within individual Argo data files, the user must download the data files, typically done with getProfiles(), and then read those files, typically with readProfiles().\nTo continue with our example, we might use\n\n\n\nto download and read the data files. Note that getProfiles() uses an age-based cache, just like getIndex() does, to speed up processing.\nAnalyzing Argo data\nThe argoFloats package has a suite of functions for analyzing Argo data, and these work well with the functions provided by the oce package (Kelley and Richards, 2021). These are described in great detail in our paper (Kelley et al. 2021) as well as the help documentation. As with oce, there is an emphasis on simplicity and a philosophy of isolating users from the details of storage. For example, the [[ operator provides a way to extract items from argoFloats objects, with e.g. index[[\"file\"]] being a replacement for index@data$index$file. Similarly, it is easy to extract individual Argo objects from a collection, and data within those objects, including derived data (such as potential temperature).\nPlotting being an important component of analysis, argoFloats provides specialized plots for maps, profiles, temperature-salinity, and quality control analysis, to name a few. Continuing with our example,\n\n\n\ngives a temperature-salinity plot for a set of Argo profiles.\nQuality Control\nUh oh! What are all of those red points in the TS diagram shown above? That TS diagram must be useless, right? Wrong. We’ve got you covered. argoFloats has also included a three-step process for dealing with Quality Control of Argo data.\n\n\n\napplyQC\nTo first answer the question, “What are all of those red dots in the TS diagram?” They are points that were flagged as “bad” during quality control. The applyQC() function identifies any points that are classified as “bad” and sets them to NA to not be used in future plots or calculations.\nLet’s take a look at the impact of applyQC() where the left hand side shows before applyQC() was done and the right hand side shows after it was completed.\n\n\n\nshowQCTests()\nAlthough that was easy, your work is not done yet. It’s always a good idea to know why a certain point was flagged as bad. That is where showQCTests() comes into play. This function reveals which tests were passed and failed for each particular Argo profile. In our example, we might want to loop through all of the profiles in argos and when a “bad” point is encountered, display which tests were performed/failed. Note that for the sake of space, the output is not shown.\n\n\n\nplot(x, which=“QC”)\nFor a last QC visual, we can use plot(x, which=\"QC\"). Let’s take the first output (i.e. float number 4901400) from above. As stated by the output, it failed the Density Inversion and Grey List test. We can look at the quality of each cycle in the data by doing the following:\n\n\n\nmapApp()\nLastly, just in case argoFloats wasn’t cool enough, we’ve also created an app, named mapApp(), which is a way to analyze the spatial-temporal distribution of Argo profiles. One neat feature about mapApp() is the “Code” button. This button shows a popup window containing R code that performs the basic steps used to isolate data for the mapApp() display. 
It even provides code for some reasonable next steps, such as downloading, reading, and plotting the Argo profile data.\n\n\n\nReferences\nKelley, D. E., Harbin, J., & Richards, C. (2021). argoFloats: An R package for analyzing Argo data. Frontiers in Marine Science, 8, 635922. https://doi.org/10.3389/fmars.2021.635922\nKelley, D., and Richards, C. (2020). oce: Analysis of Oceanographic Data. Available online at: https://CRAN.R-project.org/package=oce (accessed October 22, 2021).\n\n\n\n",
"preview": "posts/2021-10-22-argofloats-an-r-package-for-argo-researchers/infremer.png",
- "last_modified": "2023-10-17T13:38:50+00:00",
+ "last_modified": "2023-10-17T15:31:44+00:00",
"input_file": {},
"preview_width": 828,
"preview_height": 894
@@ -66,7 +66,7 @@
"categories": [],
"contents": "\nThis post is a rough guide to implementing real-time adjustments to CHLA values in Argo NetCDF files. The real-time adjustments are relatively new and reflect a balance between simplicity and completeness, with the understanding that delayed-mode QC will be more robust and incorporate more float-specific considerations. In case it’s not clear, nothing in this post should be construed as a definitive QC implementation and should be subject to considerable testing prior to deployment as actual live QC.\nTo implement the tests on existing ArgoNetCDF files in Python, we’ll use the argopandas package (to list float profiles and do some basic interaction with Argo NetCDF files) and gsw (to perform seawater calculations required for some of the checks). We’ll also use pandas data frames and numpy arrays to marshal the generic data handling and matplotlib to plot as we go along.\n# pip install argopandas gsw matplotlib\n# conda install -c conda-forge argopandas gsw matplotlib\nimport argopandas as argo\nimport gsw\nimport numpy as np\nimport matplotlib.pyplot as plt\nExample data\nWe’ll need some data to practice with, too. As an example, I’ll use all the profiles from float 6904117. It’s probably best to use a fresh cache for every Python session (the default), but for the purposes of rendering this post I’ll use a local cache to avoid downloading the files everytime I render it.\nprofiles = argo.prof \\\n .subset_float(6904117) \\\n .subset_direction('ascending') \nbgc_profiles = argo.bio_prof \\\n .subset_float(6904117) \\\n .subset_direction('ascending')\nDownloading 'https://data-argo.ifremer.fr/ar_index_global_prof.txt.gz'\nDownloading 'https://data-argo.ifremer.fr/argo_bio-profile_index.txt.gz'\nprofiles\n\n\n\n\nfile\n\n\ndate\n\n\nlatitude\n\n\nlongitude\n\n\nocean\n\n\nprofiler_type\n\n\ninstitution\n\n\ndate_update\n\n\n1826644\n\n\ncoriolis/6904117/profiles/R6904117_001.nc\n\n\n2021-02-03 12:42:43+00:00\n\n\n57.295\n\n\n19.963\n\n\nA\n\n\n834\n\n\nIF\n\n\n2021-09-14 15:09:29+00:00\n\n\n1826646\n\n\ncoriolis/6904117/profiles/R6904117_002.nc\n\n\n2021-02-05 00:55:14+00:00\n\n\n57.251\n\n\n19.854\n\n\nA\n\n\n834\n\n\nIF\n\n\n2021-09-14 15:09:42+00:00\n\n\n1826648\n\n\ncoriolis/6904117/profiles/R6904117_003.nc\n\n\n2021-02-06 12:42:44+00:00\n\n\n57.165\n\n\n19.849\n\n\nA\n\n\n834\n\n\nIF\n\n\n2021-09-14 15:09:52+00:00\n\n\n1826650\n\n\ncoriolis/6904117/profiles/R6904117_004.nc\n\n\n2021-02-08 00:59:14+00:00\n\n\n57.136\n\n\n19.973\n\n\nA\n\n\n834\n\n\nIF\n\n\n2021-09-14 15:10:04+00:00\n\n\n1826652\n\n\ncoriolis/6904117/profiles/R6904117_005.nc\n\n\n2021-02-09 12:44:44+00:00\n\n\n57.163\n\n\n20.046\n\n\nA\n\n\n834\n\n\nIF\n\n\n2021-09-14 15:10:15+00:00\n\n\n…\n\n\n…\n\n\n…\n\n\n…\n\n\n…\n\n\n…\n\n\n…\n\n\n…\n\n\n…\n\n\n1826978\n\n\ncoriolis/6904117/profiles/R6904117_168.nc\n\n\n2021-10-12 12:26:21+00:00\n\n\n57.789\n\n\n19.844\n\n\nA\n\n\n834\n\n\nIF\n\n\n2021-10-12 16:43:24+00:00\n\n\n1826980\n\n\ncoriolis/6904117/profiles/R6904117_169.nc\n\n\n2021-10-14 00:28:17+00:00\n\n\n57.828\n\n\n19.870\n\n\nA\n\n\n834\n\n\nIF\n\n\n2021-10-14 04:40:19+00:00\n\n\n1826982\n\n\ncoriolis/6904117/profiles/R6904117_170.nc\n\n\n2021-10-15 12:30:17+00:00\n\n\n57.816\n\n\n19.850\n\n\nA\n\n\n834\n\n\nIF\n\n\n2021-10-15 17:48:45+00:00\n\n\n1826984\n\n\ncoriolis/6904117/profiles/R6904117_171.nc\n\n\n2021-10-17 00:29:12+00:00\n\n\n57.831\n\n\n19.861\n\n\nA\n\n\n834\n\n\nIF\n\n\n2021-10-17 04:40:26+00:00\n\n\n1826986\n\n\ncoriolis/6904117/profiles/R6904117_172.nc\n\n\n2021-10-18 
13:12:23+00:00\n\n\n57.825\n\n\n19.852\n\n\nA\n\n\n834\n\n\nIF\n\n\n2021-10-18 16:39:57+00:00\n\n\n172 rows × 8 columns\n\n\nbgc_profiles\n\n\n\n\nfile\n\n\ndate\n\n\nlatitude\n\n\nlongitude\n\n\nocean\n\n\nprofiler_type\n\n\ninstitution\n\n\nparameters\n\n\nparameter_data_mode\n\n\ndate_update\n\n\n172309\n\n\ncoriolis/6904117/profiles/BR6904117_001.nc\n\n\n2021-02-03 12:42:43+00:00\n\n\n57.295\n\n\n19.963\n\n\nA\n\n\n834\n\n\nIF\n\n\nMTIME PRES C1PHASE_DOXY C2PHASE_DOXY TEMP_DOXY…\n\n\nRRRRRARRRRRRRRRRRARRRRRRRRAR\n\n\n2021-09-14 15:09:29+00:00\n\n\n172311\n\n\ncoriolis/6904117/profiles/BR6904117_002.nc\n\n\n2021-02-05 00:55:14+00:00\n\n\n57.251\n\n\n19.854\n\n\nA\n\n\n834\n\n\nIF\n\n\nMTIME PRES C1PHASE_DOXY C2PHASE_DOXY TEMP_DOXY…\n\n\nRRRRRARRRRRRRRRRRARRRRRRRRAR\n\n\n2021-09-14 15:09:42+00:00\n\n\n172313\n\n\ncoriolis/6904117/profiles/BR6904117_003.nc\n\n\n2021-02-06 12:42:44+00:00\n\n\n57.165\n\n\n19.849\n\n\nA\n\n\n834\n\n\nIF\n\n\nMTIME PRES C1PHASE_DOXY C2PHASE_DOXY TEMP_DOXY…\n\n\nRRRRRARRRRRRRRRRRARRRRRRRRAR\n\n\n2021-09-14 15:09:52+00:00\n\n\n172315\n\n\ncoriolis/6904117/profiles/BR6904117_004.nc\n\n\n2021-02-08 00:59:14+00:00\n\n\n57.136\n\n\n19.973\n\n\nA\n\n\n834\n\n\nIF\n\n\nMTIME PRES C1PHASE_DOXY C2PHASE_DOXY TEMP_DOXY…\n\n\nRRRRRARRRRRRRRRRRARRRRRRRRAR\n\n\n2021-09-14 15:10:04+00:00\n\n\n172317\n\n\ncoriolis/6904117/profiles/BR6904117_005.nc\n\n\n2021-02-09 12:44:44+00:00\n\n\n57.163\n\n\n20.046\n\n\nA\n\n\n834\n\n\nIF\n\n\nMTIME PRES C1PHASE_DOXY C2PHASE_DOXY TEMP_DOXY…\n\n\nRRRRRARRRRRRRRRRRARRRRRRRRAR\n\n\n2021-09-14 15:10:15+00:00\n\n\n…\n\n\n…\n\n\n…\n\n\n…\n\n\n…\n\n\n…\n\n\n…\n\n\n…\n\n\n…\n\n\n…\n\n\n…\n\n\n172643\n\n\ncoriolis/6904117/profiles/BR6904117_168.nc\n\n\n2021-10-12 12:26:21+00:00\n\n\n57.789\n\n\n19.844\n\n\nA\n\n\n834\n\n\nIF\n\n\nMTIME PRES C1PHASE_DOXY C2PHASE_DOXY TEMP_DOXY…\n\n\nRRRRRARRRRRRRRRRRARRRRRRRRAR\n\n\n2021-10-12 16:43:24+00:00\n\n\n172645\n\n\ncoriolis/6904117/profiles/BR6904117_169.nc\n\n\n2021-10-14 00:28:17+00:00\n\n\n57.828\n\n\n19.870\n\n\nA\n\n\n834\n\n\nIF\n\n\nMTIME PRES C1PHASE_DOXY C2PHASE_DOXY TEMP_DOXY…\n\n\nRRRRRARRRRRRRRRRRARRRRRRRRAR\n\n\n2021-10-14 04:40:19+00:00\n\n\n172647\n\n\ncoriolis/6904117/profiles/BR6904117_170.nc\n\n\n2021-10-15 12:30:17+00:00\n\n\n57.816\n\n\n19.850\n\n\nA\n\n\n834\n\n\nIF\n\n\nMTIME PRES C1PHASE_DOXY C2PHASE_DOXY TEMP_DOXY…\n\n\nRRRRRARRRRRRRRRRRARRRRRRRRAR\n\n\n2021-10-15 17:48:45+00:00\n\n\n172649\n\n\ncoriolis/6904117/profiles/BR6904117_171.nc\n\n\n2021-10-17 00:29:12+00:00\n\n\n57.831\n\n\n19.861\n\n\nA\n\n\n834\n\n\nIF\n\n\nMTIME PRES C1PHASE_DOXY C2PHASE_DOXY TEMP_DOXY…\n\n\nRRRRRARRRRRRRRRRRARRRRRRRRAR\n\n\n2021-10-17 04:40:26+00:00\n\n\n172651\n\n\ncoriolis/6904117/profiles/BR6904117_172.nc\n\n\n2021-10-18 13:12:23+00:00\n\n\n57.825\n\n\n19.852\n\n\nA\n\n\n834\n\n\nIF\n\n\nMTIME PRES C1PHASE_DOXY C2PHASE_DOXY TEMP_DOXY…\n\n\nRRRRRARRRRRRRRRRRARRRRRRRRAR\n\n\n2021-10-18 16:39:57+00:00\n\n\n172 rows × 10 columns\n\n\nThe recipe\nAs I understand it, this is what has to happen to the real-time measurements during the CHLA processing:\nCHLA values whose flags are unset are flagged as Flag.PROBABLY_BAD\nCHLA_ADJUSTED values whose flags are unset are flagged as Flag.GOOD\nApply global range test (flag for CHLA and CHLA_ADJUSTED set to Flag.BAD for CHLA values outside the range -0.1 to 100)\nApply the “dark correction” to CHLA values:\nRead the SCIENTIFIC_CALIB_COEFFICIENT variable for the CHLA profile and parse the FLOAT_DARK, and FLOAT_DARK_QC values. 
It is likely that FLOAT_DARK_QC doesn’t exist in any files yet, so default to Flag.NO_QC. For profiles that haven’t gone through 5 cycles deeper than the “mixed layer depth”, there should be a PRELIM_DARK variable in SCIENTIFIC_CALIB_COEFFICIENT\nIf FLOAT_DARK exists, apply the equation CHLA_ADJUSTED = ((FLUORESCENCE_CHLA-FLOAT_DARK)*SCALE_CHLA)/2.\nIf it doesn’t, try to calculate PRELIM_DARK and look for 4 previous instances of PRELIM_DARK with which FLOAT_DARK/_QC should be calculated. If there are 5 PRELIM_DARK values, calculate FLOAT_DARK/_QC and use that. If there aren’t, calculate the adjusted value using PRELIM_DARK.\n\nApply the non-photochemical quenching correction (NPC) to the value calculated by the dark correction. This is the correction for the upper portion of the ocean whereby phytoplankton have a reduced response to UV light because they have already been exposed to it from the sun.\nA few of these steps have details that need to be expanded upon, so let’s do those first.\nQC Flags\nIn addition to computing adjusted realtime values, the QC checks for CHLA also flag some values using the Argo flag scheme. One of the challenges in doing this is that you don’t want to make a data point look “better” by assigning a QC flag (e.g., flag a value as “probably bad” when it’s already been flagged as “bad”). This logic was implemented by the experimental argortqcpy package (here) and I’ve modified it slightly to get all of the flag logic in one happy Python class:\nclass Flag:\n \"\"\"\n Flags for check output. These values are valid values of the\n ``qc`` and ``adjusted_qc`` attributes of a\n :class:`~medsrtqc.core.Trace` object. Utility functions are\n provided as static methods to get the name or value of a flag\n or to update flag values ensuring that values that are already\n marked at a \"worse\" QC level are not inadvertently changed.\n \"\"\"\n\n @staticmethod\n def label(flag):\n \"\"\"Return the label of a QC flag\"\"\"\n return Flag._names[flag]\n\n @staticmethod\n def value(label):\n \"\"\"Return the value of a QC flag\"\"\"\n for value, lab in Flag._names.items():\n if label == lab:\n return value\n raise KeyError(f\"'{label}' is not the name of a QC flag\")\n\n @staticmethod\n def update_safely(qc, to, where=None):\n \"\"\"\n Safely update ``qc`` to the value ``to``. Values that are\n already marked at a \"worse\" QC level are not modified.\n \"\"\"\n where = slice(None) if where is None else where\n flags = qc[where]\n for overridable_flag in Flag._precedence[to]:\n flags[flags == overridable_flag] = to\n qc[where] = flags\n\n NO_QC = b'0'\n GOOD = b'1'\n PROBABLY_GOOD = b'2'\n PROBABLY_BAD = b'3'\n BAD = b'4'\n CHANGED = b'5'\n # '6' not used\n # '7' not used\n ESTIMATED = b'8'\n MISSING = b'9'\n FILL_VALUE = b''\n\n _names = {\n NO_QC: 'NO_QC',\n GOOD: 'GOOD',\n PROBABLY_GOOD: 'PROBABLY_GOOD',\n PROBABLY_BAD: 'PROBABLY_BAD',\n BAD: 'BAD',\n CHANGED: 'CHANGED',\n ESTIMATED: 'ESTIMATED',\n MISSING: 'MISSING',\n FILL_VALUE: 'FILL_VALUE'\n }\n\n _precedence = {\n NO_QC: set(),\n GOOD: {\n NO_QC,\n },\n PROBABLY_GOOD: {\n NO_QC,\n GOOD,\n CHANGED,\n },\n PROBABLY_BAD: {\n NO_QC,\n GOOD,\n PROBABLY_GOOD,\n CHANGED,\n },\n BAD: {\n NO_QC,\n GOOD,\n PROBABLY_GOOD,\n CHANGED,\n PROBABLY_BAD,\n },\n CHANGED: {\n NO_QC,\n },\n ESTIMATED: {\n NO_QC,\n GOOD,\n PROBABLY_GOOD,\n },\n MISSING: {\n NO_QC,\n },\n }\nParsing SCIENTIFIC_CALIB_COEFFICIENT\nThis variable in the NetCDF files looks like this:\nDARK_CHLA = 47, SCALE_CHLA = 0.0073\nand is in string form. 
It can be parsed by splitting a few times to return a dict() and reconstituted by pasting together a few times.\ndef parse_scientific_calib_coefficient(val):\n parts = val.split(',')\n vals = [part.split('=') for part in parts]\n return {k.strip(): float(v.strip()) for k, v in vals}\n\ndef unparse_calib_coefficient(coefs):\n return ', '.join(f'{k} = {v}' for k, v in coefs.items())\n\ncoefs = parse_scientific_calib_coefficient('DARK_CHLA = 47, SCALE_CHLA = 0.0073')\nprint(coefs)\ncoefs['SOMETHING_ELSE'] = 1.23\nunparse_calib_coefficient(coefs)\n{'DARK_CHLA': 47.0, 'SCALE_CHLA': 0.0073}\n\n\n\n\n\n'DARK_CHLA = 47.0, SCALE_CHLA = 0.0073, SOMETHING_ELSE = 1.23'\nThe mixed layer depth\nAs I understand it, there are as many ways to calculate the mixed layer depth as there are oceanographers. The following is an implementation based on the guidance provided in the draft CHLA QC document. I’m using plt as an argument here so that you can debug this stuff interactively (but skip installing matplotlib in production). The gist is that it looks for density changes > 0.03 below 10 dbar.\nimport gsw\n\ndef calc_mixed_layer_depth(pres, temp, psal, longitude=0, latitude=0, plt=None):\n abs_salinity = gsw.SA_from_SP(psal, pres, longitude, latitude)\n conservative_temp = gsw.CT_from_t(abs_salinity, temp, pres)\n density = gsw.sigma0(abs_salinity, conservative_temp)\n\n if plt:\n plt.plot(density, pres)\n plt.gca().invert_yaxis()\n plt.gca().set_xlabel('sigma0')\n\n mixed_layer_start = (np.diff(density) > 0.03) & (pres[:-1] > 10)\n if not np.any(mixed_layer_start):\n # Can't determine mixed layer depth (no density changes > 0.03 below 10 dbar)\n return None\n\n mixed_layer_start_index = np.where(mixed_layer_start)[0][0]\n mixed_layer_depth = pres[mixed_layer_start_index]\n\n if plt:\n plt.gca().axhline(y = mixed_layer_depth, linestyle='--')\n\n return mixed_layer_depth\nIt’s a little easier to parameterize this in terms of a NetCDFWrapper object since that’s what we get handed.\ndef calc_mixed_layer_depth_nc(nc_core, plt=None):\n if 'PRES' not in nc_core.variables or \\\n 'TEMP' not in nc_core.variables or \\\n 'PSAL' not in nc_core.variables:\n return None\n \n pres = nc_core['PRES'][:][0]\n temp = nc_core['TEMP'][:][0]\n psal = nc_core['PSAL'][:][0]\n\n if 'LONGITUDE' in nc_core.variables and 'LATITUDE' in nc_core.variables:\n longitude = nc_core['LONGITUDE'][:][0]\n latitude = nc_core['LATITUDE'][:][0]\n else:\n longitude = 0\n latitude = 0\n\n return calc_mixed_layer_depth(\n pres, temp, psal, \n longitude=longitude,\n latitude=latitude, \n plt=plt)\n\ncalc_mixed_layer_depth_nc(\n argo.nc('dac/' + profiles.reset_index().file[0]), \n plt=plt)\nDownloading 'https://data-argo.ifremer.fr/dac/coriolis/6904117/profiles/R6904117_001.nc'\n\n\n\n\n\n60.5\nCalculating FLOAT_DARK\nThe guidance on calculating PRELIM_DARK is that it’s the minimum CHLA value below the mixed layer depth boundary. In Python:\ndef calc_prelim_dark(pres, chla, mixed_layer_depth):\n if mixed_layer_depth is None:\n return None\n \n chla_filter = chla[(pres > mixed_layer_depth) & ~np.isnan(chla)]\n if len(chla_filter) > 0:\n return chla_filter.min()\n else:\n return None\nBecause no PRELIM_DARK values have been calculated since the spec is new, we need something to do this too. 
Again, it’s a little easier below if we parameterize this in terms of a NetCDFWrapper.\ndef calc_prelim_dark_nc(nc_bgc, nc_core):\n if 'CHLA' not in nc_bgc.variables or 'PRES' not in nc_bgc.variables:\n raise KeyError(f\"'CHLA' or 'PRES' not found\")\n \n # we need a mixed layer depth for this calculation\n mixed_layer_depth = calc_mixed_layer_depth_nc(nc_core)\n if mixed_layer_depth is None:\n return None\n\n chla_prof_i = nc_bgc.param[nc_bgc.param.STATION_PARAMETERS.str.strip() == 'CHLA'] \\\n .reset_index() \\\n .iloc[0] \\\n .N_PROF\n\n pres = nc_bgc['PRES'][:][chla_prof_i]\n chla = nc_bgc['CHLA'][:][chla_prof_i]\n \n return calc_prelim_dark(pres, chla, mixed_layer_depth)\n\ncalc_prelim_dark_nc(\n argo.nc('dac/' + bgc_profiles.reset_index().file[0]),\n argo.nc('dac/' + profiles.reset_index().file[0]))\nDownloading 'https://data-argo.ifremer.fr/dac/coriolis/6904117/profiles/BR6904117_001.nc'\n\n\n\n\n\n0.2263\nCalculating FLOAT_DARK is a little more involved and it’s where we’ll make use of our float/cycle index that we created earlier. We need to start from the first NetCDF file for the float and collect any PRELIM_DARK values from SCIENTIFIC_CALIB_COEFFICIENT that might have been calculated. If there were 3 or fewer, we do nothing. If there are exactly 4, we add our PRELIM_DARK that we just calculated, calculate the mean of the values, and use that. The FLOAT_DARK_QC gets added based on the standard deviation of the PRELIM_DARK values.\ndef accumulate_prelim_dark(bio_prof_files):\n prelim_dark = []\n for nc in argo.nc(bio_prof_files):\n coefs_df = nc.calib[nc.calib.PARAMETER.str.strip() == 'CHLA'] \\\n .filter(['SCIENTIFIC_CALIB_COEFFICIENT'])\n if len(coefs_df) == 0 or 'SCIENTIFIC_CALIB_COEFFICIENT' not in coefs_df:\n continue\n\n coefs = parse_scientific_calib_coefficient(coefs_df.iloc[0][0])\n if 'PRELIM_DARK' not in coefs:\n continue\n \n prelim_dark.append(coefs['PRELIM_DARK'])\n \n return np.array(prelim_dark)\n\naccumulate_prelim_dark(['dac/' + f for f in bgc_profiles.head(10).file])\nDownloading 9 files from 'https://data-argo.ifremer.fr/dac/coriolis/6904117/profiles'\n\n\n\n\n\narray([], dtype=float64)\nAn empty result here is expected since the scheme was just invented.\nThe whole game\nThe rest is not very oceanographic but does involve navigating the structure of BGC variables’ encoding in Argo profile NetCDFs. First, we get a NetCDFWrapper handle and a local filename to the .nc file. 
In argopandas, a NetCDFWrapper is a thin wrapper around a netCDF4.Dataset that implements a few common accessors in addition to the most common accessors for the dataset (notably, obj.variables and obj['variable name']).\nnc_filename = argo.filename('dac/coriolis/6904117/profiles/BR6904117_171.nc')\nnc_core_filename = argo.filename('dac/coriolis/6904117/profiles/R6904117_171.nc')\nnc = argo.nc(nc_filename)\nnc_core = argo.nc(nc_core_filename)\n\n# make sure we've got a CHLA variable\nif 'CHLA' not in nc.variables or 'PRES' not in nc.variables:\n raise KeyError(f\"'CHLA' or 'PRES' not found in '{nc_filename}'\")\n\n# find the profile index associated with CHLA\nchla_prof_i = nc.param[nc.param.STATION_PARAMETERS.str.strip() == 'CHLA'] \\\n .reset_index() \\\n .iloc[0] \\\n .N_PROF\n\n# get the PRES/CHLA/CHLA_QC series we'll work with\n# (note that there will be trailing NaN values here\n# but we want to keep those because we'll have to keep the\n# size the same to reassign values to a copy later)\npres = nc['PRES'][:][chla_prof_i]\nchla = nc['CHLA'][:][chla_prof_i]\nchla_qc_original = nc['CHLA_QC'][:][chla_prof_i]\nchla_qc = chla_qc_original.copy() # keep original so we can compare!\n\n# create the chla_adjusted and chla_qc variables from originals\nchla_adjusted = chla.copy()\nchla_adjusted_qc = chla_qc.copy()\n\n# reset chla_qc to Flag.NO_QC\nchla_qc[:] = Flag.NO_QC\n\n# plot to verify!\nplt.plot(chla, pres)\nplt.gca().invert_yaxis()\nDownloading 'https://data-argo.ifremer.fr/dac/coriolis/6904117/profiles/BR6904117_171.nc'\nDownloading 'https://data-argo.ifremer.fr/dac/coriolis/6904117/profiles/R6904117_171.nc'\nThe first step is to set the initial QC values to Flag.PROBABLY_BAD.\nchla_qc[:] = Flag.PROBABLY_BAD\nchla_adjusted_qc[:] = Flag.GOOD\nThen we apply the global range test:\nFlag.update_safely(chla_qc, Flag.BAD, where=(chla < -0.1) | (chla > 100))\nFlag.update_safely(chla_adjusted_qc, Flag.BAD, where=(chla < -0.1) | (chla > 100))\nNext, we go through the steps for the dark correction. This profile was collected before any of this QC was implemented so it doesn’t contain any PRELIM_DARK or FLOAT_DARK. 
I’ll write out the logic anyway.\ncoefs_df = nc.calib[nc.calib.PARAMETER.str.strip() == 'CHLA'] \\\n .filter(['SCIENTIFIC_CALIB_COEFFICIENT'])\nif len(coefs_df) == 0 or 'SCIENTIFIC_CALIB_COEFFICIENT' not in coefs_df:\n raise ValueError(f\"Can't find 'SCIENTIFIC_CALIB_COEFFICIENT' for 'CHLA' in file '{nc_filename}'\")\n\n# keep original and modified coefs so we know if they need updating\ncoefs = parse_scientific_calib_coefficient(coefs_df.iloc[0][0])\ncoefs_mod = coefs.copy()\n\nif 'FLOAT_DARK' in coefs:\n float_dark_qc = coefs['FLOAT_DARK_QC'] if 'FLOAT_DARK_QC' in coefs else Flag.NO_QC\n chla_adjusted[:] = chla - coefs['FLOAT_DARK']\n Flag.update_safely(chla_adjusted_qc, float_dark_qc)\nelse:\n prelim_dark_acc = accumulate_prelim_dark(['dac/' + f for f in bgc_profiles.file])\n prelim_dark_this = calc_prelim_dark_nc(nc, nc_core)\n if prelim_dark_this is None:\n prelim_dark_this = np.array([])\n else:\n coefs_mod['PRELIM_DARK'] = prelim_dark_this\n prelim_dark_this = np.array([prelim_dark_this])\n \n prelim_dark_acc = np.concatenate([prelim_dark_acc, prelim_dark_this])\n\n if len(prelim_dark_acc) > 0:\n float_dark = prelim_dark_acc.mean()\n float_dark_qc = Flag.PROBABLY_GOOD # not sure how this is actually calculated\n\n chla_adjusted[:] = chla - float_dark\n Flag.update_safely(chla_adjusted_qc, float_dark_qc)\n\n # potentially store in coefs_mod if there are enough PRELIM_DARK values\n if len(prelim_dark_acc) == 5:\n coefs_mod['FLOAT_DARK'] = float_dark\n coefs_mod['FLOAT_DARK_QC'] = float_dark_qc\n else:\n # we can't calculate the adjusted value\n chla_adjusted[:] = np.nan\n\n# should show the adjustment\nprint(chla[::100])\nprint(chla_adjusted[::100])\nDownloading 161 files from 'https://data-argo.ifremer.fr/dac/coriolis/6904117/profiles'\n\n\n[1.1241999864578247 1.1461000442504883 1.292099952697754\n 1.0292999744415283 0.23360000550746918 0.20440000295639038\n 0.21170000731945038 0.21170000731945038 0.23360000550746918\n 0.2773999869823456 0.2847000062465668 0.2847000062465668\n 0.2919999957084656]\n[0.9343999624252319 0.9563000202178955 1.1022999286651611\n 0.8394999504089355 0.04380001127719879 0.014600008726119995\n 0.021900013089179993 0.021900013089179993 0.04380001127719879\n 0.0875999927520752 0.09490001201629639 0.09490001201629639\n 0.10220000147819519]\nThere are no previous PRELIM_DARK values and no FLOAT_DARK value, so this applies the calculation based on the current profile.\nFrom my reading of the document, that’s the operation! What’s left is to figure out what changed and modify the NetCDF. There is probably an object diff library in Python that could do a good job of reporting what changed and I’d encourage readers to implement it! The objects that might have changed are coefs_mod (check differences with coefs) and chla_qc (check differences via chla_qc_original). 
chla_adjusted and chla_adjusted_qc can be written without checking diffs since in theory a brand new file won’t have those variables anyway.\nI imagine modification would be done by making a copy of the NetCDF file and opening via netCDF4.Dataset() in write mode.\nif coefs_mod != coefs:\n coefs_serialized = unparse_calib_coefficient(coefs_mod)\n # write coefs_serialized to the appropriate location in \n # 'SCIENTIFIC_CALIB_COEFFICIENT'\n\nchla_qc_updated = chla_qc != chla_qc_original\nTests\nIn the course of writing all of this I also wrote some unit tests and it’s worth putting them here so they don’t get lost!\nimport unittest\n\nclass TestFlag(unittest.TestCase):\n\n def test_value(self):\n self.assertEqual(Flag.NO_QC, Flag.value('NO_QC'))\n self.assertEqual(Flag.label(Flag.NO_QC), 'NO_QC')\n with self.assertRaises(KeyError):\n Flag.value('not a QC key')\n with self.assertRaises(KeyError):\n Flag.label(b'def not a flag')\n\n def test_update(self):\n qc = np.array([Flag.GOOD, Flag.PROBABLY_BAD, Flag.MISSING])\n Flag.update_safely(qc, to=Flag.BAD)\n self.assertTrue(np.all(qc == np.array([Flag.BAD, Flag.BAD, Flag.MISSING])))\n\n qc = np.array([Flag.GOOD, Flag.PROBABLY_BAD, Flag.MISSING])\n Flag.update_safely(qc, to=Flag.BAD, where=np.array([False, True, False]))\n self.assertTrue(np.all(qc == np.array([Flag.GOOD, Flag.BAD, Flag.MISSING])))
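\nReturning to the write-back step above, a hypothetical sketch of the modification (reusing the names computed earlier; if a brand new file lacks CHLA_ADJUSTED and CHLA_ADJUSTED_QC they would need to be created with createVariable() first):\nimport shutil\nimport netCDF4\n\n# edit a copy so the original download stays pristine\nshutil.copy(nc_filename, 'BR6904117_171_rtqc.nc')\nwith netCDF4.Dataset('BR6904117_171_rtqc.nc', 'a') as ds:\n    ds['CHLA_QC'][chla_prof_i, :] = chla_qc\n    ds['CHLA_ADJUSTED'][chla_prof_i, :] = chla_adjusted\n    ds['CHLA_ADJUSTED_QC'][chla_prof_i, :] = chla_adjusted_qc\n\n\n\n",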
"preview": {},
- "last_modified": "2023-10-17T13:38:50+00:00",
+ "last_modified": "2023-10-17T15:31:44+00:00",
"input_file": {}
},
{
@@ -83,7 +83,7 @@
"categories": [],
"contents": "\nOne of the named quality control tests that Argo profile longitude/latitude measurements must undergo is the “Position on land” test. All data assembly centres have their own implementation of this test. However, in migrating some code between languages I noted some IT challenges that may pop up with several approaches (notably, those that require a Python PROJ, GDAL, or GEOS install). This post is going through some options for how to implement that test in both Python and R with varying levels of dependencies. I’ll use the argodata to load all the profile locations which I’ll use to test the various approaches.\n\n\nlibrary(argodata)\nprof <- argo_global_prof()\n\n\n\nUse a vector definition of ‘land’ in R\nThis the the most obvious choice and probably the way that the test is implemented most frequently. The question does arise, though, as to where one gets the polygons for “land”. I would suggest using the Natural Earth 1:10,000,000 ocean data set because it has a clear source/version history and has a reasonable file size for the kinds of accuracy that we need. Most floats are deployed in water over 1000 m deep and aren’t so close to the coat that a higher resolution data set would improve the accuracy of the test. If you need a higher resolution you can use the Global Administration Database which also has a clear source/version history (but much larger file sizes).\n\n\n# download/unzip 1:10,000,000 oceans\ncurl::curl_download(\n \"https://www.naturalearthdata.com/http//www.naturalearthdata.com/download/10m/physical/ne_10m_ocean.zip\",\n \"ne_10m_ocean.zip\"\n)\nunzip(\"ne_10m_ocean.zip\", exdir = \".\")\n\n\n\nIn R the easiest way to go about this is to use the sf package, which you will need to load the files distributed by Natural Earth or GADM. Because of a recent update to sf, you have to omit the CRS values so that the longitude/latitude values are treated as Cartesian. Because the ocean data set was prepared for this use-case in mind, this isn’t a problem.\n\n\nlibrary(sf)\nocean <- read_sf(\"ne_10m_ocean.shp\") %>% \n st_set_crs(NA)\nplot(ocean$geometry, col = \"lightblue\")\n\n\n\n\nIf you want to check “is this point in the ocean”, you can use st_intersects().\n\n\nprofiles <- data.frame(\n id = \"prof1\",\n longitude = c(-65, -60),\n latitude = c(45, 45)\n)\n\nprofiles_sf <- st_as_sf(\n profiles,\n coords = c(\"longitude\", \"latitude\")\n)\n\nst_intersects(profiles_sf, ocean, sparse = FALSE)\n\n\n [,1]\n[1,] FALSE\n[2,] TRUE\n\nThe file size of the shapefile is about 6 MB unzipped, which is fairly reasonable. If you’re in an IT environment where installing R and R packages from CRAN is easy and you can maintain a recent GEOS/PROJ/GDAL stack, you’re good to go! If you can install packages but can’t maintain a system library stack, you can use the above as a “prep script” and distribute a well-known binary representation of the ocean polygon with your code. You can then use the geos package.\n\n\n# in some preparation script...\nocean_wkb <- st_as_binary(ocean$geometry)\nsaveRDS(ocean_wkb, \"ne_10m_ocean.WKB.rds\")\n\n# in your production code\nlibrary(geos)\nocean <- geos_read_wkb(readRDS(\"ne_10m_ocean.WKB.rds\"))\ngeos_intersects_any(\n geos_make_point(profiles$longitude, profiles$latitude),\n ocean\n)\n\n\n[1] FALSE TRUE\n\nUse a raster definition of ‘land’\nAnother option is to use a raster mask (zero or one values) to implement the point-on-land test. 
The nice part about this is that all you need is the NetCDF library installed (and you were never going to get away with an Argo QC package without it). There is no pre-computed land raster mask available but you can compute one reasonably easily using the ETOPO1 data set. I’m going to prep the NetCDF using the stars package starting from the grid-registered GeoTIFF version of the ETOPO1 data set.\n\n\ncurl::curl_download(\n \"https://www.ngdc.noaa.gov/mgg/global/relief/ETOPO1/data/ice_surface/grid_registered/georeferenced_tiff/ETOPO1_Ice_g_geotiff.zip\",\n \"ETOPO1_Ice_g_geotiff.zip\"\n)\n\nunzip(\"ETOPO1_Ice_g_geotiff.zip\", exdir = \".\")\n\n\n\nI’m using a GeoTIFF version because it’s a little easier to load into R. The stars package takes care of the details but we do need to create the vector of latitude/longitude cell minimum values ourselves. The magic 1/60 here is one arc minute (the resolution of the data set).\n\n\ngrid <- stars::read_stars(\"ETOPO1_Ice_g_geotiff.tif\", proxy = FALSE)\nis_land <- grid > 0\n\n\n\nI’m using ncdf4 to write this but you can (and probably should) use the RNetCDF package because it’s more actively maintained and, in some cases, much faster. Note that the y values are in reverse order (north to south).\n\n\nlibrary(ncdf4)\n\ndim_x <- ncdim_def(\"longitude\", \"degrees\", seq(-180, 180, by = 1/60) - 1/60 / 2)\ndim_y <- ncdim_def(\"latitude\", \"degrees\", rev(seq(-90, 90, by = 1/60) - 1/60 / 2))\nvar_land <- ncvar_def(\n \"is_land\", \"boolean\", \n list(dim_x, dim_y),\n prec = \"byte\", compression = 9\n)\n\nnc <- nc_create(\"ETOPO1_is_land.nc\", vars = list(var_land))\nncvar_put(nc, var_land, vals = is_land[[1]])\nnc_close(nc)\n\n\n\nI turned the compression up big time here because the original grid was 400 MB! That’s unrealistic in terms of data distribution alongside code and way bigger than our compressed WKB version of the Natural Earth ocean boundaries (~3 MB). Compressed, the file is just under 1 MB (!!!). To extract a longitude/latitude ‘is land’ value you have to do a tiny bit of math to find the cell index you’re after and then read the value of that cell.\n\n\nnc <- nc_open(\"ETOPO1_is_land.nc\")\nlon_values <- nc$dim$longitude$vals\nlat_values <- nc$dim$latitude$vals\n\ncell_x <- vapply(profiles$longitude, function(lon) which.min(abs(lon - lon_values)), integer(1))\ncell_y <- vapply(profiles$latitude, function(lat) which.min(abs(lat - lat_values)), integer(1))\n\nprof_is_land <- integer(nrow(profiles))\nfor (i in seq_along(prof_is_land)) {\n prof_is_land[i] = ncvar_get(\n nc, \"is_land\",\n start = c(cell_x[i], cell_y[i]),\n count = c(1, 1)\n )\n}\n\nnc_close(nc)\n\nprof_is_land\n\n\n[1] 1 0\n\nLet’s plot the results to see what we’re up against!\n\n\nplot(st_as_sfc(ocean), xlim = c(-70, -60), ylim = c(41, 49))\npoints(profiles[c(\"longitude\", \"latitude\")], pch = ifelse(prof_is_land, 16, 1))\n\n\n\n\nPython implementation\nBoth approaches can be implemented in Python, including the data preparation step (although I think this is easier in R for both). In particular, the NetCDF version results in a small (1 MB), distributable data file that can be included in a Python package and read via netCDF4 or other NetCDF backend. This doesn’t require a GEOS system install and might be easier to convince IT folks to work with.
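\nTo make the Python side concrete, here is a minimal sketch of the nearest-cell lookup, assuming the ETOPO1_is_land.nc file produced by the R code above (dimension order can differ between NetCDF writers, so the sketch indexes by dimension name):\nimport numpy as np\nimport netCDF4\n\ndef is_land(longitude, latitude, nc_file='ETOPO1_is_land.nc'):\n    with netCDF4.Dataset(nc_file) as nc:\n        # nearest cell centre along each coordinate\n        cell = {\n            'longitude': int(np.argmin(np.abs(longitude - nc['longitude'][:]))),\n            'latitude': int(np.argmin(np.abs(latitude - nc['latitude'][:]))),\n        }\n        var = nc['is_land']\n        # index in whatever order the file declares its dimensions\n        return bool(var[tuple(cell[dim] for dim in var.dimensions)])\n\n\n\n",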
"preview": "posts/2021-07-02-classifying-an-arbitrary-longitudelatitude-as-land-or-ocean/classifying-an-arbitrary-longitudelatitude-as-land-or-ocean_files/figure-html5/unnamed-chunk-3-1.png",
- "last_modified": "2023-10-17T13:38:50+00:00",
+ "last_modified": "2023-10-17T15:31:44+00:00",
"input_file": {},
"preview_width": 1248,
"preview_height": 768
@@ -102,7 +102,7 @@
"categories": [],
"contents": "\nIn preparation for writing some real-time quality control code, the question arose of how to check modified Argo files to make sure that they conform to the specification in the Argo User’s Manual. The official tool is hosted on Ifremer and is written in Java. When the latest version is downloaded you can run a shell command that will check one or more files. Running a bash shell, this can be done in a few lines:\n# download and unpack the tool in the current working directory\ncurl https://www.seanoe.org/data/00344/45538/data/83774.tar.gz | tar -xz\n\n# set the working directory\ncd format_control_1-17\n\n# download a test Argo file to check\ncurl -o R4902533_001.nc \\\n https://data-argo.ifremer.fr/dac/meds/4902533/profiles/R4902533_001.nc\n\n# ...and check it\njava -cp ./resources:./jar/formatcheckerClassic-1.17-jar-with-dependencies.jar \\\n -Dapplication.properties=application.properties \\\n -Dfile.encoding=UTF8 \\\n oco.FormatControl \\\n R4902533_001.nc\n\n\n CO-03-08-03<\/function>\n Control file data format<\/comment>\n 23/06/2021 12:50:59<\/date>\n 1.17<\/application_version>\n R4902533_001.nc<\/netcdf_file>\n Argo_Prof_c_v3.1_AUM_3.1_20201104.xml<\/rules_file>\n Argo float vertical profile<\/title>\n 3.1<\/user_manual_version>\n Argo profile<\/data_type>\n 3.1<\/format_version>\n The variable \"LATITUDE\" is not correct: attribute \"reference\" forbidden<\/file_error>\n The variable \"LATITUDE\" is not correct: attribute \"coordinate_reference_frame\" forbidden<\/file_error>\n The variable \"LONGITUDE\" is not correct: attribute \"reference\" forbidden<\/file_error>\n The variable \"LONGITUDE\" is not correct: attribute \"coordinate_reference_frame\" forbidden<\/file_error>\n The optional variable \"PRES\" is not correct: attribut \"coordinate_reference_frame\" forbidden<\/file_error>\n The value of the attribute of variable \"PRES_ADJUSTED:axis\" is not correct: \"Z\" expected<\/file_error>\n no<\/file_compliant>\n ok<\/status>\n<\/coriolis_function_report>\nIf you’re on Windows and you’re running in PowerShell or good ol’ cmd.exe, you can just run command.bat R4902533_001.nc (although you’ll need to download and extract the tool separately); a shell wrapper for Linux is also distributed but appears to hard-code the location of java to the developer’s computer so you won’t be able to run it without some modification. If you’re on Windows and running Git Bash, you’ll need to replace the : separating the class paths with \\; because that’s how the Java interpreter expects paths to be separated on Windows (and because ; is a special character in bash so you need to escape it with \\).\nIf you’re writing QC code in Python you can run the tool directly using the subprocess module from the standard library.\nimport subprocess\nimport os\nimport tempfile\n\n# handle the platform-dependence of the class-path separator\nif os.name == 'nt':\n classpath_sep = ';'\nelse:\n classpath_sep = ':'\n\nclass_path_rel = ('./resources', './jar/formatcheckerClassic-1.17-jar-with-dependencies.jar')\nclass_path = classpath_sep.join(class_path_rel)\n\n# construct arguments as a list()\nargs = [\n 'java', '-cp', class_path,\n '-Dapplication.properties=application.properties',\n '-Dfile.encoding=UTF8',\n 'oco.FormatControl',\n 'R4902533_001.nc'\n]\n\nresult = subprocess.run(args, cwd='format_control_1-17', capture_output=True)\nresult.check_returncode()\nThis will run the tool and check for a non-zero status code (e.g., java fails to start). 
The bytes of the XML output are available as result.stdout (if you want the output to go directly to stdout, omit capture_output=True). You can then parse the results using the xml.etree.ElementTree module:\nfrom xml.etree import ElementTree\nimport io\n\nroot = ElementTree.parse(io.BytesIO(result.stdout)).getroot()\nerrors = [el.text for el in root.findall('file_error')]\nerrors\n['The variable \"LATITUDE\" is not correct: attribute \"reference\" forbidden',\n 'The variable \"LATITUDE\" is not correct: attribute \"coordinate_reference_frame\" forbidden',\n 'The variable \"LONGITUDE\" is not correct: attribute \"reference\" forbidden',\n 'The variable \"LONGITUDE\" is not correct: attribute \"coordinate_reference_frame\" forbidden',\n 'The optional variable \"PRES\" is not correct: attribut \"coordinate_reference_frame\" forbidden',\n 'The value of the attribute of variable \"PRES_ADJUSTED:axis\" is not correct: \"Z\" expected']\nYou can use a similar trick to run and parse the results in R with the help of the processx and xml2 packages.\n\n\nclasspath_sep <- if (Sys.info()[\"sysname\"] == \"Windows\") \";\" else \":\"\nclasspath <- paste(\n \"./resources\",\n \"./jar/formatcheckerClassic-1.17-jar-with-dependencies.jar\",\n sep = classpath_sep\n)\n\nargs <- c(\n \"-cp\", classpath,\n \"-Dapplication.properties=application.properties\",\n \"-Dfile.encoding=UTF8\",\n \"oco.FormatControl\",\n \"R4902533_001.nc\"\n)\nresult <- processx::run(\"java\", args, wd = \"format_control_1-17\")\n\nroot <- xml2::read_xml(result$stdout)\nerrors <- xml2::xml_text(xml2::xml_find_all(root, \"file_error\"))\nerrors\n\n\n[1] \"The variable \\\"LATITUDE\\\" is not correct: attribute \\\"reference\\\" forbidden\" \n[2] \"The variable \\\"LATITUDE\\\" is not correct: attribute \\\"coordinate_reference_frame\\\" forbidden\" \n[3] \"The variable \\\"LONGITUDE\\\" is not correct: attribute \\\"reference\\\" forbidden\" \n[4] \"The variable \\\"LONGITUDE\\\" is not correct: attribute \\\"coordinate_reference_frame\\\" forbidden\" \n[5] \"The optional variable \\\"PRES\\\" is not correct: attribut \\\"coordinate_reference_frame\\\" forbidden\"\n[6] \"The value of the attribute of variable \\\"PRES_ADJUSTED:axis\\\" is not correct: \\\"Z\\\" expected\" \n\nThere are some complexities that aren’t handled by the simple cases above. The tool and/or the rules used to determine what constitutes a valid file are updated several times a year, and keeping the tool up-to-date requires a manual check if it’s installed as above. These bits of code also assume that when you type java at a terminal you actually get a Java interpreter! This is not always the case, and configuring a Java VM can be complex. 
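Before shelling out, it’s worth failing fast if no working java is available. A minimal sketch in R (note that Sys.which() returns \"\" when the executable isn’t on the PATH, and that java -version prints to stderr and exits with status 0 on a working install):\n\n\n# a quick guard against a missing or broken java install\njava <- Sys.which(\"java\")\nif (java == \"\") stop(\"no 'java' executable found on the PATH\")\nprocessx::run(java, \"-version\")\n\n\n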
To solve these issues I put together a proof-of-concept Python tool + Docker image that boils the above steps down to a one-liner:\n# once per computer: docker pull paleolimbot/argo-checker\ncurl -s https://data-argo.ifremer.fr/dac/meds/4902533/profiles/R4902533_001.nc | \\\n docker run --rm paleolimbot/argo-checker --update check > result.xml\nSearching for installed tool in '/argo-checker'\nInstalled tool found at '/argo-checker/tool_83774'.\nChecking for newer version, as requested...\nChecking for latest tool at ...\nLatest tool source is located at .\nVersion is latest version.\nRunning 'java -cp ./resources:./jar/formatcheckerClassic-1.17-jar-with-dependencies.jar -Dapplication.properties=application.properties -Dfile.encoding=UTF8 oco.FormatControl /tmp/tmpytrg3qyn.nc'\nYou can then process ‘result.xml’ using whatever tool you’d like! Probably a more robust option would be to rewrite the Java tool in Python, but that’s a battle for another day.\n\n\n\n",
"preview": {},
- "last_modified": "2023-10-17T13:38:50+00:00",
+ "last_modified": "2023-10-17T15:31:44+00:00",
"input_file": {}
},
{
@@ -119,7 +119,7 @@
"categories": [],
"contents": "\nIn writing the last post, two floats were identified (coriolis/6902966 and coriolis/6902957) that do an almost perfect loop-the-loop off the coast of Brazil!\n\n\nlibrary(argoFloats)\nfull_index <- getIndex()\nfloat1 <- subset(full_index, ID = 6902966)\nfloat2 <- subset(full_index, ID = 6902957)\n\nplot(merge(float1, float2))\nlines(float1@data$index[c(\"longitude\", \"latitude\")])\nlines(float2@data$index[c(\"longitude\", \"latitude\")])\n\n\n\n\nThe first question we might want to ask is why!?. Eddies are common in ocean currents but it seems like an awfully big coincidence that these two floats did the exact same loop-the-loop if they weren’t in the same eddy at the same time. Using argodata, we can investigate some of the details. I’ll also use the tidyverse for plotting and data frame manipulation.\n\n\nlibrary(tidyverse)\nlibrary(argodata)\n\n\n\nThe first question we might want to ask is: were these floats in the same place at the same time? We can plot the dates to see if they are at least cycling at the same time:\n\n\nprof <- argo_global_prof() %>%\n argo_filter_float(c(6902966, 6902957)) %>% \n argo_extract_path_info()\n\nggplot(prof, aes(date, file_float)) +\n geom_point()\n\n\n\n\nIt looks like they are! Using gganimate, we can do a quick visual check if they also align in space.\n\n\nggplot(prof, aes(longitude, latitude, group = file_float)) +\n geom_path() +\n geom_point() +\n gganimate::transition_reveal(date) +\n labs(title = \"{ frame_along }\")\n\n\n\n\nIt looks like they were released at the same time right next to each other, which explains why they tracked so closely (and why they get farther apart as they move west).\nAnother curiosity is the distance between profiles, which changes a few times over the course of the float’s lifetime. We could just measure the time between profiles, but to see what the float intended to do we can also check the configuration parameters. In argodata these are available via argo_meta_config_param():\n\n\nmeta <- argo_global_meta() %>% \n argo_filter_float(c(6902966, 6902957))\n\nmeta %>% \n argo_meta_config_param() %>% \n argo_extract_path_info() %>% \n select(file_float, n_missions, config_parameter_name, config_parameter_value) %>% \n pivot_wider(names_from = n_missions, values_from = config_parameter_value) %>% \n knitr::kable()\n\n\nfile_float\nconfig_parameter_name\n1\n2\n3\n4\n5\n6\n6902957\nCONFIG_CycleTime_hours\n26.5\n24\n240\n24\nNA\nNA\n6902957\nCONFIG_ParkPressure_dbar\n200.0\n200\n1000\n1000\nNA\nNA\n6902957\nCONFIG_ProfilePressure_dbar\n1000.0\n1000\n2000\n2000\nNA\nNA\n6902957\nCONFIG_DescentToParkPresSamplingTime_seconds\n10.0\n0\n0\n0\nNA\nNA\n6902957\nCONFIG_Direction_NUMBER\n3.0\n1\n1\n1\nNA\nNA\n6902966\nCONFIG_CycleTime_hours\n38.5\n24\n240\n24\n48\n120\n6902966\nCONFIG_ParkPressure_dbar\n200.0\n200\n1000\n100\n1000\n1000\n6902966\nCONFIG_ProfilePressure_dbar\n1000.0\n1000\n2000\n100\n1000\n1000\n6902966\nCONFIG_SurfaceTime_HH\n15.0\n15\n15\n15\n15\n7\n6902966\nCONFIG_DescentToParkPresSamplingTime_seconds\n10.0\n0\n0\n0\n0\n0\n6902966\nCONFIG_InAirMeasurementPeriodicity_NUMBER\n5.0\n5\n5\n5\n5\n2\n6902966\nCONFIG_Direction_NUMBER\n3.0\n1\n1\n1\n1\n1\n\nIt looks like these floats were set to sample rapidly (every 24 hours) and changed a few times over the course of the float lifespan. This explains the loop-the-loop more convincingly as an eddy as the usual float cycling time of 10 days would mean it was a very slow eddy (credit to Clark Richards for that observation!).\n\n\n\n",
"preview": "posts/2021-06-07-an-argo-loop-the-loop/an-argo-loop-the-loop_files/figure-html5/unnamed-chunk-1-1.png",
- "last_modified": "2023-10-17T13:38:50+00:00",
+ "last_modified": "2023-10-17T15:31:44+00:00",
"input_file": {},
"preview_width": 1248,
"preview_height": 768
@@ -138,7 +138,7 @@
"categories": [],
"contents": "\nThis post is a demonstration of getting a complex subset of Argo profiles. For a paper we’re working on, we need a collection of long, well-characterized, coastal Argo float trajectories. For the purposes of the post, I’m going to define long as >500 km in length and containing more than 100 locations, well-characterized as a maximum of 100 km between profiles, and coastal as >80% of locations within 400 km of the coast. These criteria aren’t trivial to calculate!\nTo go about this, I’m going to use the argodata, s2, and tidyverse packages. The s2plot package is experimental but helps visualize the results of the subset.\n\n\nlibrary(tidyverse)\nlibrary(s2)\nlibrary(argodata) # remotes::install_github(\"ArgoCanada/argodata\")\nlibrary(s2plot) # remotes::install_github(\"paleolimbot/s2plot\")\n\n\n\nWe’re interested in trajectories, but we also don’t want to download and read every single trajectory file in Argo! The profile index contains best-guess profile locations and so I’ll use it here to approximate the trajectories for the purposes of selecting representative floats. In argodata, this is available from argo_global_prof(), but we’ll also need to create the s2 objects tha represent the point location. There’s a few invalid nodata values that we also have to consider, and I use argo_extract_path_info() to pull the float and cycle information out of the filename.\n\n\nprof <- argo_global_prof() %>% \n mutate(\n longitude = if_else(longitude %in% c(-999.999, -99.999), NA_real_, longitude),\n latitude = na_if(latitude, -99.999),\n geog = s2_geog_point(longitude, latitude)\n ) %>%\n filter(is.finite(longitude), is.finite(latitude)) %>% \n argo_extract_path_info()\n\nprof %>% \n select(file_float, file_cycle, geog)\n\n\n# A tibble: 2,443,945 x 3\n file_float file_cycle geog \n \n 1 13857 1 \n 2 13857 2 \n 3 13857 3 \n 4 13857 4 \n 5 13857 5 \n 6 13857 6 \n 7 13857 7 \n 8 13857 8 \n 9 13857 9 \n10 13857 10 \n# ... with 2,443,935 more rows\n\nFor the coastal subset we need some information that we don’t have. While argoFloats and argodata have built-in functions to subset by distance from a point or line, distance from a polygon is complicated, and even more complicated if that polygon is on the sphere. The s2 package contains a version of the Natural Earth 1:10,000,000 countries data set), which we can aggregate to form a definition of “land”.\n\n\nland <- s2_data_countries() %>%\n s2_union_agg()\n\n\n\nThe s2 package has a dedicated function for “distance within”; however, if we use it here it takes an unreasonable amount of time on the ~2.5 million profiles in Argo. We can simplify the definition of “within 400 km of land” using s2_buffer_cells() with a low min_level.\n\n\nland_400km_approx <- land %>% \n s2_buffer_cells(400 * 1000, min_level = 3)\n\n\n\nUsing the land to preselect profiles that might be within 400 km before computing the exact subset saves almost half an hour of computation time here.\n\n\nprof_coastal <- prof %>%\n filter(s2_intersects(geog, land_400km_approx)) %>% \n filter(s2_dwithin(geog, land, distance = 400 * 1000))\n\n\n\nWith a visual check, it looks like this worked!\n\n\ns2plot(land_400km_approx)\ns2plot(land, add = T, border = \"blue\")\ns2plot(prof_coastal$geog, add = T, pch = 16, cex = 0.5)\n\n\n\n\nWe also need a few more pieces of information for our long and well-characterized criteria. 
We also need a few more pieces of information for our long and well-characterized criteria. First, we need to sort by float and cycle and compute distances between successive cycles.\n\n\nprof_coastal_traj <- prof %>%\n arrange(file_float, file_cycle) %>% \n # only use the coastal subset\n mutate(\n is_coastal = file %in% prof_coastal$file\n ) %>% \n # takes care of duplicate profiles that have a realtime\n # and delayed profile in the index.\n group_by(file_float, file_cycle) %>%\n slice(1) %>%\n # compute distance between adjacent profiles\n group_by(file_float) %>% \n mutate(\n dist_from_last = s2_distance(geog, lag(geog))\n )\n\n\n\nNow we can apply our criteria. I’m using a grouped filter here so that I can use aggregation functions (like sum(), max(), and n()) but retain all the profile information from which the criteria were calculated. More than 100 profiles per float becomes n() > 100, >500 km in length becomes sum(dist_from_last) > (500 * 1000), 80% of profiles within 400 km of the coast becomes (sum(is_coastal) / n()) > 0.8, and maximum distance of 100 km between profiles becomes max(dist_from_last) < (100 * 1000).\n\n\nprof_coastal_traj_filter <- prof_coastal_traj %>% \n group_by(file_float) %>% \n filter(\n n() > 100,\n sum(dist_from_last, na.rm = TRUE) > (500 * 1000),\n (sum(is_coastal) / n()) > 0.8,\n max(dist_from_last, na.rm = TRUE) < (100 * 1000)\n )\n\nn_distinct(prof_coastal_traj_filter$file_float)\n\n\n[1] 659\n\nFrom these criteria, we get 659 floats! That’s a little lower than I was hoping for, so for the actual paper we’ll probably relax those numbers a little so that we have ~1,000 floats to work with. Once again, a visual check:\n\n\ns2plot(\n land,\n projection = s2plot_projection_orthographic(s2_geog_point(-40, 10))\n)\n\nprof_coastal_traj_filter %>% \n group_by(file_float) %>%\n summarise(traj = s2_make_line(longitude, latitude)) %>% \n pull() %>% \n s2plot(add = T, col = \"blue\")\n\n\n\n\n\n\n\n",
"preview": "posts/2021-06-04-finding-coastal-argo-trajectories/finding-coastal-argo-trajectories_files/figure-html5/unnamed-chunk-6-1.png",
- "last_modified": "2023-10-17T13:38:50+00:00",
+ "last_modified": "2023-10-17T15:31:44+00:00",
"input_file": {},
"preview_width": 1248,
"preview_height": 768
@@ -157,7 +157,7 @@
"categories": [],
"contents": "\nThese floats were first posted about by Jaimie Harbin, who noted that some interpolations are particularly questionable. This affects Canadian waters more so than others as we have a lot of ice (which prevents floats from communicating their position) and a complex coastline (which the default linear interpolation frequently crosses).\n\n\nlibrary(argoFloats)\nindex1 <- subset(getIndex(), ID = '6902728')\nplot(index1) \n\n\n\n\n\n\nbindex <- subset(getIndex(filename = 'bgc'), ID = '6902967')\nplot(bindex)\n\n\n\n\nThese points are documented as “interpolated” in the individual .nc files, identified as the POSITION_QC variable having a value of 8.\n\n\nlibrary(argodata)\nlibrary(dplyr)\nlibrary(ggplot2)\n\nprofiles <- bind_rows(\n argo_global_bio_prof(),\n argo_global_prof()\n) %>%\n argo_filter_float(c(6902967, 6902728)) %>%\n argo_prof_prof() %>%\n group_by(date_location, latitude, longitude) %>%\n slice(1) %>%\n ungroup()\n\nggplot(profiles, aes(longitude, latitude)) +\n geom_point(aes(col = position_qc)) +\n theme_void()\n\n\n\n\nMore detail about the profile positions:\n\n\nrmarkdown::paged_table(profiles)\n\n\n\n\n\n\n\n\n",
"preview": "posts/2021-05-14-a-questionable-interpolation/a-questionable-interpolation_files/figure-html5/unnamed-chunk-1-1.png",
- "last_modified": "2023-10-17T13:38:50+00:00",
+ "last_modified": "2023-10-17T15:31:44+00:00",
"input_file": {},
"preview_width": 1248,
"preview_height": 768
@@ -176,7 +176,7 @@
"categories": [],
"contents": "\nIntroduction\nIn July 2017, A DFO Argo float (of NOVA type, containing a pumped SBE CTD) was deployed on the Labrador Shelf, as part of an exercises related to the DFO/ECCC/DND CONCEPTS program (Canadian Operational Network of Coupled Environmental PredicTion Systems). The float was programmed to execute daily profiles between the surface and the bottom until it exhausted its batteries.\nThe float, with WMO ID 4902426, executed 389 total profiles, surviving a winter season under the ice. Float data and trajectory can be explored through the EuroArgo “Fleet monitoring” site, found at: https://fleetmonitoring.euro-argo.eu/float/4902426, or by downloading the netCDF files directly from https://data-argo.ifremer.fr/dac/meds/4902426/\nData summary\nA summary of the data found in the combined profile file 4902426_prof.nc is provided below (using the R oce package developed by myself and Dan Kelley). Another approach, which permits individual access to the single-profile files provided on the Argo DAC, is to use the in development (but quite mature) argoFloats package.\nTo see a crude map of the float profile locations:\n\n\n\nFor a quick “section”-style plot of the temperature, salinity, and density data:\n\n\n\n\n\n\n",
"preview": "posts/2021-05-14-labrador-shelf-dfo-argo-float/labrador-shelf-dfo-argo-float_files/figure-html5/unnamed-chunk-1-1.png",
- "last_modified": "2023-10-17T13:38:50+00:00",
+ "last_modified": "2023-10-17T15:31:44+00:00",
"input_file": {},
"preview_width": 1248,
"preview_height": 768
diff --git a/search.json b/search.json
index 2905df9..53ac046 100644
--- a/search.json
+++ b/search.json
@@ -5,21 +5,21 @@
"title": "About",
"author": [],
"contents": "\nThis site is an unofficial collection of posts related to the development of software to support the Argo international research program. Several projects related to this blog include argoFloats, argodata, and bgcArgoDMQC.\n\n\n\n",
- "last_modified": "2023-10-17T13:40:04+00:00"
+ "last_modified": "2023-10-17T15:33:11+00:00"
},
{
"path": "deployment.html",
"title": "Deployment Planning",
"author": [],
- "contents": "\nThis page is used for Canadian Argo groups to coordinate deployment planning, in addition to OceanOps. To submit plans to the website, see instructions on the github page.\nThe map below shows completed and planned deployments for 2023, coloured by institution. Dots with a buoy symbol are locations of completed deployments (deployment location, not most recent profile location), while dots with the loading symbol are planned deployments.\n\n\n\n\n\n\nDeployments since Jan 1, 2023\n\nProgram\nInstitute\nModel\nDate\nLatitude\nLongitude\nShip\nIMEI\nWMO\nSerial No.\nArgo Canada\nIOS\nPROVOR\n06 May, 2023\n49.4328\n-136.6640\nJOHN P. TULLY\n300125061075760\n4902596\nP41305-21CA001\nArgo ONC\nONC\nDeep ARVOR\n07 May, 2023\n49.5690\n-138.6648\nJOHN P. TULLY\n300534062315340\n4902634\nAD2700-23CA002\nArgo ONC\nONC\nDeep ARVOR\n09 May, 2023\n49.9980\n-145.0060\nJOHN P. TULLY\n300534063506540\n4902633\nAD2700-23CA001\nArgo Canada\nIOS\nPROVOR\n09 May, 2023\n49.9987\n-145.0020\nJOHN P. TULLY\n300125061070720\n4902597\nP41305-21CA002\nArgo Canada\nBIO\nARVOR\n03 Jun, 2023\n59.9763\n-53.5576\nCAPT. JACQUES CARTIER\n300534064009980\n4902650\nAI2632-23CA002\nArgo Canada\nBIO\nARVOR\n03 Jun, 2023\n59.9578\n-54.5413\nCAPT. JACQUES CARTIER\n300534060900650\n4902604\nAI2600-22CA002\nArgo Canada\nBIO\nARVOR\n03 Jun, 2023\n59.9554\n-54.5440\nCAPT. JACQUES CARTIER\n300534063999920\n4902670\nAI3500-23CA001\nArgo Canada\nBIO\nARVOR\n03 Jun, 2023\n59.9482\n-54.5468\nCAPT. JACQUES CARTIER\n300534063798900\n4902671\nAI3500-23CA002\nArgo Canada\nBIO\nARVOR\n03 Jun, 2023\n59.5010\n-55.0080\nCAPT. JACQUES CARTIER\n300534064005560\n4902652\nAI2632-23CA004\nArgo Canada\nIOS\nARVOR\n04 Jun, 2023\n51.3783\n-130.8420\nJOHN P. TULLY\n300534060116080\n4902614\nAI2632-22CA012\nArgo Canada\nBIO\nARVOR\n10 Jun, 2023\n41.4059\n-60.6433\nCAPT. JACQUES CARTIER\n300534062470430\n4902606\nAI2600-22CA004\nArgo ONC\nONC\nDeep ARVOR\n10 Jul, 2023\n52.1926\n-139.0757\nSIR WILFRID LAURIER\n300534063607820\n4902635\nAD2700-23CA003\nArgo ONC\nONC\nDeep ARVOR\n11 Jul, 2023\n53.1833\n-145.2405\nSIR WILFRID LAURIER\n300534063500390\n4902636\nAD2700-23CA004\nArgo ONC\nONC\nDeep ARVOR\n12 Jul, 2023\n53.7655\n-151.2152\nSIR WILFRID LAURIER\n300534063609820\n4902637\nAD2700-23CA005\nArgo Canada\nIOS\nARVOR\n12 Aug, 2023\n48.8289\n-128.7930\nJOHN P. TULLY\n300534064101170\n4902663\nAI2632-23CA008\nArgo Canada\nIOS\nARVOR\n30 Aug, 2023\n49.4498\n-127.9490\nJOHN P. TULLY\n300534064000550\n4902662\nAI2632-23CA007\nArgo Canada\nBIO\nPROVOR\n15 Sep, 2023\n43.4790\n-57.5480\nDISCOVERY\n300125061370340\n4902627\nP43205-22CA002\nArgo Canada\nBIO\nPROVOR\n15 Sep, 2023\n43.7820\n-57.8310\nDISCOVERY\n300125061077690\n4902600\nP41305-21CA005\nArgo Canada\nIOS\nARVOR\n19 Sep, 2023\n72.8810\n-135.9970\nLOUIS S. ST. LAURENT\n300534062477390\n4902610\nAI2600-22CA008\nArgo Canada\nBIO\nPROVOR\n24 Sep, 2023\n42.4720\n-61.4350\nDISCOVERY\n300125061078780\n4902601\nP41305-21CA006\nArgo Canada\nIOS\nARVOR\n29 Sep, 2023\n52.0660\n-133.5610\nJOHN P. TULLY\n300534064107010\n4902660\nAI2632-23CA005\n\nPlanned Deployments\n\nProgram\nInstitute\nModel\nDate\nLatitude\nLongitude\nShip\nIMEI\nWMO\nSerial No.\nArgo Canada\nIOS\nARVOR\n03 Oct, 2023\n73.04\n-149.959\nLOUIS S. ST. 
LAURENT\n300534062474720\n4902611\nAI2600-22CA009\nArgo Canada\nBIO\nARVOR\n10 Oct, 2023\n42.50\n-50.000\nDISCOVERY\n300534060123910\n4902689\nAI3500-20CA001\nArgo Canada\nBIO\nARVOR\n15 Oct, 2023\n41.50\n-49.000\nDISCOVERY\n300534062473430\n4902608\nAI2600-22CA006\nArgo Dalhousie\nDal\nPROVOR\n14 Nov, 2023\n56.75\n-52.460\nMERIAN\n300125062902880\n4902684\nP53865-23CA001\nArgo Dalhousie\nDal\nPROVOR\n21 Nov, 2023\n56.33\n-52.890\nMERIAN\n300125062426150\n4902685\nP53865-23CA002\nArgo Canada\nBIO\nARVOR\n22 Nov, 2023\n5.40\n-18.800\nDISCOVERY\n300534062475420\n4902603\nAI2600-22CA001\nArgo Canada\nBIO\nARVOR\n26 Nov, 2023\n3.40\n-16.200\nDISCOVERY\n300534062475420\n4902605\nAI2600-22CA003\nArgo Canada\nBIO\nARVOR\n01 Dec, 2023\n-22.00\n-1.200\nDISCOVERY\n300534062473410\n4902617\nAI3500-22CA001\nArgo Canada\nBIO\nARVOR\n01 Dec, 2023\n-19.00\n-3.000\nDISCOVERY\n300534062477740\n4902607\nAI2600-22CA005\nArgo Canada\nIOS\nARVOR\n01 Dec, 2023\n26.20\n-163.300\nHMCS VANCOUVER\n300534061174520\n4902558\nAI3500-23CA003\nArgo Canada\nIOS\nARVOR\n01 Dec, 2023\n26.00\n-169.200\nHMCS VANCOUVER\n300534060126630\n4902548\nAI2600-20CA033\nArgo Canada\nBIO\nARVOR\n07 Dec, 2023\n-24.60\n0.500\nDISCOVERY\n300534062476310\n4902618\nAI3500-22CA002\nArgo Canada\nBIO\nARVOR\n07 Dec, 2023\n-27.60\n3.700\nDISCOVERY\n300534062476460\n4902619\nAI3500-22CA002\nArgo ONC\nONC\nDeep ARVOR\n15 Jan, 2024\n-57.00\n-61.000\nNA\n300534063500400\n4902638\nAD2700-23CA006\nArgo ONC\nONC\nDeep ARVOR\n15 Jan, 2024\n-60.00\n-59.000\nNA\n300534063601830\n4902637\nAD2700-23CA007\n\nOther Deployment Information\nAlthough there is not yet a designated cruise or location, Argo Canada is sending 6 ARVOR floats to Cape Town, South Africa via the RRS Discovery. These floats will be deployed in the Western Indian Ocean where there is a significant gap in the Argo array.\nInventory\n\n\n\nThe table below shows the number of “in stock” floats for each program/institution.\n\nProgram\nInstitute\nARVOR\nARVOR+DO\nARVOR-RBR\nDeep ARVOR\nPROVOR\nArgo Canada\nBIO\n12\n2\n3\n0\n10\nArgo Canada\nIOS\n10\n1\n3\n0\n7\nArgo ONC\nONC\n0\n0\n0\n13\n0\nArgo Dalhousie\nDal\n0\n0\n0\n0\n5\n\nThis table shows what the standing stock of floats will be for each program/institution will be following the completion of the deployments listed in the “Planned Deployments” section. For BIO, the 6 floats being sent to Cape Town are also being accounted for in this table. For simplicity, all types of ARVOR models (except deep) are combined here.\n\nProgram\nInstitute\nARVOR\nDeep ARVOR\nPROVOR\nArgo Canada\nBIO\n3\n0\n10\nArgo Canada\nIOS\n11\n0\n7\nArgo ONC\nONC\n0\n11\n0\nArgo Dalhousie\nDal\n0\n0\n3\n\n\n\n\n",
- "last_modified": "2023-10-17T13:40:24+00:00"
+ "contents": "\nThis page is used for Canadian Argo groups to coordinate deployment planning, in addition to OceanOps. To submit plans to the website, see instructions on the github page.\nThe map below shows completed and planned deployments for 2023, coloured by institution. Dots with a buoy symbol are locations of completed deployments (deployment location, not most recent profile location), while dots with the loading symbol are planned deployments.\n\n\n\n\n\n\nDeployments since Jan 1, 2023\n\nProgram\nInstitute\nModel\nDate\nLatitude\nLongitude\nShip\nIMEI\nWMO\nSerial No.\nArgo Canada\nIOS\nPROVOR\n06 May, 2023\n49.4328\n-136.6640\nJOHN P. TULLY\n300125061075760\n4902596\nP41305-21CA001\nArgo ONC\nONC\nDeep ARVOR\n07 May, 2023\n49.5690\n-138.6648\nJOHN P. TULLY\n300534062315340\n4902634\nAD2700-23CA002\nArgo ONC\nONC\nDeep ARVOR\n09 May, 2023\n49.9980\n-145.0060\nJOHN P. TULLY\n300534063506540\n4902633\nAD2700-23CA001\nArgo Canada\nIOS\nPROVOR\n09 May, 2023\n49.9987\n-145.0020\nJOHN P. TULLY\n300125061070720\n4902597\nP41305-21CA002\nArgo Canada\nBIO\nARVOR\n03 Jun, 2023\n59.9763\n-53.5576\nCAPT. JACQUES CARTIER\n300534064009980\n4902650\nAI2632-23CA002\nArgo Canada\nBIO\nARVOR\n03 Jun, 2023\n59.9578\n-54.5413\nCAPT. JACQUES CARTIER\n300534060900650\n4902604\nAI2600-22CA002\nArgo Canada\nBIO\nARVOR\n03 Jun, 2023\n59.9554\n-54.5440\nCAPT. JACQUES CARTIER\n300534063999920\n4902670\nAI3500-23CA001\nArgo Canada\nBIO\nARVOR\n03 Jun, 2023\n59.9482\n-54.5468\nCAPT. JACQUES CARTIER\n300534063798900\n4902671\nAI3500-23CA002\nArgo Canada\nBIO\nARVOR\n03 Jun, 2023\n59.5010\n-55.0080\nCAPT. JACQUES CARTIER\n300534064005560\n4902652\nAI2632-23CA004\nArgo Canada\nIOS\nARVOR\n04 Jun, 2023\n51.3783\n-130.8420\nJOHN P. TULLY\n300534060116080\n4902614\nAI2632-22CA012\nArgo Canada\nBIO\nARVOR\n10 Jun, 2023\n41.4059\n-60.6433\nCAPT. JACQUES CARTIER\n300534062470430\n4902606\nAI2600-22CA004\nArgo ONC\nONC\nDeep ARVOR\n10 Jul, 2023\n52.1926\n-139.0757\nSIR WILFRID LAURIER\n300534063607820\n4902635\nAD2700-23CA003\nArgo ONC\nONC\nDeep ARVOR\n11 Jul, 2023\n53.1833\n-145.2405\nSIR WILFRID LAURIER\n300534063500390\n4902636\nAD2700-23CA004\nArgo ONC\nONC\nDeep ARVOR\n12 Jul, 2023\n53.7655\n-151.2152\nSIR WILFRID LAURIER\n300534063609820\n4902637\nAD2700-23CA005\nArgo Canada\nIOS\nARVOR\n12 Aug, 2023\n48.8289\n-128.7930\nJOHN P. TULLY\n300534064101170\n4902663\nAI2632-23CA008\nArgo Canada\nIOS\nARVOR\n30 Aug, 2023\n49.4498\n-127.9490\nJOHN P. TULLY\n300534064000550\n4902662\nAI2632-23CA007\nArgo Canada\nBIO\nPROVOR\n15 Sep, 2023\n43.4790\n-57.5480\nDISCOVERY\n300125061370340\n4902627\nP43205-22CA002\nArgo Canada\nBIO\nPROVOR\n15 Sep, 2023\n43.7820\n-57.8310\nDISCOVERY\n300125061077690\n4902600\nP41305-21CA005\nArgo Canada\nIOS\nARVOR\n19 Sep, 2023\n72.8810\n-135.9970\nLOUIS S. ST. LAURENT\n300534062477390\n4902610\nAI2600-22CA008\nArgo Canada\nBIO\nPROVOR\n24 Sep, 2023\n42.4720\n-61.4350\nDISCOVERY\n300125061078780\n4902601\nP41305-21CA006\nArgo Canada\nIOS\nARVOR\n29 Sep, 2023\n52.0660\n-133.5610\nJOHN P. TULLY\n300534064107010\n4902660\nAI2632-23CA005\nArgo Canada\nIOS\nARVOR\n03 Oct, 2023\n73.0400\n-149.9590\nLOUIS S. ST. 
LAURENT\n300534062474720\n4902611\nAI2600-22CA009\n\nPlanned Deployments\n\nProgram\nInstitute\nModel\nDate\nLatitude\nLongitude\nShip\nIMEI\nWMO\nSerial No.\nArgo Canada\nBIO\nARVOR\n10 Oct, 2023\n42.50\n-50.00\nDISCOVERY\n300534060123910\n4902689\nAI3500-20CA001\nArgo Canada\nBIO\nARVOR\n15 Oct, 2023\n41.50\n-49.00\nDISCOVERY\n300534062473430\n4902608\nAI2600-22CA006\nArgo Dalhousie\nDal\nPROVOR\n14 Nov, 2023\n56.75\n-52.46\nMERIAN\n300125062902880\n4902684\nP53865-23CA001\nArgo Dalhousie\nDal\nPROVOR\n21 Nov, 2023\n56.33\n-52.89\nMERIAN\n300125062426150\n4902685\nP53865-23CA002\nArgo Canada\nBIO\nARVOR\n22 Nov, 2023\n5.40\n-18.80\nDISCOVERY\n300534062475420\n4902603\nAI2600-22CA001\nArgo Canada\nBIO\nARVOR\n26 Nov, 2023\n3.40\n-16.20\nDISCOVERY\n300534062475420\n4902605\nAI2600-22CA003\nArgo Canada\nBIO\nARVOR\n01 Dec, 2023\n-22.00\n-1.20\nDISCOVERY\n300534062473410\n4902617\nAI3500-22CA001\nArgo Canada\nBIO\nARVOR\n01 Dec, 2023\n-19.00\n-3.00\nDISCOVERY\n300534062477740\n4902607\nAI2600-22CA005\nArgo Canada\nIOS\nARVOR\n01 Dec, 2023\n26.20\n-163.30\nHMCS VANCOUVER\n300534061174520\n4902558\nAI3500-23CA003\nArgo Canada\nIOS\nARVOR\n01 Dec, 2023\n26.00\n-169.20\nHMCS VANCOUVER\n300534060126630\n4902548\nAI2600-20CA033\nArgo Canada\nBIO\nARVOR\n07 Dec, 2023\n-24.60\n0.50\nDISCOVERY\n300534062476310\n4902618\nAI3500-22CA002\nArgo Canada\nBIO\nARVOR\n07 Dec, 2023\n-27.60\n3.70\nDISCOVERY\n300534062476460\n4902619\nAI3500-22CA002\nArgo ONC\nONC\nDeep ARVOR\n15 Jan, 2024\n-57.00\n-61.00\nNA\n300534063500400\n4902638\nAD2700-23CA006\nArgo ONC\nONC\nDeep ARVOR\n15 Jan, 2024\n-60.00\n-59.00\nNA\n300534063601830\n4902637\nAD2700-23CA007\n\nOther Deployment Information\nAlthough there is not yet a designated cruise or location, Argo Canada is sending 6 ARVOR floats to Cape Town, South Africa via the RRS Discovery. These floats will be deployed in the Western Indian Ocean where there is a significant gap in the Argo array.\nInventory\n\n\n\nThe table below shows the number of “in stock” floats for each program/institution.\n\nProgram\nInstitute\nARVOR\nARVOR+DO\nARVOR-RBR\nDeep ARVOR\nPROVOR\nArgo Canada\nBIO\n12\n2\n3\n0\n10\nArgo Canada\nIOS\n10\n1\n3\n0\n7\nArgo ONC\nONC\n0\n0\n0\n13\n0\nArgo Dalhousie\nDal\n0\n0\n0\n0\n5\n\nThis table shows what the standing stock of floats will be for each program/institution following the completion of the deployments listed in the “Planned Deployments” section. For BIO, the 6 floats being sent to Cape Town are also being accounted for in this table. For simplicity, all types of ARVOR models (except deep) are combined here.\n\nProgram\nInstitute\nARVOR\nDeep ARVOR\nPROVOR\nArgo Canada\nBIO\n3\n0\n10\nArgo Canada\nIOS\n12\n0\n7\nArgo ONC\nONC\n0\n11\n0\nArgo Dalhousie\nDal\n0\n0\n3\n\n\n\n\n",
+ "last_modified": "2023-10-17T15:33:16+00:00"
},
{
"path": "index.html",
"title": "Argo Canada Development Blog",
"author": [],
"contents": "\n\n\n\n",
- "last_modified": "2023-10-17T13:40:25+00:00"
+ "last_modified": "2023-10-17T15:33:17+00:00"
}
],
"collections": ["posts/posts.json"]
diff --git a/sitemap.xml b/sitemap.xml
index bab0a38..b06e2f2 100644
--- a/sitemap.xml
+++ b/sitemap.xml
@@ -2,54 +2,54 @@
https://argocanada.github.io/blog/about.html
- 2023-10-17T13:38:50+00:00
+ 2023-10-17T15:31:44+00:00
https://argocanada.github.io/blog/deployment.html
- 2023-10-17T13:38:50+00:00
+ 2023-10-17T15:31:44+00:00
https://argocanada.github.io/blog/
- 2023-10-17T13:38:50+00:00
+ 2023-10-17T15:31:44+00:00
https://argocanada.github.io/blog/posts/2023-07-13-diversifying-argo-surfacing-times/
- 2023-10-17T13:38:50+00:00
+ 2023-10-17T15:31:44+00:00
https://argocanada.github.io/blog/posts/2021-11-10-correcting-oxygen-response-time-error-on-an-argo-float/
- 2023-10-17T13:38:50+00:00
+ 2023-10-17T15:31:44+00:00
https://argocanada.github.io/blog/posts/2021-10-22-argofloats-an-r-package-for-argo-researchers/
- 2023-10-17T13:38:50+00:00
+ 2023-10-17T15:31:44+00:00
https://argocanada.github.io/blog/posts/2021-10-18-implemeting-real-time-chla-qc-in-python/
- 2023-10-17T13:38:50+00:00
+ 2023-10-17T15:31:44+00:00
https://argocanada.github.io/blog/posts/2021-07-02-classifying-an-arbitrary-longitudelatitude-as-land-or-ocean/
- 2023-10-17T13:38:50+00:00
+ 2023-10-17T15:31:44+00:00
https://argocanada.github.io/blog/posts/2021-06-23-checking-argo-files/
- 2023-10-17T13:38:50+00:00
+ 2023-10-17T15:31:44+00:00
https://argocanada.github.io/blog/posts/2021-06-07-an-argo-loop-the-loop/
- 2023-10-17T13:38:50+00:00
+ 2023-10-17T15:31:44+00:00
https://argocanada.github.io/blog/posts/2021-06-04-finding-coastal-argo-trajectories/
- 2023-10-17T13:38:50+00:00
+ 2023-10-17T15:31:44+00:00
https://argocanada.github.io/blog/posts/2021-05-14-a-questionable-interpolation/
- 2023-10-17T13:38:50+00:00
+ 2023-10-17T15:31:44+00:00
https://argocanada.github.io/blog/posts/2021-05-14-labrador-shelf-dfo-argo-float/
- 2023-10-17T13:38:50+00:00
+ 2023-10-17T15:31:44+00:00