# PyWR.py (version 1.1) -- 27 Oct 2020
# Python functions for weather typing (using K-means) and flow-dependent model diagnostics
# Authors: ÁG Muñoz ([email protected]), James Doss-Gollin, AW Robertson ([email protected])
# The International Research Institute for Climate and Society (IRI)
# The Earth Institute at Columbia University.
# Notes:
# Log: see version.log in GitHub

import numpy as np
import pandas as pd
from numba import jit
import matplotlib.pyplot as plt
from sklearn.cluster import KMeans
from sklearn.decomposition import PCA
import xarray as xr
from typing import Tuple

# for procrustes functions
from scipy.linalg import norm, orthogonal_procrustes
from scipy.spatial import procrustes
from math import atan, sin, cos

# for plotting functions
import cartopy.crs as ccrs
from cartopy import feature
from matplotlib.colors import LinearSegmentedColormap
from mpl_toolkits.axes_grid1 import AxesGrid
import matplotlib.colors as mcolors
def download_data(url, authkey, outfile, force_download=False):
    """Download data from the IRI Data Library.

    If the file can be read in and force_download is False, reads from file.
    Otherwise downloads from the IRI DL and then reads from file.

    Parameters
    ----------
    url : str
        The url pointing to the data.nc file.
    authkey : str
        The authentication key for IRI DL (see above).
    outfile : str
        The data filename.
    force_download : bool, optional
        False if it's OK to read from an existing file,
        True if the data *must* be re-downloaded.

    Returns
    -------
    data : xarray.Dataset
        Dataset specified in url or file.
    """
    if not force_download:
        try:
            data = xr.open_dataset(outfile, decode_times=False)
        except Exception:  # file missing or unreadable: fall back to downloading
            force_download = True
    if force_download:
        # call curl to download the data (requires an IPython/Jupyter session)
        command = "curl -L -k -b '__dlauth_id={}' '{}' > {}".format(authkey, url, outfile)
        get_ipython().system(command)
        # open the data
        data = xr.open_dataset(outfile, decode_times=False)
    return data
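
# Usage sketch (not run at import; the URL and key below are hypothetical
# placeholders, and the curl call requires an IPython/Jupyter session):
#   ds = download_data(
#       url="https://iridl.ldeo.columbia.edu/SOURCES/.../data.nc",
#       authkey="YOUR_DLAUTH_COOKIE",
#       outfile="reanalysis.nc",
#   )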
def get_number_eof(X: np.ndarray, var_to_explain: float, plot=False) -> int:
    """Get the number of EOFs of X that explain a given variance proportion.

    Parameters
    ----------
    X : ndarray
    var_to_explain : float
        Proportion (0 to 1) of variance to be explained.
    plot : bool, optional
        Default plot=False will not generate a plot.

    Returns
    -------
    n_eof : int
        Number of EOFs retained for the chosen proportion of variance.

    Notes
    -----
    The plot generated by the function is 'number of EOFs' versus
    'cumulative proportion of variance explained'.
    """
    assert var_to_explain > 0, 'var_to_explain must be between 0 and 1'
    assert var_to_explain < 1, 'var_to_explain must be between 0 and 1'
    pca = PCA().fit(X)
    var_explained = pca.explained_variance_ratio_
    cum_var = var_explained.cumsum()
    n_eof = np.where(cum_var > var_to_explain)[0].min() + 1
    if plot:
        n_padding = 4
        plt.figure(figsize=(12, 7))
        plt.plot(np.arange(1, n_eof + 1 + n_padding), cum_var[0:(n_padding + n_eof)])
        plt.scatter(np.arange(1, n_eof + 1 + n_padding), cum_var[0:(n_padding + n_eof)])
        plt.xlabel('Number of EOFs')
        plt.ylabel('Cumulative Proportion of Variance Explained')
        plt.grid()
        plt.title('{} EOFs Retained'.format(n_eof))
        plt.show()
    return n_eof
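
# Self-contained sketch: how many EOFs explain 90% of the variance of random
# data (an illustrative check, not part of the original library):
def _example_get_number_eof():
    rng = np.random.default_rng(0)
    X = rng.standard_normal((200, 50))  # [time, dimension] matrix
    return get_number_eof(X, var_to_explain=0.9, plot=False)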
@jit(forceobj=True)  # object mode: np.corrcoef is not supported by numba's nopython mode
def _vector_ci(P: np.ndarray, Q: np.ndarray) -> float:
    """Implement the Michelangeli et al. (1995) Classifiability Index.

    The variable naming here is not pythonic but follows the notation in the
    1995 paper, which makes it easier to follow what is going on. You shouldn't
    need to call this function directly; it is called by
    get_classifiability_index.

    Parameters
    ----------
    P : array
        A cluster centroid.
    Q : array
        Another cluster centroid.

    Returns
    -------
    ci : float
        Classifiability index value.
    """
    k = P.shape[0]
    Aij = np.ones([k, k])
    for i in range(k):
        for j in range(k):
            Aij[i, j] = np.corrcoef(P[i, :], Q[j, :])[0, 1]
    Aprime = Aij.max(axis=0)
    ci = Aprime.min()
    return ci
def calc_classifiability(P, Q):
    """Implement the Michelangeli et al. (1995) Classifiability Index.

    The variable naming here is not pythonic but follows the notation in the
    1995 paper, which makes it easier to follow what is going on. You shouldn't
    need to call this function directly.

    Parameters
    ----------
    P : array
        A cluster centroid.
    Q : array
        Another cluster centroid.

    Returns
    -------
    ci : float
        Classifiability index value.
    """
    k = P.shape[0]
    Aij = np.ones([k, k])
    for i in range(k):
        for j in range(k):
            # anomaly correlation between centroid rows (the original called an
            # undefined `correl`; np.corrcoef matches _vector_ci above)
            Aij[i, j] = np.corrcoef(P[i, :], Q[j, :])[0, 1]
    Aprime = Aij.max(axis=0)
    ci = Aprime.min()
    return ci
@jit(forceobj=True)  # object mode: calls _vector_ci / numpy APIs numba cannot compile
def get_classifiability_index(centroids: np.ndarray) -> Tuple[float, int]:
    """Get the classifiability of a set of centroids.

    Computes the classifiability index for a set of centroid simulations and
    indicates which simulation gives the best partition.

    Parameters
    ----------
    centroids : array
        Input array of centroids, indexed [simulation, cluster, dimension].

    Returns
    -------
    classifiability : float
        Classifiability index value.
    best_part : int
        Index of the best partition (simulation).
    """
    nsim = centroids.shape[0]
    c_pq = np.ones([nsim, nsim])
    for i in range(0, nsim):
        for j in range(0, nsim):
            if i == j:
                c_pq[i, j] = np.nan
            else:
                c_pq[i, j] = _vector_ci(P=centroids[i, :, :], Q=centroids[j, :, :])
    classifiability = np.nanmean(c_pq)
    best_part = np.where(c_pq == np.nanmax(c_pq))[0][0]
    return classifiability, best_part
@jit(forceobj=True)  # object mode: scikit-learn calls cannot be compiled by numba
def loop_kmeans(X: np.ndarray, n_cluster: int, n_sim: int) -> Tuple[np.ndarray, np.ndarray]:
    """Compute weather types using k-means clustering.

    Runs k-means n_sim times with different random initializations and stores
    the centroids and cluster labels of each run.

    Parameters
    ----------
    X : array
        PCA-reduced reanalysis data.
    n_cluster : int
        How many clusters to compute.
    n_sim : int
        How many times to initialize the clusters
        (note: the classifiability computation grows as order(n_sim**2)).

    Returns
    -------
    centroids : array
        Centroids, indexed [simulation, cluster, dimension].
    w_types : array
        Weather types, indexed [simulation, time].

    Notes
    -----
    X should be in reduced-dimension space already; indexed [time, dimension].
    """
    centroids = np.zeros(shape=(n_sim, n_cluster, X.shape[1]))
    w_types = np.zeros(shape=(n_sim, X.shape[0]))
    for i in np.arange(n_sim):
        km = KMeans(n_clusters=n_cluster).fit(X)
        centroids[i, :, :] = km.cluster_centers_
        w_types[i, :] = km.labels_
    return centroids, w_types
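
# Typical weather-typing workflow, sketched on synthetic data (the sizes and
# the n_cluster/n_sim values are illustrative): reduce to the leading EOFs, run
# repeated k-means, then pick the best partition by classifiability. Note that
# the @jit decorators above rely on numba object mode (forceobj=True).
def _example_weather_typing():
    rng = np.random.default_rng(0)
    X = rng.standard_normal((300, 40))               # [time, dimension]
    n_eof = get_number_eof(X, var_to_explain=0.9)
    X_pc = PCA(n_components=n_eof).fit_transform(X)  # reduced [time, eof] space
    centroids, w_types = loop_kmeans(X_pc, n_cluster=4, n_sim=10)
    ci, best = get_classifiability_index(centroids)
    labels = resort_labels(w_types[best])            # most frequent WT becomes 1
    return labels, ci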
def resort_labels(old_labels):
    """Re-sort cluster labels.

    Re-orders labels so that the lowest number is the most common,
    and the highest number is the least common.

    Parameters
    ----------
    old_labels : vector
        The previous labels of the clusters.

    Returns
    -------
    new_labels : vector
        The new cluster labels, ranked by frequency of occurrence.
    """
    old_labels = np.int_(old_labels)
    labels_from = np.unique(old_labels).argsort()
    counts = np.array([np.sum(old_labels == xi) for xi in labels_from])
    orders = counts.argsort()[::-1].argsort()
    new_labels = orders[labels_from[old_labels]] + 1
    return new_labels
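
# Worked example: the most frequent old label (2, three occurrences) becomes 1:
#   resort_labels(np.array([2., 2., 0., 2., 1.]))  ->  array([1, 1, 3, 1, 2])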
# Functions for procrustes analysis
def prepareDS_procrustes(WTmod, WTrea):
    """Prepare the model and reanalysis datasets for analysis / plotting.

    Takes the raw model and reanalysis datasets and puts them on a common grid.

    Parameters
    ----------
    WTmod : Dataset
        Model dataset.
    WTrea : Dataset
        Reanalysis dataset.

    Returns
    -------
    WTmod : Dataset
        Squeezed version of the original model dataset.
    WTrea : Dataset
        Squeezed version of the reanalysis dataset, interpolated to the
        model grid.

    Notes
    -----
    The `WTrea` domain needs to be slightly larger than the `WTmod` domain
    in order to avoid NaNs after interpolation.
    """
    WTmod = np.squeeze(WTmod)
    WTrea = np.squeeze(WTrea)
    # Interpolate the reanalysis WT grid to the model grid, for a fair comparison
    WTrea = WTrea.interp_like(WTmod)
    return WTmod, WTrea
def procrustes2(data1, data2):
    """Perform procrustes analysis.

    Follows scipy's `procrustes`, but also returns the rotation matrix and the
    scale so the fitted transformation can be inspected and reused.

    Parameters
    ----------
    data1 : dataFrame
        Reanalysis dataFrame.
    data2 : dataFrame
        Model dataFrame.

    Returns
    -------
    mtx1 : array
        Standardized version of data1.
    mtx2 : array
        Orientation of data2 that best fits data1.
    disparity : float
        Dissimilarity between the two datasets.
    R : ndarray
        The matrix solution of the orthogonal Procrustes problem,
        returned from scipy's orthogonal_procrustes() function.
    s : float
        Scale; sum of the singular values of mtx1.T @ mtx2.
    """
    mtx1 = np.array(data1, dtype=np.double, copy=True)
    mtx2 = np.array(data2, dtype=np.double, copy=True)
    if mtx1.ndim != 2 or mtx2.ndim != 2:
        raise ValueError("Input matrices must be two-dimensional")
    if mtx1.shape != mtx2.shape:
        raise ValueError("Input matrices must be of same shape")
    if mtx1.size == 0:
        raise ValueError("Input matrices must be >0 rows and >0 cols")
    # translate all the data to the origin
    mtx1 -= np.mean(mtx1, 0)
    mtx2 -= np.mean(mtx2, 0)
    norm1 = np.linalg.norm(mtx1)
    norm2 = np.linalg.norm(mtx2)
    if norm1 == 0 or norm2 == 0:
        raise ValueError("Input matrices must contain >1 unique points")
    # change scaling of data (in rows) such that trace(mtx*mtx') = 1
    mtx1 /= norm1
    mtx2 /= norm2
    # transform mtx2 to minimize disparity
    R, s = orthogonal_procrustes(mtx1, mtx2)
    mtx2 = np.dot(mtx2, R.T) * s  # here the projected mtx2 is estimated
    # measure the dissimilarity between the two datasets
    disparity = np.sum(np.square(mtx1 - mtx2))
    return mtx1, mtx2, disparity, R, s
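
# Sketch: procrustes2 on two random fields of the same shape (illustrative):
def _example_procrustes2():
    rng = np.random.default_rng(0)
    a = rng.standard_normal((10, 8))
    b = rng.standard_normal((10, 8))
    mtx1, mtx2, disparity, R, s = procrustes2(a, b)
    return disparity  # 0 would mean a perfect match after the transformation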
def procrustes2d(X, Y, scaling=True, reflection='best'):
    """Procrustes analysis.

    A port of MATLAB's `procrustes` function to Numpy.
    -- Modified by Á.G. Muñoz ([email protected])

    Procrustes analysis determines a linear transformation (translation,
    reflection, orthogonal rotation and scaling) of the points in Y to best
    conform them to the points in matrix X, using the sum of squared errors
    as the goodness-of-fit criterion.

    d, Z, [tform] = procrustes(X, Y)

    Parameters
    ----------
    X : array
        The reference or target field.
    Y : array
        The field to be transformed.
    scaling : bool, optional
        If False, the scaling component of the transformation is forced to 1.
    reflection : str or bool, optional
        If 'best' (default), the transformation solution may or may not
        include a reflection component, depending on which fits the data
        best. Setting reflection to True or False forces a solution with
        reflection or no reflection, respectively.

    Returns
    -------
    d : float
        The residual sum of squared errors, normalized according to a
        measure of the scale of X, ((X - X.mean(axis=0))**2).sum().
    Z : array
        The matrix of transformed Y-values.
    tform : dict
        Specifies the rotation, translation and scaling that maps X --> Y.

    Notes
    -----
    X and Y must have equal numbers of points (rows),
    but Y may have fewer dimensions (columns) than X.

    c: the translation component
    T: the orthogonal rotation and reflection component
    b: the scale component

    That is, Z = tform['scale'] * Y @ tform['rotation'] + tform['translation'].
    """
    n, m = X.shape
    ny, my = Y.shape
    muX = X.mean(axis=0)
    muY = Y.mean(axis=0)
    muY = muY.values  # Y is expected to be an xarray DataArray
    X0 = X - np.broadcast_to(muX, (n, m))
    Y0 = Y - np.broadcast_to(muY, (ny, my))
    # cast to plain floats so the scalar algebra below works for both
    # ndarray and DataArray inputs
    ssX = float((X0**2.).sum())
    ssY = float((Y0**2.).sum())
    # centred Frobenius norm
    normX = np.sqrt(ssX)
    normY = np.sqrt(ssY)
    # scale to equal (unit) norm
    X0 /= normX
    Y0 /= normY
    Y0 = Y0.values
    if my < m:
        Y0 = np.concatenate((Y0, np.zeros((n, m - my))), 0)
    # optimum rotation matrix of Y
    A = np.dot(X0.T, Y0)
    U, s, Vt = np.linalg.svd(A, full_matrices=False)
    V = Vt.T
    Tmat = np.dot(V, U.T)
    if reflection != 'best':
        # does the current solution use a reflection?
        have_reflection = np.linalg.det(Tmat) < 0
        # if that's not what was specified, force the other solution
        if reflection != have_reflection:
            V[:, -1] *= -1
            s[-1] *= -1
            Tmat = np.dot(V, U.T)
    traceTA = s.sum()
    if scaling:
        # optimum scaling of Y
        b = traceTA * normX / normY
        # standardised distance between X and b*Y*T + c
        d = 1 - traceTA**2
        # transformed coords
        Z = normX * traceTA * np.matmul(Y0, Tmat) + np.broadcast_to(muX, (n, m))
    else:
        b = 1
        d = 1 + ssY / ssX - 2 * traceTA * normY / normX
        Z = normY * np.matmul(Y0, Tmat) + np.broadcast_to(muX, (n, m))
    # transformation matrix
    if my < m:
        Tmat = Tmat[:my, :]
    c = muX - b * np.matmul(muY, Tmat)
    # transformation values
    tform = {'rotation': Tmat, 'scale': b, 'translation': np.broadcast_to(c, (n, m))}
    return d, Z, tform
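
# Sketch of procrustes2d on synthetic inputs (illustrative only). Y must be an
# xarray object here because procrustes2d calls `.values` on it:
def _example_procrustes2d():
    rng = np.random.default_rng(0)
    X = rng.standard_normal((10, 8))
    Y = xr.DataArray(rng.standard_normal((10, 8)), dims=("row", "col"))
    d, Z, tform = procrustes2d(X, Y, scaling=True, reflection='best')
    return d  # normalized residual; 0 would mean a perfect match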
def procrustesAnalysis(WTmod, WTrea, model, reanalysis='MERRA', smooth='SingleDay', printDisparity=False):
    """Run procrustes analysis.

    Takes model weather-type data and reanalysis weather-type data and performs
    procrustes analysis to adjust the model WT dataset towards the reanalysis.

    Parameters
    ----------
    WTmod : Dataset
        Model WT dataset.
    WTrea : Dataset
        Reanalysis WT dataset.
    model : str
        Name of the model data used.
    reanalysis : str
        Name of the reanalysis data used.
    smooth : str, optional
        Determines if the data will be smoothed;
        can be either 'SingleDay' (no smoothing) or '5DayAVG' (smoothing).
    printDisparity : bool, optional
        If True, disparity values for each weather type will be printed.
        Default printDisparity=False, where no output is printed.

    Returns
    -------
    WTf : Dataset
        Includes the reanalysis, model, and adjusted model WT data.
    Procrustes : Dataset
        Includes the model data and the procrustes analysis
        components of scale, rotation, and translation.
    """
    WTmod, WTrea = prepareDS_procrustes(WTmod, WTrea)
    WT = xr.concat([WTrea, WTmod], pd.Index([reanalysis, model], name='dataset'))
    if printDisparity:
        print("Disparity (0 means perfect match) for:")
    for iwt in range(len(WTrea['wt'])):
        mtx1, mtx2, disparity, R, s = procrustes2(WTrea.WT[iwt], WTmod.WT[iwt])
        if printDisparity:
            print("WT " + str(int(iwt + 1)) + ":" + str(round(disparity, 2)))
    WTmodT = xr.full_like(WTmod, 0)
    Scale = xr.full_like(WTmod, 0)
    Rotation = xr.full_like(WTmod, 0)
    Translation = xr.full_like(WTmod, 0)
    if printDisparity:
        print("Normalized disparity (0 means perfect match) for:")
    for iwt in range(len(WTrea['wt'])):
        d, z, t = procrustes2d(WTrea.WT[iwt], WTmod.WT[iwt], scaling=True, reflection='best')
        WTmodT.WT[iwt] = z
        Scale.WT[iwt] = WTmodT.WT[iwt] * t['scale']  # scale component (scalar multiply)
        Rotation.WT[iwt] = z @ t['rotation']
        Translation.WT[iwt] = z + t['translation']
        if printDisparity:
            print("WT " + str(int(iwt + 1)) + ":" + str(round(d, 2)))
    WTf = xr.concat([WTrea, WTmod, WTmodT],
                    pd.Index([reanalysis, model, model + ' (Procrustes)'], name='dataset'))
    Procrustes = xr.concat([WTmod, Scale, Rotation, Translation],
                           pd.Index([model, 'Scaled', 'Rotated', 'Translated'], name='dataset'))
    return WTf, Procrustes
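
# Usage sketch (WTmod / WTrea are weather-type composite Datasets with a 'WT'
# variable indexed by 'wt'; the model name below is illustrative):
#   WTf, Procrustes = procrustesAnalysis(WTmod, WTrea, model='GCM',
#                                        reanalysis='MERRA', printDisparity=True)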
# Plotting / graphing functions
def plot_WTcomposite(reanalysis, t2m, rainfall, wt_unique, wt, wt_counts,
                     map_proj=ccrs.PlateCarree(), data_proj=ccrs.PlateCarree(),
                     figsize=(14, 8), windArrows=False, uwnd=[], vwnd=[], savefig=True):
    """Plot composites of the WTs for the available input variables.

    Using the outputs of the weather-typing analysis, generates contour plots
    of the reanalysis geopotential height data and of the temperature and
    rainfall anomalies, with the option to overlay wind vector data.

    Parameters
    ----------
    reanalysis : Dataset
        Reanalysis dataset.
    t2m : Dataset
        Temperature dataset.
    rainfall : Dataset
        Rainfall dataset.
    wt_unique : ndarray
        Unique reanalysis value for each lat/lon point for each weather type,
        calculated as the mean reanalysis value grouped by `WT` and averaged over time.
    wt : DataFrame
        Dataframe containing the weather type for each time step.
    wt_counts : Series
        Series containing the proportion of the data in each weather type.
    map_proj : cartopy.crs projection, optional
        Projection the map should be displayed in.
    data_proj : cartopy.crs projection, optional
        Projection the data is in.
    figsize : tuple, optional
        Size of the figure the function returns; defaults to a 14 x 8 figure.
    windArrows : bool, optional
        If wind vector data is available, set to True to display wind arrows
        on the reanalysis plots in the figure.
    uwnd : Dataset, optional
        The u component vector for wind.
    vwnd : Dataset, optional
        The v component vector for wind.
    savefig : bool, optional
        Determines if the plot will be saved.

    Returns
    -------
    plt.show() : matplotlib plot
        Composite figure showing plots of the weather-type anomalies of
        pressure height (reanalysis), precipitation, and temperature, for each
        weather type, with the percentage of days in each WT in the titles.

    Notes
    -----
    The uwnd and vwnd parameters only need to be set if `windArrows` is True.
    If savefig=True, the figure will be saved to the current directory as
    'figs/wt_composite.pdf'.
    """
    # Set up the figure
    plt.rcParams.update({'font.size': 12})
    fig, axes = plt.subplots(
        nrows=3, ncols=len(wt_unique), subplot_kw={'projection': map_proj},
        figsize=figsize, sharex=True, sharey=True
    )
    # Loop through the weather types
    for i, w in enumerate(wt_unique):
        def selector(ds):
            times = wt.loc[wt['WT'] == w].index
            ds = ds.sel(time=np.in1d(ds.unstack('time')['T'], times))
            ds = ds.mean(dim='time')
            return ds
        # Top row: geopotential height anomalies
        ax = axes[0, i]
        ax.set_title('WT {}: {:.1%} of days'.format(w, wt_counts.values[i]))
        C0 = selector(reanalysis['adif']).unstack('grid').plot.contourf(
            transform=data_proj,
            ax=ax,
            cmap='PuOr',
            extend="both",
            levels=np.linspace(-2e2, 2e2, 21),
            add_colorbar=False,  # could be exposed as an optional argument
            add_labels=False
        )
        ax.coastlines()
        ax.add_feature(feature.BORDERS)
        if windArrows:
            X = reanalysis['adif'].X
            Y = reanalysis['adif'].Y
            U = selector(uwnd).adif.values
            V = selector(vwnd).adif.values
            magnitude = np.sqrt(U**2 + V**2)
            strongest = magnitude > np.percentile(magnitude, 50)
            Q = ax.quiver(
                X[strongest], Y[strongest], U[strongest], V[strongest],
                transform=data_proj,
                width=0.06, scale=0.8, units='xy'
            )
        # Middle row: rainfall anomalies
        ax = axes[1, i]
        C1 = selector(rainfall['adif']).unstack('grid').plot.contourf(
            transform=data_proj,
            ax=ax,
            cmap='BrBG',
            extend="both",
            levels=np.linspace(-2, 2, 13),
            add_colorbar=False,
            add_labels=False
        )
        ax.coastlines()
        ax.add_feature(feature.BORDERS)
        # Bottom row: temperature anomalies
        ax = axes[2, i]
        C2 = selector(t2m['asum']).unstack('grid').plot.contourf(
            transform=data_proj,
            ax=ax,
            cmap='RdBu_r',
            extend="both",
            levels=np.linspace(-3, 3, 13),
            add_colorbar=False,
            add_labels=False
        )
        ax.coastlines()
        ax.add_feature(feature.BORDERS)
        ax.tick_params(colors='b')
    # Add colorbars
    plt.tight_layout()
    fig.subplots_adjust(right=0.94)
    cax0 = fig.add_axes([0.97, 0.65, 0.0075, 0.3])
    cax1 = fig.add_axes([0.97, 0.33, 0.0075, 0.3])
    cax2 = fig.add_axes([0.97, 0.01, 0.0075, 0.3])
    cbar0 = fig.colorbar(C0, cax=cax0)
    cbar0.formatter.set_powerlimits((4, 4))
    cbar0.update_ticks()
    cbar0.set_label(r'$zg_{500}$ anomaly [$m^2$/$s^2$]', rotation=270)
    cbar0.ax.get_yaxis().labelpad = 20
    cbar1 = fig.colorbar(C1, cax=cax1)
    cbar1.set_label('Precip. anomaly [mm/d]', rotation=270)
    cbar1.ax.get_yaxis().labelpad = 20
    cbar2 = fig.colorbar(C2, cax=cax2)
    cbar2.set_label('T2m anomaly [$^o$C]', rotation=270)
    cbar2.ax.get_yaxis().labelpad = 20
    if savefig:
        fig.savefig('figs/wt_composite.pdf', bbox_inches='tight')
    return plt.show()
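
# Usage sketch for plot_WTcomposite (all names illustrative; `reanalysis`,
# `t2m`, and `rainfall` are anomaly Datasets stacked on 'time'/'grid' as in
# the weather-typing notebook):
#   plot_WTcomposite(reanalysis, t2m, rainfall, wt_unique, wt, wt_counts,
#                    windArrows=False, savefig=False)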
def plot_reaVSmod(WTmod, WTrea, model, reanalysis='MERRA', savefig=False):
    """Plot the reanalysis and model weather-type datasets.

    Plots the weather types of the smoothed reanalysis data and of the
    smoothed, interpolated model dataset as contour maps.

    Parameters
    ----------
    WTmod : Dataset
        Model dataset.
    WTrea : Dataset
        Reanalysis dataset.
    model : str
        Name of the model dataset.
    reanalysis : str, optional
        Name of the reanalysis dataset.
        Default reanalysis='MERRA' (data used in the example calculations).
    savefig : bool, optional
        Determines if the plot will be saved.

    Returns
    -------
    plt.show() : matplotlib plot
        Contour plot showing the reanalysis and model WTs.

    Notes
    -----
    If savefig=True, the figure will be saved to the current directory as
    'figs/plot_reaVSmod.pdf'.
    """
    WTmod, WTrea = prepareDS_procrustes(WTmod, WTrea)
    WT = xr.concat([WTrea, WTmod], pd.Index([reanalysis, model], name='dataset'))
    plt.rcParams.update({'figure.facecolor': 'white'})
    plt.rcParams.update({'font.size': 22})
    countlev = 20
    # Build a blue-white-red colormap with a white band around zero
    n = 40
    x = 0.5
    lower = plt.cm.bwr(np.linspace(0, x, n))
    white = plt.cm.bwr(np.ones(90 - 2 * n) * x)
    upper = plt.cm.bwr(np.linspace(1 - x, 1, n))
    colors = np.vstack((lower, white, upper))
    tmap = mcolors.LinearSegmentedColormap.from_list('map_white', colors)
    p = WT.WT.plot.contourf(
        x='X', y='Y', row='wt', col='dataset',
        transform=ccrs.PlateCarree(),
        subplot_kws={
            'projection': ccrs.Orthographic(-90, 10)
        },
        figsize=(13, 15),
        levels=np.arange(-120, 130, countlev),
        cmap=tmap,
        extend='both',
        cbar_kwargs=dict(label='geopotential height anomalies (gpm)',
                         pad=0.1, aspect=30, shrink=0.9)
    )
    j = -1
    for i, ax in enumerate(p.axes.flat):
        if i <= (len(WT.dataset) - 1):  # indicate the datasets on the top row only
            ax.set_title(WT.dataset[i].values, fontsize=20)
        # Add black contour lines
        if i % len(WT.dataset) == 0:
            j += 1
        ax.contour(WT.WT['X'], WT.WT['Y'], WT.WT[i - len(WT.dataset) * j][j],
                   linewidths=1, colors='k', levels=np.arange(-120, 130, countlev),
                   transform=ccrs.PlateCarree())
    xmin, xmax = WTmod.WT['X'].min(), WTmod.WT['X'].max()
    ymin, ymax = WTmod.WT['Y'].min(), WTmod.WT['Y'].max()
    for ax in p.axes.flat:
        ax.coastlines()
        ax.add_feature(feature.BORDERS)
    if savefig:
        plt.savefig("figs/plot_reaVSmod.pdf")
    return plt.show()
def plot_procrustesCallibration(WTf, savefig=False, model='model'):
    """Plot the WTf output of the procrustes analysis.

    Plots the corrected weather-type model outputs against the
    original WT model and reanalysis datasets.

    Parameters
    ----------
    WTf : Dataset
        Output of procrustesAnalysis(), which includes the
        reanalysis and model WTs, and the corrected WTs.
    savefig : bool, optional
        Determines if the plot will be saved.
    model : str, optional
        Model name used in the saved filename (previously an undefined global).

    Returns
    -------
    plt.show() : matplotlib plot
        Contour plots showing the comparison between the model, reanalysis,
        and corrected weather types.

    Notes
    -----
    If savefig=True, the figure will be saved to the current directory as
    'figs/ProcrustesCallibration_' + model + '.pdf'.
    """
    plt.rcParams.update({'font.size': 20})
    countlev = 20
    # Build a blue-white-red colormap with a white band around zero
    n = 40
    x = 0.5
    lower = plt.cm.bwr(np.linspace(0, x, n))
    white = plt.cm.bwr(np.ones(90 - 2 * n) * x)
    upper = plt.cm.bwr(np.linspace(1 - x, 1, n))
    colors = np.vstack((lower, white, upper))
    tmap = mcolors.LinearSegmentedColormap.from_list('map_white', colors)
    p = WTf.WT.plot.contourf(
        x='X', y='Y', row='wt', col='dataset',
        transform=ccrs.PlateCarree(), aspect=WTf.dims['X'] / WTf.dims['Y'],
        subplot_kws={
            'projection': ccrs.Orthographic(-90, 10)
        },
        figsize=(15, 15),
        levels=np.arange(-120, 130, countlev),
        cmap=tmap,
        extend='both',
        cbar_kwargs=dict(
            label='geopotential height anomalies (gpm)', pad=0.1, aspect=30, shrink=0.9)
    )
    j = -1
    for i, ax in enumerate(p.axes.flat):
        if i <= (len(WTf.dataset) - 1):  # indicate the datasets on the top row only
            ax.set_title(WTf.dataset[i].values, fontsize=20)
        # Add black contour lines
        if i % len(WTf.dataset) == 0:
            j += 1
        ax.contour(
            WTf.WT['X'], WTf.WT['Y'], WTf.WT[i - len(WTf.dataset) * j][j], linewidths=1,
            colors='k', levels=np.arange(-120, 130, countlev), transform=ccrs.PlateCarree())
    xmin, xmax = WTf.WT['X'].min(), WTf.WT['X'].max()
    ymin, ymax = WTf.WT['Y'].min(), WTf.WT['Y'].max()
    for ax in p.axes.flat:
        ax.coastlines()
        ax.add_feature(feature.BORDERS)
    if savefig:
        plt.savefig('figs/ProcrustesCallibration_' + model + '.pdf')
    plt.show()
def plot_procrustesDecomposition(Procrustes, savefig=False, model='model'):
    """Plot the results of the procrustes analysis.

    Contour plots showing the different components of the
    procrustes analysis used to correct the weather types.

    Parameters
    ----------
    Procrustes : Dataset
        Output of procrustesAnalysis().
    savefig : bool, optional
        Determines if the plot will be saved.
    model : str, optional
        Model name used in the saved filename (previously an undefined global).

    Returns
    -------
    plt.show() : matplotlib plot
        Contour plots showing the model weather types without correction,
        and the scaled, rotated, and translated data.

    Notes
    -----
    If savefig=True, the figure will be saved to the current directory as
    'figs/ProcrustesDecomposition_' + model + '.pdf'.
    `Procrustes` includes the original model weather-type data, as well as the
    scale, rotation, and translation data used to correct the model
    data in the procrustes analysis.
    """
    plt.rcParams.update({'font.size': 20})
    countlev = 20
    # Build a blue-white-red colormap with a white band around zero
    n = 40
    x = 0.5
    lower = plt.cm.bwr(np.linspace(0, x, n))
    white = plt.cm.bwr(np.ones(90 - 2 * n) * x)
    upper = plt.cm.bwr(np.linspace(1 - x, 1, n))
    colors = np.vstack((lower, white, upper))
    tmap = mcolors.LinearSegmentedColormap.from_list('map_white', colors)
    p = Procrustes.WT.plot.contourf(
        x='X', y='Y', row='wt', col='dataset',
        transform=ccrs.PlateCarree(), aspect=Procrustes.dims['X'] / Procrustes.dims['Y'],
        subplot_kws={
            'projection': ccrs.Orthographic(-90, 10)
        },
        figsize=(15, 15),
        levels=np.arange(-120, 130, countlev),
        cmap=tmap,
        extend='both',
        cbar_kwargs=dict(
            label='geopotential height anomalies (gpm)', pad=0.1, aspect=30, shrink=0.9)
    )
    j = -1
    for i, ax in enumerate(p.axes.flat):
        if i <= (len(Procrustes.dataset) - 1):  # indicate the datasets on the top row only
            ax.set_title(Procrustes.dataset[i].values, fontsize=20)
        # Add black contour lines
        if i % len(Procrustes.dataset) == 0:
            j += 1
        ax.contour(
            Procrustes.WT['X'], Procrustes.WT['Y'],
            Procrustes.WT[i - len(Procrustes.dataset) * j][j], linewidths=1, colors='k',
            levels=np.arange(-120, 130, countlev), transform=ccrs.PlateCarree())
    xmin, xmax = Procrustes.WT['X'].min(), Procrustes.WT['X'].max()
    ymin, ymax = Procrustes.WT['Y'].min(), Procrustes.WT['Y'].max()
    for ax in p.axes.flat:
        ax.coastlines()
        ax.add_feature(feature.BORDERS)
    if savefig:
        plt.savefig('figs/ProcrustesDecomposition_' + model + '.pdf')
    plt.show()
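
if __name__ == "__main__":
    # Smoke-test sketch: exercise the synthetic examples above (no downloads,
    # no plotting, no real climate data). Illustrative only.
    print("EOFs retained:", _example_get_number_eof())
    labels, ci = _example_weather_typing()
    print("classifiability index:", ci)
    print("procrustes2 disparity:", _example_procrustes2())
    print("procrustes2d disparity:", _example_procrustes2d())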