########################################################
# Script to deploy VMs and generate traffic between them.
# NSG/VNet flow logs and Traffic Analytics can be used to
# explore the traffic flows
# Optionally create some elements to inspect VNet flow logs:
# - Azure Firewall
# - Azure Application Gateway
# - Azure VPN Gateway
#
# Jose Moreno
# May 2021
########################################################
# Control
intra_vnet_vms=no
sample_app=yes
create_azfw=yes
create_vpn=yes
create_appgw=yes
create_adx=no
create_elk=no
flowlog_type=vnet
# Variables
rg=flowlogs
location=eastus2euap
ws_location=eastus2
vnet_name=flowlogs
vnet_prefix=192.168.0.0/16
subnet0_name=vm0
subnet0_prefix=192.168.0.0/24
subnet1_name=vm1
subnet1_prefix=192.168.1.0/24
nsg_name=flowlogs
cloudinit_file="/tmp/cloudinit.txt"
vm_size=Standard_B1s
no_of_vms=4
flows="1;2:3;4:1433:1000,1:2:80:100,3:4:443:100" # Syntax: src:dst:port:kb/min
# App VM
app_subnet_name=app
app_subnet_prefix=192.168.2.0/24
appvm_name=appvm
appvm_pip_name=appvm-pip
appvm_nsg_name=appvm-nsg
appvm_sku=Standard_B1s
appvm_cloudinit_filename=/tmp/cloudinit-whoami.txt
# AzFW variables
azfw_subnet_prefix=192.168.10.0/24
azfw_name=myazfw
azfw_policy_name=myazfwpolicy
azfw_pip_name=myazfw-pip
# AppGW variables
appgw_subnet_name=ApplicationGatewaySubnet
appgw_subnet_prefix=192.168.200.0/24
appgw_name=appgw
appgw_pip_name=appgw-pip
appgw_sku=Standard_v2
appgw_cookie=Disabled
# VPNGW variables
vng_subnet_prefix=192.168.254.0/24
vng_name=vpngw
vng_pipa_name=vpngw-pip-a
vng_pipb_name=vpngw-pip-b
vng_asn=65001
publisher=cisco
offer=cisco-csr-1000v
sku=16_12-byol
nva_size=Standard_B2ms
version=$(az vm image list -p $publisher -f $offer -s $sku --all --query '[0].version' -o tsv)
branch_prefix=172.16.200.0/24
branch_subnet=172.16.200.0/26
branch_gateway=172.16.200.1
branch_bgp_ip=172.16.200.11
branch_asn=65500
username=$(whoami)
psk='Microsoft123!'
# ADX variables
adx_sku='Dev(No SLA)_Standard_E2a_v4'
adx_tier='Basic' # Not able to make it work with other values such as Developer/Basic
adx_capacity=1
adx_name=nwlogs$RANDOM
adx_fwlogs_table=FirewallLogs
adx_flowlogs_table=FlowLogs
# ElasticStack variables
elk_subnet_prefix=192.168.20.0/24
elk_subnet_name=elastic
elk_vm_name=elkvm
elk_vm_size=Standard_B2ms
elk_cloudinit_file=/tmp/elk_cloudinit.txt
# Some helper functions
# Converts a list to a shell array
function convert_string_to_array () {
# Default to comma as separator
if [[ -n $2 ]]
then
separator=$2
else
separator=','
fi
# Different syntax for bash and zsh
if [ -n "$BASH_VERSION" ]; then
arr_opt=a
elif [ -n "$ZSH_VERSION" ]; then
arr_opt=A
fi
# Do the split into array
IFS=$separator read -r"${arr_opt}" myarray <<< "$1"
echo "${myarray[@]}"
}
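# Example: convert_string_to_array "1;2;3" ';'   # prints "1 2 3"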
# Configure flow logs for all NSGs in a given location
function configure_flowlogs () {
if [[ -z $1 ]]; then
nsg_location=$(az group show -n $rg --query location -o tsv)
echo "No argument specified, taking the RG's location $nsg_location"
else
echo "Using $1 as location"
nsg_location=$1
fi
storage_account_name=$(az storage account list -g $rg -o tsv --query "[?location=='$nsg_location'].name" | head -1)
if [[ -z "$storage_account_name" ]]; then
storage_account_name=$(echo "logs$RANDOM${nsg_location}" | cut -c1-24) # max 24 characters
echo "No storage account found in $nsg_location, creating one..."
az storage account create -n $storage_account_name -g $rg --sku Standard_LRS --kind StorageV2 -l $nsg_location -o none
else
echo "Storage account $storage_account_name created in $nsg_location, using it for NSG flow flogs"
fi
echo "Looking for NSGs in resource group $rg in location $nsg_location..."
nsg_list=$(az network nsg list -g $rg -o tsv --query "[?location=='$nsg_location'].name")
echo "$(echo $nsg_list | wc -l) NSGs found"
while IFS= read -r nsg_name; do
echo "Configuring Flow Logs for NSG $nsg_name into storage account $storage_account_name..."
az network watcher flow-log create -l $nsg_location -n "${nsg_name}-${nsg_location}" -g $rg --nsg $nsg_name --storage-account $storage_account_name --log-version 2 --retention 1 -o none
done <<< "$nsg_list"
}
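# Example: configure_flowlogs westeurope   # enables NSG flow logs for every NSG of $rg located in westeurope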
# Set some variables getting the content from AKV
function get_secrets() {
# Variables
akv_name=erjositoKeyvault
default_password_secret=defaultPassword
# Get secrets
akv_rg_found=$(az keyvault list -o tsv --query "[?name=='$akv_name'].resourceGroup" 2>/dev/null)
if [[ -n ${akv_rg_found} ]]
then
echo "INFO: AKV ${akv_name} found in resource group $akv_rg_found"
default_password=$(az keyvault secret show --vault-name $akv_name -n $default_password_secret --query 'value' -o tsv 2>/dev/null)
else
echo "ERROR: secrets could not be read because Azure Key Vault ${akv_name} could not be found"
fi
}
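# Usage: get_secrets   # sets $default_password from the 'defaultPassword' secret in the erjositoKeyvault key vault (if found)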
# Create RG, Vnet, NSG
az group create -n $rg -l $location -o none
az network vnet create -g $rg -n $vnet_name --address-prefix $vnet_prefix --subnet-name $subnet1_name --subnet-prefix $subnet1_prefix -l $location -o none
az network vnet subnet create --vnet-name $vnet_name --name $subnet0_name -g $rg --address-prefixes $subnet0_prefix -o none
az network nsg create -n $nsg_name -g $rg -l $location -o none
az network nsg rule create -n allowSSHin --nsg-name $nsg_name -g $rg --priority 1000 --destination-port-ranges 22 --access Allow --protocol Tcp -o none
az network vnet subnet update -n $subnet1_name --vnet-name $vnet_name -g $rg --nsg $nsg_name -o none
az network vnet subnet update -n $subnet0_name --vnet-name $vnet_name -g $rg --nsg $nsg_name -o none
# Create Log Analytics workspace
logws_name=$(az monitor log-analytics workspace list -g $rg --query '[].name' -o tsv 2>/dev/null) # Retrieve the WS name if it already existed
if [[ -z "$logws_name" ]]
then
logws_name=log$RANDOM
az monitor log-analytics workspace create -n $logws_name -g $rg -l $ws_location -o none
fi
logws_id=$(az resource list -g $rg -n $logws_name --query '[].id' -o tsv)
logws_customerid=$(az monitor log-analytics workspace show -n $logws_name -g $rg --query customerId -o tsv)
# Create storage account
storage_account_name=$(az storage account list -g $rg --query '[].name' -o tsv 2>/dev/null) # Retrieve the storage account name if it already existed
if [[ -z "$storage_account_name" ]]
then
storage_account_name=log$RANDOM
az storage account create -n $storage_account_name -g $rg --sku Standard_LRS --kind StorageV2 -l $location -o none
fi
# Enable flow logs
if [[ "$flowlog_type" == "nsg" ]]; then
az network watcher flow-log create -l $location -n "flowlog-$location" -g $rg \
--nsg $nsg_name --storage-account $storage_account_name --log-version 2 --retention 7 \
--workspace $logws_id --interval 10 --traffic-analytics true -o none
else
az network watcher flow-log create -l $location -g $rg --name "flowlog-$location" --vnet $vnet_name \
--storage-account $storage_account_name --workspace $logws_name --interval 10 --traffic-analytics true -o none
fi
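# Optional check (sketch): confirm the flow log resource was created
# az network watcher flow-log show -l $location -n "flowlog-$location" -o table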
# Create a set of VMs that will generate traffic to each other
if [[ "$intra_vnet_vms" == "yes" ]]; then
# Generate cloudinit file to create VMs
cat <<EOF > $cloudinit_file
#cloud-config
packages:
- jq
- pv
EOF
# Create VMs spread over two subnets
for i in $(seq 1 $no_of_vms)
do
vm_name="vm$(printf "%02d" $i)"
subnet_index=$(expr $i % 2)
az vm create -n $vm_name -g $rg --image Ubuntu2204 --generate-ssh-keys --size $vm_size \
--vnet-name $vnet_name --subnet "vm${subnet_index}" --public-ip-address "${vm_name}-pip" --nsg $nsg_name \
--custom-data $cloudinit_file -l $location --public-ip-sku Standard --no-wait
done
# Wait some seconds and create JSON with required IPs
sleep 60
ip_json=$(az vm list-ip-addresses -g $rg -o json)
# Get the private IP of a specific VM out of the output of the command "az vm list-ip-addresses"
function get_private_ip () {
echo $1 | jq -r '.[] | select(.virtualMachine.name == "'$2'") | .virtualMachine.network.privateIpAddresses[0]'
}
# Get the public IP of a specific VM out of the output of the command "az vm list-ip-addresses"
function get_public_ip () {
echo $1 | jq -r '.[] | select(.virtualMachine.name == "'$2'") | .virtualMachine.network.publicIpAddresses[0].ipAddress'
}
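# Example: get_private_ip "$ip_json" vm01   # prints the first private IP of vm01
# Example: get_public_ip "$ip_json" vm01    # prints the public IP of vm01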
fi
# Sample (web) app running on a VM
if [[ "$sample_app" == "yes" ]]; then
az network vnet subnet create -n $app_subnet_name --vnet-name $vnet_name -g $rg --address-prefixes $app_subnet_prefix -o none
az network public-ip create -g $rg -n $appvm_pip_name --sku standard --allocation-method static -l $location -o none
az network nsg create -n $appvm_nsg_name -g $rg -o none
az network nsg rule create -n ssh --nsg-name $appvm_nsg_name -g $rg --priority 500 --destination-port-ranges 22 --access Allow --protocol Tcp -o none
az network nsg rule create -n web --nsg-name $appvm_nsg_name -g $rg --priority 510 --destination-port-ranges 8080 --access Allow --protocol Tcp -o none
az network nsg rule create -n https --nsg-name $appvm_nsg_name -g $rg --priority 520 --destination-port-ranges 443 --access Allow --protocol Tcp -o none
cat <<EOF > $appvm_cloudinit_filename
#cloud-config
runcmd:
- apt update && apt install -y python3-pip
- pip3 install flask
- wget https://raw.githubusercontent.com/erjosito/azcli/master/myip.py -O /root/myip.py
- python3 /root/myip.py >/root/myip.log 2>&1 &
EOF
az vm create -n $appvm_name -g $rg --image Ubuntu2204 --generate-ssh-keys --size $appvm_sku \
--vnet-name $vnet_name --subnet $app_subnet_name --nsg $appvm_nsg_name --public-ip-address $appvm_pip_name \
--custom-data $appvm_cloudinit_filename -o none
appvm_private_ip=$(az vm show -g $rg -n $appvm_name -d --query privateIps -o tsv) && echo $appvm_private_ip
fi
# Azure Firewall
if [[ "$create_azfw" == "yes" ]]; then
# Start
az network vnet subnet create --vnet-name $vnet_name --name AzureFirewallSubnet -g $rg --address-prefixes $azfw_subnet_prefix -o none
az network public-ip create -g $rg -n $azfw_pip_name --sku standard --allocation-method static -l $location -o none
azfw_ip=$(az network public-ip show -g $rg -n $azfw_pip_name --query ipAddress -o tsv)
az network firewall policy create -n $azfw_policy_name -g $rg -o none
az network firewall policy rule-collection-group create -n ruleset01 --policy-name $azfw_policy_name -g $rg --priority 1000 -o none
# Allow SSH and HTTP (plus TCP port 9, used by connection monitor)
echo "Creating rule to allow SSH and HTTP..."
az network firewall policy rule-collection-group collection add-filter-collection --policy-name $azfw_policy_name --rule-collection-group-name ruleset01 -g $rg \
--name mgmt --collection-priority 101 --action Allow --rule-name allowSSHnHTTP --rule-type NetworkRule --description "TCP 9, 22, 80 and 8080" \
--destination-addresses 10.0.0.0/8 172.16.0.0/12 192.168.0.0/16 --source-addresses 10.0.0.0/8 172.16.0.0/12 192.168.0.0/16 --ip-protocols TCP --destination-ports 9 22 80 8080 -o none
# Allow ICMP
echo "Creating rule to allow ICMP..."
az network firewall policy rule-collection-group collection add-filter-collection --policy-name $azfw_policy_name --rule-collection-group-name ruleset01 -g $rg -o none \
--name icmp --collection-priority 102 --action Allow --rule-name allowICMP --rule-type NetworkRule --description "ICMP traffic" \
--destination-addresses 10.0.0.0/8 172.16.0.0/12 192.168.0.0/16 --source-addresses 10.0.0.0/8 172.16.0.0/12 192.168.0.0/16 --ip-protocols ICMP --destination-ports "1-65535"
# Allow NTP
echo "Creating rule to allow NTP..."
az network firewall policy rule-collection-group collection add-filter-collection --policy-name $azfw_policy_name --rule-collection-group-name ruleset01 -g $rg \
--name ntp --collection-priority 103 --action Allow --rule-name allowNTP --rule-type NetworkRule --description "Egress NTP traffic" \
--destination-addresses '*' --source-addresses "10.0.0.0/8" "192.168.0.0/16" --ip-protocols UDP --destination-ports "123" -o none
# Example application collection with 2 rules (ifconfig.me, api.ipify.org)
echo "Creating rule to allow ifconfig.me and api.ipify.org..."
az network firewall policy rule-collection-group collection add-filter-collection --policy-name $azfw_policy_name --rule-collection-group-name ruleset01 -g $rg \
--name ifconfig --collection-priority 201 --action Allow --rule-name allowIfconfig --rule-type ApplicationRule --description "ifconfig" \
--target-fqdns "ifconfig.me" --source-addresses "10.0.0.0/8" "172.16.0.0/12" "192.168.0.0/16" --protocols Http=80 Https=443 -o none
az network firewall policy rule-collection-group collection rule add -g $rg --policy-name $azfw_policy_name --rule-collection-group-name ruleset01 --collection-name ifconfig \
--name ipify --target-fqdns "api.ipify.org" --source-addresses "10.0.0.0/8" "172.16.0.0/12" "192.168.0.0/16" --protocols Http=80 Https=443 --rule-type ApplicationRule -o none
# Example application collection with wildcards (*.ubuntu.com)
echo "Creating rule to allow *.ubuntu.com..."
az network firewall policy rule-collection-group collection add-filter-collection --policy-name $azfw_policy_name --rule-collection-group-name ruleset01 -g $rg \
--name ubuntu --collection-priority 202 --action Allow --rule-name repos --rule-type ApplicationRule --description "ubuntucom" \
--target-fqdns 'ubuntu.com' '*.ubuntu.com' --source-addresses '*' --protocols Http=80 Https=443 -o none
# Mgmt traffic to Azure
az network firewall policy rule-collection-group collection add-filter-collection --policy-name $azfw_policy_name --rule-collection-group-name ruleset01 -g $rg \
--name azure --collection-priority 203 --action Allow --rule-name azmonitor --rule-type ApplicationRule --description "Azure Monitor" \
--target-fqdns '*.opinsights.azure.com' '*.azure-automation.net' --source-addresses '*' --protocols Https=443 -o none
# Create Azure Firewall
echo "Creating Azure Firewall..."
az network firewall create -n $azfw_name -g $rg --policy $azfw_policy_name -l $location -o none
# Configuring IP
echo "Configuring firewall logs and private IP..."
azfw_id=$(az network firewall show -n $azfw_name -g $rg -o tsv --query id)
az monitor diagnostic-settings create -n mydiag --resource $azfw_id --workspace $logws_id \
--metrics '[{"category": "AllMetrics", "enabled": true, "retentionPolicy": {"days": 0, "enabled": false }, "timeGrain": null}]' \
--logs '[{"category": "AzureFirewallApplicationRule", "enabled": true, "retentionPolicy": {"days": 0, "enabled": false}},
{"category": "AzureFirewallNetworkRule", "enabled": true, "retentionPolicy": {"days": 0, "enabled": false}}]' -o none
az network firewall ip-config create -f $azfw_name -n azfw-ipconfig -g $rg --public-ip-address $azfw_pip_name --vnet-name $vnet_name -o none
az network firewall update -n $azfw_name -g $rg -o none
azfw_private_ip=$(az network firewall show -n $azfw_name -g $rg -o tsv --query 'ipConfigurations[0].privateIpAddress') && echo "$azfw_private_ip"
# Create route tables
if [[ "$intra_vnet_vms" == "yes" ]]; then
az network route-table create --name subnet0 --resource-group "$rg" --location "$location" -o none
az network route-table route create --route-table-name subnet0 -g $rg --address-prefix $subnet1_prefix --name subnet1 --next-hop-type VirtualAppliance --next-hop-ip-address $azfw_private_ip -o none
az network vnet subnet update --name $subnet0_name --route-table subnet0 --vnet-name $vnet_name --resource-group $rg -o none
az network route-table create --name subnet1 --resource-group "$rg" --location "$location" -o none
az network route-table route create --route-table-name subnet1 -g $rg --address-prefix $subnet0_prefix --name subnet0 --next-hop-type VirtualAppliance --next-hop-ip-address $azfw_private_ip -o none
az network vnet subnet update --name $subnet1_name --route-table subnet1 --vnet-name $vnet_name --resource-group $rg -o none
fi
# Create DNAT to sample app
if [[ "$sample_app" == "yes" ]]; then
azfw_pip_address=$(az network public-ip show -g $rg -n $azfw_pip_name --query ipAddress -o tsv)
echo "Firewall's public IP is $azfw_pip_address"
az network firewall policy rule-collection-group collection add-nat-collection --policy-name $azfw_policy_name --rule-collection-group-name ruleset01 -g $rg \
--name NAT8080 --collection-priority 110 --action DNAT --rule-name DnatSSL --description "Sample application DNAT" \
--source-addresses '*' --destination-addresses "$azfw_pip_address" --destination-ports 8080 --ip-protocols TCP \
--translated-address "$appvm_private_ip" --translated-port 8080 -o none
fi
fi
# Optionally, create App GW to expose the sample app
if [[ "$create_appgw" == "yes" ]]; then
az network vnet subnet create -g $rg --vnet-name $vnet_name -n $appgw_subnet_name --address-prefixes $appgw_subnet_prefix -o none
az network public-ip create -g $rg -n $appgw_pip_name --sku standard --allocation-method static -l $location -o none
appgw_pip_address=$(az network public-ip show -g $rg -n $appgw_pip_name --query ipAddress -o tsv)
az network application-gateway create -g $rg -n $appgw_name --capacity 2 --sku $appgw_sku \
--frontend-port 8080 --routing-rule-type basic --priority 100 \
--servers $appvm_private_ip --http-settings-port 8080 --http-settings-protocol Http \
--sku Standard_v2 --min-capacity 1 --max-capacity 2 \
--public-ip-address $appgw_pip_name --vnet-name $vnet_name --subnet $appgw_subnet_name -o none
# Add custom probe (with path /api/healthcheck) to initial config (port 8080)
echo "Creating custom probe for port 8080..."
az network application-gateway probe create -g "$rg" --gateway-name "$appgw_name" \
--name vmprobe8080 --protocol Http --host-name-from-http-settings --match-status-codes 200-399 --port 8080 --path "/api/healthcheck" -o none
http_settings_name=$(az network application-gateway http-settings list -g $rg --gateway-name $appgw_name -o tsv --query '[0].name')
az network application-gateway http-settings update -g $rg --gateway-name $appgw_name -n $http_settings_name --probe vmprobe8080 --host-name-from-backend-pool -o none
# Connect app gw to Azure Monitor
appgw_id=$(az network application-gateway show -n $appgw_name -g $rg --query id -o tsv)
az monitor diagnostic-settings create -n "mydiag$RANDOM" --resource $appgw_id --workspace $logws_id -o none \
--metrics '[{"category": "AllMetrics", "enabled": true, "retentionPolicy": {"days": 0, "enabled": false }, "timeGrain": null}]' \
--logs '[{"category": "ApplicationGatewayAccessLog", "enabled": true, "retentionPolicy": {"days": 0, "enabled": false}},
{"category": "ApplicationGatewayPerformanceLog", "enabled": true, "retentionPolicy": {"days": 0, "enabled": false}},
{"category": "ApplicationGatewayFirewallLog", "enabled": true, "retentionPolicy": {"days": 0, "enabled": false}}]'
fi
# Optionally, create a VPN GW and connect it to a simulated branch office (CSR)
if [[ "$create_vpngw" == "yes" ]]; then
# Create VPN GW
az network vnet subnet create -g $rg -n GatewaySubnet --vnet-name $vnet_name --address-prefix $vng_subnet_prefix -o none --only-show-errors
az network public-ip create -g $rg -n $vng_pipa_name -o none --only-show-errors
az network public-ip create -g $rg -n $vng_pipb_name -o none --only-show-errors
az network vnet-gateway create -g $rg --sku VpnGw1 --gateway-type Vpn --vpn-type RouteBased --vnet $vnet_name -n $vng_name --asn $vng_asn --public-ip-address $vng_pipa_name $vng_pipb_name -o none --only-show-errors
# Create CSR to simulate a branch
az vm create -n branch-nva1 -g $rg -l $location --size $nva_size --image ${publisher}:${offer}:${sku}:${version} --admin-username "$username" --generate-ssh-keys \
--public-ip-address branch-pip --public-ip-address-allocation static --public-ip-sku Standard \
--vnet-name branch --vnet-address-prefix $branch_prefix --subnet nva --subnet-address-prefix $branch_subnet --private-ip-address $branch_bgp_ip -o none
branch_ip=$(az network public-ip show -n branch-pip -g $rg --query ipAddress -o tsv) && echo $branch_ip
# Create LNG and connection
az network local-gateway create -g $rg -n branch --gateway-ip-address $branch_ip --local-address-prefixes "${branch_bgp_ip}/32" --asn $branch_asn --bgp-peering-address $branch_bgp_ip --peer-weight 0 -l $location -o none
az network vpn-connection create -g $rg --shared-key $psk --enable-bgp -n branch --vnet-gateway1 $vng_name --local-gateway2 branch -l $location -o none
# Get VNG data required to configure CSR
vpngw_pip_0=$(az network vnet-gateway show -n $vng_name -g $rg --query 'bgpSettings.bgpPeeringAddresses[0].tunnelIpAddresses[0]' -o tsv) && echo $vpngw_pip_0
vpngw_private_ip_0=$(az network vnet-gateway show -n $vng_name -g $rg --query 'bgpSettings.bgpPeeringAddresses[0].defaultBgpIpAddresses[0]' -o tsv) && echo $vpngw_private_ip_0
vpngw_pip_1=$(az network vnet-gateway show -n $vng_name -g $rg --query 'bgpSettings.bgpPeeringAddresses[1].tunnelIpAddresses[0]' -o tsv) && echo $vpngw_pip_1
vpngw_private_ip_1=$(az network vnet-gateway show -n $vng_name -g $rg --query 'bgpSettings.bgpPeeringAddresses[1].defaultBgpIpAddresses[0]' -o tsv) && echo $vpngw_private_ip_1
vpngw_asn=$(az network vnet-gateway show -n $vng_name -g $rg --query 'bgpSettings.asn' -o tsv) && echo $vpngw_asn
# Configure CSR (active/active VNG)
ssh -o BatchMode=yes -o StrictHostKeyChecking=no -o KexAlgorithms=diffie-hellman-group-exchange-sha1 $branch_ip <<EOF
config t
crypto ikev2 proposal azure-proposal
encryption aes-cbc-256 aes-cbc-128 3des
integrity sha1
group 2
exit
!
crypto ikev2 policy azure-policy
proposal azure-proposal
exit
!
crypto ikev2 keyring azure-keyring
peer $vpngw_pip_0
address $vpngw_pip_0
pre-shared-key $psk
exit
peer $vpngw_pip_1
address $vpngw_pip_1
pre-shared-key $psk
exit
exit
!
crypto ikev2 profile azure-profile
match address local interface GigabitEthernet1
match identity remote address $vpngw_pip_0 255.255.255.255
match identity remote address $vpngw_pip_1 255.255.255.255
authentication remote pre-share
authentication local pre-share
keyring local azure-keyring
exit
!
crypto ipsec transform-set azure-ipsec-proposal-set esp-aes 256 esp-sha-hmac
mode tunnel
exit
crypto ipsec profile azure-vti
set transform-set azure-ipsec-proposal-set
set ikev2-profile azure-profile
set security-association lifetime kilobytes 102400000
set security-association lifetime seconds 3600
exit
!
interface Tunnel0
ip unnumbered GigabitEthernet1
ip tcp adjust-mss 1350
tunnel source GigabitEthernet1
tunnel mode ipsec ipv4
tunnel destination $vpngw_pip_0
tunnel protection ipsec profile azure-vti
interface Tunnel1
ip unnumbered GigabitEthernet1
ip tcp adjust-mss 1350
tunnel source GigabitEthernet1
tunnel mode ipsec ipv4
tunnel destination $vpngw_pip_1
tunnel protection ipsec profile azure-vti
exit
!
router bgp $branch_asn
bgp router-id interface GigabitEthernet1
bgp log-neighbor-changes
neighbor $vpngw_private_ip_0 remote-as $vpngw_asn
neighbor $vpngw_private_ip_0 ebgp-multihop 5
neighbor $vpngw_private_ip_0 update-source GigabitEthernet1
neighbor $vpngw_private_ip_1 remote-as $vpngw_asn
neighbor $vpngw_private_ip_1 ebgp-multihop 5
neighbor $vpngw_private_ip_1 update-source GigabitEthernet1
!
ip route $vpngw_private_ip_0 255.255.255.255 Tunnel0
ip route $vpngw_private_ip_1 255.255.255.255 Tunnel1
!
end
!
wr mem
EOF
# Verify tunnel status and BGP
ssh -o BatchMode=yes -o StrictHostKeyChecking=no -o KexAlgorithms=diffie-hellman-group-exchange-sha1 $branch_ip "show ip int brief"
ssh -o BatchMode=yes -o StrictHostKeyChecking=no -o KexAlgorithms=diffie-hellman-group-exchange-sha1 $branch_ip "show ip bgp summary"
# Configure an IP SLA test
if [[ "$sample_app" == "yes" ]]; then
ssh -o BatchMode=yes -o StrictHostKeyChecking=no -o KexAlgorithms=diffie-hellman-group-exchange-sha1 $branch_ip <<EOF
config t
ip sla 1
http get http://${appvm_private_ip}:8080/api/ip
frequency 60
ip sla schedule 1 life forever start-time now
end
wr mem
EOF
fi
fi
# Aux function to add/update extension
function add_extension() {
extension_name=$1
extension_version=$(az extension show -n $extension_name --query version -o tsv 2>/dev/null)
if [[ -z "$extension_version" ]]
then
echo "Azure CLI extension $extension_name not found, installing now..."
az extension add -n $extension_name -o none
else
echo "Azure CLI extension $extension_name found with version $extension_version, trying to upgrade..."
az extension update -n $extension_name -o none
fi
extension_version=$(az extension show -n $extension_name --query version -o tsv 2>/dev/null)
echo "Azure CLI extension $extension_name installed with version $extension_version"
}
# Azure Data eXplorer (optional)
# WORK IN PROGRESS!!!!
if [[ "$create_adx" == "yes" ]]; then
# Create ADX cluster
add_extension kusto
az kusto cluster create -n $adx_name --sku "name=$adx_sku" "tier=$adx_tier" "capacity=$adx_capacity" -g $rg -l $ws_location -o none
az kusto database create --cluster-name $adx_name --database-name $adx_name -g $rg --read-write-database soft-delete-period=P365D hot-cache-period=P31D location=$ws_location -o none
adx_id=$(az kusto cluster show -n $adx_name -g $rg --query id -o tsv)
az kusto cluster update -n $adx_name -g $rg --type SystemAssigned -o none
adx_principal_id=$(az kusto cluster show -n $adx_name -g $rg --query 'identity.principalId' -o tsv)
# Create event grid (required for storage integration in ADX)
add_extension eventgrid
storage_account_name=$(az storage account list -g $rg -o tsv --query "[?location=='$location'].name" | head -1)
storage_account_id=$(az storage account show -n $storage_account_name -g $rg --query id --output tsv)
eg_name=$storage_account_name
az eventgrid system-topic create -n $eg_name -g $rg -l $location --topic-type microsoft.storage.storageaccounts --source $storage_account_id -o none
eg_id=$(az eventgrid system-topic show -n $eg_name -g $rg --query id -o tsv)
# Create event hub (required for storage integration in ADX)
eh_ns=ehns$RANDOM
eh_hub=$storage_account_name
az eventhubs namespace create -n $eh_ns -g $rg -l $location -o none
az eventhubs eventhub create --name $eh_hub --namespace-name $eh_ns -g $rg -o none
eh_hub_id=$(az eventhubs eventhub show --name $eh_hub --namespace-name $eh_ns -g $rg --query id -o tsv)
az eventgrid event-subscription create -n eg2eh --source-resource-id $eg_id --endpoint-type eventhub --endpoint $eh_hub_id -o none
# It fails with:
# Message: This event subscription operation is not supported using this API call. Please use the proper API and try again. # WTF!?!?!??!
# Create role assignments
az role assignment create --scope $eg_id --assignee $adx_principal_id --role 'EventGrid EventSubscription Reader' -o none
az role assignment create --scope $eg_id --assignee $adx_principal_id --role 'Azure Event Hubs Data Receiver' -o none # This goes to the EH, but so far I don't have any EH created!
az role assignment create --scope $storage_account_id --assignee $adx_principal_id --role 'Storage Blob Data Contributor' -o none
# In https://learn.microsoft.com/en-us/azure/data-explorer/create-event-grid-connection?tabs=arm-template%2Cazure-blob-storage they provide the EH ID!!!
az kusto data-connection event-grid create --cluster-name $adx_name -n $eg_name --database-name $adx_name -l $location \
--event-grid-resource-id $eg_id --event-hub-resource-id $eh_hub_id \
--blob-storage-event-type 'Microsoft.Storage.BlobCreated' --storage-account-resource-id $storage_account_id \
--data-format JSON --table-name rawFlowLogs --managed-identity-resource-id $adx_id -g $rg -o none
# It fails with: Message: Event grid properties must contain consumerGroup
# az kusto data-connection event-hub create --cluster-name $adx_name -n $eh_name --database-name $adx_name -l $location \
# --consumer-group '$Default' --event-hub-resource-id $eh_id --managed-identity-resource-id $adx_id -g $rg -o none
# No way I know to send remote KQL queries to ADX
# kql_query='.create table TestTable (TimeStamp: datetime, Value: string, Source:string)'
# az kusto script create --cluster-name $adx_name -g $rg --database-name $adx_name --continue-on-errors true -n createTable --script-content "$kql_query" -o none
# kql_query=".create table TestTable ingestion json mapping 'TestMapping' '[{\"column\":\"Records\",\"path\":\"$.records\"},{\"column\":\"Value\",\"path\":\"$.Value\"},{\"column\":\"Source\",\"path\":\"$.Source\"}]'"
# az kusto script create --cluster-name $adx_name -g $rg --database-name $adx_name --continue-on-errors true -n createTable --script-content "$kql_query" -o none
# Configure firewall logs to ADX via Event Hubs
if [[ "$create_azfw" == "yes" ]]; then
# Create Event Hubs namespace
eh_name=$(az eventhubs namespace list -g $rg --query '[0].name' -o tsv)
eh_ns_name=$eh_name
if [[ -z "$eh_ns_name" ]]; then
eh_name=nwlogs$RANDOM
eh_ns_name=$eh_name
echo "Creating Event Hub ${eh_name}..."
az eventhubs namespace create -n $eh_ns_name -g $rg -l $location --sku Standard -o none
az eventhubs eventhub create -n $eh_name -g $rg --namespace-name $eh_ns_name -o none
else
echo "Events Hub $eh_name found in RG $rg"
fi
# Add diag setting to AzFW to send logs to this Event Hub
azfw_id=$(az network firewall show -n $azfw_name -g $rg -o tsv --query id)
eh_ns_id=$(az eventhubs namespace show -g $rg -n $eh_ns_name --query id -o tsv)  # Needed by the diagnostic settings below
az monitor diagnostic-settings create -n $eh_name --resource $azfw_id --event-hub $eh_ns_id --event-hub-rule RootManageSharedAccessKey \
--metrics '[{"category": "AllMetrics", "enabled": true, "retentionPolicy": {"days": 0, "enabled": false }, "timeGrain": null}]' \
--logs '[{"category": "AZFWNetworkRule", "enabled": true, "retentionPolicy": {"days": 0, "enabled": false}},
{"category": "AZFWApplicationRule", "enabled": true, "retentionPolicy": {"days": 0, "enabled": false}},
{"category": "AZFWNatRule", "enabled": true, "retentionPolicy": {"days": 0, "enabled": false}},
{"category": "AZFWThreatIntel", "enabled": true, "retentionPolicy": {"days": 0, "enabled": false}},
{"category": "AZFWIdpsSignature", "enabled": true, "retentionPolicy": {"days": 0, "enabled": false}},
{"category": "AZFWDnsQuery", "enabled": true, "retentionPolicy": {"days": 0, "enabled": false}},
{"category": "AZFWFqdnResolveFailure", "enabled": true, "retentionPolicy": {"days": 0, "enabled": false}},
{"category": "AZFWFatFlow", "enabled": true, "retentionPolicy": {"days": 0, "enabled": false}},
{"category": "AZFWFlowTrace", "enabled": true, "retentionPolicy": {"days": 0, "enabled": false}}]' -o none
# Create table for firewall logs
echo "Run this query in ADX (https://dataexplorer.azure.com/clusters/$adx_name.$location/databases/$adx_name) and run the query in https://github.com/gbeaud/azure-demos/blob/main/adx-logs-management/adx-update-policy-structured-logs.kql"
# Link EH to ADX
eh_ns_cx_string=$(az eventhubs namespace authorization-rule keys list -g $rg --namespace-name $eh_ns_name --name RootManageSharedAccessKey --query primaryConnectionString -o tsv)
eh_ns_id=$(az eventhubs namespace show -g $rg -n $eh_ns_name --query id -o tsv)
eh_id=$(az eventhubs eventhub show --namespace-name $eh_ns_name -g $rg -n $eh_name --query id -o tsv)
az role assignment create --scope $eh_ns_id --assignee $adx_principal_id --role 'Azure Event Hubs Data Receiver' -o none
# az kusto data-connection event-hub data-connection-validation --cluster-name $adx_name -n $eh_name --database-name $adx_name -l $location \
# --consumer-group '$Default' --event-hub-resource-id $eh_id --managed-identity-resource-id $adx_id -g $rg -o none
fi
fi
# Optional: create ElasticStack VM
# WORK IN PROGRESS!!!!
if [[ "$create_elk" == "yes" ]]; then
# Create Subnet
echo "Creating subnet $elk_subnet_name ($elk_subnet_prefix)..."
az network vnet subnet create --vnet-name $vnet_name --name $elk_subnet_name -g $rg --address-prefixes $elk_subnet_prefix -o none
# Create NSGs
echo "Creating NSG for Elastic VM..."
az network nsg create -n "${elk_vm_name}-nsg" -g $rg -o none
az network nsg rule create -n SSH --nsg-name "${elk_vm_name}-nsg" -g $rg --priority 1000 --destination-port-ranges 22 --access Allow --protocol Tcp -o none
az network nsg rule create -n Kibana --nsg-name "${elk_vm_name}-nsg" -g $rg --priority 1010 --destination-port-ranges 5601 --access Allow --protocol Tcp -o none
az network nsg rule create -n ElasticSearch --nsg-name "${elk_vm_name}-nsg" -g $rg --priority 1020 --destination-port-ranges 9200 --access Allow --protocol Tcp -o none
az network nsg rule create -n ICMP --nsg-name "${elk_vm_name}-nsg" -g $rg --priority 1030 --source-address-prefixes '*' --destination-address-prefixes '*' --destination-port-ranges '*' --access Allow --protocol Icmp -o none
# Create Elastic Search VM
echo "Creating cloudinit file for Elastic Search..."
cat <<EOF > $elk_cloudinit_file
#cloud-config
packages:
- jq
- apt-transport-https
runcmd:
- wget -qO - https://artifacts.elastic.co/GPG-KEY-elasticsearch | apt-key add -
- echo "deb https://artifacts.elastic.co/packages/8.x/apt stable main" | tee /etc/apt/sources.list.d/elastic-8.x.list
- apt update && apt install -y elasticsearch kibana logstash
- apt install -y filebeat metricbeat
- systemctl enable elasticsearch
- systemctl enable kibana
- systemctl start elasticsearch
- systemctl start kibana
EOF
echo "Creating Elastic Stack VM..."
az vm create -n $elk_vm_name -g $rg -l $location --image Ubuntu2204 --generate-ssh-keys --nsg "${elk_vm_name}-nsg" -o none \
--custom-data $elk_cloudinit_file --public-ip-sku Standard --public-ip-address "${elk_vm_name}-pip" \
--vnet-name $vnet_name --size $elk_vm_size --subnet $elk_subnet_name
# Make sure to reset the elastic user's password with elasticsearch-setup-password or elasticsearch-reset-password !!!!
echo "Sleeping now until Elastic gets installed..."
sleep 300
echo "You will be prompted now to reset the Elastic and Kibana passwords. Please enter the value of the defaultPassword variable/secret (see above in the script):"
elk_pip=$(az network public-ip show -n "${elk_vm_name}-pip" -g $rg --query ipAddress -o tsv)
ssh $elk_pip "sudo /usr/share/elasticsearch/bin/elasticsearch-reset-password -u elastic -i"
ssh $elk_pip "sudo /usr/share/elasticsearch/bin/elasticsearch-reset-password -u kibana -i"
# Verify Elastic Search VM
if [[ -z "$default_password" ]]; then
echo "ERROR: default_password is empty. Please enter the value of the defaultPassword variable/secret (see above in the script):"
read default_password
fi
ssh -n -o BatchMode=yes -o StrictHostKeyChecking=no $elk_pip "systemctl status elasticsearch"
ssh -n -o BatchMode=yes -o StrictHostKeyChecking=no $elk_pip "systemctl status kibana"
ssh -n -o BatchMode=yes -o StrictHostKeyChecking=no $elk_pip "curl -sk -u elastic:${default_password} https://localhost:9200"
ssh -n -o BatchMode=yes -o StrictHostKeyChecking=no $elk_pip "curl -sk -u kibana_system:${default_password} https://localhost:9200/_xpack/security/_authenticate?pretty" # Not working!
ssh -n -o BatchMode=yes -o StrictHostKeyChecking=no $elk_pip "curl -sk -u elastic:${default_password} https://localhost:9200/_cluster/health"
# Configure ElasticSearch and Kibana
echo "Configuring ElasticSearch and Kibana..."
ssh -n -o BatchMode=yes -o StrictHostKeyChecking=no $elk_pip "sudo sed -i '/network.host/c\network.host: 0.0.0.0' /etc/elasticsearch/elasticsearch.yml"
ssh -n -o BatchMode=yes -o StrictHostKeyChecking=no $elk_pip "sudo systemctl restart elasticsearch"
ssh -n -o BatchMode=yes -o StrictHostKeyChecking=no $elk_pip "sudo sed -i '/server.host/c\server.host: \"0.0.0.0\"' /etc/kibana/kibana.yml"
ssh -n -o BatchMode=yes -o StrictHostKeyChecking=no $elk_pip "sudo sed -i '/server.name/c\server.name: \"elastictest\"' /etc/kibana/kibana.yml"
ssh -n -o BatchMode=yes -o StrictHostKeyChecking=no $elk_pip "sudo sed -i '/elasticsearch.username/c\elasticsearch.username: \"kibana\"' /etc/kibana/kibana.yml"
ssh -n -o BatchMode=yes -o StrictHostKeyChecking=no $elk_pip "sudo sed -i '/elasticsearch.password/c\elasticsearch.password: \"$default_password\"' /etc/kibana/kibana.yml"
ssh -n -o BatchMode=yes -o StrictHostKeyChecking=no $elk_pip "sudo sed -i '/elasticsearch.hosts/c\elasticsearch.hosts: [\"https://localhost:9200\"]' /etc/kibana/kibana.yml"
ssh -n -o BatchMode=yes -o StrictHostKeyChecking=no $elk_pip "sudo sed -i '/elasticsearch.ssl.verificationMode/c\elasticsearch.ssl.verificationMode: none' /etc/kibana/kibana.yml"
ssh -n -o BatchMode=yes -o StrictHostKeyChecking=no $elk_pip "sudo systemctl restart kibana"
# Install logstash plugin
# ssh -n -o BatchMode=yes -o StrictHostKeyChecking=no $elk_pip "sudo /usr/share/logstash/bin/logstash-plugin install logstash-input-azureblob" # Errors out!
ssh -n -o BatchMode=yes -o StrictHostKeyChecking=no $elk_pip "sudo /usr/share/logstash/bin/logstash-plugin install logstash-input-azure_blob_storage"
# Configure logstash
storage_account_name=$(az storage account list -g $rg --query '[].name' -o tsv 2>/dev/null) # Retrieve the storage account name if it already existed
storage_account_key=$(az storage account keys list -g $rg -n $storage_account_name --query '[0].value' -o tsv)
# From https://github.com/janmg/logstash-input-azure_blob_storage
cat <<EOF > /tmp/logstash.conf
input {
azure_blob_storage {
codec => "json"
storageaccount => "$storage_account_name"
access_key => "$storage_account_key"
container => "insights-logs-networksecuritygroupflowevent"
logtype => "nsgflowlog"
prefix => "resourceId=/"
path_filters => ['**/*.json']
addfilename => true
interval => 60
debug_timer => true
debug_until => 100
registry_create_policy => "start_over"
}
}
filter {
json {
source => "message"
}
mutate {
add_field => { "environment" => "test-env" }
remove_field => [ "message" ]
}
date {
match => ["unixtimestamp", "UNIX"]
}
}
output {
stdout { codec => json }
elasticsearch {
hosts => ["https://localhost:9200"]
index => "nsg-flow-logs-%{+xxxx.ww}"
ssl => true
ssl_certificate_verification => false
user => "elastic"
password => "$default_password"
}
}
EOF
user=$(whoami)
scp /tmp/logstash.conf $elk_pip:/home/$user/logstash.conf
ssh -n -o BatchMode=yes -o StrictHostKeyChecking=no $elk_pip "sudo mv /home/${user}/logstash.conf /etc/logstash/conf.d/logstash.conf"
# Start logstash
# ssh -n -o BatchMode=yes -o StrictHostKeyChecking=no $elk_pip "sudo systemctl restart logstash"
ssh -n -o BatchMode=yes -o StrictHostKeyChecking=no $elk_pip "sudo systemctl start logstash"
ssh -n -o BatchMode=yes -o StrictHostKeyChecking=no $elk_pip "sudo systemctl enable logstash"
# Export object API (index-patterns and dashboard)
# curl -sk -u elastic:${default_password} -X POST -H "kbn-xsrf: true" -H 'Content-Type: application/json' "http://${elk_pip}:5601/api/saved_objects/_export" -d '{"objects": [ {"type": "index-pattern", "id": "130ee8d1-b379-4eb2-bbcc-16375396dc49"} ] }' >./flowlogs_index.ndjson
# curl -sk -u elastic:${default_password} -X POST -H "kbn-xsrf: true" -H 'Content-Type: application/json' "http://${elk_pip}:5601/api/saved_objects/_export" -d '{"objects": [ {"type": "dashboard", "id": "0f215dd0-c40c-11ed-84b5-4b9e67d15531"} ] }' >./flowlogs_dashboard.ndjson
# Import object API (to use with .ndjson files, not exactly json)
if [[ -e "./flowlogs_index.ndjson" ]]; then
echo "Importing data index..."
curl -sk -u elastic:${default_password} -X POST -H "kbn-xsrf: true" --form file=@./flowlogs_index.ndjson "http://${elk_pip}:5601/api/saved_objects/_import"
else
echo "Sorry, couldn't find file ./flowlogs_index.ndjson"
fi
if [[ -e "./flowlogs_dashboard.ndjson" ]]; then
echo "Importing Kibana dashboard..."
curl -sk -u elastic:${default_password} -X POST -H "kbn-xsrf: true" --form file=@./flowlogs_dashboard.ndjson "http://${elk_pip}:5601/api/saved_objects/_import"
else
echo "Sorry, couldn't find file ./flowlogs_dashboard.ndjson"
fi
# URLs
echo "Browse to http://$elk_pip:5601 for Kibana (the username is 'elastic')"
fi
# Start traffic generation
if [[ "$intra_vnet_vms" == "yes" ]]; then
flows_array=($(convert_string_to_array $flows ','))
for flow in "${flows_array[@]}"
do
# echo "Processing flow $flow..."
sources=$(echo $flow | cut -d':' -f 1)
destinations=$(echo $flow | cut -d':' -f 2)
port=$(echo $flow | cut -d':' -f 3)
kb_min=$(echo $flow | cut -d':' -f 4)
src_array=($(convert_string_to_array $sources ';'))
dst_array=($(convert_string_to_array $destinations ';'))
for dst in "${dst_array[@]}"; do
# Start nc listening on port for destination
dst_vm_name="vm$(printf "%02d" $dst)"
dst_pip=$(get_public_ip $ip_json "$dst_vm_name")
dst_ip=$(get_private_ip $ip_json "$dst_vm_name")
echo "Running \"nc -dlk -p ${port}\" on ${dst_vm_name}, ${dst_pip}"
ssh -n -o StrictHostKeyChecking=no -o BatchMode=yes "$dst_pip" "nc -dlk -p $port > /dev/null &"
for src in "${src_array[@]}"; do
# Configure crontab entry to send traffic every minute
src_vm_name="vm$(printf "%02d" $src)"
src_pip=$(get_public_ip $ip_json "$src_vm_name")
cmd='(crontab -l 2>/dev/null; echo "* * * * * dd if=/dev/urandom bs=1000 count='${kb_min}' | pv -L 10M | nc '${dst_ip}' '${port}'") | crontab -'
echo "Adding crontab entry for ${src_vm_name}, ${src_pip}"
ssh -n -o StrictHostKeyChecking=no -o BatchMode=yes "$src_pip" "$cmd"
done
done
done
fi
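# Each source VM ends up with a crontab entry similar to this one (illustrative IP/port/size):
# * * * * * dd if=/dev/urandom bs=1000 count=1000 | pv -L 10M | nc 192.168.1.4 1433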
# Diagnostics
if [[ "$intra_vnet_vms" == "yes" ]]; then
az network watcher flow-log list -o table -l $location
src_vm_name=vm01
dst_vm_name=vm03
port=1433
src_pip=$(get_public_ip $ip_json "$src_vm_name")
dst_pip=$(get_public_ip $ip_json "$dst_vm_name")
dst_ip=$(get_private_ip $ip_json "$dst_vm_name")
ssh -n -o StrictHostKeyChecking=no -o BatchMode=yes "$src_pip" "crontab -l | grep \"$dst_ip $port\""
ssh -n -o StrictHostKeyChecking=no -o BatchMode=yes "$src_pip" "nc -vz $dst_ip $port"
ssh -n -o StrictHostKeyChecking=no -o BatchMode=yes "$dst_pip" "ps -ef | grep \"nc -dlk -p $port\" | grep -v grep"
ssh -n -o StrictHostKeyChecking=no -o BatchMode=yes "$dst_pip" "sudo netstat -tunlp | grep \":$port\""
fi
# Simulate port scan
if [[ "$intra_vnet_vms" == "yes" ]]; then
src_vm_name=vm01
port=22
src_pip=$(get_public_ip $ip_json "$src_vm_name") && echo $src_pip
for i in $(seq 1 $no_of_vms)
do
dst_vm_name="vm$(printf "%02d" $i)"
dst_ip=$(get_private_ip $ip_json "$dst_vm_name")
ssh -n -o StrictHostKeyChecking=no -o BatchMode=yes "$src_pip" "nc -vz $dst_ip $port"
done
fi
# Test sample app
if [[ "$sample_app" == "yes" ]]; then
appvm_ip=$(az network public-ip show -n $appvm_pip_name -g $rg --query ipAddress -o tsv)
curl "${appvm_ip}:8080/api/ip"
fi
# Traffic over the firewall
if [[ "$sample_app" == "yes" ]]; then
azfw_ip=$(az network public-ip show -n $azfw_pip_name -g $rg --query ipAddress -o tsv)
curl "${azfw_ip}:8080/api/ip"
fi
# Traffic over the App Gateway
if [[ "$sample_app" == "yes" ]]; then
appgw_ip=$(az network public-ip show -n $appgw_pip_name -g $rg --query ipAddress -o tsv)
curl "${appgw_ip}:8080/api/ip"
fi
# ADX
az kusto cluster list -g $rg -o table
az kusto database list --cluster-name $adx_name -g $rg --only-show-errors -o table
az kusto script list --cluster-name $adx_name --database-name $adx_name -g $rg -o table
az kusto data-connection list --cluster-name $adx_name --database-name $adx_name -g $rg --only-show-errors -o table
az eventgrid system-topic list -g $rg -o table
# List tables
kql_query='.show tables details'
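# (kql_query is not sent anywhere from here - see the remark above about remote KQL; paste it into the ADX web UI
# at https://dataexplorer.azure.com against the $adx_name database)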
#############################
# Traffic Analytics queries #
#############################
# Inspect log categories
query="AzureDiagnostics
| distinct Category"
az monitor log-analytics query -w $logws_customerid --analytics-query "$query" -o table
# VNet Flow Logs
src_ip=$(curl -s4 ifconfig.me) # Other values: $appvm_private_ip, $branch_bgp_ip, $(curl -s4 ifconfig.me)
dst_ip=$azfw_ip # Other values: $appvm_private_ip, $azfw_ip, $appgw_ip
query="NTANetAnalytics
//| where TimeGenerated > ago(1h)
| where SubType == \"FlowLog\"
| where SrcIp == \"${src_ip}\"
| where ipv4_is_in_range(SrcIp, \"${azfw_subnet_prefix}\")
| where DestIp == \"${dst_ip}\"
| where DestPort == 8080
| where FlowStatus == \"Allowed\"
| summarize BytesSrc2Dst=sum(BytesSrcToDest), BytesDst2Src=sum(BytesDestToSrc) by MacAddress, SrcIp, SourceSystem, L4Protocol, DestIp, DestPort, ConnectionType, DestApplicationGateway, DestLocalNetworkGateway"
az monitor log-analytics query -w $logws_customerid --analytics-query "$query" -o table
# AppGW: Frontend and backend (healthchecks included too :( )
my_ip=$(curl -s4 ifconfig.me) # Other values: $appvm_private_ip, $branch_bgp_ip, $(curl -s4 ifconfig.me)
query="NTANetAnalytics
| where TimeGenerated > ago(1h)
| where SubType == \"FlowLog\"
| where (SrcIp == \"${my_ip}\" and DestIp == \"${appgw_ip}\") or ipv4_is_in_range(SrcIp, \"${appgw_subnet_prefix}\")
| where DestPort == 8080
| where FlowStatus == \"Allowed\"
| summarize BytesSrc2Dst=sum(BytesSrcToDest), BytesDst2Src=sum(BytesDestToSrc) by MacAddress, SrcIp, SourceSystem, L4Protocol, DestIp, DestPort, ConnectionType, DestApplicationGateway, DestLocalNetworkGateway"
az monitor log-analytics query -w $logws_customerid --analytics-query "$query" -o table
# AzFW: Frontend and backend
my_ip=$(curl -s4 ifconfig.me) # Other values: $appvm_private_ip, $branch_bgp_ip, $(curl -s4 ifconfig.me)
query="NTANetAnalytics
| where TimeGenerated > ago(1h)
| where SubType == \"FlowLog\"
| where (SrcIp == \"${my_ip}\" and DestIp == \"${azfw_ip}\") or (SrcIp == \"${azfw_ip}\" and DestIp == \"${my_ip}\") or ipv4_is_in_range(SrcIp, \"${azfw_subnet_prefix}\") or ipv4_is_in_range(DestIp, \"${azfw_subnet_prefix}\")
| where DestPort == 8080
| where FlowStatus == \"Allowed\"
| project TimeGenerated, MacAddress, SrcIp, SourceSystem, L4Protocol, DestIp, DestPort, ConnectionType, DestApplicationGateway, DestLocalNetworkGateway"
az monitor log-analytics query -w $logws_customerid --analytics-query "$query" -o table
# AzFW Fat Flow logs
src_ip=$(curl -s4 ifconfig.me) # Other values: $appvm_private_ip, $branch_bgp_ip, $(curl -s4 ifconfig.me)
query="AzureDiagnostics
| where Category == \"AZFWFatFlow\"
//| where DestinationPort_d == 8080
//| summarize count()
| where SourceIP == \"${src_ip}\"
| project TimeGenerated, FlowRate_s, Protocol_s, SourceIP"
az monitor log-analytics query -w $logws_customerid --analytics-query "$query" -o table
# AzFW NAT Logs
src_ip=$(curl -s4 ifconfig.me) # Other values: $appvm_private_ip, $branch_bgp_ip, $(curl -s4 ifconfig.me)
query="AzureDiagnostics
| where Category == \"AZFWNatRule\"
| where SourceIP == \"${src_ip}\"
| project TimeGenerated, SourceIP, SourcePort_d, DestinationIp_s, DestinationPort_d, TranslatedIp_s, TranslatedPort_d"
az monitor log-analytics query -w $logws_customerid --analytics-query "$query" -o table
##############
# Stop/Start #
##############
function stop_firewall() {
azfw_name=$(az network firewall list -g $rg --query '[0].name' -o tsv)
azfw_ipconfig_name=$(az network firewall show -n $azfw_name -g $rg --query 'ipConfigurations[0].name' -o tsv)
echo "Stoping Azure Firewall ${azfw_name}..."
az network firewall ip-config delete -f $azfw_name -n azfw-ipconfig -g $rg -o none
az network firewall update -n $azfw_name -g $rg -o none
}
function start_firewall() {
azfw_name=$(az network firewall list -g $rg --query '[0].name' -o tsv)
if [[ -n "$azfw_name" ]]; then
echo "Starting Azure Firewall ${azfw_name}..."
azfw_ipconfig_name="${azfw_name}-ipconfig"
az network firewall ip-config create -f $azfw_name -n $azfw_ipconfig_name -g $rg --public-ip-address $azfw_pip_name --vnet-name $vnet_name -o none
az network firewall update -n $azfw_name -g $rg -o none
else
echo "No Azure Firewall found in RG $rg"
fi
}
function stop_adx() {
adx_name=$(az kusto cluster list -g $rg --query '[0].name' -o tsv)
if [[ -n "$adx_name" ]]; then
echo "Stopping ADX cluster $adx_name..."
az kusto cluster stop -n $adx_name -g $rg -o none
else
echo "No ADX cluster found in RG $rg"
fi
}
function start_adx() {
adx_name=$(az kusto cluster list -g $rg --query '[0].name' -o tsv)
if [[ -n "$adx_name" ]]; then
echo "Starting ADX cluster $adx_name..."
az kusto cluster start -n $adx_name -g $rg -o none
else
echo "No ADX cluster found in RG $rg"
fi
}
function stop_lab() {
stop_vms
stop_firewall
stop_adx
if [[ "$create_appgw" == "yes" ]]; then
az network application-gateway stop -n $appgw_name -g $rg --no-wait -o none
fi
if [[ "$create_vpn" == "yes" ]]; then
: # Delete VPN GW? (not implemented yet)
fi
}
function stop_vms() {
vm_list=$(az vm list -o tsv -g "$rg" --query "[].name")
while IFS= read -r vm_name; do
echo "Deallocating Virtual Machine ${vm_name}..."
az vm deallocate -g $rg -n "$vm_name" --no-wait -o none
done <<< "$vm_list"
}
function start_vms() {
vm_list=$(az vm list -o tsv -g "$rg" --query "[].name")
while IFS= read -r vm_name; do
echo "Starting Virtual Machine ${vm_name}..."
az vm start -g $rg -n "$vm_name" --no-wait -o none
done <<< "$vm_list"
}
function start_lab() {
start_vms
if [[ "$create_azfw" == "yes" ]]; then
start_firewall
fi
if [[ "$create_adx" == "yes" ]]; then
start_adx
fi
if [[ "$create_appgw" == "yes" ]]; then
az network application-gateway start -n $appgw_name -g $rg --no-wait -o none
fi
if [[ "$create_vpn" == "yes" ]]; then
: # Create VPN from scratch? (not implemented yet)
fi
}
###########
# Cleanup #
###########
# WIP!
# az network watcher flow-log delete -l $location -n
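# Possible cleanup sequence (sketch, not in the original): remove the flow logs, then the whole resource group.
# Note that "flow-log list" returns every flow log in $location for the subscription, so filter as needed.
# flowlog_list=$(az network watcher flow-log list -l $location --query '[].name' -o tsv)
# while IFS= read -r flowlog_name; do
#     az network watcher flow-log delete -l $location -n "$flowlog_name"
# done <<< "$flowlog_list"
# az group delete -n $rg -y --no-wait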