% fm-ml.bib (forked from jdnklau/fm-ml)
% Encoding: UTF-8
@InCollection{scott2021machsmt,
author = {Joseph Scott and Aina Niemetz and Mathias Preiner and Saeed Nejati and Vijay Ganesh},
booktitle = {Tools and Algorithms for the Construction and Analysis of Systems},
publisher = {Springer International Publishing},
title = {{MachSMT}: A Machine Learning-based Algorithm Selector for {SMT} Solvers},
year = {2021},
pages = {303--325},
doi = {10.1007/978-3-030-72013-1_16},
groups = {Tool and Configuration Selection},
}
@InCollection{dunkelau2019automated,
author = {Jannik Dunkelau and Sebastian Krings and Joshua Schmidt},
booktitle = {Lecture Notes in Computer Science},
publisher = {Springer International Publishing},
title = {Automated Backend Selection for {ProB} Using Deep Learning},
year = {2019},
pages = {130--147},
doi = {10.1007/978-3-030-20652-9_9},
groups = {Tool and Configuration Selection},
}
@InProceedings{nagashima2018pamper,
author = {Yutaka Nagashima and Yilun He},
booktitle = {Proceedings of the 33rd {ACM}/{IEEE} International Conference on Automated Software Engineering},
title = {{PaMpeR}: proof method recommendation system for Isabelle/{HOL}},
year = {2018},
month = {sep},
publisher = {{ACM}},
doi = {10.1145/3238147.3238210},
groups = {Tool and Configuration Selection},
}
@Article{healy2017predicting,
author = {Andrew Healy and Rosemary Monahan and James F. Power},
journal = {Electronic Proceedings in Theoretical Computer Science},
title = {Predicting {SMT} Solver Performance for Software Verification},
year = {2017},
month = {jan},
pages = {20--37},
volume = {240},
doi = {10.4204/eptcs.240.2},
groups = {Tool and Configuration Selection},
publisher = {Open Publishing Association},
}
@InProceedings{healy2016evaluating,
author = {Andrew Healy and Rosemary Monahan and James F. Power},
booktitle = {Proceedings of the 31st Annual {ACM} Symposium on Applied Computing},
title = {Evaluating the use of a general-purpose benchmark suite for domain-specific {SMT}-solving},
year = {2016},
month = {apr},
publisher = {{ACM}},
doi = {10.1145/2851613.2851975},
groups = {Tool and Configuration Selection},
}
@InProceedings{tulsian2014mux,
author = {Varun Tulsian and Aditya Kanade and Rahul Kumar and Akash Lal and Aditya V. Nori},
booktitle = {Proceedings of the 11th Working Conference on Mining Software Repositories - {MSR} 2014},
title = {{MUX}: algorithm selection for software model checkers},
year = {2014},
publisher = {{ACM} Press},
doi = {10.1145/2597073.2597080},
groups = {Tool and Configuration Selection},
}
@Article{hutter2009paramils,
author    = {Frank Hutter and Holger H. Hoos and Kevin Leyton-Brown and Thomas Stützle},
journal = {Journal of Artificial Intelligence Research},
title = {{ParamILS}: An Automatic Algorithm Configuration Framework},
year = {2009},
month = {oct},
pages = {267--306},
volume = {36},
doi = {10.1613/jair.2861},
groups = {Tool and Configuration Selection},
publisher = {{AI} Access Foundation},
}
@TechReport{UCAM-CL-TR-792,
author = {Bridge, James P.},
institution = {University of Cambridge, Computer Laboratory},
title = {{Machine learning and automated theorem proving}},
year = {2010},
month = nov,
number = {UCAM-CL-TR-792},
doi = {10.48456/tr-792},
groups = {Tool and Configuration Selection},
url = {https://www.cl.cam.ac.uk/techreports/UCAM-CL-TR-792.pdf},
}
@Article{cai2019automatic,
author = {Cheng-Hao Cai and Jing Sun and Gillian Dobbie},
journal = {Automated Software Engineering},
title = {Automatic B-model repair using model checking and machine learning},
year = {2019},
month = {aug},
number = {3},
pages = {653--704},
volume = {26},
doi = {10.1007/s10515-019-00264-4},
groups = {Synthesis and Repair},
publisher = {Springer Science and Business Media {LLC}},
}
@InCollection{schmidt2018repair,
author = {Joshua Schmidt and Sebastian Krings and Michael Leuschel},
booktitle = {Lecture Notes in Computer Science},
publisher = {Springer International Publishing},
title = {Repair and Generation of Formal Models Using Synthesis},
year = {2018},
pages = {346--366},
doi = {10.1007/978-3-319-98938-9_20},
groups = {Synthesis and Repair},
}
@InCollection{claessen2017supervisory,
author        = {Koen Claessen and Jonatan Kilhamn and Laura Kovács and Bengt Lennartson},
booktitle     = {Hardware and Software: Verification and Testing ({HVC} 2017)},
editor        = {Ofer Strichman and Rachel Tzoref-Brill},
series        = {Lecture Notes in Computer Science},
volume        = {10629},
publisher     = {Springer, Cham},
title = {A Supervisory Control Algorithm Based on Property-Directed Reachability},
year = {2017},
month = nov,
abstract = {We present an algorithm for synthesising a controller (supervisor) for a discrete event system (DES) based on the property-directed reachability (PDR) model checking algorithm. The discrete event systems framework is useful in both software, automation and manufacturing, as problems from those domains can be modelled as discrete supervisory control problems. As a formal framework, DES is also similar to domains for which the field of formal methods for computer science has developed techniques and tools. In this paper, we attempt to marry the two by adapting PDR to the problem of controller synthesis. The resulting algorithm takes as input a transition system with forbidden states and uncontrollable transitions, and synthesises a safe and minimally-restrictive controller, correct-by-design. We also present an implementation along with experimental results, showing that the algorithm has potential as a part of the solution to the greater effort of formal supervisory controller synthesis and verification.},
archiveprefix = {arXiv},
doi = {10.1007/978-3-319-70389-3_8},
eprint = {1711.06501},
file = {:http\://arxiv.org/pdf/1711.06501v1:PDF},
groups = {Synthesis and Repair},
keywords = {cs.SY, cs.LO, 68-02},
primaryclass = {cs.SY},
}
@Article{jha2017theory,
author = {Susmit Jha and Sanjit A. Seshia},
journal = {Acta Informatica},
title = {A theory of formal synthesis via inductive learning},
year = {2017},
month = {feb},
number = {7},
pages = {693--726},
volume = {54},
doi = {10.1007/s00236-017-0294-5},
groups = {Synthesis and Repair},
publisher = {Springer Science and Business Media {LLC}},
}
@InCollection{katzmodel,
author = {Gal Katz and Doron Peled},
booktitle = {Tools and Algorithms for the Construction and Analysis of Systems},
publisher = {Springer Berlin Heidelberg},
title     = {Model Checking-Based Genetic Programming with an Application to Mutual Exclusion},
year      = {2008},
pages = {141--156},
doi = {10.1007/978-3-540-78800-3_11},
groups = {Synthesis and Repair},
}
@InCollection{szegedy2020promising,
author = {Christian Szegedy},
booktitle = {Lecture Notes in Computer Science},
publisher = {Springer International Publishing},
title = {A Promising Path Towards Autoformalization and General Artificial Intelligence},
year = {2020},
pages = {3--20},
doi = {10.1007/978-3-030-53518-6_1},
groups = {Formalisation and Specifications},
}
@InProceedings{rahman2019classifying,
author = {Md. Abdur Rahman and Md. Ariful Haque and Md. Nurul Ahad Tawhid and Md. Saeed Siddik},
booktitle = {Proceedings of the 3rd {ACM} {SIGSOFT} International Workshop on Machine Learning Techniques for Software Quality Evaluation - {MaLTeSQuE} 2019},
title = {Classifying non-functional requirements using {RNN} variants for quality software development},
year = {2019},
publisher = {{ACM} Press},
doi = {10.1145/3340482.3342745},
groups = {Formalisation and Specifications},
}
@InProceedings{sharma2014machine,
author = {Richa Sharma and Jaspreet Bhatia and K. K. Biswas},
booktitle = {Proceedings of the 3rd International Workshop on Realizing Artificial Intelligence Synergies in Software Engineering - {RAISE} 2014},
title = {Machine learning for constituency test of coordinating conjunctions in requirements specifications},
year = {2014},
publisher = {{ACM} Press},
doi = {10.1145/2593801.2593806},
groups = {Formalisation and Specifications},
}
@InProceedings{sultanov2013application,
author = {Hakim Sultanov and Jane Huffman Hayes},
booktitle = {2013 21st {IEEE} International Requirements Engineering Conference ({RE})},
title = {Application of reinforcement learning to requirements engineering: requirements tracing},
year = {2013},
month = {jul},
publisher = {{IEEE}},
doi = {10.1109/re.2013.6636705},
groups = {Formalisation and Specifications},
}
@InProceedings{kaliszyk2015efficient,
author = {Kaliszyk, Cezary and Urban, Josef and Vyskocil, Jir{\'\i}},
booktitle = {Twenty-Fourth International Joint Conference on Artificial Intelligence},
title = {Efficient Semantic Features for Automated Reasoning over Large Theories},
year = {2015},
groups = {Feature Selection},
}
@InCollection{dunkelau2020analysing,
author = {Jannik Dunkelau and Joshua Schmidt and Michael Leuschel},
booktitle = {Rigorous State-Based Methods},
publisher = {Springer International Publishing},
title = {Analysing {ProB}'s Constraint Solving Backends},
year = {2020},
pages = {107--123},
doi = {10.1007/978-3-030-48077-6_8},
groups = {Tooling Analysis and Data Mining},
}
@Article{pira2021using,
author = {Einollah Pira},
journal = {Software Quality Journal},
title = {Using knowledge discovery to propose a two-phase model checking for safety analysis of graph transformations},
year = {2021},
month = {feb},
doi = {10.1007/s11219-020-09542-x},
groups = {Model Checking},
publisher = {Springer Science and Business Media {LLC}},
}
@Article{zhu2019ltl,
author = {Weijun Zhu and Huanmei Wu and Miaolei Deng},
journal = {{IEEE} Access},
title = {{LTL} Model Checking Based on Binary Classification of Machine Learning},
year = {2019},
pages = {135703--135719},
volume = {7},
doi = {10.1109/access.2019.2942762},
groups = {Model Checking},
publisher = {Institute of Electrical and Electronics Engineers ({IEEE})},
}
@InCollection{bortolussi2015machine,
author = {Luca Bortolussi and Dimitrios Milios and Guido Sanguinetti},
booktitle = {Runtime Verification},
publisher = {Springer International Publishing},
title = {Machine Learning Methods in Statistical Model Checking and System Design {\textendash} Tutorial},
year = {2015},
pages = {323--341},
doi = {10.1007/978-3-319-23820-3_23},
groups = {Model Checking},
}
@InCollection{brazdil2014verification,
author = {Tom{\'{a}}{\v{s}} Br{\'{a}}zdil and Krishnendu Chatterjee and Martin Chmel{\'{\i}}k and Vojt{\v{e}}ch Forejt and Jan K{\v{r}}et{\'{\i}}nsk{\'{y}} and Marta Kwiatkowska and David Parker and Mateusz Ujma},
booktitle = {Automated Technology for Verification and Analysis},
publisher = {Springer International Publishing},
title = {Verification of Markov Decision Processes Using Learning Algorithms},
year = {2014},
pages = {98--114},
doi = {10.1007/978-3-319-11936-6_8},
groups = {Model Checking},
}
@InProceedings{poulding2015heuristic,
author = {Simon Poulding and Robert Feldt},
booktitle = {Proceedings of the 2015 Annual Conference on Genetic and Evolutionary Computation},
title = {Heuristic Model Checking using a Monte-Carlo Tree Search Algorithm},
year = {2015},
month = {jul},
publisher = {{ACM}},
doi = {10.1145/2739480.2754767},
groups = {Model Checking},
}
@Article{yousefian2014heuristic,
author = {Rosa Yousefian and Vahid Rafe and Mohsen Rahmani},
journal = {Applied Soft Computing},
title = {A heuristic solution for model checking graph transformation systems},
year = {2014},
month = {nov},
pages = {169--180},
volume = {24},
doi = {10.1016/j.asoc.2014.06.055},
groups = {Model Checking},
publisher = {Elsevier {BV}},
}
@InCollection{behjati2010bounded,
author = {Razieh Behjati and Marjan Sirjani and Majid Nili Ahmadabadi},
booktitle = {Fundamentals of Software Engineering},
publisher = {Springer Berlin Heidelberg},
title = {Bounded Rational Search for On-the-Fly Model Checking of {LTL} Properties},
year = {2010},
pages = {292--307},
doi = {10.1007/978-3-642-11623-0_17},
groups = {Model Checking},
}
@InProceedings{alba2008finding,
author = {Enrique Alba and Francisco Chicano and Marco Ferreira and Juan Gomez-Pulido},
booktitle = {Proceedings of the 10th annual conference on Genetic and evolutionary computation - {GECCO} {\textquotesingle}08},
title     = {Finding deadlocks in large concurrent {Java} programs using genetic algorithms},
year = {2008},
publisher = {{ACM} Press},
doi = {10.1145/1389095.1389432},
groups = {Model Checking},
}
@Article{godefroid2004exploring,
author = {Patrice Godefroid and Sarfraz Khurshid},
journal = {International Journal on Software Tools for Technology Transfer},
title = {Exploring very large state spaces using genetic algorithms},
year = {2004},
month = {apr},
number = {2},
pages = {117--127},
volume = {6},
doi = {10.1007/s10009-004-0141-1},
groups = {Model Checking},
publisher = {Springer Science and Business Media {LLC}},
}
@Article{buccafurri1999enhancing,
author = {Francesco Buccafurri and Thomas Eiter and Georg Gottlob and Nicola Leone},
journal = {Artificial Intelligence},
title = {Enhancing model checking in verification by {AI} techniques},
year = {1999},
month = {aug},
number = {1-2},
pages = {57--104},
volume = {112},
doi = {10.1016/s0004-3702(99)00039-9},
groups = {Model Checking},
publisher = {Elsevier {BV}},
}
@Article{hasanbeig2018logically,
author = {Mohammadhosein Hasanbeig and Alessandro Abate and Daniel Kroening},
title = {Logically-Constrained Reinforcement Learning},
year = {2018},
month = jan,
abstract = {We present the first model-free Reinforcement Learning (RL) algorithm to synthesise policies for an unknown Markov Decision Process (MDP), such that a linear time property is satisfied. The given temporal property is converted into a Limit Deterministic Buchi Automaton (LDBA) and a robust reward function is defined over the state-action pairs of the MDP according to the resulting LDBA. With this reward function, the policy synthesis procedure is "constrained" by the given specification. These constraints guide the MDP exploration so as to minimize the solution time by only considering the portion of the MDP that is relevant to satisfaction of the LTL property. This improves performance and scalability of the proposed method by avoiding an exhaustive update over the whole state space while the efficiency of standard methods such as dynamic programming is hindered by excessive memory requirements, caused by the need to store a full-model in memory. Additionally, we show that the RL procedure sets up a local value iteration method to efficiently calculate the maximum probability of satisfying the given property, at any given state of the MDP. We prove that our algorithm is guaranteed to find a policy whose traces probabilistically satisfy the LTL property if such a policy exists, and additionally we show that our method produces reasonable control policies even when the LTL property cannot be satisfied. The performance of the algorithm is evaluated via a set of numerical examples. We observe an improvement of one order of magnitude in the number of iterations required for the synthesis compared to existing approaches.},
archiveprefix = {arXiv},
eprint = {1801.08099},
file = {:http\://arxiv.org/pdf/1801.08099v8:PDF},
groups = {Model Checking},
keywords = {cs.LG, cs.LO},
primaryclass = {cs.LG},
}
@InCollection{rawson2019neurally,
author = {Michael Rawson and Giles Reger},
booktitle = {Frontiers of Combining Systems},
publisher = {Springer International Publishing},
title = {A Neurally-Guided, Parallel Theorem Prover},
year = {2019},
pages = {40--56},
doi = {10.1007/978-3-030-29007-8_3},
groups = {Automated Theorem Proving},
}
@InCollection{selsam2019guiding,
author = {Daniel Selsam and Nikolaj Bj{\o}rner},
booktitle = {Lecture Notes in Computer Science},
publisher = {Springer International Publishing},
title = {Guiding High-Performance {SAT} Solvers with Unsat-Core Predictions},
year = {2019},
pages = {336--353},
doi = {10.1007/978-3-030-24258-9_24},
groups = {Automated Theorem Proving},
}
@Article{blanchette2016learning,
author = {Jasmin Christian Blanchette and David Greenaway and Cezary Kaliszyk and Daniel Kühlwein and Josef Urban},
journal = {Journal of Automated Reasoning},
title = {A Learning-Based Fact Selector for Isabelle/{HOL}},
year = {2016},
month = {feb},
number = {3},
pages = {219--244},
volume = {57},
doi = {10.1007/s10817-016-9362-8},
groups = {Automated Theorem Proving},
publisher = {Springer Science and Business Media {LLC}},
}
@InCollection{liang2016learning,
author = {Jia Hui Liang and Vijay Ganesh and Pascal Poupart and Krzysztof Czarnecki},
booktitle = {Theory and Applications of Satisfiability Testing {\textendash} {SAT} 2016},
publisher = {Springer International Publishing},
title = {Learning Rate Based Branching Heuristic for {SAT} Solvers},
year = {2016},
pages = {123--140},
doi = {10.1007/978-3-319-40970-2_9},
groups = {Automated Theorem Proving},
}
@InCollection{faerber2015random,
author = {Michael Färber and Cezary Kaliszyk},
booktitle = {Frontiers of Combining Systems},
publisher = {Springer International Publishing},
title = {Random Forests for Premise Selection},
year = {2015},
pages = {325--340},
doi = {10.1007/978-3-319-24246-0_20},
groups = {Automated Theorem Proving},
}
@Article{kaliszyk2015learning,
author = {Cezary Kaliszyk and Josef Urban},
journal = {Journal of Symbolic Computation},
title = {Learning-assisted theorem proving with millions of lemmas},
year = {2015},
month = {jul},
pages = {109--128},
volume = {69},
doi = {10.1016/j.jsc.2014.09.032},
groups = {Automated Theorem Proving},
publisher = {Elsevier {BV}},
}
@InCollection{kuehlwein2012overview,
author = {Daniel Kühlwein and Twan van Laarhoven and Evgeni Tsivtsivadze and Josef Urban and Tom Heskes},
booktitle = {Automated Reasoning},
publisher = {Springer Berlin Heidelberg},
title = {Overview and Evaluation of Premise Selection Techniques for Large Theory Mathematics},
year = {2012},
pages = {378--392},
doi = {10.1007/978-3-642-31365-3_30},
groups = {Automated Theorem Proving, Surveys},
}
@Article{selsam2018learning,
author = {Daniel Selsam and Matthew Lamm and Benedikt Bünz and Percy Liang and Leonardo de Moura and David L. Dill},
title         = {Learning a {SAT} Solver from Single-Bit Supervision},
year = {2018},
month = feb,
abstract = {We present NeuroSAT, a message passing neural network that learns to solve SAT problems after only being trained as a classifier to predict satisfiability. Although it is not competitive with state-of-the-art SAT solvers, NeuroSAT can solve problems that are substantially larger and more difficult than it ever saw during training by simply running for more iterations. Moreover, NeuroSAT generalizes to novel distributions; after training only on random SAT problems, at test time it can solve SAT problems encoding graph coloring, clique detection, dominating set, and vertex cover problems, all on a range of distributions over small random graphs.},
archiveprefix = {arXiv},
eprint = {1802.03685},
file = {:http\://arxiv.org/pdf/1802.03685v4:PDF},
groups = {Automated Theorem Proving},
keywords = {cs.AI, cs.LG, cs.LO},
primaryclass = {cs.AI},
}
@Article{lederman2018learning,
author = {Gil Lederman and Markus N. Rabe and Edward A. Lee and Sanjit A. Seshia},
title = {Learning Heuristics for Quantified Boolean Formulas through Deep Reinforcement Learning},
year = {2018},
month = jul,
abstract = {We demonstrate how to learn efficient heuristics for automated reasoning algorithms for quantified Boolean formulas through deep reinforcement learning. We focus on a backtracking search algorithm, which can already solve formulas of impressive size - up to hundreds of thousands of variables. The main challenge is to find a representation of these formulas that lends itself to making predictions in a scalable way. For a family of challenging problems, we learned a heuristic that solves significantly more formulas compared to the existing handwritten heuristics.},
archiveprefix = {arXiv},
eprint = {1807.08058},
file = {:http\://arxiv.org/pdf/1807.08058v3:PDF},
groups = {Automated Theorem Proving},
keywords = {cs.LO, cs.AI, cs.LG},
primaryclass = {cs.LO},
}
@InCollection{urban2013theorem,
author = {Josef Urban and Ji{\v{r}}{\'{\i}} Vysko{\v{c}}il},
booktitle = {Automated Reasoning and Mathematics},
publisher = {Springer Berlin Heidelberg},
title = {Theorem Proving in Large Formal Mathematics as an Emerging {AI} Field},
year = {2013},
pages = {240--257},
doi = {10.1007/978-3-642-36675-8_13},
groups = {Automated Theorem Proving},
}
@Article{kaliszyk2013stronger,
author = {Kaliszyk, Cezary and Urban, Josef},
title = {Stronger automation for {Flyspeck} by feature weighting and strategy evolution},
year = {2013},
groups = {Automated Theorem Proving},
publisher = {EasyChair},
url = {https://hdl.handle.net/2066/119984},
}
@Unpublished{goldman2015employing,
author = {Goldman, Robert P and Boldt, Michael W and Musliner, David J},
note = {Available at \url{https://www.sift.net/publications/employing-ai-techniques-probabilistic-model-checking}},
title = {Employing AI Techniques in Probabilistic Model Checking Position Paper},
year = {2015},
groups = {Automated Theorem Proving, Model Checking},
url = {https://www.sift.net/publications/employing-ai-techniques-probabilistic-model-checking},
}
@InCollection{blanchette2014survey,
author = {Blanchette, Jasmin Christian and K{\"u}hlwein, Daniel},
booktitle = {Infinity, Computability, and Metamathematics: Festschrift Celebrating the 60th Birthdays of Peter Koepke and Philip Welch},
publisher = {Tributes, College Publications},
title = {A Survey of Axiom Selection as a Machine Learning Problem},
year = {2014},
eprint = {https://www21.in.tum.de/~blanchet/axiom_sel.pdf},
groups = {Automated Theorem Proving, Surveys},
}
@InProceedings{NEURIPS2018_55acf853,
author = {Kaliszyk, Cezary and Urban, Josef and Michalewski, Henryk and Ol\v{s}\'{a}k, Miroslav},
booktitle = {Advances in Neural Information Processing Systems},
title = {Reinforcement Learning of Theorem Proving},
year = {2018},
editor = {S. Bengio and H. Wallach and H. Larochelle and K. Grauman and N. Cesa-Bianchi and R. Garnett},
publisher = {Curran Associates, Inc.},
volume = {31},
eprint = {https://proceedings.neurips.cc/paper/2018/file/55acf8539596d25624059980986aaa78-Paper.pdf},
groups = {Automated Theorem Proving},
}
@InProceedings{blanchette:hal-02381430,
author = {Blanchette, Jasmin Christian and Ouraoui, Daniel El and Fontaine, Pascal and Kaliszyk, Cezary},
booktitle = {{AITP 2019 - 4th Conference on Artificial Intelligence and Theorem Proving}},
title = {{Machine Learning for Instance Selection in SMT Solving}},
year = {2019},
address = {Obergurgl, Austria},
month     = apr,
eprint = {https://hal.archives-ouvertes.fr/hal-02381430/document},
groups = {Automated Theorem Proving},
pdf = {https://hal.archives-ouvertes.fr/hal-02381430/file/paper.pdf},
url = {https://hal.archives-ouvertes.fr/hal-02381430},
}
@PhdThesis{Lederman:EECS-2021-135,
author = {Lederman, Gil},
school = {EECS Department, University of California, Berkeley},
title = {Neural Guidance in Constraint Solvers},
year = {2021},
month = {May},
groups = {Automated Theorem Proving},
number = {UCB/EECS-2021-135},
url = {http://www2.eecs.berkeley.edu/Pubs/TechRpts/2021/EECS-2021-135.html},
}
@MastersThesis{Bavishi:EECS-2019-82,
author = {Bavishi, Rohan},
school = {EECS Department, University of California, Berkeley},
title = {Neural-Backed Generators for Program Synthesis},
year = {2019},
month = {May},
groups = {Synthesis and Repair},
number = {UCB/EECS-2019-82},
url = {http://www2.eecs.berkeley.edu/Pubs/TechRpts/2019/EECS-2019-82.html},
}
@InCollection{fedyukovich2019quantified,
author = {Grigory Fedyukovich and Sumanth Prabhu and Kumar Madhukar and Aarti Gupta},
booktitle = {Computer Aided Verification},
publisher = {Springer International Publishing},
title = {Quantified Invariants via Syntax-Guided Synthesis},
year = {2019},
pages = {259--277},
doi = {10.1007/978-3-030-25540-4_14},
groups = {Invariant Learning},
}
@InCollection{zhang2020synthesizing,
author = {Hongce Zhang and Weikun Yang and Grigory Fedyukovich and Aarti Gupta and Sharad Malik},
booktitle = {Lecture Notes in Computer Science},
publisher = {Springer International Publishing},
title = {Synthesizing Environment Invariants for Modular Hardware Verification},
year = {2020},
pages = {202--225},
doi = {10.1007/978-3-030-39322-9_10},
groups = {Invariant Learning},
}
@InProceedings{mordvinov2019property,
author = {Dmitry Mordvinov and Grigory Fedyukovich},
booktitle = {2019 Formal Methods in Computer Aided Design ({FMCAD})},
title = {Property Directed Inference of Relational Invariants},
year = {2019},
month = {oct},
publisher = {{IEEE}},
doi = {10.23919/fmcad.2019.8894274},
groups = {Invariant Learning},
}
@InCollection{fedyukovich2018accelerating,
author = {Grigory Fedyukovich and Rastislav Bod{\'{\i}}k},
booktitle = {Tools and Algorithms for the Construction and Analysis of Systems},
publisher = {Springer International Publishing},
title = {Accelerating Syntax-Guided Invariant Synthesis},
year = {2018},
pages = {251--269},
doi = {10.1007/978-3-319-89960-2_14},
groups = {Invariant Learning},
}
@InProceedings{fedyukovich2018solving,
author = {Grigory Fedyukovich and Sumanth Prabhu and Kumar Madhukar and Aarti Gupta},
booktitle = {2018 Formal Methods in Computer Aided Design ({FMCAD})},
title = {Solving Constrained Horn Clauses Using Syntax and Data},
year = {2018},
month = {oct},
publisher = {{IEEE}},
doi = {10.23919/fmcad.2018.8603011},
groups = {Invariant Learning},
}
@InCollection{gurfinkel2018quantifiers,
author = {Arie Gurfinkel and Sharon Shoham and Yakir Vizel},
booktitle = {Automated Technology for Verification and Analysis},
publisher = {Springer International Publishing},
title = {Quantifiers on Demand},
year = {2018},
pages = {248--266},
doi = {10.1007/978-3-030-01090-4_15},
groups = {Invariant Learning},
}
@InCollection{prabhu2018efficiently,
author = {Sumanth Prabhu and Kumar Madhukar and R. Venkatesh},
booktitle = {Static Analysis},
publisher = {Springer International Publishing},
title = {Efficiently Learning Safety Proofs from Appearance as well as Behaviours},
year = {2018},
pages = {326--343},
doi = {10.1007/978-3-319-99725-4_20},
groups = {Invariant Learning},
}
@InCollection{vizel2017ic3,
author = {Yakir Vizel and Arie Gurfinkel and Sharon Shoham and Sharad Malik},
booktitle = {Lecture Notes in Computer Science},
publisher = {Springer International Publishing},
title = {{IC}3 - Flipping the E in {ICE}},
year = {2017},
pages = {521--538},
doi = {10.1007/978-3-319-52234-0_28},
groups = {Invariant Learning},
}
@Article{garg2016learning,
author = {Pranav Garg and Daniel Neider and P. Madhusudan and Dan Roth},
journal = {{ACM} {SIGPLAN} Notices},
title = {Learning invariants using decision trees and implication counterexamples},
year = {2016},
month = {apr},
number = {1},
pages = {499--512},
volume = {51},
doi = {10.1145/2914770.2837664},
groups = {Invariant Learning},
publisher = {Association for Computing Machinery ({ACM})},
}
@InCollection{loeding2016abstract,
author = {Christof Löding and P. Madhusudan and Daniel Neider},
booktitle = {Tools and Algorithms for the Construction and Analysis of Systems},
publisher = {Springer Berlin Heidelberg},
title = {Abstract Learning Frameworks for Synthesis},
year = {2016},
pages = {167--185},
doi = {10.1007/978-3-662-49674-9_10},
groups = {Invariant Learning},
}
@InCollection{garg2014icearobustframeworkforlearninginvariants,
author = {Pranav Garg and Christof Löding and P. Madhusudan and Daniel Neider},
booktitle = {Computer Aided Verification},
publisher = {Springer International Publishing},
title     = {{ICE}: A Robust Framework for Learning Invariants},
year = {2014},
pages = {69--87},
doi = {10.1007/978-3-319-08867-9_5},
groups = {Invariant Learning},
}
@InCollection{garg2013learninguniversallyquantifiedinvariants,
author = {Pranav Garg and Christof Löding and P. Madhusudan and Daniel Neider},
booktitle = {Computer Aided Verification},
publisher = {Springer Berlin Heidelberg},
title     = {Learning Universally Quantified Invariants of Linear Data Structures},
year = {2013},
pages = {813--829},
doi = {10.1007/978-3-642-39799-8_57},
groups = {Invariant Learning},
}
@InCollection{sharma2013data,
author = {Rahul Sharma and Saurabh Gupta and Bharath Hariharan and Alex Aiken and Percy Liang and Aditya V. Nori},
booktitle = {Programming Languages and Systems},
publisher = {Springer Berlin Heidelberg},
title = {A Data Driven Approach for Algebraic Loop Invariants},
year = {2013},
pages = {574--592},
doi = {10.1007/978-3-642-37036-6_31},
groups = {Invariant Learning},
}
@InProceedings{NIPS2016_f197002b,
author = {Irving, Geoffrey and Szegedy, Christian and Alemi, Alexander A and Een, Niklas and Chollet, Francois and Urban, Josef},
booktitle = {Advances in Neural Information Processing Systems},
title = {DeepMath - Deep Sequence Models for Premise Selection},
year = {2016},
editor = {D. Lee and M. Sugiyama and U. Luxburg and I. Guyon and R. Garnett},
publisher = {Curran Associates, Inc.},
volume = {29},
groups = {Invariant Learning},
url = {https://proceedings.neurips.cc/paper/2016/file/f197002b9a0853eca5e046d9ca4663d5-Paper.pdf},
}
@Article{shafiq2020machine,
author = {Saad Shafiq and Atif Mashkoor and Christoph Mayr-Dorn and Alexander Egyed},
title = {Machine Learning for Software Engineering: A Systematic Mapping},
year = {2020},
month = may,
abstract = {Context: The software development industry is rapidly adopting machine learning for transitioning modern day software systems towards highly intelligent and self-learning systems. However, the full potential of machine learning for improving the software engineering life cycle itself is yet to be discovered, i.e., up to what extent machine learning can help reducing the effort/complexity of software engineering and improving the quality of resulting software systems. To date, no comprehensive study exists that explores the current state-of-the-art on the adoption of machine learning across software engineering life cycle stages. Objective: This article addresses the aforementioned problem and aims to present a state-of-the-art on the growing number of uses of machine learning in software engineering. Method: We conduct a systematic mapping study on applications of machine learning to software engineering following the standard guidelines and principles of empirical software engineering. Results: This study introduces a machine learning for software engineering (MLSE) taxonomy classifying the state-of-the-art machine learning techniques according to their applicability to various software engineering life cycle stages. Overall, 227 articles were rigorously selected and analyzed as a result of this study. Conclusion: From the selected articles, we explore a variety of aspects that should be helpful to academics and practitioners alike in understanding the potential of adopting machine learning techniques during software engineering projects.},
archiveprefix = {arXiv},
doi = {10.1109/ACCESS.2021.3119746},
eprint = {2005.13299},
file = {:http\://arxiv.org/pdf/2005.13299v1:PDF},
groups = {ML for General Software Development, Surveys},
keywords = {cs.SE, cs.LG},
primaryclass = {cs.SE},
}
@Article{bundy2012ai,
author = {Bundy, Alan and Hutter, Dieter and Jones, Cliff B. and Moore, J Strother},
title = {AI meets Formal Software Development (Dagstuhl Seminar 12271)},
year = {2012},
doi = {10.4230/DAGREP.2.7.1},
groups = {AI for Formal Software Development},
keywords = {Computer Science, 000 Computer science, knowledge, general works},
language = {en},
publisher = {Schloss Dagstuhl - Leibniz-Zentrum fuer Informatik GmbH, Wadern/Saarbruecken, Germany},
}
@InCollection{bak2020improved,
author = {Stanley Bak and Hoang-Dung Tran and Kerianne Hobbs and Taylor T. Johnson},
booktitle = {Computer Aided Verification},
publisher = {Springer International Publishing},
title = {Improved Geometric Path Enumeration for Verifying {ReLU} Neural Networks},
year = {2020},
pages = {66--96},
doi = {10.1007/978-3-030-53288-8_4},
groups = {Neural Networks},
}
@Article{tran2020nnv,
author = {Hoang-Dung Tran and Xiaodong Yang and Diego Manzanas Lopez and Patrick Musau and Luan Viet Nguyen and Weiming Xiang and Stanley Bak and Taylor T. Johnson},
title = {NNV: The Neural Network Verification Tool for Deep Neural Networks and Learning-Enabled Cyber-Physical Systems},
year = {2020},
month = apr,
abstract = {This paper presents the Neural Network Verification (NNV) software tool, a set-based verification framework for deep neural networks (DNNs) and learning-enabled cyber-physical systems (CPS). The crux of NNV is a collection of reachability algorithms that make use of a variety of set representations, such as polyhedra, star sets, zonotopes, and abstract-domain representations. NNV supports both exact (sound and complete) and over-approximate (sound) reachability algorithms for verifying safety and robustness properties of feed-forward neural networks (FFNNs) with various activation functions. For learning-enabled CPS, such as closed-loop control systems incorporating neural networks, NNV provides exact and over-approximate reachability analysis schemes for linear plant models and FFNN controllers with piecewise-linear activation functions, such as ReLUs. For similar neural network control systems (NNCS) that instead have nonlinear plant models, NNV supports over-approximate analysis by combining the star set analysis used for FFNN controllers with zonotope-based analysis for nonlinear plant dynamics building on CORA. We evaluate NNV using two real-world case studies: the first is safety verification of ACAS Xu networks and the second deals with the safety verification of a deep learning-based adaptive cruise control system.},
archiveprefix = {arXiv},
eprint = {2004.05519},
file = {:http\://arxiv.org/pdf/2004.05519v1:PDF},
groups = {Neural Networks},
keywords = {eess.SY, cs.LG, cs.SY},
primaryclass = {eess.SY},
}
@Article{tran2020verification,
author = {Hoang-Dung Tran and Stanley Bak and Weiming Xiang and Taylor T. Johnson},
title = {Verification of Deep Convolutional Neural Networks Using ImageStars},
year = {2020},
month = apr,
abstract = {Convolutional Neural Networks (CNN) have redefined the state-of-the-art in many real-world applications, such as facial recognition, image classification, human pose estimation, and semantic segmentation. Despite their success, CNNs are vulnerable to adversarial attacks, where slight changes to their inputs may lead to sharp changes in their output in even well-trained networks. Set-based analysis methods can detect or prove the absence of bounded adversarial attacks, which can then be used to evaluate the effectiveness of neural network training methodology. Unfortunately, existing verification approaches have limited scalability in terms of the size of networks that can be analyzed. In this paper, we describe a set-based framework that successfully deals with real-world CNNs, such as VGG16 and VGG19, that have high accuracy on ImageNet. Our approach is based on a new set representation called the ImageStar, which enables efficient exact and over-approximative analysis of CNNs. ImageStars perform efficient set-based analysis by combining operations on concrete images with linear programming (LP). Our approach is implemented in a tool called NNV, and can verify the robustness of VGG networks with respect to a small set of input states, derived from adversarial attacks, such as the DeepFool attack. The experimental results show that our approach is less conservative and faster than existing zonotope methods, such as those used in DeepZ, and the polytope method used in DeepPoly.},
archiveprefix = {arXiv},
eprint = {2004.05511},
file = {:http\://arxiv.org/pdf/2004.05511v2:PDF},
groups = {Neural Networks},
keywords = {cs.LG, cs.CV},
primaryclass = {cs.LG},
}
@Article{wu2020game,
author = {Min Wu and Matthew Wicker and Wenjie Ruan and Xiaowei Huang and Marta Kwiatkowska},
journal = {Theoretical Computer Science},
title = {A game-based approximate verification of deep neural networks with provable guarantees},
year = {2020},
month = {feb},
pages = {298--329},
volume = {807},
doi = {10.1016/j.tcs.2019.05.046},
groups = {Neural Networks},
publisher = {Elsevier {BV}},
}
@Article{dreossi2019formalization,
author = {Tommaso Dreossi and Shromona Ghosh and Alberto Sangiovanni-Vincentelli and Sanjit A. Seshia},
title = {A Formalization of Robustness for Deep Neural Networks},
year = {2019},
month = mar,
abstract = {Deep neural networks have been shown to lack robustness to small input perturbations. The process of generating the perturbations that expose the lack of robustness of neural networks is known as adversarial input generation. This process depends on the goals and capabilities of the adversary, In this paper, we propose a unifying formalization of the adversarial input generation process from a formal methods perspective. We provide a definition of robustness that is general enough to capture different formulations. The expressiveness of our formalization is shown by modeling and comparing a variety of adversarial attack techniques.},
archiveprefix = {arXiv},
eprint = {1903.10033},
file = {:http\://arxiv.org/pdf/1903.10033v1:PDF},
groups = {Neural Networks},
keywords = {cs.LG},
primaryclass = {cs.LG},
}
@Article{huang2018survey,
author = {Xiaowei Huang and Daniel Kroening and Wenjie Ruan and James Sharp and Youcheng Sun and Emese Thamo and Min Wu and Xinping Yi},
title = {A Survey of Safety and Trustworthiness of Deep Neural Networks: Verification, Testing, Adversarial Attack and Defence, and Interpretability},
year = {2018},
month = dec,
abstract = {In the past few years, significant progress has been made on deep neural networks (DNNs) in achieving human-level performance on several long-standing tasks. With the broader deployment of DNNs on various applications, the concerns over their safety and trustworthiness have been raised in public, especially after the widely reported fatal incidents involving self-driving cars. Research to address these concerns is particularly active, with a significant number of papers released in the past few years. This survey paper conducts a review of the current research effort into making DNNs safe and trustworthy, by focusing on four aspects: verification, testing, adversarial attack and defence, and interpretability. In total, we survey 202 papers, most of which were published after 2017.},
archiveprefix = {arXiv},
eprint = {1812.08342},
file = {:http\://arxiv.org/pdf/1812.08342v5:PDF},
groups = {Neural Networks, Surveys},
keywords = {cs.LG, cs.AI, I.2; F.3.1},
primaryclass = {cs.LG},
}
@InCollection{katz2019marabou,
author = {Guy Katz and Derek A. Huang and Duligur Ibeling and Kyle Julian and Christopher Lazarus and Rachel Lim and Parth Shah and Shantanu Thakoor and Haoze Wu and Aleksandar Zelji{\'{c}} and David L. Dill and Mykel J. Kochenderfer and Clark Barrett},
booktitle = {Computer Aided Verification},
publisher = {Springer International Publishing},
title = {The Marabou Framework for Verification and Analysis of Deep Neural Networks},
year = {2019},
pages = {443--452},
doi = {10.1007/978-3-030-25540-4_26},
groups = {Neural Networks},
}
@Article{liu2019algorithms,
author = {Changliu Liu and Tomer Arnon and Christopher Lazarus and Christopher Strong and Clark Barrett and Mykel J. Kochenderfer},
title = {Algorithms for Verifying Deep Neural Networks},
year = {2019},
month = mar,
abstract = {Deep neural networks are widely used for nonlinear function approximation with applications ranging from computer vision to control. Although these networks involve the composition of simple arithmetic operations, it can be very challenging to verify whether a particular network satisfies certain input-output properties. This article surveys methods that have emerged recently for soundly verifying such properties. These methods borrow insights from reachability analysis, optimization, and search. We discuss fundamental differences and connections between existing algorithms. In addition, we provide pedagogical implementations of existing methods and compare them on a set of benchmark problems.},
archiveprefix = {arXiv},
eprint = {1903.06758},
file = {:http\://arxiv.org/pdf/1903.06758v2:PDF},
groups = {Neural Networks},
keywords = {cs.LG, stat.ML},
primaryclass = {cs.LG},
}
@InProceedings{217595,
author = {Shiqi Wang and Kexin Pei and Justin Whitehouse and Junfeng Yang and Suman Jana},
booktitle = {27th USENIX Security Symposium (USENIX Security 18)},
title = {Formal Security Analysis of Neural Networks using Symbolic Intervals},
year = {2018},
address = {Baltimore, MD},
month = aug,
pages = {1599--1614},
publisher = {USENIX Association},
groups = {Neural Networks},
isbn = {978-1-939133-04-5},
url = {https://www.usenix.org/conference/usenixsecurity18/presentation/wang-shiqi},
}
@Article{pei2019bringing,
author = {Kexin Pei and Shiqi Wang and Yuchi Tian and Justin Whitehouse and Carl Vondrick and Yinzhi Cao and Baishakhi Ray and Suman Jana and Junfeng Yang},
journal = {{ACM} {SIGOPS} Operating Systems Review},
title = {Bringing Engineering Rigor to Deep Learning},
year = {2019},
month = {jul},
number = {1},
pages = {59--67},
volume = {53},
doi = {10.1145/3352020.3352030},
groups = {Neural Networks},
publisher = {Association for Computing Machinery ({ACM})},
}
@InCollection{tran2019star,
author    = {Hoang-Dung Tran and Diego Manzanas Lopez and Patrick Musau and Xiaodong Yang and Luan Viet Nguyen and Weiming Xiang and Taylor T. Johnson},
booktitle = {Lecture Notes in Computer Science},
publisher = {Springer International Publishing},
title = {Star-Based Reachability Analysis of Deep Neural Networks},
year = {2019},
pages = {670--686},
doi = {10.1007/978-3-030-30942-8_39},
groups = {Neural Networks},
}
@InCollection{dreossi2018semantic,
author = {Tommaso Dreossi and Somesh Jha and Sanjit A. Seshia},
booktitle = {Computer Aided Verification},
publisher = {Springer International Publishing},
title = {Semantic Adversarial Deep Learning},
year = {2018},
pages = {3--26},
doi = {10.1007/978-3-319-96145-3_1},
groups = {Neural Networks},
}
@InProceedings{gehr2018ai2,
author = {Timon Gehr and Matthew Mirman and Dana Drachsler-Cohen and Petar Tsankov and Swarat Chaudhuri and Martin Vechev},
booktitle = {2018 {IEEE} Symposium on Security and Privacy ({SP})},
title = {{AI}2: Safety and Robustness Certification of Neural Networks with Abstract Interpretation},
year = {2018},
month = {may},
publisher = {{IEEE}},
doi = {10.1109/sp.2018.00058},
groups = {Neural Networks},
}
@Article{ruan2018reachability,
author = {Wenjie Ruan and Xiaowei Huang and Marta Kwiatkowska},
title = {Reachability Analysis of Deep Neural Networks with Provable Guarantees},
year = {2018},
month = may,
abstract = {Verifying correctness of deep neural networks (DNNs) is challenging. We study a generic reachability problem for feed-forward DNNs which, for a given set of inputs to the network and a Lipschitz-continuous function over its outputs, computes the lower and upper bound on the function values. Because the network and the function are Lipschitz continuous, all values in the interval between the lower and upper bound are reachable. We show how to obtain the safety verification problem, the output range analysis problem and a robustness measure by instantiating the reachability problem. We present a novel algorithm based on adaptive nested optimisation to solve the reachability problem. The technique has been implemented and evaluated on a range of DNNs, demonstrating its efficiency, scalability and ability to handle a broader class of networks than state-of-the-art verification approaches.},
archiveprefix = {arXiv},
eprint = {1805.02242},
file = {:http\://arxiv.org/pdf/1805.02242v1:PDF},
groups = {Neural Networks},
keywords = {cs.LG, cs.CV, stat.ML},
primaryclass = {cs.LG},
}
@InCollection{seshia2018formal,
author = {Sanjit A. Seshia and Ankush Desai and Tommaso Dreossi and Daniel J. Fremont and Shromona Ghosh and Edward Kim and Sumukh Shivakumar and Marcell Vazquez-Chanlatte and Xiangyu Yue},
booktitle = {Automated Technology for Verification and Analysis},
publisher = {Springer International Publishing},
title = {Formal Specification for Deep Neural Networks},
year = {2018},
pages = {20--34},
doi = {10.1007/978-3-030-01090-4_2},
groups = {Neural Networks},
}
@InProceedings{pmlr-v80-wong18a,
author = {Wong, Eric and Kolter, Zico},
booktitle = {Proceedings of the 35th International Conference on Machine Learning},
title = {Provable Defenses against Adversarial Examples via the Convex Outer Adversarial Polytope},
year = {2018},
editor = {Dy, Jennifer and Krause, Andreas},
month = {10--15 Jul},
pages = {5286--5295},
publisher = {PMLR},
series = {Proceedings of Machine Learning Research},
volume = {80},
groups = {Neural Networks},
pdf = {http://proceedings.mlr.press/v80/wong18a/wong18a.pdf},
url = {https://proceedings.mlr.press/v80/wong18a.html},
}
@Article{xiang2018specification,
author = {Weiming Xiang and Hoang-Dung Tran and Taylor T. Johnson},
title = {Specification-Guided Safety Verification for Feedforward Neural Networks},
year = {2018},
month = dec,
abstract = {This paper presents a specification-guided safety verification method for feedforward neural networks with general activation functions. As such feedforward networks are memoryless, they can be abstractly represented as mathematical functions, and the reachability analysis of the neural network amounts to interval analysis problems. In the framework of interval analysis, a computationally efficient formula which can quickly compute the output interval sets of a neural network is developed. Then, a specification-guided reachability algorithm is developed. Specifically, the bisection process in the verification algorithm is completely guided by a given safety specification. Due to the employment of the safety specification, unnecessary computations are avoided and thus the computational cost can be reduced significantly. Experiments show that the proposed method enjoys much more efficiency in safety verification with significantly less computational cost.},
archiveprefix = {arXiv},
eprint = {1812.06161},
file = {:http\://arxiv.org/pdf/1812.06161v1:PDF},
groups = {Neural Networks},
keywords = {cs.LG, cs.AI},
primaryclass = {cs.LG},
}
@Article{xiang2018verification,
author = {Weiming Xiang and Patrick Musau and Ayana A. Wild and Diego Manzanas Lopez and Nathaniel Hamilton and Xiaodong Yang and Joel Rosenfeld and Taylor T. Johnson},
title = {Verification for Machine Learning, Autonomy, and Neural Networks Survey},
year = {2018},
month = oct,
abstract = {This survey presents an overview of verification techniques for autonomous systems, with a focus on safety-critical autonomous cyber-physical systems (CPS) and subcomponents thereof. Autonomy in CPS is enabling by recent advances in artificial intelligence (AI) and machine learning (ML) through approaches such as deep neural networks (DNNs), embedded in so-called learning enabled components (LECs) that accomplish tasks from classification to control. Recently, the formal methods and formal verification community has developed methods to characterize behaviors in these LECs with eventual goals of formally verifying specifications for LECs, and this article presents a survey of many of these recent approaches.},
archiveprefix = {arXiv},
eprint = {1810.01989},
file = {:http\://arxiv.org/pdf/1810.01989v1:PDF},
groups = {Neural Networks, Surveys},
keywords = {cs.AI, cs.LG},
primaryclass = {cs.AI},
}
@Article{dutta2017output,
author        = {Souradeep Dutta and Susmit Jha and Sriram Sankaranarayanan and Ashish Tiwari},
title = {Output Range Analysis for Deep Neural Networks},
year = {2017},
month = sep,
abstract = {Deep neural networks (NN) are extensively used for machine learning tasks such as image classification, perception and control of autonomous systems. Increasingly, these deep NNs are also been deployed in high-assurance applications. Thus, there is a pressing need for developing techniques to verify neural networks to check whether certain user-expected properties are satisfied. In this paper, we study a specific verification problem of computing a guaranteed range for the output of a deep neural network given a set of inputs represented as a convex polyhedron. Range estimation is a key primitive for verifying deep NNs. We present an efficient range estimation algorithm that uses a combination of local search and linear programming problems to efficiently find the maximum and minimum values taken by the outputs of the NN over the given input set. In contrast to recently proposed "monolithic" optimization approaches, we use local gradient descent to repeatedly find and eliminate local minima of the function. The final global optimum is certified using a mixed integer programming instance. We implement our approach and compare it with Reluplex, a recently proposed solver for deep neural networks. We demonstrate the effectiveness of the proposed approach for verification of NNs used in automated control as well as those used in classification.},
archiveprefix = {arXiv},
eprint = {1709.09130},
file = {:http\://arxiv.org/pdf/1709.09130v1:PDF},
groups = {Neural Networks},
keywords = {cs.LG, stat.ML},
primaryclass = {cs.LG},
}
@InCollection{huang2017safety,
author = {Xiaowei Huang and Marta Kwiatkowska and Sen Wang and Min Wu},
booktitle = {Computer Aided Verification},
publisher = {Springer International Publishing},
title = {Safety Verification of Deep Neural Networks},
year = {2017},
pages = {3--29},
doi = {10.1007/978-3-319-63387-9_1},
groups = {Neural Networks},
}
@InCollection{katz2017reluplex,
author = {Guy Katz and Clark Barrett and David L. Dill and Kyle Julian and Mykel J. Kochenderfer},
booktitle = {Computer Aided Verification},
publisher = {Springer International Publishing},
title = {Reluplex: An Efficient {SMT} Solver for Verifying Deep Neural Networks},
year = {2017},
pages = {97--117},