diff --git a/baselines/BART/.gitignore b/baselines/BART/.gitignore new file mode 100644 index 0000000..8b13789 --- /dev/null +++ b/baselines/BART/.gitignore @@ -0,0 +1 @@ + diff --git a/baselines/BART/ans_aft_emo_expla_gen_eval.py b/baselines/BART/ans_aft_emo_expla_gen_eval.py new file mode 100644 index 0000000..748b32e --- /dev/null +++ b/baselines/BART/ans_aft_emo_expla_gen_eval.py @@ -0,0 +1,276 @@ +import os +import json +import random +from tqdm import tqdm +import pickle as pkl +import numpy as np +import pandas as pd +from sklearn.metrics import accuracy_score, precision_recall_fscore_support +import torch +from transformers import ( + AutoModelForSeq2SeqLM, + AutoTokenizer +) +from datasets import load_dataset +import evaluate +from nltk.tokenize import word_tokenize +from BARTScore.bart_score import BARTScorer +device = torch.device('cuda') +print("###################################################################################################") +print("Using {} ".format(device)) +print("###################################################################################################") +SEED = 0 +random.seed(SEED) +np.random.seed(SEED) +torch.manual_seed(SEED) + +data_path = 'data/' +logs_dir = 'logs/' +output_dir = 'EmoDialog/' +subtask = 'ans_aft_expl_gen_emo_gen_emo1_emo2_cap1_cap2_conv_gen_cap_1' + +if(subtask.endswith('gen_cap_1')): + task = 'image_blip_text_' + subtask +else: + task = 'text_only_' + subtask + +modelname = 'facebook/bart-large' + +if(subtask.endswith('gen_cap_1')): + numepochs = 25 +else: + numepochs = 5 + +max_target_length = 50 +if('conv' in subtask): + max_source_length = 350 +else: + max_source_length = 150 + +print("###################################################################################################") +print("Max sentence length {} ".format(max_source_length)) +print("###################################################################################################") + +special_tokens = {'additional_special_tokens': ['', '']} + +if(modelname == 't5-small'): + savename = 't5_small' + test_batch_size = 32 + train_batch_size = 32 +elif(modelname == 't5-base'): + savename = 't5_base' + test_batch_size = 8 + train_batch_size = 8 +elif(modelname == 't5-large'): + savename = 't5_large' + test_batch_size = 32 + train_batch_size = 16 +elif(modelname == 't5-11b'): + savename = 't5_11b' + test_batch_size = 16 + train_batch_size = 16 +elif(modelname == 'facebook/bart-base'): + savename = 'bart_small' + test_batch_size = 64 + train_batch_size = 32 +elif(modelname == 'facebook/bart-large'): + savename = 'bart_large' + test_batch_size = 32 + train_batch_size = 32 +elif(modelname == 'facebook/opt-1.3b'): + savename = 'opt_1.3b' + test_batch_size = 16 + train_batch_size = 16 +elif(modelname == 'facebook/opt-350m'): + savename = 'opt_350m' + test_batch_size = 32 + train_batch_size = 32 + +save_weights = os.path.join(output_dir, 'weights', savename, task, str(numepochs), + str(max_source_length) + '_' + str(max_target_length)) +if(os.path.isdir(os.path.join(output_dir, 'weights', savename)) == False): + os.mkdir(os.path.join(output_dir, 'weights', savename)) + +if(os.path.isdir(os.path.join(output_dir, 'weights', savename, task)) == False): + os.mkdir(os.path.join(output_dir, 'weights', savename, task)) + +if(os.path.isdir(os.path.join(output_dir, 'weights', savename, task, str(numepochs))) == False): + os.mkdir(os.path.join(output_dir, 'weights', savename, task, str(numepochs))) + +if(os.path.isdir(save_weights) == False): + os.mkdir(save_weights) + 
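+# The nested isdir/mkdir checks above build the output tree
+# EmoDialog/weights/<savename>/<task>/<numepochs>/<max_source>_<max_target> one level at a time;
+# os.makedirs(save_weights, exist_ok=True) would do the same in a single call.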
+print("###################################################################################################") +print("Loading from {} ".format(save_weights)) +print("###################################################################################################") + +test_file = data_path + 'test_' + subtask + '.csv' + +print("###################################################################################################") +print("Testing on {} ".format(test_file)) +print("###################################################################################################") + +extension = test_file.split(".")[-1] +raw_datasets = load_dataset('csv', + data_files={'train':test_file, 'validation': test_file, 'test': test_file}, + ) + +tokenizer = AutoTokenizer.from_pretrained( + save_weights, + cache_dir=logs_dir, + use_fast=True, + revision='main', + use_auth_token=None +) + +num_added_toks = tokenizer.add_special_tokens(special_tokens) + +model = AutoModelForSeq2SeqLM.from_pretrained( + save_weights, + from_tf=False, + cache_dir=logs_dir, + revision='main', + use_auth_token=None, +).to(device) + +model.resize_token_embeddings(len(tokenizer)) + +column_names = raw_datasets["train"].column_names + +text_column = column_names[0] +summary_column = column_names[1] + +min_target_length = 1 +ignore_pad_token_for_loss = True +padding = "max_length" +prefix = "" + +def preprocess_function_test(examples): + inputs = examples[text_column] + targets = examples[summary_column] + inputs = [prefix + inp for inp in inputs] + model_inputs = tokenizer(inputs, max_length=max_source_length, padding=padding, truncation=True) + + # Setup the tokenizer for targets + with tokenizer.as_target_tokenizer(): + labels = tokenizer(targets, max_length=max_target_length, padding=padding, truncation=True) + + model_inputs["labels"] = labels["input_ids"] + return model_inputs + +test_dataset = raw_datasets["test"] +test_dataset = test_dataset.map( + preprocess_function_test, + batched=True, + num_proc=None, + remove_columns=column_names, + load_from_cache_file=None, + desc="Running tokenizer on test dataset", + ) + +test_batches = len(test_dataset) // test_batch_size + 1 +predictions = [] +for i in tqdm(range(test_batches)): + start = i * test_batch_size + end = min((i + 1) * test_batch_size, len(test_dataset)) + output_preds = model.generate( + torch.LongTensor(test_dataset["input_ids"][start : end]).to(device), + max_length=max_target_length, + num_beams=None + ) + output_preds = tokenizer.batch_decode( + output_preds, skip_special_tokens=True, clean_up_tokenization_spaces=True + ) + predictions += output_preds + +predictions = [pred.strip() for pred in predictions] + +test_df = pd.read_csv(test_file) +references = test_df['response'].tolist() + +emotion_dict = {'amusement' : 0, + 'anger' : 1, + 'awe' : 2, + 'contentment' : 3, + 'disgust' : 4, + 'excitement' : 5, + 'fear' : 6, + 'sadness' : 7, + 'neutral' : 8} + +references_emo = [] +references_expl = [] +for i, ref in enumerate(references): + emo = ref.split()[0] + expl = " ".join(ref.split()[2:]) + references_emo.append(int(emotion_dict[emo.strip()])) + references_expl.append(expl) + +predictions_emo = [] +predictions_expl = [] +for i, ref in enumerate(predictions): + if(predictions[i]): + emo = ref.split()[0] + expl = " ".join(ref.split()[2:]) + if(emo.strip() not in emotion_dict): + predictions_emo.append(int((references_emo[i] + 1) % len(emotion_dict))) + else: + predictions_emo.append(int(emotion_dict[emo.strip()])) + predictions_expl.append(expl) + else: + 
predictions_emo.append(int((references_emo[i] + 1) % len(emotion_dict))) + predictions_expl.append('') + +precision, recall, f1, _ = precision_recall_fscore_support(references_emo, predictions_emo, average='weighted') +acc = accuracy_score(references_emo, predictions_emo) * 100 +f1 = f1 * 100 + +print("Accuracy {} and F1 {} ".format(acc, f1)) + +bleu = evaluate.load("bleu") +bleu_results = bleu.compute(predictions=predictions_expl, references=references_expl, tokenizer=word_tokenize) +print("BLEU scores: {} ".format(bleu_results)) + +meteor = evaluate.load("meteor") +meteor_results = meteor.compute(predictions=predictions_expl, references=references_expl) +print("Meteor scores: {} ".format(meteor_results)) + +rouge = evaluate.load("rouge") +rouge_results = rouge.compute(predictions=predictions_expl, references=references_expl) +print("ROUGE scores: {} ".format(rouge_results)) + +bertscore = evaluate.load("bertscore") +bertscore_results = bertscore.compute(predictions=predictions_expl, references=references_expl, lang="en") +bertscore_results = sum(bertscore_results['recall']) / len(predictions) +print("BERTScore: {} ".format(bertscore_results)) + +bart_scorer = BARTScorer(device='cuda:0', checkpoint='facebook/bart-large-cnn') +bart_scorer.load(path='bart_score.pth') +bartscore_results = bart_scorer.score(predictions_expl, references_expl, batch_size=4) +bartscore_results = sum(bartscore_results) / len(bartscore_results) +print("BARTScore: {} ".format(bartscore_results)) + +save_emo_expla = os.path.join(output_dir, 'weights', savename, task, str(numepochs), + str(max_source_length) + '_' + str(max_target_length), 'emo_expla.txt') +with open(save_emo_expla, 'w', encoding='utf-8') as f: + for sen in predictions: + f.write("{}\n".format(sen)) + +all_metrics = {} +all_metrics['accuracy'] = acc +all_metrics['f1-weighted'] = f1 +all_metrics['bleu-1'] = bleu_results['precisions'][0] +all_metrics['bleu-2'] = bleu_results['precisions'][1] +all_metrics['bleu-3'] = bleu_results['precisions'][2] +all_metrics['bleu-4'] = bleu_results['precisions'][3] +all_metrics['avg-bleu'] = bleu_results['bleu'] +all_metrics['rouge'] = rouge_results['rougeL'] +all_metrics['meteor'] = meteor_results['meteor'] +all_metrics['bert-score'] = bertscore_results +all_metrics['bart-score'] = bartscore_results + +save_res_file = os.path.join(output_dir, 'weights', savename, task, str(numepochs), + str(max_source_length) + '_' + str(max_target_length), 'metrics.json') + +with open(save_res_file, 'w') as f: + json.dump(all_metrics, f) diff --git a/baselines/BART/ans_aft_emo_expla_gen_train_mod.py b/baselines/BART/ans_aft_emo_expla_gen_train_mod.py new file mode 100644 index 0000000..ef68215 --- /dev/null +++ b/baselines/BART/ans_aft_emo_expla_gen_train_mod.py @@ -0,0 +1,360 @@ +import os +import json +import random +from tqdm import tqdm +import pickle as pkl +import numpy as np +import pandas as pd +from sklearn.metrics import accuracy_score, precision_recall_fscore_support +import torch +from transformers import ( + AutoModelForSeq2SeqLM, + AutoTokenizer, + DataCollatorForSeq2Seq, + Seq2SeqTrainer, + Seq2SeqTrainingArguments, +) +from datasets import load_dataset +import evaluate +from nltk.tokenize import word_tokenize +from BARTScore.bart_score import BARTScorer +device = torch.device('cuda') +print("###################################################################################################") +print("Using {} ".format(device)) 
+print("###################################################################################################") +SEED = 0 +random.seed(SEED) +np.random.seed(SEED) +torch.manual_seed(SEED) + +data_path = 'data/' +logs_dir = 'logs/' +output_dir = 'EmoDialog/' +subtask = 'ans_aft_expl_gen_emo_gen_emo1_emo2_cap1_cap2_conv_gen_cap_1' + +if(subtask.endswith('gen_cap_1')): + task = 'image_blip_text_' + subtask +else: + task = 'text_only_' + subtask + +modelname = 'facebook/bart-large' + +if(subtask.endswith('gen_cap_1')): + numepochs = 25 +else: + numepochs = 5 + +max_target_length = 50 +if('conv' in subtask): + max_source_length = 350 +else: + max_source_length = 150 + +print("###################################################################################################") +print("Max sentence length {} ".format(max_source_length)) +print("###################################################################################################") + +special_tokens = {'additional_special_tokens': ['', '']} + +if(modelname == 't5-small'): + savename = 't5_small' + test_batch_size = 32 + train_batch_size = 32 +elif(modelname == 't5-base'): + savename = 't5_base' + test_batch_size = 8 + train_batch_size = 8 +elif(modelname == 't5-large'): + savename = 't5_large' + test_batch_size = 32 + train_batch_size = 16 +elif(modelname == 't5-11b'): + savename = 't5_11b' + test_batch_size = 16 + train_batch_size = 16 +elif(modelname == 'facebook/bart-base'): + savename = 'bart_small' + test_batch_size = 64 + train_batch_size = 32 +elif(modelname == 'facebook/bart-large'): + savename = 'bart_large' + test_batch_size = 32 + train_batch_size = 32 +elif(modelname == 'facebook/opt-1.3b'): + savename = 'opt_1.3b' + test_batch_size = 16 + train_batch_size = 16 +elif(modelname == 'facebook/opt-350m'): + savename = 'opt_350m' + test_batch_size = 32 + train_batch_size = 32 + +save_weights = os.path.join(output_dir, 'weights', savename, task, str(numepochs), + str(max_source_length) + '_' + str(max_target_length)) +if(os.path.isdir(os.path.join(output_dir, 'weights', savename)) == False): + os.mkdir(os.path.join(output_dir, 'weights', savename)) + +if(os.path.isdir(os.path.join(output_dir, 'weights', savename, task)) == False): + os.mkdir(os.path.join(output_dir, 'weights', savename, task)) + +if(os.path.isdir(os.path.join(output_dir, 'weights', savename, task, str(numepochs))) == False): + os.mkdir(os.path.join(output_dir, 'weights', savename, task, str(numepochs))) + +if(os.path.isdir(save_weights) == False): + os.mkdir(save_weights) + +print("###################################################################################################") +print("Saving weights to {} ".format(save_weights)) +print("###################################################################################################") + +train_file = data_path + 'train_' + subtask + '.csv' +dev_file = data_path + 'dev_' + subtask + '.csv' +test_file = data_path + 'test_' + subtask + '.csv' + + +print("###################################################################################################") +print("Training on {} ".format(train_file)) +print("###################################################################################################") + +print("###################################################################################################") +print("Testing on {} ".format(test_file)) +print("###################################################################################################") + +extension = train_file.split(".")[-1] 
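+# The first CSV column holds the flattened dialogue/caption input text and the second holds the
+# target 'response' string, typically of the form "<emotion> because <explanation>".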
+raw_datasets = load_dataset('csv', + data_files={'train':train_file, 'validation': dev_file, 'test': test_file}, + ) + +tokenizer = AutoTokenizer.from_pretrained( + modelname, + cache_dir=logs_dir, + use_fast=True, + revision='main', + use_auth_token=None +) + +num_added_toks = tokenizer.add_special_tokens(special_tokens) + +model = AutoModelForSeq2SeqLM.from_pretrained( + modelname, + from_tf=False, + cache_dir=logs_dir, + revision='main', + use_auth_token=None, +) + +model.resize_token_embeddings(len(tokenizer)) + +column_names = raw_datasets["train"].column_names + +text_column = column_names[0] +summary_column = column_names[1] + +min_target_length = 1 +ignore_pad_token_for_loss = True +padding = "max_length" +prefix = "" + +def preprocess_function(examples): + inputs = examples[text_column] + targets = examples[summary_column] + inputs = [prefix + inp for inp in inputs] + model_inputs = tokenizer(inputs, max_length=max_source_length, padding=padding, truncation=True) + + # Setup the tokenizer for targets + with tokenizer.as_target_tokenizer(): + labels = tokenizer(targets, max_length=max_target_length, padding=padding, truncation=True) + + # If we are padding here, replace all tokenizer.pad_token_id in the labels by -100 when we want to ignore + # padding in the loss. + if padding == "max_length" and ignore_pad_token_for_loss: + labels["input_ids"] = [ + [(l if l != tokenizer.pad_token_id else -100) for l in label] for label in labels["input_ids"] + ] + + model_inputs["labels"] = labels["input_ids"] + return model_inputs + +def preprocess_function_test(examples): + inputs = examples[text_column] + targets = examples[summary_column] + inputs = [prefix + inp for inp in inputs] + model_inputs = tokenizer(inputs, max_length=max_source_length, padding=padding, truncation=True) + + # Setup the tokenizer for targets + with tokenizer.as_target_tokenizer(): + labels = tokenizer(targets, max_length=max_target_length, padding=padding, truncation=True) + + model_inputs["labels"] = labels["input_ids"] + return model_inputs + +train_dataset = raw_datasets["train"] + +train_dataset = train_dataset.map( + preprocess_function, + batched=True, + num_proc=None, + remove_columns=column_names, + load_from_cache_file=None, + desc="Running tokenizer on train dataset", + ) + +eval_dataset = raw_datasets["validation"] +eval_dataset = eval_dataset.map( + preprocess_function, + batched=True, + num_proc=None, + remove_columns=column_names, + load_from_cache_file=None, + desc="Running tokenizer on validation dataset", + ) + +test_dataset = raw_datasets["test"] +test_dataset = test_dataset.map( + preprocess_function_test, + batched=True, + num_proc=None, + remove_columns=column_names, + load_from_cache_file=None, + desc="Running tokenizer on test dataset", + ) + +label_pad_token_id = -100 if ignore_pad_token_for_loss else tokenizer.pad_token_id + +data_collator = DataCollatorForSeq2Seq( + tokenizer, + model=model, + label_pad_token_id=label_pad_token_id, + pad_to_multiple_of=32, + ) + +training_args = Seq2SeqTrainingArguments( + output_dir=logs_dir, + num_train_epochs=numepochs, + logging_dir=logs_dir, + predict_with_generate=True, + per_device_train_batch_size=train_batch_size, + per_device_eval_batch_size=test_batch_size, + logging_steps=10000000, + save_steps=1000000 + ) + +trainer = Seq2SeqTrainer( + model=model, + args=training_args, + train_dataset=train_dataset, + eval_dataset=eval_dataset, + tokenizer=tokenizer, + data_collator=data_collator +) + +train_result = trainer.train(resume_from_checkpoint=None) 
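+# Training is followed by batched generation on the test split in this same script; the weights
+# saved to save_weights below are what the matching *_eval.py script reloads for standalone evaluation.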
+trainer.save_model(output_dir=save_weights) + + +test_batches = len(test_dataset) // test_batch_size + 1 +predictions = [] +for i in tqdm(range(test_batches)): + start = i * test_batch_size + end = min((i + 1) * test_batch_size, len(test_dataset)) + output_preds = model.generate( + torch.LongTensor(test_dataset["input_ids"][start : end]).to(device), + max_length=max_target_length, + num_beams=None + ) + output_preds = tokenizer.batch_decode( + output_preds, skip_special_tokens=True, clean_up_tokenization_spaces=True + ) + predictions += output_preds + +predictions = [pred.strip() for pred in predictions] + +test_df = pd.read_csv(test_file) +references = test_df['response'].tolist() + +emotion_dict = {'amusement' : 0, + 'anger' : 1, + 'awe' : 2, + 'contentment' : 3, + 'disgust' : 4, + 'excitement' : 5, + 'fear' : 6, + 'sadness' : 7, + 'neutral' : 8} + +references_emo = [] +references_expl = [] +for i, ref in enumerate(references): + emo = ref.split()[0] + expl = " ".join(ref.split()[2:]) + references_emo.append(int(emotion_dict[emo.strip()])) + references_expl.append(expl) + +predictions_emo = [] +predictions_expl = [] +for i, ref in enumerate(predictions): + if(predictions[i]): + emo = ref.split()[0] + expl = " ".join(ref.split()[2:]) + if(emo.strip() not in emotion_dict): + predictions_emo.append(int((references_emo[i] + 1) % len(emotion_dict))) + else: + predictions_emo.append(int(emotion_dict[emo.strip()])) + predictions_expl.append(expl) + else: + predictions_emo.append(int((references_emo[i] + 1) % len(emotion_dict))) + predictions_expl.append('') + +precision, recall, f1, _ = precision_recall_fscore_support(references_emo, predictions_emo, average='weighted') +acc = accuracy_score(references_emo, predictions_emo) * 100 +f1 = f1 * 100 + +print("Accuracy {} and F1 {} ".format(acc, f1)) + +bleu = evaluate.load("bleu") +bleu_results = bleu.compute(predictions=predictions_expl, references=references_expl, tokenizer=word_tokenize) +print("BLEU scores: {} ".format(bleu_results)) + +meteor = evaluate.load("meteor") +meteor_results = meteor.compute(predictions=predictions_expl, references=references_expl) +print("Meteor scores: {} ".format(meteor_results)) + +rouge = evaluate.load("rouge") +rouge_results = rouge.compute(predictions=predictions_expl, references=references_expl) +print("ROUGE scores: {} ".format(rouge_results)) + +bertscore = evaluate.load("bertscore") +bertscore_results = bertscore.compute(predictions=predictions_expl, references=references_expl, lang="en") +bertscore_results = sum(bertscore_results['recall']) / len(predictions) +print("BERTScore: {} ".format(bertscore_results)) + +bart_scorer = BARTScorer(device='cuda:0', checkpoint='facebook/bart-large-cnn') +bart_scorer.load(path='bart_score.pth') +bartscore_results = bart_scorer.score(predictions_expl, references_expl, batch_size=4) +bartscore_results = sum(bartscore_results) / len(bartscore_results) +print("BARTScore: {} ".format(bartscore_results)) + +save_res = os.path.join(output_dir, 'weights', savename, task, str(numepochs), + str(max_source_length) + '_' + str(max_target_length), 'results.txt') +with open(save_res, 'w', encoding='utf-8') as f: + for sen in predictions_expl: + f.write("{}\n".format(sen)) + +all_metrics = {} +all_metrics['accuracy'] = acc +all_metrics['f1-weighted'] = f1 +all_metrics['bleu-1'] = bleu_results['precisions'][0] +all_metrics['bleu-2'] = bleu_results['precisions'][1] +all_metrics['bleu-3'] = bleu_results['precisions'][2] +all_metrics['bleu-4'] = bleu_results['precisions'][3] 
+all_metrics['avg-bleu'] = bleu_results['bleu'] +all_metrics['rouge'] = rouge_results['rougeL'] +all_metrics['meteor'] = meteor_results['meteor'] +all_metrics['bert-score'] = bertscore_results +all_metrics['bart-score'] = bartscore_results + +save_res_file = os.path.join(output_dir, 'weights', savename, task, str(numepochs), + str(max_source_length) + '_' + str(max_target_length), 'metrics.json') + +with open(save_res_file, 'w') as f: + json.dump(all_metrics, f) diff --git a/baselines/BART/ques_aft_emo_expla_gen_eval.py b/baselines/BART/ques_aft_emo_expla_gen_eval.py new file mode 100644 index 0000000..6ccdfa3 --- /dev/null +++ b/baselines/BART/ques_aft_emo_expla_gen_eval.py @@ -0,0 +1,279 @@ +import os +import json +import random +from tqdm import tqdm +import pickle as pkl +import numpy as np +import pandas as pd +from sklearn.metrics import accuracy_score, precision_recall_fscore_support +import torch +from transformers import ( + AutoModelForSeq2SeqLM, + AutoTokenizer +) +from datasets import load_dataset +import evaluate +from nltk.tokenize import word_tokenize +from BARTScore.bart_score import BARTScorer +device = torch.device('cuda') +print("###################################################################################################") +print("Using {} ".format(device)) +print("###################################################################################################") +SEED = 0 +random.seed(SEED) +np.random.seed(SEED) +torch.manual_seed(SEED) + +data_path = 'data/' +logs_dir = 'logs/' +output_dir = 'EmoDialog/' +subtask = 'ques_aft_expl_gen_emo_gen_emo1_emo2_cap1_cap2_conv' + +if('ft_gen_cap' in subtask): + data_path += 'ft_gen/' + +if(subtask.endswith('gen_cap_1')): + task = 'image_blip_text_' + subtask +else: + task = 'text_only_' + subtask + +modelname = 'facebook/bart-large' + +if(subtask.endswith('gen_cap_1')): + numepochs = 25 +else: + numepochs = 5 + +max_target_length = 50 +if('conv' in subtask): + max_source_length = 350 +else: + max_source_length = 150 + +print("###################################################################################################") +print("Max sentence length {} ".format(max_source_length)) +print("###################################################################################################") + +special_tokens = {'additional_special_tokens': ['', '']} + +if(modelname == 't5-small'): + savename = 't5_small' + test_batch_size = 32 + train_batch_size = 32 +elif(modelname == 't5-base'): + savename = 't5_base' + test_batch_size = 8 + train_batch_size = 8 +elif(modelname == 't5-large'): + savename = 't5_large' + test_batch_size = 32 + train_batch_size = 16 +elif(modelname == 't5-11b'): + savename = 't5_11b' + test_batch_size = 16 + train_batch_size = 16 +elif(modelname == 'facebook/bart-base'): + savename = 'bart_small' + test_batch_size = 64 + train_batch_size = 32 +elif(modelname == 'facebook/bart-large'): + savename = 'bart_large' + test_batch_size = 64 + train_batch_size = 32 +elif(modelname == 'facebook/opt-1.3b'): + savename = 'opt_1.3b' + test_batch_size = 16 + train_batch_size = 16 +elif(modelname == 'facebook/opt-350m'): + savename = 'opt_350m' + test_batch_size = 32 + train_batch_size = 32 + +save_weights = os.path.join(output_dir, 'weights', savename, task, str(numepochs), + str(max_source_length) + '_' + str(max_target_length)) +if(os.path.isdir(os.path.join(output_dir, 'weights', savename)) == False): + os.mkdir(os.path.join(output_dir, 'weights', savename)) + +if(os.path.isdir(os.path.join(output_dir, 'weights', 
savename, task)) == False): + os.mkdir(os.path.join(output_dir, 'weights', savename, task)) + +if(os.path.isdir(os.path.join(output_dir, 'weights', savename, task, str(numepochs))) == False): + os.mkdir(os.path.join(output_dir, 'weights', savename, task, str(numepochs))) + +if(os.path.isdir(save_weights) == False): + os.mkdir(save_weights) + +print("###################################################################################################") +print("Loading weights from {} ".format(save_weights)) +print("###################################################################################################") + +test_file = data_path + 'test_' + subtask + '.csv' + +print("###################################################################################################") +print("Testing on {} ".format(test_file)) +print("###################################################################################################") + +extension = test_file.split(".")[-1] +raw_datasets = load_dataset('csv', + data_files={'train':test_file, 'validation': test_file, 'test': test_file}, + ) + +tokenizer = AutoTokenizer.from_pretrained( + save_weights, + cache_dir=logs_dir, + use_fast=True, + revision='main', + use_auth_token=None +) + +num_added_toks = tokenizer.add_special_tokens(special_tokens) + +model = AutoModelForSeq2SeqLM.from_pretrained( + save_weights, + from_tf=False, + cache_dir=logs_dir, + revision='main', + use_auth_token=None, +).to(device) + +model.resize_token_embeddings(len(tokenizer)) + +column_names = raw_datasets["train"].column_names + +text_column = column_names[0] +summary_column = column_names[1] + +min_target_length = 1 +ignore_pad_token_for_loss = True +padding = "max_length" +prefix = "" + +def preprocess_function_test(examples): + inputs = examples[text_column] + targets = examples[summary_column] + inputs = [prefix + inp for inp in inputs] + model_inputs = tokenizer(inputs, max_length=max_source_length, padding=padding, truncation=True) + + # Setup the tokenizer for targets + with tokenizer.as_target_tokenizer(): + labels = tokenizer(targets, max_length=max_target_length, padding=padding, truncation=True) + + model_inputs["labels"] = labels["input_ids"] + return model_inputs + +test_dataset = raw_datasets["test"] +test_dataset = test_dataset.map( + preprocess_function_test, + batched=True, + num_proc=None, + remove_columns=column_names, + load_from_cache_file=None, + desc="Running tokenizer on validation dataset", + ) + +test_batches = len(test_dataset) // test_batch_size + 1 +predictions = [] +for i in tqdm(range(test_batches)): + start = i * test_batch_size + end = min((i + 1) * test_batch_size, len(test_dataset)) + output_preds = model.generate( + torch.LongTensor(test_dataset["input_ids"][start : end]).to(device), + max_length=max_target_length, + num_beams=None + ) + output_preds = tokenizer.batch_decode( + output_preds, skip_special_tokens=True, clean_up_tokenization_spaces=True + ) + predictions += output_preds + +predictions = [pred.strip() for pred in predictions] + +test_df = pd.read_csv(test_file) +references = test_df['response'].tolist() + +emotion_dict = {'amusement' : 0, + 'anger' : 1, + 'awe' : 2, + 'contentment' : 3, + 'disgust' : 4, + 'excitement' : 5, + 'fear' : 6, + 'sadness' : 7, + 'neutral' : 8} + +references_emo = [] +references_expl = [] +for i, ref in enumerate(references): + emo = ref.split()[0] + expl = " ".join(ref.split()[2:]) + references_emo.append(int(emotion_dict[emo.strip()])) + references_expl.append(expl) + +predictions_emo = [] 
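+# Each prediction is parsed as "<emotion> because <explanation>", e.g. "awe because the sky looks
+# enormous" -> label 2 with explanation "the sky looks enormous". Empty or unparseable outputs receive
+# a guaranteed-wrong label, (reference + 1) mod 9, so they are counted as classification errors.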
+predictions_expl = [] +for i, ref in enumerate(predictions): + if(predictions[i]): + emo = ref.split()[0] + expl = " ".join(ref.split()[2:]) + if(emo.strip() not in emotion_dict): + predictions_emo.append(int((references_emo[i] + 1) % len(emotion_dict))) + else: + predictions_emo.append(int(emotion_dict[emo.strip()])) + predictions_expl.append(expl) + else: + predictions_emo.append(int((references_emo[i] + 1) % len(emotion_dict))) + predictions_expl.append('') + +precision, recall, f1, _ = precision_recall_fscore_support(references_emo, predictions_emo, average='weighted') +acc = accuracy_score(references_emo, predictions_emo) * 100 +f1 = f1 * 100 + +print("Accuracy {} and F1 {} ".format(acc, f1)) + +bleu = evaluate.load("bleu") +bleu_results = bleu.compute(predictions=predictions_expl, references=references_expl, tokenizer=word_tokenize) +print("BLEU scores: {} ".format(bleu_results)) + +meteor = evaluate.load("meteor") +meteor_results = meteor.compute(predictions=predictions_expl, references=references_expl) +print("Meteor scores: {} ".format(meteor_results)) + +rouge = evaluate.load("rouge") +rouge_results = rouge.compute(predictions=predictions_expl, references=references_expl) +print("ROUGE scores: {} ".format(rouge_results)) + +bertscore = evaluate.load("bertscore") +bertscore_results = bertscore.compute(predictions=predictions_expl, references=references_expl, lang="en") +bertscore_results = sum(bertscore_results['recall']) / len(predictions) +print("BERTScore: {} ".format(bertscore_results)) + +bart_scorer = BARTScorer(device='cuda:0', checkpoint='facebook/bart-large-cnn') +bart_scorer.load(path='bart_score.pth') +bartscore_results = bart_scorer.score(predictions_expl, references_expl, batch_size=4) +bartscore_results = sum(bartscore_results) / len(bartscore_results) +print("BARTScore: {} ".format(bartscore_results)) + +save_emo_expla = os.path.join(output_dir, 'weights', savename, task, str(numepochs), + str(max_source_length) + '_' + str(max_target_length), 'emo_expla.txt') +with open(save_emo_expla, 'w', encoding='utf-8') as f: + for sen in predictions: + f.write("{}\n".format(sen)) + +all_metrics = {} +all_metrics['accuracy'] = acc +all_metrics['f1-weighted'] = f1 +all_metrics['bleu-1'] = bleu_results['precisions'][0] +all_metrics['bleu-2'] = bleu_results['precisions'][1] +all_metrics['bleu-3'] = bleu_results['precisions'][2] +all_metrics['bleu-4'] = bleu_results['precisions'][3] +all_metrics['avg-bleu'] = bleu_results['bleu'] +all_metrics['rouge'] = rouge_results['rougeL'] +all_metrics['meteor'] = meteor_results['meteor'] +all_metrics['bert-score'] = bertscore_results +all_metrics['bart-score'] = bartscore_results + +save_res_file = os.path.join(output_dir, 'weights', savename, task, str(numepochs), + str(max_source_length) + '_' + str(max_target_length), 'metrics.json') + +with open(save_res_file, 'w') as f: + json.dump(all_metrics, f) diff --git a/baselines/BART/ques_aft_emo_expla_gen_train_mod.py b/baselines/BART/ques_aft_emo_expla_gen_train_mod.py new file mode 100644 index 0000000..20618f4 --- /dev/null +++ b/baselines/BART/ques_aft_emo_expla_gen_train_mod.py @@ -0,0 +1,363 @@ +import os +import json +import random +from tqdm import tqdm +import pickle as pkl +import numpy as np +import pandas as pd +from sklearn.metrics import accuracy_score, precision_recall_fscore_support +import torch +from transformers import ( + AutoModelForSeq2SeqLM, + AutoTokenizer, + DataCollatorForSeq2Seq, + Seq2SeqTrainer, + Seq2SeqTrainingArguments, +) +from datasets import load_dataset 
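+# Note: BARTScorer (imported below) comes from the external BARTScore repo and loads the
+# 'bart_score.pth' checkpoint from the working directory at evaluation time.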
+import evaluate +from nltk.tokenize import word_tokenize +from BARTScore.bart_score import BARTScorer +device = torch.device('cuda') +print("###################################################################################################") +print("Using {} ".format(device)) +print("###################################################################################################") +SEED = 0 +random.seed(SEED) +np.random.seed(SEED) +torch.manual_seed(SEED) + +data_path = 'data/' +logs_dir = 'logs/' +output_dir = 'EmoDialog/' +subtask = 'ques_aft_expl_gen_emo_gen_emo1_emo2_cap1_cap2_conv_gen_cap_1' + +if('ft_gen_cap' in subtask): + data_path += 'ft_gen/' + +if(subtask.endswith('gen_cap_1')): + task = 'image_blip_text_' + subtask +else: + task = 'text_only_' + subtask + +modelname = 'facebook/bart-large' + +if(subtask.endswith('gen_cap_1')): + numepochs = 25 +else: + numepochs = 5 + +max_target_length = 50 +if('conv' in subtask): + max_source_length = 350 +else: + max_source_length = 150 + +print("###################################################################################################") +print("Max sentence length {} ".format(max_source_length)) +print("###################################################################################################") + +special_tokens = {'additional_special_tokens': ['', '']} + +if(modelname == 't5-small'): + savename = 't5_small' + test_batch_size = 32 + train_batch_size = 32 +elif(modelname == 't5-base'): + savename = 't5_base' + test_batch_size = 8 + train_batch_size = 8 +elif(modelname == 't5-large'): + savename = 't5_large' + test_batch_size = 32 + train_batch_size = 16 +elif(modelname == 't5-11b'): + savename = 't5_11b' + test_batch_size = 16 + train_batch_size = 16 +elif(modelname == 'facebook/bart-base'): + savename = 'bart_small' + test_batch_size = 64 + train_batch_size = 32 +elif(modelname == 'facebook/bart-large'): + savename = 'bart_large' + test_batch_size = 64 + train_batch_size = 32 +elif(modelname == 'facebook/opt-1.3b'): + savename = 'opt_1.3b' + test_batch_size = 16 + train_batch_size = 16 +elif(modelname == 'facebook/opt-350m'): + savename = 'opt_350m' + test_batch_size = 32 + train_batch_size = 32 + +save_weights = os.path.join(output_dir, 'weights', savename, task, str(numepochs), + str(max_source_length) + '_' + str(max_target_length)) +if(os.path.isdir(os.path.join(output_dir, 'weights', savename)) == False): + os.mkdir(os.path.join(output_dir, 'weights', savename)) + +if(os.path.isdir(os.path.join(output_dir, 'weights', savename, task)) == False): + os.mkdir(os.path.join(output_dir, 'weights', savename, task)) + +if(os.path.isdir(os.path.join(output_dir, 'weights', savename, task, str(numepochs))) == False): + os.mkdir(os.path.join(output_dir, 'weights', savename, task, str(numepochs))) + +if(os.path.isdir(save_weights) == False): + os.mkdir(save_weights) + +print("###################################################################################################") +print("Saving weights to {} ".format(save_weights)) +print("###################################################################################################") + +train_file = data_path + 'train_' + subtask + '.csv' +dev_file = data_path + 'dev_' + subtask + '.csv' +test_file = data_path + 'test_' + subtask + '.csv' + + +print("###################################################################################################") +print("Training on {} ".format(train_file)) 
+print("###################################################################################################") + +print("###################################################################################################") +print("Testing on {} ".format(test_file)) +print("###################################################################################################") + +extension = train_file.split(".")[-1] +raw_datasets = load_dataset('csv', + data_files={'train':train_file, 'validation': dev_file, 'test': test_file}, + ) + +tokenizer = AutoTokenizer.from_pretrained( + modelname, + cache_dir=logs_dir, + use_fast=True, + revision='main', + use_auth_token=None +) + +num_added_toks = tokenizer.add_special_tokens(special_tokens) + +model = AutoModelForSeq2SeqLM.from_pretrained( + modelname, + from_tf=False, + cache_dir=logs_dir, + revision='main', + use_auth_token=None, +) + +model.resize_token_embeddings(len(tokenizer)) + +column_names = raw_datasets["train"].column_names + +text_column = column_names[0] +summary_column = column_names[1] + +min_target_length = 1 +ignore_pad_token_for_loss = True +padding = "max_length" +prefix = "" + +def preprocess_function(examples): + inputs = examples[text_column] + targets = examples[summary_column] + inputs = [prefix + inp for inp in inputs] + model_inputs = tokenizer(inputs, max_length=max_source_length, padding=padding, truncation=True) + + # Setup the tokenizer for targets + with tokenizer.as_target_tokenizer(): + labels = tokenizer(targets, max_length=max_target_length, padding=padding, truncation=True) + + # If we are padding here, replace all tokenizer.pad_token_id in the labels by -100 when we want to ignore + # padding in the loss. + if padding == "max_length" and ignore_pad_token_for_loss: + labels["input_ids"] = [ + [(l if l != tokenizer.pad_token_id else -100) for l in label] for label in labels["input_ids"] + ] + + model_inputs["labels"] = labels["input_ids"] + return model_inputs + +def preprocess_function_test(examples): + inputs = examples[text_column] + targets = examples[summary_column] + inputs = [prefix + inp for inp in inputs] + model_inputs = tokenizer(inputs, max_length=max_source_length, padding=padding, truncation=True) + + # Setup the tokenizer for targets + with tokenizer.as_target_tokenizer(): + labels = tokenizer(targets, max_length=max_target_length, padding=padding, truncation=True) + + model_inputs["labels"] = labels["input_ids"] + return model_inputs + +train_dataset = raw_datasets["train"] + +train_dataset = train_dataset.map( + preprocess_function, + batched=True, + num_proc=None, + remove_columns=column_names, + load_from_cache_file=None, + desc="Running tokenizer on train dataset", + ) + +eval_dataset = raw_datasets["validation"] +eval_dataset = eval_dataset.map( + preprocess_function, + batched=True, + num_proc=None, + remove_columns=column_names, + load_from_cache_file=None, + desc="Running tokenizer on validation dataset", + ) + +test_dataset = raw_datasets["test"] +test_dataset = test_dataset.map( + preprocess_function_test, + batched=True, + num_proc=None, + remove_columns=column_names, + load_from_cache_file=None, + desc="Running tokenizer on validation dataset", + ) + +label_pad_token_id = -100 if ignore_pad_token_for_loss else tokenizer.pad_token_id + +data_collator = DataCollatorForSeq2Seq( + tokenizer, + model=model, + label_pad_token_id=label_pad_token_id, + pad_to_multiple_of=32, + ) + +training_args = Seq2SeqTrainingArguments( + output_dir=logs_dir, + num_train_epochs=numepochs, + 
logging_dir=logs_dir, + predict_with_generate=True, + per_device_train_batch_size=train_batch_size, + per_device_eval_batch_size=32, + logging_steps=10000000, + save_steps=1000000 + ) + +trainer = Seq2SeqTrainer( + model=model, + args=training_args, + train_dataset=train_dataset, + eval_dataset=eval_dataset, + tokenizer=tokenizer, + data_collator=data_collator +) + +train_result = trainer.train(resume_from_checkpoint=None) +trainer.save_model(output_dir=save_weights) + + +test_batches = len(test_dataset) // test_batch_size + 1 +predictions = [] +for i in tqdm(range(test_batches)): + start = i * test_batch_size + end = min((i + 1) * test_batch_size, len(test_dataset)) + output_preds = model.generate( + torch.LongTensor(test_dataset["input_ids"][start : end]).to(device), + max_length=max_target_length, + num_beams=None + ) + output_preds = tokenizer.batch_decode( + output_preds, skip_special_tokens=True, clean_up_tokenization_spaces=True + ) + predictions += output_preds + +predictions = [pred.strip() for pred in predictions] + +test_df = pd.read_csv(test_file) +references = test_df['response'].tolist() + +emotion_dict = {'amusement' : 0, + 'anger' : 1, + 'awe' : 2, + 'contentment' : 3, + 'disgust' : 4, + 'excitement' : 5, + 'fear' : 6, + 'sadness' : 7, + 'neutral' : 8} + +references_emo = [] +references_expl = [] +for i, ref in enumerate(references): + emo = ref.split()[0] + expl = " ".join(ref.split()[2:]) + references_emo.append(int(emotion_dict[emo.strip()])) + references_expl.append(expl) + +predictions_emo = [] +predictions_expl = [] +for i, ref in enumerate(predictions): + if(predictions[i]): + emo = ref.split()[0] + expl = " ".join(ref.split()[2:]) + if(emo.strip() not in emotion_dict): + predictions_emo.append(int((references_emo[i] + 1) % len(emotion_dict))) + else: + predictions_emo.append(int(emotion_dict[emo.strip()])) + predictions_expl.append(expl) + else: + predictions_emo.append(int((references_emo[i] + 1) % len(emotion_dict))) + predictions_expl.append('') + +precision, recall, f1, _ = precision_recall_fscore_support(references_emo, predictions_emo, average='weighted') +acc = accuracy_score(references_emo, predictions_emo) * 100 +f1 = f1 * 100 + +print("Accuracy {} and F1 {} ".format(acc, f1)) + +bleu = evaluate.load("bleu") +bleu_results = bleu.compute(predictions=predictions_expl, references=references_expl, tokenizer=word_tokenize) +print("BLEU scores: {} ".format(bleu_results)) + +meteor = evaluate.load("meteor") +meteor_results = meteor.compute(predictions=predictions_expl, references=references_expl) +print("Meteor scores: {} ".format(meteor_results)) + +rouge = evaluate.load("rouge") +rouge_results = rouge.compute(predictions=predictions_expl, references=references_expl) +print("ROUGE scores: {} ".format(rouge_results)) + +bertscore = evaluate.load("bertscore") +bertscore_results = bertscore.compute(predictions=predictions_expl, references=references_expl, lang="en") +bertscore_results = sum(bertscore_results['recall']) / len(predictions) +print("BERTScore: {} ".format(bertscore_results)) + +bart_scorer = BARTScorer(device='cuda:0', checkpoint='facebook/bart-large-cnn') +bart_scorer.load(path='bart_score.pth') +bartscore_results = bart_scorer.score(predictions_expl, references_expl, batch_size=4) +bartscore_results = sum(bartscore_results) / len(bartscore_results) +print("BARTScore: {} ".format(bartscore_results)) + +save_res = os.path.join(output_dir, 'weights', savename, task, str(numepochs), + str(max_source_length) + '_' + str(max_target_length), 'results.txt') 
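+# results.txt (written next) keeps one generated explanation per line; metrics.json (written at the
+# end) stores the aggregate classification and generation scores for this run.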
+with open(save_res, 'w', encoding='utf-8') as f:
+    for sen in predictions_expl:
+        f.write("{}\n".format(sen))
+
+all_metrics = {}
+all_metrics['accuracy'] = acc
+all_metrics['f1-weighted'] = f1
+all_metrics['bleu-1'] = bleu_results['precisions'][0]
+all_metrics['bleu-2'] = bleu_results['precisions'][1]
+all_metrics['bleu-3'] = bleu_results['precisions'][2]
+all_metrics['bleu-4'] = bleu_results['precisions'][3]
+all_metrics['avg-bleu'] = bleu_results['bleu']
+all_metrics['rouge'] = rouge_results['rougeL']
+all_metrics['meteor'] = meteor_results['meteor']
+all_metrics['bert-score'] = bertscore_results
+all_metrics['bart-score'] = bartscore_results
+
+save_res_file = os.path.join(output_dir, 'weights', savename, task, str(numepochs),
+                             str(max_source_length) + '_' + str(max_target_length), 'metrics.json')
+
+with open(save_res_file, 'w') as f:
+    json.dump(all_metrics, f)
diff --git a/baselines/nlxgpt/README.md b/baselines/nlxgpt/README.md
new file mode 100644
index 0000000..acc28f5
--- /dev/null
+++ b/baselines/nlxgpt/README.md
@@ -0,0 +1,38 @@
+# affect-visdial
+
+Code for adapting NLX-GPT to Affective Visual Dialog.
+
+## Requirements
+
++ Setup
+
+```
+conda env create -f environment.yml
+```
+
++ GPU
+
+Our experiments are conducted on 4 NVIDIA V100 GPUs with `accelerate`.
+
+## Train
+
+First download the [NLX-GPT pretrained model](https://drive.google.com/drive/folders/1Bfc__0HRzYPyvRe0Ur_oSbhO8dSavT4e?usp=sharing) and put it in `/pretrained`.
+
+```
+# questioner w/ visual
+accelerate launch train.py --dialog --visual_backbone --ckpt_path=path/to/ckpt
+# questioner w/o visual
+accelerate launch train.py --dialog --ckpt_path=path/to/ckpt
+# answerer
+accelerate launch train.py --dialog --answerer --visual_backbone --ckpt_path=path/to/ckpt
+```
+
+## Inference
+
+```
+accelerate launch eval.py --ckpt_path=path/to/ckpt --load_from_epoch epoch
+```
+
+## Acknowledgments
+
+The baseline code is adapted from https://github.com/fawazsammani/nlxgpt
\ No newline at end of file
diff --git a/baselines/nlxgpt/environment.yml b/baselines/nlxgpt/environment.yml
new file mode 100644
index 0000000..9db0fea
--- /dev/null
+++ b/baselines/nlxgpt/environment.yml
@@ -0,0 +1,55 @@
+name: visdial
+channels:
+  - pytorch
+  - conda-forge
+  - defaults
+dependencies:
+  - pip:
+    - accelerate==0.14.0.dev0
+    - aiohttp==3.8.3
+    - aiosignal==1.2.0
+    - async-timeout==4.0.2
+    - attrs==22.1.0
+    - bert-score==0.3.7
+    - click==8.1.3
+    - clip==1.0
+    - contourpy==1.0.5
+    - cycler==0.11.0
+    - datasets==2.6.1
+    - dill==0.3.5.1
+    - evaluate==0.3.0
+    - filelock==3.8.0
+    - fonttools==4.37.4
+    - frozenlist==1.3.1
+    - fsspec==2022.10.0
+    - ftfy==6.1.1
+    - huggingface-hub==0.10.0
+    - inexactsearch==1.0.2
+    - joblib==1.2.0
+    - kiwisolver==1.4.4
+    - matplotlib==3.6.1
+    - multidict==6.0.2
+    - multiprocess==0.70.13
+    - nltk==3.7
+    - packaging==21.3
+    - pandas==1.5.0
+    - psutil==5.9.2
+    - pyarrow==10.0.0
+    - pyparsing==3.0.9
+    - pyspellchecker==0.7.0
+    - python-dateutil==2.8.2
+    - pytz==2022.4
+    - pyyaml==6.0
+    - regex==2022.9.13
+    - responses==0.18.0
+    - scikit-learn==1.1.3
+    - silpa-common==0.3
+    - soundex==1.1.3
+    - spellchecker==0.4
+    - threadpoolctl==3.1.0
+    - tokenizers==0.12.1
+    - tqdm==4.64.1
+    - transformers==4.22.2
+    - wcwidth==0.2.5
+    - xxhash==3.1.0
+    - yarl==1.8.1
diff --git a/baselines/nlxgpt/eval.py b/baselines/nlxgpt/eval.py
new file mode 100644
index 0000000..614a470
--- /dev/null
+++ b/baselines/nlxgpt/eval.py
@@ -0,0 +1,172 @@
+import argparse
+import os
+import json
+from nltk.translate.bleu_score import sentence_bleu
+import
numpy as np +import torch +from transformers import BartTokenizer, BartForConditionalGeneration +import clip +import traceback +from typing import List +import torch.nn as nn +from evaluate import load +from sklearn.metrics import f1_score + +emotions = ['excitement', 'sadness', 'anger', 'contentment', 'something else', 'disgust', 'fear', 'amusement', 'awe'] + +class BARTScorer: + def __init__(self, device='cuda', max_length=1024, checkpoint='facebook/bart-large-cnn'): + # Set up model + self.device = device + self.max_length = max_length + self.tokenizer = BartTokenizer.from_pretrained(checkpoint) + self.model = BartForConditionalGeneration.from_pretrained(checkpoint) + self.model.eval() + self.model.to(device) + + # Set up loss + self.loss_fct = nn.NLLLoss(reduction='none', ignore_index=self.model.config.pad_token_id) + self.lsm = nn.LogSoftmax(dim=1) + + def load(self, path=None): + """ Load model from paraphrase finetuning """ + if path is None: + path = 'models/bart.pth' + self.model.load_state_dict(torch.load(path, map_location=self.device)) + + def score(self, srcs, tgts, batch_size=4): + """ Score a batch of examples """ + score_list = [] + for i in range(0, len(srcs), batch_size): + src_list = srcs[i: i + batch_size] + tgt_list = tgts[i: i + batch_size] + try: + with torch.no_grad(): + encoded_src = self.tokenizer( + src_list, + max_length=self.max_length, + truncation=True, + padding=True, + return_tensors='pt' + ) + encoded_tgt = self.tokenizer( + tgt_list, + max_length=self.max_length, + truncation=True, + padding=True, + return_tensors='pt' + ) + src_tokens = encoded_src['input_ids'].to(self.device) + src_mask = encoded_src['attention_mask'].to(self.device) + + tgt_tokens = encoded_tgt['input_ids'].to(self.device) + tgt_mask = encoded_tgt['attention_mask'] + tgt_len = tgt_mask.sum(dim=1).to(self.device) + + output = self.model( + input_ids=src_tokens, + attention_mask=src_mask, + labels=tgt_tokens + ) + logits = output.logits.view(-1, self.model.config.vocab_size) + loss = self.loss_fct(self.lsm(logits), tgt_tokens.view(-1)) + loss = loss.view(tgt_tokens.shape[0], -1) + loss = loss.sum(dim=1) / tgt_len + curr_score_list = [-x.item() for x in loss] + score_list += curr_score_list + + except RuntimeError: + traceback.print_exc() + print(f'source: {src_list}') + print(f'target: {tgt_list}') + exit(0) + return score_list + + def multi_ref_score(self, srcs, tgts: List[List[str]], agg="mean", batch_size=4): + # Assert we have the same number of references + ref_nums = [len(x) for x in tgts] + if len(set(ref_nums)) > 1: + raise Exception("You have different number of references per test sample.") + + ref_num = len(tgts[0]) + score_matrix = [] + for i in range(ref_num): + curr_tgts = [x[i] for x in tgts] + scores = self.score(srcs, curr_tgts, batch_size) + score_matrix.append(scores) + if agg == "mean": + score_list = np.mean(score_matrix, axis=0) + elif agg == "max": + score_list = np.max(score_matrix, axis=0) + else: + raise NotImplementedError + return list(score_list) + +def evaluate_all(results_pred, results_gt): + bertscore = load("bertscore") + count = 0 + bleus1 = [] + bleus2 = [] + bleus3 = [] + bleus4 = [] + barts = [] + bert_precision = [] + bert_recall = [] + bert_f1 = [] + emotion_true = [] + emotion_pred = [] + + for idx, pred in enumerate(results_pred): + emotion = pred['caption'].split("because")[0].strip() + exp = pred['caption'].split("because")[-1].strip() + emotion_gt = results_gt[idx]['gt'].split("because")[0].strip() + exp_gt = 
results_gt[idx]['gt'].split("because")[-1].strip()
+
+        with torch.no_grad():
+            # Note: the scorer is rebuilt for every example here; constructing it once before the loop would be faster.
+            bart_scorer = BARTScorer(device='cuda:0', checkpoint='facebook/bart-large-cnn')
+            bart = bart_scorer.score([exp], [exp_gt])
+            barts.append(np.mean(bart))
+            results = bertscore.compute(predictions=[pred['caption']], references=[results_gt[idx]['gt']], model_type="distilbert-base-uncased")
+            bert_precision.append(results['precision'])
+            bert_recall.append(results['recall'])
+            bert_f1.append(results['f1'])
+
+
+        emotion_true.append(emotions.index(emotion_gt))
+        emotion_pred.append(emotions.index(emotion))
+
+        bleus1.append(sentence_bleu([exp_gt.split()], exp.split(), weights=(1,0,0,0)))
+        bleus2.append(sentence_bleu([exp_gt.split()], exp.split(), weights=(0.5,0.5,0,0)))
+        bleus3.append(sentence_bleu([exp_gt.split()], exp.split(), weights=(0.33,0.33,0.33,0)))
+        bleus4.append(sentence_bleu([exp_gt.split()], exp.split()))
+
+    print('bleu1: {:.2f}'.format(np.mean(bleus1)),flush=True)
+    print('bleu2: {:.2f}'.format(np.mean(bleus2)),flush=True)
+    print('bleu3: {:.2f}'.format(np.mean(bleus3)),flush=True)
+    print('bleu4: {:.2f}'.format(np.mean(bleus4)),flush=True)
+    print('bart score: {:.2f}'.format(np.mean(barts)),flush=True)
+    print('bert_precision: {:.2f}'.format(np.mean(bert_precision)),flush=True)
+    print('bert_recall: {:.2f}'.format(np.mean(bert_recall)),flush=True)
+    print('bert_f1: {:.2f}'.format(np.mean(bert_f1)),flush=True)
+    print(f1_score(emotion_true, emotion_pred, average='weighted'),flush=True)
+
+def parse_option():
+    parser = argparse.ArgumentParser('NLX-GPT')
+
+    parser.add_argument('--ckpt_path', type=str, default='finetune')
+    parser.add_argument('--load_from_epoch', type=int, default=0)
+    args = parser.parse_args()
+    return args
+
+device = 'cuda'
+args = parse_option()
+path = args.ckpt_path+'/results/'
+if not os.path.exists(path):
+    os.mkdir(path)
+f_pred = path + str(args.load_from_epoch) + '_pred.json'
+with open(f_pred, 'r') as w:
+    results_full = json.load(w)
+f_gt = path + str(args.load_from_epoch) + '_gt.json'
+with open(f_gt, 'r') as w:
+    results_gt = json.load(w)
+evaluate_all(results_full, results_gt)
\ No newline at end of file
diff --git a/baselines/nlxgpt/models/__pycache__/clip_vit.cpython-38.pyc b/baselines/nlxgpt/models/__pycache__/clip_vit.cpython-38.pyc
new file mode 100644
index 0000000..70db201
Binary files /dev/null and b/baselines/nlxgpt/models/__pycache__/clip_vit.cpython-38.pyc differ
diff --git a/baselines/nlxgpt/models/__pycache__/gpt.cpython-38.pyc b/baselines/nlxgpt/models/__pycache__/gpt.cpython-38.pyc
new file mode 100644
index 0000000..c78d09f
Binary files /dev/null and b/baselines/nlxgpt/models/__pycache__/gpt.cpython-38.pyc differ
diff --git a/baselines/nlxgpt/models/clip_vit.py b/baselines/nlxgpt/models/clip_vit.py
new file mode 100644
index 0000000..c7d4da6
--- /dev/null
+++ b/baselines/nlxgpt/models/clip_vit.py
@@ -0,0 +1,28 @@
+import clip
+import torch
+import torch.nn as nn
+
+class ImageEncoder(nn.Module):
+
+    def __init__(self, device):
+        super(ImageEncoder, self).__init__()
+        self.encoder, _ = clip.load("ViT-B/16", device=device)  # loads already in eval mode
+
+    def forward(self, x):
+        """
+        Expects a tensor of size (batch_size, 3, 224, 224)
+        """
+        with torch.no_grad():
+            x = x.type(self.encoder.visual.conv1.weight.dtype)
+            x = self.encoder.visual.conv1(x)  # shape = [*, width, grid, grid]
+            x = x.reshape(x.shape[0], x.shape[1], -1)  # shape = [*, width, grid ** 2]
+            x = x.permute(0, 2, 1)  # shape = [*, grid ** 2, width]
+            x =
torch.cat([self.encoder.visual.class_embedding.to(x.dtype) + torch.zeros(x.shape[0], 1, x.shape[-1], dtype=x.dtype, device=x.device), x], dim=1) # shape = [*, grid ** 2 + 1, width] + x = x + self.encoder.visual.positional_embedding.to(x.dtype) + x = self.encoder.visual.ln_pre(x) + x = x.permute(1, 0, 2) # NLD -> LND + x = self.encoder.visual.transformer(x) + grid_feats = x.permute(1, 0, 2) # LND -> NLD (N, 197, 768) + grid_feats = self.encoder.visual.ln_post(grid_feats[:,1:]) + + return grid_feats.float() diff --git a/baselines/nlxgpt/models/gpt.py b/baselines/nlxgpt/models/gpt.py new file mode 100644 index 0000000..1d2242a --- /dev/null +++ b/baselines/nlxgpt/models/gpt.py @@ -0,0 +1,517 @@ +import torch +import torch.utils.checkpoint +from torch import nn +import torch.nn.functional as F +from torch.nn import CrossEntropyLoss +from transformers import GPT2PreTrainedModel +from transformers.activations import ACT2FN +from transformers.modeling_outputs import (BaseModelOutputWithPastAndCrossAttentions,CausalLMOutputWithCrossAttentions,) +from transformers.modeling_utils import Conv1D +from typing import Tuple + + +class GPT2Attention(nn.Module): + def __init__(self, config, is_cross_attention=False): + super().__init__() + + max_positions = config.max_position_embeddings + self.register_buffer( + "bias", + torch.tril(torch.ones((max_positions, max_positions), dtype=torch.uint8)).view( + 1, 1, max_positions, max_positions + ), + ) + self.register_buffer("masked_bias", torch.tensor(-1e4)) + + self.embed_dim = config.hidden_size + self.num_heads = config.num_attention_heads + self.head_dim = self.embed_dim // self.num_heads + self.split_size = self.embed_dim + if self.head_dim * self.num_heads != self.embed_dim: + raise ValueError( + f"`embed_dim` must be divisible by num_heads (got `embed_dim`: {self.embed_dim} and `num_heads`: {self.num_heads})." 
+ ) + + self.scale_attn_weights = config.scale_attn_weights + self.is_cross_attention = is_cross_attention + + if self.is_cross_attention: + self.c_attn = Conv1D(2 * self.embed_dim, self.embed_dim) + self.q_attn = Conv1D(self.embed_dim, self.embed_dim) + else: + self.c_attn = Conv1D(3 * self.embed_dim, self.embed_dim) + self.c_proj = Conv1D(self.embed_dim, self.embed_dim) + + self.attn_dropout = nn.Dropout(config.attn_pdrop) + self.resid_dropout = nn.Dropout(config.resid_pdrop) + + + def _attn(self, query, key, value, attention_mask=None, head_mask=None): + attn_weights = torch.matmul(query, key.transpose(-1, -2)) + + if self.scale_attn_weights: + attn_weights = attn_weights / (float(value.size(-1)) ** 0.5) + + if not self.is_cross_attention: + # if only "normal" attention layer implements causal mask + query_length, key_length = query.size(-2), key.size(-2) + causal_mask = self.bias[:, :, key_length - query_length : key_length, :key_length].bool() + attn_weights = torch.where(causal_mask, attn_weights, self.masked_bias.to(attn_weights.dtype)) + + if attention_mask is not None: + # Apply the attention mask + attn_weights = attn_weights + attention_mask + + attn_weights = nn.Softmax(dim=-1)(attn_weights) + attn_weights = self.attn_dropout(attn_weights) + + # Mask heads if we want to + if head_mask is not None: + attn_weights = attn_weights * head_mask + + attn_output = torch.matmul(attn_weights, value) + + return attn_output, attn_weights + + def _split_heads(self, tensor, num_heads, attn_head_size): + """ + Splits hidden_size dim into attn_head_size and num_heads + """ + new_shape = tensor.size()[:-1] + (num_heads, attn_head_size) + tensor = tensor.view(*new_shape) + return tensor.permute(0, 2, 1, 3) # (batch, head, seq_length, head_features) + + def _merge_heads(self, tensor, num_heads, attn_head_size): + """ + Merges attn_head_size dim and num_attn_heads dim into hidden_size + """ + tensor = tensor.permute(0, 2, 1, 3).contiguous() + new_shape = tensor.size()[:-2] + (num_heads * attn_head_size,) + return tensor.view(new_shape) + + def forward( + self, + hidden_states, + layer_past=None, + attention_mask=None, + head_mask=None, + encoder_hidden_states=None, + encoder_attention_mask=None, + use_cache=False, + output_attentions=False, + ): + if encoder_hidden_states is not None: + if not hasattr(self, "q_attn"): + raise ValueError( + "If class is used as cross attention, the weights `q_attn` have to be defined. " + "Please make sure to instantiate class with `GPT2Attention(..., is_cross_attention=True)`." 
+ ) + + query = self.q_attn(hidden_states) + key, value = self.c_attn(encoder_hidden_states).split(self.split_size, dim=2) + attention_mask = encoder_attention_mask + else: + query, key, value = self.c_attn(hidden_states).split(self.split_size, dim=2) + + query = self._split_heads(query, self.num_heads, self.head_dim) + key = self._split_heads(key, self.num_heads, self.head_dim) + value = self._split_heads(value, self.num_heads, self.head_dim) + + if layer_past is not None: + past_key, past_value = layer_past + key = torch.cat((past_key, key), dim=-2) + value = torch.cat((past_value, value), dim=-2) + + if use_cache is True: + present = (key, value) + else: + present = None + + attn_output, attn_weights = self._attn(query, key, value, attention_mask, head_mask) + attn_output = self._merge_heads(attn_output, self.num_heads, self.head_dim) + attn_output = self.c_proj(attn_output) + attn_output = self.resid_dropout(attn_output) + + outputs = (attn_output, present) + if output_attentions: + outputs += (attn_weights,) + + return outputs # a, present, (attentions) + + +class GPT2MLP(nn.Module): + def __init__(self, intermediate_size, config): + super().__init__() + embed_dim = config.hidden_size + self.c_fc = Conv1D(intermediate_size, embed_dim) + self.c_proj = Conv1D(embed_dim, intermediate_size) + self.act = ACT2FN[config.activation_function] + self.dropout = nn.Dropout(config.resid_pdrop) + + def forward(self, hidden_states): + hidden_states = self.c_fc(hidden_states) + hidden_states = self.act(hidden_states) + hidden_states = self.c_proj(hidden_states) + hidden_states = self.dropout(hidden_states) + return hidden_states + + +class GPT2Block(nn.Module): + def __init__(self, config): + super().__init__() + hidden_size = config.hidden_size + inner_dim = config.n_inner if config.n_inner is not None else 4 * hidden_size + + self.ln_1 = nn.LayerNorm(hidden_size, eps=config.layer_norm_epsilon) + self.attn = GPT2Attention(config) + self.ln_2 = nn.LayerNorm(hidden_size, eps=config.layer_norm_epsilon) + + if config.add_cross_attention: + self.crossattention = GPT2Attention(config, is_cross_attention=True) + self.ln_cross_attn = nn.LayerNorm(hidden_size, eps=config.layer_norm_epsilon) + + self.mlp = GPT2MLP(inner_dim, config) + + def forward( + self, + hidden_states, + layer_past=None, + attention_mask=None, + head_mask=None, + encoder_hidden_states=None, + encoder_attention_mask=None, + use_cache=False, + output_attentions=False, + ): + residual = hidden_states + hidden_states = self.ln_1(hidden_states) + attn_outputs = self.attn( + hidden_states, + layer_past=layer_past, + attention_mask=attention_mask, + head_mask=head_mask, + use_cache=use_cache, + output_attentions=output_attentions, + ) + attn_output = attn_outputs[0] # output_attn: a, present, (attentions) + outputs = attn_outputs[1:] + # residual connection + hidden_states = attn_output + residual + + if encoder_hidden_states is not None: + # add one self-attention block for cross-attention + if not hasattr(self, "crossattention"): + raise ValueError( + f"If `encoder_hidden_states` are passed, {self} has to be instantiated with " + "cross-attention layers by setting `config.add_cross_attention=True`" + ) + residual = hidden_states + hidden_states = self.ln_cross_attn(hidden_states) + cross_attn_outputs = self.crossattention( + hidden_states, + attention_mask=attention_mask, + head_mask=head_mask, + encoder_hidden_states=encoder_hidden_states, + encoder_attention_mask=encoder_attention_mask, + output_attentions=output_attentions, + ) + 
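+ # cross_attn_outputs is (attn_output, present, cross_attn_weights?): index 0 holds the
+ # attended hidden states, present is None here because use_cache is not forwarded to the
+ # cross-attention call, and index 2 carries the cross-attention weights when
+ # output_attentions is set (those are appended to `outputs` just below).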
attn_output = cross_attn_outputs[0] + # residual connection + hidden_states = residual + attn_output + outputs = outputs + cross_attn_outputs[2:] # add cross attentions if we output attention weights + + residual = hidden_states + hidden_states = self.ln_2(hidden_states) + feed_forward_hidden_states = self.mlp(hidden_states) + # residual connection + hidden_states = residual + feed_forward_hidden_states + + if use_cache: + outputs = (hidden_states,) + outputs + else: + outputs = (hidden_states,) + outputs[1:] + + return outputs # hidden_states, present, (attentions, cross_attentions) + + + +class GPT2Model(GPT2PreTrainedModel): + _keys_to_ignore_on_load_missing = ["attn.masked_bias"] + + def __init__(self, config): + super().__init__(config) + + self.embed_dim = config.hidden_size + + self.wte = nn.Embedding(config.vocab_size, self.embed_dim) + self.wpe = nn.Embedding(config.max_position_embeddings, self.embed_dim) + + self.drop = nn.Dropout(config.embd_pdrop) + self.h = nn.ModuleList([GPT2Block(config) for _ in range(config.num_hidden_layers)]) + self.ln_f = nn.LayerNorm(self.embed_dim, eps=config.layer_norm_epsilon) + + self.init_weights() + + + def get_input_embeddings(self): + return self.wte + + def set_input_embeddings(self, new_embeddings): + self.wte = new_embeddings + + def forward( + self, + input_ids=None, + past_key_values=None, + attention_mask=None, + token_type_ids=None, + position_ids=None, + head_mask=None, + inputs_embeds=None, + encoder_hidden_states=None, + encoder_attention_mask=None, + use_cache=None, + output_attentions=None, + output_hidden_states=None, + return_dict=None, + ): + output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions + output_hidden_states = ( + output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states + ) + use_cache = use_cache if use_cache is not None else self.config.use_cache + return_dict = return_dict if return_dict is not None else self.config.use_return_dict + + if input_ids is not None and inputs_embeds is not None: + raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time") + elif input_ids is not None: + input_shape = input_ids.size() + input_ids = input_ids.view(-1, input_shape[-1]) + batch_size = input_ids.shape[0] + elif inputs_embeds is not None: + input_shape = inputs_embeds.size()[:-1] + batch_size = inputs_embeds.shape[0] + else: + raise ValueError("You have to specify either input_ids or inputs_embeds") + + device = input_ids.device if input_ids is not None else inputs_embeds.device + + if token_type_ids is not None: + token_type_ids = token_type_ids.view(-1, input_shape[-1]) + if position_ids is not None: + position_ids = position_ids.view(-1, input_shape[-1]) + + if past_key_values is None: + past_length = 0 + past_key_values = tuple([None] * len(self.h)) + else: + past_length = past_key_values[0][0].size(-2) + if position_ids is None: + position_ids = torch.arange(past_length, input_shape[-1] + past_length, dtype=torch.long, device=device) + position_ids = position_ids.unsqueeze(0).view(-1, input_shape[-1]) + + # GPT2Attention mask. 
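+ # The [batch, seq_len] padding mask is reshaped to [batch, 1, 1, seq_len] and converted into an
+ # additive bias: kept positions contribute 0.0 and masked positions -10000.0, so masked tokens
+ # receive near-zero weight after the softmax; the cast to self.dtype keeps this fp16-safe.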
+ if attention_mask is not None: + assert batch_size > 0, "batch_size has to be defined and > 0" + attention_mask = attention_mask.view(batch_size, -1) + attention_mask = attention_mask[:, None, None, :] + attention_mask = attention_mask.to(dtype=self.dtype) # fp16 compatibility + attention_mask = (1.0 - attention_mask) * -10000.0 + + # If a 2D ou 3D attention mask is provided for the cross-attention + # we need to make broadcastable to [batch_size, num_heads, seq_length, seq_length] + if self.config.add_cross_attention and encoder_hidden_states is not None: + encoder_batch_size, encoder_sequence_length, _ = encoder_hidden_states.size() + encoder_hidden_shape = (encoder_batch_size, encoder_sequence_length) + if encoder_attention_mask is None: + encoder_attention_mask = torch.ones(encoder_hidden_shape, device=device) + encoder_attention_mask = self.invert_attention_mask(encoder_attention_mask) + else: + encoder_attention_mask = None + + # Prepare head mask if needed + # 1.0 in head_mask indicate we keep the head + # attention_probs has shape bsz x n_heads x N x N + # head_mask has shape n_layer x batch x n_heads x N x N + head_mask = self.get_head_mask(head_mask, self.config.n_layer) + + if inputs_embeds is None: + inputs_embeds = self.wte(input_ids) + position_embeds = self.wpe(position_ids) + hidden_states = inputs_embeds + position_embeds + + if token_type_ids is not None: + token_type_embeds = self.wte(token_type_ids) + hidden_states = hidden_states + token_type_embeds + + hidden_states = self.drop(hidden_states) + + output_shape = input_shape + (hidden_states.size(-1),) + + presents = () if use_cache else None + all_self_attentions = () if output_attentions else None + all_cross_attentions = () if output_attentions and self.config.add_cross_attention else None + all_hidden_states = () if output_hidden_states else None + for i, (block, layer_past) in enumerate(zip(self.h, past_key_values)): + + if output_hidden_states: + all_hidden_states = all_hidden_states + (hidden_states,) + + outputs = block( + hidden_states, + layer_past=layer_past, + attention_mask=attention_mask, + head_mask=head_mask[i], + encoder_hidden_states=encoder_hidden_states, + encoder_attention_mask=encoder_attention_mask, + use_cache=use_cache, + output_attentions=output_attentions, + ) + + hidden_states = outputs[0] + if use_cache is True: + presents = presents + (outputs[1],) + + if output_attentions: + all_self_attentions = all_self_attentions + (outputs[2 if use_cache else 1],) + if self.config.add_cross_attention: + all_cross_attentions = all_cross_attentions + (outputs[3 if use_cache else 2],) + + + hidden_states = self.ln_f(hidden_states) + + hidden_states = hidden_states.view(*output_shape) + # Add last hidden state + if output_hidden_states: + all_hidden_states = all_hidden_states + (hidden_states,) + + if not return_dict: + return tuple(v for v in [hidden_states, presents, all_hidden_states, all_self_attentions] if v is not None) + + return BaseModelOutputWithPastAndCrossAttentions( + last_hidden_state=hidden_states, + past_key_values=presents, + hidden_states=all_hidden_states, + attentions=all_self_attentions, + cross_attentions=all_cross_attentions, + ) + + + +class GPT2LMHeadModel(GPT2PreTrainedModel): + _keys_to_ignore_on_load_missing = [r"attn.masked_bias", r"attn.bias", r"lm_head.weight"] + + def __init__(self, config): + super().__init__(config) + self.transformer = GPT2Model(config) + self.lm_head = nn.Linear(config.n_embd, config.vocab_size, bias=False) + + self.init_weights() + + def 
get_output_embeddings(self): + return self.lm_head + + def set_output_embeddings(self, new_embeddings): + self.lm_head = new_embeddings + + def prepare_inputs_for_generation(self, input_ids, past=None, **kwargs): + token_type_ids = kwargs.get("token_type_ids", None) + # only last token for inputs_ids if past is defined in kwargs + if past: + input_ids = input_ids[:, -1].unsqueeze(-1) + if token_type_ids is not None: + token_type_ids = token_type_ids[:, -1].unsqueeze(-1) + + attention_mask = kwargs.get("attention_mask", None) + position_ids = kwargs.get("position_ids", None) + + if attention_mask is not None and position_ids is None: + # create position_ids on the fly for batch generation + position_ids = attention_mask.long().cumsum(-1) - 1 + position_ids.masked_fill_(attention_mask == 0, 1) + if past: + position_ids = position_ids[:, -1].unsqueeze(-1) + else: + position_ids = None + return { + "input_ids": input_ids, + "past_key_values": past, + "use_cache": kwargs.get("use_cache"), + "position_ids": position_ids, + "attention_mask": attention_mask, + "token_type_ids": token_type_ids, + } + + + def forward( + self, + input_ids=None, + past_key_values=None, + attention_mask=None, + token_type_ids=None, + position_ids=None, + head_mask=None, + inputs_embeds=None, + encoder_hidden_states=None, + encoder_attention_mask=None, + labels=None, + use_cache=None, + output_attentions=None, + output_hidden_states=None, + return_dict=None, + ): + + return_dict = return_dict if return_dict is not None else self.config.use_return_dict + + transformer_outputs = self.transformer( + input_ids, + past_key_values=past_key_values, + attention_mask=attention_mask, + token_type_ids=token_type_ids, + position_ids=position_ids, + head_mask=head_mask, + inputs_embeds=inputs_embeds, + encoder_hidden_states=encoder_hidden_states, + encoder_attention_mask=encoder_attention_mask, + use_cache=use_cache, + output_attentions=output_attentions, + output_hidden_states=output_hidden_states, + return_dict=return_dict, + ) + hidden_states = transformer_outputs[0] + + lm_logits = self.lm_head(hidden_states) + + loss = None + if labels is not None: + # Shift so that tokens < n predict n + shift_logits = lm_logits[..., :-1, :].contiguous() + shift_labels = labels[..., 1:].contiguous() + # Flatten the tokens + loss_fct = CrossEntropyLoss() + loss = loss_fct(shift_logits.view(-1, shift_logits.size(-1)), shift_labels.view(-1)) + + if not return_dict: + output = (lm_logits,) + transformer_outputs[1:] + return ((loss,) + output) if loss is not None else output + + return CausalLMOutputWithCrossAttentions( + loss=loss, + logits=lm_logits, + past_key_values=transformer_outputs.past_key_values, + hidden_states=transformer_outputs.hidden_states, + attentions=transformer_outputs.attentions, + cross_attentions=transformer_outputs.cross_attentions, + ) + + @staticmethod + def _reorder_cache(past: Tuple[Tuple[torch.Tensor]], beam_idx: torch.Tensor) -> Tuple[Tuple[torch.Tensor]]: + """ + This function is used to re-order the :obj:`past_key_values` cache if + :meth:`~transformers.PreTrainedModel.beam_search` or :meth:`~transformers.PreTrainedModel.beam_sample` is + called. This is required to match :obj:`past_key_values` with the correct beam_idx at every generation step. 
+ """ + return tuple( + tuple(past_state.index_select(0, beam_idx.to(past_state.device)) for past_state in layer_past) + for layer_past in past + ) diff --git a/baselines/nlxgpt/models/gpt_vcr.py b/baselines/nlxgpt/models/gpt_vcr.py new file mode 100644 index 0000000..8e60944 --- /dev/null +++ b/baselines/nlxgpt/models/gpt_vcr.py @@ -0,0 +1,529 @@ +import torch +import torch.nn as nn +from torch.nn import CrossEntropyLoss +from transformers import GPT2PreTrainedModel +from transformers.activations import ACT2FN +from transformers.modeling_outputs import (BaseModelOutputWithPastAndCrossAttentions,CausalLMOutputWithCrossAttentions,) +from transformers.modeling_utils import Conv1D +from typing import Tuple + + +class GPT2Attention(nn.Module): + def __init__(self, config, is_cross_attention=False): + super().__init__() + + max_positions = config.max_position_embeddings + self.register_buffer( + "bias", + torch.tril(torch.ones((max_positions, max_positions), dtype=torch.uint8)).view( + 1, 1, max_positions, max_positions + ), + ) + self.register_buffer("masked_bias", torch.tensor(-1e4)) + + self.embed_dim = config.hidden_size + self.num_heads = config.num_attention_heads + self.head_dim = self.embed_dim // self.num_heads + self.split_size = self.embed_dim + if self.head_dim * self.num_heads != self.embed_dim: + raise ValueError( + f"`embed_dim` must be divisible by num_heads (got `embed_dim`: {self.embed_dim} and `num_heads`: {self.num_heads})." + ) + + self.scale_attn_weights = config.scale_attn_weights + self.is_cross_attention = is_cross_attention + + if self.is_cross_attention: + self.c_attn = Conv1D(2 * self.embed_dim, self.embed_dim) + self.q_attn = Conv1D(self.embed_dim, self.embed_dim) + else: + self.c_attn = Conv1D(3 * self.embed_dim, self.embed_dim) + self.c_proj = Conv1D(self.embed_dim, self.embed_dim) + + self.attn_dropout = nn.Dropout(config.attn_pdrop) + self.resid_dropout = nn.Dropout(config.resid_pdrop) + + + def _attn(self, query, key, value, attention_mask=None, head_mask=None): + attn_weights = torch.matmul(query, key.transpose(-1, -2)) + + if self.scale_attn_weights: + attn_weights = attn_weights / (float(value.size(-1)) ** 0.5) + + if not self.is_cross_attention: + # if only "normal" attention layer implements causal mask + query_length, key_length = query.size(-2), key.size(-2) + causal_mask = self.bias[:, :, key_length - query_length : key_length, :key_length].bool() + attn_weights = torch.where(causal_mask, attn_weights, self.masked_bias.to(attn_weights.dtype)) + + if attention_mask is not None: + # Apply the attention mask + attn_weights = attn_weights + attention_mask + + attn_weights = nn.Softmax(dim=-1)(attn_weights) + attn_weights = self.attn_dropout(attn_weights) + + # Mask heads if we want to + if head_mask is not None: + attn_weights = attn_weights * head_mask + + attn_output = torch.matmul(attn_weights, value) + + return attn_output, attn_weights + + def _split_heads(self, tensor, num_heads, attn_head_size): + """ + Splits hidden_size dim into attn_head_size and num_heads + """ + new_shape = tensor.size()[:-1] + (num_heads, attn_head_size) + tensor = tensor.view(*new_shape) + return tensor.permute(0, 2, 1, 3) # (batch, head, seq_length, head_features) + + def _merge_heads(self, tensor, num_heads, attn_head_size): + """ + Merges attn_head_size dim and num_attn_heads dim into hidden_size + """ + tensor = tensor.permute(0, 2, 1, 3).contiguous() + new_shape = tensor.size()[:-2] + (num_heads * attn_head_size,) + return tensor.view(new_shape) + + def forward( + self, + 
hidden_states, + layer_past=None, + attention_mask=None, + head_mask=None, + encoder_hidden_states=None, + encoder_attention_mask=None, + use_cache=False, + output_attentions=False, + ): + if encoder_hidden_states is not None: + if not hasattr(self, "q_attn"): + raise ValueError( + "If class is used as cross attention, the weights `q_attn` have to be defined. " + "Please make sure to instantiate class with `GPT2Attention(..., is_cross_attention=True)`." + ) + + query = self.q_attn(hidden_states) + key, value = self.c_attn(encoder_hidden_states).split(self.split_size, dim=2) + attention_mask = encoder_attention_mask + else: + query, key, value = self.c_attn(hidden_states).split(self.split_size, dim=2) + + query = self._split_heads(query, self.num_heads, self.head_dim) + key = self._split_heads(key, self.num_heads, self.head_dim) + value = self._split_heads(value, self.num_heads, self.head_dim) + + if layer_past is not None: + past_key, past_value = layer_past + key = torch.cat((past_key, key), dim=-2) + value = torch.cat((past_value, value), dim=-2) + + if use_cache is True: + present = (key, value) + else: + present = None + + attn_output, attn_weights = self._attn(query, key, value, attention_mask, head_mask) + attn_output = self._merge_heads(attn_output, self.num_heads, self.head_dim) + attn_output = self.c_proj(attn_output) + attn_output = self.resid_dropout(attn_output) + + outputs = (attn_output, present) + if output_attentions: + outputs += (attn_weights,) + + return outputs # a, present, (attentions) + + +class GPT2MLP(nn.Module): + def __init__(self, intermediate_size, config): + super().__init__() + embed_dim = config.hidden_size + self.c_fc = Conv1D(intermediate_size, embed_dim) + self.c_proj = Conv1D(embed_dim, intermediate_size) + self.act = ACT2FN[config.activation_function] + self.dropout = nn.Dropout(config.resid_pdrop) + + def forward(self, hidden_states): + hidden_states = self.c_fc(hidden_states) + hidden_states = self.act(hidden_states) + hidden_states = self.c_proj(hidden_states) + hidden_states = self.dropout(hidden_states) + return hidden_states + + +class GPT2Block(nn.Module): + def __init__(self, config): + super().__init__() + hidden_size = config.hidden_size + inner_dim = config.n_inner if config.n_inner is not None else 4 * hidden_size + + self.ln_1 = nn.LayerNorm(hidden_size, eps=config.layer_norm_epsilon) + self.attn = GPT2Attention(config) + self.ln_2 = nn.LayerNorm(hidden_size, eps=config.layer_norm_epsilon) + + if config.add_cross_attention: + self.crossattention = GPT2Attention(config, is_cross_attention=True) + self.ln_cross_attn = nn.LayerNorm(hidden_size, eps=config.layer_norm_epsilon) + + self.mlp = GPT2MLP(inner_dim, config) + + def forward( + self, + hidden_states, + layer_past=None, + attention_mask=None, + head_mask=None, + encoder_hidden_states=None, + encoder_attention_mask=None, + use_cache=False, + output_attentions=False, + ): + residual = hidden_states + hidden_states = self.ln_1(hidden_states) + attn_outputs = self.attn( + hidden_states, + layer_past=layer_past, + attention_mask=attention_mask, + head_mask=head_mask, + use_cache=use_cache, + output_attentions=output_attentions, + ) + attn_output = attn_outputs[0] # output_attn: a, present, (attentions) + outputs = attn_outputs[1:] + # residual connection + hidden_states = attn_output + residual + + if encoder_hidden_states is not None: + # add one self-attention block for cross-attention + if not hasattr(self, "crossattention"): + raise ValueError( + f"If `encoder_hidden_states` are passed, 
{self} has to be instantiated with " + "cross-attention layers by setting `config.add_cross_attention=True`" + ) + residual = hidden_states + hidden_states = self.ln_cross_attn(hidden_states) + cross_attn_outputs = self.crossattention( + hidden_states, + attention_mask=attention_mask, + head_mask=head_mask, + encoder_hidden_states=encoder_hidden_states, + encoder_attention_mask=encoder_attention_mask, + output_attentions=output_attentions, + ) + attn_output = cross_attn_outputs[0] + # residual connection + hidden_states = residual + attn_output + outputs = outputs + cross_attn_outputs[2:] # add cross attentions if we output attention weights + + residual = hidden_states + hidden_states = self.ln_2(hidden_states) + feed_forward_hidden_states = self.mlp(hidden_states) + # residual connection + hidden_states = residual + feed_forward_hidden_states + + if use_cache: + outputs = (hidden_states,) + outputs + else: + outputs = (hidden_states,) + outputs[1:] + + return outputs # hidden_states, present, (attentions, cross_attentions) + + + +class GPT2Model(GPT2PreTrainedModel): + _keys_to_ignore_on_load_missing = ["attn.masked_bias"] + + def __init__(self, config): + super().__init__(config) + + self.embed_dim = config.hidden_size + + self.wte = nn.Embedding(config.vocab_size, self.embed_dim) + self.wpe = nn.Embedding(config.max_position_embeddings, self.embed_dim) + self.box_embed = nn.Linear(8, self.embed_dim) + + self.drop = nn.Dropout(config.embd_pdrop) + self.h = nn.ModuleList([GPT2Block(config) for _ in range(config.num_hidden_layers)]) + self.ln_f = nn.LayerNorm(self.embed_dim, eps=config.layer_norm_epsilon) + + self.init_weights() + + + def get_input_embeddings(self): + return self.wte + + def set_input_embeddings(self, new_embeddings): + self.wte = new_embeddings + + def forward( + self, + input_ids=None, + past_key_values=None, + attention_mask=None, + token_type_ids=None, + position_ids=None, + head_mask=None, + inputs_embeds=None, + encoder_hidden_states=None, + encoder_attention_mask=None, + boxes = None, + box_numbers = None, + use_cache=None, + output_attentions=None, + output_hidden_states=None, + return_dict=None, + ): + output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions + output_hidden_states = ( + output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states + ) + use_cache = use_cache if use_cache is not None else self.config.use_cache + return_dict = return_dict if return_dict is not None else self.config.use_return_dict + + if input_ids is not None and inputs_embeds is not None: + raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time") + elif input_ids is not None: + input_shape = input_ids.size() + input_ids = input_ids.view(-1, input_shape[-1]) + batch_size = input_ids.shape[0] + elif inputs_embeds is not None: + input_shape = inputs_embeds.size()[:-1] + batch_size = inputs_embeds.shape[0] + else: + raise ValueError("You have to specify either input_ids or inputs_embeds") + + device = input_ids.device if input_ids is not None else inputs_embeds.device + + if token_type_ids is not None: + token_type_ids = token_type_ids.view(-1, input_shape[-1]) + if position_ids is not None: + position_ids = position_ids.view(-1, input_shape[-1]) + + if past_key_values is None: + past_length = 0 + past_key_values = tuple([None] * len(self.h)) + else: + past_length = past_key_values[0][0].size(-2) + if position_ids is None: + position_ids = torch.arange(past_length, 
input_shape[-1] + past_length, dtype=torch.long, device=device) + position_ids = position_ids.unsqueeze(0).view(-1, input_shape[-1]) + + # GPT2Attention mask. + if attention_mask is not None: + assert batch_size > 0, "batch_size has to be defined and > 0" + attention_mask = attention_mask.view(batch_size, -1) + attention_mask = attention_mask[:, None, None, :] + attention_mask = attention_mask.to(dtype=self.dtype) # fp16 compatibility + attention_mask = (1.0 - attention_mask) * -10000.0 + + # If a 2D ou 3D attention mask is provided for the cross-attention + # we need to make broadcastable to [batch_size, num_heads, seq_length, seq_length] + if self.config.add_cross_attention and encoder_hidden_states is not None: + encoder_batch_size, encoder_sequence_length, _ = encoder_hidden_states.size() + encoder_hidden_shape = (encoder_batch_size, encoder_sequence_length) + if encoder_attention_mask is None: + encoder_attention_mask = torch.ones(encoder_hidden_shape, device=device) + encoder_attention_mask = self.invert_attention_mask(encoder_attention_mask) + else: + encoder_attention_mask = None + + # Prepare head mask if needed + # 1.0 in head_mask indicate we keep the head + # attention_probs has shape bsz x n_heads x N x N + # head_mask has shape n_layer x batch x n_heads x N x N + head_mask = self.get_head_mask(head_mask, self.config.n_layer) + + if inputs_embeds is None: + inputs_embeds = self.wte(input_ids) + + position_embeds = self.wpe(position_ids) + hidden_states = inputs_embeds + position_embeds + + if token_type_ids is not None: + token_type_embeds = self.wte(token_type_ids) + hidden_states = hidden_states + token_type_embeds + + box_numbers = self.wte(box_numbers) + hidden_states = hidden_states + box_numbers + + box_states = self.box_embed(boxes) * (boxes.sum(2) != 0).float().unsqueeze(-1) + hidden_states = hidden_states + box_states + + hidden_states = self.drop(hidden_states) + + output_shape = input_shape + (hidden_states.size(-1),) + + presents = () if use_cache else None + all_self_attentions = () if output_attentions else None + all_cross_attentions = () if output_attentions and self.config.add_cross_attention else None + all_hidden_states = () if output_hidden_states else None + for i, (block, layer_past) in enumerate(zip(self.h, past_key_values)): + + if output_hidden_states: + all_hidden_states = all_hidden_states + (hidden_states,) + + outputs = block( + hidden_states, + layer_past=layer_past, + attention_mask=attention_mask, + head_mask=head_mask[i], + encoder_hidden_states=encoder_hidden_states, + encoder_attention_mask=encoder_attention_mask, + use_cache=use_cache, + output_attentions=output_attentions, + ) + + hidden_states = outputs[0] + if use_cache is True: + presents = presents + (outputs[1],) + + if output_attentions: + all_self_attentions = all_self_attentions + (outputs[2 if use_cache else 1],) + if self.config.add_cross_attention: + all_cross_attentions = all_cross_attentions + (outputs[3 if use_cache else 2],) + + + hidden_states = self.ln_f(hidden_states) + + hidden_states = hidden_states.view(*output_shape) + # Add last hidden state + if output_hidden_states: + all_hidden_states = all_hidden_states + (hidden_states,) + + if not return_dict: + return tuple(v for v in [hidden_states, presents, all_hidden_states, all_self_attentions] if v is not None) + + return BaseModelOutputWithPastAndCrossAttentions( + last_hidden_state=hidden_states, + past_key_values=presents, + hidden_states=all_hidden_states, + attentions=all_self_attentions, + 
cross_attentions=all_cross_attentions, + ) + + + +class GPT2LMHeadModel(GPT2PreTrainedModel): + _keys_to_ignore_on_load_missing = [r"attn.masked_bias", r"attn.bias", r"lm_head.weight"] + + def __init__(self, config): + super().__init__(config) + self.transformer = GPT2Model(config) + self.lm_head = nn.Linear(config.n_embd, config.vocab_size, bias=False) + + self.init_weights() + + def get_output_embeddings(self): + return self.lm_head + + def set_output_embeddings(self, new_embeddings): + self.lm_head = new_embeddings + + def prepare_inputs_for_generation(self, input_ids, past=None, **kwargs): + token_type_ids = kwargs.get("token_type_ids", None) + # only last token for inputs_ids if past is defined in kwargs + if past: + input_ids = input_ids[:, -1].unsqueeze(-1) + if token_type_ids is not None: + token_type_ids = token_type_ids[:, -1].unsqueeze(-1) + + attention_mask = kwargs.get("attention_mask", None) + position_ids = kwargs.get("position_ids", None) + + if attention_mask is not None and position_ids is None: + # create position_ids on the fly for batch generation + position_ids = attention_mask.long().cumsum(-1) - 1 + position_ids.masked_fill_(attention_mask == 0, 1) + if past: + position_ids = position_ids[:, -1].unsqueeze(-1) + else: + position_ids = None + return { + "input_ids": input_ids, + "past_key_values": past, + "use_cache": kwargs.get("use_cache"), + "position_ids": position_ids, + "attention_mask": attention_mask, + "token_type_ids": token_type_ids, + } + + + def forward( + self, + input_ids=None, + past_key_values=None, + attention_mask=None, + token_type_ids=None, + position_ids=None, + head_mask=None, + inputs_embeds=None, + encoder_hidden_states=None, + encoder_attention_mask=None, + boxes = None, + box_numbers = None, + labels=None, + use_cache=None, + output_attentions=None, + output_hidden_states=None, + return_dict=None, + ): + + return_dict = return_dict if return_dict is not None else self.config.use_return_dict + + transformer_outputs = self.transformer( + input_ids, + past_key_values=past_key_values, + attention_mask=attention_mask, + token_type_ids=token_type_ids, + position_ids=position_ids, + head_mask=head_mask, + inputs_embeds=inputs_embeds, + encoder_hidden_states=encoder_hidden_states, + encoder_attention_mask=encoder_attention_mask, + boxes = boxes, + box_numbers = box_numbers, + use_cache=use_cache, + output_attentions=output_attentions, + output_hidden_states=output_hidden_states, + return_dict=return_dict, + ) + hidden_states = transformer_outputs[0] + + lm_logits = self.lm_head(hidden_states) + + loss = None + if labels is not None: + # Shift so that tokens < n predict n + shift_logits = lm_logits[..., :-1, :].contiguous() + shift_labels = labels[..., 1:].contiguous() + # Flatten the tokens + loss_fct = CrossEntropyLoss() + loss = loss_fct(shift_logits.view(-1, shift_logits.size(-1)), shift_labels.view(-1)) + + if not return_dict: + output = (lm_logits,) + transformer_outputs[1:] + return ((loss,) + output) if loss is not None else output + + return CausalLMOutputWithCrossAttentions( + loss=loss, + logits=lm_logits, + past_key_values=transformer_outputs.past_key_values, + hidden_states=transformer_outputs.hidden_states, + attentions=transformer_outputs.attentions, + cross_attentions=transformer_outputs.cross_attentions, + ) + + @staticmethod + def _reorder_cache(past: Tuple[Tuple[torch.Tensor]], beam_idx: torch.Tensor) -> Tuple[Tuple[torch.Tensor]]: + """ + This function is used to re-order the :obj:`past_key_values` cache if + 
:meth:`~transformers.PreTrainedModel.beam_search` or :meth:`~transformers.PreTrainedModel.beam_sample` is + called. This is required to match :obj:`past_key_values` with the correct beam_idx at every generation step. + """ + return tuple( + tuple(past_state.index_select(0, beam_idx.to(past_state.device)) for past_state in layer_past) + for layer_past in past + ) diff --git a/baselines/nlxgpt/train.py b/baselines/nlxgpt/train.py new file mode 100644 index 0000000..f8a0257 --- /dev/null +++ b/baselines/nlxgpt/train.py @@ -0,0 +1,722 @@ +import torch +import torch.nn as nn +import torch.nn.functional as F +import torch.utils.data +from torch.utils.data import Dataset +import torchvision.transforms as transforms +from transformers import GPT2Tokenizer, AutoConfig +from transformers import AdamW, get_linear_schedule_with_warmup +import json +from PIL import Image +from accelerate import Accelerator +from models.gpt import GPT2LMHeadModel +from models.clip_vit import ImageEncoder +from utils.data_utils import * +from utils.eval_utils import top_filtering +import pickle +import os +import argparse +import time +from nltk.translate.bleu_score import sentence_bleu +import numpy as np +import traceback +from transformers import BartTokenizer, BartForConditionalGeneration +from typing import List +import re +from sklearn.metrics import f1_score +from evaluate import load + +def proc_ques(ques): + words = re.sub(r"([.,'!?\"()*#:;])",'',ques.lower()).replace('-', ' ').replace('/', ' ') + return words + +class BARTScorer: + def __init__(self, device='cuda', max_length=1024, checkpoint='facebook/bart-large-cnn'): + # Set up model + self.device = device + self.max_length = max_length + self.tokenizer = BartTokenizer.from_pretrained(checkpoint) + self.model = BartForConditionalGeneration.from_pretrained(checkpoint) + self.model.eval() + self.model.to(device) + + # Set up loss + self.loss_fct = nn.NLLLoss(reduction='none', ignore_index=self.model.config.pad_token_id) + self.lsm = nn.LogSoftmax(dim=1) + + def load(self, path=None): + """ Load model from paraphrase finetuning """ + if path is None: + path = 'models/bart.pth' + self.model.load_state_dict(torch.load(path, map_location=self.device)) + + def score(self, srcs, tgts, batch_size=4): + """ Score a batch of examples """ + score_list = [] + for i in range(0, len(srcs), batch_size): + src_list = srcs[i: i + batch_size] + tgt_list = tgts[i: i + batch_size] + try: + with torch.no_grad(): + encoded_src = self.tokenizer( + src_list, + max_length=self.max_length, + truncation=True, + padding=True, + return_tensors='pt' + ) + encoded_tgt = self.tokenizer( + tgt_list, + max_length=self.max_length, + truncation=True, + padding=True, + return_tensors='pt' + ) + src_tokens = encoded_src['input_ids'].to(self.device) + src_mask = encoded_src['attention_mask'].to(self.device) + + tgt_tokens = encoded_tgt['input_ids'].to(self.device) + tgt_mask = encoded_tgt['attention_mask'] + tgt_len = tgt_mask.sum(dim=1).to(self.device) + + output = self.model( + input_ids=src_tokens, + attention_mask=src_mask, + labels=tgt_tokens + ) + logits = output.logits.view(-1, self.model.config.vocab_size) + loss = self.loss_fct(self.lsm(logits), tgt_tokens.view(-1)) + loss = loss.view(tgt_tokens.shape[0], -1) + loss = loss.sum(dim=1) / tgt_len + curr_score_list = [-x.item() for x in loss] + score_list += curr_score_list + + except RuntimeError: + traceback.print_exc() + print(f'source: {src_list}') + print(f'target: {tgt_list}') + exit(0) + return score_list + + def multi_ref_score(self, 
srcs, tgts: List[List[str]], agg="mean", batch_size=4): + # Assert we have the same number of references + ref_nums = [len(x) for x in tgts] + if len(set(ref_nums)) > 1: + raise Exception("You have different number of references per test sample.") + + ref_num = len(tgts[0]) + score_matrix = [] + for i in range(ref_num): + curr_tgts = [x[i] for x in tgts] + scores = self.score(srcs, curr_tgts, batch_size) + score_matrix.append(scores) + if agg == "mean": + score_list = np.mean(score_matrix, axis=0) + elif agg == "max": + score_list = np.max(score_matrix, axis=0) + else: + raise NotImplementedError + return list(score_list) + +def evaluate_all(results_pred, results_gt): + bertscore = load("bertscore") + count = 0 + bleus1 = [] + bleus2 = [] + bleus3 = [] + bleus4 = [] + barts = [] + bert_precision = [] + bert_recall = [] + bert_f1 = [] + emotion_true = [] + emotion_pred = [] + + for idx, pred in enumerate(results_pred): + emotion = pred['caption'].split("because")[0].strip() + exp = pred['caption'].split("because")[-1].strip() + emotion_gt = results_gt[idx]['gt'].split("because")[0].strip() + exp_gt = results_gt[idx]['gt'].split("because")[-1].strip() + + with torch.no_grad(): + bart_scorer = BARTScorer(device='cuda:0', checkpoint='facebook/bart-large-cnn') + bart = bart_scorer.score(exp, exp_gt) + barts.append(np.mean(bart)) + results = bertscore.compute(predictions=[pred['caption']], references=[results_gt[idx]['gt']], model_type="distilbert-base-uncased") + bert_precision.append(results['precision']) + bert_recall.append(results['recall']) + bert_f1.append(results['f1']) + + emotion_true.append(emotions.index(emotion_gt)) + emotion_pred.append(emotions.index(emotion)) + + bleus1.append(sentence_bleu([exp_gt.split()], exp.split(), weights=(1,0,0,0))) + bleus2.append(sentence_bleu([exp_gt.split()], exp.split(), weights=(0.5,0.5,0,0))) + bleus3.append(sentence_bleu([exp_gt.split()], exp.split(), weights=(0.33,0.33,0.33,0))) + bleus4.append(sentence_bleu([exp_gt.split()], exp.split())) + + + # print('{:.2f}%'.format(count/len(results_pred)*100),flush=True) + print('blue1: {:.2f}'.format(np.mean(bleus1)),flush=True) + print('blue2: {:.2f}'.format(np.mean(bleus2)),flush=True) + print('blue3: {:.2f}'.format(np.mean(bleus3)),flush=True) + print('blue4: {:.2f}'.format(np.mean(bleus4)),flush=True) + print('bart score: {:.2f}'.format(np.mean(barts)),flush=True) + print('bert_precision: {:.2f}'.format(np.mean(bert_precision)),flush=True) + print('bert_recall: {:.2f}'.format(np.mean(bert_recall)),flush=True) + print('bert_f1: {:.2f}'.format(np.mean(bert_f1)),flush=True) + print(f1_score(emotion_true, emotion_pred, average='weighted'),flush=True) + +emotions = ['excitement', 'sadness', 'anger', 'contentment', 'something else', 'disgust', 'fear', 'amusement', 'awe'] + +def parse_option(): + parser = argparse.ArgumentParser('NLX-GPT') + + parser.add_argument('--ckpt_path', type=str, default='finetune') + parser.add_argument('--batch_size', type=int, default=32) + parser.add_argument('--freq', type=int, default=10) + parser.add_argument('--visual_backbone', default=False, action="store_true") + parser.add_argument('--load_from_epoch', type=int, default=0) + parser.add_argument('--finetune', default=False, action="store_true") + parser.add_argument('--eval', default=False, action="store_true") + parser.add_argument('--answerer', default=False, action="store_true") + parser.add_argument('--dialog', default=False, action="store_true") + args = parser.parse_args() + return args + +def 
change_requires_grad(model, req_grad): + for p in model.parameters(): + p.requires_grad = req_grad + + +def load_checkpoint(ckpt_path, epoch): + model_name = 'nle_model_{}'.format(str(epoch)) + tokenizer_name = 'nle_gpt2_tokenizer_0' + filename = 'ckpt_stats_' + str(epoch) + '.tar' + + tokenizer = GPT2Tokenizer.from_pretrained(ckpt_path + '/' + tokenizer_name) # load tokenizer + model = GPT2LMHeadModel.from_pretrained(ckpt_path + '/' + model_name).to(device) # load model with config + opt = torch.load(ckpt_path + '/' + filename) + optimizer = get_optimizer(model, learning_rate) + optimizer.load_state_dict(opt['optimizer_state_dict']) + start_epoch = opt['epoch'] + 1 + scheduler_dic = opt['scheduler'] + del opt + torch.cuda.empty_cache() + + return tokenizer, model, optimizer, scheduler_dic, start_epoch + + +def load_pretrained(): + if args.visual_backbone: + model_path = 'pretrain/nle_model_11_vis' + else: + model_path = 'pretrain/nle_model_11' + tokenizer_path = 'pretrain/nle_gpt2_tokenizer_0' + tokenizer = GPT2Tokenizer.from_pretrained(tokenizer_path) # load tokenizer + model = GPT2LMHeadModel.from_pretrained(model_path).to(device) # load model with config + return tokenizer, model + +class AverageMeter(object): + """Computes and stores the average and current value""" + def __init__(self, name, fmt=':f'): + self.name = name + self.fmt = fmt + self.reset() + + def reset(self): + self.val = 0 + self.avg = 0 + self.sum = 0 + self.count = 0 + + def update(self, val, n=1): + self.val = val + self.sum += val * n + self.count += n + self.avg = self.sum / self.count + + def __str__(self): + fmtstr = '{name} {val' + self.fmt + '} ({avg' + self.fmt + '})' + return fmtstr.format(**self.__dict__) + +def save_checkpoint(epoch, unwrapped_model, optimizer, tokenizer, scheduler, ckpt_path, **kwargs): + model_name = 'nle_model_{}'.format(str(epoch)) + tokenizer_name = 'nle_gpt2_tokenizer_{}'.format(str(epoch)) + filename = 'ckpt_stats_' + str(epoch) + '.tar' + + if epoch == 0: + tokenizer.save_pretrained(ckpt_path + tokenizer_name) # save tokenizer + + unwrapped_model.save_pretrained(ckpt_path + model_name, save_function=accelerator.save) + + opt = {'epoch': epoch, + 'optimizer_state_dict': optimizer.state_dict(), + 'scheduler': scheduler.state_dict(), + **kwargs} + + accelerator.save(opt, ckpt_path + filename) + +def evaluate_bleu(results_pred, results_gt): + count = 0 + bleus1 = [] + bleus2 = [] + bleus3 = [] + bleus4 = [] + for idx, pred in enumerate(results_pred): + emotion = pred['caption'].split("because")[0].strip() + exp = pred['caption'].split("because")[-1].strip() + emotion_gt = results_gt[idx]['gt'].split("because")[0].strip() + exp_gt = results_gt[idx]['gt'].split("because")[-1].strip() + + if emotion == emotion_gt: + count+=1 + try: + bleus1.append(sentence_bleu([exp_gt.split()], exp.split(), weights=(1,0,0,0))) + bleus2.append(sentence_bleu([exp_gt.split()], exp.split(), weights=(0.5,0.5,0,0))) + bleus3.append(sentence_bleu([exp_gt.split()], exp.split(), weights=(0.33,0.33,0.33,0))) + bleus4.append(sentence_bleu([exp_gt.split()], exp.split())) + except: + pass + print('{:.2f}%'.format(count/len(results_pred)*100),flush=True) + print('blue1: {:.2f}'.format(np.mean(bleus1)),flush=True) + print('blue2: {:.2f}'.format(np.mean(bleus2)),flush=True) + print('blue3: {:.2f}'.format(np.mean(bleus3)),flush=True) + print('blue4: {:.2f}'.format(np.mean(bleus4)),flush=True) + + +class VQAXTrainDataset(Dataset): + + def __init__(self, path, transform, tokenizer, max_seq_len): + + self.tokenizer = 
tokenizer + self.transform = transform + self.max_seq_len = max_seq_len # question + The answer is becase + with open(path, 'rb') as f: + self.data = pickle.load(f) + self.data = self.data + self.ids_list = range(len(self.data)) + + def __getitem__(self, i): + dialog_id = self.ids_list[i] + sample = self.data[dialog_id] + image = sample['img_src'] + + text_a = '' + text_a += proc_ques(sample['caption1']) + text_a += proc_ques(sample['caption2']) + if args.dialog: + for ut in sample['conversation']: + text_a += proc_ques(ut) + text_a += proc_ques('what is the emotion') + if args.visual_backbone: + if args.answerer: + answer = sample['answerer_emotion'] + text_b = proc_ques(sample['answerer_explanation']) + else: + answer = sample['emotion_after'] + if sample['emotion_after'] == sample['emotion_before']: + text_b = proc_ques(sample['explanation_before']) + else: + text_b = proc_ques(sample['explanation_after']) + else: + answer = sample['emotion_before'] + text_b = proc_ques(sample['corrected_explanation_before']) + + # tokenization process + q_segment_id, a_segment_id, e_segment_id = self.tokenizer.convert_tokens_to_ids(['', + '', + '']) + + tokens = self.tokenizer.tokenize(text_a) + labels = [-100] * len(tokens) # we dont want to predict the question, set to pad to ignore in XE + segment_ids = [q_segment_id] * len(tokens) + + answer = [self.tokenizer.bos_token] + self.tokenizer.tokenize(" the answer is " + answer) + answer_len = len(answer) + tokens_b = self.tokenizer.tokenize(" because " + text_b) + [self.tokenizer.eos_token] + exp_len = len(tokens_b) + tokens += answer + tokens_b + labels += [-100] + answer[ + 1:] + tokens_b # labels will be shifted in the model, so for now set them same as tokens + segment_ids += [a_segment_id] * answer_len + segment_ids += [e_segment_id] * exp_len + + if len(tokens) > self.max_seq_len: + tokens = tokens[:self.max_seq_len] + labels = labels[:self.max_seq_len] + segment_ids = segment_ids[:self.max_seq_len] + + assert len(tokens) == len(segment_ids) + assert len(tokens) == len(labels) + + seq_len = len(tokens) + padding_len = self.max_seq_len - seq_len + tokens = tokens + ([self.tokenizer.pad_token] * padding_len) + labels = labels + ([-100] * padding_len) + + segment_ids += ([e_segment_id] * padding_len) + input_ids = self.tokenizer.convert_tokens_to_ids(tokens) + input_ids = torch.tensor(input_ids, dtype=torch.long) + + labels = [self.tokenizer.convert_tokens_to_ids(t) if t != -100 else t for t in labels] + labels = torch.tensor(labels, dtype=torch.long) + + segment_ids = torch.tensor(segment_ids, dtype=torch.long) + + if args.visual_backbone: + genre = image.split('/')[-2] + image_name = image.split('/')[-1] + root_path = '/ibex/reference/CV/WikiArt/wikiart' + img_path = os.path.join(root_path, genre, image_name) + try: + img = Image.open(img_path).convert('RGB') + except: + alter_img = os.listdir(os.path.join(root_path, genre))[0] + alter_img_path = os.path.join(root_path, genre, alter_img) + img = Image.open(alter_img_path).convert("RGB") + img = self.transform(img) + else: + img = torch.empty([3,256,256]) + + return (img, dialog_id, input_ids, labels, segment_ids) + + def __len__(self): + return len(self.ids_list) + + +class VQAXEvalDataset(Dataset): + + def __init__(self, path, transform, tokenizer, max_seq_len): + self.tokenizer = tokenizer + self.transform = transform + self.max_seq_len = max_seq_len # question + The answer is becase + with open(path, 'rb') as f: + self.data = pickle.load(f) + self.ids_list = range(len(self.data)) + + def 
__getitem__(self, i): + dialog_id = self.ids_list[i] + sample = self.data[dialog_id] + image = sample['img_src'] + text_a = '' + text_a += proc_ques(sample['caption1']) + text_a += proc_ques(sample['caption2']) + if args.dialog: + for ut in sample['conversation']: + text_a += proc_ques(ut) + text_a += proc_ques('what is the emotion') + if args.visual_backbone: + if args.answerer: + answer = sample['answerer_emotion'] + exp = proc_ques(sample['answerer_explanation']) + else: + answer = sample['emotion_after'] + if sample['emotion_after'] == sample['emotion_before']: + exp = proc_ques(sample['corrected_explanation_before']) + else: + exp = proc_ques(sample['corrected_explanation_after']) + else: + answer = sample['emotion_before'] + exp = proc_ques(sample['corrected_explanation_before']) + + emotion = answer + + # tokenization process + q_segment_id, a_segment_id, e_segment_id = self.tokenizer.convert_tokens_to_ids( + ['', '', '']) + tokens = self.tokenizer.tokenize(text_a) + segment_ids = [q_segment_id] * len(tokens) + + answer = [self.tokenizer.bos_token] + self.tokenizer.tokenize(" the answer is") + answer_len = len(answer) + tokens += answer + + segment_ids += [a_segment_id] * answer_len + + input_ids = self.tokenizer.convert_tokens_to_ids(tokens) + input_ids = torch.tensor(input_ids, dtype=torch.long) + segment_ids = torch.tensor(segment_ids, dtype=torch.long) + + if args.visual_backbone: + genre = image.split('/')[-2] + image_name = image.split('/')[-1] + root_path = '/ibex/reference/CV/WikiArt/wikiart' + img_path = os.path.join(root_path, genre, image_name) + try: + img = Image.open(img_path).convert('RGB') + except: + alter_img = os.listdir(os.path.join(root_path, genre))[0] + alter_img_path = os.path.join(root_path, genre, alter_img) + img = Image.open(alter_img_path).convert("RGB") + img = self.transform(img) + else: + img = torch.empty([3,256,256]) + + return img, dialog_id, input_ids, segment_ids, emotion, exp + + def __len__(self): + return len(self.ids_list) + + +def sample_sequences(model, tokenizer, loader): + model.eval() + results_full = [] + SPECIAL_TOKENS = ['<|endoftext|>', '', '', '', ''] + special_tokens_ids = tokenizer.convert_tokens_to_ids(SPECIAL_TOKENS) + because_token = tokenizer.convert_tokens_to_ids('because') + max_len = 30 + results_gt = [] + + for i, batch in enumerate(loader): + + current_output = [] + img, img_id, input_ids, segment_ids, emotion, exp = batch + + img = img.to(device) + input_ids = input_ids.to(device) + segment_ids = segment_ids.to(device) + + emotion = emotion[0] if type(emotion) else emotion + exp = exp[0] if type(exp) else exp + + img_embeddings = image_encoder(img) if args.visual_backbone else None + + always_exp = True + + with torch.no_grad(): + + for step in range(max_len + 1): + + if step == max_len: + break + + outputs = model(input_ids=input_ids, + past_key_values=None, + attention_mask=None, + token_type_ids=segment_ids, + position_ids=None, + encoder_hidden_states=img_embeddings, + encoder_attention_mask=None, + labels=None, + use_cache=False, + return_dict=True) + + lm_logits = outputs.logits + logits = lm_logits[0, -1, :] / temperature + logits = top_filtering(logits, top_k=top_k, top_p=top_p) + probs = F.softmax(logits, dim=-1) + prev = torch.topk(probs, 1)[1] if no_sample else torch.multinomial(probs, 1) + + if prev.item() in special_tokens_ids: + break + + # take care of when to start the token + if not always_exp: + + if prev.item() != because_token: + new_segment = special_tokens_ids[-2] # answer segment + else: + 
new_segment = special_tokens_ids[-1] # explanation segment + always_exp = True + else: + new_segment = special_tokens_ids[-1] # explanation segment + + new_segment = torch.LongTensor([new_segment]).to(device) + current_output.append(prev.item()) + input_ids = torch.cat((input_ids, prev.unsqueeze(0)), dim=1) + segment_ids = torch.cat((segment_ids, new_segment.unsqueeze(0)), dim=1) + + decoded_sequences = tokenizer.decode(current_output, skip_special_tokens=True).lstrip() + results_full.append({"caption": proc_ques(decoded_sequences)}) + results_gt.append({"gt": proc_ques(emotion+' because '+exp)}) + + return results_full, results_gt + + +def get_optimizer(model, learning_rate): + no_decay = ['bias', 'LayerNorm.weight'] + optimizer_grouped_parameters = [ + {'params': [p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay)], + 'weight_decay': weight_decay}, + {'params': [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay)], + 'weight_decay': 0.0}] + + optimizer = AdamW(optimizer_grouped_parameters, lr=learning_rate) + return optimizer + +args = parse_option() +if args.eval: + device = 'cuda' +else: + accelerator = Accelerator() + device = accelerator.device + +finetune_pretrained = args.finetune # if True, finetunes from the image captioning model +eval_batch_size = 1 +img_size = 224 +ckpt_path = args.ckpt_path +nle_data_train_path = 'data/train_data.pickle' +nle_data_eval_path = 'data/val_data.pickle' +nle_data_test_path = 'data/test_data.pkl' +max_seq_len = 400 +no_sample = True +top_k = 0 +top_p = 0.9 +batch_size = args.batch_size # per GPU +num_train_epochs = 100 +weight_decay = 0 +learning_rate = 2e-5 if not finetune_pretrained else 1e-5 +gradient_accumulation_steps = 1 +start_epoch = 0 +temperature = 1 + +if args.visual_backbone: + image_encoder = ImageEncoder(device).to(device) + change_requires_grad(image_encoder, True) + +if args.load_from_epoch > 0: + tokenizer, model, optimizer, scheduler_dic, start_epoch = load_checkpoint(ckpt_path, args.load_from_epoch) + +else: + if finetune_pretrained: + tokenizer, model = load_pretrained() + optimizer = get_optimizer(model, learning_rate) + else: + tokenizer = GPT2Tokenizer.from_pretrained('distilgpt2') + orig_num_tokens = len(tokenizer.encoder) + + num_new_tokens = tokenizer.add_special_tokens({'pad_token': '', + 'additional_special_tokens': ['', '', + '']}) + + assert len(tokenizer) == orig_num_tokens + num_new_tokens + config = AutoConfig.from_pretrained('distilgpt2') + + # Add configs + setattr(config, 'img_size', None) + setattr(config, 'max_seq_len', None) + config.img_size = img_size + config.max_seq_len = max_seq_len + config.add_cross_attention = args.visual_backbone + + model = GPT2LMHeadModel.from_pretrained('distilgpt2', config=config) + model.resize_token_embeddings(len(tokenizer)) + model = model.to(device) + optimizer = get_optimizer(model, learning_rate) + +print("Model Setup Ready...") + +img_transform = transforms.Compose([transforms.Resize((img_size, img_size)), + transforms.ToTensor(), + transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])]) + +if args.eval: + test_dataset = VQAXEvalDataset(path=nle_data_test_path, + transform=img_transform, + tokenizer=tokenizer, + max_seq_len=max_seq_len) + + test_loader = torch.utils.data.DataLoader(test_dataset, + batch_size=1, + shuffle=False, + pin_memory=True) +else: + train_dataset = VQAXTrainDataset(path=nle_data_train_path, + transform=img_transform, + tokenizer=tokenizer, + max_seq_len=max_seq_len) + + 
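+ # The training DataLoader uses the per-GPU --batch_size and is wrapped by accelerator.prepare
+ # below; the evaluation and test loaders stay at batch_size=1 because sample_sequences decodes
+ # one dialog at a time (it indexes lm_logits[0, -1, :]).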
train_loader = torch.utils.data.DataLoader(train_dataset, + batch_size=batch_size, + shuffle=True, + pin_memory=True) + + eval_dataset = VQAXEvalDataset(path=nle_data_eval_path, + transform=img_transform, + tokenizer=tokenizer, + max_seq_len=max_seq_len) + + val_loader = torch.utils.data.DataLoader(eval_dataset, + batch_size=1, + shuffle=False, + pin_memory=True) +if not args.eval: + model, optimizer, train_loader = accelerator.prepare(model, optimizer, train_loader) + + t_total = (len(train_loader) // gradient_accumulation_steps) * num_train_epochs + warmup_steps = 0 # 0.10 * t_total + scheduler = get_linear_schedule_with_warmup(optimizer, num_warmup_steps=warmup_steps, num_training_steps=t_total) + + if args.load_from_epoch > 0: + scheduler.load_state_dict(scheduler_dic) + +if args.eval: + results_full, results_gt = sample_sequences(model, tokenizer, test_loader) + file_pred = args.ckpt_path + '/results/' + str(args.load_from_epoch) + '_pred_test.json' + with open(file_pred, 'w') as w: + json.dump(results_full, w) + file_gt = args.ckpt_path + '/results/' + str(args.load_from_epoch) + '_gt_test.json' + with open(file_gt, 'w') as w: + json.dump(results_gt, w) + evaluate_all(results_full, results_gt) + +else: + for epoch in range(args.load_from_epoch, num_train_epochs): + + model.train() + accum_loss = 0 + end = time.time() + + for step, batch in enumerate(train_loader): + + batch = tuple(input_tensor.to(device) for input_tensor in batch) + img, _, input_ids, labels, segment_ids = batch + + img_embeddings = image_encoder(img) if args.visual_backbone else None + + outputs = model(input_ids=input_ids, + past_key_values=None, + attention_mask=None, + token_type_ids=segment_ids, + position_ids=None, + encoder_hidden_states=img_embeddings, + encoder_attention_mask=None, + labels=labels, + use_cache=False, + return_dict=True) + + loss = outputs.loss + loss = loss / gradient_accumulation_steps + accelerator.backward(loss) + accum_loss += loss.item() + + # measure elapsed time + end = time.time() + + if step % gradient_accumulation_steps == 0 or step == len(train_loader) - 1: + optimizer.step() + scheduler.step() + optimizer.zero_grad() + if step == len(train_loader) - 1: + accelerator.print("\rEpoch {} / {}, Iter {} / {}, Loss: {:.3f}, Time: {}".format(epoch, + num_train_epochs, + step, len(train_loader), + accum_loss, time.time() - end), + end=' ', flush=True) + accum_loss = 0 + + accelerator.wait_for_everyone() + unwrapped_model = accelerator.unwrap_model(model) + if epoch % args.freq ==0: + save_checkpoint(epoch, unwrapped_model, optimizer, tokenizer, scheduler, ckpt_path+'/') + results_full, results_gt = sample_sequences(unwrapped_model, tokenizer, val_loader) + file_pred = args.ckpt_path + '/results/' + str(epoch) + '_pred.json' + with open(file_pred, 'w') as w: + json.dump(results_full, w) + file_gt = args.ckpt_path + '/results/' + str(epoch) + '_gt.json' + with open(file_gt, 'w') as w: + json.dump(results_gt, w) + + evaluate_bleu(results_full, results_gt) + diff --git a/baselines/nlxgpt/utils/__pycache__/data_utils.cpython-38.pyc b/baselines/nlxgpt/utils/__pycache__/data_utils.cpython-38.pyc new file mode 100644 index 0000000..0d1cb46 Binary files /dev/null and b/baselines/nlxgpt/utils/__pycache__/data_utils.cpython-38.pyc differ diff --git a/baselines/nlxgpt/utils/__pycache__/eval_utils.cpython-38.pyc b/baselines/nlxgpt/utils/__pycache__/eval_utils.cpython-38.pyc new file mode 100644 index 0000000..5697350 Binary files /dev/null and 
b/baselines/nlxgpt/utils/__pycache__/eval_utils.cpython-38.pyc differ diff --git a/baselines/nlxgpt/utils/data_utils.py b/baselines/nlxgpt/utils/data_utils.py new file mode 100644 index 0000000..257cdd4 --- /dev/null +++ b/baselines/nlxgpt/utils/data_utils.py @@ -0,0 +1,117 @@ +import re + +contractions = { + "aint": "ain't", "arent": "aren't", "cant": "can't", "couldve": + "could've", "couldnt": "couldn't", "couldn'tve": "couldn't've", + "couldnt've": "couldn't've", "didnt": "didn't", "doesnt": + "doesn't", "dont": "don't", "hadnt": "hadn't", "hadnt've": + "hadn't've", "hadn'tve": "hadn't've", "hasnt": "hasn't", "havent": + "haven't", "hed": "he'd", "hed've": "he'd've", "he'dve": + "he'd've", "hes": "he's", "howd": "how'd", "howll": "how'll", + "hows": "how's", "Id've": "I'd've", "I'dve": "I'd've", "Im": + "I'm", "Ive": "I've", "isnt": "isn't", "itd": "it'd", "itd've": + "it'd've", "it'dve": "it'd've", "itll": "it'll", "let's": "let's", + "maam": "ma'am", "mightnt": "mightn't", "mightnt've": + "mightn't've", "mightn'tve": "mightn't've", "mightve": "might've", + "mustnt": "mustn't", "mustve": "must've", "neednt": "needn't", + "notve": "not've", "oclock": "o'clock", "oughtnt": "oughtn't", + "ow's'at": "'ow's'at", "'ows'at": "'ow's'at", "'ow'sat": + "'ow's'at", "shant": "shan't", "shed've": "she'd've", "she'dve": + "she'd've", "she's": "she's", "shouldve": "should've", "shouldnt": + "shouldn't", "shouldnt've": "shouldn't've", "shouldn'tve": + "shouldn't've", "somebody'd": "somebodyd", "somebodyd've": + "somebody'd've", "somebody'dve": "somebody'd've", "somebodyll": + "somebody'll", "somebodys": "somebody's", "someoned": "someone'd", + "someoned've": "someone'd've", "someone'dve": "someone'd've", + "someonell": "someone'll", "someones": "someone's", "somethingd": + "something'd", "somethingd've": "something'd've", "something'dve": + "something'd've", "somethingll": "something'll", "thats": + "that's", "thered": "there'd", "thered've": "there'd've", + "there'dve": "there'd've", "therere": "there're", "theres": + "there's", "theyd": "they'd", "theyd've": "they'd've", "they'dve": + "they'd've", "theyll": "they'll", "theyre": "they're", "theyve": + "they've", "twas": "'twas", "wasnt": "wasn't", "wed've": + "we'd've", "we'dve": "we'd've", "weve": "we've", "werent": + "weren't", "whatll": "what'll", "whatre": "what're", "whats": + "what's", "whatve": "what've", "whens": "when's", "whered": + "where'd", "wheres": "where's", "whereve": "where've", "whod": + "who'd", "whod've": "who'd've", "who'dve": "who'd've", "wholl": + "who'll", "whos": "who's", "whove": "who've", "whyll": "why'll", + "whyre": "why're", "whys": "why's", "wont": "won't", "wouldve": + "would've", "wouldnt": "wouldn't", "wouldnt've": "wouldn't've", + "wouldn'tve": "wouldn't've", "yall": "y'all", "yall'll": + "y'all'll", "y'allll": "y'all'll", "yall'd've": "y'all'd've", + "y'alld've": "y'all'd've", "y'all'dve": "y'all'd've", "youd": + "you'd", "youd've": "you'd've", "you'dve": "you'd've", "youll": + "you'll", "youre": "you're", "youve": "you've" +} + +manual_map = {'none': '0', + 'zero': '0', + 'one': '1', + 'two': '2', + 'three': '3', + 'four': '4', + 'five': '5', + 'six': '6', + 'seven': '7', + 'eight': '8', + 'nine': '9', + 'ten': '10'} +articles = ['a', 'an', 'the'] +period_strip = re.compile("(?!<=\d)(\.)(?!\d)") +comma_strip = re.compile("(\d)(\,)(\d)") +punct = [';', r"/", '[', ']', '"', '{', '}', + '(', ')', '=', '+', '\\', '_', '-', + '>', '<', '@', '`', ',', '?', '!'] + +def process_punctuation(inText): + outText = inText + 
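+ # Drop a punctuation mark entirely when it borders a space in the input, or when the text
+ # contains a digit,digit pattern (comma_strip); otherwise replace it with a space. Periods
+ # not followed by a digit are then removed by period_strip.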
for p in punct: + if (p + ' ' in inText or ' ' + p in inText) \ + or (re.search(comma_strip, inText) != None): + outText = outText.replace(p, '') + else: + outText = outText.replace(p, ' ') + outText = period_strip.sub("", outText, re.UNICODE) + return outText + + +def process_digit_article(inText): + outText = [] + tempText = inText.lower().split() + for word in tempText: + word = manual_map.setdefault(word, word) + if word not in articles: + outText.append(word) + else: + pass + for wordId, word in enumerate(outText): + if word in contractions: + outText[wordId] = contractions[word] + outText = ' '.join(outText) + return outText + +def prep_ans(answer): + answer = process_digit_article(process_punctuation(answer)) + answer = answer.replace(',', '') + return answer + +def proc_ans(ans): + + ans_prob_dict = {} + + for ans_ in ans: + ans_proc = prep_ans(ans_['answer']) + if ans_proc not in ans_prob_dict: + ans_prob_dict[ans_proc] = 1 + else: + ans_prob_dict[ans_proc] += 1 + + confident_answer = max(ans_prob_dict, key=ans_prob_dict.get) + return confident_answer + +def proc_ques(ques): + words = re.sub(r"([.,'!?\"()*#:;])",'',ques.lower()).replace('-', ' ').replace('/', ' ') + return words + diff --git a/baselines/nlxgpt/utils/eval_utils.py b/baselines/nlxgpt/utils/eval_utils.py new file mode 100644 index 0000000..b15af40 --- /dev/null +++ b/baselines/nlxgpt/utils/eval_utils.py @@ -0,0 +1,40 @@ +import torch +import torch.nn.functional as F + +def top_filtering(logits, top_k=0., top_p=0.9, threshold=-float('Inf'), filter_value=-float('Inf')): + """ Filter a distribution of logits using top-k, top-p (nucleus) and/or threshold filtering + Args: + logits: logits distribution shape (vocabulary size) + top_k: <=0: no filtering, >0: keep only top k tokens with highest probability. + top_p: <=0.0: no filtering, >0.0: keep only a subset S of candidates, where S is the smallest subset + whose total probability mass is greater than or equal to the threshold top_p. + In practice, we select the highest probability tokens whose cumulative probability mass exceeds + the threshold top_p. 
+ threshold: a minimal threshold to keep logits + """ + assert logits.dim() == 1 # Only work for batch size 1 for now - could update but it would obfuscate a bit the code + top_k = min(top_k, logits.size(-1)) + if top_k > 0: + # Remove all tokens with a probability less than the last token in the top-k tokens + indices_to_remove = logits < torch.topk(logits, top_k)[0][..., -1, None] + logits[indices_to_remove] = filter_value + + if top_p > 0.0: + # Compute cumulative probabilities of sorted tokens + sorted_logits, sorted_indices = torch.sort(logits, descending=True) + cumulative_probabilities = torch.cumsum(F.softmax(sorted_logits, dim=-1), dim=-1) + + # Remove tokens with cumulative probability above the threshold + sorted_indices_to_remove = cumulative_probabilities > top_p + # Shift the indices to the right to keep also the first token above the threshold + sorted_indices_to_remove[..., 1:] = sorted_indices_to_remove[..., :-1].clone() + sorted_indices_to_remove[..., 0] = 0 + + # Back to unsorted indices and set them to -infinity + indices_to_remove = sorted_indices[sorted_indices_to_remove] + logits[indices_to_remove] = filter_value + + indices_to_remove = logits < threshold + logits[indices_to_remove] = filter_value + + return logits diff --git a/baselines/nlxgpt/utils/vcr_filter.py b/baselines/nlxgpt/utils/vcr_filter.py new file mode 100644 index 0000000..8072e8a --- /dev/null +++ b/baselines/nlxgpt/utils/vcr_filter.py @@ -0,0 +1,63 @@ +import json +import torch +from bert_score import score +from cococaption.pycocotools.coco import COCO +from cococaption.pycocoevalcap.eval import COCOEvalCap + +annotations_path = 'nle_data/VCR/vcr_test.json' +pred_unf_full_path = 'cococaption/results/unfiltered_captions_full_8.json' # explanations + answers +pred_unf_exp_path = 'cococaption/results/unfiltered_captions_full_8.json' # explanations +save_filtered_caps = 'cococaption/results/vcr_filtered_results.json' +save_filtered_scores = 'cococaption/results/vcr_filtered_scores.json' +annTest = 'cococaption/annotations/vcr_test_annot_exp.json' +keep_keys_path = 'cococaption/results/correct_keys.json' +threshold = 0.92 + +gt = json.load(open(annotations_path, 'r')) +prd = json.load(open(pred_unf_full_path, 'r')) + +predictions = {} +for item in prd: + predictions[item['image_id']] = item['caption'].split("because")[0].strip() + +ground_truths = {} +for key,value in gt.items(): + ground_truths[int(key)] = value['answers'] + +refs = [] +cands = [] +all_keys = [] + +for key,value in predictions.items(): + all_keys.append(key) + refs.append(ground_truths[key].lower()) + cands.append(value.lower()) + + +out, hash = score(cands, refs, model_type='distilbert-base-uncased', verbose=True, idf=False, lang="en", return_hash=True) +P, R, F1 = out + +print("Accuracy: ", F1.mean()) + +all_keys = torch.LongTensor(all_keys) + +with open(keep_keys_path, 'w') as w: + json.dump(all_keys[F1 >= threshold].tolist(), w) + + +correct_keys = json.load(open(keep_keys_path, 'r')) +exp_predictions = json.load(open(pred_unf_exp_path, 'r')) +exp_preds = [item for item in exp_predictions if item['image_id'] in correct_keys] + +with open(save_filtered_caps, 'w') as w: + json.dump(exp_preds, w) + +coco = COCO(annTest) +cocoRes = coco.loadRes(save_filtered_caps) +cocoEval = COCOEvalCap(coco, cocoRes) +cocoEval.params['image_id'] = cocoRes.getImgIds() +cocoEval.evaluate() + +with open(save_filtered_scores, 'w') as w: + json.dump(cocoEval.eval, w) + diff --git a/index.html b/index.html index edeed2a..dbd8294 100644 --- a/index.html +++ 
b/index.html @@ -115,7 +115,7 @@

  • - + Code
  •
diff --git a/ui/README.md b/ui/README.md
new file mode 100644
index 0000000..d398288
--- /dev/null
+++ b/ui/README.md
@@ -0,0 +1,119 @@
+# ParlAI Chat Task
+
+This task exists to demonstrate both the out-of-the-box functionality of setting up a ParlAI chat on Mephisto, as well as more complicated tasks with custom frontend functionality. To get started, you can run one of the following commands:
+
+The baseline chat example can be run from this directory with:
+```console
+python parlai_test_script.py
+```
+
+You can also run an example that has onboarding set up with:
+```console
+python parlai_test_script.py conf=onboarding_example
+```
+This is mostly a wrapper around adding an onboarding qualification, which you can do manually for any of the other configurations:
+```console
+python parlai_test_script.py conf=... mephisto.blueprint.onboarding_qualification=test_onboard_qualification
+```
+
+Further, you can try running a task using a prebuilt customized frontend bundle (built from the `webapp` directory) with:
+```console
+python parlai_test_script.py conf=custom_prebuilt
+```
+
+Further, you can try running a task using an auto-building task (built from the `custom_simple` directory) with:
+```console
+python parlai_test_script.py conf=custom_simple
+```
+
+### Common ParlAI blueprint argument overrides
+- `mephisto.blueprint.onboarding_qualification=` (str): Setting this variable enables onboarding (and will grant the named qualification to first-time workers), which can be used to demo how onboarding worlds and the surrounding functionality work.
+- `mephisto.blueprint.custom_source_dir=` (str): Path to the directory to point `ParlAIChatTaskBuilder` to build a custom frontend source from. See usage of `custom_source_dir` in the Task Arguments section to use this for your task.
+- `mephisto.blueprint.custom_source_bundle=` (str): Whether or not to launch the task with a prebuilt frontend source file (not created with the `TaskBuilder`) at the provided location.
+- `turn_timeout=` (int): Maximum time in seconds to wait for a response between turns before marking a worker as timed out (default 300 seconds).
+
+## Implementation
+### Configuration
+This task is configured using [Hydra](https://hydra.cc/) - details about using Hydra to configure tasks can be read here and in other examples. For more about being able to customize the configuration files, please refer to the [Hydra documentation](https://hydra.cc/docs/intro). Under our current setup, using Hydra means that you must be in the same directory as the Python script for Hydra to correctly pick up the config files.
+
+### The worlds file
+The chats in this example rely on the worlds file `demo_worlds.py`. The worlds in this file get initialized with agents provided by Mephisto, and then during the chat procedure, data is saved locally to the Mephisto data directory.
+
+Task worlds implement the following methods (a minimal sketch follows this list):
+- `parley` - The `parley` method holds the core logic for your world, where ParlAI Agents will interact with one another with `act`s and `observe`s.
+- `episode_done` - The `episode_done` method should return `True` when the conversational episode is over.
+- `shutdown` - The `shutdown` method is used to clean up any resources that were allocated upon starting the world, and is called when the episode completes.
+- `prep_save_data` (optional) - The `prep_save_data` method is used to add any additional values to the saved conversation data. This data will be accessible later in review.
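The worlds in `demo_worlds.py` implement this interface for the affective-dialog task. As a quick orientation, here is a stripped-down, illustrative sketch of the same interface; the class name, option keys, and turn logic below are placeholders rather than the exact world used by this task:

```python
from parlai.core.worlds import validate
from parlai.crowdsourcing.utils.worlds import CrowdTaskWorld


class MinimalChatWorld(CrowdTaskWorld):
    """Illustrative two-agent world: every agent speaks once per parley."""

    def __init__(self, opt, agents=None):
        self.opt = opt
        self.agents = agents
        self.episodeDone = False
        self.max_turns = opt.get("max_turns", 5)
        self.current_turns = 0

    def parley(self):
        # Each agent acts in turn; every other agent observes that act.
        self.current_turns += 1
        for agent in self.agents:
            act = agent.act(timeout=self.opt.get("turn_timeout", 300))
            for other in self.agents:
                if other != agent:
                    other.observe(validate(act))
        if self.current_turns >= self.max_turns:
            self.episodeDone = True

    def episode_done(self):
        return self.episodeDone

    def shutdown(self):
        # Release all agents so one disconnect does not hang the others.
        for agent in self.agents:
            agent.shutdown()

    def prep_save_data(self, agent):
        # Anything returned here is stored alongside the saved conversation.
        return {"completed_turns": self.current_turns}
```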
+
+The world file also needs to implement the following module-level methods (a minimal sketch of these hooks appears at the end of this README):
+- `make_world` - This method should, given world options and a list of agents, return an initialized task world. It may optionally accept an `initialization_data` keyword argument, which will be provided with the `Assignment`'s `InitializationData` if present.
+- `get_world_params` - This method is used to configure details about your ParlAI world for Mephisto to understand how to initialize your world. In this case the only required configuration parameter is `agent_count`, which specifies the number of human agents Mephisto will provide to your `make_world` function.
+- `make_onboarding_world` (optional) - The `make_onboarding_world` method is called to initialize a world on the very first time a specific worker works on your task. This will only ever be called with a single agent, and it should return the initialized onboarding world.
+- `validate_onboarding` (optional) - When an onboarding world completes, this method is called on the full data collected in the onboarding task. If it returns `True` the worker is allowed to move on to the full task. If this method returns `False`, then the worker is blocked from working on this task in the future.
+
+### Task arguments (in the run file)
+ParlAI chat tasks have a number of arguments set up, primarily via Hydra but also using the `SharedParlAITaskState` object. Details of these are listed below.
+#### Mephisto required arguments
+- `mephisto.task.task_title` - The title of the task shown to workers in their queue
+- `mephisto.task.task_description` - The short description for the task that may be shown inline while they're searching for tasks
+- `mephisto.task.task_tags` - Any searchable tags that would make it easy to find your task
+- `mephisto.task.task_reward` - The reward paid out in dollars for completing your task
+- `mephisto.task.task_name` - This task name will be used to consolidate your data across runs. Generally we suggest using `descriptive-string-pilot-#` when piloting and `descriptive-task-string` for your main collection. More details on this are in the (TODO) Mephisto argument instructions.
+- `mephisto.blueprint.onboarding_qualification` (optional) - The qualification to grant to a worker when they have completed onboarding, such that they no longer need to do onboarding in future tasks where you specify the same qualification name. Specifying this qualification will make Mephisto attempt to run onboarding for new workers, and omitting it will skip onboarding for all workers.
+#### ParlAI-specific arguments
+- `mephisto.blueprint.world_file` - Optional path to the python file containing your worlds. Detailed requirements of this file are described in the "The worlds file" section. If you already know what world module you intend to use for a specific run, it's generally a better practice to import that module directly in your run script, and then pass it as `SharedParlAITaskState.world_module`.
+- `mephisto.blueprint.task_description_file` - Path to an HTML file containing the task preview and basic instructions for your task. If you use a custom-built frontend, you can omit this argument and directly edit the `TaskPreviewView` in `app.jsx` instead.
+- `mephisto.blueprint.num_conversations` - Number of conversations to have. This currently leads to `num_conversations` * `agent_count` tasks being launched for workers to do. Incomplete tasks are _currently_ not relaunched.
+- `mephisto.blueprint.custom_source_dir` - Path to a folder containing, at the very least, a `main.js` for a custom ParlAI frontend view. Can also contain an updated `package.json` if that file imports anything outside of the defaults used in `server/blueprints/parlai_chat/webapp/package.json`. Providing this flag will make Mephisto build that frontend in a `_generated` directory, and refer to that build when launching your task. Mephisto will only rebuild when the source files in the provided `custom_source_dir` are updated.
+- `mephisto.blueprint.custom_source_bundle` - Path to a custom-built source bundle to deploy to the frontend instead of the standard ParlAI view. For this task we specify a build in the `webapp` directory, but you'll need to run `npm install; npm run dev` in that directory the first time you use a bundle (and whenever you make changes to the `src` that you want to be picked up).
+- `SharedParlAITaskState.world_module` - The world file module that you want to provide for this run.
+- `SharedParlAITaskState.world_opt` - The world option will be passed to the `make_world` function as the first argument. Passing contents through `world_opt` is the preferred way to share important state between different calls to `make_world`, as this dict will be shared amongst all calls.
+- `SharedParlAITaskState.onboarding_world_opt` - The world option will be passed to the `make_onboarding_world` function as the first argument.
+
+### Task description content
+For simple tasks, you can provide an HTML task description (via the `task_description_file` argument above) containing a preview and description of what the worker is going to be doing in their task.
+
+### Simple custom frontend bundles
+Using the `mephisto.blueprint.custom_source_dir` option, it's possible to specify just a directory containing any frontend code you are using for a task, so long as the app it contains is built using the `bootstrap-chat` package. In this case, Mephisto will handle the process of generating the bundle whenever it detects changed files, meaning that you don't have to think about that part of the process.
+
+The automated build process looks for the following special paths:
+- `[custom_source_dir]/main.js` : this should contain your main application code, and is the only required file in the custom source directory. It should probably end with `ReactDOM.render(<MainApp />, document.getElementById("app"));`
+- `[custom_source_dir]/style.css` : this can contain any style files that alter the way that your chat application looks. You can later reference this file by including `import "./style.css";` in your `main.js` file.
+- `[custom_source_dir]/components/`: This is a special directory that can contain additional elements that your `main.js` file references (using relative paths)
+- `[custom_source_dir]/package.json`: If you want additional dependencies, you can specify them in a `package.json` file. We suggest copying the one present at `mephisto/abstractions/blueprints/parlai_chat/webapp/package.json`.
+
+### Custom frontend bundles
+Custom frontend bundles can be provided that override the view of how the ParlAI chat looks, and the kinds of inputs you can pass. Most of the ParlAI-specific interfacing logic is built into the `bootstrap-chat` package. The remaining custom view logic is in `webapp/src/main.js`. Here we define the `RenderChatMessage` component, which overrides the base behavior.
+
+
+# How can I make my own task?
+## Simple chat collection (no custom frontend) +If you are able to provide your workers enough context just using a task description and perhaps a message in the chat thread with directions, you should be able to work on a task without a custom frontend. In order to get started on a task like this, you'll likely do the following: + +1. Copy the `demo_worlds.py`, `parlai_test_script.py`, and `task_description.html` files to a new directory for your task. This generally would go in the project directory you are launching tasks for, but you can use `mephisto/tasks` if you're just experimenting. +2. Update any task-related variables in your `conf/my_new_config.yaml` file to make sense for your task. +3. Update your worlds file to specify the kind of conversation you are creating between agents. +4. Run `parlai_test_script.py` to pilot your task over localhost. You can use different `worker_id` URL parameters in different windows to play the part of multiple workers at the same time. +5. Repeat 3 & 4 until you're happy with your task. +6. Launch a small batch on a crowd provider to see how real workers handle your task. +7. Iterate more - use a review script (like the one present in `examples/simple_static_task/examine_results`) to make it easy to see what data you're getting. +8. Collect some interesting conversational data. + + +## Tasks with custom frontends +If your task needs additional input beyond simple forms (tutorial TODO, see the `respond_with_form` field in the `demo_worlds.py` file in this example to attempt), you'll likely need to be writing your own frontend components. In order to get started on this path, you'll likely do the following: + +1. Copy this whole example directory into a new directory for your task. This generally would go in the project directory you are launching tasks for, but you can use `mephisto/tasks` if you're just experimenting. +2. Update the task-related variables in your `conf/my_new_config.yaml` file to make sense for your task. +3. Update your worlds file to specify the kind of conversation you are creating between agents. +4. Update `app.jsx` to alter parts of your frontend job to do what you want. +5. Rebuild your frontend with `npm install; npm run dev` +6. Run `parlai_test_script.py` to pilot your task over localhost. You can use different `worker_id` URL parameters in different windows to play the part of multiple workers at the same time. +7. Repeat 3 - 6 until you're happy with your task. +8. Launch a small batch on a crowd provider to see how real workers handle your task. +9. Iterate more - use a review script (like the one present in `examples/simple_static_task/examine_results`) to make it easy to see what data you're getting. +10. Collect some interesting conversational data. + +You may also find success using the options for the simple custom frontend functionality, described in the "Simple custom frontend bundles" section. + +If you do require frontend customization, we recommend using [React Dev Tools](https://chrome.google.com/webstore/detail/react-developer-tools/fmkadmapgofadopljbjfkapdkoienihi?hl=en) to inspect the specific elements you want to change and debug your frontend as you work with it. Note that after rebuilding your frontend (by using `npm install; npm run dev`) you may need to do a force refresh (shift-cmd-R in chrome) to ensure you load the new version of your bundle. 
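As a closing reference for "The worlds file" section above, the module-level hooks wire together roughly as in the sketch below. This is illustrative only: `MinimalChatWorld` and `MinimalOnboardingWorld` stand in for your own `CrowdTaskWorld`/`CrowdOnboardWorld` subclasses, and the onboarding check is a placeholder (see `demo_worlds.py` in this directory for the real versions used by this task):

```python
def make_world(opt, agents):
    # opt is the dict passed through SharedParlAITaskState.world_opt.
    return MinimalChatWorld(opt, agents)


def get_world_params():
    # Mephisto provides this many human agents to make_world.
    return {"agent_count": 2}


def make_onboarding_world(opt, agent):
    # Called once for each first-time worker, with a single agent.
    return MinimalOnboardingWorld(opt, agent)


def validate_onboarding(data):
    # Return True to let the worker proceed, False to block them from this task.
    messages = data["outputs"]["messages"]
    return len(messages) > 0  # placeholder check
```

In the run script, these hooks are picked up either via `mephisto.blueprint.world_file` or by importing the world module and passing it as `SharedParlAITaskState.world_module`, with `world_opt` forwarded to `make_world`.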
diff --git a/ui/__init__.py b/ui/__init__.py new file mode 100644 index 0000000..240697e --- /dev/null +++ b/ui/__init__.py @@ -0,0 +1,5 @@ +#!/usr/bin/env python3 + +# Copyright (c) Facebook, Inc. and its affiliates. +# This source code is licensed under the MIT license found in the +# LICENSE file in the root directory of this source tree. diff --git a/ui/__pycache__/demo_worlds.cpython-39.pyc b/ui/__pycache__/demo_worlds.cpython-39.pyc new file mode 100644 index 0000000..c622ad9 Binary files /dev/null and b/ui/__pycache__/demo_worlds.cpython-39.pyc differ diff --git a/ui/assets/.gitkeep b/ui/assets/.gitkeep new file mode 100644 index 0000000..e69de29 diff --git a/ui/custom_input_and_messages/README.md b/ui/custom_input_and_messages/README.md new file mode 100644 index 0000000..c8374a9 --- /dev/null +++ b/ui/custom_input_and_messages/README.md @@ -0,0 +1,37 @@ +**NOTE: These instructions are from before Hydra integration. They will need to be upated after Hydra integration** + +To run the example in this directory, modify `parlai_test_script.py` in the parent directory replacing: + +``` +source_dir_path = f"{TASK_DIRECTORY}/custom_simple" +``` + +with: + +``` +source_dir_path = f"{TASK_DIRECTORY}/custom_input_and_messages" +``` + +You can then run the task with: +``` +python parlai_test_script.py --build-custom-task True +``` + +--- + +### Additional Notes + +If you are locally developing on a library package such as `bootstrap-chat`, you might receive an unsymlinked version of the package in the `_generated` folder. + +The `_generated` folder is created after each build - which only occurs whenever the source directory is modified. + +So to temporarily work around this, run the run script so a `_generated` folder gets created, cancel out, then do: + +``` +cd custom_input_and_messages/_generated +npm link bootstrap-chat +npm run dev +cd ../.. +``` + +Now when you run the script again, the fixed, symlinked `_generated` from the previous cancelled run will be used. \ No newline at end of file diff --git a/ui/custom_input_and_messages/main.js b/ui/custom_input_and_messages/main.js new file mode 100644 index 0000000..76db270 --- /dev/null +++ b/ui/custom_input_and_messages/main.js @@ -0,0 +1,266 @@ +/* + * Copyright (c) 2017-present, Facebook, Inc. + * All rights reserved. + * This source code is licensed under the BSD-style license found in the + * LICENSE file in the root directory of this source tree. An additional grant + * of patent rights can be found in the PATENTS file in the same directory. + */ + +import React from "react"; +import ReactDOM from "react-dom"; +import "bootstrap-chat/styles.css"; +import { FormGroup, FormControl, Button, Radio } from "react-bootstrap"; + +import { ChatApp, DefaultTaskDescription, INPUT_MODE } from "bootstrap-chat"; + +/* +This example modifies the default parlai_chat example to demonstrate +how one can override the default visual implementations for the +chat message bubble and the response input bar, while coordinating +behavior between them with global state. + +In this example we add a radio button group to each received chat message. +Additionally, we require the user to make a selection for the most +recently received chat message, before they can submit their own message +by modifying the input bar code. + +This example is for illustrative purposes only and has not been tested +with production usage. +*/ + +function ChatMessage({ isSelf, idx, agentName, message = "", onRadioChange }) { + const floatToSide = isSelf ? 
"right" : "left"; + const alertStyle = isSelf ? "alert-info" : "alert-warning"; + + const handleChange = (e) => { + onRadioChange(e.currentTarget.value); + }; + + return ( +
    +
    + + {agentName}: {message} + + {isSelf ? null : ( + + + 1 + {" "} + + 2 + {" "} + + )} +
    +
    + ); +} + +function RenderChatMessage({ + message, + mephistoContext, + appContext, + idx, + onRadioChange, +}) { + const { agentId } = mephistoContext; + const { currentAgentNames } = appContext.taskContext; + + return ( +
    + +
    + ); +} + +function MainApp() { + const [messages, setMessages] = React.useState([]); + const [chatAnnotations, setChatAnnotation] = React.useReducer( + (state, action) => { + return { ...state, ...{ [action.index]: action.value } }; + }, + {} + ); + + const lastMessageAnnotation = chatAnnotations[messages.length - 1]; + + return ( + { + setMessages(messages); + }} + /* + You can also use renderTextResponse below, which allows you + to modify the input bar while keeping additional default + functionality such as the ability to trigger custom forms + and a done state. + Or you can use renderResponse for more flexibility and implement + those states yourself, as shown below with the done state: + */ + renderResponse={({ onMessageSend, inputMode, appContext }) => + inputMode === INPUT_MODE.DONE ? ( +
    +
    +

    Thanks for completing the task!

    + +
    +
    + ) : ( + + ) + } + renderMessage={({ message, idx, mephistoContext, appContext }) => ( + { + setChatAnnotation({ index: idx, value: val }); + }} + /> + )} + renderSidePane={({ mephistoContext: { taskConfig } }) => ( + +

    This is a custom Task Description loaded from a custom bundle

    +

    + It has the ability to do a number of things, like directly access + the contents of task data, view the number of messages so far, and + pretty much anything you make like. We're also able to control other + components as well, as in this example we've made it so that if you + click a message, it will alert with that message idx. +

    +

    The regular task description content will now appear below:

    +
    + )} + /> + ); +} + +function CustomTextResponse({ + onMessageSend, + active, + isLastMessageAnnotated, + lastMessageAnnotation, +}) { + const [textValue, setTextValue] = React.useState( + !lastMessageAnnotation ? "" : lastMessageAnnotation + " - " + ); + const [sending, setSending] = React.useState(false); + + const annotationNeeded = active && !isLastMessageAnnotated; + active = active && isLastMessageAnnotated; + + const inputRef = React.useRef(); + + React.useEffect(() => { + if (active && inputRef.current && inputRef.current.focus) { + inputRef.current.focus(); + } + }, [active]); + + const tryMessageSend = React.useCallback(() => { + if (textValue !== "" && active && !sending) { + setSending(true); + onMessageSend({ text: textValue, task_data: {} }).then(() => { + setTextValue(""); + setSending(false); + }); + } + }, [textValue, active, sending, onMessageSend]); + + const handleKeyPress = React.useCallback( + (e) => { + if (e.key === "Enter") { + tryMessageSend(); + e.stopPropagation(); + e.nativeEvent.stopImmediatePropagation(); + } + }, + [tryMessageSend] + ); + + return ( +
    +
    + { + inputRef.current = ref; + }} + value={textValue} + placeholder={ + annotationNeeded + ? "Please annotate the last message before you can continue" + : "Enter your message here..." + } + onKeyPress={(e) => handleKeyPress(e)} + onChange={(e) => setTextValue(e.target.value)} + disabled={!active || sending} + /> + +
    +
    + ); +} + +ReactDOM.render(, document.getElementById("app")); diff --git a/ui/custom_simple/main.js b/ui/custom_simple/main.js new file mode 100644 index 0000000..e3cf994 --- /dev/null +++ b/ui/custom_simple/main.js @@ -0,0 +1,68 @@ +/* + * Copyright (c) 2017-present, Facebook, Inc. + * All rights reserved. + * This source code is licensed under the BSD-style license found in the + * LICENSE file in the root directory of this source tree. An additional grant + * of patent rights can be found in the PATENTS file in the same directory. + */ + +import React from "react"; +import ReactDOM from "react-dom"; +import "bootstrap-chat/styles.css"; + +import { ChatApp, ChatMessage, DefaultTaskDescription } from "bootstrap-chat"; + +function RenderChatMessage({ message, mephistoContext, appContext, idx }) { + const { agentId } = mephistoContext; + const { currentAgentNames } = appContext.taskContext; + + return ( +
    alert("You clicked on message with index " + idx)}> + +
    + ); +} + +function MainApp() { + return ( + ( + + )} + renderSidePane={({ mephistoContext: { taskConfig } }) => ( + +

    This is a custom Task Description built from a source dir

    +

    + It has the ability to do a number of things, like directly access + the contents of task data, view the number of messages so far, and + pretty much anything you make like. We're also able to control other + components as well, as in this example we've made it so that if you + click a message, it will alert with that message idx. +

    +

    The regular task description content will now appear below:

    +
    + )} + /> + ); +} + +ReactDOM.render(, document.getElementById("app")); diff --git a/ui/demo_worlds.py b/ui/demo_worlds.py new file mode 100644 index 0000000..7255071 --- /dev/null +++ b/ui/demo_worlds.py @@ -0,0 +1,293 @@ +#!/usr/bin/env python3 + +# Copyright (c) Facebook, Inc. and its affiliates. +# This source code is licensed under the MIT license found in the +# LICENSE file in the root directory of this source tree. +import random +import redis + +from parlai.crowdsourcing.utils.worlds import CrowdOnboardWorld, CrowdTaskWorld # type: ignore +from parlai.core.worlds import validate # type: ignore +from joblib import Parallel, delayed # type: ignore + + +class MultiAgentDialogOnboardWorld(CrowdOnboardWorld): + def __init__(self, opt, agent): + super().__init__(opt, agent) + self.opt = opt + + def parley(self): + self.agent.agent_id = "Onboarding Agent" + self.agent.observe({"id": "System", "text": "Welcome onboard!"}) + x = self.agent.act(timeout=self.opt["turn_timeout"]) + self.agent.observe( + { + "id": "System", + "text": "Thank you for your input! Please wait while " + "we match you with another worker...", + "episode_done": True, + } + ) + self.episodeDone = True + + +class MultiAgentDialogWorld(CrowdTaskWorld): + """ + Basic world where each agent gets a turn in a round-robin fashion, receiving as + input the actions of all other agents since that agent last acted. + """ + + def __init__(self, opt, agents=None, image_data=None, shared=None): + # Add passed in agents directly. + self.agents = agents + self.acts = [None] * len(agents) + self.episodeDone = False + self.max_turns = opt.get("max_turns", 10) + self.current_turns = 0 + self.send_task_data = opt.get("send_task_data", False) + self.opt = opt + + img_url_default = 'https://dummyimage.com/600x400/000000/ffffff&text=ASK+Questions+about+the+image' + self.emotion_labels = { + 'positive' : image_data['positive_emotion_label'], + 'negative' : image_data['negative_emotion_label'] + } + + self.image_data = image_data + for idx, agent in enumerate(self.agents): + agent.agent_id = f"Chat Agent {idx + 1}" + agent.observe( + { + "id": "System", + "text": 'You have been paired! Please, directly start with questions. Do not start with words ' + 'like Hello, Hi, Hey, etc ' if not idx else 'You have been paired. Please, avoid short answers e.g, YES/NO without further elaboration/details ', + "task_data": { + "image_src" : image_data['image_src'] if idx else img_url_default, + "positive_caption" : image_data['positive_caption'], + "positive_emotion_label": image_data['positive_emotion_label'], + "negative_caption" : image_data['negative_caption'], + "negative_emotion_label": image_data['negative_emotion_label'], + "positive_emoji_url" : image_data['positive_emoji_url'], + "negative_emoji_url" : image_data['negative_emoji_url'] + } + } + ) + def parley(self): + """ + For each agent, get an observation of the last action each of the other agents + took. + Then take an action yourself. 
+ """ + acts = self.acts + self.current_turns += 1 + for index, agent in enumerate(self.agents): + try: + acts[index] = agent.act(timeout=self.opt["turn_timeout"]) + if self.send_task_data: + acts[index].force_set( + "task_data", + { + "last_acting_agent": agent.agent_id, + "current_dialogue_turn": self.current_turns, + "utterance_count": self.current_turns + index, + }, + ) + except TypeError: + acts[index] = agent.act() # not MTurkAgent + if acts[index]["episode_done"]: + self.episodeDone = True + for other_agent in self.agents: + if other_agent != agent: + other_agent.observe(validate(acts[index])) + if self.current_turns >= self.max_turns: + self.episodeDone = True + + for idx, agent in enumerate(self.agents): + if idx == 0: + agent.observe( + { + "id": "Coordinator", + "text": 'Please select one of the emotions below that reflects your imagination of the image that is shaped by your conversation with the fellow turker. Please, be specific and describe the justification in at least 10 words. \nPlease refer to pieces of information from the conversation you had with the Answerer that informed your decision.\n Please avoid here responses that are irrelevant to our imagination of the artwork and how that constructed your emotion. \n For example, responses like "This person was a very good typist and conversationalist", "I enjoyed talking with a partner". etc', + "task_data": { + "positive_emotion_label": self.emotion_labels['positive'], + "negative_emotion_label": self.emotion_labels['negative'], + "respond_with_form": [ + { + "type": "choices", + "question": "Please, choose emotion based on the conversation", + "choices": [ + "1", + ], + }, + + {"type": "text", "question": 'Why/What makes you feel this particular emotion?'} + ] + }, + } + ) + agent.act() # Request a response + + for idx, agent in enumerate(self.agents): # Ensure you get the response + if idx == 0: + form_result = agent.act(timeout=self.opt["turn_timeout"]) + + for idx, agent in enumerate(self.agents): + if idx==0: + agent.observe( + { + "id": "System", + "text": 'Congrats, now you can see the hidden image', + "task_data": { + "image_src" : self.image_data['image_src'], + "positive_caption" : self.image_data['positive_caption'], + "positive_emotion_label": self.image_data['positive_emotion_label'], + "negative_caption" : self.image_data['negative_caption'], + "negative_emotion_label": self.image_data['negative_emotion_label'], + "positive_emoji_url" : self.image_data['positive_emoji_url'], + "negative_emoji_url" : self.image_data['negative_emoji_url'], + } + } + ) + agent.observe( + { + "id": "Coordinator", + "text": ' ', + "task_data": { + "positive_emotion_label": self.emotion_labels['positive'], + "negative_emotion_label": self.emotion_labels['negative'], + "respond_with_form": [ + { + "type": "choices", + "question": "Please, choose emotion after observing the image", + "choices": [ + "1", + ], + }, + {"type": "text", "question": 'If you selected different emotion label, What made you change your mind?'} + ] + }, + } + ) + agent.act() # Request a response + + if idx==1: + agent.observe( + { + "id": "System", + "text": 'Please, fill the form', + "task_data": { + "image_src" : self.image_data['image_src'], + "positive_caption" : self.image_data['positive_caption'], + "positive_emotion_label": self.image_data['positive_emotion_label'], + "negative_caption" : self.image_data['negative_caption'], + "negative_emotion_label": self.image_data['negative_emotion_label'], + "positive_emoji_url" : 
self.image_data['positive_emoji_url'], + "negative_emoji_url" : self.image_data['negative_emoji_url'], + } + } + ) + agent.observe( + { + "id": "Coordinator", + "text": ' ', + "task_data": { + "positive_emotion_label": self.emotion_labels['positive'], + "negative_emotion_label": self.emotion_labels['negative'], + "respond_with_form": [ + { + "type": "choices", + "question": "Please, choose emotion after observing the image", + "choices": [ + "1", + ], + }, + {"type": "text", "question": 'Please, explain in 10 words what/why made you feel this way?'} + ] + }, + } + ) + agent.act() # Request a response + + for idx, agent in enumerate(self.agents): # Ensure you get the response + #if idx == 0: + form_result = agent.act(timeout=self.opt["turn_timeout"]) + + def prep_save_data(self, agent): + """Process and return any additional data from this world you may want to store""" + return {"example_key": "example_value"} + + def episode_done(self): + return self.episodeDone + + def shutdown(self): + """ + Shutdown all mturk agents in parallel, otherwise if one mturk agent is + disconnected then it could prevent other mturk agents from completing. + """ + global shutdown_agent + + def shutdown_agent(agent): + try: + agent.shutdown(timeout=None) + except Exception: + agent.shutdown() # not MTurkAgent + + Parallel(n_jobs=len(self.agents), backend="threading")( + delayed(shutdown_agent)(agent) for agent in self.agents + ) + + +def make_onboarding_world(opt, agent): + return MultiAgentDialogOnboardWorld(opt, agent) + + +def validate_onboarding(data): + """Check the contents of the data to ensure they are valid""" + print(f"Validating onboarding data {data}") + possible_answers = ["Agree", "agree", "AGREE"] + answer = data['outputs']['messages'][2]['data']['text'] + is_correct = answer in possible_answers + return is_correct + + +def make_world(opt, agents): + # connect to Redis server + r = redis.Redis(host='localhost', port=6379, password='', db=0, charset="utf-8", decode_responses=True) + image_name = r.lpop('visdial_queue') + #image_name = list(opt['image_data'].keys())[0] + image_url = 'https://wikiart-dataset.s3.amazonaws.com/' + image_name + '.jpg' + print("Num of images in REDIS: ",len(r.lrange('visdial_queue', 0, -1))) + + emojis = { + 'anger' : "https://affective-dialog.s3.us-west-2.amazonaws.com/assets/emojis/anger-preview-rev-1-1.png", + 'disgust' : "https://affective-dialog.s3.us-west-2.amazonaws.com/assets/emojis/disgust-trans.png", + 'fear' : "https://affective-dialog.s3.us-west-2.amazonaws.com/assets/emojis/fear-removebg-preview.png", + 'sadness' : "https://affective-dialog.s3.us-west-2.amazonaws.com/assets/emojis/sadness-removebg-preview.png", + 'excitement' : "https://affective-dialog.s3.us-west-2.amazonaws.com/assets/emojis/excitement-transparent.png", + 'amusement' : "https://affective-dialog.s3.us-west-2.amazonaws.com/assets/emojis/amusement-trans.png", + 'contentment': "https://affective-dialog.s3.us-west-2.amazonaws.com/assets/emojis/contentment-trans.png", + 'awe' : "https://affective-dialog.s3.us-west-2.amazonaws.com/assets/emojis/awe-trans.png", + 'something else' : '' + } + caption_data = [ + (opt['image_data'][image_name]['positive'][-1], opt['image_data'][image_name]['positive'][0], + emojis[opt['image_data'][image_name]['positive'][0]]), + (opt['image_data'][image_name]['negative'][-1],opt['image_data'][image_name]['negative'][0], + emojis[opt['image_data'][image_name]['negative'][0]]) + ] + random.shuffle(caption_data) + image_data = { + "image_src" : image_url, + 
"positive_caption" : caption_data[0][0], + "positive_emotion_label": caption_data[0][1], + "negative_caption" : caption_data[1][0], + "negative_emotion_label": caption_data[1][1], + "positive_emoji_url" : caption_data[0][2], + "negative_emoji_url" : caption_data[1][2] + } + + return MultiAgentDialogWorld(opt, agents, image_data) + + +def get_world_params(): + return {"agent_count": 2} diff --git a/ui/hydra_configs/conf/base.yaml b/ui/hydra_configs/conf/base.yaml new file mode 100644 index 0000000..c51fd7e --- /dev/null +++ b/ui/hydra_configs/conf/base.yaml @@ -0,0 +1,13 @@ +#@package _global_ +defaults: + - /mephisto/blueprint: parlai_chat + - /mephisto/architect: local + - /mephisto/provider: mock +mephisto: + blueprint: + world_file: ${task_dir}/demo_worlds.py + task_description_file: ${task_dir}/task_description.html + num_conversations: 1 + task: + # We expect to be able to handle 25 concurrent conversations without issue + max_num_concurrent_units: 50 # 25 convos * 2 people per diff --git a/ui/hydra_configs/conf/custom_prebuilt.yaml b/ui/hydra_configs/conf/custom_prebuilt.yaml new file mode 100644 index 0000000..9bd3328 --- /dev/null +++ b/ui/hydra_configs/conf/custom_prebuilt.yaml @@ -0,0 +1,15 @@ +#@package _global_ +defaults: + - base +mephisto: + blueprint: + custom_source_bundle: ${task_dir}/webapp/build/bundle.js + task: + task_name: parlai-chat-example + task_title: "Test ParlAI Prebuilt Chat Task" + task_description: > + This is a simple chat between two people + used to demonstrate the functionalities around using Mephisto + for ParlAI tasks. + task_reward: 0.3 + task_tags: "dynamic,chat,testing" diff --git a/ui/hydra_configs/conf/custom_simple.yaml b/ui/hydra_configs/conf/custom_simple.yaml new file mode 100644 index 0000000..e8bba10 --- /dev/null +++ b/ui/hydra_configs/conf/custom_simple.yaml @@ -0,0 +1,15 @@ +#@package _global_ +defaults: + - base +mephisto: + blueprint: + custom_source_dir: ${task_dir}/custom_simple + task: + task_name: parlai-chat-example + task_title: "Test ParlAI Simply Built Chat Task" + task_description: > + This is a simple chat between two people + used to demonstrate the functionalities around using Mephisto + for ParlAI tasks. + task_reward: 0.3 + task_tags: "dynamic,chat,testing" diff --git a/ui/hydra_configs/conf/deepenai1.yaml b/ui/hydra_configs/conf/deepenai1.yaml new file mode 100644 index 0000000..25efb23 --- /dev/null +++ b/ui/hydra_configs/conf/deepenai1.yaml @@ -0,0 +1,15 @@ +#@package _global_ +defaults: + - base +mephisto: + blueprint: + custom_source_bundle: ${task_dir}/webapp/build/bundle.js + task: + task_name: deepenai-g1 + task_title: "DeepenAI Upgrade [1]" + task_description: > + This is a simple chat between two people + used to demonstrate the functionalities around using Mephisto + for ParlAI tasks. + task_reward: 0.3 + task_tags: "dynamic,chat,testing" diff --git a/ui/hydra_configs/conf/deepenai2.yaml b/ui/hydra_configs/conf/deepenai2.yaml new file mode 100644 index 0000000..bacf52d --- /dev/null +++ b/ui/hydra_configs/conf/deepenai2.yaml @@ -0,0 +1,15 @@ +#@package _global_ +defaults: + - base +mephisto: + blueprint: + custom_source_bundle: ${task_dir}/webapp/build/bundle.js + task: + task_name: deepenai-g2 + task_title: "DeepenAI Upgrade [2]" + task_description: > + This is a simple chat between two people + used to demonstrate the functionalities around using Mephisto + for ParlAI tasks. 
+ task_reward: 0.3 + task_tags: "dynamic,chat,testing" diff --git a/ui/hydra_configs/conf/example.yaml b/ui/hydra_configs/conf/example.yaml new file mode 100644 index 0000000..45e7ae3 --- /dev/null +++ b/ui/hydra_configs/conf/example.yaml @@ -0,0 +1,13 @@ +#@package _global_ +defaults: + - base +mephisto: + task: + task_name: parlai-chat-example + task_title: "Test ParlAI Chat Task" + task_description: > + This is a simple chat between two people + used to demonstrate the functionalities around using Mephisto + for ParlAI tasks. + task_reward: 0.3 + task_tags: "dynamic,chat,testing" diff --git a/ui/hydra_configs/conf/onboarding_example.yaml b/ui/hydra_configs/conf/onboarding_example.yaml new file mode 100644 index 0000000..5f05c1e --- /dev/null +++ b/ui/hydra_configs/conf/onboarding_example.yaml @@ -0,0 +1,15 @@ +#@package _global_ +defaults: + - base +mephisto: + blueprint: + onboarding_qualification: test-parlai-chat-qualification + task: + task_name: parlai-chat-example + task_title: "Test ParlAI Chat Task" + task_description: > + This is a simple chat between two people + used to demonstrate the functionalities around using Mephisto + for ParlAI tasks. + task_reward: 0.3 + task_tags: "dynamic,chat,testing" diff --git a/ui/hydra_configs/conf/smartone.yaml b/ui/hydra_configs/conf/smartone.yaml new file mode 100644 index 0000000..e69de29 diff --git a/ui/parlai_test_script.py b/ui/parlai_test_script.py new file mode 100644 index 0000000..c456f25 --- /dev/null +++ b/ui/parlai_test_script.py @@ -0,0 +1,64 @@ +#!/usr/bin/env python3 + +# Copyright (c) Facebook, Inc. and its affiliates. +# This source code is licensed under the MIT license found in the +# LICENSE file in the root directory of this source tree. + + +import os +from mephisto.operations.operator import Operator +from mephisto.tools.scripts import task_script +from mephisto.operations.hydra_config import build_default_task_config +from mephisto.abstractions.blueprints.parlai_chat.parlai_chat_blueprint import ( + BLUEPRINT_TYPE_PARLAI_CHAT, + SharedParlAITaskState, +) + +from omegaconf import DictConfig +from dataclasses import dataclass, field + + +@dataclass +class ParlAITaskConfig(build_default_task_config("example")): # type: ignore + num_turns: int = field( + default=3, + metadata={"help": "Number of turns before a conversation is complete"}, + ) + turn_timeout: int = field( + default=300, + metadata={ + "help": "Maximum response time before kicking " + "a worker out, default 300 seconds" + }, + ) + +def load_image_data(): + import pickle + with open('/home/haydark/dev/artemis/artemis_for_dialog_66k.pickle', 'rb') as handle: + image_data = pickle.load(handle) + return image_data + +@task_script(config=ParlAITaskConfig) +def main(operator: "Operator", cfg: DictConfig) -> None: + + image_data = load_image_data() + world_opt = {"num_turns": cfg.num_turns, "turn_timeout": cfg.turn_timeout, "image_data": image_data} + + custom_bundle_path = cfg.mephisto.blueprint.get("custom_source_bundle", None) + if custom_bundle_path is not None: + assert os.path.exists(custom_bundle_path), ( + "Must build the custom bundle with `npm install; npm run dev` from within " + f"the {cfg.task_dir}/webapp directory in order to demo a custom bundle " + ) + world_opt["send_task_data"] = True + + shared_state = SharedParlAITaskState( + world_opt=world_opt, onboarding_world_opt=world_opt + ) + + operator.launch_task_run(cfg.mephisto, shared_state) + operator.wait_for_runs_then_shutdown(skip_input=True, log_rate=30) + + +if __name__ == "__main__": + main() 
diff --git a/ui/task_description.html b/ui/task_description.html new file mode 100644 index 0000000..7f61069 --- /dev/null +++ b/ui/task_description.html @@ -0,0 +1,130 @@ +

    Affective Dialog MTurk Instructions

    +
    +

    Task Description

    + +

    + In this task, you will be chatting with a fellow Turker about an image. + You will take 10 turns (each is a question and an answer) to complete the task. + You will either be asking questions (Questioner) or answering questions (Answerer). + Stay tuned. The task will start when you have been connected with a fellow Turker. + (Note: it may take up to 3 minutes to get paired with a fellow Turker.) +

    +The Questioner should keep their questions about the content of the image and should not ask the Answerer to explicitly share their feelings about the image. +

    + Important! Please follow the detailed instructions below thoroughly, + and once you are paired with a fellow Turker, please do not quit the task until completion. + Otherwise, your HIT will be REJECTED! +

    + +
    +
    +
    +
    + Questioner-instr +
    +

    + Role of Questioner +

    +
    +

    + The Questioner asks questions about a piece of art that is visible only to the Answerer. + The Questioner's task is to decide on an emotion/feeling based on the responses. +

    +

    + Instructions to the Questioner +

    +

    + To help you understand the task, imagine that you are a blind person who wants to engage in an emotional experience + about an artwork and form an opinion about it by querying another person. + As a "Questioner", you do not have access to the art image, but you will have access to two subjective descriptions + reflecting different emotions about the image. You will engage in a dialog with a person (a fellow Turker) + who has access to the artwork image. Your role is to ask questions that are specific to the content of the image + to decide on an emotion that the hidden artwork may trigger for you. At the end of the conversation, + you will be given 9 possible emotions (awe, contentment, excitement, amusement, sadness, anger, disgust, fear, or + something else) to choose from based on your conversation with your fellow Turker. You will also be + asked to provide a text description that explains, based on the conversation, why you chose the selected emotion. + Please refer to pieces of information from the conversation that informed your decision. + Very generic questions (e.g., “What is the image about?”, “you are welcome”, “no more questions?”) are not allowed. +

    + +

    Common bad examples for the Questioner that lead to HIT rejection:

    +
      +
    • Very generic questions: What is the image about? What is depicted?
    • +
    • Asking about the feelings of the Answerer: How do you feel about the image?
    • +
    • Irrelevant questions/chitchat: I am good, weather is great. How are you doing?
    • +
    • Offensive language: ...
    • +
    +
    +
    +
    + +
    +
    +
    +
    + Answerer-instr +
    +

    + Role of Answerer +

    +
    +

    + The Answerer provides answers to the questions + about the content of a visual artwork. +

    +

    + Instructions to the Answerer +

    +

    + To help you understand the task, imagine that you are helping a blind person explore and appreciate an artwork that + only you can see by providing answers about the content of the artwork. + As an "Answerer" you will have access to the artwork image as well as two subjective + descriptions reflecting different emotions about the image. You will engage in a dialog with a person + (a fellow Turker) who cannot see the artwork image. Your role is to help answer questions about the visual content + that is specific to the art piece (not general questions). Also, please help the presumably blind fellow Turker + form their own opinion about the art piece without imposing certain emotions, e.g. by simply saying the image is + sad or joyful in your answers. Please also provide detailed answers (e.g., do not use short answers such as yes/no/maybe). + What is important here is to help them create an emotional experience about the artwork. Please focus on describing + the content from an artistic point of view, including the textures, colors, people, animals, etc. +

    +

    Please keep the following in mind while chatting with your fellow Turker:

    +
      +
    • Short answers: yes, no, maybe, .......
    • +
    • Providing emotions in your answers: e.g., "the picture is depressing".
    • +
    • Irrelevant answers/chitchat: I am good, weather is great. How are you doing?
    • +
    • Offensive Language: ...
    • +
    • Please have a natural conversation. Unnatural-sounding conversations, including awkward messages, will be rejected.
    • +
    +
    +
    +
    + +
    +
    +
    +

    Both the Questioner and the Answerer should follow these instructions during the conversation:

    +
      +
    • Please directly start the conversation. Do not make small talk.
    • +
    • Please do not write potentially offensive messages.
    • +
    • Please do not have conversations about anything other than the image. Either ask questions or answer questions about the image (depending on your role).
    • +
    • Please do not use chat/IM language (e.g., "r8" instead of "right"). Please use professional and grammatically correct English.
    • +
    • Please have a natural conversation. Unnatural-sounding conversations, including awkward messages, will be rejected.
    • +
    • Please note that you are expected to complete and submit the HIT in one go (once you have been connected with a partner). You cannot resume HITs.
    • +
    • You have a maximum of 3 minutes of response time per turn before your HIT is rejected.
    • +
    • The Questioner should keep his or her questions about the content of the image and should not ask the Answerer to explicitly share his or her feelings about the image.
    • + +
    +

    + + Please complete one HIT before proceeding to another. + Please don't open multiple tabs; you cannot chat with yourself. + We may measure the level of engagement in the task, and the task may terminate + if a low level of engagement is detected or if instructions are violated. + Your data will be recorded ONLY when the dialog is complete (i.e., reaching a decision after 10 questions and 10 answers) and no violation of instructions is detected. + Thus, we expect the Questioner and the Answerer to work as a team to fully complete the HIT and receive payment. + Your participation is voluntary and you can stop at any time, but you will be paid for completed tasks only. + + +

    \ No newline at end of file diff --git a/ui/webapp/.babelrc b/ui/webapp/.babelrc new file mode 100644 index 0000000..5507f2e --- /dev/null +++ b/ui/webapp/.babelrc @@ -0,0 +1,4 @@ +{ + "presets": ["@babel/env", "@babel/preset-react"], + "plugins": ["@babel/plugin-proposal-class-properties"] +} diff --git a/ui/webapp/.eslintrc b/ui/webapp/.eslintrc new file mode 100644 index 0000000..fcf5ef3 --- /dev/null +++ b/ui/webapp/.eslintrc @@ -0,0 +1,3 @@ +{ + "extends": ["react-app"] +} diff --git a/ui/webapp/build/bundle.js b/ui/webapp/build/bundle.js new file mode 100644 index 0000000..5af7cd1 --- /dev/null +++ b/ui/webapp/build/bundle.js @@ -0,0 +1,1253 @@ +/* + * ATTENTION: The "eval" devtool has been used (maybe by default in mode: "development"). + * This devtool is neither made for production nor for readable output files. + * It uses "eval()" calls to create a separate source file in the browser devtools. + * If you are trying to read the output file, select a different devtool (https://webpack.js.org/configuration/devtool/) + * or disable the default devtool with "devtool: false". + * If you are looking for production-ready output files, see mode: "production" (https://webpack.js.org/configuration/mode/). + */ +/******/ (() => { // webpackBootstrap +/******/ var __webpack_modules__ = ({ + +/***/ "./node_modules/@babel/runtime-corejs2/core-js/object/assign.js": +/*!**********************************************************************!*\ + !*** ./node_modules/@babel/runtime-corejs2/core-js/object/assign.js ***! + \**********************************************************************/ +/***/ ((module, __unused_webpack_exports, __webpack_require__) => { + +eval("module.exports = __webpack_require__(/*! core-js/library/fn/object/assign */ \"./node_modules/core-js/library/fn/object/assign.js\");\n\n//# sourceURL=webpack://parlai-mturk-task-compiler/./node_modules/@babel/runtime-corejs2/core-js/object/assign.js?"); + +/***/ }), + +/***/ "./node_modules/@babel/runtime-corejs2/core-js/object/create.js": +/*!**********************************************************************!*\ + !*** ./node_modules/@babel/runtime-corejs2/core-js/object/create.js ***! + \**********************************************************************/ +/***/ ((module, __unused_webpack_exports, __webpack_require__) => { + +eval("module.exports = __webpack_require__(/*! core-js/library/fn/object/create */ \"./node_modules/core-js/library/fn/object/create.js\");\n\n//# sourceURL=webpack://parlai-mturk-task-compiler/./node_modules/@babel/runtime-corejs2/core-js/object/create.js?"); + +/***/ }), + +/***/ "./node_modules/@babel/runtime-corejs2/core-js/object/entries.js": +/*!***********************************************************************!*\ + !*** ./node_modules/@babel/runtime-corejs2/core-js/object/entries.js ***! + \***********************************************************************/ +/***/ ((module, __unused_webpack_exports, __webpack_require__) => { + +eval("module.exports = __webpack_require__(/*! core-js/library/fn/object/entries */ \"./node_modules/core-js/library/fn/object/entries.js\");\n\n//# sourceURL=webpack://parlai-mturk-task-compiler/./node_modules/@babel/runtime-corejs2/core-js/object/entries.js?"); + +/***/ }), + +/***/ "./node_modules/@babel/runtime-corejs2/core-js/object/keys.js": +/*!********************************************************************!*\ + !*** ./node_modules/@babel/runtime-corejs2/core-js/object/keys.js ***! 
+ \********************************************************************/ +/***/ ((module, __unused_webpack_exports, __webpack_require__) => { + +eval("module.exports = __webpack_require__(/*! core-js/library/fn/object/keys */ \"./node_modules/core-js/library/fn/object/keys.js\");\n\n//# sourceURL=webpack://parlai-mturk-task-compiler/./node_modules/@babel/runtime-corejs2/core-js/object/keys.js?"); + +/***/ }), + +/***/ "./node_modules/@babel/runtime-corejs2/core-js/object/set-prototype-of.js": +/*!********************************************************************************!*\ + !*** ./node_modules/@babel/runtime-corejs2/core-js/object/set-prototype-of.js ***! + \********************************************************************************/ +/***/ ((module, __unused_webpack_exports, __webpack_require__) => { + +eval("module.exports = __webpack_require__(/*! core-js/library/fn/object/set-prototype-of */ \"./node_modules/core-js/library/fn/object/set-prototype-of.js\");\n\n//# sourceURL=webpack://parlai-mturk-task-compiler/./node_modules/@babel/runtime-corejs2/core-js/object/set-prototype-of.js?"); + +/***/ }), + +/***/ "./node_modules/@babel/runtime-corejs2/core-js/object/values.js": +/*!**********************************************************************!*\ + !*** ./node_modules/@babel/runtime-corejs2/core-js/object/values.js ***! + \**********************************************************************/ +/***/ ((module, __unused_webpack_exports, __webpack_require__) => { + +eval("module.exports = __webpack_require__(/*! core-js/library/fn/object/values */ \"./node_modules/core-js/library/fn/object/values.js\");\n\n//# sourceURL=webpack://parlai-mturk-task-compiler/./node_modules/@babel/runtime-corejs2/core-js/object/values.js?"); + +/***/ }), + +/***/ "./src/composed/AffectiveChatApp.jsx": +/*!*******************************************!*\ + !*** ./src/composed/AffectiveChatApp.jsx ***! + \*******************************************/ +/***/ ((__unused_webpack_module, __webpack_exports__, __webpack_require__) => { + +"use strict"; +eval("__webpack_require__.r(__webpack_exports__);\n/* harmony export */ __webpack_require__.d(__webpack_exports__, {\n/* harmony export */ \"AffectiveChatApp\": () => (/* binding */ AffectiveChatApp),\n/* harmony export */ \"AppContext\": () => (/* binding */ AppContext),\n/* harmony export */ \"INPUT_MODE\": () => (/* binding */ INPUT_MODE)\n/* harmony export */ });\n/* harmony import */ var react__WEBPACK_IMPORTED_MODULE_0__ = __webpack_require__(/*! react */ \"./node_modules/react/index.js\");\n/* harmony import */ var mephisto_task__WEBPACK_IMPORTED_MODULE_1__ = __webpack_require__(/*! mephisto-task */ \"./node_modules/mephisto-task/build/bundle.js\");\n/* harmony import */ var mephisto_task__WEBPACK_IMPORTED_MODULE_1___default = /*#__PURE__*/__webpack_require__.n(mephisto_task__WEBPACK_IMPORTED_MODULE_1__);\n/* harmony import */ var _AffectiveFrontend_jsx__WEBPACK_IMPORTED_MODULE_2__ = __webpack_require__(/*! 
./AffectiveFrontend.jsx */ \"./src/composed/AffectiveFrontend.jsx\");\nvar _excluded = [\"agent_display_name\", \"live_update_requested\"];\n\nfunction _objectWithoutProperties(source, excluded) { if (source == null) return {}; var target = _objectWithoutPropertiesLoose(source, excluded); var key, i; if (Object.getOwnPropertySymbols) { var sourceSymbolKeys = Object.getOwnPropertySymbols(source); for (i = 0; i < sourceSymbolKeys.length; i++) { key = sourceSymbolKeys[i]; if (excluded.indexOf(key) >= 0) continue; if (!Object.prototype.propertyIsEnumerable.call(source, key)) continue; target[key] = source[key]; } } return target; }\n\nfunction _objectWithoutPropertiesLoose(source, excluded) { if (source == null) return {}; var target = {}; var sourceKeys = Object.keys(source); var key, i; for (i = 0; i < sourceKeys.length; i++) { key = sourceKeys[i]; if (excluded.indexOf(key) >= 0) continue; target[key] = source[key]; } return target; }\n\nfunction _toConsumableArray(arr) { return _arrayWithoutHoles(arr) || _iterableToArray(arr) || _unsupportedIterableToArray(arr) || _nonIterableSpread(); }\n\nfunction _nonIterableSpread() { throw new TypeError(\"Invalid attempt to spread non-iterable instance.\\nIn order to be iterable, non-array objects must have a [Symbol.iterator]() method.\"); }\n\nfunction _iterableToArray(iter) { if (typeof Symbol !== \"undefined\" && iter[Symbol.iterator] != null || iter[\"@@iterator\"] != null) return Array.from(iter); }\n\nfunction _arrayWithoutHoles(arr) { if (Array.isArray(arr)) return _arrayLikeToArray(arr); }\n\nfunction ownKeys(object, enumerableOnly) { var keys = Object.keys(object); if (Object.getOwnPropertySymbols) { var symbols = Object.getOwnPropertySymbols(object); enumerableOnly && (symbols = symbols.filter(function (sym) { return Object.getOwnPropertyDescriptor(object, sym).enumerable; })), keys.push.apply(keys, symbols); } return keys; }\n\nfunction _objectSpread(target) { for (var i = 1; i < arguments.length; i++) { var source = null != arguments[i] ? arguments[i] : {}; i % 2 ? ownKeys(Object(source), !0).forEach(function (key) { _defineProperty(target, key, source[key]); }) : Object.getOwnPropertyDescriptors ? 
Object.defineProperties(target, Object.getOwnPropertyDescriptors(source)) : ownKeys(Object(source)).forEach(function (key) { Object.defineProperty(target, key, Object.getOwnPropertyDescriptor(source, key)); }); } return target; }\n\nfunction _defineProperty(obj, key, value) { if (key in obj) { Object.defineProperty(obj, key, { value: value, enumerable: true, configurable: true, writable: true }); } else { obj[key] = value; } return obj; }\n\nfunction _slicedToArray(arr, i) { return _arrayWithHoles(arr) || _iterableToArrayLimit(arr, i) || _unsupportedIterableToArray(arr, i) || _nonIterableRest(); }\n\nfunction _nonIterableRest() { throw new TypeError(\"Invalid attempt to destructure non-iterable instance.\\nIn order to be iterable, non-array objects must have a [Symbol.iterator]() method.\"); }\n\nfunction _unsupportedIterableToArray(o, minLen) { if (!o) return; if (typeof o === \"string\") return _arrayLikeToArray(o, minLen); var n = Object.prototype.toString.call(o).slice(8, -1); if (n === \"Object\" && o.constructor) n = o.constructor.name; if (n === \"Map\" || n === \"Set\") return Array.from(o); if (n === \"Arguments\" || /^(?:Ui|I)nt(?:8|16|32)(?:Clamped)?Array$/.test(n)) return _arrayLikeToArray(o, minLen); }\n\nfunction _arrayLikeToArray(arr, len) { if (len == null || len > arr.length) len = arr.length; for (var i = 0, arr2 = new Array(len); i < len; i++) { arr2[i] = arr[i]; } return arr2; }\n\nfunction _iterableToArrayLimit(arr, i) { var _i = arr == null ? null : typeof Symbol !== \"undefined\" && arr[Symbol.iterator] || arr[\"@@iterator\"]; if (_i == null) return; var _arr = []; var _n = true; var _d = false; var _s, _e; try { for (_i = _i.call(arr); !(_n = (_s = _i.next()).done); _n = true) { _arr.push(_s.value); if (i && _arr.length === i) break; } } catch (err) { _d = true; _e = err; } finally { try { if (!_n && _i[\"return\"] != null) _i[\"return\"](); } finally { if (_d) throw _e; } } return _arr; }\n\nfunction _arrayWithHoles(arr) { if (Array.isArray(arr)) return arr; }\n\n/*\n * Copyright (c) 2017-present, Facebook, Inc.\n * All rights reserved.\n * This source code is licensed under the BSD-style license found in the\n * LICENSE file in the root directory of this source tree. An additional grant\n * of patent rights can be found in the PATENTS file in the same directory.\n */\n\n\n\n/* ================= Application Components ================= */\n\nvar AppContext = /*#__PURE__*/react__WEBPACK_IMPORTED_MODULE_0__.createContext({});\nvar emptyAppSettings = {};\nvar INPUT_MODE = {\n WAITING: \"waiting\",\n INACTIVE: \"inactive\",\n DONE: \"done\",\n READY_FOR_INPUT: \"ready_for_input\"\n};\n\nfunction AffectiveChatApp(_ref) {\n var renderMessage = _ref.renderMessage,\n renderSidePane = _ref.renderSidePane,\n renderTextResponse = _ref.renderTextResponse,\n renderResponse = _ref.renderResponse,\n onMessagesChange = _ref.onMessagesChange,\n _ref$defaultAppSettin = _ref.defaultAppSettings,\n defaultAppSettings = _ref$defaultAppSettin === void 0 ? 
emptyAppSettings : _ref$defaultAppSettin;\n\n var _React$useReducer = react__WEBPACK_IMPORTED_MODULE_0__.useReducer(function (oldContext, newContext) {\n return _objectSpread(_objectSpread({}, oldContext), newContext);\n }, {}),\n _React$useReducer2 = _slicedToArray(_React$useReducer, 2),\n taskContext = _React$useReducer2[0],\n updateContext = _React$useReducer2[1];\n\n var _React$useReducer3 = react__WEBPACK_IMPORTED_MODULE_0__.useReducer(function (previousMessages, newMessage) {\n // we clear messages by sending false\n return newMessage === false ? [] : [].concat(_toConsumableArray(previousMessages), [newMessage]);\n }, []),\n _React$useReducer4 = _slicedToArray(_React$useReducer3, 2),\n messages = _React$useReducer4[0],\n addMessage = _React$useReducer4[1];\n\n react__WEBPACK_IMPORTED_MODULE_0__.useEffect(function () {\n if (onMessagesChange) {\n onMessagesChange(messages);\n }\n }, [messages]);\n\n var initialAppSettings = _objectSpread({\n volume: 1,\n isReview: false,\n isCoverPage: false,\n useTurns: true\n }, defaultAppSettings);\n\n var _React$useReducer5 = react__WEBPACK_IMPORTED_MODULE_0__.useReducer(function (prevSettings, newSettings) {\n return Object.assign(prevSettings, newSettings);\n }, initialAppSettings),\n _React$useReducer6 = _slicedToArray(_React$useReducer5, 2),\n appSettings = _React$useReducer6[0],\n setAppSettings = _React$useReducer6[1];\n\n var _React$useState = react__WEBPACK_IMPORTED_MODULE_0__.useState(INPUT_MODE.WAITING),\n _React$useState2 = _slicedToArray(_React$useState, 2),\n inputMode = _React$useState2[0],\n setInputMode = _React$useState2[1];\n\n function playNotifSound() {\n var audio = new Audio(\"./notif.mp3\");\n audio.volume = appSettings.volume;\n\n if (audio.volume != 0) {\n audio.play();\n }\n }\n\n function trackAgentName(agentName) {\n var _newAgentName;\n\n var previouslyTrackedNames = taskContext.currentAgentNames || {};\n var newAgentName = (_newAgentName = {}, _defineProperty(_newAgentName, agentId, agentName), _defineProperty(_newAgentName, agentName, agentName), _newAgentName);\n\n var currentAgentNames = _objectSpread(_objectSpread({}, previouslyTrackedNames), newAgentName);\n\n updateContext({\n currentAgentNames: currentAgentNames\n });\n }\n\n function handleStateUpdate(state) {\n var agent_display_name = state.agent_display_name,\n live_update_requested = state.live_update_requested,\n remainingState = _objectWithoutProperties(state, _excluded);\n\n if (agent_display_name) {\n trackAgentName(agent_display_name);\n }\n\n if (remainingState.task_done) {\n setInputMode(INPUT_MODE.DONE);\n } else if (live_update_requested === true) {\n setInputMode(INPUT_MODE.READY_FOR_INPUT);\n\n if (appSettings.useTurns) {\n playNotifSound();\n }\n } else if (live_update_requested === false) {\n setInputMode(INPUT_MODE.WAITING);\n }\n\n if (Object.keys(remainingState).length > 0) {\n updateContext(remainingState);\n }\n }\n\n var mephistoProps = (0,mephisto_task__WEBPACK_IMPORTED_MODULE_1__.useMephistoLiveTask)({\n onStatusUpdate: function onStatusUpdate(_ref2) {\n var status = _ref2.status;\n\n if ([mephisto_task__WEBPACK_IMPORTED_MODULE_1__.AGENT_STATUS.DISCONNECT, mephisto_task__WEBPACK_IMPORTED_MODULE_1__.AGENT_STATUS.RETURNED, mephisto_task__WEBPACK_IMPORTED_MODULE_1__.AGENT_STATUS.EXPIRED, mephisto_task__WEBPACK_IMPORTED_MODULE_1__.AGENT_STATUS.TIMEOUT, mephisto_task__WEBPACK_IMPORTED_MODULE_1__.AGENT_STATUS.PARTNER_DISCONNECT, mephisto_task__WEBPACK_IMPORTED_MODULE_1__.AGENT_STATUS.MEPHISTO_DISCONNECT].includes(status)) {\n 
setInputMode(INPUT_MODE.INACTIVE);\n updateContext({\n doneText: mephisto_task__WEBPACK_IMPORTED_MODULE_1__.STATUS_TO_TEXT_MAP[status],\n task_done: status == mephisto_task__WEBPACK_IMPORTED_MODULE_1__.AGENT_STATUS.PARTNER_DISCONNECT\n });\n }\n },\n onLiveUpdate: function onLiveUpdate(message) {\n if (message.task_data !== undefined) {\n handleStateUpdate(message.task_data);\n }\n\n if (message.text !== undefined) {\n addMessage(message);\n } // For handling reconnected packets and properly updating state\n // during turns.\n\n\n if (taskContext.currentAgentNames && message.id in taskContext.currentAgentNames && appSettings.useTurns) {\n // This was our own message, so update to not requesting\n handleStateUpdate({\n live_update_requested: false\n });\n }\n }\n });\n var blockedReason = mephistoProps.blockedReason,\n blockedExplanation = mephistoProps.blockedExplanation,\n taskConfig = mephistoProps.taskConfig,\n isPreview = mephistoProps.isPreview,\n previewHtml = mephistoProps.previewHtml,\n isLoading = mephistoProps.isLoading,\n agentId = mephistoProps.agentId,\n handleSubmit = mephistoProps.handleSubmit,\n connect = mephistoProps.connect,\n destroy = mephistoProps.destroy,\n sendLiveUpdate = mephistoProps.sendLiveUpdate,\n isOnboarding = mephistoProps.isOnboarding,\n agentStatus = mephistoProps.agentStatus;\n react__WEBPACK_IMPORTED_MODULE_0__.useEffect(function () {\n if (agentId) {\n console.log(\"connecting...\");\n connect(agentId);\n }\n }, [agentId]);\n react__WEBPACK_IMPORTED_MODULE_0__.useEffect(function () {\n if (taskContext.is_final) {\n destroy();\n }\n });\n react__WEBPACK_IMPORTED_MODULE_0__.useEffect(function () {\n if (isOnboarding && agentStatus === mephisto_task__WEBPACK_IMPORTED_MODULE_1__.AGENT_STATUS.WAITING) {\n handleSubmit();\n }\n }, [isOnboarding, agentStatus]);\n var handleMessageSend = react__WEBPACK_IMPORTED_MODULE_0__.useCallback(function (message) {\n message = _objectSpread(_objectSpread({}, message), {}, {\n id: agentId,\n episode_done: (taskContext === null || taskContext === void 0 ? void 0 : taskContext.task_done) || false\n });\n return sendLiveUpdate(message).then(addMessage).then(function () {\n if (appSettings.useTurns) {\n handleStateUpdate({\n live_update_requested: false\n });\n }\n });\n }, [agentId, taskContext === null || taskContext === void 0 ? 
void 0 : taskContext.task_done, addMessage, setInputMode]);\n\n if (blockedReason !== null) {\n return /*#__PURE__*/react__WEBPACK_IMPORTED_MODULE_0__.createElement(\"h1\", null, blockedExplanation);\n }\n\n if (isLoading) {\n return /*#__PURE__*/react__WEBPACK_IMPORTED_MODULE_0__.createElement(\"div\", null, \"Initializing...\");\n }\n\n if (isPreview) {\n if (!taskConfig.has_preview) {\n return /*#__PURE__*/react__WEBPACK_IMPORTED_MODULE_0__.createElement(TaskPreviewView, {\n description: taskConfig.task_description\n });\n }\n\n if (previewHtml === null) {\n return /*#__PURE__*/react__WEBPACK_IMPORTED_MODULE_0__.createElement(\"div\", null, \"Loading...\");\n }\n\n return /*#__PURE__*/react__WEBPACK_IMPORTED_MODULE_0__.createElement(\"div\", {\n dangerouslySetInnerHTML: {\n __html: previewHtml\n }\n });\n }\n\n return /*#__PURE__*/react__WEBPACK_IMPORTED_MODULE_0__.createElement(mephisto_task__WEBPACK_IMPORTED_MODULE_1__.MephistoContext.Provider, {\n value: mephistoProps\n }, /*#__PURE__*/react__WEBPACK_IMPORTED_MODULE_0__.createElement(AppContext.Provider, {\n value: {\n taskContext: taskContext,\n appSettings: appSettings,\n setAppSettings: setAppSettings,\n onTaskComplete: function onTaskComplete() {\n destroy();\n handleSubmit({});\n }\n }\n }, /*#__PURE__*/react__WEBPACK_IMPORTED_MODULE_0__.createElement(\"div\", {\n className: \"container-fluid\",\n id: \"ui-container\"\n }, /*#__PURE__*/react__WEBPACK_IMPORTED_MODULE_0__.createElement(_AffectiveFrontend_jsx__WEBPACK_IMPORTED_MODULE_2__[\"default\"], {\n inputMode: inputMode,\n messages: messages,\n onMessageSend: handleMessageSend,\n renderMessage: renderMessage,\n renderSidePane: renderSidePane,\n renderTextResponse: renderTextResponse,\n renderResponse: renderResponse\n }))));\n}\n\nfunction TaskPreviewView(_ref3) {\n var description = _ref3.description;\n return /*#__PURE__*/react__WEBPACK_IMPORTED_MODULE_0__.createElement(\"div\", {\n className: \"preview-screen\"\n }, /*#__PURE__*/react__WEBPACK_IMPORTED_MODULE_0__.createElement(\"div\", {\n dangerouslySetInnerHTML: {\n __html: description\n }\n }));\n}\n\n\n\n//# sourceURL=webpack://parlai-mturk-task-compiler/./src/composed/AffectiveChatApp.jsx?"); + +/***/ }), + +/***/ "./src/composed/AffectiveFrontend.jsx": +/*!********************************************!*\ + !*** ./src/composed/AffectiveFrontend.jsx ***! + \********************************************/ +/***/ ((__unused_webpack_module, __webpack_exports__, __webpack_require__) => { + +"use strict"; +eval("__webpack_require__.r(__webpack_exports__);\n/* harmony export */ __webpack_require__.d(__webpack_exports__, {\n/* harmony export */ \"default\": () => (__WEBPACK_DEFAULT_EXPORT__)\n/* harmony export */ });\n/* harmony import */ var react__WEBPACK_IMPORTED_MODULE_0__ = __webpack_require__(/*! react */ \"./node_modules/react/index.js\");\n/* harmony import */ var bootstrap_chat__WEBPACK_IMPORTED_MODULE_1__ = __webpack_require__(/*! bootstrap-chat */ \"./node_modules/bootstrap-chat/build/bundle.js\");\n/* harmony import */ var bootstrap_chat__WEBPACK_IMPORTED_MODULE_1___default = /*#__PURE__*/__webpack_require__.n(bootstrap_chat__WEBPACK_IMPORTED_MODULE_1__);\n/* harmony import */ var mephisto_task__WEBPACK_IMPORTED_MODULE_2__ = __webpack_require__(/*! 
mephisto-task */ \"./node_modules/mephisto-task/build/bundle.js\");\n/* harmony import */ var mephisto_task__WEBPACK_IMPORTED_MODULE_2___default = /*#__PURE__*/__webpack_require__.n(mephisto_task__WEBPACK_IMPORTED_MODULE_2__);\n/* harmony import */ var _AffectiveChatApp_jsx__WEBPACK_IMPORTED_MODULE_3__ = __webpack_require__(/*! ./AffectiveChatApp.jsx */ \"./src/composed/AffectiveChatApp.jsx\");\n/* harmony import */ var _EmojiFormResponse_jsx__WEBPACK_IMPORTED_MODULE_4__ = __webpack_require__(/*! ./EmojiFormResponse.jsx */ \"./src/composed/EmojiFormResponse.jsx\");\n/*\n * Copyright (c) 2017-present, Facebook, Inc.\n * All rights reserved.\n * This source code is licensed under the BSD-style license found in the\n * LICENSE file in the root directory of this source tree. An additional grant\n * of patent rights can be found in the PATENTS file in the same directory.\n */\n\n\n\n\n\n\nfunction AffectiveFrontend(_ref) {\n var messages = _ref.messages,\n onMessageSend = _ref.onMessageSend,\n inputMode = _ref.inputMode,\n renderSidePane = _ref.renderSidePane,\n renderMessage = _ref.renderMessage,\n renderTextResponse = _ref.renderTextResponse,\n renderResponse = _ref.renderResponse;\n var mephistoContext = react__WEBPACK_IMPORTED_MODULE_0__.useContext(mephisto_task__WEBPACK_IMPORTED_MODULE_2__.MephistoContext);\n var appContext = react__WEBPACK_IMPORTED_MODULE_0__.useContext(_AffectiveChatApp_jsx__WEBPACK_IMPORTED_MODULE_3__.AppContext);\n var connectionStatus = mephistoContext.connectionStatus,\n agentStatus = mephistoContext.agentStatus,\n taskConfig = mephistoContext.taskConfig;\n var appSettings = appContext.appSettings;\n var sidePaneSize = appSettings.isCoverPage ? \"col-xs-12\" : \"col-xs-4\";\n var heightStyle = taskConfig.frame_height == 0 ? {} : {\n height: taskConfig.frame_height\n };\n return /*#__PURE__*/react__WEBPACK_IMPORTED_MODULE_0__.createElement(bootstrap_chat__WEBPACK_IMPORTED_MODULE_1__.ConnectionStatusBoundary, {\n status: connectionStatus\n }, /*#__PURE__*/react__WEBPACK_IMPORTED_MODULE_0__.createElement(\"div\", {\n className: \"row\",\n style: heightStyle\n }, /*#__PURE__*/react__WEBPACK_IMPORTED_MODULE_0__.createElement(\"div\", {\n className: \"side-pane \" + sidePaneSize\n }, renderSidePane({\n mephistoContext: mephistoContext,\n appContext: appContext\n })), /*#__PURE__*/react__WEBPACK_IMPORTED_MODULE_0__.createElement(\"div\", {\n className: \"chat-container-pane\"\n }, /*#__PURE__*/react__WEBPACK_IMPORTED_MODULE_0__.createElement(\"div\", {\n className: \"right-top-pane\"\n }, /*#__PURE__*/react__WEBPACK_IMPORTED_MODULE_0__.createElement(ChatStatusBar, null), /*#__PURE__*/react__WEBPACK_IMPORTED_MODULE_0__.createElement(bootstrap_chat__WEBPACK_IMPORTED_MODULE_1__.ChatPane, {\n scrollBottomKey: messages.length + \"-\" + inputMode\n }, /*#__PURE__*/react__WEBPACK_IMPORTED_MODULE_0__.createElement(\"div\", {\n id: \"message_thread\",\n style: {\n width: \"100%\"\n }\n }, messages.map(function (message, idx) {\n return renderMessage({\n message: message,\n idx: idx,\n appContext: appContext,\n mephistoContext: mephistoContext\n });\n })), inputMode === _AffectiveChatApp_jsx__WEBPACK_IMPORTED_MODULE_3__.INPUT_MODE.WAITING ? /*#__PURE__*/react__WEBPACK_IMPORTED_MODULE_0__.createElement(bootstrap_chat__WEBPACK_IMPORTED_MODULE_1__.SystemMessage, {\n glyphicon: \"hourglass\",\n text: getWaitingMessage(agentStatus)\n }) : null)), renderResponse ? 
renderResponse({\n onMessageSend: onMessageSend,\n inputMode: inputMode,\n appContext: appContext,\n mephistoContext: mephistoContext\n }) : /*#__PURE__*/react__WEBPACK_IMPORTED_MODULE_0__.createElement(ResponsePane, {\n inputMode: inputMode,\n onMessageSend: onMessageSend,\n renderTextResponse: renderTextResponse\n }))));\n}\n\nfunction getWaitingMessage(agentStatus) {\n return agentStatus === \"waiting\" ? \"Waiting to pair with a task...\" : \"Waiting for the next person to speak...\";\n}\n\nfunction ChatStatusBar() {\n var _React$useContext = react__WEBPACK_IMPORTED_MODULE_0__.useContext(mephisto_task__WEBPACK_IMPORTED_MODULE_2__.MephistoContext),\n connectionStatus = _React$useContext.connectionStatus;\n\n var _React$useContext2 = react__WEBPACK_IMPORTED_MODULE_0__.useContext(_AffectiveChatApp_jsx__WEBPACK_IMPORTED_MODULE_3__.AppContext),\n appSettings = _React$useContext2.appSettings,\n setAppSettings = _React$useContext2.setAppSettings;\n\n return /*#__PURE__*/react__WEBPACK_IMPORTED_MODULE_0__.createElement(\"div\", {\n className: \"chat-status-bar\"\n }, /*#__PURE__*/react__WEBPACK_IMPORTED_MODULE_0__.createElement(bootstrap_chat__WEBPACK_IMPORTED_MODULE_1__.ConnectionIndicator, {\n connectionStatus: connectionStatus\n }), /*#__PURE__*/react__WEBPACK_IMPORTED_MODULE_0__.createElement(bootstrap_chat__WEBPACK_IMPORTED_MODULE_1__.VolumeControl, {\n volume: appSettings.volume,\n onVolumeChange: function onVolumeChange(v) {\n return setAppSettings({\n volume: v\n });\n }\n }));\n}\n\nfunction ResponsePane(_ref2) {\n var onMessageSend = _ref2.onMessageSend,\n inputMode = _ref2.inputMode,\n renderTextResponse = _ref2.renderTextResponse;\n var appContext = react__WEBPACK_IMPORTED_MODULE_0__.useContext(_AffectiveChatApp_jsx__WEBPACK_IMPORTED_MODULE_3__.AppContext);\n var mephistoContext = react__WEBPACK_IMPORTED_MODULE_0__.useContext(mephisto_task__WEBPACK_IMPORTED_MODULE_2__.MephistoContext);\n var taskContext = appContext.taskContext,\n onTaskComplete = appContext.onTaskComplete;\n var response_pane = null;\n\n switch (inputMode) {\n case _AffectiveChatApp_jsx__WEBPACK_IMPORTED_MODULE_3__.INPUT_MODE.DONE:\n case _AffectiveChatApp_jsx__WEBPACK_IMPORTED_MODULE_3__.INPUT_MODE.INACTIVE:\n response_pane = /*#__PURE__*/react__WEBPACK_IMPORTED_MODULE_0__.createElement(bootstrap_chat__WEBPACK_IMPORTED_MODULE_1__.DoneResponse, {\n onTaskComplete: onTaskComplete,\n onMessageSend: onMessageSend,\n doneText: taskContext.doneText || null,\n isTaskDone: taskContext.task_done || null\n });\n break;\n\n case _AffectiveChatApp_jsx__WEBPACK_IMPORTED_MODULE_3__.INPUT_MODE.READY_FOR_INPUT:\n case _AffectiveChatApp_jsx__WEBPACK_IMPORTED_MODULE_3__.INPUT_MODE.WAITING:\n if (taskContext && taskContext[\"respond_with_form\"]) {\n response_pane = /*#__PURE__*/react__WEBPACK_IMPORTED_MODULE_0__.createElement(_EmojiFormResponse_jsx__WEBPACK_IMPORTED_MODULE_4__[\"default\"], {\n onMessageSend: onMessageSend,\n active: inputMode === _AffectiveChatApp_jsx__WEBPACK_IMPORTED_MODULE_3__.INPUT_MODE.READY_FOR_INPUT,\n formOptions: taskContext[\"respond_with_form\"]\n });\n } else {\n response_pane = renderTextResponse ? 
renderTextResponse({\n onMessageSend: onMessageSend,\n inputMode: inputMode,\n active: inputMode === _AffectiveChatApp_jsx__WEBPACK_IMPORTED_MODULE_3__.INPUT_MODE.READY_FOR_INPUT,\n appContext: appContext,\n mephistoContext: mephistoContext\n }) : /*#__PURE__*/react__WEBPACK_IMPORTED_MODULE_0__.createElement(bootstrap_chat__WEBPACK_IMPORTED_MODULE_1__.TextResponse, {\n onMessageSend: onMessageSend,\n active: inputMode === _AffectiveChatApp_jsx__WEBPACK_IMPORTED_MODULE_3__.INPUT_MODE.READY_FOR_INPUT\n });\n }\n\n break;\n\n default:\n response_pane = null;\n break;\n }\n\n return /*#__PURE__*/react__WEBPACK_IMPORTED_MODULE_0__.createElement(\"div\", {\n className: \"right-bottom-pane\"\n }, response_pane);\n}\n\n/* harmony default export */ const __WEBPACK_DEFAULT_EXPORT__ = (AffectiveFrontend);\n\n//# sourceURL=webpack://parlai-mturk-task-compiler/./src/composed/AffectiveFrontend.jsx?"); + +/***/ }), + +/***/ "./src/composed/EmojiFormResponse.jsx": +/*!********************************************!*\ + !*** ./src/composed/EmojiFormResponse.jsx ***! + \********************************************/ +/***/ ((__unused_webpack_module, __webpack_exports__, __webpack_require__) => { + +"use strict"; +eval("__webpack_require__.r(__webpack_exports__);\n/* harmony export */ __webpack_require__.d(__webpack_exports__, {\n/* harmony export */ \"default\": () => (__WEBPACK_DEFAULT_EXPORT__)\n/* harmony export */ });\n/* harmony import */ var react__WEBPACK_IMPORTED_MODULE_0__ = __webpack_require__(/*! react */ \"./node_modules/react/index.js\");\n/* harmony import */ var react_bootstrap__WEBPACK_IMPORTED_MODULE_1__ = __webpack_require__(/*! react-bootstrap */ \"./node_modules/react-bootstrap/es/Col.js\");\n/* harmony import */ var react_bootstrap__WEBPACK_IMPORTED_MODULE_2__ = __webpack_require__(/*! react-bootstrap */ \"./node_modules/react-bootstrap/es/FormGroup.js\");\n/* harmony import */ var react_bootstrap__WEBPACK_IMPORTED_MODULE_3__ = __webpack_require__(/*! react-bootstrap */ \"./node_modules/react-bootstrap/es/FormControl.js\");\n/* harmony import */ var react_bootstrap__WEBPACK_IMPORTED_MODULE_4__ = __webpack_require__(/*! react-bootstrap */ \"./node_modules/react-bootstrap/es/Button.js\");\n/* harmony import */ var react_bootstrap__WEBPACK_IMPORTED_MODULE_5__ = __webpack_require__(/*! react-bootstrap */ \"./node_modules/react-bootstrap/es/Form.js\");\nfunction _typeof(obj) { \"@babel/helpers - typeof\"; return _typeof = \"function\" == typeof Symbol && \"symbol\" == typeof Symbol.iterator ? function (obj) { return typeof obj; } : function (obj) { return obj && \"function\" == typeof Symbol && obj.constructor === Symbol && obj !== Symbol.prototype ? 
\"symbol\" : typeof obj; }, _typeof(obj); }\n\nfunction _createForOfIteratorHelper(o, allowArrayLike) { var it = typeof Symbol !== \"undefined\" && o[Symbol.iterator] || o[\"@@iterator\"]; if (!it) { if (Array.isArray(o) || (it = _unsupportedIterableToArray(o)) || allowArrayLike && o && typeof o.length === \"number\") { if (it) o = it; var i = 0; var F = function F() {}; return { s: F, n: function n() { if (i >= o.length) return { done: true }; return { done: false, value: o[i++] }; }, e: function e(_e2) { throw _e2; }, f: F }; } throw new TypeError(\"Invalid attempt to iterate non-iterable instance.\\nIn order to be iterable, non-array objects must have a [Symbol.iterator]() method.\"); } var normalCompletion = true, didErr = false, err; return { s: function s() { it = it.call(o); }, n: function n() { var step = it.next(); normalCompletion = step.done; return step; }, e: function e(_e3) { didErr = true; err = _e3; }, f: function f() { try { if (!normalCompletion && it[\"return\"] != null) it[\"return\"](); } finally { if (didErr) throw err; } } }; }\n\nfunction _classCallCheck(instance, Constructor) { if (!(instance instanceof Constructor)) { throw new TypeError(\"Cannot call a class as a function\"); } }\n\nfunction _defineProperties(target, props) { for (var i = 0; i < props.length; i++) { var descriptor = props[i]; descriptor.enumerable = descriptor.enumerable || false; descriptor.configurable = true; if (\"value\" in descriptor) descriptor.writable = true; Object.defineProperty(target, descriptor.key, descriptor); } }\n\nfunction _createClass(Constructor, protoProps, staticProps) { if (protoProps) _defineProperties(Constructor.prototype, protoProps); if (staticProps) _defineProperties(Constructor, staticProps); Object.defineProperty(Constructor, \"prototype\", { writable: false }); return Constructor; }\n\nfunction _inherits(subClass, superClass) { if (typeof superClass !== \"function\" && superClass !== null) { throw new TypeError(\"Super expression must either be null or a function\"); } subClass.prototype = Object.create(superClass && superClass.prototype, { constructor: { value: subClass, writable: true, configurable: true } }); Object.defineProperty(subClass, \"prototype\", { writable: false }); if (superClass) _setPrototypeOf(subClass, superClass); }\n\nfunction _setPrototypeOf(o, p) { _setPrototypeOf = Object.setPrototypeOf || function _setPrototypeOf(o, p) { o.__proto__ = p; return o; }; return _setPrototypeOf(o, p); }\n\nfunction _createSuper(Derived) { var hasNativeReflectConstruct = _isNativeReflectConstruct(); return function _createSuperInternal() { var Super = _getPrototypeOf(Derived), result; if (hasNativeReflectConstruct) { var NewTarget = _getPrototypeOf(this).constructor; result = Reflect.construct(Super, arguments, NewTarget); } else { result = Super.apply(this, arguments); } return _possibleConstructorReturn(this, result); }; }\n\nfunction _possibleConstructorReturn(self, call) { if (call && (_typeof(call) === \"object\" || typeof call === \"function\")) { return call; } else if (call !== void 0) { throw new TypeError(\"Derived constructors may only return object or undefined\"); } return _assertThisInitialized(self); }\n\nfunction _assertThisInitialized(self) { if (self === void 0) { throw new ReferenceError(\"this hasn't been initialised - super() hasn't been called\"); } return self; }\n\nfunction _isNativeReflectConstruct() { if (typeof Reflect === \"undefined\" || !Reflect.construct) return false; if (Reflect.construct.sham) return false; if (typeof Proxy === 
\"function\") return true; try { Boolean.prototype.valueOf.call(Reflect.construct(Boolean, [], function () {})); return true; } catch (e) { return false; } }\n\nfunction _getPrototypeOf(o) { _getPrototypeOf = Object.setPrototypeOf ? Object.getPrototypeOf : function _getPrototypeOf(o) { return o.__proto__ || Object.getPrototypeOf(o); }; return _getPrototypeOf(o); }\n\nfunction _slicedToArray(arr, i) { return _arrayWithHoles(arr) || _iterableToArrayLimit(arr, i) || _unsupportedIterableToArray(arr, i) || _nonIterableRest(); }\n\nfunction _nonIterableRest() { throw new TypeError(\"Invalid attempt to destructure non-iterable instance.\\nIn order to be iterable, non-array objects must have a [Symbol.iterator]() method.\"); }\n\nfunction _unsupportedIterableToArray(o, minLen) { if (!o) return; if (typeof o === \"string\") return _arrayLikeToArray(o, minLen); var n = Object.prototype.toString.call(o).slice(8, -1); if (n === \"Object\" && o.constructor) n = o.constructor.name; if (n === \"Map\" || n === \"Set\") return Array.from(o); if (n === \"Arguments\" || /^(?:Ui|I)nt(?:8|16|32)(?:Clamped)?Array$/.test(n)) return _arrayLikeToArray(o, minLen); }\n\nfunction _arrayLikeToArray(arr, len) { if (len == null || len > arr.length) len = arr.length; for (var i = 0, arr2 = new Array(len); i < len; i++) { arr2[i] = arr[i]; } return arr2; }\n\nfunction _iterableToArrayLimit(arr, i) { var _i = arr == null ? null : typeof Symbol !== \"undefined\" && arr[Symbol.iterator] || arr[\"@@iterator\"]; if (_i == null) return; var _arr = []; var _n = true; var _d = false; var _s, _e; try { for (_i = _i.call(arr); !(_n = (_s = _i.next()).done); _n = true) { _arr.push(_s.value); if (i && _arr.length === i) break; } } catch (err) { _d = true; _e = err; } finally { try { if (!_n && _i[\"return\"] != null) _i[\"return\"](); } finally { if (_d) throw _e; } } return _arr; }\n\nfunction _arrayWithHoles(arr) { if (Array.isArray(arr)) return arr; }\n\n/*\n * Copyright (c) 2017-present, Facebook, Inc.\n * All rights reserved.\n * This source code is licensed under the BSD-style license found in the\n * LICENSE file in the root directory of this source tree. 
An additional grant\n * of patent rights can be found in the PATENTS file in the same directory.\n */\n\n\nvar rating_value = null;\nvar emojis = [{\n id: \"00\",\n name: \"emotion\",\n label: \"Anger\",\n value: \"Anger\",\n emoji: \"https://i.imgur.com/zNDm7kE.jpg\",\n isChecked: false\n}, {\n id: \"01\",\n name: \"\",\n label: \"Disgust\",\n value: \"Disgust\",\n emoji: \"https://i.imgur.com/yUvJNUW.png\",\n isChecked: false\n}, {\n id: \"02\",\n name: \"emotion\",\n label: \"Fear\",\n value: \"Fear\",\n emoji: \"https://i.imgur.com/bQsXv2s.png\",\n isChecked: false\n}, {\n id: \"03\",\n name: \"emotion\",\n label: \"Sadness\",\n value: \"Sadness\",\n emoji: \"https://i.imgur.com/bYLyDAs.png\",\n isChecked: false\n}, {\n id: \"04\",\n name: \"emotion\",\n label: \"Excitement\",\n value: \"Excitement\",\n emoji: \"https://i.imgur.com/FBMXWUE.png\",\n isChecked: false\n}, {\n id: \"05\",\n name: \"emotion\",\n label: \"Amusement\",\n value: \"Amusement\",\n emoji: \"https://i.imgur.com/RjnmFuv.png\",\n isChecked: false\n}, {\n id: \"06\",\n name: \"emotion\",\n label: \"Contentment\",\n value: \"Contentment\",\n emoji: \"https://i.imgur.com/fUoaoi1.png\",\n isChecked: false\n}, {\n id: \"07\",\n name: \"emotion\",\n label: \"Awe\",\n value: \"Awe\",\n emoji: \"https://i.imgur.com/T93v73O.png\",\n isChecked: false\n}, {\n id: \"08\",\n name: \"emotion\",\n label: \"Something Else\",\n value: \"Something Else\",\n emoji: \"\",\n isChecked: false\n}];\n\nvar RadioInput = function RadioInput(_ref) {\n var name = _ref.name,\n label = _ref.label,\n value = _ref.value,\n isChecked = _ref.isChecked,\n handleChange = _ref.handleChange;\n\n var handleRadioChange = function handleRadioChange(e) {\n var id = e.currentTarget.id;\n handleChange(id); // Send back id to radio group for comparison\n };\n\n return /*#__PURE__*/react__WEBPACK_IMPORTED_MODULE_0__.createElement(\"div\", null, /*#__PURE__*/react__WEBPACK_IMPORTED_MODULE_0__.createElement(\"input\", {\n type: \"radio\",\n className: \"custom-radio\",\n name: name,\n id: value // htlmlFor targets this id.\n ,\n checked: isChecked,\n onChange: handleRadioChange\n }), /*#__PURE__*/react__WEBPACK_IMPORTED_MODULE_0__.createElement(\"label\", {\n htmlFor: value\n }, /*#__PURE__*/react__WEBPACK_IMPORTED_MODULE_0__.createElement(\"span\", null, label)));\n};\n\nvar RadioGroupInput = function RadioGroupInput() {\n var _useState = (0,react__WEBPACK_IMPORTED_MODULE_0__.useState)(\"\"),\n _useState2 = _slicedToArray(_useState, 2),\n selectedInput = _useState2[0],\n setSelectedInput = _useState2[1];\n\n var _handleChange = function handleChange(inputValue) {\n emojis.forEach(function (emoji) {\n if (emoji.id === inputValue) {\n emoji.isChecked = true;\n } else {\n emoji.isChecked = false;\n }\n });\n setSelectedInput(inputValue);\n };\n\n return /*#__PURE__*/react__WEBPACK_IMPORTED_MODULE_0__.createElement(react_bootstrap__WEBPACK_IMPORTED_MODULE_1__[\"default\"], {\n sm: 11\n }, /*#__PURE__*/react__WEBPACK_IMPORTED_MODULE_0__.createElement(\"div\", {\n style: {\n width: \"100%\",\n display: \"grid\",\n gridTemplateColumns: \"auto auto auto auto auto auto auto auto auto\",\n gridColumnGap: \"8x\",\n justifyContent: \"center\",\n alignItems: \"center\",\n fontSize: \"11px\"\n }\n }, emojis.map(function (emoji) {\n return /*#__PURE__*/react__WEBPACK_IMPORTED_MODULE_0__.createElement(\"div\", {\n style: {\n display: \"flex\",\n flexDirection: \"column\"\n }\n }, /*#__PURE__*/react__WEBPACK_IMPORTED_MODULE_0__.createElement(RadioInput, {\n key: emoji.id,\n 
name: emoji.name,\n value: emoji.value,\n label: emoji.label,\n isChecked: emoji.isChecked,\n handleChange: function handleChange() {\n return _handleChange(emoji.id);\n }\n }), /*#__PURE__*/react__WEBPACK_IMPORTED_MODULE_0__.createElement(\"img\", {\n src: emoji.emoji,\n width: \"40\"\n }));\n })));\n};\n\nvar EmojiFormResponse = /*#__PURE__*/function (_React$Component) {\n _inherits(EmojiFormResponse, _React$Component);\n\n var _super = _createSuper(EmojiFormResponse);\n\n // Provide a form-like interface to MTurk interface.\n function EmojiFormResponse(props) {\n var _this;\n\n _classCallCheck(this, EmojiFormResponse);\n\n _this = _super.call(this, props); // At this point it should be assumed that task_data\n // has a field \"respond_with_form\"\n\n var responses = [];\n\n var _iterator = _createForOfIteratorHelper(_this.props.formOptions),\n _step;\n\n try {\n for (_iterator.s(); !(_step = _iterator.n()).done;) {\n var _ = _step.value;\n responses.push(\"\");\n }\n } catch (err) {\n _iterator.e(err);\n } finally {\n _iterator.f();\n }\n\n _this.state = {\n responses: responses,\n sending: false\n };\n return _this;\n }\n\n _createClass(EmojiFormResponse, [{\n key: \"tryMessageSend\",\n value: function tryMessageSend() {\n var _this2 = this;\n\n var form_elements = this.props.formOptions;\n var question = form_elements[0][\"question\"];\n var response_data = [];\n var response_text = \"\";\n var all_response_filled = false;\n var answer = \"\";\n\n for (var i = 0; i < emojis.length; i++) {\n var e = emojis[i];\n\n if (e.isChecked) {\n answer = e.value;\n all_response_filled = true;\n }\n }\n\n response_data.push({\n question: question,\n response: answer\n });\n response_data.push({\n question: \"Response Rating\",\n response: \"rating_value\"\n });\n\n for (var ind in form_elements) {\n var _question = form_elements[ind][\"question\"];\n var response = this.state.responses[ind];\n response_data.push({\n question: _question,\n response: response\n });\n response_text += _question + \": \" + response + \"\\n\";\n }\n\n if (!response_data.at(-1).response) {\n all_response_filled = false;\n }\n\n if (all_response_filled && this.props.active && !this.state.sending) {\n this.setState({\n sending: true\n });\n this.props.onMessageSend({\n text: response_text,\n task_data: {\n form_responses: response_data\n }\n }).then(function () {\n return _this2.setState({\n sending: false\n });\n }); // clear answers once sent\n\n this.setState(function (prevState) {\n prevState[\"responses\"].fill(\"\");\n return {\n responses: prevState[\"responses\"]\n };\n });\n }\n }\n }, {\n key: \"render\",\n value: function render() {\n var _this3 = this;\n\n var form_elements = this.props.formOptions;\n var listFormElements = form_elements.map(function (form_elem, index) {\n var question = form_elem[\"question\"];\n var form_type = form_elem[\"type\"];\n\n if (form_elem[\"type\"] === \"choices\") {\n var choices = [/*#__PURE__*/react__WEBPACK_IMPORTED_MODULE_0__.createElement(\"option\", {\n key: \"empty_option\"\n })].concat(form_elem[\"choices\"].map(function (option_label, index) {\n return /*#__PURE__*/react__WEBPACK_IMPORTED_MODULE_0__.createElement(\"option\", {\n key: \"option_\" + index.toString()\n }, option_label);\n }));\n return /*#__PURE__*/react__WEBPACK_IMPORTED_MODULE_0__.createElement(react_bootstrap__WEBPACK_IMPORTED_MODULE_2__[\"default\"], {\n key: \"form_el_\" + index\n }, /*#__PURE__*/react__WEBPACK_IMPORTED_MODULE_0__.createElement(\"p\", null, question), 
/*#__PURE__*/react__WEBPACK_IMPORTED_MODULE_0__.createElement(RadioGroupInput, null));\n }\n\n return /*#__PURE__*/react__WEBPACK_IMPORTED_MODULE_0__.createElement(react_bootstrap__WEBPACK_IMPORTED_MODULE_2__[\"default\"], {\n key: \"form_el_\" + index\n }, question, /*#__PURE__*/react__WEBPACK_IMPORTED_MODULE_0__.createElement(react_bootstrap__WEBPACK_IMPORTED_MODULE_1__[\"default\"], {\n sm: 11\n }, /*#__PURE__*/react__WEBPACK_IMPORTED_MODULE_0__.createElement(react_bootstrap__WEBPACK_IMPORTED_MODULE_3__[\"default\"], {\n type: \"text\",\n componentClass: \"textarea\",\n placeholder: \"at least 10 words\",\n style: {\n fontSize: \"12px\",\n height: '50px'\n },\n value: _this3.state.responses[index],\n onChange: function onChange(e) {\n var text = e.target.value;\n\n _this3.setState(function (prevState) {\n var new_res = prevState[\"responses\"];\n new_res[index] = text;\n return {\n responses: new_res\n };\n });\n },\n onKeyPress: function onKeyPress(e) {\n if (e.key === \"Enter\") {\n e.preventDefault();\n e.stopPropagation();\n e.nativeEvent.stopImmediatePropagation();\n }\n }\n })), /*#__PURE__*/react__WEBPACK_IMPORTED_MODULE_0__.createElement(\"hr\", null), /*#__PURE__*/react__WEBPACK_IMPORTED_MODULE_0__.createElement(\"hr\", null), question === \"Why/What makes you feel this particular emotion?\" ? \"Please, rate the response of the fellow turker\" : null);\n });\n var submit_button = /*#__PURE__*/react__WEBPACK_IMPORTED_MODULE_0__.createElement(react_bootstrap__WEBPACK_IMPORTED_MODULE_4__[\"default\"], {\n className: \"btn btn-primary\",\n style: {\n height: \"30px\",\n width: \"100px\",\n fontSize: \"12px\"\n },\n id: \"id_send_msg_button\",\n disabled: this.state.textval === \"\" || !this.props.active || this.state.sending,\n onClick: function onClick() {\n return _this3.tryMessageSend();\n }\n }, \"Send\");\n return /*#__PURE__*/react__WEBPACK_IMPORTED_MODULE_0__.createElement(\"div\", {\n id: \"response-type-text-input\",\n className: \"response-type-module\",\n style: {\n paddingTop: \"15px\",\n \"float\": \"left\",\n width: \"100%\",\n backgroundColor: \"#eeeeee\"\n }\n }, /*#__PURE__*/react__WEBPACK_IMPORTED_MODULE_0__.createElement(react_bootstrap__WEBPACK_IMPORTED_MODULE_5__[\"default\"], {\n horizontal: true,\n style: {\n backgroundColor: \"#eeeeee\",\n paddingBottom: \"2px\"\n }\n }, listFormElements, /*#__PURE__*/react__WEBPACK_IMPORTED_MODULE_0__.createElement(react_bootstrap__WEBPACK_IMPORTED_MODULE_2__[\"default\"], null, /*#__PURE__*/react__WEBPACK_IMPORTED_MODULE_0__.createElement(react_bootstrap__WEBPACK_IMPORTED_MODULE_1__[\"default\"], {\n sm: 6\n }), /*#__PURE__*/react__WEBPACK_IMPORTED_MODULE_0__.createElement(react_bootstrap__WEBPACK_IMPORTED_MODULE_1__[\"default\"], {\n sm: 5\n }, submit_button))));\n }\n }]);\n\n return EmojiFormResponse;\n}(react__WEBPACK_IMPORTED_MODULE_0__.Component);\n\n/* harmony default export */ const __WEBPACK_DEFAULT_EXPORT__ = (EmojiFormResponse);\n\n//# sourceURL=webpack://parlai-mturk-task-compiler/./src/composed/EmojiFormResponse.jsx?"); + +/***/ }), + +/***/ "./src/main.js": +/*!*********************!*\ + !*** ./src/main.js ***! + \*********************/ +/***/ ((__unused_webpack_module, __webpack_exports__, __webpack_require__) => { + +"use strict"; +eval("__webpack_require__.r(__webpack_exports__);\n/* harmony import */ var react__WEBPACK_IMPORTED_MODULE_0__ = __webpack_require__(/*! react */ \"./node_modules/react/index.js\");\n/* harmony import */ var react_dom__WEBPACK_IMPORTED_MODULE_1__ = __webpack_require__(/*! 
react-dom */ \"./node_modules/react-dom/index.js\");\n/* harmony import */ var bootstrap_chat_styles_css__WEBPACK_IMPORTED_MODULE_2__ = __webpack_require__(/*! bootstrap-chat/styles.css */ \"./node_modules/bootstrap-chat/styles.css\");\n/* harmony import */ var bootstrap_chat_styles_css__WEBPACK_IMPORTED_MODULE_2___default = /*#__PURE__*/__webpack_require__.n(bootstrap_chat_styles_css__WEBPACK_IMPORTED_MODULE_2__);\n/* harmony import */ var _css_custom_styles_css__WEBPACK_IMPORTED_MODULE_3__ = __webpack_require__(/*! ./css/custom-styles.css */ \"./src/css/custom-styles.css\");\n/* harmony import */ var _css_custom_styles_css__WEBPACK_IMPORTED_MODULE_3___default = /*#__PURE__*/__webpack_require__.n(_css_custom_styles_css__WEBPACK_IMPORTED_MODULE_3__);\n/* harmony import */ var _composed_AffectiveChatApp_jsx__WEBPACK_IMPORTED_MODULE_4__ = __webpack_require__(/*! ./composed/AffectiveChatApp.jsx */ \"./src/composed/AffectiveChatApp.jsx\");\n/* harmony import */ var bootstrap_chat__WEBPACK_IMPORTED_MODULE_5__ = __webpack_require__(/*! bootstrap-chat */ \"./node_modules/bootstrap-chat/build/bundle.js\");\n/* harmony import */ var bootstrap_chat__WEBPACK_IMPORTED_MODULE_5___default = /*#__PURE__*/__webpack_require__.n(bootstrap_chat__WEBPACK_IMPORTED_MODULE_5__);\n/*\n * Copyright (c) 2017-present, Facebook, Inc.\n * All rights reserved.\n * This source code is licensed under the BSD-style license found in the\n * LICENSE file in the root directory of this source tree. An additional grant\n * of patent rights can be found in the PATENTS file in the same directory.\n */\n\n\n\n\n\n\n\nfunction PrintCaptionData(_ref) {\n var taskContext = _ref.taskContext;\n return taskContext.positive_emoji_url ? /*#__PURE__*/react__WEBPACK_IMPORTED_MODULE_0__.createElement(\"div\", {\n className: \"row\"\n }, /*#__PURE__*/react__WEBPACK_IMPORTED_MODULE_0__.createElement(\"div\", {\n className: \"col-lg-6\"\n }, /*#__PURE__*/react__WEBPACK_IMPORTED_MODULE_0__.createElement(\"div\", {\n style: {\n width: 70,\n height: 70\n }\n }, /*#__PURE__*/react__WEBPACK_IMPORTED_MODULE_0__.createElement(\"img\", {\n src: taskContext.negative_emoji_url,\n alt: \"Emoji-icon1\",\n style: {\n width: \"100%\",\n height: \"100%\"\n }\n })), /*#__PURE__*/react__WEBPACK_IMPORTED_MODULE_0__.createElement(\"h4\", null, taskContext.negative_emotion_label), /*#__PURE__*/react__WEBPACK_IMPORTED_MODULE_0__.createElement(\"h5\", null, taskContext.negative_caption)), /*#__PURE__*/react__WEBPACK_IMPORTED_MODULE_0__.createElement(\"div\", {\n className: \"col-lg-6\"\n }, /*#__PURE__*/react__WEBPACK_IMPORTED_MODULE_0__.createElement(\"div\", {\n style: {\n width: 70,\n height: 70\n }\n }, /*#__PURE__*/react__WEBPACK_IMPORTED_MODULE_0__.createElement(\"img\", {\n src: taskContext.positive_emoji_url,\n alt: \"Emoji-icon2\",\n style: {\n width: \"100%\",\n height: \"100%\"\n }\n })), /*#__PURE__*/react__WEBPACK_IMPORTED_MODULE_0__.createElement(\"h4\", null, taskContext.positive_emotion_label), /*#__PURE__*/react__WEBPACK_IMPORTED_MODULE_0__.createElement(\"h5\", null, taskContext.positive_caption))) : null;\n}\n\nfunction PrintImage(_ref2) {\n var img_src = _ref2.img_src;\n return img_src ? 
/*#__PURE__*/react__WEBPACK_IMPORTED_MODULE_0__.createElement(\"div\", {\n className: \"row\"\n }, /*#__PURE__*/react__WEBPACK_IMPORTED_MODULE_0__.createElement(\"div\", {\n className: \"col-md-8 offset-md-2\"\n }, /*#__PURE__*/react__WEBPACK_IMPORTED_MODULE_0__.createElement(\"img\", {\n src: img_src,\n className: \"mx-auto d-block\",\n alt: \"Image\"\n }))) : null;\n}\n\nfunction ChatMessage(_ref3) {\n var isSelf = _ref3.isSelf,\n idx = _ref3.idx,\n agentName = _ref3.agentName,\n _ref3$message = _ref3.message,\n message = _ref3$message === void 0 ? \"\" : _ref3$message,\n onRadioChange = _ref3.onRadioChange;\n var floatToSide = isSelf ? \"right\" : \"left\";\n var alertStyle = isSelf ? \"alert-info\" : \"alert-warning\";\n\n var handleChange = function handleChange(e) {\n onRadioChange(e.currentTarget.value);\n };\n\n return /*#__PURE__*/react__WEBPACK_IMPORTED_MODULE_0__.createElement(\"div\", {\n className: \"row\",\n style: {\n marginLeft: \"0\",\n marginRight: \"0\"\n }\n }, /*#__PURE__*/react__WEBPACK_IMPORTED_MODULE_0__.createElement(\"div\", {\n className: \"alert message \" + alertStyle,\n role: \"alert\",\n style: {\n \"float\": floatToSide\n }\n }, /*#__PURE__*/react__WEBPACK_IMPORTED_MODULE_0__.createElement(\"span\", {\n style: {\n fontSize: \"16px\",\n whiteSpace: \"pre-wrap\"\n }\n }, agentName === \"Chat Agent 1\" && /*#__PURE__*/react__WEBPACK_IMPORTED_MODULE_0__.createElement(\"div\", {\n style: {\n width: \"100%\",\n display: \"flex\",\n flexDirection: \"row\",\n justifyContent: \"flex-start\",\n alignItems: \"center\"\n }\n }, /*#__PURE__*/react__WEBPACK_IMPORTED_MODULE_0__.createElement(\"div\", {\n style: {\n width: 100,\n height: 100\n }\n }, /*#__PURE__*/react__WEBPACK_IMPORTED_MODULE_0__.createElement(\"img\", {\n src: \"https://affective-dialog.s3.us-west-2.amazonaws.com/assets/Questioner_icon_with_title.png\",\n alt: \"Questioner-icon\",\n style: {\n width: \"100%\",\n height: \"100%\"\n }\n })), /*#__PURE__*/react__WEBPACK_IMPORTED_MODULE_0__.createElement(\"span\", null)), agentName === \"Chat Agent 2\" && /*#__PURE__*/react__WEBPACK_IMPORTED_MODULE_0__.createElement(\"div\", {\n style: {\n width: \"100%\",\n display: \"flex\",\n flexDirection: \"row\",\n justifyContent: \"flex-start\",\n alignItems: \"center\"\n }\n }, /*#__PURE__*/react__WEBPACK_IMPORTED_MODULE_0__.createElement(\"div\", {\n style: {\n width: 100,\n height: 100\n }\n }, /*#__PURE__*/react__WEBPACK_IMPORTED_MODULE_0__.createElement(\"img\", {\n src: \"https://affective-dialog.s3.us-west-2.amazonaws.com/assets/Answerer_icon_with_title.png\",\n alt: \"Answerer-icon\",\n style: {\n width: \"100%\",\n height: \"100%\"\n }\n })), /*#__PURE__*/react__WEBPACK_IMPORTED_MODULE_0__.createElement(\"span\", null)), /*#__PURE__*/react__WEBPACK_IMPORTED_MODULE_0__.createElement(\"span\", {\n style: {\n fontSize: \"16px\",\n whiteSpace: \"pre-wrap\"\n },\n dangerouslySetInnerHTML: {\n __html: message\n }\n }))));\n}\n\nfunction RenderChatMessage(_ref4) {\n var message = _ref4.message,\n mephistoContext = _ref4.mephistoContext,\n appContext = _ref4.appContext,\n idx = _ref4.idx;\n var agentId = mephistoContext.agentId;\n var currentAgentNames = appContext.taskContext.currentAgentNames;\n return /*#__PURE__*/react__WEBPACK_IMPORTED_MODULE_0__.createElement(\"div\", {\n onClick: function onClick() {\n return alert(\"You clicked on message with index \" + idx);\n }\n }, /*#__PURE__*/react__WEBPACK_IMPORTED_MODULE_0__.createElement(ChatMessage, {\n isSelf: message.id === agentId || message.id in 
currentAgentNames,\n agentName: message.id in currentAgentNames ? currentAgentNames[message.id] : message.id,\n message: message.text,\n taskData: message.task_data,\n messageId: message.update_id\n }));\n}\n\nfunction MainApp() {\n return /*#__PURE__*/react__WEBPACK_IMPORTED_MODULE_0__.createElement(_composed_AffectiveChatApp_jsx__WEBPACK_IMPORTED_MODULE_4__.AffectiveChatApp, {\n renderMessage: function renderMessage(_ref5) {\n var message = _ref5.message,\n idx = _ref5.idx,\n mephistoContext = _ref5.mephistoContext,\n appContext = _ref5.appContext;\n return /*#__PURE__*/react__WEBPACK_IMPORTED_MODULE_0__.createElement(RenderChatMessage, {\n message: message,\n mephistoContext: mephistoContext,\n appContext: appContext,\n idx: idx,\n key: message.message_id + \"-\" + idx\n });\n },\n renderSidePane: function renderSidePane(_ref6) {\n var _ref6$mephistoContext = _ref6.mephistoContext,\n taskConfig = _ref6$mephistoContext.taskConfig,\n agentId = _ref6$mephistoContext.agentId,\n taskContext = _ref6.appContext.taskContext;\n return /*#__PURE__*/react__WEBPACK_IMPORTED_MODULE_0__.createElement(bootstrap_chat__WEBPACK_IMPORTED_MODULE_5__.DefaultTaskDescription, {\n chatTitle: taskConfig.chat_title,\n taskDescriptionHtml: taskConfig.task_description\n }, /*#__PURE__*/react__WEBPACK_IMPORTED_MODULE_0__.createElement(\"br\", null), /*#__PURE__*/react__WEBPACK_IMPORTED_MODULE_0__.createElement(\"hr\", null), agentId.includes(\"onboarding\") ? null : /*#__PURE__*/react__WEBPACK_IMPORTED_MODULE_0__.createElement(PrintCaptionData, {\n taskContext: taskContext\n }), agentId.includes(\"onboarding\") ? null : /*#__PURE__*/react__WEBPACK_IMPORTED_MODULE_0__.createElement(PrintImage, {\n img_src: taskContext.image_src\n }));\n }\n });\n}\n\nreact_dom__WEBPACK_IMPORTED_MODULE_1__.render( /*#__PURE__*/react__WEBPACK_IMPORTED_MODULE_0__.createElement(MainApp, null), document.getElementById(\"app\"));\n\n//# sourceURL=webpack://parlai-mturk-task-compiler/./src/main.js?"); + +/***/ }), + +/***/ "./node_modules/bootstrap-chat/build/bundle.js": +/*!*****************************************************!*\ + !*** ./node_modules/bootstrap-chat/build/bundle.js ***! + \*****************************************************/ +/***/ ((module, __unused_webpack_exports, __webpack_require__) => { + +eval("/*! For license information please see bundle.js.LICENSE.txt */\n!function(e,t){ true?module.exports=t(__webpack_require__(/*! react */ \"./node_modules/react/index.js\"),__webpack_require__(/*! mephisto-task */ \"./node_modules/mephisto-task/build/bundle.js\"),__webpack_require__(/*! 
react-dom */ \"./node_modules/react-dom/index.js\")):0}(self,(function(e,t,n){return(()=>{var r={1760:(e,t,n)=>{(t=n(1817)(!1)).push([e.id,\".rc-slider {\\n position: relative;\\n height: 14px;\\n padding: 5px 0;\\n width: 100%;\\n border-radius: 6px;\\n touch-action: none;\\n box-sizing: border-box;\\n -webkit-tap-highlight-color: rgba(0, 0, 0, 0);\\n}\\n.rc-slider * {\\n box-sizing: border-box;\\n -webkit-tap-highlight-color: rgba(0, 0, 0, 0);\\n}\\n.rc-slider-rail {\\n position: absolute;\\n width: 100%;\\n background-color: #e9e9e9;\\n height: 4px;\\n border-radius: 6px;\\n}\\n.rc-slider-track {\\n position: absolute;\\n left: 0;\\n height: 4px;\\n border-radius: 6px;\\n background-color: #abe2fb;\\n}\\n.rc-slider-handle {\\n position: absolute;\\n width: 14px;\\n height: 14px;\\n cursor: pointer;\\n cursor: -webkit-grab;\\n margin-top: -5px;\\n cursor: grab;\\n border-radius: 50%;\\n border: solid 2px #96dbfa;\\n background-color: #fff;\\n touch-action: pan-x;\\n}\\n.rc-slider-handle-dragging.rc-slider-handle-dragging.rc-slider-handle-dragging {\\n border-color: #57c5f7;\\n box-shadow: 0 0 0 5px #96dbfa;\\n}\\n.rc-slider-handle:focus {\\n outline: none;\\n}\\n.rc-slider-handle-click-focused:focus {\\n border-color: #96dbfa;\\n box-shadow: unset;\\n}\\n.rc-slider-handle:hover {\\n border-color: #57c5f7;\\n}\\n.rc-slider-handle:active {\\n border-color: #57c5f7;\\n box-shadow: 0 0 5px #57c5f7;\\n cursor: -webkit-grabbing;\\n cursor: grabbing;\\n}\\n.rc-slider-mark {\\n position: absolute;\\n top: 18px;\\n left: 0;\\n width: 100%;\\n font-size: 12px;\\n}\\n.rc-slider-mark-text {\\n position: absolute;\\n display: inline-block;\\n vertical-align: middle;\\n text-align: center;\\n cursor: pointer;\\n color: #999;\\n}\\n.rc-slider-mark-text-active {\\n color: #666;\\n}\\n.rc-slider-step {\\n position: absolute;\\n width: 100%;\\n height: 4px;\\n background: transparent;\\n}\\n.rc-slider-dot {\\n position: absolute;\\n bottom: -2px;\\n margin-left: -4px;\\n width: 8px;\\n height: 8px;\\n border: 2px solid #e9e9e9;\\n background-color: #fff;\\n cursor: pointer;\\n border-radius: 50%;\\n vertical-align: middle;\\n}\\n.rc-slider-dot-active {\\n border-color: #96dbfa;\\n}\\n.rc-slider-dot-reverse {\\n margin-right: -4px;\\n}\\n.rc-slider-disabled {\\n background-color: #e9e9e9;\\n}\\n.rc-slider-disabled .rc-slider-track {\\n background-color: #ccc;\\n}\\n.rc-slider-disabled .rc-slider-handle,\\n.rc-slider-disabled .rc-slider-dot {\\n border-color: #ccc;\\n box-shadow: none;\\n background-color: #fff;\\n cursor: not-allowed;\\n}\\n.rc-slider-disabled .rc-slider-mark-text,\\n.rc-slider-disabled .rc-slider-dot {\\n cursor: not-allowed !important;\\n}\\n.rc-slider-vertical {\\n width: 14px;\\n height: 100%;\\n padding: 0 5px;\\n}\\n.rc-slider-vertical .rc-slider-rail {\\n height: 100%;\\n width: 4px;\\n}\\n.rc-slider-vertical .rc-slider-track {\\n left: 5px;\\n bottom: 0;\\n width: 4px;\\n}\\n.rc-slider-vertical .rc-slider-handle {\\n margin-left: -5px;\\n touch-action: pan-y;\\n}\\n.rc-slider-vertical .rc-slider-mark {\\n top: 0;\\n left: 18px;\\n height: 100%;\\n}\\n.rc-slider-vertical .rc-slider-step {\\n height: 100%;\\n width: 4px;\\n}\\n.rc-slider-vertical .rc-slider-dot {\\n left: 2px;\\n margin-bottom: -4px;\\n}\\n.rc-slider-vertical .rc-slider-dot:first-child {\\n margin-bottom: -4px;\\n}\\n.rc-slider-vertical .rc-slider-dot:last-child {\\n margin-bottom: -4px;\\n}\\n.rc-slider-tooltip-zoom-down-enter,\\n.rc-slider-tooltip-zoom-down-appear {\\n animation-duration: 0.3s;\\n 
animation-fill-mode: both;\\n display: block !important;\\n animation-play-state: paused;\\n}\\n.rc-slider-tooltip-zoom-down-leave {\\n animation-duration: 0.3s;\\n animation-fill-mode: both;\\n display: block !important;\\n animation-play-state: paused;\\n}\\n.rc-slider-tooltip-zoom-down-enter.rc-slider-tooltip-zoom-down-enter-active,\\n.rc-slider-tooltip-zoom-down-appear.rc-slider-tooltip-zoom-down-appear-active {\\n animation-name: rcSliderTooltipZoomDownIn;\\n animation-play-state: running;\\n}\\n.rc-slider-tooltip-zoom-down-leave.rc-slider-tooltip-zoom-down-leave-active {\\n animation-name: rcSliderTooltipZoomDownOut;\\n animation-play-state: running;\\n}\\n.rc-slider-tooltip-zoom-down-enter,\\n.rc-slider-tooltip-zoom-down-appear {\\n transform: scale(0, 0);\\n animation-timing-function: cubic-bezier(0.23, 1, 0.32, 1);\\n}\\n.rc-slider-tooltip-zoom-down-leave {\\n animation-timing-function: cubic-bezier(0.755, 0.05, 0.855, 0.06);\\n}\\n@keyframes rcSliderTooltipZoomDownIn {\\n 0% {\\n opacity: 0;\\n transform-origin: 50% 100%;\\n transform: scale(0, 0);\\n }\\n 100% {\\n transform-origin: 50% 100%;\\n transform: scale(1, 1);\\n }\\n}\\n@keyframes rcSliderTooltipZoomDownOut {\\n 0% {\\n transform-origin: 50% 100%;\\n transform: scale(1, 1);\\n }\\n 100% {\\n opacity: 0;\\n transform-origin: 50% 100%;\\n transform: scale(0, 0);\\n }\\n}\\n.rc-slider-tooltip {\\n position: absolute;\\n left: -9999px;\\n top: -9999px;\\n visibility: visible;\\n box-sizing: border-box;\\n -webkit-tap-highlight-color: rgba(0, 0, 0, 0);\\n}\\n.rc-slider-tooltip * {\\n box-sizing: border-box;\\n -webkit-tap-highlight-color: rgba(0, 0, 0, 0);\\n}\\n.rc-slider-tooltip-hidden {\\n display: none;\\n}\\n.rc-slider-tooltip-placement-top {\\n padding: 4px 0 8px 0;\\n}\\n.rc-slider-tooltip-inner {\\n padding: 6px 2px;\\n min-width: 24px;\\n height: 24px;\\n font-size: 12px;\\n line-height: 1;\\n color: #fff;\\n text-align: center;\\n text-decoration: none;\\n background-color: #6c6c6c;\\n border-radius: 6px;\\n box-shadow: 0 0 4px #d9d9d9;\\n}\\n.rc-slider-tooltip-arrow {\\n position: absolute;\\n width: 0;\\n height: 0;\\n border-color: transparent;\\n border-style: solid;\\n}\\n.rc-slider-tooltip-placement-top .rc-slider-tooltip-arrow {\\n bottom: 4px;\\n left: 50%;\\n margin-left: -4px;\\n border-width: 4px 4px 0;\\n border-top-color: #6c6c6c;\\n}\\n\",\"\"]),e.exports=t},1817:e=>{\"use strict\";e.exports=function(e){var t=[];return t.toString=function(){return this.map((function(t){var n=function(e,t){var n,r,o,i=e[1]||\"\",a=e[3];if(!a)return i;if(t&&\"function\"==typeof btoa){var s=(n=a,r=btoa(unescape(encodeURIComponent(JSON.stringify(n)))),o=\"sourceMappingURL=data:application/json;charset=utf-8;base64,\".concat(r),\"/*# \".concat(o,\" */\")),u=a.sources.map((function(e){return\"/*# sourceURL=\".concat(a.sourceRoot||\"\").concat(e,\" */\")}));return[i].concat(u).concat([s]).join(\"\\n\")}return[i].join(\"\\n\")}(t,e);return t[2]?\"@media \".concat(t[2],\" {\").concat(n,\"}\"):n})).join(\"\")},t.i=function(e,n,r){\"string\"==typeof e&&(e=[[null,e,\"\"]]);var o={};if(r)for(var i=0;i{\"use strict\";Object.defineProperty(t,\"__esModule\",{value:!0}),t.default=function(){for(var e=arguments.length,t=Array(e),n=0;n{\"use strict\";Object.defineProperty(t,\"__esModule\",{value:!0});var r=\"function\"==typeof Symbol&&\"symbol\"==typeof Symbol.iterator?function(e){return typeof e}:function(e){return e&&\"function\"==typeof Symbol&&e.constructor===Symbol&&e!==Symbol.prototype?\"symbol\":typeof 
e},o=a(n(8156)),i=a(n(6194));function a(e){return e&&e.__esModule?e:{default:e}}t.default=(0,i.default)((function(e,t,n,i,a){var s=e[t],u=void 0===s?\"undefined\":r(s);return o.default.isValidElement(s)?new Error(\"Invalid \"+i+\" `\"+a+\"` of type ReactElement supplied to `\"+n+\"`, expected a ReactComponent or a DOMElement. You can usually obtain a ReactComponent or DOMElement from a ReactElement by attaching a ref to it.\"):\"object\"===u&&\"function\"==typeof s.render||1===s.nodeType?null:new Error(\"Invalid \"+i+\" `\"+a+\"` of value `\"+s+\"` supplied to `\"+n+\"`, expected a ReactComponent or a DOMElement.\")})),e.exports=t.default},167:(e,t,n)=>{\"use strict\";Object.defineProperty(t,\"__esModule\",{value:!0});var r=a(n(8156)),o=n(2744),i=a(n(6194));function a(e){return e&&e.__esModule?e:{default:e}}t.default=(0,i.default)((function(e,t,n,i,a){var s=e[t];return r.default.isValidElement(s)?new Error(\"Invalid \"+i+\" `\"+a+\"` of type ReactElement supplied to `\"+n+\"`,expected an element type (a string , component class, or function component).\"):(0,o.isValidElementType)(s)?null:new Error(\"Invalid \"+i+\" `\"+a+\"` of value `\"+s+\"` supplied to `\"+n+\"`, expected an element type (a string , component class, or function component).\")})),e.exports=t.default},8314:(e,t)=>{\"use strict\";Object.defineProperty(t,\"__esModule\",{value:!0}),t.default=function(e){return function(t,n,r,o,i){var a=r||\"<>\",s=i||n;if(null==t[n])return new Error(\"The \"+o+\" `\"+s+\"` is required to make `\"+a+\"` accessible for users of assistive technologies such as screen readers.\");for(var u=arguments.length,l=Array(u>5?u-5:0),c=5;c{\"use strict\";Object.defineProperty(t,\"__esModule\",{value:!0}),t.default=function(e){function t(t,n,r,o,i,a){var s=o||\"<>\",u=a||r;if(null==n[r])return t?new Error(\"Required \"+i+\" `\"+u+\"` was not specified in `\"+s+\"`.\"):null;for(var l=arguments.length,c=Array(l>6?l-6:0),f=6;f{\"use strict\";t.__esModule=!0;var r=l(n(8076)),o=l(n(2451)),i=l(n(8156)),a=l(n(7111)),s=l(n(1623)),u=l(n(9049));function l(e){return e&&e.__esModule?e:{default:e}}function c(e,t){if(!(e instanceof t))throw new TypeError(\"Cannot call a class as a function\")}function f(e,t){if(!e)throw new ReferenceError(\"this hasn't been initialised - super() hasn't been called\");return!t||\"object\"!=typeof t&&\"function\"!=typeof t?e:t}var p=function(e){function t(){var n,r;c(this,t);for(var o=arguments.length,l=Array(o),p=0;p{\"use strict\";t.__esModule=!0;var r=Object.assign||function(e){for(var t=1;t=0||Object.prototype.hasOwnProperty.call(e,r)&&(n[r]=e[r]);return n}(e,[\"container\",\"containerPadding\",\"target\",\"placement\",\"shouldUpdatePosition\",\"rootClose\",\"children\",\"transition\"]);if(!(d.show||p&&!this.state.exited))return null;var h=f;if(h=a.default.createElement(u.default,{container:t,containerPadding:n,target:r,placement:o,shouldUpdatePosition:i},h),p){var v=d.onExit,m=d.onExiting,y=d.onEnter,b=d.onEntering,g=d.onEntered;h=a.default.createElement(p,{in:d.show,appear:!0,onExit:v,onExiting:m,onExited:this.onHiddenListener,onEnter:y,onEntering:b,onEntered:g},h)}return c&&(h=a.default.createElement(l.default,{onRootClose:d.onHide},h)),a.default.createElement(s.default,{container:t},h)},t}(a.default.Component);f.propTypes=r({},s.default.propTypes,u.default.propTypes,{show:o.default.bool,rootClose:o.default.bool,onHide:function(e){var t=o.default.func;e.rootClose&&(t=t.isRequired);for(var n=arguments.length,r=Array(n>1?n-1:0),i=1;i{\"use strict\";t.__esModule=!0;var 
r=c(n(8076)),o=c(n(2451)),i=c(n(8156)),a=c(n(7111)),s=c(n(1623)),u=c(n(9049)),l=c(n(2813));function c(e){return e&&e.__esModule?e:{default:e}}function f(e,t){if(!(e instanceof t))throw new TypeError(\"Cannot call a class as a function\")}function p(e,t){if(!e)throw new ReferenceError(\"this hasn't been initialised - super() hasn't been called\");return!t||\"object\"!=typeof t&&\"function\"!=typeof t?e:t}var d=function(e){function t(){var n,r;f(this,t);for(var o=arguments.length,i=Array(o),a=0;a0&&void 0!==arguments[0]?arguments[0]:r.props;r._portalContainerNode=(0,s.default)(e.container,(0,u.default)(r).body)},r.getMountNode=function(){return r._portalContainerNode},p(r,n)}return function(e,t){if(\"function\"!=typeof t&&null!==t)throw new TypeError(\"Super expression must either be null or a function, not \"+typeof t);e.prototype=Object.create(t&&t.prototype,{constructor:{value:e,enumerable:!1,writable:!0,configurable:!0}}),t&&(Object.setPrototypeOf?Object.setPrototypeOf(e,t):e.__proto__=t)}(t,e),t.prototype.componentDidMount=function(){this.setContainer(),this.forceUpdate(this.props.onRendered)},t.prototype.componentWillReceiveProps=function(e){e.container!==this.props.container&&this.setContainer(e)},t.prototype.componentWillUnmount=function(){this._portalContainerNode=null},t.prototype.render=function(){return this.props.children&&this._portalContainerNode?a.default.createPortal(this.props.children,this._portalContainerNode):null},t}(i.default.Component);d.displayName=\"Portal\",d.propTypes={container:r.default.oneOfType([o.default,r.default.func]),onRendered:r.default.func},t.default=a.default.createPortal?d:l.default,e.exports=t.default},5915:(e,t,n)=>{\"use strict\";t.__esModule=!0;var r=Object.assign||function(e){for(var t=1;t=0||Object.prototype.hasOwnProperty.call(e,r)&&(n[r]=e[r]);return n}var v=function(e){function t(n,r){!function(e,t){if(!(e instanceof t))throw new TypeError(\"Cannot call a class as a function\")}(this,t);var o=function(e,t){if(!e)throw new ReferenceError(\"this hasn't been initialised - super() hasn't been called\");return!t||\"object\"!=typeof t&&\"function\"!=typeof t?e:t}(this,e.call(this,n,r));return o.getTarget=function(){var e=o.props.target,t=\"function\"==typeof e?e():e;return t&&l.default.findDOMNode(t)||null},o.maybeUpdatePosition=function(e){var t=o.getTarget();(o.props.shouldUpdatePosition||t!==o._lastTarget||e)&&o.updatePosition(t)},o.state={positionLeft:0,positionTop:0,arrowOffsetLeft:null,arrowOffsetTop:null},o._needsFlush=!1,o._lastTarget=null,o}return function(e,t){if(\"function\"!=typeof t&&null!==t)throw new TypeError(\"Super expression must either be null or a function, not \"+typeof t);e.prototype=Object.create(t&&t.prototype,{constructor:{value:e,enumerable:!1,writable:!0,configurable:!0}}),t&&(Object.setPrototypeOf?Object.setPrototypeOf(e,t):e.__proto__=t)}(t,e),t.prototype.componentDidMount=function(){this.updatePosition(this.getTarget())},t.prototype.componentWillReceiveProps=function(){this._needsFlush=!0},t.prototype.componentDidUpdate=function(e){this._needsFlush&&(this._needsFlush=!1,this.maybeUpdatePosition(this.props.placement!==e.placement))},t.prototype.render=function(){var e=this.props,t=e.children,n=e.className,i=h(e,[\"children\",\"className\"]),a=this.state,l=a.positionLeft,c=a.positionTop,f=h(a,[\"positionLeft\",\"positionTop\"]);delete i.target,delete i.container,delete i.containerPadding,delete i.shouldUpdatePosition;var 
p=u.default.Children.only(t);return(0,s.cloneElement)(p,r({},i,f,{positionLeft:l,positionTop:c,className:(0,o.default)(n,p.props.className),style:r({},p.props.style,{left:l,top:c})}))},t.prototype.updatePosition=function(e){if(this._lastTarget=e,e){var t=l.default.findDOMNode(this),n=(0,f.default)(this.props.container,(0,p.default)(this).body);this.setState((0,c.default)(this.props.placement,t,e,n,this.props.containerPadding))}else this.setState({positionLeft:0,positionTop:0,arrowOffsetLeft:null,arrowOffsetTop:null})},t}(u.default.Component);v.propTypes={target:i.default.oneOfType([a.default,i.default.func]),container:i.default.oneOfType([a.default,i.default.func]),containerPadding:i.default.number,placement:i.default.oneOf([\"top\",\"right\",\"bottom\",\"left\"]),shouldUpdatePosition:i.default.bool},v.displayName=\"Position\",v.defaultProps={containerPadding:0,placement:\"right\",shouldUpdatePosition:!1},t.default=v,e.exports=t.default},1330:(e,t,n)=>{\"use strict\";t.__esModule=!0;var r=l(n(5804)),o=l(n(8076)),i=l(n(8156)),a=l(n(7111)),s=l(n(616)),u=l(n(9049));function l(e){return e&&e.__esModule?e:{default:e}}var c=function(e){function t(n,o){!function(e,t){if(!(e instanceof t))throw new TypeError(\"Cannot call a class as a function\")}(this,t);var i=function(e,t){if(!e)throw new ReferenceError(\"this hasn't been initialised - super() hasn't been called\");return!t||\"object\"!=typeof t&&\"function\"!=typeof t?e:t}(this,e.call(this,n,o));return i.addEventListeners=function(){var e=i.props.event,t=(0,u.default)(i);i.documentMouseCaptureListener=(0,s.default)(t,e,i.handleMouseCapture,!0),i.documentMouseListener=(0,s.default)(t,e,i.handleMouse),i.documentKeyupListener=(0,s.default)(t,\"keyup\",i.handleKeyUp)},i.removeEventListeners=function(){i.documentMouseCaptureListener&&i.documentMouseCaptureListener.remove(),i.documentMouseListener&&i.documentMouseListener.remove(),i.documentKeyupListener&&i.documentKeyupListener.remove()},i.handleMouseCapture=function(e){var t;i.preventMouseRootClose=!!((t=e).metaKey||t.altKey||t.ctrlKey||t.shiftKey)||!function(e){return 0===e.button}(e)||(0,r.default)(a.default.findDOMNode(i),e.target)},i.handleMouse=function(e){!i.preventMouseRootClose&&i.props.onRootClose&&i.props.onRootClose(e)},i.handleKeyUp=function(e){27===e.keyCode&&i.props.onRootClose&&i.props.onRootClose(e)},i.preventMouseRootClose=!1,i}return function(e,t){if(\"function\"!=typeof t&&null!==t)throw new TypeError(\"Super expression must either be null or a function, not \"+typeof t);e.prototype=Object.create(t&&t.prototype,{constructor:{value:e,enumerable:!1,writable:!0,configurable:!0}}),t&&(Object.setPrototypeOf?Object.setPrototypeOf(e,t):e.__proto__=t)}(t,e),t.prototype.componentDidMount=function(){this.props.disabled||this.addEventListeners()},t.prototype.componentDidUpdate=function(e){!this.props.disabled&&e.disabled?this.addEventListeners():this.props.disabled&&!e.disabled&&this.removeEventListeners()},t.prototype.componentWillUnmount=function(){this.props.disabled||this.removeEventListeners()},t.prototype.render=function(){return this.props.children},t}(i.default.Component);c.displayName=\"RootCloseWrapper\",c.propTypes={onRootClose:o.default.func,children:o.default.element,disabled:o.default.bool,event:o.default.oneOf([\"click\",\"mousedown\"])},c.defaultProps={event:\"click\"},t.default=c,e.exports=t.default},616:(e,t,n)=>{\"use strict\";t.__esModule=!0,t.default=function(e,t,n,i){return(0,r.default)(e,t,n,i),{remove:function(){(0,o.default)(e,t,n,i)}}};var 
r=i(n(1154)),o=i(n(6915));function i(e){return e&&e.__esModule?e:{default:e}}e.exports=t.default},3662:(e,t,n)=>{\"use strict\";t.__esModule=!0,t.default=function(e,t,n,i,a){var s=\"BODY\"===i.tagName?(0,r.default)(n):(0,o.default)(n,i),l=(0,r.default)(t),c=l.height,f=l.width,p=void 0,d=void 0,h=void 0,v=void 0;if(\"left\"===e||\"right\"===e){d=s.top+(s.height-c)/2,p=\"left\"===e?s.left-f:s.left+s.width;var m=function(e,t,n,r){var o=u(n),i=o.scroll,a=o.height,s=e-r-i,l=e+r-i+t;return s<0?-s:l>a?a-l:0}(d,c,i,a);d+=m,v=50*(1-2*m/c)+\"%\",h=void 0}else{if(\"top\"!==e&&\"bottom\"!==e)throw new Error('calcOverlayPosition(): No such placement of \"'+e+'\" found.');p=s.left+(s.width-f)/2,d=\"top\"===e?s.top-c:s.top+s.height;var y=function(e,t,n,r){var o=u(n).width,i=e-r,a=e+r+t;return i<0?-i:a>o?o-a:0}(p,f,i,a);p+=y,h=50*(1-2*y/f)+\"%\",v=void 0}return{positionLeft:p,positionTop:d,arrowOffsetLeft:h,arrowOffsetTop:v}};var r=s(n(6546)),o=s(n(3207)),i=s(n(2748)),a=s(n(9049));function s(e){return e&&e.__esModule?e:{default:e}}function u(e){var t=void 0,n=void 0,o=void 0;if(\"BODY\"===e.tagName)t=window.innerWidth,n=window.innerHeight,o=(0,i.default)((0,a.default)(e).documentElement)||(0,i.default)(e);else{var s=(0,r.default)(e);t=s.width,n=s.height,o=(0,i.default)(e)}return{width:t,height:n,scroll:o}}e.exports=t.default},1623:(e,t,n)=>{\"use strict\";t.__esModule=!0,t.default=function(e,t){return e=\"function\"==typeof e?e():e,o.default.findDOMNode(e)||t};var r,o=(r=n(7111))&&r.__esModule?r:{default:r};e.exports=t.default},9049:(e,t,n)=>{\"use strict\";t.__esModule=!0,t.default=function(e){return(0,o.default)(r.default.findDOMNode(e))};var r=i(n(7111)),o=i(n(2885));function i(e){return e&&e.__esModule?e:{default:e}}e.exports=t.default},2247:(e,t,n)=>{\"use strict\";t.ZP=t.cn=t.d0=void 0;var r=function(e){if(e&&e.__esModule)return e;var t={};if(null!=e)for(var n in e)if(Object.prototype.hasOwnProperty.call(e,n)){var r=Object.defineProperty&&Object.getOwnPropertyDescriptor?Object.getOwnPropertyDescriptor(e,n):{};r.get||r.set?Object.defineProperty(t,n,r):t[n]=e[n]}return t.default=e,t}(n(8076)),o=s(n(8156)),i=s(n(7111)),a=n(2442);function s(e){return e&&e.__esModule?e:{default:e}}n(6869);var u=\"unmounted\",l=\"exited\",c=\"entering\";t.d0=c;var f=\"entered\";t.cn=f;var p=\"exiting\",d=function(e){var t,n;function r(t,n){var r;r=e.call(this,t,n)||this;var o,i=n.transitionGroup,a=i&&!i.isMounting?t.enter:t.appear;return r.appearStatus=null,t.in?a?(o=l,r.appearStatus=c):o=f:o=t.unmountOnExit||t.mountOnEnter?u:l,r.state={status:o},r.nextCallback=null,r}n=e,(t=r).prototype=Object.create(n.prototype),t.prototype.constructor=t,t.__proto__=n;var a=r.prototype;return a.getChildContext=function(){return{transitionGroup:null}},r.getDerivedStateFromProps=function(e,t){return e.in&&t.status===u?{status:l}:null},a.componentDidMount=function(){this.updateStatus(!0,this.appearStatus)},a.componentDidUpdate=function(e){var t=null;if(e!==this.props){var n=this.state.status;this.props.in?n!==c&&n!==f&&(t=c):n!==c&&n!==f||(t=p)}this.updateStatus(!1,t)},a.componentWillUnmount=function(){this.cancelNextCallback()},a.getTimeouts=function(){var e,t,n,r=this.props.timeout;return e=t=n=r,null!=r&&\"number\"!=typeof r&&(e=r.exit,t=r.enter,n=void 0!==r.appear?r.appear:t),{exit:e,enter:t,appear:n}},a.updateStatus=function(e,t){if(void 0===e&&(e=!1),null!==t){this.cancelNextCallback();var n=i.default.findDOMNode(this);t===c?this.performEnter(n,e):this.performExit(n)}else 
this.props.unmountOnExit&&this.state.status===l&&this.setState({status:u})},a.performEnter=function(e,t){var n=this,r=this.props.enter,o=this.context.transitionGroup?this.context.transitionGroup.isMounting:t,i=this.getTimeouts(),a=o?i.appear:i.enter;t||r?(this.props.onEnter(e,o),this.safeSetState({status:c},(function(){n.props.onEntering(e,o),n.onTransitionEnd(e,a,(function(){n.safeSetState({status:f},(function(){n.props.onEntered(e,o)}))}))}))):this.safeSetState({status:f},(function(){n.props.onEntered(e)}))},a.performExit=function(e){var t=this,n=this.props.exit,r=this.getTimeouts();n?(this.props.onExit(e),this.safeSetState({status:p},(function(){t.props.onExiting(e),t.onTransitionEnd(e,r.exit,(function(){t.safeSetState({status:l},(function(){t.props.onExited(e)}))}))}))):this.safeSetState({status:l},(function(){t.props.onExited(e)}))},a.cancelNextCallback=function(){null!==this.nextCallback&&(this.nextCallback.cancel(),this.nextCallback=null)},a.safeSetState=function(e,t){t=this.setNextCallback(t),this.setState(e,t)},a.setNextCallback=function(e){var t=this,n=!0;return this.nextCallback=function(r){n&&(n=!1,t.nextCallback=null,e(r))},this.nextCallback.cancel=function(){n=!1},this.nextCallback},a.onTransitionEnd=function(e,t,n){this.setNextCallback(n);var r=null==t&&!this.props.addEndListener;e&&!r?(this.props.addEndListener&&this.props.addEndListener(e,this.nextCallback),null!=t&&setTimeout(this.nextCallback,t)):setTimeout(this.nextCallback,0)},a.render=function(){var e=this.state.status;if(e===u)return null;var t=this.props,n=t.children,r=function(e,t){if(null==e)return{};var n,r,o={},i=Object.keys(e);for(r=0;r=0||(o[n]=e[n]);return o}(t,[\"children\"]);if(delete r.in,delete r.mountOnEnter,delete r.unmountOnExit,delete r.appear,delete r.enter,delete r.exit,delete r.timeout,delete r.addEndListener,delete r.onEnter,delete r.onEntering,delete r.onEntered,delete r.onExit,delete r.onExiting,delete r.onExited,\"function\"==typeof n)return n(e,r);var i=o.default.Children.only(n);return o.default.cloneElement(i,r)},r}(o.default.Component);function h(){}d.contextTypes={transitionGroup:r.object},d.childContextTypes={transitionGroup:function(){}},d.propTypes={},d.defaultProps={in:!1,mountOnEnter:!1,unmountOnExit:!1,appear:!1,enter:!0,exit:!0,onEnter:h,onEntering:h,onEntered:h,onExit:h,onExiting:h,onExited:h},d.UNMOUNTED=0,d.EXITED=1,d.ENTERING=2,d.ENTERED=3,d.EXITING=4;var v=(0,a.polyfill)(d);t.ZP=v},6869:(e,t,n)=>{\"use strict\";var r;t.__esModule=!0,t.classNamesShape=t.timeoutsShape=void 0,(r=n(8076))&&r.__esModule,t.timeoutsShape=null,t.classNamesShape=null},8502:(e,t,n)=>{var r=n(9827),o=n(1760);\"string\"==typeof(o=o.__esModule?o.default:o)&&(o=[[e.id,o,\"\"]]);r(o,{insert:\"head\",singleton:!1}),e.exports=o.locals||{}},9827:(e,t,n)=>{\"use strict\";var r,o=function(){var e={};return function(t){if(void 0===e[t]){var n=document.querySelector(t);if(window.HTMLIFrameElement&&n instanceof window.HTMLIFrameElement)try{n=n.contentDocument.head}catch(e){n=null}e[t]=n}return e[t]}}(),i=[];function a(e){for(var t=-1,n=0;n{\"use strict\";t.__esModule=!0,t.default=function e(t,n,i){void 0===i&&(i=[]);var s=t.displayName||t.name||\"Component\",u=o.isReactComponent(t),l=Object.keys(n),c=l.map(o.defaultKey);!u&&i.length&&invariant(!1);var f=function(e){var i,s;function f(){for(var t,r=arguments.length,o=new Array(r),i=0;i1?i-1:0),s=1;s{\"use strict\";var r;t.__esModule=!0,t.uncontrolledPropTypes=function(e,t){var n={};return 
Object.keys(e).forEach((function(e){n[i(e)]=o})),n},t.isProp=function(e,t){return void 0!==e[t]},t.defaultKey=i,t.isReactComponent=function(e){return!!(e&&e.prototype&&e.prototype.isReactComponent)},(r=n(5665))&&r.__esModule;var o=function(){};function i(e){return\"default\"+e.charAt(0).toUpperCase()+e.substr(1)}},9979:(e,t,n)=>{e.exports=n(472)},9542:(e,t,n)=>{e.exports=n(4597)},6431:(e,t,n)=>{e.exports=n(6307)},3384:(e,t,n)=>{e.exports=n(4700)},5174:(e,t,n)=>{e.exports=n(1371)},5430:(e,t,n)=>{e.exports=n(6356)},4856:(e,t,n)=>{e.exports=n(6798)},7675:e=>{function t(){return e.exports=t=Object.assign||function(e){for(var t=1;t{e.exports=function(e){return e&&e.__esModule?e:{default:e}},e.exports.default=e.exports,e.exports.__esModule=!0},169:(e,t,n)=>{e.exports=n(2515)},7085:(e,t)=>{var n;!function(){\"use strict\";var r={}.hasOwnProperty;function o(){for(var e=[],t=0;t{\"use strict\";var r=n(8032);t.__esModule=!0,t.default=function(e){void 0===e&&(e=(0,o.default)());try{return e.activeElement}catch(e){}};var o=r(n(2885));e.exports=t.default},6915:(e,t,n)=>{\"use strict\";var r=n(8032);t.__esModule=!0,t.default=void 0;var o=function(){};r(n(8632)).default&&(o=document.addEventListener?function(e,t,n,r){return e.removeEventListener(t,n,r||!1)}:document.attachEvent?function(e,t,n){return e.detachEvent(\"on\"+t,n)}:void 0);var i=o;t.default=i,e.exports=t.default},1154:(e,t,n)=>{\"use strict\";var r=n(8032);t.__esModule=!0,t.default=void 0;var o=function(){};r(n(8632)).default&&(o=document.addEventListener?function(e,t,n,r){return e.addEventListener(t,n,r||!1)}:document.attachEvent?function(e,t,n){return e.attachEvent(\"on\"+t,(function(t){(t=t||window.event).target=t.target||t.srcElement,t.currentTarget=e,n.call(e,t)}))}:void 0);var i=o;t.default=i,e.exports=t.default},2885:(e,t)=>{\"use strict\";t.__esModule=!0,t.default=function(e){return e&&e.ownerDocument||document},e.exports=t.default},5804:(e,t,n)=>{\"use strict\";var r=n(8032);t.__esModule=!0,t.default=void 0;var o=r(n(8632)).default?function(e,t){return e.contains?e.contains(t):e.compareDocumentPosition?e===t||!!(16&e.compareDocumentPosition(t)):i(e,t)}:i;function i(e,t){if(t)do{if(t===e)return!0}while(t=t.parentNode);return!1}t.default=o,e.exports=t.default},6105:(e,t)=>{\"use strict\";t.__esModule=!0,t.default=function(e){return e===e.window?e:9===e.nodeType&&(e.defaultView||e.parentWindow)},e.exports=t.default},6546:(e,t,n)=>{\"use strict\";var r=n(8032);t.__esModule=!0,t.default=function(e){var t=(0,a.default)(e),n=(0,i.default)(t),r=t&&t.documentElement,s={top:0,left:0,height:0,width:0};if(t)return(0,o.default)(r,e)?(void 0!==e.getBoundingClientRect&&(s=e.getBoundingClientRect()),{top:s.top+(n.pageYOffset||r.scrollTop)-(r.clientTop||0),left:s.left+(n.pageXOffset||r.scrollLeft)-(r.clientLeft||0),width:(null==s.width?e.offsetWidth:s.width)||0,height:(null==s.height?e.offsetHeight:s.height)||0}):s};var o=r(n(5804)),i=r(n(6105)),a=r(n(2885));e.exports=t.default},790:(e,t,n)=>{\"use strict\";var r=n(8032);t.__esModule=!0,t.default=function(e){for(var t=(0,o.default)(e),n=e&&e.offsetParent;n&&\"html\"!==a(e)&&\"static\"===(0,i.default)(n,\"position\");)n=n.offsetParent;return n||t.documentElement};var o=r(n(2885)),i=r(n(3482));function a(e){return e.nodeName&&e.nodeName.toLowerCase()}e.exports=t.default},3207:(e,t,n)=>{\"use strict\";var r=n(8032);t.__esModule=!0,t.default=function(e,t){var 
n,r={top:0,left:0};return\"fixed\"===(0,l.default)(e,\"position\")?n=e.getBoundingClientRect():(t=t||(0,a.default)(e),n=(0,i.default)(e),\"html\"!==function(e){return e.nodeName&&e.nodeName.toLowerCase()}(t)&&(r=(0,i.default)(t)),r.top+=parseInt((0,l.default)(t,\"borderTopWidth\"),10)-(0,s.default)(t)||0,r.left+=parseInt((0,l.default)(t,\"borderLeftWidth\"),10)-(0,u.default)(t)||0),(0,o.default)({},n,{top:n.top-r.top-(parseInt((0,l.default)(e,\"marginTop\"),10)||0),left:n.left-r.left-(parseInt((0,l.default)(e,\"marginLeft\"),10)||0)})};var o=r(n(7675)),i=r(n(6546)),a=r(n(790)),s=r(n(2748)),u=r(n(8477)),l=r(n(3482));e.exports=t.default},8477:(e,t,n)=>{\"use strict\";var r=n(8032);t.__esModule=!0,t.default=function(e,t){var n=(0,o.default)(e);if(void 0===t)return n?\"pageXOffset\"in n?n.pageXOffset:n.document.documentElement.scrollLeft:e.scrollLeft;n?n.scrollTo(t,\"pageYOffset\"in n?n.pageYOffset:n.document.documentElement.scrollTop):e.scrollLeft=t};var o=r(n(6105));e.exports=t.default},2748:(e,t,n)=>{\"use strict\";var r=n(8032);t.__esModule=!0,t.default=function(e,t){var n=(0,o.default)(e);if(void 0===t)return n?\"pageYOffset\"in n?n.pageYOffset:n.document.documentElement.scrollTop:e.scrollTop;n?n.scrollTo(\"pageXOffset\"in n?n.pageXOffset:n.document.documentElement.scrollLeft,t):e.scrollTop=t};var o=r(n(6105));e.exports=t.default},9272:(e,t,n)=>{\"use strict\";var r=n(8032);t.__esModule=!0,t.default=function(e){if(!e)throw new TypeError(\"No Element passed to `getComputedStyle()`\");var t=e.ownerDocument;return\"defaultView\"in t?t.defaultView.opener?e.ownerDocument.defaultView.getComputedStyle(e,null):window.getComputedStyle(e,null):{getPropertyValue:function(t){var n=e.style;\"float\"==(t=(0,o.default)(t))&&(t=\"styleFloat\");var r=e.currentStyle[t]||null;if(null==r&&n&&n[t]&&(r=n[t]),a.test(r)&&!i.test(t)){var s=n.left,u=e.runtimeStyle,l=u&&u.left;l&&(u.left=e.currentStyle.left),n.left=\"fontSize\"===t?\"1em\":r,r=n.pixelLeft+\"px\",n.left=s,l&&(u.left=l)}return r}}};var o=r(n(534)),i=/^(top|right|bottom|left)$/,a=/^([+-]?(?:\\d*\\.|)\\d+(?:[eE][+-]?\\d+|))(?!px)[a-z%]+$/i;e.exports=t.default},3482:(e,t,n)=>{\"use strict\";var r=n(8032);t.__esModule=!0,t.default=function(e,t,n){var r=\"\",c=\"\",f=t;if(\"string\"==typeof t){if(void 0===n)return e.style[(0,o.default)(t)]||(0,a.default)(e).getPropertyValue((0,i.default)(t));(f={})[t]=n}Object.keys(f).forEach((function(t){var n=f[t];n||0===n?(0,l.default)(t)?c+=t+\"(\"+n+\") \":r+=(0,i.default)(t)+\": \"+n+\";\":(0,s.default)(e,(0,i.default)(t))})),c&&(r+=u.transform+\": \"+c+\";\"),e.style.cssText+=\";\"+r};var o=r(n(534)),i=r(n(2036)),a=r(n(9272)),s=r(n(6656)),u=n(4570),l=r(n(6210));e.exports=t.default},6656:(e,t)=>{\"use strict\";t.__esModule=!0,t.default=function(e,t){return\"removeProperty\"in e.style?e.style.removeProperty(t):e.style.removeAttribute(t)},e.exports=t.default},6210:(e,t)=>{\"use strict\";t.__esModule=!0,t.default=function(e){return!(!e||!n.test(e))};var n=/^((translate|rotate|scale)(X|Y|Z|3d)?|matrix(3d)?|perspective|skew(X|Y)?)$/i;e.exports=t.default},4570:(e,t,n)=>{\"use strict\";var r=n(8032);t.__esModule=!0,t.default=t.animationEnd=t.animationDelay=t.animationTiming=t.animationDuration=t.animationName=t.transitionEnd=t.transitionDuration=t.transitionDelay=t.transitionTiming=t.transitionProperty=t.transform=void 0;var 
o,i,a,s,u,l,c,f,p,d,h,v=r(n(8632)),m=\"transform\";if(t.transform=m,t.animationEnd=a,t.transitionEnd=i,t.transitionDelay=c,t.transitionTiming=l,t.transitionDuration=u,t.transitionProperty=s,t.animationDelay=h,t.animationTiming=d,t.animationDuration=p,t.animationName=f,v.default){var y=function(){for(var e,t,n=document.createElement(\"div\").style,r={O:function(e){return\"o\"+e.toLowerCase()},Moz:function(e){return e.toLowerCase()},Webkit:function(e){return\"webkit\"+e},ms:function(e){return\"MS\"+e}},o=Object.keys(r),i=\"\",a=0;a{\"use strict\";t.__esModule=!0,t.default=function(e){return e.replace(n,(function(e,t){return t.toUpperCase()}))};var n=/-(.)/g;e.exports=t.default},534:(e,t,n)=>{\"use strict\";var r=n(8032);t.__esModule=!0,t.default=function(e){return(0,o.default)(e.replace(i,\"ms-\"))};var o=r(n(6288)),i=/^-ms-/;e.exports=t.default},2382:(e,t)=>{\"use strict\";t.__esModule=!0,t.default=function(e){return e.replace(n,\"-$1\").toLowerCase()};var n=/([A-Z])/g;e.exports=t.default},2036:(e,t,n)=>{\"use strict\";var r=n(8032);t.__esModule=!0,t.default=function(e){return(0,o.default)(e).replace(i,\"-ms-\")};var o=r(n(2382)),i=/^ms-/;e.exports=t.default},8632:(e,t)=>{\"use strict\";t.__esModule=!0,t.default=void 0;var n=!(\"undefined\"==typeof window||!window.document||!window.document.createElement);t.default=n,e.exports=t.default},5665:e=>{\"use strict\";e.exports=function(e,t,n,r,o,i,a,s){if(!e){var u;if(void 0===t)u=new Error(\"Minified exception occurred; use the non-minified dev environment for the full error message and additional helpful warnings.\");else{var l=[n,r,o,i,a,s],c=0;(u=new Error(t.replace(/%s/g,(function(){return l[c++]})))).name=\"Invariant Violation\"}throw u.framesToPop=1,u}}},2336:(e,t)=>{function n(e){if(e&&\"object\"==typeof e){var t=e.which||e.keyCode||e.charCode;t&&(e=t)}if(\"number\"==typeof e)return a[e];var n,i=String(e);return(n=r[i.toLowerCase()])?n:(n=o[i.toLowerCase()])||(1===i.length?i.charCodeAt(0):void 0)}n.isEventKey=function(e,t){if(e&&\"object\"==typeof e){var n=e.which||e.keyCode||e.charCode;if(null==n)return!1;if(\"string\"==typeof t){var i;if(i=r[t.toLowerCase()])return i===n;if(i=o[t.toLowerCase()])return i===n}else if(\"number\"==typeof t)return t===n;return!1}};var r=(t=e.exports=n).code=t.codes={backspace:8,tab:9,enter:13,shift:16,ctrl:17,alt:18,\"pause/break\":19,\"caps lock\":20,esc:27,space:32,\"page up\":33,\"page down\":34,end:35,home:36,left:37,up:38,right:39,down:40,insert:45,delete:46,command:91,\"left command\":91,\"right command\":93,\"numpad *\":106,\"numpad +\":107,\"numpad -\":109,\"numpad .\":110,\"numpad /\":111,\"num lock\":144,\"scroll lock\":145,\"my computer\":182,\"my calculator\":183,\";\":186,\"=\":187,\",\":188,\"-\":189,\".\":190,\"/\":191,\"`\":192,\"[\":219,\"\\\\\":220,\"]\":221,\"'\":222},o=t.aliases={windows:91,\"⇧\":16,\"⌥\":18,\"⌃\":17,\"⌘\":91,ctl:17,control:17,option:18,pause:19,break:19,caps:20,return:13,escape:27,spc:32,spacebar:32,pgup:33,pgdn:34,ins:45,del:46,cmd:91};for(i=97;i<123;i++)r[String.fromCharCode(i)]=i-32;for(var i=48;i<58;i++)r[i-48]=i;for(i=1;i<13;i++)r[\"f\"+i]=i+111;for(i=0;i<10;i++)r[\"numpad \"+i]=i+96;var a=t.names=t.title={};for(i in r)a[r[i]]=i;for(var s in o)r[s]=o[s]},194:(e,t,n)=>{var r=n(2651)(n(4329),\"DataView\");e.exports=r},9155:(e,t,n)=>{var r=n(6673),o=n(9605),i=n(9687),a=n(1972),s=n(863);function u(e){var t=-1,n=null==e?0:e.length;for(this.clear();++t{var r=n(8886),o=n(6457),i=n(8108),a=n(7765),s=n(9142);function u(e){var 
t=-1,n=null==e?0:e.length;for(this.clear();++t{var r=n(2651)(n(4329),\"Map\");e.exports=r},7372:(e,t,n)=>{var r=n(4692),o=n(7238),i=n(9615),a=n(1336),s=n(5084);function u(e){var t=-1,n=null==e?0:e.length;for(this.clear();++t{var r=n(2651)(n(4329),\"Promise\");e.exports=r},6559:(e,t,n)=>{var r=n(2651)(n(4329),\"Set\");e.exports=r},6041:(e,t,n)=>{var r=n(7372),o=n(3312),i=n(4201);function a(e){var t=-1,n=null==e?0:e.length;for(this.__data__=new r;++t{var r=n(2970),o=n(7617),i=n(4944),a=n(9935),s=n(9236),u=n(9346);function l(e){var t=this.__data__=new r(e);this.size=t.size}l.prototype.clear=o,l.prototype.delete=i,l.prototype.get=a,l.prototype.has=s,l.prototype.set=u,e.exports=l},877:(e,t,n)=>{var r=n(4329).Symbol;e.exports=r},9904:(e,t,n)=>{var r=n(4329).Uint8Array;e.exports=r},4215:(e,t,n)=>{var r=n(2651)(n(4329),\"WeakMap\");e.exports=r},9574:e=>{e.exports=function(e,t){for(var n=-1,r=null==e?0:e.length,o=0,i=[];++n{var r=n(3803),o=n(2441),i=n(4609),a=n(5211),s=n(2850),u=n(6603),l=Object.prototype.hasOwnProperty;e.exports=function(e,t){var n=i(e),c=!n&&o(e),f=!n&&!c&&a(e),p=!n&&!c&&!f&&u(e),d=n||c||f||p,h=d?r(e.length,String):[],v=h.length;for(var m in e)!t&&!l.call(e,m)||d&&(\"length\"==m||f&&(\"offset\"==m||\"parent\"==m)||p&&(\"buffer\"==m||\"byteLength\"==m||\"byteOffset\"==m)||s(m,v))||h.push(m);return h}},3309:e=>{e.exports=function(e,t){for(var n=-1,r=t.length,o=e.length;++n{e.exports=function(e,t){for(var n=-1,r=null==e?0:e.length;++n{var r=n(8213);e.exports=function(e,t){for(var n=e.length;n--;)if(r(e[n][0],t))return n;return-1}},1999:(e,t,n)=>{var r=n(3309),o=n(4609);e.exports=function(e,t,n){var i=t(e);return o(e)?i:r(i,n(e))}},3984:(e,t,n)=>{var r=n(877),o=n(1115),i=n(7058),a=r?r.toStringTag:void 0;e.exports=function(e){return null==e?void 0===e?\"[object Undefined]\":\"[object Null]\":a&&a in Object(e)?o(e):i(e)}},8996:(e,t,n)=>{var r=n(3984),o=n(8994);e.exports=function(e){return o(e)&&\"[object Arguments]\"==r(e)}},241:(e,t,n)=>{var r=n(5024),o=n(8994);e.exports=function e(t,n,i,a,s){return t===n||(null==t||null==n||!o(t)&&!o(n)?t!=t&&n!=n:r(t,n,i,a,e,s))}},5024:(e,t,n)=>{var r=n(4395),o=n(6436),i=n(4112),a=n(1800),s=n(3460),u=n(4609),l=n(5211),c=n(6603),f=\"[object Arguments]\",p=\"[object Array]\",d=\"[object Object]\",h=Object.prototype.hasOwnProperty;e.exports=function(e,t,n,v,m,y){var b=u(e),g=u(t),w=b?p:s(e),E=g?p:s(t),x=(w=w==f?d:w)==d,_=(E=E==f?d:E)==d,O=w==E;if(O&&l(e)){if(!l(t))return!1;b=!0,x=!1}if(O&&!x)return y||(y=new r),b||c(e)?o(e,t,n,v,m,y):i(e,t,w,n,v,m,y);if(!(1&n)){var C=x&&h.call(e,\"__wrapped__\"),S=_&&h.call(t,\"__wrapped__\");if(C||S){var T=C?e.value():e,k=S?t.value():t;return y||(y=new r),m(T,k,n,v,y)}}return!!O&&(y||(y=new r),a(e,t,n,v,m,y))}},9532:(e,t,n)=>{var r=n(664),o=n(3013),i=n(4411),a=n(1087),s=/^\\[object .+?Constructor\\]$/,u=Function.prototype,l=Object.prototype,c=u.toString,f=l.hasOwnProperty,p=RegExp(\"^\"+c.call(f).replace(/[\\\\^$.*+?()[\\]{}|]/g,\"\\\\$&\").replace(/hasOwnProperty|(function).*?(?=\\\\\\()| for .+?(?=\\\\\\])/g,\"$1.*?\")+\"$\");e.exports=function(e){return!(!i(e)||o(e))&&(r(e)?p:s).test(a(e))}},7934:(e,t,n)=>{var r=n(3984),o=n(2829),i=n(8994),a={};a[\"[object Float32Array]\"]=a[\"[object Float64Array]\"]=a[\"[object Int8Array]\"]=a[\"[object Int16Array]\"]=a[\"[object Int32Array]\"]=a[\"[object Uint8Array]\"]=a[\"[object Uint8ClampedArray]\"]=a[\"[object Uint16Array]\"]=a[\"[object Uint32Array]\"]=!0,a[\"[object Arguments]\"]=a[\"[object Array]\"]=a[\"[object ArrayBuffer]\"]=a[\"[object Boolean]\"]=a[\"[object 
DataView]\"]=a[\"[object Date]\"]=a[\"[object Error]\"]=a[\"[object Function]\"]=a[\"[object Map]\"]=a[\"[object Number]\"]=a[\"[object Object]\"]=a[\"[object RegExp]\"]=a[\"[object Set]\"]=a[\"[object String]\"]=a[\"[object WeakMap]\"]=!1,e.exports=function(e){return i(e)&&o(e.length)&&!!a[r(e)]}},1350:(e,t,n)=>{var r=n(3624),o=n(1360),i=Object.prototype.hasOwnProperty;e.exports=function(e){if(!r(e))return o(e);var t=[];for(var n in Object(e))i.call(e,n)&&\"constructor\"!=n&&t.push(n);return t}},3803:e=>{e.exports=function(e,t){for(var n=-1,r=Array(e);++n{e.exports=function(e){return function(t){return e(t)}}},4299:e=>{e.exports=function(e,t){return e.has(t)}},5637:(e,t,n)=>{var r=n(4329)[\"__core-js_shared__\"];e.exports=r},6436:(e,t,n)=>{var r=n(6041),o=n(898),i=n(4299);e.exports=function(e,t,n,a,s,u){var l=1&n,c=e.length,f=t.length;if(c!=f&&!(l&&f>c))return!1;var p=u.get(e),d=u.get(t);if(p&&d)return p==t&&d==e;var h=-1,v=!0,m=2&n?new r:void 0;for(u.set(e,t),u.set(t,e);++h{var r=n(877),o=n(9904),i=n(8213),a=n(6436),s=n(8628),u=n(8044),l=r?r.prototype:void 0,c=l?l.valueOf:void 0;e.exports=function(e,t,n,r,l,f,p){switch(n){case\"[object DataView]\":if(e.byteLength!=t.byteLength||e.byteOffset!=t.byteOffset)return!1;e=e.buffer,t=t.buffer;case\"[object ArrayBuffer]\":return!(e.byteLength!=t.byteLength||!f(new o(e),new o(t)));case\"[object Boolean]\":case\"[object Date]\":case\"[object Number]\":return i(+e,+t);case\"[object Error]\":return e.name==t.name&&e.message==t.message;case\"[object RegExp]\":case\"[object String]\":return e==t+\"\";case\"[object Map]\":var d=s;case\"[object Set]\":var h=1&r;if(d||(d=u),e.size!=t.size&&!h)return!1;var v=p.get(e);if(v)return v==t;r|=2,p.set(e,t);var m=a(d(e),d(t),r,l,f,p);return p.delete(e),m;case\"[object Symbol]\":if(c)return c.call(e)==c.call(t)}return!1}},1800:(e,t,n)=>{var r=n(147),o=Object.prototype.hasOwnProperty;e.exports=function(e,t,n,i,a,s){var u=1&n,l=r(e),c=l.length;if(c!=r(t).length&&!u)return!1;for(var f=c;f--;){var p=l[f];if(!(u?p in t:o.call(t,p)))return!1}var d=s.get(e),h=s.get(t);if(d&&h)return d==t&&h==e;var v=!0;s.set(e,t),s.set(t,e);for(var m=u;++f{var r=\"object\"==typeof n.g&&n.g&&n.g.Object===Object&&n.g;e.exports=r},147:(e,t,n)=>{var r=n(1999),o=n(8968),i=n(8220);e.exports=function(e){return r(e,i,o)}},7864:(e,t,n)=>{var r=n(5668);e.exports=function(e,t){var n=e.__data__;return r(t)?n[\"string\"==typeof t?\"string\":\"hash\"]:n.map}},2651:(e,t,n)=>{var r=n(9532),o=n(4428);e.exports=function(e,t){var n=o(e,t);return r(n)?n:void 0}},1115:(e,t,n)=>{var r=n(877),o=Object.prototype,i=o.hasOwnProperty,a=o.toString,s=r?r.toStringTag:void 0;e.exports=function(e){var t=i.call(e,s),n=e[s];try{e[s]=void 0;var r=!0}catch(e){}var o=a.call(e);return r&&(t?e[s]=n:delete e[s]),o}},8968:(e,t,n)=>{var r=n(9574),o=n(9874),i=Object.prototype.propertyIsEnumerable,a=Object.getOwnPropertySymbols,s=a?function(e){return null==e?[]:(e=Object(e),r(a(e),(function(t){return i.call(e,t)})))}:o;e.exports=s},3460:(e,t,n)=>{var r=n(194),o=n(2602),i=n(3057),a=n(6559),s=n(4215),u=n(3984),l=n(1087),c=\"[object Map]\",f=\"[object Promise]\",p=\"[object Set]\",d=\"[object WeakMap]\",h=\"[object DataView]\",v=l(r),m=l(o),y=l(i),b=l(a),g=l(s),w=u;(r&&w(new r(new ArrayBuffer(1)))!=h||o&&w(new o)!=c||i&&w(i.resolve())!=f||a&&w(new a)!=p||s&&w(new s)!=d)&&(w=function(e){var t=u(e),n=\"[object Object]\"==t?e.constructor:void 0,r=n?l(n):\"\";if(r)switch(r){case v:return h;case m:return c;case y:return f;case b:return p;case g:return d}return 
t}),e.exports=w},4428:e=>{e.exports=function(e,t){return null==e?void 0:e[t]}},6673:(e,t,n)=>{var r=n(591);e.exports=function(){this.__data__=r?r(null):{},this.size=0}},9605:e=>{e.exports=function(e){var t=this.has(e)&&delete this.__data__[e];return this.size-=t?1:0,t}},9687:(e,t,n)=>{var r=n(591),o=Object.prototype.hasOwnProperty;e.exports=function(e){var t=this.__data__;if(r){var n=t[e];return\"__lodash_hash_undefined__\"===n?void 0:n}return o.call(t,e)?t[e]:void 0}},1972:(e,t,n)=>{var r=n(591),o=Object.prototype.hasOwnProperty;e.exports=function(e){var t=this.__data__;return r?void 0!==t[e]:o.call(t,e)}},863:(e,t,n)=>{var r=n(591);e.exports=function(e,t){var n=this.__data__;return this.size+=this.has(e)?0:1,n[e]=r&&void 0===t?\"__lodash_hash_undefined__\":t,this}},2850:e=>{var t=/^(?:0|[1-9]\\d*)$/;e.exports=function(e,n){var r=typeof e;return!!(n=null==n?9007199254740991:n)&&(\"number\"==r||\"symbol\"!=r&&t.test(e))&&e>-1&&e%1==0&&e{e.exports=function(e){var t=typeof e;return\"string\"==t||\"number\"==t||\"symbol\"==t||\"boolean\"==t?\"__proto__\"!==e:null===e}},3013:(e,t,n)=>{var r,o=n(5637),i=(r=/[^.]+$/.exec(o&&o.keys&&o.keys.IE_PROTO||\"\"))?\"Symbol(src)_1.\"+r:\"\";e.exports=function(e){return!!i&&i in e}},3624:e=>{var t=Object.prototype;e.exports=function(e){var n=e&&e.constructor;return e===(\"function\"==typeof n&&n.prototype||t)}},8886:e=>{e.exports=function(){this.__data__=[],this.size=0}},6457:(e,t,n)=>{var r=n(5256),o=Array.prototype.splice;e.exports=function(e){var t=this.__data__,n=r(t,e);return!(n<0||(n==t.length-1?t.pop():o.call(t,n,1),--this.size,0))}},8108:(e,t,n)=>{var r=n(5256);e.exports=function(e){var t=this.__data__,n=r(t,e);return n<0?void 0:t[n][1]}},7765:(e,t,n)=>{var r=n(5256);e.exports=function(e){return r(this.__data__,e)>-1}},9142:(e,t,n)=>{var r=n(5256);e.exports=function(e,t){var n=this.__data__,o=r(n,e);return o<0?(++this.size,n.push([e,t])):n[o][1]=t,this}},4692:(e,t,n)=>{var r=n(9155),o=n(2970),i=n(2602);e.exports=function(){this.size=0,this.__data__={hash:new r,map:new(i||o),string:new r}}},7238:(e,t,n)=>{var r=n(7864);e.exports=function(e){var t=r(this,e).delete(e);return this.size-=t?1:0,t}},9615:(e,t,n)=>{var r=n(7864);e.exports=function(e){return r(this,e).get(e)}},1336:(e,t,n)=>{var r=n(7864);e.exports=function(e){return r(this,e).has(e)}},5084:(e,t,n)=>{var r=n(7864);e.exports=function(e,t){var n=r(this,e),o=n.size;return n.set(e,t),this.size+=n.size==o?0:1,this}},8628:e=>{e.exports=function(e){var t=-1,n=Array(e.size);return e.forEach((function(e,r){n[++t]=[r,e]})),n}},591:(e,t,n)=>{var r=n(2651)(Object,\"create\");e.exports=r},1360:(e,t,n)=>{var r=n(6507)(Object.keys,Object);e.exports=r},2824:(e,t,n)=>{e=n.nmd(e);var r=n(2836),o=t&&!t.nodeType&&t,i=o&&e&&!e.nodeType&&e,a=i&&i.exports===o&&r.process,s=function(){try{return i&&i.require&&i.require(\"util\").types||a&&a.binding&&a.binding(\"util\")}catch(e){}}();e.exports=s},7058:e=>{var t=Object.prototype.toString;e.exports=function(e){return t.call(e)}},6507:e=>{e.exports=function(e,t){return function(n){return e(t(n))}}},4329:(e,t,n)=>{var r=n(2836),o=\"object\"==typeof self&&self&&self.Object===Object&&self,i=r||o||Function(\"return this\")();e.exports=i},3312:e=>{e.exports=function(e){return this.__data__.set(e,\"__lodash_hash_undefined__\"),this}},4201:e=>{e.exports=function(e){return this.__data__.has(e)}},8044:e=>{e.exports=function(e){var t=-1,n=Array(e.size);return e.forEach((function(e){n[++t]=e})),n}},7617:(e,t,n)=>{var r=n(2970);e.exports=function(){this.__data__=new 
r,this.size=0}},4944:e=>{e.exports=function(e){var t=this.__data__,n=t.delete(e);return this.size=t.size,n}},9935:e=>{e.exports=function(e){return this.__data__.get(e)}},9236:e=>{e.exports=function(e){return this.__data__.has(e)}},9346:(e,t,n)=>{var r=n(2970),o=n(2602),i=n(7372);e.exports=function(e,t){var n=this.__data__;if(n instanceof r){var a=n.__data__;if(!o||a.length<199)return a.push([e,t]),this.size=++n.size,this;n=this.__data__=new i(a)}return n.set(e,t),this.size=n.size,this}},1087:e=>{var t=Function.prototype.toString;e.exports=function(e){if(null!=e){try{return t.call(e)}catch(e){}try{return e+\"\"}catch(e){}}return\"\"}},8213:e=>{e.exports=function(e,t){return e===t||e!=e&&t!=t}},2441:(e,t,n)=>{var r=n(8996),o=n(8994),i=Object.prototype,a=i.hasOwnProperty,s=i.propertyIsEnumerable,u=r(function(){return arguments}())?r:function(e){return o(e)&&a.call(e,\"callee\")&&!s.call(e,\"callee\")};e.exports=u},4609:e=>{var t=Array.isArray;e.exports=t},3527:(e,t,n)=>{var r=n(664),o=n(2829);e.exports=function(e){return null!=e&&o(e.length)&&!r(e)}},5211:(e,t,n)=>{e=n.nmd(e);var r=n(4329),o=n(5676),i=t&&!t.nodeType&&t,a=i&&e&&!e.nodeType&&e,s=a&&a.exports===i?r.Buffer:void 0,u=(s?s.isBuffer:void 0)||o;e.exports=u},5508:(e,t,n)=>{var r=n(241);e.exports=function(e,t){return r(e,t)}},664:(e,t,n)=>{var r=n(3984),o=n(4411);e.exports=function(e){if(!o(e))return!1;var t=r(e);return\"[object Function]\"==t||\"[object GeneratorFunction]\"==t||\"[object AsyncFunction]\"==t||\"[object Proxy]\"==t}},2829:e=>{e.exports=function(e){return\"number\"==typeof e&&e>-1&&e%1==0&&e<=9007199254740991}},4411:e=>{e.exports=function(e){var t=typeof e;return null!=e&&(\"object\"==t||\"function\"==t)}},8994:e=>{e.exports=function(e){return null!=e&&\"object\"==typeof e}},6603:(e,t,n)=>{var r=n(7934),o=n(3452),i=n(2824),a=i&&i.isTypedArray,s=a?o(a):r;e.exports=s},8220:(e,t,n)=>{var r=n(6681),o=n(1350),i=n(3527);e.exports=function(e){return i(e)?r(e):o(e)}},9874:e=>{e.exports=function(){return[]}},5676:e=>{e.exports=function(){return!1}},1049:(e,t,n)=>{\"use strict\";var r=n(4562);function o(){}function i(){}i.resetWarningCache=o,e.exports=function(){function e(e,t,n,o,i,a){if(a!==r){var s=new Error(\"Calling PropTypes validators directly is not supported by the `prop-types` package. Use PropTypes.checkPropTypes() to call them. 
Read more at http://fb.me/use-check-prop-types\");throw s.name=\"Invariant Violation\",s}}function t(){return e}e.isRequired=e;var n={array:e,bool:e,func:e,number:e,object:e,string:e,symbol:e,any:e,arrayOf:t,element:e,elementType:e,instanceOf:t,node:e,objectOf:t,oneOf:t,oneOfType:t,shape:t,exact:t,checkPropTypes:i,resetWarningCache:o};return n.PropTypes=n,n}},8076:(e,t,n)=>{e.exports=n(1049)()},4562:e=>{\"use strict\";e.exports=\"SECRET_DO_NOT_PASS_THIS_OR_YOU_WILL_BE_FIRED\"},244:(e,t)=>{\"use strict\";var n=\"function\"==typeof Symbol&&Symbol.for,r=n?Symbol.for(\"react.element\"):60103,o=n?Symbol.for(\"react.portal\"):60106,i=n?Symbol.for(\"react.fragment\"):60107,a=n?Symbol.for(\"react.strict_mode\"):60108,s=n?Symbol.for(\"react.profiler\"):60114,u=n?Symbol.for(\"react.provider\"):60109,l=n?Symbol.for(\"react.context\"):60110,c=n?Symbol.for(\"react.async_mode\"):60111,f=n?Symbol.for(\"react.concurrent_mode\"):60111,p=n?Symbol.for(\"react.forward_ref\"):60112,d=n?Symbol.for(\"react.suspense\"):60113,h=n?Symbol.for(\"react.suspense_list\"):60120,v=n?Symbol.for(\"react.memo\"):60115,m=n?Symbol.for(\"react.lazy\"):60116,y=n?Symbol.for(\"react.block\"):60121,b=n?Symbol.for(\"react.fundamental\"):60117,g=n?Symbol.for(\"react.responder\"):60118,w=n?Symbol.for(\"react.scope\"):60119;function E(e){if(\"object\"==typeof e&&null!==e){var t=e.$$typeof;switch(t){case r:switch(e=e.type){case c:case f:case i:case s:case a:case d:return e;default:switch(e=e&&e.$$typeof){case l:case p:case m:case v:case u:return e;default:return t}}case o:return t}}}function x(e){return E(e)===f}t.AsyncMode=c,t.ConcurrentMode=f,t.ContextConsumer=l,t.ContextProvider=u,t.Element=r,t.ForwardRef=p,t.Fragment=i,t.Lazy=m,t.Memo=v,t.Portal=o,t.Profiler=s,t.StrictMode=a,t.Suspense=d,t.isAsyncMode=function(e){return x(e)||E(e)===c},t.isConcurrentMode=x,t.isContextConsumer=function(e){return E(e)===l},t.isContextProvider=function(e){return E(e)===u},t.isElement=function(e){return\"object\"==typeof e&&null!==e&&e.$$typeof===r},t.isForwardRef=function(e){return E(e)===p},t.isFragment=function(e){return E(e)===i},t.isLazy=function(e){return E(e)===m},t.isMemo=function(e){return E(e)===v},t.isPortal=function(e){return E(e)===o},t.isProfiler=function(e){return E(e)===s},t.isStrictMode=function(e){return E(e)===a},t.isSuspense=function(e){return E(e)===d},t.isValidElementType=function(e){return\"string\"==typeof e||\"function\"==typeof e||e===i||e===f||e===s||e===a||e===d||e===h||\"object\"==typeof e&&null!==e&&(e.$$typeof===m||e.$$typeof===v||e.$$typeof===u||e.$$typeof===l||e.$$typeof===p||e.$$typeof===b||e.$$typeof===g||e.$$typeof===w||e.$$typeof===y)},t.typeOf=E},2744:(e,t,n)=>{\"use strict\";e.exports=n(244)},2442:(e,t,n)=>{\"use strict\";function r(){var e=this.constructor.getDerivedStateFromProps(this.props,this.state);null!=e&&this.setState(e)}function o(e){this.setState(function(t){var n=this.constructor.getDerivedStateFromProps(e,t);return null!=n?n:null}.bind(this))}function i(e,t){try{var n=this.props,r=this.state;this.props=e,this.state=t,this.__reactInternalSnapshotFlag=!0,this.__reactInternalSnapshot=this.getSnapshotBeforeUpdate(n,r)}finally{this.props=n,this.state=r}}function a(e){var t=e.prototype;if(!t||!t.isReactComponent)throw new Error(\"Can only polyfill class components\");if(\"function\"!=typeof e.getDerivedStateFromProps&&\"function\"!=typeof t.getSnapshotBeforeUpdate)return e;var n=null,a=null,s=null;if(\"function\"==typeof t.componentWillMount?n=\"componentWillMount\":\"function\"==typeof 
t.UNSAFE_componentWillMount&&(n=\"UNSAFE_componentWillMount\"),\"function\"==typeof t.componentWillReceiveProps?a=\"componentWillReceiveProps\":\"function\"==typeof t.UNSAFE_componentWillReceiveProps&&(a=\"UNSAFE_componentWillReceiveProps\"),\"function\"==typeof t.componentWillUpdate?s=\"componentWillUpdate\":\"function\"==typeof t.UNSAFE_componentWillUpdate&&(s=\"UNSAFE_componentWillUpdate\"),null!==n||null!==a||null!==s){var u=e.displayName||e.name,l=\"function\"==typeof e.getDerivedStateFromProps?\"getDerivedStateFromProps()\":\"getSnapshotBeforeUpdate()\";throw Error(\"Unsafe legacy lifecycles will not be called for components using new component APIs.\\n\\n\"+u+\" uses \"+l+\" but also contains the following legacy lifecycles:\"+(null!==n?\"\\n \"+n:\"\")+(null!==a?\"\\n \"+a:\"\")+(null!==s?\"\\n \"+s:\"\")+\"\\n\\nThe above lifecycles should be removed. Learn more about this warning here:\\nhttps://fb.me/react-async-component-lifecycle-hooks\")}if(\"function\"==typeof e.getDerivedStateFromProps&&(t.componentWillMount=r,t.componentWillReceiveProps=o),\"function\"==typeof t.getSnapshotBeforeUpdate){if(\"function\"!=typeof t.componentDidUpdate)throw new Error(\"Cannot polyfill getSnapshotBeforeUpdate() for components that do not define componentDidUpdate() on the prototype\");t.componentWillUpdate=i;var c=t.componentDidUpdate;t.componentDidUpdate=function(e,t,n){var r=this.__reactInternalSnapshotFlag?this.__reactInternalSnapshot:n;c.call(this,e,t,r)}}return e}n.r(t),n.d(t,{polyfill:()=>a}),r.__suppressDeprecationWarning=!0,o.__suppressDeprecationWarning=!0,i.__suppressDeprecationWarning=!0},2515:e=>{var t=function(e){\"use strict\";var t,n=Object.prototype,r=n.hasOwnProperty,o=\"function\"==typeof Symbol?Symbol:{},i=o.iterator||\"@@iterator\",a=o.asyncIterator||\"@@asyncIterator\",s=o.toStringTag||\"@@toStringTag\";function u(e,t,n){return Object.defineProperty(e,t,{value:n,enumerable:!0,configurable:!0,writable:!0}),e[t]}try{u({},\"\")}catch(e){u=function(e,t,n){return e[t]=n}}function l(e,t,n,r){var o=t&&t.prototype instanceof m?t:m,i=Object.create(o.prototype),a=new k(r||[]);return i._invoke=function(e,t,n){var r=f;return function(o,i){if(r===d)throw new Error(\"Generator is already running\");if(r===h){if(\"throw\"===o)throw i;return P()}for(n.method=o,n.arg=i;;){var a=n.delegate;if(a){var s=C(a,n);if(s){if(s===v)continue;return s}}if(\"next\"===n.method)n.sent=n._sent=n.arg;else if(\"throw\"===n.method){if(r===f)throw r=h,n.arg;n.dispatchException(n.arg)}else\"return\"===n.method&&n.abrupt(\"return\",n.arg);r=d;var u=c(e,t,n);if(\"normal\"===u.type){if(r=n.done?h:p,u.arg===v)continue;return{value:u.arg,done:n.done}}\"throw\"===u.type&&(r=h,n.method=\"throw\",n.arg=u.arg)}}}(e,n,a),i}function c(e,t,n){try{return{type:\"normal\",arg:e.call(t,n)}}catch(e){return{type:\"throw\",arg:e}}}e.wrap=l;var f=\"suspendedStart\",p=\"suspendedYield\",d=\"executing\",h=\"completed\",v={};function m(){}function y(){}function b(){}var g={};u(g,i,(function(){return this}));var w=Object.getPrototypeOf,E=w&&w(w(M([])));E&&E!==n&&r.call(E,i)&&(g=E);var x=b.prototype=m.prototype=Object.create(g);function _(e){[\"next\",\"throw\",\"return\"].forEach((function(t){u(e,t,(function(e){return this._invoke(t,e)}))}))}function O(e,t){function n(o,i,a,s){var u=c(e[o],e,i);if(\"throw\"!==u.type){var l=u.arg,f=l.value;return f&&\"object\"==typeof 
f&&r.call(f,\"__await\")?t.resolve(f.__await).then((function(e){n(\"next\",e,a,s)}),(function(e){n(\"throw\",e,a,s)})):t.resolve(f).then((function(e){l.value=e,a(l)}),(function(e){return n(\"throw\",e,a,s)}))}s(u.arg)}var o;this._invoke=function(e,r){function i(){return new t((function(t,o){n(e,r,t,o)}))}return o=o?o.then(i,i):i()}}function C(e,n){var r=e.iterator[n.method];if(r===t){if(n.delegate=null,\"throw\"===n.method){if(e.iterator.return&&(n.method=\"return\",n.arg=t,C(e,n),\"throw\"===n.method))return v;n.method=\"throw\",n.arg=new TypeError(\"The iterator does not provide a 'throw' method\")}return v}var o=c(r,e.iterator,n.arg);if(\"throw\"===o.type)return n.method=\"throw\",n.arg=o.arg,n.delegate=null,v;var i=o.arg;return i?i.done?(n[e.resultName]=i.value,n.next=e.nextLoc,\"return\"!==n.method&&(n.method=\"next\",n.arg=t),n.delegate=null,v):i:(n.method=\"throw\",n.arg=new TypeError(\"iterator result is not an object\"),n.delegate=null,v)}function S(e){var t={tryLoc:e[0]};1 in e&&(t.catchLoc=e[1]),2 in e&&(t.finallyLoc=e[2],t.afterLoc=e[3]),this.tryEntries.push(t)}function T(e){var t=e.completion||{};t.type=\"normal\",delete t.arg,e.completion=t}function k(e){this.tryEntries=[{tryLoc:\"root\"}],e.forEach(S,this),this.reset(!0)}function M(e){if(e){var n=e[i];if(n)return n.call(e);if(\"function\"==typeof e.next)return e;if(!isNaN(e.length)){var o=-1,a=function n(){for(;++o=0;--i){var a=this.tryEntries[i],s=a.completion;if(\"root\"===a.tryLoc)return o(\"end\");if(a.tryLoc<=this.prev){var u=r.call(a,\"catchLoc\"),l=r.call(a,\"finallyLoc\");if(u&&l){if(this.prev=0;--n){var o=this.tryEntries[n];if(o.tryLoc<=this.prev&&r.call(o,\"finallyLoc\")&&this.prev=0;--t){var n=this.tryEntries[t];if(n.finallyLoc===e)return this.complete(n.completion,n.afterLoc),T(n),v}},catch:function(e){for(var t=this.tryEntries.length-1;t>=0;--t){var n=this.tryEntries[t];if(n.tryLoc===e){var r=n.completion;if(\"throw\"===r.type){var o=r.arg;T(n)}return o}}throw new Error(\"illegal catch attempt\")},delegateYield:function(e,n,r){return this.delegate={iterator:M(e),resultName:n,nextLoc:r},\"next\"===this.method&&(this.arg=t),v}},e}(e.exports);try{regeneratorRuntime=t}catch(e){\"object\"==typeof globalThis?globalThis.regeneratorRuntime=t:Function(\"r\",\"regeneratorRuntime = r\")(t)}},7382:e=>{\"use strict\";e.exports=function(){}},472:(e,t,n)=>{n(7860),n(615),e.exports=n(1176).Array.from},4597:(e,t,n)=>{n(2524),e.exports=n(1176).Object.assign},6307:(e,t,n)=>{n(1309);var r=n(1176).Object;e.exports=function(e,t){return r.create(e,t)}},4700:(e,t,n)=>{n(3823),e.exports=n(1176).Object.entries},1371:(e,t,n)=>{n(5681),e.exports=n(1176).Object.keys},6356:(e,t,n)=>{n(2201),e.exports=n(1176).Object.setPrototypeOf},6798:(e,t,n)=>{n(803),e.exports=n(1176).Object.values},3658:e=>{e.exports=function(e){if(\"function\"!=typeof e)throw TypeError(e+\" is not a function!\");return e}},3229:(e,t,n)=>{var r=n(4525);e.exports=function(e){if(!r(e))throw TypeError(e+\" is not an object!\");return e}},2592:(e,t,n)=>{var r=n(317),o=n(9228),i=n(7933);e.exports=function(e){return function(t,n,a){var s,u=r(t),l=o(u.length),c=i(a,l);if(e&&n!=n){for(;l>c;)if((s=u[c++])!=s)return!0}else for(;l>c;c++)if((e||c in u)&&u[c]===n)return e||c||0;return!e&&-1}}},5172:(e,t,n)=>{var r=n(1940),o=n(9117)(\"toStringTag\"),i=\"Arguments\"==r(function(){return arguments}());e.exports=function(e){var t,n,a;return void 0===e?\"Undefined\":null===e?\"Null\":\"string\"==typeof(n=function(e,t){try{return 
e[t]}catch(e){}}(t=Object(e),o))?n:i?r(t):\"Object\"==(a=r(t))&&\"function\"==typeof t.callee?\"Arguments\":a}},1940:e=>{var t={}.toString;e.exports=function(e){return t.call(e).slice(8,-1)}},1176:e=>{var t=e.exports={version:\"2.6.12\"};\"number\"==typeof __e&&(__e=t)},2527:(e,t,n)=>{\"use strict\";var r=n(6397),o=n(2929);e.exports=function(e,t,n){t in e?r.f(e,t,o(0,n)):e[t]=n}},847:(e,t,n)=>{var r=n(3658);e.exports=function(e,t,n){if(r(e),void 0===t)return e;switch(n){case 1:return function(n){return e.call(t,n)};case 2:return function(n,r){return e.call(t,n,r)};case 3:return function(n,r,o){return e.call(t,n,r,o)}}return function(){return e.apply(t,arguments)}}},3790:e=>{e.exports=function(e){if(null==e)throw TypeError(\"Can't call method on \"+e);return e}},4922:(e,t,n)=>{e.exports=!n(8071)((function(){return 7!=Object.defineProperty({},\"a\",{get:function(){return 7}}).a}))},3184:(e,t,n)=>{var r=n(4525),o=n(8252).document,i=r(o)&&r(o.createElement);e.exports=function(e){return i?o.createElement(e):{}}},6800:e=>{e.exports=\"constructor,hasOwnProperty,isPrototypeOf,propertyIsEnumerable,toLocaleString,toString,valueOf\".split(\",\")},9645:(e,t,n)=>{var r=n(8252),o=n(1176),i=n(847),a=n(4421),s=n(9843),u=function(e,t,n){var l,c,f,p=e&u.F,d=e&u.G,h=e&u.S,v=e&u.P,m=e&u.B,y=e&u.W,b=d?o:o[t]||(o[t]={}),g=b.prototype,w=d?r:h?r[t]:(r[t]||{}).prototype;for(l in d&&(n=t),n)(c=!p&&w&&void 0!==w[l])&&s(b,l)||(f=c?w[l]:n[l],b[l]=d&&\"function\"!=typeof w[l]?n[l]:m&&c?i(f,r):y&&w[l]==f?function(e){var t=function(t,n,r){if(this instanceof e){switch(arguments.length){case 0:return new e;case 1:return new e(t);case 2:return new e(t,n)}return new e(t,n,r)}return e.apply(this,arguments)};return t.prototype=e.prototype,t}(f):v&&\"function\"==typeof f?i(Function.call,f):f,v&&((b.virtual||(b.virtual={}))[l]=f,e&u.R&&g&&!g[l]&&a(g,l,f)))};u.F=1,u.G=2,u.S=4,u.P=8,u.B=16,u.W=32,u.U=64,u.R=128,e.exports=u},8071:e=>{e.exports=function(e){try{return!!e()}catch(e){return!0}}},8252:e=>{var t=e.exports=\"undefined\"!=typeof window&&window.Math==Math?window:\"undefined\"!=typeof self&&self.Math==Math?self:Function(\"return this\")();\"number\"==typeof __g&&(__g=t)},9843:e=>{var t={}.hasOwnProperty;e.exports=function(e,n){return t.call(e,n)}},4421:(e,t,n)=>{var r=n(6397),o=n(2929);e.exports=n(4922)?function(e,t,n){return r.f(e,t,o(1,n))}:function(e,t,n){return e[t]=n,e}},5190:(e,t,n)=>{var r=n(8252).document;e.exports=r&&r.documentElement},6419:(e,t,n)=>{e.exports=!n(4922)&&!n(8071)((function(){return 7!=Object.defineProperty(n(3184)(\"div\"),\"a\",{get:function(){return 7}}).a}))},4190:(e,t,n)=>{var r=n(1940);e.exports=Object(\"z\").propertyIsEnumerable(0)?Object:function(e){return\"String\"==r(e)?e.split(\"\"):Object(e)}},4220:(e,t,n)=>{var r=n(8355),o=n(9117)(\"iterator\"),i=Array.prototype;e.exports=function(e){return void 0!==e&&(r.Array===e||i[o]===e)}},4525:e=>{e.exports=function(e){return\"object\"==typeof e?null!==e:\"function\"==typeof e}},3432:(e,t,n)=>{var r=n(3229);e.exports=function(e,t,n,o){try{return o?t(r(n)[0],n[1]):t(n)}catch(t){var i=e.return;throw void 0!==i&&r(i.call(e)),t}}},3299:(e,t,n)=>{\"use strict\";var r=n(9525),o=n(2929),i=n(9453),a={};n(4421)(a,n(9117)(\"iterator\"),(function(){return this})),e.exports=function(e,t,n){e.prototype=r(a,{next:o(1,n)}),i(e,t+\" Iterator\")}},9973:(e,t,n)=>{\"use strict\";var r=n(2020),o=n(9645),i=n(9576),a=n(4421),s=n(8355),u=n(3299),l=n(9453),c=n(9551),f=n(9117)(\"iterator\"),p=!([].keys&&\"next\"in[].keys()),d=\"keys\",h=\"values\",v=function(){return 
this};e.exports=function(e,t,n,m,y,b,g){u(n,t,m);var w,E,x,_=function(e){if(!p&&e in T)return T[e];switch(e){case d:case h:return function(){return new n(this,e)}}return function(){return new n(this,e)}},O=t+\" Iterator\",C=y==h,S=!1,T=e.prototype,k=T[f]||T[\"@@iterator\"]||y&&T[y],M=k||_(y),P=y?C?_(\"entries\"):M:void 0,N=\"Array\"==t&&T.entries||k;if(N&&(x=c(N.call(new e)))!==Object.prototype&&x.next&&(l(x,O,!0),r||\"function\"==typeof x[f]||a(x,f,v)),C&&k&&k.name!==h&&(S=!0,M=function(){return k.call(this)}),r&&!g||!p&&!S&&T[f]||a(T,f,M),s[t]=M,s[O]=v,y)if(w={values:C?M:_(h),keys:b?M:_(d),entries:P},g)for(E in w)E in T||i(T,E,w[E]);else o(o.P+o.F*(p||S),t,w);return w}},9736:(e,t,n)=>{var r=n(9117)(\"iterator\"),o=!1;try{var i=[7][r]();i.return=function(){o=!0},Array.from(i,(function(){throw 2}))}catch(e){}e.exports=function(e,t){if(!t&&!o)return!1;var n=!1;try{var i=[7],a=i[r]();a.next=function(){return{done:n=!0}},i[r]=function(){return a},e(i)}catch(e){}return n}},8355:e=>{e.exports={}},2020:e=>{e.exports=!0},3364:(e,t,n)=>{\"use strict\";var r=n(4922),o=n(8350),i=n(2858),a=n(6083),s=n(6058),u=n(4190),l=Object.assign;e.exports=!l||n(8071)((function(){var e={},t={},n=Symbol(),r=\"abcdefghijklmnopqrst\";return e[n]=7,r.split(\"\").forEach((function(e){t[e]=e})),7!=l({},e)[n]||Object.keys(l({},t)).join(\"\")!=r}))?function(e,t){for(var n=s(e),l=arguments.length,c=1,f=i.f,p=a.f;l>c;)for(var d,h=u(arguments[c++]),v=f?o(h).concat(f(h)):o(h),m=v.length,y=0;m>y;)d=v[y++],r&&!p.call(h,d)||(n[d]=h[d]);return n}:l},9525:(e,t,n)=>{var r=n(3229),o=n(4433),i=n(6800),a=n(2977)(\"IE_PROTO\"),s=function(){},u=function(){var e,t=n(3184)(\"iframe\"),r=i.length;for(t.style.display=\"none\",n(5190).appendChild(t),t.src=\"javascript:\",(e=t.contentWindow.document).open(),e.write(\" + + + + +
diff --git a/ui/webapp/src/static/notif.mp3 b/ui/webapp/src/static/notif.mp3
new file mode 100644
index 0000000..baf6a4a
Binary files /dev/null and b/ui/webapp/src/static/notif.mp3 differ
diff --git a/ui/webapp/webpack.config.js b/ui/webapp/webpack.config.js
new file mode 100644
index 0000000..1fe7801
--- /dev/null
+++ b/ui/webapp/webpack.config.js
@@ -0,0 +1,49 @@
+/*
+ * Copyright (c) Facebook, Inc. and its affiliates.
+ * This source code is licensed under the MIT license found in the
+ * LICENSE file in the root directory of this source tree.
+ */
+
+var path = require("path");
+var webpack = require("webpack");
+
+module.exports = {
+  entry: "./src/main.js",
+  output: {
+    path: __dirname,
+    filename: "build/bundle.js",
+  },
+  resolve: {
+    alias: {
+      react: path.resolve("./node_modules/react"),
+      "mephisto-task": path.resolve("./node_modules/mephisto-task"),
+    },
+    fallback: {
+      net: false,
+      dns: false,
+    },
+  },
+  module: {
+    rules: [
+      {
+        test: /\.(js|jsx)$/,
+        loader: "babel-loader",
+        exclude: /node_modules/,
+        options: { presets: ["@babel/env"] },
+      },
+      {
+        test: /\.css$/,
+        use: ["style-loader", "css-loader"],
+      },
+      {
+        test: /\.(svg|png|jpe?g|ttf)$/,
+        loader: "url-loader",
+        options: { limit: 100000 },
+      },
+      {
+        test: /\.jpg$/,
+        loader: "file-loader",
+      },
+    ],
+  },
+};
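Note: the minified build/bundle.js content shown earlier is the generated output that this webpack.config.js produces from src/main.js. As a minimal usage sketch (not part of the diff; the file name build.js and the stats options are assumptions), the same config can be driven from the webpack Node API:

/* build.js — hypothetical helper script, assumes webpack is installed alongside the config above */
var webpack = require("webpack");
var config = require("./webpack.config.js");

webpack(config, function (err, stats) {
  if (err || stats.hasErrors()) {
    // Surface fatal and compilation errors, then fail the build.
    console.error(err || stats.toString({ colors: true }));
    process.exit(1);
  }
  // On success the bundle is emitted to build/bundle.js, per output.filename in the config.
  console.log(stats.toString({ colors: true, modules: false }));
});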