diff --git a/worlds/ff4fe/FreeEnterpriseForAP/FreeEnt/generator.py b/worlds/ff4fe/FreeEnterpriseForAP/FreeEnt/generator.py
index a4721b66efab..0321ebfa16bd 100644
--- a/worlds/ff4fe/FreeEnterpriseForAP/FreeEnt/generator.py
+++ b/worlds/ff4fe/FreeEnterpriseForAP/FreeEnt/generator.py
@@ -16,7 +16,7 @@
 import pyaes
 
-from .. import f4c
+from . import f4c
 
 from .flags import FlagSet, FlagLogic
 from .address import *
diff --git a/worlds/ff4fe/FreeEnterpriseForAP/f4c/__init__.py b/worlds/ff4fe/FreeEnterpriseForAP/f4c/__init__.py
deleted file mode 100644
index d30a9e64a041..000000000000
--- a/worlds/ff4fe/FreeEnterpriseForAP/f4c/__init__.py
+++ /dev/null
@@ -1,9 +0,0 @@
-from .compile import compile, CompileOptions
-from .bytes_patch import BytesPatch
-
-def encode_text(text):
-    from . import ff4struct
-    byte_list = ff4struct.text.encode(text, allow_dual_char=False)
-    if byte_list and byte_list[-1] == 0x00:
-        byte_list.pop()
-    return byte_list
diff --git a/worlds/ff4fe/FreeEnterpriseForAP/f4c/ai_common.py b/worlds/ff4fe/FreeEnterpriseForAP/f4c/ai_common.py
deleted file mode 100644
index 70393c78b84e..000000000000
--- a/worlds/ff4fe/FreeEnterpriseForAP/f4c/ai_common.py
+++ /dev/null
@@ -1,59 +0,0 @@
-CHAIN_START_CODE = 0xFD
-CHAIN_END_CODE = 0xFC
-CHAIN_INTO_CODE = 0xFB
-END_CODE = 0xFF
-
-COMMANDS = {
-    0xC0 : ['fight'],
-    0xE1 : ['pass'],
-    0xE8 : ['set race {}', 'races'],
-    0xE9 : ['set attack index {}', 'hex'],
-    0xEA : ['set defense index {}', 'hex'],
-    0xEB : ['set magic defense index {}', 'hex'],
-    0xEC : ['speed {}', 'speed_delta'],
-    0xED : ['set resistance {}', 'elements'],
-    0xEE : ['set spell power {}', 'decimal'],
-    0xEF : ['set weakness {}', 'elements'],
-    0xF0 : ['set sprite {}', 'hex'],
-    0xF1 : ['message {}', 'hex'],
-    0xF2 : ['message {} next action', 'hex'],
-    0xF3 : ['music {}', 'music'],
-    0xF4 : ['condition {}', 'condition_delta'],
-    0xF5 : ['set reaction {}', 'reaction'],
-    0xF7 : ['darken {}', 'hex'],
-    0xF8 : ['debug {}', 'hex'],
-    0xF9 : ['target {}', 'target'],
-}
-
-TARGETS = {
-    0x16 : 'self',
-    0x17 : 'all monsters',
-    0x18 : 'other monsters',
-    0x19 : 'type 0 monsters',
-    0x1A : 'type 1 monsters',
-    0x1B : 'type 2 monsters',
-    0x1C : 'front row',
-    0x1D : 'back row',
-    0x1E : 'stunned monster',
-    0x1F : 'sleeping monster',
-    0x20 : 'charmed monster',
-    0x21 : 'weak monster',
-    0x22 : 'random anything',
-    0x23 : 'random other anything',
-    0x24 : 'random monster',
-    0x25 : 'random other monster',
-    0x26 : 'random front row',
-    0x27 : 'random back row',
-    0x28 : 'all characters',
-    0x29 : 'dead monsters'
-}
-
-COMMAND_CODES_BY_SLUG = {
-    COMMANDS[k][0].replace('{}', '').replace(' ', '').lower() : k
-    for k in COMMANDS
-    }
-
-TARGET_CODES_BY_SLUG = {
-    TARGETS[k].replace(' ', '') : k
-    for k in TARGETS
-    }
diff --git a/worlds/ff4fe/FreeEnterpriseForAP/f4c/bytes_patch.py b/worlds/ff4fe/FreeEnterpriseForAP/f4c/bytes_patch.py
deleted file mode 100644
index 907501cdebe2..000000000000
--- a/worlds/ff4fe/FreeEnterpriseForAP/f4c/bytes_patch.py
+++ /dev/null
@@ -1,31 +0,0 @@
-'''
-A BytesPatch object is like a patch() block, except instead of reading
-the patch data hex codes from text, it uses a provided bytes object,
-mainly so that scripts using F4C as a module do not necessarily have
-to encode a raw patch into a string to pass to F4C just so that F4C
-can convert it back to bytes.
-'''
-
-from . import compile_common
-
-class BytesPatch:
-    def __init__(self, data, *, unheadered_address=None, headered_address=None, bus_address=None):
-        self.data = data
-        self.unheadered_address = unheadered_address
-        self.headered_address = headered_address
-        self.bus_address = bus_address
-
-        if unheadered_address is None and headered_address is None and bus_address is None:
-            raise compile_common.BuildError('BytesPatch requires a rom address to be specified')
-
-    def get_unheadered_address(self):
-        if self.unheadered_address is not None:
-            addr = self.unheadered_address
-        elif self.headered_address is not None:
-            addr = self.headered_address - 0x200
-        elif self.bus_address is not None:
-            addr = compile_common.snes_to_rom_address(self.bus_address)
-        else:
-            raise compile_common.BuildError('BytesPatch requires a rom address to be specified')
-
-        return addr
diff --git a/worlds/ff4fe/FreeEnterpriseForAP/f4c/compile.py b/worlds/ff4fe/FreeEnterpriseForAP/f4c/compile.py
deleted file mode 100644
index 8d503e8d9398..000000000000
--- a/worlds/ff4fe/FreeEnterpriseForAP/f4c/compile.py
+++ /dev/null
@@ -1,379 +0,0 @@
-import argparse
-import re
-import os
-from datetime import datetime
-from io import BytesIO
-import time
-import random
-import hashlib
-
-from . import lark
-from . import ff4bin
-from . import consts
-from .bytes_patch import BytesPatch
-
-from . import compile_cache
-from . import compile_event
-from . import compile_consts
-from . import compile_text
-from . import compile_trigger
-from . import compile_postprocess
-from . import compile_ai_script
-from . import compile_placement
-from . import compile_map
-from . import compile_npc
-from . import compile_event_call
-from . import compile_shop
-from . import compile_patch
-from . import compile_myselfpatch
-from . import compile_actor
-from . import compile_drop_table
-from . import compile_formation
-from . import compile_monster
-from . import compile_spell_set
-from . 
import compile_gfx - -block_processors = { - 'consts' : compile_consts.process_consts_block, - 'event' : compile_event.process_event_block, - 'text' : compile_text.process_text_block, - 'trigger' : compile_trigger.process_trigger_block, - 'ai_script' : compile_ai_script.process_ai_script_block, - 'placement' : compile_placement.process_placement_block, - 'map' : compile_map.process_map_block, - 'mapgrid' : compile_map.process_mapgrid_block, - 'npc' : compile_npc.process_npc_block, - 'eventcall' : compile_event_call.process_eventcall_block, - 'shop' : compile_shop.process_shop_block, - 'actor' : compile_actor.process_actor_block, - 'droptable' : compile_drop_table.process_droptable_block, - 'formation' : compile_formation.process_formation_block, - 'monster' : compile_monster.process_monster_block, - 'spellset' : compile_spell_set.process_spellset_block, - - 'patch' : compile_patch.process_patch_block, - 'msfpatch' : compile_myselfpatch.process_msfpatch_block, - 'chr' : compile_gfx.process_chr_block, - 'pal' : compile_gfx.process_pal_block, -} - -class CompileError(Exception): - pass - -class MetricsContext: - def __init__(self, metrics, name): - self._metrics = metrics - self._name = name - - def __enter__(self): - self._metrics.start(self._name) - - def __exit__(self, type, value, traceback): - self._metrics.end(self._name) - -class Metrics: - def __init__(self): - self._start_times = {} - self._totals = {} - - def start(self, name): - self._start_times[name] = time.process_time() - self._last_name = name - - def end(self, name=None): - if name is None: - name = self._last_name - - if name in self._start_times: - total = time.process_time() - self._start_times[name] - del self._start_times[name] - self._totals.setdefault(name, 0) - self._totals[name] += total - - self._last_name = None - - def measure(self, name): - return MetricsContext(self, name) - - def __repr__(self): - max_name_length = max([len(n) for n in self._totals]) - fmt = '{:%d} : {}' % max_name_length - key_order = sorted(self._totals.keys(), key=lambda k: self._totals[k], reverse=True) - lines = [fmt.format(name, self._totals[name]) for name in key_order] - return '\n'.join(lines) - -class CompileOptions: - def __init__(self): - self.build_cache_path = None - self.clean_cache = False - self.force_recompile = False - self.shuffle_msfpatches = False - self.random_seed = None - -class CompileEnvironment: - def __init__(self, rom, options): - self.rom = rom - self.options = options - self.postprocess = compile_postprocess.Postprocessor() - self.cache = None - self.reports = {} - - self.rnd = random.Random() - if options.random_seed is not None: - if type(options.random_seed) is str: - numeric_seed = int(hashlib.sha1(options.random_seed.encode('utf-8')).hexdigest(), 16) - else: - numeric_seed = options.random_seed - self.rnd.seed(numeric_seed) - -class CompileReport: - def __init__(self): - self.metrics = None - self.symbol_table = None - -CODE_TOKENS = ['/*', '*/', '//', '{', '}', '(', ')'] -WHITESPACE = ['\n', '\r', '\t', ' '] -WHITESPACE_REGEX = re.compile(r'^\s+') - -def _tokenize_code_line(line): - line = line.replace('\r', '') - tokens = re.split(r'(\s+|/\*|//|\*/|\{|\}|\(|\))', line) - return filter(lambda x: x != '', tokens) - -''' -Main interface function for running the F4C compiler. 
-@param input_rom: Either a stream of the input ROM file, or a path to it -@param output_rom: Either a stream to write the output ROM data to, or an output file path -@param options: Compile options struct -@param scripts: Any number of paths to f4c/f4t script files and/or raw F4C/F4T data. -''' -def compile(input_rom, output_rom, *scripts, options=CompileOptions()): - metrics = Metrics() - metrics.start('total') - - with metrics.measure('load rom'): - rom = ff4bin.Rom(input_rom) - - with metrics.measure('load scripts'): - code_token_sets = [] - text_lines = [] - bytes_patches = [] - for script in scripts: - autodetect_format = True - is_text_file = False - in_multiline_comment = False - code_tokens = None - - if isinstance(script, BytesPatch): - bytes_patches.append(script) - continue - - if '\n' not in script and os.path.isfile(script): - extension = os.path.splitext(script)[1].lower() - if extension == '.f4c': - autodetect_format = False - elif extension == '.f4t': - autodetect_format = False - is_text_file = True - - close_lines = True - lines = open(script, 'r') - else: - close_lines = False - lines = script.splitlines(True) - - for line in lines: - if autodetect_format and not line.strip(): - autodetect_format = False - if line.strip().startswith('---'): - is_text_file = True - - if is_text_file: - line = re.sub(r'[\n\r]+$', '', line) - text_lines.append(line) - else: - if code_tokens is None: - code_tokens = [] - code_token_sets.append(code_tokens) - - with metrics.measure('tokenize line'): - tokens = _tokenize_code_line(line) - - for token in tokens: - if in_multiline_comment: - if token == '*/': - in_multiline_comment = False - elif token == '/*': - in_multiline_comment = True - elif token == '//': - break - else: - code_tokens.append(token) - - if close_lines: - lines.close() - - # read code blocks - with metrics.measure('read code blocks'): - blocks = [] - for code_tokens in code_token_sets: - def pop_whitespace(): - while code_tokens and code_tokens[0][0] in WHITESPACE: - code_tokens.pop(0) - - current_block = None - pop_whitespace() - - while code_tokens: - block_type = code_tokens.pop(0) - if not re.search(r'^[A-Za-z_][A-Za-z_0-9]*$', block_type): - raise CompileError("Expected block type identifier, got '{}' (context: {})".format(block_type, ''.join(code_tokens[:10]))) - - current_block = {'type' : block_type, 'parameters' : '', 'body' : ''} - blocks.append(current_block) - - pop_whitespace() - if code_tokens[0] == '(': - code_tokens.pop(0) - - param_tokens = [] - if not code_tokens: - raise CompileError("Unexpected EOF while parsing parameters for '{}' block".format(current_block['type'])) - - pop_whitespace() - while code_tokens[0] != ')': - param_tokens.append(code_tokens.pop(0)) - pop_whitespace() - if not code_tokens: - raise CompileError("Unexpected EOF while parsing parameters for '{}' block".format(current_block['type'])) - code_tokens.pop(0) - pop_whitespace() - current_block['parameters'] = ' '.join(param_tokens) - - if not code_tokens or code_tokens[0] != '{': - raise CompileError("Expected {{ to begin body definition for '{}({})' block".format(current_block['type'], current_block['parameters'])) - - code_tokens.pop(0) - if not code_tokens: - raise CompileError("Unexpected EOF while parsing body for '{}({})' block".format(current_block['type'], current_block['parameters'])) - body_tokens = [] - brace_level = 0 - while brace_level > 0 or code_tokens[0] != '}': - tk = code_tokens.pop(0) - body_tokens.append(tk) - - if tk == '}': - brace_level -= 1 - elif tk == '{': 
- brace_level += 1 - - if not code_tokens: - raise CompileError("Unexpected EOF while parsing body for '{}({})' block".format(current_block['type'], current_block['parameters'])) - - code_tokens.pop(0) - current_block['body'] = ''.join(body_tokens) - - # remove starting/ending brace lines for text blocks, if applicable - if current_block['type'] == 'text': - current_block['body'] = re.sub(r'^[ \t\f\v]*\n\r?', '', current_block['body']) - current_block['body'] = re.sub(r'\n\r?[ \t\f\v]*$', '', current_block['body']) - - pop_whitespace() - - # read text blocks - with metrics.measure('read text blocks'): - current_text_block = None - for line in text_lines: - m = re.search(r'^\s*---\s*(?P
.*[^\s])\s*---\s*$', line) - if m: - current_text_block = {'type' : 'text', 'parameters' : m.group('header'), 'body' : '', 'lines' : []} - blocks.append(current_text_block) - elif current_text_block is not None: - current_text_block['lines'].append(line) - - for block in blocks: - if block['type'] == 'text' and 'lines' in block: - lines = block['lines'] - while not lines[-1].strip(): - lines = lines[:-1] - block['body'] = '\n'.join(lines) - - # process blocks - env = CompileEnvironment(rom, options) - if options.build_cache_path: - env.cache = compile_cache.CompileCache(options.build_cache_path) - - blocks.sort(key = lambda b : (0 if b['type'] == 'consts' else 1)) - - for block in blocks: - if block['type'] not in block_processors: - print('No compiler found for block type "{}"'.format(block['type'])) - continue - - with metrics.measure('process {} block'.format(block['type'])): - process_func = block_processors[block['type']] - process_func(block, rom, env) - - # apply bytes patches - with metrics.measure('bytes patches'): - for bp in bytes_patches: - rom.add_patch(bp.get_unheadered_address(), bp.data) - - with metrics.measure('postprocess'): - env.postprocess.apply_registered_processes(env) - compile_postprocess.apply_cleanup_processes(env) - - if env.cache and options.clean_cache: - env.cache.cleanup() - - with metrics.measure('output'): - rom.save_rom(output_rom) - - metrics.end('total') - - report = CompileReport() - report.metrics = metrics - report.symbols = env.reports.get('symbols', {}) - return report - - -if __name__ == '__main__': - parser = argparse.ArgumentParser() - parser.add_argument('rom') - parser.add_argument('code_files', nargs='*') - parser.add_argument('--no-default-consts', action='store_true') - parser.add_argument('-o', '--output') - parser.add_argument('-t', '--test', action="store_true") - parser.add_argument('-m', '--metrics', action="store_true") - parser.add_argument('-l', '--list', action='append') - args = parser.parse_args() - - scripts = [] - if not args.no_default_consts: - scripts.append(os.path.join(os.path.dirname(__file__), 'default.consts')) - - if args.list: - for list_file in args.list: - with open(list_file) as infile: - for line in infile: - if line.strip() and not line.strip().startswith('#'): - scripts.append(line.strip()) - - scripts.extend(args.code_files) - - output_buffer = BytesIO() - - metrics = compile(args.rom, output_buffer, *scripts) - - # apply output - if not args.test: - output_filename = args.output - if output_filename is None: - parts = os.path.splitext(args.rom) - output_filename = parts[0] + '.f4c-' + datetime.now().strftime('%Y%m%d%H%M%S') + parts[1] - with open(output_filename, 'wb') as outfile: - output_buffer.seek(0) - outfile.write(output_buffer.read()) - - if args.metrics: - print(metrics) diff --git a/worlds/ff4fe/FreeEnterpriseForAP/f4c/compile_actor.py b/worlds/ff4fe/FreeEnterpriseForAP/f4c/compile_actor.py deleted file mode 100644 index 096a6d78bf32..000000000000 --- a/worlds/ff4fe/FreeEnterpriseForAP/f4c/compile_actor.py +++ /dev/null @@ -1,55 +0,0 @@ -from . 
import compile_common - -def process_actor_block(block, rom, env): - params_tree = compile_common.parse(block['parameters'], 'actor', 'actor_block_params') - tree = compile_common.parse(block['body'], 'actor', 'actor_block_body') - - actor_index = params_tree.children[0] - 1 - - for node in tree.children: - if node.data == 'name': - rom.actor_name_ids[actor_index] = node.children[0] - elif node.data == 'load': - load_params = node.children[0] - if load_params.data == 'slot': - rom.actor_load_info[actor_index] = 0x80 | load_params.children[0] - else: - rom.actor_load_info[actor_index] = load_params.children[0] - elif node.data == 'save': - if actor_index < 20: - rom.actor_save_info[actor_index] = node.children[0] - elif node.data == 'discard': - if actor_index < 20: - rom.actor_save_info[actor_index] = 0x80 - elif node.data == 'commands': - commands = list(node.children) - while len(commands) < 5: - commands.append(0xFF) - rom.actor_commands[actor_index] = commands[:5] - else: - gear = list(rom.actor_gear[actor_index]) - if node.data == 'right_hand': - gear[3] = node.children[0] - if len(node.children) > 1: - gear[4] = node.children[1] - elif node.children[0] == 0: - gear[4] = 0 - else: - gear[4] = 1 - elif node.data == 'left_hand': - gear[5] = node.children[0] - if len(node.children) > 1: - gear[6] = node.children[1] - elif node.children[0] == 0: - gear[6] = 0 - else: - gear[6] = 1 - elif node.data == 'head': - gear[0] = node.children[0] - elif node.data == 'body': - gear[1] = node.children[0] - elif node.data == 'arms': - gear[2] = node.children[0] - - rom.actor_gear[actor_index] = gear - diff --git a/worlds/ff4fe/FreeEnterpriseForAP/f4c/compile_ai_script.py b/worlds/ff4fe/FreeEnterpriseForAP/f4c/compile_ai_script.py deleted file mode 100644 index 7f03608c0543..000000000000 --- a/worlds/ff4fe/FreeEnterpriseForAP/f4c/compile_ai_script.py +++ /dev/null @@ -1,122 +0,0 @@ -from . import compile_common -from . import lark -from . import consts -from . 
import ai_common - -class AiScriptTransformer(lark.Transformer): - def races(self, children): - b = 0 - for n in children: - b |= (1 << n) - return b - - def elements(self, children): - b = 0 - for n in children: - b |= (1 << n) - return b - - def speed_delta(self, n): - sign,value = n - if str(sign) == '-': - return 0x80 | (value & 0x7F) - else: - return value & 0x7F - - def condition_increment(self, n): - return 0x01 - - def condition_set(self, n): - v = n[0] - return 0x80 | (v & 0x7F) - - def reaction_number(self, n): - v = n[0] - return 0x80 | (v & 0x7F) - - def target(self, children): - if type(children[0]) is int: - return children[0] - - slug = ''.join([str(x).lower() for x in children]) - if slug not in ai_common.TARGET_CODES_BY_SLUG: - raise ValueError("Could not find code for target specification: {}".format(slug)) - - return ai_common.TARGET_CODES_BY_SLUG[slug] - - - def ai_command(self, children): - slug = '' - parameter = None - for c in children: - if type(c) is int: - parameter = c - else: - slug += str(c).lower() - - if slug in ai_common.COMMAND_CODES_BY_SLUG: - code = [ai_common.COMMAND_CODES_BY_SLUG[slug]] - if parameter is not None: - code.append(parameter) - return code - elif slug == 'use': - return [parameter] - elif slug == 'useongroup': - return [parameter + 0x30] - elif slug == 'usecommand': - return [parameter + 0xC0] - elif slug == 'wait': - return [0xFE] - elif slug == 'chaininto': - return [0xFB] - else: - raise ValueError("Unrecognized AI script command: {}".format(slug)) - - def chain_block(self, children): - return [0xFD] + self.partition_commands(children, 0xFB) + [0xFC] - - def ai_script_block_body(self, children): - return self.partition_commands(children, 0xFE) + [0xFF] - - def partition_commands(self, children, separator_byte=None): - sections = [ [] ] - - for c in children: - sections[-1].extend(c) - if c[0] == 0xFE or c[0] == 0xFB and len(sections) > 1: - sections[-2].extend(sections[-1]) - sections.pop() - elif c[0] <= 0xE7 or c[0] == 0xFD: - sections.append([]) - - if len(sections) > 1: - sections[-2].extend(sections[-1]) - sections.pop() - - result = [] - for i,section in enumerate(sections): - if separator_byte is not None and i > 0: - result.append(separator_byte) - result.extend(section) - - return result - -_ai_script_transformer = AiScriptTransformer() - -def compile_ai_script(script_body): - tree = compile_common.parse(script_body, 'ai', 'ai_script_block_body') - return _ai_script_transformer.transform(tree) - -def process_ai_script_block(block, rom, env): - params_tree = compile_common.parse(block['parameters'], 'ai', 'ai_script_block_parameters') - encoded_script = compile_ai_script(block['body']) - - if params_tree.data == 'normal_script': - rom.monster_scripts[params_tree.children[0]] = encoded_script - elif params_tree.data == 'moon_script': - rom.moon_monster_scripts[params_tree.children[0]] = encoded_script - else: - raise ValueError("Don't know where to put script for ai_script({})".format(block['parameters'])) - - - diff --git a/worlds/ff4fe/FreeEnterpriseForAP/f4c/compile_cache.py b/worlds/ff4fe/FreeEnterpriseForAP/f4c/compile_cache.py deleted file mode 100644 index 2187dfcb8645..000000000000 --- a/worlds/ff4fe/FreeEnterpriseForAP/f4c/compile_cache.py +++ /dev/null @@ -1,47 +0,0 @@ -import os -import hashlib -import pickle -import re - -class CompileCache: - def __init__(self, path): - self._cache_path = path - self._used_names = set() - - def get_block_cache_name(self, block): - tag = block['type'] + ' ' + 
block['parameters'].strip() - tag = re.sub(r'\s+', '_', tag) - - md5 = hashlib.md5() - sha1 = hashlib.sha1() - md5.update(block['body'].encode('utf-8')) - sha1.update(block['body'].encode('utf-8')) - - return '{}_{}_{}'.format(tag, md5.hexdigest(), sha1.hexdigest()) - - def load(self, cache_name): - path = os.path.join(self._cache_path, cache_name) - if os.path.exists(path): - self._used_names.add(cache_name) - with open(path, 'rb') as infile: - data = pickle.load(infile) - return data - - return None - - def save(self, cache_name, data): - path = os.path.join(self._cache_path, cache_name) - if not os.path.exists(self._cache_path): - os.makedirs(self._cache_path) - with open(path, 'wb') as outfile: - pickle.dump(data, outfile) - self._used_names.add(cache_name) - - def cleanup(self): - if not os.path.exists(self._cache_path): - return - - for filename in os.listdir(self._cache_path): - filepath = os.path.join(self._cache_path, filename) - if filename not in self._used_names and os.path.isfile(filepath): - os.unlink(filepath) diff --git a/worlds/ff4fe/FreeEnterpriseForAP/f4c/compile_common.py b/worlds/ff4fe/FreeEnterpriseForAP/f4c/compile_common.py deleted file mode 100644 index 64a754ac49cd..000000000000 --- a/worlds/ff4fe/FreeEnterpriseForAP/f4c/compile_common.py +++ /dev/null @@ -1,91 +0,0 @@ -import pkgutil - -from . import lark -from . import consts -import os - -_parsers = {} -_common_grammar = None -_grammar_path = os.path.dirname(__file__) - -class CompileError(Exception): - pass - -class ParseError(Exception): - pass - -def get_parser(name, start='start'): - global _common_grammar - - if _common_grammar is None: - _common_grammar = pkgutil.get_data(__name__, "grammar_common.lark").decode() - - key = '{}|{}'.format(name, start) - if key not in _parsers: - grammar = pkgutil.get_data(__name__, f'grammar_{name}.lark').decode() - _parsers[key] = lark.Lark(grammar + _common_grammar, start=start) - - return _parsers[key] - -def snes_to_rom_address(snes_address): - bank = (snes_address >> 16) & 0xFF - addr = (snes_address & 0xFFFF) - if bank == 0x7E or bank == 0x7F: - raise ValueError("Cannot convert SNES address {:X} to ROM address : address is WRAM".format(snes_address)) - if bank >= 0x80: - bank -= 0x80 - if addr < 0x8000: - if bank >= 0x40 and bank <= 0x6F: - addr += 0x8000 - else: - raise ValueError("Cannot convert SNES address {:X} to ROM address : this address does not map to ROM".format(snes_address)) - if not (bank & 0x01): - addr -= 0x8000 - bank = (bank >> 1) - return ((bank << 16) | addr) - -class ValueTransformer(lark.Transformer): - def hex_number(self, n): - v = n[0] - return int(v[1:], 16) - - def decimal_number(self, n): - v = n[0] - return int(v) - - def direction_up(self, n): - return 0 - - def direction_right(self, n): - return 1 - - def direction_down(self, n): - return 2 - - def direction_left(self, n): - return 3 - - def bus_address(self, n): - return snes_to_rom_address(n[0]) - - def unheadered_rom_address(self, n): - return n[0] - - def headered_rom_address(self, n): - return n[0] - 0x200 - -_value_transformer = ValueTransformer() - -def transform_values(tree): - return _value_transformer.transform(tree) - -def parse(text, grammar_name, start_symbol='start'): - parser = get_parser(grammar_name, start_symbol) - try: - tree = parser.parse(text) - except lark.common.ParseError as e: - raise lark.common.ParseError(str(e) + ' - input:\n' + text) - tree = transform_values(tree) - tree = consts.resolve_consts(tree) - return tree - diff --git 
a/worlds/ff4fe/FreeEnterpriseForAP/f4c/compile_consts.py b/worlds/ff4fe/FreeEnterpriseForAP/f4c/compile_consts.py deleted file mode 100644 index c7999e0aa6a0..000000000000 --- a/worlds/ff4fe/FreeEnterpriseForAP/f4c/compile_consts.py +++ /dev/null @@ -1,28 +0,0 @@ -from . import consts -from . import compile_common -import re - -HEX_REGEX = re.compile(r'^\$[A-Fa-f0-9]+$') -DEC_REGEX = re.compile(r'^[0-9]+$') -IDENTIFIER_REGEX = re.compile(r'^[A-Za-z_][A-Za-z_0-9]*$') - -def process_consts_block(block, rom, env): - family = block['parameters'].strip() - - tokens = block['body'].split() - for i in range(0, len(tokens), 2): - if i + 1 >= len(tokens): - raise compile_common.ParseError('Expected identifier after "{}" in const definition (family "{}")'.format(tokens[i], family)) - - if HEX_REGEX.match(tokens[i]): - value = int(tokens[i][1:], 16) - elif DEC_REGEX.match(tokens[i]): - value = int(tokens[i]) - else: - raise compile_common.ParseError('Expected value, got "{}" in const definition (family "{}")'.format(tokens[i], family)) - - if not IDENTIFIER_REGEX.match(tokens[i+1]): - raise compile_common.ParseError('Invalid identifier name "{}" in const definition (family "{}")'.format(tokens[i+1], family)) - - consts.set_value(tokens[i+1], family, value) - diff --git a/worlds/ff4fe/FreeEnterpriseForAP/f4c/compile_drop_table.py b/worlds/ff4fe/FreeEnterpriseForAP/f4c/compile_drop_table.py deleted file mode 100644 index a54c773c7e6d..000000000000 --- a/worlds/ff4fe/FreeEnterpriseForAP/f4c/compile_drop_table.py +++ /dev/null @@ -1,20 +0,0 @@ -from . import ff4struct -from . import compile_common - -def process_droptable_block(block, rom, env): - params_tree = compile_common.parse(block['parameters'], 'drop_table', 'droptable_block_params') - droptable_id = params_tree.children[0] - - droptable = ff4struct.drop_table.decode(rom.drop_tables[droptable_id]) - - tree = compile_common.parse(block['body'], 'drop_table', 'droptable_block_body') - for entry in tree.children: - item = None - if len(entry.children) > 1: - item = entry.children[1] - rarity = str(entry.children[0].children[0]) - if rarity not in ['common', 'uncommon', 'rare', 'mythic']: - raise compile_common.CompileError(f"Unsupported drop table rarity {rarity} in drop table ${droptable_id:02X}") - setattr(droptable, rarity, item) - - rom.drop_tables[droptable_id] = droptable.encode() diff --git a/worlds/ff4fe/FreeEnterpriseForAP/f4c/compile_event.py b/worlds/ff4fe/FreeEnterpriseForAP/f4c/compile_event.py deleted file mode 100644 index 51dabc5e8bd2..000000000000 --- a/worlds/ff4fe/FreeEnterpriseForAP/f4c/compile_event.py +++ /dev/null @@ -1,228 +0,0 @@ -from . import lark -from . import event_common -from . import compile_common -from . 
import consts - -_EVENT_COMMAND_CODES = {} -_PLACEMENT_COMMAND_CODES = {} -_VEHICLE_CODES = {} - -class EventCodeTransformer(lark.Transformer): - def __init__(self, *args, **kwargs): - lark.Transformer.__init__(self, *args, **kwargs) - self._placement_consts = {} - - def statuses(self, statuses): - status_byte = 0 - for v in statuses: - status_byte |= (1 << v) - return status_byte - - def placement_const_definition(self, n): - value,name = n - self._placement_consts[name] = value - return None - - def evcmd_placement(self, n): - placement,command = n - try: - if placement.data == 'placement_const': - placement_number = self._placement_consts[str(placement.children[0])] - except AttributeError: - placement_number = placement - - command_text = ''.join([str(x) for x in command.children if type(x) is not int]) - if type(command.children[-1]) is int: - # directional command - command_code = _PLACEMENT_COMMAND_CODES[command_text + "up"] - command_code += command.children[-1] - else: - command_code = _PLACEMENT_COMMAND_CODES[command_text] - - return [((placement_number << 4) | command_code)] - - def evcmd_player(self, n): - player_keyword,command = n - command_text = ''.join([str(x) for x in command.children if type(x) is not int]) - if type(command.children[-1]) is int: - # directional command - command_code = _EVENT_COMMAND_CODES["player{}up".format(command_text)] - command_code += command.children[-1] - else: - command_code = _EVENT_COMMAND_CODES["player{}".format(command_text)] - - return [command_code] - - def evcmd_message(self, elements): - number = elements[1] - if len(elements) > 2 and elements[2].data == 'message_bank3_specifier': - return [0xF6, number] - elif number >= 0x100: - return [0xF1, number - 0x100] - else: - return [0xF0, number] - - def evcmd_confirm(self, elements): - return [0xF8, elements[2] - 0x100] - - def evcmd_give_hp(self, elements): - return [0xDE, int(elements[2] / 10)] - - def evcmd_give_mp(self, elements): - return [0xDF, int(elements[2] / 10)] - - def evcmd_restore_hp(self, elements): - return [0xDE, 0xFE] - - def evcmd_restore_mp(self, elements): - return [0xDF, 0xFE] - - def evcmd_clear_status(self, elements): - status_byte = 0 - if len(elements) > 2: - status_byte = elements[3] - - return [0xE3, status_byte] - - def evcmd_npc(self, elements): - result = self.event_command(elements) - result[1] &= 0xFF - return result - - def evcmd_music(self, elements): - if len(elements) > 2: - return [0xEA, elements[1]] - else: - return [0xFA, elements[1]] - - def evcmd_load_map(self, elements): - # command is "load map M at X Y facing D [flags]" - map_number = elements[2] - x = elements[4] - y = elements[5] - x_and_direction = x - flags = 0 - - if map_number & 0x100: - flags |= 0x80 - map_number &= 0xFF - - for flag_node in elements[6:]: - if flag_node.data == 'facing_specifier': - direction = flag_node.children[0] - x_and_direction = ((x & 0x3F) | ((direction & 0x03) << 6)) - elif flag_node.data == 'no_transition_specifier': - flags |= 0x20 - elif flag_node.data == 'vehicle_specifier': - if type(flag_node.children[1]) is int: - flags |= (flag_node.children[1] & 0x1f) - else: - slug = ''.join([str(t) for t in flag_node.children]) - flags |= _VEHICLE_CODES[slug] - elif flag_node.data == 'no_launch_specifier': - flags |= 0x40 - - return [0xFE, map_number, x_and_direction, y, flags] - - def event_command(self, elements): - slug = '' - for e in elements: - if type(e) is lark.Tree or type(e) is int: - break - else: - slug += str(e) - - result = [] - if slug in 
_EVENT_COMMAND_CODES: - cmd_code = _EVENT_COMMAND_CODES[slug] - result.append(cmd_code) - param_elems = filter(lambda e: type(e) is int, elements) - result.extend(param_elems) - return result - - def ev_cancel(self, elements): - return [0xFF] - - def batch_block(self, elements): - if type(elements[0]) is int: - iterations = elements[0] - length = len(elements) - 1 - subelements = elements[1:] - else: - iterations = 1 - length = len(elements) - subelements = elements - - batch_body = [] - for byte_list in subelements: - if batch_body and (byte_list[0] >= 0xD0 or batch_body[0] >= 0xD0): - raise ValueError("A batch block may only contain either (a) a single non-player/placement command, or (b) any number of only player/placement commands") - batch_body.extend(byte_list) - result = [event_common.BATCH_COMMAND_CODE, iterations, len(batch_body)] + batch_body - return result - - def extension_command(self, elements): - result = list([n & 0xFF for n in elements[0].children]) - if len(elements) > 1: - block_bytes = [] - for byte_list in elements[1].children: - block_bytes.extend(byte_list) - if len(block_bytes) > 254: - raise compile_common.CompileError("Event sub-block is longer than 254 bytes -- {}".format(str(elements))) - result.append(len(block_bytes)) - result.extend(block_bytes) - return result - - -def build_lookup_tables(): - if _EVENT_COMMAND_CODES: - return - - for cmd_code in event_common.COMMANDS: - cmd_data = event_common.COMMANDS[cmd_code] - slug = cmd_data[0].replace('{}', '').replace(' ', '').lower() - _EVENT_COMMAND_CODES[slug] = cmd_code - - for cmd_code in event_common.PLACEMENT_COMMANDS: - slug = event_common.PLACEMENT_COMMANDS[cmd_code].replace(' ', '').lower() - _PLACEMENT_COMMAND_CODES[slug] = cmd_code - - for code in event_common.VEHICLES: - slug = event_common.VEHICLES[code].replace(' ', '').lower() - _VEHICLE_CODES[slug] = code - - - -def process_event_block(block, rom, env): - params_tree = compile_common.parse(block['parameters'], 'event', 'event_block_parameters') - event_id = params_tree.children[0] - - bytecode = None - if env.cache: - cache_name = env.cache.get_block_cache_name(block) - if not env.options.force_recompile: - bytecode = env.cache.load(cache_name) - - if bytecode is None: - bytecode = compile_event_script(block['body']) - if env.cache: - env.cache.save(cache_name, bytecode) - - #print(' '.join(['{:02X}'.format(x) for x in bytecode])) - - rom.event_scripts[event_id] = bytecode - -def compile_event_script(script): - build_lookup_tables() - - tree = compile_common.parse(script, 'event', 'event_block_body') - tree = EventCodeTransformer().transform(tree) - - bytecode = [] - for elem in tree.children: - if type(elem) is list: - bytecode.extend(elem) - bytecode.append(0xFF) - - return bytecode - diff --git a/worlds/ff4fe/FreeEnterpriseForAP/f4c/compile_event_call.py b/worlds/ff4fe/FreeEnterpriseForAP/f4c/compile_event_call.py deleted file mode 100644 index 249fe936e0d2..000000000000 --- a/worlds/ff4fe/FreeEnterpriseForAP/f4c/compile_event_call.py +++ /dev/null @@ -1,42 +0,0 @@ -from . import consts -from . import compile_common -from . 
import ff4struct - -def compile_event_call(body): - tree = compile_common.parse(body, 'event_call', 'eventcall_block_body') - if not tree.children: - return [] - - event_call = ff4struct.event_call.EventCall() - for node in tree.children: - if node.data == 'messages': - event_call.parameters.extend(node.children) - else: - case = ff4struct.event_call.EventCallCase() - for condition in node.children[:-1]: - condition = ff4struct.event_call.EventCallCondition( - flag = condition.children[0], - value = (False if condition.data == 'not_condition' else True) - ) - case.conditions.append(condition) - case.event = node.children[-1] - event_call.cases.append(case) - - return event_call.encode() - - -def process_eventcall_block(block, rom, env): - params_tree = compile_common.parse(block['parameters'], 'event_call', 'eventcall_block_params') - encoded_event_call = compile_event_call(block['body']) - rom.event_calls[params_tree.children[0]] = encoded_event_call - -if __name__ == "__main__": - test = compile_event_call(''' - if not $22, $33: - $19 - if not $44: - $18 - else: - $00 - ''') - print(' '.join(['{:02X}'.format(x) for x in test])) diff --git a/worlds/ff4fe/FreeEnterpriseForAP/f4c/compile_formation.py b/worlds/ff4fe/FreeEnterpriseForAP/f4c/compile_formation.py deleted file mode 100644 index 0c9b28dd0aa2..000000000000 --- a/worlds/ff4fe/FreeEnterpriseForAP/f4c/compile_formation.py +++ /dev/null @@ -1,109 +0,0 @@ -from . import compile_common -from . import lark -from . import ff4struct - -class FormationTransformer(lark.Transformer): - def __init__(self, formation): - lark.Transformer.__init__(self) - self.formation = formation - - def yes(self, n): - return True - - def no(self, n): - return False - - def back_attack(self, n): - self.formation.back_attack = (len(n) == 0) - return n - - def boss_death(self, n): - self.formation.boss_death = (len(n) == 0) - return n - - def eggs(self, n): - self.formation.eggs = list(n) - return n - - def monsters(self, n): - monster_types = [0xFF, 0xFF, 0xFF] - monster_qtys = [0, 0, 0] - for i,m in enumerate(n): - monster_types[i], monster_qtys[i] = m.children - self.formation.monster_types = monster_types - self.formation.monster_qtys = monster_qtys - return n - - def calling(self, n): - self.formation.calling = (len(n) == 0) - return n - - def transforming(self, n): - self.formation.transforming = (len(n) == 0) - return n - - def arrangement(self, n): - self.formation.arrangement = n[0] - return n - - def can_run(self, n): - self.formation.no_flee = False - return n - - def cant_run(self, n): - self.formation.no_flee = True - return n - - def can_gameover(self, n): - self.formation.no_gameover = False - return n - - def no_gameover(self, n): - self.formation.no_gameover = True - return n - - def music(self, n): - music_values = { - 'regular' : ff4struct.formation.REGULAR_MUSIC, - 'boss' : ff4struct.formation.BOSS_MUSIC, - 'fiend' : ff4struct.formation.FIEND_MUSIC, - 'continue' : ff4struct.formation.CONTINUE_MUSIC, - } - self.formation.music = music_values[str(n[0].children[0])] - return n - - def character_battle(self, n): - self.formation.character_battle = (len(n) == 0) - return n - - def auto_battle(self, n): - self.formation.auto_battle = (len(n) == 0) - return n - - def floating_enemies(self, n): - self.formation.floating_enemies = (len(n) == 0) - return n - - def transparent(self, n): - self.formation.transparent = (len(n) == 0) - return n - - def cursor_graph_index(self, n): - self.formation.cursor_graph_index = n[0] - return n - - def gfx_bits(self, 
n): - self.formation.gfx_bits = n[0] - return n - -def process_formation_block(block, rom, env): - params_tree = compile_common.parse(block['parameters'], 'formation', 'formation_block_params') - formation_id = params_tree.children[0] - formation = ff4struct.formation.decode(rom.formations[formation_id]) - - tree = compile_common.parse(block['body'], 'formation', 'formation_block_body') - t = FormationTransformer(formation) - t.transform(tree) - - rom.formations[formation_id] = formation.encode() - diff --git a/worlds/ff4fe/FreeEnterpriseForAP/f4c/compile_gfx.py b/worlds/ff4fe/FreeEnterpriseForAP/f4c/compile_gfx.py deleted file mode 100644 index 45e7bc5a9852..000000000000 --- a/worlds/ff4fe/FreeEnterpriseForAP/f4c/compile_gfx.py +++ /dev/null @@ -1,64 +0,0 @@ -from . import compile_common -import re - -def process_chr_block(block, rom, env): - params_tree = compile_common.parse(block['parameters'], 'gfx', 'chr_block_params') - patch_address = params_tree.children[0] - if len(params_tree.children) > 1: - bitdepth = int(str(params_tree.children[1].children[0])[0]) - else: - bitdepth = 4 - - pixels = [int(c, 16) for c in re.sub(r'[^A-Fa-f0-9]', '', block['body'])] - if len(pixels) != 64 and len(pixels) != 256: - raise compile_common.CompileError("CHR block does not contain exactly 64 or 256 pixels: {}".format(block['body'])) - - interleaved = [] - num_chrs = (len(pixels) >> 6) - - for chr_id in range(num_chrs): - if num_chrs == 1: - chr_pixels = pixels - else: - chr_pixels = [] - for y in range(8): - index = (128 * (chr_id >> 1)) + (8 * (chr_id & 1)) + (16 * y) - chr_pixels.extend(pixels[index:index+8]) - - bitplanes = [] - for layer in range(bitdepth): - bitplanes.append([0x00] * 8) - bitmask = (1 << layer) - for y in range(8): - bitrow = 0x00 - for x in range(8): - if (chr_pixels[y * 8 + x] & bitmask): - bitrow |= (0x80 >> x) - - bitplanes[layer][y] = bitrow - - for i in range(0, bitdepth, 2): - if i + 1 < bitdepth: - for j in range(8): - interleaved.append(bitplanes[i][j]) - interleaved.append(bitplanes[i + 1][j]) - else: - interleaved.extend(bitplanes[i]) - - rom.add_patch(patch_address, interleaved) - -def process_pal_block(block, rom, env): - params_tree = compile_common.parse(block['parameters'], 'gfx', 'pal_block_params') - patch_address = params_tree.children[0] - - tree = compile_common.parse(block['parameters'], 'gfx', 'pal_block_body') - data = [] - for rgb in tree.children: - r, g, b = rgb.children - if min([r, g, b]) < 0 or max([r, g, b]) >= 32: - raise compile_common.CompileError("PAL block contains RGB values outside accepted range 0-31: {}".format(block['body'])) - color = (b << 10) | (g << 5) | r - data.append(color & 0xff) - data.append((color >> 8) & 0xff) - - rom.add_patch(patch_address, data) diff --git a/worlds/ff4fe/FreeEnterpriseForAP/f4c/compile_map.py b/worlds/ff4fe/FreeEnterpriseForAP/f4c/compile_map.py deleted file mode 100644 index 5bc05b14dd97..000000000000 --- a/worlds/ff4fe/FreeEnterpriseForAP/f4c/compile_map.py +++ /dev/null @@ -1,117 +0,0 @@ -from . import compile_common -from . import ff4struct -from . 
import lark - -def process_mapgrid_block(block, rom, env): - params_tree = compile_common.parse(block['parameters'], 'map', 'mapgrid_block_params') - tree = compile_common.parse(block['body'], 'map', 'mapgrid_block_body') - - map_id = params_tree.children[0] - start_x, start_y = (0, 0) - if len(params_tree.children) > 1: - start_x, start_y = params_tree.children[1].children - - map_grid = ff4struct.map_grid.decode(rom.map_grids[map_id]) - x = start_x - y = start_y - for n in tree.children: - if n.data == 'tile': - map_grid[x][y] = int(n.children[0], 16) - x += 1 - if x >= 32: - x = start_x - y += 1 - elif n.data == 'eol': - x = start_x - y += 1 - - rom.map_grids[map_id] = map_grid.encode() - - -class MapInfoTransformer(lark.Transformer): - def __init__(self, map_info): - lark.Transformer.__init__(self) - self.map_info = map_info - - def enabled(self, n): - return True - - def disabled(self, n): - return False - - def npc_palettes(self, n): - self.map_info.npc_palette_0 = n[0] - self.map_info.npc_palette_1 = n[1] - return n - - def background_transparent(self, n): - self.map_info.bg_translucent = True - return n - - def background_opaque(self, n): - self.map_info.bg_translucent = False - return n - - def background_scroll_both(self, n): - self.map_info.bg_scroll_vertical = True - self.map_info.bg_scroll_horizontal = True - return n - - def background_scroll_vertical(self, n): - self.map_info.bg_scroll_vertical = True - self.map_info.bg_scroll_horizontal = False - return n - - def background_scroll_horizontal(self, n): - self.map_info.bg_scroll_vertical = False - self.map_info.bg_scroll_horizontal = True - return n - - def background_scroll_none(self, n): - self.map_info.bg_scroll_vertical = False - self.map_info.bg_scroll_horizontal = False - return n - - def underground_npcs(self, n): - self.map_info.underground_npcs = True - return n - - def underground_map_grid(self, n): - self.map_info.underground_map_grid = True - return n - - def battle_background(self, n): - self.map_info.battle_background = n[0] - self.map_info.battle_background_alt_palette = False - return n - - def battle_background_alt(self, n): - self.map_info.battle_background = n[0] - self.map_info.battle_background_alt_palette = True - return n - - # catch all for branches that just set a property to a value, - # where the branch name is also the name of the MapInfo - # property to change - def __getattr__(self, key): - if hasattr(self.map_info, key): - def func(n): - setattr(self.map_info, key, n[0]) - return n - - return func - else: - raise AttributeError() - - -def process_map_block(block, rom, env): - params_tree = compile_common.parse(block['parameters'], 'map', 'map_block_params') - tree = compile_common.parse(block['body'], 'map', 'map_block_body') - - map_id = params_tree.children[0] - - map_info = ff4struct.map_info.decode(rom.map_infos[map_id]) - transformer = MapInfoTransformer(map_info) - transformer.transform(tree) - - rom.map_infos[map_id] = map_info.encode() diff --git a/worlds/ff4fe/FreeEnterpriseForAP/f4c/compile_monster.py b/worlds/ff4fe/FreeEnterpriseForAP/f4c/compile_monster.py deleted file mode 100644 index 1790072fa05f..000000000000 --- a/worlds/ff4fe/FreeEnterpriseForAP/f4c/compile_monster.py +++ /dev/null @@ -1,111 +0,0 @@ -from . import compile_common -from . import ff4struct -from . 
import lark - -class MonsterTransformer(lark.Transformer): - def __init__(self, monster_id, rom): - lark.Transformer.__init__(self) - self.monster_id = monster_id - self.monster = ff4struct.monster.decode(rom.monsters[monster_id]) - self.monster_gfx = ff4struct.monster_gfx.decode(rom.monster_gfx[monster_id]) - self.monster_gp = rom.monster_gp[monster_id] - self.monster_xp = rom.monster_xp[monster_id] - - def attack(self, n): - return "attack" - - def resist(self, n): - return "resist" - - def weak(self, n): - return "weak" - - def boss(self, n): - self.monster.boss = (len(n) == 0) - return n - - def level(self, n): - self.monster.level = n[0] - return n - - def hp(self, n): - self.monster.hp = n[0] - return n - - def gp(self, n): - self.monster_gp = n[0] - return n - - def xp(self, n): - self.monster_xp = n[0] - return n - - def stat_index(self, n): - stat_name = '_'.join([str(c) for c in n[0].children]) - setattr(self.monster, stat_name + "_index", n[1]) - return n - - def drop_index(self, n): - self.monster.drop_index = n[0] - return n - - def drop_rate(self, n): - self.monster.drop_rate = n[0] - return n - - def attack_sequence(self, n): - self.monster.attack_sequence = n[0] - return n - - def element(self, n): - setattr(self.monster, n[0] + '_elements', set(n[1:])) - return n - - def status(self, n): - setattr(self.monster, n[0] + '_statuses', set(n[1:])) - return n - - def spell_power(self, n): - if len(n) == 0: - self.monster.spell_power = None - else: - self.monster.spell_power = n[0] - return n - - def race(self, n): - self.monster.races = set(n) - return n - - def reaction_sequence(self, n): - if len(n) == 0: - self.monster.reaction_sequence = None - else: - self.monster.reaction_sequence = n[0] - return n - - def gfx_size(self, n): - self.monster_gfx.size = n[0] - return n - - def gfx_palette(self, n): - self.monster_gfx.palette = n[0] - return n - - def gfx_pointer(self, n): - self.monster_gfx.pointer = n[0] - return n - - -def process_monster_block(block, rom, env): - params_tree = compile_common.parse(block['parameters'], 'monster', 'monster_block_params') - monster_id = params_tree.children[0] - - tree = compile_common.parse(block['body'], 'monster', 'monster_block_body') - t = MonsterTransformer(monster_id, rom) - t.transform(tree) - - rom.monsters[monster_id] = t.monster.encode() - rom.monster_gfx[monster_id] = t.monster_gfx.encode() - rom.monster_gp[monster_id] = t.monster_gp - rom.monster_xp[monster_id] = t.monster_xp - diff --git a/worlds/ff4fe/FreeEnterpriseForAP/f4c/compile_myselfpatch.py b/worlds/ff4fe/FreeEnterpriseForAP/f4c/compile_myselfpatch.py deleted file mode 100644 index 63b052fdf5a8..000000000000 --- a/worlds/ff4fe/FreeEnterpriseForAP/f4c/compile_myselfpatch.py +++ /dev/null @@ -1,879 +0,0 @@ -import pkgutil -import re -import os -import math -import hashlib -import pickle - -try: - from . import ff4struct - from . 
import lark -except ImportError: - import ff4struct - import lark - -_msf_parser = None -_expr_transformer = None - -_PART_SIZES = { - 'byte' : 1, - 'short' : 2, - 'long' : 3, - 'high' : 1, - 'bank' : 1 -} - -_OPERATIONS = { - '+' : (lambda x, y: x + y), - '-' : (lambda x, y: x - y), - '*' : (lambda x, y: x * y), - '/' : (lambda x, y: x // y), - '%' : (lambda x, y: x % y), - '&' : (lambda x, y: x & y), - '|' : (lambda x, y: x | y), - '^' : (lambda x, y: x ^ y), - '>>' : (lambda x, y: x >> y), - '<<' : (lambda x, y: x << y), -} - -def _extract_part(value, part): - if part == 'byte': - return (value & 0xFF) - elif part == 'short': - return (value & 0xFFFF) - elif part == 'long': - return (value) - elif part == 'high': - return (value >> 8) & 0xFF - elif part == 'bank': - return (value >> 16) & 0xFF - else: - raise ValueError(f"Unknown numeric part name {part}") - -def _load_parser(): - global _msf_parser - if _msf_parser is None: - infile = pkgutil.get_data(__name__, "grammar_myselfpatch.lark").decode() - _msf_parser = lark.Lark(infile) - - global _expr_transformer - if _expr_transformer is None: - _expr_transformer = ExpressionTransformer() - -def _build_expr_tree(parse_tree): - if type(parse_tree) is lark.lexer.Token: - return str(parse_tree) - elif type(parse_tree) is not lark.Tree: - return parse_tree - - return _expr_transformer.transform(parse_tree) - -def _format_expr_tree(tree): - if type(tree) in (list, tuple): - return f'({_format_expr_tree(tree[1])}{tree[0]}{_format_expr_tree(tree[2])})' - else: - return str(tree) - -def _resolve_expr_tree(tree, definitions): - if type(tree) is str: - if tree not in definitions: - raise ValueError(f"{tree} is not defined") - return definitions[tree] - elif type(tree) in (list, tuple): - op = tree[0] - if op not in _OPERATIONS: - raise ValueError(f"Expression operation {op} is not supported") - return _OPERATIONS[op](*[_resolve_expr_tree(t, definitions) for t in tree[1:]]) - else: - return tree - -class ExpressionTransformer(lark.Transformer): - def expr_passthrough(self, n): - return n[0] - - def expr_binary_op(self, n): - left_operand,op,right_operand = n - return [str(op), left_operand, right_operand] - - def decimal_number(self, n): - return int(n[0]) - - def hex_number(self, n): - if n[0].startswith('$'): - return int(n[0][1:], 16) - elif n[0].startswith('0x'): - return int(n[0][2:], 16) - else: - raise ValueError(f'Malformed hex number {n[0]}') - - def expr_identifier(self, n): - return str(n[0]) - -class CompiledBlock: - def __init__(self): - self.address = None - self.data = [] - self.definitions = {} - self.labels = {} - self.expressions = [] - self.label_branch_references = [] - - def as_dict(self): - print(self.labels) - return { - 'address' : self.address, - 'data' : self.data, - 'definitions' : self.definitions, - 'labels' : self.labels, - 'expressions' : self.expressions, - 'label_branch_references' : self.label_branch_references, - } - - def from_dict(self, d): - self.address = d['address'] - self.data = d['data'] - self.definitions = d['definitions'] - self.labels = d['labels'] - self.expressions = d['expressions'] - self.label_branch_references = d['label_branch_references'] - - def set_address(self, addr): - self.address = addr - - def add_definition(self, name, value): - self.definitions[name] = value - - def set_label_at_current_location(self, label_name, local=False): - self.labels[label_name] = (len(self.data), local) - - def add_expression(self, tree, part, size=None): - if size is None: - size = _PART_SIZES[part] - - 
self.expressions.append( { - 'tree' : tree, - 'part' : part, - 'size' : size, - 'position' : len(self.data) - } ) - - self.data.extend([None] * size) - - def add_label_branch_reference(self, label_name, byte_size=1): - self.label_branch_references.append( { - 'label' : label_name, - 'size' : byte_size, - 'position' : len(self.data) - }) - self.data.extend([None] * byte_size) - - def resolve_branch_references(self): - for ref in self.label_branch_references: - label_name = ref['label'] - if label_name not in self.labels: - raise ValueError("Cannot branch to label '{}' -- label not found in same patch block".format(label_name)) - - position = ref['position'] - # position points to the byte before the next instruction; - # the next instruction is the reference for the offset - offset = self.labels[label_name][0] - (position + 1) - offset_min = -(1 << (ref['size'] * 8 - 1)) - offset_max = -offset_min - 1 - - if offset > offset_max or offset < offset_min: - raise ValueError("Branch to label '{}' is out of range (offset {})".format(label_name, offset)) - - if offset < 0: - offset = (1 << (ref['size'] * 8)) + offset - - for i in range(ref['size']): - b = (offset >> (i * 8)) & 0xFF - self.data[ref['position'] + i] = b - - def resolve_expressions(self, definitions): - for expr in self.expressions: - value = _extract_part(_resolve_expr_tree(expr['tree'], definitions), expr['part']) - - for i in range(expr['size']): - b = (value >> (i * 8)) & 0xFF - self.data[expr['position'] + i] = b - - def pretty_print(self): - if self.address is None: - print('unlocated block') - else: - print('block at address: {:X}'.format(self.address)) - parts = [] - for i,b in enumerate(self.data): - if b is None: - for expr in self.expressions: - if i == expr['position']: - parts.append(f"[{expr['part']}:{_format_expr_tree(expr['tree'])}] ") - else: - parts.append(f'{b:02X} ') - - if (i + 1) % 16 == 0: - parts.append('\n') - - print(''.join(parts)) - -def save_precompiled_blocks(path, *compiled_blocks): - if not os.path.exists(os.path.dirname(path)): - os.makedirs(os.path.dirname(path)) - with open(path, 'wb') as outfile: - pickle.dump(compiled_blocks, outfile) - -def load_precompiled_blocks(path): - with open(path, 'rb') as infile: - compiled_blocks = pickle.load(infile) - return compiled_blocks - -def process_msfpatch_block(block, rom, env): - # check for precompiled version - if env and env.cache: - cache_name = env.cache.get_block_cache_name(block) - if not env.options.force_recompile: - precompiled_blocks = env.cache.load(cache_name) - if precompiled_blocks is not None: - env.postprocess.register(_postprocess_msfpatches, *precompiled_blocks) - return - - _load_parser() - - try: - tree = _msf_parser.parse(block['body']) - except lark.common.ParseError as e: - print(block['body']) - raise e - - current_block = CompiledBlock() - compiled_blocks = [ current_block ] - a_size = 1 - xy_size = 2 - - def num_value(node): - if node.data == 'decimal_number': - return int(str(node.children[0])), None - elif node.data == 'direct_patch_address': - return int(str(node.children[0]), 16), None - else: - s = str(node.children[0]) - if s.startswith('$'): - s = s[1:] - if s.startswith('0x'): - s = s[2:] - - return int(s, 16), int(math.ceil(len(s) / 2.0)) - - for item in tree.children: - if item.data == 'label': - current_block.set_label_at_current_location(str(item.children[0])) - elif item.data == 'local_label': - current_block.set_label_at_current_location(str(item.children[0]), local=True) - elif item.data == 'directive': - directive = 
item.children[0] - if directive == '.mx': - param, dummy = num_value(item.children[1]) - a_size = (1 if (param & 0x20) else 2) - xy_size = (1 if (param & 0x10) else 2) - elif directive == '.addr' or directive == '.new': - if directive == '.addr': - addr, dummy = num_value(item.children[1]) - else: - addr = None - - if len(current_block.data) > 0: - current_block = CompiledBlock() - compiled_blocks.append(current_block) - - if addr is not None: - current_block.set_address(addr) - elif directive == '.def': - if len(item.children) < 3: - raise ValueError(".def directive for '{}' is missing value".format(param)) - current_block.add_definition(str(item.children[1]), num_value(item.children[2])[0]) - else: - raise ValueError("Unsupported directive '{}'".format(directive)) - elif item.data == 'command': - op = str(item.children[0]) - if op not in OPCODES: - raise ValueError("Could not find operation '{}' -- command parse tree {}".format(op, str(item))) - - op_info = OPCODES[op] - if type(op_info) is int: - # this is an opcode with no parameters - if len(item.children) > 1: - raise ValueError("Operation '{}' called with parameters but does not use them -- command parse tree {}".format(op, str(item))) - current_block.data.append(op_info) - elif len(item.children) < 2: - raise ValueError("Operation '{}' requires parameters but none were provided -- command parse tree".format(op, str(item))) - else: - param = item.children[1] - - param_values = [] - - if param.data == 'branch_label': - param_value = str(param.children[0]) - if 'pcr long' in op_info: - slug = 'pcr long' - param_size = 2 - else: - slug = 'pcr' - param_size = 1 - param_values.append( ('branch', param_value, param_size) ) - else: - slug = param.data.replace('_', ' ') - - for param_child in param.children: - if param_child.data == 'address_expression': - param_type = 'expression' - part = param_child.children[0].data.rsplit('_', maxsplit=1)[-1] - param_size = _PART_SIZES[part] - - param_value = { - 'part' : part, - 'tree' : _build_expr_tree(param_child.children[1]) - } - else: - param_type = 'literal' - param_value, param_size = num_value(param_child) - - # set param size automatically for commands that have a variable argument size - if param.data == 'immediate': - if op in VARIABLE_SIZE_OPS: - if VARIABLE_SIZE_OPS[op] == 'a': - param_size = a_size - elif VARIABLE_SIZE_OPS[op] == 'xy': - param_size = xy_size - else: - raise ValueError("Don't know how to find size of register '{}'".format(VARIABLE_SIZE_OPS[op])) - - param_values.append( (param_type, param_value, param_size) ) - - if param.data == 'block': - # in assembly, parameters are SRC BANK, DST BANK - # but in the object code DST comes first - param_values.reverse() - - if slug.startswith('addr'): - ADDR_NAMES = { 1: 'dp', 2: 'absolute', 3: 'absolute long'} - slug = ADDR_NAMES[param_size] + slug[4:] - - if slug not in op_info: - if slug == 'dp': - slug = 'pcr' - elif slug == 'absolute': - slug = 'pcr long' - - if slug not in op_info: - raise ValueError("Could not find operation '{}' matching parameter format '{}' -- command parse tree {}".format(op, slug, str(item))) - - opcode = op_info[slug] - current_block.data.append(opcode) - - for param_type,param_value,param_size in param_values: - if param_type == 'literal': - for i in range(param_size): - b = (param_value >> (i * 8)) & 0xFF - current_block.data.append(b) - elif param_type == 'expression': - current_block.add_expression(param_value['tree'], param_value['part'], param_size) - elif param_type == 'branch': - 
current_block.add_label_branch_reference(param_value, param_size) - else: - raise ValueError(f"Unhandled param type '{param_type}'") - - elif item.data == 'direct_patch': - patch_block = CompiledBlock() - patch_block.set_address(num_value(item.children[0])[0]) - patch_block.data.append(num_value(item.children[1])[0]) - compiled_blocks.append(patch_block) - - elif item.data == 'raw_data': - for subitem in item.children: - if subitem.data == 'raw_byte': - current_block.data.append(int(str(subitem.children[0]), 16)) - else: - part = subitem.children[0].data.rsplit('_', maxsplit=1)[-1] - expr_tree = _build_expr_tree(subitem.children[1]) - current_block.add_expression(expr_tree, part) - - elif item.data == 'string': - text = str(item.children[0])[1:-1] - encoded_text = ff4struct.text.encode(text, allow_dual_char=False) - if encoded_text and encoded_text[-1] == 0: - encoded_text.pop() - current_block.data.extend(encoded_text) - - for block in compiled_blocks: - block.resolve_branch_references() - - if env: - if env.cache: - env.cache.save(cache_name, compiled_blocks) - - env.postprocess.register(_postprocess_msfpatches, *compiled_blocks) - - return compiled_blocks - -def _rom_to_snes_address(rom_address): - bank = (rom_address >> 15) - addr = (rom_address & 0x7FFF) | 0x8000 - if bank == 0x7E or bank == 0x7F: - bank += 0x80 - return (bank << 16) | addr - -def _snes_to_rom_address(snes_address): - bank = (snes_address >> 16) & 0xFF - addr = (snes_address & 0xFFFF) - if bank == 0x7E or bank == 0x7F: - raise ValueError("Cannot convert SNES address {:X} to ROM address : address is WRAM".format(snes_address)) - if bank >= 0x80: - bank -= 0x80 - if addr < 0x8000: - if bank >= 0x40 and bank <= 0x6F: - addr += 0x8000 - else: - raise ValueError("Cannot convert SNES address {:X} to ROM address : this address does not map to ROM".format(snes_address)) - if not (bank & 0x01): - addr -= 0x8000 - bank = (bank >> 1) - return ((bank << 16) | addr) - - -def _postprocess_msfpatches(env, blocks): - if env.options.shuffle_msfpatches: - blocks = list(blocks) - env.rnd.shuffle(blocks) - - # assign addresses to all new code blocks - new_code_address = _rom_to_snes_address(0x100000) # TODO: get number from ROM? - for block in blocks: - if block.address is None: - # TODO: test if block will cross bank boundaries? 
- block.address = new_code_address - new_code_address += len(block.data) - - # build global name tables - global_definitions = {} - block_definitions = [] - - for block in blocks: - local_definitions = {} - block_definitions.append(local_definitions) - - for name in block.definitions: - if name in global_definitions: - raise ValueError("Duplicate definition for symbol '{}' in MSF patches".format(name)) - global_definitions[name] = block.definitions[name] - - for name in block.labels: - value = block.address + block.labels[name][0] - local = block.labels[name][1] - if not local: - if name in global_definitions: - raise ValueError("Duplicate definition for symbol '{}' in MSF patches".format(name)) - global_definitions[name] = value - else: - if name in local_definitions: - raise ValueError("Duplicate definition for symbol '{}' in MSF patches".format(name)) - local_definitions[name] = value - - # resolve expressions - for i,block in enumerate(blocks): - block.resolve_expressions({**global_definitions, **block_definitions[i]}) - - # add raw patches to ROM - for block in blocks: - if env: - env.rom.add_patch(_snes_to_rom_address(block.address), block.data) - else: - block.pretty_print() - - env.reports['symbols'] = global_definitions - -#---------------------------------------------------- - -OPCODES = { - 'adc' : { - 'dp x indirect' : 0x61, - 'sr' : 0x63, - 'dp' : 0x65, - 'dp indirect long' : 0x67, - 'immediate' : 0x69, - 'absolute' : 0x6D, - 'absolute long' : 0x6F, - 'dp indirect y' : 0x71, - 'dp indirect' : 0x72, - 'sr indirect y' : 0x73, - 'dp x' : 0x75, - 'dp indirect long y' : 0x77, - 'absolute y' : 0x79, - 'absolute x' : 0x7D, - 'absolute long x' : 0x7F, - }, - 'and' : { - 'dp x indirect' : 0x21, - 'sr' : 0x23, - 'dp' : 0x25, - 'dp indirect long' : 0x27, - 'immediate' : 0x29, - 'absolute' : 0x2D, - 'absolute long' : 0x2F, - 'dp indirect y' : 0x31, - 'dp indirect' : 0x32, - 'sr indirect y' : 0x33, - 'dp x' : 0x35, - 'dp indirect long y' : 0x37, - 'absolute y' : 0x39, - 'absolute x' : 0x3D, - 'absolute long x' : 0x3F, - }, - 'asl' : { - 'dp' : 0x06, - 'a' : 0x0A, - 'absolute' : 0x0E, - 'dp x' : 0x16, - 'absolute x' : 0x1E, - }, - 'bcc' : { 'pcr' : 0x90 }, - 'blt' : { 'pcr' : 0x90 }, - 'bcs' : { 'pcr' : 0xB0 }, - 'bge' : { 'pcr' : 0xB0 }, - 'beq' : { 'pcr' : 0xF0 }, - 'bit' : { - 'dp' : 0x24, - 'absolute' : 0x2C, - 'dp x' : 0x34, - 'absolute x' : 0x3C, - 'immediate' : 0x89, - }, - 'bmi' : { 'pcr' : 0x30 }, - 'bne' : { 'pcr' : 0xD0 }, - 'bpl' : { 'pcr' : 0x10 }, - 'bra' : { 'pcr' : 0x80 }, - 'brk' : 0x00, - 'brl' : { 'pcr long' : 0x82 }, - 'bvc' : { 'pcr' : 0x50 }, - 'bvs' : { 'pcr' : 0x70 }, - 'clc' : 0x18, - 'cld' : 0xD8, - 'cli' : 0x58, - 'clv' : 0xB8, - 'cmp' : { - 'dp x indirect' : 0xC1, - 'sr' : 0xC3, - 'dp' : 0xC5, - 'dp indirect long' : 0xC7, - 'immediate' : 0xC9, - 'absolute' : 0xCD, - 'absolute long' : 0xCF, - 'dp indirect y' : 0xD1, - 'dp indirect' : 0xD2, - 'sr indirect y' : 0xD3, - 'dp x' : 0xD5, - 'dp indirect long y' : 0xD7, - 'absolute y' : 0xD9, - 'absolute x' : 0xDD, - 'absolute long x' : 0xDF, - }, - 'cop' : { 'interrupt' : 0x02 }, - 'cpx' : { - 'immediate' : 0xE0, - 'dp' : 0xE4, - 'absolute' : 0xEC, - }, - 'cpy' : { - 'immediate' : 0xC0, - 'dp' : 0xC4, - 'absolute' : 0xCC, - }, - 'dec' : { - 'a' : 0x3A, - 'dp' : 0xC6, - 'absolute' : 0xCE, - 'dp x' : 0xD6, - 'absolute x' : 0xDE, - }, - 'dea' : 0x3A, - 'dex' : 0xCA, - 'dey' : 0x88, - 'eor' : { - 'dp x indirect' : 0x41, - 'sr' : 0x43, - 'dp' : 0x45, - 'dp indirect long' : 0x47, - 'immediate' : 0x49, - 'absolute' : 0x4D, - 
'absolute long' : 0x4F, - 'dp indirect y' : 0x51, - 'dp indirect' : 0x52, - 'sr indirect y' : 0x53, - 'dp x' : 0x55, - 'dp indirect long y' : 0x57, - 'absolute y' : 0x59, - 'absolute x' : 0x5D, - 'absolute long x' : 0x5F, - }, - 'inc' : { - 'a' : 0x1A, - 'dp' : 0xE6, - 'absolute' : 0xEE, - 'dp x' : 0xF6, - 'absolute x' : 0xFE, - }, - 'ina' : 0x1A, - 'inx' : 0xE8, - 'iny' : 0xC8, - 'jmp' : { - 'absolute' : 0x4C, - 'absolute long' : 0x5C, - 'absolute indirect' : 0x6C, - 'absolute x indirect' : 0x7C, - 'absolute indirect long': 0xDC, - }, - 'jml' : { - 'absolute long' : 0x5C, - 'absolute indirect long': 0xDC, - }, - 'jsr' : { - 'absolute' : 0x20, - 'absolute long' : 0x22, - 'absolute x indirect' : 0xFC, - }, - 'jsl' : { - 'absolute long' : 0x22, - }, - 'lda' : { - 'dp x indirect' : 0xA1, - 'sr' : 0xA3, - 'dp' : 0xA5, - 'dp indirect long' : 0xA7, - 'immediate' : 0xA9, - 'absolute' : 0xAD, - 'absolute long' : 0xAF, - 'dp indirect y' : 0xB1, - 'dp indirect' : 0xB2, - 'sr indirect y' : 0xB3, - 'dp x' : 0xB5, - 'dp indirect long y' : 0xB7, - 'absolute y' : 0xB9, - 'absolute x' : 0xBD, - 'absolute long x' : 0xBF, - }, - 'ldx' : { - 'immediate' : 0xA2, - 'dp' : 0xA6, - 'absolute' : 0xAE, - 'dp y' : 0xB6, - 'absolute y' : 0xBE, - }, - 'ldy' : { - 'immediate' : 0xA0, - 'dp' : 0xA4, - 'absolute' : 0xAC, - 'dp x' : 0xB4, - 'absolute x' : 0xBC, - }, - 'lsr' : { - 'dp' : 0x46, - 'a' : 0x4A, - 'absolute' : 0x4E, - 'dp x' : 0x56, - 'absolute x' : 0x5E, - }, - 'mvn' : { 'block' : 0x54 }, - 'mvp' : { 'block' : 0x44 }, - 'nop' : 0xEA, - 'ora' : { - 'dp x indirect' : 0x01, - 'sr' : 0x03, - 'dp' : 0x05, - 'dp indirect long' : 0x07, - 'immediate' : 0x09, - 'absolute' : 0x0D, - 'absolute long' : 0x0F, - 'dp indirect y' : 0x11, - 'dp indirect' : 0x12, - 'sr indirect y' : 0x13, - 'dp x' : 0x15, - 'dp indirect long y' : 0x17, - 'absolute y' : 0x19, - 'absolute x' : 0x1D, - 'absolute long x' : 0x1F, - }, - 'pea' : { 'absolute' : 0xF4 }, - 'pei' : { 'dp' : 0xD4 }, - 'per' : { 'pcr' : 0x62 }, - 'pha' : 0x48, - 'phb' : 0x8B, - 'phd' : 0x0B, - 'phk' : 0x4B, - 'php' : 0x08, - 'phx' : 0xDA, - 'phy' : 0x5A, - 'pla' : 0x68, - 'plb' : 0xAB, - 'pld' : 0x2B, - 'plp' : 0x28, - 'plx' : 0xFA, - 'ply' : 0x7A, - 'rep' : { 'immediate' : 0xC2 }, - 'rol' : { - 'dp' : 0x26, - 'a' : 0x2A, - 'absolute' : 0x2E, - 'dp x' : 0x36, - 'absolute x' : 0x3E, - }, - 'ror' : { - 'dp' : 0x66, - 'a' : 0x6A, - 'absolute' : 0x6E, - 'dp x' : 0x76, - 'absolute x' : 0x7E, - }, - 'rti' : 0x40, - 'rtl' : 0x6B, - 'rts' : 0x60, - 'sbc' : { - 'dp x indirect' : 0xE1, - 'sr' : 0xE3, - 'dp' : 0xE5, - 'dp indirect long' : 0xE7, - 'immediate' : 0xE9, - 'absolute' : 0xED, - 'absolute long' : 0xEF, - 'dp indirect y' : 0xF1, - 'dp indirect' : 0xF2, - 'sr indirect y' : 0xF3, - 'dp x' : 0xF5, - 'dp indirect long y' : 0xF7, - 'absolute y' : 0xF9, - 'absolute x' : 0xFD, - 'absolute long x' : 0xFF, - }, - 'sec' : 0x38, - 'sed' : 0xF8, - 'sei' : 0x78, - 'sep' : { 'immediate' : 0xE2 }, - 'sta' : { - 'dp x indirect' : 0x81, - 'sr' : 0x83, - 'dp' : 0x85, - 'dp indirect long' : 0x87, - 'absolute' : 0x8D, - 'absolute long' : 0x8F, - 'dp indirect y' : 0x91, - 'dp indirect' : 0x92, - 'sr indirect y' : 0x93, - 'dp x' : 0x95, - 'dp indirect long y' : 0x97, - 'absolute y' : 0x99, - 'absolute x' : 0x9D, - 'absolute long x' : 0x9F, - }, - 'stp' : 0xDB, - 'stx' : { - 'dp' : 0x86, - 'absolute' : 0x8E, - 'dp y' : 0x96, - }, - 'sty' : { - 'dp' : 0x84, - 'absolute' : 0x8C, - 'dp x' : 0x94, - }, - 'stz' : { - 'dp' : 0x64, - 'dp x' : 0x74, - 'absolute' : 0x9C, - 'absolute x' : 0x9E, - }, - 
'tax' : 0xAA, - 'tay' : 0xA8, - 'tcd' : 0x5B, - 'tcs' : 0x1B, - 'tdc' : 0x7B, - 'trb' : { - 'dp' : 0x14, - 'absolute' : 0x1C, - }, - 'tsb' : { - 'dp' : 0x04, - 'absolute' : 0x0C, - }, - 'tsc' : 0x3B, - 'tsx' : 0xBA, - 'txa' : 0x8A, - 'txs' : 0x9A, - 'txy' : 0x9B, - 'tya' : 0x98, - 'tyx' : 0xBB, - 'wai' : 0xCB, - 'wdm' : 0x42, - 'xba' : 0xEB, - 'xce' : 0xFB -} - -VARIABLE_SIZE_OPS = { - 'adc' : 'a', - 'and' : 'a', - 'bit' : 'a', - 'cmp' : 'a', - 'cpx' : 'xy', - 'cpy' : 'xy', - 'eor' : 'a', - 'lda' : 'a', - 'ldx' : 'xy', - 'ldy' : 'xy', - 'ora' : 'a', - 'sbc' : 'a' -} - -#---------------------------------------------------- - -def _test_check_opcodes(): - codes = {} - def add_code(opcode, description): - if opcode in codes: - print("Duplicate opcode found for {} -- already set as {}".format(description, codes[opcode])) - else: - codes[opcode] = description - - for k in OPCODES: - data = OPCODES[k] - if type(data) is int: - add_code(data, k) - else: - for p in data: - add_code(data[p], k + " " + p) - - for c in range(256): - if c not in codes: - print("Could not find opcode {:2X}".format(c)) - -if __name__ == '__main__': - test_script = ''' - AddressingTestBlock: - [[ - $=TestValue - $_TestValue - ]] - - .mx 0x00 - lda #$`TestValue - sta $_TestValue - jmp $_TestValue - ora #$.TestValue - and $^TestValue - jmp $=(TestValue % 4) - - - ''' - - compiled_blocks = process_msfpatch_block({'body':test_script}, None, None) - for b in compiled_blocks: - b.pretty_print() - b.resolve_expressions({'TestValue':0x7e1234}) - b.pretty_print() diff --git a/worlds/ff4fe/FreeEnterpriseForAP/f4c/compile_npc.py b/worlds/ff4fe/FreeEnterpriseForAP/f4c/compile_npc.py deleted file mode 100644 index 26a5d39fe977..000000000000 --- a/worlds/ff4fe/FreeEnterpriseForAP/f4c/compile_npc.py +++ /dev/null @@ -1,22 +0,0 @@ -from . import compile_common -from .compile_event_call import compile_event_call - -def process_npc_block(block, rom, env): - parse_tree = compile_common.parse(block['parameters'], 'npc', 'npc_block_params') - npc_id = parse_tree.children[0] - - tree = compile_common.parse(block['body'], 'npc', 'npc_block_body') - - for node in tree.children: - if node.data == 'sprite': - rom.npc_sprites[npc_id] = node.children[0] - elif node.data == 'active' or node.data == 'inactive': - flag_index = npc_id >> 3 - flag_bit = 1 << (npc_id % 8) - if node.data == 'active': - rom.npc_active_flags[flag_index] |= flag_bit - else: - rom.npc_active_flags[flag_index] &= (~flag_bit) - elif node.data == 'eventcall': - body = ' '.join([str(x) for x in node.children]) - rom.npc_event_calls[npc_id] = compile_event_call(body) diff --git a/worlds/ff4fe/FreeEnterpriseForAP/f4c/compile_patch.py b/worlds/ff4fe/FreeEnterpriseForAP/f4c/compile_patch.py deleted file mode 100644 index 505059e92f18..000000000000 --- a/worlds/ff4fe/FreeEnterpriseForAP/f4c/compile_patch.py +++ /dev/null @@ -1,29 +0,0 @@ -import re -from . import compile_common -from . 
import consts - -def process_patch_block(block, rom, env): - params_tree = compile_common.parse(block['parameters'], 'patch', 'patch_parameters') - - patch_address = params_tree.children[0] - - # tree parsing is being slow, so for the time being instead we'll just do a dumb - # approach that'll work until we need fancier stuff, if ever - patch_data = [] - for piece in block['body'].split(): - if piece.startswith('#'): - piece_size = 0 - while piece.startswith('#'): - piece_size += 1 - piece = piece[1:] - v = consts.get_value(piece) - if v is None: - raise compile_common.CompileError("Cannot compile unrecognized const {} in patch block at {:02X}".format(piece, patch_address)) - while piece_size > 0: - piece_size -= 1 - patch_data.append(v & 0xFF) - v = v >> 8 - else: - patch_data.extend([int(piece[i:i+2], 16) for i in range(0, len(piece), 2)]) - - rom.add_patch(patch_address, patch_data) diff --git a/worlds/ff4fe/FreeEnterpriseForAP/f4c/compile_placement.py b/worlds/ff4fe/FreeEnterpriseForAP/f4c/compile_placement.py deleted file mode 100644 index 99a3da3c0f50..000000000000 --- a/worlds/ff4fe/FreeEnterpriseForAP/f4c/compile_placement.py +++ /dev/null @@ -1,100 +0,0 @@ -from . import compile_common -from . import ff4struct -from . import lark - -class PlacementTransformer(lark.Transformer): - def __init__(self, placement): - lark.Transformer.__init__(self) - self.placement = placement - self.marked_for_delete = False - - def on(self, n): - return True - - def off(self, n): - return False - - def npc(self, n): - v = n[0] - self.placement.npc = (v & 0xFF) - return None - - def position(self, n): - x,y = n - self.placement.x = x - self.placement.y = y - return None - - def walking(self, n): - v = n[0] - self.placement.walks = v - return None - - def tangible(self, n): - self.placement.intangible = False - return None - - def intangible(self, n): - self.placement.intangible = True - return None - - def face(self, n): - v = n[0] - self.placement.facing = v - return None - - def palette(self, n): - v = n[0] - self.placement.palette = v - return None - - def turning(self, n): - v = n[0] - self.placement.turns = v - return None - - def marching(self, n): - v = n[0] - self.placement.marches = v - return None - - def speed(self, n): - v = n[0] - self.placement.speed = v - return None - - def delete(self, n): - self.marked_for_delete = True - return None - -def process_placement_block(block, rom, env): - params_tree = compile_common.parse(block['parameters'], 'placement', 'placement_block_parameters') - tree = compile_common.parse(block['body'], 'placement', 'placement_block_body') - - group_number, placement_index = params_tree.children - - placement_set = ff4struct.npc_placement.decode_set(rom.placement_groups[group_number]) - if placement_index < len(placement_set): - transformer = PlacementTransformer(placement_set[placement_index]) - transformer.transform(tree) - else: - transformer = PlacementTransformer(ff4struct.npc_placement.NpcPlacement()) - transformer.transform(tree) - while len(placement_set) < placement_index: - placement_set.append(ff4struct.npc_placement.NpcPlacement()) - placement_set.append(transformer.placement) - - if transformer.marked_for_delete: - env.postprocess.register(_postprocess_remove_placements, (group_number, placement_index)) - - encoded_placement_set = ff4struct.npc_placement.encode_set(placement_set) - rom.placement_groups[group_number] = encoded_placement_set - -def _postprocess_remove_placements(env, pairs): - for group_number in set([p[0] for p in pairs]): - indices 
= sorted(set([p[1] for p in pairs if p[0] == group_number]), reverse=True) - placement_set = ff4struct.npc_placement.decode_set(env.rom.placement_groups[group_number]) - for i in indices: - placement_set.pop(i) - encoded_placement_set = ff4struct.npc_placement.encode_set(placement_set) - env.rom.placement_groups[group_number] = encoded_placement_set diff --git a/worlds/ff4fe/FreeEnterpriseForAP/f4c/compile_postprocess.py b/worlds/ff4fe/FreeEnterpriseForAP/f4c/compile_postprocess.py deleted file mode 100644 index 018ff110e250..000000000000 --- a/worlds/ff4fe/FreeEnterpriseForAP/f4c/compile_postprocess.py +++ /dev/null @@ -1,34 +0,0 @@ -from . import ff4struct -from . import compile_common - -class Postprocessor: - def __init__(self): - self._registered_processes = {} - - def register(self, callback, *items): - self._registered_processes.setdefault(callback, []).extend(items) - - def apply_registered_processes(self, env): - for postprocess_func in self._registered_processes: - items = self._registered_processes[postprocess_func] - postprocess_func(env, items) - -def apply_cleanup_processes(env): - rom = env.rom - # set treasure_index values on all maps according to triggers - treasure_index = 0 - for map_id in range(len(rom.map_infos)): - if map_id == 0x100: - # reset index counter for underworld/moon maps - treasure_index = 0 - - map_info = ff4struct.map_info.decode(rom.map_infos[map_id]) - map_info.treasure_index = treasure_index - rom.map_infos[map_id] = map_info.encode() - - triggers = ff4struct.trigger.decode_set(rom.map_trigger_sets[map_id]) - treasure_triggers = [t for t in triggers if t.type == ff4struct.trigger.TREASURE] - treasure_index += len(treasure_triggers) - - if treasure_index > 0x100: - raise compile_common.CompileError("Too many treasures; overflow reached at map {:X}".format(map_id)) diff --git a/worlds/ff4fe/FreeEnterpriseForAP/f4c/compile_shop.py b/worlds/ff4fe/FreeEnterpriseForAP/f4c/compile_shop.py deleted file mode 100644 index e8c3292895a2..000000000000 --- a/worlds/ff4fe/FreeEnterpriseForAP/f4c/compile_shop.py +++ /dev/null @@ -1,9 +0,0 @@ -from . import compile_common - -def process_shop_block(block, rom, env): - params_tree = compile_common.parse(block['parameters'], 'shop', 'shop_block_params') - tree = compile_common.parse(block['body'], 'shop', 'shop_block_body') - - item_list = (list(tree.children) + ([0xFF] * 8))[:8] - - rom.shops[params_tree.children[0]] = item_list diff --git a/worlds/ff4fe/FreeEnterpriseForAP/f4c/compile_spell_set.py b/worlds/ff4fe/FreeEnterpriseForAP/f4c/compile_spell_set.py deleted file mode 100644 index f8e4757db2fd..000000000000 --- a/worlds/ff4fe/FreeEnterpriseForAP/f4c/compile_spell_set.py +++ /dev/null @@ -1,29 +0,0 @@ -from . import compile_common -from . 
import ff4struct - -def process_spellset_block(block, rom, env): - params_tree = compile_common.parse(block['parameters'], 'spell_set', 'spellset_block_params') - spellset_id = params_tree.children[0] - - tree = compile_common.parse(block['body'], 'spell_set', 'spellset_block_body') - - ss = ff4struct.spell_set.decode(rom.spell_sets[spellset_id], rom.learned_spells[spellset_id]) - - for b in tree.children: - if b.data == 'initial_block': - ss.initial_spells = list(b.children) - elif b.data == 'learned_block': - ss.learned_spells = {} - for pair in b.children: - lv, s = pair.children - if lv in ss.learned_spells: - if type(ss.learned_spells[lv]) is list: - ss.learned_spells[lv].append(s) - else: - ss.learned_spells[lv] = [ss.learned_spells[lv], s] - else: - ss.learned_spells[lv] = s - - rom.spell_sets[spellset_id] = ss.encode_initial() - rom.learned_spells[spellset_id] = ss.encode_learned() - diff --git a/worlds/ff4fe/FreeEnterpriseForAP/f4c/compile_text.py b/worlds/ff4fe/FreeEnterpriseForAP/f4c/compile_text.py deleted file mode 100644 index e6dc72afc431..000000000000 --- a/worlds/ff4fe/FreeEnterpriseForAP/f4c/compile_text.py +++ /dev/null @@ -1,88 +0,0 @@ -from . import lark -from . import compile_common -from . import consts -from . import ff4struct - -def _postprocess_map_text(env, texts): - altered_map_texts = dict() - - for map_number,message_index,text in texts: - if map_number not in altered_map_texts: - map_texts = ff4struct.text.decode(env.rom.text_bank2[map_number]) - if type(map_texts) is str: - map_texts = [map_texts] - altered_map_texts[map_number] = map_texts - else: - map_texts = altered_map_texts[map_number] - - if message_index < len(map_texts): - map_texts[message_index] = text - else: - map_texts.insert(message_index, text) - - for map_number in altered_map_texts: - env.rom.text_bank2[map_number] = ff4struct.text.encode(altered_map_texts[map_number]) - - -def process_text_block(block, rom, env): - params_tree = compile_common.parse(block['parameters'], 'text', 'text_block_parameters') - - text_type = params_tree.data - if params_tree.children: - message_index = params_tree.children[0] - - if text_type == 'map_text': - map_number = params_tree.children[0] - message_index = params_tree.children[1] - - # map text encoding is done in postprocess to avoid repeated - # reencoding of other text on same map - env.postprocess.register(_postprocess_map_text, [map_number, message_index, block['body']]) - - elif text_type == 'monster_name_text': - encoded_text = ff4struct.text.encode(block['body'], allow_dual_char=False, fixed_length=8) - rom.text_monster_names[message_index] = encoded_text - - elif text_type == 'command_name_text': - encoded_text = ff4struct.text.encode(block['body'], allow_dual_char=False, fixed_length=5) - rom.text_command_names[message_index] = encoded_text - - elif text_type == 'item_name_text': - encoded_text = ff4struct.text.encode(block['body'], allow_dual_char=False, fixed_length=9) - rom.text_item_names[message_index] = encoded_text - - elif text_type == 'spell_name_text': - is_player_spell = (message_index < len(rom.text_spell_names)) - encoded_text = ff4struct.text.encode(block['body'], allow_dual_char=False, fixed_length=(6 if is_player_spell else 8)) - if is_player_spell: - rom.text_spell_names[message_index] = encoded_text - else: - rom.text_enemy_spell_names[message_index - len(rom.text_spell_names)] = encoded_text - - elif text_type == 'credits_text': - rom.text_credits = ff4struct.text.encode(block['body'], allow_dual_char=False) - - elif text_type 
== 'alert_text': - rom.text_alerts[message_index] = ff4struct.text.encode(block['body'], allow_dual_char=False) - - elif text_type == 'map_name_text': - rom.text_map_names[message_index] = ff4struct.text.encode(block['body'], allow_dual_char=False) - - elif text_type == 'battle_text': - rom.text_battle[message_index] = ff4struct.text.encode(block['body'], allow_dual_char=False) - - elif text_type == 'status_text': - rom.text_status[message_index] = ff4struct.text.encode(block['body'], allow_dual_char=False) - - elif text_type == 'custom_text': - rom.add_patch(params_tree.children[0], ff4struct.text.encode(block['body'], allow_dual_char=False)) - - else: - encoded_text = ff4struct.text.encode(block['body']) - if text_type == 'bank_text': - bank_number = params_tree.children[0] - message_index = params_tree.children[1] - if bank_number == 1: - rom.text_bank1[message_index] = encoded_text - elif bank_number == 3: - rom.text_bank3[message_index] = encoded_text diff --git a/worlds/ff4fe/FreeEnterpriseForAP/f4c/compile_trigger.py b/worlds/ff4fe/FreeEnterpriseForAP/f4c/compile_trigger.py deleted file mode 100644 index 5f3b5a878625..000000000000 --- a/worlds/ff4fe/FreeEnterpriseForAP/f4c/compile_trigger.py +++ /dev/null @@ -1,98 +0,0 @@ -from . import compile_common -from . import ff4struct -from . import lark - -class TriggerProcessor(lark.Transformer): - def __init__(self, trigger): - lark.Transformer.__init__(self) - self.trigger = trigger - - def position(self, n): - x,y = n - self.trigger.x = x - self.trigger.y = y - - def item(self, n): - item_number = n[0] - self.trigger.item = item_number - self.trigger.gp = None - - def gp(self, n): - gp_amount = n[0] - self.trigger.gp = gp_amount - self.trigger.item = None - - def treasure(self, children): - self.trigger.type = ff4struct.trigger.TREASURE - if len(children) > 1: - self.trigger.is_miab = True - self.trigger.formation = children[1] - else: - self.trigger.is_miab = False - self.trigger.formation = 0 - - def teleport(self, nodes): - map_id, x, y = nodes[:3] - facing = None - if len(nodes) > 3: - facing = nodes[3] - - if facing is None and (map_id < 251 or map_id > 253): - facing = 2 - - self.trigger.type = ff4struct.trigger.TELEPORT - self.trigger.map = (map_id & 0xFF) - self.trigger.target_x = x - self.trigger.target_y = y - self.trigger.target_facing = facing - - def event_call(self, n): - event_call_id = n[0] - self.trigger.type = ff4struct.trigger.EVENT - self.trigger.event_call = event_call_id - -def _get_encoded_trigger_set_by_map_id(rom, map_id): - if map_id >= 251 and map_id <= 253: - return rom.world_trigger_sets[map_id - 251] - else: - return rom.map_trigger_sets[map_id] - -def _set_encoded_trigger_set_by_map_id(rom, map_id, encoded_trigger_set): - if map_id >= 251 and map_id <= 253: - rom.world_trigger_sets[map_id - 251] = encoded_trigger_set - else: - rom.map_trigger_sets[map_id] = encoded_trigger_set - -def process_trigger_block(block, rom, env): - params_tree = compile_common.parse(block['parameters'], 'trigger', 'trigger_block_parameters') - map_id = params_tree.children[0] - trigger_id = params_tree.children[1] - - tree = compile_common.parse(block['body'], 'trigger', 'trigger_block_body') - - if tree.data == 'delete': - env.postprocess.register(_postprocess_remove_triggers, (map_id, trigger_id)) - else: - triggers = ff4struct.trigger.decode_set(_get_encoded_trigger_set_by_map_id(rom, map_id)) - if trigger_id >= len(triggers): - new_trigger = ff4struct.trigger.Trigger() - TriggerProcessor(new_trigger).transform(tree) - 
triggers.append(new_trigger) - else: - TriggerProcessor(triggers[trigger_id]).transform(tree) - _set_encoded_trigger_set_by_map_id(rom, map_id, ff4struct.trigger.encode_set(triggers)) - - -def _postprocess_remove_triggers(env, pairs): - triggers_by_map = {} - - for pair in pairs: - triggers_by_map.setdefault(pair[0], set()).add(pair[1]) - - for map_id in triggers_by_map: - trigger_indices = triggers_by_map[map_id] - encoded_triggers = _get_encoded_trigger_set_by_map_id(env.rom, map_id) - triggers = ff4struct.trigger.decode_set(encoded_triggers) - triggers = [triggers[i] for i in range(len(triggers)) if i not in trigger_indices] - encoded_triggers = ff4struct.trigger.encode_set(triggers) - _set_encoded_trigger_set_by_map_id(env.rom, map_id, encoded_triggers) diff --git a/worlds/ff4fe/FreeEnterpriseForAP/f4c/consts.py b/worlds/ff4fe/FreeEnterpriseForAP/f4c/consts.py deleted file mode 100644 index 9da1891dfd22..000000000000 --- a/worlds/ff4fe/FreeEnterpriseForAP/f4c/consts.py +++ /dev/null @@ -1,124 +0,0 @@ -import os,inspect -from . import lark - -_ROOT_FAMILY = "__" -_consts = {} - -_parser = lark.Lark(r''' - start : const_block* - const_block : "consts" "(" identifier ")" "{" const_def* "}" - const_def : number identifier - ?number : hex_value - | decimal_value - hex_value : /\$[0-9A-Fa-f]+/ - decimal_value : /[0-9]+/ - identifier : /[A-Za-z_][A-Za-z0-9_]*/ - - %import common.WS - %ignore WS - ''') - -class ConstsTransformer(lark.Transformer): - def hex_value(self, n): - v = n[0] - return int(v[1:], 16) - def decimal_value(self, n): - v = n[0] - return int(v) - def identifier(self, n): - v = n[0] - return str(v) - -def load_file(const_file_path): - with open(const_file_path, 'r') as infile: - data = infile.read() - load_string(data) - -def load_string(const_data): - lines = [] - for line in const_data.splitlines(True): - lines.append(line.strip().split('//')[0]) - - tree = _parser.parse('\n'.join(lines)) - - tree = ConstsTransformer().transform(tree) - - for block in tree.children: - if type(block.children[0] is str): - family_name = block.children[0] - definitions = block.children[1:] - else: - family_name = _ROOT_FAMILY - definitions = block.children - - family = _consts.setdefault(family_name, {}) - - for definition in definitions: - try: - value, identifier = definition.children - except ValueError: - print(definition) - continue - - if identifier in family: - raise ValueError("Const '{}.{}' already defined as value ${:02X}".format(family_name, identifier, family[identifier])) - else: - family[identifier] = value - -def set_value(identifier, family, value): - _consts.setdefault(family, {})[identifier] = value - -def get_value(identifier, family=None): - if '.' in identifier: - family, identifier = identifier.split('.') - - if family in _consts and identifier in _consts[family]: - return _consts[family][identifier] - else: - return None - -def get_name(value, family): - if family in _consts: - for identifier in _consts[family]: - if _consts[family][identifier] == value: - return identifier - - return None - -class ConstsResolver(lark.Transformer): - def const(self, nodes): - const_name = str(nodes[0]) - if '.' 
in const_name: - value = get_value(const_name) - if value is None: - raise ValueError("Const #{} not found".format(const_name)) - return value - else: - # bubble up unqualified const name to see if the family can be inferred from context - return const_name - - def __getattr__(self, name): - if name.startswith('value_'): - family = name[len('value_'):] - def resolver_function(nodes): - n = nodes[0] - if type(n) is str: - value = get_value(n, family) - if value is None: - raise ValueError("Const #{} not found in family '{}'".format(n, family)) - return value - else: - return n - - return resolver_function - else: - return lark.Transformer.__getattribute__(self, name) - -def resolve_consts(tree): - return ConstsResolver().transform(tree) - -#------------------------------------------------------------------------------------- - -if __name__ == '__main__': - currentdir = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe()))) - load_file(os.path.join(currentdir, 'default.consts')) diff --git a/worlds/ff4fe/FreeEnterpriseForAP/f4c/decompile.py b/worlds/ff4fe/FreeEnterpriseForAP/f4c/decompile.py deleted file mode 100644 index 148da34d9502..000000000000 --- a/worlds/ff4fe/FreeEnterpriseForAP/f4c/decompile.py +++ /dev/null @@ -1,166 +0,0 @@ -import os -import argparse - -from . import ff4bin -from . import consts -from . import hints - -from .decompile_events import decompile_events -from .decompile_text import decompile_text -from .decompile_triggers import decompile_triggers -from .decompile_event_calls import decompile_event_calls -from .decompile_npcs import decompile_npcs, decompile_map_placements -from .decompile_ai import decompile_ai -from .decompile_ai_scripts import decompile_ai_scripts -from .decompile_map_infos import decompile_map_infos -from .decompile_map_grids import decompile_map_grids -from .decompile_tilesets import decompile_tilesets -from .decompile_shops import decompile_shops -from .decompile_actors import decompile_actors -from .decompile_drop_tables import decompile_drop_tables -from .decompile_monsters import decompile_monsters, decompile_monster_stats -from .decompile_formations import decompile_formations -from .decompile_spells import decompile_spells -from .decompile_spell_sets import decompile_spell_sets - -parser = argparse.ArgumentParser() -parser.add_argument('rom') -parser.add_argument('-a', '--all', action='store_true') -parser.add_argument('--events', action='store_true') -parser.add_argument('--text', action='store_true') -parser.add_argument('--triggers', action='store_true') -parser.add_argument('--eventcalls', action='store_true') -parser.add_argument('--npcs', action='store_true') -parser.add_argument('--ai', action='store_true') -parser.add_argument('--maps', action='store_true') -parser.add_argument('--tilesets', action='store_true') -parser.add_argument('--shops', action='store_true') -parser.add_argument('--actors', action='store_true') -parser.add_argument('--drops', action='store_true') -parser.add_argument('--monsters', action='store_true') -parser.add_argument('--formations', action='store_true') -parser.add_argument('--spells', action='store_true') -parser.add_argument('--spellsets', action='store_true') - -parser.add_argument('-i', '--ignorechecksum', action='store_true') -parser.add_argument('-o', '--output') -args = parser.parse_args() - -rom = ff4bin.Rom(args.rom, ignore_checksum=args.ignorechecksum) - -consts.load_file(os.path.join(os.path.dirname(__file__), 'default.consts')) 
-hints.load_file(os.path.join(os.path.dirname(__file__), 'default.hints')) - -output_prefix = args.output -if not output_prefix: - output_prefix = os.path.splitext(args.rom)[0] + '.decomp' - -if args.all or args.events: - filename = output_prefix + '.events.f4c' - print("Exporting events to {}".format(filename)) - with open(filename, 'w') as outfile: - outfile.write(decompile_events(rom)) - -if args.all or args.text: - filename = output_prefix + '.text.f4t' - print("Exporting text to {}".format(filename)) - with open(filename, 'w') as outfile: - outfile.write(decompile_text(rom)) - -if args.all or args.triggers: - filename = output_prefix + '.triggers.f4c' - print("Exporting triggers to {}".format(filename)) - with open(filename, 'w') as outfile: - outfile.write(decompile_triggers(rom)) - -if args.all or args.eventcalls: - filename = output_prefix + '.eventcalls.f4c' - print("Exporting event calls to {}".format(filename)) - with open(filename, 'w') as outfile: - outfile.write(decompile_event_calls(rom)) - -if args.all or args.npcs: - filename = output_prefix + '.npcs.f4c' - print("Exporting NPCs to {}".format(filename)) - with open(filename, 'w') as outfile: - outfile.write(decompile_npcs(rom)) - - filename = output_prefix + '.placements.f4c' - print("Exporting NPC placements to {}".format(filename)) - with open(filename, 'w') as outfile: - outfile.write(decompile_map_placements(rom)) - -if args.all or args.ai: - filename = output_prefix + '.ai.f4c' - print("Exporting AI to {}".format(filename)) - with open(filename, 'w') as outfile: - outfile.write(decompile_ai(rom)) - - filename = output_prefix + '.aiscripts.f4c' - print("Exporting AI scripts to {}".format(filename)) - with open(filename, 'w') as outfile: - outfile.write(decompile_ai_scripts(rom)) - -if args.all or args.maps: - filename = output_prefix + '.mapinfo.f4c' - print("Exporting map info to {}".format(filename)) - with open(filename, 'w') as outfile: - outfile.write(decompile_map_infos(rom)) - - filename = output_prefix + '.mapgrids.f4c' - print("Exporting map grids to {}".format(filename)) - with open(filename, 'w') as outfile: - outfile.write(decompile_map_grids(rom)) - -if args.all or args.shops: - filename = output_prefix + '.shops.f4c' - print("Exporting shops to {}".format(filename)) - with open(filename, 'w') as outfile: - outfile.write(decompile_shops(rom)) - -if args.all or args.tilesets: - filename = output_prefix + '.tilesets.f4c' - print("Exporting tilesets to {}".format(filename)) - with open(filename, 'w') as outfile: - outfile.write(decompile_tilesets(rom)) - -if args.all or args.actors: - filename = output_prefix + '.actors.f4c' - print("Exporting actors to {}".format(filename)) - with open(filename, 'w') as outfile: - outfile.write(decompile_actors(rom)) - -if args.all or args.drops: - filename = output_prefix + '.drops.f4c' - print("Exporting drops to {}".format(filename)) - with open(filename, 'w') as outfile: - outfile.write(decompile_drop_tables(rom)) - -if args.all or args.monsters: - filename = output_prefix + '.monsters.f4c' - print("Exporting monsters to {}".format(filename)) - with open(filename, 'w') as outfile: - outfile.write(decompile_monsters(rom)) - - filename = output_prefix + '.monsterstats.f4c' - print("Exporting monster stats to {}".format(filename)) - with open(filename, 'w') as outfile: - outfile.write(decompile_monster_stats(rom)) - -if args.all or args.formations: - filename = output_prefix + '.formations.f4c' - print("Exporting formations to {}".format(filename)) - with open(filename, 
'w') as outfile: - outfile.write(decompile_formations(rom)) - -if args.all or args.spells: - filename = output_prefix + '.spells.f4c' - print("Exporting spells to {}".format(filename)) - with open(filename, 'w') as outfile: - outfile.write(decompile_spells(rom)) - -if args.all or args.spellsets: - filename = output_prefix + '.spellsets.f4c' - print("Exporting spell sets to {}".format(filename)) - with open(filename, 'w') as outfile: - outfile.write(decompile_spell_sets(rom)) diff --git a/worlds/ff4fe/FreeEnterpriseForAP/f4c/decompile_actors.py b/worlds/ff4fe/FreeEnterpriseForAP/f4c/decompile_actors.py deleted file mode 100644 index 2c3e56f3e55e..000000000000 --- a/worlds/ff4fe/FreeEnterpriseForAP/f4c/decompile_actors.py +++ /dev/null @@ -1,37 +0,0 @@ -from .decompile_common import value_text - -def decompile_actors(rom): - lines = [] - for i in range(0, len(rom.actor_name_ids)): - lines.append('actor({})'.format(value_text(i+1, 'actor'))) - lines.append('{') - lines.append(' name ${:02X}'.format(rom.actor_name_ids[i])) - - if rom.actor_load_info[i] & 0x80: - lines.append(' load from slot {}'.format(rom.actor_load_info[i] & 0x7F)) - else: - lines.append(' load from stats ${:02X}'.format(rom.actor_load_info[i])) - - if rom.actor_save_info[i] & 0x80: - lines.append(' discard') - else: - lines.append(' save to slot {}'.format(rom.actor_save_info[i])) - - lines.append(' commands {') - commands = list(rom.actor_commands[i]) - while commands and commands[-1] == 0xFF: - commands.pop() - for c in commands: - lines.append(' {}'.format(value_text(c, 'command'))) - lines.append(' }') - - gear = rom.actor_gear[i] - lines.append(' right hand {} {}'.format(value_text(gear[3], 'item'), (gear[4] if (gear[4] > 1) else ''))) - lines.append(' left hand {} {}'.format(value_text(gear[5], 'item'), (gear[6] if (gear[6] > 1) else ''))) - lines.append(' head {}'.format(value_text(gear[0], 'item'))) - lines.append(' body {}'.format(value_text(gear[1], 'item'))) - lines.append(' arms {}'.format(value_text(gear[2], 'item'))) - lines.append('}') - lines.append('') - - return '\n'.join(lines) diff --git a/worlds/ff4fe/FreeEnterpriseForAP/f4c/decompile_ai.py b/worlds/ff4fe/FreeEnterpriseForAP/f4c/decompile_ai.py deleted file mode 100644 index e896e237f3b4..000000000000 --- a/worlds/ff4fe/FreeEnterpriseForAP/f4c/decompile_ai.py +++ /dev/null @@ -1,153 +0,0 @@ -from .decompile_common import value_text -from . 
import ff4struct - -def _unit_name(i): - prefix = '' - if i & 0x80: - i &= 0x7f - prefix = 'all ' - - if i <= 0x15: - return prefix + 'actor {}'.format(value_text(i, 'actor')) - - target_table = { - 'self' : [0x17], - 'character' : [0x18, 0x19], - 'type 0 monster' : [0x1A, 0x25], - 'type 1 monster' : [0x1B, 0x26], - 'type 2 monster' : [0x1C, 0x27], - 'anyone' : [0x1D], - 'other' : [0x1E], - 'monster' : [0x1F, 0x24, 0x2F], - 'other monster' : [0x20, 0x23], - 'front row' : [0x21, 0x28], - 'back row' : [0x22, 0x29] - } - - for target in target_table: - if i in target_table[target]: - return prefix + target - - return prefix + 'unit {}'.format(value_text(i)) - -def _condition_description(condition_bytes, rom): - condition = condition_bytes[0] - if condition == 0: - unit = condition_bytes[1] - status_byte = condition_bytes[2] - status_list = [i for i in range(8) if (condition_bytes[3] & (1 << i))] - return '{} status {}'.format(_unit_name(unit), ' '.join([value_text(x + status_byte * 8, 'status') for x in status_list])) - - if condition == 1: - unit = condition_bytes[1] - hp_index = condition_bytes[3] - return '{} hp below index {} /* {} */'.format(_unit_name(unit), value_text(hp_index), rom.ai_hp_thresholds[hp_index]) - - if condition == 2: - flag_type = ('reaction' if condition_bytes[2] else 'condition') - flag_value = condition_bytes[3] - return '{} flag {}'.format(flag_type, flag_value) - - if condition == 3: - if condition_bytes[2] == 0: - alive_or_dead = 'alive' - elif condition_bytes[2] == 1: - alive_or_dead = 'dead' - else: - alive_or_dead = 'alivedeadmysterious {}'.format(condition_bytes[2]) - return '{} {}'.format(_unit_name(condition_bytes[1]), alive_or_dead) - - if condition == 4: - if condition_bytes[2] == 0: - alive_or_dead = 'alive but not only type alive' - elif condition_bytes[2] == 1: - alive_or_dead = 'dead' - elif condition_bytes[2] == 2: - alive_or_dead = 'only type alive' - else: - alive_or_dead = 'alivedeadmysterious {}'.format(condition_bytes[2]) - monster_name = ff4struct.text.decode(rom.text_monster_names[condition_bytes[3]]) - return 'monster {} {} // {}'.format(value_text(condition_bytes[3]), alive_or_dead, monster_name) - - if condition == 5: - return 'formation ${:2X}'.format((condition_bytes[2] << 8) | condition_bytes[3]) - - if condition == 6: - return 'all monsters same type as self' - - if condition == 8 or condition == 7: - unit_name = _unit_name(condition_bytes[1]) - if condition == 7: - unit_name = 'anyone ({})'.format(unit_name) - elements = '' - if condition_bytes[3]: - elements = ' '.join([value_text(i, 'element') for i in range(8) if (condition_bytes[3] & (1 << i))]) + ' ' - - if condition_bytes[2] == 194: - command = 'magic' - elif condition_bytes[2] == 222: - command = 'jump' - elif condition_bytes[2] >= 192 and condition_bytes[2] < 192 + 0x19: - command = value_text(condition_bytes[2] - 192, 'command') - else: - command = value_text(condition_bytes[2]) - - return '{} uses {}{}'.format(unit_name, elements, command) - - if condition == 10: - return 'damaged' - - if condition == 11: - return 'alone' - - return ' '.join([value_text(b) for b in condition_bytes]) - -def decompile_ai(rom): - lines = [] - - for i,encoded in enumerate(rom.ai_conditions): - lines.append('ai_condition({}) {{ {} }}'.format(value_text(i), _condition_description(encoded, rom))) - - lines.append('') - - cached_set_descriptions = {} - for i,encoded in enumerate(rom.ai_condition_sets): - if encoded[-1] == 0xFF: - encoded = encoded[:-1] - lines.append('ai_condition_set({}) 
{{'.format(value_text(i))) - descriptions = [] - for c in encoded: - d = _condition_description(rom.ai_conditions[c], rom) - descriptions.append(d) - lines.append(' {} // {}'.format(value_text(c), d)) - cached_set_descriptions[i] = ', '.join(descriptions) - lines.append('}') - lines.append('') - - # cache which monsters use what AI so that we can annotate - using_monsters = {} - for i,encoded in enumerate(rom.monsters): - m = ff4struct.monster.decode(encoded) - if m.attack_sequence is not None: - using_monsters.setdefault(m.attack_sequence, []).append(ff4struct.text.decode(rom.text_monster_names[i])) - if m.reaction_sequence is not None: - using_monsters.setdefault(m.reaction_sequence, []).append(ff4struct.text.decode(rom.text_monster_names[i])) - - for i,encoded in enumerate(rom.ai_groups): - if encoded[-1] == 0xFF: - encoded = encoded[:-1] - if i in using_monsters: - lines.append('// used by: {}'.format(', '.join(using_monsters[i]))) - lines.append('aigroup({}) {{'.format(value_text(i))) - for j in range(0, len(encoded), 2): - if j + 1 >= len(encoded): - lines.append(' script {}'.format(value_text(encoded[j]))) - else: - lines.append(' condition set {} : script {} // {}'.format( - value_text(encoded[j]), value_text(encoded[j+1]), - cached_set_descriptions[encoded[j]]) - ) - lines.append('}') - lines.append('') - - return '\n'.join(lines) \ No newline at end of file diff --git a/worlds/ff4fe/FreeEnterpriseForAP/f4c/decompile_ai_scripts.py b/worlds/ff4fe/FreeEnterpriseForAP/f4c/decompile_ai_scripts.py deleted file mode 100644 index 888604f52f14..000000000000 --- a/worlds/ff4fe/FreeEnterpriseForAP/f4c/decompile_ai_scripts.py +++ /dev/null @@ -1,146 +0,0 @@ -from .decompile_common import value_text -from . import ff4struct -from . import ai_common - -def _decompile_ai_script(script, rom): - script = list(script) - script_lines = [] - chain_starting = False - in_chain = False - last_command_was_action = False - - while script: - b = script.pop(0) - if b in ai_common.COMMANDS: - cmd_data = ai_common.COMMANDS[b] - cmd = cmd_data[0] - if len(cmd_data) > 1: - param_types = cmd_data[1:] - num_params = len(param_types) - raw_params = [script.pop(0) for i in range(num_params)] - params = [decomp(param_types[i], raw_params[i]) for i in range(num_params)] - cmd = cmd.format(*params) - if cmd.startswith('message $'): - msg = ff4struct.text.decode(rom.text_battle[raw_params[0]]) - cmd += ' // "{}"'.format(msg) - elif b <= 0x30: - cmd = 'use {}'.format(value_text(b, 'spell')) - elif b <= 0x5E: - cmd = 'use {} on group'.format(value_text(b - 0x30, 'spell')) - elif b <= 0xBF: - cmd = 'use {}'.format(value_text(b, 'spell')) - elif b <= 0xE7: - cmd = 'use command {}'.format(value_text(b - 0xC0, 'command')) - elif b == 0xFB: - if last_command_was_action: - cmd = '' #compiler automatically handles these 'chain-into' things - else: - cmd = 'chain into' - elif b == 0xFC: - in_chain = False - cmd = '}' - elif b == 0xFE: - if last_command_was_action: - cmd = '' #compiler automatically inserts wait commands - else: - cmd = 'wait' - elif b == 0xFF: - break - else: - cmd = 'unknown command ${:02X}'.format(b) - - if b == 0xFD: - in_chain = True - script_lines.append('chain {') - elif cmd is not None: - if in_chain: - cmd = ' ' + cmd - script_lines.append(cmd) - - if b <= 0xE7 or b == 0xFC: - last_command_was_action = True - else: - last_command_was_action = False - - return '\n'.join(script_lines) - -def decompile_ai_scripts(rom, script_id=None, moon=False): - lines = [] - - if script_id is not None: - script_ids = 
[script_id] if not moon else [] - moon_script_ids = [script_id] if moon else [] - else: - script_ids = range(len(rom.monster_scripts)) - moon_script_ids = range(len(rom.moon_monster_scripts)) - - for script_id in script_ids: - script = rom.monster_scripts[script_id] - lines.append('ai_script(${:02X})'.format(script_id)) - lines.append('{') - compiled_script = _decompile_ai_script(script, rom) - lines.extend([' ' + x for x in compiled_script.split('\n')]) - lines.append('}') - lines.append('') - - for script_id in moon_script_ids: - script = rom.moon_monster_scripts[script_id] - lines.append('ai_script(moon ${:02X})'.format(script_id)) - lines.append('{') - compiled_script = _decompile_ai_script(script, rom) - lines.extend([' ' + x for x in compiled_script.split('\n')]) - lines.append('}') - lines.append('') - - return '\n'.join(lines) - -def decomp(value_type, value): - try: - decomp_func = globals()['decomp_{}'.format(value_type)] - except KeyError: - return decomp_hex(value) - - return decomp_func(value) - -def decomp_hex(b): - return '${:02X}'.format(b) - -def decomp_decimal(b): - return str(b) - -def decomp_races(b): - return ' '.join([value_text(i, 'race', hex=False) for i in range(8) if (b & (1 << i))]) - -def decomp_speed_delta(b): - if b & 0x80: - return '- {}'.format(b & 0x7F) - else: - return '+ {}'.format(b) - -def decomp_elements(b): - return ' '.join([value_text(i, 'element', hex=False) for i in range(8) if (b & (1 << i))]) - -def decomp_music(b): - return value_text(b, 'music') - -def decomp_condition_delta(b): - if b == 1: - return '+ 1' - elif b & 0x80: - return str(b & 0x7F) - else: - return '? ${:02X}'.format(b) - -def decomp_reaction(b): - if b & 0x80: - return decomp_hex(b & 0x7F) - else: - return '? ' + decomp_hex(b) - -def decomp_target(b): - if b <= 0x15: - return value_text(b, 'actor') - elif b in ai_common.TARGETS: - return ai_common.TARGETS[b] - else: - return '? ' + decomp_hex(b) diff --git a/worlds/ff4fe/FreeEnterpriseForAP/f4c/decompile_common.py b/worlds/ff4fe/FreeEnterpriseForAP/f4c/decompile_common.py deleted file mode 100644 index 1a943653ad1c..000000000000 --- a/worlds/ff4fe/FreeEnterpriseForAP/f4c/decompile_common.py +++ /dev/null @@ -1,18 +0,0 @@ -from . import consts - -_DIRECTIONS = ['up', 'right', 'down', 'left'] - -def value_text(value, const_family=None, hex=True): - if const_family == 'direction': - return _DIRECTIONS[value] - - if const_family is not None: - name = consts.get_name(value, const_family) - if name is not None: - return '#{}'.format(name) - - if hex: - return '${:02X}'.format(value) - else: - return '{}'.format(value) - diff --git a/worlds/ff4fe/FreeEnterpriseForAP/f4c/decompile_drop_tables.py b/worlds/ff4fe/FreeEnterpriseForAP/f4c/decompile_drop_tables.py deleted file mode 100644 index f1ace16cff24..000000000000 --- a/worlds/ff4fe/FreeEnterpriseForAP/f4c/decompile_drop_tables.py +++ /dev/null @@ -1,19 +0,0 @@ -from . 
import ff4struct -from .decompile_common import value_text - -def decompile_drop_tables(rom): - lines = [] - for drop_table_id,byte_list in enumerate(rom.drop_tables): - dt = ff4struct.drop_table.decode(byte_list) - - lines.append('droptable(${:02X}) {{'.format(drop_table_id)) - for rarity in ['common', 'uncommon', 'rare', 'mythic']: - item = getattr(dt, rarity) - if item is None: - lines.append(' {} none'.format(rarity)) - else: - lines.append(' {} {}'.format(rarity, value_text(item, 'item'))) - lines.append('}') - lines.append('') - - return '\n'.join(lines) diff --git a/worlds/ff4fe/FreeEnterpriseForAP/f4c/decompile_event_calls.py b/worlds/ff4fe/FreeEnterpriseForAP/f4c/decompile_event_calls.py deleted file mode 100644 index 9b22d3e8fcbc..000000000000 --- a/worlds/ff4fe/FreeEnterpriseForAP/f4c/decompile_event_calls.py +++ /dev/null @@ -1,68 +0,0 @@ -from . import ff4struct -from . import decompile_common -from . import event_common - -def decompile_event_call(encoded_event_call, block_parameters=None, map_messages=None): - lines = [] - - if block_parameters: - lines.append("eventcall({})".format(block_parameters)) - lines.append("{") - else: - lines.append("eventcall {") - - if encoded_event_call: - event_call = ff4struct.event_call.decode(encoded_event_call) - - for case in event_call.cases: - event_name = decompile_common.value_text(case.event, 'event') - event_description = event_common.DEFAULT_EVENT_DESCRIPTIONS[case.event] - if map_messages: - #if case.event >= 0x01 and case.event <= 0x03: - # # in vanilla FF4 these events display messages from event call params - # event_description += ' - "{}"'.format(map_messages[event_call.parameters[case.event - 1]]) - if case.event >= 0x27 and case.event <= 0x2E: - # in vanilla FF4 these events display map messages - event_description += ' - "{}"'.format(map_messages[case.event - 0x27]) - - if case.conditions: - conditions = [] - for c in case.conditions: - flag_name = decompile_common.value_text(c.flag, 'flag') - if c.value: - conditions.append(flag_name) - else: - conditions.append('not {}'.format(flag_name)) - - conditions = ', '.join(conditions) - lines.append(" if {}:".format(conditions)) - lines.append(" {} //{}".format(event_name, event_description)) - else: - if len(event_call.cases) > 1: - lines.append(" else:") - lines.append(" {} //{}".format(event_name, event_description)) - else: - lines.append(" {} //{}".format(event_name, event_description)) - - if event_call.parameters: - lines.append(' messages:') - for m in event_call.parameters: - if map_messages and m < len(map_messages): - lines.append(' ${:02X} // "{}"'.format(m, map_messages[m])) - else: - lines.append(' ${:02X}'.format(m)) - - lines.append('}') - return '\n'.join(lines) - - -def decompile_event_calls(rom): - lines = [] - for i,encoded_event_call in enumerate(rom.event_calls): - if not encoded_event_call: - continue - - lines.append(decompile_event_call(encoded_event_call, "${:02X}".format(i))) - lines.append('') - - return "\n".join(lines) diff --git a/worlds/ff4fe/FreeEnterpriseForAP/f4c/decompile_events.py b/worlds/ff4fe/FreeEnterpriseForAP/f4c/decompile_events.py deleted file mode 100644 index 1993022436f5..000000000000 --- a/worlds/ff4fe/FreeEnterpriseForAP/f4c/decompile_events.py +++ /dev/null @@ -1,383 +0,0 @@ -import re - -from . import ff4bin -from . import ff4struct -from . import consts -from . import hints -from . import event_common -from . 
import decompile_common - -def decompile_events(rom, event_id=None): - if event_id is None: - event_ids = range(len(rom.event_scripts)) - elif type(event_id) in [list, tuple]: - event_ids = event_id - else: - event_ids = [event_id] - - output_lines = [] - - for event_id in event_ids: - event_script = rom.event_scripts[event_id] - - sections = [] - current_section = None - - if event_script: - num_actions_left_in_block = 0 - indent = '' - has_else_clause = False - else_source_section = None - - while True: - cmd_code = event_script[0] - event_script = event_script[1:] - - if current_section is None or cmd_code == 0xFE or (cmd_code == 0xFF and has_else_clause): - current_section = { - 'commands' : [], - 'else' : False, - 'activations' : set(), - 'placements' : set(), - 'map_messages' : set(), - 'map' : None - } - sections.append(current_section) - - if cmd_code == 0xFF: - if has_else_clause: - current_section['else'] = True - current_section['else_source'] = else_source_section - has_else_clause = False - else: - break - - if cmd_code == 0xEB: - num_iterations, num_actions_left_in_block = event_script[:2] - event_script = event_script[2:] - current_section['commands'].append('batch {} {{'.format(num_iterations)) - if num_actions_left_in_block == 0: - current_section['commands'].append('}') - else: - indent = ' ' - else: - if event_common.is_placement_command_code(cmd_code): - placement = (cmd_code & 0xF0) >> 4 - current_section['placements'].add(placement) - sub_cmd = cmd_code & 0xF - if sub_cmd in event_common.PLACEMENT_COMMANDS: - current_section['commands'].append(indent + 'p {} {}'.format(placement, event_common.PLACEMENT_COMMANDS[sub_cmd])) - elif cmd_code in event_common.COMMANDS: - cmd_data = event_common.COMMANDS[cmd_code] - - param_data = cmd_data[1:] - num_params = len(param_data) - raw_params = list(event_script[:num_params]) - event_script = event_script[num_params:] - - if cmd_code == 0xFE: - # load map command - raw_params[0] += ((raw_params[3] & 0x80) << 1) - # extract facing from X coordinate and jam into flags param - if raw_params[0] < 0xFB or raw_params[0] > 0xFF: - raw_params[3] = (raw_params[3], ((raw_params[1] & 0xC0) >> 6)) - raw_params[1] = raw_params[1] & 0x3F - else: - # no facing direction on overworld/underworld/moon map params - raw_params[3] = (raw_params[3], 0) - current_section['map'] = raw_params[0] - elif cmd_code == 0xEF: - # map message command - current_section['map_messages'].add(raw_params[0]) - elif cmd_code == 0xF1 or cmd_code == 0xF8: - # high bank message or confirm message - raw_params[0] += 0x100 - - if cmd_code == 0xDE and raw_params[0] == 0xFE: - decomp_command = 'restore hp' - elif cmd_code == 0xDF and raw_params[0] == 0xFE: - decomp_command = 'restore mp' - else: - decomp_params = [decomp(param_data[i], raw_params[i]) for i in range(len(raw_params))] - decomp_command = indent + cmd_data[0].format(*decomp_params) - - if cmd_code == 0xE3 and raw_params[0] != 0: - decomp_command = decomp_command.replace('clear status', 'clear status except') - - - comment = None - if cmd_code == 0xF0 or cmd_code == 0xF1: - comment = make_comment_from_text(rom.text_bank1[raw_params[0]]) - elif cmd_code == 0xF6: - comment = make_comment_from_text(rom.text_bank3[raw_params[0]]) - - if comment: - comment = comment.partition('\n')[0] - decomp_command += ' // ' + comment - - current_section['commands'].append(decomp_command) - - if cmd_code == 0xF8: - has_else_clause = True - else_source_section = current_section - - if cmd_code == 0xF4 or cmd_code == 0xF5: - 
current_section['activations'].add(raw_params[0]) - - if num_actions_left_in_block > 0: - num_actions_left_in_block -= 1 - if num_actions_left_in_block == 0: - current_section['commands'].append('}') - indent = '' - - # annotate code sections if we need info from the map - for section in sections: - if section['placements'] or section['map_messages'] or section['activations']: - build_metadata(rom) - - if section['else']: - section['map'] = section['else_source']['map'] - - if section['map'] is None: - hint = hints.get_event_map(event_id) - if hint is not None: - section['map'] = hint - map_method = 'hinted' - else: - section['map'] = lookup_map(rom, event_id) - map_method = 'autodetected' - - if section['map']: - map_name = consts.get_name(section['map'], 'map') - if map_name is None: - map_name = ('${:02X}'.format(section['map'])) - else: - map_name = '#' + map_name - section['commands'].insert(0, '// {} map {}'.format(map_method, map_name)) - - if section['map'] is None: #previous lookup may fail - section['commands'].insert(0, '// could not auto-detect map') - continue - - if section['placements']: - placement_names = generate_placement_names(section['map'], section['placements']) - - const_lines = [] - const_lines.append('consts(placement) {') - for p in placement_names: - const_lines.append(' {} {}'.format(p, placement_names[p])) - const_lines.append('}') - - for i,cmd in enumerate(section['commands']): - m = re.search(r'^(?P<indent>\s*)p (?P<placement>\d+)', cmd) - if m: - p = int(m.group('placement')) - if p in placement_names: - placement_name = placement_names[p] - if placement_name: - section['commands'][i] = '{}p #{}'.format(m.group('indent'), placement_name) + cmd[len(m.group(0)):] - - section['commands'] = const_lines + section['commands'] - - map_message_comments = generate_map_message_comments(section['map'], section['map_messages']) - for i,cmd in enumerate(section['commands']): - m = re.search(r'map message (?P<message>\d+)', cmd) - if m: - section['commands'][i] = cmd + ' // ' + map_message_comments[int(m.group('message'))] - continue - - m = re.search(r'activate (?P<npc>\$[0-9A-Fa-f]+)', cmd) - if m: - npc_number = int(m.group('npc')[1:], 16) - if section['map'] >= 0x100 or _MAP_INFOS[section['map']].underground_npcs: - npc_number += 0x100 - npc_name = consts.get_name(npc_number, 'npc') - if npc_name: - section['commands'][i] = section['commands'][i].replace(m.group('npc'), '#{}'.format(npc_name)) - - - output_lines.append('event(${:02X}) //{}'.format(event_id, event_common.DEFAULT_EVENT_DESCRIPTIONS[event_id])) - output_lines.append('{') - for section in sections: - if section['else']: - output_lines.append('cancel:') - for cmd in section['commands']: - output_lines.append(' ' + cmd) - output_lines.append('}') - output_lines.append('') - - return '\n'.join(output_lines) - - -#----------------------------------------------------------------------------- - -def make_comment_from_text(encoded_text): - if type(encoded_text) is str: - t = encoded_text - else: - t = ff4struct.text.decode(encoded_text) - - t = t.replace('\n', ' ', 1).partition('\n')[0] - return '"{}"'.format(t) - - -_EVENT_SOURCE_MAPS = {} -_MAP_PLACEMENTS = {} -_MAP_MESSAGES = {} -_MAP_INFOS = {} -_EVENT_MESSAGES = {} - -def build_metadata(rom): - if _EVENT_SOURCE_MAPS: - return - - for map_id,data in enumerate(rom.map_infos): - map_info = ff4struct.map_info.decode(data) - _MAP_INFOS[map_id] = map_info - - event_calls = [] - triggers = ff4struct.trigger.decode_set(rom.map_trigger_sets[map_id]) - for t in triggers: - if t.type == 
ff4struct.trigger.EVENT: - event_call = ff4struct.event_call.decode(rom.event_calls[t.event_call]) - if event_call: - event_calls.append(event_call) - - npc_offset = 0x100 if (map_id >= 0x100 or map_info.underground_npcs) else 0 - - placements = ff4struct.npc_placement.decode_set(rom.placement_groups[map_info.placements + npc_offset]) - for p in placements: - npc = p.npc + npc_offset - event_call = ff4struct.event_call.decode(rom.npc_event_calls[npc]) - if event_call is not None: - event_calls.append(event_call) - _MAP_PLACEMENTS[map_id] = placements - - for event_call in event_calls: - for case in event_call.cases: - _EVENT_SOURCE_MAPS.setdefault(case.event, set()).add(map_id) - if event_call.parameters: - if case.event in _EVENT_MESSAGES: - _EVENT_MESSAGES[case.event] = None - else: - _EVENT_MESSAGES[case.event] = event_call.parameters - - _MAP_MESSAGES[map_id] = ff4struct.text.decode(rom.text_bank2[map_id]) - - for i in _EVENT_SOURCE_MAPS: - _EVENT_SOURCE_MAPS[i] = list(_EVENT_SOURCE_MAPS[i]) - - -def lookup_map(rom, event_id): - if event_id not in _EVENT_SOURCE_MAPS: - return None - - map_set = _EVENT_SOURCE_MAPS[event_id] - if len(map_set) == 1: - return map_set[0] - else: - return None - -def generate_placement_names(map_id, placements): - npc_offset = 0x100 if (map_id >= 0x100 or _MAP_INFOS[map_id].underground_npcs) else 0 - npcs = {} - for p in range(len(_MAP_PLACEMENTS[map_id])): - npc_name = consts.get_name(npc_offset + _MAP_PLACEMENTS[map_id][p].npc, 'npc') - npcs.setdefault(npc_name, []).append(p) - - result = {} - for npc_name in npcs: - placement_list = sorted(npcs[npc_name]) - if len(placement_list) == 1: - result[placement_list[0]] = npc_name - else: - for i,p in enumerate(placement_list): - result[p] = '{}_{}'.format(npc_name, chr(ord('A') + i)) - - return {x : result[x] for x in result if x in placements} - - -def generate_map_message_comments(map_id, messages): - if map_id not in _MAP_MESSAGES: - return {} - - map_messages = _MAP_MESSAGES[map_id] - if type(map_messages) is str: - map_messages = [map_messages] - - return {x: make_comment_from_text(map_messages[x]) for x in messages} - - -#----------------------------------------------------------------------------- - -def decomp(value_type, value): - try: - decomp_func = globals()['decomp_{}'.format(value_type)] - except KeyError: - return decomp_hex(value) - - return decomp_func(value) - -def decomp_hex(b): - return '${:02X}'.format(b) - -def decomp_decimal(b): - return str(b) - -def decomp_const(family): - def func(b): - identifier = consts.get_name(b, family) - if identifier: - return '#{}'.format(identifier) - else: - return decomp_hex(b) - - return func - -def decomp_status(b): - fields = [decomp_const('status')(i) for i in range(8) if (b & (1 << i))] - return ' '.join(fields) - -decomp_actor = decomp_const('actor') - -def decomp_hpmp(b): - return str(b * 10) - -decomp_item = decomp_const('item') -decomp_spell = decomp_const('spell') -decomp_spellset = decomp_const('spellset') - -def decomp_gp(b): - return str(b * 100) - -decomp_music = decomp_const('music') -decomp_formation = decomp_hex -decomp_shop = decomp_hex -decomp_message = decomp_hex -decomp_flag = decomp_const('flag') -decomp_npc = decomp_hex -decomp_sound = decomp_const('sound') -decomp_vfx = decomp_const('vfx') -decomp_map = decomp_const('map') - -def decomp_mapflags(flags): - (b, facing) = flags - - params = [] - - if facing != 0: - params.append('facing ' + decompile_common.value_text(facing, 'direction')) - - if b & 0x20: - params.append('no 
transition') - - vehicle = b & 0b00011111 - - if vehicle in event_common.VEHICLES: - params.append(event_common.VEHICLES[vehicle]) - - if (b & 0x40): - params.append('no launch') - - return ' '.join(params) - diff --git a/worlds/ff4fe/FreeEnterpriseForAP/f4c/decompile_formations.py b/worlds/ff4fe/FreeEnterpriseForAP/f4c/decompile_formations.py deleted file mode 100644 index afdfd2bea453..000000000000 --- a/worlds/ff4fe/FreeEnterpriseForAP/f4c/decompile_formations.py +++ /dev/null @@ -1,76 +0,0 @@ -from . import ff4struct - -def decompile_formations(rom): - lines = [] - for formation_id,encoded_formation in enumerate(rom.formations): - formation = ff4struct.formation.decode(encoded_formation) - - lines.append('formation(${:02X})'.format(formation_id)) - lines.append('{') - - lines.append(' monsters {') - for i in range(3): - if formation.monster_types[i] == 0xFF: - for later_type in formation.monster_types[i+1:]: - if later_type != 0xFF: - lines.append(' none') - break - else: - monster_name = ff4struct.text.decode(rom.text_monster_names[formation.monster_types[i]]) - lines.append(' ${:02X} x {} // {}'.format( - formation.monster_types[i], - formation.monster_qtys[i], - monster_name - )) - lines.append(' }') - - if formation.calling: - lines.append(" calling") - - if formation.transforming: - lines.append(" transforming") - - lines.append(' arrangement ${:02X}'.format(formation.arrangement)) - - if formation.back_attack: - lines.append(" back attack") - - if formation.boss_death: - lines.append(" boss death") - - if True in formation.eggs: - eggs_str = ' '.join(['yes' if b else 'no' for b in formation.eggs]) - lines.append(" eggs ({})".format(eggs_str)) - - if formation.no_flee: - lines.append(" can't run") - if formation.no_gameover: - lines.append(" no gameover") - - if formation.music == ff4struct.formation.BOSS_MUSIC: - lines.append(" boss music") - elif formation.music == ff4struct.formation.FIEND_MUSIC: - lines.append(" fiend music") - elif formation.music == ff4struct.formation.CONTINUE_MUSIC: - lines.append(" continue music") - - if formation.character_battle: - lines.append(" character battle") - - if formation.auto_battle: - lines.append(" auto battle") - - if formation.floating_enemies: - lines.append(" floating enemies") - - if formation.transparent: - lines.append(" transparent") - - lines.append(" gfx bits {}".format(formation.gfx_bits)) - - lines.append(" cursor graph ${:02X}".format(formation.cursor_graph_index)) - - lines.append("}") - lines.append("") - - return '\n'.join(lines) diff --git a/worlds/ff4fe/FreeEnterpriseForAP/f4c/decompile_map_grids.py b/worlds/ff4fe/FreeEnterpriseForAP/f4c/decompile_map_grids.py deleted file mode 100644 index 888b09c53c98..000000000000 --- a/worlds/ff4fe/FreeEnterpriseForAP/f4c/decompile_map_grids.py +++ /dev/null @@ -1,28 +0,0 @@ -from . import decompile_common -from . 
import ff4struct - -def decompile_map_grids(rom): - lines = [] - - grid_maps = {} - for map_id,encoded_map_info in enumerate(rom.map_infos): - map_info = ff4struct.map_info.decode(encoded_map_info) - grid_maps.setdefault(map_info.grid | (map_id & 0x100), []).append(map_id) - - for i,byte_list in enumerate(rom.map_grids): - if not byte_list: - continue - - map_grid = ff4struct.map_grid.decode(byte_list) - using_map_names = [decompile_common.value_text(m, 'map') for m in grid_maps.setdefault(i, [])] - - lines.append('mapgrid(${:02X}) // {}'.format(i, ' '.join(using_map_names))) - lines.append('{') - lines.append(' // ' + ' '.join(['{:2}'.format(x) for x in range(32)])) - lines.append(' //' + '-' * (3 * 32)) - for y in range(32): - lines.append(' ' + ' '.join(['{:02X}'.format(map_grid[x][y]) for x in range(32)]) + ' //| {:2}'.format(y)) - lines.append('}') - lines.append('') - - return '\n'.join(lines) diff --git a/worlds/ff4fe/FreeEnterpriseForAP/f4c/decompile_map_infos.py b/worlds/ff4fe/FreeEnterpriseForAP/f4c/decompile_map_infos.py deleted file mode 100644 index a1be1bf23068..000000000000 --- a/worlds/ff4fe/FreeEnterpriseForAP/f4c/decompile_map_infos.py +++ /dev/null @@ -1,72 +0,0 @@ -from . import decompile_common -from . import ff4struct - - - -def decompile_map_infos(rom): - lines = [] - for map_id,byte_list in enumerate(rom.map_infos): - map_info = ff4struct.map_info.decode(byte_list) - lines.append('map({})'.format(decompile_common.value_text(map_id, 'map'))) - lines.append('{') - lines.append(' battle background ${:02X} {}'.format( - map_info.battle_background, - 'alternate' if map_info.battle_background_alt_palette else '' - )) - lines.append(' warp {}'.format('enabled' if map_info.can_warp else 'disabled')) - lines.append(' exit {}'.format('enabled' if map_info.can_exit else 'disabled')) - lines.append(' magnetic {}'.format('enabled' if map_info.magnetic else 'disabled')) - lines.append(' grid ${:02X}'.format(map_info.grid)) - if (map_info.grid | (map_id & 0x100)) != map_id: - lines.append(' // ! 
this map\'s grid ID does not match its map ID') - lines.append(' tileset ${:02X}'.format(map_info.tileset)) - lines.append(' placement group ${:02X}'.format(map_info.placements)) - lines.append(' border tile ${:02X}'.format(map_info.border_tile)) - lines.append(' palette ${:02X}'.format(map_info.palette)) - lines.append(' npc palettes ${:02X} ${:02X}'.format(map_info.npc_palette_0, map_info.npc_palette_1)) - lines.append(' music {}'.format(decompile_common.value_text(map_info.music, 'music'))) - - bg_grid = map_info.bg_grid - if bg_grid > 0: - bg_grid |= (map_id & 0x100) - - background_props = ['grid ${:02X}'.format(bg_grid)] - if map_info.bg_translucent: - background_props.append('translucent') - if map_info.bg_scroll_horizontal or map_info.bg_scroll_vertical: - background_props.append('scroll') - if map_info.bg_scroll_vertical and map_info.bg_scroll_horizontal: - background_props.append('both') - elif map_info.bg_scroll_vertical: - background_props.append('vertical') - elif map_info.bg_scroll_horizontal: - background_props.append('horizontal') - background_props.append('direction {}'.format(decompile_common.value_text(map_info.bg_direction, 'direction'))) - background_props.append('speed {}'.format(map_info.bg_speed)) - - lines.append(' background {}'.format(' '.join(background_props))) - - if map_info.underground_npcs: - lines.append(' underground npcs') - - if map_info.underground_map_grid: - lines.append(' underground map grid') - - name_comment = '' - if map_info.name < len(rom.text_map_names): - name_comment = ' // ' + ff4struct.text.decode(rom.text_map_names[map_info.name]) - lines.append(' name index ${:02X}{}'.format(map_info.name, name_comment)) - - # treasure index is autocalculated; too fragile to manually adjust - lines.append(' // treasure index {}'.format(map_info.treasure_index)) - - if map_info.bit75: - lines.append(' // bit75 true') - - if map_info.bits81to86: - lines.append(' // bits81to86 ${:02X}'.format(map_info.bits81to86)) - - lines.append('}') - lines.append('') - - return "\n".join(lines) diff --git a/worlds/ff4fe/FreeEnterpriseForAP/f4c/decompile_monsters.py b/worlds/ff4fe/FreeEnterpriseForAP/f4c/decompile_monsters.py deleted file mode 100644 index 99162e3a263a..000000000000 --- a/worlds/ff4fe/FreeEnterpriseForAP/f4c/decompile_monsters.py +++ /dev/null @@ -1,94 +0,0 @@ -from .decompile_common import value_text -from . 
import ff4struct - -def _get_elements_string(elements): - items = [] - for i in range(8): - if i in elements: - items.append(value_text(i, 'element')) - - return ' '.join(items) - -def _get_statuses_string(statuses): - items = [] - for i in range(16): - if i in statuses: - items.append(value_text(i, 'status')) - - return ' '.join(items) - -def _get_races_string(races): - items = [] - for i in range(8): - if i in races: - items.append(value_text(i, 'race')) - - return ' '.join(items) - -def decompile_monsters(rom): - lines = [] - for monster_id,encoded_monster in enumerate(rom.monsters): - monster = ff4struct.monster.decode(encoded_monster) - - monster_name = ff4struct.text.decode(rom.text_monster_names[monster_id]) - - lines.append('monster(${:02X}) // {}'.format(monster_id, monster_name)) - lines.append('{') - if monster.boss: - lines.append(" boss") - lines.append(" level {}".format(monster.level)) - lines.append(" hp {}".format(monster.hp)) - lines.append(" gp {}".format(rom.monster_gp[monster_id])) - lines.append(" xp {}".format(rom.monster_xp[monster_id])) - lines.append(" attack index ${:02X} // {}".format(monster.attack_index, rom.monster_stats[monster.attack_index])) - lines.append(" defense index ${:02X} // {}".format(monster.defense_index, rom.monster_stats[monster.defense_index])) - lines.append(" magic defense index ${:02X} // {}".format(monster.magic_defense_index, rom.monster_stats[monster.magic_defense_index])) - lines.append(" speed index ${:02X} // {}".format(monster.speed_index, rom.monster_speeds[monster.speed_index])) - lines.append(" drop index ${:02X}".format(monster.drop_index)) - lines.append(" drop rate ${:02X}".format(monster.drop_rate)) - lines.append(" attack sequence ${:02X}".format(monster.attack_sequence)) - - if monster.attack_elements: - lines.append(" attack element {}".format(_get_elements_string(monster.attack_elements))) - if monster.attack_statuses: - lines.append(" attack status {}".format(_get_statuses_string(monster.attack_statuses))) - if monster.resist_elements: - lines.append(" resist element {}".format(_get_elements_string(monster.resist_elements))) - if monster.resist_statuses: - lines.append(" resist status {}".format(_get_statuses_string(monster.resist_statuses))) - if monster.weak_elements: - lines.append(" weak element {}".format(_get_elements_string(monster.weak_elements))) - if monster.spell_power is not None: - lines.append(" spell power {}".format(monster.spell_power)) - if monster.races: - lines.append(" race {}".format(_get_races_string(monster.races))) - if monster.reaction_sequence is not None: - lines.append(" reaction sequence ${:02X}".format(monster.reaction_sequence)) - - gfx = ff4struct.monster_gfx.decode(rom.monster_gfx[monster_id]) - lines.append(" gfx {") - lines.append(" size ${:02X}".format(gfx.size)) - lines.append(" palette ${:02X}".format(gfx.palette)) - lines.append(" pointer ${:04X}".format(gfx.pointer)) - lines.append(" }") - lines.append("}") - lines.append('') - - return '\n'.join(lines) - -def decompile_monster_stats(rom): - lines = [] - - for i,encoded in enumerate(rom.monster_stats): - lines.append('monster_stat({}) {{ {:3}x {:3}% {:3} }}'.format( - value_text(i), encoded[0], encoded[1], encoded[2] - )) - - lines.append('') - for i,encoded in enumerate(rom.monster_speeds): - lines.append('monster_speed({}) {{ {:3} - {:3} }}'.format( - value_text(i), encoded[0], encoded[1] - )) - - return '\n'.join(lines) - diff --git a/worlds/ff4fe/FreeEnterpriseForAP/f4c/decompile_npcs.py 
b/worlds/ff4fe/FreeEnterpriseForAP/f4c/decompile_npcs.py deleted file mode 100644 index 38df7bdab5fc..000000000000 --- a/worlds/ff4fe/FreeEnterpriseForAP/f4c/decompile_npcs.py +++ /dev/null @@ -1,91 +0,0 @@ -from . import ff4bin -from . import ff4struct -from . import decompile_common -from .decompile_event_calls import decompile_event_call - -def decompile_npcs(rom): - lines = [] - num_npcs = len(rom.npc_sprites) - - npc_maps = {} - for map_id,data in enumerate(rom.map_infos): - map_info = ff4struct.map_info.decode(data) - npc_offset = 0x100 if (map_id >= 0x100 or map_info.underground_npcs) else 0 - placements = ff4struct.npc_placement.decode_set(rom.placement_groups[map_info.placements + npc_offset]) - for p in placements: - npc = p.npc + npc_offset - npc_maps.setdefault(npc, []).append(map_id) - - cached_map_messages = {} - - for npc_id in range(num_npcs): - lines.append("npc({})".format(decompile_common.value_text(npc_id, 'npc'))) - lines.append("{") - sprite = rom.npc_sprites[npc_id] - lines.append(" sprite {}".format(decompile_common.value_text(sprite, 'sprite'))) - active = bool(rom.npc_active_flags[int(npc_id / 8)] & (1 << (npc_id % 8))) - lines.append(" default {}".format('active' if active else 'inactive')) - - map_messages = None - if npc_id in npc_maps and len(npc_maps[npc_id]) == 1: - map_id = npc_maps[npc_id][0] - if map_id not in cached_map_messages: - texts = ff4struct.text.decode(rom.text_bank2[map_id]) - if type(texts) is str: - texts = [texts] - texts = [t.replace('\n', ' ', 1).partition('\n')[0] for t in texts] - cached_map_messages[map_id] = texts - map_messages = cached_map_messages[map_id] - - event_call_text = decompile_event_call(rom.npc_event_calls[npc_id], map_messages=map_messages) - lines.extend([' ' + l for l in event_call_text.split('\n')]) - lines.append("}") - lines.append('') - - return "\n".join(lines) - - -def decompile_map_placements(rom): - lines = [] - - using_maps = {} - for map_id,encoded_map_info in enumerate(rom.map_infos): - map_info = ff4struct.map_info.decode(encoded_map_info) - group_number = map_info.placements - if map_id >= 0x100 or map_info.underground_npcs: - group_number += 0x100 - - using_maps.setdefault(group_number, []).append(map_id) - - for group_number,byte_list in enumerate(rom.placement_groups): - placements = ff4struct.npc_placement.decode_set(byte_list) - - if group_number in using_maps: - map_comment = ' //' + ', '.join([decompile_common.value_text(m, 'map') for m in using_maps[group_number]]) - else: - map_comment = '' - - for placement_id,placement in enumerate(placements): - npc_id = placement.npc | (group_number & 0x100) - - lines.append('placement({} {}) {}'.format(decompile_common.value_text(group_number), placement_id, map_comment)) - lines.append('{') - lines.append(' npc {}'.format(decompile_common.value_text(npc_id, 'npc'))) - lines.append(' position {} {}'.format(placement.x, placement.y)) - lines.append(' walking {}'.format('on' if placement.walks else 'off')) - lines.append(' {}'.format('intangible' if placement.intangible else 'tangible')) - lines.append(' face {}'.format(decompile_common.value_text(placement.facing, 'direction'))) - lines.append(' palette {}'.format(placement.palette)) - lines.append(' turning {}'.format('on' if placement.turns else 'off')) - lines.append(' marching {}'.format('on' if placement.marches else 'off')) - lines.append(' speed {}'.format(placement.speed)) - #lines.append(' // bit13 {}'.format(placement.bit13)) - #lines.append(' // bit14 {}'.format(placement.bit14)) - #lines.append(' 
// bit21 {}'.format(placement.bit21)) - #lines.append(' // bit22 {}'.format(placement.bit22)) - lines.append('}') - lines.append('') - - return '\n'.join(lines) - - diff --git a/worlds/ff4fe/FreeEnterpriseForAP/f4c/decompile_shops.py b/worlds/ff4fe/FreeEnterpriseForAP/f4c/decompile_shops.py deleted file mode 100644 index cddda7099454..000000000000 --- a/worlds/ff4fe/FreeEnterpriseForAP/f4c/decompile_shops.py +++ /dev/null @@ -1,14 +0,0 @@ -from .decompile_common import value_text - -def decompile_shops(rom): - lines = [] - for shop_id,byte_list in enumerate(rom.shops): - lines.append('shop(${:02X})'.format(shop_id)) - lines.append('{') - for b in byte_list: - if b < 0xFF: - lines.append(' {}'.format(value_text(b, 'item'))) - lines.append('}') - lines.append('') - - return '\n'.join(lines) diff --git a/worlds/ff4fe/FreeEnterpriseForAP/f4c/decompile_spell_sets.py b/worlds/ff4fe/FreeEnterpriseForAP/f4c/decompile_spell_sets.py deleted file mode 100644 index dd728dc1b3af..000000000000 --- a/worlds/ff4fe/FreeEnterpriseForAP/f4c/decompile_spell_sets.py +++ /dev/null @@ -1,28 +0,0 @@ -from . import ff4struct -from . import decompile_common - -def decompile_spell_sets(rom): - lines = [] - num_spell_sets = len(rom.spell_sets) - - for i in range(num_spell_sets): - spell_set = ff4struct.spell_set.decode(rom.spell_sets[i], rom.learned_spells[i]) - lines.append('spellset({}) {{'.format(decompile_common.value_text(i, 'spellset'))) - lines.append(' initial {') - for s in spell_set.initial_spells: - lines.append(' {}'.format(decompile_common.value_text(s, 'spell'))) - lines.append(' }') - lines.append(' learned {') - for lv in sorted(spell_set.learned_spells): - s = spell_set.learned_spells[lv] - if type(s) in (list, tuple): - spell_list = s - for s in spell_list: - lines.append(' {} {}'.format(lv, decompile_common.value_text(s, 'spell'))) - else: - lines.append(' {} {}'.format(lv, decompile_common.value_text(s, 'spell'))) - lines.append(' }') - lines.append('}') - lines.append('') - - return '\n'.join(lines) diff --git a/worlds/ff4fe/FreeEnterpriseForAP/f4c/decompile_spells.py b/worlds/ff4fe/FreeEnterpriseForAP/f4c/decompile_spells.py deleted file mode 100644 index 35ad8e13364c..000000000000 --- a/worlds/ff4fe/FreeEnterpriseForAP/f4c/decompile_spells.py +++ /dev/null @@ -1,32 +0,0 @@ -from . 
import ff4struct -from .decompile_common import value_text - -def decompile_spells(rom): - NUM_PLAYER_SPELLS = len(rom.text_spell_names) - lines = [] - for spell_id,spell in enumerate(rom.spells): - if spell_id == 0: - continue - - lines.append(f'spell(${spell_id:02X}) // {value_text(spell_id, "spell")}') - lines.append('{') - - sp = ff4struct.spell.decode(spell) - lines.append(f' casting time {sp.casting_time}') - lines.append(f' target ${sp.target:02X}') - - signed_param = (sp.param if sp.param < 128 else sp.param - 256) - lines.append(f' param ${sp.param:02X} // {sp.param}' + (f' / {signed_param}' if signed_param < 0 else '')) - lines.append(f' hit {sp.hit}') - lines.append(f' boss {sp.boss}') - lines.append(f' effect ${sp.effect:02X}') - lines.append(f' damage {sp.damage}') - lines.append(f' element ${sp.element:02X}') - lines.append(f' impact {sp.impact}') - lines.append(f' mp cost {sp.mp_cost}') - lines.append(f' ignore wall {sp.ignore_wall}') - lines.append('}') - lines.append('') - - return '\n'.join(lines) - diff --git a/worlds/ff4fe/FreeEnterpriseForAP/f4c/decompile_text.py b/worlds/ff4fe/FreeEnterpriseForAP/f4c/decompile_text.py deleted file mode 100644 index 65bd2b54bdd6..000000000000 --- a/worlds/ff4fe/FreeEnterpriseForAP/f4c/decompile_text.py +++ /dev/null @@ -1,79 +0,0 @@ -from. import ff4struct -from. import consts -from .decompile_common import value_text - -def decompile_text(rom): - lines = [] - - for i,encoded_text in enumerate(rom.text_bank1): - lines.append("--- bank 1 message ${:02X} ---".format(i)) - lines.append(ff4struct.text.decode(encoded_text, consts)) - lines.append('') - - for i,encoded_texts in enumerate(rom.text_bank2): - map_name = consts.get_name(i, 'map') - map_name = '#{}'.format(map_name) if map_name else '${:02X}'.format(i) - texts = ff4struct.text.decode(encoded_texts, consts) - if type(texts) is str: - texts = [texts] - - for j,t in enumerate(texts): - lines.append("--- map {} message ${:02X} ---".format(map_name, j)) - lines.append(t) - lines.append('') - - for i,encoded_text in enumerate(rom.text_bank3): - lines.append("--- bank 3 message ${:02X} ---".format(i)) - lines.append(ff4struct.text.decode(encoded_text, consts)) - lines.append('') - - for i,encoded_text in enumerate(rom.text_battle): - lines.append("--- battle message ${:02X} ---".format(i)) - lines.append(ff4struct.text.decode(encoded_text, consts)) - lines.append('') - - for i,encoded_text in enumerate(rom.text_alerts): - lines.append("--- alert message ${:02X} ---".format(i)) - lines.append(ff4struct.text.decode(encoded_text, consts)) - lines.append('') - - for i,encoded_text in enumerate(rom.text_status): - lines.append("--- status ${:02X} ---".format(i)) - lines.append(ff4struct.text.decode(encoded_text, consts)) - lines.append('') - - for i,encoded_text in enumerate(rom.text_monster_names): - lines.append("--- monster name ${:02X} ---".format(i)) - lines.append(ff4struct.text.decode(encoded_text, consts)) - lines.append('') - - for i,encoded_text in enumerate(rom.text_command_names): - lines.append("--- command name ${:02X} ---".format(i)) - lines.append(ff4struct.text.decode(encoded_text, consts)) - lines.append('') - - for i,encoded_text in enumerate(rom.text_map_names): - lines.append("--- map name ${:02X} ---".format(i)) - lines.append(ff4struct.text.decode(encoded_text, consts)) - lines.append('') - - for i,encoded_text in enumerate(rom.text_item_names): - lines.append("--- item name {} ---".format(value_text(i, 'item'))) - lines.append(ff4struct.text.decode(encoded_text, 
consts)) - lines.append('') - - for i,encoded_text in enumerate(rom.text_spell_names): - lines.append("--- spell name {} ---".format(value_text(i, 'spell'))) - lines.append(ff4struct.text.decode(encoded_text, consts)) - lines.append('') - - for i,encoded_text in enumerate(rom.text_enemy_spell_names): - lines.append("--- spell name {} ---".format(value_text(i + len(rom.text_spell_names), 'spell'))) - lines.append(ff4struct.text.decode(encoded_text, consts)) - lines.append('') - - lines.append("--- credits ---") - lines.append(ff4struct.text.decode(rom.text_credits)) - lines.append('') - - return '\n'.join(lines) diff --git a/worlds/ff4fe/FreeEnterpriseForAP/f4c/decompile_tilesets.py b/worlds/ff4fe/FreeEnterpriseForAP/f4c/decompile_tilesets.py deleted file mode 100644 index f8569ce35b5f..000000000000 --- a/worlds/ff4fe/FreeEnterpriseForAP/f4c/decompile_tilesets.py +++ /dev/null @@ -1,55 +0,0 @@ -from . import ff4struct - -TILESET_DESCRIPTIONS = [ - "Airship", - "Lunar Core", - "Sealed Cave", - "Castle Exterior", - "Town", - "House", - "Castle Interior", - "Crystal Room", - "Lunar Whale", - "Feymarch", - "Tower", - "Giant", - "Lunar Subterrane", - "Mountain", - "Cave", - "Ship", -] - -TILE_PROPERTIES = { - 'layer1' : 'is layer 1', - 'layer2' : 'is layer 2', - 'bridge_layer' : 'is bridge layer', - 'save_point' : 'is save point', - 'closed_door' : 'is closed door', - 'walk_behind' : 'is walk behind', - 'bottom_half' : 'is bottom half', - 'warp' : 'is warp', - 'talkover' : 'is talkover', - 'encounters' : 'has encounters', - 'trigger' : 'is trigger' -} - -def decompile_tilesets(rom): - lines = [] - for i,byte_list in enumerate(rom.tilesets): - lines.append('tileset(${:02X}) // {}'.format(i, TILESET_DESCRIPTIONS[i])) - lines.append('{') - tileset = ff4struct.tileset.decode(byte_list) - for tile_id,tile in enumerate(tileset): - relevant_properties = [] - for p in TILE_PROPERTIES: - if getattr(tile, p): - relevant_properties.append(TILE_PROPERTIES[p]) - - lines.append(' ${:02X} {{ {} }}'.format( - tile_id, - ', '.join(relevant_properties) - )) - lines.append('}') - lines.append('') - - return '\n'.join(lines) diff --git a/worlds/ff4fe/FreeEnterpriseForAP/f4c/decompile_triggers.py b/worlds/ff4fe/FreeEnterpriseForAP/f4c/decompile_triggers.py deleted file mode 100644 index 89cdba9a8e0d..000000000000 --- a/worlds/ff4fe/FreeEnterpriseForAP/f4c/decompile_triggers.py +++ /dev/null @@ -1,57 +0,0 @@ -from . import ff4struct -from . 
import decompile_common - -def _decompile_trigger(trigger, map_index, trigger_index): - lines = [] - - lines.append("trigger({} {})".format(decompile_common.value_text(map_index, 'map'), trigger_index)) - - lines.append("{") - lines.append(" position {} {}".format(trigger.x, trigger.y)) - if trigger.type == ff4struct.trigger.EVENT: - lines.append(" event call {}".format(decompile_common.value_text(trigger.event_call))) - elif trigger.type == ff4struct.trigger.TREASURE: - if trigger.item is not None: - contents = decompile_common.value_text(trigger.item, 'item') - else: - contents = '{} gp'.format(trigger.gp) - - if trigger.is_miab: - fight = ' fight ${:02X}'.format(trigger.formation) - else: - fight = '' - lines.append(" treasure {}{}".format(contents, fight)) - elif trigger.type == ff4struct.trigger.TELEPORT: - target_map = trigger.map - if map_index >= 0x80 and map_index != 251: - # Underworld - if target_map < 0x80: - target_map += 0x100 - - facing = '' - if trigger.target_facing is not None: - facing = 'facing {}'.format(decompile_common.value_text(trigger.target_facing, 'direction')) - - lines.append(" teleport {} at {} {} {}".format( - decompile_common.value_text(target_map, 'map'), - trigger.target_x, trigger.target_y, - facing - )) - - lines.append("}") - return "\n".join(lines) - -def decompile_triggers(rom): - results = [] - - for map_index,encoded_trigger_set in enumerate(rom.map_trigger_sets): - if map_index in (251, 252, 253): - # these are world map sets - encoded_trigger_set = rom.world_trigger_sets[map_index - 251] - - triggers = ff4struct.trigger.decode_set(encoded_trigger_set) - for trigger_index,trigger in enumerate(triggers): - results.append(_decompile_trigger(trigger, map_index, trigger_index)) - results.append("") - - return "\n".join(results) diff --git a/worlds/ff4fe/FreeEnterpriseForAP/f4c/default.consts b/worlds/ff4fe/FreeEnterpriseForAP/f4c/default.consts deleted file mode 100644 index 22727023c786..000000000000 --- a/worlds/ff4fe/FreeEnterpriseForAP/f4c/default.consts +++ /dev/null @@ -1,1889 +0,0 @@ -consts(actor) { - $01 DKCecil - $02 Kain1 - $03 CRydia - $04 Tellah1 - $05 Edward - $06 Rosa1 - $07 Yang1 - $08 Palom - $09 Porom - $0A Tellah2 - $0B PCecil - $0C Tellah3 - $0D Yang2 - $0E Cid - $0F Kain2 - $10 Rosa2 - $11 ARydia - $12 Edge - $13 Fusoya - $14 Kain3 - $15 Golbez - $16 Anna -} - -consts(job) { - $00 DKCecil - $01 Kain - $02 CRydia - $03 Tellah - $04 Edward - $05 Rosa - $06 Yang - $07 Palom - $08 Porom - $09 PCecil - $0A Cid - $0B ARydia - $0C Edge - $0D Fusoya - $0E Golbez -} - -consts(music) { - $00 None - $01 Prologue - $02 LongWayToGo - $03 DummyFanfare - $04 YellowChocobo - $05 BlackChocobo - $06 Underworld - $07 Zeromus - $08 Victory - $09 Town - $0A Rydia - $0B DecisiveBattle - $0C Mountain - $0D Overworld - $0E BigWhale - $0F Sad - $10 Sleep - $11 Golbez - $12 Edward - $13 Rosa - $14 Baron - $15 Prelude - $16 Suspicions - $17 Babil - $18 Airship - $19 Zot - $1A BossBattle - $1B Giant - $1C Feymarch - $1D Destruction - $1E LunarPath - $1F Surprise - $20 Dwarf - $21 PalomPorom - $22 Calbrena - $23 Hurry - $24 Cid - $25 Cave - $26 Dancing - $27 Battle - $28 Eblan - $29 CharacterJoined - $2A CharacterDied - $2B ChocoboForest - $2C Opening - $2D Sad2 - $2E Fabul - $2F LongFanfare - $30 FatChocobo - $31 Moon - $32 Toroia - $33 Mysidia - $34 LunarSubterrane - $35 Ending1 - $36 Ending2 - $37 Ending3 -} - -consts(sound) { - $00 None - $01 Electric - $02 Mist - $03 Odin - $04 Meteor - $05 Explosion - $06 Thud - $07 Pierce - $08 Punch - $09 BlackMagic - 
$0A Grind - $0B Staff - $0C Exit - $0D Fire - $0E Thunder - $0F Petrify - $10 EnemyDefeat - $11 Ice2 - $12 Magnes - $13 Weak - $14 FormChange - $15 Mute - $16 Throw - $17 WhiteMagic - $18 Projectile - $19 Shiva - $1A Cure - $1B Airship - $1C Stop - $1D Ice - $1E Wall - $1F Float - $20 Quake - $21 Flee - $22 HealingPot - $23 Crush - $24 Crystal - $25 Hop - $26 Bahamut - $27 Life - $28 JumpLand - $29 Flood - $2A Hit - $2B CashRegister - $2C Thud2 - $2D Globe199 - $2E Fire3 - $2F Beach - $30 Unlock - $31 HeavyDoor - $32 Propellers - $33 BigBang - $34 CallMagic - $35 Gaze - $36 Laser - $37 GetItem - $38 Falling - $39 Fixing - $3A Harp - $3B Death - $3C Alert - $3D Jump - $3E Virus - $3F BattleStart - $40 Curse - $41 Fast - $42 Glow - $43 Peep - $44 Splash - $45 Door - $46 Clock - $47 Whirlpool - $48 Crunch - $49 Hummingway - $4A BlackHole - $4B Whirl - $4C Whistle - $4D Comet - $4E Absorb - $50 Sylph - $51 Burning - $52 Slap - $53 Entice - $54 Earthquake - $55 Pow - $56 Smoke - $57 Magnetize - $5A Hatch - $5B Needles - $5F Pyro - $61 Paralyze - $63 Zantetsuken - $66 Heal - $67 Venom - $68 Drain - $69 Ice3 - $6A Flare - $6B Warp - $6C White - $6D Dispel - $6E Blink - $6F Sleep - $70 Berserk - $71 Charm - $72 Slow -} - -consts(flag) { - $00 OpeningComplete - $01 TalkedToKainInBaron - $02 RosaSick - $03 CidGreetedCecil - $04 PrologueComplete - $05 CecilFoughtYang - $06 TwinsStone - $07 RydiaRejoined - $08 LearnedOdin - $09 DefeatedBaigan - $0A ReinforcedEnterprise - $0B CecilBecamePaladin - $0C KainRegainedSenses - $0D DefeatedMistDragon - $0E MistComplete - $0F ChildRydiaJoined - $10 UnknownTellahLeaves - $11 RosaHealed - $12 DefeatedAntlion - $13 DefeatedOctomamm - $14 DamcyanBombed - $15 RydiaLearnedFire - $16 DefeatedMomBomb - $17 CampedInWateryPass - $18 LeviathanAttacked - $19 YangDestroyedCannon - $1A FoundSleepingYang - $1B FabulBattle - $1C TwinsJoined - $1D PalomExtinguishedFire - $1E TellahRejoined - $1F DefeatedMilon1 - $20 DefeatedMilon2 - $21 PaladinCecilSpokeToElder - $22 RosaGreetedCecil - $23 GotEarthCrystal - $24 GotTwinharp - $25 EdwardAfterDarkElf - $26 EnteredZot - $27 TellahFightsEdward - $28 FindingOdinBeforeFeymarch - $2A FoughtMagusSisters - $2B FoughtValvalis - $2C SpokeAboutRescuingRosa - $2D EnteredMagnes - $2E DarkElfBattle1 - $2F ZotEndAndDrill - $30 UnderworldOpen - $31 SpokeToToroiaClericsAfterCrystal - $32 FirstVisitToUnderworld - $33 LugaeBattle - $34 EdgeJoined - $36 Hook - $37 RubicantBattle - $38 FoundFalcon - $39 ReinforcedFalcon - $3A SpokeToGiottAfterBabil - $3B KainStoleDarkCrystal2 - $3C OpenedSealedCave - $3D Drill - $3E TanksOutsideBabil - $3F EnteredFeymarch - $40 GotDarkCrystal - $41 DemonWallBattle - $42 KainStoleDarkCrystal - $43 BigWhaleSurfaced - $44 AsuraBattle - $45 LeviatanBattle - $46 BahamutBattle - $47 FusoyaJoined - $48 CPUBattle - $49 FourFiendsBattle - $4A GotAdamant - $4B GotExcalibur - $4C GotExcalibur2 - $4D GotPan - $4E UsedPan - $4F GotSpoon - $50 BehemothBattle50 - $51 BehemothBattle51 - $52 BehemothBattle52 - $53 FoughtEdgeParents - $54 RydiaLearnedSylph - $55 GotRibbons - $57 ExitedWateryPass - $E1 DarkElfBattle2 -} - -consts(map) { - $00 BaronTown - $01 Mist - $02 Kaipo - $03 Mysidia - $04 Silvera - $05 ToroiaTown - $06 Agart - $07 ToroiaInn - $08 ToroiaWeapon - $09 ToroiaArmor - $0A ToroiaItem - $0B BaronInn - $0C BaronEquipment - $0D CidHouse - $0E RosaHouse - $0F RydiaHouse - $10 KaipoInn - $11 KaipoCafe - $12 KaipoHospital - $13 MysidiaCafe - $14 MysidiaInn - $15 MountOrdealsMirrorRoom - $16 HouseOfWishes - $17 RoomOfWishes - 
$18 ToroiaCafe - $19 ToroiaCafeUpstairs - $1A ToroiaDancers - $1C ToroiaStable - $1D ToroiaStableDownstairs - $1E AstroTower - $1F AstroTowerObservatory - $20 AgartInn - $21 BlackChocoboForest - $22 TownWaterBG - $23 CastleFloorBG - $24 BaronCastle - $25 Damcyan - $26 Fabul - $27 ToroiaCastle - $28 Eblan - $29 DesertBG - $2A BaronCastleLobby - $2B BaronCastleOuterCourt - $2C BaronCastleThroneRoom - $2D BaronCastleWestHall - $2E BaronCastleEastHall - $2F BaronCastlePrisonEntrance - $30 BaronCastlePrison - $31 BaronCastleSoldiersQuarters - $32 BaronCastleWestTower1F - $33 BaronCastleWestTower2F - $34 BaronCastleCecilRoom - $35 BaronCastleEastTower1F - $36 BaronCastleEastTower2F - $37 BaronCastleEastTower3F - $38 BaronCastleEastTowerB1 - $39 BaronCastleOdinRoom - $3A SewerEntrance - $3B SewerB3 - $3C SewerB1 - $3D SewerSaveRoom - $3E SewerB2 - $3F Damcyan1F - $40 Damcyan2F - $41 Damcyan3F - $42 DamcyanTreasuryEntrance - $43 DamcyanTreasuryDownstairs - $44 RoomToSewer - $45 AgartWeapon - $46 AgartArmor - $47 FabulLobby - $48 FabulSecondFloor - $49 FabulThroneRoom - $4A FabulCrystalRoom - $4B FabulEquipment - $4C FabulInn - $4D FabulEastTower1F - $4E FabulEastTower2F - $4F FabulKingRoom - $50 FabulWestTower1F - $51 FabulHospital - $52 FabulYangRoom - $53 FieldOutsideMist - $54 WateryPassSaveRoom - $55 ToroiaCastleLobby - $56 ToroiaCastleClericRoom - $57 ToroiaCastleCrystalRoom - $58 ToroiaCastleHospital - $59 ToroiaCastleStairs - $5A ToroiaCastleHall - $5B ToroiaCastlePotRoom - $5C ToroiaCastleChestRoom - $5D ToroiaCastleTreasury - $5E Eblan1F - $5F Eblan2F - $60 EblanThroneRoom - $61 EblanWestTower1F - $62 EblanWestTower2F - $63 EblanEastTower1F - $64 EblanEastTower2F - $65 EblanBasement - $66 BaronCastleBlackMagicSchool - $67 BaronCastleWhiteMagicSchool - $68 DesertBG2 - $69 TrainingRoom - $6A Waterfall - $6B CastleWaterBG - $6C MistCave - $6D MirrorRoomBG - $6E WateryPassBG - $6F WateryPass1F - $70 WateryPass2F - $71 WateryPass3F - $72 WateryPass4F - $73 WateryPass5F - $74 WaterfallEntrance - $75 Waterfall1F - $76 Waterfall2F - $77 AntlionCave1F - $78 AntlionCave2F - $79 AntlionCaveNest - $7A AntlionCaveSaveRoom - $7B AntlionCaveTreasureRoom - $7C BlackBG - $7D MistBG - $7E MountHobsWest - $7F MountHobsSummit - $80 MountHobsEast - $81 MountHobsSave - $82 MountainBG - $83 WateryPassWaterfallRoom - $84 MountOrdeals1F - $85 MountOrdeals2F - $86 MountOrdeals3F - $87 MountOrdealsSummit - $88 MysidiaCrystalRoom - $89 MysidiaSerpentRoad - $8A BaronCastleAntechamber - $8B AgartWell - $8C CaveMagnes1F - $8D CaveMagnes2F - $8E CaveMagnesPitTreasureRoom - $8F CaveMagnes3F - $90 CaveMagnesTorchTreasureRoom - $91 CaveMagnes4F - $92 CaveMagnesSaveRoom - $93 CaveMagnes5F - $94 CaveMagnesCrystalRoom - $95 CaveMagnesBG - $96 WateryPassCamp - $97 BaronSerpentRoad - $98 Zot1F - $99 Zot2F - $9A Zot3F - $9B Black - $9C Zot4F - $9D Zot5F - $9E Zot6F - $9F ZotCommandCenter - $A0 AdamantGrotto - $A1 CaveMagnesSaveRoom2 - $A2 ZotSaveRoom - $A3 CidAirship - $A4 TwinsAirship - $A5 EdwardAirship - $A6 BabilSaveRoom2 - $A7 BabilB1 - $A8 BabilB2 - $A9 BabilB3 - $AA BabilB4 - $AB BabilCrystalRoom - $AC BabilB5 - $AD FallingBG - $AE UndergroundTunnelBG - $AF CrystalRoomBG - $B0 EndingCecilRoom - $B1 TrainingRoomMain - $B2 TrainingRoomUpstairs - $B5 GiantMouth - $B6 GiantNeck - $B7 GiantChest - $B9 GiantStomach - $BA GiantPassage - $BC GiantLung - $BD GiantCPU - $BE GiantBG - $C0 SoldierAirship - $C1 FabulPort - $C2 Boat - $C3 DockedAirship - $C4 JoinedAirships - $C5 EmptyAirship - $C6 UnderworldAirship - $C7 
CaveEblanEntrance - $C8 CaveEblanSettlement - $C9 CaveEblanPass - $CA CaveEblanExit - $CB CaveEblanInn - $CC CaveEblanEquipment - $CD CaveEblanSaveRoom - $CE CaveEblanHospital - $CF FabulChocoboForest - $D0 EmptyAirship2 - $D1 MountOrdealsChocoboForest - $D2 BaronChocoboForest - $D3 TroiaChocoboForest - $D4 IslandChocoboForest - $D5 BaronEmptyThroneRoom - $D6 EmptyAirshipBlack - $D7 EmptyAirship3 - $D8 EmptyAirshipUnderground - $D9 EmptyAirshipBlack2 - $DA TowerOfWishesFinalBattle - $DB AirshipBG - $DC LargeDock - $DF SmallDock - $E0 MistInn - $E1 MistWeapon - $E2 MistArmor - $E3 KaipoWeapon - $E4 KaipoArmor - $E5 MysidiaWeapon - $E6 MysidiaArmor - $E7 MysidiaItem - $E8 SilveraInn - $E9 SilveraWeapons - $EA SilveraArmor - $EB SilveraItems - $EC BaronTownItems - $ED EndingTowerOfWishes - $EE EndingPalom - $EF EndingEblan - $F0 EndingLeviathan - $F1 EndingDamcyan - $F2 EndingDwarfCastle - $F3 EndingMountOrdeals - $F4 EndingAstroTower - $F5 EndingCecilRoom2 - $F6 EndingBaronThroneRoom - $F7 EndingFabulThroneRoom - $FB Overworld - $FC Underworld - $FD Moon - $FE CurrentMap - $100 SmithyHouse - $101 Tomra - $102 SmithyHouseMainFloor - $103 SmithyRoom - $104 TomraInn - $105 TomraEquipment - $106 TomraTreasury - $107 DwarfCastle - $108 DwarfCastleLobby - $109 DwarfCastleThroneRoom - $10A DwarfCastleFatChocobo - $10B DwarfCastleTunnel - $10C CrystalRoomBG2 - $10D DwarfCastleCrystalRoom - $10E DwarfCastleEquipment - $10F DwarfCastleBasement - $110 DwarfCastleEastTower1F - $111 DwarfCastleInn - $112 DwarfCastleHospital - $113 DwarfCastleWestTower1F - $114 BabilSaveRoom - $115 BabilIcebrandRoom - $116 BabilBlizzardRoom - $117 BabilIceShieldRoom - $118 BabilIceMailRoom - $119 DwarfCastleEastTower3F - $11A DwarfCastleWestTower3F - $11B DwarfCastleTower2F - $11C DwarfCastleBG - $11D BabilFloorLugae - $11E BabilFloorIceMail - $11F BabilFloorAirship - $120 DwarfCastleCafe - $121 Babil1F - $122 Babil2F - $123 Babil3F - $124 Babil4F - $125 Babil5F - $126 BabilFloorAirship2 - $127 BabilFloorIceMail2 - $128 BabilFloorLugae2 - $12C BigWhale - $12D BabilCannon - $12E DwarfTank - $12F BigWhale2 - $130 BabilBG - $131 EndingPalom2 - $132 TomraItem - $133 TowerOfWishesAfterGiant - $134 CastleFloorBG2 - $135 TownWaterBG2 - $136 CaveOfSummons1F - $137 CaveOfSummons2F - $138 CaveOfSummons3F - $139 SylvanCaveBG - $13A Feymarch1F - $13B FeymarchTreasury - $13C Feymarch2F - $13D FeymarchSaveRoom - $13E FeymarchLibrary1F - $13F FeymarchLibrary2F - $140 FeymarchLeviathanRoom - $141 FeymarchWeapon - $142 FeymarchArmor - $143 FeymarchInn - $144 SealedCaveEntrance - $145 SylvanCave1F - $146 SylvanCave2F - $147 SylvanCave3F - $148 SylvanCaveTreasury - $149 SylvanCaveYangRoom - $14A SealedCave1F - $14B SealedCaveRoomKatanaEther - $14C SealedCave2F - $14D SealedCave3F - $14E SealedCaveRoomKatanaNinjaHat - $14F SealedCaveRoomNinjaStarElixir - $150 SealedCaveRoomLightSword - $151 SealedCave4F - $152 SealedCave5F - $153 SealedCave6F - $154 SealedCaveRoomBoxes - $155 SealedCave7F - $156 SealedCaveSaveRoom - $157 SealedCaveDemonWallRoom - $158 SealedCaveEmptyRoom - $159 SealedCaveCrystalRoom - $15A Bahamut1F - $15B Bahamut2F - $15C BahamutFloor - $160 LunarPalaceLobby - $161 LunarPalaceCrystalRoom - $162 LunarBG - $163 LunarPassage1 - $164 LunarPassage2 - $165 Hummingway - $166 LunarLairBG - $167 LunarSubterran1F - $168 LunarSubterran2F - $169 LunarSubterran3F - $16A LunarSubterran4F - $16B LunarSubterran5F - $16C LunarSubterran6F - $16D LunarSubterran7F - $16E LunarCore1F - $16F LunarCore2F - $170 LunarCore3F - $171 LunarCore4F - 
$172 LunarCoreZemusRoom - $173 LunarSubterranRoomElixir - $174 LunarSubterranTunnelCure3 - $175 LunarSubterranTunnelProtectRing - $176 LunarSubterranTunnelWhiteRobe - $177 LunarSubterranPinkpuff - $178 LunarSubterranSaveRoom1 - $179 LunarSubterranTunnelMinerva - $17A LunarSubterranRoomHolyLance - $17B LunarSubterranSaveRoom2 - $17C LunarSubterranRoomRibbons - $17D LunarCoreBG -} - -consts(vfx) { - $00 RedWingsFlying - $01 RedWingsLanding - $02 PackageOpen - $03 DamcyanBombing - $04 RydiaBattle - $05 Shelter - $06 HealingPot - $07 MistGathering - $08 PackageBombs - $09 Prologue - $0A Warp - $0B Exit - $0C Bombardment - $0D ShipAtFabul - $0E AgartMountainExplosion - $0F Leviathan1 - $10 Leviathan2 - $11 LeviathanSwallow - $12 KainRedWingsArrive - $13 KainRedWingsLeave - $14 FlyToZot - $15 LandEnterprise - $16 EnterpriseTanksRedWings - $17 EnterpriseEvadingRedWings - $18 EnterpriseEvadingRedWings2 - $19 BaronCastleAntechamberWall1 - $1A BaronCastleAntechamberWall2 - $1B BaronCastleAntechamberWall3 - $1C PutEnterpriseAtBaron - $1D PopWarpStack - $1E ScreenShakeAndExplosion - $1F BigWhaleRising - $20 MoonTravelToGiant - $21 GiantAttack - $22 TanksAttackGiant - $23 AirshipsAttackGiant - $24 AttackGiant - $25 AirshipFliesToBigWhale - $26 AirshipFliesToGiant - $27 AirshipFliesToGiantAndFade - $28 PutEnterpriseAtDwarfCastle - $29 HideAllPlacements - $2A BigWhaleShining - $2B ReturnToZot - $2C BigWhaleLanding - $2D FlyBigWhale - $2E RemoveEnterprise - $2F MoonTravel - $30 ReturnToMoonAfterGiant - $31 GiantCPUExplosion - $32 MysidianLegend - // $33 All placements disappear and music gets quieter - $34 Telescope - $35 BigWhaleReturnsToSea - $36 DwarfAttackTowerExplosions - $37 MysidianLegendSpace - $38 MoonFliesAway - $39 EndCredits - $3A ExplosionsCenterScreen - $3B EnterpriseCatchBabilFall - $3C ResetGame - $3D LowerMusicVolume - $3E FadeUpMusicVolume - $3F KillAllButPaladinCecil - $40 GiantExplode -} - -consts(item) { - $00 NoWeapon - $01 FireClaw - $02 IceClaw - $03 ThunderClaw - $04 CharmClaw - $05 PoisonClaw - $06 CatClaw - $07 Rod - $08 IceRod - $09 FlameRod - $0A ThunderRod - $0B Change - $0C CharmRod - $0D StardustRod - $0E Lilith - $0F Staff - $10 Cure - $11 SilverStaff - $12 PowerStaff - $13 Lunar - $14 LifeStaff - $15 Silence - $16 ShadowSword - $17 DarknessSword - $18 BlackSword - $19 Legend - $1A Light - $1B Excalibur - $1C FireBrand - $1D IceBrand - $1E Defense - $1F DrainSword - $20 Ancient - $21 Sleep - $22 MedusaSword - $23 Spear - $24 Wind - $25 FlameSpear - $26 BlizzardSpear - $27 DragoonSpear - $28 WhiteSpear - $29 DrainSpear - $2A Gungnir - $2B Short - $2C Middle - $2D Long - $2E NinjaSword - $2F Murasame - $30 Masamune - $31 Assassin - $32 MuteDagger - $33 Whip - $34 Chain - $35 Blitz - $36 FlameWhip - $37 DragonWhip - $38 HandAxe - $39 Dwarf - $3A Ogre - $3B SilverDagger - $3C Dancing - $3D SilverSword - $3E Spoon - $3F CrystalSword - $40 Shuriken - $41 NinjaStar - $42 Boomrang - $43 FullMoon - $44 Dreamer - $45 CharmHarp - // $46 Dummy - $47 PoisonAxe - $48 RuneAxe - $49 SilverHammer - $4A EarthHammer - $4B Wooden - $4C Avenger - $4D ShortBow - $4E CrossBow - $4F GreatBow - $50 Archer - $51 ElvenBow - $52 SamuraiBow - $53 ArtemisBow - $54 IronArrow - $55 WhiteArrow - $56 FireArrow - $57 IceArrow - $58 LitArrow - $59 DarknessArrow - $5A PoisonArrow - $5B MuteArrow - $5C CharmArrow - $5D SamuraiArrow - $5E MedusaArrow - $5F ArtemisArrow - $60 NoArmor - $61 IronShield - $62 ShadowShield - $63 BlackShield - $64 PaladinShield - $65 SilverShield - $66 FireShield - $67 IceShield - 
$68 DiamondShield - $69 Aegis - $6A SamuraiShield - $6B DragoonShield - $6C CrystalShield - $6D IronHelm - $6E ShadowHelm - $6F DarknessHelm - $70 BlackHelm - $71 PaladinHelm - $72 SilverHelm - $73 DiamondHelm - $74 SamuraiHelm - $75 DragoonHelm - $76 CrystalHelm - $77 Cap - $78 LeatherHat - $79 GaeaHat - $7A WizardHat - $7B Tiara - $7C Ribbon - $7D Headband - $7E Bandanna - $7F NinjaHelm - $80 Glass - $81 IronArmor - $82 ShadowArmor - $83 DarknessArmor - $84 BlackArmor - $85 PaladinArmor - $86 SilverArmor - $87 FireArmor - $88 IceArmor - $89 DiamondArmor - $8A Genji - $8B DragonArmor - $8C CrystalArmor - $8D Cloth - $8E LeatherArmor - $8F GaeaArmor - $90 WizardArmor - $91 BlackRobe - $92 Sorcerer - $93 WhiteRobe - $94 PowerRobe - $95 Heroine - $96 Prisoner - $97 Bard - $98 Karate - $99 BlBelt - $9A AdamantArmor - $9B NinjaArmor - $9C IronGauntlet - $9D ShadowGauntlet - $9E DarknessGauntlet - $9F BlackGauntlet - $A0 PaladinGauntlet - $A1 SilverGauntlet - $A2 DiamondGauntlet - $A3 Zeus - $A4 SamuraiGauntlet - $A5 DragoonGauntlet - $A6 CrystalGauntlet - $A7 IronRing - $A8 RubyRing - $A9 SilverRing - $AA Strength - $AB Rune - $AC CrystalRing - $AD DiamondRing - $AE Protect - $AF Cursed - $B0 Bomb - $B1 BigBomb - $B2 Notus - $B3 Boreas - $B4 ThorRage - $B5 ZeusRage - $B6 Stardust - $B7 Succubus - $B8 Vampire - $B9 Bacchus - $BA Hermes - $BB HrGlass1 - $BC HrGlass2 - $BD HrGlass3 - $BE SilkWeb - $BF Illusion - $C0 FireBomb - $C1 Blizzard - $C2 LitBolt - $C3 StarVeil - $C4 Kamikaze - $C5 MoonVeil - $C6 MuteBell - $C7 GaiaDrum - $C8 Crystal - $C9 Coffin - $CA Grimoire - $CB Bestiary - $CC Alarm - $CD Unihorn - $CE Cure1 - $CF Cure2 - $D0 Cure3 - $D1 Ether1 - $D2 Ether2 - $D3 Elixir - $D4 Life - $D5 Soft - $D6 MaidKiss - $D7 Mallet - $D8 DietFood - $D9 EchoNote - $DA EyeDrops - $DB Antidote - $DC Cross - $DD Heal - $DE Siren - $DF AuApple - $E0 AgApple - $E1 SomaDrop - $E2 Tent - $E3 Cabin - $E4 Smut - $E5 Exit - $E6 DwarfBread - $E7 Imp - $E8 BombSummon - $E9 Cockatrice - $EA Mage - $EB Carrot - $EC Pass - $ED Whistle - $EE Package - $EF Baron - $F0 SandRuby - $F1 EarthCrystal - $F2 Magma - $F3 Luca - $F4 TwinHarp - $F5 DarkCrystal - $F6 Rat - $F7 Adamant - $F8 Pan - $F9 Pink - $FA Tower - $FB DkMatter - // $FC Dummy - // $FD Dummy - $FE Sort - $FF TrashCan -} - -consts(spell) { - $01 Hold - $02 Mute - $03 Charm - $04 Blink - $05 Armor - $06 Shell - $07 Slow - $08 Fast - $09 Bersk - $0A Wall - $0B White - $0C Dspel - $0D Peep - $0E Cure1 - $0F Cure2 - $10 Cure3 - $11 Cure4 - $12 Heal - $13 Life1 - $14 Life2 - $15 Size - $16 Exit - $17 Sight - $18 Float - $19 Toad - $1A Piggy - $1B Warp - $1C Venom - $1D Fire1 - $1E Fire2 - $1F Fire3 - $20 Ice1 - $21 Ice2 - $22 Ice3 - $23 Lit1 - $24 Lit2 - $25 Lit3 - $26 Virus - $27 Weak - $28 Quake - $29 Sleep - $2A Stone - $2B Fatal - $2C Stop - $2D Drain - $2E Psych - $2F Meteo - $30 Nuke - $31 Imp - $32 Bomb - $33 Cocka - $34 Mage - $35 Chocb - $36 Shiva - $37 Indra - $38 Jinn - $39 Titan - $3A Mist - $3B Sylph - $3C Odin - $3D Levia - $3E Asura - $3F Baham - $40 Comet - $41 Flare - $42 Flame - $43 Flood - $44 Blitz - $45 Smoke - $46 Pin - $47 Image - $4D ImpEffect - $4E BombEffect - $4F CockatriceEffect - $50 MageEffect - $51 ChocoboEffect - $52 ShivaEffect - $53 IndraEffect - $54 JinnEffect - $55 TitanEffect - $56 MistEffect - $57 SylphEffect - $58 OdinEffect - $59 LeviatanEffect - $5A AsuraEffect1 - $5B AsuraEffect2 - $5C AsuraEffect3 - $5D BahamutEffect - $5E WMeteo - $61 Enemy_Gaze - $62 Enemy_Bluster - $63 Enemy_Slap - $64 Enemy_Powder - $65 Enemy_Glance 
- $66 Enemy_Charm - $67 Enemy_Tongue - $68 Enemy_Curse - $69 Enemy_Ray - $6A Enemy_Count - $6B Enemy_Beak - $6C Enemy_Petrify - $6D Enemy_Blast - $6E Enemy_Hug - $6F Enemy_Breath - $70 Enemy_Whisper - $71 Enemy_Entangle - $72 Enemy_WeakEnemy - $73 Enemy_Disrupt - $74 Enemy_ColdMist - $75 Enemy_Explode - $76 Enemy_DullSong - $77 Enemy_HoldGas - $78 Enemy_Gas - $79 Enemy_Poison - $7A Enemy_Maser - $7B Enemy_Vanish - $7C Enemy_Demolish - $7D Enemy_BlkHole - $7E Enemy_Dancing - $7F Enemy_Disrupt2 - $80 Enemy_Storm - $81 Enemy_Magnet - $82 Enemy_Reaction - $83 Enemy_Hatch - $84 Enemy_Remedy - $85 Enemy_Absorb - $86 Enemy_Heal - $87 Enemy_BigBang - $88 Enemy_Vampire - $89 Enemy_Digest - $8A Enemy_Pollen - $8B Enemy_Crush - $8C Enemy_Alert - $8D Enemy_Call - $8E CommandEnemy - $8F Enemy_Vanish2 - $90 Enemy_Search - $91 Enemy_Fission - $92 Enemy_Retreat - $93 Enemy_Heal2 - $94 Enemy_Beam - $95 Enemy_Globe199 - $96 Enemy_Fire - $97 Enemy_Blaze - $98 Enemy_Blitz - $99 Enemy_Thunder - $9A Enemy_DBreath - $9B Enemy_BigWave - $9C Enemy_Blizzard - $9D Enemy_Wave - $9E Enemy_Tornado - $9F Enemy_Laser - $A0 Enemy_Explode2 - $A1 Enemy_Quake - $A2 Enemy_Emission - $A3 Enemy_HeatRay - $A4 Enemy_Glare - $A5 Enemy_Odin - $A6 Enemy_MegaNuke - $A7 Enemy_Needle - $A8 Enemy_Counter - $A9 InvincibleOn - $AA InvincibleOff - $AB Enemy_Recover - $AC Enemy_Remedy2 - $AD Transform3 - $AE EndBattle - - $B0 SummonMistDragon - $B1 ReturnRydia - $B3 GhostAnna - $B4 GhostEdwardTellah - $B5 GhostPalomPorom - $B6 GhostYangCid - $B7 GhostFusoyaGolbez - $B8 GhostDisappear - $BA GhostRevive - $BB ZeromusShake2 - $BC Unsure_KillShadow - $BE GhostHeal - $BF GhostFlash -} - -consts(spellset) { - $00 PCecil - $02 RydiaWhite - $03 RydiaBlack - $04 RydiaCall - $05 TellahWhite - $06 TellahBlack - $07 Rosa - $08 Palom - $09 Porom - $0A FusoyaWhite - $0B FusoyaBlack - $0C Edge -} - -consts(npc) { - $01 DoorOpener - $02 DarkElfSeal - $03 BaronSoldier1 - $04 BaronSoldier2 - $05 Kainazzo - $06 Engineer1 - $07 Engineer2 - $08 Baigan - $09 CastleBaronWhiteMage1 - $0A CastleBaronWhiteMage2 - $0B CastleBaronWhiteMage3 - $0C CastleBaronBlackMage1 - $0D CastleBaronBlackMage2 - $0E CastleBaronBlackMage3 - $0F CastleBaronSoldier1 - $10 Palom1 - $11 Porom1 - $12 KainNormal - $13 CastleBaronSoldier2 - $14 CastleBaronSoldier3 - $15 CastleBaronSoldier4 - $16 BaronGuard - $17 KingBaron - $18 Elder1 - $19 HPStatusRecovery - $1A Yang1 - $1B Rosa1 - $1C CastleBaronSoldier5 - $1D ToroiaArmorShopWoman - $1E CastleBaronMysidiaBlackMage - $1F CastleBaronMysidiaWhiteMage - $20 CastleBaronBlackMage4 - $21 CastleBaronSoldier6 - $22 ToroiaWeaponShopDancer - $23 ItemShopGirl - $24 SleepingBubble1 - $25 CecilMaid - $26 Rubicant1 - $27 DarkKnightCecil1 - $28 LegendarySword - $29 Kain1 - $2A MPRecovery - $2B Captain - $2C Sparkle1 - $2D LockedDoorWaterway - $2E LockedDoorWeaponArmorShop - $2F BaronMan - $30 BaronOldMan - $31 Namingway1 - $32 BaronToroiaWoman - $33 BaronBoy - $34 BaronOldWoman - $35 Cid1 - $36 BaronDancer - $37 RosaMom - $38 BaronKaipoInnkeeper - $39 BaronInnGirl - $3A BaronInnMan - $3B BaronInnWoman - $3C CidDaughter - $3D Books1 - $3E Books2 - $3F Books3 - $40 Cid2 - $41 Fireplace - $42 BaronWeaponShopMan - $43 BaronArmorShopBeardedMan - $44 BaronKaipoToroiaItemShopBeardedMan - $45 PaladinCecil1 - $46 MistFarmAgartEblanMan - $47 MistToroiaCafeMan - $48 MistMan - $49 MistDamcyanFabulToroiaAgartOldMan - $4A ChildRydia - $4B WomanLyingDown - $4C MistInnkeeper - $4D MistWeaponShopBeardedMan - $4E MistArmorShopOldMan - $4F KaipoMan - $50 KaipoBeardedMan - 
$51 KaipoWoman1 - $52 KaipoDancer - $53 KaipoBoy - $54 KaipoToroiaMan - $55 KaipoWoman2 - $56 KaipoWeaponShopOldMan - $57 KaipoArmorShopBeardedMan - $58 FabulWeaponShopOldWoman - $59 DamcyanEblanZotLockedDoor - $5A KaipoInn2FWoman - $5B KaipoInn2FScholar - $5C KaipoInn2FBeardedMan - $5D Advertisement - $5E KaipoInnMysidiaAirshipSoldier - $5F KaipoSickhouseOldMan - $60 KaipoSickhouseOldWoman - $61 RosaTiedUpSick - $62 Edward1 - $63 EdwardHarp1 - $64 Anna - $65 HoodedMonster1 - $66 Tellah1 - $67 WaterSplash - $68 Octomamm - $69 FallenSoldier1 - $6A FallenSoldier2 - $6B AnnaFallen - $6C FallenSoldier3 - $6D FallenSoldier4 - $6E ToroiaInnWoman - $6F PalomStatue - $70 PoromStatue - $71 FireMagic - $72 IceWall - $73 Yang2 - $74 SilveraMini1 - $75 Bomb1 - $76 Monk - $77 SheilaWaving - $78 SilveraMini2 - $79 SilveraFrog1 - $7A FabulMonk1 - $7B AntlionLeftClaw - $7C CaptainUnused - $7D CastleBaronDoorOpener - $7E ChocoboUnused - $7F Golbez1 - $80 MysidiaFabulCrystal - $81 Lightning1 - $82 BlackChocoboFarm - $83 SilveraPigDancer - $84 Queen1 - $85 Dancer1 - $86 BaronDancerDress - $87 FatChocoboTopLeft1 - $88 FatChocoboTopRight1 - $89 FatChocoboBottomLeft1 - $8A FatChocoboBottomRight1 - $8B WellSign - $8C YellowChocobo1 - $8D WhiteChocobo1 - $8E DancerLegs - $8F OctomammTentacles - $90 RubicantFlames - $91 AntlionRightClaw - $92 OldManUnused - $93 TrainingRoomSoldierOutside - $94 CaveEblanaSoldier1 - $95 CaveEblanaSoldier2 - $96 SoldierUnused - $97 CaveEblanaSoldier3 - $98 FabulMonk2 - $99 FabulMonk3 - $9A FabulMonk4 - $9B FabulMan - $9C FabulWoman1 - $9D FabulBoy - $9E FabulDancer - $9F FabulOldWoman - $A0 FabulKing - $A1 FabulWhiteMage - $A2 FabulKarateFighter1 - $A3 FabulKarateFighter2 - $A4 FabulWoman2 - $A5 MistBombUnused - $A6 SilveraPig - $A7 SilveraFrog2 - $A8 DarkElf - $A9 BaronToroiaAgartWoman - $AA SilveraWeaponShopFrog - $AB SilveraArmorShopPig - $AC SilveraItemShopMini - $AD SilveraInnPig - $AE AgartBoy - $AF CaveMagnesCrystal - $B0 KaipoToroiaAgartOldWoman - $B1 ToroiaAgartGirl - $B2 ToroiaAgartBeardedMan - $B3 AgartDancer - $B4 AgartWeaponShopMan - $B5 AgartArmorShopOldMan - $B6 FabulInnOldMan - $B7 MysidiaBlackMageFrog - $B8 MysidiaBlackMage1 - $B9 MysidiaBlackMage2 - $BA MysidiaWhiteMage1 - $BB MysidiaWhiteMage2 - $BC MysidiaWhiteMage3 - $BD MysidiaDancer - $BE FabulMysidiaOldWoman - $BF MysidiaInn2FInnkeeperWhiteMage - $C0 MysidiaWeaponShopBlackMage - $C1 MysidiaArmorShopWhiteMage - $C2 MysidiaItemShopBlackMage - $C3 MysidiaBlackMagePoison - $C4 Teleporter - $C5 MtOrdealsFlameTop - $C6 MtOrdealsFlameBottom - $C7 IceMagic - $C8 RydiaMom - $C9 Chamberlain - $CA Valvalis - $CB CecilRosaKiss1 - $CC Ball - $CD BlackChocobo1 - $CE MagusSister - $CF FatChocoboSpot1 - $D0 CastleToroiaCleric1 - $D1 CastleToroiaCleric2 - $D2 CastleToroiaCleric3 - $D3 CastleToroiaCleric4 - $D4 CastleToroiaCleric5 - $D5 CastleToroiaCleric6 - $D6 CastleToroiaCleric7 - $D7 CastleToroiaCleric8 - $D8 ToroiaDancer1 - $D9 ToroiaDancer2 - $DA ToroiaEdward - $DB MysidiaBlackMage3 - $DC ElderPraying - $DD CastleBaronCaptain - $DE MysidiaWhiteMage4 - $DF SilveraFrog3 - $E0 MysidiaElder - $E1 MysidiaBlackMagePraying1 - $E2 MysidiaWhiteMagePraying1 - $E3 SilveraFrog4 - $E4 CaveEblanaWeaponShopOldMan - $E5 CaveEblanaArmorShopMan - $E6 CaveEblanaInnBeardedMan - $E7 Edge1 - $E8 Rubicant2 - $E9 AdultRydia1 - $EA Crystal2 - $EB FallenSoldier5 - $EC AirshipEngineer - $ED ToroiaDancerAsks - $EE FallenSoldier6 - $EF CaveEblanaOldMan - $F0 CaveEblanaSoldier4 - $F1 CaveEblanaSoldier5 - $F2 AirshipDwarf - $F3 ZotKain - $F4 Golbez2 - 
$F5 ZotLockedDoor - $F6 PassSeller - $F7 PassReceiver - $F8 GrottoAdamantMiniWantsTails - $F9 CaveEblanaSoldier6 - $FA SaloonKingMan - $FB SilveraMini3 - $FC Fusoya1 - $FD MysidiaWhiteMagePraying2 - $FE MysidiaBlackMagePraying2 - $FF DarkKnightCecil2 - $100 DarkKnightCecil3 - $101 Dwarf1 - $102 Dwarf2 - $103 Dwarf3 - $104 Dwarf4 - $105 Dwarf5 - $106 Dwarf6 - $107 Dwarf7 - $108 KingGiott1 - $109 Luca1 - $10A Dwarf8 - $10B Dwarf9 - $10C Dwarf10 - $10D Dwarf11 - $10E Rosa2 - $10F Kain2 - $110 Yang3 - $111 Cid3 - $112 AdultRydia2 - $113 Edge2 - $114 Golbez3 - $115 GolbezHand - $116 Calbrena - $117 Dwarf12 - $118 Dwarf13 - $119 Dwarf14 - $11A Dwarf15 - $11B Crystal1 - $11C GolbezFallen - $11D Dwarf16 - $11E FatChocoboTopLeft2 - $11F FatChocoboTopRight2 - $120 FatChocoboBottomLeft2 - $121 FatChocoboBottomRight2 - $122 FatChocoboSpot2 - $123 FatChocoboSpot3 - $124 Namingway2 - $125 Cid4 - $126 Invisible1 - $127 Lugae - $128 Rubicant3 - $129 Teleportation - $12A HoodedMonster2 - $12B Invisible2 - $12C Mist - $12D Cid5 - $12E Edge3 - $12F Dwarf17 - $130 Dwarf18 - $131 Dwarf19 - $132 Dwarf20 - $133 Dwarf21 - $134 Dwarf22 - $135 SleepingBubble2 - $136 Dwarf23 - $137 Dwarf24 - $138 Dwarf25 - $139 Dwarf26 - $13A Luca2 - $13B DoubleDoor1 - $13C Lightning2 - $13D DoubleDoor2 - $13E DoubleDoor3 - $13F DoubleDoor4 - $140 DoubleDoor5 - $141 DoubleDoor6 - $142 DoubleDoor7 - $143 DoubleDoor8 - $144 DoubleDoor9 - $145 DoubleDoor10 - $146 DoubleDoor11 - $147 DoubleDoor12 - $148 DoubleDoor13 - $149 DoubleDoor14 - $14A DoubleDoor15 - $14B HoodedMonster3 - $14C HoodedMonster4 - $14D HoodedMonster5 - $14E HoodedMonster6 - $14F Chocobo1 - $150 Chocobo2 - $151 Bomb2 - $152 Bomb3 - $153 OldMan1 - $154 Queen2 - $155 Invisible3 - $156 Invisible4 - $157 Invisible5 - $158 Invisible6 - $159 Invisible7 - $15A Invisible8 - $15B Invisible9 - $15C Invisible10 - $15D Invisible11 - $15E Invisible12 - $15F HoodedMonster7 - $160 Bomb4 - $161 HoodedMonster8 - $162 Chocobo3 - $163 Bahamut - $164 Boy1 - $165 Girl1 - $166 Fusoya2 - $167 Sparkle2 - $168 DoubleDoor16 - $169 Invisible13 - $16A BigWhaleCrystal - $16B Invisible14 - $16C Invisible15 - $16D Invisible16 - $16E Smithy - $16F SmithyFinishedExcalibur - $170 Soldier1 - $171 Man1 - $172 Soldier2 - $173 BlackMage1 - $174 WhiteMage1 - $175 Soldier3 - $176 Soldier4 - $177 BlackMage2 - $178 Invisible17 - $179 WhiteMage2 - $17A OldMan2 - $17B Girl2 - $17C Dancer2 - $17D Midget - $17E HoodedMonster9 - $17F Namingway3 - $180 Man2 - $181 Scholar1 - $182 Boy2 - $183 Crystal3 - $184 Crystal4 - $185 Crystal5 - $186 Crystal6 - $187 Crystal7 - $188 Crystal8 - $189 Crystal9 - $18A Crystal10 - $18B Yang4 - $18C Sylph1 - $18D Sylph2 - $18E Sylph3 - $18F Sylph4 - $190 Sylph5 - $191 Invisible18 - $192 Dwarf27 - $193 Invisible19 - $194 Dwarf28 - $195 Dwarf29 - $196 SleepingBubble3 - $197 Fire - $198 Invisible20 - $199 Invisible21 - $19A Rosa3 - $19B AdultRydia3 - $19C Edge4 - $19D Kain3 - $19E CecilRosaKiss2 - $19F Elder2 - $1A0 Palom2 - $1A1 Porom2 - $1A2 Edward2 - $1A3 Cid6 - $1A4 Yang5 - $1A5 Giott - $1A6 Luca3 - $1A7 Engineer3 - $1A8 Engineer4 - $1A9 Cleric1 - $1AA Cleric2 - $1AB FatChocoboTopLeft3 - $1AC FatChocoboTopRight3 - $1AD FatChocoboBottomLeft3 - $1AE FatChocoboBottomRight3 - $1AF Zemus - $1B0 ZemusFallen - $1B1 WaveringFlame - $1B2 Fusoya3 - $1B3 Golbez4 - $1B4 KingGiott2 - $1B5 Luca4 - $1B6 Dwarf30 - $1B7 Scholar2 - $1B8 Elder3 - $1B9 Palom3 - $1BA Porom3 - $1BB BlackMage3 - $1BC WhiteMage3 - $1BD Girl3 - $1BE Boy3 - $1BF Woman1 - $1C0 KarateFighter - $1C1 Rosa4 - $1C2 Cid7 - $1C3 
PaladinCecil2 - $1C4 Man3 - $1C5 OldMan3 - $1C6 OldWoman - $1C7 Yang6 - $1C8 Edward3 - $1C9 Edge5 - $1CA AdultRydia4 - $1CB KainHair - $1CC HeartBubble - $1CD King - $1CE Soldier5 - $1CF MilonGoblin - $1D0 Frog - $1D1 Porom4 - $1D2 Cleric3 - $1D3 Dwarf31 - $1D4 Chocobo4 - $1D5 AstroObservatoryInvisible - $1D6 YellowChocobo2 - $1D7 WhiteChocobo2 - $1D8 BlackChocobo2 - $1D9 AstroObservatoryScholar - $1DA SleepingBubble4 - $1DB CrystalSword - $1DC Masamune - $1DD Murasame - $1DE WhiteSpear - $1DF Dwarf32 - $1E0 Namingway4 - $1E1 Namingway5 - $1E2 Namingway6 - $1E3 Namingway7 - $1E4 Namingway8 - $1E5 Edward4 - $1E6 EdwardHarp2 - $1E7 Tellah2 - $1E8 Man4 - $1E9 Woman2 - $1EA Soldier6 - $1EB BlackMage4 - $1EC WhiteMage4 - $1ED Soldier7 -} - -consts(sprite) { - $00 DKCecil - $01 Kain - $02 CRydia - $03 Tellah - $04 Edward - $05 Rosa - $06 Yang - $07 Palom - $08 Porom - $09 PCecil - $0A Cid - $0B ARydia - $0C Edge - $0D Fusoya - $0E Mini - $0F Frog - $10 Pig - $11 Man - $12 Woman - $13 Dancer - $14 OldMan - $15 OldWoman - $16 Boy - $17 Girl - $18 Soldier - $19 BeardedMan - $1A Scholar - $1B BlackMage - $1C WhiteMage - $1D Engineer - $1E Chocobo - $1F Monk - $20 Captain - $21 Bomb - $22 HoodedMonster - $23 Namingway - $24 Golbez - $25 King - $26 Elder - $27 Cleric - $28 Dwarf - $29 Calbrena - $2A Giott - $2B Lugae - $2C Luca - $2D GolbezHand - $2E CecilRosaKiss - $2F Crystal - $30 HeartBubble - $31 Fire - $32 SleepBubble - $33 YangWifeWave - $34 YangWifeSad - $35 EdwardHarp - $36 Splash - $37 Masamune - $38 DancerDress - $39 LegendarySword - $3A RosaTiedUpSick - $3B Rubicant - $3C Sparkle - $3D ElderPraying - $3E BlackMagePraying - $3F ZemusFallen - $40 WhiteMagePraying - $41 Sylph - $42 ZeromusFlame - $43 KainHair - $44 Lightning - $45 Teleportation - $46 FatChocobo00 - $47 FatChocobo10 - $48 FatChocobo01 - $49 FatChocobo11 - $4A Namingway00 - $4B Namingway01 - $4C Namingway10 - $4D Namingway11 - $4E Valvalis - $4F DancerLeg - $50 Queen - $51 Zemus - $52 OctomammTentacles - $53 MagusSister - $54 DarkElf - $55 Bahamut - $56 Kainazzo - $57 Dress - $58 IceWall - $59 WhiteSpear - $5A WomanLyingDown - $5B DeathBall - $5C Mist - $5D DoubleDoor - $5E FallenSoldier - $5F AntlionClawLeft - $60 AntlionClawRight - $61 Anna - $62 GolbezFallen - $63 CrystalSword - $64 AnnaFallen - $65 Transparent - $66 IceMagic - $67 FlameTop - $68 FlameBottom - $69 StonePalom - $6A StonePorom -} - -consts(command) -{ - $00 Fight - $01 Item - $02 White - $03 Black - $04 Call - $05 DarkWave - $06 Jump - $07 Recall //theory - $08 Sing - $09 Hide - $0A Medicine //theory - $0B Pray //theory - $0C Aim - $0D Focus //theory - $0E Kick - $0F Fortify //theory - $10 Twin - $11 Bluff //theory - $12 Cry //theory - $13 Cover - $14 Peep - //$15 Dummy //??? 
- $16 Dart - $17 Sneak - $18 Ninja - $19 Regen //theory - $1A Change - - $1C Show - $1D Off -} - -consts(element) -{ - $00 Fire - $01 Ice - $02 Lit - $03 Dark - $04 Holy - $05 Air - $06 Absorb - $07 Immune -} - -consts(status) { - $00 Poison - $01 Blind - $02 Mute - $03 Piggy - $04 Mini - $05 Toad - $06 Stone - $07 Swoon - $08 Calcify1 - $09 Calcify2 - $0A Berserk - $0B Charm - $0C Sleep - $0D Stun - $0E Float - $0F Curse - $10 Count - $11 Jump - $12 Twin - $13 Charge - $14 Parry - $15 Egg - $16 Stop - $17 Magnet - $18 Critical - $19 Covered - $1A Blink1 - $1B Blink2 - $1C Armor - $1D Wall - $1E HPLeak - $1F Hidden -} - -consts(race) { - $0 Dragon - $1 Robot - $2 Reptile - $3 Spirit - $4 Giant - $5 Slime - $6 Mage - $7 Zombie -} \ No newline at end of file diff --git a/worlds/ff4fe/FreeEnterpriseForAP/f4c/default.hints b/worlds/ff4fe/FreeEnterpriseForAP/f4c/default.hints deleted file mode 100644 index abc446d42203..000000000000 --- a/worlds/ff4fe/FreeEnterpriseForAP/f4c/default.hints +++ /dev/null @@ -1,22 +0,0 @@ -hints { - event $1B map #BaronCastleThroneRoom - event $1D map #BaronCastle - event $1F map #BaronTown - event $20 map #BaronTown - event $21 map #BaronInn - event $3A map #KaipoHospital - event $3B map #WateryPass1F - event $42 map #Overworld - event $5E map #HouseOfWishes - event $64 map #HouseOfWishes - event $66 map #Overworld - event $83 map #Overworld - event $AF map #SealedCaveCrystalRoom - event $C6 map #Overworld - event $C9 map #FeymarchLeviathanRoom - event $CA map #FeymarchLeviathanRoom - event $D1 map #BaronTown - event $D6 map #BaronTown - // event $FA map ??? -- Tower key - event $FA map #Babil5F -} diff --git a/worlds/ff4fe/FreeEnterpriseForAP/f4c/event_common.py b/worlds/ff4fe/FreeEnterpriseForAP/f4c/event_common.py deleted file mode 100644 index 9e3239356479..000000000000 --- a/worlds/ff4fe/FreeEnterpriseForAP/f4c/event_common.py +++ /dev/null @@ -1,359 +0,0 @@ -def is_placement_command_code(code): - return (code < 0xC0) - -BATCH_COMMAND_CODE = 0xEB - -COMMANDS = { - 0xC0 : ['player move up'], - 0xC1 : ['player move right'], - 0xC2 : ['player move down'], - 0xC3 : ['player move left'], - 0xC4 : ['player face up'], - 0xC5 : ['player face right'], - 0xC6 : ['player face down'], - 0xC7 : ['player face left'], - 0xC8 : ['player invisible'], - 0xC9 : ['player visible'], - 0xCA : ['player wave in'], - 0xCB : ['player wave out'], - 0xCC : ['player bow head'], - 0xCD : ['player lie down'], - 0xCE : ['player toggle turning'], - 0xCF : ['player toggle spinning'], - 0xD0 : ['toggle screen shake'], - 0xD1 : ['screen flash'], - 0xD2 : ['screen blur'], - 0xD3 : ['moon travel'], - 0xD4 : ['fat chocobo'], - 0xD5 : ['open door'], - 0xD6 : ['screen up down'], - 0xD7 : ['toggle run'], - 0xD8 : ['toggle music fade'], - 0xD9 : ['namingway'], - 0xDA : ['toggle screen fade'], - 0xDB : ['toggle status {}', 'status'], - 0xDC : ['inn {}', 'hex'], - 0xDD : ['party leader {}', 'actor'], - 0xDE : ['give hp {}', 'hpmp'], - 0xDF : ['give mp {}', 'hpmp'], - 0xE0 : ['give item {}', 'item'], - 0xE1 : ['take item {}', 'item'], - 0xE2 : ['give spell {} {}', 'spellset', 'spell'], - 0xE3 : ['clear status {}', 'status'], - 0xE4 : ['give status {}', 'status'], - 0xE5 : ['give gp {}', 'gp'], - 0xE6 : ['take gp {}', 'gp'], - 0xE7 : ['give actor {}', 'actor'], - 0xE8 : ['take actor {}', 'actor'], - 0xE9 : ['pause {}', 'decimal'], - 0xEA : ['music {} fade in', 'music'], - 0xEC : ['fight {}', 'formation'], - 0xED : ['shop {}', 'shop'], - 0xEE : ['event message {}', 'decimal'], - 0xEF : ['map message {}', 
'decimal'], - 0xF0 : ['message {}', 'message'], - 0xF1 : ['message {}', 'message_high'], - 0xF2 : ['set {}', 'flag'], - 0xF3 : ['clear {}', 'flag'], - 0xF4 : ['activate {}', 'npc'], - 0xF5 : ['deactivate {}', 'npc'], - 0xF6 : ['message {} from bank 3', 'message'], - 0xF7 : ['select item {}', 'item'], - 0xF8 : ['confirm message {}', 'message_high'], - 0xF9 : ['toggle tint {}', 'hex'], - 0xFA : ['music {}', 'music'], - 0xFB : ['sound {}', 'sound'], - #0xFC : ['crash', 'hex'], - 0xFD : ['vfx {}', 'vfx'], - 0xFE : ['load map {} at {} {} {}', 'map', 'decimal', 'decimal', 'mapflags'] -} - -PLACEMENT_COMMANDS = { - 0x0 : 'move up', - 0x1 : 'move right', - 0x2 : 'move down', - 0x3 : 'move left', - 0x4 : 'face up', - 0x5 : 'face right', - 0x6 : 'face down', - 0x7 : 'face left', - 0x8 : 'toggle visible', - 0x9 : 'jump sideways', - 0xA : 'spin', - 0xB : 'spin jump', - 0xC : 'wave in', - 0xD : 'wave out', - 0xE : 'bow head', - 0xF : 'lie down' -} - -VEHICLES = { - 1 : 'on chocobo', - 2 : 'on black chocobo', - 3 : 'on hovercraft', - 4 : 'on enterprise', - 5 : 'on falcon', - 6 : 'on big whale', - 7 : 'on ship', - 0x1F : 'on ship 2' -} - -DEFAULT_EVENT_DESCRIPTIONS = [ - "(no event)", - "Show first message", - "Show second message", - "Show third message", - "Behemoth battle 51", - "Behemoth battle 52", - "Entering Tower of Bab-il", - "Hummingways", - "Rydia learning Sylph", - "Receiving the pan", - "Do you have the clerics' permission?", - "Open a door", - "Locked", - "Namingway", - "Rubicant main battle", - "OUCH!", - "Opening events", - "Cid greeting Cecil", - "Speaking to Kain in Baron", - "Cecil's maid", - "Rosa greeting Cecil", - "Cecil sleeping, Prologue", - "Item selection - soft", - "Sealed with unknown power", - "Finding Odin before Feymarch", - "Odin battle", - "Baigan battle", - "Kainazzo battle, losing twins", - "Devil's Road is sealed", - "Attaching hook to Enterprise", - "Devil's Road to Mysidia", - "Unlock the Waterway door", - "Unlock the Weapon/Armor shop", - "Cecil/Yang fight", - "Staying at a 50 GP Inn", - "Bring up Silvera weapon shop", - "Bring up Baron weapon shop", - "Bring up Baron armor shop", - "Bring up Baron item shop", - "Map message 0", - "Map message 1", - "Map message 2", - "Map message 3", - "Map message 4", - "Map message 5", - "Map message 6", - "Map message 7", - "Mist Dragon battle", - "Mist events", - "Watery Cave camping", - "Staying at a 800 GP Inn", - "Bring up Mist weapon shop", - "Bring up Mist armor shop", - "Bring up Kaipo weapon shop", - "Bring up Kaipo armor shop", - "Bring up Silvera armor shop", - "Bring up Silvera item shop", - "Kaipo Events, Rydia Joining", - "Curing Rosa", - "Watery Cave Tellah Joining", - "Watery Cave Waterfall events", - "Octomamm battle", - "The current is too strong for us to get in!", - "...", - "Giott: Open the door!", - "'Terrible bombardments from the airships'", - "Red Wings bombing Damcyan", - "Tellah fighting Edward, Edward joining", - "Fallen soldier in Damcyan's basement", - "Antlion battle", - "Rydia melting Mt. 
Hobs ice, learning Fire", - "Yang joining, Mombomb battle", - "Fabul battle", - "Staying at a 100 GP Inn", - "Toroia Clerics after Earth Crystal", - "WA!", - "We also might have been hypnotized", - "Open Fat Chocobo screen", - "Giving a Carrot to a Fat Chocobo", - "Mist mountains on a chocobo", - "Touching a White Chocobo", - "Rosa getting captured", - "Discussion about rescuing Rosa", - "Bring up Agart weapon shop", - "Bring up Agart armor shop", - "Being poisoned by the bartender in Mysidia", - "Staying at a 200 GP Inn", - "Speaking to the White Mage in Mysidia", - "Winning the Crystal Sword", - "Winning the Masamune", - "Bring up Mysidia weapon shop", - "Bring up Mysidia armor shop", - "Bring up Mysidia item shop", - "Being turned into a toad by a Black Mage", - "Talking with Mysidian Elder, twins joining", - "Mt. Ordeals fire put out by Palom", - "Meeting Tellah on Mt. Ordeals", - "First Milon battle", - "Cecil's transformation, Tellah remembering", - "Second Milon battle", - "Paladin Cecil talking to Mysidian Elder", - "Devil's Road to Baron", - "Entering the Tower of Zot first time", - "Resting for free in Fabul", - "Resting for free in Castle Baron", - "Load map 8A at position 07, 06, 00", - "Load map 8A at position 87, 03, 00", - "Load map 8A at position 17, 06, 00", - "Load map 8A at position 97, 03, 00", - "Bring up Toroia armor shop", - "Bring up Toroia weapon shop", - "Bring up Eblan items shop", - "Staying at a 400 GP Inn", - "Meeting Edward in Toroia, receiving TwinHarp", - "Edward in Toroia after defeating the Dark Elf", - "Dark Elf 'Me Attack You' battle", - "Obtaining the Earth Crystal", - "Found Black Chocobo!", - "Save point message", - "Using a Tent", - "Using a Cabin", - "Magus Sisters battle", - "Top of Tower of Zot events", - "Bring up shop 01", - "Valvalis battle", - "HP restoring pot", - "MP restoring pot", - "Magnus Cave entering message", - "Dark Elf 'You Fool' battle", - "Dark Elf 'Fire2' battle", - "Kain regaining his senses", - "Depart Fabul, Leviathan Attack", - "Using Magma Key, opening Underworld", - "HP/MP/Status restoring pot", - "Exit spell", - "Warp spell", - "Characters 2, 5: Toggle Visibility, Message EF00", - "Characters 2, 5: Toggle Visibility, Message EF01", - "Characters 2, 5: Toggle Visibility, Message EF04", - "Entering the Underworld in the airship events", - "Meeting Giott, Calbrena battle, rejoining Rydia", - "Entering the Tower of Zot subsequent times", - "Exiting the Tower of Zot", - "Being turned Mini in Baron's Classroom", - "Staying at a 1600 GP Inn", - "Dwarf in castle basement opens the door", - "Authorized dwarves only!", - "Load Old Waterway map B1F", - "Dr. Lugae battles", - "Super Cannon destruction, Yang leaves", - "Jump from Tower of Bab-il, airship escape", - "Staying at a 3,200 GP Inn", - "Bring up Eblan weapon shop", - "Bring up Eblan armor shop", - "Edge joining, Rubicant battle", - "Ninja into Bab-il", - "Found Black Chocobo!", - "Returning from the moon to Giant of Bab-il", - "Big Whale surfacing", - "'Do you have the Clerics' permission?' 
when you do", - "Ninja out of Bab-il", - "Fighting Edge's parents", - "Falling to the Underground", - "Winning the Murasame", - "Winning the Holy Lance", - "Finding the Falcon", - "Obtaining the Luca Key", - "Cid meeting Edge, adding the heat shield to the Falcon", - "Opening the Sealed Cave door", - "Feymarch entrance", - "Demon Wall battle", - "Staying at a 6,400 GP Inn", - "Bring up Tomra weapon shop", - "Bring up Tomra armor shop", - "Bring up Kaipo weapon shop", - "Obtaining the Dark Crystal", - "Climbing ropes down", - "Climbing ropes up", - "Bring up Hummingway shop", - "Climbing ropes down, jumping sideways/nothing", - "Climbing ropes up, jumping sideways/nothing", - "Climbing ropes down, spinning, moving up", - "Kain stealing the Dark Crystal and leaving", - "TrapDoor battle 3D", - "TrapDoor battle 3E", - "TrapDoor battle 3F", - "TrapDoor battle 40", - "TrapDoor battle 41", - "TrapDoor battle 42", - "TrapDoor battle 43", - "TrapDoor battle 44", - "TrapDoor battle 45", - "TrapDoor battle 46", - "TrapDoor battle 47", - "TrapDoor battle 48", - "TrapDoor battle 49", - "TrapDoor battle 4A", - "Talking to Giott, attaching the airship drill", - "Drilling to the surface", - "Bring up Dwarf weapons shop", - "Bring up Dwarf armor shop", - "Asura battle", - "Leviathan battle", - "Bring up Feymarch weapon shop", - "Bring up Feymarch armor shop", - "Staying at a 12,800 GP Inn", - "Bring up Tomra item shop", - "Bahamut battle", - "Meeting FuSoYa and having him join", - "Baron Dancer's dance", - "Kaipo Dancer's dance", - "Fabul Dancer's dance", - "Agart Dancer's dance", - "Mysidia Dancer's dance", - "Baron Dancer's dance after killing Kainazzo", - "Silvera 3 Dancers' dance", - "Toroia Dance Troupe's dance", - "Dwarf Castle Dancer's dance", - "Characters 2, 5: Toggle Visibility, Message EF07", - "Reading the magazine?", - "'Sealed with unknown power'", - "Entering the Lunar Subterrane", - "Obtaining the magazine", - "Obtaining a Grimoire 6C", - "Obtaining a Grimoire 6D", - "Obtaining Excalibur", - "Trading the Rat Tail for Adamant", - "Trading the Pink Tail for Adamant Armor", - "Load Big Whale interior", - "Rydia learning Sylph using the Pan", - "Giant of Bab-il CPU battle", - "Zeromus Battle, Ending Events", - "Load Big Whale interior, activate Baron Black Chocobo", - "Boarding Big Whale", - "Travel to/from Moon", - "Resting for free", - "TrapDoor battle 68", - "Element Fiends battle", - "Giving Adamant and Legend Sword to smithy", - "Bring up Toroia pass shop", - "Do you have a pass?", - "Bring up Smith's shop", - "Watery Pass - North exiting message", - "Behemoth battle 50", - "Load Room of Wishes, play 'Long Way to Go'", - "Load Room of Wishes, Yang/Cid/Edward, play 'Long Way to Go'", - "Load Mysidia Crystal Room, play 'The Prelude'", - "Speaking to Sylph before Yang awakes", - "Obtaining a Grimoire 91", - "Trading the Pan for the Spoon", - "Using the Tower Key", - "Visual effect 33", - "Obtaining the Ribbon", - "Viewing the moon through the telescope", - "Two explosions at right side of screen", - "???" 
-] - diff --git a/worlds/ff4fe/FreeEnterpriseForAP/f4c/ff4bin/__init__.py b/worlds/ff4fe/FreeEnterpriseForAP/f4c/ff4bin/__init__.py deleted file mode 100644 index b905fb2eb844..000000000000 --- a/worlds/ff4fe/FreeEnterpriseForAP/f4c/ff4bin/__init__.py +++ /dev/null @@ -1 +0,0 @@ -from .rom import Rom diff --git a/worlds/ff4fe/FreeEnterpriseForAP/f4c/ff4bin/dataarray.py b/worlds/ff4fe/FreeEnterpriseForAP/f4c/ff4bin/dataarray.py deleted file mode 100644 index 29f6b0af19bf..000000000000 --- a/worlds/ff4fe/FreeEnterpriseForAP/f4c/ff4bin/dataarray.py +++ /dev/null @@ -1,36 +0,0 @@ -import struct - -class DataArray: - def __init__(self, infile, data_address, length, data_size=1): - self.data_address = data_address - infile.seek(data_address) - if data_size == 1: - self._format = '{}B' - elif data_size == 2: - self._format = '<{}H' - else: - raise ValueError("Data array cannot be initialized with data size {}".format(data_size)) - - self._data_size = data_size - self._data = list(struct.unpack(self._format.format(length), infile.read(length * self._data_size))) - self._changed = False - - def __getitem__(self, k): - return self._data[k] - - def __setitem__(self, k, v): - if self._data[k] != v: - self._changed = True - self._data[k] = v - - def save_if_changed(self, outfile): - if self._changed: - outfile.seek(self.data_address) - outfile.write(struct.pack(self._format.format(len(self._data)), *self._data)) - - def __len__(self): - return len(self._data) - - def __iter__(self): - for d in self._data: - yield d diff --git a/worlds/ff4fe/FreeEnterpriseForAP/f4c/ff4bin/datatable.py b/worlds/ff4fe/FreeEnterpriseForAP/f4c/ff4bin/datatable.py deleted file mode 100644 index 28acb083e69f..000000000000 --- a/worlds/ff4fe/FreeEnterpriseForAP/f4c/ff4bin/datatable.py +++ /dev/null @@ -1,183 +0,0 @@ -''' -Abstraction for ROM data that is stored with/without a pointer table. 
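As a quick orientation to the abstraction being removed here, the following is a minimal sketch of a DataTable over fixed-size records with no pointer table, inferred from the class below; the import path, the in-memory stream, and the addresses are assumptions made purely for illustration and are not part of the original code.

# Sketch only: exercises the DataTable interface shown below; nothing here comes from a real ROM.
import io
from f4c.ff4bin.datatable import DataTable   # import path assumed from this package layout

stream = io.BytesIO(bytes(range(256)) * 16)  # stand-in for an open ROM file object
shops = DataTable(stream, data_address=0x100, data_count=4, data_size=8)
print(shops[0])                # an 8-byte tuple read starting at data_address
shops[1] = [0xFF] * 8          # assignment marks the table as changed
shops.save_if_changed(stream)  # rewrites the records only because one changed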
-''' - -import struct -import math - -class DataTable: - def __init__(self, stream=None, - pointer_address=None, data_address=None, data_count=None, - data_size=None, use_end_pointer=False, data_consume_func=None, - last_data_size=None, data_overflow_address=None, data_filler=None, - nonlinear=False, pointer_offset_regions=None): - - self.pointer_address = pointer_address - self._pointer_offset = 0 - self.data_address = data_address - self.data_count = data_count - self.data_size = data_size - self.use_end_pointer = use_end_pointer - self.data_consume_func = data_consume_func - self.last_data_size = last_data_size - self.data_overflow_address = data_overflow_address - self.data_filler = [data_filler] if type(data_filler) is int else data_filler - self.nonlinear = nonlinear - self.pointer_offset_regions = pointer_offset_regions - - self.changed = False - - if stream is not None: - self.load(stream) - - def load(self, stream): - self._data = [] - - pointer_table = None - if self.pointer_address: - num_pointers = self.data_count - if self.use_end_pointer: - num_pointers += 1 - - stream.seek(self.pointer_address) - pointer_table = list(struct.unpack('<{}H'.format(num_pointers), stream.read(num_pointers * 2))) - - if self.pointer_offset_regions: - region_offset = 0 - for i in range(num_pointers): - if i in self.pointer_offset_regions: - region_offset = self.pointer_offset_regions[i] - pointer_table[i] += region_offset - - self._pointer_offset = min(pointer_table) - else: - stream.seek(self.data_address) - - if self.nonlinear: - unique_pointers = sorted(set(pointer_table)) - unique_pointer_to_index = {} - self._nonlinear_data_indices = [] - for i,pointer in enumerate(unique_pointers): - stream.seek(self.data_address + pointer) - self._data.append(self.data_consume_func(stream)) - unique_pointer_to_index[pointer] = i - - for i in range(self.data_count): - pointer = pointer_table[i] - nonlinear_index = unique_pointer_to_index[pointer] - self._nonlinear_data_indices.append(nonlinear_index) - - else: - for i in range(self.data_count): - if pointer_table: - stream.seek(self.data_address + pointer_table[i]) - - if self.data_consume_func is not None: - data = self.data_consume_func(stream) - if pointer_table and i < len(pointer_table) - 1 and pointer_table[i] + len(data) > pointer_table[i + 1]: - print("Warning: data table at {:X} entry {} ({:X}) overflows into next pointer area ({:X})".format(self.data_address, i, pointer_table[i], pointer_table[i+1])) - else: - if self.data_size: - data_size = self.data_size - elif i == self.data_count - 1 and self.last_data_size is not None: - data_size = self.last_data_size - elif i == self.data_count - 1 and not self.use_end_pointer and self.data_overflow_address is not None: - data_size = (self.data_overflow_address - self.data_address) - pointer_table[i] - else: - data_size = pointer_table[i + 1] - pointer_table[i] - #print('{} {} {}'.format(data_size, pointer_table[i+1], pointer_table[i])) - - data = struct.unpack('{}B'.format(data_size), stream.read(data_size)) - - self._data.append(data) - - def save(self, stream): - if self.data_overflow_address is not None: - total_data_length = sum([len(d) for d in self._data]) - if self.data_address + total_data_length > self.data_overflow_address: - raise ValueError("Can't write to data table at {:X} -- too much data (length {:X}), will overflow".format(self.data_address, total_data_length)) - - if self.pointer_address: - pointer_table = [] - num_pointers = len(self._data) - if self.use_end_pointer: - num_pointers += 1 - 
- pointer = self._pointer_offset - for i in range(num_pointers): - pointer_table.append(pointer) - if i < len(self._data): - pointer += len(self._data[i]) - - if self.nonlinear: - data_pointers = pointer_table - pointer_table = [] - num_pointers = len(self._nonlinear_data_indices) - for i in range(num_pointers): - pointer_table.append(data_pointers[self._nonlinear_data_indices[i]]) - - if self.pointer_offset_regions: - region_offset = 0 - for i in range(num_pointers): - if i in self.pointer_offset_regions: - region_offset = self.pointer_offset_regions[i] - pointer_table[i] -= region_offset - - stream.seek(self.pointer_address) - stream.write(struct.pack('<{}H'.format(num_pointers), *pointer_table)) - - stream.seek(self.data_address + self._pointer_offset) - for data_id,d in enumerate(self._data): - if not d: - continue - - try: - stream.write(struct.pack('{}B'.format(len(d)), *d)) - except Exception as e: - print('{:X} : {}'.format(data_id, d)) - print("{:X}".format(self.data_address)) - raise e - - if self.data_overflow_address and self.data_filler: - filler_length = self.data_overflow_address - stream.tell() - filler = self.data_filler * int(math.ceil(float(filler_length) / len(self.data_filler))) - stream.write(struct.pack('{}B'.format(filler_length), *filler[:filler_length])) - - def save_if_changed(self, stream): - if self.changed: - self.save(stream) - - def __len__(self): - if self.nonlinear: - return len(self._nonlinear_data_indices) - else: - return len(self._data) - - def __getitem__(self, key): - if self.nonlinear: - return self._data[self._nonlinear_data_indices[key]] - else: - return self._data[key] - - def __setitem__(self, key, value): - if self.nonlinear: - data_index = self._nonlinear_data_indices[key] - reference_count = len([x for x in self._nonlinear_data_indices if x == data_index]) - if reference_count == 1: - self._data[data_index] = value - else: - self._nonlinear_data_indices[key] = len(self._data) - self._data.append(value) - else: - self._data[key] = value - - self.changed = True - - def __iter__(self): - if self.nonlinear: - for i in self._nonlinear_data_indices: - yield self._data[i] - else: - for d in self._data: - yield d - diff --git a/worlds/ff4fe/FreeEnterpriseForAP/f4c/ff4bin/rom.py b/worlds/ff4fe/FreeEnterpriseForAP/f4c/ff4bin/rom.py deleted file mode 100644 index 0957051e7b07..000000000000 --- a/worlds/ff4fe/FreeEnterpriseForAP/f4c/ff4bin/rom.py +++ /dev/null @@ -1,287 +0,0 @@ -''' -Object representing an FF4 rom. Portions of the ROM that -are of interest are exposed as binary data (lists of bytes). -This binary data may then be altered or replaced, and then -saved back to the file, in which case this object handles -the reassignment of space, pointer tables, etc. to match. 
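Taking the docstring above at face value, here is a hedged usage sketch of the Rom wrapper as its interface appears in this diff; the file names, the import path, and the specific edits are hypothetical and only illustrate the read/modify/save flow.

# Sketch only: assumes an FF2US ROM at "ff4.sfc"; the output path is likewise made up.
from f4c.ff4bin import Rom   # re-exported by the deleted ff4bin/__init__.py

rom = Rom('ff4.sfc')                             # loads, strips a copier header if present, checks the MD5
first_monster_name = rom.text_monster_names[0]   # fixed-size tables yield tuples of bytes
rom.npc_sprites[0x51] = 0x12                     # DataArray records the change for save time
rom.add_patch(0x1FF000, [0xEA, 0xEA])            # overlapping patches raise RomError

with open('ff4_out.sfc', 'wb') as outfile:
    rom.save_rom(outfile)                        # only changed structures are rewritten; the checksum is refreshed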
-''' - -import struct -import math -import hashlib -import io -from .datatable import DataTable -from .dataarray import DataArray - -NUM_NPCS = 511 -NUM_EVENT_SCRIPTS = 256 -NUM_MAPS = 383 -NUM_TEXT_BANK1 = 512 -NUM_TEXT_BANK2 = 768 -NUM_TEXT_BANK3 = 256 -NUM_TEXT_BATTLE = 0xBA -NUM_TEXT_ALERTS = 0x3B -NUM_TEXT_STATUS = 32 -NUM_TEXT_MAP_NAMES = 0x79 -NUM_TEXT_DESCRIPTIONS = 60 -NUM_TEXT_MAP_NAMES = 0x79 -NUM_EVENT_CALLS = 255 -NUM_MONSTERS = 0xE0 -NUM_COMMANDS = 26 -NUM_ITEMS = 256 -NUM_SHOPS = 33 -NUM_ACTORS = 21 -NUM_DROP_TABLES = 64 -NUM_MONSTERS = 0xE0 -NUM_FORMATIONS = 512 -NUM_PLAYER_SPELLS = 0x48 -NUM_NAMED_ENEMY_SPELLS = 0xB0 - NUM_PLAYER_SPELLS -NUM_SPELLS = 0xC0 -NUM_SPELL_SETS = 13 -NUM_AI_CONDITIONS = 0x80 - -FF2USV10_MD5 = '4fa9e542b954dcb954d5ce38188d9d41' -FF2USV11_MD5 = '27d02a4f03e172e029c9b82ac3db79f7' - -class RomError(Exception): - pass - -class Rom: - def __init__(self, rom=None, ignore_checksum=False): - self._raw_rom_data = None - self._patches = [] - self._data_structs = [] - if rom is not None: - self.load_rom(rom, ignore_checksum) - - def load_rom(self, rom, ignore_checksum=False): - if type(rom) is str: - close_stream = True - infile = open(rom, 'rb') - else: - close_stream = False - infile = rom - - self._raw_rom_data = infile.read() - - if not ignore_checksum: - if len(self._raw_rom_data) != 0x100000: - if len(self._raw_rom_data) == 0x100200: - # strip ROM header - self._raw_rom_data = self._raw_rom_data[0x200:] - if close_stream: - infile.close() - infile = io.BytesIO(self._raw_rom_data) - close_stream = False - else: - raise RomError("ROM file size is incorrect; expected 0x100000 (or 0x100200 for headered), got 0x{:X}".format(len(self._raw_rom_data))) - - checksum = hashlib.md5() - checksum.update(self._raw_rom_data) - if checksum.hexdigest().lower() not in [FF2USV10_MD5, FF2USV11_MD5]: - raise RomError("ROM MD5 check failed -- data has checksum {}".format(checksum.hexdigest())) - - self.event_scripts = self.create_data_table(infile, pointer_address=0x90000, data_address=0x90200, data_count=NUM_EVENT_SCRIPTS, last_data_size=1, data_overflow_address=0x97000) - - self.npc_sprites = self.create_data_array(infile, 0x97000, NUM_NPCS) - self.npc_event_calls = self.create_data_table(infile, pointer_address=0x99800, data_address=0x99C00, data_count=NUM_NPCS, use_end_pointer=True, data_overflow_address=0x9A300) - - self.map_infos = self.create_data_table(infile, data_address=0xA9C84, data_count=NUM_MAPS, data_size=13) - self.placement_groups = self.create_data_table(infile, pointer_address=0x98000, data_address=0x98300, data_count=NUM_MAPS, data_consume_func=_consume_placements, data_overflow_address=0x99700, nonlinear=True) - self.map_trigger_sets = self.create_data_table(infile, pointer_address=0xA8000, data_address=0xA8300, data_count=NUM_MAPS, use_end_pointer=True, data_overflow_address=0xA961F) - self.world_trigger_sets = self.create_data_table(infile, pointer_address=0xCFE60, data_address=0xCFE66, data_count=3, data_overflow_address=0xD0000, data_filler=[0x00, 0x00, 0xFF, 0xFF, 0x00]) - - self.map_grids = self.create_data_table(infile, pointer_address=0xB8000, data_address=0xB8000, data_count=NUM_MAPS, use_end_pointer=True, pointer_offset_regions={0x100: 0x8000}) - self.tilesets = self.create_data_table(infile, data_address=0xA0E00, data_count=16, data_size=0x100) - - self.event_calls = self.create_data_table(infile, pointer_address=0x97260, data_address=0x97460, data_count=NUM_EVENT_CALLS, use_end_pointer=True, data_overflow_address=0x97660) - - self.text_bank1 
= self.create_data_table(infile, pointer_address=0x80000, data_address=0x80400, data_count=NUM_TEXT_BANK1, data_consume_func=_consume_text, data_overflow_address=0x88000) - self.text_bank2 = self.create_data_table(infile, pointer_address=0x88000, data_address=0x88300, data_count=NUM_MAPS, use_end_pointer=True, data_overflow_address=0x90000) - self.text_bank3 = self.create_data_table(infile, pointer_address=0x9A500, data_address=0x9A700, data_count=NUM_TEXT_BANK3, data_consume_func=_consume_text, data_overflow_address=0x9D200) - self.text_battle = self.create_data_table(infile, pointer_address=0x77200, data_address=0x68000, data_count=NUM_TEXT_BATTLE, data_consume_func=_consume_text, data_overflow_address=0x78000) - self.text_alerts = self.create_data_table(infile, pointer_address=0x7B000, data_address=0x70000, data_count=NUM_TEXT_ALERTS, data_consume_func=_consume_text, data_overflow_address=0x7B400, nonlinear=True) - self.text_status = self.create_data_table(infile, pointer_address=0x7B400, data_address=0x70000, data_count=NUM_TEXT_STATUS, data_consume_func=_consume_text, data_overflow_address=0x7B500, nonlinear=True) - self.text_monster_names = self.create_data_table(infile, data_address=0x71800, data_count=NUM_MONSTERS, data_size=8) - self.text_command_names = self.create_data_table(infile, data_address=0x7A7C6, data_count=NUM_COMMANDS, data_size=5) - self.text_map_names = self.create_data_table(infile, data_address=0xA9620, data_count=NUM_TEXT_MAP_NAMES, data_consume_func=_consume_text, data_overflow_address=0xA9C80) - self.text_item_names = self.create_data_table(infile, data_address=0x78000, data_count=NUM_ITEMS, data_size=9) - self.text_spell_names = self.create_data_table(infile, data_address=0x78900, data_count=NUM_PLAYER_SPELLS, data_size=6) - self.text_enemy_spell_names = self.create_data_table(infile, data_address=0x78AB0, data_count=NUM_NAMED_ENEMY_SPELLS, data_size=8) - - self.monster_gp = self.create_data_array(infile, 0x72000, NUM_MONSTERS, data_size=2) - self.monster_xp = self.create_data_array(infile, 0x721C0, NUM_MONSTERS, data_size=2) - self.monster_gfx = self.create_data_table(infile, data_address=0x7CA00, data_count=NUM_MONSTERS, data_size=4) - self.monsters = self.create_data_table(infile, pointer_address=0x726A0, data_address=0x68000, data_count=NUM_MONSTERS, data_consume_func=_consume_monster, data_overflow_address=0x736c0, nonlinear=True) - self.monster_stats = self.create_data_table(infile, data_address=0x72380, data_count=0xE0, data_size=3) - self.monster_speeds = self.create_data_table(infile, data_address=0x72620, data_count=0x40, data_size=2) - - self.monster_scripts = self.create_data_table(infile, data_address=0x76900, data_count=0x100, data_consume_func=_consume_until(0xFF)) - self.moon_monster_scripts = self.create_data_table(infile, data_address=0x736C0, data_count=0x5A, data_consume_func=_consume_until(0xFF)) - self.ai_conditions = self.create_data_table(infile, data_address=0x76700, data_count=NUM_AI_CONDITIONS, data_size=4) - self.ai_condition_sets = self.create_data_table(infile, data_address=0x76600, data_count=0x62, data_consume_func=_consume_until(0xFF), data_overflow_address=0x76700) - self.ai_groups = self.create_data_table(infile, data_address=0x76030, data_count=0xFE, data_consume_func=_consume_until(0xFF), data_overflow_address=0x76600) - self.ai_hp_thresholds = self.create_data_array(infile, 0x76000, 24, 2) - - self.spells = self.create_data_table(infile, data_address=0x797A0, data_count=NUM_SPELLS, data_size=6) - - self.formations = 
self.create_data_table(infile, data_address=0x70000, data_count=NUM_FORMATIONS, data_size=8) - - self.shops = self.create_data_table(infile, data_address=0x9A300, data_count=NUM_SHOPS, data_size=8) - - num_npc_active_flag_bytes = int(math.ceil(NUM_NPCS / 8.0)) - self.npc_active_flags = self.create_data_array(infile, 0x97200, num_npc_active_flag_bytes) - - self.actor_name_ids = self.create_data_array(infile, 0x08457, NUM_ACTORS) - self.actor_load_info = self.create_data_array(infile, 0x0669A, NUM_ACTORS) - self.actor_save_info = self.create_data_array(infile, 0x0671D, NUM_ACTORS) - self.actor_commands = self.create_data_table(infile, data_address=0x9FD55, data_count=NUM_ACTORS, data_size=5) - self.actor_gear = self.create_data_table(infile, data_address=0x7AB00, data_count=NUM_ACTORS, data_size=7) - - self.spell_sets = self.create_data_table(infile, data_address=0x7C8C0, data_count=NUM_SPELL_SETS, data_consume_func=_consume_until(0xFF, or_length=24), data_overflow_address=0x7CA00) - self.learned_spells = self.create_data_table(infile, data_address=0x7C700, data_count=NUM_SPELL_SETS, data_consume_func=_consume_until(0xFF), data_overflow_address=0x7C8C0) - - self.drop_tables = self.create_data_table(infile, data_address=0x71F00, data_count=NUM_DROP_TABLES, data_size=4) - - infile.seek(0x9F36D) - self.text_credits = list(struct.unpack('1130B', infile.read(1130))) - self._original_text_credits = list(self.text_credits) - - if close_stream: - infile.close() - - def create_data_table(self, *args, **kwargs): - dt = DataTable(*args, **kwargs) - self._data_structs.append(dt) - return dt - - def create_data_array(self, *args, **kwargs): - da = DataArray(*args, **kwargs) - self._data_structs.append(da) - return da - - def add_patch(self, address, byte_list): - for patch in self._patches: - if address < (patch[0] + len(patch[1])) and (address + len(byte_list)) > patch[0]: - raise RomError("Patch at {:X} conflicts with prior patch at {:X}".format(address, patch[0])) - self._patches.append( (address, byte_list) ) - - def save_rom(self, rom): - if type(rom) is str: - outfile = io.BytesIO() - else: - outfile = rom - - outfile.write(self._raw_rom_data) - - for dt in self._data_structs: - dt.save_if_changed(outfile) - - if self.text_credits != self._original_text_credits: - outfile.seek(0x9F36D) - outfile.write(struct.pack('{}B'.format(len(self.text_credits)), *self.text_credits)) - - # expand rom size to fit patches that apply past the current boundary - outfile.seek(0, 2) - rom_size = outfile.tell() - - if self._patches: - farthest_patch = max(self._patches, key=lambda p:p[0] + len(p[1])) - minimum_rom_size = farthest_patch[0] + len(farthest_patch[1]) - - if rom_size < minimum_rom_size: - # increase rom size to nearest 8mbit boundary - if minimum_rom_size & 0xFFFFF: - target_rom_size = (minimum_rom_size & 0xFF00000) + 0x100000 - else: - target_rom_size = minimum_rom_size - - for i in range(target_rom_size - rom_size): - outfile.write(b"\xFF") - rom_size = target_rom_size - - for patch in self._patches: - outfile.seek(patch[0]) - if isinstance(patch[1], bytes): - outfile.write(patch[1]) - else: - outfile.write(struct.pack('{}B'.format(len(patch[1])), *patch[1])) - - # update checksum - outfile.seek(0) - checksum = sum(struct.unpack('{}B'.format(rom_size), outfile.read(rom_size))) & 0xFFFF - outfile.seek(0x7FDE) - outfile.write(struct.pack('> 1 - elif f == 'b': - result.append(bool(byte & 1)) - byte = byte >> 1 - elif f >= '1' and f <= '8': - size = ord(f) - ord('0') - result.append(byte & ((1 << size) - 1)) 
- byte = byte >> size - else: - raise ValueError("Invalid format character '{}'".format(f)) - - return tuple(result) - -''' -Packs the given data into a byte according to the provided -format string; see unpack_byte for format description. -''' -def pack_byte(format, *values): - values = list(values) - result = 0 - shift = 0 - for f in format.lower(): - if f == 'x': - shift += 1 - elif f == 'b': - if values.pop(0): - result |= (1 << shift) - shift += 1 - elif f >= '1' and f <= '8': - size = ord(f) - ord('0') - result |= (values.pop(0) & ((1 << size) - 1)) << shift - shift += size - else: - raise ValueError("Invalid format character '{}'".format(f)) - - return result diff --git a/worlds/ff4fe/FreeEnterpriseForAP/f4c/ff4struct/drop_table.py b/worlds/ff4fe/FreeEnterpriseForAP/f4c/ff4struct/drop_table.py deleted file mode 100644 index bfcdc71461a8..000000000000 --- a/worlds/ff4fe/FreeEnterpriseForAP/f4c/ff4struct/drop_table.py +++ /dev/null @@ -1,19 +0,0 @@ - -class DropTable: - def __init__(self): - self.common = None - self.uncommon = None - self.rare = None - self.mythic = None - - def encode(self): - return [(0 if i is None else i) for i in [self.common, self.uncommon, self.rare, self.mythic]] - -def decode(byte_list): - dt = DropTable() - dt.common = (None if byte_list[0] == 0 else byte_list[0]) - dt.uncommon = (None if byte_list[1] == 0 else byte_list[1]) - dt.rare = (None if byte_list[2] == 0 else byte_list[2]) - dt.mythic = (None if byte_list[3] == 0 else byte_list[3]) - - return dt diff --git a/worlds/ff4fe/FreeEnterpriseForAP/f4c/ff4struct/event_call.py b/worlds/ff4fe/FreeEnterpriseForAP/f4c/ff4struct/event_call.py deleted file mode 100644 index 76a7433f4a0e..000000000000 --- a/worlds/ff4fe/FreeEnterpriseForAP/f4c/ff4struct/event_call.py +++ /dev/null @@ -1,76 +0,0 @@ - - -class EventCallCondition: - def __init__(self, flag=None, value=None): - self.flag = flag - self.value = value - -class EventCallCase: - def __init__(self): - self.conditions = [] - self.event = None - - def add_condition(self, flag, value): - self.conditions.append( EventCallCondition(flag, value) ) - - def encode(self): - encoding = [] - for condition in self.conditions: - if condition.value: - encoding.append(0xFE) - encoding.append(condition.flag) - - encoding.append(0xFF) - encoding.append(self.event) - - return encoding - - -class EventCall: - def __init__(self): - self.cases = [] - self.parameters = [] - - def encode(self): - encoding = [] - for case in self.cases: - encoding.extend(case.encode()) - encoding.extend(self.parameters) - return encoding - - def contains_event(self, event): - for case in self.cases: - if case.event == event: - return True - - return False - - -def decode(byte_list): - if not byte_list: - return None - - call = EventCall() - data = list(byte_list) - - while 0xFF in data: - case = EventCallCase() - condition_bytes = data[:data.index(0xFF)] - data = data[len(condition_bytes) + 1:] - while condition_bytes: - b = condition_bytes.pop(0) - if b == 0xFE: - b = condition_bytes.pop(0) - case.add_condition(b, True) - else: - case.add_condition(b, False) - - if not data: - # missing last event code? 
- break - - case.event = data.pop(0) - call.cases.append(case) - - call.parameters.extend(data) - return call diff --git a/worlds/ff4fe/FreeEnterpriseForAP/f4c/ff4struct/formation.py b/worlds/ff4fe/FreeEnterpriseForAP/f4c/ff4struct/formation.py deleted file mode 100644 index 2f4f16c713e5..000000000000 --- a/worlds/ff4fe/FreeEnterpriseForAP/f4c/ff4struct/formation.py +++ /dev/null @@ -1,60 +0,0 @@ -from .bitutil import * - -REGULAR_MUSIC = 0 -BOSS_MUSIC = 1 -FIEND_MUSIC = 2 -CONTINUE_MUSIC = 3 - -class Formation: - def __init__(self): - self.gfx_bits = 0 - self.back_attack = False - self.boss_death = False - self.eggs = [False, False, False] - self.monster_types = [0,0,0] - self.calling = False - self.transforming = False - self.monster_qtys = [0,0,0] - self.arrangement = 0 - self.no_flee = False - self.no_gameover = False - self.music = False - self.character_battle = False - self.auto_battle = False - self.floating_enemies = False - self.transparent = False - self.cursor_graph_index = 0 - - def encode(self): - encoding = [ - pack_byte('3bbbbb', - self.gfx_bits, self.back_attack, - self.boss_death, self.eggs[2], self.eggs[1], self.eggs[0] - ), - self.monster_types[0], - self.monster_types[1], - self.monster_types[2], - pack_byte('bb222', - self.calling, self.transforming, - self.monster_qtys[2], - self.monster_qtys[1], - self.monster_qtys[0] - ), - self.arrangement, - pack_byte('bb2bbbb', - self.no_flee, self.no_gameover, self.music, - self.character_battle, self.auto_battle, self.floating_enemies, self.transparent - ), - self.cursor_graph_index - ] - return encoding - -def decode(byte_list): - f = Formation() - f.gfx_bits, f.back_attack, f.boss_death, f.eggs[2], f.eggs[1], f.eggs[0] = unpack_byte('3bbbbb', byte_list[0]) - f.monster_types = list(byte_list[1:4]) - f.calling, f.transforming, f.monster_qtys[2], f.monster_qtys[1], f.monster_qtys[0] = unpack_byte('bb222', byte_list[4]) - f.arrangement = byte_list[5] - f.no_flee, f.no_gameover, f.music, f.character_battle, f.auto_battle, f.floating_enemies, f.transparent = unpack_byte('bb2bbbb', byte_list[6]) - f.cursor_graph_index = byte_list[7] - return f diff --git a/worlds/ff4fe/FreeEnterpriseForAP/f4c/ff4struct/map_grid.py b/worlds/ff4fe/FreeEnterpriseForAP/f4c/ff4struct/map_grid.py deleted file mode 100644 index 333aed9e3fcc..000000000000 --- a/worlds/ff4fe/FreeEnterpriseForAP/f4c/ff4struct/map_grid.py +++ /dev/null @@ -1,47 +0,0 @@ - -# Map grids are indexed like so: map_grid[x][y] - -class MapGrid: - def __init__(self, linear_tiles): - self._grid = [] - for x in range(32): - column = [linear_tiles[i] for i in range(x, len(linear_tiles), 32)] - self._grid.append(column) - - def __getitem__(self, k): - return self._grid[k] - - def encode(self): - runs = [] - for y in range(32): - for x in range(32): - tile = self[x][y] - if not runs or runs[-1][0] != tile or runs[-1][1] == 0xFF: - runs.append([tile, 1]) - else: - runs[-1][1] += 1 - - byte_list = [] - for run in runs: - if run[1] > 1: - byte_list.append(run[0] | 0x80) - byte_list.append(run[1] - 1) - else: - byte_list.append(run[0]) - - return byte_list - -def decode(byte_list): - byte_list = list(byte_list) - linear_tiles = [] - while(byte_list): - b = byte_list.pop(0) - if b & 0x80: - length = byte_list.pop(0) + 1 - linear_tiles.extend([b & 0x7F] * length) - else: - linear_tiles.append(b) - - linear_tiles = linear_tiles[:0x400] - return MapGrid(linear_tiles) - diff --git a/worlds/ff4fe/FreeEnterpriseForAP/f4c/ff4struct/map_info.py 
b/worlds/ff4fe/FreeEnterpriseForAP/f4c/ff4struct/map_info.py deleted file mode 100644 index 471ecf81a163..000000000000 --- a/worlds/ff4fe/FreeEnterpriseForAP/f4c/ff4struct/map_info.py +++ /dev/null @@ -1,66 +0,0 @@ -from .bitutil import * - -class MapInfo: - def __init__(self): - self.battle_background = 0 - self.can_warp = False - self.can_exit = False - self.battle_background_alt_palette = False - self.magnetic = False - self.grid = 0 - self.tileset = 0 - self.placements = 0 - self.border_tile = 0 - self.border_no_exit = False - self.palette = 0 - self.npc_palette_0 = 0 - self.npc_palette_1 = 0 - self.music = 0 - self.bg_grid = 0 - self.bg_translucent = False - self.bg_scroll_vertical = False - self.bg_scroll_horizontal = False - self.bit75 = False - self.bg_direction = 0 - self.bg_speed = 0 - self.underground_map_grid = False - self.bits81to86 = 0 - self.underground_npcs = False - self.name = 0 - self.treasure_index = 0 - - def encode(self): - return [ - pack_byte('4bbbb', self.battle_background, self.can_warp, self.can_exit, self.battle_background_alt_palette, self.magnetic), - self.grid, - self.tileset, - self.placements, - pack_byte('7b', self.border_tile, self.border_no_exit), - self.palette, - pack_byte('44', self.npc_palette_0, self.npc_palette_1), - self.music, - self.bg_grid, - pack_byte('bbbb22', self.bg_translucent, self.bg_scroll_vertical, self.bg_scroll_horizontal, self.bit75, self.bg_direction, self.bg_speed), - pack_byte('b6b', self.underground_map_grid, self.bits81to86, self.underground_npcs), - self.name, - self.treasure_index - ] - - -def decode(byte_list): - mi = MapInfo() - mi.battle_background, mi.can_warp, mi.can_exit, mi.battle_background_alt_palette, mi.magnetic = unpack_byte('4bbbb', byte_list[0]) - mi.grid = byte_list[1] - mi.tileset = byte_list[2] - mi.placements = byte_list[3] - mi.border_tile, mi.border_no_exit = unpack_byte('7b', byte_list[4]) - mi.palette = byte_list[5] - mi.npc_palette_0, mi.npc_palette_1 = unpack_byte('44', byte_list[6]) - mi.music = byte_list[7] - mi.bg_grid = byte_list[8] - mi.bg_translucent, mi.bg_scroll_vertical, mi.bg_scroll_horizontal, mi.bit75, mi.bg_direction, mi.bg_speed = unpack_byte('bbbb22', byte_list[9]) - mi.underground_map_grid, mi.bits81to86, mi.underground_npcs = unpack_byte('b6b', byte_list[10]) - mi.name = byte_list[11] - mi.treasure_index = byte_list[12] - return mi - diff --git a/worlds/ff4fe/FreeEnterpriseForAP/f4c/ff4struct/monster.py b/worlds/ff4fe/FreeEnterpriseForAP/f4c/ff4struct/monster.py deleted file mode 100644 index 35183884dd6d..000000000000 --- a/worlds/ff4fe/FreeEnterpriseForAP/f4c/ff4struct/monster.py +++ /dev/null @@ -1,136 +0,0 @@ -from .bitutil import * - -def _set_to_bits(s): - val = 0 - for i in s: - val |= (1 << i) - return val - -def _bits_to_set(val): - s = set() - for i in range(16): - if val & (1 << i): - s.add(i) - return s - -class Monster: - def __init__(self): - self.boss = False - self.level = 0 - self.hp = 0 - self.attack_index = 0 - self.defense_index = 0 - self.magic_defense_index = 0 - self.speed_index = 0 - self.drop_index = 0 - self.drop_rate = 0 - self.attack_sequence = 0 - self.attack_elements = set() - self.attack_statuses = set() - self.resist_elements = set() - self.resist_statuses = set() - self.weak_elements = set() - self.spell_power = None - self.races = set() - self.reaction_sequence = None - self.bit72 = False - self.bit73 = False - - def encode(self): - encoded = [ - pack_byte('7b', self.level, self.boss), - self.hp & 0xff, - (self.hp >> 8) & 0xff, - self.attack_index, - 
self.defense_index, - self.magic_defense_index, - self.speed_index, - pack_byte('62', self.drop_index, self.drop_rate), - self.attack_sequence, - pack_byte('bbbbbbbb', - self.bit72, - self.bit73, - (self.reaction_sequence is not None), - bool(self.races), - (self.spell_power is not None), - bool(self.weak_elements), - bool(self.resist_elements) or bool(self.resist_statuses), - bool(self.attack_elements) or bool(self.attack_statuses) - ) - ] - - if self.attack_elements or self.attack_statuses: - if self.attack_elements: - encoded.append(_set_to_bits(self.attack_elements)) - else: - encoded.append(0) - - if self.attack_statuses: - val = _set_to_bits(self.attack_statuses) - encoded.append(val & 0xff) - encoded.append((val >> 8) & 0xff) - else: - encoded.extend([0,0]) - if self.resist_elements or self.resist_statuses: - if self.resist_elements: - encoded.append(_set_to_bits(self.resist_elements)) - else: - encoded.append(0) - - if self.resist_statuses: - val = _set_to_bits(self.resist_statuses) - encoded.append(val & 0xff) - encoded.append((val >> 8) & 0xff) - else: - encoded.extend([0,0]) - if self.weak_elements: - encoded.append(_set_to_bits(self.weak_elements)) - if self.spell_power is not None: - encoded.append(self.spell_power) - if self.races: - encoded.append(_set_to_bits(self.races)) - if self.reaction_sequence is not None: - encoded.append(self.reaction_sequence) - - return encoded - -def decode(byte_list): - m = Monster() - m.level, m.boss = unpack_byte('7b', byte_list[0]) - m.hp = byte_list[1] | (byte_list[2] << 8) - m.attack_index = byte_list[3] - m.defense_index = byte_list[4] - m.magic_defense_index = byte_list[5] - m.speed_index = byte_list[6] - m.drop_index, m.drop_rate = unpack_byte('62', byte_list[7]) - m.attack_sequence = byte_list[8] - - flags = list(unpack_byte('bbbbbbbb', byte_list[9])) - byte_list = byte_list[10:] - - if flags.pop(): - m.attack_elements = _bits_to_set(byte_list.pop(0)) - m.attack_statuses = _bits_to_set(byte_list[0] | (byte_list[1] << 8)) - byte_list = byte_list[2:] - - if flags.pop(): - m.resist_elements = _bits_to_set(byte_list.pop(0)) - m.resist_statuses = _bits_to_set(byte_list[0] | (byte_list[1] << 8)) - byte_list = byte_list[2:] - - if flags.pop(): - m.weak_elements = _bits_to_set(byte_list.pop(0)) - - if flags.pop(): - m.spell_power = byte_list.pop(0) - - if flags.pop(): - m.races = _bits_to_set(byte_list.pop(0)) - - if flags.pop(): - m.reaction_sequence = byte_list.pop(0) - - m.bit73 = flags.pop() - m.bit72 = flags.pop() - - return m diff --git a/worlds/ff4fe/FreeEnterpriseForAP/f4c/ff4struct/monster_gfx.py b/worlds/ff4fe/FreeEnterpriseForAP/f4c/ff4struct/monster_gfx.py deleted file mode 100644 index 7d272970c1e2..000000000000 --- a/worlds/ff4fe/FreeEnterpriseForAP/f4c/ff4struct/monster_gfx.py +++ /dev/null @@ -1,15 +0,0 @@ -class MonsterGfx: - def __init__(self): - self.size = 0 - self.palette = 0 - self.pointer = 0 - - def encode(self): - return [self.size, self.palette, self.pointer & 0xff, (self.pointer >> 8) & 0xff] - -def decode(byte_list): - mgfx = MonsterGfx() - mgfx.size = byte_list[0] - mgfx.palette = byte_list[1] - mgfx.pointer = byte_list[2] | (byte_list[3] << 8) - return mgfx diff --git a/worlds/ff4fe/FreeEnterpriseForAP/f4c/ff4struct/npc_placement.py b/worlds/ff4fe/FreeEnterpriseForAP/f4c/ff4struct/npc_placement.py deleted file mode 100644 index 5c85a5bdd70e..000000000000 --- a/worlds/ff4fe/FreeEnterpriseForAP/f4c/ff4struct/npc_placement.py +++ /dev/null @@ -1,56 +0,0 @@ -from .bitutil import * - -class NpcPlacement: - def 
__init__(self): - self.npc = 0 - self.x = 0 - self.y = 0 - self.walks = False - self.intangible = False - self.facing = 0 - self.palette = 0 - self.turns = False - self.marches = False - self.speed = 0 - - self.bit13 = False - self.bit14 = False - self.bit21 = False - self.bit22 = False - - def encode(self): - encoding = [0, 0, 0, 0] - - encoding[0] = self.npc - encoding[1] = pack_byte('5bbb', self.x, self.bit13, self.bit14, self.walks) - encoding[2] = pack_byte('5bbb', self.y, self.bit21, self.bit22, self.intangible) - encoding[3] = pack_byte('22bb2', self.facing, self.palette, self.turns, self.marches, self.speed) - - return encoding - -def decode(byte_list): - p = NpcPlacement() - - p.npc = byte_list[0] - p.x, p.bit13, p.bit14, p.walks = unpack_byte('5bbb', byte_list[1]) - p.y, p.bit21, p.bit22, p.intangible = unpack_byte('5bbb', byte_list[2]) - p.facing, p.palette, p.turns, p.marches, p.speed = unpack_byte('22bb2', byte_list[3]) - - return p - -def decode_set(byte_list): - results = [] - for i in range(0, 48, 4): - if i >= len(byte_list): - break - if byte_list[i] == 0: - break - results.append(decode(byte_list[i:i+4])) - return results - -def encode_set(placement_list): - byte_list = [] - for placement in placement_list: - byte_list.extend(placement.encode()) - byte_list.append(0x00) - return byte_list diff --git a/worlds/ff4fe/FreeEnterpriseForAP/f4c/ff4struct/spell.py b/worlds/ff4fe/FreeEnterpriseForAP/f4c/ff4struct/spell.py deleted file mode 100644 index 960b593db32a..000000000000 --- a/worlds/ff4fe/FreeEnterpriseForAP/f4c/ff4struct/spell.py +++ /dev/null @@ -1,30 +0,0 @@ -from .bitutil import * - -class Spell: - def __init__(self): - self.casting_time = 0 - self.target = 0 - self.param = 0 - self.hit = 0 - self.boss = False - self.effect = 0 - self.damage = False - self.element = 0 - self.impact = False - self.mp_cost = 0 - self.ignore_wall = False - #self.colors = 0 - #self.sprites = 0 - #self.visual1 = 0 - #self.visual2 = 0 - #self.sound = 0 - -def decode(byte_list): - s = Spell() - s.casting_time, s.target = unpack_byte('53', byte_list[0]) - s.param = byte_list[1] - s.hit, s.boss = unpack_byte('7b', byte_list[2]) - s.effect, s.damage = unpack_byte('7b', byte_list[3]) - s.element, s.impact = unpack_byte('7b', byte_list[4]) - s.mp_cost, s.ignore_wall = unpack_byte('7b', byte_list[5]) - return s diff --git a/worlds/ff4fe/FreeEnterpriseForAP/f4c/ff4struct/spell_set.py b/worlds/ff4fe/FreeEnterpriseForAP/f4c/ff4struct/spell_set.py deleted file mode 100644 index e99e4f16e7a9..000000000000 --- a/worlds/ff4fe/FreeEnterpriseForAP/f4c/ff4struct/spell_set.py +++ /dev/null @@ -1,41 +0,0 @@ -class SpellSet: - def __init__(self): - self.initial_spells = [] - self.learned_spells = {} - - def encode_initial(self): - return (self.initial_spells + [0xFF])[:24] - - def encode_learned(self): - keys = sorted(self.learned_spells) - byte_list = [] - for k in keys: - if type(self.learned_spells[k]) in (list, tuple): - for s in self.learned_spells[k]: - byte_list.append(k) - byte_list.append(s) - else: - byte_list.append(k) - byte_list.append(self.learned_spells[k]) - byte_list.append(0xFF) - return byte_list - -def decode(initial_byte_list, learned_byte_list): - if initial_byte_list[-1] == 0xFF: - initial_byte_list = initial_byte_list[:-1] - if learned_byte_list[-1] == 0xFF: - learned_byte_list = learned_byte_list[:-1] - - ss = SpellSet() - ss.initial_spells = list(initial_byte_list) - ss.learned_spells = {} - for i in range(0, len(learned_byte_list), 2): - lv, s = learned_byte_list[i:i+2] - if lv in 
ss.learned_spells: - if type(ss.learned_spells) in (list, tuple): - ss.learned_spells[lv].append(s) - else: - ss.learned_spells[lv] = [ss.learned_spells[lv], s] - else: - ss.learned_spells[lv] = s - return ss diff --git a/worlds/ff4fe/FreeEnterpriseForAP/f4c/ff4struct/text.py b/worlds/ff4fe/FreeEnterpriseForAP/f4c/ff4struct/text.py deleted file mode 100644 index 06dd26bc23ca..000000000000 --- a/worlds/ff4fe/FreeEnterpriseForAP/f4c/ff4struct/text.py +++ /dev/null @@ -1,468 +0,0 @@ -import re - -_RAW_CHAR_MAP = { - 0x01 : "\n", - #0x09 : "\n", - 0x0A : "`", - 0x42 : "A", - 0x43 : "B", - 0x44 : "C", - 0x45 : "D", - 0x46 : "E", - 0x47 : "F", - 0x48 : "G", - 0x49 : "H", - 0x4A : "I", - 0x4B : "J", - 0x4C : "K", - 0x4D : "L", - 0x4E : "M", - 0x4F : "N", - 0x50 : "O", - 0x51 : "P", - 0x52 : "Q", - 0x53 : "R", - 0x54 : "S", - 0x55 : "T", - 0x56 : "U", - 0x57 : "V", - 0x58 : "W", - 0x59 : "X", - 0x5A : "Y", - 0x5B : "Z", - 0x5C : "a", - 0x5D : "b", - 0x5E : "c", - 0x5F : "d", - 0x60 : "e", - 0x61 : "f", - 0x62 : "g", - 0x63 : "h", - 0x64 : "i", - 0x65 : "j", - 0x66 : "k", - 0x67 : "l", - 0x68 : "m", - 0x69 : "n", - 0x6A : "o", - 0x6B : "p", - 0x6C : "q", - 0x6D : "r", - 0x6E : "s", - 0x6F : "t", - 0x70 : "u", - 0x71 : "v", - 0x72 : "w", - 0x73 : "x", - 0x74 : "y", - 0x75 : "z", - 0x80 : "0", - 0x81 : "1", - 0x82 : "2", - 0x83 : "3", - 0x84 : "4", - 0x85 : "5", - 0x86 : "6", - 0x87 : "7", - 0x88 : "8", - 0x89 : "9", - 0x8A : "e ", - 0x8B : " t", - 0x8C : "th", - 0x8D : "he", - 0x8E : "t ", - 0x8F : "ou", - 0x90 : " a", - 0x91 : "s ", - 0x92 : "er", - 0x93 : "in", - 0x94 : "re", - 0x95 : "d ", - 0x96 : "an", - 0x97 : " o", - 0x98 : "on", - 0x99 : "st", - 0x9A : " w", - 0x9B : "o ", - 0x9C : " m", - 0x9D : "ha", - 0x9E : "to", - 0x9F : "is", - 0xA0 : "yo", - 0xA1 : " y", - 0xA2 : " i", - 0xA3 : "al", - 0xA4 : "ar", - 0xA5 : " h", - 0xA6 : "r ", - 0xA7 : " s", - 0xA8 : "at", - 0xA9 : "n ", - 0xAA : " c", - 0xAB : "ng", - 0xAC : "ve", - 0xAD : "ll", - 0xAE : "y ", - 0xAF : "nd", - 0xB0 : "en", - 0xB1 : "ed", - 0xB2 : "hi", - 0xB3 : "or", - 0xB4 : ", ", - 0xB5 : "I ", - 0xB6 : "u ", - 0xB7 : "me", - 0xB8 : "ta", - 0xB9 : " b", - 0xBA : " I", - 0xBB : "te", - 0xBC : "of", - 0xBD : "ea", - 0xBE : "ur", - 0xBF : "l ", - 0xC0 : "'", - 0xC1 : ".", - 0xC2 : "-", - 0xC3 : "_", - 0xC4 : "!", - 0xC5 : "?", - 0xC6 : "%", - 0xC7 : "/", - 0xC8 : ":", - 0xC9 : ",", - 0xCA : " f", - 0xCB : " d", - 0xCC : "ow", - 0xCD : "se", - 0xCE : " ", - 0xCF : "it", - 0xD0 : "et", - 0xD1 : "le", - 0xD2 : "f ", - 0xD3 : " g", - 0xD4 : "es", - 0xD5 : "ro", - 0xD6 : "ne", - 0xD7 : "ry", - 0xD8 : " l", - 0xD9 : "us", - 0xDA : "no", - 0xDB : "ut", - 0xDC : "ca", - 0xDD : "as", - 0xDE : "Th", - 0xDF : "ai", - 0xE0 : "ot", - 0xE1 : "be", - 0xE2 : "el", - 0xE3 : "om", - 0xE4 : "'s", - 0xE5 : "il", - 0xE6 : "de", - 0xE7 : "gh", - 0xE8 : "ay", - 0xE9 : "nt", - 0xEA : "Wh", - 0xEB : "Yo", - 0xEC : "wa", - 0xED : "oo", - 0xEE : "We", - 0xEF : "g ", - 0xF0 : "ge", - 0xF1 : " n", - 0xF2 : "ee", - 0xF3 : "wi", - 0xF4 : " M", - 0xF5 : "ke", - 0xF6 : "we", - 0xF7 : " p", - 0xF8 : "ig", - 0xF9 : "ys", - 0xFA : " B", - 0xFB : "am", - 0xFC : "ld", - 0xFD : " W", - 0xFE : "la", - 0xFF : " ", -} - -_SYMBOLS = { - 0x02 : "space", - 0x06 : "next", - 0x07 : "item", - 0x08 : "amount", - #0x0A : "dot", - 0x21 : "stone", - 0x22 : "frog", - 0x23 : "tiny", - 0x24 : "pig", - 0x25 : "mute", - 0x26 : "blind", - 0x27 : "poison", - 0x28 : "floating", - 0x29 : "claw", - 0x2A : "rod", - 0x2B : "staff", - 0x2C : "darksword", - 0x2D : "sword", - 0x2E : "lightsword", - 
0x2F : "spear", - 0x30 : "knife", - 0x31 : "katana", - 0x32 : "shuriken", - 0x33 : "boomerang", - 0x34 : "axe", - 0x35 : "wrench", - 0x36 : "harp", - 0x37 : "bow", - 0x38 : "arrow", - 0x39 : "hammer", - 0x3A : "whip", - 0x3B : "shield", - 0x3C : "helmet", - 0x3D : "armor", - 0x3E : "gauntlet", - 0x3F : "blackmagic", - 0x40 : "whitemagic", - 0x41 : "callmagic", - 0x76 : "flatm", - 0x77 : "flath", - 0x78 : "flatp", - 0x79 : "tent", - 0x7A : "potion", - 0x7B : "shirt", - 0x7C : "ring", - 0x7D : "crystal", - 0x7E : "key", - 0x7F : "tail", -} - -_NAMES = { - 0x00 : "Cecil", - 0x01 : "Kain", - 0x02 : "Rydia", - 0x03 : "Tellah", - 0x04 : "Edward", - 0x05 : "Rosa", - 0x06 : "Yang", - 0x07 : "Palom", - 0x08 : "Porom", - 0x09 : "Cid", - 0x0A : "Edge", - 0x0B : "Fusoya", - 0x0C : "Golbez", - 0x0D : "Anna" -} - -_ENCODE_TREE = {} -_SYMBOL_CODE_LOOKUP = {} - -''' -Given a list of bytes representing a text string in the FF4 ROM, return a decoded -string containing the text contents. If the data contains multiple strings, then -return a list of decoded strings instead. -''' -def decode(byte_list, consts=None): - results = [] - chars = [] - - i = 0 - while i < len(byte_list): - b = byte_list[i] - if b == 0: - results.append(''.join(chars)) - chars = [] - elif b == 0x02: - i += 1 - param = byte_list[i] - #chars.append("[space {}]".format(param)) - chars.append("~" * param) - elif b == 0x03: - i += 1 - param = byte_list[i] - music_name = '${:02X}'.format(param) - if consts and consts.get_name(param, 'music'): - music_name = '#{}'.format(consts.get_name(param, 'music')) - chars.append("[music {}]".format(music_name)) - elif b == 0x04: - i += 1 - param = byte_list[i] - if param in _NAMES: - chars.append('[{}]'.format(_NAMES[param])) - else: - chars.append("[name ${:02X}]".format(param)) - elif b == 0x05: - i += 1 - param = byte_list[i] - chars.append("[pause {}]".format(param)) - elif b == 0x09: - chars.append("\n") - elif b in _RAW_CHAR_MAP: - chars.append(_RAW_CHAR_MAP[b]) - elif b in _SYMBOLS: - chars.append('[{}]'.format(_SYMBOLS[b])) - else: - chars.append('[${:02X}]'.format(b)) - - i += 1 - - if chars: - results.append(''.join(chars)) - - if len(results) > 1: - return results - else: - return results[0] - -''' -Given an ASCII string (with symbol annotations), return an FF4-encoded list of -bytes representing that string. If a list of input strings is provided, then instead -return the concatenation of all those encoded strings. 
-''' -def encode(text, optimize=False, allow_dual_char=True, fixed_length=None): - _build_encode_maps() - - encoding = [] - - if type(text) in [list, tuple]: - for t in text: - encoding.extend(encode(t, optimize=optimize, allow_dual_char=allow_dual_char, fixed_length=fixed_length)) - else: - for line in text.splitlines(True): - if allow_dual_char and line == "\n": - encoding.append(0x09) - else: - parts = re.split(r'(\[[A-Za-z0-9 \$]+\])', line) - for part in parts: - if part.startswith('[') and part.endswith(']'): - encoding.extend(_encode_symbol(part[1:-1])) - else: - encoding.extend(_encode_raw(part, optimize=optimize, allow_dual_char=allow_dual_char)) - - if fixed_length: - if len(encoding) < fixed_length: - encoding.extend([0xFF] * (fixed_length - len(encoding))) - else: - encoding.append(0x00) - - return encoding - -#----------------------------------------------------------------------------------------------------- - -def _translate_number(number_string): - if re.search(r'^\$[A-Fa-f0-9]+$', number_string): - return int(number_string[1:], 16) - elif re.search(r'^[0-9]+$', number_string): - return int(number_string) - else: - return None - -def _encode_symbol(symbol_name): - symbol_slug = symbol_name.lower().replace(' ', '') - - if symbol_slug in _SYMBOL_CODE_LOOKUP: - return _SYMBOL_CODE_LOOKUP[symbol_slug] - elif _translate_number(symbol_slug) is not None: - val = _translate_number(symbol_slug) - if val < 0 or val > 255: - raise ValueError("Cannot encode raw byte value {0} / ${0:02X} (out of range)".format(val)) - return [val] - else: - parameterized_symbols = { - 'space' : 0x02, - 'music' : 0x03, - 'name' : 0x04, - 'pause' : 0x05, - } - for s in parameterized_symbols: - if symbol_slug.startswith(s): - val = _translate_number(symbol_slug[len(s):]) - if val is None or val < 0 or val > 255: - raise ValueError("Cannot encode symbol parameter value '{}'".format(symbol_slug[len(s):])) - return [parameterized_symbols[s], val] - - raise ValueError("Cannot encode unrecognized symbol '{}'".format(symbol_name)) - - -def _encode_raw(text, optimize=False, allow_dual_char=True): - if text == '': - return [] - - m = re.search(r'^\~+', text) - if m: - return [0x02, len(m.group(0))] + _encode_raw(text[len(m.group(0)):], optimize=optimize, allow_dual_char=allow_dual_char) - - if text[0] not in _ENCODE_TREE: - raise ValueError("Cannot encode text character {} (ordinal {}) in snippet '{}'".format(text[0], ord(text[0]), text)) - - options = [] - snippets = _ENCODE_TREE[text[0]] - for snippet in snippets: - if not allow_dual_char and len(snippet[0]) > 1: - continue - - if text.startswith(snippet[0]): - code = snippet[1] - encoding = [code] + _encode_raw(text[len(snippet[0]):], optimize=optimize, allow_dual_char=allow_dual_char) - if optimize: - options.append(encoding) - else: - return encoding - - options.sort(key=len) - return options[0] - -def _build_encode_maps(): - if _ENCODE_TREE: - return - - for code in _RAW_CHAR_MAP: - t = _RAW_CHAR_MAP[code] - _ENCODE_TREE.setdefault(t[0], []).append( (t, code) ) - - for startchar in _ENCODE_TREE: - _ENCODE_TREE[startchar].sort(key=lambda p: (len(p[0]), p[1]), reverse=True) - - for code in _SYMBOLS: - _SYMBOL_CODE_LOOKUP[_SYMBOLS[code]] = [code] - - for code in _NAMES: - _SYMBOL_CODE_LOOKUP[_NAMES[code].lower()] = [0x04, code] - -#----------------------------------------------------------------------------------------------------- - -def _hex_string_to_byte_list(hex_string): - hex_string = re.sub(r'\s', '', hex_string) - result = [] - for i in range(0, 
len(hex_string), 2): - result.append(int(hex_string[i:i+2], 16)) - return result - -def _print_hex(byte_list): - print(' '.join('{:02X}'.format(x) for x in byte_list)) - -if __name__ == '__main__': - #print('\n'.join(decode(_hex_string_to_byte_list(''' - # 04 01 C8 55 8D A9 4A 9C D9 8E 6B E8 01 B9 5C 5E 66 9C AE E6 5D 6F 6E 8B 9B 5B 60 68 D9 C4 00 04 01 C8 04 05 C3 01 09 09 09 04 05 C8 04 0C 9A DD 90 67 6E 6A 01 FF 70 AF 92 8B 63 8A 5E 98 6F D5 67 C4 01 BA 6F C0 91 DA 8E 04 01 C0 91 61 5C 70 67 6F C4 01 09 04 01 C8 04 0C C3 8B ED C5 00 04 0A C8 5A BD 63 C4 FF EA 9B 5F 6A 01 A1 8F 8B 63 93 66 FF 94 5C 5E 6F 64 71 A8 B1 01 8B 63 8A 48 64 96 8E BC FA 5C 5D C2 E5 C5 00 04 01 C8 55 A3 66 D8 A8 92 C4 01 FD 8A 68 D9 8E 63 BE D7 C4 00 04 05 C8 49 BE 6D AE 70 6B C4 00 5A 8F 9A 94 6F 5E 63 C4 C4 00 04 0B C8 4A 6F E4 8B 63 8A 5E B3 8A BC 01 8B B2 91 48 64 96 6F C9 8B 63 8A 44 51 56 C4 01 04 0A C8 4E 96 C4 BA 6F C0 91 63 70 F0 C4 01 09 04 0B C8 58 8A 68 D9 8E E6 99 D5 74 8B 8D 01 FF 45 60 61 B0 6E 64 71 8A 54 74 99 60 68 CA 64 6D 99 C4 01 FF 50 8C 92 72 9F 8A A3 67 8B 63 8A 5F FB 5C F0 01 9A 64 AD B9 8A 94 6B DF 94 5F C4 00 04 01 C8 58 8A 5F 64 95 CF C4 01 04 0A C8 4A 8E 99 6A 6B 6B B1 C4 00 04 0C C8 5A 8F FF 6D 70 93 60 95 68 AE 6B 67 96 C4 01 FF 5A 8F A7 9D AD F7 5C AE 61 B3 8B 63 9F C4 01 09 09 04 0B C8 5A 8F C4 00 04 0C C8 48 60 8E 5C 72 E8 C4 01 04 0B C8 45 98 C0 8E 74 8F FF 94 A3 64 75 60 01 9A 63 9B 74 8F 90 94 C5 01 04 0C C8 54 9E 6B A2 6F C4 01 04 0B C8 58 5C 66 8A 70 6B C4 00 04 0C C8 EA AE 5F 64 95 B5 9D 71 8A A3 67 01 8B 9D 8E 9D 6F 94 5F C5 01 09 09 04 0B C8 44 E3 8A 6F 9B 74 8F 6D 01 A7 B0 CD 6E C4 01 FF 45 9B 74 8F FF 94 B7 68 5D 92 A1 8F 6D 01 CA 5C 8C 92 C0 91 69 5C B7 C5 01 04 0C C8 4E AE 61 5C 8C 92 C5 01 FF 49 64 91 69 FB 8A 9F C3 FF 4C 67 70 5A 5C C3 C5 01 09 09 04 00 C8 58 9D 6F C4 C5 01 09 09 09 04 05 C8 55 9D 8E B7 96 6E C3 01 04 0A C8 04 00 E4 C3 B9 D5 8C 92 C4 C5 01 09 09 04 00 C8 04 0C C3 FF 9F C3 9C 74 C3 01 09 09 09 04 0B C8 5A 8F 9A 92 8A 5E 98 6F D5 AD B1 01 B9 AE 5B 60 68 D9 C0 8B 60 D1 6B 5C 8C 74 C1 01 FF 5A 8F A6 5D 67 ED 95 68 5C 5F 8A 64 8E BD 6E 64 92 01 CA B3 A5 64 68 8B 9B D9 8A 74 8F C1 00 04 0A C8 4E 96 C4 01 09 09 09 04 00 C8 B5 9D 71 8A E1 B0 01 CA 64 E7 6F 93 62 9C 74 97 72 A9 5D D5 8C 92 C3 01 09 09 04 0C C8 5A 8F 90 6D 8A 68 AE 5D D5 8C 92 C5 01 09 09 09 04 00 C8 43 DB C3 A2 8E 5E 8F 67 95 9D AC 01 B9 60 B0 9C 60 C3 9A 63 6A 9A DD 01 AA 98 6F D5 AD 60 95 5D AE 5B 60 68 D9 C0 01 8B 60 D1 6B 5C 8C 74 C1 01 04 0C C8 43 70 8E 4A 8E EC 91 B7 C3 01 BA 8E B7 96 91 68 74 A7 8F 67 9A DD 01 FF 99 5C 93 60 95 F3 8C FF 60 71 64 BF 61 B3 01 FF 5B 60 68 D9 8B 9B 70 CD C3 C3 - # ''')))) - - import argparse - parser = argparse.ArgumentParser() - parser.add_argument('data', nargs="+") - parser.add_argument('-d', '--dual_char', action='store_true') - args = parser.parse_args() - input_data = ' '.join(args.data) - - if re.match(r'^\s*([A-Fa-f0-9]{2}\s*)*$', input_data): - # decode text from hex - byte_list = [] - input_data = re.sub(r'[^A-Fa-f0-9]', '', input_data) - for i in range(0, len(input_data), 2): - byte_list.append(int(input_data[i:i+2], 16)) - print(decode(byte_list)) - else: - # encode hex from text - data = encode(input_data, allow_dual_char=args.dual_char) - print(' '.join(["{:02X}".format(b) for b in data])) - - diff --git a/worlds/ff4fe/FreeEnterpriseForAP/f4c/ff4struct/tileset.py b/worlds/ff4fe/FreeEnterpriseForAP/f4c/ff4struct/tileset.py deleted file mode 100644 index 15d1c3b36f17..000000000000 --- a/worlds/ff4fe/FreeEnterpriseForAP/f4c/ff4struct/tileset.py 
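For reference while reading the deleted text.py above: decoding walks the byte stream through _RAW_CHAR_MAP, and encoding reverses it by greedily preferring the longest matching fragment (the dual-character entries). Below is a minimal standalone sketch of that round trip using only a handful of entries copied from _RAW_CHAR_MAP; the helper names and the tiny table are illustrative, not part of the deleted module.

# Sketch only: a tiny excerpt of the FF4 text table from the deleted text.py.
_MINI_MAP = {
    0x42: "A", 0x55: "T", 0x5C: "a", 0x60: "e", 0x63: "h",
    0x8A: "e ", 0x8C: "th", 0x8D: "he", 0xFF: " ",
}

# Reverse lookup, longest fragments first, mirroring the greedy encoder.
_BY_TEXT = sorted(((text, code) for code, text in _MINI_MAP.items()),
                  key=lambda pair: len(pair[0]), reverse=True)

def mini_decode(byte_list):
    # Map each byte back to its text fragment (control codes are not handled here).
    return "".join(_MINI_MAP[b] for b in byte_list)

def mini_encode(text):
    # Greedy longest-match encoding, like _encode_raw with allow_dual_char=True.
    out = []
    while text:
        for fragment, code in _BY_TEXT:
            if text.startswith(fragment):
                out.append(code)
                text = text[len(fragment):]
                break
        else:
            raise ValueError("fragment not in the mini table: %r" % text[0])
    return out

if __name__ == "__main__":
    encoded = mini_encode("The ")              # hypothetical input
    print([hex(b) for b in encoded])           # ['0x55', '0x8d', '0xff']
    print(mini_decode(encoded))                # "The "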
+++ /dev/null @@ -1,67 +0,0 @@ -from .bitutil import * - -NUM_TILES_PER_TILESET = 0x80 - -class Tile: - def __init__(self): - self.layer1 = False - self.layer2 = False - self.bridge_layer = False - self.save_point = False - self.closed_door = False - self.bit5 = False - self.bit6 = False - self.bit7 = False - self.damage = False - self.bit9 = False - self.walk_behind = False - self.bottom_half = False - self.warp = False - self.talkover = False - self.encounters = False - self.trigger = False - - def encode(self): - return [ - pack_byte('bbbbbbbb', - self.layer1, self.layer2, self.bridge_layer, self.save_point, - self.closed_door, self.bit5, self.bit6, self.bit7), - pack_byte('bbbbbbbb', - self.damage, self.bit9, self.walk_behind, self.bottom_half, - self.warp, self.talkover, self.encounters, self.trigger) - ] - - def decode(self, byte_list): - result = unpack_byte('bbbbbbbb', byte_list[0]) - self.layer1 = result[0] - self.layer2 = result[1] - self.bridge_layer = result[2] - self.save_point = result[3] - self.closed_door = result[4] - self.bit5 = result[5] - self.bit6 = result[6] - self.bit7 = result[7] - - result = unpack_byte('bbbbbbbb', byte_list[1]) - self.damage = result[0] - self.bit9 = result[1] - self.walk_behind = result[2] - self.bottom_half = result[3] - self.warp = result[4] - self.talkover = result[5] - self.encounters = result[6] - self.trigger = result[7] - -def decode(byte_list): - tileset = [] - for i in range(NUM_TILES_PER_TILESET): - tile = Tile() - tile.decode(byte_list[i * 2 : (i + 1) * 2]) - tileset.append(tile) - return tileset - -def encode_set(self, tileset): - encoding = [] - for tile in tileset: - encoding.extend(tile.encode()) - return encoding diff --git a/worlds/ff4fe/FreeEnterpriseForAP/f4c/ff4struct/trigger.py b/worlds/ff4fe/FreeEnterpriseForAP/f4c/ff4struct/trigger.py deleted file mode 100644 index e12e3d90587c..000000000000 --- a/worlds/ff4fe/FreeEnterpriseForAP/f4c/ff4struct/trigger.py +++ /dev/null @@ -1,86 +0,0 @@ -from .bitutil import * - -EVENT = 1 -TREASURE = 2 -TELEPORT = 3 - -class Trigger: - def __init__(self): - self.x = 0 - self.y = 0 - self.type = EVENT - self.event_call = 0 - self.formation = 0 - self.is_miab = False - self.item = None - self.gp = None - self.map = 0 - self.target_x = 0 - self.target_y = 0 - self.target_facing = 0 - - def encode(self): - encoding = [self.x, self.y, 0, 0, 0] - - if self.type == EVENT: - encoding[2] = 0xFF - encoding[3] = self.event_call - elif self.type == TREASURE: - encoding[2] = 0xFE - encoding[3] = pack_byte('6bb', self.formation, self.is_miab, (self.item is not None)) - if self.item is not None: - encoding[4] = self.item - elif self.gp < 1280: - encoding[4] = int(self.gp / 10) - else: - encoding[4] = 0x80 + int(self.gp / 1000) - elif self.type == TELEPORT: - encoding[2] = self.map - if self.target_facing is None: - encoding[3] = self.target_x - else: - encoding[3] = pack_byte('62', self.target_x, self.target_facing) - encoding[4] = self.target_y - - return encoding - -def decode(byte_list): - t = Trigger() - t.x = byte_list[0] - t.y = byte_list[1] - if byte_list[2] == 0xFF: - t.type = EVENT - t.event_call = byte_list[3] - elif byte_list[2] == 0xFE: - t.type = TREASURE - t.formation, t.is_miab, contains_item = unpack_byte('6bb', byte_list[3]) - if contains_item: - t.item = byte_list[4] - elif byte_list[4] >= 0x80: - t.gp = (byte_list[4] - 0x80) * 1000 - else: - t.gp = byte_list[4] * 10 - else: - t.type = TELEPORT - t.map = byte_list[2] - if t.map >= 251 and t.map <= 253: - t.target_x = byte_list[3] - 
t.target_facing = None - else: - t.target_x, t.target_facing = unpack_byte('62', byte_list[3]) - t.target_y = byte_list[4] - - return t - -def decode_set(byte_list): - results = [] - for i in range(0, len(byte_list), 5): - data = byte_list[i:i+5] - results.append(decode(data)) - return results - -def encode_set(trigger_set): - encoding = [] - for t in trigger_set: - encoding.extend(t.encode()) - return encoding diff --git a/worlds/ff4fe/FreeEnterpriseForAP/f4c/grammar.lark b/worlds/ff4fe/FreeEnterpriseForAP/f4c/grammar.lark deleted file mode 100644 index cf683662cdab..000000000000 --- a/worlds/ff4fe/FreeEnterpriseForAP/f4c/grammar.lark +++ /dev/null @@ -1,17 +0,0 @@ -// Next steps: -// - annotate value fields with the const type -// - update decompile script according to changed commands -// - write test suite that individually decompiles/recompiles events - -start : top_level_block* - -top_level_block : block_type block_parameters _block_start block_body _block_end - -block_type : identifier - -block_parameters : ["(" /[^)]*/ ")"] - -block_body : block_body_item* - -!block_body_item : /[^{}\n\t\f\r ]*/ - | block_start block_body_item* block_end diff --git a/worlds/ff4fe/FreeEnterpriseForAP/f4c/grammar_actor.lark b/worlds/ff4fe/FreeEnterpriseForAP/f4c/grammar_actor.lark deleted file mode 100644 index 34594891e853..000000000000 --- a/worlds/ff4fe/FreeEnterpriseForAP/f4c/grammar_actor.lark +++ /dev/null @@ -1,17 +0,0 @@ -actor_block_params : actor_value - -actor_block_body : actor_block_item* - -actor_block_item : "name" number -> name - | "load" load_params -> load - | "save" "to" "slot" number -> save - | "discard" -> discard - | "commands" _block_start command_value* _block_end -> commands - | "right" "hand" item_value [number] -> right_hand - | "left" "hand" item_value [number] -> left_hand - | "head" item_value -> head - | "body" item_value -> body - | "arms" item_value -> arms - -load_params : "from" "slot" number -> slot - | "from" "stats" number -> stats diff --git a/worlds/ff4fe/FreeEnterpriseForAP/f4c/grammar_ai.lark b/worlds/ff4fe/FreeEnterpriseForAP/f4c/grammar_ai.lark deleted file mode 100644 index 44e0b591c4c6..000000000000 --- a/worlds/ff4fe/FreeEnterpriseForAP/f4c/grammar_ai.lark +++ /dev/null @@ -1,65 +0,0 @@ -ai_script_block_parameters : number -> normal_script - | "moon" number -> moon_script - -ai_script_block_body : ai_script_item* - -?ai_script_item : chain_block - | ai_command - -chain_block : "chain" _block_start ai_command* _block_end - -!ai_command : "use" spell_value ["on" "group"] - | "use" "command" command_value - | "fight" - | "pass" - | "set" "race" races - | "set" "attack" "index" number - | "set" ["magic"] "defense" "index" number - | "speed" speed_delta - | "set" "resistance" elements - | "set" "spell" "power" number - | "set" "weakness" elements - | "set" "sprite" number - | "message" number ["next" "action"] - | "music" music_value - | "condition" condition_delta - | "set" "reaction" reaction_number - | "darken" number - | "debug" number - | "target" target - | "wait" - | "chain" "into" - -cast_on_group_specifier : "on" "group" - -command_or_spell_value : value - -next_action_message_specifier : "next" "action" - -!target : actor_value - | "self" - | "all" "monsters" - | "other" "monsters" - | "type" /[123]/ "monsters" - | ["random"] "front" "row" - | ["random"] "back" "row" - | "stunned" "monster" - | "sleeping" "monster" - | "charmed" "monster" - | "weak" "monster" - | "random" ["other"] "anything" - | "random" ["other"] "monster" - | "all" "characters" 
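The deleted trigger.py above packs a treasure trigger's GP amount into a single byte: amounts below 1280 are stored in tens, larger amounts in thousands with bit 7 set. A small self-contained sketch of just that packing rule follows; the function names are illustrative, and the example amounts are assumed to be representable in the scheme.

# Sketch of the one-byte GP packing used by treasure triggers in the deleted trigger.py.
def pack_gp(gp):
    if gp < 1280:
        return gp // 10            # stored in tens
    return 0x80 + gp // 1000       # stored in thousands, high bit set

def unpack_gp(byte):
    if byte >= 0x80:
        return (byte - 0x80) * 1000
    return byte * 10

if __name__ == "__main__":
    for amount in (50, 1270, 2000, 50000):     # assumed representable amounts
        packed = pack_gp(amount)
        print(amount, hex(packed), unpack_gp(packed))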
- | "dead" "monsters" - -!speed_delta : /[+-]/ number - -condition_delta : "+" "1" -> condition_increment - | number -> condition_set - -reaction_number : number - -races : race_value* -elements: element_value* - - diff --git a/worlds/ff4fe/FreeEnterpriseForAP/f4c/grammar_common.lark b/worlds/ff4fe/FreeEnterpriseForAP/f4c/grammar_common.lark deleted file mode 100644 index 56f1a767229d..000000000000 --- a/worlds/ff4fe/FreeEnterpriseForAP/f4c/grammar_common.lark +++ /dev/null @@ -1,65 +0,0 @@ -// ------------------------------------------------------------------------------------ -// reused data structures - -event_call_body : event_call_case* event_call_default_case [event_call_parameters] -event_call_case : "if" event_call_clause ("," event_call_clause)* ":" event_value -event_call_clause : flag_value -> event_call_true_clause - | "not" flag_value -> event_call_false_clause -event_call_default_case : ["else" ":"] event_value -event_call_parameters : number+ - -// ------------------------------------------------------------------------------------ -// const-resolved elements - -?actor_value : value -> value_actor -?flag_value : value -> value_flag -?item_value : value -> value_item -?map_value : value -> value_map -?music_value : value -> value_music -?npc_value : value -> value_npc -?sound_value : value -> value_sound -?spell_value : value -> value_spell -?spellset_value : value -> value_spellset -?status_value : value -> value_status -?vfx_value : value -> value_vfx -?command_value : value -> value_command -?race_value : value -> value_race -?element_value : value -> value_element -?eventext_value : value -> value_eventext -?sprite_value : value -> value_sprite - -// currently doesn't have a const family but may want to add one in the future -?event_value : number - -// ------------------------------------------------------------------------------------ -// common elements - -direction : "up" -> direction_up - | "right" -> direction_right - | "down" -> direction_down - | "left" -> direction_left - -?value : number - | "#" const_name -> const - -?number : hex_number - | decimal_number - -hex_number : /\$[A-Fa-f0-9]+/ -decimal_number : /[0-9]+/ - -?identifier : /[A-Za-z_][A-Za-z0-9_]*/ -?word : /[^\s\{\}\(\)]+/ -?const_name : /([A-Za-z_][A-Za-z0-9_]*\.)?[A-Za-z_][A-Za-z0-9_]*/ - -_block_start : "{" -_block_end : "}" -!block_start : "{" -!block_end : "}" - -rom_address : hex_number ["unheadered"] -> unheadered_rom_address - | hex_number "headered" -> headered_rom_address - | hex_number "bus" -> bus_address - -%import common.WS -%ignore WS diff --git a/worlds/ff4fe/FreeEnterpriseForAP/f4c/grammar_consts.lark b/worlds/ff4fe/FreeEnterpriseForAP/f4c/grammar_consts.lark deleted file mode 100644 index 7fd787bfa305..000000000000 --- a/worlds/ff4fe/FreeEnterpriseForAP/f4c/grammar_consts.lark +++ /dev/null @@ -1,7 +0,0 @@ -// ------------------------------------------------------------------------------------ -// Consts - -const_block_params : identifier - -const_block_body : const_definition* -const_definition : number identifier diff --git a/worlds/ff4fe/FreeEnterpriseForAP/f4c/grammar_drop_table.lark b/worlds/ff4fe/FreeEnterpriseForAP/f4c/grammar_drop_table.lark deleted file mode 100644 index aeaff3fa4da9..000000000000 --- a/worlds/ff4fe/FreeEnterpriseForAP/f4c/grammar_drop_table.lark +++ /dev/null @@ -1,14 +0,0 @@ -droptable_block_params : number - -droptable_block_body : droptable_entry* - -!droptable_entry : rarity droptable_item - -!rarity : "common" - | "uncommon" - | "rare" - | "mythic" - 
-?droptable_item : item_value - | "none" - diff --git a/worlds/ff4fe/FreeEnterpriseForAP/f4c/grammar_event.lark b/worlds/ff4fe/FreeEnterpriseForAP/f4c/grammar_event.lark deleted file mode 100644 index 46200d47b051..000000000000 --- a/worlds/ff4fe/FreeEnterpriseForAP/f4c/grammar_event.lark +++ /dev/null @@ -1,127 +0,0 @@ -// ------------------------------------------------------------------------------------ -// Event scripts - -event_block_parameters : value - -event_block_body : event_script_item* - -?event_script_item : placement_consts_block - | batch_block - | cancel_marker - | event_command - | extension_command - -placement_consts_block : "consts" "(" "placement" ")" _block_start placement_const_definition* _block_end - -placement_const_definition : number identifier - -batch_block : "batch" [number] _block_start event_command* _block_end - -cancel_marker : "cancel:" -> ev_cancel - -!event_command : placement_specifier placement_command -> evcmd_placement - | "player" player_command -> evcmd_player - | "toggle" "screen" "shake" - | "screen" "flash" - | "screen" "blur" - | "moon" "travel" - | "fat" "chocobo" - | "open" "door" - | "screen" "up" "down" - | "toggle" "run" - | "toggle" "music" "fade" - | "namingway" - | "toggle" "screen" "fade" - | "toggle" "status" statuses - | "inn" value - | "party" "leader" actor_value - | "give" "hp" number -> evcmd_give_hp - | "restore" "hp" -> evcmd_restore_hp - | "give" "mp" number -> evcmd_give_mp - | "restore" "mp" -> evcmd_restore_mp - | "give" "item" item_value - | "take" "item" item_value - | "give" "spell" spellset_value spell_value - | "clear" "status" ["except" statuses] -> evcmd_clear_status - | "give" "status" statuses - | "give" "gp" number - | "take" "gp" number - | "give" "actor" actor_value - | "take" "actor" actor_value - | "pause" number - | "fight" value - | "shop" value - | "event" "message" number - | "map" "message" number - | "message" value [message_bank3_specifier] -> evcmd_message - | "set" flag_value - | "clear" flag_value - | ["de"] "activate" npc_value -> evcmd_npc - | "select" "item" item_value - | "confirm" "message" value -> evcmd_confirm - | "toggle" "tint" number - | "music" music_value [music_fade_in_specifier] -> evcmd_music - | "sound" sound_value - | "vfx" vfx_value - | "load" "map" map_value "at" number number [facing_specifier] load_map_specifier* -> evcmd_load_map - -facing_specifier : "facing" direction - -?load_map_specifier : no_transition_specifier - | vehicle_specifier - | no_launch_specifier - -?placement_specifier : "placement" placement_value - | "p" placement_value - -?placement_value : number - | "#" const_name -> placement_const - -!placement_command : "move" direction - | "face" direction - | "toggle" "visible" - | "jump" "sideways" - | "spin" - | "spin" "jump" - | "wave" "in" - | "wave" "out" - | "bow" "head" - | "lie" "down" - -!player_command : "move" direction - | "face" direction - | "invisible" - | "visible" - | "wave" "in" - | "wave" "out" - | "bow" "head" - | "lie" "down" - | "toggle" "turning" - | "toggle" "spinning" - -statuses : status_value* - -message_bank3_specifier : "from" "bank" "3" - -music_fade_in_specifier : "fade" "in" - -no_transition_specifier : "no" "transition" - -!vehicle_specifier : "on" "chocobo" - | "on" "black" "chocobo" - | "on" "hovercraft" - | "on" "enterprise" - | "on" "falcon" - | "on" "big" "whale" - | "on" "ship" ["2"] - | "on" value - -no_launch_specifier : "no" "launch" - -extension_command : "[" extension_command_bytes "]" [extension_command_block] 
-extension_command_bytes : eventext_value+ -extension_command_block : _block_start extension_command_block_item* _block_end - -?extension_command_block_item : event_command - | extension_command - | batch_block diff --git a/worlds/ff4fe/FreeEnterpriseForAP/f4c/grammar_event_call.lark b/worlds/ff4fe/FreeEnterpriseForAP/f4c/grammar_event_call.lark deleted file mode 100644 index 0afef7de05af..000000000000 --- a/worlds/ff4fe/FreeEnterpriseForAP/f4c/grammar_event_call.lark +++ /dev/null @@ -1,13 +0,0 @@ - -eventcall_block_params : number - -eventcall_block_body : [case* default_case [messages]] - -case : "if" condition ("," condition)* ":" value - -condition : flag_value - | "not" flag_value -> not_condition - -default_case: ["else" ":"] value - -messages: "messages" ":" value+ diff --git a/worlds/ff4fe/FreeEnterpriseForAP/f4c/grammar_formation.lark b/worlds/ff4fe/FreeEnterpriseForAP/f4c/grammar_formation.lark deleted file mode 100644 index 5daaf1076484..000000000000 --- a/worlds/ff4fe/FreeEnterpriseForAP/f4c/grammar_formation.lark +++ /dev/null @@ -1,34 +0,0 @@ -formation_block_params : number - -formation_block_body : formation_block_item* - -formation_block_item : not? "back" "attack" -> back_attack - | not? "boss" "death" -> boss_death - | "eggs" yesno yesno yesno -> eggs - | "monsters" _block_start [monster [monster [monster]]] _block_end -> monsters - | not? "calling" -> calling - | not? "transforming" -> transforming - | "arrangement" value -> arrangement - | "can" "run" -> can_run - | "can't" "run" -> cant_run - | "can" "gameover" -> can_gameover - | "no" "gameover" -> no_gameover - | music_type "music" -> music - | not? "character" "battle" -> character_battle - | not? "auto" "battle" -> auto_battle - | not? "floating" "enemies" -> floating_enemies - | not? 
"transparent" -> transparent - | "cursor" "graph" value -> cursor_graph_index - | "gfx" "bits" value -> gfx_bits - -monster : value "x" number - -yesno : "yes" -> yes - | "no" -> no - -!not : "not" - -!music_type : "regular" - | "boss" - | "fiend" - | "continue" diff --git a/worlds/ff4fe/FreeEnterpriseForAP/f4c/grammar_gfx.lark b/worlds/ff4fe/FreeEnterpriseForAP/f4c/grammar_gfx.lark deleted file mode 100644 index 81e62770689d..000000000000 --- a/worlds/ff4fe/FreeEnterpriseForAP/f4c/grammar_gfx.lark +++ /dev/null @@ -1,12 +0,0 @@ -chr_block_params : rom_address [format_specifier] - -!format_specifier : "1bit" - | "2bit" - | "3bit" - | "4bit" - -pal_block_params : rom_address - -pal_block_body : rgb+ - -rgb : value value value diff --git a/worlds/ff4fe/FreeEnterpriseForAP/f4c/grammar_hints.lark b/worlds/ff4fe/FreeEnterpriseForAP/f4c/grammar_hints.lark deleted file mode 100644 index 1fb482a6ba2c..000000000000 --- a/worlds/ff4fe/FreeEnterpriseForAP/f4c/grammar_hints.lark +++ /dev/null @@ -1,5 +0,0 @@ -start : hints_block* - -hints_block : "hints" _block_start hint* _block_end - -hint : "event" number "map" map_value -> event_map_hint diff --git a/worlds/ff4fe/FreeEnterpriseForAP/f4c/grammar_map.lark b/worlds/ff4fe/FreeEnterpriseForAP/f4c/grammar_map.lark deleted file mode 100644 index e98c9e4a3342..000000000000 --- a/worlds/ff4fe/FreeEnterpriseForAP/f4c/grammar_map.lark +++ /dev/null @@ -1,48 +0,0 @@ -mapgrid_block_params : map_value [position] - -position : number number - -mapgrid_block_body : mapgrid_element* - -mapgrid_element : /[0-9A-Fa-f]{2}/ -> tile - | "\\" -> eol - - -map_block_params : map_value - -map_block_body : map_info_element* - -map_info_element : "battle" "background" number -> battle_background - | "battle" "background" number "alternate" -> battle_background_alternate - | "warp" enabled_disabled -> warp - | "exit" enabled_disabled -> exit - | "magnetic" enabled_disabled -> magnetic - | "grid" number -> grid - | "tileset" number -> tileset - | "placement" "group" number -> placements - | "border" "tile" number -> border_tile - | "palette" number -> palette - | "npc" "palettes" number number -> npc_palettes - | "music" music_value -> music - | "background" [grid_specifier] [translucent_specifier] [scroll_specifier] [direction_specifier] [speed_specifier] -> background - | "name" "index" number -> name - | "underground" "npcs" -> underground_npcs - | "underground" "map" "grid" -> underground_map_grid - -enabled_disabled : "enabled" -> enabled - | "disabled" -> disabled - -grid_specifier : "grid" number -> bg_grid - -translucent_specifier : "translucent" -> bg_translucent - | "opaque" -> bg_opaque - -scroll_specifier : "scroll" "both" -> bg_scroll_both - | "scroll" "horizontal" -> bg_scroll_horizontal - | "scroll" "vertical" -> bg_scroll_vertical - | "scroll" "none" -> bg_scroll_none - -direction_specifier : "direction" direction -> bg_direction - -speed_specifier : "speed" number -> bg_speed - diff --git a/worlds/ff4fe/FreeEnterpriseForAP/f4c/grammar_monster.lark b/worlds/ff4fe/FreeEnterpriseForAP/f4c/grammar_monster.lark deleted file mode 100644 index 594b6b6e0aa3..000000000000 --- a/worlds/ff4fe/FreeEnterpriseForAP/f4c/grammar_monster.lark +++ /dev/null @@ -1,37 +0,0 @@ -monster_block_params : value - -monster_block_body : monster_block_item* - -monster_block_item : not? 
"boss" -> boss - | "level" number -> level - | "hp" number -> hp - | "gp" number -> gp - | "xp" number -> xp - | stat "index" number -> stat_index - | "drop" "index" number -> drop_index - | "drop" "rate" number -> drop_rate - | "attack" "sequence" number -> attack_sequence - | attack_resist_weak "element" element_value* ["none"] -> element - | attack_resist "status" status_value* ["none"] -> status - | "spell" "power" number -> spell_power - | "spell" "power" "none" -> spell_power - | "race" race_value* ["none"] -> race - | "reaction" "sequence" number -> reaction_sequence - | "reaction" "sequence" "none" -> reaction_sequence - | "gfx" _block_start gfx_item* _block_end - -!not : "not" - -!stat : "attack" - | "defense" - | "magic" "defense" - | "speed" - -?attack_resist_weak : attack_resist - | "weak" -> weak -?attack_resist : "attack" -> attack - | "resist" -> resist - -gfx_item : "size" value -> gfx_size - | "palette" value -> gfx_palette - | "pointer" value -> gfx_pointer diff --git a/worlds/ff4fe/FreeEnterpriseForAP/f4c/grammar_myselfpatch.lark b/worlds/ff4fe/FreeEnterpriseForAP/f4c/grammar_myselfpatch.lark deleted file mode 100644 index 7d43c804a512..000000000000 --- a/worlds/ff4fe/FreeEnterpriseForAP/f4c/grammar_myselfpatch.lark +++ /dev/null @@ -1,99 +0,0 @@ -start : item* - -?item : label - | directive - | command - | direct_patch - | raw_data - | string - -label : identifier ":" - | "%" identifier ":" -> local_label - -directive : /\.[a-z]+/ [[identifier] hex_number] - -command : operation [address] - -?operation : /[a-z]{3}/ - -address : "#" immediate_value -> immediate - | addr -> addr - | addr "," "x" -> addr_x - | addr "," "y" -> addr_y - | "(" addr ")" -> addr_indirect - | "(" addr "," "x" ")" -> addr_x_indirect - | "(" addr ")" "," "y" -> addr_indirect_y - | "[" addr "]" -> addr_indirect_long - | "[" addr "]" "," "y" -> addr_indirect_long_y - | hex_byte "," "s" -> sr - | "(" hex_byte "," "s" ")" "," "y" -> sr_indirect_y - | byte "," byte -> block - | "$+" identifier -> branch_label - | "$-" identifier -> branch_label - | "a" -> a - -?byte : hex_byte - | address_expression - -?addr : hex_address - | address_expression - -?immediate_value : number - | address_expression - -address_expression : address_part expression_value - -?address_part : "$." 
-> address_part_byte - | "$_" -> address_part_short - | "$=" -> address_part_long - | "$`" -> address_part_high - | "$^" -> address_part_bank - -?expression_value : identifier - | "(" expr_p0 ")" - -!expr_p0 : expr_p0 "|" expr_p1 -> expr_binary_op - | expr_p1 -> expr_passthrough -!expr_p1 : expr_p1 "^" expr_p2 -> expr_binary_op - | expr_p2 -> expr_passthrough -!expr_p2 : expr_p2 "&" expr_p3 -> expr_binary_op - | expr_p3 -> expr_passthrough -!expr_p3 : expr_p3 "<<" expr_p4 -> expr_binary_op - | expr_p3 ">>" expr_p4 -> expr_binary_op - | expr_p4 -> expr_passthrough -!expr_p4 : expr_p4 "+" expr_p5 -> expr_binary_op - | expr_p4 "-" expr_p5 -> expr_binary_op - | expr_p5 -> expr_passthrough -!expr_p5 : expr_p5 "*" expr_p6 -> expr_binary_op - | expr_p5 "/" expr_p6 -> expr_binary_op - | expr_p5 "%" expr_p6 -> expr_binary_op - | expr_p6 -> expr_passthrough -expr_p6 : "(" expr_p0 ")" -> expr_passthrough - | number -> expr_passthrough - | identifier -> expr_identifier - -direct_patch : "[" direct_patch_address "]" "=" number - -raw_data : "[[" raw_data_item* "]]" - -?raw_data_item : /[0-9A-Fa-f]{2}/ -> raw_byte - | address_expression - -?number : decimal_number - | hex_number - -decimal_number : /[0-9]+/ - -hex_number : /(\$|0x)[0-9A-Fa-f]+/ - -direct_patch_address : /[0-9A-Fa-f]{6}/ - -hex_address : /\$(0x)?[0-9A-Fa-f]{1,6}/ -hex_byte : /\$(0x)?[0-9A-Fa-f]{2}/ - -?identifier : /[A-Z][A-Za-z0-9_]*/ - -string : /"[^"]*"/ - -%import common.WS -%ignore WS diff --git a/worlds/ff4fe/FreeEnterpriseForAP/f4c/grammar_npc.lark b/worlds/ff4fe/FreeEnterpriseForAP/f4c/grammar_npc.lark deleted file mode 100644 index 221bf1a83670..000000000000 --- a/worlds/ff4fe/FreeEnterpriseForAP/f4c/grammar_npc.lark +++ /dev/null @@ -1,10 +0,0 @@ -npc_block_params : npc_value - -npc_block_body : npc_block_item* - -npc_block_item : "sprite" sprite_value -> sprite - | "default" "active" -> active - | "default" "inactive" -> inactive - | "eventcall" _block_start block_content* _block_end -> eventcall - -?block_content : /[^{}\n\t\f\r ]*/ diff --git a/worlds/ff4fe/FreeEnterpriseForAP/f4c/grammar_patch.lark b/worlds/ff4fe/FreeEnterpriseForAP/f4c/grammar_patch.lark deleted file mode 100644 index 9cdd06da7806..000000000000 --- a/worlds/ff4fe/FreeEnterpriseForAP/f4c/grammar_patch.lark +++ /dev/null @@ -1,8 +0,0 @@ -// ------------------------------------------------------------------------------------ -// Raw data patches - -patch_parameters : rom_address - -direct_patch_body : patch_byte* - -?patch_byte : /[A-Fa-f0-9]{2}/ diff --git a/worlds/ff4fe/FreeEnterpriseForAP/f4c/grammar_placement.lark b/worlds/ff4fe/FreeEnterpriseForAP/f4c/grammar_placement.lark deleted file mode 100644 index a4d10b1b5a6c..000000000000 --- a/worlds/ff4fe/FreeEnterpriseForAP/f4c/grammar_placement.lark +++ /dev/null @@ -1,20 +0,0 @@ -placement_block_parameters : number number - -placement_block_body : placement_block_item* - -placement_block_item : "npc" npc_value -> npc - | "position" number number -> position - | "walking" on_off -> walking - | "tangible" -> tangible - | "intangible" -> intangible - | "face" direction -> face - | "palette" number -> palette - | "turning" on_off -> turning - | "marching" on_off -> marching - | "speed" number -> speed - | "delete" -> delete - -?on_off : "on" -> on - | "true" -> on - | "off" -> off - | "false" -> off diff --git a/worlds/ff4fe/FreeEnterpriseForAP/f4c/grammar_shop.lark b/worlds/ff4fe/FreeEnterpriseForAP/f4c/grammar_shop.lark deleted file mode 100644 index 726636c191d5..000000000000 --- 
a/worlds/ff4fe/FreeEnterpriseForAP/f4c/grammar_shop.lark +++ /dev/null @@ -1,3 +0,0 @@ -shop_block_params : number - -shop_block_body : item_value* diff --git a/worlds/ff4fe/FreeEnterpriseForAP/f4c/grammar_spell_set.lark b/worlds/ff4fe/FreeEnterpriseForAP/f4c/grammar_spell_set.lark deleted file mode 100644 index 076a98b39154..000000000000 --- a/worlds/ff4fe/FreeEnterpriseForAP/f4c/grammar_spell_set.lark +++ /dev/null @@ -1,9 +0,0 @@ -spellset_block_params : spellset_value - -spellset_block_body : initial_block? learned_block? - -initial_block : "initial" _block_start spell_value* _block_end - -learned_block : "learned" _block_start learned_pair* _block_end - -learned_pair : value spell_value diff --git a/worlds/ff4fe/FreeEnterpriseForAP/f4c/grammar_text.lark b/worlds/ff4fe/FreeEnterpriseForAP/f4c/grammar_text.lark deleted file mode 100644 index 84d40d8e01ab..000000000000 --- a/worlds/ff4fe/FreeEnterpriseForAP/f4c/grammar_text.lark +++ /dev/null @@ -1,12 +0,0 @@ -text_block_parameters : "bank" number "message" value -> bank_text - | "map" map_value "message" value -> map_text - | "battle" "message" number -> battle_text - | "alert" "message" number -> alert_text - | "monster" "name" number -> monster_name_text - | "command" "name" command_value -> command_name_text - | "map" "name" number -> map_name_text - | "item" "name" item_value -> item_name_text - | "spell" "name" spell_value -> spell_name_text - | "status" number -> status_text - | "credits" -> credits_text - | rom_address -> custom_text diff --git a/worlds/ff4fe/FreeEnterpriseForAP/f4c/grammar_trigger.lark b/worlds/ff4fe/FreeEnterpriseForAP/f4c/grammar_trigger.lark deleted file mode 100644 index 9cfc0a142117..000000000000 --- a/worlds/ff4fe/FreeEnterpriseForAP/f4c/grammar_trigger.lark +++ /dev/null @@ -1,13 +0,0 @@ -trigger_block_parameters : map_value number - -trigger_block_body : [position] [function] - | "delete" -> delete - -position : "position" number number - -function : "treasure" content ["fight" number] -> treasure - | "teleport" map_value "at" number number ["facing" direction] -> teleport - | "event" "call" number -> event_call - -content : item_value -> item - | decimal_number "gp" -> gp diff --git a/worlds/ff4fe/FreeEnterpriseForAP/f4c/hints.py b/worlds/ff4fe/FreeEnterpriseForAP/f4c/hints.py deleted file mode 100644 index 6f8d2419ea14..000000000000 --- a/worlds/ff4fe/FreeEnterpriseForAP/f4c/hints.py +++ /dev/null @@ -1,28 +0,0 @@ -import re - -from . import compile_common -from . 
import lark - -_hints = {} - -class HintsProcessor(lark.Transformer): - def event_map_hint(self, pair): - event_id,map_id = pair - _hints.setdefault('event map', {})[event_id] = map_id - return None - -def load_file(filename): - with open(filename, 'r') as infile: - lines = infile.read().split('\n') - - lines = [re.sub(r'//.*$', '', l) for l in lines] - tree = compile_common.parse('\n'.join(lines), 'hints', 'start') - - HintsProcessor().transform(tree) - -def get_event_map(event_id): - try: - return _hints['event map'][event_id] - except KeyError: - return None - diff --git a/worlds/ff4fe/FreeEnterpriseForAP/f4c/lark/LICENSE b/worlds/ff4fe/FreeEnterpriseForAP/f4c/lark/LICENSE deleted file mode 100644 index aaf210b1d01d..000000000000 --- a/worlds/ff4fe/FreeEnterpriseForAP/f4c/lark/LICENSE +++ /dev/null @@ -1,18 +0,0 @@ -Copyright © 2017 Erez Shinan - -Permission is hereby granted, free of charge, to any person obtaining a copy of -this software and associated documentation files (the "Software"), to deal in -the Software without restriction, including without limitation the rights to -use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of -the Software, and to permit persons to whom the Software is furnished to do so, -subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS -FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR -COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER -IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN -CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/worlds/ff4fe/FreeEnterpriseForAP/f4c/lark/__init__.py b/worlds/ff4fe/FreeEnterpriseForAP/f4c/lark/__init__.py deleted file mode 100644 index 272506b78b8c..000000000000 --- a/worlds/ff4fe/FreeEnterpriseForAP/f4c/lark/__init__.py +++ /dev/null @@ -1,6 +0,0 @@ -from .tree import Tree, Transformer, InlineTransformer -from .common import ParseError, GrammarError -from .lark import Lark -from .utils import inline_args - -__version__ = "0.3.1" diff --git a/worlds/ff4fe/FreeEnterpriseForAP/f4c/lark/_constants.py b/worlds/ff4fe/FreeEnterpriseForAP/f4c/lark/_constants.py deleted file mode 100644 index 9339b268a8ec..000000000000 --- a/worlds/ff4fe/FreeEnterpriseForAP/f4c/lark/_constants.py +++ /dev/null @@ -1,168 +0,0 @@ -# -# Secret Labs' Regular Expression Engine -# -# various symbols used by the regular expression engine. -# run this script to update the _sre include files! -# -# Copyright (c) 1998-2001 by Secret Labs AB. All rights reserved. -# -# See the __init__.py file for information on usage and redistribution. -# - -"""Internal support module for sre""" - -# update when constants are added or removed - -MAGIC = 20221023 - -from _sre import MAXREPEAT, MAXGROUPS - -# SRE standard exception (access as sre.error) -# should this really be here? - -class error(Exception): - """Exception raised for invalid regular expressions. 
- - Attributes: - - msg: The unformatted error message - pattern: The regular expression pattern - pos: The index in the pattern where compilation failed (may be None) - lineno: The line corresponding to pos (may be None) - colno: The column corresponding to pos (may be None) - """ - - __module__ = 're' - - def __init__(self, msg, pattern=None, pos=None): - self.msg = msg - self.pattern = pattern - self.pos = pos - if pattern is not None and pos is not None: - msg = '%s at position %d' % (msg, pos) - if isinstance(pattern, str): - newline = '\n' - else: - newline = b'\n' - self.lineno = pattern.count(newline, 0, pos) + 1 - self.colno = pos - pattern.rfind(newline, 0, pos) - if newline in pattern: - msg = '%s (line %d, column %d)' % (msg, self.lineno, self.colno) - else: - self.lineno = self.colno = None - super().__init__(msg) - - -class _NamedIntConstant(int): - def __new__(cls, value, name): - self = super(_NamedIntConstant, cls).__new__(cls, value) - self.name = name - return self - - def __repr__(self): - return self.name - - __reduce__ = None - -MAXREPEAT = _NamedIntConstant(MAXREPEAT, 'MAXREPEAT') - -def _makecodes(*names): - items = [_NamedIntConstant(i, name) for i, name in enumerate(names)] - globals().update({item.name: item for item in items}) - return items - -# operators -OPCODES = _makecodes( - # failure=0 success=1 (just because it looks better that way :-) - 'FAILURE', 'SUCCESS', - - 'ANY', 'ANY_ALL', - 'ASSERT', 'ASSERT_NOT', - 'AT', - 'BRANCH', - 'CATEGORY', - 'CHARSET', 'BIGCHARSET', - 'GROUPREF', 'GROUPREF_EXISTS', - 'IN', - 'INFO', - 'JUMP', - 'LITERAL', - 'MARK', - 'MAX_UNTIL', - 'MIN_UNTIL', - 'NOT_LITERAL', - 'NEGATE', - 'RANGE', - 'REPEAT', - 'REPEAT_ONE', - 'SUBPATTERN', - 'MIN_REPEAT_ONE', - 'ATOMIC_GROUP', - 'POSSESSIVE_REPEAT', - 'POSSESSIVE_REPEAT_ONE', - - 'GROUPREF_IGNORE', - 'IN_IGNORE', - 'LITERAL_IGNORE', - 'NOT_LITERAL_IGNORE', - - 'GROUPREF_LOC_IGNORE', - 'IN_LOC_IGNORE', - 'LITERAL_LOC_IGNORE', - 'NOT_LITERAL_LOC_IGNORE', - - 'GROUPREF_UNI_IGNORE', - 'IN_UNI_IGNORE', - 'LITERAL_UNI_IGNORE', - 'NOT_LITERAL_UNI_IGNORE', - 'RANGE_UNI_IGNORE', - - # The following opcodes are only occurred in the parser output, - # but not in the compiled code. 
- 'MIN_REPEAT', 'MAX_REPEAT', -) -del OPCODES[-2:] # remove MIN_REPEAT and MAX_REPEAT - -# positions -ATCODES = _makecodes( - 'AT_BEGINNING', 'AT_BEGINNING_LINE', 'AT_BEGINNING_STRING', - 'AT_BOUNDARY', 'AT_NON_BOUNDARY', - 'AT_END', 'AT_END_LINE', 'AT_END_STRING', - - 'AT_LOC_BOUNDARY', 'AT_LOC_NON_BOUNDARY', - - 'AT_UNI_BOUNDARY', 'AT_UNI_NON_BOUNDARY', -) - -# categories -CHCODES = _makecodes( - 'CATEGORY_DIGIT', 'CATEGORY_NOT_DIGIT', - 'CATEGORY_SPACE', 'CATEGORY_NOT_SPACE', - 'CATEGORY_WORD', 'CATEGORY_NOT_WORD', - 'CATEGORY_LINEBREAK', 'CATEGORY_NOT_LINEBREAK', - - 'CATEGORY_LOC_WORD', 'CATEGORY_LOC_NOT_WORD', - - 'CATEGORY_UNI_DIGIT', 'CATEGORY_UNI_NOT_DIGIT', - 'CATEGORY_UNI_SPACE', 'CATEGORY_UNI_NOT_SPACE', - 'CATEGORY_UNI_WORD', 'CATEGORY_UNI_NOT_WORD', - 'CATEGORY_UNI_LINEBREAK', 'CATEGORY_UNI_NOT_LINEBREAK', -) - - - -# flags -SRE_FLAG_TEMPLATE = 1 # template mode (unknown purpose, deprecated) -SRE_FLAG_IGNORECASE = 2 # case insensitive -SRE_FLAG_LOCALE = 4 # honour system locale -SRE_FLAG_MULTILINE = 8 # treat target as multiline string -SRE_FLAG_DOTALL = 16 # treat target as a single string -SRE_FLAG_UNICODE = 32 # use unicode "locale" -SRE_FLAG_VERBOSE = 64 # ignore whitespace and comments -SRE_FLAG_DEBUG = 128 # debugging -SRE_FLAG_ASCII = 256 # use ascii "locale" - -# flags for INFO primitive -SRE_INFO_PREFIX = 1 # has prefix -SRE_INFO_LITERAL = 2 # entire pattern is literal (given by prefix) -SRE_INFO_CHARSET = 4 # pattern starts with character from given set diff --git a/worlds/ff4fe/FreeEnterpriseForAP/f4c/lark/_parser.py b/worlds/ff4fe/FreeEnterpriseForAP/f4c/lark/_parser.py deleted file mode 100644 index 4a492b79e84e..000000000000 --- a/worlds/ff4fe/FreeEnterpriseForAP/f4c/lark/_parser.py +++ /dev/null @@ -1,1080 +0,0 @@ -# -# Secret Labs' Regular Expression Engine -# -# convert re-style regular expression to sre pattern -# -# Copyright (c) 1998-2001 by Secret Labs AB. All rights reserved. -# -# See the __init__.py file for information on usage and redistribution. 
-# - -"""Internal support module for sre""" - -# XXX: show string offset and offending character for all errors - -from ._constants import * - -SPECIAL_CHARS = ".\\[{()*+?^$|" -REPEAT_CHARS = "*+?{" - -DIGITS = frozenset("0123456789") - -OCTDIGITS = frozenset("01234567") -HEXDIGITS = frozenset("0123456789abcdefABCDEF") -ASCIILETTERS = frozenset("abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ") - -WHITESPACE = frozenset(" \t\n\r\v\f") - -_REPEATCODES = frozenset({MIN_REPEAT, MAX_REPEAT, POSSESSIVE_REPEAT}) -_UNITCODES = frozenset({ANY, RANGE, IN, LITERAL, NOT_LITERAL, CATEGORY}) - -ESCAPES = { - r"\a": (LITERAL, ord("\a")), - r"\b": (LITERAL, ord("\b")), - r"\f": (LITERAL, ord("\f")), - r"\n": (LITERAL, ord("\n")), - r"\r": (LITERAL, ord("\r")), - r"\t": (LITERAL, ord("\t")), - r"\v": (LITERAL, ord("\v")), - r"\\": (LITERAL, ord("\\")) -} - -CATEGORIES = { - r"\A": (AT, AT_BEGINNING_STRING), # start of string - r"\b": (AT, AT_BOUNDARY), - r"\B": (AT, AT_NON_BOUNDARY), - r"\d": (IN, [(CATEGORY, CATEGORY_DIGIT)]), - r"\D": (IN, [(CATEGORY, CATEGORY_NOT_DIGIT)]), - r"\s": (IN, [(CATEGORY, CATEGORY_SPACE)]), - r"\S": (IN, [(CATEGORY, CATEGORY_NOT_SPACE)]), - r"\w": (IN, [(CATEGORY, CATEGORY_WORD)]), - r"\W": (IN, [(CATEGORY, CATEGORY_NOT_WORD)]), - r"\Z": (AT, AT_END_STRING), # end of string -} - -FLAGS = { - # standard flags - "i": SRE_FLAG_IGNORECASE, - "L": SRE_FLAG_LOCALE, - "m": SRE_FLAG_MULTILINE, - "s": SRE_FLAG_DOTALL, - "x": SRE_FLAG_VERBOSE, - # extensions - "a": SRE_FLAG_ASCII, - "t": SRE_FLAG_TEMPLATE, - "u": SRE_FLAG_UNICODE, -} - -TYPE_FLAGS = SRE_FLAG_ASCII | SRE_FLAG_LOCALE | SRE_FLAG_UNICODE -GLOBAL_FLAGS = SRE_FLAG_DEBUG | SRE_FLAG_TEMPLATE - -# Maximal value returned by SubPattern.getwidth(). -# Must be larger than MAXREPEAT, MAXCODE and sys.maxsize. 
-MAXWIDTH = 1 << 64 - -class State: - # keeps track of state for parsing - def __init__(self): - self.flags = 0 - self.groupdict = {} - self.groupwidths = [None] # group 0 - self.lookbehindgroups = None - self.grouprefpos = {} - @property - def groups(self): - return len(self.groupwidths) - def opengroup(self, name=None): - gid = self.groups - self.groupwidths.append(None) - if self.groups > MAXGROUPS: - raise error("too many groups") - if name is not None: - ogid = self.groupdict.get(name, None) - if ogid is not None: - raise error("redefinition of group name %r as group %d; " - "was group %d" % (name, gid, ogid)) - self.groupdict[name] = gid - return gid - def closegroup(self, gid, p): - self.groupwidths[gid] = p.getwidth() - def checkgroup(self, gid): - return gid < self.groups and self.groupwidths[gid] is not None - - def checklookbehindgroup(self, gid, source): - if self.lookbehindgroups is not None: - if not self.checkgroup(gid): - raise source.error('cannot refer to an open group') - if gid >= self.lookbehindgroups: - raise source.error('cannot refer to group defined in the same ' - 'lookbehind subpattern') - -class SubPattern: - # a subpattern, in intermediate form - def __init__(self, state, data=None): - self.state = state - if data is None: - data = [] - self.data = data - self.width = None - - def dump(self, level=0): - seqtypes = (tuple, list) - for op, av in self.data: - print(level*" " + str(op), end='') - if op is IN: - # member sublanguage - print() - for op, a in av: - print((level+1)*" " + str(op), a) - elif op is BRANCH: - print() - for i, a in enumerate(av[1]): - if i: - print(level*" " + "OR") - a.dump(level+1) - elif op is GROUPREF_EXISTS: - condgroup, item_yes, item_no = av - print('', condgroup) - item_yes.dump(level+1) - if item_no: - print(level*" " + "ELSE") - item_no.dump(level+1) - elif isinstance(av, SubPattern): - print() - av.dump(level+1) - elif isinstance(av, seqtypes): - nl = False - for a in av: - if isinstance(a, SubPattern): - if not nl: - print() - a.dump(level+1) - nl = True - else: - if not nl: - print(' ', end='') - print(a, end='') - nl = False - if not nl: - print() - else: - print('', av) - def __repr__(self): - return repr(self.data) - def __len__(self): - return len(self.data) - def __delitem__(self, index): - del self.data[index] - def __getitem__(self, index): - if isinstance(index, slice): - return SubPattern(self.state, self.data[index]) - return self.data[index] - def __setitem__(self, index, code): - self.data[index] = code - def insert(self, index, code): - self.data.insert(index, code) - def append(self, code): - self.data.append(code) - def getwidth(self): - # determine the width (min, max) for this subpattern - if self.width is not None: - return self.width - lo = hi = 0 - for op, av in self.data: - if op is BRANCH: - i = MAXWIDTH - j = 0 - for av in av[1]: - l, h = av.getwidth() - i = min(i, l) - j = max(j, h) - lo = lo + i - hi = hi + j - elif op is ATOMIC_GROUP: - i, j = av.getwidth() - lo = lo + i - hi = hi + j - elif op is SUBPATTERN: - i, j = av[-1].getwidth() - lo = lo + i - hi = hi + j - elif op in _REPEATCODES: - i, j = av[2].getwidth() - lo = lo + i * av[0] - if av[1] == MAXREPEAT and j: - hi = MAXWIDTH - else: - hi = hi + j * av[1] - elif op in _UNITCODES: - lo = lo + 1 - hi = hi + 1 - elif op is GROUPREF: - i, j = self.state.groupwidths[av] - lo = lo + i - hi = hi + j - elif op is GROUPREF_EXISTS: - i, j = av[1].getwidth() - if av[2] is not None: - l, h = av[2].getwidth() - i = min(i, l) - j = max(j, h) - else: - i = 0 - 
lo = lo + i - hi = hi + j - elif op is SUCCESS: - break - self.width = min(lo, MAXWIDTH), min(hi, MAXWIDTH) - return self.width - -class Tokenizer: - def __init__(self, string): - self.istext = isinstance(string, str) - self.string = string - if not self.istext: - string = str(string, 'latin1') - self.decoded_string = string - self.index = 0 - self.next = None - self.__next() - def __next(self): - index = self.index - try: - char = self.decoded_string[index] - except IndexError: - self.next = None - return - if char == "\\": - index += 1 - try: - char += self.decoded_string[index] - except IndexError: - raise error("bad escape (end of pattern)", - self.string, len(self.string) - 1) from None - self.index = index + 1 - self.next = char - def match(self, char): - if char == self.next: - self.__next() - return True - return False - def get(self): - this = self.next - self.__next() - return this - def getwhile(self, n, charset): - result = '' - for _ in range(n): - c = self.next - if c not in charset: - break - result += c - self.__next() - return result - def getuntil(self, terminator, name): - result = '' - while True: - c = self.next - self.__next() - if c is None: - if not result: - raise self.error("missing " + name) - raise self.error("missing %s, unterminated name" % terminator, - len(result)) - if c == terminator: - if not result: - raise self.error("missing " + name, 1) - break - result += c - return result - @property - def pos(self): - return self.index - len(self.next or '') - def tell(self): - return self.index - len(self.next or '') - def seek(self, index): - self.index = index - self.__next() - - def error(self, msg, offset=0): - if not self.istext: - msg = msg.encode('ascii', 'backslashreplace').decode('ascii') - return error(msg, self.string, self.tell() - offset) - - def checkgroupname(self, name, offset): - if not (self.istext or name.isascii()): - msg = "bad character in group name %a" % name - raise self.error(msg, len(name) + offset) - if not name.isidentifier(): - msg = "bad character in group name %r" % name - raise self.error(msg, len(name) + offset) - -def _class_escape(source, escape): - # handle escape code inside character class - code = ESCAPES.get(escape) - if code: - return code - code = CATEGORIES.get(escape) - if code and code[0] is IN: - return code - try: - c = escape[1:2] - if c == "x": - # hexadecimal escape (exactly two digits) - escape += source.getwhile(2, HEXDIGITS) - if len(escape) != 4: - raise source.error("incomplete escape %s" % escape, len(escape)) - return LITERAL, int(escape[2:], 16) - elif c == "u" and source.istext: - # unicode escape (exactly four digits) - escape += source.getwhile(4, HEXDIGITS) - if len(escape) != 6: - raise source.error("incomplete escape %s" % escape, len(escape)) - return LITERAL, int(escape[2:], 16) - elif c == "U" and source.istext: - # unicode escape (exactly eight digits) - escape += source.getwhile(8, HEXDIGITS) - if len(escape) != 10: - raise source.error("incomplete escape %s" % escape, len(escape)) - c = int(escape[2:], 16) - chr(c) # raise ValueError for invalid code - return LITERAL, c - elif c == "N" and source.istext: - import unicodedata - # named unicode escape e.g. 
\N{EM DASH} - if not source.match('{'): - raise source.error("missing {") - charname = source.getuntil('}', 'character name') - try: - c = ord(unicodedata.lookup(charname)) - except (KeyError, TypeError): - raise source.error("undefined character name %r" % charname, - len(charname) + len(r'\N{}')) from None - return LITERAL, c - elif c in OCTDIGITS: - # octal escape (up to three digits) - escape += source.getwhile(2, OCTDIGITS) - c = int(escape[1:], 8) - if c > 0o377: - raise source.error('octal escape value %s outside of ' - 'range 0-0o377' % escape, len(escape)) - return LITERAL, c - elif c in DIGITS: - raise ValueError - if len(escape) == 2: - if c in ASCIILETTERS: - raise source.error('bad escape %s' % escape, len(escape)) - return LITERAL, ord(escape[1]) - except ValueError: - pass - raise source.error("bad escape %s" % escape, len(escape)) - -def _escape(source, escape, state): - # handle escape code in expression - code = CATEGORIES.get(escape) - if code: - return code - code = ESCAPES.get(escape) - if code: - return code - try: - c = escape[1:2] - if c == "x": - # hexadecimal escape - escape += source.getwhile(2, HEXDIGITS) - if len(escape) != 4: - raise source.error("incomplete escape %s" % escape, len(escape)) - return LITERAL, int(escape[2:], 16) - elif c == "u" and source.istext: - # unicode escape (exactly four digits) - escape += source.getwhile(4, HEXDIGITS) - if len(escape) != 6: - raise source.error("incomplete escape %s" % escape, len(escape)) - return LITERAL, int(escape[2:], 16) - elif c == "U" and source.istext: - # unicode escape (exactly eight digits) - escape += source.getwhile(8, HEXDIGITS) - if len(escape) != 10: - raise source.error("incomplete escape %s" % escape, len(escape)) - c = int(escape[2:], 16) - chr(c) # raise ValueError for invalid code - return LITERAL, c - elif c == "N" and source.istext: - import unicodedata - # named unicode escape e.g. 
\N{EM DASH} - if not source.match('{'): - raise source.error("missing {") - charname = source.getuntil('}', 'character name') - try: - c = ord(unicodedata.lookup(charname)) - except (KeyError, TypeError): - raise source.error("undefined character name %r" % charname, - len(charname) + len(r'\N{}')) from None - return LITERAL, c - elif c == "0": - # octal escape - escape += source.getwhile(2, OCTDIGITS) - return LITERAL, int(escape[1:], 8) - elif c in DIGITS: - # octal escape *or* decimal group reference (sigh) - if source.next in DIGITS: - escape += source.get() - if (escape[1] in OCTDIGITS and escape[2] in OCTDIGITS and - source.next in OCTDIGITS): - # got three octal digits; this is an octal escape - escape += source.get() - c = int(escape[1:], 8) - if c > 0o377: - raise source.error('octal escape value %s outside of ' - 'range 0-0o377' % escape, - len(escape)) - return LITERAL, c - # not an octal escape, so this is a group reference - group = int(escape[1:]) - if group < state.groups: - if not state.checkgroup(group): - raise source.error("cannot refer to an open group", - len(escape)) - state.checklookbehindgroup(group, source) - return GROUPREF, group - raise source.error("invalid group reference %d" % group, len(escape) - 1) - if len(escape) == 2: - if c in ASCIILETTERS: - raise source.error("bad escape %s" % escape, len(escape)) - return LITERAL, ord(escape[1]) - except ValueError: - pass - raise source.error("bad escape %s" % escape, len(escape)) - -def _uniq(items): - return list(dict.fromkeys(items)) - -def _parse_sub(source, state, verbose, nested): - # parse an alternation: a|b|c - - items = [] - itemsappend = items.append - sourcematch = source.match - start = source.tell() - while True: - itemsappend(_parse(source, state, verbose, nested + 1, - not nested and not items)) - if not sourcematch("|"): - break - if not nested: - verbose = state.flags & SRE_FLAG_VERBOSE - - if len(items) == 1: - return items[0] - - subpattern = SubPattern(state) - - # check if all items share a common prefix - while True: - prefix = None - for item in items: - if not item: - break - if prefix is None: - prefix = item[0] - elif item[0] != prefix: - break - else: - # all subitems start with a common "prefix". 
- # move it out of the branch - for item in items: - del item[0] - subpattern.append(prefix) - continue # check next one - break - - # check if the branch can be replaced by a character set - set = [] - for item in items: - if len(item) != 1: - break - op, av = item[0] - if op is LITERAL: - set.append((op, av)) - elif op is IN and av[0][0] is not NEGATE: - set.extend(av) - else: - break - else: - # we can store this as a character set instead of a - # branch (the compiler may optimize this even more) - subpattern.append((IN, _uniq(set))) - return subpattern - - subpattern.append((BRANCH, (None, items))) - return subpattern - -def _parse(source, state, verbose, nested, first=False): - # parse a simple pattern - subpattern = SubPattern(state) - - # precompute constants into local variables - subpatternappend = subpattern.append - sourceget = source.get - sourcematch = source.match - _len = len - _ord = ord - - while True: - - this = source.next - if this is None: - break # end of pattern - if this in "|)": - break # end of subpattern - sourceget() - - if verbose: - # skip whitespace and comments - if this in WHITESPACE: - continue - if this == "#": - while True: - this = sourceget() - if this is None or this == "\n": - break - continue - - if this[0] == "\\": - code = _escape(source, this, state) - subpatternappend(code) - - elif this not in SPECIAL_CHARS: - subpatternappend((LITERAL, _ord(this))) - - elif this == "[": - here = source.tell() - 1 - # character set - set = [] - setappend = set.append -## if sourcematch(":"): -## pass # handle character classes - if source.next == '[': - import warnings - warnings.warn( - 'Possible nested set at position %d' % source.tell(), - FutureWarning, stacklevel=nested + 6 - ) - negate = sourcematch("^") - # check remaining characters - while True: - this = sourceget() - if this is None: - raise source.error("unterminated character set", - source.tell() - here) - if this == "]" and set: - break - elif this[0] == "\\": - code1 = _class_escape(source, this) - else: - if set and this in '-&~|' and source.next == this: - import warnings - warnings.warn( - 'Possible set %s at position %d' % ( - 'difference' if this == '-' else - 'intersection' if this == '&' else - 'symmetric difference' if this == '~' else - 'union', - source.tell() - 1), - FutureWarning, stacklevel=nested + 6 - ) - code1 = LITERAL, _ord(this) - if sourcematch("-"): - # potential range - that = sourceget() - if that is None: - raise source.error("unterminated character set", - source.tell() - here) - if that == "]": - if code1[0] is IN: - code1 = code1[1][0] - setappend(code1) - setappend((LITERAL, _ord("-"))) - break - if that[0] == "\\": - code2 = _class_escape(source, that) - else: - if that == '-': - import warnings - warnings.warn( - 'Possible set difference at position %d' % ( - source.tell() - 2), - FutureWarning, stacklevel=nested + 6 - ) - code2 = LITERAL, _ord(that) - if code1[0] != LITERAL or code2[0] != LITERAL: - msg = "bad character range %s-%s" % (this, that) - raise source.error(msg, len(this) + 1 + len(that)) - lo = code1[1] - hi = code2[1] - if hi < lo: - msg = "bad character range %s-%s" % (this, that) - raise source.error(msg, len(this) + 1 + len(that)) - setappend((RANGE, (lo, hi))) - else: - if code1[0] is IN: - code1 = code1[1][0] - setappend(code1) - - set = _uniq(set) - # XXX: should move set optimization to compiler! 
- if _len(set) == 1 and set[0][0] is LITERAL: - # optimization - if negate: - subpatternappend((NOT_LITERAL, set[0][1])) - else: - subpatternappend(set[0]) - else: - if negate: - set.insert(0, (NEGATE, None)) - # charmap optimization can't be added here because - # global flags still are not known - subpatternappend((IN, set)) - - elif this in REPEAT_CHARS: - # repeat previous item - here = source.tell() - if this == "?": - min, max = 0, 1 - elif this == "*": - min, max = 0, MAXREPEAT - - elif this == "+": - min, max = 1, MAXREPEAT - elif this == "{": - if source.next == "}": - subpatternappend((LITERAL, _ord(this))) - continue - - min, max = 0, MAXREPEAT - lo = hi = "" - while source.next in DIGITS: - lo += sourceget() - if sourcematch(","): - while source.next in DIGITS: - hi += sourceget() - else: - hi = lo - if not sourcematch("}"): - subpatternappend((LITERAL, _ord(this))) - source.seek(here) - continue - - if lo: - min = int(lo) - if min >= MAXREPEAT: - raise OverflowError("the repetition number is too large") - if hi: - max = int(hi) - if max >= MAXREPEAT: - raise OverflowError("the repetition number is too large") - if max < min: - raise source.error("min repeat greater than max repeat", - source.tell() - here) - else: - raise AssertionError("unsupported quantifier %r" % (char,)) - # figure out which item to repeat - if subpattern: - item = subpattern[-1:] - else: - item = None - if not item or item[0][0] is AT: - raise source.error("nothing to repeat", - source.tell() - here + len(this)) - if item[0][0] in _REPEATCODES: - raise source.error("multiple repeat", - source.tell() - here + len(this)) - if item[0][0] is SUBPATTERN: - group, add_flags, del_flags, p = item[0][1] - if group is None and not add_flags and not del_flags: - item = p - if sourcematch("?"): - # Non-Greedy Match - subpattern[-1] = (MIN_REPEAT, (min, max, item)) - elif sourcematch("+"): - # Possessive Match (Always Greedy) - subpattern[-1] = (POSSESSIVE_REPEAT, (min, max, item)) - else: - # Greedy Match - subpattern[-1] = (MAX_REPEAT, (min, max, item)) - - elif this == ".": - subpatternappend((ANY, None)) - - elif this == "(": - start = source.tell() - 1 - capture = True - atomic = False - name = None - add_flags = 0 - del_flags = 0 - if sourcematch("?"): - # options - char = sourceget() - if char is None: - raise source.error("unexpected end of pattern") - if char == "P": - # python extensions - if sourcematch("<"): - # named group: skip forward to end of name - name = source.getuntil(">", "group name") - source.checkgroupname(name, 1) - elif sourcematch("="): - # named backreference - name = source.getuntil(")", "group name") - source.checkgroupname(name, 1) - gid = state.groupdict.get(name) - if gid is None: - msg = "unknown group name %r" % name - raise source.error(msg, len(name) + 1) - if not state.checkgroup(gid): - raise source.error("cannot refer to an open group", - len(name) + 1) - state.checklookbehindgroup(gid, source) - subpatternappend((GROUPREF, gid)) - continue - - else: - char = sourceget() - if char is None: - raise source.error("unexpected end of pattern") - raise source.error("unknown extension ?P" + char, - len(char) + 2) - elif char == ":": - # non-capturing group - capture = False - elif char == "#": - # comment - while True: - if source.next is None: - raise source.error("missing ), unterminated comment", - source.tell() - start) - if sourceget() == ")": - break - continue - - elif char in "=!<": - # lookahead assertions - dir = 1 - if char == "<": - char = sourceget() - if char is None: - 
raise source.error("unexpected end of pattern") - if char not in "=!": - raise source.error("unknown extension ?<" + char, - len(char) + 2) - dir = -1 # lookbehind - lookbehindgroups = state.lookbehindgroups - if lookbehindgroups is None: - state.lookbehindgroups = state.groups - p = _parse_sub(source, state, verbose, nested + 1) - if dir < 0: - if lookbehindgroups is None: - state.lookbehindgroups = None - if not sourcematch(")"): - raise source.error("missing ), unterminated subpattern", - source.tell() - start) - if char == "=": - subpatternappend((ASSERT, (dir, p))) - else: - subpatternappend((ASSERT_NOT, (dir, p))) - continue - - elif char == "(": - # conditional backreference group - condname = source.getuntil(")", "group name") - if not (condname.isdecimal() and condname.isascii()): - source.checkgroupname(condname, 1) - condgroup = state.groupdict.get(condname) - if condgroup is None: - msg = "unknown group name %r" % condname - raise source.error(msg, len(condname) + 1) - else: - condgroup = int(condname) - if not condgroup: - raise source.error("bad group number", - len(condname) + 1) - if condgroup >= MAXGROUPS: - msg = "invalid group reference %d" % condgroup - raise source.error(msg, len(condname) + 1) - if condgroup not in state.grouprefpos: - state.grouprefpos[condgroup] = ( - source.tell() - len(condname) - 1 - ) - if not (condname.isdecimal() and condname.isascii()): - import warnings - warnings.warn( - "bad character in group name %s at position %d" % - (repr(condname) if source.istext else ascii(condname), - source.tell() - len(condname) - 1), - DeprecationWarning, stacklevel=nested + 6 - ) - state.checklookbehindgroup(condgroup, source) - item_yes = _parse(source, state, verbose, nested + 1) - if source.match("|"): - item_no = _parse(source, state, verbose, nested + 1) - if source.next == "|": - raise source.error("conditional backref with more than two branches") - else: - item_no = None - if not source.match(")"): - raise source.error("missing ), unterminated subpattern", - source.tell() - start) - subpatternappend((GROUPREF_EXISTS, (condgroup, item_yes, item_no))) - continue - - elif char == ">": - # non-capturing, atomic group - capture = False - atomic = True - elif char in FLAGS or char == "-": - # flags - flags = _parse_flags(source, state, char) - if flags is None: # global flags - if not first or subpattern: - raise source.error('global flags not at the start ' - 'of the expression', - source.tell() - start) - verbose = state.flags & SRE_FLAG_VERBOSE - continue - - add_flags, del_flags = flags - capture = False - else: - raise source.error("unknown extension ?" 
+ char, - len(char) + 1) - - # parse group contents - if capture: - try: - group = state.opengroup(name) - except error as err: - raise source.error(err.msg, len(name) + 1) from None - else: - group = None - sub_verbose = ((verbose or (add_flags & SRE_FLAG_VERBOSE)) and - not (del_flags & SRE_FLAG_VERBOSE)) - p = _parse_sub(source, state, sub_verbose, nested + 1) - if not source.match(")"): - raise source.error("missing ), unterminated subpattern", - source.tell() - start) - if group is not None: - state.closegroup(group, p) - if atomic: - assert group is None - subpatternappend((ATOMIC_GROUP, p)) - else: - subpatternappend((SUBPATTERN, (group, add_flags, del_flags, p))) - - elif this == "^": - subpatternappend((AT, AT_BEGINNING)) - - elif this == "$": - subpatternappend((AT, AT_END)) - - else: - raise AssertionError("unsupported special character %r" % (char,)) - - # unpack non-capturing groups - for i in range(len(subpattern))[::-1]: - op, av = subpattern[i] - if op is SUBPATTERN: - group, add_flags, del_flags, p = av - if group is None and not add_flags and not del_flags: - subpattern[i: i+1] = p - - return subpattern - -def _parse_flags(source, state, char): - sourceget = source.get - add_flags = 0 - del_flags = 0 - if char != "-": - while True: - flag = FLAGS[char] - if source.istext: - if char == 'L': - msg = "bad inline flags: cannot use 'L' flag with a str pattern" - raise source.error(msg) - else: - if char == 'u': - msg = "bad inline flags: cannot use 'u' flag with a bytes pattern" - raise source.error(msg) - add_flags |= flag - if (flag & TYPE_FLAGS) and (add_flags & TYPE_FLAGS) != flag: - msg = "bad inline flags: flags 'a', 'u' and 'L' are incompatible" - raise source.error(msg) - char = sourceget() - if char is None: - raise source.error("missing -, : or )") - if char in ")-:": - break - if char not in FLAGS: - msg = "unknown flag" if char.isalpha() else "missing -, : or )" - raise source.error(msg, len(char)) - if char == ")": - state.flags |= add_flags - return None - if add_flags & GLOBAL_FLAGS: - raise source.error("bad inline flags: cannot turn on global flag", 1) - if char == "-": - char = sourceget() - if char is None: - raise source.error("missing flag") - if char not in FLAGS: - msg = "unknown flag" if char.isalpha() else "missing flag" - raise source.error(msg, len(char)) - while True: - flag = FLAGS[char] - if flag & TYPE_FLAGS: - msg = "bad inline flags: cannot turn off flags 'a', 'u' and 'L'" - raise source.error(msg) - del_flags |= flag - char = sourceget() - if char is None: - raise source.error("missing :") - if char == ":": - break - if char not in FLAGS: - msg = "unknown flag" if char.isalpha() else "missing :" - raise source.error(msg, len(char)) - assert char == ":" - if del_flags & GLOBAL_FLAGS: - raise source.error("bad inline flags: cannot turn off global flag", 1) - if add_flags & del_flags: - raise source.error("bad inline flags: flag turned on and off", 1) - return add_flags, del_flags - -def fix_flags(src, flags): - # Check and fix flags according to the type of pattern (str or bytes) - if isinstance(src, str): - if flags & SRE_FLAG_LOCALE: - raise ValueError("cannot use LOCALE flag with a str pattern") - if not flags & SRE_FLAG_ASCII: - flags |= SRE_FLAG_UNICODE - elif flags & SRE_FLAG_UNICODE: - raise ValueError("ASCII and UNICODE flags are incompatible") - else: - if flags & SRE_FLAG_UNICODE: - raise ValueError("cannot use UNICODE flag with a bytes pattern") - if flags & SRE_FLAG_LOCALE and flags & SRE_FLAG_ASCII: - raise ValueError("ASCII and 
LOCALE flags are incompatible") - return flags - -def parse(str, flags=0, state=None): - # parse 're' pattern into list of (opcode, argument) tuples - - source = Tokenizer(str) - - if state is None: - state = State() - state.flags = flags - state.str = str - - p = _parse_sub(source, state, flags & SRE_FLAG_VERBOSE, 0) - p.state.flags = fix_flags(str, p.state.flags) - - if source.next is not None: - assert source.next == ")" - raise source.error("unbalanced parenthesis") - - for g in p.state.grouprefpos: - if g >= p.state.groups: - msg = "invalid group reference %d" % g - raise error(msg, str, p.state.grouprefpos[g]) - - if flags & SRE_FLAG_DEBUG: - p.dump() - - return p - -def parse_template(source, pattern): - # parse 're' replacement string into list of literals and - # group references - s = Tokenizer(source) - sget = s.get - result = [] - literal = [] - lappend = literal.append - def addliteral(): - if s.istext: - result.append(''.join(literal)) - else: - # The tokenizer implicitly decodes bytes objects as latin-1, we must - # therefore re-encode the final representation. - result.append(''.join(literal).encode('latin-1')) - del literal[:] - def addgroup(index, pos): - if index > pattern.groups: - raise s.error("invalid group reference %d" % index, pos) - addliteral() - result.append(index) - groupindex = pattern.groupindex - while True: - this = sget() - if this is None: - break # end of replacement string - if this[0] == "\\": - # group - c = this[1] - if c == "g": - if not s.match("<"): - raise s.error("missing <") - name = s.getuntil(">", "group name") - if not (name.isdecimal() and name.isascii()): - s.checkgroupname(name, 1) - try: - index = groupindex[name] - except KeyError: - raise IndexError("unknown group name %r" % name) from None - else: - index = int(name) - if index >= MAXGROUPS: - raise s.error("invalid group reference %d" % index, - len(name) + 1) - if not (name.isdecimal() and name.isascii()): - import warnings - warnings.warn( - "bad character in group name %s at position %d" % - (repr(name) if s.istext else ascii(name), - s.tell() - len(name) - 1), - DeprecationWarning, stacklevel=5 - ) - addgroup(index, len(name) + 1) - elif c == "0": - if s.next in OCTDIGITS: - this += sget() - if s.next in OCTDIGITS: - this += sget() - lappend(chr(int(this[1:], 8) & 0xff)) - elif c in DIGITS: - isoctal = False - if s.next in DIGITS: - this += sget() - if (c in OCTDIGITS and this[2] in OCTDIGITS and - s.next in OCTDIGITS): - this += sget() - isoctal = True - c = int(this[1:], 8) - if c > 0o377: - raise s.error('octal escape value %s outside of ' - 'range 0-0o377' % this, len(this)) - lappend(chr(c)) - if not isoctal: - addgroup(int(this[1:]), len(this) - 1) - else: - try: - this = chr(ESCAPES[this][1]) - except KeyError: - if c in ASCIILETTERS: - raise s.error('bad escape %s' % this, len(this)) from None - lappend(this) - else: - lappend(this) - addliteral() - return result diff --git a/worlds/ff4fe/FreeEnterpriseForAP/f4c/lark/common.py b/worlds/ff4fe/FreeEnterpriseForAP/f4c/lark/common.py deleted file mode 100644 index 1eebe3ddc570..000000000000 --- a/worlds/ff4fe/FreeEnterpriseForAP/f4c/lark/common.py +++ /dev/null @@ -1,123 +0,0 @@ -import re -from . 
import sre_parse - -class GrammarError(Exception): - pass - -class ParseError(Exception): - pass - - -class UnexpectedToken(ParseError): - def __init__(self, token, expected, seq, index): - self.token = token - self.expected = expected - self.line = getattr(token, 'line', '?') - self.column = getattr(token, 'column', '?') - - try: - context = ' '.join(['%r(%s)' % (t.value, t.type) for t in seq[index:index+5]]) - except AttributeError: - context = seq[index:index+5] - except TypeError: - context = "" - message = ("Unexpected token %r at line %s, column %s.\n" - "Expected: %s\n" - "Context: %s" % (token, self.line, self.column, expected, context)) - - super(UnexpectedToken, self).__init__(message) - - - -def is_terminal(sym): - return isinstance(sym, Terminal) or sym.isupper() or sym[0] == '$' - - -class LexerConf: - def __init__(self, tokens, ignore=(), postlex=None): - self.tokens = tokens - self.ignore = ignore - self.postlex = postlex - -class ParserConf: - def __init__(self, rules, callback, start): - assert all(len(r) == 4 for r in rules) - self.rules = rules - self.callback = callback - self.start = start - - - -class Pattern(object): - def __init__(self, value, flags=None): - self.value = value - self.flags = flags - - def __repr__(self): - return repr(self._get_flags() + self.value) - - # Pattern Hashing assumes all subclasses have a different priority! - def __hash__(self): - return hash((type(self), self.value)) - def __eq__(self, other): - return type(self) == type(other) and self.value == other.value - - def _get_flags(self): - if self.flags: - assert len(self.flags) == 1 - return '(?%s)' % self.flags - return '' - -class PatternStr(Pattern): - def to_regexp(self): - return self._get_flags() + re.escape(self.value) - - @property - def min_width(self): - return len(self.value) - max_width = min_width - -class PatternRE(Pattern): - def to_regexp(self): - return self._get_flags() + self.value - - @property - def min_width(self): - return sre_parse.parse(self.to_regexp()).getwidth()[0] - @property - def max_width(self): - return sre_parse.parse(self.to_regexp()).getwidth()[1] - -class TokenDef(object): - def __init__(self, name, pattern): - assert isinstance(pattern, Pattern), pattern - self.name = name - self.pattern = pattern - - def __repr__(self): - return '%s(%r, %r)' % (type(self).__name__, self.name, self.pattern) - - -class Terminal: - def __init__(self, data): - self.data = data - - def __repr__(self): - return '%r' % self.data - - def __eq__(self, other): - return isinstance(other, type(self)) and self.data == other.data - def __hash__(self): - return hash(self.data) - - -class Terminal_Regexp(Terminal): - def __init__(self, name, regexp): - Terminal.__init__(self, regexp) - self.name = name - self.match = re.compile(regexp).match - -class Terminal_Token(Terminal): - def match(self, other): - return self.data == other.type - diff --git a/worlds/ff4fe/FreeEnterpriseForAP/f4c/lark/grammars/common.g b/worlds/ff4fe/FreeEnterpriseForAP/f4c/lark/grammars/common.g deleted file mode 100644 index a54d49dd10f3..000000000000 --- a/worlds/ff4fe/FreeEnterpriseForAP/f4c/lark/grammars/common.g +++ /dev/null @@ -1,47 +0,0 @@ -// -// Numbers -// - -DIGIT: "0".."9" -HEXDIGIT: "a".."f"|"A".."F"|DIGIT - -INT: DIGIT+ -SIGNED_INT: ["+"|"-"] INT -DECIMAL: INT "." INT? | "." INT - -// float = /-?\d+(\.\d+)?([eE][+-]?\d+)?/ -_EXP: ("e"|"E") SIGNED_INT -FLOAT: INT _EXP | DECIMAL _EXP? 
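These are the shared terminals that grammars pull in with %import common.<NAME>. A minimal usage sketch of how the numeric terminals compose, written against the standalone lark package (whose API is close to, but not identical to, this vendored copy):

    from lark import Lark

    numbers = Lark(r"""
        start: SIGNED_NUMBER ("," SIGNED_NUMBER)*

        %import common.SIGNED_NUMBER
        %import common.WS
        %ignore WS
    """, parser="lalr")

    print(numbers.parse("1, -2.5, 3e4").pretty())
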
- -NUMBER: FLOAT | INT -SIGNED_NUMBER: ["+"|"-"] NUMBER - -// -// Strings -// -STRING_INNER: ("\\\""|/[^"]/) -ESCAPED_STRING: "\"" STRING_INNER* "\"" - - -// -// Names (Variables) -// -LCASE_LETTER: "a".."z" -UCASE_LETTER: "A".."Z" - -LETTER: UCASE_LETTER | LCASE_LETTER -WORD: LETTER+ - -CNAME: ("_"|LETTER) ("_"|LETTER|DIGIT)* - - -// -// Whitespace -// -WS_INLINE: (" "|/\t/)+ -WS: /[ \t\f\r\n]/+ - -CR : /\r/ -LF : /\n/ -NEWLINE: (CR? LF)+ - diff --git a/worlds/ff4fe/FreeEnterpriseForAP/f4c/lark/indenter.py b/worlds/ff4fe/FreeEnterpriseForAP/f4c/lark/indenter.py deleted file mode 100644 index a5f107d6203b..000000000000 --- a/worlds/ff4fe/FreeEnterpriseForAP/f4c/lark/indenter.py +++ /dev/null @@ -1,52 +0,0 @@ -"Provides Indentation services for languages with indentation similar to Python" - -from .lexer import Token - -class Indenter: - def __init__(self): - self.paren_level = 0 - self.indent_level = [0] - - def handle_NL(self, token): - if self.paren_level > 0: - return - - yield token - - indent_str = token.rsplit('\n', 1)[1] # Tabs and spaces - indent = indent_str.count(' ') + indent_str.count('\t') * self.tab_len - - if indent > self.indent_level[-1]: - self.indent_level.append(indent) - yield Token.new_borrow_pos(self.INDENT_type, indent_str, token) - else: - while indent < self.indent_level[-1]: - self.indent_level.pop() - yield Token.new_borrow_pos(self.DEDENT_type, indent_str, token) - - assert indent == self.indent_level[-1], '%s != %s' % (indent, self.indent_level[-1]) - - def process(self, stream): - for token in stream: - if token.type == self.NL_type: - for t in self.handle_NL(token): - yield t - else: - yield token - - if token.type in self.OPEN_PAREN_types: - self.paren_level += 1 - elif token.type in self.CLOSE_PAREN_types: - self.paren_level -= 1 - assert self.paren_level >= 0 - - while len(self.indent_level) > 1: - self.indent_level.pop() - yield Token(self.DEDENT_type, '') - - assert self.indent_level == [0], self.indent_level - - # XXX Hack for ContextualLexer. Maybe there's a more elegant solution? - @property - def always_accept(self): - return (self.NL_type,) diff --git a/worlds/ff4fe/FreeEnterpriseForAP/f4c/lark/lark.py b/worlds/ff4fe/FreeEnterpriseForAP/f4c/lark/lark.py deleted file mode 100644 index a39a8477e801..000000000000 --- a/worlds/ff4fe/FreeEnterpriseForAP/f4c/lark/lark.py +++ /dev/null @@ -1,201 +0,0 @@ -from __future__ import absolute_import - -import os -import time -from collections import defaultdict - -from .utils import STRING_TYPE -from .load_grammar import load_grammar -from .tree import Tree -from .common import LexerConf, ParserConf - -from .lexer import Lexer -from .parse_tree_builder import ParseTreeBuilder -from .parser_frontends import get_frontend - -class LarkOptions(object): - """Specifies the options for Lark - - """ - OPTIONS_DOC = """ - parser - Decides which parser engine to use, "earley" or "lalr". (Default: "earley") - Note: "lalr" requires a lexer - - lexer - Decides whether or not to use a lexer stage - None: Don't use a lexer (scanless, only works with parser="earley") - "standard": Use a standard lexer - "contextual": Stronger lexer (only works with parser="lalr") - "auto" (default): Choose for me based on grammar and parser - - ambiguity - Decides how to handle ambiguity in the parse. 
Only relevant if parser="earley" - "resolve": The parser will automatically choose the simplest derivation - (it chooses consistently: greedy for tokens, non-greedy for rules) - "explicit": The parser will return all derivations wrapped in "_ambig" tree nodes (i.e. a forest). - - transformer - Applies the transformer to every parse tree - debug - Affects verbosity (default: False) - keep_all_tokens - Don't automagically remove "punctuation" tokens (default: False) - cache_grammar - Cache the Lark grammar (Default: False) - postlex - Lexer post-processing (Default: None) - start - The start symbol (Default: start) - profile - Measure run-time usage in Lark. Read results from the profiler proprety (Default: False) - propagate_positions - Experimental. Don't use yet. - """ - __doc__ += OPTIONS_DOC - def __init__(self, options_dict): - o = dict(options_dict) - - self.debug = bool(o.pop('debug', False)) - self.keep_all_tokens = bool(o.pop('keep_all_tokens', False)) - self.tree_class = o.pop('tree_class', Tree) - self.cache_grammar = o.pop('cache_grammar', False) - self.postlex = o.pop('postlex', None) - self.parser = o.pop('parser', 'earley') - self.lexer = o.pop('lexer', 'auto') - self.transformer = o.pop('transformer', None) - self.start = o.pop('start', 'start') - self.profile = o.pop('profile', False) - self.ambiguity = o.pop('ambiguity', 'auto') - self.propagate_positions = o.pop('propagate_positions', False) - - assert self.parser in ('earley', 'lalr', None) - - if self.parser == 'earley' and self.transformer: - raise ValueError('Cannot specify an auto-transformer when using the Earley algorithm.' - 'Please use your transformer on the resulting parse tree, or use a different algorithm (i.e. lalr)') - - if o: - raise ValueError("Unknown options: %s" % o.keys()) - - -class Profiler: - def __init__(self): - self.total_time = defaultdict(float) - self.cur_section = '__init__' - self.last_enter_time = time.time() - - def enter_section(self, name): - cur_time = time.time() - self.total_time[self.cur_section] += cur_time - self.last_enter_time - self.last_enter_time = cur_time - self.cur_section = name - - def make_wrapper(self, name, f): - def wrapper(*args, **kwargs): - last_section = self.cur_section - self.enter_section(name) - try: - return f(*args, **kwargs) - finally: - self.enter_section(last_section) - - return wrapper - - -class Lark: - def __init__(self, grammar, **options): - """ - grammar : a string or file-object containing the grammar spec (using Lark's ebnf syntax) - options : a dictionary controlling various aspects of Lark. 
- """ - self.options = LarkOptions(options) - - # Some, but not all file-like objects have a 'name' attribute - try: - source = grammar.name - except AttributeError: - source = '' - cache_file = "larkcache_%s" % str(hash(grammar)%(2**32)) - else: - cache_file = "larkcache_%s" % os.path.basename(source) - - # Drain file-like objects to get their contents - try: - read = grammar.read - except AttributeError: - pass - else: - grammar = read() - - assert isinstance(grammar, STRING_TYPE) - - if self.options.cache_grammar: - raise NotImplementedError("Not available yet") - - assert not self.options.profile, "Feature temporarily disabled" - self.profiler = Profiler() if self.options.profile else None - - if self.options.lexer == 'auto': - if self.options.parser == 'lalr': - self.options.lexer = 'standard' - elif self.options.parser == 'earley': - self.options.lexer = 'dynamic' - else: - assert False, self.options.parser - lexer = self.options.lexer - assert lexer in ('standard', 'contextual', 'dynamic', None) - - if self.options.ambiguity == 'auto': - if self.options.parser == 'earley': - self.options.ambiguity = 'resolve' - else: - assert self.options.parser == 'earley' - assert self.options.ambiguity in ('resolve', 'explicit', 'auto') - - # Parse the grammar file and compose the grammars (TODO) - self.grammar = load_grammar(grammar, source) - - # Compile the EBNF grammar into BNF - tokens, self.rules, self.ignore_tokens = self.grammar.compile(lexer=bool(lexer), start=self.options.start) - - self.lexer_conf = LexerConf(tokens, self.ignore_tokens, self.options.postlex) - - if self.options.parser: - self.parser = self._build_parser() - elif lexer: - self.lexer = self._build_lexer() - - if self.profiler: self.profiler.enter_section('outside_lark') - - __init__.__doc__ += "\nOPTIONS:" + LarkOptions.OPTIONS_DOC - - def _build_lexer(self): - return Lexer(self.lexer_conf.tokens, ignore=self.lexer_conf.ignore) - - def _build_parser(self): - self.parser_class = get_frontend(self.options.parser, self.options.lexer) - self.parse_tree_builder = ParseTreeBuilder(self.options.tree_class, self.options.propagate_positions, self.options.keep_all_tokens) - rules, callback = self.parse_tree_builder.create_tree_builder(self.rules, self.options.transformer) - if self.profiler: - for f in dir(callback): - if not (f.startswith('__') and f.endswith('__')): - setattr(callback, f, self.profiler.make_wrapper('transformer', getattr(callback, f))) - parser_conf = ParserConf(rules, callback, self.options.start) - - return self.parser_class(self.lexer_conf, parser_conf, options=self.options) - - - def lex(self, text): - if not hasattr(self, 'lexer'): - self.lexer = self._build_lexer() - stream = self.lexer.lex(text) - if self.options.postlex: - return self.options.postlex.process(stream) - else: - return stream - - def parse(self, text): - return self.parser.parse(text) - - # if self.profiler: - # self.profiler.enter_section('lex') - # l = list(self.lex(text)) - # self.profiler.enter_section('parse') - # try: - # return self.parser.parse(l) - # finally: - # self.profiler.enter_section('outside_lark') - # else: - # l = list(self.lex(text)) - # return self.parser.parse(l) - diff --git a/worlds/ff4fe/FreeEnterpriseForAP/f4c/lark/lexer.py b/worlds/ff4fe/FreeEnterpriseForAP/f4c/lark/lexer.py deleted file mode 100644 index 4e6d5b9fef45..000000000000 --- a/worlds/ff4fe/FreeEnterpriseForAP/f4c/lark/lexer.py +++ /dev/null @@ -1,244 +0,0 @@ -## Lexer Implementation - -import re - -from .utils import Str, classify -from .common import 
is_terminal, PatternStr, PatternRE, TokenDef - -class LexError(Exception): - pass - -class UnexpectedInput(LexError): - def __init__(self, seq, lex_pos, line, column): - context = seq[lex_pos:lex_pos+5] - message = "No token defined for: '%s' in %r at line %d" % (seq[lex_pos], context, line) - - super(UnexpectedInput, self).__init__(message) - - self.line = line - self.column = column - self.context = context - -class Token(Str): - def __new__(cls, type_, value, pos_in_stream=None, line=None, column=None): - inst = Str.__new__(cls, value) - inst.type = type_ - inst.pos_in_stream = pos_in_stream - inst.value = value - inst.line = line - inst.column = column - return inst - - @classmethod - def new_borrow_pos(cls, type_, value, borrow_t): - return cls(type_, value, borrow_t.pos_in_stream, line=borrow_t.line, column=borrow_t.column) - - def __repr__(self): - return 'Token(%s, %r)' % (self.type, self.value) - - def __deepcopy__(self, memo): - return Token(self.type, self.value, self.pos_in_stream, self.line, self.column) - -class Regex: - def __init__(self, pattern, flags=()): - self.pattern = pattern - self.flags = flags - -def _regexp_has_newline(r): - return '\n' in r or '\\n' in r or ('(?s)' in r and '.' in r) - -def _create_unless_callback(strs): - mres = build_mres(strs, match_whole=True) - def unless_callback(t): - # if t in strs: - # t.type = strs[t] - for mre, type_from_index in mres: - m = mre.match(t.value) - if m: - value = m.group(0) - t.type = type_from_index[m.lastindex] - break - return t - return unless_callback - -def _create_unless(tokens): - tokens_by_type = classify(tokens, lambda t: type(t.pattern)) - assert len(tokens_by_type) <= 2, tokens_by_type.keys() - embedded_strs = set() - delayed_strs = [] - callback = {} - for retok in tokens_by_type.get(PatternRE, []): - unless = [] # {} - for strtok in tokens_by_type.get(PatternStr, []): - s = strtok.pattern.value - m = re.match(retok.pattern.to_regexp(), s) - if m and m.group(0) == s: - if strtok.pattern.flags: - delayed_strs.append(strtok) - embedded_strs.add(strtok.name) - unless.append(strtok) - if unless: - callback[retok.name] = _create_unless_callback(unless) - - tokens = [t for t in tokens if t.name not in embedded_strs] + delayed_strs - return tokens, callback - - -def _build_mres(tokens, max_size, match_whole): - # Python sets an unreasonable group limit (currently 100) in its re module - # Worse, the only way to know we reached it is by catching an AssertionError! - # This function recursively tries less and less groups until it's successful. - postfix = '$' if match_whole else '' - mres = [] - while tokens: - try: - mre = re.compile(u'|'.join(u'(?P<%s>%s)'%(t.name, t.pattern.to_regexp()+postfix) for t in tokens[:max_size])) - except AssertionError: # Yes, this is what Python provides us.. :/ - return _build_mres(tokens, max_size//2, match_whole) - - mres.append((mre, {i:n for n,i in mre.groupindex.items()} )) - tokens = tokens[max_size:] - return mres - -def build_mres(tokens, match_whole=False): - return _build_mres(tokens, len(tokens), match_whole) - - -class Lexer(object): - def __init__(self, tokens, ignore=()): - assert all(isinstance(t, TokenDef) for t in tokens), tokens - - self.ignore = ignore - self.newline_char = '\n' - tokens = list(tokens) - - # Sanitization - for t in tokens: - try: - re.compile(t.pattern.to_regexp()) - except: - raise LexError("Cannot compile token %s: %s" % (t.name, t.pattern)) - - if t.pattern.min_width == 0: - raise LexError("Lexer does not allow zero-width tokens. 
(%s: %s)" % (t.name, t.pattern)) - - token_names = {t.name for t in tokens} - for t in ignore: - if t not in token_names: - raise LexError("Token '%s' was marked to ignore but it is not defined!" % t) - - # Init - self.newline_types = [t.name for t in tokens if _regexp_has_newline(t.pattern.to_regexp())] - self.ignore_types = [t for t in ignore] - - tokens.sort(key=lambda x:x.pattern.max_width, reverse=True) - - tokens, self.callback = _create_unless(tokens) - assert all(self.callback.values()) - - self.tokens = tokens - - self.mres = build_mres(tokens) - - - def lex(self, stream): - lex_pos = 0 - line = 1 - col_start_pos = 0 - newline_types = list(self.newline_types) - ignore_types = list(self.ignore_types) - while True: - for mre, type_from_index in self.mres: - m = mre.match(stream, lex_pos) - if m: - value = m.group(0) - type_ = type_from_index[m.lastindex] - to_yield = type_ not in ignore_types - - if to_yield: - t = Token(type_, value, lex_pos, line, lex_pos - col_start_pos) - end_col = t.column + len(value) - if t.type in self.callback: - t = self.callback[t.type](t) - - if type_ in newline_types: - newlines = value.count(self.newline_char) - if newlines: - line += newlines - last_newline_index = value.rindex(self.newline_char) + 1 - col_start_pos = lex_pos + last_newline_index - end_col = len(value) - last_newline_index - - if to_yield: - t.end_line = line - t.end_col = end_col - yield t - - lex_pos += len(value) - break - else: - if lex_pos < len(stream): - raise UnexpectedInput(stream, lex_pos, line, lex_pos - col_start_pos) - break - - -class ContextualLexer: - def __init__(self, tokens, states, ignore=(), always_accept=()): - tokens_by_name = {} - for t in tokens: - assert t.name not in tokens_by_name, t - tokens_by_name[t.name] = t - - lexer_by_tokens = {} - self.lexers = {} - for state, accepts in states.items(): - key = frozenset(accepts) - try: - lexer = lexer_by_tokens[key] - except KeyError: - accepts = set(accepts) | set(ignore) | set(always_accept) - state_tokens = [tokens_by_name[n] for n in accepts if is_terminal(n) and n!='$end'] - lexer = Lexer(state_tokens, ignore=ignore) - lexer_by_tokens[key] = lexer - - self.lexers[state] = lexer - - self.root_lexer = Lexer(tokens, ignore=ignore) - - self.set_parser_state(None) # Needs to be set on the outside - - def set_parser_state(self, state): - self.parser_state = state - - def lex(self, stream): - lex_pos = 0 - line = 1 - col_start_pos = 0 - newline_types = list(self.root_lexer.newline_types) - ignore_types = list(self.root_lexer.ignore_types) - while True: - lexer = self.lexers[self.parser_state] - for mre, type_from_index in lexer.mres: - m = mre.match(stream, lex_pos) - if m: - value = m.group(0) - type_ = type_from_index[m.lastindex] - if type_ not in ignore_types: - t = Token(type_, value, lex_pos, line, lex_pos - col_start_pos) - if t.type in lexer.callback: - t = lexer.callback[t.type](t) - yield t - - if type_ in newline_types: - newlines = value.count(lexer.newline_char) - if newlines: - line += newlines - col_start_pos = lex_pos + value.rindex(lexer.newline_char) - lex_pos += len(value) - break - else: - if lex_pos < len(stream): - print("Allowed tokens:", lexer.tokens) - raise UnexpectedInput(stream, lex_pos, line, lex_pos - col_start_pos) - break - diff --git a/worlds/ff4fe/FreeEnterpriseForAP/f4c/lark/load_grammar.py b/worlds/ff4fe/FreeEnterpriseForAP/f4c/lark/load_grammar.py deleted file mode 100644 index fea3ec2cbdeb..000000000000 --- a/worlds/ff4fe/FreeEnterpriseForAP/f4c/lark/load_grammar.py +++ /dev/null 
@@ -1,631 +0,0 @@ -"Parses and creates Grammar objects" - -import os.path -import pkgutil -from itertools import chain -import re -from ast import literal_eval -from copy import deepcopy - -from .lexer import Token, UnexpectedInput - -from .parse_tree_builder import ParseTreeBuilder -from .parser_frontends import LALR -from .parsers.lalr_parser import UnexpectedToken -from .common import is_terminal, GrammarError, LexerConf, ParserConf, PatternStr, PatternRE, TokenDef - -from .tree import Tree as T, Transformer, InlineTransformer, Visitor - -__path__ = os.path.dirname(__name__) -IMPORT_PATHS = [os.path.join(__path__, 'grammars')] - -_TOKEN_NAMES = { - '.': 'DOT', - ',': 'COMMA', - ':': 'COLON', - ';': 'SEMICOLON', - '+': 'PLUS', - '-': 'MINUS', - '*': 'STAR', - '/': 'SLASH', - '\\': 'BACKSLASH', - '|': 'VBAR', - '?': 'QMARK', - '!': 'BANG', - '@': 'AT', - '#': 'HASH', - '$': 'DOLLAR', - '%': 'PERCENT', - '^': 'CIRCUMFLEX', - '&': 'AMPERSAND', - '_': 'UNDERSCORE', - '<': 'LESSTHAN', - '>': 'MORETHAN', - '=': 'EQUAL', - '"': 'DBLQUOTE', - '\'': 'QUOTE', - '`': 'BACKQUOTE', - '~': 'TILDE', - '(': 'LPAR', - ')': 'RPAR', - '{': 'LBRACE', - '}': 'RBRACE', - '[': 'LSQB', - ']': 'RSQB', - '\n': 'NEWLINE', - '\r\n': 'CRLF', - '\t': 'TAB', - ' ': 'SPACE', -} - -# Grammar Parser -TOKENS = { - '_LPAR': r'\(', - '_RPAR': r'\)', - '_LBRA': r'\[', - '_RBRA': r'\]', - 'OP': '[+*][?]?|[?](?![a-z])', - '_COLON': ':', - '_OR': r'\|', - '_DOT': r'\.', - 'RULE': '!?[_?]?[a-z][_a-z0-9]*', - 'TOKEN': '_?[A-Z][_A-Z0-9]*', - 'STRING': r'"(\\"|\\\\|[^"\n])*?"i?', - 'REGEXP': r'/(?!/)(\\/|\\\\|[^/\n])*?/i?', - '_NL': r'(\r?\n)+\s*', - 'WS': r'[ \t]+', - 'COMMENT': r'//[^\n]*', - '_TO': '->', - '_IGNORE': r'%ignore', - '_IMPORT': r'%import', - 'NUMBER': r'\d+', -} - -RULES = { - 'start': ['_list'], - '_list': ['_item', '_list _item'], - '_item': ['rule', 'token', 'statement', '_NL'], - - 'rule': ['RULE _COLON expansions _NL', - 'RULE _DOT NUMBER _COLON expansions _NL'], - 'expansions': ['alias', - 'expansions _OR alias', - 'expansions _NL _OR alias'], - - '?alias': ['expansion _TO RULE', 'expansion'], - 'expansion': ['_expansion'], - - '_expansion': ['', '_expansion expr'], - - '?expr': ['atom', - 'atom OP'], - - '?atom': ['_LPAR expansions _RPAR', - 'maybe', - 'name', - 'literal', - 'range'], - - '?name': ['RULE', 'TOKEN'], - - 'maybe': ['_LBRA expansions _RBRA'], - 'range': ['STRING _DOT _DOT STRING'], - - 'token': ['TOKEN _COLON expansions _NL'], - 'statement': ['ignore', 'import'], - 'ignore': ['_IGNORE expansions _NL'], - 'import': ['_IMPORT import_args _NL', - '_IMPORT import_args _TO TOKEN'], - 'import_args': ['_import_args'], - '_import_args': ['name', '_import_args _DOT name'], - - 'literal': ['REGEXP', 'STRING'], -} - - -class EBNF_to_BNF(InlineTransformer): - def __init__(self): - self.new_rules = {} - self.rules_by_expr = {} - self.prefix = 'anon' - self.i = 0 - self.rule_options = None - - def _add_recurse_rule(self, type_, expr): - if expr in self.rules_by_expr: - return self.rules_by_expr[expr] - - new_name = '__%s_%s_%d' % (self.prefix, type_, self.i) - self.i += 1 - t = Token('RULE', new_name, -1) - self.new_rules[new_name] = T('expansions', - [T('expansion', [expr]), T('expansion', [t, expr])]), self.rule_options - self.rules_by_expr[expr] = t - return t - - def expr(self, rule, op): - if op.value == '?': - return T('expansions', [rule, T('expansion', [])]) - elif op.value == '+': - # a : b c+ d - # --> - # a : b _c d - # _c : _c c | c; - return self._add_recurse_rule('plus', rule) - elif op.value 
== '*': - # a : b c* d - # --> - # a : b _c? d - # _c : _c c | c; - new_name = self._add_recurse_rule('star', rule) - return T('expansions', [new_name, T('expansion', [])]) - assert False, op - - -class SimplifyRule_Visitor(Visitor): - - @staticmethod - def _flatten(tree): - while True: - to_expand = [i for i, child in enumerate(tree.children) - if isinstance(child, T) and child.data == tree.data] - if not to_expand: - break - tree.expand_kids_by_index(*to_expand) - - def expansion(self, tree): - # rules_list unpacking - # a : b (c|d) e - # --> - # a : b c e | b d e - # - # In AST terms: - # expansion(b, expansions(c, d), e) - # --> - # expansions( expansion(b, c, e), expansion(b, d, e) ) - - while True: - self._flatten(tree) - - for i, child in enumerate(tree.children): - if isinstance(child, T) and child.data == 'expansions': - tree.data = 'expansions' - tree.children = [self.visit(T('expansion', [option if i == j else other - for j, other in enumerate(tree.children)])) - for option in child.children] - break - else: - break - - def alias(self, tree): - rule, alias_name = tree.children - if rule.data == 'expansions': - aliases = [] - for child in tree.children[0].children: - aliases.append(T('alias', [child, alias_name])) - tree.data = 'expansions' - tree.children = aliases - - expansions = _flatten - - -class RuleTreeToText(Transformer): - def expansions(self, x): - return x - - def expansion(self, symbols): - return [sym.value for sym in symbols], None - - def alias(self, x): - (expansion, _alias), alias = x - assert _alias is None, (alias, expansion, '-', _alias) - return expansion, alias.value - - -class CanonizeTree(InlineTransformer): - def maybe(self, expr): - return T('expr', [expr, Token('OP', '?', -1)]) - - def tokenmods(self, *args): - if len(args) == 1: - return list(args) - tokenmods, value = args - return tokenmods + [value] - - -class ExtractAnonTokens(InlineTransformer): - "Create a unique list of anonymous tokens. 
Attempt to give meaningful names to them when we add them" - - def __init__(self, tokens): - self.tokens = tokens - self.token_set = {td.name for td in self.tokens} - self.token_reverse = {td.pattern: td for td in tokens} - self.i = 0 - - def pattern(self, p): - value = p.value - if p in self.token_reverse and p.flags != self.token_reverse[p].pattern.flags: - raise GrammarError(u'Conflicting flags for the same terminal: %s' % p) - - if isinstance(p, PatternStr): - try: - # If already defined, use the user-defined token name - token_name = self.token_reverse[p].name - except KeyError: - # Try to assign an indicative anon-token name, otherwise use a numbered name - try: - token_name = _TOKEN_NAMES[value] - except KeyError: - if value.isalnum() and value[0].isalpha() and ('__' + value.upper()) not in self.token_set: - token_name = '%s%d' % (value.upper(), self.i) - try: - # Make sure we don't have unicode in our token names - token_name.encode('ascii') - except UnicodeEncodeError: - token_name = 'ANONSTR_%d' % self.i - else: - token_name = 'ANONSTR_%d' % self.i - self.i += 1 - - token_name = '__' + token_name - - elif isinstance(p, PatternRE): - if p in self.token_reverse: # Kind of a wierd placement.name - token_name = self.token_reverse[p].name - else: - token_name = 'ANONRE_%d' % self.i - self.i += 1 - else: - assert False, p - - if token_name not in self.token_set: - assert p not in self.token_reverse - self.token_set.add(token_name) - tokendef = TokenDef(token_name, p) - self.token_reverse[p] = tokendef - self.tokens.append(tokendef) - - return Token('TOKEN', token_name, -1) - - -def _literal_to_pattern(literal): - v = literal.value - if v[-1] in 'i': - flags = v[-1] - v = v[:-1] - else: - flags = None - - assert v[0] == v[-1] and v[0] in '"/' - x = v[1:-1].replace("'", r"\'") - s = literal_eval("u'''%s'''" % x) - return {'STRING': PatternStr, - 'REGEXP': PatternRE}[literal.type](s, flags) - - -class PrepareLiterals(InlineTransformer): - def literal(self, literal): - return T('pattern', [_literal_to_pattern(literal)]) - - def range(self, start, end): - assert start.type == end.type == 'STRING' - start = start.value[1:-1] - end = end.value[1:-1] - assert len(start) == len(end) == 1 - regexp = '[%s-%s]' % (start, end) - return T('pattern', [PatternRE(regexp)]) - - -class SplitLiterals(InlineTransformer): - def pattern(self, p): - if isinstance(p, PatternStr) and len(p.value) > 1: - return T('expansion', [T('pattern', [PatternStr(ch, flags=p.flags)]) for ch in p.value]) - return T('pattern', [p]) - - -class TokenTreeToPattern(Transformer): - def pattern(self, ps): - p, = ps - return p - - def expansion(self, items): - if len(items) == 1: - return items[0] - if len({i.flags for i in items}) > 1: - raise GrammarError("Lark doesn't support joining tokens with conflicting flags!") - return PatternRE(''.join(i.to_regexp() for i in items), items[0].flags) - - def expansions(self, exps): - if len(exps) == 1: - return exps[0] - assert all(i.flags is None for i in exps) - return PatternRE('(?:%s)' % ('|'.join(i.to_regexp() for i in exps))) - - def expr(self, args): - inner, op = args - return PatternRE('(?:%s)%s' % (inner.to_regexp(), op), inner.flags) - - -def _interleave(l, item): - for e in l: - yield e - if isinstance(e, T): - if e.data in ('literal', 'range'): - yield item - elif is_terminal(e): - yield item - - -def _choice_of_rules(rules): - return T('expansions', [T('expansion', [Token('RULE', name)]) for name in rules]) - - -def dict_update_safe(d1, d2): - for k, v in d2.items(): - assert k 
not in d1 - d1[k] = v - - -class Grammar: - def __init__(self, rule_defs, token_defs, ignore): - self.token_defs = token_defs - self.rule_defs = rule_defs - self.ignore = ignore - - def _prepare_scanless_grammar(self, start): - # XXX Pretty hacky! There should be a better way to write this method.. - - rule_defs = deepcopy(self.rule_defs) - term_defs = self.token_defs - - # Implement the "%ignore" feature without a lexer.. - terms_to_ignore = {name: '__' + name for name in self.ignore} - if terms_to_ignore: - assert set(terms_to_ignore) <= {name for name, t in term_defs} - term_defs = [(terms_to_ignore.get(name, name), t) for name, t in term_defs] - expr = Token('RULE', '__ignore') - for r, tree, _o in rule_defs: - for exp in tree.find_data('expansion'): - exp.children = list(_interleave(exp.children, expr)) - if r == start: - exp.children = [expr] + exp.children - for exp in tree.find_data('expr'): - exp.children[0] = T('expansion', list(_interleave(exp.children[:1], expr))) - - _ignore_tree = T('expr', [_choice_of_rules(terms_to_ignore.values()), Token('OP', '?')]) - rule_defs.append(('__ignore', _ignore_tree, None)) - - # Convert all tokens to rules - new_terminal_names = {name: '__token_' + name for name, tree in term_defs} - - for name, tree, options in rule_defs: - for exp in chain(tree.find_data('expansion'), tree.find_data('expr')): - for i, sym in enumerate(exp.children): - if sym in new_terminal_names: - exp.children[i] = Token(sym.type, new_terminal_names[sym]) - - for name, tree in term_defs: - if name.startswith('_'): - options = RuleOptions(filter_out=True) - else: - options = RuleOptions(keep_all_tokens=True, create_token=name) - - name = new_terminal_names[name] - inner_name = name + '_inner' - rule_defs.append((name, _choice_of_rules([inner_name]), None)) - rule_defs.append((inner_name, tree, options)) - - return [], rule_defs - - def compile(self, lexer=False, start=None): - if not lexer: - token_defs, rule_defs = self._prepare_scanless_grammar(start) - else: - token_defs = list(self.token_defs) - rule_defs = self.rule_defs - - # ================= - # Compile Tokens - # ================= - - # Convert token-trees to strings/regexps - transformer = PrepareLiterals() * TokenTreeToPattern() - tokens = [TokenDef(name, transformer.transform(token_tree)) - for name, token_tree in token_defs] - - # ================= - # Compile Rules - # ================= - ebnf_to_bnf = EBNF_to_BNF() - simplify_rule = SimplifyRule_Visitor() - - transformer = PrepareLiterals() - if not lexer: - transformer *= SplitLiterals() - transformer *= ExtractAnonTokens(tokens) # Adds to tokens - - rules = {} - for name, rule_tree, options in rule_defs: - assert name not in rules, name - ebnf_to_bnf.rule_options = RuleOptions( - keep_all_tokens=True) if options and options.keep_all_tokens else None - tree = transformer.transform(rule_tree) - rules[name] = ebnf_to_bnf.transform(tree), options - - dict_update_safe(rules, ebnf_to_bnf.new_rules) - - for tree, _o in rules.values(): - simplify_rule.visit(tree) - - rule_tree_to_text = RuleTreeToText() - rules = {origin: (rule_tree_to_text.transform(tree), options) for origin, (tree, options) in rules.items()} - - return tokens, rules, self.ignore - - -class RuleOptions: - def __init__(self, keep_all_tokens=False, expand1=False, create_token=None, filter_out=False, priority=None): - self.keep_all_tokens = keep_all_tokens - self.expand1 = expand1 - self.create_token = create_token # used for scanless postprocessing - self.priority = priority - - self.filter_out = 
filter_out # remove this rule from the tree - # used for "token"-rules in scanless - - @classmethod - def from_rule(cls, name, *x): - if len(x) > 1: - priority, expansions = x - priority = int(priority) - else: - expansions, = x - priority = None - - keep_all_tokens = name.startswith('!') - name = name.lstrip('!') - expand1 = name.startswith('?') - name = name.lstrip('?') - - return name, expansions, cls(keep_all_tokens, expand1, priority=priority) - - -_imported_grammars = {} - - -def import_grammar(grammar_path): - if grammar_path not in _imported_grammars: - for import_path in IMPORT_PATHS: - text = pkgutil.get_data(__name__, os.path.join(import_path, grammar_path)).decode() - grammar = load_grammar(text, grammar_path) - _imported_grammars[grammar_path] = grammar - - return _imported_grammars[grammar_path] - - -def resolve_token_references(token_defs): - # TODO Cycles detection - # TODO Solve with transitive closure (maybe) - - token_dict = dict(token_defs) - assert len(token_dict) == len(token_defs), "Same name defined twice?" - - while True: - changed = False - for name, token_tree in token_defs: - for exp in chain(token_tree.find_data('expansion'), token_tree.find_data('expr')): - for i, item in enumerate(exp.children): - if isinstance(item, Token): - if item.type == 'RULE': - raise GrammarError("Rules aren't allowed inside tokens (%s in %s)" % (item, name)) - if item.type == 'TOKEN': - exp.children[i] = token_dict[item] - changed = True - if not changed: - break - - -class GrammarLoader: - def __init__(self): - tokens = [TokenDef(name, PatternRE(value)) for name, value in TOKENS.items()] - - rules = [RuleOptions.from_rule(name, x) for name, x in RULES.items()] - d = {r: ([(x.split(), None) for x in xs], o) for r, xs, o in rules} - rules, callback = ParseTreeBuilder(T).create_tree_builder(d, None) - lexer_conf = LexerConf(tokens, ['WS', 'COMMENT']) - parser_conf = ParserConf(rules, callback, 'start') - self.parser = LALR(lexer_conf, parser_conf) - - self.canonize_tree = CanonizeTree() - - def load_grammar(self, grammar_text, name=''): - "Parse grammar_text, verify, and create Grammar object. Display nice messages on error." 
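RuleOptions.from_rule above is where the '!' (keep all tokens) and '?' (inline single-child rules) name prefixes are interpreted. A small sketch of what those prefixes mean in practice, again written against the standalone lark package, which kept the same convention:

    from lark import Lark

    pairs = Lark(r"""
        start: pair+
        pair: key "=" value
        ?key: WORD          // '?': a single-child rule is inlined into its parent
        !value: WORD ";"    // '!': punctuation tokens like ";" are kept in the tree

        %import common.WORD
        %import common.WS
        %ignore WS
    """, parser="lalr")

    print(pairs.parse("a=b; c=d;").pretty())
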
- - try: - tree = self.canonize_tree.transform(self.parser.parse(grammar_text + '\n')) - except UnexpectedInput as e: - raise GrammarError("Unexpected input %r at line %d column %d in %s" % (e.context, e.line, e.column, name)) - except UnexpectedToken as e: - if e.expected == ['_COLON']: - raise GrammarError("Missing colon at line %s column %s" % (e.line, e.column)) - elif e.expected == ['RULE']: - raise GrammarError("Missing alias at line %s column %s" % (e.line, e.column)) - elif 'STRING' in e.expected: - raise GrammarError("Expecting a value at line %s column %s" % (e.line, e.column)) - elif e.expected == ['_OR']: - raise GrammarError( - "Newline without starting a new option (Expecting '|') at line %s column %s" % (e.line, e.column)) - raise - - # Extract grammar items - - token_defs = [c.children for c in tree.children if c.data == 'token'] - rule_defs = [c.children for c in tree.children if c.data == 'rule'] - statements = [c.children for c in tree.children if c.data == 'statement'] - assert len(token_defs) + len(rule_defs) + len(statements) == len(tree.children) - - token_defs = [(name.value, t) for name, t in token_defs] - - # Execute statements - ignore = [] - for (stmt,) in statements: - if stmt.data == 'ignore': - expansions, = stmt.children - ignore.append(expansions) - elif stmt.data == 'import': - dotted_path = stmt.children[0].children - name = stmt.children[1] if len(stmt.children) > 1 else dotted_path[-1] - grammar_path = os.path.join(*dotted_path[:-1]) + '.g' - g = import_grammar(grammar_path) - token_tree = dict(g.token_defs)[dotted_path[-1]] - token_defs.append([name.value, token_tree]) - else: - assert False, stmt - - # Verify correctness 1 - for name, _ in token_defs: - if name.startswith('__'): - raise GrammarError('Names starting with double-underscore are reserved (Error at %s)' % name) - - # Handle ignore tokens - ignore_defs = [('__IGNORE_%d' % i, t) for i, t in enumerate(ignore)] - ignore_names = [name for name, _ in ignore_defs] - token_defs += ignore_defs - - # Verify correctness 2 - token_names = set() - for name, _ in token_defs: - if name in token_names: - raise GrammarError("Token '%s' defined more than once" % name) - token_names.add(name) - - # Resolve token references - resolve_token_references(token_defs) - - rules = [RuleOptions.from_rule(*x) for x in rule_defs] - - rule_names = set() - for name, _x, _o in rules: - if name.startswith('__'): - raise GrammarError('Names starting with double-underscore are reserved (Error at %s)' % name) - if name in rule_names: - raise GrammarError("Rule '%s' defined more than once" % name) - rule_names.add(name) - - for name, expansions, _o in rules: - used_symbols = {t for x in expansions.find_data('expansion') - for t in x.scan_values(lambda t: t.type in ('RULE', 'TOKEN'))} - for sym in used_symbols: - if is_terminal(sym): - if sym not in token_names: - raise GrammarError("Token '%s' used but not defined (in rule %s)" % (sym, name)) - else: - if sym not in rule_names: - raise GrammarError("Rule '%s' used but not defined (in rule %s)" % (sym, name)) - - # TODO don't include unused tokens, they can only cause trouble! 
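Earlier in this file, EBNF_to_BNF._add_recurse_rule turns a '+' repetition into a fresh left-recursive helper rule (_c : _c c | c) so the grammar handed to the parser is plain BNF. A toy, dictionary-based version of that rewrite, invented here purely for illustration:

    def rewrite_plus(rules):
        # rules: {rule_name: [alternatives]}, each alternative a list of symbol names
        new_rules, counter, out = {}, 0, {}
        for name, alternatives in rules.items():
            rewritten = []
            for alt in alternatives:
                new_alt = []
                for sym in alt:
                    if sym.endswith("+"):
                        base = sym[:-1]
                        aux = "__plus_%d" % counter
                        counter += 1
                        new_rules[aux] = [[aux, base], [base]]   # _c : _c c | c
                        new_alt.append(aux)
                    else:
                        new_alt.append(sym)
                rewritten.append(new_alt)
            out[name] = rewritten
        out.update(new_rules)
        return out

    print(rewrite_plus({"a": [["b", "c+", "d"]]}))
    # {'a': [['b', '__plus_0', 'd']], '__plus_0': [['__plus_0', 'c'], ['c']]}
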
- - return Grammar(rules, token_defs, ignore_names) - - -load_grammar = GrammarLoader().load_grammar diff --git a/worlds/ff4fe/FreeEnterpriseForAP/f4c/lark/parse_tree_builder.py b/worlds/ff4fe/FreeEnterpriseForAP/f4c/lark/parse_tree_builder.py deleted file mode 100644 index 601372e46617..000000000000 --- a/worlds/ff4fe/FreeEnterpriseForAP/f4c/lark/parse_tree_builder.py +++ /dev/null @@ -1,128 +0,0 @@ -from .common import is_terminal, GrammarError -from .utils import suppress -from .lexer import Token - -class Callback(object): - pass - - -def create_expand1_tree_builder_function(tree_builder): - def expand1(children): - if len(children) == 1: - return children[0] - else: - return tree_builder(children) - return expand1 - -def create_token_wrapper(tree_builder, name): - def join_children(children): - children = [Token(name, ''.join(children))] - return tree_builder(children) - return join_children - -def create_rule_handler(expansion, usermethod, keep_all_tokens, filter_out): - # if not keep_all_tokens: - to_include = [(i, not is_terminal(sym) and sym.startswith('_')) - for i, sym in enumerate(expansion) - if keep_all_tokens - or not ((is_terminal(sym) and sym.startswith('_')) or sym in filter_out) - ] - - if len(to_include) < len(expansion) or any(to_expand for i, to_expand in to_include): - def _build_ast(match): - children = [] - for i, to_expand in to_include: - if to_expand: - children += match[i].children - else: - children.append(match[i]) - - return usermethod(children) - return _build_ast - - # else, if no filtering required.. - return usermethod - -def propagate_positions_wrapper(f): - def _f(args): - res = f(args) - - if args: - for a in args: - with suppress(AttributeError): - res.line = a.line - res.column = a.column - break - - for a in reversed(args): - with suppress(AttributeError): - res.end_line = a.end_line - res.end_col = a.end_col - break - - return res - - return _f - -class ParseTreeBuilder: - def __init__(self, tree_class, propagate_positions=False, keep_all_tokens=False): - self.tree_class = tree_class - self.propagate_positions = propagate_positions - self.always_keep_all_tokens = keep_all_tokens - - def _create_tree_builder_function(self, name): - tree_class = self.tree_class - def tree_builder_f(children): - return tree_class(name, children) - return tree_builder_f - - - - def create_tree_builder(self, rules, transformer): - callback = Callback() - new_rules = [] - - filter_out = set() - for origin, (expansions, options) in rules.items(): - if options and options.filter_out: - assert origin.startswith('_') # Just to make sure - filter_out.add(origin) - - for origin, (expansions, options) in rules.items(): - keep_all_tokens = self.always_keep_all_tokens or (options.keep_all_tokens if options else False) - expand1 = options.expand1 if options else False - create_token = options.create_token if options else False - - _origin = origin - - for expansion, alias in expansions: - if alias and origin.startswith('_'): - raise Exception("Rule %s is marked for expansion (it starts with an underscore) and isn't allowed to have aliases (alias=%s)" % (origin, alias)) - - try: - f = transformer._get_func(alias or _origin) - except AttributeError: - if alias: - f = self._create_tree_builder_function(alias) - else: - f = self._create_tree_builder_function(_origin) - if expand1: - f = create_expand1_tree_builder_function(f) - - if create_token: - f = create_token_wrapper(f, create_token) - - - alias_handler = create_rule_handler(expansion, f, keep_all_tokens, filter_out) - - if 
self.propagate_positions: - alias_handler = propagate_positions_wrapper(alias_handler) - - callback_name = 'autoalias_%s_%s' % (_origin, '_'.join(expansion)) - if hasattr(callback, callback_name): - raise GrammarError("Rule expansion '%s' already exists in rule %s" % (' '.join(expansion), origin)) - setattr(callback, callback_name, alias_handler) - - new_rules.append(( _origin, expansion, callback_name, options )) - - return new_rules, callback diff --git a/worlds/ff4fe/FreeEnterpriseForAP/f4c/lark/parser_frontends.py b/worlds/ff4fe/FreeEnterpriseForAP/f4c/lark/parser_frontends.py deleted file mode 100644 index 7a7f68594241..000000000000 --- a/worlds/ff4fe/FreeEnterpriseForAP/f4c/lark/parser_frontends.py +++ /dev/null @@ -1,164 +0,0 @@ -import re -from . import sre_parse - -from .lexer import Lexer, ContextualLexer, Token - -from .common import is_terminal, GrammarError, ParserConf, Terminal_Regexp, Terminal_Token -from .parsers import lalr_parser, earley, xearley - -class WithLexer: - def __init__(self, lexer_conf): - self.lexer_conf = lexer_conf - self.lexer = Lexer(lexer_conf.tokens, ignore=lexer_conf.ignore) - - def lex(self, text): - stream = self.lexer.lex(text) - if self.lexer_conf.postlex: - return self.lexer_conf.postlex.process(stream) - else: - return stream - - -class LALR(WithLexer): - def __init__(self, lexer_conf, parser_conf, options=None): - WithLexer.__init__(self, lexer_conf) - - self.parser_conf = parser_conf - self.parser = lalr_parser.Parser(parser_conf) - - def parse(self, text): - tokens = self.lex(text) - return self.parser.parse(tokens) - - -class LALR_ContextualLexer: - def __init__(self, lexer_conf, parser_conf, options=None): - self.lexer_conf = lexer_conf - self.parser_conf = parser_conf - - self.parser = lalr_parser.Parser(parser_conf) - - d = {idx:t.keys() for idx, t in self.parser.analysis.states_idx.items()} - always_accept = lexer_conf.postlex.always_accept if lexer_conf.postlex else () - self.lexer = ContextualLexer(lexer_conf.tokens, d, ignore=lexer_conf.ignore, always_accept=always_accept) - - def parse(self, text): - tokens = self.lexer.lex(text) - if self.lexer_conf.postlex: - tokens = self.lexer_conf.postlex.process(tokens) - return self.parser.parse(tokens, self.lexer.set_parser_state) - - -def tokenize_text(text): - new_text = [] - line = 1 - col_start_pos = 0 - for i, ch in enumerate(text): - if '\n' in ch: - line += ch.count('\n') - col_start_pos = i + ch.rindex('\n') - new_text.append(Token('CHAR', ch, line=line, column=i - col_start_pos)) - return new_text - -class Earley_NoLex: - def __init__(self, lexer_conf, parser_conf, options=None): - self.token_by_name = {t.name:t for t in lexer_conf.tokens} - - rules = [(n, list(self._prepare_expansion(x)), a, o) for n,x,a,o in parser_conf.rules] - - resolve_ambiguity = (options.ambiguity=='resolve') if options else True - self.parser = earley.Parser(rules, - parser_conf.start, - parser_conf.callback, - resolve_ambiguity=resolve_ambiguity) - - def _prepare_expansion(self, expansion): - for sym in expansion: - if is_terminal(sym): - regexp = self.token_by_name[sym].pattern.to_regexp() - width = sre_parse.parse(regexp).getwidth() - if width != (1,1): - raise GrammarError('Scanless parsing (lexer=None) requires all tokens to have a width of 1 (terminal %s: %s is %s)' % (sym, regexp, width)) - yield Terminal_Regexp(sym, regexp) - else: - yield sym - - def parse(self, text): - new_text = tokenize_text(text) - return self.parser.parse(new_text) - -class Earley(WithLexer): - def __init__(self, lexer_conf, 
parser_conf, options=None): - WithLexer.__init__(self, lexer_conf) - - rules = [(n, self._prepare_expansion(x), a, o) for n,x,a,o in parser_conf.rules] - - resolve_ambiguity = (options.ambiguity=='resolve') if options else True - self.parser = earley.Parser(rules, - parser_conf.start, - parser_conf.callback, - resolve_ambiguity=resolve_ambiguity) - - def _prepare_expansion(self, expansion): - return [Terminal_Token(sym) if is_terminal(sym) else sym for sym in expansion] - - def parse(self, text): - tokens = self.lex(text) - return self.parser.parse(tokens) - - -class XEarley: - def __init__(self, lexer_conf, parser_conf, options=None): - self.token_by_name = {t.name:t for t in lexer_conf.tokens} - - rules = [(n, list(self._prepare_expansion(x)), a, o) for n,x,a,o in parser_conf.rules] - - resolve_ambiguity = (options.ambiguity=='resolve') if options else True - ignore = [Terminal_Regexp(x, self.token_by_name[x].pattern.to_regexp()) for x in lexer_conf.ignore] - - self.parser = xearley.Parser(rules, - parser_conf.start, - parser_conf.callback, - resolve_ambiguity=resolve_ambiguity, - ignore=ignore, - ) - - def _prepare_expansion(self, expansion): - for sym in expansion: - if is_terminal(sym): - regexp = self.token_by_name[sym].pattern.to_regexp() - width = sre_parse.parse(regexp).getwidth() - assert width - yield Terminal_Regexp(sym, regexp) - else: - yield sym - - def parse(self, text): - return self.parser.parse(text) - -def get_frontend(parser, lexer): - if parser=='lalr': - if lexer is None: - raise ValueError('The LALR parser requires use of a lexer') - elif lexer == 'standard': - return LALR - elif lexer == 'contextual': - return LALR_ContextualLexer - else: - raise ValueError('Unknown lexer: %s' % lexer) - elif parser=='earley': - if lexer is None: - return Earley_NoLex - elif lexer=='standard': - return Earley - elif lexer=='dynamic': - return XEarley - elif lexer=='contextual': - raise ValueError('The Earley parser does not support the contextual parser') - else: - raise ValueError('Unknown lexer: %s' % lexer) - else: - raise ValueError('Unknown parser: %s' % parser) - - - diff --git a/worlds/ff4fe/FreeEnterpriseForAP/f4c/lark/parsers/__init__.py b/worlds/ff4fe/FreeEnterpriseForAP/f4c/lark/parsers/__init__.py deleted file mode 100644 index e69de29bb2d1..000000000000 diff --git a/worlds/ff4fe/FreeEnterpriseForAP/f4c/lark/parsers/earley.py b/worlds/ff4fe/FreeEnterpriseForAP/f4c/lark/parsers/earley.py deleted file mode 100644 index 2368183aee4b..000000000000 --- a/worlds/ff4fe/FreeEnterpriseForAP/f4c/lark/parsers/earley.py +++ /dev/null @@ -1,305 +0,0 @@ -"This module implements an Earley Parser" - -# The parser uses a parse-forest to keep track of derivations and ambiguations. -# When the parse ends successfully, a disambiguation stage resolves all ambiguity -# (right now ambiguity resolution is not developed beyond the needs of lark) -# Afterwards the parse tree is reduced (transformed) according to user callbacks. -# I use the no-recursion version of Transformer and Visitor, because the tree might be -# deeper than Python's recursion limit (a bit absurd, but that's life) -# -# The algorithm keeps track of each state set, using a corresponding Column instance. -# Column keeps track of new items using NewsList instances. 
-# -# Author: Erez Shinan (2017) -# Email : erezshin@gmail.com - -from functools import cmp_to_key - -from ..utils import compare -from ..common import ParseError, UnexpectedToken, Terminal -from ..tree import Tree, Visitor_NoRecurse, Transformer_NoRecurse -from .grammar_analysis import GrammarAnalyzer - - -class EndToken: - type = '$end' - -class Derivation(Tree): - def __init__(self, rule, items=None): - Tree.__init__(self, 'drv', items or []) - self.rule = rule - - def _pretty_label(self): # Nicer pretty for debugging the parser - return self.rule.origin if self.rule else self.data - -END_TOKEN = EndToken() - -class Item(object): - "An Earley Item, the atom of the algorithm." - - def __init__(self, rule, ptr, start, tree): - self.rule = rule - self.ptr = ptr - self.start = start - self.tree = tree if tree is not None else Derivation(self.rule) - - @property - def expect(self): - return self.rule.expansion[self.ptr] - - @property - def is_complete(self): - return self.ptr == len(self.rule.expansion) - - def advance(self, tree): - assert self.tree.data == 'drv' - new_tree = Derivation(self.rule, self.tree.children + [tree]) - return Item(self.rule, self.ptr+1, self.start, new_tree) - - def __eq__(self, other): - return self.start is other.start and self.ptr == other.ptr and self.rule == other.rule - def __hash__(self): - return hash((self.rule, self.ptr, id(self.start))) - - def __repr__(self): - before = list(map(str, self.rule.expansion[:self.ptr])) - after = list(map(str, self.rule.expansion[self.ptr:])) - return '<(%d) %s : %s * %s>' % (id(self.start), self.rule.origin, ' '.join(before), ' '.join(after)) - - -class NewsList(list): - "Keeps track of newly added items (append-only)" - - def __init__(self, initial=None): - list.__init__(self, initial or []) - self.last_iter = 0 - - def get_news(self): - i = self.last_iter - self.last_iter = len(self) - return self[i:] - - - -class Column: - "An entry in the table, aka Earley Chart. Contains lists of items." - def __init__(self, i): - self.i = i - self.to_reduce = NewsList() - self.to_predict = NewsList() - self.to_scan = NewsList() - self.item_count = 0 - - self.added = set() - self.completed = {} - - def add(self, items): - """Sort items into scan/predict/reduce newslists - - Makes sure only unique items are added. - """ - - for item in items: - - if item.is_complete: - # XXX Potential bug: What happens if there's ambiguity in an empty rule? - if item.rule.expansion and item in self.completed: - old_tree = self.completed[item].tree - if old_tree.data != '_ambig': - new_tree = old_tree.copy() - new_tree.rule = old_tree.rule - old_tree.set('_ambig', [new_tree]) - old_tree.rule = None # No longer a 'drv' node - - if item.tree.children[0] is old_tree: # XXX a little hacky! - raise ParseError("Infinite recursion in grammar! 
(Rule %s)" % item.rule) - - old_tree.children.append(item.tree) - else: - self.completed[item] = item - self.to_reduce.append(item) - else: - if item not in self.added: - self.added.add(item) - if isinstance(item.expect, Terminal): - self.to_scan.append(item) - else: - self.to_predict.append(item) - - self.item_count += 1 # Only count if actually added - - def __nonzero__(self): - return bool(self.item_count) - -class Parser: - def __init__(self, rules, start_symbol, callback, resolve_ambiguity=True): - self.analysis = GrammarAnalyzer(rules, start_symbol) - self.start_symbol = start_symbol - self.resolve_ambiguity = resolve_ambiguity - - self.postprocess = {} - self.predictions = {} - for rule in self.analysis.rules: - if rule.origin != '$root': # XXX kinda ugly - a = rule.alias - self.postprocess[rule] = a if callable(a) else (a and getattr(callback, a)) - self.predictions[rule.origin] = [x.rule for x in self.analysis.expand_rule(rule.origin)] - - def parse(self, stream, start_symbol=None): - # Define parser functions - start_symbol = start_symbol or self.start_symbol - - def predict(nonterm, column): - assert not isinstance(nonterm, Terminal), nonterm - return [Item(rule, 0, column, None) for rule in self.predictions[nonterm]] - - def complete(item): - name = item.rule.origin - return [i.advance(item.tree) for i in item.start.to_predict if i.expect == name] - - def predict_and_complete(column): - while True: - to_predict = {x.expect for x in column.to_predict.get_news() - if x.ptr} # if not part of an already predicted batch - to_reduce = column.to_reduce.get_news() - if not (to_predict or to_reduce): - break - - for nonterm in to_predict: - column.add( predict(nonterm, column) ) - for item in to_reduce: - column.add( complete(item) ) - - def scan(i, token, column): - to_scan = column.to_scan.get_news() - - next_set = Column(i) - next_set.add(item.advance(token) for item in to_scan if item.expect.match(token)) - - if not next_set: - expect = {i.expect for i in column.to_scan} - raise UnexpectedToken(token, expect, stream, i) - - return next_set - - # Main loop starts - column0 = Column(0) - column0.add(predict(start_symbol, column0)) - - column = column0 - for i, token in enumerate(stream): - predict_and_complete(column) - column = scan(i, token, column) - - predict_and_complete(column) - - # Parse ended. 
Now build a parse tree - solutions = [n.tree for n in column.to_reduce - if n.rule.origin==start_symbol and n.start is column0] - - if not solutions: - raise ParseError('Incomplete parse: Could not find a solution to input') - elif len(solutions) == 1: - tree = solutions[0] - else: - tree = Tree('_ambig', solutions) - - if self.resolve_ambiguity: - ResolveAmbig().visit(tree) - - return ApplyCallbacks(self.postprocess).transform(tree) - - - -class ApplyCallbacks(Transformer_NoRecurse): - def __init__(self, postprocess): - self.postprocess = postprocess - - def drv(self, tree): - children = tree.children - callback = self.postprocess[tree.rule] - if callback: - return callback(children) - else: - return Tree(tree.rule.origin, children) - -def _compare_rules(rule1, rule2): - if rule1.origin != rule2.origin: - if rule1.options and rule2.options: - if rule1.options.priority is not None and rule2.options.priority is not None: - assert rule1.options.priority != rule2.options.priority, "Priority is the same between both rules: %s == %s" % (rule1, rule2) - return -compare(rule1.options.priority, rule2.options.priority) - - return 0 - - c = compare( len(rule1.expansion), len(rule2.expansion)) - if rule1.origin.startswith('__'): # XXX hack! We need to set priority in parser, not here - c = -c - return c - -def _compare_drv(tree1, tree2): - if not (isinstance(tree1, Tree) and isinstance(tree2, Tree)): - return -compare(tree1, tree2) - - try: - rule1, rule2 = tree1.rule, tree2.rule - except AttributeError: - # Probably trees that don't take part in this parse (better way to distinguish?) - return -compare(tree1, tree2) - - # XXX These artifacts can appear due to imperfections in the ordering of Visitor_NoRecurse, - # when confronted with duplicate (same-id) nodes. Fixing this ordering is possible, but would be - # computationally inefficient. So we handle it here. 
- if tree1.data == '_ambig': - _resolve_ambig(tree1) - if tree2.data == '_ambig': - _resolve_ambig(tree2) - - c = _compare_rules(tree1.rule, tree2.rule) - if c: - return c - - # rules are "equal", so compare trees - for t1, t2 in zip(tree1.children, tree2.children): - c = _compare_drv(t1, t2) - if c: - return c - - return compare(len(tree1.children), len(tree2.children)) - - -def _resolve_ambig(tree): - assert tree.data == '_ambig' - - best = min(tree.children, key=cmp_to_key(_compare_drv)) - assert best.data == 'drv' - tree.set('drv', best.children) - tree.rule = best.rule # needed for applying callbacks - - assert tree.data != '_ambig' - -class ResolveAmbig(Visitor_NoRecurse): - def _ambig(self, tree): - _resolve_ambig(tree) - - -# RULES = [ -# ('a', ['d']), -# ('d', ['b']), -# ('b', ['C']), -# ('b', ['b', 'C']), -# ('b', ['C', 'b']), -# ] -# p = Parser(RULES, 'a') -# for x in p.parse('CC'): -# print x.pretty() - -#--------------- -# RULES = [ -# ('s', ['a', 'a']), -# ('a', ['b', 'b']), -# ('b', ['C'], lambda (x,): x), -# ('b', ['b', 'C']), -# ] -# p = Parser(RULES, 's', {}) -# print p.parse('CCCCC').pretty() diff --git a/worlds/ff4fe/FreeEnterpriseForAP/f4c/lark/parsers/grammar_analysis.py b/worlds/ff4fe/FreeEnterpriseForAP/f4c/lark/parsers/grammar_analysis.py deleted file mode 100644 index 7dff9ced92e9..000000000000 --- a/worlds/ff4fe/FreeEnterpriseForAP/f4c/lark/parsers/grammar_analysis.py +++ /dev/null @@ -1,157 +0,0 @@ - -from ..utils import bfs, fzset -from ..common import GrammarError, is_terminal - -class Rule(object): - """ - origin : a symbol - expansion : a list of symbols - """ - def __init__(self, origin, expansion, alias=None, options=None): - self.origin = origin - self.expansion = expansion - self.alias = alias - self.options = options - - def __repr__(self): - return '<%s : %s>' % (self.origin, ' '.join(map(str,self.expansion))) - -class RulePtr(object): - def __init__(self, rule, index): - assert isinstance(rule, Rule) - assert index <= len(rule.expansion) - self.rule = rule - self.index = index - - def __repr__(self): - before = self.rule.expansion[:self.index] - after = self.rule.expansion[self.index:] - return '<%s : %s * %s>' % (self.rule.origin, ' '.join(before), ' '.join(after)) - - @property - def next(self): - return self.rule.expansion[self.index] - - def advance(self, sym): - assert self.next == sym - return RulePtr(self.rule, self.index+1) - - @property - def is_satisfied(self): - return self.index == len(self.rule.expansion) - - def __eq__(self, other): - return self.rule == other.rule and self.index == other.index - def __hash__(self): - return hash((self.rule, self.index)) - - -def pairs(lst): - return zip(lst[:-1], lst[1:]) - -def update_set(set1, set2): - copy = set(set1) - set1 |= set2 - return set1 != copy - -def calculate_sets(rules): - """Calculate FOLLOW sets. - - Adapted from: http://lara.epfl.ch/w/cc09:algorithm_for_first_and_follow_sets""" - symbols = {sym for rule in rules for sym in rule.expansion} | {rule.origin for rule in rules} - symbols.add('$root') # what about other unused rules? - - # foreach grammar rule X ::= Y(1) ... 
Y(k) - # if k=0 or {Y(1),...,Y(k)} subset of NULLABLE then - # NULLABLE = NULLABLE union {X} - # for i = 1 to k - # if i=1 or {Y(1),...,Y(i-1)} subset of NULLABLE then - # FIRST(X) = FIRST(X) union FIRST(Y(i)) - # for j = i+1 to k - # if i=k or {Y(i+1),...Y(k)} subset of NULLABLE then - # FOLLOW(Y(i)) = FOLLOW(Y(i)) union FOLLOW(X) - # if i+1=j or {Y(i+1),...,Y(j-1)} subset of NULLABLE then - # FOLLOW(Y(i)) = FOLLOW(Y(i)) union FIRST(Y(j)) - # until none of NULLABLE,FIRST,FOLLOW changed in last iteration - - NULLABLE = set() - FIRST = {} - FOLLOW = {} - for sym in symbols: - FIRST[sym]={sym} if is_terminal(sym) else set() - FOLLOW[sym]=set() - - changed = True - while changed: - changed = False - - for rule in rules: - if set(rule.expansion) <= NULLABLE: - if update_set(NULLABLE, {rule.origin}): - changed = True - - for i, sym in enumerate(rule.expansion): - if set(rule.expansion[:i]) <= NULLABLE: - if update_set(FIRST[rule.origin], FIRST[sym]): - changed = True - if i==len(rule.expansion)-1 or set(rule.expansion[i:]) <= NULLABLE: - if update_set(FOLLOW[sym], FOLLOW[rule.origin]): - changed = True - - for j in range(i+1, len(rule.expansion)): - if set(rule.expansion[i+1:j]) <= NULLABLE: - if update_set(FOLLOW[sym], FIRST[rule.expansion[j]]): - changed = True - - return FIRST, FOLLOW, NULLABLE - - -class GrammarAnalyzer(object): - def __init__(self, rule_tuples, start_symbol, debug=False): - self.start_symbol = start_symbol - self.debug = debug - rule_tuples = list(rule_tuples) - rule_tuples.append(('$root', [start_symbol, '$end'])) - rule_tuples = [(t[0], t[1], None, None) if len(t)==2 else t for t in rule_tuples] - - self.rules = set() - self.rules_by_origin = {o: [] for o, _x, _a, _opt in rule_tuples} - for origin, exp, alias, options in rule_tuples: - r = Rule( origin, exp, alias, options ) - self.rules.add(r) - self.rules_by_origin[origin].append(r) - - for r in self.rules: - for sym in r.expansion: - if not (is_terminal(sym) or sym in self.rules_by_origin): - raise GrammarError("Using an undefined rule: %s" % sym) - - self.init_state = self.expand_rule(start_symbol) - - self.FIRST, self.FOLLOW, self.NULLABLE = calculate_sets(self.rules) - - def expand_rule(self, rule): - "Returns all init_ptrs accessible by rule (recursive)" - init_ptrs = set() - def _expand_rule(rule): - assert not is_terminal(rule), rule - - for r in self.rules_by_origin[rule]: - init_ptr = RulePtr(r, 0) - init_ptrs.add(init_ptr) - - if r.expansion: # if not empty rule - new_r = init_ptr.next - if not is_terminal(new_r): - yield new_r - - _ = list(bfs([rule], _expand_rule)) - - return fzset(init_ptrs) - - def _first(self, r): - if is_terminal(r): - return {r} - else: - return {rp.next for rp in self.expand_rule(r) if is_terminal(rp.next)} - diff --git a/worlds/ff4fe/FreeEnterpriseForAP/f4c/lark/parsers/lalr_analysis.py b/worlds/ff4fe/FreeEnterpriseForAP/f4c/lark/parsers/lalr_analysis.py deleted file mode 100644 index caa41c9a88ed..000000000000 --- a/worlds/ff4fe/FreeEnterpriseForAP/f4c/lark/parsers/lalr_analysis.py +++ /dev/null @@ -1,73 +0,0 @@ -"""This module builds a LALR(1) transition-table for lalr_parser.py - -For now, shift/reduce conflicts are automatically resolved as shifts. 
-""" - -# Author: Erez Shinan (2017) -# Email : erezshin@gmail.com - -import logging -from collections import defaultdict - -from ..utils import classify, classify_bool, bfs, fzset -from ..common import GrammarError, is_terminal - -from .grammar_analysis import GrammarAnalyzer - -ACTION_SHIFT = 0 - -class LALR_Analyzer(GrammarAnalyzer): - - def compute_lookahead(self): - - self.states = {} - def step(state): - lookahead = defaultdict(list) - sat, unsat = classify_bool(state, lambda rp: rp.is_satisfied) - for rp in sat: - for term in self.FOLLOW.get(rp.rule.origin, ()): - lookahead[term].append(('reduce', rp.rule)) - - d = classify(unsat, lambda rp: rp.next) - for sym, rps in d.items(): - rps = {rp.advance(sym) for rp in rps} - - for rp in set(rps): - if not rp.is_satisfied and not is_terminal(rp.next): - rps |= self.expand_rule(rp.next) - - lookahead[sym].append(('shift', fzset(rps))) - yield fzset(rps) - - for k, v in lookahead.items(): - if len(v) > 1: - if self.debug: - logging.warn("Shift/reduce conflict for %s: %s. Resolving as shift.", k, v) - for x in v: - # XXX resolving shift/reduce into shift, like PLY - # Give a proper warning - if x[0] == 'shift': - lookahead[k] = [x] - - for k, v in lookahead.items(): - if not len(v) == 1: - raise GrammarError("Collision in %s: %s" %(k, v)) - - self.states[state] = {k:v[0] for k, v in lookahead.items()} - - for _ in bfs([self.init_state], step): - pass - - # -- - self.enum = list(self.states) - self.enum_rev = {s:i for i,s in enumerate(self.enum)} - self.states_idx = {} - - for s, la in self.states.items(): - la = {k:(ACTION_SHIFT, self.enum_rev[v[1]]) if v[0]=='shift' - else (v[0], (v[1], len(v[1].expansion))) # Reduce - for k,v in la.items()} - self.states_idx[ self.enum_rev[s] ] = la - - - self.init_state_idx = self.enum_rev[self.init_state] diff --git a/worlds/ff4fe/FreeEnterpriseForAP/f4c/lark/parsers/lalr_parser.py b/worlds/ff4fe/FreeEnterpriseForAP/f4c/lark/parsers/lalr_parser.py deleted file mode 100644 index bd519d13e740..000000000000 --- a/worlds/ff4fe/FreeEnterpriseForAP/f4c/lark/parsers/lalr_parser.py +++ /dev/null @@ -1,83 +0,0 @@ -"""This module implements a LALR(1) Parser -""" -# Author: Erez Shinan (2017) -# Email : erezshin@gmail.com - -from ..common import ParseError, UnexpectedToken - -from .lalr_analysis import LALR_Analyzer, ACTION_SHIFT - -class Parser(object): - def __init__(self, parser_conf): - assert all(o is None or o.priority is None for n,x,a,o in parser_conf.rules), "LALR doesn't yet support prioritization" - self.analysis = LALR_Analyzer(parser_conf.rules, parser_conf.start) - self.analysis.compute_lookahead() - self.callbacks = {rule: getattr(parser_conf.callback, rule.alias or rule.origin, None) - for rule in self.analysis.rules} - - def parse(self, seq, set_state=None): - i = 0 - token = None - stream = iter(seq) - states_idx = self.analysis.states_idx - - state_stack = [self.analysis.init_state_idx] - value_stack = [] - - if set_state: set_state(self.analysis.init_state_idx) - - def get_action(key): - state = state_stack[-1] - try: - return states_idx[state][key] - except KeyError: - expected = states_idx[state].keys() - - raise UnexpectedToken(token, expected, seq, i) - - def reduce(rule, size, end=False): - if size: - s = value_stack[-size:] - del state_stack[-size:] - del value_stack[-size:] - else: - s = [] - - res = self.callbacks[rule](s) - - if end and len(state_stack) == 1 and rule.origin == self.analysis.start_symbol: - return res - - _action, new_state = get_action(rule.origin) - assert _action == 
ACTION_SHIFT - state_stack.append(new_state) - value_stack.append(res) - - # Main LALR-parser loop - try: - token = next(stream) - i += 1 - while True: - action, arg = get_action(token.type) - - if action == ACTION_SHIFT: - state_stack.append(arg) - value_stack.append(token) - if set_state: set_state(arg) - token = next(stream) - i += 1 - else: - reduce(*arg) - except StopIteration: - pass - - while True: - _action, rule = get_action('$end') - assert _action == 'reduce' - res = reduce(*rule, end=True) - if res: - assert state_stack == [self.analysis.init_state_idx] and not value_stack, len(state_stack) - return res - - - diff --git a/worlds/ff4fe/FreeEnterpriseForAP/f4c/lark/parsers/nearley.py b/worlds/ff4fe/FreeEnterpriseForAP/f4c/lark/parsers/nearley.py deleted file mode 100644 index ed529e11435a..000000000000 --- a/worlds/ff4fe/FreeEnterpriseForAP/f4c/lark/parsers/nearley.py +++ /dev/null @@ -1,152 +0,0 @@ -"My name is Earley" - -from ..utils import classify -from ..common import ParseError, UnexpectedToken - -xrange = range - -class MatchFailed(object): - pass - -class AbortParseMatch(Exception): - pass - - -class Rule(object): - def __init__(self, name, symbols, postprocess): - self.name = name - self.symbols = symbols - self.postprocess = postprocess - -class State(object): - def __init__(self, rule, expect, reference, data=None): - self.rule = rule - self.expect = expect - self.reference = reference - self.data = data or [] - - self.is_complete = (self.expect == len(self.rule.symbols)) - if not self.is_complete: - self.expect_symbol = self.rule.symbols[self.expect] - self.is_terminal = isinstance(self.expect_symbol, tuple) - else: - self.is_terminal = False - - def next_state(self, data): - return State(self.rule, self.expect+1, self.reference, self.data + [data]) - - def consume_terminal(self, inp): - if not self.is_complete and self.is_terminal: - # PORT: originally tests regexp - - if self.expect_symbol[1] is not None: - match = self.expect_symbol[1].match(inp) - if match: - return self.next_state(inp) - - elif self.expect_symbol[0] == inp.type: - return self.next_state(inp) - - def consume_nonterminal(self, inp): - if not self.is_complete and not self.is_terminal: - - if self.expect_symbol == inp: - return self.next_state(inp) - - def process(self, location, ind, table, rules, added_rules): - - if self.is_complete: - # Completed a rule - if self.rule.postprocess: - try: - self.data = self.rule.postprocess(self.data) - except AbortParseMatch: - self.data = MatchFailed - - if self.data is not MatchFailed: - for s in table[self.reference]: - x = s.consume_nonterminal(self.rule.name) - if x: - x.data[-1] = self.data - x.epsilon_closure(location, ind, table) - - else: - exp = self.rule.symbols[self.expect] - if isinstance(exp, tuple): - return - - for r in rules[exp]: - assert r.name == exp - if r not in added_rules: - if r.symbols: - added_rules.add(r) - State(r, 0, location).epsilon_closure(location, ind, table) - else: - # Empty rule - new_copy = self.consume_nonterminal(r.name) - new_copy.data[-1] = r.postprocess([]) if r.postprocess else [] - - new_copy.epsilon_closure(location, ind, table) - - def epsilon_closure(self, location, ind, table): - col = table[location] - col.append(self) - - if not self.is_complete: - for i in xrange(ind): - state = col[i] - if state.is_complete and state.reference == location: - x = self.consume_nonterminal(state.rule.name) - if x: - x.data[-1] = state.data - x.epsilon_closure(location, ind, table) - - -class Parser(object): - def __init__(self, 
rules, start=None): - self.rules = [Rule(r['name'], r['symbols'], r.get('postprocess', None)) for r in rules] - self.rules_by_name = classify(self.rules, lambda r: r.name) - self.start = start or self.rules[0].name - - def advance_to(self, table, added_rules): - n = len(table)-1 - for w, s in enumerate(table[n]): - s.process(n, w, table, self.rules_by_name, added_rules) - - def parse(self, stream): - initial_rules = set(self.rules_by_name[self.start]) - table = [[State(r, 0, 0) for r in initial_rules]] - self.advance_to(table, initial_rules) - - i = 0 - - while i < len(stream): - col = [] - - token = stream[i] - for s in table[-1]: - x = s.consume_terminal(token) - if x: - col.append(x) - - if not col: - expected = {s.expect_symbol for s in table[-1] if s.is_terminal} - raise UnexpectedToken(stream[i], expected, stream, i) - - table.append(col) - self.advance_to(table, set()) - - i += 1 - - res = list(self.finish(table)) - if not res: - raise ParseError('Incomplete parse') - return res - - def finish(self, table): - for t in table[-1]: - if (t.rule.name == self.start - and t.expect == len(t.rule.symbols) - and t.reference == 0 - and t.data is not MatchFailed): - yield t.data diff --git a/worlds/ff4fe/FreeEnterpriseForAP/f4c/lark/parsers/old_earley.py b/worlds/ff4fe/FreeEnterpriseForAP/f4c/lark/parsers/old_earley.py deleted file mode 100644 index 24c1e4b15d6f..000000000000 --- a/worlds/ff4fe/FreeEnterpriseForAP/f4c/lark/parsers/old_earley.py +++ /dev/null @@ -1,180 +0,0 @@ -"This module implements an Earley Parser" - -# The algorithm keeps track of each state set, using a corresponding Column instance. -# Column keeps track of new items using NewsList instances. -# -# Author: Erez Shinan (2017) -# Email : erezshin@gmail.com - -from ..common import ParseError, UnexpectedToken, is_terminal -from .grammar_analysis import GrammarAnalyzer - -class EndToken: - type = '$end' - -END_TOKEN = EndToken() - -class Item(object): - def __init__(self, rule, ptr, start, data): - self.rule = rule - self.ptr = ptr - self.start = start - self.data = data - - @property - def expect(self): - return self.rule.expansion[self.ptr] - - @property - def is_complete(self): - return self.ptr == len(self.rule.expansion) - - def advance(self, data): - return Item(self.rule, self.ptr+1, self.start, self.data + [data]) - - def __eq__(self, other): - return self.start is other.start and self.ptr == other.ptr and self.rule == other.rule - def __hash__(self): - return hash((self.rule, self.ptr, id(self.start))) - - def __repr__(self): - before = map(str, self.rule.expansion[:self.ptr]) - after = map(str, self.rule.expansion[self.ptr:]) - return '<(%d) %s : %s * %s>' % (id(self.start), self.rule.origin, ' '.join(before), ' '.join(after)) - - -class NewsList(list): - "Keeps track of newly added items (append-only)" - - def __init__(self, initial=None): - list.__init__(self, initial or []) - self.last_iter = 0 - - def get_news(self): - i = self.last_iter - self.last_iter = len(self) - return self[i:] - - -class Column: - "An entry in the table, aka Earley Chart" - def __init__(self): - self.to_reduce = NewsList() - self.to_predict = NewsList() - self.to_scan = NewsList() - self.item_count = 0 - - self.added = set() - - def add(self, items): - """Sort items into scan/predict/reduce newslists - - Makes sure only unique items are added. 
- """ - - added = self.added - for item in items: - - if item.is_complete: - - # (We must allow repetition of empty rules) - # if item.rule.expansion: - - # This is an important test to avoid infinite-loops, - # For example for the rule: - # a: a | "b" - # If we can detect these cases statically, we can remove - # this test an gain a tiny performance boost - # - # if item in added: - # continue - # added.add(item) - - self.to_reduce.append(item) - else: - if is_terminal(item.expect): - self.to_scan.append(item) - else: - if item in added: - continue - added.add(item) - self.to_predict.append(item) - - self.item_count += 1 # Only count if actually added - - def __nonzero__(self): - return bool(self.item_count) - -class Parser: - def __init__(self, parser_conf): - - self.analysis = GrammarAnalyzer(parser_conf.rules, parser_conf.start) - self.start = parser_conf.start - - self.postprocess = {} - self.predictions = {} - for rule in self.analysis.rules: - if rule.origin != '$root': # XXX kinda ugly - a = rule.alias - self.postprocess[rule] = a if callable(a) else getattr(parser_conf.callback, a) - self.predictions[rule.origin] = [x.rule for x in self.analysis.expand_rule(rule.origin)] - - def parse(self, stream, start=None): - # Define parser functions - start = start or self.start - - def predict(nonterm, i): - assert not is_terminal(nonterm), nonterm - return [Item(rule, 0, i, []) for rule in self.predictions[nonterm]] - - def complete(item): - name = item.rule.origin - item.data = self.postprocess[item.rule](item.data) - return [i.advance(item.data) for i in item.start.to_predict if i.expect == name] - - def process_column(i, token, cur_set): - next_set = Column() - - while True: - to_predict = {x.expect for x in cur_set.to_predict.get_news() - if x.ptr} # if not part of an already predicted batch - to_reduce = cur_set.to_reduce.get_news() - if not (to_predict or to_reduce): - break - - for nonterm in to_predict: - cur_set.add( predict(nonterm, cur_set) ) - for item in to_reduce: - cur_set.add( complete(item) ) - - - if token is not END_TOKEN: - for item in cur_set.to_scan.get_news(): - match = item.expect[0](token) if callable(item.expect[0]) else item.expect[0] == token.type - if match: - next_set.add([item.advance(stream[i])]) - - if not next_set and token is not END_TOKEN: - expect = {i.expect[-1] for i in cur_set.to_scan} - raise UnexpectedToken(token, expect, stream, i) - - return cur_set, next_set - - # Main loop starts - column0 = Column() - column0.add(predict(start, column0)) - - cur_set = column0 - for i, char in enumerate(stream): - _, cur_set = process_column(i, char, cur_set) - - last_set, _ = process_column(len(stream), END_TOKEN, cur_set) - - # Parse ended. Now build a parse tree - solutions = [n.data for n in last_set.to_reduce - if n.rule.origin==start and n.start is column0] - - if not solutions: - raise ParseError('Incomplete parse: Could not find a solution to input') - - return solutions diff --git a/worlds/ff4fe/FreeEnterpriseForAP/f4c/lark/parsers/xearley.py b/worlds/ff4fe/FreeEnterpriseForAP/f4c/lark/parsers/xearley.py deleted file mode 100644 index ba86c5c546fa..000000000000 --- a/worlds/ff4fe/FreeEnterpriseForAP/f4c/lark/parsers/xearley.py +++ /dev/null @@ -1,135 +0,0 @@ -"This module implements an experimental Earley Parser with a dynamic lexer" - -# The parser uses a parse-forest to keep track of derivations and ambiguations. 
-# When the parse ends successfully, a disambiguation stage resolves all ambiguity -# (right now ambiguity resolution is not developed beyond the needs of lark) -# Afterwards the parse tree is reduced (transformed) according to user callbacks. -# I use the no-recursion version of Transformer and Visitor, because the tree might be -# deeper than Python's recursion limit (a bit absurd, but that's life) -# -# The algorithm keeps track of each state set, using a corresponding Column instance. -# Column keeps track of new items using NewsList instances. -# -# Instead of running a lexer beforehand, or using a costy char-by-char method, this parser -# uses regular expressions by necessity, achieving high-performance while maintaining all of -# Earley's power in parsing any CFG. -# -# -# Author: Erez Shinan (2017) -# Email : erezshin@gmail.com - -from collections import defaultdict - -from ..common import ParseError, UnexpectedToken, Terminal -from ..lexer import Token -from ..tree import Tree -from .grammar_analysis import GrammarAnalyzer - -from .earley import ResolveAmbig, ApplyCallbacks, Item, NewsList, Derivation, END_TOKEN, Column - -class Parser: - def __init__(self, rules, start_symbol, callback, resolve_ambiguity=True, ignore=()): - self.analysis = GrammarAnalyzer(rules, start_symbol) - self.start_symbol = start_symbol - self.resolve_ambiguity = resolve_ambiguity - self.ignore = list(ignore) - - - self.postprocess = {} - self.predictions = {} - for rule in self.analysis.rules: - if rule.origin != '$root': # XXX kinda ugly - a = rule.alias - self.postprocess[rule] = a if callable(a) else (a and getattr(callback, a)) - self.predictions[rule.origin] = [x.rule for x in self.analysis.expand_rule(rule.origin)] - - def parse(self, stream, start_symbol=None): - # Define parser functions - start_symbol = start_symbol or self.start_symbol - delayed_matches = defaultdict(list) - - text_line = 1 - text_column = 0 - - def predict(nonterm, column): - assert not isinstance(nonterm, Terminal), nonterm - return [Item(rule, 0, column, None) for rule in self.predictions[nonterm]] - - def complete(item): - name = item.rule.origin - return [i.advance(item.tree) for i in item.start.to_predict if i.expect == name] - - def predict_and_complete(column): - while True: - to_predict = {x.expect for x in column.to_predict.get_news() - if x.ptr} # if not part of an already predicted batch - to_reduce = column.to_reduce.get_news() - if not (to_predict or to_reduce): - break - - for nonterm in to_predict: - column.add( predict(nonterm, column) ) - for item in to_reduce: - column.add( complete(item) ) - - def scan(i, token, column): - for x in self.ignore: - m = x.match(stream, i) - if m: - return column - - to_scan = column.to_scan.get_news() - - for item in to_scan: - m = item.expect.match(stream, i) - if m: - t = Token(item.expect.name, m.group(0), i, text_line, text_column) - delayed_matches[m.end()].append(item.advance(t)) - - s = m.group(0) - for j in range(1, len(s)): - m = item.expect.match(s[:-j]) - if m: - delayed_matches[m.end()].append(item.advance(m.group(0))) - - next_set = Column(i+1) - next_set.add(delayed_matches[i+1]) - del delayed_matches[i+1] # No longer needed, so unburden memory - - return next_set - - # Main loop starts - column0 = Column(0) - column0.add(predict(start_symbol, column0)) - - column = column0 - for i, token in enumerate(stream): - predict_and_complete(column) - column = scan(i, token, column) - - if token == '\n': - text_line += 1 - text_column = 0 - else: - text_column += 1 - - - 
predict_and_complete(column) - - # Parse ended. Now build a parse tree - solutions = [n.tree for n in column.to_reduce - if n.rule.origin==start_symbol and n.start is column0] - - if not solutions: - raise ParseError('Incomplete parse: Could not find a solution to input') - elif len(solutions) == 1: - tree = solutions[0] - else: - tree = Tree('_ambig', solutions) - - if self.resolve_ambiguity: - ResolveAmbig().visit(tree) - - return ApplyCallbacks(self.postprocess).transform(tree) - - diff --git a/worlds/ff4fe/FreeEnterpriseForAP/f4c/lark/reconstruct.py b/worlds/ff4fe/FreeEnterpriseForAP/f4c/lark/reconstruct.py deleted file mode 100644 index 590a8e73cbe3..000000000000 --- a/worlds/ff4fe/FreeEnterpriseForAP/f4c/lark/reconstruct.py +++ /dev/null @@ -1,106 +0,0 @@ -import re -from collections import defaultdict - -from .tree import Tree -from .common import is_terminal, ParserConf, PatternStr, Terminal -from .lexer import Token -from .parsers import earley - - - -def is_discarded_terminal(t): - return is_terminal(t) and t.startswith('_') - -def is_iter_empty(i): - try: - _ = next(i) - return False - except StopIteration: - return True - -class Reconstructor: - def __init__(self, parser): - # Recreate the rules to assume a standard lexer - _tokens, rules, _grammar_extra = parser.grammar.compile(lexer='standard', start='whatever') - tokens = {t.name:t for t in _tokens} - - token_res = {t.name:re.compile(t.pattern.to_regexp()) for t in _tokens} - - class MatchTerminal(Terminal): - def match(self, other): - if isinstance(other, Tree): - return False - return token_res[self.data].match(other) is not None - - class MatchTree(Terminal): - def match(self, other): - try: - return self.data == other.data - except AttributeError: - return False - - class WriteTokens: - def __init__(self, name, expansion): - self.name = name - self.expansion = expansion - - def f(self, args): - args2 = iter(args) - to_write = [] - for sym in self.expansion: - if is_discarded_terminal(sym): - t = tokens[sym] - assert isinstance(t.pattern, PatternStr) - to_write.append(t.pattern.value) - else: - x = next(args2) - if isinstance(x, list): - to_write += x - else: - if isinstance(x, Token): - assert x.type == sym, x - else: - assert x.data == sym, x - to_write.append(x) - - assert is_iter_empty(args2) - - return to_write - - d = defaultdict(list) - for name, (expansions, _o) in rules.items(): - for expansion, alias in expansions: - if alias: - d[alias].append(expansion) - d[name].append([alias]) - else: - d[name].append(expansion) - - rules = [] - expand1s = {name for name, (_x, options) in parser.rules.items() - if options and options.expand1} - - for name, expansions in d.items(): - for expansion in expansions: - reduced = [sym if sym.startswith('_') or sym in expand1s else - MatchTerminal(sym) if is_terminal(sym) else MatchTree(sym) - for sym in expansion if not is_discarded_terminal(sym)] - - rules.append((name, reduced, WriteTokens(name, expansion).f, None)) - self.rules = rules - - - def _reconstruct(self, tree): - # TODO: ambiguity? 
- parser = earley.Parser(self.rules, tree.data, {}) - res = parser.parse(tree.children) - for item in res: - if isinstance(item, Tree): - for x in self._reconstruct(item): - yield x - else: - yield item - - def reconstruct(self, tree): - return ''.join(self._reconstruct(tree)) - diff --git a/worlds/ff4fe/FreeEnterpriseForAP/f4c/lark/sre_parse.py b/worlds/ff4fe/FreeEnterpriseForAP/f4c/lark/sre_parse.py deleted file mode 100644 index 7661f42590e0..000000000000 --- a/worlds/ff4fe/FreeEnterpriseForAP/f4c/lark/sre_parse.py +++ /dev/null @@ -1,6 +0,0 @@ -import warnings -warnings.warn(f"module {__name__!r} is deprecated", - DeprecationWarning, - stacklevel=2) -from . import _parser as _ -globals().update({k: v for k, v in vars(_).items() if k[:2] != '__'}) diff --git a/worlds/ff4fe/FreeEnterpriseForAP/f4c/lark/tools/__init__.py b/worlds/ff4fe/FreeEnterpriseForAP/f4c/lark/tools/__init__.py deleted file mode 100644 index e69de29bb2d1..000000000000 diff --git a/worlds/ff4fe/FreeEnterpriseForAP/f4c/lark/tools/nearley.py b/worlds/ff4fe/FreeEnterpriseForAP/f4c/lark/tools/nearley.py deleted file mode 100644 index c3cce0fc8d31..000000000000 --- a/worlds/ff4fe/FreeEnterpriseForAP/f4c/lark/tools/nearley.py +++ /dev/null @@ -1,144 +0,0 @@ -"Converts between Lark and Nearley grammars. Work in progress!" - -import os.path -import sys - - -from ...lark import Lark, InlineTransformer, Transformer - -nearley_grammar = r""" - start: (ruledef|directive)+ - - directive: "@" NAME (STRING|NAME) - | "@" JS -> js_code - ruledef: NAME "->" expansions - | NAME REGEXP "->" expansions -> macro - expansions: expansion ("|" expansion)* - - expansion: expr+ js - - ?expr: item [":" /[+*?]/] - - ?item: rule|string|regexp - | "(" expansions ")" - - rule: NAME - string: STRING - regexp: REGEXP - JS: /(?s){%.*?%}/ - js: JS? 
- - NAME: /[a-zA-Z_$]\w*/ - COMMENT: /\#[^\n]*/ - REGEXP: /\[.*?\]/ - STRING: /".*?"/ - - %import common.WS - %ignore WS - %ignore COMMENT - - """ - -nearley_grammar_parser = Lark(nearley_grammar, parser='earley', lexer='standard') - -def _get_rulename(name): - name = {'_': '_ws_maybe', '__':'_ws'}.get(name, name) - return 'n_' + name.replace('$', '__DOLLAR__').lower() - -class NearleyToLark(InlineTransformer): - def __init__(self): - self._count = 0 - self.extra_rules = {} - self.extra_rules_rev = {} - self.alias_js_code = {} - - def _new_function(self, code): - name = 'alias_%d' % self._count - self._count += 1 - - self.alias_js_code[name] = code - return name - - def _extra_rule(self, rule): - if rule in self.extra_rules_rev: - return self.extra_rules_rev[rule] - - name = 'xrule_%d' % len(self.extra_rules) - assert name not in self.extra_rules - self.extra_rules[name] = rule - self.extra_rules_rev[rule] = name - return name - - def rule(self, name): - return _get_rulename(name) - - def ruledef(self, name, exps): - return '!%s: %s' % (_get_rulename(name), exps) - - def expr(self, item, op): - rule = '(%s)%s' % (item, op) - return self._extra_rule(rule) - - def regexp(self, r): - return '/%s/' % r - - def string(self, s): - return self._extra_rule(s) - - def expansion(self, *x): - x, js = x[:-1], x[-1] - if js.children: - js_code ,= js.children - js_code = js_code[2:-2] - alias = '-> ' + self._new_function(js_code) - else: - alias = '' - return ' '.join(x) + alias - - def expansions(self, *x): - return '%s' % ('\n |'.join(x)) - - def start(self, *rules): - return '\n'.join(filter(None, rules)) - -def _nearley_to_lark(g, builtin_path, n2l, js_code): - rule_defs = [] - - tree = nearley_grammar_parser.parse(g) - for statement in tree.children: - if statement.data == 'directive': - directive, arg = statement.children - if directive == 'builtin': - with open(os.path.join(builtin_path, arg[1:-1])) as f: - text = f.read() - rule_defs += _nearley_to_lark(text, builtin_path, n2l, js_code) - else: - assert False, directive - elif statement.data == 'js_code': - code ,= statement.children - code = code[2:-2] - js_code.append(code) - elif statement.data == 'macro': - pass # TODO Add support for macros! 
- elif statement.data == 'ruledef': - rule_defs.append( n2l.transform(statement) ) - else: - raise Exception("Unknown statement: %s" % statement) - - return rule_defs - - -def main(): - if len(sys.argv) < 3: - print("Reads Nearley grammar (with js functions) outputs an equivalent lark parser.") - print("Usage: %s " % sys.argv[0]) - return - - fn, start, nearley_lib = sys.argv[1:] - with open(fn) as f: - grammar = f.read() - - -if __name__ == '__main__': - main() - # test() diff --git a/worlds/ff4fe/FreeEnterpriseForAP/f4c/lark/tree.py b/worlds/ff4fe/FreeEnterpriseForAP/f4c/lark/tree.py deleted file mode 100644 index 962ac24eace5..000000000000 --- a/worlds/ff4fe/FreeEnterpriseForAP/f4c/lark/tree.py +++ /dev/null @@ -1,172 +0,0 @@ -from copy import deepcopy - -from .utils import inline_args - -class Tree(object): - def __init__(self, data, children): - self.data = data - self.children = list(children) - - def __repr__(self): - return 'Tree(%s, %s)' % (self.data, self.children) - - def _pretty_label(self): - return self.data - - def _pretty(self, level, indent_str): - if len(self.children) == 1 and not isinstance(self.children[0], Tree): - return [ indent_str*level, self._pretty_label(), '\t', '%s' % self.children[0], '\n'] - - l = [ indent_str*level, self._pretty_label(), '\n' ] - for n in self.children: - if isinstance(n, Tree): - l += n._pretty(level+1, indent_str) - else: - l += [ indent_str*(level+1), '%s' % n, '\n' ] - - return l - - def pretty(self, indent_str=' '): - return ''.join(self._pretty(0, indent_str)) - - def expand_kids_by_index(self, *indices): - for i in sorted(indices, reverse=True): # reverse so that changing tail won't affect indices - kid = self.children[i] - self.children[i:i+1] = kid.children - - def __eq__(self, other): - try: - return self.data == other.data and self.children == other.children - except AttributeError: - return False - - def __hash__(self): - return hash((self.data, tuple(self.children))) - - def find_pred(self, pred): - if pred(self): - yield self - - for c in self.children: - if isinstance(c, Tree): - for t in c.find_pred(pred): - yield t - - def find_data(self, data): - return self.find_pred(lambda t: t.data == data) - - def scan_values(self, pred): - for c in self.children: - if isinstance(c, Tree): - for t in c.scan_values(pred): - yield t - else: - if pred(c): - yield c - - def iter_subtrees(self): - visited = set() - q = [self] - - while q: - subtree = q.pop() - if id(subtree) in visited: - continue # already been here from another branch - visited.add(id(subtree)) - yield subtree - q += [c for c in subtree.children if isinstance(c, Tree)] - - - def __deepcopy__(self, memo): - return type(self)(self.data, deepcopy(self.children, memo)) - - def copy(self): - return type(self)(self.data, self.children) - def set(self, data, children): - self.data = data - self.children = children - - - -class Transformer(object): - def _get_func(self, name): - return getattr(self, name) - - def transform(self, tree): - items = [self.transform(c) if isinstance(c, Tree) else c for c in tree.children] - try: - f = self._get_func(tree.data) - except AttributeError: - return self.__default__(tree.data, items) - else: - return f(items) - - def __default__(self, data, children): - return Tree(data, children) - - def __mul__(self, other): - return TransformerChain(self, other) - - -class TransformerChain(object): - def __init__(self, *transformers): - self.transformers = transformers - - def transform(self, tree): - for t in self.transformers: - tree = 
t.transform(tree) - return tree - - def __mul__(self, other): - return TransformerChain(*self.transformers + (other,)) - - - -class InlineTransformer(Transformer): - def _get_func(self, name): # use super()._get_func - return inline_args(getattr(self, name)).__get__(self) - - -class Visitor(object): - def visit(self, tree): - for child in tree.children: - if isinstance(child, Tree): - self.visit(child) - - f = getattr(self, tree.data, self.__default__) - f(tree) - return tree - - def __default__(self, tree): - pass - - -class Visitor_NoRecurse(Visitor): - def visit(self, tree): - subtrees = list(tree.iter_subtrees()) - - for subtree in reversed(subtrees): - getattr(self, subtree.data, self.__default__)(subtree) - return tree - - -class Transformer_NoRecurse(Transformer): - def transform(self, tree): - subtrees = list(tree.iter_subtrees()) - - def _t(t): - # Assumes t is already transformed - try: - f = self._get_func(t.data) - except AttributeError: - return self.__default__(t) - else: - return f(t) - - for subtree in reversed(subtrees): - subtree.children = [_t(c) if isinstance(c, Tree) else c for c in subtree.children] - - return _t(tree) - - def __default__(self, t): - return t diff --git a/worlds/ff4fe/FreeEnterpriseForAP/f4c/lark/utils.py b/worlds/ff4fe/FreeEnterpriseForAP/f4c/lark/utils.py deleted file mode 100644 index 32ff6347d2d0..000000000000 --- a/worlds/ff4fe/FreeEnterpriseForAP/f4c/lark/utils.py +++ /dev/null @@ -1,106 +0,0 @@ -import functools -import types -from collections import deque -from contextlib import contextmanager - -class fzset(frozenset): - def __repr__(self): - return '{%s}' % ', '.join(map(repr, self)) - - -def classify_bool(seq, pred): - true_elems = [] - false_elems = [] - - for elem in seq: - if pred(elem): - true_elems.append(elem) - else: - false_elems.append(elem) - - return true_elems, false_elems - -def classify(seq, key=None): - d = {} - for item in seq: - k = key(item) if (key is not None) else item - if k in d: - d[k].append(item) - else: - d[k] = [item] - return d - -def bfs(initial, expand): - open_q = deque(list(initial)) - visited = set(open_q) - while open_q: - node = open_q.popleft() - yield node - for next_node in expand(node): - if next_node not in visited: - visited.add(next_node) - open_q.append(next_node) - - - - -STRING_TYPE = str - -Str = type(u'') - - -def inline_args(f): - # print '@@', f.__name__, type(f), isinstance(f, types.FunctionType), isinstance(f, types.TypeType), isinstance(f, types.BuiltinFunctionType) - if isinstance(f, types.FunctionType): - @functools.wraps(f) - def _f_func(self, args): - return f(self, *args) - return _f_func - elif isinstance(f, (type, types.BuiltinFunctionType)): - @functools.wraps(f) - def _f_builtin(_self, args): - return f(*args) - return _f_builtin - elif isinstance(f, types.MethodType): - @functools.wraps(f.__func__) - def _f(self, args): - return f.__func__(self, *args) - return _f - else: - @functools.wraps(f.__call__.__func__) - def _f(self, args): - return f.__call__.__func__(self, *args) - return _f - - - -def compare(a, b): - if a == b: - return 0 - elif a > b: - return 1 - else: - return -1 - - -try: - from contextlib import suppress # Python 3 -except ImportError: - @contextmanager - def suppress(*excs): - '''Catch and dismiss the provided exception - - >>> x = 'hello' - >>> with suppress(IndexError): - ... 
x = x[10] - >>> x - 'hello' - ''' - try: - yield - except excs: - pass - - - - diff --git a/worlds/sm/docs/en_Super Metroid.md b/worlds/sm/docs/en_Super Metroid.md index 5b990b638771..c8c1d0faabee 100644 --- a/worlds/sm/docs/en_Super Metroid.md +++ b/worlds/sm/docs/en_Super Metroid.md @@ -24,7 +24,7 @@ certain items to your own world. ## What does another world's item look like in Super Metroid? Two unique item sprites have been added to the game to represent items belonging to another world. Progression items have -a small up arrow on the sprite and non-progression don't.A unique item sprite has been added to the game to represent items belonging to another world. +a small up arrow on the sprite and non-progression don't. ## When the player receives an item, what happens? diff --git a/worlds/sm/variaRandomizer/patches/common/ips/basepatch.ips b/worlds/sm/variaRandomizer/patches/common/ips/basepatch.ips deleted file mode 100644 index efc38e262f19..000000000000 Binary files a/worlds/sm/variaRandomizer/patches/common/ips/basepatch.ips and /dev/null differ
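
The deleted f4c/lark/tree.py above carries the Tree/Transformer pattern that the rest of the removed lark modules build on: Transformer.transform() walks a Tree bottom-up and dispatches each node to a method named after that node's rule, falling back to a default that rebuilds the node. The following is a minimal standalone sketch of that dispatch idea, not the removed code itself; the rule names 'add' and 'num' and the EvalExpr class are hypothetical, chosen only for illustration, and the real deleted classes carry more behavior (pretty-printing, subtree iteration, transformer chaining) than shown here.

class Tree:
    # Bare-bones parse-tree node: a rule name plus a list of children
    # (children may be nested Trees or plain token strings).
    def __init__(self, data, children):
        self.data = data
        self.children = list(children)

class Transformer:
    # Bottom-up walk: transform children first, then dispatch on the rule
    # name; nodes with no matching method are rebuilt unchanged.
    def transform(self, tree):
        items = [self.transform(c) if isinstance(c, Tree) else c
                 for c in tree.children]
        handler = getattr(self, tree.data, None)
        return handler(items) if handler else Tree(tree.data, items)

class EvalExpr(Transformer):
    # Hypothetical rule handlers for a toy arithmetic grammar.
    def add(self, items):
        return items[0] + items[1]
    def num(self, items):
        return int(items[0])

# Usage: evaluating the tree for "2 + 3" yields 5.
tree = Tree('add', [Tree('num', ['2']), Tree('num', ['3'])])
assert EvalExpr().transform(tree) == 5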