diff --git a/corral.json b/corral.json
index cd42ca7..d26ffa9 100644
--- a/corral.json
+++ b/corral.json
@@ -3,6 +3,10 @@
     "eohippus"
   ],
   "deps": [
+    {
+      "locator": "github.com/ponylang/appdirs.git",
+      "version": "0.1.5"
+    },
     {
       "locator": "github.com/chalcolith/kiuatan.git",
       "version": "1.5.2"
diff --git a/eohippus-cli/main.pony b/eohippus-cli/main.pony
index 6acf87b..7426f08 100644
--- a/eohippus-cli/main.pony
+++ b/eohippus-cli/main.pony
@@ -1,3 +1,4 @@
+use "appdirs"
 use "files"
 use "logger"
 
@@ -9,8 +10,6 @@ use server = "../eohippus/server"
 actor Main
   let _env: Env
   let _auth: FileAuth
-  let _context: parser.Context
-  let _grammar: parser.NamedRule val
 
   new create(env: Env) =>
     _env = env
@@ -31,11 +30,9 @@ actor Main
         server.EohippusLogFormatter)
     end
 
-    _context = parser.Context([])
-    _grammar = parser.Builder(_context).src_file.src_file
-
-    let pony_path = server.ServerUtils.get_pony_path(env)
-    let ponyc = server.ServerUtils.find_ponyc(env)
+    let parser_context = parser.Context([])
+    let parser_grammar: parser.NamedRule val =
+      parser.Builder(parser_context).src_file.src_file
 
     let workspace_dir =
       try
@@ -45,24 +42,85 @@ actor Main
       end
 
     let workspace_path = FilePath(_auth, workspace_dir)
+    env.err.print("workspace_path: " + workspace_path.path)
+
+    let workspace_cache = FilePath(
+      _auth, Path.join(workspace_path.path, ".eohippus"))
+    if not _check_cache(workspace_cache, "workspace cache") then
+      return
+    end
+    env.err.print("workspace_cache: " + workspace_cache.path)
+
+    let appdirs = AppDirs(env.vars, "eohippus")
+    let global_cache =
+      try
+        FilePath(_auth, appdirs.user_cache_dir()?)
+      else
+        env.err.print("unable to get user cache dir")
+        return
+      end
+    if not _check_cache(global_cache, "global_cache") then
+      return
+    end
+    env.err.print("global_cache: " + global_cache.path)
 
-    env.err.print("workspace_path " + workspace_path.path)
+    let pony_path_dirs = server.ServerUtils.get_pony_path_dirs(env)
+    env.err.print("pony_path_dirs:")
+    for path in pony_path_dirs.values() do
+      env.err.print("  " + path.path)
+    end
 
-    analyzer.EohippusAnalyzer(
-      logger,
-      _auth,
-      _grammar,
+    let ponyc = server.ServerUtils.find_ponyc(env)
+    match ponyc
+    | let ponyc_path: FilePath =>
+      env.err.print("ponyc_path: " + ponyc_path.path)
+    else
+      env.err.print("ponyc_path: None")
+    end
+
+    let pony_packages = server.ServerUtils.find_pony_packages(env, ponyc)
+    match pony_packages
+    | let pony_packages_path: FilePath =>
+      env.err.print("pony_packages_path: " + pony_packages_path.path)
+    else
+      env.err.print("pony_packages_path: None")
+    end
+
+    let analyzer_context = analyzer.AnalyzerContext(
+      FileAuth(env.root),
       workspace_path,
-      None,
-      pony_path,
+      workspace_cache,
+      global_cache,
+      pony_path_dirs,
       ponyc,
-      None,
-      this)
+      pony_packages,
+      parser_grammar)
+
+    let analyze = analyzer.EohippusAnalyzer(logger, analyzer_context, this)
+    analyze.analyze()
+
+  fun _check_cache(path: FilePath, name: String): Bool =>
+    try
+      if (not path.exists()) and (not path.mkdir()) then
+        _env.err.print(
+          "unable to create " + name + ": " + path.path)
+        return false
+      end
+      let info = FileInfo(path)?
+      if not info.directory then
+        _env.err.print(name + " is not a directory: " + path.path)
+        return false
+      end
+    else
+      _env.err.print("unable to access " + name + ": " + path.path)
+      return false
+    end
+    true
 
   be parsed_file(
     analyze: analyzer.Analyzer,
     task_id: USize,
-    canonical_name: String,
+    canonical_name: FilePath,
     syntax_tree: ast.Node,
     line_beginnings: ReadSeq[parser.Loc] val)
   =>
@@ -81,7 +139,7 @@
   be analyzed_file(
     analyze: analyzer.Analyzer,
     task_id: USize,
-    canonical_path: String,
+    canonical_path: FilePath,
     syntax_tree: (ast.Node | None),
     file_scope: (analyzer.Scope val | None),
     parse_errors: ReadSeq[analyzer.AnalyzerError] val,
@@ -95,19 +153,19 @@
   be analyze_failed(
     analyze: analyzer.Analyzer,
     task_id: USize,
-    canonical_path: String,
+    canonical_path: FilePath,
     errors: ReadSeq[analyzer.AnalyzerError] val)
   =>
     print_errors(errors, canonical_path)
 
   fun print_errors(
     errors: ReadSeq[analyzer.AnalyzerError] val,
-    path: (String | None))
+    path: (FilePath | None))
   =>
     for e in errors.values() do
       match path
-      | let path': String =>
-        if e.canonical_path != path' then
+      | let path': FilePath =>
+        if e.canonical_path.path != path'.path then
           continue
         end
       end
@@ -124,6 +182,6 @@ actor Main
           "HINT"
         end
       _env.err.print(
-        e.canonical_path + ":" + (e.line + 1).string() + ":" +
+        e.canonical_path.path + ":" + (e.line + 1).string() + ":" +
         (e.column + 1).string() + ": " + kind + ": " + e.message)
     end
diff --git a/eohippus-lsp/main.pony b/eohippus-lsp/main.pony
index 83316bb..29fed34 100644
--- a/eohippus-lsp/main.pony
+++ b/eohippus-lsp/main.pony
@@ -1,3 +1,4 @@
+use "files"
 use "logger"
 use "net"
 
@@ -34,7 +35,15 @@
         Error, env.err, {(s: String): String => s },
         ls.EohippusLogFormatter)
     end
-    let config = recover val ls.ServerConfig(options.ponyc_executable) end
+    let config =
+      recover val
+        let ponyc_path =
+          match options.ponyc_executable
+          | let path_str: String =>
+            FilePath(FileAuth(env.root), path_str)
+          end
+        ls.ServerConfig(ponyc_path)
+      end
 
     match options.command
     | StdioCommand =>
diff --git a/eohippus-vscode/test_folder/builtin_test/ini.pony b/eohippus-vscode/test_folder/builtin_test/ini.pony
new file mode 100644
index 0000000..38144f0
--- /dev/null
+++ b/eohippus-vscode/test_folder/builtin_test/ini.pony
@@ -0,0 +1,147 @@
+"""
+# Ini package
+
+The Ini package provides support for parsing
+[INI file](https://en.wikipedia.org/wiki/INI_file) formatted text.
+
+* Currently _does not_ support multi-line entries.
+* Any keys not in a section will be placed in the section ""
+
+# Example code
+```pony
+// Parses the file 'example.ini' in the current working directory
+// Output all the content
+use "ini"
+use "files"
+
+actor Main
+  new create(env: Env) =>
+    try
+      let ini_file = File(FilePath(FileAuth(env.root), "example.ini"))
+      let sections = IniParse(ini_file.lines())?
+      for section in sections.keys() do
+        env.out.print("Section name is: " + section)
+        for key in sections(section)?.keys() do
+          env.out.print(key + " = " + sections(section)?(key)?)
+        end
+      end
+    end
+```
+"""
+primitive IniIncompleteSection
+primitive IniNoDelimiter
+
+type IniError is
+  ( IniIncompleteSection
+  | IniNoDelimiter
+  )
+
+interface IniNotify
+  """
+  Notifications for INI parsing.
+  """
+  fun ref apply(section: String, key: String, value: String): Bool
+    """
+    This is called for every valid entry in the INI file. If key/value pairs
+    occur before a section name, the section can be an empty string. Return
+    false to halt processing.
+    """
+
+  fun ref add_section(section: String): Bool =>
+    """
+    This is called for every valid section in the INI file. Return false
+    to halt processing.
+    """
+    true
+
+  fun ref errors(line: USize, err: IniError): Bool =>
+    """
+    This is called for each error encountered. Return false to halt processing.
+    """
+    true
+
+primitive Ini
+  """
+  A streaming parser for INI formatted lines of text.
+  """
+  fun apply(lines: Iterator[String box], f: IniNotify): Bool =>
+    """
+    This accepts a string iterator and calls the IniNotify for each new entry.
+    If any errors are encountered, this will return false. Otherwise, it
+    returns true.
+    """
+    var section = ""
+    var lineno = USize(0)
+    var ok = true
+
+    for line in lines do
+      lineno = lineno + 1
+      var current = line.clone()
+      current.strip()
+
+      if current.size() == 0 then
+        continue
+      end
+
+      try
+        match current(0)?
+        | ';' | '#' =>
+          // Skip comments.
+          continue
+        | '[' =>
+          try
+            current.delete(current.find("]", 1)?, -1)
+            current.delete(0)
+            section = consume current
+            if not f.add_section(section) then
+              return ok
+            end
+          else
+            ok = false
+
+            if not f.errors(lineno, IniIncompleteSection) then
+              return false
+            end
+          end
+        else
+          try
+            let delim = try
+              current.find("=")?
+            else
+              current.find(":")?
+            end
+
+            let value = current.substring(delim + 1)
+            value.strip()
+
+            current.delete(delim, -1)
+            current.strip()
+
+            try
+              let comment = try
+                value.find(";")?
+              else
+                value.find("#")?
+              end
+
+              match value(comment.usize() - 1)?
+              | ' ' | '\t' =>
+                value.delete(comment, -1)
+                value.rstrip()
+              end
+            end
+
+            if not f(section, consume current, consume value) then
+              return ok
+            end
+          else
+            ok = false
+
+            if not f.errors(lineno, IniNoDelimiter) then
+              return false
+            end
+          end
+        end
+      end
+    end
+    ok
diff --git a/eohippus/analyzer/analyzer.pony b/eohippus/analyzer/analyzer.pony
index ebb342c..25714ad 100644
--- a/eohippus/analyzer/analyzer.pony
+++ b/eohippus/analyzer/analyzer.pony
@@ -1,1764 +1,10 @@
-use "collections"
 use "files"
-use "itertools"
-use "logger"
-use "time"
-
-use ast = "../ast"
-use json = "../json"
-use linter = "../linter"
 use parser = "../parser"
-use ".."
interface tag Analyzer - be open_file(task_id: USize, canonical_path: String, parse: parser.Parser) - be update_file(task_id: USize, canonical_path: String, parse: parser.Parser) - be close_file(task_id: USize, canonical_path: String) + be open_file(task_id: USize, canonical_path: FilePath, parse: parser.Parser) + be update_file(task_id: USize, canonical_path: FilePath, parse: parser.Parser) + be close_file(task_id: USize, canonical_path: FilePath) be request_info( - task_id: USize, canonical_path: String, notify: AnalyzerRequestNotify) + task_id: USize, canonical_path: FilePath, notify: AnalyzerRequestNotify) be dispose() - -actor EohippusAnalyzer is Analyzer - let _log: Logger[String] - let _auth: FileAuth - var _workspace: (FilePath | None) - var _storage_path: (FilePath | None) - var _pony_path: ReadSeq[FilePath] - var _ponyc_executable: (FilePath | None) - var _pony_packages_path: (FilePath | None) - let _grammar: parser.NamedRule val - let _notify: AnalyzerNotify - - let _lint_configs: Map[String, linter.Config val] = _lint_configs.create() - - let _src_items: Map[String, SrcItem] = _src_items.create() - let _src_item_queue: Queue[SrcItem] = _src_item_queue.create() - var _num_process_messages: USize = 0 - - var _analysis_task_id: USize = 0 - let _workspace_errors: Map[String, Array[AnalyzerError]] = - _workspace_errors.create() - let _parse_errors: Map[String, Array[AnalyzerError]] = - _parse_errors.create() - let _lint_errors: Map[String, Array[AnalyzerError]] = - _lint_errors.create() - let _analyze_errors: Map[String, Array[AnalyzerError]] = - _analyze_errors.create() - - let _pending_requests: Map[String, MapIs[AnalyzerRequestNotify, Set[USize]]] = - _pending_requests.create() - - var _iteration: USize = 0 - var _disposing: Bool = false - - new create( - log: Logger[String], - auth: FileAuth, - grammar: parser.NamedRule val, - workspace: (FilePath | None), - storage_path: (FilePath | None), - pony_path: ReadSeq[FilePath] val, - ponyc_executable: (FilePath | None), - pony_packages_path: (FilePath | None), - notify: AnalyzerNotify) - => - _log = log - _auth = auth - _grammar = grammar - _workspace = workspace - _storage_path = storage_path - _pony_path = pony_path - _ponyc_executable = ponyc_executable - _pony_packages_path = pony_packages_path - _notify = notify - - match _workspace - | let fp: FilePath => - let ws = - match try fp.canonical()? end - | let fp': FilePath => - _workspace = fp' - fp' - else - fp - end - try - let info = FileInfo(ws)? - if not info.directory then - _log(Error) and _log.log(fp.path + " is not a directory") - _workspace_errors(ws.path) = - [ AnalyzerError( - ws.path, AnalyzeError, "workspace is not a directory") ] - _workspace = None - end - else - _log(Error) and _log.log(fp.path + " does not exist") - _workspace_errors(ws.path) = - [ AnalyzerError( - ws.path, AnalyzeError, "workspace directory does not exist") ] - _workspace = None - end - end - match _workspace - | let ws: FilePath => - _log(Fine) and _log.log("workspace is " + ws.path) - else - _log(Fine) and _log.log("workspace is None") - end - - match _storage_path - | let fp: FilePath => - let sp = - match try fp.canonical()? end - | let fp': FilePath => - _storage_path = fp' - fp' - else - fp - end - try - let info = FileInfo(sp)? 
- if not info.directory then - _log(Error) and _log.log(fp.path + " is not a directory") - _workspace_errors(sp.path) = - [ AnalyzerError( - sp.path, AnalyzeError, "storage path is not a directory") ] - _storage_path = None - end - else - _log(Error) and _log.log(fp.path + " unable to stat") - _workspace_errors(sp.path) = - [ AnalyzerError( - sp.path, AnalyzeError, "unable to stat storage path") ] - _storage_path = None - end - else - match _workspace - | let fp: FilePath => - try - let sp = fp.join(".eohippus")? - if (not sp.exists() and not sp.mkdir()) then - _log(Error) and _log.log("unable to create " + sp.path) - _workspace_errors(sp.path) = - [ AnalyzerError( - sp.path, AnalyzeError, "unable to create storage directory") ] - _storage_path = None - else - try - let fi = FileInfo(sp)? - if not fi.directory then - _log(Error) and _log.log(sp.path + " is not a directory") - _workspace_errors(sp.path) = - [ AnalyzerError( - sp.path, AnalyzeError, "storage path is not a directory") ] - else - _storage_path = sp - end - else - _log(Error) and _log.log(sp.path + " unable to stat") - _workspace_errors(sp.path) = - [ AnalyzerError( - sp.path, AnalyzeError, "unable to stat storage path") ] - end - end - else - _log(Error) and _log.log("unable to build storage path") - _workspace_errors(fp.path) = - [ AnalyzerError( - fp.path, AnalyzeError, "unable to build storage path") ] - end - end - end - match _storage_path - | let sp: FilePath => - _log(Fine) and _log.log("storage_path is " + sp.path) - else - _log(Fine) and _log.log("storage path is None") - end - - match _ponyc_executable - | let fp: FilePath => - // calling code should set this from PATH - let pe = - match try fp.canonical()? end - | let fp': FilePath => - _ponyc_executable = fp' - fp' - else - fp - end - try - let info = FileInfo(pe)? - if not info.file then - _log(Error) and _log.log(fp.path + " is not a file") - _workspace_errors(pe.path) = - [ AnalyzerError( - pe.path, AnalyzeError, "ponyc executable is not a file") ] - _ponyc_executable = None - end - else - _log(Error) and _log.log(fp.path + " does not exist") - _workspace_errors(pe.path) = - [ AnalyzerError( - pe.path, AnalyzeError, "ponyc executable does not exist") ] - end - end - match _ponyc_executable - | let pe: FilePath => - _log(Fine) and _log.log("ponyc_executable is " + pe.path) - else - _log(Fine) and _log.log("ponyc_executable is None") - end - - match _pony_packages_path - | let fp: FilePath => - let pp = - match try fp.canonical()? end - | let fp': FilePath => - _pony_packages_path = fp' - fp' - else - fp - end - try - let info = FileInfo(pp)? - if not info.directory then - _log(Error) and _log.log(fp.path + " is not a directory") - _workspace_errors(pp.path) = - [ AnalyzerError( - pp.path, AnalyzeError, "pony packages path is not a directory") ] - _pony_packages_path = None - end - else - _log(Error) and _log.log(fp.path + " does not exist") - _workspace_errors(pp.path) = - [ AnalyzerError( - pp.path, AnalyzeError, "pony packages path does not exist") ] - _pony_packages_path = None - end - else - match _ponyc_executable - | let pe: FilePath => - try - (let dir, _) = Path.split(pe.path) - let pp = - FilePath(_auth, Path.join(dir, "../../packages")).canonical()? - if pp.exists() then - let fi = FileInfo(pp)? 
- if fi.directory then - _pony_packages_path = pp - end - end - end - end - end - - // if we are in a workspace, start analyzing - match _workspace - | let workspace_path: FilePath => - match _pony_packages_path - | let pp: FilePath => - _log(Fine) and _log.log("pony_packages_path is " + pp.path) - analyze(_analysis_task_id, workspace_path.path, [ pp.path ]) - else - _log(Fine) and _log.log("pony_packages_path is None") - analyze(_analysis_task_id, workspace_path.path, []) - end - _analysis_task_id = _analysis_task_id + 1 - end - - fun ref _get_next_task_id(): USize => - let result = _analysis_task_id - _analysis_task_id = _analysis_task_id + 1 - result - - be analyze( - task_id: USize, - canonical_path: String, - extra_paths: Array[String] val) - => - if _disposing then return end - _log(Fine) and _log.log(task_id.string() + ": analyzing " + canonical_path) - - _workspace_errors.clear() - _parse_errors.clear() - _lint_errors.clear() - _analyze_errors.clear() - - let all_paths: Array[String] trn = Array[String](1 + extra_paths.size()) - let workspace_path = - try - let fp = FilePath(_auth, canonical_path) - let fi = FileInfo(fp)? - if fi.directory then - canonical_path - else - (let dir, _) = Path.split(canonical_path) - dir - end - else - _log(Error) and _log.log( - task_id.string() + "error opening " + canonical_path) - _notify.analyze_failed( - this, - task_id, - canonical_path, - [ AnalyzerError( - canonical_path, - AnalyzeError, - "error opening " + canonical_path) - ]) - return - end - all_paths.push(workspace_path) - - for path in extra_paths.values() do - try - let fp = FilePath(_auth, path) - let fi = FileInfo(fp)? - if fi.directory then - all_paths.push(path) - else - (let dir, _) = Path.split(path) - all_paths.push(dir) - end - else - _log(Error) and _log.log(task_id.string() + "error opening " + path) - end - end - - for (i, path) in all_paths.pairs() do - let fp = FilePath(_auth, path) - fp.walk(this~_analyze_directory(i, task_id, workspace_path)) - end - - fun ref _analyze_directory( - path_num: USize, - task_id: USize, - workspace_path: String, - dir_path: FilePath, - entries: Array[String]) - => - // skip directories starting with '.' - let to_remove = Array[USize] - for (i, entry) in entries.pairs() do - try - if entry(0)? == '.' 
then - to_remove.unshift(i) - end - end - end - for index in to_remove.values() do - entries.remove(index, 1) - end - - // skip directories without Pony source files - var has_pony_source = false - for entry in entries.values() do - if _is_pony_file(entry) then - has_pony_source = true - break - end - end - if not has_pony_source then - return - end - - // enqueue package item - let package_path = dir_path.path - if _src_items.contains(package_path) then - return - end - - let package = SrcPackageItem(package_path) - package.task_id = task_id - package.is_workspace = package_path == workspace_path - - _log(Fine) and _log.log( - task_id.string() + ": enqueueing " + package_path) - - // enqueue source file items - for entry in entries.values() do - if _is_pony_file(entry) then - let file_canonical_path = Path.join(dir_path.path, entry) - if _src_items.contains(file_canonical_path) then - continue - end - - let src_file = SrcFileItem(file_canonical_path) - src_file.task_id = task_id - src_file.parent_package = package - src_file.schedule = _schedule(U64.from[USize](path_num) * 1000) - _src_items.update(file_canonical_path, src_file) - _enqueue_src_item(src_file) - package.dependencies.push(src_file) - _log(Fine) and _log.log( - task_id.string() + ": enqueueing " + file_canonical_path) - end - end - - _src_items.update(package_path, package) - _enqueue_src_item(package) - - fun tag _is_pony_file(fname: String): Bool => - let ext_size = ".pony".size() - if fname.size() <= ext_size then - return false - end - let index = ISize.from[USize](fname.size() - ext_size) - fname.compare_sub( - ".pony", ext_size, index where ignore_case = true) is Equal - - be open_file(task_id: USize, canonical_path: String, parse: parser.Parser) => - if _disposing then return end - _log(Fine) and _log.log(task_id.string() + ": opening " + canonical_path) - match try _src_items(canonical_path)? end - | let src_file: SrcFileItem => - src_file.task_id = task_id - src_file.state = AnalysisStart - src_file.schedule = _schedule(0) - src_file.is_open = true - src_file.parse = parse - _enqueue_src_item(src_file) - else - let src_file = SrcFileItem(canonical_path) - src_file.task_id = task_id - src_file.state = AnalysisStart - src_file.is_open = true - src_file.schedule = _schedule(0) - src_file.parse = parse - _src_items.update(canonical_path, src_file) - _enqueue_src_item(src_file) - end - - be update_file( - task_id: USize, - canonical_path: String, - parse: parser.Parser) - => - if _disposing then return end - _log(Fine) and _log.log(task_id.string() + ": updating " + canonical_path) - match try _src_items(canonical_path)? end - | let src_file: SrcFileItem => - src_file.task_id = task_id - src_file.schedule = _schedule(300) - src_file.is_open = true - src_file.parse = parse - _log(Fine) and _log.log(task_id.string() + ": found in-memory file") - _enqueue_src_item(src_file, AnalysisStart) - else - let src_file = SrcFileItem(canonical_path) - src_file.task_id = task_id - src_file.is_open = true - src_file.schedule = _schedule(300) - src_file.parse = parse - _src_items.update(canonical_path, src_file) - _enqueue_src_item(src_file, AnalysisStart) - _log(Fine) and _log.log( - task_id.string() + ": in-memory file not found; creating") - end - - be close_file(task_id: USize, canonical_path: String) => - match try _src_items(canonical_path)? 
end - | let src_file: SrcFileItem => - src_file.is_open = false - end - - be request_info( - task_id: USize, canonical_path: String, notify: AnalyzerRequestNotify) - => - _log(Fine) and _log.log(task_id.string() + ": request " + canonical_path) - - if not _src_items.contains(canonical_path) then - analyze(task_id, canonical_path, []) - end - - let notifys = - match try _pending_requests(canonical_path)? end - | let notifys': MapIs[AnalyzerRequestNotify, Set[USize]] => - notifys' - else - let notifys' = MapIs[AnalyzerRequestNotify, Set[USize]] - _pending_requests.update(canonical_path, notifys') - notifys' - end - let task_ids = - match try notifys(notify)? end - | let task_ids': Set[USize] => - task_ids' - else - let task_ids = Set[USize] - notifys.update(notify, task_ids) - task_ids - end - task_ids.set(task_id) - _process_src_item_queue() - - be dispose() => - _disposing = true - - fun ref _push_error( - errors: Map[String, Array[AnalyzerError]], - new_error: AnalyzerError) - => - let arr = - match try errors(new_error.canonical_path)? end - | let arr': Array[AnalyzerError] => - arr' - else - let arr' = Array[AnalyzerError] - errors(new_error.canonical_path) = arr' - arr' - end - arr.push(new_error) - - fun ref _clear_errors( - canonical_path: String, - errors: Map[String, Array[AnalyzerError]]) - => - try - errors.remove(canonical_path)? - end - - fun ref _clear_and_push( - canonical_path: String, - errors: Map[String, Array[AnalyzerError]], - new_error: AnalyzerError) - => - let arr = - match try errors(new_error.canonical_path)? end - | let arr': Array[AnalyzerError] => - arr'.clear() - arr' - else - let arr' = Array[AnalyzerError] - errors(new_error.canonical_path) = arr' - arr' - end - arr.push(new_error) - - fun _collect_errors( - errors: Map[String, Array[AnalyzerError]], - canonical_path: (String | None) = None) - : Array[AnalyzerError] val - => - let result: Array[AnalyzerError] trn = Array[AnalyzerError] - match canonical_path - | let key: String => - try - for err in errors(key)?.values() do - result.push(err) - end - end - else - for key in errors.keys() do - try - for err in errors(key)?.values() do - result.push(err) - end - end - end - end - consume result - - fun ref _enqueue_src_item( - src_item: SrcItem, - new_state: (SrcItemState | None) = None) - => - match new_state - | let new_state': SrcItemState => - src_item.set_state(new_state') - end - - _src_item_queue.push(src_item) - _process_src_item_queue() - - fun ref _process_src_item_queue() => - if _num_process_messages == 0 then - _num_process_messages = _num_process_messages + 1 - _process_src_item_queue_aux() - end - - be _process_src_item_queue_aux() => - if _num_process_messages > 0 then - _num_process_messages = _num_process_messages - 1 - end - - if _disposing then return end - - if (_iteration % 100) == 0 then - _log_src_item_queue() - end - _iteration = _iteration + 1 - - try - match _src_item_queue.pop()? 
- | let file_item: SrcFileItem => - _process_file_item(file_item) - | let package_item: SrcPackageItem => - _process_package_item(package_item) - end - end - - _process_pending_requests() - - if _src_item_queue.size() > 0 then - _process_src_item_queue() - end - - fun ref _log_src_item_queue() => - _log(Fine) and _log.log("ITEMS: " + _get_item_stats(_src_items.values())) - _log(Fine) and _log.log( - "QUEUE: " + _get_item_stats(_src_item_queue.values())) - - fun _get_item_stats(iter: Iterator[SrcItem]): String => - var num_starting: USize = 0 - var num_parsing: USize = 0 - var num_scoping: USize = 0 - var num_linting: USize = 0 - var num_error: USize = 0 - var num_up_to_date: USize = 0 - - for item in iter do - match item.get_state() - | AnalysisStart => - num_starting = num_starting + 1 - | AnalysisParse => - num_parsing = num_parsing + 1 - | AnalysisScope => - num_scoping = num_scoping + 1 - | AnalysisLint => - num_linting = num_linting + 1 - | AnalysisError => - num_error = num_error + 1 - | AnalysisUpToDate => - num_up_to_date = num_up_to_date + 1 - end - end - let str: String trn = String - str.append(num_starting.string()) - str.append(", ") - str.append(num_parsing.string()) - str.append(", ") - str.append(num_scoping.string()) - str.append(", ") - str.append(num_linting.string()) - str.append(", ") - str.append(num_error.string()) - str.append(", ") - str.append(num_up_to_date.string()) - consume str - - fun ref _process_package_item(package_item: SrcPackageItem) => - // count things - var num_starting: USize = 0 - var num_parsing: USize = 0 - var num_scoping: USize = 0 - var num_linting: USize = 0 - var num_error: USize = 0 - var num_up_to_date: USize = 0 - - for dep in package_item.dependencies.values() do - match dep.get_state() - | AnalysisStart => - num_starting = num_starting + 1 - | AnalysisParse => - num_parsing = num_parsing + 1 - | AnalysisScope => - num_scoping = num_scoping + 1 - | AnalysisLint => - num_linting = num_linting + 1 - | AnalysisError => - num_error = num_error + 1 - | AnalysisUpToDate => - num_up_to_date = num_up_to_date + 1 - end - end - - if num_error > 0 then - _log(Error) and _log.log( - package_item.task_id.string() + ": PACKAGE ERROR: " + - package_item.canonical_path) - - package_item.state = AnalysisError - - if package_item.is_workspace then - _log(Fine) and _log.log( - package_item.task_id.string() + ": workspace ERROR; notifying") - _notify_workspace(package_item) - end - elseif num_up_to_date == package_item.dependencies.size() then - _log(Fine) and _log.log( - package_item.task_id.string() + ": package up to date: " + - package_item.canonical_path) - - package_item.state = AnalysisUpToDate - - if package_item.is_workspace then - _log(Fine) and _log.log( - package_item.task_id.string() + ": workspace up to date; notifying") - _notify_workspace(package_item) - end - else - _enqueue_src_item(package_item) - end - - fun ref _notify_workspace(package_item: SrcPackageItem) => - _notify.analyzed_workspace( - this, - package_item.task_id, - _collect_errors(_workspace_errors), - _collect_errors(_parse_errors), - _collect_errors(_lint_errors), - _collect_errors(_analyze_errors)) - - fun ref _process_file_item(src_file: SrcFileItem) => - var needs_push = false - match src_file.state - | AnalysisStart => - try - src_file.storage_prefix = _storage_prefix(src_file.canonical_path)? - end - - try _workspace_errors.remove(src_file.canonical_path)? end - try _parse_errors.remove(src_file.canonical_path)? end - try _lint_errors.remove(src_file.canonical_path)? 
end - try _analyze_errors.remove(src_file.canonical_path)? end - - if src_file.is_open then - if _is_due(src_file.schedule) then - src_file.state = AnalysisParse - end - else - src_file.state = AnalysisParse - end - - if src_file.state is AnalysisParse then - _log(Fine) and _log.log( - src_file.task_id.string() + ": " + src_file.canonical_path + - " => Parsing") - else - _log(Fine) and _log.log( - src_file.task_id.string() + ": " + src_file.canonical_path + - ": NOT PARSING!") - end - needs_push = true - | AnalysisParse => - if src_file.is_open then - _parse_open_file(src_file) - else - _parse_disk_file(src_file) - end - | AnalysisScope => - _scope(src_file) - | AnalysisLint => - _lint(src_file) - | AnalysisError => - var errors: Array[AnalyzerError] trn = Array[AnalyzerError] - try - for err in _workspace_errors(src_file.canonical_path)?.values() do - errors.push(err) - end - end - try - for err in _parse_errors(src_file.canonical_path)?.values() do - errors.push(err) - end - end - try - for err in _lint_errors(src_file.canonical_path)?.values() do - errors.push(err) - end - end - try - for err in _analyze_errors(src_file.canonical_path)?.values() do - errors.push(err) - end - end - let errors': Array[AnalyzerError] val = consume errors - _notify.analyze_failed( - this, - src_file.task_id, - src_file.canonical_path, - errors') - - // try to free up some memory - if not src_file.is_open then - src_file.compact() - end - | AnalysisUpToDate => - _log(Fine) and _log.log( - src_file.task_id.string() + ": file up to date: " + - src_file.canonical_path) - - _notify.analyzed_file( - this, - src_file.task_id, - src_file.canonical_path, - src_file.syntax_tree, - None, - _collect_errors(_parse_errors), - _collect_errors(_lint_errors), - _collect_errors(_analyze_errors)) - - // try to free up some memory - if not src_file.is_open then - src_file.compact() - end - end - if needs_push then - _enqueue_src_item(src_file) - end - - fun ref _process_pending_requests() => - let paths_done = Array[String] - - for (canonical_path, notifys) in _pending_requests.pairs() do - match try _src_items(canonical_path)? end - | let file_item: SrcFileItem => - match file_item.state - | AnalysisUpToDate => - _pending_request_succeeded(file_item, notifys) - paths_done.push(canonical_path) - | AnalysisError => - _pending_request_failed(file_item, notifys) - paths_done.push(canonical_path) - end - | let package_item: SrcPackageItem => - let up_to_date = Iter[SrcItem](package_item.dependencies.values()) - .all({(pi) => pi.get_state()() is AnalysisUpToDate()}) - if up_to_date then - _pending_request_succeeded(package_item, notifys) - paths_done.push(canonical_path) - else - let any_errors = Iter[SrcItem](package_item.dependencies.values()) - .any({(pi) => pi.get_state()() is AnalysisError()}) - if any_errors then - _pending_request_failed(package_item, notifys) - paths_done.push(canonical_path) - end - end - end - end - - for path in paths_done.values() do - try - _pending_requests.remove(path)? 
- end - end - - fun ref _pending_request_succeeded( - src_item: (SrcFileItem | SrcPackageItem), - notifys: MapIs[AnalyzerRequestNotify, Set[USize]]) - => - match src_item - | let file_item: SrcFileItem => - match (file_item.syntax_tree, file_item.nodes_by_index, file_item.scope) - | - (let st: ast.Node, let nbi: Map[USize, ast.Node] val, let sc: Scope val) - => - for (notify, task_ids) in notifys.pairs() do - for task_id in task_ids.values() do - _log(Fine) and _log.log( - task_id.string() + ": request succeeded: " - + file_item.canonical_path) - notify.request_succeeded( - task_id, file_item.canonical_path, st, nbi, sc) - end - end - end - | let package_item: SrcPackageItem => - let package_scope: Scope trn = Scope( - PackageScope, - package_item.canonical_path, - package_item.canonical_path, - (0, 0, USize.max_value(), USize.max_value()), - USize.max_value()) - - for dep in package_item.dependencies.values() do - match dep - | let file_item: SrcFileItem => - match file_item.scope - | let child_scope: Scope val => - package_scope.add_child(child_scope) - end - end - end - - let package_scope': Scope val = consume package_scope - - for (notify, task_ids) in notifys.pairs() do - for task_id in task_ids.values() do - _log(Fine) and _log.log( - task_id.string() + ": request succeeded: " - + package_item.canonical_path) - notify.request_succeeded( - task_id, - package_item.canonical_path, - None, - Map[USize, ast.Node], - package_scope') - end - end - end - - fun ref _pending_request_failed( - src_item: (SrcFileItem | SrcPackageItem), - notifys: MapIs[AnalyzerRequestNotify, Set[USize]]) - => - for (notify, task_ids) in notifys.pairs() do - for task_id in task_ids.values() do - _log(Fine) and _log.log( - task_id.string() + ": request failed: " + - src_item.get_canonical_path()) - notify.request_failed( - task_id, src_item.get_canonical_path(), "analysis failed") - end - end - - fun _schedule(millis: U64): (I64, I64) => - (var secs, var nanos) = Time.now() - nanos = nanos + I64.from[U64](Nanos.from_millis(millis)) - while nanos > 1_000_000_000 do - nanos = nanos - 1_000_000_000 - secs = secs + 1 - end - (secs, nanos) - - fun _is_due(schedule: (I64, I64)): Bool => - (let secs, let nanos) = Time.now() - if secs > schedule._1 then - true - elseif secs < schedule._1 then - false - else - nanos > schedule._2 - end - - fun ref _parse_open_file(src_file: SrcFileItem) => - if _disposing then return end - - _log(Fine) and _log.log( - src_file.task_id.string() + ": parsing in memory " + - src_file.canonical_path) - - let task_id = src_file.task_id - let canonical_path = src_file.canonical_path - match src_file.parse - | let parse': parser.Parser => - _parse(task_id, canonical_path, parse') - else - _log(Error) and _log.log( - task_id.string() + ": parse failed for " + canonical_path + "; no data") - end - - fun ref _parse_disk_file(src_file: SrcFileItem) => - if _disposing then return end - - _log(Fine) and _log.log( - src_file.task_id.string() + ": parsing on disk " + - src_file.canonical_path) - - let src_file_path = FilePath(_auth, src_file.canonical_path) - let syntax_tree_path = FilePath(_auth, _syntax_tree_path(src_file)) - if - syntax_tree_path.exists() and - (not _source_is_newer(src_file_path, syntax_tree_path)) - then - _log(Fine) and _log.log( - src_file.task_id.string() + ": cache is newer; not parsing " + - src_file.canonical_path) - - match _get_syntax_tree(src_file) - | let syntax_tree: ast.Node => - _collect_error_sections(src_file.canonical_path, syntax_tree) - src_file.syntax_tree = 
syntax_tree - else - _log(Error) and _log.log( - src_file.task_id.string() + ": unable to load syntax for " + - src_file.canonical_path) - return - end - - _enqueue_src_item(src_file, AnalysisScope) - return - end - - let task_id = src_file.task_id - let canonical_path = src_file.canonical_path - match OpenFile(FilePath(_auth, src_file.canonical_path)) - | let file: File ref => - let source = file.read(file.size()) - let segments: Array[ReadSeq[U8] val] val = - [ as ReadSeq[U8] val: consume source ] - let parse = parser.Parser(segments) - _parse(task_id, canonical_path, parse) - else - _log(Error) and _log.log("unable to read " + canonical_path) - _push_error(_workspace_errors, AnalyzerError( - canonical_path, AnalyzeError, "unable to read file")) - _enqueue_src_item(src_file, AnalysisError) - end - - fun ref _parse( - task_id: USize, - canonical_path: String, - parse: parser.Parser) - => - if _disposing then return end - - _log(Fine) and _log.log(task_id.string() + ": parsing " + canonical_path) - let self: EohippusAnalyzer tag = this - parse.parse( - _grammar, - parser.Data(canonical_path), - {(result: parser.Result, values: ast.NodeSeq) => - match result - | let success: parser.Success => - try - match values(0)? - | let node: ast.NodeWith[ast.SrcFile] => - // _log(Fine) and _log.log( - // task_id.string() + ": got SrcFile for " + canonical_path) - self._parsed_file(task_id, canonical_path, node) - else - _log(Error) and _log.log( - task_id.string() + ": " + canonical_path + - ": root node was not SrcFile") - self._parse_failed( - task_id, canonical_path, "root node was not SrcFile") - end - else - _log(Error) and _log.log( - task_id.string() + canonical_path + "failed to get SrcFile node") - self._parse_failed( - task_id, canonical_path, "failed to get SrcFile node") - end - | let failure: parser.Failure => - _log(Error) and _log.log( - task_id.string() + ": " + canonical_path + ": " + - failure.get_message()) - self._parse_failed(task_id, canonical_path, failure.get_message()) - end - }) - - fun ref _collect_error_sections(canonical_path: String, node: ast.Node) => - match node - | let es: ast.NodeWith[ast.ErrorSection] => - let si = es.src_info() - match (si.line, si.column, si.next_line, si.next_column) - | (let l: USize, let c: USize, let nl: USize, let nc: USize) => - // _log(Fine) and _log.log( - // "ErrorSection " + canonical_path + ": " + l.string() + ":" + - // c.string() + "-" + nl.string() + ":" + nc.string()) - - _push_error( - _parse_errors, - AnalyzerError( - canonical_path, AnalyzeError, es.data().message, l, c, nl, nc)) - end - end - for child in node.children().values() do - _collect_error_sections(canonical_path, child) - end - - be _parsed_file( - task_id: USize, - canonical_path: String, - node: ast.NodeWith[ast.SrcFile]) - => - if _disposing then return end - _process_src_item_queue() - - match try _src_items(canonical_path)? 
end - | let src_file: SrcFileItem => - if src_file.task_id != task_id then - _log(Fine) and _log.log( - "abandoning parse for task_id " + task_id.string() + - "; src_file is newer: " + src_file.task_id.string()) - return - end - - _clear_errors(canonical_path, _parse_errors) - (let syntax_tree, let lb, let errors) = ast.SyntaxTree.add_line_info(node) - if src_file.is_open then - _notify.parsed_file(this, task_id, canonical_path, syntax_tree, lb) - end - - if errors.size() > 0 then - for (n, message) in errors.values() do - _log(Error) and _log.log( - task_id.string() + ": line error " + canonical_path + ": " + message) - - let si = n.src_info() - match (si.line, si.column, si.next_line, si.next_column) - | (let l: USize, let c: USize, let nl: USize, let nc: USize) => - _push_error( - _parse_errors, - AnalyzerError(canonical_path, AnalyzeError, message, l, c, nl, nc)) - else - _push_error( - _parse_errors, - AnalyzerError(canonical_path, AnalyzeError, message)) - end - end - _enqueue_src_item(src_file, AnalysisError) - return - end - - src_file.syntax_tree = syntax_tree - src_file.make_indices() - _write_syntax_tree(src_file) - _collect_error_sections(canonical_path, syntax_tree) - - _log(Fine) and _log.log( - src_file.task_id.string() + ": " + src_file.canonical_path + - " => Scoping") - - _enqueue_src_item(src_file, AnalysisScope) - else - _log(Error) and _log.log( - task_id.string() + ": parsed untracked source file " + canonical_path) - end - - be _parse_failed( - task_id: USize, - canonical_path: String, - message: String, - line: USize = 0, - column: USize = 0, - next_line: USize = 0, - next_column: USize = 0) - => - if _disposing then return end - _process_src_item_queue() - - match try _src_items(canonical_path)? end - | let src_file: SrcFileItem => - if src_file.task_id != task_id then - _log(Fine) and _log.log( - task_id.string() + ": ignoring failed parse for " + canonical_path + - "; src_file is newer: " + src_file.task_id.string()) - return - end - - _log(Error) and _log.log( - task_id.string() + ": parse failed for " + canonical_path) - - _push_error(_parse_errors, AnalyzerError( - canonical_path, - AnalyzeError, - message, - line, - column, - next_line, - next_column)) - - let error_section = ast.NodeWith[ast.ErrorSection]( - ast.SrcInfo(canonical_path), [], ast.ErrorSection(message)) - let node = ast.NodeWith[ast.SrcFile]( - ast.SrcInfo(canonical_path), - [ error_section ], - ast.SrcFile(canonical_path, [], [])) - _write_syntax_tree(src_file, node) - - _log(Fine) and _log.log( - src_file.task_id.string() + ": " + src_file.canonical_path + - " => Error") - - _enqueue_src_item(src_file, AnalysisError) - end - - fun ref _write_syntax_tree( - src_file: SrcFileItem, - syntax_tree: (ast.Node | None) = None) - => - _log(Fine) and _log.log( - src_file.task_id.string() + ": writing syntax tree for " + - src_file.canonical_path) - - let st = - match - try - syntax_tree as ast.Node - else - try src_file.syntax_tree as ast.Node end - end - | let node: ast.Node => - node - else - _log(Error) and _log.log("unable to get syntax tree to write") - return - end - - let syntax_tree_path = _syntax_tree_path(src_file) - (let dir, _) = Path.split(syntax_tree_path) - let dir_path = FilePath(_auth, dir) - if (not dir_path.exists()) and (not dir_path.mkdir()) then - _log(Error) and _log.log("unable to create directory " + dir_path.path) - _push_error(_workspace_errors, AnalyzerError( - dir_path.path, AnalyzeError, "unable to create storage directory")) - return - end - - match 
CreateFile(FilePath(_auth, syntax_tree_path)) - | let file: File => - file.set_length(0) - let json_item = st.get_json() - let json_str = - ifdef debug then - json_item.get_string(true) - else - json_item.get_string(false) - end - _log(Fine) and _log.log( - src_file.task_id.string() + ": writing " + syntax_tree_path) - if not file.write(consume json_str) then - _log(Error) and _log.log( - src_file.task_id.string() + ": unable to write syntax tree file " + - syntax_tree_path) - _push_error(_workspace_errors, AnalyzerError( - src_file.canonical_path, - AnalyzeError, - "unable to write syntax tree file" + syntax_tree_path)) - end - else - _log(Error) and _log.log( - src_file.canonical_path + ": unable to create syntax tree file " + - syntax_tree_path) - _push_error(_workspace_errors, AnalyzerError( - src_file.canonical_path, - AnalyzeError, - "unable to create syntax tree file " + syntax_tree_path)) - end - - fun ref _get_syntax_tree(src_file: SrcFileItem): (ast.Node | None) => - match src_file.syntax_tree - | let node: ast.Node => - node - else - let syntax_path = FilePath(_auth, _syntax_tree_path(src_file)) - match OpenFile(syntax_path) - | let file: File => - let json_str = recover val file.read_string(file.size()) end - match json.Parse(json_str) - | let obj: json.Object => - match ast.ParseNode(src_file.canonical_path, obj) - | let node: ast.Node => - _log(Fine) and _log.log( - src_file.task_id.string() + ": loaded " + syntax_path.path) - src_file.syntax_tree = node - src_file.make_indices() - return node - | let err: String => - _log(Error) and _log.log( - src_file.task_id.string() + ": error loading " + - syntax_path.path + ": " + err) - end - | let item: json.Item => - _log(Error) and _log.log( - src_file.task_id.string() + ": error loading " + syntax_path.path + - ": a syntax tree must be an object") - | let err: json.ParseError => - _log(Error) and _log.log( - src_file.task_id.string() + ": error loading " + syntax_path.path + - ":" + err.index.string() + - ": " + err.message) - end - else - _log(Error) and _log.log( - src_file.task_id.string() + ": error opening " + syntax_path.path) - end - None - end - - fun ref _scope(src_file: SrcFileItem) => - if _disposing then return end - _process_src_item_queue() - - _log(Fine) and _log.log( - src_file.task_id.string() + ": scoping " + src_file.canonical_path) - - let src_file_path = FilePath(_auth, src_file.canonical_path) - let scope_path = FilePath(_auth, _scope_path(src_file)) - if - scope_path.exists() and - (not _source_is_newer(src_file_path, scope_path)) - then - _log(Fine) and _log.log( - src_file.task_id.string() + ": cache is newer; not scoping " + - src_file.canonical_path) - match _get_scope(src_file) - | let scope: Scope => - src_file.scope = scope - _enqueue_src_item(src_file, AnalysisLint) - return - else - _log(Error) and _log.log( - src_file.task_id.string() + ": unable to load scope for " + - src_file.canonical_path) - return - end - end - - match _get_syntax_tree(src_file) - | let syntax_tree: ast.Node => - let scoper = Scoper(_log, this) - scoper.scope_syntax_tree( - src_file.task_id, - src_file.canonical_path, - syntax_tree, - src_file.node_indices) - else - _log(Error) and _log.log( - src_file.task_id.string() + ": failed to get syntax tree for " + - src_file.canonical_path) - _enqueue_src_item(src_file, AnalysisError) - end - - fun ref _get_scope(src_file: SrcFileItem): (Scope | None) => - match src_file.scope - | let scope: Scope => - scope - else - let scope_path = FilePath(_auth, _scope_path(src_file)) - match 
OpenFile(scope_path) - | let file: File => - let json_str = recover val file.read_string(file.size()) end - match recover val json.Parse(json_str) end - | let obj: json.Object val => - match recover val ParseScopeJson(obj, None) end - | let scope: Scope val => - _log(Fine) and _log.log( - src_file.task_id.string() + ": loaded " + scope_path.path) - src_file.scope = scope - src_file.make_indices() - return scope - | let err: String => - _log(Error) and _log.log( - src_file.task_id.string() + ": error loading " + scope_path.path + - err) - end - | let item: json.Item val => - _log(Error) and _log.log( - src_file.task_id.string() + ": error loading " + scope_path.path + - ": a scope file must be an object") - | let err: json.ParseError => - _log(Error) and _log.log( - src_file.task_id.string() + ": error loading " + scope_path.path + - ":" + err.index.string() + ": " + err.message) - end - else - _log(Error) and _log.log( - src_file.task_id.string() + ": error opening " + scope_path.path) - end - None - end - - be scoped_file( - task_id: USize, - canonical_path: String, - syntax_tree: ast.Node, - scope: Scope val) - => - if _disposing then return end - _process_src_item_queue() - - match try _src_items(canonical_path)? end - | let src_file: SrcFileItem => - if src_file.task_id != task_id then - _log(Fine) and _log.log( - task_id.string() + ": abandoning scope for " + canonical_path + - "; src_file is newer: " + src_file.task_id.string()) - return - end - - _log(Fine) and _log.log( - task_id.string() + ": scoped " + canonical_path) - - src_file.syntax_tree = syntax_tree - src_file.scope = scope - src_file.make_indices() - - _write_syntax_tree(src_file) - _write_scope(src_file) - - //_process_imports(canonical_path, scope') - - _log(Fine) and _log.log( - task_id.string() + ": " + canonical_path + " => Linting") - - _enqueue_src_item(src_file, AnalysisLint) - else - _log(Error) and _log.log( - task_id.string() + ": scoped unknown file " + canonical_path) - end - - // fun ref _process_imports(canonical_path: String, scope: Scope) => - // (let base_path, _) = Path.split(scope.name) - - // var i: USize = 0 - // while i < scope.imports.size() do - // try - // (let alias, let import_path) = scope.imports(i)? - // match _try_analyze_import(base_path, import_path) - // | let canonical_import_path: String => - // try scope.imports.update(i, (alias, canonical_import_path))? end - // i = i + 1 - // continue - // else - // var found = false - // for pp in _pony_path.values() do - // match _try_analyze_import(pp.path, import_path) - // | let canonical_import_path: String => - // try scope.imports.update(i, (alias, canonical_import_path))? end - // found = true - // break - // end - // end - - // if found then - // i = i + 1 - // continue - // else - // match _pony_packages_path - // | let ppp: FilePath => - // match _try_analyze_import(ppp.path, import_path) - // | let canonical_import_path: String => - // try scope.imports.update(i, (alias, canonical_import_path))? end - // i = i + 1 - // continue - // end - // end - // end - // end - // _log(Error) and _log.log( - // "unable to resolve package " + import_path + " for " + canonical_path) - // end - // i = i + 1 - // end - - // fun ref _try_analyze_import(base_path: String, import_path: String) - // : (String | None) - // => - // let combined_path = FilePath(_auth, Path.join(base_path, import_path)) - // if combined_path.exists() then - // let canonical_path = - // try - // combined_path.canonical()? 
- // else - // combined_path - // end - // if not _src_items.contains(canonical_path.path) then - // analyze(_get_next_task_id(), canonical_path.path) - // else - // _log(Fine) and _log.log( - // "not analyzing existing import " + canonical_path.path) - // end - // return canonical_path.path - // end - - be scope_failed( - task_id: USize, - canonical_path: String, - errors: ReadSeq[ast.TraverseError] val) - => - if _disposing then return end - _process_src_item_queue() - - match try _src_items(canonical_path)? end - | let src_file: SrcFileItem => - if src_file.task_id != task_id then - _log(Fine) and _log.log( - task_id.string() + ": ignoring failed scope for " + canonical_path + - "; src_file is newer: " + src_file.task_id.string()) - return - end - - for (node, message) in errors.values() do - _log(Error) and _log.log( - src_file.task_id.string() + ": scope error: " + message) - let si = node.src_info() - match (si.line, si.column, si.next_line, si.next_column) - | (let l: USize, let c: USize, let nl: USize, let nc: USize) => - _push_error( - _analyze_errors, - AnalyzerError( - canonical_path, AnalyzeError, message, l, c, nl, nc)) - else - _push_error( - _analyze_errors, - AnalyzerError(canonical_path, AnalyzeError, message)) - end - end - _enqueue_src_item(src_file, AnalysisError) - else - _log(Error) and _log.log(task_id.string() + ": failed to scope unknown " + - canonical_path) - end - - fun ref _write_scope(src_file: SrcFileItem) => - _log(Fine) and _log.log( - src_file.task_id.string() + ": writing scope file for " + - src_file.canonical_path) - - let scope = - match src_file.scope - | let scope': Scope val => - scope' - else - _log(Error) and _log.log( - src_file.task_id.string() + ": no scope for " + - src_file.canonical_path) - return - end - - let scope_path = _scope_path(src_file) - (let dir, _) = Path.split(scope_path) - let dir_path = FilePath(_auth, dir) - if (not dir_path.exists()) and (not dir_path.mkdir()) then - _log(Error) and _log.log( - src_file.task_id.string() + "unable to create directory " + - dir_path.path) - _push_error(_workspace_errors, AnalyzerError( - dir_path.path, AnalyzeError, "unable to create storage directory")) - end - - match CreateFile(FilePath(_auth, scope_path)) - | let file: File => - file.set_length(0) - let json_item = scope.get_json() - let json_str = - ifdef debug then - json_item.get_string(true) - else - json_item.get_string(false) - end - _log(Fine) and _log.log( - src_file.task_id.string() + ": writing " + scope_path) - if not file.write(consume json_str) then - _log(Error) and _log.log( - src_file.task_id.string() + ": unable to write scope file " + - scope_path) - _push_error(_workspace_errors, AnalyzerError( - src_file.canonical_path, - AnalyzeError, - "unable to write scope file" + scope_path)) - end - else - _log(Error) and _log.log( - src_file.task_id.string() + ": unable to create scope file " + - scope_path) - _push_error(_workspace_errors, AnalyzerError( - src_file.canonical_path, - AnalyzeError, - "unable to create syntax tree file" + scope_path)) - end - - fun ref _lint(src_file: SrcFileItem) => - if _disposing then return end - - _log(Fine) and _log.log( - src_file.task_id.string() + ": linting " + src_file.canonical_path) - - let syntax_tree = - match _get_syntax_tree(src_file) - | let node: ast.Node => - node - else - _log(Error) and _log.log( - src_file.task_id.string() + ": unable to get syntax tree for " + - src_file.canonical_path) - _enqueue_src_item(src_file, AnalysisError) - return - end - - src_file.state = 
AnalysisLint - let config = _get_lint_config(src_file) - let canonical_path = src_file.canonical_path - let self: EohippusAnalyzer = this - let lint = linter.Linter( - config, - object tag is linter.LinterNotify - be lint_completed( - lint': linter.Linter, - task_id': USize, - tree': ast.Node, - issues': ReadSeq[linter.Issue] val, - errors': ReadSeq[ast.TraverseError] val) - => - self._linted_file(task_id', canonical_path, issues', errors') - - be linter_failed(task_id': USize, message': String) => - self._lint_failed(task_id', canonical_path, message') - end) - lint.lint(src_file.task_id, syntax_tree) - - be _linted_file( - task_id: USize, - canonical_path: String, - issues: ReadSeq[linter.Issue] val, - errors: ReadSeq[ast.TraverseError] val) - => - if _disposing then return end - _process_src_item_queue() - - match try _src_items(canonical_path)? end - | let src_file: SrcFileItem => - if src_file.task_id != task_id then - _log(Fine) and _log.log( - task_id.string() + ": abandoning lint for " + canonical_path + - "; src_file is newer: " + src_file.task_id.string()) - return - end - - _log(Fine) and _log.log( - task_id.string() + ": linted " + canonical_path + "; " + - issues.size().string() + " issues, " + errors.size().string() + - " errors") - - for issue in issues.values() do - try - let start = issue.start.head()?.src_info() - let next = issue.next.head()?.src_info() - match (start.line, start.column, next.line, next.column) - | (let l: USize, let c: USize, let nl: USize, let nc: USize) => - _push_error(_lint_errors, AnalyzerError( - canonical_path, - AnalyzeWarning, - issue.rule.message(), - l, - c, - nl, - nc)) - end - end - end - for (node, message) in errors.values() do - let si = node.src_info() - match (si.line, si.column, si.next_line, si.next_column) - | (let l: USize, let c: USize, let nl: USize, let nc: USize) => - _push_error(_lint_errors, AnalyzerError( - canonical_path, AnalyzeError, message, l, c, nl, nc)) - end - end - - _log(Fine) and _log.log( - src_file.task_id.string() + ": " + src_file.canonical_path + - " => UpToDate") - _enqueue_src_item(src_file, AnalysisUpToDate) - else - _log(Error) and _log.log( - task_id.string() + ": linted unknown file " + canonical_path) - end - - be _lint_failed(task_id: USize, canonical_path: String, message: String) => - if _disposing then return end - _process_src_item_queue() - - match try _src_items(canonical_path)? end - | let src_file: SrcFileItem => - if src_file.task_id != task_id then - _log(Fine) and _log.log( - task_id.string() + ": ignoring failed lint for " + canonical_path + - "; src_file is newer: " + src_file.task_id.string()) - return - end - - _log(Error) and _log.log( - task_id.string() + ": lint failed for " + canonical_path + ": " + - message) - - _push_error(_lint_errors, AnalyzerError( - canonical_path, AnalyzeError, "lint failed: " + message)) - _enqueue_src_item(src_file, AnalysisError) - else - _log(Error) and _log.log( - task_id.string() + ": failed to lint unknown file " + canonical_path) - end - - fun ref _get_lint_config(src_file: SrcFileItem): linter.Config val => - var cur_path = src_file.canonical_path - repeat - (var dir_path, _) = Path.split(cur_path) - try - return _lint_configs(dir_path)? 
- else - let editor_config_path = Path.join(dir_path, ".editorconfig") - let config_file_path = FilePath(_auth, editor_config_path) - if config_file_path.exists() then - match linter.EditorConfig.read(config_file_path) - | let config: linter.Config val => - _log(Fine) and _log.log( - src_file.task_id.string() + ": found .editorconfig " + - config_file_path.path) - _lint_configs.update(dir_path, config) - return config - | let err: String => - _log(Error) and _log.log( - src_file.task_id.string() + ": unable to read " + - config_file_path.path) - end - elseif - try - (dir_path == "") or - (dir_path == "/") or - ((dir_path.size() == 3) and (dir_path(1)? == ':')) - else - false - end - then - break - end - cur_path = dir_path - end - until false end - linter.EditorConfig.default() - - fun _storage_prefix(canonical_path: String): String ? => - match (_workspace, _storage_path) - | (let workspace_path: FilePath, let storage_path: FilePath) => - if - canonical_path.compare_sub( - workspace_path.path, workspace_path.path.size(), 0, 0) is Equal - then - let rest = canonical_path.substring( - ISize.from[USize](workspace_path.path.size() + 1)) - Path.join(storage_path.path, consume rest) - else - let rest = canonical_path.clone() .> replace(":", "_") - Path.join(storage_path.path, consume rest) - end - else - _log(Warn) and _log.log("no workspace or storage") - error - end - - fun _source_is_newer(source: FilePath, other: FilePath): Bool => - (let source_secs, let source_nanos) = - try - FileInfo(source)?.modified_time - else - _log(Error) and _log.log("unable to stat " + source.path) - return false - end - (let other_secs, let other_nanos) = - try - FileInfo(other)?.modified_time - else - _log(Error) and _log.log("unable to stat " + other.path) - return false - end - if source_secs > other_secs then - return true - elseif source_secs < other_secs then - return false - else - return source_nanos > other_nanos - end - - fun _syntax_tree_path(src_file: SrcFileItem box): String => - src_file.storage_prefix + ".syntax.json" - - fun _scope_path(src_file: SrcFileItem box): String => - src_file.storage_prefix + ".scope.json" diff --git a/eohippus/analyzer/analyzer_context.pony b/eohippus/analyzer/analyzer_context.pony new file mode 100644 index 0000000..b8ca7fa --- /dev/null +++ b/eohippus/analyzer/analyzer_context.pony @@ -0,0 +1,52 @@ +use "files" +use "logger" + +use parser = "../parser" + +class val AnalyzerContext + let file_auth: FileAuth + let workspace: FilePath + let workspace_cache: FilePath + let global_cache: FilePath + let pony_path_dirs: ReadSeq[FilePath] val + let ponyc_executable: (FilePath | None) + let pony_packages_path: (FilePath | None) + let grammar: parser.NamedRule val + + new val create( + file_auth': FileAuth, + workspace': FilePath, + workspace_cache': FilePath, + global_cache': FilePath, + pony_path_dirs': ReadSeq[FilePath] val, + ponyc_executable': (FilePath | None), + pony_packages_path': (FilePath | None), + grammar': parser.NamedRule val) + => + file_auth = file_auth' + workspace = workspace' + workspace_cache = workspace_cache' + global_cache = global_cache' + pony_path_dirs = pony_path_dirs' + ponyc_executable = ponyc_executable' + pony_packages_path = pony_packages_path' + + grammar = grammar' + + fun get_cache(canonical_path: FilePath): FilePath => + let fcp = canonical_path.path + let wsp = workspace.path + let wsl = wsp.size() + + let comp = + ifdef windows then + fcp.compare_sub(wsp, wsl where ignore_case = true) + else + fcp.compare_sub(wsp, wsl) + end + + if comp is 
Equal then + return workspace_cache + else + return global_cache + end diff --git a/eohippus/analyzer/analyzer_error.pony b/eohippus/analyzer/analyzer_error.pony index 24cc2b7..706d164 100644 --- a/eohippus/analyzer/analyzer_error.pony +++ b/eohippus/analyzer/analyzer_error.pony @@ -1,3 +1,5 @@ +use "files" + primitive AnalyzeError primitive AnalyzeWarning primitive AnalyzeInfo @@ -7,7 +9,7 @@ type AnalyzeSeverity is (AnalyzeError | AnalyzeWarning | AnalyzeInfo | AnalyzeHint) class val AnalyzerError - let canonical_path: String + let canonical_path: FilePath let severity: AnalyzeSeverity let message: String let line: USize @@ -16,7 +18,7 @@ class val AnalyzerError let next_column: USize new val create( - canonical_path': String, + canonical_path': FilePath, severity': AnalyzeSeverity, message': String, line': USize = 0, diff --git a/eohippus/analyzer/analyzer_notify.pony b/eohippus/analyzer/analyzer_notify.pony index aa2d71d..b59349d 100644 --- a/eohippus/analyzer/analyzer_notify.pony +++ b/eohippus/analyzer/analyzer_notify.pony @@ -1,4 +1,5 @@ use "collections" +use "files" use ast = "../ast" use parser = "../parser" @@ -7,7 +8,7 @@ interface tag AnalyzerNotify be parsed_file( analyze: Analyzer, task_id: USize, - canonical_name: String, + canonical_path: FilePath, syntax_tree: ast.Node, line_beginnings: ReadSeq[parser.Loc] val) @@ -22,7 +23,7 @@ interface tag AnalyzerNotify be analyzed_file( analyze: Analyzer, task_id: USize, - canonical_path: String, + canonical_path: FilePath, syntax_tree: (ast.Node | None), file_scope: (Scope val | None), parse_errors: ReadSeq[AnalyzerError] val, @@ -32,18 +33,18 @@ interface tag AnalyzerNotify be analyze_failed( analyze: Analyzer, task_id: USize, - canonical_path: String, + canonical_path: FilePath, errors: ReadSeq[AnalyzerError] val) interface tag AnalyzerRequestNotify be request_succeeded( task_id: USize, - canonical_path: String, + canonical_path: FilePath, syntax_tree: (ast.Node | None), nodes_by_index: Map[USize, ast.Node] val, scope: Scope val) be request_failed( task_id: USize, - canonical_path: String, + canonical_path: FilePath, message: String) diff --git a/eohippus/analyzer/eohippus_analyzer.pony b/eohippus/analyzer/eohippus_analyzer.pony new file mode 100644 index 0000000..0956e6b --- /dev/null +++ b/eohippus/analyzer/eohippus_analyzer.pony @@ -0,0 +1,1606 @@ +use "collections" +use "files" +use "itertools" +use "logger" +use "time" + +use ast = "../ast" +use json = "../json" +use linter = "../linter" +use parser = "../parser" +use ".." 
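+
+// EohippusAnalyzer drives analysis as a state machine: each SrcFileItem
+// advances through the SrcItemState stages defined in src_item.pony,
+//
+//   AnalysisStart -> AnalysisParse -> AnalysisScope -> AnalysisLint
+//     -> AnalysisUpToDate (or AnalysisError from any stage),
+//
+// while a SrcPackageItem aggregates the states of its file dependencies.
+// Parse and scope results are cached as "<cache_prefix>.syntax.json" and
+// "<cache_prefix>.scope.json"; a cached artifact is reused whenever it is
+// newer than the source file (see _source_is_newer).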
+ +actor EohippusAnalyzer is Analyzer + let _log: Logger[String] + + let _context: AnalyzerContext + let _notify: AnalyzerNotify + + let _lint_configs: Map[String, linter.Config val] = _lint_configs.create() + + let _src_items: Map[String, SrcItem] = _src_items.create() + let _src_item_queue: Array[SrcItem] = _src_item_queue.create() + var _process_queued: Bool = false + + var _analysis_task_id: USize = 0 + let _workspace_errors: Map[String, Array[AnalyzerError]] = + _workspace_errors.create() + let _parse_errors: Map[String, Array[AnalyzerError]] = + _parse_errors.create() + let _lint_errors: Map[String, Array[AnalyzerError]] = + _lint_errors.create() + let _analyze_errors: Map[String, Array[AnalyzerError]] = + _analyze_errors.create() + + let _pending_requests: Map[String, MapIs[AnalyzerRequestNotify, Set[USize]]] = + _pending_requests.create() + + var _iteration: USize = 0 + var _disposing: Bool = false + + new create( + log: Logger[String], + context: AnalyzerContext, + notify: AnalyzerNotify) + => + _log = log + _context = context + _notify = notify + + fun ref _get_next_task_id(): USize => + let result = _analysis_task_id + _analysis_task_id = _analysis_task_id + 1 + result + + be analyze() => + if _disposing then return end + + var task_id = _get_next_task_id() + + let workspace_path = _context.workspace + let workspace_cache = _context.workspace_cache + let global_cache = _context.global_cache + + _log(Fine) and _log.log( + task_id.string() + ": analyzing " + workspace_path.path) + + _src_items.clear() + _src_items.compact() + _src_item_queue.clear() + _src_item_queue.compact() + + _workspace_errors.clear() + _workspace_errors.compact() + _parse_errors.clear() + _parse_errors.compact() + _lint_errors.clear() + _lint_errors.compact() + _analyze_errors.clear() + _analyze_errors.compact() + + _analyze_dir(task_id, true, workspace_path, workspace_cache, _schedule(0)) + + // var schedule = _schedule(250) + // for pony_path in _context.pony_path_dirs.values() do + // try + // let info = FileInfo(pony_path)? + // if info.directory then + // task_id = _get_next_task_id() + // _analyze_dir(task_id, false, pony_path, global_cache, schedule) + // end + // end + // end + + // schedule = _schedule(500) + // match _context.pony_packages_path + // | let pony_packages_path: FilePath => + // task_id = _get_next_task_id() + // _analyze_dir(task_id, false, pony_packages_path, global_cache, schedule) + // end + + be _analyze_dir( + task_id: USize, + is_workspace: Bool, + src_path: FilePath, + cache_path: FilePath, + schedule: (I64, I64)) + => + try + let info = FileInfo(src_path)? + if info.directory then + src_path.walk( + this~_walk_dir(task_id, is_workspace, cache_path, schedule)) + else + _log(Error) and _log.log( + task_id.string() + ": " + src_path.path + ": not a directory") + end + else + _log(Error) and _log.log( + task_id.string() + ": " + src_path.path + ": does not exist") + end + + fun ref _walk_dir( + task_id: USize, + is_workspace: Bool, + cache_path: FilePath, + schedule: (I64, I64), + dir_path: FilePath, + entries: Array[String]) + => + // skip directories starting with '.' + let to_remove = Array[USize] + for (i, entry) in entries.pairs() do + try + if entry(0)? == '.' 
then + to_remove.unshift(i) + end + end + end + for index in to_remove.values() do + entries.remove(index, 1) + end + + // skip directories without Pony source files + var has_pony_source = false + for entry in entries.values() do + if _is_pony_file(entry) then + has_pony_source = true + break + end + end + if not has_pony_source then + return + end + + // enqueue package item + let package_path = dir_path + if _src_items.contains(package_path.path) then + return + end + + let package = SrcPackageItem(package_path, cache_path) + package.task_id = task_id + package.is_workspace = is_workspace + + _log(Fine) and _log.log( + task_id.string() + ": enqueueing package " + package_path.path) + + // enqueue source file items + for entry in entries.values() do + if _is_pony_file(entry) then + try + let file_path = dir_path.join(entry)? + if _src_items.contains(file_path.path) then + continue + end + + let src_file = SrcFileItem(file_path, cache_path) + src_file.task_id = task_id + src_file.parent_package = package + src_file.schedule = schedule + _src_items.update(file_path.path, src_file) + _enqueue_src_item(src_file) + package.dependencies.push(src_file) + + _log(Fine) and _log.log( + task_id.string() + ": enqueueing file " + + src_file.canonical_path.path) + end + end + end + + _src_items.update(package_path.path, package) + _enqueue_src_item(package) + + fun tag _is_pony_file(fname: String): Bool => + let ext_size = ".pony".size() + if fname.size() <= ext_size then + return false + end + let index = ISize.from[USize](fname.size() - ext_size) + fname.compare_sub( + ".pony", ext_size, index where ignore_case = true) is Equal + + be open_file( + task_id: USize, + canonical_path: FilePath, + parse: parser.Parser) + => + if _disposing then return end + _log(Fine) and _log.log( + task_id.string() + ": opening " + canonical_path.path) + match try _src_items(canonical_path.path)? end + | let src_file: SrcFileItem => + src_file.task_id = task_id + src_file.state = AnalysisStart + src_file.schedule = _schedule(0) + src_file.is_open = true + src_file.parse = parse + _enqueue_src_item(src_file) + else + let src_file = SrcFileItem( + canonical_path, _context.get_cache(canonical_path)) + src_file.task_id = task_id + src_file.state = AnalysisStart + src_file.is_open = true + src_file.schedule = _schedule(0) + src_file.parse = parse + _src_items.update(canonical_path.path, src_file) + _enqueue_src_item(src_file) + end + + be update_file( + task_id: USize, + canonical_path: FilePath, + parse: parser.Parser) + => + if _disposing then return end + _log(Fine) and _log.log( + task_id.string() + ": updating " + canonical_path.path) + match try _src_items(canonical_path.path)? end + | let src_file: SrcFileItem => + src_file.task_id = task_id + src_file.schedule = _schedule(300) + src_file.is_open = true + src_file.parse = parse + _log(Fine) and _log.log(task_id.string() + ": found in-memory file") + _enqueue_src_item(src_file, AnalysisStart) + else + let src_file = SrcFileItem( + canonical_path, _context.get_cache(canonical_path)) + src_file.task_id = task_id + src_file.is_open = true + src_file.schedule = _schedule(300) + src_file.parse = parse + _src_items.update(canonical_path.path, src_file) + _enqueue_src_item(src_file, AnalysisStart) + _log(Fine) and _log.log( + task_id.string() + ": in-memory file not found; creating") + end + + be close_file(task_id: USize, canonical_path: FilePath) => + match try _src_items(canonical_path.path)? 
end
+    | let src_file: SrcFileItem =>
+      src_file.is_open = false
+    end
+
+  be request_info(
+    task_id: USize,
+    canonical_path: FilePath,
+    notify: AnalyzerRequestNotify)
+  =>
+    if _disposing then return end
+
+    _log(Fine) and _log.log(
+      task_id.string() + ": request " + canonical_path.path)
+
+    if not _src_items.contains(canonical_path.path) then
+      let file_dir = Path.split(canonical_path.path)._1
+      _analyze_dir(
+        task_id,
+        false,
+        FilePath(_context.file_auth, file_dir),
+        _context.get_cache(canonical_path),
+        _schedule(0))
+    end
+
+    // record the notify and its task id; they are answered once the item
+    // settles, in _process_pending_requests()
+    let notifys =
+      match try _pending_requests(canonical_path.path)? end
+      | let notifys': MapIs[AnalyzerRequestNotify, Set[USize]] =>
+        notifys'
+      else
+        let notifys' = MapIs[AnalyzerRequestNotify, Set[USize]]
+        _pending_requests.update(canonical_path.path, notifys')
+        notifys'
+      end
+    let task_ids =
+      match try notifys(notify)? end
+      | let task_ids': Set[USize] =>
+        task_ids'
+      else
+        let task_ids' = Set[USize]
+        notifys.update(notify, task_ids')
+        task_ids'
+      end
+    task_ids.set(task_id)
+    _process_src_item_queue()
+
+  be dispose() =>
+    _disposing = true
+
+  fun ref _push_error(
+    errors: Map[String, Array[AnalyzerError]],
+    new_error: AnalyzerError)
+  =>
+    let arr =
+      match try errors(new_error.canonical_path.path)? end
+      | let arr': Array[AnalyzerError] =>
+        arr'
+      else
+        let arr' = Array[AnalyzerError]
+        errors(new_error.canonical_path.path) = arr'
+        arr'
+      end
+    arr.push(new_error)
+
+  fun ref _clear_errors(
+    canonical_path: FilePath,
+    errors: Map[String, Array[AnalyzerError]])
+  =>
+    try
+      errors.remove(canonical_path.path)?
+      errors.compact()
+    end
+
+  fun ref _clear_and_push(
+    canonical_path: FilePath,
+    errors: Map[String, Array[AnalyzerError]],
+    new_error: AnalyzerError)
+  =>
+    let arr =
+      match try errors(new_error.canonical_path.path)? end
+      | let arr': Array[AnalyzerError] =>
+        arr'.clear()
+        arr'
+      else
+        let arr' = Array[AnalyzerError]
+        errors(new_error.canonical_path.path) = arr'
+        arr'
+      end
+    arr.push(new_error)
+
+  fun _collect_errors(
+    errors: Map[String, Array[AnalyzerError]],
+    canonical_path: (FilePath | None) = None)
+    : Array[AnalyzerError] val
+  =>
+    let result: Array[AnalyzerError] trn = Array[AnalyzerError]
+    match canonical_path
+    | let key: FilePath =>
+      try
+        for err in errors(key.path)?.values() do
+          result.push(err)
+        end
+      end
+    else
+      for key in errors.keys() do
+        try
+          for err in errors(key)?.values() do
+            result.push(err)
+          end
+        end
+      end
+    end
+    consume result
+
+  fun ref _enqueue_src_item(
+    src_item: SrcItem,
+    new_state: (SrcItemState | None) = None)
+  =>
+    match new_state
+    | let new_state': SrcItemState =>
+      src_item.set_state(new_state')
+    end
+
+    _src_item_queue.push(src_item)
+    _process_src_item_queue()
+
+  fun ref _process_src_item_queue() =>
+    if not _process_queued then
+      _process_queued = true
+      _process_src_item_queue_aux()
+    end
+
+  be _process_src_item_queue_aux() =>
+    _process_queued = false
+
+    if _disposing then return end
+
+    // if (_iteration % 500) == 0 then
+    //   _log_src_item_queue()
+    // end
+    _iteration = _iteration + 1
+
+    try
+      match _src_item_queue.shift()?
+ | let file_item: SrcFileItem => + _process_file_item(file_item) + | let package_item: SrcPackageItem => + _process_package_item(package_item) + end + end + + _process_pending_requests() + + if _src_item_queue.size() > 0 then + _process_src_item_queue() + end + + fun ref _log_src_item_queue() => + _log(Fine) and _log.log( + "PACKAGE ITEMS: " + _get_item_stats(_src_items.values(), true)) + _log(Fine) and _log.log( + "PACKAGE QUEUE: " + _get_item_stats(_src_item_queue.values(), true)) + _log(Fine) and _log.log( + "FILE ITEMS: " + _get_item_stats(_src_items.values(), false)) + _log(Fine) and _log.log( + "FILE QUEUE: " + _get_item_stats(_src_item_queue.values(), false)) + + fun _get_item_stats(iter: Iterator[SrcItem], is_pkg: Bool): String => + var num_starting: USize = 0 + var num_parsing: USize = 0 + var num_scoping: USize = 0 + var num_linting: USize = 0 + var num_error: USize = 0 + var num_up_to_date: USize = 0 + + for item in iter do + match item + | let pkg: SrcPackageItem => + if not is_pkg then continue end + else + if is_pkg then continue end + end + + match item.get_state() + | AnalysisStart => + num_starting = num_starting + 1 + | AnalysisParse => + num_parsing = num_parsing + 1 + | AnalysisScope => + num_scoping = num_scoping + 1 + | AnalysisLint => + num_linting = num_linting + 1 + | AnalysisError => + num_error = num_error + 1 + | AnalysisUpToDate => + num_up_to_date = num_up_to_date + 1 + end + end + let str: String trn = String + str.append("start ") + str.append(num_starting.string()) + str.append(", parse ") + str.append(num_parsing.string()) + str.append(", scope ") + str.append(num_scoping.string()) + str.append(", lint ") + str.append(num_linting.string()) + str.append(", error ") + str.append(num_error.string()) + str.append(", done ") + str.append(num_up_to_date.string()) + consume str + + fun ref _process_package_item(package_item: SrcPackageItem) => + // count things + var num_starting: USize = 0 + var num_parsing: USize = 0 + var num_scoping: USize = 0 + var num_linting: USize = 0 + var num_error: USize = 0 + var num_up_to_date: USize = 0 + + for dep in package_item.dependencies.values() do + match dep.get_state() + | AnalysisStart => + num_starting = num_starting + 1 + | AnalysisParse => + num_parsing = num_parsing + 1 + | AnalysisScope => + num_scoping = num_scoping + 1 + | AnalysisLint => + num_linting = num_linting + 1 + | AnalysisError => + num_error = num_error + 1 + | AnalysisUpToDate => + num_up_to_date = num_up_to_date + 1 + end + end + + if num_error > 0 then + _log(Error) and _log.log( + package_item.task_id.string() + ": PACKAGE ERROR: " + + package_item.canonical_path.path) + + package_item.state = AnalysisError + + if package_item.is_workspace then + _log(Fine) and _log.log( + package_item.task_id.string() + ": workspace ERROR; notifying") + _notify_workspace(package_item) + end + elseif num_up_to_date == package_item.dependencies.size() then + _log(Fine) and _log.log( + package_item.task_id.string() + ": package up to date: " + + package_item.canonical_path.path) + + package_item.state = AnalysisUpToDate + + if package_item.is_workspace then + _log(Fine) and _log.log( + package_item.task_id.string() + ": workspace up to date; notifying") + _notify_workspace(package_item) + end + else + var new_state = package_item.state + if num_starting == 0 then + if num_parsing > 0 then + new_state = AnalysisParse + elseif num_scoping > 0 then + new_state = AnalysisScope + elseif num_linting > 0 then + new_state = AnalysisLint + end + end + + if new_state isnt 
package_item.state then + _log_src_item_queue() + end + + // if (_iteration % 100) == 0 then + // _log(Fine) and _log.log( + // package_item.task_id.string() + ": workspace in progress") + // end + + _enqueue_src_item(package_item, new_state) + end + + fun ref _notify_workspace(package_item: SrcPackageItem) => + _notify.analyzed_workspace( + this, + package_item.task_id, + _collect_errors(_workspace_errors), + _collect_errors(_parse_errors), + _collect_errors(_lint_errors), + _collect_errors(_analyze_errors)) + + fun ref _process_file_item(src_file: SrcFileItem) => + var needs_push = false + match src_file.state + | AnalysisStart => + src_file.cache_prefix = _cache_prefix(src_file) + + try _workspace_errors.remove(src_file.canonical_path.path)? end + try _parse_errors.remove(src_file.canonical_path.path)? end + try _lint_errors.remove(src_file.canonical_path.path)? end + try _analyze_errors.remove(src_file.canonical_path.path)? end + + if _is_due(src_file.schedule) then + src_file.state = AnalysisParse + end + + if src_file.state is AnalysisParse then + _log(Fine) and _log.log( + src_file.task_id.string() + ": " + src_file.canonical_path.path + + " => Parsing") + end + needs_push = true + | AnalysisParse => + if src_file.is_open then + _parse_open_file(src_file) + else + _parse_disk_file(src_file) + end + | AnalysisScope => + _scope(src_file) + | AnalysisLint => + _lint(src_file) + | AnalysisError => + var errors: Array[AnalyzerError] trn = Array[AnalyzerError] + try + for err in _workspace_errors(src_file.canonical_path.path)?.values() do + errors.push(err) + end + end + try + for err in _parse_errors(src_file.canonical_path.path)?.values() do + errors.push(err) + end + end + try + for err in _lint_errors(src_file.canonical_path.path)?.values() do + errors.push(err) + end + end + try + for err in _analyze_errors(src_file.canonical_path.path)?.values() do + errors.push(err) + end + end + let errors': Array[AnalyzerError] val = consume errors + _notify.analyze_failed( + this, + src_file.task_id, + src_file.canonical_path, + errors') + + // try to free up some memory + if not src_file.is_open then + src_file.compact() + end + | AnalysisUpToDate => + _log(Fine) and _log.log( + src_file.task_id.string() + ": file up to date: " + + src_file.canonical_path.path) + + _notify.analyzed_file( + this, + src_file.task_id, + src_file.canonical_path, + src_file.syntax_tree, + None, + _collect_errors(_parse_errors), + _collect_errors(_lint_errors), + _collect_errors(_analyze_errors)) + + // try to free up some memory + if not src_file.is_open then + src_file.compact() + end + end + if needs_push then + _enqueue_src_item(src_file) + end + + fun ref _process_pending_requests() => + let paths_done = Array[String] + + for (canonical_path, notifys) in _pending_requests.pairs() do + match try _src_items(canonical_path)? 
end + | let file_item: SrcFileItem => + match file_item.state + | AnalysisUpToDate => + _pending_request_succeeded(file_item, notifys) + paths_done.push(canonical_path) + | AnalysisError => + _pending_request_failed(file_item, notifys) + paths_done.push(canonical_path) + end + | let package_item: SrcPackageItem => + let up_to_date = Iter[SrcItem](package_item.dependencies.values()) + .all({(pi) => pi.get_state()() is AnalysisUpToDate()}) + if up_to_date then + _pending_request_succeeded(package_item, notifys) + paths_done.push(canonical_path) + else + let any_errors = Iter[SrcItem](package_item.dependencies.values()) + .any({(pi) => pi.get_state()() is AnalysisError()}) + if any_errors then + _pending_request_failed(package_item, notifys) + paths_done.push(canonical_path) + end + end + end + end + + for path in paths_done.values() do + try + _pending_requests.remove(path)? + end + end + + fun ref _pending_request_succeeded( + src_item: (SrcFileItem | SrcPackageItem), + notifys: MapIs[AnalyzerRequestNotify, Set[USize]]) + => + match src_item + | let file_item: SrcFileItem => + match (file_item.syntax_tree, file_item.nodes_by_index, file_item.scope) + | + (let st: ast.Node, let nbi: Map[USize, ast.Node] val, let sc: Scope val) + => + for (notify, task_ids) in notifys.pairs() do + for task_id in task_ids.values() do + _log(Fine) and _log.log( + task_id.string() + ": request succeeded: " + + file_item.canonical_path.path) + notify.request_succeeded( + task_id, file_item.canonical_path, st, nbi, sc) + end + end + end + | let package_item: SrcPackageItem => + let package_scope: Scope trn = Scope( + PackageScope, + package_item.canonical_path.path, + package_item.canonical_path, + (0, 0, USize.max_value(), USize.max_value()), + USize.max_value()) + + for dep in package_item.dependencies.values() do + match dep + | let file_item: SrcFileItem => + match file_item.scope + | let child_scope: Scope val => + package_scope.add_child(child_scope) + end + end + end + + let package_scope': Scope val = consume package_scope + + for (notify, task_ids) in notifys.pairs() do + for task_id in task_ids.values() do + _log(Fine) and _log.log( + task_id.string() + ": request succeeded: " + + package_item.canonical_path.path) + notify.request_succeeded( + task_id, + package_item.canonical_path, + None, + Map[USize, ast.Node], + package_scope') + end + end + end + + fun ref _pending_request_failed( + src_item: (SrcFileItem | SrcPackageItem), + notifys: MapIs[AnalyzerRequestNotify, Set[USize]]) + => + for (notify, task_ids) in notifys.pairs() do + for task_id in task_ids.values() do + _log(Fine) and _log.log( + task_id.string() + ": request failed: " + + src_item.get_canonical_path().path) + notify.request_failed( + task_id, src_item.get_canonical_path(), "analysis failed") + end + end + + fun _schedule(millis: U64): (I64, I64) => + (var secs, var nanos) = Time.now() + nanos = nanos + I64.from[U64](Nanos.from_millis(millis)) + while nanos > 1_000_000_000 do + nanos = nanos - 1_000_000_000 + secs = secs + 1 + end + (secs, nanos) + + fun _is_due(schedule: (I64, I64)): Bool => + (let secs, let nanos) = Time.now() + if secs > schedule._1 then + true + elseif secs < schedule._1 then + false + else + nanos > schedule._2 + end + + fun ref _parse_open_file(src_file: SrcFileItem) => + if _disposing then return end + + _log(Fine) and _log.log( + src_file.task_id.string() + ": parsing in memory " + + src_file.canonical_path.path) + + let task_id = src_file.task_id + let canonical_path = src_file.canonical_path + match 
src_file.parse + | let parse': parser.Parser => + _parse(task_id, canonical_path, parse') + else + _log(Error) and _log.log( + task_id.string() + ": parse failed for " + canonical_path.path + + "; no data") + end + + fun ref _parse_disk_file(src_file: SrcFileItem) => + if _disposing then return end + + _log(Fine) and _log.log( + src_file.task_id.string() + ": parsing on disk " + + src_file.canonical_path.path) + + let src_file_path = src_file.canonical_path + let syntax_tree_path = _syntax_tree_path(src_file) + if + syntax_tree_path.exists() and + (not _source_is_newer(src_file_path, syntax_tree_path)) + then + _log(Fine) and _log.log( + src_file.task_id.string() + ": cache is newer; not parsing " + + src_file.canonical_path.path) + + match _get_syntax_tree(src_file) + | let syntax_tree: ast.Node => + _collect_error_sections(src_file.canonical_path, syntax_tree) + src_file.syntax_tree = syntax_tree + else + _log(Error) and _log.log( + src_file.task_id.string() + ": unable to load syntax for " + + src_file.canonical_path.path) + return + end + + _enqueue_src_item(src_file, AnalysisScope) + return + end + + let task_id = src_file.task_id + let canonical_path = src_file.canonical_path + match OpenFile(src_file.canonical_path) + | let file: File ref => + let source = file.read(file.size()) + let segments: Array[ReadSeq[U8] val] val = + [ as ReadSeq[U8] val: consume source ] + let parse = parser.Parser(segments) + _parse(task_id, canonical_path, parse) + else + _log(Error) and _log.log("unable to read " + canonical_path.path) + _push_error(_workspace_errors, AnalyzerError( + canonical_path, AnalyzeError, "unable to read file")) + _enqueue_src_item(src_file, AnalysisError) + end + + fun ref _parse( + task_id: USize, + canonical_path: FilePath, + parse: parser.Parser) + => + if _disposing then return end + + // _log(Fine) and _log.log( + // task_id.string() + ": parsing " + canonical_path.path) + let self: EohippusAnalyzer tag = this + parse.parse( + _context.grammar, + parser.Data(canonical_path.path), + {(result: parser.Result, values: ast.NodeSeq) => + match result + | let success: parser.Success => + try + match values(0)? 
+ | let node: ast.NodeWith[ast.SrcFile] => + // _log(Fine) and _log.log( + // task_id.string() + ": got SrcFile for " + canonical_path) + self._parsed_file(task_id, canonical_path, node) + else + _log(Error) and _log.log( + task_id.string() + ": " + canonical_path.path + + ": root node was not SrcFile") + self._parse_failed( + task_id, canonical_path, "root node was not SrcFile") + end + else + _log(Error) and _log.log( + task_id.string() + ": " + canonical_path.path + + ": failed to get SrcFile node") + self._parse_failed( + task_id, canonical_path, "failed to get SrcFile node") + end + | let failure: parser.Failure => + _log(Error) and _log.log( + task_id.string() + ": " + canonical_path.path + ": " + + failure.get_message()) + self._parse_failed(task_id, canonical_path, failure.get_message()) + end + }) + + fun ref _collect_error_sections(canonical_path: FilePath, node: ast.Node) => + match node + | let es: ast.NodeWith[ast.ErrorSection] => + let si = es.src_info() + match (si.line, si.column, si.next_line, si.next_column) + | (let l: USize, let c: USize, let nl: USize, let nc: USize) => + // _log(Fine) and _log.log( + // "ErrorSection " + canonical_path + ": " + l.string() + ":" + + // c.string() + "-" + nl.string() + ":" + nc.string()) + + _push_error( + _parse_errors, + AnalyzerError( + canonical_path, AnalyzeError, es.data().message, l, c, nl, nc)) + end + end + for child in node.children().values() do + _collect_error_sections(canonical_path, child) + end + + be _parsed_file( + task_id: USize, + canonical_path: FilePath, + node: ast.NodeWith[ast.SrcFile]) + => + if _disposing then return end + _process_src_item_queue() + + match try _src_items(canonical_path.path)? end + | let src_file: SrcFileItem => + if src_file.task_id != task_id then + _log(Fine) and _log.log( + "abandoning parse for task_id " + task_id.string() + + "; src_file is newer: " + src_file.task_id.string()) + return + end + + _clear_errors(canonical_path, _parse_errors) + (let syntax_tree, let lb, let errors) = ast.SyntaxTree.add_line_info(node) + if src_file.is_open then + _notify.parsed_file(this, task_id, canonical_path, syntax_tree, lb) + end + + if errors.size() > 0 then + for (n, message) in errors.values() do + _log(Error) and _log.log( + task_id.string() + ": line error " + canonical_path.path + + ": " + message) + + let si = n.src_info() + match (si.line, si.column, si.next_line, si.next_column) + | (let l: USize, let c: USize, let nl: USize, let nc: USize) => + _push_error( + _parse_errors, + AnalyzerError( + canonical_path, AnalyzeError, message, l, c, nl, nc)) + else + _push_error( + _parse_errors, + AnalyzerError(canonical_path, AnalyzeError, message)) + end + end + _enqueue_src_item(src_file, AnalysisError) + return + end + + src_file.syntax_tree = syntax_tree + src_file.make_indices() + _write_syntax_tree(src_file) + _collect_error_sections(canonical_path, syntax_tree) + + _log(Fine) and _log.log( + src_file.task_id.string() + ": " + src_file.canonical_path.path + + " => Scoping") + + _enqueue_src_item(src_file, AnalysisScope) + else + _log(Error) and _log.log( + task_id.string() + ": parsed untracked source file " + + canonical_path.path) + end + + be _parse_failed( + task_id: USize, + canonical_path: FilePath, + message: String, + line: USize = 0, + column: USize = 0, + next_line: USize = 0, + next_column: USize = 0) + => + if _disposing then return end + _process_src_item_queue() + + match try _src_items(canonical_path.path)? 
end
+    | let src_file: SrcFileItem =>
+      if src_file.task_id != task_id then
+        _log(Fine) and _log.log(
+          task_id.string() + ": ignoring failed parse for " +
+          canonical_path.path + "; src_file is newer: " +
+          src_file.task_id.string())
+        return
+      end
+
+      _log(Error) and _log.log(
+        task_id.string() + ": parse failed for " + canonical_path.path)
+
+      _push_error(_parse_errors, AnalyzerError(
+        canonical_path,
+        AnalyzeError,
+        message,
+        line,
+        column,
+        next_line,
+        next_column))
+
+      let error_section = ast.NodeWith[ast.ErrorSection](
+        ast.SrcInfo(canonical_path.path), [], ast.ErrorSection(message))
+      let node = ast.NodeWith[ast.SrcFile](
+        ast.SrcInfo(canonical_path.path),
+        [ error_section ],
+        ast.SrcFile(canonical_path.path, [], []))
+      _write_syntax_tree(src_file, node)
+
+      _log(Fine) and _log.log(
+        src_file.task_id.string() + ": " + src_file.canonical_path.path +
+        " => Error")
+
+      _enqueue_src_item(src_file, AnalysisError)
+    end
+
+  fun ref _write_syntax_tree(
+    src_file: SrcFileItem,
+    syntax_tree: (ast.Node | None) = None)
+  =>
+    _log(Fine) and _log.log(
+      src_file.task_id.string() + ": writing syntax tree for " +
+      src_file.canonical_path.path)
+
+    let st =
+      match
+        try
+          syntax_tree as ast.Node
+        else
+          try src_file.syntax_tree as ast.Node end
+        end
+      | let node: ast.Node =>
+        node
+      else
+        _log(Error) and _log.log("unable to get syntax tree to write")
+        return
+      end
+
+    let syntax_tree_path = _syntax_tree_path(src_file)
+    let dir_path = FilePath(
+      _context.file_auth, Path.split(syntax_tree_path.path)._1)
+    if (not dir_path.exists()) and (not dir_path.mkdir()) then
+      _log(Error) and _log.log("unable to create directory " + dir_path.path)
+      _push_error(_workspace_errors, AnalyzerError(
+        dir_path, AnalyzeError, "unable to create storage directory"))
+      return
+    end
+
+    match CreateFile(syntax_tree_path)
+    | let file: File =>
+      file.set_length(0)
+      let json_item = st.get_json()
+      let json_str =
+        ifdef debug then
+          json_item.get_string(true)
+        else
+          json_item.get_string(false)
+        end
+      _log(Fine) and _log.log(
+        src_file.task_id.string() + ": writing " + syntax_tree_path.path)
+      if not file.write(consume json_str) then
+        _log(Error) and _log.log(
+          src_file.task_id.string() + ": unable to write syntax tree file " +
+          syntax_tree_path.path)
+        _push_error(_workspace_errors, AnalyzerError(
+          src_file.canonical_path,
+          AnalyzeError,
+          "unable to write syntax tree file " + syntax_tree_path.path))
+      end
+    else
+      _log(Error) and _log.log(
+        src_file.canonical_path.path + ": unable to create syntax tree file " +
+        syntax_tree_path.path)
+      _push_error(_workspace_errors, AnalyzerError(
+        src_file.canonical_path,
+        AnalyzeError,
+        "unable to create syntax tree file " + syntax_tree_path.path))
+    end
+
+  fun ref _get_syntax_tree(src_file: SrcFileItem): (ast.Node | None) =>
+    match src_file.syntax_tree
+    | let node: ast.Node =>
+      node
+    else
+      let syntax_path = _syntax_tree_path(src_file)
+      match OpenFile(syntax_path)
+      | let file: File =>
+        let json_str = recover val file.read_string(file.size()) end
+        match json.Parse(json_str)
+        | let obj: json.Object =>
+          match ast.ParseNode(src_file.canonical_path.path, obj)
+          | let node: ast.Node =>
+            _log(Fine) and _log.log(
+              src_file.task_id.string() + ": loaded " + syntax_path.path)
+            src_file.syntax_tree = node
+            src_file.make_indices()
+            return node
+          | let err: String =>
+            _log(Error) and _log.log(
+              src_file.task_id.string() + ": error loading " +
+              syntax_path.path + ": " + err)
+          end
+        | let item: json.Item =>
+          _log(Error) and _log.log(
+            src_file.task_id.string() + ": error loading " + syntax_path.path +
+            ": a syntax tree must be an object")
+        | let err: json.ParseError =>
+          _log(Error) and _log.log(
+            src_file.task_id.string() + ": error loading " + syntax_path.path +
+            ":" + err.index.string() + ": " + err.message)
+        end
+      else
+        _log(Error) and _log.log(
+          src_file.task_id.string() + ": error opening " + syntax_path.path)
+      end
+      None
+    end
+
+  fun ref _scope(src_file: SrcFileItem) =>
+    if _disposing then return end
+    _process_src_item_queue()
+
+    _log(Fine) and _log.log(
+      src_file.task_id.string() + ": scoping " + src_file.canonical_path.path)
+
+    let src_file_path = src_file.canonical_path
+    let scope_path = _scope_path(src_file)
+    if
+      scope_path.exists() and
+      (not _source_is_newer(src_file_path, scope_path))
+    then
+      _log(Fine) and _log.log(
+        src_file.task_id.string() + ": cache is newer; not scoping " +
+        src_file.canonical_path.path)
+      match _get_scope(src_file)
+      | let scope: Scope =>
+        src_file.scope = scope
+        _enqueue_src_item(src_file, AnalysisLint)
+        return
+      else
+        _log(Error) and _log.log(
+          src_file.task_id.string() + ": unable to load scope for " +
+          src_file.canonical_path.path)
+        return
+      end
+    end
+
+    match _get_syntax_tree(src_file)
+    | let syntax_tree: ast.Node =>
+      let scoper = Scoper(_log, this)
+      scoper.scope_syntax_tree(
+        src_file.task_id,
+        src_file.canonical_path,
+        syntax_tree,
+        src_file.node_indices)
+    else
+      _log(Error) and _log.log(
+        src_file.task_id.string() + ": failed to get syntax tree for " +
+        src_file.canonical_path.path)
+      _enqueue_src_item(src_file, AnalysisError)
+    end
+
+  fun ref _get_scope(src_file: SrcFileItem): (Scope | None) =>
+    match src_file.scope
+    | let scope: Scope =>
+      scope
+    else
+      let scope_path = _scope_path(src_file)
+      match OpenFile(scope_path)
+      | let file: File =>
+        let json_str = recover val file.read_string(file.size()) end
+        match recover val json.Parse(json_str) end
+        | let obj: json.Object val =>
+          match recover val ParseScopeJson(_context.file_auth, obj, None) end
+          | let scope: Scope val =>
+            _log(Fine) and _log.log(
+              src_file.task_id.string() + ": loaded " + scope_path.path)
+            src_file.scope = scope
+            src_file.make_indices()
+            return scope
+          | let err: String =>
+            _log(Error) and _log.log(
+              src_file.task_id.string() + ": error loading " +
+              scope_path.path + ": " + err)
+          end
+        | let item: json.Item val =>
+          _log(Error) and _log.log(
+            src_file.task_id.string() + ": error loading " + scope_path.path +
+            ": a scope file must be an object")
+        | let err: json.ParseError =>
+          _log(Error) and _log.log(
+            src_file.task_id.string() + ": error loading " + scope_path.path +
+            ":" + err.index.string() + ": " + err.message)
+        end
+      else
+        _log(Error) and _log.log(
+          src_file.task_id.string() + ": error opening " + scope_path.path)
+      end
+      None
+    end
+
+  be scoped_file(
+    task_id: USize,
+    canonical_path: FilePath,
+    syntax_tree: ast.Node,
+    scope: Scope val)
+  =>
+    if _disposing then return end
+    _process_src_item_queue()
+
+    match try _src_items(canonical_path.path)?
end + | let src_file: SrcFileItem => + if src_file.task_id != task_id then + _log(Fine) and _log.log( + task_id.string() + ": abandoning scope for " + canonical_path.path + + "; src_file is newer: " + src_file.task_id.string()) + return + end + + _log(Fine) and _log.log( + task_id.string() + ": scoped " + canonical_path.path) + + src_file.syntax_tree = syntax_tree + src_file.scope = scope + src_file.make_indices() + + _write_syntax_tree(src_file) + _write_scope(src_file) + + //_process_imports(canonical_path, scope') + + _log(Fine) and _log.log( + task_id.string() + ": " + canonical_path.path + " => Linting") + + _enqueue_src_item(src_file, AnalysisLint) + else + _log(Error) and _log.log( + task_id.string() + ": scoped unknown file " + canonical_path.path) + end + + // fun ref _process_imports(canonical_path: String, scope: Scope) => + // (let base_path, _) = Path.split(scope.name) + + // var i: USize = 0 + // while i < scope.imports.size() do + // try + // (let alias, let import_path) = scope.imports(i)? + // match _try_analyze_import(base_path, import_path) + // | let canonical_import_path: String => + // try scope.imports.update(i, (alias, canonical_import_path))? end + // i = i + 1 + // continue + // else + // var found = false + // for pp in _pony_path.values() do + // match _try_analyze_import(pp.path, import_path) + // | let canonical_import_path: String => + // try scope.imports.update(i, (alias, canonical_import_path))? end + // found = true + // break + // end + // end + + // if found then + // i = i + 1 + // continue + // else + // match _pony_packages_path + // | let ppp: FilePath => + // match _try_analyze_import(ppp.path, import_path) + // | let canonical_import_path: String => + // try scope.imports.update(i, (alias, canonical_import_path))? end + // i = i + 1 + // continue + // end + // end + // end + // end + // _log(Error) and _log.log( + // "unable to resolve package " + import_path + " for " + canonical_path) + // end + // i = i + 1 + // end + + // fun ref _try_analyze_import(base_path: String, import_path: String) + // : (String | None) + // => + // let combined_path = FilePath(_auth, Path.join(base_path, import_path)) + // if combined_path.exists() then + // let canonical_path = + // try + // combined_path.canonical()? + // else + // combined_path + // end + // if not _src_items.contains(canonical_path.path) then + // analyze(_get_next_task_id(), canonical_path.path) + // else + // _log(Fine) and _log.log( + // "not analyzing existing import " + canonical_path.path) + // end + // return canonical_path.path + // end + + be scope_failed( + task_id: USize, + canonical_path: FilePath, + errors: ReadSeq[ast.TraverseError] val) + => + if _disposing then return end + _process_src_item_queue() + + match try _src_items(canonical_path.path)? 
end
+    | let src_file: SrcFileItem =>
+      if src_file.task_id != task_id then
+        _log(Fine) and _log.log(
+          task_id.string() + ": ignoring failed scope for " +
+          canonical_path.path + "; src_file is newer: " +
+          src_file.task_id.string())
+        return
+      end
+
+      for (node, message) in errors.values() do
+        _log(Error) and _log.log(
+          src_file.task_id.string() + ": scope error: " + message)
+        let si = node.src_info()
+        match (si.line, si.column, si.next_line, si.next_column)
+        | (let l: USize, let c: USize, let nl: USize, let nc: USize) =>
+          _push_error(
+            _analyze_errors,
+            AnalyzerError(
+              canonical_path, AnalyzeError, message, l, c, nl, nc))
+        else
+          _push_error(
+            _analyze_errors,
+            AnalyzerError(canonical_path, AnalyzeError, message))
+        end
+      end
+      _enqueue_src_item(src_file, AnalysisError)
+    else
+      _log(Error) and _log.log(task_id.string() + ": failed to scope unknown " +
+        canonical_path.path)
+    end
+
+  fun ref _write_scope(src_file: SrcFileItem) =>
+    _log(Fine) and _log.log(
+      src_file.task_id.string() + ": writing scope file for " +
+      src_file.canonical_path.path)
+
+    let scope =
+      match src_file.scope
+      | let scope': Scope val =>
+        scope'
+      else
+        _log(Error) and _log.log(
+          src_file.task_id.string() + ": no scope for " +
+          src_file.canonical_path.path)
+        return
+      end
+
+    let scope_path = _scope_path(src_file)
+    let dir_path = FilePath(_context.file_auth, Path.split(scope_path.path)._1)
+    if (not dir_path.exists()) and (not dir_path.mkdir()) then
+      _log(Error) and _log.log(
+        src_file.task_id.string() + ": unable to create directory " +
+        dir_path.path)
+      _push_error(_workspace_errors, AnalyzerError(
+        dir_path, AnalyzeError, "unable to create cache directory"))
+    end
+
+    match CreateFile(scope_path)
+    | let file: File =>
+      file.set_length(0)
+      let json_item = scope.get_json()
+      let json_str =
+        ifdef debug then
+          json_item.get_string(true)
+        else
+          json_item.get_string(false)
+        end
+      _log(Fine) and _log.log(
+        src_file.task_id.string() + ": writing " + scope_path.path)
+      if not file.write(consume json_str) then
+        _log(Error) and _log.log(
+          src_file.task_id.string() + ": unable to write scope file " +
+          scope_path.path)
+        _push_error(_workspace_errors, AnalyzerError(
+          src_file.canonical_path,
+          AnalyzeError,
+          "unable to write scope file " + scope_path.path))
+      end
+    else
+      _log(Error) and _log.log(
+        src_file.task_id.string() + ": unable to create scope file " +
+        scope_path.path)
+      _push_error(_workspace_errors, AnalyzerError(
+        src_file.canonical_path,
+        AnalyzeError,
+        "unable to create scope file " + scope_path.path))
+    end
+
+  fun ref _lint(src_file: SrcFileItem) =>
+    if _disposing then return end
+
+    _log(Fine) and _log.log(
+      src_file.task_id.string() + ": linting " + src_file.canonical_path.path)
+
+    let syntax_tree =
+      match _get_syntax_tree(src_file)
+      | let node: ast.Node =>
+        node
+      else
+        _log(Error) and _log.log(
+          src_file.task_id.string() + ": unable to get syntax tree for " +
+          src_file.canonical_path.path)
+        _enqueue_src_item(src_file, AnalysisError)
+        return
+      end
+
+    src_file.state = AnalysisLint
+    let config = _get_lint_config(src_file)
+    let canonical_path = src_file.canonical_path
+    let self: EohippusAnalyzer = this
+    let lint = linter.Linter(
+      config,
+      object tag is linter.LinterNotify
+        be lint_completed(
+          lint': linter.Linter,
+          task_id': USize,
+          tree': ast.Node,
+          issues': ReadSeq[linter.Issue] val,
+          errors': ReadSeq[ast.TraverseError] val)
+        =>
+          self._linted_file(task_id', canonical_path, issues', errors')
+
+        be linter_failed(task_id': USize,
message': String) => + self._lint_failed(task_id', canonical_path, message') + end) + lint.lint(src_file.task_id, syntax_tree) + + be _linted_file( + task_id: USize, + canonical_path: FilePath, + issues: ReadSeq[linter.Issue] val, + errors: ReadSeq[ast.TraverseError] val) + => + if _disposing then return end + _process_src_item_queue() + + match try _src_items(canonical_path.path)? end + | let src_file: SrcFileItem => + if src_file.task_id != task_id then + _log(Fine) and _log.log( + task_id.string() + ": abandoning lint for " + canonical_path.path + + "; src_file is newer: " + src_file.task_id.string()) + return + end + + _log(Fine) and _log.log( + task_id.string() + ": linted " + canonical_path.path + "; " + + issues.size().string() + " issues, " + errors.size().string() + + " errors") + + for issue in issues.values() do + try + let start = issue.start.head()?.src_info() + let next = issue.next.head()?.src_info() + match (start.line, start.column, next.line, next.column) + | (let l: USize, let c: USize, let nl: USize, let nc: USize) => + _push_error(_lint_errors, AnalyzerError( + canonical_path, + AnalyzeWarning, + issue.rule.message(), + l, + c, + nl, + nc)) + end + end + end + for (node, message) in errors.values() do + let si = node.src_info() + match (si.line, si.column, si.next_line, si.next_column) + | (let l: USize, let c: USize, let nl: USize, let nc: USize) => + _push_error(_lint_errors, AnalyzerError( + canonical_path, AnalyzeError, message, l, c, nl, nc)) + end + end + + _log(Fine) and _log.log( + src_file.task_id.string() + ": " + src_file.canonical_path.path + + " => UpToDate") + _enqueue_src_item(src_file, AnalysisUpToDate) + else + _log(Error) and _log.log( + task_id.string() + ": linted unknown file " + canonical_path.path) + end + + be _lint_failed(task_id: USize, canonical_path: FilePath, message: String) => + if _disposing then return end + _process_src_item_queue() + + match try _src_items(canonical_path.path)? end + | let src_file: SrcFileItem => + if src_file.task_id != task_id then + _log(Fine) and _log.log( + task_id.string() + ": ignoring failed lint for " + + canonical_path.path + "; src_file is newer: " + + src_file.task_id.string()) + return + end + + _log(Error) and _log.log( + task_id.string() + ": lint failed for " + canonical_path.path + ": " + + message) + + _push_error(_lint_errors, AnalyzerError( + canonical_path, AnalyzeError, "lint failed: " + message)) + _enqueue_src_item(src_file, AnalysisError) + else + _log(Error) and _log.log( + task_id.string() + ": failed to lint unknown file " + + canonical_path.path) + end + + fun ref _get_lint_config(src_file: SrcFileItem): linter.Config val => + var cur_path = src_file.canonical_path.path + repeat + (var dir_path, _) = Path.split(cur_path) + try + return _lint_configs(dir_path)? + else + let editor_config_path = Path.join(dir_path, ".editorconfig") + let config_file_path = FilePath(_context.file_auth, editor_config_path) + if config_file_path.exists() then + match linter.EditorConfig.read(config_file_path) + | let config: linter.Config val => + _log(Fine) and _log.log( + src_file.task_id.string() + ": found .editorconfig " + + config_file_path.path) + _lint_configs.update(dir_path, config) + return config + | let err: String => + _log(Error) and _log.log( + src_file.task_id.string() + ": unable to read " + + config_file_path.path) + end + elseif + try + (dir_path == "") or + (dir_path == "/") or + ((dir_path.size() == 3) and (dir_path(1)? 
== ':')) + else + false + end + then + break + end + cur_path = dir_path + end + until false end + linter.EditorConfig.default() + + fun _cache_prefix(file_item: SrcFileItem): String => + let fcp = file_item.canonical_path.path + let cache_base = Path.split(file_item.cache_path.path)._1 + + let comp = + ifdef windows then + fcp.compare_sub(cache_base, cache_base.size() where ignore_case = true) + else + fcp.compare_sub(cache_base, cache_base.size()) + end + + if comp is Equal then + let rest = fcp.substring(ISize.from[USize](cache_base.size() + 1)) + Path.join(file_item.cache_path.path, consume rest) + else + let rest = fcp.clone() .> replace(":", "_") + Path.join(file_item.cache_path.path, consume rest) + end + + fun _source_is_newer(source: FilePath, other: FilePath): Bool => + (let source_secs, let source_nanos) = + try + FileInfo(source)?.modified_time + else + _log(Error) and _log.log("unable to stat " + source.path) + return false + end + (let other_secs, let other_nanos) = + try + FileInfo(other)?.modified_time + else + _log(Error) and _log.log("unable to stat " + other.path) + return false + end + if source_secs > other_secs then + return true + elseif source_secs < other_secs then + return false + else + return source_nanos > other_nanos + end + + fun _syntax_tree_path(src_file: SrcFileItem box): FilePath => + FilePath(_context.file_auth, src_file.cache_prefix + ".syntax.json") + + fun _scope_path(src_file: SrcFileItem box): FilePath => + FilePath(_context.file_auth, src_file.cache_prefix + ".scope.json") diff --git a/eohippus/analyzer/scope.pony b/eohippus/analyzer/scope.pony index 4469932..1a6fc24 100644 --- a/eohippus/analyzer/scope.pony +++ b/eohippus/analyzer/scope.pony @@ -1,4 +1,5 @@ use "collections" +use "files" use ast = "../ast" use json = "../json" @@ -27,7 +28,7 @@ type ScopeItem is (USize, String, String) class val Scope let kind: ScopeKind let name: String - let canonical_path: String + let canonical_path: FilePath var range: SrcRange let index: USize var parent: (Scope box | None) @@ -38,7 +39,7 @@ class val Scope new create( kind': ScopeKind, name': String, - canonical_path': String, + canonical_path': FilePath, range': SrcRange, index': USize, parent': (Scope box | None) = None) @@ -120,7 +121,7 @@ class val Scope props.push(("kind", kind_string)) props.push(("name", name)) if (kind is PackageScope) or (kind is FileScope) then - props.push(("canonical_path", canonical_path)) + props.push(("canonical_path", canonical_path.path)) end props.push( ( "range", @@ -167,6 +168,7 @@ class val Scope primitive ParseScopeJson fun apply( + auth: FileAuth, scope_item: json.Item, parent: (Scope ref | None)) : (Scope ref | String) @@ -200,13 +202,13 @@ primitive ParseScopeJson let canonical_path = match try scope_obj("canonical_path")? end | let str: String box => - str.clone() + FilePath(auth, str.clone()) else match parent | let parent': Scope box => parent'.canonical_path else - "" + FilePath(auth, "") end end let range = @@ -272,7 +274,7 @@ primitive ParseScopeJson match try scope_obj("children")? 
end | let seq: json.Sequence => for item in seq.values() do - match ParseScopeJson(item, scope) + match ParseScopeJson(auth, item, scope) | let child: Scope ref => scope.add_child(child) | let err: String => diff --git a/eohippus/analyzer/scope_visitor.pony b/eohippus/analyzer/scope_visitor.pony index 980dbe9..b501863 100644 --- a/eohippus/analyzer/scope_visitor.pony +++ b/eohippus/analyzer/scope_visitor.pony @@ -1,4 +1,5 @@ use "collections" +use "files" use "logger" use ast = "../ast" @@ -20,7 +21,7 @@ class ScopeVisitor is ast.Visitor[ScopeState] new create( log: Logger[String], - canonical_path: String, + canonical_path: FilePath, node_indices: MapIs[ast.Node, USize] val) => _log = log @@ -28,7 +29,7 @@ class ScopeVisitor is ast.Visitor[ScopeState] file_scope = Scope( FileScope, - canonical_path, + canonical_path.path, canonical_path, (0, 0, USize.max_value(), USize.max_value()), _next_index = _next_index + 1, diff --git a/eohippus/analyzer/scoper.pony b/eohippus/analyzer/scoper.pony index ed7b9b4..243cf77 100644 --- a/eohippus/analyzer/scoper.pony +++ b/eohippus/analyzer/scoper.pony @@ -1,4 +1,5 @@ use "collections" +use "files" use "logger" use ast = "../ast" @@ -6,12 +7,12 @@ use ast = "../ast" interface tag _ScoperNotify be scoped_file( task_id: USize, - canonical_path: String, + canonical_path: FilePath, syntax_tree: ast.Node, scope: Scope val) be scope_failed( task_id: USize, - canonical_path: String, + canonical_path: FilePath, errors: ReadSeq[ast.TraverseError] val) actor Scoper @@ -24,7 +25,7 @@ actor Scoper be scope_syntax_tree( task_id: USize, - canonical_path: String, + canonical_path: FilePath, syntax_tree: ast.Node, node_indices: MapIs[ast.Node, USize] val) => diff --git a/eohippus/analyzer/src_item.pony b/eohippus/analyzer/src_item.pony index e87703a..bcf4afc 100644 --- a/eohippus/analyzer/src_item.pony +++ b/eohippus/analyzer/src_item.pony @@ -17,7 +17,7 @@ primitive AnalysisLint fun apply(): USize => 3 primitive AnalysisUpToDate - fun apply(): USize => 1000 + fun apply(): USize => 4 primitive AnalysisError fun apply(): USize => USize.max_value() @@ -30,12 +30,17 @@ type SrcItemState is | AnalysisUpToDate | AnalysisError ) -type SrcItem is (SrcFileItem | SrcPackageItem) +trait SrcItem + fun get_canonical_path(): FilePath + fun get_state(): SrcItemState + fun ref set_state(state': SrcItemState) -class SrcFileItem - let canonical_path: String +class SrcFileItem is SrcItem + let canonical_path: FilePath + + let cache_path: FilePath + var cache_prefix: String = "" - var storage_prefix: String = "" var parent_package: (SrcPackageItem | None) = None let dependencies: Array[SrcItem] = [] @@ -54,10 +59,11 @@ class SrcFileItem var scope_indices: MapIs[Scope, USize] val = scope_indices.create() var scopes_by_index: Map[USize, Scope] val = scopes_by_index.create() - new create(canonical_path': String) => + new create(canonical_path': FilePath, cache_path': FilePath) => canonical_path = canonical_path' + cache_path = cache_path' - fun get_canonical_path(): String => canonical_path + fun get_canonical_path(): FilePath => canonical_path fun get_state(): SrcItemState => state fun ref set_state(state': SrcItemState) => state = state' @@ -118,10 +124,12 @@ class SrcFileItem scope_indices = scope_indices.create() scopes_by_index = scopes_by_index.create() -class SrcPackageItem - let canonical_path: String +class SrcPackageItem is SrcItem + let canonical_path: FilePath + + let cache_path: FilePath + var cache_prefix: String = "" - var storage_prefix: String = "" var is_workspace: Bool = 
false var parent_package: (SrcPackageItem | None) = None let dependencies: Array[SrcItem] = [] @@ -129,9 +137,10 @@ class SrcPackageItem var task_id: USize = 0 var state: SrcItemState = AnalysisStart - new create(canonical_path': String) => + new create(canonical_path': FilePath, cache_path': FilePath) => canonical_path = canonical_path' + cache_path = cache_path' - fun get_canonical_path(): String => canonical_path + fun get_canonical_path(): FilePath => canonical_path fun get_state(): SrcItemState => state fun ref set_state(state': SrcItemState) => state = state' diff --git a/eohippus/analyzer/tasks/find_definition.pony b/eohippus/analyzer/tasks/find_definition.pony index 88f534f..3086be1 100644 --- a/eohippus/analyzer/tasks/find_definition.pony +++ b/eohippus/analyzer/tasks/find_definition.pony @@ -1,4 +1,5 @@ use "collections" +use "files" use "logger" use ast = "../../ast" @@ -7,20 +8,20 @@ use ".." interface tag FindDefinitionNotify be definition_found( task_id: USize, - canonical_path: String, + canonical_path: FilePath, range: SrcRange) be definition_failed( task_id: USize, message: String) class SearchFileItem - let canonical_path: String + let canonical_path: FilePath let syntax_tree: (ast.Node | None) let nodes_by_index: Map[USize, ast.Node] val let scope: Scope new create( - canonical_path': String, + canonical_path': FilePath, syntax_tree': (ast.Node | None), nodes_by_index': Map[USize, ast.Node] val, scope': Scope) @@ -36,13 +37,13 @@ actor FindDefinition is AnalyzerRequestNotify let log: Logger[String] let analyzer: Analyzer let task_id: USize - let canonical_path: String + let canonical_path: FilePath let line: USize let column: USize let notify: FindDefinitionNotify var span: String = "!INVALID!" - let paths_to_search: Array[(String, (SearchFileItem | None))] = + let paths_to_search: Array[(FilePath, (SearchFileItem | None))] = paths_to_search.create() var finished: Bool = false @@ -50,7 +51,7 @@ actor FindDefinition is AnalyzerRequestNotify log': Logger[String], analyzer': Analyzer, task_id': USize, - canonical_path': String, + canonical_path': FilePath, line': USize, column': USize, notify': FindDefinitionNotify) @@ -68,14 +69,14 @@ actor FindDefinition is AnalyzerRequestNotify be request_succeeded( task_id': USize, - canonical_path': String, + canonical_path': FilePath, syntax_tree': (ast.Node | None), nodes_by_index': Map[USize, ast.Node] val, scope': Scope val) => // find array index and update data for (i, pending) in paths_to_search.pairs() do - if pending._1 == canonical_path' then + if pending._1.path == canonical_path'.path then try let item = SearchFileItem( canonical_path', @@ -90,14 +91,14 @@ actor FindDefinition is AnalyzerRequestNotify be request_failed( task_id': USize, - canonical_path': String, + canonical_path': FilePath, message': String) => log(Error) and log.log( task_id'.string() + ": analysis request failed: " + message') for (i, pending) in paths_to_search.pairs() do - if canonical_path' == pending._1 then + if canonical_path'.path == pending._1.path then try paths_to_search.delete(i)? end break end @@ -115,8 +116,8 @@ actor FindDefinition is AnalyzerRequestNotify if not finished then // is there data in the first item in the array? match try paths_to_search(0)? 
end - | (let cp: String, let sfi: SearchFileItem) => - if cp == canonical_path then + | (let cp: FilePath, let sfi: SearchFileItem) => + if cp.path == canonical_path.path then match sfi.syntax_tree | let st: ast.Node => // this is our original file @@ -131,7 +132,7 @@ actor FindDefinition is AnalyzerRequestNotify else finished = true notify.definition_failed( - task_id, "no syntax tree for " + canonical_path) + task_id, "no syntax tree for " + canonical_path.path) end else // we're in a sibling, or import, or builtin diff --git a/eohippus/queue.pony b/eohippus/queue.pony index 94052c0..4e3306a 100644 --- a/eohippus/queue.pony +++ b/eohippus/queue.pony @@ -24,6 +24,16 @@ class Queue[A] let index = (_start + i) % _array.size() _array(index)? + fun ref clear() => + _array.clear() + _array.compact() + _size = 0 + _start = 0 + _next = 0 + + fun ref compact() => + _array.compact() + fun ref pop(): A ? => if _size == 0 then error diff --git a/eohippus/server/eohippus_server.pony b/eohippus/server/eohippus_server.pony index 59c040a..d0122cf 100644 --- a/eohippus/server/eohippus_server.pony +++ b/eohippus/server/eohippus_server.pony @@ -1,3 +1,4 @@ +use "appdirs" use "collections" use "files" use "logger" @@ -310,12 +311,12 @@ actor EohippusServer is Server request_id: String, params: rpc_data.DefinitionParams) => - let cp = _get_canonical_path(params.textDocument().uri()).path + let canonical_path = _get_canonical_path(params.textDocument().uri()) let task_id = _get_next_task_id() _pending_requests.update(task_id, request_id) _handle_request( _handle_text_document_definition( - FileAuth(_env.root), _workspaces, task_id, params, cp)) + FileAuth(_env.root), _workspaces, task_id, params, canonical_path)) be notification_exit() => _log(Fine) and _log.log("notification: exit") @@ -360,41 +361,116 @@ actor EohippusServer is Server _client_data.rootPath = rootPath be open_workspace(name: String, client_uri: String) => - _log(Fine) and _log.log("open workspace " + name + " " + client_uri) + _log(Fine) and _log.log("opening workspace " + name + " " + client_uri) if not _workspaces.by_client_uri.contains(client_uri) then - let canonical_path = _get_canonical_path(client_uri) - let pony_path = ServerUtils.get_pony_path(_env) - let ponyc_executable = + let auth = FileAuth(_env.root) + + let workspace_path = _get_canonical_path(client_uri) + if not workspace_path.exists() then + _log(Error) and _log.log( + "workspace does not exist: " + workspace_path.path) + return + end + _log(Fine) and _log.log("workspace_path: " + workspace_path.path) + + let workspace_cache = FilePath( + auth, Path.join(workspace_path.path, ".eohippus")) + if not _check_cache(workspace_cache, "workspace cache") then + return + end + _log(Fine) and _log.log("workspace_cache: " + workspace_cache.path) + + let appdirs = AppDirs(_env.vars, "eohippus") + let global_cache = + try + FilePath(auth, appdirs.user_cache_dir()?) 
diff --git a/eohippus/server/eohippus_server.pony b/eohippus/server/eohippus_server.pony
index 59c040a..d0122cf 100644
--- a/eohippus/server/eohippus_server.pony
+++ b/eohippus/server/eohippus_server.pony
@@ -1,3 +1,4 @@
+use "appdirs"
 use "collections"
 use "files"
 use "logger"
@@ -310,12 +311,12 @@ actor EohippusServer is Server
     request_id: String,
     params: rpc_data.DefinitionParams)
   =>
-    let cp = _get_canonical_path(params.textDocument().uri()).path
+    let canonical_path = _get_canonical_path(params.textDocument().uri())
     let task_id = _get_next_task_id()
     _pending_requests.update(task_id, request_id)
     _handle_request(
       _handle_text_document_definition(
-        FileAuth(_env.root), _workspaces, task_id, params, cp))
+        FileAuth(_env.root), _workspaces, task_id, params, canonical_path))
 
   be notification_exit() =>
     _log(Fine) and _log.log("notification: exit")
@@ -360,41 +361,116 @@ actor EohippusServer is Server
     _client_data.rootPath = rootPath
 
   be open_workspace(name: String, client_uri: String) =>
-    _log(Fine) and _log.log("open workspace " + name + " " + client_uri)
+    _log(Fine) and _log.log("opening workspace " + name + " " + client_uri)
     if not _workspaces.by_client_uri.contains(client_uri) then
-      let canonical_path = _get_canonical_path(client_uri)
-      let pony_path = ServerUtils.get_pony_path(_env)
-      let ponyc_executable =
+      let auth = FileAuth(_env.root)
+
+      let workspace_path = _get_canonical_path(client_uri)
+      if not workspace_path.exists() then
+        _log(Error) and _log.log(
+          "workspace does not exist: " + workspace_path.path)
+        return
+      end
+      _log(Fine) and _log.log("workspace_path: " + workspace_path.path)
+
+      let workspace_cache = FilePath(
+        auth, Path.join(workspace_path.path, ".eohippus"))
+      if not _check_cache(workspace_cache, "workspace cache") then
+        return
+      end
+      _log(Fine) and _log.log("workspace_cache: " + workspace_cache.path)
+
+      let appdirs = AppDirs(_env.vars, "eohippus")
+      let global_cache =
+        try
+          FilePath(auth, appdirs.user_cache_dir()?)
+        else
+          _log(Error) and _log.log("unable to get user cache dir")
+          return
+        end
+      if not _check_cache(global_cache, "global cache") then
+        return
+      end
+      _log(Fine) and _log.log("global_cache: " + global_cache.path)
+
+      let pony_path_dirs = ServerUtils.get_pony_path_dirs(_env)
+      _log(Fine) and _log.log("pony_path_dirs:")
+      for path in pony_path_dirs.values() do
+        _log(Fine) and _log.log("  " + path.path)
+      end
+
+      let ponyc =
         match _config.ponyc_executable
-        | let str: String =>
-          FilePath(FileAuth(_env.root), str)
+        | let ponyc_path: FilePath =>
+          if ponyc_path.exists() then
+            ponyc_path
+          end
         else
           ServerUtils.find_ponyc(_env)
         end
-      let analyze = analyzer.EohippusAnalyzer(
-        _log, FileAuth(_env.root), _parser_grammar
-        where
-          workspace = canonical_path,
-          storage_path = None,
-          pony_path = pony_path,
-          ponyc_executable = ponyc_executable,
-          pony_packages_path = None,
-          notify = this)
+      match ponyc
+      | let ponyc_path: FilePath =>
+        _log(Fine) and _log.log("ponyc_path: " + ponyc_path.path)
+      else
+        _log(Fine) and _log.log("ponyc_path: None")
+      end
+
+      let pony_packages = ServerUtils.find_pony_packages(_env, ponyc)
+      match pony_packages
+      | let pony_packages_path: FilePath =>
+        _log(Fine) and _log.log(
+          "pony_packages_path: " + pony_packages_path.path)
+      else
+        _log(Fine) and _log.log("pony_packages_path: None")
+      end
+
+      let analyzer_context = analyzer.AnalyzerContext(
+        auth,
+        workspace_path,
+        workspace_cache,
+        global_cache,
+        pony_path_dirs,
+        ponyc,
+        pony_packages,
+        _parser_grammar)
+
+      let analyze = analyzer.EohippusAnalyzer(_log, analyzer_context, this)
+      analyze.analyze()
+
       let workspace = WorkspaceInfo(
-        name, client_uri, canonical_path.path, this, analyze)
+        name, client_uri, workspace_path, this, analyze)
       _workspaces.by_client_uri.update(client_uri, workspace)
-      _workspaces.by_canonical_path.update(canonical_path.path, workspace)
+      _workspaces.by_canonical_path.update(workspace_path.path, workspace)
       _workspaces.by_analyzer.update(analyze, workspace)
     else
       _log(Warn) and _log.log("workspace " + client_uri + " already open")
     end
 
+  fun _check_cache(path: FilePath, name: String): Bool =>
+    try
+      if (not path.exists()) and (not path.mkdir()) then
+        _log(Error) and _log.log("unable to create " + name + ": " + path.path)
+        return false
+      end
+      let info = FileInfo(path)?
+      if not info.directory then
+        _log(Error) and _log.log(name + " is not a directory: " + path.path)
+        return false
+      end
+    else
+      _log(Error) and _log.log("unable to access " + name + ": " + path.path)
+      return false
+    end
+    true
+
   fun _clear_errors(
-    canonical_path: String,
+    canonical_path: FilePath,
     dest: Map[String, Array[analyzer.AnalyzerError]])
   =>
     try
-      dest(canonical_path)?.clear()
+      let map = dest(canonical_path.path)?
+      map.clear()
+      map.compact()
     end
 
   fun _add_errors(
@@ -404,10 +480,10 @@ actor EohippusServer is Server
     for err in src.values() do
       let arr =
         try
-          dest(err.canonical_path)?
         else
           let arr' = Array[analyzer.AnalyzerError]
-          dest(err.canonical_path) = arr'
+          dest(err.canonical_path.path) = arr'
           arr'
         end
       arr.push(err)
@@ -429,10 +505,10 @@ actor EohippusServer is Server
     end
 
   fun _notify_file_diagnostics(
-    canonical_path: String,
+    canonical_path_str: String,
     errors: Array[analyzer.AnalyzerError])
   =>
-    let client_uri = StringUtil.get_client_uri(canonical_path)
+    let client_uri = StringUtil.get_client_uri(canonical_path_str)
     let diagnostics: Array[rpc_data.Diagnostic] trn = []
     for err in errors.values() do
       let range' = _get_range(err)
@@ -471,9 +547,10 @@ actor EohippusServer is Server
     errors: Map[String, Array[analyzer.AnalyzerError]])
   =>
     var num_sent: USize = 0
-    for canonical_path in errors.keys() do
+    for canonical_path_str in errors.keys() do
       try
-        _notify_file_diagnostics(canonical_path, errors(canonical_path)?)
+        _notify_file_diagnostics(
+          canonical_path_str, errors(canonical_path_str)?)
       end
       num_sent = num_sent + 1
     end
@@ -483,18 +560,19 @@ actor EohippusServer is Server
 
   be parsed_file(
     analyze: analyzer.Analyzer,
     task_id: USize,
-    canonical_path: String,
+    canonical_path: FilePath,
     syntax_tree: ast.Node,
     line_beginnings: ReadSeq[parser.Loc] val)
   =>
-    match try _src_files.by_canonical_path(canonical_path)? end
+    match try _src_files.by_canonical_path(canonical_path.path)? end
     | let src_file: SrcFileInfo =>
-      _log(Fine) and _log.log(task_id.string() + ": parsed " + canonical_path)
+      _log(Fine) and _log.log(
+        task_id.string() + ": parsed " + canonical_path.path)
       src_file.syntax_tree = syntax_tree
       src_file.set_line_beginnings(line_beginnings)
     else
       _log(Fine) and _log.log(
-        task_id.string() + " parsed unknown " + canonical_path)
+        task_id.string() + ": parsed unknown " + canonical_path.path)
     end
 
   be analyzed_workspace(
@@ -524,7 +602,7 @@ actor EohippusServer is Server
   be analyzed_file(
     analyze: analyzer.Analyzer,
     task_id: USize,
-    canonical_path: String,
+    canonical_path: FilePath,
     syntax_tree: (ast.Node | None),
     file_scope: (analyzer.Scope val | None),
     parse_errors: ReadSeq[analyzer.AnalyzerError] val,
@@ -532,7 +610,7 @@ actor EohippusServer is Server
     analyze_errors: ReadSeq[analyzer.AnalyzerError] val)
   =>
     _log(Fine) and _log.log(
-      task_id.string() + ": file analyzed: " + canonical_path)
+      task_id.string() + ": file analyzed: " + canonical_path.path)
     if parse_errors.size() > 0 then
       _log(Fine) and _log.log(
         "  " + parse_errors.size().string() + " parse errors")
@@ -554,15 +632,12 @@ actor EohippusServer is Server
     _add_errors(analyze_errors, workspace.errors)
 
     if _client_data.text_document_publish_diagnostics() then
-      if workspace.errors.contains(canonical_path) then
-        _log(Fine) and _log.log(task_id.string() + ": sending diagnostics")
-        _notify_workspace_diagnostics(workspace.errors)
-        // try
-        //   _log(Fine) and _log.log(
-        //     task_id.string() + ": sending diagnostics for " + canonical_path)
-        //   _notify_file_diagnostics(
-        //     canonical_path, workspace.errors(canonical_path)?)
-        // end
+      try
+        _log(Fine) and _log.log(
+          task_id.string() + ": sending diagnostics for " +
+          canonical_path.path)
+        _notify_file_diagnostics(
+          canonical_path.path, workspace.errors(canonical_path.path)?)
       end
     end
   end
 
@@ -570,11 +645,11 @@ actor EohippusServer is Server
   be analyze_failed(
     analyze: analyzer.Analyzer,
     task_id: USize,
-    canonical_path: String,
+    canonical_path: FilePath,
     errors: ReadSeq[analyzer.AnalyzerError] val)
   =>
     _log(Fine) and _log.log(
-      task_id.string() + ": analyze failed: " + canonical_path)
+      task_id.string() + ": analyze failed: " + canonical_path.path)
 
   fun ref _get_request_id(task_id: USize): (I128 | String | None) =>
     let result =
@@ -591,10 +666,10 @@ actor EohippusServer is Server
 
   be definition_found(
     task_id: USize,
-    canonical_path: String,
+    canonical_path: FilePath,
     range: analyzer.SrcRange)
   =>
-    let client_uri = StringUtil.get_client_uri(canonical_path)
+    let client_uri = StringUtil.get_client_uri(canonical_path.path)
     let request_id = _get_request_id(task_id)
     let start' =
       object val is rpc_data.Position
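
Both `open_workspace` above and the CLI entry point funnel directory setup through the same `_check_cache` pattern: create the directory if it is missing, then confirm it really is a directory. A standalone sketch of that core (`EnsureCacheDir` is a hypothetical name; the server's version also logs each failure case):

```pony
use "files"

// Returns true only if `path` exists (or was just created) and is a
// directory; any filesystem error yields false.
primitive EnsureCacheDir
  fun apply(path: FilePath): Bool =>
    try
      if (not path.exists()) and (not path.mkdir()) then
        return false
      end
      FileInfo(path)?.directory
    else
      false
    end
```
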
diff --git a/eohippus/server/handlers/text_document/definition.pony b/eohippus/server/handlers/text_document/definition.pony
index f6fc2cf..de8655a 100644
--- a/eohippus/server/handlers/text_document/definition.pony
+++ b/eohippus/server/handlers/text_document/definition.pony
@@ -18,24 +18,28 @@ class Definition
     workspaces: Workspaces,
     task_id: USize,
     params: rpc_data.DefinitionParams,
-    canonical_path: String)
+    canonical_path: FilePath)
     : ((ServerState | None), (I32 | None))
   =>
     _log(Fine) and _log.log(
       task_id.string() + ": request definition: " +
       params.textDocument().uri() + ":" + params.position().line().string() +
       ":" + params.position().character().string())
-    let uri = params.textDocument().uri()
-    let workspace = workspaces.get_workspace(auth, _config, canonical_path)
-    let position = params.position()
+    try
+      let uri = params.textDocument().uri()
+      let workspace = workspaces.get_workspace(auth, _config, canonical_path)?
+      let position = params.position()
 
-    tasks.FindDefinition(
-      _log,
-      workspace.analyze,
-      task_id,
-      canonical_path,
-      USize.from[I128](position.line()),
-      USize.from[I128](position.character()),
-      workspace.server)
+      tasks.FindDefinition(
+        _log,
+        workspace.analyze,
+        task_id,
+        canonical_path,
+        USize.from[I128](position.line()),
+        USize.from[I128](position.character()),
+        workspace.server)
+    else
+      _log(Error) and _log.log(task_id.string() + ": error getting workspace")
+    end
     (None, None)
diff --git a/eohippus/server/handlers/text_document/did_change.pony b/eohippus/server/handlers/text_document/did_change.pony
index 6d593b2..670439b 100644
--- a/eohippus/server/handlers/text_document/did_change.pony
+++ b/eohippus/server/handlers/text_document/did_change.pony
@@ -23,21 +23,27 @@ class DidChange
   =>
     _log(Fine) and _log.log(
       task_id.string() + ": notification: textDocument/didChange")
+
     let uri = params.textDocument().uri()
     try
       let info = src_files.by_client_uri(uri)?
-      info.did_change(
-        task_id,
-        params.textDocument(),
-        params.contentChanges())
-      let workspace = workspaces.get_workspace(
-        auth, _config, info.canonical_path.path)
-      match info.parse
-      | let parse': parser.Parser =>
-        workspace.analyze.update_file(task_id, info.canonical_path.path, parse')
+      try
+        info.did_change(
+          task_id,
+          params.textDocument(),
+          params.contentChanges())
+        let workspace = workspaces.get_workspace(
+          auth, _config, info.canonical_path)?
+        match info.parse
+        | let parse': parser.Parser =>
+          workspace.analyze.update_file(task_id, info.canonical_path, parse')
+        end
+      else
+        _log(Error) and _log.log(task_id.string() + ": error getting workspace")
       end
     else
       _log(Error) and _log.log(
         task_id.string() + ": no open info found for " + uri)
     end
+
     (None, None)
diff --git a/eohippus/server/handlers/text_document/did_close.pony b/eohippus/server/handlers/text_document/did_close.pony
index c453611..5df0809 100644
--- a/eohippus/server/handlers/text_document/did_close.pony
+++ b/eohippus/server/handlers/text_document/did_close.pony
@@ -22,15 +22,22 @@ class DidClose
   =>
     _log(Fine) and _log.log(
       task_id.string() + ": notification: textDocument/didClose")
-    let uri = params.textDocument().uri()
+
+    let uri = params.textDocument().uri()
     try
       let info = src_files.by_client_uri(uri)?
-      let workspace = workspaces.get_workspace(
-        auth, _config, info.canonical_path.path)
-      workspace.analyze.close_file(task_id, info.canonical_path.path)
-      src_files.by_client_uri.remove(uri)?
-      src_files.by_canonical_path.remove(info.canonical_path.path)?
+
+      try
+        let workspace = workspaces.get_workspace(
+          auth, _config, info.canonical_path)?
+        workspace.analyze.close_file(task_id, info.canonical_path)
+        src_files.by_client_uri.remove(uri)?
+        src_files.by_canonical_path.remove(info.canonical_path.path)?
+      else
+        _log(Error) and _log.log(task_id.string() + ": error getting workspace")
+      end
     else
       _log(Error) and _log.log(task_id.string() + ": no info found for " + uri)
     end
+
     (None, None)
diff --git a/eohippus/server/handlers/text_document/did_open.pony b/eohippus/server/handlers/text_document/did_open.pony
index a948f89..f575cf7 100644
--- a/eohippus/server/handlers/text_document/did_open.pony
+++ b/eohippus/server/handlers/text_document/did_open.pony
@@ -61,9 +61,13 @@ class DidOpen
           task_id,
           params.textDocument().version(),
           params.textDocument().text())
-        let workspace = workspaces.get_workspace(
-          auth, _config, sfi.canonical_path.path)
-        workspace.analyze.open_file(task_id, sfi.canonical_path.path, parse)
+        try
+          let workspace = workspaces.get_workspace(
+            auth, _config, sfi.canonical_path)?
+          workspace.analyze.open_file(task_id, sfi.canonical_path, parse)
+        else
+          _log(Error) and _log.log("unable to get workspace")
+        end
       else
         _log(Error) and _log.log("unable to open " + params.textDocument().uri())
       end
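
All four `text_document` handler diffs above share one shape now that `Workspaces.get_workspace` is partial: do the work inside `try`, log in `else`. A hedged skeleton of that shape, assuming this changeset's `Workspaces`, `ServerConfig`, and `Logger` types (`WithWorkspace` is a hypothetical name, not a real handler):

```pony
use "files"
use "logger"

// Hypothetical skeleton showing the shared try/else lookup shape.
class WithWorkspace
  let _log: Logger[String]
  let _config: ServerConfig

  new create(log: Logger[String], config: ServerConfig) =>
    _log = log
    _config = config

  fun ref apply(
    auth: FileAuth,
    workspaces: Workspaces,
    canonical_path: FilePath,
    task_id: USize)
  =>
    try
      let workspace = workspaces.get_workspace(auth, _config, canonical_path)?
      workspace.analyze.close_file(task_id, canonical_path)
    else
      _log(Error) and _log.log(task_id.string() + ": error getting workspace")
    end
```
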
diff --git a/eohippus/server/server.pony b/eohippus/server/server.pony
index 260b702..a5e64cf 100644
--- a/eohippus/server/server.pony
+++ b/eohippus/server/server.pony
@@ -1,3 +1,5 @@
+use "files"
+
 use analyzer = "../analyzer"
 use ast = "../ast"
 use c_caps = "rpc/data/client_capabilities"
@@ -52,7 +54,7 @@ interface tag Server is (analyzer.AnalyzerNotify & tasks.FindDefinitionNotify)
   be parsed_file(
     analyze: analyzer.Analyzer,
     task_id: USize,
-    canonical_name: String,
+    canonical_name: FilePath,
     syntax_tree: ast.Node,
     line_beginnings: ReadSeq[parser.Loc] val)
@@ -66,7 +68,7 @@ interface tag Server is (analyzer.AnalyzerNotify & tasks.FindDefinitionNotify)
   be analyzed_file(
     analyze: analyzer.Analyzer,
     task_id: USize,
-    canonical_path: String,
+    canonical_path: FilePath,
     syntax_tree: (ast.Node | None),
     file_scope: (analyzer.Scope val | None),
     parse_errors: ReadSeq[analyzer.AnalyzerError] val,
@@ -75,13 +77,13 @@ interface tag Server is (analyzer.AnalyzerNotify & tasks.FindDefinitionNotify)
   be analyze_failed(
     analyze: analyzer.Analyzer,
     task_id: USize,
-    canonical_path: String,
+    canonical_path: FilePath,
     errors: ReadSeq[analyzer.AnalyzerError] val)
 
   // FindDefinitionNotify
   be definition_found(
     task_id: USize,
-    canonical_path: String,
+    canonical_path: FilePath,
     range: analyzer.SrcRange)
   be definition_failed(
     task_id: USize,
diff --git a/eohippus/server/server_config.pony b/eohippus/server/server_config.pony
index 6ce699a..105b745 100644
--- a/eohippus/server/server_config.pony
+++ b/eohippus/server/server_config.pony
@@ -1,57 +1,7 @@
 use "files"
 
 class val ServerConfig
-  let ponyc_executable: (String | None)
+  let ponyc_executable: (FilePath | None)
 
-  new create(ponyc_executable': (String | None)) =>
+  new create(ponyc_executable': (FilePath | None)) =>
     ponyc_executable = ponyc_executable'
-
-primitive ServerUtils
-  fun get_pony_path(env: Env): ReadSeq[FilePath] val =>
-    let pony_path: Array[FilePath] trn = []
-    for env_var in env.vars.values() do
-      if
-        env_var.compare_sub("PONYPATH", 8 where ignore_case = true) is Equal
-      then
-        try
-          let index = env_var.find("=")?
-          for
-            dir_path in Path.split_list(env_var.substring(index + 1)).values()
-          do
-            let fp = FilePath(FileAuth(env.root), dir_path)
-            if fp.exists() then
-              pony_path.push(fp)
-            end
-          end
-        end
-      end
-    end
-    consume pony_path
-
-  fun find_ponyc(env: Env): (FilePath | None) =>
-    for env_var in env.vars.values() do
-      if env_var.compare_sub("PATH", 4 where ignore_case = true) is Equal then
-        try
-          let index = env_var.find("=")?
-          for path_path in
-            Path.split_list(env_var.substring(index + 1)).values()
-          do
-            let ponyc_path =
-              ifdef windows then
-                Path.join(path_path, "ponyc.exe")
-              else
-                Path.join(path_path, "ponyc")
-              end
-            let ponyc_file_path = FilePath(FileAuth(env.root), ponyc_path)
-            if ponyc_file_path.exists() then
-              return
-                try
-                  ponyc_file_path.canonical()?
-                else
-                  ponyc_file_path
-                end
-            end
-          end
-        end
-      end
-    end
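
With `ServerConfig.ponyc_executable` now a `(FilePath | None)`, callers resolve the path once up front, as the eohippus-lsp entry point does. A minimal construction sketch (`MakeConfig` is a hypothetical convenience wrapper):

```pony
use "files"

// Turn an optional path string (e.g. from the command line) into the
// FilePath the new ServerConfig expects; None passes through unchanged.
primitive MakeConfig
  fun apply(env: Env, ponyc_str: (String | None)): ServerConfig =>
    let ponyc_path =
      match ponyc_str
      | let s: String => FilePath(FileAuth(env.root), s)
      end
    ServerConfig(ponyc_path)
```
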
diff --git a/eohippus/server/server_utils.pony b/eohippus/server/server_utils.pony
new file mode 100644
index 0000000..2afc21a
--- /dev/null
+++ b/eohippus/server/server_utils.pony
@@ -0,0 +1,73 @@
+use "files"
+
+primitive ServerUtils
+  fun get_pony_path_dirs(env: Env): ReadSeq[FilePath] val =>
+    let pony_path: Array[FilePath] trn = []
+    for env_var in env.vars.values() do
+      if
+        env_var.compare_sub("PONYPATH", 8 where ignore_case = true) is Equal
+      then
+        try
+          let index = env_var.find("=")?
+          for
+            dir_path in Path.split_list(env_var.substring(index + 1)).values()
+          do
+            let fp = FilePath(FileAuth(env.root), dir_path)
+            if fp.exists() then
+              pony_path.push(fp)
+            end
+          end
+        end
+      end
+    end
+    consume pony_path
+
+  fun find_ponyc(env: Env): (FilePath | None) =>
+    for env_var in env.vars.values() do
+      if env_var.compare_sub("PATH", 4 where ignore_case = true) is Equal then
+        try
+          let index = env_var.find("=")?
+          for path_path in
+            Path.split_list(env_var.substring(index + 1)).values()
+          do
+            let ponyc_path =
+              ifdef windows then
+                Path.join(path_path, "ponyc.exe")
+              else
+                Path.join(path_path, "ponyc")
+              end
+            let ponyc_file_path = FilePath(FileAuth(env.root), ponyc_path)
+            if ponyc_file_path.exists() then
+              return
+                try
+                  ponyc_file_path.canonical()?
+                else
+                  ponyc_file_path
+                end
+            end
+          end
+        end
+      end
+    end
+
+  fun find_pony_packages(
+    env: Env,
+    ponyc: (FilePath | None))
+    : (FilePath | None)
+  =>
+    match ponyc
+    | let ponyc_path: FilePath =>
+      let zero_down = Path.split(ponyc_path.path)._1
+      let one_down = Path.split(zero_down)._1
+      var packages_path = FilePath(
+        FileAuth(env.root), Path.join(one_down, "packages"))
+      if packages_path.exists() then
+        return packages_path
+      end
+      let two_down = Path.split(one_down)._1
+      packages_path = FilePath(
+        FileAuth(env.root), Path.join(two_down, "packages"))
+      if packages_path.exists() then
+        return packages_path
+      end
+    end
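
In `find_pony_packages` above, `Path.split` returns a `(dir, base)` tuple, so taking `._1` repeatedly climbs one directory per call; the function probes the two ancestors of the directory holding the `ponyc` binary. A worked example with a hypothetical install path:

```pony
// For ponyc at /opt/ponyc/bin/ponyc (hypothetical location):
//   zero_down = Path.split("/opt/ponyc/bin/ponyc")._1  // "/opt/ponyc/bin"
//   one_down  = Path.split(zero_down)._1               // "/opt/ponyc"
//     -> probes "/opt/ponyc/packages"
//   two_down  = Path.split(one_down)._1                // "/opt"
//     -> probes "/opt/packages"
// i.e. the standard-library packages directory is searched one and two
// levels above the directory containing the binary.
```
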
diff --git a/eohippus/server/workspace_info.pony b/eohippus/server/workspace_info.pony
index 3e70a08..a2649f7 100644
--- a/eohippus/server/workspace_info.pony
+++ b/eohippus/server/workspace_info.pony
@@ -8,7 +8,7 @@ use parser = "../parser"
 class WorkspaceInfo
   let name: String
   let client_uri: String
-  let canonical_path: String
+  let canonical_path: FilePath
   let server: Server
   let analyze: analyzer.Analyzer
@@ -17,7 +17,7 @@
   new create(
     name': String,
     client_uri': String,
-    canonical_path': String,
+    canonical_path': FilePath,
     server': Server,
     analyze': analyzer.Analyzer)
   =>
@@ -51,32 +51,45 @@ class Workspaces
   fun ref get_workspace(
     auth: FileAuth,
     config: ServerConfig,
-    canonical_path: String)
-    : WorkspaceInfo
+    canonical_path: FilePath)
+    : WorkspaceInfo?
   =>
+    // TODO: refine this to handle multiple workspaces
     for (ws_path, workspace) in by_canonical_path.pairs() do
-      if canonical_path.compare_sub(ws_path, ws_path.size()) is Equal then
-        return workspace
-      end
+      return workspace
     end
-    (let dir, _) = Path.split(canonical_path)
-    _log(Fine) and _log.log("creating ad-hoc workspace for " + dir)
-    let ponyc =
-      match config.ponyc_executable
-      | let path: String =>
-        FilePath(auth, path)
-      end
-    let analyze = analyzer.EohippusAnalyzer(
-      _log,
-      auth,
-      _grammar,
-      FilePath(auth, dir),
-      None,
-      [],
-      ponyc,
-      None,
-      _server)
-    let workspace = WorkspaceInfo(dir, dir, dir, _server, analyze)
-    by_canonical_path.update(dir, workspace)
-    by_analyzer.update(analyze, workspace)
-    workspace
+    error
+
+    // for (ws_path, workspace) in by_canonical_path.pairs() do
+    //   if canonical_path.compare_sub(ws_path, ws_path.size()) is Equal then
+    //     return workspace
+    //   end
+    // end
+
+    // let workspace_path = FilePath(auth, Path.split(canonical_path)._1)
+    // _log(Fine) and _log.log(
+    //   "creating ad-hoc workspace for " + workspace_path.path)
+
+    // let analyzer_context = analyzer.AnalyzerContext(
+
+    // )
+
+    // let ponyc =
+    //   match config.ponyc_executable
+    //   | let path: String =>
+    //     FilePath(auth, path)
+    //   end
+    // let analyze = analyzer.EohippusAnalyzer(
+    //   _log,
+    //   auth,
+    //   _grammar,
+    //   FilePath(auth, dir),
+    //   None,
+    //   [],
+    //   ponyc,
+    //   None,
+    //   _server)
+    // let workspace = WorkspaceInfo(dir, dir, dir, _server, analyze)
+    // by_canonical_path.update(dir, workspace)
+    // by_analyzer.update(analyze, workspace)
+    // workspace
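
Until the TODO above is resolved, `get_workspace` returns the first (and only) open workspace and raises `error` when none exists, which is why every caller in this changeset gained try/else handling. The prefix test the commented-out block should eventually restore amounts to (`InWorkspace` is a hypothetical name):

```pony
// A file belongs to a workspace when the workspace path is a leading
// prefix of the file's canonical path string.
primitive InWorkspace
  fun apply(ws_path: String, canonical_path: String): Bool =>
    canonical_path.compare_sub(ws_path, ws_path.size()) is Equal
```
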