chore: remove leading ">>>" in logging
BoboTiG committed Sep 23, 2024
1 parent 98ead1d commit d34fff9
Showing 7 changed files with 29 additions and 29 deletions.
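
The change is purely cosmetic: every log message loses its hard-coded ">>> " prefix, leaving record presentation to the logging configuration. As an aside, if such a prefix were ever wanted again, it could be attached once through a logging.Formatter instead of being repeated at every call site. The snippet below is a minimal sketch of that idea, not something this commit or the repository does; the logger name and file name are illustrative.

import logging

# Minimal sketch (assumption, not part of this commit): the Formatter adds the
# ">>> " marker once, so individual log calls stay prefix-free.
handler = logging.StreamHandler()
handler.setFormatter(logging.Formatter(">>> %(message)s"))

log = logging.getLogger("wikidict")  # logger name is illustrative
log.addHandler(handler)
log.setLevel(logging.INFO)

log.info("Loading %s ...", "data-20240923.json")  # hypothetical file name
# Output: >>> Loading data-20240923.json ...

Centralizing presentation this way keeps call sites clean, which is what the diff below achieves by deleting the prefixes outright.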
4 changes: 2 additions & 2 deletions wikidict/check_word.py
@@ -371,9 +371,9 @@ def check_word(word: str, locale: str) -> int:
         errors = len(results)
         for result in results:
             log.error(result)
-        log.warning(">>> [%s] - Errors: %s", word, errors)
+        log.warning("[%s] - Errors: %s", word, errors)
     else:
-        log.debug(">>> [%s] - OK", word)
+        log.debug("[%s] - OK", word)

     return errors

6 changes: 3 additions & 3 deletions wikidict/check_words.py
@@ -31,10 +31,10 @@ def get_words_to_tackle(
     else:
         output_dir = Path(os.getenv("CWD", "")) / "data" / locale
     if not (file := render.get_latest_json_file(output_dir)):
-        log.error(">>> No dump found. Run with --parse first ... ")
+        log.error("No dump found. Run with --parse first ... ")
         return []

-    log.info(">>> Loading %s ...", file)
+    log.info("Loading %s ...", file)
     words = list(render.load(file).keys())

     if count == -1:
@@ -66,6 +66,6 @@ def main(locale: str, count: int, is_random: bool, offset: str, input_file: str)
         err = pool.map(partial(local_check, locale=locale), words)

     if errors := sum(err):
-        log.warning(">>> TOTAL Errors: %d", errors)
+        log.warning("TOTAL Errors: %d", errors)

     return errors
10 changes: 5 additions & 5 deletions wikidict/convert.py
@@ -187,10 +187,10 @@ def compute_checksum(file: Path) -> None:
         checksum = hashlib.new(ASSET_CHECKSUM_ALGO, file.read_bytes()).hexdigest()
         checksum_file = file.with_suffix(f"{file.suffix}.{ASSET_CHECKSUM_ALGO}")
         checksum_file.write_text(f"{checksum} {file.name}")
-        log.info(">>> Crafted %s (%s)", checksum_file.name, checksum)
+        log.info("Crafted %s (%s)", checksum_file.name, checksum)

     def summary(self, file: Path) -> None:
-        log.info(">>> Generated %s (%d bytes)", file.name, file.stat().st_size)
+        log.info("Generated %s (%d bytes)", file.name, file.stat().st_size)
         self.compute_checksum(file)


@@ -546,10 +546,10 @@ def run_formatter(

 def load(file: Path) -> Words:
     """Load the big JSON file containing all words and their details."""
-    log.info(">>> Loading %s ...", file)
+    log.info("Loading %s ...", file)
     with file.open(encoding="utf-8") as fh:
         words: Words = {key: Word(*values) for key, values in json.load(fh).items()}
-    log.info(">>> Loaded %d words from %s", len(words), file)
+    log.info("Loaded %d words from %s", len(words), file)
     return words


@@ -600,7 +600,7 @@ def main(locale: str) -> int:
     output_dir = Path(os.getenv("CWD", "")) / "data" / locale
     file = get_latest_json_file(output_dir)
     if not file:
-        log.error(">>> No dump found. Run with --render first ... ")
+        log.error("No dump found. Run with --render first ... ")
         return 1

     # Get all words from the database
10 changes: 5 additions & 5 deletions wikidict/download.py
@@ -27,7 +27,7 @@ def decompress(file: Path, callback: Callable[[str, int, bool], None]) -> Path:
     if output.is_file():
         return output

-    msg = f">>> Uncompressing into {output.name}"
+    msg = f"Uncompressing into {output.name}"
     log.info(msg)

     comp = bz2.BZ2Decompressor()
@@ -64,7 +64,7 @@ def fetch_pages(date: str, locale: str, output_dir: Path, callback: Callable[[st
         return output

     url = DUMP_URL.format(locale, date)
-    msg = f">>> Fetching {url}"
+    msg = f"Fetching {url}"
     log.info(msg)

     with output.open(mode="wb") as fh, requests.get(url, stream=True) as req:
@@ -94,12 +94,12 @@ def main(locale: str) -> int:
         file = fetch_pages(snapshot, locale, output_dir, callback_progress)
     except HTTPError:
         (output_dir / f"pages-{snapshot}.xml.bz2").unlink(missing_ok=True)
-        log.exception(">>> Wiktionary dump is ongoing ... ")
-        log.info(">>> Will use the previous one.")
+        log.exception("Wiktionary dump is ongoing ... ")
+        log.info("Will use the previous one.")
         snapshot = snapshots[-2]
         file = fetch_pages(snapshot, locale, output_dir, callback_progress)

     decompress(file, callback_progress)

-    log.info(">>> Retrieval done!")
+    log.info("Retrieval done!")
     return 0
10 changes: 5 additions & 5 deletions wikidict/find_templates.py
@@ -51,13 +51,13 @@ def find_templates(in_words: dict[str, str], locale: str) -> None:
f.write(f" - {entry!r}\n")
else:
f.write(f" - {entries[0]!r}\n")
log.info(">>> File sections.txt created.")
log.info("File sections.txt created.")

if templates:
with open("templates.txt", "w", encoding="utf-8") as f:
for template, entry in sorted(templates.items()):
f.write(f"{entry!r} => {template!r}\n")
log.info(">>> File templates.txt created.")
log.info("File templates.txt created.")
else:
log.warning("No templates found.")

@@ -68,12 +68,12 @@ def main(locale: str) -> int:
     output_dir = Path(os.getenv("CWD", "")) / "data" / locale
     file = get_latest_json_file(output_dir)
     if not file:
-        log.error(">>> No dump found. Run with --parse first ... ")
+        log.error("No dump found. Run with --parse first ... ")
         return 1

-    log.info(">>> Loading %s ...", file)
+    log.info("Loading %s ...", file)
     in_words: dict[str, str] = load(file)

-    log.info(">>> Working, please be patient ...")
+    log.info("Working, please be patient ...")
     find_templates(in_words, locale)
     return 0
8 changes: 4 additions & 4 deletions wikidict/parse.py
@@ -66,7 +66,7 @@ def process(file: Path, locale: str) -> dict[str, str]:
"""Process the big XML file and retain only information we are interested in."""
words: dict[str, str] = defaultdict(str)

log.info(">>> Processing %s ...", file)
log.info("Processing %s ...", file)
for element in xml_iter_parse(file):
word, code = xml_parse_element(element, locale)
if word and code and ":" not in word:
@@ -81,7 +81,7 @@ def save(snapshot: str, words: dict[str, str], output_dir: Path) -> None:
     with raw_data.open(mode="w", encoding="utf-8") as fh:
         json.dump(words, fh, indent=4, sort_keys=True)

-    log.info(">>> Saved %s words into %s", len(words), raw_data)
+    log.info("Saved %s words into %s", len(words), raw_data)


 def get_latest_xml_file(output_dir: Path) -> Path | None:
@@ -96,12 +96,12 @@ def main(locale: str) -> int:
     output_dir = Path(os.getenv("CWD", "")) / "data" / locale
     file = get_latest_xml_file(output_dir)
     if not file:
-        log.error(">>> No dump found. Run with --download first ... ")
+        log.error("No dump found. Run with --download first ... ")
         return 1

     date = file.stem.split("-")[1]
     if not (output_dir / f"data_wikicode-{date}.json").is_file():
         words = process(file, locale)
         save(date, words, output_dir)
-    log.info(">>> Parse done!")
+    log.info("Parse done!")
     return 0
10 changes: 5 additions & 5 deletions wikidict/render.py
@@ -472,7 +472,7 @@ def load(file: Path) -> dict[str, str]:
"""Load the JSON file containing all words and their details."""
with file.open(encoding="utf-8") as fh:
words: dict[str, str] = json.load(fh)
log.info(">>> Loaded %d words from %s", len(words), file)
log.info("Loaded %d words from %s", len(words), file)
return words


@@ -507,7 +507,7 @@ def save(snapshot: str, words: Words, output_dir: Path) -> None:
     raw_data = output_dir / f"data-{snapshot}.json"
     with raw_data.open(mode="w", encoding="utf-8") as fh:
         json.dump(words, fh, indent=4, sort_keys=True)
-    log.info(">>> Saved %d words into %s", len(words), raw_data)
+    log.info("Saved %d words into %s", len(words), raw_data)


 def get_latest_json_file(output_dir: Path) -> Path | None:
@@ -522,10 +522,10 @@ def main(locale: str, workers: int = multiprocessing.cpu_count()) -> int:
     output_dir = Path(os.getenv("CWD", "")) / "data" / locale
     file = get_latest_json_file(output_dir)
     if not file:
-        log.error(">>> No dump found. Run with --parse first ... ")
+        log.error("No dump found. Run with --parse first ... ")
         return 1

-    log.info(">>> Loading %s ...", file)
+    log.info("Loading %s ...", file)
     in_words: dict[str, str] = load(file)

     workers = workers or multiprocessing.cpu_count()
@@ -536,5 +536,5 @@ def main(locale: str, workers: int = multiprocessing.cpu_count()) -> int:
     date = file.stem.split("-")[1]
     save(date, words, output_dir)

-    log.info(">>> Render done!")
+    log.info("Render done!")
     return 0
